aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoralexv-smirnov <alex@ydb.tech>2023-12-01 12:02:50 +0300
committeralexv-smirnov <alex@ydb.tech>2023-12-01 13:28:10 +0300
commit0e578a4c44d4abd539d9838347b9ebafaca41dfb (patch)
treea0c1969c37f818c830ebeff9c077eacf30be6ef8
parent84f2d3d4cc985e63217cff149bd2e6d67ae6fe22 (diff)
downloadydb-0e578a4c44d4abd539d9838347b9ebafaca41dfb.tar.gz
Change "ya.make"
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/.dist-info/METADATA432
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/.dist-info/top_level.txt2
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/LICENSE23
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/README.rst416
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/ordereddict.c4705
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/ordereddict.h214
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/ruamel/ordereddict/__init__.py7
-rw-r--r--contrib/deprecated/python/ruamel.ordereddict/ya.make36
-rw-r--r--contrib/python/PyJWT/py2/AUTHORS29
-rw-r--r--contrib/python/PyJWT/py2/LICENSE21
-rw-r--r--contrib/python/PyJWT/py2/README.rst81
-rw-r--r--contrib/python/PyJWT/py3/.dist-info/METADATA107
-rw-r--r--contrib/python/PyJWT/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/PyJWT/py3/AUTHORS.rst7
-rw-r--r--contrib/python/PyJWT/py3/LICENSE21
-rw-r--r--contrib/python/PyJWT/py3/README.rst62
-rw-r--r--contrib/python/PyJWT/py3/jwt/__init__.py70
-rw-r--r--contrib/python/PyJWT/py3/jwt/algorithms.py674
-rw-r--r--contrib/python/PyJWT/py3/jwt/api_jwk.py97
-rw-r--r--contrib/python/PyJWT/py3/jwt/api_jws.py259
-rw-r--r--contrib/python/PyJWT/py3/jwt/api_jwt.py221
-rw-r--r--contrib/python/PyJWT/py3/jwt/exceptions.py66
-rw-r--r--contrib/python/PyJWT/py3/jwt/help.py60
-rw-r--r--contrib/python/PyJWT/py3/jwt/jwks_client.py59
-rw-r--r--contrib/python/PyJWT/py3/jwt/py.typed0
-rw-r--r--contrib/python/PyJWT/py3/jwt/utils.py99
-rw-r--r--contrib/python/PyJWT/py3/ya.make39
-rw-r--r--contrib/python/PyJWT/ya.make18
-rw-r--r--contrib/python/PySocks/py2/.dist-info/METADATA321
-rw-r--r--contrib/python/PySocks/py2/.dist-info/top_level.txt2
-rw-r--r--contrib/python/PySocks/py2/LICENSE22
-rw-r--r--contrib/python/PySocks/py2/README.md300
-rw-r--r--contrib/python/PySocks/py2/socks.py847
-rw-r--r--contrib/python/PySocks/py2/sockshandler.py111
-rw-r--r--contrib/python/PySocks/py2/ya.make23
-rw-r--r--contrib/python/PySocks/py3/LICENSE22
-rw-r--r--contrib/python/PySocks/py3/README.md300
-rw-r--r--contrib/python/PySocks/ya.make18
-rw-r--r--contrib/python/blinker/py2/LICENSE.rst20
-rw-r--r--contrib/python/blinker/py2/README.rst40
-rw-r--r--contrib/python/blinker/py3/.dist-info/METADATA62
-rw-r--r--contrib/python/blinker/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/blinker/py3/LICENSE.rst20
-rw-r--r--contrib/python/blinker/py3/README.rst40
-rw-r--r--contrib/python/blinker/py3/blinker/__init__.py19
-rw-r--r--contrib/python/blinker/py3/blinker/_saferef.py230
-rw-r--r--contrib/python/blinker/py3/blinker/_utilities.py105
-rw-r--r--contrib/python/blinker/py3/blinker/base.py558
-rw-r--r--contrib/python/blinker/py3/blinker/py.typed0
-rw-r--r--contrib/python/blinker/py3/ya.make26
-rw-r--r--contrib/python/blinker/ya.make18
-rw-r--r--contrib/python/cachetools/py2/.dist-info/METADATA124
-rw-r--r--contrib/python/cachetools/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/cachetools/py2/LICENSE20
-rw-r--r--contrib/python/cachetools/py2/README.rst95
-rw-r--r--contrib/python/cachetools/py2/cachetools/__init__.py112
-rw-r--r--contrib/python/cachetools/py2/cachetools/abc.py52
-rw-r--r--contrib/python/cachetools/py2/cachetools/cache.py91
-rw-r--r--contrib/python/cachetools/py2/cachetools/func.py140
-rw-r--r--contrib/python/cachetools/py2/cachetools/keys.py54
-rw-r--r--contrib/python/cachetools/py2/cachetools/lfu.py35
-rw-r--r--contrib/python/cachetools/py2/cachetools/lru.py48
-rw-r--r--contrib/python/cachetools/py2/cachetools/rr.py36
-rw-r--r--contrib/python/cachetools/py2/cachetools/ttl.py220
-rw-r--r--contrib/python/cachetools/py2/ya.make30
-rw-r--r--contrib/python/cachetools/py3/.dist-info/METADATA148
-rw-r--r--contrib/python/cachetools/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/cachetools/py3/LICENSE20
-rw-r--r--contrib/python/cachetools/py3/README.rst123
-rw-r--r--contrib/python/cachetools/py3/cachetools/__init__.py844
-rw-r--r--contrib/python/cachetools/py3/cachetools/func.py117
-rw-r--r--contrib/python/cachetools/py3/cachetools/keys.py57
-rw-r--r--contrib/python/cachetools/py3/ya.make24
-rw-r--r--contrib/python/cachetools/ya.make18
-rw-r--r--contrib/python/google-auth/py2/.dist-info/METADATA105
-rw-r--r--contrib/python/google-auth/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/google-auth/py2/LICENSE201
-rw-r--r--contrib/python/google-auth/py2/README.rst63
-rw-r--r--contrib/python/google-auth/py2/google/auth/__init__.py29
-rw-r--r--contrib/python/google-auth/py2/google/auth/_cloud_sdk.py159
-rw-r--r--contrib/python/google-auth/py2/google/auth/_default.py488
-rw-r--r--contrib/python/google-auth/py2/google/auth/_helpers.py232
-rw-r--r--contrib/python/google-auth/py2/google/auth/_oauth2client.py169
-rw-r--r--contrib/python/google-auth/py2/google/auth/_service_account_info.py74
-rw-r--r--contrib/python/google-auth/py2/google/auth/app_engine.py179
-rw-r--r--contrib/python/google-auth/py2/google/auth/aws.py718
-rw-r--r--contrib/python/google-auth/py2/google/auth/compute_engine/__init__.py21
-rw-r--r--contrib/python/google-auth/py2/google/auth/compute_engine/_metadata.py267
-rw-r--r--contrib/python/google-auth/py2/google/auth/compute_engine/credentials.py413
-rw-r--r--contrib/python/google-auth/py2/google/auth/credentials.py362
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/__init__.py100
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/_cryptography_rsa.py136
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/_helpers.py0
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/_python_rsa.py173
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/base.py131
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/es256.py148
-rw-r--r--contrib/python/google-auth/py2/google/auth/crypt/rsa.py30
-rw-r--r--contrib/python/google-auth/py2/google/auth/downscoped.py499
-rw-r--r--contrib/python/google-auth/py2/google/auth/environment_vars.py78
-rw-r--r--contrib/python/google-auth/py2/google/auth/exceptions.py59
-rw-r--r--contrib/python/google-auth/py2/google/auth/external_account.py368
-rw-r--r--contrib/python/google-auth/py2/google/auth/iam.py100
-rw-r--r--contrib/python/google-auth/py2/google/auth/identity_pool.py279
-rw-r--r--contrib/python/google-auth/py2/google/auth/impersonated_credentials.py412
-rw-r--r--contrib/python/google-auth/py2/google/auth/jwt.py849
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/__init__.py97
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/_http_client.py115
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/_mtls_helper.py254
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/grpc.py349
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/mtls.py105
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/requests.py542
-rw-r--r--contrib/python/google-auth/py2/google/auth/transport/urllib3.py439
-rw-r--r--contrib/python/google-auth/py2/google/auth/version.py15
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/__init__.py15
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/_client.py327
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/challenges.py157
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/credentials.py479
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/id_token.py264
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/reauth.py341
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/service_account.py685
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/sts.py155
-rw-r--r--contrib/python/google-auth/py2/google/oauth2/utils.py171
-rw-r--r--contrib/python/google-auth/py2/tests/__init__.py0
-rw-r--r--contrib/python/google-auth/py2/tests/compute_engine/__init__.py0
-rw-r--r--contrib/python/google-auth/py2/tests/compute_engine/test__metadata.py373
-rw-r--r--contrib/python/google-auth/py2/tests/compute_engine/test_credentials.py798
-rw-r--r--contrib/python/google-auth/py2/tests/conftest.py45
-rw-r--r--contrib/python/google-auth/py2/tests/crypt/__init__.py0
-rw-r--r--contrib/python/google-auth/py2/tests/crypt/test__cryptography_rsa.py161
-rw-r--r--contrib/python/google-auth/py2/tests/crypt/test__python_rsa.py194
-rw-r--r--contrib/python/google-auth/py2/tests/crypt/test_crypt.py59
-rw-r--r--contrib/python/google-auth/py2/tests/crypt/test_es256.py144
-rw-r--r--contrib/python/google-auth/py2/tests/data/authorized_user.json6
-rw-r--r--contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk.json6
-rw-r--r--contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json7
-rw-r--r--contrib/python/google-auth/py2/tests/data/client_secrets.json14
-rw-r--r--contrib/python/google-auth/py2/tests/data/cloud_sdk_config.json19
-rw-r--r--contrib/python/google-auth/py2/tests/data/context_aware_metadata.json6
-rw-r--r--contrib/python/google-auth/py2/tests/data/es256_privatekey.pem5
-rw-r--r--contrib/python/google-auth/py2/tests/data/es256_public_cert.pem8
-rw-r--r--contrib/python/google-auth/py2/tests/data/es256_publickey.pem4
-rw-r--r--contrib/python/google-auth/py2/tests/data/es256_service_account.json10
-rw-r--r--contrib/python/google-auth/py2/tests/data/external_subject_token.json3
-rw-r--r--contrib/python/google-auth/py2/tests/data/external_subject_token.txt1
-rw-r--r--contrib/python/google-auth/py2/tests/data/old_oauth_credentials_py3.picklebin0 -> 283 bytes
-rw-r--r--contrib/python/google-auth/py2/tests/data/other_cert.pem33
-rw-r--r--contrib/python/google-auth/py2/tests/data/pem_from_pkcs12.pem32
-rw-r--r--contrib/python/google-auth/py2/tests/data/privatekey.p12bin0 -> 2452 bytes
-rw-r--r--contrib/python/google-auth/py2/tests/data/privatekey.pem27
-rw-r--r--contrib/python/google-auth/py2/tests/data/privatekey.pub8
-rw-r--r--contrib/python/google-auth/py2/tests/data/public_cert.pem19
-rw-r--r--contrib/python/google-auth/py2/tests/data/service_account.json10
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/__init__.py0
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test__client.py330
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_challenges.py132
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_credentials.py876
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_id_token.py228
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_reauth.py308
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_service_account.py433
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_sts.py395
-rw-r--r--contrib/python/google-auth/py2/tests/oauth2/test_utils.py264
-rw-r--r--contrib/python/google-auth/py2/tests/test__cloud_sdk.py188
-rw-r--r--contrib/python/google-auth/py2/tests/test__default.py782
-rw-r--r--contrib/python/google-auth/py2/tests/test__helpers.py170
-rw-r--r--contrib/python/google-auth/py2/tests/test__oauth2client.py171
-rw-r--r--contrib/python/google-auth/py2/tests/test__service_account_info.py63
-rw-r--r--contrib/python/google-auth/py2/tests/test_app_engine.py217
-rw-r--r--contrib/python/google-auth/py2/tests/test_aws.py1497
-rw-r--r--contrib/python/google-auth/py2/tests/test_credentials.py177
-rw-r--r--contrib/python/google-auth/py2/tests/test_downscoped.py694
-rw-r--r--contrib/python/google-auth/py2/tests/test_external_account.py1203
-rw-r--r--contrib/python/google-auth/py2/tests/test_iam.py102
-rw-r--r--contrib/python/google-auth/py2/tests/test_identity_pool.py900
-rw-r--r--contrib/python/google-auth/py2/tests/test_impersonated_credentials.py541
-rw-r--r--contrib/python/google-auth/py2/tests/test_jwt.py605
-rw-r--r--contrib/python/google-auth/py2/tests/transport/__init__.py0
-rw-r--r--contrib/python/google-auth/py2/tests/transport/compliance.py108
-rw-r--r--contrib/python/google-auth/py2/tests/transport/test__http_client.py31
-rw-r--r--contrib/python/google-auth/py2/tests/transport/test__mtls_helper.py443
-rw-r--r--contrib/python/google-auth/py2/tests/transport/test_grpc.py504
-rw-r--r--contrib/python/google-auth/py2/tests/transport/test_mtls.py83
-rw-r--r--contrib/python/google-auth/py2/tests/transport/test_requests.py506
-rw-r--r--contrib/python/google-auth/py2/tests/transport/test_urllib3.py307
-rw-r--r--contrib/python/google-auth/py2/tests/ya.make73
-rw-r--r--contrib/python/google-auth/py2/ya.make85
-rw-r--r--contrib/python/google-auth/py3/.dist-info/METADATA125
-rw-r--r--contrib/python/google-auth/py3/.dist-info/top_level.txt3
-rw-r--r--contrib/python/google-auth/py3/LICENSE201
-rw-r--r--contrib/python/google-auth/py3/README.rst82
-rw-r--r--contrib/python/google-auth/py3/google/auth/__init__.py33
-rw-r--r--contrib/python/google-auth/py3/google/auth/_cloud_sdk.py153
-rw-r--r--contrib/python/google-auth/py3/google/auth/_credentials_async.py171
-rw-r--r--contrib/python/google-auth/py3/google/auth/_default.py691
-rw-r--r--contrib/python/google-auth/py3/google/auth/_default_async.py282
-rw-r--r--contrib/python/google-auth/py3/google/auth/_exponential_backoff.py109
-rw-r--r--contrib/python/google-auth/py3/google/auth/_helpers.py245
-rw-r--r--contrib/python/google-auth/py3/google/auth/_jwt_async.py164
-rw-r--r--contrib/python/google-auth/py3/google/auth/_oauth2client.py167
-rw-r--r--contrib/python/google-auth/py3/google/auth/_service_account_info.py80
-rw-r--r--contrib/python/google-auth/py3/google/auth/api_key.py76
-rw-r--r--contrib/python/google-auth/py3/google/auth/app_engine.py180
-rw-r--r--contrib/python/google-auth/py3/google/auth/aws.py777
-rw-r--r--contrib/python/google-auth/py3/google/auth/compute_engine/__init__.py21
-rw-r--r--contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py322
-rw-r--r--contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py445
-rw-r--r--contrib/python/google-auth/py3/google/auth/credentials.py410
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/__init__.py98
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/_cryptography_rsa.py136
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/_helpers.py0
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/_python_rsa.py175
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/base.py127
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/es256.py160
-rw-r--r--contrib/python/google-auth/py3/google/auth/crypt/rsa.py30
-rw-r--r--contrib/python/google-auth/py3/google/auth/downscoped.py504
-rw-r--r--contrib/python/google-auth/py3/google/auth/environment_vars.py84
-rw-r--r--contrib/python/google-auth/py3/google/auth/exceptions.py100
-rw-r--r--contrib/python/google-auth/py3/google/auth/external_account.py523
-rw-r--r--contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py350
-rw-r--r--contrib/python/google-auth/py3/google/auth/iam.py99
-rw-r--r--contrib/python/google-auth/py3/google/auth/identity_pool.py261
-rw-r--r--contrib/python/google-auth/py3/google/auth/impersonated_credentials.py462
-rw-r--r--contrib/python/google-auth/py3/google/auth/jwt.py878
-rw-r--r--contrib/python/google-auth/py3/google/auth/metrics.py154
-rw-r--r--contrib/python/google-auth/py3/google/auth/pluggable.py429
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/__init__.py103
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/_aiohttp_requests.py390
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/_custom_tls_signer.py234
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/_http_client.py113
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/_mtls_helper.py252
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/grpc.py343
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/mtls.py103
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/requests.py604
-rw-r--r--contrib/python/google-auth/py3/google/auth/transport/urllib3.py437
-rw-r--r--contrib/python/google-auth/py3/google/auth/version.py15
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/__init__.py15
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/_client.py507
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/_client_async.py292
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/_credentials_async.py112
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/_id_token_async.py285
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/_reauth_async.py328
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/_service_account_async.py132
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/challenges.py203
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/credentials.py545
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/gdch_credentials.py251
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/id_token.py339
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/reauth.py368
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/service_account.py819
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/sts.py176
-rw-r--r--contrib/python/google-auth/py3/google/oauth2/utils.py168
-rw-r--r--contrib/python/google-auth/py3/tests/__init__.py0
-rw-r--r--contrib/python/google-auth/py3/tests/compute_engine/__init__.py0
-rw-r--r--contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name1
-rw-r--r--contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name_non_google1
-rw-r--r--contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py450
-rw-r--r--contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py875
-rw-r--r--contrib/python/google-auth/py3/tests/conftest.py45
-rw-r--r--contrib/python/google-auth/py3/tests/crypt/__init__.py0
-rw-r--r--contrib/python/google-auth/py3/tests/crypt/test__cryptography_rsa.py162
-rw-r--r--contrib/python/google-auth/py3/tests/crypt/test__python_rsa.py194
-rw-r--r--contrib/python/google-auth/py3/tests/crypt/test_crypt.py59
-rw-r--r--contrib/python/google-auth/py3/tests/crypt/test_es256.py144
-rw-r--r--contrib/python/google-auth/py3/tests/data/authorized_user.json6
-rw-r--r--contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk.json6
-rw-r--r--contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json7
-rw-r--r--contrib/python/google-auth/py3/tests/data/authorized_user_with_rapt_token.json8
-rw-r--r--contrib/python/google-auth/py3/tests/data/client_secrets.json14
-rw-r--r--contrib/python/google-auth/py3/tests/data/context_aware_metadata.json6
-rw-r--r--contrib/python/google-auth/py3/tests/data/enterprise_cert_invalid.json3
-rw-r--r--contrib/python/google-auth/py3/tests/data/enterprise_cert_valid.json6
-rw-r--r--contrib/python/google-auth/py3/tests/data/es256_privatekey.pem5
-rw-r--r--contrib/python/google-auth/py3/tests/data/es256_public_cert.pem8
-rw-r--r--contrib/python/google-auth/py3/tests/data/es256_publickey.pem4
-rw-r--r--contrib/python/google-auth/py3/tests/data/es256_service_account.json10
-rw-r--r--contrib/python/google-auth/py3/tests/data/external_account_authorized_user.json9
-rw-r--r--contrib/python/google-auth/py3/tests/data/external_subject_token.json3
-rw-r--r--contrib/python/google-auth/py3/tests/data/external_subject_token.txt1
-rw-r--r--contrib/python/google-auth/py3/tests/data/gdch_service_account.json11
-rw-r--r--contrib/python/google-auth/py3/tests/data/impersonated_service_account_authorized_user_source.json13
-rw-r--r--contrib/python/google-auth/py3/tests/data/impersonated_service_account_service_account_source.json17
-rw-r--r--contrib/python/google-auth/py3/tests/data/impersonated_service_account_with_quota_project.json14
-rw-r--r--contrib/python/google-auth/py3/tests/data/old_oauth_credentials_py3.picklebin0 -> 283 bytes
-rw-r--r--contrib/python/google-auth/py3/tests/data/other_cert.pem33
-rw-r--r--contrib/python/google-auth/py3/tests/data/pem_from_pkcs12.pem32
-rw-r--r--contrib/python/google-auth/py3/tests/data/privatekey.p12bin0 -> 2452 bytes
-rw-r--r--contrib/python/google-auth/py3/tests/data/privatekey.pem27
-rw-r--r--contrib/python/google-auth/py3/tests/data/privatekey.pub8
-rw-r--r--contrib/python/google-auth/py3/tests/data/public_cert.pem19
-rw-r--r--contrib/python/google-auth/py3/tests/data/service_account.json10
-rw-r--r--contrib/python/google-auth/py3/tests/data/service_account_non_gdu.json15
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/__init__.py0
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test__client.py622
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_challenges.py198
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_credentials.py997
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_gdch_credentials.py175
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_id_token.py312
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_reauth.py388
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_service_account.py789
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_sts.py480
-rw-r--r--contrib/python/google-auth/py3/tests/oauth2/test_utils.py264
-rw-r--r--contrib/python/google-auth/py3/tests/test__cloud_sdk.py182
-rw-r--r--contrib/python/google-auth/py3/tests/test__default.py1352
-rw-r--r--contrib/python/google-auth/py3/tests/test__exponential_backoff.py41
-rw-r--r--contrib/python/google-auth/py3/tests/test__helpers.py170
-rw-r--r--contrib/python/google-auth/py3/tests/test__oauth2client.py178
-rw-r--r--contrib/python/google-auth/py3/tests/test__service_account_info.py83
-rw-r--r--contrib/python/google-auth/py3/tests/test_api_key.py45
-rw-r--r--contrib/python/google-auth/py3/tests/test_app_engine.py217
-rw-r--r--contrib/python/google-auth/py3/tests/test_aws.py2125
-rw-r--r--contrib/python/google-auth/py3/tests/test_credentials.py224
-rw-r--r--contrib/python/google-auth/py3/tests/test_downscoped.py696
-rw-r--r--contrib/python/google-auth/py3/tests/test_exceptions.py55
-rw-r--r--contrib/python/google-auth/py3/tests/test_external_account.py1900
-rw-r--r--contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py512
-rw-r--r--contrib/python/google-auth/py3/tests/test_iam.py102
-rw-r--r--contrib/python/google-auth/py3/tests/test_identity_pool.py1302
-rw-r--r--contrib/python/google-auth/py3/tests/test_impersonated_credentials.py660
-rw-r--r--contrib/python/google-auth/py3/tests/test_jwt.py671
-rw-r--r--contrib/python/google-auth/py3/tests/test_metrics.py96
-rw-r--r--contrib/python/google-auth/py3/tests/test_packaging.py30
-rw-r--r--contrib/python/google-auth/py3/tests/test_pluggable.py1250
-rw-r--r--contrib/python/google-auth/py3/tests/transport/__init__.py0
-rw-r--r--contrib/python/google-auth/py3/tests/transport/compliance.py108
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test__custom_tls_signer.py234
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test__http_client.py31
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test__mtls_helper.py441
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test_grpc.py503
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test_mtls.py83
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test_requests.py575
-rw-r--r--contrib/python/google-auth/py3/tests/transport/test_urllib3.py322
-rw-r--r--contrib/python/google-auth/py3/tests/ya.make77
-rw-r--r--contrib/python/google-auth/py3/ya.make100
-rw-r--r--contrib/python/google-auth/ya.make18
-rw-r--r--contrib/python/httplib2/py2/.dist-info/METADATA76
-rw-r--r--contrib/python/httplib2/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/httplib2/py2/LICENSE23
-rw-r--r--contrib/python/httplib2/py2/README.md113
-rw-r--r--contrib/python/httplib2/py2/httplib2/__init__.py1989
-rw-r--r--contrib/python/httplib2/py2/httplib2/auth.py63
-rw-r--r--contrib/python/httplib2/py2/httplib2/certs.py42
-rw-r--r--contrib/python/httplib2/py2/httplib2/error.py48
-rw-r--r--contrib/python/httplib2/py2/httplib2/iri2uri.py123
-rw-r--r--contrib/python/httplib2/py2/httplib2/socks.py518
-rw-r--r--contrib/python/httplib2/py2/ya.make33
-rw-r--r--contrib/python/httplib2/py3/.dist-info/METADATA75
-rw-r--r--contrib/python/httplib2/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/httplib2/py3/LICENSE23
-rw-r--r--contrib/python/httplib2/py3/README.md115
-rw-r--r--contrib/python/httplib2/py3/httplib2/__init__.py1799
-rw-r--r--contrib/python/httplib2/py3/httplib2/auth.py69
-rw-r--r--contrib/python/httplib2/py3/httplib2/certs.py42
-rw-r--r--contrib/python/httplib2/py3/httplib2/error.py48
-rw-r--r--contrib/python/httplib2/py3/httplib2/iri2uri.py124
-rw-r--r--contrib/python/httplib2/py3/httplib2/socks.py518
-rw-r--r--contrib/python/httplib2/py3/ya.make32
-rw-r--r--contrib/python/httplib2/ya.make18
-rw-r--r--contrib/python/kubernetes/.dist-info/METADATA40
-rw-r--r--contrib/python/kubernetes/.dist-info/top_level.txt1
-rw-r--r--contrib/python/kubernetes/LICENSE202
-rw-r--r--contrib/python/kubernetes/README.md226
-rw-r--r--contrib/python/kubernetes/kubernetes/__init__.py25
-rw-r--r--contrib/python/kubernetes/kubernetes/client/__init__.py645
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/__init__.py63
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/admissionregistration_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1_api.py2196
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1alpha1_api.py2610
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1beta1_api.py2610
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apiextensions_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apiextensions_v1_api.py1583
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apiregistration_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apiregistration_v1_api.py1583
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apis_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apps_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/apps_v1_api.py9479
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/authentication_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/authentication_v1_api.py410
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/authentication_v1alpha1_api.py276
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/authentication_v1beta1_api.py276
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/authorization_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/authorization_v1_api.py687
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/autoscaling_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/autoscaling_v1_api.py1833
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/autoscaling_v2_api.py1833
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/batch_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/batch_v1_api.py3524
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/certificates_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/certificates_v1_api.py1997
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/certificates_v1alpha1_api.py1169
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/coordination_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/coordination_v1_api.py1392
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/core_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/core_v1_api.py29863
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/custom_objects_api.py4429
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/discovery_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/discovery_v1_api.py1392
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/events_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/events_v1_api.py1392
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta2_api.py3024
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta3_api.py3024
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_v1alpha1_api.py1583
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/logs_api.py244
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/networking_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/networking_v1_api.py4110
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/networking_v1alpha1_api.py2196
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/node_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/node_v1_api.py1169
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/openid_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/policy_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/policy_v1_api.py1833
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_v1_api.py4696
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/resource_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/resource_v1alpha2_api.py5801
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/scheduling_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/scheduling_v1_api.py1169
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/storage_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/storage_v1_api.py5914
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/version_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api/well_known_api.py142
-rw-r--r--contrib/python/kubernetes/kubernetes/client/api_client.py647
-rw-r--r--contrib/python/kubernetes/kubernetes/client/apis/__init__.py13
-rw-r--r--contrib/python/kubernetes/kubernetes/client/configuration.py405
-rw-r--r--contrib/python/kubernetes/kubernetes/client/exceptions.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/__init__.py573
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_service_reference.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_webhook_client_config.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_service_reference.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_webhook_client_config.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/apiregistration_v1_service_reference.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/authentication_v1_token_request.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/core_v1_endpoint_port.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/core_v1_event.py562
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/core_v1_event_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/core_v1_event_series.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/discovery_v1_endpoint_port.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/events_v1_event.py561
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/events_v1_event_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/events_v1_event_series.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/storage_v1_token_request.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_affinity.py172
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_aggregation_rule.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_group.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_group_list.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_resource.py379
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_resource_list.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_service.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_service_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_service_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_service_spec.py293
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_service_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_api_versions.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_attached_volume.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_azure_disk_volume_source.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_persistent_volume_source.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_volume_source.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_binding.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_bound_object_reference.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_capabilities.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_persistent_volume_source.py261
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_volume_source.py261
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_condition.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_spec.py323
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_status.py153
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cinder_persistent_volume_source.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cinder_volume_source.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_claim_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_client_ip_config.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role.py230
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding.py231
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_component_condition.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_component_status.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_component_status_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_condition.py267
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map.py260
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map_env_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map_key_selector.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map_node_config_source.py237
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map_projection.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_config_map_volume_source.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container.py755
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_image.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_port.py235
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_resize_policy.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_state.py172
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_state_running.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_state_terminated.py291
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_state_waiting.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_container_status.py401
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision.py233
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cron_job.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_spec.py318
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_status.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_cross_version_object_reference.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_spec.py318
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_node.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_driver.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_spec.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_persistent_volume_source.py366
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity.py287
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_csi_volume_source.py233
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_column_definition.py265
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_conversion.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_names.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_spec.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_status.py176
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_version.py317
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresource_scale.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresources.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_validation.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_endpoint.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_spec.py230
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_status.py378
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_update_strategy.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_delete_options.py288
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_deployment.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_deployment_condition.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_deployment_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_deployment_spec.py314
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_deployment_status.py318
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_deployment_strategy.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_projection.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_file.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_empty_dir_volume_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint.py313
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_address.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_conditions.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_hints.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_subset.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoints.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_endpoints_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_env_from_source.py174
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_env_var.py177
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_env_var_source.py198
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_container.py783
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_volume_source.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_event_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_eviction.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_exec_action.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_external_documentation.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_fc_volume_source.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_flex_persistent_volume_source.py233
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_flex_volume_source.py233
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_flocker_volume_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_for_zone.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_gce_persistent_disk_volume_source.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_git_repo_volume_source.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_persistent_volume_source.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_volume_source.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_group_version_for_discovery.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_grpc_action.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_status.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_host_alias.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_host_ip.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_host_path_volume_source.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_http_get_action.py235
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_http_header.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_path.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_rule_value.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_backend.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_parameters_reference.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_spec.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_ingress.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_port_status.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_rule.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_service_backend.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_spec.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_status.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ingress_tls.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_ip_block.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_persistent_volume_source.py403
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_volume_source.py403
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_job.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_job_condition.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_job_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_job_spec.py481
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_job_status.py400
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_job_template_spec.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_json_schema_props.py1264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_key_to_path.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_label_selector.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_label_selector_requirement.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_lease.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_lease_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_lease_spec.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle_handler.py172
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_limit_range.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_item.py263
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_spec.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_list_meta.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_ingress.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_local_object_reference.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_local_subject_access_review.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_local_volume_source.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_managed_fields_entry.py290
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_match_condition.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook.py428
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_namespace.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_namespace_condition.py232
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_namespace_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_namespace_spec.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_namespace_status.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_egress_rule.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_ingress_rule.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_peer.py172
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_port.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_spec.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_nfs_volume_source.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_address.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_affinity.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_condition.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_config_source.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_config_status.py200
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_daemon_endpoints.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_selector.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_requirement.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_term.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_spec.py288
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_status.py396
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_node_system_info.py384
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_attributes.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_rule.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_object_field_selector.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_object_meta.py514
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_object_reference.py290
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_overhead.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_owner_reference.py266
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_condition.py260
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_spec.py310
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_status.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_template.py147
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_volume_source.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_spec.py886
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_status.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_photon_persistent_disk_volume_source.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity_term.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_anti_affinity.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_condition.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_spec.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_status.py294
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config_option.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_exit_codes_requirement.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_pod_conditions_pattern.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_rule.py177
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_ip.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_os.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_readiness_gate.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim_status.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_scheduling_gate.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_security_context.py368
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_spec.py1179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_status.py542
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_template.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_spec.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_policy_rule.py235
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_port_status.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_portworx_volume_source.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_preconditions.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_preferred_scheduling_term.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_priority_class.py289
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_priority_class_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_probe.py366
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_projected_volume_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_quobyte_volume_source.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_rbd_persistent_volume_source.py318
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_rbd_volume_source.py318
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replica_set.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_spec.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_status.py263
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_spec.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_status.py263
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_attributes.py290
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_claim.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_field_selector.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_spec.py176
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_status.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_requirements.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_resource_rule.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_role.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_role_binding.py231
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_role_binding_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_role_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_role_ref.py181
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_daemon_set.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_deployment.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_rule_with_operations.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class.py257
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scale.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_persistent_volume_source.py375
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_volume_source.py375
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scale_spec.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scale_status.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scheduling.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scope_selector.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_scoped_resource_selector_requirement.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_se_linux_options.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_seccomp_profile.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret.py288
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret_env_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret_key_selector.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret_projection.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret_reference.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_secret_volume_source.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_security_context.py394
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review_spec.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review_status.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review_spec.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_server_address_by_client_cidr.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_account.py260
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_account_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_account_token_projection.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_backend_port.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_port.py263
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_spec.py624
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_service_status.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_session_affinity_config.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_ordinals.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_persistent_volume_claim_retention_policy.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_spec.py395
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_status.py375
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_update_strategy.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_status.py314
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_status_cause.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_status_details.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_storage_class.py373
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_storage_class_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_persistent_volume_source.py232
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_volume_source.py232
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_subject.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_spec.py258
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_status.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_subject_rules_review_status.py209
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_sysctl.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_taint.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_tcp_socket_action.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_token_request_spec.py177
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_token_request_status.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_token_review.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_token_review_spec.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_token_review_status.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_toleration.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_label_requirement.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_term.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_topology_spread_constraint.py319
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_typed_local_object_reference.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_typed_object_reference.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_uncounted_terminated_pods.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_user_info.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook.py400
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_validation_rule.py235
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume.py877
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_source.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_spec.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_status.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_device.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_error.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_mount.py264
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_affinity.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_resources.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_volume_projection.py198
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_vsphere_virtual_disk_volume_source.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_watch_event.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_webhook_conversion.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_weighted_pod_affinity_term.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1_windows_security_context_options.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_audit_annotation.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_spec.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_spec.py151
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_expression_warning.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_spec.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_condition.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_resources.py230
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_named_rule_with_operations.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_kind.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_ref.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_parent_reference.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review_status.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_server_storage_version.py206
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version.py232
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_condition.py265
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_status.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_type_checking.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_list.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_spec.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_list.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_spec.py286
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_status.py176
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validation.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha1_variable.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_allocation_result.py176
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_spec.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim.py229
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_consumer_reference.py209
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_parameters_reference.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_scheduling_status.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_spec.py177
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_status.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_spec.py147
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class.py257
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_parameters_reference.py208
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_handle.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_audit_annotation.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_expression_warning.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_condition.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_resources.py230
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_named_rule_with_operations.py262
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_kind.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_ref.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review_status.py120
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_type_checking.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_list.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_spec.py202
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_list.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_spec.py286
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_status.py176
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_validation.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta1_variable.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_exempt_priority_level_configuration.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_distinguisher_method.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_condition.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_spec.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_group_subject.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_limit_response.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_limited_priority_level_configuration.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_non_resource_policy_rule.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_policy_rules_with_subjects.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_condition.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_reference.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_spec.py175
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_queuing_configuration.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_resource_policy_rule.py237
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_service_account_subject.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_subject.py201
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta2_user_subject.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_exempt_priority_level_configuration.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_distinguisher_method.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_condition.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_spec.py203
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_group_subject.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_limit_response.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_limited_priority_level_configuration.py204
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_non_resource_policy_rule.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_policy_rules_with_subjects.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_condition.py234
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_reference.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_spec.py175
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_status.py122
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_queuing_configuration.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_resource_policy_rule.py237
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_service_account_subject.py152
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_subject.py201
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v1beta3_user_subject.py123
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_source.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_status.py179
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_cross_version_object_reference.py180
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_source.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_status.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler.py228
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_behavior.py146
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_condition.py236
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_list.py205
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_spec.py232
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_status.py263
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_policy.py181
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_rules.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_metric_identifier.py149
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_metric_spec.py253
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_metric_status.py253
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_metric_target.py207
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_metric_value_status.py178
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_source.py175
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_status.py175
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_source.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_status.py148
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_source.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_status.py150
-rw-r--r--contrib/python/kubernetes/kubernetes/client/models/version_info.py337
-rw-r--r--contrib/python/kubernetes/kubernetes/client/rest.py305
-rw-r--r--contrib/python/kubernetes/kubernetes/config/__init__.py49
-rw-r--r--contrib/python/kubernetes/kubernetes/config/config_exception.py17
-rw-r--r--contrib/python/kubernetes/kubernetes/config/dateutil.py84
-rw-r--r--contrib/python/kubernetes/kubernetes/config/exec_provider.py100
-rw-r--r--contrib/python/kubernetes/kubernetes/config/incluster_config.py121
-rw-r--r--contrib/python/kubernetes/kubernetes/config/kube_config.py893
-rw-r--r--contrib/python/kubernetes/kubernetes/dynamic/__init__.py15
-rw-r--r--contrib/python/kubernetes/kubernetes/dynamic/client.py320
-rw-r--r--contrib/python/kubernetes/kubernetes/dynamic/discovery.py433
-rw-r--r--contrib/python/kubernetes/kubernetes/dynamic/exceptions.py110
-rw-r--r--contrib/python/kubernetes/kubernetes/dynamic/resource.py403
-rw-r--r--contrib/python/kubernetes/kubernetes/leaderelection/__init__.py13
-rw-r--r--contrib/python/kubernetes/kubernetes/leaderelection/electionconfig.py59
-rw-r--r--contrib/python/kubernetes/kubernetes/leaderelection/leaderelection.py191
-rw-r--r--contrib/python/kubernetes/kubernetes/leaderelection/leaderelectionrecord.py22
-rw-r--r--contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/__init__.py13
-rw-r--r--contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/configmaplock.py129
-rw-r--r--contrib/python/kubernetes/kubernetes/stream/__init__.py15
-rw-r--r--contrib/python/kubernetes/kubernetes/stream/stream.py41
-rw-r--r--contrib/python/kubernetes/kubernetes/stream/ws_client.py562
-rw-r--r--contrib/python/kubernetes/kubernetes/utils/__init__.py19
-rw-r--r--contrib/python/kubernetes/kubernetes/utils/create_from_yaml.py287
-rw-r--r--contrib/python/kubernetes/kubernetes/utils/quantity.py75
-rw-r--r--contrib/python/kubernetes/kubernetes/watch/__init__.py15
-rw-r--r--contrib/python/kubernetes/kubernetes/watch/watch.py200
-rw-r--r--contrib/python/kubernetes/ya.make684
-rw-r--r--contrib/python/monotonic/py2/.dist-info/METADATA40
-rw-r--r--contrib/python/monotonic/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/monotonic/py2/LICENSE202
-rw-r--r--contrib/python/monotonic/py2/README.md49
-rw-r--r--contrib/python/monotonic/py2/monotonic.py170
-rw-r--r--contrib/python/monotonic/py2/ya.make22
-rw-r--r--contrib/python/monotonic/py3/LICENSE202
-rw-r--r--contrib/python/monotonic/py3/README.md49
-rw-r--r--contrib/python/monotonic/ya.make18
-rw-r--r--contrib/python/oauth2client/py2/.dist-info/METADATA34
-rw-r--r--contrib/python/oauth2client/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/oauth2client/py2/LICENSE210
-rw-r--r--contrib/python/oauth2client/py2/README.md33
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/__init__.py24
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/_helpers.py341
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/_openssl_crypt.py136
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/_pkce.py67
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/_pure_python_crypt.py184
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/_pycrypto_crypt.py124
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/client.py2170
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/clientsecrets.py173
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/__init__.py6
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/_appengine_ndb.py163
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/_metadata.py118
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/appengine.py910
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/devshell.py152
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/dictionary_storage.py65
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/__init__.py489
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/apps.py32
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/decorators.py145
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/models.py82
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/signals.py28
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/site.py26
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/storage.py81
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/django_util/views.py193
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/flask_util.py557
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/gce.py156
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/keyring_storage.py95
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/multiprocess_file_storage.py355
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/sqlalchemy.py173
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/contrib/xsrfutil.py101
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/crypt.py250
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/file.py95
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/service_account.py685
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/tools.py256
-rw-r--r--contrib/python/oauth2client/py2/oauth2client/transport.py285
-rw-r--r--contrib/python/oauth2client/py2/ya.make68
-rw-r--r--contrib/python/oauth2client/py3/.dist-info/METADATA34
-rw-r--r--contrib/python/oauth2client/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/oauth2client/py3/LICENSE210
-rw-r--r--contrib/python/oauth2client/py3/README.md33
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/__init__.py24
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/_helpers.py341
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/_openssl_crypt.py136
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/_pkce.py67
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/_pure_python_crypt.py184
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/_pycrypto_crypt.py124
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/client.py2170
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/clientsecrets.py173
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/__init__.py6
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/_appengine_ndb.py163
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/_metadata.py118
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/appengine.py910
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/devshell.py152
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/dictionary_storage.py65
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/__init__.py489
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/apps.py32
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/decorators.py145
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/models.py82
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/signals.py28
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/site.py26
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/storage.py81
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/django_util/views.py193
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/flask_util.py557
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/gce.py156
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/keyring_storage.py95
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/multiprocess_file_storage.py355
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/sqlalchemy.py173
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/contrib/xsrfutil.py101
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/crypt.py250
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/file.py95
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/service_account.py685
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/tools.py256
-rw-r--r--contrib/python/oauth2client/py3/oauth2client/transport.py285
-rw-r--r--contrib/python/oauth2client/py3/ya.make68
-rw-r--r--contrib/python/oauth2client/ya.make18
-rw-r--r--contrib/python/oauthlib/.dist-info/METADATA179
-rw-r--r--contrib/python/oauthlib/.dist-info/top_level.txt1
-rw-r--r--contrib/python/oauthlib/LICENSE27
-rw-r--r--contrib/python/oauthlib/README.rst137
-rw-r--r--contrib/python/oauthlib/oauthlib/__init__.py34
-rw-r--r--contrib/python/oauthlib/oauthlib/common.py432
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/__init__.py23
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/__init__.py365
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/__init__.py8
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/access_token.py215
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/authorization.py158
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/base.py244
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/pre_configured.py14
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/request_token.py209
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/resource.py163
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/signature_only.py82
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/errors.py76
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/parameters.py133
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/request_validator.py849
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/signature.py852
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth1/rfc5849/utils.py83
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/__init__.py36
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/__init__.py16
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/__init__.py14
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/backend_application.py74
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/base.py604
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/legacy_application.py84
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/mobile_application.py174
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/service_application.py189
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/web_application.py222
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/__init__.py17
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/authorization.py114
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/base.py113
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/introspect.py120
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/metadata.py238
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/pre_configured.py216
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/resource.py84
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/revocation.py126
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/token.py119
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/errors.py400
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/__init__.py11
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py548
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/base.py268
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py123
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/implicit.py376
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py136
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py199
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/parameters.py471
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/request_validator.py680
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/tokens.py356
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc6749/utils.py83
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc8628/__init__.py10
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/__init__.py8
-rw-r--r--contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/device.py95
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/__init__.py7
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/__init__.py0
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/__init__.py0
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/__init__.py9
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/pre_configured.py97
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/userinfo.py106
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/exceptions.py149
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/__init__.py13
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/authorization_code.py43
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/base.py326
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/dispatchers.py101
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/hybrid.py63
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/implicit.py51
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/refresh_token.py34
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/request_validator.py320
-rw-r--r--contrib/python/oauthlib/oauthlib/openid/connect/core/tokens.py48
-rw-r--r--contrib/python/oauthlib/oauthlib/signals.py40
-rw-r--r--contrib/python/oauthlib/oauthlib/uri_validate.py190
-rw-r--r--contrib/python/oauthlib/tests/__init__.py3
-rw-r--r--contrib/python/oauthlib/tests/oauth1/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_access_token.py91
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_authorization.py54
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_base.py406
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_request_token.py90
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_resource.py102
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_signature_only.py50
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/test_client.py269
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/test_parameters.py90
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/test_request_validator.py68
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/test_signatures.py896
-rw-r--r--contrib/python/oauthlib/tests/oauth1/rfc5849/test_utils.py138
-rw-r--r--contrib/python/oauthlib/tests/oauth2/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_backend_application.py86
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_base.py355
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_legacy_application.py140
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_mobile_application.py111
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_service_application.py185
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_web_application.py269
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_base_endpoint.py75
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_client_authentication.py162
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py128
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_error_responses.py491
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_extra_credentials.py69
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_introspect_endpoint.py168
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_metadata.py148
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_resource_owner_association.py108
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py148
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_scope_handling.py193
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_utils.py11
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_authorization_code.py382
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_client_credentials.py76
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_implicit.py62
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_refresh_token.py211
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_resource_owner_password.py156
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/test_parameters.py304
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/test_request_validator.py51
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/test_server.py391
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/test_tokens.py170
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc6749/test_utils.py100
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc8628/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc8628/clients/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/oauth2/rfc8628/clients/test_device.py63
-rw-r--r--contrib/python/oauthlib/tests/openid/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/endpoints/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_claims_handling.py107
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_openid_connect_params_handling.py78
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_userinfo_endpoint.py67
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/__init__.py0
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_authorization_code.py200
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_base.py104
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_dispatchers.py122
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_hybrid.py102
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_implicit.py170
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_refresh_token.py105
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/test_request_validator.py50
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/test_server.py184
-rw-r--r--contrib/python/oauthlib/tests/openid/connect/core/test_tokens.py157
-rw-r--r--contrib/python/oauthlib/tests/test_common.py243
-rw-r--r--contrib/python/oauthlib/tests/test_uri_validate.py84
-rw-r--r--contrib/python/oauthlib/tests/unittest/__init__.py32
-rw-r--r--contrib/python/oauthlib/tests/ya.make88
-rw-r--r--contrib/python/oauthlib/ya.make93
-rw-r--r--contrib/python/pyOpenSSL/py3/.dist-info/METADATA198
-rw-r--r--contrib/python/pyOpenSSL/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/SSL.py2505
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/__init__.py32
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/_util.py155
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/crypto.py3288
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/debug.py42
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/rand.py40
-rw-r--r--contrib/python/pyOpenSSL/py3/OpenSSL/version.py28
-rw-r--r--contrib/python/pyOpenSSL/py3/ya.make37
-rw-r--r--contrib/python/pyasn1-modules/py2/.dist-info/METADATA74
-rw-r--r--contrib/python/pyasn1-modules/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pyasn1-modules/py2/LICENSE.txt24
-rw-r--r--contrib/python/pyasn1-modules/py2/README.md32
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/__init__.py2
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/pem.py65
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1155.py96
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1157.py126
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1901.py22
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1902.py129
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1905.py135
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2251.py563
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2314.py48
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2315.py294
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2437.py69
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2459.py1339
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2511.py258
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2560.py225
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2631.py37
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2634.py336
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2876.py56
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2985.py588
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2986.py75
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3058.py42
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3114.py77
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3125.py469
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3161.py142
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3274.py59
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3279.py260
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3280.py1543
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3281.py331
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3370.py146
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3412.py53
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3414.py28
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3447.py45
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3537.py34
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3560.py74
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3565.py57
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3657.py66
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3709.py207
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3739.py203
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3770.py75
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3779.py137
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3820.py65
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3852.py706
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4010.py58
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4043.py43
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4055.py258
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4073.py59
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4108.py350
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4210.py803
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4211.py396
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4334.py75
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4357.py477
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4387.py23
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4476.py93
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4490.py113
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4491.py44
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4683.py72
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4985.py49
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5035.py199
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5083.py52
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5084.py97
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5126.py577
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5208.py56
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5275.py404
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5280.py1658
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5480.py190
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5636.py113
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5639.py49
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5649.py33
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5652.py761
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5697.py70
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5751.py124
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5752.py49
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5753.py157
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5755.py398
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5913.py44
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5914.py119
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5915.py32
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5916.py35
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5917.py55
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5924.py19
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5934.py786
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5940.py59
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5958.py98
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5990.py237
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6010.py88
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6019.py45
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6031.py469
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6032.py68
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6120.py43
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6170.py17
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6187.py22
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6210.py42
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6211.py72
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6402.py628
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6482.py74
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6486.py68
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6487.py22
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6664.py147
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6955.py108
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6960.py223
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7030.py66
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7191.py261
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7229.py29
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7292.py357
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7296.py32
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7508.py90
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7585.py50
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7633.py38
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7773.py52
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7894.py92
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7906.py736
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7914.py49
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8017.py153
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8018.py260
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8103.py36
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8209.py20
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8226.py149
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8358.py50
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8360.py44
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8398.py52
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8410.py43
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8418.py36
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8419.py68
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8479.py45
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8494.py80
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8520.py63
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8619.py45
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8649.py40
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8692.py79
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8696.py104
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8702.py105
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8708.py41
-rw-r--r--contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8769.py21
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/__init__.py1
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/__main__.py138
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_missing.py18
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_pem.py103
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2314.py56
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2315.py165
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2437.py46
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2459.py142
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2511.py48
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2560.py80
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2631.py41
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2634.py191
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2876.py185
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2985.py319
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc2986.py90
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3058.py140
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3114.py244
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3125.py109
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3161.py81
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3274.py81
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3279.py385
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3280.py79
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3281.py80
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3370.py234
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3447.py66
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3537.py76
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3560.py68
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3565.py68
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3657.py167
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3709.py194
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3739.py126
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3770.py95
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3779.py98
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3820.py78
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc3852.py128
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4010.py136
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4043.py118
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4055.py181
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4073.py146
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4108.py113
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4210.py128
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4211.py55
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4334.py83
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4357.py248
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4387.py84
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4476.py144
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4490.py274
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4491.py156
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4683.py122
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc4985.py113
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5035.py192
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5083.py95
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5084.py122
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5126.py103
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5208.py75
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5275.py190
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5280.py253
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5480.py81
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5636.py118
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5639.py80
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5649.py56
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5652.py169
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5697.py126
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5751.py103
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5752.py207
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5753.py129
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5755.py212
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5913.py122
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5914.py79
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5915.py45
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5916.py107
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5917.py119
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5924.py74
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5934.py299
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5940.py141
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5958.py84
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc5990.py87
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6010.py101
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6019.py56
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6031.py91
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6032.py96
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6120.py115
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6187.py70
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6210.py73
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6211.py122
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6402.py157
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6482.py116
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6486.py122
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6487.py146
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6664.py103
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6955.py101
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc6960.py176
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7030.py89
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7191.py313
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7229.py93
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7292.py183
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7296.py160
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7508.py134
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7585.py126
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7633.py80
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7773.py113
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7894.py84
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7906.py168
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc7914.py97
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8017.py125
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8018.py58
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8103.py53
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8209.py63
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8226.py104
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8358.py195
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8360.py464
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8398.py66
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8410.py44
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8418.py43
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8419.py130
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8479.py108
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8494.py55
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8520.py115
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8619.py80
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8649.py60
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8692.py55
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8696.py193
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8702.py140
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8708.py127
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/test_rfc8769.py134
-rw-r--r--contrib/python/pyasn1-modules/py2/tests/ya.make136
-rw-r--r--contrib/python/pyasn1-modules/py2/ya.make161
-rw-r--r--contrib/python/pyasn1-modules/py3/.dist-info/METADATA74
-rw-r--r--contrib/python/pyasn1-modules/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pyasn1-modules/py3/LICENSE.txt24
-rw-r--r--contrib/python/pyasn1-modules/py3/README.md32
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/__init__.py2
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/pem.py65
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1155.py96
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1157.py126
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1901.py22
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1902.py129
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1905.py135
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2251.py563
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2314.py48
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2315.py294
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2437.py69
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2459.py1339
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2511.py258
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2560.py225
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2631.py37
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2634.py336
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2876.py56
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2985.py588
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2986.py75
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3058.py42
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3114.py77
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3125.py469
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3161.py142
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3274.py59
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3279.py260
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3280.py1543
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3281.py331
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3370.py146
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3412.py53
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3414.py28
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3447.py45
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3537.py34
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3560.py74
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3565.py57
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3657.py66
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3709.py207
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3739.py203
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3770.py75
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3779.py137
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3820.py65
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3852.py706
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4010.py58
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4043.py43
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4055.py258
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4073.py59
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4108.py350
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4210.py803
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4211.py396
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4334.py75
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4357.py477
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4387.py23
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4476.py93
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4490.py113
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4491.py44
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4683.py72
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4985.py49
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5035.py199
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5083.py52
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5084.py97
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5126.py577
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5208.py56
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5275.py404
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5280.py1658
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5480.py190
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5636.py113
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5639.py49
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5649.py33
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5652.py761
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5697.py70
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5751.py124
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5752.py49
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5753.py157
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5755.py398
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5913.py44
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5914.py119
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5915.py32
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5916.py35
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5917.py55
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5924.py19
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5934.py786
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5940.py59
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5958.py98
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5990.py237
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6010.py88
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6019.py45
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6031.py469
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6032.py68
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6120.py43
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6170.py17
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6187.py22
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6210.py42
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6211.py72
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6402.py628
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6482.py74
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6486.py68
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6487.py22
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6664.py147
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6955.py108
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6960.py223
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7030.py66
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7191.py261
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7229.py29
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7292.py357
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7296.py32
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7508.py90
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7585.py50
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7633.py38
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7773.py52
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7894.py92
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7906.py736
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7914.py49
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8017.py153
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8018.py260
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8103.py36
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8209.py20
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8226.py149
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8358.py50
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8360.py44
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8398.py52
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8410.py43
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8418.py36
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8419.py68
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8479.py45
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8494.py80
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8520.py63
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8619.py45
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8649.py40
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8692.py79
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8696.py104
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8702.py105
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8708.py41
-rw-r--r--contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8769.py21
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/__init__.py1
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/__main__.py138
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_missing.py18
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_pem.py103
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2314.py56
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2315.py165
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2437.py46
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2459.py142
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2511.py48
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2560.py80
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2631.py41
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2634.py191
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2876.py185
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2985.py319
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc2986.py90
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3058.py140
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3114.py244
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3125.py109
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3161.py81
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3274.py81
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3279.py385
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3280.py79
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3281.py80
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3370.py234
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3447.py66
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3537.py76
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3560.py68
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3565.py68
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3657.py167
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3709.py194
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3739.py126
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3770.py95
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3779.py98
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3820.py78
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc3852.py128
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4010.py136
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4043.py118
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4055.py181
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4073.py146
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4108.py113
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4210.py128
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4211.py55
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4334.py83
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4357.py248
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4387.py84
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4476.py144
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4490.py274
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4491.py156
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4683.py122
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc4985.py113
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5035.py192
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5083.py95
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5084.py122
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5126.py103
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5208.py75
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5275.py190
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5280.py253
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5480.py81
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5636.py118
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5639.py80
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5649.py56
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5652.py169
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5697.py126
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5751.py103
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5752.py207
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5753.py129
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5755.py212
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5913.py122
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5914.py79
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5915.py45
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5916.py107
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5917.py119
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5924.py74
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5934.py299
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5940.py141
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5958.py84
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc5990.py87
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6010.py101
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6019.py56
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6031.py91
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6032.py96
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6120.py115
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6187.py70
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6210.py73
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6211.py122
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6402.py157
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6482.py116
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6486.py122
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6487.py146
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6664.py103
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6955.py101
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc6960.py176
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7030.py89
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7191.py313
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7229.py93
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7292.py183
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7296.py160
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7508.py134
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7585.py126
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7633.py80
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7773.py113
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7894.py84
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7906.py168
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc7914.py97
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8017.py125
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8018.py58
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8103.py53
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8209.py63
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8226.py104
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8358.py195
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8360.py464
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8398.py66
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8410.py44
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8418.py43
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8419.py130
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8479.py108
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8494.py55
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8520.py115
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8619.py80
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8649.py60
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8692.py55
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8696.py193
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8702.py140
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8708.py127
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/test_rfc8769.py134
-rw-r--r--contrib/python/pyasn1-modules/py3/tests/ya.make136
-rw-r--r--contrib/python/pyasn1-modules/py3/ya.make161
-rw-r--r--contrib/python/pyasn1-modules/ya.make18
-rw-r--r--contrib/python/pyasn1/py2/.dist-info/METADATA230
-rw-r--r--contrib/python/pyasn1/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pyasn1/py2/LICENSE.rst24
-rw-r--r--contrib/python/pyasn1/py2/README.md188
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/__init__.py2
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/ber/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/ber/decoder.py2071
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/ber/encoder.py917
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/ber/eoo.py28
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/cer/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/cer/decoder.py146
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/cer/encoder.py327
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/der/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/der/decoder.py116
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/der/encoder.py122
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/native/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/native/decoder.py238
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/native/encoder.py274
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/codec/streaming.py244
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/compat/__init__.py4
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/compat/integer.py103
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/compat/octets.py46
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/debug.py147
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/error.py116
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/base.py706
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/char.py335
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/constraint.py756
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/error.py11
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/namedtype.py561
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/namedval.py192
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/opentype.py104
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/tag.py335
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/tagmap.py96
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/univ.py3305
-rw-r--r--contrib/python/pyasn1/py2/pyasn1/type/useful.py189
-rw-r--r--contrib/python/pyasn1/py2/tests/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/__main__.py18
-rw-r--r--contrib/python/pyasn1/py2/tests/base.py18
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/__main__.py19
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/ber/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/ber/__main__.py16
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/ber/test_decoder.py1847
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/ber/test_encoder.py1497
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/cer/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/cer/__main__.py16
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/cer/test_decoder.py370
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/cer/test_encoder.py956
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/der/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/der/__main__.py16
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/der/test_decoder.py368
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/der/test_encoder.py665
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/native/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/native/__main__.py15
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/native/test_decoder.py120
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/native/test_encoder.py141
-rw-r--r--contrib/python/pyasn1/py2/tests/codec/test_streaming.py75
-rw-r--r--contrib/python/pyasn1/py2/tests/compat/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/compat/__main__.py16
-rw-r--r--contrib/python/pyasn1/py2/tests/compat/test_integer.py49
-rw-r--r--contrib/python/pyasn1/py2/tests/compat/test_octets.py113
-rw-r--r--contrib/python/pyasn1/py2/tests/test_debug.py37
-rw-r--r--contrib/python/pyasn1/py2/tests/type/__init__.py1
-rw-r--r--contrib/python/pyasn1/py2/tests/type/__main__.py22
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_char.py169
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_constraint.py420
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_namedtype.py135
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_namedval.py53
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_opentype.py101
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_tag.py133
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_univ.py2184
-rw-r--r--contrib/python/pyasn1/py2/tests/type/test_useful.py138
-rw-r--r--contrib/python/pyasn1/py2/tests/ya.make41
-rw-r--r--contrib/python/pyasn1/py2/ya.make58
-rw-r--r--contrib/python/pyasn1/py3/.dist-info/METADATA230
-rw-r--r--contrib/python/pyasn1/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pyasn1/py3/LICENSE.rst24
-rw-r--r--contrib/python/pyasn1/py3/README.md188
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/__init__.py2
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/ber/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/ber/decoder.py2071
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/ber/encoder.py917
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/ber/eoo.py28
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/cer/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/cer/decoder.py146
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/cer/encoder.py327
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/der/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/der/decoder.py116
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/der/encoder.py122
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/native/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/native/decoder.py238
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/native/encoder.py274
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/codec/streaming.py244
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/compat/__init__.py4
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/compat/integer.py103
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/compat/octets.py46
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/debug.py147
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/error.py116
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/base.py706
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/char.py335
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/constraint.py756
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/error.py11
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/namedtype.py561
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/namedval.py192
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/opentype.py104
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/tag.py335
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/tagmap.py96
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/univ.py3305
-rw-r--r--contrib/python/pyasn1/py3/pyasn1/type/useful.py189
-rw-r--r--contrib/python/pyasn1/py3/tests/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/__main__.py18
-rw-r--r--contrib/python/pyasn1/py3/tests/base.py18
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/__main__.py19
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/ber/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/ber/__main__.py16
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/ber/test_decoder.py1847
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/ber/test_encoder.py1497
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/cer/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/cer/__main__.py16
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/cer/test_decoder.py370
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/cer/test_encoder.py956
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/der/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/der/__main__.py16
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/der/test_decoder.py368
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/der/test_encoder.py665
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/native/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/native/__main__.py15
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/native/test_decoder.py120
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/native/test_encoder.py141
-rw-r--r--contrib/python/pyasn1/py3/tests/codec/test_streaming.py75
-rw-r--r--contrib/python/pyasn1/py3/tests/compat/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/compat/__main__.py16
-rw-r--r--contrib/python/pyasn1/py3/tests/compat/test_integer.py49
-rw-r--r--contrib/python/pyasn1/py3/tests/compat/test_octets.py113
-rw-r--r--contrib/python/pyasn1/py3/tests/test_debug.py37
-rw-r--r--contrib/python/pyasn1/py3/tests/type/__init__.py1
-rw-r--r--contrib/python/pyasn1/py3/tests/type/__main__.py22
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_char.py169
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_constraint.py420
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_namedtype.py135
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_namedval.py53
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_opentype.py101
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_tag.py133
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_univ.py2184
-rw-r--r--contrib/python/pyasn1/py3/tests/type/test_useful.py138
-rw-r--r--contrib/python/pyasn1/py3/tests/ya.make41
-rw-r--r--contrib/python/pyasn1/py3/ya.make58
-rw-r--r--contrib/python/pyasn1/ya.make18
-rw-r--r--contrib/python/pytest-localserver/py3/.dist-info/METADATA300
-rw-r--r--contrib/python/pytest-localserver/py3/.dist-info/entry_points.txt2
-rw-r--r--contrib/python/pytest-localserver/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/__init__.py1
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/_version.py16
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/http.py183
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/https.py149
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/plugin.py90
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/server.pem84
-rw-r--r--contrib/python/pytest-localserver/py3/pytest_localserver/smtp.py177
-rw-r--r--contrib/python/pytest-localserver/py3/ya.make41
-rw-r--r--contrib/python/requests-mock/py2/AUTHORS50
-rw-r--r--contrib/python/requests-mock/py2/LICENSE180
-rw-r--r--contrib/python/requests-mock/py2/README.rst101
-rw-r--r--contrib/python/requests-mock/py3/.dist-info/METADATA144
-rw-r--r--contrib/python/requests-mock/py3/.dist-info/entry_points.txt2
-rw-r--r--contrib/python/requests-mock/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/requests-mock/py3/AUTHORS50
-rw-r--r--contrib/python/requests-mock/py3/LICENSE180
-rw-r--r--contrib/python/requests-mock/py3/README.rst101
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/__init__.py37
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/adapter.py323
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/compat.py30
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/contrib/__init__.py0
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/contrib/_pytest_plugin.py86
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/contrib/fixture.py27
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/exceptions.py30
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/mocker.py342
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/py.typed0
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/request.py178
-rw-r--r--contrib/python/requests-mock/py3/requests_mock/response.py281
-rw-r--r--contrib/python/requests-mock/py3/ya.make54
-rw-r--r--contrib/python/requests-mock/ya.make18
-rw-r--r--contrib/python/requests-oauthlib/.dist-info/METADATA245
-rw-r--r--contrib/python/requests-oauthlib/.dist-info/top_level.txt1
-rw-r--r--contrib/python/requests-oauthlib/AUTHORS.rst25
-rw-r--r--contrib/python/requests-oauthlib/LICENSE15
-rw-r--r--contrib/python/requests-oauthlib/README.rst58
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/__init__.py19
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/__init__.py10
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/douban.py17
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/ebay.py23
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/facebook.py33
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/fitbit.py25
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/instagram.py26
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/mailchimp.py23
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/plentymarkets.py29
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/slack.py37
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/weibo.py15
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/oauth1_auth.py117
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/oauth1_session.py400
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/oauth2_auth.py37
-rw-r--r--contrib/python/requests-oauthlib/requests_oauthlib/oauth2_session.py540
-rw-r--r--contrib/python/requests-oauthlib/tests/__init__.py0
-rw-r--r--contrib/python/requests-oauthlib/tests/test.bin1
-rw-r--r--contrib/python/requests-oauthlib/tests/test_compliance_fixes.py334
-rw-r--r--contrib/python/requests-oauthlib/tests/test_core.py170
-rw-r--r--contrib/python/requests-oauthlib/tests/test_oauth1_session.py348
-rw-r--r--contrib/python/requests-oauthlib/tests/test_oauth2_auth.py54
-rw-r--r--contrib/python/requests-oauthlib/tests/test_oauth2_session.py527
-rw-r--r--contrib/python/requests-oauthlib/tests/ya.make28
-rw-r--r--contrib/python/requests-oauthlib/ya.make45
-rw-r--r--contrib/python/rsa/py2/.dist-info/METADATA85
-rw-r--r--contrib/python/rsa/py2/.dist-info/entry_points.txt8
-rw-r--r--contrib/python/rsa/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/rsa/py2/LICENSE13
-rw-r--r--contrib/python/rsa/py2/README.md52
-rw-r--r--contrib/python/rsa/py2/rsa/__init__.py42
-rw-r--r--contrib/python/rsa/py2/rsa/_compat.py162
-rw-r--r--contrib/python/rsa/py2/rsa/asn1.py53
-rw-r--r--contrib/python/rsa/py2/rsa/cli.py288
-rw-r--r--contrib/python/rsa/py2/rsa/common.py188
-rw-r--r--contrib/python/rsa/py2/rsa/core.py57
-rw-r--r--contrib/python/rsa/py2/rsa/key.py798
-rw-r--r--contrib/python/rsa/py2/rsa/machine_size.py74
-rw-r--r--contrib/python/rsa/py2/rsa/parallel.py101
-rw-r--r--contrib/python/rsa/py2/rsa/pem.py126
-rw-r--r--contrib/python/rsa/py2/rsa/pkcs1.py448
-rw-r--r--contrib/python/rsa/py2/rsa/pkcs1_v2.py103
-rw-r--r--contrib/python/rsa/py2/rsa/prime.py201
-rw-r--r--contrib/python/rsa/py2/rsa/randnum.py98
-rw-r--r--contrib/python/rsa/py2/rsa/transform.py215
-rw-r--r--contrib/python/rsa/py2/rsa/util.py79
-rw-r--r--contrib/python/rsa/py2/tests/__init__.py0
-rw-r--r--contrib/python/rsa/py2/tests/private.pem5
-rw-r--r--contrib/python/rsa/py2/tests/test_cli.py296
-rw-r--r--contrib/python/rsa/py2/tests/test_common.py96
-rw-r--r--contrib/python/rsa/py2/tests/test_compat.py80
-rw-r--r--contrib/python/rsa/py2/tests/test_integers.py50
-rw-r--r--contrib/python/rsa/py2/tests/test_key.py79
-rw-r--r--contrib/python/rsa/py2/tests/test_load_save_keys.py217
-rw-r--r--contrib/python/rsa/py2/tests/test_parallel.py20
-rw-r--r--contrib/python/rsa/py2/tests/test_pem.py102
-rw-r--r--contrib/python/rsa/py2/tests/test_pkcs1.py184
-rw-r--r--contrib/python/rsa/py2/tests/test_pkcs1_v2.py83
-rw-r--r--contrib/python/rsa/py2/tests/test_prime.py110
-rw-r--r--contrib/python/rsa/py2/tests/test_strings.py42
-rw-r--r--contrib/python/rsa/py2/tests/test_transform.py79
-rw-r--r--contrib/python/rsa/py2/tests/ya.make30
-rw-r--r--contrib/python/rsa/py2/ya.make46
-rw-r--r--contrib/python/rsa/py3/.dist-info/METADATA106
-rw-r--r--contrib/python/rsa/py3/.dist-info/entry_points.txt8
-rw-r--r--contrib/python/rsa/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/rsa/py3/LICENSE13
-rw-r--r--contrib/python/rsa/py3/README.md76
-rw-r--r--contrib/python/rsa/py3/rsa/__init__.py60
-rw-r--r--contrib/python/rsa/py3/rsa/asn1.py52
-rw-r--r--contrib/python/rsa/py3/rsa/cli.py321
-rw-r--r--contrib/python/rsa/py3/rsa/common.py184
-rw-r--r--contrib/python/rsa/py3/rsa/core.py53
-rw-r--r--contrib/python/rsa/py3/rsa/key.py858
-rw-r--r--contrib/python/rsa/py3/rsa/parallel.py96
-rw-r--r--contrib/python/rsa/py3/rsa/pem.py134
-rw-r--r--contrib/python/rsa/py3/rsa/pkcs1.py485
-rw-r--r--contrib/python/rsa/py3/rsa/pkcs1_v2.py100
-rw-r--r--contrib/python/rsa/py3/rsa/prime.py198
-rw-r--r--contrib/python/rsa/py3/rsa/py.typed1
-rw-r--r--contrib/python/rsa/py3/rsa/randnum.py95
-rw-r--r--contrib/python/rsa/py3/rsa/transform.py72
-rw-r--r--contrib/python/rsa/py3/rsa/util.py97
-rw-r--r--contrib/python/rsa/py3/tests/__init__.py0
-rw-r--r--contrib/python/rsa/py3/tests/private.pem5
-rw-r--r--contrib/python/rsa/py3/tests/test_cli.py291
-rw-r--r--contrib/python/rsa/py3/tests/test_common.py83
-rw-r--r--contrib/python/rsa/py3/tests/test_integers.py48
-rw-r--r--contrib/python/rsa/py3/tests/test_key.py87
-rw-r--r--contrib/python/rsa/py3/tests/test_load_save_keys.py234
-rw-r--r--contrib/python/rsa/py3/tests/test_mypy.py31
-rw-r--r--contrib/python/rsa/py3/tests/test_parallel.py20
-rw-r--r--contrib/python/rsa/py3/tests/test_pem.py100
-rw-r--r--contrib/python/rsa/py3/tests/test_pkcs1.py218
-rw-r--r--contrib/python/rsa/py3/tests/test_pkcs1_v2.py79
-rw-r--r--contrib/python/rsa/py3/tests/test_prime.py133
-rw-r--r--contrib/python/rsa/py3/tests/test_strings.py40
-rw-r--r--contrib/python/rsa/py3/tests/test_transform.py53
-rw-r--r--contrib/python/rsa/py3/tests/ya.make28
-rw-r--r--contrib/python/rsa/py3/ya.make45
-rw-r--r--contrib/python/rsa/ya.make18
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/.dist-info/METADATA55
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/.dist-info/top_level.txt2
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/LICENSE21
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/README.rst25
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.h23
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pxd251
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pyx1526
-rw-r--r--contrib/python/ruamel.yaml.clib/py2/ya.make31
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/.dist-info/METADATA55
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/.dist-info/top_level.txt2
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/LICENSE21
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/README.rst25
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.h23
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pxd251
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pyx1526
-rw-r--r--contrib/python/ruamel.yaml.clib/py3/ya.make31
-rw-r--r--contrib/python/ruamel.yaml.clib/ya.make18
-rw-r--r--contrib/python/ruamel.yaml/py2/.dist-info/METADATA815
-rw-r--r--contrib/python/ruamel.yaml/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/ruamel.yaml/py2/LICENSE21
-rw-r--r--contrib/python/ruamel.yaml/py2/README.rst779
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/__init__.py59
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/anchor.py19
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/comments.py1154
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/compat.py324
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/composer.py238
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/configobjwalker.py14
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/constructor.py1806
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/cyaml.py185
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/dumper.py221
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/emitter.py1696
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/error.py311
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/events.py157
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/loader.py74
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/main.py1534
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/nodes.py131
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/parser.py802
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/py.typed0
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/reader.py311
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/representer.py1282
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/resolver.py399
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarbool.py51
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarfloat.py127
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarint.py130
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarstring.py156
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/scanner.py1980
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/serializer.py240
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/timestamp.py54
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/tokens.py286
-rw-r--r--contrib/python/ruamel.yaml/py2/ruamel/yaml/util.py190
-rw-r--r--contrib/python/ruamel.yaml/py2/ya.make55
-rw-r--r--contrib/python/ruamel.yaml/py3/.dist-info/METADATA400
-rw-r--r--contrib/python/ruamel.yaml/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/ruamel.yaml/py3/LICENSE21
-rw-r--r--contrib/python/ruamel.yaml/py3/README.md365
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/__init__.py57
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/anchor.py18
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/comments.py1166
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/compat.py235
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/composer.py228
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/configobjwalker.py15
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/constructor.py1723
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/cyaml.py195
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/dumper.py218
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/emitter.py1766
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/error.py297
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/events.py264
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/loader.py90
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/main.py1664
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/nodes.py145
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/parser.py851
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/py.typed0
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/reader.py275
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/representer.py1127
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/resolver.py389
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarbool.py42
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarfloat.py103
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarint.py122
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarstring.py140
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/scanner.py2359
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/serializer.py231
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/tag.py124
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/timestamp.py58
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/tokens.py379
-rw-r--r--contrib/python/ruamel.yaml/py3/ruamel/yaml/util.py257
-rw-r--r--contrib/python/ruamel.yaml/py3/ya.make55
-rw-r--r--contrib/python/ruamel.yaml/ya.make18
-rw-r--r--contrib/python/tenacity/py2/.dist-info/METADATA33
-rw-r--r--contrib/python/tenacity/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/tenacity/py2/LICENSE202
-rw-r--r--contrib/python/tenacity/py2/README.rst599
-rw-r--r--contrib/python/tenacity/py2/tenacity/__init__.py523
-rw-r--r--contrib/python/tenacity/py2/tenacity/_utils.py159
-rw-r--r--contrib/python/tenacity/py2/tenacity/after.py40
-rw-r--r--contrib/python/tenacity/py2/tenacity/before.py35
-rw-r--r--contrib/python/tenacity/py2/tenacity/before_sleep.py51
-rw-r--r--contrib/python/tenacity/py2/tenacity/compat.py23
-rw-r--r--contrib/python/tenacity/py2/tenacity/nap.py40
-rw-r--r--contrib/python/tenacity/py2/tenacity/py.typed0
-rw-r--r--contrib/python/tenacity/py2/tenacity/retry.py192
-rw-r--r--contrib/python/tenacity/py2/tenacity/stop.py95
-rw-r--r--contrib/python/tenacity/py2/tenacity/tornadoweb.py49
-rw-r--r--contrib/python/tenacity/py2/tenacity/wait.py183
-rw-r--r--contrib/python/tenacity/py2/ya.make44
-rw-r--r--contrib/python/tenacity/py3/.dist-info/METADATA27
-rw-r--r--contrib/python/tenacity/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/tenacity/py3/LICENSE202
-rw-r--r--contrib/python/tenacity/py3/README.rst647
-rw-r--r--contrib/python/tenacity/py3/tenacity/__init__.py606
-rw-r--r--contrib/python/tenacity/py3/tenacity/_asyncio.py94
-rw-r--r--contrib/python/tenacity/py3/tenacity/_utils.py76
-rw-r--r--contrib/python/tenacity/py3/tenacity/after.py51
-rw-r--r--contrib/python/tenacity/py3/tenacity/before.py46
-rw-r--r--contrib/python/tenacity/py3/tenacity/before_sleep.py71
-rw-r--r--contrib/python/tenacity/py3/tenacity/nap.py43
-rw-r--r--contrib/python/tenacity/py3/tenacity/py.typed0
-rw-r--r--contrib/python/tenacity/py3/tenacity/retry.py272
-rw-r--r--contrib/python/tenacity/py3/tenacity/stop.py103
-rw-r--r--contrib/python/tenacity/py3/tenacity/tornadoweb.py59
-rw-r--r--contrib/python/tenacity/py3/tenacity/wait.py228
-rw-r--r--contrib/python/tenacity/py3/ya.make37
-rw-r--r--contrib/python/tenacity/ya.make18
-rw-r--r--contrib/python/websocket-client/py2/.dist-info/METADATA174
-rw-r--r--contrib/python/websocket-client/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/websocket-client/py2/COPYING.LESSER503
-rw-r--r--contrib/python/websocket-client/py2/LICENSE503
-rw-r--r--contrib/python/websocket-client/py2/README.md136
-rw-r--r--contrib/python/websocket-client/py2/tests/ya.make28
-rw-r--r--contrib/python/websocket-client/py2/websocket/__init__.py28
-rw-r--r--contrib/python/websocket-client/py2/websocket/_abnf.py458
-rw-r--r--contrib/python/websocket-client/py2/websocket/_app.py399
-rw-r--r--contrib/python/websocket-client/py2/websocket/_cookiejar.py78
-rw-r--r--contrib/python/websocket-client/py2/websocket/_core.py595
-rw-r--r--contrib/python/websocket-client/py2/websocket/_exceptions.py86
-rw-r--r--contrib/python/websocket-client/py2/websocket/_handshake.py212
-rw-r--r--contrib/python/websocket-client/py2/websocket/_http.py335
-rw-r--r--contrib/python/websocket-client/py2/websocket/_logging.py92
-rw-r--r--contrib/python/websocket-client/py2/websocket/_socket.py176
-rw-r--r--contrib/python/websocket-client/py2/websocket/_ssl_compat.py53
-rw-r--r--contrib/python/websocket-client/py2/websocket/_url.py178
-rw-r--r--contrib/python/websocket-client/py2/websocket/_utils.py110
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/__init__.py0
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/data/header01.txt6
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/data/header02.txt6
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/data/header03.txt6
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/test_abnf.py77
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/test_app.py137
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/test_cookiejar.py117
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/test_http.py110
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/test_url.py309
-rw-r--r--contrib/python/websocket-client/py2/websocket/tests/test_websocket.py434
-rw-r--r--contrib/python/websocket-client/py2/ya.make42
-rw-r--r--contrib/python/websocket-client/py3/.dist-info/METADATA184
-rw-r--r--contrib/python/websocket-client/py3/.dist-info/entry_points.txt3
-rw-r--r--contrib/python/websocket-client/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/websocket-client/py3/LICENSE203
-rw-r--r--contrib/python/websocket-client/py3/README.md141
-rw-r--r--contrib/python/websocket-client/py3/tests/ya.make27
-rw-r--r--contrib/python/websocket-client/py3/websocket/__init__.py26
-rw-r--r--contrib/python/websocket-client/py3/websocket/_abnf.py426
-rw-r--r--contrib/python/websocket-client/py3/websocket/_app.py558
-rw-r--r--contrib/python/websocket-client/py3/websocket/_cookiejar.py66
-rw-r--r--contrib/python/websocket-client/py3/websocket/_core.py611
-rw-r--r--contrib/python/websocket-client/py3/websocket/_exceptions.py80
-rw-r--r--contrib/python/websocket-client/py3/websocket/_handshake.py197
-rw-r--r--contrib/python/websocket-client/py3/websocket/_http.py340
-rw-r--r--contrib/python/websocket-client/py3/websocket/_logging.py93
-rw-r--r--contrib/python/websocket-client/py3/websocket/_socket.py181
-rw-r--r--contrib/python/websocket-client/py3/websocket/_ssl_compat.py39
-rw-r--r--contrib/python/websocket-client/py3/websocket/_url.py169
-rw-r--r--contrib/python/websocket-client/py3/websocket/_utils.py106
-rw-r--r--contrib/python/websocket-client/py3/websocket/_wsdump.py231
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/__init__.py0
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/data/header01.txt6
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/data/header02.txt6
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/data/header03.txt8
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/test_abnf.py89
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/test_app.py299
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/test_cookiejar.py116
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/test_http.py177
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/test_url.py319
-rw-r--r--contrib/python/websocket-client/py3/websocket/tests/test_websocket.py456
-rw-r--r--contrib/python/websocket-client/py3/ya.make40
-rw-r--r--contrib/python/websocket-client/ya.make18
-rw-r--r--ydb/tools/ya.make1
-rw-r--r--ydb/tools/ydbd_slice/__init__.py1189
-rw-r--r--ydb/tools/ydbd_slice/bin/ya.make9
-rw-r--r--ydb/tools/ydbd_slice/cluster_description.py195
-rw-r--r--ydb/tools/ydbd_slice/handlers.py421
-rw-r--r--ydb/tools/ydbd_slice/kube/__init__.py0
-rw-r--r--ydb/tools/ydbd_slice/kube/api.py424
-rw-r--r--ydb/tools/ydbd_slice/kube/cms.py294
-rw-r--r--ydb/tools/ydbd_slice/kube/docker.py78
-rw-r--r--ydb/tools/ydbd_slice/kube/generate.py98
-rw-r--r--ydb/tools/ydbd_slice/kube/handlers.py499
-rw-r--r--ydb/tools/ydbd_slice/kube/kubectl.py106
-rw-r--r--ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/nodeclaim.yaml14
-rw-r--r--ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/storage.yaml103
-rw-r--r--ydb/tools/ydbd_slice/kube/templates/common/database.yaml21
-rw-r--r--ydb/tools/ydbd_slice/kube/templates/common/namespace.yaml9
-rw-r--r--ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/table-profile.txt52
-rw-r--r--ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/unified-agent.txt12
-rw-r--r--ydb/tools/ydbd_slice/kube/yaml.py30
-rw-r--r--ydb/tools/ydbd_slice/nodes.py135
-rw-r--r--ydb/tools/ydbd_slice/ya.make41
2273 files changed, 557379 insertions, 0 deletions
diff --git a/contrib/deprecated/python/ruamel.ordereddict/.dist-info/METADATA b/contrib/deprecated/python/ruamel.ordereddict/.dist-info/METADATA
new file mode 100644
index 0000000000..fb3cc3bc13
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/.dist-info/METADATA
@@ -0,0 +1,432 @@
+Metadata-Version: 2.1
+Name: ruamel.ordereddict
+Version: 0.4.15
+Summary: a version of dict that keeps keys in insertion resp. sorted order
+Home-page: https://bitbucket.org/ruamel/ordereddict
+Author: Anthon van der Neut
+Author-email: a.van.der.neut@ruamel.eu
+License: MIT license
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python
+
+The ordereddict module in short
+-------------------------------
+
+This is an implementation of an ordered dictionary with Key Insertion
+Order (KIO: updates of values do not affect the position of the key),
+Key Value Insertion Order (KVIO, an existing key's position is removed
+and put at the back). The standard library module OrderedDict, implemented
+later, implements a subset of ``ordereddict`` functionality.
+
+Sorted dictionaries are also provided. Currently only with Key Sorted
+Order (KSO, no sorting function can be specified, but you can specify a
+transform to apply on the key before comparison (e.g. string.lower)).
+
+This package is hosted on BitBucket and installable from PyPI::
+
+ pip install ruamel.ordereddict
+
+For Windows there are 32 and 64 bit installable wheels available.
+
+Usage::
+
+ from ruamel.ordereddict import ordereddict
+ kio = ordereddict()
+ kvio = ordereddict(kvio=True)
+    # without relax unordered initialisation is not allowed
+ d = ordereddict({'a':1, 'b': 2}, relax=True)
+ sd = sorteddict({'a':1, 'b': 2}) # sorteddict is always relaxed
+
+**please note that starting with 0.4.6 you should not import _ordereddict
+directly**
+
+This module has been tested under:
+
+============= ========================= ==========
+OS compiler Python
+Linux Mint 17 gcc 4.8.4 2.7.13
+Windows Visual Studio 2010 2.7.13-32
+Windows Visual Studio 2010 2.7.13-64
+============= ========================= ==========
+
+Older versions of this module have been tested under
+and I expect those to still work:
+
+============= ======================== =========
+OS compiler Python
+Windows XP-64 Visual Studio 2010 2.7.10-32
+Windows XP-64 Visual Studio 2010 2.7.10-64
+Windows XP-64 Visual Studio 2008 2.6.9-32
+Windows XP-64 Visual Studio 2008 2.6.9-64
+Linux Mint 17 gcc 4.8.2 2.6.9
+Ubuntu 12.04 gcc 4.7.2 2.7.6
+Ubuntu 12.04 gcc 4.7.2 2.6.8
+Ubuntu 8.04 gcc 4.2.4 2.7.6
+Ubuntu 8.04 gcc 4.2.4 2.5.2
+Windows XP Visual C++ 2008 Express 2.7.6
+Windows 7 64 Windows SDK for Win7 SP1 2.7.6
+Ubuntu 12.04 gcc 4.6.3 2.7.3
+Ubuntu 8.04 gcc 4.2.4 2.6.4
+Ubuntu 8.04 gcc 4.2.4 2.5.2
+Ubuntu 8.10 gcc 4.3.2 2.5.4
+Ubuntu 8.10 gcc 4.3.2 2.4.6
+Ubuntu 7.04 gcc 4.1.2 2.5.1
+Ubuntu 7.04 gcc 4.1.2 2.4.4
+Ubuntu 6.06 gcc 2.5.1
+Windows XP Visual Studio 2003 2.5.1
+Windows XP Visual C++ 2008 Express 2.6.5
+Windows       MinGW 4.7.0              2.7.3
+Solaris 10 GCC 4.4.x 2.7.3
+============= ======================== =========
+
+Version 0.4.1 was tested and found working on SuSE Linux Enterprise Server
+(GCC 4.1.0 and Intel C/C++ 10.1) by Stuart Stock.
+
+MinGW and Solaris were tested and reported to work by Wladimir with version
+0.4.5
+
+Home
+----------------------------
+
+https://bitbucket.org/ruamel/ordereddict is ordereddict's home on the web.
+
+Clone the repository there if you want to work from the source.
+
+http://www.xs4all.nl/~anthon/Python/ordereddict used to be
+ordereddict's home on the web.
+There you can still find the links for downloading the older version (0.4.5).
+
+
+
+Installation
+------------
+
+.. comment: To install the package you can use::
+
+ pip install ruamel.ordereddict
+
+You can clone and checkout the sources, and then run::
+
+ python setup.py install
+
+
+Bugreporting
+------------
+
+If you find any problems, please let me know, but also realise that I
+have a spamfilter that catches over 100 emails a day and yours might
+get in there unnoticed. So if there is no response within a few days
+please try again.
+
+Functionality
+-------------
+
+ordereddict has all of the functionality of dict() except that there
+is no keyword based initialisation and that you cannot pass a normal
+dict to the initialisation of the basic ordereddict (however see the
+relax-ed keyword below). sorteddict cannot be initialised from keywords
+either, but can be initialised from normal dict (ie. they are always
+relaxed).
+
+As you probably would expect .keys(), .values(), .items(),
+.iterkeys(), itervalues(), iteritems() and "for i in some_ordereddict"
+have elements ordered based on the key insertion order (or key value
+insertion order if kvio is specified, or sort order for sorteddict).
+
+ordered/sorteddicts can be pickled.
+
+Some methods have been slightly changed:
+
+- initialisation of ordereddict takes keywords:
+
+ - kvio: if set to True, then move an existing key on update
+ - relax: if set to True, the ordereddict is relaxed for its life regarding
+ initialisation and/or update from unordered data (read a normal dict).
+
+- initialisation of sorteddict takes keyword:
+
+ - key: specifies a function to apply on key (e.g. string.lower)
+
+- .popitem() takes an optional argument (defaulting to -1) indicating which
+ key/value pair to return (by default the last one available)
+- .dict()/.values()/.items()/.iterdict()/.itervalues()/.iteritems()
+ all take an optional reverse (default False) parameter that gives
+ the list reversed order resp. iterates in reverse
+ (the non-iterator can also be done relatively efficient with e.g.
+ od.dict().reverse() )
+- .update(): takes an optional relax=True which allows one time
+ ordereddict update from normal dictionaries regardless of
+ initialisation time relax setting.
+
+In addition to that ordereddict and sorteddict have some extra methods:
+
+- .index(key) - gives an integer value that is the index of the key
+- .setkeys()/.setvalues()/.setitems(), work like those in the Larosa/Foord
+ implementation, although they might throw different exceptions:
+  - setvalues' argument must be an iterable that returns the same number of
+ items as the length of the ordereddict
+ - setitems' argument is free in length, it performs a clear and adds
+ the items in order.
+- slice retrieval for all
+
+and ordereddict only also has:
+
+- .setkeys(), works like the one in the Larosa/Foord
+  implementation. Argument must be an iterable returning a permutation of the
+ existing keys ( that implies having the same length as the ordereddict)
+- .reverse() - reverses the keys in place
+- .insert(position, key, value) - this will put a key at a particular position
+ so that afterwards .index(key) == position, if the key was already there
+ the original position (and value) is lost to the new position. This often
+ means moving keys to new positions!
+- slice deletion/assignment:
+ - stepped deletion could be optimized a bit (individual items are deleted
+ which can require memmoving multiple items)
+ - assignment only from OrderedDict (with the same length as the slice). This
+ could also be optimised as I first delete, then insert individual items.
+ If the assigned items contain keys that are still there after the deletion
+ 'phase' then retrieving that slice does not always give the original
+ assigned ordereddict (depending on the position of the items
+ with those keys in either ordereddict)
+- .rename(oldkey, newkey) renames a key, but keeps the items position and value
+
+The new OrderedDict in the standard collections module
+------------------------------------------------------
+
+With Python 3.1 and backported to 2.7 there is an OrderedDict class
+available in the collections modules. Raymond Hettinger indicated in
+2009 at EuroPython that he preferred to start from a minimal
+OrderedDict instead of using the Larosa/Foord
+implementation. Unfortunately the available tests (for the
+functionality that the simple collections.OrderedDict supports) were
+not used either resulting in preventable bugs like repr initially not
+working on recursive OrderedDicts.
+
+ordereddict (and the Larosa/Foord implementation) is essentially
+a superset of collections.OrderedDict, but there are a few
+differences:
+
+- OrderedDict is by default relax-ed.
+- repr of recursive OrderedDict does not give any indication of the
+ value of the recursive key, as it only displays `...`. ordereddict
+ displays `ordereddict([...])` as value. Just using the dots like
+ OrderedDict does is going to be ambiguous as soon as you have two different
+ types A and B and nest A in B in A or B in B in A.
+- some newer build-in functions available in OrderedDict are not
+ available in ordereddict ( __reversed__, viewkeys, viewvalues, viewitems).
+
+All of the differences can be straightened out in small (70 lines of
+Python) OrderedDict wrapper around ordereddict. With this wrapper the
+OrderedDict tests in the standard test_collections.py all pass.
+
+Testing
+-------
+
+testordereddict.py in the test subdirectory has been used to test the module.
+You can use::
+
+   python testordereddict.py
+
+to run the tests (py.test support has been dropped as newer versions
+of py.test were not compatible).
+
+There is a somewhat patched copy of the python lib/Test dictionary testing
+routines included as well, it fails on the _update test however
+because the default is not to use a relaxed ordereddict.
+You can run it with::
+
+ cd test/unit
+ python test_dict.py
+
+To Do
+-----
+- implement Value Sorted Order (VSO: specify value=True for normal
+ value comparison), or a value rewrite function for VSO ( e.g.
+ value=string.lower )
+- implement Item Sorted Order (ISO): compare value then key ( the other way
+ around would not make sense with unique keys, but we might have
+ non-unique values).
+- implement slice deletion for sorteddict
+- more testing of sorteddict functionality
+- speedtest slices
+- speedtest sorteddict
+- check on the test_update unittest in test_dict.py
+
+To Consider
+-----------
+- comparing ordereddicts (as per Larosa/Foord)
+- implement the whole (optionally) using pointers in the DictObject Items
+ (Faster on insertion/deletion, slower on accessing slices, makes
+  implementing algorithms somewhat more difficult), would have to separate
+ code for sorteddict as key position determination would be much slower.
+- supply a pure Python implementation of exactly the functionality in
+ ordereddict
+- test on older versions (< 2.4) of Python and make portable (if this can
+ be done without too much clutter) or port.
+- test on the Mac
+- optimise searching for an item pointer for sorteddict with binary search
+ (for deletion)
+
+Background information
+----------------------
+
+ordereddict is directly derived from Python's own dictobject.c file.
+The extensions and the representation of ordereddicts() are based
+on Larosa/Foord's excellent pure Python OrderedDict() module
+(http://www.voidspace.org.uk/python/odict.html).
+
+The implementation adds a vector of pointers to elements to the basic
+dictionary structure and keeps this vector compact (and in order) so
+indexing is fast. The elements do not know about their position (so
+nothing needs to be updated there if that position changes, but then
+finding an item's index is expensive. Insertion/deletion is also relatively
+expensive in that on average half of the vector of pointers needs to
+be memmove-d one position.
+There is also a long value for bit info like kvio, relaxed.
+
+The sorteddict structure has an additional 3 pointers of which only
+one (sd_key) is currently used (the others are sd_cmp and sd_value).
+
+Speed
+-----
+
+Based on some tests with best of 10 iterations of 10000 iterations of various
+functions under Ubuntu 7.10 (see test/timeordereddict.py and test/ta.py)::
+
+ Results in seconds:
+
+ ------------------------------- dict ordereddict Larosa/Ford collections
+ OrderedDict OrderedDict
+ empty 0.023 0.025 0.023 0.024
+ create_empty 0.028 0.031 0.147 0.329
+ create_five_entry 0.037 0.042 0.384 0.558
+ create_26_entry 0.187 0.203 1.494 1.602
+ create_676_entry 5.330 5.574 36.797 34.810
+ get_keys_from_26_entry 0.209 0.231 1.501 1.762
+ pop_5_items_26_entry 0.219 0.247 1.952 1.864
+ pop_26_items_676_entry 7.550 8.127 46.578 41.851
+ popitem_last_26_entry 0.203 0.225 1.624 1.734
+ popitem_last_676_entry 5.285 5.534 36.912 34.799
+ popitem_100_676_entry -------- 5.552 36.577 --------
+ walk_26_iteritems -------- 0.494 2.792 2.238
+ ------------------------------- dict ordereddict Larosa/Ford collections
+ OrderedDict OrderedDict
+
+ empty 0.930 1.000 0.950 0.966
+ create_empty 0.909 1.000 4.728 10.594
+ create_five_entry 0.892 1.000 9.201 13.374
+ create_26_entry 0.923 1.000 7.368 7.901
+ create_676_entry 0.956 1.000 6.601 6.245
+ get_keys_from_26_entry 0.908 1.000 6.508 7.641
+ pop_5_items_26_entry 0.888 1.000 7.916 7.559
+ pop_26_items_676_entry 0.929 1.000 5.732 5.150
+ popitem_last_26_entry 0.901 1.000 7.222 7.712
+ popitem_last_676_entry 0.955 1.000 6.670 6.288
+ popitem_100_676_entry -------- 1.000 6.588 --------
+ walk_26_iteritems -------- 1.000 5.653 4.532
+
+Why
+---
+
+Because I am orderly ;-O, and because I use dictionaries to
+store key/value information read from some text file quite often.
+Unfortunately comparing those files with diff when written from
+normal dictionaries often obfuscates changes because of the reordering
+of lines when key/value pairs are added and then written.
+
+I have special routine for YAML files that takes lines like::
+
+ - key1: val1
+ - key2: val3
+ - key3:
+ - val3a
+ - val3b
+
+(i.e. a list of key-value pairs) directly to a single ordered dictionary
+and back. (I find it kind of strange to finally have a structured,
+human readable, format that does not try to preserve the
+order of key-value pairs so that comparing files is difficult with
+'standard' text tools).
+
+Older versions
+--------------
+
+http://www.xs4all.nl/~anthon/Python/ordereddict used to be
+ordereddict's home on the web.
+
+There you can still find the links for downloading the older version (0.4.5).
+
+
+History
+-------
+
+``0.4.13``: 2017-07-23
+-
+
+| ``0.4.9 2015-08-10``
+| typos fixed by Gianfranco Costamagna
+|
+| ``0.4.8 2015-05-31``
+| dependent on ruamel.base
+| version number in a single place
+| using py.test under tox
+| generate wheel for 32/64bit py26/py27 on windows
+|
+| ``0.4.6 2014-01-18``
+| Move to ruamel namespace, hosted on bitbucket, MIT License
+| Testing with tox
+|
+| ``0.4.5 2012-06-17``
+| Fix for a bug while inserting last item again beyond last position (reported
+| by Volkan Çetin / volki tolki ( cetinv at gmail.com )
+| Fix for repeated deletion and insertion fail. Found by and solution provided
+| by Darren Dowker (including tests). Also found by Fabio Zadrozny (including
+| a less elegant fix).
+| applied reindent to .py and astyle to .c files
+|
+| ``0.4.3 2009-05-11``
+| Fix for a bug in slicing SortedDicts.
+| Found by, and fix provided by, Migel Anguel (linos.es)
+|
+| ``0.4.2 2009-03-27``
+| Bug found and fixed by Alexandre Andrade and Fabio Zadrozny in
+| doing deepcopy
+|
+| ``0.4.1 2007-11-06``
+| Bug found and fixed by Fabio Zadrozny on resizing dictionaries
+|
+| ``0.4 2007-10-30``
+| added pickling, added relaxed initialisation/update (from unordered dicts)
+| added KVIO (Key Value Insertion Order ie. key moves to back on update)
+| implemented sorteddict, with KSO, Key Sorted Order. You can specify
+| a function for key transformation before comparison (such as string.lower)
+| sorteddict does not have all of the ordereddict methods as not all make
+| sense (eg. slice assignment, rename, setkeys)
+|
+| ``0.3 2007-10-24``
+| added setkeys/setvalues/setitems; slice retrieval, deletion, assignment
+| .rename(oldkey, newkey) rename a key keeping same value and position
+| .index() of non-existing key now returns ValueError instead of SystemError
+| Changed the module name to _ordereddict (from ordereddict), as Jason
+| Kirtland probably rightfully suggested that any private implementation
+| likely has the (file)name ordereddict.py. A modulename with leading
+| underscore seems more common for extension modules anyway.
+|
+| ``0.2a 2007-10-16``
+| Solved the potential GC problem on Windows
+|
+| ``0.2 2007-10-16``
+| First release, with some tests, and possible still a GC problem
+| with Windows.
+|
+| ``0.1 2007-10-..``
+| This version was never released. While testing it I was far in writing
+| an email to comp.lang.python about why timing with timeit did seem to
+| be memory hungry ....
+| and then I realised ordereddict had a memory leak %-)
+
+
diff --git a/contrib/deprecated/python/ruamel.ordereddict/.dist-info/top_level.txt b/contrib/deprecated/python/ruamel.ordereddict/.dist-info/top_level.txt
new file mode 100644
index 0000000000..3202f77d2b
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_ordereddict
+ruamel
diff --git a/contrib/deprecated/python/ruamel.ordereddict/LICENSE b/contrib/deprecated/python/ruamel.ordereddict/LICENSE
new file mode 100644
index 0000000000..0c12e55403
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/LICENSE
@@ -0,0 +1,23 @@
+
+ The MIT License (MIT)
+
+ Copyright (c) 2007-2017 Anthon van der Neut/Ruamel BVBA
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
diff --git a/contrib/deprecated/python/ruamel.ordereddict/README.rst b/contrib/deprecated/python/ruamel.ordereddict/README.rst
new file mode 100644
index 0000000000..0005f78acb
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/README.rst
@@ -0,0 +1,416 @@
+
+The ordereddict module in short
+-------------------------------
+
+This is an implementation of an ordered dictionary with Key Insertion
+Order (KIO: updates of values do not affect the position of the key),
+Key Value Insertion Order (KVIO, an existing key's position is removed
+and put at the back). The standard library module OrderedDict, implemented
+later, implements a subset of ``ordereddict`` functionality.
+
+Sorted dictionaries are also provided. Currently only with Key Sorted
+Order (KSO, no sorting function can be specified, but you can specify a
+transform to apply on the key before comparison (e.g. string.lower)).
+
+This package is hosted on BitBucket and installable from PyPI::
+
+ pip install ruamel.ordereddict
+
+For Windows there are 32 and 64 bit installable wheels available.
+
+Usage::
+
+ from ruamel.ordereddict import ordereddict
+ kio = ordereddict()
+ kvio = ordereddict(kvio=True)
+ # without relax unordered initalisation is not allowed
+ d = ordereddict({'a':1, 'b': 2}, relax=True)
+ sd = sorteddict({'a':1, 'b': 2}) # sorteddict is always relaxed
+
+**please note that starting with 0.4.6 you should not import _ordereddict
+directly**
+
+This module has been tested under:
+
+============= ========================= ==========
+OS compiler Python
+Linux Mint 17 gcc 4.8.4 2.7.13
+Windows Visual Studio 2010 2.7.13-32
+Windows Visual Studio 2010 2.7.13-64
+============= ========================= ==========
+
+Older versions of this module have been tested under
+and I expect those to still work:
+
+============= ======================== =========
+OS compiler Python
+Windows XP-64 Visual Studio 2010 2.7.10-32
+Windows XP-64 Visual Studio 2010 2.7.10-64
+Windows XP-64 Visual Studio 2008 2.6.9-32
+Windows XP-64 Visual Studio 2008 2.6.9-64
+Linux Mint 17 gcc 4.8.2 2.6.9
+Ubuntu 12.04 gcc 4.7.2 2.7.6
+Ubuntu 12.04 gcc 4.7.2 2.6.8
+Ubuntu 8.04 gcc 4.2.4 2.7.6
+Ubuntu 8.04 gcc 4.2.4 2.5.2
+Windows XP Visual C++ 2008 Express 2.7.6
+Windows 7 64 Windows SDK for Win7 SP1 2.7.6
+Ubuntu 12.04 gcc 4.6.3 2.7.3
+Ubuntu 8.04 gcc 4.2.4 2.6.4
+Ubuntu 8.04 gcc 4.2.4 2.5.2
+Ubuntu 8.10 gcc 4.3.2 2.5.4
+Ubuntu 8.10 gcc 4.3.2 2.4.6
+Ubuntu 7.04 gcc 4.1.2 2.5.1
+Ubuntu 7.04 gcc 4.1.2 2.4.4
+Ubuntu 6.06 gcc 2.5.1
+Windows XP Visual Studio 2003 2.5.1
+Windows XP Visual C++ 2008 Express 2.6.5
+Windows MingGW 4.7.0 2.7.3
+Solaris 10 GCC 4.4.x 2.7.3
+============= ======================== =========
+
+Version 0.4.1 was tested and found working on SuSE Linux Enterprise Server
+(GCC 4.1.0 and Intel C/C++ 10.1) by Stuart Stock.
+
+MinGW and Solaris were tested and reported to work by Wladimir with version
+0.4.5
+
+Home
+----------------------------
+
+https://bitbucket.org/ruamel/ordereddict is ordereddict's home on the web.
+
+Clone the repository there if you want to work from the source.
+
+http://www.xs4all.nl/~anthon/Python/ordereddict used to be
+ordereddict's home on the web.
+There you can still find the links for downloading the older version (0.4.5).
+
+
+
+Installation
+------------
+
+.. comment: To install the package you can use::
+
+ pip install ruamel.ordereddict
+
+You can clone and checkout the sources, and then run::
+
+ python setup.py install
+
+
+Bugreporting
+------------
+
+If you find any problems, please let me know, but also realise that I
+have a spamfilter that catches over 100 emails a day and yours might
+get in there unnoticed. So if there is no response within a few days
+please try again.
+
+Functionality
+-------------
+
+ordereddict has all of the functionality of dict() except that there
+is no keyword based initialisation and that you cannot pass a normal
+dict to the initialisation of the basic ordereddict (however see the
+relax-ed keyword below). sorteddict cannot be initialised from keywords
+either, but can be initialised from normal dict (ie. they are always
+relaxed).
+
+As you probably would expect .keys(), .values(), .items(),
+.iterkeys(), itervalues(), iteritems() and "for i in some_ordereddict"
+have elements ordered based on the key insertion order (or key value
+insertion order if kvio is specified, or sort order for sorteddict).
+
+ordered/sorteddicts can be pickled.
+
+Some methods have been slightly changed:
+
+- initialisation of ordereddict takes keywords:
+
+ - kvio: if set to True, then move an existing key on update
+ - relax: if set to True, the ordereddict is relaxed for its life regarding
+ initialisation and/or update from unordered data (read a normal dict).
+
+- initialisation of sorteddict takes keyword:
+
+ - key: specifies a function to apply on key (e.g. string.lower)
+
+- .popitem() takes an optional argument (defaulting to -1) indicating which
+ key/value pair to return (by default the last one available)
+- .dict()/.values()/.items()/.iterdict()/.itervalues()/.iteritems()
+ all take an optional reverse (default False) parameter that gives
+ the list reversed order resp. iterates in reverse
+ (the non-iterator can also be done relatively efficient with e.g.
+ od.dict().reverse() )
+- .update(): takes an optional relax=True which allows one time
+ ordereddict update from normal dictionaries regardless of
+ initialisation time relax setting.
+
+In addition to that ordereddict and sorteddict have some extra methods:
+
+- .index(key) - gives an integer value that is the index of the key
+- .setkeys()/.setvalues()/.setitems(), work like those in the Larosa/Foord
+ implementation, although they might throw different exceptions:
+ - setvalues' argument must be an itereable that returns the same number of
+ items as the length of the ordereddict
+ - setitems' argument is free in length, it performs a clear and adds
+ the items in order.
+- slice retrieval for all
+
+and ordereddict only also has:
+
+- .setkeys(), works like the one in the Larosa/Foord
+ implementation. Argument must be an itereable returning a permutation of the
+ existing keys ( that implies having the same length as the ordereddict)
+- .reverse() - reverses the keys in place
+- .insert(position, key, value) - this will put a key at a particular position
+ so that afterwards .index(key) == position, if the key was already there
+ the original position (and value) is lost to the new position. This often
+ means moving keys to new positions!
+- slice deletion/assignment:
+ - stepped deletion could be optimized a bit (individual items are deleted
+ which can require memmoving multiple items)
+ - assignment only from OrderedDict (with the same length as the slice). This
+ could also be optimised as I first delete, then insert individual items.
+ If the assigned items contain keys that are still there after the deletion
+ 'phase' then retrieving that slice does not always give the original
+ assigned ordereddict (depending on the position of the items
+ with those keys in either ordereddict)
+- .rename(oldkey, newkey) renames a key, but keeps the items position and value
+
+The new OrderedDict in the standard collections module
+------------------------------------------------------
+
+With Python 3.1 and backported to 2.7 there is an OrderedDict class
+available in the collections modules. Raymond Hettinger indicated in
+2009 at EuroPython that he preferred to start from a minimal
+OrderedDict instead of using the Larosa/Foord
+implementation. Unfortunately the available tests (for the
+functionality that the simple collections.OrderedDict supports) were
+not used either resulting in preventable bugs like repr initially not
+working on recursive OrderedDicts.
+
+ordereddict (and the Larosa/Foord implementation) is essentially
+a superset of collections.OrderedDict, but there are a few
+differences:
+
+- OrderedDict is by default relax-ed.
+- repr of recursive OrderedDict does not give any indication of the
+ value of the recursive key, as it only displays `...`. ordereddict
+ displays `ordereddict([...])` as value. Just using the dots like
+ OrderedDict does is going to be ambiguous as soon as you have two different
+ types A and B and nest A in B in A or B in B in A.
+- some newer build-in functions available in OrderedDict are not
+ available in ordereddict ( __reversed__, viewkeys, viewvalues, viewitems).
+
+All of the differences can be straightened out in small (70 lines of
+Python) OrderedDict wrapper around ordereddict. With this wrapper the
+OrderedDict tests in the standard test_collections.py all pass.
+
+Testing
+-------
+
+testordereddict.py in the test subdirectory has been used to test the module.
+You can use::
+
+ python testordereddict
+
+to run the tests (py.test support has been dropped as newer versions
+of py.test were not compatible).
+
+There is a somewhat patched copy of the python lib/Test dictionary testing
+routines included as well, it fails on the _update test however
+because the default is not to use a relaxed ordereddict.
+You can run it with::
+
+ cd test/unit
+ python test_dict.py
+
+To Do
+-----
+- implement Value Sorted Order (VSO: specify value=True for normal
+ value comparison), or a value rewrite function for VSO ( e.g.
+ value=string.lower )
+- implement Item Sorted Order (ISO): compare value then key ( the other way
+ around would not make sense with unique keys, but we might have
+ non-unique values).
+- implement slice deletion for sorteddict
+- more testing of sorteddict functionality
+- speedtest slices
+- speedtest sorteddict
+- check on the test_update unittest in test_dict.py
+
+To Consider
+-----------
+- comparing ordereddicts (as per Larosa/Foord)
+- implement the whole (optionally) using pointers in the DictObject Items
+ (Faster on insertion/deletion, slower on accessing slices, makes
+ implementing algorithms somewhat more difficult), would have to seperate
+ code for sorteddict as key position determination would be much slower.
+- supply a pure Python implementation of exactly the functionality in
+ ordereddict
+- test on older versions (< 2.4) of Python and make portable (if this can
+ be done without too much clutter) or port.
+- test on the Mac
+- optimise searching for an item pointer for sorteddict with binary search
+ (for deletion)
+
+Background information
+----------------------
+
+ordereddict is directly derived from Python's own dictobject.c file.
+The extensions and the representation of ordereddicts() are based
+on Larosa/Foord's excellent pure Python OrderedDict() module
+(http://www.voidspace.org.uk/python/odict.html).
+
+The implementation adds a vector of pointers to elements to the basic
+dictionary structure and keeps this vector compact (and in order) so
+indexing is fast. The elements do not know about their position (so
+nothing needs to be updated there if that position changes, but then
+finding an item's index is expensive. Insertion/deletion is also relatively
+expensive in that on average half of the vector of pointers needs to
+be memmove-d one position.
+There is also a long value for bit info like kvio, relaxed.
+
+The sorteddict structure has an additional 3 pointers of which only
+one (sd_key) is currently used (the others are sd_cmp and sd_value).
+
+Speed
+-----
+
+Based on some tests with best of 10 iterations of 10000 iterations of various
+functions under Ubuntu 7.10 (see test/timeordereddict.py and test/ta.py)::
+
+ Results in seconds:
+
+ ------------------------------- dict ordereddict Larosa/Ford collections
+ OrderedDict OrderedDict
+ empty 0.023 0.025 0.023 0.024
+ create_empty 0.028 0.031 0.147 0.329
+ create_five_entry 0.037 0.042 0.384 0.558
+ create_26_entry 0.187 0.203 1.494 1.602
+ create_676_entry 5.330 5.574 36.797 34.810
+ get_keys_from_26_entry 0.209 0.231 1.501 1.762
+ pop_5_items_26_entry 0.219 0.247 1.952 1.864
+ pop_26_items_676_entry 7.550 8.127 46.578 41.851
+ popitem_last_26_entry 0.203 0.225 1.624 1.734
+ popitem_last_676_entry 5.285 5.534 36.912 34.799
+ popitem_100_676_entry -------- 5.552 36.577 --------
+ walk_26_iteritems -------- 0.494 2.792 2.238
+ ------------------------------- dict ordereddict Larosa/Ford collections
+ OrderedDict OrderedDict
+
+ empty 0.930 1.000 0.950 0.966
+ create_empty 0.909 1.000 4.728 10.594
+ create_five_entry 0.892 1.000 9.201 13.374
+ create_26_entry 0.923 1.000 7.368 7.901
+ create_676_entry 0.956 1.000 6.601 6.245
+ get_keys_from_26_entry 0.908 1.000 6.508 7.641
+ pop_5_items_26_entry 0.888 1.000 7.916 7.559
+ pop_26_items_676_entry 0.929 1.000 5.732 5.150
+ popitem_last_26_entry 0.901 1.000 7.222 7.712
+ popitem_last_676_entry 0.955 1.000 6.670 6.288
+ popitem_100_676_entry -------- 1.000 6.588 --------
+ walk_26_iteritems -------- 1.000 5.653 4.532
+
+Why
+---
+
+Because I am orderly ;-O, and because I use dictionaries to
+store key/value information read from some text file quite often.
+Unfortunately comparing those files with diff when written from
+normal dictionaries often obfuscates changes because of the reordering
+of lines when key/value pairs are added and then written.
+
+I have special routine for YAML files that takes lines like::
+
+ - key1: val1
+ - key2: val3
+ - key3:
+ - val3a
+ - val3b
+
+(i.e. a list of key-value pairs) directly to a single ordered dictionary
+and back. (I find it kind of strange to finally have a structured,
+human readable, format that does not try to preserve the
+order of key-value pairs so that comparing files is difficult with
+'standard' text tools).
+
+Older versions
+--------------
+
+http://www.xs4all.nl/~anthon/Python/ordereddict used to be
+ordereddict's home on the web.
+
+There you can still find the links for downloading the older version (0.4.5).
+
+
+History
+-------
+
+``0.4.13``: 2017-07-23
+-
+
+| ``0.4.9 2015-08-10``
+| typos fixed by Gianfranco Costamagna
+|
+| ``0.4.8 2015-05-31``
+| dependent on ruamel.base
+| version number in a single place
+| using py.test under tox
+| generate wheel for 32/64bit py26/py27 on windows
+|
+| ``0.4.6 2014-01-18``
+| Move to ruamel namespace, hosted on bitbucket, MIT License
+| Testing with tox
+|
+| ``0.4.5 2012-06-17``
+| Fix for a bug while inserting last item again beyond last position (reported
+| by Volkan Çetin / volki tolki ( cetinv at gmail.com )
+| Fix for repeated deletion and insertion fail. Found by and solution provided
+| by Darren Dowker (including tests). Also found by Fabio Zadrozny (including
+| a less elegant fix).
+| applied reindent to .py and astyle to .c files
+|
+| ``0.4.3 2009-05-11``
+| Fix for a bug in slicing SortedDicts.
+| Found by, and fix provided by, Migel Anguel (linos.es)
+|
+| ``0.4.2 2009-03-27``
+| Bug found and fixed by Alexandre Andrade and Fabio Zadrozny in
+| doing deepcopy
+|
+| ``0.4.1 2007-11-06``
+| Bug found and fixed by Fabio Zadrozny on resizing dictionaries
+|
+| ``0.4 2007-10-30``
+| added pickling, added relaxed initialisation/update (from unordered dicts)
+| added KVIO (Key Value Insertion Order ie. key moves to back on update)
+| implemented sorteddict, with KSO, Key Sorted Order. You can specify
+| a function for key transformation before comparison (such as string.lower)
+| sorteddict does not have all of the ordereddict methods as not all make
+| sense (eg. slice assignment, rename, setkeys)
+|
+| ``0.3 2007-10-24``
+| added setkeys/setvalues/setitems; slice retrieval, deletion, assignment
+| .rename(oldkey, newkey) rename a key keeping same value and position
+| .index() of non-existing key now returns ValueError instead of SystemError
+| Changed the module name to _ordereddict (from ordereddict), as Jason
+| Kirtland probably rightfully suggested that any private implementation
+| likely has the (file)name ordereddict.py. A modulename with leading
+| underscore seems more common for extension modules anyway.
+|
+| ``0.2a 2007-10-16``
+| Solved the potential GC problem on Windows
+|
+| ``0.2 2007-10-16``
+| First release, with some tests, and possible still a GC problem
+| with Windows.
+|
+| ``0.1 2007-10-..``
+| This version was never released. While testing it I was far in writing
+| an email to comp.lang.python about why timing with timeit did seem to
+| be memory hungry ....
+| and then I realised ordereddict had a memory leak %-)
diff --git a/contrib/deprecated/python/ruamel.ordereddict/ordereddict.c b/contrib/deprecated/python/ruamel.ordereddict/ordereddict.c
new file mode 100644
index 0000000000..355cb84a3f
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/ordereddict.c
@@ -0,0 +1,4705 @@
+/* Ordered Dictionary object implementation using a hash table and a vector of
+ pointers to the items.
+*/
+/*
+
+ This file has been directly derived from and retains many algorithms from
+ objectdict.c in the Python 2.5.1 source distribution. Its licensing therefore
+ is governed by the license as distributed with Python 2.5.1 available in the
+ file LICENSE in the source distribution of ordereddict
+
+ Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
+ Foundation; All Rights Reserved"
+
+ Copyright (c) 2007-10-13 onwards: Anthon van der Neut
+*/
+
+/*
+Ordering by key insertion order (KIO) instead of key/val insertion order
+(KVIO) is less expensive (as the list of keys does not have to be updated).
+*/
+
+#include "Python.h"
+#include "ordereddict.h"
+
+#if PY_VERSION_HEX < 0x03000000
+#define PyUNISTR_Object PyStringObject
+#define PyUNISTR_Concat PyString_Concat
+#define PyUNISTR_ConcatAndDel PyString_ConcatAndDel
+#define PyUNISTR_CheckExact PyString_CheckExact
+#define PyUNISTR_FromString PyString_FromString
+#define PyUNISTR_FromFormat PyString_FromFormat
+#define PyUNISTR_Join _PyString_Join
+#define PyUNISTR_Eq _PyString_Eq
+#define Py_hash_t long
+#define Py_hash_ssize_t Py_ssize_t
+#define OB_HASH ob_shash
+#else
+
+/* Return 1 if two unicode objects are equal, 0 if not.
+ * unicode_eq() is called when the hash of two unicode objects is equal.
+ */
+#if PY_VERSION_HEX < 0x03030000
+Py_LOCAL_INLINE(int)
+unicode_eq(PyObject *aa, PyObject *bb)
+{
+ register PyUnicodeObject *a = (PyUnicodeObject *)aa;
+ register PyUnicodeObject *b = (PyUnicodeObject *)bb;
+
+ if (a->length != b->length)
+ return 0;
+ if (a->length == 0)
+ return 1;
+ if (a->str[0] != b->str[0])
+ return 0;
+ if (a->length == 1)
+ return 1;
+ return memcmp(a->str, b->str, a->length * sizeof(Py_UNICODE)) == 0;
+}
+#else
+Py_LOCAL_INLINE(int)
+unicode_eq(PyObject *aa, PyObject *bb)
+{
+ register PyUnicodeObject *a = (PyUnicodeObject *)aa;
+ register PyUnicodeObject *b = (PyUnicodeObject *)bb;
+
+ if (PyUnicode_READY(a) == -1 || PyUnicode_READY(b) == -1) {
+ assert(0 && "unicode_eq ready fail");
+ return 0;
+ }
+
+ if (PyUnicode_GET_LENGTH(a) != PyUnicode_GET_LENGTH(b))
+ return 0;
+ if (PyUnicode_GET_LENGTH(a) == 0)
+ return 1;
+ if (PyUnicode_KIND(a) != PyUnicode_KIND(b))
+ return 0;
+ return memcmp(PyUnicode_1BYTE_DATA(a), PyUnicode_1BYTE_DATA(b),
+ PyUnicode_GET_LENGTH(a) * PyUnicode_KIND(a)) == 0;
+}
+#endif
+
+#if PY_VERSION_HEX < 0x03030000
+#define PyUNISTR_Object PyUnicodeObject
+#else
+#define PyUNISTR_Object PyASCIIObject
+#endif
+#define PyUNISTR_Concat PyUnicode_Append
+#define PyUNISTR_ConcatAndDel PyUnicode_AppendAndDel
+#define PyUNISTR_CheckExact PyUnicode_CheckExact
+#define PyUNISTR_FromString PyUnicode_FromString
+#define PyUNISTR_FromFormat PyUnicode_FromFormat
+#define PyUNISTR_Join PyUnicode_Join
+#define PyUNISTR_Eq unicode_eq
+#define Py_hash_ssize_t Py_hash_t
+#define OB_HASH hash
+#endif
+
+#if PY_VERSION_HEX < 0x02050000
+#define SPR "%d"
+#else
+#define SPR "%ld"
+#endif
+
+#if PY_VERSION_HEX < 0x02080000
+#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+#endif
+
+#ifdef NDEBUG
+#undef NDEBUG
+#endif
+
+#define DEFERRED_ADDRESS(ADDR) 0
+
+/* Set a key error with the specified argument, wrapping it in a
+ * tuple automatically so that tuple keys are not unpacked as the
+ * exception arguments. */
+static void
+set_key_error(PyObject *arg)
+{
+ PyObject *tup;
+ tup = PyTuple_Pack(1, arg);
+ if (!tup)
+ return; /* caller will expect error to be set anyway */
+ PyErr_SetObject(PyExc_KeyError, tup);
+ Py_DECREF(tup);
+}
+
+/* Define this out if you don't want conversion statistics on exit. */
+#undef SHOW_CONVERSION_COUNTS
+
+/* See large comment block below. This must be >= 1. */
+#define PERTURB_SHIFT 5
+
+/*
+see object/dictobject.c for subtleties of the base dict implementation
+*/
+
+/* Object used as dummy key to fill deleted entries */
+static PyObject *dummy = NULL; /* Initialized by first call to newPyDictObject() */
+
+#ifdef Py_REF_DEBUG
+PyObject *
+_PyOrderedDict_Dummy(void)
+{
+ return dummy;
+}
+#endif
+
+/* relaxed: allow init etc. of ordereddict from dicts if true */
+static int ordereddict_relaxed = 0;
+/* Key Value Insertion Order: rearrange at end on update if true */
+static int ordereddict_kvio = 0;
+
+/* forward declarations */
+static PyOrderedDictEntry *
+lookdict_string(PyOrderedDictObject *mp, PyObject *key, Py_hash_t hash);
+int PyOrderedDict_CopySome(PyObject *a, PyObject *b,
+ Py_ssize_t start, Py_ssize_t step,
+ Py_ssize_t count, int override);
+
+#ifdef SHOW_CONVERSION_COUNTS
+static long created = 0L;
+static long converted = 0L;
+
+static void
+show_counts(void)
+{
+ fprintf(stderr, "created %ld string dicts\n", created);
+ fprintf(stderr, "converted %ld to normal dicts\n", converted);
+ fprintf(stderr, "%.2f%% conversion rate\n", (100.0*converted)/created);
+}
+#endif
+
+/* Debug statistic to compare allocations with reuse through the free list */
+#undef SHOW_ALLOC_COUNT
+#ifdef SHOW_ALLOC_COUNT
+static size_t count_alloc = 0;
+static size_t count_reuse = 0;
+
+static void
+show_alloc(void)
+{
+ fprintf(stderr, "Dict allocations: %" PY_FORMAT_SIZE_T "d\n",
+ count_alloc);
+ fprintf(stderr, "Dict reuse through freelist: %" PY_FORMAT_SIZE_T
+ "d\n", count_reuse);
+ fprintf(stderr, "%.2f%% reuse rate\n\n",
+ (100.0*count_reuse/(count_alloc+count_reuse)));
+}
+#endif
+
+/* Initialization macros.
+ There are two ways to create a dict: PyOrderedDict_New() is the main C API
+ function, and the tp_new slot maps to dict_new(). In the latter case we
+ can save a little time over what PyOrderedDict_New does because it's guaranteed
+ that the PyOrderedDictObject struct is already zeroed out.
+ Everyone except dict_new() should use EMPTY_TO_MINSIZE (unless they have
+ an excellent reason not to).
+*/
+
+#define INIT_NONZERO_DICT_SLOTS(mp) do { \
+ (mp)->ma_table = (mp)->ma_smalltable; \
+ (mp)->od_otablep = (mp)->ma_smallotablep; \
+ (mp)->ma_mask = PyOrderedDict_MINSIZE - 1; \
+ } while(0)
+
+#define EMPTY_TO_MINSIZE(mp) do { \
+ memset((mp)->ma_smalltable, 0, sizeof((mp)->ma_smalltable)); \
+ memset((mp)->ma_smallotablep, 0, sizeof((mp)->ma_smallotablep)); \
+ (mp)->ma_used = (mp)->od_fill = (mp)->od_state = 0; \
+ INIT_NONZERO_DICT_SLOTS(mp); \
+ } while(0)
+
+/* (mp)->od_cmp = (mp)->od_key = NULL; \*/
+
+#define INIT_SORT_FUNCS(SD) do { \
+ SD->sd_cmp = Py_None; Py_INCREF(Py_None); \
+ SD->sd_key = Py_None; Py_INCREF(Py_None); \
+ SD->sd_value = Py_None; Py_INCREF(Py_None); \
+ } while(0)
+
+
+#define OD_KVIO_BIT (1<<0)
+#define OD_RELAXED_BIT (1<<1)
+#define OD_REVERSE_BIT (1<<2)
+
+#define KVIO(mp) (mp->od_state & OD_KVIO_BIT)
+#define RELAXED(mp) (mp->od_state & OD_RELAXED_BIT)
+#define REVERSE(mp) (mp->od_state & OD_REVERSE_BIT)
+
+/* Dictionary reuse scheme to save calls to malloc, free, and memset */
+#ifndef PyDict_MAXFREELIST
+#define PyDict_MAXFREELIST 80
+#endif
+static PyOrderedDictObject *free_list[PyDict_MAXFREELIST];
+static int numfree = 0;
+
+void
+PyOrderedDict_Fini(void)
+{
+ PyOrderedDictObject *op;
+
+ while (numfree) {
+ op = free_list[--numfree];
+ assert(PyOrderedDict_CheckExact(op));
+ PyObject_GC_Del(op);
+ }
+}
+
+PyObject *
+PyOrderedDict_New(void)
+{
+ register PyOrderedDictObject *mp;
+ assert(dummy != NULL); /* initialisation in the module init */
+#ifdef SHOW_CONVERSION_COUNTS
+ Py_AtExit(show_counts);
+#endif
+#ifdef SHOW_ALLOC_COUNT
+ Py_AtExit(show_alloc);
+#endif
+ if (numfree) {
+ mp = free_list[--numfree];
+ assert (mp != NULL);
+ assert (Py_TYPE(mp) == &PyOrderedDict_Type);
+ _Py_NewReference((PyObject *)mp);
+ if (mp->od_fill) {
+ EMPTY_TO_MINSIZE(mp);
+ } else {
+ /* At least set ma_table and ma_mask; these are wrong
+ if an empty but presized dict is added to freelist */
+ INIT_NONZERO_DICT_SLOTS(mp);
+ }
+ assert (mp->ma_used == 0);
+ assert (mp->ma_table == mp->ma_smalltable);
+ assert (mp->od_otablep == mp->ma_smallotablep);
+ assert (mp->ma_mask == PyOrderedDict_MINSIZE - 1);
+#ifdef SHOW_ALLOC_COUNT
+ count_reuse++;
+#endif
+ } else {
+ mp = PyObject_GC_New(PyOrderedDictObject, &PyOrderedDict_Type);
+ if (mp == NULL)
+ return NULL;
+ EMPTY_TO_MINSIZE(mp);
+#ifdef SHOW_ALLOC_COUNT
+ count_alloc++;
+#endif
+ }
+ mp->ma_lookup = lookdict_string;
+#ifdef SHOW_CONVERSION_COUNTS
+ ++created;
+#endif
+ PyObject_GC_Track(mp);
+ return (PyObject *)mp;
+}
+
+
+PyObject *
+PySortedDict_New(void)
+{
+ register PyOrderedDictObject *mp;
+ register PySortedDictObject *sd;
+ assert(dummy != NULL);
+ mp = (PyOrderedDictObject *) PyObject_GC_New(PySortedDictObject, &PySortedDict_Type);
+ if (mp == NULL)
+ return NULL;
+ EMPTY_TO_MINSIZE(mp);
+ mp->ma_lookup = lookdict_string;
+ sd = (PySortedDictObject*)mp;
+ INIT_SORT_FUNCS(sd);
+#ifdef SHOW_CONVERSION_COUNTS
+ ++created;
+#endif
+ PyObject_GC_Track(mp);
+ return (PyObject *)mp;
+}
+
+/*
+The basic lookup function used by all operations.
+This is based on Algorithm D from Knuth Vol. 3, Sec. 6.4.
+Open addressing is preferred over chaining since the link overhead for
+chaining would be substantial (100% with typical malloc overhead).
+
+The initial probe index is computed as hash mod the table size. Subsequent
+probe indices are computed as explained earlier.
+
+All arithmetic on hash should ignore overflow.
+
+(The details in this version are due to Tim Peters, building on many past
+contributions by Reimer Behrends, Jyrki Alakuijala, Vladimir Marangozov and
+Christian Tismer).
+
+lookdict() is general-purpose, and may return NULL if (and only if) a
+comparison raises an exception (this was new in Python 2.5).
+lookdict_string() below is specialized to string keys, comparison of which can
+never raise an exception; that function can never return NULL. For both, when
+the key isn't found a PyOrderedDictEntry* is returned for which the me_value field is
+NULL; this is the slot in the dict at which the key would have been found, and
+the caller can (if it wishes) add the <key, value> pair to the returned
+PyOrderedDictEntry *.
+*/
+static PyOrderedDictEntry *
+lookdict(PyOrderedDictObject *mp, PyObject *key, register Py_hash_t hash)
+{
+ register size_t i;
+ register size_t perturb;
+ register PyOrderedDictEntry *freeslot;
+ register size_t mask = (size_t)mp->ma_mask;
+ PyOrderedDictEntry *ep0 = mp->ma_table;
+ register PyOrderedDictEntry *ep;
+ register int cmp;
+ PyObject *startkey;
+
+ i = (size_t)hash & mask;
+ ep = &ep0[i];
+ if (ep->me_key == NULL || ep->me_key == key)
+ return ep;
+
+ if (ep->me_key == dummy)
+ freeslot = ep;
+ else {
+ if (ep->me_hash == hash) {
+ startkey = ep->me_key;
+ Py_INCREF(startkey);
+ cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
+ Py_DECREF(startkey);
+ if (cmp < 0)
+ return NULL;
+ if (ep0 == mp->ma_table && ep->me_key == startkey) {
+ if (cmp > 0)
+ return ep;
+ } else {
+ /* The compare did major nasty stuff to the
+ * dict: start over.
+ * XXX A clever adversary could prevent this
+ * XXX from terminating.
+ */
+ return lookdict(mp, key, hash);
+ }
+ }
+ freeslot = NULL;
+ }
+
+ /* In the loop, me_key == dummy is by far (factor of 100s) the
+ least likely outcome, so test for that last. */
+ for (perturb = hash; ; perturb >>= PERTURB_SHIFT) {
+ i = (i << 2) + i + perturb + 1;
+ ep = &ep0[i & mask];
+ if (ep->me_key == NULL)
+ return freeslot == NULL ? ep : freeslot;
+ if (ep->me_key == key)
+ return ep;
+ if (ep->me_hash == hash && ep->me_key != dummy) {
+ startkey = ep->me_key;
+ Py_INCREF(startkey);
+ cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
+ Py_DECREF(startkey);
+ if (cmp < 0)
+ return NULL;
+ if (ep0 == mp->ma_table && ep->me_key == startkey) {
+ if (cmp > 0)
+ return ep;
+ } else {
+ /* The compare did major nasty stuff to the
+ * dict: start over.
+ * XXX A clever adversary could prevent this
+ * XXX from terminating.
+ */
+ return lookdict(mp, key, hash);
+ }
+ } else if (ep->me_key == dummy && freeslot == NULL)
+ freeslot = ep;
+ }
+ assert(0); /* NOT REACHED */
+ return 0;
+}
+
+/*
+ * Hacked up version of lookdict which can assume keys are always strings;
+ * this assumption allows testing for errors during PyObject_RichCompareBool()
+ * to be dropped; string-string comparisons never raise exceptions. This also
+ * means we don't need to go through PyObject_RichCompareBool(); we can always
+ * use PyUNISTR_Eq() directly.
+ *
+ * This is valuable because dicts with only string keys are very common.
+ */
+static PyOrderedDictEntry *
+lookdict_string(PyOrderedDictObject *mp, PyObject *key, register Py_hash_t hash)
+{
+ register size_t i;
+ register size_t perturb;
+ register PyOrderedDictEntry *freeslot;
+ register size_t mask = (size_t)mp->ma_mask;
+ PyOrderedDictEntry *ep0 = mp->ma_table;
+ register PyOrderedDictEntry *ep;
+
+ /* Make sure this function doesn't have to handle non-string keys,
+ including subclasses of str; e.g., one reason to subclass
+ strings is to override __eq__, and for speed we don't cater to
+ that here. */
+ if (!PyUNISTR_CheckExact(key)) {
+#ifdef SHOW_CONVERSION_COUNTS
+ ++converted;
+#endif
+ mp->ma_lookup = lookdict;
+ return lookdict(mp, key, hash);
+ }
+ i = hash & mask;
+ ep = &ep0[i];
+ if (ep->me_key == NULL || ep->me_key == key)
+ return ep;
+ if (ep->me_key == dummy)
+ freeslot = ep;
+ else {
+ if (ep->me_hash == hash && PyUNISTR_Eq(ep->me_key, key))
+ return ep;
+ freeslot = NULL;
+ }
+
+ /* In the loop, me_key == dummy is by far (factor of 100s) the
+ least likely outcome, so test for that last. */
+ for (perturb = hash; ; perturb >>= PERTURB_SHIFT) {
+ i = (i << 2) + i + perturb + 1;
+ ep = &ep0[i & mask];
+ if (ep->me_key == NULL)
+ return freeslot == NULL ? ep : freeslot;
+ if (ep->me_key == key
+ || (ep->me_hash == hash
+ && ep->me_key != dummy
+ && PyUNISTR_Eq(ep->me_key, key)))
+ return ep;
+ if (ep->me_key == dummy && freeslot == NULL)
+ freeslot = ep;
+ }
+ assert(0); /* NOT REACHED */
+ return 0;
+}
+
+static int
+dump_ordereddict_head(register PyOrderedDictObject *mp)
+{
+ if (mp == NULL) {
+ printf("ordereddict header printing received NULL");
+ return -1;
+ }
+ if (PySortedDict_CheckExact(mp))
+ printf("sorteddict");
+ else
+ printf("ordereddict");
+ printf(": fill " SPR ", ", mp->od_fill);
+ printf("used " SPR ", ", mp->ma_used);
+ printf("mask " SPR ", ", mp->ma_mask);
+ printf("mask " SPR ", ", mp->ma_mask);
+ printf("\nbits: ");
+ if (KVIO(mp))
+ printf("kvio ");
+ if (RELAXED(mp))
+ printf("relax ");
+ if (REVERSE(mp))
+ printf("reverse ");
+ printf("\n");
+ return 0;
+}
+
+static void
+dump_sorteddict_fun(register PySortedDictObject *mp)
+{
+ printf("cmp %p, key %p, value %p\n", mp->sd_cmp, mp->sd_key, mp->sd_value);
+}
+
+
+static void
+dump_otablep(register PyOrderedDictObject *mp)
+{
+ Py_ssize_t index;
+ PyOrderedDictEntry **p;
+ printf("mp %p\n", mp);
+ for (index = 0, p = mp->od_otablep; index < mp->ma_used; index++, p++) {
+ printf("index " SPR " %p %p\n", index, p, *p);
+ }
+}
+
+/*
+https://github.com/pbrady/fastcache/issues/32
+mentions no tracking with GC_TRACK in extensions
+*/
+
+/* #if (PY_VERSION_HEX < 0x02070000) */
+#if 1
+/* GC tracking is compiled out in this extension (see the fastcache issue
+   referenced above); both macros expand to nothing. */
+#define MAINTAIN_TRACKING(mp, key, value)
+#define _PyDict_MaybeUntrack(x)
+#else
+#ifdef SHOW_TRACK_COUNT
+#define INCREASE_TRACK_COUNT \
+    (count_tracked++, count_untracked--);
+#define DECREASE_TRACK_COUNT \
+    (count_tracked--, count_untracked++);
+#else
+#define INCREASE_TRACK_COUNT
+#define DECREASE_TRACK_COUNT
+#endif
+
+/* Start GC-tracking mp as soon as it holds a key or value that may itself
+   need tracking. */
+#define MAINTAIN_TRACKING(mp, key, value) \
+    do { \
+        if (!_PyObject_GC_IS_TRACKED(mp)) { \
+            if (_PyObject_GC_MAY_BE_TRACKED(key) || \
+                _PyObject_GC_MAY_BE_TRACKED(value)) { \
+                _PyObject_GC_TRACK(mp); \
+                INCREASE_TRACK_COUNT \
+            } \
+        } \
+    } while(0)
+
+/* Untrack op again when none of its keys/values can be tracked.
+   NOTE(review): dead code under the "#if 1" above, and it operates on
+   plain PyDict types/fields rather than PyOrderedDict ones — confirm
+   before ever re-enabling this branch. */
+    PyAPI_FUNC(void)
+_PyOrderedDict_MaybeUntrack(PyObject *op)
+{
+    PyDictObject *mp;
+    PyObject *value;
+    Py_ssize_t mask, i;
+    PyDictEntry *ep;
+
+    if (!PyDict_CheckExact(op) || !_PyObject_GC_IS_TRACKED(op))
+        return;
+
+    mp = (PyDictObject *) op;
+    ep = mp->ma_table;
+    mask = mp->ma_mask;
+    for (i = 0; i <= mask; i++) {
+        if ((value = ep[i].me_value) == NULL)
+            continue;
+        if (_PyObject_GC_MAY_BE_TRACKED(value) ||
+            _PyObject_GC_MAY_BE_TRACKED(ep[i].me_key))
+            return;
+    }
+    DECREASE_TRACK_COUNT
+    _PyObject_GC_UNTRACK(op);
+}
+#endif
+
+/*
+Internal routine to insert a new item into the table when you have entry object.
+Used by insertdict.
+Eats a reference to key and one to value (the key reference is released
+here when only the value of an existing key is replaced).
+`index` selects the position in the ordering table: -1 appends, -2 means
+kvio (an updated key moves to the end), any other value inserts there.
+Always returns 0.
+*/
+static int
+insertdict_by_entry(register PyOrderedDictObject *mp, PyObject *key, Py_hash_t hash,
+                    PyOrderedDictEntry *ep, PyObject *value, Py_ssize_t index)
+{
+    PyObject *old_value;
+    Py_ssize_t oindex;
+    register PyOrderedDictEntry **epp = NULL;
+
+    MAINTAIN_TRACKING(mp, key, value);
+    if (ep->me_value != NULL) { /* updating a value */
+        old_value = ep->me_value;
+        ep->me_value = value;
+        if (index != -1) {
+            if (index == -2) /* kvio */
+                index = mp->ma_used-1;
+            /* linear scan for the entry's current ordering position */
+            for (oindex = 0, epp = mp->od_otablep; oindex < mp->ma_used;
+                 oindex++, epp++)
+                if (*epp == ep)
+                    break;
+            /* epp now points to item and oindex is its index (optimize?) */
+            /* if index == oindex we don't have to do anything */
+            if (index < oindex) {
+                /* move entry towards the front: shift [index, oindex) up */
+                epp = mp->od_otablep;
+                epp += index;
+                memmove(epp + 1, epp, (oindex - index) * sizeof(PyOrderedDictEntry *));
+                *epp = ep;
+            } else if ((index == oindex + 1) && (index == mp->ma_used)) {
+                /* nothing to do for inserting beyond last with same key */
+            } else if (index > oindex) {
+                /*
+                printf("moving %d %d %p\n", index, oindex, epp);
+                dump_otablep(mp); */
+                /* move entry towards the back: shift (oindex, index] down */
+                memmove(epp, epp + 1, (index - oindex) * sizeof(PyOrderedDictEntry *));
+                mp->od_otablep[index] = ep;
+                /*
+                dump_otablep(mp);
+                */
+            }
+        }
+        Py_DECREF(old_value); /* which **CAN** re-enter */
+        Py_DECREF(key);
+    } else { /* new value */
+        if (ep->me_key == NULL)
+            mp->od_fill++;  /* brand-new slot consumed */
+        else {
+            /* reusing a slot whose key was deleted (dummy placeholder) */
+            assert(ep->me_key == dummy);
+            Py_DECREF(dummy);
+        }
+        ep->me_key = key;
+        ep->me_hash = (Py_ssize_t)hash;
+        ep->me_value = value;
+        if (index < 0)
+            mp->od_otablep[mp->ma_used] = ep;  /* append at the end */
+        else {
+            epp = mp->od_otablep;
+            epp += index;
+            /* make space */
+            memmove(epp + 1, epp, (mp->ma_used - index) * sizeof(PyOrderedDictEntry *));
+            *epp = ep;
+        }
+        mp->ma_used++;
+    }
+    return 0;
+}
+
+/*
+Internal routine to insert a new item into the table.
+Used both by the internal resize routine and by the public insert routine.
+Eats a reference to key and one to value.
+Returns -1 if an error occurred, or 0 on success.
+*/
+static int
+insertdict(register PyOrderedDictObject *mp, PyObject *key, Py_hash_t hash,
+           PyObject *value, Py_ssize_t index)
+{
+    register PyOrderedDictEntry *ep;
+
+    assert(mp->ma_lookup != NULL);
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep != NULL)
+        return insertdict_by_entry(mp, key, hash, ep, value, index);
+    /* lookup failed: release the references we were handed */
+    Py_DECREF(key);
+    Py_DECREF(value);
+    return -1;
+}
+
+/*
+Internal routine to insert a new item into the table when you have entry object.
+Used by insertdict.
+For a sorteddict the ordering-table position is found by binary search on
+the (possibly key-function-transformed) keys instead of being supplied by
+the caller.  Eats a reference to key and one to value.
+Returns 0 on success, -1 on error.
+*/
+static int
+insertsorteddict_by_entry(register PyOrderedDictObject *mp, PyObject *key, Py_hash_t hash,
+                          PyOrderedDictEntry *ep, PyObject *value)
+{
+    PyObject *old_value;
+    Py_ssize_t index = 0, lower, upper;
+    int res;
+    register PySortedDictObject *sd = (PySortedDictObject *) mp;
+    register PyOrderedDictEntry **epp = NULL;
+
+    MAINTAIN_TRACKING(mp, key, value);
+    if (ep->me_value != NULL) { /* updating a value */
+        old_value = ep->me_value;
+        ep->me_value = value;
+        Py_DECREF(old_value); /* which **CAN** re-enter */
+        Py_DECREF(key);
+        if (sd->sd_value != Py_None || sd->sd_cmp != Py_None) {
+            PyErr_SetString(PyExc_NotImplementedError,
+                            "updating a value for a cmp/value sorted dict not implemented"
+                           );
+            return -1;
+        }
+    } else { /* new value */
+        if (ep->me_key == NULL)
+            mp->od_fill++;
+        else {
+            assert(ep->me_key == dummy);
+            Py_DECREF(dummy);
+        }
+        ep->me_key = key;
+        ep->me_hash = (Py_ssize_t)hash;
+        ep->me_value = value;
+        /* determine epp: binary search for the sorted insertion point */
+        epp = mp->od_otablep;
+        lower = 0;
+        upper = mp->ma_used;
+        if (sd->sd_key != Py_None && sd->sd_key != Py_True) {
+            PyObject *transkey;
+            PyObject *chkkey;
+            /* NOTE(review): transkey/chkkey are new references from
+               PyObject_CallFunctionObjArgs and are never Py_DECREF'd, and
+               a failing key-function call is silently replaced by the raw
+               key (pending exception left set) — looks like a leak plus
+               swallowed error; confirm against upstream. */
+            transkey = PyObject_CallFunctionObjArgs(sd->sd_key, key, NULL);
+            if (transkey == NULL)
+                transkey = key;
+            while (lower < upper) {
+                index = (lower+upper) / 2;
+                chkkey = PyObject_CallFunctionObjArgs(sd->sd_key,(epp[index])->me_key, NULL);
+                if (chkkey == NULL)
+                    chkkey = (epp[index])->me_key;
+                res = PyObject_RichCompareBool(chkkey, transkey, Py_GT);
+                if (res == 0)
+                    lower = index + 1;
+                else if (res == 1)
+                    upper = index;
+                else
+                    /* NOTE(review): returning here leaves ep filled but not
+                       linked into od_otablep and ma_used not incremented —
+                       dict state may be inconsistent; verify. */
+                    return -1; /* res was -1 -> error */
+            }
+        } else {
+            while (lower < upper) {
+                index = (lower+upper) / 2;
+                res = PyObject_RichCompareBool((epp[index])->me_key, key, Py_GT);
+                if (res == 0)
+                    lower = index + 1;
+                else if (res == 1)
+                    upper = index;
+                else
+                    return -1; /* res was -1 -> error */
+            }
+        }
+        epp += lower;
+        /* make space */
+        memmove(epp + 1, epp, (mp->ma_used - lower) * sizeof(PyOrderedDictEntry *));
+        *epp = ep;
+        mp->ma_used++;
+    }
+    return 0;
+}
+
+/* Look the key up in the hash table and delegate the actual (sorted)
+   insertion to insertsorteddict_by_entry.  Eats a reference to key and
+   one to value; returns 0 on success, -1 on error. */
+static int
+insertsorteddict(register PyOrderedDictObject *mp, PyObject *key, Py_hash_t hash,
+                 PyObject *value)
+{
+    register PyOrderedDictEntry *ep;
+
+    /* printf("insert sorted dict\n"); */
+    assert(mp->ma_lookup != NULL);
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep != NULL)
+        return insertsorteddict_by_entry(mp, key, hash, ep, value);
+    /* lookup failed: release the references we were handed */
+    Py_DECREF(key);
+    Py_DECREF(value);
+    return -1;
+}
+
+
+/*
+Internal routine used by dictresize() to insert an item which is
+known to be absent from the dict. This routine also assumes that
+the dict contains no deleted entries. Besides the performance benefit,
+using insertdict() in dictresize() is dangerous (SF bug #1456209).
+Note that no refcounts are changed by this routine; if needed, the caller
+is responsible for incref'ing `key` and `value`.
+*/
+static void
+insertdict_clean(register PyOrderedDictObject *mp, PyObject *key, Py_hash_t hash,
+                 PyObject *value)
+{
+    register size_t i;
+    register size_t perturb;
+    register size_t mask = (size_t)mp->ma_mask;
+    PyOrderedDictEntry *ep0 = mp->ma_table;
+    register PyOrderedDictEntry *ep;
+
+    MAINTAIN_TRACKING(mp, key, value);
+    /* open-addressing probe: start at hash & mask, then follow the
+       i = 5*i + perturb + 1 recurrence until a free slot is found */
+    i = hash & mask;
+    ep = &ep0[i];
+    for (perturb = hash; ep->me_key != NULL; perturb >>= PERTURB_SHIFT) {
+        i = (i << 2) + i + perturb + 1;
+        ep = &ep0[i & mask];
+    }
+    assert(ep->me_value == NULL);
+    mp->od_fill++;
+    ep->me_key = key;
+    ep->me_hash = (Py_ssize_t)hash;
+    ep->me_value = value;
+    /* append to the ordering table; caller preserves insertion order */
+    mp->od_otablep[mp->ma_used] = ep;
+    mp->ma_used++;
+}
+
+/*
+Restructure the table by allocating a new table and reinserting all
+items again. When entries have been deleted, the new table may
+actually be smaller than the old one.
+Returns 0 on success, -1 (with MemoryError set) on failure.
+*/
+static int
+dictresize(PyOrderedDictObject *mp, Py_ssize_t minused)
+{
+    Py_ssize_t newsize;
+    PyOrderedDictEntry *oldtable, *newtable, *ep, **epp;
+    PyOrderedDictEntry **oldotablep, **newotablep;
+    register Py_ssize_t i, j;
+    int is_oldtable_malloced;
+    int reusing_smalltable;
+    PyOrderedDictEntry small_copy[PyOrderedDict_MINSIZE];
+    PyOrderedDictEntry *small_ocopyp[PyOrderedDict_MINSIZE];
+
+    assert(minused >= 0);
+
+    /* Find the smallest table size > minused. */
+    for (newsize = PyOrderedDict_MINSIZE;
+         newsize <= minused && newsize > 0;
+         newsize <<= 1)
+        ;
+    if (newsize <= 0) {     /* Py_ssize_t overflow while doubling */
+        PyErr_NoMemory();
+        return -1;
+    }
+
+    /* Get space for a new table. */
+    oldtable = mp->ma_table;
+    oldotablep = mp->od_otablep;
+    assert(oldtable != NULL);
+    assert(oldotablep != NULL);
+    is_oldtable_malloced = oldtable != mp->ma_smalltable;
+
+    reusing_smalltable = 0;
+
+    if (newsize == PyOrderedDict_MINSIZE) {
+        /* A large table is shrinking, or we can't get any smaller. */
+        newtable = mp->ma_smalltable;
+        newotablep = mp->ma_smallotablep;
+        if (newtable == oldtable) {
+            if (mp->od_fill == mp->ma_used) {
+                /* No dummies, so no point doing anything. */
+                return 0;
+            }
+            /* We're not going to resize it, but rebuild the
+               table anyway to purge old dummy entries.
+               Subtle: This is *necessary* if fill==size,
+               as lookdict needs at least one virgin slot to
+               terminate failing searches. If fill < size, it's
+               merely desirable, as dummies slow searches. */
+            assert(mp->od_fill > mp->ma_used);
+            memcpy(small_copy, oldtable, sizeof(small_copy));
+            /* Small_ocopyp must point into small_copy */
+            for (i = 0; i < PyOrderedDict_MINSIZE; i++) {
+                small_ocopyp[i] = oldotablep[i] ? &small_copy[oldotablep[i]-&oldtable[0]]: NULL;
+            }
+            oldtable = small_copy;
+            reusing_smalltable = 1;
+        }
+    } else {
+        newtable = PyMem_NEW(PyOrderedDictEntry, newsize);
+        if (newtable == NULL) {
+            PyErr_NoMemory();
+            return -1;
+        }
+        newotablep = PyMem_NEW(PyOrderedDictEntry*, newsize);
+        if (newotablep == NULL) {
+            /* BUGFIX: free the entry table allocated just above instead
+               of leaking it on this error path */
+            PyMem_DEL(newtable);
+            PyErr_NoMemory();
+            return -1;
+        }
+    }
+
+    /* Make the dict empty, using the new table. */
+    assert(newtable != oldtable);
+    assert(newotablep != oldotablep);
+    mp->ma_table = newtable;
+    mp->od_otablep = newotablep;
+    mp->ma_mask = newsize - 1;
+    memset(newtable, 0, sizeof(PyOrderedDictEntry) * newsize);
+    memcpy(newotablep, oldotablep, sizeof(PyOrderedDictEntry *) * mp->ma_used);
+    epp = mp->od_otablep;
+    j = mp->ma_used;
+    mp->ma_used = 0;
+    i = mp->od_fill;
+    mp->od_fill = 0;
+
+    /* Copy the data over; this is refcount-neutral for active entries;
+       dummy entries aren't copied over, of course.
+       Iterating the copied ordering table preserves insertion order;
+       insertdict_clean rewrites each od_otablep slot only after it has
+       been read here. */
+
+    for (epp = reusing_smalltable ? small_ocopyp: mp->od_otablep; j > 0; epp++, j--) {
+        /* BUGFIX: cast to Py_hash_t, not long — long is 32 bits on LLP64
+           platforms (64-bit Windows) and would truncate the hash */
+        insertdict_clean(mp, (*epp)->me_key, (Py_hash_t)(*epp)->me_hash,
+                         (*epp)->me_value);
+    }
+    /* Release the dummy references held by deleted slots of the old table */
+    for (ep = oldtable; i > 0; ep++) {
+        if (ep->me_value != NULL) { /* active entry */
+            --i;
+        } else if (ep->me_key != NULL) { /* dummy entry */
+            --i;
+            assert(ep->me_key == dummy);
+            Py_DECREF(ep->me_key);
+        }
+        /* else key == value == NULL: nothing to do */
+    }
+
+    if (is_oldtable_malloced) {
+        PyMem_DEL(oldtable);
+        PyMem_DEL(oldotablep);
+    }
+    return 0;
+}
+
+
+/* Create a new dictionary pre-sized to hold an estimated number of elements.
+   Underestimates are okay because the dictionary will resize as necessary.
+   Overestimates just mean the dictionary will be more sparse than usual.
+   Returns NULL (with an exception set) on failure.
+*/
+
+PyAPI_FUNC(PyObject *)
+_PyOrderedDict_NewPresized(Py_ssize_t minused)
+{
+    PyObject *op = PyOrderedDict_New();
+
+    /* tiny estimates fit the initial small table; nothing to resize */
+    if (op == NULL || minused <= 5)
+        return op;
+    if (dictresize((PyOrderedDictObject *)op, minused) == -1) {
+        Py_DECREF(op);
+        return NULL;
+    }
+    return op;
+}
+
+
+/* Note that, for historical reasons, PyOrderedDict_GetItem() suppresses all errors
+ * that may occur (originally dicts supported only string keys, and exceptions
+ * weren't possible). So, while the original intent was that a NULL return
+ * meant the key wasn't present, in reality it can mean that, or that an error
+ * (suppressed) occurred while computing the key's hash, or that some error
+ * (suppressed) occurred when comparing keys in the dict's internal probe
+ * sequence. A nasty example of the latter is when a Python-coded comparison
+ * function hits a stack-depth error, which can cause this to return NULL
+ * even if the key is present.
+ *
+ * Returns a BORROWED reference to the value, or NULL.
+ */
+PyObject *
+PyOrderedDict_GetItem(PyObject *op, PyObject *key)
+{
+    Py_hash_t hash;
+    PyOrderedDictObject *mp = (PyOrderedDictObject *)op;
+    PyOrderedDictEntry *ep;
+    PyThreadState *tstate;
+
+    if (!PyOrderedDict_Check(op))
+        return NULL;
+    /* reuse a cached string/unicode hash when available */
+    if (!PyUNISTR_CheckExact(key) ||
+        (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1) {
+            PyErr_Clear();
+            return NULL;
+        }
+    }
+
+    /* We can arrive here with a NULL tstate during initialization: try
+       running "python -Wi" for an example related to string interning.
+       Let's just hope that no exception occurs then... This must be
+       _PyThreadState_Current and not PyThreadState_GET() because in debug
+       mode, the latter complains if tstate is NULL. */
+#if PY_VERSION_HEX < 0x03000000
+    tstate = _PyThreadState_Current;
+#else
+    tstate = (PyThreadState*)_Py_atomic_load_relaxed(
+                 &_PyThreadState_Current);
+#endif
+    if (tstate != NULL && tstate->curexc_type != NULL) {
+        /* preserve the existing exception */
+        PyObject *err_type, *err_value, *err_tb;
+        PyErr_Fetch(&err_type, &err_value, &err_tb);
+        ep = (mp->ma_lookup)(mp, key, hash);
+        /* ignore errors */
+        PyErr_Restore(err_type, err_value, err_tb);
+        if (ep == NULL)
+            return NULL;
+    } else {
+        ep = (mp->ma_lookup)(mp, key, hash);
+        if (ep == NULL) {
+            PyErr_Clear();
+            return NULL;
+        }
+    }
+    return ep->me_value;
+}
+
+/* Insert key->value (with precomputed hash) into an ordered or sorted
+ * dict, then resize the table if the fill factor reached 2/3.
+ * Consumes no caller references: it takes its own via Py_INCREF, and the
+ * insert helpers eat those on both success and failure.
+ * Returns 0 on success, -1 on error.
+ * NOTE(review): the `ep` parameter is accepted but never used here —
+ * presumably a leftover from the CPython function this mirrors; confirm.
+ */
+static int
+dict_set_item_by_hash_or_entry(register PyObject *op, PyObject *key,
+                               Py_hash_t hash, PyOrderedDictEntry *ep, PyObject *value)
+{
+    register PyOrderedDictObject *mp;
+    register Py_ssize_t n_used;
+    mp = (PyOrderedDictObject *)op;
+    assert(mp->od_fill <= mp->ma_mask); /* at least one empty slot */
+    n_used = mp->ma_used;
+    Py_INCREF(value);
+    Py_INCREF(key);
+#if PY_MAJOR_VERSION < 3
+    if (PySortedDict_Check(op)) {
+#else
+    if (PySortedDict_CheckExact(op)) {
+#endif
+        if (insertsorteddict(mp, key, hash, value) != 0)
+            return -1;
+    } else if (insertdict(mp, key, hash, value, KVIO(mp) ? -2: -1) != 0)
+        return -1;
+    /* If we added a key, we can safely resize. Otherwise just return!
+     * If fill >= 2/3 size, adjust size. Normally, this doubles or
+     * quaduples the size, but it's also possible for the dict to shrink
+     * (if od_fill is much larger than ma_used, meaning a lot of dict
+     * keys have been * deleted).
+     *
+     * Quadrupling the size improves average dictionary sparseness
+     * (reducing collisions) at the cost of some memory and iteration
+     * speed (which loops over every possible entry). It also halves
+     * the number of expensive resize operations in a growing dictionary.
+     *
+     * Very large dictionaries (over 50K items) use doubling instead.
+     * This may help applications with severe memory constraints.
+     */
+    if (!(mp->ma_used > n_used && mp->od_fill*3 >= (mp->ma_mask+1)*2))
+        return 0;
+    return dictresize(mp, (mp->ma_used > 50000 ? 2 : 4) * mp->ma_used);
+}
+
+/* CAUTION: PyOrderedDict_SetItem() must guarantee that it won't resize the
+ * dictionary if it's merely replacing the value for an existing key.
+ * This means that it's safe to loop over a dictionary with PyOrderedDict_Next()
+ * and occasionally replace a value -- but you can't insert new keys or
+ * remove them.
+ * This does never hold for kvio.
+ * Returns 0 on success, -1 on error.
+ */
+int
+PyOrderedDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
+{
+    register Py_hash_t hash;
+
+    if (!PyOrderedDict_Check(op)) {
+        PyErr_BadInternalCall();
+        return -1;
+    }
+    assert(key);
+    assert(value);
+    if (!PyUNISTR_CheckExact(key)) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return -1;
+    } else {
+        /* exact string/unicode keys cache their hash; compute lazily */
+        hash = ((PyUNISTR_Object *)key)->OB_HASH;
+        if (hash == -1)
+            hash = PyObject_Hash(key);
+    }
+    return dict_set_item_by_hash_or_entry(op, key, hash, NULL, value);
+}
+
+/* Insert key->value at position `index` of the ordering table.
+ * Negative indices count from the end; out-of-range indices are clamped
+ * to [0, ma_used].  Not supported for sorteddict (TypeError).
+ * Returns 0 on success, -1 on error.
+ */
+int
+PyOrderedDict_InsertItem(register PyOrderedDictObject *mp, Py_ssize_t index,
+                         PyObject *key, PyObject *value)
+{
+    register Py_hash_t hash;
+    register Py_ssize_t n_used;
+
+#if PY_MAJOR_VERSION < 3
+    if (PySortedDict_Check(mp)) {
+#else
+    if (PySortedDict_CheckExact(mp)) {
+#endif
+        PyErr_SetString(PyExc_TypeError,
+                        "sorteddict does not support insert()");
+        return -1;
+    }
+    if (!PyOrderedDict_Check(mp)) {
+        PyErr_BadInternalCall();
+        return -1;
+    }
+    assert(key);
+    assert(value);
+    if (index < 0)
+        index += mp->ma_used;
+    /* test to see if index is in range */
+    if (index > mp->ma_used)
+        index = mp->ma_used;
+    else if (index < 0)
+        index = 0;
+    /* reuse a cached string/unicode hash when available */
+    if (PyUNISTR_CheckExact(key)) {
+        hash = ((PyUNISTR_Object *)key)->OB_HASH;
+        if (hash == -1)
+            hash = PyObject_Hash(key);
+    } else {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return -1;
+    }
+    assert(mp->od_fill <= mp->ma_mask); /* at least one empty slot */
+    n_used = mp->ma_used;
+    Py_INCREF(value);
+    Py_INCREF(key);
+    if (insertdict(mp, key, hash, value, index) != 0)
+        return -1;
+    /* If we added a key, we can safely resize. Otherwise just return!
+     * If fill >= 2/3 size, adjust size. Normally, this doubles or
+     * quaduples the size, but it's also possible for the dict to shrink
+     * (if od_fill is much larger than ma_used, meaning a lot of dict
+     * keys have been * deleted).
+     *
+     * Quadrupling the size improves average dictionary sparseness
+     * (reducing collisions) at the cost of some memory and iteration
+     * speed (which loops over every possible entry). It also halves
+     * the number of expensive resize operations in a growing dictionary.
+     *
+     * Very large dictionaries (over 50K items) use doubling instead.
+     * This may help applications with severe memory constraints.
+     */
+    if (!(mp->ma_used > n_used && mp->od_fill*3 >= (mp->ma_mask+1)*2))
+        return 0;
+    return dictresize(mp, (mp->ma_used > 50000 ? 2 : 4) * mp->ma_used);
+}
+
+
+/* Remove entry ep from the ordering table by shifting all later slots
+   one position down.  Returns 1 if ep was found, 0 otherwise.
+   Does not touch ma_used; the caller adjusts it. */
+static int
+del_inorder(PyOrderedDictObject *op, PyOrderedDictEntry* ep)
+{
+    PyOrderedDictEntry **slot = op->od_otablep;
+    Py_ssize_t remaining;
+
+    for (remaining = op->ma_used; remaining > 0; slot++) {
+        remaining--;
+        if (*slot == ep) {
+            memmove(slot, slot + 1, remaining * sizeof(PyOrderedDictEntry *));
+            return 1;
+        }
+    }
+    return 0; /* not found */
+}
+
+/* Delete `key` from the dict: unlink its entry from the ordering table,
+ * replace the slot key with the dummy placeholder, and drop the value.
+ * Returns 0 on success, -1 on error (KeyError when the key is absent).
+ */
+int
+PyOrderedDict_DelItem(PyObject *op, PyObject *key)
+{
+    register PyOrderedDictObject *mp;
+    register Py_hash_t hash;
+    register PyOrderedDictEntry *ep;
+    PyObject *old_value, *old_key;
+
+    if (!PyOrderedDict_Check(op)) {
+        PyErr_BadInternalCall();
+        return -1;
+    }
+    assert(key);
+    /* reuse a cached string/unicode hash when available */
+    if (!PyUNISTR_CheckExact(key) ||
+        (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return -1;
+    }
+    mp = (PyOrderedDictObject *)op;
+    ep = (mp->ma_lookup)(mp, key, hash);
+    /* BUGFIX: validate the lookup result *before* using it.  The old code
+       passed ep to del_inorder() first, scanning the ordering table with a
+       possibly-NULL or inactive entry (harmless only by accident, since
+       such an ep never appears in od_otablep). */
+    if (ep == NULL)
+        return -1;
+    if (ep->me_value == NULL) {
+        set_key_error(key);
+        return -1;
+    }
+    /* at this point we have to move all the entries beyond the one found
+       back one space (this could be optimised by deferring) */
+    del_inorder(mp, ep);
+    old_key = ep->me_key;
+    assert(ep->me_key);
+    Py_INCREF(dummy);
+    ep->me_key = dummy;
+    old_value = ep->me_value;
+    ep->me_value = NULL;
+    mp->ma_used--;
+    Py_DECREF(old_value);
+    Py_DECREF(old_key);
+    return 0;
+}
+
+/* Remove all items.  Resets the dict to the embedded small table before
+ * releasing any references, so re-entrant mutation during decref cannot
+ * corrupt the tables being cleared.
+ */
+void
+PyOrderedDict_Clear(PyObject *op)
+{
+    PyOrderedDictObject *mp;
+    PyOrderedDictEntry *ep, *table, **otablep;
+    int table_is_malloced;
+    Py_ssize_t fill;
+    PyOrderedDictEntry small_copy[PyOrderedDict_MINSIZE];
+#ifdef Py_DEBUG
+    Py_ssize_t i, n;
+#endif
+
+    if (!PyOrderedDict_Check(op))
+        return;
+    mp = (PyOrderedDictObject *)op;
+#ifdef Py_DEBUG
+    n = mp->ma_mask + 1;
+    i = 0;
+#endif
+
+    table = mp->ma_table;
+    otablep = mp->od_otablep;
+    assert(table != NULL);
+    assert(otablep != NULL);
+    table_is_malloced = table != mp->ma_smalltable;
+
+    /* This is delicate. During the process of clearing the dict,
+     * decrefs can cause the dict to mutate. To avoid fatal confusion
+     * (voice of experience), we have to make the dict empty before
+     * clearing the slots, and never refer to anything via mp->xxx while
+     * clearing.
+     */
+    fill = mp->od_fill;
+    if (table_is_malloced)
+        EMPTY_TO_MINSIZE(mp);
+
+    else if (fill > 0) {
+        /* It's a small table with something that needs to be cleared.
+         * Afraid the only safe way is to copy the dict entries into
+         * another small table first.
+         */
+        memcpy(small_copy, table, sizeof(small_copy));
+        table = small_copy;
+        EMPTY_TO_MINSIZE(mp);
+    }
+    /* else it's a small table that's already empty */
+
+    /* Now we can finally clear things. If C had refcounts, we could
+     * assert that the refcount on table is 1 now, i.e. that this function
+     * has unique access to it, so decref side-effects can't alter it.
+     */
+    for (ep = table; fill > 0; ++ep) {
+#ifdef Py_DEBUG
+        assert(i < n);
+        ++i;
+#endif
+        if (ep->me_key) {
+            /* counts both active entries and dummy placeholders */
+            --fill;
+            Py_DECREF(ep->me_key);
+            Py_XDECREF(ep->me_value);
+        }
+#ifdef Py_DEBUG
+        else
+            assert(ep->me_value == NULL);
+#endif
+    }
+
+    if (table_is_malloced) {
+        PyMem_DEL(table);
+        PyMem_DEL(otablep);
+    }
+}
+
+/*
+ * Iterate over a dict in insertion order. Use like so:
+ *
+ *     Py_ssize_t i;
+ *     PyObject *key, *value;
+ *     i = 0; # important! i should not otherwise be changed by you
+ *     while (PyOrderedDict_Next(yourdict, &i, &key, &value)) {
+ *         Refer to borrowed references in key and value.
+ *     }
+ *
+ * CAUTION: In general, it isn't safe to use PyOrderedDict_Next in a loop that
+ * mutates the dict. One exception: it is safe if the loop merely changes
+ * the values associated with the keys (but doesn't insert new keys or
+ * delete keys), via PyOrderedDict_SetItem().
+ */
+int
+PyOrderedDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue)
+{
+    PyOrderedDictObject *mp;
+    Py_ssize_t pos;
+
+    if (!PyOrderedDict_Check(op) && !PySortedDict_Check(op))
+        return 0;
+    mp = (PyOrderedDictObject *)op;
+    pos = *ppos;
+    /* review: not sure why different from 2.5.1 here. */
+    if (pos < 0 || pos >= mp->ma_used)
+        return 0;
+    *ppos = pos + 1;
+    if (pkey)
+        *pkey = mp->od_otablep[pos]->me_key;
+    if (pvalue)
+        *pvalue = mp->od_otablep[pos]->me_value;
+    return 1;
+}
+
+/* Internal version of PyOrderedDict_Next that returns a hash value in
+ * addition to the key and value.  Note: unlike PyOrderedDict_Next, this
+ * walks the raw hash table, so it yields items in slot order, not
+ * insertion order.  Returns borrowed references.
+ */
+int
+_PyOrderedDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue, Py_hash_t *phash)
+{
+    register Py_ssize_t i;
+    register Py_ssize_t mask;
+    register PyOrderedDictEntry *ep;
+
+    if (!PyOrderedDict_Check(op))
+        return 0;
+    i = *ppos;
+    if (i < 0)
+        return 0;
+    ep = ((PyOrderedDictObject *)op)->ma_table;
+    mask = ((PyOrderedDictObject *)op)->ma_mask;
+    /* skip empty and dummy slots */
+    while (i <= mask && ep[i].me_value == NULL)
+        i++;
+    *ppos = i+1;
+    if (i > mask)
+        return 0;
+    /* BUGFIX: cast to Py_hash_t, not long — long is 32 bits on LLP64
+       platforms (64-bit Windows) and would truncate the hash */
+    *phash = (Py_hash_t)(ep[i].me_hash);
+    if (pkey)
+        *pkey = ep[i].me_key;
+    if (pvalue)
+        *pvalue = ep[i].me_value;
+    return 1;
+}
+
+/* Methods */
+
+/* tp_dealloc: release all keys/values, free malloc'ed tables, and either
+ * park the object on the module's free list (exact ordereddict instances
+ * only) or hand it to tp_free.  Uses the trashcan protocol to avoid
+ * C-stack overflow on deeply nested containers. */
+static void
+dict_dealloc(register PyOrderedDictObject *mp)
+{
+    register PyOrderedDictEntry *ep;
+    Py_ssize_t fill = mp->od_fill;
+    PyObject_GC_UnTrack(mp);
+    Py_TRASHCAN_SAFE_BEGIN(mp)
+    for (ep = mp->ma_table; fill > 0; ep++) {
+        if (ep->me_key) {
+            /* counts both active entries and dummy placeholders */
+            --fill;
+            Py_DECREF(ep->me_key);
+            Py_XDECREF(ep->me_value);
+        }
+    }
+    if (mp->ma_table != mp->ma_smalltable) {
+        PyMem_DEL(mp->ma_table);
+        PyMem_DEL(mp->od_otablep);
+    }
+    if (numfree < PyDict_MAXFREELIST && Py_TYPE(mp) == &PyOrderedDict_Type)
+        free_list[numfree++] = mp;
+    else
+        Py_TYPE(mp)->tp_free((PyObject *)mp);
+    Py_TRASHCAN_SAFE_END(mp)
+}
+
+#if PY_MAJOR_VERSION < 3
+/* tp_print (Python 2 only): write "ordereddict([(k, v), ...])" (or
+ * "sorteddict(...)") directly to fp.  Uses Py_ReprEnter/Py_ReprLeave to
+ * print "...": for self-referential dicts, and releases the GIL around
+ * each raw fprintf.  Returns 0 on success, -1 on error. */
+static int
+ordereddict_print(register PyOrderedDictObject *mp, register FILE *fp, register int flags)
+{
+    register Py_ssize_t i;
+    register Py_ssize_t any;
+    char *typestr = "ordered";
+    int status;
+    PyOrderedDictEntry **epp;
+
+    if (PySortedDict_CheckExact(mp))
+        typestr = "sorted";
+    status = Py_ReprEnter((PyObject*)mp);
+    if (status != 0) {
+        if (status < 0)
+            return status;
+        /* already being printed higher up the stack: recursion marker */
+        Py_BEGIN_ALLOW_THREADS
+        fprintf(fp, "%sdict([...])", typestr);
+        Py_END_ALLOW_THREADS
+        return 0;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    fprintf(fp, "%sdict([", typestr);
+    Py_END_ALLOW_THREADS
+    any = 0;
+    epp = mp->od_otablep;
+    for (i = 0; i < mp->ma_used; i++) {
+        PyObject *pvalue = (*epp)->me_value;
+        /* Prevent PyObject_Repr from deleting value during
+           key format */
+        Py_INCREF(pvalue);
+        /* the if governs the whole BEGIN..END compound (macro braces) */
+        if (any++ > 0)
+            Py_BEGIN_ALLOW_THREADS
+            fprintf(fp, ", ");
+            Py_END_ALLOW_THREADS
+        Py_BEGIN_ALLOW_THREADS
+        fprintf(fp, "(");
+        Py_END_ALLOW_THREADS
+        if (PyObject_Print((PyObject *)((*epp)->me_key), fp, 0)!=0) {
+            Py_DECREF(pvalue);
+            Py_ReprLeave((PyObject*)mp);
+            return -1;
+        }
+        Py_BEGIN_ALLOW_THREADS
+        fprintf(fp, ", ");
+        Py_END_ALLOW_THREADS
+        if (PyObject_Print(pvalue, fp, 0) != 0) {
+            Py_DECREF(pvalue);
+            Py_ReprLeave((PyObject*)mp);
+            return -1;
+        }
+        Py_DECREF(pvalue);
+        Py_BEGIN_ALLOW_THREADS
+        fprintf(fp, ")");
+        Py_END_ALLOW_THREADS
+        epp++;
+    }
+    Py_BEGIN_ALLOW_THREADS
+    fprintf(fp, "])");
+    Py_END_ALLOW_THREADS
+    Py_ReprLeave((PyObject*)mp);
+    return 0;
+}
+#endif
+
+/* Build the repr string "<typestr>dict([(k, v), ...])".
+ * Shared by ordereddict_repr and sorteddict_repr; `typestr` supplies the
+ * prefix.  Relies on PyUNISTR_ConcatAndDel's NULL propagation: once any
+ * piece fails, s stays NULL and the single check below catches it.
+ * Returns a new reference, or NULL on error. */
+static PyObject *
+basedict_repr(PyOrderedDictObject *mp, char *typestr)
+{
+    Py_ssize_t i;
+    PyObject *s, *temp, *comma = NULL, *rightpar = NULL;
+    PyObject *pieces = NULL, *result = NULL;
+    PyObject *key, *value;
+/*    char *typestr = "ordered"; */
+
+    /* if (PySortedDict_CheckExact(mp))*/
+/*
+#if PY_MAJOR_VERSION < 3
+    if (PySortedDict_Check(mp))
+#else
+    if (PySortedDict_Check(mp))
+#endif
+        typestr = "sorted";
+*/
+    i = Py_ReprEnter((PyObject *)mp);
+    if (i != 0) {
+        /* self-referential dict: emit the recursion marker */
+        return i > 0 ? PyUNISTR_FromFormat("%sdict([...])", typestr) : NULL;
+    }
+
+    if (mp->ma_used == 0) {
+        result = PyUNISTR_FromFormat("%sdict([])", typestr);
+        goto Done;
+    }
+
+    pieces = PyList_New(0);
+    if (pieces == NULL)
+        goto Done;
+
+    comma = PyUNISTR_FromString(", ");
+    if (comma == NULL)
+        goto Done;
+    rightpar = PyUNISTR_FromString(")");
+    if (rightpar == NULL)
+        goto Done;
+
+    /* Do repr() on each key+value pair, and insert ": " between them.
+       Note that repr may mutate the dict. */
+    i = 0;
+    while (PyOrderedDict_Next((PyObject *)mp, &i, &key, &value)) {
+        int status;
+        /* Prevent repr from deleting value during key format. */
+        Py_INCREF(value);
+        s = PyUNISTR_FromString("(");
+        PyUNISTR_ConcatAndDel(&s, PyObject_Repr(key));
+        PyUNISTR_Concat(&s, comma);
+        PyUNISTR_ConcatAndDel(&s, PyObject_Repr(value));
+        Py_DECREF(value);
+        PyUNISTR_Concat(&s, rightpar);
+        if (s == NULL)
+            goto Done;
+        status = PyList_Append(pieces, s);
+        Py_DECREF(s); /* append created a new ref */
+        if (status < 0)
+            goto Done;
+    }
+
+    /* Add "[]" decorations to the first and last items. */
+    assert(PyList_GET_SIZE(pieces) > 0);
+    s = PyUNISTR_FromFormat("%sdict([", typestr);
+    if (s == NULL)
+        goto Done;
+    temp = PyList_GET_ITEM(pieces, 0);
+    PyUNISTR_ConcatAndDel(&s, temp);
+    PyList_SET_ITEM(pieces, 0, s);
+    if (s == NULL)
+        goto Done;
+
+    s = PyUNISTR_FromString("])");
+    if (s == NULL)
+        goto Done;
+    temp = PyList_GET_ITEM(pieces, PyList_GET_SIZE(pieces) - 1);
+    PyUNISTR_ConcatAndDel(&temp, s);
+    PyList_SET_ITEM(pieces, PyList_GET_SIZE(pieces) - 1, temp);
+    if (temp == NULL)
+        goto Done;
+
+    /* Paste them all together with ", " between. */
+    result = PyUNISTR_Join(comma, pieces);
+
+Done:
+    Py_XDECREF(pieces);
+    Py_XDECREF(comma);
+    Py_XDECREF(rightpar);
+    Py_ReprLeave((PyObject *)mp);
+    return result;
+}
+
+/* tp_repr for ordereddict: "ordereddict([...])". */
+static PyObject *
+ordereddict_repr(PyOrderedDictObject *mp)
+{
+    return basedict_repr(mp, "ordered");
+}
+
+/* tp_repr for sorteddict: "sorteddict([...])". */
+static PyObject *
+sorteddict_repr(PySortedDictObject *mp)
+{
+    return basedict_repr((PyOrderedDictObject *)mp, "sorted");
+}
+
+/* mp_length: number of items currently in the dict. */
+static Py_ssize_t
+dict_length(PyOrderedDictObject *mp)
+{
+    return mp->ma_used;
+}
+
+/* mp_subscript: d[key] or d[slice].
+ * A slice returns a new ordereddict with the selected items; a plain key
+ * does a hash lookup, consulting a subclass's __missing__ before raising
+ * KeyError.  Returns a new reference, or NULL with an exception set. */
+static PyObject *
+dict_subscript(PyOrderedDictObject *mp, register PyObject *key)
+{
+    PyObject *v;
+    Py_hash_t hash;
+    PyOrderedDictEntry *ep;
+    if (PySlice_Check(key)) {
+        Py_ssize_t start, stop, step, slicelength;
+        PyObject* result;
+
+        if (PySlice_GetIndicesEx(
+#if PY_VERSION_HEX < 0x03000000
+                    (PySliceObject*)
+#endif
+                    key, mp->ma_used,
+                    &start, &stop, &step, &slicelength) < 0) {
+            return NULL;
+        }
+        result = PyOrderedDict_New();
+        if (!result) return NULL;
+        if (slicelength <= 0) return result;
+        if (PyOrderedDict_CopySome(result, (PyObject *) mp, start, step, slicelength, 1) == 0)
+            return result;
+        Py_DECREF(result);
+        return NULL;
+    }
+    assert(mp->ma_table != NULL);
+    /* reuse a cached string/unicode hash when available */
+    if (!PyUNISTR_CheckExact(key) ||
+        (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return NULL;
+    }
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep == NULL)
+        return NULL;
+    v = ep->me_value;
+    if (v == NULL) {
+        if (!PyOrderedDict_CheckExact(mp) && !PySortedDict_CheckExact(mp)) {
+            /* Look up __missing__ method if we're a subclass. */
+#if PY_VERSION_HEX < 0x02070000
+            PyObject *missing;
+            static PyObject *missing_str = NULL;
+            if (missing_str == NULL)
+                missing_str =
+                    PyString_InternFromString("__missing__");
+            missing = _PyType_Lookup(Py_TYPE(mp), missing_str);
+            if (missing != NULL)
+                return PyObject_CallFunctionObjArgs(missing,
+                                                    (PyObject *)mp, key, NULL);
+#else
+            PyObject *missing, *res;
+            static PyObject *missing_str = NULL;
+            missing = _PyObject_LookupSpecial((PyObject *)mp,
+                                              "__missing__",
+                                              &missing_str);
+            if (missing != NULL) {
+                res = PyObject_CallFunctionObjArgs(missing,
+                                                   key, NULL);
+                Py_DECREF(missing);
+                return res;
+            }
+            else if (PyErr_Occurred())
+                return NULL;
+#endif
+        }
+        set_key_error(key);
+        return NULL;
+    } else
+        Py_INCREF(v);
+    return v;
+}
+
+/* a[ilow:ihigh] = v if v != NULL.
+ * del a[ilow:ihigh] if v == NULL.
+ *
+ * Special speed gimmick: when v is NULL and ihigh - ilow <= 8, it's
+ * guaranteed the call cannot fail.
+ *
+ * Implementation note (lazy): the slice is always deleted first; for an
+ * assignment the replacement items are then inserted one by one.
+ * Returns 0 on success, -1 on error.
+ */
+static Py_ssize_t
+dict_ass_slice(PyOrderedDictObject *self, Py_ssize_t ilow, Py_ssize_t ihigh, PyObject *value)
+{
+    PyObject *recycle_on_stack[8];
+    PyObject **recycle = recycle_on_stack; /* will allocate more if needed */
+    Py_ssize_t result = -1, i;
+    Py_ssize_t num_to_delete = 0, s;
+    PyOrderedDictEntry **epp;
+
+    if (PySortedDict_CheckExact(self)) {
+        PyErr_Format(PyExc_TypeError,
+                     "sorteddict does not support slice %s", value ? "assignment" : "deletion");
+        return -1;
+    }
+    /* clamp bounds to [0, ma_used], ihigh >= ilow */
+    if (ilow < 0)
+        ilow = 0;
+    else if (ilow > self->ma_used)
+        ilow = self->ma_used;
+
+    if (ihigh < ilow)
+        ihigh = ilow;
+    else if (ihigh > self->ma_used)
+        ihigh = self->ma_used;
+
+    if (value != NULL) {
+        if (PyObject_Length(value) != (ihigh - ilow)) {
+            PyErr_SetString(PyExc_ValueError,
+                            "slice assignment: wrong size"
+                           );
+            return -1;
+        }
+        if (!PyOrderedDict_CheckExact(value)) {
+            PyErr_SetString(PyExc_TypeError,
+                            "slice assignment: argument must be ordereddict"
+                           );
+            return -1;
+        }
+    }
+
+    /* for now lazy implementation: first delete then insert */
+#define DELETION_AND_OVERWRITING_SEPERATE 0
+#if DELETION_AND_OVERWRITING_SEPERATE == 1
+    if (value == NULL) {
+#endif
+        /* stash the doomed key/value references so they are only released
+           after the tables are consistent again */
+        s = (ihigh - ilow) * 2 * sizeof(PyObject *);
+        if (s > sizeof(recycle_on_stack)) {
+            recycle = (PyObject **)PyMem_MALLOC(s);
+            if (recycle == NULL) {
+                PyErr_NoMemory();
+                goto Error;
+            }
+
+        }
+        epp = self->od_otablep;
+        epp += ilow;
+        for (i = ilow; i < ihigh; i++, epp++) {
+            /* AvdN: ToDo DECREF key and value */
+            recycle[num_to_delete++] = (*epp)->me_key;
+            Py_INCREF(dummy);
+            (*epp)->me_key = dummy;
+            recycle[num_to_delete++] = (*epp)->me_value;
+            (*epp)->me_value = NULL;
+        }
+        epp = self->od_otablep;
+        memmove(epp+ilow, epp+ihigh, (self->ma_used - ihigh) * sizeof(PyOrderedDictEntry *));
+        self->ma_used -= (ihigh - ilow);
+        result = 0;
+#if DELETION_AND_OVERWRITING_SEPERATE == 1
+    } else {
+        /* assignment first delete slice */
+        /* then delete any items whose keys are in itereable that are already in */
+        PyErr_SetString(PyExc_NotImplementedError,
+                        "ordered dictionary does not support slice assignment"
+                       );
+        result = -1;
+    }
+#endif
+    for (i = num_to_delete - 1; i >= 0; --i)
+        Py_XDECREF(recycle[i]);
+#if DELETION_AND_OVERWRITING_SEPERATE != 1
+    if (value != NULL) { /* now insert */
+        epp = ((PyOrderedDictObject *) value)->od_otablep;
+        for (i = ilow; i < ihigh; i++) {
+            if(PyOrderedDict_InsertItem(self, i, (*epp)->me_key, (*epp)->me_value) != 0) {
+                /* BUGFIX: go through Error so a heap-allocated recycle
+                   buffer is freed instead of leaking via a bare return */
+                result = -1;
+                goto Error;
+            }
+            epp++;
+        }
+    }
+#endif
+Error:
+    if (recycle != recycle_on_stack)
+        PyMem_FREE(recycle);
+    return result;
+}
+
+/* mp_ass_subscript: d[item] = value, del d[item], and the slice forms.
+ * Contiguous slices (step 1, no explicit step) are forwarded to
+ * dict_ass_slice; extended slices are handled here item by item:
+ * deletion walks from the back (positive step) so earlier deletions do
+ * not shift the indices still to be processed, and assignment first
+ * deletes the old items and then inserts the replacements.
+ * Returns 0 on success, -1 on error. */
+static Py_ssize_t
+dict_ass_subscript(PyOrderedDictObject *self, PyObject *item, PyObject *value)
+{
+    if (PySlice_Check(item)) {
+        Py_ssize_t start, stop, step, slicelength;
+        if (PySortedDict_CheckExact(self)) {
+            PyErr_Format(PyExc_TypeError,
+                         "sorteddict does not support slice %s", value ? "assignment" : "deletion");
+            return -1;
+        }
+        if (PySlice_GetIndicesEx(
+#if PY_VERSION_HEX < 0x03000000
+                    (PySliceObject*)
+#endif
+                    item, self->ma_used,
+                    &start, &stop, &step, &slicelength) < 0) {
+            return -1;
+        }
+
+        /* treat L[slice(a,b)] = v _exactly_ like L[a:b] = v */
+        if (step == 1 && ((PySliceObject*)item)->step == Py_None)
+            return dict_ass_slice(self, start, stop, value);
+
+        /* do soemthing about step == -1 ? */
+
+        if (slicelength <= 0)
+            return 0;
+        if (value == NULL) {
+            /* delete slice */
+            /* printf("Deleting %d %d %d %d %p\n", start, stop, step, slicelength, value);*/
+            while (slicelength--) {
+                /* ToDo optimize */
+                if (step > 0) { /* do it from the back to preserve right indices */
+                    dict_ass_slice(self, start + slicelength * step,
+                                   start + (slicelength * step) + 1, NULL);
+                } else {
+                    dict_ass_slice(self, start,
+                                   start + 1, NULL);
+                    start += step;
+                }
+            }
+            return 0;
+        } else {
+            /* assign slice */
+            Py_ssize_t count = slicelength, start2 = start;
+            PyOrderedDictEntry **epp;
+            /* printf("Assigning %d %d %d %d %d %p\n", start, stop, step, slicelength, PyObject_Length(value), value); */
+            if (PyObject_Length(value) != slicelength) {
+                PyErr_SetString(PyExc_ValueError,
+                                "slice assignment: wrong size"
+                               );
+                return -1;
+            }
+            if (!PyOrderedDict_CheckExact(value)) {
+                PyErr_SetString(PyExc_TypeError,
+                                "slice assignment: argument must be ordereddict"
+                               );
+                return -1;
+            }
+            /* first pass: delete the targeted items */
+            while (count--) {
+                /* ToDo optimize */
+                if (step > 0) { /* do it from the back to preserve right indices */
+                    dict_ass_slice(self, start2 + count * step,
+                                   start2 + (count * step) + 1, NULL);
+                } else {
+                    dict_ass_slice(self, start2, start2 + 1, NULL);
+                    start2 += step;
+                }
+            }
+            /* second pass: insert the replacement items */
+            count = slicelength;
+            start2 = start;
+            epp = ((PyOrderedDictObject *) value)->od_otablep;
+            if (step < 0) {
+                epp += slicelength;
+            }
+            while (count--) {
+                /* ToDo optimize */
+                if (step > 0) { /* do it from the front */
+                    if(PyOrderedDict_InsertItem(self, start2, (*epp)->me_key, (*epp)->me_value) != 0)
+                        return -1;
+                    start2 += step;
+                    epp++;
+                } else {
+                    epp--;
+                    if(PyOrderedDict_InsertItem(self, start2 + count * step, (*epp)->me_key, (*epp)->me_value) != 0)
+                        return -1;
+                }
+            }
+            return 0;
+
+        }
+    }
+    /* plain key: forward to the item-level set/del API */
+    if (value == NULL)
+        return PyOrderedDict_DelItem((PyObject *)self, item);
+    else
+        return PyOrderedDict_SetItem((PyObject *)self, item, value);
+}
+
+/* Mapping protocol table: wires len(d), d[k] and d[k] = v / del d[k]
+   into the type's PyMappingMethods slots. */
+static PyMappingMethods dict_as_mapping = {
+    (lenfunc)dict_length, /*mp_length*/
+    (binaryfunc)dict_subscript, /*mp_subscript*/
+    (objobjargproc)dict_ass_subscript, /*mp_ass_subscript*/
+};
+
+/* keys(reverse=False): return a new list of the dict's keys in insertion
+   order (or reversed order when reverse is true).  The list is allocated
+   before walking the order table; if the allocation triggers a GC that
+   resizes the dict, the whole operation is retried. */
+static PyObject *
+dict_keys(register PyOrderedDictObject *mp, PyObject *args, PyObject *kwds)
+{
+    register PyObject *v;
+    register Py_ssize_t i;
+    PyOrderedDictEntry **epp;
+    Py_ssize_t n;
+
+    int reverse = 0;
+    static char *kwlist[] = {"reverse", 0};
+
+    /* args may be NULL when called internally (PyOrderedDict_Keys) */
+    if (args != NULL)
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|i:keys",
+                                         kwlist, &reverse))
+            return NULL;
+
+
+again:
+    n = mp->ma_used;
+    v = PyList_New(n);
+    if (v == NULL)
+        return NULL;
+    if (n != mp->ma_used) {
+        /* Durnit. The allocations caused the dict to resize.
+         * Just start over, this shouldn't normally happen.
+         */
+        Py_DECREF(v);
+        goto again;
+    }
+    /* reuse 'reverse' as the walk direction: +1 forward, -1 backward */
+    if (reverse) {
+        epp = mp->od_otablep + (n-1);
+        reverse = -1;
+    } else {
+        epp = mp->od_otablep;
+        reverse = 1;
+    }
+    for (i = 0; i < n; i++) {
+        PyObject *key = (*epp)->me_key;
+        Py_INCREF(key);
+        PyList_SET_ITEM(v, i, key);
+        epp += reverse;
+    }
+    return v;
+}
+
+/* values(reverse=False): return a new list of the dict's values in
+   insertion order (or reversed).  Same retry-on-resize scheme as
+   dict_keys(). */
+static PyObject *
+dict_values(register PyOrderedDictObject *mp, PyObject *args, PyObject *kwds)
+{
+    register PyObject *v;
+    register Py_ssize_t i;
+    PyOrderedDictEntry **epp;
+    Py_ssize_t n;
+
+    int reverse = 0;
+    static char *kwlist[] = {"reverse", 0};
+
+    /* args may be NULL when called internally (PyOrderedDict_Values) */
+    if (args != NULL)
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|i:values",
+                                         kwlist, &reverse))
+            return NULL;
+
+again:
+    n = mp->ma_used;
+    v = PyList_New(n);
+    if (v == NULL)
+        return NULL;
+    if (n != mp->ma_used) {
+        /* Durnit. The allocations caused the dict to resize.
+         * Just start over, this shouldn't normally happen.
+         */
+        Py_DECREF(v);
+        goto again;
+    }
+    /* reuse 'reverse' as the walk direction: +1 forward, -1 backward */
+    if (reverse) {
+        epp = mp->od_otablep + (n-1);
+        reverse = -1;
+    } else {
+        epp = mp->od_otablep;
+        reverse = 1;
+    }
+    for (i = 0; i < n; i++) {
+        /* entries reached via od_otablep always have a live value */
+        PyObject *value = (*epp)->me_value;
+        Py_INCREF(value);
+        PyList_SET_ITEM(v, i, value);
+        epp += reverse;
+    }
+    return v;
+}
+
+/* items(reverse=False): return a new list of (key, value) tuples in
+   insertion order (or reversed).  All tuples are preallocated before the
+   order table is walked so no allocation (hence no GC / resize) can
+   happen during the fill loop. */
+static PyObject *
+dict_items(register PyOrderedDictObject *mp, PyObject *args, PyObject *kwds)
+{
+    register PyObject *v;
+    register Py_ssize_t i, n;
+    PyObject *item, *key, *value;
+    PyOrderedDictEntry **epp;
+
+    int reverse = 0;
+    static char *kwlist[] = {"reverse", 0};
+
+    /* args may be NULL when called internally (PyOrderedDict_Items) */
+    if (args != NULL)
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|i:items",
+                                         kwlist, &reverse))
+            return NULL;
+
+    /* Preallocate the list of tuples, to avoid allocations during
+     * the loop over the items, which could trigger GC, which
+     * could resize the dict. :-(
+     */
+again:
+    n = mp->ma_used;
+    v = PyList_New(n);
+    if (v == NULL)
+        return NULL;
+    for (i = 0; i < n; i++) {
+        item = PyTuple_New(2);
+        if (item == NULL) {
+            Py_DECREF(v);
+            return NULL;
+        }
+        PyList_SET_ITEM(v, i, item);
+    }
+    if (n != mp->ma_used) {
+        /* Durnit. The allocations caused the dict to resize.
+         * Just start over, this shouldn't normally happen.
+         */
+        Py_DECREF(v);
+        goto again;
+    }
+    /* Nothing we do below makes any function calls. */
+    if (reverse) {
+        epp = mp->od_otablep + (n-1);
+        reverse = -1;
+    } else {
+        epp = mp->od_otablep;
+        reverse = 1;
+    }
+    for (i = 0; i < n; i++) {
+        key = (*epp)->me_key;
+        value = (*epp)->me_value;
+        item = PyList_GET_ITEM(v, i);
+        Py_INCREF(key);
+        PyTuple_SET_ITEM(item, 0, key);
+        Py_INCREF(value);
+        PyTuple_SET_ITEM(item, 1, value);
+        epp += reverse;
+    }
+    return v;
+}
+
+/* fromkeys(seq[, value]) classmethod: build a new instance of *cls* with
+   the keys of *seq* all mapped to *value* (default None).  A fast path
+   handles exact set arguments on a fresh (ordered|sorted)dict by sizing
+   the table once and inserting entries directly; everything else goes
+   through the generic iterator protocol. */
+static PyObject *
+dict_fromkeys(PyObject *cls, PyObject *args)
+{
+    PyObject *seq;
+    PyObject *value = Py_None;
+    PyObject *it;        /* iter(seq) */
+    PyObject *key;
+    PyObject *d;
+    int status;
+
+    if (!PyArg_UnpackTuple(args, "fromkeys", 1, 2, &seq, &value))
+        return NULL;
+
+    /* instantiate the (sub)class; may run arbitrary __init__ code */
+    d = PyObject_CallObject(cls, NULL);
+    if (d == NULL)
+        return NULL;
+
+
+    if ((PyOrderedDict_CheckExact(d) || PySortedDict_CheckExact(d)) && ((PyDictObject *)d)->ma_used == 0) {
+        if (PyAnySet_CheckExact(seq)) {
+            /* fast path: reuse the set's precomputed hashes */
+            PyOrderedDictObject *mp = (PyOrderedDictObject *)d;
+            Py_ssize_t pos = 0;
+            PyObject *key;
+            Py_hash_t hash;
+
+            /* one up-front resize instead of incremental growth */
+            if (dictresize(mp, PySet_GET_SIZE(seq))) {
+                Py_DECREF(d);
+                return NULL;
+            }
+
+            while (_PySet_NextEntry(seq, &pos, &key, &hash)) {
+                /* insertdict consumes the references on success;
+                   NOTE(review): on failure the fresh refs appear to be
+                   leaked here -- confirm against insertdict's contract */
+                Py_INCREF(key);
+                Py_INCREF(value);
+                if (insertdict(mp, key, hash, value, -1)) {
+                    Py_DECREF(d);
+                    return NULL;
+                }
+            }
+            return d;
+        }
+    }
+
+    it = PyObject_GetIter(seq);
+    if (it == NULL) {
+        Py_DECREF(d);
+        return NULL;
+    }
+
+#ifndef OLD
+    /* use the direct C-level setter for exact types, the generic
+       protocol for subclasses that may override __setitem__ */
+    if (PyOrderedDict_CheckExact(d) || PySortedDict_CheckExact(d)) {
+        while ((key = PyIter_Next(it)) != NULL) {
+            status = PyOrderedDict_SetItem(d, key, value);
+            Py_DECREF(key);
+            if (status < 0)
+                goto Fail;
+        }
+    } else {
+        while ((key = PyIter_Next(it)) != NULL) {
+            status = PyObject_SetItem(d, key, value);
+            Py_DECREF(key);
+            if (status < 0)
+                goto Fail;
+        }
+    }
+    /* PyIter_Next returns NULL both on exhaustion and on error */
+    if (PyErr_Occurred())
+        goto Fail;
+#else
+    for (;;) {
+        key = PyIter_Next(it);
+        if (key == NULL) {
+            if (PyErr_Occurred())
+                goto Fail;
+            break;
+        }
+        status = PyObject_SetItem(d, key, value);
+        Py_DECREF(key);
+        if (status < 0)
+            goto Fail;
+    }
+
+#endif
+    Py_DECREF(it);
+    return d;
+
+Fail:
+    Py_DECREF(it);
+    Py_DECREF(d);
+    return NULL;
+}
+
+/* called by init, update and setitems */
+/* Shared argument parsing + merge for __init__, update() and setitems():
+   parses "src" (optional mapping or key/value sequence) and "relax"
+   (int flag), then merges src into self.  Keyword arguments are
+   deliberately NOT treated as items to insert (see commented-out code).
+   Returns 0 on success, -1 with an exception set on failure. */
+static int
+dict_update_common(PyObject *self, PyObject *args, PyObject *kwds, char *args_name)
+{
+    PyObject *arg = NULL;
+    int result = 0, tmprelax = 0;
+
+    static char *kwlist[] = {"src", "relax", 0};
+
+    if (args != NULL)
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, args_name,
+                                         kwlist, &arg, &tmprelax))
+            return -1;
+
+    if (arg != NULL) {
+        /* objects with a .keys() method are merged as mappings,
+           everything else as a sequence of (key, value) pairs */
+        if (PyObject_HasAttrString(arg, "keys"))
+            result = PyOrderedDict_Merge(self, arg, 1, tmprelax);
+        else
+            result = PyOrderedDict_MergeFromSeq2(self, arg, 1);
+    }
+    /* do not initialise from keywords at all */
+    /* if (result == 0 && kwds != NULL)
+        result = PyOrderedDict_Merge(self, kwds, 1); */
+    return result;
+}
+
+/* update([src[, relax]]): merge the items of src into this dict,
+   overriding existing keys.  Returns None, or NULL with an exception
+   set when the merge fails. */
+static PyObject *
+dict_update(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    if (dict_update_common(self, args, kwds, "|Oi:update") == -1)
+        return NULL;
+    Py_RETURN_NONE;
+}
+
+/* Update unconditionally replaces existing items.
+ Merge has a 3rd argument 'override'; if set, it acts like Update,
+ otherwise it leaves existing items unchanged.
+
+ PyOrderedDict_{Update,Merge} update/merge from a mapping object.
+
+ PyOrderedDict_MergeFromSeq2 updates/merges from any iterable object
+ producing iterable objects of length 2.
+*/
+
+/* Merge the iterable *seq2* of 2-element items (key, value) into dict *d*.
+   When *override* is false, keys already present in d keep their value.
+   Returns 0 on success, -1 with an exception set on failure. */
+int
+PyOrderedDict_MergeFromSeq2(PyObject *d, PyObject *seq2, int override)
+{
+    PyObject *it;        /* iter(seq2) */
+    Py_ssize_t i;        /* index into seq2 of current element */
+    PyObject *item;      /* seq2[i] */
+    PyObject *fast;      /* item as a 2-tuple or 2-list */
+
+    assert(d != NULL);
+    assert(PyOrderedDict_Check(d));
+    assert(seq2 != NULL);
+
+    it = PyObject_GetIter(seq2);
+    if (it == NULL)
+        return -1;
+
+    for (i = 0; ; ++i) {
+        PyObject *key, *value;
+        Py_ssize_t n;
+
+        fast = NULL;
+        item = PyIter_Next(it);
+        if (item == NULL) {
+            if (PyErr_Occurred())
+                goto Fail;
+            break;
+        }
+
+        /* Convert item to sequence, and verify length 2. */
+        fast = PySequence_Fast(item, "");
+        if (fast == NULL) {
+            /* replace the generic TypeError with one naming the index */
+            if (PyErr_ExceptionMatches(PyExc_TypeError))
+                PyErr_Format(PyExc_TypeError,
+                             "cannot convert dictionary update "
+                             "sequence element #%zd to a sequence",
+                             i);
+            goto Fail;
+        }
+        n = PySequence_Fast_GET_SIZE(fast);
+        if (n != 2) {
+            PyErr_Format(PyExc_ValueError,
+                         "dictionary update sequence element #%zd "
+                         "has length %zd; 2 is required",
+                         i, n);
+            goto Fail;
+        }
+
+        /* Update/merge with this (key, value) pair. */
+        key = PySequence_Fast_GET_ITEM(fast, 0);
+        value = PySequence_Fast_GET_ITEM(fast, 1);
+        if (override || PyOrderedDict_GetItem(d, key) == NULL) {
+            int status = PyOrderedDict_SetItem(d, key, value);
+            if (status < 0)
+                goto Fail;
+        }
+        Py_DECREF(fast);
+        Py_DECREF(item);
+    }
+
+    /* i is reused as the return code from here on */
+    i = 0;
+    goto Return;
+Fail:
+    Py_XDECREF(item);
+    Py_XDECREF(fast);
+    i = -1;
+Return:
+    Py_DECREF(it);
+    return Py_SAFE_DOWNCAST(i, Py_ssize_t, int);
+}
+
+/* Unconditional merge of b into a (override=1, relaxed=0).
+   Returns 0 on success, -1 with an exception set on failure. */
+int
+PyOrderedDict_Update(PyObject *a, PyObject *b)
+{
+    return PyOrderedDict_Merge(a, b, 1, 0);
+}
+
+/* Merge mapping *b* into ordered dict *a*.  When *override* is true,
+   existing keys in a are overwritten; otherwise they are kept.  Exact
+   ordereddict sources are merged via their order table (fast path);
+   other mappings are only accepted when *relaxed* (or the dict's own
+   relaxed flag) permits a source with undefined order.
+   Returns 0 on success, -1 with an exception set on failure. */
+int
+PyOrderedDict_Merge(PyObject *a, PyObject *b, int override, int relaxed)
+{
+    register PyOrderedDictObject *mp, *other;
+    register Py_ssize_t i;
+    PyOrderedDictEntry *entry, **epp;
+
+    /* We accept for the argument either a concrete ordered dictionary object,
+     * or an abstract "mapping" object. For the former, we can do
+     * things quite efficiently. For the latter, we only require that
+     * PyMapping_Keys() and PyObject_GetItem() be supported.
+     */
+    if (a == NULL || !PyOrderedDict_Check(a) || b == NULL) {
+        PyErr_BadInternalCall();
+        return -1;
+    }
+    mp = (PyOrderedDictObject*)a;
+    /* sorted dicts are always done with individual elements */
+    if (!PySortedDict_CheckExact(a) && PyOrderedDict_CheckExact(b)) {
+        other = (PyOrderedDictObject *) b;
+        if (other == mp || other->ma_used == 0)
+            /* a.update(a) or a.update({}); nothing to do */
+            return 0;
+        if (mp->ma_used == 0)
+            /* Since the target dict is empty, PyOrderedDict_GetItem()
+             * always returns NULL. Setting override to 1
+             * skips the unnecessary test.
+             */
+            override = 1;
+        /* Do one big resize at the start, rather than
+         * incrementally resizing as we insert new items. Expect
+         * that there will be no (or few) overlapping keys.
+         */
+        if ((mp->od_fill + other->ma_used)*3 >= (mp->ma_mask+1)*2) {
+            if (dictresize(mp, (mp->ma_used + other->ma_used)*2) != 0)
+                return -1;
+        }
+        epp = other->od_otablep;
+        for (i = 0; i < other->ma_used; i++) {
+            entry = *epp++;
+            /* entry->me_value is never NULL when following the otablep */
+            /*
+            if (entry->me_value != NULL &&
+                (override ||
+                 PyOrderedDict_GetItem(a, entry->me_key) == NULL)) {
+            */
+            if (override || PyOrderedDict_GetItem(a, entry->me_key) == NULL) {
+                Py_INCREF(entry->me_key);
+                Py_INCREF(entry->me_value);
+                if (insertdict(mp, entry->me_key,
+                               (long)entry->me_hash,
+                               entry->me_value, -1) != 0)
+                    return -1;
+            }
+        }
+    } else if (relaxed || RELAXED(mp)) {
+        /* Do it the generic, slower way */
+        PyObject *keys = PyMapping_Keys(b);
+        PyObject *iter;
+        PyObject *key, *value;
+        int status;
+
+        if (keys == NULL)
+            /* Docstring says this is equivalent to E.keys() so
+             * if E doesn't have a .keys() method we want
+             * AttributeError to percolate up. Might as well
+             * do the same for any other error.
+             */
+            return -1;
+
+        iter = PyObject_GetIter(keys);
+        Py_DECREF(keys);
+        if (iter == NULL)
+            return -1;
+
+        for (key = PyIter_Next(iter); key; key = PyIter_Next(iter)) {
+            /* FIX: was PyDict_GetItem(a, key) -- a is an ordereddict,
+               not necessarily a plain dict, and every other lookup in
+               this file uses PyOrderedDict_GetItem for the same test */
+            if (!override && PyOrderedDict_GetItem(a, key) != NULL) {
+                Py_DECREF(key);
+                continue;
+            }
+            value = PyObject_GetItem(b, key);
+            if (value == NULL) {
+                Py_DECREF(iter);
+                Py_DECREF(key);
+                return -1;
+            }
+            status = PyOrderedDict_SetItem(a, key, value);
+            Py_DECREF(key);
+            Py_DECREF(value);
+            if (status < 0) {
+                Py_DECREF(iter);
+                return -1;
+            }
+        }
+        Py_DECREF(iter);
+        if (PyErr_Occurred())
+            /* Iterator completed, via error */
+            return -1;
+    } else {
+        PyErr_SetString(PyExc_TypeError,
+                        "source has undefined order");
+        return -1;
+    }
+    return 0;
+}
+
+
+/*
+   assume that the start step and count are all within the
+   borders of what b provides
+*/
+/* Copy *count* entries from ordered dict *b* into *a*, starting at order
+   index *start* and advancing by *step* entries each time.  When
+   *override* is false, keys already present in a are skipped.
+   Returns 0 on success, -1 with an exception set on failure. */
+int
+PyOrderedDict_CopySome(PyObject *a, PyObject *b,
+                       Py_ssize_t start, Py_ssize_t step,
+                       Py_ssize_t count, int override)
+{
+    register PyOrderedDictObject *mp, *other;
+    register Py_ssize_t i;
+    PyOrderedDictEntry *entry, **epp;
+
+    /* We accept for the argument either a concrete ordered dictionary object
+     */
+    if (a == NULL || !PyOrderedDict_Check(a) || b == NULL) {
+        PyErr_BadInternalCall();
+        return -1;
+    }
+    mp = (PyOrderedDictObject*)a;
+    if (PyOrderedDict_CheckExact(b) || PySortedDict_CheckExact(b)) {
+        other = (PyOrderedDictObject*)b;
+        if (other == mp || other->ma_used == 0)
+            /* a.update(a) or a.update({}); nothing to do */
+            return 0;
+        if (mp->ma_used == 0)
+            /* Since the target dict is empty, PyOrderedDict_GetItem()
+             * always returns NULL. Setting override to 1
+             * skips the unnecessary test.
+             */
+            override = 1;
+        /* Do one big resize at the start, rather than
+         * incrementally resizing as we insert new items. Expect
+         * that there will be no (or few) overlapping keys.
+         */
+        if ((mp->od_fill + count)*3 >= (mp->ma_mask+1)*2) {
+            if (dictresize(mp, (mp->ma_used + count)*2) != 0)
+                return -1;
+        }
+        /* walk b's order table with the requested start/step */
+        epp = other->od_otablep;
+        epp += start;
+        for (i = 0; i < count; i++, epp += step) {
+            entry = *epp;
+            if (override || PyOrderedDict_GetItem(a, entry->me_key) == NULL) {
+                Py_INCREF(entry->me_key);
+                Py_INCREF(entry->me_value);
+                if (insertdict(mp, entry->me_key,
+                               (long)entry->me_hash,
+                               entry->me_value, -1) != 0)
+                    return -1;
+            }
+        }
+    } else {
+        PyErr_SetString(PyExc_TypeError,
+                        "source has undefined order");
+        return -1;
+    }
+    return 0;
+}
+
+/* copy() method: thin wrapper around the C-level PyOrderedDict_Copy(). */
+static PyObject *
+dict_copy(register PyOrderedDictObject *mp)
+{
+    return PyOrderedDict_Copy((PyObject*)mp);
+}
+
+/* Return a shallow copy of ordered/sorted dict *o*, preserving its state
+   flags and (for sorteddict) its cmp/key/value configuration.
+   Returns NULL with an exception set on failure. */
+PyObject *
+PyOrderedDict_Copy(PyObject *o)
+{
+    PyObject *copy;
+
+    if (o == NULL || !PyOrderedDict_Check(o)) {
+        PyErr_BadInternalCall();
+        return NULL;
+    }
+    if (PySortedDict_CheckExact(o)) {
+        copy = PySortedDict_New();
+        if (copy == NULL)
+            return NULL;
+        /* carry over the sort configuration before merging */
+        ((PySortedDictObject *) copy)->sd_cmp = ((PySortedDictObject *) o)->sd_cmp;
+        ((PySortedDictObject *) copy)->sd_key = ((PySortedDictObject *) o)->sd_key;
+        ((PySortedDictObject *) copy)->sd_value = ((PySortedDictObject *) o)->sd_value;
+    } else {
+        copy = PyOrderedDict_New();
+        if (copy == NULL)
+            return NULL;
+    }
+    ((PyOrderedDictObject *) copy)->od_state = ((PyOrderedDictObject *) o)->od_state;
+    if (PyOrderedDict_Merge(copy, o, 1, 0) == 0)
+        return copy;
+    Py_DECREF(copy);
+    return NULL;
+}
+
+/* Return the number of items in the ordered dict, or -1 (after
+   PyErr_BadInternalCall) when mp is NULL or not an ordered dict. */
+Py_ssize_t
+PyOrderedDict_Size(PyObject *mp)
+{
+    if (mp != NULL && PyOrderedDict_Check(mp))
+        return ((PyOrderedDictObject *)mp)->ma_used;
+    PyErr_BadInternalCall();
+    return -1;
+}
+
+/* C-level keys(): forward-order key list, or NULL on bad argument. */
+PyObject *
+PyOrderedDict_Keys(PyObject *mp)
+{
+    if (mp == NULL || !PyOrderedDict_Check(mp)) {
+        PyErr_BadInternalCall();
+        return NULL;
+    }
+    return dict_keys((PyOrderedDictObject *)mp, NULL, NULL);
+}
+
+/* C-level values(): forward-order value list, or NULL on bad argument. */
+PyObject *
+PyOrderedDict_Values(PyObject *mp)
+{
+    if (mp == NULL || !PyOrderedDict_Check(mp)) {
+        PyErr_BadInternalCall();
+        return NULL;
+    }
+    return dict_values((PyOrderedDictObject *)mp, NULL, NULL);
+}
+
+/* C-level items(): forward-order (key, value) list, or NULL on bad
+   argument. */
+PyObject *
+PyOrderedDict_Items(PyObject *mp)
+{
+    if (mp == NULL || !PyOrderedDict_Check(mp)) {
+        PyErr_BadInternalCall();
+        return NULL;
+    }
+    return dict_items((PyOrderedDictObject *)mp, NULL, NULL);
+}
+
+#if PY_VERSION_HEX < 0x03000000
+/* Subroutine which returns the smallest key in a for which b's value
+   is different or absent. The value is returned too, through the
+   pval argument. Both are NULL if no key in a is found for which b's status
+   differs. The refcounts on (and only on) non-NULL *pval and function return
+   values must be decremented by the caller (characterize() increments them
+   to ensure that mutating comparison and PyOrderedDict_GetItem calls can't delete
+   them before the caller is done looking at them). */
+
+static PyObject *
+characterize(PyOrderedDictObject *a, PyOrderedDictObject *b, PyObject **pval)
+{
+    PyObject *akey = NULL; /* smallest key in a s.t. a[akey] != b[akey] */
+    PyObject *aval = NULL; /* a[akey] */
+    Py_ssize_t i;
+    int cmp;
+
+    /* walk the hash table itself (not the order table), skipping
+       empty/dummy slots */
+    for (i = 0; i <= a->ma_mask; i++) {
+        PyObject *thiskey, *thisaval, *thisbval;
+        if (a->ma_table[i].me_value == NULL)
+            continue;
+        thiskey = a->ma_table[i].me_key;
+        Py_INCREF(thiskey); /* keep alive across compares */
+        if (akey != NULL) {
+            cmp = PyObject_RichCompareBool(akey, thiskey, Py_LT);
+            if (cmp < 0) {
+                Py_DECREF(thiskey);
+                goto Fail;
+            }
+            /* re-check the slot: the comparison above may have run
+               arbitrary Python code that mutated the dict */
+            if (cmp > 0 ||
+                    i > a->ma_mask ||
+                    a->ma_table[i].me_value == NULL) {
+                /* Not the *smallest* a key; or maybe it is
+                 * but the compare shrunk the dict so we can't
+                 * find its associated value anymore; or
+                 * maybe it is but the compare deleted the
+                 * a[thiskey] entry.
+                 */
+                Py_DECREF(thiskey);
+                continue;
+            }
+        }
+
+        /* Compare a[thiskey] to b[thiskey]; cmp <- true iff equal. */
+        thisaval = a->ma_table[i].me_value;
+        assert(thisaval);
+        Py_INCREF(thisaval); /* keep alive */
+        thisbval = PyOrderedDict_GetItem((PyObject *)b, thiskey);
+        if (thisbval == NULL)
+            cmp = 0;
+        else {
+            /* both dicts have thiskey: same values? */
+            cmp = PyObject_RichCompareBool(
+                      thisaval, thisbval, Py_EQ);
+            if (cmp < 0) {
+                Py_DECREF(thiskey);
+                Py_DECREF(thisaval);
+                goto Fail;
+            }
+        }
+        if (cmp == 0) {
+            /* New winner. */
+            Py_XDECREF(akey);
+            Py_XDECREF(aval);
+            akey = thiskey;
+            aval = thisaval;
+        } else {
+            Py_DECREF(thiskey);
+            Py_DECREF(thisaval);
+        }
+    }
+    *pval = aval;
+    return akey;
+
+Fail:
+    Py_XDECREF(akey);
+    Py_XDECREF(aval);
+    *pval = NULL;
+    return NULL;
+}
+
+/* Python 2 three-way comparison (tp_compare): shorter dict compares
+   smaller; equal-length dicts are compared via the smallest differing
+   key/value found by characterize().  Returns -1/0/1, with -1 also used
+   for errors (caller must check PyErr_Occurred). */
+static int
+dict_compare(PyOrderedDictObject *a, PyOrderedDictObject *b)
+{
+    PyObject *adiff, *bdiff, *aval, *bval;
+    int res;
+
+    /* Compare lengths first */
+    if (a->ma_used < b->ma_used)
+        return -1;    /* a is shorter */
+    else if (a->ma_used > b->ma_used)
+        return 1;     /* b is shorter */
+
+    /* Same length -- check all keys */
+    bdiff = bval = NULL;
+    adiff = characterize(a, b, &aval);
+    if (adiff == NULL) {
+        assert(!aval);
+        /* Either an error, or a is a subset with the same length so
+         * must be equal.
+         */
+        res = PyErr_Occurred() ? -1 : 0;
+        goto Finished;
+    }
+    bdiff = characterize(b, a, &bval);
+    if (bdiff == NULL && PyErr_Occurred()) {
+        assert(!bval);
+        res = -1;
+        goto Finished;
+    }
+    res = 0;
+    if (bdiff) {
+        /* bdiff == NULL "should be" impossible now, but perhaps
+         * the last comparison done by the characterize() on a had
+         * the side effect of making the dicts equal!
+         */
+        res = PyObject_Compare(adiff, bdiff);
+    }
+    if (res == 0 && bval != NULL)
+        res = PyObject_Compare(aval, bval);
+
+Finished:
+    Py_XDECREF(adiff);
+    Py_XDECREF(bdiff);
+    Py_XDECREF(aval);
+    Py_XDECREF(bval);
+    return res;
+}
+#endif
+
+/* Return 1 if dicts equal, 0 if not, -1 if error.
+ * Gets out as soon as any difference is detected.
+ * Uses only Py_EQ comparison.
+ * Unlike plain dict equality, the comparison is order-sensitive: both
+ * order tables are walked in lockstep and keys must match positionally.
+ */
+static int
+dict_equal(PyOrderedDictObject *a, PyOrderedDictObject *b)
+{
+    Py_ssize_t i;
+    PyOrderedDictEntry **app, **bpp;
+
+    if (a->ma_used != b->ma_used)
+        /* can't be equal if # of entries differ */
+        return 0;
+
+    /* Same # of entries -- check all of 'em. Exit early on any diff. */
+
+    for (i = 0, app = a->od_otablep, bpp = b->od_otablep; i < a->ma_used;
+            i++, app++, bpp++) {
+        int cmp;
+        PyObject *aval = (*app)->me_value;
+        PyObject *bval = (*bpp)->me_value;
+        PyObject *akey = (*app)->me_key;
+        PyObject *bkey = (*bpp)->me_key;
+        /* temporarily bump aval's refcount to ensure it stays
+           alive until we're done with it */
+        Py_INCREF(aval);
+        Py_INCREF(bval);
+        /* ditto for key */
+        Py_INCREF(akey);
+        Py_INCREF(bkey);
+        cmp = PyObject_RichCompareBool(akey, bkey, Py_EQ);
+        if (cmp > 0) /* keys compare ok, now do values */
+            cmp = PyObject_RichCompareBool(aval, bval, Py_EQ);
+        Py_DECREF(bkey);
+        Py_DECREF(akey);
+        Py_DECREF(bval);
+        Py_DECREF(aval);
+        if (cmp <= 0)  /* error or not equal */
+            return cmp;
+    }
+    return 1;
+}
+
+/* tp_richcompare: only == and != are supported between ordered dicts;
+   everything else returns NotImplemented (with a Py3k warning on
+   Python 2).  Returns a new reference or NULL on error. */
+static PyObject *
+dict_richcompare(PyObject *v, PyObject *w, int op)
+{
+    int cmp;
+    PyObject *res;
+
+    if (!PyOrderedDict_Check(v) || !PyOrderedDict_Check(w)) {
+        res = Py_NotImplemented;
+    } else if (op == Py_EQ || op == Py_NE) {
+        cmp = dict_equal((PyOrderedDictObject *)v, (PyOrderedDictObject *)w);
+        if (cmp < 0)
+            return NULL;
+        res = (cmp == (op == Py_EQ)) ? Py_True : Py_False;
+    } else {
+#if PY_VERSION_HEX < 0x03000000
+        /* Py3K warning if comparison isn't == or != */
+        if (PyErr_WarnPy3k("dict inequality comparisons not supported "
+                           "in 3.x", 1) < 0) {
+            return NULL;
+        }
+#endif
+        res = Py_NotImplemented;
+    }
+    Py_INCREF(res);
+    return res;
+}
+
+/* __contains__: return True/False for key membership, NULL on error
+   (unhashable key or failing lookup).  Reuses the cached hash of exact
+   str/unicode keys when available. */
+static PyObject *
+dict_contains(register PyOrderedDictObject *mp, PyObject *key)
+{
+    Py_hash_t hash;
+    PyOrderedDictEntry *ep;
+
+    if (!PyUNISTR_CheckExact(key) ||
+            (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return NULL;
+    }
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep == NULL)
+        return NULL;
+    /* a slot with a NULL value is empty or a dummy (deleted) entry */
+    return PyBool_FromLong(ep->me_value != NULL);
+}
+
+#if PY_VERSION_HEX < 0x03000000
+/* Python 2 only: deprecated has_key(); warns under -3 and then defers
+   to dict_contains(). */
+static PyObject *
+dict_has_key(register PyOrderedDictObject *mp, PyObject *key)
+{
+    if (Py_Py3kWarningFlag &&
+            PyErr_Warn(PyExc_DeprecationWarning,
+                       "dict.has_key() not supported in 3.x") < 0)
+        return NULL;
+    return dict_contains(mp, key);
+}
+#endif
+
+/* get(key[, default]): return the value for key, or default (None when
+   omitted) if the key is absent.  Never raises KeyError; returns NULL
+   only on hash/lookup errors. */
+static PyObject *
+dict_get(register PyOrderedDictObject *mp, PyObject *args)
+{
+    PyObject *key;
+    PyObject *failobj = Py_None;
+    PyObject *val = NULL;
+    Py_hash_t hash;
+    PyOrderedDictEntry *ep;
+
+    if (!PyArg_UnpackTuple(args, "get", 1, 2, &key, &failobj))
+        return NULL;
+
+    if (!PyUNISTR_CheckExact(key) ||
+            (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return NULL;
+    }
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep == NULL)
+        return NULL;
+    val = ep->me_value;
+    if (val == NULL)
+        val = failobj;
+    Py_INCREF(val);
+    return val;
+}
+
+
+/* setdefault(key[, default]): return the existing value for key, or
+   insert key with default (None when omitted) and return that.
+   Reuses the entry found by the first lookup when inserting. */
+static PyObject *
+dict_setdefault(register PyOrderedDictObject *mp, PyObject *args)
+{
+    PyObject *key;
+    PyObject *failobj = Py_None;
+    PyObject *val = NULL;
+    Py_hash_t hash;
+    PyOrderedDictEntry *ep;
+
+    if (!PyArg_UnpackTuple(args, "setdefault", 1, 2, &key, &failobj))
+        return NULL;
+
+    if (!PyUNISTR_CheckExact(key) ||
+            (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return NULL;
+    }
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep == NULL)
+        return NULL;
+    val = ep->me_value;
+    if (val == NULL) {
+        /* absent: insert the default, then hand it back */
+        if (dict_set_item_by_hash_or_entry((PyObject*)mp, key, hash, ep,
+                                           failobj) == 0)
+            val = failobj;
+    }
+    /* XINCREF: val stays NULL when the insertion above failed */
+    Py_XINCREF(val);
+    return val;
+}
+
+
+/* clear() method: remove all items and return None. */
+static PyObject *
+dict_clear(register PyOrderedDictObject *mp)
+{
+    PyOrderedDict_Clear((PyObject *)mp);
+    Py_RETURN_NONE;
+}
+
+/* pop(key[, default]): remove key and return its value.  When the key
+   is absent, return default if given, else raise KeyError.  The entry's
+   key slot is replaced by the shared 'dummy' object and the entry is
+   unlinked from the order table via del_inorder(). */
+static PyObject *
+dict_pop(PyOrderedDictObject *mp, PyObject *args)
+{
+    Py_hash_t hash;
+    PyOrderedDictEntry *ep;
+    PyObject *old_value, *old_key;
+    PyObject *key, *deflt = NULL;
+
+    if(!PyArg_UnpackTuple(args, "pop", 1, 2, &key, &deflt))
+        return NULL;
+    if (mp->ma_used == 0) {
+        if (deflt) {
+            Py_INCREF(deflt);
+            return deflt;
+        }
+        PyErr_SetString(PyExc_KeyError,
+                        "pop(): dictionary is empty");
+        return NULL;
+    }
+    if (!PyUNISTR_CheckExact(key) ||
+            (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return NULL;
+    }
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep == NULL)
+        return NULL;
+    if (ep->me_value == NULL) {
+        if (deflt) {
+            Py_INCREF(deflt);
+            return deflt;
+        }
+        set_key_error(key);
+        return NULL;
+    }
+    /* mark the slot deleted; ownership of old_value moves to the caller */
+    old_key = ep->me_key;
+    Py_INCREF(dummy);
+    ep->me_key = dummy;
+    old_value = ep->me_value;
+    ep->me_value = NULL;
+    del_inorder(mp, ep);
+    mp->ma_used--;
+    Py_DECREF(old_key);
+    return old_value;
+}
+
+/* popitem([index]): remove and return the (key, value) pair at order
+   position index (default -1, the last item).  Negative indices count
+   from the end.  Raises KeyError on empty dict or out-of-range index. */
+static PyObject *
+dict_popitem(PyOrderedDictObject *mp, PyObject *args)
+{
+    Py_hash_ssize_t i = -1, j;
+    PyOrderedDictEntry **epp;
+    PyObject *res;
+
+    /* Allocate the result tuple before checking the size. Believe it
+     * or not, this allocation could trigger a garbage collection which
+     * could empty the dict, so if we checked the size first and that
+     * happened, the result would be an infinite loop (searching for an
+     * entry that no longer exists). Note that the usual popitem()
+     * idiom is "while d: k, v = d.popitem()". so needing to throw the
+     * tuple away if the dict *is* empty isn't a significant
+     * inefficiency -- possible, but unlikely in practice.
+     */
+    if (!PyArg_ParseTuple(args, "|n:popitem", &i))
+        return NULL;
+
+    res = PyTuple_New(2);
+    if (res == NULL)
+        return NULL;
+    if (mp->ma_used == 0) {
+        Py_DECREF(res);
+        PyErr_SetString(PyExc_KeyError,
+                        "popitem(): dictionary is empty");
+        return NULL;
+    }
+    /* normalise negative indices to a position j from the front */
+    if (i < 0)
+        j = mp->ma_used + i;
+    else
+        j = i;
+    if (j < 0 || j >= mp->ma_used) {
+        Py_DECREF(res);
+        PyErr_SetString(PyExc_KeyError,
+                        "popitem(): index out of range");
+        return NULL;
+    }
+    epp = mp->od_otablep;
+    epp += j;
+    /* PyTuple_SET_ITEM steals the entry's references to key and value */
+    PyTuple_SET_ITEM(res, 0, (*epp)->me_key);
+    PyTuple_SET_ITEM(res, 1, (*epp)->me_value);
+    Py_INCREF(dummy);
+    (*epp)->me_key = dummy;
+    (*epp)->me_value = NULL;
+    mp->ma_used--;
+    if (i != -1) { /* for default case -1, we don't have to do anything */
+        /* ma_used has already been decremented ! */
+        memmove(epp, epp+1, (mp->ma_used - j) * sizeof(PyOrderedDictEntry *));
+    }
+    return res;
+}
+
+/* GC tp_traverse: visit every key and value the dict holds. */
+static int
+dict_traverse(PyObject *op, visitproc visit, void *arg)
+{
+    PyObject *key, *value;
+    Py_ssize_t pos = 0;
+
+    for (;;) {
+        if (!PyOrderedDict_Next(op, &pos, &key, &value))
+            break;
+        Py_VISIT(key);
+        Py_VISIT(value);
+    }
+    return 0;
+}
+
+/* GC tp_clear: drop all contained references. */
+static int
+dict_tp_clear(PyObject *op)
+{
+    PyOrderedDict_Clear(op);
+    return 0;
+}
+
+/* Forward declarations for the iterator machinery defined further down;
+   the three typed iterators only exist on Python 2 (Python 3 uses views). */
+#if PY_MAJOR_VERSION < 3
+extern PyTypeObject PyOrderedDictIterKey_Type; /* Forward */
+extern PyTypeObject PyOrderedDictIterValue_Type; /* Forward */
+extern PyTypeObject PyOrderedDictIterItem_Type; /* Forward */
+#endif
+static PyObject *dictiter_new(PyOrderedDictObject *, PyTypeObject *,
+                              PyObject *args, PyObject *kwds);
+
+#if PY_MAJOR_VERSION < 3
+/* Python 2 iterkeys()/itervalues()/iteritems(): thin wrappers that
+   instantiate the matching iterator type, forwarding args/kwds
+   (e.g. the 'reverse' option) to dictiter_new(). */
+static PyObject *
+dict_iterkeys(PyOrderedDictObject *dict, PyObject *args, PyObject *kwds)
+{
+    return dictiter_new(dict, &PyOrderedDictIterKey_Type, args, kwds);
+}
+
+static PyObject *
+dict_itervalues(PyOrderedDictObject *dict, PyObject *args, PyObject *kwds)
+{
+    return dictiter_new(dict, &PyOrderedDictIterValue_Type, args, kwds);
+}
+
+static PyObject *
+dict_iteritems(PyOrderedDictObject *dict, PyObject *args, PyObject *kwds)
+{
+    return dictiter_new(dict, &PyOrderedDictIterItem_Type, args, kwds);
+}
+#endif
+
+/* __sizeof__: object struct plus the external entry table when it has
+   outgrown the embedded small table. */
+static PyObject *
+dict_sizeof(PyDictObject *mp)
+{
+    Py_ssize_t res;
+
+    res = sizeof(PyOrderedDictObject);
+    if (mp->ma_table != mp->ma_smalltable)
+        res = res + (mp->ma_mask + 1) * sizeof(PyOrderedDictEntry);
+#if PY_VERSION_HEX < 0x03000000
+    return PyInt_FromSize_t(res);
+#else
+    return PyLong_FromSize_t(res);
+#endif
+}
+
+/* index(key): return the position of *key* in the dict's order as an
+   int.  Raises ValueError when the key is not present.  A live entry
+   found by the hash lookup must also appear in the order table; if it
+   does not, the internal state is corrupt. */
+static PyObject *
+dict_index(register PyOrderedDictObject *mp, PyObject *key)
+{
+    Py_hash_t hash;
+    PyOrderedDictEntry *ep, **tmp;
+    register Py_ssize_t index;
+
+    if (!PyUNISTR_CheckExact(key) ||
+            (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+        hash = PyObject_Hash(key);
+        if (hash == -1)
+            return NULL;
+    }
+    ep = (mp->ma_lookup)(mp, key, hash);
+    if (ep == NULL || ep->me_value == NULL) {
+        PyErr_SetString(PyExc_ValueError,
+                        "ordereddict.index(x): x not a key in ordereddict"
+                       );
+        return NULL;
+    }
+
+    /* linear scan of the order table for the entry's position */
+    for (index = 0, tmp = mp->od_otablep; index < mp->ma_used; index++, tmp++) {
+        if (*tmp == ep) {
+#if PY_VERSION_HEX < 0x03000000
+            return PyInt_FromSize_t(index);
+#else
+            return PyLong_FromSize_t(index);
+#endif
+        }
+    }
+    /* FIX: previously returned NULL here without setting an exception,
+       which would surface as a SystemError; report the inconsistency
+       explicitly instead. */
+    PyErr_SetString(PyExc_RuntimeError,
+                    "ordereddict.index(x): key not found in order table");
+    return NULL;
+}
+
+/* insert(index, key, value): insert the pair at the given order
+   position; delegates to PyOrderedDict_InsertItem().  Returns None. */
+static PyObject *
+dict_insert(PyOrderedDictObject *mp, PyObject *args)
+{
+    Py_ssize_t i;
+    PyObject *key;
+    PyObject *val;
+
+    /* the 'n' (Py_ssize_t) format code only exists from Python 2.5 on */
+#if PY_VERSION_HEX >= 0x02050000
+    if (!PyArg_ParseTuple(args, "nOO:insert", &i, &key, &val))
+#else
+    if (!PyArg_ParseTuple(args, "iOO:insert", &i, &key, &val))
+#endif
+        return NULL;
+    if(PyOrderedDict_InsertItem(mp, i, key, val) != 0)
+        return NULL;
+    Py_RETURN_NONE;
+}
+
+/* reverse(): reverse the order in place by swapping entry pointers from
+   both ends of the order table towards the middle (the hash table itself
+   is untouched).  Safe for empty dicts: the loop body never runs. */
+static PyObject *
+dict_reverse(register PyOrderedDictObject *mp)
+{
+    PyOrderedDictEntry **epps, **eppe, *tmp;
+
+    epps = mp->od_otablep;
+    eppe = epps + ((mp->ma_used)-1);
+    while (epps < eppe) {
+        tmp = *epps;
+        *epps++ = *eppe;
+        *eppe-- = tmp;
+    }
+    Py_RETURN_NONE;
+}
+
+/* setkeys(seq): reorder the dict so its keys appear in the order given
+   by *seq*.  The sequence must contain exactly the current keys, each
+   exactly once.  The new order is built in a scratch table and committed
+   with a single memcpy only when the whole sequence validates, so a
+   failed call leaves the dict unchanged.  Returns None, or NULL with an
+   exception set.  Not supported for sorteddict. */
+static PyObject *
+dict_setkeys(register PyOrderedDictObject *mp, PyObject *keys)
+{
+    PyOrderedDictEntry **newtable, *item;
+    Py_ssize_t size = mp->ma_used * sizeof(PyOrderedDictEntry *), i, oldindex;
+    PyObject *key = NULL;
+    PyObject *it;
+    Py_hash_t hash;
+
+    if (PySortedDict_CheckExact(mp)) {
+        PyErr_SetString(PyExc_TypeError,
+                        "sorteddict does not support setkeys() assignment");
+        return NULL;
+    }
+
+    /* determine length -> ok if ok
+       if ok, then we still don't know if all keys will be found
+       so we allocate a scratch array and start filling that.
+       On finish, memcpy (so we don't have to worry about where the
+       values actually are (allocated or in smallbuffer), and
+       delete the tmp stuff,
+       if some key cannot be found (or is double) we don't update
+     */
+
+    /* FIX: pass the element count, not the byte count -- PyMem_NEW
+       multiplies by sizeof(type) itself, so the old code over-allocated
+       by a factor of sizeof(PyOrderedDictEntry *) */
+    newtable = PyMem_NEW(PyOrderedDictEntry *, mp->ma_used);
+    if (newtable == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    i = PyObject_Length(keys);
+    if ((i >=0) && (i != mp->ma_used)) {
+        PyErr_Format(PyExc_ValueError,
+                     "ordereddict setkeys requires sequence of length #%zd; "
+                     "provided was length %zd",
+                     mp->ma_used, i);
+        PyMem_DEL(newtable);    /* FIX: was leaked on this early return */
+        return NULL;
+    }
+    if (i == -1) PyErr_Clear();    /* length not determinable: rely on iteration */
+
+
+    it = PyObject_GetIter(keys);
+    if (it == NULL) {
+        PyMem_DEL(newtable);    /* FIX: was leaked on this early return */
+        return NULL;
+    }
+
+    for (i = 0; ; ++i) {
+        key = PyIter_Next(it);
+        if (key == NULL) {
+            if (PyErr_Occurred()) break;
+            if (i != mp->ma_used) {
+                PyErr_Format(PyExc_ValueError,
+                             "ordereddict setkeys requires sequence of length #%zd; "
+                             "provided was length %zd",
+                             mp->ma_used, i);
+                break;
+            }
+            /* success: commit the validated order in one go */
+            memcpy(mp->od_otablep, newtable, size);
+            PyMem_DEL(newtable);
+            Py_DECREF(it);
+            Py_RETURN_NONE;
+        }
+        if (i >= mp->ma_used) {
+            PyErr_Format(PyExc_ValueError,
+                         "ordereddict setkeys requires sequence of max length #%zd; "
+                         "a longer sequence was provided",
+                         mp->ma_used);
+            break;    /* FIX: was 'Py_DECREF(it); return NULL;', leaking
+                         newtable and key; fall to shared cleanup instead */
+        }
+        /* find the item with this key */
+        if (!PyUNISTR_CheckExact(key) ||
+                (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+            hash = PyObject_Hash(key);
+            if (hash == -1)
+                break;
+        }
+        item = (mp->ma_lookup)(mp, key, hash);
+        if (item == NULL || item->me_value == NULL) {
+            PyErr_Format(PyExc_KeyError,
+                         "ordereddict setkeys unknown key at pos " SPR,
+                         i);
+            break;
+        }
+        /* PyObject_Print((PyObject *)item->me_key, stdout, 0);*/
+        /* check if a pointer to this item has been set */
+        for (oldindex = 0; oldindex < i; oldindex++) {
+            if (newtable[oldindex] == item) {
+                PyErr_Format(PyExc_KeyError,
+                             "ordereddict setkeys same key at pos " SPR "and " SPR,
+                             oldindex, i);
+                /* FIX: was 'break', which only left this inner loop and
+                   let the outer loop keep running with an exception set */
+                goto Fail;
+            }
+        }
+        /* insert the pointer to this item */
+        newtable[i] = item;
+        /* FIX: release the iterator's reference each iteration; the old
+           code only released the last key, leaking one ref per key */
+        Py_DECREF(key);
+        key = NULL;
+    }
+Fail:
+    PyMem_DEL(newtable);
+    Py_XDECREF(key);
+    Py_DECREF(it);
+    return NULL;
+}
+
+/* setvalues(seq): replace the dict's values, in order, with the items
+   of *seq*.  The sequence must have exactly ma_used items; values are
+   swapped in as the sequence is consumed, so a too-short/too-long
+   sequence leaves the dict PARTIALLY updated (the error messages say
+   so).  Returns None, or NULL with an exception set. */
+static PyObject *
+dict_setvalues(register PyOrderedDictObject *mp, PyObject *values)
+{
+    PyObject *it;        /* iter(seq2) */
+    Py_ssize_t i;        /* index into seq2 of current element */
+    PyObject *item = NULL;    /* values[i] */
+    PyOrderedDictEntry **epp = mp->od_otablep, *tmp;
+
+    assert(mp != NULL);
+    assert(PyOrderedDict_Check(mp));
+    assert(values != NULL);
+
+    /* cheap pre-check when the sequence reports a length */
+    i = PyObject_Length(values);
+    /* printf("\nlength %d %d\n", i, mp->ma_used); */
+    if ((i >=0) && (i != mp->ma_used)) {
+        PyErr_Format(PyExc_ValueError,
+                     "ordereddict setvalues requires sequence of length #%zd; "
+                     "provided was length %zd",
+                     mp->ma_used, i);
+        return NULL;
+    }
+    if (i == -1) PyErr_Clear();    /* no length: rely on iteration */
+
+
+    it = PyObject_GetIter(values);
+    if (it == NULL)
+        return NULL;
+
+    for (i = 0; ; ++i) {
+        item = PyIter_Next(it);
+        if (item == NULL) {
+            if (PyErr_Occurred()) break;
+            if (i != mp->ma_used) {
+                PyErr_Format(PyExc_ValueError,
+                             "ordereddict setvalues requires sequence of length #%zd; "
+                             "provided was length %zd, ordereddict partially updated",
+                             mp->ma_used, i);
+                break;
+            }
+            Py_DECREF(it);
+            Py_RETURN_NONE;
+        }
+        if (i >= mp->ma_used) {
+            PyErr_Format(PyExc_ValueError,
+                         "ordereddict setvalues requires sequence of max length #%zd; "
+                         "a longer sequence was provided, ordereddict fully updated",
+                         mp->ma_used);
+            Py_DECREF(it);
+            return NULL;
+        }
+        /* swap in the new value; the entry takes over item's reference */
+        tmp = *epp++;
+        Py_DECREF(tmp->me_value);
+        tmp->me_value = item;
+    }
+    Py_XDECREF(item);
+    Py_DECREF(it);
+    return NULL;
+}
+
+static PyObject *
+dict_setitems(register PyObject *mp, PyObject *args, PyObject *kwds)
+{
+ PyOrderedDict_Clear((PyObject *)mp);
+ if (dict_update_common(mp, args, kwds, "|Oi:setitems") != -1)
+ Py_RETURN_NONE;
+ return NULL;
+}
+
+static PyObject *
+dict_rename(register PyOrderedDictObject *mp, PyObject *args)
+{
+ PyObject *oldkey, *newkey;
+ PyObject *val = NULL;
+ Py_hash_t hash;
+ PyOrderedDictEntry *ep, **epp;
+ register Py_ssize_t index;
+
+ if (PySortedDict_CheckExact(mp)) {
+ PyErr_SetString(PyExc_TypeError,
+ "sorteddict does not support rename()");
+ return NULL;
+ }
+ if (!PyArg_UnpackTuple(args, "get", 1, 2, &oldkey, &newkey))
+ return NULL;
+
+ if (!PyUNISTR_CheckExact(oldkey) ||
+ (hash = ((PyUNISTR_Object *) oldkey)->OB_HASH) == -1) {
+ hash = PyObject_Hash(oldkey);
+ if (hash == -1)
+ return NULL;
+ }
+ ep = (mp->ma_lookup)(mp, oldkey, hash);
+ if (ep == NULL || ep->me_value == NULL)
+ return NULL;
+ epp = mp->od_otablep;
+ for (index = 0; index < mp->ma_used; index++, epp++)
+ if (*epp == ep)
+ break;
+ if (*epp != ep)
+ return NULL; /* this is bad! */
+
+ oldkey = ep->me_key; /* now point to key from item */
+ val = ep->me_value;
+ Py_INCREF(dummy);
+ ep->me_key = dummy;
+ ep->me_value = NULL;
+ memmove(epp, epp+1, (mp->ma_used - index) * sizeof(PyOrderedDictEntry *));
+ mp->ma_used--;
+ Py_DECREF(oldkey);
+ if(PyOrderedDict_InsertItem(mp, index, newkey, val) != 0)
+ return NULL;
+ Py_DECREF(val);
+ Py_RETURN_NONE;
+}
+
+#if PY_VERSION_HEX < 0x03000000
+#define REDUCE
+
+/* support for pickling */
+static PyObject *
+dict_reduce(PyOrderedDictObject *self)
+{
+ PyObject *result, *it, *dict=NULL;
+ it = dictiter_new(self, &PyOrderedDictIterItem_Type, NULL, NULL);
+ dict = Py_None;
+ Py_INCREF(dict);
+ Py_INCREF(dict);
+ if (PySortedDict_CheckExact(self)) {
+ if (((PySortedDictObject *) self)->sd_cmp == NULL)
+ printf("NULL!!!!\n");
+ result = Py_BuildValue("O(()OOOi)NNO", self->ob_type,
+ ((PySortedDictObject *) self)->sd_cmp,
+ ((PySortedDictObject *) self)->sd_key,
+ ((PySortedDictObject *) self)->sd_value,
+ REVERSE(self), dict, dict, it);
+ } else {
+ result = Py_BuildValue("O(()ii)NNO", self->ob_type, RELAXED(self), KVIO(self), dict, dict, it);
+ }
+ return result;
+}
+#endif
+
/* D.getstate(): expose the od_state flag word (the relax/kvio/reverse
   bits set by the *_init functions) as a plain Python integer. */
static PyObject *
ordereddict_getstate(register PyOrderedDictObject *mp)
{
#if PY_MAJOR_VERSION >= 3
    return PyLong_FromLong(mp->od_state);
#else
    return PyInt_FromLong(mp->od_state);
#endif
}
+
+static PyObject *
+ordereddict_dump(register PyOrderedDictObject *mp)
+{
+ if (dump_ordereddict_head(mp) != -1)
+ dump_otablep(mp);
+ if (PySortedDict_CheckExact(mp))
+ dump_sorteddict_fun((PySortedDictObject *) mp);
+ Py_RETURN_NONE;
+}
+
/* ---- docstrings for the ordereddict/sorteddict methods ---- */

#if PY_VERSION_HEX < 0x03000000
PyDoc_STRVAR(has_key__doc__,
             "D.has_key(k) -> True if D has a key k, else False");
#endif

PyDoc_STRVAR(contains__doc__,
             "D.__contains__(k) -> True if D has a key k, else False");

#ifdef REDUCE
PyDoc_STRVAR(reduce__doc__, "Return state information for pickling.");
#endif

PyDoc_STRVAR(getitem__doc__, "x.__getitem__(y) <==> x[y]");

PyDoc_STRVAR(sizeof__doc__,
"D.__sizeof__() -> size of D in memory, in bytes");

PyDoc_STRVAR(get__doc__,
             "D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.");

PyDoc_STRVAR(setdefault_doc__,
             "D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D");

PyDoc_STRVAR(pop__doc__,
             "D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n\
If key is not found, d is returned if given, otherwise KeyError is raised");

PyDoc_STRVAR(popitem__doc__,
             "D.popitem([index]) -> (k, v), remove and return indexed (key, value) pair as a\n\
2-tuple (default is last); but raise KeyError if D is empty.");

/* Python 2 only: the list-returning keys/items/values variants. */
#if PY_VERSION_HEX < 0x03000000
PyDoc_STRVAR(keys__doc__,
             "D.keys([reverse=False]) -> list of D's keys, optionally reversed");

PyDoc_STRVAR(items__doc__,
             "D.items() -> list of D's (key, value) pairs, as 2-tuples");

PyDoc_STRVAR(values__doc__,
             "D.values() -> list of D's values");
#endif

PyDoc_STRVAR(update__doc__,
"D.update([E,] **F) -> None. Update D from dict/iterable E and F.\n"
"If E present and has a .keys() method, does: for k in E: D[k] = E[k]\n\
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v\n\
In either case, this is followed by: for k in F: D[k] = F[k]");

PyDoc_STRVAR(fromkeys__doc__,
             "dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.\n\
v defaults to None.");

PyDoc_STRVAR(clear__doc__,
             "D.clear() -> None. Remove all items from D.");

PyDoc_STRVAR(copy__doc__,
             "D.copy() -> a shallow copy of D");

#if PY_VERSION_HEX < 0x03000000
PyDoc_STRVAR(iterkeys__doc__,
             "D.iterkeys([reverse=False]) -> an iterator over the keys of D");

PyDoc_STRVAR(itervalues__doc__,
             "D.itervalues() -> an iterator over the values of D");

PyDoc_STRVAR(iteritems__doc__,
             "D.iteritems() -> an iterator over the (key, value) items of D");
#endif

/* docstrings for the ordereddict-specific extension methods */
PyDoc_STRVAR(index_doc,
             "D.index(key) -> return position of key in ordered dict");

PyDoc_STRVAR(insert_doc,
             "D.insert(index, key, value) -> add/update (key, value) and insert key at index");

PyDoc_STRVAR(reverse_doc,
             "D.reverse() -> reverse the order of the keys of D");

PyDoc_STRVAR(setkeys_doc,
             "D.setkeys(keys) -> set the keys of D (keys must be iterable and a permutation of .keys())");

PyDoc_STRVAR(setvalues_doc,
             "D.setvalues(values) -> set D values to values (must be iterable)");

PyDoc_STRVAR(setitems_doc,
             "D.setitems(items) -> clear D and then set items");

PyDoc_STRVAR(rename_doc,
             "D.rename(oldkey, newkey) -> exchange keys without changing order");

PyDoc_STRVAR(getstate_doc,
             "D.getstate() -> return the state integer");
+
+PyDoc_STRVAR(dump_doc,
+ "D.dump() -> print internals of an orereddict");
+
+
/* Forward */
/* constructors for the keys/items/values view objects defined below */
static PyObject *dictkeys_new(PyObject *);
static PyObject *dictitems_new(PyObject *);
static PyObject *dictvalues_new(PyObject *);

/* On Python 2 the views are exposed as viewkeys()/viewitems()/viewvalues();
   on Python 3 they ARE keys()/items()/values(). */
#if PY_VERSION_HEX < 0x03000000
PyDoc_STRVAR(viewkeys__doc__,
             "D.viewkeys() -> a set-like object providing a view on D's keys");
PyDoc_STRVAR(viewitems__doc__,
             "D.viewitems() -> a set-like object providing a view on D's items");
PyDoc_STRVAR(viewvalues__doc__,
             "D.viewvalues() -> an object providing a view on D's values");
#else
PyDoc_STRVAR(viewkeys__doc__,
             "D.keys() -> a set-like object providing a view on D's keys");
PyDoc_STRVAR(viewitems__doc__,
             "D.items() -> a set-like object providing a view on D's items");
PyDoc_STRVAR(viewvalues__doc__,
             "D.values() -> an object providing a view on D's values");
#endif
+
/* Method table shared by ordereddict and sorteddict.  Python-version
   differences (list vs view keys()/items()/values(), has_key, the
   iter* methods) are handled with preprocessor blocks below. */
static PyMethodDef ordereddict_methods[] = {
    {
        "__contains__",(PyCFunction)dict_contains, METH_O | METH_COEXIST,
        contains__doc__
    },
    {
        "__getitem__", (PyCFunction)dict_subscript, METH_O | METH_COEXIST,
        getitem__doc__
    },
    {"__sizeof__", (PyCFunction)dict_sizeof, METH_NOARGS,
     sizeof__doc__},
#ifdef REDUCE
    /* pickling support; only defined on Python 2 (see dict_reduce) */
    {"__reduce__", (PyCFunction)dict_reduce, METH_NOARGS, reduce__doc__},
#endif
#if PY_VERSION_HEX < 0x03000000
    {
        "has_key", (PyCFunction)dict_has_key, METH_O,
        has_key__doc__
    },
#endif
    {
        "get", (PyCFunction)dict_get, METH_VARARGS,
        get__doc__
    },
    {
        "setdefault", (PyCFunction)dict_setdefault, METH_VARARGS,
        setdefault_doc__
    },
    {
        "pop", (PyCFunction)dict_pop, METH_VARARGS,
        pop__doc__
    },
    {
        "popitem", (PyCFunction)dict_popitem, METH_VARARGS,
        popitem__doc__
    },
#if PY_VERSION_HEX < 0x03000000
    /* Python 2: list-returning variants (accept a reverse= keyword) */
    {
        "keys", (PyCFunction)dict_keys, METH_VARARGS | METH_KEYWORDS,
        keys__doc__
    },
    {
        "items", (PyCFunction)dict_items, METH_VARARGS | METH_KEYWORDS,
        items__doc__
    },
    {
        "values", (PyCFunction)dict_values, METH_VARARGS | METH_KEYWORDS,
        values__doc__
    },

#if PY_VERSION_HEX >= 0x02070000
    /* dictionary views exist from 2.7 on */
    {"viewkeys", (PyCFunction)dictkeys_new, METH_NOARGS,
     viewkeys__doc__},
    {"viewitems", (PyCFunction)dictitems_new, METH_NOARGS,
     viewitems__doc__},
    {"viewvalues", (PyCFunction)dictvalues_new, METH_NOARGS,
     viewvalues__doc__},
#endif
#else /* Py3K */
    /* Python 3: keys()/items()/values() return view objects */
    {"keys", (PyCFunction)dictkeys_new, METH_NOARGS,
     viewkeys__doc__},
    {"items", (PyCFunction)dictitems_new, METH_NOARGS,
     viewitems__doc__},
    {"values", (PyCFunction)dictvalues_new, METH_NOARGS,
     viewvalues__doc__},
#endif
    {
        "update", (PyCFunction)dict_update, METH_VARARGS | METH_KEYWORDS,
        update__doc__
    },
    {
        "fromkeys", (PyCFunction)dict_fromkeys, METH_VARARGS | METH_CLASS,
        fromkeys__doc__
    },
    {
        "clear", (PyCFunction)dict_clear, METH_NOARGS,
        clear__doc__
    },
    {
        "copy", (PyCFunction)dict_copy, METH_NOARGS,
        copy__doc__
    },
#if PY_VERSION_HEX < 0x03000000
    {
        "iterkeys", (PyCFunction)dict_iterkeys, METH_VARARGS | METH_KEYWORDS,
        iterkeys__doc__
    },
    {
        "itervalues", (PyCFunction)dict_itervalues, METH_VARARGS | METH_KEYWORDS,
        itervalues__doc__
    },
    {
        "iteritems", (PyCFunction)dict_iteritems, METH_VARARGS | METH_KEYWORDS,
        iteritems__doc__
    },
#endif
    /* ordereddict-specific extensions */
    {"index", (PyCFunction)dict_index, METH_O, index_doc},
    {"insert", (PyCFunction)dict_insert, METH_VARARGS, insert_doc},
    {"reverse", (PyCFunction)dict_reverse, METH_NOARGS, reverse_doc},
    {"setkeys", (PyCFunction)dict_setkeys, METH_O, setkeys_doc},
    {"setvalues", (PyCFunction)dict_setvalues, METH_O, setvalues_doc},
    {"setitems", (PyCFunction)dict_setitems, METH_VARARGS | METH_KEYWORDS, setitems_doc},
    {"rename", (PyCFunction)dict_rename, METH_VARARGS, rename_doc},
    {"getstate", (PyCFunction)ordereddict_getstate, METH_NOARGS, getstate_doc},
    {"dump", (PyCFunction)ordereddict_dump, METH_NOARGS, dump_doc},
    {NULL, NULL} /* sentinel */
};
+
+/* Return 1 if `key` is in dict `op`, 0 if not, and -1 on error. */
+int
+PyOrderedDict_Contains(PyObject *op, PyObject *key)
+{
+ Py_hash_t hash;
+ PyOrderedDictObject *mp = (PyOrderedDictObject *)op;
+ PyOrderedDictEntry *ep;
+
+ if (!PyUNISTR_CheckExact(key) ||
+ (hash = ((PyUNISTR_Object *) key)->OB_HASH) == -1) {
+ hash = PyObject_Hash(key);
+ if (hash == -1)
+ return -1;
+ }
+ ep = (mp->ma_lookup)(mp, key, hash);
+ return ep == NULL ? -1 : (ep->me_value != NULL);
+}
+
+/* Internal version of PyOrderedDict_Contains used when the hash value is already known */
+int
+_PyOrderedDict_Contains(PyObject *op, PyObject *key, Py_hash_t hash)
+{
+ PyOrderedDictObject *mp = (PyOrderedDictObject *)op;
+ PyOrderedDictEntry *ep;
+
+ ep = (mp->ma_lookup)(mp, key, hash);
+ return ep == NULL ? -1 : (ep->me_value != NULL);
+}
+
+static PyObject *
+PyOderedDict_Slice(PyObject *op, register Py_ssize_t ilow,
+ register Py_ssize_t ihigh)
+{
+ PyOrderedDictObject *mp = (PyOrderedDictObject *)op;
+ PyOrderedDictObject *slice;
+
+ if (mp == NULL || !PyOrderedDict_Check(mp)) {
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+ slice = (PyOrderedDictObject *) PyOrderedDict_New();
+ if (slice == NULL)
+ return NULL;
+ /* [:] -> ilow = 0, ihigh MAXINT */
+ if (ilow < 0)
+ ilow += mp->ma_used;
+ if (ihigh < 0)
+ ihigh += mp->ma_used;
+ if (ilow < 0)
+ ilow = 0;
+ else if (ilow > mp->ma_used)
+ ilow = mp->ma_used;
+ if (ihigh < ilow)
+ ihigh = ilow;
+ else if (ihigh > mp->ma_used)
+ ihigh = mp->ma_used;
+
+ if (PyOrderedDict_CopySome((PyObject *) slice,
+ op, ilow, 1, (ihigh-ilow), 1) == 0) {
+ return (PyObject *) slice;
+ }
+ Py_DECREF(slice);
+ return NULL;
+}
+
/* Hack to implement "key in dict" */
/* Also wires up slicing: D[i:j] and D[i:j] = ... go through the
   sq_slice / sq_ass_slice slots below. */
static PySequenceMethods dict_as_sequence = {
    0, /* sq_length */
    0, /* sq_concat */
    0, /* sq_repeat */
    0, /* sq_item */
    (ssizessizeargfunc)PyOderedDict_Slice, /* sq_slice */
    0, /* sq_ass_item */
    (ssizessizeobjargproc)dict_ass_slice, /* sq_ass_slice */
    PyOrderedDict_Contains, /* sq_contains */
    0, /* sq_inplace_concat */
    0, /* sq_inplace_repeat */
};
+
/* tp_new for ordereddict: allocate and zero the object, install the
   string-optimised lookup function, and untrack exact instances from
   the GC (they are re-tracked once they can contain references). */
static PyObject *
dict_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    PyObject *self;

    assert(type != NULL && type->tp_alloc != NULL);
    assert(dummy != NULL);
    self = type->tp_alloc(type, 0);
    if (self != NULL) {
        PyOrderedDictObject *d = (PyOrderedDictObject *)self;
        /* It's guaranteed that tp->alloc zeroed out the struct. */
        assert(d->ma_table == NULL && d->od_fill == 0 && d->ma_used == 0);
        INIT_NONZERO_DICT_SLOTS(d);
        d->ma_lookup = lookdict_string;
        /* The object has been implicitly tracked by tp_alloc */
        if (type == &PyOrderedDict_Type)
            _PyObject_GC_UNTRACK(d);
#ifdef SHOW_CONVERSION_COUNTS
        ++created;
#endif
#ifdef SHOW_TRACK_COUNT
        if (_PyObject_GC_IS_TRACKED(d))
            count_tracked++;
        else
            count_untracked++;
#endif
    }
    return self;
}
+
/* tp_new for sorteddict: same as dict_new, plus initialisation of the
   sort-function slots via INIT_SORT_FUNCS. */
static PyObject *
sorteddict_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    PyObject *self;

    assert(type != NULL && type->tp_alloc != NULL);
    assert(dummy != NULL);
    self = type->tp_alloc(type, 0);
    if (self != NULL) {
        PyOrderedDictObject *d = (PyOrderedDictObject *)self;
        /* It's guaranteed that tp->alloc zeroed out the struct. */
        assert(d->ma_table == NULL && d->od_fill == 0 && d->ma_used == 0);
        INIT_NONZERO_DICT_SLOTS(d);
        d->ma_lookup = lookdict_string;
        INIT_SORT_FUNCS(((PySortedDictObject *) self));
        if (type == &PySortedDict_Type)
            _PyObject_GC_UNTRACK(d);
#ifdef SHOW_CONVERSION_COUNTS
        ++created;
#endif
#ifdef SHOW_TRACK_COUNT
        if (_PyObject_GC_IS_TRACKED(d))
            count_tracked++;
        else
            count_untracked++;
#endif
    }
    return self;
}
+
+static int
+ordereddict_init(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *arg = NULL;
+ int result = 0, tmprelax = -1, tmpkvio = -1;
+
+ static char *kwlist[] = {"src", "relax", "kvio", 0};
+ if (args != NULL) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oii:ordereddict",
+ kwlist, &arg, &tmprelax, &tmpkvio)) {
+ return -1;
+ }
+ }
+ if (tmpkvio == -1)
+ tmpkvio = ordereddict_kvio;
+ if (tmpkvio)
+ ((PyOrderedDictObject *)self)->od_state |= OD_KVIO_BIT;
+ if (tmprelax == -1)
+ tmprelax = ordereddict_relaxed;
+ if (tmprelax)
+ ((PyOrderedDictObject *)self)->od_state |= OD_RELAXED_BIT;
+
+ if (arg != NULL) {
+ if (PyObject_HasAttrString(arg, "keys"))
+ result = PyOrderedDict_Merge(self, arg, 1, tmprelax);
+ else
+ result = PyOrderedDict_MergeFromSeq2(self, arg, 1);
+ }
+ /* do not initialise from keywords at all */
+ return result;
+}
+
/* __init__ for sorteddict: optional source plus cmp/key/value sort
   functions and a reverse flag. */
static int
sorteddict_init(PyObject *self, PyObject *args, PyObject *kwds)
{
    PyObject *arg = NULL, *cmpfun = NULL, *keyfun = NULL, *valuefun = NULL;
    int result = 0, reverse = 0;

    static char *kwlist[] = {"src", "cmp", "key", "value", "reverse", 0};
    if (args != NULL)
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOOOi:sorteddict",
                                         kwlist, &arg, &cmpfun, &keyfun, &valuefun, &reverse))
            return -1;
    if (reverse)
        ((PyOrderedDictObject *)self)->od_state |= OD_REVERSE_BIT;
    /* always relaxed about order of source */
    ((PyOrderedDictObject *)self)->od_state |= OD_RELAXED_BIT;

    /* NOTE(review): cmpfun and valuefun are parsed but never stored
       here -- presumably INIT_SORT_FUNCS in sorteddict_new installed
       defaults; verify whether "cmp"/"value" are intentionally ignored. */
    /* NOTE(review): keyfun is stored without Py_INCREF, i.e. a borrowed
       reference is kept beyond this call -- confirm ownership. */
    if (keyfun != NULL && keyfun != Py_False)
        ((PySortedDictObject *)self)->sd_key = keyfun;

    if (arg != NULL) {
        if (PyObject_HasAttrString(arg, "keys"))
            result = PyOrderedDict_Merge(self, arg, 1, 1);
        else
            result = PyOrderedDict_MergeFromSeq2(self, arg, 1);
    }
    /* do not initialise from keywords at all */
    return result;
}
+
/* tp_iter: iterating a dict yields its keys in order. */
static PyObject *
dict_iter(PyOrderedDictObject *dict)
{
    return dictiter_new(dict, &PyOrderedDictIterKey_Type, NULL, NULL);
}
+
+PyDoc_STRVAR(ordereddict_doc,
+ "ordereddict() -> new empty dictionary.\n"
+ "dict(orderddict) -> new dictionary initialized from a mappings object's\n"
+ " (key, value) pairs.\n"
+//"dict(iterable) -> new dictionary initialized as if via:\n"
+//" d = {}\n"
+//" for k, v in iterable:\n"
+//" d[k] = v\n"
+//"dict(**kwargs) -> new dictionary initialized with the name=value pairs\n"
+//" in the keyword argument list. For example: dict(one=1, two=2)"
+ );
+
/* The ordereddict type object.  Inherits from dict (tp_base filled in
   at module init), is unhashable, and supports slicing via
   dict_as_sequence. */
PyTypeObject PyOrderedDict_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "_ordereddict.ordereddict",
    sizeof(PyOrderedDictObject),
    0,
    (destructor)dict_dealloc, /* tp_dealloc */
#if PY_MAJOR_VERSION < 3
    (printfunc)ordereddict_print, /* tp_print */
#else
    0, /* tp_print */
#endif
    0, /* tp_getattr */
    0, /* tp_setattr */
#if PY_MAJOR_VERSION < 3
    (cmpfunc)dict_compare, /* tp_compare */
#else
    0, /* tp_reserved */
#endif
    (reprfunc)ordereddict_repr, /* tp_repr */
    0, /* tp_as_number */
    &dict_as_sequence, /* tp_as_sequence */
    &dict_as_mapping, /* tp_as_mapping */
    (hashfunc)PyObject_HashNotImplemented, /* tp_hash */
    0, /* tp_call */
    0, /* tp_str */
    PyObject_GenericGetAttr, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
    Py_TPFLAGS_BASETYPE | Py_TPFLAGS_DICT_SUBCLASS, /* tp_flags */
    ordereddict_doc, /* tp_doc */
    dict_traverse, /* tp_traverse */
    dict_tp_clear, /* tp_clear */
    dict_richcompare, /* tp_richcompare */
    0, /* tp_weaklistoffset */
    (getiterfunc)dict_iter, /* tp_iter */
    0, /* tp_iternext */
    ordereddict_methods, /* tp_methods */
    0, /* tp_members */
    0, /* tp_getset */
    DEFERRED_ADDRESS(&PyDict_Type), /* tp_base */
    0, /* tp_dict */
    0, /* tp_descr_get */
    0, /* tp_descr_set */
    0, /* tp_dictoffset */
    ordereddict_init, /* tp_init */
    PyType_GenericAlloc, /* tp_alloc */
    dict_new, /* tp_new */
    PyObject_GC_Del, /* tp_free */
};
+
+
PyDoc_STRVAR(sorteddict_doc,
             "sorteddict() -> new empty dictionary.\n"
            );


/* The sorteddict type object: same slots as PyOrderedDict_Type except
   for repr, docstring, init and new, and a larger instance struct that
   carries the sort functions. */
PyTypeObject PySortedDict_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "_ordereddict.sorteddict",
    sizeof(PySortedDictObject),
    0,
    (destructor)dict_dealloc, /* tp_dealloc */
#if PY_MAJOR_VERSION < 3
    (printfunc)ordereddict_print, /* tp_print */
#else
    0, /* tp_print */
#endif
    0, /* tp_getattr */
    0, /* tp_setattr */
#if PY_MAJOR_VERSION < 3
    (cmpfunc)dict_compare, /* tp_compare */
#else
    0, /* tp_reserved */
#endif
    (reprfunc)sorteddict_repr, /* tp_repr */
    0, /* tp_as_number */
    &dict_as_sequence, /* tp_as_sequence */
    &dict_as_mapping, /* tp_as_mapping */
    (hashfunc)PyObject_HashNotImplemented, /* tp_hash */
    0, /* tp_call */
    0, /* tp_str */
    PyObject_GenericGetAttr, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
    Py_TPFLAGS_BASETYPE | Py_TPFLAGS_DICT_SUBCLASS, /* tp_flags */
    sorteddict_doc, /* tp_doc */
    dict_traverse, /* tp_traverse */
    dict_tp_clear, /* tp_clear */
    dict_richcompare, /* tp_richcompare */
    0, /* tp_weaklistoffset */
    (getiterfunc)dict_iter, /* tp_iter */
    0, /* tp_iternext */
    ordereddict_methods, /* tp_methods */
    0, /* tp_members */
    0, /* tp_getset */
    DEFERRED_ADDRESS(&PyDict_Type), /* tp_base */
    0, /* tp_dict */
    0, /* tp_descr_get */
    0, /* tp_descr_set */
    0, /* tp_dictoffset */
    sorteddict_init, /* tp_init */
    PyType_GenericAlloc, /* tp_alloc */
    sorteddict_new, /* tp_new */
    PyObject_GC_Del, /* tp_free */
};
+
+
+
+/* Dictionary iterator types */
+
typedef struct {
    PyObject_HEAD
    PyOrderedDictObject *di_dict; /* Set to NULL when iterator is exhausted */
    Py_ssize_t di_used;           /* dict's ma_used at creation; mismatch => "changed size" error */
    Py_ssize_t di_pos;            /* current position in the order table */
    PyObject* di_result; /* reusable result tuple for iteritems */
    Py_ssize_t len;               /* remaining items, for __length_hint__ */
    int step;                     /* +1 forward, -1 for reverse iteration */
} ordereddictiterobject;
+
/* Create an iterator of the given itertype over dict.  args/kwds may
   carry a reverse=... flag (used by keys()/iterkeys() etc.); pass NULL
   for plain forward iteration.  Returns NULL with an exception set on
   failure. */
static PyObject *
dictiter_new(PyOrderedDictObject *dict, PyTypeObject *itertype,
             PyObject *args, PyObject *kwds)
{
    ordereddictiterobject *di;
    int reverse = 0;
    static char *kwlist[] = {"reverse", 0};

    if (args != NULL)
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|i:keys",
                                         kwlist, &reverse))
            return NULL;

    /* review: introduce GC_New */
    di = PyObject_GC_New(ordereddictiterobject, itertype);
    if (di == NULL)
        return NULL;
    Py_INCREF(dict);
    di->di_dict = dict;
    di->di_used = dict->ma_used;
    di->len = dict->ma_used;
    /* reverse iteration starts at the last slot and steps backwards */
    if (reverse) {
        di->di_pos = (dict->ma_used) - 1;
        di->step = -1;
    } else {
        di->di_pos = 0;
        di->step = 1;
    }
    /* item iterators pre-allocate a reusable 2-tuple (see
       dictiter_iternextitem) */
    if (itertype == &PyOrderedDictIterItem_Type) {
        di->di_result = PyTuple_Pack(2, Py_None, Py_None);
        if (di->di_result == NULL) {
            Py_DECREF(di);
            return NULL;
        }
    } else
        di->di_result = NULL;
    PyObject_GC_Track(di);
    return (PyObject *)di;
}
+
/* tp_dealloc: di_dict may already be NULL if the iterator was
   exhausted, hence XDECREF. */
static void
dictiter_dealloc(ordereddictiterobject *di)
{
    Py_XDECREF(di->di_dict);
    Py_XDECREF(di->di_result);
    PyObject_GC_Del(di);
}
+
/* tp_traverse: visit the backing dict and the reusable result tuple. */
static int
dictiter_traverse(ordereddictiterobject *di, visitproc visit, void *arg)
{
    Py_VISIT(di->di_dict);
    Py_VISIT(di->di_result);
    return 0;
}
+
+static PyObject *
+dictiter_len(ordereddictiterobject *di)
+{
+ Py_ssize_t len = 0;
+ if (di->di_dict != NULL && di->di_used == di->di_dict->ma_used)
+ len = di->len;
+#if PY_VERSION_HEX < 0x03000000
+ return PyInt_FromSize_t(len);
+#else
+ return PyLong_FromSize_t(len);
+#endif
+}
+
PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");

/* shared by all three iterator types */
static PyMethodDef dictiter_methods[] = {
    {"__length_hint__", (PyCFunction)dictiter_len, METH_NOARGS, length_hint_doc},
    {NULL, NULL} /* sentinel */
};
+
/* tp_iternext for key iterators: return a new reference to the next
   key, or NULL when exhausted.  On exhaustion the reference to the
   dict is dropped and di_dict set to NULL; a size change during
   iteration raises RuntimeError and makes the error state sticky. */
static PyObject *dictiter_iternextkey(ordereddictiterobject *di)
{
    PyObject *key;
    register Py_ssize_t i;
    register PyOrderedDictEntry **epp;
    PyOrderedDictObject *d = di->di_dict;

    if (d == NULL)
        return NULL;
    assert (PyOrderedDict_Check(d));

    if (di->di_used != d->ma_used) {
        PyErr_SetString(PyExc_RuntimeError,
                        "dictionary changed size during iteration");
        di->di_used = -1; /* Make this state sticky */
        return NULL;
    }

    /* position leaves [0, ma_used) in either direction => exhausted */
    i = di->di_pos;
    if (i < 0)
        goto fail;
    if (i >= d->ma_used)
        goto fail;
    epp = d->od_otablep;
    di->di_pos = i+di->step;
    di->len--; /* len can be calculated */
    key = epp[i]->me_key;
    Py_INCREF(key);
    return key;

fail:
    Py_DECREF(d);
    di->di_dict = NULL;
    return NULL;
}
+
/* Type object for the key iterator (returned by iter(D), iterkeys()). */
PyTypeObject PyOrderedDictIterKey_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "_ordereddict.keyiterator", /* tp_name */
    sizeof(ordereddictiterobject), /* tp_basicsize */
    0, /* tp_itemsize */
    /* methods */
    (destructor)dictiter_dealloc, /* tp_dealloc */
    0, /* tp_print */
    0, /* tp_getattr */
    0, /* tp_setattr */
    0, /* tp_compare */
    0, /* tp_repr */
    0, /* tp_as_number */
    0, /* tp_as_sequence */
    0, /* tp_as_mapping */
    0, /* tp_hash */
    0, /* tp_call */
    0, /* tp_str */
    PyObject_GenericGetAttr, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0, /* tp_doc */
    (traverseproc)dictiter_traverse, /* tp_traverse */
    0, /* tp_clear */
    0, /* tp_richcompare */
    0, /* tp_weaklistoffset */
    PyObject_SelfIter, /* tp_iter */
    (iternextfunc)dictiter_iternextkey, /* tp_iternext */
    dictiter_methods, /* tp_methods */
    0,
};
+
/* tp_iternext for value iterators; same protocol as
   dictiter_iternextkey but yields values. */
static PyObject *dictiter_iternextvalue(ordereddictiterobject *di)
{
    PyObject *value;
    register Py_ssize_t i;
    register PyOrderedDictEntry **epp;
    PyOrderedDictObject *d = di->di_dict;

    if (d == NULL)
        return NULL;
    assert (PyOrderedDict_Check(d));

    if (di->di_used != d->ma_used) {
        PyErr_SetString(PyExc_RuntimeError,
                        "dictionary changed size during iteration");
        di->di_used = -1; /* Make this state sticky */
        return NULL;
    }

    i = di->di_pos;
    if (i < 0 || i >= d->ma_used)
        goto fail;
    epp = d->od_otablep;
    di->di_pos = i+di->step;
    di->len--; /* len can be calculated */
    value = epp[i]->me_value;
    Py_INCREF(value);
    return value;

fail:
    /* exhausted: release the dict and mark the iterator dead */
    Py_DECREF(d);
    di->di_dict = NULL;
    return NULL;
}
+
/* Type object for the value iterator (itervalues()/values() views). */
PyTypeObject PyOrderedDictIterValue_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "_ordereddict.valueiterator", /* tp_name */
    sizeof(ordereddictiterobject), /* tp_basicsize */
    0, /* tp_itemsize */
    /* methods */
    (destructor)dictiter_dealloc, /* tp_dealloc */
    0, /* tp_print */
    0, /* tp_getattr */
    0, /* tp_setattr */
    0, /* tp_compare */
    0, /* tp_repr */
    0, /* tp_as_number */
    0, /* tp_as_sequence */
    0, /* tp_as_mapping */
    0, /* tp_hash */
    0, /* tp_call */
    0, /* tp_str */
    PyObject_GenericGetAttr, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0, /* tp_doc */
    (traverseproc)dictiter_traverse, /* tp_traverse */
    0, /* tp_clear */
    0, /* tp_richcompare */
    0, /* tp_weaklistoffset */
    PyObject_SelfIter, /* tp_iter */
    (iternextfunc)dictiter_iternextvalue, /* tp_iternext */
    dictiter_methods, /* tp_methods */
    0,
};
+
/* tp_iternext for item iterators: yield (key, value) 2-tuples.  The
   pre-allocated di_result tuple is reused when the caller dropped its
   reference (refcount 1), avoiding an allocation per item -- the same
   trick CPython's dictobject uses. */
static PyObject *dictiter_iternextitem(ordereddictiterobject *di)
{
    PyObject *key, *value, *result = di->di_result;
    register Py_ssize_t i;
    register PyOrderedDictEntry **epp;
    PyOrderedDictObject *d = di->di_dict;

    if (d == NULL)
        return NULL;
    assert (PyOrderedDict_Check(d));

    if (di->di_used != d->ma_used) {
        PyErr_SetString(PyExc_RuntimeError,
                        "dictionary changed size during iteration");
        di->di_used = -1; /* Make this state sticky */
        return NULL;
    }

    i = di->di_pos;
    if (i < 0)
        goto fail;

    /* review: differs in 2.5.6 */
    if (i >= d->ma_used)
        goto fail;
    epp = d->od_otablep;
    di->di_pos = i+di->step;
    if (result->ob_refcnt == 1) {
        /* sole owner: recycle the tuple in place */
        Py_INCREF(result);
        Py_DECREF(PyTuple_GET_ITEM(result, 0));
        Py_DECREF(PyTuple_GET_ITEM(result, 1));
    } else {
        result = PyTuple_New(2);
        if (result == NULL)
            return NULL;
    }
    di->len--; /* len can be calculated */
    key = epp[i]->me_key;
    value = epp[i]->me_value;
    Py_INCREF(key);
    Py_INCREF(value);
    PyTuple_SET_ITEM(result, 0, key);
    PyTuple_SET_ITEM(result, 1, value);
    return result;

fail:
    Py_DECREF(d);
    di->di_dict = NULL;
    return NULL;
}
+
/* Type object for the item iterator (iteritems()/items() views,
   pickling). */
PyTypeObject PyOrderedDictIterItem_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "_ordereddict.itemiterator", /* tp_name */
    sizeof(ordereddictiterobject), /* tp_basicsize */
    0, /* tp_itemsize */
    /* methods */
    (destructor)dictiter_dealloc, /* tp_dealloc */
    0, /* tp_print */
    0, /* tp_getattr */
    0, /* tp_setattr */
    0, /* tp_compare */
    0, /* tp_repr */
    0, /* tp_as_number */
    0, /* tp_as_sequence */
    0, /* tp_as_mapping */
    0, /* tp_hash */
    0, /* tp_call */
    0, /* tp_str */
    PyObject_GenericGetAttr, /* tp_getattro */
    0, /* tp_setattro */
    0, /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
    0, /* tp_doc */
    (traverseproc)dictiter_traverse, /* tp_traverse */
    0, /* tp_clear */
    0, /* tp_richcompare */
    0, /* tp_weaklistoffset */
    PyObject_SelfIter, /* tp_iter */
    (iternextfunc)dictiter_iternextitem, /* tp_iternext */
    dictiter_methods, /* tp_methods */
    0,
};
+
+/*******************************************************************/
+
+static PyObject *
+getset_relaxed(PyObject *self, PyObject *args)
+{
+ int n = -1, oldval = ordereddict_relaxed;
+ if (!PyArg_ParseTuple(args, "|i", &n))
+ return NULL;
+ if (n != -1) {
+ ordereddict_relaxed = n;
+ }
+ return PyBool_FromLong(oldval);
+}
+
+static PyObject *
+getset_kvio(PyObject *self, PyObject *args)
+{
+ int n = -1, oldval = ordereddict_kvio;
+ if (!PyArg_ParseTuple(args, "|i", &n))
+ return NULL;
+ if (n != -1) {
+ ordereddict_kvio = n;
+ }
+ return PyBool_FromLong(oldval);
+}
+
+static PyMethodDef ordereddict_functions[] = {
+ {
+ "relax", getset_relaxed, METH_VARARGS,
+ "get/set routine for allowing global undeordered dict initialisation"
+ },
+ {
+ "kvio", getset_kvio, METH_VARARGS,
+ "get/set routine for allowing global KeyValue Insertion Order initialisation"
+ },
+ {NULL, NULL} /* sentinel */
+};
+
+#if PY_VERSION_HEX >= 0x02070000
+/* dictionary views are 2.7+ */
+
+/***********************************************/
+/* View objects for keys(), items(), values(). */
+/***********************************************/
+
+/* The instance lay-out is the same for all three; but the type differs. */
+
typedef struct {
    PyObject_HEAD
    PyOrderedDictObject *dv_dict;  /* backing dict; NULL after the view is cleared */
} dictviewobject;
+
/* tp_dealloc for all three view types. */
static void
dictview_dealloc(dictviewobject *dv)
{
    Py_XDECREF(dv->dv_dict);
    PyObject_GC_Del(dv);
}
+
/* tp_traverse for all three view types. */
static int
dictview_traverse(dictviewobject *dv, visitproc visit, void *arg)
{
    Py_VISIT(dv->dv_dict);
    return 0;
}
+
+static Py_ssize_t
+dictview_len(dictviewobject *dv)
+{
+ Py_ssize_t len = 0;
+ if (dv->dv_dict != NULL)
+ len = dv->dv_dict->ma_used;
+ return len;
+}
+
+static PyObject *
+dictview_new(PyObject *dict, PyTypeObject *type)
+{
+ dictviewobject *dv;
+ if (dict == NULL) {
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+ if (!PyDict_Check(dict)) {
+ /* XXX Get rid of this restriction later */
+ PyErr_Format(PyExc_TypeError,
+ "%s() requires a dict argument, not '%s'",
+ type->tp_name, dict->ob_type->tp_name);
+ return NULL;
+ }
+ dv = PyObject_GC_New(dictviewobject, type);
+ if (dv == NULL)
+ return NULL;
+ Py_INCREF(dict);
+ dv->dv_dict = (PyOrderedDictObject *)dict;
+ PyObject_GC_Track(dv);
+ return (PyObject *)dv;
+}
+
+/* TODO(guido): The views objects are not complete:
+
+ * support more set operations
+ * support arbitrary mappings?
+ - either these should be static or exported in dictobject.h
+ - if public then they should probably be in builtins
+*/
+
+/* Return 1 if self is a subset of other, iterating over self;
+ 0 if not; -1 if an error occurred. */
+static int
+all_contained_in(PyObject *self, PyObject *other)
+{
+ PyObject *iter = PyObject_GetIter(self);
+ int ok = 1;
+
+ if (iter == NULL)
+ return -1;
+ for (;;) {
+ PyObject *next = PyIter_Next(iter);
+ if (next == NULL) {
+ if (PyErr_Occurred())
+ ok = -1;
+ break;
+ }
+ ok = PySequence_Contains(other, next);
+ Py_DECREF(next);
+ if (ok <= 0)
+ break;
+ }
+ Py_DECREF(iter);
+ return ok;
+}
+
/* Set-style rich comparison between views/sets: equality is mutual
   containment at equal size, <,<=,>,>= are (strict) subset/superset
   tests.  Non-set/non-view operands get NotImplemented. */
static PyObject *
dictview_richcompare(PyObject *self, PyObject *other, int op)
{
    Py_ssize_t len_self, len_other;
    int ok;
    PyObject *result;

    assert(self != NULL);
    assert(PyDictViewSet_Check(self));
    assert(other != NULL);

    if (!PyAnySet_Check(other) && !PyDictViewSet_Check(other)) {
        Py_INCREF(Py_NotImplemented);
        return Py_NotImplemented;
    }

    len_self = PyObject_Size(self);
    if (len_self < 0)
        return NULL;
    len_other = PyObject_Size(other);
    if (len_other < 0)
        return NULL;

    ok = 0;
    switch(op) {

    case Py_NE:
    case Py_EQ:
        /* equal sizes + one-way containment implies equality */
        if (len_self == len_other)
            ok = all_contained_in(self, other);
        if (op == Py_NE && ok >= 0)
            ok = !ok;
        break;

    case Py_LT:
        if (len_self < len_other)
            ok = all_contained_in(self, other);
        break;

    case Py_LE:
        if (len_self <= len_other)
            ok = all_contained_in(self, other);
        break;

    case Py_GT:
        /* superset checks iterate the other operand */
        if (len_self > len_other)
            ok = all_contained_in(other, self);
        break;

    case Py_GE:
        if (len_self >= len_other)
            ok = all_contained_in(other, self);
        break;

    }
    if (ok < 0)
        return NULL;
    result = ok ? Py_True : Py_False;
    Py_INCREF(result);
    return result;
}
+
/* repr of a view: "<typename>([...])", built from the materialised
   list of the view's elements.  Python 2 lacks the %R format code, so
   the repr of the list is taken explicitly there. */
static PyObject *
dictview_repr(dictviewobject *dv)
{
    PyObject *seq;
    PyObject *result;
#if PY_MAJOR_VERSION < 3
    PyObject *seq_str;
#endif

    seq = PySequence_List((PyObject *)dv);
    if (seq == NULL)
        return NULL;

#if PY_MAJOR_VERSION < 3
    seq_str = PyObject_Repr(seq);
    if (seq_str == NULL) {
        Py_DECREF(seq);
        return NULL;
    }
    result = PyUNISTR_FromFormat("%s(%s)", Py_TYPE(dv)->tp_name,
                                 PyString_AS_STRING(seq_str));
    Py_DECREF(seq_str);
#else
    result = PyUnicode_FromFormat("%s(%R)", Py_TYPE(dv)->tp_name, seq);
#endif
    Py_DECREF(seq);
    return result;
}
+
+/*** dict_keys ***/
+
+/* Return an iterator over the keys of the backing ordered dict.
+   dv_dict is NULL once the view has been detached from its dict
+   (see dictview_dealloc/traverse); iterating then yields None,
+   matching the sibling dictitems_iter/dictvalues_iter behaviour. */
+static PyObject *
+dictkeys_iter(dictviewobject *dv)
+{
+    if (dv->dv_dict == NULL) {
+        Py_RETURN_NONE;
+    }
+    return dictiter_new(dv->dv_dict, &PyOrderedDictIterKey_Type, NULL, NULL);
+}
+
+/* sq_contains for the keys view: "obj in d.viewkeys()".  Delegates to
+   PyDict_Contains on the backing dict; a detached view contains nothing. */
+static int
+dictkeys_contains(dictviewobject *dv, PyObject *obj)
+{
+    if (dv->dv_dict == NULL)
+        return 0;
+    return PyDict_Contains((PyObject *)dv->dv_dict, obj);
+}
+
+static PySequenceMethods dictkeys_as_sequence = {
+ (lenfunc)dictview_len, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ 0, /* sq_item */
+ 0, /* sq_slice */
+ 0, /* sq_ass_item */
+ 0, /* sq_ass_slice */
+ (objobjproc)dictkeys_contains, /* sq_contains */
+};
+
+/* view - other: implemented by copying the view into a fresh set and
+   calling set.difference_update(other).  Returns a new set, never a view. */
+static PyObject*
+dictviews_sub(PyObject* self, PyObject *other)
+{
+    PyObject *result = PySet_New(self);
+    PyObject *tmp;
+    if (result == NULL)
+        return NULL;
+
+    tmp = PyObject_CallMethod(result, "difference_update", "O", other);
+    if (tmp == NULL) {
+        Py_DECREF(result);
+        return NULL;
+    }
+
+    Py_DECREF(tmp);    /* difference_update returns None; drop it */
+    return result;
+}
+
+/* view & other: set intersection via set.intersection_update on a copy
+   of the view.  Returns a new set (same pattern as dictviews_sub). */
+static PyObject*
+dictviews_and(PyObject* self, PyObject *other)
+{
+    PyObject *result = PySet_New(self);
+    PyObject *tmp;
+    if (result == NULL)
+        return NULL;
+
+    tmp = PyObject_CallMethod(result, "intersection_update", "O", other);
+    if (tmp == NULL) {
+        Py_DECREF(result);
+        return NULL;
+    }
+
+    Py_DECREF(tmp);    /* intersection_update returns None; drop it */
+    return result;
+}
+
+/* view | other: set union via set.update on a copy of the view.
+   Returns a new set (same pattern as dictviews_sub). */
+static PyObject*
+dictviews_or(PyObject* self, PyObject *other)
+{
+    PyObject *result = PySet_New(self);
+    PyObject *tmp;
+    if (result == NULL)
+        return NULL;
+
+    tmp = PyObject_CallMethod(result, "update", "O", other);
+    if (tmp == NULL) {
+        Py_DECREF(result);
+        return NULL;
+    }
+
+    Py_DECREF(tmp);    /* update returns None; drop it */
+    return result;
+}
+
+/* view ^ other: symmetric difference via symmetric_difference_update on
+   a copy of the view.  Returns a new set (same pattern as dictviews_sub). */
+static PyObject*
+dictviews_xor(PyObject* self, PyObject *other)
+{
+    PyObject *result = PySet_New(self);
+    PyObject *tmp;
+    if (result == NULL)
+        return NULL;
+
+    tmp = PyObject_CallMethod(result, "symmetric_difference_update", "O",
+                              other);
+    if (tmp == NULL) {
+        Py_DECREF(result);
+        return NULL;
+    }
+
+    Py_DECREF(tmp);    /* symmetric_difference_update returns None; drop it */
+    return result;
+}
+
+static PyNumberMethods dictviews_as_number = {
+ 0, /*nb_add*/
+ (binaryfunc)dictviews_sub, /*nb_subtract*/
+ 0, /*nb_multiply*/
+#if PY_MAJOR_VERSION < 3
+ 0, /*nb_divide*/
+#endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero/nb_bool*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ (binaryfunc)dictviews_and, /*nb_and*/
+ (binaryfunc)dictviews_xor, /*nb_xor*/
+ (binaryfunc)dictviews_or, /*nb_or*/
+};
+
+static PyMethodDef dictkeys_methods[] = {
+ {NULL, NULL} /* sentinel */
+};
+
+PyTypeObject PyOrderedDictKeys_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "dict_keys", /* tp_name */
+ sizeof(dictviewobject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)dictview_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ (reprfunc)dictview_repr, /* tp_repr */
+ &dictviews_as_number, /* tp_as_number */
+ &dictkeys_as_sequence, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+#if PY_MAJOR_VERSION < 3
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
+ Py_TPFLAGS_CHECKTYPES, /* tp_flags */
+#else
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+#endif
+ 0, /* tp_doc */
+ (traverseproc)dictview_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ dictview_richcompare, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ (getiterfunc)dictkeys_iter, /* tp_iter */
+ 0, /* tp_iternext */
+ dictkeys_methods, /* tp_methods */
+ 0,
+};
+
+/* Factory used by ordereddict.viewkeys(): wrap *dict* in a keys view. */
+static PyObject *
+dictkeys_new(PyObject *dict)
+{
+    return dictview_new(dict, &PyOrderedDictKeys_Type);
+}
+
+/*** dict_items ***/
+
+/* Return an iterator over (key, value) pairs of the backing dict.
+   A detached view (dv_dict == NULL) yields None, like dictkeys_iter. */
+static PyObject *
+dictitems_iter(dictviewobject *dv)
+{
+    if (dv->dv_dict == NULL) {
+        Py_RETURN_NONE;
+    }
+    return dictiter_new(dv->dv_dict, &PyOrderedDictIterItem_Type, NULL, NULL);
+}
+
+/* sq_contains for the items view: "(k, v) in d.viewitems()".
+   Only a 2-tuple can possibly match; the key is looked up in the
+   backing dict and the stored value compared to v with ==.
+   Returns 1/0, or -1 on error (failed lookup or comparison). */
+static int
+dictitems_contains(dictviewobject *dv, PyObject *obj)
+{
+    PyObject *key, *value, *found;
+    if (dv->dv_dict == NULL)
+        return 0;
+    if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 2)
+        return 0;
+    key = PyTuple_GET_ITEM(obj, 0);
+    value = PyTuple_GET_ITEM(obj, 1);
+    found = PyDict_GetItem((PyObject *)dv->dv_dict, key);
+    if (found == NULL) {
+        /* PyDict_GetItem normally suppresses errors, but be defensive. */
+        if (PyErr_Occurred())
+            return -1;
+        return 0;
+    }
+    /* found is a borrowed reference; no DECREF needed. */
+    return PyObject_RichCompareBool(value, found, Py_EQ);
+}
+
+static PySequenceMethods dictitems_as_sequence = {
+ (lenfunc)dictview_len, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ 0, /* sq_item */
+ 0, /* sq_slice */
+ 0, /* sq_ass_item */
+ 0, /* sq_ass_slice */
+ (objobjproc)dictitems_contains, /* sq_contains */
+};
+
+static PyMethodDef dictitems_methods[] = {
+ {NULL, NULL} /* sentinel */
+};
+
+PyTypeObject PyOrderedDictItems_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "dict_items", /* tp_name */
+ sizeof(dictviewobject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)dictview_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ (reprfunc)dictview_repr, /* tp_repr */
+ &dictviews_as_number, /* tp_as_number */
+ &dictitems_as_sequence, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+#if PY_MAJOR_VERSION < 3
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
+ Py_TPFLAGS_CHECKTYPES, /* tp_flags */
+#else
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+#endif
+ 0, /* tp_doc */
+ (traverseproc)dictview_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ dictview_richcompare, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ (getiterfunc)dictitems_iter, /* tp_iter */
+ 0, /* tp_iternext */
+ dictitems_methods, /* tp_methods */
+ 0,
+};
+
+/* Factory used by ordereddict.viewitems(): wrap *dict* in an items view. */
+static PyObject *
+dictitems_new(PyObject *dict)
+{
+    return dictview_new(dict, &PyOrderedDictItems_Type);
+}
+
+
+/*** dict_values ***/
+
+/* Return an iterator over the values of the backing dict.
+   A detached view (dv_dict == NULL) yields None, like dictkeys_iter. */
+static PyObject *
+dictvalues_iter(dictviewobject *dv)
+{
+    if (dv->dv_dict == NULL) {
+        Py_RETURN_NONE;
+    }
+    return dictiter_new(dv->dv_dict, &PyOrderedDictIterValue_Type, NULL, NULL);
+}
+
+
+static PySequenceMethods dictvalues_as_sequence = {
+ (lenfunc)dictview_len, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ 0, /* sq_item */
+ 0, /* sq_slice */
+ 0, /* sq_ass_item */
+ 0, /* sq_ass_slice */
+ (objobjproc)0, /* sq_contains */
+};
+
+static PyMethodDef dictvalues_methods[] = {
+ {NULL, NULL} /* sentinel */
+};
+
+PyTypeObject PyOrderedDictValues_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "dict_values", /* tp_name */
+ sizeof(dictviewobject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)dictview_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ (reprfunc)dictview_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ &dictvalues_as_sequence, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
+ 0, /* tp_doc */
+ (traverseproc)dictview_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ (getiterfunc)dictvalues_iter, /* tp_iter */
+ 0, /* tp_iternext */
+ dictvalues_methods, /* tp_methods */
+ 0,
+};
+
+/* Factory used by ordereddict.viewvalues(): wrap *dict* in a values view. */
+static PyObject *
+dictvalues_new(PyObject *dict)
+{
+    return dictview_new(dict, &PyOrderedDictValues_Type);
+}
+
+#endif /* PY_VERSION_HEX >= 0x02070000 */
+
+
+/************************************************************************/
+
+
+#if PY_MAJOR_VERSION >= 3
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_ordereddict", /* m_name */
+ ordereddict_doc, /* m_doc */
+ -1, /* m_size */
+ ordereddict_functions, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL, /* m_free */
+ };
+#endif
+
+
+/* Shared module initialisation for Python 2 and 3.
+ *
+ * Sets up the dummy key sentinel, wires the type hierarchy
+ * (ordereddict derives from dict, sorteddict from ordereddict),
+ * readies the types, creates the module object and registers the
+ * two public types on it.  Returns the module, or NULL on error.
+ */
+static PyObject *
+ruamel_ordereddict_moduleinit(void)
+{
+    PyObject *m;
+
+    /* moved here as we have two primitives and dictobject.c had
+       no initialisation function */
+    if (dummy == NULL) { /* Auto-initialize dummy */
+        dummy = PyUNISTR_FromString("<dummy key>");
+        if (dummy == NULL)
+            return NULL;
+#ifdef SHOW_CONVERSION_COUNTS
+        Py_AtExit(show_counts);
+#endif
+    }
+
+    /* Fill in deferred data addresses. This must be done before
+       PyType_Ready() is called. Note that PyType_Ready() automatically
+       initializes the ob.ob_type field to &PyType_Type if it's NULL,
+       so it's not necessary to fill in ob_type first. */
+    PyOrderedDict_Type.tp_base = &PyDict_Type;
+    PySortedDict_Type.tp_base = &PyOrderedDict_Type;
+
+    if (PyType_Ready(&PyOrderedDict_Type) < 0)
+        return NULL;
+    if (PyType_Ready(&PySortedDict_Type) < 0)
+        return NULL;
+
+    /* AvdN: TODO understand why it is necessary or not (as it seems)
+       to PyTypeReady the iterator types
+    */
+
+#if PY_MAJOR_VERSION >= 3
+    m = PyModule_Create(&moduledef);
+#else
+    m = Py_InitModule3("_ordereddict",
+                       ordereddict_functions,
+                       ordereddict_doc
+                       // , NULL, PYTHON_API_VERSION
+        );
+#endif
+    if (m == NULL)
+        return NULL;
+
+    /* PyModule_AddObject steals a reference only on success, so take a
+       reference before each call and bail out on failure.  (Previously
+       the first failure fell through without returning NULL, and the
+       INCREF for sorteddict was — incorrectly — taken only on that
+       failure path.) */
+    Py_INCREF(&PyOrderedDict_Type);
+    if (PyModule_AddObject(m, "ordereddict",
+                           (PyObject *) &PyOrderedDict_Type) < 0)
+        return NULL;
+    Py_INCREF(&PySortedDict_Type);
+    if (PyModule_AddObject(m, "sorteddict",
+                           (PyObject *) &PySortedDict_Type) < 0)
+        return NULL;
+    return m;
+}
+
+#if PY_MAJOR_VERSION < 3
+ PyMODINIT_FUNC init_ordereddict(void)
+ {
+ ruamel_ordereddict_moduleinit();
+ }
+#else
+ PyMODINIT_FUNC PyInit__ordereddict(void)
+ {
+ return ruamel_ordereddict_moduleinit();
+ }
+#endif
diff --git a/contrib/deprecated/python/ruamel.ordereddict/ordereddict.h b/contrib/deprecated/python/ruamel.ordereddict/ordereddict.h
new file mode 100644
index 0000000000..9791ca71ff
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/ordereddict.h
@@ -0,0 +1,214 @@
+#ifndef Py_ORDEREDDICTOBJECT_H
+#define Py_ORDEREDDICTOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined _MSC_VER || defined __CYGWIN__
+#undef PyAPI_FUNC
+#undef PyAPI_DATA
+#define PyAPI_FUNC(RTYPE) __declspec(dllexport) RTYPE
+#define PyAPI_DATA(RTYPE) __declspec(dllexport) RTYPE
+#endif
+
+/* Ordered Dictionary object implementation using a hash table and a vector of
+ pointers to the items.
+*/
+/*
+
+ This file has been directly derived from dictobject.h in the Python 2.5.1
+ source distribution. Its licensing therefore is governed by the license
+ as distributed with Python 2.5.1 available in the
+   file LICENSE in the source distribution of ordereddict
+
+ Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
+ Foundation; All Rights Reserved"
+
+ 2007-10-13: Anthon van der Neut
+*/
+
+/* Dictionary object type -- mapping from hashable object to object */
+
+/* The distribution includes a separate file, Objects/dictnotes.txt,
+ describing explorations into dictionary design and optimization.
+ It covers typical dictionary use patterns, the parameters for
+ tuning dictionaries, and several ideas for possible optimizations.
+*/
+
+/*
+There are three kinds of slots in the table:
+
+
+1. Unused. me_key == me_value == NULL
+ Does not hold an active (key, value) pair now and never did. Unused can
+ transition to Active upon key insertion. This is the only case in which
+ me_key is NULL, and is each slot's initial state.
+
+2. Active. me_key != NULL and me_key != dummy and me_value != NULL
+ Holds an active (key, value) pair. Active can transition to Dummy upon
+ key deletion. This is the only case in which me_value != NULL.
+
+3. Dummy. me_key == dummy and me_value == NULL
+ Previously held an active (key, value) pair, but that was deleted and an
+ active pair has not yet overwritten the slot. Dummy can transition to
+ Active upon key insertion. Dummy slots cannot be made Unused again
+ (cannot have me_key set to NULL), else the probe sequence in case of
+ collision would have no way to know they were once active.
+
+Note: .popitem() abuses the me_hash field of an Unused or Dummy slot to
+hold a search finger. The me_hash field of Unused or Dummy slots has no
+meaning otherwise.
+*/
+
+#if PY_VERSION_HEX < 0x02050000
+#ifdef _MSC_VER
+ typedef int Py_ssize_t;
+#else
+typedef ssize_t Py_ssize_t;
+#endif
+typedef Py_ssize_t (*lenfunc)(PyObject *);
+typedef intintargfunc ssizessizeargfunc;
+typedef intintobjargproc ssizessizeobjargproc;
+
+#define PyInt_FromSize_t(A) PyInt_FromLong((long) A)
+#endif
+
+/* PyOrderedDict_MINSIZE is the minimum size of a dictionary. This many slots are
+ * allocated directly in the dict object (in the ma_smalltable member).
+ * It must be a power of 2, and at least 4. 8 allows dicts with no more
+ * than 5 active entries to live in ma_smalltable (and so avoid an
+ * additional malloc); instrumentation suggested this suffices for the
+ * majority of dicts (consisting mostly of usually-small instance dicts and
+ * usually-small dicts created to pass keyword arguments).
+ */
+#define PyOrderedDict_MINSIZE 8
+
+typedef struct {
+ /* Cached hash code of me_key. Note that hash codes are C longs.
+ * We have to use Py_ssize_t instead because dict_popitem() abuses
+ * me_hash to hold a search finger.
+ */
+ Py_ssize_t me_hash;
+ PyObject *me_key;
+ PyObject *me_value;
+} PyOrderedDictEntry;
+
+/*
+To ensure the lookup algorithm terminates, there must be at least one Unused
+slot (NULL key) in the table.
+The value od_fill is the number of non-NULL keys (sum of Active and Dummy);
+ma_used is the number of non-NULL, non-dummy keys (== the number of non-NULL
+values == the number of Active items).
+To avoid slowing down lookups on a near-full table, we resize the table when
+it's two-thirds full.
+*/
+typedef struct _ordereddictobject PyOrderedDictObject;
+struct _ordereddictobject {
+#if PY_MAJOR_VERSION < 3
+ PyObject_HEAD
+#else
+ PyObject_VAR_HEAD
+#endif
+ Py_ssize_t od_fill; /* # Active + # Dummy */
+ Py_ssize_t ma_used; /* # Active */
+
+ /* The table contains ma_mask + 1 slots, and that's a power of 2.
+ * We store the mask instead of the size because the mask is more
+ * frequently needed.
+ */
+ Py_ssize_t ma_mask;
+
+ /* ma_table points to ma_smalltable for small tables, else to
+ * additional malloc'ed memory. ma_table is never NULL! This rule
+ * saves repeated runtime null-tests in the workhorse getitem and
+ * setitem calls.
+ */
+ PyOrderedDictEntry *ma_table;
+ PyOrderedDictEntry *(*ma_lookup)(PyOrderedDictObject *mp, PyObject *key, long hash);
+ PyOrderedDictEntry ma_smalltable[PyOrderedDict_MINSIZE];
+ /* for small arrays, ordered table pointer points to small array of tables */
+ PyOrderedDictEntry **od_otablep;
+ PyOrderedDictEntry *ma_smallotablep[PyOrderedDict_MINSIZE];
+ /* for storing kvio, relaxed bits */
+ long od_state;
+};
+
+typedef struct _sorteddictobject PySortedDictObject;
+struct _sorteddictobject {
+ struct _ordereddictobject od;
+ PyObject *sd_cmp;
+ PyObject *sd_key;
+ PyObject *sd_value;
+};
+
+
+PyAPI_DATA(PyTypeObject) PyOrderedDict_Type;
+PyAPI_DATA(PyTypeObject) PySortedDict_Type;
+#if PY_VERSION_HEX >= 0x02070000
+PyAPI_DATA(PyTypeObject) PyOrderedDictIterKey_Type;
+PyAPI_DATA(PyTypeObject) PyOrderedDictIterValue_Type;
+PyAPI_DATA(PyTypeObject) PyOrderedDictIterItem_Type;
+#endif
+PyAPI_DATA(PyTypeObject) PyOrderedDictKeys_Type;
+PyAPI_DATA(PyTypeObject) PyOrderedDictItems_Type;
+PyAPI_DATA(PyTypeObject) PyOrderedDictValues_Type;
+
+#if PY_VERSION_HEX >= 0x02080000
+ /* AvdN: this might need reviewing for > 2.7 */
+ #define PyOrderedDict_Check(op) \
+ PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_DICT_SUBCLASS)
+ #define PySortedDict_Check(op) \
+ PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_DICT_SUBCLASS)
+ #define PyOrderedDict_CheckExact(op) (Py_TYPE(op) == &PyOrderedDict_Type)
+ #define PySortedDict_CheckExact(op) (Py_TYPE(op) == &PySortedDict_Type)
+#else
+ #define PyOrderedDict_Check(op) PyObject_TypeCheck(op, &PyOrderedDict_Type)
+ #define PySortedDict_Check(op) PyObject_TypeCheck(op, &PySortedDict_Type)
+ #define PyOrderedDict_CheckExact(op) ((op)->ob_type == &PyOrderedDict_Type)
+ #define PySortedDict_CheckExact(op) ((op)->ob_type == &PySortedDict_Type)
+#endif
+
+PyAPI_FUNC(PyObject *) PyOrderedDict_New(void);
+PyAPI_FUNC(PyObject *) PyOrderedDict_GetItem(PyObject *mp, PyObject *key);
+PyAPI_FUNC(int) PyOrderedDict_SetItem(PyObject *mp, PyObject *key, PyObject *item);
+PyAPI_FUNC(int) PyOrderedDict_DelItem(PyObject *mp, PyObject *key);
+PyAPI_FUNC(void) PyOrderedDict_Clear(PyObject *mp);
+PyAPI_FUNC(int) PyOrderedDict_Next(
+ PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value);
+PyAPI_FUNC(int) _PyOrderedDict_Next(
+ PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, long *hash);
+PyAPI_FUNC(PyObject *) PyOrderedDict_Keys(PyObject *mp);
+PyAPI_FUNC(PyObject *) PyOrderedDict_Values(PyObject *mp);
+PyAPI_FUNC(PyObject *) PyOrderedDict_Items(PyObject *mp);
+PyAPI_FUNC(Py_ssize_t) PyOrderedDict_Size(PyObject *mp);
+PyAPI_FUNC(PyObject *) PyOrderedDict_Copy(PyObject *mp);
+PyAPI_FUNC(int) PyOrderedDict_Contains(PyObject *mp, PyObject *key);
+PyAPI_FUNC(int) _PyOrderedDict_Contains(PyObject *mp, PyObject *key, long hash);
+PyAPI_FUNC(PyObject *) _PyOrderedDict_NewPresized(Py_ssize_t minused);
+PyAPI_FUNC(void) _PyOrderedDict_MaybeUntrack(PyObject *mp);
+
+/* PyOrderedDict_Update(mp, other) is equivalent to PyOrderedDict_Merge(mp, other, 1). */
+PyAPI_FUNC(int) PyOrderedDict_Update(PyObject *mp, PyObject *other);
+
+/* PyOrderedDict_Merge updates/merges from a mapping object (an object that
+ supports PyMapping_Keys() and PyObject_GetItem()). If override is true,
+ the last occurrence of a key wins, else the first. The Python
+ dict.update(other) is equivalent to PyOrderedDict_Merge(dict, other, 1).
+*/
+PyAPI_FUNC(int) PyOrderedDict_Merge(PyObject *mp,
+ PyObject *other,
+ int override, int relaxed);
+
+/* PyOrderedDict_MergeFromSeq2 updates/merges from an iterable object producing
+ iterable objects of length 2. If override is true, the last occurrence
+ of a key wins, else the first. The Python dict constructor dict(seq2)
+ is equivalent to dict={}; PyOrderedDict_MergeFromSeq(dict, seq2, 1).
+*/
+PyAPI_FUNC(int) PyOrderedDict_MergeFromSeq2(PyObject *d,
+ PyObject *seq2,
+ int override);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_ORDEREDDICTOBJECT_H */
diff --git a/contrib/deprecated/python/ruamel.ordereddict/ruamel/ordereddict/__init__.py b/contrib/deprecated/python/ruamel.ordereddict/ruamel/ordereddict/__init__.py
new file mode 100644
index 0000000000..2e76ec3e16
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/ruamel/ordereddict/__init__.py
@@ -0,0 +1,7 @@
+# coding: utf-8
+
+# Version tuple: int components become dotted numbers; any non-int tag
+# (e.g. a release-candidate string) is wrapped in dots and the resulting
+# '..' seams are collapsed by the replace() below.
+version_info = (0, 4, 15)
+version = '.'.join([str(x) if isinstance(x, int) else '.' + x + '.'
+                    for x in version_info]).replace('..', '')
+
+# Re-export the C extension types as the public package API.
+from _ordereddict import ordereddict, sorteddict
diff --git a/contrib/deprecated/python/ruamel.ordereddict/ya.make b/contrib/deprecated/python/ruamel.ordereddict/ya.make
new file mode 100644
index 0000000000..a67faa8b88
--- /dev/null
+++ b/contrib/deprecated/python/ruamel.ordereddict/ya.make
@@ -0,0 +1,36 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(0.4.15)
+
+LICENSE(MIT)
+
+NO_COMPILER_WARNINGS()
+
+NO_LINT()
+
+SRCS(
+ ordereddict.c
+)
+
+PY_REGISTER(
+ _ordereddict
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ ruamel/ordereddict/__init__.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/deprecated/python/ruamel.ordereddict/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/PyJWT/py2/AUTHORS b/contrib/python/PyJWT/py2/AUTHORS
new file mode 100644
index 0000000000..90c7fa4d8c
--- /dev/null
+++ b/contrib/python/PyJWT/py2/AUTHORS
@@ -0,0 +1,29 @@
+PyJWT lead developer
+---------------------
+
+ - jpadilla <hello@jpadilla.com>
+
+
+Original author
+------------------
+
+- progrium <progrium@gmail.com>
+
+
+Patches and Suggestions
+-----------------------
+
+ - Boris Feld <boris.feld@novapost.fr> <lothiraldan@gmail.com>
+
+ - Åsmund Ødegård <asmund@xal.no> <ao@mcash.no>
+   Adding support for RSA-SHA256 private/public signature.
+
+ - Mark Adams <mark@markadams.me>
+
+ - Wouter Bolsterlee <uws@xs4all.nl>
+
+ - Michael Davis <mike.philip.davis@gmail.com> <mike.davis@workiva.com>
+
+ - Vinod Gupta <codervinod@gmail.com>
+
+ - Derek Weitzel <djw8605@gmail.com>
diff --git a/contrib/python/PyJWT/py2/LICENSE b/contrib/python/PyJWT/py2/LICENSE
new file mode 100644
index 0000000000..bdc7819ea1
--- /dev/null
+++ b/contrib/python/PyJWT/py2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 José Padilla
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/PyJWT/py2/README.rst b/contrib/python/PyJWT/py2/README.rst
new file mode 100644
index 0000000000..07f6fead01
--- /dev/null
+++ b/contrib/python/PyJWT/py2/README.rst
@@ -0,0 +1,81 @@
+PyJWT
+=====
+
+.. image:: https://travis-ci.com/jpadilla/pyjwt.svg?branch=master
+ :target: http://travis-ci.com/jpadilla/pyjwt?branch=master
+
+.. image:: https://ci.appveyor.com/api/projects/status/h8nt70aqtwhht39t?svg=true
+ :target: https://ci.appveyor.com/project/jpadilla/pyjwt
+
+.. image:: https://img.shields.io/pypi/v/pyjwt.svg
+ :target: https://pypi.python.org/pypi/pyjwt
+
+.. image:: https://coveralls.io/repos/jpadilla/pyjwt/badge.svg?branch=master
+ :target: https://coveralls.io/r/jpadilla/pyjwt?branch=master
+
+.. image:: https://readthedocs.org/projects/pyjwt/badge/?version=latest
+ :target: https://pyjwt.readthedocs.io
+
+A Python implementation of `RFC 7519 <https://tools.ietf.org/html/rfc7519>`_. Original implementation was written by `@progrium <https://github.com/progrium>`_.
+
+Sponsor
+-------
+
++--------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| |auth0-logo| | If you want to quickly add secure token-based authentication to Python projects, feel free to check Auth0's Python SDK and free plan at `auth0.com/overview <https://auth0.com/overview?utm_source=GHsponsor&utm_medium=GHsponsor&utm_campaign=pyjwt&utm_content=auth>`_. |
++--------------+-----------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+.. |auth0-logo| image:: https://user-images.githubusercontent.com/83319/31722733-de95bbde-b3ea-11e7-96bf-4f4e8f915588.png
+
+Installing
+----------
+
+Install with **pip**:
+
+.. code-block:: sh
+
+ $ pip install PyJWT
+
+
+Usage
+-----
+
+.. code:: python
+
+ >>> import jwt
+ >>> encoded = jwt.encode({'some': 'payload'}, 'secret', algorithm='HS256')
+ 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb21lIjoicGF5bG9hZCJ9.4twFt5NiznN84AWoo1d7KO1T_yoc0Z6XOpOVswacPZg'
+
+ >>> jwt.decode(encoded, 'secret', algorithms=['HS256'])
+ {'some': 'payload'}
+
+
+Command line
+------------
+
+Usage::
+
+ pyjwt [options] INPUT
+
+Decoding examples::
+
+ pyjwt --key=secret decode TOKEN
+ pyjwt decode --no-verify TOKEN
+
+See more options executing ``pyjwt --help``.
+
+
+Documentation
+-------------
+
+View the full docs online at https://pyjwt.readthedocs.io/en/latest/
+
+
+Tests
+-----
+
+You can run tests from the project root after cloning with:
+
+.. code-block:: sh
+
+ $ python setup.py test
diff --git a/contrib/python/PyJWT/py3/.dist-info/METADATA b/contrib/python/PyJWT/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..f5fbdf64a5
--- /dev/null
+++ b/contrib/python/PyJWT/py3/.dist-info/METADATA
@@ -0,0 +1,107 @@
+Metadata-Version: 2.1
+Name: PyJWT
+Version: 2.3.0
+Summary: JSON Web Token implementation in Python
+Home-page: https://github.com/jpadilla/pyjwt
+Author: Jose Padilla
+Author-email: hello@jpadilla.com
+License: MIT
+Keywords: json,jwt,security,signing,token,web
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Utilities
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+Provides-Extra: crypto
+Requires-Dist: cryptography (>=3.3.1) ; extra == 'crypto'
+Provides-Extra: dev
+Requires-Dist: sphinx ; extra == 'dev'
+Requires-Dist: sphinx-rtd-theme ; extra == 'dev'
+Requires-Dist: zope.interface ; extra == 'dev'
+Requires-Dist: cryptography (>=3.3.1) ; extra == 'dev'
+Requires-Dist: pytest (<7.0.0,>=6.0.0) ; extra == 'dev'
+Requires-Dist: coverage[toml] (==5.0.4) ; extra == 'dev'
+Requires-Dist: mypy ; extra == 'dev'
+Requires-Dist: pre-commit ; extra == 'dev'
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: sphinx-rtd-theme ; extra == 'docs'
+Requires-Dist: zope.interface ; extra == 'docs'
+Provides-Extra: tests
+Requires-Dist: pytest (<7.0.0,>=6.0.0) ; extra == 'tests'
+Requires-Dist: coverage[toml] (==5.0.4) ; extra == 'tests'
+
+PyJWT
+=====
+
+.. image:: https://github.com/jpadilla/pyjwt/workflows/CI/badge.svg
+ :target: https://github.com/jpadilla/pyjwt/actions?query=workflow%3ACI
+
+.. image:: https://img.shields.io/pypi/v/pyjwt.svg
+ :target: https://pypi.python.org/pypi/pyjwt
+
+.. image:: https://codecov.io/gh/jpadilla/pyjwt/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/jpadilla/pyjwt
+
+.. image:: https://readthedocs.org/projects/pyjwt/badge/?version=stable
+ :target: https://pyjwt.readthedocs.io/en/stable/
+
+A Python implementation of `RFC 7519 <https://tools.ietf.org/html/rfc7519>`_. Original implementation was written by `@progrium <https://github.com/progrium>`_.
+
+Sponsor
+-------
+
++--------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| |auth0-logo| | If you want to quickly add secure token-based authentication to Python projects, feel free to check Auth0's Python SDK and free plan at `auth0.com/developers <https://auth0.com/developers?utm_source=GHsponsor&utm_medium=GHsponsor&utm_campaign=pyjwt&utm_content=auth>`_. |
++--------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+.. |auth0-logo| image:: https://user-images.githubusercontent.com/83319/31722733-de95bbde-b3ea-11e7-96bf-4f4e8f915588.png
+
+Installing
+----------
+
+Install with **pip**:
+
+.. code-block:: console
+
+ $ pip install PyJWT
+
+
+Usage
+-----
+
+.. code-block:: pycon
+
+ >>> import jwt
+ >>> encoded = jwt.encode({"some": "payload"}, "secret", algorithm="HS256")
+ >>> print(encoded)
+ eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzb21lIjoicGF5bG9hZCJ9.Joh1R2dYzkRvDkqv3sygm5YyK8Gi4ShZqbhK2gxcs2U
+ >>> jwt.decode(encoded, "secret", algorithms=["HS256"])
+ {'some': 'payload'}
+
+Documentation
+-------------
+
+View the full docs online at https://pyjwt.readthedocs.io/en/stable/
+
+
+Tests
+-----
+
+You can run tests from the project root after cloning with:
+
+.. code-block:: console
+
+ $ tox
+
+
diff --git a/contrib/python/PyJWT/py3/.dist-info/top_level.txt b/contrib/python/PyJWT/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..27ccc9bc3a
--- /dev/null
+++ b/contrib/python/PyJWT/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+jwt
diff --git a/contrib/python/PyJWT/py3/AUTHORS.rst b/contrib/python/PyJWT/py3/AUTHORS.rst
new file mode 100644
index 0000000000..88e2b6ad75
--- /dev/null
+++ b/contrib/python/PyJWT/py3/AUTHORS.rst
@@ -0,0 +1,7 @@
+Authors
+=======
+
+``pyjwt`` is currently written and maintained by `Jose Padilla <https://github.com/jpadilla>`_.
+Originally written and maintained by `Jeff Lindsay <https://github.com/progrium>`_.
+
+A full list of contributors can be found on GitHub’s `overview <https://github.com/jpadilla/pyjwt/graphs/contributors>`_.
diff --git a/contrib/python/PyJWT/py3/LICENSE b/contrib/python/PyJWT/py3/LICENSE
new file mode 100644
index 0000000000..bdc7819ea1
--- /dev/null
+++ b/contrib/python/PyJWT/py3/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 José Padilla
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/PyJWT/py3/README.rst b/contrib/python/PyJWT/py3/README.rst
new file mode 100644
index 0000000000..49aa77a8e7
--- /dev/null
+++ b/contrib/python/PyJWT/py3/README.rst
@@ -0,0 +1,62 @@
+PyJWT
+=====
+
+.. image:: https://github.com/jpadilla/pyjwt/workflows/CI/badge.svg
+ :target: https://github.com/jpadilla/pyjwt/actions?query=workflow%3ACI
+
+.. image:: https://img.shields.io/pypi/v/pyjwt.svg
+ :target: https://pypi.python.org/pypi/pyjwt
+
+.. image:: https://codecov.io/gh/jpadilla/pyjwt/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/jpadilla/pyjwt
+
+.. image:: https://readthedocs.org/projects/pyjwt/badge/?version=stable
+ :target: https://pyjwt.readthedocs.io/en/stable/
+
+A Python implementation of `RFC 7519 <https://tools.ietf.org/html/rfc7519>`_. Original implementation was written by `@progrium <https://github.com/progrium>`_.
+
+Sponsor
+-------
+
++--------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| |auth0-logo| | If you want to quickly add secure token-based authentication to Python projects, feel free to check Auth0's Python SDK and free plan at `auth0.com/developers <https://auth0.com/developers?utm_source=GHsponsor&utm_medium=GHsponsor&utm_campaign=pyjwt&utm_content=auth>`_. |
++--------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+.. |auth0-logo| image:: https://user-images.githubusercontent.com/83319/31722733-de95bbde-b3ea-11e7-96bf-4f4e8f915588.png
+
+Installing
+----------
+
+Install with **pip**:
+
+.. code-block:: console
+
+ $ pip install PyJWT
+
+
+Usage
+-----
+
+.. code-block:: pycon
+
+ >>> import jwt
+ >>> encoded = jwt.encode({"some": "payload"}, "secret", algorithm="HS256")
+ >>> print(encoded)
+ eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzb21lIjoicGF5bG9hZCJ9.Joh1R2dYzkRvDkqv3sygm5YyK8Gi4ShZqbhK2gxcs2U
+ >>> jwt.decode(encoded, "secret", algorithms=["HS256"])
+ {'some': 'payload'}
+
+Documentation
+-------------
+
+View the full docs online at https://pyjwt.readthedocs.io/en/stable/
+
+
+Tests
+-----
+
+You can run tests from the project root after cloning with:
+
+.. code-block:: console
+
+ $ tox
diff --git a/contrib/python/PyJWT/py3/jwt/__init__.py b/contrib/python/PyJWT/py3/jwt/__init__.py
new file mode 100644
index 0000000000..3208c39f39
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/__init__.py
@@ -0,0 +1,70 @@
+from .api_jwk import PyJWK, PyJWKSet
+from .api_jws import (
+ PyJWS,
+ get_unverified_header,
+ register_algorithm,
+ unregister_algorithm,
+)
+from .api_jwt import PyJWT, decode, encode
+from .exceptions import (
+ DecodeError,
+ ExpiredSignatureError,
+ ImmatureSignatureError,
+ InvalidAlgorithmError,
+ InvalidAudienceError,
+ InvalidIssuedAtError,
+ InvalidIssuerError,
+ InvalidKeyError,
+ InvalidSignatureError,
+ InvalidTokenError,
+ MissingRequiredClaimError,
+ PyJWKClientError,
+ PyJWKError,
+ PyJWKSetError,
+ PyJWTError,
+)
+from .jwks_client import PyJWKClient
+
+__version__ = "2.3.0"
+
+__title__ = "PyJWT"
+__description__ = "JSON Web Token implementation in Python"
+__url__ = "https://pyjwt.readthedocs.io"
+__uri__ = __url__
+__doc__ = __description__ + " <" + __uri__ + ">"
+
+__author__ = "José Padilla"
+__email__ = "hello@jpadilla.com"
+
+__license__ = "MIT"
+__copyright__ = "Copyright 2015-2020 José Padilla"
+
+
+__all__ = [
+ "PyJWS",
+ "PyJWT",
+ "PyJWKClient",
+ "PyJWK",
+ "PyJWKSet",
+ "decode",
+ "encode",
+ "get_unverified_header",
+ "register_algorithm",
+ "unregister_algorithm",
+ # Exceptions
+ "DecodeError",
+ "ExpiredSignatureError",
+ "ImmatureSignatureError",
+ "InvalidAlgorithmError",
+ "InvalidAudienceError",
+ "InvalidIssuedAtError",
+ "InvalidIssuerError",
+ "InvalidKeyError",
+ "InvalidSignatureError",
+ "InvalidTokenError",
+ "MissingRequiredClaimError",
+ "PyJWKClientError",
+ "PyJWKError",
+ "PyJWKSetError",
+ "PyJWTError",
+]
diff --git a/contrib/python/PyJWT/py3/jwt/algorithms.py b/contrib/python/PyJWT/py3/jwt/algorithms.py
new file mode 100644
index 0000000000..1f8865afbd
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/algorithms.py
@@ -0,0 +1,674 @@
+import hashlib
+import hmac
+import json
+
+from .exceptions import InvalidKeyError
+from .utils import (
+ base64url_decode,
+ base64url_encode,
+ der_to_raw_signature,
+ force_bytes,
+ from_base64url_uint,
+ raw_to_der_signature,
+ to_base64url_uint,
+)
+
+try:
+ import cryptography.exceptions
+ from cryptography.exceptions import InvalidSignature
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives.asymmetric import ec, padding
+ from cryptography.hazmat.primitives.asymmetric.ec import (
+ EllipticCurvePrivateKey,
+ EllipticCurvePublicKey,
+ )
+ from cryptography.hazmat.primitives.asymmetric.ed448 import (
+ Ed448PrivateKey,
+ Ed448PublicKey,
+ )
+ from cryptography.hazmat.primitives.asymmetric.ed25519 import (
+ Ed25519PrivateKey,
+ Ed25519PublicKey,
+ )
+ from cryptography.hazmat.primitives.asymmetric.rsa import (
+ RSAPrivateKey,
+ RSAPrivateNumbers,
+ RSAPublicKey,
+ RSAPublicNumbers,
+ rsa_crt_dmp1,
+ rsa_crt_dmq1,
+ rsa_crt_iqmp,
+ rsa_recover_prime_factors,
+ )
+ from cryptography.hazmat.primitives.serialization import (
+ Encoding,
+ NoEncryption,
+ PrivateFormat,
+ PublicFormat,
+ load_pem_private_key,
+ load_pem_public_key,
+ load_ssh_public_key,
+ )
+
+ has_crypto = True
+except ModuleNotFoundError:
+ has_crypto = False
+
+requires_cryptography = {
+ "RS256",
+ "RS384",
+ "RS512",
+ "ES256",
+ "ES256K",
+ "ES384",
+ "ES521",
+ "ES512",
+ "PS256",
+ "PS384",
+ "PS512",
+ "EdDSA",
+}
+
+
+def get_default_algorithms():
+ """
+ Returns the algorithms that are implemented by the library.
+ """
+ default_algorithms = {
+ "none": NoneAlgorithm(),
+ "HS256": HMACAlgorithm(HMACAlgorithm.SHA256),
+ "HS384": HMACAlgorithm(HMACAlgorithm.SHA384),
+ "HS512": HMACAlgorithm(HMACAlgorithm.SHA512),
+ }
+
+ if has_crypto:
+ default_algorithms.update(
+ {
+ "RS256": RSAAlgorithm(RSAAlgorithm.SHA256),
+ "RS384": RSAAlgorithm(RSAAlgorithm.SHA384),
+ "RS512": RSAAlgorithm(RSAAlgorithm.SHA512),
+ "ES256": ECAlgorithm(ECAlgorithm.SHA256),
+ "ES256K": ECAlgorithm(ECAlgorithm.SHA256),
+ "ES384": ECAlgorithm(ECAlgorithm.SHA384),
+ "ES521": ECAlgorithm(ECAlgorithm.SHA512),
+ "ES512": ECAlgorithm(
+ ECAlgorithm.SHA512
+ ), # Backward compat for #219 fix
+ "PS256": RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256),
+ "PS384": RSAPSSAlgorithm(RSAPSSAlgorithm.SHA384),
+ "PS512": RSAPSSAlgorithm(RSAPSSAlgorithm.SHA512),
+ "EdDSA": OKPAlgorithm(),
+ }
+ )
+
+ return default_algorithms
+
+
+class Algorithm:
+ """
+ The interface for an algorithm used to sign and verify tokens.
+ """
+
+ def prepare_key(self, key):
+ """
+ Performs necessary validation and conversions on the key and returns
+ the key value in the proper format for sign() and verify().
+ """
+ raise NotImplementedError
+
+ def sign(self, msg, key):
+ """
+ Returns a digital signature for the specified message
+ using the specified key value.
+ """
+ raise NotImplementedError
+
+ def verify(self, msg, key, sig):
+ """
+ Verifies that the specified digital signature is valid
+ for the specified message and key values.
+ """
+ raise NotImplementedError
+
+ @staticmethod
+ def to_jwk(key_obj):
+ """
+ Serializes a given RSA key into a JWK
+ """
+ raise NotImplementedError
+
+ @staticmethod
+ def from_jwk(jwk):
+ """
+ Deserializes a given RSA key from JWK back into a PublicKey or PrivateKey object
+ """
+ raise NotImplementedError
+
+
+class NoneAlgorithm(Algorithm):
+ """
+ Placeholder for use when no signing or verification
+ operations are required.
+ """
+
+ def prepare_key(self, key):
+ if key == "":
+ key = None
+
+ if key is not None:
+ raise InvalidKeyError('When alg = "none", key value must be None.')
+
+ return key
+
+ def sign(self, msg, key):
+ return b""
+
+ def verify(self, msg, key, sig):
+ return False
+
+
+class HMACAlgorithm(Algorithm):
+ """
+ Performs signing and verification operations using HMAC
+ and the specified hash function.
+ """
+
+ SHA256 = hashlib.sha256
+ SHA384 = hashlib.sha384
+ SHA512 = hashlib.sha512
+
+ def __init__(self, hash_alg):
+ self.hash_alg = hash_alg
+
+ def prepare_key(self, key):
+ key = force_bytes(key)
+
+ invalid_strings = [
+ b"-----BEGIN PUBLIC KEY-----",
+ b"-----BEGIN CERTIFICATE-----",
+ b"-----BEGIN RSA PUBLIC KEY-----",
+ b"ssh-rsa",
+ ]
+
+ if any(string_value in key for string_value in invalid_strings):
+ raise InvalidKeyError(
+ "The specified key is an asymmetric key or x509 certificate and"
+ " should not be used as an HMAC secret."
+ )
+
+ return key
+
+ @staticmethod
+ def to_jwk(key_obj):
+ return json.dumps(
+ {
+ "k": base64url_encode(force_bytes(key_obj)).decode(),
+ "kty": "oct",
+ }
+ )
+
+ @staticmethod
+ def from_jwk(jwk):
+ try:
+ if isinstance(jwk, str):
+ obj = json.loads(jwk)
+ elif isinstance(jwk, dict):
+ obj = jwk
+ else:
+ raise ValueError
+ except ValueError:
+ raise InvalidKeyError("Key is not valid JSON")
+
+ if obj.get("kty") != "oct":
+ raise InvalidKeyError("Not an HMAC key")
+
+ return base64url_decode(obj["k"])
+
+ def sign(self, msg, key):
+ return hmac.new(key, msg, self.hash_alg).digest()
+
+ def verify(self, msg, key, sig):
+ return hmac.compare_digest(sig, self.sign(msg, key))
+
+
+if has_crypto:
+
+ class RSAAlgorithm(Algorithm):
+ """
+ Performs signing and verification operations using
+ RSASSA-PKCS-v1_5 and the specified hash function.
+ """
+
+ SHA256 = hashes.SHA256
+ SHA384 = hashes.SHA384
+ SHA512 = hashes.SHA512
+
+ def __init__(self, hash_alg):
+ self.hash_alg = hash_alg
+
+ def prepare_key(self, key):
+ if isinstance(key, (RSAPrivateKey, RSAPublicKey)):
+ return key
+
+ if not isinstance(key, (bytes, str)):
+ raise TypeError("Expecting a PEM-formatted key.")
+
+ key = force_bytes(key)
+
+ try:
+ if key.startswith(b"ssh-rsa"):
+ key = load_ssh_public_key(key)
+ else:
+ key = load_pem_private_key(key, password=None)
+ except ValueError:
+ key = load_pem_public_key(key)
+ return key
+
+ @staticmethod
+ def to_jwk(key_obj):
+ obj = None
+
+ if getattr(key_obj, "private_numbers", None):
+ # Private key
+ numbers = key_obj.private_numbers()
+
+ obj = {
+ "kty": "RSA",
+ "key_ops": ["sign"],
+ "n": to_base64url_uint(numbers.public_numbers.n).decode(),
+ "e": to_base64url_uint(numbers.public_numbers.e).decode(),
+ "d": to_base64url_uint(numbers.d).decode(),
+ "p": to_base64url_uint(numbers.p).decode(),
+ "q": to_base64url_uint(numbers.q).decode(),
+ "dp": to_base64url_uint(numbers.dmp1).decode(),
+ "dq": to_base64url_uint(numbers.dmq1).decode(),
+ "qi": to_base64url_uint(numbers.iqmp).decode(),
+ }
+
+ elif getattr(key_obj, "verify", None):
+ # Public key
+ numbers = key_obj.public_numbers()
+
+ obj = {
+ "kty": "RSA",
+ "key_ops": ["verify"],
+ "n": to_base64url_uint(numbers.n).decode(),
+ "e": to_base64url_uint(numbers.e).decode(),
+ }
+ else:
+ raise InvalidKeyError("Not a public or private key")
+
+ return json.dumps(obj)
+
+ @staticmethod
+ def from_jwk(jwk):
+ try:
+ if isinstance(jwk, str):
+ obj = json.loads(jwk)
+ elif isinstance(jwk, dict):
+ obj = jwk
+ else:
+ raise ValueError
+ except ValueError:
+ raise InvalidKeyError("Key is not valid JSON")
+
+ if obj.get("kty") != "RSA":
+ raise InvalidKeyError("Not an RSA key")
+
+ if "d" in obj and "e" in obj and "n" in obj:
+ # Private key
+ if "oth" in obj:
+ raise InvalidKeyError(
+ "Unsupported RSA private key: > 2 primes not supported"
+ )
+
+ other_props = ["p", "q", "dp", "dq", "qi"]
+ props_found = [prop in obj for prop in other_props]
+ any_props_found = any(props_found)
+
+ if any_props_found and not all(props_found):
+ raise InvalidKeyError(
+ "RSA key must include all parameters if any are present besides d"
+ )
+
+ public_numbers = RSAPublicNumbers(
+ from_base64url_uint(obj["e"]),
+ from_base64url_uint(obj["n"]),
+ )
+
+ if any_props_found:
+ numbers = RSAPrivateNumbers(
+ d=from_base64url_uint(obj["d"]),
+ p=from_base64url_uint(obj["p"]),
+ q=from_base64url_uint(obj["q"]),
+ dmp1=from_base64url_uint(obj["dp"]),
+ dmq1=from_base64url_uint(obj["dq"]),
+ iqmp=from_base64url_uint(obj["qi"]),
+ public_numbers=public_numbers,
+ )
+ else:
+ d = from_base64url_uint(obj["d"])
+ p, q = rsa_recover_prime_factors(
+ public_numbers.n, d, public_numbers.e
+ )
+
+ numbers = RSAPrivateNumbers(
+ d=d,
+ p=p,
+ q=q,
+ dmp1=rsa_crt_dmp1(d, p),
+ dmq1=rsa_crt_dmq1(d, q),
+ iqmp=rsa_crt_iqmp(p, q),
+ public_numbers=public_numbers,
+ )
+
+ return numbers.private_key()
+ elif "n" in obj and "e" in obj:
+ # Public key
+ numbers = RSAPublicNumbers(
+ from_base64url_uint(obj["e"]),
+ from_base64url_uint(obj["n"]),
+ )
+
+ return numbers.public_key()
+ else:
+ raise InvalidKeyError("Not a public or private key")
+
+ def sign(self, msg, key):
+ return key.sign(msg, padding.PKCS1v15(), self.hash_alg())
+
+ def verify(self, msg, key, sig):
+ try:
+ key.verify(sig, msg, padding.PKCS1v15(), self.hash_alg())
+ return True
+ except InvalidSignature:
+ return False
+
+ class ECAlgorithm(Algorithm):
+ """
+ Performs signing and verification operations using
+ ECDSA and the specified hash function
+ """
+
+ SHA256 = hashes.SHA256
+ SHA384 = hashes.SHA384
+ SHA512 = hashes.SHA512
+
+ def __init__(self, hash_alg):
+ self.hash_alg = hash_alg
+
+ def prepare_key(self, key):
+ if isinstance(key, (EllipticCurvePrivateKey, EllipticCurvePublicKey)):
+ return key
+
+ if not isinstance(key, (bytes, str)):
+ raise TypeError("Expecting a PEM-formatted key.")
+
+ key = force_bytes(key)
+
+ # Attempt to load key. We don't know if it's
+ # a Signing Key or a Verifying Key, so we try
+ # the Verifying Key first.
+ try:
+ if key.startswith(b"ecdsa-sha2-"):
+ key = load_ssh_public_key(key)
+ else:
+ key = load_pem_public_key(key)
+ except ValueError:
+ key = load_pem_private_key(key, password=None)
+
+ return key
+
+ def sign(self, msg, key):
+ der_sig = key.sign(msg, ec.ECDSA(self.hash_alg()))
+
+ return der_to_raw_signature(der_sig, key.curve)
+
+ def verify(self, msg, key, sig):
+ try:
+ der_sig = raw_to_der_signature(sig, key.curve)
+ except ValueError:
+ return False
+
+ try:
+ if isinstance(key, EllipticCurvePrivateKey):
+ key = key.public_key()
+ key.verify(der_sig, msg, ec.ECDSA(self.hash_alg()))
+ return True
+ except InvalidSignature:
+ return False
+
+ @staticmethod
+ def from_jwk(jwk):
+ try:
+ if isinstance(jwk, str):
+ obj = json.loads(jwk)
+ elif isinstance(jwk, dict):
+ obj = jwk
+ else:
+ raise ValueError
+ except ValueError:
+ raise InvalidKeyError("Key is not valid JSON")
+
+ if obj.get("kty") != "EC":
+ raise InvalidKeyError("Not an Elliptic curve key")
+
+ if "x" not in obj or "y" not in obj:
+ raise InvalidKeyError("Not an Elliptic curve key")
+
+ x = base64url_decode(obj.get("x"))
+ y = base64url_decode(obj.get("y"))
+
+ curve = obj.get("crv")
+ if curve == "P-256":
+ if len(x) == len(y) == 32:
+ curve_obj = ec.SECP256R1()
+ else:
+ raise InvalidKeyError("Coords should be 32 bytes for curve P-256")
+ elif curve == "P-384":
+ if len(x) == len(y) == 48:
+ curve_obj = ec.SECP384R1()
+ else:
+ raise InvalidKeyError("Coords should be 48 bytes for curve P-384")
+ elif curve == "P-521":
+ if len(x) == len(y) == 66:
+ curve_obj = ec.SECP521R1()
+ else:
+ raise InvalidKeyError("Coords should be 66 bytes for curve P-521")
+ elif curve == "secp256k1":
+ if len(x) == len(y) == 32:
+ curve_obj = ec.SECP256K1()
+ else:
+ raise InvalidKeyError(
+ "Coords should be 32 bytes for curve secp256k1"
+ )
+ else:
+ raise InvalidKeyError(f"Invalid curve: {curve}")
+
+ public_numbers = ec.EllipticCurvePublicNumbers(
+ x=int.from_bytes(x, byteorder="big"),
+ y=int.from_bytes(y, byteorder="big"),
+ curve=curve_obj,
+ )
+
+ if "d" not in obj:
+ return public_numbers.public_key()
+
+ d = base64url_decode(obj.get("d"))
+ if len(d) != len(x):
+ raise InvalidKeyError(
+ "D should be {} bytes for curve {}", len(x), curve
+ )
+
+ return ec.EllipticCurvePrivateNumbers(
+ int.from_bytes(d, byteorder="big"), public_numbers
+ ).private_key()
+
+ class RSAPSSAlgorithm(RSAAlgorithm):
+ """
+ Performs a signature using RSASSA-PSS with MGF1
+ """
+
+ def sign(self, msg, key):
+ return key.sign(
+ msg,
+ padding.PSS(
+ mgf=padding.MGF1(self.hash_alg()),
+ salt_length=self.hash_alg.digest_size,
+ ),
+ self.hash_alg(),
+ )
+
+ def verify(self, msg, key, sig):
+ try:
+ key.verify(
+ sig,
+ msg,
+ padding.PSS(
+ mgf=padding.MGF1(self.hash_alg()),
+ salt_length=self.hash_alg.digest_size,
+ ),
+ self.hash_alg(),
+ )
+ return True
+ except InvalidSignature:
+ return False
+
+ class OKPAlgorithm(Algorithm):
+ """
+ Performs signing and verification operations using EdDSA
+
+ This class requires ``cryptography>=2.6`` to be installed.
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ def prepare_key(self, key):
+
+ if isinstance(
+ key,
+ (Ed25519PrivateKey, Ed25519PublicKey, Ed448PrivateKey, Ed448PublicKey),
+ ):
+ return key
+
+ if isinstance(key, (bytes, str)):
+ if isinstance(key, str):
+ key = key.encode("utf-8")
+ str_key = key.decode("utf-8")
+
+ if "-----BEGIN PUBLIC" in str_key:
+ return load_pem_public_key(key)
+ if "-----BEGIN PRIVATE" in str_key:
+ return load_pem_private_key(key, password=None)
+ if str_key[0:4] == "ssh-":
+ return load_ssh_public_key(key)
+
+ raise TypeError("Expecting a PEM-formatted or OpenSSH key.")
+
+ def sign(self, msg, key):
+ """
+ Sign a message ``msg`` using the EdDSA private key ``key``
+ :param str|bytes msg: Message to sign
+        :param Ed25519PrivateKey|Ed448PrivateKey key: A :class:`.Ed25519PrivateKey`
+            or :class:`.Ed448PrivateKey` instance
+ :return bytes signature: The signature, as bytes
+ """
+ msg = bytes(msg, "utf-8") if type(msg) is not bytes else msg
+ return key.sign(msg)
+
+ def verify(self, msg, key, sig):
+ """
+ Verify a given ``msg`` against a signature ``sig`` using the EdDSA key ``key``
+
+ :param str|bytes sig: EdDSA signature to check ``msg`` against
+        :param str|bytes msg: Message to verify
+ :param Ed25519PrivateKey|Ed25519PublicKey|Ed448PrivateKey|Ed448PublicKey key:
+ A private or public EdDSA key instance
+ :return bool verified: True if signature is valid, False if not.
+ """
+ try:
+ msg = bytes(msg, "utf-8") if type(msg) is not bytes else msg
+ sig = bytes(sig, "utf-8") if type(sig) is not bytes else sig
+
+ if isinstance(key, (Ed25519PrivateKey, Ed448PrivateKey)):
+ key = key.public_key()
+ key.verify(sig, msg)
+ return True # If no exception was raised, the signature is valid.
+ except cryptography.exceptions.InvalidSignature:
+ return False
+
+ @staticmethod
+ def to_jwk(key):
+ if isinstance(key, (Ed25519PublicKey, Ed448PublicKey)):
+ x = key.public_bytes(
+ encoding=Encoding.Raw,
+ format=PublicFormat.Raw,
+ )
+ crv = "Ed25519" if isinstance(key, Ed25519PublicKey) else "Ed448"
+ return json.dumps(
+ {
+ "x": base64url_encode(force_bytes(x)).decode(),
+ "kty": "OKP",
+ "crv": crv,
+ }
+ )
+
+ if isinstance(key, (Ed25519PrivateKey, Ed448PrivateKey)):
+ d = key.private_bytes(
+ encoding=Encoding.Raw,
+ format=PrivateFormat.Raw,
+ encryption_algorithm=NoEncryption(),
+ )
+
+ x = key.public_key().public_bytes(
+ encoding=Encoding.Raw,
+ format=PublicFormat.Raw,
+ )
+
+ crv = "Ed25519" if isinstance(key, Ed25519PrivateKey) else "Ed448"
+ return json.dumps(
+ {
+ "x": base64url_encode(force_bytes(x)).decode(),
+ "d": base64url_encode(force_bytes(d)).decode(),
+ "kty": "OKP",
+ "crv": crv,
+ }
+ )
+
+ raise InvalidKeyError("Not a public or private key")
+
+ @staticmethod
+ def from_jwk(jwk):
+ try:
+ if isinstance(jwk, str):
+ obj = json.loads(jwk)
+ elif isinstance(jwk, dict):
+ obj = jwk
+ else:
+ raise ValueError
+ except ValueError:
+ raise InvalidKeyError("Key is not valid JSON")
+
+ if obj.get("kty") != "OKP":
+ raise InvalidKeyError("Not an Octet Key Pair")
+
+ curve = obj.get("crv")
+ if curve != "Ed25519" and curve != "Ed448":
+ raise InvalidKeyError(f"Invalid curve: {curve}")
+
+ if "x" not in obj:
+ raise InvalidKeyError('OKP should have "x" parameter')
+ x = base64url_decode(obj.get("x"))
+
+ try:
+ if "d" not in obj:
+ if curve == "Ed25519":
+ return Ed25519PublicKey.from_public_bytes(x)
+ return Ed448PublicKey.from_public_bytes(x)
+ d = base64url_decode(obj.get("d"))
+ if curve == "Ed25519":
+ return Ed25519PrivateKey.from_private_bytes(d)
+ return Ed448PrivateKey.from_private_bytes(d)
+ except ValueError as err:
+ raise InvalidKeyError("Invalid key parameter") from err
diff --git a/contrib/python/PyJWT/py3/jwt/api_jwk.py b/contrib/python/PyJWT/py3/jwt/api_jwk.py
new file mode 100644
index 0000000000..a0f6364da0
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/api_jwk.py
@@ -0,0 +1,97 @@
+import json
+
+from .algorithms import get_default_algorithms
+from .exceptions import InvalidKeyError, PyJWKError, PyJWKSetError
+
+
+class PyJWK:
+ def __init__(self, jwk_data, algorithm=None):
+ self._algorithms = get_default_algorithms()
+ self._jwk_data = jwk_data
+
+ kty = self._jwk_data.get("kty", None)
+ if not kty:
+ raise InvalidKeyError("kty is not found: %s" % self._jwk_data)
+
+ if not algorithm and isinstance(self._jwk_data, dict):
+ algorithm = self._jwk_data.get("alg", None)
+
+ if not algorithm:
+ # Determine alg with kty (and crv).
+ crv = self._jwk_data.get("crv", None)
+ if kty == "EC":
+ if crv == "P-256" or not crv:
+ algorithm = "ES256"
+ elif crv == "P-384":
+ algorithm = "ES384"
+ elif crv == "P-521":
+ algorithm = "ES512"
+ elif crv == "secp256k1":
+ algorithm = "ES256K"
+ else:
+ raise InvalidKeyError("Unsupported crv: %s" % crv)
+ elif kty == "RSA":
+ algorithm = "RS256"
+ elif kty == "oct":
+ algorithm = "HS256"
+ elif kty == "OKP":
+ if not crv:
+ raise InvalidKeyError("crv is not found: %s" % self._jwk_data)
+ if crv == "Ed25519":
+ algorithm = "EdDSA"
+ else:
+ raise InvalidKeyError("Unsupported crv: %s" % crv)
+ else:
+ raise InvalidKeyError("Unsupported kty: %s" % kty)
+
+ self.Algorithm = self._algorithms.get(algorithm)
+
+ if not self.Algorithm:
+ raise PyJWKError("Unable to find a algorithm for key: %s" % self._jwk_data)
+
+ self.key = self.Algorithm.from_jwk(self._jwk_data)
+
+ @staticmethod
+ def from_dict(obj, algorithm=None):
+ return PyJWK(obj, algorithm)
+
+ @staticmethod
+ def from_json(data, algorithm=None):
+ obj = json.loads(data)
+ return PyJWK.from_dict(obj, algorithm)
+
+ @property
+ def key_type(self):
+ return self._jwk_data.get("kty", None)
+
+ @property
+ def key_id(self):
+ return self._jwk_data.get("kid", None)
+
+ @property
+ def public_key_use(self):
+ return self._jwk_data.get("use", None)
+
+
+class PyJWKSet:
+ def __init__(self, keys):
+ self.keys = []
+
+ if not keys or not isinstance(keys, list):
+ raise PyJWKSetError("Invalid JWK Set value")
+
+ if len(keys) == 0:
+ raise PyJWKSetError("The JWK Set did not contain any keys")
+
+ for key in keys:
+ self.keys.append(PyJWK(key))
+
+ @staticmethod
+ def from_dict(obj):
+ keys = obj.get("keys", [])
+ return PyJWKSet(keys)
+
+ @staticmethod
+ def from_json(data):
+ obj = json.loads(data)
+ return PyJWKSet.from_dict(obj)
diff --git a/contrib/python/PyJWT/py3/jwt/api_jws.py b/contrib/python/PyJWT/py3/jwt/api_jws.py
new file mode 100644
index 0000000000..f85072e05e
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/api_jws.py
@@ -0,0 +1,259 @@
+import binascii
+import json
+from collections.abc import Mapping
+from typing import Any, Dict, List, Optional, Type
+
+from .algorithms import (
+ Algorithm,
+ get_default_algorithms,
+ has_crypto,
+ requires_cryptography,
+)
+from .exceptions import (
+ DecodeError,
+ InvalidAlgorithmError,
+ InvalidSignatureError,
+ InvalidTokenError,
+)
+from .utils import base64url_decode, base64url_encode
+
+
+class PyJWS:
+ header_typ = "JWT"
+
+ def __init__(self, algorithms=None, options=None):
+ self._algorithms = get_default_algorithms()
+ self._valid_algs = (
+ set(algorithms) if algorithms is not None else set(self._algorithms)
+ )
+
+ # Remove algorithms that aren't on the whitelist
+ for key in list(self._algorithms.keys()):
+ if key not in self._valid_algs:
+ del self._algorithms[key]
+
+ if options is None:
+ options = {}
+ self.options = {**self._get_default_options(), **options}
+
+ @staticmethod
+ def _get_default_options():
+ return {"verify_signature": True}
+
+ def register_algorithm(self, alg_id, alg_obj):
+ """
+ Registers a new Algorithm for use when creating and verifying tokens.
+ """
+ if alg_id in self._algorithms:
+ raise ValueError("Algorithm already has a handler.")
+
+ if not isinstance(alg_obj, Algorithm):
+ raise TypeError("Object is not of type `Algorithm`")
+
+ self._algorithms[alg_id] = alg_obj
+ self._valid_algs.add(alg_id)
+
+ def unregister_algorithm(self, alg_id):
+ """
+ Unregisters an Algorithm for use when creating and verifying tokens
+ Throws KeyError if algorithm is not registered.
+ """
+ if alg_id not in self._algorithms:
+ raise KeyError(
+ "The specified algorithm could not be removed"
+ " because it is not registered."
+ )
+
+ del self._algorithms[alg_id]
+ self._valid_algs.remove(alg_id)
+
+ def get_algorithms(self):
+ """
+ Returns a list of supported values for the 'alg' parameter.
+ """
+ return list(self._valid_algs)
+
+ def encode(
+ self,
+ payload: bytes,
+ key: str,
+ algorithm: Optional[str] = "HS256",
+ headers: Optional[Dict] = None,
+ json_encoder: Optional[Type[json.JSONEncoder]] = None,
+ ) -> str:
+ segments = []
+
+ if algorithm is None:
+ algorithm = "none"
+
+ # Prefer headers["alg"] if present to algorithm parameter.
+ if headers and "alg" in headers and headers["alg"]:
+ algorithm = headers["alg"]
+
+ # Header
+ header = {"typ": self.header_typ, "alg": algorithm}
+
+ if headers:
+ self._validate_headers(headers)
+ header.update(headers)
+ if not header["typ"]:
+ del header["typ"]
+
+ json_header = json.dumps(
+ header, separators=(",", ":"), cls=json_encoder
+ ).encode()
+
+ segments.append(base64url_encode(json_header))
+ segments.append(base64url_encode(payload))
+
+ # Segments
+ signing_input = b".".join(segments)
+ try:
+ alg_obj = self._algorithms[algorithm]
+ key = alg_obj.prepare_key(key)
+ signature = alg_obj.sign(signing_input, key)
+
+ except KeyError as e:
+ if not has_crypto and algorithm in requires_cryptography:
+ raise NotImplementedError(
+ "Algorithm '%s' could not be found. Do you have cryptography "
+ "installed?" % algorithm
+ ) from e
+ else:
+ raise NotImplementedError("Algorithm not supported") from e
+
+ segments.append(base64url_encode(signature))
+
+ encoded_string = b".".join(segments)
+
+ return encoded_string.decode("utf-8")
+
+ def decode_complete(
+ self,
+ jwt: str,
+ key: str = "",
+ algorithms: List[str] = None,
+ options: Dict = None,
+ **kwargs,
+ ) -> Dict[str, Any]:
+ if options is None:
+ options = {}
+ merged_options = {**self.options, **options}
+ verify_signature = merged_options["verify_signature"]
+
+ if verify_signature and not algorithms:
+ raise DecodeError(
+ 'It is required that you pass in a value for the "algorithms" argument when calling decode().'
+ )
+
+ payload, signing_input, header, signature = self._load(jwt)
+
+ if verify_signature:
+ self._verify_signature(signing_input, header, signature, key, algorithms)
+
+ return {
+ "payload": payload,
+ "header": header,
+ "signature": signature,
+ }
+
+ def decode(
+ self,
+ jwt: str,
+ key: str = "",
+ algorithms: List[str] = None,
+ options: Dict = None,
+ **kwargs,
+ ) -> str:
+ decoded = self.decode_complete(jwt, key, algorithms, options, **kwargs)
+ return decoded["payload"]
+
+ def get_unverified_header(self, jwt):
+ """Returns back the JWT header parameters as a dict()
+
+ Note: The signature is not verified so the header parameters
+ should not be fully trusted until signature verification is complete
+ """
+ headers = self._load(jwt)[2]
+ self._validate_headers(headers)
+
+ return headers
+
+ def _load(self, jwt):
+ if isinstance(jwt, str):
+ jwt = jwt.encode("utf-8")
+
+ if not isinstance(jwt, bytes):
+ raise DecodeError(f"Invalid token type. Token must be a {bytes}")
+
+ try:
+ signing_input, crypto_segment = jwt.rsplit(b".", 1)
+ header_segment, payload_segment = signing_input.split(b".", 1)
+ except ValueError as err:
+ raise DecodeError("Not enough segments") from err
+
+ try:
+ header_data = base64url_decode(header_segment)
+ except (TypeError, binascii.Error) as err:
+ raise DecodeError("Invalid header padding") from err
+
+ try:
+ header = json.loads(header_data)
+ except ValueError as e:
+ raise DecodeError("Invalid header string: %s" % e) from e
+
+ if not isinstance(header, Mapping):
+ raise DecodeError("Invalid header string: must be a json object")
+
+ try:
+ payload = base64url_decode(payload_segment)
+ except (TypeError, binascii.Error) as err:
+ raise DecodeError("Invalid payload padding") from err
+
+ try:
+ signature = base64url_decode(crypto_segment)
+ except (TypeError, binascii.Error) as err:
+ raise DecodeError("Invalid crypto padding") from err
+
+ return (payload, signing_input, header, signature)
+
+ def _verify_signature(
+ self,
+ signing_input,
+ header,
+ signature,
+ key="",
+ algorithms=None,
+ ):
+
+ alg = header.get("alg")
+
+ if algorithms is not None and alg not in algorithms:
+ raise InvalidAlgorithmError("The specified alg value is not allowed")
+
+ try:
+ alg_obj = self._algorithms[alg]
+ key = alg_obj.prepare_key(key)
+
+ if not alg_obj.verify(signing_input, key, signature):
+ raise InvalidSignatureError("Signature verification failed")
+
+ except KeyError as e:
+ raise InvalidAlgorithmError("Algorithm not supported") from e
+
+ def _validate_headers(self, headers):
+ if "kid" in headers:
+ self._validate_kid(headers["kid"])
+
+ def _validate_kid(self, kid):
+ if not isinstance(kid, str):
+ raise InvalidTokenError("Key ID header parameter must be a string")
+
+
+_jws_global_obj = PyJWS()
+encode = _jws_global_obj.encode
+decode_complete = _jws_global_obj.decode_complete
+decode = _jws_global_obj.decode
+register_algorithm = _jws_global_obj.register_algorithm
+unregister_algorithm = _jws_global_obj.unregister_algorithm
+get_unverified_header = _jws_global_obj.get_unverified_header
diff --git a/contrib/python/PyJWT/py3/jwt/api_jwt.py b/contrib/python/PyJWT/py3/jwt/api_jwt.py
new file mode 100644
index 0000000000..f3b55d360e
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/api_jwt.py
@@ -0,0 +1,221 @@
+import json
+from calendar import timegm
+from collections.abc import Iterable, Mapping
+from datetime import datetime, timedelta, timezone
+from typing import Any, Dict, List, Optional, Type, Union
+
+from . import api_jws
+from .exceptions import (
+ DecodeError,
+ ExpiredSignatureError,
+ ImmatureSignatureError,
+ InvalidAudienceError,
+ InvalidIssuedAtError,
+ InvalidIssuerError,
+ MissingRequiredClaimError,
+)
+
+
+class PyJWT:
+ def __init__(self, options=None):
+ if options is None:
+ options = {}
+ self.options = {**self._get_default_options(), **options}
+
+ @staticmethod
+ def _get_default_options() -> Dict[str, Union[bool, List[str]]]:
+ return {
+ "verify_signature": True,
+ "verify_exp": True,
+ "verify_nbf": True,
+ "verify_iat": True,
+ "verify_aud": True,
+ "verify_iss": True,
+ "require": [],
+ }
+
+ def encode(
+ self,
+ payload: Dict[str, Any],
+ key: str,
+ algorithm: Optional[str] = "HS256",
+ headers: Optional[Dict] = None,
+ json_encoder: Optional[Type[json.JSONEncoder]] = None,
+ ) -> str:
+ # Check that we get a mapping
+ if not isinstance(payload, Mapping):
+ raise TypeError(
+ "Expecting a mapping object, as JWT only supports "
+ "JSON objects as payloads."
+ )
+
+ # Payload
+ payload = payload.copy()
+ for time_claim in ["exp", "iat", "nbf"]:
+ # Convert datetime to a intDate value in known time-format claims
+ if isinstance(payload.get(time_claim), datetime):
+ payload[time_claim] = timegm(payload[time_claim].utctimetuple())
+
+ json_payload = json.dumps(
+ payload, separators=(",", ":"), cls=json_encoder
+ ).encode("utf-8")
+
+ return api_jws.encode(json_payload, key, algorithm, headers, json_encoder)
+
+ def decode_complete(
+ self,
+ jwt: str,
+ key: str = "",
+ algorithms: List[str] = None,
+ options: Dict = None,
+ **kwargs,
+ ) -> Dict[str, Any]:
+ if options is None:
+ options = {"verify_signature": True}
+ else:
+ options.setdefault("verify_signature", True)
+
+ if not options["verify_signature"]:
+ options.setdefault("verify_exp", False)
+ options.setdefault("verify_nbf", False)
+ options.setdefault("verify_iat", False)
+ options.setdefault("verify_aud", False)
+ options.setdefault("verify_iss", False)
+
+ if options["verify_signature"] and not algorithms:
+ raise DecodeError(
+ 'It is required that you pass in a value for the "algorithms" argument when calling decode().'
+ )
+
+ decoded = api_jws.decode_complete(
+ jwt,
+ key=key,
+ algorithms=algorithms,
+ options=options,
+ **kwargs,
+ )
+
+ try:
+ payload = json.loads(decoded["payload"])
+ except ValueError as e:
+ raise DecodeError("Invalid payload string: %s" % e)
+ if not isinstance(payload, dict):
+ raise DecodeError("Invalid payload string: must be a json object")
+
+ merged_options = {**self.options, **options}
+ self._validate_claims(payload, merged_options, **kwargs)
+
+ decoded["payload"] = payload
+ return decoded
+
+ def decode(
+ self,
+ jwt: str,
+ key: str = "",
+ algorithms: List[str] = None,
+ options: Dict = None,
+ **kwargs,
+ ) -> Dict[str, Any]:
+ decoded = self.decode_complete(jwt, key, algorithms, options, **kwargs)
+ return decoded["payload"]
+
+ def _validate_claims(
+ self, payload, options, audience=None, issuer=None, leeway=0, **kwargs
+ ):
+ if isinstance(leeway, timedelta):
+ leeway = leeway.total_seconds()
+
+ if not isinstance(audience, (bytes, str, type(None), Iterable)):
+ raise TypeError("audience must be a string, iterable, or None")
+
+ self._validate_required_claims(payload, options)
+
+ now = timegm(datetime.now(tz=timezone.utc).utctimetuple())
+
+ if "iat" in payload and options["verify_iat"]:
+ self._validate_iat(payload, now, leeway)
+
+ if "nbf" in payload and options["verify_nbf"]:
+ self._validate_nbf(payload, now, leeway)
+
+ if "exp" in payload and options["verify_exp"]:
+ self._validate_exp(payload, now, leeway)
+
+ if options["verify_iss"]:
+ self._validate_iss(payload, issuer)
+
+ if options["verify_aud"]:
+ self._validate_aud(payload, audience)
+
+ def _validate_required_claims(self, payload, options):
+ for claim in options["require"]:
+ if payload.get(claim) is None:
+ raise MissingRequiredClaimError(claim)
+
+ def _validate_iat(self, payload, now, leeway):
+ try:
+ int(payload["iat"])
+ except ValueError:
+ raise InvalidIssuedAtError("Issued At claim (iat) must be an integer.")
+
+ def _validate_nbf(self, payload, now, leeway):
+ try:
+ nbf = int(payload["nbf"])
+ except ValueError:
+ raise DecodeError("Not Before claim (nbf) must be an integer.")
+
+ if nbf > (now + leeway):
+ raise ImmatureSignatureError("The token is not yet valid (nbf)")
+
+ def _validate_exp(self, payload, now, leeway):
+ try:
+ exp = int(payload["exp"])
+ except ValueError:
+ raise DecodeError("Expiration Time claim (exp) must be an" " integer.")
+
+ if exp < (now - leeway):
+ raise ExpiredSignatureError("Signature has expired")
+
+ def _validate_aud(self, payload, audience):
+ if audience is None:
+ if "aud" not in payload or not payload["aud"]:
+ return
+ # Application did not specify an audience, but
+ # the token has the 'aud' claim
+ raise InvalidAudienceError("Invalid audience")
+
+ if "aud" not in payload or not payload["aud"]:
+ # Application specified an audience, but it could not be
+ # verified since the token does not contain a claim.
+ raise MissingRequiredClaimError("aud")
+
+ audience_claims = payload["aud"]
+
+ if isinstance(audience_claims, str):
+ audience_claims = [audience_claims]
+ if not isinstance(audience_claims, list):
+ raise InvalidAudienceError("Invalid claim format in token")
+ if any(not isinstance(c, str) for c in audience_claims):
+ raise InvalidAudienceError("Invalid claim format in token")
+
+ if isinstance(audience, str):
+ audience = [audience]
+
+ if all(aud not in audience_claims for aud in audience):
+ raise InvalidAudienceError("Invalid audience")
+
+ def _validate_iss(self, payload, issuer):
+ if issuer is None:
+ return
+
+ if "iss" not in payload:
+ raise MissingRequiredClaimError("iss")
+
+ if payload["iss"] != issuer:
+ raise InvalidIssuerError("Invalid issuer")
+
+
+_jwt_global_obj = PyJWT()
+encode = _jwt_global_obj.encode
+decode_complete = _jwt_global_obj.decode_complete
+decode = _jwt_global_obj.decode
diff --git a/contrib/python/PyJWT/py3/jwt/exceptions.py b/contrib/python/PyJWT/py3/jwt/exceptions.py
new file mode 100644
index 0000000000..308899aa6a
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/exceptions.py
@@ -0,0 +1,66 @@
+class PyJWTError(Exception):
+ """
+ Base class for all exceptions
+ """
+
+ pass
+
+
+class InvalidTokenError(PyJWTError):
+ pass
+
+
+class DecodeError(InvalidTokenError):
+ pass
+
+
+class InvalidSignatureError(DecodeError):
+ pass
+
+
+class ExpiredSignatureError(InvalidTokenError):
+ pass
+
+
+class InvalidAudienceError(InvalidTokenError):
+ pass
+
+
+class InvalidIssuerError(InvalidTokenError):
+ pass
+
+
+class InvalidIssuedAtError(InvalidTokenError):
+ pass
+
+
+class ImmatureSignatureError(InvalidTokenError):
+ pass
+
+
+class InvalidKeyError(PyJWTError):
+ pass
+
+
+class InvalidAlgorithmError(InvalidTokenError):
+ pass
+
+
+class MissingRequiredClaimError(InvalidTokenError):
+ def __init__(self, claim):
+ self.claim = claim
+
+ def __str__(self):
+ return 'Token is missing the "%s" claim' % self.claim
+
+
+class PyJWKError(PyJWTError):
+ pass
+
+
+class PyJWKSetError(PyJWTError):
+ pass
+
+
+class PyJWKClientError(PyJWTError):
+ pass
diff --git a/contrib/python/PyJWT/py3/jwt/help.py b/contrib/python/PyJWT/py3/jwt/help.py
new file mode 100644
index 0000000000..d8f2302421
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/help.py
@@ -0,0 +1,60 @@
+import json
+import platform
+import sys
+
+from . import __version__ as pyjwt_version
+
+try:
+ import cryptography
+except ModuleNotFoundError:
+ cryptography = None # type: ignore
+
+
+def info():
+ """
+ Generate information for a bug report.
+ Based on the requests package help utility module.
+ """
+ try:
+ platform_info = {
+ "system": platform.system(),
+ "release": platform.release(),
+ }
+ except OSError:
+ platform_info = {"system": "Unknown", "release": "Unknown"}
+
+ implementation = platform.python_implementation()
+
+ if implementation == "CPython":
+ implementation_version = platform.python_version()
+ elif implementation == "PyPy":
+ implementation_version = "{}.{}.{}".format(
+ sys.pypy_version_info.major,
+ sys.pypy_version_info.minor,
+ sys.pypy_version_info.micro,
+ )
+ if sys.pypy_version_info.releaselevel != "final":
+ implementation_version = "".join(
+ [implementation_version, sys.pypy_version_info.releaselevel]
+ )
+ else:
+ implementation_version = "Unknown"
+
+ return {
+ "platform": platform_info,
+ "implementation": {
+ "name": implementation,
+ "version": implementation_version,
+ },
+ "cryptography": {"version": getattr(cryptography, "__version__", "")},
+ "pyjwt": {"version": pyjwt_version},
+ }
+
+
+def main():
+ """Pretty-print the bug information as JSON."""
+ print(json.dumps(info(), sort_keys=True, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/contrib/python/PyJWT/py3/jwt/jwks_client.py b/contrib/python/PyJWT/py3/jwt/jwks_client.py
new file mode 100644
index 0000000000..767b7179db
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/jwks_client.py
@@ -0,0 +1,59 @@
+import json
+import urllib.request
+from functools import lru_cache
+from typing import Any, List
+
+from .api_jwk import PyJWK, PyJWKSet
+from .api_jwt import decode_complete as decode_token
+from .exceptions import PyJWKClientError
+
+
+class PyJWKClient:
+ def __init__(self, uri: str, cache_keys: bool = True, max_cached_keys: int = 16):
+ self.uri = uri
+ if cache_keys:
+ # Cache signing keys
+ # Ignore mypy (https://github.com/python/mypy/issues/2427)
+ self.get_signing_key = lru_cache(maxsize=max_cached_keys)(self.get_signing_key) # type: ignore
+
+ def fetch_data(self) -> Any:
+ with urllib.request.urlopen(self.uri) as response:
+ return json.load(response)
+
+ def get_jwk_set(self) -> PyJWKSet:
+ data = self.fetch_data()
+ return PyJWKSet.from_dict(data)
+
+ def get_signing_keys(self) -> List[PyJWK]:
+ jwk_set = self.get_jwk_set()
+ signing_keys = [
+ jwk_set_key
+ for jwk_set_key in jwk_set.keys
+ if jwk_set_key.public_key_use in ["sig", None] and jwk_set_key.key_id
+ ]
+
+ if not signing_keys:
+ raise PyJWKClientError("The JWKS endpoint did not contain any signing keys")
+
+ return signing_keys
+
+ def get_signing_key(self, kid: str) -> PyJWK:
+ signing_keys = self.get_signing_keys()
+ signing_key = None
+
+ for key in signing_keys:
+ if key.key_id == kid:
+ signing_key = key
+ break
+
+ if not signing_key:
+ raise PyJWKClientError(
+ f'Unable to find a signing key that matches: "{kid}"'
+ )
+
+ return signing_key
+
+ def get_signing_key_from_jwt(self, token: str) -> PyJWK:
+ unverified = decode_token(token, options={"verify_signature": False})
+ header = unverified["header"]
+ return self.get_signing_key(header.get("kid"))
diff --git a/contrib/python/PyJWT/py3/jwt/py.typed b/contrib/python/PyJWT/py3/jwt/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/py.typed
diff --git a/contrib/python/PyJWT/py3/jwt/utils.py b/contrib/python/PyJWT/py3/jwt/utils.py
new file mode 100644
index 0000000000..9dde10cf8e
--- /dev/null
+++ b/contrib/python/PyJWT/py3/jwt/utils.py
@@ -0,0 +1,99 @@
+import base64
+import binascii
+from typing import Any, Union
+
+try:
+ from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurve
+ from cryptography.hazmat.primitives.asymmetric.utils import (
+ decode_dss_signature,
+ encode_dss_signature,
+ )
+except ModuleNotFoundError:
+ EllipticCurve = Any # type: ignore
+
+
+def force_bytes(value: Union[str, bytes]) -> bytes:
+ if isinstance(value, str):
+ return value.encode("utf-8")
+ elif isinstance(value, bytes):
+ return value
+ else:
+ raise TypeError("Expected a string value")
+
+
+def base64url_decode(input: Union[str, bytes]) -> bytes:
+ if isinstance(input, str):
+ input = input.encode("ascii")
+
+ rem = len(input) % 4
+
+ if rem > 0:
+ input += b"=" * (4 - rem)
+
+ return base64.urlsafe_b64decode(input)
+
+
+def base64url_encode(input: bytes) -> bytes:
+ return base64.urlsafe_b64encode(input).replace(b"=", b"")
+
+
+def to_base64url_uint(val: int) -> bytes:
+ if val < 0:
+ raise ValueError("Must be a positive integer")
+
+ int_bytes = bytes_from_int(val)
+
+ if len(int_bytes) == 0:
+ int_bytes = b"\x00"
+
+ return base64url_encode(int_bytes)
+
+
+def from_base64url_uint(val: Union[str, bytes]) -> int:
+ if isinstance(val, str):
+ val = val.encode("ascii")
+
+ data = base64url_decode(val)
+ return int.from_bytes(data, byteorder="big")
+
+
+def number_to_bytes(num: int, num_bytes: int) -> bytes:
+ padded_hex = "%0*x" % (2 * num_bytes, num)
+ return binascii.a2b_hex(padded_hex.encode("ascii"))
+
+
+def bytes_to_number(string: bytes) -> int:
+ return int(binascii.b2a_hex(string), 16)
+
+
+def bytes_from_int(val: int) -> bytes:
+ remaining = val
+ byte_length = 0
+
+ while remaining != 0:
+ remaining >>= 8
+ byte_length += 1
+
+ return val.to_bytes(byte_length, "big", signed=False)
+
+
+def der_to_raw_signature(der_sig: bytes, curve: EllipticCurve) -> bytes:
+ num_bits = curve.key_size
+ num_bytes = (num_bits + 7) // 8
+
+ r, s = decode_dss_signature(der_sig)
+
+ return number_to_bytes(r, num_bytes) + number_to_bytes(s, num_bytes)
+
+
+def raw_to_der_signature(raw_sig: bytes, curve: EllipticCurve) -> bytes:
+ num_bits = curve.key_size
+ num_bytes = (num_bits + 7) // 8
+
+ if len(raw_sig) != 2 * num_bytes:
+ raise ValueError("Invalid signature")
+
+ r = bytes_to_number(raw_sig[:num_bytes])
+ s = bytes_to_number(raw_sig[num_bytes:])
+
+ return encode_dss_signature(r, s)
diff --git a/contrib/python/PyJWT/py3/ya.make b/contrib/python/PyJWT/py3/ya.make
new file mode 100644
index 0000000000..0cbee2bb2e
--- /dev/null
+++ b/contrib/python/PyJWT/py3/ya.make
@@ -0,0 +1,39 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(2.3.0)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/python/cryptography
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ jwt.contrib.*
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ jwt/__init__.py
+ jwt/algorithms.py
+ jwt/api_jwk.py
+ jwt/api_jws.py
+ jwt/api_jwt.py
+ jwt/exceptions.py
+ jwt/help.py
+ jwt/jwks_client.py
+ jwt/utils.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/PyJWT/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+ jwt/py.typed
+)
+
+END()
diff --git a/contrib/python/PyJWT/ya.make b/contrib/python/PyJWT/ya.make
new file mode 100644
index 0000000000..3ccb0a62dc
--- /dev/null
+++ b/contrib/python/PyJWT/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/PyJWT/py2)
+ELSE()
+ PEERDIR(contrib/python/PyJWT/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/PySocks/py2/.dist-info/METADATA b/contrib/python/PySocks/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..ae2ae3419c
--- /dev/null
+++ b/contrib/python/PySocks/py2/.dist-info/METADATA
@@ -0,0 +1,321 @@
+Metadata-Version: 2.1
+Name: PySocks
+Version: 1.7.1
+Summary: A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information.
+Home-page: https://github.com/Anorov/PySocks
+Author: Anorov
+Author-email: anorov.vorona@gmail.com
+License: BSD
+Keywords: socks,proxy
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Description-Content-Type: text/markdown
+
+PySocks
+=======
+
+PySocks lets you send traffic through SOCKS and HTTP proxy servers. It is a modern fork of [SocksiPy](http://socksipy.sourceforge.net/) with bug fixes and extra features.
+
+Acts as a drop-in replacement to the socket module. Seamlessly configure SOCKS proxies for any socket object by calling `socket_object.set_proxy()`.
+
+----------------
+
+Features
+========
+
+* SOCKS proxy client for Python 2.7 and 3.4+
+* TCP supported
+* UDP mostly supported (issues may occur in some edge cases)
+* HTTP proxy client included but not supported or recommended (you should use urllib2's or requests' own HTTP proxy interface)
+* urllib2 handler included. `pip install` / `setup.py install` will automatically install the `sockshandler` module.
+
+Installation
+============
+
+ pip install PySocks
+
+Or download the tarball / `git clone` and...
+
+ python setup.py install
+
+These will install both the `socks` and `sockshandler` modules.
+
+Alternatively, include just `socks.py` in your project.
+
+--------------------------------------------
+
+*Warning:* PySocks/SocksiPy only supports HTTP proxies that use CONNECT tunneling. Certain HTTP proxies may not work with this library. If you wish to use HTTP (not SOCKS) proxies, it is recommended that you rely on your HTTP client's native proxy support (`proxies` dict for `requests`, or `urllib2.ProxyHandler` for `urllib2`) instead.
+
+--------------------------------------------
+
+Usage
+=====
+
+## socks.socksocket ##
+
+ import socks
+
+ s = socks.socksocket() # Same API as socket.socket in the standard lib
+
+ s.set_proxy(socks.SOCKS5, "localhost") # SOCKS4 and SOCKS5 use port 1080 by default
+ # Or
+ s.set_proxy(socks.SOCKS4, "localhost", 4444)
+ # Or
+ s.set_proxy(socks.HTTP, "5.5.5.5", 8888)
+
+ # Can be treated identical to a regular socket object
+ s.connect(("www.somesite.com", 80))
+ s.sendall("GET / HTTP/1.1 ...")
+ print s.recv(4096)
+
+## Monkeypatching ##
+
+To monkeypatch the entire standard library with a single default proxy:
+
+ import urllib2
+ import socket
+ import socks
+
+ socks.set_default_proxy(socks.SOCKS5, "localhost")
+ socket.socket = socks.socksocket
+
+ urllib2.urlopen("http://www.somesite.com/") # All requests will pass through the SOCKS proxy
+
+Note that monkeypatching may not work for all standard modules or for all third party modules, and generally isn't recommended. Monkeypatching is usually an anti-pattern in Python.
+
+## urllib2 Handler ##
+
+Example use case with the `sockshandler` urllib2 handler. Note that you must import both `socks` and `sockshandler`, as the handler is its own module separate from PySocks. The module is included in the PyPI package.
+
+ import urllib2
+ import socks
+ from sockshandler import SocksiPyHandler
+
+ opener = urllib2.build_opener(SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050))
+ print opener.open("http://www.somesite.com/") # All requests made by the opener will pass through the SOCKS proxy
+
+--------------------------------------------
+
+Original SocksiPy README attached below, amended to reflect API changes.
+
+--------------------------------------------
+
+SocksiPy
+
+A Python SOCKS module.
+
+(C) 2006 Dan-Haim. All rights reserved.
+
+See LICENSE file for details.
+
+
+*WHAT IS A SOCKS PROXY?*
+
+A SOCKS proxy is a proxy server at the TCP level. In other words, it acts as
+a tunnel, relaying all traffic going through it without modifying it.
+SOCKS proxies can be used to relay traffic using any network protocol that
+uses TCP.
+
+*WHAT IS SOCKSIPY?*
+
+This Python module allows you to create TCP connections through a SOCKS
+proxy without any special effort.
+It also supports relaying UDP packets with a SOCKS5 proxy.
+
+*PROXY COMPATIBILITY*
+
+SocksiPy is compatible with three different types of proxies:
+
+1. SOCKS Version 4 (SOCKS4), including the SOCKS4a extension.
+2. SOCKS Version 5 (SOCKS5).
+3. HTTP Proxies which support tunneling using the CONNECT method.
+
+*SYSTEM REQUIREMENTS*
+
+Being written in Python, SocksiPy can run on any platform that has a Python
+interpreter and TCP/IP support.
+This module has been tested with Python 2.3 and should work with greater versions
+just as well.
+
+
+INSTALLATION
+-------------
+
+Simply copy the file "socks.py" to your Python's `lib/site-packages` directory,
+and you're ready to go. [Editor's note: it is better to use `python setup.py install` for PySocks]
+
+
+USAGE
+------
+
+First load the socks module with the command:
+
+ >>> import socks
+ >>>
+
+The socks module provides a class called `socksocket`, which is the base to all of the module's functionality.
+
+The `socksocket` object has the same initialization parameters as the normal socket
+object to ensure maximal compatibility, however it should be noted that `socksocket` will only function with family being `AF_INET` and
+type being either `SOCK_STREAM` or `SOCK_DGRAM`.
+Generally, it is best to initialize the `socksocket` object with no parameters
+
+ >>> s = socks.socksocket()
+ >>>
+
+The `socksocket` object has an interface which is very similar to socket's (in fact
+the `socksocket` class is derived from socket) with a few extra methods.
+To select the proxy server you would like to use, use the `set_proxy` method, whose
+syntax is:
+
+ set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
+
+Explanation of the parameters:
+
+`proxy_type` - The type of the proxy server. This can be one of three possible
+choices: `PROXY_TYPE_SOCKS4`, `PROXY_TYPE_SOCKS5` and `PROXY_TYPE_HTTP` for SOCKS4,
+SOCKS5 and HTTP servers respectively. `SOCKS4`, `SOCKS5`, and `HTTP` are all aliases, respectively.
+
+`addr` - The IP address or DNS name of the proxy server.
+
+`port` - The port of the proxy server. Defaults to 1080 for socks and 8080 for http.
+
+`rdns` - This is a boolean flag that modifies the behavior regarding DNS resolving.
+If it is set to True, DNS resolving will be performed remotely, on the server.
+If it is set to False, DNS resolving will be performed locally. Please note that
+setting this to True with SOCKS4 servers actually use an extension to the protocol,
+called SOCKS4a, which may not be supported on all servers (SOCKS5 and http servers
+always support DNS). The default is True.
+
+`username` - For SOCKS5 servers, this allows simple username / password authentication
+with the server. For SOCKS4 servers, this parameter will be sent as the userid.
+This parameter is ignored if an HTTP server is being used. If it is not provided,
+authentication will not be used (servers may accept unauthenticated requests).
+
+`password` - This parameter is valid only for SOCKS5 servers and specifies the
+respective password for the username provided.
+
+Example of usage:
+
+ >>> s.set_proxy(socks.SOCKS5, "socks.example.com") # uses default port 1080
+ >>> s.set_proxy(socks.SOCKS4, "socks.test.com", 1081)
+
+After the set_proxy method has been called, simply call the connect method with the
+traditional parameters to establish a connection through the proxy:
+
+ >>> s.connect(("www.sourceforge.net", 80))
+ >>>
+
+Connection will take a bit longer to allow negotiation with the proxy server.
+Please note that calling connect without calling `set_proxy` earlier will connect
+without a proxy (just like a regular socket).
+
+Errors: Any errors in the connection process will trigger exceptions. The exception
+may either be generated by the underlying socket layer or may be custom module
+exceptions, whose details follow:
+
+class `ProxyError` - This is a base exception class. It is not raised directly but
+rather all other exception classes raised by this module are derived from it.
+This allows an easy way to catch all proxy-related errors. It descends from `IOError`.
+
+All `ProxyError` exceptions have an attribute `socket_err`, which will contain either a
+caught `socket.error` exception, or `None` if there wasn't any.
+
+class `GeneralProxyError` - When thrown, it indicates a problem which does not fall
+into another category.
+
+* `Sent invalid data` - This error means that unexpected data has been received from
+the server. The most common reason is that the server specified as the proxy is
+not really a SOCKS4/SOCKS5/HTTP proxy, or maybe the proxy type specified is wrong.
+
+* `Connection closed unexpectedly` - The proxy server unexpectedly closed the connection.
+This may indicate that the proxy server is experiencing network or software problems.
+
+* `Bad proxy type` - This will be raised if the type of the proxy supplied to the
+set_proxy function was not one of `SOCKS4`/`SOCKS5`/`HTTP`.
+
+* `Bad input` - This will be raised if the `connect()` method is called with bad input
+parameters.
+
+class `SOCKS5AuthError` - This indicates that the connection through a SOCKS5 server
+failed due to an authentication problem.
+
+* `Authentication is required` - This will happen if you use a SOCKS5 server which
+requires authentication without providing a username / password at all.
+
+* `All offered authentication methods were rejected` - This will happen if the proxy
+requires a special authentication method which is not supported by this module.
+
+* `Unknown username or invalid password` - Self descriptive.
+
+class `SOCKS5Error` - This will be raised for SOCKS5 errors which are not related to
+authentication.
+The parameter is a tuple containing a code, as given by the server,
+and a description of the
+error. The possible errors, according to the RFC, are:
+
+* `0x01` - General SOCKS server failure - If for any reason the proxy server is unable to
+fulfill your request (internal server error).
+* `0x02` - connection not allowed by ruleset - If the address you're trying to connect to
+is blacklisted on the server or requires authentication.
+* `0x03` - Network unreachable - The target could not be contacted. A router on the network
+had replied with a destination net unreachable error.
+* `0x04` - Host unreachable - The target could not be contacted. A router on the network
+had replied with a destination host unreachable error.
+* `0x05` - Connection refused - The target server has actively refused the connection
+(the requested port is closed).
+* `0x06` - TTL expired - The TTL value of the SYN packet from the proxy to the target server
+has expired. This usually means that there are network problems causing the packet
+to be caught in a router-to-router "ping-pong".
+* `0x07` - Command not supported - For instance if the server does not support UDP.
+* `0x08` - Address type not supported - The client has provided an invalid address type.
+When using this module, this error should not occur.
+
+class `SOCKS4Error` - This will be raised for SOCKS4 errors. The parameter is a tuple
+containing a code and a description of the error, as given by the server. The
+possible error, according to the specification are:
+
+* `0x5B` - Request rejected or failed - Will be raised in the event of a failure for any
+reason other than the two mentioned next.
+* `0x5C` - request rejected because SOCKS server cannot connect to identd on the client -
+The Socks server had tried an ident lookup on your computer and has failed. In this
+case you should run an identd server and/or configure your firewall to allow incoming
+connections to local port 113 from the remote server.
+* `0x5D` - request rejected because the client program and identd report different user-ids -
+The Socks server had performed an ident lookup on your computer and has received a
+different userid than the one you have provided. Change your userid (through the
+username parameter of the set_proxy method) to match and try again.
+
+class `HTTPError` - This will be raised for HTTP errors. The message will contain
+the HTTP status code and provided error message.
+
+After establishing the connection, the object behaves like a standard socket.
+
+Methods like `makefile()` and `settimeout()` should behave just like regular sockets.
+Call the `close()` method to close the connection.
+
+In addition to the `socksocket` class, an additional function worth mentioning is the
+`set_default_proxy` function. The parameters are the same as the `set_proxy` method.
+This function will set default proxy settings for newly created `socksocket` objects,
+in which the proxy settings haven't been changed via the `set_proxy` method.
+This is quite useful if you wish to force 3rd party modules to use a SOCKS proxy,
+by overriding the socket object.
+For example:
+
+ >>> socks.set_default_proxy(socks.SOCKS5, "socks.example.com")
+ >>> socket.socket = socks.socksocket
+ >>> urllib.urlopen("http://www.sourceforge.net/")
+
+
+PROBLEMS
+---------
+
+Please open a GitHub issue at https://github.com/Anorov/PySocks
+
+
diff --git a/contrib/python/PySocks/py2/.dist-info/top_level.txt b/contrib/python/PySocks/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..9476163ae5
--- /dev/null
+++ b/contrib/python/PySocks/py2/.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+socks
+sockshandler
diff --git a/contrib/python/PySocks/py2/LICENSE b/contrib/python/PySocks/py2/LICENSE
new file mode 100644
index 0000000000..04b6b1f37c
--- /dev/null
+++ b/contrib/python/PySocks/py2/LICENSE
@@ -0,0 +1,22 @@
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+ to endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
diff --git a/contrib/python/PySocks/py2/README.md b/contrib/python/PySocks/py2/README.md
new file mode 100644
index 0000000000..0035b7d1a4
--- /dev/null
+++ b/contrib/python/PySocks/py2/README.md
@@ -0,0 +1,300 @@
+PySocks
+=======
+
+PySocks lets you send traffic through SOCKS and HTTP proxy servers. It is a modern fork of [SocksiPy](http://socksipy.sourceforge.net/) with bug fixes and extra features.
+
+Acts as a drop-in replacement to the socket module. Seamlessly configure SOCKS proxies for any socket object by calling `socket_object.set_proxy()`.
+
+----------------
+
+Features
+========
+
+* SOCKS proxy client for Python 2.7 and 3.4+
+* TCP supported
+* UDP mostly supported (issues may occur in some edge cases)
+* HTTP proxy client included but not supported or recommended (you should use urllib2's or requests' own HTTP proxy interface)
+* urllib2 handler included. `pip install` / `setup.py install` will automatically install the `sockshandler` module.
+
+Installation
+============
+
+ pip install PySocks
+
+Or download the tarball / `git clone` and...
+
+ python setup.py install
+
+These will install both the `socks` and `sockshandler` modules.
+
+Alternatively, include just `socks.py` in your project.
+
+--------------------------------------------
+
+*Warning:* PySocks/SocksiPy only supports HTTP proxies that use CONNECT tunneling. Certain HTTP proxies may not work with this library. If you wish to use HTTP (not SOCKS) proxies, it is recommended that you rely on your HTTP client's native proxy support (`proxies` dict for `requests`, or `urllib2.ProxyHandler` for `urllib2`) instead.
+
+--------------------------------------------
+
+Usage
+=====
+
+## socks.socksocket ##
+
+ import socks
+
+ s = socks.socksocket() # Same API as socket.socket in the standard lib
+
+ s.set_proxy(socks.SOCKS5, "localhost") # SOCKS4 and SOCKS5 use port 1080 by default
+ # Or
+ s.set_proxy(socks.SOCKS4, "localhost", 4444)
+ # Or
+ s.set_proxy(socks.HTTP, "5.5.5.5", 8888)
+
+ # Can be treated identical to a regular socket object
+ s.connect(("www.somesite.com", 80))
+ s.sendall("GET / HTTP/1.1 ...")
+ print s.recv(4096)
+
+## Monkeypatching ##
+
+To monkeypatch the entire standard library with a single default proxy:
+
+ import urllib2
+ import socket
+ import socks
+
+ socks.set_default_proxy(socks.SOCKS5, "localhost")
+ socket.socket = socks.socksocket
+
+ urllib2.urlopen("http://www.somesite.com/") # All requests will pass through the SOCKS proxy
+
+Note that monkeypatching may not work for all standard modules or for all third party modules, and generally isn't recommended. Monkeypatching is usually an anti-pattern in Python.
+
+## urllib2 Handler ##
+
+Example use case with the `sockshandler` urllib2 handler. Note that you must import both `socks` and `sockshandler`, as the handler is its own module separate from PySocks. The module is included in the PyPI package.
+
+ import urllib2
+ import socks
+ from sockshandler import SocksiPyHandler
+
+ opener = urllib2.build_opener(SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050))
+ print opener.open("http://www.somesite.com/") # All requests made by the opener will pass through the SOCKS proxy
+
+--------------------------------------------
+
+Original SocksiPy README attached below, amended to reflect API changes.
+
+--------------------------------------------
+
+SocksiPy
+
+A Python SOCKS module.
+
+(C) 2006 Dan-Haim. All rights reserved.
+
+See LICENSE file for details.
+
+
+*WHAT IS A SOCKS PROXY?*
+
+A SOCKS proxy is a proxy server at the TCP level. In other words, it acts as
+a tunnel, relaying all traffic going through it without modifying it.
+SOCKS proxies can be used to relay traffic using any network protocol that
+uses TCP.
+
+*WHAT IS SOCKSIPY?*
+
+This Python module allows you to create TCP connections through a SOCKS
+proxy without any special effort.
+It also supports relaying UDP packets with a SOCKS5 proxy.
+
+*PROXY COMPATIBILITY*
+
+SocksiPy is compatible with three different types of proxies:
+
+1. SOCKS Version 4 (SOCKS4), including the SOCKS4a extension.
+2. SOCKS Version 5 (SOCKS5).
+3. HTTP Proxies which support tunneling using the CONNECT method.
+
+*SYSTEM REQUIREMENTS*
+
+Being written in Python, SocksiPy can run on any platform that has a Python
+interpreter and TCP/IP support.
+This module has been tested with Python 2.3 and should work with greater versions
+just as well.
+
+
+INSTALLATION
+-------------
+
+Simply copy the file "socks.py" to your Python's `lib/site-packages` directory,
+and you're ready to go. [Editor's note: it is better to use `python setup.py install` for PySocks]
+
+
+USAGE
+------
+
+First load the socks module with the command:
+
+ >>> import socks
+ >>>
+
+The socks module provides a class called `socksocket`, which is the base to all of the module's functionality.
+
+The `socksocket` object has the same initialization parameters as the normal socket
+object to ensure maximal compatibility, however it should be noted that `socksocket` will only function with family being `AF_INET` and
+type being either `SOCK_STREAM` or `SOCK_DGRAM`.
+Generally, it is best to initialize the `socksocket` object with no parameters
+
+ >>> s = socks.socksocket()
+ >>>
+
+The `socksocket` object has an interface which is very similar to socket's (in fact
+the `socksocket` class is derived from socket) with a few extra methods.
+To select the proxy server you would like to use, use the `set_proxy` method, whose
+syntax is:
+
+ set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
+
+Explanation of the parameters:
+
+`proxy_type` - The type of the proxy server. This can be one of three possible
+choices: `PROXY_TYPE_SOCKS4`, `PROXY_TYPE_SOCKS5` and `PROXY_TYPE_HTTP` for SOCKS4,
+SOCKS5 and HTTP servers respectively. `SOCKS4`, `SOCKS5`, and `HTTP` are all aliases, respectively.
+
+`addr` - The IP address or DNS name of the proxy server.
+
+`port` - The port of the proxy server. Defaults to 1080 for socks and 8080 for http.
+
+`rdns` - This is a boolean flag that modifies the behavior regarding DNS resolving.
+If it is set to True, DNS resolving will be performed remotely, on the server.
+If it is set to False, DNS resolving will be performed locally. Please note that
+setting this to True with SOCKS4 servers actually uses an extension to the protocol,
+called SOCKS4a, which may not be supported on all servers (SOCKS5 and http servers
+always support DNS). The default is True.
+
+`username` - For SOCKS5 servers, this allows simple username / password authentication
+with the server. For SOCKS4 servers, this parameter will be sent as the userid.
+This parameter is ignored if an HTTP server is being used. If it is not provided,
+authentication will not be used (servers may accept unauthenticated requests).
+
+`password` - This parameter is valid only for SOCKS5 servers and specifies the
+respective password for the username provided.
+
+Example of usage:
+
+ >>> s.set_proxy(socks.SOCKS5, "socks.example.com") # uses default port 1080
+ >>> s.set_proxy(socks.SOCKS4, "socks.test.com", 1081)
+
+After the set_proxy method has been called, simply call the connect method with the
+traditional parameters to establish a connection through the proxy:
+
+ >>> s.connect(("www.sourceforge.net", 80))
+ >>>
+
+Connection will take a bit longer to allow negotiation with the proxy server.
+Please note that calling connect without calling `set_proxy` earlier will connect
+without a proxy (just like a regular socket).
+
+Errors: Any errors in the connection process will trigger exceptions. The exception
+may either be generated by the underlying socket layer or may be custom module
+exceptions, whose details follow:
+
+class `ProxyError` - This is a base exception class. It is not raised directly but
+rather all other exception classes raised by this module are derived from it.
+This allows an easy way to catch all proxy-related errors. It descends from `IOError`.
+
+All `ProxyError` exceptions have an attribute `socket_err`, which will contain either a
+caught `socket.error` exception, or `None` if there wasn't any.
+
+class `GeneralProxyError` - When thrown, it indicates a problem which does not fall
+into another category.
+
+* `Sent invalid data` - This error means that unexpected data has been received from
+the server. The most common reason is that the server specified as the proxy is
+not really a SOCKS4/SOCKS5/HTTP proxy, or maybe the proxy type specified is wrong.
+
+* `Connection closed unexpectedly` - The proxy server unexpectedly closed the connection.
+This may indicate that the proxy server is experiencing network or software problems.
+
+* `Bad proxy type` - This will be raised if the type of the proxy supplied to the
+set_proxy function was not one of `SOCKS4`/`SOCKS5`/`HTTP`.
+
+* `Bad input` - This will be raised if the `connect()` method is called with bad input
+parameters.
+
+class `SOCKS5AuthError` - This indicates that the connection through a SOCKS5 server
+failed due to an authentication problem.
+
+* `Authentication is required` - This will happen if you use a SOCKS5 server which
+requires authentication without providing a username / password at all.
+
+* `All offered authentication methods were rejected` - This will happen if the proxy
+requires a special authentication method which is not supported by this module.
+
+* `Unknown username or invalid password` - Self descriptive.
+
+class `SOCKS5Error` - This will be raised for SOCKS5 errors which are not related to
+authentication.
+The parameter is a tuple containing a code, as given by the server,
+and a description of the
+error. The possible errors, according to the RFC, are:
+
+* `0x01` - General SOCKS server failure - If for any reason the proxy server is unable to
+fulfill your request (internal server error).
+* `0x02` - connection not allowed by ruleset - If the address you're trying to connect to
+is blacklisted on the server or requires authentication.
+* `0x03` - Network unreachable - The target could not be contacted. A router on the network
+had replied with a destination net unreachable error.
+* `0x04` - Host unreachable - The target could not be contacted. A router on the network
+had replied with a destination host unreachable error.
+* `0x05` - Connection refused - The target server has actively refused the connection
+(the requested port is closed).
+* `0x06` - TTL expired - The TTL value of the SYN packet from the proxy to the target server
+has expired. This usually means that there are network problems causing the packet
+to be caught in a router-to-router "ping-pong".
+* `0x07` - Command not supported - For instance if the server does not support UDP.
+* `0x08` - Address type not supported - The client has provided an invalid address type.
+When using this module, this error should not occur.
+
+class `SOCKS4Error` - This will be raised for SOCKS4 errors. The parameter is a tuple
+containing a code and a description of the error, as given by the server. The
+possible errors, according to the specification, are:
+
+* `0x5B` - Request rejected or failed - Will be raised in the event of a failure for any
+reason other than the two mentioned next.
+* `0x5C` - request rejected because SOCKS server cannot connect to identd on the client -
+The Socks server had tried an ident lookup on your computer and has failed. In this
+case you should run an identd server and/or configure your firewall to allow incoming
+connections to local port 113 from the remote server.
+* `0x5D` - request rejected because the client program and identd report different user-ids -
+The Socks server had performed an ident lookup on your computer and has received a
+different userid than the one you have provided. Change your userid (through the
+username parameter of the set_proxy method) to match and try again.
+
+class `HTTPError` - This will be raised for HTTP errors. The message will contain
+the HTTP status code and provided error message.
+
+After establishing the connection, the object behaves like a standard socket.
+
+Methods like `makefile()` and `settimeout()` should behave just like regular sockets.
+Call the `close()` method to close the connection.
+
+In addition to the `socksocket` class, an additional function worth mentioning is the
+`set_default_proxy` function. The parameters are the same as the `set_proxy` method.
+This function will set default proxy settings for newly created `socksocket` objects,
+in which the proxy settings haven't been changed via the `set_proxy` method.
+This is quite useful if you wish to force 3rd party modules to use a SOCKS proxy,
+by overriding the socket object.
+For example:
+
+ >>> socks.set_default_proxy(socks.SOCKS5, "socks.example.com")
+ >>> socket.socket = socks.socksocket
+ >>> urllib.urlopen("http://www.sourceforge.net/")
+
+
+PROBLEMS
+---------
+
+Please open a GitHub issue at https://github.com/Anorov/PySocks
diff --git a/contrib/python/PySocks/py2/socks.py b/contrib/python/PySocks/py2/socks.py
new file mode 100644
index 0000000000..83b1435dfa
--- /dev/null
+++ b/contrib/python/PySocks/py2/socks.py
@@ -0,0 +1,847 @@
+from base64 import b64encode
+try:
+ from collections.abc import Callable
+except ImportError:
+ from collections import Callable
+from errno import EOPNOTSUPP, EINVAL, EAGAIN
+import functools
+from io import BytesIO
+import logging
+import os
+from os import SEEK_CUR
+import socket
+import struct
+import sys
+
+__version__ = "1.7.1"
+
+
+if os.name == "nt" and sys.version_info < (3, 0):
+ try:
+ import win_inet_pton
+ except ImportError:
+ raise ImportError(
+ "To run PySocks on Windows you must install win_inet_pton")
+
+log = logging.getLogger(__name__)
+
+PROXY_TYPE_SOCKS4 = SOCKS4 = 1
+PROXY_TYPE_SOCKS5 = SOCKS5 = 2
+PROXY_TYPE_HTTP = HTTP = 3
+
+PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
+PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))
+
+_orgsocket = _orig_socket = socket.socket
+
+
+def set_self_blocking(function):
+
+ @functools.wraps(function)
+ def wrapper(*args, **kwargs):
+ self = args[0]
+ try:
+ _is_blocking = self.gettimeout()
+ if _is_blocking == 0:
+ self.setblocking(True)
+ return function(*args, **kwargs)
+ except Exception as e:
+ raise
+ finally:
+ # set orgin blocking
+ if _is_blocking == 0:
+ self.setblocking(False)
+ return wrapper
+
+
+class ProxyError(IOError):
+ """Socket_err contains original socket.error exception."""
+ def __init__(self, msg, socket_err=None):
+ self.msg = msg
+ self.socket_err = socket_err
+
+ if socket_err:
+ self.msg += ": {}".format(socket_err)
+
+ def __str__(self):
+ return self.msg
+
+
+class GeneralProxyError(ProxyError):
+ pass
+
+
+class ProxyConnectionError(ProxyError):
+ pass
+
+
+class SOCKS5AuthError(ProxyError):
+ pass
+
+
+class SOCKS5Error(ProxyError):
+ pass
+
+
+class SOCKS4Error(ProxyError):
+ pass
+
+
+class HTTPError(ProxyError):
+ pass
+
+SOCKS4_ERRORS = {
+ 0x5B: "Request rejected or failed",
+ 0x5C: ("Request rejected because SOCKS server cannot connect to identd on"
+ " the client"),
+ 0x5D: ("Request rejected because the client program and identd report"
+ " different user-ids")
+}
+
+SOCKS5_ERRORS = {
+ 0x01: "General SOCKS server failure",
+ 0x02: "Connection not allowed by ruleset",
+ 0x03: "Network unreachable",
+ 0x04: "Host unreachable",
+ 0x05: "Connection refused",
+ 0x06: "TTL expired",
+ 0x07: "Command not supported, or protocol error",
+ 0x08: "Address type not supported"
+}
+
+DEFAULT_PORTS = {SOCKS4: 1080, SOCKS5: 1080, HTTP: 8080}
+
+
+def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True,
+ username=None, password=None):
+ """Sets a default proxy.
+
+ All further socksocket objects will use the default unless explicitly
+ changed. All parameters are as for socket.set_proxy()."""
+ socksocket.default_proxy = (proxy_type, addr, port, rdns,
+ username.encode() if username else None,
+ password.encode() if password else None)
+
+
+def setdefaultproxy(*args, **kwargs):
+ if "proxytype" in kwargs:
+ kwargs["proxy_type"] = kwargs.pop("proxytype")
+ return set_default_proxy(*args, **kwargs)
+
+
+def get_default_proxy():
+ """Returns the default proxy, set by set_default_proxy."""
+ return socksocket.default_proxy
+
+getdefaultproxy = get_default_proxy
+
+
+def wrap_module(module):
+ """Attempts to replace a module's socket library with a SOCKS socket.
+
+ Must set a default proxy using set_default_proxy(...) first. This will
+ only work on modules that import socket directly into the namespace;
+ most of the Python Standard Library falls into this category."""
+ if socksocket.default_proxy:
+ module.socket.socket = socksocket
+ else:
+ raise GeneralProxyError("No default proxy specified")
+
+wrapmodule = wrap_module
+
+
+def create_connection(dest_pair,
+ timeout=None, source_address=None,
+ proxy_type=None, proxy_addr=None,
+ proxy_port=None, proxy_rdns=True,
+ proxy_username=None, proxy_password=None,
+ socket_options=None):
+ """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
+
+ Like socket.create_connection(), but connects to proxy
+ before returning the socket object.
+
+ dest_pair - 2-tuple of (IP/hostname, port).
+ **proxy_args - Same args passed to socksocket.set_proxy() if present.
+ timeout - Optional socket timeout value, in seconds.
+ source_address - tuple (host, port) for the socket to bind to as its source
+ address before connecting (only for compatibility)
+ """
+ # Remove IPv6 brackets on the remote address and proxy address.
+ remote_host, remote_port = dest_pair
+ if remote_host.startswith("["):
+ remote_host = remote_host.strip("[]")
+ if proxy_addr and proxy_addr.startswith("["):
+ proxy_addr = proxy_addr.strip("[]")
+
+ err = None
+
+ # Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
+ for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
+ family, socket_type, proto, canonname, sa = r
+ sock = None
+ try:
+ sock = socksocket(family, socket_type, proto)
+
+ if socket_options:
+ for opt in socket_options:
+ sock.setsockopt(*opt)
+
+ if isinstance(timeout, (int, float)):
+ sock.settimeout(timeout)
+
+ if proxy_type:
+ sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
+ proxy_username, proxy_password)
+ if source_address:
+ sock.bind(source_address)
+
+ sock.connect((remote_host, remote_port))
+ return sock
+
+ except (socket.error, ProxyError) as e:
+ err = e
+ if sock:
+ sock.close()
+ sock = None
+
+ if err:
+ raise err
+
+ raise socket.error("gai returned empty list.")
+
+
+class _BaseSocket(socket.socket):
+ """Allows Python 2 delegated methods such as send() to be overridden."""
+ def __init__(self, *pos, **kw):
+ _orig_socket.__init__(self, *pos, **kw)
+
+ self._savedmethods = dict()
+ for name in self._savenames:
+ self._savedmethods[name] = getattr(self, name)
+ delattr(self, name) # Allows normal overriding mechanism to work
+
+ _savenames = list()
+
+
+def _makemethod(name):
+ return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
+for name in ("sendto", "send", "recvfrom", "recv"):
+ method = getattr(_BaseSocket, name, None)
+
+ # Determine if the method is not defined the usual way
+ # as a function in the class.
+ # Python 2 uses __slots__, so there are descriptors for each method,
+ # but they are not functions.
+ if not isinstance(method, Callable):
+ _BaseSocket._savenames.append(name)
+ setattr(_BaseSocket, name, _makemethod(name))
+
+
+class socksocket(_BaseSocket):
+ """socksocket([family[, type[, proto]]]) -> socket object
+
+ Open a SOCKS enabled socket. The parameters are the same as
+ those of the standard socket init. In order for SOCKS to work,
+ you must specify family=AF_INET and proto=0.
+ The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
+ """
+
+ default_proxy = None
+
+ def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
+ proto=0, *args, **kwargs):
+ if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
+ msg = "Socket type must be stream or datagram, not {!r}"
+ raise ValueError(msg.format(type))
+
+ super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
+ self._proxyconn = None # TCP connection to keep UDP relay alive
+
+ if self.default_proxy:
+ self.proxy = self.default_proxy
+ else:
+ self.proxy = (None, None, None, None, None, None)
+ self.proxy_sockname = None
+ self.proxy_peername = None
+
+ self._timeout = None
+
+ def _readall(self, file, count):
+ """Receive EXACTLY the number of bytes requested from the file object.
+
+ Blocks until the required number of bytes have been received."""
+ data = b""
+ while len(data) < count:
+ d = file.read(count - len(data))
+ if not d:
+ raise GeneralProxyError("Connection closed unexpectedly")
+ data += d
+ return data
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+ try:
+ # test if we're connected, if so apply timeout
+ peer = self.get_proxy_peername()
+ super(socksocket, self).settimeout(self._timeout)
+ except socket.error:
+ pass
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setblocking(self, v):
+ if v:
+ self.settimeout(None)
+ else:
+ self.settimeout(0.0)
+
+ def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True,
+ username=None, password=None):
+ """ Sets the proxy to be used.
+
+ proxy_type - The type of the proxy to be used. Three types
+ are supported: PROXY_TYPE_SOCKS4 (including socks4a),
+ PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
+ addr - The address of the server (IP or DNS).
+ port - The port of the server. Defaults to 1080 for SOCKS
+ servers and 8080 for HTTP proxy servers.
+ rdns - Should DNS queries be performed on the remote side
+ (rather than the local side). The default is True.
+ Note: This has no effect with SOCKS4 servers.
+ username - Username to authenticate with to the server.
+ The default is no authentication.
+ password - Password to authenticate with to the server.
+ Only relevant when username is also provided."""
+ self.proxy = (proxy_type, addr, port, rdns,
+ username.encode() if username else None,
+ password.encode() if password else None)
+
+ def setproxy(self, *args, **kwargs):
+ if "proxytype" in kwargs:
+ kwargs["proxy_type"] = kwargs.pop("proxytype")
+ return self.set_proxy(*args, **kwargs)
+
+ def bind(self, *pos, **kw):
+ """Implements proxy connection for UDP sockets.
+
+ Happens during the bind() phase."""
+ (proxy_type, proxy_addr, proxy_port, rdns, username,
+ password) = self.proxy
+ if not proxy_type or self.type != socket.SOCK_DGRAM:
+ return _orig_socket.bind(self, *pos, **kw)
+
+ if self._proxyconn:
+ raise socket.error(EINVAL, "Socket already bound to an address")
+ if proxy_type != SOCKS5:
+ msg = "UDP only supported by SOCKS5 proxy type"
+ raise socket.error(EOPNOTSUPP, msg)
+ super(socksocket, self).bind(*pos, **kw)
+
+ # Need to specify actual local port because
+ # some relays drop packets if a port of zero is specified.
+ # Avoid specifying host address in case of NAT though.
+ _, port = self.getsockname()
+ dst = ("0", port)
+
+ self._proxyconn = _orig_socket()
+ proxy = self._proxy_addr()
+ self._proxyconn.connect(proxy)
+
+ UDP_ASSOCIATE = b"\x03"
+ _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
+
+ # The relay is most likely on the same host as the SOCKS proxy,
+ # but some proxies return a private IP address (10.x.y.z)
+ host, _ = proxy
+ _, port = relay
+ super(socksocket, self).connect((host, port))
+ super(socksocket, self).settimeout(self._timeout)
+ self.proxy_sockname = ("0.0.0.0", 0) # Unknown
+
+ def sendto(self, bytes, *args, **kwargs):
+ if self.type != socket.SOCK_DGRAM:
+ return super(socksocket, self).sendto(bytes, *args, **kwargs)
+ if not self._proxyconn:
+ self.bind(("", 0))
+
+ address = args[-1]
+ flags = args[:-1]
+
+ header = BytesIO()
+ RSV = b"\x00\x00"
+ header.write(RSV)
+ STANDALONE = b"\x00"
+ header.write(STANDALONE)
+ self._write_SOCKS5_address(address, header)
+
+ sent = super(socksocket, self).send(header.getvalue() + bytes, *flags,
+ **kwargs)
+ return sent - header.tell()
+
+ def send(self, bytes, flags=0, **kwargs):
+ if self.type == socket.SOCK_DGRAM:
+ return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
+ else:
+ return super(socksocket, self).send(bytes, flags, **kwargs)
+
+ def recvfrom(self, bufsize, flags=0):
+ if self.type != socket.SOCK_DGRAM:
+ return super(socksocket, self).recvfrom(bufsize, flags)
+ if not self._proxyconn:
+ self.bind(("", 0))
+
+ buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
+ buf.seek(2, SEEK_CUR)
+ frag = buf.read(1)
+ if ord(frag):
+ raise NotImplementedError("Received UDP packet fragment")
+ fromhost, fromport = self._read_SOCKS5_address(buf)
+
+ if self.proxy_peername:
+ peerhost, peerport = self.proxy_peername
+ if fromhost != peerhost or peerport not in (0, fromport):
+ raise socket.error(EAGAIN, "Packet filtered")
+
+ return (buf.read(bufsize), (fromhost, fromport))
+
+ def recv(self, *pos, **kw):
+ bytes, _ = self.recvfrom(*pos, **kw)
+ return bytes
+
+ def close(self):
+ if self._proxyconn:
+ self._proxyconn.close()
+ return super(socksocket, self).close()
+
+ def get_proxy_sockname(self):
+ """Returns the bound IP address and port number at the proxy."""
+ return self.proxy_sockname
+
+ getproxysockname = get_proxy_sockname
+
+ def get_proxy_peername(self):
+ """
+ Returns the IP and port number of the proxy.
+ """
+ return self.getpeername()
+
+ getproxypeername = get_proxy_peername
+
+ def get_peername(self):
+ """Returns the IP address and port number of the destination machine.
+
+ Note: get_proxy_peername returns the proxy."""
+ return self.proxy_peername
+
+ getpeername = get_peername
+
+ def _negotiate_SOCKS5(self, *dest_addr):
+ """Negotiates a stream connection through a SOCKS5 server."""
+ CONNECT = b"\x01"
+ self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(
+ self, CONNECT, dest_addr)
+
+ def _SOCKS5_request(self, conn, cmd, dst):
+ """
+ Send SOCKS5 request with given command (CMD field) and
+ address (DST field). Returns resolved DST address that was used.
+ """
+ proxy_type, addr, port, rdns, username, password = self.proxy
+
+ writer = conn.makefile("wb")
+ reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
+ try:
+ # First we'll send the authentication packages we support.
+ if username and password:
+ # The username/password details were supplied to the
+ # set_proxy method so we support the USERNAME/PASSWORD
+ # authentication (in addition to the standard none).
+ writer.write(b"\x05\x02\x00\x02")
+ else:
+ # No username/password were entered, therefore we
+ # only support connections with no authentication.
+ writer.write(b"\x05\x01\x00")
+
+ # We'll receive the server's response to determine which
+ # method was selected
+ writer.flush()
+ chosen_auth = self._readall(reader, 2)
+
+ if chosen_auth[0:1] != b"\x05":
+ # Note: string[i:i+1] is used because indexing of a bytestring
+ # via bytestring[i] yields an integer in Python 3
+ raise GeneralProxyError(
+ "SOCKS5 proxy server sent invalid data")
+
+ # Check the chosen authentication method
+
+ if chosen_auth[1:2] == b"\x02":
+ # Okay, we need to perform a basic username/password
+ # authentication.
+ if not (username and password):
+ # Although we said we don't support authentication, the
+ # server may still request basic username/password
+ # authentication
+ raise SOCKS5AuthError("No username/password supplied. "
+ "Server requested username/password"
+ " authentication")
+
+ writer.write(b"\x01" + chr(len(username)).encode()
+ + username
+ + chr(len(password)).encode()
+ + password)
+ writer.flush()
+ auth_status = self._readall(reader, 2)
+ if auth_status[0:1] != b"\x01":
+ # Bad response
+ raise GeneralProxyError(
+ "SOCKS5 proxy server sent invalid data")
+ if auth_status[1:2] != b"\x00":
+ # Authentication failed
+ raise SOCKS5AuthError("SOCKS5 authentication failed")
+
+ # Otherwise, authentication succeeded
+
+ # No authentication is required if 0x00
+ elif chosen_auth[1:2] != b"\x00":
+ # Reaching here is always bad
+ if chosen_auth[1:2] == b"\xFF":
+ raise SOCKS5AuthError(
+ "All offered SOCKS5 authentication methods were"
+ " rejected")
+ else:
+ raise GeneralProxyError(
+ "SOCKS5 proxy server sent invalid data")
+
+ # Now we can request the actual connection
+ writer.write(b"\x05" + cmd + b"\x00")
+ resolved = self._write_SOCKS5_address(dst, writer)
+ writer.flush()
+
+ # Get the response
+ resp = self._readall(reader, 3)
+ if resp[0:1] != b"\x05":
+ raise GeneralProxyError(
+ "SOCKS5 proxy server sent invalid data")
+
+ status = ord(resp[1:2])
+ if status != 0x00:
+ # Connection failed: server returned an error
+ error = SOCKS5_ERRORS.get(status, "Unknown error")
+ raise SOCKS5Error("{:#04x}: {}".format(status, error))
+
+ # Get the bound address/port
+ bnd = self._read_SOCKS5_address(reader)
+
+ super(socksocket, self).settimeout(self._timeout)
+ return (resolved, bnd)
+ finally:
+ reader.close()
+ writer.close()
+
+ def _write_SOCKS5_address(self, addr, file):
+ """
+ Return the host and port packed for the SOCKS5 protocol,
+ and the resolved address as a tuple object.
+ """
+ host, port = addr
+ proxy_type, _, _, rdns, username, password = self.proxy
+ family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
+
+ # If the given destination address is an IP address, we'll
+ # use the IP address request even if remote resolving was specified.
+ # Detect whether the address is IPv4/6 directly.
+ for family in (socket.AF_INET, socket.AF_INET6):
+ try:
+ addr_bytes = socket.inet_pton(family, host)
+ file.write(family_to_byte[family] + addr_bytes)
+ host = socket.inet_ntop(family, addr_bytes)
+ file.write(struct.pack(">H", port))
+ return host, port
+ except socket.error:
+ continue
+
+ # Well it's not an IP number, so it's probably a DNS name.
+ if rdns:
+ # Resolve remotely
+ host_bytes = host.encode("idna")
+ file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
+ else:
+ # Resolve locally
+ addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM,
+ socket.IPPROTO_TCP,
+ socket.AI_ADDRCONFIG)
+ # We can't really work out what IP is reachable, so just pick the
+ # first.
+ target_addr = addresses[0]
+ family = target_addr[0]
+ host = target_addr[4][0]
+
+ addr_bytes = socket.inet_pton(family, host)
+ file.write(family_to_byte[family] + addr_bytes)
+ host = socket.inet_ntop(family, addr_bytes)
+ file.write(struct.pack(">H", port))
+ return host, port
+
+ def _read_SOCKS5_address(self, file):
+ atyp = self._readall(file, 1)
+ if atyp == b"\x01":
+ addr = socket.inet_ntoa(self._readall(file, 4))
+ elif atyp == b"\x03":
+ length = self._readall(file, 1)
+ addr = self._readall(file, ord(length))
+ elif atyp == b"\x04":
+ addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
+ else:
+ raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
+
+ port = struct.unpack(">H", self._readall(file, 2))[0]
+ return addr, port
+
+ def _negotiate_SOCKS4(self, dest_addr, dest_port):
+ """Negotiates a connection through a SOCKS4 server."""
+ proxy_type, addr, port, rdns, username, password = self.proxy
+
+ writer = self.makefile("wb")
+ reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
+ try:
+ # Check if the destination address provided is an IP address
+ remote_resolve = False
+ try:
+ addr_bytes = socket.inet_aton(dest_addr)
+ except socket.error:
+ # It's a DNS name. Check where it should be resolved.
+ if rdns:
+ addr_bytes = b"\x00\x00\x00\x01"
+ remote_resolve = True
+ else:
+ addr_bytes = socket.inet_aton(
+ socket.gethostbyname(dest_addr))
+
+ # Construct the request packet
+ writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
+ writer.write(addr_bytes)
+
+ # The username parameter is considered userid for SOCKS4
+ if username:
+ writer.write(username)
+ writer.write(b"\x00")
+
+ # DNS name if remote resolving is required
+ # NOTE: This is actually an extension to the SOCKS4 protocol
+ # called SOCKS4A and may not be supported in all cases.
+ if remote_resolve:
+ writer.write(dest_addr.encode("idna") + b"\x00")
+ writer.flush()
+
+ # Get the response from the server
+ resp = self._readall(reader, 8)
+ if resp[0:1] != b"\x00":
+ # Bad data
+ raise GeneralProxyError(
+ "SOCKS4 proxy server sent invalid data")
+
+ status = ord(resp[1:2])
+ if status != 0x5A:
+ # Connection failed: server returned an error
+ error = SOCKS4_ERRORS.get(status, "Unknown error")
+ raise SOCKS4Error("{:#04x}: {}".format(status, error))
+
+ # Get the bound address/port
+ self.proxy_sockname = (socket.inet_ntoa(resp[4:]),
+ struct.unpack(">H", resp[2:4])[0])
+ if remote_resolve:
+ self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
+ else:
+ self.proxy_peername = dest_addr, dest_port
+ finally:
+ reader.close()
+ writer.close()
+
+ def _negotiate_HTTP(self, dest_addr, dest_port):
+ """Negotiates a connection through an HTTP server.
+
+ NOTE: This currently only supports HTTP CONNECT-style proxies."""
+ proxy_type, addr, port, rdns, username, password = self.proxy
+
+ # If we need to resolve locally, we do this now
+ addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
+
+ http_headers = [
+ (b"CONNECT " + addr.encode("idna") + b":"
+ + str(dest_port).encode() + b" HTTP/1.1"),
+ b"Host: " + dest_addr.encode("idna")
+ ]
+
+ if username and password:
+ http_headers.append(b"Proxy-Authorization: basic "
+ + b64encode(username + b":" + password))
+
+ http_headers.append(b"\r\n")
+
+ self.sendall(b"\r\n".join(http_headers))
+
+ # We just need the first line to check if the connection was successful
+ fobj = self.makefile()
+ status_line = fobj.readline()
+ fobj.close()
+
+ if not status_line:
+ raise GeneralProxyError("Connection closed unexpectedly")
+
+ try:
+ proto, status_code, status_msg = status_line.split(" ", 2)
+ except ValueError:
+ raise GeneralProxyError("HTTP proxy server sent invalid response")
+
+ if not proto.startswith("HTTP/"):
+ raise GeneralProxyError(
+ "Proxy server does not appear to be an HTTP proxy")
+
+ try:
+ status_code = int(status_code)
+ except ValueError:
+ raise HTTPError(
+ "HTTP proxy server did not return a valid HTTP status")
+
+ if status_code != 200:
+ error = "{}: {}".format(status_code, status_msg)
+ if status_code in (400, 403, 405):
+ # It's likely that the HTTP proxy server does not support the
+ # CONNECT tunneling method
+ error += ("\n[*] Note: The HTTP proxy server may not be"
+ " supported by PySocks (must be a CONNECT tunnel"
+ " proxy)")
+ raise HTTPError(error)
+
+ self.proxy_sockname = (b"0.0.0.0", 0)
+ self.proxy_peername = addr, dest_port
+
+ _proxy_negotiators = {
+ SOCKS4: _negotiate_SOCKS4,
+ SOCKS5: _negotiate_SOCKS5,
+ HTTP: _negotiate_HTTP
+ }
+
+ @set_self_blocking
+ def connect(self, dest_pair, catch_errors=None):
+ """
+ Connects to the specified destination through a proxy.
+ Uses the same API as socket's connect().
+ To select the proxy server, use set_proxy().
+
+ dest_pair - 2-tuple of (IP/hostname, port).
+ """
+ if len(dest_pair) != 2 or dest_pair[0].startswith("["):
+ # Probably IPv6, not supported -- raise an error, and hope
+ # Happy Eyeballs (RFC6555) makes sure at least the IPv4
+ # connection works...
+ raise socket.error("PySocks doesn't support IPv6: %s"
+ % str(dest_pair))
+
+ dest_addr, dest_port = dest_pair
+
+ if self.type == socket.SOCK_DGRAM:
+ if not self._proxyconn:
+ self.bind(("", 0))
+ dest_addr = socket.gethostbyname(dest_addr)
+
+ # If the host address is INADDR_ANY or similar, reset the peer
+ # address so that packets are received from any peer
+ if dest_addr == "0.0.0.0" and not dest_port:
+ self.proxy_peername = None
+ else:
+ self.proxy_peername = (dest_addr, dest_port)
+ return
+
+ (proxy_type, proxy_addr, proxy_port, rdns, username,
+ password) = self.proxy
+
+ # Do a minimal input check first
+ if (not isinstance(dest_pair, (list, tuple))
+ or len(dest_pair) != 2
+ or not dest_addr
+ or not isinstance(dest_port, int)):
+ # Inputs failed, raise an error
+ raise GeneralProxyError(
+ "Invalid destination-connection (host, port) pair")
+
+ # We set the timeout here so that we don't hang in connection or during
+ # negotiation.
+ super(socksocket, self).settimeout(self._timeout)
+
+ if proxy_type is None:
+ # Treat like regular socket object
+ self.proxy_peername = dest_pair
+ super(socksocket, self).settimeout(self._timeout)
+ super(socksocket, self).connect((dest_addr, dest_port))
+ return
+
+ proxy_addr = self._proxy_addr()
+
+ try:
+ # Initial connection to proxy server.
+ super(socksocket, self).connect(proxy_addr)
+
+ except socket.error as error:
+ # Error while connecting to proxy
+ self.close()
+ if not catch_errors:
+ proxy_addr, proxy_port = proxy_addr
+ proxy_server = "{}:{}".format(proxy_addr, proxy_port)
+ printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
+
+ msg = "Error connecting to {} proxy {}".format(printable_type,
+ proxy_server)
+ log.debug("%s due to: %s", msg, error)
+ raise ProxyConnectionError(msg, error)
+ else:
+ raise error
+
+ else:
+ # Connected to proxy server, now negotiate
+ try:
+ # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
+ negotiate = self._proxy_negotiators[proxy_type]
+ negotiate(self, dest_addr, dest_port)
+ except socket.error as error:
+ if not catch_errors:
+ # Wrap socket errors
+ self.close()
+ raise GeneralProxyError("Socket error", error)
+ else:
+ raise error
+ except ProxyError:
+ # Protocol error while negotiating with proxy
+ self.close()
+ raise
+
+ @set_self_blocking
+ def connect_ex(self, dest_pair):
+ """ https://docs.python.org/3/library/socket.html#socket.socket.connect_ex
+ Like connect(address), but return an error indicator instead of raising an exception for errors returned by the C-level connect() call (other problems, such as "host not found" can still raise exceptions).
+ """
+ try:
+ self.connect(dest_pair, catch_errors=True)
+ return 0
+ except OSError as e:
+ # If the error is numeric (socket errors are numeric), then return number as
+ # connect_ex expects. Otherwise raise the error again (socket timeout for example)
+ if e.errno:
+ return e.errno
+ else:
+ raise
+
+ def _proxy_addr(self):
+ """
+ Return proxy address to connect to as tuple object
+ """
+ (proxy_type, proxy_addr, proxy_port, rdns, username,
+ password) = self.proxy
+ proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
+ if not proxy_port:
+ raise GeneralProxyError("Invalid proxy type")
+ return proxy_addr, proxy_port
diff --git a/contrib/python/PySocks/py2/sockshandler.py b/contrib/python/PySocks/py2/sockshandler.py
new file mode 100644
index 0000000000..6a2ed81cee
--- /dev/null
+++ b/contrib/python/PySocks/py2/sockshandler.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+"""
+SocksiPy + urllib2 handler
+
+version: 0.3
+author: e<e@tr0ll.in>
+
+This module provides a Handler which you can use with urllib2 to allow it to tunnel your connection through a socks.sockssocket socket, with out monkey patching the original socket...
+"""
+import socket
+import ssl
+
+try:
+ import urllib2
+ import httplib
+except ImportError: # Python 3
+ import urllib.request as urllib2
+ import http.client as httplib
+
+import socks # $ pip install PySocks
+
+def merge_dict(a, b):
+ d = a.copy()
+ d.update(b)
+ return d
+
+def is_ip(s):
+ try:
+ if ':' in s:
+ socket.inet_pton(socket.AF_INET6, s)
+ elif '.' in s:
+ socket.inet_aton(s)
+ else:
+ return False
+ except:
+ return False
+ else:
+ return True
+
+socks4_no_rdns = set()
+
+class SocksiPyConnection(httplib.HTTPConnection):
+ def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
+ self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
+ httplib.HTTPConnection.__init__(self, *args, **kwargs)
+
+ def connect(self):
+ (proxytype, proxyaddr, proxyport, rdns, username, password) = self.proxyargs
+ rdns = rdns and proxyaddr not in socks4_no_rdns
+ while True:
+ try:
+ sock = socks.create_connection(
+ (self.host, self.port), self.timeout, None,
+ proxytype, proxyaddr, proxyport, rdns, username, password,
+ ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),))
+ break
+ except socks.SOCKS4Error as e:
+ if rdns and "0x5b" in str(e) and not is_ip(self.host):
+ # Maybe a SOCKS4 server that doesn't support remote resolving
+ # Let's try again
+ rdns = False
+ socks4_no_rdns.add(proxyaddr)
+ else:
+ raise
+ self.sock = sock
+
+class SocksiPyConnectionS(httplib.HTTPSConnection):
+ def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
+ self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+
+ def connect(self):
+ SocksiPyConnection.connect(self)
+ self.sock = self._context.wrap_socket(self.sock, server_hostname=self.host)
+ if not self._context.check_hostname and self._check_hostname:
+ try:
+ ssl.match_hostname(self.sock.getpeercert(), self.host)
+ except Exception:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ raise
+
+class SocksiPyHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kw = kwargs
+ urllib2.HTTPHandler.__init__(self)
+
+ def http_open(self, req):
+ def build(host, port=None, timeout=0, **kwargs):
+ kw = merge_dict(self.kw, kwargs)
+ conn = SocksiPyConnection(*self.args, host=host, port=port, timeout=timeout, **kw)
+ return conn
+ return self.do_open(build, req)
+
+ def https_open(self, req):
+ def build(host, port=None, timeout=0, **kwargs):
+ kw = merge_dict(self.kw, kwargs)
+ conn = SocksiPyConnectionS(*self.args, host=host, port=port, timeout=timeout, **kw)
+ return conn
+ return self.do_open(build, req)
+
+if __name__ == "__main__":
+ import sys
+ try:
+ port = int(sys.argv[1])
+ except (ValueError, IndexError):
+ port = 9050
+ opener = urllib2.build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "localhost", port))
+ print("HTTP: " + opener.open("http://httpbin.org/ip").read().decode())
+ print("HTTPS: " + opener.open("https://httpbin.org/ip").read().decode())
diff --git a/contrib/python/PySocks/py2/ya.make b/contrib/python/PySocks/py2/ya.make
new file mode 100644
index 0000000000..28970a0770
--- /dev/null
+++ b/contrib/python/PySocks/py2/ya.make
@@ -0,0 +1,23 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(1.7.1)
+
+LICENSE(BSD-3-Clause)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ socks.py
+ sockshandler.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/PySocks/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/PySocks/py3/LICENSE b/contrib/python/PySocks/py3/LICENSE
new file mode 100644
index 0000000000..04b6b1f37c
--- /dev/null
+++ b/contrib/python/PySocks/py3/LICENSE
@@ -0,0 +1,22 @@
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+ to endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
diff --git a/contrib/python/PySocks/py3/README.md b/contrib/python/PySocks/py3/README.md
new file mode 100644
index 0000000000..0035b7d1a4
--- /dev/null
+++ b/contrib/python/PySocks/py3/README.md
@@ -0,0 +1,300 @@
+PySocks
+=======
+
+PySocks lets you send traffic through SOCKS and HTTP proxy servers. It is a modern fork of [SocksiPy](http://socksipy.sourceforge.net/) with bug fixes and extra features.
+
+Acts as a drop-in replacement to the socket module. Seamlessly configure SOCKS proxies for any socket object by calling `socket_object.set_proxy()`.
+
+----------------
+
+Features
+========
+
+* SOCKS proxy client for Python 2.7 and 3.4+
+* TCP supported
+* UDP mostly supported (issues may occur in some edge cases)
+* HTTP proxy client included but not supported or recommended (you should use urllib2's or requests' own HTTP proxy interface)
+* urllib2 handler included. `pip install` / `setup.py install` will automatically install the `sockshandler` module.
+
+Installation
+============
+
+ pip install PySocks
+
+Or download the tarball / `git clone` and...
+
+ python setup.py install
+
+These will install both the `socks` and `sockshandler` modules.
+
+Alternatively, include just `socks.py` in your project.
+
+--------------------------------------------
+
+*Warning:* PySocks/SocksiPy only supports HTTP proxies that use CONNECT tunneling. Certain HTTP proxies may not work with this library. If you wish to use HTTP (not SOCKS) proxies, it is recommended that you rely on your HTTP client's native proxy support (`proxies` dict for `requests`, or `urllib2.ProxyHandler` for `urllib2`) instead.
+
+--------------------------------------------
+
+Usage
+=====
+
+## socks.socksocket ##
+
+ import socks
+
+ s = socks.socksocket() # Same API as socket.socket in the standard lib
+
+ s.set_proxy(socks.SOCKS5, "localhost") # SOCKS4 and SOCKS5 use port 1080 by default
+ # Or
+ s.set_proxy(socks.SOCKS4, "localhost", 4444)
+ # Or
+ s.set_proxy(socks.HTTP, "5.5.5.5", 8888)
+
+ # Can be treated identical to a regular socket object
+ s.connect(("www.somesite.com", 80))
+ s.sendall("GET / HTTP/1.1 ...")
+ print s.recv(4096)
+
+## Monkeypatching ##
+
+To monkeypatch the entire standard library with a single default proxy:
+
+ import urllib2
+ import socket
+ import socks
+
+ socks.set_default_proxy(socks.SOCKS5, "localhost")
+ socket.socket = socks.socksocket
+
+ urllib2.urlopen("http://www.somesite.com/") # All requests will pass through the SOCKS proxy
+
+Note that monkeypatching may not work for all standard modules or for all third party modules, and generally isn't recommended. Monkeypatching is usually an anti-pattern in Python.
+
+## urllib2 Handler ##
+
+Example use case with the `sockshandler` urllib2 handler. Note that you must import both `socks` and `sockshandler`, as the handler is its own module separate from PySocks. The module is included in the PyPI package.
+
+ import urllib2
+ import socks
+ from sockshandler import SocksiPyHandler
+
+ opener = urllib2.build_opener(SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050))
+ print opener.open("http://www.somesite.com/") # All requests made by the opener will pass through the SOCKS proxy
+
+--------------------------------------------
+
+Original SocksiPy README attached below, amended to reflect API changes.
+
+--------------------------------------------
+
+SocksiPy
+
+A Python SOCKS module.
+
+(C) 2006 Dan-Haim. All rights reserved.
+
+See LICENSE file for details.
+
+
+*WHAT IS A SOCKS PROXY?*
+
+A SOCKS proxy is a proxy server at the TCP level. In other words, it acts as
+a tunnel, relaying all traffic going through it without modifying it.
+SOCKS proxies can be used to relay traffic using any network protocol that
+uses TCP.
+
+*WHAT IS SOCKSIPY?*
+
+This Python module allows you to create TCP connections through a SOCKS
+proxy without any special effort.
+It also supports relaying UDP packets with a SOCKS5 proxy.
+
+*PROXY COMPATIBILITY*
+
+SocksiPy is compatible with three different types of proxies:
+
+1. SOCKS Version 4 (SOCKS4), including the SOCKS4a extension.
+2. SOCKS Version 5 (SOCKS5).
+3. HTTP Proxies which support tunneling using the CONNECT method.
+
+*SYSTEM REQUIREMENTS*
+
+Being written in Python, SocksiPy can run on any platform that has a Python
+interpreter and TCP/IP support.
+This module has been tested with Python 2.3 and should work with greater versions
+just as well.
+
+
+INSTALLATION
+-------------
+
+Simply copy the file "socks.py" to your Python's `lib/site-packages` directory,
+and you're ready to go. [Editor's note: it is better to use `python setup.py install` for PySocks]
+
+
+USAGE
+------
+
+First load the socks module with the command:
+
+ >>> import socks
+ >>>
+
+The socks module provides a class called `socksocket`, which is the base to all of the module's functionality.
+
+The `socksocket` object has the same initialization parameters as the normal socket
+object to ensure maximal compatibility, however it should be noted that `socksocket` will only function with family being `AF_INET` and
+type being either `SOCK_STREAM` or `SOCK_DGRAM`.
+Generally, it is best to initialize the `socksocket` object with no parameters
+
+ >>> s = socks.socksocket()
+ >>>
+
+The `socksocket` object has an interface which is very similar to socket's (in fact
+the `socksocket` class is derived from socket) with a few extra methods.
+To select the proxy server you would like to use, use the `set_proxy` method, whose
+syntax is:
+
+ set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
+
+Explanation of the parameters:
+
+`proxy_type` - The type of the proxy server. This can be one of three possible
+choices: `PROXY_TYPE_SOCKS4`, `PROXY_TYPE_SOCKS5` and `PROXY_TYPE_HTTP` for SOCKS4,
+SOCKS5 and HTTP servers respectively. `SOCKS4`, `SOCKS5`, and `HTTP` are all aliases, respectively.
+
+`addr` - The IP address or DNS name of the proxy server.
+
+`port` - The port of the proxy server. Defaults to 1080 for socks and 8080 for http.
+
+`rdns` - This is a boolean flag that modifies the behavior regarding DNS resolving.
+If it is set to True, DNS resolving will be performed remotely, on the server.
+If it is set to False, DNS resolving will be performed locally. Please note that
+setting this to True with SOCKS4 servers actually use an extension to the protocol,
+called SOCKS4a, which may not be supported on all servers (SOCKS5 and http servers
+always support DNS). The default is True.
+
+`username` - For SOCKS5 servers, this allows simple username / password authentication
+with the server. For SOCKS4 servers, this parameter will be sent as the userid.
+This parameter is ignored if an HTTP server is being used. If it is not provided,
+authentication will not be used (servers may accept unauthenticated requests).
+
+`password` - This parameter is valid only for SOCKS5 servers and specifies the
+respective password for the username provided.
+
+Example of usage:
+
+ >>> s.set_proxy(socks.SOCKS5, "socks.example.com") # uses default port 1080
+ >>> s.set_proxy(socks.SOCKS4, "socks.test.com", 1081)
+
+After the set_proxy method has been called, simply call the connect method with the
+traditional parameters to establish a connection through the proxy:
+
+ >>> s.connect(("www.sourceforge.net", 80))
+ >>>
+
+Connection will take a bit longer to allow negotiation with the proxy server.
+Please note that calling connect without calling `set_proxy` earlier will connect
+without a proxy (just like a regular socket).
+
+Errors: Any errors in the connection process will trigger exceptions. The exception
+may either be generated by the underlying socket layer or may be custom module
+exceptions, whose details follow:
+
+class `ProxyError` - This is a base exception class. It is not raised directly but
+rather all other exception classes raised by this module are derived from it.
+This allows an easy way to catch all proxy-related errors. It descends from `IOError`.
+
+All `ProxyError` exceptions have an attribute `socket_err`, which will contain either a
+caught `socket.error` exception, or `None` if there wasn't any.
+
+class `GeneralProxyError` - When thrown, it indicates a problem which does not fall
+into another category.
+
+* `Sent invalid data` - This error means that unexpected data has been received from
+the server. The most common reason is that the server specified as the proxy is
+not really a SOCKS4/SOCKS5/HTTP proxy, or maybe the proxy type specified is wrong.
+
+* `Connection closed unexpectedly` - The proxy server unexpectedly closed the connection.
+This may indicate that the proxy server is experiencing network or software problems.
+
+* `Bad proxy type` - This will be raised if the type of the proxy supplied to the
+set_proxy function was not one of `SOCKS4`/`SOCKS5`/`HTTP`.
+
+* `Bad input` - This will be raised if the `connect()` method is called with bad input
+parameters.
+
+class `SOCKS5AuthError` - This indicates that the connection through a SOCKS5 server
+failed due to an authentication problem.
+
+* `Authentication is required` - This will happen if you use a SOCKS5 server which
+requires authentication without providing a username / password at all.
+
+* `All offered authentication methods were rejected` - This will happen if the proxy
+requires a special authentication method which is not supported by this module.
+
+* `Unknown username or invalid password` - Self descriptive.
+
+class `SOCKS5Error` - This will be raised for SOCKS5 errors which are not related to
+authentication.
+The parameter is a tuple containing a code, as given by the server,
+and a description of the
+error. The possible errors, according to the RFC, are:
+
+* `0x01` - General SOCKS server failure - If for any reason the proxy server is unable to
+fulfill your request (internal server error).
+* `0x02` - connection not allowed by ruleset - If the address you're trying to connect to
+is blacklisted on the server or requires authentication.
+* `0x03` - Network unreachable - The target could not be contacted. A router on the network
+had replied with a destination net unreachable error.
+* `0x04` - Host unreachable - The target could not be contacted. A router on the network
+had replied with a destination host unreachable error.
+* `0x05` - Connection refused - The target server has actively refused the connection
+(the requested port is closed).
+* `0x06` - TTL expired - The TTL value of the SYN packet from the proxy to the target server
+has expired. This usually means that there are network problems causing the packet
+to be caught in a router-to-router "ping-pong".
+* `0x07` - Command not supported - For instance if the server does not support UDP.
+* `0x08` - Address type not supported - The client has provided an invalid address type.
+When using this module, this error should not occur.
+
+class `SOCKS4Error` - This will be raised for SOCKS4 errors. The parameter is a tuple
+containing a code and a description of the error, as given by the server. The
+possible errors, according to the specification, are:
+
+* `0x5B` - Request rejected or failed - Will be raised in the event of a failure for any
+reason other than the two mentioned next.
+* `0x5C` - request rejected because SOCKS server cannot connect to identd on the client -
+The Socks server had tried an ident lookup on your computer and has failed. In this
+case you should run an identd server and/or configure your firewall to allow incoming
+connections to local port 113 from the remote server.
+* `0x5D` - request rejected because the client program and identd report different user-ids -
+The Socks server had performed an ident lookup on your computer and has received a
+different userid than the one you have provided. Change your userid (through the
+username parameter of the set_proxy method) to match and try again.
+
+class `HTTPError` - This will be raised for HTTP errors. The message will contain
+the HTTP status code and provided error message.
+
+After establishing the connection, the object behaves like a standard socket.
+
+Methods like `makefile()` and `settimeout()` should behave just like regular sockets.
+Call the `close()` method to close the connection.
+
+In addition to the `socksocket` class, an additional function worth mentioning is the
+`set_default_proxy` function. The parameters are the same as the `set_proxy` method.
+This function will set default proxy settings for newly created `socksocket` objects,
+in which the proxy settings haven't been changed via the `set_proxy` method.
+This is quite useful if you wish to force 3rd party modules to use a SOCKS proxy,
+by overriding the socket object.
+For example:
+
+ >>> socks.set_default_proxy(socks.SOCKS5, "socks.example.com")
+ >>> socket.socket = socks.socksocket
+ >>> urllib.urlopen("http://www.sourceforge.net/")
+
+
+PROBLEMS
+---------
+
+Please open a GitHub issue at https://github.com/Anorov/PySocks
diff --git a/contrib/python/PySocks/ya.make b/contrib/python/PySocks/ya.make
new file mode 100644
index 0000000000..1d7ecb60a1
--- /dev/null
+++ b/contrib/python/PySocks/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/PySocks/py2)
+ELSE()
+ PEERDIR(contrib/python/PySocks/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/blinker/py2/LICENSE.rst b/contrib/python/blinker/py2/LICENSE.rst
new file mode 100644
index 0000000000..79c9825adb
--- /dev/null
+++ b/contrib/python/blinker/py2/LICENSE.rst
@@ -0,0 +1,20 @@
+Copyright 2010 Jason Kirtland
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/blinker/py2/README.rst b/contrib/python/blinker/py2/README.rst
new file mode 100644
index 0000000000..883dded1c8
--- /dev/null
+++ b/contrib/python/blinker/py2/README.rst
@@ -0,0 +1,40 @@
+Blinker
+=======
+
+Blinker provides a fast dispatching system that allows any number of
+interested parties to subscribe to events, or "signals".
+
+Signal receivers can subscribe to specific senders or receive signals
+sent by any sender.
+
+.. code-block:: pycon
+
+ >>> from blinker import signal
+ >>> started = signal('round-started')
+ >>> def each(round):
+ ... print "Round %s!" % round
+ ...
+ >>> started.connect(each)
+
+ >>> def round_two(round):
+ ... print "This is round two."
+ ...
+ >>> started.connect(round_two, sender=2)
+
+ >>> for round in range(1, 4):
+ ... started.send(round)
+ ...
+ Round 1!
+ Round 2!
+ This is round two.
+ Round 3!
+
+
+Links
+-----
+
+- Documentation: https://blinker.readthedocs.io/
+- Changes: https://blinker.readthedocs.io/#changes
+- PyPI Releases: https://pypi.org/project/blinker/
+- Source Code: https://github.com/pallets-eco/blinker/
+- Issue Tracker: https://github.com/pallets-eco/blinker/issues/
diff --git a/contrib/python/blinker/py3/.dist-info/METADATA b/contrib/python/blinker/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..f96613c4b8
--- /dev/null
+++ b/contrib/python/blinker/py3/.dist-info/METADATA
@@ -0,0 +1,62 @@
+Metadata-Version: 2.1
+Name: blinker
+Version: 1.7.0
+Summary: Fast, simple object-to-object and broadcast signaling
+Keywords: signal,emit,events,broadcast
+Author-email: Jason Kirtland <jek@discorporate.us>
+Maintainer-email: Pallets Ecosystem <contact@palletsprojects.com>
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries
+Project-URL: Chat, https://discord.gg/pallets
+Project-URL: Documentation, https://blinker.readthedocs.io
+Project-URL: Homepage, https://blinker.readthedocs.io
+Project-URL: Issue Tracker, https://github.com/pallets-eco/blinker/issues/
+Project-URL: Source Code, https://github.com/pallets-eco/blinker/
+
+Blinker
+=======
+
+Blinker provides a fast dispatching system that allows any number of
+interested parties to subscribe to events, or "signals".
+
+Signal receivers can subscribe to specific senders or receive signals
+sent by any sender.
+
+.. code-block:: pycon
+
+ >>> from blinker import signal
+ >>> started = signal('round-started')
+ >>> def each(round):
+ ... print(f"Round {round}")
+ ...
+ >>> started.connect(each)
+
+ >>> def round_two(round):
+ ... print("This is round two.")
+ ...
+ >>> started.connect(round_two, sender=2)
+
+ >>> for round in range(1, 4):
+ ... started.send(round)
+ ...
+ Round 1!
+ Round 2!
+ This is round two.
+ Round 3!
+
+
+Links
+-----
+
+- Documentation: https://blinker.readthedocs.io/
+- Changes: https://blinker.readthedocs.io/#changes
+- PyPI Releases: https://pypi.org/project/blinker/
+- Source Code: https://github.com/pallets-eco/blinker/
+- Issue Tracker: https://github.com/pallets-eco/blinker/issues/
+
diff --git a/contrib/python/blinker/py3/.dist-info/top_level.txt b/contrib/python/blinker/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..1ff4ca5510
--- /dev/null
+++ b/contrib/python/blinker/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+blinker
diff --git a/contrib/python/blinker/py3/LICENSE.rst b/contrib/python/blinker/py3/LICENSE.rst
new file mode 100644
index 0000000000..79c9825adb
--- /dev/null
+++ b/contrib/python/blinker/py3/LICENSE.rst
@@ -0,0 +1,20 @@
+Copyright 2010 Jason Kirtland
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/blinker/py3/README.rst b/contrib/python/blinker/py3/README.rst
new file mode 100644
index 0000000000..e3bc6d4781
--- /dev/null
+++ b/contrib/python/blinker/py3/README.rst
@@ -0,0 +1,40 @@
+Blinker
+=======
+
+Blinker provides a fast dispatching system that allows any number of
+interested parties to subscribe to events, or "signals".
+
+Signal receivers can subscribe to specific senders or receive signals
+sent by any sender.
+
+.. code-block:: pycon
+
+ >>> from blinker import signal
+ >>> started = signal('round-started')
+ >>> def each(round):
+    ...     print(f"Round {round}!")
+ ...
+ >>> started.connect(each)
+
+ >>> def round_two(round):
+ ... print("This is round two.")
+ ...
+ >>> started.connect(round_two, sender=2)
+
+ >>> for round in range(1, 4):
+ ... started.send(round)
+ ...
+ Round 1!
+ Round 2!
+ This is round two.
+ Round 3!
+
+
+Links
+-----
+
+- Documentation: https://blinker.readthedocs.io/
+- Changes: https://blinker.readthedocs.io/#changes
+- PyPI Releases: https://pypi.org/project/blinker/
+- Source Code: https://github.com/pallets-eco/blinker/
+- Issue Tracker: https://github.com/pallets-eco/blinker/issues/
diff --git a/contrib/python/blinker/py3/blinker/__init__.py b/contrib/python/blinker/py3/blinker/__init__.py
new file mode 100644
index 0000000000..d014caa0ff
--- /dev/null
+++ b/contrib/python/blinker/py3/blinker/__init__.py
@@ -0,0 +1,19 @@
+from blinker.base import ANY
+from blinker.base import NamedSignal
+from blinker.base import Namespace
+from blinker.base import receiver_connected
+from blinker.base import Signal
+from blinker.base import signal
+from blinker.base import WeakNamespace
+
+__all__ = [
+ "ANY",
+ "NamedSignal",
+ "Namespace",
+ "Signal",
+ "WeakNamespace",
+ "receiver_connected",
+ "signal",
+]
+
+__version__ = "1.7.0"
diff --git a/contrib/python/blinker/py3/blinker/_saferef.py b/contrib/python/blinker/py3/blinker/_saferef.py
new file mode 100644
index 0000000000..dcb70c1899
--- /dev/null
+++ b/contrib/python/blinker/py3/blinker/_saferef.py
@@ -0,0 +1,230 @@
+# extracted from Louie, http://pylouie.org/
+# updated for Python 3
+#
+# Copyright (c) 2006 Patrick K. O'Brien, Mike C. Fletcher,
+# Matthew R. Scott
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# * Neither the name of the <ORGANIZATION> nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+"""Refactored 'safe reference' from dispatcher.py"""
+import operator
+import sys
+import traceback
+import weakref
+
+
+get_self = operator.attrgetter("__self__")
+get_func = operator.attrgetter("__func__")
+
+
+def safe_ref(target, on_delete=None):
+ """Return a *safe* weak reference to a callable target.
+
+ - ``target``: The object to be weakly referenced, if it's a bound
+ method reference, will create a BoundMethodWeakref, otherwise
+ creates a simple weakref.
+
+ - ``on_delete``: If provided, will have a hard reference stored to
+ the callable to be called after the safe reference goes out of
+ scope with the reference object, (either a weakref or a
+ BoundMethodWeakref) as argument.
+ """
+ try:
+ im_self = get_self(target)
+ except AttributeError:
+ if callable(on_delete):
+ return weakref.ref(target, on_delete)
+ else:
+ return weakref.ref(target)
+ else:
+ if im_self is not None:
+ # Turn a bound method into a BoundMethodWeakref instance.
+ # Keep track of these instances for lookup by disconnect().
+ assert hasattr(target, "im_func") or hasattr(target, "__func__"), (
+ f"safe_ref target {target!r} has im_self, but no im_func, "
+ "don't know how to create reference"
+ )
+ reference = BoundMethodWeakref(target=target, on_delete=on_delete)
+ return reference
+
+
+class BoundMethodWeakref:
+ """'Safe' and reusable weak references to instance methods.
+
+ BoundMethodWeakref objects provide a mechanism for referencing a
+ bound method without requiring that the method object itself
+ (which is normally a transient object) is kept alive. Instead,
+ the BoundMethodWeakref object keeps weak references to both the
+ object and the function which together define the instance method.
+
+ Attributes:
+
+ - ``key``: The identity key for the reference, calculated by the
+ class's calculate_key method applied to the target instance method.
+
+ - ``deletion_methods``: Sequence of callable objects taking single
+ argument, a reference to this object which will be called when
+ *either* the target object or target function is garbage
+ collected (i.e. when this object becomes invalid). These are
+ specified as the on_delete parameters of safe_ref calls.
+
+ - ``weak_self``: Weak reference to the target object.
+
+ - ``weak_func``: Weak reference to the target function.
+
+ Class Attributes:
+
+ - ``_all_instances``: Class attribute pointing to all live
+ BoundMethodWeakref objects indexed by the class's
+ calculate_key(target) method applied to the target objects.
+ This weak value dictionary is used to short-circuit creation so
+ that multiple references to the same (object, function) pair
+ produce the same BoundMethodWeakref instance.
+ """
+
+ _all_instances = weakref.WeakValueDictionary() # type: ignore[var-annotated]
+
+ def __new__(cls, target, on_delete=None, *arguments, **named):
+ """Create new instance or return current instance.
+
+ Basically this method of construction allows us to
+ short-circuit creation of references to already-referenced
+ instance methods. The key corresponding to the target is
+ calculated, and if there is already an existing reference,
+ that is returned, with its deletion_methods attribute updated.
+ Otherwise the new instance is created and registered in the
+ table of already-referenced methods.
+ """
+ key = cls.calculate_key(target)
+ current = cls._all_instances.get(key)
+ if current is not None:
+ current.deletion_methods.append(on_delete)
+ return current
+ else:
+ base = super().__new__(cls)
+ cls._all_instances[key] = base
+ base.__init__(target, on_delete, *arguments, **named)
+ return base
+
+ def __init__(self, target, on_delete=None):
+ """Return a weak-reference-like instance for a bound method.
+
+ - ``target``: The instance-method target for the weak reference,
+ must have im_self and im_func attributes and be
+ reconstructable via the following, which is true of built-in
+ instance methods::
+
+ target.im_func.__get__( target.im_self )
+
+ - ``on_delete``: Optional callback which will be called when
+ this weak reference ceases to be valid (i.e. either the
+ object or the function is garbage collected). Should take a
+ single argument, which will be passed a pointer to this
+ object.
+ """
+
+ def remove(weak, self=self):
+ """Set self.isDead to True when method or instance is destroyed."""
+ methods = self.deletion_methods[:]
+ del self.deletion_methods[:]
+ try:
+ del self.__class__._all_instances[self.key]
+ except KeyError:
+ pass
+ for function in methods:
+ try:
+ if callable(function):
+ function(self)
+ except Exception:
+ try:
+ traceback.print_exc()
+ except AttributeError:
+ e = sys.exc_info()[1]
+ print(
+ f"Exception during saferef {self} "
+ f"cleanup function {function}: {e}"
+ )
+
+ self.deletion_methods = [on_delete]
+ self.key = self.calculate_key(target)
+ im_self = get_self(target)
+ im_func = get_func(target)
+ self.weak_self = weakref.ref(im_self, remove)
+ self.weak_func = weakref.ref(im_func, remove)
+ self.self_name = str(im_self)
+ self.func_name = str(im_func.__name__)
+
+ @classmethod
+ def calculate_key(cls, target):
+ """Calculate the reference key for this reference.
+
+ Currently this is a two-tuple of the id()'s of the target
+ object and the target function respectively.
+ """
+ return (id(get_self(target)), id(get_func(target)))
+
+ def __str__(self):
+ """Give a friendly representation of the object."""
+ return "{}({}.{})".format(
+ self.__class__.__name__,
+ self.self_name,
+ self.func_name,
+ )
+
+ __repr__ = __str__
+
+ def __hash__(self):
+ return hash((self.self_name, self.key))
+
+ def __nonzero__(self):
+ """Whether we are still a valid reference."""
+ return self() is not None
+
+ def __eq__(self, other):
+ """Compare with another reference."""
+ if not isinstance(other, self.__class__):
+ return operator.eq(self.__class__, type(other))
+ return operator.eq(self.key, other.key)
+
+ def __call__(self):
+ """Return a strong reference to the bound method.
+
+ If the target cannot be retrieved, then will return None,
+ otherwise returns a bound instance method for our object and
+ function.
+
+ Note: You may call this method any number of times, as it does
+ not invalidate the reference.
+ """
+ target = self.weak_self()
+ if target is not None:
+ function = self.weak_func()
+ if function is not None:
+ return function.__get__(target)
+ return None
diff --git a/contrib/python/blinker/py3/blinker/_utilities.py b/contrib/python/blinker/py3/blinker/_utilities.py
new file mode 100644
index 0000000000..4b711c67d3
--- /dev/null
+++ b/contrib/python/blinker/py3/blinker/_utilities.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import typing as t
+from weakref import ref
+
+from blinker._saferef import BoundMethodWeakref
+
+IdentityType = t.Union[t.Tuple[int, int], str, int]
+
+
+class _symbol:
+ def __init__(self, name):
+ """Construct a new named symbol."""
+ self.__name__ = self.name = name
+
+ def __reduce__(self):
+ return symbol, (self.name,)
+
+ def __repr__(self):
+ return self.name
+
+
+_symbol.__name__ = "symbol"
+
+
+class symbol:
+ """A constant symbol.
+
+ >>> symbol('foo') is symbol('foo')
+ True
+ >>> symbol('foo')
+ foo
+
+ A slight refinement of the MAGICCOOKIE=object() pattern. The primary
+ advantage of symbol() is its repr(). They are also singletons.
+
+ Repeated calls of symbol('name') will all return the same instance.
+
+ """
+
+ symbols = {} # type: ignore[var-annotated]
+
+ def __new__(cls, name):
+ try:
+ return cls.symbols[name]
+ except KeyError:
+ return cls.symbols.setdefault(name, _symbol(name))
+
+
+def hashable_identity(obj: object) -> IdentityType:
+ if hasattr(obj, "__func__"):
+ return (id(obj.__func__), id(obj.__self__)) # type: ignore[attr-defined]
+ elif hasattr(obj, "im_func"):
+ return (id(obj.im_func), id(obj.im_self)) # type: ignore[attr-defined]
+ elif isinstance(obj, (int, str)):
+ return obj
+ else:
+ return id(obj)
+
+
+WeakTypes = (ref, BoundMethodWeakref)
+
+
+class annotatable_weakref(ref):
+ """A weakref.ref that supports custom instance attributes."""
+
+ receiver_id: t.Optional[IdentityType]
+ sender_id: t.Optional[IdentityType]
+
+
+def reference( # type: ignore[no-untyped-def]
+ object, callback=None, **annotations
+) -> annotatable_weakref:
+ """Return an annotated weak ref."""
+ if callable(object):
+ weak = callable_reference(object, callback)
+ else:
+ weak = annotatable_weakref(object, callback)
+ for key, value in annotations.items():
+ setattr(weak, key, value)
+ return weak # type: ignore[no-any-return]
+
+
+def callable_reference(object, callback=None):
+ """Return an annotated weak ref, supporting bound instance methods."""
+ if hasattr(object, "im_self") and object.im_self is not None:
+ return BoundMethodWeakref(target=object, on_delete=callback)
+ elif hasattr(object, "__self__") and object.__self__ is not None:
+ return BoundMethodWeakref(target=object, on_delete=callback)
+ return annotatable_weakref(object, callback)
+
+
+class lazy_property:
+ """A @property that is only evaluated once."""
+
+ def __init__(self, deferred):
+ self._deferred = deferred
+ self.__doc__ = deferred.__doc__
+
+ def __get__(self, obj, cls):
+ if obj is None:
+ return self
+ value = self._deferred(obj)
+ setattr(obj, self._deferred.__name__, value)
+ return value
diff --git a/contrib/python/blinker/py3/blinker/base.py b/contrib/python/blinker/py3/blinker/base.py
new file mode 100644
index 0000000000..b9d703586d
--- /dev/null
+++ b/contrib/python/blinker/py3/blinker/base.py
@@ -0,0 +1,558 @@
+"""Signals and events.
+
+A small implementation of signals, inspired by a snippet of Django signal
+API client code seen in a blog post. Signals are first-class objects and
+each manages its own receivers and message emission.
+
+The :func:`signal` function provides singleton behavior for named signals.
+
+"""
+from __future__ import annotations
+
+import typing as t
+from collections import defaultdict
+from contextlib import contextmanager
+from inspect import iscoroutinefunction
+from warnings import warn
+from weakref import WeakValueDictionary
+
+from blinker._utilities import annotatable_weakref
+from blinker._utilities import hashable_identity
+from blinker._utilities import IdentityType
+from blinker._utilities import lazy_property
+from blinker._utilities import reference
+from blinker._utilities import symbol
+from blinker._utilities import WeakTypes
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ T_callable = t.TypeVar("T_callable", bound=t.Callable[..., t.Any])
+
+ T = t.TypeVar("T")
+ P = te.ParamSpec("P")
+
+ AsyncWrapperType = t.Callable[[t.Callable[P, t.Awaitable[T]]], t.Callable[P, T]]
+ SyncWrapperType = t.Callable[[t.Callable[P, T]], t.Callable[P, t.Awaitable[T]]]
+
+ANY = symbol("ANY")
+ANY.__doc__ = 'Token for "any sender".'
+ANY_ID = 0
+
+# NOTE: We need a reference to cast for use in weakref callbacks otherwise
+# t.cast may have already been set to None during finalization.
+cast = t.cast
+
+
+class Signal:
+ """A notification emitter."""
+
+ #: An :obj:`ANY` convenience synonym, allows ``Signal.ANY``
+ #: without an additional import.
+ ANY = ANY
+
+ set_class: type[set] = set
+
+ @lazy_property
+ def receiver_connected(self) -> Signal:
+ """Emitted after each :meth:`connect`.
+
+ The signal sender is the signal instance, and the :meth:`connect`
+ arguments are passed through: *receiver*, *sender*, and *weak*.
+
+ .. versionadded:: 1.2
+
+ """
+ return Signal(doc="Emitted after a receiver connects.")
+
+ @lazy_property
+ def receiver_disconnected(self) -> Signal:
+ """Emitted after :meth:`disconnect`.
+
+ The sender is the signal instance, and the :meth:`disconnect` arguments
+ are passed through: *receiver* and *sender*.
+
+ Note, this signal is emitted **only** when :meth:`disconnect` is
+ called explicitly.
+
+ The disconnect signal can not be emitted by an automatic disconnect
+ (due to a weakly referenced receiver or sender going out of scope),
+ as the receiver and/or sender instances are no longer available for
+ use at the time this signal would be emitted.
+
+ An alternative approach is available by subscribing to
+ :attr:`receiver_connected` and setting up a custom weakref cleanup
+ callback on weak receivers and senders.
+
+ .. versionadded:: 1.2
+
+ """
+ return Signal(doc="Emitted after a receiver disconnects.")
+
+ def __init__(self, doc: str | None = None) -> None:
+ """
+ :param doc: optional. If provided, will be assigned to the signal's
+ __doc__ attribute.
+
+ """
+ if doc:
+ self.__doc__ = doc
+ #: A mapping of connected receivers.
+ #:
+ #: The values of this mapping are not meaningful outside of the
+ #: internal :class:`Signal` implementation, however the boolean value
+ #: of the mapping is useful as an extremely efficient check to see if
+ #: any receivers are connected to the signal.
+ self.receivers: dict[IdentityType, t.Callable | annotatable_weakref] = {}
+ self.is_muted = False
+ self._by_receiver: dict[IdentityType, set[IdentityType]] = defaultdict(
+ self.set_class
+ )
+ self._by_sender: dict[IdentityType, set[IdentityType]] = defaultdict(
+ self.set_class
+ )
+ self._weak_senders: dict[IdentityType, annotatable_weakref] = {}
+
+ def connect(
+ self, receiver: T_callable, sender: t.Any = ANY, weak: bool = True
+ ) -> T_callable:
+ """Connect *receiver* to signal events sent by *sender*.
+
+ :param receiver: A callable. Will be invoked by :meth:`send` with
+ `sender=` as a single positional argument and any ``kwargs`` that
+ were provided to a call to :meth:`send`.
+
+ :param sender: Any object or :obj:`ANY`, defaults to ``ANY``.
+ Restricts notifications delivered to *receiver* to only those
+ :meth:`send` emissions sent by *sender*. If ``ANY``, the receiver
+ will always be notified. A *receiver* may be connected to
+ multiple *sender* values on the same Signal through multiple calls
+ to :meth:`connect`.
+
+ :param weak: If true, the Signal will hold a weakref to *receiver*
+ and automatically disconnect when *receiver* goes out of scope or
+ is garbage collected. Defaults to True.
+
+ """
+ receiver_id = hashable_identity(receiver)
+ receiver_ref: T_callable | annotatable_weakref
+
+ if weak:
+ receiver_ref = reference(receiver, self._cleanup_receiver)
+ receiver_ref.receiver_id = receiver_id
+ else:
+ receiver_ref = receiver
+ sender_id: IdentityType
+ if sender is ANY:
+ sender_id = ANY_ID
+ else:
+ sender_id = hashable_identity(sender)
+
+ self.receivers.setdefault(receiver_id, receiver_ref)
+ self._by_sender[sender_id].add(receiver_id)
+ self._by_receiver[receiver_id].add(sender_id)
+ del receiver_ref
+
+ if sender is not ANY and sender_id not in self._weak_senders:
+ # wire together a cleanup for weakref-able senders
+ try:
+ sender_ref = reference(sender, self._cleanup_sender)
+ sender_ref.sender_id = sender_id
+ except TypeError:
+ pass
+ else:
+ self._weak_senders.setdefault(sender_id, sender_ref)
+ del sender_ref
+
+ # broadcast this connection. if receivers raise, disconnect.
+ if "receiver_connected" in self.__dict__ and self.receiver_connected.receivers:
+ try:
+ self.receiver_connected.send(
+ self, receiver=receiver, sender=sender, weak=weak
+ )
+ except TypeError as e:
+ self.disconnect(receiver, sender)
+ raise e
+ if receiver_connected.receivers and self is not receiver_connected:
+ try:
+ receiver_connected.send(
+ self, receiver_arg=receiver, sender_arg=sender, weak_arg=weak
+ )
+ except TypeError as e:
+ self.disconnect(receiver, sender)
+ raise e
+ return receiver
+
+ def connect_via(
+ self, sender: t.Any, weak: bool = False
+ ) -> t.Callable[[T_callable], T_callable]:
+ """Connect the decorated function as a receiver for *sender*.
+
+ :param sender: Any object or :obj:`ANY`. The decorated function
+ will only receive :meth:`send` emissions sent by *sender*. If
+ ``ANY``, the receiver will always be notified. A function may be
+ decorated multiple times with differing *sender* values.
+
+ :param weak: If true, the Signal will hold a weakref to the
+ decorated function and automatically disconnect when *receiver*
+ goes out of scope or is garbage collected. Unlike
+ :meth:`connect`, this defaults to False.
+
+ The decorated function will be invoked by :meth:`send` with
+ `sender=` as a single positional argument and any ``kwargs`` that
+ were provided to the call to :meth:`send`.
+
+
+ .. versionadded:: 1.1
+
+ """
+
+ def decorator(fn: T_callable) -> T_callable:
+ self.connect(fn, sender, weak)
+ return fn
+
+ return decorator
+
+ @contextmanager
+ def connected_to(
+ self, receiver: t.Callable, sender: t.Any = ANY
+ ) -> t.Generator[None, None, None]:
+ """Execute a block with the signal temporarily connected to *receiver*.
+
+ :param receiver: a receiver callable
+ :param sender: optional, a sender to filter on
+
+ This is a context manager for use in the ``with`` statement. It can
+ be useful in unit tests. *receiver* is connected to the signal for
+ the duration of the ``with`` block, and will be disconnected
+ automatically when exiting the block:
+
+ .. code-block:: python
+
+ with on_ready.connected_to(receiver):
+ # do stuff
+ on_ready.send(123)
+
+ .. versionadded:: 1.1
+
+ """
+ self.connect(receiver, sender=sender, weak=False)
+ try:
+ yield None
+ finally:
+ self.disconnect(receiver)
+
+ @contextmanager
+ def muted(self) -> t.Generator[None, None, None]:
+ """Context manager for temporarily disabling signal.
+ Useful for test purposes.
+ """
+ self.is_muted = True
+ try:
+ yield None
+ except Exception as e:
+ raise e
+ finally:
+ self.is_muted = False
+
+ def temporarily_connected_to(
+ self, receiver: t.Callable, sender: t.Any = ANY
+ ) -> t.ContextManager[None]:
+ """An alias for :meth:`connected_to`.
+
+ :param receiver: a receiver callable
+ :param sender: optional, a sender to filter on
+
+ .. versionadded:: 0.9
+
+ .. versionchanged:: 1.1
+ Renamed to :meth:`connected_to`. ``temporarily_connected_to`` was
+ deprecated in 1.2 and will be removed in a subsequent version.
+
+ """
+ warn(
+ "temporarily_connected_to is deprecated; use connected_to instead.",
+ DeprecationWarning,
+ )
+ return self.connected_to(receiver, sender)
+
+ def send(
+ self,
+ *sender: t.Any,
+ _async_wrapper: AsyncWrapperType | None = None,
+ **kwargs: t.Any,
+ ) -> list[tuple[t.Callable, t.Any]]:
+ """Emit this signal on behalf of *sender*, passing on ``kwargs``.
+
+ Returns a list of 2-tuples, pairing receivers with their return
+ value. The ordering of receiver notification is undefined.
+
+ :param sender: Any object or ``None``. If omitted, synonymous
+ with ``None``. Only accepts one positional argument.
+ :param _async_wrapper: A callable that should wrap a coroutine
+ receiver and run it when called synchronously.
+
+ :param kwargs: Data to be sent to receivers.
+ """
+ if self.is_muted:
+ return []
+
+ sender = self._extract_sender(sender)
+ results = []
+ for receiver in self.receivers_for(sender):
+ if iscoroutinefunction(receiver):
+ if _async_wrapper is None:
+ raise RuntimeError("Cannot send to a coroutine function")
+ receiver = _async_wrapper(receiver)
+ result = receiver(sender, **kwargs)
+ results.append((receiver, result))
+ return results
+
+ async def send_async(
+ self,
+ *sender: t.Any,
+ _sync_wrapper: SyncWrapperType | None = None,
+ **kwargs: t.Any,
+ ) -> list[tuple[t.Callable, t.Any]]:
+ """Emit this signal on behalf of *sender*, passing on ``kwargs``.
+
+ Returns a list of 2-tuples, pairing receivers with their return
+ value. The ordering of receiver notification is undefined.
+
+ :param sender: Any object or ``None``. If omitted, synonymous
+ with ``None``. Only accepts one positional argument.
+ :param _sync_wrapper: A callable that should wrap a synchronous
+ receiver and run it when awaited.
+
+ :param kwargs: Data to be sent to receivers.
+ """
+ if self.is_muted:
+ return []
+
+ sender = self._extract_sender(sender)
+ results = []
+ for receiver in self.receivers_for(sender):
+ if not iscoroutinefunction(receiver):
+ if _sync_wrapper is None:
+ raise RuntimeError("Cannot send to a non-coroutine function")
+ receiver = _sync_wrapper(receiver)
+ result = await receiver(sender, **kwargs)
+ results.append((receiver, result))
+ return results
+
+ def _extract_sender(self, sender: t.Any) -> t.Any:
+ if not self.receivers:
+ # Ensure correct signature even on no-op sends, disable with -O
+ # for lowest possible cost.
+ if __debug__ and sender and len(sender) > 1:
+ raise TypeError(
+ f"send() accepts only one positional argument, {len(sender)} given"
+ )
+ return []
+
+ # Using '*sender' rather than 'sender=None' allows 'sender' to be
+ # used as a keyword argument- i.e. it's an invisible name in the
+ # function signature.
+ if len(sender) == 0:
+ sender = None
+ elif len(sender) > 1:
+ raise TypeError(
+ f"send() accepts only one positional argument, {len(sender)} given"
+ )
+ else:
+ sender = sender[0]
+ return sender
+
+ def has_receivers_for(self, sender: t.Any) -> bool:
+ """True if there is probably a receiver for *sender*.
+
+ Performs an optimistic check only. Does not guarantee that all
+ weakly referenced receivers are still alive. See
+ :meth:`receivers_for` for a stronger search.
+
+ """
+ if not self.receivers:
+ return False
+ if self._by_sender[ANY_ID]:
+ return True
+ if sender is ANY:
+ return False
+ return hashable_identity(sender) in self._by_sender
+
+ def receivers_for(
+ self, sender: t.Any
+ ) -> t.Generator[t.Callable[[t.Any], t.Any], None, None]:
+ """Iterate all live receivers listening for *sender*."""
+ # TODO: test receivers_for(ANY)
+ if self.receivers:
+ sender_id = hashable_identity(sender)
+ if sender_id in self._by_sender:
+ ids = self._by_sender[ANY_ID] | self._by_sender[sender_id]
+ else:
+ ids = self._by_sender[ANY_ID].copy()
+ for receiver_id in ids:
+ receiver = self.receivers.get(receiver_id)
+ if receiver is None:
+ continue
+ if isinstance(receiver, WeakTypes):
+ strong = receiver()
+ if strong is None:
+ self._disconnect(receiver_id, ANY_ID)
+ continue
+ receiver = strong
+ yield receiver # type: ignore[misc]
+
+ def disconnect(self, receiver: t.Callable, sender: t.Any = ANY) -> None:
+ """Disconnect *receiver* from this signal's events.
+
+ :param receiver: a previously :meth:`connected<connect>` callable
+
+ :param sender: a specific sender to disconnect from, or :obj:`ANY`
+ to disconnect from all senders. Defaults to ``ANY``.
+
+ """
+ sender_id: IdentityType
+ if sender is ANY:
+ sender_id = ANY_ID
+ else:
+ sender_id = hashable_identity(sender)
+ receiver_id = hashable_identity(receiver)
+ self._disconnect(receiver_id, sender_id)
+
+ if (
+ "receiver_disconnected" in self.__dict__
+ and self.receiver_disconnected.receivers
+ ):
+ self.receiver_disconnected.send(self, receiver=receiver, sender=sender)
+
+ def _disconnect(self, receiver_id: IdentityType, sender_id: IdentityType) -> None:
+ if sender_id == ANY_ID:
+ if self._by_receiver.pop(receiver_id, False):
+ for bucket in self._by_sender.values():
+ bucket.discard(receiver_id)
+ self.receivers.pop(receiver_id, None)
+ else:
+ self._by_sender[sender_id].discard(receiver_id)
+ self._by_receiver[receiver_id].discard(sender_id)
+
+ def _cleanup_receiver(self, receiver_ref: annotatable_weakref) -> None:
+ """Disconnect a receiver from all senders."""
+ self._disconnect(cast(IdentityType, receiver_ref.receiver_id), ANY_ID)
+
+ def _cleanup_sender(self, sender_ref: annotatable_weakref) -> None:
+ """Disconnect all receivers from a sender."""
+ sender_id = cast(IdentityType, sender_ref.sender_id)
+ assert sender_id != ANY_ID
+ self._weak_senders.pop(sender_id, None)
+ for receiver_id in self._by_sender.pop(sender_id, ()):
+ self._by_receiver[receiver_id].discard(sender_id)
+
+ def _cleanup_bookkeeping(self) -> None:
+ """Prune unused sender/receiver bookkeeping. Not threadsafe.
+
+ Connecting & disconnecting leave behind a small amount of bookkeeping
+ for the receiver and sender values. Typical workloads using Blinker,
+ for example in most web apps, Flask, CLI scripts, etc., are not
+ adversely affected by this bookkeeping.
+
+ With a long-running Python process performing dynamic signal routing
+ with high volume- e.g. connecting to function closures, "senders" are
+ all unique object instances, and doing all of this over and over- you
+ may see memory usage will grow due to extraneous bookkeeping. (An empty
+ set() for each stale sender/receiver pair.)
+
+ This method will prune that bookkeeping away, with the caveat that such
+ pruning is not threadsafe. The risk is that cleanup of a fully
+ disconnected receiver/sender pair occurs while another thread is
+ connecting that same pair. If you are in the highly dynamic, unique
+        receiver/sender situation that has led you to this method, that
+ failure mode is perhaps not a big deal for you.
+ """
+ for mapping in (self._by_sender, self._by_receiver):
+ for _id, bucket in list(mapping.items()):
+ if not bucket:
+ mapping.pop(_id, None)
+
+ def _clear_state(self) -> None:
+ """Throw away all signal state. Useful for unit tests."""
+ self._weak_senders.clear()
+ self.receivers.clear()
+ self._by_sender.clear()
+ self._by_receiver.clear()
+
+
+receiver_connected = Signal(
+ """\
+Sent by a :class:`Signal` after a receiver connects.
+
+:argument: the Signal that was connected to
+:keyword receiver_arg: the connected receiver
+:keyword sender_arg: the sender to connect to
+:keyword weak_arg: true if the connection to receiver_arg is a weak reference
+
+.. deprecated:: 1.2
+
+As of 1.2, individual signals have their own private
+:attr:`~Signal.receiver_connected` and
+:attr:`~Signal.receiver_disconnected` signals with a slightly simplified
+call signature. This global signal is planned to be removed in 1.6.
+
+"""
+)
+
+
+class NamedSignal(Signal):
+ """A named generic notification emitter."""
+
+ def __init__(self, name: str, doc: str | None = None) -> None:
+ Signal.__init__(self, doc)
+
+ #: The name of this signal.
+ self.name = name
+
+ def __repr__(self) -> str:
+ base = Signal.__repr__(self)
+ return f"{base[:-1]}; {self.name!r}>" # noqa: E702
+
+
+class Namespace(dict):
+ """A mapping of signal names to signals."""
+
+ def signal(self, name: str, doc: str | None = None) -> NamedSignal:
+ """Return the :class:`NamedSignal` *name*, creating it if required.
+
+ Repeated calls to this function will return the same signal object.
+
+ """
+ try:
+ return self[name] # type: ignore[no-any-return]
+ except KeyError:
+ result = self.setdefault(name, NamedSignal(name, doc))
+ return result # type: ignore[no-any-return]
+
+
+class WeakNamespace(WeakValueDictionary):
+ """A weak mapping of signal names to signals.
+
+ Automatically cleans up unused Signals when the last reference goes out
+ of scope. This namespace implementation exists for a measure of legacy
+ compatibility with Blinker <= 1.2, and may be dropped in the future.
+
+ .. versionadded:: 1.3
+
+ """
+
+ def signal(self, name: str, doc: str | None = None) -> NamedSignal:
+ """Return the :class:`NamedSignal` *name*, creating it if required.
+
+ Repeated calls to this function will return the same signal object.
+
+ """
+ try:
+ return self[name] # type: ignore[no-any-return]
+ except KeyError:
+ result = self.setdefault(name, NamedSignal(name, doc))
+ return result # type: ignore[no-any-return]
+
+
+signal = Namespace().signal
diff --git a/contrib/python/blinker/py3/blinker/py.typed b/contrib/python/blinker/py3/blinker/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/blinker/py3/blinker/py.typed
diff --git a/contrib/python/blinker/py3/ya.make b/contrib/python/blinker/py3/ya.make
new file mode 100644
index 0000000000..f1ac7057e7
--- /dev/null
+++ b/contrib/python/blinker/py3/ya.make
@@ -0,0 +1,26 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(1.7.0)
+
+LICENSE(MIT)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ blinker/__init__.py
+ blinker/_saferef.py
+ blinker/_utilities.py
+ blinker/base.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/blinker/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+ blinker/py.typed
+)
+
+END()
diff --git a/contrib/python/blinker/ya.make b/contrib/python/blinker/ya.make
new file mode 100644
index 0000000000..269c8ae2e4
--- /dev/null
+++ b/contrib/python/blinker/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/blinker/py2)
+ELSE()
+ PEERDIR(contrib/python/blinker/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/cachetools/py2/.dist-info/METADATA b/contrib/python/cachetools/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..2317651ff4
--- /dev/null
+++ b/contrib/python/cachetools/py2/.dist-info/METADATA
@@ -0,0 +1,124 @@
+Metadata-Version: 2.1
+Name: cachetools
+Version: 3.1.1
+Summary: Extensible memoizing collections and decorators
+Home-page: https://github.com/tkem/cachetools
+Author: Thomas Kemmer
+Author-email: tkemmer@computer.org
+License: MIT
+Keywords: cache caching memoize memoizing memoization LRU LFU TTL
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+cachetools
+========================================================================
+
+This module provides various memoizing collections and decorators,
+including variants of the Python 3 Standard Library `@lru_cache`_
+function decorator.
+
+.. code-block:: python
+
+ from cachetools import cached, LRUCache, TTLCache
+
+ # speed up calculating Fibonacci numbers with dynamic programming
+ @cached(cache={})
+ def fib(n):
+ return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ # cache least recently used Python Enhancement Proposals
+ @cached(cache=LRUCache(maxsize=32))
+ def get_pep(num):
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ # cache weather data for no longer than ten minutes
+ @cached(cache=TTLCache(maxsize=1024, ttl=600))
+ def get_weather(place):
+ return owm.weather_at_place(place).get_weather()
+
+For the purpose of this module, a *cache* is a mutable_ mapping_ of a
+fixed maximum size. When the cache is full, i.e. by adding another
+item the cache would exceed its maximum size, the cache must choose
+which item(s) to discard based on a suitable `cache algorithm`_. In
+general, a cache's size is the total size of its items, and an item's
+size is a property or function of its value, e.g. the result of
+``sys.getsizeof(value)``. For the trivial but common case that each
+item counts as ``1``, a cache's size is equal to the number of its
+items, or ``len(cache)``.
+
+Multiple cache classes based on different caching algorithms are
+implemented, and decorators for easily memoizing function and method
+calls are provided, too.
+
+For more information, please refer to the online documentation_.
+
+
+Installation
+------------------------------------------------------------------------
+
+Install cachetools using pip::
+
+ pip install cachetools
+
+
+Project Resources
+------------------------------------------------------------------------
+
+.. image:: http://img.shields.io/pypi/v/cachetools.svg?style=flat
+ :target: https://pypi.python.org/pypi/cachetools/
+ :alt: Latest PyPI version
+
+.. image:: http://img.shields.io/travis/tkem/cachetools/master.svg?style=flat
+ :target: https://travis-ci.org/tkem/cachetools/
+ :alt: Travis CI build status
+
+.. image:: http://img.shields.io/coveralls/tkem/cachetools/master.svg?style=flat
+ :target: https://coveralls.io/r/tkem/cachetools
+ :alt: Test coverage
+
+.. image:: https://readthedocs.org/projects/cachetools/badge/?version=latest&style=flat
+ :target: http://cachetools.readthedocs.io/en/latest/
+ :alt: Documentation Status
+
+- `Issue Tracker`_
+- `Source Code`_
+- `Change Log`_
+
+
+License
+------------------------------------------------------------------------
+
+Copyright (c) 2014-2019 Thomas Kemmer.
+
+Licensed under the `MIT License`_.
+
+
+.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache
+.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable
+.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping
+.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms
+
+.. _Documentation: http://cachetools.readthedocs.io/en/latest/
+.. _Issue Tracker: https://github.com/tkem/cachetools/issues/
+.. _Source Code: https://github.com/tkem/cachetools/
+.. _Change Log: https://github.com/tkem/cachetools/blob/master/CHANGES.rst
+.. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE
+
+
diff --git a/contrib/python/cachetools/py2/.dist-info/top_level.txt b/contrib/python/cachetools/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..50d14084a9
--- /dev/null
+++ b/contrib/python/cachetools/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+cachetools
diff --git a/contrib/python/cachetools/py2/LICENSE b/contrib/python/cachetools/py2/LICENSE
new file mode 100644
index 0000000000..7da84f4c63
--- /dev/null
+++ b/contrib/python/cachetools/py2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2019 Thomas Kemmer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/cachetools/py2/README.rst b/contrib/python/cachetools/py2/README.rst
new file mode 100644
index 0000000000..96f16340f1
--- /dev/null
+++ b/contrib/python/cachetools/py2/README.rst
@@ -0,0 +1,95 @@
+cachetools
+========================================================================
+
+This module provides various memoizing collections and decorators,
+including variants of the Python 3 Standard Library `@lru_cache`_
+function decorator.
+
+.. code-block:: python
+
+ from cachetools import cached, LRUCache, TTLCache
+
+ # speed up calculating Fibonacci numbers with dynamic programming
+ @cached(cache={})
+ def fib(n):
+ return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ # cache least recently used Python Enhancement Proposals
+ @cached(cache=LRUCache(maxsize=32))
+ def get_pep(num):
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ # cache weather data for no longer than ten minutes
+ @cached(cache=TTLCache(maxsize=1024, ttl=600))
+ def get_weather(place):
+ return owm.weather_at_place(place).get_weather()
+
+For the purpose of this module, a *cache* is a mutable_ mapping_ of a
+fixed maximum size. When the cache is full, i.e. by adding another
+item the cache would exceed its maximum size, the cache must choose
+which item(s) to discard based on a suitable `cache algorithm`_. In
+general, a cache's size is the total size of its items, and an item's
+size is a property or function of its value, e.g. the result of
+``sys.getsizeof(value)``. For the trivial but common case that each
+item counts as ``1``, a cache's size is equal to the number of its
+items, or ``len(cache)``.
+
+Multiple cache classes based on different caching algorithms are
+implemented, and decorators for easily memoizing function and method
+calls are provided, too.
+
+For more information, please refer to the online documentation_.
+
+
+Installation
+------------------------------------------------------------------------
+
+Install cachetools using pip::
+
+ pip install cachetools
+
+
+Project Resources
+------------------------------------------------------------------------
+
+.. image:: http://img.shields.io/pypi/v/cachetools.svg?style=flat
+ :target: https://pypi.python.org/pypi/cachetools/
+ :alt: Latest PyPI version
+
+.. image:: http://img.shields.io/travis/tkem/cachetools/master.svg?style=flat
+ :target: https://travis-ci.org/tkem/cachetools/
+ :alt: Travis CI build status
+
+.. image:: http://img.shields.io/coveralls/tkem/cachetools/master.svg?style=flat
+ :target: https://coveralls.io/r/tkem/cachetools
+ :alt: Test coverage
+
+.. image:: https://readthedocs.org/projects/cachetools/badge/?version=latest&style=flat
+ :target: http://cachetools.readthedocs.io/en/latest/
+ :alt: Documentation Status
+
+- `Issue Tracker`_
+- `Source Code`_
+- `Change Log`_
+
+
+License
+------------------------------------------------------------------------
+
+Copyright (c) 2014-2019 Thomas Kemmer.
+
+Licensed under the `MIT License`_.
+
+
+.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache
+.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable
+.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping
+.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms
+
+.. _Documentation: http://cachetools.readthedocs.io/en/latest/
+.. _Issue Tracker: https://github.com/tkem/cachetools/issues/
+.. _Source Code: https://github.com/tkem/cachetools/
+.. _Change Log: https://github.com/tkem/cachetools/blob/master/CHANGES.rst
+.. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE
diff --git a/contrib/python/cachetools/py2/cachetools/__init__.py b/contrib/python/cachetools/py2/cachetools/__init__.py
new file mode 100644
index 0000000000..d95c58db58
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/__init__.py
@@ -0,0 +1,112 @@
+"""Extensible memoizing collections and decorators."""
+
+from __future__ import absolute_import
+
+import functools
+
+from . import keys
+from .cache import Cache
+from .lfu import LFUCache
+from .lru import LRUCache
+from .rr import RRCache
+from .ttl import TTLCache
+
+__all__ = (
+ 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache',
+ 'cached', 'cachedmethod'
+)
+
+__version__ = '3.1.1'
+
+if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'):
+ _update_wrapper = functools.update_wrapper
+else:
+ def _update_wrapper(wrapper, wrapped):
+ functools.update_wrapper(wrapper, wrapped)
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+
+
+def cached(cache, key=keys.hashkey, lock=None):
+ """Decorator to wrap a function with a memoizing callable that saves
+ results in a cache.
+
+ """
+ def decorator(func):
+ if cache is None:
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+ elif lock is None:
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ try:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+ else:
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ with lock:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ try:
+ with lock:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+ return _update_wrapper(wrapper, func)
+ return decorator
+
+
+def cachedmethod(cache, key=keys.hashkey, lock=None):
+ """Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a cache.
+
+ """
+ def decorator(method):
+ if lock is None:
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(*args, **kwargs)
+ try:
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ try:
+ c[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+ else:
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(*args, **kwargs)
+ try:
+ with lock(self):
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ try:
+ with lock(self):
+ c[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+ return _update_wrapper(wrapper, method)
+ return decorator
diff --git a/contrib/python/cachetools/py2/cachetools/abc.py b/contrib/python/cachetools/py2/cachetools/abc.py
new file mode 100644
index 0000000000..3bc43cc44e
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/abc.py
@@ -0,0 +1,52 @@
+from __future__ import absolute_import
+
+from abc import abstractmethod
+
+try:
+ from collections.abc import MutableMapping
+except ImportError:
+ from collections import MutableMapping
+
+
+class DefaultMapping(MutableMapping):
+
+ __slots__ = ()
+
+ @abstractmethod
+ def __contains__(self, key): # pragma: nocover
+ return False
+
+ @abstractmethod
+ def __getitem__(self, key): # pragma: nocover
+ if hasattr(self.__class__, '__missing__'):
+ return self.__class__.__missing__(self, key)
+ else:
+ raise KeyError(key)
+
+ def get(self, key, default=None):
+ if key in self:
+ return self[key]
+ else:
+ return default
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ if key in self:
+ value = self[key]
+ del self[key]
+ elif default is self.__marker:
+ raise KeyError(key)
+ else:
+ value = default
+ return value
+
+ def setdefault(self, key, default=None):
+ if key in self:
+ value = self[key]
+ else:
+ self[key] = value = default
+ return value
+
+
+DefaultMapping.register(dict)
diff --git a/contrib/python/cachetools/py2/cachetools/cache.py b/contrib/python/cachetools/py2/cachetools/cache.py
new file mode 100644
index 0000000000..5cb8071558
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/cache.py
@@ -0,0 +1,91 @@
+from __future__ import absolute_import
+
+from .abc import DefaultMapping
+
+
+class _DefaultSize(object):
+ def __getitem__(self, _):
+ return 1
+
+ def __setitem__(self, _, value):
+ assert value == 1
+
+ def pop(self, _):
+ return 1
+
+
+class Cache(DefaultMapping):
+ """Mutable mapping to serve as a simple cache or cache base class."""
+
+ __size = _DefaultSize()
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof:
+ self.getsizeof = getsizeof
+ if self.getsizeof is not Cache.getsizeof:
+ self.__size = dict()
+ self.__data = dict()
+ self.__currsize = 0
+ self.__maxsize = maxsize
+
+ def __repr__(self):
+ return '%s(%r, maxsize=%r, currsize=%r)' % (
+ self.__class__.__name__,
+ list(self.__data.items()),
+ self.__maxsize,
+ self.__currsize,
+ )
+
+ def __getitem__(self, key):
+ try:
+ return self.__data[key]
+ except KeyError:
+ return self.__missing__(key)
+
+ def __setitem__(self, key, value):
+ maxsize = self.__maxsize
+ size = self.getsizeof(value)
+ if size > maxsize:
+ raise ValueError('value too large')
+ if key not in self.__data or self.__size[key] < size:
+ while self.__currsize + size > maxsize:
+ self.popitem()
+ if key in self.__data:
+ diffsize = size - self.__size[key]
+ else:
+ diffsize = size
+ self.__data[key] = value
+ self.__size[key] = size
+ self.__currsize += diffsize
+
+ def __delitem__(self, key):
+ size = self.__size.pop(key)
+ del self.__data[key]
+ self.__currsize -= size
+
+ def __contains__(self, key):
+ return key in self.__data
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(self.__data)
+
+ def __len__(self):
+ return len(self.__data)
+
+ @property
+ def maxsize(self):
+ """The maximum size of the cache."""
+ return self.__maxsize
+
+ @property
+ def currsize(self):
+ """The current size of the cache."""
+ return self.__currsize
+
+ @staticmethod
+ def getsizeof(value):
+ """Return the size of a cache element's value."""
+ return 1
diff --git a/contrib/python/cachetools/py2/cachetools/func.py b/contrib/python/cachetools/py2/cachetools/func.py
new file mode 100644
index 0000000000..8ced5dda2e
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/func.py
@@ -0,0 +1,140 @@
+"""`functools.lru_cache` compatible memoizing function decorators."""
+
+from __future__ import absolute_import
+
+import collections
+import functools
+import random
+
+try:
+ from time import monotonic as default_timer
+except ImportError:
+ from time import time as default_timer
+
+try:
+ from threading import RLock
+except ImportError: # pragma: no cover
+ from dummy_threading import RLock
+
+from . import keys
+from .lfu import LFUCache
+from .lru import LRUCache
+from .rr import RRCache
+from .ttl import TTLCache
+
+__all__ = ('lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache')
+
+
+_CacheInfo = collections.namedtuple('CacheInfo', [
+ 'hits', 'misses', 'maxsize', 'currsize'
+])
+
+
+class _UnboundCache(dict):
+
+ maxsize = None
+
+ @property
+ def currsize(self):
+ return len(self)
+
+
+class _UnboundTTLCache(TTLCache):
+ def __init__(self, ttl, timer):
+ TTLCache.__init__(self, float('inf'), ttl, timer)
+
+ @property
+ def maxsize(self):
+ return None
+
+
+def _cache(cache, typed=False):
+ def decorator(func):
+ key = keys.typedkey if typed else keys.hashkey
+ lock = RLock()
+ stats = [0, 0]
+
+ def cache_info():
+ with lock:
+ hits, misses = stats
+ maxsize = cache.maxsize
+ currsize = cache.currsize
+ return _CacheInfo(hits, misses, maxsize, currsize)
+
+ def cache_clear():
+ with lock:
+ try:
+ cache.clear()
+ finally:
+ stats[:] = [0, 0]
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ with lock:
+ try:
+ v = cache[k]
+ stats[0] += 1
+ return v
+ except KeyError:
+ stats[1] += 1
+ v = func(*args, **kwargs)
+ try:
+ with lock:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+ functools.update_wrapper(wrapper, func)
+ if not hasattr(wrapper, '__wrapped__'):
+ wrapper.__wrapped__ = func # Python 2.7
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ return wrapper
+ return decorator
+
+
+def lfu_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Frequently Used (LFU)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ else:
+ return _cache(LFUCache(maxsize), typed)
+
+
+def lru_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ else:
+ return _cache(LRUCache(maxsize), typed)
+
+
+def rr_cache(maxsize=128, choice=random.choice, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ else:
+ return _cache(RRCache(maxsize, choice), typed)
+
+
+def ttl_cache(maxsize=128, ttl=600, timer=default_timer, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm with a per-item time-to-live (TTL) value.
+ """
+ if maxsize is None:
+ return _cache(_UnboundTTLCache(ttl, timer), typed)
+ else:
+ return _cache(TTLCache(maxsize, ttl, timer), typed)
diff --git a/contrib/python/cachetools/py2/cachetools/keys.py b/contrib/python/cachetools/py2/cachetools/keys.py
new file mode 100644
index 0000000000..25ac0a76db
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/keys.py
@@ -0,0 +1,54 @@
+"""Key functions for memoizing decorators."""
+
+from __future__ import absolute_import
+
+__all__ = ('hashkey', 'typedkey')
+
+
+class _HashedTuple(tuple):
+ """A tuple that ensures that hash() will be called no more than once
+ per element, since cache decorators will hash the key multiple
+ times on a cache miss. See also _HashedSeq in the standard
+ library functools implementation.
+
+ """
+
+ __hashvalue = None
+
+ def __hash__(self, hash=tuple.__hash__):
+ hashvalue = self.__hashvalue
+ if hashvalue is None:
+ self.__hashvalue = hashvalue = hash(self)
+ return hashvalue
+
+ def __add__(self, other, add=tuple.__add__):
+ return _HashedTuple(add(self, other))
+
+ def __radd__(self, other, add=tuple.__add__):
+ return _HashedTuple(add(other, self))
+
+ def __getstate__(self):
+ return {}
+
+
+# used for separating keyword arguments; we do not use an object
+# instance here so identity is preserved when pickling/unpickling
+_kwmark = (_HashedTuple,)
+
+
+def hashkey(*args, **kwargs):
+ """Return a cache key for the specified hashable arguments."""
+
+ if kwargs:
+ return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
+ else:
+ return _HashedTuple(args)
+
+
+def typedkey(*args, **kwargs):
+ """Return a typed cache key for the specified hashable arguments."""
+
+ key = hashkey(*args, **kwargs)
+ key += tuple(type(v) for v in args)
+ key += tuple(type(v) for _, v in sorted(kwargs.items()))
+ return key
diff --git a/contrib/python/cachetools/py2/cachetools/lfu.py b/contrib/python/cachetools/py2/cachetools/lfu.py
new file mode 100644
index 0000000000..4857c4e91f
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/lfu.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+
+import collections
+
+from .cache import Cache
+
+
+class LFUCache(Cache):
+ """Least Frequently Used (LFU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__counter = collections.Counter()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ self.__counter[key] -= 1
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__counter[key] -= 1
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__counter[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least frequently used."""
+ try:
+ (key, _), = self.__counter.most_common(1)
+ except ValueError:
+ raise KeyError('%s is empty' % self.__class__.__name__)
+ else:
+ return (key, self.pop(key))
diff --git a/contrib/python/cachetools/py2/cachetools/lru.py b/contrib/python/cachetools/py2/cachetools/lru.py
new file mode 100644
index 0000000000..44ec4f1cee
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/lru.py
@@ -0,0 +1,48 @@
+from __future__ import absolute_import
+
+import collections
+
+from .cache import Cache
+
+
+class LRUCache(Cache):
+ """Least Recently Used (LRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError('%s is empty' % self.__class__.__name__)
+ else:
+ return (key, self.pop(key))
+
+ if hasattr(collections.OrderedDict, 'move_to_end'):
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+ else:
+ def __update(self, key):
+ try:
+ self.__order[key] = self.__order.pop(key)
+ except KeyError:
+ self.__order[key] = None
diff --git a/contrib/python/cachetools/py2/cachetools/rr.py b/contrib/python/cachetools/py2/cachetools/rr.py
new file mode 100644
index 0000000000..09ff7708e0
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/rr.py
@@ -0,0 +1,36 @@
+from __future__ import absolute_import
+
+import random
+
+from .cache import Cache
+
+
+# random.choice cannot be pickled in Python 2.7
+def _choice(seq):
+ return random.choice(seq)
+
+
+class RRCache(Cache):
+ """Random Replacement (RR) cache implementation."""
+
+ def __init__(self, maxsize, choice=random.choice, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+        # TODO: use None as default, assign to self.choice directly?
+ if choice is random.choice:
+ self.__choice = _choice
+ else:
+ self.__choice = choice
+
+ @property
+ def choice(self):
+ """The `choice` function used by the cache."""
+ return self.__choice
+
+ def popitem(self):
+ """Remove and return a random `(key, value)` pair."""
+ try:
+ key = self.__choice(list(self))
+ except IndexError:
+ raise KeyError('%s is empty' % self.__class__.__name__)
+ else:
+ return (key, self.pop(key))
diff --git a/contrib/python/cachetools/py2/cachetools/ttl.py b/contrib/python/cachetools/py2/cachetools/ttl.py
new file mode 100644
index 0000000000..1edde3abcd
--- /dev/null
+++ b/contrib/python/cachetools/py2/cachetools/ttl.py
@@ -0,0 +1,220 @@
+from __future__ import absolute_import
+
+import collections
+
+try:
+ from time import monotonic as default_timer
+except ImportError:
+ from time import time as default_timer
+
+from .cache import Cache
+
+
+class _Link(object):
+
+ __slots__ = ('key', 'expire', 'next', 'prev')
+
+ def __init__(self, key=None, expire=None):
+ self.key = key
+ self.expire = expire
+
+ def __reduce__(self):
+ return _Link, (self.key, self.expire)
+
+ def unlink(self):
+ next = self.next
+ prev = self.prev
+ prev.next = next
+ next.prev = prev
+
+
+class _Timer(object):
+
+ def __init__(self, timer):
+ self.__timer = timer
+ self.__nesting = 0
+
+ def __call__(self):
+ if self.__nesting == 0:
+ return self.__timer()
+ else:
+ return self.__time
+
+ def __enter__(self):
+ if self.__nesting == 0:
+ self.__time = time = self.__timer()
+ else:
+ time = self.__time
+ self.__nesting += 1
+ return time
+
+ def __exit__(self, *exc):
+ self.__nesting -= 1
+
+ def __reduce__(self):
+ return _Timer, (self.__timer,)
+
+ def __getattr__(self, name):
+ return getattr(self.__timer, name)
+
+
+class TTLCache(Cache):
+ """LRU Cache implementation with per-item time-to-live (TTL) value."""
+
+ def __init__(self, maxsize, ttl, timer=default_timer, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__root = root = _Link()
+ root.prev = root.next = root
+ self.__links = collections.OrderedDict()
+ self.__timer = _Timer(timer)
+ self.__ttl = ttl
+
+ def __contains__(self, key):
+ try:
+ link = self.__links[key] # no reordering
+ except KeyError:
+ return False
+ else:
+ return not (link.expire < self.__timer())
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ expired = False
+ else:
+ expired = link.expire < self.__timer()
+ if expired:
+ return self.__missing__(key)
+ else:
+ return cache_getitem(self, key)
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ with self.__timer as time:
+ self.expire(time)
+ cache_setitem(self, key, value)
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ self.__links[key] = link = _Link(key)
+ else:
+ link.unlink()
+ link.expire = time + self.__ttl
+ link.next = root = self.__root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ link = self.__links.pop(key)
+ link.unlink()
+ if link.expire < self.__timer():
+ raise KeyError(key)
+
+ def __iter__(self):
+ root = self.__root
+ curr = root.next
+ while curr is not root:
+ # "freeze" time for iterator access
+ with self.__timer as time:
+ if not (curr.expire < time):
+ yield curr.key
+ curr = curr.next
+
+ def __len__(self):
+ root = self.__root
+ curr = root.next
+ time = self.__timer()
+ count = len(self.__links)
+ while curr is not root and curr.expire < time:
+ count -= 1
+ curr = curr.next
+ return count
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ root = self.__root
+ root.prev = root.next = root
+ for link in sorted(self.__links.values(), key=lambda obj: obj.expire):
+ link.next = root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+ self.expire(self.__timer())
+
+ def __repr__(self, cache_repr=Cache.__repr__):
+ with self.__timer as time:
+ self.expire(time)
+ return cache_repr(self)
+
+ @property
+ def currsize(self):
+ with self.__timer as time:
+ self.expire(time)
+ return super(TTLCache, self).currsize
+
+ @property
+ def timer(self):
+ """The timer function used by the cache."""
+ return self.__timer
+
+ @property
+ def ttl(self):
+ """The time-to-live value of the cache's items."""
+ return self.__ttl
+
+ def expire(self, time=None):
+ """Remove expired items from the cache."""
+ if time is None:
+ time = self.__timer()
+ root = self.__root
+ curr = root.next
+ links = self.__links
+ cache_delitem = Cache.__delitem__
+ while curr is not root and curr.expire < time:
+ cache_delitem(self, curr.key)
+ del links[curr.key]
+ next = curr.next
+ curr.unlink()
+ curr = next
+
+ def clear(self):
+ with self.__timer as time:
+ self.expire(time)
+ Cache.clear(self)
+
+ def get(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.get(self, *args, **kwargs)
+
+ def pop(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.pop(self, *args, **kwargs)
+
+ def setdefault(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.setdefault(self, *args, **kwargs)
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used that
+ has not already expired.
+
+ """
+ with self.__timer as time:
+ self.expire(time)
+ try:
+ key = next(iter(self.__links))
+ except StopIteration:
+ raise KeyError('%s is empty' % self.__class__.__name__)
+ else:
+ return (key, self.pop(key))
+
+ if hasattr(collections.OrderedDict, 'move_to_end'):
+ def __getlink(self, key):
+ value = self.__links[key]
+ self.__links.move_to_end(key)
+ return value
+ else:
+ def __getlink(self, key):
+ value = self.__links.pop(key)
+ self.__links[key] = value
+ return value
diff --git a/contrib/python/cachetools/py2/ya.make b/contrib/python/cachetools/py2/ya.make
new file mode 100644
index 0000000000..c28a02aa15
--- /dev/null
+++ b/contrib/python/cachetools/py2/ya.make
@@ -0,0 +1,30 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(3.1.1)
+
+LICENSE(MIT)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ cachetools/__init__.py
+ cachetools/abc.py
+ cachetools/cache.py
+ cachetools/func.py
+ cachetools/keys.py
+ cachetools/lfu.py
+ cachetools/lru.py
+ cachetools/rr.py
+ cachetools/ttl.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/cachetools/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/cachetools/py3/.dist-info/METADATA b/contrib/python/cachetools/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..9504a9426b
--- /dev/null
+++ b/contrib/python/cachetools/py3/.dist-info/METADATA
@@ -0,0 +1,148 @@
+Metadata-Version: 2.1
+Name: cachetools
+Version: 5.3.2
+Summary: Extensible memoizing collections and decorators
+Home-page: https://github.com/tkem/cachetools/
+Author: Thomas Kemmer
+Author-email: tkemmer@computer.org
+License: MIT
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.7
+License-File: LICENSE
+
+cachetools
+========================================================================
+
+.. image:: https://img.shields.io/pypi/v/cachetools
+ :target: https://pypi.org/project/cachetools/
+ :alt: Latest PyPI version
+
+.. image:: https://img.shields.io/github/actions/workflow/status/tkem/cachetools/ci.yml
+ :target: https://github.com/tkem/cachetools/actions/workflows/ci.yml
+ :alt: CI build status
+
+.. image:: https://img.shields.io/readthedocs/cachetools
+ :target: https://cachetools.readthedocs.io/
+ :alt: Documentation build status
+
+.. image:: https://img.shields.io/codecov/c/github/tkem/cachetools/master.svg
+ :target: https://codecov.io/gh/tkem/cachetools
+ :alt: Test coverage
+
+.. image:: https://img.shields.io/librariesio/sourcerank/pypi/cachetools
+ :target: https://libraries.io/pypi/cachetools
+ :alt: Libraries.io SourceRank
+
+.. image:: https://img.shields.io/github/license/tkem/cachetools
+ :target: https://raw.github.com/tkem/cachetools/master/LICENSE
+ :alt: License
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: black
+
+
+This module provides various memoizing collections and decorators,
+including variants of the Python Standard Library's `@lru_cache`_
+function decorator.
+
+.. code-block:: python
+
+ from cachetools import cached, LRUCache, TTLCache
+
+ # speed up calculating Fibonacci numbers with dynamic programming
+ @cached(cache={})
+ def fib(n):
+ return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ # cache least recently used Python Enhancement Proposals
+ @cached(cache=LRUCache(maxsize=32))
+ def get_pep(num):
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ # cache weather data for no longer than ten minutes
+ @cached(cache=TTLCache(maxsize=1024, ttl=600))
+ def get_weather(place):
+ return owm.weather_at_place(place).get_weather()
+
+For the purpose of this module, a *cache* is a mutable_ mapping_ of a
+fixed maximum size. When the cache is full, i.e. by adding another
+item the cache would exceed its maximum size, the cache must choose
+which item(s) to discard based on a suitable `cache algorithm`_.
+
+This module provides multiple cache classes based on different cache
+algorithms, as well as decorators for easily memoizing function and
+method calls.
+
+
+Installation
+------------------------------------------------------------------------
+
+cachetools is available from PyPI_ and can be installed by running::
+
+ pip install cachetools
+
+Typing stubs for this package are provided by typeshed_ and can be
+installed by running::
+
+ pip install types-cachetools
+
+
+Project Resources
+------------------------------------------------------------------------
+
+- `Documentation`_
+- `Issue tracker`_
+- `Source code`_
+- `Change log`_
+
+
+Related Projects
+------------------------------------------------------------------------
+
+- asyncache_: Helpers to use cachetools with async functions
+- CacheToolsUtils_: Cachetools Utilities
+- `kids.cache`_: Kids caching library
+- shelved-cache_: Persistent cache for Python cachetools
+
+
+License
+------------------------------------------------------------------------
+
+Copyright (c) 2014-2023 Thomas Kemmer.
+
+Licensed under the `MIT License`_.
+
+
+.. _@lru_cache: https://docs.python.org/3/library/functools.html#functools.lru_cache
+.. _mutable: https://docs.python.org/dev/glossary.html#term-mutable
+.. _mapping: https://docs.python.org/dev/glossary.html#term-mapping
+.. _cache algorithm: https://en.wikipedia.org/wiki/Cache_algorithms
+
+.. _PyPI: https://pypi.org/project/cachetools/
+.. _typeshed: https://github.com/python/typeshed/
+.. _Documentation: https://cachetools.readthedocs.io/
+.. _Issue tracker: https://github.com/tkem/cachetools/issues/
+.. _Source code: https://github.com/tkem/cachetools/
+.. _Change log: https://github.com/tkem/cachetools/blob/master/CHANGELOG.rst
+.. _MIT License: https://raw.github.com/tkem/cachetools/master/LICENSE
+
+.. _asyncache: https://pypi.org/project/asyncache/
+.. _CacheToolsUtils: https://pypi.org/project/CacheToolsUtils/
+.. _kids.cache: https://pypi.org/project/kids.cache/
+.. _shelved-cache: https://pypi.org/project/shelved-cache/
diff --git a/contrib/python/cachetools/py3/.dist-info/top_level.txt b/contrib/python/cachetools/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..50d14084a9
--- /dev/null
+++ b/contrib/python/cachetools/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+cachetools
diff --git a/contrib/python/cachetools/py3/LICENSE b/contrib/python/cachetools/py3/LICENSE
new file mode 100644
index 0000000000..bd185ce7ac
--- /dev/null
+++ b/contrib/python/cachetools/py3/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2022 Thomas Kemmer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/cachetools/py3/README.rst b/contrib/python/cachetools/py3/README.rst
new file mode 100644
index 0000000000..42662c5a35
--- /dev/null
+++ b/contrib/python/cachetools/py3/README.rst
@@ -0,0 +1,123 @@
+cachetools
+========================================================================
+
+.. image:: https://img.shields.io/pypi/v/cachetools
+ :target: https://pypi.org/project/cachetools/
+ :alt: Latest PyPI version
+
+.. image:: https://img.shields.io/github/actions/workflow/status/tkem/cachetools/ci.yml
+ :target: https://github.com/tkem/cachetools/actions/workflows/ci.yml
+ :alt: CI build status
+
+.. image:: https://img.shields.io/readthedocs/cachetools
+ :target: https://cachetools.readthedocs.io/
+ :alt: Documentation build status
+
+.. image:: https://img.shields.io/codecov/c/github/tkem/cachetools/master.svg
+ :target: https://codecov.io/gh/tkem/cachetools
+ :alt: Test coverage
+
+.. image:: https://img.shields.io/librariesio/sourcerank/pypi/cachetools
+ :target: https://libraries.io/pypi/cachetools
+ :alt: Libraries.io SourceRank
+
+.. image:: https://img.shields.io/github/license/tkem/cachetools
+ :target: https://raw.github.com/tkem/cachetools/master/LICENSE
+ :alt: License
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: black
+
+
+This module provides various memoizing collections and decorators,
+including variants of the Python Standard Library's `@lru_cache`_
+function decorator.
+
+.. code-block:: python
+
+ from cachetools import cached, LRUCache, TTLCache
+
+ # speed up calculating Fibonacci numbers with dynamic programming
+ @cached(cache={})
+ def fib(n):
+ return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ # cache least recently used Python Enhancement Proposals
+ @cached(cache=LRUCache(maxsize=32))
+ def get_pep(num):
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ # cache weather data for no longer than ten minutes
+ @cached(cache=TTLCache(maxsize=1024, ttl=600))
+ def get_weather(place):
+ return owm.weather_at_place(place).get_weather()
+
+For the purpose of this module, a *cache* is a mutable_ mapping_ of a
+fixed maximum size. When the cache is full, i.e. by adding another
+item the cache would exceed its maximum size, the cache must choose
+which item(s) to discard based on a suitable `cache algorithm`_.
+
+This module provides multiple cache classes based on different cache
+algorithms, as well as decorators for easily memoizing function and
+method calls.
+
+
+Installation
+------------------------------------------------------------------------
+
+cachetools is available from PyPI_ and can be installed by running::
+
+ pip install cachetools
+
+Typing stubs for this package are provided by typeshed_ and can be
+installed by running::
+
+ pip install types-cachetools
+
+
+Project Resources
+------------------------------------------------------------------------
+
+- `Documentation`_
+- `Issue tracker`_
+- `Source code`_
+- `Change log`_
+
+
+Related Projects
+------------------------------------------------------------------------
+
+- asyncache_: Helpers to use cachetools with async functions
+- CacheToolsUtils_: Cachetools Utilities
+- `kids.cache`_: Kids caching library
+- shelved-cache_: Persistent cache for Python cachetools
+
+
+License
+------------------------------------------------------------------------
+
+Copyright (c) 2014-2023 Thomas Kemmer.
+
+Licensed under the `MIT License`_.
+
+
+.. _@lru_cache: https://docs.python.org/3/library/functools.html#functools.lru_cache
+.. _mutable: https://docs.python.org/dev/glossary.html#term-mutable
+.. _mapping: https://docs.python.org/dev/glossary.html#term-mapping
+.. _cache algorithm: https://en.wikipedia.org/wiki/Cache_algorithms
+
+.. _PyPI: https://pypi.org/project/cachetools/
+.. _typeshed: https://github.com/python/typeshed/
+.. _Documentation: https://cachetools.readthedocs.io/
+.. _Issue tracker: https://github.com/tkem/cachetools/issues/
+.. _Source code: https://github.com/tkem/cachetools/
+.. _Change log: https://github.com/tkem/cachetools/blob/master/CHANGELOG.rst
+.. _MIT License: https://raw.github.com/tkem/cachetools/master/LICENSE
+
+.. _asyncache: https://pypi.org/project/asyncache/
+.. _CacheToolsUtils: https://pypi.org/project/CacheToolsUtils/
+.. _kids.cache: https://pypi.org/project/kids.cache/
+.. _shelved-cache: https://pypi.org/project/shelved-cache/
diff --git a/contrib/python/cachetools/py3/cachetools/__init__.py b/contrib/python/cachetools/py3/cachetools/__init__.py
new file mode 100644
index 0000000000..61c12bef47
--- /dev/null
+++ b/contrib/python/cachetools/py3/cachetools/__init__.py
@@ -0,0 +1,844 @@
+"""Extensible memoizing collections and decorators."""
+
+__all__ = (
+ "Cache",
+ "FIFOCache",
+ "LFUCache",
+ "LRUCache",
+ "MRUCache",
+ "RRCache",
+ "TLRUCache",
+ "TTLCache",
+ "cached",
+ "cachedmethod",
+)
+
+__version__ = "5.3.2"
+
+import collections
+import collections.abc
+import functools
+import heapq
+import random
+import time
+
+from . import keys
+
+
+class _DefaultSize:
+
+ __slots__ = ()
+
+ def __getitem__(self, _):
+ return 1
+
+ def __setitem__(self, _, value):
+ assert value == 1
+
+ def pop(self, _):
+ return 1
+
+
+class Cache(collections.abc.MutableMapping):
+ """Mutable mapping to serve as a simple cache or cache base class."""
+
+ __marker = object()
+
+ __size = _DefaultSize()
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof:
+ self.getsizeof = getsizeof
+ if self.getsizeof is not Cache.getsizeof:
+ self.__size = dict()
+ self.__data = dict()
+ self.__currsize = 0
+ self.__maxsize = maxsize
+
+ def __repr__(self):
+ return "%s(%s, maxsize=%r, currsize=%r)" % (
+ self.__class__.__name__,
+ repr(self.__data),
+ self.__maxsize,
+ self.__currsize,
+ )
+
+ def __getitem__(self, key):
+ try:
+ return self.__data[key]
+ except KeyError:
+ return self.__missing__(key)
+
+ def __setitem__(self, key, value):
+ maxsize = self.__maxsize
+ size = self.getsizeof(value)
+ if size > maxsize:
+ raise ValueError("value too large")
+ if key not in self.__data or self.__size[key] < size:
+ while self.__currsize + size > maxsize:
+ self.popitem()
+ if key in self.__data:
+ diffsize = size - self.__size[key]
+ else:
+ diffsize = size
+ self.__data[key] = value
+ self.__size[key] = size
+ self.__currsize += diffsize
+
+ def __delitem__(self, key):
+ size = self.__size.pop(key)
+ del self.__data[key]
+ self.__currsize -= size
+
+ def __contains__(self, key):
+ return key in self.__data
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(self.__data)
+
+ def __len__(self):
+ return len(self.__data)
+
+ def get(self, key, default=None):
+ if key in self:
+ return self[key]
+ else:
+ return default
+
+ def pop(self, key, default=__marker):
+ if key in self:
+ value = self[key]
+ del self[key]
+ elif default is self.__marker:
+ raise KeyError(key)
+ else:
+ value = default
+ return value
+
+ def setdefault(self, key, default=None):
+ if key in self:
+ value = self[key]
+ else:
+ self[key] = value = default
+ return value
+
+ @property
+ def maxsize(self):
+ """The maximum size of the cache."""
+ return self.__maxsize
+
+ @property
+ def currsize(self):
+ """The current size of the cache."""
+ return self.__currsize
+
+ @staticmethod
+ def getsizeof(value):
+ """Return the size of a cache element's value."""
+ return 1
+
+
+class FIFOCache(Cache):
+ """First In First Out (FIFO) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair first inserted."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+
+class LFUCache(Cache):
+ """Least Frequently Used (LFU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__counter = collections.Counter()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__counter[key] -= 1
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__counter[key] -= 1
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__counter[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least frequently used."""
+ try:
+ ((key, _),) = self.__counter.most_common(1)
+ except ValueError:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+
+class LRUCache(Cache):
+ """Least Recently Used (LRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+
+
+class MRUCache(Cache):
+ """Most Recently Used (MRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair most recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key, last=False)
+ except KeyError:
+ self.__order[key] = None
+
+
+class RRCache(Cache):
+ """Random Replacement (RR) cache implementation."""
+
+ def __init__(self, maxsize, choice=random.choice, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__choice = choice
+
+ @property
+ def choice(self):
+ """The `choice` function used by the cache."""
+ return self.__choice
+
+ def popitem(self):
+ """Remove and return a random `(key, value)` pair."""
+ try:
+ key = self.__choice(list(self))
+ except IndexError:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+
+class _TimedCache(Cache):
+ """Base class for time aware cache implementations."""
+
+ class _Timer:
+ def __init__(self, timer):
+ self.__timer = timer
+ self.__nesting = 0
+
+ def __call__(self):
+ if self.__nesting == 0:
+ return self.__timer()
+ else:
+ return self.__time
+
+ def __enter__(self):
+ if self.__nesting == 0:
+ self.__time = time = self.__timer()
+ else:
+ time = self.__time
+ self.__nesting += 1
+ return time
+
+ def __exit__(self, *exc):
+ self.__nesting -= 1
+
+ def __reduce__(self):
+ return _TimedCache._Timer, (self.__timer,)
+
+ def __getattr__(self, name):
+ return getattr(self.__timer, name)
+
+ def __init__(self, maxsize, timer=time.monotonic, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__timer = _TimedCache._Timer(timer)
+
+ def __repr__(self, cache_repr=Cache.__repr__):
+ with self.__timer as time:
+ self.expire(time)
+ return cache_repr(self)
+
+ def __len__(self, cache_len=Cache.__len__):
+ with self.__timer as time:
+ self.expire(time)
+ return cache_len(self)
+
+ @property
+ def currsize(self):
+ with self.__timer as time:
+ self.expire(time)
+ return super().currsize
+
+ @property
+ def timer(self):
+ """The timer function used by the cache."""
+ return self.__timer
+
+ def clear(self):
+ with self.__timer as time:
+ self.expire(time)
+ Cache.clear(self)
+
+ def get(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.get(self, *args, **kwargs)
+
+ def pop(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.pop(self, *args, **kwargs)
+
+ def setdefault(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.setdefault(self, *args, **kwargs)
+
+
+class TTLCache(_TimedCache):
+ """LRU Cache implementation with per-item time-to-live (TTL) value."""
+
+ class _Link:
+
+ __slots__ = ("key", "expires", "next", "prev")
+
+ def __init__(self, key=None, expires=None):
+ self.key = key
+ self.expires = expires
+
+ def __reduce__(self):
+ return TTLCache._Link, (self.key, self.expires)
+
+ def unlink(self):
+ next = self.next
+ prev = self.prev
+ prev.next = next
+ next.prev = prev
+
+ def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None):
+ _TimedCache.__init__(self, maxsize, timer, getsizeof)
+ self.__root = root = TTLCache._Link()
+ root.prev = root.next = root
+ self.__links = collections.OrderedDict()
+ self.__ttl = ttl
+
+ def __contains__(self, key):
+ try:
+ link = self.__links[key] # no reordering
+ except KeyError:
+ return False
+ else:
+ return self.timer() < link.expires
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ expired = False
+ else:
+ expired = not (self.timer() < link.expires)
+ if expired:
+ return self.__missing__(key)
+ else:
+ return cache_getitem(self, key)
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ with self.timer as time:
+ self.expire(time)
+ cache_setitem(self, key, value)
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ self.__links[key] = link = TTLCache._Link(key)
+ else:
+ link.unlink()
+ link.expires = time + self.__ttl
+ link.next = root = self.__root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ link = self.__links.pop(key)
+ link.unlink()
+ if not (self.timer() < link.expires):
+ raise KeyError(key)
+
+ def __iter__(self):
+ root = self.__root
+ curr = root.next
+ while curr is not root:
+ # "freeze" time for iterator access
+ with self.timer as time:
+ if time < curr.expires:
+ yield curr.key
+ curr = curr.next
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ root = self.__root
+ root.prev = root.next = root
+ for link in sorted(self.__links.values(), key=lambda obj: obj.expires):
+ link.next = root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+ self.expire(self.timer())
+
+ @property
+ def ttl(self):
+ """The time-to-live value of the cache's items."""
+ return self.__ttl
+
+ def expire(self, time=None):
+ """Remove expired items from the cache."""
+ if time is None:
+ time = self.timer()
+ root = self.__root
+ curr = root.next
+ links = self.__links
+ cache_delitem = Cache.__delitem__
+ while curr is not root and not (time < curr.expires):
+ cache_delitem(self, curr.key)
+ del links[curr.key]
+ next = curr.next
+ curr.unlink()
+ curr = next
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used that
+ has not already expired.
+
+ """
+ with self.timer as time:
+ self.expire(time)
+ try:
+ key = next(iter(self.__links))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __getlink(self, key):
+ value = self.__links[key]
+ self.__links.move_to_end(key)
+ return value
+
+
+class TLRUCache(_TimedCache):
+ """Time aware Least Recently Used (TLRU) cache implementation."""
+
+ @functools.total_ordering
+ class _Item:
+
+ __slots__ = ("key", "expires", "removed")
+
+ def __init__(self, key=None, expires=None):
+ self.key = key
+ self.expires = expires
+ self.removed = False
+
+ def __lt__(self, other):
+ return self.expires < other.expires
+
+ def __init__(self, maxsize, ttu, timer=time.monotonic, getsizeof=None):
+ _TimedCache.__init__(self, maxsize, timer, getsizeof)
+ self.__items = collections.OrderedDict()
+ self.__order = []
+ self.__ttu = ttu
+
+ def __contains__(self, key):
+ try:
+ item = self.__items[key] # no reordering
+ except KeyError:
+ return False
+ else:
+ return self.timer() < item.expires
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ try:
+ item = self.__getitem(key)
+ except KeyError:
+ expired = False
+ else:
+ expired = not (self.timer() < item.expires)
+ if expired:
+ return self.__missing__(key)
+ else:
+ return cache_getitem(self, key)
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ with self.timer as time:
+ expires = self.__ttu(key, value, time)
+ if not (time < expires):
+ return # skip expired items
+ self.expire(time)
+ cache_setitem(self, key, value)
+ # removing an existing item would break the heap structure, so
+ # only mark it as removed for now
+ try:
+ self.__getitem(key).removed = True
+ except KeyError:
+ pass
+ self.__items[key] = item = TLRUCache._Item(key, expires)
+ heapq.heappush(self.__order, item)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ with self.timer as time:
+ # no self.expire() for performance reasons, e.g. self.clear() [#67]
+ cache_delitem(self, key)
+ item = self.__items.pop(key)
+ item.removed = True
+ if not (time < item.expires):
+ raise KeyError(key)
+
+ def __iter__(self):
+ for curr in self.__order:
+ # "freeze" time for iterator access
+ with self.timer as time:
+ if time < curr.expires and not curr.removed:
+ yield curr.key
+
+ @property
+ def ttu(self):
+ """The local time-to-use function used by the cache."""
+ return self.__ttu
+
+ def expire(self, time=None):
+ """Remove expired items from the cache."""
+ if time is None:
+ time = self.timer()
+ items = self.__items
+ order = self.__order
+ # clean up the heap if too many items are marked as removed
+ if len(order) > len(items) * 2:
+ self.__order = order = [item for item in order if not item.removed]
+ heapq.heapify(order)
+ cache_delitem = Cache.__delitem__
+ while order and (order[0].removed or not (time < order[0].expires)):
+ item = heapq.heappop(order)
+ if not item.removed:
+ cache_delitem(self, item.key)
+ del items[item.key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used that
+ has not already expired.
+
+ """
+ with self.timer as time:
+ self.expire(time)
+ try:
+ key = next(iter(self.__items))
+ except StopIteration:
+ raise KeyError("%s is empty" % self.__class__.__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __getitem(self, key):
+ value = self.__items[key]
+ self.__items.move_to_end(key)
+ return value
+
+
+_CacheInfo = collections.namedtuple(
+ "CacheInfo", ["hits", "misses", "maxsize", "currsize"]
+)
+
+
+def cached(cache, key=keys.hashkey, lock=None, info=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ results in a cache.
+
+ """
+
+ def decorator(func):
+ if info:
+ hits = misses = 0
+
+ if isinstance(cache, Cache):
+
+ def getinfo():
+ nonlocal hits, misses
+ return _CacheInfo(hits, misses, cache.maxsize, cache.currsize)
+
+ elif isinstance(cache, collections.abc.Mapping):
+
+ def getinfo():
+ nonlocal hits, misses
+ return _CacheInfo(hits, misses, None, len(cache))
+
+ else:
+
+ def getinfo():
+ nonlocal hits, misses
+ return _CacheInfo(hits, misses, 0, 0)
+
+ if cache is None:
+
+ def wrapper(*args, **kwargs):
+ nonlocal misses
+ misses += 1
+ return func(*args, **kwargs)
+
+ def cache_clear():
+ nonlocal hits, misses
+ hits = misses = 0
+
+ cache_info = getinfo
+
+ elif lock is None:
+
+ def wrapper(*args, **kwargs):
+ nonlocal hits, misses
+ k = key(*args, **kwargs)
+ try:
+ result = cache[k]
+ hits += 1
+ return result
+ except KeyError:
+ misses += 1
+ v = func(*args, **kwargs)
+ try:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ def cache_clear():
+ nonlocal hits, misses
+ cache.clear()
+ hits = misses = 0
+
+ cache_info = getinfo
+
+ else:
+
+ def wrapper(*args, **kwargs):
+ nonlocal hits, misses
+ k = key(*args, **kwargs)
+ try:
+ with lock:
+ result = cache[k]
+ hits += 1
+ return result
+ except KeyError:
+ with lock:
+ misses += 1
+ v = func(*args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock:
+ return cache.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ def cache_clear():
+ nonlocal hits, misses
+ with lock:
+ cache.clear()
+ hits = misses = 0
+
+ def cache_info():
+ with lock:
+ return getinfo()
+
+ else:
+ if cache is None:
+
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ def cache_clear():
+ pass
+
+ elif lock is None:
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ try:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ def cache_clear():
+ cache.clear()
+
+ else:
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ with lock:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock:
+ return cache.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ def cache_clear():
+ with lock:
+ cache.clear()
+
+ cache_info = None
+
+ wrapper.cache = cache
+ wrapper.cache_key = key
+ wrapper.cache_lock = lock
+ wrapper.cache_clear = cache_clear
+ wrapper.cache_info = cache_info
+
+ return functools.update_wrapper(wrapper, func)
+
+ return decorator
+
+
+def cachedmethod(cache, key=keys.methodkey, lock=None):
+ """Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a cache.
+
+ """
+
+ def decorator(method):
+ if lock is None:
+
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(self, *args, **kwargs)
+ try:
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ try:
+ c[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ def clear(self):
+ c = cache(self)
+ if c is not None:
+ c.clear()
+
+ else:
+
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(self, *args, **kwargs)
+ try:
+ with lock(self):
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock(self):
+ return c.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ def clear(self):
+ c = cache(self)
+ if c is not None:
+ with lock(self):
+ c.clear()
+
+ wrapper.cache = cache
+ wrapper.cache_key = key
+ wrapper.cache_lock = lock
+ wrapper.cache_clear = clear
+
+ return functools.update_wrapper(wrapper, method)
+
+ return decorator
diff --git a/contrib/python/cachetools/py3/cachetools/func.py b/contrib/python/cachetools/py3/cachetools/func.py
new file mode 100644
index 0000000000..0c09a60b49
--- /dev/null
+++ b/contrib/python/cachetools/py3/cachetools/func.py
@@ -0,0 +1,117 @@
+"""`functools.lru_cache` compatible memoizing function decorators."""
+
+__all__ = ("fifo_cache", "lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache")
+
+import math
+import random
+import time
+
+try:
+ from threading import RLock
+except ImportError: # pragma: no cover
+ from dummy_threading import RLock
+
+from . import FIFOCache, LFUCache, LRUCache, MRUCache, RRCache, TTLCache
+from . import cached
+from . import keys
+
+
+class _UnboundTTLCache(TTLCache):
+ def __init__(self, ttl, timer):
+ TTLCache.__init__(self, math.inf, ttl, timer)
+
+ @property
+ def maxsize(self):
+ return None
+
+
+def _cache(cache, maxsize, typed):
+ def decorator(func):
+ key = keys.typedkey if typed else keys.hashkey
+ wrapper = cached(cache=cache, key=key, lock=RLock(), info=True)(func)
+ wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed}
+ return wrapper
+
+ return decorator
+
+
+def fifo_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a First In First Out (FIFO)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache({}, None, typed)
+ elif callable(maxsize):
+ return _cache(FIFOCache(128), 128, typed)(maxsize)
+ else:
+ return _cache(FIFOCache(maxsize), maxsize, typed)
+
+
+def lfu_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Frequently Used (LFU)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache({}, None, typed)
+ elif callable(maxsize):
+ return _cache(LFUCache(128), 128, typed)(maxsize)
+ else:
+ return _cache(LFUCache(maxsize), maxsize, typed)
+
+
+def lru_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache({}, None, typed)
+ elif callable(maxsize):
+ return _cache(LRUCache(128), 128, typed)(maxsize)
+ else:
+ return _cache(LRUCache(maxsize), maxsize, typed)
+
+
+def mru_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Most Recently Used (MRU)
+ algorithm.
+ """
+ if maxsize is None:
+ return _cache({}, None, typed)
+ elif callable(maxsize):
+ return _cache(MRUCache(128), 128, typed)(maxsize)
+ else:
+ return _cache(MRUCache(maxsize), maxsize, typed)
+
+
+def rr_cache(maxsize=128, choice=random.choice, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache({}, None, typed)
+ elif callable(maxsize):
+ return _cache(RRCache(128, choice), 128, typed)(maxsize)
+ else:
+ return _cache(RRCache(maxsize, choice), maxsize, typed)
+
+
+def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm with a per-item time-to-live (TTL) value.
+ """
+ if maxsize is None:
+ return _cache(_UnboundTTLCache(ttl, timer), None, typed)
+ elif callable(maxsize):
+ return _cache(TTLCache(128, ttl, timer), 128, typed)(maxsize)
+ else:
+ return _cache(TTLCache(maxsize, ttl, timer), maxsize, typed)
diff --git a/contrib/python/cachetools/py3/cachetools/keys.py b/contrib/python/cachetools/py3/cachetools/keys.py
new file mode 100644
index 0000000000..f2feb4182b
--- /dev/null
+++ b/contrib/python/cachetools/py3/cachetools/keys.py
@@ -0,0 +1,57 @@
+"""Key functions for memoizing decorators."""
+
+__all__ = ("hashkey", "methodkey", "typedkey")
+
+
+class _HashedTuple(tuple):
+ """A tuple that ensures that hash() will be called no more than once
+ per element, since cache decorators will hash the key multiple
+ times on a cache miss. See also _HashedSeq in the standard
+ library functools implementation.
+
+ """
+
+ __hashvalue = None
+
+ def __hash__(self, hash=tuple.__hash__):
+ hashvalue = self.__hashvalue
+ if hashvalue is None:
+ self.__hashvalue = hashvalue = hash(self)
+ return hashvalue
+
+ def __add__(self, other, add=tuple.__add__):
+ return _HashedTuple(add(self, other))
+
+ def __radd__(self, other, add=tuple.__add__):
+ return _HashedTuple(add(other, self))
+
+ def __getstate__(self):
+ return {}
+
+
+# used for separating keyword arguments; we do not use an object
+# instance here so identity is preserved when pickling/unpickling
+_kwmark = (_HashedTuple,)
+
+
+def hashkey(*args, **kwargs):
+ """Return a cache key for the specified hashable arguments."""
+
+ if kwargs:
+ return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
+ else:
+ return _HashedTuple(args)
+
+
+def methodkey(self, *args, **kwargs):
+ """Return a cache key for use with cached methods."""
+ return hashkey(*args, **kwargs)
+
+
+def typedkey(*args, **kwargs):
+ """Return a typed cache key for the specified hashable arguments."""
+
+ key = hashkey(*args, **kwargs)
+ key += tuple(type(v) for v in args)
+ key += tuple(type(v) for _, v in sorted(kwargs.items()))
+ return key
diff --git a/contrib/python/cachetools/py3/ya.make b/contrib/python/cachetools/py3/ya.make
new file mode 100644
index 0000000000..25d5ac4cdf
--- /dev/null
+++ b/contrib/python/cachetools/py3/ya.make
@@ -0,0 +1,24 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(5.3.2)
+
+LICENSE(MIT)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ cachetools/__init__.py
+ cachetools/func.py
+ cachetools/keys.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/cachetools/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/cachetools/ya.make b/contrib/python/cachetools/ya.make
new file mode 100644
index 0000000000..3a8f917778
--- /dev/null
+++ b/contrib/python/cachetools/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/cachetools/py2)
+ELSE()
+ PEERDIR(contrib/python/cachetools/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/google-auth/py2/.dist-info/METADATA b/contrib/python/google-auth/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..3b2de39d93
--- /dev/null
+++ b/contrib/python/google-auth/py2/.dist-info/METADATA
@@ -0,0 +1,105 @@
+Metadata-Version: 2.1
+Name: google-auth
+Version: 1.35.0
+Summary: Google Authentication Library
+Home-page: https://github.com/googleapis/google-auth-library-python
+Author: Google Cloud Platform
+Author-email: googleapis-packages@google.com
+License: Apache 2.0
+Keywords: google auth oauth client
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*
+Requires-Dist: cachetools (<5.0,>=2.0.0)
+Requires-Dist: pyasn1-modules (>=0.2.1)
+Requires-Dist: setuptools (>=40.3.0)
+Requires-Dist: six (>=1.9.0)
+Requires-Dist: rsa (<4.6) ; python_version < "3.6"
+Requires-Dist: rsa (<5,>=3.1.4) ; python_version >= "3.6"
+Provides-Extra: aiohttp
+Requires-Dist: requests (<3.0.0dev,>=2.20.0) ; extra == 'aiohttp'
+Requires-Dist: aiohttp (<4.0.0dev,>=3.6.2) ; (python_version >= "3.6") and extra == 'aiohttp'
+Provides-Extra: pyopenssl
+Requires-Dist: pyopenssl (>=20.0.0) ; extra == 'pyopenssl'
+Provides-Extra: reauth
+Requires-Dist: pyu2f (>=0.1.5) ; extra == 'reauth'
+
+Google Auth Python Library
+==========================
+
+|pypi|
+
+This library simplifies using Google's various server-to-server authentication
+mechanisms to access Google APIs.
+
+.. |pypi| image:: https://img.shields.io/pypi/v/google-auth.svg
+ :target: https://pypi.python.org/pypi/google-auth
+
+Installing
+----------
+
+You can install using `pip`_::
+
+ $ pip install google-auth
+
+.. _pip: https://pip.pypa.io/en/stable/
+
+For more information on setting up your Python development environment, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform.
+
+.. _`Python Development Environment Setup Guide`: https://cloud.google.com/python/setup
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.5
+
+Deprecated Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+Python == 2.7. Python 2.7 support will be removed on January 1, 2020.
+
+Documentation
+-------------
+
+Google Auth Python Library has usage and reference documentation at https://googleapis.dev/python/google-auth/latest/index.html.
+
+Current Maintainers
+-------------------
+- `@busunkim96 <https://github.com/busunkim96>`_ (Bu Sun Kim)
+
+Authors
+-------
+
+- `@theacodes <https://github.com/theacodes>`_ (Thea Flowers)
+- `@dhermes <https://github.com/dhermes>`_ (Danny Hermes)
+- `@lukesneeringer <https://github.com/lukesneeringer>`_ (Luke Sneeringer)
+
+Contributing
+------------
+
+Contributions to this library are always welcome and highly encouraged.
+
+See `CONTRIBUTING.rst`_ for more information on how to get started.
+
+.. _CONTRIBUTING.rst: https://github.com/googleapis/google-auth-library-python/blob/master/CONTRIBUTING.rst
+
+License
+-------
+
+Apache 2.0 - See `the LICENSE`_ for more information.
+
+.. _the LICENSE: https://github.com/googleapis/google-auth-library-python/blob/master/LICENSE
+
+
diff --git a/contrib/python/google-auth/py2/.dist-info/top_level.txt b/contrib/python/google-auth/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..cb429113e0
--- /dev/null
+++ b/contrib/python/google-auth/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+google
diff --git a/contrib/python/google-auth/py2/LICENSE b/contrib/python/google-auth/py2/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/contrib/python/google-auth/py2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/python/google-auth/py2/README.rst b/contrib/python/google-auth/py2/README.rst
new file mode 100644
index 0000000000..10de0ac06f
--- /dev/null
+++ b/contrib/python/google-auth/py2/README.rst
@@ -0,0 +1,63 @@
+Google Auth Python Library
+==========================
+
+|pypi|
+
+This library simplifies using Google's various server-to-server authentication
+mechanisms to access Google APIs.
+
+.. |pypi| image:: https://img.shields.io/pypi/v/google-auth.svg
+ :target: https://pypi.python.org/pypi/google-auth
+
+Installing
+----------
+
+You can install using `pip`_::
+
+ $ pip install google-auth
+
+.. _pip: https://pip.pypa.io/en/stable/
+
+For more information on setting up your Python development environment, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform.
+
+.. _`Python Development Environment Setup Guide`: https://cloud.google.com/python/setup
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.5
+
+Deprecated Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+Python == 2.7. Python 2.7 support will be removed on January 1, 2020.
+
+Documentation
+-------------
+
+Google Auth Python Library has usage and reference documentation at https://googleapis.dev/python/google-auth/latest/index.html.
+
+Current Maintainers
+-------------------
+- `@busunkim96 <https://github.com/busunkim96>`_ (Bu Sun Kim)
+
+Authors
+-------
+
+- `@theacodes <https://github.com/theacodes>`_ (Thea Flowers)
+- `@dhermes <https://github.com/dhermes>`_ (Danny Hermes)
+- `@lukesneeringer <https://github.com/lukesneeringer>`_ (Luke Sneeringer)
+
+Contributing
+------------
+
+Contributions to this library are always welcome and highly encouraged.
+
+See `CONTRIBUTING.rst`_ for more information on how to get started.
+
+.. _CONTRIBUTING.rst: https://github.com/googleapis/google-auth-library-python/blob/master/CONTRIBUTING.rst
+
+License
+-------
+
+Apache 2.0 - See `the LICENSE`_ for more information.
+
+.. _the LICENSE: https://github.com/googleapis/google-auth-library-python/blob/master/LICENSE
diff --git a/contrib/python/google-auth/py2/google/auth/__init__.py b/contrib/python/google-auth/py2/google/auth/__init__.py
new file mode 100644
index 0000000000..861abe7ea6
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Auth Library for Python."""
+
+import logging
+
+from google.auth import version as google_auth_version
+from google.auth._default import default, load_credentials_from_file
+
+
+__version__ = google_auth_version.__version__
+
+
+__all__ = ["default", "load_credentials_from_file"]
+
+# Set default logging handler to avoid "No handler found" warnings.
+logging.getLogger(__name__).addHandler(logging.NullHandler())
diff --git a/contrib/python/google-auth/py2/google/auth/_cloud_sdk.py b/contrib/python/google-auth/py2/google/auth/_cloud_sdk.py
new file mode 100644
index 0000000000..40e6aec13a
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/_cloud_sdk.py
@@ -0,0 +1,159 @@
+# Copyright 2015 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for reading the Google Cloud SDK's configuration."""
+
+import json
+import os
+import subprocess
+
+import six
+
+from google.auth import environment_vars
+from google.auth import exceptions
+
+
+# The ~/.config subdirectory containing gcloud credentials.
+_CONFIG_DIRECTORY = "gcloud"
+# Windows systems store config at %APPDATA%\gcloud
+_WINDOWS_CONFIG_ROOT_ENV_VAR = "APPDATA"
+# The name of the file in the Cloud SDK config that contains default
+# credentials.
+_CREDENTIALS_FILENAME = "application_default_credentials.json"
+# The name of the Cloud SDK shell script
+_CLOUD_SDK_POSIX_COMMAND = "gcloud"
+_CLOUD_SDK_WINDOWS_COMMAND = "gcloud.cmd"
+# The command to get the Cloud SDK configuration
+_CLOUD_SDK_CONFIG_COMMAND = ("config", "config-helper", "--format", "json")
+# The command to get google user access token
+_CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND = ("auth", "print-access-token")
+# Cloud SDK's application-default client ID
+CLOUD_SDK_CLIENT_ID = (
+ "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com"
+)
+
+
+def get_config_path():
+    """Returns the absolute path to the Cloud SDK's configuration directory.
+
+ Returns:
+ str: The Cloud SDK config path.
+ """
+ # If the path is explicitly set, return that.
+ try:
+ return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
+ except KeyError:
+ pass
+
+ # Non-windows systems store this at ~/.config/gcloud
+ if os.name != "nt":
+ return os.path.join(os.path.expanduser("~"), ".config", _CONFIG_DIRECTORY)
+ # Windows systems store config at %APPDATA%\gcloud
+ else:
+ try:
+ return os.path.join(
+ os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY
+ )
+ except KeyError:
+ # This should never happen unless someone is really
+ # messing with things, but we'll cover the case anyway.
+ drive = os.environ.get("SystemDrive", "C:")
+ return os.path.join(drive, "\\", _CONFIG_DIRECTORY)
+
+
+def get_application_default_credentials_path():
+ """Gets the path to the application default credentials file.
+
+ The path may or may not exist.
+
+ Returns:
+ str: The full path to application default credentials.
+ """
+ config_path = get_config_path()
+ return os.path.join(config_path, _CREDENTIALS_FILENAME)
+
+
+def _run_subprocess_ignore_stderr(command):
+    """Return subprocess.check_output with the given command, ignoring stderr."""
+ with open(os.devnull, "w") as devnull:
+ output = subprocess.check_output(command, stderr=devnull)
+ return output
+
+
+def get_project_id():
+ """Gets the project ID from the Cloud SDK.
+
+ Returns:
+ Optional[str]: The project ID.
+ """
+ if os.name == "nt":
+ command = _CLOUD_SDK_WINDOWS_COMMAND
+ else:
+ command = _CLOUD_SDK_POSIX_COMMAND
+
+ try:
+ # Ignore the stderr coming from gcloud, so it won't be mixed into the output.
+ # https://github.com/googleapis/google-auth-library-python/issues/673
+ output = _run_subprocess_ignore_stderr((command,) + _CLOUD_SDK_CONFIG_COMMAND)
+ except (subprocess.CalledProcessError, OSError, IOError):
+ return None
+
+ try:
+ configuration = json.loads(output.decode("utf-8"))
+ except ValueError:
+ return None
+
+ try:
+ return configuration["configuration"]["properties"]["core"]["project"]
+ except KeyError:
+ return None
+
+
+def get_auth_access_token(account=None):
+ """Load user access token with the ``gcloud auth print-access-token`` command.
+
+ Args:
+ account (Optional[str]): Account to get the access token for. If not
+ specified, the current active account will be used.
+
+ Returns:
+ str: The user access token.
+
+ Raises:
+ google.auth.exceptions.UserAccessTokenError: if failed to get access
+ token from gcloud.
+ """
+ if os.name == "nt":
+ command = _CLOUD_SDK_WINDOWS_COMMAND
+ else:
+ command = _CLOUD_SDK_POSIX_COMMAND
+
+ try:
+ if account:
+ command = (
+ (command,)
+ + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND
+ + ("--account=" + account,)
+ )
+ else:
+ command = (command,) + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND
+
+ access_token = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ # remove the trailing "\n"
+ return access_token.decode("utf-8").strip()
+ except (subprocess.CalledProcessError, OSError, IOError) as caught_exc:
+ new_exc = exceptions.UserAccessTokenError(
+ "Failed to obtain access token", caught_exc
+ )
+ six.raise_from(new_exc, caught_exc)
diff --git a/contrib/python/google-auth/py2/google/auth/_default.py b/contrib/python/google-auth/py2/google/auth/_default.py
new file mode 100644
index 0000000000..f7e308f3e0
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/_default.py
@@ -0,0 +1,488 @@
+# Copyright 2015 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Application default credentials.
+
+Implements application default credentials and project ID detection.
+"""
+
+import io
+import json
+import logging
+import os
+import warnings
+
+import six
+
+from google.auth import environment_vars
+from google.auth import exceptions
+import google.auth.transport._http_client
+
# Module-level logger used throughout the credential-discovery flow.
_LOGGER = logging.getLogger(__name__)

# Valid types accepted for file-based credentials.
_AUTHORIZED_USER_TYPE = "authorized_user"
_SERVICE_ACCOUNT_TYPE = "service_account"
_EXTERNAL_ACCOUNT_TYPE = "external_account"
_VALID_TYPES = (_AUTHORIZED_USER_TYPE, _SERVICE_ACCOUNT_TYPE, _EXTERNAL_ACCOUNT_TYPE)

# Help message when no credentials can be found.
_HELP_MESSAGE = """\
Could not automatically determine credentials. Please set {env} or \
explicitly create credentials and re-run the application. For more \
information, please see \
https://cloud.google.com/docs/authentication/getting-started
""".format(
    env=environment_vars.CREDENTIALS
).strip()

# Warning when using Cloud SDK user credentials
_CLOUD_SDK_CREDENTIALS_WARNING = """\
Your application has authenticated using end user credentials from Google \
Cloud SDK without a quota project. You might receive a "quota exceeded" \
or "API not enabled" error. We recommend you rerun \
`gcloud auth application-default login` and make sure a quota project is \
added. Or you can use service accounts instead. For more information \
about service accounts, see https://cloud.google.com/docs/authentication/"""
+
+
def _warn_about_problematic_credentials(credentials):
    """Determines if the credentials are problematic.

    Credentials from the Cloud SDK that are associated with Cloud SDK's project
    are problematic because they may not have APIs enabled and have limited
    quota. If this is the case, warn about it.
    """
    # Imported lazily so module import does not require _cloud_sdk.
    from google.auth import _cloud_sdk

    is_cloud_sdk_credential = credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID
    if is_cloud_sdk_credential:
        warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING)
+
+
def load_credentials_from_file(
    filename, scopes=None, default_scopes=None, quota_project_id=None, request=None
):
    """Loads Google credentials from a file.

    The credentials file must be a service account key, stored authorized
    user credentials or external account credentials.

    Args:
        filename (str): The full path to the credentials file.
        scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
        quota_project_id (Optional[str]): The project ID used for
            quota and billing.
        request (Optional[google.auth.transport.Request]): An object used to make
            HTTP requests. This is used to determine the associated project ID
            for a workload identity pool resource (external account credentials).
            If not specified, then it will use a
            google.auth.transport.requests.Request client to make requests.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. Authorized user credentials do not
            have the project ID information. External account credentials project
            IDs may not always be determined.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the file is in the
            wrong format or is missing.
    """
    if not os.path.exists(filename):
        raise exceptions.DefaultCredentialsError(
            "File {} was not found.".format(filename)
        )

    with io.open(filename, "r") as file_obj:
        try:
            info = json.load(file_obj)
        except ValueError as caught_exc:
            # Re-raise as the library's error type, chaining the JSON error.
            new_exc = exceptions.DefaultCredentialsError(
                "File {} is not a valid json file.".format(filename), caught_exc
            )
            six.raise_from(new_exc, caught_exc)

    # The type key should indicate that the file is either a service account
    # credentials file or an authorized user credentials file.
    credential_type = info.get("type")

    if credential_type == _AUTHORIZED_USER_TYPE:
        # Imported lazily to keep module import light.
        from google.oauth2 import credentials

        try:
            credentials = credentials.Credentials.from_authorized_user_info(
                info, scopes=scopes
            )
        except ValueError as caught_exc:
            msg = "Failed to load authorized user credentials from {}".format(filename)
            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
            six.raise_from(new_exc, caught_exc)
        if quota_project_id:
            credentials = credentials.with_quota_project(quota_project_id)
        if not credentials.quota_project_id:
            # User credentials without a quota project can hit quota errors;
            # warn when they look like Cloud SDK credentials.
            _warn_about_problematic_credentials(credentials)
        # Authorized user credential files do not carry a project ID.
        return credentials, None

    elif credential_type == _SERVICE_ACCOUNT_TYPE:
        from google.oauth2 import service_account

        try:
            credentials = service_account.Credentials.from_service_account_info(
                info, scopes=scopes, default_scopes=default_scopes
            )
        except ValueError as caught_exc:
            msg = "Failed to load service account credentials from {}".format(filename)
            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
            six.raise_from(new_exc, caught_exc)
        if quota_project_id:
            credentials = credentials.with_quota_project(quota_project_id)
        # Service account key files usually embed their project ID.
        return credentials, info.get("project_id")

    elif credential_type == _EXTERNAL_ACCOUNT_TYPE:
        credentials, project_id = _get_external_account_credentials(
            info,
            filename,
            scopes=scopes,
            default_scopes=default_scopes,
            request=request,
        )
        if quota_project_id:
            credentials = credentials.with_quota_project(quota_project_id)
        return credentials, project_id

    else:
        raise exceptions.DefaultCredentialsError(
            "The file {file} does not have a valid type. "
            "Type is {type}, expected one of {valid_types}.".format(
                file=filename, type=credential_type, valid_types=_VALID_TYPES
            )
        )
+
+
def _get_gcloud_sdk_credentials():
    """Gets the credentials and project ID from the Cloud SDK."""
    from google.auth import _cloud_sdk

    _LOGGER.debug("Checking Cloud SDK credentials as part of auth process...")

    # The Cloud SDK keeps application default credentials in a well-known
    # file; if it is absent there is nothing to load.
    credentials_filename = _cloud_sdk.get_application_default_credentials_path()
    if not os.path.isfile(credentials_filename):
        _LOGGER.debug("Cloud SDK credentials not found on disk; not using them")
        return None, None

    credentials, project_id = load_credentials_from_file(credentials_filename)

    # The credentials file may not carry a project ID; fall back to the
    # Cloud SDK's active project.
    return credentials, project_id or _cloud_sdk.get_project_id()
+
+
def _get_explicit_environ_credentials():
    """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
    variable.

    Returns:
        Tuple[Optional[google.auth.credentials.Credentials], Optional[str]]:
            Loaded credentials and project ID, or ``(None, None)`` when the
            environment variable is not set.
    """
    from google.auth import _cloud_sdk

    cloud_sdk_adc_path = _cloud_sdk.get_application_default_credentials_path()
    explicit_file = os.environ.get(environment_vars.CREDENTIALS)

    _LOGGER.debug(
        "Checking %s for explicit credentials as part of auth process...", explicit_file
    )

    if explicit_file is None:
        return None, None

    if explicit_file == cloud_sdk_adc_path:
        # Cloud sdk flow calls gcloud to fetch project id, so if the explicit
        # file path is cloud sdk credentials path, then we should fall back
        # to cloud sdk flow, otherwise project id cannot be obtained.
        _LOGGER.debug(
            "Explicit credentials path %s is the same as Cloud SDK credentials path, fall back to Cloud SDK credentials flow...",
            explicit_file,
        )
        return _get_gcloud_sdk_credentials()

    # Reuse the value captured above instead of re-reading os.environ, which
    # avoids a redundant lookup and a race if the variable changes mid-call.
    credentials, project_id = load_credentials_from_file(explicit_file)
    return credentials, project_id
+
+
def _get_gae_credentials():
    """Gets Google App Engine App Identity credentials and project ID.

    Returns:
        Tuple[Optional[google.auth.app_engine.Credentials], Optional[str]]:
            Credentials and project ID, or ``(None, None)`` when not running
            on first-generation App Engine or the APIs are unavailable.
    """
    # If not GAE gen1, prefer the metadata service even if the GAE APIs are
    # available as per https://google.aip.dev/auth/4115.
    if os.environ.get(environment_vars.LEGACY_APPENGINE_RUNTIME) != "python27":
        return None, None

    # While this library is normally bundled with app_engine, there are
    # some cases where it's not available, so we tolerate ImportError.
    try:
        _LOGGER.debug("Checking for App Engine runtime as part of auth process...")
        import google.auth.app_engine as app_engine
    except ImportError:
        _LOGGER.warning("Import of App Engine auth library failed.")
        return None, None

    try:
        credentials = app_engine.Credentials()
        project_id = app_engine.get_project_id()
        return credentials, project_id
    except EnvironmentError:
        # Fixed grammar of the log message ("cannot authentication" ->
        # "cannot authenticate").
        _LOGGER.debug(
            "No App Engine library was found so cannot authenticate via App Engine Identity Credentials."
        )
        return None, None
+
+
def _get_gce_credentials(request=None):
    """Gets credentials and project ID from the GCE Metadata Service."""
    # Ping requires a transport, but we want application default credentials
    # to require no arguments. So, we'll use the _http_client transport which
    # uses http.client. This is only acceptable because the metadata server
    # doesn't do SSL and never requires proxies.

    # While this library is normally bundled with compute_engine, there are
    # some cases where it's not available, so we tolerate ImportError.
    try:
        from google.auth import compute_engine
        from google.auth.compute_engine import _metadata
    except ImportError:
        _LOGGER.warning("Import of Compute Engine auth library failed.")
        return None, None

    if request is None:
        request = google.auth.transport._http_client.Request()

    if not _metadata.ping(request=request):
        _LOGGER.warning(
            "Authentication failed using Compute Engine authentication due to unavailable metadata server."
        )
        return None, None

    # Metadata server is reachable; a missing project ID is tolerated.
    try:
        project_id = _metadata.get_project_id(request=request)
    except exceptions.TransportError:
        project_id = None

    return compute_engine.Credentials(), project_id
+
+
def _get_external_account_credentials(
    info, filename, scopes=None, default_scopes=None, request=None
):
    """Loads external account Credentials from the parsed external account info.

    The credentials information must correspond to a supported external account
    credentials.

    Args:
        info (Mapping[str, str]): The external account info in Google format.
        filename (str): The full path to the credentials file.
        scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary.
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
        request (Optional[google.auth.transport.Request]): An object used to make
            HTTP requests. This is used to determine the associated project ID
            for a workload identity pool resource (external account credentials).
            If not specified, then it will use a
            google.auth.transport.requests.Request client to make requests.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. External account credentials project
            IDs may not always be determined.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the info dictionary
            is in the wrong format or is missing required information.
    """
    # There are currently 2 types of external_account credentials.
    try:
        # Check if configuration corresponds to an AWS credentials.
        from google.auth import aws

        credentials = aws.Credentials.from_info(
            info, scopes=scopes, default_scopes=default_scopes
        )
    except ValueError:
        try:
            # Check if configuration corresponds to an Identity Pool credentials.
            from google.auth import identity_pool

            credentials = identity_pool.Credentials.from_info(
                info, scopes=scopes, default_scopes=default_scopes
            )
        except ValueError:
            # If the configuration is invalid or does not correspond to any
            # supported external_account credentials, raise an error.
            raise exceptions.DefaultCredentialsError(
                "Failed to load external account credentials from {}".format(filename)
            )
    # An HTTP transport is needed to resolve the workload identity pool's
    # project ID; default to the requests-based transport.
    if request is None:
        request = google.auth.transport.requests.Request()

    return credentials, credentials.get_project_id(request=request)
+
+
def default(scopes=None, request=None, quota_project_id=None, default_scopes=None):
    """Gets the default credentials for the current environment.

    `Application Default Credentials`_ provides an easy way to obtain
    credentials to call Google APIs for server-to-server or local applications.
    This function acquires credentials from the environment in the following
    order:

    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
       to the path of a valid service account JSON private key file, then it is
       loaded and returned. The project ID returned is the project ID defined
       in the service account file if available (some older files do not
       contain project ID information).

       If the environment variable is set to the path of a valid external
       account JSON configuration file (workload identity federation), then the
       configuration file is used to determine and retrieve the external
       credentials from the current environment (AWS, Azure, etc).
       These will then be exchanged for Google access tokens via the Google STS
       endpoint.
       The project ID returned in this case is the one corresponding to the
       underlying workload identity pool resource if determinable.
    2. If the `Google Cloud SDK`_ is installed and has application default
       credentials set they are loaded and returned.

       To enable application default credentials with the Cloud SDK run::

            gcloud auth application-default login

       If the Cloud SDK has an active project, the project ID is returned. The
       active project can be set using::

            gcloud config set project

    3. If the application is running in the `App Engine standard environment`_
       (first generation) then the credentials and project ID from the
       `App Identity Service`_ are used.
    4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or
       the `App Engine flexible environment`_ or the `App Engine standard
       environment`_ (second generation) then the credentials and project ID
       are obtained from the `Metadata Service`_.
    5. If no credentials are found,
       :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.

    .. _Application Default Credentials: https://developers.google.com\
            /identity/protocols/application-default-credentials
    .. _Google Cloud SDK: https://cloud.google.com/sdk
    .. _App Engine standard environment: https://cloud.google.com/appengine
    .. _App Identity Service: https://cloud.google.com/appengine/docs/python\
            /appidentity/
    .. _Compute Engine: https://cloud.google.com/compute
    .. _App Engine flexible environment: https://cloud.google.com\
            /appengine/flexible
    .. _Metadata Service: https://cloud.google.com/compute/docs\
            /storing-retrieving-metadata
    .. _Cloud Run: https://cloud.google.com/run

    Example::

        import google.auth

        credentials, project_id = google.auth.default()

    Args:
        scopes (Sequence[str]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary.
        request (Optional[google.auth.transport.Request]): An object used to make
            HTTP requests. This is used to either detect whether the application
            is running on Compute Engine or to determine the associated project
            ID for a workload identity pool resource (external account
            credentials). If not specified, then it will either use the standard
            library http client to make requests for Compute Engine credentials
            or a google.auth.transport.requests.Request client for external
            account credentials.
        quota_project_id (Optional[str]): The project ID used for
            quota and billing.
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
    Returns:
        Tuple[~google.auth.credentials.Credentials, Optional[str]]:
            the current environment's credentials and project ID. Project ID
            may be None, which indicates that the Project ID could not be
            ascertained from the environment.

    Raises:
        ~google.auth.exceptions.DefaultCredentialsError:
            If no credentials were found, or if the credentials found were
            invalid.
    """
    from google.auth.credentials import with_scopes_if_required

    # An explicitly-set project overrides whatever a checker discovers.
    explicit_project_id = os.environ.get(
        environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)
    )

    checkers = (
        # Avoid passing scopes here to prevent passing scopes to user credentials.
        # with_scopes_if_required() below will ensure scopes/default scopes are
        # safely set on the returned credentials since requires_scopes will
        # guard against setting scopes on user credentials.
        _get_explicit_environ_credentials,
        _get_gcloud_sdk_credentials,
        _get_gae_credentials,
        lambda: _get_gce_credentials(request),
    )

    # Try each credential source in precedence order; the first that yields
    # credentials wins.
    for checker in checkers:
        credentials, project_id = checker()
        if credentials is not None:
            credentials = with_scopes_if_required(
                credentials, scopes, default_scopes=default_scopes
            )

            # For external account credentials, scopes are required to determine
            # the project ID. Try to get the project ID again if not yet
            # determined.
            if not project_id and callable(
                getattr(credentials, "get_project_id", None)
            ):
                if request is None:
                    request = google.auth.transport.requests.Request()
                project_id = credentials.get_project_id(request=request)

            if quota_project_id:
                credentials = credentials.with_quota_project(quota_project_id)

            effective_project_id = explicit_project_id or project_id
            if not effective_project_id:
                _LOGGER.warning(
                    "No project ID could be determined. Consider running "
                    "`gcloud config set project` or setting the %s "
                    "environment variable",
                    environment_vars.PROJECT,
                )
            return credentials, effective_project_id

    raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)
diff --git a/contrib/python/google-auth/py2/google/auth/_helpers.py b/contrib/python/google-auth/py2/google/auth/_helpers.py
new file mode 100644
index 0000000000..21c987a732
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/_helpers.py
@@ -0,0 +1,232 @@
+# Copyright 2015 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for commonly used utilities."""
+
+import base64
+import calendar
+import datetime
+
+import six
+from six.moves import urllib
+
+
+CLOCK_SKEW_SECS = 10 # 10 seconds
+CLOCK_SKEW = datetime.timedelta(seconds=CLOCK_SKEW_SECS)
+
+
def copy_docstring(source_class):
    """Decorator that copies a method's docstring from another class.

    Args:
        source_class (type): The class that has the documented method.

    Returns:
        Callable: A decorator that will copy the docstring of the same
            named method in the source class to the decorated method.
    """

    def decorator(method):
        """Copy the docstring from ``source_class`` onto ``method``.

        Args:
            method (Callable): The method to copy the docstring to.

        Returns:
            Callable: the same method passed in with an updated docstring.

        Raises:
            ValueError: if the method already has a docstring.
        """
        # Refuse to clobber an existing docstring.
        if method.__doc__:
            raise ValueError("Method already has a docstring.")
        method.__doc__ = getattr(source_class, method.__name__).__doc__
        return method

    return decorator
+
+
def utcnow():
    """Returns the current UTC datetime.

    Returns:
        datetime: The current time in UTC (naive, no tzinfo attached).
    """
    current = datetime.datetime.utcnow()
    return current
+
+
def datetime_to_secs(value):
    """Convert a datetime object to the number of seconds since the UNIX epoch.

    Args:
        value (datetime): The datetime to convert (interpreted as UTC).

    Returns:
        int: The number of seconds since the UNIX epoch.
    """
    epoch_seconds = calendar.timegm(value.utctimetuple())
    return epoch_seconds
+
+
def to_bytes(value, encoding="utf-8"):
    """Converts a string value to bytes, if necessary.

    Unfortunately, ``six.b`` is insufficient for this task since in
    Python 2 because it does not modify ``unicode`` objects.

    Args:
        value (Union[str, bytes]): The value to be converted.
        encoding (str): The encoding to use to convert unicode to bytes.
            Defaults to "utf-8".

    Returns:
        bytes: The original value converted to bytes (if unicode) or as
            passed in if it started out as bytes.

    Raises:
        ValueError: If the value could not be converted to bytes.
    """
    if isinstance(value, six.text_type):
        converted = value.encode(encoding)
    else:
        converted = value

    if not isinstance(converted, six.binary_type):
        raise ValueError("{0!r} could not be converted to bytes".format(value))
    return converted
+
+
def from_bytes(value):
    """Converts bytes to a string value, if necessary.

    Args:
        value (Union[str, bytes]): The value to be converted.

    Returns:
        str: The original value converted to unicode (if bytes) or as passed in
            if it started out as unicode.

    Raises:
        ValueError: If the value could not be converted to unicode.
    """
    if isinstance(value, six.binary_type):
        decoded = value.decode("utf-8")
    else:
        decoded = value

    if not isinstance(decoded, six.text_type):
        raise ValueError("{0!r} could not be converted to unicode".format(value))
    return decoded
+
+
def update_query(url, params, remove=None):
    """Updates a URL's query parameters.

    Replaces any current values if they are already present in the URL.

    Args:
        url (str): The URL to update.
        params (Mapping[str, str]): A mapping of query parameter
            keys to values.
        remove (Sequence[str]): Parameters to remove from the query string.

    Returns:
        str: The URL with updated query parameters.

    Examples:

        >>> url = 'http://example.com?a=1'
        >>> update_query(url, {'a': '2'})
        http://example.com?a=2
        >>> update_query(url, {'b': '3'})
        http://example.com?a=1&b=3
        >> update_query(url, {'b': '3'}, remove=['a'])
        http://example.com?b=3

    """
    if remove is None:
        remove = []

    # Decompose the URL and its query string.
    parts = urllib.parse.urlparse(url)
    query_params = urllib.parse.parse_qs(parts.query)

    # New parameters override any existing ones with the same key.
    query_params.update(params)

    # Drop every key listed in ``remove``.
    kept_params = {}
    for key, value in six.iteritems(query_params):
        if key not in remove:
            kept_params[key] = value

    # Re-encode the query string and reassemble the URL.
    new_query = urllib.parse.urlencode(kept_params, doseq=True)
    return urllib.parse.urlunparse(parts._replace(query=new_query))
+
+
def scopes_to_string(scopes):
    """Converts scope value to a string suitable for sending to OAuth 2.0
    authorization servers.

    Args:
        scopes (Sequence[str]): The sequence of scopes to convert.

    Returns:
        str: The scopes formatted as a single string.
    """
    # OAuth 2.0 servers expect a single space-delimited scope string.
    separator = " "
    return separator.join(scopes)
+
+
def string_to_scopes(scopes):
    """Converts a stringified scopes value to a list.

    Args:
        scopes (Union[Sequence, str]): The string of space-separated scopes
            to convert.

    Returns:
        Sequence(str): The separated scopes.
    """
    # A falsy value (None, "") yields an empty list, mirroring
    # scopes_to_string's round trip.
    return scopes.split(" ") if scopes else []
+
+
def padded_urlsafe_b64decode(value):
    """Decodes base64 strings lacking padding characters.

    Google infrastructure tends to omit the base64 padding characters.

    Args:
        value (Union[str, bytes]): The encoded value.

    Returns:
        bytes: The decoded value
    """
    b64string = to_bytes(value)
    # Valid base64 input length is a multiple of 4; restore any omitted "=".
    missing_padding = -len(b64string) % 4
    return base64.urlsafe_b64decode(b64string + b"=" * missing_padding)
+
+
def unpadded_urlsafe_b64encode(value):
    """Encodes base64 strings removing any padding characters.

    `rfc 7515`_ defines Base64url to NOT include any padding
    characters, but the stdlib doesn't do that by default.

    _rfc7515: https://tools.ietf.org/html/rfc7515#page-6

    Args:
        value (Union[str|bytes]): The bytes-like value to encode

    Returns:
        Union[str|bytes]: The encoded value
    """
    encoded = base64.urlsafe_b64encode(value)
    # Strip the "=" padding that urlsafe_b64encode appends.
    return encoded.rstrip(b"=")
diff --git a/contrib/python/google-auth/py2/google/auth/_oauth2client.py b/contrib/python/google-auth/py2/google/auth/_oauth2client.py
new file mode 100644
index 0000000000..95a9876f31
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/_oauth2client.py
@@ -0,0 +1,169 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for transitioning from oauth2client to google-auth.
+
+.. warning::
+ This module is private as it is intended to assist first-party downstream
+ clients with the transition from oauth2client to google-auth.
+"""
+
+from __future__ import absolute_import
+
+import six
+
+from google.auth import _helpers
+import google.auth.app_engine
+import google.auth.compute_engine
+import google.oauth2.credentials
+import google.oauth2.service_account
+
+try:
+ import oauth2client.client
+ import oauth2client.contrib.gce
+ import oauth2client.service_account
+except ImportError as caught_exc:
+ six.raise_from(ImportError("oauth2client is not installed."), caught_exc)
+
+try:
+ import oauth2client.contrib.appengine # pytype: disable=import-error
+
+ _HAS_APPENGINE = True
+except ImportError:
+ _HAS_APPENGINE = False
+
+
+_CONVERT_ERROR_TMPL = "Unable to convert {} to a google-auth credentials class."
+
+
def _convert_oauth2_credentials(credentials):
    """Converts to :class:`google.oauth2.credentials.Credentials`.

    Args:
        credentials (Union[oauth2client.client.OAuth2Credentials,
            oauth2client.client.GoogleCredentials]): The credentials to
            convert.

    Returns:
        google.oauth2.credentials.Credentials: The converted credentials.
    """
    converted = google.oauth2.credentials.Credentials(
        token=credentials.access_token,
        refresh_token=credentials.refresh_token,
        token_uri=credentials.token_uri,
        client_id=credentials.client_id,
        client_secret=credentials.client_secret,
        scopes=credentials.scopes,
    )

    # The constructor does not accept an expiry, so carry it over directly.
    converted._expires = credentials.token_expiry

    return converted
+
+
def _convert_service_account_credentials(credentials):
    """Converts to :class:`google.oauth2.service_account.Credentials`.

    Args:
        credentials (Union[
            oauth2client.service_account.ServiceAccountCredentials,
            oauth2client.service_account._JWTAccessCredentials]): The
            credentials to convert.

    Returns:
        google.oauth2.service_account.Credentials: The converted credentials.
    """
    # Copy so the original credentials' serialization data is not mutated.
    info = dict(credentials.serialization_data)
    info["token_uri"] = credentials.token_uri
    return google.oauth2.service_account.Credentials.from_service_account_info(info)
+
+
def _convert_gce_app_assertion_credentials(credentials):
    """Converts to :class:`google.auth.compute_engine.Credentials`.

    Args:
        credentials (oauth2client.contrib.gce.AppAssertionCredentials): The
            credentials to convert.

    Returns:
        google.oauth2.service_account.Credentials: The converted credentials.
    """
    email = credentials.service_account_email
    return google.auth.compute_engine.Credentials(service_account_email=email)
+
+
def _convert_appengine_app_assertion_credentials(credentials):
    """Converts to :class:`google.auth.app_engine.Credentials`.

    Args:
        credentials (oauth2client.contrib.app_engine.AppAssertionCredentials):
            The credentials to convert.

    Returns:
        google.oauth2.service_account.Credentials: The converted credentials.
    """
    # pylint: disable=invalid-name
    # oauth2client stores scopes as a single string; split it back out.
    scopes = _helpers.string_to_scopes(credentials.scope)
    return google.auth.app_engine.Credentials(
        scopes=scopes, service_account_id=credentials.service_account_id
    )
+
+
# Maps each supported oauth2client credential class to the converter that
# produces its google-auth equivalent.
_CLASS_CONVERSION_MAP = {
    oauth2client.client.OAuth2Credentials: _convert_oauth2_credentials,
    oauth2client.client.GoogleCredentials: _convert_oauth2_credentials,
    oauth2client.service_account.ServiceAccountCredentials: _convert_service_account_credentials,
    oauth2client.service_account._JWTAccessCredentials: _convert_service_account_credentials,
    oauth2client.contrib.gce.AppAssertionCredentials: _convert_gce_app_assertion_credentials,
}

# The App Engine converter is registered only when the optional
# oauth2client.contrib.appengine module was importable above.
if _HAS_APPENGINE:
    _CLASS_CONVERSION_MAP[
        oauth2client.contrib.appengine.AppAssertionCredentials
    ] = _convert_appengine_app_assertion_credentials
+
+
def convert(credentials):
    """Convert oauth2client credentials to google-auth credentials.

    This class converts:

    - :class:`oauth2client.client.OAuth2Credentials` to
      :class:`google.oauth2.credentials.Credentials`.
    - :class:`oauth2client.client.GoogleCredentials` to
      :class:`google.oauth2.credentials.Credentials`.
    - :class:`oauth2client.service_account.ServiceAccountCredentials` to
      :class:`google.oauth2.service_account.Credentials`.
    - :class:`oauth2client.service_account._JWTAccessCredentials` to
      :class:`google.oauth2.service_account.Credentials`.
    - :class:`oauth2client.contrib.gce.AppAssertionCredentials` to
      :class:`google.auth.compute_engine.Credentials`.
    - :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to
      :class:`google.auth.app_engine.Credentials`.

    Returns:
        google.auth.credentials.Credentials: The converted credentials.

    Raises:
        ValueError: If the credentials could not be converted.
    """
    cls = type(credentials)

    # Dispatch on the concrete credential class; unknown classes raise
    # ValueError chained from the lookup failure.
    try:
        converter = _CLASS_CONVERSION_MAP[cls]
        return converter(credentials)
    except KeyError as caught_exc:
        six.raise_from(ValueError(_CONVERT_ERROR_TMPL.format(cls)), caught_exc)
diff --git a/contrib/python/google-auth/py2/google/auth/_service_account_info.py b/contrib/python/google-auth/py2/google/auth/_service_account_info.py
new file mode 100644
index 0000000000..3d340c78d4
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/_service_account_info.py
@@ -0,0 +1,74 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for loading data from a Google service account file."""
+
+import io
+import json
+
+import six
+
+from google.auth import crypt
+
+
+def from_dict(data, require=None):
+ """Validates a dictionary containing Google service account data.
+
+ Creates and returns a :class:`google.auth.crypt.Signer` instance from the
+ private key specified in the data.
+
+ Args:
+ data (Mapping[str, str]): The service account data
+ require (Sequence[str]): List of keys required to be present in the
+ info.
+
+ Returns:
+ google.auth.crypt.Signer: A signer created from the private key in the
+ service account file.
+
+ Raises:
+ ValueError: if the data was in the wrong format, or if one of the
+ required keys is missing.
+ """
+ keys_needed = set(require if require is not None else [])
+
+ missing = keys_needed.difference(six.iterkeys(data))
+
+ if missing:
+ raise ValueError(
+ "Service account info was not in the expected format, missing "
+ "fields {}.".format(", ".join(missing))
+ )
+
+ # Create a signer.
+ signer = crypt.RSASigner.from_service_account_info(data)
+
+ return signer
+
+
+def from_filename(filename, require=None):
+ """Reads a Google service account JSON file and returns its parsed info.
+
+ Args:
+ filename (str): The path to the service account .json file.
+ require (Sequence[str]): List of keys required to be present in the
+ info.
+
+ Returns:
+ Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
+ info and a signer instance.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)
+ return data, from_dict(data, require=require)
diff --git a/contrib/python/google-auth/py2/google/auth/app_engine.py b/contrib/python/google-auth/py2/google/auth/app_engine.py
new file mode 100644
index 0000000000..81aef73b45
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/app_engine.py
@@ -0,0 +1,179 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google App Engine standard environment support.
+
+This module provides authentication and signing for applications running on App
+Engine in the standard environment using the `App Identity API`_.
+
+
+.. _App Identity API:
+ https://cloud.google.com/appengine/docs/python/appidentity/
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import crypt
+
+# pytype: disable=import-error
+try:
+ from google.appengine.api import app_identity
+except ImportError:
+ app_identity = None
+# pytype: enable=import-error
+
+
+class Signer(crypt.Signer):
+ """Signs messages using the App Engine App Identity service.
+
+ This can be used in place of :class:`google.auth.crypt.Signer` when
+ running in the App Engine standard environment.
+ """
+
+ @property
+ def key_id(self):
+ """Optional[str]: The key ID used to identify this private key.
+
+ .. warning::
+ This is always ``None``. The key ID used by App Engine can not
+ be reliably determined ahead of time.
+ """
+ return None
+
+ @_helpers.copy_docstring(crypt.Signer)
+ def sign(self, message):
+ message = _helpers.to_bytes(message)
+ _, signature = app_identity.sign_blob(message)
+ return signature
+
+
+def get_project_id():
+ """Gets the project ID for the current App Engine application.
+
+ Returns:
+ str: The project ID
+
+ Raises:
+ EnvironmentError: If the App Engine APIs are unavailable.
+ """
+ # pylint: disable=missing-raises-doc
+ # Pylint rightfully thinks EnvironmentError is OSError, but doesn't
+ # realize it's a valid alias.
+ if app_identity is None:
+ raise EnvironmentError("The App Engine APIs are not available.")
+ return app_identity.get_application_id()
+
+
+class Credentials(
+ credentials.Scoped, credentials.Signing, credentials.CredentialsWithQuotaProject
+):
+ """App Engine standard environment credentials.
+
+ These credentials use the App Engine App Identity API to obtain access
+ tokens.
+ """
+
+ def __init__(
+ self,
+ scopes=None,
+ default_scopes=None,
+ service_account_id=None,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ scopes (Sequence[str]): Scopes to request from the App Identity
+ API.
+ default_scopes (Sequence[str]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+ service_account_id (str): The service account ID passed into
+ :func:`google.appengine.api.app_identity.get_access_token`.
+ If not specified, the default application service account
+ ID will be used.
+ quota_project_id (Optional[str]): The project ID used for quota
+ and billing.
+
+ Raises:
+ EnvironmentError: If the App Engine APIs are unavailable.
+ """
+ # pylint: disable=missing-raises-doc
+ # Pylint rightfully thinks EnvironmentError is OSError, but doesn't
+ # realize it's a valid alias.
+ if app_identity is None:
+ raise EnvironmentError("The App Engine APIs are not available.")
+
+ super(Credentials, self).__init__()
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+ self._service_account_id = service_account_id
+ self._signer = Signer()
+ self._quota_project_id = quota_project_id
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # pylint: disable=unused-argument
+ token, ttl = app_identity.get_access_token(scopes, self._service_account_id)
+ expiry = datetime.datetime.utcfromtimestamp(ttl)
+
+ self.token, self.expiry = token, expiry
+
+ @property
+ def service_account_email(self):
+ """The service account email."""
+ if self._service_account_id is None:
+ self._service_account_id = app_identity.get_service_account_name()
+ return self._service_account_id
+
+ @property
+ def requires_scopes(self):
+ """Checks if the credentials requires scopes.
+
+ Returns:
+ bool: True if there are no scopes set otherwise False.
+ """
+ return not self._scopes and not self._default_scopes
+
+ @_helpers.copy_docstring(credentials.Scoped)
+ def with_scopes(self, scopes, default_scopes=None):
+ return self.__class__(
+ scopes=scopes,
+ default_scopes=default_scopes,
+ service_account_id=self._service_account_id,
+ quota_project_id=self.quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ scopes=self._scopes,
+ service_account_id=self._service_account_id,
+ quota_project_id=quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.Signing)
+ def sign_bytes(self, message):
+ return self._signer.sign(message)
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer_email(self):
+ return self.service_account_email
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer(self):
+ return self._signer
diff --git a/contrib/python/google-auth/py2/google/auth/aws.py b/contrib/python/google-auth/py2/google/auth/aws.py
new file mode 100644
index 0000000000..c2b521c360
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/aws.py
@@ -0,0 +1,718 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""AWS Credentials and AWS Signature V4 Request Signer.
+
+This module provides credentials to access Google Cloud resources from Amazon
+Web Services (AWS) workloads. These credentials are recommended over the
+use of service account credentials in AWS as they do not involve the management
+of long-lived service account private keys.
+
+AWS Credentials are initialized using external_account arguments which are
+typically loaded from the external credentials JSON file.
+Unlike other Credentials that can be initialized with a list of explicit
+arguments, secrets or credentials, external account clients use the
+environment and hints/guidelines provided by the external_account JSON
+file to retrieve credentials and exchange them for Google access tokens.
+
+This module also provides a basic implementation of the
+`AWS Signature Version 4`_ request signing algorithm.
+
+AWS Credentials use serialized signed requests to the
+`AWS STS GetCallerIdentity`_ API that can be exchanged for Google access tokens
+via the GCP STS endpoint.
+
+.. _AWS Signature Version 4: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+.. _AWS STS GetCallerIdentity: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html
+"""
+
+import hashlib
+import hmac
+import io
+import json
+import os
+import re
+
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import external_account
+
+# AWS Signature Version 4 signing algorithm identifier.
+_AWS_ALGORITHM = "AWS4-HMAC-SHA256"
+# The termination string for the AWS credential scope value as defined in
+# https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
+_AWS_REQUEST_TYPE = "aws4_request"
+# The AWS authorization header name for the security session token if available.
+_AWS_SECURITY_TOKEN_HEADER = "x-amz-security-token"
+# The AWS authorization header name for the auto-generated date.
+_AWS_DATE_HEADER = "x-amz-date"
+
+
+class RequestSigner(object):
+ """Implements an AWS request signer based on the AWS Signature Version 4 signing
+ process.
+ https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+ """
+
+ def __init__(self, region_name):
+ """Instantiates an AWS request signer used to compute authenticated signed
+ requests to AWS APIs based on the AWS Signature Version 4 signing process.
+
+ Args:
+ region_name (str): The AWS region to use.
+ """
+
+ self._region_name = region_name
+
+ def get_request_options(
+ self,
+ aws_security_credentials,
+ url,
+ method,
+ request_payload="",
+ additional_headers={},
+ ):
+ """Generates the signed request for the provided HTTP request for calling
+ an AWS API. This follows the steps described at:
+ https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
+
+ Args:
+ aws_security_credentials (Mapping[str, str]): A dictionary containing
+ the AWS security credentials.
+ url (str): The AWS service URL containing the canonical URI and
+ query string.
+ method (str): The HTTP method used to call this API.
+ request_payload (Optional[str]): The optional request payload if
+ available.
+ additional_headers (Optional[Mapping[str, str]]): The optional
+ additional headers needed for the requested AWS API.
+
+ Returns:
+ Mapping[str, str]: The AWS signed request dictionary object.
+ """
+ # Get AWS credentials.
+ access_key = aws_security_credentials.get("access_key_id")
+ secret_key = aws_security_credentials.get("secret_access_key")
+ security_token = aws_security_credentials.get("security_token")
+
+ additional_headers = additional_headers or {}
+
+ uri = urllib.parse.urlparse(url)
+ # Validate provided URL.
+ if not uri.hostname or uri.scheme != "https":
+ raise ValueError("Invalid AWS service URL")
+
+ header_map = _generate_authentication_header_map(
+ host=uri.hostname,
+ canonical_uri=os.path.normpath(uri.path or "/"),
+ canonical_querystring=_get_canonical_querystring(uri.query),
+ method=method,
+ region=self._region_name,
+ access_key=access_key,
+ secret_key=secret_key,
+ security_token=security_token,
+ request_payload=request_payload,
+ additional_headers=additional_headers,
+ )
+ headers = {
+ "Authorization": header_map.get("authorization_header"),
+ "host": uri.hostname,
+ }
+ # Add x-amz-date if available.
+ if "amz_date" in header_map:
+ headers[_AWS_DATE_HEADER] = header_map.get("amz_date")
+ # Append additional optional headers, eg. X-Amz-Target, Content-Type, etc.
+ for key in additional_headers:
+ headers[key] = additional_headers[key]
+
+ # Add session token if available.
+ if security_token is not None:
+ headers[_AWS_SECURITY_TOKEN_HEADER] = security_token
+
+ signed_request = {"url": url, "method": method, "headers": headers}
+ if request_payload:
+ signed_request["data"] = request_payload
+ return signed_request
+
+
+def _get_canonical_querystring(query):
+ """Generates the canonical query string given a raw query string.
+ Logic is based on
+ https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+
+ Args:
+ query (str): The raw query string.
+
+ Returns:
+ str: The canonical query string.
+ """
+ # Parse raw query string.
+ querystring = urllib.parse.parse_qs(query)
+ querystring_encoded_map = {}
+ for key in querystring:
+ quote_key = urllib.parse.quote(key, safe="-_.~")
+ # URI encode key.
+ querystring_encoded_map[quote_key] = []
+ for item in querystring[key]:
+ # For each key, URI encode all values for that key.
+ querystring_encoded_map[quote_key].append(
+ urllib.parse.quote(item, safe="-_.~")
+ )
+ # Sort values for each key.
+ querystring_encoded_map[quote_key].sort()
+ # Sort keys.
+ sorted_keys = list(querystring_encoded_map.keys())
+ sorted_keys.sort()
+ # Reconstruct the query string. Preserve keys with multiple values.
+ querystring_encoded_pairs = []
+ for key in sorted_keys:
+ for item in querystring_encoded_map[key]:
+ querystring_encoded_pairs.append("{}={}".format(key, item))
+ return "&".join(querystring_encoded_pairs)
+
+
+def _sign(key, msg):
+ """Creates the HMAC-SHA256 hash of the provided message using the provided
+ key.
+
+ Args:
+ key (str): The HMAC-SHA256 key to use.
+ msg (str): The message to hash.
+
+ Returns:
+ str: The computed hash bytes.
+ """
+ return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
+
+
+def _get_signing_key(key, date_stamp, region_name, service_name):
+ """Calculates the signing key used to calculate the signature for
+ AWS Signature Version 4 based on:
+ https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
+
+ Args:
+ key (str): The AWS secret access key.
+ date_stamp (str): The '%Y%m%d' date format.
+ region_name (str): The AWS region.
+ service_name (str): The AWS service name, eg. sts.
+
+ Returns:
+ str: The signing key bytes.
+ """
+ k_date = _sign(("AWS4" + key).encode("utf-8"), date_stamp)
+ k_region = _sign(k_date, region_name)
+ k_service = _sign(k_region, service_name)
+ k_signing = _sign(k_service, "aws4_request")
+ return k_signing
+
+
+def _generate_authentication_header_map(
+ host,
+ canonical_uri,
+ canonical_querystring,
+ method,
+ region,
+ access_key,
+ secret_key,
+ security_token,
+ request_payload="",
+ additional_headers={},
+):
+ """Generates the authentication header map needed for generating the AWS
+ Signature Version 4 signed request.
+
+ Args:
+ host (str): The AWS service URL hostname.
+ canonical_uri (str): The AWS service URL path name.
+ canonical_querystring (str): The AWS service URL query string.
+ method (str): The HTTP method used to call this API.
+ region (str): The AWS region.
+ access_key (str): The AWS access key ID.
+ secret_key (str): The AWS secret access key.
+ security_token (Optional[str]): The AWS security session token. This is
+ available for temporary sessions.
+ request_payload (Optional[str]): The optional request payload if
+ available.
+ additional_headers (Optional[Mapping[str, str]]): The optional
+ additional headers needed for the requested AWS API.
+
+ Returns:
+ Mapping[str, str]: The AWS authentication header dictionary object.
+ This contains the x-amz-date and authorization header information.
+ """
+ # iam.amazonaws.com host => iam service.
+ # sts.us-east-2.amazonaws.com host => sts service.
+ service_name = host.split(".")[0]
+
+ current_time = _helpers.utcnow()
+ amz_date = current_time.strftime("%Y%m%dT%H%M%SZ")
+ date_stamp = current_time.strftime("%Y%m%d")
+
+ # Change all additional headers to be lower case.
+ full_headers = {}
+ for key in additional_headers:
+ full_headers[key.lower()] = additional_headers[key]
+ # Add AWS session token if available.
+ if security_token is not None:
+ full_headers[_AWS_SECURITY_TOKEN_HEADER] = security_token
+
+ # Required headers
+ full_headers["host"] = host
+ # Do not use generated x-amz-date if the date header is provided.
+ # Previously the date was not fixed with x-amz- and could be provided
+ # manually.
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.req
+ if "date" not in full_headers:
+ full_headers[_AWS_DATE_HEADER] = amz_date
+
+ # Header keys need to be sorted alphabetically.
+ canonical_headers = ""
+ header_keys = list(full_headers.keys())
+ header_keys.sort()
+ for key in header_keys:
+ canonical_headers = "{}{}:{}\n".format(
+ canonical_headers, key, full_headers[key]
+ )
+ signed_headers = ";".join(header_keys)
+
+ payload_hash = hashlib.sha256((request_payload or "").encode("utf-8")).hexdigest()
+
+ # https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ canonical_request = "{}\n{}\n{}\n{}\n{}\n{}".format(
+ method,
+ canonical_uri,
+ canonical_querystring,
+ canonical_headers,
+ signed_headers,
+ payload_hash,
+ )
+
+ credential_scope = "{}/{}/{}/{}".format(
+ date_stamp, region, service_name, _AWS_REQUEST_TYPE
+ )
+
+ # https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
+ string_to_sign = "{}\n{}\n{}\n{}".format(
+ _AWS_ALGORITHM,
+ amz_date,
+ credential_scope,
+ hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
+ )
+
+ # https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
+ signing_key = _get_signing_key(secret_key, date_stamp, region, service_name)
+ signature = hmac.new(
+ signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
+ ).hexdigest()
+
+ # https://docs.aws.amazon.com/general/latest/gr/sigv4-add-signature-to-request.html
+ authorization_header = "{} Credential={}/{}, SignedHeaders={}, Signature={}".format(
+ _AWS_ALGORITHM, access_key, credential_scope, signed_headers, signature
+ )
+
+ authentication_header = {"authorization_header": authorization_header}
+ # Do not use generated x-amz-date if the date header is provided.
+ if "date" not in full_headers:
+ authentication_header["amz_date"] = amz_date
+ return authentication_header
+
+
+class Credentials(external_account.Credentials):
+ """AWS external account credentials.
+ This is used to exchange serialized AWS signature v4 signed requests to
+ AWS STS GetCallerIdentity service for Google access tokens.
+ """
+
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source=None,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ ):
+ """Instantiates an AWS workload external account credentials object.
+
+ Args:
+ audience (str): The STS audience field.
+ subject_token_type (str): The subject token type.
+ token_url (str): The STS endpoint URL.
+ credential_source (Mapping): The credential source dictionary used
+ to provide instructions on how to retrieve external credential
+ to be exchanged for Google access tokens.
+ service_account_impersonation_url (Optional[str]): The optional
+ service account impersonation getAccessToken URL.
+ client_id (Optional[str]): The optional client ID.
+ client_secret (Optional[str]): The optional client secret.
+ quota_project_id (Optional[str]): The optional quota project ID.
+ scopes (Optional[Sequence[str]]): Optional scopes to request during
+ the authorization grant.
+ default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error is encountered during
+ access token retrieval logic.
+ ValueError: For invalid parameters.
+
+ .. note:: Typically one of the helper constructors
+ :meth:`from_file` or
+ :meth:`from_info` are used instead of calling the constructor directly.
+ """
+ super(Credentials, self).__init__(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ credential_source=credential_source,
+ service_account_impersonation_url=service_account_impersonation_url,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+ credential_source = credential_source or {}
+ self._environment_id = credential_source.get("environment_id") or ""
+ self._region_url = credential_source.get("region_url")
+ self._security_credentials_url = credential_source.get("url")
+ self._cred_verification_url = credential_source.get(
+ "regional_cred_verification_url"
+ )
+ self._region = None
+ self._request_signer = None
+ self._target_resource = audience
+
+ # Get the environment ID. Currently, only one version supported (v1).
+ matches = re.match(r"^(aws)([\d]+)$", self._environment_id)
+ if matches:
+ env_id, env_version = matches.groups()
+ else:
+ env_id, env_version = (None, None)
+
+ if env_id != "aws" or self._cred_verification_url is None:
+ raise ValueError("No valid AWS 'credential_source' provided")
+ elif int(env_version or "") != 1:
+ raise ValueError(
+ "aws version '{}' is not supported in the current build.".format(
+ env_version
+ )
+ )
+
+ def retrieve_subject_token(self, request):
+ """Retrieves the subject token using the credential_source object.
+ The subject token is a serialized `AWS GetCallerIdentity signed request`_.
+
+ The logic is summarized as:
+
+ Retrieve the AWS region from the AWS_REGION or AWS_DEFAULT_REGION
+ environment variable or from the AWS metadata server availability-zone
+ if not found in the environment variable.
+
+ Check AWS credentials in environment variables. If not found, retrieve
+ from the AWS metadata server security-credentials endpoint.
+
+ When retrieving AWS credentials from the metadata server
+ security-credentials endpoint, the AWS role needs to be determined by
+ calling the security-credentials endpoint without any argument. Then the
+ credentials can be retrieved via: security-credentials/role_name
+
+ Generate the signed request to AWS STS GetCallerIdentity action.
+
+ Inject x-goog-cloud-target-resource into header and serialize the
+ signed request. This will be the subject-token to pass to GCP STS.
+
+ .. _AWS GetCallerIdentity signed request:
+ https://cloud.google.com/iam/docs/access-resources-aws#exchange-token
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ Returns:
+ str: The retrieved subject token.
+ """
+ # Initialize the request signer if not yet initialized after determining
+ # the current AWS region.
+ if self._request_signer is None:
+ self._region = self._get_region(request, self._region_url)
+ self._request_signer = RequestSigner(self._region)
+
+ # Retrieve the AWS security credentials needed to generate the signed
+ # request.
+ aws_security_credentials = self._get_security_credentials(request)
+ # Generate the signed request to AWS STS GetCallerIdentity API.
+ # Use the required regional endpoint. Otherwise, the request will fail.
+ request_options = self._request_signer.get_request_options(
+ aws_security_credentials,
+ self._cred_verification_url.replace("{region}", self._region),
+ "POST",
+ )
+ # The GCP STS endpoint expects the headers to be formatted as:
+ # [
+ # {key: 'x-amz-date', value: '...'},
+ # {key: 'Authorization', value: '...'},
+ # ...
+ # ]
+ # And then serialized as:
+ # quote(json.dumps({
+ # url: '...',
+ # method: 'POST',
+ # headers: [{key: 'x-amz-date', value: '...'}, ...]
+ # }))
+ request_headers = request_options.get("headers")
+ # The full, canonical resource name of the workload identity pool
+ # provider, with or without the HTTPS prefix.
+ # Including this header as part of the signature is recommended to
+ # ensure data integrity.
+ request_headers["x-goog-cloud-target-resource"] = self._target_resource
+
+ # Serialize AWS signed request.
+ # Keeping inner keys in sorted order makes testing easier for Python
+ # versions <=3.5 as the stringified JSON string would have a predictable
+ # key order.
+ aws_signed_req = {}
+ aws_signed_req["url"] = request_options.get("url")
+ aws_signed_req["method"] = request_options.get("method")
+ aws_signed_req["headers"] = []
+ # Reformat header to GCP STS expected format.
+ for key in sorted(request_headers.keys()):
+ aws_signed_req["headers"].append(
+ {"key": key, "value": request_headers[key]}
+ )
+
+ return urllib.parse.quote(
+ json.dumps(aws_signed_req, separators=(",", ":"), sort_keys=True)
+ )
+
+ def _get_region(self, request, url):
+ """Retrieves the current AWS region from either the AWS_REGION or
+ AWS_DEFAULT_REGION environment variable or from the AWS metadata server.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ url (str): The AWS metadata server region URL.
+
+ Returns:
+ str: The current AWS region.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error occurs while
+ retrieving the AWS region.
+ """
+ # The AWS metadata server is not available in some AWS environments
+ # such as AWS lambda. Instead, it is available via environment
+ # variable.
+ env_aws_region = os.environ.get(environment_vars.AWS_REGION)
+ if env_aws_region is not None:
+ return env_aws_region
+
+ env_aws_region = os.environ.get(environment_vars.AWS_DEFAULT_REGION)
+ if env_aws_region is not None:
+ return env_aws_region
+
+ if not self._region_url:
+ raise exceptions.RefreshError("Unable to determine AWS region")
+ response = request(url=self._region_url, method="GET")
+
+ # Support both string and bytes type response.data.
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != 200:
+ raise exceptions.RefreshError(
+ "Unable to retrieve AWS region", response_body
+ )
+
+ # This endpoint will return the region in format: us-east-2b.
+ # Only the us-east-2 part should be used.
+ return response_body[:-1]
+
+ def _get_security_credentials(self, request):
+ """Retrieves the AWS security credentials required for signing AWS
+ requests from either the AWS security credentials environment variables
+ or from the AWS metadata server.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+
+ Returns:
+ Mapping[str, str]: The AWS security credentials dictionary object.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error occurs while
+ retrieving the AWS security credentials.
+ """
+
+ # Check environment variables for permanent credentials first.
+ # https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html
+ env_aws_access_key_id = os.environ.get(environment_vars.AWS_ACCESS_KEY_ID)
+ env_aws_secret_access_key = os.environ.get(
+ environment_vars.AWS_SECRET_ACCESS_KEY
+ )
+ # This is normally not available for permanent credentials.
+ env_aws_session_token = os.environ.get(environment_vars.AWS_SESSION_TOKEN)
+ if env_aws_access_key_id and env_aws_secret_access_key:
+ return {
+ "access_key_id": env_aws_access_key_id,
+ "secret_access_key": env_aws_secret_access_key,
+ "security_token": env_aws_session_token,
+ }
+
+ # Get role name.
+ role_name = self._get_metadata_role_name(request)
+
+ # Get security credentials.
+ credentials = self._get_metadata_security_credentials(request, role_name)
+
+ return {
+ "access_key_id": credentials.get("AccessKeyId"),
+ "secret_access_key": credentials.get("SecretAccessKey"),
+ "security_token": credentials.get("Token"),
+ }
+
+ def _get_metadata_security_credentials(self, request, role_name):
+ """Retrieves the AWS security credentials required for signing AWS
+ requests from the AWS metadata server.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ role_name (str): The AWS role name required by the AWS metadata
+ server security_credentials endpoint in order to return the
+ credentials.
+
+ Returns:
+ Mapping[str, str]: The AWS metadata server security credentials
+ response.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error occurs while
+ retrieving the AWS security credentials.
+ """
+ headers = {"Content-Type": "application/json"}
+ response = request(
+ url="{}/{}".format(self._security_credentials_url, role_name),
+ method="GET",
+ headers=headers,
+ )
+
+ # support both string and bytes type response.data
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != http_client.OK:
+ raise exceptions.RefreshError(
+ "Unable to retrieve AWS security credentials", response_body
+ )
+
+ credentials_response = json.loads(response_body)
+
+ return credentials_response
+
+ def _get_metadata_role_name(self, request):
+ """Retrieves the AWS role currently attached to the current AWS
+ workload by querying the AWS metadata server. This is needed for the
+ AWS metadata server security credentials endpoint in order to retrieve
+ the AWS security credentials needed to sign requests to AWS APIs.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+
+ Returns:
+ str: The AWS role name.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error occurs while
+ retrieving the AWS role name.
+ """
+ if self._security_credentials_url is None:
+ raise exceptions.RefreshError(
+ "Unable to determine the AWS metadata server security credentials endpoint"
+ )
+ response = request(url=self._security_credentials_url, method="GET")
+
+ # support both string and bytes type response.data
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != http_client.OK:
+ raise exceptions.RefreshError(
+ "Unable to retrieve AWS role name", response_body
+ )
+
+ return response_body
+
+ @classmethod
+ def from_info(cls, info, **kwargs):
+ """Creates an AWS Credentials instance from parsed external account info.
+
+ Args:
+ info (Mapping[str, str]): The AWS external account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.aws.Credentials: The constructed credentials.
+
+ Raises:
+ ValueError: For invalid parameters.
+ """
+ return cls(
+ audience=info.get("audience"),
+ subject_token_type=info.get("subject_token_type"),
+ token_url=info.get("token_url"),
+ service_account_impersonation_url=info.get(
+ "service_account_impersonation_url"
+ ),
+ client_id=info.get("client_id"),
+ client_secret=info.get("client_secret"),
+ credential_source=info.get("credential_source"),
+ quota_project_id=info.get("quota_project_id"),
+ **kwargs
+ )
+
+ @classmethod
+ def from_file(cls, filename, **kwargs):
+ """Creates an AWS Credentials instance from an external account json file.
+
+ Args:
+ filename (str): The path to the AWS external account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.aws.Credentials: The constructed credentials.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)
+ return cls.from_info(data, **kwargs)
diff --git a/contrib/python/google-auth/py2/google/auth/compute_engine/__init__.py b/contrib/python/google-auth/py2/google/auth/compute_engine/__init__.py
new file mode 100644
index 0000000000..5c84234e93
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/compute_engine/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Compute Engine authentication."""
+
+from google.auth.compute_engine.credentials import Credentials
+from google.auth.compute_engine.credentials import IDTokenCredentials
+
+
+__all__ = ["Credentials", "IDTokenCredentials"]
diff --git a/contrib/python/google-auth/py2/google/auth/compute_engine/_metadata.py b/contrib/python/google-auth/py2/google/auth/compute_engine/_metadata.py
new file mode 100644
index 0000000000..9db7bea92d
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/compute_engine/_metadata.py
@@ -0,0 +1,267 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides helper methods for talking to the Compute Engine metadata server.
+
+See https://cloud.google.com/compute/docs/metadata for more details.
+"""
+
+import datetime
+import json
+import logging
+import os
+
+import six
+from six.moves import http_client
+from six.moves.urllib import parse as urlparse
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+
+_LOGGER = logging.getLogger(__name__)
+
+# Environment variable GCE_METADATA_HOST is originally named
+# GCE_METADATA_ROOT. For compatibility reasons, here it checks
+# the new variable first; if not set, the system falls back
+# to the old variable.
+_GCE_METADATA_HOST = os.getenv(environment_vars.GCE_METADATA_HOST, None)
+if not _GCE_METADATA_HOST:
+ _GCE_METADATA_HOST = os.getenv(
+ environment_vars.GCE_METADATA_ROOT, "metadata.google.internal"
+ )
+_METADATA_ROOT = "http://{}/computeMetadata/v1/".format(_GCE_METADATA_HOST)
+
+# This is used to ping the metadata server, it avoids the cost of a DNS
+# lookup.
+_METADATA_IP_ROOT = "http://{}".format(
+ os.getenv(environment_vars.GCE_METADATA_IP, "169.254.169.254")
+)
+_METADATA_FLAVOR_HEADER = "metadata-flavor"
+_METADATA_FLAVOR_VALUE = "Google"
+_METADATA_HEADERS = {_METADATA_FLAVOR_HEADER: _METADATA_FLAVOR_VALUE}
+
+# Timeout in seconds to wait for the GCE metadata server when detecting the
+# GCE environment.
+try:
+ _METADATA_DEFAULT_TIMEOUT = int(os.getenv("GCE_METADATA_TIMEOUT", 3))
+except ValueError: # pragma: NO COVER
+ _METADATA_DEFAULT_TIMEOUT = 3
+
+
+def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3):
+ """Checks to see if the metadata server is available.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ timeout (int): How long to wait for the metadata server to respond.
+ retry_count (int): How many times to attempt connecting to metadata
+ server using above timeout.
+
+ Returns:
+ bool: True if the metadata server is reachable, False otherwise.
+ """
+ # NOTE: The explicit ``timeout`` is a workaround. The underlying
+ # issue is that resolving an unknown host on some networks will take
+ # 20-30 seconds; making this timeout short fixes the issue, but
+ # could lead to false negatives in the event that we are on GCE, but
+ # the metadata resolution was particularly slow. The latter case is
+ # "unlikely".
+ retries = 0
+ while retries < retry_count:
+ try:
+ response = request(
+ url=_METADATA_IP_ROOT,
+ method="GET",
+ headers=_METADATA_HEADERS,
+ timeout=timeout,
+ )
+
+ metadata_flavor = response.headers.get(_METADATA_FLAVOR_HEADER)
+ return (
+ response.status == http_client.OK
+ and metadata_flavor == _METADATA_FLAVOR_VALUE
+ )
+
+ except exceptions.TransportError as e:
+ _LOGGER.warning(
+ "Compute Engine Metadata server unavailable on "
+ "attempt %s of %s. Reason: %s",
+ retries + 1,
+ retry_count,
+ e,
+ )
+ retries += 1
+
+ return False
+
+
+def get(
+ request, path, root=_METADATA_ROOT, params=None, recursive=False, retry_count=5
+):
+ """Fetch a resource from the metadata server.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ path (str): The resource to retrieve. For example,
+ ``'instance/service-accounts/default'``.
+ root (str): The full path to the metadata server root.
+ params (Optional[Mapping[str, str]]): A mapping of query parameter
+ keys to values.
+ recursive (bool): Whether to do a recursive query of metadata. See
+ https://cloud.google.com/compute/docs/metadata#aggcontents for more
+ details.
+ retry_count (int): How many times to attempt connecting to metadata
+ server using above timeout.
+
+ Returns:
+ Union[Mapping, str]: If the metadata server returns JSON, a mapping of
+ the decoded JSON is returned. Otherwise, the response content is
+ returned as a string.
+
+ Raises:
+ google.auth.exceptions.TransportError: if an error occurred while
+ retrieving metadata.
+ """
+ base_url = urlparse.urljoin(root, path)
+ query_params = {} if params is None else params
+
+ if recursive:
+ query_params["recursive"] = "true"
+
+ url = _helpers.update_query(base_url, query_params)
+
+ retries = 0
+ while retries < retry_count:
+ try:
+ response = request(url=url, method="GET", headers=_METADATA_HEADERS)
+ break
+
+ except exceptions.TransportError as e:
+ _LOGGER.warning(
+ "Compute Engine Metadata server unavailable on "
+ "attempt %s of %s. Reason: %s",
+ retries + 1,
+ retry_count,
+ e,
+ )
+ retries += 1
+ else:
+ raise exceptions.TransportError(
+ "Failed to retrieve {} from the Google Compute Engine"
+ "metadata service. Compute Engine Metadata server unavailable".format(url)
+ )
+
+ if response.status == http_client.OK:
+ content = _helpers.from_bytes(response.data)
+ if response.headers["content-type"] == "application/json":
+ try:
+ return json.loads(content)
+ except ValueError as caught_exc:
+ new_exc = exceptions.TransportError(
+ "Received invalid JSON from the Google Compute Engine"
+ "metadata service: {:.20}".format(content)
+ )
+ six.raise_from(new_exc, caught_exc)
+ else:
+ return content
+ else:
+ raise exceptions.TransportError(
+ "Failed to retrieve {} from the Google Compute Engine"
+ "metadata service. Status: {} Response:\n{}".format(
+ url, response.status, response.data
+ ),
+ response,
+ )
+
+
+def get_project_id(request):
+ """Get the Google Cloud Project ID from the metadata server.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+
+ Returns:
+ str: The project ID
+
+ Raises:
+ google.auth.exceptions.TransportError: if an error occurred while
+ retrieving metadata.
+ """
+ return get(request, "project/project-id")
+
+
+def get_service_account_info(request, service_account="default"):
+ """Get information about a service account from the metadata server.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ service_account (str): The string 'default' or a service account email
+ address. This determines the service account for which to acquire
+ information.
+
+ Returns:
+ Mapping: The service account's information, for example::
+
+ {
+ 'email': '...',
+ 'scopes': ['scope', ...],
+ 'aliases': ['default', '...']
+ }
+
+ Raises:
+ google.auth.exceptions.TransportError: if an error occurred while
+ retrieving metadata.
+ """
+ path = "instance/service-accounts/{0}/".format(service_account)
+ # See https://cloud.google.com/compute/docs/metadata#aggcontents
+ # for more on the use of 'recursive'.
+ return get(request, path, params={"recursive": "true"})
+
+
+def get_service_account_token(request, service_account="default", scopes=None):
+ """Get the OAuth 2.0 access token for a service account.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ service_account (str): The string 'default' or a service account email
+ address. This determines the service account for which to acquire
+ an access token.
+ scopes (Optional[Union[str, List[str]]]): Optional string or list of
+ strings with auth scopes.
+ Returns:
+ Union[str, datetime]: The access token and its expiration.
+
+ Raises:
+ google.auth.exceptions.TransportError: if an error occurred while
+ retrieving metadata.
+ """
+ if scopes:
+ if not isinstance(scopes, str):
+ scopes = ",".join(scopes)
+ params = {"scopes": scopes}
+ else:
+ params = None
+
+ path = "instance/service-accounts/{0}/token".format(service_account)
+ token_json = get(request, path, params=params)
+ token_expiry = _helpers.utcnow() + datetime.timedelta(
+ seconds=token_json["expires_in"]
+ )
+ return token_json["access_token"], token_expiry
diff --git a/contrib/python/google-auth/py2/google/auth/compute_engine/credentials.py b/contrib/python/google-auth/py2/google/auth/compute_engine/credentials.py
new file mode 100644
index 0000000000..1671656200
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/compute_engine/credentials.py
@@ -0,0 +1,413 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Compute Engine credentials.
+
+This module provides authentication for an application running on Google
+Compute Engine using the Compute Engine metadata server.
+
+"""
+
+import datetime
+
+import six
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import iam
+from google.auth import jwt
+from google.auth.compute_engine import _metadata
+from google.oauth2 import _client
+
+
+class Credentials(credentials.Scoped, credentials.CredentialsWithQuotaProject):
+ """Compute Engine Credentials.
+
+ These credentials use the Google Compute Engine metadata server to obtain
+ OAuth 2.0 access tokens associated with the instance's service account,
+ and are also used for Cloud Run, Flex and App Engine (except for the Python
+ 2.7 runtime).
+
+ For more information about Compute Engine authentication, including how
+ to configure scopes, see the `Compute Engine authentication
+ documentation`_.
+
+ .. note:: On Compute Engine the metadata server ignores requested scopes.
+ On Cloud Run, Flex and App Engine the server honours requested scopes.
+
+ .. _Compute Engine authentication documentation:
+ https://cloud.google.com/compute/docs/authentication#using
+ """
+
+ def __init__(
+ self,
+ service_account_email="default",
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ ):
+ """
+ Args:
+ service_account_email (str): The service account email to use, or
+ 'default'. A Compute Engine instance may have multiple service
+ accounts.
+ quota_project_id (Optional[str]): The project ID used for quota and
+ billing.
+ scopes (Optional[Sequence[str]]): The list of scopes for the credentials.
+ default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+ """
+ super(Credentials, self).__init__()
+ self._service_account_email = service_account_email
+ self._quota_project_id = quota_project_id
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+
+ def _retrieve_info(self, request):
+ """Retrieve information about the service account.
+
+ Updates the scopes and retrieves the full service account email.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ """
+ info = _metadata.get_service_account_info(
+ request, service_account=self._service_account_email
+ )
+
+ self._service_account_email = info["email"]
+
+ # Don't override scopes requested by the user.
+ if self._scopes is None:
+ self._scopes = info["scopes"]
+
+ def refresh(self, request):
+ """Refresh the access token and scopes.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the Compute Engine metadata
+ service can't be reached or if the instance has no
+ credentials.
+ """
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ try:
+ self._retrieve_info(request)
+ self.token, self.expiry = _metadata.get_service_account_token(
+ request, service_account=self._service_account_email, scopes=scopes
+ )
+ except exceptions.TransportError as caught_exc:
+ new_exc = exceptions.RefreshError(caught_exc)
+ six.raise_from(new_exc, caught_exc)
+
+ @property
+ def service_account_email(self):
+ """The service account email.
+
+ .. note:: This is not guaranteed to be set until :meth:`refresh` has been
+ called.
+ """
+ return self._service_account_email
+
+ @property
+ def requires_scopes(self):
+ return not self._scopes
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ service_account_email=self._service_account_email,
+ quota_project_id=quota_project_id,
+ scopes=self._scopes,
+ )
+
+ @_helpers.copy_docstring(credentials.Scoped)
+ def with_scopes(self, scopes, default_scopes=None):
+ # Compute Engine credentials can not be scoped (the metadata service
+ # ignores the scopes parameter). App Engine, Cloud Run and Flex support
+ # requesting scopes.
+ return self.__class__(
+ scopes=scopes,
+ default_scopes=default_scopes,
+ service_account_email=self._service_account_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+_DEFAULT_TOKEN_URI = "https://www.googleapis.com/oauth2/v4/token"
+
+
+class IDTokenCredentials(credentials.CredentialsWithQuotaProject, credentials.Signing):
+ """Open ID Connect ID Token-based service account credentials.
+
+ These credentials rely on the default service account of a GCE instance.
+
+ ID token can be requested from `GCE metadata server identity endpoint`_, IAM
+ token endpoint or other token endpoints you specify. If metadata server
+ identity endpoint is not used, the GCE instance must have been started with
+ a service account that has access to the IAM Cloud API.
+
+ .. _GCE metadata server identity endpoint:
+ https://cloud.google.com/compute/docs/instances/verifying-instance-identity
+ """
+
+ def __init__(
+ self,
+ request,
+ target_audience,
+ token_uri=None,
+ additional_claims=None,
+ service_account_email=None,
+ signer=None,
+ use_metadata_identity_endpoint=False,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ target_audience (str): The intended audience for these credentials,
+ used when requesting the ID Token. The ID Token's ``aud`` claim
+ will be set to this string.
+ token_uri (str): The OAuth 2.0 Token URI.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT assertion used in the authorization grant.
+ service_account_email (str): Optional explicit service account to
+ use to sign JWT tokens.
+ By default, this is the default GCE service account.
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ In case the signer is specified, the request argument will be
+ ignored.
+ use_metadata_identity_endpoint (bool): Whether to use GCE metadata
+ identity endpoint. For backward compatibility the default value
+ is False. If set to True, ``token_uri``, ``additional_claims``,
+ ``service_account_email``, ``signer`` argument should not be set;
+ otherwise ValueError will be raised.
+ quota_project_id (Optional[str]): The project ID used for quota and
+ billing.
+
+ Raises:
+ ValueError:
+ If ``use_metadata_identity_endpoint`` is set to True, and one of
+ ``token_uri``, ``additional_claims``, ``service_account_email``,
+ ``signer`` arguments is set.
+ """
+ super(IDTokenCredentials, self).__init__()
+
+ self._quota_project_id = quota_project_id
+ self._use_metadata_identity_endpoint = use_metadata_identity_endpoint
+ self._target_audience = target_audience
+
+ if use_metadata_identity_endpoint:
+ if token_uri or additional_claims or service_account_email or signer:
+ raise ValueError(
+ "If use_metadata_identity_endpoint is set, token_uri, "
+ "additional_claims, service_account_email, signer arguments"
+ " must not be set"
+ )
+ self._token_uri = None
+ self._additional_claims = None
+ self._signer = None
+
+ if service_account_email is None:
+ sa_info = _metadata.get_service_account_info(request)
+ self._service_account_email = sa_info["email"]
+ else:
+ self._service_account_email = service_account_email
+
+ if not use_metadata_identity_endpoint:
+ if signer is None:
+ signer = iam.Signer(
+ request=request,
+ credentials=Credentials(),
+ service_account_email=self._service_account_email,
+ )
+ self._signer = signer
+ self._token_uri = token_uri or _DEFAULT_TOKEN_URI
+
+ if additional_claims is not None:
+ self._additional_claims = additional_claims
+ else:
+ self._additional_claims = {}
+
+ def with_target_audience(self, target_audience):
+ """Create a copy of these credentials with the specified target
+ audience.
+ Args:
+ target_audience (str): The intended audience for these credentials,
+ used when requesting the ID Token.
+ Returns:
+ google.auth.service_account.IDTokenCredentials: A new credentials
+ instance.
+ """
+ # since the signer is already instantiated,
+ # the request is not needed
+ if self._use_metadata_identity_endpoint:
+ return self.__class__(
+ None,
+ target_audience=target_audience,
+ use_metadata_identity_endpoint=True,
+ quota_project_id=self._quota_project_id,
+ )
+ else:
+ return self.__class__(
+ None,
+ service_account_email=self._service_account_email,
+ token_uri=self._token_uri,
+ target_audience=target_audience,
+ additional_claims=self._additional_claims.copy(),
+ signer=self.signer,
+ use_metadata_identity_endpoint=False,
+ quota_project_id=self._quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+
+ # since the signer is already instantiated,
+ # the request is not needed
+ if self._use_metadata_identity_endpoint:
+ return self.__class__(
+ None,
+ target_audience=self._target_audience,
+ use_metadata_identity_endpoint=True,
+ quota_project_id=quota_project_id,
+ )
+ else:
+ return self.__class__(
+ None,
+ service_account_email=self._service_account_email,
+ token_uri=self._token_uri,
+ target_audience=self._target_audience,
+ additional_claims=self._additional_claims.copy(),
+ signer=self.signer,
+ use_metadata_identity_endpoint=False,
+ quota_project_id=quota_project_id,
+ )
+
+ def _make_authorization_grant_assertion(self):
+ """Create the OAuth 2.0 assertion.
+ This assertion is used during the OAuth 2.0 grant to acquire an
+ ID token.
+ Returns:
+ bytes: The authorization grant assertion.
+ """
+ now = _helpers.utcnow()
+ lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
+ expiry = now + lifetime
+
+ payload = {
+ "iat": _helpers.datetime_to_secs(now),
+ "exp": _helpers.datetime_to_secs(expiry),
+ # The issuer must be the service account email.
+ "iss": self.service_account_email,
+ # The audience must be the auth token endpoint's URI
+ "aud": self._token_uri,
+ # The target audience specifies which service the ID token is
+ # intended for.
+ "target_audience": self._target_audience,
+ }
+
+ payload.update(self._additional_claims)
+
+ token = jwt.encode(self._signer, payload)
+
+ return token
+
+ def _call_metadata_identity_endpoint(self, request):
+ """Request ID token from metadata identity endpoint.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+
+ Returns:
+ Tuple[str, datetime.datetime]: The ID token and the expiry of the ID token.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the Compute Engine metadata
+ service can't be reached or if the instance has no credentials.
+ ValueError: If extracting expiry from the obtained ID token fails.
+ """
+ try:
+ path = "instance/service-accounts/default/identity"
+ params = {"audience": self._target_audience, "format": "full"}
+ id_token = _metadata.get(request, path, params=params)
+ except exceptions.TransportError as caught_exc:
+ new_exc = exceptions.RefreshError(caught_exc)
+ six.raise_from(new_exc, caught_exc)
+
+ _, payload, _, _ = jwt._unverified_decode(id_token)
+ return id_token, datetime.datetime.fromtimestamp(payload["exp"])
+
+ def refresh(self, request):
+ """Refreshes the ID token.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the credentials could
+ not be refreshed.
+ ValueError: If extracting expiry from the obtained ID token fails.
+ """
+ if self._use_metadata_identity_endpoint:
+ self.token, self.expiry = self._call_metadata_identity_endpoint(request)
+ else:
+ assertion = self._make_authorization_grant_assertion()
+ access_token, expiry, _ = _client.id_token_jwt_grant(
+ request, self._token_uri, assertion
+ )
+ self.token = access_token
+ self.expiry = expiry
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer(self):
+ return self._signer
+
+ def sign_bytes(self, message):
+ """Signs the given message.
+
+ Args:
+ message (bytes): The message to sign.
+
+ Returns:
+ bytes: The message's cryptographic signature.
+
+ Raises:
+ ValueError:
+ Signer is not available if metadata identity endpoint is used.
+ """
+ if self._use_metadata_identity_endpoint:
+ raise ValueError(
+ "Signer is not available if metadata identity endpoint is used"
+ )
+ return self._signer.sign(message)
+
+ @property
+ def service_account_email(self):
+ """The service account email."""
+ return self._service_account_email
+
+ @property
+ def signer_email(self):
+ return self._service_account_email
diff --git a/contrib/python/google-auth/py2/google/auth/credentials.py b/contrib/python/google-auth/py2/google/auth/credentials.py
new file mode 100644
index 0000000000..7d3c798b13
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/credentials.py
@@ -0,0 +1,362 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Interfaces for credentials."""
+
+import abc
+
+import six
+
+from google.auth import _helpers
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Credentials(object):
+ """Base class for all credentials.
+
+ All credentials have a :attr:`token` that is used for authentication and
+ may also optionally set an :attr:`expiry` to indicate when the token will
+ no longer be valid.
+
+ Most credentials will be :attr:`invalid` until :meth:`refresh` is called.
+ Credentials can do this automatically before the first HTTP request in
+ :meth:`before_request`.
+
+ Although the token and expiration will change as the credentials are
+ :meth:`refreshed <refresh>` and used, credentials should be considered
+ immutable. Various credentials will accept configuration such as private
+ keys, scopes, and other options. These options are not changeable after
+ construction. Some classes will provide mechanisms to copy the credentials
+ with modifications such as :meth:`ScopedCredentials.with_scopes`.
+ """
+
+ def __init__(self):
+ self.token = None
+ """str: The bearer token that can be used in HTTP headers to make
+ authenticated requests."""
+ self.expiry = None
+ """Optional[datetime]: When the token expires and is no longer valid.
+ If this is None, the token is assumed to never expire."""
+ self._quota_project_id = None
+ """Optional[str]: Project to use for quota and billing purposes."""
+
+ @property
+ def expired(self):
+ """Checks if the credentials are expired.
+
+ Note that credentials can be invalid but not expired because
+ Credentials with :attr:`expiry` set to None are considered to never
+ expire.
+ """
+ if not self.expiry:
+ return False
+
+ # Remove 10 seconds from expiry to err on the side of reporting
+ # expiration early so that we avoid the 401-refresh-retry loop.
+ skewed_expiry = self.expiry - _helpers.CLOCK_SKEW
+ return _helpers.utcnow() >= skewed_expiry
+
+ @property
+ def valid(self):
+ """Checks the validity of the credentials.
+
+ This is True if the credentials have a :attr:`token` and the token
+ is not :attr:`expired`.
+ """
+ return self.token is not None and not self.expired
+
+ @property
+ def quota_project_id(self):
+ """Project to use for quota and billing purposes."""
+ return self._quota_project_id
+
+ @abc.abstractmethod
+ def refresh(self, request):
+ """Refreshes the access token.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the credentials could
+ not be refreshed.
+ """
+ # pylint: disable=missing-raises-doc
+ # (pylint doesn't recognize that this is abstract)
+ raise NotImplementedError("Refresh must be implemented")
+
+ def apply(self, headers, token=None):
+ """Apply the token to the authentication header.
+
+ Args:
+ headers (Mapping): The HTTP request headers.
+ token (Optional[str]): If specified, overrides the current access
+ token.
+ """
+ headers["authorization"] = "Bearer {}".format(
+ _helpers.from_bytes(token or self.token)
+ )
+ if self.quota_project_id:
+ headers["x-goog-user-project"] = self.quota_project_id
+
+ def before_request(self, request, method, url, headers):
+ """Performs credential-specific before request logic.
+
+ Refreshes the credentials if necessary, then calls :meth:`apply` to
+ apply the token to the authentication header.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ method (str): The request's HTTP method or the RPC method being
+ invoked.
+ url (str): The request's URI or the RPC service's URI.
+ headers (Mapping): The request's headers.
+ """
+ # pylint: disable=unused-argument
+ # (Subclasses may use these arguments to ascertain information about
+ # the http request.)
+ if not self.valid:
+ self.refresh(request)
+ self.apply(headers)
+
+
+class CredentialsWithQuotaProject(Credentials):
+ """Abstract base for credentials supporting ``with_quota_project`` factory"""
+
+ def with_quota_project(self, quota_project_id):
+ """Returns a copy of these credentials with a modified quota project.
+
+ Args:
+ quota_project_id (str): The project to use for quota and
+ billing purposes
+
+ Returns:
+ google.oauth2.credentials.Credentials: A new credentials instance.
+ """
+ raise NotImplementedError("This credential does not support quota project.")
+
+
+class AnonymousCredentials(Credentials):
+ """Credentials that do not provide any authentication information.
+
+ These are useful in the case of services that support anonymous access or
+ local service emulators that do not use credentials.
+ """
+
+ @property
+ def expired(self):
+ """Returns `False`, anonymous credentials never expire."""
+ return False
+
+ @property
+ def valid(self):
+ """Returns `True`, anonymous credentials are always valid."""
+ return True
+
+ def refresh(self, request):
+ """Raises :class:`ValueError`, anonymous credentials cannot be
+ refreshed."""
+ raise ValueError("Anonymous credentials cannot be refreshed.")
+
+ def apply(self, headers, token=None):
+ """Anonymous credentials do nothing to the request.
+
+ The optional ``token`` argument is not supported.
+
+ Raises:
+ ValueError: If a token was specified.
+ """
+ if token is not None:
+ raise ValueError("Anonymous credentials don't support tokens.")
+
+ def before_request(self, request, method, url, headers):
+ """Anonymous credentials do nothing to the request."""
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ReadOnlyScoped(object):
+ """Interface for credentials whose scopes can be queried.
+
+ OAuth 2.0-based credentials allow limiting access using scopes as described
+ in `RFC6749 Section 3.3`_.
+ If a credential class implements this interface then the credentials
+ use scopes in their implementation.
+
+ Some credentials require scopes in order to obtain a token. You can check
+ if scoping is necessary with :attr:`requires_scopes`::
+
+ if credentials.requires_scopes:
+ # Scoping is required.
+ credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+ Credentials that require scopes must either be constructed with scopes::
+
+ credentials = SomeScopedCredentials(scopes=['one', 'two'])
+
+ Or must copy an existing instance using :meth:`with_scopes`::
+
+ scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+ Some credentials have scopes but do not allow or require scopes to be set,
+ these credentials can be used as-is.
+
+ .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
+ """
+
+ def __init__(self):
+ super(ReadOnlyScoped, self).__init__()
+ self._scopes = None
+ self._default_scopes = None
+
+ @property
+ def scopes(self):
+ """Sequence[str]: the credentials' current set of scopes."""
+ return self._scopes
+
+ @property
+ def default_scopes(self):
+ """Sequence[str]: the credentials' current set of default scopes."""
+ return self._default_scopes
+
+ @abc.abstractproperty
+ def requires_scopes(self):
+ """True if these credentials require scopes to obtain an access token.
+ """
+ return False
+
+ def has_scopes(self, scopes):
+ """Checks if the credentials have the given scopes.
+
+ .. warning: This method is not guaranteed to be accurate if the
+ credentials are :attr:`~Credentials.invalid`.
+
+ Args:
+ scopes (Sequence[str]): The list of scopes to check.
+
+ Returns:
+ bool: True if the credentials have the given scopes.
+ """
+ credential_scopes = (
+ self._scopes if self._scopes is not None else self._default_scopes
+ )
+ return set(scopes).issubset(set(credential_scopes or []))
+
+
+class Scoped(ReadOnlyScoped):
+ """Interface for credentials whose scopes can be replaced while copying.
+
+ OAuth 2.0-based credentials allow limiting access using scopes as described
+ in `RFC6749 Section 3.3`_.
+ If a credential class implements this interface then the credentials
+ use scopes in their implementation.
+
+ Some credentials require scopes in order to obtain a token. You can check
+ if scoping is necessary with :attr:`requires_scopes`::
+
+ if credentials.requires_scopes:
+ # Scoping is required.
+ credentials = credentials.create_scoped(['one', 'two'])
+
+ Credentials that require scopes must either be constructed with scopes::
+
+ credentials = SomeScopedCredentials(scopes=['one', 'two'])
+
+ Or must copy an existing instance using :meth:`with_scopes`::
+
+ scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+ Some credentials have scopes but do not allow or require scopes to be set,
+ these credentials can be used as-is.
+
+ .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
+ """
+
+ @abc.abstractmethod
+ def with_scopes(self, scopes, default_scopes=None):
+ """Create a copy of these credentials with the specified scopes.
+
+ Args:
+ scopes (Sequence[str]): The list of scopes to attach to the
+ current credentials.
+
+ Raises:
+ NotImplementedError: If the credentials' scopes can not be changed.
+ This can be avoided by checking :attr:`requires_scopes` before
+ calling this method.
+ """
+ raise NotImplementedError("This class does not require scoping.")
+
+
+def with_scopes_if_required(credentials, scopes, default_scopes=None):
+ """Creates a copy of the credentials with scopes if scoping is required.
+
+ This helper function is useful when you do not know (or care to know) the
+ specific type of credentials you are using (such as when you use
+ :func:`google.auth.default`). This function will call
+ :meth:`Scoped.with_scopes` if the credentials are scoped credentials and if
+ the credentials require scoping. Otherwise, it will return the credentials
+ as-is.
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ scope if necessary.
+ scopes (Sequence[str]): The list of scopes to use.
+ default_scopes (Sequence[str]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+
+ Returns:
+ google.auth.credentials.Credentials: Either a new set of scoped
+ credentials, or the passed in credentials instance if no scoping
+ was required.
+ """
+ if isinstance(credentials, Scoped) and credentials.requires_scopes:
+ return credentials.with_scopes(scopes, default_scopes=default_scopes)
+ else:
+ return credentials
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Signing(object):
+ """Interface for credentials that can cryptographically sign messages."""
+
+ @abc.abstractmethod
+ def sign_bytes(self, message):
+ """Signs the given message.
+
+ Args:
+ message (bytes): The message to sign.
+
+ Returns:
+ bytes: The message's cryptographic signature.
+ """
+ # pylint: disable=missing-raises-doc,redundant-returns-doc
+ # (pylint doesn't recognize that this is abstract)
+ raise NotImplementedError("Sign bytes must be implemented.")
+
+ @abc.abstractproperty
+ def signer_email(self):
+ """Optional[str]: An email address that identifies the signer."""
+ # pylint: disable=missing-raises-doc
+ # (pylint doesn't recognize that this is abstract)
+ raise NotImplementedError("Signer email must be implemented.")
+
+ @abc.abstractproperty
+ def signer(self):
+ """google.auth.crypt.Signer: The signer used to sign bytes."""
+ # pylint: disable=missing-raises-doc
+ # (pylint doesn't recognize that this is abstract)
+ raise NotImplementedError("Signer must be implemented.")
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/__init__.py b/contrib/python/google-auth/py2/google/auth/crypt/__init__.py
new file mode 100644
index 0000000000..15ac950686
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/__init__.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cryptography helpers for verifying and signing messages.
+
+The simplest way to verify signatures is using :func:`verify_signature`::
+
+ cert = open('certs.pem').read()
+ valid = crypt.verify_signature(message, signature, cert)
+
+If you're going to verify many messages with the same certificate, you can use
+:class:`RSAVerifier`::
+
+ cert = open('certs.pem').read()
+ verifier = crypt.RSAVerifier.from_string(cert)
+ valid = verifier.verify(message, signature)
+
+To sign messages use :class:`RSASigner` with a private key::
+
+ private_key = open('private_key.pem').read()
+ signer = crypt.RSASigner.from_string(private_key)
+ signature = signer.sign(message)
+
+The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`.
+Note that these two classes are only available if your `cryptography` dependency
+version is at least 1.4.0.
+"""
+
+import six
+
+from google.auth.crypt import base
+from google.auth.crypt import rsa
+
+try:
+ from google.auth.crypt import es256
+except ImportError: # pragma: NO COVER
+ es256 = None
+
+if es256 is not None: # pragma: NO COVER
+ __all__ = [
+ "ES256Signer",
+ "ES256Verifier",
+ "RSASigner",
+ "RSAVerifier",
+ "Signer",
+ "Verifier",
+ ]
+else: # pragma: NO COVER
+ __all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"]
+
+
+# Aliases to maintain the v1.0.0 interface, as the crypt module was split
+# into submodules.
+Signer = base.Signer
+Verifier = base.Verifier
+RSASigner = rsa.RSASigner
+RSAVerifier = rsa.RSAVerifier
+
+if es256 is not None: # pragma: NO COVER
+ ES256Signer = es256.ES256Signer
+ ES256Verifier = es256.ES256Verifier
+
+
def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier):
    """Verify an RSA or ECDSA cryptographic signature.

    Checks that the provided ``signature`` was generated from ``bytes`` using
    the private key associated with the ``cert``.

    Args:
        message (Union[str, bytes]): The plaintext message.
        signature (Union[str, bytes]): The cryptographic signature to check.
        certs (Union[Sequence, str, bytes]): The certificate or certificates
            to use to check the signature.
        verifier_cls (Optional[~google.auth.crypt.base.Signer]): Which verifier
            class to use for verification. This can be used to select different
            algorithms, such as RSA or ECDSA. Default value is :class:`RSAVerifier`.

    Returns:
        bool: True if the signature is valid, otherwise False.
    """
    # A lone certificate may be passed as a bare string or bytes value;
    # normalize it into a one-element list before iterating.
    if isinstance(certs, (six.text_type, six.binary_type)):
        certs = [certs]

    # The signature is accepted if *any* of the supplied certificates
    # verifies it.
    return any(
        verifier_cls.from_string(cert).verify(message, signature)
        for cert in certs
    )
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/_cryptography_rsa.py b/contrib/python/google-auth/py2/google/auth/crypt/_cryptography_rsa.py
new file mode 100644
index 0000000000..916c9d80a8
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/_cryptography_rsa.py
@@ -0,0 +1,136 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""RSA verifier and signer that use the ``cryptography`` library.
+
+This is a much faster implementation than the default (in
+``google.auth.crypt._python_rsa``), which depends on the pure-Python
+``rsa`` library.
+"""
+
+import cryptography.exceptions
+from cryptography.hazmat import backends
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import padding
+import cryptography.x509
+
+from google.auth import _helpers
+from google.auth.crypt import base
+
+_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
+_BACKEND = backends.default_backend()
+_PADDING = padding.PKCS1v15()
+_SHA256 = hashes.SHA256()
+
+
+class RSAVerifier(base.Verifier):
+    """Verifies RSA cryptographic signatures using public keys.
+
+    Args:
+        public_key (
+            cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey):
+            The public key used to verify signatures.
+    """
+
+    def __init__(self, public_key):
+        self._pubkey = public_key
+
+    @_helpers.copy_docstring(base.Verifier)
+    def verify(self, message, signature):
+        message = _helpers.to_bytes(message)
+        try:
+            # Verification uses PKCS#1 v1.5 padding with SHA-256, matching
+            # what RSASigner.sign in this module produces.
+            self._pubkey.verify(signature, message, _PADDING, _SHA256)
+            return True
+        except (ValueError, cryptography.exceptions.InvalidSignature):
+            # Malformed or mismatched signatures are reported as False
+            # rather than raising, per the base.Verifier contract.
+            return False
+
+    @classmethod
+    def from_string(cls, public_key):
+        """Construct an Verifier instance from a public key or public
+        certificate string.
+
+        Args:
+            public_key (Union[str, bytes]): The public key in PEM format or the
+                x509 public key certificate.
+
+        Returns:
+            Verifier: The constructed verifier.
+
+        Raises:
+            ValueError: If the public key can't be parsed.
+        """
+        public_key_data = _helpers.to_bytes(public_key)
+
+        # A PEM x509 certificate is distinguished from a bare public key by
+        # its "BEGIN CERTIFICATE" marker; extract the public key from it.
+        if _CERTIFICATE_MARKER in public_key_data:
+            cert = cryptography.x509.load_pem_x509_certificate(
+                public_key_data, _BACKEND
+            )
+            pubkey = cert.public_key()
+
+        else:
+            pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
+
+        return cls(pubkey)
+
+
+class RSASigner(base.Signer, base.FromServiceAccountMixin):
+    """Signs messages with an RSA private key.
+
+    Args:
+        private_key (
+            cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
+            The private key to sign with.
+        key_id (str): Optional key ID used to identify this private key. This
+            can be useful to associate the private key with its associated
+            public key or certificate.
+    """
+
+    def __init__(self, private_key, key_id=None):
+        self._key = private_key
+        self._key_id = key_id
+
+    @property
+    @_helpers.copy_docstring(base.Signer)
+    def key_id(self):
+        return self._key_id
+
+    @_helpers.copy_docstring(base.Signer)
+    def sign(self, message):
+        message = _helpers.to_bytes(message)
+        # Sign with PKCS#1 v1.5 padding and SHA-256 (i.e. RS256), matching
+        # what RSAVerifier.verify in this module expects.
+        return self._key.sign(message, _PADDING, _SHA256)
+
+    @classmethod
+    def from_string(cls, key, key_id=None):
+        """Construct a RSASigner from a private key in PEM format.
+
+        Args:
+            key (Union[bytes, str]): Private key in PEM format.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt._cryptography_rsa.RSASigner: The
+            constructed signer.
+
+        Raises:
+            ValueError: If ``key`` is not ``bytes`` or ``str`` (unicode).
+            UnicodeDecodeError: If ``key`` is ``bytes`` but cannot be decoded
+                into a UTF-8 ``str``.
+            ValueError: If ``cryptography`` "Could not deserialize key data."
+        """
+        key = _helpers.to_bytes(key)
+        # password=None: only unencrypted PEM private keys are supported.
+        private_key = serialization.load_pem_private_key(
+            key, password=None, backend=_BACKEND
+        )
+        return cls(private_key, key_id=key_id)
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/_helpers.py b/contrib/python/google-auth/py2/google/auth/crypt/_helpers.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/_helpers.py
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/_python_rsa.py b/contrib/python/google-auth/py2/google/auth/crypt/_python_rsa.py
new file mode 100644
index 0000000000..ec30dd09a3
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/_python_rsa.py
@@ -0,0 +1,173 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pure-Python RSA cryptography implementation.
+
+Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
+to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
+certificates. There is no support for p12 files.
+"""
+
+from __future__ import absolute_import
+
+from pyasn1.codec.der import decoder
+from pyasn1_modules import pem
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+import rsa
+import six
+
+from google.auth import _helpers
+from google.auth.crypt import base
+
+_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
+_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
+_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----")
+_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----")
+_PKCS8_SPEC = PrivateKeyInfo()
+
+
def _bit_list_to_bytes(bit_list):
    """Converts an iterable of 1s and 0s to bytes.

    Bits are consumed eight at a time, most-significant bit first; a final
    group shorter than eight bits pairs only with the highest weights (the
    truncating zip), which is equivalent to right-padding it with zeros.

    Args:
        bit_list (Sequence): Sequence of 1s and 0s.

    Returns:
        bytes: The decoded bytes.
    """
    decoded = bytearray(
        sum(
            weight * bit
            for weight, bit in six.moves.zip(_POW2, bit_list[offset : offset + 8])
        )
        for offset in six.moves.xrange(0, len(bit_list), 8)
    )
    return bytes(decoded)
+
+
+class RSAVerifier(base.Verifier):
+    """Verifies RSA cryptographic signatures using public keys.
+
+    Args:
+        public_key (rsa.key.PublicKey): The public key used to verify
+            signatures.
+    """
+
+    def __init__(self, public_key):
+        self._pubkey = public_key
+
+    @_helpers.copy_docstring(base.Verifier)
+    def verify(self, message, signature):
+        message = _helpers.to_bytes(message)
+        try:
+            return rsa.pkcs1.verify(message, signature, self._pubkey)
+        except (ValueError, rsa.pkcs1.VerificationError):
+            # Malformed or mismatched signatures are reported as False
+            # rather than raising, per the base.Verifier contract.
+            return False
+
+    @classmethod
+    def from_string(cls, public_key):
+        """Construct an Verifier instance from a public key or public
+        certificate string.
+
+        Args:
+            public_key (Union[str, bytes]): The public key in PEM format or the
+                x509 public key certificate.
+
+        Returns:
+            google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier.
+
+        Raises:
+            ValueError: If the public_key can't be parsed.
+        """
+        public_key = _helpers.to_bytes(public_key)
+        is_x509_cert = _CERTIFICATE_MARKER in public_key
+
+        # If this is a certificate, extract the public key info.
+        if is_x509_cert:
+            der = rsa.pem.load_pem(public_key, "CERTIFICATE")
+            asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
+            # Reject trailing garbage after the certificate's DER encoding.
+            if remaining != b"":
+                raise ValueError("Unused bytes", remaining)
+
+            cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"]
+            # subjectPublicKey is an ASN.1 BIT STRING (a sequence of bits),
+            # so it must be repacked into bytes before PKCS#1 parsing.
+            key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"])
+            pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER")
+        else:
+            pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM")
+        return cls(pubkey)
+
+
+class RSASigner(base.Signer, base.FromServiceAccountMixin):
+    """Signs messages with an RSA private key.
+
+    Args:
+        private_key (rsa.key.PrivateKey): The private key to sign with.
+        key_id (str): Optional key ID used to identify this private key. This
+            can be useful to associate the private key with its associated
+            public key or certificate.
+    """
+
+    def __init__(self, private_key, key_id=None):
+        self._key = private_key
+        self._key_id = key_id
+
+    @property
+    @_helpers.copy_docstring(base.Signer)
+    def key_id(self):
+        return self._key_id
+
+    @_helpers.copy_docstring(base.Signer)
+    def sign(self, message):
+        message = _helpers.to_bytes(message)
+        # SHA-256 PKCS#1 v1.5 signing (RS256), matching RSAVerifier.verify.
+        return rsa.pkcs1.sign(message, self._key, "SHA-256")
+
+    @classmethod
+    def from_string(cls, key, key_id=None):
+        """Construct an Signer instance from a private key in PEM format.
+
+        Args:
+            key (str): Private key in PEM format.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+
+        Raises:
+            ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
+                PEM format.
+        """
+        key = _helpers.from_bytes(key)  # PEM expects str in Python 3
+        # marker_id is the index of the marker pair that matched: 0 for
+        # _PKCS1_MARKER, 1 for _PKCS8_MARKER (the order they are passed in).
+        marker_id, key_bytes = pem.readPemBlocksFromFile(
+            six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER
+        )
+
+        # Key is in pkcs1 format.
+        if marker_id == 0:
+            private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER")
+        # Key is in pkcs8.
+        elif marker_id == 1:
+            key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
+            # Reject trailing garbage after the PrivateKeyInfo structure.
+            if remaining != b"":
+                raise ValueError("Unused bytes", remaining)
+            # PKCS#8 wraps a PKCS#1 key; unwrap it and parse the inner key.
+            private_key_info = key_info.getComponentByName("privateKey")
+            private_key = rsa.key.PrivateKey.load_pkcs1(
+                private_key_info.asOctets(), format="DER"
+            )
+        else:
+            raise ValueError("No key could be detected.")
+
+        return cls(private_key, key_id=key_id)
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/base.py b/contrib/python/google-auth/py2/google/auth/crypt/base.py
new file mode 100644
index 0000000000..c98d5bf64f
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/base.py
@@ -0,0 +1,131 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes for cryptographic signers and verifiers."""
+
+import abc
+import io
+import json
+
+import six
+
+
+_JSON_FILE_PRIVATE_KEY = "private_key"
+_JSON_FILE_PRIVATE_KEY_ID = "private_key_id"
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Verifier(object):
+    """Abstract base class for cryptographic signature verifiers."""
+
+    @abc.abstractmethod
+    def verify(self, message, signature):
+        """Verifies a message against a cryptographic signature.
+
+        Args:
+            message (Union[str, bytes]): The message to verify.
+            signature (Union[str, bytes]): The cryptography signature to check.
+
+        Returns:
+            bool: True if message was signed by the private key associated
+            with the public key that this object was constructed with.
+        """
+        # pylint: disable=missing-raises-doc,redundant-returns-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Verify must be implemented")
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Signer(object):
+    """Abstract base class for cryptographic signers."""
+
+    # NOTE(review): abc.abstractproperty is deprecated on Python 3; it is
+    # kept here because this vendored copy still supports Python 2.
+    @abc.abstractproperty
+    def key_id(self):
+        """Optional[str]: The key ID used to identify this private key."""
+        raise NotImplementedError("Key id must be implemented")
+
+    @abc.abstractmethod
+    def sign(self, message):
+        """Signs a message.
+
+        Args:
+            message (Union[str, bytes]): The message to be signed.
+
+        Returns:
+            bytes: The signature of the message.
+        """
+        # pylint: disable=missing-raises-doc,redundant-returns-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Sign must be implemented")
+
+
+@six.add_metaclass(abc.ABCMeta)
+class FromServiceAccountMixin(object):
+    """Mix-in to enable factory constructors for a Signer."""
+
+    # NOTE(review): declared with ``cls`` but not decorated @classmethod;
+    # concrete subclasses define from_string as a classmethod, which is what
+    # lets the factory methods below invoke it via ``cls.from_string``.
+    @abc.abstractmethod
+    def from_string(cls, key, key_id=None):
+        """Construct an Signer instance from a private key string.
+
+        Args:
+            key (str): Private key as a string.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+
+        Raises:
+            ValueError: If the key cannot be parsed.
+        """
+        raise NotImplementedError("from_string must be implemented")
+
+    @classmethod
+    def from_service_account_info(cls, info):
+        """Creates a Signer instance instance from a dictionary containing
+        service account info in Google format.
+
+        Args:
+            info (Mapping[str, str]): The service account info in Google
+                format.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+        """
+        # Fail fast with a clear error if the JSON has no private key at all
+        # (a common symptom of passing the wrong kind of credentials file).
+        if _JSON_FILE_PRIVATE_KEY not in info:
+            raise ValueError(
+                "The private_key field was not found in the service account " "info."
+            )
+
+        # The key ID is optional; .get() returns None when it is absent.
+        return cls.from_string(
+            info[_JSON_FILE_PRIVATE_KEY], info.get(_JSON_FILE_PRIVATE_KEY_ID)
+        )
+
+    @classmethod
+    def from_service_account_file(cls, filename):
+        """Creates a Signer instance from a service account .json file
+        in Google format.
+
+        Args:
+            filename (str): The path to the service account .json file.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+        """
+        # io.open (rather than builtin open) supports the encoding argument
+        # on Python 2 as well.
+        with io.open(filename, "r", encoding="utf-8") as json_file:
+            data = json.load(json_file)
+
+        return cls.from_service_account_info(data)
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/es256.py b/contrib/python/google-auth/py2/google/auth/crypt/es256.py
new file mode 100644
index 0000000000..c6d6176067
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/es256.py
@@ -0,0 +1,148 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ECDSA (ES256) verifier and signer that use the ``cryptography`` library.
+"""
+
+from cryptography import utils
+import cryptography.exceptions
+from cryptography.hazmat import backends
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature
+from cryptography.hazmat.primitives.asymmetric.utils import encode_dss_signature
+import cryptography.x509
+
+from google.auth import _helpers
+from google.auth.crypt import base
+
+
+_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
+_BACKEND = backends.default_backend()
+_PADDING = padding.PKCS1v15()
+
+
+class ES256Verifier(base.Verifier):
+    """Verifies ECDSA cryptographic signatures using public keys.
+
+    Args:
+        public_key (
+            cryptography.hazmat.primitives.asymmetric.ec.ECDSAPublicKey):
+            The public key used to verify signatures.
+    """
+
+    def __init__(self, public_key):
+        self._pubkey = public_key
+
+    @_helpers.copy_docstring(base.Verifier)
+    def verify(self, message, signature):
+        # First convert (r||s) raw signature to ASN1 encoded signature.
+        sig_bytes = _helpers.to_bytes(signature)
+        # A raw ES256 signature is exactly two 32-byte big-endian integers
+        # concatenated; anything else cannot be valid.
+        if len(sig_bytes) != 64:
+            return False
+        # NOTE(review): cryptography.utils.int_from_bytes has been removed in
+        # newer cryptography releases; presumably this vendored py2 copy pins
+        # an older version — confirm against the pinned dependency.
+        r = utils.int_from_bytes(sig_bytes[:32], byteorder="big")
+        s = utils.int_from_bytes(sig_bytes[32:], byteorder="big")
+        asn1_sig = encode_dss_signature(r, s)
+
+        message = _helpers.to_bytes(message)
+        try:
+            self._pubkey.verify(asn1_sig, message, ec.ECDSA(hashes.SHA256()))
+            return True
+        except (ValueError, cryptography.exceptions.InvalidSignature):
+            # Malformed or mismatched signatures are reported as False
+            # rather than raising, per the base.Verifier contract.
+            return False
+
+    @classmethod
+    def from_string(cls, public_key):
+        """Construct an Verifier instance from a public key or public
+        certificate string.
+
+        Args:
+            public_key (Union[str, bytes]): The public key in PEM format or the
+                x509 public key certificate.
+
+        Returns:
+            Verifier: The constructed verifier.
+
+        Raises:
+            ValueError: If the public key can't be parsed.
+        """
+        public_key_data = _helpers.to_bytes(public_key)
+
+        # A PEM x509 certificate is distinguished from a bare public key by
+        # its "BEGIN CERTIFICATE" marker; extract the public key from it.
+        if _CERTIFICATE_MARKER in public_key_data:
+            cert = cryptography.x509.load_pem_x509_certificate(
+                public_key_data, _BACKEND
+            )
+            pubkey = cert.public_key()
+
+        else:
+            pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
+
+        return cls(pubkey)
+
+
+class ES256Signer(base.Signer, base.FromServiceAccountMixin):
+    """Signs messages with an ECDSA private key.
+
+    Args:
+        private_key (
+            cryptography.hazmat.primitives.asymmetric.ec.ECDSAPrivateKey):
+            The private key to sign with.
+        key_id (str): Optional key ID used to identify this private key. This
+            can be useful to associate the private key with its associated
+            public key or certificate.
+    """
+
+    def __init__(self, private_key, key_id=None):
+        self._key = private_key
+        self._key_id = key_id
+
+    @property
+    @_helpers.copy_docstring(base.Signer)
+    def key_id(self):
+        return self._key_id
+
+    @_helpers.copy_docstring(base.Signer)
+    def sign(self, message):
+        message = _helpers.to_bytes(message)
+        asn1_signature = self._key.sign(message, ec.ECDSA(hashes.SHA256()))
+
+        # Convert ASN1 encoded signature to (r||s) raw signature: each
+        # integer is emitted as a fixed 32-byte big-endian value, giving the
+        # 64-byte form that ES256Verifier.verify expects.
+        (r, s) = decode_dss_signature(asn1_signature)
+        return utils.int_to_bytes(r, 32) + utils.int_to_bytes(s, 32)
+
+    @classmethod
+    def from_string(cls, key, key_id=None):
+        """Construct an ES256Signer from a private key in PEM format.
+
+        Args:
+            key (Union[bytes, str]): Private key in PEM format.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt.es256.ES256Signer: The constructed signer.
+
+        Raises:
+            ValueError: If ``key`` is not ``bytes`` or ``str`` (unicode).
+            UnicodeDecodeError: If ``key`` is ``bytes`` but cannot be decoded
+                into a UTF-8 ``str``.
+            ValueError: If ``cryptography`` "Could not deserialize key data."
+        """
+        key = _helpers.to_bytes(key)
+        # password=None: only unencrypted PEM private keys are supported.
+        private_key = serialization.load_pem_private_key(
+            key, password=None, backend=_BACKEND
+        )
+        return cls(private_key, key_id=key_id)
diff --git a/contrib/python/google-auth/py2/google/auth/crypt/rsa.py b/contrib/python/google-auth/py2/google/auth/crypt/rsa.py
new file mode 100644
index 0000000000..8b2d64c103
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/crypt/rsa.py
@@ -0,0 +1,30 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""RSA cryptography signer and verifier."""
+
+
+try:
+    # Prefer the cryptography-based RSA implementation: it is backed by C
+    # code and is much faster than the pure-Python ``rsa`` package.
+    from google.auth.crypt import _cryptography_rsa
+
+    RSASigner = _cryptography_rsa.RSASigner
+    RSAVerifier = _cryptography_rsa.RSAVerifier
+except ImportError:  # pragma: NO COVER
+    # Fallback to pure-python RSA implementation if cryptography is
+    # unavailable.
+    from google.auth.crypt import _python_rsa
+
+    RSASigner = _python_rsa.RSASigner
+    RSAVerifier = _python_rsa.RSAVerifier
diff --git a/contrib/python/google-auth/py2/google/auth/downscoped.py b/contrib/python/google-auth/py2/google/auth/downscoped.py
new file mode 100644
index 0000000000..96a4e65473
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/downscoped.py
@@ -0,0 +1,499 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Downscoping with Credential Access Boundaries
+
+This module provides the ability to downscope credentials using
+`Downscoping with Credential Access Boundaries`_. This is useful to restrict the
+Identity and Access Management (IAM) permissions that a short-lived credential
+can use.
+
+To downscope permissions of a source credential, a Credential Access Boundary
+that specifies which resources the new credential can access, as well as
+an upper bound on the permissions that are available on each resource, has to
+be defined. A downscoped credential can then be instantiated using the source
+credential and the Credential Access Boundary.
+
+The common pattern of usage is to have a token broker with elevated access
+generate these downscoped credentials from higher access source credentials and
+pass the downscoped short-lived access tokens to a token consumer via some
+secure authenticated channel for limited access to Google Cloud Storage
+resources.
+
+For example, a token broker can be set up on a server in a private network.
+Various workloads (token consumers) in the same network will send authenticated
+requests to that broker for downscoped tokens to access or modify specific google
+cloud storage buckets.
+
+The broker will instantiate downscoped credentials instances that can be used to
+generate short lived downscoped access tokens that can be passed to the token
+consumer. These downscoped access tokens can be injected by the consumer into
+google.oauth2.Credentials and used to initialize a storage client instance to
+access Google Cloud Storage resources with restricted access.
+
+Note: Only Cloud Storage supports Credential Access Boundaries. Other Google
+Cloud services do not support this feature.
+
+.. _Downscoping with Credential Access Boundaries: https://cloud.google.com/iam/docs/downscoping-short-lived-credentials
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.oauth2 import sts
+
# The maximum number of access boundary rules a Credential Access Boundary can
# contain.
_MAX_ACCESS_BOUNDARY_RULES_COUNT = 10
# The token exchange grant_type used for exchanging credentials.
_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
# The token exchange requested_token_type. This is always an access_token.
_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
# The STS token URL used to exchanged a short lived access token for a downscoped one.
_STS_TOKEN_URL = "https://sts.googleapis.com/v1/token"
# The subject token type to use when exchanging a short lived access token for a
# downscoped token.
_STS_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"


class CredentialAccessBoundary(object):
    """Defines a Credential Access Boundary which contains a list of access boundary
    rules. Each rule contains information on the resource that the rule applies to,
    the upper bound of the permissions that are available on that resource and an
    optional condition to further restrict permissions.
    """

    def __init__(self, rules=None):
        """Instantiates a Credential Access Boundary. A Credential Access Boundary
        can contain up to 10 access boundary rules.

        Args:
            rules (Sequence[google.auth.downscoped.AccessBoundaryRule]): The list of
                access boundary rules limiting the access that a downscoped credential
                will have. Defaults to no rules.
        Raises:
            TypeError: If any of the rules are not a valid type.
            ValueError: If the provided rules exceed the maximum allowed.
        """
        # A mutable default argument (``rules=[]``) is a classic Python
        # pitfall (pylint dangerous-default-value). ``None`` is used as the
        # default instead and substituted with a fresh empty list, which is
        # behaviorally identical for callers.
        self.rules = [] if rules is None else rules

    @property
    def rules(self):
        """Returns the list of access boundary rules defined on the Credential
        Access Boundary.

        Returns:
            Tuple[google.auth.downscoped.AccessBoundaryRule, ...]: The list of access
            boundary rules defined on the Credential Access Boundary. These are returned
            as an immutable tuple to prevent modification.
        """
        return tuple(self._rules)

    @rules.setter
    def rules(self, value):
        """Updates the current rules on the Credential Access Boundary. This will overwrite
        the existing set of rules.

        Args:
            value (Sequence[google.auth.downscoped.AccessBoundaryRule]): The list of
                access boundary rules limiting the access that a downscoped credential
                will have.
        Raises:
            TypeError: If any of the rules are not a valid type.
            ValueError: If the provided rules exceed the maximum allowed.
        """
        if len(value) > _MAX_ACCESS_BOUNDARY_RULES_COUNT:
            raise ValueError(
                "Credential access boundary rules can have a maximum of {} rules.".format(
                    _MAX_ACCESS_BOUNDARY_RULES_COUNT
                )
            )
        for access_boundary_rule in value:
            if not isinstance(access_boundary_rule, AccessBoundaryRule):
                raise TypeError(
                    "List of rules provided do not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
                )
        # Make a copy of the original list so later mutation of the caller's
        # sequence cannot silently change this Credential Access Boundary.
        self._rules = list(value)

    def add_rule(self, rule):
        """Adds a single access boundary rule to the existing rules.

        Args:
            rule (google.auth.downscoped.AccessBoundaryRule): The access boundary rule,
                limiting the access that a downscoped credential will have, to be added to
                the existing rules.
        Raises:
            TypeError: If any of the rules are not a valid type.
            ValueError: If the provided rules exceed the maximum allowed.
        """
        if len(self.rules) == _MAX_ACCESS_BOUNDARY_RULES_COUNT:
            raise ValueError(
                "Credential access boundary rules can have a maximum of {} rules.".format(
                    _MAX_ACCESS_BOUNDARY_RULES_COUNT
                )
            )
        if not isinstance(rule, AccessBoundaryRule):
            raise TypeError(
                "The provided rule does not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
            )
        self._rules.append(rule)

    def to_json(self):
        """Generates the dictionary representation of the Credential Access Boundary.
        This uses the format expected by the Security Token Service API as documented in
        `Defining a Credential Access Boundary`_.

        .. _Defining a Credential Access Boundary:
            https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary

        Returns:
            Mapping: Credential Access Boundary Rule represented in a dictionary object.
        """
        rules = []
        for access_boundary_rule in self.rules:
            rules.append(access_boundary_rule.to_json())

        return {"accessBoundary": {"accessBoundaryRules": rules}}
+
+
+class AccessBoundaryRule(object):
+ """Defines an access boundary rule which contains information on the resource that
+ the rule applies to, the upper bound of the permissions that are available on that
+ resource and an optional condition to further restrict permissions.
+ """
+
+ def __init__(
+ self, available_resource, available_permissions, availability_condition=None
+ ):
+ """Instantiates a single access boundary rule.
+
+ Args:
+ available_resource (str): The full resource name of the Cloud Storage bucket
+ that the rule applies to. Use the format
+ "//storage.googleapis.com/projects/_/buckets/bucket-name".
+ available_permissions (Sequence[str]): A list defining the upper bound that
+ the downscoped token will have on the available permissions for the
+ resource. Each value is the identifier for an IAM predefined role or
+ custom role, with the prefix "inRole:". For example:
+ "inRole:roles/storage.objectViewer".
+ Only the permissions in these roles will be available.
+ availability_condition (Optional[google.auth.downscoped.AvailabilityCondition]):
+ Optional condition that restricts the availability of permissions to
+ specific Cloud Storage objects.
+
+ Raises:
+ TypeError: If any of the parameters are not of the expected types.
+ ValueError: If any of the parameters are not of the expected values.
+ """
+ self.available_resource = available_resource
+ self.available_permissions = available_permissions
+ self.availability_condition = availability_condition
+
+ @property
+ def available_resource(self):
+ """Returns the current available resource.
+
+ Returns:
+ str: The current available resource.
+ """
+ return self._available_resource
+
+ @available_resource.setter
+ def available_resource(self, value):
+ """Updates the current available resource.
+
+ Args:
+ value (str): The updated value of the available resource.
+
+ Raises:
+ TypeError: If the value is not a string.
+ """
+ if not isinstance(value, str):
+ raise TypeError("The provided available_resource is not a string.")
+ self._available_resource = value
+
+ @property
+ def available_permissions(self):
+ """Returns the current available permissions.
+
+ Returns:
+ Tuple[str, ...]: The current available permissions. These are returned
+ as an immutable tuple to prevent modification.
+ """
+ return tuple(self._available_permissions)
+
+ @available_permissions.setter
+ def available_permissions(self, value):
+ """Updates the current available permissions.
+
+ Args:
+ value (Sequence[str]): The updated value of the available permissions.
+
+ Raises:
+ TypeError: If the value is not a list of strings.
+ ValueError: If the value is not valid.
+ """
+ for available_permission in value:
+ if not isinstance(available_permission, str):
+ raise TypeError(
+ "Provided available_permissions are not a list of strings."
+ )
+ if available_permission.find("inRole:") != 0:
+ raise ValueError(
+ "available_permissions must be prefixed with 'inRole:'."
+ )
+ # Make a copy of the original list.
+ self._available_permissions = list(value)
+
+ @property
+ def availability_condition(self):
+ """Returns the current availability condition.
+
+ Returns:
+ Optional[google.auth.downscoped.AvailabilityCondition]: The current
+ availability condition.
+ """
+ return self._availability_condition
+
+ @availability_condition.setter
+ def availability_condition(self, value):
+ """Updates the current availability condition.
+
+ Args:
+ value (Optional[google.auth.downscoped.AvailabilityCondition]): The updated
+ value of the availability condition.
+
+ Raises:
+ TypeError: If the value is not of type google.auth.downscoped.AvailabilityCondition
+ or None.
+ """
+ if not isinstance(value, AvailabilityCondition) and value is not None:
+ raise TypeError(
+ "The provided availability_condition is not a 'google.auth.downscoped.AvailabilityCondition' or None."
+ )
+ self._availability_condition = value
+
+ def to_json(self):
+ """Generates the dictionary representation of the access boundary rule.
+ This uses the format expected by the Security Token Service API as documented in
+ `Defining a Credential Access Boundary`_.
+
+ .. _Defining a Credential Access Boundary:
+ https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary
+
+ Returns:
+ Mapping: The access boundary rule represented in a dictionary object.
+ """
+ json = {
+ "availablePermissions": list(self.available_permissions),
+ "availableResource": self.available_resource,
+ }
+ if self.availability_condition:
+ json["availabilityCondition"] = self.availability_condition.to_json()
+ return json
+
+
+class AvailabilityCondition(object):
+ """An optional condition that can be used as part of a Credential Access Boundary
+ to further restrict permissions."""
+
+ def __init__(self, expression, title=None, description=None):
+ """Instantiates an availability condition using the provided expression and
+ optional title or description.
+
+ Args:
+ expression (str): A condition expression that specifies the Cloud Storage
+ objects where permissions are available. For example, this expression
+ makes permissions available for objects whose name starts with "customer-a":
+ "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-a')"
+ title (Optional[str]): An optional short string that identifies the purpose of
+ the condition.
+ description (Optional[str]): Optional details about the purpose of the condition.
+
+ Raises:
+ TypeError: If any of the parameters are not of the expected types.
+ ValueError: If any of the parameters are not of the expected values.
+ """
+ self.expression = expression
+ self.title = title
+ self.description = description
+
+ @property
+ def expression(self):
+ """Returns the current condition expression.
+
+ Returns:
+ str: The current condition expression.
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, value):
+ """Updates the current condition expression.
+
+ Args:
+ value (str): The updated value of the condition expression.
+
+ Raises:
+ TypeError: If the value is not of type string.
+ """
+ if not isinstance(value, str):
+ raise TypeError("The provided expression is not a string.")
+ self._expression = value
+
+ @property
+ def title(self):
+ """Returns the current title.
+
+ Returns:
+ Optional[str]: The current title.
+ """
+ return self._title
+
+ @title.setter
+ def title(self, value):
+ """Updates the current title.
+
+ Args:
+ value (Optional[str]): The updated value of the title.
+
+ Raises:
+ TypeError: If the value is not of type string or None.
+ """
+ if not isinstance(value, str) and value is not None:
+ raise TypeError("The provided title is not a string or None.")
+ self._title = value
+
+ @property
+ def description(self):
+ """Returns the current description.
+
+ Returns:
+ Optional[str]: The current description.
+ """
+ return self._description
+
+ @description.setter
+ def description(self, value):
+ """Updates the current description.
+
+ Args:
+ value (Optional[str]): The updated value of the description.
+
+ Raises:
+ TypeError: If the value is not of type string or None.
+ """
+ if not isinstance(value, str) and value is not None:
+ raise TypeError("The provided description is not a string or None.")
+ self._description = value
+
+ def to_json(self):
+ """Generates the dictionary representation of the availability condition.
+ This uses the format expected by the Security Token Service API as documented in
+ `Defining a Credential Access Boundary`_.
+
+ .. _Defining a Credential Access Boundary:
+ https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary
+
+ Returns:
+ Mapping[str, str]: The availability condition represented in a dictionary
+ object.
+ """
+ json = {"expression": self.expression}
+ if self.title:
+ json["title"] = self.title
+ if self.description:
+ json["description"] = self.description
+ return json
+
+
+class Credentials(credentials.CredentialsWithQuotaProject):
+ """Defines a set of Google credentials that are downscoped from an existing set
+ of Google OAuth2 credentials. This is useful to restrict the Identity and Access
+ Management (IAM) permissions that a short-lived credential can use.
+ The common pattern of usage is to have a token broker with elevated access
+ generate these downscoped credentials from higher access source credentials and
+ pass the downscoped short-lived access tokens to a token consumer via some
+ secure authenticated channel for limited access to Google Cloud Storage
+ resources.
+ """
+
+ def __init__(
+ self, source_credentials, credential_access_boundary, quota_project_id=None
+ ):
+ """Instantiates a downscoped credentials object using the provided source
+ credentials and credential access boundary rules.
+ To downscope permissions of a source credential, a Credential Access Boundary
+ that specifies which resources the new credential can access, as well as an
+ upper bound on the permissions that are available on each resource, has to be
+ defined. A downscoped credential can then be instantiated using the source
+ credential and the Credential Access Boundary.
+
+ Args:
+ source_credentials (google.auth.credentials.Credentials): The source credentials
+ to be downscoped based on the provided Credential Access Boundary rules.
+ credential_access_boundary (google.auth.downscoped.CredentialAccessBoundary):
+ The Credential Access Boundary which contains a list of access boundary
+ rules. Each rule contains information on the resource that the rule applies to,
+ the upper bound of the permissions that are available on that resource and an
+ optional condition to further restrict permissions.
+ quota_project_id (Optional[str]): The optional quota project ID.
+ Raises:
+ google.auth.exceptions.RefreshError: If the source credentials
+ return an error on token refresh.
+ google.auth.exceptions.OAuthError: If the STS token exchange
+ endpoint returned an error during downscoped token generation.
+ """
+
+ super(Credentials, self).__init__()
+ self._source_credentials = source_credentials
+ self._credential_access_boundary = credential_access_boundary
+ self._quota_project_id = quota_project_id
+ self._sts_client = sts.Client(_STS_TOKEN_URL)
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ # Generate an access token from the source credentials.
+ self._source_credentials.refresh(request)
+ now = _helpers.utcnow()
+ # Exchange the access token for a downscoped access token.
+ response_data = self._sts_client.exchange_token(
+ request=request,
+ grant_type=_STS_GRANT_TYPE,
+ subject_token=self._source_credentials.token,
+ subject_token_type=_STS_SUBJECT_TOKEN_TYPE,
+ requested_token_type=_STS_REQUESTED_TOKEN_TYPE,
+ additional_options=self._credential_access_boundary.to_json(),
+ )
+ self.token = response_data.get("access_token")
+ # For downscoping CAB flow, the STS endpoint may not return the expiration
+ # field for some flows. The generated downscoped token should always have
+ # the same expiration time as the source credentials. When no expires_in
+ # field is returned in the response, we can just get the expiration time
+ # from the source credentials.
+ if response_data.get("expires_in"):
+ lifetime = datetime.timedelta(seconds=response_data.get("expires_in"))
+ self.expiry = now + lifetime
+ else:
+ self.expiry = self._source_credentials.expiry
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ self._source_credentials,
+ self._credential_access_boundary,
+ quota_project_id=quota_project_id,
+ )
diff --git a/contrib/python/google-auth/py2/google/auth/environment_vars.py b/contrib/python/google-auth/py2/google/auth/environment_vars.py
new file mode 100644
index 0000000000..d36d6c4afa
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/environment_vars.py
@@ -0,0 +1,78 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Environment variables used by :mod:`google.auth`."""
+
+
+PROJECT = "GOOGLE_CLOUD_PROJECT"
+"""Environment variable defining default project.
+
+This is used by :func:`google.auth.default` to explicitly set a project ID. This
+environment variable is also used by the Google Cloud Python Library.
+"""
+
+LEGACY_PROJECT = "GCLOUD_PROJECT"
+"""Previously used environment variable defining the default project.
+
+This environment variable is used instead of the current one in some
+situations (such as Google App Engine).
+"""
+
+CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS"
+"""Environment variable defining the location of Google application default
+credentials."""
+
+# The environment variable name which can replace ~/.config if set.
+CLOUD_SDK_CONFIG_DIR = "CLOUDSDK_CONFIG"
+"""Environment variable defines the location of Google Cloud SDK's config
+files."""
+
+# These two variables allow for customization of the addresses used when
+# contacting the GCE metadata service.
+GCE_METADATA_HOST = "GCE_METADATA_HOST"
+GCE_METADATA_ROOT = "GCE_METADATA_ROOT"
+"""Environment variable providing an alternate hostname or host:port to be
+used for GCE metadata requests.
+
+This environment variable is originally named GCE_METADATA_ROOT. System will
+check the new variable first; should there be no value present,
+the system falls back to the old variable.
+"""
+
+GCE_METADATA_IP = "GCE_METADATA_IP"
+"""Environment variable providing an alternate ip:port to be used for ip-only
+GCE metadata requests."""
+
+GOOGLE_API_USE_CLIENT_CERTIFICATE = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
+"""Environment variable controlling whether to use client certificate or not.
+
+The default value is false. Users have to explicitly set this value to true
+in order to use client certificate to establish a mutual TLS channel."""
+
+LEGACY_APPENGINE_RUNTIME = "APPENGINE_RUNTIME"
+"""Gen1 environment variable defining the App Engine Runtime.
+
+Used to distinguish between GAE gen1 and GAE gen2+.
+"""
+
+# AWS environment variables used with AWS workload identity pools to retrieve
+# AWS security credentials and the AWS region needed to create serialized
+# signed requests to the AWS STS GetCallerIdentity API that can be exchanged
+# for Google access tokens via the GCP STS endpoint.
+# When not available the AWS metadata server is used to retrieve these values.
+AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID"
+AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY"
+AWS_SESSION_TOKEN = "AWS_SESSION_TOKEN"
+AWS_REGION = "AWS_REGION"
+AWS_DEFAULT_REGION = "AWS_DEFAULT_REGION"
diff --git a/contrib/python/google-auth/py2/google/auth/exceptions.py b/contrib/python/google-auth/py2/google/auth/exceptions.py
new file mode 100644
index 0000000000..57f181ea1a
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/exceptions.py
@@ -0,0 +1,59 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exceptions used in the google.auth package."""
+
+
+class GoogleAuthError(Exception):
+ """Base class for all google.auth errors."""
+
+
+class TransportError(GoogleAuthError):
+ """Used to indicate an error occurred during an HTTP request."""
+
+
+class RefreshError(GoogleAuthError):
+ """Used to indicate that refreshing the credentials' access token
+ failed."""
+
+
+class UserAccessTokenError(GoogleAuthError):
+ """Used to indicate ``gcloud auth print-access-token`` command failed."""
+
+
+class DefaultCredentialsError(GoogleAuthError):
+ """Used to indicate that acquiring default credentials failed."""
+
+
+class MutualTLSChannelError(GoogleAuthError):
+ """Used to indicate that mutual TLS channel creation failed, or mutual
+ TLS channel credentials are missing or invalid."""
+
+
+class ClientCertError(GoogleAuthError):
+ """Used to indicate that client certificate is missing or invalid."""
+
+
+class OAuthError(GoogleAuthError):
+ """Used to indicate an error occurred during an OAuth related HTTP
+ request."""
+
+
+class ReauthFailError(RefreshError):
+ """An exception for when reauth failed."""
+
+ def __init__(self, message=None):
+ super(ReauthFailError, self).__init__(
+ "Reauthentication failed. {0}".format(message)
+ )
diff --git a/contrib/python/google-auth/py2/google/auth/external_account.py b/contrib/python/google-auth/py2/google/auth/external_account.py
new file mode 100644
index 0000000000..1f3034ac35
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/external_account.py
@@ -0,0 +1,368 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""External Account Credentials.
+
+This module provides credentials that exchange workload identity pool external
+credentials for Google access tokens. This facilitates accessing Google Cloud
+Platform resources from on-prem and non-Google Cloud platforms (e.g. AWS,
+Microsoft Azure, OIDC identity providers), using native credentials retrieved
+from the current environment without the need to copy, save and manage
+long-lived service account credentials.
+
+Specifically, this is intended to use access tokens acquired using the GCP STS
+token exchange endpoint following the `OAuth 2.0 Token Exchange`_ spec.
+
+.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693
+"""
+
+import abc
+import copy
+import datetime
+import json
+import re
+
+import six
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import impersonated_credentials
+from google.oauth2 import sts
+from google.oauth2 import utils
+
+# External account JSON type identifier.
+_EXTERNAL_ACCOUNT_JSON_TYPE = "external_account"
+# The token exchange grant_type used for exchanging credentials.
+_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+# The token exchange requested_token_type. This is always an access_token.
+_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+# Cloud resource manager URL used to retrieve project information.
+_CLOUD_RESOURCE_MANAGER = "https://cloudresourcemanager.googleapis.com/v1/projects/"
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Credentials(credentials.Scoped, credentials.CredentialsWithQuotaProject):
+ """Base class for all external account credentials.
+
+ This is used to instantiate Credentials for exchanging external account
+ credentials for Google access token and authorizing requests to Google APIs.
+ The base class implements the common logic for exchanging external account
+ credentials for Google access tokens.
+ """
+
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ ):
+ """Instantiates an external account credentials object.
+
+ Args:
+ audience (str): The STS audience field.
+ subject_token_type (str): The subject token type.
+ token_url (str): The STS endpoint URL.
+ credential_source (Mapping): The credential source dictionary.
+ service_account_impersonation_url (Optional[str]): The optional service account
+ impersonation generateAccessToken URL.
+ client_id (Optional[str]): The optional client ID.
+ client_secret (Optional[str]): The optional client secret.
+ quota_project_id (Optional[str]): The optional quota project ID.
+ scopes (Optional[Sequence[str]]): Optional scopes to request during the
+ authorization grant.
+ default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+ Raises:
+ google.auth.exceptions.RefreshError: If the generateAccessToken
+ endpoint returned an error.
+ """
+ super(Credentials, self).__init__()
+ self._audience = audience
+ self._subject_token_type = subject_token_type
+ self._token_url = token_url
+ self._credential_source = credential_source
+ self._service_account_impersonation_url = service_account_impersonation_url
+ self._client_id = client_id
+ self._client_secret = client_secret
+ self._quota_project_id = quota_project_id
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+
+ if self._client_id:
+ self._client_auth = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, self._client_id, self._client_secret
+ )
+ else:
+ self._client_auth = None
+ self._sts_client = sts.Client(self._token_url, self._client_auth)
+
+ if self._service_account_impersonation_url:
+ self._impersonated_credentials = self._initialize_impersonated_credentials()
+ else:
+ self._impersonated_credentials = None
+ self._project_id = None
+
+ @property
+ def info(self):
+ """Generates the dictionary representation of the current credentials.
+
+ Returns:
+ Mapping: The dictionary representation of the credentials. This is the
+ reverse of "from_info" defined on the subclasses of this class. It is
+ useful for serializing the current credentials so it can deserialized
+ later.
+ """
+ config_info = {
+ "type": _EXTERNAL_ACCOUNT_JSON_TYPE,
+ "audience": self._audience,
+ "subject_token_type": self._subject_token_type,
+ "token_url": self._token_url,
+ "service_account_impersonation_url": self._service_account_impersonation_url,
+ "credential_source": copy.deepcopy(self._credential_source),
+ "quota_project_id": self._quota_project_id,
+ "client_id": self._client_id,
+ "client_secret": self._client_secret,
+ }
+ return {key: value for key, value in config_info.items() if value is not None}
+
+ @property
+ def service_account_email(self):
+ """Returns the service account email if service account impersonation is used.
+
+ Returns:
+ Optional[str]: The service account email if impersonation is used. Otherwise
+ None is returned.
+ """
+ if self._service_account_impersonation_url:
+ # Parse email from URL. The format looks as follows:
+ # https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/name@project-id.iam.gserviceaccount.com:generateAccessToken
+ url = self._service_account_impersonation_url
+ start_index = url.rfind("/")
+ end_index = url.find(":generateAccessToken")
+ if start_index != -1 and end_index != -1 and start_index < end_index:
+ start_index = start_index + 1
+ return url[start_index:end_index]
+ return None
+
+ @property
+ def is_user(self):
+ """Returns whether the credentials represent a user (True) or workload (False).
+ Workloads behave similarly to service accounts. Currently workloads will use
+ service account impersonation but will eventually not require impersonation.
+ As a result, this property is more reliable than the service account email
+ property in determining if the credentials represent a user or workload.
+
+ Returns:
+ bool: True if the credentials represent a user. False if they represent a
+ workload.
+ """
+ # If service account impersonation is used, the credentials will always represent a
+ # service account.
+ if self._service_account_impersonation_url:
+ return False
+ # Workforce pools representing users have the following audience format:
+ # //iam.googleapis.com/locations/$location/workforcePools/$poolId/providers/$providerId
+ p = re.compile(r"//iam\.googleapis\.com/locations/[^/]+/workforcePools/")
+ if p.match(self._audience):
+ return True
+ return False
+
+ @property
+ def requires_scopes(self):
+ """Checks if the credentials requires scopes.
+
+ Returns:
+ bool: True if there are no scopes set otherwise False.
+ """
+ return not self._scopes and not self._default_scopes
+
+ @property
+ def project_number(self):
+ """Optional[str]: The project number corresponding to the workload identity pool."""
+
+ # STS audience pattern:
+ # //iam.googleapis.com/projects/$PROJECT_NUMBER/locations/...
+ components = self._audience.split("/")
+ try:
+ project_index = components.index("projects")
+ if project_index + 1 < len(components):
+ return components[project_index + 1] or None
+ except ValueError:
+ return None
+
+ @_helpers.copy_docstring(credentials.Scoped)
+ def with_scopes(self, scopes, default_scopes=None):
+ return self.__class__(
+ audience=self._audience,
+ subject_token_type=self._subject_token_type,
+ token_url=self._token_url,
+ credential_source=self._credential_source,
+ service_account_impersonation_url=self._service_account_impersonation_url,
+ client_id=self._client_id,
+ client_secret=self._client_secret,
+ quota_project_id=self._quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+
+ @abc.abstractmethod
+ def retrieve_subject_token(self, request):
+ """Retrieves the subject token using the credential_source object.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ Returns:
+ str: The retrieved subject token.
+ """
+ # pylint: disable=missing-raises-doc
+ # (pylint doesn't recognize that this is abstract)
+ raise NotImplementedError("retrieve_subject_token must be implemented")
+
+ def get_project_id(self, request):
+ """Retrieves the project ID corresponding to the workload identity pool.
+
+ When not determinable, None is returned.
+
+ This is introduced to support the current pattern of using the Auth library:
+
+ credentials, project_id = google.auth.default()
+
+ The resource may not have permission (resourcemanager.projects.get) to
+ call this API or the required scopes may not be selected:
+ https://cloud.google.com/resource-manager/reference/rest/v1/projects/get#authorization-scopes
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ Returns:
+ Optional[str]: The project ID corresponding to the workload identity pool
+ if determinable.
+ """
+ if self._project_id:
+ # If already retrieved, return the cached project ID value.
+ return self._project_id
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # Scopes are required in order to retrieve a valid access token.
+ if self.project_number and scopes:
+ headers = {}
+ url = _CLOUD_RESOURCE_MANAGER + self.project_number
+ self.before_request(request, "GET", url, headers)
+ response = request(url=url, method="GET", headers=headers)
+
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+ response_data = json.loads(response_body)
+
+ if response.status == 200:
+ # Cache result as this field is immutable.
+ self._project_id = response_data.get("projectId")
+ return self._project_id
+
+ return None
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ if self._impersonated_credentials:
+ self._impersonated_credentials.refresh(request)
+ self.token = self._impersonated_credentials.token
+ self.expiry = self._impersonated_credentials.expiry
+ else:
+ now = _helpers.utcnow()
+ response_data = self._sts_client.exchange_token(
+ request=request,
+ grant_type=_STS_GRANT_TYPE,
+ subject_token=self.retrieve_subject_token(request),
+ subject_token_type=self._subject_token_type,
+ audience=self._audience,
+ scopes=scopes,
+ requested_token_type=_STS_REQUESTED_TOKEN_TYPE,
+ )
+ self.token = response_data.get("access_token")
+ lifetime = datetime.timedelta(seconds=response_data.get("expires_in"))
+ self.expiry = now + lifetime
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ # Return copy of instance with the provided quota project ID.
+ return self.__class__(
+ audience=self._audience,
+ subject_token_type=self._subject_token_type,
+ token_url=self._token_url,
+ credential_source=self._credential_source,
+ service_account_impersonation_url=self._service_account_impersonation_url,
+ client_id=self._client_id,
+ client_secret=self._client_secret,
+ quota_project_id=quota_project_id,
+ scopes=self._scopes,
+ default_scopes=self._default_scopes,
+ )
+
+ def _initialize_impersonated_credentials(self):
+ """Generates an impersonated credentials.
+
+ For more details, see `projects.serviceAccounts.generateAccessToken`_.
+
+ .. _projects.serviceAccounts.generateAccessToken: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateAccessToken
+
+ Returns:
+ impersonated_credentials.Credential: The impersonated credentials
+ object.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the generateAccessToken
+ endpoint returned an error.
+ """
+ # Return copy of instance with no service account impersonation.
+ source_credentials = self.__class__(
+ audience=self._audience,
+ subject_token_type=self._subject_token_type,
+ token_url=self._token_url,
+ credential_source=self._credential_source,
+ service_account_impersonation_url=None,
+ client_id=self._client_id,
+ client_secret=self._client_secret,
+ quota_project_id=self._quota_project_id,
+ scopes=self._scopes,
+ default_scopes=self._default_scopes,
+ )
+
+ # Determine target_principal.
+ target_principal = self.service_account_email
+ if not target_principal:
+ raise exceptions.RefreshError(
+ "Unable to determine target principal from service account impersonation URL."
+ )
+
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # Initialize and return impersonated credentials.
+ return impersonated_credentials.Credentials(
+ source_credentials=source_credentials,
+ target_principal=target_principal,
+ target_scopes=scopes,
+ quota_project_id=self._quota_project_id,
+ iam_endpoint_override=self._service_account_impersonation_url,
+ )
diff --git a/contrib/python/google-auth/py2/google/auth/iam.py b/contrib/python/google-auth/py2/google/auth/iam.py
new file mode 100644
index 0000000000..5d63dc5d8a
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/iam.py
@@ -0,0 +1,100 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for using the Google `Cloud Identity and Access Management (IAM)
+API`_'s auth-related functionality.
+
+.. _Cloud Identity and Access Management (IAM) API:
+ https://cloud.google.com/iam/docs/
+"""
+
+import base64
+import json
+
+from six.moves import http_client
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+
+# Root of the IAM Credentials API; the signBlob template's {} placeholder is
+# filled with the service account email at request time.
+_IAM_API_ROOT_URI = "https://iamcredentials.googleapis.com/v1"
+_SIGN_BLOB_URI = _IAM_API_ROOT_URI + "/projects/-/serviceAccounts/{}:signBlob?alt=json"
+
+
+class Signer(crypt.Signer):
+ """Signs messages using the IAM `signBlob API`_.
+
+ This is useful when you need to sign bytes but do not have access to the
+ credential's private key file.
+
+ .. _signBlob API:
+ https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts
+ /signBlob
+ """
+
+ def __init__(self, request, credentials, service_account_email):
+ """
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ credentials (google.auth.credentials.Credentials): The credentials
+ that will be used to authenticate the request to the IAM API.
+ The credentials must have one of the following scopes:
+
+ - https://www.googleapis.com/auth/iam
+ - https://www.googleapis.com/auth/cloud-platform
+ service_account_email (str): The service account email identifying
+ which service account to use to sign bytes. Often, this can
+ be the same as the service account email in the given
+ credentials.
+ """
+ self._request = request  # transport used for the signBlob HTTP call
+ self._credentials = credentials  # authorizes the signBlob request
+ self._service_account_email = service_account_email  # account whose key signs
+
+ def _make_signing_request(self, message):
+ """Makes a request to the IAM signBlob API and returns its parsed
+ JSON response."""
+ message = _helpers.to_bytes(message)
+
+ method = "POST"
+ url = _SIGN_BLOB_URI.format(self._service_account_email)
+ headers = {"Content-Type": "application/json"}
+ body = json.dumps(
+ {"payload": base64.b64encode(message).decode("utf-8")}
+ ).encode("utf-8")
+
+ # Attach the authorization header derived from the source credentials.
+ self._credentials.before_request(self._request, method, url, headers)
+ response = self._request(url=url, method=method, body=body, headers=headers)
+
+ if response.status != http_client.OK:
+ raise exceptions.TransportError(
+ "Error calling the IAM signBlob API: {}".format(response.data)
+ )
+
+ return json.loads(response.data.decode("utf-8"))
+
+ @property
+ def key_id(self):
+ """Optional[str]: The key ID used to identify this private key.
+
+ .. warning::
+ This is always ``None``. The key ID used by IAM can not
+ be reliably determined ahead of time.
+ """
+ return None  # IAM picks the signing key server-side per request
+
+ @_helpers.copy_docstring(crypt.Signer)
+ def sign(self, message):
+ response = self._make_signing_request(message)
+ # API returns the signature base64-encoded; callers expect raw bytes.
+ return base64.b64decode(response["signedBlob"])
diff --git a/contrib/python/google-auth/py2/google/auth/identity_pool.py b/contrib/python/google-auth/py2/google/auth/identity_pool.py
new file mode 100644
index 0000000000..5362199555
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/identity_pool.py
@@ -0,0 +1,279 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Identity Pool Credentials.
+
+This module provides credentials to access Google Cloud resources from on-prem
+or non-Google Cloud platforms which support external credentials (e.g. OIDC ID
+tokens) retrieved from local file locations or local servers. This includes
+Microsoft Azure and OIDC identity providers (e.g. K8s workloads registered with
+Hub with Hub workload identity enabled).
+
+These credentials are recommended over the use of service account credentials
+in on-prem/non-Google Cloud platforms as they do not involve the management of
+long-lived service account private keys.
+
+Identity Pool Credentials are initialized using external_account
+arguments which are typically loaded from an external credentials file or
+an external credentials URL. Unlike other Credentials that can be initialized
+with a list of explicit arguments, secrets or credentials, external account
+clients use the environment and hints/guidelines provided by the
+external_account JSON file to retrieve credentials and exchange them for Google
+access tokens.
+"""
+
+try:
+ from collections.abc import Mapping
+# Python 2.7 compatibility
+except ImportError: # pragma: NO COVER
+ from collections import Mapping
+import io
+import json
+import os
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import external_account
+
+
+class Credentials(external_account.Credentials):
+ """External account credentials sourced from files and URLs."""
+
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ ):
+ """Instantiates an external account credentials object from a file/URL.
+
+ Args:
+ audience (str): The STS audience field.
+ subject_token_type (str): The subject token type.
+ token_url (str): The STS endpoint URL.
+ credential_source (Mapping): The credential source dictionary used to
+ provide instructions on how to retrieve external credential to be
+ exchanged for Google access tokens.
+
+ Example credential_source for url-sourced credential::
+
+ {
+ "url": "http://www.example.com",
+ "format": {
+ "type": "json",
+ "subject_token_field_name": "access_token",
+ },
+ "headers": {"foo": "bar"},
+ }
+
+ Example credential_source for file-sourced credential::
+
+ {
+ "file": "/path/to/token/file.txt"
+ }
+
+ service_account_impersonation_url (Optional[str]): The optional service account
+ impersonation getAccessToken URL.
+ client_id (Optional[str]): The optional client ID.
+ client_secret (Optional[str]): The optional client secret.
+ quota_project_id (Optional[str]): The optional quota project ID.
+ scopes (Optional[Sequence[str]]): Optional scopes to request during the
+ authorization grant.
+ default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error is encountered during
+ access token retrieval logic.
+ ValueError: For invalid parameters.
+
+ .. note:: Typically one of the helper constructors
+ :meth:`from_file` or
+ :meth:`from_info` are used instead of calling the constructor directly.
+ """
+
+ super(Credentials, self).__init__(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ credential_source=credential_source,
+ service_account_impersonation_url=service_account_impersonation_url,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+ if not isinstance(credential_source, Mapping):  # rejected by the file/url checks below
+ self._credential_source_file = None
+ self._credential_source_url = None
+ else:
+ self._credential_source_file = credential_source.get("file")
+ self._credential_source_url = credential_source.get("url")
+ self._credential_source_headers = credential_source.get("headers")
+ credential_source_format = credential_source.get("format", {})
+ # Get credential_source format type. When not provided, this
+ # defaults to text.
+ self._credential_source_format_type = (
+ credential_source_format.get("type") or "text"
+ )
+ # environment_id is only supported in AWS or dedicated future external
+ # account credentials.
+ if "environment_id" in credential_source:
+ raise ValueError(
+ "Invalid Identity Pool credential_source field 'environment_id'"
+ )
+ if self._credential_source_format_type not in ["text", "json"]:
+ raise ValueError(
+ "Invalid credential_source format '{}'".format(
+ self._credential_source_format_type
+ )
+ )
+ # For JSON types, get the required subject_token field name.
+ if self._credential_source_format_type == "json":
+ self._credential_source_field_name = credential_source_format.get(
+ "subject_token_field_name"
+ )
+ if self._credential_source_field_name is None:
+ raise ValueError(
+ "Missing subject_token_field_name for JSON credential_source format"
+ )
+ else:
+ self._credential_source_field_name = None
+
+ # Exactly one of 'file' or 'url' must be configured.
+ if self._credential_source_file and self._credential_source_url:
+ raise ValueError(
+ "Ambiguous credential_source. 'file' is mutually exclusive with 'url'."
+ )
+ if not self._credential_source_file and not self._credential_source_url:
+ raise ValueError(
+ "Missing credential_source. A 'file' or 'url' must be provided."
+ )
+
+ @_helpers.copy_docstring(external_account.Credentials)
+ def retrieve_subject_token(self, request):
+ # Fetch the raw payload (file or URL), then extract the subject token
+ # according to the configured format ("text" or "json").
+ return self._parse_token_data(
+ self._get_token_data(request),
+ self._credential_source_format_type,
+ self._credential_source_field_name,
+ )
+
+ def _get_token_data(self, request):
+ """Returns a (content, source) tuple from the configured file or URL."""
+ if self._credential_source_file:
+ return self._get_file_data(self._credential_source_file)
+ else:
+ return self._get_url_data(
+ request, self._credential_source_url, self._credential_source_headers
+ )
+
+ def _get_file_data(self, filename):
+ """Reads the subject token file; returns (content, filename)."""
+ if not os.path.exists(filename):
+ raise exceptions.RefreshError("File '{}' was not found.".format(filename))
+
+ with io.open(filename, "r", encoding="utf-8") as file_obj:
+ return file_obj.read(), filename  # filename kept for error messages
+
+ def _get_url_data(self, request, url, headers):
+ """GETs the subject token from ``url``; returns (body, url)."""
+ response = request(url=url, method="GET", headers=headers)
+
+ # support both string and bytes type response.data
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != 200:
+ raise exceptions.RefreshError(
+ "Unable to retrieve Identity Pool subject token", response_body
+ )
+
+ return response_body, url  # url kept for error messages
+
+ def _parse_token_data(
+ self, token_content, format_type="text", subject_token_field_name=None
+ ):
+ """Extracts the subject token from (content, source) per format_type."""
+ content, filename = token_content
+ if format_type == "text":
+ token = content  # the whole payload is the token
+ else:
+ try:
+ # Parse file content as JSON.
+ response_data = json.loads(content)
+ # Get the subject_token.
+ token = response_data[subject_token_field_name]
+ except (KeyError, ValueError):
+ raise exceptions.RefreshError(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ filename, subject_token_field_name
+ )
+ )
+ if not token:
+ raise exceptions.RefreshError(
+ "Missing subject_token in the credential_source file"
+ )
+ return token
+
+ @classmethod
+ def from_info(cls, info, **kwargs):
+ """Creates an Identity Pool Credentials instance from parsed external account info.
+
+ Args:
+ info (Mapping[str, str]): The Identity Pool external account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.identity_pool.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: For invalid parameters.
+ """
+ # .get() is used throughout so absent optional keys pass None to the
+ # constructor, which performs the actual validation.
+ return cls(
+ audience=info.get("audience"),
+ subject_token_type=info.get("subject_token_type"),
+ token_url=info.get("token_url"),
+ service_account_impersonation_url=info.get(
+ "service_account_impersonation_url"
+ ),
+ client_id=info.get("client_id"),
+ client_secret=info.get("client_secret"),
+ credential_source=info.get("credential_source"),
+ quota_project_id=info.get("quota_project_id"),
+ **kwargs
+ )
+
+ @classmethod
+ def from_file(cls, filename, **kwargs):
+ """Creates an IdentityPool Credentials instance from an external account json file.
+
+ Args:
+ filename (str): The path to the IdentityPool external account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.identity_pool.Credentials: The constructed
+ credentials.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)  # delegate validation to from_info/__init__
+ return cls.from_info(data, **kwargs)
diff --git a/contrib/python/google-auth/py2/google/auth/impersonated_credentials.py b/contrib/python/google-auth/py2/google/auth/impersonated_credentials.py
new file mode 100644
index 0000000000..b8a6c49a1e
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/impersonated_credentials.py
@@ -0,0 +1,412 @@
+# Copyright 2018 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Impersonated credentials.
+
+This module provides authentication for applications where local credentials
+impersonate a remote service account using the `IAM Credentials API`_.
+
+This class can be used to impersonate a service account as long as the original
+Credential object has the "Service Account Token Creator" role on the target
+service account.
+
+ .. _IAM Credentials API:
+ https://cloud.google.com/iam/credentials/reference/rest/
+"""
+
+import base64
+import copy
+from datetime import datetime
+import json
+
+import six
+from six.moves import http_client
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth.transport.requests import AuthorizedSession
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+
+_IAM_SCOPE = ["https://www.googleapis.com/auth/iam"]
+
+_IAM_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken"
+)
+
+_IAM_SIGN_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:signBlob"
+)
+
+_IAM_IDTOKEN_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/"
+ + "projects/-/serviceAccounts/{}:generateIdToken"
+)
+
+_REFRESH_ERROR = "Unable to acquire impersonated credentials"
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+
+_DEFAULT_TOKEN_URI = "https://oauth2.googleapis.com/token"
+
+
+def _make_iam_token_request(
+ request, principal, headers, body, iam_endpoint_override=None
+):
+ """Makes a request to the Google Cloud IAM service for an access token.
+ Args:
+ request (Request): The Request object to use.
+ principal (str): The principal to request an access token for.
+ headers (Mapping[str, str]): Map of headers to transmit.
+ body (Mapping[str, str]): JSON Payload body for the iamcredentials
+ API call.
+ iam_endpoint_override (Optiona[str]): The full IAM endpoint override
+ with the target_principal embedded. This is useful when supporting
+ impersonation with regional endpoints.
+
+ Raises:
+ google.auth.exceptions.TransportError: Raised if there is an underlying
+ HTTP connection error
+ google.auth.exceptions.RefreshError: Raised if the impersonated
+ credentials are not available. Common reasons are
+ `iamcredentials.googleapis.com` is not enabled or the
+ `Service Account Token Creator` is not assigned
+ """
+ iam_endpoint = iam_endpoint_override or _IAM_ENDPOINT.format(principal)
+
+ body = json.dumps(body).encode("utf-8")
+
+ response = request(url=iam_endpoint, method="POST", headers=headers, body=body)
+
+ # support both string and bytes type response.data
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != http_client.OK:
+ exceptions.RefreshError(_REFRESH_ERROR, response_body)
+
+ try:
+ token_response = json.loads(response_body)
+ token = token_response["accessToken"]
+ expiry = datetime.strptime(token_response["expireTime"], "%Y-%m-%dT%H:%M:%SZ")
+
+ return token, expiry
+
+ except (KeyError, ValueError) as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "{}: No access token or invalid expiration in response.".format(
+ _REFRESH_ERROR
+ ),
+ response_body,
+ )
+ six.raise_from(new_exc, caught_exc)
+
+
+class Credentials(credentials.CredentialsWithQuotaProject, credentials.Signing):
+ """This module defines impersonated credentials which are essentially
+ impersonated identities.
+
+ Impersonated Credentials allows credentials issued to a user or
+ service account to impersonate another. The target service account must
+ grant the originating credential principal the
+ `Service Account Token Creator`_ IAM role:
+
+ For more information about Token Creator IAM role and
+ IAMCredentials API, see
+ `Creating Short-Lived Service Account Credentials`_.
+
+ .. _Service Account Token Creator:
+ https://cloud.google.com/iam/docs/service-accounts#the_service_account_token_creator_role
+
+ .. _Creating Short-Lived Service Account Credentials:
+ https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials
+
+ Usage:
+
+ First grant source_credentials the `Service Account Token Creator`
+ role on the target account to impersonate. In this example, the
+ service account represented by svc_account.json has the
+ token creator role on
+ `impersonated-account@_project_.iam.gserviceaccount.com`.
+
+ Enable the IAMCredentials API on the source project:
+ `gcloud services enable iamcredentials.googleapis.com`.
+
+ Initialize a source credential which does not have access to
+ list bucket::
+
+ from google.oauth2 import service_account
+
+ target_scopes = [
+ 'https://www.googleapis.com/auth/devstorage.read_only']
+
+ source_credentials = (
+ service_account.Credentials.from_service_account_file(
+ '/path/to/svc_account.json',
+ scopes=target_scopes))
+
+ Now use the source credentials to acquire credentials to impersonate
+ another service account::
+
+ from google.auth import impersonated_credentials
+
+ target_credentials = impersonated_credentials.Credentials(
+ source_credentials=source_credentials,
+ target_principal='impersonated-account@_project_.iam.gserviceaccount.com',
+ target_scopes = target_scopes,
+ lifetime=500)
+
+ Resource access is granted::
+
+ client = storage.Client(credentials=target_credentials)
+ buckets = client.list_buckets(project='your_project')
+ for bucket in buckets:
+ print(bucket.name)
+ """
+
+ def __init__(
+ self,
+ source_credentials,
+ target_principal,
+ target_scopes,
+ delegates=None,
+ lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
+ quota_project_id=None,
+ iam_endpoint_override=None,
+ ):
+ """
+ Args:
+ source_credentials (google.auth.Credentials): The source credential
+ used to acquire the impersonated credentials.
+ target_principal (str): The service account to impersonate.
+ target_scopes (Sequence[str]): Scopes to request during the
+ authorization grant.
+ delegates (Sequence[str]): The chained list of delegates required
+ to grant the final access_token. If set, the sequence of
+ identities must have "Service Account Token Creator" capability
+ granted to the preceding identity. For example, if set to
+ [serviceAccountB, serviceAccountC], the source_credential
+ must have the Token Creator role on serviceAccountB.
+ serviceAccountB must have the Token Creator on
+ serviceAccountC.
+ Finally, C must have Token Creator on target_principal.
+ If left unset, source_credential must have that role on
+ target_principal.
+ lifetime (int): Number of seconds the delegated credential should
+ be valid for (up to 3600).
+ quota_project_id (Optional[str]): The project ID used for quota and billing.
+ This project may be different from the project used to
+ create the credentials.
+ iam_endpoint_override (Optional[str]): The full IAM endpoint override
+ with the target_principal embedded. This is useful when supporting
+ impersonation with regional endpoints.
+ """
+
+ super(Credentials, self).__init__()
+
+ # Shallow-copy so adding the IAM scope below does not mutate the
+ # caller's credentials object.
+ self._source_credentials = copy.copy(source_credentials)
+ # Service account source credentials must have the _IAM_SCOPE
+ # added to refresh correctly. User credentials cannot have
+ # their original scopes modified.
+ if isinstance(self._source_credentials, credentials.Scoped):
+ self._source_credentials = self._source_credentials.with_scopes(_IAM_SCOPE)
+ self._target_principal = target_principal
+ self._target_scopes = target_scopes
+ self._delegates = delegates
+ self._lifetime = lifetime
+ self.token = None
+ self.expiry = _helpers.utcnow()  # already expired: forces refresh on first use
+ self._quota_project_id = quota_project_id
+ self._iam_endpoint_override = iam_endpoint_override
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ # Delegates to _update_token, which also refreshes the source
+ # credentials when they are invalid.
+ self._update_token(request)
+
+ def _update_token(self, request):
+ """Updates credentials with a new access_token representing
+ the impersonated account.
+
+ Args:
+ request (google.auth.transport.requests.Request): Request object
+ to use for refreshing credentials.
+ """
+
+ # Refresh our source credentials if it is not valid.
+ if not self._source_credentials.valid:
+ self._source_credentials.refresh(request)
+
+ # generateAccessToken request payload.
+ body = {
+ "delegates": self._delegates,
+ "scope": self._target_scopes,
+ "lifetime": str(self._lifetime) + "s",
+ }
+
+ headers = {"Content-Type": "application/json"}
+
+ # Apply the source credentials authentication info.
+ self._source_credentials.apply(headers)
+
+ self.token, self.expiry = _make_iam_token_request(
+ request=request,
+ principal=self._target_principal,
+ headers=headers,
+ body=body,
+ iam_endpoint_override=self._iam_endpoint_override,
+ )
+
+ def sign_bytes(self, message):
+ """Signs ``message`` via the IAM signBlob API as the target principal."""
+
+ iam_sign_endpoint = _IAM_SIGN_ENDPOINT.format(self._target_principal)
+
+ body = {
+ "payload": base64.b64encode(message).decode("utf-8"),
+ "delegates": self._delegates,
+ }
+
+ headers = {"Content-Type": "application/json"}
+
+ authed_session = AuthorizedSession(self._source_credentials)
+
+ response = authed_session.post(
+ url=iam_sign_endpoint, headers=headers, json=body
+ )
+
+ # NOTE(review): no HTTP status check here; a non-2xx response surfaces
+ # as a KeyError on "signedBlob" -- confirm this is acceptable.
+ return base64.b64decode(response.json()["signedBlob"])
+
+ @property
+ def signer_email(self):
+ # The impersonated service account performs the actual signing.
+ return self._target_principal
+
+ @property
+ def service_account_email(self):
+ # The impersonated service account is the effective identity.
+ return self._target_principal
+
+ @property
+ def signer(self):
+ # Signing is done via the IAM API, so this object is its own signer.
+ return self
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ # Rebuild with identical impersonation settings but a new quota project.
+ return self.__class__(
+ self._source_credentials,
+ target_principal=self._target_principal,
+ target_scopes=self._target_scopes,
+ delegates=self._delegates,
+ lifetime=self._lifetime,
+ quota_project_id=quota_project_id,
+ iam_endpoint_override=self._iam_endpoint_override,
+ )
+
+
+class IDTokenCredentials(credentials.CredentialsWithQuotaProject):
+ """Open ID Connect ID Token-based service account credentials.
+
+ """
+
+ def __init__(
+ self,
+ target_credentials,
+ target_audience=None,
+ include_email=False,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ target_credentials (google.auth.Credentials): The target
+ credential used to acquire the id tokens for.
+ target_audience (string): Audience to issue the token for.
+ include_email (bool): Include email in IdToken
+ quota_project_id (Optional[str]): The project ID used for
+ quota and billing.
+ """
+ super(IDTokenCredentials, self).__init__()
+
+ # Only impersonated (access-token) credentials can mint ID tokens here.
+ if not isinstance(target_credentials, Credentials):
+ raise exceptions.GoogleAuthError(
+ "Provided Credential must be " "impersonated_credentials"
+ )
+ self._target_credentials = target_credentials
+ self._target_audience = target_audience
+ self._include_email = include_email
+ self._quota_project_id = quota_project_id
+
+ def from_credentials(self, target_credentials, target_audience=None):
+ # NOTE(review): the `target_credentials` parameter is ignored -- the
+ # copy is built from self._target_credentials. This matches the
+ # vendored upstream source but looks unintended; confirm before
+ # relying on this method with a different credential.
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=target_audience,
+ include_email=self._include_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+ def with_target_audience(self, target_audience):
+ # Copy of these credentials with a different audience claim.
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=target_audience,
+ include_email=self._include_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+ def with_include_email(self, include_email):
+ # Copy of these credentials toggling the email claim in the ID token.
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=self._target_audience,
+ include_email=include_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ # Copy of these credentials billed against a different quota project.
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=self._target_audience,
+ include_email=self._include_email,
+ quota_project_id=quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+
+ iam_sign_endpoint = _IAM_IDTOKEN_ENDPOINT.format(
+ self._target_credentials.signer_email
+ )
+
+ # generateIdToken request payload.
+ body = {
+ "audience": self._target_audience,
+ "delegates": self._target_credentials._delegates,
+ "includeEmail": self._include_email,
+ }
+
+ headers = {"Content-Type": "application/json"}
+
+ authed_session = AuthorizedSession(
+ self._target_credentials._source_credentials, auth_request=request
+ )
+
+ response = authed_session.post(
+ url=iam_sign_endpoint,
+ headers=headers,
+ data=json.dumps(body).encode("utf-8"),
+ )
+
+ id_token = response.json()["token"]
+ self.token = id_token
+ # The token was just minted by Google, so it is decoded without
+ # verification only to read its "exp" claim.
+ # NOTE(review): datetime.fromtimestamp interprets "exp" in local time,
+ # while expiry values elsewhere in google.auth are naive UTC -- confirm.
+ self.expiry = datetime.fromtimestamp(jwt.decode(id_token, verify=False)["exp"])
diff --git a/contrib/python/google-auth/py2/google/auth/jwt.py b/contrib/python/google-auth/py2/google/auth/jwt.py
new file mode 100644
index 0000000000..e9f4f69ca0
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/jwt.py
@@ -0,0 +1,849 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""JSON Web Tokens
+
+Provides support for creating (encoding) and verifying (decoding) JWTs,
+especially JWTs generated and consumed by Google infrastructure.
+
+See `rfc7519`_ for more details on JWTs.
+
+To encode a JWT use :func:`encode`::
+
+ from google.auth import crypt
+ from google.auth import jwt
+
+ signer = crypt.Signer(private_key)
+ payload = {'some': 'payload'}
+ encoded = jwt.encode(signer, payload)
+
+To decode a JWT and verify claims use :func:`decode`::
+
+ claims = jwt.decode(encoded, certs=public_certs)
+
+You can also skip verification::
+
+ claims = jwt.decode(encoded, verify=False)
+
+.. _rfc7519: https://tools.ietf.org/html/rfc7519
+
+"""
+
+try:
+ from collections.abc import Mapping
+# Python 2.7 compatibility
+except ImportError: # pragma: NO COVER
+ from collections import Mapping
+import copy
+import datetime
+import json
+
+import cachetools
+import six
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import _service_account_info
+from google.auth import crypt
+from google.auth import exceptions
+import google.auth.credentials
+
+try:
+ from google.auth.crypt import es256
+except ImportError: # pragma: NO COVER
+ es256 = None
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+_DEFAULT_MAX_CACHE_SIZE = 10
+# Maps JWT "alg" header values to verifier classes; ES256 is registered
+# below only when the cryptography-backed implementation imported above.
+_ALGORITHM_TO_VERIFIER_CLASS = {"RS256": crypt.RSAVerifier}
+_CRYPTOGRAPHY_BASED_ALGORITHMS = frozenset(["ES256"])
+
+if es256 is not None: # pragma: NO COVER
+ _ALGORITHM_TO_VERIFIER_CLASS["ES256"] = es256.ES256Verifier
+
+
+def encode(signer, payload, header=None, key_id=None):
+ """Make a signed JWT.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign the JWT.
+ payload (Mapping[str, str]): The JWT payload.
+ header (Mapping[str, str]): Additional JWT header payload.
+ key_id (str): The key id to add to the JWT header. If the
+ signer has a key id it will be used as the default. If this is
+ specified it will override the signer's key id.
+
+ Returns:
+ bytes: The encoded JWT.
+ """
+ if header is None:
+ header = {}
+
+ if key_id is None:
+ key_id = signer.key_id  # fall back to the signer's own key id
+
+ header.update({"typ": "JWT"})
+
+ # Choose the algorithm from the signer type unless the caller forced one.
+ if "alg" not in header:
+ if es256 is not None and isinstance(signer, es256.ES256Signer):
+ header.update({"alg": "ES256"})
+ else:
+ header.update({"alg": "RS256"})
+
+ if key_id is not None:
+ header["kid"] = key_id
+
+ segments = [
+ _helpers.unpadded_urlsafe_b64encode(json.dumps(header).encode("utf-8")),
+ _helpers.unpadded_urlsafe_b64encode(json.dumps(payload).encode("utf-8")),
+ ]
+
+ signing_input = b".".join(segments)  # "header.payload" is what gets signed
+ signature = signer.sign(signing_input)
+ segments.append(_helpers.unpadded_urlsafe_b64encode(signature))
+
+ return b".".join(segments)
+
+
+def _decode_jwt_segment(encoded_section):
+ """Decodes a single URL-safe base64 JWT segment into a JSON object."""
+ section_bytes = _helpers.padded_urlsafe_b64decode(encoded_section)
+ try:
+ return json.loads(section_bytes.decode("utf-8"))
+ except ValueError as caught_exc:
+ new_exc = ValueError("Can't parse segment: {0}".format(section_bytes))
+ six.raise_from(new_exc, caught_exc)
+
+
+def _unverified_decode(token):
+ """Decodes a token and does no verification.
+
+ Args:
+ token (Union[str, bytes]): The encoded JWT.
+
+ Returns:
+ Tuple[str, str, str, str]: header, payload, signed_section, and
+ signature.
+
+ Raises:
+ ValueError: if there are an incorrect amount of segments in the token.
+ """
+ token = _helpers.to_bytes(token)
+
+ # A compact JWT is exactly "header.payload.signature".
+ if token.count(b".") != 2:
+ raise ValueError("Wrong number of segments in token: {0}".format(token))
+
+ encoded_header, encoded_payload, signature = token.split(b".")
+ signed_section = encoded_header + b"." + encoded_payload
+ signature = _helpers.padded_urlsafe_b64decode(signature)
+
+ # Parse segments
+ header = _decode_jwt_segment(encoded_header)
+ payload = _decode_jwt_segment(encoded_payload)
+
+ return header, payload, signed_section, signature
+
+
+def decode_header(token):
+ """Return the decoded header of a token.
+
+ No verification is done. This is useful to extract the key id from
+ the header in order to acquire the appropriate certificate to verify
+ the token.
+
+ Args:
+ token (Union[str, bytes]): the encoded JWT.
+
+ Returns:
+ Mapping: The decoded JWT header.
+ """
+ header, _, _, _ = _unverified_decode(token)  # payload/signature discarded
+ return header
+
+
+def _verify_iat_and_exp(payload):
+ """Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token
+ payload.
+
+ Args:
+ payload (Mapping[str, str]): The JWT payload.
+
+ Raises:
+ ValueError: if any checks failed.
+ """
+ now = _helpers.datetime_to_secs(_helpers.utcnow())
+
+ # Make sure the iat and exp claims are present.
+ for key in ("iat", "exp"):
+ if key not in payload:
+ raise ValueError("Token does not contain required claim {}".format(key))
+
+ # Make sure the token wasn't issued in the future.
+ iat = payload["iat"]
+ # Err on the side of accepting a token that is slightly early to account
+ # for clock skew.
+ earliest = iat - _helpers.CLOCK_SKEW_SECS
+ if now < earliest:
+ raise ValueError("Token used too early, {} < {}".format(now, iat))
+
+ # Make sure the token hasn't already expired.
+ exp = payload["exp"]
+ # Err on the side of accepting a token that is slightly out of date
+ # to account for clock skew.
+ latest = exp + _helpers.CLOCK_SKEW_SECS
+ if latest < now:
+ raise ValueError("Token expired, {} < {}".format(latest, now))
+
+
def decode(token, certs=None, verify=True, audience=None):
    """Decode and verify a JWT.

    Args:
        token (str): The encoded JWT.
        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
            certificate used to validate the JWT signature. If bytes or string,
            it must be the public key certificate in PEM format. If a mapping,
            it must be a mapping of key IDs to public key certificates in PEM
            format. The mapping must contain the same key ID that's specified
            in the token's header.
        verify (bool): Whether to perform signature and claim validation.
            Verification is done by default.
        audience (str or list): The audience claim, 'aud', that this JWT should
            contain. Or a list of audience claims. If None then the JWT's 'aud'
            parameter is not verified.

    Returns:
        Mapping[str, str]: The deserialized JSON payload in the JWT.

    Raises:
        ValueError: if any verification checks failed.
    """
    header, payload, signed_section, signature = _unverified_decode(token)

    # With verification disabled the payload is returned as-is; neither the
    # signature nor the claims are inspected.
    if not verify:
        return payload

    # Pluck the key id and algorithm from the header and make sure we have
    # a verifier that can support it.
    key_alg = header.get("alg")
    key_id = header.get("kid")

    try:
        verifier_cls = _ALGORITHM_TO_VERIFIER_CLASS[key_alg]
    except KeyError as exc:
        # Distinguish "algorithm exists but needs the optional cryptography
        # dependency" from "algorithm is not supported at all".
        if key_alg in _CRYPTOGRAPHY_BASED_ALGORITHMS:
            six.raise_from(
                ValueError(
                    "The key algorithm {} requires the cryptography package "
                    "to be installed.".format(key_alg)
                ),
                exc,
            )
        else:
            six.raise_from(
                ValueError("Unsupported signature algorithm {}".format(key_alg)), exc
            )

    # If certs is specified as a dictionary of key IDs to certificates, then
    # use the certificate identified by the key ID in the token header.
    if isinstance(certs, Mapping):
        if key_id:
            if key_id not in certs:
                raise ValueError("Certificate for key id {} not found.".format(key_id))
            certs_to_check = [certs[key_id]]
        # If there's no key id in the header, check against all of the certs.
        else:
            certs_to_check = certs.values()
    else:
        certs_to_check = certs

    # Verify that the signature matches the message.
    if not crypt.verify_signature(
        signed_section, signature, certs_to_check, verifier_cls
    ):
        raise ValueError("Could not verify token signature.")

    # Verify the issued at and created times in the payload.
    _verify_iat_and_exp(payload)

    # Check audience.
    if audience is not None:
        claim_audience = payload.get("aud")
        # A single audience string is normalized to a one-element list.
        if isinstance(audience, str):
            audience = [audience]
        if claim_audience not in audience:
            raise ValueError(
                "Token has wrong audience {}, expected one of {}".format(
                    claim_audience, audience
                )
            )

    return payload
+
+
class Credentials(
    google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject
):
    """Credentials that use a JWT as the bearer token.

    These credentials require an "audience" claim. This claim identifies the
    intended recipient of the bearer token.

    The constructor arguments determine the claims for the JWT that is
    sent with requests. Usually, you'll construct these credentials with
    one of the helper constructors as shown in the next section.

    To create JWT credentials using a Google service account private key
    JSON file::

        audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher'
        credentials = jwt.Credentials.from_service_account_file(
            'service-account.json',
            audience=audience)

    If you already have the service account file loaded and parsed::

        service_account_info = json.load(open('service_account.json'))
        credentials = jwt.Credentials.from_service_account_info(
            service_account_info,
            audience=audience)

    Both helper methods pass on arguments to the constructor, so you can
    specify the JWT claims::

        credentials = jwt.Credentials.from_service_account_file(
            'service-account.json',
            audience=audience,
            additional_claims={'meta': 'data'})

    You can also construct the credentials directly if you have a
    :class:`~google.auth.crypt.Signer` instance::

        credentials = jwt.Credentials(
            signer,
            issuer='your-issuer',
            subject='your-subject',
            audience=audience)

    The claims are considered immutable. If you want to modify the claims,
    you can easily create another instance using :meth:`with_claims`::

        new_audience = (
            'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber')
        new_credentials = credentials.with_claims(audience=new_audience)
    """

    def __init__(
        self,
        signer,
        issuer,
        subject,
        audience,
        additional_claims=None,
        token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
        quota_project_id=None,
    ):
        """
        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            issuer (str): The `iss` claim.
            subject (str): The `sub` claim.
            audience (str): the `aud` claim. The intended audience for the
                credentials.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload.
            token_lifetime (int): The amount of time in seconds for
                which the token is valid. Defaults to 1 hour.
            quota_project_id (Optional[str]): The project ID used for quota
                and billing.
        """
        super(Credentials, self).__init__()
        self._signer = signer
        self._issuer = issuer
        self._subject = subject
        self._audience = audience
        self._token_lifetime = token_lifetime
        self._quota_project_id = quota_project_id

        # Create the dict per instance rather than using a shared mutable
        # default argument.
        if additional_claims is None:
            additional_claims = {}

        self._additional_claims = additional_claims

    @classmethod
    def _from_signer_and_info(cls, signer, info, **kwargs):
        """Creates a Credentials instance from a signer and service account
        info.

        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            info (Mapping[str, str]): The service account info.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        # The service account email doubles as both issuer and subject
        # unless the caller overrides them.
        kwargs.setdefault("subject", info["client_email"])
        kwargs.setdefault("issuer", info["client_email"])
        return cls(signer, **kwargs)

    @classmethod
    def from_service_account_info(cls, info, **kwargs):
        """Creates an Credentials instance from a dictionary.

        Args:
            info (Mapping[str, str]): The service account info in Google
                format.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        signer = _service_account_info.from_dict(info, require=["client_email"])
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename, **kwargs):
        """Creates a Credentials instance from a service account .json file
        in Google format.

        Args:
            filename (str): The path to the service account .json file.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.
        """
        info, signer = _service_account_info.from_filename(
            filename, require=["client_email"]
        )
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_signing_credentials(cls, credentials, audience, **kwargs):
        """Creates a new :class:`google.auth.jwt.Credentials` instance from an
        existing :class:`google.auth.credentials.Signing` instance.

        The new instance will use the same signer as the existing instance and
        will use the existing instance's signer email as the issuer and
        subject by default.

        Example::

            svc_creds = service_account.Credentials.from_service_account_file(
                'service_account.json')
            audience = (
                'https://pubsub.googleapis.com/google.pubsub.v1.Publisher')
            jwt_creds = jwt.Credentials.from_signing_credentials(
                svc_creds, audience=audience)

        Args:
            credentials (google.auth.credentials.Signing): The credentials to
                use to construct the new credentials.
            audience (str): the `aud` claim. The intended audience for the
                credentials.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: A new Credentials instance.
        """
        kwargs.setdefault("issuer", credentials.signer_email)
        kwargs.setdefault("subject", credentials.signer_email)
        return cls(credentials.signer, audience=audience, **kwargs)

    def with_claims(
        self, issuer=None, subject=None, audience=None, additional_claims=None
    ):
        """Returns a copy of these credentials with modified claims.

        Args:
            issuer (str): The `iss` claim. If unspecified the current issuer
                claim will be used.
            subject (str): The `sub` claim. If unspecified the current subject
                claim will be used.
            audience (str): the `aud` claim. If unspecified the current
                audience claim will be used.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload. This will be merged with the current
                additional claims.

        Returns:
            google.auth.jwt.Credentials: A new credentials instance.
        """
        # Merge (not replace) additional claims; deep-copy so this
        # instance's claim mapping is never mutated.
        new_additional_claims = copy.deepcopy(self._additional_claims)
        new_additional_claims.update(additional_claims or {})

        return self.__class__(
            self._signer,
            issuer=issuer if issuer is not None else self._issuer,
            subject=subject if subject is not None else self._subject,
            audience=audience if audience is not None else self._audience,
            additional_claims=new_additional_claims,
            quota_project_id=self._quota_project_id,
        )

    @_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject)
    def with_quota_project(self, quota_project_id):
        return self.__class__(
            self._signer,
            issuer=self._issuer,
            subject=self._subject,
            audience=self._audience,
            additional_claims=self._additional_claims,
            quota_project_id=quota_project_id,
        )

    def _make_jwt(self):
        """Make a signed JWT.

        Returns:
            Tuple[bytes, datetime]: The encoded JWT and the expiration.
        """
        now = _helpers.utcnow()
        lifetime = datetime.timedelta(seconds=self._token_lifetime)
        expiry = now + lifetime

        payload = {
            "iss": self._issuer,
            "sub": self._subject,
            "iat": _helpers.datetime_to_secs(now),
            "exp": _helpers.datetime_to_secs(expiry),
        }
        # "aud" is only included when an audience was configured.
        if self._audience:
            payload["aud"] = self._audience

        # Additional claims can extend or override the standard set above.
        payload.update(self._additional_claims)

        jwt = encode(self._signer, payload)

        return jwt, expiry

    def refresh(self, request):
        """Refreshes the access token.

        Args:
            request (Any): Unused.
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        self.token, self.expiry = self._make_jwt()

    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def sign_bytes(self, message):
        return self._signer.sign(message)

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer_email(self):
        return self._issuer

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer(self):
        return self._signer
+
+
class OnDemandCredentials(
    google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject
):
    """On-demand JWT credentials.

    Like :class:`Credentials`, this class uses a JWT as the bearer token for
    authentication. However, this class does not require the audience at
    construction time. Instead, it will generate a new token on-demand for
    each request using the request URI as the audience. It caches tokens
    so that multiple requests to the same URI do not incur the overhead
    of generating a new token every time.

    This behavior is especially useful for `gRPC`_ clients. A gRPC service may
    have multiple audience and gRPC clients may not know all of the audiences
    required for accessing a particular service. With these credentials,
    no knowledge of the audiences is required ahead of time.

    .. _grpc: http://www.grpc.io/
    """

    def __init__(
        self,
        signer,
        issuer,
        subject,
        additional_claims=None,
        token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
        max_cache_size=_DEFAULT_MAX_CACHE_SIZE,
        quota_project_id=None,
    ):
        """
        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            issuer (str): The `iss` claim.
            subject (str): The `sub` claim.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload.
            token_lifetime (int): The amount of time in seconds for
                which the token is valid. Defaults to 1 hour.
            max_cache_size (int): The maximum number of JWT tokens to keep in
                cache. Tokens are cached using :class:`cachetools.LRUCache`.
            quota_project_id (Optional[str]): The project ID used for quota
                and billing.

        """
        super(OnDemandCredentials, self).__init__()
        self._signer = signer
        self._issuer = issuer
        self._subject = subject
        self._token_lifetime = token_lifetime
        self._quota_project_id = quota_project_id

        # Create the dict per instance rather than using a shared mutable
        # default argument.
        if additional_claims is None:
            additional_claims = {}

        self._additional_claims = additional_claims
        # Maps audience (str) -> (token, expiry), bounded by max_cache_size.
        self._cache = cachetools.LRUCache(maxsize=max_cache_size)

    @classmethod
    def _from_signer_and_info(cls, signer, info, **kwargs):
        """Creates an OnDemandCredentials instance from a signer and service
        account info.

        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            info (Mapping[str, str]): The service account info.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        kwargs.setdefault("subject", info["client_email"])
        kwargs.setdefault("issuer", info["client_email"])
        return cls(signer, **kwargs)

    @classmethod
    def from_service_account_info(cls, info, **kwargs):
        """Creates an OnDemandCredentials instance from a dictionary.

        Args:
            info (Mapping[str, str]): The service account info in Google
                format.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        signer = _service_account_info.from_dict(info, require=["client_email"])
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename, **kwargs):
        """Creates an OnDemandCredentials instance from a service account .json
        file in Google format.

        Args:
            filename (str): The path to the service account .json file.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: The constructed credentials.
        """
        info, signer = _service_account_info.from_filename(
            filename, require=["client_email"]
        )
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_signing_credentials(cls, credentials, **kwargs):
        """Creates a new :class:`google.auth.jwt.OnDemandCredentials` instance
        from an existing :class:`google.auth.credentials.Signing` instance.

        The new instance will use the same signer as the existing instance and
        will use the existing instance's signer email as the issuer and
        subject by default.

        Example::

            svc_creds = service_account.Credentials.from_service_account_file(
                'service_account.json')
            jwt_creds = jwt.OnDemandCredentials.from_signing_credentials(
                svc_creds)

        Args:
            credentials (google.auth.credentials.Signing): The credentials to
                use to construct the new credentials.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: A new OnDemandCredentials
                instance.
        """
        kwargs.setdefault("issuer", credentials.signer_email)
        kwargs.setdefault("subject", credentials.signer_email)
        return cls(credentials.signer, **kwargs)

    def with_claims(self, issuer=None, subject=None, additional_claims=None):
        """Returns a copy of these credentials with modified claims.

        Args:
            issuer (str): The `iss` claim. If unspecified the current issuer
                claim will be used.
            subject (str): The `sub` claim. If unspecified the current subject
                claim will be used.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload. This will be merged with the current
                additional claims.

        Returns:
            google.auth.jwt.OnDemandCredentials: A new credentials instance.
        """
        # Merge (not replace) additional claims; deep-copy so this
        # instance's claim mapping is never mutated.
        new_additional_claims = copy.deepcopy(self._additional_claims)
        new_additional_claims.update(additional_claims or {})

        return self.__class__(
            self._signer,
            issuer=issuer if issuer is not None else self._issuer,
            subject=subject if subject is not None else self._subject,
            additional_claims=new_additional_claims,
            max_cache_size=self._cache.maxsize,
            quota_project_id=self._quota_project_id,
        )

    @_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject)
    def with_quota_project(self, quota_project_id):

        return self.__class__(
            self._signer,
            issuer=self._issuer,
            subject=self._subject,
            additional_claims=self._additional_claims,
            max_cache_size=self._cache.maxsize,
            quota_project_id=quota_project_id,
        )

    @property
    def valid(self):
        """Checks the validity of the credentials.

        These credentials are always valid because they generate tokens on
        demand.
        """
        return True

    def _make_jwt_for_audience(self, audience):
        """Make a new JWT for the given audience.

        Args:
            audience (str): The intended audience.

        Returns:
            Tuple[bytes, datetime]: The encoded JWT and the expiration.
        """
        now = _helpers.utcnow()
        lifetime = datetime.timedelta(seconds=self._token_lifetime)
        expiry = now + lifetime

        payload = {
            "iss": self._issuer,
            "sub": self._subject,
            "iat": _helpers.datetime_to_secs(now),
            "exp": _helpers.datetime_to_secs(expiry),
            "aud": audience,
        }

        # Additional claims can extend or override the standard set above.
        payload.update(self._additional_claims)

        jwt = encode(self._signer, payload)

        return jwt, expiry

    def _get_jwt_for_audience(self, audience):
        """Get a JWT For a given audience.

        If there is already an existing, non-expired token in the cache for
        the audience, that token is used. Otherwise, a new token will be
        created.

        Args:
            audience (str): The intended audience.

        Returns:
            bytes: The encoded JWT.
        """
        # (None, None) is the cache-miss sentinel; a miss or an expired
        # entry both trigger minting a fresh token.
        token, expiry = self._cache.get(audience, (None, None))

        if token is None or expiry < _helpers.utcnow():
            token, expiry = self._make_jwt_for_audience(audience)
            self._cache[audience] = token, expiry

        return token

    def refresh(self, request):
        """Raises an exception, these credentials can not be directly
        refreshed.

        Args:
            request (Any): Unused.

        Raises:
            google.auth.RefreshError
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        raise exceptions.RefreshError(
            "OnDemandCredentials can not be directly refreshed."
        )

    def before_request(self, request, method, url, headers):
        """Performs credential-specific before request logic.

        Args:
            request (Any): Unused. JWT credentials do not need to make an
                HTTP request to refresh.
            method (str): The request's HTTP method.
            url (str): The request's URI. This is used as the audience claim
                when generating the JWT.
            headers (Mapping): The request's headers.
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        parts = urllib.parse.urlsplit(url)
        # Strip query string and fragment
        audience = urllib.parse.urlunsplit(
            (parts.scheme, parts.netloc, parts.path, "", "")
        )
        token = self._get_jwt_for_audience(audience)
        self.apply(headers, token=token)

    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def sign_bytes(self, message):
        return self._signer.sign(message)

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer_email(self):
        return self._issuer

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer(self):
        return self._signer
diff --git a/contrib/python/google-auth/py2/google/auth/transport/__init__.py b/contrib/python/google-auth/py2/google/auth/transport/__init__.py
new file mode 100644
index 0000000000..374e7b4d72
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport - HTTP client library support.
+
+:mod:`google.auth` is designed to work with various HTTP client libraries such
+as urllib3 and requests. In order to work across these libraries with different
+interfaces some abstraction is needed.
+
+This module provides two interfaces that are implemented by transport adapters
+to support HTTP libraries. :class:`Request` defines the interface expected by
+:mod:`google.auth` to make requests. :class:`Response` defines the interface
+for the return value of :class:`Request`.
+"""
+
+import abc
+
+import six
+from six.moves import http_client
+
# 401 Unauthorized is the only status treated as "credentials may be stale;
# refresh and retry".
DEFAULT_REFRESH_STATUS_CODES = (http_client.UNAUTHORIZED,)
"""Sequence[int]: Which HTTP status code indicate that credentials should be
refreshed and a request should be retried.
"""

DEFAULT_MAX_REFRESH_ATTEMPTS = 2
"""int: How many times to refresh the credentials and retry a request."""
+
+
@six.add_metaclass(abc.ABCMeta)
class Response(object):
    """HTTP Response data.

    Transport adapters implement this interface to expose their library's
    native response object through a uniform, read-only surface.
    """

    @abc.abstractproperty
    def status(self):
        """int: The HTTP status code."""
        raise NotImplementedError("status must be implemented.")

    @abc.abstractproperty
    def headers(self):
        """Mapping[str, str]: The HTTP response headers."""
        raise NotImplementedError("headers must be implemented.")

    @abc.abstractproperty
    def data(self):
        """bytes: The response body."""
        raise NotImplementedError("data must be implemented.")
+
+
@six.add_metaclass(abc.ABCMeta)
class Request(object):
    """Interface for a callable that makes HTTP requests.

    Specific transport implementations should provide an implementation of
    this that adapts their specific request / response API.

    .. automethod:: __call__
    """

    @abc.abstractmethod
    def __call__(
        self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
    ):
        """Make an HTTP request.

        Args:
            url (str): The URI to be requested.
            method (str): The HTTP method to use for the request. Defaults
                to 'GET'.
            body (bytes): The payload / body in HTTP request.
            headers (Mapping[str, str]): Request headers.
            timeout (Optional[int]): The number of seconds to wait for a
                response from the server. If not specified or if None, the
                transport-specific default timeout will be used.
            kwargs: Additional arguments passed on to the transport's
                request method.

        Returns:
            Response: The HTTP response.

        Raises:
            google.auth.exceptions.TransportError: If any exception occurred.
        """
        # pylint: disable=redundant-returns-doc, missing-raises-doc
        # (pylint doesn't play well with abstract docstrings.)
        raise NotImplementedError("__call__ must be implemented.")
diff --git a/contrib/python/google-auth/py2/google/auth/transport/_http_client.py b/contrib/python/google-auth/py2/google/auth/transport/_http_client.py
new file mode 100644
index 0000000000..c153763efa
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/_http_client.py
@@ -0,0 +1,115 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for http.client, for internal use only."""
+
+import logging
+import socket
+
+import six
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import exceptions
+from google.auth import transport
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Response(transport.Response):
    """http.client transport response adapter.

    Args:
        response (http.client.HTTPResponse): The raw http client response.
    """

    def __init__(self, response):
        # Capture status, headers, and the full body eagerly; the caller
        # closes the underlying connection immediately after constructing
        # this object.
        self._code = response.status
        lowered_headers = {}
        for name, value in response.getheaders():
            # Header names are normalized to lowercase for case-insensitive
            # lookup.
            lowered_headers[name.lower()] = value
        self._header_map = lowered_headers
        self._body = response.read()

    @property
    def status(self):
        return self._code

    @property
    def headers(self):
        return self._header_map

    @property
    def data(self):
        return self._body
+
+
class Request(transport.Request):
    """http.client transport request adapter."""

    def __call__(
        self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
    ):
        """Make an HTTP request using http.client.

        Args:
            url (str): The URI to be requested.
            method (str): The HTTP method to use for the request. Defaults
                to 'GET'.
            body (bytes): The payload / body in HTTP request.
            headers (Mapping): Request headers.
            timeout (Optional[int]): The number of seconds to wait for a
                response from the server. If not specified or if None, the
                socket global default timeout will be used.
            kwargs: Additional arguments passed through to the underlying
                :meth:`~http.client.HTTPConnection.request` method.

        Returns:
            Response: The HTTP response.

        Raises:
            google.auth.exceptions.TransportError: If any exception occurred.
        """
        # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.
        if timeout is None:
            timeout = socket._GLOBAL_DEFAULT_TIMEOUT

        # http.client doesn't allow None as the headers argument.
        if headers is None:
            headers = {}

        # http.client needs the host and path parts specified separately.
        parts = urllib.parse.urlsplit(url)
        path = urllib.parse.urlunsplit(
            ("", "", parts.path, parts.query, parts.fragment)
        )

        if parts.scheme != "http":
            # Bug fix: the previous adjacent string literals joined without a
            # space, producing "...the http scheme, <scheme>was specified".
            raise exceptions.TransportError(
                "http.client transport only supports the http scheme, {} "
                "was specified".format(parts.scheme)
            )

        connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)

        try:
            _LOGGER.debug("Making request: %s %s", method, url)

            connection.request(method, path, body=body, headers=headers, **kwargs)
            response = connection.getresponse()
            return Response(response)

        except (http_client.HTTPException, socket.error) as caught_exc:
            # Wrap transport-level failures while preserving the original
            # exception as the cause.
            new_exc = exceptions.TransportError(caught_exc)
            six.raise_from(new_exc, caught_exc)

        finally:
            # Always release the connection, even on error paths.
            connection.close()
diff --git a/contrib/python/google-auth/py2/google/auth/transport/_mtls_helper.py b/contrib/python/google-auth/py2/google/auth/transport/_mtls_helper.py
new file mode 100644
index 0000000000..4dccb1062f
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/_mtls_helper.py
@@ -0,0 +1,254 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for getting mTLS cert and key."""
+
+import json
+import logging
+from os import path
+import re
+import subprocess
+
+import six
+
+from google.auth import exceptions
+
CONTEXT_AWARE_METADATA_PATH = "~/.secureConnect/context_aware_metadata.json"
# JSON key in the metadata file whose value is the cert provider command.
_CERT_PROVIDER_COMMAND = "cert_provider_command"
# Matches one PEM certificate block; DOTALL lets ".+" span multiple lines.
_CERT_REGEX = re.compile(
    b"-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\r?\n?", re.DOTALL
)

# support various format of key files, e.g.
# "-----BEGIN PRIVATE KEY-----...",
# "-----BEGIN EC PRIVATE KEY-----...",
# "-----BEGIN RSA PRIVATE KEY-----..."
# "-----BEGIN ENCRYPTED PRIVATE KEY-----"
_KEY_REGEX = re.compile(
    b"-----BEGIN [A-Z ]*PRIVATE KEY-----.+-----END [A-Z ]*PRIVATE KEY-----\r?\n?",
    re.DOTALL,
)

_LOGGER = logging.getLogger(__name__)


# Captures the passphrase body in group 1 so surrounding whitespace can be
# stripped by the caller.
_PASSPHRASE_REGEX = re.compile(
    b"-----BEGIN PASSPHRASE-----(.+)-----END PASSPHRASE-----", re.DOTALL
)
+
+
def _check_dca_metadata_path(metadata_path):
    """Return the expanded context aware metadata path, or None if absent.

    Args:
        metadata_path (str): context aware metadata path.

    Returns:
        str: absolute path if exists and None otherwise.
    """
    expanded = path.expanduser(metadata_path)
    if path.exists(expanded):
        return expanded
    # Missing metadata is not an error; client SSL auth is simply skipped.
    _LOGGER.debug("%s is not found, skip client SSL authentication.", expanded)
    return None
+
+
def _read_dca_metadata_file(metadata_path):
    """Load context aware metadata (JSON) from the given path.

    Args:
        metadata_path (str): context aware metadata path.

    Returns:
        Dict[str, str]: The metadata.

    Raises:
        google.auth.exceptions.ClientCertError: If failed to parse metadata as JSON.
    """
    try:
        with open(metadata_path) as metadata_file:
            # json.loads raises ValueError on malformed JSON, which we
            # translate into the library's ClientCertError.
            return json.loads(metadata_file.read())
    except ValueError as caught_exc:
        six.raise_from(exceptions.ClientCertError(caught_exc), caught_exc)
+
+
def _run_cert_provider_command(command, expect_encrypted_key=False):
    """Run the provided command, and return client side mTLS cert, key and
    passphrase.

    Args:
        command (List[str]): cert provider command.
        expect_encrypted_key (bool): If encrypted private key is expected.

    Returns:
        Tuple[bytes, bytes, bytes]: client certificate bytes in PEM format, key
            bytes in PEM format and passphrase bytes.

    Raises:
        google.auth.exceptions.ClientCertError: if problems occurs when running
            the cert provider command or generating cert, key and passphrase.
    """
    try:
        process = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        stdout, stderr = process.communicate()
    except OSError as caught_exc:
        # Popen raises OSError when the command is missing or not executable.
        new_exc = exceptions.ClientCertError(caught_exc)
        six.raise_from(new_exc, caught_exc)

    # Check cert provider command execution error.
    if process.returncode != 0:
        raise exceptions.ClientCertError(
            "Cert provider command returns non-zero status code %s" % process.returncode
        )

    # Extract certificate (chain), key and passphrase.
    # Exactly one cert block and one key block must appear in stdout.
    cert_match = re.findall(_CERT_REGEX, stdout)
    if len(cert_match) != 1:
        raise exceptions.ClientCertError("Client SSL certificate is missing or invalid")
    key_match = re.findall(_KEY_REGEX, stdout)
    if len(key_match) != 1:
        raise exceptions.ClientCertError("Client SSL key is missing or invalid")
    passphrase_match = re.findall(_PASSPHRASE_REGEX, stdout)

    if expect_encrypted_key:
        # Encrypted mode requires exactly one passphrase block and an
        # "ENCRYPTED" marker in the key's PEM header.
        if len(passphrase_match) != 1:
            raise exceptions.ClientCertError("Passphrase is missing or invalid")
        if b"ENCRYPTED" not in key_match[0]:
            raise exceptions.ClientCertError("Encrypted private key is expected")
        # strip() removes whitespace/newlines captured by the regex group.
        return cert_match[0], key_match[0], passphrase_match[0].strip()

    # Unencrypted mode: the key must not be encrypted and no passphrase
    # block may be present.
    if b"ENCRYPTED" in key_match[0]:
        raise exceptions.ClientCertError("Encrypted private key is not expected")
    if len(passphrase_match) > 0:
        raise exceptions.ClientCertError("Passphrase is not expected")
    return cert_match[0], key_match[0], None
+
+
+def get_client_ssl_credentials(
+    generate_encrypted_key=False,
+    context_aware_metadata_path=CONTEXT_AWARE_METADATA_PATH,
+):
+    """Returns the client side certificate, private key and passphrase.
+
+    Args:
+        generate_encrypted_key (bool): If set to True, encrypted private key
+            and passphrase will be generated; otherwise, unencrypted private key
+            will be generated and passphrase will be None.
+        context_aware_metadata_path (str): The context_aware_metadata.json file path.
+
+    Returns:
+        Tuple[bool, bytes, bytes, bytes]:
+            A boolean indicating if cert, key and passphrase are obtained, the
+            cert bytes and key bytes both in PEM format, and passphrase bytes.
+
+    Raises:
+        google.auth.exceptions.ClientCertError: if problems occurs when getting
+            the cert, key and passphrase.
+    """
+    metadata_path = _check_dca_metadata_path(context_aware_metadata_path)
+
+    if metadata_path:
+        metadata_json = _read_dca_metadata_file(metadata_path)
+
+        if _CERT_PROVIDER_COMMAND not in metadata_json:
+            raise exceptions.ClientCertError("Cert provider command is not found")
+
+        command = metadata_json[_CERT_PROVIDER_COMMAND]
+
+        # Ask the provider for an encrypted key by appending the flag, unless
+        # the configured command already contains it.
+        if generate_encrypted_key and "--with_passphrase" not in command:
+            command.append("--with_passphrase")
+
+        # Execute the command.
+        cert, key, passphrase = _run_cert_provider_command(
+            command, expect_encrypted_key=generate_encrypted_key
+        )
+        return True, cert, key, passphrase
+
+    # No context aware metadata file: client SSL credentials are unavailable.
+    return False, None, None, None
+
+
+def get_client_cert_and_key(client_cert_callback=None):
+    """Returns the client side certificate and private key. The function first
+    tries to get certificate and key from client_cert_callback; if the callback
+    is None or doesn't provide certificate and key, the function tries application
+    default SSL credentials.
+
+    Args:
+        client_cert_callback (Optional[Callable[[], (bytes, bytes)]]): An
+            optional callback which returns client certificate bytes and private
+            key bytes both in PEM format.
+
+    Returns:
+        Tuple[bool, bytes, bytes]:
+            A boolean indicating if cert and key are obtained, the cert bytes
+            and key bytes both in PEM format.
+
+    Raises:
+        google.auth.exceptions.ClientCertError: if problems occurs when getting
+            the cert and key.
+    """
+    # A user-supplied callback takes precedence over application default
+    # SSL credentials.
+    if client_cert_callback:
+        cert, key = client_cert_callback()
+        return True, cert, key
+
+    # Fall back to the context aware metadata cert provider. The passphrase
+    # slot is discarded because an unencrypted key is requested.
+    has_cert, cert, key, _ = get_client_ssl_credentials(generate_encrypted_key=False)
+    return has_cert, cert, key
+
+
+def decrypt_private_key(key, passphrase):
+    """A helper function to decrypt the private key with the given passphrase.
+    google-auth library doesn't support passphrase protected private key for
+    mutual TLS channel. This helper function can be used to decrypt the
+    passphrase protected private key in order to establish mutual TLS channel.
+
+    For example, if you have a function which produces client cert, passphrase
+    protected private key and passphrase, you can convert it to a client cert
+    callback function accepted by google-auth::
+
+        from google.auth.transport import _mtls_helper
+
+        def your_client_cert_function():
+            return cert, encrypted_key, passphrase
+
+        # callback accepted by google-auth for mutual TLS channel.
+        def client_cert_callback():
+            cert, encrypted_key, passphrase = your_client_cert_function()
+            decrypted_key = _mtls_helper.decrypt_private_key(encrypted_key,
+                passphrase)
+            return cert, decrypted_key
+
+    Args:
+        key (bytes): The private key bytes in PEM format.
+        passphrase (bytes): The passphrase bytes.
+
+    Returns:
+        bytes: The decrypted private key in PEM format.
+
+    Raises:
+        ImportError: If pyOpenSSL is not installed.
+        OpenSSL.crypto.Error: If there is any problem decrypting the private key.
+    """
+    # Imported lazily so pyOpenSSL is only required when decryption is used.
+    from OpenSSL import crypto
+
+    # First convert encrypted_key_bytes to PKey object
+    pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key, passphrase=passphrase)
+
+    # Then dump the decrypted key bytes
+    return crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
diff --git a/contrib/python/google-auth/py2/google/auth/transport/grpc.py b/contrib/python/google-auth/py2/google/auth/transport/grpc.py
new file mode 100644
index 0000000000..c47cb3ddaf
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/grpc.py
@@ -0,0 +1,349 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Authorization support for gRPC."""
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import six
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+from google.oauth2 import service_account
+
+try:
+ import grpc
+except ImportError as caught_exc: # pragma: NO COVER
+ six.raise_from(
+ ImportError(
+ "gRPC is not installed, please install the grpcio package "
+ "to use the gRPC transport."
+ ),
+ caught_exc,
+ )
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class AuthMetadataPlugin(grpc.AuthMetadataPlugin):
+    """A `gRPC AuthMetadataPlugin`_ that inserts the credentials into each
+    request.
+
+    .. _gRPC AuthMetadataPlugin:
+        http://www.grpc.io/grpc/python/grpc.html#grpc.AuthMetadataPlugin
+
+    Args:
+        credentials (google.auth.credentials.Credentials): The credentials to
+            add to requests.
+        request (google.auth.transport.Request): A HTTP transport request
+            object used to refresh credentials as needed.
+        default_host (Optional[str]): A host like "pubsub.googleapis.com".
+            This is used when a self-signed JWT is created from service
+            account credentials.
+    """
+
+    def __init__(self, credentials, request, default_host=None):
+        # pylint: disable=no-value-for-parameter
+        # pylint doesn't realize that the super method takes no arguments
+        # because this class is the same name as the superclass.
+        super(AuthMetadataPlugin, self).__init__()
+        self._credentials = credentials
+        self._request = request
+        self._default_host = default_host
+
+    def _get_authorization_headers(self, context):
+        """Gets the authorization headers for a request.
+
+        Args:
+            context (grpc.AuthMetadataContext): The RPC context.
+
+        Returns:
+            Sequence[Tuple[str, str]]: A list of request headers (key, value)
+            to add to the request.
+        """
+        headers = {}
+
+        # https://google.aip.dev/auth/4111
+        # Attempt to use self-signed JWTs when a service account is used.
+        # A default host must be explicitly provided since it cannot always
+        # be determined from the context.service_url.
+        if isinstance(self._credentials, service_account.Credentials):
+            self._credentials._create_self_signed_jwt(
+                "https://{}/".format(self._default_host) if self._default_host else None
+            )
+
+        # before_request populates the authorization headers dict in place,
+        # refreshing the credentials through self._request as needed.
+        self._credentials.before_request(
+            self._request, context.method_name, context.service_url, headers
+        )
+
+        return list(six.iteritems(headers))
+
+    def __call__(self, context, callback):
+        """Passes authorization metadata into the given callback.
+
+        Args:
+            context (grpc.AuthMetadataContext): The RPC context.
+            callback (grpc.AuthMetadataPluginCallback): The callback that will
+                be invoked to pass in the authorization metadata.
+        """
+        callback(self._get_authorization_headers(context), None)
+
+
+def secure_authorized_channel(
+    credentials,
+    request,
+    target,
+    ssl_credentials=None,
+    client_cert_callback=None,
+    **kwargs
+):
+    """Creates a secure authorized gRPC channel.
+
+    This creates a channel with SSL and :class:`AuthMetadataPlugin`. This
+    channel can be used to create a stub that can make authorized requests.
+    Users can configure client certificate or rely on device certificates to
+    establish a mutual TLS channel, if the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+    variable is explicitly set to `true`.
+
+    Example::
+
+        import google.auth
+        import google.auth.transport.grpc
+        import google.auth.transport.requests
+        from google.cloud.speech.v1 import cloud_speech_pb2
+
+        # Get credentials.
+        credentials, _ = google.auth.default()
+
+        # Get an HTTP request function to refresh credentials.
+        request = google.auth.transport.requests.Request()
+
+        # Create a channel.
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, regular_endpoint, request,
+            ssl_credentials=grpc.ssl_channel_credentials())
+
+        # Use the channel to create a stub.
+        cloud_speech.create_Speech_stub(channel)
+
+    Usage:
+
+    There are actually a couple of options to create a channel, depending on if
+    you want to create a regular or mutual TLS channel.
+
+    First let's list the endpoints (regular vs mutual TLS) to choose from::
+
+        regular_endpoint = 'speech.googleapis.com:443'
+        mtls_endpoint = 'speech.mtls.googleapis.com:443'
+
+    Option 1: create a regular (non-mutual) TLS channel by explicitly setting
+    the ssl_credentials::
+
+        regular_ssl_credentials = grpc.ssl_channel_credentials()
+
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, regular_endpoint, request,
+            ssl_credentials=regular_ssl_credentials)
+
+    Option 2: create a mutual TLS channel by calling a callback which returns
+    the client side certificate and the key (Note that
+    `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
+    set to `true`)::
+
+        def my_client_cert_callback():
+            code_to_load_client_cert_and_key()
+            if loaded:
+                return (pem_cert_bytes, pem_key_bytes)
+            raise MyClientCertFailureException()
+
+        try:
+            channel = google.auth.transport.grpc.secure_authorized_channel(
+                credentials, mtls_endpoint, request,
+                client_cert_callback=my_client_cert_callback)
+        except MyClientCertFailureException:
+            # handle the exception
+
+    Option 3: use application default SSL credentials. It searches and uses
+    the command in a context aware metadata file, which is available on devices
+    with endpoint verification support (Note that
+    `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
+    set to `true`).
+    See https://cloud.google.com/endpoint-verification/docs/overview::
+
+        try:
+            default_ssl_credentials = SslCredentials()
+        except:
+            # Exception can be raised if the context aware metadata is malformed.
+            # See :class:`SslCredentials` for the possible exceptions.
+
+        # Choose the endpoint based on the SSL credentials type.
+        if default_ssl_credentials.is_mtls:
+            endpoint_to_use = mtls_endpoint
+        else:
+            endpoint_to_use = regular_endpoint
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, endpoint_to_use, request,
+            ssl_credentials=default_ssl_credentials)
+
+    Option 4: not setting ssl_credentials and client_cert_callback. For devices
+    without endpoint verification support or `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+    environment variable is not `true`, a regular TLS channel is created;
+    otherwise, a mutual TLS channel is created, however, the call should be
+    wrapped in a try/except block in case of malformed context aware metadata.
+
+    The following code uses regular_endpoint, it works the same no matter the
+    created channel is regular or mutual TLS. Regular endpoint ignores client
+    certificate and key::
+
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, regular_endpoint, request)
+
+    The following code uses mtls_endpoint, if the created channel is regular,
+    and API mtls_endpoint is configured to require client SSL credentials, API
+    calls using this channel will be rejected::
+
+        channel = google.auth.transport.grpc.secure_authorized_channel(
+            credentials, mtls_endpoint, request)
+
+    Args:
+        credentials (google.auth.credentials.Credentials): The credentials to
+            add to requests.
+        request (google.auth.transport.Request): A HTTP transport request
+            object used to refresh credentials as needed. Even though gRPC
+            is a separate transport, there's no way to refresh the credentials
+            without using a standard http transport.
+        target (str): The host and port of the service.
+        ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
+            credentials. This can be used to specify different certificates.
+            This argument is mutually exclusive with client_cert_callback;
+            providing both will raise an exception.
+            If ssl_credentials and client_cert_callback are None, application
+            default SSL credentials are used if `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+            environment variable is explicitly set to `true`, otherwise one way TLS
+            SSL credentials are used.
+        client_cert_callback (Callable[[], (bytes, bytes)]): Optional
+            callback function to obtain client certificate and key for mutual TLS
+            connection. This argument is mutually exclusive with
+            ssl_credentials; providing both will raise an exception.
+            This argument does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+            environment variable is explicitly set to `true`.
+        kwargs: Additional arguments to pass to :func:`grpc.secure_channel`.
+
+    Returns:
+        grpc.Channel: The created gRPC channel.
+
+    Raises:
+        google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+            creation failed for any reason.
+    """
+    # Create the metadata plugin for inserting the authorization header.
+    metadata_plugin = AuthMetadataPlugin(credentials, request)
+
+    # Create a set of grpc.CallCredentials using the metadata plugin.
+    google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
+
+    if ssl_credentials and client_cert_callback:
+        raise ValueError(
+            "Received both ssl_credentials and client_cert_callback; "
+            "these are mutually exclusive."
+        )
+
+    # If SSL credentials are not explicitly set, try client_cert_callback and ADC.
+    if not ssl_credentials:
+        use_client_cert = os.getenv(
+            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+        )
+        if use_client_cert == "true" and client_cert_callback:
+            # Use the callback if provided.
+            cert, key = client_cert_callback()
+            ssl_credentials = grpc.ssl_channel_credentials(
+                certificate_chain=cert, private_key=key
+            )
+        elif use_client_cert == "true":
+            # Use application default SSL credentials.
+            adc_ssl_credentils = SslCredentials()
+            ssl_credentials = adc_ssl_credentils.ssl_credentials
+        else:
+            # Client certificates disabled: fall back to one-way TLS.
+            ssl_credentials = grpc.ssl_channel_credentials()
+
+    # Combine the ssl credentials and the authorization credentials.
+    composite_credentials = grpc.composite_channel_credentials(
+        ssl_credentials, google_auth_credentials
+    )
+
+    return grpc.secure_channel(target, composite_credentials, **kwargs)
+
+
+class SslCredentials:
+    """Class for application default SSL credentials.
+
+    The behavior is controlled by `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment
+    variable whose default value is `false`. Client certificate will not be used
+    unless the environment variable is explicitly set to `true`. See
+    https://google.aip.dev/auth/4114
+
+    If the environment variable is `true`, then for devices with endpoint verification
+    support, a device certificate will be automatically loaded and mutual TLS will
+    be established.
+    See https://cloud.google.com/endpoint-verification/docs/overview.
+    """
+
+    def __init__(self):
+        use_client_cert = os.getenv(
+            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+        )
+        if use_client_cert != "true":
+            self._is_mtls = False
+        else:
+            # Load client SSL credentials.
+            # mTLS is only possible when the context aware metadata file exists.
+            metadata_path = _mtls_helper._check_dca_metadata_path(
+                _mtls_helper.CONTEXT_AWARE_METADATA_PATH
+            )
+            self._is_mtls = metadata_path is not None
+
+    @property
+    def ssl_credentials(self):
+        """Get the created SSL channel credentials.
+
+        For devices with endpoint verification support, if the device certificate
+        loading has any problems, corresponding exceptions will be raised. For
+        a device without endpoint verification support, no exceptions will be
+        raised.
+
+        Returns:
+            grpc.ChannelCredentials: The created grpc channel credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+                creation failed for any reason.
+        """
+        if self._is_mtls:
+            try:
+                # Load cert/key via the context aware metadata cert provider.
+                _, cert, key, _ = _mtls_helper.get_client_ssl_credentials()
+                self._ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            except exceptions.ClientCertError as caught_exc:
+                # Re-raise as the public mutual TLS error type, chaining cause.
+                new_exc = exceptions.MutualTLSChannelError(caught_exc)
+                six.raise_from(new_exc, caught_exc)
+        else:
+            self._ssl_credentials = grpc.ssl_channel_credentials()
+
+        return self._ssl_credentials
+
+    @property
+    def is_mtls(self):
+        """Indicates if the created SSL channel credentials is mutual TLS."""
+        return self._is_mtls
diff --git a/contrib/python/google-auth/py2/google/auth/transport/mtls.py b/contrib/python/google-auth/py2/google/auth/transport/mtls.py
new file mode 100644
index 0000000000..b40bfbedf9
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/mtls.py
@@ -0,0 +1,105 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for mutual TLS."""
+
+import six
+
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+
+
+def has_default_client_cert_source():
+    """Check if default client SSL credentials exists on the device.
+
+    Returns:
+        bool: indicating if the default client cert source exists.
+    """
+    # The default client cert source exists exactly when the context aware
+    # metadata file is present at a valid path.
+    metadata_path = _mtls_helper._check_dca_metadata_path(
+        _mtls_helper.CONTEXT_AWARE_METADATA_PATH
+    )
+    return metadata_path is not None
+
+
+def default_client_cert_source():
+    """Get a callback which returns the default client SSL credentials.
+
+    Returns:
+        Callable[[], [bytes, bytes]]: A callback which returns the default
+        client certificate bytes and private key bytes, both in PEM format.
+
+    Raises:
+        google.auth.exceptions.MutualTLSChannelError: If the default
+            client SSL credentials don't exist or are malformed.
+    """
+    if not has_default_client_cert_source():
+        raise exceptions.MutualTLSChannelError(
+            "Default client cert source doesn't exist"
+        )
+
+    def callback():
+        try:
+            _, cert_bytes, key_bytes = _mtls_helper.get_client_cert_and_key()
+        except (OSError, RuntimeError, ValueError) as caught_exc:
+            # Normalize cert-provider failures into the public mutual TLS
+            # error type, keeping the original cause chained.
+            new_exc = exceptions.MutualTLSChannelError(caught_exc)
+            six.raise_from(new_exc, caught_exc)
+
+        return cert_bytes, key_bytes
+
+    return callback
+
+
+def default_client_encrypted_cert_source(cert_path, key_path):
+    """Get a callback which returns the default encrypted client SSL credentials.
+
+    Args:
+        cert_path (str): The cert file path. The default client certificate will
+            be written to this file when the returned callback is called.
+        key_path (str): The key file path. The default encrypted client key will
+            be written to this file when the returned callback is called.
+
+    Returns:
+        Callable[[], [str, str, bytes]]: A callback which generates the default
+        client certificate, encrypted private key and passphrase. It writes
+        the certificate and private key into the cert_path and key_path, and
+        returns the cert_path, key_path and passphrase bytes.
+
+    Raises:
+        google.auth.exceptions.MutualTLSChannelError: If any problem
+            occurs when loading or saving the client certificate and key.
+    """
+    if not has_default_client_cert_source():
+        raise exceptions.MutualTLSChannelError(
+            "Default client encrypted cert source doesn't exist"
+        )
+
+    def callback():
+        try:
+            (
+                _,
+                cert_bytes,
+                key_bytes,
+                passphrase_bytes,
+            ) = _mtls_helper.get_client_ssl_credentials(generate_encrypted_key=True)
+            # Persist the PEM material so callers can hand file paths to
+            # libraries that cannot accept in-memory bytes.
+            with open(cert_path, "wb") as cert_file:
+                cert_file.write(cert_bytes)
+            with open(key_path, "wb") as key_file:
+                key_file.write(key_bytes)
+        except (exceptions.ClientCertError, OSError) as caught_exc:
+            new_exc = exceptions.MutualTLSChannelError(caught_exc)
+            six.raise_from(new_exc, caught_exc)
+
+        return cert_path, key_path, passphrase_bytes
+
+    return callback
diff --git a/contrib/python/google-auth/py2/google/auth/transport/requests.py b/contrib/python/google-auth/py2/google/auth/transport/requests.py
new file mode 100644
index 0000000000..817176befa
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/requests.py
@@ -0,0 +1,542 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for Requests."""
+
+from __future__ import absolute_import
+
+import functools
+import logging
+import numbers
+import os
+import time
+
+try:
+ import requests
+except ImportError as caught_exc: # pragma: NO COVER
+ import six
+
+ six.raise_from(
+ ImportError(
+ "The requests library is not installed, please install the "
+ "requests package to use the requests transport."
+ ),
+ caught_exc,
+ )
+import requests.adapters # pylint: disable=ungrouped-imports
+import requests.exceptions # pylint: disable=ungrouped-imports
+from requests.packages.urllib3.util.ssl_ import (
+ create_urllib3_context,
+) # pylint: disable=ungrouped-imports
+import six # pylint: disable=ungrouped-imports
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+import google.auth.transport._mtls_helper
+from google.oauth2 import service_account
+
+_LOGGER = logging.getLogger(__name__)
+
+_DEFAULT_TIMEOUT = 120 # in seconds
+
+
+class _Response(transport.Response):
+    """Requests transport response adapter.
+
+    Args:
+        response (requests.Response): The raw Requests response.
+    """
+
+    def __init__(self, response):
+        self._response = response
+
+    @property
+    def status(self):
+        # The HTTP status code of the underlying response.
+        return self._response.status_code
+
+    @property
+    def headers(self):
+        # The response headers mapping of the underlying response.
+        return self._response.headers
+
+    @property
+    def data(self):
+        # The raw response body bytes.
+        return self._response.content
+
+
+class TimeoutGuard(object):
+    """A context manager raising an error if the suite execution took too long.
+
+    Args:
+        timeout (Union[None, Union[float, Tuple[float, float]]]):
+            The maximum number of seconds a suite can run without the context
+            manager raising a timeout exception on exit. If passed as a tuple,
+            the smaller of the values is taken as a timeout. If ``None``, a
+            timeout error is never raised.
+        timeout_error_type (Optional[Exception]):
+            The type of the error to raise on timeout. Defaults to
+            :class:`requests.exceptions.Timeout`.
+    """
+
+    def __init__(self, timeout, timeout_error_type=requests.exceptions.Timeout):
+        self._timeout = timeout
+        # Updated on __exit__ to reflect how much of the budget remains.
+        self.remaining_timeout = timeout
+        self._timeout_error_type = timeout_error_type
+
+    def __enter__(self):
+        self._start = time.time()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if exc_value:
+            return  # let the error bubble up automatically
+
+        if self._timeout is None:
+            return  # nothing to do, the timeout was not specified
+
+        elapsed = time.time() - self._start
+        deadline_hit = False
+
+        if isinstance(self._timeout, numbers.Number):
+            self.remaining_timeout = self._timeout - elapsed
+            deadline_hit = self.remaining_timeout <= 0
+        else:
+            # Tuple timeout: every component shrinks by the elapsed time, and
+            # the smallest remaining component decides whether we timed out.
+            self.remaining_timeout = tuple(x - elapsed for x in self._timeout)
+            deadline_hit = min(self.remaining_timeout) <= 0
+
+        if deadline_hit:
+            raise self._timeout_error_type()
+
+
+class Request(transport.Request):
+    """Requests request adapter.
+
+    This class is used internally for making requests using various transports
+    in a consistent way. If you use :class:`AuthorizedSession` you do not need
+    to construct or use this class directly.
+
+    This class can be useful if you want to manually refresh a
+    :class:`~google.auth.credentials.Credentials` instance::
+
+        import google.auth.transport.requests
+        import requests
+
+        request = google.auth.transport.requests.Request()
+
+        credentials.refresh(request)
+
+    Args:
+        session (requests.Session): An instance :class:`requests.Session` used
+            to make HTTP requests. If not specified, a session will be created.
+
+    .. automethod:: __call__
+    """
+
+    def __init__(self, session=None):
+        if not session:
+            session = requests.Session()
+
+        self.session = session
+
+    def __call__(
+        self,
+        url,
+        method="GET",
+        body=None,
+        headers=None,
+        timeout=_DEFAULT_TIMEOUT,
+        **kwargs
+    ):
+        """Make an HTTP request using requests.
+
+        Args:
+            url (str): The URI to be requested.
+            method (str): The HTTP method to use for the request. Defaults
+                to 'GET'.
+            body (bytes): The payload or body in HTTP request.
+            headers (Mapping[str, str]): Request headers.
+            timeout (Optional[int]): The number of seconds to wait for a
+                response from the server. If not specified or if None, the
+                requests default timeout will be used.
+            kwargs: Additional arguments passed through to the underlying
+                requests :meth:`~requests.Session.request` method.
+
+        Returns:
+            google.auth.transport.Response: The HTTP response.
+
+        Raises:
+            google.auth.exceptions.TransportError: If any exception occurred.
+        """
+        try:
+            _LOGGER.debug("Making request: %s %s", method, url)
+            response = self.session.request(
+                method, url, data=body, headers=headers, timeout=timeout, **kwargs
+            )
+            return _Response(response)
+        except requests.exceptions.RequestException as caught_exc:
+            # Wrap any requests-level failure in the transport-agnostic
+            # TransportError, chaining the original exception.
+            new_exc = exceptions.TransportError(caught_exc)
+            six.raise_from(new_exc, caught_exc)
+
+
+class _MutualTlsAdapter(requests.adapters.HTTPAdapter):
+    """
+    A TransportAdapter that enables mutual TLS.
+
+    Args:
+        cert (bytes): client certificate in PEM format
+        key (bytes): client private key in PEM format
+
+    Raises:
+        ImportError: if certifi or pyOpenSSL is not installed
+        OpenSSL.crypto.Error: if client cert or key is invalid
+    """
+
+    def __init__(self, cert, key):
+        # Imported lazily so these optional dependencies are only required
+        # when mutual TLS is actually configured.
+        import certifi
+        from OpenSSL import crypto
+        import urllib3.contrib.pyopenssl
+
+        # Route urllib3's SSL through pyOpenSSL so the loaded certificate and
+        # key objects can be attached to the contexts below.
+        urllib3.contrib.pyopenssl.inject_into_urllib3()
+
+        pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
+        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
+
+        # Two separate contexts: one for direct connections (pool manager) and
+        # one for proxied connections, both trusting the certifi CA bundle and
+        # presenting the client certificate/key.
+        ctx_poolmanager = create_urllib3_context()
+        ctx_poolmanager.load_verify_locations(cafile=certifi.where())
+        ctx_poolmanager._ctx.use_certificate(x509)
+        ctx_poolmanager._ctx.use_privatekey(pkey)
+        self._ctx_poolmanager = ctx_poolmanager
+
+        ctx_proxymanager = create_urllib3_context()
+        ctx_proxymanager.load_verify_locations(cafile=certifi.where())
+        ctx_proxymanager._ctx.use_certificate(x509)
+        ctx_proxymanager._ctx.use_privatekey(pkey)
+        self._ctx_proxymanager = ctx_proxymanager
+
+        super(_MutualTlsAdapter, self).__init__()
+
+    def init_poolmanager(self, *args, **kwargs):
+        # Inject the mTLS context into every connection pool.
+        kwargs["ssl_context"] = self._ctx_poolmanager
+        super(_MutualTlsAdapter, self).init_poolmanager(*args, **kwargs)
+
+    def proxy_manager_for(self, *args, **kwargs):
+        # Inject the mTLS context into proxied connections as well.
+        kwargs["ssl_context"] = self._ctx_proxymanager
+        return super(_MutualTlsAdapter, self).proxy_manager_for(*args, **kwargs)
+
+
+class AuthorizedSession(requests.Session):
+ """A Requests Session class with credentials.
+
+ This class is used to perform requests to API endpoints that require
+ authorization::
+
+ from google.auth.transport.requests import AuthorizedSession
+
+ authed_session = AuthorizedSession(credentials)
+
+ response = authed_session.request(
+ 'GET', 'https://www.googleapis.com/storage/v1/b')
+
+
+ The underlying :meth:`request` implementation handles adding the
+ credentials' headers to the request and refreshing credentials as needed.
+
+ This class also supports mutual TLS via :meth:`configure_mtls_channel`
+ method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ environment variable must be explicitly set to ``true``, otherwise it does
+ nothing. Assume the environment is set to ``true``, the method behaves in the
+ following manner:
+
+ If client_cert_callback is provided, client certificate and private
+ key are loaded using the callback; if client_cert_callback is None,
+ application default SSL credentials will be used. Exceptions are raised if
+ there are problems with the certificate, private key, or the loading process,
+ so it should be called within a try/except block.
+
+ First we set the environment variable to ``true``, then create an :class:`AuthorizedSession`
+ instance and specify the endpoints::
+
+ regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics'
+ mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics'
+
+ authed_session = AuthorizedSession(credentials)
+
+ Now we can pass a callback to :meth:`configure_mtls_channel`::
+
+ def my_cert_callback():
+ # some code to load client cert bytes and private key bytes, both in
+ # PEM format.
+ some_code_to_load_client_cert_and_key()
+ if loaded:
+ return cert, key
+ raise MyClientCertFailureException()
+
+ # Always call configure_mtls_channel within a try/except block.
+ try:
+ authed_session.configure_mtls_channel(my_cert_callback)
+ except:
+ # handle exceptions.
+
+ if authed_session.is_mtls:
+ response = authed_session.request('GET', mtls_endpoint)
+ else:
+ response = authed_session.request('GET', regular_endpoint)
+
+
+ You can alternatively use application default SSL credentials like this::
+
+ try:
+ authed_session.configure_mtls_channel()
+ except:
+ # handle exceptions.
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ add to the request.
+ refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
+ that credentials should be refreshed and the request should be
+ retried.
+ max_refresh_attempts (int): The maximum number of times to attempt to
+ refresh the credentials and retry the request.
+ refresh_timeout (Optional[int]): The timeout value in seconds for
+ credential refresh HTTP requests.
+ auth_request (google.auth.transport.requests.Request):
+ (Optional) An instance of
+ :class:`~google.auth.transport.requests.Request` used when
+ refreshing credentials. If not passed,
+ an instance of :class:`~google.auth.transport.requests.Request`
+ is created.
+ default_host (Optional[str]): A host like "pubsub.googleapis.com".
+ This is used when a self-signed JWT is created from service
+ account credentials.
+ """
+
+ def __init__(
+ self,
+ credentials,
+ refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
+ max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
+ refresh_timeout=None,
+ auth_request=None,
+ default_host=None,
+ ):
+ super(AuthorizedSession, self).__init__()
+ self.credentials = credentials
+ self._refresh_status_codes = refresh_status_codes
+ self._max_refresh_attempts = max_refresh_attempts
+ self._refresh_timeout = refresh_timeout
+ self._is_mtls = False
+ self._default_host = default_host
+
+ if auth_request is None:
+ self._auth_request_session = requests.Session()
+
+ # Using an adapter to make HTTP requests robust to network errors.
+ # This adapter retrys HTTP requests when network errors occur
+ # and the requests seems safely retryable.
+ retry_adapter = requests.adapters.HTTPAdapter(max_retries=3)
+ self._auth_request_session.mount("https://", retry_adapter)
+
+ # Do not pass `self` as the session here, as it can lead to
+ # infinite recursion.
+ auth_request = Request(self._auth_request_session)
+ else:
+ self._auth_request_session = None
+
+ # Request instance used by internal methods (for example,
+ # credentials.refresh).
+ self._auth_request = auth_request
+
+ # https://google.aip.dev/auth/4111
+ # Attempt to use self-signed JWTs when a service account is used.
+ if isinstance(self.credentials, service_account.Credentials):
+ self.credentials._create_self_signed_jwt(
+ "https://{}/".format(self._default_host) if self._default_host else None
+ )
+
+ def configure_mtls_channel(self, client_cert_callback=None):
+ """Configure the client certificate and key for SSL connection.
+
+ The function does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE` is
+ explicitly set to `true`. In this case if client certificate and key are
+ successfully obtained (from the given client_cert_callback or from application
+ default SSL credentials), a :class:`_MutualTlsAdapter` instance will be mounted
+ to "https://" prefix.
+
+ Args:
+ client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
+ The optional callback returns the client certificate and private
+ key bytes both in PEM format.
+ If the callback is None, application default SSL credentials
+ will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+ creation failed for any reason.
+ """
+ use_client_cert = os.getenv(
+ environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+ )
+ if use_client_cert != "true":
+ self._is_mtls = False
+ return
+
+ try:
+ import OpenSSL
+ except ImportError as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ six.raise_from(new_exc, caught_exc)
+
+ try:
+ (
+ self._is_mtls,
+ cert,
+ key,
+ ) = google.auth.transport._mtls_helper.get_client_cert_and_key(
+ client_cert_callback
+ )
+
+ if self._is_mtls:
+ mtls_adapter = _MutualTlsAdapter(cert, key)
+ self.mount("https://", mtls_adapter)
+ except (
+ exceptions.ClientCertError,
+ ImportError,
+ OpenSSL.crypto.Error,
+ ) as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ six.raise_from(new_exc, caught_exc)
+
    def request(
        self,
        method,
        url,
        data=None,
        headers=None,
        max_allowed_time=None,
        timeout=_DEFAULT_TIMEOUT,
        **kwargs
    ):
        """Implementation of Requests' request.

        Applies the credentials' auth headers (refreshing them first if
        needed), performs the request, and on a refresh-worthy status code
        refreshes the credentials and retries up to
        ``self._max_refresh_attempts`` times.

        Args:
            method (str): The HTTP method to use for the request.
            url (str): The URI to be requested.
            data: The request body payload, passed through to
                :meth:`requests.Session.request`.
            headers (Optional[Mapping[str, str]]): Request headers. The
                caller's mapping is never mutated; a copy receives the auth
                headers.
            max_allowed_time (Optional[float]):
                If the method runs longer than this, a ``Timeout`` exception is
                automatically raised. Unlike the ``timeout`` parameter, this
                value applies to the total method execution time, even if
                multiple requests are made under the hood.

                Mind that it is not guaranteed that the timeout error is raised
                at ``max_allowed_time``. It might take longer, for example, if
                an underlying request takes a lot of time, but the request
                itself does not timeout, e.g. if a large file is being
                transmitted. The timeout error will be raised after such
                request completes.
            timeout (Optional[Union[float, Tuple[float, float]]]):
                The amount of time in seconds to wait for the server response
                with each individual request. Can also be passed as a tuple
                ``(connect_timeout, read_timeout)``. See :meth:`requests.Session.request`
                documentation for details.
            kwargs: Additional arguments passed through to
                :meth:`requests.Session.request`.

        Returns:
            requests.Response: The HTTP response.
        """
        # pylint: disable=arguments-differ
        # Requests has a ton of arguments to request, but only two
        # (method, url) are required. We pass through all of the other
        # arguments to super, so no need to exhaustively list them here.

        # Use a kwarg for this instead of an attribute to maintain
        # thread-safety.
        _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)

        # Make a copy of the headers. They will be modified by the credentials
        # and we want to pass the original headers if we recurse.
        request_headers = headers.copy() if headers is not None else {}

        # Do not apply the timeout unconditionally in order to not override the
        # _auth_request's default timeout.
        auth_request = (
            self._auth_request
            if timeout is None
            else functools.partial(self._auth_request, timeout=timeout)
        )

        # Each TimeoutGuard below charges its elapsed time against the shared
        # max_allowed_time budget via remaining_timeout.
        remaining_time = max_allowed_time

        with TimeoutGuard(remaining_time) as guard:
            self.credentials.before_request(auth_request, method, url, request_headers)
        remaining_time = guard.remaining_timeout

        with TimeoutGuard(remaining_time) as guard:
            response = super(AuthorizedSession, self).request(
                method,
                url,
                data=data,
                headers=request_headers,
                timeout=timeout,
                **kwargs
            )
        remaining_time = guard.remaining_timeout

        # If the response indicated that the credentials needed to be
        # refreshed, then refresh the credentials and re-attempt the
        # request.
        # A stored token may expire between the time it is retrieved and
        # the time the request is made, so we may need to try twice.
        if (
            response.status_code in self._refresh_status_codes
            and _credential_refresh_attempt < self._max_refresh_attempts
        ):

            _LOGGER.info(
                "Refreshing credentials due to a %s response. Attempt %s/%s.",
                response.status_code,
                _credential_refresh_attempt + 1,
                self._max_refresh_attempts,
            )

            # Do not apply the timeout unconditionally in order to not override the
            # _auth_request's default timeout.
            auth_request = (
                self._auth_request
                if timeout is None
                else functools.partial(self._auth_request, timeout=timeout)
            )

            with TimeoutGuard(remaining_time) as guard:
                self.credentials.refresh(auth_request)
            remaining_time = guard.remaining_timeout

            # Recurse. Pass in the original headers, not our modified set, but
            # do pass the adjusted max allowed time (i.e. the remaining total time).
            return self.request(
                method,
                url,
                data=data,
                headers=headers,
                max_allowed_time=remaining_time,
                timeout=timeout,
                _credential_refresh_attempt=_credential_refresh_attempt + 1,
                **kwargs
            )

        return response
+
+ @property
+ def is_mtls(self):
+ """Indicates if the created SSL channel is mutual TLS."""
+ return self._is_mtls
+
+ def close(self):
+ if self._auth_request_session is not None:
+ self._auth_request_session.close()
+ super(AuthorizedSession, self).close()
diff --git a/contrib/python/google-auth/py2/google/auth/transport/urllib3.py b/contrib/python/google-auth/py2/google/auth/transport/urllib3.py
new file mode 100644
index 0000000000..6a2504d972
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/transport/urllib3.py
@@ -0,0 +1,439 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for urllib3."""
+
+from __future__ import absolute_import
+
+import logging
+import os
+import warnings
+
+# Certifi is Mozilla's certificate bundle. Urllib3 needs a certificate bundle
+# to verify HTTPS requests, and certifi is the recommended and most reliable
+# way to get a root certificate bundle. See
+# http://urllib3.readthedocs.io/en/latest/user-guide.html\
+# #certificate-verification
+# For more details.
+try:
+ import certifi
+except ImportError: # pragma: NO COVER
+ certifi = None
+
+try:
+ import urllib3
+except ImportError as caught_exc: # pragma: NO COVER
+ import six
+
+ six.raise_from(
+ ImportError(
+ "The urllib3 library is not installed, please install the "
+ "urllib3 package to use the urllib3 transport."
+ ),
+ caught_exc,
+ )
+import six
+import urllib3.exceptions # pylint: disable=ungrouped-imports
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import service_account
+
+_LOGGER = logging.getLogger(__name__)
+
+
class _Response(transport.Response):
    """Adapts a raw urllib3 response to the transport.Response interface.

    Args:
        response (urllib3.response.HTTPResponse): The raw urllib3 response.
    """

    def __init__(self, response):
        self._response = response

    @property
    def status(self):
        """int: The HTTP status code."""
        return self._response.status

    @property
    def headers(self):
        """Mapping[str, str]: The HTTP response headers."""
        return self._response.headers

    @property
    def data(self):
        """bytes: The response body."""
        return self._response.data
+
+
class Request(transport.Request):
    """urllib3 request adapter.

    This class is used internally for making requests using various transports
    in a consistent way. If you use :class:`AuthorizedHttp` you do not need
    to construct or use this class directly.

    This class can be useful if you want to manually refresh a
    :class:`~google.auth.credentials.Credentials` instance::

        import google.auth.transport.urllib3
        import urllib3

        http = urllib3.PoolManager()
        request = google.auth.transport.urllib3.Request(http)

        credentials.refresh(request)

    Args:
        http (urllib3.request.RequestMethods): An instance of any urllib3
            class that implements :class:`~urllib3.request.RequestMethods`,
            usually :class:`urllib3.PoolManager`.

    .. automethod:: __call__
    """

    def __init__(self, http):
        self.http = http

    def __call__(
        self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
    ):
        """Make an HTTP request using urllib3.

        Args:
            url (str): The URI to be requested.
            method (str): The HTTP method to use for the request. Defaults
                to 'GET'.
            body (bytes): The payload / body in HTTP request.
            headers (Mapping[str, str]): Request headers.
            timeout (Optional[int]): The number of seconds to wait for a
                response from the server. If not specified or if None, the
                urllib3 default timeout will be used.
            kwargs: Additional arguments passed through to the underlying
                urllib3 :meth:`urlopen` method.

        Returns:
            google.auth.transport.Response: The HTTP response.

        Raises:
            google.auth.exceptions.TransportError: If any exception occurred.
        """
        # urllib3 uses a sentinel default for timeout, so only forward it
        # when the caller provided a real value.
        if timeout is not None:
            kwargs["timeout"] = timeout

        _LOGGER.debug("Making request: %s %s", method, url)
        try:
            raw_response = self.http.request(
                method, url, body=body, headers=headers, **kwargs
            )
        except urllib3.exceptions.HTTPError as caught_exc:
            new_exc = exceptions.TransportError(caught_exc)
            six.raise_from(new_exc, caught_exc)
        else:
            return _Response(raw_response)
+
+
def _make_default_http():
    """Return a PoolManager, verifying certificates via certifi when available."""
    if certifi is None:
        return urllib3.PoolManager()
    return urllib3.PoolManager(cert_reqs="CERT_REQUIRED", ca_certs=certifi.where())
+
+
def _make_mutual_tls_http(cert, key):
    """Create a mutual TLS HTTP connection with the given client cert and key.
    See https://github.com/urllib3/urllib3/issues/474#issuecomment-253168415

    Args:
        cert (bytes): client certificate in PEM format
        key (bytes): client private key in PEM format

    Returns:
        urllib3.PoolManager: Mutual TLS HTTP connection.

    Raises:
        ImportError: If certifi or pyOpenSSL is not installed.
        OpenSSL.crypto.Error: If the cert or key is invalid.
    """
    import certifi
    from OpenSSL import crypto
    import urllib3.contrib.pyopenssl

    # Route urllib3 through pyOpenSSL so the SSL context created below can
    # carry a client certificate (see the urllib3 issue linked above).
    urllib3.contrib.pyopenssl.inject_into_urllib3()
    ssl_context = urllib3.util.ssl_.create_urllib3_context()
    ssl_context.load_verify_locations(cafile=certifi.where())

    client_key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
    client_cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

    ssl_context._ctx.use_certificate(client_cert)
    ssl_context._ctx.use_privatekey(client_key)

    return urllib3.PoolManager(ssl_context=ssl_context)
+
+
class AuthorizedHttp(urllib3.request.RequestMethods):
    """A urllib3 HTTP class with credentials.

    This class is used to perform requests to API endpoints that require
    authorization::

        from google.auth.transport.urllib3 import AuthorizedHttp

        authed_http = AuthorizedHttp(credentials)

        response = authed_http.request(
            'GET', 'https://www.googleapis.com/storage/v1/b')

    This class implements :class:`urllib3.request.RequestMethods` and can be
    used just like any other :class:`urllib3.PoolManager`.

    The underlying :meth:`urlopen` implementation handles adding the
    credentials' headers to the request and refreshing credentials as needed.

    This class also supports mutual TLS via :meth:`configure_mtls_channel`
    method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
    environment variable must be explicitly set to `true`, otherwise it does
    nothing. Assume the environment is set to `true`, the method behaves in the
    following manner:
    If client_cert_callback is provided, client certificate and private
    key are loaded using the callback; if client_cert_callback is None,
    application default SSL credentials will be used. Exceptions are raised if
    there are problems with the certificate, private key, or the loading process,
    so it should be called within a try/except block.

    First we set the environment variable to `true`, then create an :class:`AuthorizedHttp`
    instance and specify the endpoints::

        regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics'
        mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics'

        authed_http = AuthorizedHttp(credentials)

    Now we can pass a callback to :meth:`configure_mtls_channel`::

        def my_cert_callback():
            # some code to load client cert bytes and private key bytes, both in
            # PEM format.
            some_code_to_load_client_cert_and_key()
            if loaded:
                return cert, key
            raise MyClientCertFailureException()

        # Always call configure_mtls_channel within a try/except block.
        try:
            is_mtls = authed_http.configure_mtls_channel(my_cert_callback)
        except:
            # handle exceptions.

        if is_mtls:
            response = authed_http.request('GET', mtls_endpoint)
        else:
            response = authed_http.request('GET', regular_endpoint)

    You can alternatively use application default SSL credentials like this::

        try:
            is_mtls = authed_http.configure_mtls_channel()
        except:
            # handle exceptions.

    Args:
        credentials (google.auth.credentials.Credentials): The credentials to
            add to the request.
        http (urllib3.PoolManager): The underlying HTTP object to
            use to make requests. If not specified, a
            :class:`urllib3.PoolManager` instance will be constructed with
            sane defaults.
        refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
            that credentials should be refreshed and the request should be
            retried.
        max_refresh_attempts (int): The maximum number of times to attempt to
            refresh the credentials and retry the request.
        default_host (Optional[str]): A host like "pubsub.googleapis.com".
            This is used when a self-signed JWT is created from service
            account credentials.
    """

    def __init__(
        self,
        credentials,
        http=None,
        refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
        max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
        default_host=None,
    ):
        # Track whether the pool was supplied by the caller; this drives the
        # warning emitted by configure_mtls_channel when it replaces the pool.
        if http is None:
            self.http = _make_default_http()
            self._has_user_provided_http = False
        else:
            self.http = http
            self._has_user_provided_http = True

        self.credentials = credentials
        self._refresh_status_codes = refresh_status_codes
        self._max_refresh_attempts = max_refresh_attempts
        self._default_host = default_host
        # Request instance used by internal methods (for example,
        # credentials.refresh).
        self._request = Request(self.http)

        # https://google.aip.dev/auth/4111
        # Attempt to use self-signed JWTs when a service account is used.
        if isinstance(self.credentials, service_account.Credentials):
            self.credentials._create_self_signed_jwt(
                "https://{}/".format(self._default_host) if self._default_host else None
            )

        super(AuthorizedHttp, self).__init__()

    def configure_mtls_channel(self, client_cert_callback=None):
        """Configures mutual TLS channel using the given client_cert_callback or
        application default SSL credentials. The behavior is controlled by
        `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable.
        (1) If the environment variable value is `true`, the function returns True
        if the channel is mutual TLS and False otherwise. The `http` provided
        in the constructor will be overwritten.
        (2) If the environment variable is not set or `false`, the function does
        nothing and it always return False.

        Args:
            client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
                The optional callback returns the client certificate and private
                key bytes both in PEM format.
                If the callback is None, application default SSL credentials
                will be used.

        Returns:
            True if the channel is mutual TLS and False otherwise.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
                creation failed for any reason.
        """
        use_client_cert = os.getenv(
            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
        )
        if use_client_cert != "true":
            return False

        # pyOpenSSL is required by _make_mutual_tls_http; fail early with the
        # channel-specific exception if it is missing.
        try:
            import OpenSSL
        except ImportError as caught_exc:
            new_exc = exceptions.MutualTLSChannelError(caught_exc)
            six.raise_from(new_exc, caught_exc)

        try:
            found_cert_key, cert, key = transport._mtls_helper.get_client_cert_and_key(
                client_cert_callback
            )

            # NOTE(review): self._request (used for credential refresh) still
            # wraps the pool created in __init__ after self.http is replaced
            # here — confirm the refresh path is meant to keep the old pool.
            if found_cert_key:
                self.http = _make_mutual_tls_http(cert, key)
            else:
                self.http = _make_default_http()
        except (
            exceptions.ClientCertError,
            ImportError,
            OpenSSL.crypto.Error,
        ) as caught_exc:
            new_exc = exceptions.MutualTLSChannelError(caught_exc)
            six.raise_from(new_exc, caught_exc)

        if self._has_user_provided_http:
            self._has_user_provided_http = False
            warnings.warn(
                "`http` provided in the constructor is overwritten", UserWarning
            )

        return found_cert_key

    def urlopen(self, method, url, body=None, headers=None, **kwargs):
        """Implementation of urllib3's urlopen."""
        # pylint: disable=arguments-differ
        # We use kwargs to collect additional args that we don't need to
        # introspect here. However, we do explicitly collect the two
        # positional arguments.

        # Use a kwarg for this instead of an attribute to maintain
        # thread-safety.
        _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)

        if headers is None:
            headers = self.headers

        # Make a copy of the headers. They will be modified by the credentials
        # and we want to pass the original headers if we recurse.
        request_headers = headers.copy()

        self.credentials.before_request(self._request, method, url, request_headers)

        response = self.http.urlopen(
            method, url, body=body, headers=request_headers, **kwargs
        )

        # If the response indicated that the credentials needed to be
        # refreshed, then refresh the credentials and re-attempt the
        # request.
        # A stored token may expire between the time it is retrieved and
        # the time the request is made, so we may need to try twice.
        # The reason urllib3's retries aren't used is because they
        # don't allow you to modify the request headers. :/
        if (
            response.status in self._refresh_status_codes
            and _credential_refresh_attempt < self._max_refresh_attempts
        ):

            _LOGGER.info(
                "Refreshing credentials due to a %s response. Attempt %s/%s.",
                response.status,
                _credential_refresh_attempt + 1,
                self._max_refresh_attempts,
            )

            self.credentials.refresh(self._request)

            # Recurse. Pass in the original headers, not our modified set.
            # The recursion depth is bounded by self._max_refresh_attempts via
            # the _credential_refresh_attempt counter.
            return self.urlopen(
                method,
                url,
                body=body,
                headers=headers,
                _credential_refresh_attempt=_credential_refresh_attempt + 1,
                **kwargs
            )

        return response

    # Proxy methods for compliance with the urllib3.PoolManager interface

    def __enter__(self):
        """Proxy to ``self.http``."""
        return self.http.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Proxy to ``self.http``."""
        return self.http.__exit__(exc_type, exc_val, exc_tb)

    @property
    def headers(self):
        """Proxy to ``self.http``."""
        return self.http.headers

    @headers.setter
    def headers(self, value):
        """Proxy to ``self.http``."""
        self.http.headers = value
diff --git a/contrib/python/google-auth/py2/google/auth/version.py b/contrib/python/google-auth/py2/google/auth/version.py
new file mode 100644
index 0000000000..989cbbceb0
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/auth/version.py
@@ -0,0 +1,15 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "1.35.0"
diff --git a/contrib/python/google-auth/py2/google/oauth2/__init__.py b/contrib/python/google-auth/py2/google/oauth2/__init__.py
new file mode 100644
index 0000000000..4fb71fd1ad
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google OAuth 2.0 Library for Python."""
diff --git a/contrib/python/google-auth/py2/google/oauth2/_client.py b/contrib/python/google-auth/py2/google/oauth2/_client.py
new file mode 100644
index 0000000000..2f4e8474b5
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/_client.py
@@ -0,0 +1,327 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 client.
+
+This is a client for interacting with an OAuth 2.0 authorization server's
+token endpoint.
+
+For more information about the token endpoint, see
+`Section 3.1 of rfc6749`_
+
+.. _Section 3.1 of rfc6749: https://tools.ietf.org/html/rfc6749#section-3.2
+"""
+
+import datetime
+import json
+
+import six
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import jwt
+
+_URLENCODED_CONTENT_TYPE = "application/x-www-form-urlencoded"
+_JSON_CONTENT_TYPE = "application/json"
+_JWT_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+_REFRESH_GRANT_TYPE = "refresh_token"
+
+
def _handle_error_response(response_data):
    """Translates an error response into an exception.

    Args:
        response_data (Mapping): The decoded response data.

    Raises:
        google.auth.exceptions.RefreshError: The errors contained in response_data.
    """
    try:
        details = "{}: {}".format(
            response_data["error"], response_data.get("error_description")
        )
    except (KeyError, ValueError):
        # No structured error fields could be extracted; fall back to the
        # raw payload.
        details = json.dumps(response_data)

    raise exceptions.RefreshError(details, response_data)
+
+
+def _parse_expiry(response_data):
+ """Parses the expiry field from a response into a datetime.
+
+ Args:
+ response_data (Mapping): The JSON-parsed response data.
+
+ Returns:
+ Optional[datetime]: The expiration or ``None`` if no expiration was
+ specified.
+ """
+ expires_in = response_data.get("expires_in", None)
+
+ if expires_in is not None:
+ return _helpers.utcnow() + datetime.timedelta(seconds=expires_in)
+ else:
+ return None
+
+
def _token_endpoint_request_no_throw(
    request, token_uri, body, access_token=None, use_json=False
):
    """Makes a request to the OAuth 2.0 authorization server's token endpoint.
    This function doesn't throw on response errors.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        token_uri (str): The OAuth 2.0 authorizations server's token endpoint
            URI.
        body (Mapping[str, str]): The parameters to send in the request body.
        access_token (Optional(str)): The access token needed to make the request.
        use_json (Optional(bool)): Use urlencoded format or json format for the
            content type. The default value is False.

    Returns:
        Tuple(bool, Mapping[str, str]): A boolean indicating if the request is
            successful, and a mapping for the JSON-decoded response data.
    """
    if use_json:
        headers = {"Content-Type": _JSON_CONTENT_TYPE}
        body = json.dumps(body).encode("utf-8")
    else:
        headers = {"Content-Type": _URLENCODED_CONTENT_TYPE}
        body = urllib.parse.urlencode(body).encode("utf-8")

    if access_token:
        headers["Authorization"] = "Bearer {}".format(access_token)

    # Issue the request, making at most one extra attempt when the server
    # reports an "internal_failure" error.
    for attempt in (0, 1):
        response = request(method="POST", url=token_uri, headers=headers, body=body)
        response_body = (
            response.data.decode("utf-8")
            if hasattr(response.data, "decode")
            else response.data
        )
        response_data = json.loads(response_body)

        if response.status == http_client.OK:
            break

        error_code = response_data.get("error") or ""
        error_desc = response_data.get("error_description") or ""
        # Stop unless this failure is retryable and we have an attempt left.
        if "internal_failure" not in (error_code, error_desc) or attempt:
            break

    return response.status == http_client.OK, response_data
+
+
def _token_endpoint_request(
    request, token_uri, body, access_token=None, use_json=False
):
    """Makes a request to the OAuth 2.0 authorization server's token endpoint.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        token_uri (str): The OAuth 2.0 authorizations server's token endpoint
            URI.
        body (Mapping[str, str]): The parameters to send in the request body.
        access_token (Optional(str)): The access token needed to make the request.
        use_json (Optional(bool)): Use urlencoded format or json format for the
            content type. The default value is False.

    Returns:
        Mapping[str, str]: The JSON-decoded response data.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.
    """
    ok, response_data = _token_endpoint_request_no_throw(
        request, token_uri, body, access_token=access_token, use_json=use_json
    )
    if not ok:
        # Raises RefreshError with the decoded error details.
        _handle_error_response(response_data)
    return response_data
+
+
def jwt_grant(request, token_uri, assertion):
    """Implements the JWT Profile for OAuth 2.0 Authorization Grants.

    For more details, see `rfc7523 section 4`_.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        token_uri (str): The OAuth 2.0 authorizations server's token endpoint
            URI.
        assertion (str): The OAuth 2.0 assertion.

    Returns:
        Tuple[str, Optional[datetime], Mapping[str, str]]: The access token,
            expiration, and additional data returned by the token endpoint.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.

    .. _rfc7523 section 4: https://tools.ietf.org/html/rfc7523#section-4
    """
    body = {"grant_type": _JWT_GRANT_TYPE, "assertion": assertion}

    response_data = _token_endpoint_request(request, token_uri, body)

    try:
        access_token = response_data["access_token"]
    except KeyError as caught_exc:
        # Chain the KeyError so callers can see what was missing.
        new_exc = exceptions.RefreshError("No access token in response.", response_data)
        six.raise_from(new_exc, caught_exc)

    return access_token, _parse_expiry(response_data), response_data
+
+
def id_token_jwt_grant(request, token_uri, assertion):
    """Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
    requests an OpenID Connect ID Token instead of an access token.

    This is a variant on the standard JWT Profile that is currently unique
    to Google. This was added for the benefit of authenticating to services
    that require ID Tokens instead of access tokens or JWT bearer tokens.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        token_uri (str): The OAuth 2.0 authorization server's token endpoint
            URI.
        assertion (str): JWT token signed by a service account. The token's
            payload must include a ``target_audience`` claim.

    Returns:
        Tuple[str, Optional[datetime], Mapping[str, str]]:
            The (encoded) Open ID Connect ID Token, expiration, and additional
            data returned by the endpoint.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.
    """
    body = {"grant_type": _JWT_GRANT_TYPE, "assertion": assertion}

    response_data = _token_endpoint_request(request, token_uri, body)

    try:
        id_token = response_data["id_token"]
    except KeyError as caught_exc:
        new_exc = exceptions.RefreshError("No ID token in response.", response_data)
        six.raise_from(new_exc, caught_exc)

    # The ID token carries its own expiry in the "exp" claim; decode it
    # without verification just to read that value.
    payload = jwt.decode(id_token, verify=False)
    expiry = datetime.datetime.utcfromtimestamp(payload["exp"])

    return id_token, expiry, response_data
+
+
def _handle_refresh_grant_response(response_data, refresh_token):
    """Extract tokens from refresh grant response.

    Args:
        response_data (Mapping[str, str]): Refresh grant response data.
        refresh_token (str): Current refresh token.

    Returns:
        Tuple[str, str, Optional[datetime], Mapping[str, str]]: The access token,
            refresh token, expiration, and additional data returned by the token
            endpoint. If response_data doesn't have refresh token, then the current
            refresh token will be returned.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.
    """
    try:
        access_token = response_data["access_token"]
    except KeyError as caught_exc:
        new_exc = exceptions.RefreshError("No access token in response.", response_data)
        six.raise_from(new_exc, caught_exc)

    return (
        access_token,
        # Keep the current refresh token if the server did not rotate it.
        response_data.get("refresh_token", refresh_token),
        _parse_expiry(response_data),
        response_data,
    )
+
+
def refresh_grant(
    request,
    token_uri,
    refresh_token,
    client_id,
    client_secret,
    scopes=None,
    rapt_token=None,
):
    """Implements the OAuth 2.0 refresh token grant.

    For more details, see `rfc6749 section 6`_.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        token_uri (str): The OAuth 2.0 authorizations server's token endpoint
            URI.
        refresh_token (str): The refresh token to use to get a new access
            token.
        client_id (str): The OAuth 2.0 application's client ID.
        client_secret (str): The OAuth 2.0 application's client secret.
        scopes (Optional(Sequence[str])): Scopes to request. If present, all
            scopes must be authorized for the refresh token. Useful if refresh
            token has a wild card scope (e.g.
            'https://www.googleapis.com/auth/any-api').
        rapt_token (Optional(str)): The reauth Proof Token.

    Returns:
        Tuple[str, str, Optional[datetime], Mapping[str, str]]: The access
            token, new or current refresh token, expiration, and additional data
            returned by the token endpoint.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.

    .. _rfc6749 section 6: https://tools.ietf.org/html/rfc6749#section-6
    """
    body = {
        "grant_type": _REFRESH_GRANT_TYPE,
        "client_id": client_id,
        "client_secret": client_secret,
        "refresh_token": refresh_token,
    }
    # Optional parameters are only sent when supplied.
    if scopes:
        body["scope"] = " ".join(scopes)
    if rapt_token:
        body["rapt"] = rapt_token

    response_data = _token_endpoint_request(request, token_uri, body)
    return _handle_refresh_grant_response(response_data, refresh_token)
diff --git a/contrib/python/google-auth/py2/google/oauth2/challenges.py b/contrib/python/google-auth/py2/google/oauth2/challenges.py
new file mode 100644
index 0000000000..d0b070eda6
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/challenges.py
@@ -0,0 +1,157 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Challenges for reauthentication.
+"""
+
+import abc
+import base64
+import getpass
+import sys
+
+import six
+
+from google.auth import _helpers
+from google.auth import exceptions
+
+
+REAUTH_ORIGIN = "https://accounts.google.com"
+
+
+def get_user_password(text):
+ """Get password from user.
+
+ Override this function with a different logic if you are using this library
+ outside a CLI.
+
+ Args:
+ text (str): message for the password prompt.
+
+ Returns:
+ str: password string.
+ """
+ return getpass.getpass(text)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ReauthChallenge(object):
+ """Base class for reauth challenges."""
+
+ @property
+ @abc.abstractmethod
+ def name(self): # pragma: NO COVER
+ """Returns the name of the challenge."""
+ raise NotImplementedError("name property must be implemented")
+
+ @property
+ @abc.abstractmethod
+ def is_locally_eligible(self): # pragma: NO COVER
+ """Returns true if a challenge is supported locally on this machine."""
+ raise NotImplementedError("is_locally_eligible property must be implemented")
+
+ @abc.abstractmethod
+ def obtain_challenge_input(self, metadata): # pragma: NO COVER
+ """Performs logic required to obtain credentials and returns it.
+
+ Args:
+ metadata (Mapping): challenge metadata returned in the 'challenges' field in
+ the initial reauth request. Includes the 'challengeType' field
+ and other challenge-specific fields.
+
+ Returns:
+            response that will be sent to the reauth service as the content of
+ the 'proposalResponse' field in the request body. Usually a dict
+ with the keys specific to the challenge. For example,
+ ``{'credential': password}`` for password challenge.
+ """
+ raise NotImplementedError("obtain_challenge_input method must be implemented")
+
+
+class PasswordChallenge(ReauthChallenge):
+ """Challenge that asks for user's password."""
+
+ @property
+ def name(self):
+ return "PASSWORD"
+
+ @property
+ def is_locally_eligible(self):
+ return True
+
+ @_helpers.copy_docstring(ReauthChallenge)
+ def obtain_challenge_input(self, unused_metadata):
+ passwd = get_user_password("Please enter your password:")
+ if not passwd:
+ passwd = " " # avoid the server crashing in case of no password :D
+ return {"credential": passwd}
+
+
+class SecurityKeyChallenge(ReauthChallenge):
+ """Challenge that asks for user's security key touch."""
+
+ @property
+ def name(self):
+ return "SECURITY_KEY"
+
+ @property
+ def is_locally_eligible(self):
+ return True
+
+ @_helpers.copy_docstring(ReauthChallenge)
+ def obtain_challenge_input(self, metadata):
+ try:
+ import pyu2f.convenience.authenticator
+ import pyu2f.errors
+ import pyu2f.model
+ except ImportError:
+ raise exceptions.ReauthFailError(
+ "pyu2f dependency is required to use Security key reauth feature. "
+ "It can be installed via `pip install pyu2f` or `pip install google-auth[reauth]`."
+ )
+ sk = metadata["securityKey"]
+ challenges = sk["challenges"]
+ app_id = sk["applicationId"]
+
+ challenge_data = []
+ for c in challenges:
+ kh = c["keyHandle"].encode("ascii")
+ key = pyu2f.model.RegisteredKey(bytearray(base64.urlsafe_b64decode(kh)))
+ challenge = c["challenge"].encode("ascii")
+ challenge = base64.urlsafe_b64decode(challenge)
+ challenge_data.append({"key": key, "challenge": challenge})
+
+ try:
+ api = pyu2f.convenience.authenticator.CreateCompositeAuthenticator(
+ REAUTH_ORIGIN
+ )
+ response = api.Authenticate(
+ app_id, challenge_data, print_callback=sys.stderr.write
+ )
+ return {"securityKey": response}
+ except pyu2f.errors.U2FError as e:
+ if e.code == pyu2f.errors.U2FError.DEVICE_INELIGIBLE:
+ sys.stderr.write("Ineligible security key.\n")
+ elif e.code == pyu2f.errors.U2FError.TIMEOUT:
+ sys.stderr.write("Timed out while waiting for security key touch.\n")
+ else:
+ raise e
+ except pyu2f.errors.NoDeviceFoundError:
+ sys.stderr.write("No security key found.\n")
+ return None
+
+
+AVAILABLE_CHALLENGES = {
+ challenge.name: challenge
+ for challenge in [SecurityKeyChallenge(), PasswordChallenge()]
+}
diff --git a/contrib/python/google-auth/py2/google/oauth2/credentials.py b/contrib/python/google-auth/py2/google/oauth2/credentials.py
new file mode 100644
index 0000000000..158249ed5f
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/credentials.py
@@ -0,0 +1,479 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Credentials.
+
+This module provides credentials based on OAuth 2.0 access and refresh tokens.
+These credentials usually access resources on behalf of a user (resource
+owner).
+
+Specifically, this is intended to use access tokens acquired using the
+`Authorization Code grant`_ and can refresh those tokens using an
+optional `refresh token`_.
+
+Obtaining the initial access and refresh token is outside of the scope of this
+module. Consult `rfc6749 section 4.1`_ for complete details on the
+Authorization Code grant flow.
+
+.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
+.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
+.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
+"""
+
+from datetime import datetime
+import io
+import json
+
+import six
+
+from google.auth import _cloud_sdk
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.oauth2 import reauth
+
+
+# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
+_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
+
+
+class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaProject):
+ """Credentials using OAuth 2.0 access and refresh tokens.
+
+ The credentials are considered immutable. If you want to modify the
+ quota project, use :meth:`with_quota_project` or ::
+
+        credentials = credentials.with_quota_project('myproject-123')
+
+ If reauth is enabled, `pyu2f` dependency has to be installed in order to use security
+ key reauth feature. Dependency can be installed via `pip install pyu2f` or `pip install
+ google-auth[reauth]`.
+ """
+
+ def __init__(
+ self,
+ token,
+ refresh_token=None,
+ id_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ scopes=None,
+ default_scopes=None,
+ quota_project_id=None,
+ expiry=None,
+ rapt_token=None,
+ refresh_handler=None,
+ ):
+ """
+ Args:
+ token (Optional(str)): The OAuth 2.0 access token. Can be None
+ if refresh information is provided.
+ refresh_token (str): The OAuth 2.0 refresh token. If specified,
+ credentials can be refreshed.
+ id_token (str): The Open ID Connect ID Token.
+ token_uri (str): The OAuth 2.0 authorization server's token
+ endpoint URI. Must be specified for refresh, can be left as
+ None if the token can not be refreshed.
+ client_id (str): The OAuth 2.0 client ID. Must be specified for
+ refresh, can be left as None if the token can not be refreshed.
+ client_secret(str): The OAuth 2.0 client secret. Must be specified
+ for refresh, can be left as None if the token can not be
+ refreshed.
+ scopes (Sequence[str]): The scopes used to obtain authorization.
+ This parameter is used by :meth:`has_scopes`. OAuth 2.0
+ credentials can not request additional scopes after
+ authorization. The scopes must be derivable from the refresh
+ token if refresh information is provided (e.g. The refresh
+ token scopes are a superset of this or contain a wild card
+ scope like 'https://www.googleapis.com/auth/any-api').
+ default_scopes (Sequence[str]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+ quota_project_id (Optional[str]): The project ID used for quota and billing.
+ This project may be different from the project used to
+ create the credentials.
+ rapt_token (Optional[str]): The reauth Proof Token.
+ refresh_handler (Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]):
+ A callable which takes in the HTTP request callable and the list of
+ OAuth scopes and when called returns an access token string for the
+ requested scopes and its expiry datetime. This is useful when no
+ refresh tokens are provided and tokens are obtained by calling
+ some external process on demand. It is particularly useful for
+ retrieving downscoped tokens from a token broker.
+ """
+ super(Credentials, self).__init__()
+ self.token = token
+ self.expiry = expiry
+ self._refresh_token = refresh_token
+ self._id_token = id_token
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+ self._token_uri = token_uri
+ self._client_id = client_id
+ self._client_secret = client_secret
+ self._quota_project_id = quota_project_id
+ self._rapt_token = rapt_token
+ self.refresh_handler = refresh_handler
+
+ def __getstate__(self):
+ """A __getstate__ method must exist for the __setstate__ to be called
+ This is identical to the default implementation.
+ See https://docs.python.org/3.7/library/pickle.html#object.__setstate__
+ """
+ state_dict = self.__dict__.copy()
+ # Remove _refresh_handler function as there are limitations pickling and
+ # unpickling certain callables (lambda, functools.partial instances)
+ # because they need to be importable.
+ # Instead, the refresh_handler setter should be used to repopulate this.
+ del state_dict["_refresh_handler"]
+ return state_dict
+
+ def __setstate__(self, d):
+ """Credentials pickled with older versions of the class do not have
+ all the attributes."""
+ self.token = d.get("token")
+ self.expiry = d.get("expiry")
+ self._refresh_token = d.get("_refresh_token")
+ self._id_token = d.get("_id_token")
+ self._scopes = d.get("_scopes")
+ self._default_scopes = d.get("_default_scopes")
+ self._token_uri = d.get("_token_uri")
+ self._client_id = d.get("_client_id")
+ self._client_secret = d.get("_client_secret")
+ self._quota_project_id = d.get("_quota_project_id")
+ self._rapt_token = d.get("_rapt_token")
+ # The refresh_handler setter should be used to repopulate this.
+ self._refresh_handler = None
+
+ @property
+ def refresh_token(self):
+ """Optional[str]: The OAuth 2.0 refresh token."""
+ return self._refresh_token
+
+ @property
+ def scopes(self):
+ """Optional[str]: The OAuth 2.0 permission scopes."""
+ return self._scopes
+
+ @property
+ def token_uri(self):
+ """Optional[str]: The OAuth 2.0 authorization server's token endpoint
+ URI."""
+ return self._token_uri
+
+ @property
+ def id_token(self):
+ """Optional[str]: The Open ID Connect ID Token.
+
+ Depending on the authorization server and the scopes requested, this
+ may be populated when credentials are obtained and updated when
+ :meth:`refresh` is called. This token is a JWT. It can be verified
+ and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.
+ """
+ return self._id_token
+
+ @property
+ def client_id(self):
+ """Optional[str]: The OAuth 2.0 client ID."""
+ return self._client_id
+
+ @property
+ def client_secret(self):
+ """Optional[str]: The OAuth 2.0 client secret."""
+ return self._client_secret
+
+ @property
+ def requires_scopes(self):
+ """False: OAuth 2.0 credentials have their scopes set when
+ the initial token is requested and can not be changed."""
+ return False
+
+ @property
+ def rapt_token(self):
+ """Optional[str]: The reauth Proof Token."""
+ return self._rapt_token
+
+ @property
+ def refresh_handler(self):
+ """Returns the refresh handler if available.
+
+ Returns:
+ Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]:
+ The current refresh handler.
+ """
+ return self._refresh_handler
+
+ @refresh_handler.setter
+ def refresh_handler(self, value):
+ """Updates the current refresh handler.
+
+ Args:
+ value (Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]):
+ The updated value of the refresh handler.
+
+ Raises:
+ TypeError: If the value is not a callable or None.
+ """
+ if not callable(value) and value is not None:
+ raise TypeError("The provided refresh_handler is not a callable or None.")
+ self._refresh_handler = value
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+
+ return self.__class__(
+ self.token,
+ refresh_token=self.refresh_token,
+ id_token=self.id_token,
+ token_uri=self.token_uri,
+ client_id=self.client_id,
+ client_secret=self.client_secret,
+ scopes=self.scopes,
+ default_scopes=self.default_scopes,
+ quota_project_id=quota_project_id,
+ rapt_token=self.rapt_token,
+ )
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # Use refresh handler if available and no refresh token is
+ # available. This is useful in general when tokens are obtained by calling
+ # some external process on demand. It is particularly useful for retrieving
+ # downscoped tokens from a token broker.
+ if self._refresh_token is None and self.refresh_handler:
+ token, expiry = self.refresh_handler(request, scopes=scopes)
+ # Validate returned data.
+ if not isinstance(token, str):
+ raise exceptions.RefreshError(
+ "The refresh_handler returned token is not a string."
+ )
+ if not isinstance(expiry, datetime):
+ raise exceptions.RefreshError(
+ "The refresh_handler returned expiry is not a datetime object."
+ )
+ if _helpers.utcnow() >= expiry - _helpers.CLOCK_SKEW:
+ raise exceptions.RefreshError(
+ "The credentials returned by the refresh_handler are "
+ "already expired."
+ )
+ self.token = token
+ self.expiry = expiry
+ return
+
+ if (
+ self._refresh_token is None
+ or self._token_uri is None
+ or self._client_id is None
+ or self._client_secret is None
+ ):
+ raise exceptions.RefreshError(
+ "The credentials do not contain the necessary fields need to "
+ "refresh the access token. You must specify refresh_token, "
+ "token_uri, client_id, and client_secret."
+ )
+
+ (
+ access_token,
+ refresh_token,
+ expiry,
+ grant_response,
+ rapt_token,
+ ) = reauth.refresh_grant(
+ request,
+ self._token_uri,
+ self._refresh_token,
+ self._client_id,
+ self._client_secret,
+ scopes=scopes,
+ rapt_token=self._rapt_token,
+ )
+
+ self.token = access_token
+ self.expiry = expiry
+ self._refresh_token = refresh_token
+ self._id_token = grant_response.get("id_token")
+ self._rapt_token = rapt_token
+
+ if scopes and "scope" in grant_response:
+ requested_scopes = frozenset(scopes)
+ granted_scopes = frozenset(grant_response["scope"].split())
+ scopes_requested_but_not_granted = requested_scopes - granted_scopes
+ if scopes_requested_but_not_granted:
+ raise exceptions.RefreshError(
+ "Not all requested scopes were granted by the "
+ "authorization server, missing scopes {}.".format(
+ ", ".join(scopes_requested_but_not_granted)
+ )
+ )
+
+ @classmethod
+ def from_authorized_user_info(cls, info, scopes=None):
+ """Creates a Credentials instance from parsed authorized user info.
+
+ Args:
+ info (Mapping[str, str]): The authorized user info in Google
+ format.
+ scopes (Sequence[str]): Optional list of scopes to include in the
+ credentials.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ keys_needed = set(("refresh_token", "client_id", "client_secret"))
+ missing = keys_needed.difference(six.iterkeys(info))
+
+ if missing:
+ raise ValueError(
+ "Authorized user info was not in the expected format, missing "
+ "fields {}.".format(", ".join(missing))
+ )
+
+ # access token expiry (datetime obj); auto-expire if not saved
+ expiry = info.get("expiry")
+ if expiry:
+ expiry = datetime.strptime(
+ expiry.rstrip("Z").split(".")[0], "%Y-%m-%dT%H:%M:%S"
+ )
+ else:
+ expiry = _helpers.utcnow() - _helpers.CLOCK_SKEW
+
+ # process scopes, which needs to be a seq
+ if scopes is None and "scopes" in info:
+ scopes = info.get("scopes")
+ if isinstance(scopes, str):
+ scopes = scopes.split(" ")
+
+ return cls(
+ token=info.get("token"),
+ refresh_token=info.get("refresh_token"),
+ token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, # always overrides
+ scopes=scopes,
+ client_id=info.get("client_id"),
+ client_secret=info.get("client_secret"),
+ quota_project_id=info.get("quota_project_id"), # may not exist
+ expiry=expiry,
+ )
+
+ @classmethod
+ def from_authorized_user_file(cls, filename, scopes=None):
+ """Creates a Credentials instance from an authorized user json file.
+
+ Args:
+ filename (str): The path to the authorized user json file.
+ scopes (Sequence[str]): Optional list of scopes to include in the
+ credentials.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the file is not in the expected format.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)
+ return cls.from_authorized_user_info(data, scopes)
+
+ def to_json(self, strip=None):
+ """Utility function that creates a JSON representation of a Credentials
+ object.
+
+ Args:
+ strip (Sequence[str]): Optional list of members to exclude from the
+ generated JSON.
+
+ Returns:
+ str: A JSON representation of this instance. When converted into
+ a dictionary, it can be passed to from_authorized_user_info()
+ to create a new credential instance.
+ """
+ prep = {
+ "token": self.token,
+ "refresh_token": self.refresh_token,
+ "token_uri": self.token_uri,
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "scopes": self.scopes,
+ "rapt_token": self.rapt_token,
+ }
+ if self.expiry: # flatten expiry timestamp
+ prep["expiry"] = self.expiry.isoformat() + "Z"
+
+ # Remove empty entries (those which are None)
+ prep = {k: v for k, v in prep.items() if v is not None}
+
+        # Remove entries that explicitly need to be removed
+ if strip is not None:
+ prep = {k: v for k, v in prep.items() if k not in strip}
+
+ return json.dumps(prep)
+
+
+class UserAccessTokenCredentials(credentials.CredentialsWithQuotaProject):
+ """Access token credentials for user account.
+
+ Obtain the access token for a given user account or the current active
+ user account with the ``gcloud auth print-access-token`` command.
+
+ Args:
+ account (Optional[str]): Account to get the access token for. If not
+ specified, the current active account will be used.
+ quota_project_id (Optional[str]): The project ID used for quota
+ and billing.
+ """
+
+ def __init__(self, account=None, quota_project_id=None):
+ super(UserAccessTokenCredentials, self).__init__()
+ self._account = account
+ self._quota_project_id = quota_project_id
+
+ def with_account(self, account):
+ """Create a new instance with the given account.
+
+ Args:
+ account (str): Account to get the access token for.
+
+ Returns:
+ google.oauth2.credentials.UserAccessTokenCredentials: The created
+ credentials with the given account.
+ """
+ return self.__class__(account=account, quota_project_id=self._quota_project_id)
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(account=self._account, quota_project_id=quota_project_id)
+
+ def refresh(self, request):
+ """Refreshes the access token.
+
+ Args:
+ request (google.auth.transport.Request): This argument is required
+ by the base class interface but not used in this implementation,
+ so just set it to `None`.
+
+ Raises:
+ google.auth.exceptions.UserAccessTokenError: If the access token
+ refresh failed.
+ """
+ self.token = _cloud_sdk.get_auth_access_token(self._account)
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def before_request(self, request, method, url, headers):
+ self.refresh(request)
+ self.apply(headers)
diff --git a/contrib/python/google-auth/py2/google/oauth2/id_token.py b/contrib/python/google-auth/py2/google/oauth2/id_token.py
new file mode 100644
index 0000000000..540ccd1251
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/id_token.py
@@ -0,0 +1,264 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google ID Token helpers.
+
+Provides support for verifying `OpenID Connect ID Tokens`_, especially ones
+generated by Google infrastructure.
+
+To parse and verify an ID Token issued by Google's OAuth 2.0 authorization
+server use :func:`verify_oauth2_token`. To verify an ID Token issued by
+Firebase, use :func:`verify_firebase_token`.
+
+A general purpose ID Token verifier is available as :func:`verify_token`.
+
+Example::
+
+ from google.oauth2 import id_token
+ from google.auth.transport import requests
+
+ request = requests.Request()
+
+ id_info = id_token.verify_oauth2_token(
+ token, request, 'my-client-id.example.com')
+
+ userid = id_info['sub']
+
+By default, this will re-fetch certificates for each verification. Because
+Google's public keys are only changed infrequently (on the order of once per
+day), you may wish to take advantage of caching to reduce latency and the
+potential for network errors. This can be accomplished using an external
+library like `CacheControl`_ to create a cache-aware
+:class:`google.auth.transport.Request`::
+
+ import cachecontrol
+ import google.auth.transport.requests
+ import requests
+
+ session = requests.session()
+ cached_session = cachecontrol.CacheControl(session)
+ request = google.auth.transport.requests.Request(session=cached_session)
+
+.. _OpenID Connect ID Tokens:
+ http://openid.net/specs/openid-connect-core-1_0.html#IDToken
+.. _CacheControl: https://cachecontrol.readthedocs.io
+"""
+
+import json
+import os
+
+import six
+from six.moves import http_client
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import jwt
+
+
+# The URL that provides public certificates for verifying ID tokens issued
+# by Google's OAuth 2.0 authorization server.
+_GOOGLE_OAUTH2_CERTS_URL = "https://www.googleapis.com/oauth2/v1/certs"
+
+# The URL that provides public certificates for verifying ID tokens issued
+# by Firebase and the Google APIs infrastructure
+_GOOGLE_APIS_CERTS_URL = (
+ "https://www.googleapis.com/robot/v1/metadata/x509"
+ "/securetoken@system.gserviceaccount.com"
+)
+
+_GOOGLE_ISSUERS = ["accounts.google.com", "https://accounts.google.com"]
+
+
+def _fetch_certs(request, certs_url):
+ """Fetches certificates.
+
+    Google-style certificate endpoints return JSON in the format of
+ ``{'key id': 'x509 certificate'}``.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ certs_url (str): The certificate endpoint URL.
+
+ Returns:
+ Mapping[str, str]: A mapping of public key ID to x.509 certificate
+ data.
+ """
+ response = request(certs_url, method="GET")
+
+ if response.status != http_client.OK:
+ raise exceptions.TransportError(
+ "Could not fetch certificates at {}".format(certs_url)
+ )
+
+ return json.loads(response.data.decode("utf-8"))
+
+
+def verify_token(id_token, request, audience=None, certs_url=_GOOGLE_OAUTH2_CERTS_URL):
+ """Verifies an ID token and returns the decoded token.
+
+ Args:
+ id_token (Union[str, bytes]): The encoded token.
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ audience (str or list): The audience or audiences that this token is
+ intended for. If None then the audience is not verified.
+ certs_url (str): The URL that specifies the certificates to use to
+ verify the token. This URL should return JSON in the format of
+ ``{'key id': 'x509 certificate'}``.
+
+ Returns:
+ Mapping[str, Any]: The decoded token.
+ """
+ certs = _fetch_certs(request, certs_url)
+
+ return jwt.decode(id_token, certs=certs, audience=audience)
+
+
+def verify_oauth2_token(id_token, request, audience=None):
+ """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.
+
+ Args:
+ id_token (Union[str, bytes]): The encoded token.
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ audience (str): The audience that this token is intended for. This is
+ typically your application's OAuth 2.0 client ID. If None then the
+ audience is not verified.
+
+ Returns:
+ Mapping[str, Any]: The decoded token.
+
+ Raises:
+ exceptions.GoogleAuthError: If the issuer is invalid.
+ """
+ idinfo = verify_token(
+ id_token, request, audience=audience, certs_url=_GOOGLE_OAUTH2_CERTS_URL
+ )
+
+ if idinfo["iss"] not in _GOOGLE_ISSUERS:
+ raise exceptions.GoogleAuthError(
+ "Wrong issuer. 'iss' should be one of the following: {}".format(
+ _GOOGLE_ISSUERS
+ )
+ )
+
+ return idinfo
+
+
+def verify_firebase_token(id_token, request, audience=None):
+ """Verifies an ID Token issued by Firebase Authentication.
+
+ Args:
+ id_token (Union[str, bytes]): The encoded token.
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ audience (str): The audience that this token is intended for. This is
+ typically your Firebase application ID. If None then the audience
+ is not verified.
+
+ Returns:
+ Mapping[str, Any]: The decoded token.
+ """
+ return verify_token(
+ id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL
+ )
+
+
+def fetch_id_token(request, audience):
+ """Fetch the ID Token from the current environment.
+
+ This function acquires ID token from the environment in the following order.
+ See https://google.aip.dev/auth/4110.
+
+ 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+ to the path of a valid service account JSON file, then ID token is
+ acquired using this service account credentials.
+ 2. If the application is running in Compute Engine, App Engine or Cloud Run,
+ then the ID token are obtained from the metadata server.
+ 3. If metadata server doesn't exist and no valid service account credentials
+ are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will
+ be raised.
+
+ Example::
+
+ import google.oauth2.id_token
+ import google.auth.transport.requests
+
+ request = google.auth.transport.requests.Request()
+ target_audience = "https://pubsub.googleapis.com"
+
+ id_token = google.oauth2.id_token.fetch_id_token(request, target_audience)
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ audience (str): The audience that this ID token is intended for.
+
+ Returns:
+ str: The ID token.
+
+ Raises:
+ ~google.auth.exceptions.DefaultCredentialsError:
+ If metadata server doesn't exist and no valid service account
+ credentials are found.
+ """
+ # 1. Try to get credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
+ # variable.
+ credentials_filename = os.environ.get(environment_vars.CREDENTIALS)
+ if credentials_filename:
+ if not (
+ os.path.exists(credentials_filename)
+ and os.path.isfile(credentials_filename)
+ ):
+ raise exceptions.DefaultCredentialsError(
+ "GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+ )
+
+ try:
+ with open(credentials_filename, "r") as f:
+ from google.oauth2 import service_account
+
+ info = json.load(f)
+ if info.get("type") == "service_account":
+ credentials = service_account.IDTokenCredentials.from_service_account_info(
+ info, target_audience=audience
+ )
+ credentials.refresh(request)
+ return credentials.token
+ except ValueError as caught_exc:
+ new_exc = exceptions.DefaultCredentialsError(
+ "GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials.",
+ caught_exc,
+ )
+ six.raise_from(new_exc, caught_exc)
+
+    # 2. Try to fetch ID token from metadata server if it exists. The code works for GAE and
+ # Cloud Run metadata server as well.
+ try:
+ from google.auth import compute_engine
+ from google.auth.compute_engine import _metadata
+
+ if _metadata.ping(request):
+ credentials = compute_engine.IDTokenCredentials(
+ request, audience, use_metadata_identity_endpoint=True
+ )
+ credentials.refresh(request)
+ return credentials.token
+ except (ImportError, exceptions.TransportError):
+ pass
+
+ raise exceptions.DefaultCredentialsError(
+ "Neither metadata server or valid service account credentials are found."
+ )
diff --git a/contrib/python/google-auth/py2/google/oauth2/reauth.py b/contrib/python/google-auth/py2/google/oauth2/reauth.py
new file mode 100644
index 0000000000..d914fe9a7d
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/reauth.py
@@ -0,0 +1,341 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module that provides functions for handling rapt authentication.
+
+Reauth is a process of obtaining additional authentication (such as password,
+security token, etc.) while refreshing OAuth 2.0 credentials for a user.
+
+Credentials that use the Reauth flow must have the reauth scope,
+``https://www.googleapis.com/auth/accounts.reauth``.
+
+This module provides a high-level function for executing the Reauth process,
+:func:`refresh_grant`, and lower-level helpers for doing the individual
+steps of the reauth process.
+
+Those steps are:
+
+1. Obtaining a list of challenges from the reauth server.
+2. Running through each challenge and sending the result back to the reauth
+ server.
+3. Refreshing the access token using the returned rapt token.
+"""
+
+import sys
+
+from six.moves import range
+
+from google.auth import exceptions
+from google.oauth2 import _client
+from google.oauth2 import challenges
+
+
+_REAUTH_SCOPE = "https://www.googleapis.com/auth/accounts.reauth"
+_REAUTH_API = "https://reauth.googleapis.com/v2/sessions"
+
+_REAUTH_NEEDED_ERROR = "invalid_grant"
+_REAUTH_NEEDED_ERROR_INVALID_RAPT = "invalid_rapt"
+_REAUTH_NEEDED_ERROR_RAPT_REQUIRED = "rapt_required"
+
+_AUTHENTICATED = "AUTHENTICATED"
+_CHALLENGE_REQUIRED = "CHALLENGE_REQUIRED"
+_CHALLENGE_PENDING = "CHALLENGE_PENDING"
+
+
+# Override this global variable to set custom max number of rounds of reauth
+# challenges should be run.
+RUN_CHALLENGE_RETRY_LIMIT = 5
+
+
+def is_interactive():
+    """Check if we are in an interactive environment.
+
+ Override this function with a different logic if you are using this library
+ outside a CLI.
+
+ If the rapt token needs refreshing, the user needs to answer the challenges.
+    If the user is not in an interactive environment, the challenges cannot
+ be answered and we just wait for timeout for no reason.
+
+ Returns:
+ bool: True if is interactive environment, False otherwise.
+ """
+
+ return sys.stdin.isatty()
+
+
+def _get_challenges(
+ request, supported_challenge_types, access_token, requested_scopes=None
+):
+ """Does initial request to reauth API to get the challenges.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ supported_challenge_types (Sequence[str]): list of challenge names
+ supported by the manager.
+ access_token (str): Access token with reauth scopes.
+ requested_scopes (Optional(Sequence[str])): Authorized scopes for the credentials.
+
+ Returns:
+ dict: The response from the reauth API.
+ """
+ body = {"supportedChallengeTypes": supported_challenge_types}
+ if requested_scopes:
+ body["oauthScopesForDomainPolicyLookup"] = requested_scopes
+
+ return _client._token_endpoint_request(
+ request, _REAUTH_API + ":start", body, access_token=access_token, use_json=True
+ )
+
+
+def _send_challenge_result(
+ request, session_id, challenge_id, client_input, access_token
+):
+ """Attempt to refresh access token by sending next challenge result.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ session_id (str): session id returned by the initial reauth call.
+ challenge_id (str): challenge id returned by the initial reauth call.
+ client_input: dict with a challenge-specific client input. For example:
+ ``{'credential': password}`` for password challenge.
+ access_token (str): Access token with reauth scopes.
+
+ Returns:
+ dict: The response from the reauth API.
+ """
+ body = {
+ "sessionId": session_id,
+ "challengeId": challenge_id,
+ "action": "RESPOND",
+ "proposalResponse": client_input,
+ }
+
+ return _client._token_endpoint_request(
+ request,
+ _REAUTH_API + "/{}:continue".format(session_id),
+ body,
+ access_token=access_token,
+ use_json=True,
+ )
+
+
+def _run_next_challenge(msg, request, access_token):
+ """Get the next challenge from msg and run it.
+
+ Args:
+ msg (dict): Reauth API response body (either from the initial request to
+ https://reauth.googleapis.com/v2/sessions:start or from sending the
+ previous challenge response to
+ https://reauth.googleapis.com/v2/sessions/id:continue)
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ access_token (str): reauth access token
+
+ Returns:
+ dict: The response from the reauth API.
+
+ Raises:
+ google.auth.exceptions.ReauthError: if reauth failed.
+ """
+ for challenge in msg["challenges"]:
+ if challenge["status"] != "READY":
+ # Skip non-activated challenges.
+ continue
+ c = challenges.AVAILABLE_CHALLENGES.get(challenge["challengeType"], None)
+ if not c:
+ raise exceptions.ReauthFailError(
+ "Unsupported challenge type {0}. Supported types: {1}".format(
+ challenge["challengeType"],
+ ",".join(list(challenges.AVAILABLE_CHALLENGES.keys())),
+ )
+ )
+ if not c.is_locally_eligible:
+ raise exceptions.ReauthFailError(
+ "Challenge {0} is not locally eligible".format(
+ challenge["challengeType"]
+ )
+ )
+ client_input = c.obtain_challenge_input(challenge)
+ if not client_input:
+ return None
+ return _send_challenge_result(
+ request,
+ msg["sessionId"],
+ challenge["challengeId"],
+ client_input,
+ access_token,
+ )
+ return None
+
+
+def _obtain_rapt(request, access_token, requested_scopes):
+ """Given an http request method and reauth access token, get rapt token.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ access_token (str): reauth access token
+ requested_scopes (Sequence[str]): scopes required by the client application
+
+ Returns:
+ str: The rapt token.
+
+ Raises:
+ google.auth.exceptions.ReauthError: if reauth failed
+ """
+ msg = _get_challenges(
+ request,
+ list(challenges.AVAILABLE_CHALLENGES.keys()),
+ access_token,
+ requested_scopes,
+ )
+
+ if msg["status"] == _AUTHENTICATED:
+ return msg["encodedProofOfReauthToken"]
+
+ for _ in range(0, RUN_CHALLENGE_RETRY_LIMIT):
+ if not (
+ msg["status"] == _CHALLENGE_REQUIRED or msg["status"] == _CHALLENGE_PENDING
+ ):
+ raise exceptions.ReauthFailError(
+ "Reauthentication challenge failed due to API error: {}".format(
+ msg["status"]
+ )
+ )
+
+ if not is_interactive():
+ raise exceptions.ReauthFailError(
+ "Reauthentication challenge could not be answered because you are not"
+ " in an interactive session."
+ )
+
+ msg = _run_next_challenge(msg, request, access_token)
+
+ if msg["status"] == _AUTHENTICATED:
+ return msg["encodedProofOfReauthToken"]
+
+ # If we got here it means we didn't get authenticated.
+ raise exceptions.ReauthFailError("Failed to obtain rapt token.")
+
+
+def get_rapt_token(
+ request, client_id, client_secret, refresh_token, token_uri, scopes=None
+):
+ """Given an http request method and refresh_token, get rapt token.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ client_id (str): client id to get access token for reauth scope.
+ client_secret (str): client secret for the client_id
+ refresh_token (str): refresh token to refresh access token
+ token_uri (str): uri to refresh access token
+ scopes (Optional(Sequence[str])): scopes required by the client application
+
+ Returns:
+ str: The rapt token.
+ Raises:
+ google.auth.exceptions.RefreshError: If reauth failed.
+ """
+ sys.stderr.write("Reauthentication required.\n")
+
+ # Get access token for reauth.
+ access_token, _, _, _ = _client.refresh_grant(
+ request=request,
+ client_id=client_id,
+ client_secret=client_secret,
+ refresh_token=refresh_token,
+ token_uri=token_uri,
+ scopes=[_REAUTH_SCOPE],
+ )
+
+ # Get rapt token from reauth API.
+ rapt_token = _obtain_rapt(request, access_token, requested_scopes=scopes)
+
+ return rapt_token
+
+
+def refresh_grant(
+ request,
+ token_uri,
+ refresh_token,
+ client_id,
+ client_secret,
+ scopes=None,
+ rapt_token=None,
+):
+ """Implements the reauthentication flow.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+        token_uri (str): The OAuth 2.0 authorization server's token endpoint
+ URI.
+ refresh_token (str): The refresh token to use to get a new access
+ token.
+ client_id (str): The OAuth 2.0 application's client ID.
+        client_secret (str): The OAuth 2.0 application's client secret.
+ scopes (Optional(Sequence[str])): Scopes to request. If present, all
+ scopes must be authorized for the refresh token. Useful if refresh
+ token has a wild card scope (e.g.
+ 'https://www.googleapis.com/auth/any-api').
+ rapt_token (Optional(str)): The rapt token for reauth.
+
+ Returns:
+ Tuple[str, Optional[str], Optional[datetime], Mapping[str, str], str]: The
+ access token, new refresh token, expiration, the additional data
+ returned by the token endpoint, and the rapt token.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+ body = {
+ "grant_type": _client._REFRESH_GRANT_TYPE,
+ "client_id": client_id,
+ "client_secret": client_secret,
+ "refresh_token": refresh_token,
+ }
+ if scopes:
+ body["scope"] = " ".join(scopes)
+ if rapt_token:
+ body["rapt"] = rapt_token
+
+ response_status_ok, response_data = _client._token_endpoint_request_no_throw(
+ request, token_uri, body
+ )
+ if (
+ not response_status_ok
+ and response_data.get("error") == _REAUTH_NEEDED_ERROR
+ and (
+ response_data.get("error_subtype") == _REAUTH_NEEDED_ERROR_INVALID_RAPT
+ or response_data.get("error_subtype") == _REAUTH_NEEDED_ERROR_RAPT_REQUIRED
+ )
+ ):
+ rapt_token = get_rapt_token(
+ request, client_id, client_secret, refresh_token, token_uri, scopes=scopes
+ )
+ body["rapt"] = rapt_token
+ (response_status_ok, response_data) = _client._token_endpoint_request_no_throw(
+ request, token_uri, body
+ )
+
+ if not response_status_ok:
+ _client._handle_error_response(response_data)
+ return _client._handle_refresh_grant_response(response_data, refresh_token) + (
+ rapt_token,
+ )
diff --git a/contrib/python/google-auth/py2/google/oauth2/service_account.py b/contrib/python/google-auth/py2/google/oauth2/service_account.py
new file mode 100644
index 0000000000..8f18f26ea1
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/service_account.py
@@ -0,0 +1,685 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Service Accounts: JSON Web Token (JWT) Profile for OAuth 2.0
+
+This module implements the JWT Profile for OAuth 2.0 Authorization Grants
+as defined by `RFC 7523`_ with particular support for how this RFC is
+implemented in Google's infrastructure. Google refers to these credentials
+as *Service Accounts*.
+
+Service accounts are used for server-to-server communication, such as
+interactions between a web application server and a Google service. The
+service account belongs to your application instead of to an individual end
+user. In contrast to other OAuth 2.0 profiles, no users are involved and your
+application "acts" as the service account.
+
+Typically an application uses a service account when the application uses
+Google APIs to work with its own data rather than a user's data. For example,
+an application that uses Google Cloud Datastore for data persistence would use
+a service account to authenticate its calls to the Google Cloud Datastore API.
+However, an application that needs to access a user's Drive documents would
+use the normal OAuth 2.0 profile.
+
+Additionally, Google Apps domain administrators can grant service accounts
+`domain-wide delegation`_ authority to access user data on behalf of users in
+the domain.
+
+This profile uses a JWT to acquire an OAuth 2.0 access token. The JWT is used
+in place of the usual authorization token returned during the standard
+OAuth 2.0 Authorization Code grant. The JWT is only used for this purpose, as
+the acquired access token is used as the bearer token when making requests
+using these credentials.
+
+This profile differs from normal OAuth 2.0 profile because no user consent
+step is required. The use of the private key allows this profile to assert
+identity directly.
+
+This profile also differs from the :mod:`google.auth.jwt` authentication
+because the JWT credentials use the JWT directly as the bearer token. This
+profile instead only uses the JWT to obtain an OAuth 2.0 access token. The
+obtained OAuth 2.0 access token is used as the bearer token.
+
+Domain-wide delegation
+----------------------
+
+Domain-wide delegation allows a service account to access user data on
+behalf of any user in a Google Apps domain without consent from the user.
+For example, an application that uses the Google Calendar API to add events to
+the calendars of all users in a Google Apps domain would use a service account
+to access the Google Calendar API on behalf of users.
+
+The Google Apps administrator must explicitly authorize the service account to
+do this. This authorization step is referred to as "delegating domain-wide
+authority" to a service account.
+
+You can use domain-wide delegation by creating a set of credentials with a
+specific subject using :meth:`~Credentials.with_subject`.
+
+.. _RFC 7523: https://tools.ietf.org/html/rfc7523
+"""
+
+import copy
+import datetime
+
+from google.auth import _helpers
+from google.auth import _service_account_info
+from google.auth import credentials
+from google.auth import jwt
+from google.oauth2 import _client
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
+
+
+class Credentials(
+ credentials.Signing, credentials.Scoped, credentials.CredentialsWithQuotaProject
+):
+ """Service account credentials
+
+ Usually, you'll create these credentials with one of the helper
+ constructors. To create credentials using a Google service account
+ private key JSON file::
+
+ credentials = service_account.Credentials.from_service_account_file(
+ 'service-account.json')
+
+ Or if you already have the service account file loaded::
+
+ service_account_info = json.load(open('service_account.json'))
+ credentials = service_account.Credentials.from_service_account_info(
+ service_account_info)
+
+ Both helper methods pass on arguments to the constructor, so you can
+ specify additional scopes and a subject if necessary::
+
+ credentials = service_account.Credentials.from_service_account_file(
+ 'service-account.json',
+ scopes=['email'],
+ subject='user@example.com')
+
+ The credentials are considered immutable. If you want to modify the scopes
+ or the subject used for delegation, use :meth:`with_scopes` or
+ :meth:`with_subject`::
+
+ scoped_credentials = credentials.with_scopes(['email'])
+ delegated_credentials = credentials.with_subject(subject)
+
+ To add a quota project, use :meth:`with_quota_project`::
+
+ credentials = credentials.with_quota_project('myproject-123')
+ """
+
+ def __init__(
+ self,
+ signer,
+ service_account_email,
+ token_uri,
+ scopes=None,
+ default_scopes=None,
+ subject=None,
+ project_id=None,
+ quota_project_id=None,
+ additional_claims=None,
+ always_use_jwt_access=False,
+ ):
+ """
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ service_account_email (str): The service account's email.
+ scopes (Sequence[str]): User-defined scopes to request during the
+ authorization grant.
+ default_scopes (Sequence[str]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+ token_uri (str): The OAuth 2.0 Token URI.
+ subject (str): For domain-wide delegation, the email address of the
+            user for which to request delegated access.
+ project_id (str): Project ID associated with the service account
+ credential.
+ quota_project_id (Optional[str]): The project ID used for quota and
+ billing.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT assertion used in the authorization grant.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be always used.
+
+ .. note:: Typically one of the helper constructors
+ :meth:`from_service_account_file` or
+ :meth:`from_service_account_info` are used instead of calling the
+ constructor directly.
+ """
+ super(Credentials, self).__init__()
+
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+ self._signer = signer
+ self._service_account_email = service_account_email
+ self._subject = subject
+ self._project_id = project_id
+ self._quota_project_id = quota_project_id
+ self._token_uri = token_uri
+ self._always_use_jwt_access = always_use_jwt_access
+
+ self._jwt_credentials = None
+
+ if additional_claims is not None:
+ self._additional_claims = additional_claims
+ else:
+ self._additional_claims = {}
+
+ @classmethod
+ def _from_signer_and_info(cls, signer, info, **kwargs):
+ """Creates a Credentials instance from a signer and service account
+ info.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ info (Mapping[str, str]): The service account info.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.Credentials: The constructed credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ return cls(
+ signer,
+ service_account_email=info["client_email"],
+ token_uri=info["token_uri"],
+ project_id=info.get("project_id"),
+ **kwargs
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info, **kwargs):
+ """Creates a Credentials instance from parsed service account info.
+
+ Args:
+ info (Mapping[str, str]): The service account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.service_account.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ signer = _service_account_info.from_dict(
+ info, require=["client_email", "token_uri"]
+ )
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename, **kwargs):
+ """Creates a Credentials instance from a service account json file.
+
+ Args:
+ filename (str): The path to the service account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.service_account.Credentials: The constructed
+ credentials.
+ """
+ info, signer = _service_account_info.from_filename(
+ filename, require=["client_email", "token_uri"]
+ )
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @property
+ def service_account_email(self):
+ """The service account email."""
+ return self._service_account_email
+
+ @property
+ def project_id(self):
+ """Project ID associated with this credential."""
+ return self._project_id
+
+ @property
+ def requires_scopes(self):
+ """Checks if the credentials requires scopes.
+
+ Returns:
+ bool: True if there are no scopes set otherwise False.
+ """
+ return True if not self._scopes else False
+
+ @_helpers.copy_docstring(credentials.Scoped)
+ def with_scopes(self, scopes, default_scopes=None):
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ token_uri=self._token_uri,
+ subject=self._subject,
+ project_id=self._project_id,
+ quota_project_id=self._quota_project_id,
+ additional_claims=self._additional_claims.copy(),
+ always_use_jwt_access=self._always_use_jwt_access,
+ )
+
+ def with_always_use_jwt_access(self, always_use_jwt_access):
+ """Create a copy of these credentials with the specified always_use_jwt_access value.
+
+ Args:
+ always_use_jwt_access (bool): Whether always use self signed JWT or not.
+
+ Returns:
+ google.auth.service_account.Credentials: A new credentials
+ instance.
+ """
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ scopes=self._scopes,
+ default_scopes=self._default_scopes,
+ token_uri=self._token_uri,
+ subject=self._subject,
+ project_id=self._project_id,
+ quota_project_id=self._quota_project_id,
+ additional_claims=self._additional_claims.copy(),
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ def with_subject(self, subject):
+ """Create a copy of these credentials with the specified subject.
+
+ Args:
+ subject (str): The subject claim.
+
+ Returns:
+ google.auth.service_account.Credentials: A new credentials
+ instance.
+ """
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ scopes=self._scopes,
+ default_scopes=self._default_scopes,
+ token_uri=self._token_uri,
+ subject=subject,
+ project_id=self._project_id,
+ quota_project_id=self._quota_project_id,
+ additional_claims=self._additional_claims.copy(),
+ always_use_jwt_access=self._always_use_jwt_access,
+ )
+
+ def with_claims(self, additional_claims):
+ """Returns a copy of these credentials with modified claims.
+
+ Args:
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT payload. This will be merged with the current
+ additional claims.
+
+ Returns:
+ google.auth.service_account.Credentials: A new credentials
+ instance.
+ """
+ new_additional_claims = copy.deepcopy(self._additional_claims)
+ new_additional_claims.update(additional_claims or {})
+
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ scopes=self._scopes,
+ default_scopes=self._default_scopes,
+ token_uri=self._token_uri,
+ subject=self._subject,
+ project_id=self._project_id,
+ quota_project_id=self._quota_project_id,
+ additional_claims=new_additional_claims,
+ always_use_jwt_access=self._always_use_jwt_access,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ default_scopes=self._default_scopes,
+ scopes=self._scopes,
+ token_uri=self._token_uri,
+ subject=self._subject,
+ project_id=self._project_id,
+ quota_project_id=quota_project_id,
+ additional_claims=self._additional_claims.copy(),
+ always_use_jwt_access=self._always_use_jwt_access,
+ )
+
+ def _make_authorization_grant_assertion(self):
+ """Create the OAuth 2.0 assertion.
+
+ This assertion is used during the OAuth 2.0 grant to acquire an
+ access token.
+
+ Returns:
+ bytes: The authorization grant assertion.
+ """
+ now = _helpers.utcnow()
+ lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
+ expiry = now + lifetime
+
+ payload = {
+ "iat": _helpers.datetime_to_secs(now),
+ "exp": _helpers.datetime_to_secs(expiry),
+ # The issuer must be the service account email.
+ "iss": self._service_account_email,
+ # The audience must be the auth token endpoint's URI
+ "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT,
+ "scope": _helpers.scopes_to_string(self._scopes or ()),
+ }
+
+ payload.update(self._additional_claims)
+
+ # The subject can be a user email for domain-wide delegation.
+ if self._subject:
+ payload.setdefault("sub", self._subject)
+
+ token = jwt.encode(self._signer, payload)
+
+ return token
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ if self._jwt_credentials is not None:
+ self._jwt_credentials.refresh(request)
+ self.token = self._jwt_credentials.token
+ self.expiry = self._jwt_credentials.expiry
+ else:
+ assertion = self._make_authorization_grant_assertion()
+ access_token, expiry, _ = _client.jwt_grant(
+ request, self._token_uri, assertion
+ )
+ self.token = access_token
+ self.expiry = expiry
+
+ def _create_self_signed_jwt(self, audience):
+ """Create a self-signed JWT from the credentials if requirements are met.
+
+ Args:
+ audience (str): The service URL. ``https://[API_ENDPOINT]/``
+ """
+ # https://google.aip.dev/auth/4111
+ if self._always_use_jwt_access:
+ if self._scopes:
+ self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+ self, None, additional_claims={"scope": " ".join(self._scopes)}
+ )
+ elif audience:
+ self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+ self, audience
+ )
+ elif self._default_scopes:
+ self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+ self,
+ None,
+ additional_claims={"scope": " ".join(self._default_scopes)},
+ )
+ elif not self._scopes and audience:
+ self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+ self, audience
+ )
+
+ @_helpers.copy_docstring(credentials.Signing)
+ def sign_bytes(self, message):
+ return self._signer.sign(message)
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer(self):
+ return self._signer
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer_email(self):
+ return self._service_account_email
+
+
+class IDTokenCredentials(credentials.Signing, credentials.CredentialsWithQuotaProject):
+ """Open ID Connect ID Token-based service account credentials.
+
+ These credentials are largely similar to :class:`.Credentials`, but instead
+ of using an OAuth 2.0 Access Token as the bearer token, they use an Open
+ ID Connect ID Token as the bearer token. These credentials are useful when
+    communicating to services that require ID Tokens and cannot accept access
+ tokens.
+
+ Usually, you'll create these credentials with one of the helper
+ constructors. To create credentials using a Google service account
+ private key JSON file::
+
+ credentials = (
+ service_account.IDTokenCredentials.from_service_account_file(
+ 'service-account.json'))
+
+
+ Or if you already have the service account file loaded::
+
+ service_account_info = json.load(open('service_account.json'))
+ credentials = (
+ service_account.IDTokenCredentials.from_service_account_info(
+ service_account_info))
+
+
+ Both helper methods pass on arguments to the constructor, so you can
+ specify additional scopes and a subject if necessary::
+
+ credentials = (
+ service_account.IDTokenCredentials.from_service_account_file(
+ 'service-account.json',
+ scopes=['email'],
+ subject='user@example.com'))
+
+
+ The credentials are considered immutable. If you want to modify the scopes
+ or the subject used for delegation, use :meth:`with_scopes` or
+ :meth:`with_subject`::
+
+ scoped_credentials = credentials.with_scopes(['email'])
+ delegated_credentials = credentials.with_subject(subject)
+
+ """
+
+ def __init__(
+ self,
+ signer,
+ service_account_email,
+ token_uri,
+ target_audience,
+ additional_claims=None,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ service_account_email (str): The service account's email.
+ token_uri (str): The OAuth 2.0 Token URI.
+ target_audience (str): The intended audience for these credentials,
+ used when requesting the ID Token. The ID Token's ``aud`` claim
+ will be set to this string.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT assertion used in the authorization grant.
+ quota_project_id (Optional[str]): The project ID used for quota and billing.
+ .. note:: Typically one of the helper constructors
+ :meth:`from_service_account_file` or
+ :meth:`from_service_account_info` are used instead of calling the
+ constructor directly.
+ """
+ super(IDTokenCredentials, self).__init__()
+ self._signer = signer
+ self._service_account_email = service_account_email
+ self._token_uri = token_uri
+ self._target_audience = target_audience
+ self._quota_project_id = quota_project_id
+
+ if additional_claims is not None:
+ self._additional_claims = additional_claims
+ else:
+ self._additional_claims = {}
+
+ @classmethod
+ def _from_signer_and_info(cls, signer, info, **kwargs):
+ """Creates a credentials instance from a signer and service account
+ info.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ info (Mapping[str, str]): The service account info.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.IDTokenCredentials: The constructed credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ kwargs.setdefault("service_account_email", info["client_email"])
+ kwargs.setdefault("token_uri", info["token_uri"])
+ return cls(signer, **kwargs)
+
+ @classmethod
+ def from_service_account_info(cls, info, **kwargs):
+ """Creates a credentials instance from parsed service account info.
+
+ Args:
+ info (Mapping[str, str]): The service account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.service_account.IDTokenCredentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ signer = _service_account_info.from_dict(
+ info, require=["client_email", "token_uri"]
+ )
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename, **kwargs):
+ """Creates a credentials instance from a service account json file.
+
+ Args:
+ filename (str): The path to the service account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.service_account.IDTokenCredentials: The constructed
+ credentials.
+ """
+ info, signer = _service_account_info.from_filename(
+ filename, require=["client_email", "token_uri"]
+ )
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ def with_target_audience(self, target_audience):
+ """Create a copy of these credentials with the specified target
+ audience.
+
+ Args:
+ target_audience (str): The intended audience for these credentials,
+ used when requesting the ID Token.
+
+ Returns:
+ google.auth.service_account.IDTokenCredentials: A new credentials
+ instance.
+ """
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ token_uri=self._token_uri,
+ target_audience=target_audience,
+ additional_claims=self._additional_claims.copy(),
+ quota_project_id=self.quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ self._signer,
+ service_account_email=self._service_account_email,
+ token_uri=self._token_uri,
+ target_audience=self._target_audience,
+ additional_claims=self._additional_claims.copy(),
+ quota_project_id=quota_project_id,
+ )
+
+ def _make_authorization_grant_assertion(self):
+ """Create the OAuth 2.0 assertion.
+
+ This assertion is used during the OAuth 2.0 grant to acquire an
+ ID token.
+
+ Returns:
+ bytes: The authorization grant assertion.
+ """
+ now = _helpers.utcnow()
+ lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
+ expiry = now + lifetime
+
+ payload = {
+ "iat": _helpers.datetime_to_secs(now),
+ "exp": _helpers.datetime_to_secs(expiry),
+ # The issuer must be the service account email.
+ "iss": self.service_account_email,
+ # The audience must be the auth token endpoint's URI
+ "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT,
+ # The target audience specifies which service the ID token is
+ # intended for.
+ "target_audience": self._target_audience,
+ }
+
+ payload.update(self._additional_claims)
+
+ token = jwt.encode(self._signer, payload)
+
+ return token
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ assertion = self._make_authorization_grant_assertion()
+ access_token, expiry, _ = _client.id_token_jwt_grant(
+ request, self._token_uri, assertion
+ )
+ self.token = access_token
+ self.expiry = expiry
+
+ @property
+ def service_account_email(self):
+ """The service account email."""
+ return self._service_account_email
+
+ @_helpers.copy_docstring(credentials.Signing)
+ def sign_bytes(self, message):
+ return self._signer.sign(message)
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer(self):
+ return self._signer
+
+ @property
+ @_helpers.copy_docstring(credentials.Signing)
+ def signer_email(self):
+ return self._service_account_email
diff --git a/contrib/python/google-auth/py2/google/oauth2/sts.py b/contrib/python/google-auth/py2/google/oauth2/sts.py
new file mode 100644
index 0000000000..ae3c0146b1
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/sts.py
@@ -0,0 +1,155 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Token Exchange Spec.
+
+This module defines a token exchange utility based on the `OAuth 2.0 Token
+Exchange`_ spec. This will be mainly used to exchange external credentials
+for GCP access tokens in workload identity pools to access Google APIs.
+
+The implementation will support various types of client authentication as
+allowed in the spec.
+
+A deviation on the spec will be for additional Google specific options that
+cannot be easily mapped to parameters defined in the RFC.
+
+The returned dictionary response will be based on the `rfc8693 section 2.2.1`_
+spec JSON response.
+
+.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693
+.. _rfc8693 section 2.2.1: https://tools.ietf.org/html/rfc8693#section-2.2.1
+"""
+
+import json
+
+from six.moves import http_client
+from six.moves import urllib
+
+from google.oauth2 import utils
+
+
+_URLENCODED_HEADERS = {"Content-Type": "application/x-www-form-urlencoded"}
+
+
+class Client(utils.OAuthClientAuthHandler):
+ """Implements the OAuth 2.0 token exchange spec based on
+ https://tools.ietf.org/html/rfc8693.
+ """
+
+ def __init__(self, token_exchange_endpoint, client_authentication=None):
+ """Initializes an STS client instance.
+
+ Args:
+ token_exchange_endpoint (str): The token exchange endpoint.
+ client_authentication (Optional[google.oauth2.utils.ClientAuthentication]):
+ The optional OAuth client authentication credentials if available.
+ """
+ super(Client, self).__init__(client_authentication)
+ self._token_exchange_endpoint = token_exchange_endpoint
+
+ def exchange_token(
+ self,
+ request,
+ grant_type,
+ subject_token,
+ subject_token_type,
+ resource=None,
+ audience=None,
+ scopes=None,
+ requested_token_type=None,
+ actor_token=None,
+ actor_token_type=None,
+ additional_options=None,
+ additional_headers=None,
+ ):
+ """Exchanges the provided token for another type of token based on the
+ rfc8693 spec.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ grant_type (str): The OAuth 2.0 token exchange grant type.
+ subject_token (str): The OAuth 2.0 token exchange subject token.
+ subject_token_type (str): The OAuth 2.0 token exchange subject token type.
+ resource (Optional[str]): The optional OAuth 2.0 token exchange resource field.
+ audience (Optional[str]): The optional OAuth 2.0 token exchange audience field.
+ scopes (Optional[Sequence[str]]): The optional list of scopes to use.
+ requested_token_type (Optional[str]): The optional OAuth 2.0 token exchange requested
+ token type.
+ actor_token (Optional[str]): The optional OAuth 2.0 token exchange actor token.
+ actor_token_type (Optional[str]): The optional OAuth 2.0 token exchange actor token type.
+ additional_options (Optional[Mapping[str, str]]): The optional additional
+ non-standard Google specific options.
+ additional_headers (Optional[Mapping[str, str]]): The optional additional
+ headers to pass to the token exchange endpoint.
+
+ Returns:
+ Mapping[str, str]: The token exchange JSON-decoded response data containing
+ the requested token and its expiration time.
+
+ Raises:
+ google.auth.exceptions.OAuthError: If the token endpoint returned
+ an error.
+ """
+ # Initialize request headers.
+ headers = _URLENCODED_HEADERS.copy()
+ # Inject additional headers.
+ if additional_headers:
+ for k, v in dict(additional_headers).items():
+ headers[k] = v
+ # Initialize request body.
+ request_body = {
+ "grant_type": grant_type,
+ "resource": resource,
+ "audience": audience,
+ "scope": " ".join(scopes or []),
+ "requested_token_type": requested_token_type,
+ "subject_token": subject_token,
+ "subject_token_type": subject_token_type,
+ "actor_token": actor_token,
+ "actor_token_type": actor_token_type,
+ "options": None,
+ }
+ # Add additional non-standard options.
+ if additional_options:
+ request_body["options"] = urllib.parse.quote(json.dumps(additional_options))
+ # Remove empty fields in request body.
+ for k, v in dict(request_body).items():
+ if v is None or v == "":
+ del request_body[k]
+ # Apply OAuth client authentication.
+ self.apply_client_authentication_options(headers, request_body)
+
+ # Execute request.
+ response = request(
+ url=self._token_exchange_endpoint,
+ method="POST",
+ headers=headers,
+ body=urllib.parse.urlencode(request_body).encode("utf-8"),
+ )
+
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ # If non-200 response received, translate to OAuthError exception.
+ if response.status != http_client.OK:
+ utils.handle_error_response(response_body)
+
+ response_data = json.loads(response_body)
+
+ # Return successful response.
+ return response_data
diff --git a/contrib/python/google-auth/py2/google/oauth2/utils.py b/contrib/python/google-auth/py2/google/oauth2/utils.py
new file mode 100644
index 0000000000..593f03236e
--- /dev/null
+++ b/contrib/python/google-auth/py2/google/oauth2/utils.py
@@ -0,0 +1,171 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Utilities.
+
+This module provides implementations for various OAuth 2.0 utilities.
+This includes `OAuth error handling`_ and
+`Client authentication for OAuth flows`_.
+
+OAuth error handling
+--------------------
+This will define interfaces for handling OAuth related error responses as
+stated in `RFC 6749 section 5.2`_.
+This will include a common function to convert these HTTP error responses to a
+:class:`google.auth.exceptions.OAuthError` exception.
+
+
+Client authentication for OAuth flows
+-------------------------------------
+We introduce an interface for defining client authentication credentials based
+on `RFC 6749 section 2.3.1`_. This will expose the following
+capabilities:
+
+ * Ability to support basic authentication via request header.
+ * Ability to support bearer token authentication via request header.
+ * Ability to support client ID / secret authentication via request body.
+
+.. _RFC 6749 section 2.3.1: https://tools.ietf.org/html/rfc6749#section-2.3.1
+.. _RFC 6749 section 5.2: https://tools.ietf.org/html/rfc6749#section-5.2
+"""
+
+import abc
+import base64
+import enum
+import json
+
+import six
+
+from google.auth import exceptions
+
+
+# OAuth client authentication based on
+# https://tools.ietf.org/html/rfc6749#section-2.3.
+class ClientAuthType(enum.Enum):
+ basic = 1
+ request_body = 2
+
+
+class ClientAuthentication(object):
+ """Defines the client authentication credentials for basic and request-body
+ types based on https://tools.ietf.org/html/rfc6749#section-2.3.1.
+ """
+
+ def __init__(self, client_auth_type, client_id, client_secret=None):
+ """Instantiates a client authentication object containing the client ID
+ and secret credentials for basic and request-body auth.
+
+ Args:
+ client_auth_type (google.oauth2.utils.ClientAuthType): The
+ client authentication type.
+ client_id (str): The client ID.
+ client_secret (Optional[str]): The client secret.
+ """
+ self.client_auth_type = client_auth_type
+ self.client_id = client_id
+ self.client_secret = client_secret
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OAuthClientAuthHandler(object):
+ """Abstract class for handling client authentication in OAuth-based
+ operations.
+ """
+
+ def __init__(self, client_authentication=None):
+ """Instantiates an OAuth client authentication handler.
+
+ Args:
+ client_authentication (Optional[google.oauth2.utils.ClientAuthentication]):
+ The OAuth client authentication credentials if available.
+ """
+ super(OAuthClientAuthHandler, self).__init__()
+ self._client_authentication = client_authentication
+
+ def apply_client_authentication_options(
+ self, headers, request_body=None, bearer_token=None
+ ):
+ """Applies client authentication on the OAuth request's headers or POST
+ body.
+
+ Args:
+ headers (Mapping[str, str]): The HTTP request header.
+ request_body (Optional[Mapping[str, str]]): The HTTP request body
+ dictionary. For requests that do not support request body, this
+ is None and will be ignored.
+ bearer_token (Optional[str]): The optional bearer token.
+ """
+ # Inject authenticated header.
+ self._inject_authenticated_headers(headers, bearer_token)
+ # Inject authenticated request body.
+ if bearer_token is None:
+ self._inject_authenticated_request_body(request_body)
+
+ def _inject_authenticated_headers(self, headers, bearer_token=None):
+ if bearer_token is not None:
+ headers["Authorization"] = "Bearer %s" % bearer_token
+ elif (
+ self._client_authentication is not None
+ and self._client_authentication.client_auth_type is ClientAuthType.basic
+ ):
+ username = self._client_authentication.client_id
+ password = self._client_authentication.client_secret or ""
+
+ credentials = base64.b64encode(
+ ("%s:%s" % (username, password)).encode()
+ ).decode()
+ headers["Authorization"] = "Basic %s" % credentials
+
+ def _inject_authenticated_request_body(self, request_body):
+ if (
+ self._client_authentication is not None
+ and self._client_authentication.client_auth_type
+ is ClientAuthType.request_body
+ ):
+ if request_body is None:
+ raise exceptions.OAuthError(
+ "HTTP request does not support request-body"
+ )
+ else:
+ request_body["client_id"] = self._client_authentication.client_id
+ request_body["client_secret"] = (
+ self._client_authentication.client_secret or ""
+ )
+
+
+def handle_error_response(response_body):
+ """Translates an error response from an OAuth operation into an
+ OAuthError exception.
+
+ Args:
+ response_body (str): The decoded response data.
+
+ Raises:
+ google.auth.exceptions.OAuthError
+ """
+ try:
+ error_components = []
+ error_data = json.loads(response_body)
+
+ error_components.append("Error code {}".format(error_data["error"]))
+ if "error_description" in error_data:
+ error_components.append(": {}".format(error_data["error_description"]))
+ if "error_uri" in error_data:
+ error_components.append(" - {}".format(error_data["error_uri"]))
+ error_details = "".join(error_components)
+ # If no details could be extracted, use the response data.
+ except (KeyError, ValueError):
+ error_details = response_body
+
+ raise exceptions.OAuthError(error_details, response_body)
diff --git a/contrib/python/google-auth/py2/tests/__init__.py b/contrib/python/google-auth/py2/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/__init__.py
diff --git a/contrib/python/google-auth/py2/tests/compute_engine/__init__.py b/contrib/python/google-auth/py2/tests/compute_engine/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/compute_engine/__init__.py
diff --git a/contrib/python/google-auth/py2/tests/compute_engine/test__metadata.py b/contrib/python/google-auth/py2/tests/compute_engine/test__metadata.py
new file mode 100644
index 0000000000..18f0066d98
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/compute_engine/test__metadata.py
@@ -0,0 +1,373 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+import mock
+import pytest
+from six.moves import http_client
+from six.moves import reload_module
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.auth.compute_engine import _metadata
+
+PATH = "instance/service-accounts/default"
+
+
+def make_request(data, status=http_client.OK, headers=None, retry=False):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = _helpers.to_bytes(data)
+ response.headers = headers or {}
+
+ request = mock.create_autospec(transport.Request)
+ if retry:
+ request.side_effect = [exceptions.TransportError(), response]
+ else:
+ request.return_value = response
+
+ return request
+
+
+def test_ping_success():
+ request = make_request("", headers=_metadata._METADATA_HEADERS)
+
+ assert _metadata.ping(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_IP_ROOT,
+ headers=_metadata._METADATA_HEADERS,
+ timeout=_metadata._METADATA_DEFAULT_TIMEOUT,
+ )
+
+
+def test_ping_success_retry():
+ request = make_request("", headers=_metadata._METADATA_HEADERS, retry=True)
+
+ assert _metadata.ping(request)
+
+ request.assert_called_with(
+ method="GET",
+ url=_metadata._METADATA_IP_ROOT,
+ headers=_metadata._METADATA_HEADERS,
+ timeout=_metadata._METADATA_DEFAULT_TIMEOUT,
+ )
+ assert request.call_count == 2
+
+
+def test_ping_failure_bad_flavor():
+ request = make_request("", headers={_metadata._METADATA_FLAVOR_HEADER: "meep"})
+
+ assert not _metadata.ping(request)
+
+
+def test_ping_failure_connection_failed():
+ request = make_request("")
+ request.side_effect = exceptions.TransportError()
+
+ assert not _metadata.ping(request)
+
+
+def _test_ping_success_custom_root():
+ request = make_request("", headers=_metadata._METADATA_HEADERS)
+
+ fake_ip = "1.2.3.4"
+ os.environ[environment_vars.GCE_METADATA_IP] = fake_ip
+ reload_module(_metadata)
+
+ try:
+ assert _metadata.ping(request)
+ finally:
+ del os.environ[environment_vars.GCE_METADATA_IP]
+ reload_module(_metadata)
+
+ request.assert_called_once_with(
+ method="GET",
+ url="http://" + fake_ip,
+ headers=_metadata._METADATA_HEADERS,
+ timeout=_metadata._METADATA_DEFAULT_TIMEOUT,
+ )
+
+
+def test_get_success_json():
+ key, value = "foo", "bar"
+
+ data = json.dumps({key: value})
+ request = make_request(data, headers={"content-type": "application/json"})
+
+ result = _metadata.get(request, PATH)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result[key] == value
+
+
+def test_get_success_retry():
+ key, value = "foo", "bar"
+
+ data = json.dumps({key: value})
+ request = make_request(
+ data, headers={"content-type": "application/json"}, retry=True
+ )
+
+ result = _metadata.get(request, PATH)
+
+ request.assert_called_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert request.call_count == 2
+ assert result[key] == value
+
+
+def test_get_success_text():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+
+ result = _metadata.get(request, PATH)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def test_get_success_params():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+ params = {"recursive": "true"}
+
+ result = _metadata.get(request, PATH, params=params)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def test_get_success_recursive_and_params():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+ params = {"recursive": "false"}
+ result = _metadata.get(request, PATH, recursive=True, params=params)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def test_get_success_recursive():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+
+ result = _metadata.get(request, PATH, recursive=True)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def _test_get_success_custom_root_new_variable():
+ request = make_request("{}", headers={"content-type": "application/json"})
+
+ fake_root = "another.metadata.service"
+ os.environ[environment_vars.GCE_METADATA_HOST] = fake_root
+ reload_module(_metadata)
+
+ try:
+ _metadata.get(request, PATH)
+ finally:
+ del os.environ[environment_vars.GCE_METADATA_HOST]
+ reload_module(_metadata)
+
+ request.assert_called_once_with(
+ method="GET",
+ url="http://{}/computeMetadata/v1/{}".format(fake_root, PATH),
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def _test_get_success_custom_root_old_variable():
+ request = make_request("{}", headers={"content-type": "application/json"})
+
+ fake_root = "another.metadata.service"
+ os.environ[environment_vars.GCE_METADATA_ROOT] = fake_root
+ reload_module(_metadata)
+
+ try:
+ _metadata.get(request, PATH)
+ finally:
+ del os.environ[environment_vars.GCE_METADATA_ROOT]
+ reload_module(_metadata)
+
+ request.assert_called_once_with(
+ method="GET",
+ url="http://{}/computeMetadata/v1/{}".format(fake_root, PATH),
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def test_get_failure():
+ request = make_request("Metadata error", status=http_client.NOT_FOUND)
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ _metadata.get(request, PATH)
+
+ assert excinfo.match(r"Metadata error")
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def test_get_failure_connection_failed():
+ request = make_request("")
+ request.side_effect = exceptions.TransportError()
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ _metadata.get(request, PATH)
+
+ assert excinfo.match(r"Compute Engine Metadata server unavailable")
+
+ request.assert_called_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert request.call_count == 5
+
+
+def test_get_failure_bad_json():
+ request = make_request("{", headers={"content-type": "application/json"})
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ _metadata.get(request, PATH)
+
+ assert excinfo.match(r"invalid JSON")
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def test_get_project_id():
+ project = "example-project"
+ request = make_request(project, headers={"content-type": "text/plain"})
+
+ project_id = _metadata.get_project_id(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + "project/project-id",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert project_id == project
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_get_service_account_token(utcnow):
+ ttl = 500
+ request = make_request(
+ json.dumps({"access_token": "token", "expires_in": ttl}),
+ headers={"content-type": "application/json"},
+ )
+
+ token, expiry = _metadata.get_service_account_token(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/token",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=ttl)
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_get_service_account_token_with_scopes_list(utcnow):
+ ttl = 500
+ request = make_request(
+ json.dumps({"access_token": "token", "expires_in": ttl}),
+ headers={"content-type": "application/json"},
+ )
+
+ token, expiry = _metadata.get_service_account_token(request, scopes=["foo", "bar"])
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/token" + "?scopes=foo%2Cbar",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=ttl)
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_get_service_account_token_with_scopes_string(utcnow):
+ ttl = 500
+ request = make_request(
+ json.dumps({"access_token": "token", "expires_in": ttl}),
+ headers={"content-type": "application/json"},
+ )
+
+ token, expiry = _metadata.get_service_account_token(request, scopes="foo,bar")
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/token" + "?scopes=foo%2Cbar",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=ttl)
+
+
+def test_get_service_account_info():
+ key, value = "foo", "bar"
+ request = make_request(
+ json.dumps({key: value}), headers={"content-type": "application/json"}
+ )
+
+ info = _metadata.get_service_account_info(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+ assert info[key] == value
diff --git a/contrib/python/google-auth/py2/tests/compute_engine/test_credentials.py b/contrib/python/google-auth/py2/tests/compute_engine/test_credentials.py
new file mode 100644
index 0000000000..ebe9aa5ba3
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/compute_engine/test_credentials.py
@@ -0,0 +1,798 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import datetime
+
+import mock
+import pytest
+import responses
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import transport
+from google.auth.compute_engine import credentials
+from google.auth.transport import requests
+
+SAMPLE_ID_TOKEN_EXP = 1584393400
+
+# header: {"alg": "RS256", "typ": "JWT", "kid": "1"}
+# payload: {"iss": "issuer", "iat": 1584393348, "sub": "subject",
+# "exp": 1584393400,"aud": "audience"}
+SAMPLE_ID_TOKEN = (
+ b"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCIsICJraWQiOiAiMSJ9."
+ b"eyJpc3MiOiAiaXNzdWVyIiwgImlhdCI6IDE1ODQzOTMzNDgsICJzdWIiO"
+ b"iAic3ViamVjdCIsICJleHAiOiAxNTg0MzkzNDAwLCAiYXVkIjogImF1ZG"
+ b"llbmNlIn0."
+ b"OquNjHKhTmlgCk361omRo18F_uY-7y0f_AmLbzW062Q1Zr61HAwHYP5FM"
+ b"316CK4_0cH8MUNGASsvZc3VqXAqub6PUTfhemH8pFEwBdAdG0LhrNkU0H"
+ b"WN1YpT55IiQ31esLdL5q-qDsOPpNZJUti1y1lAreM5nIn2srdWzGXGs4i"
+ b"TRQsn0XkNUCL4RErpciXmjfhMrPkcAjKA-mXQm2fa4jmTlEZFqFmUlym1"
+ b"ozJ0yf5grjN6AslN4OGvAv1pS-_Ko_pGBS6IQtSBC6vVKCUuBfaqNjykg"
+ b"bsxbLa6Fp0SYeYwO8ifEnkRvasVpc1WTQqfRB2JCj5pTBDzJpIpFCMmnQ"
+)
+
+
+class TestCredentials(object):
+ credentials = None
+
+ @pytest.fixture(autouse=True)
+ def credentials_fixture(self):
+ self.credentials = credentials.Credentials()
+
+ def test_default_state(self):
+ assert not self.credentials.valid
+ # Expiration hasn't been set yet
+ assert not self.credentials.expired
+ # Scopes are needed
+ assert self.credentials.requires_scopes
+ # Service account email hasn't been populated
+ assert self.credentials.service_account_email == "default"
+ # No quota project
+ assert not self.credentials._quota_project_id
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_refresh_success(self, get, utcnow):
+ get.side_effect = [
+ {
+ # First request is for service account info.
+ "email": "service-account@example.com",
+ "scopes": ["one", "two"],
+ },
+ {
+ # Second request is for the token.
+ "access_token": "token",
+ "expires_in": 500,
+ },
+ ]
+
+ # Refresh credentials
+ self.credentials.refresh(None)
+
+ # Check that the credentials have the token and proper expiration
+ assert self.credentials.token == "token"
+ assert self.credentials.expiry == (utcnow() + datetime.timedelta(seconds=500))
+
+ # Check the credential info
+ assert self.credentials.service_account_email == "service-account@example.com"
+ assert self.credentials._scopes == ["one", "two"]
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert self.credentials.valid
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_refresh_success_with_scopes(self, get, utcnow):
+ get.side_effect = [
+ {
+ # First request is for service account info.
+ "email": "service-account@example.com",
+ "scopes": ["one", "two"],
+ },
+ {
+ # Second request is for the token.
+ "access_token": "token",
+ "expires_in": 500,
+ },
+ ]
+
+ # Refresh credentials
+ scopes = ["three", "four"]
+ self.credentials = self.credentials.with_scopes(scopes)
+ self.credentials.refresh(None)
+
+ # Check that the credentials have the token and proper expiration
+ assert self.credentials.token == "token"
+ assert self.credentials.expiry == (utcnow() + datetime.timedelta(seconds=500))
+
+ # Check the credential info
+ assert self.credentials.service_account_email == "service-account@example.com"
+ assert self.credentials._scopes == scopes
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert self.credentials.valid
+
+ kwargs = get.call_args[1]
+ assert kwargs == {"params": {"scopes": "three,four"}}
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_refresh_error(self, get):
+ get.side_effect = exceptions.TransportError("http error")
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ self.credentials.refresh(None)
+
+ assert excinfo.match(r"http error")
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_before_request_refreshes(self, get):
+ get.side_effect = [
+ {
+ # First request is for service account info.
+ "email": "service-account@example.com",
+ "scopes": "one two",
+ },
+ {
+ # Second request is for the token.
+ "access_token": "token",
+ "expires_in": 500,
+ },
+ ]
+
+ # Credentials should start as invalid
+ assert not self.credentials.valid
+
+ # before_request should cause a refresh
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert get.called
+
+ # Credentials should now be valid.
+ assert self.credentials.valid
+
+ def test_with_quota_project(self):
+ quota_project_creds = self.credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds._quota_project_id == "project-foo"
+
+ def test_with_scopes(self):
+ assert self.credentials._scopes is None
+
+ scopes = ["one", "two"]
+ self.credentials = self.credentials.with_scopes(scopes)
+
+ assert self.credentials._scopes == scopes
+
+
+class TestIDTokenCredentials(object):
+ credentials = None
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_default_state(self, get):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scope": ["one", "two"]}
+ ]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://example.com"
+ )
+
+ assert not self.credentials.valid
+ # Expiration hasn't been set yet
+ assert not self.credentials.expired
+ # Service account email hasn't been populated
+ assert self.credentials.service_account_email == "service-account@example.com"
+ # Signer is initialized
+ assert self.credentials.signer
+ assert self.credentials.signer_email == "service-account@example.com"
+ # No quota project
+ assert not self.credentials._quota_project_id
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_make_authorization_grant_assertion(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://audience.com",
+ }
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_service_account(self, sign, get, utcnow):
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ target_audience="https://audience.com",
+ service_account_email="service-account@other.com",
+ )
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@other.com",
+ "target_audience": "https://audience.com",
+ }
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_additional_claims(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ target_audience="https://audience.com",
+ additional_claims={"foo": "bar"},
+ )
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://audience.com",
+ "foo": "bar",
+ }
+
+ def test_token_uri(self):
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ signer=mock.Mock(),
+ service_account_email="foo@example.com",
+ target_audience="https://audience.com",
+ )
+ assert self.credentials._token_uri == credentials._DEFAULT_TOKEN_URI
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ signer=mock.Mock(),
+ service_account_email="foo@example.com",
+ target_audience="https://audience.com",
+ token_uri="https://example.com/token",
+ )
+ assert self.credentials._token_uri == "https://example.com/token"
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_target_audience(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+ self.credentials = self.credentials.with_target_audience("https://actually.not")
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://actually.not",
+ }
+
+ # Check that the signer have been initialized with a Request object
+ assert isinstance(self.credentials._signer._request, transport.Request)
+
+ @responses.activate
+ def test_with_target_audience_integration(self):
+ """ Test that it is possible to refresh credentials
+ generated from `with_target_audience`.
+
+ Instead of mocking the methods, the HTTP responses
+ have been mocked.
+ """
+
+ # mock information about credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/default/?recursive=true",
+ status=200,
+ content_type="application/json",
+ json={
+ "scopes": "email",
+ "email": "service-account@example.com",
+ "aliases": ["default"],
+ },
+ )
+
+ # mock token for credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/service-account@example.com/token",
+ status=200,
+ content_type="application/json",
+ json={
+ "access_token": "some-token",
+ "expires_in": 3210,
+ "token_type": "Bearer",
+ },
+ )
+
+ # mock sign blob endpoint
+ signature = base64.b64encode(b"some-signature").decode("utf-8")
+ responses.add(
+ responses.POST,
+ "https://iamcredentials.googleapis.com/v1/projects/-/"
+ "serviceAccounts/service-account@example.com:signBlob?alt=json",
+ status=200,
+ content_type="application/json",
+ json={"keyId": "some-key-id", "signedBlob": signature},
+ )
+
+ id_token = "{}.{}.{}".format(
+ base64.b64encode(b'{"some":"some"}').decode("utf-8"),
+ base64.b64encode(b'{"exp": 3210}').decode("utf-8"),
+ base64.b64encode(b"token").decode("utf-8"),
+ )
+
+ # mock id token endpoint
+ responses.add(
+ responses.POST,
+ "https://www.googleapis.com/oauth2/v4/token",
+ status=200,
+ content_type="application/json",
+ json={"id_token": id_token, "expiry": 3210},
+ )
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=requests.Request(),
+ service_account_email="service-account@example.com",
+ target_audience="https://audience.com",
+ )
+
+ self.credentials = self.credentials.with_target_audience("https://actually.not")
+
+ self.credentials.refresh(requests.Request())
+
+ assert self.credentials.token is not None
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_quota_project(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+ self.credentials = self.credentials.with_quota_project("project-foo")
+
+ assert self.credentials._quota_project_id == "project-foo"
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://audience.com",
+ }
+
+ # Check that the signer have been initialized with a Request object
+ assert isinstance(self.credentials._signer._request, transport.Request)
+
+ @responses.activate
+ def test_with_quota_project_integration(self):
+ """ Test that it is possible to refresh credentials
+ generated from `with_quota_project`.
+
+ Instead of mocking the methods, the HTTP responses
+ have been mocked.
+ """
+
+ # mock information about credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/default/?recursive=true",
+ status=200,
+ content_type="application/json",
+ json={
+ "scopes": "email",
+ "email": "service-account@example.com",
+ "aliases": ["default"],
+ },
+ )
+
+ # mock token for credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/service-account@example.com/token",
+ status=200,
+ content_type="application/json",
+ json={
+ "access_token": "some-token",
+ "expires_in": 3210,
+ "token_type": "Bearer",
+ },
+ )
+
+ # mock sign blob endpoint
+ signature = base64.b64encode(b"some-signature").decode("utf-8")
+ responses.add(
+ responses.POST,
+ "https://iamcredentials.googleapis.com/v1/projects/-/"
+ "serviceAccounts/service-account@example.com:signBlob?alt=json",
+ status=200,
+ content_type="application/json",
+ json={"keyId": "some-key-id", "signedBlob": signature},
+ )
+
+ id_token = "{}.{}.{}".format(
+ base64.b64encode(b'{"some":"some"}').decode("utf-8"),
+ base64.b64encode(b'{"exp": 3210}').decode("utf-8"),
+ base64.b64encode(b"token").decode("utf-8"),
+ )
+
+ # mock id token endpoint
+ responses.add(
+ responses.POST,
+ "https://www.googleapis.com/oauth2/v4/token",
+ status=200,
+ content_type="application/json",
+ json={"id_token": id_token, "expiry": 3210},
+ )
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=requests.Request(),
+ service_account_email="service-account@example.com",
+ target_audience="https://audience.com",
+ )
+
+ self.credentials = self.credentials.with_quota_project("project-foo")
+
+ self.credentials.refresh(requests.Request())
+
+ assert self.credentials.token is not None
+ assert self.credentials._quota_project_id == "project-foo"
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_refresh_success(self, id_token_jwt_grant, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+ id_token_jwt_grant.side_effect = [
+ ("idtoken", datetime.datetime.utcfromtimestamp(3600), {})
+ ]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Refresh credentials
+ self.credentials.refresh(None)
+
+ # Check that the credentials have the token and proper expiration
+ assert self.credentials.token == "idtoken"
+ assert self.credentials.expiry == (datetime.datetime.utcfromtimestamp(3600))
+
+ # Check the credential info
+ assert self.credentials.service_account_email == "service-account@example.com"
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert self.credentials.valid
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_refresh_error(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ response = mock.Mock()
+ response.data = b'{"error": "http error"}'
+ response.status = 500
+ request.side_effect = [response]
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ self.credentials.refresh(request)
+
+ assert excinfo.match(r"http error")
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_before_request_refreshes(self, id_token_jwt_grant, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": "one two"}
+ ]
+ sign.side_effect = [b"signature"]
+ id_token_jwt_grant.side_effect = [
+ ("idtoken", datetime.datetime.utcfromtimestamp(3600), {})
+ ]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Credentials should start as invalid
+ assert not self.credentials.valid
+
+ # before_request should cause a refresh
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert get.called
+
+ # Credentials should now be valid.
+ assert self.credentials.valid
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_sign_bytes(self, sign, get):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ response = mock.Mock()
+ response.data = b'{"signature": "c2lnbmF0dXJl"}'
+ response.status = 200
+ request.side_effect = [response]
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Generate authorization grant:
+ signature = self.credentials.sign_bytes(b"some bytes")
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert signature == b"signature"
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_get_id_token_from_metadata(self, get, get_service_account_info):
+ get.return_value = SAMPLE_ID_TOKEN
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+ cred.refresh(request=mock.Mock())
+
+ assert cred.token == SAMPLE_ID_TOKEN
+ assert cred.expiry == datetime.datetime.fromtimestamp(SAMPLE_ID_TOKEN_EXP)
+ assert cred._use_metadata_identity_endpoint
+ assert cred._signer is None
+ assert cred._token_uri is None
+ assert cred._service_account_email == "foo@example.com"
+ assert cred._target_audience == "audience"
+ with pytest.raises(ValueError):
+ cred.sign_bytes(b"bytes")
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ def test_with_target_audience_for_metadata(self, get_service_account_info):
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+ cred = cred.with_target_audience("new_audience")
+
+ assert cred._target_audience == "new_audience"
+ assert cred._use_metadata_identity_endpoint
+ assert cred._signer is None
+ assert cred._token_uri is None
+ assert cred._service_account_email == "foo@example.com"
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ def test_id_token_with_quota_project(self, get_service_account_info):
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+ cred = cred.with_quota_project("project-foo")
+
+ assert cred._quota_project_id == "project-foo"
+ assert cred._use_metadata_identity_endpoint
+ assert cred._signer is None
+ assert cred._token_uri is None
+ assert cred._service_account_email == "foo@example.com"
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_invalid_id_token_from_metadata(self, get, get_service_account_info):
+ get.return_value = "invalid_id_token"
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+
+ with pytest.raises(ValueError):
+ cred.refresh(request=mock.Mock())
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_transport_error_from_metadata(self, get, get_service_account_info):
+ get.side_effect = exceptions.TransportError("transport error")
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ cred.refresh(request=mock.Mock())
+ assert excinfo.match(r"transport error")
+
+ def test_get_id_token_from_metadata_constructor(self):
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ token_uri="token_uri",
+ )
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ signer=mock.Mock(),
+ )
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ additional_claims={"key", "value"},
+ )
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ service_account_email="foo@example.com",
+ )
diff --git a/contrib/python/google-auth/py2/tests/conftest.py b/contrib/python/google-auth/py2/tests/conftest.py
new file mode 100644
index 0000000000..edd623731c
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/conftest.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+import mock
+import pytest
+
+
+def pytest_configure():
+ """Load public certificate and private key."""
+ import __res
+ pytest.private_key_bytes = __res.find("data/privatekey.pem")
+ pytest.public_cert_bytes = __res.find("data/public_cert.pem")
+
+
+@pytest.fixture
+def mock_non_existent_module(monkeypatch):
+ """Mocks a non-existing module in sys.modules.
+
+ Additionally mocks any non-existing modules specified in the dotted path.
+ """
+
+ def _mock_non_existent_module(path):
+ parts = path.split(".")
+ partial = []
+ for part in parts:
+ partial.append(part)
+ current_module = ".".join(partial)
+ if current_module not in sys.modules:
+ monkeypatch.setitem(sys.modules, current_module, mock.MagicMock())
+
+ return _mock_non_existent_module
diff --git a/contrib/python/google-auth/py2/tests/crypt/__init__.py b/contrib/python/google-auth/py2/tests/crypt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/crypt/__init__.py
diff --git a/contrib/python/google-auth/py2/tests/crypt/test__cryptography_rsa.py b/contrib/python/google-auth/py2/tests/crypt/test__cryptography_rsa.py
new file mode 100644
index 0000000000..21fc9cf0fe
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/crypt/test__cryptography_rsa.py
@@ -0,0 +1,161 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+from cryptography.hazmat.primitives.asymmetric import rsa
+import pytest
+
+from google.auth import _helpers
+from google.auth.crypt import _cryptography_rsa
+from google.auth.crypt import base
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
+# > -keyout privatekey.pem
+# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+ PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
+
+with open(os.path.join(DATA_DIR, "privatekey.pub"), "rb") as fh:
+ PUBLIC_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+# To generate pem_from_pkcs12.pem and privatekey.p12:
+# $ openssl pkcs12 -export -out privatekey.p12 -inkey privatekey.pem \
+# > -in public_cert.pem
+# $ openssl pkcs12 -in privatekey.p12 -nocerts -nodes \
+# > -out pem_from_pkcs12.pem
+
+with open(os.path.join(DATA_DIR, "pem_from_pkcs12.pem"), "rb") as fh:
+ PKCS8_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "privatekey.p12"), "rb") as fh:
+ PKCS12_KEY_BYTES = fh.read()
+
+# The service account JSON file can be generated from the Google Cloud Console.
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+class TestRSAVerifier(object):
+ def test_verify_success(self):
+ to_sign = b"foo"
+ signer = _cryptography_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_unicode_success(self):
+ to_sign = u"foo"
+ signer = _cryptography_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_failure(self):
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ bad_signature1 = b""
+ assert not verifier.verify(b"foo", bad_signature1)
+ bad_signature2 = b"a"
+ assert not verifier.verify(b"foo", bad_signature2)
+
+ def test_from_string_pub_key(self):
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+ def test_from_string_pub_key_unicode(self):
+ public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
+ verifier = _cryptography_rsa.RSAVerifier.from_string(public_key)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+ def test_from_string_pub_cert(self):
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_CERT_BYTES)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+ def test_from_string_pub_cert_unicode(self):
+ public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
+ verifier = _cryptography_rsa.RSAVerifier.from_string(public_cert)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+
+class TestRSASigner(object):
+ def test_from_string_pkcs1(self):
+ signer = _cryptography_rsa.RSASigner.from_string(PKCS1_KEY_BYTES)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs1_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
+ signer = _cryptography_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs8(self):
+ signer = _cryptography_rsa.RSASigner.from_string(PKCS8_KEY_BYTES)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs8_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS8_KEY_BYTES)
+ signer = _cryptography_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs12(self):
+ with pytest.raises(ValueError):
+ _cryptography_rsa.RSASigner.from_string(PKCS12_KEY_BYTES)
+
+ def test_from_string_bogus_key(self):
+ key_bytes = "bogus-key"
+ with pytest.raises(ValueError):
+ _cryptography_rsa.RSASigner.from_string(key_bytes)
+
+ def test_from_service_account_info(self):
+ signer = _cryptography_rsa.RSASigner.from_service_account_info(
+ SERVICE_ACCOUNT_INFO
+ )
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_service_account_info_missing_key(self):
+ with pytest.raises(ValueError) as excinfo:
+ _cryptography_rsa.RSASigner.from_service_account_info({})
+
+ assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
+
+ def test_from_service_account_file(self):
+ signer = _cryptography_rsa.RSASigner.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
diff --git a/contrib/python/google-auth/py2/tests/crypt/test__python_rsa.py b/contrib/python/google-auth/py2/tests/crypt/test__python_rsa.py
new file mode 100644
index 0000000000..41711f12d2
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/crypt/test__python_rsa.py
@@ -0,0 +1,194 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import mock
+from pyasn1_modules import pem
+import pytest
+import rsa
+import six
+
+from google.auth import _helpers
+from google.auth.crypt import _python_rsa
+from google.auth.crypt import base
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
+# > -keyout privatekey.pem
+# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+ PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
+
+with open(os.path.join(DATA_DIR, "privatekey.pub"), "rb") as fh:
+ PUBLIC_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+# To generate pem_from_pkcs12.pem and privatekey.p12:
+# $ openssl pkcs12 -export -out privatekey.p12 -inkey privatekey.pem \
+# > -in public_cert.pem
+# $ openssl pkcs12 -in privatekey.p12 -nocerts -nodes \
+# > -out pem_from_pkcs12.pem
+
+with open(os.path.join(DATA_DIR, "pem_from_pkcs12.pem"), "rb") as fh:
+ PKCS8_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "privatekey.p12"), "rb") as fh:
+ PKCS12_KEY_BYTES = fh.read()
+
+# The service account JSON file can be generated from the Google Cloud Console.
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+class TestRSAVerifier(object):
+ def test_verify_success(self):
+ to_sign = b"foo"
+ signer = _python_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_unicode_success(self):
+ to_sign = u"foo"
+ signer = _python_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_failure(self):
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ bad_signature1 = b""
+ assert not verifier.verify(b"foo", bad_signature1)
+ bad_signature2 = b"a"
+ assert not verifier.verify(b"foo", bad_signature2)
+
+ def test_from_string_pub_key(self):
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_key_unicode(self):
+ public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
+ verifier = _python_rsa.RSAVerifier.from_string(public_key)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_cert(self):
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_CERT_BYTES)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_cert_unicode(self):
+ public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
+ verifier = _python_rsa.RSAVerifier.from_string(public_cert)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_cert_failure(self):
+ cert_bytes = PUBLIC_CERT_BYTES
+ true_der = rsa.pem.load_pem(cert_bytes, "CERTIFICATE")
+ load_pem_patch = mock.patch(
+ "rsa.pem.load_pem", return_value=true_der + b"extra", autospec=True
+ )
+
+ with load_pem_patch as load_pem:
+ with pytest.raises(ValueError):
+ _python_rsa.RSAVerifier.from_string(cert_bytes)
+ load_pem.assert_called_once_with(cert_bytes, "CERTIFICATE")
+
+
+class TestRSASigner(object):
+ def test_from_string_pkcs1(self):
+ signer = _python_rsa.RSASigner.from_string(PKCS1_KEY_BYTES)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs1_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
+ signer = _python_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs8(self):
+ signer = _python_rsa.RSASigner.from_string(PKCS8_KEY_BYTES)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs8_extra_bytes(self):
+ key_bytes = PKCS8_KEY_BYTES
+ _, pem_bytes = pem.readPemBlocksFromFile(
+ six.StringIO(_helpers.from_bytes(key_bytes)), _python_rsa._PKCS8_MARKER
+ )
+
+ key_info, remaining = None, "extra"
+ decode_patch = mock.patch(
+ "pyasn1.codec.der.decoder.decode",
+ return_value=(key_info, remaining),
+ autospec=True,
+ )
+
+ with decode_patch as decode:
+ with pytest.raises(ValueError):
+ _python_rsa.RSASigner.from_string(key_bytes)
+ # Verify mock was called.
+ decode.assert_called_once_with(pem_bytes, asn1Spec=_python_rsa._PKCS8_SPEC)
+
+ def test_from_string_pkcs8_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS8_KEY_BYTES)
+ signer = _python_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs12(self):
+ with pytest.raises(ValueError):
+ _python_rsa.RSASigner.from_string(PKCS12_KEY_BYTES)
+
+ def test_from_string_bogus_key(self):
+ key_bytes = "bogus-key"
+ with pytest.raises(ValueError):
+ _python_rsa.RSASigner.from_string(key_bytes)
+
+ def test_from_service_account_info(self):
+ signer = _python_rsa.RSASigner.from_service_account_info(SERVICE_ACCOUNT_INFO)
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_service_account_info_missing_key(self):
+ with pytest.raises(ValueError) as excinfo:
+ _python_rsa.RSASigner.from_service_account_info({})
+
+ assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
+
+ def test_from_service_account_file(self):
+ signer = _python_rsa.RSASigner.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.key.PrivateKey)
diff --git a/contrib/python/google-auth/py2/tests/crypt/test_crypt.py b/contrib/python/google-auth/py2/tests/crypt/test_crypt.py
new file mode 100644
index 0000000000..97c2abc257
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/crypt/test_crypt.py
@@ -0,0 +1,59 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from google.auth import crypt
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
+# > -keyout privatekey.pem
+# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+# To generate other_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out other_cert.pem
+
+with open(os.path.join(DATA_DIR, "other_cert.pem"), "rb") as fh:
+ OTHER_CERT_BYTES = fh.read()
+
+
+def test_verify_signature():
+ to_sign = b"foo"
+ signer = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ signature = signer.sign(to_sign)
+
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ # List of certs
+ assert crypt.verify_signature(
+ to_sign, signature, [OTHER_CERT_BYTES, PUBLIC_CERT_BYTES]
+ )
+
+
+def test_verify_signature_failure():
+ to_sign = b"foo"
+ signer = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ signature = signer.sign(to_sign)
+
+ assert not crypt.verify_signature(to_sign, signature, OTHER_CERT_BYTES)
diff --git a/contrib/python/google-auth/py2/tests/crypt/test_es256.py b/contrib/python/google-auth/py2/tests/crypt/test_es256.py
new file mode 100644
index 0000000000..b10d0187b7
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/crypt/test_es256.py
@@ -0,0 +1,144 @@
+# Copyright 2016 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import os
+
+from cryptography.hazmat.primitives.asymmetric import ec
+import pytest
+
+from google.auth import _helpers
+from google.auth.crypt import base
+from google.auth.crypt import es256
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate es256_privatekey.pem, es256_privatekey.pub, and
+# es256_public_cert.pem:
+# $ openssl ecparam -genkey -name prime256v1 -noout -out es256_privatekey.pem
+# $ openssl ec -in es256-private-key.pem -pubout -out es256-publickey.pem
+# $ openssl req -new -x509 -key es256_privatekey.pem -out \
+# > es256_public_cert.pem
+
+with open(os.path.join(DATA_DIR, "es256_privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+ PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
+
+with open(os.path.join(DATA_DIR, "es256_publickey.pem"), "rb") as fh:
+ PUBLIC_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "es256_public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "es256_service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+class TestES256Verifier(object):
+ def test_verify_success(self):
+ to_sign = b"foo"
+ signer = es256.ES256Signer.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_unicode_success(self):
+ to_sign = u"foo"
+ signer = es256.ES256Signer.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_failure(self):
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ bad_signature1 = b""
+ assert not verifier.verify(b"foo", bad_signature1)
+ bad_signature2 = b"a"
+ assert not verifier.verify(b"foo", bad_signature2)
+
+ def test_verify_failure_with_wrong_raw_signature(self):
+ to_sign = b"foo"
+
+ # This signature has a wrong "r" value in the "(r,s)" raw signature.
+ wrong_signature = base64.urlsafe_b64decode(
+ b"m7oaRxUDeYqjZ8qiMwo0PZLTMZWKJLFQREpqce1StMIa_yXQQ-C5WgeIRHW7OqlYSDL0XbUrj_uAw9i-QhfOJQ=="
+ )
+
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert not verifier.verify(to_sign, wrong_signature)
+
+ def test_from_string_pub_key(self):
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+ def test_from_string_pub_key_unicode(self):
+ public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
+ verifier = es256.ES256Verifier.from_string(public_key)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+ def test_from_string_pub_cert(self):
+ verifier = es256.ES256Verifier.from_string(PUBLIC_CERT_BYTES)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+ def test_from_string_pub_cert_unicode(self):
+ public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
+ verifier = es256.ES256Verifier.from_string(public_cert)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+
+class TestES256Signer(object):
+ def test_from_string_pkcs1(self):
+ signer = es256.ES256Signer.from_string(PKCS1_KEY_BYTES)
+ assert isinstance(signer, es256.ES256Signer)
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
+
+ def test_from_string_pkcs1_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
+ signer = es256.ES256Signer.from_string(key_bytes)
+ assert isinstance(signer, es256.ES256Signer)
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
+
+ def test_from_string_bogus_key(self):
+ key_bytes = "bogus-key"
+ with pytest.raises(ValueError):
+ es256.ES256Signer.from_string(key_bytes)
+
+ def test_from_service_account_info(self):
+ signer = es256.ES256Signer.from_service_account_info(SERVICE_ACCOUNT_INFO)
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
+
+ def test_from_service_account_info_missing_key(self):
+ with pytest.raises(ValueError) as excinfo:
+ es256.ES256Signer.from_service_account_info({})
+
+ assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
+
+ def test_from_service_account_file(self):
+ signer = es256.ES256Signer.from_service_account_file(SERVICE_ACCOUNT_JSON_FILE)
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
diff --git a/contrib/python/google-auth/py2/tests/data/authorized_user.json b/contrib/python/google-auth/py2/tests/data/authorized_user.json
new file mode 100644
index 0000000000..4787acee57
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/authorized_user.json
@@ -0,0 +1,6 @@
+{
+ "client_id": "123",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user"
+}
diff --git a/contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk.json b/contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk.json
new file mode 100644
index 0000000000..c9e19a66e0
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk.json
@@ -0,0 +1,6 @@
+{
+ "client_id": "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user"
+}
diff --git a/contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json b/contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json
new file mode 100644
index 0000000000..53a8ff88aa
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json
@@ -0,0 +1,7 @@
+{
+ "client_id": "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user",
+ "quota_project_id": "quota_project_id"
+}
diff --git a/contrib/python/google-auth/py2/tests/data/client_secrets.json b/contrib/python/google-auth/py2/tests/data/client_secrets.json
new file mode 100644
index 0000000000..1baa4995af
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/client_secrets.json
@@ -0,0 +1,14 @@
+{
+ "web": {
+ "client_id": "example.apps.googleusercontent.com",
+ "project_id": "example",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "client_secret": "itsasecrettoeveryone",
+ "redirect_uris": [
+ "urn:ietf:wg:oauth:2.0:oob",
+ "http://localhost"
+ ]
+ }
+}
diff --git a/contrib/python/google-auth/py2/tests/data/cloud_sdk_config.json b/contrib/python/google-auth/py2/tests/data/cloud_sdk_config.json
new file mode 100644
index 0000000000..a5fe4a9a47
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/cloud_sdk_config.json
@@ -0,0 +1,19 @@
+{
+ "configuration": {
+ "active_configuration": "default",
+ "properties": {
+ "core": {
+ "account": "user@example.com",
+ "disable_usage_reporting": "False",
+ "project": "example-project"
+ }
+ }
+ },
+ "credential": {
+ "access_token": "don't use me",
+ "token_expiry": "2017-03-23T23:09:49Z"
+ },
+ "sentinels": {
+ "config_sentinel": "/Users/example/.config/gcloud/config_sentinel"
+ }
+}
diff --git a/contrib/python/google-auth/py2/tests/data/context_aware_metadata.json b/contrib/python/google-auth/py2/tests/data/context_aware_metadata.json
new file mode 100644
index 0000000000..ec40e783f1
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/context_aware_metadata.json
@@ -0,0 +1,6 @@
+{
+ "cert_provider_command":[
+ "/opt/google/endpoint-verification/bin/SecureConnectHelper",
+ "--print_certificate"],
+ "device_resource_ids":["11111111-1111-1111"]
+}
diff --git a/contrib/python/google-auth/py2/tests/data/es256_privatekey.pem b/contrib/python/google-auth/py2/tests/data/es256_privatekey.pem
new file mode 100644
index 0000000000..5c950b514f
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/es256_privatekey.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIAIC57aTx5ev4T2HBMQk4fXV09AzLDQ3Ju1uNoEB0LngoAoGCCqGSM49
+AwEHoUQDQgAEsACsrmP6Bp216OCFm73C8W/VRHZWcO8yU/bMwx96f05BkTII3KeJ
+z2O0IRAnXfso8K6YsjMuUDGCfj+b1IDIoA==
+-----END EC PRIVATE KEY-----
diff --git a/contrib/python/google-auth/py2/tests/data/es256_public_cert.pem b/contrib/python/google-auth/py2/tests/data/es256_public_cert.pem
new file mode 100644
index 0000000000..774ca14843
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/es256_public_cert.pem
@@ -0,0 +1,8 @@
+-----BEGIN CERTIFICATE-----
+MIIBGDCBwAIJAPUA0H4EQWsdMAoGCCqGSM49BAMCMBUxEzARBgNVBAMMCnVuaXQt
+dGVzdHMwHhcNMTkwNTA5MDI1MDExWhcNMTkwNjA4MDI1MDExWjAVMRMwEQYDVQQD
+DAp1bml0LXRlc3RzMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEsACsrmP6Bp21
+6OCFm73C8W/VRHZWcO8yU/bMwx96f05BkTII3KeJz2O0IRAnXfso8K6YsjMuUDGC
+fj+b1IDIoDAKBggqhkjOPQQDAgNHADBEAh8PcDTMyWk8SHqV/v8FLuMbDxdtAsq2
+dwCpuHQwqCcmAiEAnwtkiyieN+8zozaf1P4QKp2mAqNGqua50y3ua5uVotc=
+-----END CERTIFICATE-----
diff --git a/contrib/python/google-auth/py2/tests/data/es256_publickey.pem b/contrib/python/google-auth/py2/tests/data/es256_publickey.pem
new file mode 100644
index 0000000000..51f2a03fa4
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/es256_publickey.pem
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEsACsrmP6Bp216OCFm73C8W/VRHZW
+cO8yU/bMwx96f05BkTII3KeJz2O0IRAnXfso8K6YsjMuUDGCfj+b1IDIoA==
+-----END PUBLIC KEY-----
diff --git a/contrib/python/google-auth/py2/tests/data/es256_service_account.json b/contrib/python/google-auth/py2/tests/data/es256_service_account.json
new file mode 100644
index 0000000000..dd26719f62
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/es256_service_account.json
@@ -0,0 +1,10 @@
+{
+ "type": "service_account",
+ "project_id": "example-project",
+ "private_key_id": "1",
+ "private_key": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIAIC57aTx5ev4T2HBMQk4fXV09AzLDQ3Ju1uNoEB0LngoAoGCCqGSM49\nAwEHoUQDQgAEsACsrmP6Bp216OCFm73C8W/VRHZWcO8yU/bMwx96f05BkTII3KeJ\nz2O0IRAnXfso8K6YsjMuUDGCfj+b1IDIoA==\n-----END EC PRIVATE KEY-----",
+ "client_email": "service-account@example.com",
+ "client_id": "1234",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token"
+}
diff --git a/contrib/python/google-auth/py2/tests/data/external_subject_token.json b/contrib/python/google-auth/py2/tests/data/external_subject_token.json
new file mode 100644
index 0000000000..a47ec34127
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/external_subject_token.json
@@ -0,0 +1,3 @@
+{
+ "access_token": "HEADER.SIMULATED_JWT_PAYLOAD.SIGNATURE"
+} \ No newline at end of file
diff --git a/contrib/python/google-auth/py2/tests/data/external_subject_token.txt b/contrib/python/google-auth/py2/tests/data/external_subject_token.txt
new file mode 100644
index 0000000000..c668d8f71d
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/external_subject_token.txt
@@ -0,0 +1 @@
+HEADER.SIMULATED_JWT_PAYLOAD.SIGNATURE \ No newline at end of file
diff --git a/contrib/python/google-auth/py2/tests/data/old_oauth_credentials_py3.pickle b/contrib/python/google-auth/py2/tests/data/old_oauth_credentials_py3.pickle
new file mode 100644
index 0000000000..c8a05599b1
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/old_oauth_credentials_py3.pickle
Binary files differ
diff --git a/contrib/python/google-auth/py2/tests/data/other_cert.pem b/contrib/python/google-auth/py2/tests/data/other_cert.pem
new file mode 100644
index 0000000000..6895d1e7bf
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/other_cert.pem
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFtTCCA52gAwIBAgIJAPBsLZmNGfKtMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQwHhcNMTYwOTIxMDI0NTEyWhcNMTYxMDIxMDI0NTEyWjBF
+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
+ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAsiMC7mTsmUXwZoYlT4aHY1FLw8bxIXC+z3IqA+TY1WqfbeiZRo8MA5Zx
+lTTxYMKPCZUE1XBc7jvD8GJhWIj6pToPYHn73B01IBkLBxq4kF1yV2Z7DVmkvc6H
+EcxXXq8zkCx0j6XOfiI4+qkXnuQn8cvrk8xfhtnMMZM7iVm6VSN93iRP/8ey6xuL
+XTHrDX7ukoRce1hpT8O+15GXNrY0irhhYQz5xKibNCJF3EjV28WMry8y7I8uYUFU
+RWDiQawwK9ec1zhZ94v92+GZDlPevmcFmSERKYQ0NsKcT0Y3lGuGnaExs8GyOpnC
+oksu4YJGXQjg7lkv4MxzsNbRqmCkUwxw1Mg6FP0tsCNsw9qTrkvWCRA9zp/aU+sZ
+IBGh1t4UGCub8joeQFvHxvr/3F7mH/dyvCjA34u0Lo1VPx+jYUIi9i0odltMspDW
+xOpjqdGARZYmlJP5Au9q5cQjPMcwS/EBIb8cwNl32mUE6WnFlep+38mNR/FghIjO
+ViAkXuKQmcHe6xppZAoHFsO/t3l4Tjek5vNW7erI1rgrFku/fvkIW/G8V1yIm/+Q
+F+CE4maQzCJfhftpkhM/sPC/FuLNBmNE8BHVX8y58xG4is/cQxL4Z9TsFIw0C5+3
+uTrFW9D0agysahMVzPGtCqhDQqJdIJrBQqlS6bztpzBA8zEI0skCAwEAAaOBpzCB
+pDAdBgNVHQ4EFgQUz/8FmW6TfqXyNJZr7rhc+Tn5sKQwdQYDVR0jBG4wbIAUz/8F
+mW6TfqXyNJZr7rhc+Tn5sKShSaRHMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpT
+b21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGSCCQDw
+bC2ZjRnyrTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQCQmrcfhurX
+riR3Q0Y+nq040/3dJIAJXjyI9CEtxaU0nzCNTng7PwgZ0CKmCelQfInuwWFwBSHS
+6kBfC1rgJeFnjnTt8a3RCgRlIgUr9NCdPSEccB7TurobwPJ2h6cJjjR8urcb0CXh
+CEMvPneyPj0xUFY8vVKXMGWahz/kyfwIiVqcX/OtMZ29fUu1onbWl71g2gVLtUZl
+sECdZ+AC/6HDCVpYIVETMl1T7N/XyqXZQiDLDNRDeZhnapz8w9fsW1KVujAZLNQR
+pVnw2qa2UK1dSf2FHX+lQU5mFSYM4vtwaMlX/LgfdLZ9I796hFh619WwTVz+LO2N
+vHnwBMabld3XSPuZRqlbBulDQ07Vbqdjv8DYSLA2aKI4ZkMMKuFLG/oS28V2ZYmv
+/KpGEs5UgKY+P9NulYpTDwCU/6SomuQpP795wbG6sm7Hzq82r2RmB61GupNRGeqi
+pXKsy69T388zBxYu6zQrosXiDl5YzaViH7tm0J7opye8dCWjjpnahki0vq2znti7
+6cWla2j8Xz1glvLz+JI/NCOMfxUInb82T7ijo80N0VJ2hzf7p2GxRZXAxAV9knLI
+nM4F5TLjSd7ZhOOZ7ni/eZFueTMisWfypt2nc41whGjHMX/Zp1kPfhB4H2bLKIX/
+lSrwNr3qbGTEJX8JqpDBNVAd96XkMvDNyA==
+-----END CERTIFICATE-----
diff --git a/contrib/python/google-auth/py2/tests/data/pem_from_pkcs12.pem b/contrib/python/google-auth/py2/tests/data/pem_from_pkcs12.pem
new file mode 100644
index 0000000000..2d77e10c1f
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/pem_from_pkcs12.pem
@@ -0,0 +1,32 @@
+Bag Attributes
+ friendlyName: key
+ localKeyID: 22 7E 04 FC 64 48 20 83 1E C1 BD E3 F5 2F 44 7D EA 99 A5 BC
+Key Attributes: <No Attributes>
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDh6PSnttDsv+vi
+tUZTP1E3hVBah6PUGDWZhYgNiyW8quTWCmPvBmCR2YzuhUrY5+CtKP8UJOQico+p
+oJHSAPsrzSr6YsGs3c9SQOslBmm9Fkh9/f/GZVTVZ6u5AsUmOcVvZ2q7Sz8Vj/aR
+aIm0EJqRe9cQ5vvN9sg25rIv4xKwIZJ1VixKWJLmpCmDINqn7xvl+ldlUmSr3aGt
+w21uSDuEJhQlzO3yf2FwJMkJ9SkCm9oVDXyl77OnKXj5bOQ/rojbyGeIxDJSUDWE
+GKyRPuqKi6rSbwg6h2G/Z9qBJkqM5NNTbGRIFz/9/LdmmwvtaqCxlLtD7RVEryAp
++qTGDk5hAgMBAAECggEBAMYYfNDEYpf4A2SdCLne/9zrrfZ0kphdUkL48MDPj5vN
+TzTRj6f9s5ixZ/+QKn3hdwbguCx13QbH5mocP0IjUhyqoFFHYAWxyyaZfpjM8tO4
+QoEYxby3BpjLe62UXESUzChQSytJZFwIDXKcdIPNO3zvVzufEJcfG5no2b9cIvsG
+Dy6J1FNILWxCtDIqBM+G1B1is9DhZnUDgn0iKzINiZmh1I1l7k/4tMnozVIKAfwo
+f1kYjG/d2IzDM02mTeTElz3IKeNriaOIYTZgI26xLJxTkiFnBV4JOWFAZw15X+yR
++DrjGSIkTfhzbLa20Vt3AFM+LFK0ZoXT2dRnjbYPjQECgYEA+9XJFGwLcEX6pl1p
+IwXAjXKJdju9DDn4lmHTW0Pbw25h1EXONwm/NPafwsWmPll9kW9IwsxUQVUyBC9a
+c3Q7rF1e8ai/qqVFRIZof275MI82ciV2Mw8Hz7FPAUyoju5CvnjAEH4+irt1VE/7
+SgdvQ1gDBQFegS69ijdz+cOhFxkCgYEA5aVoseMy/gIlsCvNPyw9+Jz/zBpKItX0
+jGzdF7lhERRO2cursujKaoHntRckHcE3P/Z4K565bvVq+VaVG0T/BcBKPmPHrLmY
+iuVXidltW7Jh9/RCVwb5+BvqlwlC470PEwhqoUatY/fPJ74srztrqJHvp1L29FT5
+sdmlJW8YwokCgYAUa3dMgp5C0knKp5RY1KSSU5E11w4zKZgwiWob4lq1dAPWtHpO
+GCo63yyBHImoUJVP75gUw4Cpc4EEudo5tlkIVuHV8nroGVKOhd9/Rb5K47Hke4kk
+Brn5a0Ues9qPDF65Fw1ryPDFSwHufjXAAO5SpZZJF51UGDgiNvDedbBgMQKBgHSk
+t7DjPhtW69234eCckD2fQS5ijBV1p2lMQmCygGM0dXiawvN02puOsCqDPoz+fxm2
+DwPY80cw0M0k9UeMnBxHt25JMDrDan/iTbxu++T/jlNrdebOXFlxlI5y3c7fULDS
+LZcNVzTXwhjlt7yp6d0NgzTyJw2ju9BiREfnTiRBAoGBAOPHrTOnPyjO+bVcCPTB
+WGLsbBd77mVPGIuL0XGrvbVYPE8yIcNbZcthd8VXL/38Ygy8SIZh2ZqsrU1b5WFa
+XUMLnGEODSS8x/GmW3i3KeirW5OxBNjfUzEF4XkJP8m41iTdsQEXQf9DdUY7X+CB
+VL5h7N0VstYhGgycuPpcIUQa
+-----END PRIVATE KEY-----
diff --git a/contrib/python/google-auth/py2/tests/data/privatekey.p12 b/contrib/python/google-auth/py2/tests/data/privatekey.p12
new file mode 100644
index 0000000000..c369ecb6e6
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/privatekey.p12
Binary files differ
diff --git a/contrib/python/google-auth/py2/tests/data/privatekey.pem b/contrib/python/google-auth/py2/tests/data/privatekey.pem
new file mode 100644
index 0000000000..57443540ad
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/privatekey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj
+7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/
+xmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs
+SliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18
+pe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk
+SBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk
+nQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq
+HD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y
+nHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9
+IisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2
+YCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU
+Z422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ
+vzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP
+B8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl
+aLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2
+eCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI
+aqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk
+klORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ
+CFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu
+UqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg
+soBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28
+bvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH
+504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL
+YXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx
+BeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/python/google-auth/py2/tests/data/privatekey.pub b/contrib/python/google-auth/py2/tests/data/privatekey.pub
new file mode 100644
index 0000000000..11fdaa42f0
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/privatekey.pub
@@ -0,0 +1,8 @@
+-----BEGIN RSA PUBLIC KEY-----
+MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+kdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/xmVU
+1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYsSliS
+5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18pe+z
+pyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xkSBc/
+/fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+-----END RSA PUBLIC KEY-----
diff --git a/contrib/python/google-auth/py2/tests/data/public_cert.pem b/contrib/python/google-auth/py2/tests/data/public_cert.pem
new file mode 100644
index 0000000000..7af6ca3f93
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/public_cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDIzCCAgugAwIBAgIJAMfISuBQ5m+5MA0GCSqGSIb3DQEBBQUAMBUxEzARBgNV
+BAMTCnVuaXQtdGVzdHMwHhcNMTExMjA2MTYyNjAyWhcNMjExMjAzMTYyNjAyWjAV
+MRMwEQYDVQQDEwp1bml0LXRlc3RzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZgkdmM
+7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/xmVU1Wer
+uQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYsSliS5qQp
+gyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18pe+zpyl4
++WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xkSBc//fy3
+ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABo3YwdDAdBgNVHQ4EFgQU2RQ8yO+O
+gN8oVW2SW7RLrfYd9jEwRQYDVR0jBD4wPIAU2RQ8yO+OgN8oVW2SW7RLrfYd9jGh
+GaQXMBUxEzARBgNVBAMTCnVuaXQtdGVzdHOCCQDHyErgUOZvuTAMBgNVHRMEBTAD
+AQH/MA0GCSqGSIb3DQEBBQUAA4IBAQBRv+M/6+FiVu7KXNjFI5pSN17OcW5QUtPr
+odJMlWrJBtynn/TA1oJlYu3yV5clc/71Vr/AxuX5xGP+IXL32YDF9lTUJXG/uUGk
++JETpKmQviPbRsvzYhz4pf6ZIOZMc3/GIcNq92ECbseGO+yAgyWUVKMmZM0HqXC9
+ovNslqe0M8C1sLm1zAR5z/h/litE7/8O2ietija3Q/qtl2TOXJdCA6sgjJX2WUql
+ybrC55ct18NKf3qhpcEkGQvFU40rVYApJpi98DiZPYFdx1oBDp/f4uZ3ojpxRVFT
+cDwcJLfNRCPUhormsY7fDS9xSyThiHsW9mjJYdcaKQkwYZ0F11yB
+-----END CERTIFICATE-----
diff --git a/contrib/python/google-auth/py2/tests/data/service_account.json b/contrib/python/google-auth/py2/tests/data/service_account.json
new file mode 100644
index 0000000000..9e76f4d355
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/data/service_account.json
@@ -0,0 +1,10 @@
+{
+ "type": "service_account",
+ "project_id": "example-project",
+ "private_key_id": "1",
+ "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj\n7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/\nxmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs\nSliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18\npe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk\nSBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk\nnQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq\nHD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y\nnHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9\nIisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2\nYCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU\nZ422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ\nvzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP\nB8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl\naLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2\neCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI\naqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk\nklORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ\nCFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu\nUqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg\nsoBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28\nbvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH\n504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL\nYXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx\nBeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==\n-----END RSA PRIVATE KEY-----\n",
+ "client_email": "service-account@example.com",
+ "client_id": "1234",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token"
+}
diff --git a/contrib/python/google-auth/py2/tests/oauth2/__init__.py b/contrib/python/google-auth/py2/tests/oauth2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/__init__.py
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test__client.py b/contrib/python/google-auth/py2/tests/oauth2/test__client.py
new file mode 100644
index 0000000000..1dba2523e7
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test__client.py
@@ -0,0 +1,330 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+import mock
+import pytest
+import six
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import transport
+from google.oauth2 import _client
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+SIGNER = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+
+SCOPES_AS_LIST = [
+ "https://www.googleapis.com/auth/pubsub",
+ "https://www.googleapis.com/auth/logging.write",
+]
+SCOPES_AS_STRING = (
+ "https://www.googleapis.com/auth/pubsub"
+ " https://www.googleapis.com/auth/logging.write"
+)
+
+
+def test__handle_error_response():
+ response_data = {"error": "help", "error_description": "I'm alive"}
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client._handle_error_response(response_data)
+
+ assert excinfo.match(r"help: I\'m alive")
+
+
+def test__handle_error_response_non_json():
+ response_data = {"foo": "bar"}
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client._handle_error_response(response_data)
+
+ assert excinfo.match(r"{\"foo\": \"bar\"}")
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test__parse_expiry(unused_utcnow):
+ result = _client._parse_expiry({"expires_in": 500})
+ assert result == datetime.datetime.min + datetime.timedelta(seconds=500)
+
+
+def test__parse_expiry_none():
+ assert _client._parse_expiry({}) is None
+
+
+def make_request(response_data, status=http_client.OK):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = json.dumps(response_data).encode("utf-8")
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+ return request
+
+
+def test__token_endpoint_request():
+ request = make_request({"test": "response"})
+
+ result = _client._token_endpoint_request(
+ request, "http://example.com", {"test": "params"}
+ )
+
+ # Check request call
+ request.assert_called_with(
+ method="POST",
+ url="http://example.com",
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ body="test=params".encode("utf-8"),
+ )
+
+ # Check result
+ assert result == {"test": "response"}
+
+
+def test__token_endpoint_request_use_json():
+ request = make_request({"test": "response"})
+
+ result = _client._token_endpoint_request(
+ request,
+ "http://example.com",
+ {"test": "params"},
+ access_token="access_token",
+ use_json=True,
+ )
+
+ # Check request call
+ request.assert_called_with(
+ method="POST",
+ url="http://example.com",
+ headers={
+ "Content-Type": "application/json",
+ "Authorization": "Bearer access_token",
+ },
+ body=b'{"test": "params"}',
+ )
+
+ # Check result
+ assert result == {"test": "response"}
+
+
+def test__token_endpoint_request_error():
+ request = make_request({}, status=http_client.BAD_REQUEST)
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(request, "http://example.com", {})
+
+
+def test__token_endpoint_request_internal_failure_error():
+ request = make_request(
+ {"error_description": "internal_failure"}, status=http_client.BAD_REQUEST
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(
+ request, "http://example.com", {"error_description": "internal_failure"}
+ )
+
+ request = make_request(
+ {"error": "internal_failure"}, status=http_client.BAD_REQUEST
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(
+ request, "http://example.com", {"error": "internal_failure"}
+ )
+
+
+def verify_request_params(request, params):
+ request_body = request.call_args[1]["body"].decode("utf-8")
+ request_params = urllib.parse.parse_qs(request_body)
+
+ for key, value in six.iteritems(params):
+ assert request_params[key][0] == value
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_jwt_grant(utcnow):
+ request = make_request(
+ {"access_token": "token", "expires_in": 500, "extra": "data"}
+ )
+
+ token, expiry, extra_data = _client.jwt_grant(
+ request, "http://example.com", "assertion_value"
+ )
+
+ # Check request call
+ verify_request_params(
+ request, {"grant_type": _client._JWT_GRANT_TYPE, "assertion": "assertion_value"}
+ )
+
+ # Check result
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=500)
+ assert extra_data["extra"] == "data"
+
+
+def test_jwt_grant_no_access_token():
+ request = make_request(
+ {
+ # No access token.
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client.jwt_grant(request, "http://example.com", "assertion_value")
+
+
+def test_id_token_jwt_grant():
+ now = _helpers.utcnow()
+ id_token_expiry = _helpers.datetime_to_secs(now)
+ id_token = jwt.encode(SIGNER, {"exp": id_token_expiry}).decode("utf-8")
+ request = make_request({"id_token": id_token, "extra": "data"})
+
+ token, expiry, extra_data = _client.id_token_jwt_grant(
+ request, "http://example.com", "assertion_value"
+ )
+
+ # Check request call
+ verify_request_params(
+ request, {"grant_type": _client._JWT_GRANT_TYPE, "assertion": "assertion_value"}
+ )
+
+ # Check result
+ assert token == id_token
+ # JWT does not store microseconds
+ now = now.replace(microsecond=0)
+ assert expiry == now
+ assert extra_data["extra"] == "data"
+
+
+def test_id_token_jwt_grant_no_access_token():
+ request = make_request(
+ {
+ # No access token.
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client.id_token_jwt_grant(request, "http://example.com", "assertion_value")
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_refresh_grant(unused_utcnow):
+ request = make_request(
+ {
+ "access_token": "token",
+ "refresh_token": "new_refresh_token",
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ token, refresh_token, expiry, extra_data = _client.refresh_grant(
+ request,
+ "http://example.com",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ rapt_token="rapt_token",
+ )
+
+ # Check request call
+ verify_request_params(
+ request,
+ {
+ "grant_type": _client._REFRESH_GRANT_TYPE,
+ "refresh_token": "refresh_token",
+ "client_id": "client_id",
+ "client_secret": "client_secret",
+ "rapt": "rapt_token",
+ },
+ )
+
+ # Check result
+ assert token == "token"
+ assert refresh_token == "new_refresh_token"
+ assert expiry == datetime.datetime.min + datetime.timedelta(seconds=500)
+ assert extra_data["extra"] == "data"
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_refresh_grant_with_scopes(unused_utcnow):
+ request = make_request(
+ {
+ "access_token": "token",
+ "refresh_token": "new_refresh_token",
+ "expires_in": 500,
+ "extra": "data",
+ "scope": SCOPES_AS_STRING,
+ }
+ )
+
+ token, refresh_token, expiry, extra_data = _client.refresh_grant(
+ request,
+ "http://example.com",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ SCOPES_AS_LIST,
+ )
+
+ # Check request call.
+ verify_request_params(
+ request,
+ {
+ "grant_type": _client._REFRESH_GRANT_TYPE,
+ "refresh_token": "refresh_token",
+ "client_id": "client_id",
+ "client_secret": "client_secret",
+ "scope": SCOPES_AS_STRING,
+ },
+ )
+
+ # Check result.
+ assert token == "token"
+ assert refresh_token == "new_refresh_token"
+ assert expiry == datetime.datetime.min + datetime.timedelta(seconds=500)
+ assert extra_data["extra"] == "data"
+
+
+def test_refresh_grant_no_access_token():
+ request = make_request(
+ {
+ # No access token.
+ "refresh_token": "new_refresh_token",
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client.refresh_grant(
+ request, "http://example.com", "refresh_token", "client_id", "client_secret"
+ )
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_challenges.py b/contrib/python/google-auth/py2/tests/oauth2/test_challenges.py
new file mode 100644
index 0000000000..019b908dae
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_challenges.py
@@ -0,0 +1,132 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for the reauth module."""
+
+import base64
+import sys
+
+import mock
+import pytest
+import pyu2f
+
+from google.auth import exceptions
+from google.oauth2 import challenges
+
+
+def test_get_user_password():
+ with mock.patch("getpass.getpass", return_value="foo"):
+ assert challenges.get_user_password("") == "foo"
+
+
+def test_security_key():
+ metadata = {
+ "status": "READY",
+ "challengeId": 2,
+ "challengeType": "SECURITY_KEY",
+ "securityKey": {
+ "applicationId": "security_key_application_id",
+ "challenges": [
+ {
+ "keyHandle": "some_key",
+ "challenge": base64.urlsafe_b64encode(
+ "some_challenge".encode("ascii")
+ ).decode("ascii"),
+ }
+ ],
+ },
+ }
+ mock_key = mock.Mock()
+
+ challenge = challenges.SecurityKeyChallenge()
+
+ # Test the case that security key challenge is passed.
+ with mock.patch("pyu2f.model.RegisteredKey", return_value=mock_key):
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.return_value = "security key response"
+ assert challenge.name == "SECURITY_KEY"
+ assert challenge.is_locally_eligible
+ assert challenge.obtain_challenge_input(metadata) == {
+ "securityKey": "security key response"
+ }
+ mock_authenticate.assert_called_with(
+ "security_key_application_id",
+ [{"key": mock_key, "challenge": b"some_challenge"}],
+ print_callback=sys.stderr.write,
+ )
+
+ # Test various types of exceptions.
+ with mock.patch("pyu2f.model.RegisteredKey", return_value=mock_key):
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.U2FError(
+ pyu2f.errors.U2FError.DEVICE_INELIGIBLE
+ )
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.U2FError(
+ pyu2f.errors.U2FError.TIMEOUT
+ )
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.U2FError(
+ pyu2f.errors.U2FError.BAD_REQUEST
+ )
+ with pytest.raises(pyu2f.errors.U2FError):
+ challenge.obtain_challenge_input(metadata)
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.NoDeviceFoundError()
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.UnsupportedVersionException()
+ with pytest.raises(pyu2f.errors.UnsupportedVersionException):
+ challenge.obtain_challenge_input(metadata)
+
+ with mock.patch.dict("sys.modules"):
+ sys.modules["pyu2f"] = None
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ challenge.obtain_challenge_input(metadata)
+ assert excinfo.match(r"pyu2f dependency is required")
+
+
+@mock.patch("getpass.getpass", return_value="foo")
+def test_password_challenge(getpass_mock):
+ challenge = challenges.PasswordChallenge()
+
+ with mock.patch("getpass.getpass", return_value="foo"):
+ assert challenge.is_locally_eligible
+ assert challenge.name == "PASSWORD"
+ assert challenges.PasswordChallenge().obtain_challenge_input({}) == {
+ "credential": "foo"
+ }
+
+ with mock.patch("getpass.getpass", return_value=None):
+ assert challenges.PasswordChallenge().obtain_challenge_input({}) == {
+ "credential": " "
+ }
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_credentials.py b/contrib/python/google-auth/py2/tests/oauth2/test_credentials.py
new file mode 100644
index 0000000000..5c21ebe547
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_credentials.py
@@ -0,0 +1,876 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+import pickle
+import sys
+
+import mock
+import pytest
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import credentials
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+AUTH_USER_JSON_FILE = os.path.join(DATA_DIR, "authorized_user.json")
+
+with open(AUTH_USER_JSON_FILE, "r") as fh:
+ AUTH_USER_INFO = json.load(fh)
+
+
+class TestCredentials(object):
+ TOKEN_URI = "https://example.com/oauth2/token"
+ REFRESH_TOKEN = "refresh_token"
+ RAPT_TOKEN = "rapt_token"
+ CLIENT_ID = "client_id"
+ CLIENT_SECRET = "client_secret"
+
+ @classmethod
+ def make_credentials(cls):
+ return credentials.Credentials(
+ token=None,
+ refresh_token=cls.REFRESH_TOKEN,
+ token_uri=cls.TOKEN_URI,
+ client_id=cls.CLIENT_ID,
+ client_secret=cls.CLIENT_SECRET,
+ rapt_token=cls.RAPT_TOKEN,
+ )
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+ # Scopes aren't required for these credentials
+ assert not credentials.requires_scopes
+ # Test properties
+ assert credentials.refresh_token == self.REFRESH_TOKEN
+ assert credentials.token_uri == self.TOKEN_URI
+ assert credentials.client_id == self.CLIENT_ID
+ assert credentials.client_secret == self.CLIENT_SECRET
+ assert credentials.rapt_token == self.RAPT_TOKEN
+ assert credentials.refresh_handler is None
+
+ def test_refresh_handler_setter_and_getter(self):
+ scopes = ["email", "profile"]
+ original_refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN_1", None))
+ updated_refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN_2", None))
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=None,
+ refresh_handler=original_refresh_handler,
+ )
+
+ assert creds.refresh_handler is original_refresh_handler
+
+ creds.refresh_handler = updated_refresh_handler
+
+ assert creds.refresh_handler is updated_refresh_handler
+
+ creds.refresh_handler = None
+
+ assert creds.refresh_handler is None
+
+ def test_invalid_refresh_handler(self):
+ scopes = ["email", "profile"]
+ with pytest.raises(TypeError) as excinfo:
+ credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=None,
+ refresh_handler=object(),
+ )
+
+ assert excinfo.match("The provided refresh_handler is not a callable or None.")
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ def test_refresh_success(self, unused_utcnow, refresh_grant):
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt_token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ credentials = self.make_credentials()
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ None,
+ self.RAPT_TOKEN,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert credentials.token == token
+ assert credentials.expiry == expiry
+ assert credentials.id_token == mock.sentinel.id_token
+ assert credentials.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert credentials.valid
+
+ def test_refresh_no_refresh_token(self):
+ request = mock.create_autospec(transport.Request)
+ credentials_ = credentials.Credentials(token=None, refresh_token=None)
+
+ with pytest.raises(exceptions.RefreshError, match="necessary fields"):
+ credentials_.refresh(request)
+
+ request.assert_not_called()
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ def test_refresh_with_refresh_token_and_refresh_handler(
+ self, unused_utcnow, refresh_grant
+ ):
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt_token
+ new_rapt_token,
+ )
+
+ refresh_handler = mock.Mock()
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ rapt_token=self.RAPT_TOKEN,
+ refresh_handler=refresh_handler,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ None,
+ self.RAPT_TOKEN,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert creds.valid
+
+ # Assert refresh handler not called as the refresh token has
+ # higher priority.
+ refresh_handler.assert_not_called()
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_success_scopes(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + datetime.timedelta(seconds=2800)
+ refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN", expected_expiry))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ creds.refresh(request)
+
+ assert creds.token == "ACCESS_TOKEN"
+ assert creds.expiry == expected_expiry
+ assert creds.valid
+ assert not creds.expired
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_success_default_scopes(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + datetime.timedelta(seconds=2800)
+ original_refresh_handler = mock.Mock(
+ return_value=("UNUSED_TOKEN", expected_expiry)
+ )
+ refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN", expected_expiry))
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=None,
+ default_scopes=default_scopes,
+ refresh_handler=original_refresh_handler,
+ )
+
+ # Test newly set refresh_handler is used instead of the original one.
+ creds.refresh_handler = refresh_handler
+ creds.refresh(request)
+
+ assert creds.token == "ACCESS_TOKEN"
+ assert creds.expiry == expected_expiry
+ assert creds.valid
+ assert not creds.expired
+ # default_scopes should be used since no developer provided scopes
+ # are provided.
+ refresh_handler.assert_called_with(request, scopes=default_scopes)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_invalid_token(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + datetime.timedelta(seconds=2800)
+ # Simulate refresh handler does not return a valid token.
+ refresh_handler = mock.Mock(return_value=(None, expected_expiry))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ with pytest.raises(
+ exceptions.RefreshError, match="returned token is not a string"
+ ):
+ creds.refresh(request)
+
+ assert creds.token is None
+ assert creds.expiry is None
+ assert not creds.valid
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ def test_refresh_with_refresh_handler_invalid_expiry(self):
+ # Simulate refresh handler returns expiration time in an invalid unit.
+ refresh_handler = mock.Mock(return_value=("TOKEN", 2800))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ with pytest.raises(
+ exceptions.RefreshError, match="returned expiry is not a datetime object"
+ ):
+ creds.refresh(request)
+
+ assert creds.token is None
+ assert creds.expiry is None
+ assert not creds.valid
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_expired_token(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + _helpers.CLOCK_SKEW
+ # Simulate refresh handler returns an expired token.
+ refresh_handler = mock.Mock(return_value=("TOKEN", expected_expiry))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ with pytest.raises(exceptions.RefreshError, match="already expired"):
+ creds.refresh(request)
+
+ assert creds.token is None
+ assert creds.expiry is None
+ assert not creds.valid
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ def test_credentials_with_scopes_requested_refresh_success(
+ self, unused_utcnow, refresh_grant
+ ):
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token, "scope": "email profile"}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ rapt_token=self.RAPT_TOKEN,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ scopes,
+ self.RAPT_TOKEN,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(scopes)
+ assert creds.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ def test_credentials_with_only_default_scopes_requested(
+ self, unused_utcnow, refresh_grant
+ ):
+ default_scopes = ["email", "profile"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ default_scopes=default_scopes,
+ rapt_token=self.RAPT_TOKEN,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ default_scopes,
+ self.RAPT_TOKEN,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(default_scopes)
+ assert creds.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ def test_credentials_with_scopes_returned_refresh_success(
+ self, unused_utcnow, refresh_grant
+ ):
+ scopes = ["email", "profile"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {
+ "id_token": mock.sentinel.id_token,
+ "scopes": " ".join(scopes),
+ }
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ scopes=scopes,
+ rapt_token=self.RAPT_TOKEN,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ scopes,
+ self.RAPT_TOKEN,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(scopes)
+ assert creds.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.CLOCK_SKEW,
+ )
+ def test_credentials_with_scopes_refresh_failure_raises_refresh_error(
+ self, unused_utcnow, refresh_grant
+ ):
+ scopes = ["email", "profile"]
+ scopes_returned = ["email"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {
+ "id_token": mock.sentinel.id_token,
+ "scope": " ".join(scopes_returned),
+ }
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ scopes=scopes,
+ rapt_token=self.RAPT_TOKEN,
+ )
+
+ # Refresh credentials
+ with pytest.raises(
+ exceptions.RefreshError, match="Not all requested scopes were granted"
+ ):
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ scopes,
+ self.RAPT_TOKEN,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(scopes)
+ assert creds.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ def test_apply_with_quota_project_id(self):
+ creds = credentials.Credentials(
+ token="token",
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ quota_project_id="quota-project-123",
+ )
+
+ headers = {}
+ creds.apply(headers)
+ assert headers["x-goog-user-project"] == "quota-project-123"
+ assert "token" in headers["authorization"]
+
+ def test_apply_with_no_quota_project_id(self):
+ creds = credentials.Credentials(
+ token="token",
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ )
+
+ headers = {}
+ creds.apply(headers)
+ assert "x-goog-user-project" not in headers
+ assert "token" in headers["authorization"]
+
+ def test_with_quota_project(self):
+ creds = credentials.Credentials(
+ token="token",
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ quota_project_id="quota-project-123",
+ )
+
+ new_creds = creds.with_quota_project("new-project-456")
+ assert new_creds.quota_project_id == "new-project-456"
+ headers = {}
+ creds.apply(headers)
+ assert "x-goog-user-project" in headers
+
+ def test_from_authorized_user_info(self):
+ info = AUTH_USER_INFO.copy()
+
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes is None
+
+ scopes = ["email", "profile"]
+ creds = credentials.Credentials.from_authorized_user_info(info, scopes)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes == scopes
+
+ info["scopes"] = "email" # single non-array scope from file
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.scopes == [info["scopes"]]
+
+ info["scopes"] = ["email", "profile"] # array scope from file
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.scopes == info["scopes"]
+
+ expiry = datetime.datetime(2020, 8, 14, 15, 54, 1)
+ info["expiry"] = expiry.isoformat() + "Z"
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.expiry == expiry
+ assert creds.expired
+
+ def test_from_authorized_user_file(self):
+ info = AUTH_USER_INFO.copy()
+
+ creds = credentials.Credentials.from_authorized_user_file(AUTH_USER_JSON_FILE)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes is None
+
+ scopes = ["email", "profile"]
+ creds = credentials.Credentials.from_authorized_user_file(
+ AUTH_USER_JSON_FILE, scopes
+ )
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes == scopes
+
+ def test_to_json(self):
+ info = AUTH_USER_INFO.copy()
+ expiry = datetime.datetime(2020, 8, 14, 15, 54, 1)
+ info["expiry"] = expiry.isoformat() + "Z"
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.expiry == expiry
+
+ # Test with no `strip` arg
+ json_output = creds.to_json()
+ json_asdict = json.loads(json_output)
+ assert json_asdict.get("token") == creds.token
+ assert json_asdict.get("refresh_token") == creds.refresh_token
+ assert json_asdict.get("token_uri") == creds.token_uri
+ assert json_asdict.get("client_id") == creds.client_id
+ assert json_asdict.get("scopes") == creds.scopes
+ assert json_asdict.get("client_secret") == creds.client_secret
+ assert json_asdict.get("expiry") == info["expiry"]
+
+ # Test with a `strip` arg
+ json_output = creds.to_json(strip=["client_secret"])
+ json_asdict = json.loads(json_output)
+ assert json_asdict.get("token") == creds.token
+ assert json_asdict.get("refresh_token") == creds.refresh_token
+ assert json_asdict.get("token_uri") == creds.token_uri
+ assert json_asdict.get("client_id") == creds.client_id
+ assert json_asdict.get("scopes") == creds.scopes
+ assert json_asdict.get("client_secret") is None
+
+ # Test with no expiry
+ creds.expiry = None
+ json_output = creds.to_json()
+ json_asdict = json.loads(json_output)
+ assert json_asdict.get("expiry") is None
+
+ def test_pickle_and_unpickle(self):
+ creds = self.make_credentials()
+ unpickled = pickle.loads(pickle.dumps(creds))
+
+ # make sure attributes aren't lost during pickling
+ assert list(creds.__dict__).sort() == list(unpickled.__dict__).sort()
+
+ for attr in list(creds.__dict__):
+ assert getattr(creds, attr) == getattr(unpickled, attr)
+
+ def test_pickle_and_unpickle_with_refresh_handler(self):
+ expected_expiry = _helpers.utcnow() + datetime.timedelta(seconds=2800)
+ refresh_handler = mock.Mock(return_value=("TOKEN", expected_expiry))
+
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ refresh_handler=refresh_handler,
+ )
+ unpickled = pickle.loads(pickle.dumps(creds))
+
+ # make sure attributes aren't lost during pickling
+ assert list(creds.__dict__).sort() == list(unpickled.__dict__).sort()
+
+ for attr in list(creds.__dict__):
+ # For the _refresh_handler property, the unpickled creds should be
+ # set to None.
+ if attr == "_refresh_handler":
+ assert getattr(unpickled, attr) is None
+ else:
+ assert getattr(creds, attr) == getattr(unpickled, attr)
+
+ def test_pickle_with_missing_attribute(self):
+ creds = self.make_credentials()
+
+ # remove an optional attribute before pickling
+ # this mimics a pickle created with a previous class definition with
+ # fewer attributes
+ del creds.__dict__["_quota_project_id"]
+
+ unpickled = pickle.loads(pickle.dumps(creds))
+
+ # Attribute should be initialized by `__setstate__`
+ assert unpickled.quota_project_id is None
+
+ # pickles are not compatible across versions
+ @pytest.mark.skipif(
+ sys.version_info < (3, 5),
+ reason="pickle file can only be loaded with Python >= 3.5",
+ )
+ def test_unpickle_old_credentials_pickle(self):
+ # make sure a credentials file pickled with an older
+ # library version (google-auth==1.5.1) can be unpickled
+ with open(
+ os.path.join(DATA_DIR, "old_oauth_credentials_py3.pickle"), "rb"
+ ) as f:
+ credentials = pickle.load(f)
+ assert credentials.quota_project_id is None
+
+
+class TestUserAccessTokenCredentials(object):
+ def test_instance(self):
+ cred = credentials.UserAccessTokenCredentials()
+ assert cred._account is None
+
+ cred = cred.with_account("account")
+ assert cred._account == "account"
+
+ @mock.patch("google.auth._cloud_sdk.get_auth_access_token", autospec=True)
+ def test_refresh(self, get_auth_access_token):
+ get_auth_access_token.return_value = "access_token"
+ cred = credentials.UserAccessTokenCredentials()
+ cred.refresh(None)
+ assert cred.token == "access_token"
+
+ def test_with_quota_project(self):
+ cred = credentials.UserAccessTokenCredentials()
+ quota_project_cred = cred.with_quota_project("project-foo")
+
+ assert quota_project_cred._quota_project_id == "project-foo"
+ assert quota_project_cred._account == cred._account
+
+ @mock.patch(
+ "google.oauth2.credentials.UserAccessTokenCredentials.apply", autospec=True
+ )
+ @mock.patch(
+ "google.oauth2.credentials.UserAccessTokenCredentials.refresh", autospec=True
+ )
+ def test_before_request(self, refresh, apply):
+ cred = credentials.UserAccessTokenCredentials()
+ cred.before_request(mock.Mock(), "GET", "https://example.com", {})
+ refresh.assert_called()
+ apply.assert_called()
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_id_token.py b/contrib/python/google-auth/py2/tests/oauth2/test_id_token.py
new file mode 100644
index 0000000000..9576f2562a
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_id_token.py
@@ -0,0 +1,228 @@
+# Copyright 2014 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import mock
+import pytest
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+import google.auth.compute_engine._metadata
+from google.oauth2 import id_token
+from google.oauth2 import service_account
+
+import yatest.common
+SERVICE_ACCOUNT_FILE = os.path.join(
+ yatest.common.test_source_path(), "data/service_account.json"
+)
+
+
+def make_request(status, data=None):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+
+ if data is not None:
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+ return request
+
+
+def test__fetch_certs_success():
+ certs = {"1": "cert"}
+ request = make_request(200, certs)
+
+ returned_certs = id_token._fetch_certs(request, mock.sentinel.cert_url)
+
+ request.assert_called_once_with(mock.sentinel.cert_url, method="GET")
+ assert returned_certs == certs
+
+
+def test__fetch_certs_failure():
+ request = make_request(404)
+
+ with pytest.raises(exceptions.TransportError):
+ id_token._fetch_certs(request, mock.sentinel.cert_url)
+
+ request.assert_called_once_with(mock.sentinel.cert_url, method="GET")
+
+
+@mock.patch("google.auth.jwt.decode", autospec=True)
+@mock.patch("google.oauth2.id_token._fetch_certs", autospec=True)
+def test_verify_token(_fetch_certs, decode):
+ result = id_token.verify_token(mock.sentinel.token, mock.sentinel.request)
+
+ assert result == decode.return_value
+ _fetch_certs.assert_called_once_with(
+ mock.sentinel.request, id_token._GOOGLE_OAUTH2_CERTS_URL
+ )
+ decode.assert_called_once_with(
+ mock.sentinel.token, certs=_fetch_certs.return_value, audience=None
+ )
+
+
+@mock.patch("google.auth.jwt.decode", autospec=True)
+@mock.patch("google.oauth2.id_token._fetch_certs", autospec=True)
+def test_verify_token_args(_fetch_certs, decode):
+ result = id_token.verify_token(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=mock.sentinel.certs_url,
+ )
+
+ assert result == decode.return_value
+ _fetch_certs.assert_called_once_with(mock.sentinel.request, mock.sentinel.certs_url)
+ decode.assert_called_once_with(
+ mock.sentinel.token,
+ certs=_fetch_certs.return_value,
+ audience=mock.sentinel.audience,
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_oauth2_token(verify_token):
+ verify_token.return_value = {"iss": "accounts.google.com"}
+ result = id_token.verify_oauth2_token(
+ mock.sentinel.token, mock.sentinel.request, audience=mock.sentinel.audience
+ )
+
+ assert result == verify_token.return_value
+ verify_token.assert_called_once_with(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=id_token._GOOGLE_OAUTH2_CERTS_URL,
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_oauth2_token_invalid_iss(verify_token):
+ verify_token.return_value = {"iss": "invalid_issuer"}
+
+ with pytest.raises(exceptions.GoogleAuthError):
+ id_token.verify_oauth2_token(
+ mock.sentinel.token, mock.sentinel.request, audience=mock.sentinel.audience
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_firebase_token(verify_token):
+ result = id_token.verify_firebase_token(
+ mock.sentinel.token, mock.sentinel.request, audience=mock.sentinel.audience
+ )
+
+ assert result == verify_token.return_value
+ verify_token.assert_called_once_with(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=id_token._GOOGLE_APIS_CERTS_URL,
+ )
+
+
+def test_fetch_id_token_from_metadata_server(monkeypatch):
+ monkeypatch.delenv(environment_vars.CREDENTIALS, raising=False)
+
+ def mock_init(self, request, audience, use_metadata_identity_endpoint):
+ assert use_metadata_identity_endpoint
+ self.token = "id_token"
+
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=True):
+ with mock.patch.multiple(
+ google.auth.compute_engine.IDTokenCredentials,
+ __init__=mock_init,
+ refresh=mock.Mock(),
+ ):
+ request = mock.Mock()
+ token = id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert token == "id_token"
+
+
+def test_fetch_id_token_from_explicit_cred_json_file(monkeypatch):
+ monkeypatch.setenv(environment_vars.CREDENTIALS, SERVICE_ACCOUNT_FILE)
+
+ def mock_refresh(self, request):
+ self.token = "id_token"
+
+ with mock.patch.object(service_account.IDTokenCredentials, "refresh", mock_refresh):
+ request = mock.Mock()
+ token = id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert token == "id_token"
+
+
+def test_fetch_id_token_no_cred_exists(monkeypatch):
+ monkeypatch.delenv(environment_vars.CREDENTIALS, raising=False)
+
+ with mock.patch(
+ "google.auth.compute_engine._metadata.ping",
+ side_effect=exceptions.TransportError(),
+ ):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ request = mock.Mock()
+ id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert excinfo.match(
+ r"Neither metadata server or valid service account credentials are found."
+ )
+
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=False):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ request = mock.Mock()
+ id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert excinfo.match(
+ r"Neither metadata server or valid service account credentials are found."
+ )
+
+
+def test_fetch_id_token_invalid_cred_file_type(monkeypatch):
+ user_credentials_file = os.path.join(
+ yatest.common.test_source_path(), "data/authorized_user.json"
+ )
+ monkeypatch.setenv(environment_vars.CREDENTIALS, user_credentials_file)
+
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=False):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ request = mock.Mock()
+ id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert excinfo.match(
+ r"Neither metadata server or valid service account credentials are found."
+ )
+
+
+def test_fetch_id_token_invalid_json(monkeypatch):
+ not_json_file = os.path.join(yatest.common.test_source_path(), "data/public_cert.pem")
+ monkeypatch.setenv(environment_vars.CREDENTIALS, not_json_file)
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ request = mock.Mock()
+ id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert excinfo.match(
+ r"GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials."
+ )
+
+
+def test_fetch_id_token_invalid_cred_path(monkeypatch):
+ not_json_file = os.path.join(yatest.common.test_source_path(), "data/not_exists.json")
+ monkeypatch.setenv(environment_vars.CREDENTIALS, not_json_file)
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ request = mock.Mock()
+ id_token.fetch_id_token(request, "https://pubsub.googleapis.com")
+ assert excinfo.match(
+ r"GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+ )
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_reauth.py b/contrib/python/google-auth/py2/tests/oauth2/test_reauth.py
new file mode 100644
index 0000000000..7876986873
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_reauth.py
@@ -0,0 +1,308 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import mock
+import pytest
+
+from google.auth import exceptions
+from google.oauth2 import reauth
+
+
+MOCK_REQUEST = mock.Mock()
+CHALLENGES_RESPONSE_TEMPLATE = {
+ "status": "CHALLENGE_REQUIRED",
+ "sessionId": "123",
+ "challenges": [
+ {
+ "status": "READY",
+ "challengeId": 1,
+ "challengeType": "PASSWORD",
+ "securityKey": {},
+ }
+ ],
+}
+CHALLENGES_RESPONSE_AUTHENTICATED = {
+ "status": "AUTHENTICATED",
+ "sessionId": "123",
+ "encodedProofOfReauthToken": "new_rapt_token",
+}
+
+
+class MockChallenge(object):
+ def __init__(self, name, locally_eligible, challenge_input):
+ self.name = name
+ self.is_locally_eligible = locally_eligible
+ self.challenge_input = challenge_input
+
+ def obtain_challenge_input(self, metadata):
+ return self.challenge_input
+
+
+def _test_is_interactive():
+ with mock.patch("sys.stdin.isatty", return_value=True):
+ assert reauth.is_interactive()
+
+
+def test__get_challenges():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request"
+ ) as mock_token_endpoint_request:
+ reauth._get_challenges(MOCK_REQUEST, ["SAML"], "token")
+ mock_token_endpoint_request.assert_called_with(
+ MOCK_REQUEST,
+ reauth._REAUTH_API + ":start",
+ {"supportedChallengeTypes": ["SAML"]},
+ access_token="token",
+ use_json=True,
+ )
+
+
+def test__get_challenges_with_scopes():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request"
+ ) as mock_token_endpoint_request:
+ reauth._get_challenges(
+ MOCK_REQUEST, ["SAML"], "token", requested_scopes=["scope"]
+ )
+ mock_token_endpoint_request.assert_called_with(
+ MOCK_REQUEST,
+ reauth._REAUTH_API + ":start",
+ {
+ "supportedChallengeTypes": ["SAML"],
+ "oauthScopesForDomainPolicyLookup": ["scope"],
+ },
+ access_token="token",
+ use_json=True,
+ )
+
+
+def test__send_challenge_result():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request"
+ ) as mock_token_endpoint_request:
+ reauth._send_challenge_result(
+ MOCK_REQUEST, "123", "1", {"credential": "password"}, "token"
+ )
+ mock_token_endpoint_request.assert_called_with(
+ MOCK_REQUEST,
+ reauth._REAUTH_API + "/123:continue",
+ {
+ "sessionId": "123",
+ "challengeId": "1",
+ "action": "RESPOND",
+ "proposalResponse": {"credential": "password"},
+ },
+ access_token="token",
+ use_json=True,
+ )
+
+
+def test__run_next_challenge_not_ready():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ challenges_response["challenges"][0]["status"] = "STATUS_UNSPECIFIED"
+ assert (
+ reauth._run_next_challenge(challenges_response, MOCK_REQUEST, "token") is None
+ )
+
+
+def test__run_next_challenge_not_supported():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ challenges_response["challenges"][0]["challengeType"] = "CHALLENGE_TYPE_UNSPECIFIED"
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._run_next_challenge(challenges_response, MOCK_REQUEST, "token")
+ assert excinfo.match(r"Unsupported challenge type CHALLENGE_TYPE_UNSPECIFIED")
+
+
+def test__run_next_challenge_not_locally_eligible():
+ mock_challenge = MockChallenge("PASSWORD", False, "challenge_input")
+ with mock.patch(
+ "google.oauth2.challenges.AVAILABLE_CHALLENGES", {"PASSWORD": mock_challenge}
+ ):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._run_next_challenge(
+ CHALLENGES_RESPONSE_TEMPLATE, MOCK_REQUEST, "token"
+ )
+ assert excinfo.match(r"Challenge PASSWORD is not locally eligible")
+
+
+def test__run_next_challenge_no_challenge_input():
+ mock_challenge = MockChallenge("PASSWORD", True, None)
+ with mock.patch(
+ "google.oauth2.challenges.AVAILABLE_CHALLENGES", {"PASSWORD": mock_challenge}
+ ):
+ assert (
+ reauth._run_next_challenge(
+ CHALLENGES_RESPONSE_TEMPLATE, MOCK_REQUEST, "token"
+ )
+ is None
+ )
+
+
+def test__run_next_challenge_success():
+ mock_challenge = MockChallenge("PASSWORD", True, {"credential": "password"})
+ with mock.patch(
+ "google.oauth2.challenges.AVAILABLE_CHALLENGES", {"PASSWORD": mock_challenge}
+ ):
+ with mock.patch(
+ "google.oauth2.reauth._send_challenge_result"
+ ) as mock_send_challenge_result:
+ reauth._run_next_challenge(
+ CHALLENGES_RESPONSE_TEMPLATE, MOCK_REQUEST, "token"
+ )
+ mock_send_challenge_result.assert_called_with(
+ MOCK_REQUEST, "123", 1, {"credential": "password"}, "token"
+ )
+
+
+def test__obtain_rapt_authenticated():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_AUTHENTICATED,
+ ):
+ assert reauth._obtain_rapt(MOCK_REQUEST, "token", None) == "new_rapt_token"
+
+
+def test__obtain_rapt_authenticated_after_run_next_challenge():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_TEMPLATE,
+ ):
+ with mock.patch(
+ "google.oauth2.reauth._run_next_challenge",
+ side_effect=[
+ CHALLENGES_RESPONSE_TEMPLATE,
+ CHALLENGES_RESPONSE_AUTHENTICATED,
+ ],
+ ):
+ with mock.patch("google.oauth2.reauth.is_interactive", return_value=True):
+ assert (
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None) == "new_rapt_token"
+ )
+
+
+def test__obtain_rapt_unsupported_status():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ challenges_response["status"] = "STATUS_UNSPECIFIED"
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges", return_value=challenges_response
+ ):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"API error: STATUS_UNSPECIFIED")
+
+
+def test__obtain_rapt_not_interactive():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_TEMPLATE,
+ ):
+ with mock.patch("google.oauth2.reauth.is_interactive", return_value=False):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"not in an interactive session")
+
+
+def test__obtain_rapt_not_authenticated():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_TEMPLATE,
+ ):
+ with mock.patch("google.oauth2.reauth.RUN_CHALLENGE_RETRY_LIMIT", 0):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"Reauthentication failed")
+
+
+def test_get_rapt_token():
+ with mock.patch(
+ "google.oauth2._client.refresh_grant", return_value=("token", None, None, None)
+ ) as mock_refresh_grant:
+ with mock.patch(
+ "google.oauth2.reauth._obtain_rapt", return_value="new_rapt_token"
+ ) as mock_obtain_rapt:
+ assert (
+ reauth.get_rapt_token(
+ MOCK_REQUEST,
+ "client_id",
+ "client_secret",
+ "refresh_token",
+ "token_uri",
+ )
+ == "new_rapt_token"
+ )
+ mock_refresh_grant.assert_called_with(
+ request=MOCK_REQUEST,
+ client_id="client_id",
+ client_secret="client_secret",
+ refresh_token="refresh_token",
+ token_uri="token_uri",
+ scopes=[reauth._REAUTH_SCOPE],
+ )
+ mock_obtain_rapt.assert_called_with(
+ MOCK_REQUEST, "token", requested_scopes=None
+ )
+
+
+def test_refresh_grant_failed():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request_no_throw"
+ ) as mock_token_request:
+ mock_token_request.return_value = (False, {"error": "Bad request"})
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ reauth.refresh_grant(
+ MOCK_REQUEST,
+ "token_uri",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ scopes=["foo", "bar"],
+ rapt_token="rapt_token",
+ )
+ assert excinfo.match(r"Bad request")
+ mock_token_request.assert_called_with(
+ MOCK_REQUEST,
+ "token_uri",
+ {
+ "grant_type": "refresh_token",
+ "client_id": "client_id",
+ "client_secret": "client_secret",
+ "refresh_token": "refresh_token",
+ "scope": "foo bar",
+ "rapt": "rapt_token",
+ },
+ )
+
+
+def test_refresh_grant_success():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request_no_throw"
+ ) as mock_token_request:
+ mock_token_request.side_effect = [
+ (False, {"error": "invalid_grant", "error_subtype": "rapt_required"}),
+ (True, {"access_token": "access_token"}),
+ ]
+ with mock.patch(
+ "google.oauth2.reauth.get_rapt_token", return_value="new_rapt_token"
+ ):
+ assert reauth.refresh_grant(
+ MOCK_REQUEST, "token_uri", "refresh_token", "client_id", "client_secret"
+ ) == (
+ "access_token",
+ "refresh_token",
+ None,
+ {"access_token": "access_token"},
+ "new_rapt_token",
+ )
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_service_account.py b/contrib/python/google-auth/py2/tests/oauth2/test_service_account.py
new file mode 100644
index 0000000000..a5d59dd713
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_service_account.py
@@ -0,0 +1,433 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+import mock
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import jwt
+from google.auth import transport
+from google.oauth2 import service_account
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "other_cert.pem"), "rb") as fh:
+ OTHER_CERT_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+SIGNER = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+
+
+class TestCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ TOKEN_URI = "https://example.com/oauth2/token"
+
+ @classmethod
+ def make_credentials(cls):
+ return service_account.Credentials(
+ SIGNER, cls.SERVICE_ACCOUNT_EMAIL, cls.TOKEN_URI
+ )
+
+ def test_from_service_account_info(self):
+ credentials = service_account.Credentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO
+ )
+
+ assert credentials._signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+ assert credentials.service_account_email == SERVICE_ACCOUNT_INFO["client_email"]
+ assert credentials._token_uri == SERVICE_ACCOUNT_INFO["token_uri"]
+
+ def test_from_service_account_info_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+ scopes = ["email", "profile"]
+ subject = "subject"
+ additional_claims = {"meta": "data"}
+
+ credentials = service_account.Credentials.from_service_account_info(
+ info, scopes=scopes, subject=subject, additional_claims=additional_claims
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._scopes == scopes
+ assert credentials._subject == subject
+ assert credentials._additional_claims == additional_claims
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = service_account.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+
+ def test_from_service_account_file_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+ scopes = ["email", "profile"]
+ subject = "subject"
+ additional_claims = {"meta": "data"}
+
+ credentials = service_account.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE,
+ subject=subject,
+ scopes=scopes,
+ additional_claims=additional_claims,
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._scopes == scopes
+ assert credentials._subject == subject
+ assert credentials._additional_claims == additional_claims
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+ # Scopes haven't been specified yet
+ assert credentials.requires_scopes
+
+ def test_sign_bytes(self):
+ credentials = self.make_credentials()
+ to_sign = b"123"
+ signature = credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ credentials = self.make_credentials()
+ assert isinstance(credentials.signer, crypt.Signer)
+
+ def test_signer_email(self):
+ credentials = self.make_credentials()
+ assert credentials.signer_email == self.SERVICE_ACCOUNT_EMAIL
+
+ def test_create_scoped(self):
+ credentials = self.make_credentials()
+ scopes = ["email", "profile"]
+ credentials = credentials.with_scopes(scopes)
+ assert credentials._scopes == scopes
+
+ def test_with_claims(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_claims({"meep": "moop"})
+ assert new_credentials._additional_claims == {"meep": "moop"}
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_quota_project("new-project-456")
+ assert new_credentials.quota_project_id == "new-project-456"
+ hdrs = {}
+ new_credentials.apply(hdrs, token="tok")
+ assert "x-goog-user-project" in hdrs
+
+ def test__make_authorization_grant_assertion(self):
+ credentials = self.make_credentials()
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ assert payload["aud"] == service_account._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+
+ def test__make_authorization_grant_assertion_scoped(self):
+ credentials = self.make_credentials()
+ scopes = ["email", "profile"]
+ credentials = credentials.with_scopes(scopes)
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["scope"] == "email profile"
+
+ def test__make_authorization_grant_assertion_subject(self):
+ credentials = self.make_credentials()
+ subject = "user@example.com"
+ credentials = credentials.with_subject(subject)
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["sub"] == subject
+
+ def test_apply_with_quota_project_id(self):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ quota_project_id="quota-project-123",
+ )
+
+ headers = {}
+ credentials.apply(headers, token="token")
+
+ assert headers["x-goog-user-project"] == "quota-project-123"
+ assert "token" in headers["authorization"]
+
+ def test_apply_with_no_quota_project_id(self):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI
+ )
+
+ headers = {}
+ credentials.apply(headers, token="token")
+
+ assert "x-goog-user-project" not in headers
+ assert "token" in headers["authorization"]
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+ jwt.from_signing_credentials.assert_called_once_with(credentials, audience)
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_with_user_scopes(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI, scopes=["foo"]
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+
+ # JWT should not be created if there are user-defined scopes
+ jwt.from_signing_credentials.assert_not_called()
+
+ @mock.patch("google.oauth2._client.jwt_grant", autospec=True)
+ def test_refresh_success(self, jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Check jwt grant call.
+ assert jwt_grant.called
+
+ called_request, token_uri, assertion = jwt_grant.call_args[0]
+ assert called_request == request
+ assert token_uri == credentials._token_uri
+ assert jwt.decode(assertion, PUBLIC_CERT_BYTES)
+ # No further assertion done on the token, as there are separate tests
+ # for checking the authorization grant assertion.
+
+ # Check that the credentials have the token.
+ assert credentials.token == token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert credentials.valid
+
+ @mock.patch("google.oauth2._client.jwt_grant", autospec=True)
+ def test_before_request_refreshes(self, jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ None,
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Credentials should start as invalid
+ assert not credentials.valid
+
+ # before_request should cause a refresh
+ credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert jwt_grant.called
+
+ # Credentials should now be valid.
+ assert credentials.valid
+
+ @mock.patch("google.auth.jwt.Credentials._make_jwt")
+ def test_refresh_with_jwt_credentials(self, make_jwt):
+ credentials = self.make_credentials()
+ credentials._create_self_signed_jwt("https://pubsub.googleapis.com")
+
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ token = "token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ make_jwt.return_value = (token, expiry)
+
+ # Credentials should start as invalid
+ assert not credentials.valid
+
+ # before_request should cause a refresh
+ credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # Credentials should now be valid.
+ assert credentials.valid
+
+ # Assert make_jwt was called
+ assert make_jwt.called_once()
+
+ assert credentials.token == token
+ assert credentials.expiry == expiry
+
+
+class TestIDTokenCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ TOKEN_URI = "https://example.com/oauth2/token"
+ TARGET_AUDIENCE = "https://example.com"
+
+ @classmethod
+ def make_credentials(cls):
+ return service_account.IDTokenCredentials(
+ SIGNER, cls.SERVICE_ACCOUNT_EMAIL, cls.TOKEN_URI, cls.TARGET_AUDIENCE
+ )
+
+ def test_from_service_account_info(self):
+ credentials = service_account.IDTokenCredentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO, target_audience=self.TARGET_AUDIENCE
+ )
+
+ assert credentials._signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+ assert credentials.service_account_email == SERVICE_ACCOUNT_INFO["client_email"]
+ assert credentials._token_uri == SERVICE_ACCOUNT_INFO["token_uri"]
+ assert credentials._target_audience == self.TARGET_AUDIENCE
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = service_account.IDTokenCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE, target_audience=self.TARGET_AUDIENCE
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._target_audience == self.TARGET_AUDIENCE
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+
+ def test_sign_bytes(self):
+ credentials = self.make_credentials()
+ to_sign = b"123"
+ signature = credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ credentials = self.make_credentials()
+ assert isinstance(credentials.signer, crypt.Signer)
+
+ def test_signer_email(self):
+ credentials = self.make_credentials()
+ assert credentials.signer_email == self.SERVICE_ACCOUNT_EMAIL
+
+ def test_with_target_audience(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_target_audience("https://new.example.com")
+ assert new_credentials._target_audience == "https://new.example.com"
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_quota_project("project-foo")
+ assert new_credentials._quota_project_id == "project-foo"
+
+ def test__make_authorization_grant_assertion(self):
+ credentials = self.make_credentials()
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ assert payload["aud"] == service_account._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert payload["target_audience"] == self.TARGET_AUDIENCE
+
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_refresh_success(self, id_token_jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ id_token_jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Check jwt grant call.
+ assert id_token_jwt_grant.called
+
+ called_request, token_uri, assertion = id_token_jwt_grant.call_args[0]
+ assert called_request == request
+ assert token_uri == credentials._token_uri
+ assert jwt.decode(assertion, PUBLIC_CERT_BYTES)
+ # No further assertion done on the token, as there are separate tests
+ # for checking the authorization grant assertion.
+
+ # Check that the credentials have the token.
+ assert credentials.token == token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert credentials.valid
+
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_before_request_refreshes(self, id_token_jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ id_token_jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ None,
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Credentials should start as invalid
+ assert not credentials.valid
+
+ # before_request should cause a refresh
+ credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert id_token_jwt_grant.called
+
+ # Credentials should now be valid.
+ assert credentials.valid
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_sts.py b/contrib/python/google-auth/py2/tests/oauth2/test_sts.py
new file mode 100644
index 0000000000..e8e008df5d
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_sts.py
@@ -0,0 +1,395 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import mock
+import pytest
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import sts
+from google.oauth2 import utils
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password"
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+
+
+class TestStsClient(object):
+ GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+ RESOURCE = "https://api.example.com/"
+ AUDIENCE = "urn:example:cooperation-context"
+ SCOPES = ["scope1", "scope2"]
+ REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+ SUBJECT_TOKEN = "HEADER.SUBJECT_TOKEN_PAYLOAD.SIGNATURE"
+ SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+ ACTOR_TOKEN = "HEADER.ACTOR_TOKEN_PAYLOAD.SIGNATURE"
+ ACTOR_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+ TOKEN_EXCHANGE_ENDPOINT = "https://example.com/token.oauth2"
+ ADDON_HEADERS = {"x-client-version": "0.1.2"}
+ ADDON_OPTIONS = {"additional": {"non-standard": ["options"], "other": "some-value"}}
+ SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+ "scope": "scope1 scope2",
+ }
+ ERROR_RESPONSE = {
+ "error": "invalid_request",
+ "error_description": "Invalid subject token",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ }
+ CLIENT_AUTH_BASIC = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID, CLIENT_SECRET
+ )
+ CLIENT_AUTH_REQUEST_BODY = utils.ClientAuthentication(
+ utils.ClientAuthType.request_body, CLIENT_ID, CLIENT_SECRET
+ )
+
+ @classmethod
+ def make_client(cls, client_auth=None):
+ return sts.Client(cls.TOKEN_EXCHANGE_ENDPOINT, client_auth)
+
+ @classmethod
+ def make_mock_request(cls, data, status=http_client.OK):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+
+ return request
+
+ @classmethod
+ def assert_request_kwargs(cls, request_kwargs, headers, request_data):
+ """Asserts the request was called with the expected parameters.
+ """
+ assert request_kwargs["url"] == cls.TOKEN_EXCHANGE_ENDPOINT
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+ assert len(body_tuples) == len(request_data.keys())
+
+ def test_exchange_token_full_success_without_auth(self):
+ """Test token exchange success without client authentication using full
+ parameters.
+ """
+ client = self.make_client()
+ headers = self.ADDON_HEADERS.copy()
+ headers["Content-Type"] = "application/x-www-form-urlencoded"
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "resource": self.RESOURCE,
+ "audience": self.AUDIENCE,
+ "scope": " ".join(self.SCOPES),
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "actor_token": self.ACTOR_TOKEN,
+ "actor_token_type": self.ACTOR_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(self.ADDON_OPTIONS)),
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_partial_success_without_auth(self):
+ """Test token exchange success without client authentication using
+ partial (required only) parameters.
+ """
+ client = self.make_client()
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ grant_type=self.GRANT_TYPE,
+ subject_token=self.SUBJECT_TOKEN,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ audience=self.AUDIENCE,
+ requested_token_type=self.REQUESTED_TOKEN_TYPE,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_non200_without_auth(self):
+ """Test token exchange without client auth responding with non-200 status.
+ """
+ client = self.make_client()
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ def test_exchange_token_full_success_with_basic_auth(self):
+ """Test token exchange success with basic client authentication using full
+ parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ headers = self.ADDON_HEADERS.copy()
+ headers["Content-Type"] = "application/x-www-form-urlencoded"
+ headers["Authorization"] = "Basic {}".format(BASIC_AUTH_ENCODING)
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "resource": self.RESOURCE,
+ "audience": self.AUDIENCE,
+ "scope": " ".join(self.SCOPES),
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "actor_token": self.ACTOR_TOKEN,
+ "actor_token_type": self.ACTOR_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(self.ADDON_OPTIONS)),
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_partial_success_with_basic_auth(self):
+ """Test token exchange success with basic client authentication using
+ partial (required only) parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ grant_type=self.GRANT_TYPE,
+ subject_token=self.SUBJECT_TOKEN,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ audience=self.AUDIENCE,
+ requested_token_type=self.REQUESTED_TOKEN_TYPE,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_non200_with_basic_auth(self):
+ """Test token exchange with basic client auth responding with non-200
+ status.
+ """
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ def test_exchange_token_full_success_with_reqbody_auth(self):
+ """Test token exchange success with request body client authentication
+ using full parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_REQUEST_BODY)
+ headers = self.ADDON_HEADERS.copy()
+ headers["Content-Type"] = "application/x-www-form-urlencoded"
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "resource": self.RESOURCE,
+ "audience": self.AUDIENCE,
+ "scope": " ".join(self.SCOPES),
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "actor_token": self.ACTOR_TOKEN,
+ "actor_token_type": self.ACTOR_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(self.ADDON_OPTIONS)),
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_partial_success_with_reqbody_auth(self):
+ """Test token exchange success with request body client authentication
+ using partial (required only) parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_REQUEST_BODY)
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ grant_type=self.GRANT_TYPE,
+ subject_token=self.SUBJECT_TOKEN,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ audience=self.AUDIENCE,
+ requested_token_type=self.REQUESTED_TOKEN_TYPE,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_non200_with_reqbody_auth(self):
+ """Test token exchange with POST request body client auth responding
+ with non-200 status.
+ """
+ client = self.make_client(self.CLIENT_AUTH_REQUEST_BODY)
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
diff --git a/contrib/python/google-auth/py2/tests/oauth2/test_utils.py b/contrib/python/google-auth/py2/tests/oauth2/test_utils.py
new file mode 100644
index 0000000000..6de9ff5337
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/oauth2/test_utils.py
@@ -0,0 +1,264 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import pytest
+
+from google.auth import exceptions
+from google.oauth2 import utils
+
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password"
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+# Base64 encoding of "username:"
+BASIC_AUTH_ENCODING_SECRETLESS = "dXNlcm5hbWU6"
+
+
+class AuthHandler(utils.OAuthClientAuthHandler):
+ def __init__(self, client_auth=None):
+ super(AuthHandler, self).__init__(client_auth)
+
+ def apply_client_authentication_options(
+ self, headers, request_body=None, bearer_token=None
+ ):
+ return super(AuthHandler, self).apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+
+class TestClientAuthentication(object):
+ @classmethod
+ def make_client_auth(cls, client_secret=None):
+ return utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID, client_secret
+ )
+
+ def test_initialization_with_client_secret(self):
+ client_auth = self.make_client_auth(CLIENT_SECRET)
+
+ assert client_auth.client_auth_type == utils.ClientAuthType.basic
+ assert client_auth.client_id == CLIENT_ID
+ assert client_auth.client_secret == CLIENT_SECRET
+
+ def test_initialization_no_client_secret(self):
+ client_auth = self.make_client_auth()
+
+ assert client_auth.client_auth_type == utils.ClientAuthType.basic
+ assert client_auth.client_id == CLIENT_ID
+ assert client_auth.client_secret is None
+
+
+class TestOAuthClientAuthHandler(object):
+ CLIENT_AUTH_BASIC = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID, CLIENT_SECRET
+ )
+ CLIENT_AUTH_BASIC_SECRETLESS = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID
+ )
+ CLIENT_AUTH_REQUEST_BODY = utils.ClientAuthentication(
+ utils.ClientAuthType.request_body, CLIENT_ID, CLIENT_SECRET
+ )
+ CLIENT_AUTH_REQUEST_BODY_SECRETLESS = utils.ClientAuthentication(
+ utils.ClientAuthType.request_body, CLIENT_ID
+ )
+
+ @classmethod
+ def make_oauth_client_auth_handler(cls, client_auth=None):
+ return AuthHandler(client_auth)
+
+ def test_apply_client_authentication_options_none(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler()
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {"Content-Type": "application/json"}
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_basic(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(self.CLIENT_AUTH_BASIC)
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_basic_nosecret(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_BASIC_SECRETLESS
+ )
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING_SECRETLESS),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_request_body(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY
+ )
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {"Content-Type": "application/json"}
+ assert request_body == {
+ "foo": "bar",
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+
+ def test_apply_client_authentication_options_request_body_nosecret(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY_SECRETLESS
+ )
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {"Content-Type": "application/json"}
+ assert request_body == {
+ "foo": "bar",
+ "client_id": CLIENT_ID,
+ "client_secret": "",
+ }
+
+ def test_apply_client_authentication_options_request_body_no_body(self):
+ headers = {"Content-Type": "application/json"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ auth_handler.apply_client_authentication_options(headers)
+
+ assert excinfo.match(r"HTTP request does not support request-body")
+
+ def test_apply_client_authentication_options_bearer_token(self):
+ bearer_token = "ACCESS_TOKEN"
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler()
+
+ auth_handler.apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(bearer_token),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_bearer_and_basic(self):
+ bearer_token = "ACCESS_TOKEN"
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(self.CLIENT_AUTH_BASIC)
+
+ auth_handler.apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+ # Bearer token should have higher priority.
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(bearer_token),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_bearer_and_request_body(self):
+ bearer_token = "ACCESS_TOKEN"
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY
+ )
+
+ auth_handler.apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+ # Bearer token should have higher priority.
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(bearer_token),
+ }
+ assert request_body == {"foo": "bar"}
+
+
+def test__handle_error_response_code_only():
+ error_resp = {"error": "unsupported_grant_type"}
+ response_data = json.dumps(error_resp)
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(r"Error code unsupported_grant_type")
+
+
+def test__handle_error_response_code_description():
+ error_resp = {
+ "error": "unsupported_grant_type",
+ "error_description": "The provided grant_type is unsupported",
+ }
+ response_data = json.dumps(error_resp)
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(
+ r"Error code unsupported_grant_type: The provided grant_type is unsupported"
+ )
+
+
+def test__handle_error_response_code_description_uri():
+ error_resp = {
+ "error": "unsupported_grant_type",
+ "error_description": "The provided grant_type is unsupported",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ }
+ response_data = json.dumps(error_resp)
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(
+ r"Error code unsupported_grant_type: The provided grant_type is unsupported - https://tools.ietf.org/html/rfc6749"
+ )
+
+
+def test__handle_error_response_non_json():
+ response_data = "Oops, something wrong happened"
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(r"Oops, something wrong happened")
diff --git a/contrib/python/google-auth/py2/tests/test__cloud_sdk.py b/contrib/python/google-auth/py2/tests/test__cloud_sdk.py
new file mode 100644
index 0000000000..a76bd7020b
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test__cloud_sdk.py
@@ -0,0 +1,188 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import json
+import os
+import subprocess
+
+import mock
+import pytest
+
+from google.auth import _cloud_sdk
+from google.auth import environment_vars
+from google.auth import exceptions
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+AUTHORIZED_USER_FILE = os.path.join(DATA_DIR, "authorized_user.json")
+
+with io.open(AUTHORIZED_USER_FILE) as fh:
+ AUTHORIZED_USER_FILE_DATA = json.load(fh)
+
+SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with io.open(SERVICE_ACCOUNT_FILE) as fh:
+ SERVICE_ACCOUNT_FILE_DATA = json.load(fh)
+
+with io.open(os.path.join(DATA_DIR, "cloud_sdk_config.json"), "rb") as fh:
+ CLOUD_SDK_CONFIG_FILE_DATA = fh.read()
+
+
+@pytest.mark.parametrize(
+ "data, expected_project_id",
+ [
+ (CLOUD_SDK_CONFIG_FILE_DATA, "example-project"),
+ (b"I am some bad json", None),
+ (b"{}", None),
+ ],
+)
+def test_get_project_id(data, expected_project_id):
+ check_output_patch = mock.patch(
+ "subprocess.check_output", autospec=True, return_value=data
+ )
+
+ with check_output_patch as check_output:
+ project_id = _cloud_sdk.get_project_id()
+
+ assert project_id == expected_project_id
+ assert check_output.called
+
+
+@mock.patch(
+ "subprocess.check_output",
+ autospec=True,
+ side_effect=subprocess.CalledProcessError(-1, None),
+)
+def test_get_project_id_call_error(check_output):
+ project_id = _cloud_sdk.get_project_id()
+ assert project_id is None
+ assert check_output.called
+
+
+def test__run_subprocess_ignore_stderr():
+ command = [
+ "python",
+ "-c",
+ "from __future__ import print_function;"
+ + "import sys;"
+ + "print('error', file=sys.stderr);"
+ + "print('output', file=sys.stdout)",
+ ]
+
+ # If we ignore stderr, then the output only has stdout
+ output = _cloud_sdk._run_subprocess_ignore_stderr(command)
+ assert output == b"output\n"
+
+ # If we pipe stderr to stdout, then the output is mixed with stdout and stderr.
+ output = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ assert output == b"output\nerror\n" or output == b"error\noutput\n"
+
+
+@mock.patch("os.name", new="nt")
+def test_get_project_id_windows():
+ check_output_patch = mock.patch(
+ "subprocess.check_output",
+ autospec=True,
+ return_value=CLOUD_SDK_CONFIG_FILE_DATA,
+ )
+
+ with check_output_patch as check_output:
+ project_id = _cloud_sdk.get_project_id()
+
+ assert project_id == "example-project"
+ assert check_output.called
+ # Make sure the executable is `gcloud.cmd`.
+ args = check_output.call_args[0]
+ command = args[0]
+ executable = command[0]
+ assert executable == "gcloud.cmd"
+
+
+@mock.patch("google.auth._cloud_sdk.get_config_path", autospec=True)
+def test_get_application_default_credentials_path(get_config_dir):
+ config_path = "config_path"
+ get_config_dir.return_value = config_path
+ credentials_path = _cloud_sdk.get_application_default_credentials_path()
+ assert credentials_path == os.path.join(
+ config_path, _cloud_sdk._CREDENTIALS_FILENAME
+ )
+
+
+def test_get_config_path_env_var(monkeypatch):
+ config_path_sentinel = "config_path"
+ monkeypatch.setenv(environment_vars.CLOUD_SDK_CONFIG_DIR, config_path_sentinel)
+ config_path = _cloud_sdk.get_config_path()
+ assert config_path == config_path_sentinel
+
+
+@mock.patch("os.path.expanduser")
+def test_get_config_path_unix(expanduser):
+ expanduser.side_effect = lambda path: path
+
+ config_path = _cloud_sdk.get_config_path()
+
+ assert os.path.split(config_path) == ("~/.config", _cloud_sdk._CONFIG_DIRECTORY)
+
+
+@mock.patch("os.name", new="nt")
+def test_get_config_path_windows(monkeypatch):
+ appdata = "appdata"
+ monkeypatch.setenv(_cloud_sdk._WINDOWS_CONFIG_ROOT_ENV_VAR, appdata)
+
+ config_path = _cloud_sdk.get_config_path()
+
+ assert os.path.split(config_path) == (appdata, _cloud_sdk._CONFIG_DIRECTORY)
+
+
+@mock.patch("os.name", new="nt")
+def test_get_config_path_no_appdata(monkeypatch):
+ monkeypatch.delenv(_cloud_sdk._WINDOWS_CONFIG_ROOT_ENV_VAR, raising=False)
+ monkeypatch.setenv("SystemDrive", "G:")
+
+ config_path = _cloud_sdk.get_config_path()
+
+ assert os.path.split(config_path) == ("G:/\\", _cloud_sdk._CONFIG_DIRECTORY)
+
+
+@mock.patch("os.name", new="nt")
+@mock.patch("subprocess.check_output", autospec=True)
+def test_get_auth_access_token_windows(check_output):
+ check_output.return_value = b"access_token\n"
+
+ token = _cloud_sdk.get_auth_access_token()
+ assert token == "access_token"
+ check_output.assert_called_with(
+ ("gcloud.cmd", "auth", "print-access-token"), stderr=subprocess.STDOUT
+ )
+
+
+@mock.patch("subprocess.check_output", autospec=True)
+def test_get_auth_access_token_with_account(check_output):
+ check_output.return_value = b"access_token\n"
+
+ token = _cloud_sdk.get_auth_access_token(account="account")
+ assert token == "access_token"
+ check_output.assert_called_with(
+ ("gcloud", "auth", "print-access-token", "--account=account"),
+ stderr=subprocess.STDOUT,
+ )
+
+
+@mock.patch("subprocess.check_output", autospec=True)
+def test_get_auth_access_token_with_exception(check_output):
+ check_output.side_effect = OSError()
+
+ with pytest.raises(exceptions.UserAccessTokenError):
+ _cloud_sdk.get_auth_access_token(account="account")
diff --git a/contrib/python/google-auth/py2/tests/test__default.py b/contrib/python/google-auth/py2/tests/test__default.py
new file mode 100644
index 0000000000..fd1b6b9af9
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test__default.py
@@ -0,0 +1,782 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import mock
+import pytest
+
+from google.auth import _default
+from google.auth import app_engine
+from google.auth import aws
+from google.auth import compute_engine
+from google.auth import credentials
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import external_account
+from google.auth import identity_pool
+from google.oauth2 import service_account
+import google.oauth2.credentials
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+AUTHORIZED_USER_FILE = os.path.join(DATA_DIR, "authorized_user.json")
+
+with open(AUTHORIZED_USER_FILE) as fh:
+ AUTHORIZED_USER_FILE_DATA = json.load(fh)
+
+AUTHORIZED_USER_CLOUD_SDK_FILE = os.path.join(
+ DATA_DIR, "authorized_user_cloud_sdk.json"
+)
+
+AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE = os.path.join(
+ DATA_DIR, "authorized_user_cloud_sdk_with_quota_project_id.json"
+)
+
+SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+CLIENT_SECRETS_FILE = os.path.join(DATA_DIR, "client_secrets.json")
+
+with open(SERVICE_ACCOUNT_FILE) as fh:
+ SERVICE_ACCOUNT_FILE_DATA = json.load(fh)
+
+SUBJECT_TOKEN_TEXT_FILE = os.path.join(DATA_DIR, "external_subject_token.txt")
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+REGION_URL = "http://169.254.169.254/latest/meta-data/placement/availability-zone"
+SECURITY_CREDS_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials"
+CRED_VERIFICATION_URL = (
+ "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
+)
+IDENTITY_POOL_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+ "token_url": TOKEN_URL,
+ "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
+}
+AWS_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:aws:token-type:aws4_request",
+ "token_url": TOKEN_URL,
+ "credential_source": {
+ "environment_id": "aws1",
+ "region_url": REGION_URL,
+ "url": SECURITY_CREDS_URL,
+ "regional_cred_verification_url": CRED_VERIFICATION_URL,
+ },
+}
+
+MOCK_CREDENTIALS = mock.Mock(spec=credentials.CredentialsWithQuotaProject)
+MOCK_CREDENTIALS.with_quota_project.return_value = MOCK_CREDENTIALS
+
+
def get_project_id_side_effect(self, request=None):
    """Stub side effect for ``external_account.Credentials.get_project_id``.

    Mirrors the patched method's observable contract in these tests: a
    project ID is only resolvable when the credentials carry scopes.
    """
    # If no scopes are set, this will always return None.
    if not self.scopes:
        return None
    return mock.sentinel.project_id
+
+
+LOAD_FILE_PATCH = mock.patch(
+ "google.auth._default.load_credentials_from_file",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH = mock.patch.object(
+ external_account.Credentials,
+ "get_project_id",
+ side_effect=get_project_id_side_effect,
+ autospec=True,
+)
+
+
def test_load_credentials_from_missing_file():
    """A nonexistent path raises DefaultCredentialsError mentioning 'not found'."""
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file("")

    assert excinfo.match(r"not found")
+
+
def test_load_credentials_from_file_invalid_json(tmpdir):
    """Malformed JSON content is reported as 'not a valid json file'."""
    bad_file = tmpdir.join("invalid.json")
    bad_file.write("{")

    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(bad_file))

    assert excinfo.match(r"not a valid json file")
+
+
+def test_load_credentials_from_file_invalid_type(tmpdir):
+ jsonfile = tmpdir.join("invalid.json")
+ jsonfile.write(json.dumps({"type": "not-a-real-type"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(jsonfile))
+
+ assert excinfo.match(r"does not have a valid type")
+
+
+def test_load_credentials_from_file_authorized_user():
+ credentials, project_id = _default.load_credentials_from_file(AUTHORIZED_USER_FILE)
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+
+
def test_load_credentials_from_file_no_type():
    """A valid-JSON file without a "type" key is rejected with a clear error.

    Uses client_secrets.json, which is valid JSON but not a loadable
    credentials type. The previously declared ``tmpdir`` fixture parameter
    was unused (this test reads a checked-in data file) and is removed.
    """
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(CLIENT_SECRETS_FILE)

    assert excinfo.match(r"does not have a valid type")
    assert excinfo.match(r"Type is None")
+
+
+def test_load_credentials_from_file_authorized_user_bad_format(tmpdir):
+ filename = tmpdir.join("authorized_user_bad.json")
+ filename.write(json.dumps({"type": "authorized_user"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(filename))
+
+ assert excinfo.match(r"Failed to load authorized user")
+ assert excinfo.match(r"missing fields")
+
+
def test_load_credentials_from_file_authorized_user_cloud_sdk():
    """Cloud SDK user credentials load, warning when no quota project is set."""
    with pytest.warns(UserWarning, match="Cloud SDK"):
        credentials, project_id = _default.load_credentials_from_file(
            AUTHORIZED_USER_CLOUD_SDK_FILE
        )
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None

    # No warning if the json file has quota project id.
    credentials, project_id = _default.load_credentials_from_file(
        AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE
    )
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None
+
+
+def test_load_credentials_from_file_authorized_user_cloud_sdk_with_scopes():
+ with pytest.warns(UserWarning, match="Cloud SDK"):
+ credentials, project_id = _default.load_credentials_from_file(
+ AUTHORIZED_USER_CLOUD_SDK_FILE,
+ scopes=["https://www.google.com/calendar/feeds"],
+ )
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+def test_load_credentials_from_file_authorized_user_cloud_sdk_with_quota_project():
+ credentials, project_id = _default.load_credentials_from_file(
+ AUTHORIZED_USER_CLOUD_SDK_FILE, quota_project_id="project-foo"
+ )
+
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+ assert credentials.quota_project_id == "project-foo"
+
+
+def test_load_credentials_from_file_service_account():
+ credentials, project_id = _default.load_credentials_from_file(SERVICE_ACCOUNT_FILE)
+ assert isinstance(credentials, service_account.Credentials)
+ assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
+
+
+def test_load_credentials_from_file_service_account_with_scopes():
+ credentials, project_id = _default.load_credentials_from_file(
+ SERVICE_ACCOUNT_FILE, scopes=["https://www.google.com/calendar/feeds"]
+ )
+ assert isinstance(credentials, service_account.Credentials)
+ assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+def test_load_credentials_from_file_service_account_with_quota_project():
+ credentials, project_id = _default.load_credentials_from_file(
+ SERVICE_ACCOUNT_FILE, quota_project_id="project-foo"
+ )
+ assert isinstance(credentials, service_account.Credentials)
+ assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
+ assert credentials.quota_project_id == "project-foo"
+
+
def test_load_credentials_from_file_service_account_bad_format(tmpdir):
    """A service account file missing required fields fails with a helpful message."""
    bad_file = tmpdir.join("serivce_account_bad.json")
    bad_file.write(json.dumps({"type": "service_account"}))

    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(bad_file))

    assert excinfo.match(r"Failed to load service account")
    assert excinfo.match(r"missing fields")
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_identity_pool(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_aws(get_project_id, tmpdir):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(AWS_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, aws.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_with_user_and_default_scopes(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(
+ str(config_file),
+ scopes=["https://www.google.com/calendar/feeds"],
+ default_scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since scopes are specified, the project ID can be determined.
+ assert project_id is mock.sentinel.project_id
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+ assert credentials.default_scopes == [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_with_quota_project(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(
+ str(config_file), quota_project_id="project-foo"
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert credentials.quota_project_id == "project-foo"
+
+
+def test_load_credentials_from_file_external_account_bad_format(tmpdir):
+ filename = tmpdir.join("external_account_bad.json")
+ filename.write(json.dumps({"type": "external_account"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(filename))
+
+ assert excinfo.match(
+ "Failed to load external account credentials from {}".format(str(filename))
+ )
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_explicit_request(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(
+ str(config_file),
+ request=mock.sentinel.request,
+ scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since scopes are specified, the project ID can be determined.
+ assert project_id is mock.sentinel.project_id
+ get_project_id.assert_called_with(credentials, request=mock.sentinel.request)
+
+
+@mock.patch.dict(os.environ, {}, clear=True)
+def test__get_explicit_environ_credentials_no_env():
+ assert _default._get_explicit_environ_credentials() == (None, None)
+
+
+@LOAD_FILE_PATCH
+def test__get_explicit_environ_credentials(load, monkeypatch):
+ monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
+
+ credentials, project_id = _default._get_explicit_environ_credentials()
+
+ assert credentials is MOCK_CREDENTIALS
+ assert project_id is mock.sentinel.project_id
+ load.assert_called_with("filename")
+
+
+@LOAD_FILE_PATCH
+def test__get_explicit_environ_credentials_no_project_id(load, monkeypatch):
+ load.return_value = MOCK_CREDENTIALS, None
+ monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
+
+ credentials, project_id = _default._get_explicit_environ_credentials()
+
+ assert credentials is MOCK_CREDENTIALS
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+@mock.patch("google.auth._default._get_gcloud_sdk_credentials", autospec=True)
+def test__get_explicit_environ_credentials_fallback_to_gcloud(
+ get_gcloud_creds, get_adc_path, monkeypatch
+):
+ # Set explicit credentials path to cloud sdk credentials path.
+ get_adc_path.return_value = "filename"
+ monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
+
+ _default._get_explicit_environ_credentials()
+
+ # Check we fall back to cloud sdk flow since explicit credentials path is
+ # cloud sdk credentials path
+ get_gcloud_creds.assert_called_once()
+
+
+@LOAD_FILE_PATCH
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test__get_gcloud_sdk_credentials(get_adc_path, load):
+ get_adc_path.return_value = SERVICE_ACCOUNT_FILE
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials is MOCK_CREDENTIALS
+ assert project_id is mock.sentinel.project_id
+ load.assert_called_with(SERVICE_ACCOUNT_FILE)
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test__get_gcloud_sdk_credentials_non_existent(get_adc_path, tmpdir):
+ non_existent = tmpdir.join("non-existent")
+ get_adc_path.return_value = str(non_existent)
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_project_id",
+ return_value=mock.sentinel.project_id,
+ autospec=True,
+)
+@mock.patch("os.path.isfile", return_value=True, autospec=True)
+@LOAD_FILE_PATCH
+def test__get_gcloud_sdk_credentials_project_id(load, unused_isfile, get_project_id):
+ # Don't return a project ID from load file, make the function check
+ # the Cloud SDK project.
+ load.return_value = MOCK_CREDENTIALS, None
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials == MOCK_CREDENTIALS
+ assert project_id == mock.sentinel.project_id
+ assert get_project_id.called
+
+
+@mock.patch("google.auth._cloud_sdk.get_project_id", return_value=None, autospec=True)
+@mock.patch("os.path.isfile", return_value=True)
+@LOAD_FILE_PATCH
+def test__get_gcloud_sdk_credentials_no_project_id(load, unused_isfile, get_project_id):
+ # Don't return a project ID from load file, make the function check
+ # the Cloud SDK project.
+ load.return_value = MOCK_CREDENTIALS, None
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials == MOCK_CREDENTIALS
+ assert project_id is None
+ assert get_project_id.called
+
+
class _AppIdentityModule(object):
    """The interface of the App Identity app engine module.

    See https://cloud.google.com/appengine/docs/standard/python/refdocs\
    /google.appengine.api.app_identity.app_identity
    """

    def get_application_id(self):
        # Replaced by an autospec mock in the app_identity fixture below;
        # the real implementation is never exercised in these tests.
        raise NotImplementedError()
+
+
@pytest.fixture
def app_identity(monkeypatch):
    """Mocks the app_identity module for google.auth.app_engine.

    Yields the autospec mock so tests can configure return values and
    assert on calls made through ``app_engine.app_identity``.
    """
    app_identity_module = mock.create_autospec(_AppIdentityModule, instance=True)
    monkeypatch.setattr(app_engine, "app_identity", app_identity_module)
    yield app_identity_module
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_gen1(app_identity):
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python27"
+ app_identity.get_application_id.return_value = mock.sentinel.project
+
+ credentials, project_id = _default._get_gae_credentials()
+
+ assert isinstance(credentials, app_engine.Credentials)
+ assert project_id == mock.sentinel.project
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_gen2():
+ os.environ["GAE_RUNTIME"] = "python37"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_gen2_backwards_compat():
+ # compat helpers may copy GAE_RUNTIME to APPENGINE_RUNTIME
+ # for backwards compatibility with code that relies on it
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python37"
+ os.environ["GAE_RUNTIME"] = "python37"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+def test__get_gae_credentials_env_unset():
+ assert environment_vars.LEGACY_APPENGINE_RUNTIME not in os.environ
+ assert "GAE_RUNTIME" not in os.environ
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
@mock.patch.dict(os.environ)
def test__get_gae_credentials_no_app_engine():
    """GAE detection degrades to (None, None) when app_engine cannot be imported."""
    # test both with and without LEGACY_APPENGINE_RUNTIME setting
    assert environment_vars.LEGACY_APPENGINE_RUNTIME not in os.environ

    import sys

    # Masking the module in sys.modules makes its import raise, simulating
    # an environment where google.auth.app_engine is not installed.
    with mock.patch.dict(sys.modules, {"google.auth.app_engine": None}):
        credentials, project_id = _default._get_gae_credentials()
        assert credentials is None
        assert project_id is None

        os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python27"
        credentials, project_id = _default._get_gae_credentials()
        assert credentials is None
        assert project_id is None
+
+
+@mock.patch.dict(os.environ)
+@mock.patch.object(app_engine, "app_identity", new=None)
+def test__get_gae_credentials_no_apis():
+ # test both with and without LEGACY_APPENGINE_RUNTIME setting
+ assert environment_vars.LEGACY_APPENGINE_RUNTIME not in os.environ
+
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python27"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.ping", return_value=True, autospec=True
+)
+@mock.patch(
+ "google.auth.compute_engine._metadata.get_project_id",
+ return_value="example-project",
+ autospec=True,
+)
+def test__get_gce_credentials(unused_get, unused_ping):
+ credentials, project_id = _default._get_gce_credentials()
+
+ assert isinstance(credentials, compute_engine.Credentials)
+ assert project_id == "example-project"
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.ping", return_value=False, autospec=True
+)
+def test__get_gce_credentials_no_ping(unused_ping):
+ credentials, project_id = _default._get_gce_credentials()
+
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.ping", return_value=True, autospec=True
+)
+@mock.patch(
+ "google.auth.compute_engine._metadata.get_project_id",
+ side_effect=exceptions.TransportError(),
+ autospec=True,
+)
+def test__get_gce_credentials_no_project_id(unused_get, unused_ping):
+ credentials, project_id = _default._get_gce_credentials()
+
+ assert isinstance(credentials, compute_engine.Credentials)
+ assert project_id is None
+
+
+def test__get_gce_credentials_no_compute_engine():
+ import sys
+
+ with mock.patch.dict("sys.modules"):
+ sys.modules["google.auth.compute_engine"] = None
+ credentials, project_id = _default._get_gce_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.ping", return_value=False, autospec=True
+)
+def test__get_gce_credentials_explicit_request(ping):
+ _default._get_gce_credentials(mock.sentinel.request)
+ ping.assert_called_with(request=mock.sentinel.request)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_early_out(unused_get):
+ assert _default.default() == (MOCK_CREDENTIALS, mock.sentinel.project_id)
+
+
@mock.patch(
    "google.auth._default._get_explicit_environ_credentials",
    return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
    autospec=True,
)
def test_default_explict_project_id(unused_get, monkeypatch):
    """The environment_vars.PROJECT variable overrides the credentials' project ID."""
    monkeypatch.setenv(environment_vars.PROJECT, "explicit-env")
    assert _default.default() == (MOCK_CREDENTIALS, "explicit-env")
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_explict_legacy_project_id(unused_get, monkeypatch):
+ monkeypatch.setenv(environment_vars.LEGACY_PROJECT, "explicit-env")
+ assert _default.default() == (MOCK_CREDENTIALS, "explicit-env")
+
+
+@mock.patch("logging.Logger.warning", autospec=True)
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gcloud_sdk_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gae_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gce_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+def test_default_without_project_id(
+ unused_gce, unused_gae, unused_sdk, unused_explicit, logger_warning
+):
+ assert _default.default() == (MOCK_CREDENTIALS, None)
+ logger_warning.assert_called_with(mock.ANY, mock.ANY, mock.ANY)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gcloud_sdk_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gae_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gce_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+def test_default_fail(unused_gce, unused_gae, unused_sdk, unused_explicit):
+ with pytest.raises(exceptions.DefaultCredentialsError):
+ assert _default.default()
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth.credentials.with_scopes_if_required",
+ return_value=MOCK_CREDENTIALS,
+ autospec=True,
+)
+def test_default_scoped(with_scopes, unused_get):
+ scopes = ["one", "two"]
+
+ credentials, project_id = _default.default(scopes=scopes)
+
+ assert credentials == with_scopes.return_value
+ assert project_id == mock.sentinel.project_id
+ with_scopes.assert_called_once_with(MOCK_CREDENTIALS, scopes, default_scopes=None)
+
+
@mock.patch(
    "google.auth._default._get_explicit_environ_credentials",
    return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
    autospec=True,
)
def test_default_quota_project(unused_get):
    """default() forwards quota_project_id to the credentials object.

    The injected parameter is the patched ``_get_explicit_environ_credentials``
    mock; it was previously (misleadingly) named ``with_quota_project``, so it
    is renamed ``unused_get`` for consistency with the sibling tests.
    """
    credentials, project_id = _default.default(quota_project_id="project-foo")

    # MOCK_CREDENTIALS.with_quota_project returns MOCK_CREDENTIALS
    # (configured at module level), so only the call is asserted here.
    MOCK_CREDENTIALS.with_quota_project.assert_called_once_with("project-foo")
    assert project_id == mock.sentinel.project_id
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_no_app_engine_compute_engine_module(unused_get):
+ """
+ google.auth.compute_engine and google.auth.app_engine are both optional
+ to allow not including them when using this package. This verifies
+ that default fails gracefully if these modules are absent
+ """
+ import sys
+
+ with mock.patch.dict("sys.modules"):
+ sys.modules["google.auth.compute_engine"] = None
+ sys.modules["google.auth.app_engine"] = None
+ assert _default.default() == (MOCK_CREDENTIALS, mock.sentinel.project_id)
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials(get_project_id, monkeypatch, tmpdir):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default()
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Without scopes, project ID cannot be determined.
+ assert project_id is None
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_with_user_and_default_scopes_and_quota_project_id(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"],
+ default_scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ quota_project_id="project-foo",
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert project_id is mock.sentinel.project_id
+ assert credentials.quota_project_id == "project-foo"
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+ assert credentials.default_scopes == [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_explicit_request_with_scopes(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ request=mock.sentinel.request,
+ scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert project_id is mock.sentinel.project_id
+ # default() will initialize new credentials via with_scopes_if_required
+ # and potentially with_quota_project.
+ # As a result the caller of get_project_id() will not match the returned
+ # credentials.
+ get_project_id.assert_called_with(mock.ANY, request=mock.sentinel.request)
+
+
+def test_default_environ_external_credentials_bad_format(monkeypatch, tmpdir):
+ filename = tmpdir.join("external_account_bad.json")
+ filename.write(json.dumps({"type": "external_account"}))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(filename))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.default()
+
+ assert excinfo.match(
+ "Failed to load external account credentials from {}".format(str(filename))
+ )
diff --git a/contrib/python/google-auth/py2/tests/test__helpers.py b/contrib/python/google-auth/py2/tests/test__helpers.py
new file mode 100644
index 0000000000..0c0bad2d2f
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test__helpers.py
@@ -0,0 +1,170 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import pytest
+from six.moves import urllib
+
+from google.auth import _helpers
+
+
class SourceClass(object):
    """Donor class whose method docstring is copied in the copy_docstring tests."""

    def func(self):  # pragma: NO COVER
        """example docstring"""
+
+
def test_copy_docstring_success():
    """copy_docstring transfers SourceClass.func's docstring onto a bare function."""
    def func():  # pragma: NO COVER
        pass

    _helpers.copy_docstring(SourceClass)(func)

    assert func.__doc__ == SourceClass.func.__doc__
+
+
def test_copy_docstring_conflict():
    """copy_docstring refuses to overwrite an already-present docstring."""
    def func():  # pragma: NO COVER
        """existing docstring"""
        pass

    with pytest.raises(ValueError):
        _helpers.copy_docstring(SourceClass)(func)
+
+
+def test_copy_docstring_non_existing():
+ def func2(): # pragma: NO COVER
+ pass
+
+ with pytest.raises(AttributeError):
+ _helpers.copy_docstring(SourceClass)(func2)
+
+
+def test_utcnow():
+ assert isinstance(_helpers.utcnow(), datetime.datetime)
+
+
+def test_datetime_to_secs():
+ assert _helpers.datetime_to_secs(datetime.datetime(1970, 1, 1)) == 0
+ assert _helpers.datetime_to_secs(datetime.datetime(1990, 5, 29)) == 643939200
+
+
+def test_to_bytes_with_bytes():
+ value = b"bytes-val"
+ assert _helpers.to_bytes(value) == value
+
+
def test_to_bytes_with_unicode():
    """A text (unicode) string is encoded into the equivalent byte string."""
    text = u"string-val"
    assert _helpers.to_bytes(text) == b"string-val"
+
+
+def test_to_bytes_with_nonstring_type():
+ with pytest.raises(ValueError):
+ _helpers.to_bytes(object())
+
+
+def test_from_bytes_with_unicode():
+ value = u"bytes-val"
+ assert _helpers.from_bytes(value) == value
+
+
+def test_from_bytes_with_bytes():
+ value = b"string-val"
+ decoded_value = u"string-val"
+ assert _helpers.from_bytes(value) == decoded_value
+
+
+def test_from_bytes_with_nonstring_type():
+ with pytest.raises(ValueError):
+ _helpers.from_bytes(object())
+
+
def _assert_query(url, expected):
    # Compare only the query component of *url*, parsed into a dict of
    # value lists, so parameter ordering cannot affect the assertion.
    parts = urllib.parse.urlsplit(url)
    query = urllib.parse.parse_qs(parts.query)
    assert query == expected
+
+
+def test_update_query_params_no_params():
+ uri = "http://www.google.com"
+ updated = _helpers.update_query(uri, {"a": "b"})
+ assert updated == uri + "?a=b"
+
+
+def test_update_query_existing_params():
+ uri = "http://www.google.com?x=y"
+ updated = _helpers.update_query(uri, {"a": "b", "c": "d&"})
+ _assert_query(updated, {"x": ["y"], "a": ["b"], "c": ["d&"]})
+
+
+def test_update_query_replace_param():
+ base_uri = "http://www.google.com"
+ uri = base_uri + "?x=a"
+ updated = _helpers.update_query(uri, {"x": "b", "y": "c"})
+ _assert_query(updated, {"x": ["b"], "y": ["c"]})
+
+
+def test_update_query_remove_param():
+ base_uri = "http://www.google.com"
+ uri = base_uri + "?x=a"
+ updated = _helpers.update_query(uri, {"y": "c"}, remove=["x"])
+ _assert_query(updated, {"y": ["c"]})
+
+
def test_scopes_to_string():
    """scopes_to_string joins any iterable of scope strings with single spaces."""
    cases = [
        ((), ""),
        ([], ""),
        (("",), ""),
        ([""], ""),
        (("a",), "a"),
        (["b"], "b"),
        (["a", "b"], "a b"),
        (("a", "b"), "a b"),
        ((s for s in ["a", "b"]), "a b"),  # generators are accepted too
    ]
    for scopes, expected in cases:
        assert _helpers.scopes_to_string(scopes) == expected
+
+
+def test_string_to_scopes():
+ cases = [("", []), ("a", ["a"]), ("a b c d e f", ["a", "b", "c", "d", "e", "f"])]
+
+ for case, expected in cases:
+ assert _helpers.string_to_scopes(case) == expected
+
+
def test_padded_urlsafe_b64decode():
    """Decoding succeeds whether or not the '=' padding is present."""
    for encoded, decoded in (
        ("YQ==", b"a"),
        ("YQ", b"a"),
        ("YWE=", b"aa"),
        ("YWE", b"aa"),
        ("YWFhYQ==", b"aaaa"),
        ("YWFhYQ", b"aaaa"),
        ("YWFhYWE=", b"aaaaa"),
        ("YWFhYWE", b"aaaaa"),
    ):
        assert _helpers.padded_urlsafe_b64decode(encoded) == decoded
+
+
+def test_unpadded_urlsafe_b64encode():
+ cases = [(b"", b""), (b"a", b"YQ"), (b"aa", b"YWE"), (b"aaa", b"YWFh")]
+
+ for case, expected in cases:
+ assert _helpers.unpadded_urlsafe_b64encode(case) == expected
diff --git a/contrib/python/google-auth/py2/tests/test__oauth2client.py b/contrib/python/google-auth/py2/tests/test__oauth2client.py
new file mode 100644
index 0000000000..104bc56e84
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test__oauth2client.py
@@ -0,0 +1,171 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import os
+import sys
+
+import mock
+import oauth2client.client
+import oauth2client.contrib.gce
+import oauth2client.service_account
+import pytest
+from six.moves import reload_module
+
+from google.auth import _oauth2client
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+
+def test__convert_oauth2_credentials():
+ old_credentials = oauth2client.client.OAuth2Credentials(
+ "access_token",
+ "client_id",
+ "client_secret",
+ "refresh_token",
+ datetime.datetime.min,
+ "token_uri",
+ "user_agent",
+ scopes="one two",
+ )
+
+ new_credentials = _oauth2client._convert_oauth2_credentials(old_credentials)
+
+ assert new_credentials.token == old_credentials.access_token
+ assert new_credentials._refresh_token == old_credentials.refresh_token
+ assert new_credentials._client_id == old_credentials.client_id
+ assert new_credentials._client_secret == old_credentials.client_secret
+ assert new_credentials._token_uri == old_credentials.token_uri
+ assert new_credentials.scopes == old_credentials.scopes
+
+
+def test__convert_service_account_credentials():
+ old_class = oauth2client.service_account.ServiceAccountCredentials
+ old_credentials = old_class.from_json_keyfile_name(SERVICE_ACCOUNT_JSON_FILE)
+
+ new_credentials = _oauth2client._convert_service_account_credentials(
+ old_credentials
+ )
+
+ assert (
+ new_credentials.service_account_email == old_credentials.service_account_email
+ )
+ assert new_credentials._signer.key_id == old_credentials._private_key_id
+ assert new_credentials._token_uri == old_credentials.token_uri
+
+
+def test__convert_service_account_credentials_with_jwt():
+ old_class = oauth2client.service_account._JWTAccessCredentials
+ old_credentials = old_class.from_json_keyfile_name(SERVICE_ACCOUNT_JSON_FILE)
+
+ new_credentials = _oauth2client._convert_service_account_credentials(
+ old_credentials
+ )
+
+ assert (
+ new_credentials.service_account_email == old_credentials.service_account_email
+ )
+ assert new_credentials._signer.key_id == old_credentials._private_key_id
+ assert new_credentials._token_uri == old_credentials.token_uri
+
+
+def test__convert_gce_app_assertion_credentials():
+ old_credentials = oauth2client.contrib.gce.AppAssertionCredentials(
+ email="some_email"
+ )
+
+ new_credentials = _oauth2client._convert_gce_app_assertion_credentials(
+ old_credentials
+ )
+
+ assert (
+ new_credentials.service_account_email == old_credentials.service_account_email
+ )
+
+
+@pytest.fixture
+def mock_oauth2client_gae_imports(mock_non_existent_module):
+ mock_non_existent_module("google.appengine.api.app_identity")
+ mock_non_existent_module("google.appengine.ext.ndb")
+ mock_non_existent_module("google.appengine.ext.webapp.util")
+ mock_non_existent_module("webapp2")
+
+
+@mock.patch("google.auth.app_engine.app_identity")
+def _test__convert_appengine_app_assertion_credentials(
+ app_identity, mock_oauth2client_gae_imports
+):
+
+ import oauth2client.contrib.appengine
+
+ service_account_id = "service_account_id"
+ old_credentials = oauth2client.contrib.appengine.AppAssertionCredentials(
+ scope="one two", service_account_id=service_account_id
+ )
+
+ new_credentials = _oauth2client._convert_appengine_app_assertion_credentials(
+ old_credentials
+ )
+
+ assert new_credentials.scopes == ["one", "two"]
+ assert new_credentials._service_account_id == old_credentials.service_account_id
+
+
+class FakeCredentials(object):
+ pass
+
+
+def test_convert_success():
+ convert_function = mock.Mock(spec=["__call__"])
+ conversion_map_patch = mock.patch.object(
+ _oauth2client, "_CLASS_CONVERSION_MAP", {FakeCredentials: convert_function}
+ )
+ credentials = FakeCredentials()
+
+ with conversion_map_patch:
+ result = _oauth2client.convert(credentials)
+
+ convert_function.assert_called_once_with(credentials)
+ assert result == convert_function.return_value
+
+
+def test_convert_not_found():
+ with pytest.raises(ValueError) as excinfo:
+ _oauth2client.convert("a string is not a real credentials class")
+
+ assert excinfo.match("Unable to convert")
+
+
+@pytest.fixture
+def reset__oauth2client_module():
+ """Reloads the _oauth2client module after a test."""
+ reload_module(_oauth2client)
+
+
+def _test_import_has_app_engine(
+ mock_oauth2client_gae_imports, reset__oauth2client_module
+):
+ reload_module(_oauth2client)
+ assert _oauth2client._HAS_APPENGINE
+
+
+def test_import_without_oauth2client(monkeypatch, reset__oauth2client_module):
+ monkeypatch.setitem(sys.modules, "oauth2client", None)
+ with pytest.raises(ImportError) as excinfo:
+ reload_module(_oauth2client)
+
+ assert excinfo.match("oauth2client")
diff --git a/contrib/python/google-auth/py2/tests/test__service_account_info.py b/contrib/python/google-auth/py2/tests/test__service_account_info.py
new file mode 100644
index 0000000000..ccbddb36e8
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test__service_account_info.py
@@ -0,0 +1,63 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import pytest
+import six
+
+from google.auth import _service_account_info
+from google.auth import crypt
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+def test_from_dict():
+ signer = _service_account_info.from_dict(SERVICE_ACCOUNT_INFO)
+ assert isinstance(signer, crypt.RSASigner)
+ assert signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+
+
+def test_from_dict_bad_private_key():
+ info = SERVICE_ACCOUNT_INFO.copy()
+ info["private_key"] = "garbage"
+
+ with pytest.raises(ValueError) as excinfo:
+ _service_account_info.from_dict(info)
+
+ assert excinfo.match(r"key")
+
+
+def test_from_dict_bad_format():
+ with pytest.raises(ValueError) as excinfo:
+ _service_account_info.from_dict({}, require=("meep",))
+
+ assert excinfo.match(r"missing fields")
+
+
+def test_from_filename():
+ info, signer = _service_account_info.from_filename(SERVICE_ACCOUNT_JSON_FILE)
+
+ for key, value in six.iteritems(SERVICE_ACCOUNT_INFO):
+ assert info[key] == value
+
+ assert isinstance(signer, crypt.RSASigner)
+ assert signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
diff --git a/contrib/python/google-auth/py2/tests/test_app_engine.py b/contrib/python/google-auth/py2/tests/test_app_engine.py
new file mode 100644
index 0000000000..6a788b9e9a
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_app_engine.py
@@ -0,0 +1,217 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import mock
+import pytest
+
+from google.auth import app_engine
+
+
+class _AppIdentityModule(object):
+ """The interface of the App Idenity app engine module.
+ See https://cloud.google.com/appengine/docs/standard/python/refdocs
+ /google.appengine.api.app_identity.app_identity
+ """
+
+ def get_application_id(self):
+ raise NotImplementedError()
+
+ def sign_blob(self, bytes_to_sign, deadline=None):
+ raise NotImplementedError()
+
+ def get_service_account_name(self, deadline=None):
+ raise NotImplementedError()
+
+ def get_access_token(self, scopes, service_account_id=None):
+ raise NotImplementedError()
+
+
+@pytest.fixture
+def app_identity(monkeypatch):
+ """Mocks the app_identity module for google.auth.app_engine."""
+ app_identity_module = mock.create_autospec(_AppIdentityModule, instance=True)
+ monkeypatch.setattr(app_engine, "app_identity", app_identity_module)
+ yield app_identity_module
+
+
+def test_get_project_id(app_identity):
+ app_identity.get_application_id.return_value = mock.sentinel.project
+ assert app_engine.get_project_id() == mock.sentinel.project
+
+
+@mock.patch.object(app_engine, "app_identity", new=None)
+def test_get_project_id_missing_apis():
+ with pytest.raises(EnvironmentError) as excinfo:
+ assert app_engine.get_project_id()
+
+ assert excinfo.match(r"App Engine APIs are not available")
+
+
+class TestSigner(object):
+ def test_key_id(self, app_identity):
+ app_identity.sign_blob.return_value = (
+ mock.sentinel.key_id,
+ mock.sentinel.signature,
+ )
+
+ signer = app_engine.Signer()
+
+ assert signer.key_id is None
+
+ def test_sign(self, app_identity):
+ app_identity.sign_blob.return_value = (
+ mock.sentinel.key_id,
+ mock.sentinel.signature,
+ )
+
+ signer = app_engine.Signer()
+ to_sign = b"123"
+
+ signature = signer.sign(to_sign)
+
+ assert signature == mock.sentinel.signature
+ app_identity.sign_blob.assert_called_with(to_sign)
+
+
+class TestCredentials(object):
+ @mock.patch.object(app_engine, "app_identity", new=None)
+ def test_missing_apis(self):
+ with pytest.raises(EnvironmentError) as excinfo:
+ app_engine.Credentials()
+
+ assert excinfo.match(r"App Engine APIs are not available")
+
+ def test_default_state(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ # Not token acquired yet
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+ # Scopes are required
+ assert not credentials.scopes
+ assert not credentials.default_scopes
+ assert credentials.requires_scopes
+ assert not credentials.quota_project_id
+
+ def test_with_scopes(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(["email"])
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_default_scopes(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ assert not credentials.scopes
+ assert not credentials.default_scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(
+ scopes=None, default_scopes=["email"]
+ )
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_quota_project(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ assert not credentials.scopes
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+
+ def test_service_account_email_implicit(self, app_identity):
+ app_identity.get_service_account_name.return_value = (
+ mock.sentinel.service_account_email
+ )
+ credentials = app_engine.Credentials()
+
+ assert credentials.service_account_email == mock.sentinel.service_account_email
+ assert app_identity.get_service_account_name.called
+
+ def test_service_account_email_explicit(self, app_identity):
+ credentials = app_engine.Credentials(
+ service_account_id=mock.sentinel.service_account_email
+ )
+
+ assert credentials.service_account_email == mock.sentinel.service_account_email
+ assert not app_identity.get_service_account_name.called
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh(self, utcnow, app_identity):
+ token = "token"
+ ttl = 643942923
+ app_identity.get_access_token.return_value = token, ttl
+ credentials = app_engine.Credentials(
+ scopes=["email"], default_scopes=["profile"]
+ )
+
+ credentials.refresh(None)
+
+ app_identity.get_access_token.assert_called_with(
+ credentials.scopes, credentials._service_account_id
+ )
+ assert credentials.token == token
+ assert credentials.expiry == datetime.datetime(1990, 5, 29, 1, 2, 3)
+ assert credentials.valid
+ assert not credentials.expired
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_default_scopes(self, utcnow, app_identity):
+ token = "token"
+ ttl = 643942923
+ app_identity.get_access_token.return_value = token, ttl
+ credentials = app_engine.Credentials(default_scopes=["email"])
+
+ credentials.refresh(None)
+
+ app_identity.get_access_token.assert_called_with(
+ credentials.default_scopes, credentials._service_account_id
+ )
+ assert credentials.token == token
+ assert credentials.expiry == datetime.datetime(1990, 5, 29, 1, 2, 3)
+ assert credentials.valid
+ assert not credentials.expired
+
+ def test_sign_bytes(self, app_identity):
+ app_identity.sign_blob.return_value = (
+ mock.sentinel.key_id,
+ mock.sentinel.signature,
+ )
+ credentials = app_engine.Credentials()
+ to_sign = b"123"
+
+ signature = credentials.sign_bytes(to_sign)
+
+ assert signature == mock.sentinel.signature
+ app_identity.sign_blob.assert_called_with(to_sign)
+
+ def test_signer(self, app_identity):
+ credentials = app_engine.Credentials()
+ assert isinstance(credentials.signer, app_engine.Signer)
+
+ def test_signer_email(self, app_identity):
+ credentials = app_engine.Credentials()
+ assert credentials.signer_email == credentials.service_account_email
diff --git a/contrib/python/google-auth/py2/tests/test_aws.py b/contrib/python/google-auth/py2/tests/test_aws.py
new file mode 100644
index 0000000000..9ca08d5b2c
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_aws.py
@@ -0,0 +1,1497 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+
+import mock
+import pytest
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import aws
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password".
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
+)
+QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+SCOPES = ["scope1", "scope2"]
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+SUBJECT_TOKEN_TYPE = "urn:ietf:params:aws:token-type:aws4_request"
+AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+REGION_URL = "http://169.254.169.254/latest/meta-data/placement/availability-zone"
+SECURITY_CREDS_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials"
+CRED_VERIFICATION_URL = (
+ "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
+)
+# Sample AWS security credentials to be used with tests that require a session token.
+ACCESS_KEY_ID = "ASIARD4OQDT6A77FR3CL"
+SECRET_ACCESS_KEY = "Y8AfSaucF37G4PpvfguKZ3/l7Id4uocLXxX0+VTx"
+TOKEN = "IQoJb3JpZ2luX2VjEIz//////////wEaCXVzLWVhc3QtMiJGMEQCIH7MHX/Oy/OB8OlLQa9GrqU1B914+iMikqWQW7vPCKlgAiA/Lsv8Jcafn14owfxXn95FURZNKaaphj0ykpmS+Ki+CSq0AwhlEAAaDDA3NzA3MTM5MTk5NiIMx9sAeP1ovlMTMKLjKpEDwuJQg41/QUKx0laTZYjPlQvjwSqS3OB9P1KAXPWSLkliVMMqaHqelvMF/WO/glv3KwuTfQsavRNs3v5pcSEm4SPO3l7mCs7KrQUHwGP0neZhIKxEXy+Ls//1C/Bqt53NL+LSbaGv6RPHaX82laz2qElphg95aVLdYgIFY6JWV5fzyjgnhz0DQmy62/Vi8pNcM2/VnxeCQ8CC8dRDSt52ry2v+nc77vstuI9xV5k8mPtnaPoJDRANh0bjwY5Sdwkbp+mGRUJBAQRlNgHUJusefXQgVKBCiyJY4w3Csd8Bgj9IyDV+Azuy1jQqfFZWgP68LSz5bURyIjlWDQunO82stZ0BgplKKAa/KJHBPCp8Qi6i99uy7qh76FQAqgVTsnDuU6fGpHDcsDSGoCls2HgZjZFPeOj8mmRhFk1Xqvkbjuz8V1cJk54d3gIJvQt8gD2D6yJQZecnuGWd5K2e2HohvCc8Fc9kBl1300nUJPV+k4tr/A5R/0QfEKOZL1/k5lf1g9CREnrM8LVkGxCgdYMxLQow1uTL+QU67AHRRSp5PhhGX4Rek+01vdYSnJCMaPhSEgcLqDlQkhk6MPsyT91QMXcWmyO+cAZwUPwnRamFepuP4K8k2KVXs/LIJHLELwAZ0ekyaS7CptgOqS7uaSTFG3U+vzFZLEnGvWQ7y9IPNQZ+Dffgh4p3vF4J68y9049sI6Sr5d5wbKkcbm8hdCDHZcv4lnqohquPirLiFQ3q7B17V9krMPu3mz1cg4Ekgcrn/E09NTsxAqD8NcZ7C7ECom9r+X3zkDOxaajW6hu3Az8hGlyylDaMiFfRbBJpTIlxp7jfa7CxikNgNtEKLH9iCzvuSg2vhA=="
+# To avoid json.dumps() differing behavior from one version to other,
+# the JSON payload is hardcoded.
+REQUEST_PARAMS = '{"KeySchema":[{"KeyType":"HASH","AttributeName":"Id"}],"TableName":"TestTable","AttributeDefinitions":[{"AttributeName":"Id","AttributeType":"S"}],"ProvisionedThroughput":{"WriteCapacityUnits":5,"ReadCapacityUnits":5}}'
+# Each tuple contains the following entries:
+# region, time, credentials, original_request, signed_request
+TEST_FIXTURES = [
+ # GET request (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with relative path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-relative-relative.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-relative-relative.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/foo/bar/../..",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/foo/bar/../..",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with /./ path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-dot-slash.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-dot-slash.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/./",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/./",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with pointless dot path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-pointless-dot.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-pointless-dot.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/./foo",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/./foo",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with utf8 path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-utf8.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-utf8.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/%E1%88%B4",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/%E1%88%B4",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with duplicate query key (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-key-case.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-key-case.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/?foo=Zoo&foo=aha",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?foo=Zoo&foo=aha",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with duplicate out of order query key (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-value.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-value.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/?foo=b&foo=a",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?foo=b&foo=a",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with utf8 query (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-ut8-query.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-ut8-query.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/?{}=bar".format(
+ urllib.parse.unquote("%E1%88%B4")
+ ),
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?{}=bar".format(
+ urllib.parse.unquote("%E1%88%B4")
+ ),
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # POST request with sorted headers (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-key-sort.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-key-sort.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT", "ZOO": "zoobar"},
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ "ZOO": "zoobar",
+ },
+ },
+ ),
+ # POST request with upper case header value from AWS Python test harness.
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-value-case.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-value-case.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT", "zoo": "ZOOBAR"},
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ "zoo": "ZOOBAR",
+ },
+ },
+ ),
+ # POST request with header and no body (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT", "p": "phfft"},
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ "p": "phfft",
+ },
+ },
+ ),
+ # POST request with body and no header (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-x-www-form-urlencoded.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-x-www-form-urlencoded.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ "data": "foo=bar",
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
+ "host": "host.foo.com",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ "data": "foo=bar",
+ },
+ ),
+ # POST request with querystring (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-vanilla-query.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-vanilla-query.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/?foo=bar",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?foo=bar",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with session token credentials.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ },
+ {
+ "method": "GET",
+ "url": "https://ec2.us-east-2.amazonaws.com?Action=DescribeRegions&Version=2013-10-15",
+ },
+ {
+ "url": "https://ec2.us-east-2.amazonaws.com?Action=DescribeRegions&Version=2013-10-15",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/ec2/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=631ea80cddfaa545fdadb120dc92c9f18166e38a5c47b50fab9fce476e022855",
+ "host": "ec2.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ "x-amz-security-token": TOKEN,
+ },
+ },
+ ),
+ # POST request with session token credentials.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ },
+ {
+ "method": "POST",
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ },
+ {
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/sts/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=73452984e4a880ffdc5c392355733ec3f5ba310d5e0609a89244440cadfe7a7a",
+ "host": "sts.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ "x-amz-security-token": TOKEN,
+ },
+ },
+ ),
+ # POST request with computed x-amz-date and no data.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {"access_key_id": ACCESS_KEY_ID, "secret_access_key": SECRET_ACCESS_KEY},
+ {
+ "method": "POST",
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ },
+ {
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/sts/aws4_request, SignedHeaders=host;x-amz-date, Signature=d095ba304919cd0d5570ba8a3787884ee78b860f268ed040ba23831d55536d56",
+ "host": "sts.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ },
+ },
+ ),
+ # POST request with session token and additional headers/data.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ },
+ {
+ "method": "POST",
+ "url": "https://dynamodb.us-east-2.amazonaws.com/",
+ "headers": {
+ "Content-Type": "application/x-amz-json-1.0",
+ "x-amz-target": "DynamoDB_20120810.CreateTable",
+ },
+ "data": REQUEST_PARAMS,
+ },
+ {
+ "url": "https://dynamodb.us-east-2.amazonaws.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-security-token;x-amz-target, Signature=fdaa5b9cc9c86b80fe61eaf504141c0b3523780349120f2bd8145448456e0385",
+ "host": "dynamodb.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ "Content-Type": "application/x-amz-json-1.0",
+ "x-amz-target": "DynamoDB_20120810.CreateTable",
+ "x-amz-security-token": TOKEN,
+ },
+ "data": REQUEST_PARAMS,
+ },
+ ),
+]
+
+
+class TestRequestSigner(object):
+ @pytest.mark.parametrize(
+ "region, time, credentials, original_request, signed_request", TEST_FIXTURES
+ )
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_get_request_options(
+ self, utcnow, region, time, credentials, original_request, signed_request
+ ):
+ utcnow.return_value = datetime.datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ")
+ request_signer = aws.RequestSigner(region)
+ actual_signed_request = request_signer.get_request_options(
+ credentials,
+ original_request.get("url"),
+ original_request.get("method"),
+ original_request.get("data"),
+ original_request.get("headers"),
+ )
+
+ assert actual_signed_request == signed_request
+
+ def test_get_request_options_with_missing_scheme_url(self):
+ request_signer = aws.RequestSigner("us-east-2")
+
+ with pytest.raises(ValueError) as excinfo:
+ request_signer.get_request_options(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ },
+ "invalid",
+ "POST",
+ )
+
+ assert excinfo.match(r"Invalid AWS service URL")
+
+ def test_get_request_options_with_invalid_scheme_url(self):
+ request_signer = aws.RequestSigner("us-east-2")
+
+ with pytest.raises(ValueError) as excinfo:
+ request_signer.get_request_options(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ },
+ "http://invalid",
+ "POST",
+ )
+
+ assert excinfo.match(r"Invalid AWS service URL")
+
+ def test_get_request_options_with_missing_hostname_url(self):
+ request_signer = aws.RequestSigner("us-east-2")
+
+ with pytest.raises(ValueError) as excinfo:
+ request_signer.get_request_options(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ },
+ "https://",
+ "POST",
+ )
+
+ assert excinfo.match(r"Invalid AWS service URL")
+
+
+class TestCredentials(object):
+ AWS_REGION = "us-east-2"
+ AWS_ROLE = "gcp-aws-role"
+ AWS_SECURITY_CREDENTIALS_RESPONSE = {
+ "AccessKeyId": ACCESS_KEY_ID,
+ "SecretAccessKey": SECRET_ACCESS_KEY,
+ "Token": TOKEN,
+ }
+ AWS_SIGNATURE_TIME = "2020-08-11T06:55:22Z"
+ CREDENTIAL_SOURCE = {
+ "environment_id": "aws1",
+ "region_url": REGION_URL,
+ "url": SECURITY_CREDS_URL,
+ "regional_cred_verification_url": CRED_VERIFICATION_URL,
+ }
+ SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+ "scope": " ".join(SCOPES),
+ }
+
+ @classmethod
+ def make_serialized_aws_signed_request(
+ cls,
+ aws_security_credentials,
+ region_name="us-east-2",
+ url="https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ ):
+ """Utility to generate serialized AWS signed requests.
+ This makes it easy to assert generated subject tokens based on the
+ provided AWS security credentials, regions and AWS STS endpoint.
+ """
+ request_signer = aws.RequestSigner(region_name)
+ signed_request = request_signer.get_request_options(
+ aws_security_credentials, url, "POST"
+ )
+ reformatted_signed_request = {
+ "url": signed_request.get("url"),
+ "method": signed_request.get("method"),
+ "headers": [
+ {
+ "key": "Authorization",
+ "value": signed_request.get("headers").get("Authorization"),
+ },
+ {"key": "host", "value": signed_request.get("headers").get("host")},
+ {
+ "key": "x-amz-date",
+ "value": signed_request.get("headers").get("x-amz-date"),
+ },
+ ],
+ }
+ # Include security token if available.
+ if "security_token" in aws_security_credentials:
+ reformatted_signed_request.get("headers").append(
+ {
+ "key": "x-amz-security-token",
+ "value": signed_request.get("headers").get("x-amz-security-token"),
+ }
+ )
+ # Append x-goog-cloud-target-resource header.
+ reformatted_signed_request.get("headers").append(
+ {"key": "x-goog-cloud-target-resource", "value": AUDIENCE}
+ ),
+ return urllib.parse.quote(
+ json.dumps(
+ reformatted_signed_request, separators=(",", ":"), sort_keys=True
+ )
+ )
+
+ @classmethod
+ def make_mock_request(
+ cls,
+ region_status=None,
+ region_name=None,
+ role_status=None,
+ role_name=None,
+ security_credentials_status=None,
+ security_credentials_data=None,
+ token_status=None,
+ token_data=None,
+ impersonation_status=None,
+ impersonation_data=None,
+ ):
+ """Utility function to generate a mock HTTP request object.
+ This will facilitate testing various edge cases by specifying how the
+ various endpoints will respond while generating a Google Access token
+ in an AWS environment.
+ """
+ responses = []
+ if region_status:
+ # AWS region request.
+ region_response = mock.create_autospec(transport.Response, instance=True)
+ region_response.status = region_status
+ if region_name:
+ region_response.data = "{}b".format(region_name).encode("utf-8")
+ responses.append(region_response)
+
+ if role_status:
+ # AWS role name request.
+ role_response = mock.create_autospec(transport.Response, instance=True)
+ role_response.status = role_status
+ if role_name:
+ role_response.data = role_name.encode("utf-8")
+ responses.append(role_response)
+
+ if security_credentials_status:
+ # AWS security credentials request.
+ security_credentials_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ security_credentials_response.status = security_credentials_status
+ if security_credentials_data:
+ security_credentials_response.data = json.dumps(
+ security_credentials_data
+ ).encode("utf-8")
+ responses.append(security_credentials_response)
+
+ if token_status:
+ # GCP token exchange request.
+ token_response = mock.create_autospec(transport.Response, instance=True)
+ token_response.status = token_status
+ token_response.data = json.dumps(token_data).encode("utf-8")
+ responses.append(token_response)
+
+ if impersonation_status:
+ # Service account impersonation request.
+ impersonation_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ impersonation_response.status = impersonation_status
+ impersonation_response.data = json.dumps(impersonation_data).encode("utf-8")
+ responses.append(impersonation_response)
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ @classmethod
+ def make_credentials(
+ cls,
+ credential_source,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ ):
+ return aws.Credentials(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=credential_source,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+
+ @classmethod
+ def assert_aws_metadata_request_kwargs(cls, request_kwargs, url, headers=None):
+ assert request_kwargs["url"] == url
+ # All used AWS metadata server endpoints use GET HTTP method.
+ assert request_kwargs["method"] == "GET"
+ if headers:
+ assert request_kwargs["headers"] == headers
+ else:
+ assert "headers" not in request_kwargs
+ # None of the endpoints used require any data in the request.
+ assert "body" not in request_kwargs
+
+ @classmethod
+ def assert_token_request_kwargs(
+ cls, request_kwargs, headers, request_data, token_url=TOKEN_URL
+ ):
+ assert request_kwargs["url"] == token_url
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ assert len(body_tuples) == len(request_data.keys())
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+
+ @classmethod
+ def assert_impersonation_request_kwargs(
+ cls,
+ request_kwargs,
+ headers,
+ request_data,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ ):
+ assert request_kwargs["url"] == service_account_impersonation_url
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_json = json.loads(request_kwargs["body"].decode("utf-8"))
+ assert body_json == request_data
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_info_full_options(self, mock_init):
+ credentials = aws.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ )
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_info_required_options_only(self, mock_init):
+ credentials = aws.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ )
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=None,
+ )
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_file_full_options(self, mock_init, tmpdir):
+ info = {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(info))
+ credentials = aws.Credentials.from_file(str(config_file))
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_file_required_options_only(self, mock_init, tmpdir):
+ info = {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(info))
+ credentials = aws.Credentials.from_file(str(config_file))
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=None,
+ )
+
+ def test_constructor_invalid_credential_source(self):
+ # Provide invalid credential source.
+ credential_source = {"unsupported": "value"}
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"No valid AWS 'credential_source' provided")
+
+ def test_constructor_invalid_environment_id(self):
+ # Provide invalid environment_id.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source["environment_id"] = "azure1"
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"No valid AWS 'credential_source' provided")
+
+ def test_constructor_missing_cred_verification_url(self):
+ # regional_cred_verification_url is a required field.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source.pop("regional_cred_verification_url")
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"No valid AWS 'credential_source' provided")
+
+ def test_constructor_invalid_environment_id_version(self):
+ # Provide an unsupported version.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source["environment_id"] = "aws3"
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"aws version '3' is not supported in the current build.")
+
+ def test_info(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy()
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+
+ def test_retrieve_subject_token_missing_region_url(self):
+ # When AWS_REGION envvar is not available, region_url is required for
+ # determining the current AWS region.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source.pop("region_url")
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(None)
+
+ assert excinfo.match(r"Unable to determine AWS region")
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_temp_creds_no_environment_vars(
+ self, utcnow
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert region request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1], REGION_URL
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1], SECURITY_CREDS_URL
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {"Content-Type": "application/json"},
+ )
+
+ # Retrieve subject_token again. Region should not be queried again.
+ new_request = self.make_mock_request(
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ )
+
+ credentials.retrieve_subject_token(new_request)
+
+ # Only 2 requests should be sent as the region is cached.
+ assert len(new_request.call_args_list) == 2
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ new_request.call_args_list[0][1], SECURITY_CREDS_URL
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ new_request.call_args_list[1][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {"Content-Type": "application/json"},
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_permanent_creds_no_environment_vars(
+ self, utcnow
+ ):
+ # Simulate a permanent credential without a session token being
+ # returned by the security-credentials endpoint.
+ security_creds_response = self.AWS_SECURITY_CREDENTIALS_RESPONSE.copy()
+ security_creds_response.pop("Token")
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=security_creds_response,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {"access_key_id": ACCESS_KEY_ID, "secret_access_key": SECRET_ACCESS_KEY}
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars(self, utcnow, monkeypatch):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ monkeypatch.setenv(environment_vars.AWS_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_with_default_region(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ monkeypatch.setenv(environment_vars.AWS_DEFAULT_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_with_both_regions_set(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ monkeypatch.setenv(environment_vars.AWS_DEFAULT_REGION, "Malformed AWS Region")
+ # This test makes sure that AWS_REGION gets used over AWS_DEFAULT_REGION,
+ # so AWS_DEFAULT_REGION is set to something that would cause the test to fail,
+ # and AWS_REGION is set to a valid value, so the test should succeed.
+ monkeypatch.setenv(environment_vars.AWS_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_no_session_token(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {"access_key_id": ACCESS_KEY_ID, "secret_access_key": SECRET_ACCESS_KEY}
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_except_region(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ # Region will be queried since it is not found in envvars.
+ request = self.make_mock_request(
+ region_status=http_client.OK, region_name=self.AWS_REGION
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ def test_retrieve_subject_token_error_determining_aws_region(self):
+ # Simulate error in retrieving the AWS region.
+ request = self.make_mock_request(region_status=http_client.BAD_REQUEST)
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS region")
+
+ def test_retrieve_subject_token_error_determining_aws_role(self):
+ # Simulate error in retrieving the AWS role name.
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.BAD_REQUEST,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS role name")
+
+ def test_retrieve_subject_token_error_determining_security_creds_url(self):
+ # Simulate the security-credentials url is missing. This is needed for
+ # determining the AWS security credentials when not found in envvars.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source.pop("url")
+ request = self.make_mock_request(
+ region_status=http_client.OK, region_name=self.AWS_REGION
+ )
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(
+ r"Unable to determine the AWS metadata server security credentials endpoint"
+ )
+
+ def test_retrieve_subject_token_error_determining_aws_security_creds(self):
+ # Simulate error in retrieving the AWS security credentials.
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.BAD_REQUEST,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS security credentials")
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_without_impersonation_ignore_default_scopes(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": " ".join(SCOPES),
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=SCOPES,
+ # Default scopes should be ignored.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 4
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes == SCOPES
+ assert credentials.default_scopes == ["ignored"]
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_without_impersonation_use_default_scopes(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": " ".join(SCOPES),
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=None,
+ # Default scopes should be used since user specified scopes are none.
+ default_scopes=SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 4
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes is None
+ assert credentials.default_scopes == SCOPES
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_with_impersonation_ignore_default_scopes(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "https://www.googleapis.com/auth/iam",
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": QUOTA_PROJECT_ID,
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": SCOPES,
+ "lifetime": "3600s",
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=SCOPES,
+ # Default scopes should be ignored.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 5
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ # Fifth request should be sent to iamcredentials endpoint for service
+ # account impersonation.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[4][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.token == impersonation_response["accessToken"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes == SCOPES
+ assert credentials.default_scopes == ["ignored"]
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_with_impersonation_use_default_scopes(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "https://www.googleapis.com/auth/iam",
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": QUOTA_PROJECT_ID,
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": SCOPES,
+ "lifetime": "3600s",
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=None,
+ # Default scopes should be used since user specified scopes are none.
+ default_scopes=SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 5
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ # Fifth request should be sent to iamcredentials endpoint for service
+ # account impersonation.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[4][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.token == impersonation_response["accessToken"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes is None
+ assert credentials.default_scopes == SCOPES
+
+ def test_refresh_with_retrieve_subject_token_error(self):
+ request = self.make_mock_request(region_status=http_client.BAD_REQUEST)
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS region")
diff --git a/contrib/python/google-auth/py2/tests/test_credentials.py b/contrib/python/google-auth/py2/tests/test_credentials.py
new file mode 100644
index 0000000000..0633b38c07
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_credentials.py
@@ -0,0 +1,177 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import pytest
+
+from google.auth import _helpers
+from google.auth import credentials
+
+
+class CredentialsImpl(credentials.Credentials):
+ def refresh(self, request):
+ self.token = request
+
+ def with_quota_project(self, quota_project_id):
+ raise NotImplementedError()
+
+
+def test_credentials_constructor():
+ credentials = CredentialsImpl()
+ assert not credentials.token
+ assert not credentials.expiry
+ assert not credentials.expired
+ assert not credentials.valid
+
+
+def test_expired_and_valid():
+ credentials = CredentialsImpl()
+ credentials.token = "token"
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ # Set the expiration to one second more than now plus the clock skew
+ # accommodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.utcnow() + _helpers.CLOCK_SKEW + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ # Set the credentials expiration to now. Because of the clock skew
+ # accommodation, these credentials should report as expired.
+ credentials.expiry = datetime.datetime.utcnow()
+
+ assert not credentials.valid
+ assert credentials.expired
+
+
+def test_before_request():
+ credentials = CredentialsImpl()
+ request = "token"
+ headers = {}
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert credentials.valid
+ assert credentials.token == "token"
+ assert headers["authorization"] == "Bearer token"
+
+ request = "token2"
+ headers = {}
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert credentials.valid
+ assert credentials.token == "token"
+ assert headers["authorization"] == "Bearer token"
+
+
+def test_anonymous_credentials_ctor():
+ anon = credentials.AnonymousCredentials()
+ assert anon.token is None
+ assert anon.expiry is None
+ assert not anon.expired
+ assert anon.valid
+
+
+def test_anonymous_credentials_refresh():
+ anon = credentials.AnonymousCredentials()
+ request = object()
+ with pytest.raises(ValueError):
+ anon.refresh(request)
+
+
+def test_anonymous_credentials_apply_default():
+ anon = credentials.AnonymousCredentials()
+ headers = {}
+ anon.apply(headers)
+ assert headers == {}
+ with pytest.raises(ValueError):
+ anon.apply(headers, token="TOKEN")
+
+
+def test_anonymous_credentials_before_request():
+ anon = credentials.AnonymousCredentials()
+ request = object()
+ method = "GET"
+ url = "https://example.com/api/endpoint"
+ headers = {}
+ anon.before_request(request, method, url, headers)
+ assert headers == {}
+
+
+class ReadOnlyScopedCredentialsImpl(credentials.ReadOnlyScoped, CredentialsImpl):
+ @property
+ def requires_scopes(self):
+ return super(ReadOnlyScopedCredentialsImpl, self).requires_scopes
+
+
+def test_readonly_scoped_credentials_constructor():
+ credentials = ReadOnlyScopedCredentialsImpl()
+ assert credentials._scopes is None
+
+
+def test_readonly_scoped_credentials_scopes():
+ credentials = ReadOnlyScopedCredentialsImpl()
+ credentials._scopes = ["one", "two"]
+ assert credentials.scopes == ["one", "two"]
+ assert credentials.has_scopes(["one"])
+ assert credentials.has_scopes(["two"])
+ assert credentials.has_scopes(["one", "two"])
+ assert not credentials.has_scopes(["three"])
+
+
+def test_readonly_scoped_credentials_requires_scopes():
+ credentials = ReadOnlyScopedCredentialsImpl()
+ assert not credentials.requires_scopes
+
+
+class RequiresScopedCredentialsImpl(credentials.Scoped, CredentialsImpl):
+ def __init__(self, scopes=None, default_scopes=None):
+ super(RequiresScopedCredentialsImpl, self).__init__()
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+
+ @property
+ def requires_scopes(self):
+ return not self.scopes
+
+ def with_scopes(self, scopes, default_scopes=None):
+ return RequiresScopedCredentialsImpl(
+ scopes=scopes, default_scopes=default_scopes
+ )
+
+
+def test_create_scoped_if_required_scoped():
+ unscoped_credentials = RequiresScopedCredentialsImpl()
+ scoped_credentials = credentials.with_scopes_if_required(
+ unscoped_credentials, ["one", "two"]
+ )
+
+ assert scoped_credentials is not unscoped_credentials
+ assert not scoped_credentials.requires_scopes
+ assert scoped_credentials.has_scopes(["one", "two"])
+
+
+def test_create_scoped_if_required_not_scopes():
+ unscoped_credentials = CredentialsImpl()
+ scoped_credentials = credentials.with_scopes_if_required(
+ unscoped_credentials, ["one", "two"]
+ )
+
+ assert scoped_credentials is unscoped_credentials
diff --git a/contrib/python/google-auth/py2/tests/test_downscoped.py b/contrib/python/google-auth/py2/tests/test_downscoped.py
new file mode 100644
index 0000000000..795ec2942e
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_downscoped.py
@@ -0,0 +1,694 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+
+import mock
+import pytest
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import downscoped
+from google.auth import exceptions
+from google.auth import transport
+
+
+EXPRESSION = (
+ "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-a')"
+)
+TITLE = "customer-a-objects"
+DESCRIPTION = (
+ "Condition to make permissions available for objects starting with customer-a"
+)
+AVAILABLE_RESOURCE = "//storage.googleapis.com/projects/_/buckets/example-bucket"
+AVAILABLE_PERMISSIONS = ["inRole:roles/storage.objectViewer"]
+
+OTHER_EXPRESSION = (
+ "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-b')"
+)
+OTHER_TITLE = "customer-b-objects"
+OTHER_DESCRIPTION = (
+ "Condition to make permissions available for objects starting with customer-b"
+)
+OTHER_AVAILABLE_RESOURCE = "//storage.googleapis.com/projects/_/buckets/other-bucket"
+OTHER_AVAILABLE_PERMISSIONS = ["inRole:roles/storage.objectCreator"]
+QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+TOKEN_EXCHANGE_ENDPOINT = "https://sts.googleapis.com/v1/token"
+SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+}
+ERROR_RESPONSE = {
+ "error": "invalid_grant",
+ "error_description": "Subject token is invalid.",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+}
+CREDENTIAL_ACCESS_BOUNDARY_JSON = {
+ "accessBoundary": {
+ "accessBoundaryRules": [
+ {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ "availabilityCondition": {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ },
+ }
+ ]
+ }
+}
+
+
+class SourceCredentials(credentials.Credentials):
+ def __init__(self, raise_error=False, expires_in=3600):
+ super(SourceCredentials, self).__init__()
+ self._counter = 0
+ self._raise_error = raise_error
+ self._expires_in = expires_in
+
+ def refresh(self, request):
+ if self._raise_error:
+ raise exceptions.RefreshError(
+ "Failed to refresh access token in source credentials."
+ )
+ now = _helpers.utcnow()
+ self._counter += 1
+ self.token = "ACCESS_TOKEN_{}".format(self._counter)
+ self.expiry = now + datetime.timedelta(seconds=self._expires_in)
+
+
+def make_availability_condition(expression, title=None, description=None):
+ return downscoped.AvailabilityCondition(expression, title, description)
+
+
+def make_access_boundary_rule(
+ available_resource, available_permissions, availability_condition=None
+):
+ return downscoped.AccessBoundaryRule(
+ available_resource, available_permissions, availability_condition
+ )
+
+
+def make_credential_access_boundary(rules):
+ return downscoped.CredentialAccessBoundary(rules)
+
+
+class TestAvailabilityCondition(object):
+ def test_constructor(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+
+ assert availability_condition.expression == EXPRESSION
+ assert availability_condition.title == TITLE
+ assert availability_condition.description == DESCRIPTION
+
+ def test_constructor_required_params_only(self):
+ availability_condition = make_availability_condition(EXPRESSION)
+
+ assert availability_condition.expression == EXPRESSION
+ assert availability_condition.title is None
+ assert availability_condition.description is None
+
+ def test_setters(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ availability_condition.expression = OTHER_EXPRESSION
+ availability_condition.title = OTHER_TITLE
+ availability_condition.description = OTHER_DESCRIPTION
+
+ assert availability_condition.expression == OTHER_EXPRESSION
+ assert availability_condition.title == OTHER_TITLE
+ assert availability_condition.description == OTHER_DESCRIPTION
+
+ def test_invalid_expression_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_availability_condition([EXPRESSION], TITLE, DESCRIPTION)
+
+ assert excinfo.match("The provided expression is not a string.")
+
+ def test_invalid_title_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_availability_condition(EXPRESSION, False, DESCRIPTION)
+
+ assert excinfo.match("The provided title is not a string or None.")
+
+ def test_invalid_description_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_availability_condition(EXPRESSION, TITLE, False)
+
+ assert excinfo.match("The provided description is not a string or None.")
+
+ def test_to_json_required_params_only(self):
+ availability_condition = make_availability_condition(EXPRESSION)
+
+ assert availability_condition.to_json() == {"expression": EXPRESSION}
+
+ def test_to_json_(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+
+ assert availability_condition.to_json() == {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ }
+
+
+class TestAccessBoundaryRule(object):
+ def test_constructor(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+
+ assert access_boundary_rule.available_resource == AVAILABLE_RESOURCE
+ assert access_boundary_rule.available_permissions == tuple(
+ AVAILABLE_PERMISSIONS
+ )
+ assert access_boundary_rule.availability_condition == availability_condition
+
+ def test_constructor_required_params_only(self):
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS
+ )
+
+ assert access_boundary_rule.available_resource == AVAILABLE_RESOURCE
+ assert access_boundary_rule.available_permissions == tuple(
+ AVAILABLE_PERMISSIONS
+ )
+ assert access_boundary_rule.availability_condition is None
+
+ def test_setters(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ other_availability_condition = make_availability_condition(
+ OTHER_EXPRESSION, OTHER_TITLE, OTHER_DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ access_boundary_rule.available_resource = OTHER_AVAILABLE_RESOURCE
+ access_boundary_rule.available_permissions = OTHER_AVAILABLE_PERMISSIONS
+ access_boundary_rule.availability_condition = other_availability_condition
+
+ assert access_boundary_rule.available_resource == OTHER_AVAILABLE_RESOURCE
+ assert access_boundary_rule.available_permissions == tuple(
+ OTHER_AVAILABLE_PERMISSIONS
+ )
+ assert (
+ access_boundary_rule.availability_condition == other_availability_condition
+ )
+
+ def test_invalid_available_resource_type(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ with pytest.raises(TypeError) as excinfo:
+ make_access_boundary_rule(
+ None, AVAILABLE_PERMISSIONS, availability_condition
+ )
+
+ assert excinfo.match("The provided available_resource is not a string.")
+
+ def test_invalid_available_permissions_type(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ with pytest.raises(TypeError) as excinfo:
+ make_access_boundary_rule(
+ AVAILABLE_RESOURCE, [0, 1, 2], availability_condition
+ )
+
+ assert excinfo.match(
+ "Provided available_permissions are not a list of strings."
+ )
+
+ def test_invalid_available_permissions_value(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ with pytest.raises(ValueError) as excinfo:
+ make_access_boundary_rule(
+ AVAILABLE_RESOURCE,
+ ["roles/storage.objectViewer"],
+ availability_condition,
+ )
+
+ assert excinfo.match("available_permissions must be prefixed with 'inRole:'.")
+
+ def test_invalid_availability_condition_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, {"foo": "bar"}
+ )
+
+ assert excinfo.match(
+ "The provided availability_condition is not a 'google.auth.downscoped.AvailabilityCondition' or None."
+ )
+
+ def test_to_json(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+
+ assert access_boundary_rule.to_json() == {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ "availabilityCondition": {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ },
+ }
+
+ def test_to_json_required_params_only(self):
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS
+ )
+
+ assert access_boundary_rule.to_json() == {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ }
+
+
+class TestCredentialAccessBoundary(object):
+ def test_constructor(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ assert credential_access_boundary.rules == tuple(rules)
+
+ def test_setters(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ other_availability_condition = make_availability_condition(
+ OTHER_EXPRESSION, OTHER_TITLE, OTHER_DESCRIPTION
+ )
+ other_access_boundary_rule = make_access_boundary_rule(
+ OTHER_AVAILABLE_RESOURCE,
+ OTHER_AVAILABLE_PERMISSIONS,
+ other_availability_condition,
+ )
+ other_rules = [other_access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+ credential_access_boundary.rules = other_rules
+
+ assert credential_access_boundary.rules == tuple(other_rules)
+
+ def test_add_rule(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule] * 9
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ # Add one more rule. This should not raise an error.
+ additional_access_boundary_rule = make_access_boundary_rule(
+ OTHER_AVAILABLE_RESOURCE, OTHER_AVAILABLE_PERMISSIONS
+ )
+ credential_access_boundary.add_rule(additional_access_boundary_rule)
+
+ assert len(credential_access_boundary.rules) == 10
+ assert credential_access_boundary.rules[9] == additional_access_boundary_rule
+
+ def test_add_rule_invalid_value(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule] * 10
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ # Add one more rule to exceed maximum allowed rules.
+ with pytest.raises(ValueError) as excinfo:
+ credential_access_boundary.add_rule(access_boundary_rule)
+
+ assert excinfo.match(
+ "Credential access boundary rules can have a maximum of 10 rules."
+ )
+ assert len(credential_access_boundary.rules) == 10
+
+ def test_add_rule_invalid_type(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ # Attempt to add a rule of an invalid type; the rule list should be unchanged.
+ with pytest.raises(TypeError) as excinfo:
+ credential_access_boundary.add_rule("invalid")
+
+ assert excinfo.match(
+ "The provided rule does not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
+ )
+ assert len(credential_access_boundary.rules) == 1
+ assert credential_access_boundary.rules[0] == access_boundary_rule
+
+ def test_invalid_rules_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_credential_access_boundary(["invalid"])
+
+ assert excinfo.match(
+ "List of rules provided do not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
+ )
+
+ def test_invalid_rules_value(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ too_many_rules = [access_boundary_rule] * 11
+ with pytest.raises(ValueError) as excinfo:
+ make_credential_access_boundary(too_many_rules)
+
+ assert excinfo.match(
+ "Credential access boundary rules can have a maximum of 10 rules."
+ )
+
+ def test_to_json(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ assert credential_access_boundary.to_json() == {
+ "accessBoundary": {
+ "accessBoundaryRules": [
+ {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ "availabilityCondition": {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ },
+ }
+ ]
+ }
+ }
+
+
+class TestCredentials(object):
+ @staticmethod
+ def make_credentials(source_credentials=SourceCredentials(), quota_project_id=None):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ return downscoped.Credentials(
+ source_credentials, credential_access_boundary, quota_project_id
+ )
+
+ @staticmethod
+ def make_mock_request(data, status=http_client.OK):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+
+ return request
+
+ @staticmethod
+ def assert_request_kwargs(request_kwargs, headers, request_data):
+ """Asserts the request was called with the expected parameters.
+ """
+ assert request_kwargs["url"] == TOKEN_EXCHANGE_ENDPOINT
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+ assert len(body_tuples) == len(request_data.keys())
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+
+ # No token acquired yet.
+ assert not credentials.token
+ assert not credentials.valid
+ # Expiration hasn't been set yet.
+ assert not credentials.expiry
+ assert not credentials.expired
+ # No quota project ID set.
+ assert not credentials.quota_project_id
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh(self, unused_utcnow):
+ response = SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": GRANT_TYPE,
+ "subject_token": "ACCESS_TOKEN_1",
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "requested_token_type": REQUESTED_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(CREDENTIAL_ACCESS_BOUNDARY_JSON)),
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ source_credentials = SourceCredentials()
+ credentials = self.make_credentials(source_credentials=source_credentials)
+
+ # Spy on calls to source credentials refresh to confirm the expected request
+ # instance is used.
+ with mock.patch.object(
+ source_credentials, "refresh", wraps=source_credentials.refresh
+ ) as wrapped_souce_cred_refresh:
+ credentials.refresh(request)
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+ # Confirm source credentials called with the same request instance.
+ wrapped_souce_cred_refresh.assert_called_with(request)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_without_response_expires_in(self, unused_utcnow):
+ response = SUCCESS_RESPONSE.copy()
+ # Simulate the response is missing the expires_in field.
+ # The downscoped token expiration should match the source credentials
+ # expiration.
+ del response["expires_in"]
+ expected_expires_in = 1800
+ # Simulate the source credentials generates a token with 1800 second
+ # expiration time. The generated downscoped token should have the same
+ # expiration time.
+ source_credentials = SourceCredentials(expires_in=expected_expires_in)
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=expected_expires_in
+ )
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": GRANT_TYPE,
+ "subject_token": "ACCESS_TOKEN_1",
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "requested_token_type": REQUESTED_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(CREDENTIAL_ACCESS_BOUNDARY_JSON)),
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ credentials = self.make_credentials(source_credentials=source_credentials)
+
+ # Spy on calls to source credentials refresh to confirm the expected request
+ # instance is used.
+ with mock.patch.object(
+ source_credentials, "refresh", wraps=source_credentials.refresh
+ ) as wrapped_souce_cred_refresh:
+ credentials.refresh(request)
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+ # Confirm source credentials called with the same request instance.
+ wrapped_souce_cred_refresh.assert_called_with(request)
+
+ def test_refresh_token_exchange_error(self):
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=ERROR_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(
+ r"Error code invalid_grant: Subject token is invalid. - https://tools.ietf.org/html/rfc6749"
+ )
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_refresh_source_credentials_refresh_error(self):
+ # Initialize downscoped credentials with source credentials that raise
+ # an error on refresh.
+ credentials = self.make_credentials(
+ source_credentials=SourceCredentials(raise_error=True)
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(mock.sentinel.request)
+
+ assert excinfo.match(r"Failed to refresh access token in source credentials.")
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_apply_without_quota_project_id(self):
+ headers = {}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials()
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"])
+ }
+
+ def test_apply_with_quota_project_id(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials(quota_project_id=QUOTA_PROJECT_ID)
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": QUOTA_PROJECT_ID,
+ }
+
+ def test_before_request(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials()
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"]),
+ }
+
+ # Second call shouldn't call refresh (request should be untouched).
+ credentials.before_request(
+ mock.sentinel.request, "POST", "https://example.com/api", headers
+ )
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"]),
+ }
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_before_request_expired(self, utcnow):
+ headers = {}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials()
+ credentials.token = "token"
+ utcnow.return_value = datetime.datetime.min
+ # Set the expiration to one second more than now plus the clock skew
+ # accommodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.min + _helpers.CLOCK_SKEW + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # Cached token should be used.
+ assert headers == {"authorization": "Bearer token"}
+
+ # Next call should simulate 1 second passed.
+ utcnow.return_value = datetime.datetime.min + datetime.timedelta(seconds=1)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # New token should be retrieved.
+ assert headers == {
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"])
+ }
diff --git a/contrib/python/google-auth/py2/tests/test_external_account.py b/contrib/python/google-auth/py2/tests/test_external_account.py
new file mode 100644
index 0000000000..7390fb980f
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_external_account.py
@@ -0,0 +1,1203 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+
+import mock
+import pytest
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import external_account
+from google.auth import transport
+
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password"
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+# List of valid workforce pool audiences.
+TEST_USER_AUDIENCES = [
+ "//iam.googleapis.com/locations/global/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/eu/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/eu/workforcePools/workloadIdentityPools/providers/provider-id",
+]
+
+
+class CredentialsImpl(external_account.Credentials):
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ ):
+ super(CredentialsImpl, self).__init__(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ credential_source=credential_source,
+ service_account_impersonation_url=service_account_impersonation_url,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+ self._counter = 0
+
+ def retrieve_subject_token(self, request):
+ counter = self._counter
+ self._counter += 1
+ return "subject_token_{}".format(counter)
+
+
+class TestCredentials(object):
+ TOKEN_URL = "https://sts.googleapis.com/v1/token"
+ PROJECT_NUMBER = "123456"
+ POOL_ID = "POOL_ID"
+ PROVIDER_ID = "PROVIDER_ID"
+ AUDIENCE = (
+ "//iam.googleapis.com/projects/{}"
+ "/locations/global/workloadIdentityPools/{}"
+ "/providers/{}"
+ ).format(PROJECT_NUMBER, POOL_ID, PROVIDER_ID)
+ SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+ CREDENTIAL_SOURCE = {"file": "/var/run/secrets/goog.id/token"}
+ SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+ "scope": "scope1 scope2",
+ }
+ ERROR_RESPONSE = {
+ "error": "invalid_request",
+ "error_description": "Invalid subject token",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ }
+ QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+ SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
+ )
+ SCOPES = ["scope1", "scope2"]
+ IMPERSONATION_ERROR_RESPONSE = {
+ "error": {
+ "code": 400,
+ "message": "Request contains an invalid argument",
+ "status": "INVALID_ARGUMENT",
+ }
+ }
+ PROJECT_ID = "my-proj-id"
+ CLOUD_RESOURCE_MANAGER_URL = (
+ "https://cloudresourcemanager.googleapis.com/v1/projects/"
+ )
+ CLOUD_RESOURCE_MANAGER_SUCCESS_RESPONSE = {
+ "projectNumber": PROJECT_NUMBER,
+ "projectId": PROJECT_ID,
+ "lifecycleState": "ACTIVE",
+ "name": "project-name",
+ "createTime": "2018-11-06T04:42:54.109Z",
+ "parent": {"type": "folder", "id": "12345678901"},
+ }
+
+ @classmethod
+ def make_credentials(
+ cls,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ ):
+ return CredentialsImpl(
+ audience=cls.AUDIENCE,
+ subject_token_type=cls.SUBJECT_TOKEN_TYPE,
+ token_url=cls.TOKEN_URL,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=cls.CREDENTIAL_SOURCE,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+
+ @classmethod
+ def make_mock_request(
+ cls,
+ status=http_client.OK,
+ data=None,
+ impersonation_status=None,
+ impersonation_data=None,
+ cloud_resource_manager_status=None,
+ cloud_resource_manager_data=None,
+ ):
+ # STS token exchange request.
+ token_response = mock.create_autospec(transport.Response, instance=True)
+ token_response.status = status
+ token_response.data = json.dumps(data).encode("utf-8")
+ responses = [token_response]
+
+ # If service account impersonation is requested, mock the expected response.
+ if impersonation_status:
+ impersonation_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ impersonation_response.status = impersonation_status
+ impersonation_response.data = json.dumps(impersonation_data).encode("utf-8")
+ responses.append(impersonation_response)
+
+ # If cloud resource manager is requested, mock the expected response.
+ if cloud_resource_manager_status:
+ cloud_resource_manager_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ cloud_resource_manager_response.status = cloud_resource_manager_status
+ cloud_resource_manager_response.data = json.dumps(
+ cloud_resource_manager_data
+ ).encode("utf-8")
+ responses.append(cloud_resource_manager_response)
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ @classmethod
+ def assert_token_request_kwargs(cls, request_kwargs, headers, request_data):
+ assert request_kwargs["url"] == cls.TOKEN_URL
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+ assert len(body_tuples) == len(request_data.keys())
+
+ @classmethod
+ def assert_impersonation_request_kwargs(cls, request_kwargs, headers, request_data):
+ assert request_kwargs["url"] == cls.SERVICE_ACCOUNT_IMPERSONATION_URL
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_json = json.loads(request_kwargs["body"].decode("utf-8"))
+ assert body_json == request_data
+
+ @classmethod
+ def assert_resource_manager_request_kwargs(
+ cls, request_kwargs, project_number, headers
+ ):
+ assert request_kwargs["url"] == cls.CLOUD_RESOURCE_MANAGER_URL + project_number
+ assert request_kwargs["method"] == "GET"
+ assert request_kwargs["headers"] == headers
+ assert "body" not in request_kwargs
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+
+        # No token acquired yet
+ assert not credentials.token
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expiry
+ assert not credentials.expired
+ # Scopes are required
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+ assert not credentials.quota_project_id
+
+ def test_with_scopes(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(["email"])
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_scopes_using_user_and_default_scopes(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(
+ ["email"], default_scopes=["profile"]
+ )
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.has_scopes(["profile"])
+ assert not scoped_credentials.requires_scopes
+ assert scoped_credentials.scopes == ["email"]
+ assert scoped_credentials.default_scopes == ["profile"]
+
+ def test_with_scopes_using_default_scopes_only(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(None, default_scopes=["profile"])
+
+ assert scoped_credentials.has_scopes(["profile"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_scopes_full_options_propagated(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ scopes=self.SCOPES,
+ default_scopes=["default1"],
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ )
+
+ with mock.patch.object(
+ external_account.Credentials, "__init__", return_value=None
+ ) as mock_init:
+ credentials.with_scopes(["email"], ["default2"])
+
+ # Confirm with_scopes initialized the credential with the expected
+ # parameters and scopes.
+ mock_init.assert_called_once_with(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ scopes=["email"],
+ default_scopes=["default2"],
+ )
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+
+ def test_with_quota_project_full_options_propagated(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ scopes=self.SCOPES,
+ default_scopes=["default1"],
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ )
+
+ with mock.patch.object(
+ external_account.Credentials, "__init__", return_value=None
+ ) as mock_init:
+ credentials.with_quota_project("project-foo")
+
+ # Confirm with_quota_project initialized the credential with the
+ # expected parameters and quota project ID.
+ mock_init.assert_called_once_with(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id="project-foo",
+ scopes=self.SCOPES,
+ default_scopes=["default1"],
+ )
+
+ def test_with_invalid_impersonation_target_principal(self):
+ invalid_url = "https://iamcredentials.googleapis.com/v1/invalid"
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ self.make_credentials(service_account_impersonation_url=invalid_url)
+
+ assert excinfo.match(
+ r"Unable to determine target principal from service account impersonation URL."
+ )
+
+ def test_info(self):
+ credentials = self.make_credentials()
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": self.AUDIENCE,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "token_url": self.TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE.copy(),
+ }
+
+ def test_info_with_full_options(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": self.AUDIENCE,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "token_url": self.TOKEN_URL,
+ "service_account_impersonation_url": self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "credential_source": self.CREDENTIAL_SOURCE.copy(),
+ "quota_project_id": self.QUOTA_PROJECT_ID,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+
+ def test_service_account_email_without_impersonation(self):
+ credentials = self.make_credentials()
+
+ assert credentials.service_account_email is None
+
+ def test_service_account_email_with_impersonation(self):
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+
+ assert credentials.service_account_email == SERVICE_ACCOUNT_EMAIL
+
+ @pytest.mark.parametrize(
+ "audience",
+ # Workload identity pool audiences or invalid workforce pool audiences.
+ [
+ # Legacy K8s audience format.
+ "identitynamespace:1f12345:my_provider",
+ (
+ "//iam.googleapis.com/projects/123456/locations/"
+ "global/workloadIdentityPools/pool-id/providers/"
+ "provider-id"
+ ),
+ (
+ "//iam.googleapis.com/projects/123456/locations/"
+ "eu/workloadIdentityPools/pool-id/providers/"
+ "provider-id"
+ ),
+ # Pool ID with workforcePools string.
+ (
+ "//iam.googleapis.com/projects/123456/locations/"
+ "global/workloadIdentityPools/workforcePools/providers/"
+ "provider-id"
+ ),
+ # Unrealistic / incorrect workforce pool audiences.
+ "//iamgoogleapis.com/locations/eu/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapiscom/locations/eu/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/eu/workforcePool/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations//workforcePool/pool-id/providers/provider-id",
+ ],
+ )
+ def test_is_user_with_non_users(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.is_user is False
+
+ @pytest.mark.parametrize("audience", TEST_USER_AUDIENCES)
+ def test_is_user_with_users(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.is_user is True
+
+ @pytest.mark.parametrize("audience", TEST_USER_AUDIENCES)
+ def test_is_user_with_users_and_impersonation(self, audience):
+ # Initialize the credentials with service account impersonation.
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ )
+
+ # Even though the audience is for a workforce pool, since service account
+ # impersonation is used, the credentials will represent a service account and
+ # not a user.
+ assert credentials.is_user is False
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_without_client_auth_success(self, unused_utcnow):
+ response = self.SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ credentials = self.make_credentials()
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+
+ def test_refresh_impersonation_without_client_auth_success(self):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ def test_refresh_without_client_auth_success_explicit_user_scopes_ignore_default_scopes(
+ self,
+ ):
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "scope1 scope2",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(
+ scopes=["scope1", "scope2"],
+ # Default scopes will be ignored in favor of user scopes.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.has_scopes(["scope1", "scope2"])
+ assert not credentials.has_scopes(["ignored"])
+
+ def test_refresh_without_client_auth_success_explicit_default_scopes_only(self):
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "scope1 scope2",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(
+ scopes=None,
+ # Default scopes will be used since user scopes are none.
+ default_scopes=["scope1", "scope2"],
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.has_scopes(["scope1", "scope2"])
+
+ def test_refresh_without_client_auth_error(self):
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_refresh_impersonation_without_client_auth_error(self):
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE,
+ impersonation_status=http_client.BAD_REQUEST,
+ impersonation_data=self.IMPERSONATION_ERROR_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(r"Unable to acquire impersonated credentials")
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_refresh_with_client_auth_success(self):
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID, client_secret=CLIENT_SECRET
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+
+ def test_refresh_impersonation_with_client_auth_success_ignore_default_scopes(self):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ # Default scopes will be ignored since user scopes are specified.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ def test_refresh_impersonation_with_client_auth_success_use_default_scopes(self):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=None,
+ # Default scopes will be used since user specified scopes are none.
+ default_scopes=self.SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ def test_apply_without_quota_project_id(self):
+ headers = {}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"])
+ }
+
+ def test_apply_impersonation_without_quota_project_id(self):
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ )
+ headers = {}
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"])
+ }
+
+ def test_apply_with_quota_project_id(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(quota_project_id=self.QUOTA_PROJECT_ID)
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ }
+
+ def test_apply_impersonation_with_quota_project_id(self):
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ )
+ headers = {"other": "header-value"}
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ }
+
+ def test_before_request(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ }
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ }
+
+ def test_before_request_impersonation(self):
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ headers = {"other": "header-value"}
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ }
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ }
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_before_request_expired(self, utcnow):
+ headers = {}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials()
+ credentials.token = "token"
+ utcnow.return_value = datetime.datetime.min
+ # Set the expiration to one second more than now plus the clock skew
+ # accomodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.min + _helpers.CLOCK_SKEW + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # Cached token should be used.
+ assert headers == {"authorization": "Bearer token"}
+
+ # Next call should simulate 1 second passed.
+ utcnow.return_value = datetime.datetime.min + datetime.timedelta(seconds=1)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # New token should be retrieved.
+ assert headers == {
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"])
+ }
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_before_request_impersonation_expired(self, utcnow):
+ headers = {}
+ expire_time = (
+ datetime.datetime.min + datetime.timedelta(seconds=3601)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+ credentials.token = "token"
+ utcnow.return_value = datetime.datetime.min
+ # Set the expiration to one second more than now plus the clock skew
+ # accomodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.min + _helpers.CLOCK_SKEW + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # Cached token should be used.
+ assert headers == {"authorization": "Bearer token"}
+
+ # Next call should simulate 1 second passed. This will trigger the expiration
+ # threshold.
+ utcnow.return_value = datetime.datetime.min + datetime.timedelta(seconds=1)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # New token should be retrieved.
+ assert headers == {
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"])
+ }
+
+ @pytest.mark.parametrize(
+ "audience",
+ [
+ # Legacy K8s audience format.
+ "identitynamespace:1f12345:my_provider",
+ # Unrealistic audiences.
+ "//iam.googleapis.com/projects",
+ "//iam.googleapis.com/projects/",
+ "//iam.googleapis.com/project/123456",
+ "//iam.googleapis.com/projects//123456",
+ "//iam.googleapis.com/prefix_projects/123456",
+ "//iam.googleapis.com/projects_suffix/123456",
+ ],
+ )
+ def test_project_number_indeterminable(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.project_number is None
+ assert credentials.get_project_id(None) is None
+
+ def test_project_number_determinable(self):
+ credentials = CredentialsImpl(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.project_number == self.PROJECT_NUMBER
+
+ def test_project_id_without_scopes(self):
+ # Initialize credentials with no scopes.
+ credentials = CredentialsImpl(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.get_project_id(None) is None
+
    def test_get_project_id_cloud_resource_manager_success(self):
        """get_project_id exchanges, impersonates, queries CRM, and caches.

        The happy path issues exactly three requests in order: STS token
        exchange, service account impersonation, then the cloud resource
        manager lookup. A second call must serve the cached project ID
        without issuing any further requests.
        """
        # STS token exchange request/response.
        token_response = self.SUCCESS_RESPONSE.copy()
        token_headers = {"Content-Type": "application/x-www-form-urlencoded"}
        token_request_data = {
            "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
            "audience": self.AUDIENCE,
            "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
            "subject_token": "subject_token_0",
            "subject_token_type": self.SUBJECT_TOKEN_TYPE,
            "scope": "https://www.googleapis.com/auth/iam",
        }
        # Service account impersonation request/response.
        expire_time = (
            _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
        ).isoformat("T") + "Z"
        expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
        impersonation_response = {
            "accessToken": "SA_ACCESS_TOKEN",
            "expireTime": expire_time,
        }
        impersonation_headers = {
            "Content-Type": "application/json",
            "x-goog-user-project": self.QUOTA_PROJECT_ID,
            "authorization": "Bearer {}".format(token_response["access_token"]),
        }
        impersonation_request_data = {
            "delegates": None,
            "scope": self.SCOPES,
            "lifetime": "3600s",
        }
        # Initialize mock request to handle token exchange, service account
        # impersonation and cloud resource manager request.
        request = self.make_mock_request(
            status=http_client.OK,
            data=self.SUCCESS_RESPONSE.copy(),
            impersonation_status=http_client.OK,
            impersonation_data=impersonation_response,
            cloud_resource_manager_status=http_client.OK,
            cloud_resource_manager_data=self.CLOUD_RESOURCE_MANAGER_SUCCESS_RESPONSE,
        )
        credentials = self.make_credentials(
            service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
            scopes=self.SCOPES,
            quota_project_id=self.QUOTA_PROJECT_ID,
        )

        # Expected project ID from cloud resource manager response should be returned.
        project_id = credentials.get_project_id(request)

        assert project_id == self.PROJECT_ID
        # 3 requests should be processed.
        assert len(request.call_args_list) == 3
        # Verify token exchange request parameters.
        self.assert_token_request_kwargs(
            request.call_args_list[0][1], token_headers, token_request_data
        )
        # Verify service account impersonation request parameters.
        self.assert_impersonation_request_kwargs(
            request.call_args_list[1][1],
            impersonation_headers,
            impersonation_request_data,
        )
        # In the process of getting project ID, an access token should be
        # retrieved.
        assert credentials.valid
        assert credentials.expiry == expected_expiry
        assert not credentials.expired
        assert credentials.token == impersonation_response["accessToken"]
        # Verify cloud resource manager request parameters.
        self.assert_resource_manager_request_kwargs(
            request.call_args_list[2][1],
            self.PROJECT_NUMBER,
            {
                "x-goog-user-project": self.QUOTA_PROJECT_ID,
                "authorization": "Bearer {}".format(
                    impersonation_response["accessToken"]
                ),
            },
        )

        # Calling get_project_id again should return the cached project_id.
        project_id = credentials.get_project_id(request)

        assert project_id == self.PROJECT_ID
        # No additional requests.
        assert len(request.call_args_list) == 3
+
+ def test_get_project_id_cloud_resource_manager_error(self):
+ # Simulate resource doesn't have sufficient permissions to access
+ # cloud resource manager.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ cloud_resource_manager_status=http_client.UNAUTHORIZED,
+ )
+ credentials = self.make_credentials(scopes=self.SCOPES)
+
+ project_id = credentials.get_project_id(request)
+
+ assert project_id is None
+ # Only 2 requests to STS and cloud resource manager should be sent.
+ assert len(request.call_args_list) == 2
diff --git a/contrib/python/google-auth/py2/tests/test_iam.py b/contrib/python/google-auth/py2/tests/test_iam.py
new file mode 100644
index 0000000000..382713b9b1
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_iam.py
@@ -0,0 +1,102 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import datetime
+import json
+
+import mock
+import pytest
+from six.moves import http_client
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import iam
+from google.auth import transport
+import google.auth.credentials
+
+
def make_request(status, data=None):
    """Build a mock transport request returning one canned response.

    Args:
        status: HTTP status code for the mocked response.
        data: Optional payload; when given it is JSON-encoded into the
            response body.

    Returns:
        A mock ``transport.Request`` whose call yields the mocked response.
    """
    mock_response = mock.create_autospec(transport.Response, instance=True)
    mock_response.status = status

    if data is not None:
        mock_response.data = json.dumps(data).encode("utf-8")

    mock_request = mock.create_autospec(transport.Request)
    mock_request.return_value = mock_response
    return mock_request
+
+
def make_credentials():
    """Create a minimal concrete Credentials whose token always needs refresh."""

    class StubCredentials(google.auth.credentials.Credentials):
        def __init__(self):
            super(StubCredentials, self).__init__()
            self.token = "token"
            # Expiry inside the clock-skew window forces a refresh.
            self.expiry = datetime.datetime.min + _helpers.CLOCK_SKEW

        def refresh(self, request):
            # Refresh is a no-op for the stub.
            pass

        def with_quota_project(self, quota_project_id):
            raise NotImplementedError()

    return StubCredentials()
+
+
class TestSigner(object):
    """Tests for the IAM-backed ``iam.Signer``."""

    def test_constructor(self):
        fake_request = mock.sentinel.request
        fake_credentials = mock.create_autospec(
            google.auth.credentials.Credentials, instance=True
        )

        signer = iam.Signer(
            fake_request, fake_credentials, mock.sentinel.service_account_email
        )

        # The constructor simply stores its collaborators.
        assert signer._request == mock.sentinel.request
        assert signer._credentials == fake_credentials
        assert signer._service_account_email == mock.sentinel.service_account_email

    def test_key_id(self):
        signer = iam.Signer(
            mock.sentinel.request,
            mock.sentinel.credentials,
            mock.sentinel.service_account_email,
        )

        # The IAM signer never exposes a key ID.
        assert signer.key_id is None

    def test_sign_bytes(self):
        raw_signature = b"DEADBEEF"
        encoded_signature = base64.b64encode(raw_signature).decode("utf-8")
        fake_request = make_request(
            http_client.OK, data={"signedBlob": encoded_signature}
        )
        fake_credentials = make_credentials()

        signer = iam.Signer(
            fake_request, fake_credentials, mock.sentinel.service_account_email
        )

        # The base64 response payload is decoded back to the raw signature.
        assert signer.sign("123") == raw_signature
        # The signBlob call must be sent as JSON.
        request_kwargs = fake_request.call_args[1]
        assert request_kwargs["headers"]["Content-Type"] == "application/json"

    def test_sign_bytes_failure(self):
        fake_request = make_request(http_client.UNAUTHORIZED)
        fake_credentials = make_credentials()

        signer = iam.Signer(
            fake_request, fake_credentials, mock.sentinel.service_account_email
        )

        # A non-OK response surfaces as a TransportError.
        with pytest.raises(exceptions.TransportError):
            signer.sign("123")
diff --git a/contrib/python/google-auth/py2/tests/test_identity_pool.py b/contrib/python/google-auth/py2/tests/test_identity_pool.py
new file mode 100644
index 0000000000..7696917d65
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_identity_pool.py
@@ -0,0 +1,900 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+import mock
+import pytest
+from six.moves import http_client
+from six.moves import urllib
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import identity_pool
+from google.auth import transport
+
+
# Test client credentials used for HTTP basic auth against the STS endpoint.
CLIENT_ID = "username"
CLIENT_SECRET = "password"
# Base64 encoding of "username:password".
BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
SERVICE_ACCOUNT_IMPERSONATION_URL = (
    "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
    + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
)
QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
SCOPES = ["scope1", "scope2"]
# NOTE(review): mid-module import appears to be a yatest-specific patch so the
# data directory resolves inside the test sandbox — confirm before relocating.
import yatest.common
DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
SUBJECT_TOKEN_TEXT_FILE = os.path.join(DATA_DIR, "external_subject_token.txt")
SUBJECT_TOKEN_JSON_FILE = os.path.join(DATA_DIR, "external_subject_token.json")
SUBJECT_TOKEN_FIELD_NAME = "access_token"

# Fixture contents are loaded once at module import time.
with open(SUBJECT_TOKEN_TEXT_FILE) as fh:
    TEXT_FILE_SUBJECT_TOKEN = fh.read()

with open(SUBJECT_TOKEN_JSON_FILE) as fh:
    JSON_FILE_CONTENT = json.load(fh)
    JSON_FILE_SUBJECT_TOKEN = JSON_FILE_CONTENT.get(SUBJECT_TOKEN_FIELD_NAME)

TOKEN_URL = "https://sts.googleapis.com/v1/token"
SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+
+
class TestCredentials(object):
    """Tests for identity_pool.Credentials."""

    # File-based credential sources: raw text and JSON with a named field.
    CREDENTIAL_SOURCE_TEXT = {"file": SUBJECT_TOKEN_TEXT_FILE}
    CREDENTIAL_SOURCE_JSON = {
        "file": SUBJECT_TOKEN_JSON_FILE,
        "format": {"type": "json", "subject_token_field_name": "access_token"},
    }
    # URL-based credential sources mirroring the file-based ones.
    CREDENTIAL_URL = "http://fakeurl.com"
    CREDENTIAL_SOURCE_TEXT_URL = {"url": CREDENTIAL_URL}
    CREDENTIAL_SOURCE_JSON_URL = {
        "url": CREDENTIAL_URL,
        "format": {"type": "json", "subject_token_field_name": "access_token"},
    }
    # Canonical successful STS token exchange response.
    SUCCESS_RESPONSE = {
        "access_token": "ACCESS_TOKEN",
        "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
        "token_type": "Bearer",
        "expires_in": 3600,
        "scope": " ".join(SCOPES),
    }
+
+ @classmethod
+ def make_mock_response(cls, status, data):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ if isinstance(data, dict):
+ response.data = json.dumps(data).encode("utf-8")
+ else:
+ response.data = data
+ return response
+
+ @classmethod
+ def make_mock_request(
+ cls, token_status=http_client.OK, token_data=None, *extra_requests
+ ):
+ responses = []
+ responses.append(cls.make_mock_response(token_status, token_data))
+
+ while len(extra_requests) > 0:
+ # If service account impersonation is requested, mock the expected response.
+ status, data, extra_requests = (
+ extra_requests[0],
+ extra_requests[1],
+ extra_requests[2:],
+ )
+ responses.append(cls.make_mock_response(status, data))
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ @classmethod
+ def assert_credential_request_kwargs(
+ cls, request_kwargs, headers, url=CREDENTIAL_URL
+ ):
+ assert request_kwargs["url"] == url
+ assert request_kwargs["method"] == "GET"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs.get("body", None) is None
+
    @classmethod
    def assert_token_request_kwargs(
        cls, request_kwargs, headers, request_data, token_url=TOKEN_URL
    ):
        """Verify the keyword arguments of an STS token exchange request."""
        assert request_kwargs["url"] == token_url
        assert request_kwargs["method"] == "POST"
        assert request_kwargs["headers"] == headers
        assert request_kwargs["body"] is not None
        body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
        # Every expected form field must be present, with no extras.
        assert len(body_tuples) == len(request_data.keys())
        for (k, v) in body_tuples:
            # NOTE(review): the body is urlencoded bytes, so parse_qsl appears
            # to yield byte strings here (py2 test tree) — hence both decode
            # calls; confirm before porting to py3.
            assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+
+ @classmethod
+ def assert_impersonation_request_kwargs(
+ cls,
+ request_kwargs,
+ headers,
+ request_data,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ ):
+ assert request_kwargs["url"] == service_account_impersonation_url
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_json = json.loads(request_kwargs["body"].decode("utf-8"))
+ assert body_json == request_data
+
    @classmethod
    def assert_underlying_credentials_refresh(
        cls,
        credentials,
        audience,
        subject_token,
        subject_token_type,
        token_url,
        service_account_impersonation_url=None,
        basic_auth_encoding=None,
        quota_project_id=None,
        used_scopes=None,
        credential_data=None,
        scopes=None,
        default_scopes=None,
    ):
        """Utility to assert that a credentials are initialized with the expected
        attributes by calling refresh functionality and confirming response matches
        expected one and that the underlying requests were populated with the
        expected parameters.
        """
        # STS token exchange request/response.
        token_response = cls.SUCCESS_RESPONSE.copy()
        token_headers = {"Content-Type": "application/x-www-form-urlencoded"}
        if basic_auth_encoding:
            # Client authentication via HTTP basic auth is expected.
            token_headers["Authorization"] = "Basic " + basic_auth_encoding

        if service_account_impersonation_url:
            # When impersonating, the STS token only needs the IAM scope.
            token_scopes = "https://www.googleapis.com/auth/iam"
        else:
            token_scopes = " ".join(used_scopes or [])

        token_request_data = {
            "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
            "audience": audience,
            "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
            "scope": token_scopes,
            "subject_token": subject_token,
            "subject_token_type": subject_token_type,
        }

        if service_account_impersonation_url:
            # Service account impersonation request/response.
            expire_time = (
                _helpers.utcnow().replace(microsecond=0)
                + datetime.timedelta(seconds=3600)
            ).isoformat("T") + "Z"
            impersonation_response = {
                "accessToken": "SA_ACCESS_TOKEN",
                "expireTime": expire_time,
            }
            impersonation_headers = {
                "Content-Type": "application/json",
                "authorization": "Bearer {}".format(token_response["access_token"]),
            }
            impersonation_request_data = {
                "delegates": None,
                "scope": used_scopes,
                "lifetime": "3600s",
            }

        # Initialize mock request to handle token retrieval, token exchange and
        # service account impersonation request.
        requests = []
        if credential_data:
            # A URL-sourced subject token adds an initial retrieval request.
            requests.append((http_client.OK, credential_data))

        # Track where each expected request lands in the mock's call list.
        token_request_index = len(requests)
        requests.append((http_client.OK, token_response))

        if service_account_impersonation_url:
            impersonation_request_index = len(requests)
            requests.append((http_client.OK, impersonation_response))

        request = cls.make_mock_request(*[el for req in requests for el in req])

        credentials.refresh(request)

        assert len(request.call_args_list) == len(requests)
        if credential_data:
            cls.assert_credential_request_kwargs(request.call_args_list[0][1], None)
        # Verify token exchange request parameters.
        cls.assert_token_request_kwargs(
            request.call_args_list[token_request_index][1],
            token_headers,
            token_request_data,
            token_url,
        )
        # Verify service account impersonation request parameters if the request
        # is processed.
        if service_account_impersonation_url:
            cls.assert_impersonation_request_kwargs(
                request.call_args_list[impersonation_request_index][1],
                impersonation_headers,
                impersonation_request_data,
                service_account_impersonation_url,
            )
            assert credentials.token == impersonation_response["accessToken"]
        else:
            assert credentials.token == token_response["access_token"]
        assert credentials.quota_project_id == quota_project_id
        assert credentials.scopes == scopes
        assert credentials.default_scopes == default_scopes
+
+ @classmethod
+ def make_credentials(
+ cls,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ credential_source=None,
+ ):
+ return identity_pool.Credentials(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=credential_source,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+
+ @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+ def test_from_info_full_options(self, mock_init):
+ credentials = identity_pool.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+ }
+ )
+
+ # Confirm identity_pool.Credentials instantiated with expected attributes.
+ assert isinstance(credentials, identity_pool.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+
+ @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+ def test_from_info_required_options_only(self, mock_init):
+ credentials = identity_pool.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+ }
+ )
+
+ # Confirm identity_pool.Credentials instantiated with expected attributes.
+ assert isinstance(credentials, identity_pool.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ quota_project_id=None,
+ )
+
+ @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+ def test_from_file_full_options(self, mock_init, tmpdir):
+ info = {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+ }
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(info))
+ credentials = identity_pool.Credentials.from_file(str(config_file))
+
+ # Confirm identity_pool.Credentials instantiated with expected attributes.
+ assert isinstance(credentials, identity_pool.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+
+ @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+ def test_from_file_required_options_only(self, mock_init, tmpdir):
+ info = {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+ }
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(info))
+ credentials = identity_pool.Credentials.from_file(str(config_file))
+
+ # Confirm identity_pool.Credentials instantiated with expected attributes.
+ assert isinstance(credentials, identity_pool.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ quota_project_id=None,
+ )
+
+ def test_constructor_invalid_options(self):
+ credential_source = {"unsupported": "value"}
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"Missing credential_source")
+
+ def test_constructor_invalid_options_url_and_file(self):
+ credential_source = {
+ "url": self.CREDENTIAL_URL,
+ "file": SUBJECT_TOKEN_TEXT_FILE,
+ }
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"Ambiguous credential_source")
+
+ def test_constructor_invalid_options_environment_id(self):
+ credential_source = {"url": self.CREDENTIAL_URL, "environment_id": "aws1"}
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(
+ r"Invalid Identity Pool credential_source field 'environment_id'"
+ )
+
+ def test_constructor_invalid_credential_source(self):
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source="non-dict")
+
+ assert excinfo.match(r"Missing credential_source")
+
+ def test_constructor_invalid_credential_source_format_type(self):
+ credential_source = {"format": {"type": "xml"}}
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"Invalid credential_source format 'xml'")
+
+ def test_constructor_missing_subject_token_field_name(self):
+ credential_source = {"format": {"type": "json"}}
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(
+ r"Missing subject_token_field_name for JSON credential_source format"
+ )
+
+ def test_info_with_file_credential_source(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL.copy()
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE_TEXT_URL,
+ }
+
+ def test_info_with_url_credential_source(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL.copy()
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE_JSON_URL,
+ }
+
+ def test_retrieve_subject_token_missing_subject_token(self, tmpdir):
+ # Provide empty text file.
+ empty_file = tmpdir.join("empty.txt")
+ empty_file.write("")
+ credential_source = {"file": str(empty_file)}
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(None)
+
+ assert excinfo.match(r"Missing subject_token in the credential_source file")
+
+ def test_retrieve_subject_token_text_file(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_TEXT
+ )
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == TEXT_FILE_SUBJECT_TOKEN
+
+ def test_retrieve_subject_token_json_file(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_JSON
+ )
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == JSON_FILE_SUBJECT_TOKEN
+
+ def test_retrieve_subject_token_json_file_invalid_field_name(self):
+ credential_source = {
+ "file": SUBJECT_TOKEN_JSON_FILE,
+ "format": {"type": "json", "subject_token_field_name": "not_found"},
+ }
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(None)
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ SUBJECT_TOKEN_JSON_FILE, "not_found"
+ )
+ )
+
+ def test_retrieve_subject_token_invalid_json(self, tmpdir):
+ # Provide JSON file. This should result in JSON parsing error.
+ invalid_json_file = tmpdir.join("invalid.json")
+ invalid_json_file.write("{")
+ credential_source = {
+ "file": str(invalid_json_file),
+ "format": {"type": "json", "subject_token_field_name": "access_token"},
+ }
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(None)
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ str(invalid_json_file), "access_token"
+ )
+ )
+
+ def test_retrieve_subject_token_file_not_found(self):
+ credential_source = {"file": "./not_found.txt"}
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(None)
+
+ assert excinfo.match(r"File './not_found.txt' was not found")
+
+ def test_refresh_text_file_success_without_impersonation_ignore_default_scopes(
+ self,
+ ):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ scopes=SCOPES,
+ # Default scopes should be ignored.
+ default_scopes=["ignored"],
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=["ignored"],
+ )
+
+ def test_refresh_text_file_success_without_impersonation_use_default_scopes(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ scopes=None,
+ # Default scopes should be used since user specified scopes are none.
+ default_scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=None,
+ default_scopes=SCOPES,
+ )
+
+ def test_refresh_text_file_success_with_impersonation_ignore_default_scopes(self):
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=SCOPES,
+ # Default scopes should be ignored.
+ default_scopes=["ignored"],
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=["ignored"],
+ )
+
+ def test_refresh_text_file_success_with_impersonation_use_default_scopes(self):
+ # Initialize credentials with service account impersonation, basic auth
+ # and default scopes (no user scopes).
+ credentials = self.make_credentials(
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=None,
+ # Default scopes should be used since user specified scopes are none.
+ default_scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=None,
+ default_scopes=SCOPES,
+ )
+
+ def test_refresh_json_file_success_without_impersonation(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with JSON format type.
+ credential_source=self.CREDENTIAL_SOURCE_JSON,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=JSON_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ )
+
+ def test_refresh_json_file_success_with_impersonation(self):
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ # Test with JSON format type.
+ credential_source=self.CREDENTIAL_SOURCE_JSON,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=JSON_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ )
+
+ def test_refresh_with_retrieve_subject_token_error(self):
+ credential_source = {
+ "file": SUBJECT_TOKEN_JSON_FILE,
+ "format": {"type": "json", "subject_token_field_name": "not_found"},
+ }
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(None)
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ SUBJECT_TOKEN_JSON_FILE, "not_found"
+ )
+ )
+
+ def test_retrieve_subject_token_from_url(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL
+ )
+ request = self.make_mock_request(token_data=TEXT_FILE_SUBJECT_TOKEN)
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == TEXT_FILE_SUBJECT_TOKEN
+ self.assert_credential_request_kwargs(request.call_args_list[0][1], None)
+
+ def test_retrieve_subject_token_from_url_with_headers(self):
+ credentials = self.make_credentials(
+ credential_source={"url": self.CREDENTIAL_URL, "headers": {"foo": "bar"}}
+ )
+ request = self.make_mock_request(token_data=TEXT_FILE_SUBJECT_TOKEN)
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == TEXT_FILE_SUBJECT_TOKEN
+ self.assert_credential_request_kwargs(
+ request.call_args_list[0][1], {"foo": "bar"}
+ )
+
+ def test_retrieve_subject_token_from_url_json(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL
+ )
+ request = self.make_mock_request(token_data=JSON_FILE_CONTENT)
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == JSON_FILE_SUBJECT_TOKEN
+ self.assert_credential_request_kwargs(request.call_args_list[0][1], None)
+
+ def test_retrieve_subject_token_from_url_json_with_headers(self):
+ credentials = self.make_credentials(
+ credential_source={
+ "url": self.CREDENTIAL_URL,
+ "format": {"type": "json", "subject_token_field_name": "access_token"},
+ "headers": {"foo": "bar"},
+ }
+ )
+ request = self.make_mock_request(token_data=JSON_FILE_CONTENT)
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == JSON_FILE_SUBJECT_TOKEN
+ self.assert_credential_request_kwargs(
+ request.call_args_list[0][1], {"foo": "bar"}
+ )
+
+ def test_retrieve_subject_token_from_url_not_found(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL
+ )
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(
+ self.make_mock_request(token_status=404, token_data=JSON_FILE_CONTENT)
+ )
+
+ assert excinfo.match("Unable to retrieve Identity Pool subject token")
+
+ def test_retrieve_subject_token_from_url_json_invalid_field(self):
+ credential_source = {
+ "url": self.CREDENTIAL_URL,
+ "format": {"type": "json", "subject_token_field_name": "not_found"},
+ }
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(
+ self.make_mock_request(token_data=JSON_FILE_CONTENT)
+ )
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ self.CREDENTIAL_URL, "not_found"
+ )
+ )
+
+ def test_retrieve_subject_token_from_url_json_invalid_format(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(self.make_mock_request(token_data="{"))
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ self.CREDENTIAL_URL, "access_token"
+ )
+ )
+
+ def test_refresh_text_file_success_without_impersonation_url(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=TEXT_FILE_SUBJECT_TOKEN,
+ )
+
+ def test_refresh_text_file_success_with_impersonation_url(self):
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=TEXT_FILE_SUBJECT_TOKEN,
+ )
+
+ def test_refresh_json_file_success_without_impersonation_url(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with JSON format type.
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=JSON_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=JSON_FILE_CONTENT,
+ )
+
+ def test_refresh_json_file_success_with_impersonation_url(self):
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ # Test with JSON format type.
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=JSON_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=JSON_FILE_CONTENT,
+ )
+
+ def test_refresh_with_retrieve_subject_token_error_url(self):
+ credential_source = {
+ "url": self.CREDENTIAL_URL,
+ "format": {"type": "json", "subject_token_field_name": "not_found"},
+ }
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(self.make_mock_request(token_data=JSON_FILE_CONTENT))
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ self.CREDENTIAL_URL, "not_found"
+ )
+ )
diff --git a/contrib/python/google-auth/py2/tests/test_impersonated_credentials.py b/contrib/python/google-auth/py2/tests/test_impersonated_credentials.py
new file mode 100644
index 0000000000..75cc68e836
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_impersonated_credentials.py
@@ -0,0 +1,541 @@
+# Copyright 2018 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+import mock
+import pytest
+from six.moves import http_client
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import impersonated_credentials
+from google.auth import transport
+from google.auth.impersonated_credentials import Credentials
+from google.oauth2 import credentials
+from google.oauth2 import service_account
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+ID_TOKEN_DATA = (
+ "eyJhbGciOiJSUzI1NiIsImtpZCI6ImRmMzc1ODkwOGI3OTIyOTNhZDk3N2Ew"
+ "Yjk5MWQ5OGE3N2Y0ZWVlY2QiLCJ0eXAiOiJKV1QifQ.eyJhdWQiOiJodHRwc"
+ "zovL2Zvby5iYXIiLCJhenAiOiIxMDIxMDE1NTA4MzQyMDA3MDg1NjgiLCJle"
+ "HAiOjE1NjQ0NzUwNTEsImlhdCI6MTU2NDQ3MTQ1MSwiaXNzIjoiaHR0cHM6L"
+ "y9hY2NvdW50cy5nb29nbGUuY29tIiwic3ViIjoiMTAyMTAxNTUwODM0MjAwN"
+ "zA4NTY4In0.redacted"
+)
+ID_TOKEN_EXPIRY = 1564475051
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+SIGNER = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+TOKEN_URI = "https://example.com/oauth2/token"
+
+
+@pytest.fixture
+def mock_donor_credentials():
+ with mock.patch("google.oauth2._client.jwt_grant", autospec=True) as grant:
+ grant.return_value = (
+ "source token",
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ yield grant
+
+
+class MockResponse:
+ def __init__(self, json_data, status_code):
+ self.json_data = json_data
+ self.status_code = status_code
+
+ def json(self):
+ return self.json_data
+
+
+@pytest.fixture
+def mock_authorizedsession_sign():
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.request", autospec=True
+ ) as auth_session:
+ data = {"keyId": "1", "signedBlob": "c2lnbmF0dXJl"}
+ auth_session.return_value = MockResponse(data, http_client.OK)
+ yield auth_session
+
+
+@pytest.fixture
+def mock_authorizedsession_idtoken():
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.request", autospec=True
+ ) as auth_session:
+ data = {"token": ID_TOKEN_DATA}
+ auth_session.return_value = MockResponse(data, http_client.OK)
+ yield auth_session
+
+
+class TestImpersonatedCredentials(object):
+
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ TARGET_PRINCIPAL = "impersonated@project.iam.gserviceaccount.com"
+ TARGET_SCOPES = ["https://www.googleapis.com/auth/devstorage.read_only"]
+ DELEGATES = []
+ LIFETIME = 3600
+ SOURCE_CREDENTIALS = service_account.Credentials(
+ SIGNER, SERVICE_ACCOUNT_EMAIL, TOKEN_URI
+ )
+ USER_SOURCE_CREDENTIALS = credentials.Credentials(token="ABCDE")
+ IAM_ENDPOINT_OVERRIDE = (
+ "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
+ )
+
+ def make_credentials(
+ self,
+ source_credentials=SOURCE_CREDENTIALS,
+ lifetime=LIFETIME,
+ target_principal=TARGET_PRINCIPAL,
+ iam_endpoint_override=None,
+ ):
+
+ return Credentials(
+ source_credentials=source_credentials,
+ target_principal=target_principal,
+ target_scopes=self.TARGET_SCOPES,
+ delegates=self.DELEGATES,
+ lifetime=lifetime,
+ iam_endpoint_override=iam_endpoint_override,
+ )
+
+ def test_make_from_user_credentials(self):
+ credentials = self.make_credentials(
+ source_credentials=self.USER_SOURCE_CREDENTIALS
+ )
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ assert credentials.expired
+
+ def make_request(
+ self,
+ data,
+ status=http_client.OK,
+ headers=None,
+ side_effect=None,
+ use_data_bytes=True,
+ ):
+ response = mock.create_autospec(transport.Response, instance=False)
+ response.status = status
+ response.data = _helpers.to_bytes(data) if use_data_bytes else data
+ response.headers = headers or {}
+
+ request = mock.create_autospec(transport.Request, instance=False)
+ request.side_effect = side_effect
+ request.return_value = response
+
+ return request
+
+ @pytest.mark.parametrize("use_data_bytes", [True, False])
+ def test_refresh_success(self, use_data_bytes, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body),
+ status=http_client.OK,
+ use_data_bytes=use_data_bytes,
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ @pytest.mark.parametrize("use_data_bytes", [True, False])
+ def test_refresh_success_iam_endpoint_override(
+ self, use_data_bytes, mock_donor_credentials
+ ):
+ credentials = self.make_credentials(
+ lifetime=None, iam_endpoint_override=self.IAM_ENDPOINT_OVERRIDE
+ )
+ token = "token"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body),
+ status=http_client.OK,
+ use_data_bytes=use_data_bytes,
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+ # Confirm override endpoint used.
+ request_kwargs = request.call_args[1]
+ assert request_kwargs["url"] == self.IAM_ENDPOINT_OVERRIDE
+
+ @pytest.mark.parametrize("time_skew", [100, -100])
+ def test_refresh_source_credentials(self, time_skew):
+ credentials = self.make_credentials(lifetime=None)
+
+ # Source credentials is refreshed only if it is expired within
+ # _helpers.CLOCK_SKEW from now. We add a time_skew to the expiry, so
+ # source credentials is refreshed only if time_skew <= 0.
+ credentials._source_credentials.expiry = (
+ _helpers.utcnow()
+ + _helpers.CLOCK_SKEW
+ + datetime.timedelta(seconds=time_skew)
+ )
+ credentials._source_credentials.token = "Token"
+
+ with mock.patch(
+ "google.oauth2.service_account.Credentials.refresh", autospec=True
+ ) as source_cred_refresh:
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0)
+ + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": "token", "expireTime": expire_time}
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ # Source credentials is refreshed only if it is expired within
+ # _helpers.CLOCK_SKEW
+ if time_skew > 0:
+ source_cred_refresh.assert_not_called()
+ else:
+ source_cred_refresh.assert_called_once()
+
+ def test_refresh_failure_malformed_expire_time(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+
+ expire_time = (_helpers.utcnow() + datetime.timedelta(seconds=500)).isoformat(
+ "T"
+ )
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(impersonated_credentials._REFRESH_ERROR)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_refresh_failure_unauthorzed(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+
+ response_body = {
+ "error": {
+ "code": 403,
+ "message": "The caller does not have permission",
+ "status": "PERMISSION_DENIED",
+ }
+ }
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.UNAUTHORIZED
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(impersonated_credentials._REFRESH_ERROR)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_refresh_failure_http_error(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+
+ response_body = {}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.HTTPException
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(impersonated_credentials._REFRESH_ERROR)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_expired(self):
+ credentials = self.make_credentials(lifetime=None)
+ assert credentials.expired
+
+ def test_signer(self):
+ credentials = self.make_credentials()
+ assert isinstance(credentials.signer, impersonated_credentials.Credentials)
+
+ def test_signer_email(self):
+ credentials = self.make_credentials(target_principal=self.TARGET_PRINCIPAL)
+ assert credentials.signer_email == self.TARGET_PRINCIPAL
+
+ def test_service_account_email(self):
+ credentials = self.make_credentials(target_principal=self.TARGET_PRINCIPAL)
+ assert credentials.service_account_email == self.TARGET_PRINCIPAL
+
+ def test_sign_bytes(self, mock_donor_credentials, mock_authorizedsession_sign):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ token_response_body = {"accessToken": token, "expireTime": expire_time}
+
+ response = mock.create_autospec(transport.Response, instance=False)
+ response.status = http_client.OK
+ response.data = _helpers.to_bytes(json.dumps(token_response_body))
+
+ request = mock.create_autospec(transport.Request, instance=False)
+ request.return_value = response
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ signature = credentials.sign_bytes(b"signed bytes")
+ assert signature == b"signature"
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+ assert quota_project_creds._quota_project_id == "project-foo"
+
+ @pytest.mark.parametrize("use_data_bytes", [True, False])
+ def test_with_quota_project_iam_endpoint_override(
+ self, use_data_bytes, mock_donor_credentials
+ ):
+ credentials = self.make_credentials(
+ lifetime=None, iam_endpoint_override=self.IAM_ENDPOINT_OVERRIDE
+ )
+ token = "token"
+ # iam_endpoint_override should be copied to created credentials.
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body),
+ status=http_client.OK,
+ use_data_bytes=use_data_bytes,
+ )
+
+ quota_project_creds.refresh(request)
+
+ assert quota_project_creds.valid
+ assert not quota_project_creds.expired
+ # Confirm override endpoint used.
+ request_kwargs = request.call_args[1]
+ assert request_kwargs["url"] == self.IAM_ENDPOINT_OVERRIDE
+
+ def test_id_token_success(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds.expiry == datetime.datetime.fromtimestamp(ID_TOKEN_EXPIRY)
+
+ def test_id_token_from_credential(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience, include_email=True
+ )
+ id_creds = id_creds.from_credentials(target_credentials=credentials)
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds._include_email is True
+
+ def test_id_token_with_target_audience(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, include_email=True
+ )
+ id_creds = id_creds.with_target_audience(target_audience=target_audience)
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds.expiry == datetime.datetime.fromtimestamp(ID_TOKEN_EXPIRY)
+ assert id_creds._include_email is True
+
+ def test_id_token_invalid_cred(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = None
+
+ with pytest.raises(exceptions.GoogleAuthError) as excinfo:
+ impersonated_credentials.IDTokenCredentials(credentials)
+
+ assert excinfo.match("Provided Credential must be" " impersonated_credentials")
+
+ def test_id_token_with_include_email(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+ id_creds = id_creds.with_include_email(True)
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+
+ def test_id_token_with_quota_project(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+ id_creds = id_creds.with_quota_project("project-foo")
+ id_creds.refresh(request)
+
+ assert id_creds.quota_project_id == "project-foo"
diff --git a/contrib/python/google-auth/py2/tests/test_jwt.py b/contrib/python/google-auth/py2/tests/test_jwt.py
new file mode 100644
index 0000000000..ba03d33baa
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/test_jwt.py
@@ -0,0 +1,605 @@
+# Copyright 2014 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import datetime
+import json
+import os
+
+import mock
+import pytest
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import jwt
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "other_cert.pem"), "rb") as fh:
+ OTHER_CERT_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "es256_privatekey.pem"), "rb") as fh:
+ EC_PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "es256_public_cert.pem"), "rb") as fh:
+ EC_PUBLIC_CERT_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+@pytest.fixture
+def signer():
+ return crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+
+
+def test_encode_basic(signer):
+ test_payload = {"test": "value"}
+ encoded = jwt.encode(signer, test_payload)
+ header, payload, _, _ = jwt._unverified_decode(encoded)
+ assert payload == test_payload
+ assert header == {"typ": "JWT", "alg": "RS256", "kid": signer.key_id}
+
+
+def test_encode_extra_headers(signer):
+ encoded = jwt.encode(signer, {}, header={"extra": "value"})
+ header = jwt.decode_header(encoded)
+ assert header == {
+ "typ": "JWT",
+ "alg": "RS256",
+ "kid": signer.key_id,
+ "extra": "value",
+ }
+
+
+def test_encode_custom_alg_in_headers(signer):
+ encoded = jwt.encode(signer, {}, header={"alg": "foo"})
+ header = jwt.decode_header(encoded)
+ assert header == {"typ": "JWT", "alg": "foo", "kid": signer.key_id}
+
+
+@pytest.fixture
+def es256_signer():
+ return crypt.ES256Signer.from_string(EC_PRIVATE_KEY_BYTES, "1")
+
+
+def test_encode_basic_es256(es256_signer):
+ test_payload = {"test": "value"}
+ encoded = jwt.encode(es256_signer, test_payload)
+ header, payload, _, _ = jwt._unverified_decode(encoded)
+ assert payload == test_payload
+ assert header == {"typ": "JWT", "alg": "ES256", "kid": es256_signer.key_id}
+
+
+@pytest.fixture
+def token_factory(signer, es256_signer):
+ def factory(claims=None, key_id=None, use_es256_signer=False):
+ now = _helpers.datetime_to_secs(_helpers.utcnow())
+ payload = {
+ "aud": "audience@example.com",
+ "iat": now,
+ "exp": now + 300,
+ "user": "billy bob",
+ "metadata": {"meta": "data"},
+ }
+ payload.update(claims or {})
+
+ # False is specified to remove the signer's key id for testing
+ # headers without key ids.
+ if key_id is False:
+ signer._key_id = None
+ key_id = None
+
+ if use_es256_signer:
+ return jwt.encode(es256_signer, payload, key_id=key_id)
+ else:
+ return jwt.encode(signer, payload, key_id=key_id)
+
+ return factory
+
+
+def test_decode_valid(token_factory):
+ payload = jwt.decode(token_factory(), certs=PUBLIC_CERT_BYTES)
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_es256(token_factory):
+ payload = jwt.decode(
+ token_factory(use_es256_signer=True), certs=EC_PUBLIC_CERT_BYTES
+ )
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_with_audience(token_factory):
+ payload = jwt.decode(
+ token_factory(), certs=PUBLIC_CERT_BYTES, audience="audience@example.com"
+ )
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_with_audience_list(token_factory):
+ payload = jwt.decode(
+ token_factory(),
+ certs=PUBLIC_CERT_BYTES,
+ audience=["audience@example.com", "another_audience@example.com"],
+ )
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_unverified(token_factory):
+ payload = jwt.decode(token_factory(), certs=OTHER_CERT_BYTES, verify=False)
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_bad_token_wrong_number_of_segments():
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode("1.2", PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Wrong number of segments")
+
+
+def test_decode_bad_token_not_base64():
+ with pytest.raises((ValueError, TypeError)) as excinfo:
+ jwt.decode("1.2.3", PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Incorrect padding|more than a multiple of 4")
+
+
+def test_decode_bad_token_not_json():
+ token = b".".join([base64.urlsafe_b64encode(b"123!")] * 3)
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Can\'t parse segment")
+
+
+def test_decode_bad_token_no_iat_or_exp(signer):
+ token = jwt.encode(signer, {"test": "value"})
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Token does not contain required claim")
+
+
+def test_decode_bad_token_too_early(token_factory):
+ token = token_factory(
+ claims={
+ "iat": _helpers.datetime_to_secs(
+ _helpers.utcnow() + datetime.timedelta(hours=1)
+ )
+ }
+ )
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Token used too early")
+
+
+def test_decode_bad_token_expired(token_factory):
+ token = token_factory(
+ claims={
+ "exp": _helpers.datetime_to_secs(
+ _helpers.utcnow() - datetime.timedelta(hours=1)
+ )
+ }
+ )
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Token expired")
+
+
+def test_decode_bad_token_wrong_audience(token_factory):
+ token = token_factory()
+ audience = "audience2@example.com"
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES, audience=audience)
+ assert excinfo.match(r"Token has wrong audience")
+
+
+def test_decode_bad_token_wrong_audience_list(token_factory):
+ token = token_factory()
+ audience = ["audience2@example.com", "audience3@example.com"]
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES, audience=audience)
+ assert excinfo.match(r"Token has wrong audience")
+
+
+def test_decode_wrong_cert(token_factory):
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token_factory(), OTHER_CERT_BYTES)
+ assert excinfo.match(r"Could not verify token signature")
+
+
+def test_decode_multicert_bad_cert(token_factory):
+ certs = {"1": OTHER_CERT_BYTES, "2": PUBLIC_CERT_BYTES}
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token_factory(), certs)
+ assert excinfo.match(r"Could not verify token signature")
+
+
+def test_decode_no_cert(token_factory):
+ certs = {"2": PUBLIC_CERT_BYTES}
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token_factory(), certs)
+ assert excinfo.match(r"Certificate for key id 1 not found")
+
+
+def test_decode_no_key_id(token_factory):
+ token = token_factory(key_id=False)
+ certs = {"2": PUBLIC_CERT_BYTES}
+ payload = jwt.decode(token, certs)
+ assert payload["user"] == "billy bob"
+
+
+def test_decode_unknown_alg():
+ headers = json.dumps({u"kid": u"1", u"alg": u"fakealg"})
+ token = b".".join(
+ map(lambda seg: base64.b64encode(seg.encode("utf-8")), [headers, u"{}", u"sig"])
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token)
+ assert excinfo.match(r"fakealg")
+
+
+def test_decode_missing_crytography_alg(monkeypatch):
+ monkeypatch.delitem(jwt._ALGORITHM_TO_VERIFIER_CLASS, "ES256")
+ headers = json.dumps({u"kid": u"1", u"alg": u"ES256"})
+ token = b".".join(
+ map(lambda seg: base64.b64encode(seg.encode("utf-8")), [headers, u"{}", u"sig"])
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token)
+ assert excinfo.match(r"cryptography")
+
+
+def test_roundtrip_explicit_key_id(token_factory):
+ token = token_factory(key_id="3")
+ certs = {"2": OTHER_CERT_BYTES, "3": PUBLIC_CERT_BYTES}
+ payload = jwt.decode(token, certs)
+ assert payload["user"] == "billy bob"
+
+
+class TestCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ SUBJECT = "subject"
+ AUDIENCE = "audience"
+ ADDITIONAL_CLAIMS = {"meta": "data"}
+ credentials = None
+
+ @pytest.fixture(autouse=True)
+ def credentials_fixture(self, signer):
+ self.credentials = jwt.Credentials(
+ signer,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.AUDIENCE,
+ )
+
+ def test_from_service_account_info(self):
+ with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ info = json.load(fh)
+
+ credentials = jwt.Credentials.from_service_account_info(
+ info, audience=self.AUDIENCE
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+ assert credentials._audience == self.AUDIENCE
+
+ def test_from_service_account_info_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.Credentials.from_service_account_info(
+ info,
+ subject=self.SUBJECT,
+ audience=self.AUDIENCE,
+ additional_claims=self.ADDITIONAL_CLAIMS,
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._audience == self.AUDIENCE
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE, audience=self.AUDIENCE
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+ assert credentials._audience == self.AUDIENCE
+
+ def test_from_service_account_file_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE,
+ subject=self.SUBJECT,
+ audience=self.AUDIENCE,
+ additional_claims=self.ADDITIONAL_CLAIMS,
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._audience == self.AUDIENCE
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_signing_credentials(self):
+ jwt_from_signing = self.credentials.from_signing_credentials(
+ self.credentials, audience=mock.sentinel.new_audience
+ )
+ jwt_from_info = jwt.Credentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO, audience=mock.sentinel.new_audience
+ )
+
+ assert isinstance(jwt_from_signing, jwt.Credentials)
+ assert jwt_from_signing._signer.key_id == jwt_from_info._signer.key_id
+ assert jwt_from_signing._issuer == jwt_from_info._issuer
+ assert jwt_from_signing._subject == jwt_from_info._subject
+ assert jwt_from_signing._audience == jwt_from_info._audience
+
+ def test_default_state(self):
+ assert not self.credentials.valid
+ # Expiration hasn't been set yet
+ assert not self.credentials.expired
+
+ def test_with_claims(self):
+ new_audience = "new_audience"
+ new_credentials = self.credentials.with_claims(audience=new_audience)
+
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._audience == new_audience
+ assert new_credentials._additional_claims == self.credentials._additional_claims
+ assert new_credentials._quota_project_id == self.credentials._quota_project_id
+
+ def test_with_quota_project(self):
+ quota_project_id = "project-foo"
+
+ new_credentials = self.credentials.with_quota_project(quota_project_id)
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._audience == self.credentials._audience
+ assert new_credentials._additional_claims == self.credentials._additional_claims
+ assert new_credentials._quota_project_id == quota_project_id
+
+ def test_sign_bytes(self):
+ to_sign = b"123"
+ signature = self.credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ assert isinstance(self.credentials.signer, crypt.RSASigner)
+
+ def test_signer_email(self):
+ assert self.credentials.signer_email == SERVICE_ACCOUNT_INFO["client_email"]
+
+ def _verify_token(self, token):
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ return payload
+
+ def test_refresh(self):
+ self.credentials.refresh(None)
+ assert self.credentials.valid
+ assert not self.credentials.expired
+
+ def test_expired(self):
+ assert not self.credentials.expired
+
+ self.credentials.refresh(None)
+ assert not self.credentials.expired
+
+ with mock.patch("google.auth._helpers.utcnow") as now:
+ one_day = datetime.timedelta(days=1)
+ now.return_value = self.credentials.expiry + one_day
+ assert self.credentials.expired
+
+ def test_before_request(self):
+ headers = {}
+
+ self.credentials.refresh(None)
+ self.credentials.before_request(
+ None, "GET", "http://example.com?a=1#3", headers
+ )
+
+ header_value = headers["authorization"]
+ _, token = header_value.split(" ")
+
+ # Since the audience is set, it should use the existing token.
+ assert token.encode("utf-8") == self.credentials.token
+
+ payload = self._verify_token(token)
+ assert payload["aud"] == self.AUDIENCE
+
+ def test_before_request_refreshes(self):
+ assert not self.credentials.valid
+ self.credentials.before_request(None, "GET", "http://example.com?a=1#3", {})
+ assert self.credentials.valid
+
+
+class TestOnDemandCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ SUBJECT = "subject"
+ ADDITIONAL_CLAIMS = {"meta": "data"}
+ credentials = None
+
+ @pytest.fixture(autouse=True)
+ def credentials_fixture(self, signer):
+ self.credentials = jwt.OnDemandCredentials(
+ signer,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.SERVICE_ACCOUNT_EMAIL,
+ max_cache_size=2,
+ )
+
+ def test_from_service_account_info(self):
+ with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ info = json.load(fh)
+
+ credentials = jwt.OnDemandCredentials.from_service_account_info(info)
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+
+ def test_from_service_account_info_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.OnDemandCredentials.from_service_account_info(
+ info, subject=self.SUBJECT, additional_claims=self.ADDITIONAL_CLAIMS
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.OnDemandCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+
+ def test_from_service_account_file_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.OnDemandCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE,
+ subject=self.SUBJECT,
+ additional_claims=self.ADDITIONAL_CLAIMS,
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_signing_credentials(self):
+ jwt_from_signing = self.credentials.from_signing_credentials(self.credentials)
+ jwt_from_info = jwt.OnDemandCredentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO
+ )
+
+ assert isinstance(jwt_from_signing, jwt.OnDemandCredentials)
+ assert jwt_from_signing._signer.key_id == jwt_from_info._signer.key_id
+ assert jwt_from_signing._issuer == jwt_from_info._issuer
+ assert jwt_from_signing._subject == jwt_from_info._subject
+
+ def test_default_state(self):
+ # Credentials are *always* valid.
+ assert self.credentials.valid
+ # Credentials *never* expire.
+ assert not self.credentials.expired
+
+ def test_with_claims(self):
+ new_claims = {"meep": "moop"}
+ new_credentials = self.credentials.with_claims(additional_claims=new_claims)
+
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._additional_claims == new_claims
+
+ def test_with_quota_project(self):
+ quota_project_id = "project-foo"
+ new_credentials = self.credentials.with_quota_project(quota_project_id)
+
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._additional_claims == self.credentials._additional_claims
+ assert new_credentials._quota_project_id == quota_project_id
+
+ def test_sign_bytes(self):
+ to_sign = b"123"
+ signature = self.credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ assert isinstance(self.credentials.signer, crypt.RSASigner)
+
+ def test_signer_email(self):
+ assert self.credentials.signer_email == SERVICE_ACCOUNT_INFO["client_email"]
+
+ def _verify_token(self, token):
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ return payload
+
+ def test_refresh(self):
+ with pytest.raises(exceptions.RefreshError):
+ self.credentials.refresh(None)
+
+ def test_before_request(self):
+ headers = {}
+
+ self.credentials.before_request(
+ None, "GET", "http://example.com?a=1#3", headers
+ )
+
+ _, token = headers["authorization"].split(" ")
+ payload = self._verify_token(token)
+
+ assert payload["aud"] == "http://example.com"
+
+ # Making another request should re-use the same token.
+ self.credentials.before_request(None, "GET", "http://example.com?b=2", headers)
+
+ _, new_token = headers["authorization"].split(" ")
+
+ assert new_token == token
+
+ def test_expired_token(self):
+ self.credentials._cache["audience"] = (
+ mock.sentinel.token,
+ datetime.datetime.min,
+ )
+
+ token = self.credentials._get_jwt_for_audience("audience")
+
+ assert token != mock.sentinel.token
diff --git a/contrib/python/google-auth/py2/tests/transport/__init__.py b/contrib/python/google-auth/py2/tests/transport/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/__init__.py
diff --git a/contrib/python/google-auth/py2/tests/transport/compliance.py b/contrib/python/google-auth/py2/tests/transport/compliance.py
new file mode 100644
index 0000000000..e093d761df
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/compliance.py
@@ -0,0 +1,108 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+import flask
+import pytest
+from pytest_localserver.http import WSGIServer
+from six.moves import http_client
+
+from google.auth import exceptions
+
+# .invalid will never resolve, see https://tools.ietf.org/html/rfc2606
+NXDOMAIN = "test.invalid"
+
+
+class RequestResponseTests(object):
+ @pytest.fixture(scope="module")
+ def server(self):
+ """Provides a test HTTP server.
+
+ The test server is automatically created before
+ a test and destroyed at the end. The server is serving a test
+ application that can be used to verify requests.
+ """
+ app = flask.Flask(__name__)
+ app.debug = True
+
+ # pylint: disable=unused-variable
+ # (pylint thinks the flask routes are unused.)
+ @app.route("/basic")
+ def index():
+ header_value = flask.request.headers.get("x-test-header", "value")
+ headers = {"X-Test-Header": header_value}
+ return "Basic Content", http_client.OK, headers
+
+ @app.route("/server_error")
+ def server_error():
+ return "Error", http_client.INTERNAL_SERVER_ERROR
+
+ @app.route("/wait")
+ def wait():
+ time.sleep(3)
+ return "Waited"
+
+ # pylint: enable=unused-variable
+
+ server = WSGIServer(application=app.wsgi_app)
+ server.start()
+ yield server
+ server.stop()
+
+ def test_request_basic(self, server):
+ request = self.make_request()
+ response = request(url=server.url + "/basic", method="GET")
+
+ assert response.status == http_client.OK
+ assert response.headers["x-test-header"] == "value"
+ assert response.data == b"Basic Content"
+
+ def test_request_with_timeout_success(self, server):
+ request = self.make_request()
+ response = request(url=server.url + "/basic", method="GET", timeout=2)
+
+ assert response.status == http_client.OK
+ assert response.headers["x-test-header"] == "value"
+ assert response.data == b"Basic Content"
+
+ def test_request_with_timeout_failure(self, server):
+ request = self.make_request()
+
+ with pytest.raises(exceptions.TransportError):
+ request(url=server.url + "/wait", method="GET", timeout=1)
+
+ def test_request_headers(self, server):
+ request = self.make_request()
+ response = request(
+ url=server.url + "/basic",
+ method="GET",
+ headers={"x-test-header": "hello world"},
+ )
+
+ assert response.status == http_client.OK
+ assert response.headers["x-test-header"] == "hello world"
+ assert response.data == b"Basic Content"
+
+ def test_request_error(self, server):
+ request = self.make_request()
+ response = request(url=server.url + "/server_error", method="GET")
+
+ assert response.status == http_client.INTERNAL_SERVER_ERROR
+ assert response.data == b"Error"
+
+ def test_connection_error(self):
+ request = self.make_request()
+ with pytest.raises(exceptions.TransportError):
+ request(url="http://{}".format(NXDOMAIN), method="GET")
diff --git a/contrib/python/google-auth/py2/tests/transport/test__http_client.py b/contrib/python/google-auth/py2/tests/transport/test__http_client.py
new file mode 100644
index 0000000000..c176cb2f4c
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/test__http_client.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from google.auth import exceptions
+import google.auth.transport._http_client
+from tests.transport import compliance
+
+
+class TestRequestResponse(compliance.RequestResponseTests):
+ def make_request(self):
+ return google.auth.transport._http_client.Request()
+
+ def test_non_http(self):
+ request = self.make_request()
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ request(url="https://{}".format(compliance.NXDOMAIN), method="GET")
+
+ assert excinfo.match("https")
diff --git a/contrib/python/google-auth/py2/tests/transport/test__mtls_helper.py b/contrib/python/google-auth/py2/tests/transport/test__mtls_helper.py
new file mode 100644
index 0000000000..c2e104a3dc
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/test__mtls_helper.py
@@ -0,0 +1,443 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+
+import mock
+from OpenSSL import crypto
+import pytest
+
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+CONTEXT_AWARE_METADATA = {"cert_provider_command": ["some command"]}
+
+CONTEXT_AWARE_METADATA_NO_CERT_PROVIDER_COMMAND = {}
+
+ENCRYPTED_EC_PRIVATE_KEY = b"""-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIHkME8GCSqGSIb3DQEFDTBCMCkGCSqGSIb3DQEFDDAcBAgl2/yVgs1h3QICCAAw
+DAYIKoZIhvcNAgkFADAVBgkrBgEEAZdVAQIECJk2GRrvxOaJBIGQXIBnMU4wmciT
+uA6yD8q0FxuIzjG7E2S6tc5VRgSbhRB00eBO3jWmO2pBybeQW+zVioDcn50zp2ts
+wYErWC+LCm1Zg3r+EGnT1E1GgNoODbVQ3AEHlKh1CGCYhEovxtn3G+Fjh7xOBrNB
+saVVeDb4tHD4tMkiVVUBrUcTZPndP73CtgyGHYEphasYPzEz3+AU
+-----END ENCRYPTED PRIVATE KEY-----"""
+
+EC_PUBLIC_KEY = b"""-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEvCNi1NoDY1oMqPHIgXI8RBbTYGi/
+brEjbre1nSiQW11xRTJbVeETdsuP0EAu2tG3PcRhhwDfeJ8zXREgTBurNw==
+-----END PUBLIC KEY-----"""
+
+PASSPHRASE = b"""-----BEGIN PASSPHRASE-----
+password
+-----END PASSPHRASE-----"""
+PASSPHRASE_VALUE = b"password"
+
+
+def check_cert_and_key(content, expected_cert, expected_key):
+ success = True
+
+ cert_match = re.findall(_mtls_helper._CERT_REGEX, content)
+ success = success and len(cert_match) == 1 and cert_match[0] == expected_cert
+
+ key_match = re.findall(_mtls_helper._KEY_REGEX, content)
+ success = success and len(key_match) == 1 and key_match[0] == expected_key
+
+ return success
+
+
+class TestCertAndKeyRegex(object):
+ def test_cert_and_key(self):
+ # Test single cert and single key
+ check_cert_and_key(
+ pytest.public_cert_bytes + pytest.private_key_bytes,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+ check_cert_and_key(
+ pytest.private_key_bytes + pytest.public_cert_bytes,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+
+ # Test cert chain and single key
+ check_cert_and_key(
+ pytest.public_cert_bytes
+ + pytest.public_cert_bytes
+ + pytest.private_key_bytes,
+ pytest.public_cert_bytes + pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+ check_cert_and_key(
+ pytest.private_key_bytes
+ + pytest.public_cert_bytes
+ + pytest.public_cert_bytes,
+ pytest.public_cert_bytes + pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+
+ def test_key(self):
+ # Create some fake keys for regex check.
+ KEY = b"""-----BEGIN PRIVATE KEY-----
+ MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+ /fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+ -----END PRIVATE KEY-----"""
+ RSA_KEY = b"""-----BEGIN RSA PRIVATE KEY-----
+ MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+ /fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+ -----END RSA PRIVATE KEY-----"""
+ EC_KEY = b"""-----BEGIN EC PRIVATE KEY-----
+ MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+ /fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+ -----END EC PRIVATE KEY-----"""
+
+ check_cert_and_key(
+ pytest.public_cert_bytes + KEY, pytest.public_cert_bytes, KEY
+ )
+ check_cert_and_key(
+ pytest.public_cert_bytes + RSA_KEY, pytest.public_cert_bytes, RSA_KEY
+ )
+ check_cert_and_key(
+ pytest.public_cert_bytes + EC_KEY, pytest.public_cert_bytes, EC_KEY
+ )
+
+
+class TestCheckaMetadataPath(object):
+ def test_success(self):
+ metadata_path = os.path.join(DATA_DIR, "context_aware_metadata.json")
+ returned_path = _mtls_helper._check_dca_metadata_path(metadata_path)
+ assert returned_path is not None
+
+ def test_failure(self):
+ metadata_path = os.path.join(DATA_DIR, "not_exists.json")
+ returned_path = _mtls_helper._check_dca_metadata_path(metadata_path)
+ assert returned_path is None
+
+
+class TestReadMetadataFile(object):
+ def test_success(self):
+ metadata_path = os.path.join(DATA_DIR, "context_aware_metadata.json")
+ metadata = _mtls_helper._read_dca_metadata_file(metadata_path)
+
+ assert "cert_provider_command" in metadata
+
+ def test_file_not_json(self):
+ # read a file which is not json format.
+ metadata_path = os.path.join(DATA_DIR, "privatekey.pem")
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._read_dca_metadata_file(metadata_path)
+
+
+class TestRunCertProviderCommand(object):
+ def create_mock_process(self, output, error):
+ # There are two steps to execute a script with subprocess.Popen.
+ # (1) process = subprocess.Popen([commands])
+ # (2) stdout, stderr = process.communicate()
+ # This function creates a mock process which can be returned by a mock
+ # subprocess.Popen. The mock process returns the given output and error
+ # when mock_process.communicate() is called.
+ mock_process = mock.Mock()
+ attrs = {"communicate.return_value": (output, error), "returncode": 0}
+ mock_process.configure_mock(**attrs)
+ return mock_process
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_success(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + pytest.private_key_bytes, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(["command"])
+ assert cert == pytest.public_cert_bytes
+ assert key == pytest.private_key_bytes
+ assert passphrase is None
+
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + ENCRYPTED_EC_PRIVATE_KEY + PASSPHRASE, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+ assert cert == pytest.public_cert_bytes
+ assert key == ENCRYPTED_EC_PRIVATE_KEY
+ assert passphrase == PASSPHRASE_VALUE
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_success_with_cert_chain(self, mock_popen):
+ PUBLIC_CERT_CHAIN_BYTES = pytest.public_cert_bytes + pytest.public_cert_bytes
+ mock_popen.return_value = self.create_mock_process(
+ PUBLIC_CERT_CHAIN_BYTES + pytest.private_key_bytes, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(["command"])
+ assert cert == PUBLIC_CERT_CHAIN_BYTES
+ assert key == pytest.private_key_bytes
+ assert passphrase is None
+
+ mock_popen.return_value = self.create_mock_process(
+ PUBLIC_CERT_CHAIN_BYTES + ENCRYPTED_EC_PRIVATE_KEY + PASSPHRASE, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+ assert cert == PUBLIC_CERT_CHAIN_BYTES
+ assert key == ENCRYPTED_EC_PRIVATE_KEY
+ assert passphrase == PASSPHRASE_VALUE
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_missing_cert(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.private_key_bytes, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ mock_popen.return_value = self.create_mock_process(
+ ENCRYPTED_EC_PRIVATE_KEY + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_missing_key(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_missing_passphrase(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + ENCRYPTED_EC_PRIVATE_KEY, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_passphrase_not_expected(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + pytest.private_key_bytes + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_encrypted_key_expected(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + pytest.private_key_bytes + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_unencrypted_key_expected(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + ENCRYPTED_EC_PRIVATE_KEY, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_cert_provider_returns_error(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(b"", b"some error")
+ mock_popen.return_value.returncode = 1
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_popen_raise_exception(self, mock_popen):
+ mock_popen.side_effect = OSError()
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+
+class TestGetClientSslCredentials(object):
+ @mock.patch(
+ "google.auth.transport._mtls_helper._run_cert_provider_command", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_success(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_run_cert_provider_command,
+ ):
+ mock_check_dca_metadata_path.return_value = True
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["command"]
+ }
+ mock_run_cert_provider_command.return_value = (b"cert", b"key", None)
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials()
+ assert has_cert
+ assert cert == b"cert"
+ assert key == b"key"
+ assert passphrase is None
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_success_without_metadata(self, mock_check_dca_metadata_path):
+ mock_check_dca_metadata_path.return_value = False
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials()
+ assert not has_cert
+ assert cert is None
+ assert key is None
+ assert passphrase is None
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._run_cert_provider_command", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_success_with_encrypted_key(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_run_cert_provider_command,
+ ):
+ mock_check_dca_metadata_path.return_value = True
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["command"]
+ }
+ mock_run_cert_provider_command.return_value = (b"cert", b"key", b"passphrase")
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials(
+ generate_encrypted_key=True
+ )
+ assert has_cert
+ assert cert == b"cert"
+ assert key == b"key"
+ assert passphrase == b"passphrase"
+ mock_run_cert_provider_command.assert_called_once_with(
+ ["command", "--with_passphrase"], expect_encrypted_key=True
+ )
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_missing_cert_command(
+ self, mock_check_dca_metadata_path, mock_read_dca_metadata_file
+ ):
+ mock_check_dca_metadata_path.return_value = True
+ mock_read_dca_metadata_file.return_value = {}
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper.get_client_ssl_credentials()
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._run_cert_provider_command", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_customize_context_aware_metadata_path(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_run_cert_provider_command,
+ ):
+ context_aware_metadata_path = "/path/to/metata/data"
+ mock_check_dca_metadata_path.return_value = context_aware_metadata_path
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["command"]
+ }
+ mock_run_cert_provider_command.return_value = (b"cert", b"key", None)
+
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials(
+ context_aware_metadata_path=context_aware_metadata_path
+ )
+
+ assert has_cert
+ assert cert == b"cert"
+ assert key == b"key"
+ assert passphrase is None
+ mock_check_dca_metadata_path.assert_called_with(context_aware_metadata_path)
+ mock_read_dca_metadata_file.assert_called_with(context_aware_metadata_path)
+
+
+class TestGetClientCertAndKey(object):
+ def test_callback_success(self):
+ callback = mock.Mock()
+ callback.return_value = (pytest.public_cert_bytes, pytest.private_key_bytes)
+
+ found_cert_key, cert, key = _mtls_helper.get_client_cert_and_key(callback)
+ assert found_cert_key
+ assert cert == pytest.public_cert_bytes
+ assert key == pytest.private_key_bytes
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+ )
+ def test_use_metadata(self, mock_get_client_ssl_credentials):
+ mock_get_client_ssl_credentials.return_value = (
+ True,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ None,
+ )
+
+ found_cert_key, cert, key = _mtls_helper.get_client_cert_and_key()
+ assert found_cert_key
+ assert cert == pytest.public_cert_bytes
+ assert key == pytest.private_key_bytes
+
+
+class TestDecryptPrivateKey(object):
+ def test_success(self):
+ decrypted_key = _mtls_helper.decrypt_private_key(
+ ENCRYPTED_EC_PRIVATE_KEY, PASSPHRASE_VALUE
+ )
+ private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, decrypted_key)
+ public_key = crypto.load_publickey(crypto.FILETYPE_PEM, EC_PUBLIC_KEY)
+ x509 = crypto.X509()
+ x509.set_pubkey(public_key)
+
+ # Test the decrypted key works by signing and verification.
+ signature = crypto.sign(private_key, b"data", "sha256")
+ crypto.verify(x509, signature, b"data", "sha256")
+
+ def test_crypto_error(self):
+ with pytest.raises(crypto.Error):
+ _mtls_helper.decrypt_private_key(
+ ENCRYPTED_EC_PRIVATE_KEY, b"wrong_password"
+ )
diff --git a/contrib/python/google-auth/py2/tests/transport/test_grpc.py b/contrib/python/google-auth/py2/tests/transport/test_grpc.py
new file mode 100644
index 0000000000..fb77a2b997
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/test_grpc.py
@@ -0,0 +1,504 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import os
+import time
+
+import mock
+import pytest
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import service_account
+
+try:
+ # pylint: disable=ungrouped-imports
+ import grpc
+ import google.auth.transport.grpc
+
+ HAS_GRPC = True
+except ImportError: # pragma: NO COVER
+ HAS_GRPC = False
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+METADATA_PATH = os.path.join(DATA_DIR, "context_aware_metadata.json")
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+pytestmark = pytest.mark.skipif(not HAS_GRPC, reason="gRPC is unavailable.")
+
+
+class CredentialsStub(credentials.Credentials):
+ def __init__(self, token="token"):
+ super(CredentialsStub, self).__init__()
+ self.token = token
+ self.expiry = None
+
+ def refresh(self, request):
+ self.token += "1"
+
+ def with_quota_project(self, quota_project_id):
+ raise NotImplementedError()
+
+
+class TestAuthMetadataPlugin(object):
+ def test_call_no_refresh(self):
+ credentials = CredentialsStub()
+ request = mock.create_autospec(transport.Request)
+
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = mock.sentinel.method_name
+ context.service_url = mock.sentinel.service_url
+ callback = mock.create_autospec(grpc.AuthMetadataPluginCallback)
+
+ plugin(context, callback)
+
+ time.sleep(2)
+
+ callback.assert_called_once_with(
+ [("authorization", "Bearer {}".format(credentials.token))], None
+ )
+
+ def test_call_refresh(self):
+ credentials = CredentialsStub()
+ credentials.expiry = datetime.datetime.min + _helpers.CLOCK_SKEW
+ request = mock.create_autospec(transport.Request)
+
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = mock.sentinel.method_name
+ context.service_url = mock.sentinel.service_url
+ callback = mock.create_autospec(grpc.AuthMetadataPluginCallback)
+
+ plugin(context, callback)
+
+ time.sleep(2)
+
+ assert credentials.token == "token1"
+ callback.assert_called_once_with(
+ [("authorization", "Bearer {}".format(credentials.token))], None
+ )
+
+ def test__get_authorization_headers_with_service_account(self):
+ credentials = mock.create_autospec(service_account.Credentials)
+ request = mock.create_autospec(transport.Request)
+
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = "methodName"
+ context.service_url = "https://pubsub.googleapis.com/methodName"
+
+ plugin._get_authorization_headers(context)
+
+ # self-signed JWT should not be created when default_host is not set
+ #credentials._create_self_signed_jwt.assert_not_called()
+
+ def test__get_authorization_headers_with_service_account_and_default_host(self):
+ credentials = mock.create_autospec(service_account.Credentials)
+ request = mock.create_autospec(transport.Request)
+
+ default_host = "pubsub.googleapis.com"
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(
+ credentials, request, default_host=default_host
+ )
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = "methodName"
+ context.service_url = "https://pubsub.googleapis.com/methodName"
+
+ plugin._get_authorization_headers(context)
+
+ credentials._create_self_signed_jwt.assert_called_once_with(
+ "https://{}/".format(default_host)
+ )
+
+
+@mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+)
+@mock.patch("grpc.composite_channel_credentials", autospec=True)
+@mock.patch("grpc.metadata_call_credentials", autospec=True)
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch("grpc.secure_channel", autospec=True)
+class TestSecureAuthorizedChannel(object):
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_secure_authorized_channel_adc(
+ self,
+ check_dca_metadata_path,
+ read_dca_metadata_file,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = CredentialsStub()
+ request = mock.create_autospec(transport.Request)
+ target = "example.com:80"
+
+ # Mock the context aware metadata and client cert/key so mTLS SSL channel
+ # will be used.
+ check_dca_metadata_path.return_value = METADATA_PATH
+ read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["some command"]
+ }
+ get_client_ssl_credentials.return_value = (
+ True,
+ PUBLIC_CERT_BYTES,
+ PRIVATE_KEY_BYTES,
+ None,
+ )
+
+ channel = None
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, options=mock.sentinel.options
+ )
+
+ # Check the auth plugin construction.
+ auth_plugin = metadata_call_credentials.call_args[0][0]
+ assert isinstance(auth_plugin, google.auth.transport.grpc.AuthMetadataPlugin)
+ assert auth_plugin._credentials == credentials
+ assert auth_plugin._request == request
+
+ # Check the ssl channel call.
+ ssl_channel_credentials.assert_called_once_with(
+ certificate_chain=PUBLIC_CERT_BYTES, private_key=PRIVATE_KEY_BYTES
+ )
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+ # Check the channel call.
+ secure_channel.assert_called_once_with(
+ target,
+ composite_channel_credentials.return_value,
+ options=mock.sentinel.options,
+ )
+ assert channel == secure_channel.return_value
+
+ @mock.patch("google.auth.transport.grpc.SslCredentials", autospec=True)
+ def test_secure_authorized_channel_adc_without_client_cert_env(
+ self,
+ ssl_credentials_adc_method,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE
+ # environment variable is not set.
+ credentials = CredentialsStub()
+ request = mock.create_autospec(transport.Request)
+ target = "example.com:80"
+
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, options=mock.sentinel.options
+ )
+
+ # Check the auth plugin construction.
+ auth_plugin = metadata_call_credentials.call_args[0][0]
+ assert isinstance(auth_plugin, google.auth.transport.grpc.AuthMetadataPlugin)
+ assert auth_plugin._credentials == credentials
+ assert auth_plugin._request == request
+
+ # Check the ssl channel call.
+ ssl_channel_credentials.assert_called_once()
+ ssl_credentials_adc_method.assert_not_called()
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+ # Check the channel call.
+ secure_channel.assert_called_once_with(
+ target,
+ composite_channel_credentials.return_value,
+ options=mock.sentinel.options,
+ )
+ assert channel == secure_channel.return_value
+
+ def test_secure_authorized_channel_explicit_ssl(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ ssl_credentials = mock.Mock()
+
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, ssl_credentials=ssl_credentials
+ )
+
+ # Since explicit SSL credentials are provided, get_client_ssl_credentials
+ # shouldn't be called.
+ assert not get_client_ssl_credentials.called
+
+ # Check the ssl channel call.
+ assert not ssl_channel_credentials.called
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_credentials, metadata_call_credentials.return_value
+ )
+
+ def test_secure_authorized_channel_mutual_exclusive(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ ssl_credentials = mock.Mock()
+ client_cert_callback = mock.Mock()
+
+ with pytest.raises(ValueError):
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials,
+ request,
+ target,
+ ssl_credentials=ssl_credentials,
+ client_cert_callback=client_cert_callback,
+ )
+
+ def test_secure_authorized_channel_with_client_cert_callback_success(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ client_cert_callback = mock.Mock()
+ client_cert_callback.return_value = (PUBLIC_CERT_BYTES, PRIVATE_KEY_BYTES)
+
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, client_cert_callback=client_cert_callback
+ )
+
+ client_cert_callback.assert_called_once()
+
+ # Check we are using the cert and key provided by client_cert_callback.
+ ssl_channel_credentials.assert_called_once_with(
+ certificate_chain=PUBLIC_CERT_BYTES, private_key=PRIVATE_KEY_BYTES
+ )
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_secure_authorized_channel_with_client_cert_callback_failure(
+ self,
+ check_dca_metadata_path,
+ read_dca_metadata_file,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+
+ client_cert_callback = mock.Mock()
+ client_cert_callback.side_effect = Exception("callback exception")
+
+ with pytest.raises(Exception) as excinfo:
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials,
+ request,
+ target,
+ client_cert_callback=client_cert_callback,
+ )
+
+ assert str(excinfo.value) == "callback exception"
+
+ def test_secure_authorized_channel_cert_callback_without_client_cert_env(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE
+ # environment variable is not set.
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ client_cert_callback = mock.Mock()
+
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, client_cert_callback=client_cert_callback
+ )
+
+ # Check client_cert_callback is not called because GOOGLE_API_USE_CLIENT_CERTIFICATE
+ # is not set.
+ client_cert_callback.assert_not_called()
+
+ ssl_channel_credentials.assert_called_once()
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+)
+@mock.patch("google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True)
+@mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+)
+class TestSslCredentials(object):
+ def test_no_context_aware_metadata(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ # Mock that the metadata file doesn't exist.
+ mock_check_dca_metadata_path.return_value = None
+
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ ssl_credentials = google.auth.transport.grpc.SslCredentials()
+
+ # Since no context aware metadata is found, we wouldn't call
+ # get_client_ssl_credentials, and the SSL channel credentials created is
+ # non mTLS.
+ assert ssl_credentials.ssl_credentials is not None
+ assert not ssl_credentials.is_mtls
+ mock_get_client_ssl_credentials.assert_not_called()
+ mock_ssl_channel_credentials.assert_called_once_with()
+
+ def test_get_client_ssl_credentials_failure(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ mock_check_dca_metadata_path.return_value = METADATA_PATH
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["some command"]
+ }
+
+ # Mock that client cert and key are not loaded and exception is raised.
+ mock_get_client_ssl_credentials.side_effect = exceptions.ClientCertError()
+
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ assert google.auth.transport.grpc.SslCredentials().ssl_credentials
+
+ def test_get_client_ssl_credentials_success(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ mock_check_dca_metadata_path.return_value = METADATA_PATH
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["some command"]
+ }
+ mock_get_client_ssl_credentials.return_value = (
+ True,
+ PUBLIC_CERT_BYTES,
+ PRIVATE_KEY_BYTES,
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ ssl_credentials = google.auth.transport.grpc.SslCredentials()
+
+ assert ssl_credentials.ssl_credentials is not None
+ assert ssl_credentials.is_mtls
+ mock_get_client_ssl_credentials.assert_called_once()
+ mock_ssl_channel_credentials.assert_called_once_with(
+ certificate_chain=PUBLIC_CERT_BYTES, private_key=PRIVATE_KEY_BYTES
+ )
+
+ def test_get_client_ssl_credentials_without_client_cert_env(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE is not set.
+ ssl_credentials = google.auth.transport.grpc.SslCredentials()
+
+ assert ssl_credentials.ssl_credentials is not None
+ assert not ssl_credentials.is_mtls
+ mock_check_dca_metadata_path.assert_not_called()
+ mock_read_dca_metadata_file.assert_not_called()
+ mock_get_client_ssl_credentials.assert_not_called()
+ mock_ssl_channel_credentials.assert_called_once()
diff --git a/contrib/python/google-auth/py2/tests/transport/test_mtls.py b/contrib/python/google-auth/py2/tests/transport/test_mtls.py
new file mode 100644
index 0000000000..ff70bb3c22
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/test_mtls.py
@@ -0,0 +1,83 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+import pytest
+
+from google.auth import exceptions
+from google.auth.transport import mtls
+
+
+@mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+)
+def test_has_default_client_cert_source(check_dca_metadata_path):
+ check_dca_metadata_path.return_value = mock.Mock()
+ assert mtls.has_default_client_cert_source()
+
+ check_dca_metadata_path.return_value = None
+ assert not mtls.has_default_client_cert_source()
+
+
+@mock.patch("google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True)
+@mock.patch("google.auth.transport.mtls.has_default_client_cert_source", autospec=True)
+def test_default_client_cert_source(
+ has_default_client_cert_source, get_client_cert_and_key
+):
+ # Test default client cert source doesn't exist.
+ has_default_client_cert_source.return_value = False
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ mtls.default_client_cert_source()
+
+ # The following tests will assume default client cert source exists.
+ has_default_client_cert_source.return_value = True
+
+ # Test good callback.
+ get_client_cert_and_key.return_value = (True, b"cert", b"key")
+ callback = mtls.default_client_cert_source()
+ assert callback() == (b"cert", b"key")
+
+ # Test bad callback which throws exception.
+ get_client_cert_and_key.side_effect = ValueError()
+ callback = mtls.default_client_cert_source()
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ callback()
+
+
+@mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+)
+@mock.patch("google.auth.transport.mtls.has_default_client_cert_source", autospec=True)
+def test_default_client_encrypted_cert_source(
+ has_default_client_cert_source, get_client_ssl_credentials
+):
+ # Test default client cert source doesn't exist.
+ has_default_client_cert_source.return_value = False
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ mtls.default_client_encrypted_cert_source("cert_path", "key_path")
+
+ # The following tests will assume default client cert source exists.
+ has_default_client_cert_source.return_value = True
+
+ # Test good callback.
+ get_client_ssl_credentials.return_value = (True, b"cert", b"key", b"passphrase")
+ callback = mtls.default_client_encrypted_cert_source("cert_path", "key_path")
+ with mock.patch("{}.open".format(__name__), return_value=mock.MagicMock()):
+ assert callback() == ("cert_path", "key_path", b"passphrase")
+
+ # Test bad callback which throws exception.
+ get_client_ssl_credentials.side_effect = exceptions.ClientCertError()
+ callback = mtls.default_client_encrypted_cert_source("cert_path", "key_path")
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ callback()
diff --git a/contrib/python/google-auth/py2/tests/transport/test_requests.py b/contrib/python/google-auth/py2/tests/transport/test_requests.py
new file mode 100644
index 0000000000..3fdd17c3e4
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/test_requests.py
@@ -0,0 +1,506 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import functools
+import os
+import sys
+
+import freezegun
+import mock
+import OpenSSL
+import pytest
+import requests
+import requests.adapters
+from six.moves import http_client
+
+from google.auth import environment_vars
+from google.auth import exceptions
+import google.auth.credentials
+import google.auth.transport._mtls_helper
+import google.auth.transport.requests
+from google.oauth2 import service_account
+from tests.transport import compliance
+
+
@pytest.fixture
def frozen_time():
    """Freeze the clock at the epoch; tests advance it manually via ``tick``."""
    with freezegun.freeze_time("1970-01-01 00:00:00", tick=False) as frozen:
        yield frozen
+
+
class TestRequestResponse(compliance.RequestResponseTests):
    """Run the shared transport compliance suite against the requests adapter."""

    def make_request(self):
        # Hook for the compliance suite: build the transport under test.
        return google.auth.transport.requests.Request()

    def test_timeout(self):
        # The timeout kwarg must be forwarded verbatim to Session.request.
        http = mock.create_autospec(requests.Session, instance=True)
        request = google.auth.transport.requests.Request(http)
        request(url="http://example.com", method="GET", timeout=5)

        assert http.request.call_args[1]["timeout"] == 5
+
+
class TestTimeoutGuard(object):
    """Tests for TimeoutGuard's remaining-time bookkeeping and timeout errors."""

    def make_guard(self, *args, **kwargs):
        return google.auth.transport.requests.TimeoutGuard(*args, **kwargs)

    def test_tracks_elapsed_time_w_numeric_timeout(self, frozen_time):
        with self.make_guard(timeout=10) as guard:
            frozen_time.tick(delta=datetime.timedelta(seconds=3.8))
        assert guard.remaining_timeout == 6.2

    def test_tracks_elapsed_time_w_tuple_timeout(self, frozen_time):
        # A (connect, read) timeout pair: both are reduced by the elapsed time.
        with self.make_guard(timeout=(16, 19)) as guard:
            frozen_time.tick(delta=datetime.timedelta(seconds=3.8))
        assert guard.remaining_timeout == (12.2, 15.2)

    def test_noop_if_no_timeout(self, frozen_time):
        with self.make_guard(timeout=None) as guard:
            frozen_time.tick(delta=datetime.timedelta(days=3650))
        # NOTE: no timeout error raised, despite years have passed
        assert guard.remaining_timeout is None

    def test_timeout_error_w_numeric_timeout(self, frozen_time):
        with pytest.raises(requests.exceptions.Timeout):
            with self.make_guard(timeout=10) as guard:
                frozen_time.tick(delta=datetime.timedelta(seconds=10.001))
        assert guard.remaining_timeout == pytest.approx(-0.001)

    def test_timeout_error_w_tuple_timeout(self, frozen_time):
        with pytest.raises(requests.exceptions.Timeout):
            with self.make_guard(timeout=(11, 10)) as guard:
                frozen_time.tick(delta=datetime.timedelta(seconds=10.001))
        assert guard.remaining_timeout == pytest.approx((0.999, -0.001))

    def test_custom_timeout_error_type(self, frozen_time):
        # The guard raises the caller-supplied exception type on expiry.
        class FooError(Exception):
            pass

        with pytest.raises(FooError):
            with self.make_guard(timeout=1, timeout_error_type=FooError):
                frozen_time.tick(delta=datetime.timedelta(seconds=2))

    def test_lets_suite_errors_bubble_up(self, frozen_time):
        # Errors raised inside the guarded block must not be masked.
        with pytest.raises(IndexError):
            with self.make_guard(timeout=1):
                [1, 2, 3][3]
+
+
class CredentialsStub(google.auth.credentials.Credentials):
    """Minimal credentials double.

    The token string itself is used as the ``authorization`` header value,
    and each refresh appends ``"1"`` so tests can observe refresh calls.
    """

    def __init__(self, token="token"):
        super(CredentialsStub, self).__init__()
        self.token = token

    def apply(self, headers, token=None):
        headers["authorization"] = self.token

    def before_request(self, request, method, url, headers):
        self.apply(headers)

    def refresh(self, request):
        # Make each refresh observable in the resulting header value.
        self.token = self.token + "1"

    def with_quota_project(self, quota_project_id):
        # Not exercised by these tests.
        raise NotImplementedError()
+
+
class TimeTickCredentialsStub(CredentialsStub):
    """Credentials that spend some (mocked) time when refreshing a token."""

    def __init__(self, time_tick, token="token"):
        # Callable that advances the frozen clock, simulating elapsed time.
        self._time_tick = time_tick
        super(TimeTickCredentialsStub, self).__init__(token=token)

    def refresh(self, request):
        self._time_tick()
        # BUG FIX: the original passed the ``requests`` module instead of the
        # ``request`` argument to the parent refresh.
        super(TimeTickCredentialsStub, self).refresh(request)
+
+
class AdapterStub(requests.adapters.BaseAdapter):
    """Canned-response transport adapter that records every request it sends."""

    def __init__(self, responses, headers=None):
        super(AdapterStub, self).__init__()
        self.responses = responses  # queue of responses, served FIFO
        self.requests = []  # every request seen, in order
        self.headers = headers or {}

    def send(self, request, **kwargs):
        # pylint: disable=arguments-differ
        # request is the only required argument here and the only argument
        # we care about.
        self.requests.append(request)
        return self.responses.pop(0)

    def close(self):  # pragma: NO COVER
        # pylint wants this to be here because it's abstract in the base
        # class, but requests never actually calls it.
        return
+
+
class TimeTickAdapterStub(AdapterStub):
    """Adapter that spends some (mocked) time when making a request."""

    def __init__(self, time_tick, responses, headers=None):
        # Callable that advances the frozen clock, simulating network latency.
        self._time_tick = time_tick
        super(TimeTickAdapterStub, self).__init__(responses, headers=headers)

    def send(self, request, **kwargs):
        self._time_tick()
        return super(TimeTickAdapterStub, self).send(request, **kwargs)
+
+
class TestMutualTlsAdapter(object):
    """Tests for the requests adapter that installs mutual-TLS SSL contexts."""

    @mock.patch.object(requests.adapters.HTTPAdapter, "init_poolmanager")
    @mock.patch.object(requests.adapters.HTTPAdapter, "proxy_manager_for")
    def test_success(self, mock_proxy_manager_for, mock_init_poolmanager):
        adapter = google.auth.transport.requests._MutualTlsAdapter(
            pytest.public_cert_bytes, pytest.private_key_bytes
        )

        # Both the pool manager and the proxy manager must receive the
        # adapter's pre-built mTLS SSL contexts.
        adapter.init_poolmanager()
        mock_init_poolmanager.assert_called_with(ssl_context=adapter._ctx_poolmanager)

        adapter.proxy_manager_for()
        mock_proxy_manager_for.assert_called_with(ssl_context=adapter._ctx_proxymanager)

    def test_invalid_cert_or_key(self):
        # Malformed PEM input must surface as an OpenSSL crypto error.
        with pytest.raises(OpenSSL.crypto.Error):
            google.auth.transport.requests._MutualTlsAdapter(
                b"invalid cert", b"invalid key"
            )

    @mock.patch.dict("sys.modules", {"OpenSSL.crypto": None})
    def test_import_error(self):
        # With OpenSSL.crypto masked out, constructing the adapter must fail.
        with pytest.raises(ImportError):
            google.auth.transport.requests._MutualTlsAdapter(
                pytest.public_cert_bytes, pytest.private_key_bytes
            )
+
+
def make_response(status=http_client.OK, data=None):
    """Build a bare ``requests.Response`` with the given status and body."""
    response = requests.Response()
    response.status_code = status
    response._content = data  # bypass the normal content machinery
    return response
+
+
class TestAuthorizedSession(object):
    """Tests for AuthorizedSession: credential injection, refresh-on-401,
    timeout accounting, self-signed JWT setup, and mTLS configuration."""

    TEST_URL = "http://example.com/"

    def test_constructor(self):
        authed_session = google.auth.transport.requests.AuthorizedSession(
            mock.sentinel.credentials
        )

        assert authed_session.credentials == mock.sentinel.credentials

    def test_constructor_with_auth_request(self):
        http = mock.create_autospec(requests.Session)
        auth_request = google.auth.transport.requests.Request(http)

        authed_session = google.auth.transport.requests.AuthorizedSession(
            mock.sentinel.credentials, auth_request=auth_request
        )

        assert authed_session._auth_request == auth_request

    def test_request_default_timeout(self):
        credentials = mock.Mock(wraps=CredentialsStub())
        response = make_response()
        adapter = AdapterStub([response])

        authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
        authed_session.mount(self.TEST_URL, adapter)

        patcher = mock.patch("google.auth.transport.requests.requests.Session.request")
        with patcher as patched_request:
            authed_session.request("GET", self.TEST_URL)

        # When no timeout is given, the module-level default must be used.
        expected_timeout = google.auth.transport.requests._DEFAULT_TIMEOUT
        assert patched_request.call_args[1]["timeout"] == expected_timeout

    def test_request_no_refresh(self):
        credentials = mock.Mock(wraps=CredentialsStub())
        response = make_response()
        adapter = AdapterStub([response])

        authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
        authed_session.mount(self.TEST_URL, adapter)

        result = authed_session.request("GET", self.TEST_URL)

        assert response == result
        assert credentials.before_request.called
        assert not credentials.refresh.called
        assert len(adapter.requests) == 1
        assert adapter.requests[0].url == self.TEST_URL
        assert adapter.requests[0].headers["authorization"] == "token"

    def test_request_refresh(self):
        credentials = mock.Mock(wraps=CredentialsStub())
        final_response = make_response(status=http_client.OK)
        # First request will 401, second request will succeed.
        adapter = AdapterStub(
            [make_response(status=http_client.UNAUTHORIZED), final_response]
        )

        authed_session = google.auth.transport.requests.AuthorizedSession(
            credentials, refresh_timeout=60
        )
        authed_session.mount(self.TEST_URL, adapter)

        result = authed_session.request("GET", self.TEST_URL)

        assert result == final_response
        assert credentials.before_request.call_count == 2
        assert credentials.refresh.called
        assert len(adapter.requests) == 2

        assert adapter.requests[0].url == self.TEST_URL
        assert adapter.requests[0].headers["authorization"] == "token"

        # The retried request carries the refreshed token.
        assert adapter.requests[1].url == self.TEST_URL
        assert adapter.requests[1].headers["authorization"] == "token1"

    def test_request_max_allowed_time_timeout_error(self, frozen_time):
        tick_one_second = functools.partial(
            frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
        )

        credentials = mock.Mock(
            wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
        )
        adapter = TimeTickAdapterStub(
            time_tick=tick_one_second, responses=[make_response(status=http_client.OK)]
        )

        authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
        authed_session.mount(self.TEST_URL, adapter)

        # Because a request takes a full mocked second, max_allowed_time shorter
        # than that will cause a timeout error.
        with pytest.raises(requests.exceptions.Timeout):
            authed_session.request("GET", self.TEST_URL, max_allowed_time=0.9)

    def test_request_max_allowed_time_w_transport_timeout_no_error(self, frozen_time):
        tick_one_second = functools.partial(
            frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
        )

        credentials = mock.Mock(
            wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
        )
        adapter = TimeTickAdapterStub(
            time_tick=tick_one_second,
            responses=[
                make_response(status=http_client.UNAUTHORIZED),
                make_response(status=http_client.OK),
            ],
        )

        authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
        authed_session.mount(self.TEST_URL, adapter)

        # A short configured transport timeout does not affect max_allowed_time.
        # The latter is not adjusted to it and is only concerned with the actual
        # execution time. The call below should thus not raise a timeout error.
        authed_session.request("GET", self.TEST_URL, timeout=0.5, max_allowed_time=3.1)

    def test_request_max_allowed_time_w_refresh_timeout_no_error(self, frozen_time):
        tick_one_second = functools.partial(
            frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
        )

        credentials = mock.Mock(
            wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
        )
        adapter = TimeTickAdapterStub(
            time_tick=tick_one_second,
            responses=[
                make_response(status=http_client.UNAUTHORIZED),
                make_response(status=http_client.OK),
            ],
        )

        authed_session = google.auth.transport.requests.AuthorizedSession(
            credentials, refresh_timeout=1.1
        )
        authed_session.mount(self.TEST_URL, adapter)

        # A short configured refresh timeout does not affect max_allowed_time.
        # The latter is not adjusted to it and is only concerned with the actual
        # execution time. The call below should thus not raise a timeout error
        # (and `timeout` does not come into play either, as it's very long).
        authed_session.request("GET", self.TEST_URL, timeout=60, max_allowed_time=3.1)

    def test_request_timeout_w_refresh_timeout_timeout_error(self, frozen_time):
        tick_one_second = functools.partial(
            frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
        )

        credentials = mock.Mock(
            wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
        )
        adapter = TimeTickAdapterStub(
            time_tick=tick_one_second,
            responses=[
                make_response(status=http_client.UNAUTHORIZED),
                make_response(status=http_client.OK),
            ],
        )

        authed_session = google.auth.transport.requests.AuthorizedSession(
            credentials, refresh_timeout=100
        )
        authed_session.mount(self.TEST_URL, adapter)

        # An UNAUTHORIZED response triggers a refresh (an extra request), thus
        # the final request that otherwise succeeds results in a timeout error
        # (all three requests together last 3 mocked seconds).
        with pytest.raises(requests.exceptions.Timeout):
            authed_session.request(
                "GET", self.TEST_URL, timeout=60, max_allowed_time=2.9
            )

    def test_authorized_session_without_default_host(self):
        credentials = mock.create_autospec(service_account.Credentials)

        authed_session = google.auth.transport.requests.AuthorizedSession(credentials)

        authed_session.credentials._create_self_signed_jwt.assert_not_called()

    def test_authorized_session_with_default_host(self):
        # With a default host, a self-signed JWT for that audience is created.
        default_host = "pubsub.googleapis.com"
        credentials = mock.create_autospec(service_account.Credentials)

        authed_session = google.auth.transport.requests.AuthorizedSession(
            credentials, default_host=default_host
        )

        authed_session.credentials._create_self_signed_jwt.assert_called_once_with(
            "https://{}/".format(default_host)
        )

    def test_configure_mtls_channel_with_callback(self):
        mock_callback = mock.Mock()
        mock_callback.return_value = (
            pytest.public_cert_bytes,
            pytest.private_key_bytes,
        )

        auth_session = google.auth.transport.requests.AuthorizedSession(
            credentials=mock.Mock()
        )
        # mTLS is only attempted when the opt-in env var is set.
        with mock.patch.dict(
            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
        ):
            auth_session.configure_mtls_channel(mock_callback)

        assert auth_session.is_mtls
        assert isinstance(
            auth_session.adapters["https://"],
            google.auth.transport.requests._MutualTlsAdapter,
        )

    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_with_metadata(self, mock_get_client_cert_and_key):
        mock_get_client_cert_and_key.return_value = (
            True,
            pytest.public_cert_bytes,
            pytest.private_key_bytes,
        )

        auth_session = google.auth.transport.requests.AuthorizedSession(
            credentials=mock.Mock()
        )
        with mock.patch.dict(
            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
        ):
            auth_session.configure_mtls_channel()

        assert auth_session.is_mtls
        assert isinstance(
            auth_session.adapters["https://"],
            google.auth.transport.requests._MutualTlsAdapter,
        )

    @mock.patch.object(google.auth.transport.requests._MutualTlsAdapter, "__init__")
    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_non_mtls(
        self, mock_get_client_cert_and_key, mock_adapter_ctor
    ):
        # No client cert available: the session must stay non-mTLS.
        mock_get_client_cert_and_key.return_value = (False, None, None)

        auth_session = google.auth.transport.requests.AuthorizedSession(
            credentials=mock.Mock()
        )
        with mock.patch.dict(
            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
        ):
            auth_session.configure_mtls_channel()

        assert not auth_session.is_mtls

        # Assert _MutualTlsAdapter constructor is not called.
        mock_adapter_ctor.assert_not_called()

    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_exceptions(self, mock_get_client_cert_and_key):
        # Helper failures must be wrapped in MutualTLSChannelError.
        mock_get_client_cert_and_key.side_effect = exceptions.ClientCertError()

        auth_session = google.auth.transport.requests.AuthorizedSession(
            credentials=mock.Mock()
        )
        with pytest.raises(exceptions.MutualTLSChannelError):
            with mock.patch.dict(
                os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
            ):
                auth_session.configure_mtls_channel()

        # A missing OpenSSL must also surface as MutualTLSChannelError.
        mock_get_client_cert_and_key.return_value = (False, None, None)
        with mock.patch.dict("sys.modules"):
            sys.modules["OpenSSL"] = None
            with pytest.raises(exceptions.MutualTLSChannelError):
                with mock.patch.dict(
                    os.environ,
                    {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"},
                ):
                    auth_session.configure_mtls_channel()

    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_without_client_cert_env(
        self, get_client_cert_and_key
    ):
        # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE
        # environment variable is not set.
        auth_session = google.auth.transport.requests.AuthorizedSession(
            credentials=mock.Mock()
        )

        auth_session.configure_mtls_channel()
        assert not auth_session.is_mtls
        get_client_cert_and_key.assert_not_called()

        mock_callback = mock.Mock()
        auth_session.configure_mtls_channel(mock_callback)
        assert not auth_session.is_mtls
        mock_callback.assert_not_called()
diff --git a/contrib/python/google-auth/py2/tests/transport/test_urllib3.py b/contrib/python/google-auth/py2/tests/transport/test_urllib3.py
new file mode 100644
index 0000000000..7c06934760
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/transport/test_urllib3.py
@@ -0,0 +1,307 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+import mock
+import OpenSSL
+import pytest
+from six.moves import http_client
+import urllib3
+
+from google.auth import environment_vars
+from google.auth import exceptions
+import google.auth.credentials
+import google.auth.transport._mtls_helper
+import google.auth.transport.urllib3
+from google.oauth2 import service_account
+from tests.transport import compliance
+
+
class TestRequestResponse(compliance.RequestResponseTests):
    """Run the shared transport compliance suite against the urllib3 adapter."""

    def make_request(self):
        # Hook for the compliance suite: build the transport under test.
        http = urllib3.PoolManager()
        return google.auth.transport.urllib3.Request(http)

    def test_timeout(self):
        # The timeout kwarg must be forwarded verbatim to PoolManager.request.
        http = mock.create_autospec(urllib3.PoolManager)
        request = google.auth.transport.urllib3.Request(http)
        request(url="http://example.com", method="GET", timeout=5)

        assert http.request.call_args[1]["timeout"] == 5
+
+
def test__make_default_http_with_certifi():
    """With certifi available, the default pool enables cert verification."""
    http = google.auth.transport.urllib3._make_default_http()
    assert "cert_reqs" in http.connection_pool_kw
+
+
@mock.patch.object(google.auth.transport.urllib3, "certifi", new=None)
def test__make_default_http_without_certifi():
    """Without certifi, no cert_reqs setting is applied to the pool."""
    http = google.auth.transport.urllib3._make_default_http()
    assert "cert_reqs" not in http.connection_pool_kw
+
+
class CredentialsStub(google.auth.credentials.Credentials):
    """Minimal credentials fake.

    The token string itself becomes the ``authorization`` header value and
    ``refresh`` appends ``"1"`` so tests can detect each refresh.
    """

    def __init__(self, token="token"):
        super(CredentialsStub, self).__init__()
        self.token = token

    def apply(self, headers, token=None):
        headers["authorization"] = self.token

    def before_request(self, request, method, url, headers):
        self.apply(headers)

    def refresh(self, request):
        self.token += "1"

    def with_quota_project(self, quota_project_id):
        # Not exercised by these tests.
        raise NotImplementedError()
+
+
class HttpStub(object):
    """Fake urllib3-style client: serves canned responses and records calls."""

    def __init__(self, responses, headers=None):
        self.responses = responses  # FIFO queue of responses to hand out
        self.requests = []  # every call, as (method, url, body, headers, kwargs)
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None, **kwargs):
        call = (method, url, body, headers, kwargs)
        self.requests.append(call)
        return self.responses.pop(0)
+
+
class ResponseStub(object):
    """Bare response double carrying only ``status`` and ``data``."""

    def __init__(self, status=http_client.OK, data=None):
        self.status, self.data = status, data
+
+
class TestMakeMutualTlsHttp(object):
    """Tests for _make_mutual_tls_http, which builds an mTLS PoolManager."""

    def test_success(self):
        http = google.auth.transport.urllib3._make_mutual_tls_http(
            pytest.public_cert_bytes, pytest.private_key_bytes
        )
        assert isinstance(http, urllib3.PoolManager)

    def test_crypto_error(self):
        # Malformed PEM input must surface as an OpenSSL crypto error.
        with pytest.raises(OpenSSL.crypto.Error):
            google.auth.transport.urllib3._make_mutual_tls_http(
                b"invalid cert", b"invalid key"
            )

    @mock.patch.dict("sys.modules", {"OpenSSL.crypto": None})
    def test_import_error(self):
        # With OpenSSL.crypto masked out, building the mTLS http must fail.
        with pytest.raises(ImportError):
            google.auth.transport.urllib3._make_mutual_tls_http(
                pytest.public_cert_bytes, pytest.private_key_bytes
            )
+
+
class TestAuthorizedHttp(object):
    """Tests for AuthorizedHttp: credential injection, refresh-on-401,
    delegation to the wrapped PoolManager, and mTLS configuration."""

    TEST_URL = "http://example.com"

    def test_authed_http_defaults(self):
        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            mock.sentinel.credentials
        )

        assert authed_http.credentials == mock.sentinel.credentials
        assert isinstance(authed_http.http, urllib3.PoolManager)

    def test_urlopen_no_refresh(self):
        credentials = mock.Mock(wraps=CredentialsStub())
        response = ResponseStub()
        http = HttpStub([response])

        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials, http=http
        )

        result = authed_http.urlopen("GET", self.TEST_URL)

        assert result == response
        assert credentials.before_request.called
        assert not credentials.refresh.called
        assert http.requests == [
            ("GET", self.TEST_URL, None, {"authorization": "token"}, {})
        ]

    def test_urlopen_refresh(self):
        credentials = mock.Mock(wraps=CredentialsStub())
        final_response = ResponseStub(status=http_client.OK)
        # First request will 401, second request will succeed.
        http = HttpStub([ResponseStub(status=http_client.UNAUTHORIZED), final_response])

        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials, http=http
        )

        # Fixed: use TEST_URL instead of a duplicated literal, and keep the
        # response in its own variable instead of rebinding ``authed_http``.
        result = authed_http.urlopen("GET", self.TEST_URL)

        assert result == final_response
        assert credentials.before_request.call_count == 2
        assert credentials.refresh.called
        assert http.requests == [
            ("GET", self.TEST_URL, None, {"authorization": "token"}, {}),
            ("GET", self.TEST_URL, None, {"authorization": "token1"}, {}),
        ]

    def test_urlopen_no_default_host(self):
        credentials = mock.create_autospec(service_account.Credentials)

        authed_http = google.auth.transport.urllib3.AuthorizedHttp(credentials)

        authed_http.credentials._create_self_signed_jwt.assert_not_called()

    def test_urlopen_with_default_host(self):
        # With a default host, a self-signed JWT for that audience is created.
        default_host = "pubsub.googleapis.com"
        credentials = mock.create_autospec(service_account.Credentials)

        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials, default_host=default_host
        )

        authed_http.credentials._create_self_signed_jwt.assert_called_once_with(
            "https://{}/".format(default_host)
        )

    def test_proxies(self):
        # Context management and headers must delegate to the wrapped http.
        http = mock.create_autospec(urllib3.PoolManager)
        authed_http = google.auth.transport.urllib3.AuthorizedHttp(None, http=http)

        with authed_http:
            pass

        assert http.__enter__.called
        assert http.__exit__.called

        authed_http.headers = mock.sentinel.headers
        assert authed_http.headers == http.headers

    @mock.patch("google.auth.transport.urllib3._make_mutual_tls_http", autospec=True)
    def test_configure_mtls_channel_with_callback(self, mock_make_mutual_tls_http):
        callback = mock.Mock()
        callback.return_value = (pytest.public_cert_bytes, pytest.private_key_bytes)

        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials=mock.Mock(), http=mock.Mock()
        )

        # Replacing a caller-supplied http triggers a UserWarning.
        with pytest.warns(UserWarning):
            with mock.patch.dict(
                os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
            ):
                is_mtls = authed_http.configure_mtls_channel(callback)

        assert is_mtls
        mock_make_mutual_tls_http.assert_called_once_with(
            cert=pytest.public_cert_bytes, key=pytest.private_key_bytes
        )

    @mock.patch("google.auth.transport.urllib3._make_mutual_tls_http", autospec=True)
    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_with_metadata(
        self, mock_get_client_cert_and_key, mock_make_mutual_tls_http
    ):
        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials=mock.Mock()
        )

        mock_get_client_cert_and_key.return_value = (
            True,
            pytest.public_cert_bytes,
            pytest.private_key_bytes,
        )
        with mock.patch.dict(
            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
        ):
            is_mtls = authed_http.configure_mtls_channel()

        assert is_mtls
        mock_get_client_cert_and_key.assert_called_once()
        mock_make_mutual_tls_http.assert_called_once_with(
            cert=pytest.public_cert_bytes, key=pytest.private_key_bytes
        )

    @mock.patch("google.auth.transport.urllib3._make_mutual_tls_http", autospec=True)
    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_non_mtls(
        self, mock_get_client_cert_and_key, mock_make_mutual_tls_http
    ):
        # No client cert available: stays non-mTLS, no mTLS http is built.
        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials=mock.Mock()
        )

        mock_get_client_cert_and_key.return_value = (False, None, None)
        with mock.patch.dict(
            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
        ):
            is_mtls = authed_http.configure_mtls_channel()

        assert not is_mtls
        mock_get_client_cert_and_key.assert_called_once()
        mock_make_mutual_tls_http.assert_not_called()

    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_exceptions(self, mock_get_client_cert_and_key):
        # Helper failures must be wrapped in MutualTLSChannelError.
        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials=mock.Mock()
        )

        mock_get_client_cert_and_key.side_effect = exceptions.ClientCertError()
        with pytest.raises(exceptions.MutualTLSChannelError):
            with mock.patch.dict(
                os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
            ):
                authed_http.configure_mtls_channel()

        # A missing OpenSSL must also surface as MutualTLSChannelError.
        mock_get_client_cert_and_key.return_value = (False, None, None)
        with mock.patch.dict("sys.modules"):
            sys.modules["OpenSSL"] = None
            with pytest.raises(exceptions.MutualTLSChannelError):
                with mock.patch.dict(
                    os.environ,
                    {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"},
                ):
                    authed_http.configure_mtls_channel()

    @mock.patch(
        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
    )
    def test_configure_mtls_channel_without_client_cert_env(
        self, get_client_cert_and_key
    ):
        callback = mock.Mock()

        authed_http = google.auth.transport.urllib3.AuthorizedHttp(
            credentials=mock.Mock(), http=mock.Mock()
        )

        # Test the callback is not called if GOOGLE_API_USE_CLIENT_CERTIFICATE is not set.
        is_mtls = authed_http.configure_mtls_channel(callback)
        assert not is_mtls
        callback.assert_not_called()

        # Test ADC client cert is not used if GOOGLE_API_USE_CLIENT_CERTIFICATE is not set.
        is_mtls = authed_http.configure_mtls_channel(callback)
        assert not is_mtls
        get_client_cert_and_key.assert_not_called()
diff --git a/contrib/python/google-auth/py2/tests/ya.make b/contrib/python/google-auth/py2/tests/ya.make
new file mode 100644
index 0000000000..06e07abc90
--- /dev/null
+++ b/contrib/python/google-auth/py2/tests/ya.make
@@ -0,0 +1,73 @@
+PY2TEST()
+
+PEERDIR(
+ contrib/python/Flask
+ contrib/python/google-auth
+ contrib/python/mock
+ contrib/python/responses
+ contrib/python/pyOpenSSL
+ contrib/python/pytest-localserver
+ contrib/python/oauth2client
+ contrib/python/freezegun
+)
+
+DATA(
+ arcadia/contrib/python/google-auth/py2/tests/data
+)
+
+PY_SRCS(
+ NAMESPACE tests
+ transport/__init__.py
+ transport/compliance.py
+)
+
+TEST_SRCS(
+ __init__.py
+ compute_engine/__init__.py
+ compute_engine/test__metadata.py
+ compute_engine/test_credentials.py
+ conftest.py
+ crypt/__init__.py
+ crypt/test__cryptography_rsa.py
+ crypt/test__python_rsa.py
+ crypt/test_crypt.py
+ crypt/test_es256.py
+ oauth2/__init__.py
+ oauth2/test__client.py
+ # oauth2/test_challenges.py - need pyu2f
+ oauth2/test_credentials.py
+ oauth2/test_id_token.py
+ oauth2/test_reauth.py
+ oauth2/test_service_account.py
+ oauth2/test_sts.py
+ oauth2/test_utils.py
+ test__cloud_sdk.py
+ test__default.py
+ test__helpers.py
+ test__oauth2client.py
+ test__service_account_info.py
+ test_app_engine.py
+ test_aws.py
+ test_credentials.py
+ test_downscoped.py
+ test_external_account.py
+ test_iam.py
+ test_identity_pool.py
+ test_impersonated_credentials.py
+ test_jwt.py
+ transport/test__http_client.py
+ transport/test__mtls_helper.py
+ transport/test_grpc.py
+ transport/test_mtls.py
+ # transport/test_requests.py
+ # transport/test_urllib3.py
+)
+
+RESOURCE(
+ data/privatekey.pem data/privatekey.pem
+ data/public_cert.pem data/public_cert.pem
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/google-auth/py2/ya.make b/contrib/python/google-auth/py2/ya.make
new file mode 100644
index 0000000000..5695e8b3d1
--- /dev/null
+++ b/contrib/python/google-auth/py2/ya.make
@@ -0,0 +1,85 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(1.35.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/cachetools
+ contrib/python/cryptography
+ contrib/python/grpcio
+ contrib/python/pyasn1-modules
+ contrib/python/requests
+ contrib/python/rsa
+ contrib/python/setuptools
+ contrib/python/six
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ google.auth._oauth2client
+ google.auth.transport._aiohttp_requests
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ google/auth/__init__.py
+ google/auth/_cloud_sdk.py
+ google/auth/_default.py
+ google/auth/_helpers.py
+ google/auth/_oauth2client.py
+ google/auth/_service_account_info.py
+ google/auth/app_engine.py
+ google/auth/aws.py
+ google/auth/compute_engine/__init__.py
+ google/auth/compute_engine/_metadata.py
+ google/auth/compute_engine/credentials.py
+ google/auth/credentials.py
+ google/auth/crypt/__init__.py
+ google/auth/crypt/_cryptography_rsa.py
+ google/auth/crypt/_helpers.py
+ google/auth/crypt/_python_rsa.py
+ google/auth/crypt/base.py
+ google/auth/crypt/es256.py
+ google/auth/crypt/rsa.py
+ google/auth/downscoped.py
+ google/auth/environment_vars.py
+ google/auth/exceptions.py
+ google/auth/external_account.py
+ google/auth/iam.py
+ google/auth/identity_pool.py
+ google/auth/impersonated_credentials.py
+ google/auth/jwt.py
+ google/auth/transport/__init__.py
+ google/auth/transport/_http_client.py
+ google/auth/transport/_mtls_helper.py
+ google/auth/transport/grpc.py
+ google/auth/transport/mtls.py
+ google/auth/transport/requests.py
+ google/auth/transport/urllib3.py
+ google/auth/version.py
+ google/oauth2/__init__.py
+ google/oauth2/_client.py
+ google/oauth2/challenges.py
+ google/oauth2/credentials.py
+ google/oauth2/id_token.py
+ google/oauth2/reauth.py
+ google/oauth2/service_account.py
+ google/oauth2/sts.py
+ google/oauth2/utils.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/google-auth/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/google-auth/py3/.dist-info/METADATA b/contrib/python/google-auth/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..23841a2ee7
--- /dev/null
+++ b/contrib/python/google-auth/py3/.dist-info/METADATA
@@ -0,0 +1,125 @@
+Metadata-Version: 2.1
+Name: google-auth
+Version: 2.23.0
+Summary: Google Authentication Library
+Home-page: https://github.com/googleapis/google-auth-library-python
+Author: Google Cloud Platform
+Author-email: googleapis-packages@google.com
+License: Apache 2.0
+Keywords: google auth oauth client
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: cachetools (<6.0,>=2.0.0)
+Requires-Dist: pyasn1-modules (>=0.2.1)
+Requires-Dist: rsa (<5,>=3.1.4)
+Requires-Dist: urllib3 (<2.0)
+Provides-Extra: aiohttp
+Requires-Dist: aiohttp (<4.0.0.dev0,>=3.6.2) ; extra == 'aiohttp'
+Requires-Dist: requests (<3.0.0.dev0,>=2.20.0) ; extra == 'aiohttp'
+Provides-Extra: enterprise_cert
+Requires-Dist: cryptography (==36.0.2) ; extra == 'enterprise_cert'
+Requires-Dist: pyopenssl (==22.0.0) ; extra == 'enterprise_cert'
+Provides-Extra: pyopenssl
+Requires-Dist: pyopenssl (>=20.0.0) ; extra == 'pyopenssl'
+Requires-Dist: cryptography (>=38.0.3) ; extra == 'pyopenssl'
+Provides-Extra: reauth
+Requires-Dist: pyu2f (>=0.1.5) ; extra == 'reauth'
+Provides-Extra: requests
+Requires-Dist: requests (<3.0.0.dev0,>=2.20.0) ; extra == 'requests'
+
+Google Auth Python Library
+==========================
+
+|pypi|
+
+This library simplifies using Google's various server-to-server authentication
+mechanisms to access Google APIs.
+
+.. |pypi| image:: https://img.shields.io/pypi/v/google-auth.svg
+ :target: https://pypi.python.org/pypi/google-auth
+
+Installing
+----------
+
+You can install using `pip`_::
+
+ $ pip install google-auth
+
+.. _pip: https://pip.pypa.io/en/stable/
+
+For more information on setting up your Python development environment, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform.
+
+.. _`Python Development Environment Setup Guide`: https://cloud.google.com/python/setup
+
+Extras
+------
+
+google-auth has few extras that you can install. For example::
+
+ $ pip install google-auth[pyopenssl]
+
+Note that the extras pyopenssl and enterprise_cert should not be used together because they use conflicting versions of `cryptography`_.
+
+.. _`cryptography`: https://cryptography.io/en/latest/
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.7
+
+Unsupported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- Python == 2.7: The last version of this library with support for Python 2.7
+ was `google.auth == 1.34.0`.
+
+- Python 3.5: The last version of this library with support for Python 3.5
+ was `google.auth == 1.23.0`.
+
+- Python 3.6: The last version of this library with support for Python 3.6
+ was `google.auth == 2.22.0`.
+
+Documentation
+-------------
+
+Google Auth Python Library has usage and reference documentation at https://googleapis.dev/python/google-auth/latest/index.html.
+
+Current Maintainers
+-------------------
+- googleapis-auth@google.com
+
+Authors
+-------
+
+- `@theacodes <https://github.com/theacodes>`_ (Thea Flowers)
+- `@dhermes <https://github.com/dhermes>`_ (Danny Hermes)
+- `@lukesneeringer <https://github.com/lukesneeringer>`_ (Luke Sneeringer)
+- `@busunkim96 <https://github.com/busunkim96>`_ (Bu Sun Kim)
+
+Contributing
+------------
+
+Contributions to this library are always welcome and highly encouraged.
+
+See `CONTRIBUTING.rst`_ for more information on how to get started.
+
+.. _CONTRIBUTING.rst: https://github.com/googleapis/google-auth-library-python/blob/main/CONTRIBUTING.rst
+
+License
+-------
+
+Apache 2.0 - See `the LICENSE`_ for more information.
+
+.. _the LICENSE: https://github.com/googleapis/google-auth-library-python/blob/main/LICENSE
diff --git a/contrib/python/google-auth/py3/.dist-info/top_level.txt b/contrib/python/google-auth/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..64f26a32e6
--- /dev/null
+++ b/contrib/python/google-auth/py3/.dist-info/top_level.txt
@@ -0,0 +1,3 @@
+google
+scripts
+testing
diff --git a/contrib/python/google-auth/py3/LICENSE b/contrib/python/google-auth/py3/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/contrib/python/google-auth/py3/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/python/google-auth/py3/README.rst b/contrib/python/google-auth/py3/README.rst
new file mode 100644
index 0000000000..cdd19bed50
--- /dev/null
+++ b/contrib/python/google-auth/py3/README.rst
@@ -0,0 +1,82 @@
+Google Auth Python Library
+==========================
+
+|pypi|
+
+This library simplifies using Google's various server-to-server authentication
+mechanisms to access Google APIs.
+
+.. |pypi| image:: https://img.shields.io/pypi/v/google-auth.svg
+ :target: https://pypi.python.org/pypi/google-auth
+
+Installing
+----------
+
+You can install using `pip`_::
+
+ $ pip install google-auth
+
+.. _pip: https://pip.pypa.io/en/stable/
+
+For more information on setting up your Python development environment, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform.
+
+.. _`Python Development Environment Setup Guide`: https://cloud.google.com/python/setup
+
+Extras
+------
+
+google-auth has few extras that you can install. For example::
+
+ $ pip install google-auth[pyopenssl]
+
+Note that the extras pyopenssl and enterprise_cert should not be used together because they use conflicting versions of `cryptography`_.
+
+.. _`cryptography`: https://cryptography.io/en/latest/
+
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.7
+
+Unsupported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- Python == 2.7: The last version of this library with support for Python 2.7
+ was `google.auth == 1.34.0`.
+
+- Python 3.5: The last version of this library with support for Python 3.5
+ was `google.auth == 1.23.0`.
+
+- Python 3.6: The last version of this library with support for Python 3.6
+ was `google.auth == 2.22.0`.
+
+Documentation
+-------------
+
+Google Auth Python Library has usage and reference documentation at https://googleapis.dev/python/google-auth/latest/index.html.
+
+Current Maintainers
+-------------------
+- googleapis-auth@google.com
+
+Authors
+-------
+
+- `@theacodes <https://github.com/theacodes>`_ (Thea Flowers)
+- `@dhermes <https://github.com/dhermes>`_ (Danny Hermes)
+- `@lukesneeringer <https://github.com/lukesneeringer>`_ (Luke Sneeringer)
+- `@busunkim96 <https://github.com/busunkim96>`_ (Bu Sun Kim)
+
+Contributing
+------------
+
+Contributions to this library are always welcome and highly encouraged.
+
+See `CONTRIBUTING.rst`_ for more information on how to get started.
+
+.. _CONTRIBUTING.rst: https://github.com/googleapis/google-auth-library-python/blob/main/CONTRIBUTING.rst
+
+License
+-------
+
+Apache 2.0 - See `the LICENSE`_ for more information.
+
+.. _the LICENSE: https://github.com/googleapis/google-auth-library-python/blob/main/LICENSE
diff --git a/contrib/python/google-auth/py3/google/auth/__init__.py b/contrib/python/google-auth/py3/google/auth/__init__.py
new file mode 100644
index 0000000000..2875772b37
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/__init__.py
@@ -0,0 +1,33 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Auth Library for Python."""
+
+import logging
+
+from google.auth import version as google_auth_version
+from google.auth._default import (
+ default,
+ load_credentials_from_dict,
+ load_credentials_from_file,
+)
+
+
+__version__ = google_auth_version.__version__
+
+
+__all__ = ["default", "load_credentials_from_file", "load_credentials_from_dict"]
+
+# Set default logging handler to avoid "No handler found" warnings.
+logging.getLogger(__name__).addHandler(logging.NullHandler())
diff --git a/contrib/python/google-auth/py3/google/auth/_cloud_sdk.py b/contrib/python/google-auth/py3/google/auth/_cloud_sdk.py
new file mode 100644
index 0000000000..a94411949b
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_cloud_sdk.py
@@ -0,0 +1,153 @@
+# Copyright 2015 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for reading the Google Cloud SDK's configuration."""
+
+import os
+import subprocess
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+
+
+# The ~/.config subdirectory containing gcloud credentials.
+_CONFIG_DIRECTORY = "gcloud"
+# Windows systems store config at %APPDATA%\gcloud
+_WINDOWS_CONFIG_ROOT_ENV_VAR = "APPDATA"
+# The name of the file in the Cloud SDK config that contains default
+# credentials.
+_CREDENTIALS_FILENAME = "application_default_credentials.json"
+# The name of the Cloud SDK shell script
+_CLOUD_SDK_POSIX_COMMAND = "gcloud"
+_CLOUD_SDK_WINDOWS_COMMAND = "gcloud.cmd"
+# The command to get the Cloud SDK configuration
+_CLOUD_SDK_CONFIG_GET_PROJECT_COMMAND = ("config", "get", "project")
+# The command to get google user access token
+_CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND = ("auth", "print-access-token")
+# Cloud SDK's application-default client ID
+CLOUD_SDK_CLIENT_ID = (
+ "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com"
+)
+
+
+def get_config_path():
+    """Returns the absolute path to the Cloud SDK's configuration directory.
+
+ Returns:
+ str: The Cloud SDK config path.
+ """
+ # If the path is explicitly set, return that.
+ try:
+ return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
+ except KeyError:
+ pass
+
+ # Non-windows systems store this at ~/.config/gcloud
+ if os.name != "nt":
+ return os.path.join(os.path.expanduser("~"), ".config", _CONFIG_DIRECTORY)
+ # Windows systems store config at %APPDATA%\gcloud
+ else:
+ try:
+ return os.path.join(
+ os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY
+ )
+ except KeyError:
+ # This should never happen unless someone is really
+ # messing with things, but we'll cover the case anyway.
+ drive = os.environ.get("SystemDrive", "C:")
+ return os.path.join(drive, "\\", _CONFIG_DIRECTORY)
+
+
+def get_application_default_credentials_path():
+ """Gets the path to the application default credentials file.
+
+ The path may or may not exist.
+
+ Returns:
+ str: The full path to application default credentials.
+ """
+ config_path = get_config_path()
+ return os.path.join(config_path, _CREDENTIALS_FILENAME)
+
+
+def _run_subprocess_ignore_stderr(command):
+ """ Return subprocess.check_output with the given command and ignores stderr."""
+ with open(os.devnull, "w") as devnull:
+ output = subprocess.check_output(command, stderr=devnull)
+ return output
+
+
+def get_project_id():
+ """Gets the project ID from the Cloud SDK.
+
+ Returns:
+ Optional[str]: The project ID.
+ """
+ if os.name == "nt":
+ command = _CLOUD_SDK_WINDOWS_COMMAND
+ else:
+ command = _CLOUD_SDK_POSIX_COMMAND
+
+ try:
+ # Ignore the stderr coming from gcloud, so it won't be mixed into the output.
+ # https://github.com/googleapis/google-auth-library-python/issues/673
+ project = _run_subprocess_ignore_stderr(
+ (command,) + _CLOUD_SDK_CONFIG_GET_PROJECT_COMMAND
+ )
+
+ # Turn bytes into a string and remove "\n"
+ project = _helpers.from_bytes(project).strip()
+ return project if project else None
+ except (subprocess.CalledProcessError, OSError, IOError):
+ return None
+
+
+def get_auth_access_token(account=None):
+ """Load user access token with the ``gcloud auth print-access-token`` command.
+
+ Args:
+ account (Optional[str]): Account to get the access token for. If not
+ specified, the current active account will be used.
+
+ Returns:
+ str: The user access token.
+
+ Raises:
+ google.auth.exceptions.UserAccessTokenError: if failed to get access
+ token from gcloud.
+ """
+ if os.name == "nt":
+ command = _CLOUD_SDK_WINDOWS_COMMAND
+ else:
+ command = _CLOUD_SDK_POSIX_COMMAND
+
+ try:
+ if account:
+ command = (
+ (command,)
+ + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND
+ + ("--account=" + account,)
+ )
+ else:
+ command = (command,) + _CLOUD_SDK_USER_ACCESS_TOKEN_COMMAND
+
+ access_token = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ # remove the trailing "\n"
+ return access_token.decode("utf-8").strip()
+ except (subprocess.CalledProcessError, OSError, IOError) as caught_exc:
+ new_exc = exceptions.UserAccessTokenError(
+ "Failed to obtain access token", caught_exc
+ )
+ raise new_exc from caught_exc
diff --git a/contrib/python/google-auth/py3/google/auth/_credentials_async.py b/contrib/python/google-auth/py3/google/auth/_credentials_async.py
new file mode 100644
index 0000000000..760758d851
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_credentials_async.py
@@ -0,0 +1,171 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Interfaces for credentials."""
+
+import abc
+import inspect
+
+from google.auth import credentials
+
+
+class Credentials(credentials.Credentials, metaclass=abc.ABCMeta):
+ """Async inherited credentials class from google.auth.credentials.
+ The added functionality is the before_request call which requires
+ async/await syntax.
+ All credentials have a :attr:`token` that is used for authentication and
+ may also optionally set an :attr:`expiry` to indicate when the token will
+ no longer be valid.
+
+ Most credentials will be :attr:`invalid` until :meth:`refresh` is called.
+ Credentials can do this automatically before the first HTTP request in
+ :meth:`before_request`.
+
+ Although the token and expiration will change as the credentials are
+ :meth:`refreshed <refresh>` and used, credentials should be considered
+ immutable. Various credentials will accept configuration such as private
+ keys, scopes, and other options. These options are not changeable after
+ construction. Some classes will provide mechanisms to copy the credentials
+ with modifications such as :meth:`ScopedCredentials.with_scopes`.
+ """
+
+ async def before_request(self, request, method, url, headers):
+ """Performs credential-specific before request logic.
+
+ Refreshes the credentials if necessary, then calls :meth:`apply` to
+ apply the token to the authentication header.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ method (str): The request's HTTP method or the RPC method being
+ invoked.
+ url (str): The request's URI or the RPC service's URI.
+ headers (Mapping): The request's headers.
+ """
+ # pylint: disable=unused-argument
+ # (Subclasses may use these arguments to ascertain information about
+ # the http request.)
+
+ if not self.valid:
+ if inspect.iscoroutinefunction(self.refresh):
+ await self.refresh(request)
+ else:
+ self.refresh(request)
+ self.apply(headers)
+
+
+class CredentialsWithQuotaProject(credentials.CredentialsWithQuotaProject):
+ """Abstract base for credentials supporting ``with_quota_project`` factory"""
+
+
+class AnonymousCredentials(credentials.AnonymousCredentials, Credentials):
+ """Credentials that do not provide any authentication information.
+
+ These are useful in the case of services that support anonymous access or
+ local service emulators that do not use credentials. This class inherits
+ from the sync anonymous credentials file, but is kept if async credentials
+ is initialized and we would like anonymous credentials.
+ """
+
+
+class ReadOnlyScoped(credentials.ReadOnlyScoped, metaclass=abc.ABCMeta):
+ """Interface for credentials whose scopes can be queried.
+
+ OAuth 2.0-based credentials allow limiting access using scopes as described
+ in `RFC6749 Section 3.3`_.
+ If a credential class implements this interface then the credentials either
+ use scopes in their implementation.
+
+ Some credentials require scopes in order to obtain a token. You can check
+ if scoping is necessary with :attr:`requires_scopes`::
+
+ if credentials.requires_scopes:
+ # Scoping is required.
+ credentials = _credentials_async.with_scopes(scopes=['one', 'two'])
+
+ Credentials that require scopes must either be constructed with scopes::
+
+ credentials = SomeScopedCredentials(scopes=['one', 'two'])
+
+ Or must copy an existing instance using :meth:`with_scopes`::
+
+ scoped_credentials = _credentials_async.with_scopes(scopes=['one', 'two'])
+
+ Some credentials have scopes but do not allow or require scopes to be set,
+ these credentials can be used as-is.
+
+ .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
+ """
+
+
+class Scoped(credentials.Scoped):
+ """Interface for credentials whose scopes can be replaced while copying.
+
+ OAuth 2.0-based credentials allow limiting access using scopes as described
+ in `RFC6749 Section 3.3`_.
+ If a credential class implements this interface then the credentials either
+ use scopes in their implementation.
+
+ Some credentials require scopes in order to obtain a token. You can check
+ if scoping is necessary with :attr:`requires_scopes`::
+
+ if credentials.requires_scopes:
+ # Scoping is required.
+ credentials = _credentials_async.create_scoped(['one', 'two'])
+
+ Credentials that require scopes must either be constructed with scopes::
+
+ credentials = SomeScopedCredentials(scopes=['one', 'two'])
+
+ Or must copy an existing instance using :meth:`with_scopes`::
+
+ scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+ Some credentials have scopes but do not allow or require scopes to be set,
+ these credentials can be used as-is.
+
+ .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
+ """
+
+
+def with_scopes_if_required(credentials, scopes):
+ """Creates a copy of the credentials with scopes if scoping is required.
+
+ This helper function is useful when you do not know (or care to know) the
+ specific type of credentials you are using (such as when you use
+ :func:`google.auth.default`). This function will call
+ :meth:`Scoped.with_scopes` if the credentials are scoped credentials and if
+ the credentials require scoping. Otherwise, it will return the credentials
+ as-is.
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ scope if necessary.
+ scopes (Sequence[str]): The list of scopes to use.
+
+ Returns:
+ google.auth._credentials_async.Credentials: Either a new set of scoped
+ credentials, or the passed in credentials instance if no scoping
+ was required.
+ """
+ if isinstance(credentials, Scoped) and credentials.requires_scopes:
+ return credentials.with_scopes(scopes)
+ else:
+ return credentials
+
+
+class Signing(credentials.Signing, metaclass=abc.ABCMeta):
+ """Interface for credentials that can cryptographically sign messages."""
diff --git a/contrib/python/google-auth/py3/google/auth/_default.py b/contrib/python/google-auth/py3/google/auth/_default.py
new file mode 100644
index 0000000000..63009dfb86
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_default.py
@@ -0,0 +1,691 @@
+# Copyright 2015 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Application default credentials.
+
+Implements application default credentials and project ID detection.
+"""
+
+import io
+import json
+import logging
+import os
+import warnings
+
+from google.auth import environment_vars
+from google.auth import exceptions
+import google.auth.transport._http_client
+
# Module-level logger used for ADC detection diagnostics.
_LOGGER = logging.getLogger(__name__)

# Valid types accepted for file-based credentials.
# Each constant mirrors the "type" field of a credentials JSON file.
_AUTHORIZED_USER_TYPE = "authorized_user"
_SERVICE_ACCOUNT_TYPE = "service_account"
_EXTERNAL_ACCOUNT_TYPE = "external_account"
_EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE = "external_account_authorized_user"
_IMPERSONATED_SERVICE_ACCOUNT_TYPE = "impersonated_service_account"
_GDCH_SERVICE_ACCOUNT_TYPE = "gdch_service_account"
# Closed set used by _load_credentials_from_info when dispatching on "type".
_VALID_TYPES = (
    _AUTHORIZED_USER_TYPE,
    _SERVICE_ACCOUNT_TYPE,
    _EXTERNAL_ACCOUNT_TYPE,
    _EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE,
    _IMPERSONATED_SERVICE_ACCOUNT_TYPE,
    _GDCH_SERVICE_ACCOUNT_TYPE,
)

# Help message when no credentials can be found.
_CLOUD_SDK_MISSING_CREDENTIALS = """\
Your default credentials were not found. To set up Application Default Credentials, \
see https://cloud.google.com/docs/authentication/external/set-up-adc for more information.\
"""

# Warning when using Cloud SDK user credentials
_CLOUD_SDK_CREDENTIALS_WARNING = """\
Your application has authenticated using end user credentials from Google \
Cloud SDK without a quota project. You might receive a "quota exceeded" \
or "API not enabled" error. See the following page for troubleshooting: \
https://cloud.google.com/docs/authentication/adc-troubleshooting/user-creds. \
"""

# The subject token type used for AWS external_account credentials.
_AWS_SUBJECT_TOKEN_TYPE = "urn:ietf:params:aws:token-type:aws4_request"
+
+
def _warn_about_problematic_credentials(credentials):
    """Warn when credentials belong to the Cloud SDK's own OAuth client.

    Such credentials are associated with the Cloud SDK's project rather than
    the user's project, so they may hit "API not enabled" errors and reduced
    quota; emit the standing warning in that case.
    """
    from google.auth import _cloud_sdk

    is_sdk_client = credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID
    if is_sdk_client:
        warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING)
+
+
def load_credentials_from_file(
    filename, scopes=None, default_scopes=None, quota_project_id=None, request=None
):
    """Loads Google credentials from a file.

    The credentials file must be a service account key, stored authorized
    user credentials, external account credentials, or impersonated service
    account credentials.

    Args:
        filename (str): The full path to the credentials file.
        scopes (Optional[Sequence[str]]): The list of scopes for the
            credentials. If specified, the credentials will automatically be
            scoped if necessary.
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
        quota_project_id (Optional[str]): The project ID used for quota and
            billing.
        request (Optional[google.auth.transport.Request]): An object used to
            make HTTP requests. This is used to determine the associated
            project ID for a workload identity pool resource (external
            account credentials). If not specified, a
            google.auth.transport.requests.Request client is used.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. Authorized user credentials do
            not have the project ID information. External account
            credentials project IDs may not always be determined.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the file is in
            the wrong format or is missing.
    """
    if not os.path.exists(filename):
        raise exceptions.DefaultCredentialsError(
            "File {} was not found.".format(filename)
        )

    try:
        with io.open(filename, "r") as file_obj:
            info = json.load(file_obj)
    except ValueError as caught_exc:
        # Keep the original JSON decode error attached as both cause and arg.
        new_exc = exceptions.DefaultCredentialsError(
            "File {} is not a valid json file.".format(filename), caught_exc
        )
        raise new_exc from caught_exc

    return _load_credentials_from_info(
        filename, info, scopes, default_scopes, quota_project_id, request
    )
+
+
def load_credentials_from_dict(
    info, scopes=None, default_scopes=None, quota_project_id=None, request=None
):
    """Loads Google credentials from a dict.

    The dict must describe a service account key, stored authorized user
    credentials, external account credentials, or impersonated service
    account credentials.

    Args:
        info (Dict[str, Any]): A dict object containing the credentials.
        scopes (Optional[Sequence[str]]): The list of scopes for the
            credentials. If specified, the credentials will automatically be
            scoped if necessary.
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
        quota_project_id (Optional[str]): The project ID used for quota and
            billing.
        request (Optional[google.auth.transport.Request]): An object used to
            make HTTP requests. This is used to determine the associated
            project ID for a workload identity pool resource (external
            account credentials). If not specified, a
            google.auth.transport.requests.Request client is used.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. Authorized user credentials do
            not have the project ID information. External account
            credentials project IDs may not always be determined.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the info is in
            the wrong format or of the wrong type.
    """
    if isinstance(info, dict):
        # "dict object" stands in for a filename in downstream error messages.
        return _load_credentials_from_info(
            "dict object", info, scopes, default_scopes, quota_project_id, request
        )

    raise exceptions.DefaultCredentialsError(
        "info object was of type {} but dict type was expected.".format(type(info))
    )
+
+
def _load_credentials_from_info(
    filename, info, scopes, default_scopes, quota_project_id, request
):
    """Dispatch parsed credentials info to the loader for its "type" key.

    Args:
        filename (str): Source of the info, used only in error messages (may
            be the literal string "dict object").
        info (Mapping[str, str]): The parsed credentials JSON.
        scopes (Optional[Sequence[str]]): User-defined scopes.
        default_scopes (Optional[Sequence[str]]): Client-library scopes.
        quota_project_id (Optional[str]): The project ID used for quota and
            billing.
        request (Optional[google.auth.transport.Request]): Transport used by
            loaders that need to call Google endpoints.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and, when determinable, the project ID.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the "type" field
            is not one of _VALID_TYPES.
    """
    from google.auth.credentials import CredentialsWithQuotaProject

    credential_type = info.get("type")

    if credential_type == _AUTHORIZED_USER_TYPE:
        credentials, project_id = _get_authorized_user_credentials(
            filename, info, scopes
        )

    elif credential_type == _SERVICE_ACCOUNT_TYPE:
        credentials, project_id = _get_service_account_credentials(
            filename, info, scopes, default_scopes
        )

    elif credential_type == _EXTERNAL_ACCOUNT_TYPE:
        credentials, project_id = _get_external_account_credentials(
            info,
            filename,
            scopes=scopes,
            default_scopes=default_scopes,
            request=request,
        )

    elif credential_type == _EXTERNAL_ACCOUNT_AUTHORIZED_USER_TYPE:
        # Fix: pass the transport via the ``request`` keyword. The previous
        # positional call bound it to the helper's ``scopes`` parameter
        # (signature: filename, info, scopes=None, default_scopes=None,
        # request=None).
        credentials, project_id = _get_external_account_authorized_user_credentials(
            filename, info, request=request
        )

    elif credential_type == _IMPERSONATED_SERVICE_ACCOUNT_TYPE:
        credentials, project_id = _get_impersonated_service_account_credentials(
            filename, info, scopes
        )
    elif credential_type == _GDCH_SERVICE_ACCOUNT_TYPE:
        credentials, project_id = _get_gdch_service_account_credentials(filename, info)
    else:
        raise exceptions.DefaultCredentialsError(
            "The file {file} does not have a valid type. "
            "Type is {type}, expected one of {valid_types}.".format(
                file=filename, type=credential_type, valid_types=_VALID_TYPES
            )
        )
    # Quota project can only be applied to credentials types that support it.
    if isinstance(credentials, CredentialsWithQuotaProject):
        credentials = _apply_quota_project_id(credentials, quota_project_id)
    return credentials, project_id
+
+
def _get_gcloud_sdk_credentials(quota_project_id=None):
    """Gets the credentials and project ID from the Cloud SDK."""
    from google.auth import _cloud_sdk

    _LOGGER.debug("Checking Cloud SDK credentials as part of auth process...")

    # Application default credentials written by
    # `gcloud auth application-default login`.
    adc_path = _cloud_sdk.get_application_default_credentials_path()
    if not os.path.isfile(adc_path):
        _LOGGER.debug("Cloud SDK credentials not found on disk; not using them")
        return None, None

    credentials, project_id = load_credentials_from_file(
        adc_path, quota_project_id=quota_project_id
    )

    # Fall back to the SDK's active project when the file carries none.
    if not project_id:
        project_id = _cloud_sdk.get_project_id()

    return credentials, project_id
+
+
def _get_explicit_environ_credentials(quota_project_id=None):
    """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
    variable."""
    from google.auth import _cloud_sdk

    cloud_sdk_adc_path = _cloud_sdk.get_application_default_credentials_path()
    explicit_file = os.environ.get(environment_vars.CREDENTIALS)

    _LOGGER.debug(
        "Checking %s for explicit credentials as part of auth process...", explicit_file
    )

    if explicit_file is None:
        return None, None

    if explicit_file == cloud_sdk_adc_path:
        # Cloud sdk flow calls gcloud to fetch project id, so if the explicit
        # file path is cloud sdk credentials path, then we should fall back
        # to cloud sdk flow, otherwise project id cannot be obtained.
        _LOGGER.debug(
            "Explicit credentials path %s is the same as Cloud SDK credentials path, fall back to Cloud SDK credentials flow...",
            explicit_file,
        )
        return _get_gcloud_sdk_credentials(quota_project_id=quota_project_id)

    credentials, project_id = load_credentials_from_file(
        explicit_file, quota_project_id=quota_project_id
    )
    return credentials, project_id
+
+
def _get_gae_credentials():
    """Gets Google App Engine App Identity credentials and project ID.

    Returns:
        Tuple[Optional[google.auth.app_engine.Credentials], Optional[str]]:
            The credentials and project ID, or ``(None, None)`` when not on
            GAE gen1 or when the App Engine APIs are unavailable.
    """
    # If not GAE gen1, prefer the metadata service even if the GAE APIs are
    # available as per https://google.aip.dev/auth/4115.
    if os.environ.get(environment_vars.LEGACY_APPENGINE_RUNTIME) != "python27":
        return None, None

    # While this library is normally bundled with app_engine, there are
    # some cases where it's not available, so we tolerate ImportError.
    try:
        _LOGGER.debug("Checking for App Engine runtime as part of auth process...")
        import google.auth.app_engine as app_engine
    except ImportError:
        _LOGGER.warning("Import of App Engine auth library failed.")
        return None, None

    try:
        credentials = app_engine.Credentials()
        project_id = app_engine.get_project_id()
        return credentials, project_id
    except EnvironmentError:
        # Grammar fix in the log message: "cannot authentication via" ->
        # "cannot authenticate via".
        _LOGGER.debug(
            "No App Engine library was found so cannot authenticate via App Engine Identity Credentials."
        )
        return None, None
+
+
def _get_gce_credentials(request=None, quota_project_id=None):
    """Gets credentials and project ID from the GCE Metadata Service."""
    # Ping requires a transport, but we want application default credentials
    # to require no arguments. So, we'll use the _http_client transport which
    # uses http.client. This is only acceptable because the metadata server
    # doesn't do SSL and never requires proxies.

    # While this library is normally bundled with compute_engine, there are
    # some cases where it's not available, so we tolerate ImportError.
    try:
        from google.auth import compute_engine
        from google.auth.compute_engine import _metadata
    except ImportError:
        _LOGGER.warning("Import of Compute Engine auth library failed.")
        return None, None

    if request is None:
        request = google.auth.transport._http_client.Request()

    if not _metadata.is_on_gce(request=request):
        _LOGGER.warning(
            "Authentication failed using Compute Engine authentication due to unavailable metadata server."
        )
        return None, None

    # On GCE: the project ID is best-effort; the credentials still work
    # without it.
    try:
        project_id = _metadata.get_project_id(request=request)
    except exceptions.TransportError:
        project_id = None

    credentials = compute_engine.Credentials()
    credentials = _apply_quota_project_id(credentials, quota_project_id)

    return credentials, project_id
+
+
def _get_external_account_credentials(
    info, filename, scopes=None, default_scopes=None, request=None
):
    """Loads external account Credentials from the parsed external account info.

    The credentials information must correspond to a supported external account
    credentials.

    Args:
        info (Mapping[str, str]): The external account info in Google format.
        filename (str): The full path to the credentials file.
        scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary.
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
        request (Optional[google.auth.transport.Request]): An object used to make
            HTTP requests. This is used to determine the associated project ID
            for a workload identity pool resource (external account credentials).
            If not specified, then it will use a
            google.auth.transport.requests.Request client to make requests.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. External account credentials project
            IDs may not always be determined.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the info dictionary
            is in the wrong format or is missing required information.
    """
    # There are currently 3 types of external_account credentials.
    if info.get("subject_token_type") == _AWS_SUBJECT_TOKEN_TYPE:
        # Check if configuration corresponds to an AWS credentials.
        from google.auth import aws

        credentials = aws.Credentials.from_info(
            info, scopes=scopes, default_scopes=default_scopes
        )
    elif (
        info.get("credential_source") is not None
        and info.get("credential_source").get("executable") is not None
    ):
        # An executable credential source selects pluggable auth credentials.
        from google.auth import pluggable

        credentials = pluggable.Credentials.from_info(
            info, scopes=scopes, default_scopes=default_scopes
        )
    else:
        try:
            # Check if configuration corresponds to an Identity Pool credentials.
            from google.auth import identity_pool

            credentials = identity_pool.Credentials.from_info(
                info, scopes=scopes, default_scopes=default_scopes
            )
        except ValueError:
            # If the configuration is invalid or does not correspond to any
            # supported external_account credentials, raise an error.
            raise exceptions.DefaultCredentialsError(
                "Failed to load external account credentials from {}".format(filename)
            )
    if request is None:
        import google.auth.transport.requests

        request = google.auth.transport.requests.Request()

    # Determining the project ID calls Google endpoints, hence the transport.
    return credentials, credentials.get_project_id(request=request)
+
+
def _get_external_account_authorized_user_credentials(
    filename, info, scopes=None, default_scopes=None, request=None
):
    """Load external account authorized user credentials from parsed info.

    Args:
        filename (str): Source of the info, used only in error messages.
        info (Mapping[str, str]): The parsed credentials JSON.
        scopes: Unused; accepted for signature parity with sibling loaders.
        default_scopes: Unused; accepted for signature parity.
        request: Unused; accepted for signature parity.

    Returns:
        Tuple[google.auth.credentials.Credentials, None]: The loaded
            credentials; no project ID is available for this type.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the info is in
            the wrong format.
    """
    try:
        from google.auth import external_account_authorized_user

        credentials = external_account_authorized_user.Credentials.from_info(info)
    except ValueError as caught_exc:
        # Chain the original error, consistent with the other loaders here.
        new_exc = exceptions.DefaultCredentialsError(
            "Failed to load external account authorized user credentials from {}".format(
                filename
            )
        )
        raise new_exc from caught_exc

    return credentials, None
+
+
def _get_authorized_user_credentials(filename, info, scopes=None):
    """Load authorized user (three-legged OAuth) credentials from parsed info."""
    # Alias the import so the local result variable does not shadow it.
    from google.oauth2 import credentials as oauth2_credentials

    try:
        creds = oauth2_credentials.Credentials.from_authorized_user_info(
            info, scopes=scopes
        )
    except ValueError as caught_exc:
        new_exc = exceptions.DefaultCredentialsError(
            "Failed to load authorized user credentials from {}".format(filename),
            caught_exc,
        )
        raise new_exc from caught_exc
    # Authorized user credentials carry no project ID.
    return creds, None
+
+
def _get_service_account_credentials(filename, info, scopes=None, default_scopes=None):
    """Load service account key credentials from parsed JSON info."""
    from google.oauth2 import service_account

    try:
        creds = service_account.Credentials.from_service_account_info(
            info, scopes=scopes, default_scopes=default_scopes
        )
    except ValueError as caught_exc:
        new_exc = exceptions.DefaultCredentialsError(
            "Failed to load service account credentials from {}".format(filename),
            caught_exc,
        )
        raise new_exc from caught_exc
    # Service account keys embed their owning project.
    return creds, info.get("project_id")
+
+
def _get_impersonated_service_account_credentials(filename, info, scopes):
    """Load impersonated service account credentials from parsed JSON info.

    Args:
        filename (str): Source of the info, used only in error messages.
        info (Mapping[str, str]): The parsed impersonated service account
            credentials JSON.
        scopes (Optional[Sequence[str]]): Scopes applied to the impersonated
            credentials.

    Returns:
        Tuple[google.auth.impersonated_credentials.Credentials, None]: The
            loaded credentials; no project ID is available for this type.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the info is in the
            wrong format.
    """
    from google.auth import impersonated_credentials

    try:
        # The impersonation source must itself be a loadable credential; only
        # authorized user and service account sources are supported.
        source_credentials_info = info.get("source_credentials")
        source_credentials_type = source_credentials_info.get("type")
        if source_credentials_type == _AUTHORIZED_USER_TYPE:
            source_credentials, _ = _get_authorized_user_credentials(
                filename, source_credentials_info
            )
        elif source_credentials_type == _SERVICE_ACCOUNT_TYPE:
            source_credentials, _ = _get_service_account_credentials(
                filename, source_credentials_info
            )
        else:
            raise exceptions.InvalidType(
                "source credential of type {} is not supported.".format(
                    source_credentials_type
                )
            )
        # The target principal is the path segment between the final "/" and
        # ":generateAccessToken" in the impersonation URL.
        impersonation_url = info.get("service_account_impersonation_url")
        start_index = impersonation_url.rfind("/")
        end_index = impersonation_url.find(":generateAccessToken")
        if start_index == -1 or end_index == -1 or start_index > end_index:
            raise exceptions.InvalidValue(
                "Cannot extract target principal from {}".format(impersonation_url)
            )
        target_principal = impersonation_url[start_index + 1 : end_index]
        delegates = info.get("delegates")
        quota_project_id = info.get("quota_project_id")
        credentials = impersonated_credentials.Credentials(
            source_credentials,
            target_principal,
            scopes,
            delegates,
            quota_project_id=quota_project_id,
        )
    except ValueError as caught_exc:
        # NOTE(review): only ValueError is wrapped here; the InvalidType /
        # InvalidValue raises above propagate as-is — confirm that is intended.
        msg = "Failed to load impersonated service account credentials from {}".format(
            filename
        )
        new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
        raise new_exc from caught_exc
    return credentials, None
+
+
def _get_gdch_service_account_credentials(filename, info):
    """Load GDCH (Google Distributed Cloud Hosted) service account credentials."""
    from google.oauth2 import gdch_credentials

    try:
        creds = gdch_credentials.ServiceAccountCredentials.from_service_account_info(
            info
        )
    except ValueError as caught_exc:
        new_exc = exceptions.DefaultCredentialsError(
            "Failed to load GDCH service account credentials from {}".format(filename),
            caught_exc,
        )
        raise new_exc from caught_exc
    # GDCH files store the project under "project", not "project_id".
    return creds, info.get("project")
+
+
def get_api_key_credentials(key):
    """Return credentials with the given API key."""
    # Imported lazily, matching the other loaders in this module.
    from google.auth import api_key

    creds = api_key.Credentials(key)
    return creds
+
+
def _apply_quota_project_id(credentials, quota_project_id):
    """Attach a quota project to credentials and warn on problematic ones.

    Uses the explicit ``quota_project_id`` when given, otherwise lets the
    credentials pick one up from the environment.
    """
    if quota_project_id:
        credentials = credentials.with_quota_project(quota_project_id)
    else:
        credentials = credentials.with_quota_project_from_environment()

    from google.oauth2 import credentials as authorized_user_credentials

    # Authorized user credentials without any quota project may hit the
    # Cloud SDK shared-project pitfalls; surface the standing warning.
    is_unscoped_user_creds = isinstance(
        credentials, authorized_user_credentials.Credentials
    ) and not credentials.quota_project_id
    if is_unscoped_user_creds:
        _warn_about_problematic_credentials(credentials)
    return credentials
+
+
def default(scopes=None, request=None, quota_project_id=None, default_scopes=None):
    """Gets the default credentials for the current environment.

    `Application Default Credentials`_ provides an easy way to obtain
    credentials to call Google APIs for server-to-server or local applications.
    This function acquires credentials from the environment in the following
    order:

    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
       to the path of a valid service account JSON private key file, then it is
       loaded and returned. The project ID returned is the project ID defined
       in the service account file if available (some older files do not
       contain project ID information).

       If the environment variable is set to the path of a valid external
       account JSON configuration file (workload identity federation), then the
       configuration file is used to determine and retrieve the external
       credentials from the current environment (AWS, Azure, etc).
       These will then be exchanged for Google access tokens via the Google STS
       endpoint.
       The project ID returned in this case is the one corresponding to the
       underlying workload identity pool resource if determinable.

       If the environment variable is set to the path of a valid GDCH service
       account JSON file (`Google Distributed Cloud Hosted`_), then a GDCH
       credential will be returned. The project ID returned is the project
       specified in the JSON file.
    2. If the `Google Cloud SDK`_ is installed and has application default
       credentials set they are loaded and returned.

       To enable application default credentials with the Cloud SDK run::

           gcloud auth application-default login

       If the Cloud SDK has an active project, the project ID is returned. The
       active project can be set using::

           gcloud config set project

    3. If the application is running in the `App Engine standard environment`_
       (first generation) then the credentials and project ID from the
       `App Identity Service`_ are used.
    4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or
       the `App Engine flexible environment`_ or the `App Engine standard
       environment`_ (second generation) then the credentials and project ID
       are obtained from the `Metadata Service`_.
    5. If no credentials are found,
       :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.

    .. _Application Default Credentials: https://developers.google.com\
            /identity/protocols/application-default-credentials
    .. _Google Cloud SDK: https://cloud.google.com/sdk
    .. _App Engine standard environment: https://cloud.google.com/appengine
    .. _App Identity Service: https://cloud.google.com/appengine/docs/python\
            /appidentity/
    .. _Compute Engine: https://cloud.google.com/compute
    .. _App Engine flexible environment: https://cloud.google.com\
            /appengine/flexible
    .. _Metadata Service: https://cloud.google.com/compute/docs\
            /storing-retrieving-metadata
    .. _Cloud Run: https://cloud.google.com/run
    .. _Google Distributed Cloud Hosted: https://cloud.google.com/blog/topics\
            /hybrid-cloud/announcing-google-distributed-cloud-edge-and-hosted

    Example::

        import google.auth

        credentials, project_id = google.auth.default()

    Args:
        scopes (Sequence[str]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary.
        request (Optional[google.auth.transport.Request]): An object used to make
            HTTP requests. This is used to either detect whether the application
            is running on Compute Engine or to determine the associated project
            ID for a workload identity pool resource (external account
            credentials). If not specified, then it will either use the standard
            library http client to make requests for Compute Engine credentials
            or a google.auth.transport.requests.Request client for external
            account credentials.
        quota_project_id (Optional[str]): The project ID used for
            quota and billing.
        default_scopes (Optional[Sequence[str]]): Default scopes passed by a
            Google client library. Use 'scopes' for user-defined scopes.
    Returns:
        Tuple[~google.auth.credentials.Credentials, Optional[str]]:
            the current environment's credentials and project ID. Project ID
            may be None, which indicates that the Project ID could not be
            ascertained from the environment.

    Raises:
        ~google.auth.exceptions.DefaultCredentialsError:
            If no credentials were found, or if the credentials found were
            invalid.
    """
    from google.auth.credentials import with_scopes_if_required
    from google.auth.credentials import CredentialsWithQuotaProject

    # An explicitly-set project env var always wins over a detected project.
    explicit_project_id = os.environ.get(
        environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)
    )

    # Each checker implements one step of the documented ADC search order and
    # returns (credentials, project_id) or (None, None).
    checkers = (
        # Avoid passing scopes here to prevent passing scopes to user credentials.
        # with_scopes_if_required() below will ensure scopes/default scopes are
        # safely set on the returned credentials since requires_scopes will
        # guard against setting scopes on user credentials.
        lambda: _get_explicit_environ_credentials(quota_project_id=quota_project_id),
        lambda: _get_gcloud_sdk_credentials(quota_project_id=quota_project_id),
        _get_gae_credentials,
        lambda: _get_gce_credentials(request, quota_project_id=quota_project_id),
    )

    for checker in checkers:
        credentials, project_id = checker()
        if credentials is not None:
            credentials = with_scopes_if_required(
                credentials, scopes, default_scopes=default_scopes
            )

            effective_project_id = explicit_project_id or project_id

            # For external account credentials, scopes are required to determine
            # the project ID. Try to get the project ID again if not yet
            # determined.
            if not effective_project_id and callable(
                getattr(credentials, "get_project_id", None)
            ):
                if request is None:
                    import google.auth.transport.requests

                    request = google.auth.transport.requests.Request()
                effective_project_id = credentials.get_project_id(request=request)

            if quota_project_id and isinstance(
                credentials, CredentialsWithQuotaProject
            ):
                credentials = credentials.with_quota_project(quota_project_id)

            if not effective_project_id:
                _LOGGER.warning(
                    "No project ID could be determined. Consider running "
                    "`gcloud config set project` or setting the %s "
                    "environment variable",
                    environment_vars.PROJECT,
                )
            # First checker that yields credentials ends the search.
            return credentials, effective_project_id

    raise exceptions.DefaultCredentialsError(_CLOUD_SDK_MISSING_CREDENTIALS)
diff --git a/contrib/python/google-auth/py3/google/auth/_default_async.py b/contrib/python/google-auth/py3/google/auth/_default_async.py
new file mode 100644
index 0000000000..2e53e20887
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_default_async.py
@@ -0,0 +1,282 @@
+# Copyright 2020 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Application default credentials.
+
+Implements application default credentials and project ID detection.
+"""
+
+import io
+import json
+import os
+
+from google.auth import _default
+from google.auth import environment_vars
+from google.auth import exceptions
+
+
def load_credentials_from_file(filename, scopes=None, quota_project_id=None):
    """Loads Google credentials from a file.

    The credentials file must be a service account key or stored authorized
    user credentials.

    Args:
        filename (str): The full path to the credentials file.
        scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary
        quota_project_id (Optional[str]): The project ID used for
            quota and billing.

    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. Authorized user credentials do not
            have the project ID information.

    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the file is in the
            wrong format or is missing.
    """
    if not os.path.exists(filename):
        raise exceptions.DefaultCredentialsError(
            "File {} was not found.".format(filename)
        )

    with io.open(filename, "r") as file_obj:
        try:
            info = json.load(file_obj)
        except ValueError as caught_exc:
            new_exc = exceptions.DefaultCredentialsError(
                "File {} is not a valid json file.".format(filename), caught_exc
            )
            raise new_exc from caught_exc

    # The type key should indicate that the file is either a service account
    # credentials file or an authorized user credentials file.
    credential_type = info.get("type")

    if credential_type == _default._AUTHORIZED_USER_TYPE:
        # Async variant of the oauth2 user credentials.
        from google.oauth2 import _credentials_async as credentials

        try:
            credentials = credentials.Credentials.from_authorized_user_info(
                info, scopes=scopes
            )
        except ValueError as caught_exc:
            msg = "Failed to load authorized user credentials from {}".format(filename)
            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
            raise new_exc from caught_exc
        if quota_project_id:
            credentials = credentials.with_quota_project(quota_project_id)
        if not credentials.quota_project_id:
            _default._warn_about_problematic_credentials(credentials)
        return credentials, None

    elif credential_type == _default._SERVICE_ACCOUNT_TYPE:
        from google.oauth2 import _service_account_async as service_account

        try:
            # NOTE(review): with_quota_project is applied even when
            # quota_project_id is None (unlike the authorized-user branch
            # above, which guards on truthiness) — confirm this is intended.
            credentials = service_account.Credentials.from_service_account_info(
                info, scopes=scopes
            ).with_quota_project(quota_project_id)
        except ValueError as caught_exc:
            msg = "Failed to load service account credentials from {}".format(filename)
            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
            raise new_exc from caught_exc
        return credentials, info.get("project_id")

    else:
        raise exceptions.DefaultCredentialsError(
            "The file {file} does not have a valid type. "
            "Type is {type}, expected one of {valid_types}.".format(
                file=filename, type=credential_type, valid_types=_default._VALID_TYPES
            )
        )
+
+
def _get_gcloud_sdk_credentials(quota_project_id=None):
    """Gets the credentials and project ID from the Cloud SDK."""
    from google.auth import _cloud_sdk

    # Application default credentials written by
    # `gcloud auth application-default login`.
    adc_path = _cloud_sdk.get_application_default_credentials_path()
    if not os.path.isfile(adc_path):
        return None, None

    credentials, project_id = load_credentials_from_file(
        adc_path, quota_project_id=quota_project_id
    )

    # Fall back to the SDK's active project when the file carries none.
    if not project_id:
        project_id = _cloud_sdk.get_project_id()

    return credentials, project_id
+
+
def _get_explicit_environ_credentials(quota_project_id=None):
    """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
    variable."""
    from google.auth import _cloud_sdk

    cloud_sdk_adc_path = _cloud_sdk.get_application_default_credentials_path()
    explicit_file = os.environ.get(environment_vars.CREDENTIALS)

    if explicit_file is None:
        return None, None

    if explicit_file == cloud_sdk_adc_path:
        # Cloud sdk flow calls gcloud to fetch project id, so if the explicit
        # file path is cloud sdk credentials path, then we should fall back
        # to cloud sdk flow, otherwise project id cannot be obtained.
        return _get_gcloud_sdk_credentials(quota_project_id=quota_project_id)

    credentials, project_id = load_credentials_from_file(
        explicit_file, quota_project_id=quota_project_id
    )
    return credentials, project_id
+
+
def _get_gae_credentials():
    """Gets Google App Engine App Identity credentials and project ID."""
    # While this library is normally bundled with app_engine, there are
    # some cases where it's not available, so we tolerate ImportError.
    # Thin wrapper: delegates to the synchronous implementation in _default.

    return _default._get_gae_credentials()
+
+
def _get_gce_credentials(request=None):
    """Gets credentials and project ID from the GCE Metadata Service."""
    # Ping requires a transport, but we want application default credentials
    # to require no arguments. So, we'll use the _http_client transport which
    # uses http.client. This is only acceptable because the metadata server
    # doesn't do SSL and never requires proxies.

    # While this library is normally bundled with compute_engine, there are
    # some cases where it's not available, so we tolerate ImportError.
    # Thin wrapper: delegates to the synchronous implementation in _default.

    return _default._get_gce_credentials(request)
+
+
+def default_async(scopes=None, request=None, quota_project_id=None):
+    """Gets the default credentials for the current environment.
+
+    `Application Default Credentials`_ provides an easy way to obtain
+    credentials to call Google APIs for server-to-server or local applications.
+    This function acquires credentials from the environment in the following
+    order:
+
+    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+       to the path of a valid service account JSON private key file, then it is
+       loaded and returned. The project ID returned is the project ID defined
+       in the service account file if available (some older files do not
+       contain project ID information).
+    2. If the `Google Cloud SDK`_ is installed and has application default
+       credentials set they are loaded and returned.
+
+       To enable application default credentials with the Cloud SDK run::
+
+            gcloud auth application-default login
+
+       If the Cloud SDK has an active project, the project ID is returned. The
+       active project can be set using::
+
+            gcloud config set project
+
+    3. If the application is running in the `App Engine standard environment`_
+       (first generation) then the credentials and project ID from the
+       `App Identity Service`_ are used.
+    4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or
+       the `App Engine flexible environment`_ or the `App Engine standard
+       environment`_ (second generation) then the credentials and project ID
+       are obtained from the `Metadata Service`_.
+    5. If no credentials are found,
+       :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.
+
+    .. _Application Default Credentials: https://developers.google.com\
+            /identity/protocols/application-default-credentials
+    .. _Google Cloud SDK: https://cloud.google.com/sdk
+    .. _App Engine standard environment: https://cloud.google.com/appengine
+    .. _App Identity Service: https://cloud.google.com/appengine/docs/python\
+            /appidentity/
+    .. _Compute Engine: https://cloud.google.com/compute
+    .. _App Engine flexible environment: https://cloud.google.com\
+            /appengine/flexible
+    .. _Metadata Service: https://cloud.google.com/compute/docs\
+            /storing-retrieving-metadata
+    .. _Cloud Run: https://cloud.google.com/run
+
+    Example::
+
+        import google.auth
+
+        credentials, project_id = google.auth.default()
+
+    Args:
+        scopes (Sequence[str]): The list of scopes for the credentials. If
+            specified, the credentials will automatically be scoped if
+            necessary.
+        request (google.auth.transport.Request): An object used to make
+            HTTP requests. This is used to detect whether the application
+            is running on Compute Engine. If not specified, then it will
+            use the standard library http client to make requests.
+        quota_project_id (Optional[str]): The project ID used for
+            quota and billing.
+    Returns:
+        Tuple[~google.auth.credentials.Credentials, Optional[str]]:
+            the current environment's credentials and project ID. Project ID
+            may be None, which indicates that the Project ID could not be
+            ascertained from the environment.
+
+    Raises:
+        ~google.auth.exceptions.DefaultCredentialsError:
+            If no credentials were found, or if the credentials found were
+            invalid.
+    """
+    # NOTE(review): imported inside the function, presumably to avoid import
+    # cycles at module load time -- confirm before hoisting to module level.
+    from google.auth._credentials_async import with_scopes_if_required
+    from google.auth.credentials import CredentialsWithQuotaProject
+
+    # An explicitly set project always overrides whatever the checkers find.
+    explicit_project_id = os.environ.get(
+        environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)
+    )
+
+    # Credential sources are tried in priority order; the first checker that
+    # yields credentials wins (explicit env var, gcloud SDK, GAE, then GCE).
+    checkers = (
+        lambda: _get_explicit_environ_credentials(quota_project_id=quota_project_id),
+        lambda: _get_gcloud_sdk_credentials(quota_project_id=quota_project_id),
+        _get_gae_credentials,
+        lambda: _get_gce_credentials(request),
+    )
+
+    for checker in checkers:
+        credentials, project_id = checker()
+        if credentials is not None:
+            credentials = with_scopes_if_required(credentials, scopes)
+            if quota_project_id and isinstance(
+                credentials, CredentialsWithQuotaProject
+            ):
+                credentials = credentials.with_quota_project(quota_project_id)
+            effective_project_id = explicit_project_id or project_id
+            if not effective_project_id:
+                _default._LOGGER.warning(
+                    "No project ID could be determined. Consider running "
+                    "`gcloud config set project` or setting the %s "
+                    "environment variable",
+                    environment_vars.PROJECT,
+                )
+            return credentials, effective_project_id
+
+    raise exceptions.DefaultCredentialsError(_default._CLOUD_SDK_MISSING_CREDENTIALS)
diff --git a/contrib/python/google-auth/py3/google/auth/_exponential_backoff.py b/contrib/python/google-auth/py3/google/auth/_exponential_backoff.py
new file mode 100644
index 0000000000..0dd621a949
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_exponential_backoff.py
@@ -0,0 +1,109 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import time
+
+# The default amount of retry attempts
+_DEFAULT_RETRY_TOTAL_ATTEMPTS = 3
+
+# The default initial backoff period (1.0 second).
+_DEFAULT_INITIAL_INTERVAL_SECONDS = 1.0
+
+# The default randomization factor (0.1 which results in a random period ranging
+# between 10% below and 10% above the retry interval).
+_DEFAULT_RANDOMIZATION_FACTOR = 0.1
+
+# The default multiplier value (2 which is 100% increase per back off).
+_DEFAULT_MULTIPLIER = 2.0
+
+"""Exponential Backoff Utility
+
+This is a private module that implements the exponential back off algorithm.
+It can be used as a utility for code that needs to retry on failure, for example
+an HTTP request.
+"""
+
+
+class ExponentialBackoff:
+    """An exponential backoff iterator. This can be used in a for loop to
+    perform requests with exponential backoff.
+
+    Args:
+        total_attempts Optional[int]:
+            The maximum amount of retries that should happen.
+            The default value is 3 attempts.
+        initial_wait_seconds Optional[int]:
+            The amount of time to sleep in the first backoff. This parameter
+            should be in seconds.
+            The default value is 1 second.
+        randomization_factor Optional[float]:
+            The amount of jitter that should be in each backoff. For example,
+            a value of 0.1 will introduce a jitter range of 10% to the
+            current backoff period.
+            The default value is 0.1.
+        multiplier Optional[float]:
+            The backoff multiplier. This adjusts how much each backoff will
+            increase. For example a value of 2.0 doubles the wait on each
+            attempt. If the initial_wait is 1.0 it would look like
+            this sequence [1.0, 2.0, 4.0, 8.0].
+            The default value is 2.0.
+    """
+
+    def __init__(
+        self,
+        total_attempts=_DEFAULT_RETRY_TOTAL_ATTEMPTS,
+        initial_wait_seconds=_DEFAULT_INITIAL_INTERVAL_SECONDS,
+        randomization_factor=_DEFAULT_RANDOMIZATION_FACTOR,
+        multiplier=_DEFAULT_MULTIPLIER,
+    ):
+        self._total_attempts = total_attempts
+        self._initial_wait_seconds = initial_wait_seconds
+
+        # Current interval; grows by ``multiplier`` after each attempt.
+        self._current_wait_in_seconds = self._initial_wait_seconds
+
+        self._randomization_factor = randomization_factor
+        self._multiplier = multiplier
+        self._backoff_count = 0
+
+    def __iter__(self):
+        # Reset state so the same instance can be reused in a new loop.
+        self._backoff_count = 0
+        self._current_wait_in_seconds = self._initial_wait_seconds
+        return self
+
+    def __next__(self):
+        if self._backoff_count >= self._total_attempts:
+            raise StopIteration
+        self._backoff_count += 1
+
+        # Sleep for the current interval +/- jitter before yielding the
+        # attempt number, then grow the interval for the next iteration.
+        jitter_variance = self._current_wait_in_seconds * self._randomization_factor
+        jitter = random.uniform(
+            self._current_wait_in_seconds - jitter_variance,
+            self._current_wait_in_seconds + jitter_variance,
+        )
+
+        time.sleep(jitter)
+
+        self._current_wait_in_seconds *= self._multiplier
+        return self._backoff_count
+
+    @property
+    def total_attempts(self):
+        """The total amount of backoff attempts that will be made."""
+        return self._total_attempts
+
+    @property
+    def backoff_count(self):
+        """The current amount of backoff attempts that have been made."""
+        return self._backoff_count
diff --git a/contrib/python/google-auth/py3/google/auth/_helpers.py b/contrib/python/google-auth/py3/google/auth/_helpers.py
new file mode 100644
index 0000000000..ad2c095f28
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_helpers.py
@@ -0,0 +1,245 @@
+# Copyright 2015 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for commonly used utilities."""
+
+import base64
+import calendar
+import datetime
+import sys
+import urllib
+
+from google.auth import exceptions
+
+# Token server doesn't provide a new a token when doing refresh unless the
+# token is expiring within 30 seconds, so refresh threshold should not be
+# more than 30 seconds. Otherwise auth lib will send tons of refresh requests
+# until 30 seconds before the expiration, and cause a spike of CPU usage.
+REFRESH_THRESHOLD = datetime.timedelta(seconds=20)
+
+
+def copy_docstring(source_class):
+    """Decorator that copies a method's docstring from another class.
+
+    Args:
+        source_class (type): The class that has the documented method.
+
+    Returns:
+        Callable: A decorator that will copy the docstring of the same
+            named method in the source class to the decorated method.
+    """
+
+    def decorator(method):
+        """Decorator implementation.
+
+        Args:
+            method (Callable): The method to copy the docstring to.
+
+        Returns:
+            Callable: the same method passed in with an updated docstring.
+
+        Raises:
+            google.auth.exceptions.InvalidOperation: if the method already has a docstring.
+        """
+        # Refuse to overwrite an explicit docstring -- this decorator is only
+        # meant to fill in missing documentation.
+        if method.__doc__:
+            raise exceptions.InvalidOperation("Method already has a docstring.")
+
+        # Looked up by name, so the source class must define the same method.
+        source_method = getattr(source_class, method.__name__)
+        method.__doc__ = source_method.__doc__
+
+        return method
+
+    return decorator
+
+
+def utcnow():
+    """Returns the current UTC datetime.
+
+    Returns:
+        datetime: The current time in UTC. Note that this is a *naive*
+            datetime (``datetime.utcnow()`` attaches no tzinfo).
+    """
+    return datetime.datetime.utcnow()
+
+
+def datetime_to_secs(value):
+    """Convert a datetime object to the number of seconds since the UNIX epoch.
+
+    Args:
+        value (datetime): The datetime to convert.
+
+    Returns:
+        int: The number of seconds since the UNIX epoch.
+    """
+    # timegm interprets the struct_time as UTC (unlike time.mktime, which
+    # uses the local timezone).
+    return calendar.timegm(value.utctimetuple())
+
+
+def to_bytes(value, encoding="utf-8"):
+    """Converts a string value to bytes, if necessary.
+
+    Args:
+        value (Union[str, bytes]): The value to be converted.
+        encoding (str): The encoding to use to convert unicode to bytes.
+            Defaults to "utf-8".
+
+    Returns:
+        bytes: The original value converted to bytes (if unicode) or as
+            passed in if it started out as bytes.
+
+    Raises:
+        google.auth.exceptions.InvalidValue: If the value could not be converted to bytes.
+    """
+    # Anything that is neither str nor bytes falls through unchanged and is
+    # rejected by the isinstance check below.
+    result = value.encode(encoding) if isinstance(value, str) else value
+    if isinstance(result, bytes):
+        return result
+    else:
+        raise exceptions.InvalidValue(
+            "{0!r} could not be converted to bytes".format(value)
+        )
+
+
+def from_bytes(value):
+    """Converts bytes to a string value, if necessary.
+
+    Args:
+        value (Union[str, bytes]): The value to be converted.
+
+    Returns:
+        str: The original value converted to unicode (if bytes) or as passed in
+            if it started out as unicode.
+
+    Raises:
+        google.auth.exceptions.InvalidValue: If the value could not be converted to unicode.
+    """
+    # Anything that is neither bytes nor str falls through unchanged and is
+    # rejected by the isinstance check below.
+    result = value.decode("utf-8") if isinstance(value, bytes) else value
+    if isinstance(result, str):
+        return result
+    else:
+        raise exceptions.InvalidValue(
+            "{0!r} could not be converted to unicode".format(value)
+        )
+
+
+def update_query(url, params, remove=None):
+    """Updates a URL's query parameters.
+
+    Replaces any current values if they are already present in the URL.
+
+    Args:
+        url (str): The URL to update.
+        params (Mapping[str, str]): A mapping of query parameter
+            keys to values.
+        remove (Sequence[str]): Parameters to remove from the query string.
+
+    Returns:
+        str: The URL with updated query parameters.
+
+    Examples:
+
+        >>> url = 'http://example.com?a=1'
+        >>> update_query(url, {'a': '2'})
+        http://example.com?a=2
+        >>> update_query(url, {'b': '3'})
+        http://example.com?a=1&b=3
+        >>> update_query(url, {'b': '3'}, remove=['a'])
+        http://example.com?b=3
+
+    """
+    if remove is None:
+        remove = []
+
+    # Split the URL into parts.
+    parts = urllib.parse.urlparse(url)
+    # Parse the query string.
+    query_params = urllib.parse.parse_qs(parts.query)
+    # Update the query parameters with the new parameters.
+    query_params.update(params)
+    # Remove any values specified in remove.
+    query_params = {
+        key: value for key, value in query_params.items() if key not in remove
+    }
+    # Re-encode the query string.
+    new_query = urllib.parse.urlencode(query_params, doseq=True)
+    # Unsplit the url.
+    new_parts = parts._replace(query=new_query)
+    return urllib.parse.urlunparse(new_parts)
+
+
+def scopes_to_string(scopes):
+    """Converts scope value to a string suitable for sending to OAuth 2.0
+    authorization servers.
+
+    Args:
+        scopes (Sequence[str]): The sequence of scopes to convert.
+
+    Returns:
+        str: The scopes formatted as a single string.
+    """
+    # OAuth 2.0 defines scope as a space-delimited list (RFC 6749, s3.3).
+    return " ".join(scopes)
+
+
+def string_to_scopes(scopes):
+    """Converts stringified scopes value to a list.
+
+    Args:
+        scopes (Union[Sequence, str]): The string of space-separated scopes
+            to convert.
+    Returns:
+        Sequence(str): The separated scopes.
+    """
+    if not scopes:
+        return []
+
+    # NOTE(review): despite the Union type in the docstring, a non-string
+    # sequence would fail on .split -- callers appear to pass strings only.
+    return scopes.split(" ")
+
+
+def padded_urlsafe_b64decode(value):
+    """Decodes base64 strings lacking padding characters.
+
+    Google infrastructure tends to omit the base64 padding characters.
+
+    Args:
+        value (Union[str, bytes]): The encoded value.
+
+    Returns:
+        bytes: The decoded value
+    """
+    b64string = to_bytes(value)
+    # (-len % 4) yields the 0-3 "=" characters needed to reach a multiple
+    # of four bytes, which base64.urlsafe_b64decode requires.
+    padded = b64string + b"=" * (-len(b64string) % 4)
+    return base64.urlsafe_b64decode(padded)
+
+
+def unpadded_urlsafe_b64encode(value):
+    """Encodes base64 strings removing any padding characters.
+
+    `rfc 7515`_ defines Base64url to NOT include any padding
+    characters, but the stdlib doesn't do that by default.
+
+    .. _rfc 7515: https://tools.ietf.org/html/rfc7515#page-6
+
+    Args:
+        value (Union[str|bytes]): The bytes-like value to encode
+
+    Returns:
+        Union[str|bytes]: The encoded value
+    """
+    return base64.urlsafe_b64encode(value).rstrip(b"=")
+
+
+def is_python_3():
+    """Check if the Python interpreter is Python 2 or 3.
+
+    Returns:
+        bool: True if the Python interpreter is Python 3 and False otherwise.
+    """
+    # NOTE(review): presumably retained for backwards compatibility with
+    # callers; on any supported interpreter this is always True.
+    return sys.version_info > (3, 0)
diff --git a/contrib/python/google-auth/py3/google/auth/_jwt_async.py b/contrib/python/google-auth/py3/google/auth/_jwt_async.py
new file mode 100644
index 0000000000..3a1abc5b85
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_jwt_async.py
@@ -0,0 +1,164 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""JSON Web Tokens
+
+Provides support for creating (encoding) and verifying (decoding) JWTs,
+especially JWTs generated and consumed by Google infrastructure.
+
+See `rfc7519`_ for more details on JWTs.
+
+To encode a JWT use :func:`encode`::
+
+ from google.auth import crypt
+ from google.auth import jwt_async
+
+ signer = crypt.Signer(private_key)
+ payload = {'some': 'payload'}
+ encoded = jwt_async.encode(signer, payload)
+
+To decode a JWT and verify claims use :func:`decode`::
+
+ claims = jwt_async.decode(encoded, certs=public_certs)
+
+You can also skip verification::
+
+ claims = jwt_async.decode(encoded, verify=False)
+
+.. _rfc7519: https://tools.ietf.org/html/rfc7519
+
+
+NOTE: This async support is experimental and marked internal. This surface may
+change in minor releases.
+"""
+
+from google.auth import _credentials_async
+from google.auth import jwt
+
+
+def encode(signer, payload, header=None, key_id=None):
+    """Make a signed JWT.
+
+    Args:
+        signer (google.auth.crypt.Signer): The signer used to sign the JWT.
+        payload (Mapping[str, str]): The JWT payload.
+        header (Mapping[str, str]): Additional JWT header payload.
+        key_id (str): The key id to add to the JWT header. If the
+            signer has a key id it will be used as the default. If this is
+            specified it will override the signer's key id.
+
+    Returns:
+        bytes: The encoded JWT.
+    """
+    # Thin wrapper: JWT encoding itself is synchronous, so delegate directly
+    # to the synchronous google.auth.jwt implementation.
+    return jwt.encode(signer, payload, header, key_id)
+
+
+def decode(token, certs=None, verify=True, audience=None):
+    """Decode and verify a JWT.
+
+    Args:
+        token (str): The encoded JWT.
+        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
+            certificate used to validate the JWT signature. If bytes or string,
+            it must be the public key certificate in PEM format. If a mapping,
+            it must be a mapping of key IDs to public key certificates in PEM
+            format. The mapping must contain the same key ID that's specified
+            in the token's header.
+        verify (bool): Whether to perform signature and claim validation.
+            Verification is done by default.
+        audience (str): The audience claim, 'aud', that this JWT should
+            contain. If None then the JWT's 'aud' parameter is not verified.
+
+    Returns:
+        Mapping[str, str]: The deserialized JSON payload in the JWT.
+
+    Raises:
+        ValueError: if any verification checks failed.
+    """
+
+    # Thin wrapper: JWT decoding itself is synchronous, so delegate directly
+    # to the synchronous google.auth.jwt implementation.
+    return jwt.decode(token, certs, verify, audience)
+
+
+class Credentials(
+    jwt.Credentials, _credentials_async.Signing, _credentials_async.Credentials
+):
+    """Credentials that use a JWT as the bearer token.
+
+    These credentials require an "audience" claim. This claim identifies the
+    intended recipient of the bearer token.
+
+    The constructor arguments determine the claims for the JWT that is
+    sent with requests. Usually, you'll construct these credentials with
+    one of the helper constructors as shown in the next section.
+
+    To create JWT credentials using a Google service account private key
+    JSON file::
+
+        audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher'
+        credentials = jwt_async.Credentials.from_service_account_file(
+            'service-account.json',
+            audience=audience)
+
+    If you already have the service account file loaded and parsed::
+
+        service_account_info = json.load(open('service_account.json'))
+        credentials = jwt_async.Credentials.from_service_account_info(
+            service_account_info,
+            audience=audience)
+
+    Both helper methods pass on arguments to the constructor, so you can
+    specify the JWT claims::
+
+        credentials = jwt_async.Credentials.from_service_account_file(
+            'service-account.json',
+            audience=audience,
+            additional_claims={'meta': 'data'})
+
+    You can also construct the credentials directly if you have a
+    :class:`~google.auth.crypt.Signer` instance::
+
+        credentials = jwt_async.Credentials(
+            signer,
+            issuer='your-issuer',
+            subject='your-subject',
+            audience=audience)
+
+    The claims are considered immutable. If you want to modify the claims,
+    you can easily create another instance using :meth:`with_claims`::
+
+        new_audience = (
+            'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber')
+        new_credentials = credentials.with_claims(audience=new_audience)
+    """
+
+    # The class body is intentionally empty: all behavior is inherited from
+    # jwt.Credentials and the async Signing/Credentials base classes.
+
+
+class OnDemandCredentials(
+    jwt.OnDemandCredentials, _credentials_async.Signing, _credentials_async.Credentials
+):
+    """On-demand JWT credentials.
+
+    Like :class:`Credentials`, this class uses a JWT as the bearer token for
+    authentication. However, this class does not require the audience at
+    construction time. Instead, it will generate a new token on-demand for
+    each request using the request URI as the audience. It caches tokens
+    so that multiple requests to the same URI do not incur the overhead
+    of generating a new token every time.
+
+    This behavior is especially useful for `gRPC`_ clients. A gRPC service may
+    have multiple audience and gRPC clients may not know all of the audiences
+    required for accessing a particular service. With these credentials,
+    no knowledge of the audiences is required ahead of time.
+
+    .. _grpc: http://www.grpc.io/
+    """
+
+    # The class body is intentionally empty: all behavior is inherited from
+    # jwt.OnDemandCredentials and the async Signing/Credentials base classes.
diff --git a/contrib/python/google-auth/py3/google/auth/_oauth2client.py b/contrib/python/google-auth/py3/google/auth/_oauth2client.py
new file mode 100644
index 0000000000..8b83ff23c1
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_oauth2client.py
@@ -0,0 +1,167 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers for transitioning from oauth2client to google-auth.
+
+.. warning::
+ This module is private as it is intended to assist first-party downstream
+ clients with the transition from oauth2client to google-auth.
+"""
+
+from __future__ import absolute_import
+
+from google.auth import _helpers
+import google.auth.app_engine
+import google.auth.compute_engine
+import google.oauth2.credentials
+import google.oauth2.service_account
+
+try:
+ import oauth2client.client # type: ignore
+ import oauth2client.contrib.gce # type: ignore
+ import oauth2client.service_account # type: ignore
+except ImportError as caught_exc:
+ raise ImportError("oauth2client is not installed.") from caught_exc
+
+try:
+ import oauth2client.contrib.appengine # type: ignore
+
+ _HAS_APPENGINE = True
+except ImportError:
+ _HAS_APPENGINE = False
+
+
+_CONVERT_ERROR_TMPL = "Unable to convert {} to a google-auth credentials class."
+
+
+def _convert_oauth2_credentials(credentials):
+    """Converts to :class:`google.oauth2.credentials.Credentials`.
+
+    Args:
+        credentials (Union[oauth2client.client.OAuth2Credentials,
+            oauth2client.client.GoogleCredentials]): The credentials to
+            convert.
+
+    Returns:
+        google.oauth2.credentials.Credentials: The converted credentials.
+    """
+    new_credentials = google.oauth2.credentials.Credentials(
+        token=credentials.access_token,
+        refresh_token=credentials.refresh_token,
+        token_uri=credentials.token_uri,
+        client_id=credentials.client_id,
+        client_secret=credentials.client_secret,
+        scopes=credentials.scopes,
+    )
+
+    # NOTE(review): sets the private _expires attribute directly; there
+    # appears to be no public constructor argument to carry over the expiry.
+    new_credentials._expires = credentials.token_expiry
+
+    return new_credentials
+
+
+def _convert_service_account_credentials(credentials):
+    """Converts to :class:`google.oauth2.service_account.Credentials`.
+
+    Args:
+        credentials (Union[
+            oauth2client.service_account.ServiceAccountCredentials,
+            oauth2client.service_account._JWTAccessCredentials]): The
+            credentials to convert.
+
+    Returns:
+        google.oauth2.service_account.Credentials: The converted credentials.
+    """
+    # Copy so the original oauth2client object's serialization data is not
+    # mutated when the token URI is injected below.
+    info = credentials.serialization_data.copy()
+    info["token_uri"] = credentials.token_uri
+    return google.oauth2.service_account.Credentials.from_service_account_info(info)
+
+
+def _convert_gce_app_assertion_credentials(credentials):
+    """Converts to :class:`google.auth.compute_engine.Credentials`.
+
+    Args:
+        credentials (oauth2client.contrib.gce.AppAssertionCredentials): The
+            credentials to convert.
+
+    Returns:
+        google.auth.compute_engine.Credentials: The converted credentials.
+    """
+    return google.auth.compute_engine.Credentials(
+        service_account_email=credentials.service_account_email
+    )
+
+
+def _convert_appengine_app_assertion_credentials(credentials):
+    """Converts to :class:`google.auth.app_engine.Credentials`.
+
+    Args:
+        credentials (oauth2client.contrib.app_engine.AppAssertionCredentials):
+            The credentials to convert.
+
+    Returns:
+        google.auth.app_engine.Credentials: The converted credentials.
+    """
+    # pylint: disable=invalid-name
+    return google.auth.app_engine.Credentials(
+        scopes=_helpers.string_to_scopes(credentials.scope),
+        service_account_id=credentials.service_account_id,
+    )
+
+
+# Maps each supported oauth2client credential class to its converter.
+_CLASS_CONVERSION_MAP = {
+    oauth2client.client.OAuth2Credentials: _convert_oauth2_credentials,
+    oauth2client.client.GoogleCredentials: _convert_oauth2_credentials,
+    oauth2client.service_account.ServiceAccountCredentials: _convert_service_account_credentials,
+    oauth2client.service_account._JWTAccessCredentials: _convert_service_account_credentials,
+    oauth2client.contrib.gce.AppAssertionCredentials: _convert_gce_app_assertion_credentials,
+}
+
+# The App Engine credential class only exists when the optional
+# oauth2client.contrib.appengine module is importable.
+if _HAS_APPENGINE:
+    _CLASS_CONVERSION_MAP[
+        oauth2client.contrib.appengine.AppAssertionCredentials
+    ] = _convert_appengine_app_assertion_credentials
+
+
+def convert(credentials):
+    """Convert oauth2client credentials to google-auth credentials.
+
+    This class converts:
+
+    - :class:`oauth2client.client.OAuth2Credentials` to
+      :class:`google.oauth2.credentials.Credentials`.
+    - :class:`oauth2client.client.GoogleCredentials` to
+      :class:`google.oauth2.credentials.Credentials`.
+    - :class:`oauth2client.service_account.ServiceAccountCredentials` to
+      :class:`google.oauth2.service_account.Credentials`.
+    - :class:`oauth2client.service_account._JWTAccessCredentials` to
+      :class:`google.oauth2.service_account.Credentials`.
+    - :class:`oauth2client.contrib.gce.AppAssertionCredentials` to
+      :class:`google.auth.compute_engine.Credentials`.
+    - :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to
+      :class:`google.auth.app_engine.Credentials`.
+
+    Args:
+        credentials: An oauth2client credentials instance of one of the
+            types listed above.
+
+    Returns:
+        google.auth.credentials.Credentials: The converted credentials.
+
+    Raises:
+        ValueError: If the credentials could not be converted.
+    """
+
+    # Dispatch on the exact class (not isinstance) -- the map above keys on
+    # concrete oauth2client classes.
+    credentials_class = type(credentials)
+
+    try:
+        return _CLASS_CONVERSION_MAP[credentials_class](credentials)
+    except KeyError as caught_exc:
+        new_exc = ValueError(_CONVERT_ERROR_TMPL.format(credentials_class))
+        raise new_exc from caught_exc
diff --git a/contrib/python/google-auth/py3/google/auth/_service_account_info.py b/contrib/python/google-auth/py3/google/auth/_service_account_info.py
new file mode 100644
index 0000000000..6b64adcaeb
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/_service_account_info.py
@@ -0,0 +1,80 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for loading data from a Google service account file."""
+
+import io
+import json
+
+from google.auth import crypt
+from google.auth import exceptions
+
+
+def from_dict(data, require=None, use_rsa_signer=True):
+    """Validates a dictionary containing Google service account data.
+
+    Creates and returns a :class:`google.auth.crypt.Signer` instance from the
+    private key specified in the data.
+
+    Args:
+        data (Mapping[str, str]): The service account data
+        require (Sequence[str]): List of keys required to be present in the
+            info.
+        use_rsa_signer (Optional[bool]): Whether to use RSA signer or EC signer.
+            We use RSA signer by default.
+
+    Returns:
+        google.auth.crypt.Signer: A signer created from the private key in the
+            service account file.
+
+    Raises:
+        google.auth.exceptions.MalformedError: if the data was in the wrong
+            format, or if one of the required keys is missing.
+    """
+    keys_needed = set(require if require is not None else [])
+
+    missing = keys_needed.difference(data.keys())
+
+    if missing:
+        raise exceptions.MalformedError(
+            "Service account info was not in the expected format, missing "
+            "fields {}.".format(", ".join(missing))
+        )
+
+    # Create a signer.
+    if use_rsa_signer:
+        signer = crypt.RSASigner.from_service_account_info(data)
+    else:
+        signer = crypt.ES256Signer.from_service_account_info(data)
+
+    return signer
+
+
+def from_filename(filename, require=None, use_rsa_signer=True):
+    """Reads a Google service account JSON file and returns its parsed info.
+
+    Args:
+        filename (str): The path to the service account .json file.
+        require (Sequence[str]): List of keys required to be present in the
+            info.
+        use_rsa_signer (Optional[bool]): Whether to use RSA signer or EC signer.
+            We use RSA signer by default.
+
+    Returns:
+        Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
+            info and a signer instance.
+    """
+    # Service account files are JSON; read as UTF-8 explicitly so parsing is
+    # consistent across platforms regardless of the locale default encoding.
+    with io.open(filename, "r", encoding="utf-8") as json_file:
+        data = json.load(json_file)
+        return data, from_dict(data, require=require, use_rsa_signer=use_rsa_signer)
diff --git a/contrib/python/google-auth/py3/google/auth/api_key.py b/contrib/python/google-auth/py3/google/auth/api_key.py
new file mode 100644
index 0000000000..4fdf7f2769
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/api_key.py
@@ -0,0 +1,76 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google API key support.
+This module provides authentication using the `API key`_.
+.. _API key:
+ https://cloud.google.com/docs/authentication/api-keys/
+"""
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+
+
+class Credentials(credentials.Credentials):
+    """API key credentials.
+
+    These credentials use API key to provide authorization to applications.
+    """
+
+    def __init__(self, token):
+        """
+        Args:
+            token (str): API key string
+
+        Raises:
+            google.auth.exceptions.InvalidValue: If the provided API key is
+                not a non-empty string.
+        """
+        super(Credentials, self).__init__()
+        if not token:
+            raise exceptions.InvalidValue("Token must be a non-empty API key string")
+        self.token = token
+
+    @property
+    def expired(self):
+        # API keys never expire.
+        return False
+
+    @property
+    def valid(self):
+        # API keys are always considered valid; there is nothing to refresh.
+        return True
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    def refresh(self, request):
+        # Nothing to refresh for a static API key.
+        return
+
+    def apply(self, headers, token=None):
+        """Apply the API key token to the x-goog-api-key header.
+
+        Args:
+            headers (Mapping): The HTTP request headers.
+            token (Optional[str]): If specified, overrides the current access
+                token.
+        """
+        headers["x-goog-api-key"] = token or self.token
+
+    def before_request(self, request, method, url, headers):
+        """Performs credential-specific before request logic.
+
+        Refreshes the credentials if necessary, then calls :meth:`apply` to
+        apply the token to the x-goog-api-key header.
+
+        Args:
+            request (google.auth.transport.Request): The object used to make
+                HTTP requests.
+            method (str): The request's HTTP method or the RPC method being
+                invoked.
+            url (str): The request's URI or the RPC service's URI.
+            headers (Mapping): The request's headers.
+        """
+        self.apply(headers)
diff --git a/contrib/python/google-auth/py3/google/auth/app_engine.py b/contrib/python/google-auth/py3/google/auth/app_engine.py
new file mode 100644
index 0000000000..7083ee6143
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/app_engine.py
@@ -0,0 +1,180 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google App Engine standard environment support.
+
+This module provides authentication and signing for applications running on App
+Engine in the standard environment using the `App Identity API`_.
+
+
+.. _App Identity API:
+ https://cloud.google.com/appengine/docs/python/appidentity/
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import crypt
+from google.auth import exceptions
+
+# pytype: disable=import-error
+try:
+ from google.appengine.api import app_identity # type: ignore
+except ImportError:
+ app_identity = None # type: ignore
+# pytype: enable=import-error
+
+
class Signer(crypt.Signer):
    """Signer backed by the App Engine App Identity service.

    A drop-in alternative to :class:`google.auth.crypt.Signer` for code
    running in the App Engine standard environment, where the private key is
    managed by the platform rather than by the application.
    """

    @property
    def key_id(self):
        """Optional[str]: The key ID used to identify this private key.

        .. warning::
            This is always ``None``. The key ID used by App Engine can not
            be reliably determined ahead of time.
        """
        return None

    @_helpers.copy_docstring(crypt.Signer)
    def sign(self, message):
        # sign_blob returns a (key_name, signature) pair; only the signature
        # bytes are of interest here.
        _, signature = app_identity.sign_blob(_helpers.to_bytes(message))
        return signature
+
+
def get_project_id():
    """Gets the project ID for the current App Engine application.

    Returns:
        str: The project ID.

    Raises:
        google.auth.exceptions.OSError: If the App Engine APIs are unavailable.
    """
    # pylint: disable=missing-raises-doc
    # Pylint rightfully thinks google.auth.exceptions.OSError is OSError, but
    # doesn't realize it's a valid alias.
    if app_identity is not None:
        return app_identity.get_application_id()
    raise exceptions.OSError("The App Engine APIs are not available.")
+
+
class Credentials(
    credentials.Scoped, credentials.Signing, credentials.CredentialsWithQuotaProject
):
    """App Engine standard environment credentials.

    These credentials use the App Engine App Identity API to obtain access
    tokens.
    """

    def __init__(
        self,
        scopes=None,
        default_scopes=None,
        service_account_id=None,
        quota_project_id=None,
    ):
        """
        Args:
            scopes (Sequence[str]): Scopes to request from the App Identity
                API.
            default_scopes (Sequence[str]): Default scopes passed by a
                Google client library. Use 'scopes' for user-defined scopes.
            service_account_id (str): The service account ID passed into
                :func:`google.appengine.api.app_identity.get_access_token`.
                If not specified, the default application service account
                ID will be used.
            quota_project_id (Optional[str]): The project ID used for quota
                and billing.

        Raises:
            google.auth.exceptions.OSError: If the App Engine APIs are unavailable.
        """
        # pylint: disable=missing-raises-doc
        # Pylint rightfully thinks google.auth.exceptions.OSError is OSError, but doesn't
        # realize it's a valid alias.
        if app_identity is None:
            raise exceptions.OSError("The App Engine APIs are not available.")

        super(Credentials, self).__init__()
        self._scopes = scopes
        self._default_scopes = default_scopes
        self._service_account_id = service_account_id
        self._signer = Signer()
        self._quota_project_id = quota_project_id

    @_helpers.copy_docstring(credentials.Credentials)
    def refresh(self, request):
        # pylint: disable=unused-argument
        # User-provided scopes take precedence over library defaults.
        scopes = self._scopes if self._scopes is not None else self._default_scopes
        token, ttl = app_identity.get_access_token(scopes, self._service_account_id)
        # utcfromtimestamp treats 'ttl' as an absolute POSIX timestamp —
        # presumably the App Identity API returns an expiration time rather
        # than a duration; TODO(review): confirm against the App Identity docs.
        expiry = datetime.datetime.utcfromtimestamp(ttl)

        self.token, self.expiry = token, expiry

    @property
    def service_account_email(self):
        """The service account email."""
        # Lazily resolved from the App Identity API on first access.
        if self._service_account_id is None:
            self._service_account_id = app_identity.get_service_account_name()
        return self._service_account_id

    @property
    def requires_scopes(self):
        """Checks if the credentials requires scopes.

        Returns:
            bool: True if there are no scopes set otherwise False.
        """
        return not self._scopes and not self._default_scopes

    @_helpers.copy_docstring(credentials.Scoped)
    def with_scopes(self, scopes, default_scopes=None):
        return self.__class__(
            scopes=scopes,
            default_scopes=default_scopes,
            service_account_id=self._service_account_id,
            quota_project_id=self.quota_project_id,
        )

    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
    def with_quota_project(self, quota_project_id):
        # Fix: propagate default_scopes so changing the quota project does not
        # silently drop library-provided default scopes (keeps this method
        # consistent with with_scopes above).
        return self.__class__(
            scopes=self._scopes,
            default_scopes=self._default_scopes,
            service_account_id=self._service_account_id,
            quota_project_id=quota_project_id,
        )

    @_helpers.copy_docstring(credentials.Signing)
    def sign_bytes(self, message):
        return self._signer.sign(message)

    @property  # type: ignore
    @_helpers.copy_docstring(credentials.Signing)
    def signer_email(self):
        return self.service_account_email

    @property  # type: ignore
    @_helpers.copy_docstring(credentials.Signing)
    def signer(self):
        return self._signer
diff --git a/contrib/python/google-auth/py3/google/auth/aws.py b/contrib/python/google-auth/py3/google/auth/aws.py
new file mode 100644
index 0000000000..6e0e4e864f
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/aws.py
@@ -0,0 +1,777 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""AWS Credentials and AWS Signature V4 Request Signer.
+
+This module provides credentials to access Google Cloud resources from Amazon
+Web Services (AWS) workloads. These credentials are recommended over the
+use of service account credentials in AWS as they do not involve the management
+of long-live service account private keys.
+
+AWS Credentials are initialized using external_account arguments which are
+typically loaded from the external credentials JSON file.
+Unlike other Credentials that can be initialized with a list of explicit
+arguments, secrets or credentials, external account clients use the
+environment and hints/guidelines provided by the external_account JSON
+file to retrieve credentials and exchange them for Google access tokens.
+
+This module also provides a basic implementation of the
+`AWS Signature Version 4`_ request signing algorithm.
+
+AWS Credentials use serialized signed requests to the
+`AWS STS GetCallerIdentity`_ API that can be exchanged for Google access tokens
+via the GCP STS endpoint.
+
+.. _AWS Signature Version 4: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+.. _AWS STS GetCallerIdentity: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html
+"""
+
+import hashlib
+import hmac
+import http.client as http_client
+import json
+import os
+import posixpath
+import re
+import urllib
+from urllib.parse import urljoin
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import external_account
+
# AWS Signature Version 4 signing algorithm identifier.
_AWS_ALGORITHM = "AWS4-HMAC-SHA256"
# The termination string for the AWS credential scope value as defined in
# https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
_AWS_REQUEST_TYPE = "aws4_request"
# The AWS authorization header name for the security session token if available.
# Header names are lowercase to match the canonicalization performed when
# building the signed request.
_AWS_SECURITY_TOKEN_HEADER = "x-amz-security-token"
# The AWS authorization header name for the auto-generated date.
_AWS_DATE_HEADER = "x-amz-date"
+
+
class RequestSigner(object):
    """Implements an AWS request signer based on the AWS Signature Version 4
    signing process.

    https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
    """

    def __init__(self, region_name):
        """Instantiates an AWS request signer used to compute authenticated signed
        requests to AWS APIs based on the AWS Signature Version 4 signing process.

        Args:
            region_name (str): The AWS region to use.
        """

        self._region_name = region_name

    def get_request_options(
        self,
        aws_security_credentials,
        url,
        method,
        request_payload="",
        additional_headers=None,
    ):
        """Generates the signed request for the provided HTTP request for calling
        an AWS API. This follows the steps described at:
        https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html

        Args:
            aws_security_credentials (Mapping[str, str]): A dictionary containing
                the AWS security credentials ("access_key_id",
                "secret_access_key" and optionally "security_token").
            url (str): The AWS service URL containing the canonical URI and
                query string.
            method (str): The HTTP method used to call this API.
            request_payload (Optional[str]): The optional request payload if
                available.
            additional_headers (Optional[Mapping[str, str]]): The optional
                additional headers needed for the requested AWS API.

        Returns:
            Mapping[str, str]: The AWS signed request dictionary object.

        Raises:
            google.auth.exceptions.InvalidResource: If the provided URL is not
                a valid https URL with a hostname.
        """
        # Get AWS credentials.
        access_key = aws_security_credentials.get("access_key_id")
        secret_key = aws_security_credentials.get("secret_access_key")
        security_token = aws_security_credentials.get("security_token")

        # Fix: the default for additional_headers used to be a shared mutable
        # dict ({}); use None as the default and normalize here instead.
        additional_headers = additional_headers or {}

        uri = urllib.parse.urlparse(url)
        # Normalize the URL path. This is needed for the canonical_uri.
        # os.path.normpath can't be used since it normalizes "/" paths
        # to "\\" in Windows OS.
        normalized_uri = urllib.parse.urlparse(
            urljoin(url, posixpath.normpath(uri.path))
        )
        # Validate provided URL.
        if not uri.hostname or uri.scheme != "https":
            raise exceptions.InvalidResource("Invalid AWS service URL")

        header_map = _generate_authentication_header_map(
            host=uri.hostname,
            canonical_uri=normalized_uri.path or "/",
            canonical_querystring=_get_canonical_querystring(uri.query),
            method=method,
            region=self._region_name,
            access_key=access_key,
            secret_key=secret_key,
            security_token=security_token,
            request_payload=request_payload,
            additional_headers=additional_headers,
        )
        headers = {
            "Authorization": header_map.get("authorization_header"),
            "host": uri.hostname,
        }
        # Add x-amz-date if available.
        if "amz_date" in header_map:
            headers[_AWS_DATE_HEADER] = header_map.get("amz_date")
        # Append additional optional headers, eg. X-Amz-Target, Content-Type, etc.
        for key in additional_headers:
            headers[key] = additional_headers[key]

        # Add session token if available.
        if security_token is not None:
            headers[_AWS_SECURITY_TOKEN_HEADER] = security_token

        signed_request = {"url": url, "method": method, "headers": headers}
        if request_payload:
            signed_request["data"] = request_payload
        return signed_request
+
+
+def _get_canonical_querystring(query):
+ """Generates the canonical query string given a raw query string.
+ Logic is based on
+ https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+
+ Args:
+ query (str): The raw query string.
+
+ Returns:
+ str: The canonical query string.
+ """
+ # Parse raw query string.
+ querystring = urllib.parse.parse_qs(query)
+ querystring_encoded_map = {}
+ for key in querystring:
+ quote_key = urllib.parse.quote(key, safe="-_.~")
+ # URI encode key.
+ querystring_encoded_map[quote_key] = []
+ for item in querystring[key]:
+ # For each key, URI encode all values for that key.
+ querystring_encoded_map[quote_key].append(
+ urllib.parse.quote(item, safe="-_.~")
+ )
+ # Sort values for each key.
+ querystring_encoded_map[quote_key].sort()
+ # Sort keys.
+ sorted_keys = list(querystring_encoded_map.keys())
+ sorted_keys.sort()
+ # Reconstruct the query string. Preserve keys with multiple values.
+ querystring_encoded_pairs = []
+ for key in sorted_keys:
+ for item in querystring_encoded_map[key]:
+ querystring_encoded_pairs.append("{}={}".format(key, item))
+ return "&".join(querystring_encoded_pairs)
+
+
+def _sign(key, msg):
+ """Creates the HMAC-SHA256 hash of the provided message using the provided
+ key.
+
+ Args:
+ key (str): The HMAC-SHA256 key to use.
+ msg (str): The message to hash.
+
+ Returns:
+ str: The computed hash bytes.
+ """
+ return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
+
+
+def _get_signing_key(key, date_stamp, region_name, service_name):
+ """Calculates the signing key used to calculate the signature for
+ AWS Signature Version 4 based on:
+ https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
+
+ Args:
+ key (str): The AWS secret access key.
+ date_stamp (str): The '%Y%m%d' date format.
+ region_name (str): The AWS region.
+ service_name (str): The AWS service name, eg. sts.
+
+ Returns:
+ str: The signing key bytes.
+ """
+ k_date = _sign(("AWS4" + key).encode("utf-8"), date_stamp)
+ k_region = _sign(k_date, region_name)
+ k_service = _sign(k_region, service_name)
+ k_signing = _sign(k_service, "aws4_request")
+ return k_signing
+
+
def _generate_authentication_header_map(
    host,
    canonical_uri,
    canonical_querystring,
    method,
    region,
    access_key,
    secret_key,
    security_token,
    request_payload="",
    additional_headers=None,
):
    """Generates the authentication header map needed for generating the AWS
    Signature Version 4 signed request.

    Args:
        host (str): The AWS service URL hostname.
        canonical_uri (str): The AWS service URL path name.
        canonical_querystring (str): The AWS service URL query string.
        method (str): The HTTP method used to call this API.
        region (str): The AWS region.
        access_key (str): The AWS access key ID.
        secret_key (str): The AWS secret access key.
        security_token (Optional[str]): The AWS security session token. This is
            available for temporary sessions.
        request_payload (Optional[str]): The optional request payload if
            available.
        additional_headers (Optional[Mapping[str, str]]): The optional
            additional headers needed for the requested AWS API.

    Returns:
        Mapping[str, str]: The AWS authentication header dictionary object.
            This contains the x-amz-date and authorization header information.
    """
    # Fix: the default used to be a shared mutable dict ({}); use None and
    # normalize here to avoid the mutable-default-argument pitfall.
    additional_headers = additional_headers or {}

    # iam.amazonaws.com host => iam service.
    # sts.us-east-2.amazonaws.com host => sts service.
    service_name = host.split(".")[0]

    current_time = _helpers.utcnow()
    amz_date = current_time.strftime("%Y%m%dT%H%M%SZ")
    date_stamp = current_time.strftime("%Y%m%d")

    # Change all additional headers to be lower case.
    full_headers = {}
    for key in additional_headers:
        full_headers[key.lower()] = additional_headers[key]
    # Add AWS session token if available.
    if security_token is not None:
        full_headers[_AWS_SECURITY_TOKEN_HEADER] = security_token

    # Required headers
    full_headers["host"] = host
    # Do not use generated x-amz-date if the date header is provided.
    # Previously the date was not fixed with x-amz- and could be provided
    # manually.
    # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.req
    if "date" not in full_headers:
        full_headers[_AWS_DATE_HEADER] = amz_date

    # Header keys need to be sorted alphabetically.
    canonical_headers = ""
    header_keys = list(full_headers.keys())
    header_keys.sort()
    for key in header_keys:
        canonical_headers = "{}{}:{}\n".format(
            canonical_headers, key, full_headers[key]
        )
    signed_headers = ";".join(header_keys)

    payload_hash = hashlib.sha256((request_payload or "").encode("utf-8")).hexdigest()

    # https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    canonical_request = "{}\n{}\n{}\n{}\n{}\n{}".format(
        method,
        canonical_uri,
        canonical_querystring,
        canonical_headers,
        signed_headers,
        payload_hash,
    )

    credential_scope = "{}/{}/{}/{}".format(
        date_stamp, region, service_name, _AWS_REQUEST_TYPE
    )

    # https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
    string_to_sign = "{}\n{}\n{}\n{}".format(
        _AWS_ALGORITHM,
        amz_date,
        credential_scope,
        hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
    )

    # https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
    signing_key = _get_signing_key(secret_key, date_stamp, region, service_name)
    signature = hmac.new(
        signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
    ).hexdigest()

    # https://docs.aws.amazon.com/general/latest/gr/sigv4-add-signature-to-request.html
    authorization_header = "{} Credential={}/{}, SignedHeaders={}, Signature={}".format(
        _AWS_ALGORITHM, access_key, credential_scope, signed_headers, signature
    )

    authentication_header = {"authorization_header": authorization_header}
    # Do not use generated x-amz-date if the date header is provided.
    if "date" not in full_headers:
        authentication_header["amz_date"] = amz_date
    return authentication_header
+
+
class Credentials(external_account.Credentials):
    """AWS external account credentials.
    This is used to exchange serialized AWS signature v4 signed requests to
    AWS STS GetCallerIdentity service for Google access tokens.
    """

    def __init__(
        self,
        audience,
        subject_token_type,
        token_url,
        credential_source=None,
        *args,
        **kwargs
    ):
        """Instantiates an AWS workload external account credentials object.

        Args:
            audience (str): The STS audience field.
            subject_token_type (str): The subject token type.
            token_url (str): The STS endpoint URL.
            credential_source (Mapping): The credential source dictionary used
                to provide instructions on how to retrieve external credential
                to be exchanged for Google access tokens.
            args (List): Optional positional arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
            kwargs (Mapping): Optional keyword arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.

        Raises:
            google.auth.exceptions.RefreshError: If an error is encountered during
                access token retrieval logic.
            ValueError: For invalid parameters.

        .. note:: Typically one of the helper constructors
            :meth:`from_file` or
            :meth:`from_info` are used instead of calling the constructor directly.
        """
        super(Credentials, self).__init__(
            audience=audience,
            subject_token_type=subject_token_type,
            token_url=token_url,
            credential_source=credential_source,
            *args,
            **kwargs
        )
        credential_source = credential_source or {}
        self._environment_id = credential_source.get("environment_id") or ""
        self._region_url = credential_source.get("region_url")
        self._security_credentials_url = credential_source.get("url")
        self._cred_verification_url = credential_source.get(
            "regional_cred_verification_url"
        )
        self._imdsv2_session_token_url = credential_source.get(
            "imdsv2_session_token_url"
        )
        # Region and request signer are resolved lazily on the first
        # retrieve_subject_token() call.
        self._region = None
        self._request_signer = None
        self._target_resource = audience

        # Get the environment ID. Currently, only one version supported (v1).
        matches = re.match(r"^(aws)([\d]+)$", self._environment_id)
        if matches:
            env_id, env_version = matches.groups()
        else:
            env_id, env_version = (None, None)

        if env_id != "aws" or self._cred_verification_url is None:
            raise exceptions.InvalidResource(
                "No valid AWS 'credential_source' provided"
            )
        # When env_id == "aws" the regex above guarantees env_version is all
        # digits, so int() cannot raise here.
        elif int(env_version or "") != 1:
            raise exceptions.InvalidValue(
                "aws version '{}' is not supported in the current build.".format(
                    env_version
                )
            )

    def retrieve_subject_token(self, request):
        """Retrieves the subject token using the credential_source object.
        The subject token is a serialized `AWS GetCallerIdentity signed request`_.

        The logic is summarized as:

        Retrieve the AWS region from the AWS_REGION or AWS_DEFAULT_REGION
        environment variable or from the AWS metadata server availability-zone
        if not found in the environment variable.

        Check AWS credentials in environment variables. If not found, retrieve
        from the AWS metadata server security-credentials endpoint.

        When retrieving AWS credentials from the metadata server
        security-credentials endpoint, the AWS role needs to be determined by
        calling the security-credentials endpoint without any argument. Then the
        credentials can be retrieved via: security-credentials/role_name

        Generate the signed request to AWS STS GetCallerIdentity action.

        Inject x-goog-cloud-target-resource into header and serialize the
        signed request. This will be the subject-token to pass to GCP STS.

        .. _AWS GetCallerIdentity signed request:
            https://cloud.google.com/iam/docs/access-resources-aws#exchange-token

        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
        Returns:
            str: The retrieved subject token.
        """
        # Fetch the session token required to make meta data endpoint calls to aws.
        if (
            request is not None
            and self._imdsv2_session_token_url is not None
            and self._should_use_metadata_server()
        ):
            headers = {"X-aws-ec2-metadata-token-ttl-seconds": "300"}

            imdsv2_session_token_response = request(
                url=self._imdsv2_session_token_url, method="PUT", headers=headers
            )

            if imdsv2_session_token_response.status != 200:
                raise exceptions.RefreshError(
                    "Unable to retrieve AWS Session Token",
                    imdsv2_session_token_response.data,
                )

            imdsv2_session_token = imdsv2_session_token_response.data
        else:
            imdsv2_session_token = None

        # Initialize the request signer if not yet initialized after determining
        # the current AWS region.
        if self._request_signer is None:
            self._region = self._get_region(
                request, self._region_url, imdsv2_session_token
            )
            self._request_signer = RequestSigner(self._region)

        # Retrieve the AWS security credentials needed to generate the signed
        # request.
        aws_security_credentials = self._get_security_credentials(
            request, imdsv2_session_token
        )
        # Generate the signed request to AWS STS GetCallerIdentity API.
        # Use the required regional endpoint. Otherwise, the request will fail.
        request_options = self._request_signer.get_request_options(
            aws_security_credentials,
            self._cred_verification_url.replace("{region}", self._region),
            "POST",
        )
        # The GCP STS endpoint expects the headers to be formatted as:
        # [
        #   {key: 'x-amz-date', value: '...'},
        #   {key: 'Authorization', value: '...'},
        #   ...
        # ]
        # And then serialized as:
        # quote(json.dumps({
        #   url: '...',
        #   method: 'POST',
        #   headers: [{key: 'x-amz-date', value: '...'}, ...]
        # }))
        request_headers = request_options.get("headers")
        # The full, canonical resource name of the workload identity pool
        # provider, with or without the HTTPS prefix.
        # Including this header as part of the signature is recommended to
        # ensure data integrity.
        request_headers["x-goog-cloud-target-resource"] = self._target_resource

        # Serialize AWS signed request.
        # Keeping inner keys in sorted order makes testing easier for Python
        # versions <=3.5 as the stringified JSON string would have a predictable
        # key order.
        aws_signed_req = {}
        aws_signed_req["url"] = request_options.get("url")
        aws_signed_req["method"] = request_options.get("method")
        aws_signed_req["headers"] = []
        # Reformat header to GCP STS expected format.
        for key in sorted(request_headers.keys()):
            aws_signed_req["headers"].append(
                {"key": key, "value": request_headers[key]}
            )

        return urllib.parse.quote(
            json.dumps(aws_signed_req, separators=(",", ":"), sort_keys=True)
        )

    def _get_region(self, request, url, imdsv2_session_token):
        """Retrieves the current AWS region from either the AWS_REGION or
        AWS_DEFAULT_REGION environment variable or from the AWS metadata server.

        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
            url (str): The AWS metadata server region URL.
            imdsv2_session_token (str): The AWS IMDSv2 session token to be added as a
                header in the requests to AWS metadata endpoint.

        Returns:
            str: The current AWS region.

        Raises:
            google.auth.exceptions.RefreshError: If an error occurs while
                retrieving the AWS region.
        """
        # The AWS metadata server is not available in some AWS environments
        # such as AWS lambda. Instead, it is available via environment
        # variable.
        env_aws_region = os.environ.get(environment_vars.AWS_REGION)
        if env_aws_region is not None:
            return env_aws_region

        env_aws_region = os.environ.get(environment_vars.AWS_DEFAULT_REGION)
        if env_aws_region is not None:
            return env_aws_region

        if not self._region_url:
            raise exceptions.RefreshError("Unable to determine AWS region")

        headers = None
        if imdsv2_session_token is not None:
            headers = {"X-aws-ec2-metadata-token": imdsv2_session_token}

        response = request(url=self._region_url, method="GET", headers=headers)

        # Support both string and bytes type response.data.
        response_body = (
            response.data.decode("utf-8")
            if hasattr(response.data, "decode")
            else response.data
        )

        if response.status != 200:
            raise exceptions.RefreshError(
                "Unable to retrieve AWS region", response_body
            )

        # This endpoint will return the region in format: us-east-2b.
        # Only the us-east-2 part should be used. The trailing availability
        # zone letter is stripped off.
        return response_body[:-1]

    def _get_security_credentials(self, request, imdsv2_session_token):
        """Retrieves the AWS security credentials required for signing AWS
        requests from either the AWS security credentials environment variables
        or from the AWS metadata server.

        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
            imdsv2_session_token (str): The AWS IMDSv2 session token to be added as a
                header in the requests to AWS metadata endpoint.

        Returns:
            Mapping[str, str]: The AWS security credentials dictionary object.

        Raises:
            google.auth.exceptions.RefreshError: If an error occurs while
                retrieving the AWS security credentials.
        """

        # Check environment variables for permanent credentials first.
        # https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html
        env_aws_access_key_id = os.environ.get(environment_vars.AWS_ACCESS_KEY_ID)
        env_aws_secret_access_key = os.environ.get(
            environment_vars.AWS_SECRET_ACCESS_KEY
        )
        # This is normally not available for permanent credentials.
        env_aws_session_token = os.environ.get(environment_vars.AWS_SESSION_TOKEN)
        if env_aws_access_key_id and env_aws_secret_access_key:
            return {
                "access_key_id": env_aws_access_key_id,
                "secret_access_key": env_aws_secret_access_key,
                "security_token": env_aws_session_token,
            }

        # Get role name.
        role_name = self._get_metadata_role_name(request, imdsv2_session_token)

        # Get security credentials.
        credentials = self._get_metadata_security_credentials(
            request, role_name, imdsv2_session_token
        )

        # Normalize the metadata server response keys to the lowercase names
        # used throughout this module.
        return {
            "access_key_id": credentials.get("AccessKeyId"),
            "secret_access_key": credentials.get("SecretAccessKey"),
            "security_token": credentials.get("Token"),
        }

    def _get_metadata_security_credentials(
        self, request, role_name, imdsv2_session_token
    ):
        """Retrieves the AWS security credentials required for signing AWS
        requests from the AWS metadata server.

        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
            role_name (str): The AWS role name required by the AWS metadata
                server security_credentials endpoint in order to return the
                credentials.
            imdsv2_session_token (str): The AWS IMDSv2 session token to be added as a
                header in the requests to AWS metadata endpoint.

        Returns:
            Mapping[str, str]: The AWS metadata server security credentials
                response.

        Raises:
            google.auth.exceptions.RefreshError: If an error occurs while
                retrieving the AWS security credentials.
        """
        headers = {"Content-Type": "application/json"}
        if imdsv2_session_token is not None:
            headers["X-aws-ec2-metadata-token"] = imdsv2_session_token

        response = request(
            url="{}/{}".format(self._security_credentials_url, role_name),
            method="GET",
            headers=headers,
        )

        # support both string and bytes type response.data
        response_body = (
            response.data.decode("utf-8")
            if hasattr(response.data, "decode")
            else response.data
        )

        if response.status != http_client.OK:
            raise exceptions.RefreshError(
                "Unable to retrieve AWS security credentials", response_body
            )

        credentials_response = json.loads(response_body)

        return credentials_response

    def _get_metadata_role_name(self, request, imdsv2_session_token):
        """Retrieves the AWS role currently attached to the current AWS
        workload by querying the AWS metadata server. This is needed for the
        AWS metadata server security credentials endpoint in order to retrieve
        the AWS security credentials needed to sign requests to AWS APIs.

        Args:
            request (google.auth.transport.Request): A callable used to make
                HTTP requests.
            imdsv2_session_token (str): The AWS IMDSv2 session token to be added as a
                header in the requests to AWS metadata endpoint.

        Returns:
            str: The AWS role name.

        Raises:
            google.auth.exceptions.RefreshError: If an error occurs while
                retrieving the AWS role name.
        """
        if self._security_credentials_url is None:
            raise exceptions.RefreshError(
                "Unable to determine the AWS metadata server security credentials endpoint"
            )

        headers = None
        if imdsv2_session_token is not None:
            headers = {"X-aws-ec2-metadata-token": imdsv2_session_token}

        response = request(
            url=self._security_credentials_url, method="GET", headers=headers
        )

        # support both string and bytes type response.data
        response_body = (
            response.data.decode("utf-8")
            if hasattr(response.data, "decode")
            else response.data
        )

        if response.status != http_client.OK:
            raise exceptions.RefreshError(
                "Unable to retrieve AWS role name", response_body
            )

        return response_body

    def _should_use_metadata_server(self):
        """Checks if the AWS metadata server is needed to supply region or
        security credentials.

        Returns:
            bool: True if either the region or the security credentials cannot
                be fully sourced from environment variables.
        """
        # The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION.
        # The metadata server should be used if it cannot be retrieved from one of
        # these environment variables.
        if not os.environ.get(environment_vars.AWS_REGION) and not os.environ.get(
            environment_vars.AWS_DEFAULT_REGION
        ):
            return True

        # AWS security credentials can be retrieved from the AWS_ACCESS_KEY_ID
        # and AWS_SECRET_ACCESS_KEY environment variables. The metadata server
        # should be used if either of these are not available.
        if not os.environ.get(environment_vars.AWS_ACCESS_KEY_ID) or not os.environ.get(
            environment_vars.AWS_SECRET_ACCESS_KEY
        ):
            return True

        return False

    def _create_default_metrics_options(self):
        """Builds the default metrics options, tagging this credential type
        with the "aws" source label.

        Returns:
            Mapping[str, str]: The metrics options mapping.
        """
        metrics_options = super(Credentials, self)._create_default_metrics_options()
        metrics_options["source"] = "aws"
        return metrics_options

    @classmethod
    def from_info(cls, info, **kwargs):
        """Creates an AWS Credentials instance from parsed external account info.

        Args:
            info (Mapping[str, str]): The AWS external account info in Google
                format.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.aws.Credentials: The constructed credentials.

        Raises:
            ValueError: For invalid parameters.
        """
        return super(Credentials, cls).from_info(info, **kwargs)

    @classmethod
    def from_file(cls, filename, **kwargs):
        """Creates an AWS Credentials instance from an external account json file.

        Args:
            filename (str): The path to the AWS external account json file.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.aws.Credentials: The constructed credentials.
        """
        return super(Credentials, cls).from_file(filename, **kwargs)
diff --git a/contrib/python/google-auth/py3/google/auth/compute_engine/__init__.py b/contrib/python/google-auth/py3/google/auth/compute_engine/__init__.py
new file mode 100644
index 0000000000..5c84234e93
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/compute_engine/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Compute Engine authentication."""
+
+from google.auth.compute_engine.credentials import Credentials
+from google.auth.compute_engine.credentials import IDTokenCredentials
+
+
+__all__ = ["Credentials", "IDTokenCredentials"]
diff --git a/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py b/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py
new file mode 100644
index 0000000000..04abe178f5
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/compute_engine/_metadata.py
@@ -0,0 +1,322 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides helper methods for talking to the Compute Engine metadata server.
+
+See https://cloud.google.com/compute/docs/metadata for more details.
+"""
+
+import datetime
+import http.client as http_client
+import json
+import logging
+import os
+from urllib.parse import urljoin
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import metrics
+
_LOGGER = logging.getLogger(__name__)

# Environment variable GCE_METADATA_HOST is originally named
# GCE_METADATA_ROOT. For compatibility reasons, the new variable is
# checked first; if it is not set, the system falls back to the
# old variable.
_GCE_METADATA_HOST = os.getenv(environment_vars.GCE_METADATA_HOST, None)
if not _GCE_METADATA_HOST:
    _GCE_METADATA_HOST = os.getenv(
        environment_vars.GCE_METADATA_ROOT, "metadata.google.internal"
    )
# Base URL for all metadata lookups.
_METADATA_ROOT = "http://{}/computeMetadata/v1/".format(_GCE_METADATA_HOST)

# This is used to ping the metadata server; using the literal IP address
# avoids the cost of a DNS lookup.
_METADATA_IP_ROOT = "http://{}".format(
    os.getenv(environment_vars.GCE_METADATA_IP, "169.254.169.254")
)
# The metadata server identifies itself with this response header/value pair.
_METADATA_FLAVOR_HEADER = "metadata-flavor"
_METADATA_FLAVOR_VALUE = "Google"
_METADATA_HEADERS = {_METADATA_FLAVOR_HEADER: _METADATA_FLAVOR_VALUE}

# Timeout in seconds to wait for the GCE metadata server when detecting the
# GCE environment; overridable via the GCE_METADATA_TIMEOUT env variable.
try:
    _METADATA_DEFAULT_TIMEOUT = int(os.getenv("GCE_METADATA_TIMEOUT", 3))
except ValueError:  # pragma: NO COVER
    _METADATA_DEFAULT_TIMEOUT = 3

# Constants used for GCE residency detection via SMBIOS (Linux only).
_GOOGLE = "Google"
_GCE_PRODUCT_NAME_FILE = "/sys/class/dmi/id/product_name"
+
+
def is_on_gce(request):
    """Determine whether this code is running on Google Compute Engine.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.

    Returns:
        bool: True if the code runs on Google Compute Engine, False otherwise.
    """
    # The metadata server is authoritative when reachable.
    if ping(request):
        return True

    if os.name != "nt":
        # Fall back to SMBIOS-based residency detection on Linux.
        return detect_gce_residency_linux()

    # TODO: implement GCE residency detection on Windows
    return False
+
+
def detect_gce_residency_linux():
    """Detect GCE residency via the SMBIOS product-name file on Linux.

    Returns:
        bool: True if the GCE product name file identifies Google hardware,
            False otherwise (including when the file cannot be read).
    """
    try:
        with open(_GCE_PRODUCT_NAME_FILE, "r") as file_obj:
            product_name = file_obj.read()
    except Exception:
        # A missing or unreadable file simply means "not on GCE".
        return False

    return product_name.strip().startswith(_GOOGLE)
+
+
def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3):
    """Check whether the metadata server is reachable.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        timeout (int): How long to wait for the metadata server to respond.
        retry_count (int): How many attempts to make against the metadata
            server, each bounded by the above timeout.

    Returns:
        bool: True if the metadata server answered with the expected
            metadata flavor, False otherwise.
    """
    # NOTE: The explicit ``timeout`` is a workaround. The underlying
    # issue is that resolving an unknown host on some networks will take
    # 20-30 seconds; making this timeout short fixes the issue, but
    # could lead to false negatives in the event that we are on GCE, but
    # the metadata resolution was particularly slow. The latter case is
    # "unlikely".
    headers = dict(_METADATA_HEADERS)
    headers[metrics.API_CLIENT_HEADER] = metrics.mds_ping()

    for attempt in range(retry_count):
        try:
            response = request(
                url=_METADATA_IP_ROOT, method="GET", headers=headers, timeout=timeout
            )
        except exceptions.TransportError as e:
            _LOGGER.warning(
                "Compute Engine Metadata server unavailable on "
                "attempt %s of %s. Reason: %s",
                attempt + 1,
                retry_count,
                e,
            )
            continue

        # A real metadata server replies 200 with the Google flavor header.
        flavor = response.headers.get(_METADATA_FLAVOR_HEADER)
        return response.status == http_client.OK and flavor == _METADATA_FLAVOR_VALUE

    return False
+
+
def get(
    request,
    path,
    root=_METADATA_ROOT,
    params=None,
    recursive=False,
    retry_count=5,
    headers=None,
):
    """Fetch a resource from the metadata server.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        path (str): The resource to retrieve. For example,
            ``'instance/service-accounts/default'``.
        root (str): The full path to the metadata server root.
        params (Optional[Mapping[str, str]]): A mapping of query parameter
            keys to values.
        recursive (bool): Whether to do a recursive query of metadata. See
            https://cloud.google.com/compute/docs/metadata#aggcontents for more
            details.
        retry_count (int): How many times to attempt connecting to metadata
            server using above timeout.
        headers (Optional[Mapping[str, str]]): Headers for the request.

    Returns:
        Union[Mapping, str]: If the metadata server returns JSON, a mapping of
            the decoded JSON is returned. Otherwise, the response content is
            returned as a string.

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    base_url = urljoin(root, path)
    query_params = {} if params is None else params

    # Always send the metadata-flavor header; merge any caller-supplied
    # headers on top of it.
    headers_to_use = _METADATA_HEADERS.copy()
    if headers:
        headers_to_use.update(headers)

    if recursive:
        query_params["recursive"] = "true"

    url = _helpers.update_query(base_url, query_params)

    # Retry transport-level failures up to ``retry_count`` times. The
    # ``else`` clause below belongs to the ``while`` loop: it runs only
    # when the loop exhausts all retries without hitting ``break``.
    retries = 0
    while retries < retry_count:
        try:
            response = request(url=url, method="GET", headers=headers_to_use)
            break

        except exceptions.TransportError as e:
            _LOGGER.warning(
                "Compute Engine Metadata server unavailable on "
                "attempt %s of %s. Reason: %s",
                retries + 1,
                retry_count,
                e,
            )
            retries += 1
    else:
        raise exceptions.TransportError(
            "Failed to retrieve {} from the Google Compute Engine "
            "metadata service. Compute Engine Metadata server unavailable".format(url)
        )

    if response.status == http_client.OK:
        content = _helpers.from_bytes(response.data)
        # NOTE(review): a missing content-type header would raise KeyError
        # here rather than TransportError — confirm the server always sets it.
        if response.headers["content-type"] == "application/json":
            try:
                return json.loads(content)
            except ValueError as caught_exc:
                new_exc = exceptions.TransportError(
                    "Received invalid JSON from the Google Compute Engine "
                    "metadata service: {:.20}".format(content)
                )
                raise new_exc from caught_exc
        else:
            return content
    else:
        raise exceptions.TransportError(
            "Failed to retrieve {} from the Google Compute Engine "
            "metadata service. Status: {} Response:\n{}".format(
                url, response.status, response.data
            ),
            response,
        )
+
+
def get_project_id(request):
    """Look up the Google Cloud Project ID on the metadata server.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.

    Returns:
        str: The project ID.

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    project_id = get(request, "project/project-id")
    return project_id
+
+
def get_service_account_info(request, service_account="default"):
    """Fetch information about a service account from the metadata server.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        service_account (str): The string 'default' or a service account
            email address. Determines the service account for which to
            acquire information.

    Returns:
        Mapping: The service account's information, for example::

            {
                'email': '...',
                'scopes': ['scope', ...],
                'aliases': ['default', '...']
            }

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    # See https://cloud.google.com/compute/docs/metadata#aggcontents
    # for more on the use of 'recursive'.
    resource = "instance/service-accounts/{0}/".format(service_account)
    return get(request, resource, params={"recursive": "true"})
+
+
def get_service_account_token(request, service_account="default", scopes=None):
    """Fetch an OAuth 2.0 access token for a service account.

    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        service_account (str): The string 'default' or a service account
            email address. Determines the service account for which to
            acquire an access token.
        scopes (Optional[Union[str, List[str]]]): Optional string or list of
            strings with auth scopes.

    Returns:
        Tuple[str, datetime]: The access token and its expiration.

    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    # The metadata endpoint accepts scopes as a single comma-separated value.
    params = None
    if scopes:
        if not isinstance(scopes, str):
            scopes = ",".join(scopes)
        params = {"scopes": scopes}

    metrics_header = {
        metrics.API_CLIENT_HEADER: metrics.token_request_access_token_mds()
    }

    token_path = "instance/service-accounts/{0}/token".format(service_account)
    token_json = get(request, token_path, params=params, headers=metrics_header)

    # Convert the relative ``expires_in`` into an absolute expiry timestamp.
    expiry = _helpers.utcnow() + datetime.timedelta(seconds=token_json["expires_in"])
    return token_json["access_token"], expiry
diff --git a/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py b/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py
new file mode 100644
index 0000000000..7ae673880f
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/compute_engine/credentials.py
@@ -0,0 +1,445 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Compute Engine credentials.
+
+This module provides authentication for an application running on Google
+Compute Engine using the Compute Engine metadata server.
+
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import iam
+from google.auth import jwt
+from google.auth import metrics
+from google.auth.compute_engine import _metadata
+from google.oauth2 import _client
+
+
class Credentials(credentials.Scoped, credentials.CredentialsWithQuotaProject):
    """Compute Engine Credentials.

    These credentials use the Google Compute Engine metadata server to obtain
    OAuth 2.0 access tokens associated with the instance's service account,
    and are also used for Cloud Run, Flex and App Engine (except for the Python
    2.7 runtime, which is supported only on older versions of this library).

    For more information about Compute Engine authentication, including how
    to configure scopes, see the `Compute Engine authentication
    documentation`_.

    .. note:: On Compute Engine the metadata server ignores requested scopes.
        On Cloud Run, Flex and App Engine the server honours requested scopes.

    .. _Compute Engine authentication documentation:
        https://cloud.google.com/compute/docs/authentication#using
    """

    def __init__(
        self,
        service_account_email="default",
        quota_project_id=None,
        scopes=None,
        default_scopes=None,
    ):
        """
        Args:
            service_account_email (str): The service account email to use, or
                'default'. A Compute Engine instance may have multiple service
                accounts.
            quota_project_id (Optional[str]): The project ID used for quota and
                billing.
            scopes (Optional[Sequence[str]]): The list of scopes for the credentials.
            default_scopes (Optional[Sequence[str]]): Default scopes passed by a
                Google client library. Use 'scopes' for user-defined scopes.
        """
        super(Credentials, self).__init__()
        self._service_account_email = service_account_email
        self._quota_project_id = quota_project_id
        self._scopes = scopes
        self._default_scopes = default_scopes

    def _retrieve_info(self, request):
        """Retrieve information about the service account.

        Updates the scopes and retrieves the full service account email.

        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.
        """
        info = _metadata.get_service_account_info(
            request, service_account=self._service_account_email
        )

        # The metadata server resolves 'default' to the full email address.
        self._service_account_email = info["email"]

        # Don't override scopes requested by the user.
        if self._scopes is None:
            self._scopes = info["scopes"]

    def _metric_header_for_usage(self):
        """Return the token-usage metric value for metadata-server creds."""
        return metrics.CRED_TYPE_SA_MDS

    def refresh(self, request):
        """Refresh the access token and scopes.

        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.

        Raises:
            google.auth.exceptions.RefreshError: If the Compute Engine metadata
                service can't be reached or if the instance has no
                credentials.
        """
        scopes = self._scopes if self._scopes is not None else self._default_scopes
        try:
            self._retrieve_info(request)
            self.token, self.expiry = _metadata.get_service_account_token(
                request, service_account=self._service_account_email, scopes=scopes
            )
        except exceptions.TransportError as caught_exc:
            new_exc = exceptions.RefreshError(caught_exc)
            raise new_exc from caught_exc

    @property
    def service_account_email(self):
        """The service account email.

        .. note:: This is not guaranteed to be set until :meth:`refresh` has been
            called.
        """
        return self._service_account_email

    @property
    def requires_scopes(self):
        """True if no user-defined scopes have been configured yet."""
        return not self._scopes

    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
    def with_quota_project(self, quota_project_id):
        # Fix: also carry over default_scopes so changing the quota project
        # does not silently drop library-provided scopes (keeps the copy
        # consistent with __init__ and with_scopes).
        return self.__class__(
            service_account_email=self._service_account_email,
            quota_project_id=quota_project_id,
            scopes=self._scopes,
            default_scopes=self._default_scopes,
        )

    @_helpers.copy_docstring(credentials.Scoped)
    def with_scopes(self, scopes, default_scopes=None):
        # Compute Engine credentials can not be scoped (the metadata service
        # ignores the scopes parameter). App Engine, Cloud Run and Flex support
        # requesting scopes.
        return self.__class__(
            scopes=scopes,
            default_scopes=default_scopes,
            service_account_email=self._service_account_email,
            quota_project_id=self._quota_project_id,
        )
+
+
# Lifetime of the self-signed JWT assertion, and the token endpoint used
# when the caller does not supply one.
_DEFAULT_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds
_DEFAULT_TOKEN_URI = "https://www.googleapis.com/oauth2/v4/token"
+
+
class IDTokenCredentials(
    credentials.CredentialsWithQuotaProject,
    credentials.Signing,
    credentials.CredentialsWithTokenUri,
):
    """Open ID Connect ID Token-based service account credentials.

    These credentials rely on the default service account of a GCE instance.

    ID token can be requested from `GCE metadata server identity endpoint`_, IAM
    token endpoint or other token endpoints you specify. If metadata server
    identity endpoint is not used, the GCE instance must have been started with
    a service account that has access to the IAM Cloud API.

    .. _GCE metadata server identity endpoint:
        https://cloud.google.com/compute/docs/instances/verifying-instance-identity
    """

    def __init__(
        self,
        request,
        target_audience,
        token_uri=None,
        additional_claims=None,
        service_account_email=None,
        signer=None,
        use_metadata_identity_endpoint=False,
        quota_project_id=None,
    ):
        """
        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.
            target_audience (str): The intended audience for these credentials,
                used when requesting the ID Token. The ID Token's ``aud`` claim
                will be set to this string.
            token_uri (str): The OAuth 2.0 Token URI.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT assertion used in the authorization grant.
            service_account_email (str): Optional explicit service account to
                use to sign JWT tokens.
                By default, this is the default GCE service account.
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
                In case the signer is specified, the request argument will be
                ignored.
            use_metadata_identity_endpoint (bool): Whether to use GCE metadata
                identity endpoint. For backward compatibility the default value
                is False. If set to True, ``token_uri``, ``additional_claims``,
                ``service_account_email``, ``signer`` argument should not be set;
                otherwise ValueError will be raised.
            quota_project_id (Optional[str]): The project ID used for quota and
                billing.

        Raises:
            ValueError:
                If ``use_metadata_identity_endpoint`` is set to True, and one of
                ``token_uri``, ``additional_claims``, ``service_account_email``,
                ``signer`` arguments is set.
        """
        super(IDTokenCredentials, self).__init__()

        self._quota_project_id = quota_project_id
        self._use_metadata_identity_endpoint = use_metadata_identity_endpoint
        self._target_audience = target_audience

        if use_metadata_identity_endpoint:
            # Identity-endpoint mode is mutually exclusive with local signing:
            # the metadata server mints the token, so no signer, token URI or
            # extra claims may be supplied.
            if token_uri or additional_claims or service_account_email or signer:
                raise exceptions.MalformedError(
                    "If use_metadata_identity_endpoint is set, token_uri, "
                    "additional_claims, service_account_email, signer arguments"
                    " must not be set"
                )
            self._token_uri = None
            self._additional_claims = None
            self._signer = None

        if service_account_email is None:
            # Resolve the default service account email via the metadata
            # server.
            sa_info = _metadata.get_service_account_info(request)
            self._service_account_email = sa_info["email"]
        else:
            self._service_account_email = service_account_email

        if not use_metadata_identity_endpoint:
            if signer is None:
                # Sign JWT assertions through the IAM signBlob API using the
                # instance's own access-token credentials.
                signer = iam.Signer(
                    request=request,
                    credentials=Credentials(),
                    service_account_email=self._service_account_email,
                )
            self._signer = signer
            self._token_uri = token_uri or _DEFAULT_TOKEN_URI

            if additional_claims is not None:
                self._additional_claims = additional_claims
            else:
                self._additional_claims = {}

    def with_target_audience(self, target_audience):
        """Create a copy of these credentials with the specified target
        audience.

        Args:
            target_audience (str): The intended audience for these credentials,
                used when requesting the ID Token.

        Returns:
            google.auth.service_account.IDTokenCredentials: A new credentials
                instance.
        """
        # since the signer is already instantiated,
        # the request is not needed
        if self._use_metadata_identity_endpoint:
            return self.__class__(
                None,
                target_audience=target_audience,
                use_metadata_identity_endpoint=True,
                quota_project_id=self._quota_project_id,
            )
        else:
            return self.__class__(
                None,
                service_account_email=self._service_account_email,
                token_uri=self._token_uri,
                target_audience=target_audience,
                additional_claims=self._additional_claims.copy(),
                signer=self.signer,
                use_metadata_identity_endpoint=False,
                quota_project_id=self._quota_project_id,
            )

    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
    def with_quota_project(self, quota_project_id):

        # since the signer is already instantiated,
        # the request is not needed
        if self._use_metadata_identity_endpoint:
            return self.__class__(
                None,
                target_audience=self._target_audience,
                use_metadata_identity_endpoint=True,
                quota_project_id=quota_project_id,
            )
        else:
            return self.__class__(
                None,
                service_account_email=self._service_account_email,
                token_uri=self._token_uri,
                target_audience=self._target_audience,
                additional_claims=self._additional_claims.copy(),
                signer=self.signer,
                use_metadata_identity_endpoint=False,
                quota_project_id=quota_project_id,
            )

    @_helpers.copy_docstring(credentials.CredentialsWithTokenUri)
    def with_token_uri(self, token_uri):

        # since the signer is already instantiated,
        # the request is not needed
        if self._use_metadata_identity_endpoint:
            # A custom token URI makes no sense in identity-endpoint mode.
            raise exceptions.MalformedError(
                "If use_metadata_identity_endpoint is set, token_uri" " must not be set"
            )
        else:
            return self.__class__(
                None,
                service_account_email=self._service_account_email,
                token_uri=token_uri,
                target_audience=self._target_audience,
                additional_claims=self._additional_claims.copy(),
                signer=self.signer,
                use_metadata_identity_endpoint=False,
                quota_project_id=self.quota_project_id,
            )

    def _make_authorization_grant_assertion(self):
        """Create the OAuth 2.0 assertion.

        This assertion is used during the OAuth 2.0 grant to acquire an
        ID token.

        Returns:
            bytes: The authorization grant assertion.
        """
        now = _helpers.utcnow()
        lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
        expiry = now + lifetime

        payload = {
            "iat": _helpers.datetime_to_secs(now),
            "exp": _helpers.datetime_to_secs(expiry),
            # The issuer must be the service account email.
            "iss": self.service_account_email,
            # The audience must be the auth token endpoint's URI
            "aud": self._token_uri,
            # The target audience specifies which service the ID token is
            # intended for.
            "target_audience": self._target_audience,
        }

        payload.update(self._additional_claims)

        token = jwt.encode(self._signer, payload)

        return token

    def _call_metadata_identity_endpoint(self, request):
        """Request ID token from metadata identity endpoint.

        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.

        Returns:
            Tuple[str, datetime.datetime]: The ID token and the expiry of the ID token.

        Raises:
            google.auth.exceptions.RefreshError: If the Compute Engine metadata
                service can't be reached or if the instance has no credentials.
            ValueError: If extracting expiry from the obtained ID token fails.
        """
        try:
            path = "instance/service-accounts/default/identity"
            params = {"audience": self._target_audience, "format": "full"}
            metrics_header = {
                metrics.API_CLIENT_HEADER: metrics.token_request_id_token_mds()
            }
            id_token = _metadata.get(
                request, path, params=params, headers=metrics_header
            )
        except exceptions.TransportError as caught_exc:
            new_exc = exceptions.RefreshError(caught_exc)
            raise new_exc from caught_exc

        # The expiry is read from the token's own ``exp`` claim; the token is
        # not verified here (verification is the caller's concern).
        _, payload, _, _ = jwt._unverified_decode(id_token)
        return id_token, datetime.datetime.utcfromtimestamp(payload["exp"])

    def refresh(self, request):
        """Refreshes the ID token.

        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.

        Raises:
            google.auth.exceptions.RefreshError: If the credentials could
                not be refreshed.
            ValueError: If extracting expiry from the obtained ID token fails.
        """
        if self._use_metadata_identity_endpoint:
            self.token, self.expiry = self._call_metadata_identity_endpoint(request)
        else:
            assertion = self._make_authorization_grant_assertion()
            access_token, expiry, _ = _client.id_token_jwt_grant(
                request, self._token_uri, assertion
            )
            self.token = access_token
            self.expiry = expiry

    @property  # type: ignore
    @_helpers.copy_docstring(credentials.Signing)
    def signer(self):
        return self._signer

    def sign_bytes(self, message):
        """Signs the given message.

        Args:
            message (bytes): The message to sign.

        Returns:
            bytes: The message's cryptographic signature.

        Raises:
            ValueError:
                Signer is not available if metadata identity endpoint is used.
        """
        if self._use_metadata_identity_endpoint:
            raise exceptions.InvalidOperation(
                "Signer is not available if metadata identity endpoint is used"
            )
        return self._signer.sign(message)

    @property
    def service_account_email(self):
        """The service account email."""
        return self._service_account_email

    @property
    def signer_email(self):
        """The email used when signing; same as :attr:`service_account_email`."""
        return self._service_account_email
diff --git a/contrib/python/google-auth/py3/google/auth/credentials.py b/contrib/python/google-auth/py3/google/auth/credentials.py
new file mode 100644
index 0000000000..80a2a5c0b4
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/credentials.py
@@ -0,0 +1,410 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Interfaces for credentials."""
+
+import abc
+import os
+
+from google.auth import _helpers, environment_vars
+from google.auth import exceptions
+from google.auth import metrics
+
+
class Credentials(metaclass=abc.ABCMeta):
    """Base class for all credentials.

    All credentials have a :attr:`token` that is used for authentication and
    may also optionally set an :attr:`expiry` to indicate when the token will
    no longer be valid.

    Most credentials will be :attr:`invalid` until :meth:`refresh` is called.
    Credentials can do this automatically before the first HTTP request in
    :meth:`before_request`.

    Although the token and expiration will change as the credentials are
    :meth:`refreshed <refresh>` and used, credentials should be considered
    immutable. Various credentials will accept configuration such as private
    keys, scopes, and other options. These options are not changeable after
    construction. Some classes will provide mechanisms to copy the credentials
    with modifications such as :meth:`ScopedCredentials.with_scopes`.
    """

    def __init__(self):
        self.token = None
        """str: The bearer token that can be used in HTTP headers to make
        authenticated requests."""
        self.expiry = None
        """Optional[datetime]: When the token expires and is no longer valid.
        If this is None, the token is assumed to never expire."""
        self._quota_project_id = None
        """Optional[str]: Project to use for quota and billing purposes."""
        self._trust_boundary = None
        """Optional[str]: Encoded string representation of credentials trust
        boundary."""
        self._universe_domain = "googleapis.com"
        """Optional[str]: The universe domain value, default is googleapis.com
        """

    @property
    def expired(self):
        """Checks if the credentials are expired.

        Note that credentials can be invalid but not expired because
        Credentials with :attr:`expiry` set to None is considered to never
        expire.
        """
        if not self.expiry:
            return False

        # Remove some threshold from expiry to err on the side of reporting
        # expiration early so that we avoid the 401-refresh-retry loop.
        skewed_expiry = self.expiry - _helpers.REFRESH_THRESHOLD
        return _helpers.utcnow() >= skewed_expiry

    @property
    def valid(self):
        """Checks the validity of the credentials.

        This is True if the credentials have a :attr:`token` and the token
        is not :attr:`expired`.
        """
        return self.token is not None and not self.expired

    @property
    def quota_project_id(self):
        """Project to use for quota and billing purposes."""
        return self._quota_project_id

    @property
    def universe_domain(self):
        """The universe domain value."""
        return self._universe_domain

    @abc.abstractmethod
    def refresh(self, request):
        """Refreshes the access token.

        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.

        Raises:
            google.auth.exceptions.RefreshError: If the credentials could
                not be refreshed.
        """
        # pylint: disable=missing-raises-doc
        # (pylint doesn't recognize that this is abstract)
        raise NotImplementedError("Refresh must be implemented")

    def _metric_header_for_usage(self):
        """The x-goog-api-client header for token usage metric.

        This header will be added to the API service requests in before_request
        method. For example, "cred-type/sa-jwt" means service account self
        signed jwt access token is used in the API service request
        authorization header. Children credentials classes need to override
        this method to provide the header value, if the token usage metric is
        needed.

        Returns:
            str: The x-goog-api-client header value, or None when no usage
                metric should be reported for this credential type.
        """
        return None

    def apply(self, headers, token=None):
        """Apply the token to the authentication header.

        Args:
            headers (Mapping): The HTTP request headers.
            token (Optional[str]): If specified, overrides the current access
                token.
        """
        headers["authorization"] = "Bearer {}".format(
            _helpers.from_bytes(token or self.token)
        )
        # Only attach optional headers when the corresponding value is set.
        if self._trust_boundary is not None:
            headers["x-identity-trust-boundary"] = self._trust_boundary
        if self.quota_project_id:
            headers["x-goog-user-project"] = self.quota_project_id

    def before_request(self, request, method, url, headers):
        """Performs credential-specific before request logic.

        Refreshes the credentials if necessary, then calls :meth:`apply` to
        apply the token to the authentication header.

        Args:
            request (google.auth.transport.Request): The object used to make
                HTTP requests.
            method (str): The request's HTTP method or the RPC method being
                invoked.
            url (str): The request's URI or the RPC service's URI.
            headers (Mapping): The request's headers.
        """
        # pylint: disable=unused-argument
        # (Subclasses may use these arguments to ascertain information about
        # the http request.)
        if not self.valid:
            self.refresh(request)
        metrics.add_metric_header(headers, self._metric_header_for_usage())
        self.apply(headers)
+
+
class CredentialsWithQuotaProject(Credentials):
    """Abstract base for credentials supporting ``with_quota_project`` factory"""

    def with_quota_project(self, quota_project_id):
        """Return a copy of these credentials with a modified quota project.

        Args:
            quota_project_id (str): The project to use for quota and
                billing purposes.

        Returns:
            google.oauth2.credentials.Credentials: A new credentials instance.
        """
        raise NotImplementedError("This credential does not support quota project.")

    def with_quota_project_from_environment(self):
        """Apply the quota project from the environment, when one is set."""
        env_quota_project = os.environ.get(environment_vars.GOOGLE_CLOUD_QUOTA_PROJECT)
        if not env_quota_project:
            return self
        return self.with_quota_project(env_quota_project)
+
+
class CredentialsWithTokenUri(Credentials):
    """Abstract base for credentials supporting ``with_token_uri`` factory"""

    def with_token_uri(self, token_uri):
        """Return a copy of these credentials with a modified token URI.

        Args:
            token_uri (str): The uri to use for fetching/exchanging tokens.

        Returns:
            google.oauth2.credentials.Credentials: A new credentials instance.
        """
        raise NotImplementedError("This credential does not use token uri.")
+
+
+class AnonymousCredentials(Credentials):
+    """Credentials that do not provide any authentication information.
+
+    These are useful in the case of services that support anonymous access or
+    local service emulators that do not use credentials.
+    """
+
+    @property
+    def expired(self):
+        """Returns `False`, anonymous credentials never expire."""
+        return False
+
+    @property
+    def valid(self):
+        """Returns `True`, anonymous credentials are always valid."""
+        return True
+
+    def refresh(self, request):
+        """Raises :class:``InvalidOperation``, anonymous credentials cannot be
+        refreshed."""
+        raise exceptions.InvalidOperation("Anonymous credentials cannot be refreshed.")
+
+    def apply(self, headers, token=None):
+        """Anonymous credentials do nothing to the request.
+
+        The optional ``token`` argument is not supported.
+
+        Raises:
+            google.auth.exceptions.InvalidValue: If a token was specified.
+        """
+        if token is not None:
+            raise exceptions.InvalidValue("Anonymous credentials don't support tokens.")
+
+    def before_request(self, request, method, url, headers):
+        """Anonymous credentials do nothing to the request."""
+        # Intentionally a no-op: no token is attached and no refresh occurs.
+
+
+class ReadOnlyScoped(metaclass=abc.ABCMeta):
+    """Interface for credentials whose scopes can be queried.
+
+    OAuth 2.0-based credentials allow limiting access using scopes as described
+    in `RFC6749 Section 3.3`_.
+    If a credential class implements this interface then the credentials use
+    scopes in their implementation.
+
+    Some credentials require scopes in order to obtain a token. You can check
+    if scoping is necessary with :attr:`requires_scopes`::
+
+        if credentials.requires_scopes:
+            # Scoping is required.
+            credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+    Credentials that require scopes must either be constructed with scopes::
+
+        credentials = SomeScopedCredentials(scopes=['one', 'two'])
+
+    Or must copy an existing instance using :meth:`with_scopes`::
+
+        scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+    Some credentials have scopes but do not allow or require scopes to be set,
+    these credentials can be used as-is.
+
+    .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
+    """
+
+    def __init__(self):
+        super(ReadOnlyScoped, self).__init__()
+        # Scopes explicitly requested by the user; None until set.
+        self._scopes = None
+        # Default scopes supplied by a client library; used only as a
+        # fallback when no explicit scopes are set.
+        self._default_scopes = None
+
+    @property
+    def scopes(self):
+        """Sequence[str]: the credentials' current set of scopes."""
+        return self._scopes
+
+    @property
+    def default_scopes(self):
+        """Sequence[str]: the credentials' current set of default scopes."""
+        return self._default_scopes
+
+    @abc.abstractproperty
+    def requires_scopes(self):
+        """True if these credentials require scopes to obtain an access token.
+        """
+        return False
+
+    def has_scopes(self, scopes):
+        """Checks if the credentials have the given scopes.
+
+        .. warning: This method is not guaranteed to be accurate if the
+            credentials are :attr:`~Credentials.invalid`.
+
+        Args:
+            scopes (Sequence[str]): The list of scopes to check.
+
+        Returns:
+            bool: True if the credentials have the given scopes.
+        """
+        # Explicit scopes take precedence over default scopes; an absent
+        # set of both compares against the empty set.
+        credential_scopes = (
+            self._scopes if self._scopes is not None else self._default_scopes
+        )
+        return set(scopes).issubset(set(credential_scopes or []))
+
+
+class Scoped(ReadOnlyScoped):
+    """Interface for credentials whose scopes can be replaced while copying.
+
+    OAuth 2.0-based credentials allow limiting access using scopes as described
+    in `RFC6749 Section 3.3`_.
+    If a credential class implements this interface then the credentials use
+    scopes in their implementation.
+
+    Some credentials require scopes in order to obtain a token. You can check
+    if scoping is necessary with :attr:`requires_scopes`::
+
+        if credentials.requires_scopes:
+            # Scoping is required.
+            credentials = credentials.create_scoped(['one', 'two'])
+
+    Credentials that require scopes must either be constructed with scopes::
+
+        credentials = SomeScopedCredentials(scopes=['one', 'two'])
+
+    Or must copy an existing instance using :meth:`with_scopes`::
+
+        scoped_credentials = credentials.with_scopes(scopes=['one', 'two'])
+
+    Some credentials have scopes but do not allow or require scopes to be set,
+    these credentials can be used as-is.
+
+    .. _RFC6749 Section 3.3: https://tools.ietf.org/html/rfc6749#section-3.3
+    """
+
+    @abc.abstractmethod
+    def with_scopes(self, scopes, default_scopes=None):
+        """Create a copy of these credentials with the specified scopes.
+
+        Args:
+            scopes (Sequence[str]): The list of scopes to attach to the
+                current credentials.
+            default_scopes (Sequence[str]): Default scopes passed by a
+                Google client library. Use 'scopes' for user-defined scopes.
+
+        Raises:
+            NotImplementedError: If the credentials' scopes can not be changed.
+                This can be avoided by checking :attr:`requires_scopes` before
+                calling this method.
+        """
+        raise NotImplementedError("This class does not require scoping.")
+
+
+def with_scopes_if_required(credentials, scopes, default_scopes=None):
+    """Creates a copy of the credentials with scopes if scoping is required.
+
+    This helper function is useful when you do not know (or care to know) the
+    specific type of credentials you are using (such as when you use
+    :func:`google.auth.default`). This function will call
+    :meth:`Scoped.with_scopes` if the credentials are scoped credentials and if
+    the credentials require scoping. Otherwise, it will return the credentials
+    as-is.
+
+    Args:
+        credentials (google.auth.credentials.Credentials): The credentials to
+            scope if necessary.
+        scopes (Sequence[str]): The list of scopes to use.
+        default_scopes (Sequence[str]): Default scopes passed by a
+            Google client library. Use 'scopes' for user-defined scopes.
+
+    Returns:
+        google.auth.credentials.Credentials: Either a new set of scoped
+            credentials, or the passed in credentials instance if no scoping
+            was required.
+    """
+    # Only copy when the credentials both support scoping and need it;
+    # everything else is passed through untouched.
+    if isinstance(credentials, Scoped) and credentials.requires_scopes:
+        return credentials.with_scopes(scopes, default_scopes=default_scopes)
+    else:
+        return credentials
+
+
+class Signing(metaclass=abc.ABCMeta):
+    """Interface for credentials that can cryptographically sign messages."""
+
+    @abc.abstractmethod
+    def sign_bytes(self, message):
+        """Signs the given message.
+
+        Args:
+            message (bytes): The message to sign.
+
+        Returns:
+            bytes: The message's cryptographic signature.
+
+        Raises:
+            NotImplementedError: If the subclass does not override this method.
+        """
+        # pylint: disable=missing-raises-doc,redundant-returns-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Sign bytes must be implemented.")
+
+    @abc.abstractproperty
+    def signer_email(self):
+        """Optional[str]: An email address that identifies the signer."""
+        # pylint: disable=missing-raises-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Signer email must be implemented.")
+
+    @abc.abstractproperty
+    def signer(self):
+        """google.auth.crypt.Signer: The signer used to sign bytes."""
+        # pylint: disable=missing-raises-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Signer must be implemented.")
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/__init__.py b/contrib/python/google-auth/py3/google/auth/crypt/__init__.py
new file mode 100644
index 0000000000..6d147e7061
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/__init__.py
@@ -0,0 +1,98 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cryptography helpers for verifying and signing messages.
+
+The simplest way to verify signatures is using :func:`verify_signature`::
+
+ cert = open('certs.pem').read()
+ valid = crypt.verify_signature(message, signature, cert)
+
+If you're going to verify many messages with the same certificate, you can use
+:class:`RSAVerifier`::
+
+ cert = open('certs.pem').read()
+ verifier = crypt.RSAVerifier.from_string(cert)
+ valid = verifier.verify(message, signature)
+
+To sign messages use :class:`RSASigner` with a private key::
+
+ private_key = open('private_key.pem').read()
+ signer = crypt.RSASigner.from_string(private_key)
+ signature = signer.sign(message)
+
+The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`.
+Note that these two classes are only available if your `cryptography` dependency
+version is at least 1.4.0.
+"""
+
+from google.auth.crypt import base
+from google.auth.crypt import rsa
+
+# ES256 support requires the `cryptography` package; degrade gracefully
+# when it is unavailable so RSA-only installs still import cleanly.
+try:
+    from google.auth.crypt import es256
+except ImportError:  # pragma: NO COVER
+    es256 = None  # type: ignore
+
+# The public API depends on whether ES256 support could be imported.
+if es256 is not None:  # pragma: NO COVER
+    __all__ = [
+        "ES256Signer",
+        "ES256Verifier",
+        "RSASigner",
+        "RSAVerifier",
+        "Signer",
+        "Verifier",
+    ]
+else:  # pragma: NO COVER
+    __all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"]
+
+
+# Aliases to maintain the v1.0.0 interface, as the crypt module was split
+# into submodules.
+Signer = base.Signer
+Verifier = base.Verifier
+RSASigner = rsa.RSASigner
+RSAVerifier = rsa.RSAVerifier
+
+if es256 is not None:  # pragma: NO COVER
+    ES256Signer = es256.ES256Signer
+    ES256Verifier = es256.ES256Verifier
+
+
+def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier):
+    """Verify an RSA or ECDSA cryptographic signature.
+
+    Checks that the provided ``signature`` was generated from ``message``
+    using the private key associated with the ``cert``.
+
+    Args:
+        message (Union[str, bytes]): The plaintext message.
+        signature (Union[str, bytes]): The cryptographic signature to check.
+        certs (Union[Sequence, str, bytes]): The certificate or certificates
+            to use to check the signature.
+        verifier_cls (Optional[~google.auth.crypt.base.Verifier]): Which
+            verifier class to use for verification. This can be used to select
+            different algorithms, such as RSA or ECDSA. Default value is
+            :class:`RSAVerifier`.
+
+    Returns:
+        bool: True if the signature is valid, otherwise False.
+    """
+    # Normalize a single certificate into a one-element sequence.
+    if isinstance(certs, (str, bytes)):
+        certs = [certs]
+
+    # The signature is accepted if any of the given certificates verifies it.
+    for cert in certs:
+        verifier = verifier_cls.from_string(cert)
+        if verifier.verify(message, signature):
+            return True
+    return False
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/_cryptography_rsa.py b/contrib/python/google-auth/py3/google/auth/crypt/_cryptography_rsa.py
new file mode 100644
index 0000000000..4f2d611666
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/_cryptography_rsa.py
@@ -0,0 +1,136 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""RSA verifier and signer that use the ``cryptography`` library.
+
+This is a much faster implementation than the default (in
+``google.auth.crypt._python_rsa``), which depends on the pure-Python
+``rsa`` library.
+"""
+
+import cryptography.exceptions
+from cryptography.hazmat import backends
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import padding
+import cryptography.x509
+
+from google.auth import _helpers
+from google.auth.crypt import base
+
+_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
+_BACKEND = backends.default_backend()
+_PADDING = padding.PKCS1v15()
+_SHA256 = hashes.SHA256()
+
+
+class RSAVerifier(base.Verifier):
+    """Verifies RSA cryptographic signatures using public keys.
+
+    Args:
+        public_key (
+                cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey):
+            The public key used to verify signatures.
+    """
+
+    def __init__(self, public_key):
+        self._pubkey = public_key
+
+    @_helpers.copy_docstring(base.Verifier)
+    def verify(self, message, signature):
+        message = _helpers.to_bytes(message)
+        try:
+            # PKCS#1 v1.5 padding with SHA-256 (see module-level constants).
+            self._pubkey.verify(signature, message, _PADDING, _SHA256)
+            return True
+        except (ValueError, cryptography.exceptions.InvalidSignature):
+            # Any parse or verification failure is reported as "not valid".
+            return False
+
+    @classmethod
+    def from_string(cls, public_key):
+        """Construct a Verifier instance from a public key or public
+        certificate string.
+
+        Args:
+            public_key (Union[str, bytes]): The public key in PEM format or the
+                x509 public key certificate.
+
+        Returns:
+            Verifier: The constructed verifier.
+
+        Raises:
+            ValueError: If the public key can't be parsed.
+        """
+        public_key_data = _helpers.to_bytes(public_key)
+
+        # An x509 certificate wraps the public key; unwrap it first.
+        if _CERTIFICATE_MARKER in public_key_data:
+            cert = cryptography.x509.load_pem_x509_certificate(
+                public_key_data, _BACKEND
+            )
+            pubkey = cert.public_key()
+
+        else:
+            pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
+
+        return cls(pubkey)
+
+
+class RSASigner(base.Signer, base.FromServiceAccountMixin):
+    """Signs messages with an RSA private key.
+
+    Args:
+        private_key (
+                cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
+            The private key to sign with.
+        key_id (str): Optional key ID used to identify this private key. This
+            can be useful to associate the private key with its associated
+            public key or certificate.
+    """
+
+    def __init__(self, private_key, key_id=None):
+        self._key = private_key
+        self._key_id = key_id
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(base.Signer)
+    def key_id(self):
+        return self._key_id
+
+    @_helpers.copy_docstring(base.Signer)
+    def sign(self, message):
+        message = _helpers.to_bytes(message)
+        # PKCS#1 v1.5 padding with SHA-256 (see module-level constants).
+        return self._key.sign(message, _PADDING, _SHA256)
+
+    @classmethod
+    def from_string(cls, key, key_id=None):
+        """Construct a RSASigner from a private key in PEM format.
+
+        Args:
+            key (Union[bytes, str]): Private key in PEM format.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt._cryptography_rsa.RSASigner: The
+            constructed signer.
+
+        Raises:
+            ValueError: If ``key`` is not ``bytes`` or ``str`` (unicode).
+            UnicodeDecodeError: If ``key`` is ``bytes`` but cannot be decoded
+                into a UTF-8 ``str``.
+            ValueError: If ``cryptography`` "Could not deserialize key data."
+        """
+        key = _helpers.to_bytes(key)
+        # Only unencrypted PEM private keys are supported (password=None).
+        private_key = serialization.load_pem_private_key(
+            key, password=None, backend=_BACKEND
+        )
+        return cls(private_key, key_id=key_id)
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/_helpers.py b/contrib/python/google-auth/py3/google/auth/crypt/_helpers.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/_helpers.py
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/_python_rsa.py b/contrib/python/google-auth/py3/google/auth/crypt/_python_rsa.py
new file mode 100644
index 0000000000..e553c25ed5
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/_python_rsa.py
@@ -0,0 +1,175 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pure-Python RSA cryptography implementation.
+
+Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
+to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
+certificates. There is no support for p12 files.
+"""
+
+from __future__ import absolute_import
+
+import io
+
+from pyasn1.codec.der import decoder # type: ignore
+from pyasn1_modules import pem # type: ignore
+from pyasn1_modules.rfc2459 import Certificate # type: ignore
+from pyasn1_modules.rfc5208 import PrivateKeyInfo # type: ignore
+import rsa # type: ignore
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth.crypt import base
+
+_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
+_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
+_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----")
+_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----")
+_PKCS8_SPEC = PrivateKeyInfo()
+
+
+def _bit_list_to_bytes(bit_list):
+    """Converts an iterable of 1s and 0s to bytes.
+
+    Combines the list 8 at a time, treating each group of 8 bits
+    as a single byte.
+
+    Args:
+        bit_list (Sequence): Sequence of 1s and 0s.
+
+    Returns:
+        bytes: The decoded bytes.
+    """
+    num_bits = len(bit_list)
+    byte_vals = bytearray()
+    for start in range(0, num_bits, 8):
+        curr_bits = bit_list[start : start + 8]
+        # zip() stops at the shorter sequence, so a trailing group of fewer
+        # than 8 bits is paired with the high-order powers of two (i.e. the
+        # partial group fills the most significant bits of the final byte).
+        char_val = sum(val * digit for val, digit in zip(_POW2, curr_bits))
+        byte_vals.append(char_val)
+    return bytes(byte_vals)
+
+
+class RSAVerifier(base.Verifier):
+    """Verifies RSA cryptographic signatures using public keys.
+
+    Args:
+        public_key (rsa.key.PublicKey): The public key used to verify
+            signatures.
+    """
+
+    def __init__(self, public_key):
+        self._pubkey = public_key
+
+    @_helpers.copy_docstring(base.Verifier)
+    def verify(self, message, signature):
+        message = _helpers.to_bytes(message)
+        try:
+            return rsa.pkcs1.verify(message, signature, self._pubkey)
+        except (ValueError, rsa.pkcs1.VerificationError):
+            # Any parse or verification failure is reported as "not valid".
+            return False
+
+    @classmethod
+    def from_string(cls, public_key):
+        """Construct a Verifier instance from a public key or public
+        certificate string.
+
+        Args:
+            public_key (Union[str, bytes]): The public key in PEM format or the
+                x509 public key certificate.
+
+        Returns:
+            google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier.
+
+        Raises:
+            ValueError: If the public_key can't be parsed.
+        """
+        public_key = _helpers.to_bytes(public_key)
+        is_x509_cert = _CERTIFICATE_MARKER in public_key
+
+        # If this is a certificate, extract the public key info.
+        if is_x509_cert:
+            der = rsa.pem.load_pem(public_key, "CERTIFICATE")
+            asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
+            # A fully-consumed DER payload is required; leftovers indicate a
+            # malformed certificate.
+            if remaining != b"":
+                raise exceptions.InvalidValue("Unused bytes", remaining)
+
+            cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"]
+            key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"])
+            pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER")
+        else:
+            pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM")
+        return cls(pubkey)
+
+
+class RSASigner(base.Signer, base.FromServiceAccountMixin):
+    """Signs messages with an RSA private key.
+
+    Args:
+        private_key (rsa.key.PrivateKey): The private key to sign with.
+        key_id (str): Optional key ID used to identify this private key. This
+            can be useful to associate the private key with its associated
+            public key or certificate.
+    """
+
+    def __init__(self, private_key, key_id=None):
+        self._key = private_key
+        self._key_id = key_id
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(base.Signer)
+    def key_id(self):
+        return self._key_id
+
+    @_helpers.copy_docstring(base.Signer)
+    def sign(self, message):
+        message = _helpers.to_bytes(message)
+        return rsa.pkcs1.sign(message, self._key, "SHA-256")
+
+    @classmethod
+    def from_string(cls, key, key_id=None):
+        """Construct a Signer instance from a private key in PEM format.
+
+        Args:
+            key (str): Private key in PEM format.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+
+        Raises:
+            ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
+                PEM format.
+        """
+        key = _helpers.from_bytes(key)  # PEM expects str in Python 3
+        # marker_id identifies which marker pair matched: 0 for PKCS#1,
+        # 1 for PKCS#8 (the order the markers are passed below).
+        marker_id, key_bytes = pem.readPemBlocksFromFile(
+            io.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER
+        )
+
+        # Key is in pkcs1 format.
+        if marker_id == 0:
+            private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER")
+        # Key is in pkcs8.
+        elif marker_id == 1:
+            key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
+            # A fully-consumed DER payload is required; leftovers indicate a
+            # malformed key.
+            if remaining != b"":
+                raise exceptions.InvalidValue("Unused bytes", remaining)
+            private_key_info = key_info.getComponentByName("privateKey")
+            private_key = rsa.key.PrivateKey.load_pkcs1(
+                private_key_info.asOctets(), format="DER"
+            )
+        else:
+            raise exceptions.MalformedError("No key could be detected.")
+
+        return cls(private_key, key_id=key_id)
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/base.py b/contrib/python/google-auth/py3/google/auth/crypt/base.py
new file mode 100644
index 0000000000..ad871c3115
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/base.py
@@ -0,0 +1,127 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes for cryptographic signers and verifiers."""
+
+import abc
+import io
+import json
+
+from google.auth import exceptions
+
+_JSON_FILE_PRIVATE_KEY = "private_key"
+_JSON_FILE_PRIVATE_KEY_ID = "private_key_id"
+
+
+class Verifier(metaclass=abc.ABCMeta):
+    """Abstract base class for cryptographic signature verifiers."""
+
+    @abc.abstractmethod
+    def verify(self, message, signature):
+        """Verifies a message against a cryptographic signature.
+
+        Args:
+            message (Union[str, bytes]): The message to verify.
+            signature (Union[str, bytes]): The cryptography signature to check.
+
+        Returns:
+            bool: True if message was signed by the private key associated
+            with the public key that this object was constructed with.
+        """
+        # pylint: disable=missing-raises-doc,redundant-returns-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Verify must be implemented")
+
+
+class Signer(metaclass=abc.ABCMeta):
+    """Abstract base class for cryptographic signers."""
+
+    @abc.abstractproperty
+    def key_id(self):
+        """Optional[str]: The key ID used to identify this private key."""
+        raise NotImplementedError("Key id must be implemented")
+
+    @abc.abstractmethod
+    def sign(self, message):
+        """Signs a message.
+
+        Args:
+            message (Union[str, bytes]): The message to be signed.
+
+        Returns:
+            bytes: The signature of the message.
+        """
+        # pylint: disable=missing-raises-doc,redundant-returns-doc
+        # (pylint doesn't recognize that this is abstract)
+        raise NotImplementedError("Sign must be implemented")
+
+
+class FromServiceAccountMixin(metaclass=abc.ABCMeta):
+    """Mix-in to enable factory constructors for a Signer."""
+
+    # NOTE: concrete implementations define this as a @classmethod; the
+    # abstract stub here only fixes the expected signature.
+    @abc.abstractmethod
+    def from_string(cls, key, key_id=None):
+        """Construct a Signer instance from a private key string.
+
+        Args:
+            key (str): Private key as a string.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+
+        Raises:
+            ValueError: If the key cannot be parsed.
+        """
+        raise NotImplementedError("from_string must be implemented")
+
+    @classmethod
+    def from_service_account_info(cls, info):
+        """Creates a Signer instance instance from a dictionary containing
+        service account info in Google format.
+
+        Args:
+            info (Mapping[str, str]): The service account info in Google
+                format.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+        """
+        if _JSON_FILE_PRIVATE_KEY not in info:
+            raise exceptions.MalformedError(
+                "The private_key field was not found in the service account " "info."
+            )
+
+        # The key id is optional in the service-account JSON; pass None if
+        # it is absent.
+        return cls.from_string(
+            info[_JSON_FILE_PRIVATE_KEY], info.get(_JSON_FILE_PRIVATE_KEY_ID)
+        )
+
+    @classmethod
+    def from_service_account_file(cls, filename):
+        """Creates a Signer instance from a service account .json file
+        in Google format.
+
+        Args:
+            filename (str): The path to the service account .json file.
+
+        Returns:
+            google.auth.crypt.Signer: The constructed signer.
+        """
+        with io.open(filename, "r", encoding="utf-8") as json_file:
+            data = json.load(json_file)
+
+        return cls.from_service_account_info(data)
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/es256.py b/contrib/python/google-auth/py3/google/auth/crypt/es256.py
new file mode 100644
index 0000000000..7920cc7ffb
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/es256.py
@@ -0,0 +1,160 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ECDSA (ES256) verifier and signer that use the ``cryptography`` library.
+"""
+
+from cryptography import utils # type: ignore
+import cryptography.exceptions
+from cryptography.hazmat import backends
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature
+from cryptography.hazmat.primitives.asymmetric.utils import encode_dss_signature
+import cryptography.x509
+
+from google.auth import _helpers
+from google.auth.crypt import base
+
+
+_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
+_BACKEND = backends.default_backend()
+_PADDING = padding.PKCS1v15()
+
+
+class ES256Verifier(base.Verifier):
+    """Verifies ECDSA cryptographic signatures using public keys.
+
+    Args:
+        public_key (
+                cryptography.hazmat.primitives.asymmetric.ec.ECDSAPublicKey):
+            The public key used to verify signatures.
+    """
+
+    def __init__(self, public_key):
+        self._pubkey = public_key
+
+    @_helpers.copy_docstring(base.Verifier)
+    def verify(self, message, signature):
+        # First convert (r||s) raw signature to ASN1 encoded signature.
+        sig_bytes = _helpers.to_bytes(signature)
+        # A P-256 raw signature is exactly two 32-byte big-endian integers.
+        if len(sig_bytes) != 64:
+            return False
+        # utils.int_from_bytes is the Python 2 fallback for int.from_bytes.
+        r = (
+            int.from_bytes(sig_bytes[:32], byteorder="big")
+            if _helpers.is_python_3()
+            else utils.int_from_bytes(sig_bytes[:32], byteorder="big")
+        )
+        s = (
+            int.from_bytes(sig_bytes[32:], byteorder="big")
+            if _helpers.is_python_3()
+            else utils.int_from_bytes(sig_bytes[32:], byteorder="big")
+        )
+        asn1_sig = encode_dss_signature(r, s)
+
+        message = _helpers.to_bytes(message)
+        try:
+            self._pubkey.verify(asn1_sig, message, ec.ECDSA(hashes.SHA256()))
+            return True
+        except (ValueError, cryptography.exceptions.InvalidSignature):
+            # Any parse or verification failure is reported as "not valid".
+            return False
+
+    @classmethod
+    def from_string(cls, public_key):
+        """Construct a Verifier instance from a public key or public
+        certificate string.
+
+        Args:
+            public_key (Union[str, bytes]): The public key in PEM format or the
+                x509 public key certificate.
+
+        Returns:
+            Verifier: The constructed verifier.
+
+        Raises:
+            ValueError: If the public key can't be parsed.
+        """
+        public_key_data = _helpers.to_bytes(public_key)
+
+        # An x509 certificate wraps the public key; unwrap it first.
+        if _CERTIFICATE_MARKER in public_key_data:
+            cert = cryptography.x509.load_pem_x509_certificate(
+                public_key_data, _BACKEND
+            )
+            pubkey = cert.public_key()
+
+        else:
+            pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
+
+        return cls(pubkey)
+
+
+class ES256Signer(base.Signer, base.FromServiceAccountMixin):
+    """Signs messages with an ECDSA private key.
+
+    Args:
+        private_key (
+                cryptography.hazmat.primitives.asymmetric.ec.ECDSAPrivateKey):
+            The private key to sign with.
+        key_id (str): Optional key ID used to identify this private key. This
+            can be useful to associate the private key with its associated
+            public key or certificate.
+    """
+
+    def __init__(self, private_key, key_id=None):
+        self._key = private_key
+        self._key_id = key_id
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(base.Signer)
+    def key_id(self):
+        return self._key_id
+
+    @_helpers.copy_docstring(base.Signer)
+    def sign(self, message):
+        message = _helpers.to_bytes(message)
+        asn1_signature = self._key.sign(message, ec.ECDSA(hashes.SHA256()))
+
+        # Convert ASN1 encoded signature to (r||s) raw signature: two
+        # 32-byte big-endian integers concatenated. utils.int_to_bytes is
+        # the Python 2 fallback for int.to_bytes.
+        (r, s) = decode_dss_signature(asn1_signature)
+        return (
+            (r.to_bytes(32, byteorder="big") + s.to_bytes(32, byteorder="big"))
+            if _helpers.is_python_3()
+            else (utils.int_to_bytes(r, 32) + utils.int_to_bytes(s, 32))
+        )
+
+    @classmethod
+    def from_string(cls, key, key_id=None):
+        """Construct an ES256Signer from a private key in PEM format.
+
+        Args:
+            key (Union[bytes, str]): Private key in PEM format.
+            key_id (str): An optional key id used to identify the private key.
+
+        Returns:
+            google.auth.crypt.es256.ES256Signer: The constructed signer.
+
+        Raises:
+            ValueError: If ``key`` is not ``bytes`` or ``str`` (unicode).
+            UnicodeDecodeError: If ``key`` is ``bytes`` but cannot be decoded
+                into a UTF-8 ``str``.
+            ValueError: If ``cryptography`` "Could not deserialize key data."
+        """
+        key = _helpers.to_bytes(key)
+        # Only unencrypted PEM private keys are supported (password=None).
+        private_key = serialization.load_pem_private_key(
+            key, password=None, backend=_BACKEND
+        )
+        return cls(private_key, key_id=key_id)
diff --git a/contrib/python/google-auth/py3/google/auth/crypt/rsa.py b/contrib/python/google-auth/py3/google/auth/crypt/rsa.py
new file mode 100644
index 0000000000..ed842d1eb8
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/crypt/rsa.py
@@ -0,0 +1,30 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""RSA cryptography signer and verifier."""
+
+
+try:
+    # Prefer the cryptography-based RSA implementation (much faster, C-backed).
+    from google.auth.crypt import _cryptography_rsa
+
+    RSASigner = _cryptography_rsa.RSASigner
+    RSAVerifier = _cryptography_rsa.RSAVerifier
+except ImportError:  # pragma: NO COVER
+    # Fallback to pure-python RSA implementation if cryptography is
+    # unavailable.
+    from google.auth.crypt import _python_rsa
+
+    RSASigner = _python_rsa.RSASigner  # type: ignore
+    RSAVerifier = _python_rsa.RSAVerifier  # type: ignore
diff --git a/contrib/python/google-auth/py3/google/auth/downscoped.py b/contrib/python/google-auth/py3/google/auth/downscoped.py
new file mode 100644
index 0000000000..b4d9d386e5
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/downscoped.py
@@ -0,0 +1,504 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Downscoping with Credential Access Boundaries
+
+This module provides the ability to downscope credentials using
+`Downscoping with Credential Access Boundaries`_. This is useful to restrict the
+Identity and Access Management (IAM) permissions that a short-lived credential
+can use.
+
+To downscope permissions of a source credential, a Credential Access Boundary
+that specifies which resources the new credential can access, as well as
+an upper bound on the permissions that are available on each resource, has to
+be defined. A downscoped credential can then be instantiated using the source
+credential and the Credential Access Boundary.
+
+The common pattern of usage is to have a token broker with elevated access
+generate these downscoped credentials from higher access source credentials and
+pass the downscoped short-lived access tokens to a token consumer via some
+secure authenticated channel for limited access to Google Cloud Storage
+resources.
+
+For example, a token broker can be set up on a server in a private network.
+Various workloads (token consumers) in the same network will send authenticated
+requests to that broker for downscoped tokens to access or modify specific Google
+Cloud Storage buckets.
+
+The broker will instantiate downscoped credentials instances that can be used to
+generate short lived downscoped access tokens that can be passed to the token
+consumer. These downscoped access tokens can be injected by the consumer into
+google.oauth2.Credentials and used to initialize a storage client instance to
+access Google Cloud Storage resources with restricted access.
+
+Note: Only Cloud Storage supports Credential Access Boundaries. Other Google
+Cloud services do not support this feature.
+
+.. _Downscoping with Credential Access Boundaries: https://cloud.google.com/iam/docs/downscoping-short-lived-credentials
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.oauth2 import sts
+
+# The maximum number of access boundary rules a Credential Access Boundary can
+# contain.
+_MAX_ACCESS_BOUNDARY_RULES_COUNT = 10
+# The token exchange grant_type used for exchanging credentials.
+_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+# The token exchange requested_token_type. This is always an access_token.
+_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+# The STS token URL used to exchange a short-lived access token for a downscoped one.
+_STS_TOKEN_URL = "https://sts.googleapis.com/v1/token"
+# The subject token type to use when exchanging a short lived access token for a
+# downscoped token.
+_STS_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+
+
+class CredentialAccessBoundary(object):
+ """Defines a Credential Access Boundary which contains a list of access boundary
+ rules. Each rule contains information on the resource that the rule applies to,
+ the upper bound of the permissions that are available on that resource and an
+ optional condition to further restrict permissions.
+ """
+
+ def __init__(self, rules=[]):
+ """Instantiates a Credential Access Boundary. A Credential Access Boundary
+ can contain up to 10 access boundary rules.
+
+ Args:
+ rules (Sequence[google.auth.downscoped.AccessBoundaryRule]): The list of
+ access boundary rules limiting the access that a downscoped credential
+ will have.
+ Raises:
+ InvalidType: If any of the rules are not a valid type.
+ InvalidValue: If the provided rules exceed the maximum allowed.
+ """
+ self.rules = rules
+
+ @property
+ def rules(self):
+ """Returns the list of access boundary rules defined on the Credential
+ Access Boundary.
+
+ Returns:
+ Tuple[google.auth.downscoped.AccessBoundaryRule, ...]: The list of access
+ boundary rules defined on the Credential Access Boundary. These are returned
+ as an immutable tuple to prevent modification.
+ """
+ return tuple(self._rules)
+
+ @rules.setter
+ def rules(self, value):
+ """Updates the current rules on the Credential Access Boundary. This will overwrite
+ the existing set of rules.
+
+ Args:
+ value (Sequence[google.auth.downscoped.AccessBoundaryRule]): The list of
+ access boundary rules limiting the access that a downscoped credential
+ will have.
+ Raises:
+ InvalidType: If any of the rules are not a valid type.
+ InvalidValue: If the provided rules exceed the maximum allowed.
+ """
+ if len(value) > _MAX_ACCESS_BOUNDARY_RULES_COUNT:
+ raise exceptions.InvalidValue(
+ "Credential access boundary rules can have a maximum of {} rules.".format(
+ _MAX_ACCESS_BOUNDARY_RULES_COUNT
+ )
+ )
+ for access_boundary_rule in value:
+ if not isinstance(access_boundary_rule, AccessBoundaryRule):
+ raise exceptions.InvalidType(
+ "List of rules provided do not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
+ )
+ # Make a copy of the original list.
+ self._rules = list(value)
+
+ def add_rule(self, rule):
+ """Adds a single access boundary rule to the existing rules.
+
+ Args:
+ rule (google.auth.downscoped.AccessBoundaryRule): The access boundary rule,
+ limiting the access that a downscoped credential will have, to be added to
+ the existing rules.
+ Raises:
+ InvalidType: If any of the rules are not a valid type.
+ InvalidValue: If the provided rules exceed the maximum allowed.
+ """
+ if len(self.rules) == _MAX_ACCESS_BOUNDARY_RULES_COUNT:
+ raise exceptions.InvalidValue(
+ "Credential access boundary rules can have a maximum of {} rules.".format(
+ _MAX_ACCESS_BOUNDARY_RULES_COUNT
+ )
+ )
+ if not isinstance(rule, AccessBoundaryRule):
+ raise exceptions.InvalidType(
+ "The provided rule does not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
+ )
+ self._rules.append(rule)
+
+ def to_json(self):
+ """Generates the dictionary representation of the Credential Access Boundary.
+ This uses the format expected by the Security Token Service API as documented in
+ `Defining a Credential Access Boundary`_.
+
+ .. _Defining a Credential Access Boundary:
+ https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary
+
+ Returns:
+ Mapping: Credential Access Boundary Rule represented in a dictionary object.
+ """
+ rules = []
+ for access_boundary_rule in self.rules:
+ rules.append(access_boundary_rule.to_json())
+
+ return {"accessBoundary": {"accessBoundaryRules": rules}}
+
+
+class AccessBoundaryRule(object):
+ """Defines an access boundary rule which contains information on the resource that
+ the rule applies to, the upper bound of the permissions that are available on that
+ resource and an optional condition to further restrict permissions.
+ """
+
+ def __init__(
+ self, available_resource, available_permissions, availability_condition=None
+ ):
+ """Instantiates a single access boundary rule.
+
+ Args:
+ available_resource (str): The full resource name of the Cloud Storage bucket
+ that the rule applies to. Use the format
+ "//storage.googleapis.com/projects/_/buckets/bucket-name".
+ available_permissions (Sequence[str]): A list defining the upper bound that
+ the downscoped token will have on the available permissions for the
+ resource. Each value is the identifier for an IAM predefined role or
+ custom role, with the prefix "inRole:". For example:
+ "inRole:roles/storage.objectViewer".
+ Only the permissions in these roles will be available.
+ availability_condition (Optional[google.auth.downscoped.AvailabilityCondition]):
+ Optional condition that restricts the availability of permissions to
+ specific Cloud Storage objects.
+
+ Raises:
+ InvalidType: If any of the parameters are not of the expected types.
+ InvalidValue: If any of the parameters are not of the expected values.
+ """
+ self.available_resource = available_resource
+ self.available_permissions = available_permissions
+ self.availability_condition = availability_condition
+
+ @property
+ def available_resource(self):
+ """Returns the current available resource.
+
+ Returns:
+ str: The current available resource.
+ """
+ return self._available_resource
+
+ @available_resource.setter
+ def available_resource(self, value):
+ """Updates the current available resource.
+
+ Args:
+ value (str): The updated value of the available resource.
+
+ Raises:
+ google.auth.exceptions.InvalidType: If the value is not a string.
+ """
+ if not isinstance(value, str):
+ raise exceptions.InvalidType(
+ "The provided available_resource is not a string."
+ )
+ self._available_resource = value
+
+ @property
+ def available_permissions(self):
+ """Returns the current available permissions.
+
+ Returns:
+ Tuple[str, ...]: The current available permissions. These are returned
+ as an immutable tuple to prevent modification.
+ """
+ return tuple(self._available_permissions)
+
+ @available_permissions.setter
+ def available_permissions(self, value):
+ """Updates the current available permissions.
+
+ Args:
+ value (Sequence[str]): The updated value of the available permissions.
+
+ Raises:
+ InvalidType: If the value is not a list of strings.
+ InvalidValue: If the value is not valid.
+ """
+ for available_permission in value:
+ if not isinstance(available_permission, str):
+ raise exceptions.InvalidType(
+ "Provided available_permissions are not a list of strings."
+ )
+ if available_permission.find("inRole:") != 0:
+ raise exceptions.InvalidValue(
+ "available_permissions must be prefixed with 'inRole:'."
+ )
+ # Make a copy of the original list.
+ self._available_permissions = list(value)
+
+ @property
+ def availability_condition(self):
+ """Returns the current availability condition.
+
+ Returns:
+ Optional[google.auth.downscoped.AvailabilityCondition]: The current
+ availability condition.
+ """
+ return self._availability_condition
+
+ @availability_condition.setter
+ def availability_condition(self, value):
+ """Updates the current availability condition.
+
+ Args:
+ value (Optional[google.auth.downscoped.AvailabilityCondition]): The updated
+ value of the availability condition.
+
+ Raises:
+ google.auth.exceptions.InvalidType: If the value is not of type google.auth.downscoped.AvailabilityCondition
+ or None.
+ """
+ if not isinstance(value, AvailabilityCondition) and value is not None:
+ raise exceptions.InvalidType(
+ "The provided availability_condition is not a 'google.auth.downscoped.AvailabilityCondition' or None."
+ )
+ self._availability_condition = value
+
+ def to_json(self):
+ """Generates the dictionary representation of the access boundary rule.
+ This uses the format expected by the Security Token Service API as documented in
+ `Defining a Credential Access Boundary`_.
+
+ .. _Defining a Credential Access Boundary:
+ https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary
+
+ Returns:
+ Mapping: The access boundary rule represented in a dictionary object.
+ """
+ json = {
+ "availablePermissions": list(self.available_permissions),
+ "availableResource": self.available_resource,
+ }
+ if self.availability_condition:
+ json["availabilityCondition"] = self.availability_condition.to_json()
+ return json
+
+
+class AvailabilityCondition(object):
+ """An optional condition that can be used as part of a Credential Access Boundary
+ to further restrict permissions."""
+
+ def __init__(self, expression, title=None, description=None):
+ """Instantiates an availability condition using the provided expression and
+ optional title or description.
+
+ Args:
+ expression (str): A condition expression that specifies the Cloud Storage
+ objects where permissions are available. For example, this expression
+ makes permissions available for objects whose name starts with "customer-a":
+ "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-a')"
+ title (Optional[str]): An optional short string that identifies the purpose of
+ the condition.
+ description (Optional[str]): Optional details about the purpose of the condition.
+
+ Raises:
+ InvalidType: If any of the parameters are not of the expected types.
+ InvalidValue: If any of the parameters are not of the expected values.
+ """
+ self.expression = expression
+ self.title = title
+ self.description = description
+
+ @property
+ def expression(self):
+ """Returns the current condition expression.
+
+ Returns:
+ str: The current condition expression.
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, value):
+ """Updates the current condition expression.
+
+ Args:
+ value (str): The updated value of the condition expression.
+
+ Raises:
+ google.auth.exceptions.InvalidType: If the value is not of type string.
+ """
+ if not isinstance(value, str):
+ raise exceptions.InvalidType("The provided expression is not a string.")
+ self._expression = value
+
+ @property
+ def title(self):
+ """Returns the current title.
+
+ Returns:
+ Optional[str]: The current title.
+ """
+ return self._title
+
+ @title.setter
+ def title(self, value):
+ """Updates the current title.
+
+ Args:
+ value (Optional[str]): The updated value of the title.
+
+ Raises:
+ google.auth.exceptions.InvalidType: If the value is not of type string or None.
+ """
+ if not isinstance(value, str) and value is not None:
+ raise exceptions.InvalidType("The provided title is not a string or None.")
+ self._title = value
+
+ @property
+ def description(self):
+ """Returns the current description.
+
+ Returns:
+ Optional[str]: The current description.
+ """
+ return self._description
+
+ @description.setter
+ def description(self, value):
+ """Updates the current description.
+
+ Args:
+ value (Optional[str]): The updated value of the description.
+
+ Raises:
+ google.auth.exceptions.InvalidType: If the value is not of type string or None.
+ """
+ if not isinstance(value, str) and value is not None:
+ raise exceptions.InvalidType(
+ "The provided description is not a string or None."
+ )
+ self._description = value
+
+ def to_json(self):
+ """Generates the dictionary representation of the availability condition.
+ This uses the format expected by the Security Token Service API as documented in
+ `Defining a Credential Access Boundary`_.
+
+ .. _Defining a Credential Access Boundary:
+ https://cloud.google.com/iam/docs/downscoping-short-lived-credentials#define-boundary
+
+ Returns:
+ Mapping[str, str]: The availability condition represented in a dictionary
+ object.
+ """
+ json = {"expression": self.expression}
+ if self.title:
+ json["title"] = self.title
+ if self.description:
+ json["description"] = self.description
+ return json
+
+
+class Credentials(credentials.CredentialsWithQuotaProject):
+ """Defines a set of Google credentials that are downscoped from an existing set
+ of Google OAuth2 credentials. This is useful to restrict the Identity and Access
+ Management (IAM) permissions that a short-lived credential can use.
+ The common pattern of usage is to have a token broker with elevated access
+ generate these downscoped credentials from higher access source credentials and
+ pass the downscoped short-lived access tokens to a token consumer via some
+ secure authenticated channel for limited access to Google Cloud Storage
+ resources.
+ """
+
+ def __init__(
+ self, source_credentials, credential_access_boundary, quota_project_id=None
+ ):
+ """Instantiates a downscoped credentials object using the provided source
+ credentials and credential access boundary rules.
+ To downscope permissions of a source credential, a Credential Access Boundary
+ that specifies which resources the new credential can access, as well as an
+ upper bound on the permissions that are available on each resource, has to be
+ defined. A downscoped credential can then be instantiated using the source
+ credential and the Credential Access Boundary.
+
+ Args:
+ source_credentials (google.auth.credentials.Credentials): The source credentials
+ to be downscoped based on the provided Credential Access Boundary rules.
+ credential_access_boundary (google.auth.downscoped.CredentialAccessBoundary):
+ The Credential Access Boundary which contains a list of access boundary
+ rules. Each rule contains information on the resource that the rule applies to,
+ the upper bound of the permissions that are available on that resource and an
+ optional condition to further restrict permissions.
+ quota_project_id (Optional[str]): The optional quota project ID.
+ Raises:
+ google.auth.exceptions.RefreshError: If the source credentials
+ return an error on token refresh.
+ google.auth.exceptions.OAuthError: If the STS token exchange
+ endpoint returned an error during downscoped token generation.
+ """
+
+ super(Credentials, self).__init__()
+ self._source_credentials = source_credentials
+ self._credential_access_boundary = credential_access_boundary
+ self._quota_project_id = quota_project_id
+ self._sts_client = sts.Client(_STS_TOKEN_URL)
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ # Generate an access token from the source credentials.
+ self._source_credentials.refresh(request)
+ now = _helpers.utcnow()
+ # Exchange the access token for a downscoped access token.
+ response_data = self._sts_client.exchange_token(
+ request=request,
+ grant_type=_STS_GRANT_TYPE,
+ subject_token=self._source_credentials.token,
+ subject_token_type=_STS_SUBJECT_TOKEN_TYPE,
+ requested_token_type=_STS_REQUESTED_TOKEN_TYPE,
+ additional_options=self._credential_access_boundary.to_json(),
+ )
+ self.token = response_data.get("access_token")
+ # For downscoping CAB flow, the STS endpoint may not return the expiration
+ # field for some flows. The generated downscoped token should always have
+ # the same expiration time as the source credentials. When no expires_in
+ # field is returned in the response, we can just get the expiration time
+ # from the source credentials.
+ if response_data.get("expires_in"):
+ lifetime = datetime.timedelta(seconds=response_data.get("expires_in"))
+ self.expiry = now + lifetime
+ else:
+ self.expiry = self._source_credentials.expiry
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ self._source_credentials,
+ self._credential_access_boundary,
+ quota_project_id=quota_project_id,
+ )
diff --git a/contrib/python/google-auth/py3/google/auth/environment_vars.py b/contrib/python/google-auth/py3/google/auth/environment_vars.py
new file mode 100644
index 0000000000..81f31571eb
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/environment_vars.py
@@ -0,0 +1,84 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Environment variables used by :mod:`google.auth`."""
+
+
+PROJECT = "GOOGLE_CLOUD_PROJECT"
+"""Environment variable defining default project.
+
+This used by :func:`google.auth.default` to explicitly set a project ID. This
+environment variable is also used by the Google Cloud Python Library.
+"""
+
+LEGACY_PROJECT = "GCLOUD_PROJECT"
+"""Previously used environment variable defining the default project.
+
+This environment variable is used instead of the current one in some
+situations (such as Google App Engine).
+"""
+
+GOOGLE_CLOUD_QUOTA_PROJECT = "GOOGLE_CLOUD_QUOTA_PROJECT"
+"""Environment variable defining the project to be used for
+quota and billing."""
+
+CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS"
+"""Environment variable defining the location of Google application default
+credentials."""
+
+# The environment variable name which can replace ~/.config if set.
+CLOUD_SDK_CONFIG_DIR = "CLOUDSDK_CONFIG"
+"""Environment variable defines the location of Google Cloud SDK's config
+files."""
+
+# These two variables allow for customization of the addresses used when
+# contacting the GCE metadata service.
+GCE_METADATA_HOST = "GCE_METADATA_HOST"
+"""Environment variable providing an alternate hostname or host:port to be
+used for GCE metadata requests.
+
+This environment variable was originally named GCE_METADATA_ROOT. The system will
+check this environment variable first; should there be no value present,
+the system will fall back to the old variable.
+"""
+
+GCE_METADATA_ROOT = "GCE_METADATA_ROOT"
+"""Old environment variable for GCE_METADATA_HOST."""
+
+GCE_METADATA_IP = "GCE_METADATA_IP"
+"""Environment variable providing an alternate ip:port to be used for ip-only
+GCE metadata requests."""
+
+GOOGLE_API_USE_CLIENT_CERTIFICATE = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
+"""Environment variable controlling whether to use client certificate or not.
+
+The default value is false. Users have to explicitly set this value to true
+in order to use client certificate to establish a mutual TLS channel."""
+
+LEGACY_APPENGINE_RUNTIME = "APPENGINE_RUNTIME"
+"""Gen1 environment variable defining the App Engine Runtime.
+
+Used to distinguish between GAE gen1 and GAE gen2+.
+"""
+
+# AWS environment variables used with AWS workload identity pools to retrieve
+# AWS security credentials and the AWS region needed to create a serialized
+# signed requests to the AWS STS GetCallerIdentity API that can be exchanged
+# for Google access tokens via the GCP STS endpoint.
+# When not available the AWS metadata server is used to retrieve these values.
+AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID"
+AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY"
+AWS_SESSION_TOKEN = "AWS_SESSION_TOKEN"
+AWS_REGION = "AWS_REGION"
+AWS_DEFAULT_REGION = "AWS_DEFAULT_REGION"
diff --git a/contrib/python/google-auth/py3/google/auth/exceptions.py b/contrib/python/google-auth/py3/google/auth/exceptions.py
new file mode 100644
index 0000000000..fcbe61b746
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/exceptions.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exceptions used in the google.auth package."""
+
+
+class GoogleAuthError(Exception):
+ """Base class for all google.auth errors."""
+
+ def __init__(self, *args, **kwargs):
+ super(GoogleAuthError, self).__init__(*args)
+ retryable = kwargs.get("retryable", False)
+ self._retryable = retryable
+
+ @property
+ def retryable(self):
+ return self._retryable
+
+
+class TransportError(GoogleAuthError):
+ """Used to indicate an error occurred during an HTTP request."""
+
+
+class RefreshError(GoogleAuthError):
+ """Used to indicate that refreshing the credentials' access token
+ failed."""
+
+
+class UserAccessTokenError(GoogleAuthError):
+ """Used to indicate ``gcloud auth print-access-token`` command failed."""
+
+
+class DefaultCredentialsError(GoogleAuthError):
+ """Used to indicate that acquiring default credentials failed."""
+
+
+class MutualTLSChannelError(GoogleAuthError):
+ """Used to indicate that mutual TLS channel creation is failed, or mutual
+ TLS channel credentials is missing or invalid."""
+
+
+class ClientCertError(GoogleAuthError):
+ """Used to indicate that client certificate is missing or invalid."""
+
+ @property
+ def retryable(self):
+ return False
+
+
+class OAuthError(GoogleAuthError):
+ """Used to indicate an error occurred during an OAuth related HTTP
+ request."""
+
+
+class ReauthFailError(RefreshError):
+ """An exception for when reauth failed."""
+
+ def __init__(self, message=None, **kwargs):
+ super(ReauthFailError, self).__init__(
+ "Reauthentication failed. {0}".format(message), **kwargs
+ )
+
+
+class ReauthSamlChallengeFailError(ReauthFailError):
+ """An exception for SAML reauth challenge failures."""
+
+
+class MalformedError(DefaultCredentialsError, ValueError):
+ """An exception for malformed data."""
+
+
+class InvalidResource(DefaultCredentialsError, ValueError):
+ """An exception for URL error."""
+
+
+class InvalidOperation(DefaultCredentialsError, ValueError):
+ """An exception for invalid operation."""
+
+
+class InvalidValue(DefaultCredentialsError, ValueError):
+ """Used to wrap general ValueError of python."""
+
+
+class InvalidType(DefaultCredentialsError, TypeError):
+ """Used to wrap general TypeError of python."""
+
+
+class OSError(DefaultCredentialsError, EnvironmentError):
+ """Used to wrap EnvironmentError(OSError after python3.3)."""
diff --git a/contrib/python/google-auth/py3/google/auth/external_account.py b/contrib/python/google-auth/py3/google/auth/external_account.py
new file mode 100644
index 0000000000..c45e6f2133
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/external_account.py
@@ -0,0 +1,523 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""External Account Credentials.
+
+This module provides credentials that exchange workload identity pool external
+credentials for Google access tokens. This facilitates accessing Google Cloud
+Platform resources from on-prem and non-Google Cloud platforms (e.g. AWS,
+Microsoft Azure, OIDC identity providers), using native credentials retrieved
+from the current environment without the need to copy, save and manage
+long-lived service account credentials.
+
+Specifically, this is intended to use access tokens acquired using the GCP STS
+token exchange endpoint following the `OAuth 2.0 Token Exchange`_ spec.
+
+.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693
+"""
+
+import abc
+import copy
+import datetime
+import io
+import json
+import re
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import impersonated_credentials
+from google.auth import metrics
+from google.oauth2 import sts
+from google.oauth2 import utils
+
+# External account JSON type identifier.
+_EXTERNAL_ACCOUNT_JSON_TYPE = "external_account"
+# The token exchange grant_type used for exchanging credentials.
+_STS_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+# The token exchange requested_token_type. This is always an access_token.
+_STS_REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+# Cloud resource manager URL used to retrieve project information.
+_CLOUD_RESOURCE_MANAGER = "https://cloudresourcemanager.googleapis.com/v1/projects/"
+
+_DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
+
+
+class Credentials(
+ credentials.Scoped,
+ credentials.CredentialsWithQuotaProject,
+ credentials.CredentialsWithTokenUri,
+ metaclass=abc.ABCMeta,
+):
+ """Base class for all external account credentials.
+
+ This is used to instantiate Credentials for exchanging external account
+ credentials for Google access token and authorizing requests to Google APIs.
+ The base class implements the common logic for exchanging external account
+ credentials for Google access tokens.
+ """
+
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source,
+ service_account_impersonation_url=None,
+ service_account_impersonation_options=None,
+ client_id=None,
+ client_secret=None,
+ token_info_url=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ workforce_pool_user_project=None,
+ universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+ trust_boundary=None,
+ ):
+ """Instantiates an external account credentials object.
+
+ Args:
+ audience (str): The STS audience field.
+ subject_token_type (str): The subject token type.
+ token_url (str): The STS endpoint URL.
+ credential_source (Mapping): The credential source dictionary.
+ service_account_impersonation_url (Optional[str]): The optional service account
+ impersonation generateAccessToken URL.
+ client_id (Optional[str]): The optional client ID.
+ client_secret (Optional[str]): The optional client secret.
+ token_info_url (str): The optional STS endpoint URL for token introspection.
+ quota_project_id (Optional[str]): The optional quota project ID.
+ scopes (Optional[Sequence[str]]): Optional scopes to request during the
+ authorization grant.
+ default_scopes (Optional[Sequence[str]]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+            workforce_pool_user_project (Optional[str]): The optional workforce pool user
+ project number when the credential corresponds to a workforce pool and not
+ a workload identity pool. The underlying principal must still have
+ serviceusage.services.use IAM permission to use the project for
+ billing/quota.
+ universe_domain (str): The universe domain. The default universe
+ domain is googleapis.com.
+ trust_boundary (str): String representation of trust boundary meta.
+ Raises:
+ google.auth.exceptions.RefreshError: If the generateAccessToken
+ endpoint returned an error.
+ """
+ super(Credentials, self).__init__()
+ self._audience = audience
+ self._subject_token_type = subject_token_type
+ self._token_url = token_url
+ self._token_info_url = token_info_url
+ self._credential_source = credential_source
+ self._service_account_impersonation_url = service_account_impersonation_url
+ self._service_account_impersonation_options = (
+ service_account_impersonation_options or {}
+ )
+ self._client_id = client_id
+ self._client_secret = client_secret
+ self._quota_project_id = quota_project_id
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+ self._workforce_pool_user_project = workforce_pool_user_project
+ self._universe_domain = universe_domain or _DEFAULT_UNIVERSE_DOMAIN
+ self._trust_boundary = "0" # expose a placeholder trust boundary value.
+
+ if self._client_id:
+ self._client_auth = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, self._client_id, self._client_secret
+ )
+ else:
+ self._client_auth = None
+ self._sts_client = sts.Client(self._token_url, self._client_auth)
+
+ self._metrics_options = self._create_default_metrics_options()
+
+ if self._service_account_impersonation_url:
+ self._impersonated_credentials = self._initialize_impersonated_credentials()
+ else:
+ self._impersonated_credentials = None
+ self._project_id = None
+
+ if not self.is_workforce_pool and self._workforce_pool_user_project:
+ # Workload identity pools do not support workforce pool user projects.
+ raise exceptions.InvalidValue(
+ "workforce_pool_user_project should not be set for non-workforce pool "
+ "credentials"
+ )
+
+ @property
+ def info(self):
+ """Generates the dictionary representation of the current credentials.
+
+ Returns:
+ Mapping: The dictionary representation of the credentials. This is the
+ reverse of "from_info" defined on the subclasses of this class. It is
+            useful for serializing the current credentials so it can be deserialized
+ later.
+ """
+ config_info = self._constructor_args()
+ config_info.update(
+ type=_EXTERNAL_ACCOUNT_JSON_TYPE,
+ service_account_impersonation=config_info.pop(
+ "service_account_impersonation_options", None
+ ),
+ )
+ config_info.pop("scopes", None)
+ config_info.pop("default_scopes", None)
+ return {key: value for key, value in config_info.items() if value is not None}
+
+ def _constructor_args(self):
+ args = {
+ "audience": self._audience,
+ "subject_token_type": self._subject_token_type,
+ "token_url": self._token_url,
+ "token_info_url": self._token_info_url,
+ "service_account_impersonation_url": self._service_account_impersonation_url,
+ "service_account_impersonation_options": copy.deepcopy(
+ self._service_account_impersonation_options
+ )
+ or None,
+ "credential_source": copy.deepcopy(self._credential_source),
+ "quota_project_id": self._quota_project_id,
+ "client_id": self._client_id,
+ "client_secret": self._client_secret,
+ "workforce_pool_user_project": self._workforce_pool_user_project,
+ "scopes": self._scopes,
+ "default_scopes": self._default_scopes,
+ "universe_domain": self._universe_domain,
+ }
+ if not self.is_workforce_pool:
+ args.pop("workforce_pool_user_project")
+ return args
+
+ @property
+ def service_account_email(self):
+ """Returns the service account email if service account impersonation is used.
+
+ Returns:
+ Optional[str]: The service account email if impersonation is used. Otherwise
+ None is returned.
+ """
+ if self._service_account_impersonation_url:
+            # Parse email from URL. The format looks as follows:
+ # https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/name@project-id.iam.gserviceaccount.com:generateAccessToken
+ url = self._service_account_impersonation_url
+ start_index = url.rfind("/")
+ end_index = url.find(":generateAccessToken")
+ if start_index != -1 and end_index != -1 and start_index < end_index:
+ start_index = start_index + 1
+ return url[start_index:end_index]
+ return None
+
+ @property
+ def is_user(self):
+ """Returns whether the credentials represent a user (True) or workload (False).
+ Workloads behave similarly to service accounts. Currently workloads will use
+ service account impersonation but will eventually not require impersonation.
+ As a result, this property is more reliable than the service account email
+ property in determining if the credentials represent a user or workload.
+
+ Returns:
+ bool: True if the credentials represent a user. False if they represent a
+ workload.
+ """
+ # If service account impersonation is used, the credentials will always represent a
+ # service account.
+ if self._service_account_impersonation_url:
+ return False
+ return self.is_workforce_pool
+
+ @property
+ def is_workforce_pool(self):
+ """Returns whether the credentials represent a workforce pool (True) or
+ workload (False) based on the credentials' audience.
+
+ This will also return True for impersonated workforce pool credentials.
+
+ Returns:
+ bool: True if the credentials represent a workforce pool. False if they
+ represent a workload.
+ """
+ # Workforce pools representing users have the following audience format:
+ # //iam.googleapis.com/locations/$location/workforcePools/$poolId/providers/$providerId
+ p = re.compile(r"//iam\.googleapis\.com/locations/[^/]+/workforcePools/")
+ return p.match(self._audience or "") is not None
+
+ @property
+ def requires_scopes(self):
+ """Checks if the credentials requires scopes.
+
+ Returns:
+ bool: True if there are no scopes set otherwise False.
+ """
+ return not self._scopes and not self._default_scopes
+
+ @property
+ def project_number(self):
+ """Optional[str]: The project number corresponding to the workload identity pool."""
+
+ # STS audience pattern:
+ # //iam.googleapis.com/projects/$PROJECT_NUMBER/locations/...
+ components = self._audience.split("/")
+ try:
+ project_index = components.index("projects")
+ if project_index + 1 < len(components):
+ return components[project_index + 1] or None
+ except ValueError:
+ return None
+
+ @property
+ def token_info_url(self):
+ """Optional[str]: The STS token introspection endpoint."""
+
+ return self._token_info_url
+
+ @_helpers.copy_docstring(credentials.Scoped)
+ def with_scopes(self, scopes, default_scopes=None):
+ kwargs = self._constructor_args()
+ kwargs.update(scopes=scopes, default_scopes=default_scopes)
+ scoped = self.__class__(**kwargs)
+ scoped._metrics_options = self._metrics_options
+ return scoped
+
+ @abc.abstractmethod
+ def retrieve_subject_token(self, request):
+ """Retrieves the subject token using the credential_source object.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ Returns:
+ str: The retrieved subject token.
+ """
+ # pylint: disable=missing-raises-doc
+ # (pylint doesn't recognize that this is abstract)
+ raise NotImplementedError("retrieve_subject_token must be implemented")
+
+ def get_project_id(self, request):
+ """Retrieves the project ID corresponding to the workload identity or workforce pool.
+ For workforce pool credentials, it returns the project ID corresponding to
+ the workforce_pool_user_project.
+
+ When not determinable, None is returned.
+
+ This is introduced to support the current pattern of using the Auth library:
+
+ credentials, project_id = google.auth.default()
+
+ The resource may not have permission (resourcemanager.projects.get) to
+ call this API or the required scopes may not be selected:
+ https://cloud.google.com/resource-manager/reference/rest/v1/projects/get#authorization-scopes
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ Returns:
+ Optional[str]: The project ID corresponding to the workload identity pool
+ or workforce pool if determinable.
+ """
+ if self._project_id:
+ # If already retrieved, return the cached project ID value.
+ return self._project_id
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # Scopes are required in order to retrieve a valid access token.
+ project_number = self.project_number or self._workforce_pool_user_project
+ if project_number and scopes:
+ headers = {}
+ url = _CLOUD_RESOURCE_MANAGER + project_number
+ self.before_request(request, "GET", url, headers)
+ response = request(url=url, method="GET", headers=headers)
+
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+ response_data = json.loads(response_body)
+
+ if response.status == 200:
+ # Cache result as this field is immutable.
+ self._project_id = response_data.get("projectId")
+ return self._project_id
+
+ return None
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ if self._impersonated_credentials:
+ self._impersonated_credentials.refresh(request)
+ self.token = self._impersonated_credentials.token
+ self.expiry = self._impersonated_credentials.expiry
+ else:
+ now = _helpers.utcnow()
+ additional_options = None
+ # Do not pass workforce_pool_user_project when client authentication
+ # is used. The client ID is sufficient for determining the user project.
+ if self._workforce_pool_user_project and not self._client_id:
+ additional_options = {"userProject": self._workforce_pool_user_project}
+ additional_headers = {
+ metrics.API_CLIENT_HEADER: metrics.byoid_metrics_header(
+ self._metrics_options
+ )
+ }
+ response_data = self._sts_client.exchange_token(
+ request=request,
+ grant_type=_STS_GRANT_TYPE,
+ subject_token=self.retrieve_subject_token(request),
+ subject_token_type=self._subject_token_type,
+ audience=self._audience,
+ scopes=scopes,
+ requested_token_type=_STS_REQUESTED_TOKEN_TYPE,
+ additional_options=additional_options,
+ additional_headers=additional_headers,
+ )
+ self.token = response_data.get("access_token")
+ expires_in = response_data.get("expires_in")
+ # Some services do not respect the OAUTH2.0 RFC and send expires_in as a
+ # JSON String.
+ if isinstance(expires_in, str):
+ expires_in = int(expires_in)
+
+ lifetime = datetime.timedelta(seconds=expires_in)
+
+ self.expiry = now + lifetime
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ # Return copy of instance with the provided quota project ID.
+ kwargs = self._constructor_args()
+ kwargs.update(quota_project_id=quota_project_id)
+ new_cred = self.__class__(**kwargs)
+ new_cred._metrics_options = self._metrics_options
+ return new_cred
+
+ @_helpers.copy_docstring(credentials.CredentialsWithTokenUri)
+ def with_token_uri(self, token_uri):
+ kwargs = self._constructor_args()
+ kwargs.update(token_url=token_uri)
+ new_cred = self.__class__(**kwargs)
+ new_cred._metrics_options = self._metrics_options
+ return new_cred
+
+ def _initialize_impersonated_credentials(self):
+ """Generates an impersonated credentials.
+
+ For more details, see `projects.serviceAccounts.generateAccessToken`_.
+
+ .. _projects.serviceAccounts.generateAccessToken: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateAccessToken
+
+ Returns:
+ impersonated_credentials.Credential: The impersonated credentials
+ object.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the generateAccessToken
+ endpoint returned an error.
+ """
+ # Return copy of instance with no service account impersonation.
+ kwargs = self._constructor_args()
+ kwargs.update(
+ service_account_impersonation_url=None,
+ service_account_impersonation_options={},
+ )
+ source_credentials = self.__class__(**kwargs)
+ source_credentials._metrics_options = self._metrics_options
+
+ # Determine target_principal.
+ target_principal = self.service_account_email
+ if not target_principal:
+ raise exceptions.RefreshError(
+ "Unable to determine target principal from service account impersonation URL."
+ )
+
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # Initialize and return impersonated credentials.
+ return impersonated_credentials.Credentials(
+ source_credentials=source_credentials,
+ target_principal=target_principal,
+ target_scopes=scopes,
+ quota_project_id=self._quota_project_id,
+ iam_endpoint_override=self._service_account_impersonation_url,
+ lifetime=self._service_account_impersonation_options.get(
+ "token_lifetime_seconds"
+ ),
+ )
+
+ def _create_default_metrics_options(self):
+ metrics_options = {}
+ if self._service_account_impersonation_url:
+ metrics_options["sa-impersonation"] = "true"
+ else:
+ metrics_options["sa-impersonation"] = "false"
+ if self._service_account_impersonation_options.get("token_lifetime_seconds"):
+ metrics_options["config-lifetime"] = "true"
+ else:
+ metrics_options["config-lifetime"] = "false"
+
+ return metrics_options
+
+ @classmethod
+ def from_info(cls, info, **kwargs):
+ """Creates a Credentials instance from parsed external account info.
+
+ Args:
+ info (Mapping[str, str]): The external account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.identity_pool.Credentials: The constructed
+ credentials.
+
+ Raises:
+ InvalidValue: For invalid parameters.
+ """
+ return cls(
+ audience=info.get("audience"),
+ subject_token_type=info.get("subject_token_type"),
+ token_url=info.get("token_url"),
+ token_info_url=info.get("token_info_url"),
+ service_account_impersonation_url=info.get(
+ "service_account_impersonation_url"
+ ),
+ service_account_impersonation_options=info.get(
+ "service_account_impersonation"
+ )
+ or {},
+ client_id=info.get("client_id"),
+ client_secret=info.get("client_secret"),
+ credential_source=info.get("credential_source"),
+ quota_project_id=info.get("quota_project_id"),
+ workforce_pool_user_project=info.get("workforce_pool_user_project"),
+ universe_domain=info.get("universe_domain", _DEFAULT_UNIVERSE_DOMAIN),
+ **kwargs
+ )
+
+ @classmethod
+ def from_file(cls, filename, **kwargs):
+ """Creates a Credentials instance from an external account json file.
+
+ Args:
+ filename (str): The path to the external account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.identity_pool.Credentials: The constructed
+ credentials.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)
+ return cls.from_info(data, **kwargs)
diff --git a/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py b/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py
new file mode 100644
index 0000000000..a2d4edf6ff
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/external_account_authorized_user.py
@@ -0,0 +1,350 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""External Account Authorized User Credentials.
+This module provides credentials based on OAuth 2.0 access and refresh tokens.
+These credentials usually access resources on behalf of a user (resource
+owner).
+
+Specifically, these are sourced using external identities via Workforce Identity Federation.
+
+Obtaining the initial access and refresh token can be done through the Google Cloud CLI.
+
+Example credential:
+{
+ "type": "external_account_authorized_user",
+ "audience": "//iam.googleapis.com/locations/global/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID",
+ "refresh_token": "refreshToken",
+ "token_url": "https://sts.googleapis.com/v1/oauth/token",
+ "token_info_url": "https://sts.googleapis.com/v1/instrospect",
+ "client_id": "clientId",
+ "client_secret": "clientSecret"
+}
+"""
+
+import datetime
+import io
+import json
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.oauth2 import sts
+from google.oauth2 import utils
+
+_EXTERNAL_ACCOUNT_AUTHORIZED_USER_JSON_TYPE = "external_account_authorized_user"
+
+
+class Credentials(
+ credentials.CredentialsWithQuotaProject,
+ credentials.ReadOnlyScoped,
+ credentials.CredentialsWithTokenUri,
+):
+ """Credentials for External Account Authorized Users.
+
+ This is used to instantiate Credentials for exchanging refresh tokens from
+ authorized users for Google access token and authorizing requests to Google
+ APIs.
+
+ The credentials are considered immutable. If you want to modify the
+ quota project, use `with_quota_project` and if you want to modify the token
+ uri, use `with_token_uri`.
+ """
+
+ def __init__(
+ self,
+ token=None,
+ expiry=None,
+ refresh_token=None,
+ audience=None,
+ client_id=None,
+ client_secret=None,
+ token_url=None,
+ token_info_url=None,
+ revoke_url=None,
+ scopes=None,
+ quota_project_id=None,
+ ):
+ """Instantiates a external account authorized user credentials object.
+
+ Args:
+ token (str): The OAuth 2.0 access token. Can be None if refresh information
+ is provided.
+ expiry (datetime.datetime): The optional expiration datetime of the OAuth 2.0 access
+ token.
+ refresh_token (str): The optional OAuth 2.0 refresh token. If specified,
+ credentials can be refreshed.
+ audience (str): The optional STS audience which contains the resource name for the workforce
+ pool and the provider identifier in that pool.
+ client_id (str): The OAuth 2.0 client ID. Must be specified for refresh, can be left as
+ None if the token can not be refreshed.
+ client_secret (str): The OAuth 2.0 client secret. Must be specified for refresh, can be
+ left as None if the token can not be refreshed.
+ token_url (str): The optional STS token exchange endpoint for refresh. Must be specified for
+ refresh, can be left as None if the token can not be refreshed.
+ token_info_url (str): The optional STS endpoint URL for token introspection.
+ revoke_url (str): The optional STS endpoint URL for revoking tokens.
+ quota_project_id (str): The optional project ID used for quota and billing.
+ This project may be different from the project used to
+ create the credentials.
+
+ Returns:
+ google.auth.external_account_authorized_user.Credentials: The
+ constructed credentials.
+ """
+ super(Credentials, self).__init__()
+
+ self.token = token
+ self.expiry = expiry
+ self._audience = audience
+ self._refresh_token = refresh_token
+ self._token_url = token_url
+ self._token_info_url = token_info_url
+ self._client_id = client_id
+ self._client_secret = client_secret
+ self._revoke_url = revoke_url
+ self._quota_project_id = quota_project_id
+ self._scopes = scopes
+
+ if not self.valid and not self.can_refresh:
+ raise exceptions.InvalidOperation(
+ "Token should be created with fields to make it valid (`token` and "
+ "`expiry`), or fields to allow it to refresh (`refresh_token`, "
+ "`token_url`, `client_id`, `client_secret`)."
+ )
+
+ self._client_auth = None
+ if self._client_id:
+ self._client_auth = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, self._client_id, self._client_secret
+ )
+ self._sts_client = sts.Client(self._token_url, self._client_auth)
+
+ @property
+ def info(self):
+ """Generates the serializable dictionary representation of the current
+ credentials.
+
+ Returns:
+ Mapping: The dictionary representation of the credentials. This is the
+ reverse of the "from_info" method defined in this class. It is
+            useful for serializing the current credentials so it can be deserialized
+ later.
+ """
+ config_info = self.constructor_args()
+ config_info.update(type=_EXTERNAL_ACCOUNT_AUTHORIZED_USER_JSON_TYPE)
+ if config_info["expiry"]:
+ config_info["expiry"] = config_info["expiry"].isoformat() + "Z"
+
+ return {key: value for key, value in config_info.items() if value is not None}
+
+ def constructor_args(self):
+ return {
+ "audience": self._audience,
+ "refresh_token": self._refresh_token,
+ "token_url": self._token_url,
+ "token_info_url": self._token_info_url,
+ "client_id": self._client_id,
+ "client_secret": self._client_secret,
+ "token": self.token,
+ "expiry": self.expiry,
+ "revoke_url": self._revoke_url,
+ "scopes": self._scopes,
+ "quota_project_id": self._quota_project_id,
+ }
+
+ @property
+ def scopes(self):
+ """Optional[str]: The OAuth 2.0 permission scopes."""
+ return self._scopes
+
+ @property
+ def requires_scopes(self):
+ """ False: OAuth 2.0 credentials have their scopes set when
+ the initial token is requested and can not be changed."""
+ return False
+
+ @property
+ def client_id(self):
+ """Optional[str]: The OAuth 2.0 client ID."""
+ return self._client_id
+
+ @property
+ def client_secret(self):
+ """Optional[str]: The OAuth 2.0 client secret."""
+ return self._client_secret
+
+ @property
+ def audience(self):
+ """Optional[str]: The STS audience which contains the resource name for the
+ workforce pool and the provider identifier in that pool."""
+ return self._audience
+
+ @property
+ def refresh_token(self):
+ """Optional[str]: The OAuth 2.0 refresh token."""
+ return self._refresh_token
+
+ @property
+ def token_url(self):
+ """Optional[str]: The STS token exchange endpoint for refresh."""
+ return self._token_url
+
+ @property
+ def token_info_url(self):
+ """Optional[str]: The STS endpoint for token info."""
+ return self._token_info_url
+
+ @property
+ def revoke_url(self):
+ """Optional[str]: The STS endpoint for token revocation."""
+ return self._revoke_url
+
+ @property
+ def is_user(self):
+ """ True: This credential always represents a user."""
+ return True
+
+ @property
+ def can_refresh(self):
+ return all(
+ (self._refresh_token, self._token_url, self._client_id, self._client_secret)
+ )
+
+ def get_project_id(self, request=None):
+ """Retrieves the project ID corresponding to the workload identity or workforce pool.
+ For workforce pool credentials, it returns the project ID corresponding to
+ the workforce_pool_user_project.
+
+ When not determinable, None is returned.
+
+ Args:
+ request (google.auth.transport.requests.Request): Request object.
+ Unused here, but passed from _default.default().
+
+        Returns:
+            None: The project ID is not determinable for this credential type.
+ """
+
+ return None
+
+ def to_json(self, strip=None):
+ """Utility function that creates a JSON representation of this
+ credential.
+ Args:
+ strip (Sequence[str]): Optional list of members to exclude from the
+ generated JSON.
+ Returns:
+ str: A JSON representation of this instance. When converted into
+ a dictionary, it can be passed to from_info()
+ to create a new instance.
+ """
+ strip = strip if strip else []
+ return json.dumps({k: v for (k, v) in self.info.items() if k not in strip})
+
+ def refresh(self, request):
+ """Refreshes the access token.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the credentials could
+ not be refreshed.
+ """
+ if not self.can_refresh:
+ raise exceptions.RefreshError(
+ "The credentials do not contain the necessary fields need to "
+ "refresh the access token. You must specify refresh_token, "
+ "token_url, client_id, and client_secret."
+ )
+
+ now = _helpers.utcnow()
+ response_data = self._make_sts_request(request)
+
+ self.token = response_data.get("access_token")
+
+ lifetime = datetime.timedelta(seconds=response_data.get("expires_in"))
+ self.expiry = now + lifetime
+
+ if "refresh_token" in response_data:
+ self._refresh_token = response_data["refresh_token"]
+
+ def _make_sts_request(self, request):
+ return self._sts_client.refresh_token(request, self._refresh_token)
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ kwargs = self.constructor_args()
+ kwargs.update(quota_project_id=quota_project_id)
+ return self.__class__(**kwargs)
+
+ @_helpers.copy_docstring(credentials.CredentialsWithTokenUri)
+ def with_token_uri(self, token_uri):
+ kwargs = self.constructor_args()
+ kwargs.update(token_url=token_uri)
+ return self.__class__(**kwargs)
+
+ @classmethod
+ def from_info(cls, info, **kwargs):
+ """Creates a Credentials instance from parsed external account info.
+
+ Args:
+ info (Mapping[str, str]): The external account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.external_account_authorized_user.Credentials: The
+ constructed credentials.
+
+ Raises:
+ ValueError: For invalid parameters.
+ """
+ expiry = info.get("expiry")
+ if expiry:
+ expiry = datetime.datetime.strptime(
+ expiry.rstrip("Z").split(".")[0], "%Y-%m-%dT%H:%M:%S"
+ )
+ return cls(
+ audience=info.get("audience"),
+ refresh_token=info.get("refresh_token"),
+ token_url=info.get("token_url"),
+ token_info_url=info.get("token_info_url"),
+ client_id=info.get("client_id"),
+ client_secret=info.get("client_secret"),
+ token=info.get("token"),
+ expiry=expiry,
+ revoke_url=info.get("revoke_url"),
+ quota_project_id=info.get("quota_project_id"),
+ scopes=info.get("scopes"),
+ **kwargs
+ )
+
+ @classmethod
+ def from_file(cls, filename, **kwargs):
+ """Creates a Credentials instance from an external account json file.
+
+ Args:
+ filename (str): The path to the external account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.external_account_authorized_user.Credentials: The
+ constructed credentials.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)
+ return cls.from_info(data, **kwargs)
diff --git a/contrib/python/google-auth/py3/google/auth/iam.py b/contrib/python/google-auth/py3/google/auth/iam.py
new file mode 100644
index 0000000000..e9df844178
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/iam.py
@@ -0,0 +1,99 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for using the Google `Cloud Identity and Access Management (IAM)
+API`_'s auth-related functionality.
+
+.. _Cloud Identity and Access Management (IAM) API:
+ https://cloud.google.com/iam/docs/
+"""
+
+import base64
+import http.client as http_client
+import json
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+
+_IAM_API_ROOT_URI = "https://iamcredentials.googleapis.com/v1"
+_SIGN_BLOB_URI = _IAM_API_ROOT_URI + "/projects/-/serviceAccounts/{}:signBlob?alt=json"
+
+
+class Signer(crypt.Signer):
+ """Signs messages using the IAM `signBlob API`_.
+
+ This is useful when you need to sign bytes but do not have access to the
+ credential's private key file.
+
+ .. _signBlob API:
+ https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts
+ /signBlob
+ """
+
+ def __init__(self, request, credentials, service_account_email):
+ """
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests.
+ credentials (google.auth.credentials.Credentials): The credentials
+ that will be used to authenticate the request to the IAM API.
+                The credentials must have one of the following scopes:
+
+ - https://www.googleapis.com/auth/iam
+ - https://www.googleapis.com/auth/cloud-platform
+ service_account_email (str): The service account email identifying
+ which service account to use to sign bytes. Often, this can
+ be the same as the service account email in the given
+ credentials.
+ """
+ self._request = request
+ self._credentials = credentials
+ self._service_account_email = service_account_email
+
+ def _make_signing_request(self, message):
+        """Makes a request to the IAM signBlob API."""
+ message = _helpers.to_bytes(message)
+
+ method = "POST"
+ url = _SIGN_BLOB_URI.format(self._service_account_email)
+ headers = {"Content-Type": "application/json"}
+ body = json.dumps(
+ {"payload": base64.b64encode(message).decode("utf-8")}
+ ).encode("utf-8")
+
+ self._credentials.before_request(self._request, method, url, headers)
+ response = self._request(url=url, method=method, body=body, headers=headers)
+
+ if response.status != http_client.OK:
+ raise exceptions.TransportError(
+ "Error calling the IAM signBlob API: {}".format(response.data)
+ )
+
+ return json.loads(response.data.decode("utf-8"))
+
+ @property
+ def key_id(self):
+ """Optional[str]: The key ID used to identify this private key.
+
+ .. warning::
+ This is always ``None``. The key ID used by IAM can not
+ be reliably determined ahead of time.
+ """
+ return None
+
+ @_helpers.copy_docstring(crypt.Signer)
+ def sign(self, message):
+ response = self._make_signing_request(message)
+ return base64.b64decode(response["signedBlob"])
diff --git a/contrib/python/google-auth/py3/google/auth/identity_pool.py b/contrib/python/google-auth/py3/google/auth/identity_pool.py
new file mode 100644
index 0000000000..a515353c37
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/identity_pool.py
@@ -0,0 +1,261 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Identity Pool Credentials.
+
+This module provides credentials to access Google Cloud resources from on-prem
+or non-Google Cloud platforms which support external credentials (e.g. OIDC ID
+tokens) retrieved from local file locations or local servers. This includes
+Microsoft Azure and OIDC identity providers (e.g. K8s workloads registered with
+Hub with Hub workload identity enabled).
+
+These credentials are recommended over the use of service account credentials
+in on-prem/non-Google Cloud platforms as they do not involve the management of
+long-lived service account private keys.
+
+Identity Pool Credentials are initialized using external_account
+arguments which are typically loaded from an external credentials file or
+an external credentials URL. Unlike other Credentials that can be initialized
+with a list of explicit arguments, secrets or credentials, external account
+clients use the environment and hints/guidelines provided by the
+external_account JSON file to retrieve credentials and exchange them for Google
+access tokens.
+"""
+
+try:
+ from collections.abc import Mapping
+# Python 2.7 compatibility
+except ImportError: # pragma: NO COVER
+ from collections import Mapping
+import io
+import json
+import os
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import external_account
+
+
+class Credentials(external_account.Credentials):
+ """External account credentials sourced from files and URLs."""
+
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source,
+ *args,
+ **kwargs
+ ):
+ """Instantiates an external account credentials object from a file/URL.
+
+ Args:
+ audience (str): The STS audience field.
+ subject_token_type (str): The subject token type.
+ token_url (str): The STS endpoint URL.
+ credential_source (Mapping): The credential source dictionary used to
+ provide instructions on how to retrieve external credential to be
+ exchanged for Google access tokens.
+
+ Example credential_source for url-sourced credential::
+
+ {
+ "url": "http://www.example.com",
+ "format": {
+ "type": "json",
+ "subject_token_field_name": "access_token",
+ },
+ "headers": {"foo": "bar"},
+ }
+
+ Example credential_source for file-sourced credential::
+
+ {
+ "file": "/path/to/token/file.txt"
+ }
+ args (List): Optional positional arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
+ kwargs (Mapping): Optional keyword arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error is encountered during
+ access token retrieval logic.
+ ValueError: For invalid parameters.
+
+ .. note:: Typically one of the helper constructors
+ :meth:`from_file` or
+ :meth:`from_info` are used instead of calling the constructor directly.
+ """
+
+ super(Credentials, self).__init__(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ credential_source=credential_source,
+ *args,
+ **kwargs
+ )
+ if not isinstance(credential_source, Mapping):
+ self._credential_source_file = None
+ self._credential_source_url = None
+ else:
+ self._credential_source_file = credential_source.get("file")
+ self._credential_source_url = credential_source.get("url")
+ self._credential_source_headers = credential_source.get("headers")
+ credential_source_format = credential_source.get("format", {})
+ # Get credential_source format type. When not provided, this
+ # defaults to text.
+ self._credential_source_format_type = (
+ credential_source_format.get("type") or "text"
+ )
+ # environment_id is only supported in AWS or dedicated future external
+ # account credentials.
+ if "environment_id" in credential_source:
+ raise exceptions.MalformedError(
+ "Invalid Identity Pool credential_source field 'environment_id'"
+ )
+ if self._credential_source_format_type not in ["text", "json"]:
+ raise exceptions.MalformedError(
+ "Invalid credential_source format '{}'".format(
+ self._credential_source_format_type
+ )
+ )
+ # For JSON types, get the required subject_token field name.
+ if self._credential_source_format_type == "json":
+ self._credential_source_field_name = credential_source_format.get(
+ "subject_token_field_name"
+ )
+ if self._credential_source_field_name is None:
+ raise exceptions.MalformedError(
+ "Missing subject_token_field_name for JSON credential_source format"
+ )
+ else:
+ self._credential_source_field_name = None
+
+ if self._credential_source_file and self._credential_source_url:
+ raise exceptions.MalformedError(
+ "Ambiguous credential_source. 'file' is mutually exclusive with 'url'."
+ )
+ if not self._credential_source_file and not self._credential_source_url:
+ raise exceptions.MalformedError(
+ "Missing credential_source. A 'file' or 'url' must be provided."
+ )
+
+ @_helpers.copy_docstring(external_account.Credentials)
+ def retrieve_subject_token(self, request):
+ return self._parse_token_data(
+ self._get_token_data(request),
+ self._credential_source_format_type,
+ self._credential_source_field_name,
+ )
+
+ def _get_token_data(self, request):
+ if self._credential_source_file:
+ return self._get_file_data(self._credential_source_file)
+ else:
+ return self._get_url_data(
+ request, self._credential_source_url, self._credential_source_headers
+ )
+
+ def _get_file_data(self, filename):
+ if not os.path.exists(filename):
+ raise exceptions.RefreshError("File '{}' was not found.".format(filename))
+
+ with io.open(filename, "r", encoding="utf-8") as file_obj:
+ return file_obj.read(), filename
+
+ def _get_url_data(self, request, url, headers):
+ response = request(url=url, method="GET", headers=headers)
+
+ # support both string and bytes type response.data
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != 200:
+ raise exceptions.RefreshError(
+ "Unable to retrieve Identity Pool subject token", response_body
+ )
+
+ return response_body, url
+
+ def _parse_token_data(
+ self, token_content, format_type="text", subject_token_field_name=None
+ ):
+ content, filename = token_content
+ if format_type == "text":
+ token = content
+ else:
+ try:
+ # Parse file content as JSON.
+ response_data = json.loads(content)
+ # Get the subject_token.
+ token = response_data[subject_token_field_name]
+ except (KeyError, ValueError):
+ raise exceptions.RefreshError(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ filename, subject_token_field_name
+ )
+ )
+ if not token:
+ raise exceptions.RefreshError(
+ "Missing subject_token in the credential_source file"
+ )
+ return token
+
+ def _create_default_metrics_options(self):
+ metrics_options = super(Credentials, self)._create_default_metrics_options()
+ # Check that credential source is a dict before checking for file vs url. This check needs to be done
+ # here because the external_account credential constructor needs to pass the metrics options to the
+ # impersonated credential object before the identity_pool credentials are validated.
+ if isinstance(self._credential_source, Mapping):
+ if self._credential_source.get("file"):
+ metrics_options["source"] = "file"
+ else:
+ metrics_options["source"] = "url"
+ return metrics_options
+
+ @classmethod
+ def from_info(cls, info, **kwargs):
+ """Creates an Identity Pool Credentials instance from parsed external account info.
+
+ Args:
+ info (Mapping[str, str]): The Identity Pool external account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.identity_pool.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: For invalid parameters.
+ """
+ return super(Credentials, cls).from_info(info, **kwargs)
+
+ @classmethod
+ def from_file(cls, filename, **kwargs):
+ """Creates an IdentityPool Credentials instance from an external account json file.
+
+ Args:
+ filename (str): The path to the IdentityPool external account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.identity_pool.Credentials: The constructed
+ credentials.
+ """
+ return super(Credentials, cls).from_file(filename, **kwargs)
diff --git a/contrib/python/google-auth/py3/google/auth/impersonated_credentials.py b/contrib/python/google-auth/py3/google/auth/impersonated_credentials.py
new file mode 100644
index 0000000000..c272a3ca28
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/impersonated_credentials.py
@@ -0,0 +1,462 @@
+# Copyright 2018 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Impersonated credentials.
+
+This module provides authentication for applications where local credentials
+impersonate a remote service account using `IAM Credentials API`_.
+
+This class can be used to impersonate a service account as long as the original
+Credential object has the "Service Account Token Creator" role on the target
+service account.
+
+ .. _IAM Credentials API:
+ https://cloud.google.com/iam/credentials/reference/rest/
+"""
+
+import base64
+import copy
+from datetime import datetime
+import http.client as http_client
+import json
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import metrics
+
+_IAM_SCOPE = ["https://www.googleapis.com/auth/iam"]
+
+_IAM_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken"
+)
+
+_IAM_SIGN_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:signBlob"
+)
+
+_IAM_IDTOKEN_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/"
+ + "projects/-/serviceAccounts/{}:generateIdToken"
+)
+
+_REFRESH_ERROR = "Unable to acquire impersonated credentials"
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+
+_DEFAULT_TOKEN_URI = "https://oauth2.googleapis.com/token"
+
+
+def _make_iam_token_request(
+ request, principal, headers, body, iam_endpoint_override=None
+):
+ """Makes a request to the Google Cloud IAM service for an access token.
+ Args:
+ request (Request): The Request object to use.
+ principal (str): The principal to request an access token for.
+ headers (Mapping[str, str]): Map of headers to transmit.
+ body (Mapping[str, str]): JSON Payload body for the iamcredentials
+ API call.
+        iam_endpoint_override (Optional[str]): The full IAM endpoint override
+ with the target_principal embedded. This is useful when supporting
+ impersonation with regional endpoints.
+
+ Raises:
+ google.auth.exceptions.TransportError: Raised if there is an underlying
+ HTTP connection error
+ google.auth.exceptions.RefreshError: Raised if the impersonated
+ credentials are not available. Common reasons are
+ `iamcredentials.googleapis.com` is not enabled or the
+ `Service Account Token Creator` is not assigned
+ """
+ iam_endpoint = iam_endpoint_override or _IAM_ENDPOINT.format(principal)
+
+ body = json.dumps(body).encode("utf-8")
+
+ response = request(url=iam_endpoint, method="POST", headers=headers, body=body)
+
+ # support both string and bytes type response.data
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+
+ if response.status != http_client.OK:
+ raise exceptions.RefreshError(_REFRESH_ERROR, response_body)
+
+ try:
+ token_response = json.loads(response_body)
+ token = token_response["accessToken"]
+ expiry = datetime.strptime(token_response["expireTime"], "%Y-%m-%dT%H:%M:%SZ")
+
+ return token, expiry
+
+ except (KeyError, ValueError) as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "{}: No access token or invalid expiration in response.".format(
+ _REFRESH_ERROR
+ ),
+ response_body,
+ )
+ raise new_exc from caught_exc
+
+
+class Credentials(
+ credentials.Scoped, credentials.CredentialsWithQuotaProject, credentials.Signing
+):
+ """This module defines impersonated credentials which are essentially
+ impersonated identities.
+
+ Impersonated Credentials allows credentials issued to a user or
+ service account to impersonate another. The target service account must
+ grant the originating credential principal the
+ `Service Account Token Creator`_ IAM role:
+
+ For more information about Token Creator IAM role and
+ IAMCredentials API, see
+ `Creating Short-Lived Service Account Credentials`_.
+
+ .. _Service Account Token Creator:
+ https://cloud.google.com/iam/docs/service-accounts#the_service_account_token_creator_role
+
+ .. _Creating Short-Lived Service Account Credentials:
+ https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials
+
+ Usage:
+
+ First grant source_credentials the `Service Account Token Creator`
+ role on the target account to impersonate. In this example, the
+ service account represented by svc_account.json has the
+ token creator role on
+ `impersonated-account@_project_.iam.gserviceaccount.com`.
+
+ Enable the IAMCredentials API on the source project:
+ `gcloud services enable iamcredentials.googleapis.com`.
+
+ Initialize a source credential which does not have access to
+ list bucket::
+
+ from google.oauth2 import service_account
+
+ target_scopes = [
+ 'https://www.googleapis.com/auth/devstorage.read_only']
+
+ source_credentials = (
+ service_account.Credentials.from_service_account_file(
+ '/path/to/svc_account.json',
+ scopes=target_scopes))
+
+ Now use the source credentials to acquire credentials to impersonate
+ another service account::
+
+ from google.auth import impersonated_credentials
+
+ target_credentials = impersonated_credentials.Credentials(
+ source_credentials=source_credentials,
+ target_principal='impersonated-account@_project_.iam.gserviceaccount.com',
+ target_scopes = target_scopes,
+ lifetime=500)
+
+ Resource access is granted::
+
+ client = storage.Client(credentials=target_credentials)
+ buckets = client.list_buckets(project='your_project')
+ for bucket in buckets:
+ print(bucket.name)
+ """
+
+ def __init__(
+ self,
+ source_credentials,
+ target_principal,
+ target_scopes,
+ delegates=None,
+ lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
+ quota_project_id=None,
+ iam_endpoint_override=None,
+ ):
+ """
+ Args:
+ source_credentials (google.auth.Credentials): The source credential
+                used to acquire the impersonated credentials.
+ target_principal (str): The service account to impersonate.
+ target_scopes (Sequence[str]): Scopes to request during the
+ authorization grant.
+ delegates (Sequence[str]): The chained list of delegates required
+ to grant the final access_token. If set, the sequence of
+ identities must have "Service Account Token Creator" capability
+                granted to the preceding identity.  For example, if set to
+ [serviceAccountB, serviceAccountC], the source_credential
+ must have the Token Creator role on serviceAccountB.
+ serviceAccountB must have the Token Creator on
+ serviceAccountC.
+ Finally, C must have Token Creator on target_principal.
+ If left unset, source_credential must have that role on
+ target_principal.
+ lifetime (int): Number of seconds the delegated credential should
+                be valid for (up to 3600).
+ quota_project_id (Optional[str]): The project ID used for quota and billing.
+ This project may be different from the project used to
+ create the credentials.
+            iam_endpoint_override (Optional[str]): The full IAM endpoint override
+ with the target_principal embedded. This is useful when supporting
+ impersonation with regional endpoints.
+ """
+
+ super(Credentials, self).__init__()
+
+ self._source_credentials = copy.copy(source_credentials)
+ # Service account source credentials must have the _IAM_SCOPE
+ # added to refresh correctly. User credentials cannot have
+ # their original scopes modified.
+ if isinstance(self._source_credentials, credentials.Scoped):
+ self._source_credentials = self._source_credentials.with_scopes(_IAM_SCOPE)
+ # If the source credential is service account and self signed jwt
+ # is needed, we need to create a jwt credential inside it
+ if (
+ hasattr(self._source_credentials, "_create_self_signed_jwt")
+ and self._source_credentials._always_use_jwt_access
+ ):
+ self._source_credentials._create_self_signed_jwt(None)
+ self._target_principal = target_principal
+ self._target_scopes = target_scopes
+ self._delegates = delegates
+ self._lifetime = lifetime or _DEFAULT_TOKEN_LIFETIME_SECS
+ self.token = None
+ self.expiry = _helpers.utcnow()
+ self._quota_project_id = quota_project_id
+ self._iam_endpoint_override = iam_endpoint_override
+
+ def _metric_header_for_usage(self):
+ return metrics.CRED_TYPE_SA_IMPERSONATE
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ self._update_token(request)
+
+ def _update_token(self, request):
+ """Updates credentials with a new access_token representing
+ the impersonated account.
+
+ Args:
+ request (google.auth.transport.requests.Request): Request object
+ to use for refreshing credentials.
+ """
+
+ # Refresh our source credentials if it is not valid.
+ if not self._source_credentials.valid:
+ self._source_credentials.refresh(request)
+
+ body = {
+ "delegates": self._delegates,
+ "scope": self._target_scopes,
+ "lifetime": str(self._lifetime) + "s",
+ }
+
+ headers = {
+ "Content-Type": "application/json",
+ metrics.API_CLIENT_HEADER: metrics.token_request_access_token_impersonate(),
+ }
+
+ # Apply the source credentials authentication info.
+ self._source_credentials.apply(headers)
+
+ self.token, self.expiry = _make_iam_token_request(
+ request=request,
+ principal=self._target_principal,
+ headers=headers,
+ body=body,
+ iam_endpoint_override=self._iam_endpoint_override,
+ )
+
+ def sign_bytes(self, message):
+ from google.auth.transport.requests import AuthorizedSession
+
+ iam_sign_endpoint = _IAM_SIGN_ENDPOINT.format(self._target_principal)
+
+ body = {
+ "payload": base64.b64encode(message).decode("utf-8"),
+ "delegates": self._delegates,
+ }
+
+ headers = {"Content-Type": "application/json"}
+
+ authed_session = AuthorizedSession(self._source_credentials)
+
+ try:
+ response = authed_session.post(
+ url=iam_sign_endpoint, headers=headers, json=body
+ )
+ finally:
+ authed_session.close()
+
+ if response.status_code != http_client.OK:
+ raise exceptions.TransportError(
+ "Error calling sign_bytes: {}".format(response.json())
+ )
+
+ return base64.b64decode(response.json()["signedBlob"])
+
+ @property
+ def signer_email(self):
+ return self._target_principal
+
+ @property
+ def service_account_email(self):
+ return self._target_principal
+
+ @property
+ def signer(self):
+ return self
+
+ @property
+ def requires_scopes(self):
+ return not self._target_scopes
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ self._source_credentials,
+ target_principal=self._target_principal,
+ target_scopes=self._target_scopes,
+ delegates=self._delegates,
+ lifetime=self._lifetime,
+ quota_project_id=quota_project_id,
+ iam_endpoint_override=self._iam_endpoint_override,
+ )
+
+ @_helpers.copy_docstring(credentials.Scoped)
+ def with_scopes(self, scopes, default_scopes=None):
+ return self.__class__(
+ self._source_credentials,
+ target_principal=self._target_principal,
+ target_scopes=scopes or default_scopes,
+ delegates=self._delegates,
+ lifetime=self._lifetime,
+ quota_project_id=self._quota_project_id,
+ iam_endpoint_override=self._iam_endpoint_override,
+ )
+
+
+class IDTokenCredentials(credentials.CredentialsWithQuotaProject):
+ """Open ID Connect ID Token-based service account credentials.
+
+ """
+
+ def __init__(
+ self,
+ target_credentials,
+ target_audience=None,
+ include_email=False,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ target_credentials (google.auth.Credentials): The target
+                credential used to acquire the ID tokens for.
+ target_audience (string): Audience to issue the token for.
+ include_email (bool): Include email in IdToken
+ quota_project_id (Optional[str]): The project ID used for
+ quota and billing.
+ """
+ super(IDTokenCredentials, self).__init__()
+
+ if not isinstance(target_credentials, Credentials):
+ raise exceptions.GoogleAuthError(
+ "Provided Credential must be " "impersonated_credentials"
+ )
+ self._target_credentials = target_credentials
+ self._target_audience = target_audience
+ self._include_email = include_email
+ self._quota_project_id = quota_project_id
+
+ def from_credentials(self, target_credentials, target_audience=None):
+ return self.__class__(
+ target_credentials=target_credentials,
+ target_audience=target_audience,
+ include_email=self._include_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+ def with_target_audience(self, target_audience):
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=target_audience,
+ include_email=self._include_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+ def with_include_email(self, include_email):
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=self._target_audience,
+ include_email=include_email,
+ quota_project_id=self._quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ target_credentials=self._target_credentials,
+ target_audience=self._target_audience,
+ include_email=self._include_email,
+ quota_project_id=quota_project_id,
+ )
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ from google.auth.transport.requests import AuthorizedSession
+
+ iam_sign_endpoint = _IAM_IDTOKEN_ENDPOINT.format(
+ self._target_credentials.signer_email
+ )
+
+ body = {
+ "audience": self._target_audience,
+ "delegates": self._target_credentials._delegates,
+ "includeEmail": self._include_email,
+ }
+
+ headers = {
+ "Content-Type": "application/json",
+ metrics.API_CLIENT_HEADER: metrics.token_request_id_token_impersonate(),
+ }
+
+ authed_session = AuthorizedSession(
+ self._target_credentials._source_credentials, auth_request=request
+ )
+
+ try:
+ response = authed_session.post(
+ url=iam_sign_endpoint,
+ headers=headers,
+ data=json.dumps(body).encode("utf-8"),
+ )
+ finally:
+ authed_session.close()
+
+ if response.status_code != http_client.OK:
+ raise exceptions.RefreshError(
+ "Error getting ID token: {}".format(response.json())
+ )
+
+ id_token = response.json()["token"]
+ self.token = id_token
+ self.expiry = datetime.utcfromtimestamp(
+ jwt.decode(id_token, verify=False)["exp"]
+ )
diff --git a/contrib/python/google-auth/py3/google/auth/jwt.py b/contrib/python/google-auth/py3/google/auth/jwt.py
new file mode 100644
index 0000000000..1ebd565d4e
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/jwt.py
@@ -0,0 +1,878 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""JSON Web Tokens
+
+Provides support for creating (encoding) and verifying (decoding) JWTs,
+especially JWTs generated and consumed by Google infrastructure.
+
+See `rfc7519`_ for more details on JWTs.
+
+To encode a JWT use :func:`encode`::
+
+ from google.auth import crypt
+ from google.auth import jwt
+
+ signer = crypt.Signer(private_key)
+ payload = {'some': 'payload'}
+ encoded = jwt.encode(signer, payload)
+
+To decode a JWT and verify claims use :func:`decode`::
+
+ claims = jwt.decode(encoded, certs=public_certs)
+
+You can also skip verification::
+
+ claims = jwt.decode(encoded, verify=False)
+
+.. _rfc7519: https://tools.ietf.org/html/rfc7519
+
+"""
+
+try:
+ from collections.abc import Mapping
+# Python 2.7 compatibility
+except ImportError: # pragma: NO COVER
+ from collections import Mapping # type: ignore
+import copy
+import datetime
+import json
+import urllib
+
+import cachetools
+
+from google.auth import _helpers
+from google.auth import _service_account_info
+from google.auth import crypt
+from google.auth import exceptions
+import google.auth.credentials
+
+try:
+ from google.auth.crypt import es256
+except ImportError: # pragma: NO COVER
+ es256 = None # type: ignore
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+_DEFAULT_MAX_CACHE_SIZE = 10
+_ALGORITHM_TO_VERIFIER_CLASS = {"RS256": crypt.RSAVerifier}
+_CRYPTOGRAPHY_BASED_ALGORITHMS = frozenset(["ES256"])
+
+if es256 is not None: # pragma: NO COVER
+ _ALGORITHM_TO_VERIFIER_CLASS["ES256"] = es256.ES256Verifier # type: ignore
+
+
+def encode(signer, payload, header=None, key_id=None):
+ """Make a signed JWT.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign the JWT.
+ payload (Mapping[str, str]): The JWT payload.
+ header (Mapping[str, str]): Additional JWT header payload.
+ key_id (str): The key id to add to the JWT header. If the
+ signer has a key id it will be used as the default. If this is
+ specified it will override the signer's key id.
+
+ Returns:
+ bytes: The encoded JWT.
+ """
+ if header is None:
+ header = {}
+
+ if key_id is None:
+ key_id = signer.key_id
+
+ header.update({"typ": "JWT"})
+
+ if "alg" not in header:
+ if es256 is not None and isinstance(signer, es256.ES256Signer):
+ header.update({"alg": "ES256"})
+ else:
+ header.update({"alg": "RS256"})
+
+ if key_id is not None:
+ header["kid"] = key_id
+
+ segments = [
+ _helpers.unpadded_urlsafe_b64encode(json.dumps(header).encode("utf-8")),
+ _helpers.unpadded_urlsafe_b64encode(json.dumps(payload).encode("utf-8")),
+ ]
+
+ signing_input = b".".join(segments)
+ signature = signer.sign(signing_input)
+ segments.append(_helpers.unpadded_urlsafe_b64encode(signature))
+
+ return b".".join(segments)
+
+
+def _decode_jwt_segment(encoded_section):
+ """Decodes a single JWT segment."""
+ section_bytes = _helpers.padded_urlsafe_b64decode(encoded_section)
+ try:
+ return json.loads(section_bytes.decode("utf-8"))
+ except ValueError as caught_exc:
+ new_exc = exceptions.MalformedError(
+ "Can't parse segment: {0}".format(section_bytes)
+ )
+ raise new_exc from caught_exc
+
+
+def _unverified_decode(token):
+ """Decodes a token and does no verification.
+
+ Args:
+ token (Union[str, bytes]): The encoded JWT.
+
+ Returns:
+ Tuple[Mapping, Mapping, str, str]: header, payload, signed_section, and
+ signature.
+
+ Raises:
+ google.auth.exceptions.MalformedError: if there are an incorrect amount of segments in the token or segments of the wrong type.
+ """
+ token = _helpers.to_bytes(token)
+
+ if token.count(b".") != 2:
+ raise exceptions.MalformedError(
+ "Wrong number of segments in token: {0}".format(token)
+ )
+
+ encoded_header, encoded_payload, signature = token.split(b".")
+ signed_section = encoded_header + b"." + encoded_payload
+ signature = _helpers.padded_urlsafe_b64decode(signature)
+
+ # Parse segments
+ header = _decode_jwt_segment(encoded_header)
+ payload = _decode_jwt_segment(encoded_payload)
+
+ if not isinstance(header, Mapping):
+ raise exceptions.MalformedError(
+ "Header segment should be a JSON object: {0}".format(encoded_header)
+ )
+
+ if not isinstance(payload, Mapping):
+ raise exceptions.MalformedError(
+ "Payload segment should be a JSON object: {0}".format(encoded_payload)
+ )
+
+ return header, payload, signed_section, signature
+
+
+def decode_header(token):
+ """Return the decoded header of a token.
+
+ No verification is done. This is useful to extract the key id from
+ the header in order to acquire the appropriate certificate to verify
+ the token.
+
+ Args:
+ token (Union[str, bytes]): the encoded JWT.
+
+ Returns:
+ Mapping: The decoded JWT header.
+ """
+ header, _, _, _ = _unverified_decode(token)
+ return header
+
+
+def _verify_iat_and_exp(payload, clock_skew_in_seconds=0):
+ """Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token
+ payload.
+
+ Args:
+ payload (Mapping[str, str]): The JWT payload.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Raises:
+ google.auth.exceptions.InvalidValue: if value validation failed.
+ google.auth.exceptions.MalformedError: if schema validation failed.
+ """
+ now = _helpers.datetime_to_secs(_helpers.utcnow())
+
+ # Make sure the iat and exp claims are present.
+ for key in ("iat", "exp"):
+ if key not in payload:
+ raise exceptions.MalformedError(
+ "Token does not contain required claim {}".format(key)
+ )
+
+ # Make sure the token wasn't issued in the future.
+ iat = payload["iat"]
+ # Err on the side of accepting a token that is slightly early to account
+ # for clock skew.
+ earliest = iat - clock_skew_in_seconds
+ if now < earliest:
+ raise exceptions.InvalidValue(
+ "Token used too early, {} < {}. Check that your computer's clock is set correctly.".format(
+ now, iat
+ )
+ )
+
+ # Make sure the token isn't expired.
+ exp = payload["exp"]
+ # Err on the side of accepting a token that is slightly out of date
+ # to account for clock skew.
+ latest = exp + clock_skew_in_seconds
+ if latest < now:
+ raise exceptions.InvalidValue("Token expired, {} < {}".format(latest, now))
+
+
+def decode(token, certs=None, verify=True, audience=None, clock_skew_in_seconds=0):
+ """Decode and verify a JWT.
+
+ Args:
+ token (str): The encoded JWT.
+ certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
+ certificate used to validate the JWT signature. If bytes or string,
+ it must be the public key certificate in PEM format. If a mapping,
+ it must be a mapping of key IDs to public key certificates in PEM
+ format. The mapping must contain the same key ID that's specified
+ in the token's header.
+ verify (bool): Whether to perform signature and claim validation.
+ Verification is done by default.
+ audience (str or list): The audience claim, 'aud', that this JWT should
+ contain. Or a list of audience claims. If None then the JWT's 'aud'
+ parameter is not verified.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Returns:
+ Mapping[str, str]: The deserialized JSON payload in the JWT.
+
+ Raises:
+ google.auth.exceptions.InvalidValue: if value validation failed.
+ google.auth.exceptions.MalformedError: if schema validation failed.
+ """
+ header, payload, signed_section, signature = _unverified_decode(token)
+
+ if not verify:
+ return payload
+
+ # Pluck the key id and algorithm from the header and make sure we have
+ # a verifier that can support it.
+ key_alg = header.get("alg")
+ key_id = header.get("kid")
+
+ try:
+ verifier_cls = _ALGORITHM_TO_VERIFIER_CLASS[key_alg]
+ except KeyError as exc:
+ if key_alg in _CRYPTOGRAPHY_BASED_ALGORITHMS:
+ raise exceptions.InvalidValue(
+ "The key algorithm {} requires the cryptography package to be installed.".format(
+ key_alg
+ )
+ ) from exc
+ else:
+ raise exceptions.InvalidValue(
+ "Unsupported signature algorithm {}".format(key_alg)
+ ) from exc
+ # If certs is specified as a dictionary of key IDs to certificates, then
+ # use the certificate identified by the key ID in the token header.
+ if isinstance(certs, Mapping):
+ if key_id:
+ if key_id not in certs:
+ raise exceptions.MalformedError(
+ "Certificate for key id {} not found.".format(key_id)
+ )
+ certs_to_check = [certs[key_id]]
+ # If there's no key id in the header, check against all of the certs.
+ else:
+ certs_to_check = certs.values()
+ else:
+ certs_to_check = certs
+
+ # Verify that the signature matches the message.
+ if not crypt.verify_signature(
+ signed_section, signature, certs_to_check, verifier_cls
+ ):
+ raise exceptions.MalformedError("Could not verify token signature.")
+
+ # Verify the issued at and created times in the payload.
+ _verify_iat_and_exp(payload, clock_skew_in_seconds)
+
+ # Check audience.
+ if audience is not None:
+ claim_audience = payload.get("aud")
+ if isinstance(audience, str):
+ audience = [audience]
+ if claim_audience not in audience:
+ raise exceptions.InvalidValue(
+ "Token has wrong audience {}, expected one of {}".format(
+ claim_audience, audience
+ )
+ )
+
+ return payload
+
+
+class Credentials(
+ google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject
+):
+ """Credentials that use a JWT as the bearer token.
+
+ These credentials require an "audience" claim. This claim identifies the
+ intended recipient of the bearer token.
+
+ The constructor arguments determine the claims for the JWT that is
+ sent with requests. Usually, you'll construct these credentials with
+ one of the helper constructors as shown in the next section.
+
+ To create JWT credentials using a Google service account private key
+ JSON file::
+
+ audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher'
+ credentials = jwt.Credentials.from_service_account_file(
+ 'service-account.json',
+ audience=audience)
+
+ If you already have the service account file loaded and parsed::
+
+ service_account_info = json.load(open('service_account.json'))
+ credentials = jwt.Credentials.from_service_account_info(
+ service_account_info,
+ audience=audience)
+
+ Both helper methods pass on arguments to the constructor, so you can
+ specify the JWT claims::
+
+ credentials = jwt.Credentials.from_service_account_file(
+ 'service-account.json',
+ audience=audience,
+ additional_claims={'meta': 'data'})
+
+ You can also construct the credentials directly if you have a
+ :class:`~google.auth.crypt.Signer` instance::
+
+ credentials = jwt.Credentials(
+ signer,
+ issuer='your-issuer',
+ subject='your-subject',
+ audience=audience)
+
+ The claims are considered immutable. If you want to modify the claims,
+ you can easily create another instance using :meth:`with_claims`::
+
+ new_audience = (
+ 'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber')
+ new_credentials = credentials.with_claims(audience=new_audience)
+ """
+
+ def __init__(
+ self,
+ signer,
+ issuer,
+ subject,
+ audience,
+ additional_claims=None,
+ token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ issuer (str): The `iss` claim.
+ subject (str): The `sub` claim.
+ audience (str): the `aud` claim. The intended audience for the
+ credentials.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT payload.
+ token_lifetime (int): The amount of time in seconds for
+ which the token is valid. Defaults to 1 hour.
+ quota_project_id (Optional[str]): The project ID used for quota
+ and billing.
+ """
+ super(Credentials, self).__init__()
+ self._signer = signer
+ self._issuer = issuer
+ self._subject = subject
+ self._audience = audience
+ self._token_lifetime = token_lifetime
+ self._quota_project_id = quota_project_id
+
+ if additional_claims is None:
+ additional_claims = {}
+
+ self._additional_claims = additional_claims
+
+ @classmethod
+ def _from_signer_and_info(cls, signer, info, **kwargs):
+ """Creates a Credentials instance from a signer and service account
+ info.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ info (Mapping[str, str]): The service account info.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.Credentials: The constructed credentials.
+
+ Raises:
+ google.auth.exceptions.MalformedError: If the info is not in the expected format.
+ """
+ kwargs.setdefault("subject", info["client_email"])
+ kwargs.setdefault("issuer", info["client_email"])
+ return cls(signer, **kwargs)
+
+ @classmethod
+ def from_service_account_info(cls, info, **kwargs):
+ """Creates a Credentials instance from a dictionary.
+
+ Args:
+ info (Mapping[str, str]): The service account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.Credentials: The constructed credentials.
+
+ Raises:
+ google.auth.exceptions.MalformedError: If the info is not in the expected format.
+ """
+ signer = _service_account_info.from_dict(info, require=["client_email"])
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename, **kwargs):
+ """Creates a Credentials instance from a service account .json file
+ in Google format.
+
+ Args:
+ filename (str): The path to the service account .json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.Credentials: The constructed credentials.
+ """
+ info, signer = _service_account_info.from_filename(
+ filename, require=["client_email"]
+ )
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @classmethod
+ def from_signing_credentials(cls, credentials, audience, **kwargs):
+ """Creates a new :class:`google.auth.jwt.Credentials` instance from an
+ existing :class:`google.auth.credentials.Signing` instance.
+
+ The new instance will use the same signer as the existing instance and
+ will use the existing instance's signer email as the issuer and
+ subject by default.
+
+ Example::
+
+ svc_creds = service_account.Credentials.from_service_account_file(
+ 'service_account.json')
+ audience = (
+ 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher')
+ jwt_creds = jwt.Credentials.from_signing_credentials(
+ svc_creds, audience=audience)
+
+ Args:
+ credentials (google.auth.credentials.Signing): The credentials to
+ use to construct the new credentials.
+ audience (str): the `aud` claim. The intended audience for the
+ credentials.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.Credentials: A new Credentials instance.
+ """
+ kwargs.setdefault("issuer", credentials.signer_email)
+ kwargs.setdefault("subject", credentials.signer_email)
+ return cls(credentials.signer, audience=audience, **kwargs)
+
+ def with_claims(
+ self, issuer=None, subject=None, audience=None, additional_claims=None
+ ):
+ """Returns a copy of these credentials with modified claims.
+
+ Args:
+ issuer (str): The `iss` claim. If unspecified the current issuer
+ claim will be used.
+ subject (str): The `sub` claim. If unspecified the current subject
+ claim will be used.
+ audience (str): the `aud` claim. If unspecified the current
+ audience claim will be used.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT payload. This will be merged with the current
+ additional claims.
+
+ Returns:
+ google.auth.jwt.Credentials: A new credentials instance.
+ """
+ new_additional_claims = copy.deepcopy(self._additional_claims)
+ new_additional_claims.update(additional_claims or {})
+
+ return self.__class__(
+ self._signer,
+ issuer=issuer if issuer is not None else self._issuer,
+ subject=subject if subject is not None else self._subject,
+ audience=audience if audience is not None else self._audience,
+ additional_claims=new_additional_claims,
+ quota_project_id=self._quota_project_id,
+ )
+
+ @_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(
+ self._signer,
+ issuer=self._issuer,
+ subject=self._subject,
+ audience=self._audience,
+ additional_claims=self._additional_claims,
+ quota_project_id=quota_project_id,
+ )
+
+ def _make_jwt(self):
+ """Make a signed JWT.
+
+ Returns:
+ Tuple[bytes, datetime]: The encoded JWT and the expiration.
+ """
+ now = _helpers.utcnow()
+ lifetime = datetime.timedelta(seconds=self._token_lifetime)
+ expiry = now + lifetime
+
+ payload = {
+ "iss": self._issuer,
+ "sub": self._subject,
+ "iat": _helpers.datetime_to_secs(now),
+ "exp": _helpers.datetime_to_secs(expiry),
+ }
+ if self._audience:
+ payload["aud"] = self._audience
+
+ payload.update(self._additional_claims)
+
+ jwt = encode(self._signer, payload)
+
+ return jwt, expiry
+
+ def refresh(self, request):
+ """Refreshes the access token.
+
+ Args:
+ request (Any): Unused.
+ """
+ # pylint: disable=unused-argument
+ # (pylint doesn't correctly recognize overridden methods.)
+ self.token, self.expiry = self._make_jwt()
+
+ @_helpers.copy_docstring(google.auth.credentials.Signing)
+ def sign_bytes(self, message):
+ return self._signer.sign(message)
+
+ @property # type: ignore
+ @_helpers.copy_docstring(google.auth.credentials.Signing)
+ def signer_email(self):
+ return self._issuer
+
+ @property # type: ignore
+ @_helpers.copy_docstring(google.auth.credentials.Signing)
+ def signer(self):
+ return self._signer
+
+ @property # type: ignore
+ def additional_claims(self):
+ """ Additional claims the JWT object was created with."""
+ return self._additional_claims
+
+
+class OnDemandCredentials(
+ google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject
+):
+ """On-demand JWT credentials.
+
+ Like :class:`Credentials`, this class uses a JWT as the bearer token for
+ authentication. However, this class does not require the audience at
+ construction time. Instead, it will generate a new token on-demand for
+ each request using the request URI as the audience. It caches tokens
+ so that multiple requests to the same URI do not incur the overhead
+ of generating a new token every time.
+
+ This behavior is especially useful for `gRPC`_ clients. A gRPC service may
+ have multiple audiences and gRPC clients may not know all of the audiences
+ required for accessing a particular service. With these credentials,
+ no knowledge of the audiences is required ahead of time.
+
+ .. _grpc: http://www.grpc.io/
+ """
+
+ def __init__(
+ self,
+ signer,
+ issuer,
+ subject,
+ additional_claims=None,
+ token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
+ max_cache_size=_DEFAULT_MAX_CACHE_SIZE,
+ quota_project_id=None,
+ ):
+ """
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ issuer (str): The `iss` claim.
+ subject (str): The `sub` claim.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT payload.
+ token_lifetime (int): The amount of time in seconds for
+ which the token is valid. Defaults to 1 hour.
+ max_cache_size (int): The maximum number of JWT tokens to keep in
+ cache. Tokens are cached using :class:`cachetools.LRUCache`.
+ quota_project_id (Optional[str]): The project ID used for quota
+ and billing.
+
+ """
+ super(OnDemandCredentials, self).__init__()
+ self._signer = signer
+ self._issuer = issuer
+ self._subject = subject
+ self._token_lifetime = token_lifetime
+ self._quota_project_id = quota_project_id
+
+ if additional_claims is None:
+ additional_claims = {}
+
+ self._additional_claims = additional_claims
+ self._cache = cachetools.LRUCache(maxsize=max_cache_size)
+
+ @classmethod
+ def _from_signer_and_info(cls, signer, info, **kwargs):
+ """Creates an OnDemandCredentials instance from a signer and service
+ account info.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ info (Mapping[str, str]): The service account info.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.OnDemandCredentials: The constructed credentials.
+
+ Raises:
+ google.auth.exceptions.MalformedError: If the info is not in the expected format.
+ """
+ kwargs.setdefault("subject", info["client_email"])
+ kwargs.setdefault("issuer", info["client_email"])
+ return cls(signer, **kwargs)
+
+ @classmethod
+ def from_service_account_info(cls, info, **kwargs):
+ """Creates an OnDemandCredentials instance from a dictionary.
+
+ Args:
+ info (Mapping[str, str]): The service account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.OnDemandCredentials: The constructed credentials.
+
+ Raises:
+ google.auth.exceptions.MalformedError: If the info is not in the expected format.
+ """
+ signer = _service_account_info.from_dict(info, require=["client_email"])
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename, **kwargs):
+ """Creates an OnDemandCredentials instance from a service account .json
+ file in Google format.
+
+ Args:
+ filename (str): The path to the service account .json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.OnDemandCredentials: The constructed credentials.
+ """
+ info, signer = _service_account_info.from_filename(
+ filename, require=["client_email"]
+ )
+ return cls._from_signer_and_info(signer, info, **kwargs)
+
+ @classmethod
+ def from_signing_credentials(cls, credentials, **kwargs):
+ """Creates a new :class:`google.auth.jwt.OnDemandCredentials` instance
+ from an existing :class:`google.auth.credentials.Signing` instance.
+
+ The new instance will use the same signer as the existing instance and
+ will use the existing instance's signer email as the issuer and
+ subject by default.
+
+ Example::
+
+ svc_creds = service_account.Credentials.from_service_account_file(
+ 'service_account.json')
+ jwt_creds = jwt.OnDemandCredentials.from_signing_credentials(
+ svc_creds)
+
+ Args:
+ credentials (google.auth.credentials.Signing): The credentials to
+ use to construct the new credentials.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.jwt.Credentials: A new Credentials instance.
+ """
+ kwargs.setdefault("issuer", credentials.signer_email)
+ kwargs.setdefault("subject", credentials.signer_email)
+ return cls(credentials.signer, **kwargs)
+
+ def with_claims(self, issuer=None, subject=None, additional_claims=None):
+ """Returns a copy of these credentials with modified claims.
+
+ Args:
+ issuer (str): The `iss` claim. If unspecified the current issuer
+ claim will be used.
+ subject (str): The `sub` claim. If unspecified the current subject
+ claim will be used.
+ additional_claims (Mapping[str, str]): Any additional claims for
+ the JWT payload. This will be merged with the current
+ additional claims.
+
+ Returns:
+ google.auth.jwt.OnDemandCredentials: A new credentials instance.
+ """
+ new_additional_claims = copy.deepcopy(self._additional_claims)
+ new_additional_claims.update(additional_claims or {})
+
+ return self.__class__(
+ self._signer,
+ issuer=issuer if issuer is not None else self._issuer,
+ subject=subject if subject is not None else self._subject,
+ additional_claims=new_additional_claims,
+ max_cache_size=self._cache.maxsize,
+ quota_project_id=self._quota_project_id,
+ )
+
+ @_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+
+ return self.__class__(
+ self._signer,
+ issuer=self._issuer,
+ subject=self._subject,
+ additional_claims=self._additional_claims,
+ max_cache_size=self._cache.maxsize,
+ quota_project_id=quota_project_id,
+ )
+
+ @property
+ def valid(self):
+ """Checks the validity of the credentials.
+
+ These credentials are always valid because it generates tokens on
+ demand.
+ """
+ return True
+
+ def _make_jwt_for_audience(self, audience):
+ """Make a new JWT for the given audience.
+
+ Args:
+ audience (str): The intended audience.
+
+ Returns:
+ Tuple[bytes, datetime]: The encoded JWT and the expiration.
+ """
+ now = _helpers.utcnow()
+ lifetime = datetime.timedelta(seconds=self._token_lifetime)
+ expiry = now + lifetime
+
+ payload = {
+ "iss": self._issuer,
+ "sub": self._subject,
+ "iat": _helpers.datetime_to_secs(now),
+ "exp": _helpers.datetime_to_secs(expiry),
+ "aud": audience,
+ }
+
+ payload.update(self._additional_claims)
+
+ jwt = encode(self._signer, payload)
+
+ return jwt, expiry
+
+ def _get_jwt_for_audience(self, audience):
+ """Get a JWT For a given audience.
+
+ If there is already an existing, non-expired token in the cache for
+ the audience, that token is used. Otherwise, a new token will be
+ created.
+
+ Args:
+ audience (str): The intended audience.
+
+ Returns:
+ bytes: The encoded JWT.
+ """
+ token, expiry = self._cache.get(audience, (None, None))
+
+ if token is None or expiry < _helpers.utcnow():
+ token, expiry = self._make_jwt_for_audience(audience)
+ self._cache[audience] = token, expiry
+
+ return token
+
+ def refresh(self, request):
+ """Raises an exception, these credentials can not be directly
+ refreshed.
+
+ Args:
+ request (Any): Unused.
+
+ Raises:
+ google.auth.RefreshError
+ """
+ # pylint: disable=unused-argument
+ # (pylint doesn't correctly recognize overridden methods.)
+ raise exceptions.RefreshError(
+ "OnDemandCredentials can not be directly refreshed."
+ )
+
+ def before_request(self, request, method, url, headers):
+ """Performs credential-specific before request logic.
+
+ Args:
+ request (Any): Unused. JWT credentials do not need to make an
+ HTTP request to refresh.
+ method (str): The request's HTTP method.
+ url (str): The request's URI. This is used as the audience claim
+ when generating the JWT.
+ headers (Mapping): The request's headers.
+ """
+ # pylint: disable=unused-argument
+ # (pylint doesn't correctly recognize overridden methods.)
+ parts = urllib.parse.urlsplit(url)
+ # Strip query string and fragment
+ audience = urllib.parse.urlunsplit(
+ (parts.scheme, parts.netloc, parts.path, "", "")
+ )
+ token = self._get_jwt_for_audience(audience)
+ self.apply(headers, token=token)
+
+ @_helpers.copy_docstring(google.auth.credentials.Signing)
+ def sign_bytes(self, message):
+ return self._signer.sign(message)
+
+ @property # type: ignore
+ @_helpers.copy_docstring(google.auth.credentials.Signing)
+ def signer_email(self):
+ return self._issuer
+
+ @property # type: ignore
+ @_helpers.copy_docstring(google.auth.credentials.Signing)
+ def signer(self):
+ return self._signer
diff --git a/contrib/python/google-auth/py3/google/auth/metrics.py b/contrib/python/google-auth/py3/google/auth/metrics.py
new file mode 100644
index 0000000000..11e4b07730
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/metrics.py
@@ -0,0 +1,154 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" We use x-goog-api-client header to report metrics. This module provides
+the constants and helper methods to construct x-goog-api-client header.
+"""
+
+import platform
+
+from google.auth import version
+
+
+API_CLIENT_HEADER = "x-goog-api-client"
+
+# BYOID Specific consts
+BYOID_HEADER_SECTION = "google-byoid-sdk"
+
+# Auth request type
+REQUEST_TYPE_ACCESS_TOKEN = "auth-request-type/at"
+REQUEST_TYPE_ID_TOKEN = "auth-request-type/it"
+REQUEST_TYPE_MDS_PING = "auth-request-type/mds"
+REQUEST_TYPE_REAUTH_START = "auth-request-type/re-start"
+REQUEST_TYPE_REAUTH_CONTINUE = "auth-request-type/re-cont"
+
+# Credential type
+CRED_TYPE_USER = "cred-type/u"
+CRED_TYPE_SA_ASSERTION = "cred-type/sa"
+CRED_TYPE_SA_JWT = "cred-type/jwt"
+CRED_TYPE_SA_MDS = "cred-type/mds"
+CRED_TYPE_SA_IMPERSONATE = "cred-type/imp"
+
+
+# Versions
+def python_and_auth_lib_version():
+ return "gl-python/{} auth/{}".format(platform.python_version(), version.__version__)
+
+
+# Token request metric header values
+
+# x-goog-api-client header value for access token request via metadata server.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds"
+def token_request_access_token_mds():
+ return "{} {} {}".format(
+ python_and_auth_lib_version(), REQUEST_TYPE_ACCESS_TOKEN, CRED_TYPE_SA_MDS
+ )
+
+
+# x-goog-api-client header value for ID token request via metadata server.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/mds"
+def token_request_id_token_mds():
+ return "{} {} {}".format(
+ python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_MDS
+ )
+
+
+# x-goog-api-client header value for impersonated credentials access token request.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
+def token_request_access_token_impersonate():
+ return "{} {} {}".format(
+ python_and_auth_lib_version(),
+ REQUEST_TYPE_ACCESS_TOKEN,
+ CRED_TYPE_SA_IMPERSONATE,
+ )
+
+
+# x-goog-api-client header value for impersonated credentials ID token request.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/imp"
+def token_request_id_token_impersonate():
+ return "{} {} {}".format(
+ python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_IMPERSONATE
+ )
+
+
+# x-goog-api-client header value for service account credentials access token
+# request (assertion flow).
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/sa"
+def token_request_access_token_sa_assertion():
+ return "{} {} {}".format(
+ python_and_auth_lib_version(), REQUEST_TYPE_ACCESS_TOKEN, CRED_TYPE_SA_ASSERTION
+ )
+
+
+# x-goog-api-client header value for service account credentials ID token
+# request (assertion flow).
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/sa"
+def token_request_id_token_sa_assertion():
+ return "{} {} {}".format(
+ python_and_auth_lib_version(), REQUEST_TYPE_ID_TOKEN, CRED_TYPE_SA_ASSERTION
+ )
+
+
+# x-goog-api-client header value for user credentials token request.
+# Example: "gl-python/3.7 auth/1.1 cred-type/u"
+def token_request_user():
+ return "{} {}".format(python_and_auth_lib_version(), CRED_TYPE_USER)
+
+
+# Miscellaneous metrics
+
+# x-goog-api-client header value for metadata server ping.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/mds"
+def mds_ping():
+ return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_MDS_PING)
+
+
+# x-goog-api-client header value for reauth start endpoint calls.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/re-start"
+def reauth_start():
+ return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_REAUTH_START)
+
+
+# x-goog-api-client header value for reauth continue endpoint calls.
+# Example: "gl-python/3.7 auth/1.1 auth-request-type/re-cont"
+def reauth_continue():
+ return "{} {}".format(python_and_auth_lib_version(), REQUEST_TYPE_REAUTH_CONTINUE)
+
+
+# x-goog-api-client header value for BYOID calls to the Security Token Service exchange token endpoint.
+# Example: "gl-python/3.7 auth/1.1 google-byoid-sdk source/aws sa-impersonation/true config-lifetime/true"
+def byoid_metrics_header(metrics_options):
+ header = "{} {}".format(python_and_auth_lib_version(), BYOID_HEADER_SECTION)
+ for key, value in metrics_options.items():
+ header = "{} {}/{}".format(header, key, value)
+ return header
+
+
+def add_metric_header(headers, metric_header_value):
+ """Add x-goog-api-client header with the given value.
+
+ Args:
+ headers (Mapping[str, str]): The headers to which we will add the
+ metric header.
+ metric_header_value (Optional[str]): If value is None, do nothing;
+ if headers already has a x-goog-api-client header, append the value
+ to the existing header; otherwise add a new x-goog-api-client
+ header with the given value.
+ """
+ if not metric_header_value:
+ return
+ if API_CLIENT_HEADER not in headers:
+ headers[API_CLIENT_HEADER] = metric_header_value
+ else:
+ headers[API_CLIENT_HEADER] += " " + metric_header_value
diff --git a/contrib/python/google-auth/py3/google/auth/pluggable.py b/contrib/python/google-auth/py3/google/auth/pluggable.py
new file mode 100644
index 0000000000..53b4eac5b4
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/pluggable.py
@@ -0,0 +1,429 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pluggable Credentials.
+Pluggable Credentials are initialized using external_account arguments which
+are typically loaded from third-party executables. Unlike other
+credentials that can be initialized with a list of explicit arguments, secrets
+or credentials, external account clients use the environment and hints/guidelines
+provided by the external_account JSON file to retrieve credentials and exchange
+them for Google access tokens.
+
+Example credential_source for pluggable credential:
+{
+ "executable": {
+ "command": "/path/to/get/credentials.sh --arg1=value1 --arg2=value2",
+ "timeout_millis": 5000,
+ "output_file": "/path/to/generated/cached/credentials"
+ }
+}
+"""
+
+try:
+ from collections.abc import Mapping
+# Python 2.7 compatibility
+except ImportError: # pragma: NO COVER
+ from collections import Mapping
+import json
+import os
+import subprocess
+import sys
+import time
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import external_account
+
+# The max supported executable spec version.
+EXECUTABLE_SUPPORTED_MAX_VERSION = 1
+
+EXECUTABLE_TIMEOUT_MILLIS_DEFAULT = 30 * 1000 # 30 seconds
+EXECUTABLE_TIMEOUT_MILLIS_LOWER_BOUND = 5 * 1000 # 5 seconds
+EXECUTABLE_TIMEOUT_MILLIS_UPPER_BOUND = 120 * 1000 # 2 minutes
+
+EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_LOWER_BOUND = 30 * 1000 # 30 seconds
+EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_UPPER_BOUND = 30 * 60 * 1000 # 30 minutes
+
+
+class Credentials(external_account.Credentials):
+ """External account credentials sourced from executables."""
+
+ def __init__(
+ self,
+ audience,
+ subject_token_type,
+ token_url,
+ credential_source,
+ *args,
+ **kwargs
+ ):
+ """Instantiates an external account credentials object from a executables.
+
+ Args:
+ audience (str): The STS audience field.
+ subject_token_type (str): The subject token type.
+ token_url (str): The STS endpoint URL.
+ credential_source (Mapping): The credential source dictionary used to
+ provide instructions on how to retrieve external credential to be
+ exchanged for Google access tokens.
+
+ Example credential_source for pluggable credential:
+
+ {
+ "executable": {
+ "command": "/path/to/get/credentials.sh --arg1=value1 --arg2=value2",
+ "timeout_millis": 5000,
+ "output_file": "/path/to/generated/cached/credentials"
+ }
+ }
+ args (List): Optional positional arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
+ kwargs (Mapping): Optional keyword arguments passed into the underlying :meth:`~external_account.Credentials.__init__` method.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If an error is encountered during
+ access token retrieval logic.
+ google.auth.exceptions.InvalidValue: For invalid parameters.
+ google.auth.exceptions.MalformedError: For invalid parameters.
+
+ .. note:: Typically one of the helper constructors
+ :meth:`from_file` or
+ :meth:`from_info` are used instead of calling the constructor directly.
+ """
+
+ self.interactive = kwargs.pop("interactive", False)
+ super(Credentials, self).__init__(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ credential_source=credential_source,
+ *args,
+ **kwargs
+ )
+ if not isinstance(credential_source, Mapping):
+ self._credential_source_executable = None
+ raise exceptions.MalformedError(
+ "Missing credential_source. The credential_source is not a dict."
+ )
+ self._credential_source_executable = credential_source.get("executable")
+ if not self._credential_source_executable:
+ raise exceptions.MalformedError(
+ "Missing credential_source. An 'executable' must be provided."
+ )
+ self._credential_source_executable_command = self._credential_source_executable.get(
+ "command"
+ )
+ self._credential_source_executable_timeout_millis = self._credential_source_executable.get(
+ "timeout_millis"
+ )
+ self._credential_source_executable_interactive_timeout_millis = self._credential_source_executable.get(
+ "interactive_timeout_millis"
+ )
+ self._credential_source_executable_output_file = self._credential_source_executable.get(
+ "output_file"
+ )
+
+ # Dummy value. This variable is only used via injection, not exposed to ctor
+ self._tokeninfo_username = ""
+
+ if not self._credential_source_executable_command:
+ raise exceptions.MalformedError(
+ "Missing command field. Executable command must be provided."
+ )
+ if not self._credential_source_executable_timeout_millis:
+ self._credential_source_executable_timeout_millis = (
+ EXECUTABLE_TIMEOUT_MILLIS_DEFAULT
+ )
+ elif (
+ self._credential_source_executable_timeout_millis
+ < EXECUTABLE_TIMEOUT_MILLIS_LOWER_BOUND
+ or self._credential_source_executable_timeout_millis
+ > EXECUTABLE_TIMEOUT_MILLIS_UPPER_BOUND
+ ):
+ raise exceptions.InvalidValue("Timeout must be between 5 and 120 seconds.")
+
+ if self._credential_source_executable_interactive_timeout_millis:
+ if (
+ self._credential_source_executable_interactive_timeout_millis
+ < EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_LOWER_BOUND
+ or self._credential_source_executable_interactive_timeout_millis
+ > EXECUTABLE_INTERACTIVE_TIMEOUT_MILLIS_UPPER_BOUND
+ ):
+ raise exceptions.InvalidValue(
+ "Interactive timeout must be between 30 seconds and 30 minutes."
+ )
+
+ @_helpers.copy_docstring(external_account.Credentials)
+ def retrieve_subject_token(self, request):
+ self._validate_running_mode()
+
+ # Check output file.
+ if self._credential_source_executable_output_file is not None:
+ try:
+ with open(
+ self._credential_source_executable_output_file, encoding="utf-8"
+ ) as output_file:
+ response = json.load(output_file)
+ except Exception:
+ pass
+ else:
+ try:
+ # If the cached response is expired, _parse_subject_token will raise an error which will be ignored and we will call the executable again.
+ subject_token = self._parse_subject_token(response)
+ if (
+ "expiration_time" not in response
+ ): # Always treat missing expiration_time as expired and proceed to executable run.
+ raise exceptions.RefreshError
+ except (exceptions.MalformedError, exceptions.InvalidValue):
+ raise
+ except exceptions.RefreshError:
+ pass
+ else:
+ return subject_token
+
+ if not _helpers.is_python_3():
+ raise exceptions.RefreshError(
+ "Pluggable auth is only supported for python 3.7+"
+ )
+
+ # Inject env vars.
+ env = os.environ.copy()
+ self._inject_env_variables(env)
+ env["GOOGLE_EXTERNAL_ACCOUNT_REVOKE"] = "0"
+
+ # Run executable.
+ exe_timeout = (
+ self._credential_source_executable_interactive_timeout_millis / 1000
+ if self.interactive
+ else self._credential_source_executable_timeout_millis / 1000
+ )
+ exe_stdin = sys.stdin if self.interactive else None
+ exe_stdout = sys.stdout if self.interactive else subprocess.PIPE
+ exe_stderr = sys.stdout if self.interactive else subprocess.STDOUT
+
+ result = subprocess.run(
+ self._credential_source_executable_command.split(),
+ timeout=exe_timeout,
+ stdin=exe_stdin,
+ stdout=exe_stdout,
+ stderr=exe_stderr,
+ env=env,
+ )
+ if result.returncode != 0:
+ raise exceptions.RefreshError(
+ "Executable exited with non-zero return code {}. Error: {}".format(
+ result.returncode, result.stdout
+ )
+ )
+
+ # Handle executable output.
+ response = json.loads(result.stdout.decode("utf-8")) if result.stdout else None
+ if not response and self._credential_source_executable_output_file is not None:
+ response = json.load(
+ open(self._credential_source_executable_output_file, encoding="utf-8")
+ )
+
+ subject_token = self._parse_subject_token(response)
+ return subject_token
+
+ def revoke(self, request):
+ """Revokes the subject token using the credential_source object.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ Raises:
+ google.auth.exceptions.RefreshError: If the executable revocation
+ is not properly executed.
+
+ """
+ if not self.interactive:
+ raise exceptions.InvalidValue(
+ "Revoke is only enabled under interactive mode."
+ )
+ self._validate_running_mode()
+
+ if not _helpers.is_python_3():
+ raise exceptions.RefreshError(
+ "Pluggable auth is only supported for python 3.7+"
+ )
+
+ # Inject variables
+ env = os.environ.copy()
+ self._inject_env_variables(env)
+ env["GOOGLE_EXTERNAL_ACCOUNT_REVOKE"] = "1"
+
+ # Run executable
+ result = subprocess.run(
+ self._credential_source_executable_command.split(),
+ timeout=self._credential_source_executable_interactive_timeout_millis
+ / 1000,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ env=env,
+ )
+
+ if result.returncode != 0:
+ raise exceptions.RefreshError(
+ "Auth revoke failed on executable. Exit with non-zero return code {}. Error: {}".format(
+ result.returncode, result.stdout
+ )
+ )
+
+ response = json.loads(result.stdout.decode("utf-8"))
+ self._validate_revoke_response(response)
+
+ @property
+ def external_account_id(self):
+ """Returns the external account identifier.
+
+ When service account impersonation is used the identifier is the service
+ account email.
+
+ Without service account impersonation, this returns None, unless it is
+ being used by the Google Cloud CLI which populates this field.
+ """
+
+ return self.service_account_email or self._tokeninfo_username
+
+ @classmethod
+ def from_info(cls, info, **kwargs):
+ """Creates a Pluggable Credentials instance from parsed external account info.
+
+ Args:
+ info (Mapping[str, str]): The Pluggable external account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.pluggable.Credentials: The constructed
+ credentials.
+
+ Raises:
+ google.auth.exceptions.InvalidValue: For invalid parameters.
+ google.auth.exceptions.MalformedError: For invalid parameters.
+ """
+ return super(Credentials, cls).from_info(info, **kwargs)
+
+ @classmethod
+ def from_file(cls, filename, **kwargs):
+ """Creates an Pluggable Credentials instance from an external account json file.
+
+ Args:
+ filename (str): The path to the Pluggable external account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.auth.pluggable.Credentials: The constructed
+ credentials.
+ """
+ return super(Credentials, cls).from_file(filename, **kwargs)
+
+ def _inject_env_variables(self, env):
+ env["GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE"] = self._audience
+ env["GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE"] = self._subject_token_type
+ env["GOOGLE_EXTERNAL_ACCOUNT_ID"] = self.external_account_id
+ env["GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE"] = "1" if self.interactive else "0"
+
+ if self._service_account_impersonation_url is not None:
+ env[
+ "GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL"
+ ] = self.service_account_email
+ if self._credential_source_executable_output_file is not None:
+ env[
+ "GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE"
+ ] = self._credential_source_executable_output_file
+
+ def _parse_subject_token(self, response):
+ self._validate_response_schema(response)
+ if not response["success"]:
+ if "code" not in response or "message" not in response:
+ raise exceptions.MalformedError(
+ "Error code and message fields are required in the response."
+ )
+ raise exceptions.RefreshError(
+ "Executable returned unsuccessful response: code: {}, message: {}.".format(
+ response["code"], response["message"]
+ )
+ )
+ if "expiration_time" in response and response["expiration_time"] < time.time():
+ raise exceptions.RefreshError(
+ "The token returned by the executable is expired."
+ )
+ if "token_type" not in response:
+ raise exceptions.MalformedError(
+ "The executable response is missing the token_type field."
+ )
+ if (
+ response["token_type"] == "urn:ietf:params:oauth:token-type:jwt"
+ or response["token_type"] == "urn:ietf:params:oauth:token-type:id_token"
+ ): # OIDC
+ return response["id_token"]
+ elif response["token_type"] == "urn:ietf:params:oauth:token-type:saml2": # SAML
+ return response["saml_response"]
+ else:
+ raise exceptions.RefreshError("Executable returned unsupported token type.")
+
+ def _validate_revoke_response(self, response):
+ self._validate_response_schema(response)
+ if not response["success"]:
+ raise exceptions.RefreshError("Revoke failed with unsuccessful response.")
+
+ def _validate_response_schema(self, response):
+ if "version" not in response:
+ raise exceptions.MalformedError(
+ "The executable response is missing the version field."
+ )
+ if response["version"] > EXECUTABLE_SUPPORTED_MAX_VERSION:
+ raise exceptions.RefreshError(
+ "Executable returned unsupported version {}.".format(
+ response["version"]
+ )
+ )
+
+ if "success" not in response:
+ raise exceptions.MalformedError(
+ "The executable response is missing the success field."
+ )
+
+ def _validate_running_mode(self):
+ env_allow_executables = os.environ.get(
+ "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"
+ )
+ if env_allow_executables != "1":
+ raise exceptions.MalformedError(
+ "Executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run."
+ )
+
+ if self.interactive and not self._credential_source_executable_output_file:
+ raise exceptions.MalformedError(
+ "An output_file must be specified in the credential configuration for interactive mode."
+ )
+
+ if (
+ self.interactive
+ and not self._credential_source_executable_interactive_timeout_millis
+ ):
+ raise exceptions.InvalidOperation(
+ "Interactive mode cannot run without an interactive timeout."
+ )
+
+ if self.interactive and not self.is_workforce_pool:
+ raise exceptions.InvalidValue(
+ "Interactive mode is only enabled for workforce pool."
+ )
+
+ def _create_default_metrics_options(self):
+ metrics_options = super(Credentials, self)._create_default_metrics_options()
+ metrics_options["source"] = "executable"
+ return metrics_options
diff --git a/contrib/python/google-auth/py3/google/auth/transport/__init__.py b/contrib/python/google-auth/py3/google/auth/transport/__init__.py
new file mode 100644
index 0000000000..724568e582
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/__init__.py
@@ -0,0 +1,103 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport - HTTP client library support.
+
+:mod:`google.auth` is designed to work with various HTTP client libraries such
+as urllib3 and requests. In order to work across these libraries with different
+interfaces some abstraction is needed.
+
+This module provides two interfaces that are implemented by transport adapters
+to support HTTP libraries. :class:`Request` defines the interface expected by
+:mod:`google.auth` to make requests. :class:`Response` defines the interface
+for the return value of :class:`Request`.
+"""
+
+import abc
+import http.client as http_client
+
+DEFAULT_RETRYABLE_STATUS_CODES = (
+ http_client.INTERNAL_SERVER_ERROR,
+ http_client.SERVICE_UNAVAILABLE,
+ http_client.REQUEST_TIMEOUT,
+ http_client.TOO_MANY_REQUESTS,
+)
+"""Sequence[int]: HTTP status codes indicating a request can be retried.
+"""
+
+
+DEFAULT_REFRESH_STATUS_CODES = (http_client.UNAUTHORIZED,)
+"""Sequence[int]: Which HTTP status code indicate that credentials should be
+refreshed.
+"""
+
+DEFAULT_MAX_REFRESH_ATTEMPTS = 2
+"""int: How many times to refresh the credentials and retry a request."""
+
+
+class Response(metaclass=abc.ABCMeta):
+ """HTTP Response data."""
+
+ @abc.abstractproperty
+ def status(self):
+ """int: The HTTP status code."""
+ raise NotImplementedError("status must be implemented.")
+
+ @abc.abstractproperty
+ def headers(self):
+ """Mapping[str, str]: The HTTP response headers."""
+ raise NotImplementedError("headers must be implemented.")
+
+ @abc.abstractproperty
+ def data(self):
+ """bytes: The response body."""
+ raise NotImplementedError("data must be implemented.")
+
+
+class Request(metaclass=abc.ABCMeta):
+ """Interface for a callable that makes HTTP requests.
+
+ Specific transport implementations should provide an implementation of
+ this that adapts their specific request / response API.
+
+ .. automethod:: __call__
+ """
+
+ @abc.abstractmethod
+ def __call__(
+ self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
+ ):
+ """Make an HTTP request.
+
+ Args:
+ url (str): The URI to be requested.
+ method (str): The HTTP method to use for the request. Defaults
+ to 'GET'.
+ body (bytes): The payload / body in HTTP request.
+ headers (Mapping[str, str]): Request headers.
+ timeout (Optional[int]): The number of seconds to wait for a
+ response from the server. If not specified or if None, the
+ transport-specific default timeout will be used.
+ kwargs: Additionally arguments passed on to the transport's
+ request method.
+
+ Returns:
+ Response: The HTTP response.
+
+ Raises:
+ google.auth.exceptions.TransportError: If any exception occurred.
+ """
+ # pylint: disable=redundant-returns-doc, missing-raises-doc
+ # (pylint doesn't play well with abstract docstrings.)
+ raise NotImplementedError("__call__ must be implemented.")
diff --git a/contrib/python/google-auth/py3/google/auth/transport/_aiohttp_requests.py b/contrib/python/google-auth/py3/google/auth/transport/_aiohttp_requests.py
new file mode 100644
index 0000000000..3a8da917a1
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/_aiohttp_requests.py
@@ -0,0 +1,390 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for Async HTTP (aiohttp).
+
+NOTE: This async support is experimental and marked internal. This surface may
+change in minor releases.
+"""
+
+from __future__ import absolute_import
+
+import asyncio
+import functools
+
+import aiohttp # type: ignore
+import urllib3 # type: ignore
+
+from google.auth import exceptions
+from google.auth import transport
+from google.auth.transport import requests
+
+# Timeout can be re-defined depending on async requirement. Currently made 60s more than
+# sync timeout.
+_DEFAULT_TIMEOUT = 180 # in seconds
+
+
+class _CombinedResponse(transport.Response):
+ """
+ In order to more closely resemble the `requests` interface, where a raw
+ and deflated content could be accessed at once, this class lazily reads the
+ stream in `transport.Response` so both return forms can be used.
+
+ The gzip and deflate transfer-encodings are automatically decoded for you
+ because the default parameter for autodecompress into the ClientSession is set
+ to False, and therefore we add this class to act as a wrapper for a user to be
+ able to access both the raw and decoded response bodies - mirroring the sync
+ implementation.
+ """
+
+ def __init__(self, response):
+ self._response = response
+ self._raw_content = None
+
+ def _is_compressed(self):
+ headers = self._response.headers
+ return "Content-Encoding" in headers and (
+ headers["Content-Encoding"] == "gzip"
+ or headers["Content-Encoding"] == "deflate"
+ )
+
+ @property
+ def status(self):
+ return self._response.status
+
+ @property
+ def headers(self):
+ return self._response.headers
+
+ @property
+ def data(self):
+ return self._response.content
+
+ async def raw_content(self):
+ if self._raw_content is None:
+ self._raw_content = await self._response.content.read()
+ return self._raw_content
+
+ async def content(self):
+ # Load raw_content if necessary
+ await self.raw_content()
+ if self._is_compressed():
+ decoder = urllib3.response.MultiDecoder(
+ self._response.headers["Content-Encoding"]
+ )
+ decompressed = decoder.decompress(self._raw_content)
+ return decompressed
+
+ return self._raw_content
+
+
+class _Response(transport.Response):
+ """
+ Requests transport response adapter.
+
+ Args:
+ response (requests.Response): The raw Requests response.
+ """
+
+ def __init__(self, response):
+ self._response = response
+
+ @property
+ def status(self):
+ return self._response.status
+
+ @property
+ def headers(self):
+ return self._response.headers
+
+ @property
+ def data(self):
+ return self._response.content
+
+
+class Request(transport.Request):
+ """Requests request adapter.
+
+ This class is used internally for making requests using asyncio transports
+ in a consistent way. If you use :class:`AuthorizedSession` you do not need
+ to construct or use this class directly.
+
+ This class can be useful if you want to manually refresh a
+ :class:`~google.auth.credentials.Credentials` instance::
+
+ import google.auth.transport.aiohttp_requests
+
+ request = google.auth.transport.aiohttp_requests.Request()
+
+ credentials.refresh(request)
+
+ Args:
+ session (aiohttp.ClientSession): An instance :class:`aiohttp.ClientSession` used
+ to make HTTP requests. If not specified, a session will be created.
+
+ .. automethod:: __call__
+ """
+
+ def __init__(self, session=None):
+ # TODO: Use auto_decompress property for aiohttp 3.7+
+ if session is not None and session._auto_decompress:
+ raise exceptions.InvalidOperation(
+ "Client sessions with auto_decompress=True are not supported."
+ )
+ self.session = session
+
+ async def __call__(
+ self,
+ url,
+ method="GET",
+ body=None,
+ headers=None,
+ timeout=_DEFAULT_TIMEOUT,
+ **kwargs,
+ ):
+ """
+ Make an HTTP request using aiohttp.
+
+ Args:
+ url (str): The URL to be requested.
+ method (Optional[str]):
+ The HTTP method to use for the request. Defaults to 'GET'.
+ body (Optional[bytes]):
+ The payload or body in HTTP request.
+ headers (Optional[Mapping[str, str]]):
+ Request headers.
+ timeout (Optional[int]): The number of seconds to wait for a
+ response from the server. If not specified or if None, the
+ requests default timeout will be used.
+ kwargs: Additional arguments passed through to the underlying
+ requests :meth:`requests.Session.request` method.
+
+ Returns:
+ google.auth.transport.Response: The HTTP response.
+
+ Raises:
+ google.auth.exceptions.TransportError: If any exception occurred.
+ """
+
+ try:
+ if self.session is None: # pragma: NO COVER
+ self.session = aiohttp.ClientSession(
+ auto_decompress=False
+ ) # pragma: NO COVER
+ requests._LOGGER.debug("Making request: %s %s", method, url)
+ response = await self.session.request(
+ method, url, data=body, headers=headers, timeout=timeout, **kwargs
+ )
+ return _CombinedResponse(response)
+
+ except aiohttp.ClientError as caught_exc:
+ new_exc = exceptions.TransportError(caught_exc)
+ raise new_exc from caught_exc
+
+ except asyncio.TimeoutError as caught_exc:
+ new_exc = exceptions.TransportError(caught_exc)
+ raise new_exc from caught_exc
+
+
+class AuthorizedSession(aiohttp.ClientSession):
+ """This is an async implementation of the Authorized Session class. We utilize an
+ aiohttp transport instance, and the interface mirrors the google.auth.transport.requests
+ Authorized Session class, except for the change in the transport used in the async use case.
+
+ A Requests Session class with credentials.
+
+ This class is used to perform requests to API endpoints that require
+ authorization::
+
+ from google.auth.transport import aiohttp_requests
+
+ async with aiohttp_requests.AuthorizedSession(credentials) as authed_session:
+ response = await authed_session.request(
+ 'GET', 'https://www.googleapis.com/storage/v1/b')
+
+ The underlying :meth:`request` implementation handles adding the
+ credentials' headers to the request and refreshing credentials as needed.
+
+ Args:
+ credentials (google.auth._credentials_async.Credentials):
+ The credentials to add to the request.
+ refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
+ that credentials should be refreshed and the request should be
+ retried.
+ max_refresh_attempts (int): The maximum number of times to attempt to
+ refresh the credentials and retry the request.
+ refresh_timeout (Optional[int]): The timeout value in seconds for
+ credential refresh HTTP requests.
+ auth_request (google.auth.transport.aiohttp_requests.Request):
+ (Optional) An instance of
+ :class:`~google.auth.transport.aiohttp_requests.Request` used when
+ refreshing credentials. If not passed,
+ an instance of :class:`~google.auth.transport.aiohttp_requests.Request`
+ is created.
+ kwargs: Additional arguments passed through to the underlying
+ ClientSession :meth:`aiohttp.ClientSession` object.
+ """
+
+ def __init__(
+ self,
+ credentials,
+ refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
+ max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
+ refresh_timeout=None,
+ auth_request=None,
+ auto_decompress=False,
+ **kwargs,
+ ):
+ super(AuthorizedSession, self).__init__(**kwargs)
+ self.credentials = credentials
+ self._refresh_status_codes = refresh_status_codes
+ self._max_refresh_attempts = max_refresh_attempts
+ self._refresh_timeout = refresh_timeout
+ self._is_mtls = False
+ self._auth_request = auth_request
+ self._auth_request_session = None
+ self._loop = asyncio.get_event_loop()
+ self._refresh_lock = asyncio.Lock()
+ self._auto_decompress = auto_decompress
+
+ async def request(
+ self,
+ method,
+ url,
+ data=None,
+ headers=None,
+ max_allowed_time=None,
+ timeout=_DEFAULT_TIMEOUT,
+ auto_decompress=False,
+ **kwargs,
+ ):
+
+ """Implementation of Authorized Session aiohttp request.
+
+ Args:
+ method (str):
+ The http request method used (e.g. GET, PUT, DELETE)
+ url (str):
+ The url at which the http request is sent.
+ data (Optional[dict]): Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the Request.
+ headers (Optional[dict]): Dictionary of HTTP Headers to send with the
+ Request.
+ timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
+ The amount of time in seconds to wait for the server response
+ with each individual request. Can also be passed as an
+ ``aiohttp.ClientTimeout`` object.
+ max_allowed_time (Optional[float]):
+ If the method runs longer than this, a ``Timeout`` exception is
+ automatically raised. Unlike the ``timeout`` parameter, this
+ value applies to the total method execution time, even if
+ multiple requests are made under the hood.
+
+ Mind that it is not guaranteed that the timeout error is raised
+ at ``max_allowed_time``. It might take longer, for example, if
+ an underlying request takes a lot of time, but the request
+ itself does not timeout, e.g. if a large file is being
+ transmitted. The timeout error will be raised after such
+ request completes.
+ """
+ # Headers come in as bytes which isn't expected behavior, the resumable
+ # media libraries in some cases expect a str type for the header values,
+ # but sometimes the operations return these in bytes types.
+ if headers:
+ for key in headers.keys():
+ if type(headers[key]) is bytes:
+ headers[key] = headers[key].decode("utf-8")
+
+ async with aiohttp.ClientSession(
+ auto_decompress=self._auto_decompress
+ ) as self._auth_request_session:
+ auth_request = Request(self._auth_request_session)
+ self._auth_request = auth_request
+
+ # Use a kwarg for this instead of an attribute to maintain
+ # thread-safety.
+ _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)
+ # Make a copy of the headers. They will be modified by the credentials
+ # and we want to pass the original headers if we recurse.
+ request_headers = headers.copy() if headers is not None else {}
+
+ # Do not apply the timeout unconditionally in order to not override the
+ # _auth_request's default timeout.
+ auth_request = (
+ self._auth_request
+ if timeout is None
+ else functools.partial(self._auth_request, timeout=timeout)
+ )
+
+ remaining_time = max_allowed_time
+
+ with requests.TimeoutGuard(remaining_time, asyncio.TimeoutError) as guard:
+ await self.credentials.before_request(
+ auth_request, method, url, request_headers
+ )
+
+ with requests.TimeoutGuard(remaining_time, asyncio.TimeoutError) as guard:
+ response = await super(AuthorizedSession, self).request(
+ method,
+ url,
+ data=data,
+ headers=request_headers,
+ timeout=timeout,
+ **kwargs,
+ )
+
+ remaining_time = guard.remaining_timeout
+
+ if (
+ response.status in self._refresh_status_codes
+ and _credential_refresh_attempt < self._max_refresh_attempts
+ ):
+
+ requests._LOGGER.info(
+ "Refreshing credentials due to a %s response. Attempt %s/%s.",
+ response.status,
+ _credential_refresh_attempt + 1,
+ self._max_refresh_attempts,
+ )
+
+ # Do not apply the timeout unconditionally in order to not override the
+ # _auth_request's default timeout.
+ auth_request = (
+ self._auth_request
+ if timeout is None
+ else functools.partial(self._auth_request, timeout=timeout)
+ )
+
+ with requests.TimeoutGuard(
+ remaining_time, asyncio.TimeoutError
+ ) as guard:
+ async with self._refresh_lock:
+ await self._loop.run_in_executor(
+ None, self.credentials.refresh, auth_request
+ )
+
+ remaining_time = guard.remaining_timeout
+
+ return await self.request(
+ method,
+ url,
+ data=data,
+ headers=headers,
+ max_allowed_time=remaining_time,
+ timeout=timeout,
+ _credential_refresh_attempt=_credential_refresh_attempt + 1,
+ **kwargs,
+ )
+
+ return response
diff --git a/contrib/python/google-auth/py3/google/auth/transport/_custom_tls_signer.py b/contrib/python/google-auth/py3/google/auth/transport/_custom_tls_signer.py
new file mode 100644
index 0000000000..07f14df02d
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/_custom_tls_signer.py
@@ -0,0 +1,234 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Code for configuring client side TLS to offload the signing operation to
+signing libraries.
+"""
+
+import ctypes
+import json
+import logging
+import os
+import sys
+
+import cffi # type: ignore
+
+from google.auth import exceptions
+
+_LOGGER = logging.getLogger(__name__)
+
+# C++ offload lib requires google-auth lib to provide the following callback:
+# using SignFunc = int (*)(unsigned char *sig, size_t *sig_len,
+# const unsigned char *tbs, size_t tbs_len)
+# The bytes to be signed and the length are provided via `tbs` and `tbs_len`,
+# the callback computes the signature, and writes the signature and its length
+# into `sig` and `sig_len`.
+# If the signing is successful, the callback returns 1, otherwise it returns 0.
+SIGN_CALLBACK_CTYPE = ctypes.CFUNCTYPE(
+ ctypes.c_int, # return type
+ ctypes.POINTER(ctypes.c_ubyte), # sig
+ ctypes.POINTER(ctypes.c_size_t), # sig_len
+ ctypes.POINTER(ctypes.c_ubyte), # tbs
+ ctypes.c_size_t, # tbs_len
+)
+
+
+# Cast SSL_CTX* to void*
+def _cast_ssl_ctx_to_void_p(ssl_ctx):
+ return ctypes.cast(int(cffi.FFI().cast("intptr_t", ssl_ctx)), ctypes.c_void_p)
+
+
+# Load offload library and set up the function types.
+def load_offload_lib(offload_lib_path):
+ _LOGGER.debug("loading offload library from %s", offload_lib_path)
+
+ # winmode parameter is only available for python 3.8+.
+ lib = (
+ ctypes.CDLL(offload_lib_path, winmode=0)
+ if sys.version_info >= (3, 8) and os.name == "nt"
+ else ctypes.CDLL(offload_lib_path)
+ )
+
+ # Set up types for:
+ # int ConfigureSslContext(SignFunc sign_func, const char *cert, SSL_CTX *ctx)
+ lib.ConfigureSslContext.argtypes = [
+ SIGN_CALLBACK_CTYPE,
+ ctypes.c_char_p,
+ ctypes.c_void_p,
+ ]
+ lib.ConfigureSslContext.restype = ctypes.c_int
+
+ return lib
+
+
+# Load signer library and set up the function types.
+# See: https://github.com/googleapis/enterprise-certificate-proxy/blob/main/cshared/main.go
+def load_signer_lib(signer_lib_path):
+ _LOGGER.debug("loading signer library from %s", signer_lib_path)
+
+ # winmode parameter is only available for python 3.8+.
+ lib = (
+ ctypes.CDLL(signer_lib_path, winmode=0)
+ if sys.version_info >= (3, 8) and os.name == "nt"
+ else ctypes.CDLL(signer_lib_path)
+ )
+
+ # Set up types for:
+ # func GetCertPemForPython(configFilePath *C.char, certHolder *byte, certHolderLen int)
+ lib.GetCertPemForPython.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int]
+ # Returns: certLen
+ lib.GetCertPemForPython.restype = ctypes.c_int
+
+ # Set up types for:
+ # func SignForPython(configFilePath *C.char, digest *byte, digestLen int,
+ # sigHolder *byte, sigHolderLen int)
+ lib.SignForPython.argtypes = [
+ ctypes.c_char_p,
+ ctypes.c_char_p,
+ ctypes.c_int,
+ ctypes.c_char_p,
+ ctypes.c_int,
+ ]
+ # Returns: the signature length
+ lib.SignForPython.restype = ctypes.c_int
+
+ return lib
+
+
+# Computes SHA256 hash.
+def _compute_sha256_digest(to_be_signed, to_be_signed_len):
+ from cryptography.hazmat.primitives import hashes
+
+ data = ctypes.string_at(to_be_signed, to_be_signed_len)
+ hash = hashes.Hash(hashes.SHA256())
+ hash.update(data)
+ return hash.finalize()
+
+
+# Create the signing callback. The actual signing work is done by the
+# `SignForPython` method from the signer lib.
+def get_sign_callback(signer_lib, config_file_path):
+ def sign_callback(sig, sig_len, tbs, tbs_len):
+ _LOGGER.debug("calling sign callback...")
+
+ digest = _compute_sha256_digest(tbs, tbs_len)
+ digestArray = ctypes.c_char * len(digest)
+
+ # reserve 2000 bytes for the signature, should be more than enough.
+ # RSA signature is 256 bytes, EC signature is 70~72.
+ sig_holder_len = 2000
+ sig_holder = ctypes.create_string_buffer(sig_holder_len)
+
+ signature_len = signer_lib.SignForPython(
+ config_file_path.encode(), # configFilePath
+ digestArray.from_buffer(bytearray(digest)), # digest
+ len(digest), # digestLen
+ sig_holder, # sigHolder
+ sig_holder_len, # sigHolderLen
+ )
+
+ if signature_len == 0:
+ # signing failed, return 0
+ return 0
+
+ sig_len[0] = signature_len
+ bs = bytearray(sig_holder)
+ for i in range(signature_len):
+ sig[i] = bs[i]
+
+ return 1
+
+ return SIGN_CALLBACK_CTYPE(sign_callback)
+
+
+# Obtain the certificate bytes by calling the `GetCertPemForPython` method from
+# the signer lib. The method is called twice, the first time is to compute the
+# cert length, then we create a buffer to hold the cert, and call it again to
+# fill the buffer.
+def get_cert(signer_lib, config_file_path):
+ # First call to calculate the cert length
+ cert_len = signer_lib.GetCertPemForPython(
+ config_file_path.encode(), # configFilePath
+ None, # certHolder
+ 0, # certHolderLen
+ )
+ if cert_len == 0:
+ raise exceptions.MutualTLSChannelError("failed to get certificate")
+
+ # Then we create an array to hold the cert, and call again to fill the cert
+ cert_holder = ctypes.create_string_buffer(cert_len)
+ signer_lib.GetCertPemForPython(
+ config_file_path.encode(), # configFilePath
+ cert_holder, # certHolder
+ cert_len, # certHolderLen
+ )
+ return bytes(cert_holder)
+
+
+class CustomTlsSigner(object):
+ def __init__(self, enterprise_cert_file_path):
+ """
+ This class loads the offload and signer library, and calls APIs from
+ these libraries to obtain the cert and a signing callback, and attach
+ them to SSL context. The cert and the signing callback will be used
+ for client authentication in TLS handshake.
+
+ Args:
+ enterprise_cert_file_path (str): the path to an enterprise cert JSON
+ file. The file should contain the following field:
+
+ {
+ "libs": {
+ "ecp_client": "...",
+ "tls_offload": "..."
+ }
+ }
+ """
+ self._enterprise_cert_file_path = enterprise_cert_file_path
+ self._cert = None
+ self._sign_callback = None
+
+ def load_libraries(self):
+ try:
+ with open(self._enterprise_cert_file_path, "r") as f:
+ enterprise_cert_json = json.load(f)
+ libs = enterprise_cert_json["libs"]
+ signer_library = libs["ecp_client"]
+ offload_library = libs["tls_offload"]
+ except (KeyError, ValueError) as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(
+ "enterprise cert file is invalid", caught_exc
+ )
+ raise new_exc from caught_exc
+ self._offload_lib = load_offload_lib(offload_library)
+ self._signer_lib = load_signer_lib(signer_library)
+
+ def set_up_custom_key(self):
+ # We need to keep a reference of the cert and sign callback so it won't
+ # be garbage collected, otherwise it will crash when used by signer lib.
+ self._cert = get_cert(self._signer_lib, self._enterprise_cert_file_path)
+ self._sign_callback = get_sign_callback(
+ self._signer_lib, self._enterprise_cert_file_path
+ )
+
+ def attach_to_ssl_context(self, ctx):
+ # In the TLS handshake, the signing operation will be done by the
+ # sign_callback.
+ if not self._offload_lib.ConfigureSslContext(
+ self._sign_callback,
+ ctypes.c_char_p(self._cert),
+ _cast_ssl_ctx_to_void_p(ctx._ctx._context),
+ ):
+ raise exceptions.MutualTLSChannelError("failed to configure SSL context")
diff --git a/contrib/python/google-auth/py3/google/auth/transport/_http_client.py b/contrib/python/google-auth/py3/google/auth/transport/_http_client.py
new file mode 100644
index 0000000000..cec0ab73fb
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/_http_client.py
@@ -0,0 +1,113 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for http.client, for internal use only."""
+
+import http.client as http_client
+import logging
+import socket
+import urllib
+
+from google.auth import exceptions
+from google.auth import transport
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class Response(transport.Response):
+ """http.client transport response adapter.
+
+ Args:
+ response (http.client.HTTPResponse): The raw http client response.
+ """
+
+ def __init__(self, response):
+ self._status = response.status
+ self._headers = {key.lower(): value for key, value in response.getheaders()}
+ self._data = response.read()
+
+ @property
+ def status(self):
+ return self._status
+
+ @property
+ def headers(self):
+ return self._headers
+
+ @property
+ def data(self):
+ return self._data
+
+
+class Request(transport.Request):
+ """http.client transport request adapter."""
+
+ def __call__(
+ self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
+ ):
+ """Make an HTTP request using http.client.
+
+ Args:
+ url (str): The URI to be requested.
+ method (str): The HTTP method to use for the request. Defaults
+ to 'GET'.
+ body (bytes): The payload / body in HTTP request.
+ headers (Mapping): Request headers.
+ timeout (Optional(int)): The number of seconds to wait for a
+ response from the server. If not specified or if None, the
+ socket global default timeout will be used.
+ kwargs: Additional arguments passed through to the underlying
+ :meth:`~http.client.HTTPConnection.request` method.
+
+ Returns:
+ Response: The HTTP response.
+
+ Raises:
+ google.auth.exceptions.TransportError: If any exception occurred.
+ """
+ # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.
+ if timeout is None:
+ timeout = socket._GLOBAL_DEFAULT_TIMEOUT
+
+ # http.client doesn't allow None as the headers argument.
+ if headers is None:
+ headers = {}
+
+ # http.client needs the host and path parts specified separately.
+ parts = urllib.parse.urlsplit(url)
+ path = urllib.parse.urlunsplit(
+ ("", "", parts.path, parts.query, parts.fragment)
+ )
+
+ if parts.scheme != "http":
+ raise exceptions.TransportError(
+ "http.client transport only supports the http scheme, {}"
+ "was specified".format(parts.scheme)
+ )
+
+ connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)
+
+ try:
+ _LOGGER.debug("Making request: %s %s", method, url)
+
+ connection.request(method, path, body=body, headers=headers, **kwargs)
+ response = connection.getresponse()
+ return Response(response)
+
+ except (http_client.HTTPException, socket.error) as caught_exc:
+ new_exc = exceptions.TransportError(caught_exc)
+ raise new_exc from caught_exc
+
+ finally:
+ connection.close()
diff --git a/contrib/python/google-auth/py3/google/auth/transport/_mtls_helper.py b/contrib/python/google-auth/py3/google/auth/transport/_mtls_helper.py
new file mode 100644
index 0000000000..1b9b9c285c
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/_mtls_helper.py
@@ -0,0 +1,252 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for getting mTLS cert and key."""
+
+import json
+import logging
+from os import path
+import re
+import subprocess
+
+from google.auth import exceptions
+
+CONTEXT_AWARE_METADATA_PATH = "~/.secureConnect/context_aware_metadata.json"
+_CERT_PROVIDER_COMMAND = "cert_provider_command"
+_CERT_REGEX = re.compile(
+ b"-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\r?\n?", re.DOTALL
+)
+
+# support various format of key files, e.g.
+# "-----BEGIN PRIVATE KEY-----...",
+# "-----BEGIN EC PRIVATE KEY-----...",
+# "-----BEGIN RSA PRIVATE KEY-----..."
+# "-----BEGIN ENCRYPTED PRIVATE KEY-----"
+_KEY_REGEX = re.compile(
+ b"-----BEGIN [A-Z ]*PRIVATE KEY-----.+-----END [A-Z ]*PRIVATE KEY-----\r?\n?",
+ re.DOTALL,
+)
+
+_LOGGER = logging.getLogger(__name__)
+
+
+_PASSPHRASE_REGEX = re.compile(
+ b"-----BEGIN PASSPHRASE-----(.+)-----END PASSPHRASE-----", re.DOTALL
+)
+
+
+def _check_dca_metadata_path(metadata_path):
+ """Checks for context aware metadata. If it exists, returns the absolute path;
+ otherwise returns None.
+
+ Args:
+ metadata_path (str): context aware metadata path.
+
+ Returns:
+ str: absolute path if exists and None otherwise.
+ """
+ metadata_path = path.expanduser(metadata_path)
+ if not path.exists(metadata_path):
+ _LOGGER.debug("%s is not found, skip client SSL authentication.", metadata_path)
+ return None
+ return metadata_path
+
+
+def _read_dca_metadata_file(metadata_path):
+ """Loads context aware metadata from the given path.
+
+ Args:
+ metadata_path (str): context aware metadata path.
+
+ Returns:
+ Dict[str, str]: The metadata.
+
+ Raises:
+ google.auth.exceptions.ClientCertError: If failed to parse metadata as JSON.
+ """
+ try:
+ with open(metadata_path) as f:
+ metadata = json.load(f)
+ except ValueError as caught_exc:
+ new_exc = exceptions.ClientCertError(caught_exc)
+ raise new_exc from caught_exc
+
+ return metadata
+
+
+def _run_cert_provider_command(command, expect_encrypted_key=False):
+ """Run the provided command, and return client side mTLS cert, key and
+ passphrase.
+
+ Args:
+ command (List[str]): cert provider command.
+ expect_encrypted_key (bool): If encrypted private key is expected.
+
+ Returns:
+ Tuple[bytes, bytes, bytes]: client certificate bytes in PEM format, key
+ bytes in PEM format and passphrase bytes.
+
+ Raises:
+ google.auth.exceptions.ClientCertError: if a problem occurs when running
+ the cert provider command or generating cert, key and passphrase.
+ """
+ try:
+ process = subprocess.Popen(
+ command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
+ stdout, stderr = process.communicate()
+ except OSError as caught_exc:
+ new_exc = exceptions.ClientCertError(caught_exc)
+ raise new_exc from caught_exc
+
+ # Check cert provider command execution error.
+ if process.returncode != 0:
+ raise exceptions.ClientCertError(
+ "Cert provider command returns non-zero status code %s" % process.returncode
+ )
+
+ # Extract certificate (chain), key and passphrase.
+ cert_match = re.findall(_CERT_REGEX, stdout)
+ if len(cert_match) != 1:
+ raise exceptions.ClientCertError("Client SSL certificate is missing or invalid")
+ key_match = re.findall(_KEY_REGEX, stdout)
+ if len(key_match) != 1:
+ raise exceptions.ClientCertError("Client SSL key is missing or invalid")
+ passphrase_match = re.findall(_PASSPHRASE_REGEX, stdout)
+
+ if expect_encrypted_key:
+ if len(passphrase_match) != 1:
+ raise exceptions.ClientCertError("Passphrase is missing or invalid")
+ if b"ENCRYPTED" not in key_match[0]:
+ raise exceptions.ClientCertError("Encrypted private key is expected")
+ return cert_match[0], key_match[0], passphrase_match[0].strip()
+
+ if b"ENCRYPTED" in key_match[0]:
+ raise exceptions.ClientCertError("Encrypted private key is not expected")
+ if len(passphrase_match) > 0:
+ raise exceptions.ClientCertError("Passphrase is not expected")
+ return cert_match[0], key_match[0], None
+
+
+def get_client_ssl_credentials(
+ generate_encrypted_key=False,
+ context_aware_metadata_path=CONTEXT_AWARE_METADATA_PATH,
+):
+ """Returns the client side certificate, private key and passphrase.
+
+ Args:
+ generate_encrypted_key (bool): If set to True, encrypted private key
+ and passphrase will be generated; otherwise, unencrypted private key
+ will be generated and passphrase will be None.
+ context_aware_metadata_path (str): The context_aware_metadata.json file path.
+
+ Returns:
+ Tuple[bool, bytes, bytes, bytes]:
+ A boolean indicating if cert, key and passphrase are obtained, the
+ cert bytes and key bytes both in PEM format, and passphrase bytes.
+
+ Raises:
+ google.auth.exceptions.ClientCertError: if a problem occurs when getting
+ the cert, key and passphrase.
+ """
+ metadata_path = _check_dca_metadata_path(context_aware_metadata_path)
+
+ if metadata_path:
+ metadata_json = _read_dca_metadata_file(metadata_path)
+
+ if _CERT_PROVIDER_COMMAND not in metadata_json:
+ raise exceptions.ClientCertError("Cert provider command is not found")
+
+ command = metadata_json[_CERT_PROVIDER_COMMAND]
+
+ if generate_encrypted_key and "--with_passphrase" not in command:
+ command.append("--with_passphrase")
+
+ # Execute the command.
+ cert, key, passphrase = _run_cert_provider_command(
+ command, expect_encrypted_key=generate_encrypted_key
+ )
+ return True, cert, key, passphrase
+
+ return False, None, None, None
+
+
+def get_client_cert_and_key(client_cert_callback=None):
+ """Returns the client side certificate and private key. The function first
+ tries to get certificate and key from client_cert_callback; if the callback
+ is None or doesn't provide certificate and key, the function tries application
+ default SSL credentials.
+
+ Args:
+ client_cert_callback (Optional[Callable[[], (bytes, bytes)]]): An
+ optional callback which returns client certificate bytes and private
+ key bytes both in PEM format.
+
+ Returns:
+ Tuple[bool, bytes, bytes]:
+ A boolean indicating if cert and key are obtained, the cert bytes
+ and key bytes both in PEM format.
+
+ Raises:
+ google.auth.exceptions.ClientCertError: if a problem occurs when getting
+ the cert and key.
+ """
+ if client_cert_callback:
+ cert, key = client_cert_callback()
+ return True, cert, key
+
+ has_cert, cert, key, _ = get_client_ssl_credentials(generate_encrypted_key=False)
+ return has_cert, cert, key
+
+
+def decrypt_private_key(key, passphrase):
+ """A helper function to decrypt the private key with the given passphrase.
+ google-auth library doesn't support passphrase protected private key for
+ mutual TLS channel. This helper function can be used to decrypt the
+ passphrase protected private key in order to establish mutual TLS channel.
+
+ For example, if you have a function which produces client cert, passphrase
+ protected private key and passphrase, you can convert it to a client cert
+ callback function accepted by google-auth::
+
+ from google.auth.transport import _mtls_helper
+
+ def your_client_cert_function():
+ return cert, encrypted_key, passphrase
+
+ # callback accepted by google-auth for mutual TLS channel.
+ def client_cert_callback():
+ cert, encrypted_key, passphrase = your_client_cert_function()
+ decrypted_key = _mtls_helper.decrypt_private_key(encrypted_key,
+ passphrase)
+ return cert, decrypted_key
+
+ Args:
+ key (bytes): The private key bytes in PEM format.
+ passphrase (bytes): The passphrase bytes.
+
+ Returns:
+ bytes: The decrypted private key in PEM format.
+
+ Raises:
+ ImportError: If pyOpenSSL is not installed.
+ OpenSSL.crypto.Error: If there is any problem decrypting the private key.
+ """
+ from OpenSSL import crypto
+
+ # First convert encrypted_key_bytes to PKey object
+ pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key, passphrase=passphrase)
+
+ # Then dump the decrypted key bytes
+ return crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
diff --git a/contrib/python/google-auth/py3/google/auth/transport/grpc.py b/contrib/python/google-auth/py3/google/auth/transport/grpc.py
new file mode 100644
index 0000000000..9a817976d7
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/grpc.py
@@ -0,0 +1,343 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Authorization support for gRPC."""
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+from google.oauth2 import service_account
+
+try:
+ import grpc # type: ignore
+except ImportError as caught_exc: # pragma: NO COVER
+ raise ImportError(
+ "gRPC is not installed from please install the grpcio package to use the gRPC transport."
+ ) from caught_exc
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class AuthMetadataPlugin(grpc.AuthMetadataPlugin):
+ """A `gRPC AuthMetadataPlugin`_ that inserts the credentials into each
+ request.
+
+ .. _gRPC AuthMetadataPlugin:
+ http://www.grpc.io/grpc/python/grpc.html#grpc.AuthMetadataPlugin
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ add to requests.
+ request (google.auth.transport.Request): A HTTP transport request
+ object used to refresh credentials as needed.
+ default_host (Optional[str]): A host like "pubsub.googleapis.com".
+ This is used when a self-signed JWT is created from service
+ account credentials.
+ """
+
+ def __init__(self, credentials, request, default_host=None):
+ # pylint: disable=no-value-for-parameter
+ # pylint doesn't realize that the super method takes no arguments
+ # because this class is the same name as the superclass.
+ super(AuthMetadataPlugin, self).__init__()
+ self._credentials = credentials
+ self._request = request
+ self._default_host = default_host
+
+ def _get_authorization_headers(self, context):
+ """Gets the authorization headers for a request.
+
+ Returns:
+ Sequence[Tuple[str, str]]: A list of request headers (key, value)
+ to add to the request.
+ """
+ headers = {}
+
+ # https://google.aip.dev/auth/4111
+ # Attempt to use self-signed JWTs when a service account is used.
+ # A default host must be explicitly provided since it cannot always
+ # be determined from the context.service_url.
+ if isinstance(self._credentials, service_account.Credentials):
+ self._credentials._create_self_signed_jwt(
+ "https://{}/".format(self._default_host) if self._default_host else None
+ )
+
+ self._credentials.before_request(
+ self._request, context.method_name, context.service_url, headers
+ )
+
+ return list(headers.items())
+
+ def __call__(self, context, callback):
+ """Passes authorization metadata into the given callback.
+
+ Args:
+ context (grpc.AuthMetadataContext): The RPC context.
+ callback (grpc.AuthMetadataPluginCallback): The callback that will
+ be invoked to pass in the authorization metadata.
+ """
+ callback(self._get_authorization_headers(context), None)
+
+
+def secure_authorized_channel(
+ credentials,
+ request,
+ target,
+ ssl_credentials=None,
+ client_cert_callback=None,
+ **kwargs
+):
+ """Creates a secure authorized gRPC channel.
+
+ This creates a channel with SSL and :class:`AuthMetadataPlugin`. This
+ channel can be used to create a stub that can make authorized requests.
+ Users can configure client certificate or rely on device certificates to
+ establish a mutual TLS channel, if the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ variable is explicitly set to `true`.
+
+ Example::
+
+ import google.auth
+ import google.auth.transport.grpc
+ import google.auth.transport.requests
+ from google.cloud.speech.v1 import cloud_speech_pb2
+
+ # Get credentials.
+ credentials, _ = google.auth.default()
+
+ # Get an HTTP request function to refresh credentials.
+ request = google.auth.transport.requests.Request()
+
+ # Create a channel.
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, regular_endpoint, request,
+ ssl_credentials=grpc.ssl_channel_credentials())
+
+ # Use the channel to create a stub.
+ cloud_speech.create_Speech_stub(channel)
+
+ Usage:
+
+ There are actually a couple of options to create a channel, depending on if
+ you want to create a regular or mutual TLS channel.
+
+ First let's list the endpoints (regular vs mutual TLS) to choose from::
+
+ regular_endpoint = 'speech.googleapis.com:443'
+ mtls_endpoint = 'speech.mtls.googleapis.com:443'
+
+ Option 1: create a regular (non-mutual) TLS channel by explicitly setting
+ the ssl_credentials::
+
+ regular_ssl_credentials = grpc.ssl_channel_credentials()
+
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, regular_endpoint, request,
+ ssl_credentials=regular_ssl_credentials)
+
+ Option 2: create a mutual TLS channel by calling a callback which returns
+ the client side certificate and the key (Note that
+ `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
+ set to `true`)::
+
+ def my_client_cert_callback():
+ code_to_load_client_cert_and_key()
+ if loaded:
+ return (pem_cert_bytes, pem_key_bytes)
+ raise MyClientCertFailureException()
+
+ try:
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, mtls_endpoint, request,
+ client_cert_callback=my_client_cert_callback)
+ except MyClientCertFailureException:
+ # handle the exception
+
+ Option 3: use application default SSL credentials. It searches and uses
+ the command in a context aware metadata file, which is available on devices
+ with endpoint verification support (Note that
+ `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable must be explicitly
+ set to `true`).
+ See https://cloud.google.com/endpoint-verification/docs/overview::
+
+ try:
+ default_ssl_credentials = SslCredentials()
+ except:
+ # Exception can be raised if the context aware metadata is malformed.
+ # See :class:`SslCredentials` for the possible exceptions.
+
+ # Choose the endpoint based on the SSL credentials type.
+ if default_ssl_credentials.is_mtls:
+ endpoint_to_use = mtls_endpoint
+ else:
+ endpoint_to_use = regular_endpoint
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, endpoint_to_use, request,
+ ssl_credentials=default_ssl_credentials)
+
+ Option 4: not setting ssl_credentials and client_cert_callback. For devices
+ without endpoint verification support or `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ environment variable is not `true`, a regular TLS channel is created;
+ otherwise, a mutual TLS channel is created, however, the call should be
+ wrapped in a try/except block in case of malformed context aware metadata.
+
+ The following code uses regular_endpoint, it works the same no matter the
+ created channel is regular or mutual TLS. Regular endpoint ignores client
+ certificate and key::
+
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, regular_endpoint, request)
+
+ The following code uses mtls_endpoint, if the created channel is regular,
+ and API mtls_endpoint is configured to require client SSL credentials, API
+ calls using this channel will be rejected::
+
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, mtls_endpoint, request)
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ add to requests.
+ request (google.auth.transport.Request): A HTTP transport request
+ object used to refresh credentials as needed. Even though gRPC
+ is a separate transport, there's no way to refresh the credentials
+ without using a standard http transport.
+ target (str): The host and port of the service.
+ ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
+ credentials. This can be used to specify different certificates.
+ This argument is mutually exclusive with client_cert_callback;
+ providing both will raise an exception.
+ If ssl_credentials and client_cert_callback are None, application
+ default SSL credentials are used if `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ environment variable is explicitly set to `true`, otherwise one way TLS
+ SSL credentials are used.
+ client_cert_callback (Callable[[], (bytes, bytes)]): Optional
+ callback function to obtain client certificate and key for mutual TLS
+ connection. This argument is mutually exclusive with
+ ssl_credentials; providing both will raise an exception.
+ This argument does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ environment variable is explicitly set to `true`.
+ kwargs: Additional arguments to pass to :func:`grpc.secure_channel`.
+
+ Returns:
+ grpc.Channel: The created gRPC channel.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+ creation failed for any reason.
+ """
+ # Create the metadata plugin for inserting the authorization header.
+ metadata_plugin = AuthMetadataPlugin(credentials, request)
+
+ # Create a set of grpc.CallCredentials using the metadata plugin.
+ google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
+
+ if ssl_credentials and client_cert_callback:
+ raise exceptions.MalformedError(
+ "Received both ssl_credentials and client_cert_callback; "
+ "these are mutually exclusive."
+ )
+
+ # If SSL credentials are not explicitly set, try client_cert_callback and ADC.
+ if not ssl_credentials:
+ use_client_cert = os.getenv(
+ environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+ )
+ if use_client_cert == "true" and client_cert_callback:
+ # Use the callback if provided.
+ cert, key = client_cert_callback()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ elif use_client_cert == "true":
+ # Use application default SSL credentials.
+ adc_ssl_credentils = SslCredentials()
+ ssl_credentials = adc_ssl_credentils.ssl_credentials
+ else:
+ ssl_credentials = grpc.ssl_channel_credentials()
+
+ # Combine the ssl credentials and the authorization credentials.
+ composite_credentials = grpc.composite_channel_credentials(
+ ssl_credentials, google_auth_credentials
+ )
+
+ return grpc.secure_channel(target, composite_credentials, **kwargs)
+
+
+class SslCredentials:
+ """Class for application default SSL credentials.
+
+ The behavior is controlled by `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment
+ variable whose default value is `false`. Client certificate will not be used
+ unless the environment variable is explicitly set to `true`. See
+ https://google.aip.dev/auth/4114
+
+ If the environment variable is `true`, then for devices with endpoint verification
+ support, a device certificate will be automatically loaded and mutual TLS will
+ be established.
+ See https://cloud.google.com/endpoint-verification/docs/overview.
+ """
+
+ def __init__(self):
+ use_client_cert = os.getenv(
+ environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+ )
+ if use_client_cert != "true":
+ self._is_mtls = False
+ else:
+ # Load client SSL credentials.
+ metadata_path = _mtls_helper._check_dca_metadata_path(
+ _mtls_helper.CONTEXT_AWARE_METADATA_PATH
+ )
+ self._is_mtls = metadata_path is not None
+
+ @property
+ def ssl_credentials(self):
+ """Get the created SSL channel credentials.
+
+ For devices with endpoint verification support, if the device certificate
+ loading has any problems, corresponding exceptions will be raised. For
+ a device without endpoint verification support, no exceptions will be
+ raised.
+
+ Returns:
+ grpc.ChannelCredentials: The created grpc channel credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+ creation failed for any reason.
+ """
+ if self._is_mtls:
+ try:
+ _, cert, key, _ = _mtls_helper.get_client_ssl_credentials()
+ self._ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ except exceptions.ClientCertError as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+ else:
+ self._ssl_credentials = grpc.ssl_channel_credentials()
+
+ return self._ssl_credentials
+
+ @property
+ def is_mtls(self):
+ """Indicates if the created SSL channel credentials is mutual TLS."""
+ return self._is_mtls
diff --git a/contrib/python/google-auth/py3/google/auth/transport/mtls.py b/contrib/python/google-auth/py3/google/auth/transport/mtls.py
new file mode 100644
index 0000000000..c5707617ff
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/mtls.py
@@ -0,0 +1,103 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for mutual TLS."""
+
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+
+
+def has_default_client_cert_source():
+ """Check if default client SSL credentials exists on the device.
+
+ Returns:
+ bool: indicating if the default client cert source exists.
+ """
+ metadata_path = _mtls_helper._check_dca_metadata_path(
+ _mtls_helper.CONTEXT_AWARE_METADATA_PATH
+ )
+ return metadata_path is not None
+
+
+def default_client_cert_source():
+ """Get a callback which returns the default client SSL credentials.
+
+ Returns:
+ Callable[[], [bytes, bytes]]: A callback which returns the default
+ client certificate bytes and private key bytes, both in PEM format.
+
+ Raises:
+ google.auth.exceptions.DefaultClientCertSourceError: If the default
+ client SSL credentials don't exist or are malformed.
+ """
+ if not has_default_client_cert_source():
+ raise exceptions.MutualTLSChannelError(
+ "Default client cert source doesn't exist"
+ )
+
+ def callback():
+ try:
+ _, cert_bytes, key_bytes = _mtls_helper.get_client_cert_and_key()
+ except (OSError, RuntimeError, ValueError) as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+
+ return cert_bytes, key_bytes
+
+ return callback
+
+
+def default_client_encrypted_cert_source(cert_path, key_path):
+ """Get a callback which returns the default encrypted client SSL credentials.
+
+ Args:
+ cert_path (str): The cert file path. The default client certificate will
+ be written to this file when the returned callback is called.
+ key_path (str): The key file path. The default encrypted client key will
+ be written to this file when the returned callback is called.
+
+ Returns:
+ Callable[[], [str, str, bytes]]: A callback which generates the default
+ client certificate, encrypted private key and passphrase. It writes
+ the certificate and private key into the cert_path and key_path, and
+ returns the cert_path, key_path and passphrase bytes.
+
+ Raises:
+ google.auth.exceptions.DefaultClientCertSourceError: If any problem
+ occurs when loading or saving the client certificate and key.
+ """
+ if not has_default_client_cert_source():
+ raise exceptions.MutualTLSChannelError(
+ "Default client encrypted cert source doesn't exist"
+ )
+
+ def callback():
+ try:
+ (
+ _,
+ cert_bytes,
+ key_bytes,
+ passphrase_bytes,
+ ) = _mtls_helper.get_client_ssl_credentials(generate_encrypted_key=True)
+ with open(cert_path, "wb") as cert_file:
+ cert_file.write(cert_bytes)
+ with open(key_path, "wb") as key_file:
+ key_file.write(key_bytes)
+ except (exceptions.ClientCertError, OSError) as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+
+ return cert_path, key_path, passphrase_bytes
+
+ return callback
diff --git a/contrib/python/google-auth/py3/google/auth/transport/requests.py b/contrib/python/google-auth/py3/google/auth/transport/requests.py
new file mode 100644
index 0000000000..b9bcad359f
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/requests.py
@@ -0,0 +1,604 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for Requests."""
+
+from __future__ import absolute_import
+
+import functools
+import logging
+import numbers
+import os
+import time
+
+try:
+ import requests
+except ImportError as caught_exc: # pragma: NO COVER
+ raise ImportError(
+ "The requests library is not installed from please install the requests package to use the requests transport."
+ ) from caught_exc
+import requests.adapters # pylint: disable=ungrouped-imports
+import requests.exceptions # pylint: disable=ungrouped-imports
+from requests.packages.urllib3.util.ssl_ import ( # type: ignore
+ create_urllib3_context,
+) # pylint: disable=ungrouped-imports
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+import google.auth.transport._mtls_helper
+from google.oauth2 import service_account
+
+_LOGGER = logging.getLogger(__name__)
+
+_DEFAULT_TIMEOUT = 120 # in seconds
+
+
+class _Response(transport.Response):
+ """Requests transport response adapter.
+
+ Args:
+ response (requests.Response): The raw Requests response.
+ """
+
+ def __init__(self, response):
+ self._response = response
+
+ @property
+ def status(self):
+ return self._response.status_code
+
+ @property
+ def headers(self):
+ return self._response.headers
+
+ @property
+ def data(self):
+ return self._response.content
+
+
+class TimeoutGuard(object):
+ """A context manager raising an error if the suite execution took too long.
+
+ Args:
+ timeout (Union[None, Union[float, Tuple[float, float]]]):
+ The maximum number of seconds a suite can run without the context
+ manager raising a timeout exception on exit. If passed as a tuple,
+ the smaller of the values is taken as a timeout. If ``None``, a
+ timeout error is never raised.
+ timeout_error_type (Optional[Exception]):
+ The type of the error to raise on timeout. Defaults to
+ :class:`requests.exceptions.Timeout`.
+ """
+
+ def __init__(self, timeout, timeout_error_type=requests.exceptions.Timeout):
+ self._timeout = timeout
+ self.remaining_timeout = timeout
+ self._timeout_error_type = timeout_error_type
+
+ def __enter__(self):
+ self._start = time.time()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_value:
+ return # let the error bubble up automatically
+
+ if self._timeout is None:
+ return # nothing to do, the timeout was not specified
+
+ elapsed = time.time() - self._start
+ deadline_hit = False
+
+ if isinstance(self._timeout, numbers.Number):
+ self.remaining_timeout = self._timeout - elapsed
+ deadline_hit = self.remaining_timeout <= 0
+ else:
+ self.remaining_timeout = tuple(x - elapsed for x in self._timeout)
+ deadline_hit = min(self.remaining_timeout) <= 0
+
+ if deadline_hit:
+ raise self._timeout_error_type()
+
+
+class Request(transport.Request):
+ """Requests request adapter.
+
+ This class is used internally for making requests using various transports
+ in a consistent way. If you use :class:`AuthorizedSession` you do not need
+ to construct or use this class directly.
+
+ This class can be useful if you want to manually refresh a
+ :class:`~google.auth.credentials.Credentials` instance::
+
+ import google.auth.transport.requests
+ import requests
+
+ request = google.auth.transport.requests.Request()
+
+ credentials.refresh(request)
+
+ Args:
+ session (requests.Session): An instance :class:`requests.Session` used
+ to make HTTP requests. If not specified, a session will be created.
+
+ .. automethod:: __call__
+ """
+
+ def __init__(self, session=None):
+ if not session:
+ session = requests.Session()
+
+ self.session = session
+
+ def __del__(self):
+ try:
+ if hasattr(self, "session") and self.session is not None:
+ self.session.close()
+ except TypeError:
+ # NOTE: For certain Python binary built, the queue.Empty exception
+ # might not be considered a normal Python exception causing
+ # TypeError.
+ pass
+
+ def __call__(
+ self,
+ url,
+ method="GET",
+ body=None,
+ headers=None,
+ timeout=_DEFAULT_TIMEOUT,
+ **kwargs
+ ):
+ """Make an HTTP request using requests.
+
+ Args:
+ url (str): The URI to be requested.
+ method (str): The HTTP method to use for the request. Defaults
+ to 'GET'.
+ body (bytes): The payload or body in HTTP request.
+ headers (Mapping[str, str]): Request headers.
+ timeout (Optional[int]): The number of seconds to wait for a
+ response from the server. If not specified or if None, the
+ requests default timeout will be used.
+ kwargs: Additional arguments passed through to the underlying
+ requests :meth:`~requests.Session.request` method.
+
+ Returns:
+ google.auth.transport.Response: The HTTP response.
+
+ Raises:
+ google.auth.exceptions.TransportError: If any exception occurred.
+ """
+ try:
+ _LOGGER.debug("Making request: %s %s", method, url)
+ response = self.session.request(
+ method, url, data=body, headers=headers, timeout=timeout, **kwargs
+ )
+ return _Response(response)
+ except requests.exceptions.RequestException as caught_exc:
+ new_exc = exceptions.TransportError(caught_exc)
+ raise new_exc from caught_exc
+
+
+class _MutualTlsAdapter(requests.adapters.HTTPAdapter):
+ """
+ A TransportAdapter that enables mutual TLS.
+
+ Args:
+ cert (bytes): client certificate in PEM format
+ key (bytes): client private key in PEM format
+
+ Raises:
+ ImportError: if certifi or pyOpenSSL is not installed
+ OpenSSL.crypto.Error: if client cert or key is invalid
+ """
+
+ def __init__(self, cert, key):
+ import certifi
+ from OpenSSL import crypto
+ import urllib3.contrib.pyopenssl # type: ignore
+
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+
+ pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
+ x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
+
+ ctx_poolmanager = create_urllib3_context()
+ ctx_poolmanager.load_verify_locations(cafile=certifi.where())
+ ctx_poolmanager._ctx.use_certificate(x509)
+ ctx_poolmanager._ctx.use_privatekey(pkey)
+ self._ctx_poolmanager = ctx_poolmanager
+
+ ctx_proxymanager = create_urllib3_context()
+ ctx_proxymanager.load_verify_locations(cafile=certifi.where())
+ ctx_proxymanager._ctx.use_certificate(x509)
+ ctx_proxymanager._ctx.use_privatekey(pkey)
+ self._ctx_proxymanager = ctx_proxymanager
+
+ super(_MutualTlsAdapter, self).__init__()
+
+ def init_poolmanager(self, *args, **kwargs):
+ kwargs["ssl_context"] = self._ctx_poolmanager
+ super(_MutualTlsAdapter, self).init_poolmanager(*args, **kwargs)
+
+ def proxy_manager_for(self, *args, **kwargs):
+ kwargs["ssl_context"] = self._ctx_proxymanager
+ return super(_MutualTlsAdapter, self).proxy_manager_for(*args, **kwargs)
+
+
+class _MutualTlsOffloadAdapter(requests.adapters.HTTPAdapter):
+ """
+ A TransportAdapter that enables mutual TLS and offloads the client side
+ signing operation to the signing library.
+
+ Args:
+ enterprise_cert_file_path (str): the path to an enterprise cert JSON
+ file. The file should contain the following field:
+
+ {
+ "libs": {
+ "signer_library": "...",
+ "offload_library": "..."
+ }
+ }
+
+ Raises:
+ ImportError: if certifi or pyOpenSSL is not installed
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+ creation failed for any reason.
+ """
+
+ def __init__(self, enterprise_cert_file_path):
+ import certifi
+ import urllib3.contrib.pyopenssl
+
+ from google.auth.transport import _custom_tls_signer
+
+ # Call inject_into_urllib3 to activate certificate checking. See the
+ # following links for more info:
+ # (1) doc: https://github.com/urllib3/urllib3/blob/cb9ebf8aac5d75f64c8551820d760b72b619beff/src/urllib3/contrib/pyopenssl.py#L31-L32
+ # (2) mTLS example: https://github.com/urllib3/urllib3/issues/474#issuecomment-253168415
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+
+ self.signer = _custom_tls_signer.CustomTlsSigner(enterprise_cert_file_path)
+ self.signer.load_libraries()
+ self.signer.set_up_custom_key()
+
+ poolmanager = create_urllib3_context()
+ poolmanager.load_verify_locations(cafile=certifi.where())
+ self.signer.attach_to_ssl_context(poolmanager)
+ self._ctx_poolmanager = poolmanager
+
+ proxymanager = create_urllib3_context()
+ proxymanager.load_verify_locations(cafile=certifi.where())
+ self.signer.attach_to_ssl_context(proxymanager)
+ self._ctx_proxymanager = proxymanager
+
+ super(_MutualTlsOffloadAdapter, self).__init__()
+
+ def init_poolmanager(self, *args, **kwargs):
+ kwargs["ssl_context"] = self._ctx_poolmanager
+ super(_MutualTlsOffloadAdapter, self).init_poolmanager(*args, **kwargs)
+
+ def proxy_manager_for(self, *args, **kwargs):
+ kwargs["ssl_context"] = self._ctx_proxymanager
+ return super(_MutualTlsOffloadAdapter, self).proxy_manager_for(*args, **kwargs)
+
+
+class AuthorizedSession(requests.Session):
+ """A Requests Session class with credentials.
+
+ This class is used to perform requests to API endpoints that require
+ authorization::
+
+ from google.auth.transport.requests import AuthorizedSession
+
+ authed_session = AuthorizedSession(credentials)
+
+ response = authed_session.request(
+ 'GET', 'https://www.googleapis.com/storage/v1/b')
+
+
+ The underlying :meth:`request` implementation handles adding the
+ credentials' headers to the request and refreshing credentials as needed.
+
+ This class also supports mutual TLS via :meth:`configure_mtls_channel`
+ method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ environment variable must be explicitly set to ``true``, otherwise it does
+ nothing. Assume the environment is set to ``true``, the method behaves in the
+ following manner:
+
+ If client_cert_callback is provided, client certificate and private
+ key are loaded using the callback; if client_cert_callback is None,
+ application default SSL credentials will be used. Exceptions are raised if
+ there are problems with the certificate, private key, or the loading process,
+ so it should be called within a try/except block.
+
+ First we set the environment variable to ``true``, then create an :class:`AuthorizedSession`
+ instance and specify the endpoints::
+
+ regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics'
+ mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics'
+
+ authed_session = AuthorizedSession(credentials)
+
+ Now we can pass a callback to :meth:`configure_mtls_channel`::
+
+ def my_cert_callback():
+ # some code to load client cert bytes and private key bytes, both in
+ # PEM format.
+ some_code_to_load_client_cert_and_key()
+ if loaded:
+ return cert, key
+ raise MyClientCertFailureException()
+
+ # Always call configure_mtls_channel within a try/except block.
+ try:
+ authed_session.configure_mtls_channel(my_cert_callback)
+ except:
+ # handle exceptions.
+
+ if authed_session.is_mtls:
+ response = authed_session.request('GET', mtls_endpoint)
+ else:
+ response = authed_session.request('GET', regular_endpoint)
+
+
+ You can alternatively use application default SSL credentials like this::
+
+ try:
+ authed_session.configure_mtls_channel()
+ except:
+ # handle exceptions.
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ add to the request.
+ refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
+ that credentials should be refreshed and the request should be
+ retried.
+ max_refresh_attempts (int): The maximum number of times to attempt to
+ refresh the credentials and retry the request.
+ refresh_timeout (Optional[int]): The timeout value in seconds for
+ credential refresh HTTP requests.
+ auth_request (google.auth.transport.requests.Request):
+ (Optional) An instance of
+ :class:`~google.auth.transport.requests.Request` used when
+ refreshing credentials. If not passed,
+ an instance of :class:`~google.auth.transport.requests.Request`
+ is created.
+ default_host (Optional[str]): A host like "pubsub.googleapis.com".
+ This is used when a self-signed JWT is created from service
+ account credentials.
+ """
+
+ def __init__(
+ self,
+ credentials,
+ refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
+ max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
+ refresh_timeout=None,
+ auth_request=None,
+ default_host=None,
+ ):
+ super(AuthorizedSession, self).__init__()
+ self.credentials = credentials
+ self._refresh_status_codes = refresh_status_codes
+ self._max_refresh_attempts = max_refresh_attempts
+ self._refresh_timeout = refresh_timeout
+ self._is_mtls = False
+ self._default_host = default_host
+
+ if auth_request is None:
+ self._auth_request_session = requests.Session()
+
+ # Using an adapter to make HTTP requests robust to network errors.
+ # This adapter retries HTTP requests when network errors occur
+ # and the requests seem safely retryable.
+ retry_adapter = requests.adapters.HTTPAdapter(max_retries=3)
+ self._auth_request_session.mount("https://", retry_adapter)
+
+ # Do not pass `self` as the session here, as it can lead to
+ # infinite recursion.
+ auth_request = Request(self._auth_request_session)
+ else:
+ self._auth_request_session = None
+
+ # Request instance used by internal methods (for example,
+ # credentials.refresh).
+ self._auth_request = auth_request
+
+ # https://google.aip.dev/auth/4111
+ # Attempt to use self-signed JWTs when a service account is used.
+ if isinstance(self.credentials, service_account.Credentials):
+ self.credentials._create_self_signed_jwt(
+ "https://{}/".format(self._default_host) if self._default_host else None
+ )
+
+ def configure_mtls_channel(self, client_cert_callback=None):
+ """Configure the client certificate and key for SSL connection.
+
+ The function does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE` is
+ explicitly set to `true`. In this case if client certificate and key are
+ successfully obtained (from the given client_cert_callback or from application
+ default SSL credentials), a :class:`_MutualTlsAdapter` instance will be mounted
+ to "https://" prefix.
+
+ Args:
+ client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
+ The optional callback returns the client certificate and private
+ key bytes both in PEM format.
+ If the callback is None, application default SSL credentials
+ will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+ creation failed for any reason.
+ """
+ use_client_cert = os.getenv(
+ environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+ )
+ if use_client_cert != "true":
+ self._is_mtls = False
+ return
+
+ try:
+ import OpenSSL
+ except ImportError as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+
+ try:
+ (
+ self._is_mtls,
+ cert,
+ key,
+ ) = google.auth.transport._mtls_helper.get_client_cert_and_key(
+ client_cert_callback
+ )
+
+ if self._is_mtls:
+ mtls_adapter = _MutualTlsAdapter(cert, key)
+ self.mount("https://", mtls_adapter)
+ except (
+ exceptions.ClientCertError,
+ ImportError,
+ OpenSSL.crypto.Error,
+ ) as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+
+ def request(
+ self,
+ method,
+ url,
+ data=None,
+ headers=None,
+ max_allowed_time=None,
+ timeout=_DEFAULT_TIMEOUT,
+ **kwargs
+ ):
+ """Implementation of Requests' request.
+
+ Args:
+ timeout (Optional[Union[float, Tuple[float, float]]]):
+ The amount of time in seconds to wait for the server response
+ with each individual request. Can also be passed as a tuple
+ ``(connect_timeout, read_timeout)``. See :meth:`requests.Session.request`
+ documentation for details.
+ max_allowed_time (Optional[float]):
+ If the method runs longer than this, a ``Timeout`` exception is
+ automatically raised. Unlike the ``timeout`` parameter, this
+ value applies to the total method execution time, even if
+ multiple requests are made under the hood.
+
+ Mind that it is not guaranteed that the timeout error is raised
+ at ``max_allowed_time``. It might take longer, for example, if
+ an underlying request takes a lot of time, but the request
+ itself does not timeout, e.g. if a large file is being
+ transmitted. The timeout error will be raised after such
+ request completes.
+ """
+ # pylint: disable=arguments-differ
+ # Requests has a ton of arguments to request, but only two
+ # (method, url) are required. We pass through all of the other
+ # arguments to super, so no need to exhaustively list them here.
+
+ # Use a kwarg for this instead of an attribute to maintain
+ # thread-safety.
+ _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)
+
+ # Make a copy of the headers. They will be modified by the credentials
+ # and we want to pass the original headers if we recurse.
+ request_headers = headers.copy() if headers is not None else {}
+
+ # Do not apply the timeout unconditionally in order to not override the
+ # _auth_request's default timeout.
+ auth_request = (
+ self._auth_request
+ if timeout is None
+ else functools.partial(self._auth_request, timeout=timeout)
+ )
+
+ remaining_time = max_allowed_time
+
+ with TimeoutGuard(remaining_time) as guard:
+ self.credentials.before_request(auth_request, method, url, request_headers)
+ remaining_time = guard.remaining_timeout
+
+ with TimeoutGuard(remaining_time) as guard:
+ response = super(AuthorizedSession, self).request(
+ method,
+ url,
+ data=data,
+ headers=request_headers,
+ timeout=timeout,
+ **kwargs
+ )
+ remaining_time = guard.remaining_timeout
+
+ # If the response indicated that the credentials needed to be
+ # refreshed, then refresh the credentials and re-attempt the
+ # request.
+ # A stored token may expire between the time it is retrieved and
+ # the time the request is made, so we may need to try twice.
+ if (
+ response.status_code in self._refresh_status_codes
+ and _credential_refresh_attempt < self._max_refresh_attempts
+ ):
+
+ _LOGGER.info(
+ "Refreshing credentials due to a %s response. Attempt %s/%s.",
+ response.status_code,
+ _credential_refresh_attempt + 1,
+ self._max_refresh_attempts,
+ )
+
+ # Do not apply the timeout unconditionally in order to not override the
+ # _auth_request's default timeout.
+ auth_request = (
+ self._auth_request
+ if timeout is None
+ else functools.partial(self._auth_request, timeout=timeout)
+ )
+
+ with TimeoutGuard(remaining_time) as guard:
+ self.credentials.refresh(auth_request)
+ remaining_time = guard.remaining_timeout
+
+ # Recurse. Pass in the original headers, not our modified set, but
+ # do pass the adjusted max allowed time (i.e. the remaining total time).
+ return self.request(
+ method,
+ url,
+ data=data,
+ headers=headers,
+ max_allowed_time=remaining_time,
+ timeout=timeout,
+ _credential_refresh_attempt=_credential_refresh_attempt + 1,
+ **kwargs
+ )
+
+ return response
+
+ @property
+ def is_mtls(self):
+ """Indicates if the created SSL channel is mutual TLS."""
+ return self._is_mtls
+
+ def close(self):
+ if self._auth_request_session is not None:
+ self._auth_request_session.close()
+ super(AuthorizedSession, self).close()
diff --git a/contrib/python/google-auth/py3/google/auth/transport/urllib3.py b/contrib/python/google-auth/py3/google/auth/transport/urllib3.py
new file mode 100644
index 0000000000..053d6f7b72
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/transport/urllib3.py
@@ -0,0 +1,437 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Transport adapter for urllib3."""
+
+from __future__ import absolute_import
+
+import logging
+import os
+import warnings
+
+# Certifi is Mozilla's certificate bundle. Urllib3 needs a certificate bundle
+# to verify HTTPS requests, and certifi is the recommended and most reliable
+# way to get a root certificate bundle. See
+# http://urllib3.readthedocs.io/en/latest/user-guide.html\
+# #certificate-verification
+# For more details.
+try:
+ import certifi
+except ImportError: # pragma: NO COVER
+ certifi = None # type: ignore
+
+try:
+ import urllib3 # type: ignore
+ import urllib3.exceptions # type: ignore
+except ImportError as caught_exc: # pragma: NO COVER
+ raise ImportError(
+ "The urllib3 library is not installed from please install the "
+ "urllib3 package to use the urllib3 transport."
+ ) from caught_exc
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import service_account
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class _Response(transport.Response):
+ """urllib3 transport response adapter.
+
+ Args:
+ response (urllib3.response.HTTPResponse): The raw urllib3 response.
+ """
+
+ def __init__(self, response):
+ self._response = response
+
+ @property
+ def status(self):
+ return self._response.status
+
+ @property
+ def headers(self):
+ return self._response.headers
+
+ @property
+ def data(self):
+ return self._response.data
+
+
+class Request(transport.Request):
+ """urllib3 request adapter.
+
+ This class is used internally for making requests using various transports
+ in a consistent way. If you use :class:`AuthorizedHttp` you do not need
+ to construct or use this class directly.
+
+ This class can be useful if you want to manually refresh a
+ :class:`~google.auth.credentials.Credentials` instance::
+
+ import google.auth.transport.urllib3
+ import urllib3
+
+ http = urllib3.PoolManager()
+ request = google.auth.transport.urllib3.Request(http)
+
+ credentials.refresh(request)
+
+ Args:
+ http (urllib3.request.RequestMethods): An instance of any urllib3
+ class that implements :class:`~urllib3.request.RequestMethods`,
+ usually :class:`urllib3.PoolManager`.
+
+ .. automethod:: __call__
+ """
+
+ def __init__(self, http):
+ self.http = http
+
+ def __call__(
+ self, url, method="GET", body=None, headers=None, timeout=None, **kwargs
+ ):
+ """Make an HTTP request using urllib3.
+
+ Args:
+ url (str): The URI to be requested.
+ method (str): The HTTP method to use for the request. Defaults
+ to 'GET'.
+ body (bytes): The payload / body in HTTP request.
+ headers (Mapping[str, str]): Request headers.
+ timeout (Optional[int]): The number of seconds to wait for a
+ response from the server. If not specified or if None, the
+ urllib3 default timeout will be used.
+ kwargs: Additional arguments passed through to the underlying
+ urllib3 :meth:`urlopen` method.
+
+ Returns:
+ google.auth.transport.Response: The HTTP response.
+
+ Raises:
+ google.auth.exceptions.TransportError: If any exception occurred.
+ """
+ # urllib3 uses a sentinel default value for timeout, so only set it if
+ # specified.
+ if timeout is not None:
+ kwargs["timeout"] = timeout
+
+ try:
+ _LOGGER.debug("Making request: %s %s", method, url)
+ response = self.http.request(
+ method, url, body=body, headers=headers, **kwargs
+ )
+ return _Response(response)
+ except urllib3.exceptions.HTTPError as caught_exc:
+ new_exc = exceptions.TransportError(caught_exc)
+ raise new_exc from caught_exc
+
+
+def _make_default_http():
+ if certifi is not None:
+ return urllib3.PoolManager(cert_reqs="CERT_REQUIRED", ca_certs=certifi.where())
+ else:
+ return urllib3.PoolManager()
+
+
+def _make_mutual_tls_http(cert, key):
+ """Create a mutual TLS HTTP connection with the given client cert and key.
+ See https://github.com/urllib3/urllib3/issues/474#issuecomment-253168415
+
+ Args:
+ cert (bytes): client certificate in PEM format
+ key (bytes): client private key in PEM format
+
+ Returns:
+ urllib3.PoolManager: Mutual TLS HTTP connection.
+
+ Raises:
+ ImportError: If certifi or pyOpenSSL is not installed.
+ OpenSSL.crypto.Error: If the cert or key is invalid.
+ """
+ import certifi
+ from OpenSSL import crypto
+ import urllib3.contrib.pyopenssl # type: ignore
+
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ ctx = urllib3.util.ssl_.create_urllib3_context()
+ ctx.load_verify_locations(cafile=certifi.where())
+
+ pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
+ x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
+
+ ctx._ctx.use_certificate(x509)
+ ctx._ctx.use_privatekey(pkey)
+
+ http = urllib3.PoolManager(ssl_context=ctx)
+ return http
+
+
+class AuthorizedHttp(urllib3.request.RequestMethods):
+ """A urllib3 HTTP class with credentials.
+
+ This class is used to perform requests to API endpoints that require
+ authorization::
+
+ from google.auth.transport.urllib3 import AuthorizedHttp
+
+ authed_http = AuthorizedHttp(credentials)
+
+ response = authed_http.request(
+ 'GET', 'https://www.googleapis.com/storage/v1/b')
+
+ This class implements :class:`urllib3.request.RequestMethods` and can be
+ used just like any other :class:`urllib3.PoolManager`.
+
+ The underlying :meth:`urlopen` implementation handles adding the
+ credentials' headers to the request and refreshing credentials as needed.
+
+ This class also supports mutual TLS via :meth:`configure_mtls_channel`
+ method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
+ environment variable must be explicitly set to `true`, otherwise it does
+ nothing. Assume the environment is set to `true`, the method behaves in the
+ following manner:
+ If client_cert_callback is provided, client certificate and private
+ key are loaded using the callback; if client_cert_callback is None,
+ application default SSL credentials will be used. Exceptions are raised if
+ there are problems with the certificate, private key, or the loading process,
+ so it should be called within a try/except block.
+
+ First we set the environment variable to `true`, then create an :class:`AuthorizedHttp`
+ instance and specify the endpoints::
+
+ regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics'
+ mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics'
+
+ authed_http = AuthorizedHttp(credentials)
+
+ Now we can pass a callback to :meth:`configure_mtls_channel`::
+
+ def my_cert_callback():
+ # some code to load client cert bytes and private key bytes, both in
+ # PEM format.
+ some_code_to_load_client_cert_and_key()
+ if loaded:
+ return cert, key
+ raise MyClientCertFailureException()
+
+ # Always call configure_mtls_channel within a try/except block.
+ try:
+ is_mtls = authed_http.configure_mtls_channel(my_cert_callback)
+ except:
+ # handle exceptions.
+
+ if is_mtls:
+ response = authed_http.request('GET', mtls_endpoint)
+ else:
+ response = authed_http.request('GET', regular_endpoint)
+
+ You can alternatively use application default SSL credentials like this::
+
+ try:
+ is_mtls = authed_http.configure_mtls_channel()
+ except:
+ # handle exceptions.
+
+ Args:
+ credentials (google.auth.credentials.Credentials): The credentials to
+ add to the request.
+ http (urllib3.PoolManager): The underlying HTTP object to
+ use to make requests. If not specified, a
+ :class:`urllib3.PoolManager` instance will be constructed with
+ sane defaults.
+ refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
+ that credentials should be refreshed and the request should be
+ retried.
+ max_refresh_attempts (int): The maximum number of times to attempt to
+ refresh the credentials and retry the request.
+ default_host (Optional[str]): A host like "pubsub.googleapis.com".
+ This is used when a self-signed JWT is created from service
+ account credentials.
+ """
+
+ def __init__(
+ self,
+ credentials,
+ http=None,
+ refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
+ max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
+ default_host=None,
+ ):
+ if http is None:
+ self.http = _make_default_http()
+ self._has_user_provided_http = False
+ else:
+ self.http = http
+ self._has_user_provided_http = True
+
+ self.credentials = credentials
+ self._refresh_status_codes = refresh_status_codes
+ self._max_refresh_attempts = max_refresh_attempts
+ self._default_host = default_host
+ # Request instance used by internal methods (for example,
+ # credentials.refresh).
+ self._request = Request(self.http)
+
+ # https://google.aip.dev/auth/4111
+ # Attempt to use self-signed JWTs when a service account is used.
+ if isinstance(self.credentials, service_account.Credentials):
+ self.credentials._create_self_signed_jwt(
+ "https://{}/".format(self._default_host) if self._default_host else None
+ )
+
+ super(AuthorizedHttp, self).__init__()
+
+ def configure_mtls_channel(self, client_cert_callback=None):
+ """Configures mutual TLS channel using the given client_cert_callback or
+ application default SSL credentials. The behavior is controlled by
+ `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable.
+ (1) If the environment variable value is `true`, the function returns True
+ if the channel is mutual TLS and False otherwise. The `http` provided
+ in the constructor will be overwritten.
+ (2) If the environment variable is not set or `false`, the function does
+ nothing and it always returns False.
+
+ Args:
+ client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
+ The optional callback returns the client certificate and private
+ key bytes both in PEM format.
+ If the callback is None, application default SSL credentials
+ will be used.
+
+ Returns:
+ True if the channel is mutual TLS and False otherwise.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
+ creation failed for any reason.
+ """
+ use_client_cert = os.getenv(
+ environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
+ )
+ if use_client_cert != "true":
+ return False
+
+ try:
+ import OpenSSL
+ except ImportError as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+
+ try:
+ found_cert_key, cert, key = transport._mtls_helper.get_client_cert_and_key(
+ client_cert_callback
+ )
+
+ if found_cert_key:
+ self.http = _make_mutual_tls_http(cert, key)
+ else:
+ self.http = _make_default_http()
+ except (
+ exceptions.ClientCertError,
+ ImportError,
+ OpenSSL.crypto.Error,
+ ) as caught_exc:
+ new_exc = exceptions.MutualTLSChannelError(caught_exc)
+ raise new_exc from caught_exc
+
+ if self._has_user_provided_http:
+ self._has_user_provided_http = False
+ warnings.warn(
+ "`http` provided in the constructor is overwritten", UserWarning
+ )
+
+ return found_cert_key
+
+ def urlopen(self, method, url, body=None, headers=None, **kwargs):
+ """Implementation of urllib3's urlopen."""
+ # pylint: disable=arguments-differ
+ # We use kwargs to collect additional args that we don't need to
+ # introspect here. However, we do explicitly collect the two
+ # positional arguments.
+
+ # Use a kwarg for this instead of an attribute to maintain
+ # thread-safety.
+ _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)
+
+ if headers is None:
+ headers = self.headers
+
+ # Make a copy of the headers. They will be modified by the credentials
+ # and we want to pass the original headers if we recurse.
+ request_headers = headers.copy()
+
+ self.credentials.before_request(self._request, method, url, request_headers)
+
+ response = self.http.urlopen(
+ method, url, body=body, headers=request_headers, **kwargs
+ )
+
+ # If the response indicated that the credentials needed to be
+ # refreshed, then refresh the credentials and re-attempt the
+ # request.
+ # A stored token may expire between the time it is retrieved and
+ # the time the request is made, so we may need to try twice.
+ # The reason urllib3's retries aren't used is because they
+ # don't allow you to modify the request headers. :/
+ if (
+ response.status in self._refresh_status_codes
+ and _credential_refresh_attempt < self._max_refresh_attempts
+ ):
+
+ _LOGGER.info(
+ "Refreshing credentials due to a %s response. Attempt %s/%s.",
+ response.status,
+ _credential_refresh_attempt + 1,
+ self._max_refresh_attempts,
+ )
+
+ self.credentials.refresh(self._request)
+
+ # Recurse. Pass in the original headers, not our modified set.
+ return self.urlopen(
+ method,
+ url,
+ body=body,
+ headers=headers,
+ _credential_refresh_attempt=_credential_refresh_attempt + 1,
+ **kwargs
+ )
+
+ return response
+
+ # Proxy methods for compliance with the urllib3.PoolManager interface
+
+ def __enter__(self):
+ """Proxy to ``self.http``."""
+ return self.http.__enter__()
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Proxy to ``self.http``."""
+ return self.http.__exit__(exc_type, exc_val, exc_tb)
+
+ def __del__(self):
+ if hasattr(self, "http") and self.http is not None:
+ self.http.clear()
+
+ @property
+ def headers(self):
+ """Proxy to ``self.http``."""
+ return self.http.headers
+
+ @headers.setter
+ def headers(self, value):
+ """Proxy to ``self.http``."""
+ self.http.headers = value
diff --git a/contrib/python/google-auth/py3/google/auth/version.py b/contrib/python/google-auth/py3/google/auth/version.py
new file mode 100644
index 0000000000..491187e6d7
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/auth/version.py
@@ -0,0 +1,15 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "2.23.0"
diff --git a/contrib/python/google-auth/py3/google/oauth2/__init__.py b/contrib/python/google-auth/py3/google/oauth2/__init__.py
new file mode 100644
index 0000000000..4fb71fd1ad
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google OAuth 2.0 Library for Python."""
diff --git a/contrib/python/google-auth/py3/google/oauth2/_client.py b/contrib/python/google-auth/py3/google/oauth2/_client.py
new file mode 100644
index 0000000000..d2af6c8aa8
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/_client.py
@@ -0,0 +1,507 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 client.
+
+This is a client for interacting with an OAuth 2.0 authorization server's
+token endpoint.
+
+For more information about the token endpoint, see
+ `Section 3.2 of rfc6749`_
+
+ .. _Section 3.2 of rfc6749: https://tools.ietf.org/html/rfc6749#section-3.2
+"""
+
+import datetime
+import http.client as http_client
+import json
+import urllib
+
+from google.auth import _exponential_backoff
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import metrics
+from google.auth import transport
+
+_URLENCODED_CONTENT_TYPE = "application/x-www-form-urlencoded"
+_JSON_CONTENT_TYPE = "application/json"
+_JWT_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+_REFRESH_GRANT_TYPE = "refresh_token"
+_IAM_IDTOKEN_ENDPOINT = (
+ "https://iamcredentials.googleapis.com/v1/"
+ + "projects/-/serviceAccounts/{}:generateIdToken"
+)
+
+
+def _handle_error_response(response_data, retryable_error):
+ """Translates an error response into an exception.
+
+ Args:
+ response_data (Mapping | str): The decoded response data.
+ retryable_error (Optional[bool]): A boolean indicating if an error is retryable.
+ Defaults to False.
+
+ Raises:
+ google.auth.exceptions.RefreshError: The errors contained in response_data.
+ """
+
+ retryable_error = retryable_error if retryable_error else False
+
+ if isinstance(response_data, str):
+ raise exceptions.RefreshError(response_data, retryable=retryable_error)
+ try:
+ error_details = "{}: {}".format(
+ response_data["error"], response_data.get("error_description")
+ )
+ # If no details could be extracted, use the response data.
+ except (KeyError, ValueError):
+ error_details = json.dumps(response_data)
+
+ raise exceptions.RefreshError(
+ error_details, response_data, retryable=retryable_error
+ )
+
+
+def _can_retry(status_code, response_data):
+ """Checks if a request can be retried by inspecting the status code
+ and response body of the request.
+
+ Args:
+ status_code (int): The response status code.
+ response_data (Mapping | str): The decoded response data.
+
+ Returns:
+ bool: True if the response is retryable. False otherwise.
+ """
+ if status_code in transport.DEFAULT_RETRYABLE_STATUS_CODES:
+ return True
+
+ try:
+ # For a failed response, response_body could be a string
+ error_desc = response_data.get("error_description") or ""
+ error_code = response_data.get("error") or ""
+
+ if not isinstance(error_code, str) or not isinstance(error_desc, str):
+ return False
+
+ # Per Oauth 2.0 RFC https://www.rfc-editor.org/rfc/rfc6749.html#section-4.1.2.1
+ # This is needed because a redirect will not return a 500 status code.
+ retryable_error_descriptions = {
+ "internal_failure",
+ "server_error",
+ "temporarily_unavailable",
+ }
+
+ if any(e in retryable_error_descriptions for e in (error_code, error_desc)):
+ return True
+
+ except AttributeError:
+ pass
+
+ return False
+
+
+def _parse_expiry(response_data):
+ """Parses the expiry field from a response into a datetime.
+
+ Args:
+ response_data (Mapping): The JSON-parsed response data.
+
+ Returns:
+ Optional[datetime]: The expiration or ``None`` if no expiration was
+ specified.
+ """
+ expires_in = response_data.get("expires_in", None)
+
+ if expires_in is not None:
+ # Some services do not respect the OAuth 2.0 RFC and send expires_in as a
+ # JSON String.
+ if isinstance(expires_in, str):
+ expires_in = int(expires_in)
+
+ return _helpers.utcnow() + datetime.timedelta(seconds=expires_in)
+ else:
+ return None
+
+
+def _token_endpoint_request_no_throw(
+ request,
+ token_uri,
+ body,
+ access_token=None,
+ use_json=False,
+ can_retry=True,
+ headers=None,
+ **kwargs
+):
+ """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+ This function doesn't throw on response errors.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ body (Mapping[str, str]): The parameters to send in the request body.
+ access_token (Optional(str)): The access token needed to make the request.
+ use_json (Optional(bool)): Use urlencoded format or json format for the
+ content type. The default value is False.
+ can_retry (bool): Enable or disable request retry behavior.
+ headers (Optional[Mapping[str, str]]): The headers for the request.
+ kwargs: Additional arguments passed on to the request method. The
+ kwargs will be passed to `requests.request` method, see:
+ https://docs.python-requests.org/en/latest/api/#requests.request.
+ For example, you can use `cert=("cert_pem_path", "key_pem_path")`
+ to set up client side SSL certificate, and use
+ `verify="ca_bundle_path"` to set up the CA certificates for server
+ side SSL certificate verification.
+
+ Returns:
+ Tuple(bool, Mapping[str, str], Optional[bool]): A boolean indicating
+ if the request is successful, a mapping for the JSON-decoded response
+ data and in the case of an error a boolean indicating if the error
+ is retryable.
+ """
+ if use_json:
+ headers_to_use = {"Content-Type": _JSON_CONTENT_TYPE}
+ body = json.dumps(body).encode("utf-8")
+ else:
+ headers_to_use = {"Content-Type": _URLENCODED_CONTENT_TYPE}
+ body = urllib.parse.urlencode(body).encode("utf-8")
+
+ if access_token:
+ headers_to_use["Authorization"] = "Bearer {}".format(access_token)
+
+ if headers:
+ headers_to_use.update(headers)
+
+ def _perform_request():
+ response = request(
+ method="POST", url=token_uri, headers=headers_to_use, body=body, **kwargs
+ )
+ response_body = (
+ response.data.decode("utf-8")
+ if hasattr(response.data, "decode")
+ else response.data
+ )
+ response_data = ""
+ try:
+ # response_body should be a JSON
+ response_data = json.loads(response_body)
+ except ValueError:
+ response_data = response_body
+
+ if response.status == http_client.OK:
+ return True, response_data, None
+
+ retryable_error = _can_retry(
+ status_code=response.status, response_data=response_data
+ )
+
+ return False, response_data, retryable_error
+
+ request_succeeded, response_data, retryable_error = _perform_request()
+
+ if request_succeeded or not retryable_error or not can_retry:
+ return request_succeeded, response_data, retryable_error
+
+ retries = _exponential_backoff.ExponentialBackoff()
+ for _ in retries:
+ request_succeeded, response_data, retryable_error = _perform_request()
+ if request_succeeded or not retryable_error:
+ return request_succeeded, response_data, retryable_error
+
+ return False, response_data, retryable_error
+
+
+def _token_endpoint_request(
+ request,
+ token_uri,
+ body,
+ access_token=None,
+ use_json=False,
+ can_retry=True,
+ headers=None,
+ **kwargs
+):
+ """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ body (Mapping[str, str]): The parameters to send in the request body.
+ access_token (Optional(str)): The access token needed to make the request.
+ use_json (Optional(bool)): Use urlencoded format or json format for the
+ content type. The default value is False.
+ can_retry (bool): Enable or disable request retry behavior.
+ headers (Optional[Mapping[str, str]]): The headers for the request.
+ kwargs: Additional arguments passed on to the request method. The
+ kwargs will be passed to `requests.request` method, see:
+ https://docs.python-requests.org/en/latest/api/#requests.request.
+ For example, you can use `cert=("cert_pem_path", "key_pem_path")`
+ to set up client side SSL certificate, and use
+ `verify="ca_bundle_path"` to set up the CA certificates for server
+ side SSL certificate verification.
+
+ Returns:
+ Mapping[str, str]: The JSON-decoded response data.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+
+ response_status_ok, response_data, retryable_error = _token_endpoint_request_no_throw(
+ request,
+ token_uri,
+ body,
+ access_token=access_token,
+ use_json=use_json,
+ can_retry=can_retry,
+ headers=headers,
+ **kwargs
+ )
+ if not response_status_ok:
+ _handle_error_response(response_data, retryable_error)
+ return response_data
+
+
+def jwt_grant(request, token_uri, assertion, can_retry=True):
+ """Implements the JWT Profile for OAuth 2.0 Authorization Grants.
+
+ For more details, see `rfc7523 section 4`_.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ assertion (str): The OAuth 2.0 assertion.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple[str, Optional[datetime], Mapping[str, str]]: The access token,
+ expiration, and additional data returned by the token endpoint.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+
+ .. _rfc7523 section 4: https://tools.ietf.org/html/rfc7523#section-4
+ """
+ body = {"assertion": assertion, "grant_type": _JWT_GRANT_TYPE}
+
+ response_data = _token_endpoint_request(
+ request,
+ token_uri,
+ body,
+ can_retry=can_retry,
+ headers={
+ metrics.API_CLIENT_HEADER: metrics.token_request_access_token_sa_assertion()
+ },
+ )
+
+ try:
+ access_token = response_data["access_token"]
+ except KeyError as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "No access token in response.", response_data, retryable=False
+ )
+ raise new_exc from caught_exc
+
+ expiry = _parse_expiry(response_data)
+
+ return access_token, expiry, response_data
+
+
+def call_iam_generate_id_token_endpoint(request, signer_email, audience, access_token):
+ """Call iam.generateIdToken endpoint to get ID token.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ signer_email (str): The signer email used to form the IAM
+ generateIdToken endpoint.
+ audience (str): The audience for the ID token.
+ access_token (str): The access token used to call the IAM endpoint.
+
+ Returns:
+ Tuple[str, datetime]: The ID token and expiration.
+ """
+ body = {"audience": audience, "includeEmail": "true", "useEmailAzp": "true"}
+
+ response_data = _token_endpoint_request(
+ request,
+ _IAM_IDTOKEN_ENDPOINT.format(signer_email),
+ body,
+ access_token=access_token,
+ use_json=True,
+ )
+
+ try:
+ id_token = response_data["token"]
+ except KeyError as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "No ID token in response.", response_data, retryable=False
+ )
+ raise new_exc from caught_exc
+
+ payload = jwt.decode(id_token, verify=False)
+ expiry = datetime.datetime.utcfromtimestamp(payload["exp"])
+
+ return id_token, expiry
+
+
+def id_token_jwt_grant(request, token_uri, assertion, can_retry=True):
+ """Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
+ requests an OpenID Connect ID Token instead of an access token.
+
+ This is a variant on the standard JWT Profile that is currently unique
+ to Google. This was added for the benefit of authenticating to services
+ that require ID Tokens instead of access tokens or JWT bearer tokens.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorization server's token endpoint
+ URI.
+ assertion (str): JWT token signed by a service account. The token's
+ payload must include a ``target_audience`` claim.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple[str, Optional[datetime], Mapping[str, str]]:
+ The (encoded) Open ID Connect ID Token, expiration, and additional
+ data returned by the endpoint.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+ body = {"assertion": assertion, "grant_type": _JWT_GRANT_TYPE}
+
+ response_data = _token_endpoint_request(
+ request,
+ token_uri,
+ body,
+ can_retry=can_retry,
+ headers={
+ metrics.API_CLIENT_HEADER: metrics.token_request_id_token_sa_assertion()
+ },
+ )
+
+ try:
+ id_token = response_data["id_token"]
+ except KeyError as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "No ID token in response.", response_data, retryable=False
+ )
+ raise new_exc from caught_exc
+
+ payload = jwt.decode(id_token, verify=False)
+ expiry = datetime.datetime.utcfromtimestamp(payload["exp"])
+
+ return id_token, expiry, response_data
+
+
+def _handle_refresh_grant_response(response_data, refresh_token):
+ """Extract tokens from refresh grant response.
+
+ Args:
+ response_data (Mapping[str, str]): Refresh grant response data.
+ refresh_token (str): Current refresh token.
+
+ Returns:
+ Tuple[str, str, Optional[datetime], Mapping[str, str]]: The access token,
+ refresh token, expiration, and additional data returned by the token
+ endpoint. If response_data doesn't have refresh token, then the current
+ refresh token will be returned.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+ try:
+ access_token = response_data["access_token"]
+ except KeyError as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "No access token in response.", response_data, retryable=False
+ )
+ raise new_exc from caught_exc
+
+ refresh_token = response_data.get("refresh_token", refresh_token)
+ expiry = _parse_expiry(response_data)
+
+ return access_token, refresh_token, expiry, response_data
+
+
+def refresh_grant(
+ request,
+ token_uri,
+ refresh_token,
+ client_id,
+ client_secret,
+ scopes=None,
+ rapt_token=None,
+ can_retry=True,
+):
+ """Implements the OAuth 2.0 refresh token grant.
+
+ For more details, see `rfc6749 section 6`_.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ refresh_token (str): The refresh token to use to get a new access
+ token.
+ client_id (str): The OAuth 2.0 application's client ID.
+ client_secret (str): The OAuth 2.0 application's client secret.
+ scopes (Optional(Sequence[str])): Scopes to request. If present, all
+ scopes must be authorized for the refresh token. Useful if refresh
+ token has a wild card scope (e.g.
+ 'https://www.googleapis.com/auth/any-api').
+ rapt_token (Optional(str)): The reauth Proof Token.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple[str, str, Optional[datetime], Mapping[str, str]]: The access
+ token, new or current refresh token, expiration, and additional data
+ returned by the token endpoint.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+
+ .. _rfc6749 section 6: https://tools.ietf.org/html/rfc6749#section-6
+ """
+ body = {
+ "grant_type": _REFRESH_GRANT_TYPE,
+ "client_id": client_id,
+ "client_secret": client_secret,
+ "refresh_token": refresh_token,
+ }
+ if scopes:
+ body["scope"] = " ".join(scopes)
+ if rapt_token:
+ body["rapt"] = rapt_token
+
+ response_data = _token_endpoint_request(
+ request, token_uri, body, can_retry=can_retry
+ )
+ return _handle_refresh_grant_response(response_data, refresh_token)
diff --git a/contrib/python/google-auth/py3/google/oauth2/_client_async.py b/contrib/python/google-auth/py3/google/oauth2/_client_async.py
new file mode 100644
index 0000000000..2858d862b0
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/_client_async.py
@@ -0,0 +1,292 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 async client.
+
+This is a client for interacting with an OAuth 2.0 authorization server's
+token endpoint.
+
+For more information about the token endpoint, see
+ `Section 3.2 of rfc6749`_
+
+ .. _Section 3.2 of rfc6749: https://tools.ietf.org/html/rfc6749#section-3.2
+"""
+
+import datetime
+import http.client as http_client
+import json
+import urllib
+
+from google.auth import _exponential_backoff
+from google.auth import exceptions
+from google.auth import jwt
+from google.oauth2 import _client as client
+
+
+async def _token_endpoint_request_no_throw(
+ request, token_uri, body, access_token=None, use_json=False, can_retry=True
+):
+ """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+ This function doesn't throw on response errors.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ body (Mapping[str, str]): The parameters to send in the request body.
+ access_token (Optional(str)): The access token needed to make the request.
+ use_json (Optional(bool)): Use urlencoded format or json format for the
+ content type. The default value is False.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple(bool, Mapping[str, str], Optional[bool]): A boolean indicating
+ if the request is successful, a mapping for the JSON-decoded response
+ data and in the case of an error a boolean indicating if the error
+ is retryable.
+ """
+ if use_json:
+ headers = {"Content-Type": client._JSON_CONTENT_TYPE}
+ body = json.dumps(body).encode("utf-8")
+ else:
+ headers = {"Content-Type": client._URLENCODED_CONTENT_TYPE}
+ body = urllib.parse.urlencode(body).encode("utf-8")
+
+ if access_token:
+ headers["Authorization"] = "Bearer {}".format(access_token)
+
+ async def _perform_request():
+ response = await request(
+ method="POST", url=token_uri, headers=headers, body=body
+ )
+
+ # Using data.read() resulted in zlib decompression errors. This may require future investigation.
+ response_body1 = await response.content()
+
+ response_body = (
+ response_body1.decode("utf-8")
+ if hasattr(response_body1, "decode")
+ else response_body1
+ )
+
+ try:
+ response_data = json.loads(response_body)
+ except ValueError:
+ response_data = response_body
+
+ if response.status == http_client.OK:
+ return True, response_data, None
+
+ retryable_error = client._can_retry(
+ status_code=response.status, response_data=response_data
+ )
+
+ return False, response_data, retryable_error
+
+ request_succeeded, response_data, retryable_error = await _perform_request()
+
+ if request_succeeded or not retryable_error or not can_retry:
+ return request_succeeded, response_data, retryable_error
+
+ retries = _exponential_backoff.ExponentialBackoff()
+ for _ in retries:
+ request_succeeded, response_data, retryable_error = await _perform_request()
+ if request_succeeded or not retryable_error:
+ return request_succeeded, response_data, retryable_error
+
+ return False, response_data, retryable_error
+
+
+async def _token_endpoint_request(
+ request, token_uri, body, access_token=None, use_json=False, can_retry=True
+):
+ """Makes a request to the OAuth 2.0 authorization server's token endpoint.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ body (Mapping[str, str]): The parameters to send in the request body.
+ access_token (Optional(str)): The access token needed to make the request.
+ use_json (Optional(bool)): Use urlencoded format or json format for the
+ content type. The default value is False.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Mapping[str, str]: The JSON-decoded response data.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+
+ response_status_ok, response_data, retryable_error = await _token_endpoint_request_no_throw(
+ request,
+ token_uri,
+ body,
+ access_token=access_token,
+ use_json=use_json,
+ can_retry=can_retry,
+ )
+ if not response_status_ok:
+ client._handle_error_response(response_data, retryable_error)
+ return response_data
+
+
+async def jwt_grant(request, token_uri, assertion, can_retry=True):
+ """Implements the JWT Profile for OAuth 2.0 Authorization Grants.
+
+ For more details, see `rfc7523 section 4`_.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ assertion (str): The OAuth 2.0 assertion.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple[str, Optional[datetime], Mapping[str, str]]: The access token,
+ expiration, and additional data returned by the token endpoint.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+
+ .. _rfc7523 section 4: https://tools.ietf.org/html/rfc7523#section-4
+ """
+ body = {"assertion": assertion, "grant_type": client._JWT_GRANT_TYPE}
+
+ response_data = await _token_endpoint_request(
+ request, token_uri, body, can_retry=can_retry
+ )
+
+ try:
+ access_token = response_data["access_token"]
+ except KeyError as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "No access token in response.", response_data, retryable=False
+ )
+ raise new_exc from caught_exc
+
+ expiry = client._parse_expiry(response_data)
+
+ return access_token, expiry, response_data
+
+
+async def id_token_jwt_grant(request, token_uri, assertion, can_retry=True):
+ """Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
+ requests an OpenID Connect ID Token instead of an access token.
+
+ This is a variant on the standard JWT Profile that is currently unique
+ to Google. This was added for the benefit of authenticating to services
+ that require ID Tokens instead of access tokens or JWT bearer tokens.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorization server's token endpoint
+ URI.
+ assertion (str): JWT token signed by a service account. The token's
+ payload must include a ``target_audience`` claim.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple[str, Optional[datetime], Mapping[str, str]]:
+ The (encoded) Open ID Connect ID Token, expiration, and additional
+ data returned by the endpoint.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+ body = {"assertion": assertion, "grant_type": client._JWT_GRANT_TYPE}
+
+ response_data = await _token_endpoint_request(
+ request, token_uri, body, can_retry=can_retry
+ )
+
+ try:
+ id_token = response_data["id_token"]
+ except KeyError as caught_exc:
+ new_exc = exceptions.RefreshError(
+ "No ID token in response.", response_data, retryable=False
+ )
+ raise new_exc from caught_exc
+
+ payload = jwt.decode(id_token, verify=False)
+ expiry = datetime.datetime.utcfromtimestamp(payload["exp"])
+
+ return id_token, expiry, response_data
+
+
+async def refresh_grant(
+ request,
+ token_uri,
+ refresh_token,
+ client_id,
+ client_secret,
+ scopes=None,
+ rapt_token=None,
+ can_retry=True,
+):
+ """Implements the OAuth 2.0 refresh token grant.
+
+ For more details, see `rfc6749 section 6`_.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests.
+ token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+ URI.
+ refresh_token (str): The refresh token to use to get a new access
+ token.
+ client_id (str): The OAuth 2.0 application's client ID.
+ client_secret (str): The OAuth 2.0 application's client secret.
+ scopes (Optional(Sequence[str])): Scopes to request. If present, all
+ scopes must be authorized for the refresh token. Useful if refresh
+ token has a wild card scope (e.g.
+ 'https://www.googleapis.com/auth/any-api').
+ rapt_token (Optional(str)): The reauth Proof Token.
+ can_retry (bool): Enable or disable request retry behavior.
+
+ Returns:
+ Tuple[str, Optional[str], Optional[datetime], Mapping[str, str]]: The
+ access token, new or current refresh token, expiration, and additional data
+ returned by the token endpoint.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+
+ .. _rfc6749 section 6: https://tools.ietf.org/html/rfc6749#section-6
+ """
+ body = {
+ "grant_type": client._REFRESH_GRANT_TYPE,
+ "client_id": client_id,
+ "client_secret": client_secret,
+ "refresh_token": refresh_token,
+ }
+ if scopes:
+ body["scope"] = " ".join(scopes)
+ if rapt_token:
+ body["rapt"] = rapt_token
+
+ response_data = await _token_endpoint_request(
+ request, token_uri, body, can_retry=can_retry
+ )
+ return client._handle_refresh_grant_response(response_data, refresh_token)
diff --git a/contrib/python/google-auth/py3/google/oauth2/_credentials_async.py b/contrib/python/google-auth/py3/google/oauth2/_credentials_async.py
new file mode 100644
index 0000000000..e7b9637c82
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/_credentials_async.py
@@ -0,0 +1,112 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Async Credentials.
+
+This module provides credentials based on OAuth 2.0 access and refresh tokens.
+These credentials usually access resources on behalf of a user (resource
+owner).
+
+Specifically, this is intended to use access tokens acquired using the
+`Authorization Code grant`_ and can refresh those tokens using an
+optional `refresh token`_.
+
+Obtaining the initial access and refresh token is outside of the scope of this
+module. Consult `rfc6749 section 4.1`_ for complete details on the
+Authorization Code grant flow.
+
+.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
+.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
+.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
+"""
+
+from google.auth import _credentials_async as credentials
+from google.auth import _helpers
+from google.auth import exceptions
+from google.oauth2 import _reauth_async as reauth
+from google.oauth2 import credentials as oauth2_credentials
+
+
+class Credentials(oauth2_credentials.Credentials):
+ """Credentials using OAuth 2.0 access and refresh tokens.
+
+ The credentials are considered immutable. If you want to modify the
+ quota project, use :meth:`with_quota_project` or ::
+
+ credentials = credentials.with_quota_project('myproject-123')
+ """
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ async def refresh(self, request):
+ if (
+ self._refresh_token is None
+ or self._token_uri is None
+ or self._client_id is None
+ or self._client_secret is None
+ ):
+ raise exceptions.RefreshError(
+ "The credentials do not contain the necessary fields need to "
+ "refresh the access token. You must specify refresh_token, "
+ "token_uri, client_id, and client_secret."
+ )
+
+ (
+ access_token,
+ refresh_token,
+ expiry,
+ grant_response,
+ rapt_token,
+ ) = await reauth.refresh_grant(
+ request,
+ self._token_uri,
+ self._refresh_token,
+ self._client_id,
+ self._client_secret,
+ scopes=self._scopes,
+ rapt_token=self._rapt_token,
+ enable_reauth_refresh=self._enable_reauth_refresh,
+ )
+
+ self.token = access_token
+ self.expiry = expiry
+ self._refresh_token = refresh_token
+ self._id_token = grant_response.get("id_token")
+ self._rapt_token = rapt_token
+
+ if self._scopes and "scope" in grant_response:
+ requested_scopes = frozenset(self._scopes)
+ granted_scopes = frozenset(grant_response["scope"].split())
+ scopes_requested_but_not_granted = requested_scopes - granted_scopes
+ if scopes_requested_but_not_granted:
+ raise exceptions.RefreshError(
+ "Not all requested scopes were granted by the "
+ "authorization server, missing scopes {}.".format(
+ ", ".join(scopes_requested_but_not_granted)
+ )
+ )
+
+
+class UserAccessTokenCredentials(oauth2_credentials.UserAccessTokenCredentials):
+ """Access token credentials for user account.
+
+ Obtain the access token for a given user account or the current active
+ user account with the ``gcloud auth print-access-token`` command.
+
+ Args:
+ account (Optional[str]): Account to get the access token for. If not
+ specified, the current active account will be used.
+ quota_project_id (Optional[str]): The project ID used for quota
+ and billing.
+
+ """
diff --git a/contrib/python/google-auth/py3/google/oauth2/_id_token_async.py b/contrib/python/google-auth/py3/google/oauth2/_id_token_async.py
new file mode 100644
index 0000000000..6594e416ae
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/_id_token_async.py
@@ -0,0 +1,285 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google ID Token helpers.
+
+Provides support for verifying `OpenID Connect ID Tokens`_, especially ones
+generated by Google infrastructure.
+
+To parse and verify an ID Token issued by Google's OAuth 2.0 authorization
+server use :func:`verify_oauth2_token`. To verify an ID Token issued by
+Firebase, use :func:`verify_firebase_token`.
+
+A general purpose ID Token verifier is available as :func:`verify_token`.
+
+Example::
+
+ from google.oauth2 import _id_token_async
+ from google.auth.transport import aiohttp_requests
+
+ request = aiohttp_requests.Request()
+
+ id_info = await _id_token_async.verify_oauth2_token(
+ token, request, 'my-client-id.example.com')
+
+ if id_info['iss'] != 'https://accounts.google.com':
+ raise ValueError('Wrong issuer.')
+
+ userid = id_info['sub']
+
+By default, this will re-fetch certificates for each verification. Because
+Google's public keys are only changed infrequently (on the order of once per
+day), you may wish to take advantage of caching to reduce latency and the
+potential for network errors. This can be accomplished using an external
+library like `CacheControl`_ to create a cache-aware
+:class:`google.auth.transport.Request`::
+
+ import cachecontrol
+ import google.auth.transport.requests
+ import requests
+
+ session = requests.session()
+ cached_session = cachecontrol.CacheControl(session)
+ request = google.auth.transport.requests.Request(session=cached_session)
+
+.. _OpenID Connect ID Tokens:
+ http://openid.net/specs/openid-connect-core-1_0.html#IDToken
+.. _CacheControl: https://cachecontrol.readthedocs.io
+"""
+
+import http.client as http_client
+import json
+import os
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth.transport import requests
+from google.oauth2 import id_token as sync_id_token
+
+
+async def _fetch_certs(request, certs_url):
+ """Fetches certificates.
+
+Google-style certificate endpoints return JSON in the format of
+ ``{'key id': 'x509 certificate'}``.
+
+ Args:
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests. This must be an aiohttp request.
+ certs_url (str): The certificate endpoint URL.
+
+ Returns:
+ Mapping[str, str]: A mapping of public key ID to x.509 certificate
+ data.
+ """
+ response = await request(certs_url, method="GET")
+
+ if response.status != http_client.OK:
+ raise exceptions.TransportError(
+ "Could not fetch certificates at {}".format(certs_url)
+ )
+
+ data = await response.content()
+
+ return json.loads(data)
+
+
+async def verify_token(
+ id_token,
+ request,
+ audience=None,
+ certs_url=sync_id_token._GOOGLE_OAUTH2_CERTS_URL,
+ clock_skew_in_seconds=0,
+):
+ """Verifies an ID token and returns the decoded token.
+
+ Args:
+ id_token (Union[str, bytes]): The encoded token.
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests. This must be an aiohttp request.
+ audience (str): The audience that this token is intended for. If None
+ then the audience is not verified.
+ certs_url (str): The URL that specifies the certificates to use to
+ verify the token. This URL should return JSON in the format of
+ ``{'key id': 'x509 certificate'}``.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Returns:
+ Mapping[str, Any]: The decoded token.
+ """
+ certs = await _fetch_certs(request, certs_url)
+
+ return jwt.decode(
+ id_token,
+ certs=certs,
+ audience=audience,
+ clock_skew_in_seconds=clock_skew_in_seconds,
+ )
+
+
+async def verify_oauth2_token(
+ id_token, request, audience=None, clock_skew_in_seconds=0
+):
+ """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.
+
+ Args:
+ id_token (Union[str, bytes]): The encoded token.
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests. This must be an aiohttp request.
+ audience (str): The audience that this token is intended for. This is
+ typically your application's OAuth 2.0 client ID. If None then the
+ audience is not verified.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Returns:
+ Mapping[str, Any]: The decoded token.
+
+ Raises:
+ exceptions.GoogleAuthError: If the issuer is invalid.
+ """
+ idinfo = await verify_token(
+ id_token,
+ request,
+ audience=audience,
+ certs_url=sync_id_token._GOOGLE_OAUTH2_CERTS_URL,
+ clock_skew_in_seconds=clock_skew_in_seconds,
+ )
+
+ if idinfo["iss"] not in sync_id_token._GOOGLE_ISSUERS:
+ raise exceptions.GoogleAuthError(
+ "Wrong issuer. 'iss' should be one of the following: {}".format(
+ sync_id_token._GOOGLE_ISSUERS
+ )
+ )
+
+ return idinfo
+
+
+async def verify_firebase_token(
+ id_token, request, audience=None, clock_skew_in_seconds=0
+):
+ """Verifies an ID Token issued by Firebase Authentication.
+
+ Args:
+ id_token (Union[str, bytes]): The encoded token.
+ request (google.auth.transport.Request): The object used to make
+ HTTP requests. This must be an aiohttp request.
+ audience (str): The audience that this token is intended for. This is
+ typically your Firebase application ID. If None then the audience
+ is not verified.
+ clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+ validation.
+
+ Returns:
+ Mapping[str, Any]: The decoded token.
+ """
+ return await verify_token(
+ id_token,
+ request,
+ audience=audience,
+ certs_url=sync_id_token._GOOGLE_APIS_CERTS_URL,
+ clock_skew_in_seconds=clock_skew_in_seconds,
+ )
+
+
+async def fetch_id_token(request, audience):
+ """Fetch the ID Token from the current environment.
+
+ This function acquires an ID token from the environment in the following order.
+ See https://google.aip.dev/auth/4110.
+
+ 1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+ to the path of a valid service account JSON file, then ID token is
+ acquired using this service account credentials.
+ 2. If the application is running in Compute Engine, App Engine or Cloud Run,
+ then the ID token is obtained from the metadata server.
+ 3. If metadata server doesn't exist and no valid service account credentials
+ are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will
+ be raised.
+
+ Example::
+
+ import google.oauth2._id_token_async
+ import google.auth.transport.aiohttp_requests
+
+ request = google.auth.transport.aiohttp_requests.Request()
+ target_audience = "https://pubsub.googleapis.com"
+
+ id_token = await google.oauth2._id_token_async.fetch_id_token(request, target_audience)
+
+ Args:
+ request (google.auth.transport.aiohttp_requests.Request): A callable used to make
+ HTTP requests.
+ audience (str): The audience that this ID token is intended for.
+
+ Returns:
+ str: The ID token.
+
+ Raises:
+ ~google.auth.exceptions.DefaultCredentialsError:
+ If metadata server doesn't exist and no valid service account
+ credentials are found.
+ """
+ # 1. Try to get credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
+ # variable.
+ credentials_filename = os.environ.get(environment_vars.CREDENTIALS)
+ if credentials_filename:
+ if not (
+ os.path.exists(credentials_filename)
+ and os.path.isfile(credentials_filename)
+ ):
+ raise exceptions.DefaultCredentialsError(
+ "GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+ )
+
+ try:
+ with open(credentials_filename, "r") as f:
+ from google.oauth2 import _service_account_async as service_account
+
+ info = json.load(f)
+ if info.get("type") == "service_account":
+ credentials = service_account.IDTokenCredentials.from_service_account_info(
+ info, target_audience=audience
+ )
+ await credentials.refresh(request)
+ return credentials.token
+ except ValueError as caught_exc:
+ new_exc = exceptions.DefaultCredentialsError(
+ "GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials.",
+ caught_exc,
+ )
+ raise new_exc from caught_exc
+
+ # 2. Try to fetch ID token from metadata server if it exists. The code works
+ # for GAE and Cloud Run metadata server as well.
+ try:
+ from google.auth import compute_engine
+ from google.auth.compute_engine import _metadata
+
+ request_new = requests.Request()
+ if _metadata.ping(request_new):
+ credentials = compute_engine.IDTokenCredentials(
+ request_new, audience, use_metadata_identity_endpoint=True
+ )
+ credentials.refresh(request_new)
+ return credentials.token
+ except (ImportError, exceptions.TransportError):
+ pass
+
+ raise exceptions.DefaultCredentialsError(
+ "Neither metadata server or valid service account credentials are found."
+ )
diff --git a/contrib/python/google-auth/py3/google/oauth2/_reauth_async.py b/contrib/python/google-auth/py3/google/oauth2/_reauth_async.py
new file mode 100644
index 0000000000..de3675c523
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/_reauth_async.py
@@ -0,0 +1,328 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module that provides functions for handling rapt authentication.
+
+Reauth is a process of obtaining additional authentication (such as password,
+security token, etc.) while refreshing OAuth 2.0 credentials for a user.
+
+Credentials that use the Reauth flow must have the reauth scope,
+``https://www.googleapis.com/auth/accounts.reauth``.
+
+This module provides a high-level function for executing the Reauth process,
+:func:`refresh_grant`, and lower-level helpers for doing the individual
+steps of the reauth process.
+
+Those steps are:
+
+1. Obtaining a list of challenges from the reauth server.
+2. Running through each challenge and sending the result back to the reauth
+ server.
+3. Refreshing the access token using the returned rapt token.
+"""
+
+import sys
+
+from google.auth import exceptions
+from google.oauth2 import _client
+from google.oauth2 import _client_async
+from google.oauth2 import challenges
+from google.oauth2 import reauth
+
+
+async def _get_challenges(
+ request, supported_challenge_types, access_token, requested_scopes=None
+):
+ """Does initial request to reauth API to get the challenges.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests. This must be an aiohttp request.
+ supported_challenge_types (Sequence[str]): list of challenge names
+ supported by the manager.
+ access_token (str): Access token with reauth scopes.
+ requested_scopes (Optional(Sequence[str])): Authorized scopes for the credentials.
+
+ Returns:
+ dict: The response from the reauth API.
+ """
+ body = {"supportedChallengeTypes": supported_challenge_types}
+ if requested_scopes:
+ body["oauthScopesForDomainPolicyLookup"] = requested_scopes
+
+ return await _client_async._token_endpoint_request(
+ request,
+ reauth._REAUTH_API + ":start",
+ body,
+ access_token=access_token,
+ use_json=True,
+ )
+
+
+async def _send_challenge_result(
+ request, session_id, challenge_id, client_input, access_token
+):
+ """Attempt to refresh access token by sending next challenge result.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests. This must be an aiohttp request.
+ session_id (str): session id returned by the initial reauth call.
+ challenge_id (str): challenge id returned by the initial reauth call.
+ client_input: dict with a challenge-specific client input. For example:
+ ``{'credential': password}`` for password challenge.
+ access_token (str): Access token with reauth scopes.
+
+ Returns:
+ dict: The response from the reauth API.
+ """
+ body = {
+ "sessionId": session_id,
+ "challengeId": challenge_id,
+ "action": "RESPOND",
+ "proposalResponse": client_input,
+ }
+
+ return await _client_async._token_endpoint_request(
+ request,
+ reauth._REAUTH_API + "/{}:continue".format(session_id),
+ body,
+ access_token=access_token,
+ use_json=True,
+ )
+
+
+async def _run_next_challenge(msg, request, access_token):
+ """Get the next challenge from msg and run it.
+
+ Args:
+ msg (dict): Reauth API response body (either from the initial request to
+ https://reauth.googleapis.com/v2/sessions:start or from sending the
+ previous challenge response to
+ https://reauth.googleapis.com/v2/sessions/id:continue)
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests. This must be an aiohttp request.
+ access_token (str): reauth access token
+
+ Returns:
+ dict: The response from the reauth API.
+
+ Raises:
+ google.auth.exceptions.ReauthError: if reauth failed.
+ """
+ for challenge in msg["challenges"]:
+ if challenge["status"] != "READY":
+ # Skip non-activated challenges.
+ continue
+ c = challenges.AVAILABLE_CHALLENGES.get(challenge["challengeType"], None)
+ if not c:
+ raise exceptions.ReauthFailError(
+ "Unsupported challenge type {0}. Supported types: {1}".format(
+ challenge["challengeType"],
+ ",".join(list(challenges.AVAILABLE_CHALLENGES.keys())),
+ )
+ )
+ if not c.is_locally_eligible:
+ raise exceptions.ReauthFailError(
+ "Challenge {0} is not locally eligible".format(
+ challenge["challengeType"]
+ )
+ )
+ client_input = c.obtain_challenge_input(challenge)
+ if not client_input:
+ return None
+ return await _send_challenge_result(
+ request,
+ msg["sessionId"],
+ challenge["challengeId"],
+ client_input,
+ access_token,
+ )
+ return None
+
+
+async def _obtain_rapt(request, access_token, requested_scopes):
+ """Given an http request method and reauth access token, get rapt token.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests. This must be an aiohttp request.
+ access_token (str): reauth access token
+ requested_scopes (Sequence[str]): scopes required by the client application
+
+ Returns:
+ str: The rapt token.
+
+ Raises:
+ google.auth.exceptions.ReauthError: if reauth failed
+ """
+ msg = await _get_challenges(
+ request,
+ list(challenges.AVAILABLE_CHALLENGES.keys()),
+ access_token,
+ requested_scopes,
+ )
+
+ if msg["status"] == reauth._AUTHENTICATED:
+ return msg["encodedProofOfReauthToken"]
+
+ for _ in range(0, reauth.RUN_CHALLENGE_RETRY_LIMIT):
+ if not (
+ msg["status"] == reauth._CHALLENGE_REQUIRED
+ or msg["status"] == reauth._CHALLENGE_PENDING
+ ):
+ raise exceptions.ReauthFailError(
+ "Reauthentication challenge failed due to API error: {}".format(
+ msg["status"]
+ )
+ )
+
+ if not reauth.is_interactive():
+ raise exceptions.ReauthFailError(
+ "Reauthentication challenge could not be answered because you are not"
+ " in an interactive session."
+ )
+
+ msg = await _run_next_challenge(msg, request, access_token)
+
+ if msg["status"] == reauth._AUTHENTICATED:
+ return msg["encodedProofOfReauthToken"]
+
+ # If we got here it means we didn't get authenticated.
+ raise exceptions.ReauthFailError("Failed to obtain rapt token.")
+
+
+async def get_rapt_token(
+ request, client_id, client_secret, refresh_token, token_uri, scopes=None
+):
+ """Given an http request method and refresh_token, get rapt token.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests. This must be an aiohttp request.
+ client_id (str): client id to get access token for reauth scope.
+ client_secret (str): client secret for the client_id
+ refresh_token (str): refresh token to refresh access token
+ token_uri (str): uri to refresh access token
+ scopes (Optional(Sequence[str])): scopes required by the client application
+
+ Returns:
+ str: The rapt token.
+ Raises:
+ google.auth.exceptions.RefreshError: If reauth failed.
+ """
+ sys.stderr.write("Reauthentication required.\n")
+
+ # Get access token for reauth.
+ access_token, _, _, _ = await _client_async.refresh_grant(
+ request=request,
+ client_id=client_id,
+ client_secret=client_secret,
+ refresh_token=refresh_token,
+ token_uri=token_uri,
+ scopes=[reauth._REAUTH_SCOPE],
+ )
+
+ # Get rapt token from reauth API.
+ rapt_token = await _obtain_rapt(request, access_token, requested_scopes=scopes)
+
+ return rapt_token
+
+
+async def refresh_grant(
+ request,
+ token_uri,
+ refresh_token,
+ client_id,
+ client_secret,
+ scopes=None,
+ rapt_token=None,
+ enable_reauth_refresh=False,
+):
+ """Implements the reauthentication flow.
+
+ Args:
+ request (google.auth.transport.Request): A callable used to make
+ HTTP requests. This must be an aiohttp request.
+ token_uri (str): The OAuth 2.0 authorization server's token endpoint
+ URI.
+ refresh_token (str): The refresh token to use to get a new access
+ token.
+ client_id (str): The OAuth 2.0 application's client ID.
+ client_secret (str): The OAuth 2.0 application's client secret.
+ scopes (Optional(Sequence[str])): Scopes to request. If present, all
+ scopes must be authorized for the refresh token. Useful if refresh
+ token has a wild card scope (e.g.
+ 'https://www.googleapis.com/auth/any-api').
+ rapt_token (Optional(str)): The rapt token for reauth.
+ enable_reauth_refresh (Optional[bool]): Whether reauth refresh flow
+ should be used. The default value is False. This option is for
+ gcloud only, other users should use the default value.
+
+ Returns:
+ Tuple[str, Optional[str], Optional[datetime], Mapping[str, str], str]: The
+ access token, new refresh token, expiration, the additional data
+ returned by the token endpoint, and the rapt token.
+
+ Raises:
+ google.auth.exceptions.RefreshError: If the token endpoint returned
+ an error.
+ """
+ body = {
+ "grant_type": _client._REFRESH_GRANT_TYPE,
+ "client_id": client_id,
+ "client_secret": client_secret,
+ "refresh_token": refresh_token,
+ }
+ if scopes:
+ body["scope"] = " ".join(scopes)
+ if rapt_token:
+ body["rapt"] = rapt_token
+
+ response_status_ok, response_data, retryable_error = await _client_async._token_endpoint_request_no_throw(
+ request, token_uri, body
+ )
+ if (
+ not response_status_ok
+ and response_data.get("error") == reauth._REAUTH_NEEDED_ERROR
+ and (
+ response_data.get("error_subtype")
+ == reauth._REAUTH_NEEDED_ERROR_INVALID_RAPT
+ or response_data.get("error_subtype")
+ == reauth._REAUTH_NEEDED_ERROR_RAPT_REQUIRED
+ )
+ ):
+ if not enable_reauth_refresh:
+ raise exceptions.RefreshError(
+ "Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate."
+ )
+
+ rapt_token = await get_rapt_token(
+ request, client_id, client_secret, refresh_token, token_uri, scopes=scopes
+ )
+ body["rapt"] = rapt_token
+ (
+ response_status_ok,
+ response_data,
+ retryable_error,
+ ) = await _client_async._token_endpoint_request_no_throw(
+ request, token_uri, body
+ )
+
+ if not response_status_ok:
+ _client._handle_error_response(response_data, retryable_error)
+ refresh_response = _client._handle_refresh_grant_response(
+ response_data, refresh_token
+ )
+ return refresh_response + (rapt_token,)
diff --git a/contrib/python/google-auth/py3/google/oauth2/_service_account_async.py b/contrib/python/google-auth/py3/google/oauth2/_service_account_async.py
new file mode 100644
index 0000000000..cfd315a7ff
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/_service_account_async.py
@@ -0,0 +1,132 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Service Accounts: JSON Web Token (JWT) Profile for OAuth 2.0
+
+NOTE: This file adds asynchronous refresh methods to both credentials
+classes, and therefore async/await syntax is required when calling this
+method when using service account credentials with asynchronous functionality.
+Otherwise, all other methods are inherited from the regular service account
+credentials file google.oauth2.service_account
+
+"""
+
+from google.auth import _credentials_async as credentials_async
+from google.auth import _helpers
+from google.oauth2 import _client_async
+from google.oauth2 import service_account
+
+
+class Credentials(
+ service_account.Credentials, credentials_async.Scoped, credentials_async.Credentials
+):
+ """Service account credentials
+
+ Usually, you'll create these credentials with one of the helper
+ constructors. To create credentials using a Google service account
+ private key JSON file::
+
+ credentials = _service_account_async.Credentials.from_service_account_file(
+ 'service-account.json')
+
+ Or if you already have the service account file loaded::
+
+ service_account_info = json.load(open('service_account.json'))
+ credentials = _service_account_async.Credentials.from_service_account_info(
+ service_account_info)
+
+ Both helper methods pass on arguments to the constructor, so you can
+ specify additional scopes and a subject if necessary::
+
+ credentials = _service_account_async.Credentials.from_service_account_file(
+ 'service-account.json',
+ scopes=['email'],
+ subject='user@example.com')
+
+ The credentials are considered immutable. If you want to modify the scopes
+ or the subject used for delegation, use :meth:`with_scopes` or
+ :meth:`with_subject`::
+
+ scoped_credentials = credentials.with_scopes(['email'])
+ delegated_credentials = credentials.with_subject(subject)
+
+ To add a quota project, use :meth:`with_quota_project`::
+
+ credentials = credentials.with_quota_project('myproject-123')
+ """
+
+ @_helpers.copy_docstring(credentials_async.Credentials)
+ async def refresh(self, request):
+ assertion = self._make_authorization_grant_assertion()
+ access_token, expiry, _ = await _client_async.jwt_grant(
+ request, self._token_uri, assertion
+ )
+ self.token = access_token
+ self.expiry = expiry
+
+
+class IDTokenCredentials(
+ service_account.IDTokenCredentials,
+ credentials_async.Signing,
+ credentials_async.Credentials,
+):
+ """Open ID Connect ID Token-based service account credentials.
+
+ These credentials are largely similar to :class:`.Credentials`, but instead
+ of using an OAuth 2.0 Access Token as the bearer token, they use an Open
+ ID Connect ID Token as the bearer token. These credentials are useful when
+ communicating to services that require ID Tokens and can not accept access
+ tokens.
+
+ Usually, you'll create these credentials with one of the helper
+ constructors. To create credentials using a Google service account
+ private key JSON file::
+
+ credentials = (
+ _service_account_async.IDTokenCredentials.from_service_account_file(
+ 'service-account.json'))
+
+ Or if you already have the service account file loaded::
+
+ service_account_info = json.load(open('service_account.json'))
+ credentials = (
+ _service_account_async.IDTokenCredentials.from_service_account_info(
+ service_account_info))
+
+ Both helper methods pass on arguments to the constructor, so you can
+ specify additional scopes and a subject if necessary::
+
+ credentials = (
+ _service_account_async.IDTokenCredentials.from_service_account_file(
+ 'service-account.json',
+ scopes=['email'],
+ subject='user@example.com'))
+
+ The credentials are considered immutable. If you want to modify the scopes
+ or the subject used for delegation, use :meth:`with_scopes` or
+ :meth:`with_subject`::
+
+ scoped_credentials = credentials.with_scopes(['email'])
+ delegated_credentials = credentials.with_subject(subject)
+
+ """
+
+ @_helpers.copy_docstring(credentials_async.Credentials)
+ async def refresh(self, request):
+ assertion = self._make_authorization_grant_assertion()
+ access_token, expiry, _ = await _client_async.id_token_jwt_grant(
+ request, self._token_uri, assertion
+ )
+ self.token = access_token
+ self.expiry = expiry
diff --git a/contrib/python/google-auth/py3/google/oauth2/challenges.py b/contrib/python/google-auth/py3/google/oauth2/challenges.py
new file mode 100644
index 0000000000..c55796323b
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/challenges.py
@@ -0,0 +1,203 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Challenges for reauthentication.
+"""
+
+import abc
+import base64
+import getpass
+import sys
+
+from google.auth import _helpers
+from google.auth import exceptions
+
+
+REAUTH_ORIGIN = "https://accounts.google.com"
+SAML_CHALLENGE_MESSAGE = (
+ "Please run `gcloud auth login` to complete reauthentication with SAML."
+)
+
+
+def get_user_password(text):
+ """Get password from user.
+
+ Override this function with a different logic if you are using this library
+ outside a CLI.
+
+ Args:
+ text (str): message for the password prompt.
+
+ Returns:
+ str: password string.
+ """
+ return getpass.getpass(text)
+
+
+class ReauthChallenge(metaclass=abc.ABCMeta):
+ """Base class for reauth challenges."""
+
+ @property
+ @abc.abstractmethod
+ def name(self): # pragma: NO COVER
+ """Returns the name of the challenge."""
+ raise NotImplementedError("name property must be implemented")
+
+ @property
+ @abc.abstractmethod
+ def is_locally_eligible(self): # pragma: NO COVER
+ """Returns true if a challenge is supported locally on this machine."""
+ raise NotImplementedError("is_locally_eligible property must be implemented")
+
+ @abc.abstractmethod
+ def obtain_challenge_input(self, metadata): # pragma: NO COVER
+ """Performs logic required to obtain credentials and returns it.
+
+ Args:
+ metadata (Mapping): challenge metadata returned in the 'challenges' field in
+ the initial reauth request. Includes the 'challengeType' field
+ and other challenge-specific fields.
+
+ Returns:
+ response that will be sent to the reauth service as the content of
+ the 'proposalResponse' field in the request body. Usually a dict
+ with the keys specific to the challenge. For example,
+ ``{'credential': password}`` for password challenge.
+ """
+ raise NotImplementedError("obtain_challenge_input method must be implemented")
+
+
+class PasswordChallenge(ReauthChallenge):
+ """Challenge that asks for user's password."""
+
+ @property
+ def name(self):
+ return "PASSWORD"
+
+ @property
+ def is_locally_eligible(self):
+ return True
+
+ @_helpers.copy_docstring(ReauthChallenge)
+ def obtain_challenge_input(self, unused_metadata):
+ passwd = get_user_password("Please enter your password:")
+ if not passwd:
+ passwd = " " # avoid the server crashing in case of no password :D
+ return {"credential": passwd}
+
+
+class SecurityKeyChallenge(ReauthChallenge):
+ """Challenge that asks for user's security key touch."""
+
+ @property
+ def name(self):
+ return "SECURITY_KEY"
+
+ @property
+ def is_locally_eligible(self):
+ return True
+
+ @_helpers.copy_docstring(ReauthChallenge)
+ def obtain_challenge_input(self, metadata):
+ try:
+ import pyu2f.convenience.authenticator # type: ignore
+ import pyu2f.errors # type: ignore
+ import pyu2f.model # type: ignore
+ except ImportError:
+ raise exceptions.ReauthFailError(
+ "pyu2f dependency is required to use Security key reauth feature. "
+ "It can be installed via `pip install pyu2f` or `pip install google-auth[reauth]`."
+ )
+ sk = metadata["securityKey"]
+ challenges = sk["challenges"]
+ # Read both 'applicationId' and 'relyingPartyId', if they are the same, use
+ # applicationId, if they are different, use relyingPartyId first and retry
+ # with applicationId
+ application_id = sk["applicationId"]
+ relying_party_id = sk["relyingPartyId"]
+
+ if application_id != relying_party_id:
+ application_parameters = [relying_party_id, application_id]
+ else:
+ application_parameters = [application_id]
+
+ challenge_data = []
+ for c in challenges:
+ kh = c["keyHandle"].encode("ascii")
+ key = pyu2f.model.RegisteredKey(bytearray(base64.urlsafe_b64decode(kh)))
+ challenge = c["challenge"].encode("ascii")
+ challenge = base64.urlsafe_b64decode(challenge)
+ challenge_data.append({"key": key, "challenge": challenge})
+
+ # Track number of tries to suppress error message until all application_parameters
+ # are tried.
+ tries = 0
+ for app_id in application_parameters:
+ try:
+ tries += 1
+ api = pyu2f.convenience.authenticator.CreateCompositeAuthenticator(
+ REAUTH_ORIGIN
+ )
+ response = api.Authenticate(
+ app_id, challenge_data, print_callback=sys.stderr.write
+ )
+ return {"securityKey": response}
+ except pyu2f.errors.U2FError as e:
+ if e.code == pyu2f.errors.U2FError.DEVICE_INELIGIBLE:
+ # Only show error if all app_ids have been tried
+ if tries == len(application_parameters):
+ sys.stderr.write("Ineligible security key.\n")
+ return None
+ continue
+ if e.code == pyu2f.errors.U2FError.TIMEOUT:
+ sys.stderr.write(
+ "Timed out while waiting for security key touch.\n"
+ )
+ else:
+ raise e
+ except pyu2f.errors.PluginError as e:
+ sys.stderr.write("Plugin error: {}.\n".format(e))
+ continue
+ except pyu2f.errors.NoDeviceFoundError:
+ sys.stderr.write("No security key found.\n")
+ return None
+
+
+class SamlChallenge(ReauthChallenge):
+ """Challenge that asks the users to browse to their ID Providers.
+
+ Currently SAML challenge is not supported. When obtaining the challenge
+ input, an exception will be raised to instruct the users to run
+ `gcloud auth login` for reauthentication.
+ """
+
+ @property
+ def name(self):
+ return "SAML"
+
+ @property
+ def is_locally_eligible(self):
+ return True
+
+ def obtain_challenge_input(self, metadata):
+ # Magic Arch has not fully supported returning a proper redirect URL
+ # for programmatic SAML users today. So we error out here and request
+ # users to use gcloud to complete a login.
+ raise exceptions.ReauthSamlChallengeFailError(SAML_CHALLENGE_MESSAGE)
+
+
+AVAILABLE_CHALLENGES = {
+ challenge.name: challenge
+ for challenge in [SecurityKeyChallenge(), PasswordChallenge(), SamlChallenge()]
+}
diff --git a/contrib/python/google-auth/py3/google/oauth2/credentials.py b/contrib/python/google-auth/py3/google/oauth2/credentials.py
new file mode 100644
index 0000000000..4643fdbea6
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/credentials.py
@@ -0,0 +1,545 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Credentials.
+
+This module provides credentials based on OAuth 2.0 access and refresh tokens.
+These credentials usually access resources on behalf of a user (resource
+owner).
+
+Specifically, this is intended to use access tokens acquired using the
+`Authorization Code grant`_ and can refresh those tokens using an
+optional `refresh token`_.
+
+Obtaining the initial access and refresh token is outside of the scope of this
+module. Consult `rfc6749 section 4.1`_ for complete details on the
+Authorization Code grant flow.
+
+.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
+.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
+.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
+"""
+
+from datetime import datetime
+import io
+import json
+import logging
+import warnings
+
+from google.auth import _cloud_sdk
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import metrics
+from google.oauth2 import reauth
+
+_LOGGER = logging.getLogger(__name__)
+
+
+# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
+_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
+
+
+class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaProject):
+ """Credentials using OAuth 2.0 access and refresh tokens.
+
+ The credentials are considered immutable. If you want to modify the
+ quota project, use :meth:`with_quota_project` or ::
+
+ credentials = credentials.with_quota_project('myproject-123')
+
+ Reauth is disabled by default. To enable reauth, set the
+ `enable_reauth_refresh` parameter to True in the constructor. Note that
+ reauth feature is intended for gcloud to use only.
+ If reauth is enabled, `pyu2f` dependency has to be installed in order to use security
+ key reauth feature. Dependency can be installed via `pip install pyu2f` or `pip install
+ google-auth[reauth]`.
+ """
+
+ def __init__(
+ self,
+ token,
+ refresh_token=None,
+ id_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ scopes=None,
+ default_scopes=None,
+ quota_project_id=None,
+ expiry=None,
+ rapt_token=None,
+ refresh_handler=None,
+ enable_reauth_refresh=False,
+ granted_scopes=None,
+ trust_boundary=None,
+ ):
+ """
+ Args:
+ token (Optional(str)): The OAuth 2.0 access token. Can be None
+ if refresh information is provided.
+ refresh_token (str): The OAuth 2.0 refresh token. If specified,
+ credentials can be refreshed.
+ id_token (str): The Open ID Connect ID Token.
+ token_uri (str): The OAuth 2.0 authorization server's token
+ endpoint URI. Must be specified for refresh, can be left as
+ None if the token can not be refreshed.
+ client_id (str): The OAuth 2.0 client ID. Must be specified for
+ refresh, can be left as None if the token can not be refreshed.
+ client_secret(str): The OAuth 2.0 client secret. Must be specified
+ for refresh, can be left as None if the token can not be
+ refreshed.
+ scopes (Sequence[str]): The scopes used to obtain authorization.
+ This parameter is used by :meth:`has_scopes`. OAuth 2.0
+ credentials can not request additional scopes after
+ authorization. The scopes must be derivable from the refresh
+ token if refresh information is provided (e.g. The refresh
+ token scopes are a superset of this or contain a wild card
+ scope like 'https://www.googleapis.com/auth/any-api').
+ default_scopes (Sequence[str]): Default scopes passed by a
+ Google client library. Use 'scopes' for user-defined scopes.
+ quota_project_id (Optional[str]): The project ID used for quota and billing.
+ This project may be different from the project used to
+ create the credentials.
+ rapt_token (Optional[str]): The reauth Proof Token.
+ refresh_handler (Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]):
+ A callable which takes in the HTTP request callable and the list of
+ OAuth scopes and when called returns an access token string for the
+ requested scopes and its expiry datetime. This is useful when no
+ refresh tokens are provided and tokens are obtained by calling
+ some external process on demand. It is particularly useful for
+ retrieving downscoped tokens from a token broker.
+ enable_reauth_refresh (Optional[bool]): Whether reauth refresh flow
+ should be used. This flag is for gcloud to use only.
+ granted_scopes (Optional[Sequence[str]]): The scopes that were consented/granted by the user.
+ This could be different from the requested scopes and it could be empty if granted
+ and requested scopes were same.
+ """
+ super(Credentials, self).__init__()
+ self.token = token
+ self.expiry = expiry
+ self._refresh_token = refresh_token
+ self._id_token = id_token
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+ self._granted_scopes = granted_scopes
+ self._token_uri = token_uri
+ self._client_id = client_id
+ self._client_secret = client_secret
+ self._quota_project_id = quota_project_id
+ self._rapt_token = rapt_token
+ self.refresh_handler = refresh_handler
+ self._enable_reauth_refresh = enable_reauth_refresh
+ self._trust_boundary = trust_boundary
+
+ def __getstate__(self):
+ """A __getstate__ method must exist for the __setstate__ to be called
+ This is identical to the default implementation.
+ See https://docs.python.org/3.7/library/pickle.html#object.__setstate__
+ """
+ state_dict = self.__dict__.copy()
+ # Remove _refresh_handler function as there are limitations pickling and
+ # unpickling certain callables (lambda, functools.partial instances)
+ # because they need to be importable.
+ # Instead, the refresh_handler setter should be used to repopulate this.
+ del state_dict["_refresh_handler"]
+ return state_dict
+
+ def __setstate__(self, d):
+ """Credentials pickled with older versions of the class do not have
+ all the attributes."""
+ self.token = d.get("token")
+ self.expiry = d.get("expiry")
+ self._refresh_token = d.get("_refresh_token")
+ self._id_token = d.get("_id_token")
+ self._scopes = d.get("_scopes")
+ self._default_scopes = d.get("_default_scopes")
+ self._granted_scopes = d.get("_granted_scopes")
+ self._token_uri = d.get("_token_uri")
+ self._client_id = d.get("_client_id")
+ self._client_secret = d.get("_client_secret")
+ self._quota_project_id = d.get("_quota_project_id")
+ self._rapt_token = d.get("_rapt_token")
+ self._enable_reauth_refresh = d.get("_enable_reauth_refresh")
+ self._trust_boundary = d.get("_trust_boundary")
+ self._universe_domain = d.get("_universe_domain")
+ # The refresh_handler setter should be used to repopulate this.
+ self._refresh_handler = None
+
+ @property
+ def refresh_token(self):
+ """Optional[str]: The OAuth 2.0 refresh token."""
+ return self._refresh_token
+
+ @property
+ def scopes(self):
+ """Optional[str]: The OAuth 2.0 permission scopes."""
+ return self._scopes
+
+ @property
+ def granted_scopes(self):
+ """Optional[Sequence[str]]: The OAuth 2.0 permission scopes that were granted by the user."""
+ return self._granted_scopes
+
+ @property
+ def token_uri(self):
+ """Optional[str]: The OAuth 2.0 authorization server's token endpoint
+ URI."""
+ return self._token_uri
+
+ @property
+ def id_token(self):
+ """Optional[str]: The Open ID Connect ID Token.
+
+ Depending on the authorization server and the scopes requested, this
+ may be populated when credentials are obtained and updated when
+ :meth:`refresh` is called. This token is a JWT. It can be verified
+ and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.
+ """
+ return self._id_token
+
+ @property
+ def client_id(self):
+ """Optional[str]: The OAuth 2.0 client ID."""
+ return self._client_id
+
+ @property
+ def client_secret(self):
+ """Optional[str]: The OAuth 2.0 client secret."""
+ return self._client_secret
+
+ @property
+ def requires_scopes(self):
+ """False: OAuth 2.0 credentials have their scopes set when
+ the initial token is requested and can not be changed."""
+ return False
+
+ @property
+ def rapt_token(self):
+ """Optional[str]: The reauth Proof Token."""
+ return self._rapt_token
+
+ @property
+ def refresh_handler(self):
+ """Returns the refresh handler if available.
+
+ Returns:
+ Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]:
+ The current refresh handler.
+ """
+ return self._refresh_handler
+
+ @refresh_handler.setter
+ def refresh_handler(self, value):
+ """Updates the current refresh handler.
+
+ Args:
+ value (Optional[Callable[[google.auth.transport.Request, Sequence[str]], [str, datetime]]]):
+ The updated value of the refresh handler.
+
+ Raises:
+ TypeError: If the value is not a callable or None.
+ """
+ if not callable(value) and value is not None:
+ raise TypeError("The provided refresh_handler is not a callable or None.")
+ self._refresh_handler = value
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+
+ return self.__class__(
+ self.token,
+ refresh_token=self.refresh_token,
+ id_token=self.id_token,
+ token_uri=self.token_uri,
+ client_id=self.client_id,
+ client_secret=self.client_secret,
+ scopes=self.scopes,
+ default_scopes=self.default_scopes,
+ granted_scopes=self.granted_scopes,
+ quota_project_id=quota_project_id,
+ rapt_token=self.rapt_token,
+ enable_reauth_refresh=self._enable_reauth_refresh,
+ trust_boundary=self._trust_boundary,
+ )
+
+ @_helpers.copy_docstring(credentials.CredentialsWithTokenUri)
+ def with_token_uri(self, token_uri):
+
+ return self.__class__(
+ self.token,
+ refresh_token=self.refresh_token,
+ id_token=self.id_token,
+ token_uri=token_uri,
+ client_id=self.client_id,
+ client_secret=self.client_secret,
+ scopes=self.scopes,
+ default_scopes=self.default_scopes,
+ granted_scopes=self.granted_scopes,
+ quota_project_id=self.quota_project_id,
+ rapt_token=self.rapt_token,
+ enable_reauth_refresh=self._enable_reauth_refresh,
+ trust_boundary=self._trust_boundary,
+ )
+
+ def _metric_header_for_usage(self):
+ return metrics.CRED_TYPE_USER
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ scopes = self._scopes if self._scopes is not None else self._default_scopes
+ # Use refresh handler if available and no refresh token is
+ # available. This is useful in general when tokens are obtained by calling
+ # some external process on demand. It is particularly useful for retrieving
+ # downscoped tokens from a token broker.
+ if self._refresh_token is None and self.refresh_handler:
+ token, expiry = self.refresh_handler(request, scopes=scopes)
+ # Validate returned data.
+ if not isinstance(token, str):
+ raise exceptions.RefreshError(
+ "The refresh_handler returned token is not a string."
+ )
+ if not isinstance(expiry, datetime):
+ raise exceptions.RefreshError(
+ "The refresh_handler returned expiry is not a datetime object."
+ )
+ if _helpers.utcnow() >= expiry - _helpers.REFRESH_THRESHOLD:
+ raise exceptions.RefreshError(
+ "The credentials returned by the refresh_handler are "
+ "already expired."
+ )
+ self.token = token
+ self.expiry = expiry
+ return
+
+ if (
+ self._refresh_token is None
+ or self._token_uri is None
+ or self._client_id is None
+ or self._client_secret is None
+ ):
+ raise exceptions.RefreshError(
+ "The credentials do not contain the necessary fields need to "
+ "refresh the access token. You must specify refresh_token, "
+ "token_uri, client_id, and client_secret."
+ )
+
+ (
+ access_token,
+ refresh_token,
+ expiry,
+ grant_response,
+ rapt_token,
+ ) = reauth.refresh_grant(
+ request,
+ self._token_uri,
+ self._refresh_token,
+ self._client_id,
+ self._client_secret,
+ scopes=scopes,
+ rapt_token=self._rapt_token,
+ enable_reauth_refresh=self._enable_reauth_refresh,
+ )
+
+ self.token = access_token
+ self.expiry = expiry
+ self._refresh_token = refresh_token
+ self._id_token = grant_response.get("id_token")
+ self._rapt_token = rapt_token
+
+ if scopes and "scope" in grant_response:
+ requested_scopes = frozenset(scopes)
+ self._granted_scopes = grant_response["scope"].split()
+ granted_scopes = frozenset(self._granted_scopes)
+ scopes_requested_but_not_granted = requested_scopes - granted_scopes
+ if scopes_requested_but_not_granted:
+ # User might be presented with unbundled scopes at the time of
+ # consent. So it is a valid scenario to not have all the requested
+ # scopes as part of granted scopes but log a warning in case the
+ # developer wants to debug the scenario.
+ _LOGGER.warning(
+ "Not all requested scopes were granted by the "
+ "authorization server, missing scopes {}.".format(
+ ", ".join(scopes_requested_but_not_granted)
+ )
+ )
+
+ @classmethod
+ def from_authorized_user_info(cls, info, scopes=None):
+ """Creates a Credentials instance from parsed authorized user info.
+
+ Args:
+ info (Mapping[str, str]): The authorized user info in Google
+ format.
+ scopes (Sequence[str]): Optional list of scopes to include in the
+ credentials.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ keys_needed = set(("refresh_token", "client_id", "client_secret"))
+ missing = keys_needed.difference(info.keys())
+
+ if missing:
+ raise ValueError(
+ "Authorized user info was not in the expected format, missing "
+ "fields {}.".format(", ".join(missing))
+ )
+
+ # access token expiry (datetime obj); auto-expire if not saved
+ expiry = info.get("expiry")
+ if expiry:
+ expiry = datetime.strptime(
+ expiry.rstrip("Z").split(".")[0], "%Y-%m-%dT%H:%M:%S"
+ )
+ else:
+ expiry = _helpers.utcnow() - _helpers.REFRESH_THRESHOLD
+
+ # process scopes, which needs to be a seq
+ if scopes is None and "scopes" in info:
+ scopes = info.get("scopes")
+ if isinstance(scopes, str):
+ scopes = scopes.split(" ")
+
+ return cls(
+ token=info.get("token"),
+ refresh_token=info.get("refresh_token"),
+ token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, # always overrides
+ scopes=scopes,
+ client_id=info.get("client_id"),
+ client_secret=info.get("client_secret"),
+ quota_project_id=info.get("quota_project_id"), # may not exist
+ expiry=expiry,
+ rapt_token=info.get("rapt_token"), # may not exist
+ trust_boundary=info.get("trust_boundary"), # may not exist
+ )
+
+ @classmethod
+ def from_authorized_user_file(cls, filename, scopes=None):
+ """Creates a Credentials instance from an authorized user json file.
+
+ Args:
+ filename (str): The path to the authorized user json file.
+ scopes (Sequence[str]): Optional list of scopes to include in the
+ credentials.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the file is not in the expected format.
+ """
+ with io.open(filename, "r", encoding="utf-8") as json_file:
+ data = json.load(json_file)
+ return cls.from_authorized_user_info(data, scopes)
+
+ def to_json(self, strip=None):
+ """Utility function that creates a JSON representation of a Credentials
+ object.
+
+ Args:
+ strip (Sequence[str]): Optional list of members to exclude from the
+ generated JSON.
+
+ Returns:
+ str: A JSON representation of this instance. When converted into
+ a dictionary, it can be passed to from_authorized_user_info()
+ to create a new credential instance.
+ """
+ prep = {
+ "token": self.token,
+ "refresh_token": self.refresh_token,
+ "token_uri": self.token_uri,
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "scopes": self.scopes,
+ "rapt_token": self.rapt_token,
+ }
+ if self.expiry: # flatten expiry timestamp
+ prep["expiry"] = self.expiry.isoformat() + "Z"
+
+ # Remove empty entries (those which are None)
+ prep = {k: v for k, v in prep.items() if v is not None}
+
+ # Remove entries that explicitly need to be removed
+ if strip is not None:
+ prep = {k: v for k, v in prep.items() if k not in strip}
+
+ return json.dumps(prep)
+
+
+class UserAccessTokenCredentials(credentials.CredentialsWithQuotaProject):
+ """Access token credentials for user account.
+
+ Obtain the access token for a given user account or the current active
+ user account with the ``gcloud auth print-access-token`` command.
+
+ Args:
+ account (Optional[str]): Account to get the access token for. If not
+ specified, the current active account will be used.
+ quota_project_id (Optional[str]): The project ID used for quota
+ and billing.
+ """
+
+ def __init__(self, account=None, quota_project_id=None):
+ warnings.warn(
+ "UserAccessTokenCredentials is deprecated, please use "
+ "google.oauth2.credentials.Credentials instead. To use "
+ "that credential type, simply run "
+ "`gcloud auth application-default login` and let the "
+ "client libraries pick up the application default credentials."
+ )
+ super(UserAccessTokenCredentials, self).__init__()
+ self._account = account
+ self._quota_project_id = quota_project_id
+
+ def with_account(self, account):
+ """Create a new instance with the given account.
+
+ Args:
+ account (str): Account to get the access token for.
+
+ Returns:
+ google.oauth2.credentials.UserAccessTokenCredentials: The created
+ credentials with the given account.
+ """
+ return self.__class__(account=account, quota_project_id=self._quota_project_id)
+
+ @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+ def with_quota_project(self, quota_project_id):
+ return self.__class__(account=self._account, quota_project_id=quota_project_id)
+
+ def refresh(self, request):
+ """Refreshes the access token.
+
+ Args:
+ request (google.auth.transport.Request): This argument is required
+ by the base class interface but not used in this implementation,
+ so just set it to `None`.
+
+ Raises:
+ google.auth.exceptions.UserAccessTokenError: If the access token
+ refresh failed.
+ """
+ self.token = _cloud_sdk.get_auth_access_token(self._account)
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def before_request(self, request, method, url, headers):
+ self.refresh(request)
+ self.apply(headers)
diff --git a/contrib/python/google-auth/py3/google/oauth2/gdch_credentials.py b/contrib/python/google-auth/py3/google/oauth2/gdch_credentials.py
new file mode 100644
index 0000000000..7410cfc2e0
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/gdch_credentials.py
@@ -0,0 +1,251 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Experimental GDCH credentials support.
+"""
+
+import datetime
+
+from google.auth import _helpers
+from google.auth import _service_account_info
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import jwt
+from google.oauth2 import _client
+
+
+TOKEN_EXCHANGE_TYPE = "urn:ietf:params:oauth:token-type:token-exchange"
+ACCESS_TOKEN_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+SERVICE_ACCOUNT_TOKEN_TYPE = "urn:k8s:params:oauth:token-type:serviceaccount"
+JWT_LIFETIME = datetime.timedelta(seconds=3600) # 1 hour
+
+
+class ServiceAccountCredentials(credentials.Credentials):
+ """Credentials for GDCH (`Google Distributed Cloud Hosted`_) for service
+ account users.
+
+ .. _Google Distributed Cloud Hosted:
+ https://cloud.google.com/blog/topics/hybrid-cloud/\
+ announcing-google-distributed-cloud-edge-and-hosted
+
+ To create a GDCH service account credential, first create a JSON file of
+ the following format::
+
+ {
+ "type": "gdch_service_account",
+ "format_version": "1",
+ "project": "<project name>",
+ "private_key_id": "<key id>",
+ "private_key": "-----BEGIN EC PRIVATE KEY-----\n<key bytes>\n-----END EC PRIVATE KEY-----\n",
+ "name": "<service identity name>",
+ "ca_cert_path": "<CA cert path>",
+ "token_uri": "https://service-identity.<Domain>/authenticate"
+ }
+
+ The "format_version" field stands for the format of the JSON file. For now
+ it is always "1". The `private_key_id` and `private_key` is used for signing.
+ The `ca_cert_path` is used for token server TLS certificate verification.
+
+ After the JSON file is created, set `GOOGLE_APPLICATION_CREDENTIALS` environment
+ variable to the JSON file path, then use the following code to create the
+ credential::
+
+ import google.auth
+
+ credential, _ = google.auth.default()
+ credential = credential.with_gdch_audience("<the audience>")
+
+ We can also create the credential directly::
+
+ from google.oauth import gdch_credentials
+
+ credential = gdch_credentials.ServiceAccountCredentials.from_service_account_file("<the json file path>")
+ credential = credential.with_gdch_audience("<the audience>")
+
+ The token is obtained in the following way. This class first creates a
+ self signed JWT. It uses the `name` value as the `iss` and `sub` claim, and
+ the `token_uri` as the `aud` claim, and signs the JWT with the `private_key`.
+ It then sends the JWT to the `token_uri` to exchange a final token for
+ `audience`.
+ """
+
+ def __init__(
+ self, signer, service_identity_name, project, audience, token_uri, ca_cert_path
+ ):
+ """
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ service_identity_name (str): The service identity name. It will be
+ used as the `iss` and `sub` claim in the self signed JWT.
+ project (str): The project.
+ audience (str): The audience for the final token.
+ token_uri (str): The token server uri.
+ ca_cert_path (str): The CA cert path for token server side TLS
+ certificate verification. If the token server uses well known
+ CA, then this parameter can be `None`.
+ """
+ super(ServiceAccountCredentials, self).__init__()
+ self._signer = signer
+ self._service_identity_name = service_identity_name
+ self._project = project
+ self._audience = audience
+ self._token_uri = token_uri
+ self._ca_cert_path = ca_cert_path
+
+ def _create_jwt(self):
+ now = _helpers.utcnow()
+ expiry = now + JWT_LIFETIME
+ iss_sub_value = "system:serviceaccount:{}:{}".format(
+ self._project, self._service_identity_name
+ )
+
+ payload = {
+ "iss": iss_sub_value,
+ "sub": iss_sub_value,
+ "aud": self._token_uri,
+ "iat": _helpers.datetime_to_secs(now),
+ "exp": _helpers.datetime_to_secs(expiry),
+ }
+
+ return _helpers.from_bytes(jwt.encode(self._signer, payload))
+
+ @_helpers.copy_docstring(credentials.Credentials)
+ def refresh(self, request):
+ import google.auth.transport.requests
+
+ if not isinstance(request, google.auth.transport.requests.Request):
+ raise exceptions.RefreshError(
+ "For GDCH service account credentials, request must be a google.auth.transport.requests.Request object"
+ )
+
+ # Create a self signed JWT, and do token exchange.
+ jwt_token = self._create_jwt()
+ request_body = {
+ "grant_type": TOKEN_EXCHANGE_TYPE,
+ "audience": self._audience,
+ "requested_token_type": ACCESS_TOKEN_TOKEN_TYPE,
+ "subject_token": jwt_token,
+ "subject_token_type": SERVICE_ACCOUNT_TOKEN_TYPE,
+ }
+ response_data = _client._token_endpoint_request(
+ request,
+ self._token_uri,
+ request_body,
+ access_token=None,
+ use_json=True,
+ verify=self._ca_cert_path,
+ )
+
+ self.token, _, self.expiry, _ = _client._handle_refresh_grant_response(
+ response_data, None
+ )
+
+ def with_gdch_audience(self, audience):
+ """Create a copy of GDCH credentials with the specified audience.
+
+ Args:
+ audience (str): The intended audience for GDCH credentials.
+ """
+ return self.__class__(
+ self._signer,
+ self._service_identity_name,
+ self._project,
+ audience,
+ self._token_uri,
+ self._ca_cert_path,
+ )
+
+ @classmethod
+ def _from_signer_and_info(cls, signer, info):
+ """Creates a Credentials instance from a signer and service account
+ info.
+
+ Args:
+ signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+ info (Mapping[str, str]): The service account info.
+
+ Returns:
+ google.oauth2.gdch_credentials.ServiceAccountCredentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ if info["format_version"] != "1":
+ raise ValueError("Only format version 1 is supported")
+
+ return cls(
+ signer,
+ info["name"], # service_identity_name
+ info["project"],
+ None, # audience
+ info["token_uri"],
+ info.get("ca_cert_path", None),
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info):
+ """Creates a Credentials instance from parsed service account info.
+
+ Args:
+ info (Mapping[str, str]): The service account info in Google
+ format.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.oauth2.gdch_credentials.ServiceAccountCredentials: The constructed
+ credentials.
+
+ Raises:
+ ValueError: If the info is not in the expected format.
+ """
+ signer = _service_account_info.from_dict(
+ info,
+ require=[
+ "format_version",
+ "private_key_id",
+ "private_key",
+ "name",
+ "project",
+ "token_uri",
+ ],
+ use_rsa_signer=False,
+ )
+ return cls._from_signer_and_info(signer, info)
+
+ @classmethod
+ def from_service_account_file(cls, filename):
+ """Creates a Credentials instance from a service account json file.
+
+ Args:
+ filename (str): The path to the service account json file.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ google.oauth2.gdch_credentials.ServiceAccountCredentials: The constructed
+ credentials.
+ """
+ info, signer = _service_account_info.from_filename(
+ filename,
+ require=[
+ "format_version",
+ "private_key_id",
+ "private_key",
+ "name",
+ "project",
+ "token_uri",
+ ],
+ use_rsa_signer=False,
+ )
+ return cls._from_signer_and_info(signer, info)
diff --git a/contrib/python/google-auth/py3/google/oauth2/id_token.py b/contrib/python/google-auth/py3/google/oauth2/id_token.py
new file mode 100644
index 0000000000..2b1abec2b4
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/id_token.py
@@ -0,0 +1,339 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google ID Token helpers.
+
+Provides support for verifying `OpenID Connect ID Tokens`_, especially ones
+generated by Google infrastructure.
+
+To parse and verify an ID Token issued by Google's OAuth 2.0 authorization
+server use :func:`verify_oauth2_token`. To verify an ID Token issued by
+Firebase, use :func:`verify_firebase_token`.
+
+A general purpose ID Token verifier is available as :func:`verify_token`.
+
+Example::
+
+ from google.oauth2 import id_token
+ from google.auth.transport import requests
+
+ request = requests.Request()
+
+ id_info = id_token.verify_oauth2_token(
+ token, request, 'my-client-id.example.com')
+
+ userid = id_info['sub']
+
+By default, this will re-fetch certificates for each verification. Because
+Google's public keys are only changed infrequently (on the order of once per
+day), you may wish to take advantage of caching to reduce latency and the
+potential for network errors. This can be accomplished using an external
+library like `CacheControl`_ to create a cache-aware
+:class:`google.auth.transport.Request`::
+
+ import cachecontrol
+ import google.auth.transport.requests
+ import requests
+
+ session = requests.session()
+ cached_session = cachecontrol.CacheControl(session)
+ request = google.auth.transport.requests.Request(session=cached_session)
+
+.. _OpenID Connect ID Tokens:
+ http://openid.net/specs/openid-connect-core-1_0.html#IDToken
+.. _CacheControl: https://cachecontrol.readthedocs.io
+"""
+
+import http.client as http_client
+import json
+import os
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import jwt
+import google.auth.transport.requests
+
+
+# The URL that provides public certificates for verifying ID tokens issued
+# by Google's OAuth 2.0 authorization server.
+_GOOGLE_OAUTH2_CERTS_URL = "https://www.googleapis.com/oauth2/v1/certs"
+
+# The URL that provides public certificates for verifying ID tokens issued
+# by Firebase and the Google APIs infrastructure
+_GOOGLE_APIS_CERTS_URL = (
+ "https://www.googleapis.com/robot/v1/metadata/x509"
+ "/securetoken@system.gserviceaccount.com"
+)
+
+_GOOGLE_ISSUERS = ["accounts.google.com", "https://accounts.google.com"]
+
+
+def _fetch_certs(request, certs_url):
+    """Fetches certificates.
+
+    Google-style certificate endpoints return JSON in the format of
+    ``{'key id': 'x509 certificate'}``.
+
+    Args:
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        certs_url (str): The certificate endpoint URL.
+
+    Returns:
+        Mapping[str, str]: A mapping of public key ID to x.509 certificate
+            data.
+
+    Raises:
+        google.auth.exceptions.TransportError: If the endpoint does not
+            respond with HTTP 200 OK.
+    """
+    response = request(certs_url, method="GET")
+
+    if response.status != http_client.OK:
+        raise exceptions.TransportError(
+            "Could not fetch certificates at {}".format(certs_url)
+        )
+
+    # The transport returns raw bytes; decode before parsing the JSON body.
+    return json.loads(response.data.decode("utf-8"))
+
+
+def verify_token(
+    id_token,
+    request,
+    audience=None,
+    certs_url=_GOOGLE_OAUTH2_CERTS_URL,
+    clock_skew_in_seconds=0,
+):
+    """Verifies an ID token and returns the decoded token.
+
+    Args:
+        id_token (Union[str, bytes]): The encoded token.
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        audience (str or list): The audience or audiences that this token is
+            intended for. If None then the audience is not verified.
+        certs_url (str): The URL that specifies the certificates to use to
+            verify the token. This URL should return JSON in the format of
+            ``{'key id': 'x509 certificate'}``.
+        clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+            validation.
+
+    Returns:
+        Mapping[str, Any]: The decoded token.
+    """
+    # Certificates are re-fetched on every call; see the module docstring
+    # for how to add caching via a cache-aware transport.
+    certs = _fetch_certs(request, certs_url)
+
+    return jwt.decode(
+        id_token,
+        certs=certs,
+        audience=audience,
+        clock_skew_in_seconds=clock_skew_in_seconds,
+    )
+
+
+def verify_oauth2_token(id_token, request, audience=None, clock_skew_in_seconds=0):
+    """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.
+
+    Args:
+        id_token (Union[str, bytes]): The encoded token.
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        audience (str): The audience that this token is intended for. This is
+            typically your application's OAuth 2.0 client ID. If None then the
+            audience is not verified.
+        clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+            validation.
+
+    Returns:
+        Mapping[str, Any]: The decoded token.
+
+    Raises:
+        exceptions.GoogleAuthError: If the issuer is invalid.
+        ValueError: If token verification fails.
+    """
+    idinfo = verify_token(
+        id_token,
+        request,
+        audience=audience,
+        certs_url=_GOOGLE_OAUTH2_CERTS_URL,
+        clock_skew_in_seconds=clock_skew_in_seconds,
+    )
+
+    # On top of verify_token's checks, require that the token was actually
+    # issued by Google's accounts endpoint.
+    if idinfo["iss"] not in _GOOGLE_ISSUERS:
+        raise exceptions.GoogleAuthError(
+            "Wrong issuer. 'iss' should be one of the following: {}".format(
+                _GOOGLE_ISSUERS
+            )
+        )
+
+    return idinfo
+
+
+def verify_firebase_token(id_token, request, audience=None, clock_skew_in_seconds=0):
+    """Verifies an ID Token issued by Firebase Authentication.
+
+    Args:
+        id_token (Union[str, bytes]): The encoded token.
+        request (google.auth.transport.Request): The object used to make
+            HTTP requests.
+        audience (str): The audience that this token is intended for. This is
+            typically your Firebase application ID. If None then the audience
+            is not verified.
+        clock_skew_in_seconds (int): The clock skew used for `iat` and `exp`
+            validation.
+
+    Returns:
+        Mapping[str, Any]: The decoded token.
+    """
+    # Firebase tokens are verified against the securetoken@system service
+    # account certificates rather than the OAuth2 certificate endpoint.
+    return verify_token(
+        id_token,
+        request,
+        audience=audience,
+        certs_url=_GOOGLE_APIS_CERTS_URL,
+        clock_skew_in_seconds=clock_skew_in_seconds,
+    )
+
+
+def fetch_id_token_credentials(audience, request=None):
+    """Create the ID Token credentials from the current environment.
+
+    This function acquires ID token from the environment in the following order.
+    See https://google.aip.dev/auth/4110.
+
+    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+       to the path of a valid service account JSON file, then ID token is
+       acquired using this service account credentials.
+    2. If the application is running in Compute Engine, App Engine or Cloud Run,
+       then the ID token are obtained from the metadata server.
+    3. If metadata server doesn't exist and no valid service account credentials
+       are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will
+       be raised.
+
+    Example::
+
+        import google.oauth2.id_token
+        import google.auth.transport.requests
+
+        request = google.auth.transport.requests.Request()
+        target_audience = "https://pubsub.googleapis.com"
+
+        # Create ID token credentials.
+        credentials = google.oauth2.id_token.fetch_id_token_credentials(target_audience, request=request)
+
+        # Refresh the credential to obtain an ID token.
+        credentials.refresh(request)
+
+        id_token = credentials.token
+        id_token_expiry = credentials.expiry
+
+    Args:
+        audience (str): The audience that this ID token is intended for.
+        request (Optional[google.auth.transport.Request]): A callable used to make
+            HTTP requests. A request object will be created if not provided.
+
+    Returns:
+        google.auth.credentials.Credentials: The ID token credentials.
+
+    Raises:
+        ~google.auth.exceptions.DefaultCredentialsError:
+            If metadata server doesn't exist and no valid service account
+            credentials are found.
+    """
+    # 1. Try to get credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
+    # variable.
+    credentials_filename = os.environ.get(environment_vars.CREDENTIALS)
+    if credentials_filename:
+        if not (
+            os.path.exists(credentials_filename)
+            and os.path.isfile(credentials_filename)
+        ):
+            raise exceptions.DefaultCredentialsError(
+                "GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+            )
+
+        try:
+            with open(credentials_filename, "r") as f:
+                # Local import; presumably avoids a circular import at module
+                # load time — confirm before moving to the top of the file.
+                from google.oauth2 import service_account
+
+                info = json.load(f)
+                if info.get("type") == "service_account":
+                    return service_account.IDTokenCredentials.from_service_account_info(
+                        info, target_audience=audience
+                    )
+                # Any other credential type falls through to the metadata
+                # server path below.
+        except ValueError as caught_exc:
+            new_exc = exceptions.DefaultCredentialsError(
+                "GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials.",
+                caught_exc,
+            )
+            raise new_exc from caught_exc
+
+    # 2. Try to fetch ID token from metadata server if it exists. The code
+    # works for GAE and Cloud Run metadata server as well.
+    try:
+        from google.auth import compute_engine
+        from google.auth.compute_engine import _metadata
+
+        # Create a request object if not provided.
+        if not request:
+            request = google.auth.transport.requests.Request()
+
+        # ping() probes whether a metadata server is reachable.
+        if _metadata.ping(request):
+            return compute_engine.IDTokenCredentials(
+                request, audience, use_metadata_identity_endpoint=True
+            )
+    except (ImportError, exceptions.TransportError):
+        pass
+
+    raise exceptions.DefaultCredentialsError(
+        "Neither metadata server or valid service account credentials are found."
+    )
+
+
+def fetch_id_token(request, audience):
+    """Fetch the ID Token from the current environment.
+
+    This function acquires ID token from the environment in the following order.
+    See https://google.aip.dev/auth/4110.
+
+    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
+       to the path of a valid service account JSON file, then ID token is
+       acquired using this service account credentials.
+    2. If the application is running in Compute Engine, App Engine or Cloud Run,
+       then the ID token are obtained from the metadata server.
+    3. If metadata server doesn't exist and no valid service account credentials
+       are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will
+       be raised.
+
+    Example::
+
+        import google.oauth2.id_token
+        import google.auth.transport.requests
+
+        request = google.auth.transport.requests.Request()
+        target_audience = "https://pubsub.googleapis.com"
+
+        id_token = google.oauth2.id_token.fetch_id_token(request, target_audience)
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        audience (str): The audience that this ID token is intended for.
+
+    Returns:
+        str: The ID token.
+
+    Raises:
+        ~google.auth.exceptions.DefaultCredentialsError:
+            If metadata server doesn't exist and no valid service account
+            credentials are found.
+    """
+    # Build credentials from the environment, then do a single refresh to
+    # obtain an actual token value.
+    id_token_credentials = fetch_id_token_credentials(audience, request=request)
+    id_token_credentials.refresh(request)
+    return id_token_credentials.token
diff --git a/contrib/python/google-auth/py3/google/oauth2/reauth.py b/contrib/python/google-auth/py3/google/oauth2/reauth.py
new file mode 100644
index 0000000000..5870347739
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/reauth.py
@@ -0,0 +1,368 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module that provides functions for handling rapt authentication.
+
+Reauth is a process of obtaining additional authentication (such as password,
+security token, etc.) while refreshing OAuth 2.0 credentials for a user.
+
+Credentials that use the Reauth flow must have the reauth scope,
+``https://www.googleapis.com/auth/accounts.reauth``.
+
+This module provides a high-level function for executing the Reauth process,
+:func:`refresh_grant`, and lower-level helpers for doing the individual
+steps of the reauth process.
+
+Those steps are:
+
+1. Obtaining a list of challenges from the reauth server.
+2. Running through each challenge and sending the result back to the reauth
+ server.
+3. Refreshing the access token using the returned rapt token.
+"""
+
+import sys
+
+from google.auth import exceptions
+from google.auth import metrics
+from google.oauth2 import _client
+from google.oauth2 import challenges
+
+
+_REAUTH_SCOPE = "https://www.googleapis.com/auth/accounts.reauth"
+_REAUTH_API = "https://reauth.googleapis.com/v2/sessions"
+
+_REAUTH_NEEDED_ERROR = "invalid_grant"
+_REAUTH_NEEDED_ERROR_INVALID_RAPT = "invalid_rapt"
+_REAUTH_NEEDED_ERROR_RAPT_REQUIRED = "rapt_required"
+
+_AUTHENTICATED = "AUTHENTICATED"
+_CHALLENGE_REQUIRED = "CHALLENGE_REQUIRED"
+_CHALLENGE_PENDING = "CHALLENGE_PENDING"
+
+
+# Override this global variable to set custom max number of rounds of reauth
+# challenges should be run.
+RUN_CHALLENGE_RETRY_LIMIT = 5
+
+
+def is_interactive():
+    """Check if we are in an interactive environment.
+
+    Override this function with a different logic if you are using this library
+    outside a CLI.
+
+    If the rapt token needs refreshing, the user needs to answer the challenges.
+    If the user is not in an interactive environment, the challenges can not
+    be answered and we just wait for timeout for no reason.
+
+    Returns:
+        bool: True if is interactive environment, False otherwise.
+    """
+
+    # A TTY on stdin is used as the interactivity signal.
+    return sys.stdin.isatty()
+
+
+def _get_challenges(
+    request, supported_challenge_types, access_token, requested_scopes=None
+):
+    """Does initial request to reauth API to get the challenges.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        supported_challenge_types (Sequence[str]): list of challenge names
+            supported by the manager.
+        access_token (str): Access token with reauth scopes.
+        requested_scopes (Optional(Sequence[str])): Authorized scopes for the credentials.
+
+    Returns:
+        dict: The response from the reauth API.
+    """
+    body = {"supportedChallengeTypes": supported_challenge_types}
+    if requested_scopes:
+        body["oauthScopesForDomainPolicyLookup"] = requested_scopes
+    # Report the start of a reauth flow via the metrics header.
+    metrics_header = {metrics.API_CLIENT_HEADER: metrics.reauth_start()}
+
+    return _client._token_endpoint_request(
+        request,
+        _REAUTH_API + ":start",
+        body,
+        access_token=access_token,
+        use_json=True,
+        headers=metrics_header,
+    )
+
+
+def _send_challenge_result(
+    request, session_id, challenge_id, client_input, access_token
+):
+    """Attempt to refresh access token by sending next challenge result.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        session_id (str): session id returned by the initial reauth call.
+        challenge_id (str): challenge id returned by the initial reauth call.
+        client_input: dict with a challenge-specific client input. For example:
+            ``{'credential': password}`` for password challenge.
+        access_token (str): Access token with reauth scopes.
+
+    Returns:
+        dict: The response from the reauth API.
+    """
+    body = {
+        "sessionId": session_id,
+        "challengeId": challenge_id,
+        "action": "RESPOND",
+        "proposalResponse": client_input,
+    }
+    # Report the continuation of a reauth flow via the metrics header.
+    metrics_header = {metrics.API_CLIENT_HEADER: metrics.reauth_continue()}
+
+    return _client._token_endpoint_request(
+        request,
+        _REAUTH_API + "/{}:continue".format(session_id),
+        body,
+        access_token=access_token,
+        use_json=True,
+        headers=metrics_header,
+    )
+
+
+def _run_next_challenge(msg, request, access_token):
+    """Get the next challenge from msg and run it.
+
+    Args:
+        msg (dict): Reauth API response body (either from the initial request to
+            https://reauth.googleapis.com/v2/sessions:start or from sending the
+            previous challenge response to
+            https://reauth.googleapis.com/v2/sessions/id:continue)
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        access_token (str): reauth access token
+
+    Returns:
+        dict: The response from the reauth API, or None if no challenge is in
+            READY state or the user did not provide challenge input.
+
+    Raises:
+        google.auth.exceptions.ReauthError: if reauth failed.
+    """
+    for challenge in msg["challenges"]:
+        if challenge["status"] != "READY":
+            # Skip non-activated challenges.
+            continue
+        c = challenges.AVAILABLE_CHALLENGES.get(challenge["challengeType"], None)
+        if not c:
+            raise exceptions.ReauthFailError(
+                "Unsupported challenge type {0}. Supported types: {1}".format(
+                    challenge["challengeType"],
+                    ",".join(list(challenges.AVAILABLE_CHALLENGES.keys())),
+                )
+            )
+        if not c.is_locally_eligible:
+            raise exceptions.ReauthFailError(
+                "Challenge {0} is not locally eligible".format(
+                    challenge["challengeType"]
+                )
+            )
+        client_input = c.obtain_challenge_input(challenge)
+        if not client_input:
+            # The user did not provide an answer; abort this round.
+            return None
+        # Only the first READY challenge is answered per call.
+        return _send_challenge_result(
+            request,
+            msg["sessionId"],
+            challenge["challengeId"],
+            client_input,
+            access_token,
+        )
+    return None
+
+
+def _obtain_rapt(request, access_token, requested_scopes):
+    """Given an http request method and reauth access token, get rapt token.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        access_token (str): reauth access token
+        requested_scopes (Sequence[str]): scopes required by the client application
+
+    Returns:
+        str: The rapt token.
+
+    Raises:
+        google.auth.exceptions.ReauthError: if reauth failed
+    """
+    msg = _get_challenges(
+        request,
+        list(challenges.AVAILABLE_CHALLENGES.keys()),
+        access_token,
+        requested_scopes,
+    )
+
+    if msg["status"] == _AUTHENTICATED:
+        return msg["encodedProofOfReauthToken"]
+
+    # Run at most RUN_CHALLENGE_RETRY_LIMIT rounds; each round answers one
+    # challenge and sends the result back to the reauth API.
+    for _ in range(0, RUN_CHALLENGE_RETRY_LIMIT):
+        if not (
+            msg["status"] == _CHALLENGE_REQUIRED or msg["status"] == _CHALLENGE_PENDING
+        ):
+            raise exceptions.ReauthFailError(
+                "Reauthentication challenge failed due to API error: {}".format(
+                    msg["status"]
+                )
+            )
+
+        # Challenges need user input, which is impossible non-interactively.
+        if not is_interactive():
+            raise exceptions.ReauthFailError(
+                "Reauthentication challenge could not be answered because you are not"
+                " in an interactive session."
+            )
+
+        msg = _run_next_challenge(msg, request, access_token)
+
+        if not msg:
+            raise exceptions.ReauthFailError("Failed to obtain rapt token.")
+        if msg["status"] == _AUTHENTICATED:
+            return msg["encodedProofOfReauthToken"]
+
+    # If we got here it means we didn't get authenticated.
+    raise exceptions.ReauthFailError("Failed to obtain rapt token.")
+
+
+def get_rapt_token(
+    request, client_id, client_secret, refresh_token, token_uri, scopes=None
+):
+    """Given an http request method and refresh_token, get rapt token.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        client_id (str): client id to get access token for reauth scope.
+        client_secret (str): client secret for the client_id
+        refresh_token (str): refresh token to refresh access token
+        token_uri (str): uri to refresh access token
+        scopes (Optional(Sequence[str])): scopes required by the client application
+
+    Returns:
+        str: The rapt token.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If reauth failed.
+    """
+    # Let the (CLI) user know why they are about to be challenged.
+    sys.stderr.write("Reauthentication required.\n")
+
+    # Get access token for reauth.
+    access_token, _, _, _ = _client.refresh_grant(
+        request=request,
+        client_id=client_id,
+        client_secret=client_secret,
+        refresh_token=refresh_token,
+        token_uri=token_uri,
+        scopes=[_REAUTH_SCOPE],
+    )
+
+    # Get rapt token from reauth API.
+    rapt_token = _obtain_rapt(request, access_token, requested_scopes=scopes)
+
+    return rapt_token
+
+
+def refresh_grant(
+    request,
+    token_uri,
+    refresh_token,
+    client_id,
+    client_secret,
+    scopes=None,
+    rapt_token=None,
+    enable_reauth_refresh=False,
+):
+    """Implements the reauthentication flow.
+
+    Args:
+        request (google.auth.transport.Request): A callable used to make
+            HTTP requests.
+        token_uri (str): The OAuth 2.0 authorizations server's token endpoint
+            URI.
+        refresh_token (str): The refresh token to use to get a new access
+            token.
+        client_id (str): The OAuth 2.0 application's client ID.
+        client_secret (str): The OAuth 2.0 application's client secret.
+        scopes (Optional(Sequence[str])): Scopes to request. If present, all
+            scopes must be authorized for the refresh token. Useful if refresh
+            token has a wild card scope (e.g.
+            'https://www.googleapis.com/auth/any-api').
+        rapt_token (Optional(str)): The rapt token for reauth.
+        enable_reauth_refresh (Optional[bool]): Whether reauth refresh flow
+            should be used. The default value is False. This option is for
+            gcloud only, other users should use the default value.
+
+    Returns:
+        Tuple[str, Optional[str], Optional[datetime], Mapping[str, str], str]: The
+            access token, new refresh token, expiration, the additional data
+            returned by the token endpoint, and the rapt token.
+
+    Raises:
+        google.auth.exceptions.RefreshError: If the token endpoint returned
+            an error.
+    """
+    body = {
+        "grant_type": _client._REFRESH_GRANT_TYPE,
+        "client_id": client_id,
+        "client_secret": client_secret,
+        "refresh_token": refresh_token,
+    }
+    if scopes:
+        body["scope"] = " ".join(scopes)
+    if rapt_token:
+        body["rapt"] = rapt_token
+    metrics_header = {metrics.API_CLIENT_HEADER: metrics.token_request_user()}
+
+    response_status_ok, response_data, retryable_error = _client._token_endpoint_request_no_throw(
+        request, token_uri, body, headers=metrics_header
+    )
+
+    # A plain string response_data means no structured error JSON was
+    # returned; surface the text directly as a non-retryable error.
+    if not response_status_ok and isinstance(response_data, str):
+        raise exceptions.RefreshError(response_data, retryable=False)
+
+    # Detect the specific "reauth needed" error. When reauth refresh is
+    # enabled, obtain a fresh rapt token and retry the grant exactly once.
+    if (
+        not response_status_ok
+        and response_data.get("error") == _REAUTH_NEEDED_ERROR
+        and (
+            response_data.get("error_subtype") == _REAUTH_NEEDED_ERROR_INVALID_RAPT
+            or response_data.get("error_subtype") == _REAUTH_NEEDED_ERROR_RAPT_REQUIRED
+        )
+    ):
+        if not enable_reauth_refresh:
+            raise exceptions.RefreshError(
+                "Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate."
+            )
+
+        rapt_token = get_rapt_token(
+            request, client_id, client_secret, refresh_token, token_uri, scopes=scopes
+        )
+        body["rapt"] = rapt_token
+        (
+            response_status_ok,
+            response_data,
+            retryable_error,
+        ) = _client._token_endpoint_request_no_throw(
+            request, token_uri, body, headers=metrics_header
+        )
+
+    if not response_status_ok:
+        _client._handle_error_response(response_data, retryable_error)
+    return _client._handle_refresh_grant_response(response_data, refresh_token) + (
+        rapt_token,
+    )
diff --git a/contrib/python/google-auth/py3/google/oauth2/service_account.py b/contrib/python/google-auth/py3/google/oauth2/service_account.py
new file mode 100644
index 0000000000..e08899f8e5
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/service_account.py
@@ -0,0 +1,819 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Service Accounts: JSON Web Token (JWT) Profile for OAuth 2.0
+
+This module implements the JWT Profile for OAuth 2.0 Authorization Grants
+as defined by `RFC 7523`_ with particular support for how this RFC is
+implemented in Google's infrastructure. Google refers to these credentials
+as *Service Accounts*.
+
+Service accounts are used for server-to-server communication, such as
+interactions between a web application server and a Google service. The
+service account belongs to your application instead of to an individual end
+user. In contrast to other OAuth 2.0 profiles, no users are involved and your
+application "acts" as the service account.
+
+Typically an application uses a service account when the application uses
+Google APIs to work with its own data rather than a user's data. For example,
+an application that uses Google Cloud Datastore for data persistence would use
+a service account to authenticate its calls to the Google Cloud Datastore API.
+However, an application that needs to access a user's Drive documents would
+use the normal OAuth 2.0 profile.
+
+Additionally, Google Apps domain administrators can grant service accounts
+`domain-wide delegation`_ authority to access user data on behalf of users in
+the domain.
+
+This profile uses a JWT to acquire an OAuth 2.0 access token. The JWT is used
+in place of the usual authorization token returned during the standard
+OAuth 2.0 Authorization Code grant. The JWT is only used for this purpose, as
+the acquired access token is used as the bearer token when making requests
+using these credentials.
+
+This profile differs from normal OAuth 2.0 profile because no user consent
+step is required. The use of the private key allows this profile to assert
+identity directly.
+
+This profile also differs from the :mod:`google.auth.jwt` authentication
+because the JWT credentials use the JWT directly as the bearer token. This
+profile instead only uses the JWT to obtain an OAuth 2.0 access token. The
+obtained OAuth 2.0 access token is used as the bearer token.
+
+Domain-wide delegation
+----------------------
+
+Domain-wide delegation allows a service account to access user data on
+behalf of any user in a Google Apps domain without consent from the user.
+For example, an application that uses the Google Calendar API to add events to
+the calendars of all users in a Google Apps domain would use a service account
+to access the Google Calendar API on behalf of users.
+
+The Google Apps administrator must explicitly authorize the service account to
+do this. This authorization step is referred to as "delegating domain-wide
+authority" to a service account.
+
+You can use domain-wide delegation by creating a set of credentials with a
+specific subject using :meth:`~Credentials.with_subject`.
+
+.. _RFC 7523: https://tools.ietf.org/html/rfc7523
+"""
+
+import copy
+import datetime
+
+from google.auth import _helpers
+from google.auth import _service_account_info
+from google.auth import credentials
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import metrics
+from google.oauth2 import _client
+
+_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+_DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
+_GOOGLE_OAUTH2_TOKEN_ENDPOINT = "https://oauth2.googleapis.com/token"
+
+
+class Credentials(
+ credentials.Signing,
+ credentials.Scoped,
+ credentials.CredentialsWithQuotaProject,
+ credentials.CredentialsWithTokenUri,
+):
+ """Service account credentials
+
+ Usually, you'll create these credentials with one of the helper
+ constructors. To create credentials using a Google service account
+ private key JSON file::
+
+ credentials = service_account.Credentials.from_service_account_file(
+ 'service-account.json')
+
+ Or if you already have the service account file loaded::
+
+ service_account_info = json.load(open('service_account.json'))
+ credentials = service_account.Credentials.from_service_account_info(
+ service_account_info)
+
+ Both helper methods pass on arguments to the constructor, so you can
+ specify additional scopes and a subject if necessary::
+
+ credentials = service_account.Credentials.from_service_account_file(
+ 'service-account.json',
+ scopes=['email'],
+ subject='user@example.com')
+
+ The credentials are considered immutable. If you want to modify the scopes
+ or the subject used for delegation, use :meth:`with_scopes` or
+ :meth:`with_subject`::
+
+ scoped_credentials = credentials.with_scopes(['email'])
+ delegated_credentials = credentials.with_subject(subject)
+
+ To add a quota project, use :meth:`with_quota_project`::
+
+ credentials = credentials.with_quota_project('myproject-123')
+ """
+
+    def __init__(
+        self,
+        signer,
+        service_account_email,
+        token_uri,
+        scopes=None,
+        default_scopes=None,
+        subject=None,
+        project_id=None,
+        quota_project_id=None,
+        additional_claims=None,
+        always_use_jwt_access=False,
+        universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+        trust_boundary=None,
+    ):
+        """
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            service_account_email (str): The service account's email.
+            scopes (Sequence[str]): User-defined scopes to request during the
+                authorization grant.
+            default_scopes (Sequence[str]): Default scopes passed by a
+                Google client library. Use 'scopes' for user-defined scopes.
+            token_uri (str): The OAuth 2.0 Token URI.
+            subject (str): For domain-wide delegation, the email address of the
+                user for which to request delegated access.
+            project_id (str): Project ID associated with the service account
+                credential.
+            quota_project_id (Optional[str]): The project ID used for quota and
+                billing.
+            additional_claims (Mapping[str, str]): Any additional claims for
+                the JWT assertion used in the authorization grant.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be always used.
+            universe_domain (str): The universe domain. The default universe
+                domain is googleapis.com. For any other value, self signed
+                JWTs are always used for token refresh.
+            trust_boundary (str): String representation of trust boundary meta.
+
+        .. note:: Typically one of the helper constructors
+            :meth:`from_service_account_file` or
+            :meth:`from_service_account_info` are used instead of calling the
+            constructor directly.
+        """
+        super(Credentials, self).__init__()
+
+        self._scopes = scopes
+        self._default_scopes = default_scopes
+        self._signer = signer
+        self._service_account_email = service_account_email
+        self._subject = subject
+        self._project_id = project_id
+        self._quota_project_id = quota_project_id
+        self._token_uri = token_uri
+        self._always_use_jwt_access = always_use_jwt_access
+        # Treat an empty/None universe_domain as "use the default".
+        if not universe_domain:
+            self._universe_domain = _DEFAULT_UNIVERSE_DOMAIN
+        else:
+            self._universe_domain = universe_domain
+
+        # Non-default universe domains force self-signed JWT usage.
+        if universe_domain != _DEFAULT_UNIVERSE_DOMAIN:
+            self._always_use_jwt_access = True
+
+        # Placeholder for self-signed JWT credentials, created outside
+        # __init__ when needed.
+        self._jwt_credentials = None
+
+        if additional_claims is not None:
+            self._additional_claims = additional_claims
+        else:
+            self._additional_claims = {}
+        # NOTE(review): the trust_boundary argument is accepted but never
+        # stored; _trust_boundary is always initialized to "0" — confirm
+        # this is intentional.
+        self._trust_boundary = "0"
+
+    @classmethod
+    def _from_signer_and_info(cls, signer, info, **kwargs):
+        """Creates a Credentials instance from a signer and service account
+        info.
+
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            info (Mapping[str, str]): The service account info.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.oauth2.service_account.Credentials: The constructed
+                credentials.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+        """
+        return cls(
+            signer,
+            service_account_email=info["client_email"],
+            token_uri=info["token_uri"],
+            project_id=info.get("project_id"),
+            # An absent universe_domain falls back to googleapis.com.
+            universe_domain=info.get("universe_domain", _DEFAULT_UNIVERSE_DOMAIN),
+            trust_boundary=info.get("trust_boundary"),
+            **kwargs
+        )
+
+    @classmethod
+    def from_service_account_info(cls, info, **kwargs):
+        """Creates a Credentials instance from parsed service account info.
+
+        Args:
+            info (Mapping[str, str]): The service account info in Google
+                format.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.oauth2.service_account.Credentials: The constructed
+                credentials.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+        """
+        signer = _service_account_info.from_dict(
+            info, require=["client_email", "token_uri"]
+        )
+        return cls._from_signer_and_info(signer, info, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename, **kwargs):
+        """Creates a Credentials instance from a service account json file.
+
+        Args:
+            filename (str): The path to the service account json file.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.auth.service_account.Credentials: The constructed
+                credentials.
+        """
+        # Unlike from_dict, from_filename returns both the parsed info and
+        # the signer in one call.
+        info, signer = _service_account_info.from_filename(
+            filename, require=["client_email", "token_uri"]
+        )
+        return cls._from_signer_and_info(signer, info, **kwargs)
+
+    @property
+    def service_account_email(self):
+        """str: The service account email address for these credentials."""
+        return self._service_account_email
+
+    @property
+    def project_id(self):
+        """Optional[str]: Project ID associated with this credential."""
+        return self._project_id
+
+    @property
+    def requires_scopes(self):
+        """Checks if the credentials requires scopes.
+
+        Returns:
+            bool: True if there are no scopes set otherwise False.
+        """
+        # NOTE(review): equivalent to the simpler ``return not self._scopes``.
+        return True if not self._scopes else False
+
+    def _make_copy(self):
+        """Return a new credentials instance with this instance's settings.
+
+        The cached self-signed JWT credentials are not carried over; the copy
+        starts with ``_jwt_credentials = None`` (set by the constructor) and
+        rebuilds them on demand.
+        """
+        cred = self.__class__(
+            self._signer,
+            service_account_email=self._service_account_email,
+            # Mutable collections are shallow-copied so the copy cannot be
+            # mutated through the original (and vice versa).
+            scopes=copy.copy(self._scopes),
+            default_scopes=copy.copy(self._default_scopes),
+            token_uri=self._token_uri,
+            subject=self._subject,
+            project_id=self._project_id,
+            quota_project_id=self._quota_project_id,
+            additional_claims=self._additional_claims.copy(),
+            always_use_jwt_access=self._always_use_jwt_access,
+            universe_domain=self._universe_domain,
+        )
+        return cred
+
+    @_helpers.copy_docstring(credentials.Scoped)
+    def with_scopes(self, scopes, default_scopes=None):
+        # Immutable-style update: copy first, then override the scope fields.
+        cred = self._make_copy()
+        cred._scopes = scopes
+        cred._default_scopes = default_scopes
+        return cred
+
+    def with_always_use_jwt_access(self, always_use_jwt_access):
+        """Create a copy of these credentials with the specified always_use_jwt_access value.
+
+        Args:
+            always_use_jwt_access (bool): Whether always use self signed JWT or not.
+
+        Returns:
+            google.auth.service_account.Credentials: A new credentials
+            instance.
+        Raises:
+            google.auth.exceptions.InvalidValue: If the universe domain is not
+                default and always_use_jwt_access is False.
+        """
+        cred = self._make_copy()
+        # Non-default universes only support the self-signed JWT flow, so
+        # disabling it there is rejected rather than silently ignored.
+        if (
+            cred._universe_domain != _DEFAULT_UNIVERSE_DOMAIN
+            and not always_use_jwt_access
+        ):
+            raise exceptions.InvalidValue(
+                "always_use_jwt_access should be True for non-default universe domain"
+            )
+        cred._always_use_jwt_access = always_use_jwt_access
+        return cred
+
+    def with_subject(self, subject):
+        """Create a copy of these credentials with the specified subject.
+
+        Setting a subject enables domain-wide delegation (acting on behalf of
+        a user in the domain).
+
+        Args:
+            subject (str): The subject claim.
+
+        Returns:
+            google.auth.service_account.Credentials: A new credentials
+            instance.
+        """
+        cred = self._make_copy()
+        cred._subject = subject
+        return cred
+
+    def with_claims(self, additional_claims):
+        """Returns a copy of these credentials with modified claims.
+
+        Args:
+            additional_claims (Mapping[str, str]): Any additional claims for
+                the JWT payload. This will be merged with the current
+                additional claims. Keys given here override existing keys.
+
+        Returns:
+            google.auth.service_account.Credentials: A new credentials
+            instance.
+        """
+        # Deep-copy before merging so nested claim values on the original
+        # are never shared with (or mutated through) the copy.
+        new_additional_claims = copy.deepcopy(self._additional_claims)
+        new_additional_claims.update(additional_claims or {})
+        cred = self._make_copy()
+        cred._additional_claims = new_additional_claims
+        return cred
+
+    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+    def with_quota_project(self, quota_project_id):
+        # Immutable-style update: copy, then override the quota project.
+        cred = self._make_copy()
+        cred._quota_project_id = quota_project_id
+        return cred
+
+    @_helpers.copy_docstring(credentials.CredentialsWithTokenUri)
+    def with_token_uri(self, token_uri):
+        # Immutable-style update: copy, then override the token endpoint.
+        cred = self._make_copy()
+        cred._token_uri = token_uri
+        return cred
+
+    def _make_authorization_grant_assertion(self):
+        """Create the OAuth 2.0 assertion.
+
+        This assertion is used during the OAuth 2.0 grant to acquire an
+        access token.
+
+        Returns:
+            bytes: The authorization grant assertion.
+        """
+        now = _helpers.utcnow()
+        lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
+        expiry = now + lifetime
+
+        payload = {
+            "iat": _helpers.datetime_to_secs(now),
+            "exp": _helpers.datetime_to_secs(expiry),
+            # The issuer must be the service account email.
+            "iss": self._service_account_email,
+            # The audience must be the auth token endpoint's URI
+            "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT,
+            "scope": _helpers.scopes_to_string(self._scopes or ()),
+        }
+
+        # Additional claims may override any of the defaults above.
+        payload.update(self._additional_claims)
+
+        # The subject can be a user email for domain-wide delegation.
+        # setdefault: an explicit "sub" supplied via additional_claims wins
+        # over the configured subject.
+        if self._subject:
+            payload.setdefault("sub", self._subject)
+
+        token = jwt.encode(self._signer, payload)
+
+        return token
+
+    def _use_self_signed_jwt(self):
+        # Domain-wide delegation doesn't work with self-signed JWTs, so if a
+        # subject is set we must not use a self-signed JWT. Otherwise use one
+        # whenever self-signed JWT credentials have been created.
+        return self._subject is None and self._jwt_credentials is not None
+
+    def _metric_header_for_usage(self):
+        """Return the metrics credential-type label matching the active flow."""
+        if self._use_self_signed_jwt():
+            return metrics.CRED_TYPE_SA_JWT
+        return metrics.CRED_TYPE_SA_ASSERTION
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    def refresh(self, request):
+        # Non-default universes only support the self-signed JWT flow and do
+        # not support domain-wide delegation; fail fast on both.
+        if (
+            self._universe_domain != _DEFAULT_UNIVERSE_DOMAIN
+            and not self._jwt_credentials
+        ):
+            raise exceptions.RefreshError(
+                "self._jwt_credentials is missing for non-default universe domain"
+            )
+        if self._universe_domain != _DEFAULT_UNIVERSE_DOMAIN and self._subject:
+            raise exceptions.RefreshError(
+                "domain wide delegation is not supported for non-default universe domain"
+            )
+
+        if self._use_self_signed_jwt():
+            # Refresh the nested JWT credentials; their token is bytes, so
+            # decode it to str for this credential's token attribute.
+            self._jwt_credentials.refresh(request)
+            self.token = self._jwt_credentials.token.decode()
+            self.expiry = self._jwt_credentials.expiry
+        else:
+            # OAuth 2.0 assertion (JWT bearer) grant against the token URI.
+            assertion = self._make_authorization_grant_assertion()
+            access_token, expiry, _ = _client.jwt_grant(
+                request, self._token_uri, assertion
+            )
+            self.token = access_token
+            self.expiry = expiry
+
+    def _create_self_signed_jwt(self, audience):
+        """Create a self-signed JWT from the credentials if requirements are met.
+
+        The existing cached ``_jwt_credentials`` are reused unless their
+        claims/audience no longer match the requested configuration.
+
+        Args:
+            audience (str): The service URL. ``https://[API_ENDPOINT]/``
+        """
+        # https://google.aip.dev/auth/4111
+        if self._always_use_jwt_access:
+            # Preference order: explicit scopes, then audience, then
+            # default scopes.
+            if self._scopes:
+                additional_claims = {"scope": " ".join(self._scopes)}
+                if (
+                    self._jwt_credentials is None
+                    or self._jwt_credentials.additional_claims != additional_claims
+                ):
+                    self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                        self, None, additional_claims=additional_claims
+                    )
+            elif audience:
+                if (
+                    self._jwt_credentials is None
+                    or self._jwt_credentials._audience != audience
+                ):
+
+                    self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                        self, audience
+                    )
+            elif self._default_scopes:
+                additional_claims = {"scope": " ".join(self._default_scopes)}
+                if (
+                    self._jwt_credentials is None
+                    or additional_claims != self._jwt_credentials.additional_claims
+                ):
+                    self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                        self, None, additional_claims=additional_claims
+                    )
+        # Without always_use_jwt_access, only create audience-bound JWT
+        # credentials when no scopes are configured.
+        elif not self._scopes and audience:
+            self._jwt_credentials = jwt.Credentials.from_signing_credentials(
+                self, audience
+            )
+
+    @_helpers.copy_docstring(credentials.Signing)
+    def sign_bytes(self, message):
+        # Delegate to the underlying google.auth.crypt signer.
+        return self._signer.sign(message)
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer(self):
+        # The crypt signer supplied at construction time.
+        return self._signer
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer_email(self):
+        # The signing identity is the service account itself.
+        return self._service_account_email
+
+
+class IDTokenCredentials(
+    credentials.Signing,
+    credentials.CredentialsWithQuotaProject,
+    credentials.CredentialsWithTokenUri,
+):
+    """Open ID Connect ID Token-based service account credentials.
+
+    These credentials are largely similar to :class:`.Credentials`, but instead
+    of using an OAuth 2.0 Access Token as the bearer token, they use an Open
+    ID Connect ID Token as the bearer token. These credentials are useful when
+    communicating to services that require ID Tokens and can not accept access
+    tokens.
+
+    Usually, you'll create these credentials with one of the helper
+    constructors. To create credentials using a Google service account
+    private key JSON file::
+
+        credentials = (
+            service_account.IDTokenCredentials.from_service_account_file(
+                'service-account.json'))
+
+
+    Or if you already have the service account file loaded::
+
+        service_account_info = json.load(open('service_account.json'))
+        credentials = (
+            service_account.IDTokenCredentials.from_service_account_info(
+                service_account_info))
+
+
+    Both helper methods pass on arguments to the constructor, so you can
+    specify additional scopes and a subject if necessary::
+
+        credentials = (
+            service_account.IDTokenCredentials.from_service_account_file(
+                'service-account.json',
+                scopes=['email'],
+                subject='user@example.com'))
+
+
+    The credentials are considered immutable. If you want to modify the scopes
+    or the subject used for delegation, use :meth:`with_scopes` or
+    :meth:`with_subject`::
+
+        scoped_credentials = credentials.with_scopes(['email'])
+        delegated_credentials = credentials.with_subject(subject)
+
+    """
+
+    def __init__(
+        self,
+        signer,
+        service_account_email,
+        token_uri,
+        target_audience,
+        additional_claims=None,
+        quota_project_id=None,
+        universe_domain=_DEFAULT_UNIVERSE_DOMAIN,
+    ):
+        """
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            service_account_email (str): The service account's email.
+            token_uri (str): The OAuth 2.0 Token URI.
+            target_audience (str): The intended audience for these credentials,
+                used when requesting the ID Token. The ID Token's ``aud`` claim
+                will be set to this string.
+            additional_claims (Mapping[str, str]): Any additional claims for
+                the JWT assertion used in the authorization grant.
+            quota_project_id (Optional[str]): The project ID used for quota and billing.
+            universe_domain (str): The universe domain. The default
+                universe domain is googleapis.com. For non-default values the
+                IAM ID token endpoint is used for token refresh. Note that
+                iam.serviceAccountTokenCreator role is required to use the IAM
+                endpoint.
+        .. note:: Typically one of the helper constructors
+            :meth:`from_service_account_file` or
+            :meth:`from_service_account_info` are used instead of calling the
+            constructor directly.
+        """
+        super(IDTokenCredentials, self).__init__()
+        self._signer = signer
+        self._service_account_email = service_account_email
+        self._token_uri = token_uri
+        self._target_audience = target_audience
+        self._quota_project_id = quota_project_id
+        self._use_iam_endpoint = False
+
+        if not universe_domain:
+            self._universe_domain = _DEFAULT_UNIVERSE_DOMAIN
+        else:
+            self._universe_domain = universe_domain
+
+        # Non-default universes must refresh via the IAM generateIdToken
+        # endpoint rather than the OAuth token URI.
+        if universe_domain != _DEFAULT_UNIVERSE_DOMAIN:
+            self._use_iam_endpoint = True
+
+        if additional_claims is not None:
+            self._additional_claims = additional_claims
+        else:
+            self._additional_claims = {}
+
+    @classmethod
+    def _from_signer_and_info(cls, signer, info, **kwargs):
+        """Creates a credentials instance from a signer and service account
+        info.
+
+        Args:
+            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
+            info (Mapping[str, str]): The service account info.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.auth.jwt.IDTokenCredentials: The constructed credentials.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+        """
+        # setdefault: explicit keyword arguments win over the key-file values.
+        kwargs.setdefault("service_account_email", info["client_email"])
+        kwargs.setdefault("token_uri", info["token_uri"])
+        if "universe_domain" in info:
+            kwargs["universe_domain"] = info["universe_domain"]
+        return cls(signer, **kwargs)
+
+    @classmethod
+    def from_service_account_info(cls, info, **kwargs):
+        """Creates a credentials instance from parsed service account info.
+
+        Args:
+            info (Mapping[str, str]): The service account info in Google
+                format.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.auth.service_account.IDTokenCredentials: The constructed
+                credentials.
+
+        Raises:
+            ValueError: If the info is not in the expected format.
+        """
+        signer = _service_account_info.from_dict(
+            info, require=["client_email", "token_uri"]
+        )
+        return cls._from_signer_and_info(signer, info, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename, **kwargs):
+        """Creates a credentials instance from a service account json file.
+
+        Args:
+            filename (str): The path to the service account json file.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            google.auth.service_account.IDTokenCredentials: The constructed
+                credentials.
+        """
+        info, signer = _service_account_info.from_filename(
+            filename, require=["client_email", "token_uri"]
+        )
+        return cls._from_signer_and_info(signer, info, **kwargs)
+
+    def _make_copy(self):
+        """Return a new credentials instance with this instance's settings."""
+        cred = self.__class__(
+            self._signer,
+            service_account_email=self._service_account_email,
+            token_uri=self._token_uri,
+            target_audience=self._target_audience,
+            additional_claims=self._additional_claims.copy(),
+            quota_project_id=self.quota_project_id,
+            universe_domain=self._universe_domain,
+        )
+        # _use_iam_endpoint is not exposed in the constructor
+        cred._use_iam_endpoint = self._use_iam_endpoint
+        return cred
+
+    def with_target_audience(self, target_audience):
+        """Create a copy of these credentials with the specified target
+        audience.
+
+        Args:
+            target_audience (str): The intended audience for these credentials,
+            used when requesting the ID Token.
+
+        Returns:
+            google.auth.service_account.IDTokenCredentials: A new credentials
+            instance.
+        """
+        cred = self._make_copy()
+        cred._target_audience = target_audience
+        return cred
+
+    def _with_use_iam_endpoint(self, use_iam_endpoint):
+        """Create a copy of these credentials with the use_iam_endpoint value.
+
+        Args:
+            use_iam_endpoint (bool): If True, IAM generateIdToken endpoint will
+                be used instead of the token_uri. Note that
+                iam.serviceAccountTokenCreator role is required to use the IAM
+                endpoint. The default value is False. This feature is currently
+                experimental and subject to change without notice.
+
+        Returns:
+            google.auth.service_account.IDTokenCredentials: A new credentials
+            instance.
+        Raises:
+            google.auth.exceptions.InvalidValue: If the universe domain is not
+                default and use_iam_endpoint is False.
+        """
+        cred = self._make_copy()
+        # Non-default universes require the IAM endpoint, so disabling it
+        # there is rejected rather than silently ignored.
+        if cred._universe_domain != _DEFAULT_UNIVERSE_DOMAIN and not use_iam_endpoint:
+            raise exceptions.InvalidValue(
+                "use_iam_endpoint should be True for non-default universe domain"
+            )
+        cred._use_iam_endpoint = use_iam_endpoint
+        return cred
+
+    @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)
+    def with_quota_project(self, quota_project_id):
+        cred = self._make_copy()
+        cred._quota_project_id = quota_project_id
+        return cred
+
+    @_helpers.copy_docstring(credentials.CredentialsWithTokenUri)
+    def with_token_uri(self, token_uri):
+        cred = self._make_copy()
+        cred._token_uri = token_uri
+        return cred
+
+    def _make_authorization_grant_assertion(self):
+        """Create the OAuth 2.0 assertion.
+
+        This assertion is used during the OAuth 2.0 grant to acquire an
+        ID token.
+
+        Returns:
+            bytes: The authorization grant assertion.
+        """
+        now = _helpers.utcnow()
+        lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
+        expiry = now + lifetime
+
+        payload = {
+            "iat": _helpers.datetime_to_secs(now),
+            "exp": _helpers.datetime_to_secs(expiry),
+            # The issuer must be the service account email.
+            "iss": self.service_account_email,
+            # The audience must be the auth token endpoint's URI
+            "aud": _GOOGLE_OAUTH2_TOKEN_ENDPOINT,
+            # The target audience specifies which service the ID token is
+            # intended for.
+            "target_audience": self._target_audience,
+        }
+
+        # Additional claims may override any of the defaults above.
+        payload.update(self._additional_claims)
+
+        token = jwt.encode(self._signer, payload)
+
+        return token
+
+    def _refresh_with_iam_endpoint(self, request):
+        """Use IAM generateIdToken endpoint to obtain an ID token.
+
+        It works as follows:
+
+        1. First we create a self signed jwt with
+        https://www.googleapis.com/auth/iam being the scope.
+
+        2. Next we use the self signed jwt as the access token, and make a POST
+        request to IAM generateIdToken endpoint. The request body is:
+            {
+                "audience": self._target_audience,
+                "includeEmail": "true",
+                "useEmailAzp": "true",
+            }
+
+        If the request is successful, it will return {"token":"the ID token"},
+        and we can extract the ID token and compute its expiry.
+        """
+        jwt_credentials = jwt.Credentials.from_signing_credentials(
+            self,
+            None,
+            additional_claims={"scope": "https://www.googleapis.com/auth/iam"},
+        )
+        jwt_credentials.refresh(request)
+        self.token, self.expiry = _client.call_iam_generate_id_token_endpoint(
+            request,
+            self.signer_email,
+            self._target_audience,
+            jwt_credentials.token.decode(),
+        )
+
+    @_helpers.copy_docstring(credentials.Credentials)
+    def refresh(self, request):
+        if self._use_iam_endpoint:
+            self._refresh_with_iam_endpoint(request)
+        else:
+            assertion = self._make_authorization_grant_assertion()
+            access_token, expiry, _ = _client.id_token_jwt_grant(
+                request, self._token_uri, assertion
+            )
+            self.token = access_token
+            self.expiry = expiry
+
+    @property
+    def service_account_email(self):
+        """str: The service account email address for these credentials."""
+        return self._service_account_email
+
+    @_helpers.copy_docstring(credentials.Signing)
+    def sign_bytes(self, message):
+        # Delegate to the underlying google.auth.crypt signer.
+        return self._signer.sign(message)
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer(self):
+        return self._signer
+
+    @property  # type: ignore
+    @_helpers.copy_docstring(credentials.Signing)
+    def signer_email(self):
+        return self._service_account_email
diff --git a/contrib/python/google-auth/py3/google/oauth2/sts.py b/contrib/python/google-auth/py3/google/oauth2/sts.py
new file mode 100644
index 0000000000..ad3962735f
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/sts.py
@@ -0,0 +1,176 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Token Exchange Spec.
+
+This module defines a token exchange utility based on the `OAuth 2.0 Token
+Exchange`_ spec. This will be mainly used to exchange external credentials
+for GCP access tokens in workload identity pools to access Google APIs.
+
+The implementation will support various types of client authentication as
+allowed in the spec.
+
+A deviation on the spec will be for additional Google specific options that
+cannot be easily mapped to parameters defined in the RFC.
+
+The returned dictionary response will be based on the `rfc8693 section 2.2.1`_
+spec JSON response.
+
+.. _OAuth 2.0 Token Exchange: https://tools.ietf.org/html/rfc8693
+.. _rfc8693 section 2.2.1: https://tools.ietf.org/html/rfc8693#section-2.2.1
+"""
+
+import http.client as http_client
+import json
+import urllib
+
+from google.oauth2 import utils
+
+
+_URLENCODED_HEADERS = {"Content-Type": "application/x-www-form-urlencoded"}
+
+
+class Client(utils.OAuthClientAuthHandler):
+    """Implements the OAuth 2.0 token exchange spec based on
+    https://tools.ietf.org/html/rfc8693.
+    """
+
+    def __init__(self, token_exchange_endpoint, client_authentication=None):
+        """Initializes an STS client instance.
+
+        Args:
+            token_exchange_endpoint (str): The token exchange endpoint.
+            client_authentication (Optional(google.oauth2.utils.ClientAuthentication)):
+                The optional OAuth client authentication credentials if available.
+        """
+        super(Client, self).__init__(client_authentication)
+        self._token_exchange_endpoint = token_exchange_endpoint
+
+    def _make_request(self, request, headers, request_body):
+        """POST a urlencoded request body to the token exchange endpoint.
+
+        Applies the configured client authentication, then decodes and
+        returns the JSON response.
+
+        Raises:
+            google.auth.exceptions.OAuthError: On a non-200 response.
+        """
+        # Initialize request headers.
+        request_headers = _URLENCODED_HEADERS.copy()
+
+        # Inject additional headers.
+        if headers:
+            for k, v in dict(headers).items():
+                request_headers[k] = v
+
+        # Apply OAuth client authentication.
+        self.apply_client_authentication_options(request_headers, request_body)
+
+        # Execute request.
+        response = request(
+            url=self._token_exchange_endpoint,
+            method="POST",
+            headers=request_headers,
+            body=urllib.parse.urlencode(request_body).encode("utf-8"),
+        )
+
+        # response.data may be bytes or already-decoded text.
+        response_body = (
+            response.data.decode("utf-8")
+            if hasattr(response.data, "decode")
+            else response.data
+        )
+
+        # If non-200 response received, translate to OAuthError exception.
+        if response.status != http_client.OK:
+            utils.handle_error_response(response_body)
+
+        response_data = json.loads(response_body)
+
+        # Return successful response.
+        return response_data
+
+    def exchange_token(
+        self,
+        request,
+        grant_type,
+        subject_token,
+        subject_token_type,
+        resource=None,
+        audience=None,
+        scopes=None,
+        requested_token_type=None,
+        actor_token=None,
+        actor_token_type=None,
+        additional_options=None,
+        additional_headers=None,
+    ):
+        """Exchanges the provided token for another type of token based on the
+        rfc8693 spec.
+
+        Args:
+            request (google.auth.transport.Request): A callable used to make
+                HTTP requests.
+            grant_type (str): The OAuth 2.0 token exchange grant type.
+            subject_token (str): The OAuth 2.0 token exchange subject token.
+            subject_token_type (str): The OAuth 2.0 token exchange subject token type.
+            resource (Optional[str]): The optional OAuth 2.0 token exchange resource field.
+            audience (Optional[str]): The optional OAuth 2.0 token exchange audience field.
+            scopes (Optional[Sequence[str]]): The optional list of scopes to use.
+            requested_token_type (Optional[str]): The optional OAuth 2.0 token exchange requested
+                token type.
+            actor_token (Optional[str]): The optional OAuth 2.0 token exchange actor token.
+            actor_token_type (Optional[str]): The optional OAuth 2.0 token exchange actor token type.
+            additional_options (Optional[Mapping[str, str]]): The optional additional
+                non-standard Google specific options.
+            additional_headers (Optional[Mapping[str, str]]): The optional additional
+                headers to pass to the token exchange endpoint.
+
+        Returns:
+            Mapping[str, str]: The token exchange JSON-decoded response data containing
+                the requested token and its expiration time.
+
+        Raises:
+            google.auth.exceptions.OAuthError: If the token endpoint returned
+                an error.
+        """
+        # Initialize request body.
+        request_body = {
+            "grant_type": grant_type,
+            "resource": resource,
+            "audience": audience,
+            "scope": " ".join(scopes or []),
+            "requested_token_type": requested_token_type,
+            "subject_token": subject_token,
+            "subject_token_type": subject_token_type,
+            "actor_token": actor_token,
+            "actor_token_type": actor_token_type,
+            "options": None,
+        }
+        # Add additional non-standard options (URL-escaped JSON).
+        if additional_options:
+            request_body["options"] = urllib.parse.quote(json.dumps(additional_options))
+        # Remove empty fields in request body (iterate over a snapshot since
+        # the dict is mutated during the loop).
+        for k, v in dict(request_body).items():
+            if v is None or v == "":
+                del request_body[k]
+
+        return self._make_request(request, additional_headers, request_body)
+
+    def refresh_token(self, request, refresh_token):
+        """Exchanges a refresh token for an access token based on the
+        RFC6749 spec.
+
+        Args:
+            request (google.auth.transport.Request): A callable used to make
+                HTTP requests.
+            refresh_token (str): The OAuth 2.0 refresh token.
+        """
+
+        return self._make_request(
+            request,
+            None,
+            {"grant_type": "refresh_token", "refresh_token": refresh_token},
+        )
diff --git a/contrib/python/google-auth/py3/google/oauth2/utils.py b/contrib/python/google-auth/py3/google/oauth2/utils.py
new file mode 100644
index 0000000000..d72ff19166
--- /dev/null
+++ b/contrib/python/google-auth/py3/google/oauth2/utils.py
@@ -0,0 +1,168 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Utilities.
+
+This module provides implementations for various OAuth 2.0 utilities.
+This includes `OAuth error handling`_ and
+`Client authentication for OAuth flows`_.
+
+OAuth error handling
+--------------------
+This will define interfaces for handling OAuth related error responses as
+stated in `RFC 6749 section 5.2`_.
+This will include a common function to convert these HTTP error responses to a
+:class:`google.auth.exceptions.OAuthError` exception.
+
+
+Client authentication for OAuth flows
+-------------------------------------
+We introduce an interface for defining client authentication credentials based
+on `RFC 6749 section 2.3.1`_. This will expose the following
+capabilities:
+
+ * Ability to support basic authentication via request header.
+ * Ability to support bearer token authentication via request header.
+ * Ability to support client ID / secret authentication via request body.
+
+.. _RFC 6749 section 2.3.1: https://tools.ietf.org/html/rfc6749#section-2.3.1
+.. _RFC 6749 section 5.2: https://tools.ietf.org/html/rfc6749#section-5.2
+"""
+
+import abc
+import base64
+import enum
+import json
+
+from google.auth import exceptions
+
+
+# OAuth client authentication based on
+# https://tools.ietf.org/html/rfc6749#section-2.3.
+class ClientAuthType(enum.Enum):
+    """Supported OAuth client authentication mechanisms."""
+
+    # Credentials sent via the HTTP Basic ``Authorization`` header.
+    basic = 1
+    # Credentials sent as ``client_id``/``client_secret`` body fields.
+    request_body = 2
+
+
+class ClientAuthentication(object):
+    """Defines the client authentication credentials for basic and request-body
+    types based on https://tools.ietf.org/html/rfc6749#section-2.3.1.
+    """
+
+    def __init__(self, client_auth_type, client_id, client_secret=None):
+        """Instantiates a client authentication object containing the client ID
+        and secret credentials for basic and request-body auth.
+
+        Args:
+            client_auth_type (google.oauth2.utils.ClientAuthType): The
+                client authentication type.
+            client_id (str): The client ID.
+            client_secret (Optional[str]): The client secret.
+        """
+        self.client_auth_type = client_auth_type
+        self.client_id = client_id
+        self.client_secret = client_secret
+
+
+class OAuthClientAuthHandler(metaclass=abc.ABCMeta):
+    """Abstract class for handling client authentication in OAuth-based
+    operations.
+    """
+
+    def __init__(self, client_authentication=None):
+        """Instantiates an OAuth client authentication handler.
+
+        Args:
+            client_authentication (Optional[google.oauth2.utils.ClientAuthentication]):
+                The OAuth client authentication credentials if available.
+        """
+        super(OAuthClientAuthHandler, self).__init__()
+        self._client_authentication = client_authentication
+
+    def apply_client_authentication_options(
+        self, headers, request_body=None, bearer_token=None
+    ):
+        """Applies client authentication on the OAuth request's headers or POST
+        body.
+
+        Args:
+            headers (Mapping[str, str]): The HTTP request header.
+            request_body (Optional[Mapping[str, str]]): The HTTP request body
+                dictionary. For requests that do not support request body, this
+                is None and will be ignored.
+            bearer_token (Optional[str]): The optional bearer token.
+        """
+        # Inject authenticated header.
+        self._inject_authenticated_headers(headers, bearer_token)
+        # Inject authenticated request body.
+        # A bearer token takes precedence: when one is supplied, request-body
+        # credentials are not added.
+        if bearer_token is None:
+            self._inject_authenticated_request_body(request_body)
+
+    def _inject_authenticated_headers(self, headers, bearer_token=None):
+        # Prefer the explicit bearer token; otherwise fall back to HTTP Basic
+        # auth when configured.
+        if bearer_token is not None:
+            headers["Authorization"] = "Bearer %s" % bearer_token
+        elif (
+            self._client_authentication is not None
+            and self._client_authentication.client_auth_type is ClientAuthType.basic
+        ):
+            username = self._client_authentication.client_id
+            # A missing secret is treated as an empty password.
+            password = self._client_authentication.client_secret or ""
+
+            credentials = base64.b64encode(
+                ("%s:%s" % (username, password)).encode()
+            ).decode()
+            headers["Authorization"] = "Basic %s" % credentials
+
+    def _inject_authenticated_request_body(self, request_body):
+        # Only applies when request-body client authentication is configured.
+        if (
+            self._client_authentication is not None
+            and self._client_authentication.client_auth_type
+            is ClientAuthType.request_body
+        ):
+            if request_body is None:
+                raise exceptions.OAuthError(
+                    "HTTP request does not support request-body"
+                )
+            else:
+                request_body["client_id"] = self._client_authentication.client_id
+                request_body["client_secret"] = (
+                    self._client_authentication.client_secret or ""
+                )
+
+
+def handle_error_response(response_body):
+    """Translates an error response from an OAuth operation into an
+    OAuthError exception.
+
+    The response is expected to follow RFC 6749 section 5.2 (``error``,
+    ``error_description``, ``error_uri`` fields).
+
+    Args:
+        response_body (str): The decoded response data.
+
+    Raises:
+        google.auth.exceptions.OAuthError
+    """
+    try:
+        error_components = []
+        error_data = json.loads(response_body)
+
+        error_components.append("Error code {}".format(error_data["error"]))
+        if "error_description" in error_data:
+            error_components.append(": {}".format(error_data["error_description"]))
+        if "error_uri" in error_data:
+            error_components.append(" - {}".format(error_data["error_uri"]))
+        error_details = "".join(error_components)
+    # If no details could be extracted, use the response data.
+    # KeyError: missing "error" field; ValueError: body is not valid JSON.
+    except (KeyError, ValueError):
+        error_details = response_body
+
+    # The raw body is preserved as the second argument for callers that need
+    # the full server response.
+    raise exceptions.OAuthError(error_details, response_body)
diff --git a/contrib/python/google-auth/py3/tests/__init__.py b/contrib/python/google-auth/py3/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/__init__.py
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/__init__.py b/contrib/python/google-auth/py3/tests/compute_engine/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/compute_engine/__init__.py
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name b/contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name
new file mode 100644
index 0000000000..2ca735d9b3
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name
@@ -0,0 +1 @@
+Google Compute Engine
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name_non_google b/contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name_non_google
new file mode 100644
index 0000000000..9fd177038e
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/compute_engine/data/smbios_product_name_non_google
@@ -0,0 +1 @@
+ABC Compute Engine
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py b/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py
new file mode 100644
index 0000000000..ddf84596af
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/compute_engine/test__metadata.py
@@ -0,0 +1,450 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import importlib
+import json
+import os
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.auth.compute_engine import _metadata
+
+PATH = "instance/service-accounts/default"
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+SMBIOS_PRODUCT_NAME_FILE = os.path.join(DATA_DIR, "smbios_product_name")
+SMBIOS_PRODUCT_NAME_NONEXISTENT_FILE = os.path.join(
+ DATA_DIR, "smbios_product_name_nonexistent"
+)
+SMBIOS_PRODUCT_NAME_NON_GOOGLE = os.path.join(
+ DATA_DIR, "smbios_product_name_non_google"
+)
+
+ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds"
+)
+MDS_PING_METRICS_HEADER_VALUE = "gl-python/3.7 auth/1.1 auth-request-type/mds"
+MDS_PING_REQUEST_HEADER = {
+ "metadata-flavor": "Google",
+ "x-goog-api-client": MDS_PING_METRICS_HEADER_VALUE,
+}
+
+
+def make_request(data, status=http_client.OK, headers=None, retry=False):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = _helpers.to_bytes(data)
+ response.headers = headers or {}
+
+ request = mock.create_autospec(transport.Request)
+ if retry:
+ request.side_effect = [exceptions.TransportError(), response]
+ else:
+ request.return_value = response
+
+ return request
+
+
+def test_detect_gce_residency_linux_success():
+ _metadata._GCE_PRODUCT_NAME_FILE = SMBIOS_PRODUCT_NAME_FILE
+ assert _metadata.detect_gce_residency_linux()
+
+
+def test_detect_gce_residency_linux_non_google():
+ _metadata._GCE_PRODUCT_NAME_FILE = SMBIOS_PRODUCT_NAME_NON_GOOGLE
+ assert not _metadata.detect_gce_residency_linux()
+
+
+def test_detect_gce_residency_linux_nonexistent():
+ _metadata._GCE_PRODUCT_NAME_FILE = SMBIOS_PRODUCT_NAME_NONEXISTENT_FILE
+ assert not _metadata.detect_gce_residency_linux()
+
+
+def test_is_on_gce_ping_success():
+ request = make_request("", headers=_metadata._METADATA_HEADERS)
+ assert _metadata.is_on_gce(request)
+
+
+@mock.patch("os.name", new="nt")
+def test_is_on_gce_windows_success():
+ request = make_request("", headers={_metadata._METADATA_FLAVOR_HEADER: "meep"})
+ assert not _metadata.is_on_gce(request)
+
+
+@mock.patch("os.name", new="posix")
+def test_is_on_gce_linux_success():
+ request = make_request("", headers={_metadata._METADATA_FLAVOR_HEADER: "meep"})
+ _metadata._GCE_PRODUCT_NAME_FILE = SMBIOS_PRODUCT_NAME_FILE
+ assert _metadata.is_on_gce(request)
+
+
+@mock.patch("google.auth.metrics.mds_ping", return_value=MDS_PING_METRICS_HEADER_VALUE)
+def test_ping_success(mock_metrics_header_value):
+ request = make_request("", headers=_metadata._METADATA_HEADERS)
+
+ assert _metadata.ping(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_IP_ROOT,
+ headers=MDS_PING_REQUEST_HEADER,
+ timeout=_metadata._METADATA_DEFAULT_TIMEOUT,
+ )
+
+
+@mock.patch("google.auth.metrics.mds_ping", return_value=MDS_PING_METRICS_HEADER_VALUE)
+def test_ping_success_retry(mock_metrics_header_value):
+ request = make_request("", headers=_metadata._METADATA_HEADERS, retry=True)
+
+ assert _metadata.ping(request)
+
+ request.assert_called_with(
+ method="GET",
+ url=_metadata._METADATA_IP_ROOT,
+ headers=MDS_PING_REQUEST_HEADER,
+ timeout=_metadata._METADATA_DEFAULT_TIMEOUT,
+ )
+ assert request.call_count == 2
+
+
+def test_ping_failure_bad_flavor():
+ request = make_request("", headers={_metadata._METADATA_FLAVOR_HEADER: "meep"})
+
+ assert not _metadata.ping(request)
+
+
+def test_ping_failure_connection_failed():
+ request = make_request("")
+ request.side_effect = exceptions.TransportError()
+
+ assert not _metadata.ping(request)
+
+
+@mock.patch("google.auth.metrics.mds_ping", return_value=MDS_PING_METRICS_HEADER_VALUE)
+def _test_ping_success_custom_root(mock_metrics_header_value):
+ request = make_request("", headers=_metadata._METADATA_HEADERS)
+
+ fake_ip = "1.2.3.4"
+ os.environ[environment_vars.GCE_METADATA_IP] = fake_ip
+ importlib.reload(_metadata)
+
+ try:
+ assert _metadata.ping(request)
+ finally:
+ del os.environ[environment_vars.GCE_METADATA_IP]
+ importlib.reload(_metadata)
+
+ request.assert_called_once_with(
+ method="GET",
+ url="http://" + fake_ip,
+ headers=MDS_PING_REQUEST_HEADER,
+ timeout=_metadata._METADATA_DEFAULT_TIMEOUT,
+ )
+
+
+def test_get_success_json():
+ key, value = "foo", "bar"
+
+ data = json.dumps({key: value})
+ request = make_request(data, headers={"content-type": "application/json"})
+
+ result = _metadata.get(request, PATH)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result[key] == value
+
+
+def test_get_success_retry():
+ key, value = "foo", "bar"
+
+ data = json.dumps({key: value})
+ request = make_request(
+ data, headers={"content-type": "application/json"}, retry=True
+ )
+
+ result = _metadata.get(request, PATH)
+
+ request.assert_called_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert request.call_count == 2
+ assert result[key] == value
+
+
+def test_get_success_text():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+
+ result = _metadata.get(request, PATH)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def test_get_success_params():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+ params = {"recursive": "true"}
+
+ result = _metadata.get(request, PATH, params=params)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def test_get_success_recursive_and_params():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+ params = {"recursive": "false"}
+ result = _metadata.get(request, PATH, recursive=True, params=params)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def test_get_success_recursive():
+ data = "foobar"
+ request = make_request(data, headers={"content-type": "text/plain"})
+
+ result = _metadata.get(request, PATH, recursive=True)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert result == data
+
+
+def _test_get_success_custom_root_new_variable():
+ request = make_request("{}", headers={"content-type": "application/json"})
+
+ fake_root = "another.metadata.service"
+ os.environ[environment_vars.GCE_METADATA_HOST] = fake_root
+ importlib.reload(_metadata)
+
+ try:
+ _metadata.get(request, PATH)
+ finally:
+ del os.environ[environment_vars.GCE_METADATA_HOST]
+ importlib.reload(_metadata)
+
+ request.assert_called_once_with(
+ method="GET",
+ url="http://{}/computeMetadata/v1/{}".format(fake_root, PATH),
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def _test_get_success_custom_root_old_variable():
+ request = make_request("{}", headers={"content-type": "application/json"})
+
+ fake_root = "another.metadata.service"
+ os.environ[environment_vars.GCE_METADATA_ROOT] = fake_root
+ importlib.reload(_metadata)
+
+ try:
+ _metadata.get(request, PATH)
+ finally:
+ del os.environ[environment_vars.GCE_METADATA_ROOT]
+ importlib.reload(_metadata)
+
+ request.assert_called_once_with(
+ method="GET",
+ url="http://{}/computeMetadata/v1/{}".format(fake_root, PATH),
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def test_get_failure():
+ request = make_request("Metadata error", status=http_client.NOT_FOUND)
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ _metadata.get(request, PATH)
+
+ assert excinfo.match(r"Metadata error")
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def test_get_failure_connection_failed():
+ request = make_request("")
+ request.side_effect = exceptions.TransportError()
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ _metadata.get(request, PATH)
+
+ assert excinfo.match(r"Compute Engine Metadata server unavailable")
+
+ request.assert_called_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert request.call_count == 5
+
+
+def test_get_failure_bad_json():
+ request = make_request("{", headers={"content-type": "application/json"})
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ _metadata.get(request, PATH)
+
+ assert excinfo.match(r"invalid JSON")
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH,
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+
+def test_get_project_id():
+ project = "example-project"
+ request = make_request(project, headers={"content-type": "text/plain"})
+
+ project_id = _metadata.get_project_id(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + "project/project-id",
+ headers=_metadata._METADATA_HEADERS,
+ )
+ assert project_id == project
+
+
+@mock.patch(
+ "google.auth.metrics.token_request_access_token_mds",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_get_service_account_token(utcnow, mock_metrics_header_value):
+ ttl = 500
+ request = make_request(
+ json.dumps({"access_token": "token", "expires_in": ttl}),
+ headers={"content-type": "application/json"},
+ )
+
+ token, expiry = _metadata.get_service_account_token(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/token",
+ headers={
+ "metadata-flavor": "Google",
+ "x-goog-api-client": ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ },
+ )
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=ttl)
+
+
+@mock.patch(
+ "google.auth.metrics.token_request_access_token_mds",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_get_service_account_token_with_scopes_list(utcnow, mock_metrics_header_value):
+ ttl = 500
+ request = make_request(
+ json.dumps({"access_token": "token", "expires_in": ttl}),
+ headers={"content-type": "application/json"},
+ )
+
+ token, expiry = _metadata.get_service_account_token(request, scopes=["foo", "bar"])
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/token" + "?scopes=foo%2Cbar",
+ headers={
+ "metadata-flavor": "Google",
+ "x-goog-api-client": ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ },
+ )
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=ttl)
+
+
+@mock.patch(
+ "google.auth.metrics.token_request_access_token_mds",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_get_service_account_token_with_scopes_string(
+ utcnow, mock_metrics_header_value
+):
+ ttl = 500
+ request = make_request(
+ json.dumps({"access_token": "token", "expires_in": ttl}),
+ headers={"content-type": "application/json"},
+ )
+
+ token, expiry = _metadata.get_service_account_token(request, scopes="foo,bar")
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/token" + "?scopes=foo%2Cbar",
+ headers={
+ "metadata-flavor": "Google",
+ "x-goog-api-client": ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ },
+ )
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=ttl)
+
+
+def test_get_service_account_info():
+ key, value = "foo", "bar"
+ request = make_request(
+ json.dumps({key: value}), headers={"content-type": "application/json"}
+ )
+
+ info = _metadata.get_service_account_info(request)
+
+ request.assert_called_once_with(
+ method="GET",
+ url=_metadata._METADATA_ROOT + PATH + "/?recursive=true",
+ headers=_metadata._METADATA_HEADERS,
+ )
+
+ assert info[key] == value
diff --git a/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py b/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py
new file mode 100644
index 0000000000..507fea9fcc
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/compute_engine/test_credentials.py
@@ -0,0 +1,875 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import datetime
+
+import mock
+import pytest # type: ignore
+import responses # type: ignore
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import transport
+from google.auth.compute_engine import credentials
+from google.auth.transport import requests
+
+SAMPLE_ID_TOKEN_EXP = 1584393400
+
+# header: {"alg": "RS256", "typ": "JWT", "kid": "1"}
+# payload: {"iss": "issuer", "iat": 1584393348, "sub": "subject",
+# "exp": 1584393400,"aud": "audience"}
+SAMPLE_ID_TOKEN = (
+ b"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCIsICJraWQiOiAiMSJ9."
+ b"eyJpc3MiOiAiaXNzdWVyIiwgImlhdCI6IDE1ODQzOTMzNDgsICJzdWIiO"
+ b"iAic3ViamVjdCIsICJleHAiOiAxNTg0MzkzNDAwLCAiYXVkIjogImF1ZG"
+ b"llbmNlIn0."
+ b"OquNjHKhTmlgCk361omRo18F_uY-7y0f_AmLbzW062Q1Zr61HAwHYP5FM"
+ b"316CK4_0cH8MUNGASsvZc3VqXAqub6PUTfhemH8pFEwBdAdG0LhrNkU0H"
+ b"WN1YpT55IiQ31esLdL5q-qDsOPpNZJUti1y1lAreM5nIn2srdWzGXGs4i"
+ b"TRQsn0XkNUCL4RErpciXmjfhMrPkcAjKA-mXQm2fa4jmTlEZFqFmUlym1"
+ b"ozJ0yf5grjN6AslN4OGvAv1pS-_Ko_pGBS6IQtSBC6vVKCUuBfaqNjykg"
+ b"bsxbLa6Fp0SYeYwO8ifEnkRvasVpc1WTQqfRB2JCj5pTBDzJpIpFCMmnQ"
+)
+
+ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds"
+)
+ID_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/mds"
+)
+
+
+class TestCredentials(object):
+ credentials = None
+
+ @pytest.fixture(autouse=True)
+ def credentials_fixture(self):
+ self.credentials = credentials.Credentials()
+
+ def test_default_state(self):
+ assert not self.credentials.valid
+ # Expiration hasn't been set yet
+ assert not self.credentials.expired
+ # Scopes are needed
+ assert self.credentials.requires_scopes
+ # Service account email hasn't been populated
+ assert self.credentials.service_account_email == "default"
+ # No quota project
+ assert not self.credentials._quota_project_id
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_refresh_success(self, get, utcnow):
+ get.side_effect = [
+ {
+                # First request is for service account info.
+ "email": "service-account@example.com",
+ "scopes": ["one", "two"],
+ },
+ {
+ # Second request is for the token.
+ "access_token": "token",
+ "expires_in": 500,
+ },
+ ]
+
+ # Refresh credentials
+ self.credentials.refresh(None)
+
+ # Check that the credentials have the token and proper expiration
+ assert self.credentials.token == "token"
+ assert self.credentials.expiry == (utcnow() + datetime.timedelta(seconds=500))
+
+ # Check the credential info
+ assert self.credentials.service_account_email == "service-account@example.com"
+ assert self.credentials._scopes == ["one", "two"]
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert self.credentials.valid
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_mds",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_refresh_success_with_scopes(self, get, utcnow, mock_metrics_header_value):
+ get.side_effect = [
+ {
+                # First request is for service account info.
+ "email": "service-account@example.com",
+ "scopes": ["one", "two"],
+ },
+ {
+ # Second request is for the token.
+ "access_token": "token",
+ "expires_in": 500,
+ },
+ ]
+
+ # Refresh credentials
+ scopes = ["three", "four"]
+ self.credentials = self.credentials.with_scopes(scopes)
+ self.credentials.refresh(None)
+
+ # Check that the credentials have the token and proper expiration
+ assert self.credentials.token == "token"
+ assert self.credentials.expiry == (utcnow() + datetime.timedelta(seconds=500))
+
+ # Check the credential info
+ assert self.credentials.service_account_email == "service-account@example.com"
+ assert self.credentials._scopes == scopes
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert self.credentials.valid
+
+ kwargs = get.call_args[1]
+ assert kwargs["params"] == {"scopes": "three,four"}
+ assert kwargs["headers"] == {
+ "x-goog-api-client": ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE
+ }
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_refresh_error(self, get):
+ get.side_effect = exceptions.TransportError("http error")
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ self.credentials.refresh(None)
+
+ assert excinfo.match(r"http error")
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_before_request_refreshes(self, get):
+ get.side_effect = [
+ {
+                # First request is for service account info.
+ "email": "service-account@example.com",
+ "scopes": "one two",
+ },
+ {
+ # Second request is for the token.
+ "access_token": "token",
+ "expires_in": 500,
+ },
+ ]
+
+ # Credentials should start as invalid
+ assert not self.credentials.valid
+
+ # before_request should cause a refresh
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert get.called
+
+ # Credentials should now be valid.
+ assert self.credentials.valid
+
+ def test_with_quota_project(self):
+ quota_project_creds = self.credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds._quota_project_id == "project-foo"
+
+ def test_with_scopes(self):
+ assert self.credentials._scopes is None
+
+ scopes = ["one", "two"]
+ self.credentials = self.credentials.with_scopes(scopes)
+
+ assert self.credentials._scopes == scopes
+
+ def test_token_usage_metrics(self):
+ self.credentials.token = "token"
+ self.credentials.expiry = None
+
+ headers = {}
+ self.credentials.before_request(mock.Mock(), None, None, headers)
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-goog-api-client"] == "cred-type/mds"
+
+
+class TestIDTokenCredentials(object):
+ credentials = None
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_default_state(self, get):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scope": ["one", "two"]}
+ ]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://example.com"
+ )
+
+ assert not self.credentials.valid
+ # Expiration hasn't been set yet
+ assert not self.credentials.expired
+ # Service account email hasn't been populated
+ assert self.credentials.service_account_email == "service-account@example.com"
+ # Signer is initialized
+ assert self.credentials.signer
+ assert self.credentials.signer_email == "service-account@example.com"
+ # No quota project
+ assert not self.credentials._quota_project_id
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_make_authorization_grant_assertion(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://audience.com",
+ }
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_service_account(self, sign, get, utcnow):
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ target_audience="https://audience.com",
+ service_account_email="service-account@other.com",
+ )
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@other.com",
+ "target_audience": "https://audience.com",
+ }
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_additional_claims(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ target_audience="https://audience.com",
+ additional_claims={"foo": "bar"},
+ )
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://audience.com",
+ "foo": "bar",
+ }
+
+ def test_token_uri(self):
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ signer=mock.Mock(),
+ service_account_email="foo@example.com",
+ target_audience="https://audience.com",
+ )
+ assert self.credentials._token_uri == credentials._DEFAULT_TOKEN_URI
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ signer=mock.Mock(),
+ service_account_email="foo@example.com",
+ target_audience="https://audience.com",
+ token_uri="https://example.com/token",
+ )
+ assert self.credentials._token_uri == "https://example.com/token"
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_target_audience(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+ self.credentials = self.credentials.with_target_audience("https://actually.not")
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://actually.not",
+ }
+
+        # Check that the signer has been initialized with a Request object
+ assert isinstance(self.credentials._signer._request, transport.Request)
+
+ @responses.activate
+ def test_with_target_audience_integration(self):
+ """ Test that it is possible to refresh credentials
+ generated from `with_target_audience`.
+
+ Instead of mocking the methods, the HTTP responses
+ have been mocked.
+ """
+
+ # mock information about credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/default/?recursive=true",
+ status=200,
+ content_type="application/json",
+ json={
+ "scopes": "email",
+ "email": "service-account@example.com",
+ "aliases": ["default"],
+ },
+ )
+
+ # mock token for credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/service-account@example.com/token",
+ status=200,
+ content_type="application/json",
+ json={
+ "access_token": "some-token",
+ "expires_in": 3210,
+ "token_type": "Bearer",
+ },
+ )
+
+ # mock sign blob endpoint
+ signature = base64.b64encode(b"some-signature").decode("utf-8")
+ responses.add(
+ responses.POST,
+ "https://iamcredentials.googleapis.com/v1/projects/-/"
+ "serviceAccounts/service-account@example.com:signBlob?alt=json",
+ status=200,
+ content_type="application/json",
+ json={"keyId": "some-key-id", "signedBlob": signature},
+ )
+
+ id_token = "{}.{}.{}".format(
+ base64.b64encode(b'{"some":"some"}').decode("utf-8"),
+ base64.b64encode(b'{"exp": 3210}').decode("utf-8"),
+ base64.b64encode(b"token").decode("utf-8"),
+ )
+
+ # mock id token endpoint
+ responses.add(
+ responses.POST,
+ "https://www.googleapis.com/oauth2/v4/token",
+ status=200,
+ content_type="application/json",
+ json={"id_token": id_token, "expiry": 3210},
+ )
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=requests.Request(),
+ service_account_email="service-account@example.com",
+ target_audience="https://audience.com",
+ )
+
+ self.credentials = self.credentials.with_target_audience("https://actually.not")
+
+ self.credentials.refresh(requests.Request())
+
+ assert self.credentials.token is not None
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_quota_project(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+ self.credentials = self.credentials.with_quota_project("project-foo")
+
+ assert self.credentials._quota_project_id == "project-foo"
+
+ # Generate authorization grant:
+ token = self.credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, verify=False)
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert token.endswith(b".c2lnbmF0dXJl")
+
+ # Check that the credentials have the token and proper expiration
+ assert payload == {
+ "aud": "https://www.googleapis.com/oauth2/v4/token",
+ "exp": 3600,
+ "iat": 0,
+ "iss": "service-account@example.com",
+ "target_audience": "https://audience.com",
+ }
+
+        # Check that the signer has been initialized with a Request object
+ assert isinstance(self.credentials._signer._request, transport.Request)
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_token_uri(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ target_audience="https://audience.com",
+ token_uri="http://xyz.com",
+ )
+ assert self.credentials._token_uri == "http://xyz.com"
+ creds_with_token_uri = self.credentials.with_token_uri("http://example.com")
+ assert creds_with_token_uri._token_uri == "http://example.com"
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_with_token_uri_exception(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request,
+ target_audience="https://audience.com",
+ use_metadata_identity_endpoint=True,
+ )
+ assert self.credentials._token_uri is None
+ with pytest.raises(ValueError):
+ self.credentials.with_token_uri("http://example.com")
+
+ @responses.activate
+ def test_with_quota_project_integration(self):
+ """ Test that it is possible to refresh credentials
+ generated from `with_quota_project`.
+
+ Instead of mocking the methods, the HTTP responses
+ have been mocked.
+ """
+
+ # mock information about credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/default/?recursive=true",
+ status=200,
+ content_type="application/json",
+ json={
+ "scopes": "email",
+ "email": "service-account@example.com",
+ "aliases": ["default"],
+ },
+ )
+
+ # mock token for credentials
+ responses.add(
+ responses.GET,
+ "http://metadata.google.internal/computeMetadata/v1/instance/"
+ "service-accounts/service-account@example.com/token",
+ status=200,
+ content_type="application/json",
+ json={
+ "access_token": "some-token",
+ "expires_in": 3210,
+ "token_type": "Bearer",
+ },
+ )
+
+ # mock sign blob endpoint
+ signature = base64.b64encode(b"some-signature").decode("utf-8")
+ responses.add(
+ responses.POST,
+ "https://iamcredentials.googleapis.com/v1/projects/-/"
+ "serviceAccounts/service-account@example.com:signBlob?alt=json",
+ status=200,
+ content_type="application/json",
+ json={"keyId": "some-key-id", "signedBlob": signature},
+ )
+
+ id_token = "{}.{}.{}".format(
+ base64.b64encode(b'{"some":"some"}').decode("utf-8"),
+ base64.b64encode(b'{"exp": 3210}').decode("utf-8"),
+ base64.b64encode(b"token").decode("utf-8"),
+ )
+
+ # mock id token endpoint
+ responses.add(
+ responses.POST,
+ "https://www.googleapis.com/oauth2/v4/token",
+ status=200,
+ content_type="application/json",
+ json={"id_token": id_token, "expiry": 3210},
+ )
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=requests.Request(),
+ service_account_email="service-account@example.com",
+ target_audience="https://audience.com",
+ )
+
+ self.credentials = self.credentials.with_quota_project("project-foo")
+
+ self.credentials.refresh(requests.Request())
+
+ assert self.credentials.token is not None
+ assert self.credentials._quota_project_id == "project-foo"
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_refresh_success(self, id_token_jwt_grant, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+ id_token_jwt_grant.side_effect = [
+ ("idtoken", datetime.datetime.utcfromtimestamp(3600), {})
+ ]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Refresh credentials
+ self.credentials.refresh(None)
+
+ # Check that the credentials have the token and proper expiration
+ assert self.credentials.token == "idtoken"
+ assert self.credentials.expiry == (datetime.datetime.utcfromtimestamp(3600))
+
+ # Check the credential info
+ assert self.credentials.service_account_email == "service-account@example.com"
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert self.credentials.valid
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_refresh_error(self, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ response = mock.Mock()
+ response.data = b'{"error": "http error"}'
+ response.status = 404 # Throw a 404 so the request is not retried.
+ request.side_effect = [response]
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ self.credentials.refresh(request)
+
+ assert excinfo.match(r"http error")
+
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.utcfromtimestamp(0),
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_before_request_refreshes(self, id_token_jwt_grant, sign, get, utcnow):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": "one two"}
+ ]
+ sign.side_effect = [b"signature"]
+ id_token_jwt_grant.side_effect = [
+ ("idtoken", datetime.datetime.utcfromtimestamp(3600), {})
+ ]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Credentials should start as invalid
+ assert not self.credentials.valid
+
+ # before_request should cause a refresh
+ request = mock.create_autospec(transport.Request, instance=True)
+ self.credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert get.called
+
+ # Credentials should now be valid.
+ assert self.credentials.valid
+
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ @mock.patch("google.auth.iam.Signer.sign", autospec=True)
+ def test_sign_bytes(self, sign, get):
+ get.side_effect = [
+ {"email": "service-account@example.com", "scopes": ["one", "two"]}
+ ]
+ sign.side_effect = [b"signature"]
+
+ request = mock.create_autospec(transport.Request, instance=True)
+ response = mock.Mock()
+ response.data = b'{"signature": "c2lnbmF0dXJl"}'
+ response.status = 200
+ request.side_effect = [response]
+
+ self.credentials = credentials.IDTokenCredentials(
+ request=request, target_audience="https://audience.com"
+ )
+
+ # Sign the payload with the IAM-backed signer:
+ signature = self.credentials.sign_bytes(b"some bytes")
+
+ # The JWT token signature is 'signature' encoded in base 64:
+ assert signature == b"signature"
+
+ @mock.patch(
+ "google.auth.metrics.token_request_id_token_mds",
+ return_value=ID_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_get_id_token_from_metadata(
+ self, get, get_service_account_info, mock_metrics_header_value
+ ):
+ get.return_value = SAMPLE_ID_TOKEN
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+ cred.refresh(request=mock.Mock())
+
+ assert get.call_args.kwargs["headers"] == {
+ "x-goog-api-client": ID_TOKEN_REQUEST_METRICS_HEADER_VALUE
+ }
+
+ assert cred.token == SAMPLE_ID_TOKEN
+ assert cred.expiry == datetime.datetime.utcfromtimestamp(SAMPLE_ID_TOKEN_EXP)
+ assert cred._use_metadata_identity_endpoint
+ assert cred._signer is None
+ assert cred._token_uri is None
+ assert cred._service_account_email == "foo@example.com"
+ assert cred._target_audience == "audience"
+ with pytest.raises(ValueError):
+ cred.sign_bytes(b"bytes")
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ def test_with_target_audience_for_metadata(self, get_service_account_info):
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+ cred = cred.with_target_audience("new_audience")
+
+ assert cred._target_audience == "new_audience"
+ assert cred._use_metadata_identity_endpoint
+ assert cred._signer is None
+ assert cred._token_uri is None
+ assert cred._service_account_email == "foo@example.com"
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ def test_id_token_with_quota_project(self, get_service_account_info):
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+ cred = cred.with_quota_project("project-foo")
+
+ assert cred._quota_project_id == "project-foo"
+ assert cred._use_metadata_identity_endpoint
+ assert cred._signer is None
+ assert cred._token_uri is None
+ assert cred._service_account_email == "foo@example.com"
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_invalid_id_token_from_metadata(self, get, get_service_account_info):
+ get.return_value = "invalid_id_token"
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+
+ with pytest.raises(ValueError):
+ cred.refresh(request=mock.Mock())
+
+ @mock.patch(
+ "google.auth.compute_engine._metadata.get_service_account_info", autospec=True
+ )
+ @mock.patch("google.auth.compute_engine._metadata.get", autospec=True)
+ def test_transport_error_from_metadata(self, get, get_service_account_info):
+ get.side_effect = exceptions.TransportError("transport error")
+ get_service_account_info.return_value = {"email": "foo@example.com"}
+
+ cred = credentials.IDTokenCredentials(
+ mock.Mock(), "audience", use_metadata_identity_endpoint=True
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ cred.refresh(request=mock.Mock())
+ assert excinfo.match(r"transport error")
+
+ def test_get_id_token_from_metadata_constructor(self):
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ token_uri="token_uri",
+ )
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ signer=mock.Mock(),
+ )
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ additional_claims={"key", "value"},
+ )
+ with pytest.raises(ValueError):
+ credentials.IDTokenCredentials(
+ mock.Mock(),
+ "audience",
+ use_metadata_identity_endpoint=True,
+ service_account_email="foo@example.com",
+ )
diff --git a/contrib/python/google-auth/py3/tests/conftest.py b/contrib/python/google-auth/py3/tests/conftest.py
new file mode 100644
index 0000000000..08896b0f82
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/conftest.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+import mock
+import pytest # type: ignore
+
+
+def pytest_configure():
+ """Load public certificate and private key."""
+ import __res
+ pytest.private_key_bytes = __res.find("data/privatekey.pem")
+ pytest.public_cert_bytes = __res.find("data/public_cert.pem")
+
+
+@pytest.fixture
+def mock_non_existent_module(monkeypatch):
+ """Mocks a non-existing module in sys.modules.
+
+ Additionally mocks any non-existing modules specified in the dotted path.
+ """
+
+ def _mock_non_existent_module(path):
+ parts = path.split(".")
+ partial = []
+ for part in parts:
+ partial.append(part)
+ current_module = ".".join(partial)
+ if current_module not in sys.modules:
+ monkeypatch.setitem(sys.modules, current_module, mock.MagicMock())
+
+ return _mock_non_existent_module
diff --git a/contrib/python/google-auth/py3/tests/crypt/__init__.py b/contrib/python/google-auth/py3/tests/crypt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/crypt/__init__.py
diff --git a/contrib/python/google-auth/py3/tests/crypt/test__cryptography_rsa.py b/contrib/python/google-auth/py3/tests/crypt/test__cryptography_rsa.py
new file mode 100644
index 0000000000..d19154b61b
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/crypt/test__cryptography_rsa.py
@@ -0,0 +1,162 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+from cryptography.hazmat.primitives.asymmetric import rsa
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth.crypt import _cryptography_rsa
+from google.auth.crypt import base
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
+# > -keyout privatekey.pem
+# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+ PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
+
+with open(os.path.join(DATA_DIR, "privatekey.pub"), "rb") as fh:
+ PUBLIC_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+# To generate pem_from_pkcs12.pem and privatekey.p12:
+# $ openssl pkcs12 -export -out privatekey.p12 -inkey privatekey.pem \
+# > -in public_cert.pem
+# $ openssl pkcs12 -in privatekey.p12 -nocerts -nodes \
+# > -out pem_from_pkcs12.pem
+
+with open(os.path.join(DATA_DIR, "pem_from_pkcs12.pem"), "rb") as fh:
+ PKCS8_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "privatekey.p12"), "rb") as fh:
+ PKCS12_KEY_BYTES = fh.read()
+
+# The service account JSON file can be generated from the Google Cloud Console.
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+class TestRSAVerifier(object):
+ def test_verify_success(self):
+ to_sign = b"foo"
+ signer = _cryptography_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_unicode_success(self):
+ to_sign = u"foo"
+ signer = _cryptography_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_failure(self):
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ bad_signature1 = b""
+ assert not verifier.verify(b"foo", bad_signature1)
+ bad_signature2 = b"a"
+ assert not verifier.verify(b"foo", bad_signature2)
+
+ def test_from_string_pub_key(self):
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+ def test_from_string_pub_key_unicode(self):
+ public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
+ verifier = _cryptography_rsa.RSAVerifier.from_string(public_key)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+ def test_from_string_pub_cert(self):
+ verifier = _cryptography_rsa.RSAVerifier.from_string(PUBLIC_CERT_BYTES)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+ def test_from_string_pub_cert_unicode(self):
+ public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
+ verifier = _cryptography_rsa.RSAVerifier.from_string(public_cert)
+ assert isinstance(verifier, _cryptography_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.RSAPublicKey)
+
+
+class TestRSASigner(object):
+ def test_from_string_pkcs1(self):
+ signer = _cryptography_rsa.RSASigner.from_string(PKCS1_KEY_BYTES)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs1_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
+ signer = _cryptography_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs8(self):
+ signer = _cryptography_rsa.RSASigner.from_string(PKCS8_KEY_BYTES)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs8_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS8_KEY_BYTES)
+ signer = _cryptography_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _cryptography_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_string_pkcs12(self):
+ with pytest.raises(ValueError):
+ _cryptography_rsa.RSASigner.from_string(PKCS12_KEY_BYTES)
+
+ def test_from_string_bogus_key(self):
+ key_bytes = "bogus-key"
+ with pytest.raises(ValueError):
+ _cryptography_rsa.RSASigner.from_string(key_bytes)
+
+ def test_from_service_account_info(self):
+ signer = _cryptography_rsa.RSASigner.from_service_account_info(
+ SERVICE_ACCOUNT_INFO
+ )
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
+
+ def test_from_service_account_info_missing_key(self):
+ with pytest.raises(ValueError) as excinfo:
+ _cryptography_rsa.RSASigner.from_service_account_info({})
+
+ assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
+
+ def test_from_service_account_file(self):
+ signer = _cryptography_rsa.RSASigner.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.RSAPrivateKey)
diff --git a/contrib/python/google-auth/py3/tests/crypt/test__python_rsa.py b/contrib/python/google-auth/py3/tests/crypt/test__python_rsa.py
new file mode 100644
index 0000000000..592b523d92
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/crypt/test__python_rsa.py
@@ -0,0 +1,194 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import json
+import os
+
+import mock
+from pyasn1_modules import pem # type: ignore
+import pytest # type: ignore
+import rsa # type: ignore
+
+from google.auth import _helpers
+from google.auth.crypt import _python_rsa
+from google.auth.crypt import base
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
+# > -keyout privatekey.pem
+# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+ PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
+
+with open(os.path.join(DATA_DIR, "privatekey.pub"), "rb") as fh:
+ PUBLIC_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+# To generate pem_from_pkcs12.pem and privatekey.p12:
+# $ openssl pkcs12 -export -out privatekey.p12 -inkey privatekey.pem \
+# > -in public_cert.pem
+# $ openssl pkcs12 -in privatekey.p12 -nocerts -nodes \
+# > -out pem_from_pkcs12.pem
+
+with open(os.path.join(DATA_DIR, "pem_from_pkcs12.pem"), "rb") as fh:
+ PKCS8_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "privatekey.p12"), "rb") as fh:
+ PKCS12_KEY_BYTES = fh.read()
+
+# The service account JSON file can be generated from the Google Cloud Console.
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+class TestRSAVerifier(object):
+ def test_verify_success(self):
+ to_sign = b"foo"
+ signer = _python_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_unicode_success(self):
+ to_sign = u"foo"
+ signer = _python_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_failure(self):
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ bad_signature1 = b""
+ assert not verifier.verify(b"foo", bad_signature1)
+ bad_signature2 = b"a"
+ assert not verifier.verify(b"foo", bad_signature2)
+
+ def test_from_string_pub_key(self):
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_key_unicode(self):
+ public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
+ verifier = _python_rsa.RSAVerifier.from_string(public_key)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_cert(self):
+ verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_CERT_BYTES)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_cert_unicode(self):
+ public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
+ verifier = _python_rsa.RSAVerifier.from_string(public_cert)
+ assert isinstance(verifier, _python_rsa.RSAVerifier)
+ assert isinstance(verifier._pubkey, rsa.key.PublicKey)
+
+ def test_from_string_pub_cert_failure(self):
+ cert_bytes = PUBLIC_CERT_BYTES
+ true_der = rsa.pem.load_pem(cert_bytes, "CERTIFICATE")
+ load_pem_patch = mock.patch(
+ "rsa.pem.load_pem", return_value=true_der + b"extra", autospec=True
+ )
+
+ with load_pem_patch as load_pem:
+ with pytest.raises(ValueError):
+ _python_rsa.RSAVerifier.from_string(cert_bytes)
+ load_pem.assert_called_once_with(cert_bytes, "CERTIFICATE")
+
+
+class TestRSASigner(object):
+ def test_from_string_pkcs1(self):
+ signer = _python_rsa.RSASigner.from_string(PKCS1_KEY_BYTES)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs1_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
+ signer = _python_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs8(self):
+ signer = _python_rsa.RSASigner.from_string(PKCS8_KEY_BYTES)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs8_extra_bytes(self):
+ key_bytes = PKCS8_KEY_BYTES
+ _, pem_bytes = pem.readPemBlocksFromFile(
+ io.StringIO(_helpers.from_bytes(key_bytes)), _python_rsa._PKCS8_MARKER
+ )
+
+ key_info, remaining = None, "extra"
+ decode_patch = mock.patch(
+ "pyasn1.codec.der.decoder.decode",
+ return_value=(key_info, remaining),
+ autospec=True,
+ )
+
+ with decode_patch as decode:
+ with pytest.raises(ValueError):
+ _python_rsa.RSASigner.from_string(key_bytes)
+ # Verify mock was called.
+ decode.assert_called_once_with(pem_bytes, asn1Spec=_python_rsa._PKCS8_SPEC)
+
+ def test_from_string_pkcs8_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS8_KEY_BYTES)
+ signer = _python_rsa.RSASigner.from_string(key_bytes)
+ assert isinstance(signer, _python_rsa.RSASigner)
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_string_pkcs12(self):
+ with pytest.raises(ValueError):
+ _python_rsa.RSASigner.from_string(PKCS12_KEY_BYTES)
+
+ def test_from_string_bogus_key(self):
+ key_bytes = "bogus-key"
+ with pytest.raises(ValueError):
+ _python_rsa.RSASigner.from_string(key_bytes)
+
+ def test_from_service_account_info(self):
+ signer = _python_rsa.RSASigner.from_service_account_info(SERVICE_ACCOUNT_INFO)
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.key.PrivateKey)
+
+ def test_from_service_account_info_missing_key(self):
+ with pytest.raises(ValueError) as excinfo:
+ _python_rsa.RSASigner.from_service_account_info({})
+
+ assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
+
+ def test_from_service_account_file(self):
+ signer = _python_rsa.RSASigner.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, rsa.key.PrivateKey)
diff --git a/contrib/python/google-auth/py3/tests/crypt/test_crypt.py b/contrib/python/google-auth/py3/tests/crypt/test_crypt.py
new file mode 100644
index 0000000000..97c2abc257
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/crypt/test_crypt.py
@@ -0,0 +1,59 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from google.auth import crypt
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
+# > -keyout privatekey.pem
+# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+# To generate other_cert.pem:
+# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out other_cert.pem
+
+with open(os.path.join(DATA_DIR, "other_cert.pem"), "rb") as fh:
+ OTHER_CERT_BYTES = fh.read()
+
+
+def test_verify_signature():
+ to_sign = b"foo"
+ signer = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ signature = signer.sign(to_sign)
+
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ # List of certs
+ assert crypt.verify_signature(
+ to_sign, signature, [OTHER_CERT_BYTES, PUBLIC_CERT_BYTES]
+ )
+
+
+def test_verify_signature_failure():
+ to_sign = b"foo"
+ signer = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES)
+ signature = signer.sign(to_sign)
+
+ assert not crypt.verify_signature(to_sign, signature, OTHER_CERT_BYTES)
diff --git a/contrib/python/google-auth/py3/tests/crypt/test_es256.py b/contrib/python/google-auth/py3/tests/crypt/test_es256.py
new file mode 100644
index 0000000000..1a43a2f01b
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/crypt/test_es256.py
@@ -0,0 +1,144 @@
+# Copyright 2016 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import os
+
+from cryptography.hazmat.primitives.asymmetric import ec
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth.crypt import base
+from google.auth.crypt import es256
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+# To generate es256_privatekey.pem, es256_publickey.pem, and
+# es256_public_cert.pem:
+# $ openssl ecparam -genkey -name prime256v1 -noout -out es256_privatekey.pem
+# $ openssl ec -in es256_privatekey.pem -pubout -out es256_publickey.pem
+# $ openssl req -new -x509 -key es256_privatekey.pem -out \
+# > es256_public_cert.pem
+
+with open(os.path.join(DATA_DIR, "es256_privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+ PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
+
+with open(os.path.join(DATA_DIR, "es256_publickey.pem"), "rb") as fh:
+ PUBLIC_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "es256_public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "es256_service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+class TestES256Verifier(object):
+ def test_verify_success(self):
+ to_sign = b"foo"
+ signer = es256.ES256Signer.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_unicode_success(self):
+ to_sign = u"foo"
+ signer = es256.ES256Signer.from_string(PRIVATE_KEY_BYTES)
+ actual_signature = signer.sign(to_sign)
+
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert verifier.verify(to_sign, actual_signature)
+
+ def test_verify_failure(self):
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ bad_signature1 = b""
+ assert not verifier.verify(b"foo", bad_signature1)
+ bad_signature2 = b"a"
+ assert not verifier.verify(b"foo", bad_signature2)
+
+ def test_verify_failure_with_wrong_raw_signature(self):
+ to_sign = b"foo"
+
+ # This signature has a wrong "r" value in the "(r,s)" raw signature.
+ wrong_signature = base64.urlsafe_b64decode(
+ b"m7oaRxUDeYqjZ8qiMwo0PZLTMZWKJLFQREpqce1StMIa_yXQQ-C5WgeIRHW7OqlYSDL0XbUrj_uAw9i-QhfOJQ=="
+ )
+
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert not verifier.verify(to_sign, wrong_signature)
+
+ def test_from_string_pub_key(self):
+ verifier = es256.ES256Verifier.from_string(PUBLIC_KEY_BYTES)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+ def test_from_string_pub_key_unicode(self):
+ public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
+ verifier = es256.ES256Verifier.from_string(public_key)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+ def test_from_string_pub_cert(self):
+ verifier = es256.ES256Verifier.from_string(PUBLIC_CERT_BYTES)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+ def test_from_string_pub_cert_unicode(self):
+ public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
+ verifier = es256.ES256Verifier.from_string(public_cert)
+ assert isinstance(verifier, es256.ES256Verifier)
+ assert isinstance(verifier._pubkey, ec.EllipticCurvePublicKey)
+
+
+class TestES256Signer(object):
+ def test_from_string_pkcs1(self):
+ signer = es256.ES256Signer.from_string(PKCS1_KEY_BYTES)
+ assert isinstance(signer, es256.ES256Signer)
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
+
+ def test_from_string_pkcs1_unicode(self):
+ key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
+ signer = es256.ES256Signer.from_string(key_bytes)
+ assert isinstance(signer, es256.ES256Signer)
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
+
+ def test_from_string_bogus_key(self):
+ key_bytes = "bogus-key"
+ with pytest.raises(ValueError):
+ es256.ES256Signer.from_string(key_bytes)
+
+ def test_from_service_account_info(self):
+ signer = es256.ES256Signer.from_service_account_info(SERVICE_ACCOUNT_INFO)
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
+
+ def test_from_service_account_info_missing_key(self):
+ with pytest.raises(ValueError) as excinfo:
+ es256.ES256Signer.from_service_account_info({})
+
+ assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
+
+ def test_from_service_account_file(self):
+ signer = es256.ES256Signer.from_service_account_file(SERVICE_ACCOUNT_JSON_FILE)
+
+ assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
+ assert isinstance(signer._key, ec.EllipticCurvePrivateKey)
diff --git a/contrib/python/google-auth/py3/tests/data/authorized_user.json b/contrib/python/google-auth/py3/tests/data/authorized_user.json
new file mode 100644
index 0000000000..4787acee57
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/authorized_user.json
@@ -0,0 +1,6 @@
+{
+ "client_id": "123",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user"
+}
diff --git a/contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk.json b/contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk.json
new file mode 100644
index 0000000000..c9e19a66e0
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk.json
@@ -0,0 +1,6 @@
+{
+ "client_id": "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user"
+}
diff --git a/contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json b/contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json
new file mode 100644
index 0000000000..53a8ff88aa
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/authorized_user_cloud_sdk_with_quota_project_id.json
@@ -0,0 +1,7 @@
+{
+ "client_id": "764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user",
+ "quota_project_id": "quota_project_id"
+}
diff --git a/contrib/python/google-auth/py3/tests/data/authorized_user_with_rapt_token.json b/contrib/python/google-auth/py3/tests/data/authorized_user_with_rapt_token.json
new file mode 100644
index 0000000000..64b161d422
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/authorized_user_with_rapt_token.json
@@ -0,0 +1,8 @@
+{
+ "client_id": "123",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user",
+ "rapt_token": "rapt"
+ }
+ \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/client_secrets.json b/contrib/python/google-auth/py3/tests/data/client_secrets.json
new file mode 100644
index 0000000000..1baa4995af
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/client_secrets.json
@@ -0,0 +1,14 @@
+{
+ "web": {
+ "client_id": "example.apps.googleusercontent.com",
+ "project_id": "example",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "client_secret": "itsasecrettoeveryone",
+ "redirect_uris": [
+ "urn:ietf:wg:oauth:2.0:oob",
+ "http://localhost"
+ ]
+ }
+}
diff --git a/contrib/python/google-auth/py3/tests/data/context_aware_metadata.json b/contrib/python/google-auth/py3/tests/data/context_aware_metadata.json
new file mode 100644
index 0000000000..ec40e783f1
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/context_aware_metadata.json
@@ -0,0 +1,6 @@
+{
+ "cert_provider_command":[
+ "/opt/google/endpoint-verification/bin/SecureConnectHelper",
+ "--print_certificate"],
+ "device_resource_ids":["11111111-1111-1111"]
+}
diff --git a/contrib/python/google-auth/py3/tests/data/enterprise_cert_invalid.json b/contrib/python/google-auth/py3/tests/data/enterprise_cert_invalid.json
new file mode 100644
index 0000000000..4715a590a1
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/enterprise_cert_invalid.json
@@ -0,0 +1,3 @@
+{
+ "libs": {}
+} \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/enterprise_cert_valid.json b/contrib/python/google-auth/py3/tests/data/enterprise_cert_valid.json
new file mode 100644
index 0000000000..e445f55f8a
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/enterprise_cert_valid.json
@@ -0,0 +1,6 @@
+{
+ "libs": {
+ "ecp_client": "/path/to/signer/lib",
+ "tls_offload": "/path/to/offload/lib"
+ }
+}
diff --git a/contrib/python/google-auth/py3/tests/data/es256_privatekey.pem b/contrib/python/google-auth/py3/tests/data/es256_privatekey.pem
new file mode 100644
index 0000000000..5c950b514f
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/es256_privatekey.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIAIC57aTx5ev4T2HBMQk4fXV09AzLDQ3Ju1uNoEB0LngoAoGCCqGSM49
+AwEHoUQDQgAEsACsrmP6Bp216OCFm73C8W/VRHZWcO8yU/bMwx96f05BkTII3KeJ
+z2O0IRAnXfso8K6YsjMuUDGCfj+b1IDIoA==
+-----END EC PRIVATE KEY-----
diff --git a/contrib/python/google-auth/py3/tests/data/es256_public_cert.pem b/contrib/python/google-auth/py3/tests/data/es256_public_cert.pem
new file mode 100644
index 0000000000..774ca14843
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/es256_public_cert.pem
@@ -0,0 +1,8 @@
+-----BEGIN CERTIFICATE-----
+MIIBGDCBwAIJAPUA0H4EQWsdMAoGCCqGSM49BAMCMBUxEzARBgNVBAMMCnVuaXQt
+dGVzdHMwHhcNMTkwNTA5MDI1MDExWhcNMTkwNjA4MDI1MDExWjAVMRMwEQYDVQQD
+DAp1bml0LXRlc3RzMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEsACsrmP6Bp21
+6OCFm73C8W/VRHZWcO8yU/bMwx96f05BkTII3KeJz2O0IRAnXfso8K6YsjMuUDGC
+fj+b1IDIoDAKBggqhkjOPQQDAgNHADBEAh8PcDTMyWk8SHqV/v8FLuMbDxdtAsq2
+dwCpuHQwqCcmAiEAnwtkiyieN+8zozaf1P4QKp2mAqNGqua50y3ua5uVotc=
+-----END CERTIFICATE-----
diff --git a/contrib/python/google-auth/py3/tests/data/es256_publickey.pem b/contrib/python/google-auth/py3/tests/data/es256_publickey.pem
new file mode 100644
index 0000000000..51f2a03fa4
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/es256_publickey.pem
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEsACsrmP6Bp216OCFm73C8W/VRHZW
+cO8yU/bMwx96f05BkTII3KeJz2O0IRAnXfso8K6YsjMuUDGCfj+b1IDIoA==
+-----END PUBLIC KEY-----
diff --git a/contrib/python/google-auth/py3/tests/data/es256_service_account.json b/contrib/python/google-auth/py3/tests/data/es256_service_account.json
new file mode 100644
index 0000000000..dd26719f62
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/es256_service_account.json
@@ -0,0 +1,10 @@
+{
+ "type": "service_account",
+ "project_id": "example-project",
+ "private_key_id": "1",
+ "private_key": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIAIC57aTx5ev4T2HBMQk4fXV09AzLDQ3Ju1uNoEB0LngoAoGCCqGSM49\nAwEHoUQDQgAEsACsrmP6Bp216OCFm73C8W/VRHZWcO8yU/bMwx96f05BkTII3KeJ\nz2O0IRAnXfso8K6YsjMuUDGCfj+b1IDIoA==\n-----END EC PRIVATE KEY-----",
+ "client_email": "service-account@example.com",
+ "client_id": "1234",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token"
+}
diff --git a/contrib/python/google-auth/py3/tests/data/external_account_authorized_user.json b/contrib/python/google-auth/py3/tests/data/external_account_authorized_user.json
new file mode 100644
index 0000000000..e0bd20c8fd
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/external_account_authorized_user.json
@@ -0,0 +1,9 @@
+{
+ "type": "external_account_authorized_user",
+ "audience": "//iam.googleapis.com/locations/global/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID",
+ "refresh_token": "refreshToken",
+ "token_url": "https://sts.googleapis.com/v1/oauth/token",
+ "token_info_url": "https://sts.googleapis.com/v1/instrospect",
+ "client_id": "clientId",
+ "client_secret": "clientSecret"
+}
diff --git a/contrib/python/google-auth/py3/tests/data/external_subject_token.json b/contrib/python/google-auth/py3/tests/data/external_subject_token.json
new file mode 100644
index 0000000000..a47ec34127
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/external_subject_token.json
@@ -0,0 +1,3 @@
+{
+ "access_token": "HEADER.SIMULATED_JWT_PAYLOAD.SIGNATURE"
+} \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/external_subject_token.txt b/contrib/python/google-auth/py3/tests/data/external_subject_token.txt
new file mode 100644
index 0000000000..c668d8f71d
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/external_subject_token.txt
@@ -0,0 +1 @@
+HEADER.SIMULATED_JWT_PAYLOAD.SIGNATURE \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/gdch_service_account.json b/contrib/python/google-auth/py3/tests/data/gdch_service_account.json
new file mode 100644
index 0000000000..172164e9fa
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/gdch_service_account.json
@@ -0,0 +1,11 @@
+{
+ "type": "gdch_service_account",
+ "format_version": "1",
+ "project": "project_foo",
+ "private_key_id": "key_foo",
+ "private_key": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIIGb2np7v54Hs6++NiLE7CQtQg7rzm4znstHvrOUlcMMoAoGCCqGSM49\nAwEHoUQDQgAECvv0VyZS9nYOa8tdwKCbkNxlWgrAZVClhJXqrvOZHlH4N3d8Rplk\n2DEJvzp04eMxlHw1jm6JCs3iJR6KAokG+w==\n-----END EC PRIVATE KEY-----\n",
+ "name": "service_identity_name",
+ "ca_cert_path": "/path/to/ca/cert",
+ "token_uri": "https://service-identity.<Domain>/authenticate"
+}
+
diff --git a/contrib/python/google-auth/py3/tests/data/impersonated_service_account_authorized_user_source.json b/contrib/python/google-auth/py3/tests/data/impersonated_service_account_authorized_user_source.json
new file mode 100644
index 0000000000..0e545392cc
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/impersonated_service_account_authorized_user_source.json
@@ -0,0 +1,13 @@
+{
+ "delegates": [
+ "service-account-delegate@example.com"
+ ],
+ "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/service-account-target@example.com:generateAccessToken",
+ "source_credentials": {
+ "client_id": "123",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user"
+ },
+ "type": "impersonated_service_account"
+} \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/impersonated_service_account_service_account_source.json b/contrib/python/google-auth/py3/tests/data/impersonated_service_account_service_account_source.json
new file mode 100644
index 0000000000..e1ff8e81f7
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/impersonated_service_account_service_account_source.json
@@ -0,0 +1,17 @@
+{
+ "delegates": [
+ "service-account-delegate@example.com"
+ ],
+ "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/service-account-target@example.com:generateAccessToken",
+ "source_credentials": {
+ "type": "service_account",
+ "project_id": "example-project",
+ "private_key_id": "1",
+ "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj\n7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/\nxmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs\nSliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18\npe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk\nSBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk\nnQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq\nHD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y\nnHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9\nIisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2\nYCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU\nZ422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ\nvzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP\nB8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl\naLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2\neCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI\naqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk\nklORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ\nCFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu\nUqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg\nsoBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28\nbvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH\n504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL\nYXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx\nBeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==\n-----END RSA PRIVATE KEY-----\n",
+ "client_email": "service-account@example.com",
+ "client_id": "1234",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token"
+ },
+ "type": "impersonated_service_account"
+} \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/impersonated_service_account_with_quota_project.json b/contrib/python/google-auth/py3/tests/data/impersonated_service_account_with_quota_project.json
new file mode 100644
index 0000000000..89db9617c4
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/impersonated_service_account_with_quota_project.json
@@ -0,0 +1,14 @@
+{
+ "delegates": [
+ "service-account-delegate@example.com"
+ ],
+ "quota_project_id": "quota_project",
+ "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/service-account-target@example.com:generateAccessToken",
+ "source_credentials": {
+ "client_id": "123",
+ "client_secret": "secret",
+ "refresh_token": "alabalaportocala",
+ "type": "authorized_user"
+ },
+ "type": "impersonated_service_account"
+} \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/data/old_oauth_credentials_py3.pickle b/contrib/python/google-auth/py3/tests/data/old_oauth_credentials_py3.pickle
new file mode 100644
index 0000000000..c8a05599b1
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/old_oauth_credentials_py3.pickle
Binary files differ
diff --git a/contrib/python/google-auth/py3/tests/data/other_cert.pem b/contrib/python/google-auth/py3/tests/data/other_cert.pem
new file mode 100644
index 0000000000..6895d1e7bf
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/other_cert.pem
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFtTCCA52gAwIBAgIJAPBsLZmNGfKtMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQwHhcNMTYwOTIxMDI0NTEyWhcNMTYxMDIxMDI0NTEyWjBF
+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
+ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAsiMC7mTsmUXwZoYlT4aHY1FLw8bxIXC+z3IqA+TY1WqfbeiZRo8MA5Zx
+lTTxYMKPCZUE1XBc7jvD8GJhWIj6pToPYHn73B01IBkLBxq4kF1yV2Z7DVmkvc6H
+EcxXXq8zkCx0j6XOfiI4+qkXnuQn8cvrk8xfhtnMMZM7iVm6VSN93iRP/8ey6xuL
+XTHrDX7ukoRce1hpT8O+15GXNrY0irhhYQz5xKibNCJF3EjV28WMry8y7I8uYUFU
+RWDiQawwK9ec1zhZ94v92+GZDlPevmcFmSERKYQ0NsKcT0Y3lGuGnaExs8GyOpnC
+oksu4YJGXQjg7lkv4MxzsNbRqmCkUwxw1Mg6FP0tsCNsw9qTrkvWCRA9zp/aU+sZ
+IBGh1t4UGCub8joeQFvHxvr/3F7mH/dyvCjA34u0Lo1VPx+jYUIi9i0odltMspDW
+xOpjqdGARZYmlJP5Au9q5cQjPMcwS/EBIb8cwNl32mUE6WnFlep+38mNR/FghIjO
+ViAkXuKQmcHe6xppZAoHFsO/t3l4Tjek5vNW7erI1rgrFku/fvkIW/G8V1yIm/+Q
+F+CE4maQzCJfhftpkhM/sPC/FuLNBmNE8BHVX8y58xG4is/cQxL4Z9TsFIw0C5+3
+uTrFW9D0agysahMVzPGtCqhDQqJdIJrBQqlS6bztpzBA8zEI0skCAwEAAaOBpzCB
+pDAdBgNVHQ4EFgQUz/8FmW6TfqXyNJZr7rhc+Tn5sKQwdQYDVR0jBG4wbIAUz/8F
+mW6TfqXyNJZr7rhc+Tn5sKShSaRHMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpT
+b21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGSCCQDw
+bC2ZjRnyrTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQCQmrcfhurX
+riR3Q0Y+nq040/3dJIAJXjyI9CEtxaU0nzCNTng7PwgZ0CKmCelQfInuwWFwBSHS
+6kBfC1rgJeFnjnTt8a3RCgRlIgUr9NCdPSEccB7TurobwPJ2h6cJjjR8urcb0CXh
+CEMvPneyPj0xUFY8vVKXMGWahz/kyfwIiVqcX/OtMZ29fUu1onbWl71g2gVLtUZl
+sECdZ+AC/6HDCVpYIVETMl1T7N/XyqXZQiDLDNRDeZhnapz8w9fsW1KVujAZLNQR
+pVnw2qa2UK1dSf2FHX+lQU5mFSYM4vtwaMlX/LgfdLZ9I796hFh619WwTVz+LO2N
+vHnwBMabld3XSPuZRqlbBulDQ07Vbqdjv8DYSLA2aKI4ZkMMKuFLG/oS28V2ZYmv
+/KpGEs5UgKY+P9NulYpTDwCU/6SomuQpP795wbG6sm7Hzq82r2RmB61GupNRGeqi
+pXKsy69T388zBxYu6zQrosXiDl5YzaViH7tm0J7opye8dCWjjpnahki0vq2znti7
+6cWla2j8Xz1glvLz+JI/NCOMfxUInb82T7ijo80N0VJ2hzf7p2GxRZXAxAV9knLI
+nM4F5TLjSd7ZhOOZ7ni/eZFueTMisWfypt2nc41whGjHMX/Zp1kPfhB4H2bLKIX/
+lSrwNr3qbGTEJX8JqpDBNVAd96XkMvDNyA==
+-----END CERTIFICATE-----
diff --git a/contrib/python/google-auth/py3/tests/data/pem_from_pkcs12.pem b/contrib/python/google-auth/py3/tests/data/pem_from_pkcs12.pem
new file mode 100644
index 0000000000..2d77e10c1f
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/pem_from_pkcs12.pem
@@ -0,0 +1,32 @@
+Bag Attributes
+ friendlyName: key
+ localKeyID: 22 7E 04 FC 64 48 20 83 1E C1 BD E3 F5 2F 44 7D EA 99 A5 BC
+Key Attributes: <No Attributes>
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDh6PSnttDsv+vi
+tUZTP1E3hVBah6PUGDWZhYgNiyW8quTWCmPvBmCR2YzuhUrY5+CtKP8UJOQico+p
+oJHSAPsrzSr6YsGs3c9SQOslBmm9Fkh9/f/GZVTVZ6u5AsUmOcVvZ2q7Sz8Vj/aR
+aIm0EJqRe9cQ5vvN9sg25rIv4xKwIZJ1VixKWJLmpCmDINqn7xvl+ldlUmSr3aGt
+w21uSDuEJhQlzO3yf2FwJMkJ9SkCm9oVDXyl77OnKXj5bOQ/rojbyGeIxDJSUDWE
+GKyRPuqKi6rSbwg6h2G/Z9qBJkqM5NNTbGRIFz/9/LdmmwvtaqCxlLtD7RVEryAp
++qTGDk5hAgMBAAECggEBAMYYfNDEYpf4A2SdCLne/9zrrfZ0kphdUkL48MDPj5vN
+TzTRj6f9s5ixZ/+QKn3hdwbguCx13QbH5mocP0IjUhyqoFFHYAWxyyaZfpjM8tO4
+QoEYxby3BpjLe62UXESUzChQSytJZFwIDXKcdIPNO3zvVzufEJcfG5no2b9cIvsG
+Dy6J1FNILWxCtDIqBM+G1B1is9DhZnUDgn0iKzINiZmh1I1l7k/4tMnozVIKAfwo
+f1kYjG/d2IzDM02mTeTElz3IKeNriaOIYTZgI26xLJxTkiFnBV4JOWFAZw15X+yR
++DrjGSIkTfhzbLa20Vt3AFM+LFK0ZoXT2dRnjbYPjQECgYEA+9XJFGwLcEX6pl1p
+IwXAjXKJdju9DDn4lmHTW0Pbw25h1EXONwm/NPafwsWmPll9kW9IwsxUQVUyBC9a
+c3Q7rF1e8ai/qqVFRIZof275MI82ciV2Mw8Hz7FPAUyoju5CvnjAEH4+irt1VE/7
+SgdvQ1gDBQFegS69ijdz+cOhFxkCgYEA5aVoseMy/gIlsCvNPyw9+Jz/zBpKItX0
+jGzdF7lhERRO2cursujKaoHntRckHcE3P/Z4K565bvVq+VaVG0T/BcBKPmPHrLmY
+iuVXidltW7Jh9/RCVwb5+BvqlwlC470PEwhqoUatY/fPJ74srztrqJHvp1L29FT5
+sdmlJW8YwokCgYAUa3dMgp5C0knKp5RY1KSSU5E11w4zKZgwiWob4lq1dAPWtHpO
+GCo63yyBHImoUJVP75gUw4Cpc4EEudo5tlkIVuHV8nroGVKOhd9/Rb5K47Hke4kk
+Brn5a0Ues9qPDF65Fw1ryPDFSwHufjXAAO5SpZZJF51UGDgiNvDedbBgMQKBgHSk
+t7DjPhtW69234eCckD2fQS5ijBV1p2lMQmCygGM0dXiawvN02puOsCqDPoz+fxm2
+DwPY80cw0M0k9UeMnBxHt25JMDrDan/iTbxu++T/jlNrdebOXFlxlI5y3c7fULDS
+LZcNVzTXwhjlt7yp6d0NgzTyJw2ju9BiREfnTiRBAoGBAOPHrTOnPyjO+bVcCPTB
+WGLsbBd77mVPGIuL0XGrvbVYPE8yIcNbZcthd8VXL/38Ygy8SIZh2ZqsrU1b5WFa
+XUMLnGEODSS8x/GmW3i3KeirW5OxBNjfUzEF4XkJP8m41iTdsQEXQf9DdUY7X+CB
+VL5h7N0VstYhGgycuPpcIUQa
+-----END PRIVATE KEY-----
diff --git a/contrib/python/google-auth/py3/tests/data/privatekey.p12 b/contrib/python/google-auth/py3/tests/data/privatekey.p12
new file mode 100644
index 0000000000..c369ecb6e6
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/privatekey.p12
Binary files differ
diff --git a/contrib/python/google-auth/py3/tests/data/privatekey.pem b/contrib/python/google-auth/py3/tests/data/privatekey.pem
new file mode 100644
index 0000000000..57443540ad
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/privatekey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj
+7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/
+xmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs
+SliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18
+pe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk
+SBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk
+nQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq
+HD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y
+nHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9
+IisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2
+YCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU
+Z422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ
+vzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP
+B8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl
+aLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2
+eCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI
+aqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk
+klORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ
+CFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu
+UqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg
+soBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28
+bvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH
+504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL
+YXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx
+BeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/python/google-auth/py3/tests/data/privatekey.pub b/contrib/python/google-auth/py3/tests/data/privatekey.pub
new file mode 100644
index 0000000000..11fdaa42f0
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/privatekey.pub
@@ -0,0 +1,8 @@
+-----BEGIN RSA PUBLIC KEY-----
+MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+kdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/xmVU
+1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYsSliS
+5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18pe+z
+pyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xkSBc/
+/fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+-----END RSA PUBLIC KEY-----
diff --git a/contrib/python/google-auth/py3/tests/data/public_cert.pem b/contrib/python/google-auth/py3/tests/data/public_cert.pem
new file mode 100644
index 0000000000..7af6ca3f93
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/public_cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDIzCCAgugAwIBAgIJAMfISuBQ5m+5MA0GCSqGSIb3DQEBBQUAMBUxEzARBgNV
+BAMTCnVuaXQtdGVzdHMwHhcNMTExMjA2MTYyNjAyWhcNMjExMjAzMTYyNjAyWjAV
+MRMwEQYDVQQDEwp1bml0LXRlc3RzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZgkdmM
+7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/xmVU1Wer
+uQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYsSliS5qQp
+gyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18pe+zpyl4
++WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xkSBc//fy3
+ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABo3YwdDAdBgNVHQ4EFgQU2RQ8yO+O
+gN8oVW2SW7RLrfYd9jEwRQYDVR0jBD4wPIAU2RQ8yO+OgN8oVW2SW7RLrfYd9jGh
+GaQXMBUxEzARBgNVBAMTCnVuaXQtdGVzdHOCCQDHyErgUOZvuTAMBgNVHRMEBTAD
+AQH/MA0GCSqGSIb3DQEBBQUAA4IBAQBRv+M/6+FiVu7KXNjFI5pSN17OcW5QUtPr
+odJMlWrJBtynn/TA1oJlYu3yV5clc/71Vr/AxuX5xGP+IXL32YDF9lTUJXG/uUGk
++JETpKmQviPbRsvzYhz4pf6ZIOZMc3/GIcNq92ECbseGO+yAgyWUVKMmZM0HqXC9
+ovNslqe0M8C1sLm1zAR5z/h/litE7/8O2ietija3Q/qtl2TOXJdCA6sgjJX2WUql
+ybrC55ct18NKf3qhpcEkGQvFU40rVYApJpi98DiZPYFdx1oBDp/f4uZ3ojpxRVFT
+cDwcJLfNRCPUhormsY7fDS9xSyThiHsW9mjJYdcaKQkwYZ0F11yB
+-----END CERTIFICATE-----
diff --git a/contrib/python/google-auth/py3/tests/data/service_account.json b/contrib/python/google-auth/py3/tests/data/service_account.json
new file mode 100644
index 0000000000..9e76f4d355
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/service_account.json
@@ -0,0 +1,10 @@
+{
+ "type": "service_account",
+ "project_id": "example-project",
+ "private_key_id": "1",
+ "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj\n7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/\nxmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs\nSliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18\npe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk\nSBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk\nnQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq\nHD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y\nnHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9\nIisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2\nYCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU\nZ422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ\nvzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP\nB8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl\naLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2\neCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI\naqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk\nklORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ\nCFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu\nUqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg\nsoBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28\nbvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH\n504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL\nYXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx\nBeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==\n-----END RSA PRIVATE KEY-----\n",
+ "client_email": "service-account@example.com",
+ "client_id": "1234",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://accounts.google.com/o/oauth2/token"
+}
diff --git a/contrib/python/google-auth/py3/tests/data/service_account_non_gdu.json b/contrib/python/google-auth/py3/tests/data/service_account_non_gdu.json
new file mode 100644
index 0000000000..976184f8c2
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/data/service_account_non_gdu.json
@@ -0,0 +1,15 @@
+{
+ "type": "service_account",
+ "universe_domain": "universe.foo",
+ "project_id": "example_project",
+ "private_key_id": "1",
+ "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj\n7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/\nxmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs\nSliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18\npe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk\nSBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk\nnQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq\nHD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y\nnHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9\nIisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2\nYCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU\nZ422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ\nvzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP\nB8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl\naLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2\neCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI\naqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk\nklORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ\nCFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu\nUqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg\nsoBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28\nbvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH\n504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL\nYXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx\nBeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==\n-----END RSA PRIVATE KEY-----\n",
+ "client_email": "testsa@foo.iam.gserviceaccount.com",
+ "client_id": "1234",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://oauth2.universe.foo/token",
+ "auth_provider_x509_cert_url": "https://www.universe.foo/oauth2/v1/certs",
+ "client_x509_cert_url": "https://www.universe.foo/robot/v1/metadata/x509/foo.iam.gserviceaccount.com"
+}
+
+ \ No newline at end of file
diff --git a/contrib/python/google-auth/py3/tests/oauth2/__init__.py b/contrib/python/google-auth/py3/tests/oauth2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/__init__.py
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test__client.py b/contrib/python/google-auth/py3/tests/oauth2/test__client.py
new file mode 100644
index 0000000000..54179269bd
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test__client.py
@@ -0,0 +1,622 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+import os
+import urllib
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import transport
+from google.oauth2 import _client
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+SIGNER = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+
+SCOPES_AS_LIST = [
+ "https://www.googleapis.com/auth/pubsub",
+ "https://www.googleapis.com/auth/logging.write",
+]
+SCOPES_AS_STRING = (
+ "https://www.googleapis.com/auth/pubsub"
+ " https://www.googleapis.com/auth/logging.write"
+)
+
+ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/sa"
+)
+ID_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/sa"
+)
+
+
+@pytest.mark.parametrize("retryable", [True, False])
+def test__handle_error_response(retryable):
+ response_data = {"error": "help", "error_description": "I'm alive"}
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client._handle_error_response(response_data, retryable)
+
+ assert excinfo.value.retryable == retryable
+ assert excinfo.match(r"help: I\'m alive")
+
+
+def test__handle_error_response_no_error():
+ response_data = {"foo": "bar"}
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client._handle_error_response(response_data, False)
+
+ assert not excinfo.value.retryable
+ assert excinfo.match(r"{\"foo\": \"bar\"}")
+
+
+def test__handle_error_response_not_json():
+ response_data = "this is an error message"
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client._handle_error_response(response_data, False)
+
+ assert not excinfo.value.retryable
+ assert excinfo.match(response_data)
+
+
+def test__can_retry_retryable():
+ retryable_codes = transport.DEFAULT_RETRYABLE_STATUS_CODES
+ for status_code in range(100, 600):
+ if status_code in retryable_codes:
+ assert _client._can_retry(status_code, {"error": "invalid_scope"})
+ else:
+ assert not _client._can_retry(status_code, {"error": "invalid_scope"})
+
+
+@pytest.mark.parametrize(
+ "response_data", [{"error": "internal_failure"}, {"error": "server_error"}]
+)
+def test__can_retry_message(response_data):
+ assert _client._can_retry(http_client.OK, response_data)
+
+
+@pytest.mark.parametrize(
+ "response_data",
+ [
+ {"error": "invalid_scope"},
+ {"error": {"foo": "bar"}},
+ {"error_description": {"foo", "bar"}},
+ ],
+)
+def test__can_retry_no_retry_message(response_data):
+ assert not _client._can_retry(http_client.OK, response_data)
+
+
+@pytest.mark.parametrize("mock_expires_in", [500, "500"])
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test__parse_expiry(unused_utcnow, mock_expires_in):
+ result = _client._parse_expiry({"expires_in": mock_expires_in})
+ assert result == datetime.datetime.min + datetime.timedelta(seconds=500)
+
+
+def test__parse_expiry_none():
+ assert _client._parse_expiry({}) is None
+
+
+def make_request(response_data, status=http_client.OK):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = json.dumps(response_data).encode("utf-8")
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+ return request
+
+
+def test__token_endpoint_request():
+ request = make_request({"test": "response"})
+
+ result = _client._token_endpoint_request(
+ request, "http://example.com", {"test": "params"}
+ )
+
+ # Check request call
+ request.assert_called_with(
+ method="POST",
+ url="http://example.com",
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ body="test=params".encode("utf-8"),
+ )
+
+ # Check result
+ assert result == {"test": "response"}
+
+
+def test__token_endpoint_request_use_json():
+ request = make_request({"test": "response"})
+
+ result = _client._token_endpoint_request(
+ request,
+ "http://example.com",
+ {"test": "params"},
+ access_token="access_token",
+ use_json=True,
+ )
+
+ # Check request call
+ request.assert_called_with(
+ method="POST",
+ url="http://example.com",
+ headers={
+ "Content-Type": "application/json",
+ "Authorization": "Bearer access_token",
+ },
+ body=b'{"test": "params"}',
+ )
+
+ # Check result
+ assert result == {"test": "response"}
+
+
+def test__token_endpoint_request_error():
+ request = make_request({}, status=http_client.BAD_REQUEST)
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(request, "http://example.com", {})
+
+
+def test__token_endpoint_request_internal_failure_error():
+ request = make_request(
+ {"error_description": "internal_failure"}, status=http_client.BAD_REQUEST
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(
+ request, "http://example.com", {"error_description": "internal_failure"}
+ )
+ # request should be called once and then with 3 retries
+ assert request.call_count == 4
+
+ request = make_request(
+ {"error": "internal_failure"}, status=http_client.BAD_REQUEST
+ )
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(
+ request, "http://example.com", {"error": "internal_failure"}
+ )
+ # request should be called once and then with 3 retries
+ assert request.call_count == 4
+
+
+def test__token_endpoint_request_internal_failure_and_retry_failure_error():
+ retryable_error = mock.create_autospec(transport.Response, instance=True)
+ retryable_error.status = http_client.BAD_REQUEST
+ retryable_error.data = json.dumps({"error_description": "internal_failure"}).encode(
+ "utf-8"
+ )
+
+ unretryable_error = mock.create_autospec(transport.Response, instance=True)
+ unretryable_error.status = http_client.BAD_REQUEST
+ unretryable_error.data = json.dumps({"error_description": "invalid_scope"}).encode(
+ "utf-8"
+ )
+
+ request = mock.create_autospec(transport.Request)
+
+ request.side_effect = [retryable_error, retryable_error, unretryable_error]
+
+ with pytest.raises(exceptions.RefreshError):
+ _client._token_endpoint_request(
+ request, "http://example.com", {"error_description": "invalid_scope"}
+ )
+ # request should be called three times. Two retryable errors and one
+ # unretryable error to break the retry loop.
+ assert request.call_count == 3
+
+
+def test__token_endpoint_request_internal_failure_and_retry_succeeds():
+ retryable_error = mock.create_autospec(transport.Response, instance=True)
+ retryable_error.status = http_client.BAD_REQUEST
+ retryable_error.data = json.dumps({"error_description": "internal_failure"}).encode(
+ "utf-8"
+ )
+
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = http_client.OK
+ response.data = json.dumps({"hello": "world"}).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+
+ request.side_effect = [retryable_error, response]
+
+ _ = _client._token_endpoint_request(
+ request, "http://example.com", {"test": "params"}
+ )
+
+ assert request.call_count == 2
+
+
+def test__token_endpoint_request_string_error():
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = http_client.BAD_REQUEST
+ response.data = "this is an error message"
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client._token_endpoint_request(request, "http://example.com", {})
+ assert excinfo.match("this is an error message")
+
+
+def verify_request_params(request, params):
+ request_body = request.call_args[1]["body"].decode("utf-8")
+ request_params = urllib.parse.parse_qs(request_body)
+
+ for key, value in params.items():
+ assert request_params[key][0] == value
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_jwt_grant(utcnow):
+ request = make_request(
+ {"access_token": "token", "expires_in": 500, "extra": "data"}
+ )
+
+ token, expiry, extra_data = _client.jwt_grant(
+ request, "http://example.com", "assertion_value"
+ )
+
+ # Check request call
+ verify_request_params(
+ request, {"grant_type": _client._JWT_GRANT_TYPE, "assertion": "assertion_value"}
+ )
+
+ # Check result
+ assert token == "token"
+ assert expiry == utcnow() + datetime.timedelta(seconds=500)
+ assert extra_data["extra"] == "data"
+
+
+def test_jwt_grant_no_access_token():
+ request = make_request(
+ {
+ # No access token.
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client.jwt_grant(request, "http://example.com", "assertion_value")
+ assert not excinfo.value.retryable
+
+
+def test_call_iam_generate_id_token_endpoint():
+ now = _helpers.utcnow()
+ id_token_expiry = _helpers.datetime_to_secs(now)
+ id_token = jwt.encode(SIGNER, {"exp": id_token_expiry}).decode("utf-8")
+ request = make_request({"token": id_token})
+
+ token, expiry = _client.call_iam_generate_id_token_endpoint(
+ request, "fake_email", "fake_audience", "fake_access_token"
+ )
+
+ assert (
+ request.call_args[1]["url"]
+ == "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/fake_email:generateIdToken"
+ )
+ assert request.call_args[1]["headers"]["Content-Type"] == "application/json"
+ assert (
+ request.call_args[1]["headers"]["Authorization"] == "Bearer fake_access_token"
+ )
+ response_body = json.loads(request.call_args[1]["body"])
+ assert response_body["audience"] == "fake_audience"
+ assert response_body["includeEmail"] == "true"
+ assert response_body["useEmailAzp"] == "true"
+
+ # Check result
+ assert token == id_token
+ # JWT does not store microseconds
+ now = now.replace(microsecond=0)
+ assert expiry == now
+
+
+def test_call_iam_generate_id_token_endpoint_no_id_token():
+ request = make_request(
+ {
+ # No access token.
+ "error": "no token"
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client.call_iam_generate_id_token_endpoint(
+ request, "fake_email", "fake_audience", "fake_access_token"
+ )
+ assert excinfo.match("No ID token in response")
+
+
+def test_id_token_jwt_grant():
+ now = _helpers.utcnow()
+ id_token_expiry = _helpers.datetime_to_secs(now)
+ id_token = jwt.encode(SIGNER, {"exp": id_token_expiry}).decode("utf-8")
+ request = make_request({"id_token": id_token, "extra": "data"})
+
+ token, expiry, extra_data = _client.id_token_jwt_grant(
+ request, "http://example.com", "assertion_value"
+ )
+
+ # Check request call
+ verify_request_params(
+ request, {"grant_type": _client._JWT_GRANT_TYPE, "assertion": "assertion_value"}
+ )
+
+ # Check result
+ assert token == id_token
+ # JWT does not store microseconds
+ now = now.replace(microsecond=0)
+ assert expiry == now
+ assert extra_data["extra"] == "data"
+
+
+def test_id_token_jwt_grant_no_access_token():
+ request = make_request(
+ {
+ # No access token.
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client.id_token_jwt_grant(request, "http://example.com", "assertion_value")
+ assert not excinfo.value.retryable
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_refresh_grant(unused_utcnow):
+ request = make_request(
+ {
+ "access_token": "token",
+ "refresh_token": "new_refresh_token",
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ token, refresh_token, expiry, extra_data = _client.refresh_grant(
+ request,
+ "http://example.com",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ rapt_token="rapt_token",
+ )
+
+ # Check request call
+ verify_request_params(
+ request,
+ {
+ "grant_type": _client._REFRESH_GRANT_TYPE,
+ "refresh_token": "refresh_token",
+ "client_id": "client_id",
+ "client_secret": "client_secret",
+ "rapt": "rapt_token",
+ },
+ )
+
+ # Check result
+ assert token == "token"
+ assert refresh_token == "new_refresh_token"
+ assert expiry == datetime.datetime.min + datetime.timedelta(seconds=500)
+ assert extra_data["extra"] == "data"
+
+
+@mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+def test_refresh_grant_with_scopes(unused_utcnow):
+ request = make_request(
+ {
+ "access_token": "token",
+ "refresh_token": "new_refresh_token",
+ "expires_in": 500,
+ "extra": "data",
+ "scope": SCOPES_AS_STRING,
+ }
+ )
+
+ token, refresh_token, expiry, extra_data = _client.refresh_grant(
+ request,
+ "http://example.com",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ SCOPES_AS_LIST,
+ )
+
+ # Check request call.
+ verify_request_params(
+ request,
+ {
+ "grant_type": _client._REFRESH_GRANT_TYPE,
+ "refresh_token": "refresh_token",
+ "client_id": "client_id",
+ "client_secret": "client_secret",
+ "scope": SCOPES_AS_STRING,
+ },
+ )
+
+ # Check result.
+ assert token == "token"
+ assert refresh_token == "new_refresh_token"
+ assert expiry == datetime.datetime.min + datetime.timedelta(seconds=500)
+ assert extra_data["extra"] == "data"
+
+
+def test_refresh_grant_no_access_token():
+ request = make_request(
+ {
+ # No access token.
+ "refresh_token": "new_refresh_token",
+ "expires_in": 500,
+ "extra": "data",
+ }
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ _client.refresh_grant(
+ request, "http://example.com", "refresh_token", "client_id", "client_secret"
+ )
+ assert not excinfo.value.retryable
+
+
+@mock.patch(
+ "google.auth.metrics.token_request_access_token_sa_assertion",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.oauth2._client._parse_expiry", return_value=None)
+@mock.patch.object(_client, "_token_endpoint_request", autospec=True)
+def test_jwt_grant_retry_default(
+ mock_token_endpoint_request, mock_expiry, mock_metrics_header_value
+):
+ _client.jwt_grant(mock.Mock(), mock.Mock(), mock.Mock())
+ mock_token_endpoint_request.assert_called_with(
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ can_retry=True,
+ headers={"x-goog-api-client": ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE},
+ )
+
+
+@pytest.mark.parametrize("can_retry", [True, False])
+@mock.patch(
+ "google.auth.metrics.token_request_access_token_sa_assertion",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.oauth2._client._parse_expiry", return_value=None)
+@mock.patch.object(_client, "_token_endpoint_request", autospec=True)
+def test_jwt_grant_retry_with_retry(
+ mock_token_endpoint_request, mock_expiry, mock_metrics_header_value, can_retry
+):
+ _client.jwt_grant(mock.Mock(), mock.Mock(), mock.Mock(), can_retry=can_retry)
+ mock_token_endpoint_request.assert_called_with(
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ can_retry=can_retry,
+ headers={"x-goog-api-client": ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE},
+ )
+
+
+@mock.patch(
+ "google.auth.metrics.token_request_id_token_sa_assertion",
+ return_value=ID_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.auth.jwt.decode", return_value={"exp": 0})
+@mock.patch.object(_client, "_token_endpoint_request", autospec=True)
+def test_id_token_jwt_grant_retry_default(
+ mock_token_endpoint_request, mock_jwt_decode, mock_metrics_header_value
+):
+ _client.id_token_jwt_grant(mock.Mock(), mock.Mock(), mock.Mock())
+ mock_token_endpoint_request.assert_called_with(
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ can_retry=True,
+ headers={"x-goog-api-client": ID_TOKEN_REQUEST_METRICS_HEADER_VALUE},
+ )
+
+
+@pytest.mark.parametrize("can_retry", [True, False])
+@mock.patch(
+ "google.auth.metrics.token_request_id_token_sa_assertion",
+ return_value=ID_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+@mock.patch("google.auth.jwt.decode", return_value={"exp": 0})
+@mock.patch.object(_client, "_token_endpoint_request", autospec=True)
+def test_id_token_jwt_grant_retry_with_retry(
+ mock_token_endpoint_request, mock_jwt_decode, mock_metrics_header_value, can_retry
+):
+ _client.id_token_jwt_grant(
+ mock.Mock(), mock.Mock(), mock.Mock(), can_retry=can_retry
+ )
+ mock_token_endpoint_request.assert_called_with(
+ mock.ANY,
+ mock.ANY,
+ mock.ANY,
+ can_retry=can_retry,
+ headers={"x-goog-api-client": ID_TOKEN_REQUEST_METRICS_HEADER_VALUE},
+ )
+
+
+@mock.patch("google.oauth2._client._parse_expiry", return_value=None)
+@mock.patch.object(_client, "_token_endpoint_request", autospec=True)
+def test_refresh_grant_retry_default(mock_token_endpoint_request, mock_parse_expiry):
+ _client.refresh_grant(
+ mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()
+ )
+ mock_token_endpoint_request.assert_called_with(
+ mock.ANY, mock.ANY, mock.ANY, can_retry=True
+ )
+
+
+@pytest.mark.parametrize("can_retry", [True, False])
+@mock.patch("google.oauth2._client._parse_expiry", return_value=None)
+@mock.patch.object(_client, "_token_endpoint_request", autospec=True)
+def test_refresh_grant_retry_with_retry(
+ mock_token_endpoint_request, mock_parse_expiry, can_retry
+):
+ _client.refresh_grant(
+ mock.Mock(),
+ mock.Mock(),
+ mock.Mock(),
+ mock.Mock(),
+ mock.Mock(),
+ can_retry=can_retry,
+ )
+ mock_token_endpoint_request.assert_called_with(
+ mock.ANY, mock.ANY, mock.ANY, can_retry=can_retry
+ )
+
+
+@pytest.mark.parametrize("can_retry", [True, False])
+def test__token_endpoint_request_no_throw_with_retry(can_retry):
+ response_data = {"error": "help", "error_description": "I'm alive"}
+ body = "dummy body"
+
+ mock_response = mock.create_autospec(transport.Response, instance=True)
+ mock_response.status = http_client.INTERNAL_SERVER_ERROR
+ mock_response.data = json.dumps(response_data).encode("utf-8")
+
+ mock_request = mock.create_autospec(transport.Request)
+ mock_request.return_value = mock_response
+
+ _client._token_endpoint_request_no_throw(
+ mock_request, mock.Mock(), body, mock.Mock(), mock.Mock(), can_retry=can_retry
+ )
+
+ if can_retry:
+ assert mock_request.call_count == 4
+ else:
+ assert mock_request.call_count == 1
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_challenges.py b/contrib/python/google-auth/py3/tests/oauth2/test_challenges.py
new file mode 100644
index 0000000000..a06f552837
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_challenges.py
@@ -0,0 +1,198 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for the reauth module."""
+
+import base64
+import sys
+
+import mock
+import pytest # type: ignore
+import pyu2f # type: ignore
+
+from google.auth import exceptions
+from google.oauth2 import challenges
+
+
+def test_get_user_password():
+ with mock.patch("getpass.getpass", return_value="foo"):
+ assert challenges.get_user_password("") == "foo"
+
+
+def test_security_key():
+ metadata = {
+ "status": "READY",
+ "challengeId": 2,
+ "challengeType": "SECURITY_KEY",
+ "securityKey": {
+ "applicationId": "security_key_application_id",
+ "challenges": [
+ {
+ "keyHandle": "some_key",
+ "challenge": base64.urlsafe_b64encode(
+ "some_challenge".encode("ascii")
+ ).decode("ascii"),
+ }
+ ],
+ "relyingPartyId": "security_key_application_id",
+ },
+ }
+ mock_key = mock.Mock()
+
+ challenge = challenges.SecurityKeyChallenge()
+
+ # Test the case that security key challenge is passed with applicationId and
+ # relyingPartyId the same.
+ with mock.patch("pyu2f.model.RegisteredKey", return_value=mock_key):
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.return_value = "security key response"
+ assert challenge.name == "SECURITY_KEY"
+ assert challenge.is_locally_eligible
+ assert challenge.obtain_challenge_input(metadata) == {
+ "securityKey": "security key response"
+ }
+ mock_authenticate.assert_called_with(
+ "security_key_application_id",
+ [{"key": mock_key, "challenge": b"some_challenge"}],
+ print_callback=sys.stderr.write,
+ )
+
+ # Test the case that security key challenge is passed with applicationId and
+ # relyingPartyId different, first call works.
+ metadata["securityKey"]["relyingPartyId"] = "security_key_relying_party_id"
+ sys.stderr.write("metadata=" + str(metadata) + "\n")
+ with mock.patch("pyu2f.model.RegisteredKey", return_value=mock_key):
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.return_value = "security key response"
+ assert challenge.name == "SECURITY_KEY"
+ assert challenge.is_locally_eligible
+ assert challenge.obtain_challenge_input(metadata) == {
+ "securityKey": "security key response"
+ }
+ mock_authenticate.assert_called_with(
+ "security_key_relying_party_id",
+ [{"key": mock_key, "challenge": b"some_challenge"}],
+ print_callback=sys.stderr.write,
+ )
+
+ # Test the case that security key challenge is passed with applicationId and
+ # relyingPartyId different, first call fails, requires retry.
+ metadata["securityKey"]["relyingPartyId"] = "security_key_relying_party_id"
+ with mock.patch("pyu2f.model.RegisteredKey", return_value=mock_key):
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ assert challenge.name == "SECURITY_KEY"
+ assert challenge.is_locally_eligible
+ mock_authenticate.side_effect = [
+ pyu2f.errors.U2FError(pyu2f.errors.U2FError.DEVICE_INELIGIBLE),
+ "security key response",
+ ]
+ assert challenge.obtain_challenge_input(metadata) == {
+ "securityKey": "security key response"
+ }
+ calls = [
+ mock.call(
+ "security_key_relying_party_id",
+ [{"key": mock_key, "challenge": b"some_challenge"}],
+ print_callback=sys.stderr.write,
+ ),
+ mock.call(
+ "security_key_application_id",
+ [{"key": mock_key, "challenge": b"some_challenge"}],
+ print_callback=sys.stderr.write,
+ ),
+ ]
+ mock_authenticate.assert_has_calls(calls)
+
+ # Test various types of exceptions.
+ with mock.patch("pyu2f.model.RegisteredKey", return_value=mock_key):
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.U2FError(
+ pyu2f.errors.U2FError.DEVICE_INELIGIBLE
+ )
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.U2FError(
+ pyu2f.errors.U2FError.TIMEOUT
+ )
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.PluginError()
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.U2FError(
+ pyu2f.errors.U2FError.BAD_REQUEST
+ )
+ with pytest.raises(pyu2f.errors.U2FError):
+ challenge.obtain_challenge_input(metadata)
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.NoDeviceFoundError()
+ assert challenge.obtain_challenge_input(metadata) is None
+
+ with mock.patch(
+ "pyu2f.convenience.authenticator.CompositeAuthenticator.Authenticate"
+ ) as mock_authenticate:
+ mock_authenticate.side_effect = pyu2f.errors.UnsupportedVersionException()
+ with pytest.raises(pyu2f.errors.UnsupportedVersionException):
+ challenge.obtain_challenge_input(metadata)
+
+ with mock.patch.dict("sys.modules"):
+ sys.modules["pyu2f"] = None
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ challenge.obtain_challenge_input(metadata)
+ assert excinfo.match(r"pyu2f dependency is required")
+
+
+@mock.patch("getpass.getpass", return_value="foo")
+def test_password_challenge(getpass_mock):
+ challenge = challenges.PasswordChallenge()
+
+ with mock.patch("getpass.getpass", return_value="foo"):
+ assert challenge.is_locally_eligible
+ assert challenge.name == "PASSWORD"
+ assert challenges.PasswordChallenge().obtain_challenge_input({}) == {
+ "credential": "foo"
+ }
+
+ with mock.patch("getpass.getpass", return_value=None):
+ assert challenges.PasswordChallenge().obtain_challenge_input({}) == {
+ "credential": " "
+ }
+
+
+def test_saml_challenge():
+ challenge = challenges.SamlChallenge()
+ assert challenge.is_locally_eligible
+ assert challenge.name == "SAML"
+ with pytest.raises(exceptions.ReauthSamlChallengeFailError):
+ challenge.obtain_challenge_input(None)
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_credentials.py b/contrib/python/google-auth/py3/tests/oauth2/test_credentials.py
new file mode 100644
index 0000000000..f2604a5f18
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_credentials.py
@@ -0,0 +1,997 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+import pickle
+import sys
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import credentials
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+AUTH_USER_JSON_FILE = os.path.join(DATA_DIR, "authorized_user.json")
+
+with open(AUTH_USER_JSON_FILE, "r") as fh:
+ AUTH_USER_INFO = json.load(fh)
+
+
+class TestCredentials(object):
+ TOKEN_URI = "https://example.com/oauth2/token"
+ REFRESH_TOKEN = "refresh_token"
+ RAPT_TOKEN = "rapt_token"
+ CLIENT_ID = "client_id"
+ CLIENT_SECRET = "client_secret"
+
+ @classmethod
+ def make_credentials(cls):
+ return credentials.Credentials(
+ token=None,
+ refresh_token=cls.REFRESH_TOKEN,
+ token_uri=cls.TOKEN_URI,
+ client_id=cls.CLIENT_ID,
+ client_secret=cls.CLIENT_SECRET,
+ rapt_token=cls.RAPT_TOKEN,
+ enable_reauth_refresh=True,
+ )
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+ # Scopes aren't required for these credentials
+ assert not credentials.requires_scopes
+ # Test properties
+ assert credentials.refresh_token == self.REFRESH_TOKEN
+ assert credentials.token_uri == self.TOKEN_URI
+ assert credentials.client_id == self.CLIENT_ID
+ assert credentials.client_secret == self.CLIENT_SECRET
+ assert credentials.rapt_token == self.RAPT_TOKEN
+ assert credentials.refresh_handler is None
+
+ def test_token_usage_metrics(self):
+ credentials = self.make_credentials()
+ credentials.token = "token"
+ credentials.expiry = None
+
+ headers = {}
+ credentials.before_request(mock.Mock(), None, None, headers)
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-goog-api-client"] == "cred-type/u"
+
+ def test_refresh_handler_setter_and_getter(self):
+ scopes = ["email", "profile"]
+ original_refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN_1", None))
+ updated_refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN_2", None))
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=None,
+ refresh_handler=original_refresh_handler,
+ )
+
+ assert creds.refresh_handler is original_refresh_handler
+
+ creds.refresh_handler = updated_refresh_handler
+
+ assert creds.refresh_handler is updated_refresh_handler
+
+ creds.refresh_handler = None
+
+ assert creds.refresh_handler is None
+
+ def test_invalid_refresh_handler(self):
+ scopes = ["email", "profile"]
+ with pytest.raises(TypeError) as excinfo:
+ credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=None,
+ refresh_handler=object(),
+ )
+
+ assert excinfo.match("The provided refresh_handler is not a callable or None.")
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_refresh_success(self, unused_utcnow, refresh_grant):
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt_token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ credentials = self.make_credentials()
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ None,
+ self.RAPT_TOKEN,
+ True,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert credentials.token == token
+ assert credentials.expiry == expiry
+ assert credentials.id_token == mock.sentinel.id_token
+ assert credentials.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert credentials.valid
+
+ def test_refresh_no_refresh_token(self):
+ request = mock.create_autospec(transport.Request)
+ credentials_ = credentials.Credentials(token=None, refresh_token=None)
+
+ with pytest.raises(exceptions.RefreshError, match="necessary fields"):
+ credentials_.refresh(request)
+
+ request.assert_not_called()
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_refresh_with_refresh_token_and_refresh_handler(
+ self, unused_utcnow, refresh_grant
+ ):
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry,
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt_token
+ new_rapt_token,
+ )
+
+ refresh_handler = mock.Mock()
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ rapt_token=self.RAPT_TOKEN,
+ refresh_handler=refresh_handler,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ None,
+ self.RAPT_TOKEN,
+ False,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.rapt_token == new_rapt_token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert creds.valid
+
+ # Assert refresh handler not called as the refresh token has
+ # higher priority.
+ refresh_handler.assert_not_called()
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_success_scopes(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + datetime.timedelta(seconds=2800)
+ refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN", expected_expiry))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ creds.refresh(request)
+
+ assert creds.token == "ACCESS_TOKEN"
+ assert creds.expiry == expected_expiry
+ assert creds.valid
+ assert not creds.expired
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_success_default_scopes(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + datetime.timedelta(seconds=2800)
+ original_refresh_handler = mock.Mock(
+ return_value=("UNUSED_TOKEN", expected_expiry)
+ )
+ refresh_handler = mock.Mock(return_value=("ACCESS_TOKEN", expected_expiry))
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=None,
+ default_scopes=default_scopes,
+ refresh_handler=original_refresh_handler,
+ )
+
+ # Test newly set refresh_handler is used instead of the original one.
+ creds.refresh_handler = refresh_handler
+ creds.refresh(request)
+
+ assert creds.token == "ACCESS_TOKEN"
+ assert creds.expiry == expected_expiry
+ assert creds.valid
+ assert not creds.expired
+ # default_scopes should be used since no developer provided scopes
+ # are provided.
+ refresh_handler.assert_called_with(request, scopes=default_scopes)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_invalid_token(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + datetime.timedelta(seconds=2800)
+ # Simulate refresh handler does not return a valid token.
+ refresh_handler = mock.Mock(return_value=(None, expected_expiry))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ with pytest.raises(
+ exceptions.RefreshError, match="returned token is not a string"
+ ):
+ creds.refresh(request)
+
+ assert creds.token is None
+ assert creds.expiry is None
+ assert not creds.valid
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ def test_refresh_with_refresh_handler_invalid_expiry(self):
+ # Simulate refresh handler returns expiration time in an invalid unit.
+ refresh_handler = mock.Mock(return_value=("TOKEN", 2800))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ with pytest.raises(
+ exceptions.RefreshError, match="returned expiry is not a datetime object"
+ ):
+ creds.refresh(request)
+
+ assert creds.token is None
+ assert creds.expiry is None
+ assert not creds.valid
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_refresh_handler_expired_token(self, unused_utcnow):
+ expected_expiry = datetime.datetime.min + _helpers.REFRESH_THRESHOLD
+ # Simulate refresh handler returns an expired token.
+ refresh_handler = mock.Mock(return_value=("TOKEN", expected_expiry))
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ refresh_handler=refresh_handler,
+ )
+
+ with pytest.raises(exceptions.RefreshError, match="already expired"):
+ creds.refresh(request)
+
+ assert creds.token is None
+ assert creds.expiry is None
+ assert not creds.valid
+ # Confirm refresh handler called with the expected arguments.
+ refresh_handler.assert_called_with(request, scopes=scopes)
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_credentials_with_scopes_requested_refresh_success(
+ self, unused_utcnow, refresh_grant
+ ):
+ scopes = ["email", "profile"]
+ default_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token, "scope": "email profile"}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ rapt_token=self.RAPT_TOKEN,
+ enable_reauth_refresh=True,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ scopes,
+ self.RAPT_TOKEN,
+ True,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(scopes)
+ assert creds.rapt_token == new_rapt_token
+ assert creds.granted_scopes == scopes
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_credentials_with_only_default_scopes_requested(
+ self, unused_utcnow, refresh_grant
+ ):
+ default_scopes = ["email", "profile"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token, "scope": "email profile"}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ default_scopes=default_scopes,
+ rapt_token=self.RAPT_TOKEN,
+ enable_reauth_refresh=True,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ default_scopes,
+ self.RAPT_TOKEN,
+ True,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(default_scopes)
+ assert creds.rapt_token == new_rapt_token
+ assert creds.granted_scopes == default_scopes
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_credentials_with_scopes_returned_refresh_success(
+ self, unused_utcnow, refresh_grant
+ ):
+ scopes = ["email", "profile"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token, "scope": " ".join(scopes)}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ scopes=scopes,
+ rapt_token=self.RAPT_TOKEN,
+ enable_reauth_refresh=True,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ scopes,
+ self.RAPT_TOKEN,
+ True,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(scopes)
+ assert creds.rapt_token == new_rapt_token
+ assert creds.granted_scopes == scopes
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_credentials_with_only_default_scopes_requested_different_granted_scopes(
+ self, unused_utcnow, refresh_grant
+ ):
+ default_scopes = ["email", "profile"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {"id_token": mock.sentinel.id_token, "scope": "email"}
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ default_scopes=default_scopes,
+ rapt_token=self.RAPT_TOKEN,
+ enable_reauth_refresh=True,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ default_scopes,
+ self.RAPT_TOKEN,
+ True,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(default_scopes)
+ assert creds.rapt_token == new_rapt_token
+ assert creds.granted_scopes == ["email"]
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ @mock.patch("google.oauth2.reauth.refresh_grant", autospec=True)
+ @mock.patch(
+ "google.auth._helpers.utcnow",
+ return_value=datetime.datetime.min + _helpers.REFRESH_THRESHOLD,
+ )
+ def test_credentials_with_scopes_refresh_different_granted_scopes(
+ self, unused_utcnow, refresh_grant
+ ):
+ scopes = ["email", "profile"]
+ scopes_returned = ["email"]
+ token = "token"
+ new_rapt_token = "new_rapt_token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ grant_response = {
+ "id_token": mock.sentinel.id_token,
+ "scope": " ".join(scopes_returned),
+ }
+ refresh_grant.return_value = (
+ # Access token
+ token,
+ # New refresh token
+ None,
+ # Expiry
+ expiry,
+ # Extra data
+ grant_response,
+ # rapt token
+ new_rapt_token,
+ )
+
+ request = mock.create_autospec(transport.Request)
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ scopes=scopes,
+ rapt_token=self.RAPT_TOKEN,
+ enable_reauth_refresh=True,
+ )
+
+ # Refresh credentials
+ creds.refresh(request)
+
+ # Check jwt grant call.
+ refresh_grant.assert_called_with(
+ request,
+ self.TOKEN_URI,
+ self.REFRESH_TOKEN,
+ self.CLIENT_ID,
+ self.CLIENT_SECRET,
+ scopes,
+ self.RAPT_TOKEN,
+ True,
+ )
+
+ # Check that the credentials have the token and expiry
+ assert creds.token == token
+ assert creds.expiry == expiry
+ assert creds.id_token == mock.sentinel.id_token
+ assert creds.has_scopes(scopes)
+ assert creds.rapt_token == new_rapt_token
+ assert creds.granted_scopes == scopes_returned
+
+ # Check that the credentials are valid (have a token and are not
+ # expired.)
+ assert creds.valid
+
+ def test_apply_with_quota_project_id(self):
+ creds = credentials.Credentials(
+ token="token",
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ quota_project_id="quota-project-123",
+ )
+
+ headers = {}
+ creds.apply(headers)
+ assert headers["x-goog-user-project"] == "quota-project-123"
+ assert "token" in headers["authorization"]
+
+ def test_apply_with_no_quota_project_id(self):
+ creds = credentials.Credentials(
+ token="token",
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ )
+
+ headers = {}
+ creds.apply(headers)
+ assert "x-goog-user-project" not in headers
+ assert "token" in headers["authorization"]
+
+ def test_with_quota_project(self):
+ creds = credentials.Credentials(
+ token="token",
+ refresh_token=self.REFRESH_TOKEN,
+ token_uri=self.TOKEN_URI,
+ client_id=self.CLIENT_ID,
+ client_secret=self.CLIENT_SECRET,
+ quota_project_id="quota-project-123",
+ )
+
+ new_creds = creds.with_quota_project("new-project-456")
+ assert new_creds.quota_project_id == "new-project-456"
+ headers = {}
+ creds.apply(headers)
+ assert "x-goog-user-project" in headers
+
+ def test_with_token_uri(self):
+ info = AUTH_USER_INFO.copy()
+
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ new_token_uri = "https://oauth2-eu.googleapis.com/token"
+
+ assert creds._token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+
+ creds_with_new_token_uri = creds.with_token_uri(new_token_uri)
+
+ assert creds_with_new_token_uri._token_uri == new_token_uri
+
+ def test_from_authorized_user_info(self):
+ info = AUTH_USER_INFO.copy()
+
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes is None
+
+ scopes = ["email", "profile"]
+ creds = credentials.Credentials.from_authorized_user_info(info, scopes)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes == scopes
+
+ info["scopes"] = "email" # single non-array scope from file
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.scopes == [info["scopes"]]
+
+ info["scopes"] = ["email", "profile"] # array scope from file
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.scopes == info["scopes"]
+
+ expiry = datetime.datetime(2020, 8, 14, 15, 54, 1)
+ info["expiry"] = expiry.isoformat() + "Z"
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.expiry == expiry
+ assert creds.expired
+
+ def test_from_authorized_user_file(self):
+ info = AUTH_USER_INFO.copy()
+
+ creds = credentials.Credentials.from_authorized_user_file(AUTH_USER_JSON_FILE)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes is None
+ assert creds.rapt_token is None
+
+ scopes = ["email", "profile"]
+ creds = credentials.Credentials.from_authorized_user_file(
+ AUTH_USER_JSON_FILE, scopes
+ )
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes == scopes
+
+ def test_from_authorized_user_file_with_rapt_token(self):
+ info = AUTH_USER_INFO.copy()
+ file_path = os.path.join(DATA_DIR, "authorized_user_with_rapt_token.json")
+
+ creds = credentials.Credentials.from_authorized_user_file(file_path)
+ assert creds.client_secret == info["client_secret"]
+ assert creds.client_id == info["client_id"]
+ assert creds.refresh_token == info["refresh_token"]
+ assert creds.token_uri == credentials._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert creds.scopes is None
+ assert creds.rapt_token == "rapt"
+
+ def test_to_json(self):
+ info = AUTH_USER_INFO.copy()
+ expiry = datetime.datetime(2020, 8, 14, 15, 54, 1)
+ info["expiry"] = expiry.isoformat() + "Z"
+ creds = credentials.Credentials.from_authorized_user_info(info)
+ assert creds.expiry == expiry
+
+ # Test with no `strip` arg
+ json_output = creds.to_json()
+ json_asdict = json.loads(json_output)
+ assert json_asdict.get("token") == creds.token
+ assert json_asdict.get("refresh_token") == creds.refresh_token
+ assert json_asdict.get("token_uri") == creds.token_uri
+ assert json_asdict.get("client_id") == creds.client_id
+ assert json_asdict.get("scopes") == creds.scopes
+ assert json_asdict.get("client_secret") == creds.client_secret
+ assert json_asdict.get("expiry") == info["expiry"]
+
+ # Test with a `strip` arg
+ json_output = creds.to_json(strip=["client_secret"])
+ json_asdict = json.loads(json_output)
+ assert json_asdict.get("token") == creds.token
+ assert json_asdict.get("refresh_token") == creds.refresh_token
+ assert json_asdict.get("token_uri") == creds.token_uri
+ assert json_asdict.get("client_id") == creds.client_id
+ assert json_asdict.get("scopes") == creds.scopes
+ assert json_asdict.get("client_secret") is None
+
+ # Test with no expiry
+ creds.expiry = None
+ json_output = creds.to_json()
+ json_asdict = json.loads(json_output)
+ assert json_asdict.get("expiry") is None
+
+ def test_pickle_and_unpickle(self):
+ creds = self.make_credentials()
+ unpickled = pickle.loads(pickle.dumps(creds))
+
+ # make sure attributes aren't lost during pickling
+ assert list(creds.__dict__).sort() == list(unpickled.__dict__).sort()
+
+ for attr in list(creds.__dict__):
+ assert getattr(creds, attr) == getattr(unpickled, attr)
+
+ def test_pickle_and_unpickle_with_refresh_handler(self):
+ expected_expiry = _helpers.utcnow() + datetime.timedelta(seconds=2800)
+ refresh_handler = mock.Mock(return_value=("TOKEN", expected_expiry))
+
+ creds = credentials.Credentials(
+ token=None,
+ refresh_token=None,
+ token_uri=None,
+ client_id=None,
+ client_secret=None,
+ rapt_token=None,
+ refresh_handler=refresh_handler,
+ )
+ unpickled = pickle.loads(pickle.dumps(creds))
+
+ # make sure attributes aren't lost during pickling
+ assert list(creds.__dict__).sort() == list(unpickled.__dict__).sort()
+
+ for attr in list(creds.__dict__):
+ # For the _refresh_handler property, the unpickled creds should be
+ # set to None.
+ if attr == "_refresh_handler":
+ assert getattr(unpickled, attr) is None
+ else:
+ assert getattr(creds, attr) == getattr(unpickled, attr)
+
+ def test_pickle_with_missing_attribute(self):
+ creds = self.make_credentials()
+
+ # remove an optional attribute before pickling
+ # this mimics a pickle created with a previous class definition with
+ # fewer attributes
+ del creds.__dict__["_quota_project_id"]
+
+ unpickled = pickle.loads(pickle.dumps(creds))
+
+ # Attribute should be initialized by `__setstate__`
+ assert unpickled.quota_project_id is None
+
+ # pickles are not compatible across versions
+ @pytest.mark.skipif(
+ sys.version_info < (3, 5),
+ reason="pickle file can only be loaded with Python >= 3.5",
+ )
+ def test_unpickle_old_credentials_pickle(self):
+ # make sure a credentials file pickled with an older
+ # library version (google-auth==1.5.1) can be unpickled
+ with open(
+ os.path.join(DATA_DIR, "old_oauth_credentials_py3.pickle"), "rb"
+ ) as f:
+ credentials = pickle.load(f)
+ assert credentials.quota_project_id is None
+
+
+class TestUserAccessTokenCredentials(object):
+ def test_instance(self):
+ with pytest.warns(
+ UserWarning, match="UserAccessTokenCredentials is deprecated"
+ ):
+ cred = credentials.UserAccessTokenCredentials()
+ assert cred._account is None
+
+ cred = cred.with_account("account")
+ assert cred._account == "account"
+
+ @mock.patch("google.auth._cloud_sdk.get_auth_access_token", autospec=True)
+ def test_refresh(self, get_auth_access_token):
+ with pytest.warns(
+ UserWarning, match="UserAccessTokenCredentials is deprecated"
+ ):
+ get_auth_access_token.return_value = "access_token"
+ cred = credentials.UserAccessTokenCredentials()
+ cred.refresh(None)
+ assert cred.token == "access_token"
+
+ def test_with_quota_project(self):
+ with pytest.warns(
+ UserWarning, match="UserAccessTokenCredentials is deprecated"
+ ):
+ cred = credentials.UserAccessTokenCredentials()
+ quota_project_cred = cred.with_quota_project("project-foo")
+
+ assert quota_project_cred._quota_project_id == "project-foo"
+ assert quota_project_cred._account == cred._account
+
+ @mock.patch(
+ "google.oauth2.credentials.UserAccessTokenCredentials.apply", autospec=True
+ )
+ @mock.patch(
+ "google.oauth2.credentials.UserAccessTokenCredentials.refresh", autospec=True
+ )
+ def test_before_request(self, refresh, apply):
+ with pytest.warns(
+ UserWarning, match="UserAccessTokenCredentials is deprecated"
+ ):
+ cred = credentials.UserAccessTokenCredentials()
+ cred.before_request(mock.Mock(), "GET", "https://example.com", {})
+ refresh.assert_called()
+ apply.assert_called()
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_gdch_credentials.py b/contrib/python/google-auth/py3/tests/oauth2/test_gdch_credentials.py
new file mode 100644
index 0000000000..1ff61d8683
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_gdch_credentials.py
@@ -0,0 +1,175 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import datetime
+import json
+import os
+
+import mock
+import pytest # type: ignore
+import requests
+
+from google.auth import exceptions
+from google.auth import jwt
+import google.auth.transport.requests
+from google.oauth2 import gdch_credentials
+from google.oauth2.gdch_credentials import ServiceAccountCredentials
+
+import yatest.common
+
+
+class TestServiceAccountCredentials(object):
+ AUDIENCE = "https://service-identity.<Domain>/authenticate"
+ PROJECT = "project_foo"
+ PRIVATE_KEY_ID = "key_foo"
+ NAME = "service_identity_name"
+ CA_CERT_PATH = "/path/to/ca/cert"
+ TOKEN_URI = "https://service-identity.<Domain>/authenticate"
+
+ JSON_PATH = os.path.join(
+ yatest.common.test_source_path(), "data", "gdch_service_account.json"
+ )
+ with open(JSON_PATH, "rb") as fh:
+ INFO = json.load(fh)
+
+ def test_with_gdch_audience(self):
+ mock_signer = mock.Mock()
+ creds = ServiceAccountCredentials._from_signer_and_info(mock_signer, self.INFO)
+ assert creds._signer == mock_signer
+ assert creds._service_identity_name == self.NAME
+ assert creds._audience is None
+ assert creds._token_uri == self.TOKEN_URI
+ assert creds._ca_cert_path == self.CA_CERT_PATH
+
+ new_creds = creds.with_gdch_audience(self.AUDIENCE)
+ assert new_creds._signer == mock_signer
+ assert new_creds._service_identity_name == self.NAME
+ assert new_creds._audience == self.AUDIENCE
+ assert new_creds._token_uri == self.TOKEN_URI
+ assert new_creds._ca_cert_path == self.CA_CERT_PATH
+
+ def test__create_jwt(self):
+ creds = ServiceAccountCredentials.from_service_account_file(self.JSON_PATH)
+ with mock.patch("google.auth._helpers.utcnow") as utcnow:
+ utcnow.return_value = datetime.datetime.now()
+ jwt_token = creds._create_jwt()
+ header, payload, _, _ = jwt._unverified_decode(jwt_token)
+
+ expected_iss_sub_value = (
+ "system:serviceaccount:project_foo:service_identity_name"
+ )
+ assert isinstance(jwt_token, str)
+ assert header["alg"] == "ES256"
+ assert header["kid"] == self.PRIVATE_KEY_ID
+ assert payload["iss"] == expected_iss_sub_value
+ assert payload["sub"] == expected_iss_sub_value
+ assert payload["aud"] == self.AUDIENCE
+ assert payload["exp"] == (payload["iat"] + 3600)
+
+ @mock.patch(
+ "google.oauth2.gdch_credentials.ServiceAccountCredentials._create_jwt",
+ autospec=True,
+ )
+ @mock.patch("google.oauth2._client._token_endpoint_request", autospec=True)
+ def test_refresh(self, token_endpoint_request, create_jwt):
+ creds = ServiceAccountCredentials.from_service_account_info(self.INFO)
+ creds = creds.with_gdch_audience(self.AUDIENCE)
+ req = google.auth.transport.requests.Request()
+
+ mock_jwt_token = "jwt token"
+ create_jwt.return_value = mock_jwt_token
+ sts_token = "STS token"
+ token_endpoint_request.return_value = {
+ "access_token": sts_token,
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+ }
+
+ creds.refresh(req)
+
+ token_endpoint_request.assert_called_with(
+ req,
+ self.TOKEN_URI,
+ {
+ "grant_type": gdch_credentials.TOKEN_EXCHANGE_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": gdch_credentials.ACCESS_TOKEN_TOKEN_TYPE,
+ "subject_token": mock_jwt_token,
+ "subject_token_type": gdch_credentials.SERVICE_ACCOUNT_TOKEN_TYPE,
+ },
+ access_token=None,
+ use_json=True,
+ verify=self.CA_CERT_PATH,
+ )
+ assert creds.token == sts_token
+
+ def test_refresh_wrong_requests_object(self):
+ creds = ServiceAccountCredentials.from_service_account_info(self.INFO)
+ creds = creds.with_gdch_audience(self.AUDIENCE)
+ req = requests.Request()
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ creds.refresh(req)
+ assert excinfo.match(
+ "request must be a google.auth.transport.requests.Request object"
+ )
+
+ def test__from_signer_and_info_wrong_format_version(self):
+ with pytest.raises(ValueError) as excinfo:
+ ServiceAccountCredentials._from_signer_and_info(
+ mock.Mock(), {"format_version": "2"}
+ )
+ assert excinfo.match("Only format version 1 is supported")
+
+ def test_from_service_account_info_miss_field(self):
+ for field in [
+ "format_version",
+ "private_key_id",
+ "private_key",
+ "name",
+ "project",
+ "token_uri",
+ ]:
+ info_with_missing_field = copy.deepcopy(self.INFO)
+ del info_with_missing_field[field]
+ with pytest.raises(ValueError) as excinfo:
+ ServiceAccountCredentials.from_service_account_info(
+ info_with_missing_field
+ )
+ assert excinfo.match("missing fields")
+
+ @mock.patch("google.auth._service_account_info.from_filename")
+ def test_from_service_account_file(self, from_filename):
+ mock_signer = mock.Mock()
+ from_filename.return_value = (self.INFO, mock_signer)
+ creds = ServiceAccountCredentials.from_service_account_file(self.JSON_PATH)
+ from_filename.assert_called_with(
+ self.JSON_PATH,
+ require=[
+ "format_version",
+ "private_key_id",
+ "private_key",
+ "name",
+ "project",
+ "token_uri",
+ ],
+ use_rsa_signer=False,
+ )
+ assert creds._signer == mock_signer
+ assert creds._service_identity_name == self.NAME
+ assert creds._audience is None
+ assert creds._token_uri == self.TOKEN_URI
+ assert creds._ca_cert_path == self.CA_CERT_PATH
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_id_token.py b/contrib/python/google-auth/py3/tests/oauth2/test_id_token.py
new file mode 100644
index 0000000000..861f76ce4f
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_id_token.py
@@ -0,0 +1,312 @@
+# Copyright 2014 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import mock
+import pytest # type: ignore
+
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import id_token
+from google.oauth2 import service_account
+
+import yatest.common
+SERVICE_ACCOUNT_FILE = os.path.join(
+ yatest.common.test_source_path(), "data/service_account.json"
+)
+ID_TOKEN_AUDIENCE = "https://pubsub.googleapis.com"
+
+
+def make_request(status, data=None):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+
+ if data is not None:
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+ return request
+
+
+def test__fetch_certs_success():
+ certs = {"1": "cert"}
+ request = make_request(200, certs)
+
+ returned_certs = id_token._fetch_certs(request, mock.sentinel.cert_url)
+
+ request.assert_called_once_with(mock.sentinel.cert_url, method="GET")
+ assert returned_certs == certs
+
+
+def test__fetch_certs_failure():
+ request = make_request(404)
+
+ with pytest.raises(exceptions.TransportError):
+ id_token._fetch_certs(request, mock.sentinel.cert_url)
+
+ request.assert_called_once_with(mock.sentinel.cert_url, method="GET")
+
+
+@mock.patch("google.auth.jwt.decode", autospec=True)
+@mock.patch("google.oauth2.id_token._fetch_certs", autospec=True)
+def test_verify_token(_fetch_certs, decode):
+ result = id_token.verify_token(mock.sentinel.token, mock.sentinel.request)
+
+ assert result == decode.return_value
+ _fetch_certs.assert_called_once_with(
+ mock.sentinel.request, id_token._GOOGLE_OAUTH2_CERTS_URL
+ )
+ decode.assert_called_once_with(
+ mock.sentinel.token,
+ certs=_fetch_certs.return_value,
+ audience=None,
+ clock_skew_in_seconds=0,
+ )
+
+
+@mock.patch("google.auth.jwt.decode", autospec=True)
+@mock.patch("google.oauth2.id_token._fetch_certs", autospec=True)
+def test_verify_token_args(_fetch_certs, decode):
+ result = id_token.verify_token(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=mock.sentinel.certs_url,
+ )
+
+ assert result == decode.return_value
+ _fetch_certs.assert_called_once_with(mock.sentinel.request, mock.sentinel.certs_url)
+ decode.assert_called_once_with(
+ mock.sentinel.token,
+ certs=_fetch_certs.return_value,
+ audience=mock.sentinel.audience,
+ clock_skew_in_seconds=0,
+ )
+
+
+@mock.patch("google.auth.jwt.decode", autospec=True)
+@mock.patch("google.oauth2.id_token._fetch_certs", autospec=True)
+def test_verify_token_clock_skew(_fetch_certs, decode):
+ result = id_token.verify_token(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=mock.sentinel.certs_url,
+ clock_skew_in_seconds=10,
+ )
+
+ assert result == decode.return_value
+ _fetch_certs.assert_called_once_with(mock.sentinel.request, mock.sentinel.certs_url)
+ decode.assert_called_once_with(
+ mock.sentinel.token,
+ certs=_fetch_certs.return_value,
+ audience=mock.sentinel.audience,
+ clock_skew_in_seconds=10,
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_oauth2_token(verify_token):
+ verify_token.return_value = {"iss": "accounts.google.com"}
+ result = id_token.verify_oauth2_token(
+ mock.sentinel.token, mock.sentinel.request, audience=mock.sentinel.audience
+ )
+
+ assert result == verify_token.return_value
+ verify_token.assert_called_once_with(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=id_token._GOOGLE_OAUTH2_CERTS_URL,
+ clock_skew_in_seconds=0,
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_oauth2_token_clock_skew(verify_token):
+ verify_token.return_value = {"iss": "accounts.google.com"}
+ result = id_token.verify_oauth2_token(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ clock_skew_in_seconds=10,
+ )
+
+ assert result == verify_token.return_value
+ verify_token.assert_called_once_with(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=id_token._GOOGLE_OAUTH2_CERTS_URL,
+ clock_skew_in_seconds=10,
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_oauth2_token_invalid_iss(verify_token):
+ verify_token.return_value = {"iss": "invalid_issuer"}
+
+ with pytest.raises(exceptions.GoogleAuthError):
+ id_token.verify_oauth2_token(
+ mock.sentinel.token, mock.sentinel.request, audience=mock.sentinel.audience
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_firebase_token(verify_token):
+ result = id_token.verify_firebase_token(
+ mock.sentinel.token, mock.sentinel.request, audience=mock.sentinel.audience
+ )
+
+ assert result == verify_token.return_value
+ verify_token.assert_called_once_with(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=id_token._GOOGLE_APIS_CERTS_URL,
+ clock_skew_in_seconds=0,
+ )
+
+
+@mock.patch("google.oauth2.id_token.verify_token", autospec=True)
+def test_verify_firebase_token_clock_skew(verify_token):
+ result = id_token.verify_firebase_token(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ clock_skew_in_seconds=10,
+ )
+
+ assert result == verify_token.return_value
+ verify_token.assert_called_once_with(
+ mock.sentinel.token,
+ mock.sentinel.request,
+ audience=mock.sentinel.audience,
+ certs_url=id_token._GOOGLE_APIS_CERTS_URL,
+ clock_skew_in_seconds=10,
+ )
+
+
+def test_fetch_id_token_credentials_optional_request(monkeypatch):
+ monkeypatch.delenv(environment_vars.CREDENTIALS, raising=False)
+
+ # Test a request object is created if not provided
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=True):
+ with mock.patch(
+ "google.auth.compute_engine.IDTokenCredentials.__init__", return_value=None
+ ):
+ with mock.patch(
+ "google.auth.transport.requests.Request.__init__", return_value=None
+ ) as mock_request:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ mock_request.assert_called()
+
+
+def test_fetch_id_token_credentials_from_metadata_server(monkeypatch):
+ monkeypatch.delenv(environment_vars.CREDENTIALS, raising=False)
+
+ mock_req = mock.Mock()
+
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=True):
+ with mock.patch(
+ "google.auth.compute_engine.IDTokenCredentials.__init__", return_value=None
+ ) as mock_init:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE, request=mock_req)
+ mock_init.assert_called_once_with(
+ mock_req, ID_TOKEN_AUDIENCE, use_metadata_identity_endpoint=True
+ )
+
+
+def test_fetch_id_token_credentials_from_explicit_cred_json_file(monkeypatch):
+ monkeypatch.setenv(environment_vars.CREDENTIALS, SERVICE_ACCOUNT_FILE)
+
+ cred = id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ assert isinstance(cred, service_account.IDTokenCredentials)
+ assert cred._target_audience == ID_TOKEN_AUDIENCE
+
+
+def test_fetch_id_token_credentials_no_cred_exists(monkeypatch):
+ monkeypatch.delenv(environment_vars.CREDENTIALS, raising=False)
+
+ with mock.patch(
+ "google.auth.compute_engine._metadata.ping",
+ side_effect=exceptions.TransportError(),
+ ):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ assert excinfo.match(
+ r"Neither metadata server or valid service account credentials are found."
+ )
+
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=False):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ assert excinfo.match(
+ r"Neither metadata server or valid service account credentials are found."
+ )
+
+
+def test_fetch_id_token_credentials_invalid_cred_file_type(monkeypatch):
+ user_credentials_file = os.path.join(
+ yatest.common.test_source_path(), "data/authorized_user.json"
+ )
+ monkeypatch.setenv(environment_vars.CREDENTIALS, user_credentials_file)
+
+ with mock.patch("google.auth.compute_engine._metadata.ping", return_value=False):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ assert excinfo.match(
+ r"Neither metadata server or valid service account credentials are found."
+ )
+
+
+def test_fetch_id_token_credentials_invalid_json(monkeypatch):
+ not_json_file = os.path.join(yatest.common.test_source_path(), "data/public_cert.pem")
+ monkeypatch.setenv(environment_vars.CREDENTIALS, not_json_file)
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ assert excinfo.match(
+ r"GOOGLE_APPLICATION_CREDENTIALS is not valid service account credentials."
+ )
+
+
+def test_fetch_id_token_credentials_invalid_cred_path(monkeypatch):
+ not_json_file = os.path.join(yatest.common.test_source_path(), "data/not_exists.json")
+ monkeypatch.setenv(environment_vars.CREDENTIALS, not_json_file)
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ id_token.fetch_id_token_credentials(ID_TOKEN_AUDIENCE)
+ assert excinfo.match(
+ r"GOOGLE_APPLICATION_CREDENTIALS path is either not found or invalid."
+ )
+
+
+def test_fetch_id_token(monkeypatch):
+ mock_cred = mock.MagicMock()
+ mock_cred.token = "token"
+
+ mock_req = mock.Mock()
+
+ with mock.patch(
+ "google.oauth2.id_token.fetch_id_token_credentials", return_value=mock_cred
+ ) as mock_fetch:
+ token = id_token.fetch_id_token(mock_req, ID_TOKEN_AUDIENCE)
+ mock_fetch.assert_called_once_with(ID_TOKEN_AUDIENCE, request=mock_req)
+ mock_cred.refresh.assert_called_once_with(mock_req)
+ assert token == "token"
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_reauth.py b/contrib/python/google-auth/py3/tests/oauth2/test_reauth.py
new file mode 100644
index 0000000000..5b15ad3b56
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_reauth.py
@@ -0,0 +1,388 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import mock
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.oauth2 import reauth
+
+
+MOCK_REQUEST = mock.Mock()
+CHALLENGES_RESPONSE_TEMPLATE = {
+ "status": "CHALLENGE_REQUIRED",
+ "sessionId": "123",
+ "challenges": [
+ {
+ "status": "READY",
+ "challengeId": 1,
+ "challengeType": "PASSWORD",
+ "securityKey": {},
+ }
+ ],
+}
+CHALLENGES_RESPONSE_AUTHENTICATED = {
+ "status": "AUTHENTICATED",
+ "sessionId": "123",
+ "encodedProofOfReauthToken": "new_rapt_token",
+}
+
+REAUTH_START_METRICS_HEADER_VALUE = "gl-python/3.7 auth/1.1 auth-request-type/re-start"
+REAUTH_CONTINUE_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/re-cont"
+)
+TOKEN_REQUEST_METRICS_HEADER_VALUE = "gl-python/3.7 auth/1.1 cred-type/u"
+
+
+class MockChallenge(object):
+ def __init__(self, name, locally_eligible, challenge_input):
+ self.name = name
+ self.is_locally_eligible = locally_eligible
+ self.challenge_input = challenge_input
+
+ def obtain_challenge_input(self, metadata):
+ return self.challenge_input
+
+
+def _test_is_interactive():
+ with mock.patch("sys.stdin.isatty", return_value=True):
+ assert reauth.is_interactive()
+
+
+@mock.patch(
+ "google.auth.metrics.reauth_start", return_value=REAUTH_START_METRICS_HEADER_VALUE
+)
+def test__get_challenges(mock_metrics_header_value):
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request"
+ ) as mock_token_endpoint_request:
+ reauth._get_challenges(MOCK_REQUEST, ["SAML"], "token")
+ mock_token_endpoint_request.assert_called_with(
+ MOCK_REQUEST,
+ reauth._REAUTH_API + ":start",
+ {"supportedChallengeTypes": ["SAML"]},
+ access_token="token",
+ use_json=True,
+ headers={"x-goog-api-client": REAUTH_START_METRICS_HEADER_VALUE},
+ )
+
+
+@mock.patch(
+ "google.auth.metrics.reauth_start", return_value=REAUTH_START_METRICS_HEADER_VALUE
+)
+def test__get_challenges_with_scopes(mock_metrics_header_value):
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request"
+ ) as mock_token_endpoint_request:
+ reauth._get_challenges(
+ MOCK_REQUEST, ["SAML"], "token", requested_scopes=["scope"]
+ )
+ mock_token_endpoint_request.assert_called_with(
+ MOCK_REQUEST,
+ reauth._REAUTH_API + ":start",
+ {
+ "supportedChallengeTypes": ["SAML"],
+ "oauthScopesForDomainPolicyLookup": ["scope"],
+ },
+ access_token="token",
+ use_json=True,
+ headers={"x-goog-api-client": REAUTH_START_METRICS_HEADER_VALUE},
+ )
+
+
+@mock.patch(
+ "google.auth.metrics.reauth_continue",
+ return_value=REAUTH_CONTINUE_METRICS_HEADER_VALUE,
+)
+def test__send_challenge_result(mock_metrics_header_value):
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request"
+ ) as mock_token_endpoint_request:
+ reauth._send_challenge_result(
+ MOCK_REQUEST, "123", "1", {"credential": "password"}, "token"
+ )
+ mock_token_endpoint_request.assert_called_with(
+ MOCK_REQUEST,
+ reauth._REAUTH_API + "/123:continue",
+ {
+ "sessionId": "123",
+ "challengeId": "1",
+ "action": "RESPOND",
+ "proposalResponse": {"credential": "password"},
+ },
+ access_token="token",
+ use_json=True,
+ headers={"x-goog-api-client": REAUTH_CONTINUE_METRICS_HEADER_VALUE},
+ )
+
+
+def test__run_next_challenge_not_ready():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ challenges_response["challenges"][0]["status"] = "STATUS_UNSPECIFIED"
+ assert (
+ reauth._run_next_challenge(challenges_response, MOCK_REQUEST, "token") is None
+ )
+
+
+def test__run_next_challenge_not_supported():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ challenges_response["challenges"][0]["challengeType"] = "CHALLENGE_TYPE_UNSPECIFIED"
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._run_next_challenge(challenges_response, MOCK_REQUEST, "token")
+ assert excinfo.match(r"Unsupported challenge type CHALLENGE_TYPE_UNSPECIFIED")
+
+
+def test__run_next_challenge_not_locally_eligible():
+ mock_challenge = MockChallenge("PASSWORD", False, "challenge_input")
+ with mock.patch(
+ "google.oauth2.challenges.AVAILABLE_CHALLENGES", {"PASSWORD": mock_challenge}
+ ):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._run_next_challenge(
+ CHALLENGES_RESPONSE_TEMPLATE, MOCK_REQUEST, "token"
+ )
+ assert excinfo.match(r"Challenge PASSWORD is not locally eligible")
+
+
+def test__run_next_challenge_no_challenge_input():
+ mock_challenge = MockChallenge("PASSWORD", True, None)
+ with mock.patch(
+ "google.oauth2.challenges.AVAILABLE_CHALLENGES", {"PASSWORD": mock_challenge}
+ ):
+ assert (
+ reauth._run_next_challenge(
+ CHALLENGES_RESPONSE_TEMPLATE, MOCK_REQUEST, "token"
+ )
+ is None
+ )
+
+
+def test__run_next_challenge_success():
+ mock_challenge = MockChallenge("PASSWORD", True, {"credential": "password"})
+ with mock.patch(
+ "google.oauth2.challenges.AVAILABLE_CHALLENGES", {"PASSWORD": mock_challenge}
+ ):
+ with mock.patch(
+ "google.oauth2.reauth._send_challenge_result"
+ ) as mock_send_challenge_result:
+ reauth._run_next_challenge(
+ CHALLENGES_RESPONSE_TEMPLATE, MOCK_REQUEST, "token"
+ )
+ mock_send_challenge_result.assert_called_with(
+ MOCK_REQUEST, "123", 1, {"credential": "password"}, "token"
+ )
+
+
+def test__obtain_rapt_authenticated():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_AUTHENTICATED,
+ ):
+ assert reauth._obtain_rapt(MOCK_REQUEST, "token", None) == "new_rapt_token"
+
+
+def test__obtain_rapt_authenticated_after_run_next_challenge():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_TEMPLATE,
+ ):
+ with mock.patch(
+ "google.oauth2.reauth._run_next_challenge",
+ side_effect=[
+ CHALLENGES_RESPONSE_TEMPLATE,
+ CHALLENGES_RESPONSE_AUTHENTICATED,
+ ],
+ ):
+ with mock.patch("google.oauth2.reauth.is_interactive", return_value=True):
+ assert (
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None) == "new_rapt_token"
+ )
+
+
+def test__obtain_rapt_unsupported_status():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ challenges_response["status"] = "STATUS_UNSPECIFIED"
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges", return_value=challenges_response
+ ):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"API error: STATUS_UNSPECIFIED")
+
+
+def test__obtain_rapt_no_challenge_output():
+ challenges_response = copy.deepcopy(CHALLENGES_RESPONSE_TEMPLATE)
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges", return_value=challenges_response
+ ):
+ with mock.patch("google.oauth2.reauth.is_interactive", return_value=True):
+ with mock.patch(
+ "google.oauth2.reauth._run_next_challenge", return_value=None
+ ):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"Failed to obtain rapt token")
+
+
+def test__obtain_rapt_not_interactive():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_TEMPLATE,
+ ):
+ with mock.patch("google.oauth2.reauth.is_interactive", return_value=False):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"not in an interactive session")
+
+
+def test__obtain_rapt_not_authenticated():
+ with mock.patch(
+ "google.oauth2.reauth._get_challenges",
+ return_value=CHALLENGES_RESPONSE_TEMPLATE,
+ ):
+ with mock.patch("google.oauth2.reauth.RUN_CHALLENGE_RETRY_LIMIT", 0):
+ with pytest.raises(exceptions.ReauthFailError) as excinfo:
+ reauth._obtain_rapt(MOCK_REQUEST, "token", None)
+ assert excinfo.match(r"Reauthentication failed")
+
+
+def test_get_rapt_token():
+ with mock.patch(
+ "google.oauth2._client.refresh_grant", return_value=("token", None, None, None)
+ ) as mock_refresh_grant:
+ with mock.patch(
+ "google.oauth2.reauth._obtain_rapt", return_value="new_rapt_token"
+ ) as mock_obtain_rapt:
+ assert (
+ reauth.get_rapt_token(
+ MOCK_REQUEST,
+ "client_id",
+ "client_secret",
+ "refresh_token",
+ "token_uri",
+ )
+ == "new_rapt_token"
+ )
+ mock_refresh_grant.assert_called_with(
+ request=MOCK_REQUEST,
+ client_id="client_id",
+ client_secret="client_secret",
+ refresh_token="refresh_token",
+ token_uri="token_uri",
+ scopes=[reauth._REAUTH_SCOPE],
+ )
+ mock_obtain_rapt.assert_called_with(
+ MOCK_REQUEST, "token", requested_scopes=None
+ )
+
+
+@mock.patch(
+ "google.auth.metrics.token_request_user",
+ return_value=TOKEN_REQUEST_METRICS_HEADER_VALUE,
+)
+def test_refresh_grant_failed(mock_metrics_header_value):
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request_no_throw"
+ ) as mock_token_request:
+ mock_token_request.return_value = (False, {"error": "Bad request"}, False)
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ reauth.refresh_grant(
+ MOCK_REQUEST,
+ "token_uri",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ scopes=["foo", "bar"],
+ rapt_token="rapt_token",
+ enable_reauth_refresh=True,
+ )
+ assert excinfo.match(r"Bad request")
+ assert not excinfo.value.retryable
+ mock_token_request.assert_called_with(
+ MOCK_REQUEST,
+ "token_uri",
+ {
+ "grant_type": "refresh_token",
+ "client_id": "client_id",
+ "client_secret": "client_secret",
+ "refresh_token": "refresh_token",
+ "scope": "foo bar",
+ "rapt": "rapt_token",
+ },
+ headers={"x-goog-api-client": TOKEN_REQUEST_METRICS_HEADER_VALUE},
+ )
+
+
+def test_refresh_grant_failed_with_string_type_response():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request_no_throw"
+ ) as mock_token_request:
+ mock_token_request.return_value = (False, "string type error", False)
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ reauth.refresh_grant(
+ MOCK_REQUEST,
+ "token_uri",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ scopes=["foo", "bar"],
+ rapt_token="rapt_token",
+ enable_reauth_refresh=True,
+ )
+ assert excinfo.match(r"string type error")
+ assert not excinfo.value.retryable
+
+
+def test_refresh_grant_success():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request_no_throw"
+ ) as mock_token_request:
+ mock_token_request.side_effect = [
+ (False, {"error": "invalid_grant", "error_subtype": "rapt_required"}, True),
+ (True, {"access_token": "access_token"}, None),
+ ]
+ with mock.patch(
+ "google.oauth2.reauth.get_rapt_token", return_value="new_rapt_token"
+ ):
+ assert reauth.refresh_grant(
+ MOCK_REQUEST,
+ "token_uri",
+ "refresh_token",
+ "client_id",
+ "client_secret",
+ enable_reauth_refresh=True,
+ ) == (
+ "access_token",
+ "refresh_token",
+ None,
+ {"access_token": "access_token"},
+ "new_rapt_token",
+ )
+
+
+def test_refresh_grant_reauth_refresh_disabled():
+ with mock.patch(
+ "google.oauth2._client._token_endpoint_request_no_throw"
+ ) as mock_token_request:
+ mock_token_request.side_effect = [
+ (False, {"error": "invalid_grant", "error_subtype": "rapt_required"}, True),
+ (True, {"access_token": "access_token"}, None),
+ ]
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ reauth.refresh_grant(
+ MOCK_REQUEST, "token_uri", "refresh_token", "client_id", "client_secret"
+ )
+ assert excinfo.match(r"Reauthentication is needed")
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py b/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py
new file mode 100644
index 0000000000..c474c90e6b
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_service_account.py
@@ -0,0 +1,789 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import jwt
+from google.auth import transport
+from google.oauth2 import service_account
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "other_cert.pem"), "rb") as fh:
+ OTHER_CERT_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+SERVICE_ACCOUNT_NON_GDU_JSON_FILE = os.path.join(
+ DATA_DIR, "service_account_non_gdu.json"
+)
+FAKE_UNIVERSE_DOMAIN = "universe.foo"
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+with open(SERVICE_ACCOUNT_NON_GDU_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO_NON_GDU = json.load(fh)
+
+SIGNER = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+
+
+class TestCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ TOKEN_URI = "https://example.com/oauth2/token"
+
+ @classmethod
+ def make_credentials(cls, universe_domain=service_account._DEFAULT_UNIVERSE_DOMAIN):
+ return service_account.Credentials(
+ SIGNER,
+ cls.SERVICE_ACCOUNT_EMAIL,
+ cls.TOKEN_URI,
+ universe_domain=universe_domain,
+ )
+
+ def test_constructor_no_universe_domain(self):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI, universe_domain=None
+ )
+ assert credentials.universe_domain == service_account._DEFAULT_UNIVERSE_DOMAIN
+
+ def test_from_service_account_info(self):
+ credentials = service_account.Credentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO
+ )
+
+ assert credentials._signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+ assert credentials.service_account_email == SERVICE_ACCOUNT_INFO["client_email"]
+ assert credentials._token_uri == SERVICE_ACCOUNT_INFO["token_uri"]
+ assert credentials._universe_domain == service_account._DEFAULT_UNIVERSE_DOMAIN
+ assert not credentials._always_use_jwt_access
+
+ def test_from_service_account_info_non_gdu(self):
+ credentials = service_account.Credentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO_NON_GDU
+ )
+
+ assert credentials.universe_domain == FAKE_UNIVERSE_DOMAIN
+ assert credentials._always_use_jwt_access
+
+ def test_from_service_account_info_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+ scopes = ["email", "profile"]
+ subject = "subject"
+ additional_claims = {"meta": "data"}
+
+ credentials = service_account.Credentials.from_service_account_info(
+ info, scopes=scopes, subject=subject, additional_claims=additional_claims
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._scopes == scopes
+ assert credentials._subject == subject
+ assert credentials._additional_claims == additional_claims
+ assert not credentials._always_use_jwt_access
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = service_account.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+
+ def test_from_service_account_file_non_gdu(self):
+ info = SERVICE_ACCOUNT_INFO_NON_GDU.copy()
+
+ credentials = service_account.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_NON_GDU_JSON_FILE
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._universe_domain == FAKE_UNIVERSE_DOMAIN
+ assert credentials._always_use_jwt_access
+
+ def test_from_service_account_file_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+ scopes = ["email", "profile"]
+ subject = "subject"
+ additional_claims = {"meta": "data"}
+
+ credentials = service_account.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE,
+ subject=subject,
+ scopes=scopes,
+ additional_claims=additional_claims,
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials.project_id == info["project_id"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._scopes == scopes
+ assert credentials._subject == subject
+ assert credentials._additional_claims == additional_claims
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+ # Scopes haven't been specified yet
+ assert credentials.requires_scopes
+
+ def test_sign_bytes(self):
+ credentials = self.make_credentials()
+ to_sign = b"123"
+ signature = credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ credentials = self.make_credentials()
+ assert isinstance(credentials.signer, crypt.Signer)
+
+ def test_signer_email(self):
+ credentials = self.make_credentials()
+ assert credentials.signer_email == self.SERVICE_ACCOUNT_EMAIL
+
+ def test_create_scoped(self):
+ credentials = self.make_credentials()
+ scopes = ["email", "profile"]
+ credentials = credentials.with_scopes(scopes)
+ assert credentials._scopes == scopes
+
+ def test_with_claims(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_claims({"meep": "moop"})
+ assert new_credentials._additional_claims == {"meep": "moop"}
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_quota_project("new-project-456")
+ assert new_credentials.quota_project_id == "new-project-456"
+ hdrs = {}
+ new_credentials.apply(hdrs, token="tok")
+ assert "x-goog-user-project" in hdrs
+
+ def test_with_token_uri(self):
+ credentials = self.make_credentials()
+ new_token_uri = "https://example2.com/oauth2/token"
+ assert credentials._token_uri == self.TOKEN_URI
+ creds_with_new_token_uri = credentials.with_token_uri(new_token_uri)
+ assert creds_with_new_token_uri._token_uri == new_token_uri
+
+ def test__with_always_use_jwt_access(self):
+ credentials = self.make_credentials()
+ assert not credentials._always_use_jwt_access
+
+ new_credentials = credentials.with_always_use_jwt_access(True)
+ assert new_credentials._always_use_jwt_access
+
+ def test__with_always_use_jwt_access_non_default_universe_domain(self):
+ credentials = self.make_credentials(universe_domain=FAKE_UNIVERSE_DOMAIN)
+ with pytest.raises(exceptions.InvalidValue) as excinfo:
+ credentials.with_always_use_jwt_access(False)
+
+ assert excinfo.match(
+ "always_use_jwt_access should be True for non-default universe domain"
+ )
+
+ def test__make_authorization_grant_assertion(self):
+ credentials = self.make_credentials()
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ assert payload["aud"] == service_account._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+
+ def test__make_authorization_grant_assertion_scoped(self):
+ credentials = self.make_credentials()
+ scopes = ["email", "profile"]
+ credentials = credentials.with_scopes(scopes)
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["scope"] == "email profile"
+
+ def test__make_authorization_grant_assertion_subject(self):
+ credentials = self.make_credentials()
+ subject = "user@example.com"
+ credentials = credentials.with_subject(subject)
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["sub"] == subject
+
+ def test_apply_with_quota_project_id(self):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ quota_project_id="quota-project-123",
+ )
+
+ headers = {}
+ credentials.apply(headers, token="token")
+
+ assert headers["x-goog-user-project"] == "quota-project-123"
+ assert "token" in headers["authorization"]
+
+ def test_apply_with_no_quota_project_id(self):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI
+ )
+
+ headers = {}
+ credentials.apply(headers, token="token")
+
+ assert "x-goog-user-project" not in headers
+ assert "token" in headers["authorization"]
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+ jwt.from_signing_credentials.assert_called_once_with(credentials, audience)
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_with_user_scopes(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, self.TOKEN_URI, scopes=["foo"]
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+
+ # JWT should not be created if there are user-defined scopes
+ jwt.from_signing_credentials.assert_not_called()
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access_with_audience(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ default_scopes=["bar", "foo"],
+ always_use_jwt_access=True,
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+ jwt.from_signing_credentials.assert_called_once_with(credentials, audience)
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access_with_audience_similar_jwt_is_reused(
+ self, jwt
+ ):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ default_scopes=["bar", "foo"],
+ always_use_jwt_access=True,
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+ credentials._jwt_credentials._audience = audience
+ credentials._create_self_signed_jwt(audience)
+ jwt.from_signing_credentials.assert_called_once_with(credentials, audience)
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access_with_scopes(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ scopes=["bar", "foo"],
+ always_use_jwt_access=True,
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+ jwt.from_signing_credentials.assert_called_once_with(
+ credentials, None, additional_claims={"scope": "bar foo"}
+ )
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access_with_scopes_similar_jwt_is_reused(
+ self, jwt
+ ):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ scopes=["bar", "foo"],
+ always_use_jwt_access=True,
+ )
+
+ audience = "https://pubsub.googleapis.com"
+ credentials._create_self_signed_jwt(audience)
+ credentials._jwt_credentials.additional_claims = {"scope": "bar foo"}
+ credentials._create_self_signed_jwt(audience)
+ jwt.from_signing_credentials.assert_called_once_with(
+ credentials, None, additional_claims={"scope": "bar foo"}
+ )
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access_with_default_scopes(
+ self, jwt
+ ):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ default_scopes=["bar", "foo"],
+ always_use_jwt_access=True,
+ )
+
+ credentials._create_self_signed_jwt(None)
+ jwt.from_signing_credentials.assert_called_once_with(
+ credentials, None, additional_claims={"scope": "bar foo"}
+ )
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access_with_default_scopes_similar_jwt_is_reused(
+ self, jwt
+ ):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ default_scopes=["bar", "foo"],
+ always_use_jwt_access=True,
+ )
+
+ credentials._create_self_signed_jwt(None)
+ credentials._jwt_credentials.additional_claims = {"scope": "bar foo"}
+ credentials._create_self_signed_jwt(None)
+ jwt.from_signing_credentials.assert_called_once_with(
+ credentials, None, additional_claims={"scope": "bar foo"}
+ )
+
+ @mock.patch("google.auth.jwt.Credentials", instance=True, autospec=True)
+ def test__create_self_signed_jwt_always_use_jwt_access(self, jwt):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ always_use_jwt_access=True,
+ )
+
+ credentials._create_self_signed_jwt(None)
+ jwt.from_signing_credentials.assert_not_called()
+
+ def test_token_usage_metrics_assertion(self):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ always_use_jwt_access=False,
+ )
+ credentials.token = "token"
+ credentials.expiry = None
+
+ headers = {}
+ credentials.before_request(mock.Mock(), None, None, headers)
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-goog-api-client"] == "cred-type/sa"
+
+ def test_token_usage_metrics_self_signed_jwt(self):
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ always_use_jwt_access=True,
+ )
+ credentials._create_self_signed_jwt("foo.googleapis.com")
+ credentials.token = "token"
+ credentials.expiry = None
+
+ headers = {}
+ credentials.before_request(mock.Mock(), None, None, headers)
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-goog-api-client"] == "cred-type/jwt"
+
+ @mock.patch("google.oauth2._client.jwt_grant", autospec=True)
+ def test_refresh_success(self, jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Check jwt grant call.
+ assert jwt_grant.called
+
+ called_request, token_uri, assertion = jwt_grant.call_args[0]
+ assert called_request == request
+ assert token_uri == credentials._token_uri
+ assert jwt.decode(assertion, PUBLIC_CERT_BYTES)
+ # No further assertion done on the token, as there are separate tests
+ # for checking the authorization grant assertion.
+
+ # Check that the credentials have the token.
+ assert credentials.token == token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert credentials.valid
+
+ @mock.patch("google.oauth2._client.jwt_grant", autospec=True)
+ def test_before_request_refreshes(self, jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ None,
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Credentials should start as invalid
+ assert not credentials.valid
+
+ # before_request should cause a refresh
+ credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert jwt_grant.called
+
+ # Credentials should now be valid.
+ assert credentials.valid
+
+ @mock.patch("google.auth.jwt.Credentials._make_jwt")
+ def test_refresh_with_jwt_credentials(self, make_jwt):
+ credentials = self.make_credentials()
+ credentials._create_self_signed_jwt("https://pubsub.googleapis.com")
+
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ token = "token"
+ expiry = _helpers.utcnow() + datetime.timedelta(seconds=500)
+ make_jwt.return_value = (b"token", expiry)
+
+ # Credentials should start as invalid
+ assert not credentials.valid
+
+ # before_request should cause a refresh
+ credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # Credentials should now be valid.
+ assert credentials.valid
+
+ # Assert make_jwt was called
+ assert make_jwt.call_count == 1
+
+ assert credentials.token == token
+ assert credentials.expiry == expiry
+
+ def test_refresh_with_jwt_credentials_token_type_check(self):
+ credentials = self.make_credentials()
+ credentials._create_self_signed_jwt("https://pubsub.googleapis.com")
+ credentials.refresh(mock.Mock())
+
+ # Credentials token should be a JWT string.
+ assert isinstance(credentials.token, str)
+ payload = jwt.decode(credentials.token, verify=False)
+ assert payload["aud"] == "https://pubsub.googleapis.com"
+
+ @mock.patch("google.oauth2._client.jwt_grant", autospec=True)
+ @mock.patch("google.auth.jwt.Credentials.refresh", autospec=True)
+ def test_refresh_jwt_not_used_for_domain_wide_delegation(
+ self, self_signed_jwt_refresh, jwt_grant
+ ):
+ # Create a domain wide delegation credentials by setting the subject.
+ credentials = service_account.Credentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ always_use_jwt_access=True,
+ subject="subject",
+ )
+ credentials._create_self_signed_jwt("https://pubsub.googleapis.com")
+ jwt_grant.return_value = (
+ "token",
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Make sure we are using jwt_grant and not self signed JWT refresh
+ # method to obtain the token.
+ assert jwt_grant.called
+ assert not self_signed_jwt_refresh.called
+
+ def test_refresh_non_gdu_missing_jwt_credentials(self):
+ credentials = self.make_credentials(universe_domain="foo")
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(None)
+ assert excinfo.match("self._jwt_credentials is missing")
+
+ def test_refresh_non_gdu_domain_wide_delegation_not_supported(self):
+ credentials = self.make_credentials(universe_domain="foo")
+ credentials._subject = "bar@example.com"
+ credentials._create_self_signed_jwt("https://pubsub.googleapis.com")
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(None)
+ assert excinfo.match("domain wide delegation is not supported")
+
+
+class TestIDTokenCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ TOKEN_URI = "https://example.com/oauth2/token"
+ TARGET_AUDIENCE = "https://example.com"
+
+ @classmethod
+ def make_credentials(cls, universe_domain=service_account._DEFAULT_UNIVERSE_DOMAIN):
+ return service_account.IDTokenCredentials(
+ SIGNER,
+ cls.SERVICE_ACCOUNT_EMAIL,
+ cls.TOKEN_URI,
+ cls.TARGET_AUDIENCE,
+ universe_domain=universe_domain,
+ )
+
+ def test_constructor_no_universe_domain(self):
+ credentials = service_account.IDTokenCredentials(
+ SIGNER,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.TOKEN_URI,
+ self.TARGET_AUDIENCE,
+ universe_domain=None,
+ )
+ assert credentials._universe_domain == service_account._DEFAULT_UNIVERSE_DOMAIN
+
+ def test_from_service_account_info(self):
+ credentials = service_account.IDTokenCredentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO, target_audience=self.TARGET_AUDIENCE
+ )
+
+ assert credentials._signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+ assert credentials.service_account_email == SERVICE_ACCOUNT_INFO["client_email"]
+ assert credentials._token_uri == SERVICE_ACCOUNT_INFO["token_uri"]
+ assert credentials._target_audience == self.TARGET_AUDIENCE
+ assert not credentials._use_iam_endpoint
+
+ def test_from_service_account_info_non_gdu(self):
+ credentials = service_account.IDTokenCredentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO_NON_GDU, target_audience=self.TARGET_AUDIENCE
+ )
+
+ assert (
+ credentials._signer.key_id == SERVICE_ACCOUNT_INFO_NON_GDU["private_key_id"]
+ )
+ assert (
+ credentials.service_account_email
+ == SERVICE_ACCOUNT_INFO_NON_GDU["client_email"]
+ )
+ assert credentials._token_uri == SERVICE_ACCOUNT_INFO_NON_GDU["token_uri"]
+ assert credentials._target_audience == self.TARGET_AUDIENCE
+ assert credentials._use_iam_endpoint
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = service_account.IDTokenCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE, target_audience=self.TARGET_AUDIENCE
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._target_audience == self.TARGET_AUDIENCE
+ assert not credentials._use_iam_endpoint
+
+ def test_from_service_account_file_non_gdu(self):
+ info = SERVICE_ACCOUNT_INFO_NON_GDU.copy()
+
+ credentials = service_account.IDTokenCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_NON_GDU_JSON_FILE, target_audience=self.TARGET_AUDIENCE
+ )
+
+ assert credentials.service_account_email == info["client_email"]
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._token_uri == info["token_uri"]
+ assert credentials._target_audience == self.TARGET_AUDIENCE
+ assert credentials._use_iam_endpoint
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+
+ def test_sign_bytes(self):
+ credentials = self.make_credentials()
+ to_sign = b"123"
+ signature = credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ credentials = self.make_credentials()
+ assert isinstance(credentials.signer, crypt.Signer)
+
+ def test_signer_email(self):
+ credentials = self.make_credentials()
+ assert credentials.signer_email == self.SERVICE_ACCOUNT_EMAIL
+
+ def test_with_target_audience(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_target_audience("https://new.example.com")
+ assert new_credentials._target_audience == "https://new.example.com"
+
+ def test__with_use_iam_endpoint(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials._with_use_iam_endpoint(True)
+ assert new_credentials._use_iam_endpoint
+
+ def test__with_use_iam_endpoint_non_default_universe_domain(self):
+ credentials = self.make_credentials(universe_domain=FAKE_UNIVERSE_DOMAIN)
+ with pytest.raises(exceptions.InvalidValue) as excinfo:
+ credentials._with_use_iam_endpoint(False)
+
+ assert excinfo.match(
+ "use_iam_endpoint should be True for non-default universe domain"
+ )
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+ new_credentials = credentials.with_quota_project("project-foo")
+ assert new_credentials._quota_project_id == "project-foo"
+
+ def test_with_token_uri(self):
+ credentials = self.make_credentials()
+ new_token_uri = "https://example2.com/oauth2/token"
+ assert credentials._token_uri == self.TOKEN_URI
+ creds_with_new_token_uri = credentials.with_token_uri(new_token_uri)
+ assert creds_with_new_token_uri._token_uri == new_token_uri
+
+ def test__make_authorization_grant_assertion(self):
+ credentials = self.make_credentials()
+ token = credentials._make_authorization_grant_assertion()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ assert payload["aud"] == service_account._GOOGLE_OAUTH2_TOKEN_ENDPOINT
+ assert payload["target_audience"] == self.TARGET_AUDIENCE
+
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_refresh_success(self, id_token_jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ id_token_jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Refresh credentials
+ credentials.refresh(request)
+
+ # Check jwt grant call.
+ assert id_token_jwt_grant.called
+
+ called_request, token_uri, assertion = id_token_jwt_grant.call_args[0]
+ assert called_request == request
+ assert token_uri == credentials._token_uri
+ assert jwt.decode(assertion, PUBLIC_CERT_BYTES)
+ # No further assertion done on the token, as there are separate tests
+ # for checking the authorization grant assertion.
+
+ # Check that the credentials have the token.
+ assert credentials.token == token
+
+ # Check that the credentials are valid (have a token and are not
+ # expired)
+ assert credentials.valid
+
+ @mock.patch(
+ "google.oauth2._client.call_iam_generate_id_token_endpoint", autospec=True
+ )
+ def test_refresh_iam_flow(self, call_iam_generate_id_token_endpoint):
+ credentials = self.make_credentials()
+ credentials._use_iam_endpoint = True
+ token = "id_token"
+ call_iam_generate_id_token_endpoint.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ )
+ request = mock.Mock()
+ credentials.refresh(request)
+ req, signer_email, target_audience, access_token = call_iam_generate_id_token_endpoint.call_args[
+ 0
+ ]
+ assert req == request
+ assert signer_email == "service-account@example.com"
+ assert target_audience == "https://example.com"
+ decoded_access_token = jwt.decode(access_token, verify=False)
+ assert decoded_access_token["scope"] == "https://www.googleapis.com/auth/iam"
+
+ @mock.patch("google.oauth2._client.id_token_jwt_grant", autospec=True)
+ def test_before_request_refreshes(self, id_token_jwt_grant):
+ credentials = self.make_credentials()
+ token = "token"
+ id_token_jwt_grant.return_value = (
+ token,
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ None,
+ )
+ request = mock.create_autospec(transport.Request, instance=True)
+
+ # Credentials should start as invalid
+ assert not credentials.valid
+
+ # before_request should cause a refresh
+ credentials.before_request(request, "GET", "http://example.com?a=1#3", {})
+
+ # The refresh endpoint should've been called.
+ assert id_token_jwt_grant.called
+
+ # Credentials should now be valid.
+ assert credentials.valid
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_sts.py b/contrib/python/google-auth/py3/tests/oauth2/test_sts.py
new file mode 100644
index 0000000000..e0fb4ae23e
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_sts.py
@@ -0,0 +1,480 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import http.client as http_client
+import json
+import urllib
+
+import mock
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import sts
+from google.oauth2 import utils
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password"
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+
+
+class TestStsClient(object):
+ GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+ RESOURCE = "https://api.example.com/"
+ AUDIENCE = "urn:example:cooperation-context"
+ SCOPES = ["scope1", "scope2"]
+ REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+ SUBJECT_TOKEN = "HEADER.SUBJECT_TOKEN_PAYLOAD.SIGNATURE"
+ SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+ ACTOR_TOKEN = "HEADER.ACTOR_TOKEN_PAYLOAD.SIGNATURE"
+ ACTOR_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+ TOKEN_EXCHANGE_ENDPOINT = "https://example.com/token.oauth2"
+ ADDON_HEADERS = {"x-client-version": "0.1.2"}
+ ADDON_OPTIONS = {"additional": {"non-standard": ["options"], "other": "some-value"}}
+ SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+ "scope": "scope1 scope2",
+ }
+ SUCCESS_RESPONSE_WITH_REFRESH = {
+ "access_token": "abc",
+ "refresh_token": "xyz",
+ "expires_in": 3600,
+ }
+ ERROR_RESPONSE = {
+ "error": "invalid_request",
+ "error_description": "Invalid subject token",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ }
+ CLIENT_AUTH_BASIC = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID, CLIENT_SECRET
+ )
+ CLIENT_AUTH_REQUEST_BODY = utils.ClientAuthentication(
+ utils.ClientAuthType.request_body, CLIENT_ID, CLIENT_SECRET
+ )
+
+ @classmethod
+ def make_client(cls, client_auth=None):
+ return sts.Client(cls.TOKEN_EXCHANGE_ENDPOINT, client_auth)
+
+ @classmethod
+ def make_mock_request(cls, data, status=http_client.OK):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+
+ return request
+
+ @classmethod
+ def assert_request_kwargs(cls, request_kwargs, headers, request_data):
+ """Asserts the request was called with the expected parameters.
+ """
+ assert request_kwargs["url"] == cls.TOKEN_EXCHANGE_ENDPOINT
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+ assert len(body_tuples) == len(request_data.keys())
+
+ def test_exchange_token_full_success_without_auth(self):
+ """Test token exchange success without client authentication using full
+ parameters.
+ """
+ client = self.make_client()
+ headers = self.ADDON_HEADERS.copy()
+ headers["Content-Type"] = "application/x-www-form-urlencoded"
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "resource": self.RESOURCE,
+ "audience": self.AUDIENCE,
+ "scope": " ".join(self.SCOPES),
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "actor_token": self.ACTOR_TOKEN,
+ "actor_token_type": self.ACTOR_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(self.ADDON_OPTIONS)),
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_partial_success_without_auth(self):
+ """Test token exchange success without client authentication using
+ partial (required only) parameters.
+ """
+ client = self.make_client()
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ grant_type=self.GRANT_TYPE,
+ subject_token=self.SUBJECT_TOKEN,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ audience=self.AUDIENCE,
+ requested_token_type=self.REQUESTED_TOKEN_TYPE,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_non200_without_auth(self):
+ """Test token exchange without client auth responding with non-200 status.
+ """
+ client = self.make_client()
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ def test_exchange_token_full_success_with_basic_auth(self):
+ """Test token exchange success with basic client authentication using full
+ parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ headers = self.ADDON_HEADERS.copy()
+ headers["Content-Type"] = "application/x-www-form-urlencoded"
+ headers["Authorization"] = "Basic {}".format(BASIC_AUTH_ENCODING)
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "resource": self.RESOURCE,
+ "audience": self.AUDIENCE,
+ "scope": " ".join(self.SCOPES),
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "actor_token": self.ACTOR_TOKEN,
+ "actor_token_type": self.ACTOR_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(self.ADDON_OPTIONS)),
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_partial_success_with_basic_auth(self):
+ """Test token exchange success with basic client authentication using
+ partial (required only) parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ grant_type=self.GRANT_TYPE,
+ subject_token=self.SUBJECT_TOKEN,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ audience=self.AUDIENCE,
+ requested_token_type=self.REQUESTED_TOKEN_TYPE,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_non200_with_basic_auth(self):
+ """Test token exchange with basic client auth responding with non-200
+ status.
+ """
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ def test_exchange_token_full_success_with_reqbody_auth(self):
+ """Test token exchange success with request body client authenticaiton
+ using full parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_REQUEST_BODY)
+ headers = self.ADDON_HEADERS.copy()
+ headers["Content-Type"] = "application/x-www-form-urlencoded"
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "resource": self.RESOURCE,
+ "audience": self.AUDIENCE,
+ "scope": " ".join(self.SCOPES),
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "actor_token": self.ACTOR_TOKEN,
+ "actor_token_type": self.ACTOR_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(self.ADDON_OPTIONS)),
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_partial_success_with_reqbody_auth(self):
+ """Test token exchange success with request body client authentication
+ using partial (required only) parameters.
+ """
+ client = self.make_client(self.CLIENT_AUTH_REQUEST_BODY)
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": self.GRANT_TYPE,
+ "audience": self.AUDIENCE,
+ "requested_token_type": self.REQUESTED_TOKEN_TYPE,
+ "subject_token": self.SUBJECT_TOKEN,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.exchange_token(
+ request,
+ grant_type=self.GRANT_TYPE,
+ subject_token=self.SUBJECT_TOKEN,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ audience=self.AUDIENCE,
+ requested_token_type=self.REQUESTED_TOKEN_TYPE,
+ )
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_exchange_token_non200_with_reqbody_auth(self):
+ """Test token exchange with POST request body client auth responding
+ with non-200 status.
+ """
+ client = self.make_client(self.CLIENT_AUTH_REQUEST_BODY)
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.exchange_token(
+ request,
+ self.GRANT_TYPE,
+ self.SUBJECT_TOKEN,
+ self.SUBJECT_TOKEN_TYPE,
+ self.RESOURCE,
+ self.AUDIENCE,
+ self.SCOPES,
+ self.REQUESTED_TOKEN_TYPE,
+ self.ACTOR_TOKEN,
+ self.ACTOR_TOKEN_TYPE,
+ self.ADDON_OPTIONS,
+ self.ADDON_HEADERS,
+ )
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ def test_refresh_token_success(self):
+ """Test refresh token with successful response."""
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client.refresh_token(request, "refreshtoken")
+
+ headers = {
+ "Authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
+ "Content-Type": "application/x-www-form-urlencoded",
+ }
+ request_data = {"grant_type": "refresh_token", "refresh_token": "refreshtoken"}
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_refresh_token_success_with_refresh(self):
+ """Test refresh token with successful response."""
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE_WITH_REFRESH
+ )
+
+ response = client.refresh_token(request, "refreshtoken")
+
+ headers = {
+ "Authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
+ "Content-Type": "application/x-www-form-urlencoded",
+ }
+ request_data = {"grant_type": "refresh_token", "refresh_token": "refreshtoken"}
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE_WITH_REFRESH
+
+ def test_refresh_token_failure(self):
+ """Test refresh token with failure response."""
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client.refresh_token(request, "refreshtoken")
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ def test__make_request_success(self):
+ """Test base method with successful response."""
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+
+ response = client._make_request(request, {"a": "b"}, {"c": "d"})
+
+ headers = {
+ "Authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "a": "b",
+ }
+ request_data = {"c": "d"}
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert response == self.SUCCESS_RESPONSE
+
+ def test_make_request_failure(self):
+ """Test refresh token with failure response."""
+ client = self.make_client(self.CLIENT_AUTH_BASIC)
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ client._make_request(request, {"a": "b"}, {"c": "d"})
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
diff --git a/contrib/python/google-auth/py3/tests/oauth2/test_utils.py b/contrib/python/google-auth/py3/tests/oauth2/test_utils.py
new file mode 100644
index 0000000000..543a693a98
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/oauth2/test_utils.py
@@ -0,0 +1,264 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.oauth2 import utils
+
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password"
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+# Base64 encoding of "username:"
+BASIC_AUTH_ENCODING_SECRETLESS = "dXNlcm5hbWU6"
+
+
+class AuthHandler(utils.OAuthClientAuthHandler):
+ def __init__(self, client_auth=None):
+ super(AuthHandler, self).__init__(client_auth)
+
+ def apply_client_authentication_options(
+ self, headers, request_body=None, bearer_token=None
+ ):
+ return super(AuthHandler, self).apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+
+class TestClientAuthentication(object):
+ @classmethod
+ def make_client_auth(cls, client_secret=None):
+ return utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID, client_secret
+ )
+
+ def test_initialization_with_client_secret(self):
+ client_auth = self.make_client_auth(CLIENT_SECRET)
+
+ assert client_auth.client_auth_type == utils.ClientAuthType.basic
+ assert client_auth.client_id == CLIENT_ID
+ assert client_auth.client_secret == CLIENT_SECRET
+
+ def test_initialization_no_client_secret(self):
+ client_auth = self.make_client_auth()
+
+ assert client_auth.client_auth_type == utils.ClientAuthType.basic
+ assert client_auth.client_id == CLIENT_ID
+ assert client_auth.client_secret is None
+
+
+class TestOAuthClientAuthHandler(object):
+ CLIENT_AUTH_BASIC = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID, CLIENT_SECRET
+ )
+ CLIENT_AUTH_BASIC_SECRETLESS = utils.ClientAuthentication(
+ utils.ClientAuthType.basic, CLIENT_ID
+ )
+ CLIENT_AUTH_REQUEST_BODY = utils.ClientAuthentication(
+ utils.ClientAuthType.request_body, CLIENT_ID, CLIENT_SECRET
+ )
+ CLIENT_AUTH_REQUEST_BODY_SECRETLESS = utils.ClientAuthentication(
+ utils.ClientAuthType.request_body, CLIENT_ID
+ )
+
+ @classmethod
+ def make_oauth_client_auth_handler(cls, client_auth=None):
+ return AuthHandler(client_auth)
+
+ def test_apply_client_authentication_options_none(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler()
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {"Content-Type": "application/json"}
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_basic(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(self.CLIENT_AUTH_BASIC)
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_basic_nosecret(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_BASIC_SECRETLESS
+ )
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING_SECRETLESS),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_request_body(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY
+ )
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {"Content-Type": "application/json"}
+ assert request_body == {
+ "foo": "bar",
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ }
+
+ def test_apply_client_authentication_options_request_body_nosecret(self):
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY_SECRETLESS
+ )
+
+ auth_handler.apply_client_authentication_options(headers, request_body)
+
+ assert headers == {"Content-Type": "application/json"}
+ assert request_body == {
+ "foo": "bar",
+ "client_id": CLIENT_ID,
+ "client_secret": "",
+ }
+
+ def test_apply_client_authentication_options_request_body_no_body(self):
+ headers = {"Content-Type": "application/json"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY
+ )
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ auth_handler.apply_client_authentication_options(headers)
+
+ assert excinfo.match(r"HTTP request does not support request-body")
+
+ def test_apply_client_authentication_options_bearer_token(self):
+ bearer_token = "ACCESS_TOKEN"
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler()
+
+ auth_handler.apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(bearer_token),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_bearer_and_basic(self):
+ bearer_token = "ACCESS_TOKEN"
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(self.CLIENT_AUTH_BASIC)
+
+ auth_handler.apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+ # Bearer token should have higher priority.
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(bearer_token),
+ }
+ assert request_body == {"foo": "bar"}
+
+ def test_apply_client_authentication_options_bearer_and_request_body(self):
+ bearer_token = "ACCESS_TOKEN"
+ headers = {"Content-Type": "application/json"}
+ request_body = {"foo": "bar"}
+ auth_handler = self.make_oauth_client_auth_handler(
+ self.CLIENT_AUTH_REQUEST_BODY
+ )
+
+ auth_handler.apply_client_authentication_options(
+ headers, request_body, bearer_token
+ )
+
+ # Bearer token should have higher priority.
+ assert headers == {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {}".format(bearer_token),
+ }
+ assert request_body == {"foo": "bar"}
+
+
+def test__handle_error_response_code_only():
+ error_resp = {"error": "unsupported_grant_type"}
+ response_data = json.dumps(error_resp)
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(r"Error code unsupported_grant_type")
+
+
+def test__handle_error_response_code_description():
+ error_resp = {
+ "error": "unsupported_grant_type",
+ "error_description": "The provided grant_type is unsupported",
+ }
+ response_data = json.dumps(error_resp)
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(
+ r"Error code unsupported_grant_type: The provided grant_type is unsupported"
+ )
+
+
+def test__handle_error_response_code_description_uri():
+ error_resp = {
+ "error": "unsupported_grant_type",
+ "error_description": "The provided grant_type is unsupported",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ }
+ response_data = json.dumps(error_resp)
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(
+ r"Error code unsupported_grant_type: The provided grant_type is unsupported - https://tools.ietf.org/html/rfc6749"
+ )
+
+
+def test__handle_error_response_non_json():
+ response_data = "Oops, something wrong happened"
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ utils.handle_error_response(response_data)
+
+ assert excinfo.match(r"Oops, something wrong happened")
diff --git a/contrib/python/google-auth/py3/tests/test__cloud_sdk.py b/contrib/python/google-auth/py3/tests/test__cloud_sdk.py
new file mode 100644
index 0000000000..18ac18fa35
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test__cloud_sdk.py
@@ -0,0 +1,182 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import json
+import os
+import subprocess
+import sys
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _cloud_sdk
+from google.auth import environment_vars
+from google.auth import exceptions
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+AUTHORIZED_USER_FILE = os.path.join(DATA_DIR, "authorized_user.json")
+
+with io.open(AUTHORIZED_USER_FILE, "rb") as fh:
+ AUTHORIZED_USER_FILE_DATA = json.load(fh)
+
+SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with io.open(SERVICE_ACCOUNT_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_FILE_DATA = json.load(fh)
+
+
+@pytest.mark.parametrize(
+ "data, expected_project_id",
+ [(b"example-project\n", "example-project"), (b"", None)],
+)
+def test_get_project_id(data, expected_project_id):
+ check_output_patch = mock.patch(
+ "subprocess.check_output", autospec=True, return_value=data
+ )
+
+ with check_output_patch as check_output:
+ project_id = _cloud_sdk.get_project_id()
+
+ assert project_id == expected_project_id
+ assert check_output.called
+
+
+@mock.patch(
+ "subprocess.check_output",
+ autospec=True,
+ side_effect=subprocess.CalledProcessError(-1, "testing"),
+)
+def test_get_project_id_call_error(check_output):
+ project_id = _cloud_sdk.get_project_id()
+ assert project_id is None
+ assert check_output.called
+
+
+@pytest.mark.xfail
+def test__run_subprocess_ignore_stderr():
+ command = [
+ sys.executable,
+ "-c",
+ "from __future__ import print_function;"
+ + "import sys;"
+ + "print('error', file=sys.stderr);"
+ + "print('output', file=sys.stdout)",
+ ]
+
+ # If we ignore stderr, then the output only has stdout
+ output = _cloud_sdk._run_subprocess_ignore_stderr(command)
+ assert output == b"output\n"
+
+ # If we pipe stderr to stdout, then the output is mixed with stdout and stderr.
+ output = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ assert output == b"output\nerror\n" or output == b"error\noutput\n"
+
+
+@mock.patch("os.name", new="nt")
+def test_get_project_id_windows():
+ check_output_patch = mock.patch(
+ "subprocess.check_output", autospec=True, return_value=b"example-project\n"
+ )
+
+ with check_output_patch as check_output:
+ project_id = _cloud_sdk.get_project_id()
+
+ assert project_id == "example-project"
+ assert check_output.called
+ # Make sure the executable is `gcloud.cmd`.
+ args = check_output.call_args[0]
+ command = args[0]
+ executable = command[0]
+ assert executable == "gcloud.cmd"
+
+
+@mock.patch("google.auth._cloud_sdk.get_config_path", autospec=True)
+def test_get_application_default_credentials_path(get_config_dir):
+ config_path = "config_path"
+ get_config_dir.return_value = config_path
+ credentials_path = _cloud_sdk.get_application_default_credentials_path()
+ assert credentials_path == os.path.join(
+ config_path, _cloud_sdk._CREDENTIALS_FILENAME
+ )
+
+
+def test_get_config_path_env_var(monkeypatch):
+ config_path_sentinel = "config_path"
+ monkeypatch.setenv(environment_vars.CLOUD_SDK_CONFIG_DIR, config_path_sentinel)
+ config_path = _cloud_sdk.get_config_path()
+ assert config_path == config_path_sentinel
+
+
+@mock.patch("os.path.expanduser")
+def test_get_config_path_unix(expanduser):
+ expanduser.side_effect = lambda path: path
+
+ config_path = _cloud_sdk.get_config_path()
+
+ assert os.path.split(config_path) == ("~/.config", _cloud_sdk._CONFIG_DIRECTORY)
+
+
+@mock.patch("os.name", new="nt")
+def test_get_config_path_windows(monkeypatch):
+ appdata = "appdata"
+ monkeypatch.setenv(_cloud_sdk._WINDOWS_CONFIG_ROOT_ENV_VAR, appdata)
+
+ config_path = _cloud_sdk.get_config_path()
+
+ assert os.path.split(config_path) == (appdata, _cloud_sdk._CONFIG_DIRECTORY)
+
+
+@mock.patch("os.name", new="nt")
+def test_get_config_path_no_appdata(monkeypatch):
+ monkeypatch.delenv(_cloud_sdk._WINDOWS_CONFIG_ROOT_ENV_VAR, raising=False)
+ monkeypatch.setenv("SystemDrive", "G:")
+
+ config_path = _cloud_sdk.get_config_path()
+
+ assert os.path.split(config_path) == ("G:/\\", _cloud_sdk._CONFIG_DIRECTORY)
+
+
+@mock.patch("os.name", new="nt")
+@mock.patch("subprocess.check_output", autospec=True)
+def test_get_auth_access_token_windows(check_output):
+ check_output.return_value = b"access_token\n"
+
+ token = _cloud_sdk.get_auth_access_token()
+ assert token == "access_token"
+ check_output.assert_called_with(
+ ("gcloud.cmd", "auth", "print-access-token"), stderr=subprocess.STDOUT
+ )
+
+
+@mock.patch("subprocess.check_output", autospec=True)
+def test_get_auth_access_token_with_account(check_output):
+ check_output.return_value = b"access_token\n"
+
+ token = _cloud_sdk.get_auth_access_token(account="account")
+ assert token == "access_token"
+ check_output.assert_called_with(
+ ("gcloud", "auth", "print-access-token", "--account=account"),
+ stderr=subprocess.STDOUT,
+ )
+
+
+@mock.patch("subprocess.check_output", autospec=True)
+def test_get_auth_access_token_with_exception(check_output):
+ check_output.side_effect = OSError()
+
+ with pytest.raises(exceptions.UserAccessTokenError):
+ _cloud_sdk.get_auth_access_token(account="account")
diff --git a/contrib/python/google-auth/py3/tests/test__default.py b/contrib/python/google-auth/py3/tests/test__default.py
new file mode 100644
index 0000000000..29904ec7aa
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test__default.py
@@ -0,0 +1,1352 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _default
+from google.auth import api_key
+from google.auth import app_engine
+from google.auth import aws
+from google.auth import compute_engine
+from google.auth import credentials
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import external_account
+from google.auth import external_account_authorized_user
+from google.auth import identity_pool
+from google.auth import impersonated_credentials
+from google.auth import pluggable
+from google.oauth2 import gdch_credentials
+from google.oauth2 import service_account
+import google.oauth2.credentials
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+AUTHORIZED_USER_FILE = os.path.join(DATA_DIR, "authorized_user.json")
+
+with open(AUTHORIZED_USER_FILE) as fh:
+ AUTHORIZED_USER_FILE_DATA = json.load(fh)
+
+AUTHORIZED_USER_CLOUD_SDK_FILE = os.path.join(
+ DATA_DIR, "authorized_user_cloud_sdk.json"
+)
+
+AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE = os.path.join(
+ DATA_DIR, "authorized_user_cloud_sdk_with_quota_project_id.json"
+)
+
+SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+CLIENT_SECRETS_FILE = os.path.join(DATA_DIR, "client_secrets.json")
+
+GDCH_SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "gdch_service_account.json")
+
+with open(SERVICE_ACCOUNT_FILE) as fh:
+ SERVICE_ACCOUNT_FILE_DATA = json.load(fh)
+
+SUBJECT_TOKEN_TEXT_FILE = os.path.join(DATA_DIR, "external_subject_token.txt")
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+WORKFORCE_AUDIENCE = (
+ "//iam.googleapis.com/locations/global/workforcePools/POOL_ID/providers/PROVIDER_ID"
+)
+WORKFORCE_POOL_USER_PROJECT = "WORKFORCE_POOL_USER_PROJECT_NUMBER"
+REGION_URL = "http://169.254.169.254/latest/meta-data/placement/availability-zone"
+SECURITY_CREDS_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials"
+CRED_VERIFICATION_URL = (
+ "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
+)
+IDENTITY_POOL_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+ "token_url": TOKEN_URL,
+ "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
+}
+PLUGGABLE_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+ "token_url": TOKEN_URL,
+ "credential_source": {"executable": {"command": "command"}},
+}
+AWS_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:aws:token-type:aws4_request",
+ "token_url": TOKEN_URL,
+ "credential_source": {
+ "environment_id": "aws1",
+ "region_url": REGION_URL,
+ "url": SECURITY_CREDS_URL,
+ "regional_cred_verification_url": CRED_VERIFICATION_URL,
+ },
+}
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
+)
+IMPERSONATED_IDENTITY_POOL_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+ "token_url": TOKEN_URL,
+ "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+}
+IMPERSONATED_AWS_DATA = {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": "urn:ietf:params:aws:token-type:aws4_request",
+ "token_url": TOKEN_URL,
+ "credential_source": {
+ "environment_id": "aws1",
+ "region_url": REGION_URL,
+ "url": SECURITY_CREDS_URL,
+ "regional_cred_verification_url": CRED_VERIFICATION_URL,
+ },
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+}
+IDENTITY_POOL_WORKFORCE_DATA = {
+ "type": "external_account",
+ "audience": WORKFORCE_AUDIENCE,
+ "subject_token_type": "urn:ietf:params:oauth:token-type:id_token",
+ "token_url": TOKEN_URL,
+ "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
+ "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
+}
+IMPERSONATED_IDENTITY_POOL_WORKFORCE_DATA = {
+ "type": "external_account",
+ "audience": WORKFORCE_AUDIENCE,
+ "subject_token_type": "urn:ietf:params:oauth:token-type:id_token",
+ "token_url": TOKEN_URL,
+ "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
+}
+
+IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE = os.path.join(
+ DATA_DIR, "impersonated_service_account_authorized_user_source.json"
+)
+
+IMPERSONATED_SERVICE_ACCOUNT_WITH_QUOTA_PROJECT_FILE = os.path.join(
+ DATA_DIR, "impersonated_service_account_with_quota_project.json"
+)
+
+IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE = os.path.join(
+ DATA_DIR, "impersonated_service_account_service_account_source.json"
+)
+
+EXTERNAL_ACCOUNT_AUTHORIZED_USER_FILE = os.path.join(
+ DATA_DIR, "external_account_authorized_user.json"
+)
+
+MOCK_CREDENTIALS = mock.Mock(spec=credentials.CredentialsWithQuotaProject)
+MOCK_CREDENTIALS.with_quota_project.return_value = MOCK_CREDENTIALS
+
+
+def get_project_id_side_effect(self, request=None):
+ # If no scopes are set, this will always return None.
+ if not self.scopes:
+ return None
+ return mock.sentinel.project_id
+
+
+LOAD_FILE_PATCH = mock.patch(
+ "google.auth._default.load_credentials_from_file",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH = mock.patch.object(
+ external_account.Credentials,
+ "get_project_id",
+ side_effect=get_project_id_side_effect,
+ autospec=True,
+)
+
+
+def test_load_credentials_from_missing_file():
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file("")
+
+ assert excinfo.match(r"not found")
+
+
+def test_load_credentials_from_dict_non_dict_object():
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_dict("")
+ assert excinfo.match(r"dict type was expected")
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_dict(None)
+ assert excinfo.match(r"dict type was expected")
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_dict(1)
+ assert excinfo.match(r"dict type was expected")
+
+
+def test_load_credentials_from_dict_authorized_user():
+ credentials, project_id = _default.load_credentials_from_dict(
+ AUTHORIZED_USER_FILE_DATA
+ )
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+
+
+def test_load_credentials_from_file_invalid_json(tmpdir):
+ jsonfile = tmpdir.join("invalid.json")
+ jsonfile.write("{")
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(jsonfile))
+
+ assert excinfo.match(r"not a valid json file")
+
+
+def test_load_credentials_from_file_invalid_type(tmpdir):
+ jsonfile = tmpdir.join("invalid.json")
+ jsonfile.write(json.dumps({"type": "not-a-real-type"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(jsonfile))
+
+ assert excinfo.match(r"does not have a valid type")
+
+
+def test_load_credentials_from_file_authorized_user():
+ credentials, project_id = _default.load_credentials_from_file(AUTHORIZED_USER_FILE)
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+
+
+def test_load_credentials_from_file_no_type(tmpdir):
+ # use the client_secrets.json, which is valid json but not a
+ # loadable credentials type
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(CLIENT_SECRETS_FILE)
+
+ assert excinfo.match(r"does not have a valid type")
+ assert excinfo.match(r"Type is None")
+
+
+def test_load_credentials_from_file_authorized_user_bad_format(tmpdir):
+ filename = tmpdir.join("authorized_user_bad.json")
+ filename.write(json.dumps({"type": "authorized_user"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(filename))
+
+ assert excinfo.match(r"Failed to load authorized user")
+ assert excinfo.match(r"missing fields")
+
+
+def test_load_credentials_from_file_authorized_user_cloud_sdk():
+ with pytest.warns(UserWarning, match="Cloud SDK"):
+ credentials, project_id = _default.load_credentials_from_file(
+ AUTHORIZED_USER_CLOUD_SDK_FILE
+ )
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+
+ # No warning if the json file has quota project id.
+ credentials, project_id = _default.load_credentials_from_file(
+ AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE
+ )
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+
+
+def test_load_credentials_from_file_authorized_user_cloud_sdk_with_scopes():
+ with pytest.warns(UserWarning, match="Cloud SDK"):
+ credentials, project_id = _default.load_credentials_from_file(
+ AUTHORIZED_USER_CLOUD_SDK_FILE,
+ scopes=["https://www.google.com/calendar/feeds"],
+ )
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+def test_load_credentials_from_file_authorized_user_cloud_sdk_with_quota_project():
+ credentials, project_id = _default.load_credentials_from_file(
+ AUTHORIZED_USER_CLOUD_SDK_FILE, quota_project_id="project-foo"
+ )
+
+ assert isinstance(credentials, google.oauth2.credentials.Credentials)
+ assert project_id is None
+ assert credentials.quota_project_id == "project-foo"
+
+
+def test_load_credentials_from_file_service_account():
+ credentials, project_id = _default.load_credentials_from_file(SERVICE_ACCOUNT_FILE)
+ assert isinstance(credentials, service_account.Credentials)
+ assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
+
+
+def test_load_credentials_from_file_service_account_with_scopes():
+ credentials, project_id = _default.load_credentials_from_file(
+ SERVICE_ACCOUNT_FILE, scopes=["https://www.google.com/calendar/feeds"]
+ )
+ assert isinstance(credentials, service_account.Credentials)
+ assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+def test_load_credentials_from_file_service_account_with_quota_project():
+ credentials, project_id = _default.load_credentials_from_file(
+ SERVICE_ACCOUNT_FILE, quota_project_id="project-foo"
+ )
+ assert isinstance(credentials, service_account.Credentials)
+ assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
+ assert credentials.quota_project_id == "project-foo"
+
+
+def test_load_credentials_from_file_service_account_bad_format(tmpdir):
+ filename = tmpdir.join("serivce_account_bad.json")
+ filename.write(json.dumps({"type": "service_account"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(filename))
+
+ assert excinfo.match(r"Failed to load service account")
+ assert excinfo.match(r"missing fields")
+
+
+def test_load_credentials_from_file_impersonated_with_authorized_user_source():
+ credentials, project_id = _default.load_credentials_from_file(
+ IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE
+ )
+ assert isinstance(credentials, impersonated_credentials.Credentials)
+ assert isinstance(
+ credentials._source_credentials, google.oauth2.credentials.Credentials
+ )
+ assert credentials.service_account_email == "service-account-target@example.com"
+ assert credentials._delegates == ["service-account-delegate@example.com"]
+ assert not credentials._quota_project_id
+ assert not credentials._target_scopes
+ assert project_id is None
+
+
+def test_load_credentials_from_file_impersonated_with_quota_project():
+ credentials, _ = _default.load_credentials_from_file(
+ IMPERSONATED_SERVICE_ACCOUNT_WITH_QUOTA_PROJECT_FILE
+ )
+ assert isinstance(credentials, impersonated_credentials.Credentials)
+ assert credentials._quota_project_id == "quota_project"
+
+
+def test_load_credentials_from_file_impersonated_with_service_account_source():
+ credentials, _ = _default.load_credentials_from_file(
+ IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE
+ )
+ assert isinstance(credentials, impersonated_credentials.Credentials)
+ assert isinstance(credentials._source_credentials, service_account.Credentials)
+ assert not credentials._quota_project_id
+
+
+def test_load_credentials_from_file_impersonated_passing_quota_project():
+ credentials, _ = _default.load_credentials_from_file(
+ IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE,
+ quota_project_id="new_quota_project",
+ )
+ assert credentials._quota_project_id == "new_quota_project"
+
+
+def test_load_credentials_from_file_impersonated_passing_scopes():
+ credentials, _ = _default.load_credentials_from_file(
+ IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE,
+ scopes=["scope1", "scope2"],
+ )
+ assert credentials._target_scopes == ["scope1", "scope2"]
+
+
+def test_load_credentials_from_file_impersonated_wrong_target_principal(tmpdir):
+
+ with open(IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE) as fh:
+ impersonated_credentials_info = json.load(fh)
+ impersonated_credentials_info[
+ "service_account_impersonation_url"
+ ] = "something_wrong"
+
+ jsonfile = tmpdir.join("invalid.json")
+ jsonfile.write(json.dumps(impersonated_credentials_info))
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(jsonfile))
+
+ assert excinfo.match(r"Cannot extract target principal")
+
+
+def test_load_credentials_from_file_impersonated_wrong_source_type(tmpdir):
+
+ with open(IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE) as fh:
+ impersonated_credentials_info = json.load(fh)
+ impersonated_credentials_info["source_credentials"]["type"] = "external_account"
+
+ jsonfile = tmpdir.join("invalid.json")
+ jsonfile.write(json.dumps(impersonated_credentials_info))
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(jsonfile))
+
+ assert excinfo.match(r"source credential of type external_account is not supported")
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_identity_pool(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_aws(get_project_id, tmpdir):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(AWS_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, aws.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_identity_pool_impersonated(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_aws_impersonated(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_AWS_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, aws.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_workforce(get_project_id, tmpdir):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_WORKFORCE_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert credentials.is_user
+ assert credentials.is_workforce_pool
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_workforce_impersonated(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_WORKFORCE_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert credentials.is_workforce_pool
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_with_user_and_default_scopes(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(
+ str(config_file),
+ scopes=["https://www.google.com/calendar/feeds"],
+ default_scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since scopes are specified, the project ID can be determined.
+ assert project_id is mock.sentinel.project_id
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+ assert credentials.default_scopes == [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_with_quota_project(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(
+ str(config_file), quota_project_id="project-foo"
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert credentials.quota_project_id == "project-foo"
+
+
+def test_load_credentials_from_file_external_account_bad_format(tmpdir):
+ filename = tmpdir.join("external_account_bad.json")
+ filename.write(json.dumps({"type": "external_account"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(filename))
+
+ assert excinfo.match(
+ "Failed to load external account credentials from {}".format(str(filename))
+ )
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_file_external_account_explicit_request(
+ get_project_id, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ credentials, project_id = _default.load_credentials_from_file(
+ str(config_file),
+ request=mock.sentinel.request,
+ scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ # Since scopes are specified, the project ID can be determined.
+ assert project_id is mock.sentinel.project_id
+ get_project_id.assert_called_with(credentials, request=mock.sentinel.request)
+
+
+@mock.patch.dict(os.environ, {}, clear=True)
+def test__get_explicit_environ_credentials_no_env():
+ assert _default._get_explicit_environ_credentials() == (None, None)
+
+
+def test_load_credentials_from_file_external_account_authorized_user():
+ credentials, project_id = _default.load_credentials_from_file(
+ EXTERNAL_ACCOUNT_AUTHORIZED_USER_FILE, request=mock.sentinel.request
+ )
+
+ assert isinstance(credentials, external_account_authorized_user.Credentials)
+ assert project_id is None
+
+
+def test_load_credentials_from_file_external_account_authorized_user_bad_format(tmpdir):
+ filename = tmpdir.join("external_account_authorized_user_bad.json")
+ filename.write(json.dumps({"type": "external_account_authorized_user"}))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.load_credentials_from_file(str(filename))
+
+ assert excinfo.match(
+ "Failed to load external account authorized user credentials from {}".format(
+ str(filename)
+ )
+ )
+
+
+@pytest.mark.parametrize("quota_project_id", [None, "project-foo"])
+@LOAD_FILE_PATCH
+def test__get_explicit_environ_credentials(load, quota_project_id, monkeypatch):
+ monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
+
+ credentials, project_id = _default._get_explicit_environ_credentials(
+ quota_project_id=quota_project_id
+ )
+
+ assert credentials is MOCK_CREDENTIALS
+ assert project_id is mock.sentinel.project_id
+ load.assert_called_with("filename", quota_project_id=quota_project_id)
+
+
+@LOAD_FILE_PATCH
+def test__get_explicit_environ_credentials_no_project_id(load, monkeypatch):
+ load.return_value = MOCK_CREDENTIALS, None
+ monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
+
+ credentials, project_id = _default._get_explicit_environ_credentials()
+
+ assert credentials is MOCK_CREDENTIALS
+ assert project_id is None
+
+
+@pytest.mark.parametrize("quota_project_id", [None, "project-foo"])
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+@mock.patch("google.auth._default._get_gcloud_sdk_credentials", autospec=True)
+def test__get_explicit_environ_credentials_fallback_to_gcloud(
+ get_gcloud_creds, get_adc_path, quota_project_id, monkeypatch
+):
+ # Set explicit credentials path to cloud sdk credentials path.
+ get_adc_path.return_value = "filename"
+ monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
+
+ _default._get_explicit_environ_credentials(quota_project_id=quota_project_id)
+
+ # Check we fall back to cloud sdk flow since explicit credentials path is
+ # cloud sdk credentials path
+ get_gcloud_creds.assert_called_with(quota_project_id=quota_project_id)
+
+
+@pytest.mark.parametrize("quota_project_id", [None, "project-foo"])
+@LOAD_FILE_PATCH
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test__get_gcloud_sdk_credentials(get_adc_path, load, quota_project_id):
+ get_adc_path.return_value = SERVICE_ACCOUNT_FILE
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials(
+ quota_project_id=quota_project_id
+ )
+
+ assert credentials is MOCK_CREDENTIALS
+ assert project_id is mock.sentinel.project_id
+ load.assert_called_with(SERVICE_ACCOUNT_FILE, quota_project_id=quota_project_id)
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test__get_gcloud_sdk_credentials_non_existent(get_adc_path, tmpdir):
+ non_existent = tmpdir.join("non-existent")
+ get_adc_path.return_value = str(non_existent)
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_project_id",
+ return_value=mock.sentinel.project_id,
+ autospec=True,
+)
+@mock.patch("os.path.isfile", return_value=True, autospec=True)
+@LOAD_FILE_PATCH
+def test__get_gcloud_sdk_credentials_project_id(load, unused_isfile, get_project_id):
+ # Don't return a project ID from load file, make the function check
+ # the Cloud SDK project.
+ load.return_value = MOCK_CREDENTIALS, None
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials == MOCK_CREDENTIALS
+ assert project_id == mock.sentinel.project_id
+ assert get_project_id.called
+
+
+@mock.patch("google.auth._cloud_sdk.get_project_id", return_value=None, autospec=True)
+@mock.patch("os.path.isfile", return_value=True)
+@LOAD_FILE_PATCH
+def test__get_gcloud_sdk_credentials_no_project_id(load, unused_isfile, get_project_id):
+ # Don't return a project ID from load file, make the function check
+ # the Cloud SDK project.
+ load.return_value = MOCK_CREDENTIALS, None
+
+ credentials, project_id = _default._get_gcloud_sdk_credentials()
+
+ assert credentials == MOCK_CREDENTIALS
+ assert project_id is None
+ assert get_project_id.called
+
+
+def test__get_gdch_service_account_credentials_invalid_format_version():
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default._get_gdch_service_account_credentials(
+ "file_name", {"format_version": "2"}
+ )
+ assert excinfo.match("Failed to load GDCH service account credentials")
+
+
+def test_get_api_key_credentials():
+ creds = _default.get_api_key_credentials("api_key")
+ assert isinstance(creds, api_key.Credentials)
+ assert creds.token == "api_key"
+
+
+class _AppIdentityModule(object):
+    """The interface of the App Identity app engine module.
+ See https://cloud.google.com/appengine/docs/standard/python/refdocs\
+ /google.appengine.api.app_identity.app_identity
+ """
+
+ def get_application_id(self):
+ raise NotImplementedError()
+
+
+@pytest.fixture
+def app_identity(monkeypatch):
+ """Mocks the app_identity module for google.auth.app_engine."""
+ app_identity_module = mock.create_autospec(_AppIdentityModule, instance=True)
+ monkeypatch.setattr(app_engine, "app_identity", app_identity_module)
+ yield app_identity_module
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_gen1(app_identity):
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python27"
+ app_identity.get_application_id.return_value = mock.sentinel.project
+
+ credentials, project_id = _default._get_gae_credentials()
+
+ assert isinstance(credentials, app_engine.Credentials)
+ assert project_id == mock.sentinel.project
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_gen2():
+ os.environ["GAE_RUNTIME"] = "python37"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_gen2_backwards_compat():
+ # compat helpers may copy GAE_RUNTIME to APPENGINE_RUNTIME
+ # for backwards compatibility with code that relies on it
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python37"
+ os.environ["GAE_RUNTIME"] = "python37"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+def test__get_gae_credentials_env_unset():
+ assert environment_vars.LEGACY_APPENGINE_RUNTIME not in os.environ
+ assert "GAE_RUNTIME" not in os.environ
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch.dict(os.environ)
+def test__get_gae_credentials_no_app_engine():
+ # test both with and without LEGACY_APPENGINE_RUNTIME setting
+ assert environment_vars.LEGACY_APPENGINE_RUNTIME not in os.environ
+
+ import sys
+
+ with mock.patch.dict(sys.modules, {"google.auth.app_engine": None}):
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python27"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch.dict(os.environ)
+@mock.patch.object(app_engine, "app_identity", new=None)
+def test__get_gae_credentials_no_apis():
+ # test both with and without LEGACY_APPENGINE_RUNTIME setting
+ assert environment_vars.LEGACY_APPENGINE_RUNTIME not in os.environ
+
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+ os.environ[environment_vars.LEGACY_APPENGINE_RUNTIME] = "python27"
+ credentials, project_id = _default._get_gae_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.is_on_gce", return_value=True, autospec=True
+)
+@mock.patch(
+ "google.auth.compute_engine._metadata.get_project_id",
+ return_value="example-project",
+ autospec=True,
+)
+def test__get_gce_credentials(unused_get, unused_ping):
+ credentials, project_id = _default._get_gce_credentials()
+
+ assert isinstance(credentials, compute_engine.Credentials)
+ assert project_id == "example-project"
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.is_on_gce", return_value=False, autospec=True
+)
+def test__get_gce_credentials_no_ping(unused_ping):
+ credentials, project_id = _default._get_gce_credentials()
+
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.is_on_gce", return_value=True, autospec=True
+)
+@mock.patch(
+ "google.auth.compute_engine._metadata.get_project_id",
+ side_effect=exceptions.TransportError(),
+ autospec=True,
+)
+def test__get_gce_credentials_no_project_id(unused_get, unused_ping):
+ credentials, project_id = _default._get_gce_credentials()
+
+ assert isinstance(credentials, compute_engine.Credentials)
+ assert project_id is None
+
+
+def test__get_gce_credentials_no_compute_engine():
+ import sys
+
+ with mock.patch.dict("sys.modules"):
+ sys.modules["google.auth.compute_engine"] = None
+ credentials, project_id = _default._get_gce_credentials()
+ assert credentials is None
+ assert project_id is None
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.is_on_gce", return_value=False, autospec=True
+)
+def test__get_gce_credentials_explicit_request(ping):
+ _default._get_gce_credentials(mock.sentinel.request)
+ ping.assert_called_with(request=mock.sentinel.request)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_early_out(unused_get):
+ assert _default.default() == (MOCK_CREDENTIALS, mock.sentinel.project_id)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_explict_project_id(unused_get, monkeypatch):
+ monkeypatch.setenv(environment_vars.PROJECT, "explicit-env")
+ assert _default.default() == (MOCK_CREDENTIALS, "explicit-env")
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_explict_legacy_project_id(unused_get, monkeypatch):
+ monkeypatch.setenv(environment_vars.LEGACY_PROJECT, "explicit-env")
+ assert _default.default() == (MOCK_CREDENTIALS, "explicit-env")
+
+
+@mock.patch("logging.Logger.warning", autospec=True)
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gcloud_sdk_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gae_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gce_credentials",
+ return_value=(MOCK_CREDENTIALS, None),
+ autospec=True,
+)
+def test_default_without_project_id(
+ unused_gce, unused_gae, unused_sdk, unused_explicit, logger_warning
+):
+ assert _default.default() == (MOCK_CREDENTIALS, None)
+ logger_warning.assert_called_with(mock.ANY, mock.ANY, mock.ANY)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gcloud_sdk_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gae_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth._default._get_gce_credentials",
+ return_value=(None, None),
+ autospec=True,
+)
+def test_default_fail(unused_gce, unused_gae, unused_sdk, unused_explicit):
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ assert _default.default()
+
+ assert excinfo.match(_default._CLOUD_SDK_MISSING_CREDENTIALS)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+@mock.patch(
+ "google.auth.credentials.with_scopes_if_required",
+ return_value=MOCK_CREDENTIALS,
+ autospec=True,
+)
+def test_default_scoped(with_scopes, unused_get):
+ scopes = ["one", "two"]
+
+ credentials, project_id = _default.default(scopes=scopes)
+
+ assert credentials == with_scopes.return_value
+ assert project_id == mock.sentinel.project_id
+ with_scopes.assert_called_once_with(MOCK_CREDENTIALS, scopes, default_scopes=None)
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_quota_project(with_quota_project):
+ credentials, project_id = _default.default(quota_project_id="project-foo")
+
+ MOCK_CREDENTIALS.with_quota_project.assert_called_once_with("project-foo")
+ assert project_id == mock.sentinel.project_id
+
+
+@mock.patch(
+ "google.auth._default._get_explicit_environ_credentials",
+ return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
+ autospec=True,
+)
+def test_default_no_app_engine_compute_engine_module(unused_get):
+ """
+ google.auth.compute_engine and google.auth.app_engine are both optional
+ to allow not including them when using this package. This verifies
+ that default fails gracefully if these modules are absent
+ """
+ import sys
+
+ with mock.patch.dict("sys.modules"):
+ sys.modules["google.auth.compute_engine"] = None
+ sys.modules["google.auth.app_engine"] = None
+ assert _default.default() == (MOCK_CREDENTIALS, mock.sentinel.project_id)
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_identity_pool(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default()
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ # Without scopes, project ID cannot be determined.
+ assert project_id is None
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_identity_pool_impersonated(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"]
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ assert project_id is mock.sentinel.project_id
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+ # The credential.get_project_id should have been used in _get_external_account_credentials and default
+ assert get_project_id.call_count == 2
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+@mock.patch.dict(os.environ)
+def test_default_environ_external_credentials_project_from_env(
+ get_project_id, monkeypatch, tmpdir
+):
+ project_from_env = "project_from_env"
+ os.environ[environment_vars.PROJECT] = project_from_env
+
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"]
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ assert project_id == project_from_env
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+ # The credential.get_project_id should have been used only in _get_external_account_credentials
+ assert get_project_id.call_count == 1
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+@mock.patch.dict(os.environ)
+def test_default_environ_external_credentials_legacy_project_from_env(
+ get_project_id, monkeypatch, tmpdir
+):
+ project_from_env = "project_from_env"
+ os.environ[environment_vars.LEGACY_PROJECT] = project_from_env
+
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"]
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ assert project_id == project_from_env
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+ # The credential.get_project_id should have been used only in _get_external_account_credentials
+ assert get_project_id.call_count == 1
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_aws_impersonated(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_AWS_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"]
+ )
+
+ assert isinstance(credentials, aws.Credentials)
+ assert not credentials.is_user
+ assert not credentials.is_workforce_pool
+ assert project_id is mock.sentinel.project_id
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_workforce(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_WORKFORCE_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"]
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert credentials.is_user
+ assert credentials.is_workforce_pool
+ assert project_id is mock.sentinel.project_id
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_workforce_impersonated(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_WORKFORCE_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"]
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert not credentials.is_user
+ assert credentials.is_workforce_pool
+ assert project_id is mock.sentinel.project_id
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_with_user_and_default_scopes_and_quota_project_id(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ scopes=["https://www.google.com/calendar/feeds"],
+ default_scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ quota_project_id="project-foo",
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert project_id is mock.sentinel.project_id
+ assert credentials.quota_project_id == "project-foo"
+ assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
+ assert credentials.default_scopes == [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_default_environ_external_credentials_explicit_request_with_scopes(
+ get_project_id, monkeypatch, tmpdir
+):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(IDENTITY_POOL_DATA))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(config_file))
+
+ credentials, project_id = _default.default(
+ request=mock.sentinel.request,
+ scopes=["https://www.googleapis.com/auth/cloud-platform"],
+ )
+
+ assert isinstance(credentials, identity_pool.Credentials)
+ assert project_id is mock.sentinel.project_id
+ # default() will initialize new credentials via with_scopes_if_required
+ # and potentially with_quota_project.
+ # As a result the caller of get_project_id() will not match the returned
+ # credentials.
+ get_project_id.assert_called_with(mock.ANY, request=mock.sentinel.request)
+
+
+def test_default_environ_external_credentials_bad_format(monkeypatch, tmpdir):
+ filename = tmpdir.join("external_account_bad.json")
+ filename.write(json.dumps({"type": "external_account"}))
+ monkeypatch.setenv(environment_vars.CREDENTIALS, str(filename))
+
+ with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
+ _default.default()
+
+ assert excinfo.match(
+ "Failed to load external account credentials from {}".format(str(filename))
+ )
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_warning_without_quota_project_id_for_user_creds(get_adc_path):
+ get_adc_path.return_value = AUTHORIZED_USER_CLOUD_SDK_FILE
+
+ with pytest.warns(UserWarning, match=_default._CLOUD_SDK_CREDENTIALS_WARNING):
+ credentials, project_id = _default.default(quota_project_id=None)
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_no_warning_with_quota_project_id_for_user_creds(get_adc_path):
+ get_adc_path.return_value = AUTHORIZED_USER_CLOUD_SDK_FILE
+
+ credentials, project_id = _default.default(quota_project_id="project-foo")
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_impersonated_service_account(get_adc_path):
+ get_adc_path.return_value = IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE
+
+ credentials, _ = _default.default()
+
+ assert isinstance(credentials, impersonated_credentials.Credentials)
+ assert isinstance(
+ credentials._source_credentials, google.oauth2.credentials.Credentials
+ )
+ assert credentials.service_account_email == "service-account-target@example.com"
+ assert credentials._delegates == ["service-account-delegate@example.com"]
+ assert not credentials._quota_project_id
+ assert not credentials._target_scopes
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_impersonated_service_account_set_scopes(get_adc_path):
+ get_adc_path.return_value = IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE
+ scopes = ["scope1", "scope2"]
+
+ credentials, _ = _default.default(scopes=scopes)
+ assert credentials._target_scopes == scopes
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_impersonated_service_account_set_default_scopes(get_adc_path):
+ get_adc_path.return_value = IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE
+ default_scopes = ["scope1", "scope2"]
+
+ credentials, _ = _default.default(default_scopes=default_scopes)
+ assert credentials._target_scopes == default_scopes
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_impersonated_service_account_set_both_scopes_and_default_scopes(
+ get_adc_path
+):
+ get_adc_path.return_value = IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE
+ scopes = ["scope1", "scope2"]
+ default_scopes = ["scope3", "scope4"]
+
+ credentials, _ = _default.default(scopes=scopes, default_scopes=default_scopes)
+ assert credentials._target_scopes == scopes
+
+
+@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
+def test_load_credentials_from_external_account_pluggable(get_project_id, tmpdir):
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(PLUGGABLE_DATA))
+ credentials, project_id = _default.load_credentials_from_file(str(config_file))
+
+ assert isinstance(credentials, pluggable.Credentials)
+ # Since no scopes are specified, the project ID cannot be determined.
+ assert project_id is None
+ assert get_project_id.called
+
+
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_default_gdch_service_account_credentials(get_adc_path):
+ get_adc_path.return_value = GDCH_SERVICE_ACCOUNT_FILE
+
+ creds, project = _default.default(quota_project_id="project-foo")
+
+ assert isinstance(creds, gdch_credentials.ServiceAccountCredentials)
+ assert creds._service_identity_name == "service_identity_name"
+ assert creds._audience is None
+ assert creds._token_uri == "https://service-identity.<Domain>/authenticate"
+ assert creds._ca_cert_path == "/path/to/ca/cert"
+ assert project == "project_foo"
+
+
+@mock.patch.dict(os.environ)
+@mock.patch(
+ "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
+)
+def test_quota_project_from_environment(get_adc_path):
+ get_adc_path.return_value = AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE
+
+ credentials, _ = _default.default(quota_project_id=None)
+ assert credentials.quota_project_id == "quota_project_id"
+
+ quota_from_env = "quota_from_env"
+ os.environ[environment_vars.GOOGLE_CLOUD_QUOTA_PROJECT] = quota_from_env
+ credentials, _ = _default.default(quota_project_id=None)
+ assert credentials.quota_project_id == quota_from_env
+
+ explicit_quota = "explicit_quota"
+ credentials, _ = _default.default(quota_project_id=explicit_quota)
+ assert credentials.quota_project_id == explicit_quota
+
+
+@mock.patch(
+ "google.auth.compute_engine._metadata.is_on_gce", return_value=True, autospec=True
+)
+@mock.patch(
+ "google.auth.compute_engine._metadata.get_project_id",
+ return_value="example-project",
+ autospec=True,
+)
+@mock.patch.dict(os.environ)
+def test_quota_gce_credentials(unused_get, unused_ping):
+ # No quota
+ credentials, project_id = _default._get_gce_credentials()
+ assert project_id == "example-project"
+ assert credentials.quota_project_id is None
+
+ # Quota from environment
+ quota_from_env = "quota_from_env"
+ os.environ[environment_vars.GOOGLE_CLOUD_QUOTA_PROJECT] = quota_from_env
+ credentials, project_id = _default._get_gce_credentials()
+ assert credentials.quota_project_id == quota_from_env
+
+ # Explicit quota
+ explicit_quota = "explicit_quota"
+ credentials, project_id = _default._get_gce_credentials(
+ quota_project_id=explicit_quota
+ )
+ assert credentials.quota_project_id == explicit_quota
diff --git a/contrib/python/google-auth/py3/tests/test__exponential_backoff.py b/contrib/python/google-auth/py3/tests/test__exponential_backoff.py
new file mode 100644
index 0000000000..06a54527e6
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test__exponential_backoff.py
@@ -0,0 +1,41 @@
+# Copyright 2022 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from google.auth import _exponential_backoff
+
+
+@mock.patch("time.sleep", return_value=None)
+def test_exponential_backoff(mock_time):
+ eb = _exponential_backoff.ExponentialBackoff()
+ curr_wait = eb._current_wait_in_seconds
+ iteration_count = 0
+
+ for attempt in eb:
+ backoff_interval = mock_time.call_args[0][0]
+ jitter = curr_wait * eb._randomization_factor
+
+ assert (curr_wait - jitter) <= backoff_interval <= (curr_wait + jitter)
+ assert attempt == iteration_count + 1
+ assert eb.backoff_count == iteration_count + 1
+ assert eb._current_wait_in_seconds == eb._multiplier ** (iteration_count + 1)
+
+ curr_wait = eb._current_wait_in_seconds
+ iteration_count += 1
+
+ assert eb.total_attempts == _exponential_backoff._DEFAULT_RETRY_TOTAL_ATTEMPTS
+ assert eb.backoff_count == _exponential_backoff._DEFAULT_RETRY_TOTAL_ATTEMPTS
+ assert iteration_count == _exponential_backoff._DEFAULT_RETRY_TOTAL_ATTEMPTS
+ assert mock_time.call_count == _exponential_backoff._DEFAULT_RETRY_TOTAL_ATTEMPTS
diff --git a/contrib/python/google-auth/py3/tests/test__helpers.py b/contrib/python/google-auth/py3/tests/test__helpers.py
new file mode 100644
index 0000000000..c1f1d812e5
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test__helpers.py
@@ -0,0 +1,170 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import urllib
+
+import pytest # type: ignore
+
+from google.auth import _helpers
+
+
+class SourceClass(object):  # donor class whose docstring is copied in the copy_docstring tests below
+ def func(self): # pragma: NO COVER
+ """example docstring"""
+
+
+def test_copy_docstring_success():
+ def func(): # pragma: NO COVER
+ pass
+
+ _helpers.copy_docstring(SourceClass)(func)
+
+ assert func.__doc__ == SourceClass.func.__doc__
+
+
+def test_copy_docstring_conflict():  # target already has a docstring -> must raise
+ def func(): # pragma: NO COVER
+ """existing docstring"""
+ pass
+
+ with pytest.raises(ValueError):
+ _helpers.copy_docstring(SourceClass)(func)
+
+
+def test_copy_docstring_non_existing():  # no same-named attribute on the source class -> must raise
+ def func2(): # pragma: NO COVER
+ pass
+
+ with pytest.raises(AttributeError):
+ _helpers.copy_docstring(SourceClass)(func2)
+
+
+def test_utcnow():
+ assert isinstance(_helpers.utcnow(), datetime.datetime)
+
+
+def test_datetime_to_secs():
+ assert _helpers.datetime_to_secs(datetime.datetime(1970, 1, 1)) == 0  # epoch maps to zero
+ assert _helpers.datetime_to_secs(datetime.datetime(1990, 5, 29)) == 643939200
+
+
+def test_to_bytes_with_bytes():
+ value = b"bytes-val"
+ assert _helpers.to_bytes(value) == value  # bytes pass through unchanged
+
+
+def test_to_bytes_with_unicode():
+ value = u"string-val"
+ encoded_value = b"string-val"
+ assert _helpers.to_bytes(value) == encoded_value
+
+
+def test_to_bytes_with_nonstring_type():
+ with pytest.raises(ValueError):
+ _helpers.to_bytes(object())
+
+
+def test_from_bytes_with_unicode():
+ value = u"bytes-val"
+ assert _helpers.from_bytes(value) == value  # text passes through unchanged
+
+
+def test_from_bytes_with_bytes():
+ value = b"string-val"
+ decoded_value = u"string-val"
+ assert _helpers.from_bytes(value) == decoded_value
+
+
+def test_from_bytes_with_nonstring_type():
+ with pytest.raises(ValueError):
+ _helpers.from_bytes(object())
+
+
+def _assert_query(url, expected):  # helper: compare a URL's parsed query dict against `expected`
+ parts = urllib.parse.urlsplit(url)
+ query = urllib.parse.parse_qs(parts.query)
+ assert query == expected
+
+
+def test_update_query_params_no_params():
+ uri = "http://www.google.com"
+ updated = _helpers.update_query(uri, {"a": "b"})
+ assert updated == uri + "?a=b"
+
+
+def test_update_query_existing_params():
+ uri = "http://www.google.com?x=y"
+ updated = _helpers.update_query(uri, {"a": "b", "c": "d&"})
+ _assert_query(updated, {"x": ["y"], "a": ["b"], "c": ["d&"]})  # existing param kept; "&" round-trips
+
+
+def test_update_query_replace_param():
+ base_uri = "http://www.google.com"
+ uri = base_uri + "?x=a"
+ updated = _helpers.update_query(uri, {"x": "b", "y": "c"})
+ _assert_query(updated, {"x": ["b"], "y": ["c"]})  # same-named param is replaced, not appended
+
+
+def test_update_query_remove_param():
+ base_uri = "http://www.google.com"
+ uri = base_uri + "?x=a"
+ updated = _helpers.update_query(uri, {"y": "c"}, remove=["x"])
+ _assert_query(updated, {"y": ["c"]})
+
+
+def test_scopes_to_string():  # accepts tuples, lists, and generators; joins with a single space
+ cases = [
+ ("", ()),
+ ("", []),
+ ("", ("",)),
+ ("", [""]),
+ ("a", ("a",)),
+ ("b", ["b"]),
+ ("a b", ["a", "b"]),
+ ("a b", ("a", "b")),
+ ("a b", (s for s in ["a", "b"])),
+ ]
+ for expected, case in cases:
+ assert _helpers.scopes_to_string(case) == expected
+
+
+def test_string_to_scopes():
+ cases = [("", []), ("a", ["a"]), ("a b c d e f", ["a", "b", "c", "d", "e", "f"])]
+
+ for case, expected in cases:
+ assert _helpers.string_to_scopes(case) == expected
+
+
+def test_padded_urlsafe_b64decode():  # decoder must accept both padded and unpadded input
+ cases = [
+ ("YQ==", b"a"),
+ ("YQ", b"a"),
+ ("YWE=", b"aa"),
+ ("YWE", b"aa"),
+ ("YWFhYQ==", b"aaaa"),
+ ("YWFhYQ", b"aaaa"),
+ ("YWFhYWE=", b"aaaaa"),
+ ("YWFhYWE", b"aaaaa"),
+ ]
+
+ for case, expected in cases:
+ assert _helpers.padded_urlsafe_b64decode(case) == expected
+
+
+def test_unpadded_urlsafe_b64encode():  # encoder output carries no "=" padding
+ cases = [(b"", b""), (b"a", b"YQ"), (b"aa", b"YWE"), (b"aaa", b"YWFh")]
+
+ for case, expected in cases:
+ assert _helpers.unpadded_urlsafe_b64encode(case) == expected
diff --git a/contrib/python/google-auth/py3/tests/test__oauth2client.py b/contrib/python/google-auth/py3/tests/test__oauth2client.py
new file mode 100644
index 0000000000..72db6535bc
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test__oauth2client.py
@@ -0,0 +1,178 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import importlib
+import os
+import sys
+
+import mock
+import pytest # type: ignore
+
+try:
+ import oauth2client.client # type: ignore
+ import oauth2client.contrib.gce # type: ignore
+ import oauth2client.service_account # type: ignore
+except ImportError: # pragma: NO COVER
+ pytest.skip(
+ "Skipping oauth2client tests since oauth2client is not installed.",
+ allow_module_level=True,
+ )
+
+from google.auth import _oauth2client
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+
+def test__convert_oauth2_credentials():
+ old_credentials = oauth2client.client.OAuth2Credentials(
+ "access_token",
+ "client_id",
+ "client_secret",
+ "refresh_token",
+ datetime.datetime.min,
+ "token_uri",
+ "user_agent",
+ scopes="one two",
+ )
+
+ new_credentials = _oauth2client._convert_oauth2_credentials(old_credentials)
+
+ assert new_credentials.token == old_credentials.access_token  # every field must survive conversion
+ assert new_credentials._refresh_token == old_credentials.refresh_token
+ assert new_credentials._client_id == old_credentials.client_id
+ assert new_credentials._client_secret == old_credentials.client_secret
+ assert new_credentials._token_uri == old_credentials.token_uri
+ assert new_credentials.scopes == old_credentials.scopes
+
+
+def test__convert_service_account_credentials():
+ old_class = oauth2client.service_account.ServiceAccountCredentials
+ old_credentials = old_class.from_json_keyfile_name(SERVICE_ACCOUNT_JSON_FILE)
+
+ new_credentials = _oauth2client._convert_service_account_credentials(
+ old_credentials
+ )
+
+ assert (
+ new_credentials.service_account_email == old_credentials.service_account_email
+ )
+ assert new_credentials._signer.key_id == old_credentials._private_key_id
+ assert new_credentials._token_uri == old_credentials.token_uri
+
+
+def test__convert_service_account_credentials_with_jwt():  # same converter handles the JWT subclass
+ old_class = oauth2client.service_account._JWTAccessCredentials
+ old_credentials = old_class.from_json_keyfile_name(SERVICE_ACCOUNT_JSON_FILE)
+
+ new_credentials = _oauth2client._convert_service_account_credentials(
+ old_credentials
+ )
+
+ assert (
+ new_credentials.service_account_email == old_credentials.service_account_email
+ )
+ assert new_credentials._signer.key_id == old_credentials._private_key_id
+ assert new_credentials._token_uri == old_credentials.token_uri
+
+
+def test__convert_gce_app_assertion_credentials():
+ old_credentials = oauth2client.contrib.gce.AppAssertionCredentials(
+ email="some_email"
+ )
+
+ new_credentials = _oauth2client._convert_gce_app_assertion_credentials(
+ old_credentials
+ )
+
+ assert (
+ new_credentials.service_account_email == old_credentials.service_account_email
+ )
+
+
+@pytest.fixture
+def mock_oauth2client_gae_imports(mock_non_existent_module):  # stubs out GAE-only modules so imports succeed
+ mock_non_existent_module("google.appengine.api.app_identity")
+ mock_non_existent_module("google.appengine.ext.ndb")
+ mock_non_existent_module("google.appengine.ext.webapp.util")
+ mock_non_existent_module("webapp2")
+
+
+@mock.patch("google.auth.app_engine.app_identity")
+def _test__convert_appengine_app_assertion_credentials(  # leading underscore: not collected by pytest
+ app_identity, mock_oauth2client_gae_imports
+):
+
+ import oauth2client.contrib.appengine # type: ignore
+
+ service_account_id = "service_account_id"
+ old_credentials = oauth2client.contrib.appengine.AppAssertionCredentials(
+ scope="one two", service_account_id=service_account_id
+ )
+
+ new_credentials = _oauth2client._convert_appengine_app_assertion_credentials(
+ old_credentials
+ )
+
+ assert new_credentials.scopes == ["one", "two"]
+ assert new_credentials._service_account_id == old_credentials.service_account_id
+
+
+class FakeCredentials(object):  # sentinel credentials type used as a conversion-map key below
+ pass
+
+
+def test_convert_success():
+ convert_function = mock.Mock(spec=["__call__"])
+ conversion_map_patch = mock.patch.object(
+ _oauth2client, "_CLASS_CONVERSION_MAP", {FakeCredentials: convert_function}
+ )
+ credentials = FakeCredentials()
+
+ with conversion_map_patch:
+ result = _oauth2client.convert(credentials)
+
+ convert_function.assert_called_once_with(credentials)  # convert() dispatches on exact class
+ assert result == convert_function.return_value
+
+
+def test_convert_not_found():
+ with pytest.raises(ValueError) as excinfo:
+ _oauth2client.convert("a string is not a real credentials class")
+
+ assert excinfo.match("Unable to convert")
+
+
+@pytest.fixture
+def reset__oauth2client_module():
+ """Reloads the _oauth2client module after a test."""
+ importlib.reload(_oauth2client)
+
+
+def _test_import_has_app_engine(  # leading underscore: not collected by pytest
+ mock_oauth2client_gae_imports, reset__oauth2client_module
+):
+ importlib.reload(_oauth2client)
+ assert _oauth2client._HAS_APPENGINE
+
+
+def test_import_without_oauth2client(monkeypatch, reset__oauth2client_module):
+ monkeypatch.setitem(sys.modules, "oauth2client", None)  # None entry makes the import raise ImportError
+ with pytest.raises(ImportError) as excinfo:
+ importlib.reload(_oauth2client)
+
+ assert excinfo.match("oauth2client")
diff --git a/contrib/python/google-auth/py3/tests/test__service_account_info.py b/contrib/python/google-auth/py3/tests/test__service_account_info.py
new file mode 100644
index 0000000000..db8106081c
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test__service_account_info.py
@@ -0,0 +1,83 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import pytest # type: ignore
+
+from google.auth import _service_account_info
+from google.auth import crypt
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+GDCH_SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "gdch_service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:  # fixture data loaded once at module import
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+with open(GDCH_SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ GDCH_SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+def test_from_dict():
+ signer = _service_account_info.from_dict(SERVICE_ACCOUNT_INFO)
+ assert isinstance(signer, crypt.RSASigner)  # RSA is the default signer type
+ assert signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+
+
+def test_from_dict_es256_signer():
+ signer = _service_account_info.from_dict(
+ GDCH_SERVICE_ACCOUNT_INFO, use_rsa_signer=False
+ )
+ assert isinstance(signer, crypt.ES256Signer)  # opting out of RSA yields an ES256 signer
+ assert signer.key_id == GDCH_SERVICE_ACCOUNT_INFO["private_key_id"]
+
+
+def test_from_dict_bad_private_key():
+ info = SERVICE_ACCOUNT_INFO.copy()  # copy so the shared fixture dict is not mutated
+ info["private_key"] = "garbage"
+
+ with pytest.raises(ValueError) as excinfo:
+ _service_account_info.from_dict(info)
+
+ assert excinfo.match(r"key")
+
+
+def test_from_dict_bad_format():
+ with pytest.raises(ValueError) as excinfo:
+ _service_account_info.from_dict({}, require=("meep",))
+
+ assert excinfo.match(r"missing fields")
+
+
+def test_from_filename():
+ info, signer = _service_account_info.from_filename(SERVICE_ACCOUNT_JSON_FILE)
+
+ for key, value in SERVICE_ACCOUNT_INFO.items():  # parsed info must mirror the file contents
+ assert info[key] == value
+
+ assert isinstance(signer, crypt.RSASigner)
+ assert signer.key_id == SERVICE_ACCOUNT_INFO["private_key_id"]
+
+
+def test_from_filename_es256_signer():
+ _, signer = _service_account_info.from_filename(
+ GDCH_SERVICE_ACCOUNT_JSON_FILE, use_rsa_signer=False
+ )
+
+ assert isinstance(signer, crypt.ES256Signer)
+ assert signer.key_id == GDCH_SERVICE_ACCOUNT_INFO["private_key_id"]
diff --git a/contrib/python/google-auth/py3/tests/test_api_key.py b/contrib/python/google-auth/py3/tests/test_api_key.py
new file mode 100644
index 0000000000..9ba7b1426b
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_api_key.py
@@ -0,0 +1,45 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest # type: ignore
+
+from google.auth import api_key
+
+
+def test_credentials_constructor():  # an empty key string must be rejected
+ with pytest.raises(ValueError) as excinfo:
+ api_key.Credentials("")
+
+ assert excinfo.match(r"Token must be a non-empty API key string")
+
+
+def test_expired_and_valid():  # API-key credentials never expire; refresh is a no-op
+ credentials = api_key.Credentials("api-key")
+
+ assert credentials.valid
+ assert credentials.token == "api-key"
+ assert not credentials.expired
+
+ credentials.refresh(None)
+ assert credentials.valid
+ assert credentials.token == "api-key"
+ assert not credentials.expired
+
+
+def test_before_request():
+ credentials = api_key.Credentials("api-key")
+ headers = {}
+
+ credentials.before_request(None, "http://example.com", "GET", headers)
+ assert headers["x-goog-api-key"] == "api-key"  # key is sent via header, not the URL
diff --git a/contrib/python/google-auth/py3/tests/test_app_engine.py b/contrib/python/google-auth/py3/tests/test_app_engine.py
new file mode 100644
index 0000000000..ca085bd698
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_app_engine.py
@@ -0,0 +1,217 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import mock
+import pytest # type: ignore
+
+from google.auth import app_engine
+
+
+class _AppIdentityModule(object):  # spec-only stand-in; autospec'd by the app_identity fixture below
+ """The interface of the App Identity app engine module.
+ See https://cloud.google.com/appengine/docs/standard/python/refdocs
+ /google.appengine.api.app_identity.app_identity
+ """
+
+ def get_application_id(self):
+ raise NotImplementedError()
+
+ def sign_blob(self, bytes_to_sign, deadline=None):
+ raise NotImplementedError()
+
+ def get_service_account_name(self, deadline=None):
+ raise NotImplementedError()
+
+ def get_access_token(self, scopes, service_account_id=None):
+ raise NotImplementedError()
+
+
+@pytest.fixture
+def app_identity(monkeypatch):
+ """Mocks the app_identity module for google.auth.app_engine."""
+ app_identity_module = mock.create_autospec(_AppIdentityModule, instance=True)
+ monkeypatch.setattr(app_engine, "app_identity", app_identity_module)
+ yield app_identity_module  # yield keeps the monkeypatch active for the test's duration
+
+
+def test_get_project_id(app_identity):
+ app_identity.get_application_id.return_value = mock.sentinel.project
+ assert app_engine.get_project_id() == mock.sentinel.project
+
+
+@mock.patch.object(app_engine, "app_identity", new=None)
+def test_get_project_id_missing_apis():
+ with pytest.raises(EnvironmentError) as excinfo:
+ assert app_engine.get_project_id()
+
+ assert excinfo.match(r"App Engine APIs are not available")
+
+
+class TestSigner(object):
+ def test_key_id(self, app_identity):
+ app_identity.sign_blob.return_value = (
+ mock.sentinel.key_id,
+ mock.sentinel.signature,
+ )
+
+ signer = app_engine.Signer()
+
+ assert signer.key_id is None  # App Engine signer exposes no key id
+
+ def test_sign(self, app_identity):
+ app_identity.sign_blob.return_value = (
+ mock.sentinel.key_id,
+ mock.sentinel.signature,
+ )
+
+ signer = app_engine.Signer()
+ to_sign = b"123"
+
+ signature = signer.sign(to_sign)
+
+ assert signature == mock.sentinel.signature  # signature only; key id is discarded
+ app_identity.sign_blob.assert_called_with(to_sign)
+
+
+class TestCredentials(object):
+ @mock.patch.object(app_engine, "app_identity", new=None)
+ def test_missing_apis(self):
+ with pytest.raises(EnvironmentError) as excinfo:
+ app_engine.Credentials()
+
+ assert excinfo.match(r"App Engine APIs are not available")
+
+ def test_default_state(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ # No token acquired yet
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expired
+ # Scopes are required
+ assert not credentials.scopes
+ assert not credentials.default_scopes
+ assert credentials.requires_scopes
+ assert not credentials.quota_project_id
+
+ def test_with_scopes(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(["email"])
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_default_scopes(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ assert not credentials.scopes
+ assert not credentials.default_scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(
+ scopes=None, default_scopes=["email"]
+ )
+
+ assert scoped_credentials.has_scopes(["email"])  # default scopes alone satisfy the requirement
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_quota_project(self, app_identity):
+ credentials = app_engine.Credentials()
+
+ assert not credentials.scopes
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+
+ def test_service_account_email_implicit(self, app_identity):
+ app_identity.get_service_account_name.return_value = (
+ mock.sentinel.service_account_email
+ )
+ credentials = app_engine.Credentials()
+
+ assert credentials.service_account_email == mock.sentinel.service_account_email
+ assert app_identity.get_service_account_name.called  # email resolved lazily via the API
+
+ def test_service_account_email_explicit(self, app_identity):
+ credentials = app_engine.Credentials(
+ service_account_id=mock.sentinel.service_account_email
+ )
+
+ assert credentials.service_account_email == mock.sentinel.service_account_email
+ assert not app_identity.get_service_account_name.called  # explicit id skips the API call
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh(self, utcnow, app_identity):
+ token = "token"
+ ttl = 643942923
+ app_identity.get_access_token.return_value = token, ttl
+ credentials = app_engine.Credentials(
+ scopes=["email"], default_scopes=["profile"]
+ )
+
+ credentials.refresh(None)
+
+ app_identity.get_access_token.assert_called_with(
+ credentials.scopes, credentials._service_account_id
+ )
+ assert credentials.token == token
+ assert credentials.expiry == datetime.datetime(1990, 5, 29, 1, 2, 3)  # utcnow (min) + ttl seconds
+ assert credentials.valid
+ assert not credentials.expired
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_with_default_scopes(self, utcnow, app_identity):
+ token = "token"
+ ttl = 643942923
+ app_identity.get_access_token.return_value = token, ttl
+ credentials = app_engine.Credentials(default_scopes=["email"])
+
+ credentials.refresh(None)
+
+ app_identity.get_access_token.assert_called_with(
+ credentials.default_scopes, credentials._service_account_id
+ )
+ assert credentials.token == token
+ assert credentials.expiry == datetime.datetime(1990, 5, 29, 1, 2, 3)  # utcnow (min) + ttl seconds
+ assert credentials.valid
+ assert not credentials.expired
+
+ def test_sign_bytes(self, app_identity):
+ app_identity.sign_blob.return_value = (
+ mock.sentinel.key_id,
+ mock.sentinel.signature,
+ )
+ credentials = app_engine.Credentials()
+ to_sign = b"123"
+
+ signature = credentials.sign_bytes(to_sign)
+
+ assert signature == mock.sentinel.signature
+ app_identity.sign_blob.assert_called_with(to_sign)
+
+ def test_signer(self, app_identity):
+ credentials = app_engine.Credentials()
+ assert isinstance(credentials.signer, app_engine.Signer)
+
+ def test_signer_email(self, app_identity):
+ credentials = app_engine.Credentials()
+ assert credentials.signer_email == credentials.service_account_email
diff --git a/contrib/python/google-auth/py3/tests/test_aws.py b/contrib/python/google-auth/py3/tests/test_aws.py
new file mode 100644
index 0000000000..39138ab12e
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_aws.py
@@ -0,0 +1,2125 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+import os
+import urllib.parse
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import aws
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+
+
+IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
+)
+
+LANG_LIBRARY_METRICS_HEADER_VALUE = "gl-python/3.7 auth/1.1"
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password".
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+SERVICE_ACCOUNT_IMPERSONATION_URL_BASE = (
+ "https://us-east1-iamcredentials.googleapis.com"
+)
+SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE = "/v1/projects/-/serviceAccounts/{}:generateAccessToken".format(
+ SERVICE_ACCOUNT_EMAIL
+)
+SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ SERVICE_ACCOUNT_IMPERSONATION_URL_BASE + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+)
+QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+SCOPES = ["scope1", "scope2"]
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
+SUBJECT_TOKEN_TYPE = "urn:ietf:params:aws:token-type:aws4_request"
+AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+REGION_URL = "http://169.254.169.254/latest/meta-data/placement/availability-zone"
+IMDSV2_SESSION_TOKEN_URL = "http://169.254.169.254/latest/api/token"
+SECURITY_CREDS_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials"
+REGION_URL_IPV6 = "http://[fd00:ec2::254]/latest/meta-data/placement/availability-zone"
+IMDSV2_SESSION_TOKEN_URL_IPV6 = "http://[fd00:ec2::254]/latest/api/token"
+SECURITY_CREDS_URL_IPV6 = (
+ "http://[fd00:ec2::254]/latest/meta-data/iam/security-credentials"
+)
+CRED_VERIFICATION_URL = (
+ "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
+)
+# Sample fictitious AWS security credentials to be used with tests that require a session token.
+ACCESS_KEY_ID = "AKIAIOSFODNN7EXAMPLE"
+SECRET_ACCESS_KEY = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+TOKEN = "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE"
+# To avoid json.dumps() differing behavior from one version to other,
+# the JSON payload is hardcoded.
+REQUEST_PARAMS = '{"KeySchema":[{"KeyType":"HASH","AttributeName":"Id"}],"TableName":"TestTable","AttributeDefinitions":[{"AttributeName":"Id","AttributeType":"S"}],"ProvisionedThroughput":{"WriteCapacityUnits":5,"ReadCapacityUnits":5}}'
+# Each tuple contains the following entries:
+# region, time, credentials, original_request, signed_request
+
+DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
+VALID_TOKEN_URLS = [
+ "https://sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.com",
+ "https://US-EAST-1.sts.googleapis.com",
+ "https://sts.us-east-1.googleapis.com",
+ "https://sts.US-WEST-1.googleapis.com",
+ "https://us-east-1-sts.googleapis.com",
+ "https://US-WEST-1-sts.googleapis.com",
+ "https://us-west-1-sts.googleapis.com/path?query",
+ "https://sts-us-east-1.p.googleapis.com",
+]
+INVALID_TOKEN_URLS = [
+ "https://iamcredentials.googleapis.com",
+ "sts.googleapis.com",
+ "https://",
+ "http://sts.googleapis.com",
+ "https://st.s.googleapis.com",
+ "https://us-eas\t-1.sts.googleapis.com",
+ "https:/us-east-1.sts.googleapis.com",
+ "https://US-WE/ST-1-sts.googleapis.com",
+ "https://sts-us-east-1.googleapis.com",
+ "https://sts-US-WEST-1.googleapis.com",
+ "testhttps://us-east-1.sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.comevil.com",
+ "https://us-east-1.us-east-1.sts.googleapis.com",
+ "https://us-ea.s.t.sts.googleapis.com",
+ "https://sts.googleapis.comevil.com",
+ "hhttps://us-east-1.sts.googleapis.com",
+ "https://us- -1.sts.googleapis.com",
+ "https://-sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.com.evil.com",
+ "https://sts.pgoogleapis.com",
+ "https://p.googleapis.com",
+ "https://sts.p.com",
+ "http://sts.p.googleapis.com",
+ "https://xyz-sts.p.googleapis.com",
+ "https://sts-xyz.123.p.googleapis.com",
+ "https://sts-xyz.p1.googleapis.com",
+ "https://sts-xyz.p.foo.com",
+ "https://sts-xyz.p.foo.googleapis.com",
+]
+VALID_SERVICE_ACCOUNT_IMPERSONATION_URLS = [
+ "https://iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.com",
+ "https://US-EAST-1.iamcredentials.googleapis.com",
+ "https://iamcredentials.us-east-1.googleapis.com",
+ "https://iamcredentials.US-WEST-1.googleapis.com",
+ "https://us-east-1-iamcredentials.googleapis.com",
+ "https://US-WEST-1-iamcredentials.googleapis.com",
+ "https://us-west-1-iamcredentials.googleapis.com/path?query",
+ "https://iamcredentials-us-east-1.p.googleapis.com",
+]
+INVALID_SERVICE_ACCOUNT_IMPERSONATION_URLS = [
+ "https://sts.googleapis.com",
+ "iamcredentials.googleapis.com",
+ "https://",
+ "http://iamcredentials.googleapis.com",
+ "https://iamcre.dentials.googleapis.com",
+ "https://us-eas\t-1.iamcredentials.googleapis.com",
+ "https:/us-east-1.iamcredentials.googleapis.com",
+ "https://US-WE/ST-1-iamcredentials.googleapis.com",
+ "https://iamcredentials-us-east-1.googleapis.com",
+ "https://iamcredentials-US-WEST-1.googleapis.com",
+ "testhttps://us-east-1.iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.comevil.com",
+ "https://us-east-1.us-east-1.iamcredentials.googleapis.com",
+ "https://us-ea.s.t.iamcredentials.googleapis.com",
+ "https://iamcredentials.googleapis.comevil.com",
+ "hhttps://us-east-1.iamcredentials.googleapis.com",
+ "https://us- -1.iamcredentials.googleapis.com",
+ "https://-iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.com.evil.com",
+ "https://iamcredentials.pgoogleapis.com",
+ "https://p.googleapis.com",
+ "https://iamcredentials.p.com",
+ "http://iamcredentials.p.googleapis.com",
+ "https://xyz-iamcredentials.p.googleapis.com",
+ "https://iamcredentials-xyz.123.p.googleapis.com",
+ "https://iamcredentials-xyz.p1.googleapis.com",
+ "https://iamcredentials-xyz.p.foo.com",
+ "https://iamcredentials-xyz.p.foo.googleapis.com",
+]
+TEST_FIXTURES = [
+ # GET request (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with relative path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-relative-relative.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-relative-relative.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/foo/bar/../..",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/foo/bar/../..",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with /./ path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-dot-slash.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-dot-slash.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/./",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/./",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with pointless dot path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-pointless-dot.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-slash-pointless-dot.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/./foo",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/./foo",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with utf8 path (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-utf8.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-utf8.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/%E1%88%B4",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/%E1%88%B4",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with duplicate query key (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-key-case.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-key-case.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/?foo=Zoo&foo=aha",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?foo=Zoo&foo=aha",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with duplicate out of order query key (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-value.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-query-order-value.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/?foo=b&foo=a",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?foo=b&foo=a",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with utf8 query (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-ut8-query.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-vanilla-ut8-query.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "GET",
+ "url": "https://host.foo.com/?{}=bar".format(
+ urllib.parse.unquote("%E1%88%B4")
+ ),
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?{}=bar".format(
+ urllib.parse.unquote("%E1%88%B4")
+ ),
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # POST request with sorted headers (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-key-sort.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-key-sort.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT", "ZOO": "zoobar"},
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ "ZOO": "zoobar",
+ },
+ },
+ ),
+ # POST request with upper case header value from AWS Python test harness.
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-value-case.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-header-value-case.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT", "zoo": "ZOOBAR"},
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ "zoo": "ZOOBAR",
+ },
+ },
+ ),
+ # POST request with header and no body (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT", "p": "phfft"},
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ "p": "phfft",
+ },
+ },
+ ),
+ # POST request with body and no header (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-x-www-form-urlencoded.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-x-www-form-urlencoded.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/",
+ "headers": {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ "data": "foo=bar",
+ },
+ {
+ "url": "https://host.foo.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
+ "host": "host.foo.com",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ "data": "foo=bar",
+ },
+ ),
+ # POST request with querystring (AWS botocore tests).
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-vanilla-query.req
+ # https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/post-vanilla-query.sreq
+ (
+ "us-east-1",
+ "2011-09-09T23:36:00Z",
+ {
+ "access_key_id": "AKIDEXAMPLE",
+ "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
+ },
+ {
+ "method": "POST",
+ "url": "https://host.foo.com/?foo=bar",
+ "headers": {"date": "Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ {
+ "url": "https://host.foo.com/?foo=bar",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ "host": "host.foo.com",
+ "date": "Mon, 09 Sep 2011 23:36:00 GMT",
+ },
+ },
+ ),
+ # GET request with session token credentials.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ },
+ {
+ "method": "GET",
+ "url": "https://ec2.us-east-2.amazonaws.com?Action=DescribeRegions&Version=2013-10-15",
+ },
+ {
+ "url": "https://ec2.us-east-2.amazonaws.com?Action=DescribeRegions&Version=2013-10-15",
+ "method": "GET",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/ec2/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=41e226f997bf917ec6c9b2b14218df0874225f13bb153236c247881e614fafc9",
+ "host": "ec2.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ "x-amz-security-token": TOKEN,
+ },
+ },
+ ),
+ # POST request with session token credentials.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ },
+ {
+ "method": "POST",
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ },
+ {
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/sts/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=596aa990b792d763465d73703e684ca273c45536c6d322c31be01a41d02e5b60",
+ "host": "sts.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ "x-amz-security-token": TOKEN,
+ },
+ },
+ ),
+ # POST request with computed x-amz-date and no data.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {"access_key_id": ACCESS_KEY_ID, "secret_access_key": SECRET_ACCESS_KEY},
+ {
+ "method": "POST",
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ },
+ {
+ "url": "https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/sts/aws4_request, SignedHeaders=host;x-amz-date, Signature=9e722e5b7bfa163447e2a14df118b45ebd283c5aea72019bdf921d6e7dc01a9a",
+ "host": "sts.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ },
+ },
+ ),
+ # POST request with session token and additional headers/data.
+ (
+ "us-east-2",
+ "2020-08-11T06:55:22Z",
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ },
+ {
+ "method": "POST",
+ "url": "https://dynamodb.us-east-2.amazonaws.com/",
+ "headers": {
+ "Content-Type": "application/x-amz-json-1.0",
+ "x-amz-target": "DynamoDB_20120810.CreateTable",
+ },
+ "data": REQUEST_PARAMS,
+ },
+ {
+ "url": "https://dynamodb.us-east-2.amazonaws.com/",
+ "method": "POST",
+ "headers": {
+ "Authorization": "AWS4-HMAC-SHA256 Credential="
+ + ACCESS_KEY_ID
+ + "/20200811/us-east-2/dynamodb/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-amz-security-token;x-amz-target, Signature=eb8bce0e63654bba672d4a8acb07e72d69210c1797d56ce024dbbc31beb2a2c7",
+ "host": "dynamodb.us-east-2.amazonaws.com",
+ "x-amz-date": "20200811T065522Z",
+ "Content-Type": "application/x-amz-json-1.0",
+ "x-amz-target": "DynamoDB_20120810.CreateTable",
+ "x-amz-security-token": TOKEN,
+ },
+ "data": REQUEST_PARAMS,
+ },
+ ),
+]
+
+
class TestRequestSigner(object):
    """Tests for aws.RequestSigner (AWS Signature Version 4 signing)."""

    @pytest.mark.parametrize(
        "region, time, credentials, original_request, signed_request", TEST_FIXTURES
    )
    @mock.patch("google.auth._helpers.utcnow")
    def test_get_request_options(
        self, utcnow, region, time, credentials, original_request, signed_request
    ):
        """Signing each fixture request yields the expected signed request."""
        # Freeze the clock so the generated x-amz-date and signature are
        # deterministic and match the precomputed fixture values.
        utcnow.return_value = datetime.datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ")
        signer = aws.RequestSigner(region)

        result = signer.get_request_options(
            credentials,
            original_request.get("url"),
            original_request.get("method"),
            original_request.get("data"),
            original_request.get("headers"),
        )

        assert result == signed_request

    def test_get_request_options_with_missing_scheme_url(self):
        """A URL with no scheme at all is rejected."""
        signer = aws.RequestSigner("us-east-2")
        test_credentials = {
            "access_key_id": ACCESS_KEY_ID,
            "secret_access_key": SECRET_ACCESS_KEY,
        }

        with pytest.raises(ValueError) as excinfo:
            signer.get_request_options(test_credentials, "invalid", "POST")

        assert excinfo.match(r"Invalid AWS service URL")

    def test_get_request_options_with_invalid_scheme_url(self):
        """A non-HTTPS scheme is rejected."""
        signer = aws.RequestSigner("us-east-2")
        test_credentials = {
            "access_key_id": ACCESS_KEY_ID,
            "secret_access_key": SECRET_ACCESS_KEY,
        }

        with pytest.raises(ValueError) as excinfo:
            signer.get_request_options(test_credentials, "http://invalid", "POST")

        assert excinfo.match(r"Invalid AWS service URL")

    def test_get_request_options_with_missing_hostname_url(self):
        """A URL with a scheme but no hostname is rejected."""
        signer = aws.RequestSigner("us-east-2")
        test_credentials = {
            "access_key_id": ACCESS_KEY_ID,
            "secret_access_key": SECRET_ACCESS_KEY,
        }

        with pytest.raises(ValueError) as excinfo:
            signer.get_request_options(test_credentials, "https://", "POST")

        assert excinfo.match(r"Invalid AWS service URL")
+
+
class TestCredentials(object):
    """Tests for aws.Credentials (AWS workload identity federation)."""

    # Region and role reported by the fake AWS metadata endpoints.
    AWS_REGION = "us-east-2"
    AWS_ROLE = "gcp-aws-role"
    # Canned payload served by the security-credentials metadata endpoint.
    AWS_SECURITY_CREDENTIALS_RESPONSE = {
        "AccessKeyId": ACCESS_KEY_ID,
        "SecretAccessKey": SECRET_ACCESS_KEY,
        "Token": TOKEN,
    }
    AWS_IMDSV2_SESSION_TOKEN = "awsimdsv2sessiontoken"
    # Timestamp used to produce deterministic SigV4 signatures in tests.
    AWS_SIGNATURE_TIME = "2020-08-11T06:55:22Z"
    # Minimal valid AWS credential source (IMDSv1 style, no session token URL).
    CREDENTIAL_SOURCE = {
        "environment_id": "aws1",
        "region_url": REGION_URL,
        "url": SECURITY_CREDS_URL,
        "regional_cred_verification_url": CRED_VERIFICATION_URL,
    }
    # IPv6 variant which additionally requires an IMDSv2 session token.
    CREDENTIAL_SOURCE_IPV6 = {
        "environment_id": "aws1",
        "region_url": REGION_URL_IPV6,
        "url": SECURITY_CREDS_URL_IPV6,
        "regional_cred_verification_url": CRED_VERIFICATION_URL,
        "imdsv2_session_token_url": IMDSV2_SESSION_TOKEN_URL_IPV6,
    }
    # Successful STS token-exchange response body.
    SUCCESS_RESPONSE = {
        "access_token": "ACCESS_TOKEN",
        "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
        "token_type": "Bearer",
        "expires_in": 3600,
        "scope": " ".join(SCOPES),
    }
+
+ @classmethod
+ def make_serialized_aws_signed_request(
+ cls,
+ aws_security_credentials,
+ region_name="us-east-2",
+ url="https://sts.us-east-2.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
+ ):
+ """Utility to generate serialize AWS signed requests.
+ This makes it easy to assert generated subject tokens based on the
+ provided AWS security credentials, regions and AWS STS endpoint.
+ """
+ request_signer = aws.RequestSigner(region_name)
+ signed_request = request_signer.get_request_options(
+ aws_security_credentials, url, "POST"
+ )
+ reformatted_signed_request = {
+ "url": signed_request.get("url"),
+ "method": signed_request.get("method"),
+ "headers": [
+ {
+ "key": "Authorization",
+ "value": signed_request.get("headers").get("Authorization"),
+ },
+ {"key": "host", "value": signed_request.get("headers").get("host")},
+ {
+ "key": "x-amz-date",
+ "value": signed_request.get("headers").get("x-amz-date"),
+ },
+ ],
+ }
+ # Include security token if available.
+ if "security_token" in aws_security_credentials:
+ reformatted_signed_request.get("headers").append(
+ {
+ "key": "x-amz-security-token",
+ "value": signed_request.get("headers").get("x-amz-security-token"),
+ }
+ )
+ # Append x-goog-cloud-target-resource header.
+ reformatted_signed_request.get("headers").append(
+ {"key": "x-goog-cloud-target-resource", "value": AUDIENCE}
+ ),
+ return urllib.parse.quote(
+ json.dumps(
+ reformatted_signed_request, separators=(",", ":"), sort_keys=True
+ )
+ )
+
+ @classmethod
+ def make_mock_request(
+ cls,
+ region_status=None,
+ region_name=None,
+ role_status=None,
+ role_name=None,
+ security_credentials_status=None,
+ security_credentials_data=None,
+ token_status=None,
+ token_data=None,
+ impersonation_status=None,
+ impersonation_data=None,
+ imdsv2_session_token_status=None,
+ imdsv2_session_token_data=None,
+ ):
+ """Utility function to generate a mock HTTP request object.
+ This will facilitate testing various edge cases by specify how the
+ various endpoints will respond while generating a Google Access token
+ in an AWS environment.
+ """
+ responses = []
+ if imdsv2_session_token_status:
+ # AWS session token request
+ imdsv2_session_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ imdsv2_session_response.status = imdsv2_session_token_status
+ imdsv2_session_response.data = imdsv2_session_token_data
+ responses.append(imdsv2_session_response)
+
+ if region_status:
+ # AWS region request.
+ region_response = mock.create_autospec(transport.Response, instance=True)
+ region_response.status = region_status
+ if region_name:
+ region_response.data = "{}b".format(region_name).encode("utf-8")
+ responses.append(region_response)
+
+ if role_status:
+ # AWS role name request.
+ role_response = mock.create_autospec(transport.Response, instance=True)
+ role_response.status = role_status
+ if role_name:
+ role_response.data = role_name.encode("utf-8")
+ responses.append(role_response)
+
+ if security_credentials_status:
+ # AWS security credentials request.
+ security_credentials_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ security_credentials_response.status = security_credentials_status
+ if security_credentials_data:
+ security_credentials_response.data = json.dumps(
+ security_credentials_data
+ ).encode("utf-8")
+ responses.append(security_credentials_response)
+
+ if token_status:
+ # GCP token exchange request.
+ token_response = mock.create_autospec(transport.Response, instance=True)
+ token_response.status = token_status
+ token_response.data = json.dumps(token_data).encode("utf-8")
+ responses.append(token_response)
+
+ if impersonation_status:
+ # Service account impersonation request.
+ impersonation_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ impersonation_response.status = impersonation_status
+ impersonation_response.data = json.dumps(impersonation_data).encode("utf-8")
+ responses.append(impersonation_response)
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ @classmethod
+ def make_credentials(
+ cls,
+ credential_source,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ ):
+ return aws.Credentials(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=token_url,
+ token_info_url=token_info_url,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=credential_source,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ )
+
+ @classmethod
+ def assert_aws_metadata_request_kwargs(
+ cls, request_kwargs, url, headers=None, method="GET"
+ ):
+ assert request_kwargs["url"] == url
+ # All used AWS metadata server endpoints use GET HTTP method.
+ assert request_kwargs["method"] == method
+ if headers:
+ assert request_kwargs["headers"] == headers
+ else:
+ assert "headers" not in request_kwargs or request_kwargs["headers"] is None
+ # None of the endpoints used require any data in request.
+ assert "body" not in request_kwargs
+
+ @classmethod
+ def assert_token_request_kwargs(
+ cls, request_kwargs, headers, request_data, token_url=TOKEN_URL
+ ):
+ assert request_kwargs["url"] == token_url
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ assert len(body_tuples) == len(request_data.keys())
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+
+ @classmethod
+ def assert_impersonation_request_kwargs(
+ cls,
+ request_kwargs,
+ headers,
+ request_data,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ ):
+ assert request_kwargs["url"] == service_account_impersonation_url
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_json = json.loads(request_kwargs["body"].decode("utf-8"))
+ assert body_json == request_data
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_info_full_options(self, mock_init):
+ credentials = aws.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "token_info_url": TOKEN_INFO_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "service_account_impersonation": {"token_lifetime_seconds": 2800},
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ )
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ workforce_pool_user_project=None,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_info_required_options_only(self, mock_init):
+ credentials = aws.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ )
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=None,
+ service_account_impersonation_url=None,
+ service_account_impersonation_options={},
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=None,
+ workforce_pool_user_project=None,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_file_full_options(self, mock_init, tmpdir):
+ info = {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "token_info_url": TOKEN_INFO_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "service_account_impersonation": {"token_lifetime_seconds": 2800},
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
+ }
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(info))
+ credentials = aws.Credentials.from_file(str(config_file))
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ workforce_pool_user_project=None,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ @mock.patch.object(aws.Credentials, "__init__", return_value=None)
+ def test_from_file_required_options_only(self, mock_init, tmpdir):
+ info = {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ config_file = tmpdir.join("config.json")
+ config_file.write(json.dumps(info))
+ credentials = aws.Credentials.from_file(str(config_file))
+
+ # Confirm aws.Credentials instance initialized with the expected parameters.
+ assert isinstance(credentials, aws.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=None,
+ service_account_impersonation_url=None,
+ service_account_impersonation_options={},
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=None,
+ workforce_pool_user_project=None,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ def test_constructor_invalid_credential_source(self):
+ # Provide invalid credential source.
+ credential_source = {"unsupported": "value"}
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"No valid AWS 'credential_source' provided")
+
+ def test_constructor_invalid_environment_id(self):
+ # Provide invalid environment_id.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source["environment_id"] = "azure1"
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"No valid AWS 'credential_source' provided")
+
+ def test_constructor_missing_cred_verification_url(self):
+ # regional_cred_verification_url is a required field.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source.pop("regional_cred_verification_url")
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"No valid AWS 'credential_source' provided")
+
+ def test_constructor_invalid_environment_id_version(self):
+ # Provide an unsupported version.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source["environment_id"] = "aws3"
+
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(credential_source=credential_source)
+
+ assert excinfo.match(r"aws version '3' is not supported in the current build.")
+
+ def test_info(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy()
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "token_info_url": TOKEN_INFO_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
+ }
+
+ def test_token_info_url(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy()
+ )
+
+ assert credentials.token_info_url == TOKEN_INFO_URL
+
+ def test_token_info_url_custom(self):
+ for url in VALID_TOKEN_URLS:
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy(),
+ token_info_url=(url + "/introspect"),
+ )
+
+ assert credentials.token_info_url == (url + "/introspect")
+
+ def test_token_info_url_negative(self):
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy(), token_info_url=None
+ )
+
+ assert not credentials.token_info_url
+
+ def test_token_url_custom(self):
+ for url in VALID_TOKEN_URLS:
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy(),
+ token_url=(url + "/token"),
+ )
+
+ assert credentials._token_url == (url + "/token")
+
+ def test_service_account_impersonation_url_custom(self):
+ for url in VALID_SERVICE_ACCOUNT_IMPERSONATION_URLS:
+ credentials = self.make_credentials(
+ credential_source=self.CREDENTIAL_SOURCE.copy(),
+ service_account_impersonation_url=(
+ url + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+ ),
+ )
+
+ assert credentials._service_account_impersonation_url == (
+ url + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+ )
+
+ def test_retrieve_subject_token_missing_region_url(self):
+ # When AWS_REGION envvar is not available, region_url is required for
+ # determining the current AWS region.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source.pop("region_url")
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(None)
+
+ assert excinfo.match(r"Unable to determine AWS region")
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_temp_creds_no_environment_vars(
+ self, utcnow
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert region request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1], REGION_URL
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1], SECURITY_CREDS_URL
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {"Content-Type": "application/json"},
+ )
+
+ # Retrieve subject_token again. Region should not be queried again.
+ new_request = self.make_mock_request(
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ )
+
+ credentials.retrieve_subject_token(new_request)
+
+ # Only 3 requests should be sent as the region is cached.
+ assert len(new_request.call_args_list) == 2
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ new_request.call_args_list[0][1], SECURITY_CREDS_URL
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ new_request.call_args_list[1][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {"Content-Type": "application/json"},
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ @mock.patch.dict(os.environ, {})
+ def test_retrieve_subject_token_success_temp_creds_no_environment_vars_idmsv2(
+ self, utcnow
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ imdsv2_session_token_status=http_client.OK,
+ imdsv2_session_token_data=self.AWS_IMDSV2_SESSION_TOKEN,
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE.copy()
+ credential_source_token_url[
+ "imdsv2_session_token_url"
+ ] = IMDSV2_SESSION_TOKEN_URL
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert session token request
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+ # Assert region request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1],
+ REGION_URL,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ SECURITY_CREDS_URL,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[3][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {
+ "Content-Type": "application/json",
+ "X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN,
+ },
+ )
+
+ # Retrieve subject_token again. Region should not be queried again.
+ new_request = self.make_mock_request(
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ imdsv2_session_token_status=http_client.OK,
+ imdsv2_session_token_data=self.AWS_IMDSV2_SESSION_TOKEN,
+ )
+
+ credentials.retrieve_subject_token(new_request)
+
+ # Only 3 requests should be sent as the region is cached.
+ assert len(new_request.call_args_list) == 3
+ # Assert session token request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ new_request.call_args_list[1][1],
+ SECURITY_CREDS_URL,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ new_request.call_args_list[2][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {
+ "Content-Type": "application/json",
+ "X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN,
+ },
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ @mock.patch.dict(
+ os.environ,
+ {
+ environment_vars.AWS_REGION: AWS_REGION,
+ environment_vars.AWS_ACCESS_KEY_ID: ACCESS_KEY_ID,
+ },
+ )
+ def test_retrieve_subject_token_success_temp_creds_environment_vars_missing_secret_access_key_idmsv2(
+ self, utcnow
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ imdsv2_session_token_status=http_client.OK,
+ imdsv2_session_token_data=self.AWS_IMDSV2_SESSION_TOKEN,
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE.copy()
+ credential_source_token_url[
+ "imdsv2_session_token_url"
+ ] = IMDSV2_SESSION_TOKEN_URL
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ subject_token = credentials.retrieve_subject_token(request)
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert session token request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1],
+ SECURITY_CREDS_URL,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {
+ "Content-Type": "application/json",
+ "X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN,
+ },
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ @mock.patch.dict(
+ os.environ,
+ {
+ environment_vars.AWS_REGION: AWS_REGION,
+ environment_vars.AWS_SECRET_ACCESS_KEY: SECRET_ACCESS_KEY,
+ },
+ )
+ def test_retrieve_subject_token_success_temp_creds_environment_vars_missing_access_key_id_idmsv2(
+ self, utcnow
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ imdsv2_session_token_status=http_client.OK,
+ imdsv2_session_token_data=self.AWS_IMDSV2_SESSION_TOKEN,
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE.copy()
+ credential_source_token_url[
+ "imdsv2_session_token_url"
+ ] = IMDSV2_SESSION_TOKEN_URL
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ subject_token = credentials.retrieve_subject_token(request)
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert session token request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1],
+ SECURITY_CREDS_URL,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {
+ "Content-Type": "application/json",
+ "X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN,
+ },
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ @mock.patch.dict(os.environ, {environment_vars.AWS_REGION: AWS_REGION})
+ def test_retrieve_subject_token_success_temp_creds_environment_vars_missing_creds_idmsv2(
+ self, utcnow
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ imdsv2_session_token_status=http_client.OK,
+ imdsv2_session_token_data=self.AWS_IMDSV2_SESSION_TOKEN,
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE.copy()
+ credential_source_token_url[
+ "imdsv2_session_token_url"
+ ] = IMDSV2_SESSION_TOKEN_URL
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ subject_token = credentials.retrieve_subject_token(request)
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert session token request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1],
+ SECURITY_CREDS_URL,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ "{}/{}".format(SECURITY_CREDS_URL, self.AWS_ROLE),
+ {
+ "Content-Type": "application/json",
+ "X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN,
+ },
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ @mock.patch.dict(
+ os.environ,
+ {
+ environment_vars.AWS_REGION: AWS_REGION,
+ environment_vars.AWS_ACCESS_KEY_ID: ACCESS_KEY_ID,
+ environment_vars.AWS_SECRET_ACCESS_KEY: SECRET_ACCESS_KEY,
+ },
+ )
+ def test_retrieve_subject_token_success_temp_creds_idmsv2(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ role_status=http_client.OK, role_name=self.AWS_ROLE
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE.copy()
+ credential_source_token_url[
+ "imdsv2_session_token_url"
+ ] = IMDSV2_SESSION_TOKEN_URL
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ credentials.retrieve_subject_token(request)
+ assert not request.called
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_ipv6(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ imdsv2_session_token_status=http_client.OK,
+ imdsv2_session_token_data=self.AWS_IMDSV2_SESSION_TOKEN,
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE_IPV6.copy()
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ # Assert session token request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL_IPV6,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+ # Assert region request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[1][1],
+ REGION_URL_IPV6,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert role request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[2][1],
+ SECURITY_CREDS_URL_IPV6,
+ {"X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN},
+ )
+ # Assert security credentials request.
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[3][1],
+ "{}/{}".format(SECURITY_CREDS_URL_IPV6, self.AWS_ROLE),
+ {
+ "Content-Type": "application/json",
+ "X-aws-ec2-metadata-token": self.AWS_IMDSV2_SESSION_TOKEN,
+ },
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_session_error_idmsv2(self, utcnow):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ imdsv2_session_token_status=http_client.UNAUTHORIZED,
+ imdsv2_session_token_data="unauthorized",
+ )
+ credential_source_token_url = self.CREDENTIAL_SOURCE.copy()
+ credential_source_token_url[
+ "imdsv2_session_token_url"
+ ] = IMDSV2_SESSION_TOKEN_URL
+ credentials = self.make_credentials(
+ credential_source=credential_source_token_url
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS Session Token")
+
+ # Assert session token request
+ self.assert_aws_metadata_request_kwargs(
+ request.call_args_list[0][1],
+ IMDSV2_SESSION_TOKEN_URL,
+ {"X-aws-ec2-metadata-token-ttl-seconds": "300"},
+ "PUT",
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_permanent_creds_no_environment_vars(
+ self, utcnow
+ ):
+ # Simualte a permanent credential without a session token is
+ # returned by the security-credentials endpoint.
+ security_creds_response = self.AWS_SECURITY_CREDENTIALS_RESPONSE.copy()
+ security_creds_response.pop("Token")
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=security_creds_response,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {"access_key_id": ACCESS_KEY_ID, "secret_access_key": SECRET_ACCESS_KEY}
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars(self, utcnow, monkeypatch):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ monkeypatch.setenv(environment_vars.AWS_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_with_default_region(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ monkeypatch.setenv(environment_vars.AWS_DEFAULT_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_with_both_regions_set(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ monkeypatch.setenv(environment_vars.AWS_DEFAULT_REGION, "Malformed AWS Region")
+ # This test makes sure that the AWS_REGION gets used over AWS_DEFAULT_REGION,
+ # So, AWS_DEFAULT_REGION is set to something that would cause the test to fail,
+ # And AWS_REGION is set to the a valid value, and it should succeed
+ monkeypatch.setenv(environment_vars.AWS_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_no_session_token(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_REGION, self.AWS_REGION)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(None)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {"access_key_id": ACCESS_KEY_ID, "secret_access_key": SECRET_ACCESS_KEY}
+ )
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_retrieve_subject_token_success_environment_vars_except_region(
+ self, utcnow, monkeypatch
+ ):
+ monkeypatch.setenv(environment_vars.AWS_ACCESS_KEY_ID, ACCESS_KEY_ID)
+ monkeypatch.setenv(environment_vars.AWS_SECRET_ACCESS_KEY, SECRET_ACCESS_KEY)
+ monkeypatch.setenv(environment_vars.AWS_SESSION_TOKEN, TOKEN)
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ # Region will be queried since it is not found in envvars.
+ request = self.make_mock_request(
+ region_status=http_client.OK, region_name=self.AWS_REGION
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ subject_token = credentials.retrieve_subject_token(request)
+
+ assert subject_token == self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+
+ def test_retrieve_subject_token_error_determining_aws_region(self):
+ # Simulate error in retrieving the AWS region.
+ request = self.make_mock_request(region_status=http_client.BAD_REQUEST)
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS region")
+
+ def test_retrieve_subject_token_error_determining_aws_role(self):
+ # Simulate error in retrieving the AWS role name.
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.BAD_REQUEST,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS role name")
+
+ def test_retrieve_subject_token_error_determining_security_creds_url(self):
+ # Simulate the security-credentials url is missing. This is needed for
+ # determining the AWS security credentials when not found in envvars.
+ credential_source = self.CREDENTIAL_SOURCE.copy()
+ credential_source.pop("url")
+ request = self.make_mock_request(
+ region_status=http_client.OK, region_name=self.AWS_REGION
+ )
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(
+ r"Unable to determine the AWS metadata server security credentials endpoint"
+ )
+
+ def test_retrieve_subject_token_error_determining_aws_security_creds(self):
+ # Simulate error in retrieving the AWS security credentials.
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.BAD_REQUEST,
+ )
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.retrieve_subject_token(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS security credentials")
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_without_impersonation_ignore_default_scopes(
+ self, utcnow, mock_auth_lib_value
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false source/aws",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": " ".join(SCOPES),
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=SCOPES,
+ # Default scopes should be ignored.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 4
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes == SCOPES
+ assert credentials.default_scopes == ["ignored"]
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_without_impersonation_use_default_scopes(
+ self, utcnow, mock_auth_lib_value
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false source/aws",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": " ".join(SCOPES),
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=None,
+ # Default scopes should be used since user specified scopes are none.
+ default_scopes=SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 4
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes is None
+ assert credentials.default_scopes == SCOPES
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_with_impersonation_ignore_default_scopes(
+ self, utcnow, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false source/aws",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "https://www.googleapis.com/auth/iam",
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": QUOTA_PROJECT_ID,
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": SCOPES,
+ "lifetime": "3600s",
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=SCOPES,
+ # Default scopes should be ignored.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 5
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ # Fifth request should be sent to iamcredentials endpoint for service
+ # account impersonation.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[4][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.token == impersonation_response["accessToken"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes == SCOPES
+ assert credentials.default_scopes == ["ignored"]
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_refresh_success_with_impersonation_use_default_scopes(
+ self, utcnow, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ utcnow.return_value = datetime.datetime.strptime(
+ self.AWS_SIGNATURE_TIME, "%Y-%m-%dT%H:%M:%SZ"
+ )
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ expected_subject_token = self.make_serialized_aws_signed_request(
+ {
+ "access_key_id": ACCESS_KEY_ID,
+ "secret_access_key": SECRET_ACCESS_KEY,
+ "security_token": TOKEN,
+ }
+ )
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false source/aws",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "https://www.googleapis.com/auth/iam",
+ "subject_token": expected_subject_token,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": QUOTA_PROJECT_ID,
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": SCOPES,
+ "lifetime": "3600s",
+ }
+ request = self.make_mock_request(
+ region_status=http_client.OK,
+ region_name=self.AWS_REGION,
+ role_status=http_client.OK,
+ role_name=self.AWS_ROLE,
+ security_credentials_status=http_client.OK,
+ security_credentials_data=self.AWS_SECURITY_CREDENTIALS_RESPONSE,
+ token_status=http_client.OK,
+ token_data=self.SUCCESS_RESPONSE,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=None,
+ # Default scopes should be used since user specified scopes are none.
+ default_scopes=SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ assert len(request.call_args_list) == 5
+ # Fourth request should be sent to GCP STS endpoint.
+ self.assert_token_request_kwargs(
+ request.call_args_list[3][1], token_headers, token_request_data
+ )
+ # Fifth request should be sent to iamcredentials endpoint for service
+ # account impersonation.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[4][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.token == impersonation_response["accessToken"]
+ assert credentials.quota_project_id == QUOTA_PROJECT_ID
+ assert credentials.scopes is None
+ assert credentials.default_scopes == SCOPES
+
+ def test_refresh_with_retrieve_subject_token_error(self):
+ request = self.make_mock_request(region_status=http_client.BAD_REQUEST)
+ credentials = self.make_credentials(credential_source=self.CREDENTIAL_SOURCE)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(r"Unable to retrieve AWS region")
diff --git a/contrib/python/google-auth/py3/tests/test_credentials.py b/contrib/python/google-auth/py3/tests/test_credentials.py
new file mode 100644
index 0000000000..99235cda61
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_credentials.py
@@ -0,0 +1,224 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import credentials
+
+
+class CredentialsImpl(credentials.Credentials):
+ def refresh(self, request):
+ self.token = request
+
+ def with_quota_project(self, quota_project_id):
+ raise NotImplementedError()
+
+
+class CredentialsImplWithMetrics(credentials.Credentials):
+ def refresh(self, request):
+ self.token = request
+
+ def _metric_header_for_usage(self):
+ return "foo"
+
+
+def test_credentials_constructor():
+ credentials = CredentialsImpl()
+ assert not credentials.token
+ assert not credentials.expiry
+ assert not credentials.expired
+ assert not credentials.valid
+ assert credentials.universe_domain == "googleapis.com"
+
+
+def test_expired_and_valid():
+ credentials = CredentialsImpl()
+ credentials.token = "token"
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ # Set the expiration to one second more than now plus the clock skew
+ # accommodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.utcnow()
+ + _helpers.REFRESH_THRESHOLD
+ + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ # Set the credentials expiration to now. Because of the clock skew
+ # accommodation, these credentials should report as expired.
+ credentials.expiry = datetime.datetime.utcnow()
+
+ assert not credentials.valid
+ assert credentials.expired
+
+
+def test_before_request():
+ credentials = CredentialsImpl()
+ request = "token"
+ headers = {}
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert credentials.valid
+ assert credentials.token == "token"
+ assert headers["authorization"] == "Bearer token"
+ assert "x-identity-trust-boundary" not in headers
+
+ request = "token2"
+ headers = {}
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert credentials.valid
+ assert credentials.token == "token"
+ assert headers["authorization"] == "Bearer token"
+ assert "x-identity-trust-boundary" not in headers
+
+
+def test_before_request_with_trust_boundary():
+ DUMMY_BOUNDARY = "00110101"
+ credentials = CredentialsImpl()
+ credentials._trust_boundary = DUMMY_BOUNDARY
+ request = "token"
+ headers = {}
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert credentials.valid
+ assert credentials.token == "token"
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-identity-trust-boundary"] == DUMMY_BOUNDARY
+
+ request = "token2"
+ headers = {}
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert credentials.valid
+ assert credentials.token == "token"
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-identity-trust-boundary"] == DUMMY_BOUNDARY
+
+
+def test_before_request_metrics():
+ credentials = CredentialsImplWithMetrics()
+ request = "token"
+ headers = {}
+
+ credentials.before_request(request, "http://example.com", "GET", headers)
+ assert headers["x-goog-api-client"] == "foo"
+
+
+def test_anonymous_credentials_ctor():
+ anon = credentials.AnonymousCredentials()
+ assert anon.token is None
+ assert anon.expiry is None
+ assert not anon.expired
+ assert anon.valid
+
+
+def test_anonymous_credentials_refresh():
+ anon = credentials.AnonymousCredentials()
+ request = object()
+ with pytest.raises(ValueError):
+ anon.refresh(request)
+
+
+def test_anonymous_credentials_apply_default():
+ anon = credentials.AnonymousCredentials()
+ headers = {}
+ anon.apply(headers)
+ assert headers == {}
+ with pytest.raises(ValueError):
+ anon.apply(headers, token="TOKEN")
+
+
+def test_anonymous_credentials_before_request():
+ anon = credentials.AnonymousCredentials()
+ request = object()
+ method = "GET"
+ url = "https://example.com/api/endpoint"
+ headers = {}
+ anon.before_request(request, method, url, headers)
+ assert headers == {}
+
+
+class ReadOnlyScopedCredentialsImpl(credentials.ReadOnlyScoped, CredentialsImpl):
+ @property
+ def requires_scopes(self):
+ return super(ReadOnlyScopedCredentialsImpl, self).requires_scopes
+
+
+def test_readonly_scoped_credentials_constructor():
+ credentials = ReadOnlyScopedCredentialsImpl()
+ assert credentials._scopes is None
+
+
+def test_readonly_scoped_credentials_scopes():
+ credentials = ReadOnlyScopedCredentialsImpl()
+ credentials._scopes = ["one", "two"]
+ assert credentials.scopes == ["one", "two"]
+ assert credentials.has_scopes(["one"])
+ assert credentials.has_scopes(["two"])
+ assert credentials.has_scopes(["one", "two"])
+ assert not credentials.has_scopes(["three"])
+
+
+def test_readonly_scoped_credentials_requires_scopes():
+ credentials = ReadOnlyScopedCredentialsImpl()
+ assert not credentials.requires_scopes
+
+
+class RequiresScopedCredentialsImpl(credentials.Scoped, CredentialsImpl):
+ def __init__(self, scopes=None, default_scopes=None):
+ super(RequiresScopedCredentialsImpl, self).__init__()
+ self._scopes = scopes
+ self._default_scopes = default_scopes
+
+ @property
+ def requires_scopes(self):
+ return not self.scopes
+
+ def with_scopes(self, scopes, default_scopes=None):
+ return RequiresScopedCredentialsImpl(
+ scopes=scopes, default_scopes=default_scopes
+ )
+
+
+def test_create_scoped_if_required_scoped():
+ unscoped_credentials = RequiresScopedCredentialsImpl()
+ scoped_credentials = credentials.with_scopes_if_required(
+ unscoped_credentials, ["one", "two"]
+ )
+
+ assert scoped_credentials is not unscoped_credentials
+ assert not scoped_credentials.requires_scopes
+ assert scoped_credentials.has_scopes(["one", "two"])
+
+
+def test_create_scoped_if_required_not_scopes():
+ unscoped_credentials = CredentialsImpl()
+ scoped_credentials = credentials.with_scopes_if_required(
+ unscoped_credentials, ["one", "two"]
+ )
+
+ assert scoped_credentials is unscoped_credentials
diff --git a/contrib/python/google-auth/py3/tests/test_downscoped.py b/contrib/python/google-auth/py3/tests/test_downscoped.py
new file mode 100644
index 0000000000..b011380bdb
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_downscoped.py
@@ -0,0 +1,696 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+import urllib
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import downscoped
+from google.auth import exceptions
+from google.auth import transport
+
+
+EXPRESSION = (
+ "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-a')"
+)
+TITLE = "customer-a-objects"
+DESCRIPTION = (
+ "Condition to make permissions available for objects starting with customer-a"
+)
+AVAILABLE_RESOURCE = "//storage.googleapis.com/projects/_/buckets/example-bucket"
+AVAILABLE_PERMISSIONS = ["inRole:roles/storage.objectViewer"]
+
+OTHER_EXPRESSION = (
+ "resource.name.startsWith('projects/_/buckets/example-bucket/objects/customer-b')"
+)
+OTHER_TITLE = "customer-b-objects"
+OTHER_DESCRIPTION = (
+ "Condition to make permissions available for objects starting with customer-b"
+)
+OTHER_AVAILABLE_RESOURCE = "//storage.googleapis.com/projects/_/buckets/other-bucket"
+OTHER_AVAILABLE_PERMISSIONS = ["inRole:roles/storage.objectCreator"]
+QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+GRANT_TYPE = "urn:ietf:params:oauth:grant-type:token-exchange"
+REQUESTED_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+TOKEN_EXCHANGE_ENDPOINT = "https://sts.googleapis.com/v1/token"
+SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:access_token"
+SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+}
+ERROR_RESPONSE = {
+ "error": "invalid_grant",
+ "error_description": "Subject token is invalid.",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+}
+CREDENTIAL_ACCESS_BOUNDARY_JSON = {
+ "accessBoundary": {
+ "accessBoundaryRules": [
+ {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ "availabilityCondition": {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ },
+ }
+ ]
+ }
+}
+
+
+class SourceCredentials(credentials.Credentials):
+ def __init__(self, raise_error=False, expires_in=3600):
+ super(SourceCredentials, self).__init__()
+ self._counter = 0
+ self._raise_error = raise_error
+ self._expires_in = expires_in
+
+ def refresh(self, request):
+ if self._raise_error:
+ raise exceptions.RefreshError(
+ "Failed to refresh access token in source credentials."
+ )
+ now = _helpers.utcnow()
+ self._counter += 1
+ self.token = "ACCESS_TOKEN_{}".format(self._counter)
+ self.expiry = now + datetime.timedelta(seconds=self._expires_in)
+
+
+def make_availability_condition(expression, title=None, description=None):
+ return downscoped.AvailabilityCondition(expression, title, description)
+
+
+def make_access_boundary_rule(
+ available_resource, available_permissions, availability_condition=None
+):
+ return downscoped.AccessBoundaryRule(
+ available_resource, available_permissions, availability_condition
+ )
+
+
+def make_credential_access_boundary(rules):
+ return downscoped.CredentialAccessBoundary(rules)
+
+
+class TestAvailabilityCondition(object):
+ def test_constructor(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+
+ assert availability_condition.expression == EXPRESSION
+ assert availability_condition.title == TITLE
+ assert availability_condition.description == DESCRIPTION
+
+ def test_constructor_required_params_only(self):
+ availability_condition = make_availability_condition(EXPRESSION)
+
+ assert availability_condition.expression == EXPRESSION
+ assert availability_condition.title is None
+ assert availability_condition.description is None
+
+ def test_setters(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ availability_condition.expression = OTHER_EXPRESSION
+ availability_condition.title = OTHER_TITLE
+ availability_condition.description = OTHER_DESCRIPTION
+
+ assert availability_condition.expression == OTHER_EXPRESSION
+ assert availability_condition.title == OTHER_TITLE
+ assert availability_condition.description == OTHER_DESCRIPTION
+
+ def test_invalid_expression_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_availability_condition([EXPRESSION], TITLE, DESCRIPTION)
+
+ assert excinfo.match("The provided expression is not a string.")
+
+ def test_invalid_title_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_availability_condition(EXPRESSION, False, DESCRIPTION)
+
+ assert excinfo.match("The provided title is not a string or None.")
+
+ def test_invalid_description_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_availability_condition(EXPRESSION, TITLE, False)
+
+ assert excinfo.match("The provided description is not a string or None.")
+
+ def test_to_json_required_params_only(self):
+ availability_condition = make_availability_condition(EXPRESSION)
+
+ assert availability_condition.to_json() == {"expression": EXPRESSION}
+
+ def test_to_json_(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+
+ assert availability_condition.to_json() == {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ }
+
+
+class TestAccessBoundaryRule(object):
+ def test_constructor(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+
+ assert access_boundary_rule.available_resource == AVAILABLE_RESOURCE
+ assert access_boundary_rule.available_permissions == tuple(
+ AVAILABLE_PERMISSIONS
+ )
+ assert access_boundary_rule.availability_condition == availability_condition
+
+ def test_constructor_required_params_only(self):
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS
+ )
+
+ assert access_boundary_rule.available_resource == AVAILABLE_RESOURCE
+ assert access_boundary_rule.available_permissions == tuple(
+ AVAILABLE_PERMISSIONS
+ )
+ assert access_boundary_rule.availability_condition is None
+
+ def test_setters(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ other_availability_condition = make_availability_condition(
+ OTHER_EXPRESSION, OTHER_TITLE, OTHER_DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ access_boundary_rule.available_resource = OTHER_AVAILABLE_RESOURCE
+ access_boundary_rule.available_permissions = OTHER_AVAILABLE_PERMISSIONS
+ access_boundary_rule.availability_condition = other_availability_condition
+
+ assert access_boundary_rule.available_resource == OTHER_AVAILABLE_RESOURCE
+ assert access_boundary_rule.available_permissions == tuple(
+ OTHER_AVAILABLE_PERMISSIONS
+ )
+ assert (
+ access_boundary_rule.availability_condition == other_availability_condition
+ )
+
+ def test_invalid_available_resource_type(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ with pytest.raises(TypeError) as excinfo:
+ make_access_boundary_rule(
+ None, AVAILABLE_PERMISSIONS, availability_condition
+ )
+
+ assert excinfo.match("The provided available_resource is not a string.")
+
+ def test_invalid_available_permissions_type(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ with pytest.raises(TypeError) as excinfo:
+ make_access_boundary_rule(
+ AVAILABLE_RESOURCE, [0, 1, 2], availability_condition
+ )
+
+ assert excinfo.match(
+ "Provided available_permissions are not a list of strings."
+ )
+
+ def test_invalid_available_permissions_value(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ with pytest.raises(ValueError) as excinfo:
+ make_access_boundary_rule(
+ AVAILABLE_RESOURCE,
+ ["roles/storage.objectViewer"],
+ availability_condition,
+ )
+
+ assert excinfo.match("available_permissions must be prefixed with 'inRole:'.")
+
+ def test_invalid_availability_condition_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, {"foo": "bar"}
+ )
+
+ assert excinfo.match(
+ "The provided availability_condition is not a 'google.auth.downscoped.AvailabilityCondition' or None."
+ )
+
+ def test_to_json(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+
+ assert access_boundary_rule.to_json() == {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ "availabilityCondition": {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ },
+ }
+
+ def test_to_json_required_params_only(self):
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS
+ )
+
+ assert access_boundary_rule.to_json() == {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ }
+
+
+class TestCredentialAccessBoundary(object):
+ def test_constructor(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ assert credential_access_boundary.rules == tuple(rules)
+
+ def test_setters(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ other_availability_condition = make_availability_condition(
+ OTHER_EXPRESSION, OTHER_TITLE, OTHER_DESCRIPTION
+ )
+ other_access_boundary_rule = make_access_boundary_rule(
+ OTHER_AVAILABLE_RESOURCE,
+ OTHER_AVAILABLE_PERMISSIONS,
+ other_availability_condition,
+ )
+ other_rules = [other_access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+ credential_access_boundary.rules = other_rules
+
+ assert credential_access_boundary.rules == tuple(other_rules)
+
+ def test_add_rule(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule] * 9
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ # Add one more rule. This should not raise an error.
+ additional_access_boundary_rule = make_access_boundary_rule(
+ OTHER_AVAILABLE_RESOURCE, OTHER_AVAILABLE_PERMISSIONS
+ )
+ credential_access_boundary.add_rule(additional_access_boundary_rule)
+
+ assert len(credential_access_boundary.rules) == 10
+ assert credential_access_boundary.rules[9] == additional_access_boundary_rule
+
+ def test_add_rule_invalid_value(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule] * 10
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ # Add one more rule to exceed maximum allowed rules.
+ with pytest.raises(ValueError) as excinfo:
+ credential_access_boundary.add_rule(access_boundary_rule)
+
+ assert excinfo.match(
+ "Credential access boundary rules can have a maximum of 10 rules."
+ )
+ assert len(credential_access_boundary.rules) == 10
+
+ def test_add_rule_invalid_type(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ # Add a rule of an invalid type. This should be rejected.
+ with pytest.raises(TypeError) as excinfo:
+ credential_access_boundary.add_rule("invalid")
+
+ assert excinfo.match(
+ "The provided rule does not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
+ )
+ assert len(credential_access_boundary.rules) == 1
+ assert credential_access_boundary.rules[0] == access_boundary_rule
+
+ def test_invalid_rules_type(self):
+ with pytest.raises(TypeError) as excinfo:
+ make_credential_access_boundary(["invalid"])
+
+ assert excinfo.match(
+ "List of rules provided do not contain a valid 'google.auth.downscoped.AccessBoundaryRule'."
+ )
+
+ def test_invalid_rules_value(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ too_many_rules = [access_boundary_rule] * 11
+ with pytest.raises(ValueError) as excinfo:
+ make_credential_access_boundary(too_many_rules)
+
+ assert excinfo.match(
+ "Credential access boundary rules can have a maximum of 10 rules."
+ )
+
+ def test_to_json(self):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ assert credential_access_boundary.to_json() == {
+ "accessBoundary": {
+ "accessBoundaryRules": [
+ {
+ "availablePermissions": AVAILABLE_PERMISSIONS,
+ "availableResource": AVAILABLE_RESOURCE,
+ "availabilityCondition": {
+ "expression": EXPRESSION,
+ "title": TITLE,
+ "description": DESCRIPTION,
+ },
+ }
+ ]
+ }
+ }
+
+
+class TestCredentials(object):
+ @staticmethod
+ def make_credentials(source_credentials=SourceCredentials(), quota_project_id=None):
+ availability_condition = make_availability_condition(
+ EXPRESSION, TITLE, DESCRIPTION
+ )
+ access_boundary_rule = make_access_boundary_rule(
+ AVAILABLE_RESOURCE, AVAILABLE_PERMISSIONS, availability_condition
+ )
+ rules = [access_boundary_rule]
+ credential_access_boundary = make_credential_access_boundary(rules)
+
+ return downscoped.Credentials(
+ source_credentials, credential_access_boundary, quota_project_id
+ )
+
+ @staticmethod
+ def make_mock_request(data, status=http_client.OK):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+
+ return request
+
+ @staticmethod
+ def assert_request_kwargs(request_kwargs, headers, request_data):
+ """Asserts the request was called with the expected parameters.
+ """
+ assert request_kwargs["url"] == TOKEN_EXCHANGE_ENDPOINT
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+ assert len(body_tuples) == len(request_data.keys())
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+
+ # No token acquired yet.
+ assert not credentials.token
+ assert not credentials.valid
+ # Expiration hasn't been set yet.
+ assert not credentials.expiry
+ assert not credentials.expired
+ # No quota project ID set.
+ assert not credentials.quota_project_id
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh(self, unused_utcnow):
+ response = SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": GRANT_TYPE,
+ "subject_token": "ACCESS_TOKEN_1",
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "requested_token_type": REQUESTED_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(CREDENTIAL_ACCESS_BOUNDARY_JSON)),
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ source_credentials = SourceCredentials()
+ credentials = self.make_credentials(source_credentials=source_credentials)
+
+ # Spy on calls to source credentials refresh to confirm the expected request
+ # instance is used.
+ with mock.patch.object(
+ source_credentials, "refresh", wraps=source_credentials.refresh
+ ) as wrapped_souce_cred_refresh:
+ credentials.refresh(request)
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+ # Confirm source credentials called with the same request instance.
+ wrapped_souce_cred_refresh.assert_called_with(request)
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_without_response_expires_in(self, unused_utcnow):
+ response = SUCCESS_RESPONSE.copy()
+ # Simulate the response is missing the expires_in field.
+ # The downscoped token expiration should match the source credentials
+ # expiration.
+ del response["expires_in"]
+ expected_expires_in = 1800
+ # Simulate the source credentials generating a token with a 1800-second
+ # expiration time. The generated downscoped token should have the same
+ # expiration time.
+ source_credentials = SourceCredentials(expires_in=expected_expires_in)
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=expected_expires_in
+ )
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ request_data = {
+ "grant_type": GRANT_TYPE,
+ "subject_token": "ACCESS_TOKEN_1",
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "requested_token_type": REQUESTED_TOKEN_TYPE,
+ "options": urllib.parse.quote(json.dumps(CREDENTIAL_ACCESS_BOUNDARY_JSON)),
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ credentials = self.make_credentials(source_credentials=source_credentials)
+
+ # Spy on calls to source credentials refresh to confirm the expected request
+ # instance is used.
+ with mock.patch.object(
+ source_credentials, "refresh", wraps=source_credentials.refresh
+ ) as wrapped_souce_cred_refresh:
+ credentials.refresh(request)
+
+ self.assert_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+ # Confirm source credentials called with the same request instance.
+ wrapped_souce_cred_refresh.assert_called_with(request)
+
+ def test_refresh_token_exchange_error(self):
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=ERROR_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(
+ r"Error code invalid_grant: Subject token is invalid. - https://tools.ietf.org/html/rfc6749"
+ )
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_refresh_source_credentials_refresh_error(self):
+ # Initialize downscoped credentials with source credentials that raise
+ # an error on refresh.
+ credentials = self.make_credentials(
+ source_credentials=SourceCredentials(raise_error=True)
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(mock.sentinel.request)
+
+ assert excinfo.match(r"Failed to refresh access token in source credentials.")
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_apply_without_quota_project_id(self):
+ headers = {}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials()
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"])
+ }
+
+ def test_apply_with_quota_project_id(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials(quota_project_id=QUOTA_PROJECT_ID)
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": QUOTA_PROJECT_ID,
+ }
+
+ def test_before_request(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials()
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"]),
+ }
+
+ # Second call shouldn't call refresh (request should be untouched).
+ credentials.before_request(
+ mock.sentinel.request, "POST", "https://example.com/api", headers
+ )
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"]),
+ }
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_before_request_expired(self, utcnow):
+ headers = {}
+ request = self.make_mock_request(status=http_client.OK, data=SUCCESS_RESPONSE)
+ credentials = self.make_credentials()
+ credentials.token = "token"
+ utcnow.return_value = datetime.datetime.min
+ # Set the expiration to one second more than now plus the clock skew
+ # accommodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.min
+ + _helpers.REFRESH_THRESHOLD
+ + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # Cached token should be used.
+ assert headers == {"authorization": "Bearer token"}
+
+ # Next call should simulate 1 second passed.
+ utcnow.return_value = datetime.datetime.min + datetime.timedelta(seconds=1)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # New token should be retrieved.
+ assert headers == {
+ "authorization": "Bearer {}".format(SUCCESS_RESPONSE["access_token"])
+ }
diff --git a/contrib/python/google-auth/py3/tests/test_exceptions.py b/contrib/python/google-auth/py3/tests/test_exceptions.py
new file mode 100644
index 0000000000..6f542498fc
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_exceptions.py
@@ -0,0 +1,55 @@
+# Copyright 2022 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest # type: ignore
+
+from google.auth import exceptions # type:ignore
+
+
+@pytest.fixture(
+ params=[
+ exceptions.GoogleAuthError,
+ exceptions.TransportError,
+ exceptions.RefreshError,
+ exceptions.UserAccessTokenError,
+ exceptions.DefaultCredentialsError,
+ exceptions.MutualTLSChannelError,
+ exceptions.OAuthError,
+ exceptions.ReauthFailError,
+ exceptions.ReauthSamlChallengeFailError,
+ ]
+)
+def retryable_exception(request):
+ return request.param
+
+
+@pytest.fixture(params=[exceptions.ClientCertError])
+def non_retryable_exception(request):
+ return request.param
+
+
+def test_default_retryable_exceptions(retryable_exception):
+ assert not retryable_exception().retryable
+
+
+@pytest.mark.parametrize("retryable", [True, False])
+def test_retryable_exceptions(retryable_exception, retryable):
+ retryable_exception = retryable_exception(retryable=retryable)
+ assert retryable_exception.retryable == retryable
+
+
+@pytest.mark.parametrize("retryable", [True, False])
+def test_non_retryable_exceptions(non_retryable_exception, retryable):
+ non_retryable_exception = non_retryable_exception(retryable=retryable)
+ assert not non_retryable_exception.retryable
diff --git a/contrib/python/google-auth/py3/tests/test_external_account.py b/contrib/python/google-auth/py3/tests/test_external_account.py
new file mode 100644
index 0000000000..0b165bc70b
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_external_account.py
@@ -0,0 +1,1900 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+import urllib
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import external_account
+from google.auth import transport
+
+
+IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
+)
+LANG_LIBRARY_METRICS_HEADER_VALUE = "gl-python/3.7 auth/1.1"
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password"
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+# List of valid workforce pool audiences.
+TEST_USER_AUDIENCES = [
+ "//iam.googleapis.com/locations/global/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/eu/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/eu/workforcePools/workloadIdentityPools/providers/provider-id",
+]
+# Workload identity pool audiences or invalid workforce pool audiences.
+TEST_NON_USER_AUDIENCES = [
+ # Legacy K8s audience format.
+ "identitynamespace:1f12345:my_provider",
+ (
+ "//iam.googleapis.com/projects/123456/locations/"
+ "global/workloadIdentityPools/pool-id/providers/"
+ "provider-id"
+ ),
+ (
+ "//iam.googleapis.com/projects/123456/locations/"
+ "eu/workloadIdentityPools/pool-id/providers/"
+ "provider-id"
+ ),
+ # Pool ID with workforcePools string.
+ (
+ "//iam.googleapis.com/projects/123456/locations/"
+ "global/workloadIdentityPools/workforcePools/providers/"
+ "provider-id"
+ ),
+ # Unrealistic / incorrect workforce pool audiences.
+ "//iamgoogleapis.com/locations/eu/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapiscom/locations/eu/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/workforcePools/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations/eu/workforcePool/pool-id/providers/provider-id",
+ "//iam.googleapis.com/locations//workforcePool/pool-id/providers/provider-id",
+]
+
+
+class CredentialsImpl(external_account.Credentials):
+ def __init__(self, **kwargs):
+ super(CredentialsImpl, self).__init__(**kwargs)
+ self._counter = 0
+
+ def retrieve_subject_token(self, request):
+ counter = self._counter
+ self._counter += 1
+ return "subject_token_{}".format(counter)
+
+
+class TestCredentials(object):
+ TOKEN_URL = "https://sts.googleapis.com/v1/token"
+ TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
+ PROJECT_NUMBER = "123456"
+ POOL_ID = "POOL_ID"
+ PROVIDER_ID = "PROVIDER_ID"
+ AUDIENCE = (
+ "//iam.googleapis.com/projects/{}"
+ "/locations/global/workloadIdentityPools/{}"
+ "/providers/{}"
+ ).format(PROJECT_NUMBER, POOL_ID, PROVIDER_ID)
+ WORKFORCE_AUDIENCE = (
+ "//iam.googleapis.com/locations/global/workforcePools/{}/providers/{}"
+ ).format(POOL_ID, PROVIDER_ID)
+ WORKFORCE_POOL_USER_PROJECT = "WORKFORCE_POOL_USER_PROJECT_NUMBER"
+ SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+ WORKFORCE_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:id_token"
+ CREDENTIAL_SOURCE = {"file": "/var/run/secrets/goog.id/token"}
+ SUCCESS_RESPONSE = {
+ "access_token": "ACCESS_TOKEN",
+ "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "token_type": "Bearer",
+ "expires_in": 3600,
+ "scope": "scope1 scope2",
+ }
+ ERROR_RESPONSE = {
+ "error": "invalid_request",
+ "error_description": "Invalid subject token",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ }
+ QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+ SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
+ )
+ SCOPES = ["scope1", "scope2"]
+ IMPERSONATION_ERROR_RESPONSE = {
+ "error": {
+ "code": 400,
+ "message": "Request contains an invalid argument",
+ "status": "INVALID_ARGUMENT",
+ }
+ }
+ PROJECT_ID = "my-proj-id"
+ CLOUD_RESOURCE_MANAGER_URL = (
+ "https://cloudresourcemanager.googleapis.com/v1/projects/"
+ )
+ CLOUD_RESOURCE_MANAGER_SUCCESS_RESPONSE = {
+ "projectNumber": PROJECT_NUMBER,
+ "projectId": PROJECT_ID,
+ "lifecycleState": "ACTIVE",
+ "name": "project-name",
+ "createTime": "2018-11-06T04:42:54.109Z",
+ "parent": {"type": "folder", "id": "12345678901"},
+ }
+
+ @classmethod
+ def make_credentials(
+ cls,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ token_info_url=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ service_account_impersonation_options={},
+ universe_domain=external_account._DEFAULT_UNIVERSE_DOMAIN,
+ ):
+ return CredentialsImpl(
+ audience=cls.AUDIENCE,
+ subject_token_type=cls.SUBJECT_TOKEN_TYPE,
+ token_url=cls.TOKEN_URL,
+ token_info_url=token_info_url,
+ service_account_impersonation_url=service_account_impersonation_url,
+ service_account_impersonation_options=service_account_impersonation_options,
+ credential_source=cls.CREDENTIAL_SOURCE,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ universe_domain=universe_domain,
+ )
+
+ @classmethod
+ def make_workforce_pool_credentials(
+ cls,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ workforce_pool_user_project=None,
+ ):
+ return CredentialsImpl(
+ audience=cls.WORKFORCE_AUDIENCE,
+ subject_token_type=cls.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ token_url=cls.TOKEN_URL,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=cls.CREDENTIAL_SOURCE,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ workforce_pool_user_project=workforce_pool_user_project,
+ )
+
+ @classmethod
+ def make_mock_request(
+ cls,
+ status=http_client.OK,
+ data=None,
+ impersonation_status=None,
+ impersonation_data=None,
+ cloud_resource_manager_status=None,
+ cloud_resource_manager_data=None,
+ ):
+ # STS token exchange request.
+ token_response = mock.create_autospec(transport.Response, instance=True)
+ token_response.status = status
+ token_response.data = json.dumps(data).encode("utf-8")
+ responses = [token_response]
+
+ # If service account impersonation is requested, mock the expected response.
+ if impersonation_status:
+ impersonation_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ impersonation_response.status = impersonation_status
+ impersonation_response.data = json.dumps(impersonation_data).encode("utf-8")
+ responses.append(impersonation_response)
+
+ # If cloud resource manager is requested, mock the expected response.
+ if cloud_resource_manager_status:
+ cloud_resource_manager_response = mock.create_autospec(
+ transport.Response, instance=True
+ )
+ cloud_resource_manager_response.status = cloud_resource_manager_status
+ cloud_resource_manager_response.data = json.dumps(
+ cloud_resource_manager_data
+ ).encode("utf-8")
+ responses.append(cloud_resource_manager_response)
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ @classmethod
+ def assert_token_request_kwargs(cls, request_kwargs, headers, request_data):
+ assert request_kwargs["url"] == cls.TOKEN_URL
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+ for (k, v) in body_tuples:
+ assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+ assert len(body_tuples) == len(request_data.keys())
+
+ @classmethod
+ def assert_impersonation_request_kwargs(cls, request_kwargs, headers, request_data):
+ assert request_kwargs["url"] == cls.SERVICE_ACCOUNT_IMPERSONATION_URL
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_json = json.loads(request_kwargs["body"].decode("utf-8"))
+ assert body_json == request_data
+
+ @classmethod
+ def assert_resource_manager_request_kwargs(
+ cls, request_kwargs, project_number, headers
+ ):
+ assert request_kwargs["url"] == cls.CLOUD_RESOURCE_MANAGER_URL + project_number
+ assert request_kwargs["method"] == "GET"
+ assert request_kwargs["headers"] == headers
+ assert "body" not in request_kwargs
+
+ def test_default_state(self):
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+
+ # Token url and service account impersonation url should be set
+ assert credentials._token_url
+ assert credentials._service_account_impersonation_url
+        # No token acquired yet
+ assert not credentials.token
+ assert not credentials.valid
+ # Expiration hasn't been set yet
+ assert not credentials.expiry
+ assert not credentials.expired
+ # Scopes are required
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+ assert not credentials.quota_project_id
+ # Token info url not set yet
+ assert not credentials.token_info_url
+
+ def test_nonworkforce_with_workforce_pool_user_project(self):
+ with pytest.raises(ValueError) as excinfo:
+ CredentialsImpl(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT,
+ )
+
+ assert excinfo.match(
+ "workforce_pool_user_project should not be set for non-workforce "
+ "pool credentials"
+ )
+
+ def test_with_scopes(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(["email"])
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_scopes_workforce_pool(self):
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(["email"])
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.requires_scopes
+ assert (
+ scoped_credentials.info.get("workforce_pool_user_project")
+ == self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ def test_with_scopes_using_user_and_default_scopes(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(
+ ["email"], default_scopes=["profile"]
+ )
+
+ assert scoped_credentials.has_scopes(["email"])
+ assert not scoped_credentials.has_scopes(["profile"])
+ assert not scoped_credentials.requires_scopes
+ assert scoped_credentials.scopes == ["email"]
+ assert scoped_credentials.default_scopes == ["profile"]
+
+ def test_with_scopes_using_default_scopes_only(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert credentials.requires_scopes
+
+ scoped_credentials = credentials.with_scopes(None, default_scopes=["profile"])
+
+ assert scoped_credentials.has_scopes(["profile"])
+ assert not scoped_credentials.requires_scopes
+
+ def test_with_scopes_full_options_propagated(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ scopes=self.SCOPES,
+ token_info_url=self.TOKEN_INFO_URL,
+ default_scopes=["default1"],
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ )
+
+ with mock.patch.object(
+ external_account.Credentials, "__init__", return_value=None
+ ) as mock_init:
+ credentials.with_scopes(["email"], ["default2"])
+
+ # Confirm with_scopes initialized the credential with the expected
+ # parameters and scopes.
+ mock_init.assert_called_once_with(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ token_info_url=self.TOKEN_INFO_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ scopes=["email"],
+ default_scopes=["default2"],
+ universe_domain=external_account._DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ def test_with_token_uri(self):
+ credentials = self.make_credentials()
+ new_token_uri = "https://eu-sts.googleapis.com/v1/token"
+
+ assert credentials._token_url == self.TOKEN_URL
+
+ creds_with_new_token_uri = credentials.with_token_uri(new_token_uri)
+
+ assert creds_with_new_token_uri._token_url == new_token_uri
+
+ def test_with_token_uri_workforce_pool(self):
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ new_token_uri = "https://eu-sts.googleapis.com/v1/token"
+
+ assert credentials._token_url == self.TOKEN_URL
+
+ creds_with_new_token_uri = credentials.with_token_uri(new_token_uri)
+
+ assert creds_with_new_token_uri._token_url == new_token_uri
+ assert (
+ creds_with_new_token_uri.info.get("workforce_pool_user_project")
+ == self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+
+ assert not credentials.scopes
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+
+ def test_with_quota_project_workforce_pool(self):
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ assert not credentials.scopes
+ assert not credentials.quota_project_id
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ assert quota_project_creds.quota_project_id == "project-foo"
+ assert (
+ quota_project_creds.info.get("workforce_pool_user_project")
+ == self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ def test_with_quota_project_full_options_propagated(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ token_info_url=self.TOKEN_INFO_URL,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ scopes=self.SCOPES,
+ default_scopes=["default1"],
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ )
+
+ with mock.patch.object(
+ external_account.Credentials, "__init__", return_value=None
+ ) as mock_init:
+ credentials.with_quota_project("project-foo")
+
+ # Confirm with_quota_project initialized the credential with the
+ # expected parameters and quota project ID.
+ mock_init.assert_called_once_with(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ token_info_url=self.TOKEN_INFO_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id="project-foo",
+ scopes=self.SCOPES,
+ default_scopes=["default1"],
+ universe_domain=external_account._DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ def test_with_invalid_impersonation_target_principal(self):
+ invalid_url = "https://iamcredentials.googleapis.com/v1/invalid"
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ self.make_credentials(service_account_impersonation_url=invalid_url)
+
+ assert excinfo.match(
+ r"Unable to determine target principal from service account impersonation URL."
+ )
+
+ def test_info(self):
+ credentials = self.make_credentials(universe_domain="dummy_universe.com")
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": self.AUDIENCE,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "token_url": self.TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE.copy(),
+ "universe_domain": "dummy_universe.com",
+ }
+
+ def test_universe_domain(self):
+ credentials = self.make_credentials(universe_domain="dummy_universe.com")
+ assert credentials.universe_domain == "dummy_universe.com"
+
+ credentials = self.make_credentials()
+ assert credentials.universe_domain == external_account._DEFAULT_UNIVERSE_DOMAIN
+
+ def test_info_workforce_pool(self):
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": self.WORKFORCE_AUDIENCE,
+ "subject_token_type": self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ "token_url": self.TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE.copy(),
+ "workforce_pool_user_project": self.WORKFORCE_POOL_USER_PROJECT,
+ "universe_domain": external_account._DEFAULT_UNIVERSE_DOMAIN,
+ }
+
+ def test_info_with_full_options(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ token_info_url=self.TOKEN_INFO_URL,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ )
+
+ assert credentials.info == {
+ "type": "external_account",
+ "audience": self.AUDIENCE,
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "token_url": self.TOKEN_URL,
+ "token_info_url": self.TOKEN_INFO_URL,
+ "service_account_impersonation_url": self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "service_account_impersonation": {"token_lifetime_seconds": 2800},
+ "credential_source": self.CREDENTIAL_SOURCE.copy(),
+ "quota_project_id": self.QUOTA_PROJECT_ID,
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "universe_domain": external_account._DEFAULT_UNIVERSE_DOMAIN,
+ }
+
+ def test_service_account_email_without_impersonation(self):
+ credentials = self.make_credentials()
+
+ assert credentials.service_account_email is None
+
+ def test_service_account_email_with_impersonation(self):
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+
+ assert credentials.service_account_email == SERVICE_ACCOUNT_EMAIL
+
+ @pytest.mark.parametrize("audience", TEST_NON_USER_AUDIENCES)
+ def test_is_user_with_non_users(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.is_user is False
+
+ @pytest.mark.parametrize("audience", TEST_USER_AUDIENCES)
+ def test_is_user_with_users(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.is_user is True
+
+ @pytest.mark.parametrize("audience", TEST_USER_AUDIENCES)
+ def test_is_user_with_users_and_impersonation(self, audience):
+ # Initialize the credentials with service account impersonation.
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ )
+
+ # Even though the audience is for a workforce pool, since service account
+ # impersonation is used, the credentials will represent a service account and
+ # not a user.
+ assert credentials.is_user is False
+
+ @pytest.mark.parametrize("audience", TEST_NON_USER_AUDIENCES)
+ def test_is_workforce_pool_with_non_users(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.is_workforce_pool is False
+
+ @pytest.mark.parametrize("audience", TEST_USER_AUDIENCES)
+ def test_is_workforce_pool_with_users(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.is_workforce_pool is True
+
+ @pytest.mark.parametrize("audience", TEST_USER_AUDIENCES)
+ def test_is_workforce_pool_with_users_and_impersonation(self, audience):
+ # Initialize the credentials with workforce audience and service account
+ # impersonation.
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ )
+
+ # Even though impersonation is used, is_workforce_pool should still return True.
+ assert credentials.is_workforce_pool is True
+
+ @pytest.mark.parametrize("mock_expires_in", [2800, "2800"])
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_without_client_auth_success(
+ self, unused_utcnow, mock_auth_lib_value, mock_expires_in
+ ):
+ response = self.SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = mock_expires_in
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=int(mock_expires_in)
+ )
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ credentials = self.make_credentials()
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_workforce_without_client_auth_success(
+ self, unused_utcnow, test_auth_lib_value
+ ):
+ response = self.SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.WORKFORCE_AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ "options": urllib.parse.quote(
+ json.dumps({"userProject": self.WORKFORCE_POOL_USER_PROJECT})
+ ),
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_workforce_with_client_auth_success(
+ self, unused_utcnow, mock_auth_lib_value
+ ):
+ response = self.SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.WORKFORCE_AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ # Client Auth will have higher priority over workforce_pool_user_project.
+ credentials = self.make_workforce_pool_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT,
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ @mock.patch("google.auth._helpers.utcnow", return_value=datetime.datetime.min)
+ def test_refresh_workforce_with_client_auth_and_no_workforce_project_success(
+ self, unused_utcnow, mock_lib_version_value
+ ):
+ response = self.SUCCESS_RESPONSE.copy()
+ # Test custom expiration to confirm expiry is set correctly.
+ response["expires_in"] = 2800
+ expected_expiry = datetime.datetime.min + datetime.timedelta(
+ seconds=response["expires_in"]
+ )
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.WORKFORCE_AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(status=http_client.OK, data=response)
+ # Client Auth will be sufficient for user project determination.
+ credentials = self.make_workforce_pool_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ workforce_pool_user_project=None,
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == response["access_token"]
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_impersonation_without_client_auth_success(
+ self, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_workforce_impersonation_without_client_auth_success(
+ self, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.WORKFORCE_AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ "options": urllib.parse.quote(
+ json.dumps({"userProject": self.WORKFORCE_POOL_USER_PROJECT})
+ ),
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_workforce_pool_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT,
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_without_client_auth_success_explicit_user_scopes_ignore_default_scopes(
+ self, mock_auth_lib_value
+ ):
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "scope1 scope2",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(
+ scopes=["scope1", "scope2"],
+ # Default scopes will be ignored in favor of user scopes.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.has_scopes(["scope1", "scope2"])
+ assert not credentials.has_scopes(["ignored"])
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_without_client_auth_success_explicit_default_scopes_only(
+ self, mock_auth_lib_value
+ ):
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "scope": "scope1 scope2",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(
+ scopes=None,
+ # Default scopes will be used since user scopes are none.
+ default_scopes=["scope1", "scope2"],
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ assert credentials.has_scopes(["scope1", "scope2"])
+
+ def test_refresh_without_client_auth_error(self):
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST, data=self.ERROR_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+ assert not credentials.expired
+ assert credentials.token is None
+
+ def test_refresh_impersonation_without_client_auth_error(self):
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE,
+ impersonation_status=http_client.BAD_REQUEST,
+ impersonation_data=self.IMPERSONATION_ERROR_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(r"Unable to acquire impersonated credentials")
+ assert not credentials.expired
+ assert credentials.token is None
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_with_client_auth_success(self, mock_auth_lib_value):
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ }
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID, client_secret=CLIENT_SECRET
+ )
+
+ credentials.refresh(request)
+
+ self.assert_token_request_kwargs(request.call_args[1], headers, request_data)
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_impersonation_with_client_auth_success_ignore_default_scopes(
+ self, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ # Default scopes will be ignored since user scopes are specified.
+ default_scopes=["ignored"],
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_impersonation_with_client_auth_success_use_default_scopes(
+ self, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic {}".format(BASIC_AUTH_ENCODING),
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=None,
+ # Default scopes will be used since user specified scopes are none.
+ default_scopes=self.SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ def test_apply_without_quota_project_id(self):
+ headers = {}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_apply_workforce_without_quota_project_id(self):
+ headers = {}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_apply_impersonation_without_quota_project_id(self):
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ )
+ headers = {}
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_apply_with_quota_project_id(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials(quota_project_id=self.QUOTA_PROJECT_ID)
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_apply_impersonation_with_quota_project_id(self):
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ )
+ headers = {"other": "header-value"}
+
+ credentials.refresh(request)
+ credentials.apply(headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_before_request(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials()
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_before_request_workforce(self):
+ headers = {"other": "header-value"}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_workforce_pool_credentials(
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT
+ )
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ def test_before_request_impersonation(self):
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ headers = {"other": "header-value"}
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+
+ # First call should call refresh, setting the token.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ # Second call shouldn't call refresh.
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ assert headers == {
+ "other": "header-value",
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_before_request_expired(self, utcnow):
+ headers = {}
+ request = self.make_mock_request(
+ status=http_client.OK, data=self.SUCCESS_RESPONSE
+ )
+ credentials = self.make_credentials()
+ credentials.token = "token"
+ utcnow.return_value = datetime.datetime.min
+ # Set the expiration to one second more than now plus the clock skew
+ # accomodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.min
+ + _helpers.REFRESH_THRESHOLD
+ + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # Cached token should be used.
+ assert headers == {
+ "authorization": "Bearer token",
+ "x-identity-trust-boundary": "0",
+ }
+
+ # Next call should simulate 1 second passed.
+ utcnow.return_value = datetime.datetime.min + datetime.timedelta(seconds=1)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # New token should be retrieved.
+ assert headers == {
+ "authorization": "Bearer {}".format(self.SUCCESS_RESPONSE["access_token"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ @mock.patch("google.auth._helpers.utcnow")
+ def test_before_request_impersonation_expired(self, utcnow):
+ headers = {}
+ expire_time = (
+ datetime.datetime.min + datetime.timedelta(seconds=3601)
+ ).isoformat("T") + "Z"
+ # Service account impersonation response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL
+ )
+ credentials.token = "token"
+ utcnow.return_value = datetime.datetime.min
+ # Set the expiration to one second more than now plus the clock skew
+ # accomodation. These credentials should be valid.
+ credentials.expiry = (
+ datetime.datetime.min
+ + _helpers.REFRESH_THRESHOLD
+ + datetime.timedelta(seconds=1)
+ )
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # Cached token should be used.
+ assert headers == {
+ "authorization": "Bearer token",
+ "x-identity-trust-boundary": "0",
+ }
+
+ # Next call should simulate 1 second passed. This will trigger the expiration
+ # threshold.
+ utcnow.return_value = datetime.datetime.min + datetime.timedelta(seconds=1)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ credentials.before_request(request, "POST", "https://example.com/api", headers)
+
+ # New token should be retrieved.
+ assert headers == {
+ "authorization": "Bearer {}".format(impersonation_response["accessToken"]),
+ "x-identity-trust-boundary": "0",
+ }
+
+ @pytest.mark.parametrize(
+ "audience",
+ [
+ # Legacy K8s audience format.
+ "identitynamespace:1f12345:my_provider",
+ # Unrealistic audiences.
+ "//iam.googleapis.com/projects",
+ "//iam.googleapis.com/projects/",
+ "//iam.googleapis.com/project/123456",
+ "//iam.googleapis.com/projects//123456",
+ "//iam.googleapis.com/prefix_projects/123456",
+ "//iam.googleapis.com/projects_suffix/123456",
+ ],
+ )
+ def test_project_number_indeterminable(self, audience):
+ credentials = CredentialsImpl(
+ audience=audience,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.project_number is None
+ assert credentials.get_project_id(None) is None
+
+ def test_project_number_determinable(self):
+ credentials = CredentialsImpl(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.project_number == self.PROJECT_NUMBER
+
+ def test_project_number_workforce(self):
+ credentials = CredentialsImpl(
+ audience=self.WORKFORCE_AUDIENCE,
+ subject_token_type=self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT,
+ )
+
+ assert credentials.project_number is None
+
+ def test_project_id_without_scopes(self):
+ # Initialize credentials with no scopes.
+ credentials = CredentialsImpl(
+ audience=self.AUDIENCE,
+ subject_token_type=self.SUBJECT_TOKEN_TYPE,
+ token_url=self.TOKEN_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ )
+
+ assert credentials.get_project_id(None) is None
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_get_project_id_cloud_resource_manager_success(
+ self, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/false",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=3600)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "3600s",
+ }
+ # Initialize mock request to handle token exchange, service account
+ # impersonation and cloud resource manager request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ cloud_resource_manager_status=http_client.OK,
+ cloud_resource_manager_data=self.CLOUD_RESOURCE_MANAGER_SUCCESS_RESPONSE,
+ )
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=self.SCOPES,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ )
+
+ # Expected project ID from cloud resource manager response should be returned.
+ project_id = credentials.get_project_id(request)
+
+ assert project_id == self.PROJECT_ID
+ # 3 requests should be processed.
+ assert len(request.call_args_list) == 3
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ # In the process of getting project ID, an access token should be
+ # retrieved.
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+ # Verify cloud resource manager request parameters.
+ self.assert_resource_manager_request_kwargs(
+ request.call_args_list[2][1],
+ self.PROJECT_NUMBER,
+ {
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ "authorization": "Bearer {}".format(
+ impersonation_response["accessToken"]
+ ),
+ "x-identity-trust-boundary": "0",
+ },
+ )
+
+ # Calling get_project_id again should return the cached project_id.
+ project_id = credentials.get_project_id(request)
+
+ assert project_id == self.PROJECT_ID
+ # No additional requests.
+ assert len(request.call_args_list) == 3
+
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_workforce_pool_get_project_id_cloud_resource_manager_success(
+ self, mock_auth_lib_value
+ ):
+ # STS token exchange request/response.
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/false config-lifetime/false",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.WORKFORCE_AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.WORKFORCE_SUBJECT_TOKEN_TYPE,
+ "scope": "scope1 scope2",
+ "options": urllib.parse.quote(
+ json.dumps({"userProject": self.WORKFORCE_POOL_USER_PROJECT})
+ ),
+ }
+ # Initialize mock request to handle token exchange and cloud resource
+ # manager request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ cloud_resource_manager_status=http_client.OK,
+ cloud_resource_manager_data=self.CLOUD_RESOURCE_MANAGER_SUCCESS_RESPONSE,
+ )
+ credentials = self.make_workforce_pool_credentials(
+ scopes=self.SCOPES,
+ quota_project_id=self.QUOTA_PROJECT_ID,
+ workforce_pool_user_project=self.WORKFORCE_POOL_USER_PROJECT,
+ )
+
+ # Expected project ID from cloud resource manager response should be returned.
+ project_id = credentials.get_project_id(request)
+
+ assert project_id == self.PROJECT_ID
+ # 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # In the process of getting project ID, an access token should be
+ # retrieved.
+ assert credentials.valid
+ assert not credentials.expired
+ assert credentials.token == self.SUCCESS_RESPONSE["access_token"]
+ # Verify cloud resource manager request parameters.
+ self.assert_resource_manager_request_kwargs(
+ request.call_args_list[1][1],
+ self.WORKFORCE_POOL_USER_PROJECT,
+ {
+ "x-goog-user-project": self.QUOTA_PROJECT_ID,
+ "authorization": "Bearer {}".format(
+ self.SUCCESS_RESPONSE["access_token"]
+ ),
+ "x-identity-trust-boundary": "0",
+ },
+ )
+
+ # Calling get_project_id again should return the cached project_id.
+ project_id = credentials.get_project_id(request)
+
+ assert project_id == self.PROJECT_ID
+ # No additional requests.
+ assert len(request.call_args_list) == 2
+
+ @mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ )
+ @mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value=LANG_LIBRARY_METRICS_HEADER_VALUE,
+ )
+ def test_refresh_impersonation_with_lifetime(
+ self, mock_metrics_header_value, mock_auth_lib_value
+ ):
+ # Simulate service account access token expires in 2800 seconds.
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=2800)
+ ).isoformat("T") + "Z"
+ expected_expiry = datetime.datetime.strptime(expire_time, "%Y-%m-%dT%H:%M:%SZ")
+ # STS token exchange request/response.
+ token_response = self.SUCCESS_RESPONSE.copy()
+ token_headers = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "x-goog-api-client": "gl-python/3.7 auth/1.1 google-byoid-sdk sa-impersonation/true config-lifetime/true",
+ }
+ token_request_data = {
+ "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+ "audience": self.AUDIENCE,
+ "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+ "subject_token": "subject_token_0",
+ "subject_token_type": self.SUBJECT_TOKEN_TYPE,
+ "scope": "https://www.googleapis.com/auth/iam",
+ }
+ # Service account impersonation request/response.
+ impersonation_response = {
+ "accessToken": "SA_ACCESS_TOKEN",
+ "expireTime": expire_time,
+ }
+ impersonation_headers = {
+ "Content-Type": "application/json",
+ "authorization": "Bearer {}".format(token_response["access_token"]),
+ "x-goog-api-client": IMPERSONATE_ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ "x-identity-trust-boundary": "0",
+ }
+ impersonation_request_data = {
+ "delegates": None,
+ "scope": self.SCOPES,
+ "lifetime": "2800s",
+ }
+ # Initialize mock request to handle token exchange and service account
+ # impersonation request.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=token_response,
+ impersonation_status=http_client.OK,
+ impersonation_data=impersonation_response,
+ )
+ # Initialize credentials with service account impersonation.
+ credentials = self.make_credentials(
+ service_account_impersonation_url=self.SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ scopes=self.SCOPES,
+ )
+
+ credentials.refresh(request)
+
+ # Only 2 requests should be processed.
+ assert len(request.call_args_list) == 2
+ # Verify token exchange request parameters.
+ self.assert_token_request_kwargs(
+ request.call_args_list[0][1], token_headers, token_request_data
+ )
+ # Verify service account impersonation request parameters.
+ self.assert_impersonation_request_kwargs(
+ request.call_args_list[1][1],
+ impersonation_headers,
+ impersonation_request_data,
+ )
+ assert credentials.valid
+ assert credentials.expiry == expected_expiry
+ assert not credentials.expired
+ assert credentials.token == impersonation_response["accessToken"]
+
+ def test_get_project_id_cloud_resource_manager_error(self):
+ # Simulate resource doesn't have sufficient permissions to access
+ # cloud resource manager.
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data=self.SUCCESS_RESPONSE.copy(),
+ cloud_resource_manager_status=http_client.UNAUTHORIZED,
+ )
+ credentials = self.make_credentials(scopes=self.SCOPES)
+
+ project_id = credentials.get_project_id(request)
+
+ assert project_id is None
+ # Only 2 requests to STS and cloud resource manager should be sent.
+ assert len(request.call_args_list) == 2
diff --git a/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py b/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py
new file mode 100644
index 0000000000..7ffd5078c8
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_external_account_authorized_user.py
@@ -0,0 +1,512 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+
+import mock
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.auth import external_account_authorized_user
+from google.auth import transport
+
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
+REVOKE_URL = "https://sts.googleapis.com/v1/revoke"
+PROJECT_NUMBER = "123456"
+QUOTA_PROJECT_ID = "654321"
+POOL_ID = "POOL_ID"
+PROVIDER_ID = "PROVIDER_ID"
+AUDIENCE = (
+ "//iam.googleapis.com/projects/{}"
+ "/locations/global/workloadIdentityPools/{}"
+ "/providers/{}"
+).format(PROJECT_NUMBER, POOL_ID, PROVIDER_ID)
+REFRESH_TOKEN = "REFRESH_TOKEN"
+NEW_REFRESH_TOKEN = "NEW_REFRESH_TOKEN"
+ACCESS_TOKEN = "ACCESS_TOKEN"
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password".
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SCOPES = ["email", "profile"]
+NOW = datetime.datetime(1990, 8, 27, 6, 54, 30)
+
+
+class TestCredentials(object):
+ @classmethod
+ def make_credentials(
+ cls,
+ audience=AUDIENCE,
+ refresh_token=REFRESH_TOKEN,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ **kwargs
+ ):
+ return external_account_authorized_user.Credentials(
+ audience=audience,
+ refresh_token=refresh_token,
+ token_url=token_url,
+ token_info_url=token_info_url,
+ client_id=client_id,
+ client_secret=client_secret,
+ **kwargs
+ )
+
+ @classmethod
+ def make_mock_request(cls, status=http_client.OK, data=None):
+ # STS token exchange request.
+ token_response = mock.create_autospec(transport.Response, instance=True)
+ token_response.status = status
+ token_response.data = json.dumps(data).encode("utf-8")
+ responses = [token_response]
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ def test_default_state(self):
+ creds = self.make_credentials()
+
+ assert not creds.expiry
+ assert not creds.expired
+ assert not creds.token
+ assert not creds.valid
+ assert not creds.requires_scopes
+ assert not creds.scopes
+ assert not creds.revoke_url
+ assert creds.token_info_url
+ assert creds.client_id
+ assert creds.client_secret
+ assert creds.is_user
+ assert creds.refresh_token == REFRESH_TOKEN
+ assert creds.audience == AUDIENCE
+ assert creds.token_url == TOKEN_URL
+
+ def test_basic_create(self):
+ creds = external_account_authorized_user.Credentials(
+ token=ACCESS_TOKEN,
+ expiry=datetime.datetime.max,
+ scopes=SCOPES,
+ revoke_url=REVOKE_URL,
+ )
+
+ assert creds.expiry == datetime.datetime.max
+ assert not creds.expired
+ assert creds.token == ACCESS_TOKEN
+ assert creds.valid
+ assert not creds.requires_scopes
+ assert creds.scopes == SCOPES
+ assert creds.is_user
+ assert creds.revoke_url == REVOKE_URL
+
+ def test_stunted_create_no_refresh_token(self):
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(token=None, refresh_token=None)
+
+ assert excinfo.match(
+ r"Token should be created with fields to make it valid \(`token` and "
+ r"`expiry`\), or fields to allow it to refresh \(`refresh_token`, "
+ r"`token_url`, `client_id`, `client_secret`\)\."
+ )
+
+ def test_stunted_create_no_token_url(self):
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(token=None, token_url=None)
+
+ assert excinfo.match(
+ r"Token should be created with fields to make it valid \(`token` and "
+ r"`expiry`\), or fields to allow it to refresh \(`refresh_token`, "
+ r"`token_url`, `client_id`, `client_secret`\)\."
+ )
+
+ def test_stunted_create_no_client_id(self):
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(token=None, client_id=None)
+
+ assert excinfo.match(
+ r"Token should be created with fields to make it valid \(`token` and "
+ r"`expiry`\), or fields to allow it to refresh \(`refresh_token`, "
+ r"`token_url`, `client_id`, `client_secret`\)\."
+ )
+
+ def test_stunted_create_no_client_secret(self):
+ with pytest.raises(ValueError) as excinfo:
+ self.make_credentials(token=None, client_secret=None)
+
+ assert excinfo.match(
+ r"Token should be created with fields to make it valid \(`token` and "
+ r"`expiry`\), or fields to allow it to refresh \(`refresh_token`, "
+ r"`token_url`, `client_id`, `client_secret`\)\."
+ )
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=NOW)
+ def test_refresh_auth_success(self, utcnow):
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data={"access_token": ACCESS_TOKEN, "expires_in": 3600},
+ )
+ creds = self.make_credentials()
+
+ creds.refresh(request)
+
+ assert creds.expiry == utcnow() + datetime.timedelta(seconds=3600)
+ assert not creds.expired
+ assert creds.token == ACCESS_TOKEN
+ assert creds.valid
+ assert not creds.requires_scopes
+ assert creds.is_user
+ assert creds._refresh_token == REFRESH_TOKEN
+
+ request.assert_called_once_with(
+ url=TOKEN_URL,
+ method="POST",
+ headers={
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ },
+ body=("grant_type=refresh_token&refresh_token=" + REFRESH_TOKEN).encode(
+ "UTF-8"
+ ),
+ )
+
+ @mock.patch("google.auth._helpers.utcnow", return_value=NOW)
+ def test_refresh_auth_success_new_refresh_token(self, utcnow):
+ request = self.make_mock_request(
+ status=http_client.OK,
+ data={
+ "access_token": ACCESS_TOKEN,
+ "expires_in": 3600,
+ "refresh_token": NEW_REFRESH_TOKEN,
+ },
+ )
+ creds = self.make_credentials()
+
+ creds.refresh(request)
+
+ assert creds.expiry == utcnow() + datetime.timedelta(seconds=3600)
+ assert not creds.expired
+ assert creds.token == ACCESS_TOKEN
+ assert creds.valid
+ assert not creds.requires_scopes
+ assert creds.is_user
+ assert creds._refresh_token == NEW_REFRESH_TOKEN
+
+ request.assert_called_once_with(
+ url=TOKEN_URL,
+ method="POST",
+ headers={
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ },
+ body=("grant_type=refresh_token&refresh_token=" + REFRESH_TOKEN).encode(
+ "UTF-8"
+ ),
+ )
+
+ def test_refresh_auth_failure(self):
+ request = self.make_mock_request(
+ status=http_client.BAD_REQUEST,
+ data={
+ "error": "invalid_request",
+ "error_description": "Invalid subject token",
+ "error_uri": "https://tools.ietf.org/html/rfc6749",
+ },
+ )
+ creds = self.make_credentials()
+
+ with pytest.raises(exceptions.OAuthError) as excinfo:
+ creds.refresh(request)
+
+ assert excinfo.match(
+ r"Error code invalid_request: Invalid subject token - https://tools.ietf.org/html/rfc6749"
+ )
+
+ assert not creds.expiry
+ assert not creds.expired
+ assert not creds.token
+ assert not creds.valid
+ assert not creds.requires_scopes
+ assert creds.is_user
+
+ request.assert_called_once_with(
+ url=TOKEN_URL,
+ method="POST",
+ headers={
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": "Basic " + BASIC_AUTH_ENCODING,
+ },
+ body=("grant_type=refresh_token&refresh_token=" + REFRESH_TOKEN).encode(
+ "UTF-8"
+ ),
+ )
+
+ def test_refresh_without_refresh_token(self):
+ request = self.make_mock_request()
+ creds = self.make_credentials(refresh_token=None, token=ACCESS_TOKEN)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ creds.refresh(request)
+
+ assert excinfo.match(
+ r"The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret."
+ )
+
+ assert not creds.expiry
+ assert not creds.expired
+ assert not creds.requires_scopes
+ assert creds.is_user
+
+ request.assert_not_called()
+
+ def test_refresh_without_token_url(self):
+ request = self.make_mock_request()
+ creds = self.make_credentials(token_url=None, token=ACCESS_TOKEN)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ creds.refresh(request)
+
+ assert excinfo.match(
+ r"The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret."
+ )
+
+ assert not creds.expiry
+ assert not creds.expired
+ assert not creds.requires_scopes
+ assert creds.is_user
+
+ request.assert_not_called()
+
+ def test_refresh_without_client_id(self):
+ request = self.make_mock_request()
+ creds = self.make_credentials(client_id=None, token=ACCESS_TOKEN)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ creds.refresh(request)
+
+ assert excinfo.match(
+ r"The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret."
+ )
+
+ assert not creds.expiry
+ assert not creds.expired
+ assert not creds.requires_scopes
+ assert creds.is_user
+
+ request.assert_not_called()
+
+ def test_refresh_without_client_secret(self):
+ request = self.make_mock_request()
+ creds = self.make_credentials(client_secret=None, token=ACCESS_TOKEN)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ creds.refresh(request)
+
+ assert excinfo.match(
+ r"The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret."
+ )
+
+ assert not creds.expiry
+ assert not creds.expired
+ assert not creds.requires_scopes
+ assert creds.is_user
+
+ request.assert_not_called()
+
+ def test_info(self):
+ creds = self.make_credentials()
+ info = creds.info
+
+ assert info["audience"] == AUDIENCE
+ assert info["refresh_token"] == REFRESH_TOKEN
+ assert info["token_url"] == TOKEN_URL
+ assert info["token_info_url"] == TOKEN_INFO_URL
+ assert info["client_id"] == CLIENT_ID
+ assert info["client_secret"] == CLIENT_SECRET
+ assert "token" not in info
+ assert "expiry" not in info
+ assert "revoke_url" not in info
+ assert "quota_project_id" not in info
+
+ def test_info_full(self):
+ creds = self.make_credentials(
+ token=ACCESS_TOKEN,
+ expiry=NOW,
+ revoke_url=REVOKE_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+ info = creds.info
+
+ assert info["audience"] == AUDIENCE
+ assert info["refresh_token"] == REFRESH_TOKEN
+ assert info["token_url"] == TOKEN_URL
+ assert info["token_info_url"] == TOKEN_INFO_URL
+ assert info["client_id"] == CLIENT_ID
+ assert info["client_secret"] == CLIENT_SECRET
+ assert info["token"] == ACCESS_TOKEN
+ assert info["expiry"] == NOW.isoformat() + "Z"
+ assert info["revoke_url"] == REVOKE_URL
+ assert info["quota_project_id"] == QUOTA_PROJECT_ID
+
+ def test_to_json(self):
+ creds = self.make_credentials()
+ json_info = creds.to_json()
+ info = json.loads(json_info)
+
+ assert info["audience"] == AUDIENCE
+ assert info["refresh_token"] == REFRESH_TOKEN
+ assert info["token_url"] == TOKEN_URL
+ assert info["token_info_url"] == TOKEN_INFO_URL
+ assert info["client_id"] == CLIENT_ID
+ assert info["client_secret"] == CLIENT_SECRET
+ assert "token" not in info
+ assert "expiry" not in info
+ assert "revoke_url" not in info
+ assert "quota_project_id" not in info
+
+ def test_to_json_full(self):
+ creds = self.make_credentials(
+ token=ACCESS_TOKEN,
+ expiry=NOW,
+ revoke_url=REVOKE_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+ json_info = creds.to_json()
+ info = json.loads(json_info)
+
+ assert info["audience"] == AUDIENCE
+ assert info["refresh_token"] == REFRESH_TOKEN
+ assert info["token_url"] == TOKEN_URL
+ assert info["token_info_url"] == TOKEN_INFO_URL
+ assert info["client_id"] == CLIENT_ID
+ assert info["client_secret"] == CLIENT_SECRET
+ assert info["token"] == ACCESS_TOKEN
+ assert info["expiry"] == NOW.isoformat() + "Z"
+ assert info["revoke_url"] == REVOKE_URL
+ assert info["quota_project_id"] == QUOTA_PROJECT_ID
+
+ def test_to_json_full_with_strip(self):
+ creds = self.make_credentials(
+ token=ACCESS_TOKEN,
+ expiry=NOW,
+ revoke_url=REVOKE_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+ json_info = creds.to_json(strip=["token", "expiry"])
+ info = json.loads(json_info)
+
+ assert info["audience"] == AUDIENCE
+ assert info["refresh_token"] == REFRESH_TOKEN
+ assert info["token_url"] == TOKEN_URL
+ assert info["token_info_url"] == TOKEN_INFO_URL
+ assert info["client_id"] == CLIENT_ID
+ assert info["client_secret"] == CLIENT_SECRET
+ assert "token" not in info
+ assert "expiry" not in info
+ assert info["revoke_url"] == REVOKE_URL
+ assert info["quota_project_id"] == QUOTA_PROJECT_ID
+
+ def test_get_project_id(self):
+ creds = self.make_credentials()
+ request = mock.create_autospec(transport.Request)
+
+ assert creds.get_project_id(request) is None
+ request.assert_not_called()
+
+ def test_with_quota_project(self):
+ creds = self.make_credentials(
+ token=ACCESS_TOKEN,
+ expiry=NOW,
+ revoke_url=REVOKE_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+ new_creds = creds.with_quota_project(QUOTA_PROJECT_ID)
+ assert new_creds._audience == creds._audience
+ assert new_creds._refresh_token == creds._refresh_token
+ assert new_creds._token_url == creds._token_url
+ assert new_creds._token_info_url == creds._token_info_url
+ assert new_creds._client_id == creds._client_id
+ assert new_creds._client_secret == creds._client_secret
+ assert new_creds.token == creds.token
+ assert new_creds.expiry == creds.expiry
+ assert new_creds._revoke_url == creds._revoke_url
+ assert new_creds._quota_project_id == QUOTA_PROJECT_ID
+
+ def test_with_token_uri(self):
+ creds = self.make_credentials(
+ token=ACCESS_TOKEN,
+ expiry=NOW,
+ revoke_url=REVOKE_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ )
+ new_creds = creds.with_token_uri("https://google.com")
+ assert new_creds._audience == creds._audience
+ assert new_creds._refresh_token == creds._refresh_token
+ assert new_creds._token_url == "https://google.com"
+ assert new_creds._token_info_url == creds._token_info_url
+ assert new_creds._client_id == creds._client_id
+ assert new_creds._client_secret == creds._client_secret
+ assert new_creds.token == creds.token
+ assert new_creds.expiry == creds.expiry
+ assert new_creds._revoke_url == creds._revoke_url
+ assert new_creds._quota_project_id == creds._quota_project_id
+
+ def test_from_file_required_options_only(self, tmpdir):
+ from_creds = self.make_credentials()
+ config_file = tmpdir.join("config.json")
+ config_file.write(from_creds.to_json())
+ creds = external_account_authorized_user.Credentials.from_file(str(config_file))
+
+ assert isinstance(creds, external_account_authorized_user.Credentials)
+ assert creds.audience == AUDIENCE
+ assert creds.refresh_token == REFRESH_TOKEN
+ assert creds.token_url == TOKEN_URL
+ assert creds.token_info_url == TOKEN_INFO_URL
+ assert creds.client_id == CLIENT_ID
+ assert creds.client_secret == CLIENT_SECRET
+ assert creds.token is None
+ assert creds.expiry is None
+ assert creds.scopes is None
+ assert creds._revoke_url is None
+ assert creds._quota_project_id is None
+
+ def test_from_file_full_options(self, tmpdir):
+ from_creds = self.make_credentials(
+ token=ACCESS_TOKEN,
+ expiry=NOW,
+ revoke_url=REVOKE_URL,
+ quota_project_id=QUOTA_PROJECT_ID,
+ scopes=SCOPES,
+ )
+ config_file = tmpdir.join("config.json")
+ config_file.write(from_creds.to_json())
+ creds = external_account_authorized_user.Credentials.from_file(str(config_file))
+
+ assert isinstance(creds, external_account_authorized_user.Credentials)
+ assert creds.audience == AUDIENCE
+ assert creds.refresh_token == REFRESH_TOKEN
+ assert creds.token_url == TOKEN_URL
+ assert creds.token_info_url == TOKEN_INFO_URL
+ assert creds.client_id == CLIENT_ID
+ assert creds.client_secret == CLIENT_SECRET
+ assert creds.token == ACCESS_TOKEN
+ assert creds.expiry == NOW
+ assert creds.scopes == SCOPES
+ assert creds._revoke_url == REVOKE_URL
+ assert creds._quota_project_id == QUOTA_PROJECT_ID
diff --git a/contrib/python/google-auth/py3/tests/test_iam.py b/contrib/python/google-auth/py3/tests/test_iam.py
new file mode 100644
index 0000000000..6706afb4b5
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_iam.py
@@ -0,0 +1,102 @@
+# Copyright 2017 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import datetime
+import http.client as http_client
+import json
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import iam
+from google.auth import transport
+import google.auth.credentials
+
+
+def make_request(status, data=None):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+
+ if data is not None:
+ response.data = json.dumps(data).encode("utf-8")
+
+ request = mock.create_autospec(transport.Request)
+ request.return_value = response
+ return request
+
+
+def make_credentials():
+ class CredentialsImpl(google.auth.credentials.Credentials):
+ def __init__(self):
+ super(CredentialsImpl, self).__init__()
+ self.token = "token"
+ # Force refresh
+ self.expiry = datetime.datetime.min + _helpers.REFRESH_THRESHOLD
+
+ def refresh(self, request):
+ pass
+
+ def with_quota_project(self, quota_project_id):
+ raise NotImplementedError()
+
+ return CredentialsImpl()
+
+
+class TestSigner(object):
+ def test_constructor(self):
+ request = mock.sentinel.request
+ credentials = mock.create_autospec(
+ google.auth.credentials.Credentials, instance=True
+ )
+
+ signer = iam.Signer(request, credentials, mock.sentinel.service_account_email)
+
+ assert signer._request == mock.sentinel.request
+ assert signer._credentials == credentials
+ assert signer._service_account_email == mock.sentinel.service_account_email
+
+ def test_key_id(self):
+ signer = iam.Signer(
+ mock.sentinel.request,
+ mock.sentinel.credentials,
+ mock.sentinel.service_account_email,
+ )
+
+ assert signer.key_id is None
+
+ def test_sign_bytes(self):
+ signature = b"DEADBEEF"
+ encoded_signature = base64.b64encode(signature).decode("utf-8")
+ request = make_request(http_client.OK, data={"signedBlob": encoded_signature})
+ credentials = make_credentials()
+
+ signer = iam.Signer(request, credentials, mock.sentinel.service_account_email)
+
+ returned_signature = signer.sign("123")
+
+ assert returned_signature == signature
+ kwargs = request.call_args[1]
+ assert kwargs["headers"]["Content-Type"] == "application/json"
+
+ def test_sign_bytes_failure(self):
+ request = make_request(http_client.UNAUTHORIZED)
+ credentials = make_credentials()
+
+ signer = iam.Signer(request, credentials, mock.sentinel.service_account_email)
+
+ with pytest.raises(exceptions.TransportError):
+ signer.sign("123")
diff --git a/contrib/python/google-auth/py3/tests/test_identity_pool.py b/contrib/python/google-auth/py3/tests/test_identity_pool.py
new file mode 100644
index 0000000000..d126a579bd
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_identity_pool.py
@@ -0,0 +1,1302 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+import os
+import urllib
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import exceptions
+from google.auth import identity_pool
+from google.auth import metrics
+from google.auth import transport
+
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password".
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+SERVICE_ACCOUNT_IMPERSONATION_URL_BASE = (
+ "https://us-east1-iamcredentials.googleapis.com"
+)
+SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE = "/v1/projects/-/serviceAccounts/{}:generateAccessToken".format(
+ SERVICE_ACCOUNT_EMAIL
+)
+SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ SERVICE_ACCOUNT_IMPERSONATION_URL_BASE + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+)
+
+QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+SCOPES = ["scope1", "scope2"]
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+SUBJECT_TOKEN_TEXT_FILE = os.path.join(DATA_DIR, "external_subject_token.txt")
+SUBJECT_TOKEN_JSON_FILE = os.path.join(DATA_DIR, "external_subject_token.json")
+SUBJECT_TOKEN_FIELD_NAME = "access_token"
+
+with open(SUBJECT_TOKEN_TEXT_FILE) as fh:
+ TEXT_FILE_SUBJECT_TOKEN = fh.read()
+
+with open(SUBJECT_TOKEN_JSON_FILE) as fh:
+ JSON_FILE_CONTENT = json.load(fh)
+ JSON_FILE_SUBJECT_TOKEN = JSON_FILE_CONTENT.get(SUBJECT_TOKEN_FIELD_NAME)
+
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
+SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+WORKFORCE_AUDIENCE = (
+ "//iam.googleapis.com/locations/global/workforcePools/POOL_ID/providers/PROVIDER_ID"
+)
+WORKFORCE_SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:id_token"
+WORKFORCE_POOL_USER_PROJECT = "WORKFORCE_POOL_USER_PROJECT_NUMBER"
+
+DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
+
+VALID_TOKEN_URLS = [
+ "https://sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.com",
+ "https://US-EAST-1.sts.googleapis.com",
+ "https://sts.us-east-1.googleapis.com",
+ "https://sts.US-WEST-1.googleapis.com",
+ "https://us-east-1-sts.googleapis.com",
+ "https://US-WEST-1-sts.googleapis.com",
+ "https://us-west-1-sts.googleapis.com/path?query",
+ "https://sts-us-east-1.p.googleapis.com",
+]
+INVALID_TOKEN_URLS = [
+ "https://iamcredentials.googleapis.com",
+ "sts.googleapis.com",
+ "https://",
+ "http://sts.googleapis.com",
+ "https://st.s.googleapis.com",
+ "https://us-eas\t-1.sts.googleapis.com",
+ "https:/us-east-1.sts.googleapis.com",
+ "https://US-WE/ST-1-sts.googleapis.com",
+ "https://sts-us-east-1.googleapis.com",
+ "https://sts-US-WEST-1.googleapis.com",
+ "testhttps://us-east-1.sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.comevil.com",
+ "https://us-east-1.us-east-1.sts.googleapis.com",
+ "https://us-ea.s.t.sts.googleapis.com",
+ "https://sts.googleapis.comevil.com",
+ "hhttps://us-east-1.sts.googleapis.com",
+ "https://us- -1.sts.googleapis.com",
+ "https://-sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.com.evil.com",
+ "https://sts.pgoogleapis.com",
+ "https://p.googleapis.com",
+ "https://sts.p.com",
+ "http://sts.p.googleapis.com",
+ "https://xyz-sts.p.googleapis.com",
+ "https://sts-xyz.123.p.googleapis.com",
+ "https://sts-xyz.p1.googleapis.com",
+ "https://sts-xyz.p.foo.com",
+ "https://sts-xyz.p.foo.googleapis.com",
+]
+VALID_SERVICE_ACCOUNT_IMPERSONATION_URLS = [
+ "https://iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.com",
+ "https://US-EAST-1.iamcredentials.googleapis.com",
+ "https://iamcredentials.us-east-1.googleapis.com",
+ "https://iamcredentials.US-WEST-1.googleapis.com",
+ "https://us-east-1-iamcredentials.googleapis.com",
+ "https://US-WEST-1-iamcredentials.googleapis.com",
+ "https://us-west-1-iamcredentials.googleapis.com/path?query",
+ "https://iamcredentials-us-east-1.p.googleapis.com",
+]
+INVALID_SERVICE_ACCOUNT_IMPERSONATION_URLS = [
+ "https://sts.googleapis.com",
+ "iamcredentials.googleapis.com",
+ "https://",
+ "http://iamcredentials.googleapis.com",
+ "https://iamcre.dentials.googleapis.com",
+ "https://us-eas\t-1.iamcredentials.googleapis.com",
+ "https:/us-east-1.iamcredentials.googleapis.com",
+ "https://US-WE/ST-1-iamcredentials.googleapis.com",
+ "https://iamcredentials-us-east-1.googleapis.com",
+ "https://iamcredentials-US-WEST-1.googleapis.com",
+ "testhttps://us-east-1.iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.comevil.com",
+ "https://us-east-1.us-east-1.iamcredentials.googleapis.com",
+ "https://us-ea.s.t.iamcredentials.googleapis.com",
+ "https://iamcredentials.googleapis.comevil.com",
+ "hhttps://us-east-1.iamcredentials.googleapis.com",
+ "https://us- -1.iamcredentials.googleapis.com",
+ "https://-iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.com.evil.com",
+ "https://iamcredentials.pgoogleapis.com",
+ "https://p.googleapis.com",
+ "https://iamcredentials.p.com",
+ "http://iamcredentials.p.googleapis.com",
+ "https://xyz-iamcredentials.p.googleapis.com",
+ "https://iamcredentials-xyz.123.p.googleapis.com",
+ "https://iamcredentials-xyz.p1.googleapis.com",
+ "https://iamcredentials-xyz.p.foo.com",
+ "https://iamcredentials-xyz.p.foo.googleapis.com",
+]
+
+
+class TestCredentials(object):
+    """Tests for identity_pool.Credentials construction, refresh and helpers."""
+
+    # Credential source reading the subject token from a plain-text file.
+    CREDENTIAL_SOURCE_TEXT = {"file": SUBJECT_TOKEN_TEXT_FILE}
+    # Credential source reading a JSON file and extracting "access_token".
+    CREDENTIAL_SOURCE_JSON = {
+        "file": SUBJECT_TOKEN_JSON_FILE,
+        "format": {"type": "json", "subject_token_field_name": "access_token"},
+    }
+    CREDENTIAL_URL = "http://fakeurl.com"
+    # URL-based equivalents of the two file sources above.
+    CREDENTIAL_SOURCE_TEXT_URL = {"url": CREDENTIAL_URL}
+    CREDENTIAL_SOURCE_JSON_URL = {
+        "url": CREDENTIAL_URL,
+        "format": {"type": "json", "subject_token_field_name": "access_token"},
+    }
+    # Canonical successful STS token-exchange response payload.
+    SUCCESS_RESPONSE = {
+        "access_token": "ACCESS_TOKEN",
+        "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+        "token_type": "Bearer",
+        "expires_in": 3600,
+        "scope": " ".join(SCOPES),
+    }
+
+ @classmethod
+ def make_mock_response(cls, status, data):
+ response = mock.create_autospec(transport.Response, instance=True)
+ response.status = status
+ if isinstance(data, dict):
+ response.data = json.dumps(data).encode("utf-8")
+ else:
+ response.data = data
+ return response
+
+ @classmethod
+ def make_mock_request(
+ cls, token_status=http_client.OK, token_data=None, *extra_requests
+ ):
+ responses = []
+ responses.append(cls.make_mock_response(token_status, token_data))
+
+ while len(extra_requests) > 0:
+ # If service account impersonation is requested, mock the expected response.
+ status, data, extra_requests = (
+ extra_requests[0],
+ extra_requests[1],
+ extra_requests[2:],
+ )
+ responses.append(cls.make_mock_response(status, data))
+
+ request = mock.create_autospec(transport.Request)
+ request.side_effect = responses
+
+ return request
+
+ @classmethod
+ def assert_credential_request_kwargs(
+ cls, request_kwargs, headers, url=CREDENTIAL_URL
+ ):
+ assert request_kwargs["url"] == url
+ assert request_kwargs["method"] == "GET"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs.get("body", None) is None
+
+    @classmethod
+    def assert_token_request_kwargs(
+        cls, request_kwargs, headers, request_data, token_url=TOKEN_URL
+    ):
+        # Verify the STS token-exchange call: a POST to token_url carrying the
+        # expected headers and a urlencoded body equivalent to request_data.
+        assert request_kwargs["url"] == token_url
+        assert request_kwargs["method"] == "POST"
+        assert request_kwargs["headers"] == headers
+        assert request_kwargs["body"] is not None
+        body_tuples = urllib.parse.parse_qsl(request_kwargs["body"])
+        # The body is bytes, so parse_qsl yields bytes pairs; decode each key
+        # and value before comparing against the (str) request_data mapping.
+        assert len(body_tuples) == len(request_data.keys())
+        for (k, v) in body_tuples:
+            assert v.decode("utf-8") == request_data[k.decode("utf-8")]
+
+ @classmethod
+ def assert_impersonation_request_kwargs(
+ cls,
+ request_kwargs,
+ headers,
+ request_data,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ ):
+ assert request_kwargs["url"] == service_account_impersonation_url
+ assert request_kwargs["method"] == "POST"
+ assert request_kwargs["headers"] == headers
+ assert request_kwargs["body"] is not None
+ body_json = json.loads(request_kwargs["body"].decode("utf-8"))
+ assert body_json == request_data
+
+    @classmethod
+    def assert_underlying_credentials_refresh(
+        cls,
+        credentials,
+        audience,
+        subject_token,
+        subject_token_type,
+        token_url,
+        service_account_impersonation_url=None,
+        basic_auth_encoding=None,
+        quota_project_id=None,
+        used_scopes=None,
+        credential_data=None,
+        scopes=None,
+        default_scopes=None,
+        workforce_pool_user_project=None,
+    ):
+        """Utility to assert that a credentials are initialized with the expected
+        attributes by calling refresh functionality and confirming response matches
+        expected one and that the underlying requests were populated with the
+        expected parameters.
+        """
+        # STS token exchange request/response.
+        token_response = cls.SUCCESS_RESPONSE.copy()
+        token_headers = {"Content-Type": "application/x-www-form-urlencoded"}
+        if basic_auth_encoding:
+            token_headers["Authorization"] = "Basic " + basic_auth_encoding
+
+        # Build the metrics header values the credential is expected to send,
+        # derived from the credential's own configuration.
+        metrics_options = {}
+        if credentials._service_account_impersonation_url:
+            metrics_options["sa-impersonation"] = "true"
+        else:
+            metrics_options["sa-impersonation"] = "false"
+        metrics_options["config-lifetime"] = "false"
+        if credentials._credential_source_file:
+            metrics_options["source"] = "file"
+        else:
+            metrics_options["source"] = "url"
+
+        token_headers["x-goog-api-client"] = metrics.byoid_metrics_header(
+            metrics_options
+        )
+
+        # With impersonation the STS exchange only needs the IAM scope; the
+        # user scopes are requested in the impersonation step instead.
+        if service_account_impersonation_url:
+            token_scopes = "https://www.googleapis.com/auth/iam"
+        else:
+            token_scopes = " ".join(used_scopes or [])
+
+        token_request_data = {
+            "grant_type": "urn:ietf:params:oauth:grant-type:token-exchange",
+            "audience": audience,
+            "requested_token_type": "urn:ietf:params:oauth:token-type:access_token",
+            "scope": token_scopes,
+            "subject_token": subject_token,
+            "subject_token_type": subject_token_type,
+        }
+        if workforce_pool_user_project:
+            # Workforce user project travels in the url-encoded "options" blob.
+            token_request_data["options"] = urllib.parse.quote(
+                json.dumps({"userProject": workforce_pool_user_project})
+            )
+
+        metrics_header_value = (
+            "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
+        )
+        if service_account_impersonation_url:
+            # Service account impersonation request/response.
+            expire_time = (
+                _helpers.utcnow().replace(microsecond=0)
+                + datetime.timedelta(seconds=3600)
+            ).isoformat("T") + "Z"
+            impersonation_response = {
+                "accessToken": "SA_ACCESS_TOKEN",
+                "expireTime": expire_time,
+            }
+            impersonation_headers = {
+                "Content-Type": "application/json",
+                "authorization": "Bearer {}".format(token_response["access_token"]),
+                "x-goog-api-client": metrics_header_value,
+                "x-identity-trust-boundary": "0",
+            }
+            impersonation_request_data = {
+                "delegates": None,
+                "scope": used_scopes,
+                "lifetime": "3600s",
+            }
+
+        # Initialize mock request to handle token retrieval, token exchange and
+        # service account impersonation request.
+        requests = []
+        if credential_data:
+            requests.append((http_client.OK, credential_data))
+
+        # Remember each exchange's position so the right call can be inspected
+        # from request.call_args_list below.
+        token_request_index = len(requests)
+        requests.append((http_client.OK, token_response))
+
+        if service_account_impersonation_url:
+            impersonation_request_index = len(requests)
+            requests.append((http_client.OK, impersonation_response))
+
+        # Flatten the (status, data) pairs into make_mock_request's varargs.
+        request = cls.make_mock_request(*[el for req in requests for el in req])
+
+        with mock.patch(
+            "google.auth.metrics.token_request_access_token_impersonate",
+            return_value=metrics_header_value,
+        ):
+            credentials.refresh(request)
+
+        assert len(request.call_args_list) == len(requests)
+        if credential_data:
+            cls.assert_credential_request_kwargs(request.call_args_list[0][1], None)
+        # Verify token exchange request parameters.
+        cls.assert_token_request_kwargs(
+            request.call_args_list[token_request_index][1],
+            token_headers,
+            token_request_data,
+            token_url,
+        )
+        # Verify service account impersonation request parameters if the request
+        # is processed.
+        if service_account_impersonation_url:
+            cls.assert_impersonation_request_kwargs(
+                request.call_args_list[impersonation_request_index][1],
+                impersonation_headers,
+                impersonation_request_data,
+                service_account_impersonation_url,
+            )
+            assert credentials.token == impersonation_response["accessToken"]
+        else:
+            assert credentials.token == token_response["access_token"]
+        assert credentials.quota_project_id == quota_project_id
+        assert credentials.scopes == scopes
+        assert credentials.default_scopes == default_scopes
+
+ @classmethod
+ def make_credentials(
+ cls,
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ credential_source=None,
+ workforce_pool_user_project=None,
+ ):
+ return identity_pool.Credentials(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ token_info_url=token_info_url,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=credential_source,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ workforce_pool_user_project=workforce_pool_user_project,
+ )
+
+    # from_info with every supported field populated forwards all of them.
+    @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+    def test_from_info_full_options(self, mock_init):
+        credentials = identity_pool.Credentials.from_info(
+            {
+                "audience": AUDIENCE,
+                "subject_token_type": SUBJECT_TOKEN_TYPE,
+                "token_url": TOKEN_URL,
+                "token_info_url": TOKEN_INFO_URL,
+                "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+                "service_account_impersonation": {"token_lifetime_seconds": 2800},
+                "client_id": CLIENT_ID,
+                "client_secret": CLIENT_SECRET,
+                "quota_project_id": QUOTA_PROJECT_ID,
+                "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+            }
+        )
+
+        # Confirm identity_pool.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, identity_pool.Credentials)
+        mock_init.assert_called_once_with(
+            audience=AUDIENCE,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=TOKEN_INFO_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            service_account_impersonation_options={"token_lifetime_seconds": 2800},
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            quota_project_id=QUOTA_PROJECT_ID,
+            workforce_pool_user_project=None,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    # from_info with only the required fields fills the rest with defaults.
+    @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+    def test_from_info_required_options_only(self, mock_init):
+        credentials = identity_pool.Credentials.from_info(
+            {
+                "audience": AUDIENCE,
+                "subject_token_type": SUBJECT_TOKEN_TYPE,
+                "token_url": TOKEN_URL,
+                "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+            }
+        )
+
+        # Confirm identity_pool.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, identity_pool.Credentials)
+        mock_init.assert_called_once_with(
+            audience=AUDIENCE,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=None,
+            service_account_impersonation_url=None,
+            service_account_impersonation_options={},
+            client_id=None,
+            client_secret=None,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            quota_project_id=None,
+            workforce_pool_user_project=None,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    # from_info propagates workforce_pool_user_project for workforce audiences.
+    @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+    def test_from_info_workforce_pool(self, mock_init):
+        credentials = identity_pool.Credentials.from_info(
+            {
+                "audience": WORKFORCE_AUDIENCE,
+                "subject_token_type": WORKFORCE_SUBJECT_TOKEN_TYPE,
+                "token_url": TOKEN_URL,
+                "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+                "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
+            }
+        )
+
+        # Confirm identity_pool.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, identity_pool.Credentials)
+        mock_init.assert_called_once_with(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=None,
+            service_account_impersonation_url=None,
+            service_account_impersonation_options={},
+            client_id=None,
+            client_secret=None,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            quota_project_id=None,
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    # from_file mirrors from_info: full options read from a JSON config file.
+    @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+    def test_from_file_full_options(self, mock_init, tmpdir):
+        info = {
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "token_info_url": TOKEN_INFO_URL,
+            "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+            "service_account_impersonation": {"token_lifetime_seconds": 2800},
+            "client_id": CLIENT_ID,
+            "client_secret": CLIENT_SECRET,
+            "quota_project_id": QUOTA_PROJECT_ID,
+            "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+        }
+        config_file = tmpdir.join("config.json")
+        config_file.write(json.dumps(info))
+        credentials = identity_pool.Credentials.from_file(str(config_file))
+
+        # Confirm identity_pool.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, identity_pool.Credentials)
+        mock_init.assert_called_once_with(
+            audience=AUDIENCE,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=TOKEN_INFO_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            service_account_impersonation_options={"token_lifetime_seconds": 2800},
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            quota_project_id=QUOTA_PROJECT_ID,
+            workforce_pool_user_project=None,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    # from_file with only required fields: unspecified options get defaults.
+    @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+    def test_from_file_required_options_only(self, mock_init, tmpdir):
+        info = {
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+        }
+        config_file = tmpdir.join("config.json")
+        config_file.write(json.dumps(info))
+        credentials = identity_pool.Credentials.from_file(str(config_file))
+
+        # Confirm identity_pool.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, identity_pool.Credentials)
+        mock_init.assert_called_once_with(
+            audience=AUDIENCE,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=None,
+            service_account_impersonation_url=None,
+            service_account_impersonation_options={},
+            client_id=None,
+            client_secret=None,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            quota_project_id=None,
+            workforce_pool_user_project=None,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    # from_file propagates workforce_pool_user_project from the config file.
+    @mock.patch.object(identity_pool.Credentials, "__init__", return_value=None)
+    def test_from_file_workforce_pool(self, mock_init, tmpdir):
+        info = {
+            "audience": WORKFORCE_AUDIENCE,
+            "subject_token_type": WORKFORCE_SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "credential_source": self.CREDENTIAL_SOURCE_TEXT,
+            "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
+        }
+        config_file = tmpdir.join("config.json")
+        config_file.write(json.dumps(info))
+        credentials = identity_pool.Credentials.from_file(str(config_file))
+
+        # Confirm identity_pool.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, identity_pool.Credentials)
+        mock_init.assert_called_once_with(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=None,
+            service_account_impersonation_url=None,
+            service_account_impersonation_options={},
+            client_id=None,
+            client_secret=None,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            quota_project_id=None,
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    # workforce_pool_user_project is only valid with a workforce audience.
+    def test_constructor_nonworkforce_with_workforce_pool_user_project(self):
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(
+                audience=AUDIENCE,
+                workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+            )
+
+        assert excinfo.match(
+            "workforce_pool_user_project should not be set for non-workforce "
+            "pool credentials"
+        )
+
+    # A credential_source with neither "file" nor "url" is rejected.
+    def test_constructor_invalid_options(self):
+        credential_source = {"unsupported": "value"}
+
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(credential_source=credential_source)
+
+        assert excinfo.match(r"Missing credential_source")
+
+    # Specifying both "file" and "url" is ambiguous and rejected.
+    def test_constructor_invalid_options_url_and_file(self):
+        credential_source = {
+            "url": self.CREDENTIAL_URL,
+            "file": SUBJECT_TOKEN_TEXT_FILE,
+        }
+
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(credential_source=credential_source)
+
+        assert excinfo.match(r"Ambiguous credential_source")
+
+    # "environment_id" belongs to AWS credentials, not identity pool ones.
+    def test_constructor_invalid_options_environment_id(self):
+        credential_source = {"url": self.CREDENTIAL_URL, "environment_id": "aws1"}
+
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(credential_source=credential_source)
+
+        assert excinfo.match(
+            r"Invalid Identity Pool credential_source field 'environment_id'"
+        )
+
+    # credential_source must be a mapping, not a bare string.
+    def test_constructor_invalid_credential_source(self):
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(credential_source="non-dict")
+
+        assert excinfo.match(r"Missing credential_source")
+
+    # Only "text" and "json" formats are supported.
+    def test_constructor_invalid_credential_source_format_type(self):
+        credential_source = {"format": {"type": "xml"}}
+
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(credential_source=credential_source)
+
+        assert excinfo.match(r"Invalid credential_source format 'xml'")
+
+    # JSON format requires the field name to extract the token from.
+    def test_constructor_missing_subject_token_field_name(self):
+        credential_source = {"format": {"type": "json"}}
+
+        with pytest.raises(ValueError) as excinfo:
+            self.make_credentials(credential_source=credential_source)
+
+        assert excinfo.match(
+            r"Missing subject_token_field_name for JSON credential_source format"
+        )
+
+    # .info round-trips the workforce configuration, including the user project.
+    def test_info_with_workforce_pool_user_project(self):
+        credentials = self.make_credentials(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            credential_source=self.CREDENTIAL_SOURCE_TEXT_URL.copy(),
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+        )
+
+        assert credentials.info == {
+            "type": "external_account",
+            "audience": WORKFORCE_AUDIENCE,
+            "subject_token_type": WORKFORCE_SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "token_info_url": TOKEN_INFO_URL,
+            "credential_source": self.CREDENTIAL_SOURCE_TEXT_URL,
+            "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
+            "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
+        }
+
+    # .info reflects a text/url credential source configuration verbatim.
+    def test_info_with_file_credential_source(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_TEXT_URL.copy()
+        )
+
+        assert credentials.info == {
+            "type": "external_account",
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "token_info_url": TOKEN_INFO_URL,
+            "credential_source": self.CREDENTIAL_SOURCE_TEXT_URL,
+            "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
+        }
+
+    # .info reflects a JSON/url credential source configuration verbatim.
+    def test_info_with_url_credential_source(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_JSON_URL.copy()
+        )
+
+        assert credentials.info == {
+            "type": "external_account",
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "token_info_url": TOKEN_INFO_URL,
+            "credential_source": self.CREDENTIAL_SOURCE_JSON_URL,
+            "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
+        }
+
+    # An empty token file yields a RefreshError, not an empty token.
+    def test_retrieve_subject_token_missing_subject_token(self, tmpdir):
+        # Provide empty text file.
+        empty_file = tmpdir.join("empty.txt")
+        empty_file.write("")
+        credential_source = {"file": str(empty_file)}
+        credentials = self.make_credentials(credential_source=credential_source)
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(r"Missing subject_token in the credential_source file")
+
+    # Plain-text file source returns the file content as the subject token.
+    def test_retrieve_subject_token_text_file(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_TEXT
+        )
+
+        subject_token = credentials.retrieve_subject_token(None)
+
+        assert subject_token == TEXT_FILE_SUBJECT_TOKEN
+
+    # JSON file source extracts the configured subject_token_field_name.
+    def test_retrieve_subject_token_json_file(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_JSON
+        )
+
+        subject_token = credentials.retrieve_subject_token(None)
+
+        assert subject_token == JSON_FILE_SUBJECT_TOKEN
+
+    # A JSON field name absent from the file surfaces as a RefreshError.
+    def test_retrieve_subject_token_json_file_invalid_field_name(self):
+        credential_source = {
+            "file": SUBJECT_TOKEN_JSON_FILE,
+            "format": {"type": "json", "subject_token_field_name": "not_found"},
+        }
+        credentials = self.make_credentials(credential_source=credential_source)
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(
+            "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+                SUBJECT_TOKEN_JSON_FILE, "not_found"
+            )
+        )
+
+    # Malformed JSON in the token file surfaces the same parse error.
+    def test_retrieve_subject_token_invalid_json(self, tmpdir):
+        # Provide JSON file. This should result in JSON parsing error.
+        invalid_json_file = tmpdir.join("invalid.json")
+        invalid_json_file.write("{")
+        credential_source = {
+            "file": str(invalid_json_file),
+            "format": {"type": "json", "subject_token_field_name": "access_token"},
+        }
+        credentials = self.make_credentials(credential_source=credential_source)
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(
+            "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+                str(invalid_json_file), "access_token"
+            )
+        )
+
+    # A nonexistent token file raises RefreshError at retrieval time.
+    def test_retrieve_subject_token_file_not_found(self):
+        credential_source = {"file": "./not_found.txt"}
+        credentials = self.make_credentials(credential_source=credential_source)
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(r"File './not_found.txt' was not found")
+
+    # Default token_info_url is exposed unchanged.
+    def test_token_info_url(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_JSON
+        )
+
+        assert credentials.token_info_url == TOKEN_INFO_URL
+
+    # Every valid STS base URL is accepted as a custom token_info_url.
+    def test_token_info_url_custom(self):
+        for url in VALID_TOKEN_URLS:
+            credentials = self.make_credentials(
+                credential_source=self.CREDENTIAL_SOURCE_JSON.copy(),
+                token_info_url=(url + "/introspect"),
+            )
+
+            assert credentials.token_info_url == url + "/introspect"
+
+    # token_info_url may be omitted entirely.
+    def test_token_info_url_negative(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_JSON.copy(), token_info_url=None
+        )
+
+        assert not credentials.token_info_url
+
+    # Every valid STS base URL is accepted as a custom token_url.
+    def test_token_url_custom(self):
+        for url in VALID_TOKEN_URLS:
+            credentials = self.make_credentials(
+                credential_source=self.CREDENTIAL_SOURCE_JSON.copy(),
+                token_url=(url + "/token"),
+            )
+
+            assert credentials._token_url == (url + "/token")
+
+    # Every valid impersonation base URL is accepted for impersonation.
+    def test_service_account_impersonation_url_custom(self):
+        for url in VALID_SERVICE_ACCOUNT_IMPERSONATION_URLS:
+            credentials = self.make_credentials(
+                credential_source=self.CREDENTIAL_SOURCE_JSON.copy(),
+                service_account_impersonation_url=(
+                    url + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+                ),
+            )
+
+            assert credentials._service_account_impersonation_url == (
+                url + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+            )
+
+    # Refresh without impersonation: user scopes win over default scopes.
+    def test_refresh_text_file_success_without_impersonation_ignore_default_scopes(
+        self,
+    ):
+        credentials = self.make_credentials(
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            scopes=SCOPES,
+            # Default scopes should be ignored.
+            default_scopes=["ignored"],
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=None,
+            basic_auth_encoding=BASIC_AUTH_ENCODING,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            default_scopes=["ignored"],
+        )
+
+    # Workforce refresh with client auth drops workforce_pool_user_project.
+    def test_refresh_workforce_success_with_client_auth_without_impersonation(self):
+        credentials = self.make_credentials(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            scopes=SCOPES,
+            # This will be ignored in favor of client auth.
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=WORKFORCE_AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=None,
+            basic_auth_encoding=BASIC_AUTH_ENCODING,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            workforce_pool_user_project=None,
+        )
+
+    # Workforce refresh with client auth works without any user project.
+    def test_refresh_workforce_success_with_client_auth_and_no_workforce_project(self):
+        credentials = self.make_credentials(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            scopes=SCOPES,
+            # This is not needed when client Auth is used.
+            workforce_pool_user_project=None,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=WORKFORCE_AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=None,
+            basic_auth_encoding=BASIC_AUTH_ENCODING,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            workforce_pool_user_project=None,
+        )
+
+    # Without client auth, workforce_pool_user_project is sent in the exchange.
+    def test_refresh_workforce_success_without_client_auth_without_impersonation(self):
+        credentials = self.make_credentials(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            client_id=None,
+            client_secret=None,
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            scopes=SCOPES,
+            # This will not be ignored as client auth is not used.
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=WORKFORCE_AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=None,
+            basic_auth_encoding=None,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+        )
+
+    # Workforce refresh combined with service account impersonation.
+    def test_refresh_workforce_success_without_client_auth_with_impersonation(self):
+        credentials = self.make_credentials(
+            audience=WORKFORCE_AUDIENCE,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            client_id=None,
+            client_secret=None,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            scopes=SCOPES,
+            # This will not be ignored as client auth is not used.
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=WORKFORCE_AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=WORKFORCE_SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            basic_auth_encoding=None,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            workforce_pool_user_project=WORKFORCE_POOL_USER_PROJECT,
+        )
+
+    # Refresh without impersonation: default scopes apply when scopes is None.
+    def test_refresh_text_file_success_without_impersonation_use_default_scopes(self):
+        credentials = self.make_credentials(
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            scopes=None,
+            # Default scopes should be used since user specified scopes are none.
+            default_scopes=SCOPES,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=None,
+            basic_auth_encoding=BASIC_AUTH_ENCODING,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=None,
+            default_scopes=SCOPES,
+        )
+
+    # Refresh with impersonation: user scopes win over default scopes.
+    def test_refresh_text_file_success_with_impersonation_ignore_default_scopes(self):
+        # Initialize credentials with service account impersonation and basic auth.
+        credentials = self.make_credentials(
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            scopes=SCOPES,
+            # Default scopes should be ignored.
+            default_scopes=["ignored"],
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            basic_auth_encoding=None,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            default_scopes=["ignored"],
+        )
+
+    # Refresh with impersonation: default scopes apply when scopes is None.
+    def test_refresh_text_file_success_with_impersonation_use_default_scopes(self):
+        # Initialize credentials with service account impersonation, basic auth
+        # and default scopes (no user scopes).
+        credentials = self.make_credentials(
+            # Test with text format type.
+            credential_source=self.CREDENTIAL_SOURCE_TEXT,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            scopes=None,
+            # Default scopes should be used since user specified scopes are none.
+            default_scopes=SCOPES,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=AUDIENCE,
+            subject_token=TEXT_FILE_SUBJECT_TOKEN,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            basic_auth_encoding=None,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=None,
+            default_scopes=SCOPES,
+        )
+
+    # Same flow as the text-file cases but with a JSON-format token file.
+    def test_refresh_json_file_success_without_impersonation(self):
+        credentials = self.make_credentials(
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            # Test with JSON format type.
+            credential_source=self.CREDENTIAL_SOURCE_JSON,
+            scopes=SCOPES,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=AUDIENCE,
+            subject_token=JSON_FILE_SUBJECT_TOKEN,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=None,
+            basic_auth_encoding=BASIC_AUTH_ENCODING,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            default_scopes=None,
+        )
+
+    # JSON-format token file combined with service account impersonation.
+    def test_refresh_json_file_success_with_impersonation(self):
+        # Initialize credentials with service account impersonation and basic auth.
+        credentials = self.make_credentials(
+            # Test with JSON format type.
+            credential_source=self.CREDENTIAL_SOURCE_JSON,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            scopes=SCOPES,
+        )
+
+        self.assert_underlying_credentials_refresh(
+            credentials=credentials,
+            audience=AUDIENCE,
+            subject_token=JSON_FILE_SUBJECT_TOKEN,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            basic_auth_encoding=None,
+            quota_project_id=None,
+            used_scopes=SCOPES,
+            scopes=SCOPES,
+            default_scopes=None,
+        )
+
+    # A subject-token retrieval failure propagates out of refresh().
+    def test_refresh_with_retrieve_subject_token_error(self):
+        credential_source = {
+            "file": SUBJECT_TOKEN_JSON_FILE,
+            "format": {"type": "json", "subject_token_field_name": "not_found"},
+        }
+        credentials = self.make_credentials(credential_source=credential_source)
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.refresh(None)
+
+        assert excinfo.match(
+            "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+                SUBJECT_TOKEN_JSON_FILE, "not_found"
+            )
+        )
+
+    # URL source: a plain-text response body becomes the subject token.
+    def test_retrieve_subject_token_from_url(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_TEXT_URL
+        )
+        request = self.make_mock_request(token_data=TEXT_FILE_SUBJECT_TOKEN)
+        subject_token = credentials.retrieve_subject_token(request)
+
+        assert subject_token == TEXT_FILE_SUBJECT_TOKEN
+        self.assert_credential_request_kwargs(request.call_args_list[0][1], None)
+
+    # URL source: configured headers are forwarded on the retrieval request.
+    def test_retrieve_subject_token_from_url_with_headers(self):
+        credentials = self.make_credentials(
+            credential_source={"url": self.CREDENTIAL_URL, "headers": {"foo": "bar"}}
+        )
+        request = self.make_mock_request(token_data=TEXT_FILE_SUBJECT_TOKEN)
+        subject_token = credentials.retrieve_subject_token(request)
+
+        assert subject_token == TEXT_FILE_SUBJECT_TOKEN
+        self.assert_credential_request_kwargs(
+            request.call_args_list[0][1], {"foo": "bar"}
+        )
+
+    # URL source: JSON responses are parsed and the configured field extracted.
+    def test_retrieve_subject_token_from_url_json(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_JSON_URL
+        )
+        request = self.make_mock_request(token_data=JSON_FILE_CONTENT)
+        subject_token = credentials.retrieve_subject_token(request)
+
+        assert subject_token == JSON_FILE_SUBJECT_TOKEN
+        self.assert_credential_request_kwargs(request.call_args_list[0][1], None)
+
+    # URL source: JSON extraction with custom headers on the request.
+    def test_retrieve_subject_token_from_url_json_with_headers(self):
+        credentials = self.make_credentials(
+            credential_source={
+                "url": self.CREDENTIAL_URL,
+                "format": {"type": "json", "subject_token_field_name": "access_token"},
+                "headers": {"foo": "bar"},
+            }
+        )
+        request = self.make_mock_request(token_data=JSON_FILE_CONTENT)
+        subject_token = credentials.retrieve_subject_token(request)
+
+        assert subject_token == JSON_FILE_SUBJECT_TOKEN
+        self.assert_credential_request_kwargs(
+            request.call_args_list[0][1], {"foo": "bar"}
+        )
+
+    # A non-2xx response from the token URL raises RefreshError.
+    def test_retrieve_subject_token_from_url_not_found(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_TEXT_URL
+        )
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(
+                self.make_mock_request(token_status=404, token_data=JSON_FILE_CONTENT)
+            )
+
+        assert excinfo.match("Unable to retrieve Identity Pool subject token")
+
+    # A JSON field name absent from the URL response raises a parse error.
+    def test_retrieve_subject_token_from_url_json_invalid_field(self):
+        credential_source = {
+            "url": self.CREDENTIAL_URL,
+            "format": {"type": "json", "subject_token_field_name": "not_found"},
+        }
+        credentials = self.make_credentials(credential_source=credential_source)
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(
+                self.make_mock_request(token_data=JSON_FILE_CONTENT)
+            )
+
+        assert excinfo.match(
+            "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+                self.CREDENTIAL_URL, "not_found"
+            )
+        )
+
+    # Malformed JSON in the URL response raises the same parse error.
+    def test_retrieve_subject_token_from_url_json_invalid_format(self):
+        credentials = self.make_credentials(
+            credential_source=self.CREDENTIAL_SOURCE_JSON_URL
+        )
+
+        with pytest.raises(exceptions.RefreshError) as excinfo:
+            credentials.retrieve_subject_token(self.make_mock_request(token_data="{"))
+
+        assert excinfo.match(
+            "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+                self.CREDENTIAL_URL, "access_token"
+            )
+        )
+
+ def test_refresh_text_file_success_without_impersonation_url(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=TEXT_FILE_SUBJECT_TOKEN,
+ )
+
+ def test_refresh_text_file_success_with_impersonation_url(self):
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ # Test with text format type.
+ credential_source=self.CREDENTIAL_SOURCE_TEXT_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=TEXT_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=TEXT_FILE_SUBJECT_TOKEN,
+ )
+
+ def test_refresh_json_file_success_without_impersonation_url(self):
+ credentials = self.make_credentials(
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ # Test with JSON format type.
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=JSON_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=None,
+ basic_auth_encoding=BASIC_AUTH_ENCODING,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=JSON_FILE_CONTENT,
+ )
+
+ def test_refresh_json_file_success_with_impersonation_url(self):
+ # Initialize credentials with service account impersonation and basic auth.
+ credentials = self.make_credentials(
+ # Test with JSON format type.
+ credential_source=self.CREDENTIAL_SOURCE_JSON_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ scopes=SCOPES,
+ )
+
+ self.assert_underlying_credentials_refresh(
+ credentials=credentials,
+ audience=AUDIENCE,
+ subject_token=JSON_FILE_SUBJECT_TOKEN,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ basic_auth_encoding=None,
+ quota_project_id=None,
+ used_scopes=SCOPES,
+ scopes=SCOPES,
+ default_scopes=None,
+ credential_data=JSON_FILE_CONTENT,
+ )
+
+ def test_refresh_with_retrieve_subject_token_error_url(self):
+ credential_source = {
+ "url": self.CREDENTIAL_URL,
+ "format": {"type": "json", "subject_token_field_name": "not_found"},
+ }
+ credentials = self.make_credentials(credential_source=credential_source)
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(self.make_mock_request(token_data=JSON_FILE_CONTENT))
+
+ assert excinfo.match(
+ "Unable to parse subject_token from JSON file '{}' using key '{}'".format(
+ self.CREDENTIAL_URL, "not_found"
+ )
+ )
diff --git a/contrib/python/google-auth/py3/tests/test_impersonated_credentials.py b/contrib/python/google-auth/py3/tests/test_impersonated_credentials.py
new file mode 100644
index 0000000000..d63d2d5d3b
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_impersonated_credentials.py
@@ -0,0 +1,660 @@
+# Copyright 2018 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import http.client as http_client
+import json
+import os
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import impersonated_credentials
+from google.auth import transport
+from google.auth.impersonated_credentials import Credentials
+from google.oauth2 import credentials
+from google.oauth2 import service_account
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+ID_TOKEN_DATA = (
+ "eyJhbGciOiJSUzI1NiIsImtpZCI6ImRmMzc1ODkwOGI3OTIyOTNhZDk3N2Ew"
+ "Yjk5MWQ5OGE3N2Y0ZWVlY2QiLCJ0eXAiOiJKV1QifQ.eyJhdWQiOiJodHRwc"
+ "zovL2Zvby5iYXIiLCJhenAiOiIxMDIxMDE1NTA4MzQyMDA3MDg1NjgiLCJle"
+ "HAiOjE1NjQ0NzUwNTEsImlhdCI6MTU2NDQ3MTQ1MSwiaXNzIjoiaHR0cHM6L"
+ "y9hY2NvdW50cy5nb29nbGUuY29tIiwic3ViIjoiMTAyMTAxNTUwODM0MjAwN"
+ "zA4NTY4In0.redacted"
+)
+ID_TOKEN_EXPIRY = 1564475051
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+SIGNER = crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+TOKEN_URI = "https://example.com/oauth2/token"
+
+ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
+)
+ID_TOKEN_REQUEST_METRICS_HEADER_VALUE = (
+ "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/imp"
+)
+
+
+@pytest.fixture
+def mock_donor_credentials():
+ with mock.patch("google.oauth2._client.jwt_grant", autospec=True) as grant:
+ grant.return_value = (
+ "source token",
+ _helpers.utcnow() + datetime.timedelta(seconds=500),
+ {},
+ )
+ yield grant
+
+
+class MockResponse:
+ def __init__(self, json_data, status_code):
+ self.json_data = json_data
+ self.status_code = status_code
+
+ def json(self):
+ return self.json_data
+
+
+@pytest.fixture
+def mock_authorizedsession_sign():
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.request", autospec=True
+ ) as auth_session:
+ data = {"keyId": "1", "signedBlob": "c2lnbmF0dXJl"}
+ auth_session.return_value = MockResponse(data, http_client.OK)
+ yield auth_session
+
+
+@pytest.fixture
+def mock_authorizedsession_idtoken():
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.request", autospec=True
+ ) as auth_session:
+ data = {"token": ID_TOKEN_DATA}
+ auth_session.return_value = MockResponse(data, http_client.OK)
+ yield auth_session
+
+
+class TestImpersonatedCredentials(object):
+
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ TARGET_PRINCIPAL = "impersonated@project.iam.gserviceaccount.com"
+ TARGET_SCOPES = ["https://www.googleapis.com/auth/devstorage.read_only"]
+ # DELEGATES: List[str] = []
+ # Because Python 2.7:
+ DELEGATES = [] # type: ignore
+ LIFETIME = 3600
+ SOURCE_CREDENTIALS = service_account.Credentials(
+ SIGNER, SERVICE_ACCOUNT_EMAIL, TOKEN_URI
+ )
+ USER_SOURCE_CREDENTIALS = credentials.Credentials(token="ABCDE")
+ IAM_ENDPOINT_OVERRIDE = (
+ "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
+ + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
+ )
+
+ def make_credentials(
+ self,
+ source_credentials=SOURCE_CREDENTIALS,
+ lifetime=LIFETIME,
+ target_principal=TARGET_PRINCIPAL,
+ iam_endpoint_override=None,
+ ):
+
+ return Credentials(
+ source_credentials=source_credentials,
+ target_principal=target_principal,
+ target_scopes=self.TARGET_SCOPES,
+ delegates=self.DELEGATES,
+ lifetime=lifetime,
+ iam_endpoint_override=iam_endpoint_override,
+ )
+
+ def test_make_from_user_credentials(self):
+ credentials = self.make_credentials(
+ source_credentials=self.USER_SOURCE_CREDENTIALS
+ )
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_default_state(self):
+ credentials = self.make_credentials()
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_make_from_service_account_self_signed_jwt(self):
+ source_credentials = service_account.Credentials(
+ SIGNER, self.SERVICE_ACCOUNT_EMAIL, TOKEN_URI, always_use_jwt_access=True
+ )
+ credentials = self.make_credentials(source_credentials=source_credentials)
+ # test the source credential don't lose self signed jwt setting
+ assert credentials._source_credentials._always_use_jwt_access
+ assert credentials._source_credentials._jwt_credentials
+
+ def make_request(
+ self,
+ data,
+ status=http_client.OK,
+ headers=None,
+ side_effect=None,
+ use_data_bytes=True,
+ ):
+ response = mock.create_autospec(transport.Response, instance=False)
+ response.status = status
+ response.data = _helpers.to_bytes(data) if use_data_bytes else data
+ response.headers = headers or {}
+
+ request = mock.create_autospec(transport.Request, instance=False)
+ request.side_effect = side_effect
+ request.return_value = response
+
+ return request
+
+ def test_token_usage_metrics(self):
+ credentials = self.make_credentials()
+ credentials.token = "token"
+ credentials.expiry = None
+
+ headers = {}
+ credentials.before_request(mock.Mock(), None, None, headers)
+ assert headers["authorization"] == "Bearer token"
+ assert headers["x-goog-api-client"] == "cred-type/imp"
+
+ @pytest.mark.parametrize("use_data_bytes", [True, False])
+ def test_refresh_success(self, use_data_bytes, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body),
+ status=http_client.OK,
+ use_data_bytes=use_data_bytes,
+ )
+
+ with mock.patch(
+ "google.auth.metrics.token_request_access_token_impersonate",
+ return_value=ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ ):
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+ assert (
+ request.call_args.kwargs["headers"]["x-goog-api-client"]
+ == ACCESS_TOKEN_REQUEST_METRICS_HEADER_VALUE
+ )
+
+ @pytest.mark.parametrize("use_data_bytes", [True, False])
+ def test_refresh_success_iam_endpoint_override(
+ self, use_data_bytes, mock_donor_credentials
+ ):
+ credentials = self.make_credentials(
+ lifetime=None, iam_endpoint_override=self.IAM_ENDPOINT_OVERRIDE
+ )
+ token = "token"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body),
+ status=http_client.OK,
+ use_data_bytes=use_data_bytes,
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+ # Confirm override endpoint used.
+ request_kwargs = request.call_args[1]
+ assert request_kwargs["url"] == self.IAM_ENDPOINT_OVERRIDE
+
+ @pytest.mark.parametrize("time_skew", [100, -100])
+ def test_refresh_source_credentials(self, time_skew):
+ credentials = self.make_credentials(lifetime=None)
+
+ # Source credentials is refreshed only if it is expired within
+ # _helpers.REFRESH_THRESHOLD from now. We add a time_skew to the expiry, so
+ # source credentials is refreshed only if time_skew <= 0.
+ credentials._source_credentials.expiry = (
+ _helpers.utcnow()
+ + _helpers.REFRESH_THRESHOLD
+ + datetime.timedelta(seconds=time_skew)
+ )
+ credentials._source_credentials.token = "Token"
+
+ with mock.patch(
+ "google.oauth2.service_account.Credentials.refresh", autospec=True
+ ) as source_cred_refresh:
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0)
+ + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": "token", "expireTime": expire_time}
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ # Source credentials is refreshed only if it is expired within
+ # _helpers.REFRESH_THRESHOLD
+ if time_skew > 0:
+ source_cred_refresh.assert_not_called()
+ else:
+ source_cred_refresh.assert_called_once()
+
+ def test_refresh_failure_malformed_expire_time(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+
+ expire_time = (_helpers.utcnow() + datetime.timedelta(seconds=500)).isoformat(
+ "T"
+ )
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(impersonated_credentials._REFRESH_ERROR)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_refresh_failure_unauthorzed(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+
+ response_body = {
+ "error": {
+ "code": 403,
+ "message": "The caller does not have permission",
+ "status": "PERMISSION_DENIED",
+ }
+ }
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.UNAUTHORIZED
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(impersonated_credentials._REFRESH_ERROR)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_refresh_failure(self):
+ credentials = self.make_credentials(lifetime=None)
+ credentials.expiry = None
+ credentials.token = "token"
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience="audience"
+ )
+
+ response = mock.create_autospec(transport.Response, instance=False)
+ response.status_code = http_client.UNAUTHORIZED
+ response.json = mock.Mock(return_value="failed to get ID token")
+
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.post",
+ return_value=response,
+ ):
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ id_creds.refresh(None)
+
+ assert excinfo.match("Error getting ID token")
+
+ def test_refresh_failure_http_error(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+
+ response_body = {}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.HTTPException
+ )
+
+ with pytest.raises(exceptions.RefreshError) as excinfo:
+ credentials.refresh(request)
+
+ assert excinfo.match(impersonated_credentials._REFRESH_ERROR)
+
+ assert not credentials.valid
+ assert credentials.expired
+
+ def test_expired(self):
+ credentials = self.make_credentials(lifetime=None)
+ assert credentials.expired
+
+ def test_signer(self):
+ credentials = self.make_credentials()
+ assert isinstance(credentials.signer, impersonated_credentials.Credentials)
+
+ def test_signer_email(self):
+ credentials = self.make_credentials(target_principal=self.TARGET_PRINCIPAL)
+ assert credentials.signer_email == self.TARGET_PRINCIPAL
+
+ def test_service_account_email(self):
+ credentials = self.make_credentials(target_principal=self.TARGET_PRINCIPAL)
+ assert credentials.service_account_email == self.TARGET_PRINCIPAL
+
+ def test_sign_bytes(self, mock_donor_credentials, mock_authorizedsession_sign):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ token_response_body = {"accessToken": token, "expireTime": expire_time}
+
+ response = mock.create_autospec(transport.Response, instance=False)
+ response.status = http_client.OK
+ response.data = _helpers.to_bytes(json.dumps(token_response_body))
+
+ request = mock.create_autospec(transport.Request, instance=False)
+ request.return_value = response
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ signature = credentials.sign_bytes(b"signed bytes")
+ assert signature == b"signature"
+
+ def test_sign_bytes_failure(self):
+ credentials = self.make_credentials(lifetime=None)
+
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.request", autospec=True
+ ) as auth_session:
+ data = {"error": {"code": 403, "message": "unauthorized"}}
+ auth_session.return_value = MockResponse(data, http_client.FORBIDDEN)
+
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ credentials.sign_bytes(b"foo")
+ assert excinfo.match("'code': 403")
+
+ def test_with_quota_project(self):
+ credentials = self.make_credentials()
+
+ quota_project_creds = credentials.with_quota_project("project-foo")
+ assert quota_project_creds._quota_project_id == "project-foo"
+
+ @pytest.mark.parametrize("use_data_bytes", [True, False])
+ def test_with_quota_project_iam_endpoint_override(
+ self, use_data_bytes, mock_donor_credentials
+ ):
+ credentials = self.make_credentials(
+ lifetime=None, iam_endpoint_override=self.IAM_ENDPOINT_OVERRIDE
+ )
+ token = "token"
+ # iam_endpoint_override should be copied to created credentials.
+ quota_project_creds = credentials.with_quota_project("project-foo")
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body),
+ status=http_client.OK,
+ use_data_bytes=use_data_bytes,
+ )
+
+ quota_project_creds.refresh(request)
+
+ assert quota_project_creds.valid
+ assert not quota_project_creds.expired
+ # Confirm override endpoint used.
+ request_kwargs = request.call_args[1]
+ assert request_kwargs["url"] == self.IAM_ENDPOINT_OVERRIDE
+
+ def test_with_scopes(self):
+ credentials = self.make_credentials()
+ credentials._target_scopes = []
+ assert credentials.requires_scopes is True
+ credentials = credentials.with_scopes(["fake_scope1", "fake_scope2"])
+ assert credentials.requires_scopes is False
+ assert credentials._target_scopes == ["fake_scope1", "fake_scope2"]
+
+ def test_with_scopes_provide_default_scopes(self):
+ credentials = self.make_credentials()
+ credentials._target_scopes = []
+ credentials = credentials.with_scopes(
+ ["fake_scope1"], default_scopes=["fake_scope2"]
+ )
+ assert credentials._target_scopes == ["fake_scope1"]
+
+ def test_id_token_success(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds.expiry == datetime.datetime.utcfromtimestamp(ID_TOKEN_EXPIRY)
+
+ def test_id_token_metrics(self, mock_donor_credentials):
+ credentials = self.make_credentials(lifetime=None)
+ credentials.token = "token"
+ credentials.expiry = None
+ target_audience = "https://foo.bar"
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+
+ with mock.patch(
+ "google.auth.metrics.token_request_id_token_impersonate",
+ return_value=ID_TOKEN_REQUEST_METRICS_HEADER_VALUE,
+ ):
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.post", autospec=True
+ ) as mock_post:
+ data = {"token": ID_TOKEN_DATA}
+ mock_post.return_value = MockResponse(data, http_client.OK)
+ id_creds.refresh(None)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds.expiry == datetime.datetime.utcfromtimestamp(
+ ID_TOKEN_EXPIRY
+ )
+ assert (
+ mock_post.call_args.kwargs["headers"]["x-goog-api-client"]
+ == ID_TOKEN_REQUEST_METRICS_HEADER_VALUE
+ )
+
+ def test_id_token_from_credential(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ new_credentials = self.make_credentials(lifetime=None)
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience, include_email=True
+ )
+ id_creds = id_creds.from_credentials(target_credentials=new_credentials)
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds._include_email is True
+ assert id_creds._target_credentials is new_credentials
+
+ def test_id_token_with_target_audience(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, include_email=True
+ )
+ id_creds = id_creds.with_target_audience(target_audience=target_audience)
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+ assert id_creds.expiry == datetime.datetime.utcfromtimestamp(ID_TOKEN_EXPIRY)
+ assert id_creds._include_email is True
+
+ def test_id_token_invalid_cred(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = None
+
+ with pytest.raises(exceptions.GoogleAuthError) as excinfo:
+ impersonated_credentials.IDTokenCredentials(credentials)
+
+ assert excinfo.match("Provided Credential must be" " impersonated_credentials")
+
+ def test_id_token_with_include_email(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+ id_creds = id_creds.with_include_email(True)
+ id_creds.refresh(request)
+
+ assert id_creds.token == ID_TOKEN_DATA
+
+ def test_id_token_with_quota_project(
+ self, mock_donor_credentials, mock_authorizedsession_idtoken
+ ):
+ credentials = self.make_credentials(lifetime=None)
+ token = "token"
+ target_audience = "https://foo.bar"
+
+ expire_time = (
+ _helpers.utcnow().replace(microsecond=0) + datetime.timedelta(seconds=500)
+ ).isoformat("T") + "Z"
+ response_body = {"accessToken": token, "expireTime": expire_time}
+
+ request = self.make_request(
+ data=json.dumps(response_body), status=http_client.OK
+ )
+
+ credentials.refresh(request)
+
+ assert credentials.valid
+ assert not credentials.expired
+
+ id_creds = impersonated_credentials.IDTokenCredentials(
+ credentials, target_audience=target_audience
+ )
+ id_creds = id_creds.with_quota_project("project-foo")
+ id_creds.refresh(request)
+
+ assert id_creds.quota_project_id == "project-foo"
diff --git a/contrib/python/google-auth/py3/tests/test_jwt.py b/contrib/python/google-auth/py3/tests/test_jwt.py
new file mode 100644
index 0000000000..62f310606d
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_jwt.py
@@ -0,0 +1,671 @@
+# Copyright 2014 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import datetime
+import json
+import os
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import crypt
+from google.auth import exceptions
+from google.auth import jwt
+
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "other_cert.pem"), "rb") as fh:
+ OTHER_CERT_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "es256_privatekey.pem"), "rb") as fh:
+ EC_PRIVATE_KEY_BYTES = fh.read()
+
+with open(os.path.join(DATA_DIR, "es256_public_cert.pem"), "rb") as fh:
+ EC_PUBLIC_CERT_BYTES = fh.read()
+
+SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
+
+with open(SERVICE_ACCOUNT_JSON_FILE, "rb") as fh:
+ SERVICE_ACCOUNT_INFO = json.load(fh)
+
+
+@pytest.fixture
+def signer():
+ return crypt.RSASigner.from_string(PRIVATE_KEY_BYTES, "1")
+
+
+def test_encode_basic(signer):
+ test_payload = {"test": "value"}
+ encoded = jwt.encode(signer, test_payload)
+ header, payload, _, _ = jwt._unverified_decode(encoded)
+ assert payload == test_payload
+ assert header == {"typ": "JWT", "alg": "RS256", "kid": signer.key_id}
+
+
+def test_encode_extra_headers(signer):
+ encoded = jwt.encode(signer, {}, header={"extra": "value"})
+ header = jwt.decode_header(encoded)
+ assert header == {
+ "typ": "JWT",
+ "alg": "RS256",
+ "kid": signer.key_id,
+ "extra": "value",
+ }
+
+
+def test_encode_custom_alg_in_headers(signer):
+ encoded = jwt.encode(signer, {}, header={"alg": "foo"})
+ header = jwt.decode_header(encoded)
+ assert header == {"typ": "JWT", "alg": "foo", "kid": signer.key_id}
+
+
+@pytest.fixture
+def es256_signer():
+ return crypt.ES256Signer.from_string(EC_PRIVATE_KEY_BYTES, "1")
+
+
+def test_encode_basic_es256(es256_signer):
+ test_payload = {"test": "value"}
+ encoded = jwt.encode(es256_signer, test_payload)
+ header, payload, _, _ = jwt._unverified_decode(encoded)
+ assert payload == test_payload
+ assert header == {"typ": "JWT", "alg": "ES256", "kid": es256_signer.key_id}
+
+
+@pytest.fixture
+def token_factory(signer, es256_signer):
+ def factory(claims=None, key_id=None, use_es256_signer=False):
+ now = _helpers.datetime_to_secs(_helpers.utcnow())
+ payload = {
+ "aud": "audience@example.com",
+ "iat": now,
+ "exp": now + 300,
+ "user": "billy bob",
+ "metadata": {"meta": "data"},
+ }
+ payload.update(claims or {})
+
+ # False is specified to remove the signer's key id for testing
+ # headers without key ids.
+ if key_id is False:
+ signer._key_id = None
+ key_id = None
+
+ if use_es256_signer:
+ return jwt.encode(es256_signer, payload, key_id=key_id)
+ else:
+ return jwt.encode(signer, payload, key_id=key_id)
+
+ return factory
+
+
+def test_decode_valid(token_factory):
+ payload = jwt.decode(token_factory(), certs=PUBLIC_CERT_BYTES)
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_header_object(token_factory):
+ payload = token_factory()
+ # Create a malformed JWT token with a number as a header instead of a
+ # dictionary (3 == base64d(M7==))
+ payload = b"M7." + b".".join(payload.split(b".")[1:])
+
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(payload, certs=PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Header segment should be a JSON object: " + str(b"M7"))
+
+
+def test_decode_payload_object(signer):
+ # Create a malformed JWT token with a payload containing both "iat" and
+ # "exp" strings, although not as fields of a dictionary
+ payload = jwt.encode(signer, "iatexp")
+
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(payload, certs=PUBLIC_CERT_BYTES)
+ assert excinfo.match(
+ r"Payload segment should be a JSON object: " + str(b"ImlhdGV4cCI")
+ )
+
+
+def test_decode_valid_es256(token_factory):
+ payload = jwt.decode(
+ token_factory(use_es256_signer=True), certs=EC_PUBLIC_CERT_BYTES
+ )
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_with_audience(token_factory):
+ payload = jwt.decode(
+ token_factory(), certs=PUBLIC_CERT_BYTES, audience="audience@example.com"
+ )
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_with_audience_list(token_factory):
+ payload = jwt.decode(
+ token_factory(),
+ certs=PUBLIC_CERT_BYTES,
+ audience=["audience@example.com", "another_audience@example.com"],
+ )
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_valid_unverified(token_factory):
+ payload = jwt.decode(token_factory(), certs=OTHER_CERT_BYTES, verify=False)
+ assert payload["aud"] == "audience@example.com"
+ assert payload["user"] == "billy bob"
+ assert payload["metadata"]["meta"] == "data"
+
+
+def test_decode_bad_token_wrong_number_of_segments():
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode("1.2", PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Wrong number of segments")
+
+
+def test_decode_bad_token_not_base64():
+ with pytest.raises((ValueError, TypeError)) as excinfo:
+ jwt.decode("1.2.3", PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Incorrect padding|more than a multiple of 4")
+
+
+def test_decode_bad_token_not_json():
+ token = b".".join([base64.urlsafe_b64encode(b"123!")] * 3)
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Can\'t parse segment")
+
+
+def test_decode_bad_token_no_iat_or_exp(signer):
+ token = jwt.encode(signer, {"test": "value"})
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert excinfo.match(r"Token does not contain required claim")
+
+
+def test_decode_bad_token_too_early(token_factory):
+ token = token_factory(
+ claims={
+ "iat": _helpers.datetime_to_secs(
+ _helpers.utcnow() + datetime.timedelta(hours=1)
+ )
+ }
+ )
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES, clock_skew_in_seconds=59)
+ assert excinfo.match(r"Token used too early")
+
+
+def test_decode_bad_token_expired(token_factory):
+ token = token_factory(
+ claims={
+ "exp": _helpers.datetime_to_secs(
+ _helpers.utcnow() - datetime.timedelta(hours=1)
+ )
+ }
+ )
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES, clock_skew_in_seconds=59)
+ assert excinfo.match(r"Token expired")
+
+
+def test_decode_success_with_no_clock_skew(token_factory):
+ token = token_factory(
+ claims={
+ "exp": _helpers.datetime_to_secs(
+ _helpers.utcnow() + datetime.timedelta(seconds=1)
+ ),
+ "iat": _helpers.datetime_to_secs(
+ _helpers.utcnow() - datetime.timedelta(seconds=1)
+ ),
+ }
+ )
+
+ jwt.decode(token, PUBLIC_CERT_BYTES)
+
+
+def test_decode_success_with_custom_clock_skew(token_factory):
+ token = token_factory(
+ claims={
+ "exp": _helpers.datetime_to_secs(
+ _helpers.utcnow() + datetime.timedelta(seconds=2)
+ ),
+ "iat": _helpers.datetime_to_secs(
+ _helpers.utcnow() - datetime.timedelta(seconds=2)
+ ),
+ }
+ )
+
+ jwt.decode(token, PUBLIC_CERT_BYTES, clock_skew_in_seconds=1)
+
+
+def test_decode_bad_token_wrong_audience(token_factory):
+ token = token_factory()
+ audience = "audience2@example.com"
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES, audience=audience)
+ assert excinfo.match(r"Token has wrong audience")
+
+
+def test_decode_bad_token_wrong_audience_list(token_factory):
+ token = token_factory()
+ audience = ["audience2@example.com", "audience3@example.com"]
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token, PUBLIC_CERT_BYTES, audience=audience)
+ assert excinfo.match(r"Token has wrong audience")
+
+
+def test_decode_wrong_cert(token_factory):
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token_factory(), OTHER_CERT_BYTES)
+ assert excinfo.match(r"Could not verify token signature")
+
+
+def test_decode_multicert_bad_cert(token_factory):
+ certs = {"1": OTHER_CERT_BYTES, "2": PUBLIC_CERT_BYTES}
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token_factory(), certs)
+ assert excinfo.match(r"Could not verify token signature")
+
+
+def test_decode_no_cert(token_factory):
+ certs = {"2": PUBLIC_CERT_BYTES}
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token_factory(), certs)
+ assert excinfo.match(r"Certificate for key id 1 not found")
+
+
+def test_decode_no_key_id(token_factory):
+ token = token_factory(key_id=False)
+ certs = {"2": PUBLIC_CERT_BYTES}
+ payload = jwt.decode(token, certs)
+ assert payload["user"] == "billy bob"
+
+
+def test_decode_unknown_alg():
+ headers = json.dumps({u"kid": u"1", u"alg": u"fakealg"})
+ token = b".".join(
+ map(lambda seg: base64.b64encode(seg.encode("utf-8")), [headers, u"{}", u"sig"])
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token)
+ assert excinfo.match(r"fakealg")
+
+
+def test_decode_missing_crytography_alg(monkeypatch):
+ monkeypatch.delitem(jwt._ALGORITHM_TO_VERIFIER_CLASS, "ES256")
+ headers = json.dumps({u"kid": u"1", u"alg": u"ES256"})
+ token = b".".join(
+ map(lambda seg: base64.b64encode(seg.encode("utf-8")), [headers, u"{}", u"sig"])
+ )
+
+ with pytest.raises(ValueError) as excinfo:
+ jwt.decode(token)
+ assert excinfo.match(r"cryptography")
+
+
+def test_roundtrip_explicit_key_id(token_factory):
+ token = token_factory(key_id="3")
+ certs = {"2": OTHER_CERT_BYTES, "3": PUBLIC_CERT_BYTES}
+ payload = jwt.decode(token, certs)
+ assert payload["user"] == "billy bob"
+
+
+class TestCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ SUBJECT = "subject"
+ AUDIENCE = "audience"
+ ADDITIONAL_CLAIMS = {"meta": "data"}
+ credentials = None
+
+ @pytest.fixture(autouse=True)
+ def credentials_fixture(self, signer):
+ self.credentials = jwt.Credentials(
+ signer,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.AUDIENCE,
+ )
+
+ def test_from_service_account_info(self):
+ with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ info = json.load(fh)
+
+ credentials = jwt.Credentials.from_service_account_info(
+ info, audience=self.AUDIENCE
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+ assert credentials._audience == self.AUDIENCE
+
+ def test_from_service_account_info_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.Credentials.from_service_account_info(
+ info,
+ subject=self.SUBJECT,
+ audience=self.AUDIENCE,
+ additional_claims=self.ADDITIONAL_CLAIMS,
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._audience == self.AUDIENCE
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE, audience=self.AUDIENCE
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+ assert credentials._audience == self.AUDIENCE
+
+ def test_from_service_account_file_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.Credentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE,
+ subject=self.SUBJECT,
+ audience=self.AUDIENCE,
+ additional_claims=self.ADDITIONAL_CLAIMS,
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._audience == self.AUDIENCE
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_signing_credentials(self):
+ jwt_from_signing = self.credentials.from_signing_credentials(
+ self.credentials, audience=mock.sentinel.new_audience
+ )
+ jwt_from_info = jwt.Credentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO, audience=mock.sentinel.new_audience
+ )
+
+ assert isinstance(jwt_from_signing, jwt.Credentials)
+ assert jwt_from_signing._signer.key_id == jwt_from_info._signer.key_id
+ assert jwt_from_signing._issuer == jwt_from_info._issuer
+ assert jwt_from_signing._subject == jwt_from_info._subject
+ assert jwt_from_signing._audience == jwt_from_info._audience
+
+ def test_default_state(self):
+ assert not self.credentials.valid
+ # Expiration hasn't been set yet
+ assert not self.credentials.expired
+
+ def test_with_claims(self):
+ new_audience = "new_audience"
+ new_credentials = self.credentials.with_claims(audience=new_audience)
+
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._audience == new_audience
+ assert new_credentials._additional_claims == self.credentials._additional_claims
+ assert new_credentials._quota_project_id == self.credentials._quota_project_id
+
+ def test__make_jwt_without_audience(self):
+ cred = jwt.Credentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO.copy(),
+ subject=self.SUBJECT,
+ audience=None,
+ additional_claims={"scope": "foo bar"},
+ )
+ token, _ = cred._make_jwt()
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["scope"] == "foo bar"
+ assert "aud" not in payload
+
+ def test_with_quota_project(self):
+ quota_project_id = "project-foo"
+
+ new_credentials = self.credentials.with_quota_project(quota_project_id)
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._audience == self.credentials._audience
+ assert new_credentials._additional_claims == self.credentials._additional_claims
+ assert new_credentials.additional_claims == self.credentials._additional_claims
+ assert new_credentials._quota_project_id == quota_project_id
+
+ def test_sign_bytes(self):
+ to_sign = b"123"
+ signature = self.credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ assert isinstance(self.credentials.signer, crypt.RSASigner)
+
+ def test_signer_email(self):
+ assert self.credentials.signer_email == SERVICE_ACCOUNT_INFO["client_email"]
+
+ def _verify_token(self, token):
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ return payload
+
+ def test_refresh(self):
+ self.credentials.refresh(None)
+ assert self.credentials.valid
+ assert not self.credentials.expired
+
+ def test_expired(self):
+ assert not self.credentials.expired
+
+ self.credentials.refresh(None)
+ assert not self.credentials.expired
+
+ with mock.patch("google.auth._helpers.utcnow") as now:
+ one_day = datetime.timedelta(days=1)
+ now.return_value = self.credentials.expiry + one_day
+ assert self.credentials.expired
+
+ def test_before_request(self):
+ headers = {}
+
+ self.credentials.refresh(None)
+ self.credentials.before_request(
+ None, "GET", "http://example.com?a=1#3", headers
+ )
+
+ header_value = headers["authorization"]
+ _, token = header_value.split(" ")
+
+ # Since the audience is set, it should use the existing token.
+ assert token.encode("utf-8") == self.credentials.token
+
+ payload = self._verify_token(token)
+ assert payload["aud"] == self.AUDIENCE
+
+ def test_before_request_refreshes(self):
+ assert not self.credentials.valid
+ self.credentials.before_request(None, "GET", "http://example.com?a=1#3", {})
+ assert self.credentials.valid
+
+
+class TestOnDemandCredentials(object):
+ SERVICE_ACCOUNT_EMAIL = "service-account@example.com"
+ SUBJECT = "subject"
+ ADDITIONAL_CLAIMS = {"meta": "data"}
+ credentials = None
+
+ @pytest.fixture(autouse=True)
+ def credentials_fixture(self, signer):
+ self.credentials = jwt.OnDemandCredentials(
+ signer,
+ self.SERVICE_ACCOUNT_EMAIL,
+ self.SERVICE_ACCOUNT_EMAIL,
+ max_cache_size=2,
+ )
+
+ def test_from_service_account_info(self):
+ with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
+ info = json.load(fh)
+
+ credentials = jwt.OnDemandCredentials.from_service_account_info(info)
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+
+ def test_from_service_account_info_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.OnDemandCredentials.from_service_account_info(
+ info, subject=self.SUBJECT, additional_claims=self.ADDITIONAL_CLAIMS
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_service_account_file(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.OnDemandCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == info["client_email"]
+
+ def test_from_service_account_file_args(self):
+ info = SERVICE_ACCOUNT_INFO.copy()
+
+ credentials = jwt.OnDemandCredentials.from_service_account_file(
+ SERVICE_ACCOUNT_JSON_FILE,
+ subject=self.SUBJECT,
+ additional_claims=self.ADDITIONAL_CLAIMS,
+ )
+
+ assert credentials._signer.key_id == info["private_key_id"]
+ assert credentials._issuer == info["client_email"]
+ assert credentials._subject == self.SUBJECT
+ assert credentials._additional_claims == self.ADDITIONAL_CLAIMS
+
+ def test_from_signing_credentials(self):
+ jwt_from_signing = self.credentials.from_signing_credentials(self.credentials)
+ jwt_from_info = jwt.OnDemandCredentials.from_service_account_info(
+ SERVICE_ACCOUNT_INFO
+ )
+
+ assert isinstance(jwt_from_signing, jwt.OnDemandCredentials)
+ assert jwt_from_signing._signer.key_id == jwt_from_info._signer.key_id
+ assert jwt_from_signing._issuer == jwt_from_info._issuer
+ assert jwt_from_signing._subject == jwt_from_info._subject
+
+ def test_default_state(self):
+ # Credentials are *always* valid.
+ assert self.credentials.valid
+ # Credentials *never* expire.
+ assert not self.credentials.expired
+
+ def test_with_claims(self):
+ new_claims = {"meep": "moop"}
+ new_credentials = self.credentials.with_claims(additional_claims=new_claims)
+
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._additional_claims == new_claims
+
+ def test_with_quota_project(self):
+ quota_project_id = "project-foo"
+ new_credentials = self.credentials.with_quota_project(quota_project_id)
+
+ assert new_credentials._signer == self.credentials._signer
+ assert new_credentials._issuer == self.credentials._issuer
+ assert new_credentials._subject == self.credentials._subject
+ assert new_credentials._additional_claims == self.credentials._additional_claims
+ assert new_credentials._quota_project_id == quota_project_id
+
+ def test_sign_bytes(self):
+ to_sign = b"123"
+ signature = self.credentials.sign_bytes(to_sign)
+ assert crypt.verify_signature(to_sign, signature, PUBLIC_CERT_BYTES)
+
+ def test_signer(self):
+ assert isinstance(self.credentials.signer, crypt.RSASigner)
+
+ def test_signer_email(self):
+ assert self.credentials.signer_email == SERVICE_ACCOUNT_INFO["client_email"]
+
+ def _verify_token(self, token):
+ payload = jwt.decode(token, PUBLIC_CERT_BYTES)
+ assert payload["iss"] == self.SERVICE_ACCOUNT_EMAIL
+ return payload
+
+ def test_refresh(self):
+ with pytest.raises(exceptions.RefreshError):
+ self.credentials.refresh(None)
+
+ def test_before_request(self):
+ headers = {}
+
+ self.credentials.before_request(
+ None, "GET", "http://example.com?a=1#3", headers
+ )
+
+ _, token = headers["authorization"].split(" ")
+ payload = self._verify_token(token)
+
+ assert payload["aud"] == "http://example.com"
+
+ # Making another request should re-use the same token.
+ self.credentials.before_request(None, "GET", "http://example.com?b=2", headers)
+
+ _, new_token = headers["authorization"].split(" ")
+
+ assert new_token == token
+
+ def test_expired_token(self):
+ self.credentials._cache["audience"] = (
+ mock.sentinel.token,
+ datetime.datetime.min,
+ )
+
+ token = self.credentials._get_jwt_for_audience("audience")
+
+ assert token != mock.sentinel.token
diff --git a/contrib/python/google-auth/py3/tests/test_metrics.py b/contrib/python/google-auth/py3/tests/test_metrics.py
new file mode 100644
index 0000000000..ba93892674
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_metrics.py
@@ -0,0 +1,96 @@
+# Copyright 2014 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import platform
+
+import mock
+
+from google.auth import metrics
+from google.auth import version
+
+
+def test_add_metric_header():
+ headers = {}
+ metrics.add_metric_header(headers, None)
+ assert headers == {}
+
+ headers = {"x-goog-api-client": "foo"}
+ metrics.add_metric_header(headers, "bar")
+ assert headers == {"x-goog-api-client": "foo bar"}
+
+ headers = {}
+ metrics.add_metric_header(headers, "bar")
+ assert headers == {"x-goog-api-client": "bar"}
+
+
+@mock.patch.object(platform, "python_version", return_value="3.7")
+def test_versions(mock_python_version):
+ version_save = version.__version__
+ version.__version__ = "1.1"
+ assert metrics.python_and_auth_lib_version() == "gl-python/3.7 auth/1.1"
+ version.__version__ = version_save
+
+
+@mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value="gl-python/3.7 auth/1.1",
+)
+def test_metric_values(mock_python_and_auth_lib_version):
+ assert (
+ metrics.token_request_access_token_mds()
+ == "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds"
+ )
+ assert (
+ metrics.token_request_id_token_mds()
+ == "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/mds"
+ )
+ assert (
+ metrics.token_request_access_token_impersonate()
+ == "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
+ )
+ assert (
+ metrics.token_request_id_token_impersonate()
+ == "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/imp"
+ )
+ assert (
+ metrics.token_request_access_token_sa_assertion()
+ == "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/sa"
+ )
+ assert (
+ metrics.token_request_id_token_sa_assertion()
+ == "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/sa"
+ )
+ assert metrics.token_request_user() == "gl-python/3.7 auth/1.1 cred-type/u"
+ assert metrics.mds_ping() == "gl-python/3.7 auth/1.1 auth-request-type/mds"
+ assert metrics.reauth_start() == "gl-python/3.7 auth/1.1 auth-request-type/re-start"
+ assert (
+ metrics.reauth_continue() == "gl-python/3.7 auth/1.1 auth-request-type/re-cont"
+ )
+
+
+@mock.patch(
+ "google.auth.metrics.python_and_auth_lib_version",
+ return_value="gl-python/3.7 auth/1.1",
+)
+def test_byoid_metric_header(mock_python_and_auth_lib_version):
+ metrics_options = {}
+ assert (
+ metrics.byoid_metrics_header(metrics_options)
+ == "gl-python/3.7 auth/1.1 google-byoid-sdk"
+ )
+ metrics_options["testKey"] = "testValue"
+ assert (
+ metrics.byoid_metrics_header(metrics_options)
+ == "gl-python/3.7 auth/1.1 google-byoid-sdk testKey/testValue"
+ )
diff --git a/contrib/python/google-auth/py3/tests/test_packaging.py b/contrib/python/google-auth/py3/tests/test_packaging.py
new file mode 100644
index 0000000000..e87b3a21b9
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_packaging.py
@@ -0,0 +1,30 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+
+
+def test_namespace_package_compat(tmp_path):
+ """
+ The ``google`` namespace package should not be masked
+ by the presence of ``google-auth``.
+ """
+ google = tmp_path / "google"
+ google.mkdir()
+ google.joinpath("othermod.py").write_text("")
+ env = dict(os.environ, PYTHONPATH=str(tmp_path))
+ cmd = [sys.executable, "-m", "google.othermod"]
+ subprocess.check_call(cmd, env=env)
diff --git a/contrib/python/google-auth/py3/tests/test_pluggable.py b/contrib/python/google-auth/py3/tests/test_pluggable.py
new file mode 100644
index 0000000000..783bbcaec0
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/test_pluggable.py
@@ -0,0 +1,1250 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import subprocess
+
+import mock
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.auth import pluggable
+from .test__default import WORKFORCE_AUDIENCE
+
+CLIENT_ID = "username"
+CLIENT_SECRET = "password"
+# Base64 encoding of "username:password".
+BASIC_AUTH_ENCODING = "dXNlcm5hbWU6cGFzc3dvcmQ="
+SERVICE_ACCOUNT_EMAIL = "service-1234@service-name.iam.gserviceaccount.com"
+SERVICE_ACCOUNT_IMPERSONATION_URL_BASE = (
+ "https://us-east1-iamcredentials.googleapis.com"
+)
+SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE = "/v1/projects/-/serviceAccounts/{}:generateAccessToken".format(
+ SERVICE_ACCOUNT_EMAIL
+)
+SERVICE_ACCOUNT_IMPERSONATION_URL = (
+ SERVICE_ACCOUNT_IMPERSONATION_URL_BASE + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+)
+QUOTA_PROJECT_ID = "QUOTA_PROJECT_ID"
+SCOPES = ["scope1", "scope2"]
+SUBJECT_TOKEN_FIELD_NAME = "access_token"
+
+TOKEN_URL = "https://sts.googleapis.com/v1/token"
+TOKEN_INFO_URL = "https://sts.googleapis.com/v1/introspect"
+SUBJECT_TOKEN_TYPE = "urn:ietf:params:oauth:token-type:jwt"
+AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
+DEFAULT_UNIVERSE_DOMAIN = "googleapis.com"
+
+VALID_TOKEN_URLS = [
+ "https://sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.com",
+ "https://US-EAST-1.sts.googleapis.com",
+ "https://sts.us-east-1.googleapis.com",
+ "https://sts.US-WEST-1.googleapis.com",
+ "https://us-east-1-sts.googleapis.com",
+ "https://US-WEST-1-sts.googleapis.com",
+ "https://us-west-1-sts.googleapis.com/path?query",
+ "https://sts-us-east-1.p.googleapis.com",
+]
+INVALID_TOKEN_URLS = [
+ "https://iamcredentials.googleapis.com",
+ "sts.googleapis.com",
+ "https://",
+ "http://sts.googleapis.com",
+ "https://st.s.googleapis.com",
+ "https://us-eas\t-1.sts.googleapis.com",
+ "https:/us-east-1.sts.googleapis.com",
+ "https://US-WE/ST-1-sts.googleapis.com",
+ "https://sts-us-east-1.googleapis.com",
+ "https://sts-US-WEST-1.googleapis.com",
+ "testhttps://us-east-1.sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.comevil.com",
+ "https://us-east-1.us-east-1.sts.googleapis.com",
+ "https://us-ea.s.t.sts.googleapis.com",
+ "https://sts.googleapis.comevil.com",
+ "hhttps://us-east-1.sts.googleapis.com",
+ "https://us- -1.sts.googleapis.com",
+ "https://-sts.googleapis.com",
+ "https://us-east-1.sts.googleapis.com.evil.com",
+ "https://sts.pgoogleapis.com",
+ "https://p.googleapis.com",
+ "https://sts.p.com",
+ "http://sts.p.googleapis.com",
+ "https://xyz-sts.p.googleapis.com",
+ "https://sts-xyz.123.p.googleapis.com",
+ "https://sts-xyz.p1.googleapis.com",
+ "https://sts-xyz.p.foo.com",
+ "https://sts-xyz.p.foo.googleapis.com",
+]
+VALID_SERVICE_ACCOUNT_IMPERSONATION_URLS = [
+ "https://iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.com",
+ "https://US-EAST-1.iamcredentials.googleapis.com",
+ "https://iamcredentials.us-east-1.googleapis.com",
+ "https://iamcredentials.US-WEST-1.googleapis.com",
+ "https://us-east-1-iamcredentials.googleapis.com",
+ "https://US-WEST-1-iamcredentials.googleapis.com",
+ "https://us-west-1-iamcredentials.googleapis.com/path?query",
+ "https://iamcredentials-us-east-1.p.googleapis.com",
+]
+INVALID_SERVICE_ACCOUNT_IMPERSONATION_URLS = [
+ "https://sts.googleapis.com",
+ "iamcredentials.googleapis.com",
+ "https://",
+ "http://iamcredentials.googleapis.com",
+ "https://iamcre.dentials.googleapis.com",
+ "https://us-eas\t-1.iamcredentials.googleapis.com",
+ "https:/us-east-1.iamcredentials.googleapis.com",
+ "https://US-WE/ST-1-iamcredentials.googleapis.com",
+ "https://iamcredentials-us-east-1.googleapis.com",
+ "https://iamcredentials-US-WEST-1.googleapis.com",
+ "testhttps://us-east-1.iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.comevil.com",
+ "https://us-east-1.us-east-1.iamcredentials.googleapis.com",
+ "https://us-ea.s.t.iamcredentials.googleapis.com",
+ "https://iamcredentials.googleapis.comevil.com",
+ "hhttps://us-east-1.iamcredentials.googleapis.com",
+ "https://us- -1.iamcredentials.googleapis.com",
+ "https://-iamcredentials.googleapis.com",
+ "https://us-east-1.iamcredentials.googleapis.com.evil.com",
+ "https://iamcredentials.pgoogleapis.com",
+ "https://p.googleapis.com",
+ "https://iamcredentials.p.com",
+ "http://iamcredentials.p.googleapis.com",
+ "https://xyz-iamcredentials.p.googleapis.com",
+ "https://iamcredentials-xyz.123.p.googleapis.com",
+ "https://iamcredentials-xyz.p1.googleapis.com",
+ "https://iamcredentials-xyz.p.foo.com",
+ "https://iamcredentials-xyz.p.foo.googleapis.com",
+]
+
+
+class TestCredentials(object):
+ CREDENTIAL_SOURCE_EXECUTABLE_COMMAND = (
+ "/fake/external/excutable --arg1=value1 --arg2=value2"
+ )
+ CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = "fake_output_file"
+ CREDENTIAL_SOURCE_EXECUTABLE = {
+ "command": CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+ "timeout_millis": 30000,
+ "interactive_timeout_millis": 300000,
+ "output_file": CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+ }
+ CREDENTIAL_SOURCE = {"executable": CREDENTIAL_SOURCE_EXECUTABLE}
+ EXECUTABLE_OIDC_TOKEN = "FAKE_ID_TOKEN"
+ EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_ID_TOKEN = {
+ "version": 1,
+ "success": True,
+ "token_type": "urn:ietf:params:oauth:token-type:id_token",
+ "id_token": EXECUTABLE_OIDC_TOKEN,
+ "expiration_time": 9999999999,
+ }
+ EXECUTABLE_SUCCESSFUL_OIDC_NO_EXPIRATION_TIME_RESPONSE_ID_TOKEN = {
+ "version": 1,
+ "success": True,
+ "token_type": "urn:ietf:params:oauth:token-type:id_token",
+ "id_token": EXECUTABLE_OIDC_TOKEN,
+ }
+ EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_JWT = {
+ "version": 1,
+ "success": True,
+ "token_type": "urn:ietf:params:oauth:token-type:jwt",
+ "id_token": EXECUTABLE_OIDC_TOKEN,
+ "expiration_time": 9999999999,
+ }
+ EXECUTABLE_SUCCESSFUL_OIDC_NO_EXPIRATION_TIME_RESPONSE_JWT = {
+ "version": 1,
+ "success": True,
+ "token_type": "urn:ietf:params:oauth:token-type:jwt",
+ "id_token": EXECUTABLE_OIDC_TOKEN,
+ }
+ EXECUTABLE_SAML_TOKEN = "FAKE_SAML_RESPONSE"
+ EXECUTABLE_SUCCESSFUL_SAML_RESPONSE = {
+ "version": 1,
+ "success": True,
+ "token_type": "urn:ietf:params:oauth:token-type:saml2",
+ "saml_response": EXECUTABLE_SAML_TOKEN,
+ "expiration_time": 9999999999,
+ }
+ EXECUTABLE_SUCCESSFUL_SAML_NO_EXPIRATION_TIME_RESPONSE = {
+ "version": 1,
+ "success": True,
+ "token_type": "urn:ietf:params:oauth:token-type:saml2",
+ "saml_response": EXECUTABLE_SAML_TOKEN,
+ }
+ EXECUTABLE_FAILED_RESPONSE = {
+ "version": 1,
+ "success": False,
+ "code": "401",
+ "message": "Permission denied. Caller not authorized",
+ }
+ CREDENTIAL_URL = "http://fakeurl.com"
+
+ @classmethod
+ def make_pluggable(
+ cls,
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ client_id=None,
+ client_secret=None,
+ quota_project_id=None,
+ scopes=None,
+ default_scopes=None,
+ service_account_impersonation_url=None,
+ credential_source=None,
+ workforce_pool_user_project=None,
+ interactive=None,
+ ):
+ return pluggable.Credentials(
+ audience=audience,
+ subject_token_type=subject_token_type,
+ token_url=token_url,
+ token_info_url=token_info_url,
+ service_account_impersonation_url=service_account_impersonation_url,
+ credential_source=credential_source,
+ client_id=client_id,
+ client_secret=client_secret,
+ quota_project_id=quota_project_id,
+ scopes=scopes,
+ default_scopes=default_scopes,
+ workforce_pool_user_project=workforce_pool_user_project,
+ interactive=interactive,
+ )
+
+ def test_from_constructor_and_injection(self):
+ credentials = pluggable.Credentials(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ credential_source=self.CREDENTIAL_SOURCE,
+ interactive=True,
+ )
+ setattr(credentials, "_tokeninfo_username", "mock_external_account_id")
+
+ assert isinstance(credentials, pluggable.Credentials)
+ assert credentials.interactive
+ assert credentials.external_account_id == "mock_external_account_id"
+
+ @mock.patch.object(pluggable.Credentials, "__init__", return_value=None)
+ def test_from_info_full_options(self, mock_init):
+ credentials = pluggable.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "token_info_url": TOKEN_INFO_URL,
+ "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+ "service_account_impersonation": {"token_lifetime_seconds": 2800},
+ "client_id": CLIENT_ID,
+ "client_secret": CLIENT_SECRET,
+ "quota_project_id": QUOTA_PROJECT_ID,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ )
+
+ # Confirm pluggable.Credentials instantiated with expected attributes.
+ assert isinstance(credentials, pluggable.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=TOKEN_INFO_URL,
+ service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+ service_account_impersonation_options={"token_lifetime_seconds": 2800},
+ client_id=CLIENT_ID,
+ client_secret=CLIENT_SECRET,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=QUOTA_PROJECT_ID,
+ workforce_pool_user_project=None,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+ @mock.patch.object(pluggable.Credentials, "__init__", return_value=None)
+ def test_from_info_required_options_only(self, mock_init):
+ credentials = pluggable.Credentials.from_info(
+ {
+ "audience": AUDIENCE,
+ "subject_token_type": SUBJECT_TOKEN_TYPE,
+ "token_url": TOKEN_URL,
+ "credential_source": self.CREDENTIAL_SOURCE,
+ }
+ )
+
+ # Confirm pluggable.Credentials instantiated with expected attributes.
+ assert isinstance(credentials, pluggable.Credentials)
+ mock_init.assert_called_once_with(
+ audience=AUDIENCE,
+ subject_token_type=SUBJECT_TOKEN_TYPE,
+ token_url=TOKEN_URL,
+ token_info_url=None,
+ service_account_impersonation_url=None,
+ service_account_impersonation_options={},
+ client_id=None,
+ client_secret=None,
+ credential_source=self.CREDENTIAL_SOURCE,
+ quota_project_id=None,
+ workforce_pool_user_project=None,
+ universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+ )
+
+    @mock.patch.object(pluggable.Credentials, "__init__", return_value=None)
+    def test_from_file_full_options(self, mock_init, tmpdir):
+        """from_file() with a fully-populated JSON config forwards every field to __init__."""
+        info = {
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "token_info_url": TOKEN_INFO_URL,
+            "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+            "service_account_impersonation": {"token_lifetime_seconds": 2800},
+            "client_id": CLIENT_ID,
+            "client_secret": CLIENT_SECRET,
+            "quota_project_id": QUOTA_PROJECT_ID,
+            "credential_source": self.CREDENTIAL_SOURCE,
+        }
+        # Write the config to a real temp file so the file-loading path is exercised.
+        config_file = tmpdir.join("config.json")
+        config_file.write(json.dumps(info))
+        credentials = pluggable.Credentials.from_file(str(config_file))
+
+        # Confirm pluggable.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, pluggable.Credentials)
+        mock_init.assert_called_once_with(
+            audience=AUDIENCE,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=TOKEN_INFO_URL,
+            service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+            service_account_impersonation_options={"token_lifetime_seconds": 2800},
+            client_id=CLIENT_ID,
+            client_secret=CLIENT_SECRET,
+            credential_source=self.CREDENTIAL_SOURCE,
+            quota_project_id=QUOTA_PROJECT_ID,
+            workforce_pool_user_project=None,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    @mock.patch.object(pluggable.Credentials, "__init__", return_value=None)
+    def test_from_file_required_options_only(self, mock_init, tmpdir):
+        """from_file() with only required fields fills every optional arg with its default."""
+        info = {
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "credential_source": self.CREDENTIAL_SOURCE,
+        }
+        config_file = tmpdir.join("config.json")
+        config_file.write(json.dumps(info))
+        credentials = pluggable.Credentials.from_file(str(config_file))
+
+        # Confirm pluggable.Credentials instantiated with expected attributes.
+        assert isinstance(credentials, pluggable.Credentials)
+        mock_init.assert_called_once_with(
+            audience=AUDIENCE,
+            subject_token_type=SUBJECT_TOKEN_TYPE,
+            token_url=TOKEN_URL,
+            token_info_url=None,
+            service_account_impersonation_url=None,
+            service_account_impersonation_options={},
+            client_id=None,
+            client_secret=None,
+            credential_source=self.CREDENTIAL_SOURCE,
+            quota_project_id=None,
+            workforce_pool_user_project=None,
+            universe_domain=DEFAULT_UNIVERSE_DOMAIN,
+        )
+
+    def test_constructor_invalid_options(self):
+        """A credential_source dict without an "executable" key is rejected."""
+        credential_source = {"unsupported": "value"}
+
+        with pytest.raises(ValueError) as excinfo:
+            self.make_pluggable(credential_source=credential_source)
+
+        assert excinfo.match(r"Missing credential_source")
+
+    def test_constructor_invalid_credential_source(self):
+        """A non-dict credential_source is rejected with the same error."""
+        with pytest.raises(ValueError) as excinfo:
+            self.make_pluggable(credential_source="non-dict")
+
+        assert excinfo.match(r"Missing credential_source")
+
+    def test_info_with_credential_source(self):
+        """credentials.info reflects the constructor inputs as an external_account dict."""
+        credentials = self.make_pluggable(
+            credential_source=self.CREDENTIAL_SOURCE.copy()
+        )
+
+        assert credentials.info == {
+            "type": "external_account",
+            "audience": AUDIENCE,
+            "subject_token_type": SUBJECT_TOKEN_TYPE,
+            "token_url": TOKEN_URL,
+            "token_info_url": TOKEN_INFO_URL,
+            "credential_source": self.CREDENTIAL_SOURCE,
+            "universe_domain": DEFAULT_UNIVERSE_DOMAIN,
+        }
+
+    def test_token_info_url(self):
+        """token_info_url property returns the default TOKEN_INFO_URL."""
+        credentials = self.make_pluggable(
+            credential_source=self.CREDENTIAL_SOURCE.copy()
+        )
+
+        assert credentials.token_info_url == TOKEN_INFO_URL
+
+    def test_token_info_url_custom(self):
+        """A custom token_info_url is accepted for every valid token-URL base."""
+        for url in VALID_TOKEN_URLS:
+            credentials = self.make_pluggable(
+                credential_source=self.CREDENTIAL_SOURCE.copy(),
+                token_info_url=(url + "/introspect"),
+            )
+
+            assert credentials.token_info_url == url + "/introspect"
+
+    def test_token_info_url_negative(self):
+        """Passing token_info_url=None leaves the property falsy."""
+        credentials = self.make_pluggable(
+            credential_source=self.CREDENTIAL_SOURCE.copy(), token_info_url=None
+        )
+
+        assert not credentials.token_info_url
+
+    def test_token_url_custom(self):
+        """A custom token_url is stored verbatim for every valid token-URL base."""
+        for url in VALID_TOKEN_URLS:
+            credentials = self.make_pluggable(
+                credential_source=self.CREDENTIAL_SOURCE.copy(),
+                token_url=(url + "/token"),
+            )
+
+            assert credentials._token_url == (url + "/token")
+
+    def test_service_account_impersonation_url_custom(self):
+        """A custom impersonation URL is stored verbatim for every valid base."""
+        for url in VALID_SERVICE_ACCOUNT_IMPERSONATION_URLS:
+            credentials = self.make_pluggable(
+                credential_source=self.CREDENTIAL_SOURCE.copy(),
+                service_account_impersonation_url=(
+                    url + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+                ),
+            )
+
+            assert credentials._service_account_impersonation_url == (
+                url + SERVICE_ACCOUNT_IMPERSONATION_URL_ROUTE
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_successfully(self, tmpdir):
+        """Table-driven success cases: OIDC id_token/JWT and SAML, in both
+        non-interactive (token parsed from executable stdout) and interactive
+        (token read back from the output file) modes.
+        """
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = tmpdir.join(
+            "actual_output_file"
+        )
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "interactive_timeout_millis": 300000,
+            "output_file": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+
+        # Each case supplies either a mocked stdout payload (non-interactive) or
+        # relies on the file_content written to the output file (interactive).
+        testData = {
+            "subject_token_oidc_id_token": {
+                "stdout": json.dumps(
+                    self.EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_ID_TOKEN
+                ).encode("UTF-8"),
+                "impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+                "file_content": self.EXECUTABLE_SUCCESSFUL_OIDC_NO_EXPIRATION_TIME_RESPONSE_ID_TOKEN,
+                "expect_token": self.EXECUTABLE_OIDC_TOKEN,
+            },
+            "subject_token_oidc_id_token_interacitve_mode": {
+                "audience": WORKFORCE_AUDIENCE,
+                "file_content": self.EXECUTABLE_SUCCESSFUL_OIDC_NO_EXPIRATION_TIME_RESPONSE_ID_TOKEN,
+                "interactive": True,
+                "expect_token": self.EXECUTABLE_OIDC_TOKEN,
+            },
+            "subject_token_oidc_jwt": {
+                "stdout": json.dumps(
+                    self.EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_JWT
+                ).encode("UTF-8"),
+                "impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+                "file_content": self.EXECUTABLE_SUCCESSFUL_OIDC_NO_EXPIRATION_TIME_RESPONSE_JWT,
+                "expect_token": self.EXECUTABLE_OIDC_TOKEN,
+            },
+            "subject_token_oidc_jwt_interactive_mode": {
+                "audience": WORKFORCE_AUDIENCE,
+                "file_content": self.EXECUTABLE_SUCCESSFUL_OIDC_NO_EXPIRATION_TIME_RESPONSE_JWT,
+                "interactive": True,
+                "expect_token": self.EXECUTABLE_OIDC_TOKEN,
+            },
+            "subject_token_saml": {
+                "stdout": json.dumps(self.EXECUTABLE_SUCCESSFUL_SAML_RESPONSE).encode(
+                    "UTF-8"
+                ),
+                "impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
+                "file_content": self.EXECUTABLE_SUCCESSFUL_SAML_NO_EXPIRATION_TIME_RESPONSE,
+                "expect_token": self.EXECUTABLE_SAML_TOKEN,
+            },
+            "subject_token_saml_interactive_mode": {
+                "audience": WORKFORCE_AUDIENCE,
+                "file_content": self.EXECUTABLE_SUCCESSFUL_SAML_NO_EXPIRATION_TIME_RESPONSE,
+                "interactive": True,
+                "expect_token": self.EXECUTABLE_SAML_TOKEN,
+            },
+        }
+
+        for data in testData.values():
+            # Seed the output file for the interactive read-back path.
+            with open(
+                ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE, "w"
+            ) as output_file:
+                json.dump(data.get("file_content"), output_file)
+
+            # Stub the executable invocation; returncode 0 signals success.
+            with mock.patch(
+                "subprocess.run",
+                return_value=subprocess.CompletedProcess(
+                    args=[], stdout=data.get("stdout"), returncode=0
+                ),
+            ):
+                credentials = self.make_pluggable(
+                    audience=data.get("audience", AUDIENCE),
+                    service_account_impersonation_url=data.get("impersonation_url"),
+                    credential_source=ACTUAL_CREDENTIAL_SOURCE,
+                    interactive=data.get("interactive", False),
+                )
+                subject_token = credentials.retrieve_subject_token(None)
+                assert subject_token == data.get("expect_token")
+            os.remove(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_saml(self):
+        """SAML token is parsed from the stubbed executable's stdout."""
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(self.EXECUTABLE_SUCCESSFUL_SAML_RESPONSE).encode(
+                    "UTF-8"
+                ),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            subject_token = credentials.retrieve_subject_token(None)
+
+            assert subject_token == self.EXECUTABLE_SAML_TOKEN
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_saml_interactive_mode(self, tmpdir):
+        """Interactive mode reads the SAML response from the output file, not stdout."""
+
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = tmpdir.join(
+            "actual_output_file"
+        )
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "interactive_timeout_millis": 300000,
+            "output_file": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+        with open(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE, "w") as output_file:
+            json.dump(
+                self.EXECUTABLE_SUCCESSFUL_SAML_NO_EXPIRATION_TIME_RESPONSE, output_file
+            )
+
+        # The stubbed run produces no stdout; the token must come from the file.
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(args=[], returncode=0),
+        ):
+            credentials = self.make_pluggable(
+                audience=WORKFORCE_AUDIENCE,
+                credential_source=ACTUAL_CREDENTIAL_SOURCE,
+                interactive=True,
+            )
+
+            subject_token = credentials.retrieve_subject_token(None)
+
+            assert subject_token == self.EXECUTABLE_SAML_TOKEN
+        os.remove(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_failed(self):
+        """An executable response with success=false raises RefreshError."""
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(self.EXECUTABLE_FAILED_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            # NOTE(review): the "." characters are unescaped regex metachars;
+            # excinfo.match() uses re.search, so the test still passes (loosely).
+            assert excinfo.match(
+                r"Executable returned unsuccessful response: code: 401, message: Permission denied. Caller not authorized."
+            )
+
+    @mock.patch.dict(
+        os.environ,
+        {
+            "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1",
+            "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE": "1",
+        },
+    )
+    def test_retrieve_subject_token_failed_interactive_mode(self, tmpdir):
+        """Interactive mode: a failed response in the output file raises RefreshError."""
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = tmpdir.join(
+            "actual_output_file"
+        )
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "interactive_timeout_millis": 300000,
+            "output_file": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+        with open(
+            ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE, "w", encoding="utf-8"
+        ) as output_file:
+            json.dump(self.EXECUTABLE_FAILED_RESPONSE, output_file)
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(args=[], returncode=0),
+        ):
+            credentials = self.make_pluggable(
+                audience=WORKFORCE_AUDIENCE,
+                credential_source=ACTUAL_CREDENTIAL_SOURCE,
+                interactive=True,
+            )
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"Executable returned unsuccessful response: code: 401, message: Permission denied. Caller not authorized."
+            )
+        os.remove(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "0"})
+    # NOTE(review): method name has a typo ("not_allowd" -> "not_allowed");
+    # renaming is a behavior-adjacent change (test id) left for a follow-up.
+    def test_retrieve_subject_token_not_allowd(self):
+        """With ALLOW_EXECUTABLES=0 the executable must not run; ValueError is raised."""
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(
+                    self.EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_ID_TOKEN
+                ).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(ValueError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(r"Executables need to be explicitly allowed")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_invalid_version(self):
+        """A response declaring version 2 (unsupported) raises RefreshError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_VERSION_2 = {
+            "version": 2,
+            "success": True,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_VERSION_2).encode(
+                    "UTF-8"
+                ),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(r"Executable returned unsupported version.")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_expired_token(self):
+        """An expiration_time of 0 (already past) raises RefreshError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_EXPIRED = {
+            "version": 1,
+            "success": True,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 0,
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_EXPIRED).encode(
+                    "UTF-8"
+                ),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(r"The token returned by the executable is expired.")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_file_cache(self, tmpdir):
+        """A valid cached response in output_file is used without running the executable
+        (note: subprocess.run is NOT mocked here, so any invocation would fail)."""
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = tmpdir.join(
+            "actual_output_file"
+        )
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "timeout_millis": 30000,
+            "output_file": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+        with open(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE, "w") as output_file:
+            json.dump(self.EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_ID_TOKEN, output_file)
+
+        credentials = self.make_pluggable(credential_source=ACTUAL_CREDENTIAL_SOURCE)
+
+        subject_token = credentials.retrieve_subject_token(None)
+        assert subject_token == self.EXECUTABLE_OIDC_TOKEN
+
+        os.remove(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_no_file_cache(self):
+        """Without an output_file configured, the token is taken from stdout."""
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "timeout_millis": 30000,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(
+                    self.EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_ID_TOKEN
+                ).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(
+                credential_source=ACTUAL_CREDENTIAL_SOURCE
+            )
+
+            subject_token = credentials.retrieve_subject_token(None)
+
+            assert subject_token == self.EXECUTABLE_OIDC_TOKEN
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_file_cache_value_error_report(self, tmpdir):
+        """A cached response missing "version" surfaces the ValueError directly
+        (subprocess.run is not mocked, so no executable fallback occurs)."""
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = tmpdir.join(
+            "actual_output_file"
+        )
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "timeout_millis": 30000,
+            "output_file": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+        # Deliberately omits the "version" field.
+        ACTUAL_EXECUTABLE_RESPONSE = {
+            "success": True,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+        with open(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE, "w") as output_file:
+            json.dump(ACTUAL_EXECUTABLE_RESPONSE, output_file)
+
+        credentials = self.make_pluggable(credential_source=ACTUAL_CREDENTIAL_SOURCE)
+
+        with pytest.raises(ValueError) as excinfo:
+            _ = credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(r"The executable response is missing the version field.")
+
+        os.remove(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_file_cache_refresh_error_retry(self, tmpdir):
+        """A cached response with an unsupported version is discarded and the
+        executable is re-run, whose stdout then supplies the token."""
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE = tmpdir.join(
+            "actual_output_file"
+        )
+        ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": "command",
+            "timeout_millis": 30000,
+            "output_file": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        ACTUAL_CREDENTIAL_SOURCE = {"executable": ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE}
+        # Cached response is version 2 -> RefreshError -> retry via executable.
+        ACTUAL_EXECUTABLE_RESPONSE = {
+            "version": 2,
+            "success": True,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+        with open(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE, "w") as output_file:
+            json.dump(ACTUAL_EXECUTABLE_RESPONSE, output_file)
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(
+                    self.EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE_ID_TOKEN
+                ).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(
+                credential_source=ACTUAL_CREDENTIAL_SOURCE
+            )
+
+            subject_token = credentials.retrieve_subject_token(None)
+
+            assert subject_token == self.EXECUTABLE_OIDC_TOKEN
+
+        os.remove(ACTUAL_CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_unsupported_token_type(self):
+        """A token_type outside the OIDC/SAML set raises RefreshError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE = {
+            "version": 1,
+            "success": True,
+            "token_type": "unsupported_token_type",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(r"Executable returned unsupported token type.")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_missing_version(self):
+        """A stdout response missing "version" raises ValueError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE = {
+            "success": True,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(ValueError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"The executable response is missing the version field."
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_missing_success(self):
+        """A stdout response missing "success" raises ValueError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE = {
+            "version": 1,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(ValueError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"The executable response is missing the success field."
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_missing_error_code_message(self):
+        """A success=false response without code/message fields raises ValueError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE = {"version": 1, "success": False}
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(ValueError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"Error code and message fields are required in the response."
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_without_expiration_time_should_pass_when_output_file_not_specified(
+        self,
+    ):
+        """expiration_time may be omitted when no output_file is configured."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE = {
+            "version": 1,
+            "success": True,
+            "token_type": "urn:ietf:params:oauth:token-type:id_token",
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+        }
+
+        # Credential source without an "output_file" entry.
+        CREDENTIAL_SOURCE = {
+            "executable": {"command": "command", "timeout_millis": 30000}
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+            subject_token = credentials.retrieve_subject_token(None)
+
+            assert subject_token == self.EXECUTABLE_OIDC_TOKEN
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_missing_token_type(self):
+        """A response missing "token_type" raises ValueError."""
+        EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE = {
+            "version": 1,
+            "success": True,
+            "id_token": self.EXECUTABLE_OIDC_TOKEN,
+            "expiration_time": 9999999999,
+        }
+
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(EXECUTABLE_SUCCESSFUL_OIDC_RESPONSE).encode("UTF-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(ValueError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"The executable response is missing the token_type field."
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_missing_command(self):
+        """Construction fails when the executable config lacks "command"."""
+        with pytest.raises(ValueError) as excinfo:
+            CREDENTIAL_SOURCE = {
+                "executable": {
+                    "timeout_millis": 30000,
+                    "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+                }
+            }
+            _ = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+
+        assert excinfo.match(
+            r"Missing command field. Executable command must be provided."
+        )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_missing_output_interactive_mode(self):
+        """Interactive mode requires an output_file; retrieval fails without one."""
+        CREDENTIAL_SOURCE = {
+            "executable": {"command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND}
+        }
+        credentials = self.make_pluggable(
+            credential_source=CREDENTIAL_SOURCE, interactive=True
+        )
+        with pytest.raises(ValueError) as excinfo:
+            _ = credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(
+            r"An output_file must be specified in the credential configuration for interactive mode."
+        )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_timeout_missing_will_use_default_timeout_value(self):
+        """Omitting timeout_millis falls back to EXECUTABLE_TIMEOUT_MILLIS_DEFAULT."""
+        CREDENTIAL_SOURCE = {
+            "executable": {
+                "command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+                "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+            }
+        }
+        credentials = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+
+        assert (
+            credentials._credential_source_executable_timeout_millis
+            == pluggable.EXECUTABLE_TIMEOUT_MILLIS_DEFAULT
+        )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_timeout_small(self):
+        """timeout_millis just below the 5s lower bound is rejected."""
+        with pytest.raises(ValueError) as excinfo:
+            CREDENTIAL_SOURCE = {
+                "executable": {
+                    "command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+                    "timeout_millis": 5000 - 1,
+                    "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+                }
+            }
+            _ = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+
+        assert excinfo.match(r"Timeout must be between 5 and 120 seconds.")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_timeout_large(self):
+        """timeout_millis just above the 120s upper bound is rejected."""
+        with pytest.raises(ValueError) as excinfo:
+            CREDENTIAL_SOURCE = {
+                "executable": {
+                    "command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+                    "timeout_millis": 120000 + 1,
+                    "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+                }
+            }
+            _ = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+
+        assert excinfo.match(r"Timeout must be between 5 and 120 seconds.")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_interactive_timeout_small(self):
+        """interactive_timeout_millis just below the 30s lower bound is rejected."""
+        with pytest.raises(ValueError) as excinfo:
+            CREDENTIAL_SOURCE = {
+                "executable": {
+                    "command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+                    "interactive_timeout_millis": 30000 - 1,
+                    "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+                }
+            }
+            _ = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+
+        assert excinfo.match(
+            r"Interactive timeout must be between 30 seconds and 30 minutes."
+        )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_credential_source_interactive_timeout_large(self):
+        """interactive_timeout_millis just above the 30min upper bound is rejected."""
+        with pytest.raises(ValueError) as excinfo:
+            CREDENTIAL_SOURCE = {
+                "executable": {
+                    "command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+                    "interactive_timeout_millis": 1800000 + 1,
+                    "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+                }
+            }
+            _ = self.make_pluggable(credential_source=CREDENTIAL_SOURCE)
+
+        assert excinfo.match(
+            r"Interactive timeout must be between 30 seconds and 30 minutes."
+        )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_executable_fail(self):
+        """A non-zero executable returncode raises RefreshError with the code."""
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[], stdout=None, returncode=1
+            ),
+        ):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"Executable exited with non-zero return code 1. Error: None"
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_non_workforce_fail_interactive_mode(self):
+        """Interactive mode is rejected for non-workforce (workload) audiences."""
+        credentials = self.make_pluggable(
+            credential_source=self.CREDENTIAL_SOURCE, interactive=True
+        )
+        with pytest.raises(ValueError) as excinfo:
+            _ = credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(r"Interactive mode is only enabled for workforce pool.")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_fail_on_validation_missing_interactive_timeout(
+        self
+    ):
+        """Interactive mode requires interactive_timeout_millis to be configured."""
+        CREDENTIAL_SOURCE_EXECUTABLE = {
+            "command": self.CREDENTIAL_SOURCE_EXECUTABLE_COMMAND,
+            "output_file": self.CREDENTIAL_SOURCE_EXECUTABLE_OUTPUT_FILE,
+        }
+        CREDENTIAL_SOURCE = {"executable": CREDENTIAL_SOURCE_EXECUTABLE}
+        credentials = self.make_pluggable(
+            credential_source=CREDENTIAL_SOURCE, interactive=True
+        )
+        with pytest.raises(ValueError) as excinfo:
+            _ = credentials.retrieve_subject_token(None)
+
+        assert excinfo.match(
+            r"Interactive mode cannot run without an interactive timeout."
+        )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_executable_fail_interactive_mode(self):
+        """Non-zero returncode also fails in interactive (workforce) mode."""
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[], stdout=None, returncode=1
+            ),
+        ):
+            credentials = self.make_pluggable(
+                audience=WORKFORCE_AUDIENCE,
+                credential_source=self.CREDENTIAL_SOURCE,
+                interactive=True,
+            )
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(
+                r"Executable exited with non-zero return code 1. Error: None"
+            )
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "0"})
+    def test_revoke_failed_executable_not_allowed(self):
+        """revoke() also honors the ALLOW_EXECUTABLES=0 kill switch."""
+        credentials = self.make_pluggable(
+            credential_source=self.CREDENTIAL_SOURCE, interactive=True
+        )
+        with pytest.raises(ValueError) as excinfo:
+            _ = credentials.revoke(None)
+
+        assert excinfo.match(r"Executables need to be explicitly allowed")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_revoke_failed(self):
+        """Table-driven revoke() failures: each case pins the expected exception
+        type and message pattern for one invalid mode/response/returncode."""
+        testData = {
+            "non_interactive_mode": {
+                "interactive": False,
+                "expectErrType": ValueError,
+                "expectErrPattern": r"Revoke is only enabled under interactive mode.",
+            },
+            "executable_failed": {
+                "returncode": 1,
+                "expectErrType": exceptions.RefreshError,
+                "expectErrPattern": r"Auth revoke failed on executable.",
+            },
+            "response_validation_missing_version": {
+                "response": {},
+                "expectErrType": ValueError,
+                "expectErrPattern": r"The executable response is missing the version field.",
+            },
+            "response_validation_invalid_version": {
+                "response": {"version": 2},
+                "expectErrType": exceptions.RefreshError,
+                "expectErrPattern": r"Executable returned unsupported version.",
+            },
+            "response_validation_missing_success": {
+                "response": {"version": 1},
+                "expectErrType": ValueError,
+                "expectErrPattern": r"The executable response is missing the success field.",
+            },
+            "response_validation_failed_with_success_field_is_false": {
+                "response": {"version": 1, "success": False},
+                "expectErrType": exceptions.RefreshError,
+                "expectErrPattern": r"Revoke failed with unsuccessful response.",
+            },
+        }
+        for data in testData.values():
+            with mock.patch(
+                "subprocess.run",
+                return_value=subprocess.CompletedProcess(
+                    args=[],
+                    stdout=json.dumps(data.get("response")).encode("UTF-8"),
+                    returncode=data.get("returncode", 0),
+                ),
+            ):
+                # interactive defaults to True here; only the first case overrides it.
+                credentials = self.make_pluggable(
+                    audience=WORKFORCE_AUDIENCE,
+                    service_account_impersonation_url=SERVICE_ACCOUNT_IMPERSONATION_URL,
+                    credential_source=self.CREDENTIAL_SOURCE,
+                    interactive=data.get("interactive", True),
+                )
+
+                with pytest.raises(data.get("expectErrType")) as excinfo:
+                    _ = credentials.revoke(None)
+
+                assert excinfo.match(data.get("expectErrPattern"))
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_revoke_successfully(self):
+        """revoke() completes without raising for a version-1 success response."""
+        ACTUAL_RESPONSE = {"version": 1, "success": True}
+        with mock.patch(
+            "subprocess.run",
+            return_value=subprocess.CompletedProcess(
+                args=[],
+                stdout=json.dumps(ACTUAL_RESPONSE).encode("utf-8"),
+                returncode=0,
+            ),
+        ):
+            credentials = self.make_pluggable(
+                audience=WORKFORCE_AUDIENCE,
+                credential_source=self.CREDENTIAL_SOURCE,
+                interactive=True,
+            )
+            _ = credentials.revoke(None)
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_retrieve_subject_token_python_2(self):
+        """Under a mocked Python 2.7 version_info, retrieval is refused."""
+        with mock.patch("sys.version_info", (2, 7)):
+            credentials = self.make_pluggable(credential_source=self.CREDENTIAL_SOURCE)
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.retrieve_subject_token(None)
+
+            assert excinfo.match(r"Pluggable auth is only supported for python 3.7+")
+
+    @mock.patch.dict(os.environ, {"GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES": "1"})
+    def test_revoke_subject_token_python_2(self):
+        """Under a mocked Python 2.7 version_info, revoke is refused too."""
+        with mock.patch("sys.version_info", (2, 7)):
+            credentials = self.make_pluggable(
+                audience=WORKFORCE_AUDIENCE,
+                credential_source=self.CREDENTIAL_SOURCE,
+                interactive=True,
+            )
+
+            with pytest.raises(exceptions.RefreshError) as excinfo:
+                _ = credentials.revoke(None)
+
+            assert excinfo.match(r"Pluggable auth is only supported for python 3.7+")
diff --git a/contrib/python/google-auth/py3/tests/transport/__init__.py b/contrib/python/google-auth/py3/tests/transport/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/__init__.py
diff --git a/contrib/python/google-auth/py3/tests/transport/compliance.py b/contrib/python/google-auth/py3/tests/transport/compliance.py
new file mode 100644
index 0000000000..b3cd7e8234
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/compliance.py
@@ -0,0 +1,108 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import http.client as http_client
+import time
+
+import flask # type: ignore
+import pytest # type: ignore
+from pytest_localserver.http import WSGIServer # type: ignore
+
+from google.auth import exceptions
+
+# .invalid will never resolve, see https://tools.ietf.org/html/rfc2606
+NXDOMAIN = "test.invalid"
+
+
+class RequestResponseTests(object):
+ @pytest.fixture(scope="module")
+ def server(self):
+ """Provides a test HTTP server.
+
+ The test server is automatically created before
+ a test and destroyed at the end. The server is serving a test
+ application that can be used to verify requests.
+ """
+ app = flask.Flask(__name__)
+ app.debug = True
+
+ # pylint: disable=unused-variable
+ # (pylint thinks the flask routes are unused.)
+ @app.route("/basic")
+ def index():
+ header_value = flask.request.headers.get("x-test-header", "value")
+ headers = {"X-Test-Header": header_value}
+ return "Basic Content", http_client.OK, headers
+
+ @app.route("/server_error")
+ def server_error():
+ return "Error", http_client.INTERNAL_SERVER_ERROR
+
+ @app.route("/wait")
+ def wait():
+ time.sleep(3)
+ return "Waited"
+
+ # pylint: enable=unused-variable
+
+ server = WSGIServer(application=app.wsgi_app)
+ server.start()
+ yield server
+ server.stop()
+
+ def test_request_basic(self, server):
+ request = self.make_request()
+ response = request(url=server.url + "/basic", method="GET")
+
+ assert response.status == http_client.OK
+ assert response.headers["x-test-header"] == "value"
+ assert response.data == b"Basic Content"
+
+ def test_request_with_timeout_success(self, server):
+ request = self.make_request()
+ response = request(url=server.url + "/basic", method="GET", timeout=2)
+
+ assert response.status == http_client.OK
+ assert response.headers["x-test-header"] == "value"
+ assert response.data == b"Basic Content"
+
+ def test_request_with_timeout_failure(self, server):
+ request = self.make_request()
+
+ with pytest.raises(exceptions.TransportError):
+ request(url=server.url + "/wait", method="GET", timeout=1)
+
+ def test_request_headers(self, server):
+ request = self.make_request()
+ response = request(
+ url=server.url + "/basic",
+ method="GET",
+ headers={"x-test-header": "hello world"},
+ )
+
+ assert response.status == http_client.OK
+ assert response.headers["x-test-header"] == "hello world"
+ assert response.data == b"Basic Content"
+
+ def test_request_error(self, server):
+ request = self.make_request()
+ response = request(url=server.url + "/server_error", method="GET")
+
+ assert response.status == http_client.INTERNAL_SERVER_ERROR
+ assert response.data == b"Error"
+
+ def test_connection_error(self):
+ request = self.make_request()
+ with pytest.raises(exceptions.TransportError):
+ request(url="http://{}".format(NXDOMAIN), method="GET")
diff --git a/contrib/python/google-auth/py3/tests/transport/test__custom_tls_signer.py b/contrib/python/google-auth/py3/tests/transport/test__custom_tls_signer.py
new file mode 100644
index 0000000000..5836b325ad
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test__custom_tls_signer.py
@@ -0,0 +1,234 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import ctypes
+import os
+
+import mock
+import pytest # type: ignore
+from requests.packages.urllib3.util.ssl_ import create_urllib3_context # type: ignore
+import urllib3.contrib.pyopenssl # type: ignore
+
+from google.auth import exceptions
+from google.auth.transport import _custom_tls_signer
+
+urllib3.contrib.pyopenssl.inject_into_urllib3()
+
+FAKE_ENTERPRISE_CERT_FILE_PATH = "/path/to/enterprise/cert/file"
+ENTERPRISE_CERT_FILE = os.path.join(
+ os.path.dirname(__file__), "../data/enterprise_cert_valid.json"
+)
+INVALID_ENTERPRISE_CERT_FILE = os.path.join(
+ os.path.dirname(__file__), "../data/enterprise_cert_invalid.json"
+)
+
+
+def test_load_offload_lib():
+ with mock.patch("ctypes.CDLL", return_value=mock.MagicMock()):
+ lib = _custom_tls_signer.load_offload_lib("/path/to/offload/lib")
+
+ assert lib.ConfigureSslContext.argtypes == [
+ _custom_tls_signer.SIGN_CALLBACK_CTYPE,
+ ctypes.c_char_p,
+ ctypes.c_void_p,
+ ]
+ assert lib.ConfigureSslContext.restype == ctypes.c_int
+
+
+def test_load_signer_lib():
+ with mock.patch("ctypes.CDLL", return_value=mock.MagicMock()):
+ lib = _custom_tls_signer.load_signer_lib("/path/to/signer/lib")
+
+ assert lib.SignForPython.restype == ctypes.c_int
+ assert lib.SignForPython.argtypes == [
+ ctypes.c_char_p,
+ ctypes.c_char_p,
+ ctypes.c_int,
+ ctypes.c_char_p,
+ ctypes.c_int,
+ ]
+
+ assert lib.GetCertPemForPython.restype == ctypes.c_int
+ assert lib.GetCertPemForPython.argtypes == [
+ ctypes.c_char_p,
+ ctypes.c_char_p,
+ ctypes.c_int,
+ ]
+
+
+def test__compute_sha256_digest():
+ to_be_signed = ctypes.create_string_buffer(b"foo")
+ sig = _custom_tls_signer._compute_sha256_digest(to_be_signed, 4)
+
+ assert (
+ base64.b64encode(sig).decode() == "RG5gyEH8CAAh3lxgbt2PLPAHPO8p6i9+cn5dqHfUUYM="
+ )
+
+
+def test_get_sign_callback():
+ # mock signer lib's SignForPython function
+ mock_sig_len = 10
+ mock_signer_lib = mock.MagicMock()
+ mock_signer_lib.SignForPython.return_value = mock_sig_len
+
+ # create a sign callback. The callback calls signer lib's SignForPython method
+ sign_callback = _custom_tls_signer.get_sign_callback(
+ mock_signer_lib, FAKE_ENTERPRISE_CERT_FILE_PATH
+ )
+
+ # mock the parameters used to call the sign callback
+ to_be_signed = ctypes.POINTER(ctypes.c_ubyte)()
+ to_be_signed_len = 4
+ returned_sig_array = ctypes.c_ubyte()
+ mock_sig_array = ctypes.byref(returned_sig_array)
+ returned_sign_len = ctypes.c_ulong()
+ mock_sig_len_array = ctypes.byref(returned_sign_len)
+
+ # call the callback, make sure the signature len is returned via mock_sig_len_array[0]
+ assert sign_callback(
+ mock_sig_array, mock_sig_len_array, to_be_signed, to_be_signed_len
+ )
+ assert returned_sign_len.value == mock_sig_len
+
+
+def test_get_sign_callback_failed_to_sign():
+ # mock signer lib's SignForPython function. Set the sig len to be 0 to
+ # indicate the signing failed.
+ mock_sig_len = 0
+ mock_signer_lib = mock.MagicMock()
+ mock_signer_lib.SignForPython.return_value = mock_sig_len
+
+ # create a sign callback. The callback calls signer lib's SignForPython method
+ sign_callback = _custom_tls_signer.get_sign_callback(
+ mock_signer_lib, FAKE_ENTERPRISE_CERT_FILE_PATH
+ )
+
+ # mock the parameters used to call the sign callback
+ to_be_signed = ctypes.POINTER(ctypes.c_ubyte)()
+ to_be_signed_len = 4
+ returned_sig_array = ctypes.c_ubyte()
+ mock_sig_array = ctypes.byref(returned_sig_array)
+ returned_sign_len = ctypes.c_ulong()
+ mock_sig_len_array = ctypes.byref(returned_sign_len)
+ sign_callback(mock_sig_array, mock_sig_len_array, to_be_signed, to_be_signed_len)
+
+ # sign callback should return 0
+ assert not sign_callback(
+ mock_sig_array, mock_sig_len_array, to_be_signed, to_be_signed_len
+ )
+
+
+def test_get_cert_no_cert():
+ # mock signer lib's GetCertPemForPython function to return 0 to indicate
+ # the cert doesn't exist (cert len = 0)
+ mock_signer_lib = mock.MagicMock()
+ mock_signer_lib.GetCertPemForPython.return_value = 0
+
+ # call the get cert method
+ with pytest.raises(exceptions.MutualTLSChannelError) as excinfo:
+ _custom_tls_signer.get_cert(mock_signer_lib, FAKE_ENTERPRISE_CERT_FILE_PATH)
+
+ assert excinfo.match("failed to get certificate")
+
+
+def test_get_cert():
+ # mock signer lib's GetCertPemForPython function
+ mock_cert_len = 10
+ mock_signer_lib = mock.MagicMock()
+ mock_signer_lib.GetCertPemForPython.return_value = mock_cert_len
+
+ # call the get cert method
+ mock_cert = _custom_tls_signer.get_cert(
+ mock_signer_lib, FAKE_ENTERPRISE_CERT_FILE_PATH
+ )
+
+ # make sure the signer lib's GetCertPemForPython is called twice, and the
+ # mock_cert has length mock_cert_len
+ assert mock_signer_lib.GetCertPemForPython.call_count == 2
+ assert len(mock_cert) == mock_cert_len
+
+
+def test_custom_tls_signer():
+ offload_lib = mock.MagicMock()
+ signer_lib = mock.MagicMock()
+
+ # Test load_libraries method
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.load_signer_lib"
+ ) as load_signer_lib:
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.load_offload_lib"
+ ) as load_offload_lib:
+ load_offload_lib.return_value = offload_lib
+ load_signer_lib.return_value = signer_lib
+ signer_object = _custom_tls_signer.CustomTlsSigner(ENTERPRISE_CERT_FILE)
+ signer_object.load_libraries()
+ assert signer_object._cert is None
+ assert signer_object._enterprise_cert_file_path == ENTERPRISE_CERT_FILE
+ assert signer_object._offload_lib == offload_lib
+ assert signer_object._signer_lib == signer_lib
+ load_signer_lib.assert_called_with("/path/to/signer/lib")
+ load_offload_lib.assert_called_with("/path/to/offload/lib")
+
+ # Test set_up_custom_key and set_up_ssl_context methods
+ with mock.patch("google.auth.transport._custom_tls_signer.get_cert") as get_cert:
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.get_sign_callback"
+ ) as get_sign_callback:
+ get_cert.return_value = b"mock_cert"
+ signer_object.set_up_custom_key()
+ signer_object.attach_to_ssl_context(create_urllib3_context())
+ get_cert.assert_called_once()
+ get_sign_callback.assert_called_once()
+ offload_lib.ConfigureSslContext.assert_called_once()
+
+
+def test_custom_tls_signer_failed_to_load_libraries():
+ # Test load_libraries method
+ with pytest.raises(exceptions.MutualTLSChannelError) as excinfo:
+ signer_object = _custom_tls_signer.CustomTlsSigner(INVALID_ENTERPRISE_CERT_FILE)
+ signer_object.load_libraries()
+ assert excinfo.match("enterprise cert file is invalid")
+
+
+def test_custom_tls_signer_fail_to_offload():
+ offload_lib = mock.MagicMock()
+ signer_lib = mock.MagicMock()
+
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.load_signer_lib"
+ ) as load_signer_lib:
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.load_offload_lib"
+ ) as load_offload_lib:
+ load_offload_lib.return_value = offload_lib
+ load_signer_lib.return_value = signer_lib
+ signer_object = _custom_tls_signer.CustomTlsSigner(ENTERPRISE_CERT_FILE)
+ signer_object.load_libraries()
+
+ # set the return value to be 0 which indicates offload fails
+ offload_lib.ConfigureSslContext.return_value = 0
+
+ with pytest.raises(exceptions.MutualTLSChannelError) as excinfo:
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.get_cert"
+ ) as get_cert:
+ with mock.patch(
+ "google.auth.transport._custom_tls_signer.get_sign_callback"
+ ):
+ get_cert.return_value = b"mock_cert"
+ signer_object.set_up_custom_key()
+ signer_object.attach_to_ssl_context(create_urllib3_context())
+ assert excinfo.match("failed to configure SSL context")
diff --git a/contrib/python/google-auth/py3/tests/transport/test__http_client.py b/contrib/python/google-auth/py3/tests/transport/test__http_client.py
new file mode 100644
index 0000000000..202276323c
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test__http_client.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest # type: ignore
+
+from google.auth import exceptions
+import google.auth.transport._http_client
+from tests.transport import compliance
+
+
+class TestRequestResponse(compliance.RequestResponseTests):
+ def make_request(self):
+ return google.auth.transport._http_client.Request()
+
+ def test_non_http(self):
+ request = self.make_request()
+ with pytest.raises(exceptions.TransportError) as excinfo:
+ request(url="https://{}".format(compliance.NXDOMAIN), method="GET")
+
+ assert excinfo.match("https")
diff --git a/contrib/python/google-auth/py3/tests/transport/test__mtls_helper.py b/contrib/python/google-auth/py3/tests/transport/test__mtls_helper.py
new file mode 100644
index 0000000000..642283a5c5
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test__mtls_helper.py
@@ -0,0 +1,441 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+
+import mock
+from OpenSSL import crypto
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.auth.transport import _mtls_helper
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+
+CONTEXT_AWARE_METADATA = {"cert_provider_command": ["some command"]}
+
+ENCRYPTED_EC_PRIVATE_KEY = b"""-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIHkME8GCSqGSIb3DQEFDTBCMCkGCSqGSIb3DQEFDDAcBAgl2/yVgs1h3QICCAAw
+DAYIKoZIhvcNAgkFADAVBgkrBgEEAZdVAQIECJk2GRrvxOaJBIGQXIBnMU4wmciT
+uA6yD8q0FxuIzjG7E2S6tc5VRgSbhRB00eBO3jWmO2pBybeQW+zVioDcn50zp2ts
+wYErWC+LCm1Zg3r+EGnT1E1GgNoODbVQ3AEHlKh1CGCYhEovxtn3G+Fjh7xOBrNB
+saVVeDb4tHD4tMkiVVUBrUcTZPndP73CtgyGHYEphasYPzEz3+AU
+-----END ENCRYPTED PRIVATE KEY-----"""
+
+EC_PUBLIC_KEY = b"""-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEvCNi1NoDY1oMqPHIgXI8RBbTYGi/
+brEjbre1nSiQW11xRTJbVeETdsuP0EAu2tG3PcRhhwDfeJ8zXREgTBurNw==
+-----END PUBLIC KEY-----"""
+
+PASSPHRASE = b"""-----BEGIN PASSPHRASE-----
+password
+-----END PASSPHRASE-----"""
+PASSPHRASE_VALUE = b"password"
+
+
+def check_cert_and_key(content, expected_cert, expected_key):
+ success = True
+
+ cert_match = re.findall(_mtls_helper._CERT_REGEX, content)
+ success = success and len(cert_match) == 1 and cert_match[0] == expected_cert
+
+ key_match = re.findall(_mtls_helper._KEY_REGEX, content)
+ success = success and len(key_match) == 1 and key_match[0] == expected_key
+
+ return success
+
+
+class TestCertAndKeyRegex(object):
+ def test_cert_and_key(self):
+ # Test single cert and single key
+ check_cert_and_key(
+ pytest.public_cert_bytes + pytest.private_key_bytes,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+ check_cert_and_key(
+ pytest.private_key_bytes + pytest.public_cert_bytes,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+
+ # Test cert chain and single key
+ check_cert_and_key(
+ pytest.public_cert_bytes
+ + pytest.public_cert_bytes
+ + pytest.private_key_bytes,
+ pytest.public_cert_bytes + pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+ check_cert_and_key(
+ pytest.private_key_bytes
+ + pytest.public_cert_bytes
+ + pytest.public_cert_bytes,
+ pytest.public_cert_bytes + pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+
+ def test_key(self):
+ # Create some fake keys for regex check.
+ KEY = b"""-----BEGIN PRIVATE KEY-----
+ MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+ /fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+ -----END PRIVATE KEY-----"""
+ RSA_KEY = b"""-----BEGIN RSA PRIVATE KEY-----
+ MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+ /fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+ -----END RSA PRIVATE KEY-----"""
+ EC_KEY = b"""-----BEGIN EC PRIVATE KEY-----
+ MIIBCgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZg
+ /fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQAB
+ -----END EC PRIVATE KEY-----"""
+
+ check_cert_and_key(
+ pytest.public_cert_bytes + KEY, pytest.public_cert_bytes, KEY
+ )
+ check_cert_and_key(
+ pytest.public_cert_bytes + RSA_KEY, pytest.public_cert_bytes, RSA_KEY
+ )
+ check_cert_and_key(
+ pytest.public_cert_bytes + EC_KEY, pytest.public_cert_bytes, EC_KEY
+ )
+
+
+class TestCheckaMetadataPath(object):
+ def test_success(self):
+ metadata_path = os.path.join(DATA_DIR, "context_aware_metadata.json")
+ returned_path = _mtls_helper._check_dca_metadata_path(metadata_path)
+ assert returned_path is not None
+
+ def test_failure(self):
+ metadata_path = os.path.join(DATA_DIR, "not_exists.json")
+ returned_path = _mtls_helper._check_dca_metadata_path(metadata_path)
+ assert returned_path is None
+
+
+class TestReadMetadataFile(object):
+ def test_success(self):
+ metadata_path = os.path.join(DATA_DIR, "context_aware_metadata.json")
+ metadata = _mtls_helper._read_dca_metadata_file(metadata_path)
+
+ assert "cert_provider_command" in metadata
+
+ def test_file_not_json(self):
+ # read a file which is not json format.
+ metadata_path = os.path.join(DATA_DIR, "privatekey.pem")
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._read_dca_metadata_file(metadata_path)
+
+
+class TestRunCertProviderCommand(object):
+ def create_mock_process(self, output, error):
+ # There are two steps to execute a script with subprocess.Popen.
+ # (1) process = subprocess.Popen([commands])
+ # (2) stdout, stderr = process.communicate()
+ # This function creates a mock process which can be returned by a mock
+ # subprocess.Popen. The mock process returns the given output and error
+ # when mock_process.communicate() is called.
+ mock_process = mock.Mock()
+ attrs = {"communicate.return_value": (output, error), "returncode": 0}
+ mock_process.configure_mock(**attrs)
+ return mock_process
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_success(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + pytest.private_key_bytes, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(["command"])
+ assert cert == pytest.public_cert_bytes
+ assert key == pytest.private_key_bytes
+ assert passphrase is None
+
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + ENCRYPTED_EC_PRIVATE_KEY + PASSPHRASE, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+ assert cert == pytest.public_cert_bytes
+ assert key == ENCRYPTED_EC_PRIVATE_KEY
+ assert passphrase == PASSPHRASE_VALUE
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_success_with_cert_chain(self, mock_popen):
+ PUBLIC_CERT_CHAIN_BYTES = pytest.public_cert_bytes + pytest.public_cert_bytes
+ mock_popen.return_value = self.create_mock_process(
+ PUBLIC_CERT_CHAIN_BYTES + pytest.private_key_bytes, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(["command"])
+ assert cert == PUBLIC_CERT_CHAIN_BYTES
+ assert key == pytest.private_key_bytes
+ assert passphrase is None
+
+ mock_popen.return_value = self.create_mock_process(
+ PUBLIC_CERT_CHAIN_BYTES + ENCRYPTED_EC_PRIVATE_KEY + PASSPHRASE, b""
+ )
+ cert, key, passphrase = _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+ assert cert == PUBLIC_CERT_CHAIN_BYTES
+ assert key == ENCRYPTED_EC_PRIVATE_KEY
+ assert passphrase == PASSPHRASE_VALUE
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_missing_cert(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.private_key_bytes, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ mock_popen.return_value = self.create_mock_process(
+ ENCRYPTED_EC_PRIVATE_KEY + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_missing_key(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_missing_passphrase(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + ENCRYPTED_EC_PRIVATE_KEY, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_passphrase_not_expected(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + pytest.private_key_bytes + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_encrypted_key_expected(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + pytest.private_key_bytes + PASSPHRASE, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(
+ ["command"], expect_encrypted_key=True
+ )
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_unencrypted_key_expected(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(
+ pytest.public_cert_bytes + ENCRYPTED_EC_PRIVATE_KEY, b""
+ )
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_cert_provider_returns_error(self, mock_popen):
+ mock_popen.return_value = self.create_mock_process(b"", b"some error")
+ mock_popen.return_value.returncode = 1
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+ @mock.patch("subprocess.Popen", autospec=True)
+ def test_popen_raise_exception(self, mock_popen):
+ mock_popen.side_effect = OSError()
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper._run_cert_provider_command(["command"])
+
+
+class TestGetClientSslCredentials(object):
+ @mock.patch(
+ "google.auth.transport._mtls_helper._run_cert_provider_command", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_success(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_run_cert_provider_command,
+ ):
+ mock_check_dca_metadata_path.return_value = True
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["command"]
+ }
+ mock_run_cert_provider_command.return_value = (b"cert", b"key", None)
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials()
+ assert has_cert
+ assert cert == b"cert"
+ assert key == b"key"
+ assert passphrase is None
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_success_without_metadata(self, mock_check_dca_metadata_path):
+ mock_check_dca_metadata_path.return_value = False
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials()
+ assert not has_cert
+ assert cert is None
+ assert key is None
+ assert passphrase is None
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._run_cert_provider_command", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_success_with_encrypted_key(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_run_cert_provider_command,
+ ):
+ mock_check_dca_metadata_path.return_value = True
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["command"]
+ }
+ mock_run_cert_provider_command.return_value = (b"cert", b"key", b"passphrase")
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials(
+ generate_encrypted_key=True
+ )
+ assert has_cert
+ assert cert == b"cert"
+ assert key == b"key"
+ assert passphrase == b"passphrase"
+ mock_run_cert_provider_command.assert_called_once_with(
+ ["command", "--with_passphrase"], expect_encrypted_key=True
+ )
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_missing_cert_command(
+ self, mock_check_dca_metadata_path, mock_read_dca_metadata_file
+ ):
+ mock_check_dca_metadata_path.return_value = True
+ mock_read_dca_metadata_file.return_value = {}
+ with pytest.raises(exceptions.ClientCertError):
+ _mtls_helper.get_client_ssl_credentials()
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._run_cert_provider_command", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_customize_context_aware_metadata_path(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_run_cert_provider_command,
+ ):
+ context_aware_metadata_path = "/path/to/metata/data"
+ mock_check_dca_metadata_path.return_value = context_aware_metadata_path
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["command"]
+ }
+ mock_run_cert_provider_command.return_value = (b"cert", b"key", None)
+
+ has_cert, cert, key, passphrase = _mtls_helper.get_client_ssl_credentials(
+ context_aware_metadata_path=context_aware_metadata_path
+ )
+
+ assert has_cert
+ assert cert == b"cert"
+ assert key == b"key"
+ assert passphrase is None
+ mock_check_dca_metadata_path.assert_called_with(context_aware_metadata_path)
+ mock_read_dca_metadata_file.assert_called_with(context_aware_metadata_path)
+
+
+class TestGetClientCertAndKey(object):
+ def test_callback_success(self):
+ callback = mock.Mock()
+ callback.return_value = (pytest.public_cert_bytes, pytest.private_key_bytes)
+
+ found_cert_key, cert, key = _mtls_helper.get_client_cert_and_key(callback)
+ assert found_cert_key
+ assert cert == pytest.public_cert_bytes
+ assert key == pytest.private_key_bytes
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+ )
+ def test_use_metadata(self, mock_get_client_ssl_credentials):
+ mock_get_client_ssl_credentials.return_value = (
+ True,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ None,
+ )
+
+ found_cert_key, cert, key = _mtls_helper.get_client_cert_and_key()
+ assert found_cert_key
+ assert cert == pytest.public_cert_bytes
+ assert key == pytest.private_key_bytes
+
+
+class TestDecryptPrivateKey(object):
+ def test_success(self):
+ decrypted_key = _mtls_helper.decrypt_private_key(
+ ENCRYPTED_EC_PRIVATE_KEY, PASSPHRASE_VALUE
+ )
+ private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, decrypted_key)
+ public_key = crypto.load_publickey(crypto.FILETYPE_PEM, EC_PUBLIC_KEY)
+ x509 = crypto.X509()
+ x509.set_pubkey(public_key)
+
+ # Test the decrypted key works by signing and verification.
+ signature = crypto.sign(private_key, b"data", "sha256")
+ crypto.verify(x509, signature, b"data", "sha256")
+
+ def test_crypto_error(self):
+ with pytest.raises(crypto.Error):
+ _mtls_helper.decrypt_private_key(
+ ENCRYPTED_EC_PRIVATE_KEY, b"wrong_password"
+ )
diff --git a/contrib/python/google-auth/py3/tests/transport/test_grpc.py b/contrib/python/google-auth/py3/tests/transport/test_grpc.py
new file mode 100644
index 0000000000..05dc5fad0e
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test_grpc.py
@@ -0,0 +1,503 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import os
+import time
+
+import mock
+import pytest # type: ignore
+
+from google.auth import _helpers
+from google.auth import credentials
+from google.auth import environment_vars
+from google.auth import exceptions
+from google.auth import transport
+from google.oauth2 import service_account
+
+try:
+ # pylint: disable=ungrouped-imports
+ import grpc # type: ignore
+ import google.auth.transport.grpc
+
+ HAS_GRPC = True
+except ImportError: # pragma: NO COVER
+ HAS_GRPC = False
+
+import yatest.common
+DATA_DIR = os.path.join(yatest.common.test_source_path(), "data")
+METADATA_PATH = os.path.join(DATA_DIR, "context_aware_metadata.json")
+with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
+ PRIVATE_KEY_BYTES = fh.read()
+with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
+ PUBLIC_CERT_BYTES = fh.read()
+
+pytestmark = pytest.mark.skipif(not HAS_GRPC, reason="gRPC is unavailable.")
+
+
+class CredentialsStub(credentials.Credentials):
+ def __init__(self, token="token"):
+ super(CredentialsStub, self).__init__()
+ self.token = token
+ self.expiry = None
+
+ def refresh(self, request):
+ self.token += "1"
+
+ def with_quota_project(self, quota_project_id):
+ raise NotImplementedError()
+
+
+class TestAuthMetadataPlugin(object):
+ def test_call_no_refresh(self):
+ credentials = CredentialsStub()
+ request = mock.create_autospec(transport.Request)
+
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = mock.sentinel.method_name
+ context.service_url = mock.sentinel.service_url
+ callback = mock.create_autospec(grpc.AuthMetadataPluginCallback)
+
+ plugin(context, callback)
+
+ time.sleep(2)
+
+ callback.assert_called_once_with(
+ [("authorization", "Bearer {}".format(credentials.token))], None
+ )
+
+ def test_call_refresh(self):
+ credentials = CredentialsStub()
+ credentials.expiry = datetime.datetime.min + _helpers.REFRESH_THRESHOLD
+ request = mock.create_autospec(transport.Request)
+
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = mock.sentinel.method_name
+ context.service_url = mock.sentinel.service_url
+ callback = mock.create_autospec(grpc.AuthMetadataPluginCallback)
+
+ plugin(context, callback)
+
+ time.sleep(2)
+
+ assert credentials.token == "token1"
+ callback.assert_called_once_with(
+ [("authorization", "Bearer {}".format(credentials.token))], None
+ )
+
+ def test__get_authorization_headers_with_service_account(self):
+ credentials = mock.create_autospec(service_account.Credentials)
+ request = mock.create_autospec(transport.Request)
+
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = "methodName"
+ context.service_url = "https://pubsub.googleapis.com/methodName"
+
+ plugin._get_authorization_headers(context)
+
+ credentials._create_self_signed_jwt.assert_called_once_with(None)
+
+ def test__get_authorization_headers_with_service_account_and_default_host(self):
+ credentials = mock.create_autospec(service_account.Credentials)
+ request = mock.create_autospec(transport.Request)
+
+ default_host = "pubsub.googleapis.com"
+ plugin = google.auth.transport.grpc.AuthMetadataPlugin(
+ credentials, request, default_host=default_host
+ )
+
+ context = mock.create_autospec(grpc.AuthMetadataContext, instance=True)
+ context.method_name = "methodName"
+ context.service_url = "https://pubsub.googleapis.com/methodName"
+
+ plugin._get_authorization_headers(context)
+
+ credentials._create_self_signed_jwt.assert_called_once_with(
+ "https://{}/".format(default_host)
+ )
+
+
+@mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+)
+@mock.patch("grpc.composite_channel_credentials", autospec=True)
+@mock.patch("grpc.metadata_call_credentials", autospec=True)
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch("grpc.secure_channel", autospec=True)
+class TestSecureAuthorizedChannel(object):
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_secure_authorized_channel_adc(
+ self,
+ check_dca_metadata_path,
+ read_dca_metadata_file,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = CredentialsStub()
+ request = mock.create_autospec(transport.Request)
+ target = "example.com:80"
+
+ # Mock the context aware metadata and client cert/key so mTLS SSL channel
+ # will be used.
+ check_dca_metadata_path.return_value = METADATA_PATH
+ read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["some command"]
+ }
+ get_client_ssl_credentials.return_value = (
+ True,
+ PUBLIC_CERT_BYTES,
+ PRIVATE_KEY_BYTES,
+ None,
+ )
+
+ channel = None
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, options=mock.sentinel.options
+ )
+
+ # Check the auth plugin construction.
+ auth_plugin = metadata_call_credentials.call_args[0][0]
+ assert isinstance(auth_plugin, google.auth.transport.grpc.AuthMetadataPlugin)
+ assert auth_plugin._credentials == credentials
+ assert auth_plugin._request == request
+
+ # Check the ssl channel call.
+ ssl_channel_credentials.assert_called_once_with(
+ certificate_chain=PUBLIC_CERT_BYTES, private_key=PRIVATE_KEY_BYTES
+ )
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+ # Check the channel call.
+ secure_channel.assert_called_once_with(
+ target,
+ composite_channel_credentials.return_value,
+ options=mock.sentinel.options,
+ )
+ assert channel == secure_channel.return_value
+
+ @mock.patch("google.auth.transport.grpc.SslCredentials", autospec=True)
+ def test_secure_authorized_channel_adc_without_client_cert_env(
+ self,
+ ssl_credentials_adc_method,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE
+ # environment variable is not set.
+ credentials = CredentialsStub()
+ request = mock.create_autospec(transport.Request)
+ target = "example.com:80"
+
+ channel = google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, options=mock.sentinel.options
+ )
+
+ # Check the auth plugin construction.
+ auth_plugin = metadata_call_credentials.call_args[0][0]
+ assert isinstance(auth_plugin, google.auth.transport.grpc.AuthMetadataPlugin)
+ assert auth_plugin._credentials == credentials
+ assert auth_plugin._request == request
+
+ # Check the ssl channel call.
+ ssl_channel_credentials.assert_called_once()
+ ssl_credentials_adc_method.assert_not_called()
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+ # Check the channel call.
+ secure_channel.assert_called_once_with(
+ target,
+ composite_channel_credentials.return_value,
+ options=mock.sentinel.options,
+ )
+ assert channel == secure_channel.return_value
+
+ def test_secure_authorized_channel_explicit_ssl(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ ssl_credentials = mock.Mock()
+
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, ssl_credentials=ssl_credentials
+ )
+
+ # Since explicit SSL credentials are provided, get_client_ssl_credentials
+ # shouldn't be called.
+ assert not get_client_ssl_credentials.called
+
+ # Check the ssl channel call.
+ assert not ssl_channel_credentials.called
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_credentials, metadata_call_credentials.return_value
+ )
+
+ def test_secure_authorized_channel_mutual_exclusive(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ ssl_credentials = mock.Mock()
+ client_cert_callback = mock.Mock()
+
+ with pytest.raises(ValueError):
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials,
+ request,
+ target,
+ ssl_credentials=ssl_credentials,
+ client_cert_callback=client_cert_callback,
+ )
+
+ def test_secure_authorized_channel_with_client_cert_callback_success(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ client_cert_callback = mock.Mock()
+ client_cert_callback.return_value = (PUBLIC_CERT_BYTES, PRIVATE_KEY_BYTES)
+
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, client_cert_callback=client_cert_callback
+ )
+
+ client_cert_callback.assert_called_once()
+
+ # Check we are using the cert and key provided by client_cert_callback.
+ ssl_channel_credentials.assert_called_once_with(
+ certificate_chain=PUBLIC_CERT_BYTES, private_key=PRIVATE_KEY_BYTES
+ )
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True
+ )
+ @mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+ )
+ def test_secure_authorized_channel_with_client_cert_callback_failure(
+ self,
+ check_dca_metadata_path,
+ read_dca_metadata_file,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+
+ client_cert_callback = mock.Mock()
+ client_cert_callback.side_effect = Exception("callback exception")
+
+ with pytest.raises(Exception) as excinfo:
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials,
+ request,
+ target,
+ client_cert_callback=client_cert_callback,
+ )
+
+ assert str(excinfo.value) == "callback exception"
+
+ def test_secure_authorized_channel_cert_callback_without_client_cert_env(
+ self,
+ secure_channel,
+ ssl_channel_credentials,
+ metadata_call_credentials,
+ composite_channel_credentials,
+ get_client_ssl_credentials,
+ ):
+ # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE
+ # environment variable is not set.
+ credentials = mock.Mock()
+ request = mock.Mock()
+ target = "example.com:80"
+ client_cert_callback = mock.Mock()
+
+ google.auth.transport.grpc.secure_authorized_channel(
+ credentials, request, target, client_cert_callback=client_cert_callback
+ )
+
+ # Check client_cert_callback is not called because GOOGLE_API_USE_CLIENT_CERTIFICATE
+ # is not set.
+ client_cert_callback.assert_not_called()
+
+ ssl_channel_credentials.assert_called_once()
+
+ # Check the composite credentials call.
+ composite_channel_credentials.assert_called_once_with(
+ ssl_channel_credentials.return_value, metadata_call_credentials.return_value
+ )
+
+
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+)
+@mock.patch("google.auth.transport._mtls_helper._read_dca_metadata_file", autospec=True)
+@mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+)
+class TestSslCredentials(object):
+ def test_no_context_aware_metadata(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ # Mock that the metadata file doesn't exist.
+ mock_check_dca_metadata_path.return_value = None
+
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ ssl_credentials = google.auth.transport.grpc.SslCredentials()
+
+ # Since no context aware metadata is found, we wouldn't call
+ # get_client_ssl_credentials, and the SSL channel credentials created is
+ # non mTLS.
+ assert ssl_credentials.ssl_credentials is not None
+ assert not ssl_credentials.is_mtls
+ mock_get_client_ssl_credentials.assert_not_called()
+ mock_ssl_channel_credentials.assert_called_once_with()
+
+ def test_get_client_ssl_credentials_failure(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ mock_check_dca_metadata_path.return_value = METADATA_PATH
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["some command"]
+ }
+
+ # Mock that client cert and key are not loaded and exception is raised.
+ mock_get_client_ssl_credentials.side_effect = exceptions.ClientCertError()
+
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ assert google.auth.transport.grpc.SslCredentials().ssl_credentials
+
+ def test_get_client_ssl_credentials_success(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ mock_check_dca_metadata_path.return_value = METADATA_PATH
+ mock_read_dca_metadata_file.return_value = {
+ "cert_provider_command": ["some command"]
+ }
+ mock_get_client_ssl_credentials.return_value = (
+ True,
+ PUBLIC_CERT_BYTES,
+ PRIVATE_KEY_BYTES,
+ None,
+ )
+
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ ssl_credentials = google.auth.transport.grpc.SslCredentials()
+
+ assert ssl_credentials.ssl_credentials is not None
+ assert ssl_credentials.is_mtls
+ mock_get_client_ssl_credentials.assert_called_once()
+ mock_ssl_channel_credentials.assert_called_once_with(
+ certificate_chain=PUBLIC_CERT_BYTES, private_key=PRIVATE_KEY_BYTES
+ )
+
+ def test_get_client_ssl_credentials_without_client_cert_env(
+ self,
+ mock_check_dca_metadata_path,
+ mock_read_dca_metadata_file,
+ mock_get_client_ssl_credentials,
+ mock_ssl_channel_credentials,
+ ):
+ # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE is not set.
+ ssl_credentials = google.auth.transport.grpc.SslCredentials()
+
+ assert ssl_credentials.ssl_credentials is not None
+ assert not ssl_credentials.is_mtls
+ mock_check_dca_metadata_path.assert_not_called()
+ mock_read_dca_metadata_file.assert_not_called()
+ mock_get_client_ssl_credentials.assert_not_called()
+ mock_ssl_channel_credentials.assert_called_once()
diff --git a/contrib/python/google-auth/py3/tests/transport/test_mtls.py b/contrib/python/google-auth/py3/tests/transport/test_mtls.py
new file mode 100644
index 0000000000..b62063e479
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test_mtls.py
@@ -0,0 +1,83 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+import pytest # type: ignore
+
+from google.auth import exceptions
+from google.auth.transport import mtls
+
+
+@mock.patch(
+ "google.auth.transport._mtls_helper._check_dca_metadata_path", autospec=True
+)
+def test_has_default_client_cert_source(check_dca_metadata_path):
+ check_dca_metadata_path.return_value = mock.Mock()
+ assert mtls.has_default_client_cert_source()
+
+ check_dca_metadata_path.return_value = None
+ assert not mtls.has_default_client_cert_source()
+
+
+@mock.patch("google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True)
+@mock.patch("google.auth.transport.mtls.has_default_client_cert_source", autospec=True)
+def test_default_client_cert_source(
+ has_default_client_cert_source, get_client_cert_and_key
+):
+ # Test default client cert source doesn't exist.
+ has_default_client_cert_source.return_value = False
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ mtls.default_client_cert_source()
+
+ # The following tests will assume default client cert source exists.
+ has_default_client_cert_source.return_value = True
+
+ # Test good callback.
+ get_client_cert_and_key.return_value = (True, b"cert", b"key")
+ callback = mtls.default_client_cert_source()
+ assert callback() == (b"cert", b"key")
+
+ # Test bad callback which throws exception.
+ get_client_cert_and_key.side_effect = ValueError()
+ callback = mtls.default_client_cert_source()
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ callback()
+
+
+@mock.patch(
+ "google.auth.transport._mtls_helper.get_client_ssl_credentials", autospec=True
+)
+@mock.patch("google.auth.transport.mtls.has_default_client_cert_source", autospec=True)
+def test_default_client_encrypted_cert_source(
+ has_default_client_cert_source, get_client_ssl_credentials
+):
+ # Test default client cert source doesn't exist.
+ has_default_client_cert_source.return_value = False
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ mtls.default_client_encrypted_cert_source("cert_path", "key_path")
+
+ # The following tests will assume default client cert source exists.
+ has_default_client_cert_source.return_value = True
+
+ # Test good callback.
+ get_client_ssl_credentials.return_value = (True, b"cert", b"key", b"passphrase")
+ callback = mtls.default_client_encrypted_cert_source("cert_path", "key_path")
+ with mock.patch("{}.open".format(__name__), return_value=mock.MagicMock()):
+ assert callback() == ("cert_path", "key_path", b"passphrase")
+
+ # Test bad callback which throws exception.
+ get_client_ssl_credentials.side_effect = exceptions.ClientCertError()
+ callback = mtls.default_client_encrypted_cert_source("cert_path", "key_path")
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ callback()
diff --git a/contrib/python/google-auth/py3/tests/transport/test_requests.py b/contrib/python/google-auth/py3/tests/transport/test_requests.py
new file mode 100644
index 0000000000..d962814346
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test_requests.py
@@ -0,0 +1,575 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import functools
+import http.client as http_client
+import os
+import sys
+
+import freezegun
+import mock
+import OpenSSL
+import pytest # type: ignore
+import requests
+import requests.adapters
+
+from google.auth import environment_vars
+from google.auth import exceptions
+import google.auth.credentials
+import google.auth.transport._custom_tls_signer
+import google.auth.transport._mtls_helper
+import google.auth.transport.requests
+from google.oauth2 import service_account
+from tests.transport import compliance
+
+
+@pytest.fixture
+def frozen_time():
+ with freezegun.freeze_time("1970-01-01 00:00:00", tick=False) as frozen:
+ yield frozen
+
+
+class TestRequestResponse(compliance.RequestResponseTests):
+ def make_request(self):
+ return google.auth.transport.requests.Request()
+
+ def test_timeout(self):
+ http = mock.create_autospec(requests.Session, instance=True)
+ request = google.auth.transport.requests.Request(http)
+ request(url="http://example.com", method="GET", timeout=5)
+
+ assert http.request.call_args[1]["timeout"] == 5
+
+ def test_session_closed_on_del(self):
+ http = mock.create_autospec(requests.Session, instance=True)
+ request = google.auth.transport.requests.Request(http)
+ request.__del__()
+ http.close.assert_called_with()
+
+ http = mock.create_autospec(requests.Session, instance=True)
+ http.close.side_effect = TypeError("test injected TypeError")
+ request = google.auth.transport.requests.Request(http)
+ request.__del__()
+ http.close.assert_called_with()
+
+
+class TestTimeoutGuard(object):
+ def make_guard(self, *args, **kwargs):
+ return google.auth.transport.requests.TimeoutGuard(*args, **kwargs)
+
+ def test_tracks_elapsed_time_w_numeric_timeout(self, frozen_time):
+ with self.make_guard(timeout=10) as guard:
+ frozen_time.tick(delta=datetime.timedelta(seconds=3.8))
+ assert guard.remaining_timeout == 6.2
+
+ def test_tracks_elapsed_time_w_tuple_timeout(self, frozen_time):
+ with self.make_guard(timeout=(16, 19)) as guard:
+ frozen_time.tick(delta=datetime.timedelta(seconds=3.8))
+ assert guard.remaining_timeout == (12.2, 15.2)
+
+ def test_noop_if_no_timeout(self, frozen_time):
+ with self.make_guard(timeout=None) as guard:
+ frozen_time.tick(delta=datetime.timedelta(days=3650))
+            # NOTE: no timeout error raised, even though years have passed
+ assert guard.remaining_timeout is None
+
+ def test_timeout_error_w_numeric_timeout(self, frozen_time):
+ with pytest.raises(requests.exceptions.Timeout):
+ with self.make_guard(timeout=10) as guard:
+ frozen_time.tick(delta=datetime.timedelta(seconds=10.001))
+ assert guard.remaining_timeout == pytest.approx(-0.001)
+
+ def test_timeout_error_w_tuple_timeout(self, frozen_time):
+ with pytest.raises(requests.exceptions.Timeout):
+ with self.make_guard(timeout=(11, 10)) as guard:
+ frozen_time.tick(delta=datetime.timedelta(seconds=10.001))
+ assert guard.remaining_timeout == pytest.approx((0.999, -0.001))
+
+ def test_custom_timeout_error_type(self, frozen_time):
+ class FooError(Exception):
+ pass
+
+ with pytest.raises(FooError):
+ with self.make_guard(timeout=1, timeout_error_type=FooError):
+ frozen_time.tick(delta=datetime.timedelta(seconds=2))
+
+ def test_lets_suite_errors_bubble_up(self, frozen_time):
+ with pytest.raises(IndexError):
+ with self.make_guard(timeout=1):
+ [1, 2, 3][3]
+
+
+class CredentialsStub(google.auth.credentials.Credentials):
+ def __init__(self, token="token"):
+ super(CredentialsStub, self).__init__()
+ self.token = token
+
+ def apply(self, headers, token=None):
+ headers["authorization"] = self.token
+
+ def before_request(self, request, method, url, headers):
+ self.apply(headers)
+
+ def refresh(self, request):
+ self.token += "1"
+
+ def with_quota_project(self, quota_project_id):
+ raise NotImplementedError()
+
+
+class TimeTickCredentialsStub(CredentialsStub):
+ """Credentials that spend some (mocked) time when refreshing a token."""
+
+ def __init__(self, time_tick, token="token"):
+ self._time_tick = time_tick
+ super(TimeTickCredentialsStub, self).__init__(token=token)
+
+ def refresh(self, request):
+ self._time_tick()
+        super(TimeTickCredentialsStub, self).refresh(request)
+
+
+class AdapterStub(requests.adapters.BaseAdapter):
+ def __init__(self, responses, headers=None):
+ super(AdapterStub, self).__init__()
+ self.responses = responses
+ self.requests = []
+ self.headers = headers or {}
+
+ def send(self, request, **kwargs):
+ # pylint: disable=arguments-differ
+ # request is the only required argument here and the only argument
+ # we care about.
+ self.requests.append(request)
+ return self.responses.pop(0)
+
+ def close(self): # pragma: NO COVER
+ # pylint wants this to be here because it's abstract in the base
+ # class, but requests never actually calls it.
+ return
+
+
+class TimeTickAdapterStub(AdapterStub):
+ """Adapter that spends some (mocked) time when making a request."""
+
+ def __init__(self, time_tick, responses, headers=None):
+ self._time_tick = time_tick
+ super(TimeTickAdapterStub, self).__init__(responses, headers=headers)
+
+ def send(self, request, **kwargs):
+ self._time_tick()
+ return super(TimeTickAdapterStub, self).send(request, **kwargs)
+
+
+class TestMutualTlsAdapter(object):
+ @mock.patch.object(requests.adapters.HTTPAdapter, "init_poolmanager")
+ @mock.patch.object(requests.adapters.HTTPAdapter, "proxy_manager_for")
+ def test_success(self, mock_proxy_manager_for, mock_init_poolmanager):
+ adapter = google.auth.transport.requests._MutualTlsAdapter(
+ pytest.public_cert_bytes, pytest.private_key_bytes
+ )
+
+ adapter.init_poolmanager()
+ mock_init_poolmanager.assert_called_with(ssl_context=adapter._ctx_poolmanager)
+
+ adapter.proxy_manager_for()
+ mock_proxy_manager_for.assert_called_with(ssl_context=adapter._ctx_proxymanager)
+
+ def test_invalid_cert_or_key(self):
+ with pytest.raises(OpenSSL.crypto.Error):
+ google.auth.transport.requests._MutualTlsAdapter(
+ b"invalid cert", b"invalid key"
+ )
+
+ @mock.patch.dict("sys.modules", {"OpenSSL.crypto": None})
+ def test_import_error(self):
+ with pytest.raises(ImportError):
+ google.auth.transport.requests._MutualTlsAdapter(
+ pytest.public_cert_bytes, pytest.private_key_bytes
+ )
+
+
+def make_response(status=http_client.OK, data=None):
+ response = requests.Response()
+ response.status_code = status
+ response._content = data
+ return response
+
+
+class TestAuthorizedSession(object):
+ TEST_URL = "http://example.com/"
+
+ def test_constructor(self):
+ authed_session = google.auth.transport.requests.AuthorizedSession(
+ mock.sentinel.credentials
+ )
+
+ assert authed_session.credentials == mock.sentinel.credentials
+
+ def test_constructor_with_auth_request(self):
+ http = mock.create_autospec(requests.Session)
+ auth_request = google.auth.transport.requests.Request(http)
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(
+ mock.sentinel.credentials, auth_request=auth_request
+ )
+
+ assert authed_session._auth_request is auth_request
+
+ def test_request_default_timeout(self):
+ credentials = mock.Mock(wraps=CredentialsStub())
+ response = make_response()
+ adapter = AdapterStub([response])
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
+ authed_session.mount(self.TEST_URL, adapter)
+
+ patcher = mock.patch("google.auth.transport.requests.requests.Session.request")
+ with patcher as patched_request:
+ authed_session.request("GET", self.TEST_URL)
+
+ expected_timeout = google.auth.transport.requests._DEFAULT_TIMEOUT
+ assert patched_request.call_args[1]["timeout"] == expected_timeout
+
+ def test_request_no_refresh(self):
+ credentials = mock.Mock(wraps=CredentialsStub())
+ response = make_response()
+ adapter = AdapterStub([response])
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
+ authed_session.mount(self.TEST_URL, adapter)
+
+ result = authed_session.request("GET", self.TEST_URL)
+
+ assert response == result
+ assert credentials.before_request.called
+ assert not credentials.refresh.called
+ assert len(adapter.requests) == 1
+ assert adapter.requests[0].url == self.TEST_URL
+ assert adapter.requests[0].headers["authorization"] == "token"
+
+ def test_request_refresh(self):
+ credentials = mock.Mock(wraps=CredentialsStub())
+ final_response = make_response(status=http_client.OK)
+ # First request will 401, second request will succeed.
+ adapter = AdapterStub(
+ [make_response(status=http_client.UNAUTHORIZED), final_response]
+ )
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(
+ credentials, refresh_timeout=60
+ )
+ authed_session.mount(self.TEST_URL, adapter)
+
+ result = authed_session.request("GET", self.TEST_URL)
+
+ assert result == final_response
+ assert credentials.before_request.call_count == 2
+ assert credentials.refresh.called
+ assert len(adapter.requests) == 2
+
+ assert adapter.requests[0].url == self.TEST_URL
+ assert adapter.requests[0].headers["authorization"] == "token"
+
+ assert adapter.requests[1].url == self.TEST_URL
+ assert adapter.requests[1].headers["authorization"] == "token1"
+
+ def test_request_max_allowed_time_timeout_error(self, frozen_time):
+ tick_one_second = functools.partial(
+ frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
+ )
+
+ credentials = mock.Mock(
+ wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
+ )
+ adapter = TimeTickAdapterStub(
+ time_tick=tick_one_second, responses=[make_response(status=http_client.OK)]
+ )
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
+ authed_session.mount(self.TEST_URL, adapter)
+
+ # Because a request takes a full mocked second, max_allowed_time shorter
+ # than that will cause a timeout error.
+ with pytest.raises(requests.exceptions.Timeout):
+ authed_session.request("GET", self.TEST_URL, max_allowed_time=0.9)
+
+ def test_request_max_allowed_time_w_transport_timeout_no_error(self, frozen_time):
+ tick_one_second = functools.partial(
+ frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
+ )
+
+ credentials = mock.Mock(
+ wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
+ )
+ adapter = TimeTickAdapterStub(
+ time_tick=tick_one_second,
+ responses=[
+ make_response(status=http_client.UNAUTHORIZED),
+ make_response(status=http_client.OK),
+ ],
+ )
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
+ authed_session.mount(self.TEST_URL, adapter)
+
+ # A short configured transport timeout does not affect max_allowed_time.
+ # The latter is not adjusted to it and is only concerned with the actual
+ # execution time. The call below should thus not raise a timeout error.
+ authed_session.request("GET", self.TEST_URL, timeout=0.5, max_allowed_time=3.1)
+
+    def test_request_max_allowed_time_w_refresh_timeout_no_error(self, frozen_time):
+        """max_allowed_time is independent of the session's ``refresh_timeout``."""
+        tick_one_second = functools.partial(
+            frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
+        )
+
+        credentials = mock.Mock(
+            wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
+        )
+        # First response forces a refresh + retry; second succeeds.
+        adapter = TimeTickAdapterStub(
+            time_tick=tick_one_second,
+            responses=[
+                make_response(status=http_client.UNAUTHORIZED),
+                make_response(status=http_client.OK),
+            ],
+        )
+
+        authed_session = google.auth.transport.requests.AuthorizedSession(
+            credentials, refresh_timeout=1.1
+        )
+        authed_session.mount(self.TEST_URL, adapter)
+
+        # A short configured refresh timeout does not affect max_allowed_time.
+        # The latter is not adjusted to it and is only concerned with the actual
+        # execution time. The call below should thus not raise a timeout error
+        # (and `timeout` does not come into play either, as it's very long).
+        authed_session.request("GET", self.TEST_URL, timeout=60, max_allowed_time=3.1)
+
+    def test_request_timeout_w_refresh_timeout_timeout_error(self, frozen_time):
+        """Mocked elapsed time exceeding max_allowed_time raises Timeout."""
+        tick_one_second = functools.partial(
+            frozen_time.tick, delta=datetime.timedelta(seconds=1.0)
+        )
+
+        credentials = mock.Mock(
+            wraps=TimeTickCredentialsStub(time_tick=tick_one_second)
+        )
+        # First response forces a refresh + retry; second succeeds.
+        adapter = TimeTickAdapterStub(
+            time_tick=tick_one_second,
+            responses=[
+                make_response(status=http_client.UNAUTHORIZED),
+                make_response(status=http_client.OK),
+            ],
+        )
+
+        authed_session = google.auth.transport.requests.AuthorizedSession(
+            credentials, refresh_timeout=100
+        )
+        authed_session.mount(self.TEST_URL, adapter)
+
+        # An UNAUTHORIZED response triggers a refresh (an extra request), thus
+        # the final request that otherwise succeeds results in a timeout error
+        # (all three requests together last 3 mocked seconds).
+        with pytest.raises(requests.exceptions.Timeout):
+            authed_session.request(
+                "GET", self.TEST_URL, timeout=60, max_allowed_time=2.9
+            )
+
+ def test_authorized_session_without_default_host(self):
+ credentials = mock.create_autospec(service_account.Credentials)
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(credentials)
+
+ authed_session.credentials._create_self_signed_jwt.assert_called_once_with(None)
+
+ def test_authorized_session_with_default_host(self):
+ default_host = "pubsub.googleapis.com"
+ credentials = mock.create_autospec(service_account.Credentials)
+
+ authed_session = google.auth.transport.requests.AuthorizedSession(
+ credentials, default_host=default_host
+ )
+
+ authed_session.credentials._create_self_signed_jwt.assert_called_once_with(
+ "https://{}/".format(default_host)
+ )
+
+    def test_configure_mtls_channel_with_callback(self):
+        """A client cert callback enables mTLS and mounts the mTLS adapter."""
+        mock_callback = mock.Mock()
+        mock_callback.return_value = (
+            pytest.public_cert_bytes,
+            pytest.private_key_bytes,
+        )
+
+        auth_session = google.auth.transport.requests.AuthorizedSession(
+            credentials=mock.Mock()
+        )
+        # mTLS is only attempted when GOOGLE_API_USE_CLIENT_CERTIFICATE is set.
+        with mock.patch.dict(
+            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+        ):
+            auth_session.configure_mtls_channel(mock_callback)
+
+        assert auth_session.is_mtls
+        assert isinstance(
+            auth_session.adapters["https://"],
+            google.auth.transport.requests._MutualTlsAdapter,
+        )
+
+    @mock.patch(
+        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+    )
+    def test_configure_mtls_channel_with_metadata(self, mock_get_client_cert_and_key):
+        """Cert/key obtained from the mTLS helper enable the mTLS adapter."""
+        mock_get_client_cert_and_key.return_value = (
+            True,
+            pytest.public_cert_bytes,
+            pytest.private_key_bytes,
+        )
+
+        auth_session = google.auth.transport.requests.AuthorizedSession(
+            credentials=mock.Mock()
+        )
+        with mock.patch.dict(
+            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+        ):
+            auth_session.configure_mtls_channel()
+
+        assert auth_session.is_mtls
+        assert isinstance(
+            auth_session.adapters["https://"],
+            google.auth.transport.requests._MutualTlsAdapter,
+        )
+
+    @mock.patch.object(google.auth.transport.requests._MutualTlsAdapter, "__init__")
+    @mock.patch(
+        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+    )
+    def test_configure_mtls_channel_non_mtls(
+        self, mock_get_client_cert_and_key, mock_adapter_ctor
+    ):
+        """When no client cert is available the session stays non-mTLS."""
+        mock_get_client_cert_and_key.return_value = (False, None, None)
+
+        auth_session = google.auth.transport.requests.AuthorizedSession(
+            credentials=mock.Mock()
+        )
+        with mock.patch.dict(
+            os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+        ):
+            auth_session.configure_mtls_channel()
+
+        assert not auth_session.is_mtls
+
+        # Assert _MutualTlsAdapter constructor is not called.
+        mock_adapter_ctor.assert_not_called()
+
+    @mock.patch(
+        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+    )
+    def test_configure_mtls_channel_exceptions(self, mock_get_client_cert_and_key):
+        """Cert errors and a missing OpenSSL raise MutualTLSChannelError."""
+        mock_get_client_cert_and_key.side_effect = exceptions.ClientCertError()
+
+        auth_session = google.auth.transport.requests.AuthorizedSession(
+            credentials=mock.Mock()
+        )
+        with pytest.raises(exceptions.MutualTLSChannelError):
+            with mock.patch.dict(
+                os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+            ):
+                auth_session.configure_mtls_channel()
+
+        # An unimportable OpenSSL module is also reported as MutualTLSChannelError.
+        mock_get_client_cert_and_key.return_value = (False, None, None)
+        with mock.patch.dict("sys.modules"):
+            sys.modules["OpenSSL"] = None
+            with pytest.raises(exceptions.MutualTLSChannelError):
+                with mock.patch.dict(
+                    os.environ,
+                    {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"},
+                ):
+                    auth_session.configure_mtls_channel()
+
+    @mock.patch(
+        "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+    )
+    def test_configure_mtls_channel_without_client_cert_env(
+        self, get_client_cert_and_key
+    ):
+        """mTLS is skipped entirely when the env var is unset."""
+        # Test client cert won't be used if GOOGLE_API_USE_CLIENT_CERTIFICATE
+        # environment variable is not set.
+        auth_session = google.auth.transport.requests.AuthorizedSession(
+            credentials=mock.Mock()
+        )
+
+        auth_session.configure_mtls_channel()
+        assert not auth_session.is_mtls
+        get_client_cert_and_key.assert_not_called()
+
+        # The explicit-callback path is skipped as well.
+        mock_callback = mock.Mock()
+        auth_session.configure_mtls_channel(mock_callback)
+        assert not auth_session.is_mtls
+        mock_callback.assert_not_called()
+
+ def test_close_wo_passed_in_auth_request(self):
+ authed_session = google.auth.transport.requests.AuthorizedSession(
+ mock.sentinel.credentials
+ )
+ authed_session._auth_request_session = mock.Mock(spec=["close"])
+
+ authed_session.close()
+
+ authed_session._auth_request_session.close.assert_called_once_with()
+
+    def test_close_w_passed_in_auth_request(self):
+        """close() must not fail when the auth request was caller-supplied."""
+        http = mock.create_autospec(requests.Session)
+        auth_request = google.auth.transport.requests.Request(http)
+        authed_session = google.auth.transport.requests.AuthorizedSession(
+            mock.sentinel.credentials, auth_request=auth_request
+        )
+
+        authed_session.close()  # no raise
+
+
+class TestMutualTlsOffloadAdapter(object):
+    """Tests for google.auth.transport.requests._MutualTlsOffloadAdapter."""
+
+    @mock.patch.object(requests.adapters.HTTPAdapter, "init_poolmanager")
+    @mock.patch.object(requests.adapters.HTTPAdapter, "proxy_manager_for")
+    @mock.patch.object(
+        google.auth.transport._custom_tls_signer.CustomTlsSigner, "load_libraries"
+    )
+    @mock.patch.object(
+        google.auth.transport._custom_tls_signer.CustomTlsSigner, "set_up_custom_key"
+    )
+    @mock.patch.object(
+        google.auth.transport._custom_tls_signer.CustomTlsSigner,
+        "attach_to_ssl_context",
+    )
+    def test_success(
+        self,
+        mock_attach_to_ssl_context,
+        mock_set_up_custom_key,
+        mock_load_libraries,
+        mock_proxy_manager_for,
+        mock_init_poolmanager,
+    ):
+        """The adapter wires the custom TLS signer into both pool managers."""
+        enterprise_cert_file_path = "/path/to/enterprise/cert/json"
+        adapter = google.auth.transport.requests._MutualTlsOffloadAdapter(
+            enterprise_cert_file_path
+        )
+
+        mock_load_libraries.assert_called_once()
+        mock_set_up_custom_key.assert_called_once()
+        # One SSL context for the pool manager, one for the proxy manager.
+        assert mock_attach_to_ssl_context.call_count == 2
+
+        adapter.init_poolmanager()
+        mock_init_poolmanager.assert_called_with(ssl_context=adapter._ctx_poolmanager)
+
+        adapter.proxy_manager_for()
+        mock_proxy_manager_for.assert_called_with(ssl_context=adapter._ctx_proxymanager)
diff --git a/contrib/python/google-auth/py3/tests/transport/test_urllib3.py b/contrib/python/google-auth/py3/tests/transport/test_urllib3.py
new file mode 100644
index 0000000000..e832300321
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/transport/test_urllib3.py
@@ -0,0 +1,322 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import http.client as http_client
+import os
+import sys
+
+import mock
+import OpenSSL
+import pytest # type: ignore
+import urllib3 # type: ignore
+
+from google.auth import environment_vars
+from google.auth import exceptions
+import google.auth.credentials
+import google.auth.transport._mtls_helper
+import google.auth.transport.urllib3
+from google.oauth2 import service_account
+from tests.transport import compliance
+
+
+class TestRequestResponse(compliance.RequestResponseTests):
+    """Runs the shared transport compliance suite against the urllib3 Request."""
+
+    def make_request(self):
+        # Hook for the compliance suite: build the transport under test.
+        http = urllib3.PoolManager()
+        return google.auth.transport.urllib3.Request(http)
+
+    def test_timeout(self):
+        """The ``timeout`` kwarg is forwarded to urllib3's request call."""
+        http = mock.create_autospec(urllib3.PoolManager)
+        request = google.auth.transport.urllib3.Request(http)
+        request(url="http://example.com", method="GET", timeout=5)
+
+        assert http.request.call_args[1]["timeout"] == 5
+
+
+def test__make_default_http_with_certifi():
+ http = google.auth.transport.urllib3._make_default_http()
+ assert "cert_reqs" in http.connection_pool_kw
+
+
+@mock.patch.object(google.auth.transport.urllib3, "certifi", new=None)
+def test__make_default_http_without_certifi():
+ http = google.auth.transport.urllib3._make_default_http()
+ assert "cert_reqs" not in http.connection_pool_kw
+
+
+class CredentialsStub(google.auth.credentials.Credentials):
+    """Minimal credentials whose token changes deterministically on refresh."""
+
+    def __init__(self, token="token"):
+        super(CredentialsStub, self).__init__()
+        self.token = token
+
+    def apply(self, headers, token=None):
+        # Tests only compare header values, so no real auth scheme is used.
+        headers["authorization"] = self.token
+
+    def before_request(self, request, method, url, headers):
+        self.apply(headers)
+
+    def refresh(self, request):
+        # Appending "1" lets tests distinguish pre- and post-refresh tokens.
+        self.token += "1"
+
+    def with_quota_project(self, quota_project_id):
+        raise NotImplementedError()
+
+
+class HttpStub(object):
+    """Fake urllib3 pool that replays canned responses and records requests."""
+
+    def __init__(self, responses, headers=None):
+        self.responses = responses  # FIFO queue of canned responses
+        self.requests = []  # recorded (method, url, body, headers, kwargs)
+        self.headers = headers or {}
+
+    def urlopen(self, method, url, body=None, headers=None, **kwargs):
+        self.requests.append((method, url, body, headers, kwargs))
+        return self.responses.pop(0)
+
+    def clear(self):
+        # Mirrors urllib3.PoolManager.clear(); nothing to release here.
+        pass
+
+
+class ResponseStub(object):
+    """Fake urllib3 response carrying only ``status`` and ``data``."""
+
+    def __init__(self, status=http_client.OK, data=None):
+        self.status = status
+        self.data = data
+
+
+class TestMakeMutualTlsHttp(object):
+    """Tests for google.auth.transport.urllib3._make_mutual_tls_http."""
+
+    def test_success(self):
+        # Valid cert/key bytes produce a regular PoolManager.
+        http = google.auth.transport.urllib3._make_mutual_tls_http(
+            pytest.public_cert_bytes, pytest.private_key_bytes
+        )
+        assert isinstance(http, urllib3.PoolManager)
+
+    def test_crypto_error(self):
+        # Malformed PEM input surfaces OpenSSL's crypto error.
+        with pytest.raises(OpenSSL.crypto.Error):
+            google.auth.transport.urllib3._make_mutual_tls_http(
+                b"invalid cert", b"invalid key"
+            )
+
+    @mock.patch.dict("sys.modules", {"OpenSSL.crypto": None})
+    def test_import_error(self):
+        # Without pyOpenSSL importable the helper raises ImportError.
+        with pytest.raises(ImportError):
+            google.auth.transport.urllib3._make_mutual_tls_http(
+                pytest.public_cert_bytes, pytest.private_key_bytes
+            )
+
+
+class TestAuthorizedHttp(object):
+ TEST_URL = "http://example.com"
+
+ def test_authed_http_defaults(self):
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ mock.sentinel.credentials
+ )
+
+ assert authed_http.credentials == mock.sentinel.credentials
+ assert isinstance(authed_http.http, urllib3.PoolManager)
+
+ def test_urlopen_no_refresh(self):
+ credentials = mock.Mock(wraps=CredentialsStub())
+ response = ResponseStub()
+ http = HttpStub([response])
+
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials, http=http
+ )
+
+ result = authed_http.urlopen("GET", self.TEST_URL)
+
+ assert result == response
+ assert credentials.before_request.called
+ assert not credentials.refresh.called
+ assert http.requests == [
+ ("GET", self.TEST_URL, None, {"authorization": "token"}, {})
+ ]
+
+ def test_urlopen_refresh(self):
+ credentials = mock.Mock(wraps=CredentialsStub())
+ final_response = ResponseStub(status=http_client.OK)
+ # First request will 401, second request will succeed.
+ http = HttpStub([ResponseStub(status=http_client.UNAUTHORIZED), final_response])
+
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials, http=http
+ )
+
+ authed_http = authed_http.urlopen("GET", "http://example.com")
+
+ assert authed_http == final_response
+ assert credentials.before_request.call_count == 2
+ assert credentials.refresh.called
+ assert http.requests == [
+ ("GET", self.TEST_URL, None, {"authorization": "token"}, {}),
+ ("GET", self.TEST_URL, None, {"authorization": "token1"}, {}),
+ ]
+
+ def test_urlopen_no_default_host(self):
+ credentials = mock.create_autospec(service_account.Credentials)
+
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(credentials)
+
+ authed_http.credentials._create_self_signed_jwt.assert_called_once_with(None)
+
+ def test_urlopen_with_default_host(self):
+ default_host = "pubsub.googleapis.com"
+ credentials = mock.create_autospec(service_account.Credentials)
+
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials, default_host=default_host
+ )
+
+ authed_http.credentials._create_self_signed_jwt.assert_called_once_with(
+ "https://{}/".format(default_host)
+ )
+
+ def test_proxies(self):
+ http = mock.create_autospec(urllib3.PoolManager)
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(None, http=http)
+
+ with authed_http:
+ pass
+
+ assert http.__enter__.called
+ assert http.__exit__.called
+
+ authed_http.headers = mock.sentinel.headers
+ assert authed_http.headers == http.headers
+
+ @mock.patch("google.auth.transport.urllib3._make_mutual_tls_http", autospec=True)
+ def test_configure_mtls_channel_with_callback(self, mock_make_mutual_tls_http):
+ callback = mock.Mock()
+ callback.return_value = (pytest.public_cert_bytes, pytest.private_key_bytes)
+
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials=mock.Mock(), http=mock.Mock()
+ )
+
+ with pytest.warns(UserWarning):
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ is_mtls = authed_http.configure_mtls_channel(callback)
+
+ assert is_mtls
+ mock_make_mutual_tls_http.assert_called_once_with(
+ cert=pytest.public_cert_bytes, key=pytest.private_key_bytes
+ )
+
+ @mock.patch("google.auth.transport.urllib3._make_mutual_tls_http", autospec=True)
+ @mock.patch(
+ "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+ )
+ def test_configure_mtls_channel_with_metadata(
+ self, mock_get_client_cert_and_key, mock_make_mutual_tls_http
+ ):
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials=mock.Mock()
+ )
+
+ mock_get_client_cert_and_key.return_value = (
+ True,
+ pytest.public_cert_bytes,
+ pytest.private_key_bytes,
+ )
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ is_mtls = authed_http.configure_mtls_channel()
+
+ assert is_mtls
+ mock_get_client_cert_and_key.assert_called_once()
+ mock_make_mutual_tls_http.assert_called_once_with(
+ cert=pytest.public_cert_bytes, key=pytest.private_key_bytes
+ )
+
+ @mock.patch("google.auth.transport.urllib3._make_mutual_tls_http", autospec=True)
+ @mock.patch(
+ "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+ )
+ def test_configure_mtls_channel_non_mtls(
+ self, mock_get_client_cert_and_key, mock_make_mutual_tls_http
+ ):
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials=mock.Mock()
+ )
+
+ mock_get_client_cert_and_key.return_value = (False, None, None)
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ is_mtls = authed_http.configure_mtls_channel()
+
+ assert not is_mtls
+ mock_get_client_cert_and_key.assert_called_once()
+ mock_make_mutual_tls_http.assert_not_called()
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+ )
+ def test_configure_mtls_channel_exceptions(self, mock_get_client_cert_and_key):
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials=mock.Mock()
+ )
+
+ mock_get_client_cert_and_key.side_effect = exceptions.ClientCertError()
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ with mock.patch.dict(
+ os.environ, {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"}
+ ):
+ authed_http.configure_mtls_channel()
+
+ mock_get_client_cert_and_key.return_value = (False, None, None)
+ with mock.patch.dict("sys.modules"):
+ sys.modules["OpenSSL"] = None
+ with pytest.raises(exceptions.MutualTLSChannelError):
+ with mock.patch.dict(
+ os.environ,
+ {environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE: "true"},
+ ):
+ authed_http.configure_mtls_channel()
+
+ @mock.patch(
+ "google.auth.transport._mtls_helper.get_client_cert_and_key", autospec=True
+ )
+ def test_configure_mtls_channel_without_client_cert_env(
+ self, get_client_cert_and_key
+ ):
+ callback = mock.Mock()
+
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ credentials=mock.Mock(), http=mock.Mock()
+ )
+
+ # Test the callback is not called if GOOGLE_API_USE_CLIENT_CERTIFICATE is not set.
+ is_mtls = authed_http.configure_mtls_channel(callback)
+ assert not is_mtls
+ callback.assert_not_called()
+
+ # Test ADC client cert is not used if GOOGLE_API_USE_CLIENT_CERTIFICATE is not set.
+ is_mtls = authed_http.configure_mtls_channel(callback)
+ assert not is_mtls
+ get_client_cert_and_key.assert_not_called()
+
+ def test_clear_pool_on_del(self):
+ http = mock.create_autospec(urllib3.PoolManager)
+ authed_http = google.auth.transport.urllib3.AuthorizedHttp(
+ mock.sentinel.credentials, http=http
+ )
+ authed_http.__del__()
+ http.clear.assert_called_with()
+
+ authed_http.http = None
+ authed_http.__del__()
+ # Expect it to not crash
diff --git a/contrib/python/google-auth/py3/tests/ya.make b/contrib/python/google-auth/py3/tests/ya.make
new file mode 100644
index 0000000000..e7a1b3b272
--- /dev/null
+++ b/contrib/python/google-auth/py3/tests/ya.make
@@ -0,0 +1,77 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/Flask
+ contrib/python/google-auth
+ contrib/python/mock
+ contrib/python/responses
+ contrib/python/pyOpenSSL
+ contrib/python/pytest-localserver
+ contrib/python/oauth2client
+ contrib/python/freezegun
+)
+
+DATA(
+ arcadia/contrib/python/google-auth/py3/tests/data
+)
+
+PY_SRCS(
+ NAMESPACE tests
+ transport/__init__.py
+ transport/compliance.py
+)
+
+TEST_SRCS(
+ __init__.py
+ compute_engine/__init__.py
+ compute_engine/test__metadata.py
+ compute_engine/test_credentials.py
+ conftest.py
+ crypt/__init__.py
+ crypt/test__cryptography_rsa.py
+ crypt/test__python_rsa.py
+ crypt/test_crypt.py
+ crypt/test_es256.py
+ oauth2/__init__.py
+ oauth2/test__client.py
+ # oauth2/test_challenges.py - need pyu2f
+ oauth2/test_credentials.py
+ oauth2/test_gdch_credentials.py
+ oauth2/test_id_token.py
+ oauth2/test_reauth.py
+ oauth2/test_service_account.py
+ oauth2/test_sts.py
+ oauth2/test_utils.py
+ test__cloud_sdk.py
+ test__default.py
+ test__helpers.py
+ test__oauth2client.py
+ test__service_account_info.py
+ test_app_engine.py
+ test_aws.py
+ test_credentials.py
+ test_downscoped.py
+ test_external_account.py
+ test_external_account_authorized_user.py
+ test_iam.py
+ test_identity_pool.py
+ test_impersonated_credentials.py
+ test_jwt.py
+ test_pluggable.py
+ # transport/test__custom_tls_signer.py
+ transport/test__http_client.py
+ transport/test__mtls_helper.py
+ transport/test_grpc.py
+ transport/test_mtls.py
+ # transport/test_requests.py
+ # transport/test_urllib3.py
+)
+
+RESOURCE(
+ data/privatekey.pem data/privatekey.pem
+ data/public_cert.pem data/public_cert.pem
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/google-auth/py3/ya.make b/contrib/python/google-auth/py3/ya.make
new file mode 100644
index 0000000000..77b6e5f741
--- /dev/null
+++ b/contrib/python/google-auth/py3/ya.make
@@ -0,0 +1,100 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(2.23.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/cachetools
+ contrib/python/cryptography
+ contrib/python/grpcio
+ contrib/python/pyasn1-modules
+ contrib/python/requests
+ contrib/python/rsa
+ contrib/python/urllib3
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ google.auth._oauth2client
+ google.auth.transport._aiohttp_requests
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ google/auth/__init__.py
+ google/auth/_cloud_sdk.py
+ google/auth/_credentials_async.py
+ google/auth/_default.py
+ google/auth/_default_async.py
+ google/auth/_exponential_backoff.py
+ google/auth/_helpers.py
+ google/auth/_jwt_async.py
+ google/auth/_oauth2client.py
+ google/auth/_service_account_info.py
+ google/auth/api_key.py
+ google/auth/app_engine.py
+ google/auth/aws.py
+ google/auth/compute_engine/__init__.py
+ google/auth/compute_engine/_metadata.py
+ google/auth/compute_engine/credentials.py
+ google/auth/credentials.py
+ google/auth/crypt/__init__.py
+ google/auth/crypt/_cryptography_rsa.py
+ google/auth/crypt/_helpers.py
+ google/auth/crypt/_python_rsa.py
+ google/auth/crypt/base.py
+ google/auth/crypt/es256.py
+ google/auth/crypt/rsa.py
+ google/auth/downscoped.py
+ google/auth/environment_vars.py
+ google/auth/exceptions.py
+ google/auth/external_account.py
+ google/auth/external_account_authorized_user.py
+ google/auth/iam.py
+ google/auth/identity_pool.py
+ google/auth/impersonated_credentials.py
+ google/auth/jwt.py
+ google/auth/metrics.py
+ google/auth/pluggable.py
+ google/auth/transport/__init__.py
+ google/auth/transport/_aiohttp_requests.py
+ google/auth/transport/_custom_tls_signer.py
+ google/auth/transport/_http_client.py
+ google/auth/transport/_mtls_helper.py
+ google/auth/transport/grpc.py
+ google/auth/transport/mtls.py
+ google/auth/transport/requests.py
+ google/auth/transport/urllib3.py
+ google/auth/version.py
+ google/oauth2/__init__.py
+ google/oauth2/_client.py
+ google/oauth2/_client_async.py
+ google/oauth2/_credentials_async.py
+ google/oauth2/_id_token_async.py
+ google/oauth2/_reauth_async.py
+ google/oauth2/_service_account_async.py
+ google/oauth2/challenges.py
+ google/oauth2/credentials.py
+ google/oauth2/gdch_credentials.py
+ google/oauth2/id_token.py
+ google/oauth2/reauth.py
+ google/oauth2/service_account.py
+ google/oauth2/sts.py
+ google/oauth2/utils.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/google-auth/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/google-auth/ya.make b/contrib/python/google-auth/ya.make
new file mode 100644
index 0000000000..c67baa124d
--- /dev/null
+++ b/contrib/python/google-auth/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/google-auth/py2)
+ELSE()
+ PEERDIR(contrib/python/google-auth/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/httplib2/py2/.dist-info/METADATA b/contrib/python/httplib2/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..325c6f76b7
--- /dev/null
+++ b/contrib/python/httplib2/py2/.dist-info/METADATA
@@ -0,0 +1,76 @@
+Metadata-Version: 2.1
+Name: httplib2
+Version: 0.20.4
+Summary: A comprehensive HTTP client library.
+Home-page: https://github.com/httplib2/httplib2
+Author: Joe Gregorio
+Author-email: joe@bitworking.org
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Topic :: Software Development :: Libraries
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+License-File: LICENSE
+Requires-Dist: pyparsing (<3,>=2.4.2) ; python_version < "3.0"
+Requires-Dist: pyparsing (!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2) ; python_version > "3.0"
+
+
+
+A comprehensive HTTP client library, ``httplib2`` supports many features left out of other HTTP libraries.
+
+**HTTP and HTTPS**
+ HTTPS support is only available if the socket module was compiled with SSL support.
+
+
+**Keep-Alive**
+ Supports HTTP 1.1 Keep-Alive, keeping the socket open and performing multiple requests over the same connection if possible.
+
+
+**Authentication**
+ The following three types of HTTP Authentication are supported. These can be used over both HTTP and HTTPS.
+
+ * Digest
+ * Basic
+ * WSSE
+
+**Caching**
+ The module can optionally operate with a private cache that understands the Cache-Control:
+ header and uses both the ETag and Last-Modified cache validators. Both file system
+ and memcached based caches are supported.
+
+
+**All Methods**
+ The module can handle any HTTP request method, not just GET and POST.
+
+
+**Redirects**
+ Automatically follows 3XX redirects on GETs.
+
+
+**Compression**
+ Handles both 'deflate' and 'gzip' types of compression.
+
+
+**Lost update support**
+ Automatically adds back ETags into PUT requests to resources we have already cached. This implements Section 3.2 of Detecting the Lost Update Problem Using Unreserved Checkout
+
+
+**Unit Tested**
+ A large and growing set of unit tests.
+
+
diff --git a/contrib/python/httplib2/py2/.dist-info/top_level.txt b/contrib/python/httplib2/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..fb881ece05
--- /dev/null
+++ b/contrib/python/httplib2/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+httplib2
diff --git a/contrib/python/httplib2/py2/LICENSE b/contrib/python/httplib2/py2/LICENSE
new file mode 100644
index 0000000000..ae38286693
--- /dev/null
+++ b/contrib/python/httplib2/py2/LICENSE
@@ -0,0 +1,23 @@
+Httplib2 Software License
+
+Copyright (c) 2006 by Joe Gregorio
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/httplib2/py2/README.md b/contrib/python/httplib2/py2/README.md
new file mode 100644
index 0000000000..329c4a4892
--- /dev/null
+++ b/contrib/python/httplib2/py2/README.md
@@ -0,0 +1,113 @@
+Introduction
+============
+
+httplib2 is a comprehensive HTTP client library, httplib2.py supports many
+features left out of other HTTP libraries.
+
+### HTTP and HTTPS
+
+HTTPS support is only available if the socket module was
+compiled with SSL support.
+
+### Keep-Alive
+
+Supports HTTP 1.1 Keep-Alive, keeping the socket open and
+performing multiple requests over the same connection if
+possible.
+
+### Authentication
+
+The following three types of HTTP Authentication are
+supported. These can be used over both HTTP and HTTPS.
+
+* Digest
+* Basic
+* WSSE
+
+### Caching
+
+The module can optionally operate with a private cache that
+understands the Cache-Control: header and uses both the ETag
+and Last-Modified cache validators.
+
+### All Methods
+
+The module can handle any HTTP request method, not just GET
+and POST.
+
+### Redirects
+
+Automatically follows 3XX redirects on GETs.
+
+### Compression
+
+Handles both 'deflate' and 'gzip' types of compression.
+
+### Lost update support
+
+Automatically adds back ETags into PUT requests to resources
+we have already cached. This implements Section 3.2 of
+Detecting the Lost Update Problem Using Unreserved Checkout.
+
+### Unit Tested
+
+A large and growing set of unit tests.
+
+
+Installation
+============
+
+
+ $ pip install httplib2
+
+
+Usage
+=====
+
+A simple retrieval:
+
+```python
+import httplib2
+h = httplib2.Http(".cache")
+(resp_headers, content) = h.request("http://example.org/", "GET")
+```
+
+The 'content' is the content retrieved from the URL. The content
+is already decompressed or unzipped if necessary.
+
+To PUT some content to a server that uses SSL and Basic authentication:
+
+```python
+import httplib2
+h = httplib2.Http(".cache")
+h.add_credentials('name', 'password')
+(resp, content) = h.request("https://example.org/chapter/2",
+ "PUT", body="This is text",
+ headers={'content-type':'text/plain'} )
+```
+
+Use the Cache-Control: header to control how the caching operates.
+
+```python
+import httplib2
+h = httplib2.Http(".cache")
+(resp, content) = h.request("http://bitworking.org/", "GET")
+...
+(resp, content) = h.request("http://bitworking.org/", "GET",
+ headers={'cache-control':'no-cache'})
+```
+
+The first request will be cached and since this is a request
+to bitworking.org it will be set to be cached for two hours,
+because that is how I have my server configured. Any subsequent
+GET to that URI will return the value from the on-disk cache
+and no request will be made to the server. You can use the
+Cache-Control: header to change the cache's behavior and in
+this example the second request adds the Cache-Control:
+header with a value of 'no-cache' which tells the library
+that the cached copy must not be used when handling this request.
+
+More example usage can be found at:
+
+ * https://github.com/httplib2/httplib2/wiki/Examples
+ * https://github.com/httplib2/httplib2/wiki/Examples-Python3
diff --git a/contrib/python/httplib2/py2/httplib2/__init__.py b/contrib/python/httplib2/py2/httplib2/__init__.py
new file mode 100644
index 0000000000..a9f793d6e8
--- /dev/null
+++ b/contrib/python/httplib2/py2/httplib2/__init__.py
@@ -0,0 +1,1989 @@
+"""Small, fast HTTP client library for Python.
+
+Features persistent connections, cache, and Google App Engine Standard
+Environment support.
+"""
+
+from __future__ import print_function
+
+__author__ = "Joe Gregorio (joe@bitworking.org)"
+__copyright__ = "Copyright 2006, Joe Gregorio"
+__contributors__ = [
+ "Thomas Broyer (t.broyer@ltgt.net)",
+ "James Antill",
+ "Xavier Verges Farrero",
+ "Jonathan Feinberg",
+ "Blair Zajac",
+ "Sam Ruby",
+ "Louis Nyffenegger",
+ "Alex Yu",
+]
+__license__ = "MIT"
+__version__ = "0.20.4"
+
+import base64
+import calendar
+import copy
+import email
+import email.FeedParser
+import email.Message
+import email.Utils
+import errno
+import gzip
+import httplib
+import os
+import random
+import re
+import StringIO
+import sys
+import time
+import urllib
+import urlparse
+import zlib
+
+try:
+ from hashlib import sha1 as _sha, md5 as _md5
+except ImportError:
+ # prior to Python 2.5, these were separate modules
+ import sha
+ import md5
+
+ _sha = sha.new
+ _md5 = md5.new
+import hmac
+from gettext import gettext as _
+import socket
+
+try:
+ from httplib2 import socks
+except ImportError:
+ try:
+ import socks
+ except (ImportError, AttributeError):
+ socks = None
+from httplib2 import auth
+from httplib2.error import *
+
+# Build the appropriate socket wrapper for ssl
+ssl = None
+ssl_SSLError = None
+ssl_CertificateError = None
+try:
+ import ssl # python 2.6
+except ImportError:
+ pass
+if ssl is not None:
+ ssl_SSLError = getattr(ssl, "SSLError", None)
+ ssl_CertificateError = getattr(ssl, "CertificateError", None)
+
+
def _ssl_wrap_socket(sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password):
    """Wrap *sock* in TLS and return the wrapped socket.

    disable_validation: when true, skip server-certificate verification.
    ca_certs: optional CA bundle path used for verification.
    ssl_version: explicit protocol constant; defaults to PROTOCOL_SSLv23.
    hostname: server name for SNI/hostname checking (SSLContext path only).
    key_password: passphrase for the client key (SSLContext path only).

    Raises NotSupportedOnThisPlatform when a key password is given but the
    interpreter lacks ssl.SSLContext.
    """
    if disable_validation:
        cert_reqs = ssl.CERT_NONE
    else:
        cert_reqs = ssl.CERT_REQUIRED
    if ssl_version is None:
        ssl_version = ssl.PROTOCOL_SSLv23

    if hasattr(ssl, "SSLContext"):  # Python 2.7.9
        # Modern path: SSLContext supports SNI, hostname checking and
        # password-protected client keys.
        context = ssl.SSLContext(ssl_version)
        context.verify_mode = cert_reqs
        # Hostname checking only makes sense when certificates are verified.
        context.check_hostname = cert_reqs != ssl.CERT_NONE
        if cert_file:
            if key_password:
                context.load_cert_chain(cert_file, key_file, key_password)
            else:
                context.load_cert_chain(cert_file, key_file)
        if ca_certs:
            context.load_verify_locations(ca_certs)
        return context.wrap_socket(sock, server_hostname=hostname)
    else:
        # Legacy path (pre-2.7.9): ssl.wrap_socket cannot decrypt client keys.
        if key_password:
            raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
        return ssl.wrap_socket(
            sock, keyfile=key_file, certfile=cert_file, cert_reqs=cert_reqs, ca_certs=ca_certs, ssl_version=ssl_version,
        )
+
+
def _ssl_wrap_socket_unsupported(
    sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password
):
    """Fallback TLS wrapper used when the ssl module is unavailable.

    (It is rebound to the _ssl_wrap_socket name at module level in that case.)
    Only unvalidated connections are possible through the legacy socket.ssl
    API, so validation requests are rejected up front.
    """
    if not disable_validation:
        raise CertificateValidationUnsupported(
            "SSL certificate validation is not supported without "
            "the ssl module installed. To avoid this error, install "
            "the ssl module, or explicity disable validation."
        )
    if key_password:
        raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
    # Legacy pre-ssl-module API: no certificate validation, no SNI.
    ssl_sock = socket.ssl(sock, key_file, cert_file)
    return httplib.FakeSocket(sock, ssl_sock)
+
+
# If the ssl module is missing entirely, fall back to the unvalidated wrapper.
if ssl is None:
    _ssl_wrap_socket = _ssl_wrap_socket_unsupported

# Python 2.3+ gets the real IRI->URI converter; older interpreters a no-op.
if sys.version_info >= (2, 3):
    from .iri2uri import iri2uri
else:

    def iri2uri(uri):
        # No IRI support before 2.3; pass the value through unchanged.
        return uri
+
+
def has_timeout(timeout):  # python 2.6
    """Return True when *timeout* is an explicit timeout value.

    None and the socket module's global-default sentinel (present on
    Python 2.6+) both mean "no explicit timeout".
    """
    if timeout is None:
        return False
    if hasattr(socket, "_GLOBAL_DEFAULT_TIMEOUT"):
        return timeout is not socket._GLOBAL_DEFAULT_TIMEOUT
    return True
+
+
+__all__ = [
+ "Http",
+ "Response",
+ "ProxyInfo",
+ "HttpLib2Error",
+ "RedirectMissingLocation",
+ "RedirectLimit",
+ "FailedToDecompressContent",
+ "UnimplementedDigestAuthOptionError",
+ "UnimplementedHmacDigestAuthOptionError",
+ "debuglevel",
+ "ProxiesUnavailableError",
+]
+
+# The httplib debug level, set to a non-zero value to get debug output
+debuglevel = 0
+
+# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
+RETRIES = 2
+
# Python 2.3 support
if sys.version_info < (2, 4):

    # Deliberately shadows the 2.4+ builtin of the same name: sorts the
    # sequence in place and returns it, which is all this module needs.
    def sorted(seq):
        seq.sort()
        return seq
+
+
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()


# Monkey-patch getheaders onto HTTPResponse for interpreters that lack it.
if not hasattr(httplib.HTTPResponse, "getheaders"):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
+
+
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    """Base class for every error raised by this module."""
    pass


# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    """Error that also carries the offending response and body."""

    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)


class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    """A 3xx redirect response arrived without a Location header."""
    pass


class RedirectLimit(HttpLib2ErrorWithResponse):
    """More redirects were encountered than the configured maximum."""
    pass


class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    """The body claimed gzip/deflate encoding but could not be decompressed."""
    pass


class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested a Digest-auth option this client does not implement."""
    pass


class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an HMACDigest option this client does not implement."""
    pass


class MalformedHeader(HttpLib2Error):
    """A header value could not be parsed."""
    pass


class RelativeURIError(HttpLib2Error):
    """A relative URI was supplied where an absolute one is required."""
    pass


class ServerNotFoundError(HttpLib2Error):
    """The target server could not be located."""
    pass


class ProxiesUnavailableError(HttpLib2Error):
    """Proxy use was requested but proxy support (socks) is unavailable."""
    pass


class CertificateValidationUnsupported(HttpLib2Error):
    """Certificate validation was requested but the ssl module is missing."""
    pass


class SSLHandshakeError(HttpLib2Error):
    """The TLS handshake failed."""
    pass


class NotSupportedOnThisPlatform(HttpLib2Error):
    """A requested feature is unavailable in this Python build."""
    pass


class CertificateHostnameMismatch(SSLHandshakeError):
    """The server certificate does not match the requested hostname."""

    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        self.host = host
        self.cert = cert


class NotRunningAppEngineEnvironment(HttpLib2Error):
    """App Engine specific behavior was requested outside App Engine."""
    pass
+
+
+# Open Items:
+# -----------
+# Proxy support
+
+# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
+
+# Pluggable cache storage (supports storing the cache in
+# flat files by default. We need a plug-in architecture
+# that can support Berkeley DB and Squid)
+
+# == Known Issues ==
+# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
+# Does not handle Cache-Control: max-stale
+# Does not use Age: headers when calculating cache freshness.
+
+# The number of redirections to follow before giving up.
+# Note that only GET redirects are automatically followed.
+# Will also honor 301 requests by saving that info and never
+# requesting that URI again.
+DEFAULT_MAX_REDIRECTS = 5
+
+from httplib2 import certs
+
+CA_CERTS = certs.where()
+
+# Which headers are hop-by-hop headers by default
+HOP_BY_HOP = [
+ "connection",
+ "keep-alive",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "te",
+ "trailers",
+ "transfer-encoding",
+ "upgrade",
+]
+
+# https://tools.ietf.org/html/rfc7231#section-8.1.3
+SAFE_METHODS = ("GET", "HEAD") # TODO add "OPTIONS", "TRACE"
+
+# To change, assign to `Http().redirect_codes`
+REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308))
+
+
+def _get_end2end_headers(response):
+ hopbyhop = list(HOP_BY_HOP)
+ hopbyhop.extend([x.strip() for x in response.get("connection", "").split(",")])
+ return [header for header in response.keys() if header not in hopbyhop]
+
+
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")


def parse_uri(uri):
    """Parse *uri* using the regex given in Appendix B of RFC 3986.

    Returns a (scheme, authority, path, query, fragment) tuple; components
    that are absent come back as None.
    """
    groups = URI.match(uri).groups()
    return groups[1], groups[3], groups[4], groups[6], groups[8]


def urlnorm(uri):
    """Normalize an absolute URI.

    Lower-cases scheme and authority, defaults an empty path to "/", and
    returns (scheme, authority, request_uri, defrag_uri) where defrag_uri
    has the fragment stripped. Raises RelativeURIError for relative URIs.
    """
    scheme, authority, path, query, fragment = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    if query:
        request_uri = "?".join([path, query])
    else:
        request_uri = path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
+
+
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r"^\w+://")
re_unsafe = re.compile(r"[^\w\-_.()=!]+")


def safename(filename):
    """Return a filename suitable for the cache.
    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    # Python 2 str/unicode split: hash the UTF-8 bytes, sanitize the text form.
    if isinstance(filename, str):
        filename_bytes = filename
        filename = filename.decode("utf-8")
    else:
        filename_bytes = filename.encode("utf-8")
    filemd5 = _md5(filename_bytes).hexdigest()
    # Drop the scheme prefix and every character outside the safe set.
    filename = re_url_scheme.sub("", filename)
    filename = re_unsafe.sub("", filename)

    # limit length of filename (vital for Windows)
    # https://github.com/httplib2/httplib2/pull/74
    # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5>
    # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars
    # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum:
    filename = filename[:90]

    # The md5 suffix keeps distinct URLs distinct even after truncation.
    return ",".join((filename, filemd5))
+
+
+NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+")
+
+
+def _normalize_headers(headers):
+ return dict([(key.lower(), NORMALIZE_SPACE.sub(value, " ").strip()) for (key, value) in headers.iteritems()])
+
+
+def _parse_cache_control(headers):
+ retval = {}
+ if "cache-control" in headers:
+ parts = headers["cache-control"].split(",")
+ parts_with_args = [
+ tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")
+ ]
+ parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
+ retval = dict(parts_with_args + parts_wo_args)
+ return retval
+
+
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on; useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
+
+
# TODO: add current time as _entry_disposition argument to avoid sleep in tests
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh

    Returns one of "FRESH", "STALE" or "TRANSPARENT".
    """

    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    # HTTP/1.0 Pragma: no-cache bypasses the cache entirely; it is also
    # upgraded to an equivalent Cache-Control request header.
    if "pragma" in request_headers and request_headers["pragma"].lower().find("no-cache") != -1:
        retval = "TRANSPARENT"
        if "cache-control" not in request_headers:
            request_headers["cache-control"] = "no-cache"
    elif "no-cache" in cc:
        retval = "TRANSPARENT"
    elif "no-cache" in cc_response:
        retval = "STALE"
    elif "only-if-cached" in cc:
        # Caller explicitly wants the cached copy regardless of age.
        retval = "FRESH"
    elif "date" in response_headers:
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers["date"]))
        now = time.time()
        current_age = max(0, now - date)
        # Freshness lifetime: response max-age wins over Expires.
        if "max-age" in cc_response:
            try:
                freshness_lifetime = int(cc_response["max-age"])
            except ValueError:
                freshness_lifetime = 0
        elif "expires" in response_headers:
            expires = email.Utils.parsedate_tz(response_headers["expires"])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age / min-fresh directives tighten the limits.
        if "max-age" in cc:
            try:
                freshness_lifetime = int(cc["max-age"])
            except ValueError:
                freshness_lifetime = 0
        if "min-fresh" in cc:
            try:
                min_fresh = int(cc["min-fresh"])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
+
+
def _decompressContent(response, new_content):
    """Decompress *new_content* according to the response Content-Encoding.

    Returns the decoded body; on failure the partial content is discarded
    and FailedToDecompressContent is raised with an empty body.
    """
    content = new_content
    try:
        encoding = response.get("content-encoding", None)
        if encoding in ["gzip", "deflate"]:
            if encoding == "gzip":
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == "deflate":
                # Raw deflate stream: negative wbits means no zlib header.
                content = zlib.decompress(content, -zlib.MAX_WBITS)
            # The body length changed, so fix up Content-Length to match.
            response["content-length"] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
            response["-content-encoding"] = response["content-encoding"]
            del response["content-encoding"]
    except (IOError, zlib.error):
        # Deliberately drop whatever partial content we produced.
        content = ""
        raise FailedToDecompressContent(
            _("Content purported to be compressed with %s but failed to decompress.") % response.get("content-encoding"),
            response,
            content,
        )
    return content
+
+
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or drop) a response in *cache* under *cachekey*.

    Honors no-store from either side by deleting the entry. The stored text
    is a status line plus the end-to-end headers plus the body, with
    "-varied-<name>" pseudo-headers recording the request header values
    named by the response's Vary header.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if "no-store" in cc or "no-store" in cc_response:
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                if key not in ["status", "content-encoding", "transfer-encoding"]:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get("vary", None)
            if vary:
                vary_headers = vary.lower().replace(" ", "").split(",")
                for header in vary_headers:
                    key = "-varied-%s" % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            # A 304 revalidates the entry we already hold: cache it as a 200.
            status = response_headers.status
            if status == 304:
                status = 200

            status_header = "status: %d\r\n" % status

            header_str = info.as_string()

            # Normalize bare CR or bare LF to CRLF so the entry reparses cleanly.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
+
+
def _cnonce():
    """Return a 16-hex-char client nonce derived from the time and random digits."""
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
    return dig[:16]


def _wsse_username_token(cnonce, iso_now, password):
    """Return the base64 SHA-1 PasswordDigest for a WSSE UsernameToken."""
    return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
+
+
+# For credentials we need two things, first
+# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
+# Then we also need a list of URIs that have already demanded authentication
+# That list is tricky since sub-URIs can take the same auth, or the
+# auth scheme may change as you descend the tree.
+# So we also need each Auth instance to be able to tell us
+# how close to the 'top' it is.
+
+
class Authentication(object):
    """Base class for a single authentication scheme scoped to one host/path.

    Subclasses override request() to attach credentials to outgoing requests
    and response() to react to server challenges (e.g. nonce updates).
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (_scheme, _authority, path, _query, _fragment) = parse_uri(request_uri)
        self.credentials = credentials
        self.host = host
        self.path = path
        self.http = http

    def depth(self, request_uri):
        """Number of path segments in *request_uri* below this entry's root path."""
        (_scheme, _authority, _path, _query, _fragment) = parse_uri(request_uri)
        return request_uri[len(self.path) :].count("/")

    def inscope(self, host, request_uri):
        """True when *request_uri* on *host* falls under this auth entry."""
        # XXX Should we normalize the request_uri?
        (_scheme, _authority, path, _query, _fragment) = parse_uri(request_uri)
        if host != self.host:
            return False
        return path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-ride this in sub-classes."""
        pass

    def response(self, response, content):
        """Hook for updating state from the last authorized response.

        Return True when the request should be retried — for example,
        Digest may report stale=true.
        """
        return False
+
+
class BasicAuthentication(Authentication):
    """RFC 2617 Basic auth: base64-encoded credentials on every request."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        # credentials is a (name, password) pair.
        headers["authorization"] = "Basic " + base64.b64encode("%s:%s" % self.credentials).strip()
+
+
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        # Parse the server's Digest challenge out of WWW-Authenticate.
        self.challenge = auth._parse_www_authenticate(response, "www-authenticate")["digest"]
        qop = self.challenge.get("qop", "auth")
        # Only qop="auth" is supported; anything else is rejected below.
        self.challenge["qop"] = ("auth" in [x.strip() for x in qop.split()]) and "auth" or None
        if self.challenge["qop"] is None:
            raise UnimplementedDigestAuthOptionError(_("Unsupported value for qop: %s." % qop))
        self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper()
        if self.challenge["algorithm"] != "MD5":
            raise UnimplementedDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        # A1 per RFC 2617: username:realm:password.
        self.A1 = "".join([self.credentials[0], ":", self.challenge["realm"], ":", self.credentials[1],])
        # Nonce count, incremented on every request made with this challenge.
        self.challenge["nc"] = 1

    def request(self, method, request_uri, headers, content, cnonce=None):
        """Modify the request headers"""
        # H and KD are the hash helpers from RFC 2617 section 3.2.1.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        # A2 for qop="auth": method:uri.
        A2 = "".join([method, ":", request_uri])
        self.challenge["cnonce"] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(
            H(self.A1),
            "%s:%s:%s:%s:%s"
            % (
                self.challenge["nonce"],
                "%08x" % self.challenge["nc"],
                self.challenge["cnonce"],
                self.challenge["qop"],
                H(A2),
            ),
        )
        headers["authorization"] = (
            'Digest username="%s", realm="%s", nonce="%s", '
            'uri="%s", algorithm=%s, response=%s, qop=%s, '
            'nc=%08x, cnonce="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["nonce"],
            request_uri,
            self.challenge["algorithm"],
            request_digest,
            self.challenge["qop"],
            self.challenge["nc"],
            self.challenge["cnonce"],
        )
        # opaque must be echoed back verbatim when the server supplied one.
        if self.challenge.get("opaque"):
            headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"]
        self.challenge["nc"] += 1

    def response(self, response, content):
        """Refresh nonce state; return True when the request must be retried."""
        if "authentication-info" not in response:
            challenge = auth._parse_www_authenticate(response, "www-authenticate").get("digest", {})
            # stale=true means the nonce expired but the credentials are fine:
            # adopt the new nonce and retry.
            if "true" == challenge.get("stale"):
                self.challenge["nonce"] = challenge["nonce"]
                self.challenge["nc"] = 1
                return True
        else:
            updated_challenge = auth._parse_authentication_info(response, "authentication-info")

            # The server may rotate the nonce proactively via nextnonce.
            if "nextnonce" in updated_challenge:
                self.challenge["nonce"] = updated_challenge["nextnonce"]
                self.challenge["nc"] = 1
        return False
+
+
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""

    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = auth._parse_www_authenticate(response, "www-authenticate")
        self.challenge = challenge["hmacdigest"]
        # TODO: self.challenge['domain']
        # Validate / default each challenge field, rejecting unsupported options.
        self.challenge["reason"] = self.challenge.get("reason", "unauthorized")
        if self.challenge["reason"] not in ["unauthorized", "integrity"]:
            self.challenge["reason"] = "unauthorized"
        self.challenge["salt"] = self.challenge.get("salt", "")
        if not self.challenge.get("snonce"):
            raise UnimplementedHmacDigestAuthOptionError(
                _("The challenge doesn't contain a server nonce, or this one is empty.")
            )
        self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1")
        if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1")
        if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for pw-algorithm: %s." % self.challenge["pw-algorithm"])
            )
        # Pick the hash implementations matching the negotiated algorithms.
        if self.challenge["algorithm"] == "HMAC-MD5":
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge["pw-algorithm"] == "MD5":
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derive the HMAC key: hash(username : hash(password+salt) : realm).
        self.key = "".join(
            [
                self.credentials[0],
                ":",
                self.pwhashmod.new("".join([self.credentials[1], self.challenge["salt"]])).hexdigest().lower(),
                ":",
                self.challenge["realm"],
            ]
        )
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers the end-to-end request headers, in order.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge["snonce"], headers_val,)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers["authorization"] = (
            'HMACDigest username="%s", realm="%s", snonce="%s",'
            ' cnonce="%s", uri="%s", created="%s", '
            'response="%s", headers="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["snonce"],
            cnonce,
            request_uri,
            created,
            request_digest,
            keylist,
        )

    def response(self, response, content):
        """Return True (retry) when the server reports an integrity or stale condition."""
        challenge = auth._parse_www_authenticate(response, "www-authenticate").get("hmacdigest", {})
        if challenge.get("reason") in ["integrity", "stale"]:
            return True
        return False
+
+
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers["authorization"] = 'WSSE profile="UsernameToken"'
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        # PasswordDigest = base64(sha1(nonce + created + password)).
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers["X-WSSE"] = ('UsernameToken Username="%s", PasswordDigest="%s", ' 'Nonce="%s", Created="%s"') % (
            self.credentials[0],
            password_digest,
            cnonce,
            iso_now,
        )
+
+
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin: exchanges the credentials for an Auth token at
    construction time, then sends that token on every request."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode

        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = auth._parse_www_authenticate(response, "www-authenticate")
        service = challenge["googlelogin"].get("service", "xapi")
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == "xapi" and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        # elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        # BUG FIX: this dict was previously assigned to a local named `auth`,
        # which made `auth` function-local for the whole method and caused the
        # earlier `auth._parse_www_authenticate(...)` call to raise
        # UnboundLocalError. Renamed so the module-level `auth` stays visible.
        login_params = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers["user-agent"],)
        resp, content = self.http.request(
            "https://www.google.com/accounts/ClientLogin",
            method="POST",
            body=urlencode(login_params),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        lines = content.split("\n")
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        # 403 means the login failed; remember an empty token rather than raising.
        if resp.status == 403:
            self.Auth = ""
        else:
            self.Auth = d["Auth"]

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers["authorization"] = "GoogleLogin Auth=" + self.Auth
+
+
+AUTH_SCHEME_CLASSES = {
+ "basic": BasicAuthentication,
+ "wsse": WsseAuthentication,
+ "digest": DigestAuthentication,
+ "hmacdigest": HmacDigestAuthentication,
+ "googlelogin": GoogleLoginAuthentication,
+}
+
+AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
+
+
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """

    def __init__(self, cache, safe=safename):  # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """Args:
        cache: directory path used as the store (created if missing).
        safe: callable mapping a cache key to a filesystem-safe filename.
        """
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for *key*, or None when absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the Python-2-only file() builtin, and a with
            # statement so the handle is closed even when read() raises (the
            # original leaked the file descriptor in that case).
            with open(cacheFullPath, "rb") as f:
                retval = f.read()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store *value* under *key*, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        with open(cacheFullPath, "wb") as f:
            f.write(value)

    def delete(self, key):
        """Remove the entry for *key*; a missing entry is not an error."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
+
+
class Credentials(object):
    """Registry of (name, password) pairs, each optionally scoped to a domain."""

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register *name*/*password*; an empty *domain* matches every host."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Drop every registered credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs applicable to *domain*, in insertion order."""
        for entry in self.credentials:
            stored_domain = entry[0]
            if stored_domain == "" or stored_domain == domain:
                yield entry[1:]


class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""

    def add(self, key, cert, domain, password):
        """Register a client key/cert (and optional key password) for *domain*."""
        self.credentials.append((domain.lower(), key, cert, password))

    def iter(self, domain):
        """Yield (key, cert, password) triples applicable to *domain*."""
        for entry in self.credentials:
            if entry[0] == "" or entry[0] == domain:
                yield entry[1:]
+
+
class AllHosts(object):
    """Sentinel: assign to ProxyInfo.bypass_hosts to bypass the proxy for every host."""
    pass


class ProxyInfo(object):
    """Collect information required to use a proxy."""

    # Hostnames (exact names or ".suffix" patterns) excluded from proxying;
    # may also be the AllHosts sentinel.
    bypass_hosts = ()

    def __init__(
        self, proxy_type, proxy_host, proxy_port, proxy_rdns=True, proxy_user=None, proxy_pass=None, proxy_headers=None,
    ):
        """Args:

        proxy_type: one of the socks.PROXY_TYPE_XXX constants, e.g.
            ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
                      proxy_host='localhost', proxy_port=8000).
        proxy_host: hostname or IP address of the proxy server.
        proxy_port: port the proxy server listens on.
        proxy_rdns: when True (default), DNS resolution is delegated to the
            proxy instead of being performed locally — useful when non-local
            names cannot be resolved. Defaulted to False in httplib2 <= 0.9.
        proxy_user / proxy_pass: credentials for proxy authentication.
        proxy_headers: additional or modified headers for the proxy CONNECT
            request.
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        self.proxy_headers = proxy_headers

    def astuple(self):
        """Return every setting as a 7-tuple (type, host, port, rdns, user, pass, headers)."""
        fields = (
            self.proxy_type,
            self.proxy_host,
            self.proxy_port,
            self.proxy_rdns,
            self.proxy_user,
            self.proxy_pass,
            self.proxy_headers,
        )
        return fields

    def isgood(self):
        """True when both a proxy host and a proxy port are configured."""
        return self.proxy_host is not None and self.proxy_port is not None

    def applies_to(self, hostname):
        """True when requests to *hostname* should go through this proxy."""
        return not self.bypass_host(hostname)

    def bypass_host(self, hostname):
        """Has this host been excluded from the proxy config"""
        if self.bypass_hosts is AllHosts:
            return True

        # Canonical form ".host" lets one endswith() cover both match kinds.
        candidate = "." + hostname.lstrip(".")
        for pattern in self.bypass_hosts:
            if pattern.startswith("."):
                # *.suffix pattern
                if candidate.endswith(pattern):
                    return True
            elif candidate == "." + pattern:
                # exact hostname match
                return True
        return False

    def __repr__(self):
        return (
            "<ProxyInfo type={p.proxy_type} "
            "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}"
            " user={p.proxy_user} headers={p.proxy_headers}>"
        ).format(p=self)
+
+
def proxy_info_from_environment(method="http"):
    """Build a ProxyInfo from the http_proxy / https_proxy environment variables.

    Returns None when *method* is not "http"/"https" or no proxy URL is set.
    """
    if method not in ("http", "https"):
        return None

    env_var = method + "_proxy"
    # Lower-case variable wins; fall back to the upper-case spelling.
    url = os.environ.get(env_var, os.environ.get(env_var.upper()))
    if not url:
        return None
    return proxy_info_from_url(url, method, None)
+
+
def proxy_info_from_url(url, method="http", noproxy=None):
    """Construct a ProxyInfo from a URL (such as http_proxy env var)

    method selects the default port (80/443) when the URL omits one.
    noproxy: comma-separated bypass list; None means read no_proxy/NO_PROXY
    from the environment, and "*" bypasses every host.
    """
    url = urlparse.urlparse(url)

    proxy_type = 3  # socks.PROXY_TYPE_HTTP
    pi = ProxyInfo(
        proxy_type=proxy_type,
        proxy_host=url.hostname,
        # Default to the scheme's standard port when the URL omits one.
        proxy_port=url.port or dict(https=443, http=80)[method],
        proxy_user=url.username or None,
        proxy_pass=url.password or None,
        proxy_headers=None,
    )

    bypass_hosts = []
    # If not given an explicit noproxy value, respect values in env vars.
    if noproxy is None:
        noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", ""))
    # Special case: A single '*' character means all hosts should be bypassed.
    if noproxy == "*":
        bypass_hosts = AllHosts
    elif noproxy.strip():
        bypass_hosts = noproxy.split(",")
        bypass_hosts = filter(bool, bypass_hosts)  # To exclude empty string.

    pi.bypass_hosts = bypass_hosts
    return pi
+
+
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError("Proxy support missing but proxy use was requested!")
        # When proxying, the TCP connection goes to the proxy endpoint.
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            (
                proxy_type,
                proxy_host,
                proxy_port,
                proxy_rdns,
                proxy_user,
                proxy_pass,
                proxy_headers,
            ) = self.proxy_info.astuple()

            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port

        socket_err = None

        # Try each resolved address in turn until one connects.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if use_proxy:
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(
                        proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,
                    )
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print("connect: (%s, %s) ************" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s ************"
                            % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                        )
                if use_proxy:
                    self.sock.connect((self.host, self.port) + sa[2:])
                else:
                    self.sock.connect(sa)
            except socket.error as e:
                # Remember the failure, clean up, and try the next address.
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket_err or socket.error("getaddrinfo returns an empty list")
+
+
+class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
+ """This class allows communication via SSL.
+
+ All timeouts are in seconds. If None is passed for timeout then
+ Python's default timeout for sockets will be used. See for example
+ the docs of socket.setdefaulttimeout():
+ http://docs.python.org/library/socket.html#socket.setdefaulttimeout
+ """
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ strict=None,
+ timeout=None,
+ proxy_info=None,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ ssl_version=None,
+ key_password=None,
+ ):
+ if key_password:
+ httplib.HTTPSConnection.__init__(self, host, port=port, strict=strict)
+ self._context.load_cert_chain(cert_file, key_file, key_password)
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.key_password = key_password
+ else:
+ httplib.HTTPSConnection.__init__(
+ self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict
+ )
+ self.key_password = None
+ self.timeout = timeout
+ self.proxy_info = proxy_info
+ if ca_certs is None:
+ ca_certs = CA_CERTS
+ self.ca_certs = ca_certs
+ self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
+ self.ssl_version = ssl_version
+
+ # The following two methods were adapted from https_wrapper.py, released
+ # with the Google Appengine SDK at
+ # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
+ # under the following license:
+ #
+ # Copyright 2007 Google Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ def _GetValidHostsForCert(self, cert):
+ """Returns a list of valid host globs for an SSL certificate.
+
+ Args:
+ cert: A dictionary representing an SSL certificate.
+ Returns:
+ list: A list of valid host globs.
+ """
+ if "subjectAltName" in cert:
+ return [x[1] for x in cert["subjectAltName"] if x[0].lower() == "dns"]
+ else:
+ return [x[0][1] for x in cert["subject"] if x[0][0].lower() == "commonname"]
+
+ def _ValidateCertificateHostname(self, cert, hostname):
+ """Validates that a given hostname is valid for an SSL certificate.
+
+ Args:
+ cert: A dictionary representing an SSL certificate.
+ hostname: The hostname to test.
+ Returns:
+ bool: Whether or not the hostname is valid for this certificate.
+ """
+ hosts = self._GetValidHostsForCert(cert)
+ for host in hosts:
+ host_re = host.replace(".", "\.").replace("*", "[^.]*")
+ if re.search("^%s$" % (host_re,), hostname, re.I):
+ return True
+ return False
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ if self.proxy_info and self.proxy_info.isgood():
+ use_proxy = True
+ (
+ proxy_type,
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ ) = self.proxy_info.astuple()
+
+ host = proxy_host
+ port = proxy_port
+ else:
+ use_proxy = False
+
+ host = self.host
+ port = self.port
+
+ socket_err = None
+
+ address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in address_info:
+ try:
+ if use_proxy:
+ sock = socks.socksocket(family, socktype, proto)
+
+ sock.setproxy(
+ proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,
+ )
+ else:
+ sock = socket.socket(family, socktype, proto)
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ if has_timeout(self.timeout):
+ sock.settimeout(self.timeout)
+
+ if use_proxy:
+ sock.connect((self.host, self.port) + sockaddr[:2])
+ else:
+ sock.connect(sockaddr)
+ self.sock = _ssl_wrap_socket(
+ sock,
+ self.key_file,
+ self.cert_file,
+ self.disable_ssl_certificate_validation,
+ self.ca_certs,
+ self.ssl_version,
+ self.host,
+ self.key_password,
+ )
+ if self.debuglevel > 0:
+ print("connect: (%s, %s)" % (self.host, self.port))
+ if use_proxy:
+ print(
+ "proxy: %s"
+ % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
+ )
+ if not self.disable_ssl_certificate_validation:
+ cert = self.sock.getpeercert()
+ hostname = self.host.split(":", 0)[0]
+ if not self._ValidateCertificateHostname(cert, hostname):
+ raise CertificateHostnameMismatch(
+ "Server presented certificate that does not match " "host %s: %s" % (hostname, cert),
+ hostname,
+ cert,
+ )
+ except (ssl_SSLError, ssl_CertificateError, CertificateHostnameMismatch,) as e:
+ if sock:
+ sock.close()
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ # Unfortunately the ssl module doesn't seem to provide any way
+ # to get at more detailed error information, in particular
+ # whether the error is due to certificate validation or
+ # something else (such as SSL protocol mismatch).
+ if getattr(e, "errno", None) == ssl.SSL_ERROR_SSL:
+ raise SSLHandshakeError(e)
+ else:
+ raise
+ except (socket.timeout, socket.gaierror):
+ raise
+ except socket.error as e:
+ socket_err = e
+ if self.debuglevel > 0:
+ print("connect fail: (%s, %s)" % (self.host, self.port))
+ if use_proxy:
+ print(
+ "proxy: %s"
+ % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
+ )
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ continue
+ break
+ if not self.sock:
+ raise socket_err or socket.error("getaddrinfo returns an empty list")
+
+
+SCHEME_TO_CONNECTION = {
+ "http": HTTPConnectionWithTimeout,
+ "https": HTTPSConnectionWithTimeout,
+}
+
+
+def _new_fixed_fetch(validate_certificate):
+ def fixed_fetch(
+ url, payload=None, method="GET", headers={}, allow_truncated=False, follow_redirects=True, deadline=None,
+ ):
+ return fetch(
+ url,
+ payload=payload,
+ method=method,
+ headers=headers,
+ allow_truncated=allow_truncated,
+ follow_redirects=follow_redirects,
+ deadline=deadline,
+ validate_certificate=validate_certificate,
+ )
+
+ return fixed_fetch
+
+
+class AppEngineHttpConnection(httplib.HTTPConnection):
+ """Use httplib on App Engine, but compensate for its weirdness.
+
+ The parameters key_file, cert_file, proxy_info, ca_certs,
+ disable_ssl_certificate_validation, and ssl_version are all dropped on
+ the ground.
+ """
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ strict=None,
+ timeout=None,
+ proxy_info=None,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ ssl_version=None,
+ ):
+ httplib.HTTPConnection.__init__(self, host, port=port, strict=strict, timeout=timeout)
+
+
+class AppEngineHttpsConnection(httplib.HTTPSConnection):
+ """Same as AppEngineHttpConnection, but for HTTPS URIs.
+
+ The parameters proxy_info, ca_certs, disable_ssl_certificate_validation,
+ and ssl_version are all dropped on the ground.
+ """
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ strict=None,
+ timeout=None,
+ proxy_info=None,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ ssl_version=None,
+ key_password=None,
+ ):
+ if key_password:
+ raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
+ httplib.HTTPSConnection.__init__(
+ self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict, timeout=timeout,
+ )
+ self._fetch = _new_fixed_fetch(not disable_ssl_certificate_validation)
+
+
+# Use a different connection object for Google App Engine Standard Environment.
+def is_gae_instance():
+ server_software = os.environ.get("SERVER_SOFTWARE", "")
+ if (
+ server_software.startswith("Google App Engine/")
+ or server_software.startswith("Development/")
+ or server_software.startswith("testutil/")
+ ):
+ return True
+ return False
+
+
+try:
+ if not is_gae_instance():
+ raise NotRunningAppEngineEnvironment()
+
+ from google.appengine.api import apiproxy_stub_map
+
+ if apiproxy_stub_map.apiproxy.GetStub("urlfetch") is None:
+ raise ImportError
+
+ from google.appengine.api.urlfetch import fetch
+
+ # Update the connection classes to use the Google App Engine specific ones.
+ SCHEME_TO_CONNECTION = {
+ "http": AppEngineHttpConnection,
+ "https": AppEngineHttpsConnection,
+ }
+except (ImportError, NotRunningAppEngineEnvironment):
+ pass
+
+
+class Http(object):
+ """An HTTP client that handles:
+
+ - all methods
+ - caching
+ - ETags
+ - compression,
+ - HTTPS
+ - Basic
+ - Digest
+ - WSSE
+
+ and more.
+ """
+
+ def __init__(
+ self,
+ cache=None,
+ timeout=None,
+ proxy_info=proxy_info_from_environment,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ ssl_version=None,
+ ):
+ """If 'cache' is a string then it is used as a directory name for
+ a disk cache. Otherwise it must be an object that supports the
+ same interface as FileCache.
+
+ All timeouts are in seconds. If None is passed for timeout
+ then Python's default timeout for sockets will be used. See
+ for example the docs of socket.setdefaulttimeout():
+ http://docs.python.org/library/socket.html#socket.setdefaulttimeout
+
+ `proxy_info` may be:
+ - a callable that takes the http scheme ('http' or 'https') and
+ returns a ProxyInfo instance per request. By default, uses
+ proxy_info_from_environment.
+ - a ProxyInfo instance (static proxy config).
+ - None (proxy disabled).
+
+ ca_certs is the path of a file containing root CA certificates for SSL
+ server certificate validation. By default, a CA cert file bundled with
+ httplib2 is used.
+
+ If disable_ssl_certificate_validation is true, SSL cert validation will
+ not be performed.
+
+ By default, ssl.PROTOCOL_SSLv23 will be used for the ssl version.
+ """
+ self.proxy_info = proxy_info
+ self.ca_certs = ca_certs
+ self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
+ self.ssl_version = ssl_version
+
+ # Map domain name to an httplib connection
+ self.connections = {}
+ # The location of the cache, for now a directory
+ # where cached responses are held.
+ if cache and isinstance(cache, basestring):
+ self.cache = FileCache(cache)
+ else:
+ self.cache = cache
+
+ # Name/password
+ self.credentials = Credentials()
+
+ # Key/cert
+ self.certificates = KeyCerts()
+
+ # authorization objects
+ self.authorizations = []
+
+ # If set to False then no redirects are followed, even safe ones.
+ self.follow_redirects = True
+
+ self.redirect_codes = REDIRECT_CODES
+
+ # Which HTTP methods do we apply optimistic concurrency to, i.e.
+ # which methods get an "if-match:" etag header added to them.
+ self.optimistic_concurrency_methods = ["PUT", "PATCH"]
+
+ self.safe_methods = list(SAFE_METHODS)
+
+ # If 'follow_redirects' is True, and this is set to True then
+ # all redirects are followed, including unsafe ones.
+ self.follow_all_redirects = False
+
+ self.ignore_etag = False
+
+ self.force_exception_to_status_code = False
+
+ self.timeout = timeout
+
+ # Keep Authorization: headers on a redirect.
+ self.forward_authorization_headers = False
+
+ def close(self):
+ """Close persistent connections, clear sensitive data.
+ Not thread-safe, requires external synchronization against concurrent requests.
+ """
+ existing, self.connections = self.connections, {}
+ for _, c in existing.iteritems():
+ c.close()
+ self.certificates.clear()
+ self.clear_credentials()
+
+ def __getstate__(self):
+ state_dict = copy.copy(self.__dict__)
+ # In case request is augmented by some foreign object such as
+ # credentials which handle auth
+ if "request" in state_dict:
+ del state_dict["request"]
+ if "connections" in state_dict:
+ del state_dict["connections"]
+ return state_dict
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.connections = {}
+
+ def _auth_from_challenge(self, host, request_uri, headers, response, content):
+ """A generator that creates Authorization objects
+ that can be applied to requests.
+ """
+ challenges = auth._parse_www_authenticate(response, "www-authenticate")
+ for cred in self.credentials.iter(host):
+ for scheme in AUTH_SCHEME_ORDER:
+ if scheme in challenges:
+ yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
+
+ def add_credentials(self, name, password, domain=""):
+ """Add a name and password that will be used
+ any time a request requires authentication."""
+ self.credentials.add(name, password, domain)
+
+ def add_certificate(self, key, cert, domain, password=None):
+ """Add a key and cert that will be used
+ any time a request requires authentication."""
+ self.certificates.add(key, cert, domain, password)
+
+ def clear_credentials(self):
+ """Remove all the names and passwords
+ that are used for authentication"""
+ self.credentials.clear()
+ self.authorizations = []
+
+ def _conn_request(self, conn, request_uri, method, body, headers):
+ i = 0
+ seen_bad_status_line = False
+ while i < RETRIES:
+ i += 1
+ try:
+ if hasattr(conn, "sock") and conn.sock is None:
+ conn.connect()
+ conn.request(method, request_uri, body, headers)
+ except socket.timeout:
+ raise
+ except socket.gaierror:
+ conn.close()
+ raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
+ except ssl_SSLError:
+ conn.close()
+ raise
+ except socket.error as e:
+ err = 0
+ if hasattr(e, "args"):
+ err = getattr(e, "args")[0]
+ else:
+ err = e.errno
+ if err == errno.ECONNREFUSED: # Connection refused
+ raise
+ if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
+ continue # retry on potentially transient socket errors
+ except httplib.HTTPException:
+ # Just because the server closed the connection doesn't apparently mean
+ # that the server didn't send a response.
+ if hasattr(conn, "sock") and conn.sock is None:
+ if i < RETRIES - 1:
+ conn.close()
+ conn.connect()
+ continue
+ else:
+ conn.close()
+ raise
+ if i < RETRIES - 1:
+ conn.close()
+ conn.connect()
+ continue
+ try:
+ response = conn.getresponse()
+ except httplib.BadStatusLine:
+ # If we get a BadStatusLine on the first try then that means
+ # the connection just went stale, so retry regardless of the
+ # number of RETRIES set.
+ if not seen_bad_status_line and i == 1:
+ i = 0
+ seen_bad_status_line = True
+ conn.close()
+ conn.connect()
+ continue
+ else:
+ conn.close()
+ raise
+ except (socket.error, httplib.HTTPException):
+ if i < RETRIES - 1:
+ conn.close()
+ conn.connect()
+ continue
+ else:
+ conn.close()
+ raise
+ else:
+ content = ""
+ if method == "HEAD":
+ conn.close()
+ else:
+ content = response.read()
+ response = Response(response)
+ if method != "HEAD":
+ content = _decompressContent(response, content)
+ break
+ return (response, content)
+
+ def _request(
+ self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey,
+ ):
+ """Do the actual request using the connection object
+ and also follow one level of redirects if necessary"""
+
+ auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
+ auth = auths and sorted(auths)[0][1] or None
+ if auth:
+ auth.request(method, request_uri, headers, body)
+
+ (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+
+ if auth:
+ if auth.response(response, body):
+ auth.request(method, request_uri, headers, body)
+ (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+ response._stale_digest = 1
+
+ if response.status == 401:
+ for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
+ authorization.request(method, request_uri, headers, body)
+ (response, content) = self._conn_request(conn, request_uri, method, body, headers)
+ if response.status != 401:
+ self.authorizations.append(authorization)
+ authorization.response(response, body)
+ break
+
+ if self.follow_all_redirects or method in self.safe_methods or response.status in (303, 308):
+ if self.follow_redirects and response.status in self.redirect_codes:
+ # Pick out the location header and basically start from the beginning
+ # remembering first to strip the ETag header and decrement our 'depth'
+ if redirections:
+ if "location" not in response and response.status != 300:
+ raise RedirectMissingLocation(
+ _("Redirected but the response is missing a Location: header."), response, content,
+ )
+ # Fix-up relative redirects (which violate an RFC 2616 MUST)
+ if "location" in response:
+ location = response["location"]
+ (scheme, authority, path, query, fragment) = parse_uri(location)
+ if authority == None:
+ response["location"] = urlparse.urljoin(absolute_uri, location)
+ if response.status == 308 or (response.status == 301 and method in self.safe_methods):
+ response["-x-permanent-redirect-url"] = response["location"]
+ if "content-location" not in response:
+ response["content-location"] = absolute_uri
+ _updateCache(headers, response, content, self.cache, cachekey)
+ if "if-none-match" in headers:
+ del headers["if-none-match"]
+ if "if-modified-since" in headers:
+ del headers["if-modified-since"]
+ if "authorization" in headers and not self.forward_authorization_headers:
+ del headers["authorization"]
+ if "location" in response:
+ location = response["location"]
+ old_response = copy.deepcopy(response)
+ if "content-location" not in old_response:
+ old_response["content-location"] = absolute_uri
+ redirect_method = method
+ if response.status in [302, 303]:
+ redirect_method = "GET"
+ body = None
+ (response, content) = self.request(
+ location, method=redirect_method, body=body, headers=headers, redirections=redirections - 1,
+ )
+ response.previous = old_response
+ else:
+ raise RedirectLimit(
+ "Redirected more times than rediection_limit allows.", response, content,
+ )
+ elif response.status in [200, 203] and method in self.safe_methods:
+ # Don't cache 206's since we aren't going to handle byte range requests
+ if "content-location" not in response:
+ response["content-location"] = absolute_uri
+ _updateCache(headers, response, content, self.cache, cachekey)
+
+ return (response, content)
+
+ def _normalize_headers(self, headers):
+ return _normalize_headers(headers)
+
+ # Need to catch and rebrand some exceptions
+ # Then need to optionally turn all exceptions into status codes
+ # including all socket.* and httplib.* exceptions.
+
+ def request(
+ self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None,
+ ):
+ """ Performs a single HTTP request.
+
+ The 'uri' is the URI of the HTTP resource and can begin with either
+ 'http' or 'https'. The value of 'uri' must be an absolute URI.
+
+ The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
+ etc. There is no restriction on the methods allowed.
+
+ The 'body' is the entity body to be sent with the request. It is a
+ string object.
+
+ Any extra headers that are to be sent with the request should be
+ provided in the 'headers' dictionary.
+
+ The maximum number of redirect to follow before raising an
+ exception is 'redirections'. The default is 5.
+
+ The return value is a tuple of (response, content), the first
+ being an instance of the 'Response' class, the second being
+ a string that contains the response entity body.
+ """
+ conn_key = ""
+
+ try:
+ if headers is None:
+ headers = {}
+ else:
+ headers = self._normalize_headers(headers)
+
+ if "user-agent" not in headers:
+ headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__
+
+ uri = iri2uri(uri)
+ # Prevent CWE-75 space injection to manipulate request via part of uri.
+ # Prevent CWE-93 CRLF injection to modify headers via part of uri.
+ uri = uri.replace(" ", "%20").replace("\r", "%0D").replace("\n", "%0A")
+
+ (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
+
+ proxy_info = self._get_proxy_info(scheme, authority)
+
+ conn_key = scheme + ":" + authority
+ conn = self.connections.get(conn_key)
+ if conn is None:
+ if not connection_type:
+ connection_type = SCHEME_TO_CONNECTION[scheme]
+ certs = list(self.certificates.iter(authority))
+ if scheme == "https":
+ if certs:
+ conn = self.connections[conn_key] = connection_type(
+ authority,
+ key_file=certs[0][0],
+ cert_file=certs[0][1],
+ timeout=self.timeout,
+ proxy_info=proxy_info,
+ ca_certs=self.ca_certs,
+ disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
+ ssl_version=self.ssl_version,
+ key_password=certs[0][2],
+ )
+ else:
+ conn = self.connections[conn_key] = connection_type(
+ authority,
+ timeout=self.timeout,
+ proxy_info=proxy_info,
+ ca_certs=self.ca_certs,
+ disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
+ ssl_version=self.ssl_version,
+ )
+ else:
+ conn = self.connections[conn_key] = connection_type(
+ authority, timeout=self.timeout, proxy_info=proxy_info
+ )
+ conn.set_debuglevel(debuglevel)
+
+ if "range" not in headers and "accept-encoding" not in headers:
+ headers["accept-encoding"] = "gzip, deflate"
+
+ info = email.Message.Message()
+ cachekey = None
+ cached_value = None
+ if self.cache:
+ cachekey = defrag_uri.encode("utf-8")
+ cached_value = self.cache.get(cachekey)
+ if cached_value:
+ # info = email.message_from_string(cached_value)
+ #
+ # Need to replace the line above with the kludge below
+ # to fix the non-existent bug not fixed in this
+ # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
+ try:
+ info, content = cached_value.split("\r\n\r\n", 1)
+ feedparser = email.FeedParser.FeedParser()
+ feedparser.feed(info)
+ info = feedparser.close()
+ feedparser._parse = None
+ except (IndexError, ValueError):
+ self.cache.delete(cachekey)
+ cachekey = None
+ cached_value = None
+
+ if (
+ method in self.optimistic_concurrency_methods
+ and self.cache
+ and "etag" in info
+ and not self.ignore_etag
+ and "if-match" not in headers
+ ):
+ # http://www.w3.org/1999/04/Editing/
+ headers["if-match"] = info["etag"]
+
+ # https://tools.ietf.org/html/rfc7234
+ # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location
+ # when a non-error status code is received in response to an unsafe request method.
+ if self.cache and cachekey and method not in self.safe_methods:
+ self.cache.delete(cachekey)
+
+ # Check the vary header in the cache to see if this request
+ # matches what varies in the cache.
+ if method in self.safe_methods and "vary" in info:
+ vary = info["vary"]
+ vary_headers = vary.lower().replace(" ", "").split(",")
+ for header in vary_headers:
+ key = "-varied-%s" % header
+ value = info[key]
+ if headers.get(header, None) != value:
+ cached_value = None
+ break
+
+ if (
+ self.cache
+ and cached_value
+ and (method in self.safe_methods or info["status"] == "308")
+ and "range" not in headers
+ ):
+ redirect_method = method
+ if info["status"] not in ("307", "308"):
+ redirect_method = "GET"
+ if "-x-permanent-redirect-url" in info:
+ # Should cached permanent redirects be counted in our redirection count? For now, yes.
+ if redirections <= 0:
+ raise RedirectLimit(
+ "Redirected more times than rediection_limit allows.", {}, "",
+ )
+ (response, new_content) = self.request(
+ info["-x-permanent-redirect-url"],
+ method=redirect_method,
+ headers=headers,
+ redirections=redirections - 1,
+ )
+ response.previous = Response(info)
+ response.previous.fromcache = True
+ else:
+ # Determine our course of action:
+ # Is the cached entry fresh or stale?
+ # Has the client requested a non-cached response?
+ #
+ # There seems to be three possible answers:
+ # 1. [FRESH] Return the cache entry w/o doing a GET
+ # 2. [STALE] Do the GET (but add in cache validators if available)
+ # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
+ entry_disposition = _entry_disposition(info, headers)
+
+ if entry_disposition == "FRESH":
+ if not cached_value:
+ info["status"] = "504"
+ content = ""
+ response = Response(info)
+ if cached_value:
+ response.fromcache = True
+ return (response, content)
+
+ if entry_disposition == "STALE":
+ if "etag" in info and not self.ignore_etag and not "if-none-match" in headers:
+ headers["if-none-match"] = info["etag"]
+ if "last-modified" in info and not "last-modified" in headers:
+ headers["if-modified-since"] = info["last-modified"]
+ elif entry_disposition == "TRANSPARENT":
+ pass
+
+ (response, new_content) = self._request(
+ conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
+ )
+
+ if response.status == 304 and method == "GET":
+ # Rewrite the cache entry with the new end-to-end headers
+ # Take all headers that are in response
+ # and overwrite their values in info.
+ # unless they are hop-by-hop, or are listed in the connection header.
+
+ for key in _get_end2end_headers(response):
+ info[key] = response[key]
+ merged_response = Response(info)
+ if hasattr(response, "_stale_digest"):
+ merged_response._stale_digest = response._stale_digest
+ _updateCache(headers, merged_response, content, self.cache, cachekey)
+ response = merged_response
+ response.status = 200
+ response.fromcache = True
+
+ elif response.status == 200:
+ content = new_content
+ else:
+ self.cache.delete(cachekey)
+ content = new_content
+ else:
+ cc = _parse_cache_control(headers)
+ if "only-if-cached" in cc:
+ info["status"] = "504"
+ response = Response(info)
+ content = ""
+ else:
+ (response, content) = self._request(
+ conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
+ )
+ except Exception as e:
+ is_timeout = isinstance(e, socket.timeout)
+ if is_timeout:
+ conn = self.connections.pop(conn_key, None)
+ if conn:
+ conn.close()
+
+ if self.force_exception_to_status_code:
+ if isinstance(e, HttpLib2ErrorWithResponse):
+ response = e.response
+ content = e.content
+ response.status = 500
+ response.reason = str(e)
+ elif is_timeout:
+ content = "Request Timeout"
+ response = Response({"content-type": "text/plain", "status": "408", "content-length": len(content),})
+ response.reason = "Request Timeout"
+ else:
+ content = str(e)
+ response = Response({"content-type": "text/plain", "status": "400", "content-length": len(content),})
+ response.reason = "Bad Request"
+ else:
+ raise
+
+ return (response, content)
+
+ def _get_proxy_info(self, scheme, authority):
+ """Return a ProxyInfo instance (or None) based on the scheme
+ and authority.
+ """
+ hostname, port = urllib.splitport(authority)
+ proxy_info = self.proxy_info
+ if callable(proxy_info):
+ proxy_info = proxy_info(scheme)
+
+ if hasattr(proxy_info, "applies_to") and not proxy_info.applies_to(hostname):
+ proxy_info = None
+ return proxy_info
+
+
+class Response(dict):
+ """An object more like email.Message than httplib.HTTPResponse."""
+
+ """Is this response from our local cache"""
+ fromcache = False
+ """HTTP protocol version used by server.
+
+ 10 for HTTP/1.0, 11 for HTTP/1.1.
+ """
+ version = 11
+
+ "Status code returned by server. "
+ status = 200
+ """Reason phrase returned by server."""
+ reason = "Ok"
+
+ previous = None
+
+ def __init__(self, info):
+ # info is either an email.Message or
+ # an httplib.HTTPResponse object.
+ if isinstance(info, httplib.HTTPResponse):
+ for key, value in info.getheaders():
+ self[key.lower()] = value
+ self.status = info.status
+ self["status"] = str(self.status)
+ self.reason = info.reason
+ self.version = info.version
+ elif isinstance(info, email.Message.Message):
+ for key, value in info.items():
+ self[key.lower()] = value
+ self.status = int(self["status"])
+ else:
+ for key, value in info.iteritems():
+ self[key.lower()] = value
+ self.status = int(self.get("status", self.status))
+ self.reason = self.get("reason", self.reason)
+
+ def __getattr__(self, name):
+ if name == "dict":
+ return self
+ else:
+ raise AttributeError(name)
diff --git a/contrib/python/httplib2/py2/httplib2/auth.py b/contrib/python/httplib2/py2/httplib2/auth.py
new file mode 100644
index 0000000000..84b5831766
--- /dev/null
+++ b/contrib/python/httplib2/py2/httplib2/auth.py
@@ -0,0 +1,63 @@
+import base64
+import re
+
+import pyparsing as pp
+
+from .error import *
+
+UNQUOTE_PAIRS = re.compile(r"\\(.)")
+unquote = lambda s, l, t: UNQUOTE_PAIRS.sub(r"\1", t[0][1:-1])
+
+# https://tools.ietf.org/html/rfc7235#section-1.2
+# https://tools.ietf.org/html/rfc7235#appendix-B
+tchar = "!#$%&'*+-.^_`|~" + pp.nums + pp.alphas
+token = pp.Word(tchar).setName("token")
+token68 = pp.Combine(pp.Word("-._~+/" + pp.nums + pp.alphas) + pp.Optional(pp.Word("=").leaveWhitespace())).setName(
+ "token68"
+)
+
+quoted_string = pp.dblQuotedString.copy().setName("quoted-string").setParseAction(unquote)
+auth_param_name = token.copy().setName("auth-param-name").addParseAction(pp.downcaseTokens)
+auth_param = auth_param_name + pp.Suppress("=") + (quoted_string | token)
+params = pp.Dict(pp.delimitedList(pp.Group(auth_param)))
+
+scheme = token("scheme")
+challenge = scheme + (params("params") | token68("token"))
+
+authentication_info = params.copy()
+www_authenticate = pp.delimitedList(pp.Group(challenge))
+
+
+def _parse_authentication_info(headers, headername="authentication-info"):
+ """https://tools.ietf.org/html/rfc7615
+ """
+ header = headers.get(headername, "").strip()
+ if not header:
+ return {}
+ try:
+ parsed = authentication_info.parseString(header)
+ except pp.ParseException as ex:
+ # print(ex.explain(ex))
+ raise MalformedHeader(headername)
+
+ return parsed.asDict()
+
+
+def _parse_www_authenticate(headers, headername="www-authenticate"):
+ """Returns a dictionary of dictionaries, one dict per auth_scheme."""
+ header = headers.get(headername, "").strip()
+ if not header:
+ return {}
+ try:
+ parsed = www_authenticate.parseString(header)
+ except pp.ParseException as ex:
+ # print(ex.explain(ex))
+ raise MalformedHeader(headername)
+
+ retval = {
+ challenge["scheme"].lower(): challenge["params"].asDict()
+ if "params" in challenge
+ else {"token": challenge.get("token")}
+ for challenge in parsed
+ }
+ return retval
diff --git a/contrib/python/httplib2/py2/httplib2/certs.py b/contrib/python/httplib2/py2/httplib2/certs.py
new file mode 100644
index 0000000000..59d1ffc702
--- /dev/null
+++ b/contrib/python/httplib2/py2/httplib2/certs.py
@@ -0,0 +1,42 @@
+"""Utilities for certificate management."""
+
+import os
+
+certifi_available = False
+certifi_where = None
+try:
+ from certifi import where as certifi_where
+ certifi_available = True
+except ImportError:
+ pass
+
+custom_ca_locater_available = False
+custom_ca_locater_where = None
+try:
+ from ca_certs_locater import get as custom_ca_locater_where
+ custom_ca_locater_available = True
+except ImportError:
+ pass
+
+
+BUILTIN_CA_CERTS = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "cacerts.txt"
+)
+
+
+def where():
+ env = os.environ.get("HTTPLIB2_CA_CERTS")
+ if env is not None:
+ if os.path.isfile(env):
+ return env
+ else:
+ raise RuntimeError("Environment variable HTTPLIB2_CA_CERTS not a valid file")
+ if custom_ca_locater_available:
+ return custom_ca_locater_where()
+ if certifi_available:
+ return certifi_where()
+ return BUILTIN_CA_CERTS
+
+
+if __name__ == "__main__":
+ print(where())
diff --git a/contrib/python/httplib2/py2/httplib2/error.py b/contrib/python/httplib2/py2/httplib2/error.py
new file mode 100644
index 0000000000..0e68c12a85
--- /dev/null
+++ b/contrib/python/httplib2/py2/httplib2/error.py
@@ -0,0 +1,48 @@
+# All exceptions raised here derive from HttpLib2Error
+class HttpLib2Error(Exception):
+    """Base class for all exceptions raised by httplib2."""
+    pass
+
+
+# Some exceptions can be caught and optionally
+# be turned back into responses.
+class HttpLib2ErrorWithResponse(HttpLib2Error):
+    """Error that keeps the HTTP response/content it was raised for."""
+
+    def __init__(self, desc, response, content):
+        # Retain the pair so callers can optionally turn the error back
+        # into a (response, content) result.
+        self.response = response
+        self.content = content
+        HttpLib2Error.__init__(self, desc)
+
+
+class RedirectMissingLocation(HttpLib2ErrorWithResponse):
+    """A 3xx redirect response lacked a Location header."""
+    pass
+
+
+class RedirectLimit(HttpLib2ErrorWithResponse):
+    """The allowed number of redirects was exceeded."""
+    pass
+
+
+class FailedToDecompressContent(HttpLib2ErrorWithResponse):
+    """Response content claimed to be compressed could not be decompressed."""
+    pass
+
+
+class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
+    """A Digest auth challenge used an option this client does not implement."""
+    pass
+
+
+class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
+    """An HMACDigest auth challenge used an option this client does not implement."""
+    pass
+
+
+class MalformedHeader(HttpLib2Error):
+    """An HTTP header could not be parsed."""
+    pass
+
+
+class RelativeURIError(HttpLib2Error):
+    """A relative URI was supplied where an absolute one is required."""
+    pass
+
+
+class ServerNotFoundError(HttpLib2Error):
+    """The server could not be found (e.g. name resolution failed)."""
+    pass
+
+
+class ProxiesUnavailableError(HttpLib2Error):
+    """Proxy support was requested but is not available."""
+    pass
diff --git a/contrib/python/httplib2/py2/httplib2/iri2uri.py b/contrib/python/httplib2/py2/httplib2/iri2uri.py
new file mode 100644
index 0000000000..0a978a7841
--- /dev/null
+++ b/contrib/python/httplib2/py2/httplib2/iri2uri.py
@@ -0,0 +1,123 @@
+"""Converts an IRI to a URI."""
+
+__author__ = "Joe Gregorio (joe@bitworking.org)"
+__copyright__ = "Copyright 2006, Joe Gregorio"
+__contributors__ = []
+__version__ = "1.0.0"
+__license__ = "MIT"
+
+import urlparse
+
+# Convert an IRI to a URI following the rules in RFC 3987
+#
+# The characters we need to encode and escape are defined in the spec:
+#
+# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
+# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
+# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
+# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
+# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
+# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
+# / %xD0000-DFFFD / %xE1000-EFFFD
+
+# Codepoint ranges ('ucschar' / 'iprivate' per the RFC 3987 grammar
+# quoted above) whose characters must be percent-encoded when an IRI is
+# converted to a URI.  Kept sorted ascending so encode() can stop
+# scanning as soon as a range starts above the codepoint.
+escape_range = [
+    (0xA0, 0xD7FF),
+    (0xE000, 0xF8FF),
+    (0xF900, 0xFDCF),
+    (0xFDF0, 0xFFEF),
+    (0x10000, 0x1FFFD),
+    (0x20000, 0x2FFFD),
+    (0x30000, 0x3FFFD),
+    (0x40000, 0x4FFFD),
+    (0x50000, 0x5FFFD),
+    (0x60000, 0x6FFFD),
+    (0x70000, 0x7FFFD),
+    (0x80000, 0x8FFFD),
+    (0x90000, 0x9FFFD),
+    (0xA0000, 0xAFFFD),
+    (0xB0000, 0xBFFFD),
+    (0xC0000, 0xCFFFD),
+    (0xD0000, 0xDFFFD),
+    (0xE1000, 0xEFFFD),
+    (0xF0000, 0xFFFFD),
+    (0x100000, 0x10FFFD),
+]
+
+
+def encode(c):
+    """Return *c* percent-encoded if its codepoint falls in one of the
+    RFC 3987 escape ranges; otherwise return it unchanged."""
+    retval = c
+    i = ord(c)
+    for low, high in escape_range:
+        if i < low:
+            # escape_range is sorted ascending, so no later range matches.
+            break
+        if i >= low and i <= high:
+            # Encode the character as UTF-8, then %-escape each octet.
+            retval = "".join(["%%%2X" % ord(o) for o in c.encode("utf-8")])
+            break
+    return retval
+
+
+def iri2uri(uri):
+    """Convert an IRI to a URI. Note that IRIs must be
+    passed in a unicode strings. That is, do not utf-8 encode
+    the IRI before passing it into the function."""
+    # Byte strings pass through untouched; only unicode is converted
+    # (this is the Python 2 variant of the module).
+    if isinstance(uri, unicode):
+        (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
+        # The host portion uses IDNA (punycode), not %-escaping.
+        authority = authority.encode("idna")
+        # For each character in 'ucschar' or 'iprivate'
+        # 1. encode as utf-8
+        # 2. then %-encode each octet of that utf-8
+        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
+        uri = "".join([encode(c) for c in uri])
+    return uri
+
+
+if __name__ == "__main__":
+    # Ad-hoc self-test harness; run `python iri2uri.py` to execute.
+    import unittest
+
+    class Test(unittest.TestCase):
+        def test_uris(self):
+            """Test that URIs are invariant under the transformation."""
+            invariant = [
+                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
+                u"http://www.ietf.org/rfc/rfc2396.txt",
+                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
+                u"mailto:John.Doe@example.com",
+                u"news:comp.infosystems.www.servers.unix",
+                u"tel:+1-816-555-1212",
+                u"telnet://192.0.2.16:80/",
+                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
+            ]
+            for uri in invariant:
+                self.assertEqual(uri, iri2uri(uri))
+
+        def test_iri(self):
+            """Test that the right type of escaping is done for each part of the URI."""
+            self.assertEqual(
+                "http://xn--o3h.com/%E2%98%84",
+                iri2uri(u"http://\N{COMET}.com/\N{COMET}"),
+            )
+            self.assertEqual(
+                "http://bitworking.org/?fred=%E2%98%84",
+                iri2uri(u"http://bitworking.org/?fred=\N{COMET}"),
+            )
+            self.assertEqual(
+                "http://bitworking.org/#%E2%98%84",
+                iri2uri(u"http://bitworking.org/#\N{COMET}"),
+            )
+            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
+            self.assertEqual(
+                "/fred?bar=%E2%98%9A#%E2%98%84",
+                iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"),
+            )
+            self.assertEqual(
+                "/fred?bar=%E2%98%9A#%E2%98%84",
+                iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")),
+            )
+            # Byte strings are NOT converted, hence assertNotEqual.
+            self.assertNotEqual(
+                "/fred?bar=%E2%98%9A#%E2%98%84",
+                iri2uri(
+                    u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8")
+                ),
+            )
+
+    unittest.main()
diff --git a/contrib/python/httplib2/py2/httplib2/socks.py b/contrib/python/httplib2/py2/httplib2/socks.py
new file mode 100644
index 0000000000..71eb4ebf96
--- /dev/null
+++ b/contrib/python/httplib2/py2/httplib2/socks.py
@@ -0,0 +1,518 @@
+"""SocksiPy - Python SOCKS module.
+
+Version 1.00
+
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+ to endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
+
+This module provides a standard socket-like interface for Python
+for tunneling connections through SOCKS proxies.
+
+Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for
+use in PyLoris (http://pyloris.sourceforge.net/).
+
+Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
+mainly to merge bug fixes found in Sourceforge.
+"""
+
+import base64
+import socket
+import struct
+import sys
+
+# Fail fast when the real socket implementation is unavailable.
+if getattr(socket, "socket", None) is None:
+    raise ImportError("socket.socket missing, proxy support unusable")
+
+# Supported proxy protocol identifiers (stored in slot 0 of the proxy tuple).
+PROXY_TYPE_SOCKS4 = 1
+PROXY_TYPE_SOCKS5 = 2
+PROXY_TYPE_HTTP = 3
+PROXY_TYPE_HTTP_NO_TUNNEL = 4
+
+# Module-wide default proxy (set via setdefaultproxy) and a reference to
+# the unwrapped socket class so wrapped modules can still reach it.
+_defaultproxy = None
+_orgsocket = socket.socket
+
+
+class ProxyError(Exception):
+    """Base class for all proxy negotiation errors in this module."""
+    pass
+
+
+class GeneralProxyError(ProxyError):
+    """Generic failure; carries a (code, _generalerrors[code]) tuple."""
+    pass
+
+
+class Socks5AuthError(ProxyError):
+    """SOCKS5 authentication failed; carries a (code, message) tuple."""
+    pass
+
+
+class Socks5Error(ProxyError):
+    """SOCKS5 server reported an error; carries a (code, message) tuple."""
+    pass
+
+
+class Socks4Error(ProxyError):
+    """SOCKS4 server reported an error; carries a (code, message) tuple."""
+    pass
+
+
+class HTTPError(ProxyError):
+    """HTTP proxy CONNECT failed; carries (status code, status text)."""
+    pass
+
+
+# Human-readable error strings, indexed by the numeric codes carried in
+# the exception tuples raised throughout this module.
+_generalerrors = (
+    "success",
+    "invalid data",
+    "not connected",
+    "not available",
+    "bad proxy type",
+    "bad input",
+)
+
+_socks5errors = (
+    "succeeded",
+    "general SOCKS server failure",
+    "connection not allowed by ruleset",
+    "Network unreachable",
+    "Host unreachable",
+    "Connection refused",
+    "TTL expired",
+    "Command not supported",
+    "Address type not supported",
+    "Unknown error",
+)
+
+_socks5autherrors = (
+    "succeeded",
+    "authentication is required",
+    "all offered authentication methods were rejected",
+    "unknown username or invalid password",
+    "unknown error",
+)
+
+# SOCKS4 reply codes 90-93 map to indexes 0-3 via (code - 90).
+_socks4errors = (
+    "request granted",
+    "request rejected or failed",
+    "request rejected because SOCKS server cannot connect to identd on the client",
+    "request rejected because the client program and identd report different "
+    "user-ids",
+    "unknown error",
+)
+
+
+def setdefaultproxy(
+    proxytype=None, addr=None, port=None, rdns=True, username=None, password=None
+):
+    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
+    Sets a default proxy which all further socksocket objects will use,
+    unless explicitly changed.
+    """
+    global _defaultproxy
+    # NOTE(review): this stores a 6-tuple, while socksocket.setproxy()
+    # stores a 7-tuple (with a trailing headers slot); __negotiatehttp
+    # indexes __proxy[6], which would IndexError for sockets relying on
+    # the default -- confirm against upstream.
+    _defaultproxy = (proxytype, addr, port, rdns, username, password)
+
+
+def wrapmodule(module):
+    """wrapmodule(module)
+
+    Attempts to replace a module's socket library with a SOCKS socket. Must set
+    a default proxy using setdefaultproxy(...) first.
+    This will only work on modules that import socket directly into the
+    namespace;
+    most of the Python Standard Library falls into this category.
+    """
+    if _defaultproxy != None:
+        # Monkey-patch: new sockets created by the module route via the proxy.
+        module.socket.socket = socksocket
+    else:
+        raise GeneralProxyError((4, "no proxy specified"))
+
+
+class socksocket(socket.socket):
+ """socksocket([family[, type[, proto]]]) -> socket object
+ Open a SOCKS enabled socket. The parameters are the same as
+ those of the standard socket init. In order for SOCKS to work,
+ you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
+ """
+
+    def __init__(
+        self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None
+    ):
+        _orgsocket.__init__(self, family, type, proto, _sock)
+        # Adopt the module-wide default proxy when one has been set.
+        if _defaultproxy != None:
+            self.__proxy = _defaultproxy
+        else:
+            self.__proxy = (None, None, None, None, None, None)
+        self.__proxysockname = None
+        self.__proxypeername = None
+        # True while requests are tunneled (CONNECT); connect() clears it
+        # for PROXY_TYPE_HTTP_NO_TUNNEL plain-HTTP traffic.
+        self.__httptunnel = True
+
+    def __recvall(self, count):
+        """__recvall(count) -> data
+        Receive EXACTLY the number of bytes requested from the socket.
+        Blocks until the required number of bytes have been received.
+        """
+        data = self.recv(count)
+        while len(data) < count:
+            d = self.recv(count - len(data))
+            if not d:
+                # Peer closed before delivering the requested byte count.
+                raise GeneralProxyError((0, "connection closed unexpectedly"))
+            data = data + d
+        return data
+
+    def sendall(self, content, *args):
+        """ override socket.socket.sendall method to rewrite the header
+        for non-tunneling proxies if needed
+        """
+        # Tunneled (CONNECT) traffic passes through unmodified.
+        if not self.__httptunnel:
+            content = self.__rewriteproxy(content)
+        return super(socksocket, self).sendall(content, *args)
+
+    def __rewriteproxy(self, header):
+        """ rewrite HTTP request headers to support non-tunneling proxies
+        (i.e. those which do not support the CONNECT method).
+        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
+        """
+        host, endpt = None, None
+        hdrs = header.split("\r\n")
+        # Locate the Host header and the GET/POST request line.
+        for hdr in hdrs:
+            if hdr.lower().startswith("host:"):
+                host = hdr
+            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
+                endpt = hdr
+        if host and endpt:
+            hdrs.remove(host)
+            hdrs.remove(endpt)
+            host = host.split(" ")[1]
+            # Assumes a request line of "METHOD path HTTP/x.y";
+            # endpt[2] below raises IndexError otherwise.
+            endpt = endpt.split(" ")
+            if self.__proxy[4] != None and self.__proxy[5] != None:
+                hdrs.insert(0, self.__getauthheader())
+            hdrs.insert(0, "Host: %s" % host)
+            # Rebuild the request line with an absolute URI, as required
+            # when talking to a plain (non-CONNECT) HTTP proxy.
+            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
+        return "\r\n".join(hdrs)
+
+    def __getauthheader(self):
+        # Basic proxy auth: base64("user:pass") from proxy tuple slots 4/5.
+        auth = self.__proxy[4] + ":" + self.__proxy[5]
+        return "Proxy-Authorization: Basic " + base64.b64encode(auth)
+
+    def setproxy(
+        self,
+        proxytype=None,
+        addr=None,
+        port=None,
+        rdns=True,
+        username=None,
+        password=None,
+        headers=None,
+    ):
+        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
+
+        Sets the proxy to be used.
+        proxytype - The type of the proxy to be used. Three types
+        are supported: PROXY_TYPE_SOCKS4 (including socks4a),
+        PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
+        addr - The address of the server (IP or DNS).
+        port - The port of the server. Defaults to 1080 for SOCKS
+        servers and 8080 for HTTP proxy servers.
+        rdns - Should DNS queries be performed on the remote side
+        (rather than the local side). The default is True.
+        Note: This has no effect with SOCKS4 servers.
+        username - Username to authenticate with to the server.
+        The default is no authentication.
+        password - Password to authenticate with to the server.
+        Only relevant when username is also provided.
+        headers - Additional or modified headers for the proxy connect
+        request.
+        """
+        # Credentials are stored as bytes so they concatenate directly
+        # into the binary negotiation packets.
+        self.__proxy = (
+            proxytype,
+            addr,
+            port,
+            rdns,
+            username.encode() if username else None,
+            password.encode() if password else None,
+            headers,
+        )
+
+    def __negotiatesocks5(self, destaddr, destport):
+        """__negotiatesocks5(self,destaddr,destport)
+        Negotiates a connection through a SOCKS5 server.
+        """
+        # First we'll send the authentication packages we support.
+        if (self.__proxy[4] != None) and (self.__proxy[5] != None):
+            # The username/password details were supplied to the
+            # setproxy method so we support the USERNAME/PASSWORD
+            # authentication (in addition to the standard none).
+            self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02))
+        else:
+            # No username/password were entered, therefore we
+            # only support connections with no authentication.
+            self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00))
+        # We'll receive the server's response to determine which
+        # method was selected
+        chosenauth = self.__recvall(2)
+        if chosenauth[0:1] != chr(0x05).encode():
+            self.close()
+            raise GeneralProxyError((1, _generalerrors[1]))
+        # Check the chosen authentication method
+        if chosenauth[1:2] == chr(0x00).encode():
+            # No authentication is required
+            pass
+        elif chosenauth[1:2] == chr(0x02).encode():
+            # Okay, we need to perform a basic username/password
+            # authentication.
+            self.sendall(
+                chr(0x01).encode()
+                + chr(len(self.__proxy[4]))
+                + self.__proxy[4]
+                + chr(len(self.__proxy[5]))
+                + self.__proxy[5]
+            )
+            authstat = self.__recvall(2)
+            if authstat[0:1] != chr(0x01).encode():
+                # Bad response
+                self.close()
+                raise GeneralProxyError((1, _generalerrors[1]))
+            if authstat[1:2] != chr(0x00).encode():
+                # Authentication failed
+                self.close()
+                raise Socks5AuthError((3, _socks5autherrors[3]))
+            # Authentication succeeded
+        else:
+            # Reaching here is always bad
+            self.close()
+            # NOTE(review): on Python 2, chr(0xFF).encode() raises
+            # UnicodeDecodeError, so this comparison can blow up before
+            # either error below is raised -- confirm against upstream.
+            if chosenauth[1] == chr(0xFF).encode():
+                raise Socks5AuthError((2, _socks5autherrors[2]))
+            else:
+                raise GeneralProxyError((1, _generalerrors[1]))
+        # Now we can request the actual connection
+        req = struct.pack("BBB", 0x05, 0x01, 0x00)
+        # If the given destination address is an IP address, we'll
+        # use the IPv4 address request even if remote resolving was specified.
+        try:
+            ipaddr = socket.inet_aton(destaddr)
+            req = req + chr(0x01).encode() + ipaddr
+        except socket.error:
+            # Well it's not an IP number, so it's probably a DNS name.
+            if self.__proxy[3]:
+                # Resolve remotely
+                ipaddr = None
+                req = (
+                    req
+                    + chr(0x03).encode()
+                    + chr(len(destaddr)).encode()
+                    + destaddr.encode()
+                )
+            else:
+                # Resolve locally
+                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
+                req = req + chr(0x01).encode() + ipaddr
+        req = req + struct.pack(">H", destport)
+        self.sendall(req)
+        # Get the response
+        resp = self.__recvall(4)
+        if resp[0:1] != chr(0x05).encode():
+            self.close()
+            raise GeneralProxyError((1, _generalerrors[1]))
+        elif resp[1:2] != chr(0x00).encode():
+            # Connection failed
+            self.close()
+            if ord(resp[1:2]) <= 8:
+                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
+            else:
+                raise Socks5Error((9, _socks5errors[9]))
+        # Get the bound address/port
+        elif resp[3:4] == chr(0x01).encode():
+            boundaddr = self.__recvall(4)
+        elif resp[3:4] == chr(0x03).encode():
+            # Domain-name reply: one length byte precedes the name.
+            resp = resp + self.recv(1)
+            boundaddr = self.__recvall(ord(resp[4:5]))
+        else:
+            self.close()
+            raise GeneralProxyError((1, _generalerrors[1]))
+        boundport = struct.unpack(">H", self.__recvall(2))[0]
+        self.__proxysockname = (boundaddr, boundport)
+        # ipaddr is None only when the proxy resolved the name remotely.
+        if ipaddr != None:
+            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
+        else:
+            self.__proxypeername = (destaddr, destport)
+
+    def getproxysockname(self):
+        """getproxysockname() -> address info
+        Returns the bound IP address and port number at the proxy.
+        """
+        return self.__proxysockname
+
+    def getproxypeername(self):
+        """getproxypeername() -> address info
+        Returns the IP and port number of the proxy.
+        """
+        # The underlying socket is connected to the proxy, not the target.
+        return _orgsocket.getpeername(self)
+
+    def getpeername(self):
+        """getpeername() -> address info
+        Returns the IP address and port number of the destination
+        machine (note: getproxypeername returns the proxy)
+        """
+        # Populated by the negotiate* methods during connect().
+        return self.__proxypeername
+
+ def __negotiatesocks4(self, destaddr, destport):
+ """__negotiatesocks4(self,destaddr,destport)
+ Negotiates a connection through a SOCKS4 server.
+ """
+ # Check if the destination address provided is an IP address
+ rmtrslv = False
+ try:
+ ipaddr = socket.inet_aton(destaddr)
+ except socket.error:
+ # It's a DNS name. Check where it should be resolved.
+ if self.__proxy[3]:
+ ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
+ rmtrslv = True
+ else:
+ ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
+ # Construct the request packet
+ req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
+ # The username parameter is considered userid for SOCKS4
+ if self.__proxy[4] != None:
+ req = req + self.__proxy[4]
+ req = req + chr(0x00).encode()
+ # DNS name if remote resolving is required
+ # NOTE: This is actually an extension to the SOCKS4 protocol
+ # called SOCKS4A and may not be supported in all cases.
+ if rmtrslv:
+ req = req + destaddr + chr(0x00).encode()
+ self.sendall(req)
+ # Get the response from the server
+ resp = self.__recvall(8)
+ if resp[0:1] != chr(0x00).encode():
+ # Bad data
+ self.close()
+ raise GeneralProxyError((1, _generalerrors[1]))
+ if resp[1:2] != chr(0x5A).encode():
+ # Server returned an error
+ self.close()
+ if ord(resp[1:2]) in (91, 92, 93):
+ self.close()
+ raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
+ else:
+ raise Socks4Error((94, _socks4errors[4]))
+ # Get the bound address/port
+ self.__proxysockname = (
+ socket.inet_ntoa(resp[4:]),
+ struct.unpack(">H", resp[2:4])[0],
+ )
+ if rmtrslv != None:
+ self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
+ else:
+ self.__proxypeername = (destaddr, destport)
+
+ def __negotiatehttp(self, destaddr, destport):
+ """__negotiatehttp(self,destaddr,destport)
+ Negotiates a connection through an HTTP server.
+ """
+ # If we need to resolve locally, we do this now
+ if not self.__proxy[3]:
+ addr = socket.gethostbyname(destaddr)
+ else:
+ addr = destaddr
+ headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
+ wrote_host_header = False
+ wrote_auth_header = False
+ if self.__proxy[6] != None:
+ for key, val in self.__proxy[6].iteritems():
+ headers += [key, ": ", val, "\r\n"]
+ wrote_host_header = key.lower() == "host"
+ wrote_auth_header = key.lower() == "proxy-authorization"
+ if not wrote_host_header:
+ headers += ["Host: ", destaddr, "\r\n"]
+ if not wrote_auth_header:
+ if self.__proxy[4] != None and self.__proxy[5] != None:
+ headers += [self.__getauthheader(), "\r\n"]
+ headers.append("\r\n")
+ self.sendall("".join(headers).encode())
+ # We read the response until we get the string "\r\n\r\n"
+ resp = self.recv(1)
+ while resp.find("\r\n\r\n".encode()) == -1:
+ resp = resp + self.recv(1)
+ # We just need the first line to check if the connection
+ # was successful
+ statusline = resp.splitlines()[0].split(" ".encode(), 2)
+ if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
+ self.close()
+ raise GeneralProxyError((1, _generalerrors[1]))
+ try:
+ statuscode = int(statusline[1])
+ except ValueError:
+ self.close()
+ raise GeneralProxyError((1, _generalerrors[1]))
+ if statuscode != 200:
+ self.close()
+ raise HTTPError((statuscode, statusline[2]))
+ self.__proxysockname = ("0.0.0.0", 0)
+ self.__proxypeername = (addr, destport)
+
+    def connect(self, destpair):
+        """connect(self, destpair)
+        Connects to the specified destination through a proxy.
+        destpair - A tuple of the IP/DNS address and the port number.
+        (identical to socket's connect).
+        To select the proxy server use setproxy().
+        """
+        # Do a minimal input check first
+        if (
+            (not type(destpair) in (list, tuple))
+            or (len(destpair) < 2)
+            or (not isinstance(destpair[0], basestring))
+            or (type(destpair[1]) != int)
+        ):
+            raise GeneralProxyError((5, _generalerrors[5]))
+        # Connect to the proxy itself (default port 1080 for SOCKS,
+        # 8080 for HTTP) then negotiate a path to the real destination.
+        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
+            if self.__proxy[2] != None:
+                portnum = self.__proxy[2]
+            else:
+                portnum = 1080
+            _orgsocket.connect(self, (self.__proxy[1], portnum))
+            self.__negotiatesocks5(destpair[0], destpair[1])
+        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
+            if self.__proxy[2] != None:
+                portnum = self.__proxy[2]
+            else:
+                portnum = 1080
+            _orgsocket.connect(self, (self.__proxy[1], portnum))
+            self.__negotiatesocks4(destpair[0], destpair[1])
+        elif self.__proxy[0] == PROXY_TYPE_HTTP:
+            if self.__proxy[2] != None:
+                portnum = self.__proxy[2]
+            else:
+                portnum = 8080
+            _orgsocket.connect(self, (self.__proxy[1], portnum))
+            self.__negotiatehttp(destpair[0], destpair[1])
+        elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
+            if self.__proxy[2] != None:
+                portnum = self.__proxy[2]
+            else:
+                portnum = 8080
+            _orgsocket.connect(self, (self.__proxy[1], portnum))
+            if destpair[1] == 443:
+                # HTTPS always needs a CONNECT tunnel, even in no-tunnel mode.
+                self.__negotiatehttp(destpair[0], destpair[1])
+            else:
+                # Plain HTTP: sendall() rewrites requests instead of tunneling.
+                self.__httptunnel = False
+        elif self.__proxy[0] == None:
+            # No proxy configured: connect directly.
+            _orgsocket.connect(self, (destpair[0], destpair[1]))
+        else:
+            raise GeneralProxyError((4, _generalerrors[4]))
diff --git a/contrib/python/httplib2/py2/ya.make b/contrib/python/httplib2/py2/ya.make
new file mode 100644
index 0000000000..773b4e514b
--- /dev/null
+++ b/contrib/python/httplib2/py2/ya.make
@@ -0,0 +1,33 @@
+# Edited to peerdir certifi, dispatch between py2 and py3, remove
+# certs.txt.
+
+PY2_LIBRARY()
+
+LICENSE(MIT)
+
+VERSION(0.20.4)
+
+NO_LINT()
+
+PEERDIR(
+ contrib/python/certifi
+ contrib/python/pyparsing
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ httplib2/__init__.py
+ httplib2/auth.py
+ httplib2/certs.py
+ httplib2/error.py
+ httplib2/iri2uri.py
+ httplib2/socks.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/httplib2/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/httplib2/py3/.dist-info/METADATA b/contrib/python/httplib2/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..933bfa0bda
--- /dev/null
+++ b/contrib/python/httplib2/py3/.dist-info/METADATA
@@ -0,0 +1,75 @@
+Metadata-Version: 2.1
+Name: httplib2
+Version: 0.22.0
+Summary: A comprehensive HTTP client library.
+Home-page: https://github.com/httplib2/httplib2
+Author: Joe Gregorio
+Author-email: joe@bitworking.org
+License: MIT
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Topic :: Software Development :: Libraries
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+License-File: LICENSE
+Requires-Dist: pyparsing (<3,>=2.4.2) ; python_version < "3.0"
+Requires-Dist: pyparsing (!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2) ; python_version > "3.0"
+
+
+
+A comprehensive HTTP client library, ``httplib2`` supports many features left out of other HTTP libraries.
+
+**HTTP and HTTPS**
+ HTTPS support is only available if the socket module was compiled with SSL support.
+
+
+**Keep-Alive**
+ Supports HTTP 1.1 Keep-Alive, keeping the socket open and performing multiple requests over the same connection if possible.
+
+
+**Authentication**
+ The following three types of HTTP Authentication are supported. These can be used over both HTTP and HTTPS.
+
+ * Digest
+ * Basic
+ * WSSE
+
+**Caching**
+ The module can optionally operate with a private cache that understands the Cache-Control:
+ header and uses both the ETag and Last-Modified cache validators. Both file system
+ and memcached based caches are supported.
+
+
+**All Methods**
+ The module can handle any HTTP request method, not just GET and POST.
+
+
+**Redirects**
+ Automatically follows 3XX redirects on GETs.
+
+
+**Compression**
+ Handles both 'deflate' and 'gzip' types of compression.
+
+
+**Lost update support**
+ Automatically adds back ETags into PUT requests to resources we have already cached. This implements Section 3.2 of Detecting the Lost Update Problem Using Unreserved Checkout
+
+
+**Unit Tested**
+ A large and growing set of unit tests.
diff --git a/contrib/python/httplib2/py3/.dist-info/top_level.txt b/contrib/python/httplib2/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..fb881ece05
--- /dev/null
+++ b/contrib/python/httplib2/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+httplib2
diff --git a/contrib/python/httplib2/py3/LICENSE b/contrib/python/httplib2/py3/LICENSE
new file mode 100644
index 0000000000..ae38286693
--- /dev/null
+++ b/contrib/python/httplib2/py3/LICENSE
@@ -0,0 +1,23 @@
+Httplib2 Software License
+
+Copyright (c) 2006 by Joe Gregorio
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/httplib2/py3/README.md b/contrib/python/httplib2/py3/README.md
new file mode 100644
index 0000000000..61936996c1
--- /dev/null
+++ b/contrib/python/httplib2/py3/README.md
@@ -0,0 +1,115 @@
+Introduction
+============
+
+httplib2 is a comprehensive HTTP client library, httplib2.py supports many
+features left out of other HTTP libraries.
+
+If you want to help this project by bug report or code change, [contribution guidelines](contributing.md) may contain useful information.
+
+### HTTP and HTTPS
+
+HTTPS support is only available if the socket module was
+compiled with SSL support.
+
+### Keep-Alive
+
+Supports HTTP 1.1 Keep-Alive, keeping the socket open and
+performing multiple requests over the same connection if
+possible.
+
+### Authentication
+
+The following three types of HTTP Authentication are
+supported. These can be used over both HTTP and HTTPS.
+
+* Digest
+* Basic
+* WSSE
+
+### Caching
+
+The module can optionally operate with a private cache that
+understands the Cache-Control: header and uses both the ETag
+and Last-Modified cache validators.
+
+### All Methods
+
+The module can handle any HTTP request method, not just GET
+and POST.
+
+### Redirects
+
+Automatically follows 3XX redirects on GETs.
+
+### Compression
+
+Handles both 'deflate' and 'gzip' types of compression.
+
+### Lost update support
+
+Automatically adds back ETags into PUT requests to resources
+we have already cached. This implements Section 3.2 of
+Detecting the Lost Update Problem Using Unreserved Checkout.
+
+### Unit Tested
+
+A large and growing set of unit tests.
+
+
+Installation
+============
+
+
+ $ pip install httplib2
+
+
+Usage
+=====
+
+A simple retrieval:
+
+```python
+import httplib2
+h = httplib2.Http(".cache")
+(resp_headers, content) = h.request("http://example.org/", "GET")
+```
+
+The 'content' is the content retrieved from the URL. The content
+is already decompressed or unzipped if necessary.
+
+To PUT some content to a server that uses SSL and Basic authentication:
+
+```python
+import httplib2
+h = httplib2.Http(".cache")
+h.add_credentials('name', 'password')
+(resp, content) = h.request("https://example.org/chapter/2",
+ "PUT", body="This is text",
+ headers={'content-type':'text/plain'} )
+```
+
+Use the Cache-Control: header to control how the caching operates.
+
+```python
+import httplib2
+h = httplib2.Http(".cache")
+(resp, content) = h.request("http://bitworking.org/", "GET")
+...
+(resp, content) = h.request("http://bitworking.org/", "GET",
+ headers={'cache-control':'no-cache'})
+```
+
+The first request will be cached and since this is a request
+to bitworking.org it will be set to be cached for two hours,
+because that is how I have my server configured. Any subsequent
+GET to that URI will return the value from the on-disk cache
+and no request will be made to the server. You can use the
+Cache-Control: header to change the cache's behavior and in
+this example the second request adds the Cache-Control:
+header with a value of 'no-cache' which tells the library
+that the cached copy must not be used when handling this request.
+
+More example usage can be found at:
+
+ * https://github.com/httplib2/httplib2/wiki/Examples
+ * https://github.com/httplib2/httplib2/wiki/Examples-Python3
diff --git a/contrib/python/httplib2/py3/httplib2/__init__.py b/contrib/python/httplib2/py3/httplib2/__init__.py
new file mode 100644
index 0000000000..723a63c5b8
--- /dev/null
+++ b/contrib/python/httplib2/py3/httplib2/__init__.py
@@ -0,0 +1,1799 @@
+# -*- coding: utf-8 -*-
+"""Small, fast HTTP client library for Python."""
+
+__author__ = "Joe Gregorio (joe@bitworking.org)"
+__copyright__ = "Copyright 2006, Joe Gregorio"
+__contributors__ = [
+ "Thomas Broyer (t.broyer@ltgt.net)",
+ "James Antill",
+ "Xavier Verges Farrero",
+ "Jonathan Feinberg",
+ "Blair Zajac",
+ "Sam Ruby",
+ "Louis Nyffenegger",
+ "Mark Pilgrim",
+ "Alex Yu",
+ "Lai Han",
+]
+__license__ = "MIT"
+__version__ = "0.22.0"
+
+import base64
+import calendar
+import copy
+import email
+import email.feedparser
+from email import header
+import email.message
+import email.utils
+import errno
+from gettext import gettext as _
+import gzip
+from hashlib import md5 as _md5
+from hashlib import sha1 as _sha
+import hmac
+import http.client
+import io
+import os
+import random
+import re
+import socket
+import ssl
+import sys
+import time
+import urllib.parse
+import zlib
+
+try:
+ import socks
+except ImportError:
+ # TODO: remove this fallback and copypasted socksipy module upon py2/3 merge,
+ # idea is to have soft-dependency on any compatible module called socks
+ from . import socks
+from . import auth
+from .error import *
+from .iri2uri import iri2uri
+
+
def has_timeout(timeout):
    """Return True when *timeout* is an explicit timeout value.

    Both None and the socket module's global-default sentinel
    (``socket._GLOBAL_DEFAULT_TIMEOUT``, when present) mean "no explicit
    timeout" and yield False.
    """
    sentinel = getattr(socket, "_GLOBAL_DEFAULT_TIMEOUT", None)
    return timeout is not None and timeout is not sentinel
+
+
# Names exported by ``from httplib2 import *``; everything else is private API.
__all__ = [
    "debuglevel",
    "FailedToDecompressContent",
    "Http",
    "HttpLib2Error",
    "ProxyInfo",
    "RedirectLimit",
    "RedirectMissingLocation",
    "Response",
    "RETRIES",
    "UnimplementedDigestAuthOptionError",
    "UnimplementedHmacDigestAuthOptionError",
]

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
+
+
# Open Items:
# -----------

# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)

# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)

# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.

# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5

# Connection-specific headers that must not be forwarded or cached by default
# (see RFC 2616 section 13.5.1); extended per-response by the Connection header.
HOP_BY_HOP = [
    "connection",
    "keep-alive",
    "proxy-authenticate",
    "proxy-authorization",
    "te",
    "trailers",
    "transfer-encoding",
    "upgrade",
]

# https://tools.ietf.org/html/rfc7231#section-8.1.3
SAFE_METHODS = ("GET", "HEAD", "OPTIONS", "TRACE")

# To change, assign to `Http().redirect_codes`
REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308))
+
+
from httplib2 import certs

# Default CA bundle path; overridable per-Http via the ca_certs argument.
CA_CERTS = certs.where()

# PROTOCOL_TLS is python 3.5.3+. PROTOCOL_SSLv23 is deprecated.
# Both PROTOCOL_TLS and PROTOCOL_SSLv23 are equivalent and means:
# > Selects the highest protocol version that both the client and server support.
# > Despite the name, this option can select "TLS" protocols as well as "SSL".
# source: https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_SSLv23

# PROTOCOL_TLS_CLIENT is python 3.10.0+. PROTOCOL_TLS is deprecated.
# > Auto-negotiate the highest protocol version that both the client and server support, and configure the context client-side connections.
# > The protocol enables CERT_REQUIRED and check_hostname by default.
# source: https://docs.python.org/3.10/library/ssl.html#ssl.PROTOCOL_TLS

# Pick the newest protocol-selection constant available on this interpreter.
DEFAULT_TLS_VERSION = getattr(ssl, "PROTOCOL_TLS_CLIENT", None) or getattr(ssl, "PROTOCOL_TLS", None) or getattr(ssl, "PROTOCOL_SSLv23")
+
+
def _build_ssl_context(
    disable_ssl_certificate_validation,
    ca_certs,
    cert_file=None,
    key_file=None,
    maximum_version=None,
    minimum_version=None,
    key_password=None,
):
    """Build an ssl.SSLContext for client-side HTTPS connections.

    Args:
        disable_ssl_certificate_validation: when true, disable certificate and
            hostname verification (verify_mode becomes CERT_NONE).
        ca_certs: path to the CA bundle used for verification.
        cert_file, key_file, key_password: optional client certificate chain.
        maximum_version, minimum_version: TLS version bounds, given either as
            ssl.TLSVersion members or their string names (Python 3.7+ only).

    Raises:
        RuntimeError: when the running Python/OpenSSL lacks a required feature.
    """
    if not hasattr(ssl, "SSLContext"):
        raise RuntimeError("httplib2 requires Python 3.2+ for ssl.SSLContext")

    context = ssl.SSLContext(DEFAULT_TLS_VERSION)
    # check_hostname and verify_mode should be set in opposite order during disable
    # https://bugs.python.org/issue31431
    if disable_ssl_certificate_validation and hasattr(context, "check_hostname"):
        context.check_hostname = not disable_ssl_certificate_validation
    context.verify_mode = ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED

    # SSLContext.maximum_version and SSLContext.minimum_version are python 3.7+.
    # source: https://docs.python.org/3/library/ssl.html#ssl.SSLContext.maximum_version
    if maximum_version is not None:
        if hasattr(context, "maximum_version"):
            if isinstance(maximum_version, str):
                # Accept names like "TLSv1_2" as well as ssl.TLSVersion members.
                maximum_version = getattr(ssl.TLSVersion, maximum_version)
            context.maximum_version = maximum_version
        else:
            raise RuntimeError("setting tls_maximum_version requires Python 3.7 and OpenSSL 1.1 or newer")
    if minimum_version is not None:
        if hasattr(context, "minimum_version"):
            if isinstance(minimum_version, str):
                minimum_version = getattr(ssl.TLSVersion, minimum_version)
            context.minimum_version = minimum_version
        else:
            raise RuntimeError("setting tls_minimum_version requires Python 3.7 and OpenSSL 1.1 or newer")
    # check_hostname requires python 3.4+
    # we will perform the equivalent in HTTPSConnectionWithTimeout.connect() by calling ssl.match_hostname
    # if check_hostname is not supported.
    if hasattr(context, "check_hostname"):
        context.check_hostname = not disable_ssl_certificate_validation

    context.load_verify_locations(ca_certs)

    if cert_file:
        context.load_cert_chain(cert_file, key_file, key_password)

    return context
+
+
def _get_end2end_headers(response):
    """Return the names of the end-to-end headers present in *response*.

    Excludes the default hop-by-hop set (HOP_BY_HOP) plus any header the
    response's own Connection header declares hop-by-hop (RFC 2616 13.5.1).
    """
    excluded = set(HOP_BY_HOP)
    excluded.update(name.strip() for name in response.get("connection", "").split(","))
    return [name for name in response.keys() if name not in excluded]
+
+
# Unique sentinel distinguishing "attribute absent" from an attribute set to None.
_missing = object()
+
+
def _errno_from_exception(e):
    """Best-effort extraction of the OS errno from an exception.

    Unwraps the common wrapping conventions recursively; returns None when
    no errno can be found.
    """
    try:
        return e.errno
    except AttributeError:
        pass

    # socket.error and similar wrappers carry the real error in .args
    args = getattr(e, "args", None)
    if args:
        return _errno_from_exception(args[0])

    # pysocks.ProxyError wraps it in .socket_err
    # https://github.com/httplib2/httplib2/pull/202
    socket_err = getattr(e, "socket_err", None)
    if socket_err:
        return _errno_from_exception(socket_err)

    return None
+
+
# The reference URI-splitting regex from RFC 3986, Appendix B.
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
+
+
def parse_uri(uri):
    """Split *uri* into (scheme, authority, path, query, fragment).

    Uses the reference regex from Appendix B of RFC 3986; components that
    are absent come back as None (path comes back as an empty string).
    """
    g = URI.match(uri).groups()
    return g[1], g[3], g[4], g[6], g[8]
+
+
def urlnorm(uri):
    """Normalize *uri* for use as a cache key.

    Returns (scheme, authority, request_uri, defrag_uri) with scheme and
    authority lower-cased, an empty path replaced by "/", and the fragment
    dropped from defrag_uri.

    Raises:
        RelativeURIError: if the URI lacks a scheme or an authority.

    Fix: a second, redundant ``scheme = scheme.lower()`` has been removed.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and host are case-insensitive (RFC 3986 section 6.2.2.1).
    authority = authority.lower()
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
+
+
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r"^\w+://")  # strips the leading "scheme://"
re_unsafe = re.compile(r"[^\w\-_.()=!]+", re.ASCII)  # anything not filesystem-safe
+
+
def safename(filename):
    """Return a filename suitable for the cache.

    The result is the URL with its scheme and unsafe characters stripped,
    truncated to 90 characters, joined by a comma to the MD5 digest of the
    full original name (which guarantees uniqueness after truncation).
    """
    if isinstance(filename, bytes):
        raw = filename
        filename = filename.decode("utf-8")
    else:
        raw = filename.encode("utf-8")
    digest = _md5(raw).hexdigest()
    cleaned = re.sub(r"^\w+://", "", filename)
    cleaned = re.sub(r"[^\w\-_.()=!]+", "", cleaned, flags=re.ASCII)

    # limit length of filename (vital for Windows)
    # https://github.com/httplib2/httplib2/pull/74
    # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5>
    # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars
    # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum:
    return ",".join((cleaned[:90], digest))
+
+
# Header line folding: an optional CRLF followed by spaces/tabs (RFC 2616 LWS).
NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+")
+
+
def _normalize_headers(headers):
    """Return a copy of *headers* with keys lower-cased, bytes decoded to
    str, and folded line-continuation whitespace collapsed to single spaces.

    Bug fix: the arguments to ``NORMALIZE_SPACE.sub`` were swapped
    (``sub(value, " ")`` applied the pattern to the literal " " with the
    header value as the replacement), so whitespace runs in values were
    never collapsed and values containing backslash escapes could raise
    ``re.error``. The pattern must be applied TO the value, replacing each
    match with a single space.
    """
    return dict(
        [
            (_convert_byte_str(key).lower(), NORMALIZE_SPACE.sub(" ", _convert_byte_str(value)).strip(),)
            for (key, value) in headers.items()
        ]
    )
+
+
def _convert_byte_str(s):
    """Return *s* as text, decoding a bytes value as UTF-8 when necessary."""
    if isinstance(s, str):
        return s
    return str(s, "utf-8")
+
+
def _parse_cache_control(headers):
    """Parse the Cache-Control header into a directive dict.

    ``name=value`` directives map the lower-cased name to the lower-cased
    value; bare directives map to 1. Returns an empty dict when the header
    is absent. Valueless directives win over valued duplicates, matching
    the historical behavior.
    """
    directives = {}
    if "cache-control" in headers:
        parts = headers["cache-control"].split(",")
        for part in parts:
            if "=" in part:
                name, value = part.split("=", 1)
                directives[name.strip().lower()] = value.strip().lower()
        for part in parts:
            if "=" not in part:
                directives[part.strip().lower()] = 1
    return directives
+
+
+# Whether to use a strict mode to parse WWW-Authenticate headers
+# Might lead to bad results in case of ill-formed header value,
+# so disabled by default, falling back to relaxed parsing.
+# Set to true to turn on, useful for testing servers.
+USE_WWW_AUTH_STRICT_PARSING = 0
+
+
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns "FRESH" (serve from cache), "STALE" (revalidate before use) or
    "TRANSPARENT" (bypass the cache for this request).

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Note that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh
    """

    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if "pragma" in request_headers and request_headers["pragma"].lower().find("no-cache") != -1:
        # HTTP/1.0-style no-cache; also upgrade the request to the 1.1 form.
        retval = "TRANSPARENT"
        if "cache-control" not in request_headers:
            request_headers["cache-control"] = "no-cache"
    elif "no-cache" in cc:
        retval = "TRANSPARENT"
    elif "no-cache" in cc_response:
        retval = "STALE"
    elif "only-if-cached" in cc:
        retval = "FRESH"
    elif "date" in response_headers:
        date = calendar.timegm(email.utils.parsedate_tz(response_headers["date"]))
        now = time.time()
        current_age = max(0, now - date)
        if "max-age" in cc_response:
            try:
                freshness_lifetime = int(cc_response["max-age"])
            except ValueError:
                freshness_lifetime = 0
        elif "expires" in response_headers:
            expires = email.utils.parsedate_tz(response_headers["expires"])
            if None == expires:
                # An unparseable Expires means "already expired".
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        if "max-age" in cc:
            # The request's own max-age overrides the response's lifetime.
            try:
                freshness_lifetime = int(cc["max-age"])
            except ValueError:
                freshness_lifetime = 0
        if "min-fresh" in cc:
            try:
                min_fresh = int(cc["min-fresh"])
            except ValueError:
                min_fresh = 0
            # Pretend the entry is min-fresh seconds older than it really is.
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
+
+
def _decompressContent(response, new_content):
    """Return *new_content* decoded according to the response's Content-Encoding.

    On success the response's content-length is rewritten and the
    content-encoding header is renamed to -content-encoding so its historical
    presence is recorded without confusing later processing. Raises
    FailedToDecompressContent when the body does not match its declared
    encoding.
    """
    content = new_content
    try:
        encoding = response.get("content-encoding", None)
        if encoding in ("gzip", "deflate"):
            if encoding == "gzip":
                content = gzip.GzipFile(fileobj=io.BytesIO(new_content)).read()
            else:
                # Try zlib-wrapped (RFC 1950) first, then raw deflate
                # (RFC 1951), which some servers send despite the spec.
                try:
                    content = zlib.decompress(content, zlib.MAX_WBITS)
                except (IOError, zlib.error):
                    content = zlib.decompress(content, -zlib.MAX_WBITS)
            response["content-length"] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response["-content-encoding"] = response["content-encoding"]
            del response["content-encoding"]
    except (IOError, zlib.error):
        content = ""
        raise FailedToDecompressContent(
            _("Content purported to be compressed with %s but failed to decompress.") % response.get("content-encoding"),
            response,
            content,
        )
    return content
+
+
def _bind_write_headers(msg):
    """Return a replacement for email.generator.Generator._write_headers
    bound to *msg*.

    Used by _updateCache when msg.as_string() raises UnicodeEncodeError:
    each header value is re-emitted through email.header.Header with a
    UTF-8 charset so non-ASCII values can be serialized. Relies on the
    Generator's private _fp and _maxheaderlen attributes.
    """
    def _write_headers(self):
        # Self refers to the Generator object.
        for h, v in msg.items():
            print("%s:" % h, end=" ", file=self._fp)
            if isinstance(v, header.Header):
                # Already a Header instance; just encode with the right width.
                print(v.encode(maxlinelen=self._maxheaderlen), file=self._fp)
            else:
                # email.Header got lots of smarts, so use it.
                headers = header.Header(v, maxlinelen=self._maxheaderlen, charset="utf-8", header_name=h)
                print(headers.encode(), file=self._fp)
        # A blank line always separates headers from body.
        print(file=self._fp)

    return _write_headers
+
+
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or evict) a response body plus headers in *cache* under *cachekey*.

    response_headers must expose a ``.status`` attribute in addition to dict
    access (httplib2's Response does); a plain dict will not work here.
    No-ops when cachekey is falsy.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if "no-store" in cc or "no-store" in cc_response:
            # Either side forbids storing: drop any existing entry.
            cache.delete(cachekey)
        else:
            info = email.message.Message()
            for key, value in response_headers.items():
                # Skip transport-level headers; the stored body is already decoded.
                if key not in ["status", "content-encoding", "transfer-encoding"]:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get("vary", None)
            if vary:
                vary_headers = vary.lower().replace(" ", "").split(",")
                for header in vary_headers:
                    key = "-varied-%s" % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            # A validated 304 refreshes the stored 200 response, so store 200.
            status = response_headers.status
            if status == 304:
                status = 200

            status_header = "status: %d\r\n" % status

            try:
                header_str = info.as_string()
            except UnicodeEncodeError:
                # Fall back to a UTF-8-aware header writer for non-ASCII values.
                setattr(info, "_write_headers", _bind_write_headers(info))
                header_str = info.as_string()

            # Normalize lone CR or LF to CRLF so the entry parses back cleanly.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = b"".join([status_header.encode("utf-8"), header_str.encode("utf-8"), content])

            cache.set(cachekey, text)
+
+
def _cnonce():
    """Return a random 16-character hex client nonce for Digest auth.

    Fix: the random digits previously used ``randrange(0, 9)``, whose upper
    bound is exclusive, so the digit '9' was never selected; the range is
    now 0-9 inclusive. (Not security-critical: the value is only a client
    nonce, hashed together with the current time.)
    """
    random_digits = [
        "0123456789"[random.randrange(0, 10)] for i in range(20)
    ]
    dig = _md5(("%s:%s" % (time.ctime(), random_digits)).encode("utf-8")).hexdigest()
    return dig[:16]
+
+
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the WSSE PasswordDigest: Base64(SHA1(nonce + created + password))."""
    digest = _sha(("%s%s%s" % (cnonce, iso_now, password)).encode("utf-8")).digest()
    return base64.b64encode(digest).strip().decode("utf-8")
+
+
+# For credentials we need two things, first
+# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
+# Then we also need a list of URIs that have already demanded authentication
+# That list is tricky since sub-URIs can take the same auth, or the
+# auth scheme may change as you descend the tree.
+# So we also need each Auth instance to be able to tell us
+# how close to the 'top' it is.
+
+
class Authentication(object):
    """Base class for one authentication scheme covering a URI subtree.

    Subclasses override request() (and optionally response()) to inject the
    scheme-specific Authorization header. All instances compare unequal and
    sort arbitrarily-but-consistently so they can live in ordered containers.
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Return how many path segments *request_uri* lies below this auth's root."""
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path) :].count("/")

    def inscope(self, host, request_uri):
        """Return True when this authentication applies to host/request_uri."""
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Add the appropriate Authorization header. Override in subclasses."""
        pass

    def response(self, response, content):
        """Update internal state (e.g. nonces) from the last authorized response.

        Return True when the request should be retried — for example when
        Digest reports stale=true. Override in subclasses if necessary.
        """
        return False

    def __eq__(self, other):
        return False

    def __ne__(self, other):
        return True

    def __lt__(self, other):
        return True

    def __gt__(self, other):
        return False

    def __le__(self, other):
        return True

    def __ge__(self, other):
        return False

    def __bool__(self):
        return True
+
+
class BasicAuthentication(Authentication):
    """HTTP Basic access authentication (base64 of "user:password")."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the Basic Authorization header to *headers*."""
        token = base64.b64encode(("%s:%s" % self.credentials).encode("utf-8")).strip().decode("utf-8")
        headers["authorization"] = "Basic " + token
+
+
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        self.challenge = auth._parse_www_authenticate(response, "www-authenticate")["digest"]
        qop = self.challenge.get("qop", "auth")
        # Only qop="auth" is supported; anything else is rejected below.
        self.challenge["qop"] = ("auth" in [x.strip() for x in qop.split()]) and "auth" or None
        if self.challenge["qop"] is None:
            raise UnimplementedDigestAuthOptionError(_("Unsupported value for qop: %s." % qop))
        self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper()
        if self.challenge["algorithm"] != "MD5":
            raise UnimplementedDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        # A1 = username:realm:password per RFC 2617 section 3.2.2.2 (hashed in request()).
        self.A1 = "".join([self.credentials[0], ":", self.challenge["realm"], ":", self.credentials[1],])
        # nc: nonce count, incremented on every request that reuses the nonce.
        self.challenge["nc"] = 1

    def request(self, method, request_uri, headers, content, cnonce=None):
        """Modify the request headers"""
        # H() and KD() follow the notation of RFC 2617 section 3.2.1.
        H = lambda x: _md5(x.encode("utf-8")).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        # cnonce parameter exists so tests can pin a deterministic value.
        self.challenge["cnonce"] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(
            H(self.A1),
            "%s:%s:%s:%s:%s"
            % (
                self.challenge["nonce"],
                "%08x" % self.challenge["nc"],
                self.challenge["cnonce"],
                self.challenge["qop"],
                H(A2),
            ),
        )
        headers["authorization"] = (
            'Digest username="%s", realm="%s", nonce="%s", '
            'uri="%s", algorithm=%s, response=%s, qop=%s, '
            'nc=%08x, cnonce="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["nonce"],
            request_uri,
            self.challenge["algorithm"],
            request_digest,
            self.challenge["qop"],
            self.challenge["nc"],
            self.challenge["cnonce"],
        )
        if self.challenge.get("opaque"):
            headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"]
        self.challenge["nc"] += 1

    def response(self, response, content):
        # Without Authentication-Info, look for a stale-nonce re-challenge.
        if "authentication-info" not in response:
            challenge = auth._parse_www_authenticate(response, "www-authenticate").get("digest", {})
            if "true" == challenge.get("stale"):
                # Server says the nonce is stale: adopt the new one and retry.
                self.challenge["nonce"] = challenge["nonce"]
                self.challenge["nc"] = 1
                return True
        else:
            updated_challenge = auth._parse_authentication_info(response, "authentication-info")

            if "nextnonce" in updated_challenge:
                # Server rotated the nonce; reset the nonce count.
                self.challenge["nonce"] = updated_challenge["nextnonce"]
                self.challenge["nc"] = 1
        return False
+
+
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above.

    Fix: the key/password hashing used the removed ``md5``/``sha`` module
    API (``hashmod.new(str)``). hashlib constructors are callables that
    must be invoked directly and require bytes, and ``hmac.new`` likewise
    needs a bytes key and message; the calls below encode accordingly.
    """

    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = auth._parse_www_authenticate(response, "www-authenticate")
        self.challenge = challenge["hmacdigest"]
        # TODO: self.challenge['domain']
        self.challenge["reason"] = self.challenge.get("reason", "unauthorized")
        if self.challenge["reason"] not in ["unauthorized", "integrity"]:
            self.challenge["reason"] = "unauthorized"
        self.challenge["salt"] = self.challenge.get("salt", "")
        if not self.challenge.get("snonce"):
            raise UnimplementedHmacDigestAuthOptionError(
                _("The challenge doesn't contain a server nonce, or this one is empty.")
            )
        self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1")
        if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1")
        if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for pw-algorithm: %s." % self.challenge["pw-algorithm"])
            )
        if self.challenge["algorithm"] == "HMAC-MD5":
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge["pw-algorithm"] == "MD5":
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # key = username ":" H(password + salt) ":" realm, hashed once more below.
        self.key = "".join(
            [
                self.credentials[0],
                ":",
                self.pwhashmod("".join([self.credentials[1], self.challenge["salt"]]).encode("utf-8"))
                .hexdigest()
                .lower(),
                ":",
                self.challenge["realm"],
            ]
        )
        self.key = self.pwhashmod(self.key.encode("utf-8")).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge["snonce"], headers_val,)
        # hmac.new requires bytes for both the key and the message.
        request_digest = (
            hmac.new(self.key.encode("utf-8"), request_digest.encode("utf-8"), self.hashmod).hexdigest().lower()
        )
        headers["authorization"] = (
            'HMACDigest username="%s", realm="%s", snonce="%s",'
            ' cnonce="%s", uri="%s", created="%s", '
            'response="%s", headers="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["snonce"],
            cnonce,
            request_uri,
            created,
            request_digest,
            keylist,
        )

    def response(self, response, content):
        """Retry when the server reports an integrity failure or a stale nonce."""
        challenge = auth._parse_www_authenticate(response, "www-authenticate").get("hmacdigest", {})
        if challenge.get("reason") in ["integrity", "stale"]:
            return True
        return False
+
+
class WsseAuthentication(Authentication):
    """WSSE UsernameToken authentication.

    This is thinly tested and should not be relied upon. At this time
    there isn't any third party server to test against. Blogger and
    TypePad implemented this algorithm at one point but Blogger has since
    switched to Basic over HTTPS and TypePad has implemented it wrong, by
    never issuing a 401 challenge but instead requiring your client to
    telepathically know that their endpoint is expecting WSSE
    profile="UsernameToken"."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the WSSE Authorization and X-WSSE headers to *headers*."""
        headers["authorization"] = 'WSSE profile="UsernameToken"'
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers["X-WSSE"] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
            self.credentials[0],
            password_digest,
            cnonce,
            iso_now,
        )
+
+
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin authentication (a service long deprecated by Google).

    Performs the ClientLogin POST during construction and replays the
    resulting Auth token on every request.
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib.parse import urlencode

        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = auth._parse_www_authenticate(response, "www-authenticate")
        service = challenge["googlelogin"].get("service", "xapi")
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == "xapi" and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        # elif request_uri.find("spreadsheets") > 0:
        #   service = "wise"

        # Bug fix: this dict was previously bound to the name ``auth``, which
        # made ``auth`` a local for the whole method and caused the
        # auth._parse_www_authenticate call above to raise UnboundLocalError.
        login_params = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers["user-agent"],)
        resp, content = self.http.request(
            "https://www.google.com/accounts/ClientLogin",
            method="POST",
            body=urlencode(login_params),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        # NOTE(review): content may be bytes here, in which case split("\n")
        # would fail — confirm before relying on this legacy scheme.
        lines = content.split("\n")
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login rejected: fall through with an empty token.
            self.Auth = ""
        else:
            self.Auth = d["Auth"]

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers["authorization"] = "GoogleLogin Auth=" + self.Auth
+
+
# Registry mapping the scheme token from WWW-Authenticate to its handler class.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication,
}

# Preference order when a server offers several schemes (tried first to last).
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
+
+
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.

    Fixes: the cache directory is created with ``exist_ok=True`` (the old
    exists()/makedirs() pair raced with concurrent creation), and files are
    opened via ``with`` so handles are always closed.
    """

    def __init__(self, cache, safe=safename):  # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        self.cache = cache
        # safe maps an arbitrary key (URL) to a filesystem-safe filename.
        self.safe = safe
        os.makedirs(self.cache, exist_ok=True)

    def get(self, key):
        """Return the cached bytes for *key*, or None if absent/unreadable."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            with open(cacheFullPath, "rb") as f:
                return f.read()
        except IOError:
            return None

    def set(self, key, value):
        """Store bytes *value* under *key*, replacing any existing entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        with open(cacheFullPath, "wb") as f:
            f.write(value)

    def delete(self, key):
        """Remove the entry for *key*, if present."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
+
+
class Credentials(object):
    """A pool of (name, password) pairs, each optionally scoped to a domain."""

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register credentials; an empty domain matches every host."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget all stored credentials."""
        self.credentials = []

    def iter(self, domain):
        """Yield the (name, password) pairs applicable to *domain*."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
+
+
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""

    def add(self, key, cert, domain, password):
        """Register a client key/cert (plus key password) for *domain*."""
        self.credentials.append((domain.lower(), key, cert, password))

    def iter(self, domain):
        """Yield the (key, cert, password) tuples applicable to *domain*."""
        for stored_domain, key, cert, password in self.credentials:
            if stored_domain in ("", domain):
                yield (key, cert, password)
+
+
class AllHosts(object):
    # Sentinel assigned to ProxyInfo.bypass_hosts meaning "bypass the proxy
    # for every host" (used when no_proxy is a single '*').
    pass
+
+
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    # Hosts for which the proxy is skipped; () means "never skip",
    # the AllHosts sentinel means "always skip".
    bypass_hosts = ()

    def __init__(
        self, proxy_type, proxy_host, proxy_port, proxy_rdns=True, proxy_user=None, proxy_pass=None, proxy_headers=None,
    ):
        """Args:

        proxy_type: one of the socks.PROXY_TYPE_XXX constants, e.g.
            ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
                      proxy_host='localhost', proxy_port=8000).
        proxy_host: hostname or IP address of the proxy server.
        proxy_port: port the proxy server listens on.
        proxy_rdns: when True (default), DNS resolution is delegated to the
            proxy instead of being done locally; useful when the local
            network cannot resolve non-local names. (Defaulted to False in
            httplib2 0.9 and earlier.)
        proxy_user: username for proxy authentication.
        proxy_pass: password for proxy authentication.
        proxy_headers: additional or modified headers for the proxy CONNECT
            request.
        """
        # Accept bytes credentials for convenience; store them as text.
        if isinstance(proxy_user, bytes):
            proxy_user = proxy_user.decode()
        if isinstance(proxy_pass, bytes):
            proxy_pass = proxy_pass.decode()
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        self.proxy_headers = proxy_headers

    def astuple(self):
        """Return settings as (type, host, port, rdns, user, pass, headers)."""
        return (
            self.proxy_type,
            self.proxy_host,
            self.proxy_port,
            self.proxy_rdns,
            self.proxy_user,
            self.proxy_pass,
            self.proxy_headers,
        )

    def isgood(self):
        """True when proxy support is importable and host/port are configured."""
        return socks and (self.proxy_host is not None) and (self.proxy_port is not None)

    def applies_to(self, hostname):
        """True when requests to *hostname* should go through this proxy."""
        return not self.bypass_host(hostname)

    def bypass_host(self, hostname):
        """Has this host been excluded from the proxy config"""
        if self.bypass_hosts is AllHosts:
            return True

        dotted = "." + hostname.lstrip(".")
        for skip_name in self.bypass_hosts:
            if skip_name.startswith(".") and dotted.endswith(skip_name):
                return True  # *.suffix match
            if dotted == "." + skip_name:
                return True  # exact match
        return False

    def __repr__(self):
        return (
            "<ProxyInfo type={p.proxy_type} "
            "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}"
            + " user={p.proxy_user} headers={p.proxy_headers}>"
        ).format(p=self)
+
+
def proxy_info_from_environment(method="http"):
    """Build a ProxyInfo from the http_proxy/https_proxy environment variables.

    Returns None when *method* is not "http"/"https" or when no proxy
    variable is set (or it is empty).
    """
    if method not in ("http", "https"):
        return
    env_var = method + "_proxy"
    # The lower-case variable wins; the upper-case spelling is the fallback.
    url = os.environ.get(env_var, os.environ.get(env_var.upper()))
    if not url:
        return
    return proxy_info_from_url(url, method, noproxy=None)
+
+
def proxy_info_from_url(url, method="http", noproxy=None):
    """Construct a ProxyInfo from a proxy URL (such as the http_proxy env var).

    *noproxy* is a comma-separated bypass list; None means "read it from
    the no_proxy/NO_PROXY environment variables", and a single "*" bypasses
    every host.
    """
    parsed = urllib.parse.urlparse(url)

    pi = ProxyInfo(
        proxy_type=3,  # socks.PROXY_TYPE_HTTP
        proxy_host=parsed.hostname,
        proxy_port=parsed.port or dict(https=443, http=80)[method],
        proxy_user=parsed.username or None,
        proxy_pass=parsed.password or None,
        proxy_headers=None,
    )

    bypass = []
    if noproxy is None:
        # No explicit bypass list given: respect the environment.
        noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", ""))
    if noproxy == "*":
        bypass = AllHosts
    elif noproxy.strip():
        # Drop empty entries produced by stray commas.
        bypass = tuple(host for host in noproxy.split(",") if host)

    pi.bypass_hosts = bypass
    return pi
+
+
class HTTPConnectionWithTimeout(http.client.HTTPConnection):
    """HTTPConnection subclass that supports timeouts

    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, timeout=None, proxy_info=None):
        http.client.HTTPConnection.__init__(self, host, port=port, timeout=timeout)

        self.proxy_info = proxy_info
        # proxy_info may be a factory callable (e.g. proxy_info_from_environment);
        # resolve it for the "http" scheme here.
        if proxy_info and not isinstance(proxy_info, ProxyInfo):
            self.proxy_info = proxy_info("http")

    def connect(self):
        """Connect to the host and port specified in __init__."""
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError("Proxy support missing but proxy use was requested!")
        if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host):
            use_proxy = True
            (
                proxy_type,
                proxy_host,
                proxy_port,
                proxy_rdns,
                proxy_user,
                proxy_pass,
                proxy_headers,
            ) = self.proxy_info.astuple()

            # Resolve and dial the proxy endpoint instead of the origin.
            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port
            proxy_type = None

        socket_err = None

        # Try each resolved address family/address in order; keep the last
        # socket error so it can be re-raised if every attempt fails.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if use_proxy:
                    # socksocket tunnels through the proxy configured via
                    # setproxy(); the connect() below still names the origin.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(
                        proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass,
                    )
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                if self.debuglevel > 0:
                    print("connect: ({0}, {1}) ************".format(self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: {0} ************".format(
                                str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                            )
                        )

                self.sock.connect((self.host, self.port) + sa[2:])
            except socket.error as e:
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: ({0}, {1})".format(self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: {0}".format(
                                str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                            )
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; surface the last error seen.
            raise socket_err
+
+
class HTTPSConnectionWithTimeout(http.client.HTTPSConnection):
    """This class allows communication via SSL.

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        tls_maximum_version=None,
        tls_minimum_version=None,
        key_password=None,
    ):

        self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
        # Fall back to the bundled CA bundle when the caller supplies none.
        self.ca_certs = ca_certs if ca_certs else CA_CERTS

        self.proxy_info = proxy_info
        # proxy_info may be a callable taking the scheme; resolve it for https.
        if proxy_info and not isinstance(proxy_info, ProxyInfo):
            self.proxy_info = proxy_info("https")

        context = _build_ssl_context(
            self.disable_ssl_certificate_validation,
            self.ca_certs,
            cert_file,
            key_file,
            maximum_version=tls_maximum_version,
            minimum_version=tls_minimum_version,
            key_password=key_password,
        )
        super(HTTPSConnectionWithTimeout, self).__init__(
            host, port=port, timeout=timeout, context=context,
        )
        # Kept for introspection; the SSL context above already consumed them.
        self.key_file = key_file
        self.cert_file = cert_file
        self.key_password = key_password

    def connect(self):
        """Connect to a host on a given (SSL) port."""
        if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host):
            use_proxy = True
            (
                proxy_type,
                proxy_host,
                proxy_port,
                proxy_rdns,
                proxy_user,
                proxy_pass,
                proxy_headers,
            ) = self.proxy_info.astuple()

            # Resolve and connect to the proxy endpoint, not the origin.
            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port
            proxy_type = None
            proxy_headers = None

        socket_err = None

        address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
        for family, socktype, proto, canonname, sockaddr in address_info:
            try:
                if use_proxy:
                    # socksocket tunnels to (self.host, self.port) through the proxy.
                    sock = socks.socksocket(family, socktype, proto)

                    sock.setproxy(
                        proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass,
                    )
                else:
                    sock = socket.socket(family, socktype, proto)
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                if has_timeout(self.timeout):
                    sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))

                # server_hostname enables SNI and, when validation is on,
                # the context's own hostname check.
                self.sock = self._context.wrap_socket(sock, server_hostname=self.host)

                # Python 3.3 compatibility: emulate the check_hostname behavior
                if not hasattr(self._context, "check_hostname") and not self.disable_ssl_certificate_validation:
                    try:
                        ssl.match_hostname(self.sock.getpeercert(), self.host)
                    except Exception:
                        self.sock.shutdown(socket.SHUT_RDWR)
                        self.sock.close()
                        raise

                if self.debuglevel > 0:
                    print("connect: ({0}, {1})".format(self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: {0}".format(
                                str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                            )
                        )
            except (ssl.SSLError, ssl.CertificateError) as e:
                # TLS-level failures are fatal: do not try other addresses.
                if sock:
                    sock.close()
                if self.sock:
                    self.sock.close()
                self.sock = None
                raise
            except (socket.timeout, socket.gaierror):
                raise
            except socket.error as e:
                # Plain TCP errors: remember and try the next resolved address.
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: ({0}, {1})".format(self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: {0}".format(
                                str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                            )
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # NOTE(review): socket_err is None if getaddrinfo yielded nothing;
            # in practice getaddrinfo raises gaierror instead — confirm.
            raise socket_err
+
+
# URL scheme -> connection class; Http.request() consults this table when no
# explicit connection_type is supplied.
SCHEME_TO_CONNECTION = {
    "http": HTTPConnectionWithTimeout,
    "https": HTTPSConnectionWithTimeout,
}
+
+
+class Http(object):
+ """An HTTP client that handles:
+
+ - all methods
+ - caching
+ - ETags
+ - compression,
+ - HTTPS
+ - Basic
+ - Digest
+ - WSSE
+
+ and more.
+ """
+
    def __init__(
        self,
        cache=None,
        timeout=None,
        proxy_info=proxy_info_from_environment,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        tls_maximum_version=None,
        tls_minimum_version=None,
    ):
        """If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout

        `proxy_info` may be:
          - a callable that takes the http scheme ('http' or 'https') and
            returns a ProxyInfo instance per request. By default, uses
            proxy_info_from_environment.
          - a ProxyInfo instance (static proxy config).
          - None (proxy disabled).

        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation.  By default, a CA cert file bundled with
        httplib2 is used.

        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.

        tls_maximum_version / tls_minimum_version require Python 3.7+ /
        OpenSSL 1.1.0g+. A value of "TLSv1_3" requires OpenSSL 1.1.1+.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
        self.tls_maximum_version = tls_maximum_version
        self.tls_minimum_version = tls_minimum_version
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, str):
            self.cache = FileCache(cache)
        else:
            self.cache = cache

        # Name/password
        self.credentials = Credentials()

        # Key/cert
        self.certificates = KeyCerts()

        # authorization objects
        self.authorizations = []

        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True

        self.redirect_codes = REDIRECT_CODES

        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]

        self.safe_methods = list(SAFE_METHODS)

        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False

        # When True, cached ETags are not sent as If-None-Match validators.
        self.ignore_etag = False

        # When True, exceptions in request() become synthetic 4xx/5xx responses.
        self.force_exception_to_status_code = False

        self.timeout = timeout

        # Keep Authorization: headers on a redirect.
        self.forward_authorization_headers = False
+
+ def close(self):
+ """Close persistent connections, clear sensitive data.
+ Not thread-safe, requires external synchronization against concurrent requests.
+ """
+ existing, self.connections = self.connections, {}
+ for _, c in existing.items():
+ c.close()
+ self.certificates.clear()
+ self.clear_credentials()
+
+ def __getstate__(self):
+ state_dict = copy.copy(self.__dict__)
+ # In case request is augmented by some foreign object such as
+ # credentials which handle auth
+ if "request" in state_dict:
+ del state_dict["request"]
+ if "connections" in state_dict:
+ del state_dict["connections"]
+ return state_dict
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.connections = {}
+
    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
        that can be applied to requests.
        """
        challenges = auth._parse_www_authenticate(response, "www-authenticate")
        # For every credential registered for this host, yield a handler for
        # each challenged scheme, in our preference order.
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                if scheme in challenges:
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
+
    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication."""
        # NOTE(review): an empty domain appears to apply the credentials to
        # every host — confirm against Credentials.iter (defined elsewhere).
        self.credentials.add(name, password, domain)
+
    def add_certificate(self, key, cert, domain, password=None):
        """Add a key and cert that will be used
        any time a request requires authentication."""
        self.certificates.add(key, cert, domain, password)
+
    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        # Cached Authorization objects hold the old credentials; drop them too.
        self.authorizations = []
+
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Send one request on `conn` and return (Response, content bytes).

        Retries up to RETRIES times on stale connections and transient
        socket errors; a BadStatusLine on the very first attempt gets one
        extra free retry (the connection simply went stale).
        """
        i = 0
        seen_bad_status_line = False
        while i < RETRIES:
            i += 1
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                conn.close()
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except socket.error as e:
                errno_ = _errno_from_exception(e)
                if errno_ in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
                    continue  # retry on potentially transient errors
                raise
            except http.client.HTTPException:
                if conn.sock is None:
                    if i < RETRIES - 1:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i < RETRIES - 1:
                    conn.close()
                    conn.connect()
                    continue
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                pass
            try:
                response = conn.getresponse()
            except (http.client.BadStatusLine, http.client.ResponseNotReady):
                # If we get a BadStatusLine on the first try then that means
                # the connection just went stale, so retry regardless of the
                # number of RETRIES set.
                if not seen_bad_status_line and i == 1:
                    i = 0
                    seen_bad_status_line = True
                    conn.close()
                    conn.connect()
                    continue
                else:
                    conn.close()
                    raise
            except socket.timeout:
                raise
            except (socket.error, http.client.HTTPException):
                conn.close()
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = b""
                if method == "HEAD":
                    # HEAD has no body; close so the connection isn't left
                    # with an unread (empty) response.
                    conn.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)

            break
        return (response, content)
+
    def _request(
        self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey,
    ):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""

        # NOTE(review): the comprehension's loop variable shadows the
        # module-level `auth` helper module; `auth` is rebound to the chosen
        # Authorization object (or None) on the next line.
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)

        (response, content) = self._conn_request(conn, request_uri, method, body, headers)

        if auth:
            # e.g. a stale digest nonce: re-sign and retry once.
            if auth.response(response, body):
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers)
                response._stale_digest = 1

        if response.status == 401:
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers)
                if response.status != 401:
                    # Remember the working authorization for future requests.
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break

        if self.follow_all_redirects or method in self.safe_methods or response.status in (303, 308):
            if self.follow_redirects and response.status in self.redirect_codes:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if "location" not in response and response.status != 300:
                        raise RedirectMissingLocation(
                            _("Redirected but the response is missing a Location: header."), response, content,
                        )
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if "location" in response:
                        location = response["location"]
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response["location"] = urllib.parse.urljoin(absolute_uri, location)
                    if response.status == 308 or (response.status == 301 and (method in self.safe_methods)):
                        # Permanent redirect: remember the target in the cache.
                        response["-x-permanent-redirect-url"] = response["location"]
                        if "content-location" not in response:
                            response["content-location"] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    if "if-none-match" in headers:
                        del headers["if-none-match"]
                    if "if-modified-since" in headers:
                        del headers["if-modified-since"]
                    if "authorization" in headers and not self.forward_authorization_headers:
                        del headers["authorization"]
                    if "location" in response:
                        location = response["location"]
                        old_response = copy.deepcopy(response)
                        if "content-location" not in old_response:
                            old_response["content-location"] = absolute_uri
                        redirect_method = method
                        if response.status in [302, 303]:
                            # 302/303 redirect with GET and no body.
                            redirect_method = "GET"
                            body = None
                        (response, content) = self.request(
                            location, method=redirect_method, body=body, headers=headers, redirections=redirections - 1,
                        )
                        response.previous = old_response
                else:
                    raise RedirectLimit(
                        "Redirected more times than redirection_limit allows.", response, content,
                    )
            elif response.status in [200, 203] and method in self.safe_methods:
                # Don't cache 206's since we aren't going to handle byte range requests
                if "content-location" not in response:
                    response["content-location"] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)

        return (response, content)
+
    def _normalize_headers(self, headers):
        # Delegate to the module-level helper of the same name; kept as a
        # method so subclasses can override normalization.
        return _normalize_headers(headers)
+
+ # Need to catch and rebrand some exceptions
+ # Then need to optionally turn all exceptions into status codes
+ # including all socket.* and httplib.* exceptions.
+
    def request(
        self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None,
    ):
        """Performs a single HTTP request.

        The 'uri' is the URI of the HTTP resource and can begin with either
        'http' or 'https'. The value of 'uri' must be an absolute URI.

        The 'method' is the HTTP method to perform, such as GET, POST,
        DELETE, etc. There is no restriction on the methods allowed.

        The 'body' is the entity body to be sent with the request. It is a
        string object.

        Any extra headers that are to be sent with the request should be
        provided in the 'headers' dictionary.

        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.

        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        conn_key = ""

        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)

            if "user-agent" not in headers:
                headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__

            uri = iri2uri(uri)
            # Prevent CWE-75 space injection to manipulate request via part of uri.
            # Prevent CWE-93 CRLF injection to modify headers via part of uri.
            uri = uri.replace(" ", "%20").replace("\r", "%0D").replace("\n", "%0A")

            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)

            # One pooled connection per scheme:authority pair.
            conn_key = scheme + ":" + authority
            conn = self.connections.get(conn_key)
            if conn is None:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if issubclass(connection_type, HTTPSConnectionWithTimeout):
                    if certs:
                        conn = self.connections[conn_key] = connection_type(
                            authority,
                            key_file=certs[0][0],
                            cert_file=certs[0][1],
                            timeout=self.timeout,
                            proxy_info=self.proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
                            tls_maximum_version=self.tls_maximum_version,
                            tls_minimum_version=self.tls_minimum_version,
                            key_password=certs[0][2],
                        )
                    else:
                        conn = self.connections[conn_key] = connection_type(
                            authority,
                            timeout=self.timeout,
                            proxy_info=self.proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
                            tls_maximum_version=self.tls_maximum_version,
                            tls_minimum_version=self.tls_minimum_version,
                        )
                else:
                    conn = self.connections[conn_key] = connection_type(
                        authority, timeout=self.timeout, proxy_info=self.proxy_info
                    )
                conn.set_debuglevel(debuglevel)

            if "range" not in headers and "accept-encoding" not in headers:
                headers["accept-encoding"] = "gzip, deflate"

            info = email.message.Message()
            cachekey = None
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    try:
                        # Cache entries are stored as headers + blank line + body.
                        info, content = cached_value.split(b"\r\n\r\n", 1)
                        info = email.message_from_bytes(info)
                        for k, v in info.items():
                            if v.startswith("=?") and v.endswith("?="):
                                info.replace_header(k, str(*email.header.decode_header(v)[0]))
                    except (IndexError, ValueError):
                        # Corrupt cache entry: discard it and continue uncached.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None

            if (
                method in self.optimistic_concurrency_methods
                and self.cache
                and "etag" in info
                and not self.ignore_etag
                and "if-match" not in headers
            ):
                # http://www.w3.org/1999/04/Editing/
                headers["if-match"] = info["etag"]

            # https://tools.ietf.org/html/rfc7234
            # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location
            # when a non-error status code is received in response to an unsafe request method.
            if self.cache and cachekey and method not in self.safe_methods:
                self.cache.delete(cachekey)

            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in self.safe_methods and "vary" in info:
                vary = info["vary"]
                vary_headers = vary.lower().replace(" ", "").split(",")
                for header in vary_headers:
                    key = "-varied-%s" % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        cached_value = None
                        break

            if (
                self.cache
                and cached_value
                and (method in self.safe_methods or info["status"] == "308")
                and "range" not in headers
            ):
                redirect_method = method
                if info["status"] not in ("307", "308"):
                    # Only 307/308 preserve the original method on redirect.
                    redirect_method = "GET"
                if "-x-permanent-redirect-url" in info:
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit(
                            "Redirected more times than redirection_limit allows.", {}, "",
                        )
                    (response, new_content) = self.request(
                        info["-x-permanent-redirect-url"],
                        method=redirect_method,
                        headers=headers,
                        redirections=redirections - 1,
                    )
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)

                    if entry_disposition == "FRESH":
                        response = Response(info)
                        response.fromcache = True
                        return (response, content)

                    if entry_disposition == "STALE":
                        if "etag" in info and not self.ignore_etag and not "if-none-match" in headers:
                            headers["if-none-match"] = info["etag"]
                        if "last-modified" in info and not "last-modified" in headers:
                            headers["if-modified-since"] = info["last-modified"]
                    elif entry_disposition == "TRANSPARENT":
                        pass

                    (response, new_content) = self._request(
                        conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
                    )

                    if response.status == 304 and method == "GET":
                        # Rewrite the cache entry with the new end-to-end headers
                        # Take all headers that are in response
                        # and overwrite their values in info.
                        # unless they are hop-by-hop, or are listed in the connection header.

                        for key in _get_end2end_headers(response):
                            info[key] = response[key]
                        merged_response = Response(info)
                        if hasattr(response, "_stale_digest"):
                            merged_response._stale_digest = response._stale_digest
                        _updateCache(headers, merged_response, content, self.cache, cachekey)
                        response = merged_response
                        response.status = 200
                        response.fromcache = True

                    elif response.status == 200:
                        content = new_content
                    else:
                        self.cache.delete(cachekey)
                        content = new_content
            else:
                cc = _parse_cache_control(headers)
                if "only-if-cached" in cc:
                    # RFC 7234: only-if-cached with no cached entry -> 504.
                    info["status"] = "504"
                    response = Response(info)
                    content = b""
                else:
                    (response, content) = self._request(
                        conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
                    )
        except Exception as e:
            is_timeout = isinstance(e, socket.timeout)
            if is_timeout:
                # Drop the pooled connection; it is in an unknown state.
                conn = self.connections.pop(conn_key, None)
                if conn:
                    conn.close()

            if self.force_exception_to_status_code:
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = b"Request Timeout"
                    response = Response({"content-type": "text/plain", "status": "408", "content-length": len(content),})
                    response.reason = "Request Timeout"
                else:
                    content = str(e).encode("utf-8")
                    response = Response({"content-type": "text/plain", "status": "400", "content-length": len(content),})
                    response.reason = "Bad Request"
            else:
                raise

        return (response, content)
+
+
class Response(dict):
    """An object more like email.message than httplib.HTTPResponse."""

    """Is this response from our local cache"""
    fromcache = False
    """HTTP protocol version used by server.

    10 for HTTP/1.0, 11 for HTTP/1.1.
    """
    version = 11

    "Status code returned by server. "
    status = 200
    """Reason phrase returned by server."""
    reason = "Ok"

    previous = None

    def __init__(self, info):
        # info is either an email.message or
        # an httplib.HTTPResponse object.
        if isinstance(info, http.client.HTTPResponse):
            for key, value in info.getheaders():
                key = key.lower()
                prev = self.get(key)
                if prev is not None:
                    # Fold duplicate headers into one comma-separated value.
                    value = ", ".join((prev, value))
                self[key] = value
            self.status = info.status
            self["status"] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.message.Message):
            for key, value in list(info.items()):
                self[key.lower()] = value
            self.status = int(self["status"])
        else:
            # Otherwise assume a plain mapping of header name -> value.
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self.get("status", self.status))

    def __getattr__(self, name):
        # Allow `resp.dict` as an alias for the mapping itself.
        if name == "dict":
            return self
        else:
            raise AttributeError(name)
diff --git a/contrib/python/httplib2/py3/httplib2/auth.py b/contrib/python/httplib2/py3/httplib2/auth.py
new file mode 100644
index 0000000000..b8028ae2a7
--- /dev/null
+++ b/contrib/python/httplib2/py3/httplib2/auth.py
@@ -0,0 +1,69 @@
+import base64
+import re
+
+import pyparsing as pp
+
+from .error import *
+
+
try:  # pyparsing>=3.0.0
    downcaseTokens = pp.common.downcaseTokens
except AttributeError:
    downcaseTokens = pp.downcaseTokens

UNQUOTE_PAIRS = re.compile(r"\\(.)")
# Strip the surrounding quotes and unescape backslash pairs in a quoted-string.
unquote = lambda s, l, t: UNQUOTE_PAIRS.sub(r"\1", t[0][1:-1])

# https://tools.ietf.org/html/rfc7235#section-1.2
# https://tools.ietf.org/html/rfc7235#appendix-B
tchar = "!#$%&'*+-.^_`|~" + pp.nums + pp.alphas
token = pp.Word(tchar).setName("token")
token68 = pp.Combine(pp.Word("-._~+/" + pp.nums + pp.alphas) + pp.Optional(pp.Word("=").leaveWhitespace())).setName(
    "token68"
)

quoted_string = pp.dblQuotedString.copy().setName("quoted-string").setParseAction(unquote)
# Parameter names are case-insensitive; normalize them to lowercase.
auth_param_name = token.copy().setName("auth-param-name").addParseAction(downcaseTokens)
auth_param = auth_param_name + pp.Suppress("=") + (quoted_string | token)
params = pp.Dict(pp.delimitedList(pp.Group(auth_param)))

scheme = token("scheme")
# A challenge is a scheme followed by either auth-params or a token68 blob.
challenge = scheme + (params("params") | token68("token"))

authentication_info = params.copy()
www_authenticate = pp.delimitedList(pp.Group(challenge))
+
+
def _parse_authentication_info(headers, headername="authentication-info"):
    """Parse an Authentication-Info header (https://tools.ietf.org/html/rfc7615).

    Returns a dict of lowercase auth-param name -> value; an absent or
    empty header yields {}. Raises MalformedHeader on a parse failure.
    """
    header = headers.get(headername, "").strip()
    if not header:
        return {}
    try:
        parsed = authentication_info.parseString(header)
    except pp.ParseException as ex:
        # Chain the pyparsing error so its position/message survive (PEP 3134)
        # instead of being silently discarded.
        raise MalformedHeader(headername) from ex

    return parsed.asDict()
+
+
def _parse_www_authenticate(headers, headername="www-authenticate"):
    """Returns a dictionary of dictionaries, one dict per auth_scheme.

    Scheme names are lowercased keys; each value is the dict of the
    challenge's auth-params, or {"token": <token68>} for token68-style
    challenges. Raises MalformedHeader on a parse failure.
    """
    header = headers.get(headername, "").strip()
    if not header:
        return {}
    try:
        parsed = www_authenticate.parseString(header)
    except pp.ParseException as ex:
        # Chain the pyparsing error so its position/message survive (PEP 3134)
        # instead of being silently discarded.
        raise MalformedHeader(headername) from ex

    retval = {
        challenge["scheme"].lower(): challenge["params"].asDict()
        if "params" in challenge
        else {"token": challenge.get("token")}
        for challenge in parsed
    }
    return retval
diff --git a/contrib/python/httplib2/py3/httplib2/certs.py b/contrib/python/httplib2/py3/httplib2/certs.py
new file mode 100644
index 0000000000..59d1ffc702
--- /dev/null
+++ b/contrib/python/httplib2/py3/httplib2/certs.py
@@ -0,0 +1,42 @@
+"""Utilities for certificate management."""
+
+import os
+
+certifi_available = False
+certifi_where = None
+try:
+ from certifi import where as certifi_where
+ certifi_available = True
+except ImportError:
+ pass
+
+custom_ca_locater_available = False
+custom_ca_locater_where = None
+try:
+ from ca_certs_locater import get as custom_ca_locater_where
+ custom_ca_locater_available = True
+except ImportError:
+ pass
+
+
+BUILTIN_CA_CERTS = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "cacerts.txt"
+)
+
+
def where():
    """Return the path of the CA bundle to use.

    Precedence: HTTPLIB2_CA_CERTS env var, then a ca_certs_locater plugin,
    then certifi, then the bundled cacerts.txt.
    """
    env_path = os.environ.get("HTTPLIB2_CA_CERTS")
    if env_path is not None:
        if not os.path.isfile(env_path):
            raise RuntimeError("Environment variable HTTPLIB2_CA_CERTS not a valid file")
        return env_path
    if custom_ca_locater_available:
        return custom_ca_locater_where()
    if certifi_available:
        return certifi_where()
    return BUILTIN_CA_CERTS
+
+
+if __name__ == "__main__":
+ print(where())
diff --git a/contrib/python/httplib2/py3/httplib2/error.py b/contrib/python/httplib2/py3/httplib2/error.py
new file mode 100644
index 0000000000..0e68c12a85
--- /dev/null
+++ b/contrib/python/httplib2/py3/httplib2/error.py
@@ -0,0 +1,48 @@
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    """Base class for all errors raised by httplib2."""
    pass
+
+
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    """Error that carries the (response, content) pair it was raised for."""

    def __init__(self, desc, response, content):
        # Keep the failed exchange available to callers (used by
        # Http.force_exception_to_status_code handling).
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)
+
+
class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    """Raised when a redirect response lacks a Location: header."""
    pass
+
+
class RedirectLimit(HttpLib2ErrorWithResponse):
    """Raised when more redirects occur than `redirections` allows."""
    pass
+
+
class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    """HttpLib2ErrorWithResponse raised for content-decoding failures."""
    pass
+
+
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """HttpLib2ErrorWithResponse used by the Digest auth handler."""
    pass
+
+
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """HttpLib2ErrorWithResponse used by the HMAC-Digest auth handler."""
    pass
+
+
class MalformedHeader(HttpLib2Error):
    """Raised when an auth-related header cannot be parsed."""
    pass
+
+
class RelativeURIError(HttpLib2Error):
    """HttpLib2Error subclass for URIs that are required to be absolute."""
    pass
+
+
class ServerNotFoundError(HttpLib2Error):
    """Raised when the server's hostname cannot be resolved."""
    pass
+
+
class ProxiesUnavailableError(HttpLib2Error):
    """Raised when a proxy is configured but SOCKS support is missing."""
    pass
diff --git a/contrib/python/httplib2/py3/httplib2/iri2uri.py b/contrib/python/httplib2/py3/httplib2/iri2uri.py
new file mode 100644
index 0000000000..86e361e62a
--- /dev/null
+++ b/contrib/python/httplib2/py3/httplib2/iri2uri.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+"""Converts an IRI to a URI."""
+
+__author__ = "Joe Gregorio (joe@bitworking.org)"
+__copyright__ = "Copyright 2006, Joe Gregorio"
+__contributors__ = []
+__version__ = "1.0.0"
+__license__ = "MIT"
+
+import urllib.parse
+
+# Convert an IRI to a URI following the rules in RFC 3987
+#
+# The characters we need to enocde and escape are defined in the spec:
+#
+# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
+# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
+# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
+# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
+# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
+# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
+# / %xD0000-DFFFD / %xE1000-EFFFD
+
# Inclusive Unicode code point ranges (ucschar / iprivate from RFC 3987),
# sorted ascending, whose characters must be percent-encoded for a URI.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]


def encode(c):
    """Return `c` (a single character) percent-encoded as its UTF-8 octets
    if it falls in an IRI escape range, otherwise `c` unchanged.
    """
    retval = c
    i = ord(c)
    for low, high in escape_range:
        if i < low:
            # Ranges are sorted ascending, so no later range can match.
            break
        if i >= low and i <= high:
            # %02X zero-pads, guaranteeing two hex digits per octet.
            # (The previous "%2X" space-padded octets below 0x10 — unreachable
            # for these ranges since all UTF-8 octets are >= 0x80, but fragile.)
            retval = "".join(["%%%02X" % o for o in c.encode("utf-8")])
            break
    return retval
+
+
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    # Non-str input (e.g. already-encoded bytes) is passed through untouched.
    if not isinstance(uri, str):
        return uri

    scheme, authority, path, query, fragment = urllib.parse.urlsplit(uri)
    # The host part gets IDNA-encoded rather than percent-encoded.
    authority = authority.encode("idna").decode("utf-8")
    recombined = urllib.parse.urlunsplit((scheme, authority, path, query, fragment))
    # For each character in 'ucschar' or 'iprivate':
    # 1. encode as utf-8, 2. then %-encode each octet of that utf-8.
    return "".join(encode(ch) for ch in recombined)
+
+
+if __name__ == "__main__":
+ import unittest
+
+ class Test(unittest.TestCase):
+ def test_uris(self):
+ """Test that URIs are invariant under the transformation."""
+ invariant = [
+ "ftp://ftp.is.co.za/rfc/rfc1808.txt",
+ "http://www.ietf.org/rfc/rfc2396.txt",
+ "ldap://[2001:db8::7]/c=GB?objectClass?one",
+ "mailto:John.Doe@example.com",
+ "news:comp.infosystems.www.servers.unix",
+ "tel:+1-816-555-1212",
+ "telnet://192.0.2.16:80/",
+ "urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
+ ]
+ for uri in invariant:
+ self.assertEqual(uri, iri2uri(uri))
+
+ def test_iri(self):
+ """Test that the right type of escaping is done for each part of the URI."""
+ self.assertEqual(
+ "http://xn--o3h.com/%E2%98%84",
+ iri2uri("http://\N{COMET}.com/\N{COMET}"),
+ )
+ self.assertEqual(
+ "http://bitworking.org/?fred=%E2%98%84",
+ iri2uri("http://bitworking.org/?fred=\N{COMET}"),
+ )
+ self.assertEqual(
+ "http://bitworking.org/#%E2%98%84",
+ iri2uri("http://bitworking.org/#\N{COMET}"),
+ )
+ self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))
+ self.assertEqual(
+ "/fred?bar=%E2%98%9A#%E2%98%84",
+ iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"),
+ )
+ self.assertEqual(
+ "/fred?bar=%E2%98%9A#%E2%98%84",
+ iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")),
+ )
+ self.assertNotEqual(
+ "/fred?bar=%E2%98%9A#%E2%98%84",
+ iri2uri(
+ "/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8")
+ ),
+ )
+
+ unittest.main()
diff --git a/contrib/python/httplib2/py3/httplib2/socks.py b/contrib/python/httplib2/py3/httplib2/socks.py
new file mode 100644
index 0000000000..cc68e634c7
--- /dev/null
+++ b/contrib/python/httplib2/py3/httplib2/socks.py
@@ -0,0 +1,518 @@
+"""SocksiPy - Python SOCKS module.
+
+Version 1.00
+
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+ to endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
+
+This module provides a standard socket-like interface for Python
+for tunneling connections through SOCKS proxies.
+
+Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for
+use in PyLoris (http://pyloris.sourceforge.net/).
+
+Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
+mainly to merge bug fixes found in Sourceforge.
+"""
+
+import base64
+import socket
+import struct
+import sys
+
# Proxy support needs a real socket implementation; some restricted
# runtimes strip socket.socket out entirely.
if getattr(socket, "socket", None) is None:
    raise ImportError("socket.socket missing, proxy support unusable")

# Supported proxy protocols (stored as element 0 of the proxy tuple).
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
# HTTP proxy that rewrites plain requests instead of using CONNECT.
PROXY_TYPE_HTTP_NO_TUNNEL = 4

# Module-wide default proxy tuple set by setdefaultproxy(); None means
# new socksocket objects start with no proxy configured.
_defaultproxy = None
# The original socket class, kept so this module still works after
# wrapmodule() replaces a module's socket.socket with socksocket.
_orgsocket = socket.socket
+
+
class ProxyError(Exception):
    """Base class for all errors raised by this module."""

    pass


class GeneralProxyError(ProxyError):
    """Raised for protocol violations, bad input and unusable replies."""

    pass


class Socks5AuthError(ProxyError):
    """Raised when SOCKS5 username/password authentication fails."""

    pass


class Socks5Error(ProxyError):
    """Raised when the SOCKS5 server reports a connection failure."""

    pass


class Socks4Error(ProxyError):
    """Raised when the SOCKS4 server rejects or fails the request."""

    pass


class HTTPError(ProxyError):
    """Raised when an HTTP proxy answers CONNECT with a non-200 status."""

    pass
+
+
# Human-readable messages, indexed by the numeric code carried in the
# (code, message) tuples that GeneralProxyError is raised with.
_generalerrors = (
    "success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input",
)

# SOCKS5 reply-field messages: indexes 0-8 are the protocol's reply
# codes, index 9 is the local catch-all for anything out of range.
_socks5errors = (
    "succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error",
)

# SOCKS5 username/password sub-negotiation failure messages.
_socks5autherrors = (
    "succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error",
)

# SOCKS4 reply messages: server codes 90-93 map to indexes 0-3,
# index 4 is the local catch-all.
_socks4errors = (
    "request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different "
    "user-ids",
    "unknown error",
)
+
+
def setdefaultproxy(
    proxytype=None, addr=None, port=None, rdns=True, username=None, password=None
):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])

    Store a module-wide default proxy configuration. Every socksocket
    created afterwards starts out with these settings until its own
    setproxy() is called.
    """
    global _defaultproxy
    settings = (proxytype, addr, port, rdns, username, password)
    _defaultproxy = settings
+
+
def wrapmodule(module):
    """wrapmodule(module)

    Attempts to replace a module's socket library with a SOCKS socket.
    Must set a default proxy using setdefaultproxy(...) first.
    This will only work on modules that import socket directly into the
    namespace; most of the Python Standard Library falls into this
    category.

    Raises:
        GeneralProxyError: if no default proxy has been configured.
    """
    # PEP 8: compare against None with `is`, not `!=`.
    if _defaultproxy is not None:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError((4, "no proxy specified"))
+
+
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """

    def __init__(
        self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None
    ):
        # ``_sock`` is kept for backward compatibility with the historical
        # SocksiPy signature; it is forwarded as the fourth positional
        # argument of socket.socket (``fileno`` on Python 3, where None
        # simply means "create a new socket").
        _orgsocket.__init__(self, family, type, proto, _sock)
        if _defaultproxy is not None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
        # True while requests must go through a CONNECT tunnel; cleared
        # only for plain-HTTP traffic over PROXY_TYPE_HTTP_NO_TUNNEL.
        self.__httptunnel = True

    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.

        Raises:
            GeneralProxyError: if the peer closes the connection early.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count - len(data))
            if not d:
                raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data

    def sendall(self, content, *args):
        """Override socket.socket.sendall to rewrite the request header
        for non-tunneling proxies if needed.
        """
        if not self.__httptunnel:
            content = self.__rewriteproxy(content)
        return super(socksocket, self).sendall(content, *args)

    def __rewriteproxy(self, header):
        """Rewrite HTTP request headers to support non-tunneling proxies
        (i.e. those which do not support the CONNECT method).
        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
        """
        # NOTE(review): this splits with str separators, so it assumes the
        # header is a str; sendall() callers on Python 3 typically pass
        # bytes — confirm the no-tunnel path before relying on it.
        host, endpt = None, None
        hdrs = header.split("\r\n")
        for hdr in hdrs:
            if hdr.lower().startswith("host:"):
                host = hdr
            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
                endpt = hdr
        if host and endpt:
            hdrs.remove(host)
            hdrs.remove(endpt)
            host = host.split(" ")[1]
            endpt = endpt.split(" ")
            if self.__proxy[4] is not None and self.__proxy[5] is not None:
                hdrs.insert(0, self.__getauthheader())
            hdrs.insert(0, "Host: %s" % host)
            # Rewrite "<METHOD> <path> <version>" into an absolute-URI
            # request line, as plain HTTP proxies require.
            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
        return "\r\n".join(hdrs)

    def __getauthheader(self):
        # Username and password are stored as bytes (see setproxy()).
        auth = self.__proxy[4] + b":" + self.__proxy[5]
        return "Proxy-Authorization: Basic " + base64.b64encode(auth).decode()

    def setproxy(
        self,
        proxytype=None,
        addr=None,
        port=None,
        rdns=True,
        username=None,
        password=None,
        headers=None,
    ):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])

        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
            are supported: PROXY_TYPE_SOCKS4 (including socks4a),
            PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
            servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
            (rather than the local side). The default is True.
            Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
            The default is no authentication.
        password - Password to authenticate with to the server.
            Only relevant when username is also provided.
        headers - Additional or modified headers for the proxy connect
            request.
        """
        self.__proxy = (
            proxytype,
            addr,
            port,
            rdns,
            # Store credentials as bytes so they can be spliced directly
            # into the binary SOCKS negotiation packets.
            username.encode() if username else None,
            password.encode() if password else None,
            headers,
        )

    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.

        Raises:
            GeneralProxyError, Socks5AuthError, Socks5Error on failure.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4] is not None) and (self.__proxy[5] is not None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            packet = bytearray()
            packet.append(0x01)
            packet.append(len(self.__proxy[4]))
            packet.extend(self.__proxy[4])
            packet.append(len(self.__proxy[5]))
            packet.extend(self.__proxy[5])
            self.sendall(packet)
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # BUGFIX: compare the one-byte slice, not chosenauth[1] (an
            # int on Python 3), so the 0xFF "no acceptable methods" reply
            # actually raises Socks5AuthError instead of falling through.
            if chosenauth[1:2] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack("BBB", 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = (
                    req
                    + chr(0x03).encode()
                    + chr(len(destaddr)).encode()
                    + destaddr.encode()
                )
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2]) <= 8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr is not None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername

    def __negotiatesocks4(self, destaddr, destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.

        Raises:
            GeneralProxyError, Socks4Error on failure.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # SOCKS4a remote resolution: send the placeholder 0.0.0.1
                # and append the hostname after the userid field.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] is not None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            # BUGFIX: the hostname must be encoded before concatenating
            # with the bytes request (str + bytes raises TypeError).
            req = req + destaddr.encode() + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (
            socket.inet_ntoa(resp[4:]),
            struct.unpack(">H", resp[2:4])[0],
        )
        # BUGFIX: the original tested ``rmtrslv != None`` which is always
        # True for a bool, so the placeholder 0.0.0.1 was reported as the
        # peer. When the proxy resolved the name remotely, report the
        # hostname; otherwise report the locally resolved address.
        if rmtrslv:
            self.__proxypeername = (destaddr, destport)
        else:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)

    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.

        Raises:
            GeneralProxyError, HTTPError on failure.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        wrote_host_header = False
        wrote_auth_header = False
        if self.__proxy[6] is not None:
            # BUGFIX: dict.iteritems() does not exist on Python 3; use
            # items() so custom proxy headers don't raise AttributeError.
            for key, val in self.__proxy[6].items():
                headers += [key, ": ", val, "\r\n"]
                wrote_host_header = key.lower() == "host"
                wrote_auth_header = key.lower() == "proxy-authorization"
        if not wrote_host_header:
            headers += ["Host: ", destaddr, "\r\n"]
        if not wrote_auth_header:
            if self.__proxy[4] is not None and self.__proxy[5] is not None:
                headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)

    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().

        Raises:
            GeneralProxyError: on bad input or an unknown proxy type.
        """
        # Do a minimal input check first
        if (
            (not isinstance(destpair, (list, tuple)))
            or (len(destpair) < 2)
            or (not isinstance(destpair[0], (str, bytes)))
            or (not isinstance(destpair[1], int))
        ):
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            # SOCKS servers default to port 1080.
            portnum = self.__proxy[2] if self.__proxy[2] is not None else 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            portnum = self.__proxy[2] if self.__proxy[2] is not None else 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            # HTTP proxies default to port 8080.
            portnum = self.__proxy[2] if self.__proxy[2] is not None else 8080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
            portnum = self.__proxy[2] if self.__proxy[2] is not None else 8080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            if destpair[1] == 443:
                # HTTPS always requires a CONNECT tunnel.
                self.__negotiatehttp(destpair[0], destpair[1])
            else:
                # Plain HTTP: requests are rewritten by sendall() instead.
                self.__httptunnel = False
        elif self.__proxy[0] is None:
            # No proxy configured: connect directly.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
diff --git a/contrib/python/httplib2/py3/ya.make b/contrib/python/httplib2/py3/ya.make
new file mode 100644
index 0000000000..a598484774
--- /dev/null
+++ b/contrib/python/httplib2/py3/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(0.22.0)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/python/certifi
+ contrib/python/pyparsing
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ httplib2/__init__.py
+ httplib2/auth.py
+ httplib2/certs.py
+ httplib2/error.py
+ httplib2/iri2uri.py
+ httplib2/socks.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/httplib2/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/httplib2/ya.make b/contrib/python/httplib2/ya.make
new file mode 100644
index 0000000000..e53114c90c
--- /dev/null
+++ b/contrib/python/httplib2/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/httplib2/py2)
+ELSE()
+ PEERDIR(contrib/python/httplib2/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/kubernetes/.dist-info/METADATA b/contrib/python/kubernetes/.dist-info/METADATA
new file mode 100644
index 0000000000..7dc251d643
--- /dev/null
+++ b/contrib/python/kubernetes/.dist-info/METADATA
@@ -0,0 +1,40 @@
+Metadata-Version: 2.1
+Name: kubernetes
+Version: 28.1.0
+Summary: Kubernetes python client
+Home-page: https://github.com/kubernetes-client/python
+Author: Kubernetes
+Author-email:
+License: Apache License Version 2.0
+Keywords: Swagger,OpenAPI,Kubernetes
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Topic :: Utilities
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.6
+License-File: LICENSE
+Requires-Dist: certifi >=14.05.14
+Requires-Dist: six >=1.9.0
+Requires-Dist: python-dateutil >=2.5.3
+Requires-Dist: pyyaml >=5.4.1
+Requires-Dist: google-auth >=1.0.1
+Requires-Dist: websocket-client !=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0
+Requires-Dist: requests
+Requires-Dist: requests-oauthlib
+Requires-Dist: oauthlib >=3.2.2
+Requires-Dist: urllib3 <2.0,>=1.24.2
+Requires-Dist: ipaddress >=1.0.17 ; python_version=="2.7"
+Provides-Extra: adal
+Requires-Dist: adal >=1.0.2 ; extra == 'adal'
+
+Python client for kubernetes http://kubernetes.io/
diff --git a/contrib/python/kubernetes/.dist-info/top_level.txt b/contrib/python/kubernetes/.dist-info/top_level.txt
new file mode 100644
index 0000000000..807e21be4c
--- /dev/null
+++ b/contrib/python/kubernetes/.dist-info/top_level.txt
@@ -0,0 +1 @@
+kubernetes
diff --git a/contrib/python/kubernetes/LICENSE b/contrib/python/kubernetes/LICENSE
new file mode 100644
index 0000000000..00b2401109
--- /dev/null
+++ b/contrib/python/kubernetes/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 The Kubernetes Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/python/kubernetes/README.md b/contrib/python/kubernetes/README.md
new file mode 100644
index 0000000000..dbd3206f54
--- /dev/null
+++ b/contrib/python/kubernetes/README.md
@@ -0,0 +1,226 @@
+# Kubernetes Python Client
+
+[![Build Status](https://travis-ci.org/kubernetes-client/python.svg?branch=master)](https://travis-ci.org/kubernetes-client/python)
+[![PyPI version](https://badge.fury.io/py/kubernetes.svg)](https://badge.fury.io/py/kubernetes)
+[![codecov](https://codecov.io/gh/kubernetes-client/python/branch/master/graph/badge.svg)](https://codecov.io/gh/kubernetes-client/python "Non-generated packages only")
+[![pypi supported versions](https://img.shields.io/pypi/pyversions/kubernetes.svg)](https://pypi.python.org/pypi/kubernetes)
+[![Client Capabilities](https://img.shields.io/badge/Kubernetes%20client-Silver-blue.svg?style=flat&colorB=C0C0C0&colorA=306CE8)](http://bit.ly/kubernetes-client-capabilities-badge)
+[![Client Support Level](https://img.shields.io/badge/kubernetes%20client-beta-green.svg?style=flat&colorA=306CE8)](http://bit.ly/kubernetes-client-support-badge)
+
+Python client for the [kubernetes](http://kubernetes.io/) API.
+
+## Installation
+
+From source:
+
+```
+git clone --recursive https://github.com/kubernetes-client/python.git
+cd python
+python setup.py install
+```
+
+From [PyPI](https://pypi.python.org/pypi/kubernetes/) directly:
+
+```
+pip install kubernetes
+```
+
+## Examples
+
+list all pods:
+
+```python
+from kubernetes import client, config
+
+# Configs can be set in Configuration class directly or using helper utility
+config.load_kube_config()
+
+v1 = client.CoreV1Api()
+print("Listing pods with their IPs:")
+ret = v1.list_pod_for_all_namespaces(watch=False)
+for i in ret.items:
+ print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
+```
+
+watch on namespace object:
+
+```python
+from kubernetes import client, config, watch
+
+# Configs can be set in Configuration class directly or using helper utility
+config.load_kube_config()
+
+v1 = client.CoreV1Api()
+count = 10
+w = watch.Watch()
+for event in w.stream(v1.list_namespace, _request_timeout=60):
+ print("Event: %s %s" % (event['type'], event['object'].metadata.name))
+ count -= 1
+ if not count:
+ w.stop()
+
+print("Ended.")
+```
+
+More examples can be found in [examples](examples/) folder. To run examples, run this command:
+
+```shell
+python -m examples.example1
+```
+
+(replace example1 with one of the filenames in the examples folder)
+
+## Documentation
+
+All APIs and Models' documentation can be found at the [Generated client's README file](kubernetes/README.md)
+
+## Compatibility
+
+`client-python` follows [semver](http://semver.org/), so until the major version of
+client-python gets increased, your code will continue to work with explicitly
+supported versions of Kubernetes clusters.
+
+#### Compatibility matrix of supported client versions
+
+- [client 9.y.z](https://pypi.org/project/kubernetes/9.0.1/): Kubernetes 1.12 or below (+-), Kubernetes 1.13 (✓), Kubernetes 1.14 or above (+-)
+- [client 10.y.z](https://pypi.org/project/kubernetes/10.1.0/): Kubernetes 1.13 or below (+-), Kubernetes 1.14 (✓), Kubernetes 1.15 or above (+-)
+- [client 11.y.z](https://pypi.org/project/kubernetes/11.0.0/): Kubernetes 1.14 or below (+-), Kubernetes 1.15 (✓), Kubernetes 1.16 or above (+-)
+- [client 12.y.z](https://pypi.org/project/kubernetes/12.0.1/): Kubernetes 1.15 or below (+-), Kubernetes 1.16 (✓), Kubernetes 1.17 or above (+-)
+- [client 17.y.z](https://pypi.org/project/kubernetes/17.17.0/): Kubernetes 1.16 or below (+-), Kubernetes 1.17 (✓), Kubernetes 1.18 or above (+-)
+- [client 18.y.z](https://pypi.org/project/kubernetes/18.20.0/): Kubernetes 1.17 or below (+-), Kubernetes 1.18 (✓), Kubernetes 1.19 or above (+-)
+- [client 19.y.z](https://pypi.org/project/kubernetes/19.15.0/): Kubernetes 1.18 or below (+-), Kubernetes 1.19 (✓), Kubernetes 1.20 or above (+-)
+- [client 20.y.z](https://pypi.org/project/kubernetes/20.13.0/): Kubernetes 1.19 or below (+-), Kubernetes 1.20 (✓), Kubernetes 1.21 or above (+-)
+- [client 21.y.z](https://pypi.org/project/kubernetes/21.7.0/): Kubernetes 1.20 or below (+-), Kubernetes 1.21 (✓), Kubernetes 1.22 or above (+-)
+- [client 22.y.z](https://pypi.org/project/kubernetes/22.6.0/): Kubernetes 1.21 or below (+-), Kubernetes 1.22 (✓), Kubernetes 1.23 or above (+-)
+- [client 23.y.z](https://pypi.org/project/kubernetes/23.6.0/): Kubernetes 1.22 or below (+-), Kubernetes 1.23 (✓), Kubernetes 1.24 or above (+-)
+- [client 24.y.z](https://pypi.org/project/kubernetes/24.2.0/): Kubernetes 1.23 or below (+-), Kubernetes 1.24 (✓), Kubernetes 1.25 or above (+-)
+- [client 25.y.z](https://pypi.org/project/kubernetes/25.3.0/): Kubernetes 1.24 or below (+-), Kubernetes 1.25 (✓), Kubernetes 1.26 or above (+-)
+- [client 26.y.z](https://pypi.org/project/kubernetes/26.1.0/): Kubernetes 1.25 or below (+-), Kubernetes 1.26 (✓), Kubernetes 1.27 or above (+-)
+- [client 27.y.z](https://pypi.org/project/kubernetes/27.2.0/): Kubernetes 1.26 or below (+-), Kubernetes 1.27 (✓), Kubernetes 1.28 or above (+-)
+- [client 28.y.z](https://pypi.org/project/kubernetes/28.1.0/): Kubernetes 1.27 or below (+-), Kubernetes 1.28 (✓), Kubernetes 1.29 or above (+-)
+
+> See [here](#homogenizing-the-kubernetes-python-client-versions) for an explanation of why there is no v13-v16 release.
+
+Key:
+
+* `✓` Exactly the same features / API objects in both client-python and the Kubernetes
+ version.
+* `+` client-python has features or API objects that may not be present in the Kubernetes
+ cluster, either due to that client-python has additional new API, or that the server has
+ removed old API. However, everything they have in common (i.e., most APIs) will work.
+ Please note that alpha APIs may vanish or change significantly in a single release.
+* `-` The Kubernetes cluster has features the client-python library can't use, either due
+ to the server has additional new API, or that client-python has removed old API. However,
+ everything they share in common (i.e., most APIs) will work.
+
+See the [CHANGELOG](./CHANGELOG.md) for a detailed description of changes
+between client-python versions.
+
+| Client version | Canonical source for OpenAPI spec | Maintenance status |
+|-----------------|--------------------------------------|-------------------------------|
+| 5.0 Alpha/Beta | Kubernetes main repo, 1.9 branch | ✗ |
+| 5.0 | Kubernetes main repo, 1.9 branch | ✗ |
+| 6.0 Alpha/Beta | Kubernetes main repo, 1.10 branch | ✗ |
+| 6.0 | Kubernetes main repo, 1.10 branch | ✗ |
+| 7.0 Alpha/Beta | Kubernetes main repo, 1.11 branch | ✗ |
+| 7.0 | Kubernetes main repo, 1.11 branch | ✗ |
+| 8.0 Alpha/Beta | Kubernetes main repo, 1.12 branch | ✗ |
+| 8.0 | Kubernetes main repo, 1.12 branch | ✗ |
+| 9.0 Alpha/Beta | Kubernetes main repo, 1.13 branch | ✗ |
+| 9.0 | Kubernetes main repo, 1.13 branch | ✗ |
+| 10.0 Alpha/Beta | Kubernetes main repo, 1.14 branch | ✗ |
+| 10.0 | Kubernetes main repo, 1.14 branch | ✗ |
+| 11.0 Alpha/Beta | Kubernetes main repo, 1.15 branch | ✗ |
+| 11.0 | Kubernetes main repo, 1.15 branch | ✗ |
+| 12.0 Alpha/Beta | Kubernetes main repo, 1.16 branch | ✗ |
+| 12.0 | Kubernetes main repo, 1.16 branch | ✗ |
+| 17.0 Alpha/Beta | Kubernetes main repo, 1.17 branch | ✗ |
+| 17.0 | Kubernetes main repo, 1.17 branch | ✗ |
+| 18.0 Alpha/Beta | Kubernetes main repo, 1.18 branch | ✗ |
+| 18.0 | Kubernetes main repo, 1.18 branch | ✗ |
+| 19.0 Alpha/Beta | Kubernetes main repo, 1.19 branch | ✗ |
+| 19.0 | Kubernetes main repo, 1.19 branch | ✗ |
+| 20.0 Alpha/Beta | Kubernetes main repo, 1.20 branch | ✗ |
+| 20.0 | Kubernetes main repo, 1.20 branch | ✗ |
+| 21.0 Alpha/Beta | Kubernetes main repo, 1.21 branch | ✗ |
+| 21.0 | Kubernetes main repo, 1.21 branch | ✗ |
+| 22.0 Alpha/Beta | Kubernetes main repo, 1.22 branch | ✗ |
+| 22.0 | Kubernetes main repo, 1.22 branch | ✗ |
+| 23.0 Alpha/Beta | Kubernetes main repo, 1.23 branch | ✗ |
+| 23.0 | Kubernetes main repo, 1.23 branch | ✗ |
+| 24.0 Alpha/Beta | Kubernetes main repo, 1.24 branch | ✗ |
+| 24.0 | Kubernetes main repo, 1.24 branch | ✗ |
+| 25.0 Alpha/Beta | Kubernetes main repo, 1.25 branch | ✗ |
+| 25.0 | Kubernetes main repo, 1.25 branch | ✗ |
+| 26.0 Alpha/Beta | Kubernetes main repo, 1.26 branch | ✗ |
+| 26.0 | Kubernetes main repo, 1.26 branch | ✓ |
+| 27.0 Alpha/Beta | Kubernetes main repo, 1.27 branch | ✗ |
+| 27.0 | Kubernetes main repo, 1.27 branch | ✓ |
+| 28.0 Alpha/Beta | Kubernetes main repo, 1.28 branch | ✗ |
+| 28.0 | Kubernetes main repo, 1.28 branch | ✓ |
+
+> See [here](#homogenizing-the-kubernetes-python-client-versions) for an explanation of why there is no v13-v16 release.
+
+Key:
+
+* `✓` Changes in main Kubernetes repo are manually ([should be automated](https://github.com/kubernetes-client/python/issues/177)) published to client-python when they are available.
+* `✗` No longer maintained; please upgrade.
+
+Kubernetes supports [three minor releases](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#supported-releases-and-component-skew) at a time. "Support" means we expect users to be running that version in production, though we may not port fixes back before the latest minor version. For example, when v1.3 comes out, v1.0 will no longer be supported. Consistent with the Kubernetes support policy, we expect to support **three GA major releases** (corresponding to three Kubernetes minor releases) at a time.
+
+Note: There would be no maintenance for alpha/beta releases except the latest one.
+
+**Exception to the above support rule:** Since we are running behind on releases, we will support Alpha/Beta releases for a greater number of clients until we catch up with the upstream version.
+
+## Homogenizing the Kubernetes Python Client versions
+
+The client releases v12 and before following a versioning schema where the major version was 4 integer positions behind the Kubernetes minor on which the client is based on. For example, v12.0.0 is based on Kubernetes v1.16, v11.0.0 is based on Kubernetes v1.15 and so on.
+
+This created a lot of confusion tracking two different version numbers for each client release. It was decided to homogenize the version scheme starting from the Kubernetes Python client based on Kubernetes v1.17. The versioning scheme of the client from this release would be vY.Z.P where Y and Z are the Kubernetes minor and patch release numbers from Kubernetes v1.Y.Z and P is the client specific patch release numbers to accommodate changes and fixes done specifically to the client. For more details, refer [this issue](https://github.com/kubernetes-client/python/issues/1244).
+
+## Community, Support, Discussion
+
+If you have any problems using the package or any suggestions, please start by reaching out to the [Kubernetes clients slack channel](https://kubernetes.slack.com/messages/C76GB48RK/), or filing an [issue](https://github.com/kubernetes-client/python/issues) to let us know. You can also reach the maintainers of this project at [SIG API Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery), where this project falls under.
+
+### Code of Conduct
+
+Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+
+## Troubleshooting
+
+### SSLError on macOS
+
+If you get an SSLError, you likely need to update your version of python. The
+version that ships with macOS may not be supported.
+
+Install the latest version of python with [brew](https://brew.sh/):
+
+```
+brew install python
+```
+
+Once installed, you can query the version of OpenSSL like so:
+
+```
+python -c "import ssl; print (ssl.OPENSSL_VERSION)"
+```
+
+You'll need a version with OpenSSL version 1.0.0 or later.
+
+### Hostname doesn't match
+
+If you get an `ssl.CertificateError` complaining about hostname match, your installed packages do not meet the version [requirements](requirements.txt).
+Specifically check the `ipaddress` and `urllib3` package versions to make sure they meet the requirements in the [requirements.txt](requirements.txt) file.
+
+### Why Exec/Attach calls don't work
+
+Starting from the 4.0 release, we do not support directly calling exec or attach calls. You should use the stream module to call them. So instead
+of `resp = api.connect_get_namespaced_pod_exec(name, ...` you should call `resp = stream(api.connect_get_namespaced_pod_exec, name, ...`.
+
+Using Stream will overwrite the requests protocol in _core_v1_api.CoreV1Api()_
+This will cause a failure in non-exec/attach calls. If you reuse your api client object, you will need to
+recreate it between api calls that use _stream_ and other api calls.
+
+See more at [exec example](examples/pod_exec.py).
+
+**[⬆ back to top](#Installation)**
diff --git a/contrib/python/kubernetes/kubernetes/__init__.py b/contrib/python/kubernetes/kubernetes/__init__.py
new file mode 100644
index 0000000000..31a6521c37
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2022 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__project__ = 'kubernetes'
+# The version is auto-updated. Please do not edit.
+__version__ = "28.1.0"
+
+from . import client
+from . import config
+from . import dynamic
+from . import watch
+from . import stream
+from . import utils
+from . import leaderelection
diff --git a/contrib/python/kubernetes/kubernetes/client/__init__.py b/contrib/python/kubernetes/kubernetes/client/__init__.py
new file mode 100644
index 0000000000..d47fac5322
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/__init__.py
@@ -0,0 +1,645 @@
+# coding: utf-8
+
+# flake8: noqa
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+__version__ = "28.1.0"
+
+# import apis into sdk package
+from kubernetes.client.api.well_known_api import WellKnownApi
+from kubernetes.client.api.admissionregistration_api import AdmissionregistrationApi
+from kubernetes.client.api.admissionregistration_v1_api import AdmissionregistrationV1Api
+from kubernetes.client.api.admissionregistration_v1alpha1_api import AdmissionregistrationV1alpha1Api
+from kubernetes.client.api.admissionregistration_v1beta1_api import AdmissionregistrationV1beta1Api
+from kubernetes.client.api.apiextensions_api import ApiextensionsApi
+from kubernetes.client.api.apiextensions_v1_api import ApiextensionsV1Api
+from kubernetes.client.api.apiregistration_api import ApiregistrationApi
+from kubernetes.client.api.apiregistration_v1_api import ApiregistrationV1Api
+from kubernetes.client.api.apis_api import ApisApi
+from kubernetes.client.api.apps_api import AppsApi
+from kubernetes.client.api.apps_v1_api import AppsV1Api
+from kubernetes.client.api.authentication_api import AuthenticationApi
+from kubernetes.client.api.authentication_v1_api import AuthenticationV1Api
+from kubernetes.client.api.authentication_v1alpha1_api import AuthenticationV1alpha1Api
+from kubernetes.client.api.authentication_v1beta1_api import AuthenticationV1beta1Api
+from kubernetes.client.api.authorization_api import AuthorizationApi
+from kubernetes.client.api.authorization_v1_api import AuthorizationV1Api
+from kubernetes.client.api.autoscaling_api import AutoscalingApi
+from kubernetes.client.api.autoscaling_v1_api import AutoscalingV1Api
+from kubernetes.client.api.autoscaling_v2_api import AutoscalingV2Api
+from kubernetes.client.api.batch_api import BatchApi
+from kubernetes.client.api.batch_v1_api import BatchV1Api
+from kubernetes.client.api.certificates_api import CertificatesApi
+from kubernetes.client.api.certificates_v1_api import CertificatesV1Api
+from kubernetes.client.api.certificates_v1alpha1_api import CertificatesV1alpha1Api
+from kubernetes.client.api.coordination_api import CoordinationApi
+from kubernetes.client.api.coordination_v1_api import CoordinationV1Api
+from kubernetes.client.api.core_api import CoreApi
+from kubernetes.client.api.core_v1_api import CoreV1Api
+from kubernetes.client.api.custom_objects_api import CustomObjectsApi
+from kubernetes.client.api.discovery_api import DiscoveryApi
+from kubernetes.client.api.discovery_v1_api import DiscoveryV1Api
+from kubernetes.client.api.events_api import EventsApi
+from kubernetes.client.api.events_v1_api import EventsV1Api
+from kubernetes.client.api.flowcontrol_apiserver_api import FlowcontrolApiserverApi
+from kubernetes.client.api.flowcontrol_apiserver_v1beta2_api import FlowcontrolApiserverV1beta2Api
+from kubernetes.client.api.flowcontrol_apiserver_v1beta3_api import FlowcontrolApiserverV1beta3Api
+from kubernetes.client.api.internal_apiserver_api import InternalApiserverApi
+from kubernetes.client.api.internal_apiserver_v1alpha1_api import InternalApiserverV1alpha1Api
+from kubernetes.client.api.logs_api import LogsApi
+from kubernetes.client.api.networking_api import NetworkingApi
+from kubernetes.client.api.networking_v1_api import NetworkingV1Api
+from kubernetes.client.api.networking_v1alpha1_api import NetworkingV1alpha1Api
+from kubernetes.client.api.node_api import NodeApi
+from kubernetes.client.api.node_v1_api import NodeV1Api
+from kubernetes.client.api.openid_api import OpenidApi
+from kubernetes.client.api.policy_api import PolicyApi
+from kubernetes.client.api.policy_v1_api import PolicyV1Api
+from kubernetes.client.api.rbac_authorization_api import RbacAuthorizationApi
+from kubernetes.client.api.rbac_authorization_v1_api import RbacAuthorizationV1Api
+from kubernetes.client.api.resource_api import ResourceApi
+from kubernetes.client.api.resource_v1alpha2_api import ResourceV1alpha2Api
+from kubernetes.client.api.scheduling_api import SchedulingApi
+from kubernetes.client.api.scheduling_v1_api import SchedulingV1Api
+from kubernetes.client.api.storage_api import StorageApi
+from kubernetes.client.api.storage_v1_api import StorageV1Api
+from kubernetes.client.api.version_api import VersionApi
+
+# import ApiClient
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.configuration import Configuration
+from kubernetes.client.exceptions import OpenApiException
+from kubernetes.client.exceptions import ApiTypeError
+from kubernetes.client.exceptions import ApiValueError
+from kubernetes.client.exceptions import ApiKeyError
+from kubernetes.client.exceptions import ApiException
+# import models into sdk package
+from kubernetes.client.models.admissionregistration_v1_service_reference import AdmissionregistrationV1ServiceReference
+from kubernetes.client.models.admissionregistration_v1_webhook_client_config import AdmissionregistrationV1WebhookClientConfig
+from kubernetes.client.models.apiextensions_v1_service_reference import ApiextensionsV1ServiceReference
+from kubernetes.client.models.apiextensions_v1_webhook_client_config import ApiextensionsV1WebhookClientConfig
+from kubernetes.client.models.apiregistration_v1_service_reference import ApiregistrationV1ServiceReference
+from kubernetes.client.models.authentication_v1_token_request import AuthenticationV1TokenRequest
+from kubernetes.client.models.core_v1_endpoint_port import CoreV1EndpointPort
+from kubernetes.client.models.core_v1_event import CoreV1Event
+from kubernetes.client.models.core_v1_event_list import CoreV1EventList
+from kubernetes.client.models.core_v1_event_series import CoreV1EventSeries
+from kubernetes.client.models.discovery_v1_endpoint_port import DiscoveryV1EndpointPort
+from kubernetes.client.models.events_v1_event import EventsV1Event
+from kubernetes.client.models.events_v1_event_list import EventsV1EventList
+from kubernetes.client.models.events_v1_event_series import EventsV1EventSeries
+from kubernetes.client.models.storage_v1_token_request import StorageV1TokenRequest
+from kubernetes.client.models.v1_api_group import V1APIGroup
+from kubernetes.client.models.v1_api_group_list import V1APIGroupList
+from kubernetes.client.models.v1_api_resource import V1APIResource
+from kubernetes.client.models.v1_api_resource_list import V1APIResourceList
+from kubernetes.client.models.v1_api_service import V1APIService
+from kubernetes.client.models.v1_api_service_condition import V1APIServiceCondition
+from kubernetes.client.models.v1_api_service_list import V1APIServiceList
+from kubernetes.client.models.v1_api_service_spec import V1APIServiceSpec
+from kubernetes.client.models.v1_api_service_status import V1APIServiceStatus
+from kubernetes.client.models.v1_api_versions import V1APIVersions
+from kubernetes.client.models.v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource
+from kubernetes.client.models.v1_affinity import V1Affinity
+from kubernetes.client.models.v1_aggregation_rule import V1AggregationRule
+from kubernetes.client.models.v1_attached_volume import V1AttachedVolume
+from kubernetes.client.models.v1_azure_disk_volume_source import V1AzureDiskVolumeSource
+from kubernetes.client.models.v1_azure_file_persistent_volume_source import V1AzureFilePersistentVolumeSource
+from kubernetes.client.models.v1_azure_file_volume_source import V1AzureFileVolumeSource
+from kubernetes.client.models.v1_binding import V1Binding
+from kubernetes.client.models.v1_bound_object_reference import V1BoundObjectReference
+from kubernetes.client.models.v1_csi_driver import V1CSIDriver
+from kubernetes.client.models.v1_csi_driver_list import V1CSIDriverList
+from kubernetes.client.models.v1_csi_driver_spec import V1CSIDriverSpec
+from kubernetes.client.models.v1_csi_node import V1CSINode
+from kubernetes.client.models.v1_csi_node_driver import V1CSINodeDriver
+from kubernetes.client.models.v1_csi_node_list import V1CSINodeList
+from kubernetes.client.models.v1_csi_node_spec import V1CSINodeSpec
+from kubernetes.client.models.v1_csi_persistent_volume_source import V1CSIPersistentVolumeSource
+from kubernetes.client.models.v1_csi_storage_capacity import V1CSIStorageCapacity
+from kubernetes.client.models.v1_csi_storage_capacity_list import V1CSIStorageCapacityList
+from kubernetes.client.models.v1_csi_volume_source import V1CSIVolumeSource
+from kubernetes.client.models.v1_capabilities import V1Capabilities
+from kubernetes.client.models.v1_ceph_fs_persistent_volume_source import V1CephFSPersistentVolumeSource
+from kubernetes.client.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
+from kubernetes.client.models.v1_certificate_signing_request import V1CertificateSigningRequest
+from kubernetes.client.models.v1_certificate_signing_request_condition import V1CertificateSigningRequestCondition
+from kubernetes.client.models.v1_certificate_signing_request_list import V1CertificateSigningRequestList
+from kubernetes.client.models.v1_certificate_signing_request_spec import V1CertificateSigningRequestSpec
+from kubernetes.client.models.v1_certificate_signing_request_status import V1CertificateSigningRequestStatus
+from kubernetes.client.models.v1_cinder_persistent_volume_source import V1CinderPersistentVolumeSource
+from kubernetes.client.models.v1_cinder_volume_source import V1CinderVolumeSource
+from kubernetes.client.models.v1_claim_source import V1ClaimSource
+from kubernetes.client.models.v1_client_ip_config import V1ClientIPConfig
+from kubernetes.client.models.v1_cluster_role import V1ClusterRole
+from kubernetes.client.models.v1_cluster_role_binding import V1ClusterRoleBinding
+from kubernetes.client.models.v1_cluster_role_binding_list import V1ClusterRoleBindingList
+from kubernetes.client.models.v1_cluster_role_list import V1ClusterRoleList
+from kubernetes.client.models.v1_component_condition import V1ComponentCondition
+from kubernetes.client.models.v1_component_status import V1ComponentStatus
+from kubernetes.client.models.v1_component_status_list import V1ComponentStatusList
+from kubernetes.client.models.v1_condition import V1Condition
+from kubernetes.client.models.v1_config_map import V1ConfigMap
+from kubernetes.client.models.v1_config_map_env_source import V1ConfigMapEnvSource
+from kubernetes.client.models.v1_config_map_key_selector import V1ConfigMapKeySelector
+from kubernetes.client.models.v1_config_map_list import V1ConfigMapList
+from kubernetes.client.models.v1_config_map_node_config_source import V1ConfigMapNodeConfigSource
+from kubernetes.client.models.v1_config_map_projection import V1ConfigMapProjection
+from kubernetes.client.models.v1_config_map_volume_source import V1ConfigMapVolumeSource
+from kubernetes.client.models.v1_container import V1Container
+from kubernetes.client.models.v1_container_image import V1ContainerImage
+from kubernetes.client.models.v1_container_port import V1ContainerPort
+from kubernetes.client.models.v1_container_resize_policy import V1ContainerResizePolicy
+from kubernetes.client.models.v1_container_state import V1ContainerState
+from kubernetes.client.models.v1_container_state_running import V1ContainerStateRunning
+from kubernetes.client.models.v1_container_state_terminated import V1ContainerStateTerminated
+from kubernetes.client.models.v1_container_state_waiting import V1ContainerStateWaiting
+from kubernetes.client.models.v1_container_status import V1ContainerStatus
+from kubernetes.client.models.v1_controller_revision import V1ControllerRevision
+from kubernetes.client.models.v1_controller_revision_list import V1ControllerRevisionList
+from kubernetes.client.models.v1_cron_job import V1CronJob
+from kubernetes.client.models.v1_cron_job_list import V1CronJobList
+from kubernetes.client.models.v1_cron_job_spec import V1CronJobSpec
+from kubernetes.client.models.v1_cron_job_status import V1CronJobStatus
+from kubernetes.client.models.v1_cross_version_object_reference import V1CrossVersionObjectReference
+from kubernetes.client.models.v1_custom_resource_column_definition import V1CustomResourceColumnDefinition
+from kubernetes.client.models.v1_custom_resource_conversion import V1CustomResourceConversion
+from kubernetes.client.models.v1_custom_resource_definition import V1CustomResourceDefinition
+from kubernetes.client.models.v1_custom_resource_definition_condition import V1CustomResourceDefinitionCondition
+from kubernetes.client.models.v1_custom_resource_definition_list import V1CustomResourceDefinitionList
+from kubernetes.client.models.v1_custom_resource_definition_names import V1CustomResourceDefinitionNames
+from kubernetes.client.models.v1_custom_resource_definition_spec import V1CustomResourceDefinitionSpec
+from kubernetes.client.models.v1_custom_resource_definition_status import V1CustomResourceDefinitionStatus
+from kubernetes.client.models.v1_custom_resource_definition_version import V1CustomResourceDefinitionVersion
+from kubernetes.client.models.v1_custom_resource_subresource_scale import V1CustomResourceSubresourceScale
+from kubernetes.client.models.v1_custom_resource_subresources import V1CustomResourceSubresources
+from kubernetes.client.models.v1_custom_resource_validation import V1CustomResourceValidation
+from kubernetes.client.models.v1_daemon_endpoint import V1DaemonEndpoint
+from kubernetes.client.models.v1_daemon_set import V1DaemonSet
+from kubernetes.client.models.v1_daemon_set_condition import V1DaemonSetCondition
+from kubernetes.client.models.v1_daemon_set_list import V1DaemonSetList
+from kubernetes.client.models.v1_daemon_set_spec import V1DaemonSetSpec
+from kubernetes.client.models.v1_daemon_set_status import V1DaemonSetStatus
+from kubernetes.client.models.v1_daemon_set_update_strategy import V1DaemonSetUpdateStrategy
+from kubernetes.client.models.v1_delete_options import V1DeleteOptions
+from kubernetes.client.models.v1_deployment import V1Deployment
+from kubernetes.client.models.v1_deployment_condition import V1DeploymentCondition
+from kubernetes.client.models.v1_deployment_list import V1DeploymentList
+from kubernetes.client.models.v1_deployment_spec import V1DeploymentSpec
+from kubernetes.client.models.v1_deployment_status import V1DeploymentStatus
+from kubernetes.client.models.v1_deployment_strategy import V1DeploymentStrategy
+from kubernetes.client.models.v1_downward_api_projection import V1DownwardAPIProjection
+from kubernetes.client.models.v1_downward_api_volume_file import V1DownwardAPIVolumeFile
+from kubernetes.client.models.v1_downward_api_volume_source import V1DownwardAPIVolumeSource
+from kubernetes.client.models.v1_empty_dir_volume_source import V1EmptyDirVolumeSource
+from kubernetes.client.models.v1_endpoint import V1Endpoint
+from kubernetes.client.models.v1_endpoint_address import V1EndpointAddress
+from kubernetes.client.models.v1_endpoint_conditions import V1EndpointConditions
+from kubernetes.client.models.v1_endpoint_hints import V1EndpointHints
+from kubernetes.client.models.v1_endpoint_slice import V1EndpointSlice
+from kubernetes.client.models.v1_endpoint_slice_list import V1EndpointSliceList
+from kubernetes.client.models.v1_endpoint_subset import V1EndpointSubset
+from kubernetes.client.models.v1_endpoints import V1Endpoints
+from kubernetes.client.models.v1_endpoints_list import V1EndpointsList
+from kubernetes.client.models.v1_env_from_source import V1EnvFromSource
+from kubernetes.client.models.v1_env_var import V1EnvVar
+from kubernetes.client.models.v1_env_var_source import V1EnvVarSource
+from kubernetes.client.models.v1_ephemeral_container import V1EphemeralContainer
+from kubernetes.client.models.v1_ephemeral_volume_source import V1EphemeralVolumeSource
+from kubernetes.client.models.v1_event_source import V1EventSource
+from kubernetes.client.models.v1_eviction import V1Eviction
+from kubernetes.client.models.v1_exec_action import V1ExecAction
+from kubernetes.client.models.v1_external_documentation import V1ExternalDocumentation
+from kubernetes.client.models.v1_fc_volume_source import V1FCVolumeSource
+from kubernetes.client.models.v1_flex_persistent_volume_source import V1FlexPersistentVolumeSource
+from kubernetes.client.models.v1_flex_volume_source import V1FlexVolumeSource
+from kubernetes.client.models.v1_flocker_volume_source import V1FlockerVolumeSource
+from kubernetes.client.models.v1_for_zone import V1ForZone
+from kubernetes.client.models.v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource
+from kubernetes.client.models.v1_grpc_action import V1GRPCAction
+from kubernetes.client.models.v1_git_repo_volume_source import V1GitRepoVolumeSource
+from kubernetes.client.models.v1_glusterfs_persistent_volume_source import V1GlusterfsPersistentVolumeSource
+from kubernetes.client.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource
+from kubernetes.client.models.v1_group_version_for_discovery import V1GroupVersionForDiscovery
+from kubernetes.client.models.v1_http_get_action import V1HTTPGetAction
+from kubernetes.client.models.v1_http_header import V1HTTPHeader
+from kubernetes.client.models.v1_http_ingress_path import V1HTTPIngressPath
+from kubernetes.client.models.v1_http_ingress_rule_value import V1HTTPIngressRuleValue
+from kubernetes.client.models.v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler
+from kubernetes.client.models.v1_horizontal_pod_autoscaler_list import V1HorizontalPodAutoscalerList
+from kubernetes.client.models.v1_horizontal_pod_autoscaler_spec import V1HorizontalPodAutoscalerSpec
+from kubernetes.client.models.v1_horizontal_pod_autoscaler_status import V1HorizontalPodAutoscalerStatus
+from kubernetes.client.models.v1_host_alias import V1HostAlias
+from kubernetes.client.models.v1_host_ip import V1HostIP
+from kubernetes.client.models.v1_host_path_volume_source import V1HostPathVolumeSource
+from kubernetes.client.models.v1_ip_block import V1IPBlock
+from kubernetes.client.models.v1_iscsi_persistent_volume_source import V1ISCSIPersistentVolumeSource
+from kubernetes.client.models.v1_iscsi_volume_source import V1ISCSIVolumeSource
+from kubernetes.client.models.v1_ingress import V1Ingress
+from kubernetes.client.models.v1_ingress_backend import V1IngressBackend
+from kubernetes.client.models.v1_ingress_class import V1IngressClass
+from kubernetes.client.models.v1_ingress_class_list import V1IngressClassList
+from kubernetes.client.models.v1_ingress_class_parameters_reference import V1IngressClassParametersReference
+from kubernetes.client.models.v1_ingress_class_spec import V1IngressClassSpec
+from kubernetes.client.models.v1_ingress_list import V1IngressList
+from kubernetes.client.models.v1_ingress_load_balancer_ingress import V1IngressLoadBalancerIngress
+from kubernetes.client.models.v1_ingress_load_balancer_status import V1IngressLoadBalancerStatus
+from kubernetes.client.models.v1_ingress_port_status import V1IngressPortStatus
+from kubernetes.client.models.v1_ingress_rule import V1IngressRule
+from kubernetes.client.models.v1_ingress_service_backend import V1IngressServiceBackend
+from kubernetes.client.models.v1_ingress_spec import V1IngressSpec
+from kubernetes.client.models.v1_ingress_status import V1IngressStatus
+from kubernetes.client.models.v1_ingress_tls import V1IngressTLS
+from kubernetes.client.models.v1_json_schema_props import V1JSONSchemaProps
+from kubernetes.client.models.v1_job import V1Job
+from kubernetes.client.models.v1_job_condition import V1JobCondition
+from kubernetes.client.models.v1_job_list import V1JobList
+from kubernetes.client.models.v1_job_spec import V1JobSpec
+from kubernetes.client.models.v1_job_status import V1JobStatus
+from kubernetes.client.models.v1_job_template_spec import V1JobTemplateSpec
+from kubernetes.client.models.v1_key_to_path import V1KeyToPath
+from kubernetes.client.models.v1_label_selector import V1LabelSelector
+from kubernetes.client.models.v1_label_selector_requirement import V1LabelSelectorRequirement
+from kubernetes.client.models.v1_lease import V1Lease
+from kubernetes.client.models.v1_lease_list import V1LeaseList
+from kubernetes.client.models.v1_lease_spec import V1LeaseSpec
+from kubernetes.client.models.v1_lifecycle import V1Lifecycle
+from kubernetes.client.models.v1_lifecycle_handler import V1LifecycleHandler
+from kubernetes.client.models.v1_limit_range import V1LimitRange
+from kubernetes.client.models.v1_limit_range_item import V1LimitRangeItem
+from kubernetes.client.models.v1_limit_range_list import V1LimitRangeList
+from kubernetes.client.models.v1_limit_range_spec import V1LimitRangeSpec
+from kubernetes.client.models.v1_list_meta import V1ListMeta
+from kubernetes.client.models.v1_load_balancer_ingress import V1LoadBalancerIngress
+from kubernetes.client.models.v1_load_balancer_status import V1LoadBalancerStatus
+from kubernetes.client.models.v1_local_object_reference import V1LocalObjectReference
+from kubernetes.client.models.v1_local_subject_access_review import V1LocalSubjectAccessReview
+from kubernetes.client.models.v1_local_volume_source import V1LocalVolumeSource
+from kubernetes.client.models.v1_managed_fields_entry import V1ManagedFieldsEntry
+from kubernetes.client.models.v1_match_condition import V1MatchCondition
+from kubernetes.client.models.v1_mutating_webhook import V1MutatingWebhook
+from kubernetes.client.models.v1_mutating_webhook_configuration import V1MutatingWebhookConfiguration
+from kubernetes.client.models.v1_mutating_webhook_configuration_list import V1MutatingWebhookConfigurationList
+from kubernetes.client.models.v1_nfs_volume_source import V1NFSVolumeSource
+from kubernetes.client.models.v1_namespace import V1Namespace
+from kubernetes.client.models.v1_namespace_condition import V1NamespaceCondition
+from kubernetes.client.models.v1_namespace_list import V1NamespaceList
+from kubernetes.client.models.v1_namespace_spec import V1NamespaceSpec
+from kubernetes.client.models.v1_namespace_status import V1NamespaceStatus
+from kubernetes.client.models.v1_network_policy import V1NetworkPolicy
+from kubernetes.client.models.v1_network_policy_egress_rule import V1NetworkPolicyEgressRule
+from kubernetes.client.models.v1_network_policy_ingress_rule import V1NetworkPolicyIngressRule
+from kubernetes.client.models.v1_network_policy_list import V1NetworkPolicyList
+from kubernetes.client.models.v1_network_policy_peer import V1NetworkPolicyPeer
+from kubernetes.client.models.v1_network_policy_port import V1NetworkPolicyPort
+from kubernetes.client.models.v1_network_policy_spec import V1NetworkPolicySpec
+from kubernetes.client.models.v1_node import V1Node
+from kubernetes.client.models.v1_node_address import V1NodeAddress
+from kubernetes.client.models.v1_node_affinity import V1NodeAffinity
+from kubernetes.client.models.v1_node_condition import V1NodeCondition
+from kubernetes.client.models.v1_node_config_source import V1NodeConfigSource
+from kubernetes.client.models.v1_node_config_status import V1NodeConfigStatus
+from kubernetes.client.models.v1_node_daemon_endpoints import V1NodeDaemonEndpoints
+from kubernetes.client.models.v1_node_list import V1NodeList
+from kubernetes.client.models.v1_node_selector import V1NodeSelector
+from kubernetes.client.models.v1_node_selector_requirement import V1NodeSelectorRequirement
+from kubernetes.client.models.v1_node_selector_term import V1NodeSelectorTerm
+from kubernetes.client.models.v1_node_spec import V1NodeSpec
+from kubernetes.client.models.v1_node_status import V1NodeStatus
+from kubernetes.client.models.v1_node_system_info import V1NodeSystemInfo
+from kubernetes.client.models.v1_non_resource_attributes import V1NonResourceAttributes
+from kubernetes.client.models.v1_non_resource_rule import V1NonResourceRule
+from kubernetes.client.models.v1_object_field_selector import V1ObjectFieldSelector
+from kubernetes.client.models.v1_object_meta import V1ObjectMeta
+from kubernetes.client.models.v1_object_reference import V1ObjectReference
+from kubernetes.client.models.v1_overhead import V1Overhead
+from kubernetes.client.models.v1_owner_reference import V1OwnerReference
+from kubernetes.client.models.v1_persistent_volume import V1PersistentVolume
+from kubernetes.client.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
+from kubernetes.client.models.v1_persistent_volume_claim_condition import V1PersistentVolumeClaimCondition
+from kubernetes.client.models.v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
+from kubernetes.client.models.v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
+from kubernetes.client.models.v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
+from kubernetes.client.models.v1_persistent_volume_claim_template import V1PersistentVolumeClaimTemplate
+from kubernetes.client.models.v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
+from kubernetes.client.models.v1_persistent_volume_list import V1PersistentVolumeList
+from kubernetes.client.models.v1_persistent_volume_spec import V1PersistentVolumeSpec
+from kubernetes.client.models.v1_persistent_volume_status import V1PersistentVolumeStatus
+from kubernetes.client.models.v1_photon_persistent_disk_volume_source import V1PhotonPersistentDiskVolumeSource
+from kubernetes.client.models.v1_pod import V1Pod
+from kubernetes.client.models.v1_pod_affinity import V1PodAffinity
+from kubernetes.client.models.v1_pod_affinity_term import V1PodAffinityTerm
+from kubernetes.client.models.v1_pod_anti_affinity import V1PodAntiAffinity
+from kubernetes.client.models.v1_pod_condition import V1PodCondition
+from kubernetes.client.models.v1_pod_dns_config import V1PodDNSConfig
+from kubernetes.client.models.v1_pod_dns_config_option import V1PodDNSConfigOption
+from kubernetes.client.models.v1_pod_disruption_budget import V1PodDisruptionBudget
+from kubernetes.client.models.v1_pod_disruption_budget_list import V1PodDisruptionBudgetList
+from kubernetes.client.models.v1_pod_disruption_budget_spec import V1PodDisruptionBudgetSpec
+from kubernetes.client.models.v1_pod_disruption_budget_status import V1PodDisruptionBudgetStatus
+from kubernetes.client.models.v1_pod_failure_policy import V1PodFailurePolicy
+from kubernetes.client.models.v1_pod_failure_policy_on_exit_codes_requirement import V1PodFailurePolicyOnExitCodesRequirement
+from kubernetes.client.models.v1_pod_failure_policy_on_pod_conditions_pattern import V1PodFailurePolicyOnPodConditionsPattern
+from kubernetes.client.models.v1_pod_failure_policy_rule import V1PodFailurePolicyRule
+from kubernetes.client.models.v1_pod_ip import V1PodIP
+from kubernetes.client.models.v1_pod_list import V1PodList
+from kubernetes.client.models.v1_pod_os import V1PodOS
+from kubernetes.client.models.v1_pod_readiness_gate import V1PodReadinessGate
+from kubernetes.client.models.v1_pod_resource_claim import V1PodResourceClaim
+from kubernetes.client.models.v1_pod_resource_claim_status import V1PodResourceClaimStatus
+from kubernetes.client.models.v1_pod_scheduling_gate import V1PodSchedulingGate
+from kubernetes.client.models.v1_pod_security_context import V1PodSecurityContext
+from kubernetes.client.models.v1_pod_spec import V1PodSpec
+from kubernetes.client.models.v1_pod_status import V1PodStatus
+from kubernetes.client.models.v1_pod_template import V1PodTemplate
+from kubernetes.client.models.v1_pod_template_list import V1PodTemplateList
+from kubernetes.client.models.v1_pod_template_spec import V1PodTemplateSpec
+from kubernetes.client.models.v1_policy_rule import V1PolicyRule
+from kubernetes.client.models.v1_port_status import V1PortStatus
+from kubernetes.client.models.v1_portworx_volume_source import V1PortworxVolumeSource
+from kubernetes.client.models.v1_preconditions import V1Preconditions
+from kubernetes.client.models.v1_preferred_scheduling_term import V1PreferredSchedulingTerm
+from kubernetes.client.models.v1_priority_class import V1PriorityClass
+from kubernetes.client.models.v1_priority_class_list import V1PriorityClassList
+from kubernetes.client.models.v1_probe import V1Probe
+from kubernetes.client.models.v1_projected_volume_source import V1ProjectedVolumeSource
+from kubernetes.client.models.v1_quobyte_volume_source import V1QuobyteVolumeSource
+from kubernetes.client.models.v1_rbd_persistent_volume_source import V1RBDPersistentVolumeSource
+from kubernetes.client.models.v1_rbd_volume_source import V1RBDVolumeSource
+from kubernetes.client.models.v1_replica_set import V1ReplicaSet
+from kubernetes.client.models.v1_replica_set_condition import V1ReplicaSetCondition
+from kubernetes.client.models.v1_replica_set_list import V1ReplicaSetList
+from kubernetes.client.models.v1_replica_set_spec import V1ReplicaSetSpec
+from kubernetes.client.models.v1_replica_set_status import V1ReplicaSetStatus
+from kubernetes.client.models.v1_replication_controller import V1ReplicationController
+from kubernetes.client.models.v1_replication_controller_condition import V1ReplicationControllerCondition
+from kubernetes.client.models.v1_replication_controller_list import V1ReplicationControllerList
+from kubernetes.client.models.v1_replication_controller_spec import V1ReplicationControllerSpec
+from kubernetes.client.models.v1_replication_controller_status import V1ReplicationControllerStatus
+from kubernetes.client.models.v1_resource_attributes import V1ResourceAttributes
+from kubernetes.client.models.v1_resource_claim import V1ResourceClaim
+from kubernetes.client.models.v1_resource_field_selector import V1ResourceFieldSelector
+from kubernetes.client.models.v1_resource_quota import V1ResourceQuota
+from kubernetes.client.models.v1_resource_quota_list import V1ResourceQuotaList
+from kubernetes.client.models.v1_resource_quota_spec import V1ResourceQuotaSpec
+from kubernetes.client.models.v1_resource_quota_status import V1ResourceQuotaStatus
+from kubernetes.client.models.v1_resource_requirements import V1ResourceRequirements
+from kubernetes.client.models.v1_resource_rule import V1ResourceRule
+from kubernetes.client.models.v1_role import V1Role
+from kubernetes.client.models.v1_role_binding import V1RoleBinding
+from kubernetes.client.models.v1_role_binding_list import V1RoleBindingList
+from kubernetes.client.models.v1_role_list import V1RoleList
+from kubernetes.client.models.v1_role_ref import V1RoleRef
+from kubernetes.client.models.v1_rolling_update_daemon_set import V1RollingUpdateDaemonSet
+from kubernetes.client.models.v1_rolling_update_deployment import V1RollingUpdateDeployment
+from kubernetes.client.models.v1_rolling_update_stateful_set_strategy import V1RollingUpdateStatefulSetStrategy
+from kubernetes.client.models.v1_rule_with_operations import V1RuleWithOperations
+from kubernetes.client.models.v1_runtime_class import V1RuntimeClass
+from kubernetes.client.models.v1_runtime_class_list import V1RuntimeClassList
+from kubernetes.client.models.v1_se_linux_options import V1SELinuxOptions
+from kubernetes.client.models.v1_scale import V1Scale
+from kubernetes.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
+from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
+from kubernetes.client.models.v1_scale_spec import V1ScaleSpec
+from kubernetes.client.models.v1_scale_status import V1ScaleStatus
+from kubernetes.client.models.v1_scheduling import V1Scheduling
+from kubernetes.client.models.v1_scope_selector import V1ScopeSelector
+from kubernetes.client.models.v1_scoped_resource_selector_requirement import V1ScopedResourceSelectorRequirement
+from kubernetes.client.models.v1_seccomp_profile import V1SeccompProfile
+from kubernetes.client.models.v1_secret import V1Secret
+from kubernetes.client.models.v1_secret_env_source import V1SecretEnvSource
+from kubernetes.client.models.v1_secret_key_selector import V1SecretKeySelector
+from kubernetes.client.models.v1_secret_list import V1SecretList
+from kubernetes.client.models.v1_secret_projection import V1SecretProjection
+from kubernetes.client.models.v1_secret_reference import V1SecretReference
+from kubernetes.client.models.v1_secret_volume_source import V1SecretVolumeSource
+from kubernetes.client.models.v1_security_context import V1SecurityContext
+from kubernetes.client.models.v1_self_subject_access_review import V1SelfSubjectAccessReview
+from kubernetes.client.models.v1_self_subject_access_review_spec import V1SelfSubjectAccessReviewSpec
+from kubernetes.client.models.v1_self_subject_review import V1SelfSubjectReview
+from kubernetes.client.models.v1_self_subject_review_status import V1SelfSubjectReviewStatus
+from kubernetes.client.models.v1_self_subject_rules_review import V1SelfSubjectRulesReview
+from kubernetes.client.models.v1_self_subject_rules_review_spec import V1SelfSubjectRulesReviewSpec
+from kubernetes.client.models.v1_server_address_by_client_cidr import V1ServerAddressByClientCIDR
+from kubernetes.client.models.v1_service import V1Service
+from kubernetes.client.models.v1_service_account import V1ServiceAccount
+from kubernetes.client.models.v1_service_account_list import V1ServiceAccountList
+from kubernetes.client.models.v1_service_account_token_projection import V1ServiceAccountTokenProjection
+from kubernetes.client.models.v1_service_backend_port import V1ServiceBackendPort
+from kubernetes.client.models.v1_service_list import V1ServiceList
+from kubernetes.client.models.v1_service_port import V1ServicePort
+from kubernetes.client.models.v1_service_spec import V1ServiceSpec
+from kubernetes.client.models.v1_service_status import V1ServiceStatus
+from kubernetes.client.models.v1_session_affinity_config import V1SessionAffinityConfig
+from kubernetes.client.models.v1_stateful_set import V1StatefulSet
+from kubernetes.client.models.v1_stateful_set_condition import V1StatefulSetCondition
+from kubernetes.client.models.v1_stateful_set_list import V1StatefulSetList
+from kubernetes.client.models.v1_stateful_set_ordinals import V1StatefulSetOrdinals
+from kubernetes.client.models.v1_stateful_set_persistent_volume_claim_retention_policy import V1StatefulSetPersistentVolumeClaimRetentionPolicy
+from kubernetes.client.models.v1_stateful_set_spec import V1StatefulSetSpec
+from kubernetes.client.models.v1_stateful_set_status import V1StatefulSetStatus
+from kubernetes.client.models.v1_stateful_set_update_strategy import V1StatefulSetUpdateStrategy
+from kubernetes.client.models.v1_status import V1Status
+from kubernetes.client.models.v1_status_cause import V1StatusCause
+from kubernetes.client.models.v1_status_details import V1StatusDetails
+from kubernetes.client.models.v1_storage_class import V1StorageClass
+from kubernetes.client.models.v1_storage_class_list import V1StorageClassList
+from kubernetes.client.models.v1_storage_os_persistent_volume_source import V1StorageOSPersistentVolumeSource
+from kubernetes.client.models.v1_storage_os_volume_source import V1StorageOSVolumeSource
+from kubernetes.client.models.v1_subject import V1Subject
+from kubernetes.client.models.v1_subject_access_review import V1SubjectAccessReview
+from kubernetes.client.models.v1_subject_access_review_spec import V1SubjectAccessReviewSpec
+from kubernetes.client.models.v1_subject_access_review_status import V1SubjectAccessReviewStatus
+from kubernetes.client.models.v1_subject_rules_review_status import V1SubjectRulesReviewStatus
+from kubernetes.client.models.v1_sysctl import V1Sysctl
+from kubernetes.client.models.v1_tcp_socket_action import V1TCPSocketAction
+from kubernetes.client.models.v1_taint import V1Taint
+from kubernetes.client.models.v1_token_request_spec import V1TokenRequestSpec
+from kubernetes.client.models.v1_token_request_status import V1TokenRequestStatus
+from kubernetes.client.models.v1_token_review import V1TokenReview
+from kubernetes.client.models.v1_token_review_spec import V1TokenReviewSpec
+from kubernetes.client.models.v1_token_review_status import V1TokenReviewStatus
+from kubernetes.client.models.v1_toleration import V1Toleration
+from kubernetes.client.models.v1_topology_selector_label_requirement import V1TopologySelectorLabelRequirement
+from kubernetes.client.models.v1_topology_selector_term import V1TopologySelectorTerm
+from kubernetes.client.models.v1_topology_spread_constraint import V1TopologySpreadConstraint
+from kubernetes.client.models.v1_typed_local_object_reference import V1TypedLocalObjectReference
+from kubernetes.client.models.v1_typed_object_reference import V1TypedObjectReference
+from kubernetes.client.models.v1_uncounted_terminated_pods import V1UncountedTerminatedPods
+from kubernetes.client.models.v1_user_info import V1UserInfo
+from kubernetes.client.models.v1_validating_webhook import V1ValidatingWebhook
+from kubernetes.client.models.v1_validating_webhook_configuration import V1ValidatingWebhookConfiguration
+from kubernetes.client.models.v1_validating_webhook_configuration_list import V1ValidatingWebhookConfigurationList
+from kubernetes.client.models.v1_validation_rule import V1ValidationRule
+from kubernetes.client.models.v1_volume import V1Volume
+from kubernetes.client.models.v1_volume_attachment import V1VolumeAttachment
+from kubernetes.client.models.v1_volume_attachment_list import V1VolumeAttachmentList
+from kubernetes.client.models.v1_volume_attachment_source import V1VolumeAttachmentSource
+from kubernetes.client.models.v1_volume_attachment_spec import V1VolumeAttachmentSpec
+from kubernetes.client.models.v1_volume_attachment_status import V1VolumeAttachmentStatus
+from kubernetes.client.models.v1_volume_device import V1VolumeDevice
+from kubernetes.client.models.v1_volume_error import V1VolumeError
+from kubernetes.client.models.v1_volume_mount import V1VolumeMount
+from kubernetes.client.models.v1_volume_node_affinity import V1VolumeNodeAffinity
+from kubernetes.client.models.v1_volume_node_resources import V1VolumeNodeResources
+from kubernetes.client.models.v1_volume_projection import V1VolumeProjection
+from kubernetes.client.models.v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource
+from kubernetes.client.models.v1_watch_event import V1WatchEvent
+from kubernetes.client.models.v1_webhook_conversion import V1WebhookConversion
+from kubernetes.client.models.v1_weighted_pod_affinity_term import V1WeightedPodAffinityTerm
+from kubernetes.client.models.v1_windows_security_context_options import V1WindowsSecurityContextOptions
+from kubernetes.client.models.v1alpha1_audit_annotation import V1alpha1AuditAnnotation
+from kubernetes.client.models.v1alpha1_cluster_cidr import V1alpha1ClusterCIDR
+from kubernetes.client.models.v1alpha1_cluster_cidr_list import V1alpha1ClusterCIDRList
+from kubernetes.client.models.v1alpha1_cluster_cidr_spec import V1alpha1ClusterCIDRSpec
+from kubernetes.client.models.v1alpha1_cluster_trust_bundle import V1alpha1ClusterTrustBundle
+from kubernetes.client.models.v1alpha1_cluster_trust_bundle_list import V1alpha1ClusterTrustBundleList
+from kubernetes.client.models.v1alpha1_cluster_trust_bundle_spec import V1alpha1ClusterTrustBundleSpec
+from kubernetes.client.models.v1alpha1_expression_warning import V1alpha1ExpressionWarning
+from kubernetes.client.models.v1alpha1_ip_address import V1alpha1IPAddress
+from kubernetes.client.models.v1alpha1_ip_address_list import V1alpha1IPAddressList
+from kubernetes.client.models.v1alpha1_ip_address_spec import V1alpha1IPAddressSpec
+from kubernetes.client.models.v1alpha1_match_condition import V1alpha1MatchCondition
+from kubernetes.client.models.v1alpha1_match_resources import V1alpha1MatchResources
+from kubernetes.client.models.v1alpha1_named_rule_with_operations import V1alpha1NamedRuleWithOperations
+from kubernetes.client.models.v1alpha1_param_kind import V1alpha1ParamKind
+from kubernetes.client.models.v1alpha1_param_ref import V1alpha1ParamRef
+from kubernetes.client.models.v1alpha1_parent_reference import V1alpha1ParentReference
+from kubernetes.client.models.v1alpha1_self_subject_review import V1alpha1SelfSubjectReview
+from kubernetes.client.models.v1alpha1_self_subject_review_status import V1alpha1SelfSubjectReviewStatus
+from kubernetes.client.models.v1alpha1_server_storage_version import V1alpha1ServerStorageVersion
+from kubernetes.client.models.v1alpha1_storage_version import V1alpha1StorageVersion
+from kubernetes.client.models.v1alpha1_storage_version_condition import V1alpha1StorageVersionCondition
+from kubernetes.client.models.v1alpha1_storage_version_list import V1alpha1StorageVersionList
+from kubernetes.client.models.v1alpha1_storage_version_status import V1alpha1StorageVersionStatus
+from kubernetes.client.models.v1alpha1_type_checking import V1alpha1TypeChecking
+from kubernetes.client.models.v1alpha1_validating_admission_policy import V1alpha1ValidatingAdmissionPolicy
+from kubernetes.client.models.v1alpha1_validating_admission_policy_binding import V1alpha1ValidatingAdmissionPolicyBinding
+from kubernetes.client.models.v1alpha1_validating_admission_policy_binding_list import V1alpha1ValidatingAdmissionPolicyBindingList
+from kubernetes.client.models.v1alpha1_validating_admission_policy_binding_spec import V1alpha1ValidatingAdmissionPolicyBindingSpec
+from kubernetes.client.models.v1alpha1_validating_admission_policy_list import V1alpha1ValidatingAdmissionPolicyList
+from kubernetes.client.models.v1alpha1_validating_admission_policy_spec import V1alpha1ValidatingAdmissionPolicySpec
+from kubernetes.client.models.v1alpha1_validating_admission_policy_status import V1alpha1ValidatingAdmissionPolicyStatus
+from kubernetes.client.models.v1alpha1_validation import V1alpha1Validation
+from kubernetes.client.models.v1alpha1_variable import V1alpha1Variable
+from kubernetes.client.models.v1alpha2_allocation_result import V1alpha2AllocationResult
+from kubernetes.client.models.v1alpha2_pod_scheduling_context import V1alpha2PodSchedulingContext
+from kubernetes.client.models.v1alpha2_pod_scheduling_context_list import V1alpha2PodSchedulingContextList
+from kubernetes.client.models.v1alpha2_pod_scheduling_context_spec import V1alpha2PodSchedulingContextSpec
+from kubernetes.client.models.v1alpha2_pod_scheduling_context_status import V1alpha2PodSchedulingContextStatus
+from kubernetes.client.models.v1alpha2_resource_claim import V1alpha2ResourceClaim
+from kubernetes.client.models.v1alpha2_resource_claim_consumer_reference import V1alpha2ResourceClaimConsumerReference
+from kubernetes.client.models.v1alpha2_resource_claim_list import V1alpha2ResourceClaimList
+from kubernetes.client.models.v1alpha2_resource_claim_parameters_reference import V1alpha2ResourceClaimParametersReference
+from kubernetes.client.models.v1alpha2_resource_claim_scheduling_status import V1alpha2ResourceClaimSchedulingStatus
+from kubernetes.client.models.v1alpha2_resource_claim_spec import V1alpha2ResourceClaimSpec
+from kubernetes.client.models.v1alpha2_resource_claim_status import V1alpha2ResourceClaimStatus
+from kubernetes.client.models.v1alpha2_resource_claim_template import V1alpha2ResourceClaimTemplate
+from kubernetes.client.models.v1alpha2_resource_claim_template_list import V1alpha2ResourceClaimTemplateList
+from kubernetes.client.models.v1alpha2_resource_claim_template_spec import V1alpha2ResourceClaimTemplateSpec
+from kubernetes.client.models.v1alpha2_resource_class import V1alpha2ResourceClass
+from kubernetes.client.models.v1alpha2_resource_class_list import V1alpha2ResourceClassList
+from kubernetes.client.models.v1alpha2_resource_class_parameters_reference import V1alpha2ResourceClassParametersReference
+from kubernetes.client.models.v1alpha2_resource_handle import V1alpha2ResourceHandle
+from kubernetes.client.models.v1beta1_audit_annotation import V1beta1AuditAnnotation
+from kubernetes.client.models.v1beta1_expression_warning import V1beta1ExpressionWarning
+from kubernetes.client.models.v1beta1_match_condition import V1beta1MatchCondition
+from kubernetes.client.models.v1beta1_match_resources import V1beta1MatchResources
+from kubernetes.client.models.v1beta1_named_rule_with_operations import V1beta1NamedRuleWithOperations
+from kubernetes.client.models.v1beta1_param_kind import V1beta1ParamKind
+from kubernetes.client.models.v1beta1_param_ref import V1beta1ParamRef
+from kubernetes.client.models.v1beta1_self_subject_review import V1beta1SelfSubjectReview
+from kubernetes.client.models.v1beta1_self_subject_review_status import V1beta1SelfSubjectReviewStatus
+from kubernetes.client.models.v1beta1_type_checking import V1beta1TypeChecking
+from kubernetes.client.models.v1beta1_validating_admission_policy import V1beta1ValidatingAdmissionPolicy
+from kubernetes.client.models.v1beta1_validating_admission_policy_binding import V1beta1ValidatingAdmissionPolicyBinding
+from kubernetes.client.models.v1beta1_validating_admission_policy_binding_list import V1beta1ValidatingAdmissionPolicyBindingList
+from kubernetes.client.models.v1beta1_validating_admission_policy_binding_spec import V1beta1ValidatingAdmissionPolicyBindingSpec
+from kubernetes.client.models.v1beta1_validating_admission_policy_list import V1beta1ValidatingAdmissionPolicyList
+from kubernetes.client.models.v1beta1_validating_admission_policy_spec import V1beta1ValidatingAdmissionPolicySpec
+from kubernetes.client.models.v1beta1_validating_admission_policy_status import V1beta1ValidatingAdmissionPolicyStatus
+from kubernetes.client.models.v1beta1_validation import V1beta1Validation
+from kubernetes.client.models.v1beta1_variable import V1beta1Variable
+from kubernetes.client.models.v1beta2_exempt_priority_level_configuration import V1beta2ExemptPriorityLevelConfiguration
+from kubernetes.client.models.v1beta2_flow_distinguisher_method import V1beta2FlowDistinguisherMethod
+from kubernetes.client.models.v1beta2_flow_schema import V1beta2FlowSchema
+from kubernetes.client.models.v1beta2_flow_schema_condition import V1beta2FlowSchemaCondition
+from kubernetes.client.models.v1beta2_flow_schema_list import V1beta2FlowSchemaList
+from kubernetes.client.models.v1beta2_flow_schema_spec import V1beta2FlowSchemaSpec
+from kubernetes.client.models.v1beta2_flow_schema_status import V1beta2FlowSchemaStatus
+from kubernetes.client.models.v1beta2_group_subject import V1beta2GroupSubject
+from kubernetes.client.models.v1beta2_limit_response import V1beta2LimitResponse
+from kubernetes.client.models.v1beta2_limited_priority_level_configuration import V1beta2LimitedPriorityLevelConfiguration
+from kubernetes.client.models.v1beta2_non_resource_policy_rule import V1beta2NonResourcePolicyRule
+from kubernetes.client.models.v1beta2_policy_rules_with_subjects import V1beta2PolicyRulesWithSubjects
+from kubernetes.client.models.v1beta2_priority_level_configuration import V1beta2PriorityLevelConfiguration
+from kubernetes.client.models.v1beta2_priority_level_configuration_condition import V1beta2PriorityLevelConfigurationCondition
+from kubernetes.client.models.v1beta2_priority_level_configuration_list import V1beta2PriorityLevelConfigurationList
+from kubernetes.client.models.v1beta2_priority_level_configuration_reference import V1beta2PriorityLevelConfigurationReference
+from kubernetes.client.models.v1beta2_priority_level_configuration_spec import V1beta2PriorityLevelConfigurationSpec
+from kubernetes.client.models.v1beta2_priority_level_configuration_status import V1beta2PriorityLevelConfigurationStatus
+from kubernetes.client.models.v1beta2_queuing_configuration import V1beta2QueuingConfiguration
+from kubernetes.client.models.v1beta2_resource_policy_rule import V1beta2ResourcePolicyRule
+from kubernetes.client.models.v1beta2_service_account_subject import V1beta2ServiceAccountSubject
+from kubernetes.client.models.v1beta2_subject import V1beta2Subject
+from kubernetes.client.models.v1beta2_user_subject import V1beta2UserSubject
+from kubernetes.client.models.v1beta3_exempt_priority_level_configuration import V1beta3ExemptPriorityLevelConfiguration
+from kubernetes.client.models.v1beta3_flow_distinguisher_method import V1beta3FlowDistinguisherMethod
+from kubernetes.client.models.v1beta3_flow_schema import V1beta3FlowSchema
+from kubernetes.client.models.v1beta3_flow_schema_condition import V1beta3FlowSchemaCondition
+from kubernetes.client.models.v1beta3_flow_schema_list import V1beta3FlowSchemaList
+from kubernetes.client.models.v1beta3_flow_schema_spec import V1beta3FlowSchemaSpec
+from kubernetes.client.models.v1beta3_flow_schema_status import V1beta3FlowSchemaStatus
+from kubernetes.client.models.v1beta3_group_subject import V1beta3GroupSubject
+from kubernetes.client.models.v1beta3_limit_response import V1beta3LimitResponse
+from kubernetes.client.models.v1beta3_limited_priority_level_configuration import V1beta3LimitedPriorityLevelConfiguration
+from kubernetes.client.models.v1beta3_non_resource_policy_rule import V1beta3NonResourcePolicyRule
+from kubernetes.client.models.v1beta3_policy_rules_with_subjects import V1beta3PolicyRulesWithSubjects
+from kubernetes.client.models.v1beta3_priority_level_configuration import V1beta3PriorityLevelConfiguration
+from kubernetes.client.models.v1beta3_priority_level_configuration_condition import V1beta3PriorityLevelConfigurationCondition
+from kubernetes.client.models.v1beta3_priority_level_configuration_list import V1beta3PriorityLevelConfigurationList
+from kubernetes.client.models.v1beta3_priority_level_configuration_reference import V1beta3PriorityLevelConfigurationReference
+from kubernetes.client.models.v1beta3_priority_level_configuration_spec import V1beta3PriorityLevelConfigurationSpec
+from kubernetes.client.models.v1beta3_priority_level_configuration_status import V1beta3PriorityLevelConfigurationStatus
+from kubernetes.client.models.v1beta3_queuing_configuration import V1beta3QueuingConfiguration
+from kubernetes.client.models.v1beta3_resource_policy_rule import V1beta3ResourcePolicyRule
+from kubernetes.client.models.v1beta3_service_account_subject import V1beta3ServiceAccountSubject
+from kubernetes.client.models.v1beta3_subject import V1beta3Subject
+from kubernetes.client.models.v1beta3_user_subject import V1beta3UserSubject
+from kubernetes.client.models.v2_container_resource_metric_source import V2ContainerResourceMetricSource
+from kubernetes.client.models.v2_container_resource_metric_status import V2ContainerResourceMetricStatus
+from kubernetes.client.models.v2_cross_version_object_reference import V2CrossVersionObjectReference
+from kubernetes.client.models.v2_external_metric_source import V2ExternalMetricSource
+from kubernetes.client.models.v2_external_metric_status import V2ExternalMetricStatus
+from kubernetes.client.models.v2_hpa_scaling_policy import V2HPAScalingPolicy
+from kubernetes.client.models.v2_hpa_scaling_rules import V2HPAScalingRules
+from kubernetes.client.models.v2_horizontal_pod_autoscaler import V2HorizontalPodAutoscaler
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_behavior import V2HorizontalPodAutoscalerBehavior
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_condition import V2HorizontalPodAutoscalerCondition
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_list import V2HorizontalPodAutoscalerList
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_spec import V2HorizontalPodAutoscalerSpec
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_status import V2HorizontalPodAutoscalerStatus
+from kubernetes.client.models.v2_metric_identifier import V2MetricIdentifier
+from kubernetes.client.models.v2_metric_spec import V2MetricSpec
+from kubernetes.client.models.v2_metric_status import V2MetricStatus
+from kubernetes.client.models.v2_metric_target import V2MetricTarget
+from kubernetes.client.models.v2_metric_value_status import V2MetricValueStatus
+from kubernetes.client.models.v2_object_metric_source import V2ObjectMetricSource
+from kubernetes.client.models.v2_object_metric_status import V2ObjectMetricStatus
+from kubernetes.client.models.v2_pods_metric_source import V2PodsMetricSource
+from kubernetes.client.models.v2_pods_metric_status import V2PodsMetricStatus
+from kubernetes.client.models.v2_resource_metric_source import V2ResourceMetricSource
+from kubernetes.client.models.v2_resource_metric_status import V2ResourceMetricStatus
+from kubernetes.client.models.version_info import VersionInfo
+
diff --git a/contrib/python/kubernetes/kubernetes/client/api/__init__.py b/contrib/python/kubernetes/kubernetes/client/api/__init__.py
new file mode 100644
index 0000000000..88756fb030
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/__init__.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import
+
+# flake8: noqa
+
+# import apis into api package
+from kubernetes.client.api.well_known_api import WellKnownApi
+from kubernetes.client.api.admissionregistration_api import AdmissionregistrationApi
+from kubernetes.client.api.admissionregistration_v1_api import AdmissionregistrationV1Api
+from kubernetes.client.api.admissionregistration_v1alpha1_api import AdmissionregistrationV1alpha1Api
+from kubernetes.client.api.admissionregistration_v1beta1_api import AdmissionregistrationV1beta1Api
+from kubernetes.client.api.apiextensions_api import ApiextensionsApi
+from kubernetes.client.api.apiextensions_v1_api import ApiextensionsV1Api
+from kubernetes.client.api.apiregistration_api import ApiregistrationApi
+from kubernetes.client.api.apiregistration_v1_api import ApiregistrationV1Api
+from kubernetes.client.api.apis_api import ApisApi
+from kubernetes.client.api.apps_api import AppsApi
+from kubernetes.client.api.apps_v1_api import AppsV1Api
+from kubernetes.client.api.authentication_api import AuthenticationApi
+from kubernetes.client.api.authentication_v1_api import AuthenticationV1Api
+from kubernetes.client.api.authentication_v1alpha1_api import AuthenticationV1alpha1Api
+from kubernetes.client.api.authentication_v1beta1_api import AuthenticationV1beta1Api
+from kubernetes.client.api.authorization_api import AuthorizationApi
+from kubernetes.client.api.authorization_v1_api import AuthorizationV1Api
+from kubernetes.client.api.autoscaling_api import AutoscalingApi
+from kubernetes.client.api.autoscaling_v1_api import AutoscalingV1Api
+from kubernetes.client.api.autoscaling_v2_api import AutoscalingV2Api
+from kubernetes.client.api.batch_api import BatchApi
+from kubernetes.client.api.batch_v1_api import BatchV1Api
+from kubernetes.client.api.certificates_api import CertificatesApi
+from kubernetes.client.api.certificates_v1_api import CertificatesV1Api
+from kubernetes.client.api.certificates_v1alpha1_api import CertificatesV1alpha1Api
+from kubernetes.client.api.coordination_api import CoordinationApi
+from kubernetes.client.api.coordination_v1_api import CoordinationV1Api
+from kubernetes.client.api.core_api import CoreApi
+from kubernetes.client.api.core_v1_api import CoreV1Api
+from kubernetes.client.api.custom_objects_api import CustomObjectsApi
+from kubernetes.client.api.discovery_api import DiscoveryApi
+from kubernetes.client.api.discovery_v1_api import DiscoveryV1Api
+from kubernetes.client.api.events_api import EventsApi
+from kubernetes.client.api.events_v1_api import EventsV1Api
+from kubernetes.client.api.flowcontrol_apiserver_api import FlowcontrolApiserverApi
+from kubernetes.client.api.flowcontrol_apiserver_v1beta2_api import FlowcontrolApiserverV1beta2Api
+from kubernetes.client.api.flowcontrol_apiserver_v1beta3_api import FlowcontrolApiserverV1beta3Api
+from kubernetes.client.api.internal_apiserver_api import InternalApiserverApi
+from kubernetes.client.api.internal_apiserver_v1alpha1_api import InternalApiserverV1alpha1Api
+from kubernetes.client.api.logs_api import LogsApi
+from kubernetes.client.api.networking_api import NetworkingApi
+from kubernetes.client.api.networking_v1_api import NetworkingV1Api
+from kubernetes.client.api.networking_v1alpha1_api import NetworkingV1alpha1Api
+from kubernetes.client.api.node_api import NodeApi
+from kubernetes.client.api.node_v1_api import NodeV1Api
+from kubernetes.client.api.openid_api import OpenidApi
+from kubernetes.client.api.policy_api import PolicyApi
+from kubernetes.client.api.policy_v1_api import PolicyV1Api
+from kubernetes.client.api.rbac_authorization_api import RbacAuthorizationApi
+from kubernetes.client.api.rbac_authorization_v1_api import RbacAuthorizationV1Api
+from kubernetes.client.api.resource_api import ResourceApi
+from kubernetes.client.api.resource_v1alpha2_api import ResourceV1alpha2Api
+from kubernetes.client.api.scheduling_api import SchedulingApi
+from kubernetes.client.api.scheduling_v1_api import SchedulingV1Api
+from kubernetes.client.api.storage_api import StorageApi
+from kubernetes.client.api.storage_v1_api import StorageV1Api
+from kubernetes.client.api.version_api import VersionApi
diff --git a/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_api.py b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_api.py
new file mode 100644
index 0000000000..f9636fa98c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AdmissionregistrationApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def get_api_group(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIGroup
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_group_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_group_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_group" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIGroup', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1_api.py
new file mode 100644
index 0000000000..37e2b287cc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1_api.py
@@ -0,0 +1,2196 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AdmissionregistrationV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_mutating_webhook_configuration(self, body, **kwargs): # noqa: E501
+ """create_mutating_webhook_configuration # noqa: E501
+
+ create a MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_mutating_webhook_configuration(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1MutatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1MutatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_mutating_webhook_configuration_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_mutating_webhook_configuration_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_mutating_webhook_configuration # noqa: E501
+
+ create a MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_mutating_webhook_configuration_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1MutatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_mutating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_mutating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1MutatingWebhookConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_validating_webhook_configuration(self, body, **kwargs): # noqa: E501
+ """create_validating_webhook_configuration # noqa: E501
+
+ create a ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_validating_webhook_configuration(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1ValidatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ValidatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_validating_webhook_configuration_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_validating_webhook_configuration_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_validating_webhook_configuration # noqa: E501
+
+ create a ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_validating_webhook_configuration_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1ValidatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_validating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_validating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ValidatingWebhookConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_mutating_webhook_configuration(self, **kwargs): # noqa: E501
+ """delete_collection_mutating_webhook_configuration # noqa: E501
+
+ delete collection of MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_mutating_webhook_configuration_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_mutating_webhook_configuration # noqa: E501
+
+ delete collection of MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_mutating_webhook_configuration_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_mutating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_validating_webhook_configuration(self, **kwargs): # noqa: E501
+ """delete_collection_validating_webhook_configuration # noqa: E501
+
+ delete collection of ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_webhook_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_validating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_validating_webhook_configuration_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_validating_webhook_configuration # noqa: E501
+
+ delete collection of ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_webhook_configuration_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_validating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_mutating_webhook_configuration(self, name, **kwargs): # noqa: E501
+ """delete_mutating_webhook_configuration # noqa: E501
+
+ delete a MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_mutating_webhook_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_mutating_webhook_configuration_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_mutating_webhook_configuration # noqa: E501
+
+ delete a MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_mutating_webhook_configuration_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_mutating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_mutating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_validating_webhook_configuration(self, name, **kwargs): # noqa: E501
+ """delete_validating_webhook_configuration # noqa: E501
+
+ delete a ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_webhook_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_validating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_validating_webhook_configuration_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_validating_webhook_configuration # noqa: E501
+
+ delete a ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_webhook_configuration_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_validating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_mutating_webhook_configuration(self, **kwargs): # noqa: E501
+ """list_mutating_webhook_configuration # noqa: E501
+
+ list or watch objects of kind MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_mutating_webhook_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1MutatingWebhookConfigurationList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
+
+ def list_mutating_webhook_configuration_with_http_info(self, **kwargs): # noqa: E501
+ """list_mutating_webhook_configuration # noqa: E501
+
+ list or watch objects of kind MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_mutating_webhook_configuration_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1MutatingWebhookConfigurationList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_mutating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1MutatingWebhookConfigurationList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_validating_webhook_configuration(self, **kwargs): # noqa: E501
+ """list_validating_webhook_configuration # noqa: E501
+
+ list or watch objects of kind ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_webhook_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ValidatingWebhookConfigurationList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_validating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
+
+    def list_validating_webhook_configuration_with_http_info(self, **kwargs):  # noqa: E501
+        """list_validating_webhook_configuration  # noqa: E501
+
+        list or watch objects of kind ValidatingWebhookConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_validating_webhook_configuration_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ValidatingWebhookConfigurationList, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # NOTE: locals() is taken before any other local is bound, so the
+        # snapshot contains exactly {'self': ..., 'kwargs': {...}}; the
+        # generated code below folds kwargs into this mapping.
+        local_var_params = locals()
+
+        # Endpoint-specific query options accepted by this list call.
+        all_params = [
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Generic request-control options accepted by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten kwargs into
+        # local_var_params so each option can be looked up by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_validating_webhook_configuration" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # This endpoint is cluster-scoped: no path parameters.
+        path_params = {}
+
+        # Build the query string; only explicitly supplied (non-None) options
+        # are sent, and snake_case names map to the API's camelCase keys.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, file uploads or request body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP call (and optional async dispatch) to the
+        # shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ValidatingWebhookConfigurationList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_mutating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
+ """patch_mutating_webhook_configuration # noqa: E501
+
+ partially update the specified MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_mutating_webhook_configuration(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1MutatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_mutating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
+
+    def patch_mutating_webhook_configuration_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """patch_mutating_webhook_configuration  # noqa: E501
+
+        partially update the specified MutatingWebhookConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_mutating_webhook_configuration_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the MutatingWebhookConfiguration (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # NOTE: locals() is taken before any other local is bound, so the
+        # snapshot contains exactly {'self': ..., 'name': ..., 'body': ...,
+        # 'kwargs': {...}}; the generated code below folds kwargs into it.
+        local_var_params = locals()
+
+        # Positional parameters plus endpoint-specific query options.
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        # Generic request-control options accepted by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten kwargs into
+        # local_var_params so each option can be looked up by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_mutating_webhook_configuration" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_mutating_webhook_configuration`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_mutating_webhook_configuration`")  # noqa: E501
+
+        collection_formats = {}
+
+        # The target object's name is interpolated into the URL path.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Build the query string; only explicitly supplied (non-None) options
+        # are sent, and snake_case names map to the API's camelCase keys.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The patch document itself is sent as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP call (and optional async dispatch) to the
+        # shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1MutatingWebhookConfiguration',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_validating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_webhook_configuration # noqa: E501
+
+ partially update the specified ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_webhook_configuration(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ValidatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body (no (data, status, headers) tuple).
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_validating_webhook_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_webhook_configuration # noqa: E501
+
+ partially update the specified ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_webhook_configuration_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): looks machine-generated (OpenAPI-generator client style);
+ # presumably regenerated from the API spec — prefer template/spec fixes
+ # over hand-editing this body.
+ # locals() snapshots 'self', 'name', 'body' and the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unexpected keyword arguments, then flatten the accepted ones
+ # into local_var_params so they can be looked up uniformly below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_validating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_webhook_configuration`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Build the query string; snake_case kwargs map to the API's camelCase
+ # names (e.g. dry_run -> dryRun). None values are omitted entirely.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the PATCH to the shared api_client; with async_req=True this
+ # returns a thread-like object (see docstring).
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ValidatingWebhookConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_mutating_webhook_configuration(self, name, **kwargs): # noqa: E501
+ """read_mutating_webhook_configuration # noqa: E501
+
+ read the specified MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1MutatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body (no (data, status, headers) tuple).
+ kwargs['_return_http_data_only'] = True
+ return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_mutating_webhook_configuration_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_mutating_webhook_configuration # noqa: E501
+
+ read the specified MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_mutating_webhook_configuration_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): looks machine-generated (OpenAPI-generator client style);
+ # presumably regenerated from the API spec — prefer template/spec fixes
+ # over hand-editing this body.
+ # locals() snapshots 'self', 'name' and the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unexpected keyword arguments, then flatten the accepted ones
+ # into local_var_params so they can be looked up uniformly below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_mutating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_mutating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Only 'pretty' is sent as a query parameter for a read; None is omitted.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the GET to the shared api_client; with async_req=True this
+ # returns a thread-like object (see docstring).
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1MutatingWebhookConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_validating_webhook_configuration(self, name, **kwargs): # noqa: E501
+ """read_validating_webhook_configuration # noqa: E501
+
+ read the specified ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_webhook_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ValidatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body (no (data, status, headers) tuple).
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_validating_webhook_configuration_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_validating_webhook_configuration # noqa: E501
+
+ read the specified ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_webhook_configuration_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): looks machine-generated (OpenAPI-generator client style);
+ # presumably regenerated from the API spec — prefer template/spec fixes
+ # over hand-editing this body.
+ # locals() snapshots 'self', 'name' and the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unexpected keyword arguments, then flatten the accepted ones
+ # into local_var_params so they can be looked up uniformly below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_validating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_validating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Only 'pretty' is sent as a query parameter for a read; None is omitted.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the GET to the shared api_client; with async_req=True this
+ # returns a thread-like object (see docstring).
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ValidatingWebhookConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_mutating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
+ """replace_mutating_webhook_configuration # noqa: E501
+
+ replace the specified MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_mutating_webhook_configuration(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param V1MutatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1MutatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body (no (data, status, headers) tuple).
+ kwargs['_return_http_data_only'] = True
+ return self.replace_mutating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_mutating_webhook_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_mutating_webhook_configuration # noqa: E501
+
+ replace the specified MutatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_mutating_webhook_configuration_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the MutatingWebhookConfiguration (required)
+ :param V1MutatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1MutatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): looks machine-generated (OpenAPI-generator client style);
+ # presumably regenerated from the API spec — prefer template/spec fixes
+ # over hand-editing this body.
+ # locals() snapshots 'self', 'name', 'body' and the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unexpected keyword arguments, then flatten the accepted ones
+ # into local_var_params so they can be looked up uniformly below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_mutating_webhook_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_mutating_webhook_configuration`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_mutating_webhook_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Build the query string; snake_case kwargs map to the API's camelCase
+ # names (e.g. dry_run -> dryRun). None values are omitted entirely.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the PUT to the shared api_client; with async_req=True this
+ # returns a thread-like object (see docstring).
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1MutatingWebhookConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_validating_webhook_configuration(self, name, body, **kwargs): # noqa: E501
+ """replace_validating_webhook_configuration # noqa: E501
+
+ replace the specified ValidatingWebhookConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_validating_webhook_configuration(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingWebhookConfiguration (required)
+ :param V1ValidatingWebhookConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ValidatingWebhookConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body (no (data, status, headers) tuple).
+ kwargs['_return_http_data_only'] = True
+ return self.replace_validating_webhook_configuration_with_http_info(name, body, **kwargs) # noqa: E501
+
+    def replace_validating_webhook_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
+        """replace_validating_webhook_configuration # noqa: E501
+
+        replace the specified ValidatingWebhookConfiguration # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_validating_webhook_configuration_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ValidatingWebhookConfiguration (required)
+        :param V1ValidatingWebhookConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ValidatingWebhookConfiguration, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all call arguments by name (includes 'self' and 'kwargs').
+        local_var_params = locals()
+
+        # Parameters understood by this endpoint itself.
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Client-level options accepted by every endpoint method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any keyword argument the endpoint does not accept,
+        # then flatten the accepted kwargs into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_validating_webhook_configuration" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+                                                        local_var_params['name'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_webhook_configuration`") # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+                                                        local_var_params['body'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_webhook_configuration`") # noqa: E501
+
+        collection_formats = {}
+
+        # 'name' is interpolated into the request path template below.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name'] # noqa: E501
+
+        # Only non-None optional parameters are forwarded as query params,
+        # renamed from snake_case to the server's camelCase field names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate serialization, transport and deserialization to ApiClient.
+        return self.api_client.call_api(
+            '/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ValidatingWebhookConfiguration',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1alpha1_api.py b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1alpha1_api.py
new file mode 100644
index 0000000000..bc1fa7cc29
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1alpha1_api.py
@@ -0,0 +1,2610 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AdmissionregistrationV1alpha1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        """Initialize the API wrapper.
+
+        :param api_client: ApiClient used for all HTTP calls; a
+            default-configured ApiClient() is created when None.
+        """
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_validating_admission_policy(self, body, **kwargs): # noqa: E501
+        """create_validating_admission_policy # noqa: E501
+
+        create a ValidatingAdmissionPolicy # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_validating_admission_policy(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ValidatingAdmissionPolicy body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ValidatingAdmissionPolicy
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Return only the deserialized payload; callers wanting the full
+        # (data, status, headers) tuple use the _with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.create_validating_admission_policy_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_validating_admission_policy_with_http_info(self, body, **kwargs): # noqa: E501
+        """create_validating_admission_policy # noqa: E501
+
+        create a ValidatingAdmissionPolicy # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_validating_admission_policy_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ValidatingAdmissionPolicy body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all call arguments by name (includes 'self' and 'kwargs').
+        local_var_params = locals()
+
+        # Parameters understood by this endpoint itself.
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Client-level options accepted by every endpoint method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any keyword argument the endpoint does not accept,
+        # then flatten the accepted kwargs into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_validating_admission_policy" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+                                                        local_var_params['body'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_validating_admission_policy`") # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Only non-None optional parameters are forwarded as query params,
+        # renamed from snake_case to the server's camelCase field names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate serialization, transport and deserialization to ApiClient.
+        return self.api_client.call_api(
+            '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ValidatingAdmissionPolicy',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def create_validating_admission_policy_binding(self, body, **kwargs): # noqa: E501
+        """create_validating_admission_policy_binding # noqa: E501
+
+        create a ValidatingAdmissionPolicyBinding # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_validating_admission_policy_binding(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ValidatingAdmissionPolicyBinding body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ValidatingAdmissionPolicyBinding
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Return only the deserialized payload; callers wanting the full
+        # (data, status, headers) tuple use the _with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.create_validating_admission_policy_binding_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_validating_admission_policy_binding_with_http_info(self, body, **kwargs): # noqa: E501
+        """create_validating_admission_policy_binding # noqa: E501
+
+        create a ValidatingAdmissionPolicyBinding # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_validating_admission_policy_binding_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ValidatingAdmissionPolicyBinding body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all call arguments by name (includes 'self' and 'kwargs').
+        local_var_params = locals()
+
+        # Parameters understood by this endpoint itself.
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Client-level options accepted by every endpoint method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any keyword argument the endpoint does not accept,
+        # then flatten the accepted kwargs into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_validating_admission_policy_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+                                                        local_var_params['body'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_validating_admission_policy_binding`") # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Only non-None optional parameters are forwarded as query params,
+        # renamed from snake_case to the server's camelCase field names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate serialization, transport and deserialization to ApiClient.
+        return self.api_client.call_api(
+            '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ValidatingAdmissionPolicyBinding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_validating_admission_policy(self, **kwargs): # noqa: E501
+        """delete_collection_validating_admission_policy # noqa: E501
+
+        delete collection of ValidatingAdmissionPolicy # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_validating_admission_policy(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Return only the deserialized payload; callers wanting the full
+        # (data, status, headers) tuple use the _with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.delete_collection_validating_admission_policy_with_http_info(**kwargs)  # noqa: E501
+
+ def delete_collection_validating_admission_policy_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_validating_admission_policy_binding(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy_binding # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy_binding(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_validating_admission_policy_binding_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_validating_admission_policy_binding_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy_binding # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy_binding_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_validating_admission_policy(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy # noqa: E501
+
+ delete a ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_validating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_validating_admission_policy_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy # noqa: E501
+
+ delete a ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_validating_admission_policy_binding(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy_binding # noqa: E501
+
+ delete a ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy_binding(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_validating_admission_policy_binding_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_validating_admission_policy_binding_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy_binding # noqa: E501
+
+ delete a ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy_binding_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_admission_policy_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_validating_admission_policy(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicyList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_validating_admission_policy_with_http_info(**kwargs) # noqa: E501
+
+ def list_validating_admission_policy_with_http_info(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicyList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicyList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_validating_admission_policy_binding(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy_binding # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy_binding(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicyBindingList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_validating_admission_policy_binding_with_http_info(**kwargs) # noqa: E501
+
+ def list_validating_admission_policy_binding_with_http_info(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy_binding # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy_binding_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicyBindingList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicyBindingList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_validating_admission_policy(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_admission_policy_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_validating_admission_policy_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_validating_admission_policy_binding(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_binding # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_binding(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicyBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_admission_policy_binding_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_validating_admission_policy_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_binding # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_binding_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy_binding`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicyBinding', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_validating_admission_policy_status(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_status # noqa: E501
+
+ partially update status of the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_admission_policy_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_validating_admission_policy_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_status # noqa: E501
+
+ partially update status of the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_validating_admission_policy_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_validating_admission_policy(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy # noqa: E501
+
+ read the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_validating_admission_policy_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy # noqa: E501
+
+ read the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_validating_admission_policy_binding(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy_binding # noqa: E501
+
+ read the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_binding(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicyBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_admission_policy_binding_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_validating_admission_policy_binding_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy_binding # noqa: E501
+
+ read the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_binding_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicyBinding', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_validating_admission_policy_status(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy_status # noqa: E501
+
+ read status of the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_admission_policy_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_validating_admission_policy_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy_status # noqa: E501
+
+ read status of the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_validating_admission_policy_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def replace_validating_admission_policy(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy  # noqa: E501

    Replace the specified ValidatingAdmissionPolicy and return only the
    deserialized object (status code and headers are discarded).  The call
    is synchronous unless ``async_req=True`` is supplied, in which case the
    request thread is returned instead.

    >>> thread = api.replace_validating_admission_policy(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param V1alpha1ValidatingAdmissionPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the modifications are not persisted;
        'All' is the only valid directive.
    :param str field_manager: name associated with the actor making these
        changes (printable, at most 128 characters).
    :param str field_validation: how the server handles unknown or duplicate
        fields in the request object: 'Ignore', 'Warn' or 'Strict'.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (a number) or a
        (connection, read) pair of timeouts.
    :return: V1alpha1ValidatingAdmissionPolicy
        If the method is called asynchronously, returns the request thread.
    """
    # This convenience wrapper only wants the deserialized body, so ask the
    # transport layer to drop the (status, headers) parts of the response.
    kwargs['_return_http_data_only'] = True
    return self.replace_validating_admission_policy_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_validating_admission_policy_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy  # noqa: E501

    Replace the specified ValidatingAdmissionPolicy via a PUT request and
    return the full response triple.  The call is synchronous unless
    ``async_req=True`` is supplied, in which case the request thread is
    returned instead.

    >>> thread = api.replace_validating_admission_policy_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param V1alpha1ValidatingAdmissionPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the modifications are not persisted;
        'All' is the only valid directive.
    :param str field_manager: name associated with the actor making these
        changes (printable, at most 128 characters).
    :param str field_validation: how the server handles unknown or duplicate
        fields in the request object: 'Ignore', 'Warn' or 'Strict'.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (a number) or a
        (connection, read) pair of timeouts.
    :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands: API parameters first, then
    # the generic transport controls shared by all generated methods.
    accepted = [
        'name', 'body', 'pretty', 'dry_run', 'field_manager', 'field_validation',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    local_var_params = {'name': name, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_validating_admission_policy" % key
            )
        local_var_params[key] = val

    if self.api_client.client_side_validation:
        # Both positional parameters are required and must not be None.
        if local_var_params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy`")  # noqa: E501
        if local_var_params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy`")  # noqa: E501

    path_params = {'name': local_var_params['name']}

    # Optional query parameters, emitted in the generator's canonical order
    # under their wire (camelCase) names.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
        )
        if local_var_params.get(py_name) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1ValidatingAdmissionPolicy',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
+
def replace_validating_admission_policy_binding(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_binding  # noqa: E501

    Replace the specified ValidatingAdmissionPolicyBinding and return only
    the deserialized object (status code and headers are discarded).  The
    call is synchronous unless ``async_req=True`` is supplied, in which case
    the request thread is returned instead.

    >>> thread = api.replace_validating_admission_policy_binding(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicyBinding (required)
    :param V1alpha1ValidatingAdmissionPolicyBinding body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the modifications are not persisted;
        'All' is the only valid directive.
    :param str field_manager: name associated with the actor making these
        changes (printable, at most 128 characters).
    :param str field_validation: how the server handles unknown or duplicate
        fields in the request object: 'Ignore', 'Warn' or 'Strict'.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (a number) or a
        (connection, read) pair of timeouts.
    :return: V1alpha1ValidatingAdmissionPolicyBinding
        If the method is called asynchronously, returns the request thread.
    """
    # This convenience wrapper only wants the deserialized body, so ask the
    # transport layer to drop the (status, headers) parts of the response.
    kwargs['_return_http_data_only'] = True
    return self.replace_validating_admission_policy_binding_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_validating_admission_policy_binding_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_binding  # noqa: E501

    Replace the specified ValidatingAdmissionPolicyBinding via a PUT request
    and return the full response triple.  The call is synchronous unless
    ``async_req=True`` is supplied, in which case the request thread is
    returned instead.

    >>> thread = api.replace_validating_admission_policy_binding_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicyBinding (required)
    :param V1alpha1ValidatingAdmissionPolicyBinding body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the modifications are not persisted;
        'All' is the only valid directive.
    :param str field_manager: name associated with the actor making these
        changes (printable, at most 128 characters).
    :param str field_validation: how the server handles unknown or duplicate
        fields in the request object: 'Ignore', 'Warn' or 'Strict'.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (a number) or a
        (connection, read) pair of timeouts.
    :return: tuple(V1alpha1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands: API parameters first, then
    # the generic transport controls shared by all generated methods.
    accepted = [
        'name', 'body', 'pretty', 'dry_run', 'field_manager', 'field_validation',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    local_var_params = {'name': name, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_validating_admission_policy_binding" % key
            )
        local_var_params[key] = val

    if self.api_client.client_side_validation:
        # Both positional parameters are required and must not be None.
        if local_var_params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy_binding`")  # noqa: E501
        if local_var_params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy_binding`")  # noqa: E501

    path_params = {'name': local_var_params['name']}

    # Optional query parameters, emitted in the generator's canonical order
    # under their wire (camelCase) names.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
        )
        if local_var_params.get(py_name) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicybindings/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1ValidatingAdmissionPolicyBinding',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
+
def replace_validating_admission_policy_status(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_status  # noqa: E501

    Replace the status subresource of the specified ValidatingAdmissionPolicy
    and return only the deserialized object (status code and headers are
    discarded).  The call is synchronous unless ``async_req=True`` is
    supplied, in which case the request thread is returned instead.

    >>> thread = api.replace_validating_admission_policy_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param V1alpha1ValidatingAdmissionPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the modifications are not persisted;
        'All' is the only valid directive.
    :param str field_manager: name associated with the actor making these
        changes (printable, at most 128 characters).
    :param str field_validation: how the server handles unknown or duplicate
        fields in the request object: 'Ignore', 'Warn' or 'Strict'.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (a number) or a
        (connection, read) pair of timeouts.
    :return: V1alpha1ValidatingAdmissionPolicy
        If the method is called asynchronously, returns the request thread.
    """
    # This convenience wrapper only wants the deserialized body, so ask the
    # transport layer to drop the (status, headers) parts of the response.
    kwargs['_return_http_data_only'] = True
    return self.replace_validating_admission_policy_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_validating_admission_policy_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_status  # noqa: E501

    Replace the status subresource of the specified ValidatingAdmissionPolicy
    via a PUT request and return the full response triple.  The call is
    synchronous unless ``async_req=True`` is supplied, in which case the
    request thread is returned instead.

    >>> thread = api.replace_validating_admission_policy_status_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param V1alpha1ValidatingAdmissionPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the modifications are not persisted;
        'All' is the only valid directive.
    :param str field_manager: name associated with the actor making these
        changes (printable, at most 128 characters).
    :param str field_validation: how the server handles unknown or duplicate
        fields in the request object: 'Ignore', 'Warn' or 'Strict'.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (a number) or a
        (connection, read) pair of timeouts.
    :return: tuple(V1alpha1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Every keyword this endpoint understands: API parameters first, then
    # the generic transport controls shared by all generated methods.
    accepted = [
        'name', 'body', 'pretty', 'dry_run', 'field_manager', 'field_validation',
        'async_req', '_return_http_data_only', '_preload_content', '_request_timeout',
    ]

    local_var_params = {'name': name, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_validating_admission_policy_status" % key
            )
        local_var_params[key] = val

    if self.api_client.client_side_validation:
        # Both positional parameters are required and must not be None.
        if local_var_params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy_status`")  # noqa: E501
        if local_var_params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy_status`")  # noqa: E501

    path_params = {'name': local_var_params['name']}

    # Optional query parameters, emitted in the generator's canonical order
    # under their wire (camelCase) names.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
        )
        if local_var_params.get(py_name) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1alpha1/validatingadmissionpolicies/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1ValidatingAdmissionPolicy',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1beta1_api.py b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1beta1_api.py
new file mode 100644
index 0000000000..669f88deff
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/admissionregistration_v1beta1_api.py
@@ -0,0 +1,2610 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AdmissionregistrationV1beta1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_validating_admission_policy(self, body, **kwargs): # noqa: E501
+ """create_validating_admission_policy # noqa: E501
+
+ create a ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_validating_admission_policy(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta1ValidatingAdmissionPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_validating_admission_policy_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_validating_admission_policy_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_validating_admission_policy # noqa: E501
+
+ create a ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_validating_admission_policy_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta1ValidatingAdmissionPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_validating_admission_policy_binding(self, body, **kwargs): # noqa: E501
+ """create_validating_admission_policy_binding # noqa: E501
+
+ create a ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_validating_admission_policy_binding(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta1ValidatingAdmissionPolicyBinding body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicyBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_validating_admission_policy_binding_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_validating_admission_policy_binding_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_validating_admission_policy_binding # noqa: E501
+
+ create a ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_validating_admission_policy_binding_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta1ValidatingAdmissionPolicyBinding body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_validating_admission_policy_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta1ValidatingAdmissionPolicyBinding', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_validating_admission_policy(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_validating_admission_policy_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_validating_admission_policy_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_validating_admission_policy_binding(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy_binding # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy_binding(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_validating_admission_policy_binding_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_validating_admission_policy_binding_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_validating_admission_policy_binding # noqa: E501
+
+ delete collection of ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_validating_admission_policy_binding_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_validating_admission_policy(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy # noqa: E501
+
+ delete a ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_validating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_validating_admission_policy_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy # noqa: E501
+
+ delete a ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_validating_admission_policy_binding(self, name, **kwargs): # noqa: E501
+ """delete_validating_admission_policy_binding # noqa: E501
+
+ delete a ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_validating_admission_policy_binding(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_validating_admission_policy_binding_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_validating_admission_policy_binding_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_validating_admission_policy_binding  # noqa: E501

        delete a ValidatingAdmissionPolicyBinding  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_validating_admission_policy_binding_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ValidatingAdmissionPolicyBinding (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit arguments. This MUST be the first statement:
        # locals() is expected to contain exactly self, name and kwargs, and
        # any local bound earlier would be picked up as a bogus parameter.
        local_var_params = locals()

        # Parameters the API endpoint itself understands ...
        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # ... plus client-level options consumed by ApiClient rather than
        # sent to the server.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten **kwargs into local_var_params, rejecting any keyword the
        # endpoint does not accept so typos fail loudly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_validating_admission_policy_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_validating_admission_policy_binding`")  # noqa: E501

        collection_formats = {}

        # 'name' is interpolated into the {name} segment of the URL path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Map snake_case Python options onto the camelCase query keys the
        # API server expects; options left as None are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions payload for the DELETE request.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # ApiClient handles serialization, auth, async dispatch, timeouts
        # and deserialization of the V1Status response.
        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit arguments. This MUST be the first statement:
        # locals() is expected to contain exactly self and kwargs, and any
        # local bound earlier would be picked up as a bogus parameter.
        local_var_params = locals()

        # This discovery endpoint takes no API parameters of its own ...
        all_params = [
        ]
        # ... only the client-level options consumed by ApiClient.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten **kwargs into local_var_params, rejecting any keyword the
        # endpoint does not accept so typos fail loudly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # ApiClient handles serialization, auth, async dispatch, timeouts
        # and deserialization of the V1APIResourceList response.
        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_validating_admission_policy(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicyList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_validating_admission_policy_with_http_info(**kwargs) # noqa: E501
+
    def list_validating_admission_policy_with_http_info(self, **kwargs):  # noqa: E501
        """list_validating_admission_policy  # noqa: E501

        list or watch objects of kind ValidatingAdmissionPolicy  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_validating_admission_policy_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta1ValidatingAdmissionPolicyList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit arguments. This MUST be the first statement:
        # locals() is expected to contain exactly self and kwargs, and any
        # local bound earlier would be picked up as a bogus parameter.
        local_var_params = locals()

        # List/watch options the API endpoint itself understands ...
        all_params = [
            'pretty',
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # ... plus client-level options consumed by ApiClient rather than
        # sent to the server.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten **kwargs into local_var_params, rejecting any keyword the
        # endpoint does not accept so typos fail loudly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_validating_admission_policy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Map snake_case Python options onto the camelCase query keys the
        # API server expects; options left as None are omitted entirely.
        # '_continue' is underscore-prefixed because 'continue' is a Python
        # keyword, but it is sent to the server as plain 'continue'.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        # The stream=watch media types allow the server to stream watch
        # events over the same endpoint.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # ApiClient handles serialization, auth, async dispatch, timeouts
        # and deserialization of the list response.
        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta1ValidatingAdmissionPolicyList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_validating_admission_policy_binding(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy_binding # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy_binding(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicyBindingList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_validating_admission_policy_binding_with_http_info(**kwargs) # noqa: E501
+
+ def list_validating_admission_policy_binding_with_http_info(self, **kwargs): # noqa: E501
+ """list_validating_admission_policy_binding # noqa: E501
+
+ list or watch objects of kind ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_validating_admission_policy_binding_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta1ValidatingAdmissionPolicyBindingList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta1ValidatingAdmissionPolicyBindingList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_validating_admission_policy(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_admission_policy_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_validating_admission_policy_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_validating_admission_policy_binding(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_binding # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_binding(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicyBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_admission_policy_binding_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_validating_admission_policy_binding_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_binding # noqa: E501
+
+ partially update the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_binding_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_validating_admission_policy_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy_binding`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta1ValidatingAdmissionPolicyBinding', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_validating_admission_policy_status(self, name, body, **kwargs): # noqa: E501
+ """patch_validating_admission_policy_status # noqa: E501
+
+ partially update status of the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_validating_admission_policy_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_validating_admission_policy_status_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_validating_admission_policy_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_validating_admission_policy_status  # noqa: E501

        partially update status of the specified ValidatingAdmissionPolicy  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_validating_admission_policy_status_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ValidatingAdmissionPolicy (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE(generated code): locals() is captured as the very first
        # statement, so the snapshot holds exactly {'self', 'name', 'body',
        # 'kwargs'}; the kwargs are validated and merged into it below.
        # Introducing any local before this line would change behavior.
        local_var_params = locals()

        # Parameters accepted by this endpoint ...
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # ... plus the client-level options common to every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold accepted ones into the
        # locals snapshot so everything is looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_validating_admission_policy_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_validating_admission_policy_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_validating_admission_policy_status`")  # noqa: E501

        collection_formats = {}

        # Path template substitution: {name} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Query parameters: python snake_case names are mapped onto the
        # camelCase names the Kubernetes API server expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta1ValidatingAdmissionPolicy',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_validating_admission_policy(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy # noqa: E501
+
+ read the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_admission_policy_with_http_info(name, **kwargs) # noqa: E501
+
    def read_validating_admission_policy_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_validating_admission_policy  # noqa: E501

        read the specified ValidatingAdmissionPolicy  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_validating_admission_policy_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ValidatingAdmissionPolicy (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE(generated code): locals() is captured as the very first
        # statement, so the snapshot holds exactly {'self', 'name', 'kwargs'};
        # the kwargs are validated and merged into it below.
        local_var_params = locals()

        # Endpoint parameters plus the client-level options shared by every
        # generated method.
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold accepted ones into the
        # locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_validating_admission_policy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy`")  # noqa: E501

        collection_formats = {}

        # Path template substitution: {name} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta1ValidatingAdmissionPolicy',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_validating_admission_policy_binding(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy_binding # noqa: E501
+
+ read the specified ValidatingAdmissionPolicyBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_binding(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicyBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicyBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_admission_policy_binding_with_http_info(name, **kwargs) # noqa: E501
+
    def read_validating_admission_policy_binding_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_validating_admission_policy_binding  # noqa: E501

        read the specified ValidatingAdmissionPolicyBinding  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_validating_admission_policy_binding_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ValidatingAdmissionPolicyBinding (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE(generated code): locals() is captured as the very first
        # statement, so the snapshot holds exactly {'self', 'name', 'kwargs'};
        # the kwargs are validated and merged into it below.
        local_var_params = locals()

        # Endpoint parameters plus the client-level options shared by every
        # generated method.
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold accepted ones into the
        # locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_validating_admission_policy_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy_binding`")  # noqa: E501

        collection_formats = {}

        # Path template substitution: {name} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta1ValidatingAdmissionPolicyBinding',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_validating_admission_policy_status(self, name, **kwargs): # noqa: E501
+ """read_validating_admission_policy_status # noqa: E501
+
+ read status of the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_validating_admission_policy_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_validating_admission_policy_status_with_http_info(name, **kwargs) # noqa: E501
+
    def read_validating_admission_policy_status_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_validating_admission_policy_status  # noqa: E501

        read status of the specified ValidatingAdmissionPolicy  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_validating_admission_policy_status_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ValidatingAdmissionPolicy (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE(generated code): locals() is captured as the very first
        # statement, so the snapshot holds exactly {'self', 'name', 'kwargs'};
        # the kwargs are validated and merged into it below.
        local_var_params = locals()

        # Endpoint parameters plus the client-level options shared by every
        # generated method.
        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold accepted ones into the
        # locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_validating_admission_policy_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_validating_admission_policy_status`")  # noqa: E501

        collection_formats = {}

        # Path template substitution: {name} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta1ValidatingAdmissionPolicy',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_validating_admission_policy(self, name, body, **kwargs): # noqa: E501
+ """replace_validating_admission_policy # noqa: E501
+
+ replace the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_validating_admission_policy(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param V1beta1ValidatingAdmissionPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta1ValidatingAdmissionPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_validating_admission_policy_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_validating_admission_policy_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_validating_admission_policy # noqa: E501
+
+ replace the specified ValidatingAdmissionPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_validating_admission_policy_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ValidatingAdmissionPolicy (required)
+ :param V1beta1ValidatingAdmissionPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_validating_admission_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta1ValidatingAdmissionPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def replace_validating_admission_policy_binding(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_binding  # noqa: E501

    Replace the specified ValidatingAdmissionPolicyBinding with *body*.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously and obtain a thread whose ``get()`` yields the result:
    >>> thread = api.replace_validating_admission_policy_binding(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicyBinding (required)
    :param V1beta1ValidatingAdmissionPolicyBinding body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: silently drop unknown fields and keep only the last duplicate (pre-v1.23 default). - Warn: emit a standard warning response header per dropped/duplicate field while still succeeding (v1.23+ default). - Strict: fail the request with a BadRequest error listing all unknown and duplicate fields.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta1ValidatingAdmissionPolicyBinding
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the *_with_http_info variant for just the deserialized object,
    # not the (data, status_code, headers) tuple it would otherwise return.
    kwargs.update(_return_http_data_only=True)
    return self.replace_validating_admission_policy_binding_with_http_info(
        name, body, **kwargs)  # noqa: E501
+
def replace_validating_admission_policy_binding_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_binding  # noqa: E501

    replace the specified ValidatingAdmissionPolicyBinding  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_validating_admission_policy_binding_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicyBinding (required)
    :param V1beta1ValidatingAdmissionPolicyBinding body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta1ValidatingAdmissionPolicyBinding, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Must be the first statement: snapshots exactly self/name/body/kwargs
    # before any other local is defined and would pollute the dict.
    local_var_params = locals()

    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown kwargs, then flatten the accepted ones into
    # local_var_params so every option is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_validating_admission_policy_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy_binding`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy_binding`")  # noqa: E501

    collection_formats = {}

    # `name` is interpolated into the {name} segment of the request path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Optional query-string parameters; note snake_case local names map to
    # the API's camelCase query keys (dry_run -> dryRun, etc.).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate the actual HTTP PUT (and optional async dispatch /
    # deserialization) to the shared ApiClient.
    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1ValidatingAdmissionPolicyBinding',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def replace_validating_admission_policy_status(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_status  # noqa: E501

    Replace the status subresource of the specified ValidatingAdmissionPolicy.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to run the request
    asynchronously and obtain a thread whose ``get()`` yields the result:
    >>> thread = api.replace_validating_admission_policy_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param V1beta1ValidatingAdmissionPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: silently drop unknown fields and keep only the last duplicate (pre-v1.23 default). - Warn: emit a standard warning response header per dropped/duplicate field while still succeeding (v1.23+ default). - Strict: fail the request with a BadRequest error listing all unknown and duplicate fields.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1beta1ValidatingAdmissionPolicy
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the *_with_http_info variant for just the deserialized object,
    # not the (data, status_code, headers) tuple it would otherwise return.
    kwargs.update(_return_http_data_only=True)
    return self.replace_validating_admission_policy_status_with_http_info(
        name, body, **kwargs)  # noqa: E501
+
def replace_validating_admission_policy_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_validating_admission_policy_status  # noqa: E501

    replace status of the specified ValidatingAdmissionPolicy  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_validating_admission_policy_status_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ValidatingAdmissionPolicy (required)
    :param V1beta1ValidatingAdmissionPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1beta1ValidatingAdmissionPolicy, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Must be the first statement: snapshots exactly self/name/body/kwargs
    # before any other local is defined and would pollute the dict.
    local_var_params = locals()

    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown kwargs, then flatten the accepted ones into
    # local_var_params so every option is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_validating_admission_policy_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_validating_admission_policy_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_validating_admission_policy_status`")  # noqa: E501

    collection_formats = {}

    # `name` is interpolated into the {name} segment of the request path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Optional query-string parameters; note snake_case local names map to
    # the API's camelCase query keys (dry_run -> dryRun, etc.).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate the actual HTTP PUT against the /status subresource (and
    # optional async dispatch / deserialization) to the shared ApiClient.
    return self.api_client.call_api(
        '/apis/admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1beta1ValidatingAdmissionPolicy',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apiextensions_api.py b/contrib/python/kubernetes/kubernetes/client/api/apiextensions_api.py
new file mode 100644
index 0000000000..3e05ff7389
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apiextensions_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class ApiextensionsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Thin wrapper over ApiClient exposing the discovery endpoint of the
    apiextensions.k8s.io API group.
    """

    def __init__(self, api_client=None):
        # Fall back to a freshly constructed default ApiClient when the
        # caller does not inject one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Ask the *_with_http_info variant for just the deserialized object,
        # not the (data, status_code, headers) tuple it would otherwise return.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Must be the first statement: snapshots exactly self/kwargs before
        # any other local is defined and would pollute the dict.
        local_var_params = locals()

        # This endpoint takes no positional parameters — only the generic
        # request options appended below are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown kwargs, then flatten the accepted ones into
        # local_var_params so every option is looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP GET (and optional async dispatch /
        # deserialization) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/apiextensions.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apiextensions_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/apiextensions_v1_api.py
new file mode 100644
index 0000000000..6483f0c925
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apiextensions_v1_api.py
@@ -0,0 +1,1583 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class ApiextensionsV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_custom_resource_definition(self, body, **kwargs): # noqa: E501
+ """create_custom_resource_definition # noqa: E501
+
+ create a CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_custom_resource_definition(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1CustomResourceDefinition body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_custom_resource_definition_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_custom_resource_definition_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_custom_resource_definition # noqa: E501
+
+ create a CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_custom_resource_definition_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1CustomResourceDefinition body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_custom_resource_definition" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_custom_resource_definition`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CustomResourceDefinition', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_custom_resource_definition(self, **kwargs): # noqa: E501
+ """delete_collection_custom_resource_definition # noqa: E501
+
+ delete collection of CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_custom_resource_definition(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_custom_resource_definition_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_custom_resource_definition_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_custom_resource_definition # noqa: E501
+
+ delete collection of CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_custom_resource_definition_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_custom_resource_definition" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_custom_resource_definition(self, name, **kwargs): # noqa: E501
+ """delete_custom_resource_definition # noqa: E501
+
+ delete a CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_custom_resource_definition(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_custom_resource_definition_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_custom_resource_definition_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_custom_resource_definition  # noqa: E501

        delete a CustomResourceDefinition  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_custom_resource_definition_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CustomResourceDefinition (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must be the FIRST statement:
        # locals() is expected to contain exactly `self`, `name` and `kwargs`.
        local_var_params = locals()

        # API-level parameters accepted for this endpoint.
        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Generic transport/framing kwargs accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown kwargs early (catches typos), then flatten kwargs
        # into local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_custom_resource_definition" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_custom_resource_definition`")  # noqa: E501

        collection_formats = {}

        # `name` is interpolated into the URL path template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Build the query string; note the snake_case -> camelCase mapping
        # between Python argument names and Kubernetes API query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions payload for the DELETE request.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must be the FIRST statement:
        # locals() is expected to contain exactly `self` and `kwargs`.
        local_var_params = locals()

        # This discovery endpoint takes no API-level parameters; only the
        # generic transport/framing kwargs are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown kwargs early (catches typos), then flatten kwargs
        # into local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiextensions.k8s.io/v1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_custom_resource_definition(self, **kwargs): # noqa: E501
+ """list_custom_resource_definition # noqa: E501
+
+ list or watch objects of kind CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_custom_resource_definition(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinitionList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_custom_resource_definition_with_http_info(**kwargs) # noqa: E501
+
    def list_custom_resource_definition_with_http_info(self, **kwargs):  # noqa: E501
        """list_custom_resource_definition  # noqa: E501

        list or watch objects of kind CustomResourceDefinition  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_custom_resource_definition_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CustomResourceDefinitionList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must be the FIRST statement:
        # locals() is expected to contain exactly `self` and `kwargs`.
        local_var_params = locals()

        # API-level parameters accepted for this endpoint. `_continue` is
        # prefixed with an underscore because `continue` is a Python keyword.
        all_params = [
            'pretty',
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # Generic transport/framing kwargs accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown kwargs early (catches typos), then flatten kwargs
        # into local_var_params so every parameter is looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_custom_resource_definition" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Build the query string; note the snake_case -> camelCase mapping
        # between Python argument names and Kubernetes API query keys
        # (and `_continue` -> `continue`).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`; the stream=watch media types support watch mode.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CustomResourceDefinitionList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_custom_resource_definition(self, name, body, **kwargs): # noqa: E501
+ """patch_custom_resource_definition # noqa: E501
+
+ partially update the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_custom_resource_definition(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_custom_resource_definition_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_custom_resource_definition_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_custom_resource_definition # noqa: E501
+
+ partially update the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_custom_resource_definition_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_custom_resource_definition" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_custom_resource_definition`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_custom_resource_definition`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CustomResourceDefinition', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_custom_resource_definition_status(self, name, body, **kwargs): # noqa: E501
+ """patch_custom_resource_definition_status # noqa: E501
+
+ partially update status of the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_custom_resource_definition_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_custom_resource_definition_status # noqa: E501
+
+ partially update status of the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_custom_resource_definition_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_custom_resource_definition_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_custom_resource_definition_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_custom_resource_definition_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CustomResourceDefinition', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_custom_resource_definition(self, name, **kwargs): # noqa: E501
+ """read_custom_resource_definition # noqa: E501
+
+ read the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_custom_resource_definition(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_custom_resource_definition_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_custom_resource_definition_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_custom_resource_definition # noqa: E501
+
+ read the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_custom_resource_definition_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_custom_resource_definition" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_custom_resource_definition`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CustomResourceDefinition', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_custom_resource_definition_status(self, name, **kwargs): # noqa: E501
+ """read_custom_resource_definition_status # noqa: E501
+
+ read status of the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_custom_resource_definition_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_custom_resource_definition_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_custom_resource_definition_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_custom_resource_definition_status # noqa: E501
+
+ read status of the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_custom_resource_definition_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_custom_resource_definition_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_custom_resource_definition_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CustomResourceDefinition', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_custom_resource_definition(self, name, body, **kwargs): # noqa: E501
+ """replace_custom_resource_definition # noqa: E501
+
+ replace the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_custom_resource_definition(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param V1CustomResourceDefinition body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_custom_resource_definition_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_custom_resource_definition_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_custom_resource_definition # noqa: E501
+
+ replace the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_custom_resource_definition_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param V1CustomResourceDefinition body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_custom_resource_definition" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_custom_resource_definition`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_custom_resource_definition`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CustomResourceDefinition', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_custom_resource_definition_status(self, name, body, **kwargs): # noqa: E501
+ """replace_custom_resource_definition_status # noqa: E501
+
+ replace status of the specified CustomResourceDefinition # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_custom_resource_definition_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CustomResourceDefinition (required)
+ :param V1CustomResourceDefinition body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CustomResourceDefinition
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
+
    def replace_custom_resource_definition_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_custom_resource_definition_status  # noqa: E501

        replace status of the specified CustomResourceDefinition  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_custom_resource_definition_status_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CustomResourceDefinition (required)
        :param V1CustomResourceDefinition body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CustomResourceDefinition, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is captured before any other local is bound, so the
        # snapshot holds exactly the declared parameters plus the raw
        # ``kwargs`` dict (flattened into it below).
        local_var_params = locals()

        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_custom_resource_definition_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_custom_resource_definition_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_custom_resource_definition_status`")  # noqa: E501

        collection_formats = {}

        # Path template parameter for .../customresourcedefinitions/{name}/status.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Query parameters: snake_case locals map to camelCase API names.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/status', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CustomResourceDefinition',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apiregistration_api.py b/contrib/python/kubernetes/kubernetes/client/api/apiregistration_api.py
new file mode 100644
index 0000000000..6dd73f5830
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apiregistration_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class ApiregistrationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller does not inject one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Delegate to the *_with_http_info variant, stripping the status code
        # and headers from its reply.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is captured before any other local is bound; for this
        # parameterless endpoint it holds only ``self`` and the raw ``kwargs``.
        local_var_params = locals()

        # This endpoint takes no API-level parameters of its own.
        all_params = [
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiregistration.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apiregistration_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/apiregistration_v1_api.py
new file mode 100644
index 0000000000..cac5d276a3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apiregistration_v1_api.py
@@ -0,0 +1,1583 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class ApiregistrationV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_api_service(self, body, **kwargs): # noqa: E501
+ """create_api_service # noqa: E501
+
+ create an APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_api_service(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1APIService body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_api_service_with_http_info(body, **kwargs) # noqa: E501
+
    def create_api_service_with_http_info(self, body, **kwargs):  # noqa: E501
        """create_api_service  # noqa: E501

        create an APIService  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_api_service_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1APIService body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is captured before any other local is bound, so the
        # snapshot holds exactly the declared parameters plus the raw
        # ``kwargs`` dict (flattened into it below).
        local_var_params = locals()

        all_params = [
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_api_service" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_api_service`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Query parameters: snake_case locals map to camelCase API names.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiregistration.k8s.io/v1/apiservices', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIService',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_api_service(self, name, **kwargs): # noqa: E501
+ """delete_api_service # noqa: E501
+
+ delete an APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_api_service(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_api_service_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_api_service_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_api_service  # noqa: E501

        delete an APIService  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_api_service_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the APIService (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is captured before any other local is bound, so the
        # snapshot holds exactly the declared parameters plus the raw
        # ``kwargs`` dict (flattened into it below).
        local_var_params = locals()

        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the locals snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_api_service" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_api_service`")  # noqa: E501

        collection_formats = {}

        # Path template parameter for .../apiservices/{name}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Query parameters: snake_case locals map to camelCase API names.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_collection_api_service(self, **kwargs): # noqa: E501
+ """delete_collection_api_service # noqa: E501
+
+ delete collection of APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_api_service(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_api_service_with_http_info(**kwargs) # noqa: E501
+
def delete_collection_api_service_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_api_service  # noqa: E501

    Delete a collection of APIService objects.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.delete_collection_api_service_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continue token from a previous paginated list
        call with otherwise identical query parameters.
    :param str dry_run: When present, modifications are not persisted.
        Valid values are: - All: all dry run stages will be processed
    :param str field_selector: Restrict the returned objects by their
        fields. Defaults to everything.
    :param int grace_period_seconds: Non-negative seconds before the
        object is deleted; zero means delete immediately.
    :param str label_selector: Restrict the returned objects by their
        labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list
        call; pagination continues via the `continue` token.
    :param bool orphan_dependents: Deprecated: please use the
        propagation_policy parameter instead.
    :param str propagation_policy: Garbage-collection policy; one of
        'Orphan', 'Background' or 'Foreground'.
    :param str resource_version: Constrains which resource versions may
        serve the request. Defaults to unset.
    :param str resource_version_match: How resource_version is applied
        to list calls. Defaults to unset.
    :param bool send_initial_events: Emit synthetic events reproducing
        the current collection state before the watch stream proper.
    :param int timeout_seconds: Timeout for the list/watch call,
        regardless of activity.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout (a single number) or a
        (connection, read) timeout pair.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    # Snapshot of the call frame ({'self': ..., 'kwargs': {...}}); the
    # validated keyword arguments are merged into this dict below.
    local_var_params = locals()

    # Every keyword this endpoint accepts, including the framework-level
    # request options shared by all generated API methods.
    accepted = {
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }

    for arg, value in local_var_params['kwargs'].items():
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_api_service" % arg
            )
        local_var_params[arg] = value
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Map python argument names onto their wire-level query keys; note
    # that '_continue' is renamed because 'continue' is a python keyword.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions payload; None when the caller omitted it.
    body_params = local_var_params.get('body')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiregistration.k8s.io/v1/apiservices', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get the available resources of the apiregistration.k8s.io/v1 group.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout (a single number) or a
        (connection, read) timeout pair.
    :return: V1APIResourceList, or the request thread when called
        asynchronously.
    """
    # Callers of this wrapper want only the deserialized body, not the
    # (data, status, headers) tuple the *_with_http_info variant returns.
    kwargs.update(_return_http_data_only=True)
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get the available resources of the apiregistration.k8s.io/v1 group.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout (a single number) or a
        (connection, read) timeout pair.
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    # Snapshot of the call frame ({'self': ..., 'kwargs': {...}}); the
    # validated keyword arguments are merged into this dict below.
    local_var_params = locals()

    # This endpoint takes no endpoint-specific parameters; only the
    # framework-level request options are accepted.
    accepted = {
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }

    for arg, value in local_var_params['kwargs'].items():
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % arg
            )
        local_var_params[arg] = value
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiregistration.k8s.io/v1/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def list_api_service(self, **kwargs):  # noqa: E501
    """list_api_service  # noqa: E501

    List or watch objects of kind APIService.

    Synchronous by default; pass ``async_req=True`` to perform the HTTP
    request asynchronously and receive the request thread instead.

    >>> thread = api.list_api_service(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: Request watch events with type
        \"BOOKMARK\"; servers may ignore this flag.
    :param str _continue: Continue token from a previous paginated list
        call with otherwise identical query parameters.
    :param str field_selector: Restrict the returned objects by their
        fields. Defaults to everything.
    :param str label_selector: Restrict the returned objects by their
        labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list
        call; pagination continues via the `continue` token.
    :param str resource_version: Constrains which resource versions may
        serve the request. Defaults to unset.
    :param str resource_version_match: How resource_version is applied
        to list calls. Defaults to unset.
    :param bool send_initial_events: Emit synthetic events reproducing
        the current collection state before the watch stream proper.
    :param int timeout_seconds: Timeout for the list/watch call,
        regardless of activity.
    :param bool watch: Watch for changes to the described resources and
        return them as a stream of add, update, and remove notifications.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout (a single number) or a
        (connection, read) timeout pair.
    :return: V1APIServiceList, or the request thread when called
        asynchronously.
    """
    # Callers of this wrapper want only the deserialized body, not the
    # (data, status, headers) tuple the *_with_http_info variant returns.
    kwargs.update(_return_http_data_only=True)
    return self.list_api_service_with_http_info(**kwargs)  # noqa: E501
+
+ def list_api_service_with_http_info(self, **kwargs): # noqa: E501
+ """list_api_service # noqa: E501
+
+ list or watch objects of kind APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_api_service_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIServiceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_api_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIServiceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_api_service(self, name, body, **kwargs): # noqa: E501
+ """patch_api_service # noqa: E501
+
+ partially update the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_api_service(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_api_service_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_api_service_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_api_service # noqa: E501
+
+ partially update the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_api_service_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_api_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_api_service`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_api_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIService', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_api_service_status(self, name, body, **kwargs): # noqa: E501
+ """patch_api_service_status # noqa: E501
+
+ partially update status of the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_api_service_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_api_service_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_api_service_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_api_service_status # noqa: E501
+
+ partially update status of the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_api_service_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_api_service_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_api_service_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_api_service_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIService', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_api_service(self, name, **kwargs): # noqa: E501
+ """read_api_service # noqa: E501
+
+ read the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_api_service(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_api_service_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_api_service_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_api_service # noqa: E501
+
+ read the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_api_service_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_api_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_api_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIService', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_api_service_status(self, name, **kwargs): # noqa: E501
+ """read_api_service_status # noqa: E501
+
+ read status of the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_api_service_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_api_service_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_api_service_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_api_service_status # noqa: E501
+
+ read status of the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_api_service_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_api_service_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_api_service_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIService', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_api_service(self, name, body, **kwargs): # noqa: E501
+ """replace_api_service # noqa: E501
+
+ replace the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_api_service(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param V1APIService body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_api_service_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_api_service_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_api_service # noqa: E501
+
+ replace the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_api_service_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param V1APIService body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_api_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_api_service`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_api_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIService', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_api_service_status(self, name, body, **kwargs): # noqa: E501
+ """replace_api_service_status # noqa: E501
+
+ replace status of the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_api_service_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param V1APIService body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIService
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_api_service_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_api_service_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_api_service_status # noqa: E501
+
+ replace status of the specified APIService # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_api_service_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the APIService (required)
+ :param V1APIService body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_api_service_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_api_service_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_api_service_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIService', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apis_api.py b/contrib/python/kubernetes/kubernetes/client/api/apis_api.py
new file mode 100644
index 0000000000..9dc9c82e4e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apis_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class ApisApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def get_api_versions(self, **kwargs): # noqa: E501
+ """get_api_versions # noqa: E501
+
+ get available API versions # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_versions(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIGroupList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_versions_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_versions_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_versions # noqa: E501
+
+ get available API versions # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_versions_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIGroupList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_versions" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIGroupList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apps_api.py b/contrib/python/kubernetes/kubernetes/client/api/apps_api.py
new file mode 100644
index 0000000000..43ee8db83a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apps_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AppsApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def get_api_group(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIGroup
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_group_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_group_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_group" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIGroup', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/apps_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/apps_v1_api.py
new file mode 100644
index 0000000000..0b1a111224
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/apps_v1_api.py
@@ -0,0 +1,9479 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AppsV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_namespaced_controller_revision(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_controller_revision # noqa: E501
+
+ create a ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_controller_revision(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ControllerRevision body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ControllerRevision
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_controller_revision_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_controller_revision # noqa: E501
+
+ create a ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_controller_revision_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ControllerRevision body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ControllerRevision, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_controller_revision" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_controller_revision`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_controller_revision`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/controllerrevisions', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ControllerRevision', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_daemon_set(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_daemon_set # noqa: E501
+
+ create a DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_daemon_set(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1DaemonSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DaemonSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_daemon_set_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_daemon_set_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_daemon_set # noqa: E501
+
+ create a DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_daemon_set_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1DaemonSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_daemon_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_daemon_set`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_daemon_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DaemonSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_deployment(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_deployment # noqa: E501
+
+ create a Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_deployment(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Deployment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Deployment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_deployment_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_deployment # noqa: E501
+
+ create a Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_deployment_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Deployment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_deployment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_deployment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Deployment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_replica_set(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_replica_set # noqa: E501
+
+ create a ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_replica_set(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ReplicaSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicaSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_replica_set_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_replica_set_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_replica_set # noqa: E501
+
+ create a ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_replica_set_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ReplicaSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_replica_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_replica_set`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_replica_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/replicasets', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicaSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_stateful_set(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_stateful_set # noqa: E501
+
+ create a StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_stateful_set(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1StatefulSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StatefulSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_stateful_set_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_stateful_set # noqa: E501
+
+ create a StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_stateful_set_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1StatefulSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_stateful_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_stateful_set`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_stateful_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StatefulSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_controller_revision(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_controller_revision # noqa: E501
+
+ delete collection of ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_controller_revision(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_controller_revision_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_controller_revision # noqa: E501
+
+ delete collection of ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_controller_revision_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_controller_revision" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_controller_revision`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/controllerrevisions', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_daemon_set(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_daemon_set # noqa: E501
+
+ delete collection of DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_daemon_set(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_daemon_set_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_daemon_set_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_daemon_set # noqa: E501
+
+ delete collection of DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_daemon_set_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_daemon_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_daemon_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_deployment(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_deployment # noqa: E501
+
+ delete collection of Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_deployment(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_deployment_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_deployment # noqa: E501
+
+ delete collection of Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_deployment_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_deployment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_deployment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_replica_set(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_replica_set # noqa: E501
+
+ delete collection of ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_replica_set_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_replica_set # noqa: E501
+
+ delete collection of ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_replica_set_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_replica_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_replica_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/replicasets', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_stateful_set(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_stateful_set # noqa: E501
+
+ delete collection of StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_stateful_set(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_stateful_set_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_stateful_set # noqa: E501
+
+ delete collection of StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_stateful_set_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_stateful_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_stateful_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_controller_revision(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_controller_revision # noqa: E501
+
+ delete a ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_controller_revision(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ControllerRevision (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_controller_revision_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_controller_revision # noqa: E501
+
+ delete a ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_controller_revision_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ControllerRevision (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_controller_revision" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_controller_revision`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_controller_revision`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_daemon_set(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_daemon_set # noqa: E501
+
+ delete a DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_daemon_set_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_daemon_set # noqa: E501
+
+ delete a DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_daemon_set_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_daemon_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_daemon_set`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_daemon_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def delete_namespaced_deployment(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_deployment  # noqa: E501

    Delete a Deployment. Synchronous by default; pass async_req=True
    for an asynchronous request.
    >>> thread = api.delete_namespaced_deployment(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Deployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: 'All' processes every dry-run stage without persisting the change
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated, use propagation_policy instead
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground' garbage collection
    :param V1DeleteOptions body:
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: V1Status; when called asynchronously, the request thread
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body instead of the (data, status, headers)
    # triple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_deployment_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
def delete_namespaced_deployment_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_deployment  # noqa: E501

    Delete a Deployment, returning the full HTTP response information.
    Synchronous by default; pass async_req=True for an asynchronous
    request.
    >>> thread = api.delete_namespaced_deployment_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Deployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: 'All' processes every dry-run stage without persisting the change
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated, use propagation_policy instead
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground' garbage collection
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict));
             when called asynchronously, the request thread
    """
    # Every keyword this endpoint understands, including the transport
    # options shared by all generated methods.
    known_args = (
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )

    # Merge positional and keyword arguments, rejecting unknown names.
    params = {'name': name, 'namespace': namespace}
    for arg, value in six.iteritems(kwargs):
        if arg not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_deployment" % arg
            )
        params[arg] = value

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_deployment`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_deployment`")  # noqa: E501

    path_params = {
        'name': params['name'],
        'namespace': params['namespace'],
    }

    # Optional query parameters, mapped to their camelCase wire names;
    # values that are absent or None are not sent.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def delete_namespaced_replica_set(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_replica_set  # noqa: E501

    Delete a ReplicaSet. Synchronous by default; pass async_req=True
    for an asynchronous request.
    >>> thread = api.delete_namespaced_replica_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: 'All' processes every dry-run stage without persisting the change
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated, use propagation_policy instead
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground' garbage collection
    :param V1DeleteOptions body:
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: V1Status; when called asynchronously, the request thread
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body instead of the (data, status, headers)
    # triple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_replica_set_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
def delete_namespaced_replica_set_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_replica_set  # noqa: E501

    Delete a ReplicaSet, returning the full HTTP response information.
    Synchronous by default; pass async_req=True for an asynchronous
    request.
    >>> thread = api.delete_namespaced_replica_set_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: 'All' processes every dry-run stage without persisting the change
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated, use propagation_policy instead
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground' garbage collection
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict));
             when called asynchronously, the request thread
    """
    # Every keyword this endpoint understands, including the transport
    # options shared by all generated methods.
    known_args = (
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )

    # Merge positional and keyword arguments, rejecting unknown names.
    params = {'name': name, 'namespace': namespace}
    for arg, value in six.iteritems(kwargs):
        if arg not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_replica_set" % arg
            )
        params[arg] = value

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_replica_set`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_replica_set`")  # noqa: E501

    path_params = {
        'name': params['name'],
        'namespace': params['namespace'],
    }

    # Optional query parameters, mapped to their camelCase wire names;
    # values that are absent or None are not sent.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def delete_namespaced_stateful_set(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_stateful_set  # noqa: E501

    Delete a StatefulSet. Synchronous by default; pass async_req=True
    for an asynchronous request.
    >>> thread = api.delete_namespaced_stateful_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: 'All' processes every dry-run stage without persisting the change
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated, use propagation_policy instead
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground' garbage collection
    :param V1DeleteOptions body:
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: V1Status; when called asynchronously, the request thread
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body instead of the (data, status, headers)
    # triple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_stateful_set_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
def delete_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_stateful_set  # noqa: E501

    Delete a StatefulSet, returning the full HTTP response information.
    Synchronous by default; pass async_req=True for an asynchronous
    request.
    >>> thread = api.delete_namespaced_stateful_set_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: 'All' processes every dry-run stage without persisting the change
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated, use propagation_policy instead
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground' garbage collection
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict));
             when called asynchronously, the request thread
    """
    # Every keyword this endpoint understands, including the transport
    # options shared by all generated methods.
    known_args = (
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )

    # Merge positional and keyword arguments, rejecting unknown names.
    params = {'name': name, 'namespace': namespace}
    for arg, value in six.iteritems(kwargs):
        if arg not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_stateful_set" % arg
            )
        params[arg] = value

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_stateful_set`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_stateful_set`")  # noqa: E501

    path_params = {
        'name': params['name'],
        'namespace': params['namespace'],
    }

    # Optional query parameters, mapped to their camelCase wire names;
    # values that are absent or None are not sent.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the resources available in the apps/v1 API group.
    Synchronous by default; pass async_req=True for an asynchronous
    request.
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: V1APIResourceList; when called asynchronously, the request
             thread
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized body instead of the (data, status, headers)
    # triple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_api_resources_with_http_info(**forwarded)  # noqa: E501
+
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    List the resources available in the apps/v1 API group, returning
    the full HTTP response information. Synchronous by default; pass
    async_req=True for an asynchronous request.
    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the raw urllib3.HTTPResponse is
                             returned without reading/decoding response
                             data (default True)
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict));
             when called asynchronously, the request thread
    """
    # This endpoint takes no API parameters; only the transport options
    # shared by all generated methods are accepted.
    known_args = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )

    params = {}
    for arg, value in six.iteritems(kwargs):
        if arg not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % arg
            )
        params[arg] = value

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def list_controller_revision_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_controller_revision_for_all_namespaces  # noqa: E501

    list or watch objects of kind ControllerRevision  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_controller_revision_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this.
    :param str _continue: continue token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields.
    :param str label_selector: restrict the returned objects by their labels.
    :param int limit: maximum number of responses to return for a list call.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions may serve the request.
    :param str resource_version_match: how resource_version is applied to list calls.
    :param bool send_initial_events: with watch=True, begin the stream with synthetic
                                     events reflecting the current collection state.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream add/update/remove notifications for the resources.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple.
    :return: V1ControllerRevisionList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking it to strip the
    # status code and headers and hand back only the response data.
    kwargs.update(_return_http_data_only=True)
    return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
def list_controller_revision_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_controller_revision_for_all_namespaces  # noqa: E501

    list or watch objects of kind ControllerRevision  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_controller_revision_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this.
    :param str _continue: continue token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields.
    :param str label_selector: restrict the returned objects by their labels.
    :param int limit: maximum number of responses to return for a list call.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions may serve the request.
    :param str resource_version_match: how resource_version is applied to list calls.
    :param bool send_initial_events: with watch=True, begin the stream with synthetic
                                     events reflecting the current collection state.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream add/update/remove notifications for the resources.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple.
    :return: tuple(V1ControllerRevisionList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    all_params = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ])

    # Validate kwargs eagerly so a typo fails fast instead of being
    # silently dropped.  dict.items() behaves correctly on both
    # Python 2 and 3, so the six.iteritems() indirection is unneeded.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_controller_revision_for_all_namespaces" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Map the python snake_case options onto the camelCase query
    # parameters the Kubernetes API expects; '_continue' sidesteps the
    # Python reserved word 'continue'.  .get() returns None both for a
    # missing key and an explicit None, so one lookup suffices.
    query_params = []
    if local_var_params.get('allow_watch_bookmarks') is not None:
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
    if local_var_params.get('_continue') is not None:
        query_params.append(('continue', local_var_params['_continue']))
    if local_var_params.get('field_selector') is not None:
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if local_var_params.get('label_selector') is not None:
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if local_var_params.get('limit') is not None:
        query_params.append(('limit', local_var_params['limit']))
    if local_var_params.get('pretty') is not None:
        query_params.append(('pretty', local_var_params['pretty']))
    if local_var_params.get('resource_version') is not None:
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if local_var_params.get('resource_version_match') is not None:
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if local_var_params.get('send_initial_events') is not None:
        query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
    if local_var_params.get('timeout_seconds') is not None:
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
    if local_var_params.get('watch') is not None:
        query_params.append(('watch', local_var_params['watch']))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apps/v1/controllerrevisions', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ControllerRevisionList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def list_daemon_set_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_daemon_set_for_all_namespaces  # noqa: E501

    list or watch objects of kind DaemonSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_daemon_set_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this.
    :param str _continue: continue token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields.
    :param str label_selector: restrict the returned objects by their labels.
    :param int limit: maximum number of responses to return for a list call.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions may serve the request.
    :param str resource_version_match: how resource_version is applied to list calls.
    :param bool send_initial_events: with watch=True, begin the stream with synthetic
                                     events reflecting the current collection state.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream add/update/remove notifications for the resources.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connection, read) tuple.
    :return: V1DaemonSetList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking it to strip the
    # status code and headers and hand back only the response data.
    kwargs.update(_return_http_data_only=True)
    return self.list_daemon_set_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
+ def list_daemon_set_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_daemon_set_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_daemon_set_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DaemonSetList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_daemon_set_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/daemonsets', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DaemonSetList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_deployment_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_deployment_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_deployment_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DeploymentList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_deployment_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_deployment_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_deployment_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_deployment_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DeploymentList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_deployment_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/deployments', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DeploymentList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_controller_revision(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_controller_revision # noqa: E501
+
+ list or watch objects of kind ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_controller_revision(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ControllerRevisionList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_controller_revision_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_controller_revision # noqa: E501
+
+ list or watch objects of kind ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_controller_revision_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ControllerRevisionList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_controller_revision" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_controller_revision`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/controllerrevisions', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ControllerRevisionList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_daemon_set(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_daemon_set # noqa: E501
+
+ list or watch objects of kind DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_daemon_set(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DaemonSetList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_daemon_set_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_daemon_set # noqa: E501
+
+ list or watch objects of kind DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_daemon_set_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DaemonSetList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_daemon_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_daemon_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DaemonSetList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_deployment(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_deployment # noqa: E501
+
+ list or watch objects of kind Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_deployment(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DeploymentList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_deployment_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_deployment_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_deployment # noqa: E501
+
+ list or watch objects of kind Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_deployment_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DeploymentList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_deployment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_deployment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DeploymentList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_replica_set(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_replica_set # noqa: E501
+
+ list or watch objects of kind ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_replica_set(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicaSetList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_replica_set_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_replica_set_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_replica_set # noqa: E501
+
+ list or watch objects of kind ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_replica_set_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicaSetList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_replica_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_replica_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/replicasets', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicaSetList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_stateful_set(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_stateful_set # noqa: E501
+
+ list or watch objects of kind StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_stateful_set(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StatefulSetList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs) # noqa: E501
+
def list_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_stateful_set  # noqa: E501

    list or watch objects of kind StatefulSet  # noqa: E501
    The request is synchronous by default; pass async_req=True to run it
    asynchronously and receive the request thread instead.
    >>> thread = api.list_namespaced_stateful_set_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this
    :param str _continue: continuation token taken from the list metadata of a previous paginated call
    :param str field_selector: restrict the returned objects by their fields; defaults to everything
    :param str label_selector: restrict the returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses for this list call; further results are retrieved with `continue`
    :param str resource_version: constraint on which resource versions may serve the request
    :param str resource_version_match: how resourceVersion is applied to the list call
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for the current state
    :param int timeout_seconds: cap on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of returning a single list
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout as one number, or a
        (connection, read) tuple
    :return: tuple(V1StatefulSetList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """

    # Snapshot the arguments before any other locals are introduced.
    local_var_params = locals()

    # Endpoint parameters plus the client-control keywords.
    all_params = [
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keywords, then fold the accepted ones into the snapshot.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_stateful_set" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # 'namespace' is interpolated into the path and must be present.
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_stateful_set`")  # noqa: E501

    collection_formats = {}

    path_params = {
        name: local_var_params[name]
        for name in ('namespace',)
        if name in local_var_params
    }

    # (python argument name, wire query key) pairs, in emission order.
    query_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire, local_var_params[arg])
        for arg, wire in query_names
        if local_var_params.get(arg) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None

    # This endpoint supports only bearer-token authentication.
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/statefulsets', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StatefulSetList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def list_replica_set_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_replica_set_for_all_namespaces  # noqa: E501

    list or watch objects of kind ReplicaSet  # noqa: E501
    The request is synchronous by default; pass async_req=True to run it
    asynchronously and receive the request thread instead.
    >>> thread = api.list_replica_set_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this
    :param str _continue: continuation token taken from the list metadata of a previous paginated call
    :param str field_selector: restrict the returned objects by their fields; defaults to everything
    :param str label_selector: restrict the returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses for this list call; further results are retrieved with `continue`
    :param str pretty: if 'true', the output is pretty printed
    :param str resource_version: constraint on which resource versions may serve the request
    :param str resource_version_match: how resourceVersion is applied to the list call
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for the current state
    :param int timeout_seconds: cap on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of returning a single list
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout as one number, or a
        (connection, read) tuple
    :return: V1ReplicaSetList
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the body only.
    kwargs.update(_return_http_data_only=True)
    return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
def list_replica_set_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_replica_set_for_all_namespaces  # noqa: E501

    list or watch objects of kind ReplicaSet  # noqa: E501
    The request is synchronous by default; pass async_req=True to run it
    asynchronously and receive the request thread instead.
    >>> thread = api.list_replica_set_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this
    :param str _continue: continuation token taken from the list metadata of a previous paginated call
    :param str field_selector: restrict the returned objects by their fields; defaults to everything
    :param str label_selector: restrict the returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses for this list call; further results are retrieved with `continue`
    :param str pretty: if 'true', the output is pretty printed
    :param str resource_version: constraint on which resource versions may serve the request
    :param str resource_version_match: how resourceVersion is applied to the list call
    :param bool send_initial_events: with watch=true, begin the stream with synthetic events for the current state
    :param int timeout_seconds: cap on the duration of the list/watch call
    :param bool watch: stream add/update/remove notifications instead of returning a single list
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: total request timeout as one number, or a
        (connection, read) tuple
    :return: tuple(V1ReplicaSetList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """

    # Snapshot the arguments before any other locals are introduced.
    local_var_params = locals()

    # Endpoint parameters plus the client-control keywords.
    all_params = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keywords, then fold the accepted ones into the snapshot.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_replica_set_for_all_namespaces" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    # Cluster-scoped endpoint: no path substitutions.
    path_params = {}

    # (python argument name, wire query key) pairs, in emission order.
    query_names = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire, local_var_params[arg])
        for arg, wire in query_names
        if local_var_params.get(arg) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None

    # This endpoint supports only bearer-token authentication.
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apps/v1/replicasets', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ReplicaSetList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
+ def list_stateful_set_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_stateful_set_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_stateful_set_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StatefulSetList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_stateful_set_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_stateful_set_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_stateful_set_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StatefulSetList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_stateful_set_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/statefulsets', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StatefulSetList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_controller_revision # noqa: E501
+
+ partially update the specified ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ControllerRevision (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ControllerRevision
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_controller_revision # noqa: E501
+
+ partially update the specified ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ControllerRevision (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ControllerRevision, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_controller_revision" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_controller_revision`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_controller_revision`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_controller_revision`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ControllerRevision', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_daemon_set(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_daemon_set # noqa: E501
+
+ partially update the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_daemon_set(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DaemonSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_daemon_set_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_daemon_set_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_daemon_set # noqa: E501
+
+ partially update the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_daemon_set_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_daemon_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_daemon_set`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_daemon_set`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_daemon_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DaemonSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_daemon_set_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_daemon_set_status # noqa: E501
+
+ partially update status of the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_daemon_set_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DaemonSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_daemon_set_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_daemon_set_status  # noqa: E501

        partially update status of the specified DaemonSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_daemon_set_status_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the DaemonSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must run before any other local is created so that
        # it captures exactly self, the declared parameters, and the raw
        # ``kwargs`` dict -- the validation below relies on that snapshot.
        local_var_params = locals()

        # Keyword arguments accepted by this API operation itself.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Plus the client-level transport options common to every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_daemon_set_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_daemon_set_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_daemon_set_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_daemon_set_status`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions for {name} and {namespace}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case Python arguments onto the camelCase query keys
        # expected by the Kubernetes API; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization, and async dispatch to the
        # shared ApiClient; response is deserialized into V1DaemonSet.
        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1DaemonSet',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_deployment(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_deployment # noqa: E501
+
+ partially update the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_deployment(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Deployment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_deployment  # noqa: E501

        partially update the specified Deployment  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Deployment (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must run before any other local is created so that
        # it captures exactly self, the declared parameters, and the raw
        # ``kwargs`` dict -- the validation below relies on that snapshot.
        local_var_params = locals()

        # Keyword arguments accepted by this API operation itself.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Plus the client-level transport options common to every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_deployment" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions for {name} and {namespace}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case Python arguments onto the camelCase query keys
        # expected by the Kubernetes API; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization, and async dispatch to the
        # shared ApiClient; response is deserialized into V1Deployment.
        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Deployment',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_deployment_scale # noqa: E501
+
+ partially update scale of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_deployment_scale  # noqa: E501

        partially update scale of the specified Deployment  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must run before any other local is created so that
        # it captures exactly self, the declared parameters, and the raw
        # ``kwargs`` dict -- the validation below relies on that snapshot.
        local_var_params = locals()

        # Keyword arguments accepted by this API operation itself.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Plus the client-level transport options common to every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_deployment_scale" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_scale`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_scale`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions for {name} and {namespace}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case Python arguments onto the camelCase query keys
        # expected by the Kubernetes API; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization, and async dispatch to the
        # shared ApiClient; response is deserialized into V1Scale.
        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/deployments/{name}/scale', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Scale',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_deployment_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_deployment_status # noqa: E501
+
+ partially update status of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_deployment_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Deployment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_deployment_status  # noqa: E501

        partially update status of the specified Deployment  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Deployment (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots the named parameters plus the 'kwargs' dict
        # itself; accepted optional kwargs are flattened into this mapping
        # in the loop below, after which the nested dict is removed.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument not declared above; merge the rest.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_deployment_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_status`")  # noqa: E501

        # No collection-format (multi-valued) parameters for this endpoint.
        collection_formats = {}

        # Path parameters substituted into the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: only parameters the caller actually supplied are sent
        # (snake_case names are mapped to the API's camelCase keys).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP PATCH to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Deployment',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_replica_set(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_replica_set # noqa: E501
+
+ partially update the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_replica_set(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicaSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicaSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_replica_set_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_replica_set_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_replica_set  # noqa: E501

        partially update the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_replica_set_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicaSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots the named parameters plus the 'kwargs' dict
        # itself; accepted optional kwargs are flattened into this mapping
        # in the loop below, after which the nested dict is removed.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument not declared above; merge the rest.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_replica_set" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_replica_set`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replica_set`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_replica_set`")  # noqa: E501

        # No collection-format (multi-valued) parameters for this endpoint.
        collection_formats = {}

        # Path parameters substituted into the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: only parameters the caller actually supplied are sent
        # (snake_case names are mapped to the API's camelCase keys).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP PATCH to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicaSet',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_replica_set_scale(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_replica_set_scale # noqa: E501
+
+ partially update scale of the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_replica_set_scale(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_replica_set_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_replica_set_scale  # noqa: E501

        partially update scale of the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_replica_set_scale_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots the named parameters plus the 'kwargs' dict
        # itself; accepted optional kwargs are flattened into this mapping
        # in the loop below, after which the nested dict is removed.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument not declared above; merge the rest.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_replica_set_scale" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_replica_set_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replica_set_scale`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_replica_set_scale`")  # noqa: E501

        # No collection-format (multi-valued) parameters for this endpoint.
        collection_formats = {}

        # Path parameters substituted into the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: only parameters the caller actually supplied are sent
        # (snake_case names are mapped to the API's camelCase keys).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP PATCH to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Scale',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
def patch_namespaced_replica_set_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified ReplicaSet.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope (required)
    :param object body: patch payload (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param str field_manager: name associated with the actor making the change
    :param str field_validation: server handling of unknown/duplicate fields
    :param bool force: re-acquire conflicting fields (apply requests only)
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1ReplicaSet, or the request thread when called asynchronously
    """
    # The *_with_http_info variant returns (data, status, headers); ask it
    # to strip the envelope and hand back only the deserialized object.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_replica_set_status_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
+
def patch_namespaced_replica_set_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified ReplicaSet.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope (required)
    :param object body: patch payload (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param str field_manager: name associated with the actor making the change
    :param str field_validation: server handling of unknown/duplicate fields
    :param bool force: re-acquire conflicting fields (apply requests only)
    :param bool async_req: execute the request asynchronously
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously
    :raises ApiTypeError: on an unexpected keyword argument
    :raises ApiValueError: when a required parameter is missing
    """
    # Keywords the caller may supply: the documented API parameters plus
    # the transport-level passthrough options.
    allowed = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager', 'field_validation', 'force',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_replica_set_status" % key
            )
        params[key] = val

    # Required-parameter checks, honoured only when client-side
    # validation is enabled on the ApiClient.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_replica_set_status`" % required)  # noqa: E501

    path_params = {key: params[key]
                   for key in ('name', 'namespace') if key in params}

    # Optional query-string parameters, mapped to their wire names.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
            ('force', 'force')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1ReplicaSet',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def patch_namespaced_stateful_set(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified StatefulSet.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope (required)
    :param object body: patch payload (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param str field_manager: name associated with the actor making the change
    :param str field_validation: server handling of unknown/duplicate fields
    :param bool force: re-acquire conflicting fields (apply requests only)
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1StatefulSet, or the request thread when called asynchronously
    """
    # The *_with_http_info variant returns (data, status, headers); ask it
    # to strip the envelope and hand back only the deserialized object.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_stateful_set_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
+
def patch_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified StatefulSet.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope (required)
    :param object body: patch payload (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param str field_manager: name associated with the actor making the change
    :param str field_validation: server handling of unknown/duplicate fields
    :param bool force: re-acquire conflicting fields (apply requests only)
    :param bool async_req: execute the request asynchronously
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously
    :raises ApiTypeError: on an unexpected keyword argument
    :raises ApiValueError: when a required parameter is missing
    """
    # Keywords the caller may supply: the documented API parameters plus
    # the transport-level passthrough options.
    allowed = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager', 'field_validation', 'force',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_stateful_set" % key
            )
        params[key] = val

    # Required-parameter checks, honoured only when client-side
    # validation is enabled on the ApiClient.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_stateful_set`" % required)  # noqa: E501

    path_params = {key: params[key]
                   for key in ('name', 'namespace') if key in params}

    # Optional query-string parameters, mapped to their wire names.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
            ('force', 'force')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1StatefulSet',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update scale of the specified StatefulSet.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope (required)
    :param object body: patch payload (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param str field_manager: name associated with the actor making the change
    :param str field_validation: server handling of unknown/duplicate fields
    :param bool force: re-acquire conflicting fields (apply requests only)
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1Scale, or the request thread when called asynchronously
    """
    # The *_with_http_info variant returns (data, status, headers); ask it
    # to strip the envelope and hand back only the deserialized object.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_stateful_set_scale_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
+
def patch_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update scale of the specified StatefulSet.

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the result.

    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope (required)
    :param object body: patch payload (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param str field_manager: name associated with the actor making the change
    :param str field_validation: server handling of unknown/duplicate fields
    :param bool force: re-acquire conflicting fields (apply requests only)
    :param bool async_req: execute the request asynchronously
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously
    :raises ApiTypeError: on an unexpected keyword argument
    :raises ApiValueError: when a required parameter is missing
    """
    # Keywords the caller may supply: the documented API parameters plus
    # the transport-level passthrough options.
    allowed = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager', 'field_validation', 'force',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_stateful_set_scale" % key
            )
        params[key] = val

    # Required-parameter checks, honoured only when client-side
    # validation is enabled on the ApiClient.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_stateful_set_scale`" % required)  # noqa: E501

    path_params = {key: params[key]
                   for key in ('name', 'namespace') if key in params}

    # Optional query-string parameters, mapped to their wire names.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
            ('field_validation', 'fieldValidation'),
            ('force', 'force')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/scale', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Scale',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def patch_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified StatefulSet.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: patch document (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: dry-run directive; 'All' processes every stage without persisting.
    :param str field_manager: actor/entity making the change; required for apply patches, optional otherwise.
    :param str field_validation: Ignore / Warn / Strict handling of unknown or duplicate fields.
    :param bool force: re-acquire conflicting fields; must be unset for non-apply patches.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1StatefulSet, or the request thread when called asynchronously.
    """
    # This convenience wrapper returns only the deserialized body; callers
    # wanting (data, status, headers) use the *_with_http_info variant.
    kwargs.update(_return_http_data_only=True)
    return self.patch_namespaced_stateful_set_status_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
+
def patch_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_stateful_set_status  # noqa: E501

    partially update status of the specified StatefulSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the arguments first: locals() here holds exactly self, name,
    # namespace, body and the raw ``kwargs`` dict. Generated code relies on
    # this, so no other local may be introduced above this line.
    local_var_params = locals()

    # Operation-specific parameters declared in the OpenAPI spec ...
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force'
    ]
    # ... plus the client-level options every generated operation accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # the snapshot so positional and keyword params are looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_stateful_set_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_status`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} path placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Only parameters explicitly supplied (not None) become query items;
    # note snake_case params map to the server's camelCase query keys.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate HTTP handling (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StatefulSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_controller_revision(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified ControllerRevision.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    :param str name: name of the ControllerRevision (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1ControllerRevision, or the request thread when called asynchronously.
    """
    # Return only the deserialized object; the *_with_http_info variant
    # exposes the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_controller_revision_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
def read_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_controller_revision  # noqa: E501

    read the specified ControllerRevision  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_controller_revision_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ControllerRevision (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ControllerRevision, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the arguments first: locals() here holds exactly self, name,
    # namespace and the raw ``kwargs`` dict; no other local may be
    # introduced above this line.
    local_var_params = locals()

    # Operation-specific parameters declared in the OpenAPI spec ...
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # ... plus the client-level options every generated operation accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # the snapshot for uniform lookup below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_controller_revision" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_controller_revision`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_controller_revision`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} path placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Only explicitly supplied (non-None) parameters become query items.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate HTTP handling (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ControllerRevision',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_daemon_set(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified DaemonSet.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1DaemonSet, or the request thread when called asynchronously.
    """
    # Return only the deserialized object; the *_with_http_info variant
    # exposes the full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.read_namespaced_daemon_set_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
def read_namespaced_daemon_set_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_daemon_set  # noqa: E501

    read the specified DaemonSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_daemon_set_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the arguments first: locals() here holds exactly self, name,
    # namespace and the raw ``kwargs`` dict; no other local may be
    # introduced above this line.
    local_var_params = locals()

    # Operation-specific parameters declared in the OpenAPI spec ...
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # ... plus the client-level options every generated operation accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # the snapshot for uniform lookup below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_daemon_set" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_daemon_set`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_daemon_set`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} path placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Only explicitly supplied (non-None) parameters become query items.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate HTTP handling (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1DaemonSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_daemon_set_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read status of the specified DaemonSet.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1DaemonSet, or the request thread when called asynchronously.
    """
    # Return only the deserialized object; the *_with_http_info variant
    # exposes the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_daemon_set_status_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
def read_namespaced_daemon_set_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_daemon_set_status  # noqa: E501

    read status of the specified DaemonSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_daemon_set_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the arguments first: locals() here holds exactly self, name,
    # namespace and the raw ``kwargs`` dict; no other local may be
    # introduced above this line.
    local_var_params = locals()

    # Operation-specific parameters declared in the OpenAPI spec ...
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # ... plus the client-level options every generated operation accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # the snapshot for uniform lookup below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_daemon_set_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_daemon_set_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_daemon_set_status`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} path placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Only explicitly supplied (non-None) parameters become query items.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate HTTP handling (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1DaemonSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_deployment(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified Deployment.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    :param str name: name of the Deployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1Deployment, or the request thread when called asynchronously.
    """
    # Return only the deserialized object; the *_with_http_info variant
    # exposes the full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.read_namespaced_deployment_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
def read_namespaced_deployment_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_deployment  # noqa: E501

    read the specified Deployment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_deployment_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Deployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the arguments first: locals() here holds exactly self, name,
    # namespace and the raw ``kwargs`` dict; no other local may be
    # introduced above this line.
    local_var_params = locals()

    # Operation-specific parameters declared in the OpenAPI spec ...
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # ... plus the client-level options every generated operation accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # the snapshot for uniform lookup below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_deployment" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_deployment`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} path placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Only explicitly supplied (non-None) parameters become query items.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate HTTP handling (and optional async dispatch) to ApiClient.
    return self.api_client.call_api(
        '/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Deployment',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_deployment_scale(self, name, namespace, **kwargs):  # noqa: E501
    """Read scale of the specified Deployment.

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: V1Scale, or the request thread when called asynchronously.
    """
    # Return only the deserialized object; the *_with_http_info variant
    # exposes the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_deployment_scale_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
+ def read_namespaced_deployment_scale_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_deployment_scale # noqa: E501
+
+ read scale of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_deployment_scale_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots name/namespace plus the raw kwargs dict, which is
+ # validated and flattened into this mapping below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared above, then merge kwargs in.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_deployment_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path templating values for {namespace} and {name} in the URL below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments/{name}/scale', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Scale', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_deployment_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_deployment_status # noqa: E501
+
+ read status of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_deployment_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Deployment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only return, then delegate to the
+ # *_with_http_info variant, which performs the actual HTTP call.
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_deployment_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_deployment_status # noqa: E501
+
+ read status of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_deployment_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots name/namespace plus the raw kwargs dict, which is
+ # validated and flattened into this mapping below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared above, then merge kwargs in.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_deployment_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_status`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path templating values for {namespace} and {name} in the URL below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Deployment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_replica_set(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replica_set # noqa: E501
+
+ read the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicaSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicaSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only return, then delegate to the
+ # *_with_http_info variant, which performs the actual HTTP call.
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_replica_set_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replica_set # noqa: E501
+
+ read the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replica_set_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicaSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots name/namespace plus the raw kwargs dict, which is
+ # validated and flattened into this mapping below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared above, then merge kwargs in.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_replica_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_replica_set`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_replica_set`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path templating values for {namespace} and {name} in the URL below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicaSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_replica_set_scale(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replica_set_scale # noqa: E501
+
+ read scale of the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replica_set_scale(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only return, then delegate to the
+ # *_with_http_info variant, which performs the actual HTTP call.
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_replica_set_scale_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_replica_set_scale_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replica_set_scale # noqa: E501
+
+ read scale of the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replica_set_scale_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots name/namespace plus the raw kwargs dict, which is
+ # validated and flattened into this mapping below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared above, then merge kwargs in.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_replica_set_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_replica_set_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_replica_set_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path templating values for {namespace} and {name} in the URL below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Scale', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_replica_set_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replica_set_status # noqa: E501
+
+ read status of the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replica_set_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicaSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicaSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only return, then delegate to the
+ # *_with_http_info variant, which performs the actual HTTP call.
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_replica_set_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_replica_set_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replica_set_status # noqa: E501
+
+ read status of the specified ReplicaSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replica_set_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicaSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots name/namespace plus the raw kwargs dict, which is
+ # validated and flattened into this mapping below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared above, then merge kwargs in.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_replica_set_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_replica_set_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_replica_set_status`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path templating values for {namespace} and {name} in the URL below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicaSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_stateful_set(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_stateful_set # noqa: E501
+
+ read the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StatefulSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only return, then delegate to the
+ # *_with_http_info variant, which performs the actual HTTP call.
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_stateful_set # noqa: E501
+
+ read the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_stateful_set_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots name/namespace plus the raw kwargs dict, which is
+ # validated and flattened into this mapping below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared above, then merge kwargs in.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_stateful_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path templating values for {namespace} and {name} in the URL below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StatefulSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_stateful_set_scale(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_stateful_set_scale # noqa: E501
+
+ read scale of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_stateful_set_scale(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only return, then delegate to the
+ # *_with_http_info variant, which performs the actual HTTP call.
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_stateful_set_scale_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_stateful_set_scale # noqa: E501
+
+ read scale of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_stateful_set_scale_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_stateful_set_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/scale', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Scale', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_stateful_set_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_stateful_set_status # noqa: E501
+
+ read status of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_stateful_set_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StatefulSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_stateful_set_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_stateful_set_status # noqa: E501
+
+ read status of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_stateful_set_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_stateful_set_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StatefulSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_controller_revision # noqa: E501
+
+ replace the specified ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ControllerRevision (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ControllerRevision body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ControllerRevision
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_controller_revision # noqa: E501
+
+ replace the specified ControllerRevision # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ControllerRevision (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ControllerRevision body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ControllerRevision, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_controller_revision" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_controller_revision`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_controller_revision`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_controller_revision`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/controllerrevisions/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ControllerRevision', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_daemon_set(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_daemon_set # noqa: E501
+
+ replace the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_daemon_set(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1DaemonSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DaemonSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_daemon_set_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_daemon_set_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_daemon_set # noqa: E501
+
+ replace the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_daemon_set_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1DaemonSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_daemon_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_daemon_set`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_daemon_set`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_daemon_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DaemonSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_daemon_set_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_daemon_set_status # noqa: E501
+
+ replace status of the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_daemon_set_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1DaemonSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1DaemonSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_daemon_set_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_daemon_set_status # noqa: E501
+
+ replace status of the specified DaemonSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the DaemonSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1DaemonSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DaemonSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_daemon_set_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_daemon_set_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_daemon_set_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_daemon_set_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DaemonSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_deployment(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_deployment # noqa: E501
+
+ replace the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_deployment(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Deployment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Deployment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_deployment # noqa: E501
+
+ replace the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Deployment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_deployment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Deployment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_deployment_scale # noqa: E501
+
+ replace scale of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Scale body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_deployment_scale # noqa: E501
+
+ replace scale of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Scale body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_deployment_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_scale`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments/{name}/scale', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Scale', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_deployment_status # noqa: E501
+
+ replace status of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Deployment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Deployment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_deployment_status # noqa: E501
+
+ replace status of the specified Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Deployment (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Deployment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Deployment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_deployment_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Deployment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
    def replace_namespaced_replica_set(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replica_set  # noqa: E501

        replace the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replica_set(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicaSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ReplicaSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1ReplicaSet
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience form: force the *_with_http_info variant to return only
        # the deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.replace_namespaced_replica_set_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
    def replace_namespaced_replica_set_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replica_set  # noqa: E501

        replace the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replica_set_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicaSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ReplicaSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots self, every declared parameter and the kwargs
        # dict; the generated code below depends on these exact names, so
        # nothing may be renamed or rebound before this call.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the flat local_var_params mapping used below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_replica_set" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Required-parameter checks are skipped entirely when the api_client
        # has client_side_validation disabled.
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_replica_set`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replica_set`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_replica_set`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query parameters; snake_case locals map to the API's
        # camelCase query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicaSet',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def replace_namespaced_replica_set_scale(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replica_set_scale  # noqa: E501

        replace scale of the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Scale body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Scale
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience form: force the *_with_http_info variant to return only
        # the deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
    def replace_namespaced_replica_set_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replica_set_scale  # noqa: E501

        replace scale of the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Scale body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots self, every declared parameter and the kwargs
        # dict; the generated code below depends on these exact names, so
        # nothing may be renamed or rebound before this call.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the flat local_var_params mapping used below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_replica_set_scale" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Required-parameter checks are skipped entirely when the api_client
        # has client_side_validation disabled.
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_replica_set_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replica_set_scale`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_replica_set_scale`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query parameters; snake_case locals map to the API's
        # camelCase query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Scale',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def replace_namespaced_replica_set_status(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replica_set_status  # noqa: E501

        replace status of the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replica_set_status(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicaSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ReplicaSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1ReplicaSet
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience form: force the *_with_http_info variant to return only
        # the deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.replace_namespaced_replica_set_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
    def replace_namespaced_replica_set_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replica_set_status  # noqa: E501

        replace status of the specified ReplicaSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replica_set_status_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicaSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ReplicaSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicaSet, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots self, every declared parameter and the kwargs
        # dict; the generated code below depends on these exact names, so
        # nothing may be renamed or rebound before this call.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Framework-level options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the flat local_var_params mapping used below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_replica_set_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Required-parameter checks are skipped entirely when the api_client
        # has client_side_validation disabled.
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_replica_set_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replica_set_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_replica_set_status`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query parameters; snake_case locals map to the API's
        # camelCase query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicaSet',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def replace_namespaced_stateful_set(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_stateful_set  # noqa: E501

        replace the specified StatefulSet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_stateful_set(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the StatefulSet (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1StatefulSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1StatefulSet
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience form: force the *_with_http_info variant to return only
        # the deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
+ def replace_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_stateful_set # noqa: E501
+
+ replace the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1StatefulSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_stateful_set" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StatefulSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_stateful_set_scale # noqa: E501
+
+ replace scale of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Scale body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_stateful_set_scale # noqa: E501
+
+ replace scale of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Scale body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without HTTP status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_stateful_set_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_scale`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/scale', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Scale', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_stateful_set_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_stateful_set_status # noqa: E501
+
+ replace status of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_stateful_set_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1StatefulSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StatefulSet
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_stateful_set_status # noqa: E501
+
+ replace status of the specified StatefulSet # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StatefulSet (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1StatefulSet body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without HTTP status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StatefulSet, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_stateful_set_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StatefulSet', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/authentication_api.py b/contrib/python/kubernetes/kubernetes/client/api/authentication_api.py
new file mode 100644
index 0000000000..f80e0c34b3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/authentication_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AuthenticationApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def get_api_group(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIGroup
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_group_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_group_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without HTTP status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_group" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authentication.k8s.io/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIGroup', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/authentication_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/authentication_v1_api.py
new file mode 100644
index 0000000000..162b094b60
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/authentication_v1_api.py
@@ -0,0 +1,410 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AuthenticationV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_self_subject_review(self, body, **kwargs): # noqa: E501
+ """create_self_subject_review # noqa: E501
+
+ create a SelfSubjectReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_self_subject_review(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1SelfSubjectReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1SelfSubjectReview
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_self_subject_review_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_self_subject_review_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_self_subject_review # noqa: E501
+
+ create a SelfSubjectReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_self_subject_review_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1SelfSubjectReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1SelfSubjectReview, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_self_subject_review" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_review`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authentication.k8s.io/v1/selfsubjectreviews', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1SelfSubjectReview', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_token_review(self, body, **kwargs): # noqa: E501
+ """create_token_review # noqa: E501
+
+ create a TokenReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_token_review(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1TokenReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1TokenReview
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_token_review_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_token_review_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_token_review # noqa: E501
+
+ create a TokenReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_token_review_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1TokenReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1TokenReview, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_token_review" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_token_review`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authentication.k8s.io/v1/tokenreviews', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1TokenReview', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authentication.k8s.io/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/authentication_v1alpha1_api.py b/contrib/python/kubernetes/kubernetes/client/api/authentication_v1alpha1_api.py
new file mode 100644
index 0000000000..bd6ac1630c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/authentication_v1alpha1_api.py
@@ -0,0 +1,276 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AuthenticationV1alpha1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_self_subject_review(self, body, **kwargs): # noqa: E501
+ """create_self_subject_review # noqa: E501
+
+ create a SelfSubjectReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_self_subject_review(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1alpha1SelfSubjectReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1SelfSubjectReview
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_self_subject_review_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_self_subject_review_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_self_subject_review # noqa: E501
+
+ create a SelfSubjectReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_self_subject_review_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1alpha1SelfSubjectReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1SelfSubjectReview, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_self_subject_review" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_review`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authentication.k8s.io/v1alpha1/selfsubjectreviews', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1SelfSubjectReview', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authentication.k8s.io/v1alpha1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/authentication_v1beta1_api.py b/contrib/python/kubernetes/kubernetes/client/api/authentication_v1beta1_api.py
new file mode 100644
index 0000000000..a1cc0fbd0d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/authentication_v1beta1_api.py
@@ -0,0 +1,276 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AuthenticationV1beta1Api(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when the caller does
+        # not supply one.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_self_subject_review(self, body, **kwargs):  # noqa: E501
+        """create_self_subject_review  # noqa: E501
+
+        create a SelfSubjectReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_self_subject_review(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1beta1SelfSubjectReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta1SelfSubjectReview
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, requesting only the
+        # deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.create_self_subject_review_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_self_subject_review_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_self_subject_review  # noqa: E501
+
+        create a SelfSubjectReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_self_subject_review_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1beta1SelfSubjectReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta1SelfSubjectReview, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is evaluated before any other local variable is
+        # created, so it captures exactly the declared parameters plus the
+        # 'kwargs' dict; do not introduce variables above this line.
+        local_var_params = locals()
+
+        all_params = [
+            'body',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'pretty'
+        ]
+        # Control parameters common to every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on keyword arguments this method does not accept, then
+        # fold the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_self_subject_review" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_review`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authentication.k8s.io/v1beta1/selfsubjectreviews', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta1SelfSubjectReview',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def get_api_resources(self, **kwargs):  # noqa: E501
+        """get_api_resources  # noqa: E501
+
+        get available resources  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_resources(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIResourceList
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, requesting only the
+        # deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_resources  # noqa: E501
+
+        get available resources  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_resources_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is evaluated first so it captures exactly this
+        # method's parameters plus the 'kwargs' dict.
+        local_var_params = locals()
+
+        # This endpoint takes no operation-specific parameters; only the
+        # common control parameters below are accepted.
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on keyword arguments this method does not accept, then
+        # fold the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_resources" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authentication.k8s.io/v1beta1/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIResourceList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/authorization_api.py b/contrib/python/kubernetes/kubernetes/client/api/authorization_api.py
new file mode 100644
index 0000000000..e74472b8c0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/authorization_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AuthorizationApi(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when the caller does
+        # not supply one.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_api_group(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIGroup
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, requesting only the
+        # deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is evaluated first so it captures exactly this
+        # method's parameters plus the 'kwargs' dict.
+        local_var_params = locals()
+
+        # This endpoint takes no operation-specific parameters; only the
+        # common control parameters below are accepted.
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on keyword arguments this method does not accept, then
+        # fold the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_group" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authorization.k8s.io/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIGroup',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/authorization_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/authorization_v1_api.py
new file mode 100644
index 0000000000..3956cd6909
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/authorization_v1_api.py
@@ -0,0 +1,687 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AuthorizationV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when the caller does
+        # not supply one.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_namespaced_local_subject_access_review(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_local_subject_access_review  # noqa: E501
+
+        create a LocalSubjectAccessReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_local_subject_access_review(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1LocalSubjectAccessReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1LocalSubjectAccessReview
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, requesting only the
+        # deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.create_namespaced_local_subject_access_review_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
+    def create_namespaced_local_subject_access_review_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_local_subject_access_review  # noqa: E501
+
+        create a LocalSubjectAccessReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_local_subject_access_review_with_http_info(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1LocalSubjectAccessReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1LocalSubjectAccessReview, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is evaluated before any other local variable is
+        # created, so it captures exactly the declared parameters plus the
+        # 'kwargs' dict; do not introduce variables above this line.
+        local_var_params = locals()
+
+        all_params = [
+            'namespace',
+            'body',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'pretty'
+        ]
+        # Control parameters common to every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on keyword arguments this method does not accept, then
+        # fold the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_local_subject_access_review" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_local_subject_access_review`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_local_subject_access_review`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authorization.k8s.io/v1/namespaces/{namespace}/localsubjectaccessreviews', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1LocalSubjectAccessReview',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def create_self_subject_access_review(self, body, **kwargs):  # noqa: E501
+        """create_self_subject_access_review  # noqa: E501
+
+        create a SelfSubjectAccessReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_self_subject_access_review(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1SelfSubjectAccessReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1SelfSubjectAccessReview
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, requesting only the
+        # deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.create_self_subject_access_review_with_http_info(body, **kwargs)  # noqa: E501
+
+ def create_self_subject_access_review_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_self_subject_access_review # noqa: E501
+
+ create a SelfSubjectAccessReview # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_self_subject_access_review_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1SelfSubjectAccessReview body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1SelfSubjectAccessReview, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_self_subject_access_review" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_access_review`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/authorization.k8s.io/v1/selfsubjectaccessreviews', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1SelfSubjectAccessReview', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+    def create_self_subject_rules_review(self, body, **kwargs):  # noqa: E501
+        """create_self_subject_rules_review  # noqa: E501
+
+        create a SelfSubjectRulesReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_self_subject_rules_review(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1SelfSubjectRulesReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1SelfSubjectRulesReview
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the *_with_http_info variant to return
+        # only the deserialized response body instead of the full
+        # (data, status_code, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.create_self_subject_rules_review_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_self_subject_rules_review_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_self_subject_rules_review  # noqa: E501
+
+        create a SelfSubjectRulesReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_self_subject_rules_review_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1SelfSubjectRulesReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1SelfSubjectRulesReview, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures {'self', 'body', 'kwargs'}; **kwargs is merged
+        # into this dict below so every parameter can be looked up uniformly
+        # by name.
+        local_var_params = locals()
+
+        # Operation-specific parameters accepted by this endpoint.
+        all_params = [
+            'body',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'pretty'
+        ]
+        # Generic per-request options shared by all generated API methods.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments early, then flatten the valid
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_self_subject_rules_review" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_rules_review`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Translate snake_case parameter names to their camelCase wire-format
+        # query keys; unset parameters are omitted from the query string.
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The request body is the V1SelfSubjectRulesReview object itself.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authorization.k8s.io/v1/selfsubjectrulesreviews', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1SelfSubjectRulesReview',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def create_subject_access_review(self, body, **kwargs):  # noqa: E501
+        """create_subject_access_review  # noqa: E501
+
+        create a SubjectAccessReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_subject_access_review(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1SubjectAccessReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1SubjectAccessReview
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the *_with_http_info variant to return
+        # only the deserialized response body instead of the full
+        # (data, status_code, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.create_subject_access_review_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_subject_access_review_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_subject_access_review  # noqa: E501
+
+        create a SubjectAccessReview  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_subject_access_review_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1SubjectAccessReview body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1SubjectAccessReview, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures {'self', 'body', 'kwargs'}; **kwargs is merged
+        # into this dict below so every parameter can be looked up uniformly
+        # by name.
+        local_var_params = locals()
+
+        # Operation-specific parameters accepted by this endpoint.
+        all_params = [
+            'body',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'pretty'
+        ]
+        # Generic per-request options shared by all generated API methods.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments early, then flatten the valid
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_subject_access_review" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_subject_access_review`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Translate snake_case parameter names to their camelCase wire-format
+        # query keys; unset parameters are omitted from the query string.
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The request body is the V1SubjectAccessReview object itself.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authorization.k8s.io/v1/subjectaccessreviews', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1SubjectAccessReview',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def get_api_resources(self, **kwargs):  # noqa: E501
+        """get_api_resources  # noqa: E501
+
+        get available resources  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_resources(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIResourceList
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the *_with_http_info variant to return
+        # only the deserialized response body instead of the full
+        # (data, status_code, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_resources  # noqa: E501
+
+        get available resources  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_resources_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures {'self', 'kwargs'}; **kwargs is merged into this
+        # dict below so every option can be looked up uniformly by name.
+        local_var_params = locals()
+
+        # This discovery endpoint takes no operation-specific parameters.
+        all_params = [
+        ]
+        # Generic per-request options shared by all generated API methods.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments early, then flatten the valid
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_resources" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/authorization.k8s.io/v1/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIResourceList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/autoscaling_api.py b/contrib/python/kubernetes/kubernetes/client/api/autoscaling_api.py
new file mode 100644
index 0000000000..ac384dac55
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/autoscaling_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AutoscalingApi(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when the caller does
+        # not supply one.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_api_group(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIGroup
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the *_with_http_info variant to return
+        # only the deserialized response body instead of the full
+        # (data, status_code, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures {'self', 'kwargs'}; **kwargs is merged into this
+        # dict below so every option can be looked up uniformly by name.
+        local_var_params = locals()
+
+        # This discovery endpoint takes no operation-specific parameters.
+        all_params = [
+        ]
+        # Generic per-request options shared by all generated API methods.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments early, then flatten the valid
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_group" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/autoscaling/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIGroup',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/autoscaling_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/autoscaling_v1_api.py
new file mode 100644
index 0000000000..24226387f5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/autoscaling_v1_api.py
@@ -0,0 +1,1833 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AutoscalingV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_namespaced_horizontal_pod_autoscaler(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ create a HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_horizontal_pod_autoscaler(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_horizontal_pod_autoscaler_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ create a HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_horizontal_pod_autoscaler(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ delete collection of HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_horizontal_pod_autoscaler(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_horizontal_pod_autoscaler_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ delete collection of HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_horizontal_pod_autoscaler(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ delete a HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_horizontal_pod_autoscaler(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, **kwargs) # noqa: E501
+
def delete_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_horizontal_pod_autoscaler  # noqa: E501

    Delete a HorizontalPodAutoscaler. The call is synchronous by default;
    pass async_req=True to receive a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the HorizontalPodAutoscaler (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative deletion grace period in seconds; 0 means delete immediately
    :param bool orphan_dependents: deprecated in favour of propagation_policy; whether dependent objects are orphaned
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # Keyword arguments this operation accepts, plus the framework-level
    # options every generated method understands.
    accepted = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    params = {'name': name, 'namespace': namespace}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_horizontal_pod_autoscaler" % key
            )
        params[key] = val

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_horizontal_pod_autoscaler`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_horizontal_pod_autoscaler`")  # noqa: E501

    path_params = {key: params[key] for key in ('name', 'namespace') if key in params}

    # Translate python_snake_case options to their camelCase wire names,
    # keeping the API's declared ordering and skipping unset values.
    query_params = [
        (wire_name, params[local_name])
        for local_name, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if params.get(local_name) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get the resources available in the autoscaling/v1 API group.
    Synchronous by default; pass async_req=True to receive a thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: V1APIResourceList, or the request thread when called asynchronously
    """
    # Force the data-only form; the *_with_http_info variant does the work.
    kwargs.update(_return_http_data_only=True)
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get the resources available in the autoscaling/v1 API group.
    Synchronous by default; pass async_req=True to receive a thread
    whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # This operation takes no API parameters; only framework options apply.
    accepted = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/autoscaling/v1/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
+
def list_horizontal_pod_autoscaler_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_horizontal_pod_autoscaler_for_all_namespaces  # noqa: E501

    List or watch objects of kind HorizontalPodAutoscaler across all
    namespaces. Synchronous by default; pass async_req=True to receive
    a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore)
    :param str _continue: continue token from a previous paginated list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call (pagination)
    :param str pretty: if 'true', the output is pretty printed
    :param str resource_version: constraint on the resource versions served
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=True, start the stream with synthetic events for current state
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: V1HorizontalPodAutoscalerList, or the request thread when called asynchronously
    """
    # Force the data-only form; the *_with_http_info variant does the work.
    kwargs.update(_return_http_data_only=True)
    return self.list_horizontal_pod_autoscaler_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
def list_horizontal_pod_autoscaler_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_horizontal_pod_autoscaler_for_all_namespaces  # noqa: E501

    List or watch objects of kind HorizontalPodAutoscaler across all
    namespaces. Synchronous by default; pass async_req=True to receive
    a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore)
    :param str _continue: continue token from a previous paginated list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call (pagination)
    :param str pretty: if 'true', the output is pretty printed
    :param str resource_version: constraint on the resource versions served
    :param str resource_version_match: how resource_version is applied to list calls
    :param bool send_initial_events: with watch=True, start the stream with synthetic events for current state
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total request timeout, or a (connection, read) tuple
    :return: tuple(V1HorizontalPodAutoscalerList, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # (python_snake_case option, camelCase wire name) pairs, in the order the
    # API declares them; also the whitelist for kwarg validation below.
    option_map = (
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    accepted = [local_name for local_name, _ in option_map] + [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_horizontal_pod_autoscaler_for_all_namespaces" % key
            )

    # Build the query string from whatever options the caller actually set.
    query_params = [
        (wire_name, kwargs[local_name])
        for local_name, wire_name in option_map
        if kwargs.get(local_name) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/autoscaling/v1/horizontalpodautoscalers', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1HorizontalPodAutoscalerList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
+
+ def list_namespaced_horizontal_pod_autoscaler(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_horizontal_pod_autoscaler(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscalerList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_horizontal_pod_autoscaler_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscalerList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE: locals() must be the very first binding in this method so the
+ # snapshot holds exactly the declared parameters plus the 'kwargs' dict.
+ local_var_params = locals()
+
+ # Parameters this endpoint accepts (snake_case, as exposed to callers).
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ # Client-side options consumed by api_client.call_api, never sent to the server.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Fold **kwargs into the parameter map, rejecting unknown keywords early.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path parameters substituted into the URL template below.
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Map snake_case arguments onto the endpoint's camelCase query string;
+ # None values are omitted so server-side defaults apply.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ # '_continue' is underscore-prefixed only because 'continue' is a Python keyword.
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # GET request: no body is sent.
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscalerList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_horizontal_pod_autoscaler(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ partially update the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Delegate to the _with_http_info variant; forcing _return_http_data_only
+ # strips the (data, status, headers) tuple down to just the response body.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ partially update the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE: locals() must be the very first binding in this method so the
+ # snapshot holds exactly the declared parameters plus the 'kwargs' dict.
+ local_var_params = locals()
+
+ # Parameters this endpoint accepts (snake_case, as exposed to callers).
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ # Client-side options consumed by api_client.call_api, never sent to the server.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Fold **kwargs into the parameter map, rejecting unknown keywords early.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path parameters substituted into the URL template below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Map snake_case arguments onto the endpoint's camelCase query string;
+ # None values are omitted so server-side defaults apply.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # PATCH request: the caller-supplied patch document is the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ partially update status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Delegate to the _with_http_info variant; forcing _return_http_data_only
+ # strips the (data, status, headers) tuple down to just the response body.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ partially update status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_horizontal_pod_autoscaler_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_horizontal_pod_autoscaler(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ read the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ read the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ read status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ read status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_horizontal_pod_autoscaler_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_horizontal_pod_autoscaler(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ replace the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ replace the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ replace status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ replace status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_horizontal_pod_autoscaler_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/autoscaling_v2_api.py b/contrib/python/kubernetes/kubernetes/client/api/autoscaling_v2_api.py
new file mode 100644
index 0000000000..5778ed5669
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/autoscaling_v2_api.py
@@ -0,0 +1,1833 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class AutoscalingV2Api(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):  # api_client: optional pre-configured ApiClient; shared by all calls
+        if api_client is None:
+            api_client = ApiClient()  # fall back to a freshly constructed default client
+        self.api_client = api_client
+
+    def create_namespaced_horizontal_pod_autoscaler(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_horizontal_pod_autoscaler  # noqa: E501
+
+        create a HorizontalPodAutoscaler  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_horizontal_pod_autoscaler(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V2HorizontalPodAutoscaler body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V2HorizontalPodAutoscaler
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # return only the deserialized body, not the (data, status, headers) tuple
+        return self.create_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
+    def create_namespaced_horizontal_pod_autoscaler_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_horizontal_pod_autoscaler  # noqa: E501
+
+        create a HorizontalPodAutoscaler  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V2HorizontalPodAutoscaler body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of named args plus the kwargs dict, keyed by name
+
+        all_params = [
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_horizontal_pod_autoscaler" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_horizontal_pod_autoscaler`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_horizontal_pod_autoscaler`")  # noqa: E501
+
+        collection_formats = {}  # no collection-typed (csv/multi) query parameters on this endpoint
+
+        path_params = {}  # substituted into {namespace} in the URL template below
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []  # built as (wire-name, value) pairs; snake_case mapped to camelCase
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []  # unused here; endpoint takes a JSON body, not form data
+        local_var_files = {}
+
+        body_params = None  # the V2HorizontalPodAutoscaler manifest, serialized by api_client
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V2HorizontalPodAutoscaler',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_namespaced_horizontal_pod_autoscaler(self, namespace, **kwargs):  # noqa: E501
+        """delete_collection_namespaced_horizontal_pod_autoscaler  # noqa: E501
+
+        delete collection of HorizontalPodAutoscaler  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_namespaced_horizontal_pod_autoscaler(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # callers get just the V1Status, not the (data, status, headers) tuple
+        return self.delete_collection_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, **kwargs)  # noqa: E501
+
+    def delete_collection_namespaced_horizontal_pod_autoscaler_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """delete_collection_namespaced_horizontal_pod_autoscaler  # noqa: E501
+
+        delete collection of HorizontalPodAutoscaler  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of named args plus the kwargs dict, keyed by name
+
+        all_params = [
+            'namespace',
+            'pretty',
+            '_continue',
+            'dry_run',
+            'field_selector',
+            'grace_period_seconds',
+            'label_selector',
+            'limit',
+            'orphan_dependents',
+            'propagation_policy',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'body'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_collection_namespaced_horizontal_pod_autoscaler" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_horizontal_pod_autoscaler`")  # noqa: E501
+
+        collection_formats = {}  # no collection-typed (csv/multi) query parameters on this endpoint
+
+        path_params = {}  # substituted into {namespace} in the URL template below
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []  # built as (wire-name, value) pairs; snake_case mapped to camelCase
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []  # unused here; endpoint takes an optional JSON body, not form data
+        local_var_files = {}
+
+        body_params = None  # optional V1DeleteOptions payload
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_namespaced_horizontal_pod_autoscaler(self, name, namespace, **kwargs):  # noqa: E501
+        """delete_namespaced_horizontal_pod_autoscaler  # noqa: E501
+
+        delete a HorizontalPodAutoscaler  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_namespaced_horizontal_pod_autoscaler(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the HorizontalPodAutoscaler (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # callers get just the V1Status, not the (data, status, headers) tuple
+        return self.delete_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
+ def delete_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ delete a HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_horizontal_pod_autoscaler_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_horizontal_pod_autoscaler_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_horizontal_pod_autoscaler_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscalerList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_horizontal_pod_autoscaler_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_horizontal_pod_autoscaler_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_horizontal_pod_autoscaler_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_horizontal_pod_autoscaler_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V2HorizontalPodAutoscalerList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_horizontal_pod_autoscaler_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/horizontalpodautoscalers', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V2HorizontalPodAutoscalerList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_horizontal_pod_autoscaler(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ list or watch objects of kind HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_horizontal_pod_autoscaler(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscalerList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, **kwargs) # noqa: E501
+
    def list_namespaced_horizontal_pod_autoscaler_with_http_info(self, namespace, **kwargs):  # noqa: E501
        """list_namespaced_horizontal_pod_autoscaler  # noqa: E501

        list or watch objects of kind HorizontalPodAutoscaler  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_namespaced_horizontal_pod_autoscaler_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V2HorizontalPodAutoscalerList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit arguments plus the raw **kwargs dict;
        # the kwargs entries are validated and folded into this flat dict below.
        local_var_params = locals()

        # API-defined parameters accepted by this endpoint.  Note '_continue'
        # stands in for the wire name 'continue', a Python keyword.
        all_params = [
            'namespace',
            'pretty',
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # Internal transport knobs accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early, then merge the known ones
        # into local_var_params so lookups below see a single flat dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_namespaced_horizontal_pod_autoscaler" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_horizontal_pod_autoscaler`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace} segment of the URL template.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string, built only from parameters the caller actually set
        # (None means "use the server default"); snake_case maps to the
        # API's camelCase wire names.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent for a list/watch call.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP round trip (and optional async dispatch)
        # to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2HorizontalPodAutoscalerList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_horizontal_pod_autoscaler(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ partially update the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_horizontal_pod_autoscaler  # noqa: E501

        partially update the specified HorizontalPodAutoscaler  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the HorizontalPodAutoscaler (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit arguments plus the raw **kwargs dict;
        # the kwargs entries are validated and folded into this flat dict below.
        local_var_params = locals()

        # API-defined parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Internal transport knobs accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early, then merge the known ones
        # into local_var_params so lookups below see a single flat dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_horizontal_pod_autoscaler" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_horizontal_pod_autoscaler`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_horizontal_pod_autoscaler`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_horizontal_pod_autoscaler`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace}/{name} segments of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string, built only from parameters the caller actually set
        # (None means "use the server default"); snake_case maps to the
        # API's camelCase wire names.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The caller-supplied patch document is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`: the patch media type determines how the
        # server interprets the body (JSON patch, merge patch, strategic merge
        # patch or server-side apply).
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP round trip (and optional async dispatch)
        # to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2HorizontalPodAutoscaler',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ partially update status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ partially update status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_horizontal_pod_autoscaler_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V2HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_horizontal_pod_autoscaler(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ read the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ read the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE: locals() is taken before any other local is created, so it holds
+ # exactly self, name, namespace and the raw kwargs dict.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then fold the accepted ones into
+ # local_var_params so every parameter is looked up the same way below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ # The {name} and {namespace} placeholders of the path template below are
+ # substituted from this mapping by the api_client.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V2HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ read status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ read status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() is captured first, so it contains only self, name, namespace
+ # and the raw kwargs dict; kwargs is merged in and removed below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Unknown keyword arguments are a hard error (generated-client contract).
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_horizontal_pod_autoscaler_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+
+ collection_formats = {}
+
+ # Fills the {name}/{namespace} placeholders of the /status path below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V2HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_horizontal_pod_autoscaler(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ replace the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V2HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_horizontal_pod_autoscaler_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler # noqa: E501
+
+ replace the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V2HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() is captured first, so it holds exactly self, the positional
+ # arguments and the raw kwargs dict; kwargs is merged in and removed below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Unknown keyword arguments are a hard error (generated-client contract).
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_horizontal_pod_autoscaler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_horizontal_pod_autoscaler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Query parameters: snake_case Python arguments map to the camelCase
+ # names the API server expects (dry_run -> dryRun, etc.).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V2HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ replace status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V2HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V2HorizontalPodAutoscaler
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_horizontal_pod_autoscaler_status # noqa: E501
+
+ replace status of the specified HorizontalPodAutoscaler # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the HorizontalPodAutoscaler (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V2HorizontalPodAutoscaler body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V2HorizontalPodAutoscaler, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() is captured first, so it holds exactly self, the positional
+ # arguments and the raw kwargs dict; kwargs is merged in and removed below.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Unknown keyword arguments are a hard error (generated-client contract).
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_horizontal_pod_autoscaler_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_horizontal_pod_autoscaler_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Query parameters: snake_case Python arguments map to the camelCase
+ # names the API server expects (dry_run -> dryRun, etc.).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/autoscaling/v2/namespaces/{namespace}/horizontalpodautoscalers/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V2HorizontalPodAutoscaler', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/batch_api.py b/contrib/python/kubernetes/kubernetes/client/api/batch_api.py
new file mode 100644
index 0000000000..8ddc0030dc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/batch_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class BatchApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller does not supply one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: request only the deserialized body, not the full
        # (data, status_code, headers) tuple returned by the variant below.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call arguments; the generated code handles every
        # parameter uniformly through this dict.
        local_var_params = locals()

        # This endpoint takes no endpoint-specific parameters; only the
        # request options common to every generated method are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early with a descriptive error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/batch_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/batch_v1_api.py
new file mode 100644
index 0000000000..bf52d5049d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/batch_v1_api.py
@@ -0,0 +1,3524 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class BatchV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller does not supply one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
+
    def create_namespaced_cron_job(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_cron_job  # noqa: E501

        create a CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_cron_job(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1CronJob body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1CronJob
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: request only the deserialized body, not the full
        # (data, status_code, headers) tuple returned by the variant below.
        kwargs['_return_http_data_only'] = True
        return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
    def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_cron_job  # noqa: E501

        create a CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_cron_job_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1CronJob body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call arguments (namespace, body, kwargs); the generated
        # code handles every parameter uniformly through this dict.
        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Request options common to every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early with a descriptive error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_cron_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        # Map snake_case python names onto the camelCase query keys the API
        # server expects; parameters left as None are omitted entirely.
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/cronjobs', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CronJob',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def create_namespaced_job(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_job  # noqa: E501

        create a Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_job(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Job body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Job
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: request only the deserialized body, not the full
        # (data, status_code, headers) tuple returned by the variant below.
        kwargs['_return_http_data_only'] = True
        return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
    def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_job  # noqa: E501

        create a Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Job body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call arguments (namespace, body, kwargs); the generated
        # code handles every parameter uniformly through this dict.
        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Request options common to every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early with a descriptive error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_job`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        # Map snake_case python names onto the camelCase query keys the API
        # server expects; parameters left as None are omitted entirely.
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Job',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def delete_collection_namespaced_cron_job(self, namespace, **kwargs):  # noqa: E501
        """delete_collection_namespaced_cron_job  # noqa: E501

        delete collection of CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_cron_job(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: request only the deserialized body, not the full
        # (data, status_code, headers) tuple returned by the variant below.
        kwargs['_return_http_data_only'] = True
        return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs)  # noqa: E501
+
+ def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_cron_job # noqa: E501
+
+ delete collection of CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_cron_job_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_cron_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/cronjobs', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_job(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_job # noqa: E501
+
+ delete collection of Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_job # noqa: E501
+
+ delete collection of Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def delete_namespaced_cron_job(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_cron_job  # noqa: E501

    delete a CronJob  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously, in which case the request thread is returned.
    >>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: 'All' processes every dry-run stage without persisting the deletion.
    :param int grace_period_seconds: non-negative seconds to wait before deletion; 0 deletes immediately, nil uses the per-type default.
    :param bool orphan_dependents: deprecated; mutually exclusive with propagation_policy. Adds/removes the \"orphan\" finalizer.
    :param str propagation_policy: garbage-collection mode: 'Orphan', 'Background' or 'Foreground'; mutually exclusive with orphan_dependents.
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout tuple.
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience variant discards the status code and headers that
    # the *_with_http_info form would also return.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_cron_job_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
+ def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_cron_job # noqa: E501
+
+ delete a CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_cron_job_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() is captured before any other local is created, so it holds
+ # exactly the declared arguments plus the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ # Operation-specific parameters accepted by this endpoint.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ # Generic transport-control parameters shared by every operation.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the valid ones.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_cron_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`") # noqa: E501
+
+ collection_formats = {}
+
+ # Values substituted into the URL template passed to call_api below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Only explicitly supplied (non-None) options become query items.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP DELETE to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def delete_namespaced_job(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_job  # noqa: E501

    delete a Job  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously, in which case the request thread is returned.
    >>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: 'All' processes every dry-run stage without persisting the deletion.
    :param int grace_period_seconds: non-negative seconds to wait before deletion; 0 deletes immediately, nil uses the per-type default.
    :param bool orphan_dependents: deprecated; mutually exclusive with propagation_policy. Adds/removes the \"orphan\" finalizer.
    :param str propagation_policy: garbage-collection mode: 'Orphan', 'Background' or 'Foreground'; mutually exclusive with orphan_dependents.
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout tuple.
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience variant discards the status code and headers that
    # the *_with_http_info form would also return.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_job_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
+ def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_job # noqa: E501
+
+ delete a Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Job (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() is captured before any other local is created, so it holds
+ # exactly the declared arguments plus the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ # Operation-specific parameters accepted by this endpoint.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ # Generic transport-control parameters shared by every operation.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the valid ones.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_job`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`") # noqa: E501
+
+ collection_formats = {}
+
+ # Values substituted into the URL template passed to call_api below.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Only explicitly supplied (non-None) options become query items.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP DELETE to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    get available resources  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously, in which case the request thread is returned.
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout tuple.
    :return: V1APIResourceList
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience variant discards the status code and headers that
    # the *_with_http_info form would also return.
    kwargs['_return_http_data_only'] = True
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() is captured before any other local is created, so it holds
+ # exactly the declared arguments plus the raw 'kwargs' dict.
+ local_var_params = locals()
+
+ # This endpoint takes no operation-specific parameters, only the
+ # generic transport-control ones.
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the valid ones.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Discovery GET has no request body.
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP GET to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/batch/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def list_cron_job_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_cron_job_for_all_namespaces  # noqa: E501

    list or watch objects of kind CronJob  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously, in which case the request thread is returned.
    >>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers may ignore this and send bookmarks at their discretion. Ignored when not watching.
    :param str _continue: continuation token from a previous chunked list result; not supported when watch is true.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of items per chunked list response; the server sets `continue` metadata when more remain. Not supported when watch is true.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: constraint on which resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resource_version is applied to the list call. Defaults to unset.
    :param bool send_initial_events: with watch=True, begin the stream with synthetic events for the current state, followed by a \"Bookmark\" event; requires resource_version_match.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: stream add/update/remove notifications instead of listing once. Specify resourceVersion.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) timeout tuple.
    :return: V1CronJobList
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience variant discards the status code and headers that
    # the *_with_http_info form would also return.
    kwargs['_return_http_data_only'] = True
    return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
+ def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_cron_job_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cron_job_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CronJobList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_cron_job_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/cronjobs', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CronJobList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_job_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_job_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_job_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1JobList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_job_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_job_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_job_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1JobList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_job_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/jobs', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1JobList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_cron_job(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_cron_job # noqa: E501
+
+ list or watch objects of kind CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_cron_job(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CronJobList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_cron_job # noqa: E501
+
+ list or watch objects of kind CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_cron_job_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CronJobList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_cron_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/cronjobs', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CronJobList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_job(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_job # noqa: E501
+
+ list or watch objects of kind Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_job(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1JobList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_job_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_job_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_job # noqa: E501
+
+ list or watch objects of kind Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1JobList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1JobList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_cron_job # noqa: E501
+
+ partially update the specified CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CronJob
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_cron_job # noqa: E501
+
+ partially update the specified CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_cron_job" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CronJob', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_cron_job_status # noqa: E501
+
+ partially update status of the specified CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_cron_job_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CronJob
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+    def patch_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """patch_namespaced_cron_job_status  # noqa: E501
+
+        partially update status of the specified CronJob  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the CronJob (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() is captured before any other local is bound, so it contains
+        # exactly the declared parameters plus the raw 'kwargs' dict.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fold keyword arguments into the parameter map, rejecting any name
+        # this operation does not define.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_namespaced_cron_job_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job_status`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Only non-None optional parameters are sent; query keys use the
+        # server's camelCase names rather than the Python snake_case ones.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1CronJob',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_namespaced_job(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_job # noqa: E501
+
+ partially update the specified Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Job (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Job
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+    def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """patch_namespaced_job  # noqa: E501
+
+        partially update the specified Job  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Job (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() is captured before any other local is bound, so it contains
+        # exactly the declared parameters plus the raw 'kwargs' dict.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fold keyword arguments into the parameter map, rejecting any name
+        # this operation does not define.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_namespaced_job" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Only non-None optional parameters are sent; query keys use the
+        # server's camelCase names rather than the Python snake_case ones.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Job',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_namespaced_job_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_job_status # noqa: E501
+
+ partially update status of the specified Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Job (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Job
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_job_status # noqa: E501
+
+ partially update status of the specified Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Job (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_job_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Job', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_cron_job(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_cron_job # noqa: E501
+
+ read the specified CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_cron_job(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CronJob
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_cron_job_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_cron_job  # noqa: E501

        read the specified CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_cron_job_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CronJob (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must stay the FIRST statement:
        # locals() keys (self, name, namespace, kwargs) are relied on below,
        # so binding any other local before this line would change behavior.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted optional
        # ones into the snapshot so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_cron_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CronJob',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_cron_job_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_cron_job_status # noqa: E501
+
+ read status of the specified CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_cron_job_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CronJob
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_cron_job_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_cron_job_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_cron_job_status  # noqa: E501

        read status of the specified CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_cron_job_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CronJob (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must stay the FIRST statement:
        # locals() keys (self, name, namespace, kwargs) are relied on below,
        # so binding any other local before this line would change behavior.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted optional
        # ones into the snapshot so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_cron_job_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job_status`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CronJob',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_job(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_job # noqa: E501
+
+ read the specified Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_job(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Job (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Job
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_job_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_job  # noqa: E501

        read the specified Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Job (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must stay the FIRST statement:
        # locals() keys (self, name, namespace, kwargs) are relied on below,
        # so binding any other local before this line would change behavior.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted optional
        # ones into the snapshot so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_job`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Job',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_job_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_job_status # noqa: E501
+
+ read status of the specified Job # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Job (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Job
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_job_status  # noqa: E501

        read status of the specified Job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Job (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must stay the FIRST statement:
        # locals() keys (self, name, namespace, kwargs) are relied on below,
        # so binding any other local before this line would change behavior.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted optional
        # ones into the snapshot so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_job_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Job',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_cron_job # noqa: E501
+
+ replace the specified CronJob # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_cron_job(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CronJob (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1CronJob body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CronJob
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def replace_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_cron_job  # noqa: E501

        replace the specified CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CronJob (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1CronJob body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call's arguments. This must stay the FIRST statement:
        # locals() keys (self, name, namespace, body, kwargs) are relied on
        # below, so binding any other local first would change behavior.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted optional
        # ones into the snapshot so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_cron_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job`")  # noqa: E501

        collection_formats = {}

        # Path templating values for {name} and {namespace} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query parameters; snake_case kwargs map to the API's
        # camelCase query keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CronJob',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+    def replace_namespaced_cron_job_status(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_cron_job_status  # noqa: E501
+
+        replace status of the specified CronJob  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_cron_job_status(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the CronJob (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1CronJob body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1CronJob
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so callers receive
+        # only the deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
+    def replace_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_cron_job_status  # noqa: E501
+
+        replace status of the specified CronJob  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the CronJob (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1CronJob body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1CronJob, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared parameters plus the 'kwargs' dict;
+        # the validation loop below flattens kwargs into this mapping.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in all_params, then merge
+        # the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_namespaced_cron_job_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job_status`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Path template substitutions for {name} and {namespace}.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional query parameters are only sent when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1CronJob',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_namespaced_job(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_job  # noqa: E501
+
+        replace the specified Job  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Job (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Job body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Job
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so callers receive
+        # only the deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
+    def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_job  # noqa: E501
+
+        replace the specified Job  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Job (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Job body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared parameters plus the 'kwargs' dict;
+        # the validation loop below flattens kwargs into this mapping.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in all_params, then merge
+        # the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_namespaced_job" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Path template substitutions for {name} and {namespace}.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional query parameters are only sent when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Job',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_namespaced_job_status(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_job_status  # noqa: E501
+
+        replace status of the specified Job  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Job (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Job body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Job
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so callers receive
+        # only the deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
+    def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_job_status  # noqa: E501
+
+        replace status of the specified Job  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Job (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Job body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared parameters plus the 'kwargs' dict;
+        # the validation loop below flattens kwargs into this mapping.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in all_params, then merge
+        # the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_namespaced_job_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Path template substitutions for {name} and {namespace}.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional query parameters are only sent when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Job',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/certificates_api.py b/contrib/python/kubernetes/kubernetes/client/api/certificates_api.py
new file mode 100644
index 0000000000..d2e10d6d52
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/certificates_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CertificatesApi(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when none is supplied.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_api_group(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIGroup
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so callers receive
+        # only the deserialized body rather than the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the 'kwargs' dict; the validation loop below
+        # flattens it into this mapping. This endpoint takes no API params.
+        local_var_params = locals()
+
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in all_params, then merge
+        # the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_group" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/certificates.k8s.io/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIGroup',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/certificates_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/certificates_v1_api.py
new file mode 100644
index 0000000000..0a1699a5ab
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/certificates_v1_api.py
@@ -0,0 +1,1997 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CertificatesV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_certificate_signing_request(self, body, **kwargs): # noqa: E501
+ """create_certificate_signing_request # noqa: E501
+
+ create a CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_certificate_signing_request(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_certificate_signing_request_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_certificate_signing_request_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_certificate_signing_request # noqa: E501
+
+ create a CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_certificate_signing_request_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_certificate_signing_request" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_certificate_signing_request`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CertificateSigningRequest', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_certificate_signing_request(self, name, **kwargs): # noqa: E501
+ """delete_certificate_signing_request # noqa: E501
+
+ delete a CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_certificate_signing_request(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_certificate_signing_request_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_certificate_signing_request_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_certificate_signing_request # noqa: E501
+
+ delete a CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_certificate_signing_request_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_certificate_signing_request" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_certificate_signing_request`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_certificate_signing_request(self, **kwargs): # noqa: E501
+ """delete_collection_certificate_signing_request # noqa: E501
+
+ delete collection of CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_certificate_signing_request(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_certificate_signing_request_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_certificate_signing_request_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_certificate_signing_request # noqa: E501
+
+ delete collection of CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_certificate_signing_request_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_certificate_signing_request" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Return the V1APIResourceList describing the resources served under
    the certificates.k8s.io/v1 API group.  The call is synchronous
    unless ``async_req=True`` is passed, in which case the request
    thread is returned and ``thread.get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1APIResourceList, or the request thread when async
    """
    # This convenience wrapper returns only the deserialized body, not
    # the (data, status, headers) tuple of the *_with_http_info variant.
    kwargs.update(_return_http_data_only=True)
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Fetch the V1APIResourceList for the certificates.k8s.io/v1 group,
    returning the full response tuple.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: return only the response data,
        without the status code and headers
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: tuple(V1APIResourceList, status_code(int),
        headers(HTTPHeaderDict)), or the request thread when async
    :raises ApiTypeError: if an unrecognized keyword argument is given
    """
    local_var_params = locals()

    # This endpoint takes no operation-specific parameters; only the
    # common client options are legal keywords.
    all_params = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/certificates.k8s.io/v1/', 'GET',
        {},    # no path parameters
        [],    # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def list_certificate_signing_request(self, **kwargs):  # noqa: E501
    """list_certificate_signing_request  # noqa: E501

    List or watch objects of kind CertificateSigningRequest.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread and call ``thread.get()`` for the result.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events;
        servers may ignore this flag
    :param str _continue: continuation token from a previous paged list
    :param str field_selector: restrict returned objects by fields
    :param str label_selector: restrict returned objects by labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: constrain which resource versions may
        serve the request (see the Kubernetes API concepts docs)
    :param str resource_version_match: how resourceVersion is applied
        to list calls
    :param bool send_initial_events: with watch=True, begin the stream
        with synthetic events describing the current collection state
    :param int timeout_seconds: server-side timeout for the list/watch
    :param bool watch: stream add/update/remove notifications instead
        of returning a single list
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1CertificateSigningRequestList, or the request thread
        when async
    """
    # Return only the deserialized body; the *_with_http_info variant
    # provides the (data, status, headers) tuple when needed.
    kwargs.update(_return_http_data_only=True)
    return self.list_certificate_signing_request_with_http_info(**kwargs)  # noqa: E501
def list_certificate_signing_request_with_http_info(self, **kwargs):  # noqa: E501
    """list_certificate_signing_request  # noqa: E501

    List or watch objects of kind CertificateSigningRequest, returning
    the full response tuple.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events;
        servers may ignore this flag
    :param str _continue: continuation token from a previous paged list
    :param str field_selector: restrict returned objects by fields
    :param str label_selector: restrict returned objects by labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: constrain which resource versions may
        serve the request (see the Kubernetes API concepts docs)
    :param str resource_version_match: how resourceVersion is applied
        to list calls
    :param bool send_initial_events: with watch=True, begin the stream
        with synthetic events describing the current collection state
    :param int timeout_seconds: server-side timeout for the list/watch
    :param bool watch: stream add/update/remove notifications instead
        of returning a single list
    :param _return_http_data_only: return only the response data,
        without the status code and headers
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: tuple(V1CertificateSigningRequestList, status_code(int),
        headers(HTTPHeaderDict)), or the request thread when async
    :raises ApiTypeError: if an unrecognized keyword argument is given
    """
    local_var_params = locals()

    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_certificate_signing_request" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Map each python argument name to its wire-format query key, in the
    # order the client has always emitted them; only non-None values are
    # sent.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('send_initial_events', 'sendInitialEvents'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch')):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    # HTTP header `Accept` -- includes the stream media types so the
    # same endpoint can serve watch responses.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/certificates.k8s.io/v1/certificatesigningrequests', 'GET',
        {},    # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1CertificateSigningRequestList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def patch_certificate_signing_request(self, name, body, **kwargs):  # noqa: E501
    """patch_certificate_signing_request  # noqa: E501

    Partially update the specified CertificateSigningRequest.
    Synchronous by default; pass ``async_req=True`` to receive the
    request thread and call ``thread.get()`` for the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the CertificateSigningRequest (required)
    :param object body: patch document (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: when present, modifications are not persisted;
        the only valid directive is 'All'
    :param str field_manager: name associated with the actor making
        these changes; required for apply patches, optional otherwise
    :param str field_validation: how the server handles unknown or
        duplicate fields in the patch (Ignore, Warn, or Strict)
    :param bool force: "force" apply requests, re-acquiring fields
        owned by other managers; must be unset for non-apply patches
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1CertificateSigningRequest, or the request thread when
        async
    """
    # Return only the deserialized body; the *_with_http_info variant
    # provides the (data, status, headers) tuple when needed.
    kwargs.update(_return_http_data_only=True)
    return self.patch_certificate_signing_request_with_http_info(name, body, **kwargs)  # noqa: E501
+ def patch_certificate_signing_request_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_certificate_signing_request # noqa: E501
+
+ partially update the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_certificate_signing_request_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_certificate_signing_request" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_certificate_signing_request`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_certificate_signing_request`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CertificateSigningRequest', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_certificate_signing_request_approval(self, name, body, **kwargs): # noqa: E501
+ """patch_certificate_signing_request_approval # noqa: E501
+
+ partially update approval of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_certificate_signing_request_approval(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_certificate_signing_request_approval_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_certificate_signing_request_approval_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_certificate_signing_request_approval  # noqa: E501

        partially update approval of the specified CertificateSigningRequest  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_certificate_signing_request_approval_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CertificateSigningRequest (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() here captures exactly (self, name, body, kwargs); accepted
        # keyword arguments are merged into this dict below so every parameter
        # can be looked up by name.
        local_var_params = locals()

        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Client-internal options accepted alongside the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any unexpected keyword argument, then flatten the accepted
        # ones out of the 'kwargs' entry into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_certificate_signing_request_approval" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_certificate_signing_request_approval`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_certificate_signing_request_approval`")  # noqa: E501

        # Passed through to call_api; empty for this endpoint.
        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Only explicitly supplied (non-None) optional parameters become query
        # parameters; snake_case names map to the API's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Per the docstring, call_api returns the (data, status, headers)
        # tuple, or the request thread when async_req is set.
        return self.api_client.call_api(
            '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/approval', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CertificateSigningRequest',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_certificate_signing_request_status(self, name, body, **kwargs): # noqa: E501
+ """patch_certificate_signing_request_status # noqa: E501
+
+ partially update status of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_certificate_signing_request_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_certificate_signing_request_status_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_certificate_signing_request_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_certificate_signing_request_status  # noqa: E501

        partially update status of the specified CertificateSigningRequest  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_certificate_signing_request_status_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CertificateSigningRequest (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() here captures exactly (self, name, body, kwargs); accepted
        # keyword arguments are merged into this dict below so every parameter
        # can be looked up by name.
        local_var_params = locals()

        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Client-internal options accepted alongside the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any unexpected keyword argument, then flatten the accepted
        # ones out of the 'kwargs' entry into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_certificate_signing_request_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_certificate_signing_request_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_certificate_signing_request_status`")  # noqa: E501

        # Passed through to call_api; empty for this endpoint.
        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Only explicitly supplied (non-None) optional parameters become query
        # parameters; snake_case names map to the API's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Per the docstring, call_api returns the (data, status, headers)
        # tuple, or the request thread when async_req is set.
        return self.api_client.call_api(
            '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CertificateSigningRequest',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_certificate_signing_request(self, name, **kwargs): # noqa: E501
+ """read_certificate_signing_request # noqa: E501
+
+ read the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_certificate_signing_request(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_certificate_signing_request_with_http_info(name, **kwargs) # noqa: E501
+
    def read_certificate_signing_request_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_certificate_signing_request  # noqa: E501

        read the specified CertificateSigningRequest  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_certificate_signing_request_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CertificateSigningRequest (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() here captures exactly (self, name, kwargs); accepted
        # keyword arguments are merged into this dict below so every parameter
        # can be looked up by name.
        local_var_params = locals()

        all_params = [
            'name',
            'pretty'
        ]
        # Client-internal options accepted alongside the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any unexpected keyword argument, then flatten the accepted
        # ones out of the 'kwargs' entry into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_certificate_signing_request" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_certificate_signing_request`")  # noqa: E501

        # Passed through to call_api; empty for this endpoint.
        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Only an explicitly supplied (non-None) 'pretty' becomes a query
        # parameter.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Per the docstring, call_api returns the (data, status, headers)
        # tuple, or the request thread when async_req is set.
        return self.api_client.call_api(
            '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CertificateSigningRequest',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_certificate_signing_request_approval(self, name, **kwargs): # noqa: E501
+ """read_certificate_signing_request_approval # noqa: E501
+
+ read approval of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_certificate_signing_request_approval(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_certificate_signing_request_approval_with_http_info(name, **kwargs) # noqa: E501
+
    def read_certificate_signing_request_approval_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_certificate_signing_request_approval  # noqa: E501

        read approval of the specified CertificateSigningRequest  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_certificate_signing_request_approval_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CertificateSigningRequest (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() here captures exactly (self, name, kwargs); accepted
        # keyword arguments are merged into this dict below so every parameter
        # can be looked up by name.
        local_var_params = locals()

        all_params = [
            'name',
            'pretty'
        ]
        # Client-internal options accepted alongside the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any unexpected keyword argument, then flatten the accepted
        # ones out of the 'kwargs' entry into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_certificate_signing_request_approval" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_certificate_signing_request_approval`")  # noqa: E501

        # Passed through to call_api; empty for this endpoint.
        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Only an explicitly supplied (non-None) 'pretty' becomes a query
        # parameter.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Per the docstring, call_api returns the (data, status, headers)
        # tuple, or the request thread when async_req is set.
        return self.api_client.call_api(
            '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/approval', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CertificateSigningRequest',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_certificate_signing_request_status(self, name, **kwargs): # noqa: E501
+ """read_certificate_signing_request_status # noqa: E501
+
+ read status of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_certificate_signing_request_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_certificate_signing_request_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_certificate_signing_request_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_certificate_signing_request_status # noqa: E501
+
+ read status of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_certificate_signing_request_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_certificate_signing_request_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_certificate_signing_request_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CertificateSigningRequest', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_certificate_signing_request(self, name, body, **kwargs): # noqa: E501
+ """replace_certificate_signing_request # noqa: E501
+
+ replace the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_certificate_signing_request(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_certificate_signing_request_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_certificate_signing_request_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_certificate_signing_request # noqa: E501
+
+ replace the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_certificate_signing_request_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_certificate_signing_request" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_certificate_signing_request`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_certificate_signing_request`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CertificateSigningRequest', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_certificate_signing_request_approval(self, name, body, **kwargs): # noqa: E501
+ """replace_certificate_signing_request_approval # noqa: E501
+
+ replace approval of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_certificate_signing_request_approval(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_certificate_signing_request_approval_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_certificate_signing_request_approval_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_certificate_signing_request_approval # noqa: E501
+
+ replace approval of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_certificate_signing_request_approval_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_certificate_signing_request_approval" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_certificate_signing_request_approval`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_certificate_signing_request_approval`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/approval', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CertificateSigningRequest', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_certificate_signing_request_status(self, name, body, **kwargs): # noqa: E501
+ """replace_certificate_signing_request_status # noqa: E501
+
+ replace status of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_certificate_signing_request_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CertificateSigningRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_certificate_signing_request_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_certificate_signing_request_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_certificate_signing_request_status # noqa: E501
+
+ replace status of the specified CertificateSigningRequest # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_certificate_signing_request_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CertificateSigningRequest (required)
+ :param V1CertificateSigningRequest body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CertificateSigningRequest, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_certificate_signing_request_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_certificate_signing_request_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_certificate_signing_request_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CertificateSigningRequest', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/certificates_v1alpha1_api.py b/contrib/python/kubernetes/kubernetes/client/api/certificates_v1alpha1_api.py
new file mode 100644
index 0000000000..74ddf3ae6a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/certificates_v1alpha1_api.py
@@ -0,0 +1,1169 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CertificatesV1alpha1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        """Initialize the API wrapper.
+
+        :param api_client: optional ApiClient instance used to perform HTTP
+            calls; a default ApiClient() is constructed when omitted.
+        """
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_cluster_trust_bundle(self, body, **kwargs):  # noqa: E501
+        """create_cluster_trust_bundle  # noqa: E501
+
+        create a ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_cluster_trust_bundle(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ClusterTrustBundle body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ClusterTrustBundle
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: delegate to the *_with_http_info variant and
+        # return only the deserialized body (status code and headers dropped).
+        kwargs['_return_http_data_only'] = True
+        return self.create_cluster_trust_bundle_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_cluster_trust_bundle_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_cluster_trust_bundle  # noqa: E501
+
+        create a ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_cluster_trust_bundle_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ClusterTrustBundle body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ClusterTrustBundle, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the call arguments (self, body, kwargs); used below for
+        # validation and request assembly.
+        local_var_params = locals()
+
+        # Whitelist of keyword arguments this endpoint accepts.
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown kwargs early, then flatten accepted ones into
+        # local_var_params so they can be read uniformly below.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_cluster_trust_bundle" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_trust_bundle`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Map snake_case Python parameters to their camelCase wire-format
+        # query-string keys, skipping any that were not supplied.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch the request; when async_req=True this returns a thread
+        # whose .get() yields the (data, status, headers) tuple.
+        return self.api_client.call_api(
+            '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterTrustBundle',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_cluster_trust_bundle(self, name, **kwargs):  # noqa: E501
+        """delete_cluster_trust_bundle  # noqa: E501
+
+        delete a ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_cluster_trust_bundle(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterTrustBundle (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: delegate to the *_with_http_info variant and
+        # return only the deserialized body (status code and headers dropped).
+        kwargs['_return_http_data_only'] = True
+        return self.delete_cluster_trust_bundle_with_http_info(name, **kwargs)  # noqa: E501
+
+    def delete_cluster_trust_bundle_with_http_info(self, name, **kwargs):  # noqa: E501
+        """delete_cluster_trust_bundle  # noqa: E501
+
+        delete a ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_cluster_trust_bundle_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterTrustBundle (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the call arguments (self, name, kwargs); used below for
+        # validation and request assembly.
+        local_var_params = locals()
+
+        # Whitelist of keyword arguments this endpoint accepts.
+        all_params = [
+            'name',
+            'pretty',
+            'dry_run',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'body'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown kwargs early, then flatten accepted ones into
+        # local_var_params so they can be read uniformly below.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_cluster_trust_bundle" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_trust_bundle`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'name' is interpolated into the {name} segment of the URL path.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Map snake_case Python parameters to their camelCase wire-format
+        # query-string keys, skipping any that were not supplied.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch the request; when async_req=True this returns a thread
+        # whose .get() yields the (data, status, headers) tuple.
+        return self.api_client.call_api(
+            '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_cluster_trust_bundle(self, **kwargs):  # noqa: E501
+        """delete_collection_cluster_trust_bundle  # noqa: E501
+
+        delete collection of ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_cluster_trust_bundle(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: delegate to the *_with_http_info variant and
+        # return only the deserialized body (status code and headers dropped).
+        kwargs['_return_http_data_only'] = True
+        return self.delete_collection_cluster_trust_bundle_with_http_info(**kwargs)  # noqa: E501
+
+ def delete_collection_cluster_trust_bundle_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_cluster_trust_bundle # noqa: E501
+
+ delete collection of ClusterTrustBundle # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_trust_bundle_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_cluster_trust_bundle" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1alpha1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_cluster_trust_bundle(self, **kwargs): # noqa: E501
+ """list_cluster_trust_bundle # noqa: E501
+
+ list or watch objects of kind ClusterTrustBundle # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_trust_bundle(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ClusterTrustBundleList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_cluster_trust_bundle_with_http_info(**kwargs) # noqa: E501
+
+ def list_cluster_trust_bundle_with_http_info(self, **kwargs): # noqa: E501
+ """list_cluster_trust_bundle # noqa: E501
+
+ list or watch objects of kind ClusterTrustBundle # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_trust_bundle_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ClusterTrustBundleList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_cluster_trust_bundle" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ClusterTrustBundleList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_cluster_trust_bundle(self, name, body, **kwargs): # noqa: E501
+ """patch_cluster_trust_bundle # noqa: E501
+
+ partially update the specified ClusterTrustBundle # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_trust_bundle(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterTrustBundle (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ClusterTrustBundle
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_trust_bundle_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_cluster_trust_bundle_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_cluster_trust_bundle # noqa: E501
+
+ partially update the specified ClusterTrustBundle # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_trust_bundle_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterTrustBundle (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ClusterTrustBundle, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_cluster_trust_bundle" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_trust_bundle`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_trust_bundle`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ClusterTrustBundle', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+    def read_cluster_trust_bundle(self, name, **kwargs):  # noqa: E501
+        """read_cluster_trust_bundle  # noqa: E501
+
+        read the specified ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_cluster_trust_bundle(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterTrustBundle (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ClusterTrustBundle
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "body only" mode so callers get the
+        # deserialized object instead of the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.read_cluster_trust_bundle_with_http_info(name, **kwargs)  # noqa: E501
+
+    def read_cluster_trust_bundle_with_http_info(self, name, **kwargs):  # noqa: E501
+        """read_cluster_trust_bundle  # noqa: E501
+
+        read the specified ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_cluster_trust_bundle_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterTrustBundle (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ClusterTrustBundle, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all arguments (including the raw kwargs dict) so the
+        # generated code below can treat everything uniformly by name.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'pretty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_cluster_trust_bundle" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_trust_bundle`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'name' is interpolated into the request path; 'pretty' is sent as a
+        # query parameter only when explicitly provided.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterTrustBundle',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_cluster_trust_bundle(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_trust_bundle  # noqa: E501
+
+        replace the specified ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_trust_bundle(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterTrustBundle (required)
+        :param V1alpha1ClusterTrustBundle body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ClusterTrustBundle
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "body only" mode so callers get the
+        # deserialized object instead of the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_cluster_trust_bundle_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_cluster_trust_bundle_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_trust_bundle  # noqa: E501
+
+        replace the specified ClusterTrustBundle  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_trust_bundle_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterTrustBundle (required)
+        :param V1alpha1ClusterTrustBundle body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ClusterTrustBundle, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all arguments (including the raw kwargs dict) so the
+        # generated code below can treat everything uniformly by name.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_cluster_trust_bundle" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_trust_bundle`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_trust_bundle`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'name' goes into the request path; the optional modifiers become
+        # query parameters (note the snake_case -> camelCase renames).
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/certificates.k8s.io/v1alpha1/clustertrustbundles/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterTrustBundle',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/coordination_api.py b/contrib/python/kubernetes/kubernetes/client/api/coordination_api.py
new file mode 100644
index 0000000000..02145bc9b5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/coordination_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CoordinationApi(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when none is injected.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_api_group(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIGroup
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "body only" mode so callers get the
+        # deserialized object instead of the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all arguments (including the raw kwargs dict) so the
+        # generated code below can treat everything uniformly by name.
+        local_var_params = locals()
+
+        # This endpoint takes no API parameters; only the framework-level
+        # keywords are accepted.
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_group" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/coordination.k8s.io/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIGroup',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/coordination_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/coordination_v1_api.py
new file mode 100644
index 0000000000..6403ad82fd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/coordination_v1_api.py
@@ -0,0 +1,1392 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CoordinationV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when none is injected.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_namespaced_lease(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_lease  # noqa: E501
+
+        create a Lease  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_lease(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Lease body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Lease
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "body only" mode so callers get the
+        # deserialized object instead of the (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.create_namespaced_lease_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
+    def create_namespaced_lease_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_lease  # noqa: E501
+
+        create a Lease  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_lease_with_http_info(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Lease body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Lease, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot all arguments (including the raw kwargs dict) so the
+        # generated code below can treat everything uniformly by name.
+        local_var_params = locals()
+
+        all_params = [
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_lease" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_lease`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_lease`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'namespace' goes into the request path; the optional modifiers
+        # become query parameters (note the snake_case -> camelCase renames).
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Lease',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
    def delete_collection_namespaced_lease(self, namespace, **kwargs):  # noqa: E501
        """delete_collection_namespaced_lease  # noqa: E501

        delete collection of Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_lease(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: request only the deserialized body, then
        # delegate everything else to the *_with_http_info variant (which
        # would otherwise return (data, status_code, headers)).
        kwargs['_return_http_data_only'] = True
        return self.delete_collection_namespaced_lease_with_http_info(namespace, **kwargs)  # noqa: E501
+
    def delete_collection_namespaced_lease_with_http_info(self, namespace, **kwargs):  # noqa: E501
        """delete_collection_namespaced_lease  # noqa: E501

        delete collection of Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_lease_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be called first so it captures exactly the
        # explicit parameters plus the raw `kwargs` dict, keyed by name.
        local_var_params = locals()

        # Parameter names accepted by this endpoint (API params first,
        # transport-level options appended below).
        all_params = [
            'namespace',
            'pretty',
            '_continue',
            'dry_run',
            'field_selector',
            'grace_period_seconds',
            'label_selector',
            'limit',
            'orphan_dependents',
            'propagation_policy',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params for uniform lookup below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_namespaced_lease" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_lease`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map the python snake_case parameters onto the API's camelCase
        # query-string keys, skipping anything left unset.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions payload for the DELETE request.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def delete_namespaced_lease(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_lease  # noqa: E501

        delete a Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_lease(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Lease (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: request only the deserialized body, then
        # delegate everything else to the *_with_http_info variant (which
        # would otherwise return (data, status_code, headers)).
        kwargs['_return_http_data_only'] = True
        return self.delete_namespaced_lease_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
    def delete_namespaced_lease_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_lease  # noqa: E501

        delete a Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_lease_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Lease (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be called first so it captures exactly the
        # explicit parameters plus the raw `kwargs` dict, keyed by name.
        local_var_params = locals()

        # Parameter names accepted by this endpoint (API params first,
        # transport-level options appended below).
        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params for uniform lookup below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_lease" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_lease`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_lease`")  # noqa: E501

        collection_formats = {}

        # Both path placeholders ({namespace} and {name}) are filled here.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map the python snake_case parameters onto the API's camelCase
        # query-string keys, skipping anything left unset.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions payload for the DELETE request.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def get_api_resources(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: request only the deserialized body, then
        # delegate everything else to the *_with_http_info variant (which
        # would otherwise return (data, status_code, headers)).
        kwargs['_return_http_data_only'] = True
        return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be called first so it captures the raw
        # `kwargs` dict, keyed by name.
        local_var_params = locals()

        # This discovery endpoint takes no API parameters; only the
        # transport-level options appended below are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params for uniform lookup below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/coordination.k8s.io/v1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_lease_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_lease_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_lease_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1LeaseList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_lease_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_lease_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_lease_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_lease_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1LeaseList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_lease_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/coordination.k8s.io/v1/leases', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1LeaseList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_lease(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_lease # noqa: E501
+
+ list or watch objects of kind Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_lease(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1LeaseList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_lease_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_lease_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_lease # noqa: E501
+
+ list or watch objects of kind Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_lease_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1LeaseList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_lease" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_lease`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1LeaseList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_lease(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_lease # noqa: E501
+
+ partially update the specified Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_lease(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Lease (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Lease
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_lease_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_lease_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_lease  # noqa: E501

        partially update the specified Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_lease_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Lease (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Lease, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshots self, the declared parameters and the raw
        # ``kwargs`` dict.  The optional keyword arguments are merged into
        # this mapping below, so every parameter lookup in the rest of the
        # method goes through ``local_var_params``.  (Generated code: do not
        # introduce locals before this line — they would leak into the map.)
        local_var_params = locals()

        # Endpoint parameters accepted by the API itself.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Client-level options accepted in addition to the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early, then flatten ``kwargs``
        # into ``local_var_params`` so it holds one flat parameter mapping.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_lease" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_lease`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_lease`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_lease`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case Python arguments onto camelCase query-string names;
        # parameters left as None are simply omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP PATCH (and response deserialization into
        # V1Lease) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Lease',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_lease(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_lease # noqa: E501
+
+ read the specified Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_lease(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Lease (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Lease
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_lease_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_lease_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_lease  # noqa: E501

        read the specified Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_lease_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Lease (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Lease, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshots self, the declared parameters and the raw
        # ``kwargs`` dict; optional keyword arguments are merged in below so
        # all parameter lookups go through ``local_var_params``.
        local_var_params = locals()

        # Endpoint parameters plus the client-level options accepted here.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten ``kwargs`` into the
        # parameter mapping.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_lease" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_lease`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_lease`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Only ``pretty`` can appear on the query string for a plain read.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET requests carry no body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP GET (and deserialization into V1Lease) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Lease',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_lease(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_lease # noqa: E501
+
+ replace the specified Lease # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_lease(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Lease (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Lease body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Lease
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_lease_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def replace_namespaced_lease_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_lease  # noqa: E501

        replace the specified Lease  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_lease_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Lease (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Lease body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Lease, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshots self, the declared parameters and the raw
        # ``kwargs`` dict; optional keyword arguments are merged in below so
        # all parameter lookups go through ``local_var_params``.
        local_var_params = locals()

        # Endpoint parameters accepted by the API itself.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Client-level options accepted in addition to the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten ``kwargs`` into the
        # parameter mapping.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_lease" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_lease`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_lease`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_lease`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case Python arguments onto camelCase query-string names;
        # parameters left as None are omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The replacement Lease object is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP PUT (and deserialization into V1Lease) to the
        # shared ApiClient.
        return self.api_client.call_api(
            '/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Lease',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/core_api.py b/contrib/python/kubernetes/kubernetes/client/api/core_api.py
new file mode 100644
index 0000000000..e43bb82ad4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/core_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class CoreApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Client for the unversioned Kubernetes core API group discovery endpoint
    (``/api/``).
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when the caller does
        # not supply one; all HTTP traffic is funneled through it.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_versions(self, **kwargs):  # noqa: E501
        """get_api_versions  # noqa: E501

        get available API versions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_versions(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIVersions
            If the method is called asynchronously,
            returns the request thread.
        """
        # Delegate to the detailed variant, requesting only the deserialized
        # body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.get_api_versions_with_http_info(**kwargs)  # noqa: E501

    def get_api_versions_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_versions  # noqa: E501

        get available API versions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_versions_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIVersions, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshots self and the raw ``kwargs`` dict; the accepted
        # keyword arguments are merged into this mapping below.
        local_var_params = locals()

        # This endpoint takes no API parameters — only client-level options.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten ``kwargs`` into the
        # parameter mapping.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_versions" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        # Discovery endpoint: no path, query, header, form or body payload.
        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP GET (and deserialization into V1APIVersions) to
        # the shared ApiClient.
        return self.api_client.call_api(
            '/api/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIVersions',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/core_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/core_v1_api.py
new file mode 100644
index 0000000000..27c87ab84e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/core_v1_api.py
@@ -0,0 +1,29863 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CoreV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        # Lazily construct a default ApiClient when the caller does not
+        # supply one; all request plumbing is delegated to this client.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def connect_delete_namespaced_pod_proxy(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_pod_proxy  # noqa: E501
+
+        connect DELETE requests to proxy of Pod  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_pod_proxy(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: Path is the URL path to use for the current proxy request to pod.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "data only" so callers get just the
+        # deserialized body; the *_with_http_info variant exposes the full
+        # (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.connect_delete_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
+    def connect_delete_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_pod_proxy  # noqa: E501
+
+        connect DELETE requests to proxy of Pod  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: Path is the URL path to use for the current proxy request to pod.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared args plus the raw 'kwargs' dict;
+        # 'kwargs' is validated against all_params, flattened into this
+        # dict, and then removed below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'path'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_delete_namespaced_pod_proxy" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_delete_namespaced_pod_proxy`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_delete_namespaced_pod_proxy`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def connect_delete_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_pod_proxy_with_path  # noqa: E501
+
+        connect DELETE requests to proxy of Pod  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the URL path to use for the current proxy request to pod.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "data only" so callers get just the
+        # deserialized body; the *_with_http_info variant exposes the full
+        # (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.connect_delete_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs)  # noqa: E501
+
+    def connect_delete_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_pod_proxy_with_path  # noqa: E501
+
+        connect DELETE requests to proxy of Pod  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the URL path to use for the current proxy request to pod.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared args plus the raw 'kwargs' dict;
+        # 'kwargs' is validated against all_params, flattened into this
+        # dict, and then removed below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'path',
+            'path2'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_delete_namespaced_pod_proxy_with_path" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_delete_namespaced_pod_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_delete_namespaced_pod_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'path' is set
+        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
+                                                        local_var_params['path'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `path` when calling `connect_delete_namespaced_pod_proxy_with_path`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+        if 'path' in local_var_params:
+            path_params['path'] = local_var_params['path']  # noqa: E501
+
+        query_params = []
+        # 'path2' feeds the 'path' *query* parameter; the generated name
+        # avoids colliding with the 'path' *path* parameter above.
+        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path2']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def connect_delete_namespaced_service_proxy(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_service_proxy  # noqa: E501
+
+        connect DELETE requests to proxy of Service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ServiceProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "data only" so callers get just the
+        # deserialized body; the *_with_http_info variant exposes the full
+        # (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
+    def connect_delete_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_service_proxy  # noqa: E501
+
+        connect DELETE requests to proxy of Service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ServiceProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared args plus the raw 'kwargs' dict;
+        # 'kwargs' is validated against all_params, flattened into this
+        # dict, and then removed below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'path'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_delete_namespaced_service_proxy" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_delete_namespaced_service_proxy`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_delete_namespaced_service_proxy`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def connect_delete_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_service_proxy_with_path  # noqa: E501
+
+        connect DELETE requests to proxy of Service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ServiceProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "data only" so callers get just the
+        # deserialized body; the *_with_http_info variant exposes the full
+        # (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)  # noqa: E501
+
+    def connect_delete_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
+        """connect_delete_namespaced_service_proxy_with_path  # noqa: E501
+
+        connect DELETE requests to proxy of Service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ServiceProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared args plus the raw 'kwargs' dict;
+        # 'kwargs' is validated against all_params, flattened into this
+        # dict, and then removed below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'path',
+            'path2'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_delete_namespaced_service_proxy_with_path" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_delete_namespaced_service_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_delete_namespaced_service_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'path' is set
+        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
+                                                        local_var_params['path'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `path` when calling `connect_delete_namespaced_service_proxy_with_path`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+        if 'path' in local_var_params:
+            path_params['path'] = local_var_params['path']  # noqa: E501
+
+        query_params = []
+        # 'path2' feeds the 'path' *query* parameter; the generated name
+        # avoids colliding with the 'path' *path* parameter above.
+        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path2']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def connect_delete_node_proxy(self, name, **kwargs):  # noqa: E501
+        """connect_delete_node_proxy  # noqa: E501
+
+        connect DELETE requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_node_proxy(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: Path is the URL path to use for the current proxy request to node.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "data only" so callers get just the
+        # deserialized body; the *_with_http_info variant exposes the full
+        # (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.connect_delete_node_proxy_with_http_info(name, **kwargs)  # noqa: E501
+
+    def connect_delete_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
+        """connect_delete_node_proxy  # noqa: E501
+
+        connect DELETE requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_node_proxy_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: Path is the URL path to use for the current proxy request to node.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared args plus the raw 'kwargs' dict;
+        # 'kwargs' is validated against all_params, flattened into this
+        # dict, and then removed below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'path'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_delete_node_proxy" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_delete_node_proxy`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/nodes/{name}/proxy', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def connect_delete_node_proxy_with_path(self, name, path, **kwargs):  # noqa: E501
+        """connect_delete_node_proxy_with_path  # noqa: E501
+
+        connect DELETE requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_node_proxy_with_path(name, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the URL path to use for the current proxy request to node.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force "data only" so callers get just the
+        # deserialized body; the *_with_http_info variant exposes the full
+        # (data, status, headers) tuple.
+        kwargs['_return_http_data_only'] = True
+        return self.connect_delete_node_proxy_with_path_with_http_info(name, path, **kwargs)  # noqa: E501
+
+    def connect_delete_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
+        """connect_delete_node_proxy_with_path  # noqa: E501
+
+        connect DELETE requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_delete_node_proxy_with_path_with_http_info(name, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the URL path to use for the current proxy request to node.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the declared args plus the raw 'kwargs' dict;
+        # 'kwargs' is validated against all_params, flattened into this
+        # dict, and then removed below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'path',
+            'path2'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_delete_node_proxy_with_path" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_delete_node_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'path' is set
+        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
+                                                        local_var_params['path'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `path` when calling `connect_delete_node_proxy_with_path`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'path' in local_var_params:
+            path_params['path'] = local_var_params['path']  # noqa: E501
+
+        query_params = []
+        # 'path2' feeds the 'path' *query* parameter; the generated name
+        # avoids colliding with the 'path' *path* parameter above.
+        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path2']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/nodes/{name}/proxy/{path}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_get_namespaced_pod_attach(self, name, namespace, **kwargs): # noqa: E501
+ """connect_get_namespaced_pod_attach # noqa: E501
+
+ connect GET requests to attach of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_pod_attach(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodAttachOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
+ :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
+ :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
+ :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
+ :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_get_namespaced_pod_attach_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_get_namespaced_pod_attach  # noqa: E501

        connect GET requests to attach of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_pod_attach_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodAttachOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
        :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
        :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
        :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
        :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit call arguments (self, name, namespace,
        # kwargs); accepted **kwargs entries are merged into it below.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'container',
            'stderr',
            'stdin',
            'stdout',
            'tty'
        ]
        # Generic client options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so all lookups below go through one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_pod_attach" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_pod_attach`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_pod_attach`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters; only sent when supplied non-None.
        query_params = []
        if 'container' in local_var_params and local_var_params['container'] is not None:  # noqa: E501
            query_params.append(('container', local_var_params['container']))  # noqa: E501
        if 'stderr' in local_var_params and local_var_params['stderr'] is not None:  # noqa: E501
            query_params.append(('stderr', local_var_params['stderr']))  # noqa: E501
        if 'stdin' in local_var_params and local_var_params['stdin'] is not None:  # noqa: E501
            query_params.append(('stdin', local_var_params['stdin']))  # noqa: E501
        if 'stdout' in local_var_params and local_var_params['stdout'] is not None:  # noqa: E501
            query_params.append(('stdout', local_var_params['stdout']))  # noqa: E501
        if 'tty' in local_var_params and local_var_params['tty'] is not None:  # noqa: E501
            query_params.append(('tty', local_var_params['tty']))  # noqa: E501

        header_params = {}

        # No form/multipart payload for this GET operation.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/attach', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_namespaced_pod_exec(self, name, namespace, **kwargs): # noqa: E501
+ """connect_get_namespaced_pod_exec # noqa: E501
+
+ connect GET requests to exec of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_pod_exec(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodExecOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str command: Command is the remote command to execute. argv array. Not executed within a shell.
+ :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
+ :param bool stderr: Redirect the standard error stream of the pod for this call.
+ :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
+ :param bool stdout: Redirect the standard output stream of the pod for this call.
+ :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_get_namespaced_pod_exec_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_get_namespaced_pod_exec  # noqa: E501

        connect GET requests to exec of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_pod_exec_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodExecOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str command: Command is the remote command to execute. argv array. Not executed within a shell.
        :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
        :param bool stderr: Redirect the standard error stream of the pod for this call.
        :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
        :param bool stdout: Redirect the standard output stream of the pod for this call.
        :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit call arguments (self, name, namespace,
        # kwargs); accepted **kwargs entries are merged into it below.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'command',
            'container',
            'stderr',
            'stdin',
            'stdout',
            'tty'
        ]
        # Generic client options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so all lookups below go through one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_pod_exec" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_pod_exec`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_pod_exec`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters; only sent when supplied non-None.
        query_params = []
        if 'command' in local_var_params and local_var_params['command'] is not None:  # noqa: E501
            query_params.append(('command', local_var_params['command']))  # noqa: E501
        if 'container' in local_var_params and local_var_params['container'] is not None:  # noqa: E501
            query_params.append(('container', local_var_params['container']))  # noqa: E501
        if 'stderr' in local_var_params and local_var_params['stderr'] is not None:  # noqa: E501
            query_params.append(('stderr', local_var_params['stderr']))  # noqa: E501
        if 'stdin' in local_var_params and local_var_params['stdin'] is not None:  # noqa: E501
            query_params.append(('stdin', local_var_params['stdin']))  # noqa: E501
        if 'stdout' in local_var_params and local_var_params['stdout'] is not None:  # noqa: E501
            query_params.append(('stdout', local_var_params['stdout']))  # noqa: E501
        if 'tty' in local_var_params and local_var_params['tty'] is not None:  # noqa: E501
            query_params.append(('tty', local_var_params['tty']))  # noqa: E501

        header_params = {}

        # No form/multipart payload for this GET operation.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/exec', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_namespaced_pod_portforward(self, name, namespace, **kwargs): # noqa: E501
+ """connect_get_namespaced_pod_portforward # noqa: E501
+
+ connect GET requests to portforward of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_pod_portforward(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodPortForwardOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param int ports: List of ports to forward Required when using WebSockets
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_pod_portforward_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_get_namespaced_pod_portforward_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_get_namespaced_pod_portforward  # noqa: E501

        connect GET requests to portforward of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_pod_portforward_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodPortForwardOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param int ports: List of ports to forward Required when using WebSockets
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit call arguments (self, name, namespace,
        # kwargs); accepted **kwargs entries are merged into it below.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'ports'
        ]
        # Generic client options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so all lookups below go through one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_pod_portforward" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_pod_portforward`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_pod_portforward`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters; only sent when supplied non-None.
        query_params = []
        if 'ports' in local_var_params and local_var_params['ports'] is not None:  # noqa: E501
            query_params.append(('ports', local_var_params['ports']))  # noqa: E501

        header_params = {}

        # No form/multipart payload for this GET operation.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/portforward', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_get_namespaced_pod_proxy # noqa: E501
+
+ connect GET requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_pod_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_get_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_get_namespaced_pod_proxy  # noqa: E501

        connect GET requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit call arguments (self, name, namespace,
        # kwargs); accepted **kwargs entries are merged into it below.
        local_var_params = locals()

        # Operation-specific parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'path'
        ]
        # Generic client options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so all lookups below go through one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_pod_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_pod_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_pod_proxy`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters; only sent when supplied non-None.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        # No form/multipart payload for this GET operation.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_get_namespaced_pod_proxy_with_path # noqa: E501
+
+ connect GET requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_get_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_get_namespaced_pod_proxy_with_path  # noqa: E501

        connect GET requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the explicit call arguments (self, name, namespace,
        # path, kwargs); accepted **kwargs entries are merged into it below.
        local_var_params = locals()

        # Operation-specific parameters. 'path' is the URL segment; 'path2'
        # is the spec's query parameter also named "path", renamed by the
        # generator to avoid the collision.
        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        # Generic client options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so all lookups below go through one dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_pod_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                       local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_get_namespaced_pod_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {name}/{namespace}/{path} URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is transmitted under the wire name 'path' (see note above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        # No form/multipart payload for this GET operation.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_namespaced_service_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_get_namespaced_service_proxy # noqa: E501
+
+ connect GET requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_service_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_get_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_get_namespaced_service_proxy  # noqa: E501

        connect GET requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken as the very first statement so the snapshot holds
        # exactly self, the declared parameters, and 'kwargs'; binding any
        # other local before this call would leak it into the parameter dict.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot (six.iteritems keeps py2/py3 compatibility).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_service_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_service_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_service_proxy`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace}/{name} segments of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional 'path' is forwarded as a query-string parameter.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The proxied body comes back undeserialized, as a plain str.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_get_namespaced_service_proxy_with_path # noqa: E501
+
+ connect GET requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_get_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_get_namespaced_service_proxy_with_path  # noqa: E501

        connect GET requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken as the very first statement so the snapshot holds
        # exactly self, the declared parameters, and 'kwargs'; binding any
        # other local before this call would leak it into the parameter dict.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot (six.iteritems keeps py2/py3 compatibility).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_namespaced_service_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_get_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_get_namespaced_service_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace}/{name}/{path} URL template segments.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is the optional query parameter; it is sent under the wire
        # name 'path' ('path' itself is consumed by the URL template above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The proxied body comes back undeserialized, as a plain str.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_node_proxy(self, name, **kwargs): # noqa: E501
+ """connect_get_node_proxy # noqa: E501
+
+ connect GET requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_node_proxy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_node_proxy_with_http_info(name, **kwargs) # noqa: E501
+
    def connect_get_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
        """connect_get_node_proxy  # noqa: E501

        connect GET requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_node_proxy_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken as the very first statement so the snapshot holds
        # exactly self, the declared parameters, and 'kwargs'; binding any
        # other local before this call would leak it into the parameter dict.
        local_var_params = locals()

        all_params = [
            'name',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot (six.iteritems keeps py2/py3 compatibility).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_node_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_node_proxy`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} segment of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional 'path' is forwarded as a query-string parameter.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The proxied body comes back undeserialized, as a plain str.
        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_get_node_proxy_with_path(self, name, path, **kwargs): # noqa: E501
+ """connect_get_node_proxy_with_path # noqa: E501
+
+ connect GET requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_get_node_proxy_with_path(name, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_get_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501
+
    def connect_get_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
        """connect_get_node_proxy_with_path  # noqa: E501

        connect GET requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_get_node_proxy_with_path_with_http_info(name, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken as the very first statement so the snapshot holds
        # exactly self, the declared parameters, and 'kwargs'; binding any
        # other local before this call would leak it into the parameter dict.
        local_var_params = locals()

        all_params = [
            'name',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot (six.iteritems keeps py2/py3 compatibility).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_get_node_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_get_node_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_get_node_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {name}/{path} segments of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is the optional query parameter; it is sent under the wire
        # name 'path' ('path' itself is consumed by the URL template above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The proxied body comes back undeserialized, as a plain str.
        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy/{path}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_head_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_head_namespaced_pod_proxy # noqa: E501
+
+ connect HEAD requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_head_namespaced_pod_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_head_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_head_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_head_namespaced_pod_proxy  # noqa: E501

        connect HEAD requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_head_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken as the very first statement so the snapshot holds
        # exactly self, the declared parameters, and 'kwargs'; binding any
        # other local before this call would leak it into the parameter dict.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot (six.iteritems keeps py2/py3 compatibility).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_head_namespaced_pod_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_head_namespaced_pod_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_head_namespaced_pod_proxy`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace}/{name} segments of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional 'path' is forwarded as a query-string parameter.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The proxied body comes back undeserialized, as a plain str.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'HEAD',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_head_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_head_namespaced_pod_proxy_with_path # noqa: E501
+
+ connect HEAD requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_head_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_head_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_head_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_head_namespaced_pod_proxy_with_path  # noqa: E501

        connect HEAD requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_head_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken as the very first statement so the snapshot holds
        # exactly self, the declared parameters, and 'kwargs'; binding any
        # other local before this call would leak it into the parameter dict.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot (six.iteritems keeps py2/py3 compatibility).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_head_namespaced_pod_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_head_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_head_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_head_namespaced_pod_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace}/{name}/{path} URL template segments.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is the optional query parameter; it is sent under the wire
        # name 'path' ('path' itself is consumed by the URL template above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The proxied body comes back undeserialized, as a plain str.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'HEAD',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_head_namespaced_service_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_head_namespaced_service_proxy # noqa: E501
+
+ connect HEAD requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_head_namespaced_service_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_head_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_head_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_head_namespaced_service_proxy  # noqa: E501

        connect HEAD requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_head_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # Snapshot the call arguments by name. This must remain the first
        # statement: the dict keys are exactly the current locals
        # ('self', 'name', 'namespace', 'kwargs') and are consumed by string
        # lookups below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold keyword arguments into the snapshot, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_head_namespaced_service_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_head_namespaced_service_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_head_namespaced_service_proxy`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace}/{name} segments of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional 'path' query parameter forwarded to the proxied service.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'HEAD',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_head_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_head_namespaced_service_proxy_with_path # noqa: E501
+
+ connect HEAD requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_head_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_head_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_head_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_head_namespaced_service_proxy_with_path  # noqa: E501

        connect HEAD requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_head_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # Snapshot the call arguments by name. This must remain the first
        # statement: the dict keys are exactly the current locals and are
        # consumed by string lookups below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold keyword arguments into the snapshot, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_head_namespaced_service_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_head_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_head_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_head_namespaced_service_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # 'path' fills the {path} segment of the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is sent as the 'path' query parameter (the name 'path' is
        # already taken by the URL segment above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'HEAD',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_head_node_proxy(self, name, **kwargs): # noqa: E501
+ """connect_head_node_proxy # noqa: E501
+
+ connect HEAD requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_head_node_proxy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_head_node_proxy_with_http_info(name, **kwargs) # noqa: E501
+
    def connect_head_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
        """connect_head_node_proxy  # noqa: E501

        connect HEAD requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_head_node_proxy_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # Snapshot the call arguments by name. This must remain the first
        # statement: the dict keys are exactly the current locals
        # ('self', 'name', 'kwargs') and are consumed by string lookups below.
        local_var_params = locals()

        all_params = [
            'name',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold keyword arguments into the snapshot, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_head_node_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_head_node_proxy`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} segment of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional 'path' query parameter forwarded to the proxied node.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy', 'HEAD',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_head_node_proxy_with_path(self, name, path, **kwargs): # noqa: E501
+ """connect_head_node_proxy_with_path # noqa: E501
+
+ connect HEAD requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_head_node_proxy_with_path(name, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_head_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501
+
    def connect_head_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
        """connect_head_node_proxy_with_path  # noqa: E501

        connect HEAD requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_head_node_proxy_with_path_with_http_info(name, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # Snapshot the call arguments by name. This must remain the first
        # statement: the dict keys are exactly the current locals and are
        # consumed by string lookups below.
        local_var_params = locals()

        all_params = [
            'name',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold keyword arguments into the snapshot, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_head_node_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_head_node_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_head_node_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # 'path' fills the {path} segment of the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is sent as the 'path' query parameter (the name 'path' is
        # already taken by the URL segment above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy/{path}', 'HEAD',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_options_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_options_namespaced_pod_proxy # noqa: E501
+
+ connect OPTIONS requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_options_namespaced_pod_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_options_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_options_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_options_namespaced_pod_proxy  # noqa: E501

        connect OPTIONS requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_options_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # Snapshot the call arguments by name. This must remain the first
        # statement: the dict keys are exactly the current locals
        # ('self', 'name', 'namespace', 'kwargs') and are consumed by string
        # lookups below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold keyword arguments into the snapshot, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_options_namespaced_pod_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_options_namespaced_pod_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_options_namespaced_pod_proxy`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace}/{name} segments of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional 'path' query parameter forwarded to the proxied pod.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'OPTIONS',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_options_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_options_namespaced_pod_proxy_with_path # noqa: E501
+
+ connect OPTIONS requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_options_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_options_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_options_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_options_namespaced_pod_proxy_with_path  # noqa: E501

        connect OPTIONS requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_options_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # Snapshot the call arguments by name. This must remain the first
        # statement: the dict keys are exactly the current locals and are
        # consumed by string lookups below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold keyword arguments into the snapshot, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_options_namespaced_pod_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_options_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_options_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_options_namespaced_pod_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # 'path' fills the {path} segment of the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        # 'path2' is sent as the 'path' query parameter (the name 'path' is
        # already taken by the URL segment above).
        query_params = []
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'OPTIONS',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
def connect_options_namespaced_service_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect_options_namespaced_service_proxy  # noqa: E501

    connect OPTIONS requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_namespaced_service_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_options_namespaced_service_proxy_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
def connect_options_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """connect_options_namespaced_service_proxy  # noqa: E501

    connect OPTIONS requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot of the call arguments; keys are the parameter names above.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'path'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so a typo does not become a
    # silently ignored option.  dict.items() replaces the py2-only
    # six.iteritems shim; behavior is identical.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_options_namespaced_service_proxy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `connect_options_namespaced_service_proxy`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `connect_options_namespaced_service_proxy`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
        query_params.append(('path', local_var_params['path']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'OPTIONS',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def connect_options_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_options_namespaced_service_proxy_with_path  # noqa: E501

    connect OPTIONS requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_options_namespaced_service_proxy_with_path_with_http_info(
        name, namespace, path, **kwargs)  # noqa: E501
+
def connect_options_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_options_namespaced_service_proxy_with_path  # noqa: E501

    connect OPTIONS requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot of the call arguments; keys are the parameter names above.
    local_var_params = locals()

    # 'path' fills the URL template; 'path2' becomes the ?path= query arg.
    all_params = [
        'name',
        'namespace',
        'path',
        'path2'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so a typo does not become a
    # silently ignored option.  dict.items() replaces the py2-only
    # six.iteritems shim; behavior is identical.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_options_namespaced_service_proxy_with_path" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `connect_options_namespaced_service_proxy_with_path`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `connect_options_namespaced_service_proxy_with_path`")  # noqa: E501
    # verify the required parameter 'path' is set
    if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                   local_var_params['path'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `path` when calling `connect_options_namespaced_service_proxy_with_path`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501
    if 'path' in local_var_params:
        path_params['path'] = local_var_params['path']  # noqa: E501

    query_params = []
    if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
        query_params.append(('path', local_var_params['path2']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'OPTIONS',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def connect_options_node_proxy(self, name, **kwargs):  # noqa: E501
    """connect_options_node_proxy  # noqa: E501

    connect OPTIONS requests to proxy of Node  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_node_proxy(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the NodeProxyOptions (required)
    :param str path: Path is the URL path to use for the current proxy request to node.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_options_node_proxy_with_http_info(
        name, **kwargs)  # noqa: E501
+
def connect_options_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
    """connect_options_node_proxy  # noqa: E501

    connect OPTIONS requests to proxy of Node  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_node_proxy_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the NodeProxyOptions (required)
    :param str path: Path is the URL path to use for the current proxy request to node.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot of the call arguments; keys are the parameter names above.
    local_var_params = locals()

    all_params = [
        'name',
        'path'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so a typo does not become a
    # silently ignored option.  dict.items() replaces the py2-only
    # six.iteritems shim; behavior is identical.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_options_node_proxy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `connect_options_node_proxy`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    query_params = []
    if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
        query_params.append(('path', local_var_params['path']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/nodes/{name}/proxy', 'OPTIONS',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def connect_options_node_proxy_with_path(self, name, path, **kwargs):  # noqa: E501
    """connect_options_node_proxy_with_path  # noqa: E501

    connect OPTIONS requests to proxy of Node  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_node_proxy_with_path(name, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the NodeProxyOptions (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to node.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_options_node_proxy_with_path_with_http_info(
        name, path, **kwargs)  # noqa: E501
+
def connect_options_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
    """connect_options_node_proxy_with_path  # noqa: E501

    connect OPTIONS requests to proxy of Node  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_options_node_proxy_with_path_with_http_info(name, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the NodeProxyOptions (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to node.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot of the call arguments; keys are the parameter names above.
    local_var_params = locals()

    # 'path' fills the URL template; 'path2' becomes the ?path= query arg.
    all_params = [
        'name',
        'path',
        'path2'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so a typo does not become a
    # silently ignored option.  dict.items() replaces the py2-only
    # six.iteritems shim; behavior is identical.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_options_node_proxy_with_path" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `connect_options_node_proxy_with_path`")  # noqa: E501
    # verify the required parameter 'path' is set
    if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                   local_var_params['path'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `path` when calling `connect_options_node_proxy_with_path`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'path' in local_var_params:
        path_params['path'] = local_var_params['path']  # noqa: E501

    query_params = []
    if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
        query_params.append(('path', local_var_params['path2']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/nodes/{name}/proxy/{path}', 'OPTIONS',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def connect_patch_namespaced_pod_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect_patch_namespaced_pod_proxy  # noqa: E501

    connect PATCH requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_pod_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_patch_namespaced_pod_proxy_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
def connect_patch_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """connect_patch_namespaced_pod_proxy  # noqa: E501

    connect PATCH requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot of the call arguments; keys are the parameter names above.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'path'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so a typo does not become a
    # silently ignored option.  dict.items() replaces the py2-only
    # six.iteritems shim; behavior is identical.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_patch_namespaced_pod_proxy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `connect_patch_namespaced_pod_proxy`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `connect_patch_namespaced_pod_proxy`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
        query_params.append(('path', local_var_params['path']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def connect_patch_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_patch_namespaced_pod_proxy_with_path  # noqa: E501

    connect PATCH requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to pod.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_patch_namespaced_pod_proxy_with_path_with_http_info(
        name, namespace, path, **kwargs)  # noqa: E501
+
def connect_patch_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_patch_namespaced_pod_proxy_with_path  # noqa: E501

    connect PATCH requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to pod.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot of the call arguments; keys are the parameter names above.
    local_var_params = locals()

    # 'path' fills the URL template; 'path2' becomes the ?path= query arg.
    all_params = [
        'name',
        'namespace',
        'path',
        'path2'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early so a typo does not become a
    # silently ignored option.  dict.items() replaces the py2-only
    # six.iteritems shim; behavior is identical.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_patch_namespaced_pod_proxy_with_path" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `connect_patch_namespaced_pod_proxy_with_path`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `connect_patch_namespaced_pod_proxy_with_path`")  # noqa: E501
    # verify the required parameter 'path' is set
    if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                   local_var_params['path'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `path` when calling `connect_patch_namespaced_pod_proxy_with_path`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501
    if 'path' in local_var_params:
        path_params['path'] = local_var_params['path']  # noqa: E501

    query_params = []
    if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
        query_params.append(('path', local_var_params['path2']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def connect_patch_namespaced_service_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect_patch_namespaced_service_proxy  # noqa: E501

    connect PATCH requests to proxy of Service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_service_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is the total timeout; a (connection, read) tuple sets each
        phase separately.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience variant: always discard status code and headers; callers
    # wanting the full (data, status, headers) triple use the
    # *_with_http_info sibling directly.
    kwargs['_return_http_data_only'] = True
    return self.connect_patch_namespaced_service_proxy_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
+
+    def connect_patch_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_patch_namespaced_service_proxy  # noqa: E501
+
+        connect PATCH requests to proxy of Service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_patch_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ServiceProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the very first statement so the snapshot
+        # contains exactly the declared parameters (self, name, namespace,
+        # kwargs) and none of the temporaries created below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'path'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the accepted ones into
+        # local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_patch_namespaced_service_proxy" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_patch_namespaced_service_proxy`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_patch_namespaced_service_proxy`")  # noqa: E501
+
+        collection_formats = {}
+
+        # URL template substitutions for {namespace} and {name}.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch through the shared ApiClient; returns the response data
+        # (synchronous) or a request thread (async_req=True).
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_patch_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_patch_namespaced_service_proxy_with_path # noqa: E501
+
+ connect PATCH requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_patch_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_patch_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
+    def connect_patch_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
+        """connect_patch_namespaced_service_proxy_with_path  # noqa: E501
+
+        connect PATCH requests to proxy of Service  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_patch_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ServiceProxyOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the very first statement so the snapshot
+        # contains exactly the declared parameters and no later temporaries.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'path',
+            'path2'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the accepted ones into
+        # local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_patch_namespaced_service_proxy_with_path" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_patch_namespaced_service_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_patch_namespaced_service_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'path' is set
+        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
+                                                       local_var_params['path'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `path` when calling `connect_patch_namespaced_service_proxy_with_path`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+        if 'path' in local_var_params:
+            path_params['path'] = local_var_params['path']  # noqa: E501
+
+        # 'path2' is the Python-side name for the 'path' *query* parameter;
+        # it was renamed to avoid clashing with the 'path' path parameter.
+        query_params = []
+        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path2']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch through the shared ApiClient; returns the response data
+        # (synchronous) or a request thread (async_req=True).
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_patch_node_proxy(self, name, **kwargs): # noqa: E501
+ """connect_patch_node_proxy # noqa: E501
+
+ connect PATCH requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_patch_node_proxy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_patch_node_proxy_with_http_info(name, **kwargs) # noqa: E501
+
+    def connect_patch_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
+        """connect_patch_node_proxy  # noqa: E501
+
+        connect PATCH requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_patch_node_proxy_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: Path is the URL path to use for the current proxy request to node.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the very first statement so the snapshot
+        # contains exactly the declared parameters (self, name, kwargs).
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'path'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the accepted ones into
+        # local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_patch_node_proxy" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_patch_node_proxy`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch through the shared ApiClient; returns the response data
+        # (synchronous) or a request thread (async_req=True).
+        return self.api_client.call_api(
+            '/api/v1/nodes/{name}/proxy', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_patch_node_proxy_with_path(self, name, path, **kwargs): # noqa: E501
+ """connect_patch_node_proxy_with_path # noqa: E501
+
+ connect PATCH requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_patch_node_proxy_with_path(name, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_patch_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501
+
+    def connect_patch_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
+        """connect_patch_node_proxy_with_path  # noqa: E501
+
+        connect PATCH requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_patch_node_proxy_with_path_with_http_info(name, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the URL path to use for the current proxy request to node.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the very first statement so the snapshot
+        # contains exactly the declared parameters and no later temporaries.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'path',
+            'path2'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the accepted ones into
+        # local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_patch_node_proxy_with_path" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_patch_node_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'path' is set
+        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
+                                                       local_var_params['path'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `path` when calling `connect_patch_node_proxy_with_path`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'path' in local_var_params:
+            path_params['path'] = local_var_params['path']  # noqa: E501
+
+        # 'path2' is the Python-side name for the 'path' *query* parameter;
+        # it was renamed to avoid clashing with the 'path' path parameter.
+        query_params = []
+        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path2']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch through the shared ApiClient; returns the response data
+        # (synchronous) or a request thread (async_req=True).
+        return self.api_client.call_api(
+            '/api/v1/nodes/{name}/proxy/{path}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_post_namespaced_pod_attach(self, name, namespace, **kwargs): # noqa: E501
+ """connect_post_namespaced_pod_attach # noqa: E501
+
+ connect POST requests to attach of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_pod_attach(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodAttachOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
+ :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
+ :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
+ :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
+ :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+    def connect_post_namespaced_pod_attach_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_post_namespaced_pod_attach  # noqa: E501
+
+        connect POST requests to attach of Pod  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_post_namespaced_pod_attach_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodAttachOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
+        :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
+        :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
+        :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
+        :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the very first statement so the snapshot
+        # contains exactly the declared parameters and no later temporaries.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'container',
+            'stderr',
+            'stdin',
+            'stdout',
+            'tty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the accepted ones into
+        # local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_post_namespaced_pod_attach" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_pod_attach`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_pod_attach`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional attach options are forwarded as query-string parameters;
+        # parameters left as None are omitted from the request.
+        query_params = []
+        if 'container' in local_var_params and local_var_params['container'] is not None:  # noqa: E501
+            query_params.append(('container', local_var_params['container']))  # noqa: E501
+        if 'stderr' in local_var_params and local_var_params['stderr'] is not None:  # noqa: E501
+            query_params.append(('stderr', local_var_params['stderr']))  # noqa: E501
+        if 'stdin' in local_var_params and local_var_params['stdin'] is not None:  # noqa: E501
+            query_params.append(('stdin', local_var_params['stdin']))  # noqa: E501
+        if 'stdout' in local_var_params and local_var_params['stdout'] is not None:  # noqa: E501
+            query_params.append(('stdout', local_var_params['stdout']))  # noqa: E501
+        if 'tty' in local_var_params and local_var_params['tty'] is not None:  # noqa: E501
+            query_params.append(('tty', local_var_params['tty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch through the shared ApiClient; returns the response data
+        # (synchronous) or a request thread (async_req=True).
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/pods/{name}/attach', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_post_namespaced_pod_exec(self, name, namespace, **kwargs): # noqa: E501
+ """connect_post_namespaced_pod_exec # noqa: E501
+
+ connect POST requests to exec of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_pod_exec(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodExecOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str command: Command is the remote command to execute. argv array. Not executed within a shell.
+ :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
+ :param bool stderr: Redirect the standard error stream of the pod for this call.
+ :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
+ :param bool stdout: Redirect the standard output stream of the pod for this call.
+ :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+    def connect_post_namespaced_pod_exec_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """connect_post_namespaced_pod_exec  # noqa: E501
+
+        connect POST requests to exec of Pod  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_post_namespaced_pod_exec_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodExecOptions (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str command: Command is the remote command to execute. argv array. Not executed within a shell.
+        :param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
+        :param bool stderr: Redirect the standard error stream of the pod for this call.
+        :param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
+        :param bool stdout: Redirect the standard output stream of the pod for this call.
+        :param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the very first statement so the snapshot
+        # contains exactly the declared parameters and no later temporaries.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'command',
+            'container',
+            'stderr',
+            'stdin',
+            'stdout',
+            'tty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the accepted ones into
+        # local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_post_namespaced_pod_exec" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_pod_exec`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_pod_exec`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional exec options are forwarded as query-string parameters;
+        # parameters left as None are omitted from the request.
+        query_params = []
+        if 'command' in local_var_params and local_var_params['command'] is not None:  # noqa: E501
+            query_params.append(('command', local_var_params['command']))  # noqa: E501
+        if 'container' in local_var_params and local_var_params['container'] is not None:  # noqa: E501
+            query_params.append(('container', local_var_params['container']))  # noqa: E501
+        if 'stderr' in local_var_params and local_var_params['stderr'] is not None:  # noqa: E501
+            query_params.append(('stderr', local_var_params['stderr']))  # noqa: E501
+        if 'stdin' in local_var_params and local_var_params['stdin'] is not None:  # noqa: E501
+            query_params.append(('stdin', local_var_params['stdin']))  # noqa: E501
+        if 'stdout' in local_var_params and local_var_params['stdout'] is not None:  # noqa: E501
+            query_params.append(('stdout', local_var_params['stdout']))  # noqa: E501
+        if 'tty' in local_var_params and local_var_params['tty'] is not None:  # noqa: E501
+            query_params.append(('tty', local_var_params['tty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Dispatch through the shared ApiClient; returns the response data
+        # (synchronous) or a request thread (async_req=True).
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/pods/{name}/exec', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def connect_post_namespaced_pod_portforward(self, name, namespace, **kwargs): # noqa: E501
+ """connect_post_namespaced_pod_portforward # noqa: E501
+
+ connect POST requests to portforward of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_pod_portforward(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodPortForwardOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param int ports: List of ports to forward Required when using WebSockets
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_pod_portforward_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_post_namespaced_pod_portforward_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_post_namespaced_pod_portforward  # noqa: E501

        connect POST requests to portforward of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_namespaced_pod_portforward_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodPortForwardOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param int ports: List of ports to forward Required when using WebSockets
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken before any other local variable is bound, so it
        # captures exactly the declared arguments: self, name, namespace, kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters plus the generic request options.
        all_params = [
            'name',
            'namespace',
            'ports'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_namespaced_pod_portforward" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_pod_portforward`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_pod_portforward`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'ports' in local_var_params and local_var_params['ports'] is not None:  # noqa: E501
            query_params.append(('ports', local_var_params['ports']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/portforward', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_post_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_post_namespaced_pod_proxy # noqa: E501
+
+ connect POST requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_pod_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_post_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_post_namespaced_pod_proxy  # noqa: E501

        connect POST requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken before any other local variable is bound, so it
        # captures exactly the declared arguments: self, name, namespace, kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters plus the generic request options.
        all_params = [
            'name',
            'namespace',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_namespaced_pod_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_pod_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_pod_proxy`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_post_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_post_namespaced_pod_proxy_with_path # noqa: E501
+
+ connect POST requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_post_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_post_namespaced_pod_proxy_with_path  # noqa: E501

        connect POST requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken before any other local variable is bound, so it
        # captures exactly the declared arguments: self, name, namespace,
        # path, kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters plus the generic request options.
        # 'path' is a URL segment; 'path2' is the query-string 'path' value.
        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_namespaced_pod_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_post_namespaced_pod_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        query_params = []
        # 'path2' is sent on the wire under the query key 'path'.
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_post_namespaced_service_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_post_namespaced_service_proxy # noqa: E501
+
+ connect POST requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_service_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_post_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_post_namespaced_service_proxy  # noqa: E501

        connect POST requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken before any other local variable is bound, so it
        # captures exactly the declared arguments: self, name, namespace, kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters plus the generic request options.
        all_params = [
            'name',
            'namespace',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_namespaced_service_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_service_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_service_proxy`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_post_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_post_namespaced_service_proxy_with_path # noqa: E501
+
+ connect POST requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_post_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_post_namespaced_service_proxy_with_path  # noqa: E501

        connect POST requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is taken before any other local variable is bound, so it
        # captures exactly the declared arguments: self, name, namespace,
        # path, kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters plus the generic request options.
        # 'path' is a URL segment; 'path2' is the query-string 'path' value.
        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_namespaced_service_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_post_namespaced_service_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        query_params = []
        # 'path2' is sent on the wire under the query key 'path'.
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_post_node_proxy(self, name, **kwargs): # noqa: E501
+ """connect_post_node_proxy # noqa: E501
+
+ connect POST requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_node_proxy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_node_proxy_with_http_info(name, **kwargs) # noqa: E501
+
    def connect_post_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
        """connect_post_node_proxy  # noqa: E501

        connect POST requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_node_proxy_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters ('path' is an optional query string).
        all_params = [
            'name',
            'path'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_node_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_node_proxy`")  # noqa: E501

        collection_formats = {}

        # 'name' fills the {name} placeholder in the endpoint URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_post_node_proxy_with_path(self, name, path, **kwargs): # noqa: E501
+ """connect_post_node_proxy_with_path # noqa: E501
+
+ connect POST requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_post_node_proxy_with_path(name, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_post_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501
+
    def connect_post_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
        """connect_post_node_proxy_with_path  # noqa: E501

        connect POST requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_post_node_proxy_with_path_with_http_info(name, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # 'path2' is the optional query-string path; the generator renamed it
        # to avoid clashing with the required positional 'path' URL parameter.
        all_params = [
            'name',
            'path',
            'path2'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_post_node_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_post_node_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_post_node_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # 'name' and 'path' fill the {name}/{path} placeholders in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        query_params = []
        # 'path2' is sent under the wire name 'path' in the query string.
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy/{path}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_put_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_put_namespaced_pod_proxy # noqa: E501
+
+ connect PUT requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_put_namespaced_pod_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_put_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_put_namespaced_pod_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_put_namespaced_pod_proxy  # noqa: E501

        connect PUT requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_put_namespaced_pod_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters ('path' is an optional query string).
        all_params = [
            'name',
            'namespace',
            'path'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_put_namespaced_pod_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_put_namespaced_pod_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_put_namespaced_pod_proxy`")  # noqa: E501

        collection_formats = {}

        # 'name' and 'namespace' fill the URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_put_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_put_namespaced_pod_proxy_with_path # noqa: E501
+
+ connect PUT requests to proxy of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_put_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to pod.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_put_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_put_namespaced_pod_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_put_namespaced_pod_proxy_with_path  # noqa: E501

        connect PUT requests to proxy of Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_put_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the URL path to use for the current proxy request to pod.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # 'path2' is the optional query-string path; the generator renamed it
        # to avoid clashing with the required positional 'path' URL parameter.
        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_put_namespaced_pod_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_put_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_put_namespaced_pod_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_put_namespaced_pod_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # 'name', 'namespace' and 'path' fill the URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        query_params = []
        # 'path2' is sent under the wire name 'path' in the query string.
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/proxy/{path}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_put_namespaced_service_proxy(self, name, namespace, **kwargs): # noqa: E501
+ """connect_put_namespaced_service_proxy # noqa: E501
+
+ connect PUT requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_put_namespaced_service_proxy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_put_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def connect_put_namespaced_service_proxy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """connect_put_namespaced_service_proxy  # noqa: E501

        connect PUT requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_put_namespaced_service_proxy_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters ('path' is an optional query string).
        all_params = [
            'name',
            'namespace',
            'path'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_put_namespaced_service_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_put_namespaced_service_proxy`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_put_namespaced_service_proxy`")  # noqa: E501

        collection_formats = {}

        # 'name' and 'namespace' fill the URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_put_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
+ """connect_put_namespaced_service_proxy_with_path # noqa: E501
+
+ connect PUT requests to proxy of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_put_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceProxyOptions (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_put_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
+
    def connect_put_namespaced_service_proxy_with_path_with_http_info(self, name, namespace, path, **kwargs):  # noqa: E501
        """connect_put_namespaced_service_proxy_with_path  # noqa: E501

        connect PUT requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_put_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: path to the resource (required)
        :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # 'path2' is the optional query-string path; the generator renamed it
        # to avoid clashing with the required positional 'path' URL parameter.
        all_params = [
            'name',
            'namespace',
            'path',
            'path2'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_put_namespaced_service_proxy_with_path" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_put_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `connect_put_namespaced_service_proxy_with_path`")  # noqa: E501
        # verify the required parameter 'path' is set
        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
                                                        local_var_params['path'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `path` when calling `connect_put_namespaced_service_proxy_with_path`")  # noqa: E501

        collection_formats = {}

        # 'name', 'namespace' and 'path' fill the URL placeholders.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'path' in local_var_params:
            path_params['path'] = local_var_params['path']  # noqa: E501

        query_params = []
        # 'path2' is sent under the wire name 'path' in the query string.
        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path2']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_put_node_proxy(self, name, **kwargs): # noqa: E501
+ """connect_put_node_proxy # noqa: E501
+
+ connect PUT requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_put_node_proxy(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_put_node_proxy_with_http_info(name, **kwargs) # noqa: E501
+
    def connect_put_node_proxy_with_http_info(self, name, **kwargs):  # noqa: E501
        """connect_put_node_proxy  # noqa: E501

        connect PUT requests to proxy of Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_put_node_proxy_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the NodeProxyOptions (required)
        :param str path: Path is the URL path to use for the current proxy request to node.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() must run before any other local variable is bound so
        # that it captures exactly self, the positional args and kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters ('path' is an optional query string).
        all_params = [
            'name',
            'path'
        ]
        # Framework options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method connect_put_node_proxy" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `connect_put_node_proxy`")  # noqa: E501

        collection_formats = {}

        # 'name' fills the {name} placeholder in the endpoint URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/nodes/{name}/proxy', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def connect_put_node_proxy_with_path(self, name, path, **kwargs): # noqa: E501
+ """connect_put_node_proxy_with_path # noqa: E501
+
+ connect PUT requests to proxy of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.connect_put_node_proxy_with_path(name, path, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NodeProxyOptions (required)
+ :param str path: path to the resource (required)
+ :param str path2: Path is the URL path to use for the current proxy request to node.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.connect_put_node_proxy_with_path_with_http_info(name, path, **kwargs) # noqa: E501
+
+    def connect_put_node_proxy_with_path_with_http_info(self, name, path, **kwargs):  # noqa: E501
+        """connect_put_node_proxy_with_path  # noqa: E501
+
+        connect PUT requests to proxy of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.connect_put_node_proxy_with_path_with_http_info(name, path, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the NodeProxyOptions (required)
+        :param str path: path to the resource (required)
+        :param str path2: Path is the URL path to use for the current proxy request to node.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Capture the call arguments first: at this point locals() holds
+        # exactly {'self', 'name', 'path', 'kwargs'}.  Keep this as the
+        # first statement so no helper variables leak into the dict.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'path',
+            'path2'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so all parameters are looked up uniformly.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method connect_put_node_proxy_with_path" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `connect_put_node_proxy_with_path`")  # noqa: E501
+        # verify the required parameter 'path' is set
+        if self.api_client.client_side_validation and ('path' not in local_var_params or  # noqa: E501
+                                                       local_var_params['path'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `path` when calling `connect_put_node_proxy_with_path`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'path' in local_var_params:
+            path_params['path'] = local_var_params['path']  # noqa: E501
+
+        query_params = []
+        # 'path2' exists only to avoid clashing with the URL path parameter
+        # 'path'; on the wire it is still sent as the 'path' query key.
+        if 'path2' in local_var_params and local_var_params['path2'] is not None:  # noqa: E501
+            query_params.append(('path', local_var_params['path2']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['*/*'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the HTTP round trip (and the async_req thread dispatch)
+        # to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/nodes/{name}/proxy/{path}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def create_namespace(self, body, **kwargs): # noqa: E501
+ """create_namespace # noqa: E501
+
+ create a Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespace(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1Namespace body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespace_with_http_info(body, **kwargs) # noqa: E501
+
+    def create_namespace_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_namespace  # noqa: E501
+
+        create a Namespace  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespace_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1Namespace body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Capture the call arguments first: at this point locals() holds
+        # exactly {'self', 'body', 'kwargs'}.  Keep this as the first
+        # statement so no helper variables leak into the dict.
+        local_var_params = locals()
+
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so all parameters are looked up uniformly.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespace" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespace`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Map snake_case parameter names to their camelCase wire names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the HTTP round trip (and the async_req thread dispatch)
+        # to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/namespaces', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Namespace',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def create_namespaced_binding(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_binding # noqa: E501
+
+ create a Binding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_binding(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Binding body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Binding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_binding_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+    def create_namespaced_binding_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_binding  # noqa: E501
+
+        create a Binding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_binding_with_http_info(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Binding body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Binding, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Capture the call arguments first: at this point locals() holds
+        # exactly {'self', 'namespace', 'body', 'kwargs'}.  Keep this as
+        # the first statement so no helper variables leak into the dict.
+        local_var_params = locals()
+
+        all_params = [
+            'namespace',
+            'body',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'pretty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so all parameters are looked up uniformly.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_binding`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_binding`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case parameter names to their camelCase wire names.
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the HTTP round trip (and the async_req thread dispatch)
+        # to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/bindings', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Binding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def create_namespaced_config_map(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_config_map # noqa: E501
+
+ create a ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_config_map(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ConfigMap body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ConfigMap
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_config_map_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+    def create_namespaced_config_map_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_config_map  # noqa: E501
+
+        create a ConfigMap  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_config_map_with_http_info(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1ConfigMap body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ConfigMap, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Capture the call arguments first: at this point locals() holds
+        # exactly {'self', 'namespace', 'body', 'kwargs'}.  Keep this as
+        # the first statement so no helper variables leak into the dict.
+        local_var_params = locals()
+
+        all_params = [
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so all parameters are looked up uniformly.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_config_map" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                       local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_config_map`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_config_map`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case parameter names to their camelCase wire names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the HTTP round trip (and the async_req thread dispatch)
+        # to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/configmaps', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ConfigMap',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def create_namespaced_endpoints(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_endpoints # noqa: E501
+
+ create Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_endpoints(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Endpoints body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Endpoints
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_endpoints_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_endpoints_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_endpoints  # noqa: E501

        create Endpoints  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_endpoints_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Endpoints body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Endpoints, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local name is bound, so it
        # holds exactly the declared parameters: self, namespace, body, kwargs.
        local_var_params = locals()

        # Keyword names this generated operation accepts.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Client-level options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold **kwargs into local_var_params, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_endpoints" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_endpoints`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_endpoints`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace} segment of the path template below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # snake_case parameter names map to camelCase query-string keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/endpoints', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Endpoints',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_event(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_event # noqa: E501
+
+ create an Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_event(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param CoreV1Event body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: CoreV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_event_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_event_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_event  # noqa: E501

        create an Event  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_event_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param CoreV1Event body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(CoreV1Event, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local name is bound, so it
        # holds exactly the declared parameters: self, namespace, body, kwargs.
        local_var_params = locals()

        # Keyword names this generated operation accepts.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Client-level options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold **kwargs into local_var_params, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_event" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_event`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_event`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace} segment of the path template below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # snake_case parameter names map to camelCase query-string keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/events', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CoreV1Event',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_limit_range(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_limit_range # noqa: E501
+
+ create a LimitRange # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_limit_range(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1LimitRange body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1LimitRange
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_limit_range_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_limit_range_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_limit_range  # noqa: E501

        create a LimitRange  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_limit_range_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1LimitRange body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1LimitRange, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local name is bound, so it
        # holds exactly the declared parameters: self, namespace, body, kwargs.
        local_var_params = locals()

        # Keyword names this generated operation accepts.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Client-level options accepted by every generated operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold **kwargs into local_var_params, rejecting unknown names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_limit_range" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_limit_range`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_limit_range`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace} segment of the path template below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # snake_case parameter names map to camelCase query-string keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/limitranges', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1LimitRange',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_persistent_volume_claim(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_persistent_volume_claim # noqa: E501
+
+ create a PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_persistent_volume_claim(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PersistentVolumeClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_persistent_volume_claim_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_persistent_volume_claim_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_persistent_volume_claim # noqa: E501
+
+ create a PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_persistent_volume_claim_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PersistentVolumeClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_persistent_volume_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_persistent_volume_claim`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_persistent_volume_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/persistentvolumeclaims', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolumeClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def create_namespaced_pod(self, namespace, body, **kwargs):  # noqa: E501
    """Create a Pod.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the result:

    >>> thread = api.create_namespaced_pod(namespace, body, async_req=True)
    >>> result = thread.get()

    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Pod body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is processed without
        persisting modifications; the only valid value is "All"
    :param str field_manager: name associated with the actor or entity
        making these changes (printable, fewer than 128 characters)
    :param str field_validation: server handling of unknown or duplicate
        fields in the body: "Ignore", "Warn" or "Strict"
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1Pod, or the request thread when ``async_req`` is True
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_pod_with_http_info(namespace, body, **call_kwargs)  # noqa: E501
+
def create_namespaced_pod_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_pod  # noqa: E501

    create a Pod. This method makes a synchronous HTTP request by
    default. To make an asynchronous HTTP request, please pass
    async_req=True
    >>> thread = api.create_namespaced_pod_with_http_info(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Pod body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should
        not be persisted; the only valid value is "All".
    :param str field_manager: a name associated with the actor or entity
        making these changes (printable, fewer than 128 characters).
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request body: "Ignore" (drop silently),
        "Warn" (succeed but emit warning headers) or "Strict" (fail with
        a BadRequest error).
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be
        a pair (tuple) of (connection, read) timeouts.
    :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request
        thread.
    """

    # locals() captures the named arguments plus the raw ``kwargs`` dict,
    # so every parameter can be processed uniformly below.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this call.
    all_params = [
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    # Transport-level options common to every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into local_var_params, rejecting unknown names early.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_pod" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_pod`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{namespace}' segment of the URL path.
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Map python snake_case argument names onto the camelCase query
    # parameters the API expects; parameters left unset are omitted.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The V1Pod manifest is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Hand the assembled request off to the shared transport layer.
    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/pods', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Pod',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def create_namespaced_pod_binding(self, name, namespace, body, **kwargs):  # noqa: E501
    """Create binding of a Pod.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the result:

    >>> thread = api.create_namespaced_pod_binding(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param str name: name of the Binding (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Binding body: (required)
    :param str dry_run: when present, the request is processed without
        persisting modifications; the only valid value is "All"
    :param str field_manager: name associated with the actor or entity
        making these changes (printable, fewer than 128 characters)
    :param str field_validation: server handling of unknown or duplicate
        fields in the body: "Ignore", "Warn" or "Strict"
    :param str pretty: if 'true', the output is pretty printed
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1Binding, or the request thread when ``async_req`` is True
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_pod_binding_with_http_info(name, namespace, body, **call_kwargs)  # noqa: E501
+
def create_namespaced_pod_binding_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_pod_binding  # noqa: E501

    create binding of a Pod. This method makes a synchronous HTTP request
    by default. To make an asynchronous HTTP request, please pass
    async_req=True
    >>> thread = api.create_namespaced_pod_binding_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Binding (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Binding body: (required)
    :param str dry_run: When present, indicates that modifications should
        not be persisted; the only valid value is "All".
    :param str field_manager: a name associated with the actor or entity
        making these changes (printable, fewer than 128 characters).
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request body: "Ignore" (drop silently),
        "Warn" (succeed but emit warning headers) or "Strict" (fail with
        a BadRequest error).
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be
        a pair (tuple) of (connection, read) timeouts.
    :return: tuple(V1Binding, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request
        thread.
    """

    # locals() captures the named arguments plus the raw ``kwargs`` dict,
    # so every parameter can be processed uniformly below.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this call.
    all_params = [
        'name',
        'namespace',
        'body',
        'dry_run',
        'field_manager',
        'field_validation',
        'pretty'
    ]
    # Transport-level options common to every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into local_var_params, rejecting unknown names early.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_pod_binding" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `create_namespaced_pod_binding`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod_binding`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_pod_binding`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{name}' and '{namespace}' URL path segments.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Map python snake_case argument names onto the camelCase query
    # parameters the API expects; parameters left unset are omitted.
    query_params = []
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The V1Binding manifest is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Hand the assembled request off to the shared transport layer.
    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/pods/{name}/binding', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Binding',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def create_namespaced_pod_eviction(self, name, namespace, body, **kwargs):  # noqa: E501
    """Create eviction of a Pod.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the result:

    >>> thread = api.create_namespaced_pod_eviction(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param str name: name of the Eviction (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Eviction body: (required)
    :param str dry_run: when present, the request is processed without
        persisting modifications; the only valid value is "All"
    :param str field_manager: name associated with the actor or entity
        making these changes (printable, fewer than 128 characters)
    :param str field_validation: server handling of unknown or duplicate
        fields in the body: "Ignore", "Warn" or "Strict"
    :param str pretty: if 'true', the output is pretty printed
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1Eviction, or the request thread when ``async_req`` is True
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_pod_eviction_with_http_info(name, namespace, body, **call_kwargs)  # noqa: E501
+
def create_namespaced_pod_eviction_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_pod_eviction  # noqa: E501

    create eviction of a Pod. This method makes a synchronous HTTP
    request by default. To make an asynchronous HTTP request, please pass
    async_req=True
    >>> thread = api.create_namespaced_pod_eviction_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Eviction (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Eviction body: (required)
    :param str dry_run: When present, indicates that modifications should
        not be persisted; the only valid value is "All".
    :param str field_manager: a name associated with the actor or entity
        making these changes (printable, fewer than 128 characters).
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request body: "Ignore" (drop silently),
        "Warn" (succeed but emit warning headers) or "Strict" (fail with
        a BadRequest error).
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be
        a pair (tuple) of (connection, read) timeouts.
    :return: tuple(V1Eviction, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request
        thread.
    """

    # locals() captures the named arguments plus the raw ``kwargs`` dict,
    # so every parameter can be processed uniformly below.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this call.
    all_params = [
        'name',
        'namespace',
        'body',
        'dry_run',
        'field_manager',
        'field_validation',
        'pretty'
    ]
    # Transport-level options common to every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into local_var_params, rejecting unknown names early.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_pod_eviction" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `create_namespaced_pod_eviction`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod_eviction`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_pod_eviction`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '{name}' and '{namespace}' URL path segments.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Map python snake_case argument names onto the camelCase query
    # parameters the API expects; parameters left unset are omitted.
    query_params = []
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The V1Eviction manifest is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Hand the assembled request off to the shared transport layer.
    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/pods/{name}/eviction', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Eviction',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def create_namespaced_pod_template(self, namespace, body, **kwargs):  # noqa: E501
    """Create a PodTemplate.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead of the result:

    >>> thread = api.create_namespaced_pod_template(namespace, body, async_req=True)
    >>> result = thread.get()

    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1PodTemplate body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is processed without
        persisting modifications; the only valid value is "All"
    :param str field_manager: name associated with the actor or entity
        making these changes (printable, fewer than 128 characters)
    :param str field_validation: server handling of unknown or duplicate
        fields in the body: "Ignore", "Warn" or "Strict"
    :param bool async_req: execute the request asynchronously
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1PodTemplate, or the request thread when ``async_req`` is
        True
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_namespaced_pod_template_with_http_info(namespace, body, **call_kwargs)  # noqa: E501
+
    def create_namespaced_pod_template_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_pod_template  # noqa: E501

        create a PodTemplate  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_pod_template_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1PodTemplate body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PodTemplate, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local is bound, so the snapshot
        # holds exactly the declared arguments plus the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into the
        # parameter dict so every parameter is looked up the same way below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_pod_template" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod_template`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_pod_template`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the camelCase wire names mapped from the
        # snake_case Python arguments; None values are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/podtemplates', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PodTemplate',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_replication_controller(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_replication_controller # noqa: E501
+
+ create a ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_replication_controller(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ReplicationController body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_replication_controller_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_replication_controller_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_replication_controller  # noqa: E501

        create a ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_replication_controller_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ReplicationController body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local is bound, so the snapshot
        # holds exactly the declared arguments plus the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into the
        # parameter dict so every parameter is looked up the same way below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_replication_controller" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_replication_controller`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_replication_controller`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the camelCase wire names mapped from the
        # snake_case Python arguments; None values are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicationController',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_resource_quota(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_resource_quota # noqa: E501
+
+ create a ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ResourceQuota body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_resource_quota_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_resource_quota  # noqa: E501

        create a ResourceQuota  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_resource_quota_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ResourceQuota body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local is bound, so the snapshot
        # holds exactly the declared arguments plus the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into the
        # parameter dict so every parameter is looked up the same way below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_resource_quota" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_quota`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_quota`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the camelCase wire names mapped from the
        # snake_case Python arguments; None values are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/resourcequotas', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ResourceQuota',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_secret(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_secret # noqa: E501
+
+ create a Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_secret(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Secret body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Secret
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_secret_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_secret_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_secret  # noqa: E501

        create a Secret  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_secret_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Secret body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Secret, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other local is bound, so the snapshot
        # holds exactly the declared arguments plus the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into the
        # parameter dict so every parameter is looked up the same way below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_secret" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_secret`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_secret`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the camelCase wire names mapped from the
        # snake_case Python arguments; None values are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/secrets', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Secret',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_service(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_service # noqa: E501
+
+ create a Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_service(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Service body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_service_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_service_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_service  # noqa: E501

        create a Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_service_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1Service body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be the very first statement so it snapshots
        # exactly the declared parameters (self, namespace, body) plus the
        # raw ``kwargs`` dict, and nothing else.
        local_var_params = locals()

        # Operation-specific parameters this endpoint understands.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Generic client options accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten ``kwargs`` into
        # ``local_var_params`` so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_service" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_service`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_service`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api() below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case client names onto the camelCase query keys the API
        # expects; parameters left unset are simply omitted from the query.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP round trip (and optional async dispatch)
        # to the shared ApiClient instance.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Service',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_service_account(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_service_account # noqa: E501
+
+ create a ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_service_account(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ServiceAccount body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccount
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_service_account_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_service_account_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_service_account  # noqa: E501

        create a ServiceAccount  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_service_account_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ServiceAccount body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ServiceAccount, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be the very first statement so it snapshots
        # exactly the declared parameters (self, namespace, body) plus the
        # raw ``kwargs`` dict, and nothing else.
        local_var_params = locals()

        # Operation-specific parameters this endpoint understands.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Generic client options accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten ``kwargs`` into
        # ``local_var_params`` so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_service_account" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_service_account`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_service_account`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api() below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case client names onto the camelCase query keys the API
        # expects; parameters left unset are simply omitted from the query.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP round trip (and optional async dispatch)
        # to the shared ApiClient instance.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/serviceaccounts', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ServiceAccount',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_namespaced_service_account_token(self, name, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_service_account_token # noqa: E501
+
+ create token of a ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_service_account_token(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the TokenRequest (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param AuthenticationV1TokenRequest body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: AuthenticationV1TokenRequest
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_service_account_token_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_service_account_token_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_service_account_token  # noqa: E501

        create token of a ServiceAccount  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_service_account_token_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the TokenRequest (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param AuthenticationV1TokenRequest body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(AuthenticationV1TokenRequest, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be the very first statement so it snapshots
        # exactly the declared parameters (self, name, namespace, body) plus
        # the raw ``kwargs`` dict, and nothing else.
        local_var_params = locals()

        # Operation-specific parameters this endpoint understands.
        all_params = [
            'name',
            'namespace',
            'body',
            'dry_run',
            'field_manager',
            'field_validation',
            'pretty'
        ]
        # Generic client options accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten ``kwargs`` into
        # ``local_var_params`` so all parameters are looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_service_account_token" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `create_namespaced_service_account_token`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_service_account_token`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_service_account_token`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api() below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case client names onto the camelCase query keys the API
        # expects; parameters left unset are simply omitted from the query.
        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP round trip (and optional async dispatch)
        # to the shared ApiClient instance.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/serviceaccounts/{name}/token', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AuthenticationV1TokenRequest',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def create_node(self, body, **kwargs): # noqa: E501
+ """create_node # noqa: E501
+
+ create a Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_node(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1Node body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_node_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_node_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_node # noqa: E501
+
+ create a Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_node_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1Node body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_node`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/nodes', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Node', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_persistent_volume(self, body, **kwargs): # noqa: E501
+ """create_persistent_volume # noqa: E501
+
+ create a PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_persistent_volume(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1PersistentVolume body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Thin convenience wrapper: force the low-level variant to return only
+ # the deserialized response body (not the (data, status, headers)
+ # tuple) and delegate all other argument handling to it.
+ kwargs['_return_http_data_only'] = True
+ return self.create_persistent_volume_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_persistent_volume_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_persistent_volume # noqa: E501
+
+ create a PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_persistent_volume_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1PersistentVolume body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot of this call frame: captures self, 'body' and the raw
+ # 'kwargs' dict. Accepted keyword arguments are validated and then
+ # flattened into this same dict below.
+ local_var_params = locals()
+
+ # Endpoint-specific parameters accepted by this operation.
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ # Transport-level options shared by all generated operations.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge accepted ones into
+ # local_var_params so the lookups below see a single flat dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_persistent_volume" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_persistent_volume`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ # Only non-None query parameters are sent; Python snake_case names are
+ # mapped to the camelCase names the Kubernetes API server expects.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the actual HTTP POST (and response deserialization into
+ # V1PersistentVolume) to the shared ApiClient.
+ return self.api_client.call_api(
+ '/api/v1/persistentvolumes', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolume', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_config_map(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_config_map # noqa: E501
+
+ delete collection of ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_config_map(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Thin convenience wrapper: force the low-level variant to return only
+ # the deserialized response body (not the (data, status, headers)
+ # tuple) and delegate all other argument handling to it.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_config_map_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_config_map_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_config_map # noqa: E501
+
+ delete collection of ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_config_map_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot of this call frame: captures self, 'namespace' and the raw
+ # 'kwargs' dict. Accepted keyword arguments are validated and then
+ # flattened into this same dict below.
+ local_var_params = locals()
+
+ # Endpoint-specific parameters accepted by this operation.
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Transport-level options shared by all generated operations.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge accepted ones into
+ # local_var_params so the lookups below see a single flat dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_config_map" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_config_map`") # noqa: E501
+
+ collection_formats = {}
+
+ # 'namespace' is substituted into the {namespace} segment of the URL
+ # template by the ApiClient.
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Only non-None query parameters are sent; Python snake_case names
+ # (and the '_continue' keyword-safe alias) are mapped to the camelCase
+ # names the Kubernetes API server expects.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the actual HTTP DELETE (and response deserialization into
+ # V1Status) to the shared ApiClient.
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/configmaps', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_endpoints(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_endpoints # noqa: E501
+
+ delete collection of Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_endpoints(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_endpoints_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_endpoints_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_endpoints # noqa: E501
+
+ delete collection of Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_endpoints_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_endpoints" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_endpoints`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/endpoints', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_event(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_event # noqa: E501
+
+ delete collection of Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_event(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_event_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_event_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_event # noqa: E501
+
+ delete collection of Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_event_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/events', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def delete_collection_namespaced_limit_range(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_limit_range  # noqa: E501

    delete collection of LimitRange  # noqa: E501
    Synchronous by default; pass async_req=True to perform the request
    asynchronously:
    >>> thread = api.delete_collection_namespaced_limit_range(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: continue token from a previous paginated list call
    :param str dry_run: when present, validate the request without persisting it ('All')
    :param str field_selector: restrict the affected objects by their fields
    :param int grace_period_seconds: seconds before deletion; zero deletes immediately
    :param str label_selector: restrict the affected objects by their labels
    :param int limit: maximum number of responses for the underlying list call
    :param bool orphan_dependents: deprecated -- prefer propagation_policy
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: emit synthetic initial events on watch streams (with watch=true)
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout, or a (connection, read)
                             pair of timeouts
    :return: V1Status
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the response data
    # (status code and headers are dropped).
    kwargs.update(_return_http_data_only=True)
    return self.delete_collection_namespaced_limit_range_with_http_info(namespace, **kwargs)  # noqa: E501
+
def delete_collection_namespaced_limit_range_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_limit_range  # noqa: E501

    delete collection of LimitRange  # noqa: E501
    Synchronous by default; pass async_req=True to perform the request
    asynchronously:
    >>> thread = api.delete_collection_namespaced_limit_range_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: continue token from a previous paginated list call
    :param str dry_run: when present, validate the request without persisting it ('All')
    :param str field_selector: restrict the affected objects by their fields
    :param int grace_period_seconds: seconds before deletion; zero deletes immediately
    :param str label_selector: restrict the affected objects by their labels
    :param int limit: maximum number of responses for the underlying list call
    :param bool orphan_dependents: deprecated -- prefer propagation_policy
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: emit synthetic initial events on watch streams (with watch=true)
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout, or a (connection, read)
                             pair of timeouts
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """

    # locals() is captured before any other local is bound, so it holds
    # exactly: self, namespace and the raw kwargs dict.
    local_var_params = locals()

    # (python_name, wire_name) pairs; the order below is also the order in
    # which query parameters are appended to the request.
    _query_map = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]

    all_params = ['namespace'] + [py_name for py_name, _ in _query_map] + ['body']
    all_params += [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Fold the keyword arguments into local_var_params, rejecting unknown names.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_limit_range" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_limit_range`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': local_var_params['namespace']}

    # Only parameters that were supplied (and are not None) go on the wire.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in _query_map
        if local_var_params.get(py_name) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}

    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/limitranges', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def delete_collection_namespaced_persistent_volume_claim(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_persistent_volume_claim  # noqa: E501

    delete collection of PersistentVolumeClaim  # noqa: E501
    Synchronous by default; pass async_req=True to perform the request
    asynchronously:
    >>> thread = api.delete_collection_namespaced_persistent_volume_claim(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: continue token from a previous paginated list call
    :param str dry_run: when present, validate the request without persisting it ('All')
    :param str field_selector: restrict the affected objects by their fields
    :param int grace_period_seconds: seconds before deletion; zero deletes immediately
    :param str label_selector: restrict the affected objects by their labels
    :param int limit: maximum number of responses for the underlying list call
    :param bool orphan_dependents: deprecated -- prefer propagation_policy
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param bool send_initial_events: emit synthetic initial events on watch streams (with watch=true)
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :param _request_timeout: total request timeout, or a (connection, read)
                             pair of timeouts
    :return: V1Status
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the response data
    # (status code and headers are dropped).
    kwargs.update(_return_http_data_only=True)
    return self.delete_collection_namespaced_persistent_volume_claim_with_http_info(namespace, **kwargs)  # noqa: E501
+
+ def delete_collection_namespaced_persistent_volume_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_persistent_volume_claim # noqa: E501
+
+ delete collection of PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_persistent_volume_claim_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the named arguments; locals() also captures 'self' and the
+ # raw 'kwargs' dict, which is validated and merged in below.
+ local_var_params = locals()
+
+ # Keyword arguments understood by this API operation.
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Generic client options accepted by every generated method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Fail fast on unexpected keyword arguments, then fold the accepted
+ # ones into the flat parameter dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_persistent_volume_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_persistent_volume_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ # 'namespace' is substituted into the request path template.
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Translate the snake_case Python parameters into the API's camelCase
+ # query-string keys, skipping any that were not supplied.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # The optional V1DeleteOptions is sent as the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP call to the shared ApiClient; the response body is
+ # deserialized into a V1Status.
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/persistentvolumeclaims', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_pod(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_pod # noqa: E501
+
+ delete collection of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_pod(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the data-only return shape and delegate to
+ # the *_with_http_info variant, which performs the actual request.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_pod_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_pod_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_pod # noqa: E501
+
+ delete collection of Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_pod_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the named arguments; locals() also captures 'self' and the
+ # raw 'kwargs' dict, which is validated and merged in below.
+ local_var_params = locals()
+
+ # Keyword arguments understood by this API operation.
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Generic client options accepted by every generated method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Fail fast on unexpected keyword arguments, then fold the accepted
+ # ones into the flat parameter dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_pod" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_pod`") # noqa: E501
+
+ collection_formats = {}
+
+ # 'namespace' is substituted into the request path template.
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Translate the snake_case Python parameters into the API's camelCase
+ # query-string keys, skipping any that were not supplied.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # The optional V1DeleteOptions is sent as the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP call to the shared ApiClient; the response body is
+ # deserialized into a V1Status.
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_pod_template(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_pod_template # noqa: E501
+
+ delete collection of PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_pod_template(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_pod_template_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_pod_template_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_pod_template # noqa: E501
+
+ delete collection of PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_pod_template_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_pod_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_pod_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/podtemplates', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_replication_controller(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_replication_controller # noqa: E501
+
+ delete collection of ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_replication_controller(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_replication_controller_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_replication_controller_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_replication_controller # noqa: E501
+
+ delete collection of ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_replication_controller_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_replication_controller" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_replication_controller`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/replicationcontrollers', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_resource_quota(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_resource_quota # noqa: E501
+
+ delete collection of ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_resource_quota(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_resource_quota_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_resource_quota_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_resource_quota # noqa: E501
+
+ delete collection of ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_resource_quota_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_resource_quota" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_quota`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/resourcequotas', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_secret(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_secret # noqa: E501
+
+ delete collection of Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_secret(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_secret_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_secret_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_secret # noqa: E501
+
+ delete collection of Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_secret_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_secret" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_secret`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/secrets', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_service(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_service # noqa: E501
+
+ delete collection of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_service(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_service_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_service_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_service # noqa: E501
+
+ delete collection of Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_service_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_service_account(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_service_account # noqa: E501
+
+ delete collection of ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_service_account(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_service_account_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_service_account_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_service_account # noqa: E501
+
+ delete collection of ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_service_account_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_service_account" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_service_account`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/serviceaccounts', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_node(self, **kwargs): # noqa: E501
+ """delete_collection_node # noqa: E501
+
+ delete collection of Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_node(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_node_with_http_info(**kwargs) # noqa: E501
+
+    def delete_collection_node_with_http_info(self, **kwargs):  # noqa: E501
+        """delete_collection_node  # noqa: E501
+
+        delete collection of Node  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_node_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() here captures only 'self' and 'kwargs'; the accepted
+        # keyword arguments are merged into this dict by the loop below.
+        local_var_params = locals()
+
+        # Parameters understood by the API endpoint itself.
+        all_params = [
+            'pretty',
+            '_continue',
+            'dry_run',
+            'field_selector',
+            'grace_period_seconds',
+            'label_selector',
+            'limit',
+            'orphan_dependents',
+            'propagation_policy',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'body'
+        ]
+        # Client-level options consumed by call_api, never sent to the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early with an explicit error.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_collection_node" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Translate each snake_case python argument into the camelCase query
+        # parameter name the Kubernetes API expects; skip unset (None) values.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions payload sent as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/nodes', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_persistent_volume(self, **kwargs):  # noqa: E501
+        """delete_collection_persistent_volume  # noqa: E501
+
+        delete collection of PersistentVolume  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_persistent_volume(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: return only the deserialized body by delegating
+        # to the *_with_http_info variant with _return_http_data_only forced on.
+        kwargs['_return_http_data_only'] = True
+        return self.delete_collection_persistent_volume_with_http_info(**kwargs)  # noqa: E501
+
+    def delete_collection_persistent_volume_with_http_info(self, **kwargs):  # noqa: E501
+        """delete_collection_persistent_volume  # noqa: E501
+
+        delete collection of PersistentVolume  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_persistent_volume_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() here captures only 'self' and 'kwargs'; the accepted
+        # keyword arguments are merged into this dict by the loop below.
+        local_var_params = locals()
+
+        # Parameters understood by the API endpoint itself.
+        all_params = [
+            'pretty',
+            '_continue',
+            'dry_run',
+            'field_selector',
+            'grace_period_seconds',
+            'label_selector',
+            'limit',
+            'orphan_dependents',
+            'propagation_policy',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'body'
+        ]
+        # Client-level options consumed by call_api, never sent to the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early with an explicit error.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_collection_persistent_volume" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Translate each snake_case python argument into the camelCase query
+        # parameter name the Kubernetes API expects; skip unset (None) values.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions payload sent as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/api/v1/persistentvolumes', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
def delete_namespace(self, name, **kwargs):  # noqa: E501
    """delete_namespace  # noqa: E501

    delete a Namespace  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespace(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Namespace (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the
    # deserialized body only (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.delete_namespace_with_http_info(name, **kwargs)  # noqa: E501
+
def delete_namespace_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_namespace  # noqa: E501

    delete a Namespace  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespace_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Namespace (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of self/name/kwargs; **kwargs are folded in below.
    local_var_params = locals()

    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        # request-level options understood by call_api()
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Merge **kwargs into the parameter dict, rejecting unknown names.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespace" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespace`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Map python argument names onto their wire (query-string) names,
    # skipping every parameter the caller did not supply.
    query_params = [
        (wire, local_var_params[attr])
        for attr, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if local_var_params.get(attr) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions request body.
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def delete_namespaced_config_map(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_config_map  # noqa: E501

    delete a ConfigMap  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespaced_config_map(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ConfigMap (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the
    # deserialized body only (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_config_map_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
def delete_namespaced_config_map_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_config_map  # noqa: E501

    delete a ConfigMap  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespaced_config_map_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ConfigMap (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of self/name/namespace/kwargs; **kwargs folded in below.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        # request-level options understood by call_api()
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Merge **kwargs into the parameter dict, rejecting unknown names.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_config_map" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_config_map`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_config_map`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    for attr in ('name', 'namespace'):
        if attr in local_var_params:
            path_params[attr] = local_var_params[attr]  # noqa: E501

    # Map python argument names onto their wire (query-string) names,
    # skipping every parameter the caller did not supply.
    query_params = [
        (wire, local_var_params[attr])
        for attr, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if local_var_params.get(attr) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions request body.
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/configmaps/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def delete_namespaced_endpoints(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_endpoints  # noqa: E501

    delete Endpoints  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespaced_endpoints(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Endpoints (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the
    # deserialized body only (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_endpoints_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
def delete_namespaced_endpoints_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_endpoints  # noqa: E501

    delete Endpoints  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespaced_endpoints_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Endpoints (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of self/name/namespace/kwargs; **kwargs folded in below.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        # request-level options understood by call_api()
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Merge **kwargs into the parameter dict, rejecting unknown names.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_endpoints" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_endpoints`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_endpoints`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    for attr in ('name', 'namespace'):
        if attr in local_var_params:
            path_params[attr] = local_var_params[attr]  # noqa: E501

    # Map python argument names onto their wire (query-string) names,
    # skipping every parameter the caller did not supply.
    query_params = [
        (wire, local_var_params[attr])
        for attr, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if local_var_params.get(attr) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions request body.
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/endpoints/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def delete_namespaced_event(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_event  # noqa: E501

    delete an Event  # noqa: E501
    By default the call is synchronous; pass async_req=True to obtain a
    request thread instead:
    >>> thread = api.delete_namespaced_event(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Event (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, the request is validated but no
        modification is persisted; the only valid value is 'All'
    :param int grace_period_seconds: non-negative duration in seconds
        before the object is deleted; 0 means delete immediately, and a
        per-object default applies when omitted
    :param bool orphan_dependents: deprecated, use propagation_policy
        instead; whether dependent objects should be orphaned (mutually
        exclusive with propagation_policy)
    :param str propagation_policy: garbage-collection policy, one of
        'Orphan', 'Background' or 'Foreground' (mutually exclusive with
        orphan_dependents)
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout in seconds, or a
        (connection, read) timeout tuple
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the
    # deserialized body only (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_event_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
+ def delete_namespaced_event_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_event # noqa: E501
+
+ delete an Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_event_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_event`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/events/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def delete_namespaced_limit_range(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a LimitRange and return only the response body.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the LimitRange (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: V1Status, or the request thread when called asynchronously.
    """
    # Delegate to the *_with_http_info variant, forcing it to strip the
    # status code and headers from what it hands back.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_limit_range_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
def delete_namespaced_limit_range_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a LimitRange, returning data, status code and headers.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the LimitRange (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously.
    """
    # Every keyword this endpoint understands, including the generic
    # request-control options shared by all generated API methods.
    accepted = frozenset((
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ))

    params = {'name': name, 'namespace': namespace}
    for arg, value in kwargs.items():
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_limit_range" % arg
            )
        params[arg] = value

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_limit_range`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_limit_range`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Map python-style option names to their wire (query string) names,
    # skipping anything the caller did not supply.
    query_params = [
        (wire, params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/limitranges/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],  # this endpoint only supports bearer-token auth
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def delete_namespaced_persistent_volume_claim(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a PersistentVolumeClaim and return only the response body.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the PersistentVolumeClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: V1PersistentVolumeClaim, or the request thread when called asynchronously.
    """
    # Delegate to the *_with_http_info variant, forcing it to strip the
    # status code and headers from what it hands back.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_persistent_volume_claim_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
def delete_namespaced_persistent_volume_claim_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a PersistentVolumeClaim, returning data, status code and headers.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the PersistentVolumeClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously.
    """
    # Every keyword this endpoint understands, including the generic
    # request-control options shared by all generated API methods.
    accepted = frozenset((
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ))

    params = {'name': name, 'namespace': namespace}
    for arg, value in kwargs.items():
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_persistent_volume_claim" % arg
            )
        params[arg] = value

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_persistent_volume_claim`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_persistent_volume_claim`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Map python-style option names to their wire (query string) names,
    # skipping anything the caller did not supply.
    query_params = [
        (wire, params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1PersistentVolumeClaim',  # noqa: E501
        auth_settings=['BearerToken'],  # this endpoint only supports bearer-token auth
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def delete_namespaced_pod(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a Pod and return only the response body.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the Pod (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: V1Pod, or the request thread when called asynchronously.
    """
    # Delegate to the *_with_http_info variant, forcing it to strip the
    # status code and headers from what it hands back.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_pod_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
def delete_namespaced_pod_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a Pod, returning data, status code and headers.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the Pod (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously.
    """
    # Every keyword this endpoint understands, including the generic
    # request-control options shared by all generated API methods.
    accepted = frozenset((
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ))

    params = {'name': name, 'namespace': namespace}
    for arg, value in kwargs.items():
        if arg not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_pod" % arg
            )
        params[arg] = value

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_pod`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_pod`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Map python-style option names to their wire (query string) names,
    # skipping anything the caller did not supply.
    query_params = [
        (wire, params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/pods/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1Pod',  # noqa: E501
        auth_settings=['BearerToken'],  # this endpoint only supports bearer-token auth
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def delete_namespaced_pod_template(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a PodTemplate and return only the response body.

    Synchronous by default; pass async_req=True to get back the request
    thread instead (its .get() yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodTemplate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, the request is validated but modifications are not persisted.
    :param int grace_period_seconds: Non-negative seconds before deletion; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated; use propagation_policy instead.
    :param str propagation_policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body: delete options payload.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: V1PodTemplate, or the request thread when called asynchronously.
    """
    # Delegate to the *_with_http_info variant, forcing it to strip the
    # status code and headers from what it hands back.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_namespaced_pod_template_with_http_info(name, namespace, **forwarded)  # noqa: E501
+
    def delete_namespaced_pod_template_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_pod_template  # noqa: E501

        delete a PodTemplate  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_pod_template_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodTemplate (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PodTemplate, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of all arguments (name, namespace, the kwargs dict, and
        # self) so the positional and keyword parameters can be handled
        # uniformly below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument this endpoint does not define, then
        # flatten the accepted ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_pod_template" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_pod_template`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_pod_template`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the API's camelCase names (e.g. dry_run ->
        # dryRun); unset/None parameters are omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The optional V1DeleteOptions argument becomes the request payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/podtemplates/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PodTemplate',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def delete_namespaced_replication_controller(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_replication_controller  # noqa: E501

        delete a ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_replication_controller(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: return only the deserialized response body;
        # the *_with_http_info variant also returns status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.delete_namespaced_replication_controller_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
    def delete_namespaced_replication_controller_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_replication_controller  # noqa: E501

        delete a ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_replication_controller_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of all arguments (name, namespace, the kwargs dict, and
        # self) so the positional and keyword parameters can be handled
        # uniformly below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument this endpoint does not define, then
        # flatten the accepted ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_replication_controller" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_replication_controller`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_replication_controller`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the API's camelCase names (e.g. dry_run ->
        # dryRun); unset/None parameters are omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The optional V1DeleteOptions argument becomes the request payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def delete_namespaced_resource_quota(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_resource_quota  # noqa: E501

        delete a ResourceQuota  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_resource_quota(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceQuota (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1ResourceQuota
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: return only the deserialized response body;
        # the *_with_http_info variant also returns status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.delete_namespaced_resource_quota_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
    def delete_namespaced_resource_quota_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_resource_quota  # noqa: E501

        delete a ResourceQuota  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_resource_quota_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceQuota (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of all arguments (name, namespace, the kwargs dict, and
        # self) so the positional and keyword parameters can be handled
        # uniformly below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument this endpoint does not define, then
        # flatten the accepted ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_resource_quota" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_quota`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_quota`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the API's camelCase names (e.g. dry_run ->
        # dryRun); unset/None parameters are omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The optional V1DeleteOptions argument becomes the request payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/resourcequotas/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ResourceQuota',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def delete_namespaced_secret(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_secret  # noqa: E501

        delete a Secret  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_secret(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Secret (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: return only the deserialized response body;
        # the *_with_http_info variant also returns status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.delete_namespaced_secret_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
    def delete_namespaced_secret_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_secret  # noqa: E501

        delete a Secret  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_secret_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Secret (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of all arguments (name, namespace, the kwargs dict, and
        # self) so the positional and keyword parameters can be handled
        # uniformly below.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument this endpoint does not define, then
        # flatten the accepted ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_secret" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_secret`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_secret`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template passed to
        # call_api below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query parameters use the API's camelCase names (e.g. dry_run ->
        # dryRun); unset/None parameters are omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The optional V1DeleteOptions argument becomes the request payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/secrets/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_namespaced_service(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_service # noqa: E501
+
+ delete a Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def delete_namespaced_service_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_service  # noqa: E501

        delete a Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_service_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Service (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Must be the first statement so the snapshot captures exactly
        # (self, name, namespace, kwargs); individual keyword arguments are
        # merged into this dict after validation below.
        local_var_params = locals()

        # Operation-specific parameters accepted via **kwargs.
        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Framework-level options accepted by every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the known ones into
        # local_var_params so the lookups below see them as top-level keys.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_service" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_service`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_service`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace}/{name} placeholders of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string uses the API's camelCase names (e.g. dryRun), while the
        # Python keywords are snake_case.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions body carrying the deletion preconditions.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Dispatch; returns (data, status, headers) or a thread when async_req
        # is set.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Service',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_namespaced_service_account(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_service_account # noqa: E501
+
+ delete a ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_service_account(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccount
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_service_account_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def delete_namespaced_service_account_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_service_account  # noqa: E501

        delete a ServiceAccount  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_service_account_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ServiceAccount (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ServiceAccount, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Must be the first statement so the snapshot captures exactly
        # (self, name, namespace, kwargs); individual keyword arguments are
        # merged into this dict after validation below.
        local_var_params = locals()

        # Operation-specific parameters accepted via **kwargs.
        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Framework-level options accepted by every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the known ones into
        # local_var_params so the lookups below see them as top-level keys.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_service_account" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_service_account`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_service_account`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {namespace}/{name} placeholders of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string uses the API's camelCase names (e.g. dryRun), while the
        # Python keywords are snake_case.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions body carrying the deletion preconditions.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Dispatch; returns (data, status, headers) or a thread when async_req
        # is set.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/serviceaccounts/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ServiceAccount',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_node(self, name, **kwargs): # noqa: E501
+ """delete_node # noqa: E501
+
+ delete a Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_node(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_node_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_node_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_node  # noqa: E501

        delete a Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_node_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Node (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Must be the first statement so the snapshot captures exactly
        # (self, name, kwargs); individual keyword arguments are merged into
        # this dict after validation below.
        local_var_params = locals()

        # Operation-specific parameters accepted via **kwargs. Nodes are
        # cluster-scoped, so there is no 'namespace' here.
        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Framework-level options accepted by every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the known ones into
        # local_var_params so the lookups below see them as top-level keys.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_node" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_node`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {name} placeholder of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Query string uses the API's camelCase names (e.g. dryRun), while the
        # Python keywords are snake_case.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions body carrying the deletion preconditions.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Dispatch; returns (data, status, headers) or a thread when async_req
        # is set.
        return self.api_client.call_api(
            '/api/v1/nodes/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_persistent_volume(self, name, **kwargs): # noqa: E501
+ """delete_persistent_volume # noqa: E501
+
+ delete a PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_persistent_volume(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_persistent_volume_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_persistent_volume_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_persistent_volume  # noqa: E501

        delete a PersistentVolume  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_persistent_volume_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolume (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is evaluated before any other local is bound, so it
        # captures exactly the call arguments: 'self', 'name' and the 'kwargs'
        # dict.  Statement order matters here.
        local_var_params = locals()

        # Endpoint-specific parameter names; the generic transport options are
        # appended below.  Anything else found in kwargs is rejected as a typo.
        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_persistent_volume" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_persistent_volume`")  # noqa: E501

        collection_formats = {}

        # 'name' is interpolated into the '{name}' placeholder of the request
        # path template passed to call_api below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Map the snake_case Python arguments onto the camelCase query-string
        # names used by the API server; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions object sent as the request payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/persistentvolumes/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolume',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is evaluated before any other local is bound, so it
        # captures only 'self' and the 'kwargs' dict.  Statement order matters.
        local_var_params = locals()

        # This endpoint takes no API-specific parameters; only the generic
        # transport options appended below are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # Discovery request: GET with no request payload.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_component_status(self, **kwargs): # noqa: E501
+ """list_component_status # noqa: E501
+
+ list objects of kind ComponentStatus # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_component_status(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ComponentStatusList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_component_status_with_http_info(**kwargs) # noqa: E501
+
    def list_component_status_with_http_info(self, **kwargs):  # noqa: E501
        """list_component_status  # noqa: E501

        list objects of kind ComponentStatus  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_component_status_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ComponentStatusList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is evaluated before any other local is bound, so it
        # captures only 'self' and the 'kwargs' dict.  Statement order matters.
        local_var_params = locals()

        # Endpoint-specific parameter names; the generic transport options are
        # appended below.  Anything else found in kwargs is rejected as a typo.
        all_params = [
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'pretty',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keywords.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_component_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Map the snake_case Python arguments onto the camelCase query-string
        # names used by the API server ('_continue' avoids shadowing the
        # Python keyword 'continue'); unset (None) values are omitted.
        query_params = []
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # List/watch request: GET with no request payload.  The Accept list
        # additionally offers the streaming media types used for watches.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/componentstatuses', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ComponentStatusList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_config_map_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_config_map_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_config_map_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ConfigMapList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_config_map_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_config_map_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_config_map_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_config_map_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ConfigMapList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_config_map_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/configmaps', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ConfigMapList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_endpoints_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_endpoints_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_endpoints_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointsList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_endpoints_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_endpoints_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_endpoints_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_endpoints_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1EndpointsList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_endpoints_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/endpoints', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1EndpointsList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_event_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_event_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_event_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: CoreV1EventList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_event_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+    def list_event_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
+        """list_event_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind Event  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_event_for_all_namespaces_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(CoreV1EventList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() here contains only 'self' and 'kwargs'; the caller-supplied
+        # options are merged in from 'kwargs' after validation below.
+        local_var_params = locals()
+
+        # API query parameters accepted by this endpoint (python snake_case).
+        all_params = [
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'pretty',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Framework-level options honored by every generated endpoint.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any kwarg that is neither an API parameter nor a
+        # framework option, then flatten the accepted kwargs into
+        # local_var_params so they can be looked up by name below.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_event_for_all_namespaces" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # No path templating: this endpoint lists across all namespaces.
+        path_params = {}
+
+        # Map snake_case parameter names onto the Kubernetes API's camelCase
+        # query-string keys; parameters left unset (None) are omitted.
+        query_params = []
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        # '_continue' carries a leading underscore only to avoid clashing with
+        # the Python 'continue' keyword; the wire name is plain 'continue'.
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, file uploads, or request body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`: advertise every representation the server can
+        # return, including the streaming variants used for watch requests.
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP round trip (and optional async dispatch /
+        # response deserialization) to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/events', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='CoreV1EventList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def list_limit_range_for_all_namespaces(self, **kwargs):  # noqa: E501
+        """list_limit_range_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind LimitRange  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_limit_range_for_all_namespaces(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1LimitRangeList
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Thin convenience wrapper: force body-only return (drop status code
+        # and headers) and delegate to the *_with_http_info variant, which
+        # builds and executes the actual request.
+        kwargs['_return_http_data_only'] = True
+        return self.list_limit_range_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
+    def list_limit_range_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
+        """list_limit_range_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind LimitRange  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_limit_range_for_all_namespaces_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1LimitRangeList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() here contains only 'self' and 'kwargs'; the caller-supplied
+        # options are merged in from 'kwargs' after validation below.
+        local_var_params = locals()
+
+        # API query parameters accepted by this endpoint (python snake_case).
+        all_params = [
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'pretty',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Framework-level options honored by every generated endpoint.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any kwarg that is neither an API parameter nor a
+        # framework option, then flatten the accepted kwargs into
+        # local_var_params so they can be looked up by name below.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_limit_range_for_all_namespaces" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # No path templating: this endpoint lists across all namespaces.
+        path_params = {}
+
+        # Map snake_case parameter names onto the Kubernetes API's camelCase
+        # query-string keys; parameters left unset (None) are omitted.
+        query_params = []
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        # '_continue' carries a leading underscore only to avoid clashing with
+        # the Python 'continue' keyword; the wire name is plain 'continue'.
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, file uploads, or request body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`: advertise every representation the server can
+        # return, including the streaming variants used for watch requests.
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP round trip (and optional async dispatch /
+        # response deserialization) to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/limitranges', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1LimitRangeList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_namespace(self, **kwargs): # noqa: E501
+ """list_namespace # noqa: E501
+
+ list or watch objects of kind Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespace(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1NamespaceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespace_with_http_info(**kwargs) # noqa: E501
+
+    def list_namespace_with_http_info(self, **kwargs):  # noqa: E501
+        """list_namespace  # noqa: E501
+
+        list or watch objects of kind Namespace  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespace_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1NamespaceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is captured before any other local is defined, so it
+        # holds exactly {'self', 'kwargs'}; the validated keyword arguments are
+        # merged into it below and the raw 'kwargs' entry is then removed.
+        # Do not introduce new locals above this line.
+        local_var_params = locals()
+
+        # Query-parameter names accepted by this endpoint.
+        all_params = [
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Client-level options handled by api_client rather than the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any keyword argument that is neither an API query
+        # option nor a client-level option.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_namespace" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Map snake_case python arguments to their camelCase query-parameter
+        # names; '_continue' carries a leading underscore only to avoid the
+        # python 'continue' keyword. Unset (None) options are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Returns the deserialized body (or a (data, status, headers) tuple
+        # depending on _return_http_data_only), or the request thread when
+        # async_req is set.
+        return self.api_client.call_api(
+            '/api/v1/namespaces', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1NamespaceList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_namespaced_config_map(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_config_map # noqa: E501
+
+ list or watch objects of kind ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_config_map(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ConfigMapList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_config_map_with_http_info(namespace, **kwargs) # noqa: E501
+
+    def list_namespaced_config_map_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_config_map  # noqa: E501
+
+        list or watch objects of kind ConfigMap  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_config_map_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ConfigMapList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is captured before any other local is defined, so it
+        # holds exactly {'self', 'namespace', 'kwargs'}; the validated keyword
+        # arguments are merged into it below and the raw 'kwargs' entry is
+        # then removed. Do not introduce new locals above this line.
+        local_var_params = locals()
+
+        # Path and query parameter names accepted by this endpoint.
+        all_params = [
+            'namespace',
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Client-level options handled by api_client rather than the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fail fast on any keyword argument that is neither an API option nor
+        # a client-level option.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_namespaced_config_map" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        # (client-side validation can be disabled on the api_client's
+        # configuration, in which case the server performs the check).
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_config_map`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'namespace' is substituted into the {namespace} segment of the URL.
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case python arguments to their camelCase query-parameter
+        # names; '_continue' carries a leading underscore only to avoid the
+        # python 'continue' keyword. Unset (None) options are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Returns the deserialized body (or a (data, status, headers) tuple
+        # depending on _return_http_data_only), or the request thread when
+        # async_req is set.
+        return self.api_client.call_api(
+            '/api/v1/namespaces/{namespace}/configmaps', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ConfigMapList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_namespaced_endpoints(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_endpoints # noqa: E501
+
+ list or watch objects of kind Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_endpoints(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointsList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_endpoints_with_http_info(namespace, **kwargs) # noqa: E501
+
    def list_namespaced_endpoints_with_http_info(self, namespace, **kwargs):  # noqa: E501
        """list_namespaced_endpoints  # noqa: E501

        list or watch objects of kind Endpoints  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_namespaced_endpoints_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1EndpointsList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be captured here, before any other local name is
        # bound, so the dict holds exactly {'self', 'namespace', 'kwargs'}.
        # Everything below mutates this dict rather than binding new names
        # for the parameters.
        local_var_params = locals()

        # Keywords this operation accepts on the wire...
        all_params = [
            'namespace',
            'pretty',
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # ...plus the client-level transport options shared by every method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold the caller's kwargs into local_var_params, rejecting any
        # keyword this operation does not recognize.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_namespaced_endpoints" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_endpoints`")  # noqa: E501

        collection_formats = {}

        # Path template substitution for '{namespace}' in the URL.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: snake_case Python names map to the camelCase wire
        # names; parameters left unset (None) are omitted from the request.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        # GET request: no form fields, no file uploads, no request body.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP round trip (and async dispatch, if
        # requested) to the shared ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/endpoints', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1EndpointsList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_namespaced_event(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_event # noqa: E501
+
+ list or watch objects of kind Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_event(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: CoreV1EventList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_event_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_event_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_event # noqa: E501
+
+ list or watch objects of kind Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_event_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(CoreV1EventList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/events', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='CoreV1EventList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_limit_range(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_limit_range # noqa: E501
+
+ list or watch objects of kind LimitRange # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_limit_range(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1LimitRangeList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_limit_range_with_http_info(namespace, **kwargs) # noqa: E501
+
    def list_namespaced_limit_range_with_http_info(self, namespace, **kwargs):  # noqa: E501
        """list_namespaced_limit_range  # noqa: E501

        List or watch objects of kind LimitRange, returning the full HTTP
        response information. Synchronous by default; pass ``async_req=True``
        to get back a thread whose ``.get()`` yields the result.

        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: if 'true', the output is pretty printed
        :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore)
        :param str _continue: continuation token returned by a previous chunked list call
        :param str field_selector: restrict the returned objects by their fields
        :param str label_selector: restrict the returned objects by their labels
        :param int limit: maximum number of responses for a single list call
        :param str resource_version: resourceVersion constraint for the list/watch
        :param str resource_version_match: how resource_version is applied to the list call
        :param bool send_initial_events: with watch=true, emit synthetic events for current state first
        :param int timeout_seconds: server-side timeout for the list/watch call
        :param bool watch: stream add/update/remove notifications instead of a one-shot list
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1LimitRangeList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        :raises ApiTypeError: if an unknown keyword argument is passed
        :raises ApiValueError: if `namespace` is missing (when client-side
                               validation is enabled)
        """

        # Snapshot the named arguments (self, namespace) plus the raw kwargs
        # dict; validated kwargs entries are merged into this dict below.
        local_var_params = locals()

        # API-level query parameters this endpoint understands.
        all_params = [
            'namespace',
            'pretty',
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # Per-request transport options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted ones into
        # local_var_params so the lookups below see a single flat dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_namespaced_limit_range" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
                                                        local_var_params['namespace'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_limit_range`") # noqa: E501

        collection_formats = {}

        # {namespace} placeholder in the URL path template below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace'] # noqa: E501

        # Only parameters that were explicitly set (non-None) go on the wire;
        # note the snake_case -> camelCase rename for the Kubernetes API.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
            query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
            query_params.append(('limit', local_var_params['limit'])) # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
            query_params.append(('watch', local_var_params['watch'])) # noqa: E501

        header_params = {}

        # GET request: no form fields or file uploads.
        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        # api_client.call_api handles (de)serialization, auth headers, and the
        # sync/async dispatch controlled by async_req.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/limitranges', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1LimitRangeList', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_namespaced_persistent_volume_claim(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_persistent_volume_claim # noqa: E501
+
+ list or watch objects of kind PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_persistent_volume_claim(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeClaimList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_persistent_volume_claim_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_persistent_volume_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_persistent_volume_claim # noqa: E501
+
+ list or watch objects of kind PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_persistent_volume_claim_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolumeClaimList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_persistent_volume_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_persistent_volume_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/persistentvolumeclaims', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolumeClaimList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_pod(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod # noqa: E501
+
+ list or watch objects of kind Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_pod_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_pod_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod # noqa: E501
+
+ list or watch objects of kind Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_pod" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_pod`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_pod_template(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod_template # noqa: E501
+
+ list or watch objects of kind PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod_template(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodTemplateList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_pod_template_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_pod_template_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod_template # noqa: E501
+
+ list or watch objects of kind PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod_template_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodTemplateList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_pod_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_pod_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/podtemplates', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodTemplateList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_replication_controller(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_replication_controller # noqa: E501
+
+ list or watch objects of kind ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_replication_controller(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationControllerList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_replication_controller_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_replication_controller_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_replication_controller # noqa: E501
+
+ list or watch objects of kind ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_replication_controller_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicationControllerList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_replication_controller" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_replication_controller`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/replicationcontrollers', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicationControllerList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_resource_quota(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_resource_quota # noqa: E501
+
+ list or watch objects of kind ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_resource_quota(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuotaList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs) # noqa: E501
+
def list_namespaced_resource_quota_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_resource_quota  # noqa: E501

    list or watch objects of kind ResourceQuota  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_resource_quota_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continue token from a previous paginated list result; a stale token yields a 410 ResourceExpired error.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; more items are fetched via the returned `continue` token.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, emit synthetic events for the current state, then a \"Bookmark\" event, before regular watch events.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: stream add/update/remove notifications instead of a single list. Specify resourceVersion.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ResourceQuotaList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call-time arguments (self, namespace, kwargs) before any
    # temporary is bound; validated kwargs are merged into this mapping below.
    local_var_params = locals()

    # snake_case parameter name -> camelCase query-string key, in wire order.
    query_key_for = (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    accepted = {
        'namespace',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    accepted.update(py_name for py_name, _ in query_key_for)

    # Reject unknown keyword arguments, then fold the known ones in.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_resource_quota" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_quota`")  # noqa: E501

    path_params = {'namespace': local_var_params['namespace']}

    # Only parameters the caller actually supplied become query parameters.
    query_params = []
    for py_name, wire_name in query_key_for:
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/resourcequotas', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1ResourceQuotaList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
+
def list_namespaced_secret(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_secret  # noqa: E501

    list or watch objects of kind Secret  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_secret(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continue token from a previous paginated list result; a stale token yields a 410 ResourceExpired error.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; more items are fetched via the returned `continue` token.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, emit synthetic events for the current state, then a \"Bookmark\" event, before regular watch events.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: stream add/update/remove notifications instead of a single list. Specify resourceVersion.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1SecretList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload only; callers that want the
    # (data, status, headers) tuple use list_namespaced_secret_with_http_info.
    return self.list_namespaced_secret_with_http_info(
        namespace, **dict(kwargs, _return_http_data_only=True)
    )  # noqa: E501
+
def list_namespaced_secret_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_secret  # noqa: E501

    list or watch objects of kind Secret  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_secret_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continue token from a previous paginated list result; a stale token yields a 410 ResourceExpired error.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses for a list call; more items are fetched via the returned `continue` token.
    :param str resource_version: constraint on what resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param bool send_initial_events: with `watch=true`, emit synthetic events for the current state, then a \"Bookmark\" event, before regular watch events.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param bool watch: stream add/update/remove notifications instead of a single list. Specify resourceVersion.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1SecretList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the call-time arguments (self, namespace, kwargs) before any
    # temporary is bound; validated kwargs are merged into this mapping below.
    local_var_params = locals()

    # snake_case parameter name -> camelCase query-string key, in wire order.
    query_key_for = (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    accepted = {
        'namespace',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    accepted.update(py_name for py_name, _ in query_key_for)

    # Reject unknown keyword arguments, then fold the known ones in.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_secret" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_secret`")  # noqa: E501

    path_params = {'namespace': local_var_params['namespace']}

    # Only parameters the caller actually supplied become query parameters.
    query_params = []
    for py_name, wire_name in query_key_for:
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/secrets', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1SecretList',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
+
+ def list_namespaced_service(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_service # noqa: E501
+
+ list or watch objects of kind Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_service(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_service_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_service_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_service # noqa: E501
+
+ list or watch objects of kind Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_service_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ServiceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ServiceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_service_account(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_service_account # noqa: E501
+
+ list or watch objects of kind ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_service_account(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccountList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_service_account_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_service_account_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_service_account # noqa: E501
+
+ list or watch objects of kind ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_service_account_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ServiceAccountList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_service_account" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_service_account`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/serviceaccounts', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ServiceAccountList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_node(self, **kwargs): # noqa: E501
+ """list_node # noqa: E501
+
+ list or watch objects of kind Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_node(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1NodeList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: request only the deserialized body (no status
+ # code / headers tuple) and delegate to the full *_with_http_info form.
+ kwargs['_return_http_data_only'] = True
+ return self.list_node_with_http_info(**kwargs) # noqa: E501
+
+ def list_node_with_http_info(self, **kwargs): # noqa: E501
+ """list_node # noqa: E501
+
+ list or watch objects of kind Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_node_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1NodeList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot this call's locals ('self' and 'kwargs') as the single dict
+ # the validated keyword arguments are merged into below.
+ local_var_params = locals()
+
+ # Query options this endpoint accepts (snake_case client-side names).
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ # Transport-level options handled generically by the api_client.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unrecognized keyword arguments early with a clear error;
+ # accepted ones are folded into local_var_params for uniform lookup.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ # /api/v1/nodes is cluster-scoped, so the path takes no parameters.
+ path_params = {}
+
+ # Map each supplied snake_case option to its camelCase query-string
+ # name, skipping options that were not provided (None).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ # GET request: no form fields or file uploads, and no request body.
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Hand the assembled request to the generic client, which performs the
+ # HTTP call (or spawns a thread when async_req is set) and deserializes
+ # the response into V1NodeList.
+ return self.api_client.call_api(
+ '/api/v1/nodes', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1NodeList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_persistent_volume(self, **kwargs): # noqa: E501
+ """list_persistent_volume # noqa: E501
+
+ list or watch objects of kind PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_persistent_volume(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: request only the deserialized body (no status
+ # code / headers tuple) and delegate to the full *_with_http_info form.
+ kwargs['_return_http_data_only'] = True
+ return self.list_persistent_volume_with_http_info(**kwargs) # noqa: E501
+
+ def list_persistent_volume_with_http_info(self, **kwargs): # noqa: E501
+ """list_persistent_volume # noqa: E501
+
+ list or watch objects of kind PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_persistent_volume_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolumeList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_persistent_volume" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/persistentvolumes', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolumeList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def list_persistent_volume_claim_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_persistent_volume_claim_for_all_namespaces  # noqa: E501

    list or watch objects of kind PersistentVolumeClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_persistent_volume_claim_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers that do not implement bookmarks may ignore this flag
    :param str _continue: opaque continue token from a previous paginated list result
    :param str field_selector: restrict the returned objects by their fields; defaults to everything
    :param str label_selector: restrict the returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses to return for a list call
    :param str pretty: if 'true', then the output is pretty printed
    :param str resource_version: constraint on which resource versions may serve the request; defaults to unset
    :param str resource_version_match: how resourceVersion is applied to list calls; defaults to unset
    :param bool send_initial_events: with watch=True, first stream synthetic events describing the current state of the collection
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity
    :param bool watch: stream add/update/remove notifications instead of a one-shot list
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
                             (default True)
    :param _request_timeout: total request timeout as one number, or a
                             (connection, read) tuple
    :return: V1PersistentVolumeClaimList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the *_with_http_info variant for only the deserialized body;
    # it otherwise returns a (data, status_code, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.list_persistent_volume_claim_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
def list_persistent_volume_claim_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_persistent_volume_claim_for_all_namespaces  # noqa: E501

    list or watch objects of kind PersistentVolumeClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_persistent_volume_claim_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers that do not implement bookmarks may ignore this flag
    :param str _continue: opaque continue token from a previous paginated list result
    :param str field_selector: restrict the returned objects by their fields; defaults to everything
    :param str label_selector: restrict the returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses to return for a list call
    :param str pretty: if 'true', then the output is pretty printed
    :param str resource_version: constraint on which resource versions may serve the request; defaults to unset
    :param str resource_version_match: how resourceVersion is applied to list calls; defaults to unset
    :param bool send_initial_events: with watch=True, first stream synthetic events describing the current state of the collection
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity
    :param bool watch: stream add/update/remove notifications instead of a one-shot list
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
                             (default True)
    :param _request_timeout: total request timeout as one number, or a
                             (connection, read) tuple
    :return: tuple(V1PersistentVolumeClaimList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # locals() is snapshotted as the very first statement, so it holds exactly
    # {'self': ..., 'kwargs': {...}}; validated kwargs are flattened into it below.
    local_var_params = locals()

    # Endpoint-specific query parameters accepted by this call (snake_case form).
    all_params = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch'
    ]
    # Request-control options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then promote the known ones
    # to top-level entries of local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_persistent_volume_claim_for_all_namespaces" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Map the provided snake_case params onto the API's camelCase query keys,
    # skipping anything unset; list order determines query-string order.
    query_params = []
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
        query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
    if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
        query_params.append(('watch', local_var_params['watch']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate the actual HTTP GET (and response deserialization) to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/api/v1/persistentvolumeclaims', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PersistentVolumeClaimList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def list_pod_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_pod_for_all_namespaces  # noqa: E501

    list or watch objects of kind Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_pod_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers that do not implement bookmarks may ignore this flag
    :param str _continue: opaque continue token from a previous paginated list result
    :param str field_selector: restrict the returned objects by their fields; defaults to everything
    :param str label_selector: restrict the returned objects by their labels; defaults to everything
    :param int limit: maximum number of responses to return for a list call
    :param str pretty: if 'true', then the output is pretty printed
    :param str resource_version: constraint on which resource versions may serve the request; defaults to unset
    :param str resource_version_match: how resourceVersion is applied to list calls; defaults to unset
    :param bool send_initial_events: with watch=True, first stream synthetic events describing the current state of the collection
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity
    :param bool watch: stream add/update/remove notifications instead of a one-shot list
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data
                             (default True)
    :param _request_timeout: total request timeout as one number, or a
                             (connection, read) tuple
    :return: V1PodList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the *_with_http_info variant for only the deserialized body;
    # it otherwise returns a (data, status_code, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.list_pod_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
+
+ def list_pod_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_pod_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_pod_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_pod_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/pods', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_pod_template_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_pod_template_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_pod_template_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodTemplateList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_pod_template_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_pod_template_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_pod_template_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_pod_template_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodTemplateList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_pod_template_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/podtemplates', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodTemplateList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_replication_controller_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_replication_controller_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_replication_controller_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationControllerList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_replication_controller_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_replication_controller_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_replication_controller_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_replication_controller_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicationControllerList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_replication_controller_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/replicationcontrollers', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicationControllerList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_resource_quota_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_resource_quota_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_quota_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuotaList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_resource_quota_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_resource_quota_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_resource_quota_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_quota_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ResourceQuotaList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_resource_quota_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/resourcequotas', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ResourceQuotaList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_secret_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_secret_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_secret_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1SecretList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_secret_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+    def list_secret_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
+        """list_secret_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind Secret  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_secret_for_all_namespaces_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1SecretList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is captured before any other local name is bound, so
+        # it holds exactly {'self', 'kwargs'}; validated kwargs are merged
+        # into it below and then looked up by name.
+        local_var_params = locals()
+
+        # API-defined query parameters (python_style names).
+        all_params = [
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'pretty',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Transport-level options accepted by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument the API does not define, then flatten
+        # the accepted kwargs into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_secret_for_all_namespaces" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # Cluster-scoped list: no path templating needed.
+        path_params = {}
+
+        # Map python_style parameter names onto their camelCase wire names,
+        # skipping parameters the caller left unset (None).
+        query_params = []
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, uploads, or body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP round-trip (and async dispatch, if
+        # requested) to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/secrets', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1SecretList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_service_account_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_service_account_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_service_account_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccountList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_service_account_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+    def list_service_account_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
+        """list_service_account_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind ServiceAccount  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_service_account_for_all_namespaces_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ServiceAccountList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() is captured before any other local name is bound, so
+        # it holds exactly {'self', 'kwargs'}; validated kwargs are merged
+        # into it below and then looked up by name.
+        local_var_params = locals()
+
+        # API-defined query parameters (python_style names).
+        all_params = [
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'pretty',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Transport-level options accepted by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument the API does not define, then flatten
+        # the accepted kwargs into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_service_account_for_all_namespaces" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # Cluster-scoped list: no path templating needed.
+        path_params = {}
+
+        # Map python_style parameter names onto their camelCase wire names,
+        # skipping parameters the caller left unset (None).
+        query_params = []
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, uploads, or body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP round-trip (and async dispatch, if
+        # requested) to the shared ApiClient.
+        return self.api_client.call_api(
+            '/api/v1/serviceaccounts', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ServiceAccountList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_service_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_service_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_service_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_service_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_service_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_service_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_service_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ServiceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_service_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/services', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ServiceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespace(self, name, body, **kwargs): # noqa: E501
+ """patch_namespace # noqa: E501
+
+ partially update the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespace(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespace_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_namespace_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_namespace # noqa: E501
+
+ partially update the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespace_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespace" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespace`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespace`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Namespace', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespace_status(self, name, body, **kwargs): # noqa: E501
+ """patch_namespace_status # noqa: E501
+
+ partially update status of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespace_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespace_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_namespace_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_namespace_status # noqa: E501
+
+ partially update status of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespace_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespace_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespace_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespace_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Namespace', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_config_map(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_config_map # noqa: E501
+
+ partially update the specified ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_config_map(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ConfigMap (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ConfigMap
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_config_map_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_config_map_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_config_map # noqa: E501
+
+ partially update the specified ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_config_map_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ConfigMap (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ConfigMap, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_config_map" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_config_map`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_config_map`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_config_map`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/configmaps/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ConfigMap', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_endpoints(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_endpoints # noqa: E501
+
+ partially update the specified Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Endpoints (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Endpoints
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_endpoints_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_endpoints # noqa: E501
+
+ partially update the specified Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_endpoints_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Endpoints (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Endpoints, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_endpoints" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_endpoints`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_endpoints`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_endpoints`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/endpoints/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Endpoints', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_event # noqa: E501
+
+ partially update the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_event(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: CoreV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_event_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_event # noqa: E501
+
+ partially update the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_event_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(CoreV1Event, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_event`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_event`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/events/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='CoreV1Event', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_limit_range(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_limit_range # noqa: E501
+
+ partially update the specified LimitRange # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_limit_range(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the LimitRange (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1LimitRange
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_limit_range_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_limit_range_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_limit_range  # noqa: E501

        partially update the specified LimitRange  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_limit_range_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the LimitRange (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1LimitRange, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the call arguments (self, name, namespace, body, kwargs);
        # taken before any other local is bound so only arguments are captured.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early, then flatten the accepted
        # ones into the snapshot dict alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_limit_range" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_limit_range`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_limit_range`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_limit_range`")  # noqa: E501

        collection_formats = {}

        # Path templating: substituted into {namespace}/{name} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: only parameters actually supplied (non-None) are sent,
        # renamed from snake_case to the server's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/limitranges/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1LimitRange',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_persistent_volume_claim(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_persistent_volume_claim # noqa: E501
+
+ partially update the specified PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_persistent_volume_claim(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolumeClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_persistent_volume_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_persistent_volume_claim_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_persistent_volume_claim  # noqa: E501

        partially update the specified PersistentVolumeClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_persistent_volume_claim_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolumeClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the call arguments (self, name, namespace, body, kwargs);
        # taken before any other local is bound so only arguments are captured.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early, then flatten the accepted
        # ones into the snapshot dict alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_persistent_volume_claim" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_persistent_volume_claim`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_persistent_volume_claim`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_persistent_volume_claim`")  # noqa: E501

        collection_formats = {}

        # Path templating: substituted into {namespace}/{name} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: only parameters actually supplied (non-None) are sent,
        # renamed from snake_case to the server's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolumeClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_persistent_volume_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_persistent_volume_claim_status # noqa: E501
+
+ partially update status of the specified PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_persistent_volume_claim_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolumeClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_persistent_volume_claim_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_persistent_volume_claim_status  # noqa: E501

        partially update status of the specified PersistentVolumeClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolumeClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of the call arguments (self, name, namespace, body, kwargs);
        # taken before any other local is bound so only arguments are captured.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early, then flatten the accepted
        # ones into the snapshot dict alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_persistent_volume_claim_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_persistent_volume_claim_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_persistent_volume_claim_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_persistent_volume_claim_status`")  # noqa: E501

        collection_formats = {}

        # Path templating: substituted into {namespace}/{name} in the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: only parameters actually supplied (non-None) are sent,
        # renamed from snake_case to the server's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolumeClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_pod(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod # noqa: E501
+
+ partially update the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_pod_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_pod_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod # noqa: E501
+
+ partially update the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_pod" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Pod', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_pod_ephemeralcontainers(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_ephemeralcontainers # noqa: E501
+
+ partially update ephemeralcontainers of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_ephemeralcontainers(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_pod_ephemeralcontainers_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_pod_ephemeralcontainers_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_ephemeralcontainers # noqa: E501
+
+ partially update ephemeralcontainers of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_ephemeralcontainers_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_pod_ephemeralcontainers" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_ephemeralcontainers`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_ephemeralcontainers`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_ephemeralcontainers`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Pod', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_pod_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_status # noqa: E501
+
+ partially update status of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_pod_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_pod_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_status # noqa: E501
+
+ partially update status of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_pod_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Pod', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_pod_template(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_template # noqa: E501
+
+ partially update the specified PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_template(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_pod_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_pod_template_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_pod_template  # noqa: E501

        partially update the specified PodTemplate  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_pod_template_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodTemplate (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PodTemplate, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be the first statement so the snapshot contains
        # exactly the declared parameters (self, name, namespace, body) and the
        # raw 'kwargs' dict; binding any other local first would pollute it.
        local_var_params = locals()

        # Operation parameters accepted by the server for this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Client-side options consumed by ApiClient.call_api, never sent on the wire.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keyword names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_pod_template" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_template`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_template`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_template`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: python snake_case names map to the API's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Dispatch through the shared ApiClient; returns a thread when
        # async_req is truthy, otherwise the (data, status, headers) tuple.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/podtemplates/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PodTemplate',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_replication_controller(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_replication_controller # noqa: E501
+
+ partially update the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_replication_controller(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_replication_controller_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_replication_controller_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_replication_controller  # noqa: E501

        partially update the specified ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_replication_controller_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be the first statement so the snapshot contains
        # exactly the declared parameters (self, name, namespace, body) and the
        # raw 'kwargs' dict; binding any other local first would pollute it.
        local_var_params = locals()

        # Operation parameters accepted by the server for this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Client-side options consumed by ApiClient.call_api, never sent on the wire.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keyword names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_replication_controller" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_replication_controller`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replication_controller`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_replication_controller`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: python snake_case names map to the API's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Dispatch through the shared ApiClient; returns a thread when
        # async_req is truthy, otherwise the (data, status, headers) tuple.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicationController',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_replication_controller_scale(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_replication_controller_scale # noqa: E501
+
+ partially update scale of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_replication_controller_scale(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_replication_controller_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_replication_controller_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_replication_controller_scale  # noqa: E501

        partially update scale of the specified ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_replication_controller_scale_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must be the first statement so the snapshot contains
        # exactly the declared parameters (self, name, namespace, body) and the
        # raw 'kwargs' dict; binding any other local first would pollute it.
        local_var_params = locals()

        # Operation parameters accepted by the server for this endpoint.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Client-side options consumed by ApiClient.call_api, never sent on the wire.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Flatten kwargs into local_var_params, rejecting unknown keyword names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_replication_controller_scale" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_replication_controller_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replication_controller_scale`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_replication_controller_scale`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: python snake_case names map to the API's camelCase keys.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Dispatch through the shared ApiClient; returns a thread when
        # async_req is truthy, otherwise the (data, status, headers) tuple.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Scale',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_replication_controller_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_replication_controller_status # noqa: E501
+
+ partially update status of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_replication_controller_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_replication_controller_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_replication_controller_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_replication_controller_status # noqa: E501
+
+ partially update status of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_replication_controller_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_replication_controller_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_replication_controller_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_replication_controller_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_replication_controller_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicationController', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_resource_quota(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_quota # noqa: E501
+
+ partially update the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_quota(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_resource_quota_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_resource_quota_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_quota # noqa: E501
+
+ partially update the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_quota_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_resource_quota" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_quota`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_quota`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_quota`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/resourcequotas/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ResourceQuota', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_resource_quota_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_quota_status # noqa: E501
+
+ partially update status of the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_quota_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_resource_quota_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_resource_quota_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_quota_status # noqa: E501
+
+ partially update status of the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_quota_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_resource_quota_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_quota_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_quota_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_quota_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/resourcequotas/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ResourceQuota', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_secret(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_secret # noqa: E501
+
+ partially update the specified Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_secret(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Secret (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Secret
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_secret_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_secret_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_secret # noqa: E501
+
+ partially update the specified Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_secret_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Secret (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Secret, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_secret" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_secret`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_secret`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_secret`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/secrets/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Secret', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_service(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_service # noqa: E501
+
+ partially update the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_service(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_service_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_service # noqa: E501
+
+ partially update the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_service_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_service`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_service`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Service', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_service_account(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_service_account # noqa: E501
+
+ partially update the specified ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_service_account(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccount
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_service_account_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_service_account # noqa: E501
+
+ partially update the specified ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_service_account_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ServiceAccount, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_service_account" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_service_account`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_service_account`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_service_account`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/serviceaccounts/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ServiceAccount', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_service_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_service_status # noqa: E501
+
+ partially update status of the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_service_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_service_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_service_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_service_status # noqa: E501
+
+ partially update status of the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_service_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_service_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_service_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_service_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_service_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Service', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_node(self, name, body, **kwargs): # noqa: E501
+ """patch_node # noqa: E501
+
+ partially update the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_node(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_node_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_node_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_node # noqa: E501
+
+ partially update the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_node_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_node`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_node`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/nodes/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Node', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_node_status(self, name, body, **kwargs): # noqa: E501
+ """patch_node_status # noqa: E501
+
+ partially update status of the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_node_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_node_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_node_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_node_status # noqa: E501
+
+ partially update status of the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_node_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_node_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_node_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_node_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/nodes/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Node', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_persistent_volume(self, name, body, **kwargs): # noqa: E501
+ """patch_persistent_volume # noqa: E501
+
+ partially update the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_persistent_volume(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_persistent_volume_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_persistent_volume_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_persistent_volume # noqa: E501
+
+ partially update the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_persistent_volume_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_persistent_volume" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_persistent_volume`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_persistent_volume`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/persistentvolumes/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolume', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_persistent_volume_status(self, name, body, **kwargs): # noqa: E501
+ """patch_persistent_volume_status # noqa: E501
+
+ partially update status of the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_persistent_volume_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_persistent_volume_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_persistent_volume_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_persistent_volume_status # noqa: E501
+
+ partially update status of the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_persistent_volume_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_persistent_volume_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_persistent_volume_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_persistent_volume_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/persistentvolumes/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolume', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_component_status(self, name, **kwargs): # noqa: E501
+ """read_component_status # noqa: E501
+
+ read the specified ComponentStatus # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_component_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ComponentStatus (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ComponentStatus
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_component_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_component_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_component_status # noqa: E501
+
+ read the specified ComponentStatus # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_component_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ComponentStatus (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ComponentStatus, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_component_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_component_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/componentstatuses/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ComponentStatus', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespace(self, name, **kwargs): # noqa: E501
+ """read_namespace # noqa: E501
+
+ read the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespace(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespace_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_namespace_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_namespace # noqa: E501
+
+ read the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespace_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespace" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespace`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Namespace', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespace_status(self, name, **kwargs): # noqa: E501
+ """read_namespace_status # noqa: E501
+
+ read status of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespace_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_namespace_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_namespace_status # noqa: E501
+
+ read status of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespace_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespace_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespace_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Namespace', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_config_map(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_config_map # noqa: E501
+
+ read the specified ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_config_map(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ConfigMap (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ConfigMap
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_config_map_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_config_map_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_config_map # noqa: E501
+
+ read the specified ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_config_map_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ConfigMap (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ConfigMap, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_config_map" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_config_map`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_config_map`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/configmaps/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ConfigMap', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_endpoints(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_endpoints # noqa: E501
+
+ read the specified Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_endpoints(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Endpoints (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Endpoints
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_endpoints_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_endpoints_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_endpoints  # noqa: E501

        read the specified Endpoints  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_endpoints_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Endpoints (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Endpoints, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_endpoints" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_endpoints`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_endpoints`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # {namespace}/{name} placeholders in the path template are substituted
        # from path_params by api_client.call_api.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/endpoints/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Endpoints',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_event # noqa: E501
+
+ read the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_event(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: CoreV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_event_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_event  # noqa: E501

        read the specified Event  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_event_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Event (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(CoreV1Event, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_event" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_event`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_event`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # {namespace}/{name} placeholders in the path template are substituted
        # from path_params by api_client.call_api.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/events/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CoreV1Event',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_limit_range(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_limit_range # noqa: E501
+
+ read the specified LimitRange # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_limit_range(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the LimitRange (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1LimitRange
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_limit_range_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_limit_range_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_limit_range  # noqa: E501

        read the specified LimitRange  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_limit_range_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the LimitRange (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1LimitRange, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_limit_range" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_limit_range`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_limit_range`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # {namespace}/{name} placeholders in the path template are substituted
        # from path_params by api_client.call_api.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/limitranges/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1LimitRange',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_persistent_volume_claim(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_persistent_volume_claim # noqa: E501
+
+ read the specified PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_persistent_volume_claim(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolumeClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_persistent_volume_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_persistent_volume_claim_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_persistent_volume_claim  # noqa: E501

        read the specified PersistentVolumeClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_persistent_volume_claim_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolumeClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_persistent_volume_claim" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_persistent_volume_claim`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_persistent_volume_claim`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # {namespace}/{name} placeholders in the path template are substituted
        # from path_params by api_client.call_api.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolumeClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_persistent_volume_claim_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_persistent_volume_claim_status # noqa: E501
+
+ read status of the specified PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_persistent_volume_claim_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolumeClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolumeClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_persistent_volume_claim_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_persistent_volume_claim_status  # noqa: E501

        read status of the specified PersistentVolumeClaim  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolumeClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_persistent_volume_claim_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_persistent_volume_claim_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_persistent_volume_claim_status`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The /status subresource returns the full PersistentVolumeClaim
        # object; placeholders in the path are filled from path_params.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolumeClaim',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_pod(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod # noqa: E501
+
+ read the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_pod_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_pod  # noqa: E501

        read the specified Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_pod_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Pod (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_pod" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # {namespace}/{name} placeholders in the path template are substituted
        # from path_params by api_client.call_api.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Pod',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_pod_ephemeralcontainers(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_ephemeralcontainers # noqa: E501
+
+ read ephemeralcontainers of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_ephemeralcontainers(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_ephemeralcontainers_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_pod_ephemeralcontainers_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_pod_ephemeralcontainers  # noqa: E501

        read ephemeralcontainers of the specified Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_pod_ephemeralcontainers_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Pod (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() must remain the first statement — it snapshots exactly
        # {self, name, namespace, kwargs} before any other local is bound.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten accepted ones into
        # local_var_params so positional and keyword parameters are looked up
        # uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_pod_ephemeralcontainers" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_ephemeralcontainers`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_ephemeralcontainers`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # The ephemeralcontainers subresource returns the full Pod object;
        # placeholders in the path are filled from path_params.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Pod',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_pod_log(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_log # noqa: E501
+
+ read log of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_log(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str container: The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ :param bool follow: Follow the log stream of the pod. Defaults to false.
+ :param bool insecure_skip_tls_verify_backend: insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).
+ :param int limit_bytes: If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool previous: Return previous terminated container logs. Defaults to false.
+ :param int since_seconds: A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.
+ :param int tail_lines: If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime
+ :param bool timestamps: If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: str
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_log_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_pod_log_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_pod_log  # noqa: E501

        read log of the specified Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_pod_log_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Pod (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str container: The container for which to stream logs. Defaults to only container if there is one container in the pod.
        :param bool follow: Follow the log stream of the pod. Defaults to false.
        :param bool insecure_skip_tls_verify_backend: insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).
        :param int limit_bytes: If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool previous: Return previous terminated container logs. Defaults to false.
        :param int since_seconds: A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.
        :param int tail_lines: If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime
        :param bool timestamps: If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshot of this call: {'self', 'name', 'namespace',
        # 'kwargs'}; accepted kwargs are flattened into this mapping below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this operation.
        all_params = [
            'name',
            'namespace',
            'container',
            'follow',
            'insecure_skip_tls_verify_backend',
            'limit_bytes',
            'pretty',
            'previous',
            'since_seconds',
            'tail_lines',
            'timestamps'
        ]
        # Generic options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unrecognized keyword arguments instead of silently
        # ignoring them.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_pod_log" % key
                )
            local_var_params[key] = val
        # Every accepted kwarg has been flattened; drop the raw mapping.
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_log`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_log`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case keyword arguments onto the camelCase query-string
        # keys expected by the API server; unset (None) values are omitted.
        query_params = []
        if 'container' in local_var_params and local_var_params['container'] is not None:  # noqa: E501
            query_params.append(('container', local_var_params['container']))  # noqa: E501
        if 'follow' in local_var_params and local_var_params['follow'] is not None:  # noqa: E501
            query_params.append(('follow', local_var_params['follow']))  # noqa: E501
        if 'insecure_skip_tls_verify_backend' in local_var_params and local_var_params['insecure_skip_tls_verify_backend'] is not None:  # noqa: E501
            query_params.append(('insecureSkipTLSVerifyBackend', local_var_params['insecure_skip_tls_verify_backend']))  # noqa: E501
        if 'limit_bytes' in local_var_params and local_var_params['limit_bytes'] is not None:  # noqa: E501
            query_params.append(('limitBytes', local_var_params['limit_bytes']))  # noqa: E501
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'previous' in local_var_params and local_var_params['previous'] is not None:  # noqa: E501
            query_params.append(('previous', local_var_params['previous']))  # noqa: E501
        if 'since_seconds' in local_var_params and local_var_params['since_seconds'] is not None:  # noqa: E501
            query_params.append(('sinceSeconds', local_var_params['since_seconds']))  # noqa: E501
        if 'tail_lines' in local_var_params and local_var_params['tail_lines'] is not None:  # noqa: E501
            query_params.append(('tailLines', local_var_params['tail_lines']))  # noqa: E501
        if 'timestamps' in local_var_params and local_var_params['timestamps'] is not None:  # noqa: E501
            query_params.append(('timestamps', local_var_params['timestamps']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['text/plain', 'application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate to the shared ApiClient: returns (data, status, headers),
        # only the data when _return_http_data_only is set, or the request
        # thread when async_req is truthy.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/log', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='str',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_pod_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_status # noqa: E501
+
+ read status of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_pod_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_pod_status  # noqa: E501

        read status of the specified Pod  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_pod_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Pod (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshot of this call: {'self', 'name', 'namespace',
        # 'kwargs'}; accepted kwargs are flattened into this mapping below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this operation.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # Generic options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unrecognized keyword arguments instead of silently
        # ignoring them.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_pod_status" % key
                )
            local_var_params[key] = val
        # Every accepted kwarg has been flattened; drop the raw mapping.
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_status`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate to the shared ApiClient: returns (data, status, headers),
        # only the data when _return_http_data_only is set, or the request
        # thread when async_req is truthy.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/pods/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Pod',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_pod_template(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_template # noqa: E501
+
+ read the specified PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_template(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_template_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_pod_template_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_pod_template  # noqa: E501

        read the specified PodTemplate  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_pod_template_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodTemplate (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PodTemplate, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshot of this call: {'self', 'name', 'namespace',
        # 'kwargs'}; accepted kwargs are flattened into this mapping below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this operation.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # Generic options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unrecognized keyword arguments instead of silently
        # ignoring them.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_pod_template" % key
                )
            local_var_params[key] = val
        # Every accepted kwarg has been flattened; drop the raw mapping.
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_template`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_template`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate to the shared ApiClient: returns (data, status, headers),
        # only the data when _return_http_data_only is set, or the request
        # thread when async_req is truthy.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/podtemplates/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PodTemplate',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_replication_controller(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replication_controller # noqa: E501
+
+ read the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replication_controller(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_replication_controller_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_replication_controller_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_replication_controller  # noqa: E501

        read the specified ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_replication_controller_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshot of this call: {'self', 'name', 'namespace',
        # 'kwargs'}; accepted kwargs are flattened into this mapping below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this operation.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # Generic options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unrecognized keyword arguments instead of silently
        # ignoring them.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_replication_controller" % key
                )
            local_var_params[key] = val
        # Every accepted kwarg has been flattened; drop the raw mapping.
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_replication_controller`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_replication_controller`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate to the shared ApiClient: returns (data, status, headers),
        # only the data when _return_http_data_only is set, or the request
        # thread when async_req is truthy.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicationController',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_replication_controller_scale(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replication_controller_scale # noqa: E501
+
+ read scale of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replication_controller_scale(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_replication_controller_scale_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_replication_controller_scale_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_replication_controller_scale  # noqa: E501

        read scale of the specified ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_replication_controller_scale_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # locals() snapshot of this call: {'self', 'name', 'namespace',
        # 'kwargs'}; accepted kwargs are flattened into this mapping below.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this operation.
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # Generic options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unrecognized keyword arguments instead of silently
        # ignoring them.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_replication_controller_scale" % key
                )
            local_var_params[key] = val
        # Every accepted kwarg has been flattened; drop the raw mapping.
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_replication_controller_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_replication_controller_scale`")  # noqa: E501

        collection_formats = {}

        # Values substituted into the {namespace}/{name} URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate to the shared ApiClient: returns (data, status, headers),
        # only the data when _return_http_data_only is set, or the request
        # thread when async_req is truthy.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Scale',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_namespaced_replication_controller_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replication_controller_status # noqa: E501
+
+ read status of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replication_controller_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_replication_controller_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_replication_controller_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_replication_controller_status # noqa: E501
+
+ read status of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_replication_controller_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_replication_controller_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_replication_controller_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_replication_controller_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicationController', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_resource_quota(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_quota # noqa: E501
+
+ read the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_quota(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_resource_quota_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_resource_quota_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_quota # noqa: E501
+
+ read the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_quota_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_resource_quota" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_quota`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_quota`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/resourcequotas/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ResourceQuota', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_resource_quota_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_quota_status # noqa: E501
+
+ read status of the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_quota_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_resource_quota_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_resource_quota_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_quota_status # noqa: E501
+
+ read status of the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_quota_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_resource_quota_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_quota_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_quota_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/resourcequotas/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ResourceQuota', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_secret(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_secret # noqa: E501
+
+ read the specified Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_secret(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Secret (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Secret
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_secret_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_secret_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_secret # noqa: E501
+
+ read the specified Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_secret_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Secret (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Secret, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_secret" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_secret`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_secret`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/secrets/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Secret', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_service(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_service # noqa: E501
+
+ read the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_service(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_service_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_service # noqa: E501
+
+ read the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_service_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_service`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Service', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_service_account(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_service_account # noqa: E501
+
+ read the specified ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_service_account(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccount
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_service_account_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_service_account_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_service_account # noqa: E501
+
+ read the specified ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_service_account_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ServiceAccount, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_service_account" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_service_account`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_service_account`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/serviceaccounts/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ServiceAccount', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_service_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_service_status # noqa: E501
+
+ read status of the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_service_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_service_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def read_namespaced_service_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_service_status  # noqa: E501

        read status of the specified Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_service_status_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Service (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() must be the first statement: it snapshots exactly the
        # declared parameters (self, name, namespace) plus the raw kwargs dict.
        local_var_params = locals()

        # Endpoint-specific parameters ...
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        # ... plus the request-control options every API method accepts.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every option is addressable by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_service_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_service_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_service_status`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {name} and {namespace} path template slots.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Service',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_node(self, name, **kwargs): # noqa: E501
+ """read_node # noqa: E501
+
+ read the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_node(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_node_with_http_info(name, **kwargs) # noqa: E501
+
    def read_node_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_node  # noqa: E501

        read the specified Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_node_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Node (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() must be the first statement: it snapshots exactly the
        # declared parameters (self, name) plus the raw kwargs dict.
        local_var_params = locals()

        # Endpoint-specific parameters ...
        all_params = [
            'name',
            'pretty'
        ]
        # ... plus the request-control options every API method accepts.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every option is addressable by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_node" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_node`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} path template slot.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/nodes/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Node',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_node_status(self, name, **kwargs): # noqa: E501
+ """read_node_status # noqa: E501
+
+ read status of the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_node_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_node_status_with_http_info(name, **kwargs) # noqa: E501
+
    def read_node_status_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_node_status  # noqa: E501

        read status of the specified Node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_node_status_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Node (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() must be the first statement: it snapshots exactly the
        # declared parameters (self, name) plus the raw kwargs dict.
        local_var_params = locals()

        # Endpoint-specific parameters ...
        all_params = [
            'name',
            'pretty'
        ]
        # ... plus the request-control options every API method accepts.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every option is addressable by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_node_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_node_status`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} path template slot.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/nodes/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Node',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_persistent_volume(self, name, **kwargs): # noqa: E501
+ """read_persistent_volume # noqa: E501
+
+ read the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_persistent_volume(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_persistent_volume_with_http_info(name, **kwargs) # noqa: E501
+
    def read_persistent_volume_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_persistent_volume  # noqa: E501

        read the specified PersistentVolume  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_persistent_volume_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolume (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() must be the first statement: it snapshots exactly the
        # declared parameters (self, name) plus the raw kwargs dict.
        local_var_params = locals()

        # Endpoint-specific parameters ...
        all_params = [
            'name',
            'pretty'
        ]
        # ... plus the request-control options every API method accepts.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every option is addressable by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_persistent_volume" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_persistent_volume`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} path template slot.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/persistentvolumes/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolume',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def read_persistent_volume_status(self, name, **kwargs): # noqa: E501
+ """read_persistent_volume_status # noqa: E501
+
+ read status of the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_persistent_volume_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_persistent_volume_status_with_http_info(name, **kwargs) # noqa: E501
+
    def read_persistent_volume_status_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_persistent_volume_status  # noqa: E501

        read status of the specified PersistentVolume  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_persistent_volume_status_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PersistentVolume (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() must be the first statement: it snapshots exactly the
        # declared parameters (self, name) plus the raw kwargs dict.
        local_var_params = locals()

        # Endpoint-specific parameters ...
        all_params = [
            'name',
            'pretty'
        ]
        # ... plus the request-control options every API method accepts.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every option is addressable by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_persistent_volume_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_persistent_volume_status`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} path template slot.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no request body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/persistentvolumes/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PersistentVolume',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespace(self, name, body, **kwargs): # noqa: E501
+ """replace_namespace # noqa: E501
+
+ replace the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespace(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param V1Namespace body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespace_with_http_info(name, body, **kwargs) # noqa: E501
+
    def replace_namespace_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_namespace  # noqa: E501

        replace the specified Namespace  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespace_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Namespace (required)
        :param V1Namespace body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() must be the first statement: it snapshots exactly the
        # declared parameters (self, name, body) plus the raw kwargs dict.
        local_var_params = locals()

        # Endpoint-specific parameters ...
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # ... plus the request-control options every API method accepts.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into
        # local_var_params so every option is addressable by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespace" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespace`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespace`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {name} path template slot.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query parameters, mapped from snake_case to the
        # camelCase names the API server expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # PUT request: the replacement V1Namespace object is the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/namespaces/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Namespace',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespace_finalize(self, name, body, **kwargs): # noqa: E501
+ """replace_namespace_finalize # noqa: E501
+
+ replace finalize of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespace_finalize(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param V1Namespace body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespace_finalize_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_namespace_finalize_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_namespace_finalize # noqa: E501
+
+ replace finalize of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespace_finalize_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param V1Namespace body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespace_finalize" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespace_finalize`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespace_finalize`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{name}/finalize', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Namespace', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespace_status(self, name, body, **kwargs): # noqa: E501
+ """replace_namespace_status # noqa: E501
+
+ replace status of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespace_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param V1Namespace body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Namespace
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespace_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_namespace_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_namespace_status # noqa: E501
+
+ replace status of the specified Namespace # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespace_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Namespace (required)
+ :param V1Namespace body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Namespace, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespace_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespace_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespace_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Namespace', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_config_map(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_config_map # noqa: E501
+
+ replace the specified ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ConfigMap (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ConfigMap body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ConfigMap
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_config_map_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_config_map_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_config_map # noqa: E501
+
+ replace the specified ConfigMap # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_config_map_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ConfigMap (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ConfigMap body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ConfigMap, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_config_map" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_config_map`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_config_map`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_config_map`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/configmaps/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ConfigMap', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_endpoints(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_endpoints # noqa: E501
+
+ replace the specified Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_endpoints(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Endpoints (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Endpoints body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Endpoints
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_endpoints_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_endpoints # noqa: E501
+
+ replace the specified Endpoints # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_endpoints_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Endpoints (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Endpoints body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Endpoints, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_endpoints" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_endpoints`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_endpoints`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_endpoints`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/endpoints/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Endpoints', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def replace_namespaced_event(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified Event.

    Synchronous by default; pass async_req=True to get the request thread:
    >>> thread = api.replace_namespaced_event(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Event (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param CoreV1Event body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Only valid value: 'All'.
    :param str field_manager: name associated with the actor or entity making these changes (printable, < 128 chars).
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict.
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a (connection, read) tuple.
    :return: CoreV1Event
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body (no status code / headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_event_with_http_info(name, namespace, body, **forwarded)  # noqa: E501
+
def replace_namespaced_event_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_event  # noqa: E501

    replace the specified Event  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_event_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Event (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param CoreV1Event body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(CoreV1Event, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot the declared arguments; must be the first statement so only
    # the signature's names (plus 'kwargs') are captured.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into the parameter dict, rejecting unknown names.
    # dict.items() replaces the py2-only six.iteritems() shim; both iterate
    # identically on every supported Python version.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_event" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_event`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_event`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_event`")  # noqa: E501

    collection_formats = {}

    # Path templating: {name} and {namespace} in the URL.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query parameters, translated to their wire (camelCase) names.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/events/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CoreV1Event',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def replace_namespaced_limit_range(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified LimitRange.

    Synchronous by default; pass async_req=True to get the request thread:
    >>> thread = api.replace_namespaced_limit_range(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the LimitRange (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1LimitRange body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Only valid value: 'All'.
    :param str field_manager: name associated with the actor or entity making these changes (printable, < 128 chars).
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict.
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a (connection, read) tuple.
    :return: V1LimitRange
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body (no status code / headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_limit_range_with_http_info(name, namespace, body, **forwarded)  # noqa: E501
+
def replace_namespaced_limit_range_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_limit_range  # noqa: E501

    replace the specified LimitRange  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_limit_range_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the LimitRange (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1LimitRange body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1LimitRange, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot the declared arguments; must be the first statement so only
    # the signature's names (plus 'kwargs') are captured.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into the parameter dict, rejecting unknown names.
    # dict.items() replaces the py2-only six.iteritems() shim; both iterate
    # identically on every supported Python version.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_limit_range" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_limit_range`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_limit_range`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_limit_range`")  # noqa: E501

    collection_formats = {}

    # Path templating: {name} and {namespace} in the URL.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query parameters, translated to their wire (camelCase) names.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/limitranges/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1LimitRange',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def replace_namespaced_persistent_volume_claim(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified PersistentVolumeClaim.

    Synchronous by default; pass async_req=True to get the request thread:
    >>> thread = api.replace_namespaced_persistent_volume_claim(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PersistentVolumeClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1PersistentVolumeClaim body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Only valid value: 'All'.
    :param str field_manager: name associated with the actor or entity making these changes (printable, < 128 chars).
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict.
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a (connection, read) tuple.
    :return: V1PersistentVolumeClaim
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body (no status code / headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_persistent_volume_claim_with_http_info(name, namespace, body, **forwarded)  # noqa: E501
+
def replace_namespaced_persistent_volume_claim_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_persistent_volume_claim  # noqa: E501

    replace the specified PersistentVolumeClaim  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_persistent_volume_claim_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PersistentVolumeClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1PersistentVolumeClaim body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot the declared arguments; must be the first statement so only
    # the signature's names (plus 'kwargs') are captured.
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into the parameter dict, rejecting unknown names.
    # dict.items() replaces the py2-only six.iteritems() shim; both iterate
    # identically on every supported Python version.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_persistent_volume_claim" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_persistent_volume_claim`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_persistent_volume_claim`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_persistent_volume_claim`")  # noqa: E501

    collection_formats = {}

    # Path templating: {name} and {namespace} in the URL.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query parameters, translated to their wire (camelCase) names.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PersistentVolumeClaim',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def replace_namespaced_persistent_volume_claim_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace status of the specified PersistentVolumeClaim.

    Synchronous by default; pass async_req=True to get the request thread:
    >>> thread = api.replace_namespaced_persistent_volume_claim_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PersistentVolumeClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1PersistentVolumeClaim body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Only valid value: 'All'.
    :param str field_manager: name associated with the actor or entity making these changes (printable, < 128 chars).
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict.
    :param _preload_content: if False, the raw urllib3.HTTPResponse is returned without reading/decoding. Default True.
    :param _request_timeout: total timeout in seconds, or a (connection, read) tuple.
    :return: V1PersistentVolumeClaim
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body (no status code / headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, body, **forwarded)  # noqa: E501
+
+ def replace_namespaced_persistent_volume_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_persistent_volume_claim_status # noqa: E501
+
+ replace status of the specified PersistentVolumeClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolumeClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PersistentVolumeClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolumeClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the declared arguments (plus the 'kwargs' dict itself) so the
+ # optional parameters can be validated and merged uniformly below.
+ local_var_params = locals()
+
+ # Keyword parameters this endpoint accepts, in API order.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ # Client-internal options accepted by every generated method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments early, then fold the known ones
+ # into the flat parameter dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_persistent_volume_claim_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_persistent_volume_claim_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_persistent_volume_claim_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_persistent_volume_claim_status`") # noqa: E501
+
+ collection_formats = {}
+
+ # Substitute the URL template placeholders {name} and {namespace}.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Map snake_case kwargs to their camelCase query-string names,
+ # skipping any the caller did not provide.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue the PUT; returns a thread instead when async_req is truthy.
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/persistentvolumeclaims/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolumeClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod # noqa: E501
+
+ replace the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Pod body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: delegate to the *_with_http_info variant and
+ # return only the deserialized body (no status code / headers).
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod # noqa: E501
+
+ replace the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Pod body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the declared arguments (plus the 'kwargs' dict itself) so the
+ # optional parameters can be validated and merged uniformly below.
+ local_var_params = locals()
+
+ # Keyword parameters this endpoint accepts, in API order.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ # Client-internal options accepted by every generated method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments early, then fold the known ones
+ # into the flat parameter dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod`") # noqa: E501
+
+ collection_formats = {}
+
+ # Substitute the URL template placeholders {name} and {namespace}.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Map snake_case kwargs to their camelCase query-string names,
+ # skipping any the caller did not provide.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue the PUT; returns a thread instead when async_req is truthy.
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Pod', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_ephemeralcontainers(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_ephemeralcontainers # noqa: E501
+
+ replace ephemeralcontainers of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_ephemeralcontainers(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Pod body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: delegate to the *_with_http_info variant and
+ # return only the deserialized body (no status code / headers).
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_ephemeralcontainers_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_ephemeralcontainers_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_ephemeralcontainers # noqa: E501
+
+ replace ephemeralcontainers of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_ephemeralcontainers_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Pod body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the declared arguments (plus the 'kwargs' dict itself) so the
+ # optional parameters can be validated and merged uniformly below.
+ local_var_params = locals()
+
+ # Keyword parameters this endpoint accepts, in API order.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ # Client-internal options accepted by every generated method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments early, then fold the known ones
+ # into the flat parameter dict.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_ephemeralcontainers" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_ephemeralcontainers`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_ephemeralcontainers`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_ephemeralcontainers`") # noqa: E501
+
+ collection_formats = {}
+
+ # Substitute the URL template placeholders {name} and {namespace}.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Map snake_case kwargs to their camelCase query-string names,
+ # skipping any the caller did not provide.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue the PUT; returns a thread instead when async_req is truthy.
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods/{name}/ephemeralcontainers', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Pod', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_status # noqa: E501
+
+ replace status of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Pod body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Pod
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: delegate to the *_with_http_info variant and
+ # return only the deserialized body (no status code / headers).
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_status # noqa: E501
+
+ replace status of the specified Pod # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Pod (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Pod body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Pod, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/pods/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Pod', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_template(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_template # noqa: E501
+
+ replace the specified PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_template(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PodTemplate body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_template_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_template # noqa: E501
+
+ replace the specified PodTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_template_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PodTemplate body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodTemplate, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_template`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_template`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/podtemplates/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodTemplate', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_replication_controller(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_replication_controller # noqa: E501
+
+ replace the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_replication_controller(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ReplicationController body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_replication_controller_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_replication_controller_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_replication_controller # noqa: E501
+
+ replace the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_replication_controller_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ReplicationController body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_replication_controller" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_replication_controller`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replication_controller`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_replication_controller`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ReplicationController', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_replication_controller_scale(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_replication_controller_scale # noqa: E501
+
+ replace scale of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_replication_controller_scale(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Scale body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Scale
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_replication_controller_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_replication_controller_scale_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_replication_controller_scale # noqa: E501
+
+ replace scale of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_replication_controller_scale_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Scale (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Scale body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Scale, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_replication_controller_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_replication_controller_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replication_controller_scale`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_replication_controller_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Scale', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_replication_controller_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_replication_controller_status # noqa: E501
+
+ replace status of the specified ReplicationController # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_replication_controller_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ReplicationController (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ReplicationController body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ReplicationController
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_replication_controller_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def replace_namespaced_replication_controller_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_replication_controller_status  # noqa: E501

        replace status of the specified ReplicationController  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_replication_controller_status_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ReplicationController body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ReplicationController, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is snapshotted as the very first statement, so the
        # dict holds exactly self, name, namespace, body and the raw kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters this call accepts.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Generic transport options understood by ApiClient.call_api.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into the
        # top-level parameter dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_replication_controller_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_replication_controller_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_replication_controller_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_replication_controller_status`")  # noqa: E501

        collection_formats = {}

        # Arguments substituted into the URL path template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters (camelCase names on the wire).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1ReplicationController object is serialized as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization and async dispatch to ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ReplicationController',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_resource_quota(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_quota # noqa: E501
+
+ replace the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_quota(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ResourceQuota body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_resource_quota_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def replace_namespaced_resource_quota_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_resource_quota  # noqa: E501

        replace the specified ResourceQuota  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_resource_quota_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceQuota (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ResourceQuota body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is snapshotted as the very first statement, so the
        # dict holds exactly self, name, namespace, body and the raw kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters this call accepts.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Generic transport options understood by ApiClient.call_api.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into the
        # top-level parameter dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_resource_quota" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_quota`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_quota`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_quota`")  # noqa: E501

        collection_formats = {}

        # Arguments substituted into the URL path template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters (camelCase names on the wire).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1ResourceQuota object is serialized as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization and async dispatch to ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/resourcequotas/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ResourceQuota',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_resource_quota_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_quota_status # noqa: E501
+
+ replace status of the specified ResourceQuota # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_quota_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceQuota (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ResourceQuota body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ResourceQuota
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_resource_quota_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def replace_namespaced_resource_quota_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_resource_quota_status  # noqa: E501

        replace status of the specified ResourceQuota  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_resource_quota_status_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceQuota (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1ResourceQuota body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ResourceQuota, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is snapshotted as the very first statement, so the
        # dict holds exactly self, name, namespace, body and the raw kwargs.
        local_var_params = locals()

        # Endpoint-specific parameters this call accepts.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # Generic transport options understood by ApiClient.call_api.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into the
        # top-level parameter dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_resource_quota_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_quota_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_quota_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_quota_status`")  # noqa: E501

        collection_formats = {}

        # Arguments substituted into the URL path template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters (camelCase names on the wire).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1ResourceQuota object is serialized as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, (de)serialization and async dispatch to ApiClient.
        return self.api_client.call_api(
            '/api/v1/namespaces/{namespace}/resourcequotas/{name}/status', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ResourceQuota',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_secret(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_secret # noqa: E501
+
+ replace the specified Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_secret(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Secret (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Secret body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Secret
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_secret_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_secret_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_secret # noqa: E501
+
+ replace the specified Secret # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_secret_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Secret (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Secret body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Secret, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_secret" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_secret`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_secret`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_secret`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/secrets/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Secret', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_service(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_service # noqa: E501
+
+ replace the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_service(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Service body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_service_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_service # noqa: E501
+
+ replace the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_service_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Service body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_service" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_service`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_service`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_service`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Service', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_service_account(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_service_account # noqa: E501
+
+ replace the specified ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ServiceAccount body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ServiceAccount
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_service_account_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_service_account # noqa: E501
+
+ replace the specified ServiceAccount # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_service_account_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ServiceAccount (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1ServiceAccount body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ServiceAccount, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_service_account" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_service_account`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_service_account`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_service_account`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/serviceaccounts/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ServiceAccount', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_service_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_service_status # noqa: E501
+
+ replace status of the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_service_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Service body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Service
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_service_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_service_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_service_status # noqa: E501
+
+ replace status of the specified Service # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_service_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Service (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Service body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Service, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_service_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_service_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_service_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_service_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/namespaces/{namespace}/services/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Service', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_node(self, name, body, **kwargs): # noqa: E501
+ """replace_node # noqa: E501
+
+ replace the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_node(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param V1Node body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_node_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_node_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_node # noqa: E501
+
+ replace the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_node_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param V1Node body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_node`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_node`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/nodes/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Node', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_node_status(self, name, body, **kwargs): # noqa: E501
+ """replace_node_status # noqa: E501
+
+ replace status of the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_node_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param V1Node body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Node
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_node_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_node_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_node_status # noqa: E501
+
+ replace status of the specified Node # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_node_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Node (required)
+ :param V1Node body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Node, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_node_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_node_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_node_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/nodes/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Node', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_persistent_volume(self, name, body, **kwargs): # noqa: E501
+ """replace_persistent_volume # noqa: E501
+
+ replace the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_persistent_volume(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param V1PersistentVolume body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_persistent_volume_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_persistent_volume_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_persistent_volume # noqa: E501
+
+ replace the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_persistent_volume_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param V1PersistentVolume body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_persistent_volume" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_persistent_volume`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_persistent_volume`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/persistentvolumes/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolume', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_persistent_volume_status(self, name, body, **kwargs): # noqa: E501
+ """replace_persistent_volume_status # noqa: E501
+
+ replace status of the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_persistent_volume_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param V1PersistentVolume body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PersistentVolume
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_persistent_volume_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_persistent_volume_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_persistent_volume_status # noqa: E501
+
+ replace status of the specified PersistentVolume # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_persistent_volume_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PersistentVolume (required)
+ :param V1PersistentVolume body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PersistentVolume, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_persistent_volume_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_persistent_volume_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_persistent_volume_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/api/v1/persistentvolumes/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PersistentVolume', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/custom_objects_api.py b/contrib/python/kubernetes/kubernetes/client/api/custom_objects_api.py
new file mode 100644
index 0000000000..5eb9f00170
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/custom_objects_api.py
@@ -0,0 +1,4429 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class CustomObjectsApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        # Every request from this API class is issued through self.api_client;
+        # a default ApiClient() is constructed when the caller supplies none.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_cluster_custom_object(self, group, version, plural, body, **kwargs):  # noqa: E501
+        """create_cluster_custom_object  # noqa: E501
+
+        Creates a cluster scoped Custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_cluster_custom_object(group, version, plural, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param object body: The JSON schema of the Resource to create. (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: object
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the tuple-returning variant to hand back
+        # only the deserialized body, dropping status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.create_cluster_custom_object_with_http_info(group, version, plural, body, **kwargs)  # noqa: E501
+
+    def create_cluster_custom_object_with_http_info(self, group, version, plural, body, **kwargs):  # noqa: E501
+        """create_cluster_custom_object  # noqa: E501
+
+        Creates a cluster scoped Custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_cluster_custom_object_with_http_info(group, version, plural, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param object body: The JSON schema of the Resource to create. (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures every named argument plus the raw kwargs dict.
+        local_var_params = locals()
+
+        all_params = [
+            'group',
+            'version',
+            'plural',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_cluster_custom_object" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
+                                                        local_var_params['group'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `create_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
+                                                        local_var_params['version'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `create_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
+                                                        local_var_params['plural'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `create_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_custom_object`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group']  # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version']  # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural']  # noqa: E501
+
+        # Query parameters are sent camelCase (mapped from the snake_case
+        # python argument names); None values are omitted entirely.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP POST (and optional async dispatch) to the
+        # shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/{plural}', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def create_namespaced_custom_object(self, group, version, namespace, plural, body, **kwargs):  # noqa: E501
+        """create_namespaced_custom_object  # noqa: E501
+
+        Creates a namespace scoped Custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_custom_object(group, version, namespace, plural, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param str namespace: The custom resource's namespace (required)
+        :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param object body: The JSON schema of the Resource to create. (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: object
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the tuple-returning variant to hand back
+        # only the deserialized body, dropping status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.create_namespaced_custom_object_with_http_info(group, version, namespace, plural, body, **kwargs)  # noqa: E501
+
+    def create_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, body, **kwargs):  # noqa: E501
+        """create_namespaced_custom_object  # noqa: E501
+
+        Creates a namespace scoped Custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_custom_object_with_http_info(group, version, namespace, plural, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param str namespace: The custom resource's namespace (required)
+        :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param object body: The JSON schema of the Resource to create. (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures every named argument plus the raw kwargs dict.
+        local_var_params = locals()
+
+        all_params = [
+            'group',
+            'version',
+            'namespace',
+            'plural',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_custom_object" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
+                                                        local_var_params['group'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `create_namespaced_custom_object`")  # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
+                                                        local_var_params['version'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `create_namespaced_custom_object`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_custom_object`")  # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
+                                                        local_var_params['plural'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `create_namespaced_custom_object`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_custom_object`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group']  # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural']  # noqa: E501
+
+        # Query parameters are sent camelCase (mapped from the snake_case
+        # python argument names); None values are omitted entirely.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP POST (and optional async dispatch) to the
+        # shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/namespaces/{namespace}/{plural}', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_cluster_custom_object(self, group, version, plural, name, **kwargs):  # noqa: E501
+        """delete_cluster_custom_object  # noqa: E501
+
+        Deletes the specified cluster scoped custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_cluster_custom_object(group, version, plural, name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: the custom resource's group (required)
+        :param str version: the custom resource's version (required)
+        :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str name: the custom object's name (required)
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: object
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the tuple-returning variant to hand back
+        # only the deserialized body, dropping status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.delete_cluster_custom_object_with_http_info(group, version, plural, name, **kwargs)  # noqa: E501
+
+    def delete_cluster_custom_object_with_http_info(self, group, version, plural, name, **kwargs):  # noqa: E501
+        """delete_cluster_custom_object  # noqa: E501
+
+        Deletes the specified cluster scoped custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_cluster_custom_object_with_http_info(group, version, plural, name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: the custom resource's group (required)
+        :param str version: the custom resource's version (required)
+        :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str name: the custom object's name (required)
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures every named argument plus the raw kwargs dict.
+        local_var_params = locals()
+
+        all_params = [
+            'group',
+            'version',
+            'plural',
+            'name',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'dry_run',
+            'body'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params alongside the positional parameters.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_cluster_custom_object" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
+                                                        local_var_params['group'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `delete_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
+                                                        local_var_params['version'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `delete_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
+                                                        local_var_params['plural'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `delete_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_custom_object`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group']  # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version']  # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural']  # noqa: E501
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query parameters are sent camelCase (mapped from the snake_case
+        # python argument names); None values are omitted entirely.
+        query_params = []
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP DELETE (and optional async dispatch) to the
+        # shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/{plural}/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_cluster_custom_object(self, group, version, plural, **kwargs):  # noqa: E501
+        """delete_collection_cluster_custom_object  # noqa: E501
+
+        Delete collection of cluster scoped custom objects  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_cluster_custom_object(group, version, plural, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: object
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the tuple-returning variant to hand back
+        # only the deserialized body, dropping status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.delete_collection_cluster_custom_object_with_http_info(group, version, plural, **kwargs)  # noqa: E501
+
+ def delete_collection_cluster_custom_object_with_http_info(self, group, version, plural, **kwargs): # noqa: E501
+ """delete_collection_cluster_custom_object # noqa: E501
+
+ Delete collection of cluster scoped custom objects # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_custom_object_with_http_info(group, version, plural, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'pretty',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'dry_run',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_cluster_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `delete_collection_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `delete_collection_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `delete_collection_cluster_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_custom_object(self, group, version, namespace, plural, **kwargs): # noqa: E501
+ """delete_collection_namespaced_custom_object # noqa: E501
+
+ Delete collection of namespace scoped custom objects # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_custom_object(group, version, namespace, plural, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_custom_object_with_http_info(group, version, namespace, plural, **kwargs) # noqa: E501
+
+    def delete_collection_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, **kwargs): # noqa: E501
+        """delete_collection_namespaced_custom_object # noqa: E501
+
+        Delete collection of namespace scoped custom objects # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_namespaced_custom_object_with_http_info(group, version, namespace, plural, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param str namespace: The custom resource's namespace (required)
+        :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # ``locals()`` must run before any other local is created: it
+        # snapshots the named parameters plus the raw ``kwargs`` dict.
+        local_var_params = locals()
+
+        # Parameters accepted by this specific endpoint.
+        all_params = [
+            'group',
+            'version',
+            'namespace',
+            'plural',
+            'pretty',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'dry_run',
+            'body'
+        ]
+        # Transport-level options shared by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted
+        # ones into the single parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_collection_namespaced_custom_object" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+                                                       local_var_params['group'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `delete_collection_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+                                                       local_var_params['version'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `delete_collection_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+                                                       local_var_params['namespace'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+                                                       local_var_params['plural'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `delete_collection_namespaced_custom_object`") # noqa: E501
+
+        collection_formats = {}
+
+        # Path parameters are substituted into the URL template below.
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group'] # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version'] # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural'] # noqa: E501
+
+        # Query-string parameters use the API's camelCase wire names;
+        # unset (None) values are omitted entirely.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions payload.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json']) # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken'] # noqa: E501
+
+        # Perform the request; per the docstring this yields
+        # (data, status, headers), or the request thread when async_req
+        # is set.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/namespaces/{namespace}/{plural}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object', # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def delete_namespaced_custom_object(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """delete_namespaced_custom_object # noqa: E501
+
+ Deletes the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_custom_object(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
+
+    def delete_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+        """delete_namespaced_custom_object # noqa: E501
+
+        Deletes the specified namespace scoped custom object # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: the custom resource's group (required)
+        :param str version: the custom resource's version (required)
+        :param str namespace: The custom resource's namespace (required)
+        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str name: the custom object's name (required)
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # ``locals()`` must run before any other local is created: it
+        # snapshots the named parameters plus the raw ``kwargs`` dict.
+        local_var_params = locals()
+
+        # Parameters accepted by this specific endpoint.
+        all_params = [
+            'group',
+            'version',
+            'namespace',
+            'plural',
+            'name',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'dry_run',
+            'body'
+        ]
+        # Transport-level options shared by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted
+        # ones into the single parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_namespaced_custom_object" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+                                                       local_var_params['group'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `delete_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+                                                       local_var_params['version'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `delete_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+                                                       local_var_params['namespace'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+                                                       local_var_params['plural'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `delete_namespaced_custom_object`") # noqa: E501
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+                                                       local_var_params['name'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_custom_object`") # noqa: E501
+
+        collection_formats = {}
+
+        # Path parameters are substituted into the URL template below.
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group'] # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version'] # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural'] # noqa: E501
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name'] # noqa: E501
+
+        # Query-string parameters use the API's camelCase wire names;
+        # unset (None) values are omitted entirely.
+        query_params = []
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions payload.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json']) # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken'] # noqa: E501
+
+        # Perform the request; per the docstring this yields
+        # (data, status, headers), or the request thread when async_req
+        # is set.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object', # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def get_api_resources(self, group, version, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(group, version, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(group, version, **kwargs) # noqa: E501
+
+    def get_api_resources_with_http_info(self, group, version, **kwargs): # noqa: E501
+        """get_api_resources # noqa: E501
+
+        get available resources # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_resources_with_http_info(group, version, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: The custom resource's group name (required)
+        :param str version: The custom resource's version (required)
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # ``locals()`` must run before any other local is created: it
+        # snapshots the named parameters plus the raw ``kwargs`` dict.
+        local_var_params = locals()
+
+        # Parameters accepted by this specific endpoint.
+        all_params = [
+            'group',
+            'version'
+        ]
+        # Transport-level options shared by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted
+        # ones into the single parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_resources" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+                                                       local_var_params['group'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `get_api_resources`") # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+                                                       local_var_params['version'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `get_api_resources`") # noqa: E501
+
+        collection_formats = {}
+
+        # Path parameters are substituted into the URL template below.
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group'] # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version'] # noqa: E501
+
+        # This endpoint takes no query parameters and sends no body.
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json']) # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken'] # noqa: E501
+
+        # Perform the request; per the docstring this yields
+        # (data, status, headers), or the request thread when async_req
+        # is set.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIResourceList', # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def get_cluster_custom_object(self, group, version, plural, name, **kwargs): # noqa: E501
+ """get_cluster_custom_object # noqa: E501
+
+ Returns a cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_cluster_custom_object(group, version, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_cluster_custom_object_with_http_info(group, version, plural, name, **kwargs) # noqa: E501
+
+ def get_cluster_custom_object_with_http_info(self, group, version, plural, name, **kwargs): # noqa: E501
+ """get_cluster_custom_object # noqa: E501
+
+ Returns a cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_cluster_custom_object_with_http_info(group, version, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'name'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_cluster_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `get_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `get_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `get_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `get_cluster_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_cluster_custom_object_scale(self, group, version, plural, name, **kwargs): # noqa: E501
+ """get_cluster_custom_object_scale # noqa: E501
+
+ read scale of the specified custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_cluster_custom_object_scale(group, version, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return
+ # only the deserialized body, dropping status code and headers.
+ kwargs['_return_http_data_only'] = True
+ return self.get_cluster_custom_object_scale_with_http_info(group, version, plural, name, **kwargs) # noqa: E501
+
+ def get_cluster_custom_object_scale_with_http_info(self, group, version, plural, name, **kwargs): # noqa: E501
+ """get_cluster_custom_object_scale # noqa: E501
+
+ read scale of the specified custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_cluster_custom_object_scale_with_http_info(group, version, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() captures the positional parameters plus the raw 'kwargs'
+ # dict; optional keys are merged into it below, so renaming any
+ # local/parameter would change the dict contents.
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'name'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten 'kwargs' into the
+ # parameter dict so all values are looked up uniformly.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_cluster_custom_object_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `get_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `get_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `get_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `get_cluster_custom_object_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template placeholders filled from the validated parameters.
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP round-trip to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}/{name}/scale', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_cluster_custom_object_status(self, group, version, plural, name, **kwargs): # noqa: E501
+ """get_cluster_custom_object_status # noqa: E501
+
+ read status of the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_cluster_custom_object_status(group, version, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return
+ # only the deserialized body, dropping status code and headers.
+ kwargs['_return_http_data_only'] = True
+ return self.get_cluster_custom_object_status_with_http_info(group, version, plural, name, **kwargs) # noqa: E501
+
+ def get_cluster_custom_object_status_with_http_info(self, group, version, plural, name, **kwargs): # noqa: E501
+ """get_cluster_custom_object_status # noqa: E501
+
+ read status of the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_cluster_custom_object_status_with_http_info(group, version, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() captures the positional parameters plus the raw 'kwargs'
+ # dict; optional keys are merged into it below.
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'name'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten 'kwargs' into the
+ # parameter dict so all values are looked up uniformly.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_cluster_custom_object_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `get_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `get_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `get_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `get_cluster_custom_object_status`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template placeholders filled from the validated parameters.
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP round-trip to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_namespaced_custom_object(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """get_namespaced_custom_object # noqa: E501
+
+ Returns a namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return
+ # only the deserialized body, dropping status code and headers.
+ kwargs['_return_http_data_only'] = True
+ return self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
+
+ def get_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """get_namespaced_custom_object # noqa: E501
+
+ Returns a namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() captures the positional parameters plus the raw 'kwargs'
+ # dict; optional keys are merged into it below.
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'namespace',
+ 'plural',
+ 'name'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten 'kwargs' into the
+ # parameter dict so all values are looked up uniformly.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_namespaced_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `get_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `get_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `get_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `get_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `get_namespaced_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template placeholders filled from the validated parameters.
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP round-trip to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_namespaced_custom_object_scale(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """get_namespaced_custom_object_scale # noqa: E501
+
+ read scale of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_namespaced_custom_object_scale(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return
+ # only the deserialized body, dropping status code and headers.
+ kwargs['_return_http_data_only'] = True
+ return self.get_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
+
+ def get_namespaced_custom_object_scale_with_http_info(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """get_namespaced_custom_object_scale # noqa: E501
+
+ read scale of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() captures the positional parameters plus the raw 'kwargs'
+ # dict; optional keys are merged into it below.
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'namespace',
+ 'plural',
+ 'name'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten 'kwargs' into the
+ # parameter dict so all values are looked up uniformly.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_namespaced_custom_object_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `get_namespaced_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `get_namespaced_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `get_namespaced_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `get_namespaced_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `get_namespaced_custom_object_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template placeholders filled from the validated parameters.
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP round-trip to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}/scale', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_namespaced_custom_object_status(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """get_namespaced_custom_object_status # noqa: E501
+
+ read status of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_namespaced_custom_object_status(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return
+ # only the deserialized body, dropping status code and headers.
+ kwargs['_return_http_data_only'] = True
+ return self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
+
+ def get_namespaced_custom_object_status_with_http_info(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
+ """get_namespaced_custom_object_status # noqa: E501
+
+ read status of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'namespace',
+ 'plural',
+ 'name'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_namespaced_custom_object_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `get_namespaced_custom_object_status`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `get_namespaced_custom_object_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `get_namespaced_custom_object_status`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `get_namespaced_custom_object_status`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `get_namespaced_custom_object_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_cluster_custom_object(self, group, version, plural, **kwargs): # noqa: E501
+ """list_cluster_custom_object # noqa: E501
+
+ list or watch cluster scoped custom objects # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_custom_object(group, version, plural, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_cluster_custom_object_with_http_info(group, version, plural, **kwargs) # noqa: E501
+
+ def list_cluster_custom_object_with_http_info(self, group, version, plural, **kwargs): # noqa: E501
+ """list_cluster_custom_object # noqa: E501
+
+ list or watch cluster scoped custom objects # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_custom_object_with_http_info(group, version, plural, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_cluster_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `list_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `list_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `list_cluster_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/json;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_custom_object(self, group, version, namespace, plural, **kwargs): # noqa: E501
+ """list_namespaced_custom_object # noqa: E501
+
+ list or watch namespace scoped custom objects # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_custom_object(group, version, namespace, plural, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_custom_object_with_http_info(group, version, namespace, plural, **kwargs) # noqa: E501
+
+ def list_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, **kwargs): # noqa: E501
+ """list_namespaced_custom_object # noqa: E501
+
+ list or watch namespace scoped custom objects # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_custom_object_with_http_info(group, version, namespace, plural, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: The custom resource's group name (required)
+ :param str version: The custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: The custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'namespace',
+ 'plural',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `list_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `list_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `list_namespaced_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/json;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/namespaces/{namespace}/{plural}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_cluster_custom_object(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """patch_cluster_custom_object # noqa: E501
+
+ patch the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_custom_object(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: The JSON schema of the Resource to patch. (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_custom_object_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
+
+    def patch_cluster_custom_object_with_http_info(self, group, version, plural, name, body, **kwargs):  # noqa: E501
+        """patch_cluster_custom_object  # noqa: E501
+
+        patch the specified cluster scoped custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_cluster_custom_object_with_http_info(group, version, plural, name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: the custom resource's group (required)
+        :param str version: the custom resource's version (required)
+        :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str name: the custom object's name (required)
+        :param object body: The JSON schema of the Resource to patch. (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the explicit arguments plus the raw ``kwargs`` dict.
+        # NOTE: this must remain the very first statement of the method —
+        # any local variable assigned earlier would leak into the snapshot.
+        local_var_params = locals()
+
+        all_params = [
+            'group',
+            'version',
+            'plural',
+            'name',
+            'body',
+            'dry_run',
+            'field_manager',
+            'force'
+        ]
+        # Transport-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fold keyword arguments into the snapshot, rejecting unknown names.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_cluster_custom_object" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
+                                                       local_var_params['group'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `patch_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
+                                                       local_var_params['version'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `patch_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
+                                                       local_var_params['plural'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `patch_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_custom_object`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_custom_object`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Substitutions for the /apis/{group}/{version}/{plural}/{name} template.
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group']  # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version']  # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural']  # noqa: E501
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query string: python snake_case names map to camelCase API keys.
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The patch document is sent verbatim as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/merge-patch+json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate serialization, auth and (optionally async) transport to
+        # the shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/{plural}/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_cluster_custom_object_scale(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """patch_cluster_custom_object_scale # noqa: E501
+
+ partially update scale of the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
+
+    def patch_cluster_custom_object_scale_with_http_info(self, group, version, plural, name, body, **kwargs):  # noqa: E501
+        """patch_cluster_custom_object_scale  # noqa: E501
+
+        partially update scale of the specified cluster scoped custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: the custom resource's group (required)
+        :param str version: the custom resource's version (required)
+        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str name: the custom object's name (required)
+        :param object body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the explicit arguments plus the raw ``kwargs`` dict.
+        # NOTE: this must remain the very first statement of the method —
+        # any local variable assigned earlier would leak into the snapshot.
+        local_var_params = locals()
+
+        all_params = [
+            'group',
+            'version',
+            'plural',
+            'name',
+            'body',
+            'dry_run',
+            'field_manager',
+            'force'
+        ]
+        # Transport-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fold keyword arguments into the snapshot, rejecting unknown names.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_cluster_custom_object_scale" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
+                                                       local_var_params['group'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `patch_cluster_custom_object_scale`")  # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
+                                                       local_var_params['version'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `patch_cluster_custom_object_scale`")  # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
+                                                       local_var_params['plural'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `patch_cluster_custom_object_scale`")  # noqa: E501
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_custom_object_scale`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_custom_object_scale`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Substitutions for /apis/{group}/{version}/{plural}/{name}/scale.
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group']  # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version']  # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural']  # noqa: E501
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query string: python snake_case names map to camelCase API keys.
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The patch document is sent verbatim as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/merge-patch+json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate serialization, auth and (optionally async) transport to
+        # the shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/{plural}/{name}/scale', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """patch_cluster_custom_object_status # noqa: E501
+
+ partially update status of the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
+
+    def patch_cluster_custom_object_status_with_http_info(self, group, version, plural, name, body, **kwargs):  # noqa: E501
+        """patch_cluster_custom_object_status  # noqa: E501
+
+        partially update status of the specified cluster scoped custom object  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str group: the custom resource's group (required)
+        :param str version: the custom resource's version (required)
+        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+        :param str name: the custom object's name (required)
+        :param object body: (required)
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the explicit arguments plus the raw ``kwargs`` dict.
+        # NOTE: this must remain the very first statement of the method —
+        # any local variable assigned earlier would leak into the snapshot.
+        local_var_params = locals()
+
+        all_params = [
+            'group',
+            'version',
+            'plural',
+            'name',
+            'body',
+            'dry_run',
+            'field_manager',
+            'force'
+        ]
+        # Transport-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Fold keyword arguments into the snapshot, rejecting unknown names.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_cluster_custom_object_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'group' is set
+        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
+                                                       local_var_params['group'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `group` when calling `patch_cluster_custom_object_status`")  # noqa: E501
+        # verify the required parameter 'version' is set
+        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
+                                                       local_var_params['version'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `version` when calling `patch_cluster_custom_object_status`")  # noqa: E501
+        # verify the required parameter 'plural' is set
+        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
+                                                       local_var_params['plural'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `plural` when calling `patch_cluster_custom_object_status`")  # noqa: E501
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_custom_object_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_custom_object_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Substitutions for /apis/{group}/{version}/{plural}/{name}/status.
+        path_params = {}
+        if 'group' in local_var_params:
+            path_params['group'] = local_var_params['group']  # noqa: E501
+        if 'version' in local_var_params:
+            path_params['version'] = local_var_params['version']  # noqa: E501
+        if 'plural' in local_var_params:
+            path_params['plural'] = local_var_params['plural']  # noqa: E501
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query string: python snake_case names map to camelCase API keys.
+        query_params = []
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The patch document is sent verbatim as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/merge-patch+json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate serialization, auth and (optionally async) transport to
+        # the shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/{group}/{version}/{plural}/{name}/status', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='object',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_namespaced_custom_object(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """patch_namespaced_custom_object # noqa: E501
+
+ patch the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: The JSON schema of the Resource to patch. (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
+
    def patch_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, name, body, **kwargs):  # noqa: E501
        """patch_namespaced_custom_object  # noqa: E501

        patch the specified namespace scoped custom object  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str group: the custom resource's group (required)
        :param str version: the custom resource's version (required)
        :param str namespace: The custom resource's namespace (required)
        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
        :param str name: the custom object's name (required)
        :param object body: The JSON schema of the Resource to patch. (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of this frame's arguments (positional params plus the raw
        # `kwargs` dict) via locals(); every request-assembly step below reads
        # from this mapping, so no new locals may be introduced before it.
        local_var_params = locals()

        # Keyword arguments this endpoint accepts...
        all_params = [
            'group',
            'version',
            'namespace',
            'plural',
            'name',
            'body',
            'dry_run',
            'field_manager',
            'force'
        ]
        # ...plus the generic per-request options understood by call_api.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_custom_object" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Required-parameter checks run only when client-side validation is
        # enabled on the API client.
        # verify the required parameter 'group' is set
        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
                                                       local_var_params['group'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `group` when calling `patch_namespaced_custom_object`")  # noqa: E501
        # verify the required parameter 'version' is set
        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
                                                       local_var_params['version'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `version` when calling `patch_namespaced_custom_object`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_custom_object`")  # noqa: E501
        # verify the required parameter 'plural' is set
        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
                                                       local_var_params['plural'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `plural` when calling `patch_namespaced_custom_object`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_custom_object`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_custom_object`")  # noqa: E501

        # No collection-typed parameters on this endpoint.
        collection_formats = {}

        # Values substituted into the URL path template passed to call_api.
        path_params = {}
        if 'group' in local_var_params:
            path_params['group'] = local_var_params['group']  # noqa: E501
        if 'version' in local_var_params:
            path_params['version'] = local_var_params['version']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'plural' in local_var_params:
            path_params['plural'] = local_var_params['plural']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters; sent only when explicitly set.
        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        # No form fields or file uploads for this endpoint.
        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept` -- this operation advertises JSON responses only.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type` -- the body is sent as a JSON merge patch.
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/merge-patch+json'])  # noqa: E501

        # Authentication setting: bearer token taken from the client config.
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP PATCH to the shared ApiClient; with async_req set
        # this returns a thread whose .get() yields the result tuple.
        return self.api_client.call_api(
            '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_custom_object_scale(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """patch_namespaced_custom_object_scale # noqa: E501
+
+ partially update scale of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_custom_object_scale(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
+
    def patch_namespaced_custom_object_scale_with_http_info(self, group, version, namespace, plural, name, body, **kwargs):  # noqa: E501
        """patch_namespaced_custom_object_scale  # noqa: E501

        partially update scale of the specified namespace scoped custom object  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str group: the custom resource's group (required)
        :param str version: the custom resource's version (required)
        :param str namespace: The custom resource's namespace (required)
        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
        :param str name: the custom object's name (required)
        :param object body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of this frame's arguments (positional params plus the raw
        # `kwargs` dict) via locals(); every request-assembly step below reads
        # from this mapping, so no new locals may be introduced before it.
        local_var_params = locals()

        # Keyword arguments this endpoint accepts...
        all_params = [
            'group',
            'version',
            'namespace',
            'plural',
            'name',
            'body',
            'dry_run',
            'field_manager',
            'force'
        ]
        # ...plus the generic per-request options understood by call_api.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_custom_object_scale" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Required-parameter checks run only when client-side validation is
        # enabled on the API client.
        # verify the required parameter 'group' is set
        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
                                                       local_var_params['group'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `group` when calling `patch_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'version' is set
        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
                                                       local_var_params['version'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `version` when calling `patch_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'plural' is set
        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
                                                       local_var_params['plural'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `plural` when calling `patch_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_custom_object_scale`")  # noqa: E501

        # No collection-typed parameters on this endpoint.
        collection_formats = {}

        # Values substituted into the URL path template passed to call_api.
        path_params = {}
        if 'group' in local_var_params:
            path_params['group'] = local_var_params['group']  # noqa: E501
        if 'version' in local_var_params:
            path_params['version'] = local_var_params['version']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'plural' in local_var_params:
            path_params['plural'] = local_var_params['plural']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters; sent only when explicitly set.
        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        # No form fields or file uploads for this endpoint.
        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept` -- responses may be JSON, YAML or Kubernetes
        # protobuf, per the advertised media types below.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type` -- the body is sent as a JSON merge patch.
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/merge-patch+json'])  # noqa: E501

        # Authentication setting: bearer token taken from the client config.
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP PATCH against the /scale subresource to the shared
        # ApiClient; with async_req set this returns a thread whose .get()
        # yields the result tuple.
        return self.api_client.call_api(
            '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}/scale', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_custom_object_status(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """patch_namespaced_custom_object_status # noqa: E501
+
+ partially update status of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_custom_object_status(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
+
    def patch_namespaced_custom_object_status_with_http_info(self, group, version, namespace, plural, name, body, **kwargs):  # noqa: E501
        """patch_namespaced_custom_object_status  # noqa: E501

        partially update status of the specified namespace scoped custom object  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str group: the custom resource's group (required)
        :param str version: the custom resource's version (required)
        :param str namespace: The custom resource's namespace (required)
        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
        :param str name: the custom object's name (required)
        :param object body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot of this frame's arguments (positional params plus the raw
        # `kwargs` dict) via locals(); every request-assembly step below reads
        # from this mapping, so no new locals may be introduced before it.
        local_var_params = locals()

        # Keyword arguments this endpoint accepts...
        all_params = [
            'group',
            'version',
            'namespace',
            'plural',
            'name',
            'body',
            'dry_run',
            'field_manager',
            'force'
        ]
        # ...plus the generic per-request options understood by call_api.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # local_var_params alongside the positional parameters.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_custom_object_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Required-parameter checks run only when client-side validation is
        # enabled on the API client.
        # verify the required parameter 'group' is set
        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
                                                       local_var_params['group'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `group` when calling `patch_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'version' is set
        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
                                                       local_var_params['version'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `version` when calling `patch_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'plural' is set
        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
                                                       local_var_params['plural'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `plural` when calling `patch_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_custom_object_status`")  # noqa: E501

        # No collection-typed parameters on this endpoint.
        collection_formats = {}

        # Values substituted into the URL path template passed to call_api.
        path_params = {}
        if 'group' in local_var_params:
            path_params['group'] = local_var_params['group']  # noqa: E501
        if 'version' in local_var_params:
            path_params['version'] = local_var_params['version']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'plural' in local_var_params:
            path_params['plural'] = local_var_params['plural']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters; sent only when explicitly set.
        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        # No form fields or file uploads for this endpoint.
        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept` -- responses may be JSON, YAML or Kubernetes
        # protobuf, per the advertised media types below.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type` -- the body is sent as a JSON merge patch.
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/merge-patch+json'])  # noqa: E501

        # Authentication setting: bearer token taken from the client config.
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP PATCH against the /status subresource to the
        # shared ApiClient; with async_req set this returns a thread whose
        # .get() yields the result tuple.
        return self.api_client.call_api(
            '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}/status', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_cluster_custom_object(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """replace_cluster_custom_object # noqa: E501
+
+ replace the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_custom_object(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: The JSON schema of the Resource to replace. (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_cluster_custom_object_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
+
+ def replace_cluster_custom_object_with_http_info(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """replace_cluster_custom_object # noqa: E501
+
+ replace the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_custom_object_with_http_info(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: The JSON schema of the Resource to replace. (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'name',
+ 'body',
+ 'dry_run',
+ 'field_manager'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_cluster_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `replace_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `replace_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `replace_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_custom_object`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_cluster_custom_object_scale(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """replace_cluster_custom_object_scale # noqa: E501
+
+ replace scale of the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
+
+ def replace_cluster_custom_object_scale_with_http_info(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """replace_cluster_custom_object_scale # noqa: E501
+
+ replace scale of the specified cluster scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'name',
+ 'body',
+ 'dry_run',
+ 'field_manager'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_cluster_custom_object_scale" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `replace_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `replace_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `replace_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_custom_object_scale`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_custom_object_scale`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}/{name}/scale', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """replace_cluster_custom_object_status # noqa: E501
+
+ replace status of the cluster scoped specified custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
+
+ def replace_cluster_custom_object_status_with_http_info(self, group, version, plural, name, body, **kwargs): # noqa: E501
+ """replace_cluster_custom_object_status # noqa: E501
+
+ replace status of the cluster scoped specified custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_custom_object_status_with_http_info(group, version, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'plural',
+ 'name',
+ 'body',
+ 'dry_run',
+ 'field_manager'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_cluster_custom_object_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `replace_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `replace_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `replace_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_custom_object_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_custom_object_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/{plural}/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_custom_object(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """replace_namespaced_custom_object # noqa: E501
+
+ replace the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: The JSON schema of the Resource to replace. (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_custom_object_with_http_info(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """replace_namespaced_custom_object # noqa: E501
+
+ replace the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: The JSON schema of the Resource to replace. (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'group',
+ 'version',
+ 'namespace',
+ 'plural',
+ 'name',
+ 'body',
+ 'dry_run',
+ 'field_manager'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_custom_object" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'group' is set
+ if self.api_client.client_side_validation and ('group' not in local_var_params or # noqa: E501
+ local_var_params['group'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `group` when calling `replace_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'version' is set
+ if self.api_client.client_side_validation and ('version' not in local_var_params or # noqa: E501
+ local_var_params['version'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `version` when calling `replace_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'plural' is set
+ if self.api_client.client_side_validation and ('plural' not in local_var_params or # noqa: E501
+ local_var_params['plural'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `plural` when calling `replace_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_custom_object`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_custom_object`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'group' in local_var_params:
+ path_params['group'] = local_var_params['group'] # noqa: E501
+ if 'version' in local_var_params:
+ path_params['version'] = local_var_params['version'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+ if 'plural' in local_var_params:
+ path_params['plural'] = local_var_params['plural'] # noqa: E501
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='object', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_custom_object_scale(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """replace_namespaced_custom_object_scale # noqa: E501
+
+ replace scale of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_custom_object_scale(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
+
    def replace_namespaced_custom_object_scale_with_http_info(self, group, version, namespace, plural, name, body, **kwargs):  # noqa: E501
        """replace_namespaced_custom_object_scale  # noqa: E501

        replace scale of the specified namespace scoped custom object  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str group: the custom resource's group (required)
        :param str version: the custom resource's version (required)
        :param str namespace: The custom resource's namespace (required)
        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
        :param str name: the custom object's name (required)
        :param object body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is taken before any other local is bound so that it
        # captures exactly the declared parameters plus 'kwargs'. The keys of
        # this dict are relied upon below, so locals must not be renamed.
        local_var_params = locals()

        # Names accepted as keyword arguments for this endpoint.
        all_params = [
            'group',
            'version',
            'namespace',
            'plural',
            'name',
            'body',
            'dry_run',
            'field_manager'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so they can be looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_custom_object_scale" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'group' is set
        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
                                                        local_var_params['group'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `group` when calling `replace_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'version' is set
        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
                                                        local_var_params['version'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `version` when calling `replace_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'plural' is set
        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
                                                        local_var_params['plural'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `plural` when calling `replace_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_custom_object_scale`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_custom_object_scale`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions for the request path.
        path_params = {}
        if 'group' in local_var_params:
            path_params['group'] = local_var_params['group']  # noqa: E501
        if 'version' in local_var_params:
            path_params['version'] = local_var_params['version']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'plural' in local_var_params:
            path_params['plural'] = local_var_params['plural']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters (only sent when not None).
        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`: negotiate among the encodings the API server supports
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting: bearer-token auth is applied by the ApiClient
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}/scale', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_custom_object_status(self, group, version, namespace, plural, name, body, **kwargs): # noqa: E501
+ """replace_namespaced_custom_object_status # noqa: E501
+
+ replace status of the specified namespace scoped custom object # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_custom_object_status(group, version, namespace, plural, name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str group: the custom resource's group (required)
+ :param str version: the custom resource's version (required)
+ :param str namespace: The custom resource's namespace (required)
+ :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
+ :param str name: the custom object's name (required)
+ :param object body: (required)
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: object
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, **kwargs) # noqa: E501
+
    def replace_namespaced_custom_object_status_with_http_info(self, group, version, namespace, plural, name, body, **kwargs):  # noqa: E501
        """replace_namespaced_custom_object_status  # noqa: E501

        replace status of the specified namespace scoped custom object  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str group: the custom resource's group (required)
        :param str version: the custom resource's version (required)
        :param str namespace: The custom resource's namespace (required)
        :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
        :param str name: the custom object's name (required)
        :param object body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is taken before any other local is bound so that it
        # captures exactly the declared parameters plus 'kwargs'. The keys of
        # this dict are relied upon below, so locals must not be renamed.
        local_var_params = locals()

        # Names accepted as keyword arguments for this endpoint.
        all_params = [
            'group',
            'version',
            'namespace',
            'plural',
            'name',
            'body',
            'dry_run',
            'field_manager'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so they can be looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_custom_object_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'group' is set
        if self.api_client.client_side_validation and ('group' not in local_var_params or  # noqa: E501
                                                        local_var_params['group'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `group` when calling `replace_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'version' is set
        if self.api_client.client_side_validation and ('version' not in local_var_params or  # noqa: E501
                                                        local_var_params['version'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `version` when calling `replace_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'plural' is set
        if self.api_client.client_side_validation and ('plural' not in local_var_params or  # noqa: E501
                                                        local_var_params['plural'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `plural` when calling `replace_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_custom_object_status`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_custom_object_status`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions for the request path.
        path_params = {}
        if 'group' in local_var_params:
            path_params['group'] = local_var_params['group']  # noqa: E501
        if 'version' in local_var_params:
            path_params['version'] = local_var_params['version']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'plural' in local_var_params:
            path_params['plural'] = local_var_params['plural']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters (only sent when not None).
        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`: negotiate among the encodings the API server supports
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting: bearer-token auth is applied by the ApiClient
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/{group}/{version}/namespaces/{namespace}/{plural}/{name}/status', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/discovery_api.py b/contrib/python/kubernetes/kubernetes/client/api/discovery_api.py
new file mode 100644
index 0000000000..02f34f7161
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/discovery_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class DiscoveryApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Delegate to the *_with_http_info variant, requesting only the
        # deserialized response body (status code and headers stripped).
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is taken first so it captures only 'kwargs';
        # the endpoint declares no positional parameters.
        local_var_params = locals()

        # This endpoint accepts no endpoint-specific keyword arguments.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so they can be looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`: negotiate among the encodings the API server supports
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting: bearer-token auth is applied by the ApiClient
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/discovery.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/discovery_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/discovery_v1_api.py
new file mode 100644
index 0000000000..3cc0a0df81
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/discovery_v1_api.py
@@ -0,0 +1,1392 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class DiscoveryV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_namespaced_endpoint_slice(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_endpoint_slice # noqa: E501
+
+ create an EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_endpoint_slice(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1EndpointSlice body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointSlice
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_endpoint_slice_with_http_info(namespace, body, **kwargs) # noqa: E501
+
    def create_namespaced_endpoint_slice_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_endpoint_slice  # noqa: E501

        create an EndpointSlice  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_endpoint_slice_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1EndpointSlice body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # NOTE: locals() is taken before any other local is bound so that it
        # captures exactly the declared parameters plus 'kwargs'. The keys of
        # this dict are relied upon below, so locals must not be renamed.
        local_var_params = locals()

        # Names accepted as keyword arguments for this endpoint.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so they can be looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_endpoint_slice" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_endpoint_slice`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_endpoint_slice`")  # noqa: E501

        collection_formats = {}

        # URL template substitutions for the request path.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters (only sent when not None).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`: negotiate among the encodings the API server supports
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting: bearer-token auth is applied by the ApiClient
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1EndpointSlice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_collection_namespaced_endpoint_slice(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_endpoint_slice # noqa: E501
+
+ delete collection of EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_endpoint_slice(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_endpoint_slice_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_endpoint_slice_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_endpoint_slice # noqa: E501
+
+ delete collection of EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_endpoint_slice_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_endpoint_slice" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_endpoint_slice`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_endpoint_slice(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_endpoint_slice # noqa: E501
+
+ delete an EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_endpoint_slice(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_endpoint_slice_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_endpoint_slice_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_endpoint_slice # noqa: E501
+
+ delete an EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_endpoint_slice_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_endpoint_slice" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_endpoint_slice`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_endpoint_slice`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/discovery.k8s.io/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_endpoint_slice_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_endpoint_slice_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_endpoint_slice_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointSliceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_endpoint_slice_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+    def list_endpoint_slice_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
+        """list_endpoint_slice_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind EndpointSlice  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_endpoint_slice_for_all_namespaces_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1EndpointSliceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # At this point locals() holds only 'self' and the raw 'kwargs' dict;
+        # validated keyword arguments are flattened into this mapping below.
+        local_var_params = locals()
+
+        # Endpoint-specific keyword arguments accepted by this method.
+        all_params = [
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'pretty',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Generic client options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unrecognized keywords, then promote the accepted ones to
+        # top-level keys of local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_endpoint_slice_for_all_namespaces" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Translate snake_case params to their camelCase wire names; only
+        # explicitly provided (non-None) values are sent. '_continue' maps to
+        # the API's 'continue' option, renamed because 'continue' is a Python
+        # keyword.
+        query_params = []
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields or file uploads.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP call (and async dispatch) to the shared
+        # ApiClient; the response body is deserialized as V1EndpointSliceList.
+        return self.api_client.call_api(
+            '/apis/discovery.k8s.io/v1/endpointslices', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1EndpointSliceList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def list_namespaced_endpoint_slice(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_endpoint_slice  # noqa: E501
+
+        list or watch objects of kind EndpointSlice  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_endpoint_slice(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1EndpointSliceList
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force body-only return and delegate to the
+        # *_with_http_info variant, which also exposes status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.list_namespaced_endpoint_slice_with_http_info(namespace, **kwargs)  # noqa: E501
+
+    def list_namespaced_endpoint_slice_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_endpoint_slice  # noqa: E501
+
+        list or watch objects of kind EndpointSlice  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_endpoint_slice_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1EndpointSliceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # At this point locals() holds 'self', 'namespace' and the raw 'kwargs'
+        # dict; validated keyword arguments are flattened into this mapping below.
+        local_var_params = locals()
+
+        # Endpoint-specific keyword arguments accepted by this method.
+        all_params = [
+            'namespace',
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Generic client options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unrecognized keywords, then promote the accepted ones to
+        # top-level keys of local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_namespaced_endpoint_slice" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set; fail fast before
+        # issuing the request when client-side validation is enabled
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_endpoint_slice`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'namespace' is substituted into the {namespace} segment of the path.
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Translate snake_case params to their camelCase wire names; only
+        # explicitly provided (non-None) values are sent. '_continue' maps to
+        # the API's 'continue' option, renamed because 'continue' is a Python
+        # keyword.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields or file uploads.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP call (and async dispatch) to the shared
+        # ApiClient; the response body is deserialized as V1EndpointSliceList.
+        return self.api_client.call_api(
+            '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1EndpointSliceList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_namespaced_endpoint_slice(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_endpoint_slice # noqa: E501
+
+ partially update the specified EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_endpoint_slice(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointSlice
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_endpoint_slice_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_endpoint_slice_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_endpoint_slice # noqa: E501
+
+ partially update the specified EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_endpoint_slice_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_endpoint_slice" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_endpoint_slice`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_endpoint_slice`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_endpoint_slice`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1EndpointSlice', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_endpoint_slice(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_endpoint_slice # noqa: E501
+
+ read the specified EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_endpoint_slice(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointSlice
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_endpoint_slice_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_endpoint_slice_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_endpoint_slice # noqa: E501
+
+ read the specified EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_endpoint_slice_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_endpoint_slice" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_endpoint_slice`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_endpoint_slice`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1EndpointSlice', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_endpoint_slice(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_endpoint_slice # noqa: E501
+
+ replace the specified EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_endpoint_slice(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1EndpointSlice body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1EndpointSlice
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_endpoint_slice_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_endpoint_slice_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_endpoint_slice # noqa: E501
+
+ replace the specified EndpointSlice # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_endpoint_slice_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the EndpointSlice (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1EndpointSlice body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_endpoint_slice" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_endpoint_slice`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_endpoint_slice`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_endpoint_slice`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1EndpointSlice', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/events_api.py b/contrib/python/kubernetes/kubernetes/client/api/events_api.py
new file mode 100644
index 0000000000..9a08eb9c07
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/events_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class EventsApi(object):
    """Client for the `events.k8s.io` API group.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller does not supply one.
        self.api_client = api_client if api_client is not None else ApiClient()

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        Get information of a group.

        Synchronous by default; pass ``async_req=True`` to receive a request
        thread whose ``get()`` yields the result instead.

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, return the urllib3.HTTPResponse without decoding
        :param _request_timeout: total timeout in seconds, or a (connect, read) tuple
        :return: V1APIGroup
            If the method is called asynchronously, returns the request thread.
        """
        # Ask the _with_http_info variant for the payload only, dropping the
        # (status code, headers) portion of the response triple.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        Get information of a group, returning the full
        (data, status code, headers) response triple.

        Synchronous by default; pass ``async_req=True`` to receive a request
        thread whose ``get()`` yields the result instead.

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without HTTP status code and headers
        :param _preload_content: if False, return the urllib3.HTTPResponse without decoding
        :param _request_timeout: total timeout in seconds, or a (connect, read) tuple
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously, returns the request thread.
        """
        # This endpoint has no API parameters of its own: only the client
        # control knobs are accepted.
        known_params = {
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        }
        local_var_params = {}
        for key, val in six.iteritems(kwargs):
            if key not in known_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
        }

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/events.k8s.io/', 'GET',
            {},  # path_params
            [],  # query_params
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/events_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/events_v1_api.py
new file mode 100644
index 0000000000..94c411ed03
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/events_v1_api.py
@@ -0,0 +1,1392 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class EventsV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_namespaced_event(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_event # noqa: E501
+
+ create an Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_event(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param EventsV1Event body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: EventsV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_event_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_event_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_event # noqa: E501
+
+ create an Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_event_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param EventsV1Event body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_event`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/events.k8s.io/v1/namespaces/{namespace}/events', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='EventsV1Event', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_event(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_event # noqa: E501
+
+ delete collection of Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_event(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_event_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_event_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_event # noqa: E501
+
+ delete collection of Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_event_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/events.k8s.io/v1/namespaces/{namespace}/events', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_event # noqa: E501
+
+ delete an Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_event(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def delete_namespaced_event_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_event  # noqa: E501

        delete an Event  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_event_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Event (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() is captured before any other local is bound, so the
        # snapshot holds exactly self, the named parameters and the raw
        # ``kwargs`` dict — the order of these statements must not change.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into the parameter snapshot so they can be looked up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_event" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_event`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_event`")  # noqa: E501

        collection_formats = {}

        # Path template substitutions for {name} and {namespace}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: python snake_case names map to the API's camelCase
        # keys; parameters left as None are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() is captured first so the snapshot holds exactly
        # self and the raw ``kwargs`` dict — keep this statement order.
        local_var_params = locals()

        # This endpoint takes no API parameters; only the framework-level
        # options are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into the parameter snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/events.k8s.io/v1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_event_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_event_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_event_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: EventsV1EventList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_event_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
    def list_event_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
        """list_event_for_all_namespaces  # noqa: E501

        list or watch objects of kind Event  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_event_for_all_namespaces_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(EventsV1EventList, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """

        # NOTE: locals() is captured first so the snapshot holds exactly
        # self and the raw ``kwargs`` dict — keep this statement order.
        local_var_params = locals()

        all_params = [
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'pretty',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into the parameter snapshot.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_event_for_all_namespaces" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Query string: python snake_case names map to the API's camelCase
        # keys ('_continue' avoids shadowing the python keyword); None
        # values are omitted entirely.
        query_params = []
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept` — includes the streaming media types used by
        # watch requests.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/events.k8s.io/v1/events', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EventsV1EventList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_namespaced_event(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_event # noqa: E501
+
+ list or watch objects of kind Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_event(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: EventsV1EventList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_event_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_event_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_event # noqa: E501
+
+ list or watch objects of kind Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_event_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(EventsV1EventList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/events.k8s.io/v1/namespaces/{namespace}/events', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='EventsV1EventList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_event # noqa: E501
+
+ partially update the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_event(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: EventsV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_event_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_event # noqa: E501
+
+ partially update the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_event_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_event`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_event`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='EventsV1Event', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_event # noqa: E501
+
+ read the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_event(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: EventsV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_event_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_event # noqa: E501
+
+ read the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_event_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_event`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='EventsV1Event', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_event # noqa: E501
+
+ replace the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_event(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param EventsV1Event body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: EventsV1Event
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_event_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_event # noqa: E501
+
+ replace the specified Event # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_event_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Event (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param EventsV1Event body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_event" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_event`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_event`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_event`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='EventsV1Event', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_api.py b/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_api.py
new file mode 100644
index 0000000000..f27340347d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class FlowcontrolApiserverApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def get_api_group(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIGroup
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_group_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_group_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_group" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIGroup', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta2_api.py b/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta2_api.py
new file mode 100644
index 0000000000..c81b1ecea7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta2_api.py
@@ -0,0 +1,3024 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class FlowcontrolApiserverV1beta2Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_flow_schema(self, body, **kwargs): # noqa: E501
+ """create_flow_schema # noqa: E501
+
+ create a FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_flow_schema(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta2FlowSchema body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_flow_schema_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_flow_schema_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_flow_schema # noqa: E501
+
+ create a FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_flow_schema_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta2FlowSchema body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_flow_schema`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_priority_level_configuration(self, body, **kwargs): # noqa: E501
+ """create_priority_level_configuration # noqa: E501
+
+ create a PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_priority_level_configuration(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta2PriorityLevelConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_priority_level_configuration_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_priority_level_configuration_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_priority_level_configuration # noqa: E501
+
+ create a PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_priority_level_configuration_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta2PriorityLevelConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_priority_level_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_flow_schema(self, **kwargs): # noqa: E501
+ """delete_collection_flow_schema # noqa: E501
+
+ delete collection of FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_flow_schema(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_flow_schema_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_flow_schema_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_flow_schema # noqa: E501
+
+ delete collection of FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_flow_schema_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_priority_level_configuration(self, **kwargs): # noqa: E501
+ """delete_collection_priority_level_configuration # noqa: E501
+
+ delete collection of PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_priority_level_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_priority_level_configuration_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_priority_level_configuration_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_priority_level_configuration # noqa: E501
+
+ delete collection of PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_priority_level_configuration_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_flow_schema(self, name, **kwargs): # noqa: E501
+ """delete_flow_schema # noqa: E501
+
+ delete a FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_flow_schema(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_flow_schema_with_http_info(name, **kwargs) # noqa: E501
+
+    def delete_flow_schema_with_http_info(self, name, **kwargs):  # noqa: E501
+        """delete_flow_schema  # noqa: E501
+
+        delete a FlowSchema  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_flow_schema_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the FlowSchema (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of this call's named arguments (self, name, kwargs);
+        # validated kwargs are folded into this dict below so that every
+        # effective parameter can be looked up by name.
+        local_var_params = locals()
+
+        # API-specific parameters accepted by this endpoint.
+        all_params = [
+            'name',
+            'pretty',
+            'dry_run',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'body'
+        ]
+        # Generic request-control options shared by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the recognized ones
+        # into the flat parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_flow_schema" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_flow_schema`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Path template substitution for {name} in the request URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query string: snake_case params map to their camelCase wire names;
+        # unset (None) values are omitted entirely.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def delete_priority_level_configuration(self, name, **kwargs): # noqa: E501
+ """delete_priority_level_configuration # noqa: E501
+
+ delete a PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_priority_level_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_priority_level_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+    def delete_priority_level_configuration_with_http_info(self, name, **kwargs):  # noqa: E501
+        """delete_priority_level_configuration  # noqa: E501
+
+        delete a PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_priority_level_configuration_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of this call's named arguments (self, name, kwargs);
+        # validated kwargs are folded into this dict below so that every
+        # effective parameter can be looked up by name.
+        local_var_params = locals()
+
+        # API-specific parameters accepted by this endpoint.
+        all_params = [
+            'name',
+            'pretty',
+            'dry_run',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'body'
+        ]
+        # Generic request-control options shared by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then merge the recognized ones
+        # into the flat parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_priority_level_configuration" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_priority_level_configuration`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Path template substitution for {name} in the request URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query string: snake_case params map to their camelCase wire names;
+        # unset (None) values are omitted entirely.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_flow_schema(self, **kwargs): # noqa: E501
+ """list_flow_schema # noqa: E501
+
+ list or watch objects of kind FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_flow_schema(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2FlowSchemaList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_flow_schema_with_http_info(**kwargs) # noqa: E501
+
+ def list_flow_schema_with_http_info(self, **kwargs): # noqa: E501
+ """list_flow_schema # noqa: E501
+
+ list or watch objects of kind FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_flow_schema_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2FlowSchemaList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2FlowSchemaList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_priority_level_configuration(self, **kwargs): # noqa: E501
+ """list_priority_level_configuration # noqa: E501
+
+ list or watch objects of kind PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_priority_level_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2PriorityLevelConfigurationList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_priority_level_configuration_with_http_info(**kwargs) # noqa: E501
+
+    def list_priority_level_configuration_with_http_info(self, **kwargs):  # noqa: E501
+        """list_priority_level_configuration  # noqa: E501
+
+        list or watch objects of kind PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_priority_level_configuration_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta2PriorityLevelConfigurationList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # With a (self, **kwargs) signature, locals() here contains only
+        # 'self' and the 'kwargs' dict; recognised kwargs are merged in below.
+        local_var_params = locals()
+
+        # Endpoint-specific query parameters accepted by this method.
+        all_params = [
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Generic client-level options accepted by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten the accepted
+        # ones into local_var_params so they can be looked up by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_priority_level_configuration" % key
+                )
+            local_var_params[key] = val
+        # The nested kwargs dict has been merged; drop it.
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # This endpoint has no path placeholders.
+        path_params = {}
+
+        # Map the python snake_case kwargs onto the wire-format camelCase
+        # query keys, skipping anything not supplied (or explicitly None).
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP call (and response deserialization into
+        # V1beta2PriorityLevelConfigurationList) to the shared api_client.
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta2PriorityLevelConfigurationList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_flow_schema(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema # noqa: E501
+
+ partially update the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_flow_schema_with_http_info(name, body, **kwargs) # noqa: E501
+
+    def patch_flow_schema_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """patch_flow_schema  # noqa: E501
+
+        partially update the specified FlowSchema  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_flow_schema_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the FlowSchema (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() here captures 'self', the positional 'name' and 'body',
+        # and the 'kwargs' dict; recognised kwargs are merged in below.
+        local_var_params = locals()
+
+        # Endpoint-specific parameters accepted by this method.
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        # Generic client-level options accepted by every generated method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten the accepted
+        # ones into local_var_params so they can be looked up by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_flow_schema" % key
+                )
+            local_var_params[key] = val
+        # The nested kwargs dict has been merged; drop it.
+        del local_var_params['kwargs']
+        # Client-side validation of required parameters (can be disabled via
+        # the api_client configuration).
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_flow_schema`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_flow_schema`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'name' fills the {name} placeholder in the request path.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Map the python snake_case kwargs onto the wire-format camelCase
+        # query keys, skipping anything not supplied (or explicitly None).
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # The patch document is sent as the request body.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type` — one of the patch media types
+        # (json-patch, merge-patch, strategic-merge-patch, apply-patch).
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP call (and response deserialization into
+        # V1beta2FlowSchema) to the shared api_client.
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta2FlowSchema',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def patch_flow_schema_status(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema_status # noqa: E501
+
+ partially update status of the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body, dropping the status code and response headers.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_flow_schema_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_flow_schema_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema_status # noqa: E501
+
+ partially update status of the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots self, name, body and the kwargs dict at this point;
+ # accepted kwargs entries are merged in below and the 'kwargs' key removed.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared for this endpoint.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_flow_schema_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_flow_schema_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_flow_schema_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Translate snake_case python params to their camelCase query-string keys.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the PATCH request to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_priority_level_configuration(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration # noqa: E501
+
+ partially update the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body, dropping the status code and response headers.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_priority_level_configuration_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_priority_level_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration # noqa: E501
+
+ partially update the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots self, name, body and the kwargs dict at this point;
+ # accepted kwargs entries are merged in below and the 'kwargs' key removed.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared for this endpoint.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_priority_level_configuration`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_priority_level_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Translate snake_case python params to their camelCase query-string keys.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the PATCH request to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_priority_level_configuration_status(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration_status # noqa: E501
+
+ partially update status of the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body, dropping the status code and response headers.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_priority_level_configuration_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_priority_level_configuration_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration_status # noqa: E501
+
+ partially update status of the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots self, name, body and the kwargs dict at this point;
+ # accepted kwargs entries are merged in below and the 'kwargs' key removed.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared for this endpoint.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_priority_level_configuration_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_priority_level_configuration_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_priority_level_configuration_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Translate snake_case python params to their camelCase query-string keys.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the PATCH request to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_flow_schema(self, name, **kwargs): # noqa: E501
+ """read_flow_schema # noqa: E501
+
+ read the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_flow_schema(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta2FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force the *_with_http_info variant to return only
+ # the deserialized body, dropping the status code and response headers.
+ kwargs['_return_http_data_only'] = True
+ return self.read_flow_schema_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_flow_schema_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_flow_schema # noqa: E501
+
+ read the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_flow_schema_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() snapshots self, name and the kwargs dict at this point;
+ # accepted kwargs entries are merged in below and the 'kwargs' key removed.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject any keyword argument not declared for this endpoint.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_flow_schema`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Read-only endpoint: no request body and no Content-Type header needed.
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the GET request to the shared ApiClient.
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta2FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def read_flow_schema_status(self, name, **kwargs):  # noqa: E501
    """read_flow_schema_status  # noqa: E501

    Read status of the specified FlowSchema.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.read_flow_schema_status(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the FlowSchema (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: V1beta2FlowSchema, or the request thread when called
        asynchronously.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs['_return_http_data_only'] = True
    return self.read_flow_schema_status_with_http_info(name, **kwargs)  # noqa: E501
+
def read_flow_schema_status_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_flow_schema_status  # noqa: E501

    Read status of the specified FlowSchema.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.read_flow_schema_status_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the FlowSchema (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: return only the response data, without
        the status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
        or the request thread when called asynchronously.
    """
    params = locals()

    # Endpoint parameters plus the generic request-control options
    # understood by the API client.
    allowed = [
        'name',
        'pretty',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in params['kwargs'].items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_flow_schema_status" % key
            )
        params[key] = val
    del params['kwargs']

    # 'name' is mandatory; an explicit None is rejected as well.
    if self.api_client.client_side_validation and params.get('name') is None:
        raise ApiValueError("Missing the required parameter `name` when calling `read_flow_schema_status`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2FlowSchema',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def read_priority_level_configuration(self, name, **kwargs):  # noqa: E501
    """read_priority_level_configuration  # noqa: E501

    Read the specified PriorityLevelConfiguration.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.read_priority_level_configuration(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityLevelConfiguration (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: V1beta2PriorityLevelConfiguration, or the request thread when
        called asynchronously.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs['_return_http_data_only'] = True
    return self.read_priority_level_configuration_with_http_info(name, **kwargs)  # noqa: E501
+
def read_priority_level_configuration_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_priority_level_configuration  # noqa: E501

    Read the specified PriorityLevelConfiguration.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.read_priority_level_configuration_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityLevelConfiguration (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: return only the response data, without
        the status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
        or the request thread when called asynchronously.
    """
    params = locals()

    # Endpoint parameters plus the generic request-control options
    # understood by the API client.
    allowed = [
        'name',
        'pretty',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in params['kwargs'].items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_priority_level_configuration" % key
            )
        params[key] = val
    del params['kwargs']

    # 'name' is mandatory; an explicit None is rejected as well.
    if self.api_client.client_side_validation and params.get('name') is None:
        raise ApiValueError("Missing the required parameter `name` when calling `read_priority_level_configuration`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2PriorityLevelConfiguration',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def read_priority_level_configuration_status(self, name, **kwargs):  # noqa: E501
    """read_priority_level_configuration_status  # noqa: E501

    Read status of the specified PriorityLevelConfiguration.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.read_priority_level_configuration_status(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityLevelConfiguration (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: V1beta2PriorityLevelConfiguration, or the request thread when
        called asynchronously.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs['_return_http_data_only'] = True
    return self.read_priority_level_configuration_status_with_http_info(name, **kwargs)  # noqa: E501
+
def read_priority_level_configuration_status_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_priority_level_configuration_status  # noqa: E501

    Read status of the specified PriorityLevelConfiguration.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.read_priority_level_configuration_status_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityLevelConfiguration (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: return only the response data, without
        the status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
        or the request thread when called asynchronously.
    """
    params = locals()

    # Endpoint parameters plus the generic request-control options
    # understood by the API client.
    allowed = [
        'name',
        'pretty',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in params['kwargs'].items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_priority_level_configuration_status" % key
            )
        params[key] = val
    del params['kwargs']

    # 'name' is mandatory; an explicit None is rejected as well.
    if self.api_client.client_side_validation and params.get('name') is None:
        raise ApiValueError("Missing the required parameter `name` when calling `read_priority_level_configuration_status`")  # noqa: E501

    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta2PriorityLevelConfiguration',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def replace_flow_schema(self, name, body, **kwargs):  # noqa: E501
    """replace_flow_schema  # noqa: E501

    Replace the specified FlowSchema.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.replace_flow_schema(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the FlowSchema (required)
    :param V1beta2FlowSchema body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is "All" (all dry-run stages are processed).
    :param str field_manager: name associated with the actor or entity
        making these changes; fewer than 128 printable characters.
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request object: "Ignore" (drop silently),
        "Warn" (warning header per dropped field; default in v1.23+) or
        "Strict" (fail with BadRequest).
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: V1beta2FlowSchema, or the request thread when called
        asynchronously.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs['_return_http_data_only'] = True
    return self.replace_flow_schema_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_flow_schema_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_flow_schema  # noqa: E501

    Replace the specified FlowSchema.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.replace_flow_schema_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the FlowSchema (required)
    :param V1beta2FlowSchema body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is "All" (all dry-run stages are processed).
    :param str field_manager: name associated with the actor or entity
        making these changes; fewer than 128 printable characters.
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request object: "Ignore" (drop silently),
        "Warn" (warning header per dropped field; default in v1.23+) or
        "Strict" (fail with BadRequest).
    :param _return_http_data_only: return only the response data, without
        the status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
        or the request thread when called asynchronously.
    """
    params = locals()

    # Endpoint parameters plus the generic request-control options
    # understood by the API client.
    allowed = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in params['kwargs'].items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_flow_schema" % key
            )
        params[key] = val
    del params['kwargs']

    # Both positional parameters are mandatory; explicit None is rejected.
    if self.api_client.client_side_validation and params.get('name') is None:
        raise ApiValueError("Missing the required parameter `name` when calling `replace_flow_schema`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:
        raise ApiValueError("Missing the required parameter `body` when calling `replace_flow_schema`")  # noqa: E501

    path_params = {'name': params['name']}

    # Map python_snake_case options onto their query-string names.
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager'),
                               ('field_validation', 'fieldValidation')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1beta2FlowSchema',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def replace_flow_schema_status(self, name, body, **kwargs):  # noqa: E501
    """replace_flow_schema_status  # noqa: E501

    Replace status of the specified FlowSchema.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.replace_flow_schema_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the FlowSchema (required)
    :param V1beta2FlowSchema body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is "All" (all dry-run stages are processed).
    :param str field_manager: name associated with the actor or entity
        making these changes; fewer than 128 printable characters.
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request object: "Ignore" (drop silently),
        "Warn" (warning header per dropped field; default in v1.23+) or
        "Strict" (fail with BadRequest).
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: V1beta2FlowSchema, or the request thread when called
        asynchronously.
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs['_return_http_data_only'] = True
    return self.replace_flow_schema_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_flow_schema_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_flow_schema_status  # noqa: E501

    Replace status of the specified FlowSchema.  # noqa: E501
    Synchronous by default; pass async_req=True for an asynchronous call:
    >>> thread = api.replace_flow_schema_status_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the FlowSchema (required)
    :param V1beta2FlowSchema body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is "All" (all dry-run stages are processed).
    :param str field_manager: name associated with the actor or entity
        making these changes; fewer than 128 printable characters.
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request object: "Ignore" (drop silently),
        "Warn" (warning header per dropped field; default in v1.23+) or
        "Strict" (fail with BadRequest).
    :param _return_http_data_only: return only the response data, without
        the status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the body. Default is True.
    :param _request_timeout: total request timeout (one number) or a
        (connection, read) tuple of timeouts.
    :return: tuple(V1beta2FlowSchema, status_code(int), headers(HTTPHeaderDict))
        or the request thread when called asynchronously.
    """
    params = locals()

    # Endpoint parameters plus the generic request-control options
    # understood by the API client.
    allowed = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout'
    ]

    for key, val in params['kwargs'].items():
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_flow_schema_status" % key
            )
        params[key] = val
    del params['kwargs']

    # Both positional parameters are mandatory; explicit None is rejected.
    if self.api_client.client_side_validation and params.get('name') is None:
        raise ApiValueError("Missing the required parameter `name` when calling `replace_flow_schema_status`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:
        raise ApiValueError("Missing the required parameter `body` when calling `replace_flow_schema_status`")  # noqa: E501

    path_params = {'name': params['name']}

    # Map python_snake_case options onto their query-string names.
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager'),
                               ('field_validation', 'fieldValidation')):
        if params.get(py_name) is not None:
            query_params.append((wire_name, params[py_name]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/flowcontrol.apiserver.k8s.io/v1beta2/flowschemas/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1beta2FlowSchema',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
+    def replace_priority_level_configuration(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration  # noqa: E501
+
+        replace the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta2PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta2PriorityLevelConfiguration
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so the caller gets
+        # only the deserialized response body (not the (data, status, headers)
+        # tuple), then delegate to the *_with_http_info variant that performs
+        # the actual request.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_priority_level_configuration_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_priority_level_configuration_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration  # noqa: E501
+
+        replace the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta2PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the named parameters; this also contains 'self' and the
+        # raw 'kwargs' dict, which is validated, merged in, and deleted below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments; accepted ones are folded into
+        # local_var_params so all lookups below go through a single dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_priority_level_configuration" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_priority_level_configuration`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_priority_level_configuration`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query parameters use the API's camelCase names (dry_run -> dryRun,
+        # field_manager -> fieldManager, field_validation -> fieldValidation).
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Issue the PUT; when async_req is truthy, call_api returns the
+        # request thread instead of the deserialized result (see docstring).
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta2PriorityLevelConfiguration',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_priority_level_configuration_status(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration_status  # noqa: E501
+
+        replace status of the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration_status(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta2PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta2PriorityLevelConfiguration
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper for the /status subresource: request only the
+        # deserialized body and delegate to the *_with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_priority_level_configuration_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_priority_level_configuration_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration_status  # noqa: E501
+
+        replace status of the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration_status_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta2PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta2PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the named parameters; also contains 'self' and the raw
+        # 'kwargs' dict, which is validated, merged in, and deleted below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments; accepted ones are folded into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_priority_level_configuration_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_priority_level_configuration_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_priority_level_configuration_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Query parameters use the API's camelCase names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # PUT against the /status subresource; call_api returns the request
+        # thread when async_req is truthy (see docstring).
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta2/prioritylevelconfigurations/{name}/status', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta2PriorityLevelConfiguration',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta3_api.py b/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta3_api.py
new file mode 100644
index 0000000000..493e7eeb4f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/flowcontrol_apiserver_v1beta3_api.py
@@ -0,0 +1,3024 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class FlowcontrolApiserverV1beta3Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        # Use the supplied ApiClient, or fall back to a default-constructed
+        # one when none is given.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_flow_schema(self, body, **kwargs):  # noqa: E501
+        """create_flow_schema  # noqa: E501
+
+        create a FlowSchema  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_flow_schema(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1beta3FlowSchema body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta3FlowSchema
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: request only the deserialized body and delegate
+        # to the *_with_http_info variant that performs the actual request.
+        kwargs['_return_http_data_only'] = True
+        return self.create_flow_schema_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_flow_schema_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_flow_schema  # noqa: E501
+
+        create a FlowSchema  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_flow_schema_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1beta3FlowSchema body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot of the named parameters; also contains 'self' and the raw
+        # 'kwargs' dict, which is validated, merged in, and deleted below.
+        local_var_params = locals()
+
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unexpected keyword arguments; accepted ones are folded into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_flow_schema" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_flow_schema`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Query parameters use the API's camelCase names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # POST the new FlowSchema; call_api returns the request thread when
+        # async_req is truthy (see docstring).
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta3FlowSchema',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def create_priority_level_configuration(self, body, **kwargs):  # noqa: E501
+        """create_priority_level_configuration  # noqa: E501
+
+        create a PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_priority_level_configuration(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1beta3PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta3PriorityLevelConfiguration
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: request only the deserialized body and delegate
+        # to the *_with_http_info variant that performs the actual request.
+        kwargs['_return_http_data_only'] = True
+        return self.create_priority_level_configuration_with_http_info(body, **kwargs)  # noqa: E501
+
+ def create_priority_level_configuration_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_priority_level_configuration # noqa: E501
+
+ create a PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_priority_level_configuration_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1beta3PriorityLevelConfiguration body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_priority_level_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_flow_schema(self, **kwargs): # noqa: E501
+ """delete_collection_flow_schema # noqa: E501
+
+ delete collection of FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_flow_schema(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_flow_schema_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_flow_schema_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_flow_schema # noqa: E501
+
+ delete collection of FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_flow_schema_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_priority_level_configuration(self, **kwargs): # noqa: E501
+ """delete_collection_priority_level_configuration # noqa: E501
+
+ delete collection of PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_priority_level_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_priority_level_configuration_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_priority_level_configuration_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_priority_level_configuration # noqa: E501
+
+ delete collection of PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_priority_level_configuration_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_flow_schema(self, name, **kwargs): # noqa: E501
+ """delete_flow_schema # noqa: E501
+
+ delete a FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_flow_schema(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_flow_schema_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_flow_schema_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_flow_schema # noqa: E501
+
+ delete a FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_flow_schema_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_flow_schema`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_priority_level_configuration(self, name, **kwargs): # noqa: E501
+ """delete_priority_level_configuration # noqa: E501
+
+ delete a PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_priority_level_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_priority_level_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_priority_level_configuration_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_priority_level_configuration # noqa: E501
+
+ delete a PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_priority_level_configuration_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_priority_level_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_flow_schema(self, **kwargs): # noqa: E501
+ """list_flow_schema # noqa: E501
+
+ list or watch objects of kind FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_flow_schema(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3FlowSchemaList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_flow_schema_with_http_info(**kwargs) # noqa: E501
+
+ def list_flow_schema_with_http_info(self, **kwargs): # noqa: E501
+ """list_flow_schema # noqa: E501
+
+ list or watch objects of kind FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_flow_schema_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3FlowSchemaList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the caller's arguments; only the ``kwargs`` entry carries
+ # data here, and it is validated and flattened below.
+ local_var_params = locals()
+
+ # Query parameters understood by this specific endpoint.
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ # Client-level options accepted by every generated API method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Fail fast on unknown keyword arguments, then flatten the accepted
+ # ones into local_var_params for uniform access below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ # This endpoint is cluster-scoped, so the URL has no path parameters.
+ path_params = {}
+
+ # Map the snake_case Python arguments onto the camelCase Kubernetes
+ # query-string names, skipping any argument left unset (None).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ # GET request: no form fields, file uploads, or request body.
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP round trip (and optional async execution) to the
+ # shared ApiClient; the response body is deserialized to response_type.
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3FlowSchemaList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_priority_level_configuration(self, **kwargs): # noqa: E501
+ """list_priority_level_configuration # noqa: E501
+
+ list or watch objects of kind PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_priority_level_configuration(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3PriorityLevelConfigurationList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force data-only return (drop status code and
+ # headers) and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.list_priority_level_configuration_with_http_info(**kwargs) # noqa: E501
+
+ def list_priority_level_configuration_with_http_info(self, **kwargs): # noqa: E501
+ """list_priority_level_configuration # noqa: E501
+
+ list or watch objects of kind PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_priority_level_configuration_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3PriorityLevelConfigurationList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the caller's arguments; only the ``kwargs`` entry carries
+ # data here, and it is validated and flattened below.
+ local_var_params = locals()
+
+ # Query parameters understood by this specific endpoint.
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ # Client-level options accepted by every generated API method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Fail fast on unknown keyword arguments, then flatten the accepted
+ # ones into local_var_params for uniform access below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ # This endpoint is cluster-scoped, so the URL has no path parameters.
+ path_params = {}
+
+ # Map the snake_case Python arguments onto the camelCase Kubernetes
+ # query-string names, skipping any argument left unset (None).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ # GET request: no form fields, file uploads, or request body.
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Delegate the HTTP round trip (and optional async execution) to the
+ # shared ApiClient; the response body is deserialized to response_type.
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3PriorityLevelConfigurationList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_flow_schema(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema # noqa: E501
+
+ partially update the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_flow_schema_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_flow_schema_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema # noqa: E501
+
+ partially update the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_flow_schema`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_flow_schema`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_flow_schema_status(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema_status # noqa: E501
+
+ partially update status of the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_flow_schema_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_flow_schema_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_flow_schema_status # noqa: E501
+
+ partially update status of the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_flow_schema_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_flow_schema_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_flow_schema_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_flow_schema_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_priority_level_configuration(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration # noqa: E501
+
+ partially update the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_priority_level_configuration_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_priority_level_configuration_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration # noqa: E501
+
+ partially update the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_priority_level_configuration`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_priority_level_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_priority_level_configuration_status(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration_status # noqa: E501
+
+ partially update status of the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration_status(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_priority_level_configuration_status_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_priority_level_configuration_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_level_configuration_status # noqa: E501
+
+ partially update status of the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_level_configuration_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_priority_level_configuration_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_priority_level_configuration_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_priority_level_configuration_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_flow_schema(self, name, **kwargs): # noqa: E501
+ """read_flow_schema # noqa: E501
+
+ read the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_flow_schema(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_flow_schema_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_flow_schema_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_flow_schema # noqa: E501
+
+ read the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_flow_schema_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_flow_schema`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_flow_schema_status(self, name, **kwargs): # noqa: E501
+ """read_flow_schema_status # noqa: E501
+
+ read status of the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_flow_schema_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_flow_schema_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_flow_schema_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_flow_schema_status # noqa: E501
+
+ read status of the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_flow_schema_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_flow_schema_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_flow_schema_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_priority_level_configuration(self, name, **kwargs): # noqa: E501
+ """read_priority_level_configuration # noqa: E501
+
+ read the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_priority_level_configuration(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_priority_level_configuration_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_priority_level_configuration_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_priority_level_configuration # noqa: E501
+
+ read the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_priority_level_configuration_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_priority_level_configuration" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_priority_level_configuration`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_priority_level_configuration_status(self, name, **kwargs): # noqa: E501
+ """read_priority_level_configuration_status # noqa: E501
+
+ read status of the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_priority_level_configuration_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3PriorityLevelConfiguration
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_priority_level_configuration_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_priority_level_configuration_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_priority_level_configuration_status # noqa: E501
+
+ read status of the specified PriorityLevelConfiguration # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_priority_level_configuration_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityLevelConfiguration (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_priority_level_configuration_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_priority_level_configuration_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3PriorityLevelConfiguration', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_flow_schema(self, name, body, **kwargs): # noqa: E501
+ """replace_flow_schema # noqa: E501
+
+ replace the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_flow_schema(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param V1beta3FlowSchema body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1beta3FlowSchema
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_flow_schema_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_flow_schema_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_flow_schema # noqa: E501
+
+ replace the specified FlowSchema # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_flow_schema_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the FlowSchema (required)
+ :param V1beta3FlowSchema body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_flow_schema" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_flow_schema`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_flow_schema`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1beta3FlowSchema', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+    def replace_flow_schema_status(self, name, body, **kwargs):  # noqa: E501
+        """replace_flow_schema_status  # noqa: E501
+
+        replace status of the specified FlowSchema  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_flow_schema_status(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the FlowSchema (required)
+        :param V1beta3FlowSchema body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta3FlowSchema
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so the caller
+        # receives only the deserialized object (or the request thread when
+        # async_req=True), then delegate to the *_with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_flow_schema_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_flow_schema_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_flow_schema_status  # noqa: E501
+
+        replace status of the specified FlowSchema  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_flow_schema_status_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the FlowSchema (required)
+        :param V1beta3FlowSchema body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta3FlowSchema, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures the explicit arguments (self, name, body) plus
+        # the raw **kwargs dict; kwargs is validated and flattened below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the accepted
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_flow_schema_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_flow_schema_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_flow_schema_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Optional query parameters: snake_case arguments are mapped to the
+        # camelCase names expected by the Kubernetes API server.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # PUT against the /status subresource of the named FlowSchema.
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas/{name}/status', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta3FlowSchema',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_priority_level_configuration(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration  # noqa: E501
+
+        replace the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta3PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta3PriorityLevelConfiguration
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so the caller
+        # receives only the deserialized object (or the request thread when
+        # async_req=True), then delegate to the *_with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_priority_level_configuration_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_priority_level_configuration_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration  # noqa: E501
+
+        replace the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta3PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures the explicit arguments (self, name, body) plus
+        # the raw **kwargs dict; kwargs is validated and flattened below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the accepted
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_priority_level_configuration" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_priority_level_configuration`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_priority_level_configuration`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Optional query parameters: snake_case arguments are mapped to the
+        # camelCase names expected by the Kubernetes API server.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # PUT replaces the whole named PriorityLevelConfiguration object.
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta3PriorityLevelConfiguration',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_priority_level_configuration_status(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration_status  # noqa: E501
+
+        replace status of the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration_status(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta3PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1beta3PriorityLevelConfiguration
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so the caller
+        # receives only the deserialized object (or the request thread when
+        # async_req=True), then delegate to the *_with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_priority_level_configuration_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_priority_level_configuration_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_priority_level_configuration_status  # noqa: E501
+
+        replace status of the specified PriorityLevelConfiguration  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_priority_level_configuration_status_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PriorityLevelConfiguration (required)
+        :param V1beta3PriorityLevelConfiguration body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1beta3PriorityLevelConfiguration, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures the explicit arguments (self, name, body) plus
+        # the raw **kwargs dict; kwargs is validated and flattened below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the accepted
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_priority_level_configuration_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_priority_level_configuration_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_priority_level_configuration_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Optional query parameters: snake_case arguments are mapped to the
+        # camelCase names expected by the Kubernetes API server.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # PUT against the /status subresource of the named
+        # PriorityLevelConfiguration.
+        return self.api_client.call_api(
+            '/apis/flowcontrol.apiserver.k8s.io/v1beta3/prioritylevelconfigurations/{name}/status', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1beta3PriorityLevelConfiguration',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_api.py b/contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_api.py
new file mode 100644
index 0000000000..a6265a5175
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class InternalApiserverApi(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when the caller does
+        # not supply one.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_api_group(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIGroup
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force _return_http_data_only so the caller
+        # receives only the deserialized V1APIGroup (or the request thread
+        # when async_req=True), then delegate to the *_with_http_info variant.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() captures self plus the raw **kwargs dict; this endpoint
+        # takes no operation-specific parameters, only the common options.
+        local_var_params = locals()
+
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the accepted
+        # ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_group" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # GET the API group discovery document for internal.apiserver.k8s.io.
+        return self.api_client.call_api(
+            '/apis/internal.apiserver.k8s.io/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIGroup',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_v1alpha1_api.py b/contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_v1alpha1_api.py
new file mode 100644
index 0000000000..acf62cd6ff
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/internal_apiserver_v1alpha1_api.py
@@ -0,0 +1,1583 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class InternalApiserverV1alpha1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        if api_client is None:
+            api_client = ApiClient()  # fall back to a default-configured client when none is injected
+        self.api_client = api_client  # shared transport; every endpoint method delegates to call_api on it
+
+    def create_storage_version(self, body, **kwargs):  # noqa: E501
+        """create_storage_version  # noqa: E501
+
+        create a StorageVersion  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_storage_version(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1StorageVersion body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1StorageVersion
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: callers get only the deserialized body
+        return self.create_storage_version_with_http_info(body, **kwargs)  # noqa: E501
+
+    def create_storage_version_with_http_info(self, body, **kwargs):  # noqa: E501
+        """create_storage_version  # noqa: E501
+
+        create a StorageVersion  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_storage_version_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1StorageVersion body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of the explicit args plus the kwargs dict, keyed by name
+
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_storage_version" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']  # entries were merged into local_var_params above
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_storage_version`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1StorageVersion',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_storage_version(self, **kwargs):  # noqa: E501
+        """delete_collection_storage_version  # noqa: E501
+
+        delete collection of StorageVersion  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_storage_version(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: callers get only the deserialized body
+        return self.delete_collection_storage_version_with_http_info(**kwargs)  # noqa: E501
+
+    def delete_collection_storage_version_with_http_info(self, **kwargs):  # noqa: E501
+        """delete_collection_storage_version  # noqa: E501
+
+        delete collection of StorageVersion  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_storage_version_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of the explicit args plus the kwargs dict, keyed by name
+
+        all_params = [
+            'pretty',
+            '_continue',
+            'dry_run',
+            'field_selector',
+            'grace_period_seconds',
+            'label_selector',
+            'limit',
+            'orphan_dependents',
+            'propagation_policy',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'body'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_collection_storage_version" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']  # entries were merged into local_var_params above
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_storage_version(self, name, **kwargs):  # noqa: E501
+        """delete_storage_version  # noqa: E501
+
+        delete a StorageVersion  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_storage_version(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the StorageVersion (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: callers get only the deserialized body
+        return self.delete_storage_version_with_http_info(name, **kwargs)  # noqa: E501
+
def delete_storage_version_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_storage_version  # noqa: E501

    delete a StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_storage_version_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StorageVersion (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted. Valid values: - All
    :param int grace_period_seconds: Non-negative seconds before the object is deleted; 0 means delete immediately.
    :param bool orphan_dependents: Deprecated: use propagation_policy instead. Either this or propagation_policy may be set, not both.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request: a single
                             total-timeout number or a (connection, read) tuple.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot of the call arguments (self, name, kwargs); validated kwargs
    # entries are folded into this dict below.
    local_var_params = locals()

    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early. dict.items() replaces the
    # py2-era six.iteritems; this client runs on Python 3.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_storage_version" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set ('name' is always present
    # in locals(), so a None check is sufficient)
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_storage_version`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Map python_style argument names to their camelCase query-string keys.
    query_params = []
    for param, query_key in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy')):
        if local_var_params.get(param) is not None:
            query_params.append((query_key, local_var_params[param]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    get available resources  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request: a single
                             total-timeout number or a (connection, read) tuple.
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the detailed variant, asking for the payload only
    # (status code and headers are dropped).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.get_api_resources_with_http_info(**forwarded)  # noqa: E501
+
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    get available resources  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request: a single
                             total-timeout number or a (connection, read) tuple.
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot of the call arguments; validated kwargs entries are folded
    # into this dict below.
    local_var_params = locals()

    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early. dict.items() replaces the
    # py2-era six.iteritems; this client runs on Python 3.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def list_storage_version(self, **kwargs):  # noqa: E501
    """list_storage_version  # noqa: E501

    list or watch objects of kind StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_storage_version(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers may ignore this flag.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict results by object fields; defaults to everything.
    :param str label_selector: restrict results by object labels; defaults to everything.
    :param int limit: maximum number of responses for a list call; more results are fetched via `continue`.
    :param str resource_version: constraint on which resource versions may serve the request; defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls; defaults to unset.
    :param bool send_initial_events: with watch=true, emit synthetic events describing current state first.
    :param int timeout_seconds: timeout for the list/watch call regardless of activity.
    :param bool watch: stream add/update/remove notifications for the described resources.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request: a single
                             total-timeout number or a (connection, read) tuple.
    :return: V1alpha1StorageVersionList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the detailed variant, asking for the payload only
    # (status code and headers are dropped).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.list_storage_version_with_http_info(**forwarded)  # noqa: E501
+
def list_storage_version_with_http_info(self, **kwargs):  # noqa: E501
    """list_storage_version  # noqa: E501

    list or watch objects of kind StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_storage_version_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers may ignore this flag.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict results by object fields; defaults to everything.
    :param str label_selector: restrict results by object labels; defaults to everything.
    :param int limit: maximum number of responses for a list call; more results are fetched via `continue`.
    :param str resource_version: constraint on which resource versions may serve the request; defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls; defaults to unset.
    :param bool send_initial_events: with watch=true, emit synthetic events describing current state first.
    :param int timeout_seconds: timeout for the list/watch call regardless of activity.
    :param bool watch: stream add/update/remove notifications for the described resources.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request: a single
                             total-timeout number or a (connection, read) tuple.
    :return: tuple(V1alpha1StorageVersionList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot of the call arguments; validated kwargs entries are folded
    # into this dict below.
    local_var_params = locals()

    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early. dict.items() replaces the
    # py2-era six.iteritems; this client runs on Python 3.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_storage_version" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Map python_style argument names to their camelCase query-string keys;
    # note `_continue` (python keyword clash) maps to `continue`.
    query_params = []
    for param, query_key in (
            ('pretty', 'pretty'),
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('send_initial_events', 'sendInitialEvents'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch')):
        if local_var_params.get(param) is not None:
            query_params.append((query_key, local_var_params[param]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1StorageVersionList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def patch_storage_version(self, name, body, **kwargs):  # noqa: E501
    """patch_storage_version  # noqa: E501

    partially update the specified StorageVersion  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_storage_version(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the StorageVersion (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted. Valid values: - All
    :param str field_manager: name of the actor making these changes; required for apply patches, optional otherwise.
    :param str field_validation: how the server handles unknown/duplicate fields: Ignore, Warn or Strict.
    :param bool force: \"force\" Apply requests, re-acquiring conflicting fields; must be unset for non-apply patches.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request: a single
                             total-timeout number or a (connection, read) tuple.
    :return: V1alpha1StorageVersion
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the detailed variant, asking for the payload only
    # (status code and headers are dropped).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.patch_storage_version_with_http_info(name, body, **forwarded)  # noqa: E501
+
+ def patch_storage_version_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_storage_version # noqa: E501
+
+ partially update the specified StorageVersion # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_storage_version_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageVersion (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_storage_version" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_version`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_version`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1StorageVersion', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def patch_storage_version_status(self, name, body, **kwargs):  # noqa: E501
    """patch_storage_version_status  # noqa: E501

    Partially update status of the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param object body: the patch to apply (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every dry run stage without persisting
    :param str field_manager: name of the actor or entity making these changes; required for apply patches, optional otherwise
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict
    :param bool force: force Apply requests to re-acquire conflicting fields; must be unset for non-apply patches
    :param bool async_req: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: V1alpha1StorageVersion, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs.update(_return_http_data_only=True)
    return self.patch_storage_version_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
def patch_storage_version_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """patch_storage_version_status  # noqa: E501

    Partially update status of the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param object body: the patch to apply (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every dry run stage without persisting
    :param str field_manager: name of the actor or entity making these changes; required for apply patches, optional otherwise
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict
    :param bool force: force Apply requests to re-acquire conflicting fields; must be unset for non-apply patches
    :param bool async_req: execute request asynchronously
    :param _return_http_data_only: return only the response data, without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    params = {'name': name, 'body': body}

    # Accepted keywords: API parameters plus client transport options.
    api_args = ('name', 'body', 'pretty', 'dry_run',
                'field_manager', 'field_validation', 'force')
    client_args = ('async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout')

    for arg, value in six.iteritems(kwargs):
        if arg not in api_args and arg not in client_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_storage_version_status" % arg
            )
        params[arg] = value

    # Client-side validation of the required parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_version_status`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_version_status`")  # noqa: E501

    path_params = {'name': params['name']}

    # Map python argument names onto their wire (query string) names;
    # only arguments that were supplied and non-None are sent.
    wire_names = (('pretty', 'pretty'),
                  ('dry_run', 'dryRun'),
                  ('field_manager', 'fieldManager'),
                  ('field_validation', 'fieldValidation'),
                  ('force', 'force'))
    query_params = [(wire, params[py])
                    for py, wire in wire_names
                    if params.get(py) is not None]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1StorageVersion',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def read_storage_version(self, name, **kwargs):  # noqa: E501
    """read_storage_version  # noqa: E501

    Read the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param str pretty: if 'true', pretty-print the output
    :param bool async_req: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: V1alpha1StorageVersion, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs.update(_return_http_data_only=True)
    return self.read_storage_version_with_http_info(name, **kwargs)  # noqa: E501
+
def read_storage_version_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_storage_version  # noqa: E501

    Read the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param str pretty: if 'true', pretty-print the output
    :param bool async_req: execute request asynchronously
    :param _return_http_data_only: return only the response data, without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    params = {'name': name}

    # Accepted keywords: API parameters plus client transport options.
    api_args = ('name', 'pretty')
    client_args = ('async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout')

    for arg, value in six.iteritems(kwargs):
        if arg not in api_args and arg not in client_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_storage_version" % arg
            )
        params[arg] = value

    # Client-side validation of the required parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_storage_version`")  # noqa: E501

    path_params = {'name': params['name']}

    # Only send query parameters that were supplied and non-None.
    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))  # noqa: E501

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1StorageVersion',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def read_storage_version_status(self, name, **kwargs):  # noqa: E501
    """read_storage_version_status  # noqa: E501

    Read status of the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param str pretty: if 'true', pretty-print the output
    :param bool async_req: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: V1alpha1StorageVersion, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs.update(_return_http_data_only=True)
    return self.read_storage_version_status_with_http_info(name, **kwargs)  # noqa: E501
+
def read_storage_version_status_with_http_info(self, name, **kwargs):  # noqa: E501
    """read_storage_version_status  # noqa: E501

    Read status of the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param str pretty: if 'true', pretty-print the output
    :param bool async_req: execute request asynchronously
    :param _return_http_data_only: return only the response data, without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    params = {'name': name}

    # Accepted keywords: API parameters plus client transport options.
    api_args = ('name', 'pretty')
    client_args = ('async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout')

    for arg, value in six.iteritems(kwargs):
        if arg not in api_args and arg not in client_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_storage_version_status" % arg
            )
        params[arg] = value

    # Client-side validation of the required parameter.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_storage_version_status`")  # noqa: E501

    path_params = {'name': params['name']}

    # Only send query parameters that were supplied and non-None.
    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))  # noqa: E501

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1alpha1StorageVersion',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def replace_storage_version(self, name, body, **kwargs):  # noqa: E501
    """replace_storage_version  # noqa: E501

    Replace the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param V1alpha1StorageVersion body: the replacement object (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every dry run stage without persisting
    :param str field_manager: name of the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict
    :param bool async_req: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: V1alpha1StorageVersion, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs.update(_return_http_data_only=True)
    return self.replace_storage_version_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_storage_version_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_storage_version  # noqa: E501

    Replace the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param V1alpha1StorageVersion body: the replacement object (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every dry run stage without persisting
    :param str field_manager: name of the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict
    :param bool async_req: execute request asynchronously
    :param _return_http_data_only: return only the response data, without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    params = {'name': name, 'body': body}

    # Accepted keywords: API parameters plus client transport options.
    api_args = ('name', 'body', 'pretty', 'dry_run',
                'field_manager', 'field_validation')
    client_args = ('async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout')

    for arg, value in six.iteritems(kwargs):
        if arg not in api_args and arg not in client_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_storage_version" % arg
            )
        params[arg] = value

    # Client-side validation of the required parameters.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_version`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_version`")  # noqa: E501

    path_params = {'name': params['name']}

    # Map python argument names onto their wire (query string) names;
    # only arguments that were supplied and non-None are sent.
    wire_names = (('pretty', 'pretty'),
                  ('dry_run', 'dryRun'),
                  ('field_manager', 'fieldManager'),
                  ('field_validation', 'fieldValidation'))
    query_params = [(wire, params[py])
                    for py, wire in wire_names
                    if params.get(py) is not None]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1alpha1StorageVersion',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def replace_storage_version_status(self, name, body, **kwargs):  # noqa: E501
    """replace_storage_version_status  # noqa: E501

    Replace status of the specified StorageVersion.  # noqa: E501
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the deserialized result.

    :param str name: name of the StorageVersion (required)
    :param V1alpha1StorageVersion body: the replacement status object (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' processes every dry run stage without persisting
    :param str field_manager: name of the actor or entity making these changes
    :param str field_validation: server handling of unknown/duplicate fields: Ignore, Warn or Strict
    :param bool async_req: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for this request, or a (connection, read) tuple
    :return: V1alpha1StorageVersion, or the request thread when called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the payload.
    kwargs.update(_return_http_data_only=True)
    return self.replace_storage_version_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
+ def replace_storage_version_status_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_storage_version_status # noqa: E501
+
+ replace status of the specified StorageVersion # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_storage_version_status_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageVersion (required)
+ :param V1alpha1StorageVersion body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1StorageVersion, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_storage_version_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_version_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_version_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/internal.apiserver.k8s.io/v1alpha1/storageversions/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1StorageVersion', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/logs_api.py b/contrib/python/kubernetes/kubernetes/client/api/logs_api.py
new file mode 100644
index 0000000000..1655128fba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/logs_api.py
@@ -0,0 +1,244 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class LogsApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def log_file_handler(self, logpath, **kwargs): # noqa: E501
+ """log_file_handler # noqa: E501
+
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.log_file_handler(logpath, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str logpath: path to the log (required)
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: None
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.log_file_handler_with_http_info(logpath, **kwargs) # noqa: E501
+
+ def log_file_handler_with_http_info(self, logpath, **kwargs): # noqa: E501
+ """log_file_handler # noqa: E501
+
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.log_file_handler_with_http_info(logpath, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str logpath: path to the log (required)
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: None
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'logpath'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method log_file_handler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'logpath' is set
+ if self.api_client.client_side_validation and ('logpath' not in local_var_params or # noqa: E501
+ local_var_params['logpath'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `logpath` when calling `log_file_handler`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'logpath' in local_var_params:
+ path_params['logpath'] = local_var_params['logpath'] # noqa: E501
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/logs/{logpath}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type=None, # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def log_file_list_handler(self, **kwargs): # noqa: E501
+ """log_file_list_handler # noqa: E501
+
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.log_file_list_handler(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: None
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.log_file_list_handler_with_http_info(**kwargs) # noqa: E501
+
+ def log_file_list_handler_with_http_info(self, **kwargs): # noqa: E501
+ """log_file_list_handler # noqa: E501
+
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.log_file_list_handler_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: None
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method log_file_list_handler" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/logs/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type=None, # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/networking_api.py b/contrib/python/kubernetes/kubernetes/client/api/networking_api.py
new file mode 100644
index 0000000000..9d78fbf14c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/networking_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class NetworkingApi(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def get_api_group(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIGroup
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_group_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_group_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_group # noqa: E501
+
+ get information of a group # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_group_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_group" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIGroup', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/networking_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/networking_v1_api.py
new file mode 100644
index 0000000000..a5ea857b9e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/networking_v1_api.py
@@ -0,0 +1,4110 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class NetworkingV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_ingress_class(self, body, **kwargs): # noqa: E501
+ """create_ingress_class # noqa: E501
+
+ create an IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_ingress_class(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1IngressClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1IngressClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_ingress_class_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_ingress_class_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_ingress_class # noqa: E501
+
+ create an IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_ingress_class_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1IngressClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1IngressClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_ingress_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_ingress_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingressclasses', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1IngressClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_ingress(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_ingress # noqa: E501
+
+ create an Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Ingress body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Ingress
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_ingress_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_ingress # noqa: E501
+
+ create an Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_ingress_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Ingress body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_ingress" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_ingress`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_ingress`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Ingress', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_network_policy(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_network_policy # noqa: E501
+
+ create a NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_network_policy(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1NetworkPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1NetworkPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_network_policy_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_network_policy_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_network_policy # noqa: E501
+
+ create a NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_network_policy_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1NetworkPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1NetworkPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_network_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_network_policy`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_network_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1NetworkPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_ingress_class(self, **kwargs): # noqa: E501
+ """delete_collection_ingress_class # noqa: E501
+
+ delete collection of IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_ingress_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_ingress_class_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_ingress_class_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_ingress_class # noqa: E501
+
+ delete collection of IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_ingress_class_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_ingress_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingressclasses', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_ingress(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_ingress # noqa: E501
+
+ delete collection of Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_ingress(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_ingress_with_http_info(namespace, **kwargs) # noqa: E501
+
    def delete_collection_namespaced_ingress_with_http_info(self, namespace, **kwargs):  # noqa: E501
        """delete_collection_namespaced_ingress  # noqa: E501

        delete collection of Ingress  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_ingress_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Capture the declared arguments (self, namespace, kwargs) as a dict.
        # This must be the first statement: any local bound before it would
        # also appear in locals() and leak into the parameter handling below.
        local_var_params = locals()

        # Parameters this endpoint accepts (sent to the API server).
        all_params = [
            'namespace',
            'pretty',
            '_continue',
            'dry_run',
            'field_selector',
            'grace_period_seconds',
            'label_selector',
            'limit',
            'orphan_dependents',
            'propagation_policy',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'body'
        ]
        # Per-request options consumed by the client itself, never sent.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then fold the accepted ones into
        # local_var_params so every parameter is looked up the same way.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_namespaced_ingress" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                       local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_ingress`")  # noqa: E501

        collection_formats = {}

        # Path template substitutions for the request URL.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: map snake_case parameter names to the camelCase keys
        # the Kubernetes API server expects; unset (None) values are omitted.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions payload.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP DELETE (and response deserialization into
        # V1Status) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_collection_namespaced_network_policy(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_network_policy # noqa: E501
+
+ delete collection of NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_network_policy(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_network_policy_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_network_policy_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_network_policy # noqa: E501
+
+ delete collection of NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_network_policy_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_network_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_network_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_ingress_class(self, name, **kwargs): # noqa: E501
+ """delete_ingress_class # noqa: E501
+
+ delete an IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_ingress_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IngressClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_ingress_class_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_ingress_class_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_ingress_class # noqa: E501
+
+ delete an IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_ingress_class_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IngressClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_ingress_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_ingress_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingressclasses/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_ingress(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_ingress # noqa: E501
+
+ delete an Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_ingress(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_ingress_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_ingress_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_ingress # noqa: E501
+
+ delete an Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_ingress_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_ingress" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_ingress`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_ingress`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_network_policy(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_network_policy # noqa: E501
+
+ delete a NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_network_policy(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NetworkPolicy (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_network_policy_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_network_policy_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_network_policy # noqa: E501
+
+ delete a NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_network_policy_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NetworkPolicy (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_network_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_network_policy`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_network_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_ingress_class(self, **kwargs): # noqa: E501
+ """list_ingress_class # noqa: E501
+
+ list or watch objects of kind IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_ingress_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1IngressClassList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_ingress_class_with_http_info(**kwargs) # noqa: E501
+
+ def list_ingress_class_with_http_info(self, **kwargs): # noqa: E501
+ """list_ingress_class # noqa: E501
+
+ list or watch objects of kind IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_ingress_class_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1IngressClassList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_ingress_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingressclasses', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1IngressClassList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_ingress_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_ingress_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_ingress_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1IngressList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_ingress_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_ingress_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_ingress_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_ingress_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1IngressList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_ingress_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingresses', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1IngressList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def list_namespaced_ingress(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_ingress  # noqa: E501

    list or watch objects of kind Ingress  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously and receive a thread object instead:
    >>> thread = api.list_namespaced_ingress(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type BOOKMARK; servers that do not implement bookmarks may ignore this flag. Ignored when this is not a watch.
    :param str _continue: continuation token from a previous paginated list result; a stale token yields a 410 ResourceExpired response with a fresh token.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of items for a list call; further results are fetched with the `continue` token from the list metadata. Not supported when watch is true.
    :param str resource_version: constraint on the resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls; recommended whenever resource_version is set. Defaults to unset.
    :param bool send_initial_events: may be set together with `watch=true` to begin the stream with synthetic events describing the current collection state, followed by a synthetic \"Bookmark\" event; requires `resourceVersionMatch` to also be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse object without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (single number) or a (connection, read) timeout pair.
    :return: V1IngressList
        If the method is called asynchronously, returns the request thread.
    """
    # The *_with_http_info variant returns (data, status_code, headers);
    # request only the deserialized data portion for this convenience API.
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_ingress_with_http_info(namespace, **kwargs)  # noqa: E501
+
def list_namespaced_ingress_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_ingress  # noqa: E501

    list or watch objects of kind Ingress  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously and receive a thread object instead:
    >>> thread = api.list_namespaced_ingress_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type BOOKMARK; servers that do not implement bookmarks may ignore this flag. Ignored when this is not a watch.
    :param str _continue: continuation token from a previous paginated list result; a stale token yields a 410 ResourceExpired response with a fresh token.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of items for a list call; further results are fetched with the `continue` token from the list metadata. Not supported when watch is true.
    :param str resource_version: constraint on the resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls; recommended whenever resource_version is set. Defaults to unset.
    :param bool send_initial_events: may be set together with `watch=true` to begin the stream with synthetic events describing the current collection state, followed by a synthetic \"Bookmark\" event; requires `resourceVersionMatch` to also be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse object without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (single number) or a (connection, read) timeout pair.
    :return: tuple(V1IngressList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    local_var_params = {'namespace': namespace}

    # (python_name, query_string_name) pairs for this endpoint; the query
    # string is emitted in exactly this order.
    query_param_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]

    # Every keyword this method accepts: the endpoint parameters plus the
    # generic transport controls.
    all_params = {'namespace'}
    all_params.update(py_name for py_name, _ in query_param_names)
    all_params.update((
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ))

    for key, val in kwargs.items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_ingress" % key
            )
        local_var_params[key] = val

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_ingress`")  # noqa: E501

    path_params = {'namespace': local_var_params['namespace']}

    # Build the query string, skipping parameters left unset (None).
    query_params = [
        (query_name, local_var_params[py_name])
        for py_name, query_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1IngressList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
+
def list_namespaced_network_policy(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_network_policy  # noqa: E501

    list or watch objects of kind NetworkPolicy  # noqa: E501
    Synchronous by default; pass async_req=True to run the request
    asynchronously and receive a thread object instead:
    >>> thread = api.list_namespaced_network_policy(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request watch events of type BOOKMARK; servers that do not implement bookmarks may ignore this flag. Ignored when this is not a watch.
    :param str _continue: continuation token from a previous paginated list result; a stale token yields a 410 ResourceExpired response with a fresh token.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of items for a list call; further results are fetched with the `continue` token from the list metadata. Not supported when watch is true.
    :param str resource_version: constraint on the resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls; recommended whenever resource_version is set. Defaults to unset.
    :param bool send_initial_events: may be set together with `watch=true` to begin the stream with synthetic events describing the current collection state, followed by a synthetic \"Bookmark\" event; requires `resourceVersionMatch` to also be set.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse object without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (single number) or a (connection, read) timeout pair.
    :return: V1NetworkPolicyList
        If the method is called asynchronously, returns the request thread.
    """
    # The *_with_http_info variant returns (data, status_code, headers);
    # request only the deserialized data portion for this convenience API.
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_network_policy_with_http_info(namespace, **kwargs)  # noqa: E501
+
+ def list_namespaced_network_policy_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_network_policy # noqa: E501
+
+ list or watch objects of kind NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_network_policy_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1NetworkPolicyList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_network_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_network_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1NetworkPolicyList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_network_policy_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_network_policy_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_network_policy_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1NetworkPolicyList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_network_policy_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_network_policy_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_network_policy_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_network_policy_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1NetworkPolicyList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_network_policy_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/networkpolicies', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1NetworkPolicyList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_ingress_class(self, name, body, **kwargs): # noqa: E501
+ """patch_ingress_class # noqa: E501
+
+ partially update the specified IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_ingress_class(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IngressClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1IngressClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_ingress_class_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_ingress_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_ingress_class # noqa: E501
+
+ partially update the specified IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_ingress_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IngressClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1IngressClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_ingress_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_ingress_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_ingress_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingressclasses/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1IngressClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_ingress(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_ingress # noqa: E501
+
+ partially update the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_ingress(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Ingress
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_ingress_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_ingress_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_ingress # noqa: E501
+
+ partially update the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_ingress_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_ingress" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_ingress`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_ingress`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_ingress`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Ingress', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_ingress_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_ingress_status # noqa: E501
+
+ partially update status of the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_ingress_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Ingress
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_ingress_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_ingress_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_ingress_status # noqa: E501
+
+ partially update status of the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_ingress_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_ingress_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_ingress_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_ingress_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_ingress_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Ingress', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_network_policy(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_network_policy # noqa: E501
+
+ partially update the specified NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_network_policy(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NetworkPolicy (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1NetworkPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
def patch_namespaced_network_policy_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_network_policy  # noqa: E501

    partially update the specified NetworkPolicy  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_network_policy_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the NetworkPolicy (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1NetworkPolicy, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # NOTE: locals() is captured before any other local variable is bound,
    # so the snapshot contains exactly the declared parameters (self, name,
    # namespace, body) plus the raw `kwargs` dict, which is merged in below.
    local_var_params = locals()

    # Parameters accepted by this operation...
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force'
    ]
    # ...plus the transport-level options common to every generated method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into the parameter map, rejecting unknown keywords.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_network_policy" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_network_policy`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_network_policy`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_network_policy`")  # noqa: E501

    collection_formats = {}

    # Path templating: {name} and {namespace} are substituted by call_api.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Query string: snake_case arguments map to the camelCase names the
    # Kubernetes API expects; unset (None) options are omitted entirely.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The patch document itself is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # HTTP header `Content-Type` — the chosen patch media type tells the
    # server how to interpret `body` (JSON patch, merge, strategic, apply).
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1NetworkPolicy',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_ingress_class(self, name, **kwargs):  # noqa: E501
    """Read the specified IngressClass.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead (``thread.get()`` yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the IngressClass (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1IngressClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant but keep only the
    # deserialized body (drop status code and headers).
    opts = dict(kwargs, _return_http_data_only=True)
    return self.read_ingress_class_with_http_info(name, **opts)  # noqa: E501
+
def read_ingress_class_with_http_info(self, name, **kwargs):  # noqa: E501
    """Read the specified IngressClass, returning full HTTP response info.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead.

    :param async_req bool: execute request asynchronously
    :param str name: name of the IngressClass (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: tuple(V1IngressClass, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the declared arguments first; nothing else may be bound
    # before this call, or it would leak into the parameter map.
    params = locals()

    # Operation parameters plus the transport options shared by every
    # generated API method.
    accepted = ['name', 'pretty']
    accepted += ['async_req', '_return_http_data_only',
                 '_preload_content', '_request_timeout']

    # Merge **kwargs into the parameter map, rejecting unknown keywords.
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_ingress_class" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side check of the required `name` argument.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_ingress_class`")  # noqa: E501

    # {name} in the path template is substituted by call_api.
    path_params = {'name': params['name']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    # Plain GET: no request body, no form fields, no file uploads.
    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1/ingressclasses/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1IngressClass',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def read_namespaced_ingress(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified Ingress.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead (``thread.get()`` yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the Ingress (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1Ingress
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant but keep only the
    # deserialized body (drop status code and headers).
    opts = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_ingress_with_http_info(name, namespace, **opts)  # noqa: E501
+
def read_namespaced_ingress_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_ingress  # noqa: E501

    read the specified Ingress  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_ingress_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Ingress (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # NOTE: locals() is captured before any other local variable is bound,
    # so the snapshot contains exactly the declared parameters plus the
    # raw `kwargs` dict, which is merged in below.
    local_var_params = locals()

    # Parameters accepted by this operation...
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # ...plus the transport-level options common to every generated method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into the parameter map, rejecting unknown keywords.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_ingress" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_ingress`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_ingress`")  # noqa: E501

    collection_formats = {}

    # Path templating: {name} and {namespace} are substituted by call_api.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Unset (None) query options are omitted from the request entirely.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Ingress',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_ingress_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read status of the specified Ingress.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead (``thread.get()`` yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the Ingress (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1Ingress
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant but keep only the
    # deserialized body (drop status code and headers).
    opts = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_ingress_status_with_http_info(name, namespace, **opts)  # noqa: E501
+
def read_namespaced_ingress_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_ingress_status  # noqa: E501

    read status of the specified Ingress  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_ingress_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Ingress (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # NOTE: locals() is captured before any other local variable is bound,
    # so the snapshot contains exactly the declared parameters plus the
    # raw `kwargs` dict, which is merged in below.
    local_var_params = locals()

    # Parameters accepted by this operation...
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # ...plus the transport-level options common to every generated method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Fold **kwargs into the parameter map, rejecting unknown keywords.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_ingress_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_ingress_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_ingress_status`")  # noqa: E501

    collection_formats = {}

    # Path templating: {name} and {namespace} are substituted by call_api.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Unset (None) query options are omitted from the request entirely.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Targets the /status subresource of the Ingress.
    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Ingress',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def read_namespaced_network_policy(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified NetworkPolicy.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead (``thread.get()`` yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the NetworkPolicy (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1NetworkPolicy
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant but keep only the
    # deserialized body (drop status code and headers).
    opts = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_network_policy_with_http_info(name, namespace, **opts)  # noqa: E501
+
def read_namespaced_network_policy_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified NetworkPolicy, returning full HTTP response info.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead.

    :param async_req bool: execute request asynchronously
    :param str name: name of the NetworkPolicy (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: tuple(V1NetworkPolicy, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    # Snapshot the declared arguments first; nothing else may be bound
    # before this call, or it would leak into the parameter map.
    params = locals()

    # Operation parameters plus the transport options shared by every
    # generated API method.
    accepted = ['name', 'namespace', 'pretty']
    accepted += ['async_req', '_return_http_data_only',
                 '_preload_content', '_request_timeout']

    # Merge **kwargs into the parameter map, rejecting unknown keywords.
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_network_policy" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side checks of the required arguments.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_network_policy`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_network_policy`")  # noqa: E501

    # {name} and {namespace} in the path template are substituted by call_api.
    path_params = {'name': params['name'], 'namespace': params['namespace']}

    query_params = []
    if params.get('pretty') is not None:
        query_params.append(('pretty', params['pretty']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    # Plain GET: no request body, no form fields, no file uploads.
    return self.api_client.call_api(
        '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1NetworkPolicy',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def replace_ingress_class(self, name, body, **kwargs):  # noqa: E501
    """Replace the specified IngressClass.

    Synchronous by default; pass ``async_req=True`` to get back the
    request thread instead (``thread.get()`` yields the result).

    :param async_req bool: execute request asynchronously
    :param str name: name of the IngressClass (required)
    :param V1IngressClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1IngressClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant but keep only the
    # deserialized body (drop status code and headers).
    opts = dict(kwargs, _return_http_data_only=True)
    return self.replace_ingress_class_with_http_info(name, body, **opts)  # noqa: E501
+
+ def replace_ingress_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_ingress_class # noqa: E501
+
+ replace the specified IngressClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_ingress_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IngressClass (required)
+ :param V1IngressClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1IngressClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_ingress_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_ingress_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_ingress_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/ingressclasses/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1IngressClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_ingress(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_ingress # noqa: E501
+
+ replace the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_ingress(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Ingress body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Ingress
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_ingress_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_ingress_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_ingress # noqa: E501
+
+ replace the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_ingress_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Ingress body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_ingress" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_ingress`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_ingress`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_ingress`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Ingress', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_ingress_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_ingress_status # noqa: E501
+
+ replace status of the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_ingress_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Ingress body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Ingress
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_ingress_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_ingress_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_ingress_status # noqa: E501
+
+ replace status of the specified Ingress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_ingress_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Ingress (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1Ingress body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Ingress, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_ingress_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_ingress_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_ingress_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_ingress_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Ingress', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_network_policy(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_network_policy # noqa: E501
+
+ replace the specified NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_network_policy(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NetworkPolicy (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1NetworkPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1NetworkPolicy
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_network_policy_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_network_policy # noqa: E501
+
+ replace the specified NetworkPolicy # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_network_policy_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the NetworkPolicy (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1NetworkPolicy body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1NetworkPolicy, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_network_policy" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_network_policy`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_network_policy`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_network_policy`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1NetworkPolicy', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/networking_v1alpha1_api.py b/contrib/python/kubernetes/kubernetes/client/api/networking_v1alpha1_api.py
new file mode 100644
index 0000000000..3f32452cfa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/networking_v1alpha1_api.py
@@ -0,0 +1,2196 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class NetworkingV1alpha1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        if api_client is None:  # no client supplied: fall back to a default-configured ApiClient
+            api_client = ApiClient()
+        self.api_client = api_client  # shared transport used by every endpoint method below
+
+    def create_cluster_cidr(self, body, **kwargs): # noqa: E501
+        """Create a ClusterCIDR (POST /apis/networking.k8s.io/v1alpha1/clustercidrs). # noqa: E501
+
+        create a ClusterCIDR # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_cluster_cidr(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ClusterCIDR body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ClusterCIDR
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: return only the deserialized body
+        return self.create_cluster_cidr_with_http_info(body, **kwargs) # noqa: E501
+
+    def create_cluster_cidr_with_http_info(self, body, **kwargs): # noqa: E501
+        """Create a ClusterCIDR; returns (data, status, headers). # noqa: E501
+
+        create a ClusterCIDR # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_cluster_cidr_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1ClusterCIDR body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ClusterCIDR, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of named args plus the raw 'kwargs' dict
+
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_cluster_cidr" % key
+                )
+            local_var_params[key] = val  # promote validated kwargs to top-level params
+        del local_var_params['kwargs']  # all entries promoted above; drop the nested dict
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+                                                        local_var_params['body'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_cidr`") # noqa: E501
+
+        collection_formats = {}  # no collection-typed query params on this endpoint
+
+        path_params = {}
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken'] # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/clustercidrs', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterCIDR', # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def create_ip_address(self, body, **kwargs): # noqa: E501
+        """Create an IPAddress (POST /apis/networking.k8s.io/v1alpha1/ipaddresses). # noqa: E501
+
+        create an IPAddress # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_ip_address(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1IPAddress body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1IPAddress
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: return only the deserialized body
+        return self.create_ip_address_with_http_info(body, **kwargs) # noqa: E501
+
+    def create_ip_address_with_http_info(self, body, **kwargs): # noqa: E501
+        """Create an IPAddress; returns (data, status, headers). # noqa: E501
+
+        create an IPAddress # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_ip_address_with_http_info(body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param V1alpha1IPAddress body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1IPAddress, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of named args plus the raw 'kwargs' dict
+
+        all_params = [
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_ip_address" % key
+                )
+            local_var_params[key] = val  # promote validated kwargs to top-level params
+        del local_var_params['kwargs']  # all entries promoted above; drop the nested dict
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+                                                        local_var_params['body'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_ip_address`") # noqa: E501
+
+        collection_formats = {}  # no collection-typed query params on this endpoint
+
+        path_params = {}
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken'] # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/ipaddresses', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1IPAddress', # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_cluster_cidr(self, name, **kwargs): # noqa: E501
+        """Delete a named ClusterCIDR (DELETE .../clustercidrs/{name}). # noqa: E501
+
+        delete a ClusterCIDR # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_cluster_cidr(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterCIDR (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: return only the deserialized body
+        return self.delete_cluster_cidr_with_http_info(name, **kwargs) # noqa: E501
+
+    def delete_cluster_cidr_with_http_info(self, name, **kwargs): # noqa: E501
+        """Delete a named ClusterCIDR; returns (data, status, headers). # noqa: E501
+
+        delete a ClusterCIDR # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_cluster_cidr_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterCIDR (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        local_var_params = locals()  # snapshot of named args plus the raw 'kwargs' dict
+
+        all_params = [
+            'name',
+            'pretty',
+            'dry_run',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'body'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_cluster_cidr" % key
+                )
+            local_var_params[key] = val  # promote validated kwargs to top-level params
+        del local_var_params['kwargs']  # all entries promoted above; drop the nested dict
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+                                                        local_var_params['name'] is None): # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_cidr`") # noqa: E501
+
+        collection_formats = {}  # no collection-typed query params on this endpoint
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name'] # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken'] # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status', # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_cluster_cidr(self, **kwargs): # noqa: E501
+        """Delete a collection of ClusterCIDR objects, optionally filtered by list options. # noqa: E501
+
+        delete collection of ClusterCIDR # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_cluster_cidr(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True  # convenience wrapper: return only the deserialized body
+        return self.delete_collection_cluster_cidr_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_cluster_cidr_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_cluster_cidr # noqa: E501
+
+ delete collection of ClusterCIDR # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_cidr_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() here is just {'self', 'kwargs'}; named kwargs are merged
+ # into this dict after validation below.
+ local_var_params = locals()
+
+ # Parameters accepted by the API operation itself.
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Client-level control parameters handled by api_client.call_api().
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments early, then flatten the accepted
+ # ones into local_var_params for uniform lookup below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_cluster_cidr" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ # Collection-scoped endpoint: no path substitutions needed.
+ path_params = {}
+
+ # Translate snake_case python names to the camelCase wire names,
+ # skipping any parameter that was not supplied (or was None).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions payload sent as the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1alpha1/clustercidrs', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_ip_address(self, **kwargs): # noqa: E501
+ """delete_collection_ip_address # noqa: E501
+
+ delete collection of IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_ip_address(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force data-only responses (drop status code and
+ # headers) and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_ip_address_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_ip_address_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_ip_address # noqa: E501
+
+ delete collection of IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_ip_address_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # locals() here is just {'self', 'kwargs'}; named kwargs are merged
+ # into this dict after validation below.
+ local_var_params = locals()
+
+ # Parameters accepted by the API operation itself.
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Client-level control parameters handled by api_client.call_api().
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments early, then flatten the accepted
+ # ones into local_var_params for uniform lookup below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_ip_address" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ # Collection-scoped endpoint: no path substitutions needed.
+ path_params = {}
+
+ # Translate snake_case python names to the camelCase wire names,
+ # skipping any parameter that was not supplied (or was None).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions payload sent as the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1alpha1/ipaddresses', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_ip_address(self, name, **kwargs): # noqa: E501
+ """delete_ip_address # noqa: E501
+
+ delete an IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_ip_address(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IPAddress (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_ip_address_with_http_info(name, **kwargs) # noqa: E501
+
+    def delete_ip_address_with_http_info(self, name, **kwargs):  # noqa: E501
+        """delete_ip_address  # noqa: E501
+
+        delete an IPAddress  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_ip_address_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the IPAddress (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, modifications are not persisted;
+            'All' is the only valid directive.
+        :param int grace_period_seconds: Non-negative seconds before the
+            object is deleted; zero means delete immediately.
+        :param bool orphan_dependents: Deprecated -- use propagation_policy
+            instead. Whether dependent objects should be orphaned.
+        :param str propagation_policy: Garbage-collection policy: 'Orphan',
+            'Background' or 'Foreground'.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: total request timeout, or a
+                                 (connection, read) timeout pair.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the first statement -- it snapshots exactly
+        # {self, name, kwargs}; any local bound earlier would leak into the
+        # request parameters handled below.
+        local_var_params = locals()
+
+        # Parameters this operation accepts ...
+        all_params = [
+            'name',
+            'pretty',
+            'dry_run',
+            'grace_period_seconds',
+            'orphan_dependents',
+            'propagation_policy',
+            'body'
+        ]
+        # ... plus the client-level options common to every operation.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so they are addressable by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_ip_address" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `delete_ip_address`")  # noqa: E501
+
+        collection_formats = {}
+
+        # Substituted into the {name} segment of the URL template.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Map snake_case parameters onto their camelCase wire names.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # Optional V1DeleteOptions payload; only sent when supplied.
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Response body is deserialized into V1Status.
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/ipaddresses/{name}', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_resources  # noqa: E501
+
+        get available resources  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_resources_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: total request timeout, or a
+                                 (connection, read) timeout pair.
+        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the first statement -- it snapshots exactly
+        # {self, kwargs}; any local bound earlier would leak into the
+        # request parameters handled below.
+        local_var_params = locals()
+
+        # This discovery endpoint takes no operation-specific parameters,
+        # only the client-level options common to every operation.
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so they are addressable by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_resources" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Response body is deserialized into V1APIResourceList.
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIResourceList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_cluster_cidr(self, **kwargs): # noqa: E501
+ """list_cluster_cidr # noqa: E501
+
+ list or watch objects of kind ClusterCIDR # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_cidr(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ClusterCIDRList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_cluster_cidr_with_http_info(**kwargs) # noqa: E501
+
+    def list_cluster_cidr_with_http_info(self, **kwargs):  # noqa: E501
+        """list_cluster_cidr  # noqa: E501
+
+        list or watch objects of kind ClusterCIDR  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_cluster_cidr_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: request BOOKMARK watch events
+            (servers may ignore this flag).
+        :param str _continue: continue token from a previous paginated list
+            result; not supported when watch is true.
+        :param str field_selector: restrict results by field; defaults to
+            everything.
+        :param str label_selector: restrict results by label; defaults to
+            everything.
+        :param int limit: maximum number of responses for a list call
+            (pagination via the `continue` field); unsupported if watch=true.
+        :param str resource_version: resourceVersion constraint for the
+            request; see Kubernetes API concepts. Defaults to unset.
+        :param str resource_version_match: how resourceVersion is applied to
+            list calls. Defaults to unset.
+        :param bool send_initial_events: with watch=true, emit synthetic
+            initial events followed by a bookmark; requires
+            resource_version_match to be set.
+        :param int timeout_seconds: timeout for the list/watch call,
+            regardless of activity.
+        :param bool watch: watch for changes and return a stream of add,
+            update, and remove notifications.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: total request timeout, or a
+                                 (connection, read) timeout pair.
+        :return: tuple(V1alpha1ClusterCIDRList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # NOTE: locals() must be the first statement -- it snapshots exactly
+        # {self, kwargs}; any local bound earlier would leak into the
+        # request parameters handled below.
+        local_var_params = locals()
+
+        # Parameters this operation accepts ...
+        all_params = [
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # ... plus the client-level options common to every operation.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into local_var_params so they are addressable by name.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_cluster_cidr" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        # Map snake_case parameters onto their camelCase wire names
+        # (`_continue` is underscored locally to avoid the Python keyword).
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept` -- includes the streaming variants used when
+        # watch=true.
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Response body is deserialized into V1alpha1ClusterCIDRList.
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/clustercidrs', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterCIDRList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_ip_address(self, **kwargs): # noqa: E501
+ """list_ip_address # noqa: E501
+
+ list or watch objects of kind IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_ip_address(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1IPAddressList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_ip_address_with_http_info(**kwargs) # noqa: E501
+
+ def list_ip_address_with_http_info(self, **kwargs): # noqa: E501
+ """list_ip_address # noqa: E501
+
+ list or watch objects of kind IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_ip_address_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1IPAddressList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the visible names ('self' and 'kwargs') before any other locals
+ # are created; the generated code relies on this exact locals() capture.
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten accepted kwargs into
+ # local_var_params and drop the raw 'kwargs' entry from the snapshot.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_ip_address" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ # Translate snake_case Python parameter names to the camelCase query
+ # parameters expected by the Kubernetes API ('_continue' is used on the
+ # Python side because 'continue' is a reserved word).
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Cluster-scoped collection endpoint; list/watch semantics are selected
+ # purely by the query parameters assembled above.
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1alpha1/ipaddresses', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1IPAddressList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_cluster_cidr(self, name, body, **kwargs): # noqa: E501
+ """patch_cluster_cidr # noqa: E501
+
+ partially update the specified ClusterCIDR # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_cidr(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterCIDR (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1ClusterCIDR
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force data-only return (no status/headers tuple)
+ # and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_cidr_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_cluster_cidr_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_cluster_cidr # noqa: E501
+
+ partially update the specified ClusterCIDR # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_cidr_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterCIDR (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1ClusterCIDR, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the visible names ('self', 'name', 'body', 'kwargs') before
+ # any other locals are created; the generated code relies on this
+ # exact locals() capture.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten accepted kwargs into
+ # local_var_params and drop the raw 'kwargs' entry from the snapshot.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_cluster_cidr" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_cidr`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_cidr`") # noqa: E501
+
+ collection_formats = {}
+
+ # 'name' is substituted into the {name} segment of the request path.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Translate snake_case Python parameter names to the camelCase query
+ # parameters expected by the Kubernetes API.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # The raw patch document is sent as the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1ClusterCIDR', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_ip_address(self, name, body, **kwargs): # noqa: E501
+ """patch_ip_address # noqa: E501
+
+ partially update the specified IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_ip_address(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IPAddress (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha1IPAddress
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force data-only return (no status/headers tuple)
+ # and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.patch_ip_address_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_ip_address_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_ip_address # noqa: E501
+
+ partially update the specified IPAddress # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_ip_address_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the IPAddress (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha1IPAddress, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the visible names ('self', 'name', 'body', 'kwargs') before
+ # any other locals are created; the generated code relies on this
+ # exact locals() capture.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten accepted kwargs into
+ # local_var_params and drop the raw 'kwargs' entry from the snapshot.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_ip_address" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_ip_address`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_ip_address`") # noqa: E501
+
+ collection_formats = {}
+
+ # 'name' is substituted into the {name} segment of the request path.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Translate snake_case Python parameter names to the camelCase query
+ # parameters expected by the Kubernetes API.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # The raw patch document is sent as the request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/networking.k8s.io/v1alpha1/ipaddresses/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha1IPAddress', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+    def read_cluster_cidr(self, name, **kwargs):  # noqa: E501
+        """read_cluster_cidr  # noqa: E501
+
+        read the specified ClusterCIDR  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_cluster_cidr(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterCIDR (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ClusterCIDR
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+        # Thin wrapper: request only the deserialized body; the
+        # *_with_http_info variant also exposes status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.read_cluster_cidr_with_http_info(name, **kwargs)  # noqa: E501
+
+    def read_cluster_cidr_with_http_info(self, name, **kwargs):  # noqa: E501
+        """read_cluster_cidr  # noqa: E501
+
+        read the specified ClusterCIDR  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_cluster_cidr_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterCIDR (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ClusterCIDR, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Snapshot of the named arguments plus the raw kwargs dict, so every
+        # parameter can be looked up uniformly by name below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'pretty'
+        ]
+        # Client-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_cluster_cidr" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_cidr`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterCIDR',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def read_ip_address(self, name, **kwargs):  # noqa: E501
+        """read_ip_address  # noqa: E501
+
+        read the specified IPAddress  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_ip_address(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the IPAddress (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1IPAddress
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+        # Thin wrapper: request only the deserialized body; the
+        # *_with_http_info variant also exposes status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.read_ip_address_with_http_info(name, **kwargs)  # noqa: E501
+
+    def read_ip_address_with_http_info(self, name, **kwargs):  # noqa: E501
+        """read_ip_address  # noqa: E501
+
+        read the specified IPAddress  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_ip_address_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the IPAddress (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1IPAddress, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Snapshot of the named arguments plus the raw kwargs dict, so every
+        # parameter can be looked up uniformly by name below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'pretty'
+        ]
+        # Client-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_ip_address" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_ip_address`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/ipaddresses/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1IPAddress',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_cluster_cidr(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_cidr  # noqa: E501
+
+        replace the specified ClusterCIDR  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_cidr(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterCIDR (required)
+        :param V1alpha1ClusterCIDR body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1ClusterCIDR
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+        # Thin wrapper: request only the deserialized body; the
+        # *_with_http_info variant also exposes status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_cluster_cidr_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_cluster_cidr_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_cidr  # noqa: E501
+
+        replace the specified ClusterCIDR  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_cidr_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterCIDR (required)
+        :param V1alpha1ClusterCIDR body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1ClusterCIDR, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Snapshot of the named arguments plus the raw kwargs dict, so every
+        # parameter can be looked up uniformly by name below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Client-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_cluster_cidr" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_cidr`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_cidr`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1ClusterCIDR',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_ip_address(self, name, body, **kwargs):  # noqa: E501
+        """replace_ip_address  # noqa: E501
+
+        replace the specified IPAddress  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_ip_address(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the IPAddress (required)
+        :param V1alpha1IPAddress body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1alpha1IPAddress
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+        # Thin wrapper: request only the deserialized body; the
+        # *_with_http_info variant also exposes status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.replace_ip_address_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_ip_address_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_ip_address  # noqa: E501
+
+        replace the specified IPAddress  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_ip_address_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the IPAddress (required)
+        :param V1alpha1IPAddress body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1alpha1IPAddress, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Snapshot of the named arguments plus the raw kwargs dict, so every
+        # parameter can be looked up uniformly by name below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Client-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_ip_address" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                       local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_ip_address`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                       local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_ip_address`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/networking.k8s.io/v1alpha1/ipaddresses/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1alpha1IPAddress',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/node_api.py b/contrib/python/kubernetes/kubernetes/client/api/node_api.py
new file mode 100644
index 0000000000..0c06c422db
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/node_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class NodeApi(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when none is supplied.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_api_group(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1APIGroup
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+        # Thin wrapper: request only the deserialized body; the
+        # *_with_http_info variant also exposes status code and headers.
+        kwargs['_return_http_data_only'] = True
+        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501
+
+    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
+        """get_api_group  # noqa: E501
+
+        get information of a group  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_api_group_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
+            If the method is called asynchronously,
+            returns the request thread.
+        """
+
+        # Snapshot of the arguments (just 'self' and the kwargs dict here).
+        local_var_params = locals()
+
+        # This endpoint takes no operation-specific parameters; only the
+        # client-level options are accepted.
+        all_params = [
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then flatten kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_api_group" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/node.k8s.io/', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1APIGroup',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/node_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/node_v1_api.py
new file mode 100644
index 0000000000..db8e4bf48d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/node_v1_api.py
@@ -0,0 +1,1169 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class NodeV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_runtime_class(self, body, **kwargs): # noqa: E501
+ """create_runtime_class # noqa: E501
+
+ create a RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_runtime_class(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1RuntimeClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RuntimeClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_runtime_class_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_runtime_class_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_runtime_class # noqa: E501
+
+ create a RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_runtime_class_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1RuntimeClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_runtime_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_runtime_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/node.k8s.io/v1/runtimeclasses', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RuntimeClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_runtime_class(self, **kwargs): # noqa: E501
+ """delete_collection_runtime_class # noqa: E501
+
+ delete collection of RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_runtime_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_runtime_class_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_runtime_class_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_runtime_class # noqa: E501
+
+ delete collection of RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_runtime_class_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_runtime_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/node.k8s.io/v1/runtimeclasses', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_runtime_class(self, name, **kwargs): # noqa: E501
+ """delete_runtime_class # noqa: E501
+
+ delete a RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_runtime_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RuntimeClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_runtime_class_with_http_info(name, **kwargs) # noqa: E501
+
def delete_runtime_class_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_runtime_class  # noqa: E501

    Delete a RuntimeClass. Synchronous by default; pass ``async_req=True``
    to get the request thread back instead of the response.

    >>> thread = api.delete_runtime_class_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the RuntimeClass (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All' (all dry run stages processed)
    :param int grace_period_seconds: seconds before the object is deleted;
        must be non-negative, 0 means delete immediately
    :param bool orphan_dependents: deprecated in favor of propagation_policy;
        whether dependent objects should be orphaned
    :param str propagation_policy: garbage-collection policy; one of
        'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data (default True)
    :param _request_timeout: total timeout (single number) or a
        (connection, read) timeout pair
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict));
        the request thread if called asynchronously
    """

    # Snapshot locals() first, while it only holds self/name/kwargs.
    params = locals()

    known_args = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Fold the accepted kwargs into the snapshot, rejecting unknown ones.
    for key, val in six.iteritems(params['kwargs']):
        if key not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_runtime_class" % key
            )
        params[key] = val
    del params['kwargs']

    # 'name' is mandatory for this endpoint.
    if self.api_client.client_side_validation and params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_runtime_class`")  # noqa: E501

    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']

    # Map snake_case options to their wire names, skipping unset ones.
    query_params = []
    for attr, wire_name in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy')):
        if params.get(attr) is not None:
            query_params.append((wire_name, params[attr]))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    # Optional V1DeleteOptions payload.
    body_params = params.get('body')

    return self.api_client.call_api(
        '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='V1Status',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get available resources. Synchronous by default; pass ``async_req=True``
    to get the request thread back instead of the response.

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data (default True)
    :param _request_timeout: total timeout (single number) or a
        (connection, read) timeout pair
    :return: V1APIResourceList; the request thread if called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
+
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get available resources. Synchronous by default; pass ``async_req=True``
    to get the request thread back instead of the response.

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data (default True)
    :param _request_timeout: total timeout (single number) or a
        (connection, read) timeout pair
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict));
        the request thread if called asynchronously
    """

    # Snapshot locals() first, while it only holds self/kwargs.
    params = locals()

    # This endpoint takes no API parameters, only the framework kwargs.
    known_args = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    for key, val in six.iteritems(params['kwargs']):
        if key not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val
    del params['kwargs']

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/node.k8s.io/v1/', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def list_runtime_class(self, **kwargs):  # noqa: E501
    """list_runtime_class  # noqa: E501

    List or watch objects of kind RuntimeClass. Synchronous by default;
    pass ``async_req=True`` to get the request thread back instead.

    >>> thread = api.list_runtime_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events;
        servers may ignore this; ignored when not watching
    :param str _continue: continuation token from a previous list result,
        used to retrieve the next chunk of results
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call; the
        server sets `continue` on the list metadata when more items exist
    :param str resource_version: constraint on which resource versions the
        request may be served from (see Kubernetes API concepts docs)
    :param str resource_version_match: how resourceVersion is applied to
        list calls (see Kubernetes API concepts docs)
    :param bool send_initial_events: with watch=true, emit synthetic events
        for the current collection state followed by a Bookmark event;
        requires resourceVersionMatch to be set
    :param int timeout_seconds: timeout for the list/watch call,
        regardless of activity
    :param bool watch: stream add/update/remove notifications for the
        described resources; specify resourceVersion
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data (default True)
    :param _request_timeout: total timeout (single number) or a
        (connection, read) timeout pair
    :return: V1RuntimeClassList; the request thread if called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.list_runtime_class_with_http_info(**kwargs)  # noqa: E501
+
def list_runtime_class_with_http_info(self, **kwargs):  # noqa: E501
    """list_runtime_class  # noqa: E501

    List or watch objects of kind RuntimeClass. Synchronous by default;
    pass ``async_req=True`` to get the request thread back instead.

    >>> thread = api.list_runtime_class_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events;
        servers may ignore this; ignored when not watching
    :param str _continue: continuation token from a previous list result,
        used to retrieve the next chunk of results
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call; the
        server sets `continue` on the list metadata when more items exist
    :param str resource_version: constraint on which resource versions the
        request may be served from (see Kubernetes API concepts docs)
    :param str resource_version_match: how resourceVersion is applied to
        list calls (see Kubernetes API concepts docs)
    :param bool send_initial_events: with watch=true, emit synthetic events
        for the current collection state followed by a Bookmark event;
        requires resourceVersionMatch to be set
    :param int timeout_seconds: timeout for the list/watch call,
        regardless of activity
    :param bool watch: stream add/update/remove notifications for the
        described resources; specify resourceVersion
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data (default True)
    :param _request_timeout: total timeout (single number) or a
        (connection, read) timeout pair
    :return: tuple(V1RuntimeClassList, status_code(int), headers(HTTPHeaderDict));
        the request thread if called asynchronously
    """

    # Snapshot locals() first, while it only holds self/kwargs.
    params = locals()

    known_args = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Fold the accepted kwargs into the snapshot, rejecting unknown ones.
    for key, val in six.iteritems(params['kwargs']):
        if key not in known_args:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_runtime_class" % key
            )
        params[key] = val
    del params['kwargs']

    # Map snake_case options to their wire names, skipping unset ones.
    query_params = []
    for attr, wire_name in (
            ('pretty', 'pretty'),
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('send_initial_events', 'sendInitialEvents'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch')):
        if params.get(attr) is not None:
            query_params.append((wire_name, params[attr]))

    # Watch streams get their own media types.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/node.k8s.io/v1/runtimeclasses', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1RuntimeClassList',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def patch_runtime_class(self, name, body, **kwargs):  # noqa: E501
    """patch_runtime_class  # noqa: E501

    Partially update the specified RuntimeClass. Synchronous by default;
    pass ``async_req=True`` to get the request thread back instead.

    >>> thread = api.patch_runtime_class(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the RuntimeClass (required)
    :param object body: (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All' (all dry run stages processed)
    :param str field_manager: name associated with the actor making these
        changes; under 128 printable characters; required for apply
        requests, optional for other patch types
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request object: 'Ignore', 'Warn' or
        'Strict'
    :param bool force: "force" Apply requests, re-acquiring conflicting
        fields owned by others; must be unset for non-apply patches
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding response data (default True)
    :param _request_timeout: total timeout (single number) or a
        (connection, read) timeout pair
    :return: V1RuntimeClass; the request thread if called asynchronously
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.patch_runtime_class_with_http_info(name, body, **kwargs)  # noqa: E501
+
+ def patch_runtime_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_runtime_class # noqa: E501
+
+ partially update the specified RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_runtime_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RuntimeClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_runtime_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_runtime_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_runtime_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RuntimeClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_runtime_class(self, name, **kwargs): # noqa: E501
+ """read_runtime_class # noqa: E501
+
+ read the specified RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_runtime_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RuntimeClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RuntimeClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_runtime_class_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_runtime_class_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_runtime_class # noqa: E501
+
+ read the specified RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_runtime_class_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RuntimeClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_runtime_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_runtime_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RuntimeClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_runtime_class(self, name, body, **kwargs): # noqa: E501
+ """replace_runtime_class # noqa: E501
+
+ replace the specified RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_runtime_class(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RuntimeClass (required)
+ :param V1RuntimeClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RuntimeClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_runtime_class_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_runtime_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_runtime_class # noqa: E501
+
+ replace the specified RuntimeClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_runtime_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RuntimeClass (required)
+ :param V1RuntimeClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_runtime_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_runtime_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_runtime_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RuntimeClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/openid_api.py b/contrib/python/kubernetes/kubernetes/client/api/openid_api.py
new file mode 100644
index 0000000000..1b16c857ed
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/openid_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class OpenidApi(object):
    """Client for the cluster's OpenID discovery endpoints.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Use the caller-supplied client, or fall back to a default one.
        self.api_client = ApiClient() if api_client is None else api_client

    def get_service_account_issuer_open_id_keyset(self, **kwargs):  # noqa: E501
        """get_service_account_issuer_open_id_keyset  # noqa: E501

        get service account issuer OpenID JSON Web Key Set (contains public token verification keys)  # noqa: E501
        Runs synchronously unless async_req=True is passed, in which case a
        request thread is returned instead of the decoded object.
        >>> thread = api.get_service_account_issuer_open_id_keyset(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: str
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Delegate to the *_with_http_info variant, asking it to drop the
        # status code and headers from its return value.
        kwargs['_return_http_data_only'] = True
        return self.get_service_account_issuer_open_id_keyset_with_http_info(**kwargs)  # noqa: E501

    def get_service_account_issuer_open_id_keyset_with_http_info(self, **kwargs):  # noqa: E501
        """get_service_account_issuer_open_id_keyset  # noqa: E501

        get service account issuer OpenID JSON Web Key Set (contains public token verification keys)  # noqa: E501
        Runs synchronously unless async_req=True is passed, in which case a
        request thread is returned instead of the response tuple.
        >>> thread = api.get_service_account_issuer_open_id_keyset_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This endpoint takes no API parameters — only the generic
        # request-control keywords are accepted.
        accepted = [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        ]

        params = {}
        for key, value in kwargs.items():
            if key not in accepted:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_service_account_issuer_open_id_keyset" % key
                )
            params[key] = value

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/jwk-set+json'])  # noqa: E501
        }

        return self.api_client.call_api(
            '/openid/v1/jwks', 'GET',
            {},
            [],
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='str',  # noqa: E501
            auth_settings=['BearerToken'],  # noqa: E501
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/policy_api.py b/contrib/python/kubernetes/kubernetes/client/api/policy_api.py
new file mode 100644
index 0000000000..363b77c7b2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/policy_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class PolicyApi(object):
    """Client for discovery of the `policy` API group.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Use the caller-supplied client, or fall back to a default one.
        self.api_client = ApiClient() if api_client is None else api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        Runs synchronously unless async_req=True is passed, in which case a
        request thread is returned instead of the decoded object.
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Delegate to the *_with_http_info variant, asking it to drop the
        # status code and headers from its return value.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        Runs synchronously unless async_req=True is passed, in which case a
        request thread is returned instead of the response tuple.
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This endpoint takes no API parameters — only the generic
        # request-control keywords are accepted.
        accepted = [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        ]

        params = {}
        for key, value in kwargs.items():
            if key not in accepted:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            params[key] = value

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
        }

        return self.api_client.call_api(
            '/apis/policy/', 'GET',
            {},
            [],
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=['BearerToken'],  # noqa: E501
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/policy_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/policy_v1_api.py
new file mode 100644
index 0000000000..d3171727b7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/policy_v1_api.py
@@ -0,0 +1,1833 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class PolicyV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    def __init__(self, api_client=None):
+        # Fall back to a default-configured ApiClient when the caller does
+        # not inject one; all requests are issued through this client.
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def create_namespaced_pod_disruption_budget(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_pod_disruption_budget  # noqa: E501
+
+        create a PodDisruptionBudget  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_pod_disruption_budget(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1PodDisruptionBudget body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1PodDisruptionBudget
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Thin wrapper: force data-only responses and delegate. Callers that
+        # need (data, status, headers) use the _with_http_info variant directly.
+        kwargs['_return_http_data_only'] = True
+        return self.create_namespaced_pod_disruption_budget_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
+    def create_namespaced_pod_disruption_budget_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
+        """create_namespaced_pod_disruption_budget  # noqa: E501
+
+        create a PodDisruptionBudget  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.create_namespaced_pod_disruption_budget_with_http_info(namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1PodDisruptionBudget body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the named parameters plus the raw **kwargs dict;
+        # extra keyword arguments are validated against all_params and merged
+        # into this dict below.
+        local_var_params = locals()
+
+        all_params = [
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument this API operation does not define.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method create_namespaced_pod_disruption_budget" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod_disruption_budget`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_pod_disruption_budget`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional parameters are only sent when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets', 'POST',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1PodDisruptionBudget',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_collection_namespaced_pod_disruption_budget(self, namespace, **kwargs):  # noqa: E501
+        """delete_collection_namespaced_pod_disruption_budget  # noqa: E501
+
+        delete collection of PodDisruptionBudget  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_namespaced_pod_disruption_budget(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Thin wrapper: force data-only responses and delegate. Callers that
+        # need (data, status, headers) use the _with_http_info variant directly.
+        kwargs['_return_http_data_only'] = True
+        return self.delete_collection_namespaced_pod_disruption_budget_with_http_info(namespace, **kwargs)  # noqa: E501
+
+    def delete_collection_namespaced_pod_disruption_budget_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """delete_collection_namespaced_pod_disruption_budget  # noqa: E501
+
+        delete collection of PodDisruptionBudget  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_collection_namespaced_pod_disruption_budget_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event  will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param V1DeleteOptions body:
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the named parameters plus the raw **kwargs dict;
+        # extra keyword arguments are validated against all_params and merged
+        # into this dict below.
+        local_var_params = locals()
+
+        all_params = [
+            'namespace',
+            'pretty',
+            '_continue',
+            'dry_run',
+            'field_selector',
+            'grace_period_seconds',
+            'label_selector',
+            'limit',
+            'orphan_dependents',
+            'propagation_policy',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'body'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument this API operation does not define.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method delete_collection_namespaced_pod_disruption_budget" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_pod_disruption_budget`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Optional parameters are only sent when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            # '_continue' (leading underscore) avoids shadowing the Python
+            # keyword; the wire-format query parameter is named 'continue'.
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
+            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
+            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
+        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
+            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets', 'DELETE',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Status',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def delete_namespaced_pod_disruption_budget(self, name, namespace, **kwargs):  # noqa: E501
+        """delete_namespaced_pod_disruption_budget  # noqa: E501
+
+        delete a PodDisruptionBudget  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.delete_namespaced_pod_disruption_budget(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the PodDisruptionBudget (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+        :param V1DeleteOptions body:
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Status
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Thin wrapper: force data-only responses and delegate. Callers that
+        # need (data, status, headers) use the _with_http_info variant directly.
+        kwargs['_return_http_data_only'] = True
+        return self.delete_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs)  # noqa: E501
+
    def delete_namespaced_pod_disruption_budget_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
        """delete_namespaced_pod_disruption_budget  # noqa: E501

        delete a PodDisruptionBudget  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_pod_disruption_budget_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodDisruptionBudget (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit arguments (self, name, namespace) together
        # with the raw kwargs dict. This must be the first statement of the
        # method so that no other locals leak into the snapshot.
        local_var_params = locals()

        # Operation-specific keyword parameters accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Generic request-control keywords accepted by every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keywords, then flatten the accepted ones into the
        # parameter dict so everything below reads from one place.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_pod_disruption_budget" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_pod_disruption_budget`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_pod_disruption_budget`")  # noqa: E501

        collection_formats = {}

        # Substitutions for the {namespace} and {name} segments of the URL.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: python snake_case names are mapped to the API's
        # camelCase wire names; unset (None) values are omitted entirely.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/policy/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_pod_disruption_budget(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod_disruption_budget # noqa: E501
+
+ list or watch objects of kind PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod_disruption_budget(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodDisruptionBudgetList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_pod_disruption_budget_with_http_info(namespace, **kwargs) # noqa: E501
+
    def list_namespaced_pod_disruption_budget_with_http_info(self, namespace, **kwargs):  # noqa: E501
        """list_namespaced_pod_disruption_budget  # noqa: E501

        list or watch objects of kind PodDisruptionBudget  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_namespaced_pod_disruption_budget_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PodDisruptionBudgetList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit arguments (self, namespace) together with the
        # raw kwargs dict. This must be the first statement of the method so
        # that no other locals leak into the snapshot.
        local_var_params = locals()

        # Operation-specific keyword parameters accepted by this endpoint.
        all_params = [
            'namespace',
            'pretty',
            'allow_watch_bookmarks',
            '_continue',
            'field_selector',
            'label_selector',
            'limit',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'watch'
        ]
        # Generic request-control keywords accepted by every operation.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keywords, then flatten the accepted ones into the
        # parameter dict so everything below reads from one place.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_namespaced_pod_disruption_budget" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_pod_disruption_budget`")  # noqa: E501

        collection_formats = {}

        # Substitution for the {namespace} segment of the URL.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Query string: python snake_case names are mapped to the API's
        # camelCase wire names; unset (None) values are omitted entirely.
        # Note `_continue` is underscore-prefixed because `continue` is a
        # python keyword.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
            query_params.append(('watch', local_var_params['watch']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body. Accept includes the streaming media types
        # used when watch=true.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PodDisruptionBudgetList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_pod_disruption_budget_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_pod_disruption_budget_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_pod_disruption_budget_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodDisruptionBudgetList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
def list_pod_disruption_budget_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_pod_disruption_budget_for_all_namespaces  # noqa: E501

    list or watch objects of kind PodDisruptionBudget  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_pod_disruption_budget_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1PodDisruptionBudgetList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot this call's arguments (`self` plus the raw `kwargs` dict).
    # The generic code below looks parameters up in this dict BY NAME, so
    # the local/parameter names in this method must not be changed.
    local_var_params = locals()

    # Operation-specific parameters accepted by this endpoint.
    all_params = [
        'allow_watch_bookmarks',
        '_continue',  # leading underscore because `continue` is a Python keyword
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'watch'
    ]
    # Generic client options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params so they can be read uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_pod_disruption_budget_for_all_namespaces" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Empty: this operation has no list-valued parameters that would need a
    # collection serialization format (csv, multi, ...).
    collection_formats = {}

    # Cluster-scoped list: the URL template has no path placeholders.
    path_params = {}

    # Build the query string, mapping snake_case locals to the camelCase
    # names the Kubernetes API expects; unset (None) options are omitted.
    query_params = []
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
        query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
    if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
        query_params.append(('watch', local_var_params['watch']))  # noqa: E501

    header_params = {}

    # GET request: no form fields or file uploads.
    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/apis/policy/v1/poddisruptionbudgets', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PodDisruptionBudgetList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def patch_namespaced_pod_disruption_budget(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified PodDisruptionBudget.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously and receive a thread whose ``get()`` yields the result.
    >>> thread = api.patch_namespaced_pod_disruption_budget(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodDisruptionBudget (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch to apply (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. The only valid value is \"All\".
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. Required for apply patches, optional otherwise.
    :param str field_validation: fieldValidation instructs the server on how to handle unknown or duplicate fields in the request body (Ignore, Warn, or Strict).
    :param bool force: Force \"force\" Applies the request, re-acquiring conflicting fields owned by others. Must be unset for non-apply patch requests.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding its data. Default True.
    :param _request_timeout: total request timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1PodDisruptionBudget
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper returns only the deserialized object; callers
    # wanting the (data, status, headers) tuple use the *_with_http_info variant.
    kwargs.update(_return_http_data_only=True)
    return self.patch_namespaced_pod_disruption_budget_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
def patch_namespaced_pod_disruption_budget_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_pod_disruption_budget  # noqa: E501

    partially update the specified PodDisruptionBudget  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_pod_disruption_budget_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodDisruptionBudget (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # Snapshot this call's arguments (self, name, namespace, body, kwargs).
    # Parameters are looked up in this dict BY NAME below, so the local and
    # parameter names of this method must not be changed.
    local_var_params = locals()

    # Operation-specific parameters accepted by this endpoint.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'force'
    ]
    # Generic client options accepted by every generated API method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones into
    # local_var_params for uniform lookup below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_pod_disruption_budget" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_disruption_budget`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_disruption_budget`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_disruption_budget`")  # noqa: E501

    # Empty: this operation has no list-valued parameters that would need a
    # collection serialization format (csv, multi, ...).
    collection_formats = {}

    # Substituted into the {namespace}/{name} placeholders of the URL template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Build the query string; unset (None) options are omitted.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    # PATCH sends its payload as the request body, not as form data.
    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Delegate transport, (de)serialization and async dispatch to the
    # shared api_client.
    return self.api_client.call_api(
        '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PodDisruptionBudget',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def patch_namespaced_pod_disruption_budget_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified PodDisruptionBudget.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously and receive a thread whose ``get()`` yields the result.
    >>> thread = api.patch_namespaced_pod_disruption_budget_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PodDisruptionBudget (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch to apply to the status subresource (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. The only valid value is \"All\".
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. Required for apply patches, optional otherwise.
    :param str field_validation: fieldValidation instructs the server on how to handle unknown or duplicate fields in the request body (Ignore, Warn, or Strict).
    :param bool force: Force \"force\" Applies the request, re-acquiring conflicting fields owned by others. Must be unset for non-apply patch requests.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding its data. Default True.
    :param _request_timeout: total request timeout as a single number, or a
                             (connection, read) tuple.
    :return: V1PodDisruptionBudget
        If the method is called asynchronously,
        returns the request thread.
    """
    # Force the data-only return shape, then delegate to the
    # full-information variant of this call.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_pod_disruption_budget_status_with_http_info(
        name, namespace, body, **call_kwargs)  # noqa: E501
+
+ def patch_namespaced_pod_disruption_budget_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_disruption_budget_status # noqa: E501
+
+ partially update status of the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_pod_disruption_budget_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_disruption_budget_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_disruption_budget_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_disruption_budget_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodDisruptionBudget', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_pod_disruption_budget(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_disruption_budget # noqa: E501
+
+ read the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_disruption_budget(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodDisruptionBudget
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_pod_disruption_budget_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_disruption_budget # noqa: E501
+
+ read the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_disruption_budget_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_pod_disruption_budget" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_disruption_budget`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_disruption_budget`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodDisruptionBudget', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_pod_disruption_budget_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_disruption_budget_status # noqa: E501
+
+ read status of the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_disruption_budget_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodDisruptionBudget
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_pod_disruption_budget_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_disruption_budget_status # noqa: E501
+
+ read status of the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_pod_disruption_budget_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_disruption_budget_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_disruption_budget_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodDisruptionBudget', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_disruption_budget(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_disruption_budget # noqa: E501
+
+ replace the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_disruption_budget(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PodDisruptionBudget body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodDisruptionBudget
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_disruption_budget_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_disruption_budget_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_disruption_budget # noqa: E501
+
+ replace the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_disruption_budget_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PodDisruptionBudget body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_disruption_budget" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_disruption_budget`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_disruption_budget`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_disruption_budget`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodDisruptionBudget', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_disruption_budget_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_disruption_budget_status # noqa: E501
+
+ replace status of the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_disruption_budget_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PodDisruptionBudget body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PodDisruptionBudget
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_disruption_budget_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_disruption_budget_status # noqa: E501
+
+ replace status of the specified PodDisruptionBudget # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodDisruptionBudget (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1PodDisruptionBudget body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PodDisruptionBudget, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_disruption_budget_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_disruption_budget_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_disruption_budget_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_disruption_budget_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PodDisruptionBudget', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_api.py b/contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_api.py
new file mode 100644
index 0000000000..ad9768d42a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class RbacAuthorizationApi(object):
    """Client for the `rbac.authorization.k8s.io` API group discovery endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default transport client when none is supplied.
        self.api_client = api_client if api_client is not None else ApiClient()

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group # noqa: E501

        Return information about the rbac.authorization.k8s.io group. # noqa: E501
        Synchronous by default; pass ``async_req=True`` to get a thread whose
        ``get()`` yields the result.

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default is True.
        :param _request_timeout: total timeout, or a (connect, read) pair.
        :return: V1APIGroup
        """
        # This variant only wants the deserialized body, not (body, status, headers).
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group # noqa: E501

        Return information about the rbac.authorization.k8s.io group. # noqa: E501
        Synchronous by default; pass ``async_req=True`` to get a thread whose
        ``get()`` yields the result.

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
            and headers
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
            without reading/decoding. Default is True.
        :param _request_timeout: total timeout, or a (connect, read) pair.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
        """
        # Only the transport-level options are accepted; this endpoint has
        # no operation parameters of its own.
        allowed = ('async_req', '_return_http_data_only',
                   '_preload_content', '_request_timeout')
        for key in kwargs:
            if key not in allowed:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )

        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json', 'application/yaml',
                 'application/vnd.kubernetes.protobuf']),
        }

        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/', 'GET',
            {},   # path params: none
            [],   # query params: none
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=['BearerToken'],  # Authentication setting
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_v1_api.py
new file mode 100644
index 0000000000..a77b6dfb06
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/rbac_authorization_v1_api.py
@@ -0,0 +1,4696 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class RbacAuthorizationV1Api(object):
    """Client for the `rbac.authorization.k8s.io/v1` API group (RBAC
    ClusterRoles, ClusterRoleBindings, Roles and RoleBindings).

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Use the supplied transport client, or build a default ApiClient
        # (which reads the default kubernetes client configuration).
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
+
def create_cluster_role(self, body, **kwargs):  # noqa: E501
    """create_cluster_role # noqa: E501

    create a ClusterRole # noqa: E501
    Thin wrapper over :meth:`create_cluster_role_with_http_info` that returns
    only the deserialized body. Synchronous by default; pass
    ``async_req=True`` to get a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param V1ClusterRole body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1ClusterRole
    """
    # Drop the (status, headers) part of the response for this variant.
    kwargs['_return_http_data_only'] = True
    return self.create_cluster_role_with_http_info(body, **kwargs)  # noqa: E501
+
def create_cluster_role_with_http_info(self, body, **kwargs):  # noqa: E501
    """create_cluster_role # noqa: E501

    create a ClusterRole # noqa: E501
    Synchronous by default; pass ``async_req=True`` to get a thread whose
    ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param V1ClusterRole body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1ClusterRole, status_code(int), headers(HTTPHeaderDict))
    """
    # Operation parameters plus the transport options every method accepts.
    accepted = (
        'body', 'pretty', 'dry_run', 'field_manager', 'field_validation',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    )
    params = {'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_cluster_role" % name
            )
        params[name] = value

    # Client-side check of the required request body.
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_role`")  # noqa: E501

    # Map python_name -> wireName for the optional query-string entries,
    # preserving the generator's emission order.
    query_params = [
        (wire, params[py])
        for py, wire in (('pretty', 'pretty'),
                         ('dry_run', 'dryRun'),
                         ('field_manager', 'fieldManager'),
                         ('field_validation', 'fieldValidation'))
        if params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1/clusterroles', 'POST',
        {},   # no path templating on this endpoint
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1ClusterRole',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def create_cluster_role_binding(self, body, **kwargs):  # noqa: E501
    """create_cluster_role_binding # noqa: E501

    create a ClusterRoleBinding # noqa: E501
    Thin wrapper over :meth:`create_cluster_role_binding_with_http_info`
    that returns only the deserialized body. Synchronous by default; pass
    ``async_req=True`` to get a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param V1ClusterRoleBinding body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1ClusterRoleBinding
    """
    # Drop the (status, headers) part of the response for this variant.
    kwargs['_return_http_data_only'] = True
    return self.create_cluster_role_binding_with_http_info(body, **kwargs)  # noqa: E501
+
def create_cluster_role_binding_with_http_info(self, body, **kwargs):  # noqa: E501
    """create_cluster_role_binding # noqa: E501

    create a ClusterRoleBinding # noqa: E501
    Synchronous by default; pass ``async_req=True`` to get a thread whose
    ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param V1ClusterRoleBinding body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
    """
    # Operation parameters plus the transport options every method accepts.
    accepted = (
        'body', 'pretty', 'dry_run', 'field_manager', 'field_validation',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    )
    params = {'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_cluster_role_binding" % name
            )
        params[name] = value

    # Client-side check of the required request body.
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_cluster_role_binding`")  # noqa: E501

    # Map python_name -> wireName for the optional query-string entries,
    # preserving the generator's emission order.
    query_params = [
        (wire, params[py])
        for py, wire in (('pretty', 'pretty'),
                         ('dry_run', 'dryRun'),
                         ('field_manager', 'fieldManager'),
                         ('field_validation', 'fieldValidation'))
        if params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings', 'POST',
        {},   # no path templating on this endpoint
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1ClusterRoleBinding',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def create_namespaced_role(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_role # noqa: E501

    create a Role # noqa: E501
    Thin wrapper over :meth:`create_namespaced_role_with_http_info` that
    returns only the deserialized body. Synchronous by default; pass
    ``async_req=True`` to get a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Role body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1Role
    """
    # Drop the (status, headers) part of the response for this variant.
    kwargs['_return_http_data_only'] = True
    return self.create_namespaced_role_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
def create_namespaced_role_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_role # noqa: E501

    create a Role # noqa: E501
    Synchronous by default; pass ``async_req=True`` to get a thread whose
    ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Role body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1Role, status_code(int), headers(HTTPHeaderDict))
    """
    # Operation parameters plus the transport options every method accepts.
    accepted = (
        'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'field_validation', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    params = {'namespace': namespace, 'body': body}
    for name, value in kwargs.items():
        if name not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_role" % name
            )
        params[name] = value

    # Client-side checks of the required parameters, in declaration order.
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_role`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('body') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_role`")  # noqa: E501

    # The namespace is substituted into the {namespace} path template.
    path_params = {'namespace': params['namespace']}

    # Map python_name -> wireName for the optional query-string entries,
    # preserving the generator's emission order.
    query_params = [
        (wire, params[py])
        for py, wire in (('pretty', 'pretty'),
                         ('dry_run', 'dryRun'),
                         ('field_manager', 'fieldManager'),
                         ('field_validation', 'fieldValidation'))
        if params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
    }

    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles', 'POST',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1Role',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
+
def create_namespaced_role_binding(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_role_binding # noqa: E501

    create a RoleBinding # noqa: E501
    Thin wrapper over :meth:`create_namespaced_role_binding_with_http_info`
    that returns only the deserialized body. Synchronous by default; pass
    ``async_req=True`` to get a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1RoleBinding body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: server-side dry-run directive (e.g. 'All')
    :param str field_manager: name recorded as the field manager
    :param str field_validation: Ignore/Warn/Strict handling of unknown fields
    :param _preload_content: if False, return the raw urllib3.HTTPResponse.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1RoleBinding
    """
    # Drop the (status, headers) part of the response for this variant.
    kwargs['_return_http_data_only'] = True
    return self.create_namespaced_role_binding_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
+ def create_namespaced_role_binding_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_role_binding # noqa: E501
+
+ create a RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_role_binding_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1RoleBinding body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RoleBinding, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_role_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_role_binding`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_role_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RoleBinding', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_cluster_role(self, name, **kwargs): # noqa: E501
+ """delete_cluster_role # noqa: E501
+
+ delete a ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_cluster_role(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRole (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only responses (drop status/headers)
+ # and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_cluster_role_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_cluster_role_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_cluster_role # noqa: E501
+
+ delete a ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_cluster_role_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRole (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot this call's arguments (name, kwargs). Must stay the first
+ # statement: locals() captures the local symbol table, so any local
+ # created earlier would leak into the mapping.
+ local_var_params = locals()
+
+ # Operation-specific parameters accepted for this endpoint.
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ # Request-control parameters accepted by every generated API method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown kwargs, then merge the accepted ones into the snapshot.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_cluster_role" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_role`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template substitution for {name}.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Query string: only non-None parameters are sent, translated from
+ # snake_case to the wire camelCase names.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch through the shared ApiClient; per the docstring, this yields
+ # the request thread instead when async_req is set.
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/clusterroles/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_cluster_role_binding(self, name, **kwargs): # noqa: E501
+ """delete_cluster_role_binding # noqa: E501
+
+ delete a ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_cluster_role_binding(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRoleBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only responses (drop status/headers)
+ # and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_cluster_role_binding_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_cluster_role_binding_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_cluster_role_binding # noqa: E501
+
+ delete a ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_cluster_role_binding_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRoleBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot this call's arguments (name, kwargs). Must stay the first
+ # statement: locals() captures the local symbol table, so any local
+ # created earlier would leak into the mapping.
+ local_var_params = locals()
+
+ # Operation-specific parameters accepted for this endpoint.
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ # Request-control parameters accepted by every generated API method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown kwargs, then merge the accepted ones into the snapshot.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_cluster_role_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_cluster_role_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template substitution for {name}.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Query string: only non-None parameters are sent, translated from
+ # snake_case to the wire camelCase names.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch through the shared ApiClient; per the docstring, this yields
+ # the request thread instead when async_req is set.
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_cluster_role(self, **kwargs): # noqa: E501
+ """delete_collection_cluster_role # noqa: E501
+
+ delete collection of ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_role(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only responses (drop status/headers)
+ # and delegate to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_cluster_role_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_cluster_role_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_cluster_role # noqa: E501
+
+ delete collection of ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_role_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_cluster_role" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/clusterroles', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_cluster_role_binding(self, **kwargs): # noqa: E501
+ """delete_collection_cluster_role_binding # noqa: E501
+
+ delete collection of ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_role_binding(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_cluster_role_binding_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_cluster_role_binding_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_cluster_role_binding # noqa: E501
+
+ delete collection of ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_cluster_role_binding_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_cluster_role_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_role(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_role # noqa: E501
+
+ delete collection of Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_role(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_role_with_http_info(namespace, **kwargs) # noqa: E501
+
def delete_collection_namespaced_role_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_role  # noqa: E501

    delete collection of Role  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_namespaced_role_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """

    # NOTE: locals() must stay the first statement so it captures exactly
    # {'self', 'namespace', 'kwargs'}; the accepted kwargs entries are
    # validated and flattened into this dict below.
    local_var_params = locals()

    # Every documented API parameter this endpoint accepts via **kwargs.
    all_params = [
        'namespace',
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body'
    ]
    # Client-level options consumed by api_client.call_api rather than
    # sent to the Kubernetes API server.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments early, then merge the accepted ones
    # into local_var_params so everything is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_role" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_role`")  # noqa: E501

    collection_formats = {}

    # {namespace} placeholder in the URL template below.
    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Translate snake_case argument names to their camelCase query-string
    # names, skipping any parameter that was not supplied (or was None).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
        query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
        query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
    if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
        query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
        query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    # Dispatch through the generic client; returns either the deserialized
    # (data, status, headers) tuple or a thread when async_req is set.
    return self.api_client.call_api(
        '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def delete_collection_namespaced_role_binding(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_role_binding  # noqa: E501

    delete collection of RoleBinding  # noqa: E501
    The request is synchronous by default; pass ``async_req=True`` to get
    back a thread whose ``get()`` yields the result instead:
    >>> thread = api.delete_collection_namespaced_role_binding(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: opaque continuation token returned by a previous
        list call with identical query parameters; lets the client page
        through a large result set. A stale token yields a 410
        ResourceExpired response. Not supported when watch is true.
    :param str dry_run: when present, modifications are not persisted; the
        only valid value is 'All' (all dry run stages will be processed)
    :param str field_selector: restrict the affected objects by their
        fields; defaults to everything
    :param int grace_period_seconds: non-negative seconds before deletion;
        0 deletes immediately, unset uses the per-object default
    :param str label_selector: restrict the affected objects by their
        labels; defaults to everything
    :param int limit: maximum number of responses for a list call; when
        more items exist the server sets `continue` on the list metadata.
        Not supported if watch is true.
    :param bool orphan_dependents: deprecated in favor of
        propagation_policy; whether the \"orphan\" finalizer is
        added to/removed from the objects' finalizers list. Mutually
        exclusive with propagation_policy.
    :param str propagation_policy: garbage-collection policy: 'Orphan',
        'Background' or 'Foreground'; mutually exclusive with
        orphan_dependents
    :param str resource_version: constraint on which resource versions the
        request may be served from (see
        https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions);
        defaults to unset
    :param str resource_version_match: how resourceVersion is applied to
        list calls (same reference as above); defaults to unset
    :param bool send_initial_events: with `watch=true`, begin the stream
        with synthetic events for the current state followed by a
        synthetic \"Bookmark\" event annotated with
        `\"k8s.io/initial-events-end\": \"true\"`; requires
        `resourceVersionMatch` to be set as well
    :param int timeout_seconds: timeout for the list/watch call,
        regardless of activity or inactivity
    :param V1DeleteOptions body:
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse without reading/decoding it (default True)
    :param _request_timeout: total request timeout if a single number, or
        a (connection, read) tuple
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, asking it to strip the
    # status code and headers from its return value.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_role_binding_with_http_info(  # noqa: E501
        namespace, **call_kwargs)
+
+ def delete_collection_namespaced_role_binding_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_role_binding # noqa: E501
+
+ delete collection of RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_role_binding_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_role_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_role_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_role(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_role # noqa: E501
+
+ delete a Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_role(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Role (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_role_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_role_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_role # noqa: E501
+
+ delete a Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_role_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Role (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the call arguments (name, namespace and the kwargs dict) at
+ # entry, before any other local is created; kwargs is flattened into this
+ # mapping below.
+ local_var_params = locals()
+
+ # Endpoint-specific parameters, plus the common client options appended next.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted ones into the
+ # flat local_var_params mapping used by the rest of the method.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_role" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_role`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_role`") # noqa: E501
+
+ collection_formats = {}
+
+ # Substitutions for the {name} and {namespace} placeholders in the path.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Translate snake_case arguments to their camelCase wire names; None
+ # values are omitted so the server-side defaults apply.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue the DELETE against the namespaced Role resource endpoint.
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_role_binding(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_role_binding # noqa: E501
+
+ delete a RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_role_binding(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RoleBinding (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_role_binding_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_role_binding_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_role_binding # noqa: E501
+
+ delete a RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_role_binding_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RoleBinding (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the call arguments (name, namespace and the kwargs dict) at
+ # entry, before any other local is created; kwargs is flattened into this
+ # mapping below.
+ local_var_params = locals()
+
+ # Endpoint-specific parameters, plus the common client options appended next.
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted ones into the
+ # flat local_var_params mapping used by the rest of the method.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_role_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_role_binding`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_role_binding`") # noqa: E501
+
+ collection_formats = {}
+
+ # Substitutions for the {name} and {namespace} placeholders in the path.
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Translate snake_case arguments to their camelCase wire names; None
+ # values are omitted so the server-side defaults apply.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue the DELETE against the namespaced RoleBinding resource endpoint.
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the call arguments (only the kwargs dict here) at entry,
+ # before any other local is created; kwargs is flattened into this
+ # mapping below.
+ local_var_params = locals()
+
+ # This endpoint takes no endpoint-specific parameters, only the common
+ # client options appended next.
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted ones into the
+ # flat local_var_params mapping used by the rest of the method.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ # No path or query parameters for the API-group discovery endpoint.
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue the GET against the API-group root for resource discovery.
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_cluster_role(self, **kwargs): # noqa: E501
+ """list_cluster_role # noqa: E501
+
+ list or watch objects of kind ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_role(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRoleList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_cluster_role_with_http_info(**kwargs) # noqa: E501
+
+ def list_cluster_role_with_http_info(self, **kwargs): # noqa: E501
+ """list_cluster_role # noqa: E501
+
+ list or watch objects of kind ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_role_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ClusterRoleList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_cluster_role" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/clusterroles', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ClusterRoleList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_cluster_role_binding(self, **kwargs): # noqa: E501
+ """list_cluster_role_binding # noqa: E501
+
+ list or watch objects of kind ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_role_binding(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRoleBindingList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_cluster_role_binding_with_http_info(**kwargs) # noqa: E501
+
+ def list_cluster_role_binding_with_http_info(self, **kwargs): # noqa: E501
+ """list_cluster_role_binding # noqa: E501
+
+ list or watch objects of kind ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_cluster_role_binding_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1ClusterRoleBindingList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_cluster_role_binding" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1ClusterRoleBindingList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_role(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_role # noqa: E501
+
+ list or watch objects of kind Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_role(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RoleList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_role_with_http_info(namespace, **kwargs) # noqa: E501
+
+    def list_namespaced_role_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_role  # noqa: E501
+
+        list or watch objects of kind Role  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_role_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1RoleList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the call arguments (self, namespace and the raw
+        # 'kwargs' dict); kwargs is validated and flattened into it below.
+        local_var_params = locals()
+
+        # Keyword arguments understood by this endpoint.
+        all_params = [
+            'namespace',
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Client/transport options consumed by ApiClient, never sent to the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Flatten kwargs into local_var_params, rejecting unknown arguments early.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_namespaced_role" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_role`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'namespace' is interpolated into the {namespace} segment of the request path.
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case arguments onto the API's camelCase query parameters;
+        # only explicitly provided (non-None) values are sent.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, no file uploads, no body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP round trip (and optional async dispatch,
+        # response deserialization into V1RoleList) to ApiClient.
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1RoleList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_namespaced_role_binding(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_role_binding # noqa: E501
+
+ list or watch objects of kind RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_role_binding(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RoleBindingList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_role_binding_with_http_info(namespace, **kwargs) # noqa: E501
+
+    def list_namespaced_role_binding_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_role_binding  # noqa: E501
+
+        list or watch objects of kind RoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_role_binding_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1RoleBindingList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots the call arguments (self, namespace and the raw
+        # 'kwargs' dict); kwargs is validated and flattened into it below.
+        local_var_params = locals()
+
+        # Keyword arguments understood by this endpoint.
+        all_params = [
+            'namespace',
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Client/transport options consumed by ApiClient, never sent to the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Flatten kwargs into local_var_params, rejecting unknown arguments early.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_namespaced_role_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_role_binding`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'namespace' is interpolated into the {namespace} segment of the request path.
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case arguments onto the API's camelCase query parameters;
+        # only explicitly provided (non-None) values are sent.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        # GET request: no form fields, no file uploads, no body.
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate the actual HTTP round trip (and optional async dispatch,
+        # response deserialization into V1RoleBindingList) to ApiClient.
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1RoleBindingList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def list_role_binding_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_role_binding_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RoleBindingList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_role_binding_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_role_binding_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_role_binding_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_role_binding_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RoleBindingList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_role_binding_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/rolebindings', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RoleBindingList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_role_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_role_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_role_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RoleList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_role_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_role_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_role_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_role_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1RoleList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_role_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/rbac.authorization.k8s.io/v1/roles', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1RoleList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_cluster_role(self, name, body, **kwargs): # noqa: E501
+ """patch_cluster_role # noqa: E501
+
+ partially update the specified ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_role(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRole (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRole
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_role_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_cluster_role_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_cluster_role  # noqa: E501

        partially update the specified ClusterRole  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_cluster_role_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ClusterRole (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ClusterRole, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicitly named arguments (self, name, body) plus the
        # raw ``kwargs`` dict; must be the first statement so no other local
        # leaks into the snapshot. kwargs entries are merged in below.
        local_var_params = locals()

        # Operation-specific parameters this endpoint understands.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Generic client/transport options accepted by every API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold **kwargs into the flat parameter dict, rejecting unknown
        # keyword names early with a clear error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_cluster_role" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_role`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_role`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {name} segment of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Only parameters that were actually supplied become query-string
        # entries (None means "not sent", not "send null").
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Hand off to the shared ApiClient; with async_req=True this returns
        # the request thread instead of the response tuple.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1/clusterroles/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ClusterRole',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_cluster_role_binding(self, name, body, **kwargs): # noqa: E501
+ """patch_cluster_role_binding # noqa: E501
+
+ partially update the specified ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_cluster_role_binding(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRoleBinding (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRoleBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_cluster_role_binding_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_cluster_role_binding_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_cluster_role_binding  # noqa: E501

        partially update the specified ClusterRoleBinding  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_cluster_role_binding_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ClusterRoleBinding (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicitly named arguments (self, name, body) plus the
        # raw ``kwargs`` dict; must be the first statement so no other local
        # leaks into the snapshot. kwargs entries are merged in below.
        local_var_params = locals()

        # Operation-specific parameters this endpoint understands.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Generic client/transport options accepted by every API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold **kwargs into the flat parameter dict, rejecting unknown
        # keyword names early with a clear error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_cluster_role_binding" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_cluster_role_binding`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_cluster_role_binding`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {name} segment of the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Only parameters that were actually supplied become query-string
        # entries (None means "not sent", not "send null").
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Hand off to the shared ApiClient; with async_req=True this returns
        # the request thread instead of the response tuple.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ClusterRoleBinding',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_role(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_role # noqa: E501
+
+ partially update the specified Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_role(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Role (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Role
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_role_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_role_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_role  # noqa: E501

        partially update the specified Role  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_role_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the Role (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Role, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicitly named arguments (self, name, namespace,
        # body) plus the raw ``kwargs`` dict; must be the first statement so
        # no other local leaks into the snapshot. kwargs are merged in below.
        local_var_params = locals()

        # Operation-specific parameters this endpoint understands.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Generic client/transport options accepted by every API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fold **kwargs into the flat parameter dict, rejecting unknown
        # keyword names early with a clear error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_role" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_role`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_role`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_role`")  # noqa: E501

        collection_formats = {}

        # Substituted into the {name} and {namespace} URL template segments.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Only parameters that were actually supplied become query-string
        # entries (None means "not sent", not "send null").
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Hand off to the shared ApiClient; with async_req=True this returns
        # the request thread instead of the response tuple.
        return self.api_client.call_api(
            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Role',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_role_binding(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_role_binding # noqa: E501
+
+ partially update the specified RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_role_binding(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RoleBinding (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RoleBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+    def patch_namespaced_role_binding_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """patch_namespaced_role_binding  # noqa: E501
+
+        partially update the specified RoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_namespaced_role_binding_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the RoleBinding (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1RoleBinding, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot the explicit arguments plus the raw ``kwargs`` dict.
+        # Must stay the very first statement so no helper locals leak in.
+        local_var_params = locals()
+
+        # Operation-specific parameters accepted by this endpoint.
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        # Framework-level options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into the parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_namespaced_role_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_role_binding`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_role_binding`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_role_binding`")  # noqa: E501
+
+        # No array-typed query parameters on this endpoint, so no
+        # collection (csv/multi) formatting is needed.
+        collection_formats = {}
+
+        # Substituted into {namespace}/{name} placeholders of the URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Query string is built only from parameters the caller actually set
+        # (snake_case locals map to the server's camelCase names).
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type` — patch media types only; the chosen
+        # type tells the server how to interpret the patch body.
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1RoleBinding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def read_cluster_role(self, name, **kwargs): # noqa: E501
+ """read_cluster_role # noqa: E501
+
+ read the specified ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_cluster_role(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRole (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRole
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_cluster_role_with_http_info(name, **kwargs) # noqa: E501
+
+    def read_cluster_role_with_http_info(self, name, **kwargs):  # noqa: E501
+        """read_cluster_role  # noqa: E501
+
+        read the specified ClusterRole  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_cluster_role_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterRole (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ClusterRole, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot the explicit arguments plus the raw ``kwargs`` dict.
+        # Must stay the very first statement so no helper locals leak in.
+        local_var_params = locals()
+
+        # Operation-specific parameters, then the framework-level options
+        # accepted by every generated API method.
+        all_params = [
+            'name',
+            'pretty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into the parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_cluster_role" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_role`")  # noqa: E501
+
+        # No array-typed query parameters here.
+        collection_formats = {}
+
+        # Substituted into the {name} placeholder of the URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/clusterroles/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ClusterRole',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def read_cluster_role_binding(self, name, **kwargs): # noqa: E501
+ """read_cluster_role_binding # noqa: E501
+
+ read the specified ClusterRoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_cluster_role_binding(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRoleBinding (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRoleBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_cluster_role_binding_with_http_info(name, **kwargs) # noqa: E501
+
+    def read_cluster_role_binding_with_http_info(self, name, **kwargs):  # noqa: E501
+        """read_cluster_role_binding  # noqa: E501
+
+        read the specified ClusterRoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_cluster_role_binding_with_http_info(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterRoleBinding (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot the explicit arguments plus the raw ``kwargs`` dict.
+        # Must stay the very first statement so no helper locals leak in.
+        local_var_params = locals()
+
+        # Operation-specific parameters, then the framework-level options
+        # accepted by every generated API method.
+        all_params = [
+            'name',
+            'pretty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into the parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_cluster_role_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_cluster_role_binding`")  # noqa: E501
+
+        # No array-typed query parameters here.
+        collection_formats = {}
+
+        # Substituted into the {name} placeholder of the URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ClusterRoleBinding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def read_namespaced_role(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_role # noqa: E501
+
+ read the specified Role # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_role(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the Role (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Role
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_role_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+    def read_namespaced_role_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """read_namespaced_role  # noqa: E501
+
+        read the specified Role  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_namespaced_role_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Role (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Role, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot the explicit arguments plus the raw ``kwargs`` dict.
+        # Must stay the very first statement so no helper locals leak in.
+        local_var_params = locals()
+
+        # Operation-specific parameters, then the framework-level options
+        # accepted by every generated API method.
+        all_params = [
+            'name',
+            'namespace',
+            'pretty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into the parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_namespaced_role" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_role`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_role`")  # noqa: E501
+
+        # No array-typed query parameters here.
+        collection_formats = {}
+
+        # Substituted into the {namespace}/{name} placeholders of the URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Role',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def read_namespaced_role_binding(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_role_binding # noqa: E501
+
+ read the specified RoleBinding # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_role_binding(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the RoleBinding (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RoleBinding
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_role_binding_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+    def read_namespaced_role_binding_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
+        """read_namespaced_role_binding  # noqa: E501
+
+        read the specified RoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_namespaced_role_binding_with_http_info(name, namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the RoleBinding (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1RoleBinding, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot the explicit arguments plus the raw ``kwargs`` dict.
+        # Must stay the very first statement so no helper locals leak in.
+        local_var_params = locals()
+
+        # Operation-specific parameters, then the framework-level options
+        # accepted by every generated API method.
+        all_params = [
+            'name',
+            'namespace',
+            'pretty'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten the accepted ones
+        # into the parameter dict.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method read_namespaced_role_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_role_binding`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_role_binding`")  # noqa: E501
+
+        # No array-typed query parameters here.
+        collection_formats = {}
+
+        # Substituted into the {namespace}/{name} placeholders of the URL.
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{name}', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1RoleBinding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+ def replace_cluster_role(self, name, body, **kwargs): # noqa: E501
+ """replace_cluster_role # noqa: E501
+
+ replace the specified ClusterRole # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_cluster_role(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ClusterRole (required)
+ :param V1ClusterRole body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1ClusterRole
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_cluster_role_with_http_info(name, body, **kwargs) # noqa: E501
+
+    def replace_cluster_role_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_role  # noqa: E501
+
+        replace the specified ClusterRole  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_role_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterRole (required)
+        :param V1ClusterRole body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ClusterRole, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot every explicit argument (plus the kwargs dict) by name so
+        # they can be validated and mapped onto HTTP parameters below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Generic request-control options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in the OpenAPI spec, then
+        # flatten the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_cluster_role" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_role`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_role`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Map snake_case Python arguments to the camelCase query parameter
+        # names expected by the Kubernetes API server; None values are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/clusterroles/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ClusterRole',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_cluster_role_binding(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_role_binding  # noqa: E501
+
+        replace the specified ClusterRoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_role_binding(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterRoleBinding (required)
+        :param V1ClusterRoleBinding body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1ClusterRoleBinding
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, asking it to return only
+        # the deserialized body (no status code or headers).
+        kwargs['_return_http_data_only'] = True
+        return self.replace_cluster_role_binding_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def replace_cluster_role_binding_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """replace_cluster_role_binding  # noqa: E501
+
+        replace the specified ClusterRoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_cluster_role_binding_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the ClusterRoleBinding (required)
+        :param V1ClusterRoleBinding body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1ClusterRoleBinding, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot every explicit argument (plus the kwargs dict) by name so
+        # they can be validated and mapped onto HTTP parameters below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Generic request-control options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in the OpenAPI spec, then
+        # flatten the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_cluster_role_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_cluster_role_binding`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_cluster_role_binding`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Map snake_case Python arguments to the camelCase query parameter
+        # names expected by the Kubernetes API server; None values are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1ClusterRoleBinding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_namespaced_role(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_role  # noqa: E501
+
+        replace the specified Role  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_role(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Role (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Role body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1Role
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, asking it to return only
+        # the deserialized body (no status code or headers).
+        kwargs['_return_http_data_only'] = True
+        return self.replace_namespaced_role_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
+    def replace_namespaced_role_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_role  # noqa: E501
+
+        replace the specified Role  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_role_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the Role (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1Role body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1Role, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot every explicit argument (plus the kwargs dict) by name so
+        # they can be validated and mapped onto HTTP parameters below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Generic request-control options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in the OpenAPI spec, then
+        # flatten the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_namespaced_role" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_role`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_role`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_role`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case Python arguments to the camelCase query parameter
+        # names expected by the Kubernetes API server; None values are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1Role',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def replace_namespaced_role_binding(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_role_binding  # noqa: E501
+
+        replace the specified RoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_role_binding(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the RoleBinding (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1RoleBinding body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1RoleBinding
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Delegate to the *_with_http_info variant, asking it to return only
+        # the deserialized body (no status code or headers).
+        kwargs['_return_http_data_only'] = True
+        return self.replace_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
+
+    def replace_namespaced_role_binding_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
+        """replace_namespaced_role_binding  # noqa: E501
+
+        replace the specified RoleBinding  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.replace_namespaced_role_binding_with_http_info(name, namespace, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the RoleBinding (required)
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param V1RoleBinding body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1RoleBinding, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # Snapshot every explicit argument (plus the kwargs dict) by name so
+        # they can be validated and mapped onto HTTP parameters below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'namespace',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation'
+        ]
+        # Generic request-control options accepted by every generated API method.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject any keyword argument not declared in the OpenAPI spec, then
+        # flatten the accepted ones into local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method replace_namespaced_role_binding" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_role_binding`")  # noqa: E501
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_role_binding`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_role_binding`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case Python arguments to the camelCase query parameter
+        # names expected by the Kubernetes API server; None values are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{name}', 'PUT',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1RoleBinding',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/resource_api.py b/contrib/python/kubernetes/kubernetes/client/api/resource_api.py
new file mode 100644
index 0000000000..a0bd1ff5db
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/resource_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class ResourceApi(object):
    """Auto-generated client for the `resource.k8s.io` API group root.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is supplied.
        self.api_client = ApiClient() if api_client is None else api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """Return the V1APIGroup describing resource.k8s.io.  # noqa: E501

        Synchronous by default; pass async_req=True to get a request thread:
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
                                 without reading/decoding. Default is True.
        :param _request_timeout: total timeout (number) or a
                                 (connection, read) timeout tuple.
        :return: V1APIGroup, or the request thread when called asynchronously.
        """
        # Callers of this variant only want the deserialized body,
        # not the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """Return group info as (data, status_code, headers).  # noqa: E501

        Synchronous by default; pass async_req=True to get a request thread:
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
                                 without reading/decoding. Default is True.
        :param _request_timeout: total timeout (number) or a
                                 (connection, read) timeout tuple.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict)),
                 or the request thread when called asynchronously.
        """
        # Reject anything that is not a recognized request option.
        recognized = (
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        )
        for arg in kwargs:
            if arg not in recognized:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % arg
                )

        header_params = {
            # HTTP header `Accept`
            'Accept': self.api_client.select_header_accept(
                ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
        }

        return self.api_client.call_api(
            '/apis/resource.k8s.io/', 'GET',
            {},    # no path parameters for the group root
            [],    # no query parameters
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=['BearerToken'],  # Authentication setting  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/resource_v1alpha2_api.py b/contrib/python/kubernetes/kubernetes/client/api/resource_v1alpha2_api.py
new file mode 100644
index 0000000000..381b9ce6a1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/resource_v1alpha2_api.py
@@ -0,0 +1,5801 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class ResourceV1alpha2Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, api_client=None):
+ if api_client is None:
+ api_client = ApiClient()
+ self.api_client = api_client
+
+ def create_namespaced_pod_scheduling_context(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_pod_scheduling_context # noqa: E501
+
+ create a PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_pod_scheduling_context(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2PodSchedulingContext body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_pod_scheduling_context_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_pod_scheduling_context_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_pod_scheduling_context # noqa: E501
+
+ create a PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_pod_scheduling_context_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2PodSchedulingContext body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_pod_scheduling_context" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_pod_scheduling_context`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_pod_scheduling_context`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_resource_claim(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_resource_claim # noqa: E501
+
+ create a ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_resource_claim(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_resource_claim_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_resource_claim_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_resource_claim # noqa: E501
+
+ create a ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_resource_claim_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_resource_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_namespaced_resource_claim_template(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_resource_claim_template # noqa: E501
+
+ create a ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_resource_claim_template(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaimTemplate body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_namespaced_resource_claim_template_with_http_info(namespace, body, **kwargs) # noqa: E501
+
+ def create_namespaced_resource_claim_template_with_http_info(self, namespace, body, **kwargs): # noqa: E501
+ """create_namespaced_resource_claim_template # noqa: E501
+
+ create a ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_namespaced_resource_claim_template_with_http_info(namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaimTemplate body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_namespaced_resource_claim_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_resource_claim_template`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_resource_claim_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimTemplate', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_resource_class(self, body, **kwargs): # noqa: E501
+ """create_resource_class # noqa: E501
+
+ create a ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_resource_class(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1alpha2ResourceClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_resource_class_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_resource_class_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_resource_class # noqa: E501
+
+ create a ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_resource_class_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1alpha2ResourceClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_resource_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_resource_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/resourceclasses', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_pod_scheduling_context(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_pod_scheduling_context # noqa: E501
+
+ delete collection of PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_pod_scheduling_context(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_pod_scheduling_context_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_pod_scheduling_context_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_pod_scheduling_context # noqa: E501
+
+ delete collection of PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_pod_scheduling_context_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_pod_scheduling_context" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_pod_scheduling_context`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_resource_claim # noqa: E501
+
+ delete collection of ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_resource_claim(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_resource_claim # noqa: E501
+
+ delete collection of ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_resource_claim_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot of the call's arguments: at this point it contains only
+ # 'self', 'namespace' and the raw 'kwargs' dict; accepted kwargs are
+ # flattened into it below.
+ local_var_params = locals()
+
+ # Documented API parameters for this endpoint (snake_case names).
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Client-internal options accepted alongside the API parameters.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments early, then merge the accepted
+ # ones into local_var_params and drop the kwargs indirection.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_resource_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ # Path template substitution: {namespace} in the URL below.
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ # Map each provided snake_case parameter onto its camelCase query
+ # key; parameters left as None are omitted from the request.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Issue DELETE against the namespaced resourceclaims collection and
+ # deserialize the response into V1Status.
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_resource_claim_template(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_resource_claim_template # noqa: E501
+
+ delete collection of ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_resource_claim_template(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ # Convenience wrapper: force body-only responses, then delegate all
+ # request building and validation to the *_with_http_info variant.
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_resource_claim_template # noqa: E501
+
+ delete collection of ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_resource_claim_template_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_resource_claim_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_resource_claim_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_resource_class(self, **kwargs): # noqa: E501
+ """delete_collection_resource_class # noqa: E501
+
+ delete collection of ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_resource_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_resource_class_with_http_info(**kwargs) # noqa: E501
+
    def delete_collection_resource_class_with_http_info(self, **kwargs): # noqa: E501
        """delete_collection_resource_class # noqa: E501

        delete collection of ResourceClass # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_resource_class_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: continue token from a previous paginated list
            call; server-defined, only valid with otherwise identical query
            parameters.
        :param str dry_run: when present, modifications are not persisted.
            Valid values are: - All: all dry run stages will be processed
        :param str field_selector: restrict the returned objects by their
            fields. Defaults to everything.
        :param int grace_period_seconds: non-negative seconds before the
            object is deleted; zero means delete immediately.
        :param str label_selector: restrict the returned objects by their
            labels. Defaults to everything.
        :param int limit: maximum number of responses to return for a list
            call; further results are fetched with the `continue` token.
        :param bool orphan_dependents: deprecated, use propagation_policy;
            orphan the dependent objects when true.
        :param str propagation_policy: garbage-collection policy: 'Orphan',
            'Background' or 'Foreground'.
        :param str resource_version: resourceVersion constraint for the
            request. Defaults to unset.
        :param str resource_version_match: how resourceVersion is applied to
            list calls. Defaults to unset.
        :param bool send_initial_events: with `watch=true`, emit synthetic
            events for the current collection state followed by a synthetic
            \"Bookmark\" event; requires `resource_version_match` to be set.
        :param int timeout_seconds: Timeout for the list/watch call.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots exactly {'self', 'kwargs'} at this point; the
        # individual keyword arguments are merged in below after validation.
        local_var_params = locals()

        # Parameter names (Pythonic, underscored) accepted by this endpoint.
        all_params = [
            'pretty',
            '_continue',
            'dry_run',
            'field_selector',
            'grace_period_seconds',
            'label_selector',
            'limit',
            'orphan_dependents',
            'propagation_policy',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'body'
        ]
        # Transport-level options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so they can be looked up by name below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_resource_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        # This is a cluster-scoped endpoint: no path substitutions needed.
        path_params = {}

        # Map each supplied python parameter to its camelCase query key,
        # preserving the endpoint's canonical ordering.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
            query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
            query_params.append(('limit', local_var_params['limit'])) # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1alpha2/resourceclasses', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_namespaced_pod_scheduling_context(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_pod_scheduling_context # noqa: E501
+
+ delete a PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_pod_scheduling_context(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_pod_scheduling_context_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def delete_namespaced_pod_scheduling_context_with_http_info(self, name, namespace, **kwargs): # noqa: E501
        """delete_namespaced_pod_scheduling_context # noqa: E501

        delete a PodSchedulingContext # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_pod_scheduling_context_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PodSchedulingContext (required)
        :param str namespace: object name and auth scope, such as for teams
            and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: when present, modifications are not persisted.
            Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: non-negative seconds before the
            object is deleted; zero means delete immediately.
        :param bool orphan_dependents: deprecated, use propagation_policy;
            orphan the dependent objects when true.
        :param str propagation_policy: garbage-collection policy: 'Orphan',
            'Background' or 'Foreground'.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots {'self', 'name', 'namespace', 'kwargs'} here;
        # the remaining keyword arguments are merged in below after validation.
        local_var_params = locals()

        # Parameter names (Pythonic, underscored) accepted by this endpoint.
        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Transport-level options understood by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so they can be looked up by name below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_pod_scheduling_context" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
                                                        local_var_params['name'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_pod_scheduling_context`") # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
                                                        local_var_params['namespace'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_pod_scheduling_context`") # noqa: E501

        collection_formats = {}

        # Substitute the {name} and {namespace} placeholders in the URL path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name'] # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace'] # noqa: E501

        # Map each supplied python parameter to its camelCase query key,
        # preserving the endpoint's canonical ordering.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha2PodSchedulingContext', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_namespaced_resource_claim(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_resource_claim # noqa: E501
+
+ delete a ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_resource_claim(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_resource_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def delete_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs): # noqa: E501
        """delete_namespaced_resource_claim # noqa: E501

        delete a ResourceClaim # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaim (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other binding, so the snapshot holds
        # exactly `self`, the required parameters and the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot so every parameter is looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_resource_claim" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
                                                       local_var_params['name'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim`") # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
                                                       local_var_params['namespace'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim`") # noqa: E501

        collection_formats = {}

        # Path-template substitutions for {name} and {namespace}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name'] # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace'] # noqa: E501

        # Optional query parameters: python snake_case names are mapped onto
        # the camelCase names the Kubernetes API server expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha2ResourceClaim', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_resource_claim_template # noqa: E501
+
+ delete a ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_resource_claim_template(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaimTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
+
    def delete_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs): # noqa: E501
        """delete_namespaced_resource_claim_template # noqa: E501

        delete a ResourceClaimTemplate # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClaimTemplate (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other binding, so the snapshot holds
        # exactly `self`, the required parameters and the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'name',
            'namespace',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot so every parameter is looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_resource_claim_template" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
                                                       local_var_params['name'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_resource_claim_template`") # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
                                                       local_var_params['namespace'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_resource_claim_template`") # noqa: E501

        collection_formats = {}

        # Path-template substitutions for {name} and {namespace}.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name'] # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace'] # noqa: E501

        # Optional query parameters: python snake_case names are mapped onto
        # the camelCase names the Kubernetes API server expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha2ResourceClaimTemplate', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_resource_class(self, name, **kwargs): # noqa: E501
+ """delete_resource_class # noqa: E501
+
+ delete a ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_resource_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_resource_class_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_resource_class_with_http_info(self, name, **kwargs): # noqa: E501
        """delete_resource_class # noqa: E501

        delete a ResourceClass # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_resource_class_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the ResourceClass (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1alpha2ResourceClass, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other binding, so the snapshot holds
        # exactly `self`, the required parameter and the raw `kwargs` dict.
        local_var_params = locals()

        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot so every parameter is looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_resource_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
                                                       local_var_params['name'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_resource_class`") # noqa: E501

        collection_formats = {}

        # Path-template substitution for {name}; ResourceClass is cluster-scoped,
        # so there is no namespace segment.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name'] # noqa: E501

        # Optional query parameters: python snake_case names are mapped onto
        # the camelCase names the Kubernetes API server expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
            query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1alpha2/resourceclasses/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1alpha2ResourceClass', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
        """get_api_resources # noqa: E501

        get available resources # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() is captured before any other binding, so the snapshot holds
        # exactly `self` and the raw `kwargs` dict.
        local_var_params = locals()

        # This endpoint takes no API parameters; only the client-level
        # keywords appended below are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then merge the accepted ones into
        # the snapshot so every parameter is looked up uniformly below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/resource.k8s.io/v1alpha2/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_namespaced_pod_scheduling_context(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod_scheduling_context # noqa: E501
+
+ list or watch objects of kind PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod_scheduling_context(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContextList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_pod_scheduling_context_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_pod_scheduling_context_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_pod_scheduling_context # noqa: E501
+
+ list or watch objects of kind PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_pod_scheduling_context_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContextList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_pod_scheduling_context" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_pod_scheduling_context`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContextList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_resource_claim(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_resource_claim # noqa: E501
+
+ list or watch objects of kind ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_resource_claim(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_resource_claim_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_resource_claim_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_resource_claim # noqa: E501
+
+ list or watch objects of kind ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_resource_claim_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_resource_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_namespaced_resource_claim_template(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_resource_claim_template # noqa: E501
+
+ list or watch objects of kind ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_resource_claim_template(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplateList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_namespaced_resource_claim_template_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def list_namespaced_resource_claim_template_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """list_namespaced_resource_claim_template # noqa: E501
+
+ list or watch objects of kind ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_namespaced_resource_claim_template_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_namespaced_resource_claim_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_resource_claim_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimTemplateList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_pod_scheduling_context_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_pod_scheduling_context_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_pod_scheduling_context_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContextList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_pod_scheduling_context_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_pod_scheduling_context_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_pod_scheduling_context_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_pod_scheduling_context_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContextList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_pod_scheduling_context_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/podschedulingcontexts', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContextList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_resource_claim_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_resource_claim_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_claim_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_resource_claim_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_resource_claim_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_resource_claim_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_claim_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_resource_claim_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/resourceclaims', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_resource_claim_template_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_resource_claim_template_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_claim_template_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplateList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_resource_claim_template_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_resource_claim_template_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_resource_claim_template_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_claim_template_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimTemplateList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_resource_claim_template_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/resourceclaimtemplates', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimTemplateList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_resource_class(self, **kwargs): # noqa: E501
+ """list_resource_class # noqa: E501
+
+ list or watch objects of kind ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClassList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_resource_class_with_http_info(**kwargs) # noqa: E501
+
+ def list_resource_class_with_http_info(self, **kwargs): # noqa: E501
+ """list_resource_class # noqa: E501
+
+ list or watch objects of kind ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_resource_class_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClassList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_resource_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/resourceclasses', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClassList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_pod_scheduling_context(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_scheduling_context # noqa: E501
+
+ partially update the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_scheduling_context(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_pod_scheduling_context_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_pod_scheduling_context_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_scheduling_context # noqa: E501
+
+ partially update the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_scheduling_context_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_pod_scheduling_context" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_scheduling_context`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_scheduling_context`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_scheduling_context`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_pod_scheduling_context_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_scheduling_context_status # noqa: E501
+
+ partially update status of the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_scheduling_context_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_pod_scheduling_context_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_pod_scheduling_context_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_pod_scheduling_context_status # noqa: E501
+
+ partially update status of the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_pod_scheduling_context_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_pod_scheduling_context_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_pod_scheduling_context_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod_scheduling_context_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_pod_scheduling_context_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_resource_claim(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_claim # noqa: E501
+
+ partially update the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_claim(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_claim # noqa: E501
+
+ partially update the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_resource_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_resource_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_claim_status # noqa: E501
+
+ partially update status of the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_claim_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_claim_status # noqa: E501
+
+ partially update status of the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_resource_claim_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}/status', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_claim_template # noqa: E501
+
+ partially update the specified ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_claim_template(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaimTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def patch_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_resource_claim_template # noqa: E501
+
+ partially update the specified ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaimTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_namespaced_resource_claim_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_resource_claim_template`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_resource_claim_template`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_resource_claim_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimTemplate', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_resource_class(self, name, body, **kwargs): # noqa: E501
+ """patch_resource_class # noqa: E501
+
+ partially update the specified ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_resource_class(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_resource_class_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_resource_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_resource_class # noqa: E501
+
+ partially update the specified ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_resource_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_resource_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_resource_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_resource_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/resourceclasses/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_pod_scheduling_context(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_scheduling_context # noqa: E501
+
+ read the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_scheduling_context(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_scheduling_context_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_pod_scheduling_context_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_scheduling_context # noqa: E501
+
+ read the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_scheduling_context_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_pod_scheduling_context" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_scheduling_context`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_scheduling_context`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_pod_scheduling_context_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_scheduling_context_status # noqa: E501
+
+ read status of the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_scheduling_context_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_pod_scheduling_context_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_pod_scheduling_context_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_pod_scheduling_context_status # noqa: E501
+
+ read status of the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_pod_scheduling_context_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_pod_scheduling_context_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_pod_scheduling_context_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_pod_scheduling_context_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_resource_claim(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_claim # noqa: E501
+
+ read the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_claim(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_resource_claim_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_resource_claim_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_claim # noqa: E501
+
+ read the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_claim_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_resource_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_resource_claim_status(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_claim_status # noqa: E501
+
+ read status of the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_claim_status(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_resource_claim_status_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_resource_claim_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_claim_status # noqa: E501
+
+ read status of the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_claim_status_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_resource_claim_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_resource_claim_template(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_claim_template # noqa: E501
+
+ read the specified ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_claim_template(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaimTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_resource_claim_template_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_resource_claim_template_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_resource_claim_template # noqa: E501
+
+ read the specified ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_resource_claim_template_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaimTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_resource_claim_template" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_resource_claim_template`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_resource_claim_template`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaimTemplate', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_resource_class(self, name, **kwargs): # noqa: E501
+ """read_resource_class # noqa: E501
+
+ read the specified ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_resource_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_resource_class_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_resource_class_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_resource_class # noqa: E501
+
+ read the specified ResourceClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_resource_class_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_resource_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_resource_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/resourceclasses/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_scheduling_context(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_scheduling_context # noqa: E501
+
+ replace the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_scheduling_context(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2PodSchedulingContext body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_scheduling_context_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_scheduling_context_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_scheduling_context # noqa: E501
+
+ replace the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_scheduling_context_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2PodSchedulingContext body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_scheduling_context" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_scheduling_context`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_scheduling_context`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_scheduling_context`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_pod_scheduling_context_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_scheduling_context_status # noqa: E501
+
+ replace status of the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_scheduling_context_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2PodSchedulingContext body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2PodSchedulingContext
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_pod_scheduling_context_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_pod_scheduling_context_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_pod_scheduling_context_status # noqa: E501
+
+ replace status of the specified PodSchedulingContext # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_pod_scheduling_context_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PodSchedulingContext (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2PodSchedulingContext body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2PodSchedulingContext, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_pod_scheduling_context_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_pod_scheduling_context_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod_scheduling_context_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_pod_scheduling_context_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/podschedulingcontexts/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2PodSchedulingContext', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_resource_claim(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_claim # noqa: E501
+
+ replace the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_claim(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_resource_claim_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_resource_claim_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_claim # noqa: E501
+
+ replace the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_claim_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_resource_claim" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_resource_claim_status(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_claim_status # noqa: E501
+
+ replace status of the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_claim_status(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaim
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
+ def replace_namespaced_resource_claim_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_claim_status # noqa: E501
+
+ replace status of the specified ResourceClaim # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_claim_status_with_http_info(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaim (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaim body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1alpha2ResourceClaim, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_namespaced_resource_claim_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_resource_claim_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaims/{name}/status', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1alpha2ResourceClaim', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_namespaced_resource_claim_template(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_resource_claim_template # noqa: E501
+
+ replace the specified ResourceClaimTemplate # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_resource_claim_template(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the ResourceClaimTemplate (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1alpha2ResourceClaimTemplate body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1alpha2ResourceClaimTemplate
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
def replace_namespaced_resource_claim_template_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_resource_claim_template  # noqa: E501

    Replace the specified ResourceClaimTemplate (HTTP PUT).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_resource_claim_template_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClaimTemplate (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha2ResourceClaimTemplate body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid value: "All".
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes.
    :param str field_validation: how the server handles unknown or duplicate fields: Ignore, Warn or Strict.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is the total timeout; a (connection,
                             read) tuple sets them separately.
    :return: tuple(V1alpha2ResourceClaimTemplate, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied.
    :raises ApiValueError: if a required parameter is missing (client-side validation).
    """

    local_var_params = locals()

    # Everything the endpoint accepts, plus the common transport options.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject typos early, then fold accepted kwargs into the flat dict.
    # Iterates natively instead of via six.iteritems: this client runs on
    # Python 3 only, so the py2 compatibility shim is dead weight here.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_resource_claim_template" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Required parameters validated in one loop instead of three copy-pasted
    # blocks; the error text matches the generated original exactly.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_resource_claim_template`" % required
                )

    collection_formats = {}

    # name/namespace are bound positionally, so they are always present;
    # no membership check is needed before filling the path template.
    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # python_name -> wire name; only send query params the caller set.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
    ):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}

    form_params = []
    local_var_files = {}

    # body is a required positional argument, so it is always present.
    body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1alpha2/namespaces/{namespace}/resourceclaimtemplates/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha2ResourceClaimTemplate',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def replace_resource_class(self, name, body, **kwargs):  # noqa: E501
    """replace_resource_class  # noqa: E501

    Replace the specified ResourceClass (HTTP PUT).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_resource_class(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClass (required)
    :param V1alpha2ResourceClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid value: "All".
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes.
    :param str field_validation: how the server handles unknown or duplicate fields: Ignore, Warn or Strict.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is the total timeout; a (connection,
                             read) tuple sets them separately.
    :return: V1alpha2ResourceClass
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this plain variant only want the deserialized object, so
    # force the data-only flag and delegate to the *_with_http_info twin.
    kwargs['_return_http_data_only'] = True
    return self.replace_resource_class_with_http_info(name, body, **kwargs)  # noqa: E501
+
def replace_resource_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_resource_class  # noqa: E501

    Replace the specified ResourceClass (HTTP PUT).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_resource_class_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ResourceClass (required)
    :param V1alpha2ResourceClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid value: "All".
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes.
    :param str field_validation: how the server handles unknown or duplicate fields: Ignore, Warn or Strict.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is the total timeout; a (connection,
                             read) tuple sets them separately.
    :return: tuple(V1alpha2ResourceClass, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied.
    :raises ApiValueError: if a required parameter is missing (client-side validation).
    """

    local_var_params = locals()

    # Everything the endpoint accepts, plus the common transport options.
    all_params = [
        'name',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject typos early, then fold accepted kwargs into the flat dict.
    # Iterates natively instead of via six.iteritems: this client runs on
    # Python 3 only, so the py2 compatibility shim is dead weight here.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_resource_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Required parameters validated in one loop instead of two copy-pasted
    # blocks; the error text matches the generated original exactly.
    if self.api_client.client_side_validation:
        for required in ('name', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_resource_class`" % required
                )

    collection_formats = {}

    # name is bound positionally, so it is always present; no membership
    # check is needed before filling the path template.
    path_params = {'name': local_var_params['name']}

    # python_name -> wire name; only send query params the caller set.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
    ):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}

    form_params = []
    local_var_files = {}

    # body is a required positional argument, so it is always present.
    body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/resource.k8s.io/v1alpha2/resourceclasses/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha2ResourceClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/scheduling_api.py b/contrib/python/kubernetes/kubernetes/client/api/scheduling_api.py
new file mode 100644
index 0000000000..171ad6f23a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/scheduling_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class SchedulingApi(object):
    """Discovery API for the scheduling.k8s.io group.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Default to a fresh ApiClient so the API works out of the box.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        Get information of a group.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object
                                 will be returned without reading/decoding
                                 response data. Default is True.
        :param _request_timeout: timeout setting for this request. A single
                                 number is the total timeout; a (connection,
                                 read) tuple sets them separately.
        :return: V1APIGroup
            If the method is called asynchronously, returns the request thread.
        """
        # Callers of this plain variant only want the deserialized object.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        Get information of a group.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object
                                 will be returned without reading/decoding
                                 response data. Default is True.
        :param _request_timeout: timeout setting for this request. A single
                                 number is the total timeout; a (connection,
                                 read) tuple sets them separately.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously, returns the request thread.
        :raises ApiTypeError: if an unexpected keyword argument is supplied.
        """

        local_var_params = locals()

        # The endpoint takes no API parameters; only transport options.
        all_params = [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        ]

        # Reject typos early; iterates natively instead of six.iteritems
        # since this client runs on Python 3 only.
        for key, val in local_var_params['kwargs'].items():
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/scheduling_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/scheduling_v1_api.py
new file mode 100644
index 0000000000..57b3ca43f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/scheduling_v1_api.py
@@ -0,0 +1,1169 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class SchedulingV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
def __init__(self, api_client=None):
    """Bind this API to *api_client*, creating a default client when omitted."""
    self.api_client = ApiClient() if api_client is None else api_client
+
def create_priority_class(self, body, **kwargs):  # noqa: E501
    """create_priority_class  # noqa: E501

    Create a PriorityClass (HTTP POST).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_priority_class(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param V1PriorityClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid value: "All".
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes.
    :param str field_validation: how the server handles unknown or duplicate fields: Ignore, Warn or Strict.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is the total timeout; a (connection,
                             read) tuple sets them separately.
    :return: V1PriorityClass
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this plain variant only want the deserialized object, so
    # force the data-only flag and delegate to the *_with_http_info twin.
    kwargs['_return_http_data_only'] = True
    return self.create_priority_class_with_http_info(body, **kwargs)  # noqa: E501
+
def create_priority_class_with_http_info(self, body, **kwargs):  # noqa: E501
    """create_priority_class  # noqa: E501

    Create a PriorityClass (HTTP POST).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_priority_class_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param V1PriorityClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid value: "All".
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes.
    :param str field_validation: how the server handles unknown or duplicate fields: Ignore, Warn or Strict.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. A single
                             number is the total timeout; a (connection,
                             read) tuple sets them separately.
    :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied.
    :raises ApiValueError: if `body` is missing (client-side validation).
    """

    local_var_params = locals()

    # Everything the endpoint accepts, plus the common transport options.
    all_params = [
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject typos early, then fold accepted kwargs into the flat dict.
    # Iterates natively instead of via six.iteritems: this client runs on
    # Python 3 only, so the py2 compatibility shim is dead weight here.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_priority_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'body' is set; message matches the
    # generated original exactly.
    if self.api_client.client_side_validation and local_var_params.get('body') is None:
        raise ApiValueError("Missing the required parameter `body` when calling `create_priority_class`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # python_name -> wire name; only send query params the caller set.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
    ):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}

    form_params = []
    local_var_files = {}

    # body is a required positional argument, so it is always present.
    body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/priorityclasses', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PriorityClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
+ def delete_collection_priority_class(self, **kwargs): # noqa: E501
+ """delete_collection_priority_class # noqa: E501
+
+ delete collection of PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_priority_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_priority_class_with_http_info(**kwargs) # noqa: E501
+
    def delete_collection_priority_class_with_http_info(self, **kwargs):  # noqa: E501
        """delete_collection_priority_class  # noqa: E501

        delete collection of PriorityClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_priority_class_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots only the explicit arguments (self, kwargs);
        # validated **kwargs entries are merged into it in the loop below.
        # NOTE: this pattern requires that no other locals exist yet.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this API call.
        all_params = [
            'pretty',
            '_continue',
            'dry_run',
            'field_selector',
            'grace_period_seconds',
            'label_selector',
            'limit',
            'orphan_dependents',
            'propagation_policy',
            'resource_version',
            'resource_version_match',
            'send_initial_events',
            'timeout_seconds',
            'body'
        ]
        # Generic client options accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unknown keyword arguments (typos, unsupported options).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_priority_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Translate python snake_case names into the API's camelCase
        # query-string keys, skipping anything the caller left unset.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501
        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP mechanics (serialization, auth, retries, response
        # deserialization into V1Status) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/v1/priorityclasses', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def delete_priority_class(self, name, **kwargs): # noqa: E501
+ """delete_priority_class # noqa: E501
+
+ delete a PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_priority_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_priority_class_with_http_info(name, **kwargs) # noqa: E501
+
    def delete_priority_class_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_priority_class  # noqa: E501

        delete a PriorityClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_priority_class_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PriorityClass (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots only the explicit arguments (self, name, kwargs);
        # validated **kwargs entries are merged into it in the loop below.
        # NOTE: this pattern requires that no other locals exist yet.
        local_var_params = locals()

        # Endpoint-specific parameters accepted by this API call.
        all_params = [
            'name',
            'pretty',
            'dry_run',
            'grace_period_seconds',
            'orphan_dependents',
            'propagation_policy',
            'body'
        ]
        # Generic client options accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unknown keyword arguments (typos, unsupported options).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_priority_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `delete_priority_class`")  # noqa: E501

        collection_formats = {}

        # 'name' is substituted into the {name} placeholder of the URL path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Translate python snake_case names into the API's camelCase
        # query-string keys, skipping anything the caller left unset.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:  # noqa: E501
            query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))  # noqa: E501
        if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:  # noqa: E501
            query_params.append(('orphanDependents', local_var_params['orphan_dependents']))  # noqa: E501
        if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:  # noqa: E501
            query_params.append(('propagationPolicy', local_var_params['propagation_policy']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # Optional V1DeleteOptions request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP mechanics (serialization, auth, retries, response
        # deserialization into V1Status) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # locals() snapshots only the explicit arguments (self, kwargs);
        # validated **kwargs entries are merged into it in the loop below.
        # NOTE: this pattern requires that no other locals exist yet.
        local_var_params = locals()

        # This endpoint takes no endpoint-specific parameters.
        all_params = [
        ]
        # Generic client options accepted by every generated API method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Fail fast on unknown keyword arguments (typos, unsupported options).
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the HTTP mechanics (serialization, auth, retries, response
        # deserialization into V1APIResourceList) to the shared ApiClient.
        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/v1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def list_priority_class(self, **kwargs): # noqa: E501
+ """list_priority_class # noqa: E501
+
+ list or watch objects of kind PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_priority_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PriorityClassList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_priority_class_with_http_info(**kwargs) # noqa: E501
+
+ def list_priority_class_with_http_info(self, **kwargs): # noqa: E501
+ """list_priority_class # noqa: E501
+
+ list or watch objects of kind PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_priority_class_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PriorityClassList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_priority_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/scheduling.k8s.io/v1/priorityclasses', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PriorityClassList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_priority_class(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_class # noqa: E501
+
+ partially update the specified PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_class(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PriorityClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_priority_class_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def patch_priority_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """patch_priority_class # noqa: E501
+
+ partially update the specified PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_priority_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation',
+ 'force'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method patch_priority_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `patch_priority_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `patch_priority_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+ if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
+ query_params.append(('force', local_var_params['force'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # HTTP header `Content-Type`
+ header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
+ ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'PATCH',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PriorityClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_priority_class(self, name, **kwargs): # noqa: E501
+ """read_priority_class # noqa: E501
+
+ read the specified PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_priority_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PriorityClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_priority_class_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_priority_class_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_priority_class # noqa: E501
+
+ read the specified PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_priority_class_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_priority_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_priority_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PriorityClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_priority_class(self, name, body, **kwargs): # noqa: E501
+ """replace_priority_class # noqa: E501
+
+ replace the specified PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_priority_class(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param V1PriorityClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1PriorityClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_priority_class_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_priority_class_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_priority_class # noqa: E501
+
+ replace the specified PriorityClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_priority_class_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the PriorityClass (required)
+ :param V1PriorityClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_priority_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_priority_class`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_priority_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1PriorityClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/storage_api.py b/contrib/python/kubernetes/kubernetes/client/api/storage_api.py
new file mode 100644
index 0000000000..37da999979
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/storage_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class StorageApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # An ApiClient carries connection/auth configuration; build a
        # default-configured one when the caller does not inject their own.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: force the deserialized body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        # This discovery endpoint takes no spec-defined parameters; only the
        # generic transport options are accepted.
        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Validate each keyword argument against the accepted set, then
        # flatten kwargs into local_var_params for uniform access below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, serialization and (optional) async dispatch to
        # the shared ApiClient.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
diff --git a/contrib/python/kubernetes/kubernetes/client/api/storage_v1_api.py b/contrib/python/kubernetes/kubernetes/client/api/storage_v1_api.py
new file mode 100644
index 0000000000..9a0ff1dd23
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/storage_v1_api.py
@@ -0,0 +1,5914 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
+class StorageV1Api(object):
+ """NOTE: This class is auto generated by OpenAPI Generator
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
    def __init__(self, api_client=None):
        # An ApiClient carries connection/auth configuration; build a
        # default-configured one when the caller does not inject their own.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
+
    def create_csi_driver(self, body, **kwargs):  # noqa: E501
        """create_csi_driver  # noqa: E501

        create a CSIDriver  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_csi_driver(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1CSIDriver body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1CSIDriver
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: force the deserialized body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.create_csi_driver_with_http_info(body, **kwargs)  # noqa: E501
+
    def create_csi_driver_with_http_info(self, body, **kwargs):  # noqa: E501
        """create_csi_driver  # noqa: E501

        create a CSIDriver  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_csi_driver_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1CSIDriver body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSIDriver, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        # Spec-defined parameters for this operation, plus the generic
        # transport options appended below.
        all_params = [
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Validate each keyword argument against the accepted set, then
        # flatten kwargs into local_var_params for uniform access below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_csi_driver" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_csi_driver`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Map snake_case kwargs onto the camelCase query names the API expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, serialization and (optional) async dispatch to
        # the shared ApiClient.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/csidrivers', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSIDriver',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def create_csi_node(self, body, **kwargs):  # noqa: E501
        """create_csi_node  # noqa: E501

        create a CSINode  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_csi_node(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1CSINode body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1CSINode
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: force the deserialized body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.create_csi_node_with_http_info(body, **kwargs)  # noqa: E501
+
    def create_csi_node_with_http_info(self, body, **kwargs):  # noqa: E501
        """create_csi_node  # noqa: E501

        create a CSINode  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_csi_node_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1CSINode body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSINode, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        # Spec-defined parameters for this operation, plus the generic
        # transport options appended below.
        all_params = [
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Validate each keyword argument against the accepted set, then
        # flatten kwargs into local_var_params for uniform access below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_csi_node" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_csi_node`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Map snake_case kwargs onto the camelCase query names the API expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, serialization and (optional) async dispatch to
        # the shared ApiClient.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/csinodes', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSINode',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def create_namespaced_csi_storage_capacity(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_csi_storage_capacity  # noqa: E501

        create a CSIStorageCapacity  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_csi_storage_capacity(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1CSIStorageCapacity body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1CSIStorageCapacity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: force the deserialized body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.create_namespaced_csi_storage_capacity_with_http_info(namespace, body, **kwargs)  # noqa: E501
+
    def create_namespaced_csi_storage_capacity_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_csi_storage_capacity  # noqa: E501

        create a CSIStorageCapacity  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_csi_storage_capacity_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1CSIStorageCapacity body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        # Spec-defined parameters for this operation, plus the generic
        # transport options appended below.
        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Validate each keyword argument against the accepted set, then
        # flatten kwargs into local_var_params for uniform access below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_csi_storage_capacity" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_csi_storage_capacity`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_csi_storage_capacity`")  # noqa: E501

        collection_formats = {}

        # 'namespace' is substituted into the {namespace} segment of the URL.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case kwargs onto the camelCase query names the API expects.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate transport, serialization and (optional) async dispatch to
        # the shared ApiClient.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSIStorageCapacity',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
    def create_storage_class(self, body, **kwargs):  # noqa: E501
        """create_storage_class  # noqa: E501

        create a StorageClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_storage_class(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1StorageClass body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1StorageClass
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: force the deserialized body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        return self.create_storage_class_with_http_info(body, **kwargs)  # noqa: E501
+
+ def create_storage_class_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_storage_class # noqa: E501
+
+ create a StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_storage_class_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1StorageClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StorageClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_storage_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_storage_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/storageclasses', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StorageClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def create_volume_attachment(self, body, **kwargs): # noqa: E501
+ """create_volume_attachment # noqa: E501
+
+ create a VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_volume_attachment(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1VolumeAttachment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1VolumeAttachment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.create_volume_attachment_with_http_info(body, **kwargs) # noqa: E501
+
+ def create_volume_attachment_with_http_info(self, body, **kwargs): # noqa: E501
+ """create_volume_attachment # noqa: E501
+
+ create a VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.create_volume_attachment_with_http_info(body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param V1VolumeAttachment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method create_volume_attachment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `create_volume_attachment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments', 'POST',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1VolumeAttachment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_csi_driver(self, **kwargs): # noqa: E501
+ """delete_collection_csi_driver # noqa: E501
+
+ delete collection of CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_csi_driver(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_csi_driver_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_csi_driver_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_csi_driver # noqa: E501
+
+ delete collection of CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_csi_driver_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_csi_driver" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csidrivers', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_csi_node(self, **kwargs): # noqa: E501
+ """delete_collection_csi_node # noqa: E501
+
+ delete collection of CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_csi_node(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_csi_node_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_csi_node_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_csi_node # noqa: E501
+
+ delete collection of CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_csi_node_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_csi_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csinodes', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_namespaced_csi_storage_capacity(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_csi_storage_capacity # noqa: E501
+
+ delete collection of CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_csi_storage_capacity(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_namespaced_csi_storage_capacity_with_http_info(namespace, **kwargs) # noqa: E501
+
+ def delete_collection_namespaced_csi_storage_capacity_with_http_info(self, namespace, **kwargs): # noqa: E501
+ """delete_collection_namespaced_csi_storage_capacity # noqa: E501
+
+ delete collection of CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_namespaced_csi_storage_capacity_with_http_info(namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'namespace',
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_namespaced_csi_storage_capacity" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_csi_storage_capacity`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_storage_class(self, **kwargs): # noqa: E501
+ """delete_collection_storage_class # noqa: E501
+
+ delete collection of StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_storage_class(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_storage_class_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_storage_class_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_storage_class # noqa: E501
+
+ delete collection of StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_storage_class_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # Snapshot the current scope. At this point only `self` and `kwargs`
+ # exist, so this must stay the first statement; the individual keyword
+ # arguments are merged into this dict in the validation loop below.
+ local_var_params = locals()
+
+ # Endpoint-specific parameters this method accepts via **kwargs.
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ # Framework-level options accepted by every generated API method.
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then flatten the accepted ones
+ # into local_var_params so they can be looked up by name below.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_storage_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ # No query parameter of this endpoint uses a collection (csv/multi)
+ # serialization format.
+ collection_formats = {}
+
+ # Cluster-scoped endpoint: no path substitutions needed.
+ path_params = {}
+
+ # Map each provided snake_case argument onto its camelCase
+ # query-string name; unset (None) arguments are omitted entirely.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Optional V1DeleteOptions request body.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # DELETE on the cluster-scoped storageclasses collection; the response
+ # is deserialized into a V1Status by the api_client.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/storageclasses', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_collection_volume_attachment(self, **kwargs): # noqa: E501
+ """delete_collection_volume_attachment # noqa: E501
+
+ delete collection of VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_volume_attachment(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_collection_volume_attachment_with_http_info(**kwargs) # noqa: E501
+
+ def delete_collection_volume_attachment_with_http_info(self, **kwargs): # noqa: E501
+ """delete_collection_volume_attachment # noqa: E501
+
+ delete collection of VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_collection_volume_attachment_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ '_continue',
+ 'dry_run',
+ 'field_selector',
+ 'grace_period_seconds',
+ 'label_selector',
+ 'limit',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_collection_volume_attachment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_csi_driver(self, name, **kwargs): # noqa: E501
+ """delete_csi_driver # noqa: E501
+
+ delete a CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_csi_driver(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIDriver (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIDriver
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_csi_driver_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_csi_driver_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_csi_driver # noqa: E501
+
+ delete a CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_csi_driver_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIDriver (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSIDriver, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_csi_driver" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_csi_driver`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csidrivers/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSIDriver', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_csi_node(self, name, **kwargs): # noqa: E501
+ """delete_csi_node # noqa: E501
+
+ delete a CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_csi_node(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSINode (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSINode
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_csi_node_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_csi_node_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_csi_node # noqa: E501
+
+ delete a CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_csi_node_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSINode (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSINode, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_csi_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_csi_node`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csinodes/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSINode', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_namespaced_csi_storage_capacity(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_csi_storage_capacity # noqa: E501
+
+ delete a CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_csi_storage_capacity(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIStorageCapacity (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1Status
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_namespaced_csi_storage_capacity_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def delete_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """delete_namespaced_csi_storage_capacity # noqa: E501
+
+ delete a CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_namespaced_csi_storage_capacity_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIStorageCapacity (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_namespaced_csi_storage_capacity" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_csi_storage_capacity`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_csi_storage_capacity`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1Status', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_storage_class(self, name, **kwargs): # noqa: E501
+ """delete_storage_class # noqa: E501
+
+ delete a StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_storage_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StorageClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_storage_class_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_storage_class_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_storage_class # noqa: E501
+
+ delete a StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_storage_class_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StorageClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_storage_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_storage_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/storageclasses/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StorageClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def delete_volume_attachment(self, name, **kwargs): # noqa: E501
+ """delete_volume_attachment # noqa: E501
+
+ delete a VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_volume_attachment(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1VolumeAttachment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.delete_volume_attachment_with_http_info(name, **kwargs) # noqa: E501
+
+ def delete_volume_attachment_with_http_info(self, name, **kwargs): # noqa: E501
+ """delete_volume_attachment # noqa: E501
+
+ delete a VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.delete_volume_attachment_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
+ :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
+ :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
+ :param V1DeleteOptions body:
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty',
+ 'dry_run',
+ 'grace_period_seconds',
+ 'orphan_dependents',
+ 'propagation_policy',
+ 'body'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method delete_volume_attachment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `delete_volume_attachment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
+ query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
+ if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
+ query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
+ if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
+ query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments/{name}', 'DELETE',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1VolumeAttachment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def get_api_resources(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1APIResourceList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
+
+ def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
+ """get_api_resources # noqa: E501
+
+ get available resources # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.get_api_resources_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method get_api_resources" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1APIResourceList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_csi_driver(self, **kwargs): # noqa: E501
+ """list_csi_driver # noqa: E501
+
+ list or watch objects of kind CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_csi_driver(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIDriverList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_csi_driver_with_http_info(**kwargs) # noqa: E501
+
+ def list_csi_driver_with_http_info(self, **kwargs): # noqa: E501
+ """list_csi_driver # noqa: E501
+
+ list or watch objects of kind CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_csi_driver_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSIDriverList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_csi_driver" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csidrivers', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSIDriverList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_csi_node(self, **kwargs): # noqa: E501
+ """list_csi_node # noqa: E501
+
+ list or watch objects of kind CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_csi_node(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSINodeList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_csi_node_with_http_info(**kwargs) # noqa: E501
+
+ def list_csi_node_with_http_info(self, **kwargs): # noqa: E501
+ """list_csi_node # noqa: E501
+
+ list or watch objects of kind CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_csi_node_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSINodeList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_csi_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csinodes', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSINodeList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def list_csi_storage_capacity_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_csi_storage_capacity_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_csi_storage_capacity_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIStorageCapacityList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_csi_storage_capacity_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+    def list_csi_storage_capacity_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
+        """list_csi_storage_capacity_for_all_namespaces  # noqa: E501
+
+        list or watch objects of kind CSIStorageCapacity  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_csi_storage_capacity_for_all_namespaces_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1CSIStorageCapacityList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() here is {'self': ..., 'kwargs': {...}}; validated entries
+        # of kwargs are merged into this dict below so every option can be
+        # looked up uniformly through local_var_params.
+        local_var_params = locals()
+
+        # API-level options this endpoint accepts (forwarded as query params).
+        all_params = [
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'pretty',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Client-level options consumed by ApiClient rather than the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten accepted kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_csi_storage_capacity_for_all_namespaces" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+
+        collection_formats = {}
+
+        # The all-namespaces path has no templated segments.
+        path_params = {}
+
+        # Map snake_case Python arguments to the camelCase query-string names
+        # used by the Kubernetes API; parameters left as None are omitted.
+        query_params = []
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate transport, (de)serialization and optional async dispatch
+        # to the shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/storage.k8s.io/v1/csistoragecapacities', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1CSIStorageCapacityList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def list_namespaced_csi_storage_capacity(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_csi_storage_capacity  # noqa: E501
+
+        list or watch objects of kind CSIStorageCapacity  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_csi_storage_capacity(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1CSIStorageCapacityList
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        # Convenience wrapper: force the *_with_http_info variant to return
+        # only the deserialized body (dropping status code and headers).
+        kwargs['_return_http_data_only'] = True
+        return self.list_namespaced_csi_storage_capacity_with_http_info(namespace, **kwargs)  # noqa: E501
+
+    def list_namespaced_csi_storage_capacity_with_http_info(self, namespace, **kwargs):  # noqa: E501
+        """list_namespaced_csi_storage_capacity  # noqa: E501
+
+        list or watch objects of kind CSIStorageCapacity  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.list_namespaced_csi_storage_capacity_with_http_info(namespace, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str namespace: object name and auth scope, such as for teams and projects (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+        :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+        :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+        :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+        :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1CSIStorageCapacityList, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() here is {'self': ..., 'namespace': ..., 'kwargs': {...}};
+        # validated entries of kwargs are merged into this dict below.
+        local_var_params = locals()
+
+        # API-level options this endpoint accepts ('namespace' becomes a path
+        # parameter, the rest are query parameters).
+        all_params = [
+            'namespace',
+            'pretty',
+            'allow_watch_bookmarks',
+            '_continue',
+            'field_selector',
+            'label_selector',
+            'limit',
+            'resource_version',
+            'resource_version_match',
+            'send_initial_events',
+            'timeout_seconds',
+            'watch'
+        ]
+        # Client-level options consumed by ApiClient rather than the server.
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments, then flatten accepted kwargs into
+        # local_var_params.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method list_namespaced_csi_storage_capacity" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'namespace' is set
+        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
+                                                        local_var_params['namespace'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_csi_storage_capacity`")  # noqa: E501
+
+        collection_formats = {}
+
+        # 'namespace' fills the {namespace} segment of the request path.
+        path_params = {}
+        if 'namespace' in local_var_params:
+            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
+
+        # Map snake_case Python arguments to the camelCase query-string names
+        # used by the Kubernetes API; parameters left as None are omitted.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
+            query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
+        if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
+            query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
+        if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
+            query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
+        if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
+            query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
+        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
+            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
+        if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
+            query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
+        if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
+            query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
+        if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None:  # noqa: E501
+            query_params.append(('sendInitialEvents', local_var_params['send_initial_events']))  # noqa: E501
+        if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
+            query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
+        if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
+            query_params.append(('watch', local_var_params['watch']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        # GET request: no body is sent.
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        # Delegate transport, (de)serialization and optional async dispatch
+        # to the shared ApiClient.
+        return self.api_client.call_api(
+            '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1CSIStorageCapacityList',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
def list_storage_class(self, **kwargs):  # noqa: E501
    """list_storage_class  # noqa: E501

    List or watch objects of kind StorageClass.  Synchronous by default;
    pass ``async_req=True`` to get a thread whose ``get()`` yields the
    result.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
        (servers may ignore this flag)
    :param str _continue: continue token from a previous paged list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resourceVersion constraint for the request
    :param str resource_version_match: how resource_version is applied to
        the list call
    :param bool send_initial_events: with watch=true, start the stream with
        synthetic events describing the current collection state
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a single list
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the response data. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1StorageClassList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the raw variant, keeping only the payload (drop the
    # status code and headers from the returned value).
    kwargs['_return_http_data_only'] = True
    return self.list_storage_class_with_http_info(**kwargs)  # noqa: E501
+
def list_storage_class_with_http_info(self, **kwargs):  # noqa: E501
    """list_storage_class  # noqa: E501

    List or watch objects of kind StorageClass, returning the full HTTP
    response information.  Synchronous by default; pass ``async_req=True``
    to get a thread whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
        (servers may ignore this flag)
    :param str _continue: continue token from a previous paged list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resourceVersion constraint for the request
    :param str resource_version_match: how resource_version is applied to
        the list call
    :param bool send_initial_events: with watch=true, start the stream with
        synthetic events describing the current collection state
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a single list
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the response data. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: tuple(V1StorageClassList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # (python_name, wire_name) pairs, in the order the query string is
    # assembled for the server.
    query_param_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Client-side control parameters consumed by call_api(), never sent
    # to the server.
    control_param_names = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    accepted = {name for name, _ in query_param_names}
    accepted.update(control_param_names)
    for key in kwargs:
        # Fail fast on anything this endpoint does not understand.
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_storage_class" % key
            )
    local_var_params = dict(kwargs)

    collection_formats = {}

    path_params = {}

    # Only parameters that were actually supplied (and are not None) make
    # it onto the query string, preserving the generator's ordering.
    query_params = []
    for py_name, wire_name in query_param_names:
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml',
         'application/vnd.kubernetes.protobuf',
         'application/json;stream=watch',
         'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/storage.k8s.io/v1/storageclasses', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StorageClassList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
+
def list_volume_attachment(self, **kwargs):  # noqa: E501
    """list_volume_attachment  # noqa: E501

    List or watch objects of kind VolumeAttachment.  Synchronous by
    default; pass ``async_req=True`` to get a thread whose ``get()``
    yields the result.

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
        (servers may ignore this flag)
    :param str _continue: continue token from a previous paged list result
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses for a list call
    :param str resource_version: resourceVersion constraint for the request
    :param str resource_version_match: how resource_version is applied to
        the list call
    :param bool send_initial_events: with watch=true, start the stream with
        synthetic events describing the current collection state
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a single list
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding the response data. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple
    :return: V1VolumeAttachmentList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the raw variant, keeping only the payload (drop the
    # status code and headers from the returned value).
    kwargs['_return_http_data_only'] = True
    return self.list_volume_attachment_with_http_info(**kwargs)  # noqa: E501
+
+ def list_volume_attachment_with_http_info(self, **kwargs): # noqa: E501
+ """list_volume_attachment # noqa: E501
+
+ list or watch objects of kind VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_volume_attachment_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1VolumeAttachmentList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'pretty',
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'resource_version',
+ 'resource_version_match',
+ 'send_initial_events',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_volume_attachment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
+ query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1VolumeAttachmentList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def patch_csi_driver(self, name, body, **kwargs): # noqa: E501
+ """patch_csi_driver # noqa: E501
+
+ partially update the specified CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_csi_driver(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIDriver (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIDriver
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_csi_driver_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_csi_driver_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_csi_driver  # noqa: E501

        partially update the specified CSIDriver  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_csi_driver_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CSIDriver (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSIDriver, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit parameters (plus the raw `kwargs` dict) into a
        # flat mapping; must run before any other local is defined, since
        # locals() captures everything bound so far.
        local_var_params = locals()

        # Keyword arguments this endpoint accepts.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, fold the accepted ones into the
        # flat parameter dict, then drop the now-redundant `kwargs` entry.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_csi_driver" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_csi_driver`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_csi_driver`")  # noqa: E501

        collection_formats = {}

        # Fills the {name} placeholder in the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Map snake_case Python arguments onto camelCase query-string keys,
        # skipping any argument that was not supplied.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Issue PATCH /apis/storage.k8s.io/v1/csidrivers/{name} and
        # deserialize the response as V1CSIDriver.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/csidrivers/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSIDriver',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_csi_node(self, name, body, **kwargs): # noqa: E501
+ """patch_csi_node # noqa: E501
+
+ partially update the specified CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_csi_node(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSINode (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSINode
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_csi_node_with_http_info(name, body, **kwargs) # noqa: E501
+
    def patch_csi_node_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """patch_csi_node  # noqa: E501

        partially update the specified CSINode  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_csi_node_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CSINode (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSINode, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit parameters (plus the raw `kwargs` dict) into a
        # flat mapping; must run before any other local is defined, since
        # locals() captures everything bound so far.
        local_var_params = locals()

        # Keyword arguments this endpoint accepts.
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, fold the accepted ones into the
        # flat parameter dict, then drop the now-redundant `kwargs` entry.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_csi_node" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_csi_node`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_csi_node`")  # noqa: E501

        collection_formats = {}

        # Fills the {name} placeholder in the URL template below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Map snake_case Python arguments onto camelCase query-string keys,
        # skipping any argument that was not supplied.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Issue PATCH /apis/storage.k8s.io/v1/csinodes/{name} and
        # deserialize the response as V1CSINode.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/csinodes/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSINode',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def patch_namespaced_csi_storage_capacity(self, name, namespace, body, **kwargs): # noqa: E501
+ """patch_namespaced_csi_storage_capacity # noqa: E501
+
+ partially update the specified CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.patch_namespaced_csi_storage_capacity(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIStorageCapacity (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param object body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIStorageCapacity
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.patch_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def patch_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """patch_namespaced_csi_storage_capacity  # noqa: E501

        partially update the specified CSIStorageCapacity  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CSIStorageCapacity (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param object body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the explicit parameters (plus the raw `kwargs` dict) into a
        # flat mapping; must run before any other local is defined, since
        # locals() captures everything bound so far.
        local_var_params = locals()

        # Keyword arguments this endpoint accepts.
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation',
            'force'
        ]
        # Framework-level options accepted by every generated endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, fold the accepted ones into the
        # flat parameter dict, then drop the now-redundant `kwargs` entry.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_csi_storage_capacity" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_csi_storage_capacity`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_csi_storage_capacity`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_csi_storage_capacity`")  # noqa: E501

        collection_formats = {}

        # Fill the {name} and {namespace} placeholders in the URL template.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Map snake_case Python arguments onto camelCase query-string keys,
        # skipping any argument that was not supplied.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The patch document itself is sent as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Issue PATCH on the namespaced csistoragecapacities resource and
        # deserialize the response as V1CSIStorageCapacity.
        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities/{name}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSIStorageCapacity',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+    def patch_storage_class(self, name, body, **kwargs):  # noqa: E501
+        """patch_storage_class  # noqa: E501
+
+        partially update the specified StorageClass  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_storage_class(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the StorageClass (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1StorageClass
+        If the method is called asynchronously,
+        returns the request thread.
+        """
+        # Convenience wrapper: force the body-only return shape. Callers that
+        # need the HTTP status code and headers should call
+        # patch_storage_class_with_http_info directly.
+        kwargs['_return_http_data_only'] = True
+        return self.patch_storage_class_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def patch_storage_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """patch_storage_class  # noqa: E501
+
+        partially update the specified StorageClass  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_storage_class_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the StorageClass (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1StorageClass, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots 'self', the declared parameters, and the raw
+        # 'kwargs' dict; 'kwargs' is validated and flattened into this
+        # mapping below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the valid ones.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_storage_class" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_storage_class`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_storage_class`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Optional query parameters are sent only when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/storage.k8s.io/v1/storageclasses/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1StorageClass',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def patch_volume_attachment(self, name, body, **kwargs):  # noqa: E501
+        """patch_volume_attachment  # noqa: E501
+
+        partially update the specified VolumeAttachment  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_volume_attachment(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the VolumeAttachment (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1VolumeAttachment
+        If the method is called asynchronously,
+        returns the request thread.
+        """
+        # Convenience wrapper: force the body-only return shape. Callers that
+        # need the HTTP status code and headers should call
+        # patch_volume_attachment_with_http_info directly.
+        kwargs['_return_http_data_only'] = True
+        return self.patch_volume_attachment_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def patch_volume_attachment_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """patch_volume_attachment  # noqa: E501
+
+        partially update the specified VolumeAttachment  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_volume_attachment_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the VolumeAttachment (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots 'self', the declared parameters, and the raw
+        # 'kwargs' dict; 'kwargs' is validated and flattened into this
+        # mapping below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the valid ones.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_volume_attachment" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_volume_attachment`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_volume_attachment`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Optional query parameters are sent only when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/storage.k8s.io/v1/volumeattachments/{name}', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1VolumeAttachment',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def patch_volume_attachment_status(self, name, body, **kwargs):  # noqa: E501
+        """patch_volume_attachment_status  # noqa: E501
+
+        partially update status of the specified VolumeAttachment  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_volume_attachment_status(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the VolumeAttachment (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1VolumeAttachment
+        If the method is called asynchronously,
+        returns the request thread.
+        """
+        # Convenience wrapper: force the body-only return shape. Callers that
+        # need the HTTP status code and headers should call
+        # patch_volume_attachment_status_with_http_info directly.
+        kwargs['_return_http_data_only'] = True
+        return self.patch_volume_attachment_status_with_http_info(name, body, **kwargs)  # noqa: E501
+
+    def patch_volume_attachment_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
+        """patch_volume_attachment_status  # noqa: E501
+
+        partially update status of the specified VolumeAttachment  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.patch_volume_attachment_status_with_http_info(name, body, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the VolumeAttachment (required)
+        :param object body: (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
+        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+        :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
+        :param _return_http_data_only: response data without head status code
+                                       and headers
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        # locals() snapshots 'self', the declared parameters, and the raw
+        # 'kwargs' dict; 'kwargs' is validated and flattened into this
+        # mapping below.
+        local_var_params = locals()
+
+        all_params = [
+            'name',
+            'body',
+            'pretty',
+            'dry_run',
+            'field_manager',
+            'field_validation',
+            'force'
+        ]
+        all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout'
+            ]
+        )
+
+        # Reject unknown keyword arguments early, then merge the valid ones.
+        for key, val in six.iteritems(local_var_params['kwargs']):
+            if key not in all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method patch_volume_attachment_status" % key
+                )
+            local_var_params[key] = val
+        del local_var_params['kwargs']
+        # verify the required parameter 'name' is set
+        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
+                                                        local_var_params['name'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `name` when calling `patch_volume_attachment_status`")  # noqa: E501
+        # verify the required parameter 'body' is set
+        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
+                                                        local_var_params['body'] is None):  # noqa: E501
+            raise ApiValueError("Missing the required parameter `body` when calling `patch_volume_attachment_status`")  # noqa: E501
+
+        collection_formats = {}
+
+        path_params = {}
+        if 'name' in local_var_params:
+            path_params['name'] = local_var_params['name']  # noqa: E501
+
+        # Optional query parameters are sent only when explicitly provided.
+        query_params = []
+        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
+            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
+        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
+            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
+        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
+            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
+        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
+            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
+        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
+            query_params.append(('force', local_var_params['force']))  # noqa: E501
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        if 'body' in local_var_params:
+            body_params = local_var_params['body']
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501
+
+        # HTTP header `Content-Type`
+        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
+            ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['BearerToken']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/apis/storage.k8s.io/v1/volumeattachments/{name}/status', 'PATCH',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='V1VolumeAttachment',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=local_var_params.get('async_req'),
+            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
+            _preload_content=local_var_params.get('_preload_content', True),
+            _request_timeout=local_var_params.get('_request_timeout'),
+            collection_formats=collection_formats)
+
+    def read_csi_driver(self, name, **kwargs):  # noqa: E501
+        """read_csi_driver  # noqa: E501
+
+        read the specified CSIDriver  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.read_csi_driver(name, async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool: execute request asynchronously
+        :param str name: name of the CSIDriver (required)
+        :param str pretty: If 'true', then the output is pretty printed.
+        :param _preload_content: if False, the urllib3.HTTPResponse object will
+                                 be returned without reading/decoding response
+                                 data. Default is True.
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :return: V1CSIDriver
+        If the method is called asynchronously,
+        returns the request thread.
+        """
+        # Convenience wrapper: force the body-only return shape. Callers that
+        # need the HTTP status code and headers should call
+        # read_csi_driver_with_http_info directly.
+        kwargs['_return_http_data_only'] = True
+        return self.read_csi_driver_with_http_info(name, **kwargs)  # noqa: E501
+
+ def read_csi_driver_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_csi_driver # noqa: E501
+
+ read the specified CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_csi_driver_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIDriver (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSIDriver, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_csi_driver" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_csi_driver`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the GET; call_api returns a thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csidrivers/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSIDriver', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_csi_node(self, name, **kwargs): # noqa: E501
+ """read_csi_node # noqa: E501
+
+ read the specified CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_csi_node(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSINode (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSINode
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_csi_node_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_csi_node_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_csi_node # noqa: E501
+
+ read the specified CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_csi_node_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSINode (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSINode, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_csi_node" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_csi_node`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the GET; call_api returns a thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csinodes/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSINode', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_namespaced_csi_storage_capacity(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_csi_storage_capacity # noqa: E501
+
+ read the specified CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_csi_storage_capacity(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIStorageCapacity (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIStorageCapacity
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_namespaced_csi_storage_capacity_with_http_info(name, namespace, **kwargs) # noqa: E501
+
+ def read_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, **kwargs): # noqa: E501
+ """read_namespaced_csi_storage_capacity # noqa: E501
+
+ read the specified CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_namespaced_csi_storage_capacity_with_http_info(name, namespace, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIStorageCapacity (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, namespace, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'namespace',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_namespaced_csi_storage_capacity" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_csi_storage_capacity`") # noqa: E501
+ # verify the required parameter 'namespace' is set
+ if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
+ local_var_params['namespace'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_csi_storage_capacity`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+ if 'namespace' in local_var_params:
+ path_params['namespace'] = local_var_params['namespace'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the GET; call_api returns a thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSIStorageCapacity', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_storage_class(self, name, **kwargs): # noqa: E501
+ """read_storage_class # noqa: E501
+
+ read the specified StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_storage_class(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StorageClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_storage_class_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_storage_class_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_storage_class # noqa: E501
+
+ read the specified StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_storage_class_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageClass (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1StorageClass, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_storage_class" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_storage_class`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the GET; call_api returns a thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/storageclasses/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1StorageClass', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_volume_attachment(self, name, **kwargs): # noqa: E501
+ """read_volume_attachment # noqa: E501
+
+ read the specified VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_volume_attachment(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1VolumeAttachment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_volume_attachment_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_volume_attachment_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_volume_attachment # noqa: E501
+
+ read the specified VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_volume_attachment_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_volume_attachment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_volume_attachment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the GET; call_api returns a thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments/{name}', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1VolumeAttachment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def read_volume_attachment_status(self, name, **kwargs): # noqa: E501
+ """read_volume_attachment_status # noqa: E501
+
+ read status of the specified VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_volume_attachment_status(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1VolumeAttachment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.read_volume_attachment_status_with_http_info(name, **kwargs) # noqa: E501
+
+ def read_volume_attachment_status_with_http_info(self, name, **kwargs): # noqa: E501
+ """read_volume_attachment_status # noqa: E501
+
+ read status of the specified VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.read_volume_attachment_status_with_http_info(name, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'pretty'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method read_volume_attachment_status" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `read_volume_attachment_status`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the GET against the /status subresource; call_api returns a
+ # thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments/{name}/status', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1VolumeAttachment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_csi_driver(self, name, body, **kwargs): # noqa: E501
+ """replace_csi_driver # noqa: E501
+
+ replace the specified CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_csi_driver(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIDriver (required)
+ :param V1CSIDriver body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIDriver
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_csi_driver_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_csi_driver_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_csi_driver # noqa: E501
+
+ replace the specified CSIDriver # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_csi_driver_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIDriver (required)
+ :param V1CSIDriver body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1CSIDriver, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ # NOTE(review): locals() is snapshotted before any other local is bound,
+ # so it captures exactly (self, name, body, kwargs). Do not add code above it.
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ # Reject unknown keyword arguments, then merge the accepted kwargs
+ # into the flat params dict and drop the nested 'kwargs' entry.
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_csi_driver" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_csi_driver`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_csi_driver`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ # Optional query parameters are only sent when explicitly provided.
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ # Use the caller-supplied object as the request body for the PUT.
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ # Dispatch the PUT; call_api returns a thread when async_req is truthy.
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/csidrivers/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1CSIDriver', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
+ def replace_csi_node(self, name, body, **kwargs): # noqa: E501
+ """replace_csi_node # noqa: E501
+
+ replace the specified CSINode # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_csi_node(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSINode (required)
+ :param V1CSINode body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSINode
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_csi_node_with_http_info(name, body, **kwargs) # noqa: E501
+
    def replace_csi_node_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_csi_node  # noqa: E501

        replace the specified CSINode  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_csi_node_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CSINode (required)
        :param V1CSINode body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSINode, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call arguments (self, name, body, kwargs). This MUST be
        # the first statement: any local defined earlier would leak into the
        # parameter dict.
        local_var_params = locals()

        # Operation-specific parameters accepted via **kwargs ...
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # ... plus the generic request-control options shared by all API calls.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, fold the known ones into the flat
        # parameter dict, then drop the now-redundant 'kwargs' entry.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_csi_node" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_csi_node`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_csi_node`")  # noqa: E501

        # No multi-valued (collection) parameters for this operation.
        collection_formats = {}

        # Path templating: fills {name} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters (python snake_case -> wire camelCase).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1CSINode object is serialized as the PUT request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/csinodes/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSINode',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_namespaced_csi_storage_capacity(self, name, namespace, body, **kwargs): # noqa: E501
+ """replace_namespaced_csi_storage_capacity # noqa: E501
+
+ replace the specified CSIStorageCapacity # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_namespaced_csi_storage_capacity(name, namespace, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the CSIStorageCapacity (required)
+ :param str namespace: object name and auth scope, such as for teams and projects (required)
+ :param V1CSIStorageCapacity body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1CSIStorageCapacity
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, **kwargs) # noqa: E501
+
    def replace_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
        """replace_namespaced_csi_storage_capacity  # noqa: E501

        replace the specified CSIStorageCapacity  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the CSIStorageCapacity (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1CSIStorageCapacity body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call arguments (self, name, namespace, body, kwargs).
        # This MUST be the first statement: any local defined earlier would
        # leak into the parameter dict.
        local_var_params = locals()

        # Operation-specific parameters accepted via **kwargs ...
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # ... plus the generic request-control options shared by all API calls.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, fold the known ones into the flat
        # parameter dict, then drop the now-redundant 'kwargs' entry.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_csi_storage_capacity" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_csi_storage_capacity`")  # noqa: E501
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_csi_storage_capacity`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_csi_storage_capacity`")  # noqa: E501

        # No multi-valued (collection) parameters for this operation.
        collection_formats = {}

        # Path templating: fills {name} and {namespace} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # Optional query-string parameters (python snake_case -> wire camelCase).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1CSIStorageCapacity object is serialized as the PUT request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/namespaces/{namespace}/csistoragecapacities/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1CSIStorageCapacity',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_storage_class(self, name, body, **kwargs): # noqa: E501
+ """replace_storage_class # noqa: E501
+
+ replace the specified StorageClass # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_storage_class(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the StorageClass (required)
+ :param V1StorageClass body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1StorageClass
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_storage_class_with_http_info(name, body, **kwargs) # noqa: E501
+
    def replace_storage_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_storage_class  # noqa: E501

        replace the specified StorageClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.replace_storage_class_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the StorageClass (required)
        :param V1StorageClass body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1StorageClass, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Snapshot the call arguments (self, name, body, kwargs). This MUST be
        # the first statement: any local defined earlier would leak into the
        # parameter dict.
        local_var_params = locals()

        # Operation-specific parameters accepted via **kwargs ...
        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        # ... plus the generic request-control options shared by all API calls.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, fold the known ones into the flat
        # parameter dict, then drop the now-redundant 'kwargs' entry.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_storage_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_storage_class`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_storage_class`")  # noqa: E501

        # No multi-valued (collection) parameters for this operation.
        collection_formats = {}

        # Path templating: fills {name} in the URL below.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query-string parameters (python snake_case -> wire camelCase).
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1StorageClass object is serialized as the PUT request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/storage.k8s.io/v1/storageclasses/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1StorageClass',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
+
+ def replace_volume_attachment(self, name, body, **kwargs): # noqa: E501
+ """replace_volume_attachment # noqa: E501
+
+ replace the specified VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_volume_attachment(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param V1VolumeAttachment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1VolumeAttachment
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.replace_volume_attachment_with_http_info(name, body, **kwargs) # noqa: E501
+
+ def replace_volume_attachment_with_http_info(self, name, body, **kwargs): # noqa: E501
+ """replace_volume_attachment # noqa: E501
+
+ replace the specified VolumeAttachment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.replace_volume_attachment_with_http_info(name, body, async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param str name: name of the VolumeAttachment (required)
+ :param V1VolumeAttachment body: (required)
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
+ :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
+ :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'name',
+ 'body',
+ 'pretty',
+ 'dry_run',
+ 'field_manager',
+ 'field_validation'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method replace_volume_attachment" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+ # verify the required parameter 'name' is set
+ if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
+ local_var_params['name'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `name` when calling `replace_volume_attachment`") # noqa: E501
+ # verify the required parameter 'body' is set
+ if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
+ local_var_params['body'] is None): # noqa: E501
+ raise ApiValueError("Missing the required parameter `body` when calling `replace_volume_attachment`") # noqa: E501
+
+ collection_formats = {}
+
+ path_params = {}
+ if 'name' in local_var_params:
+ path_params['name'] = local_var_params['name'] # noqa: E501
+
+ query_params = []
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
+ query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
+ if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
+ query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
+ if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
+ query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ if 'body' in local_var_params:
+ body_params = local_var_params['body']
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/storage.k8s.io/v1/volumeattachments/{name}', 'PUT',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1VolumeAttachment', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
+
def replace_volume_attachment_status(self, name, body, **kwargs):  # noqa: E501
    """replace_volume_attachment_status  # noqa: E501

    replace status of the specified VolumeAttachment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_volume_attachment_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the VolumeAttachment (required)
    :param V1VolumeAttachment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: dry-run directive; when set, changes are validated
        but not persisted ('All' processes every dry run stage).
    :param str field_manager: name associated with the actor or entity
        making these changes (printable, fewer than 128 characters).
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request body: Ignore, Warn or Strict.
    :param _preload_content: if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout for this request. A single number is
        a total timeout; a (connection, read) tuple sets both.
    :return: V1VolumeAttachment
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing a data-only return
    # regardless of what the caller passed for _return_http_data_only.
    return self.replace_volume_attachment_status_with_http_info(
        name, body, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
+
def replace_volume_attachment_status_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """replace_volume_attachment_status  # noqa: E501

    replace status of the specified VolumeAttachment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_volume_attachment_status_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the VolumeAttachment (required)
    :param V1VolumeAttachment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: dry-run directive; when set, changes are validated
        but not persisted ('All' processes every dry run stage).
    :param str field_manager: name associated with the actor or entity
        making these changes (printable, fewer than 128 characters).
    :param str field_validation: how the server handles unknown or
        duplicate fields in the request body: Ignore, Warn or Strict.
    :param _return_http_data_only: response data without head status code
        and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object
        will be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout for this request. A single number is
        a total timeout; a (connection, read) tuple sets both.
    :return: tuple(V1VolumeAttachment, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    recognized = frozenset([
        'name', 'body', 'pretty', 'dry_run', 'field_manager',
        'field_validation', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ])
    params = dict(name=name, body=body)
    for key in kwargs:
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_volume_attachment_status" % key
            )
        params[key] = kwargs[key]

    # Client-side validation of the two required parameters.
    if self.api_client.client_side_validation and params['name'] is None:
        raise ApiValueError("Missing the required parameter `name` when calling `replace_volume_attachment_status`")  # noqa: E501
    if self.api_client.client_side_validation and params['body'] is None:
        raise ApiValueError("Missing the required parameter `body` when calling `replace_volume_attachment_status`")  # noqa: E501

    # Query parameters, emitted in the exact generated (wire) order.
    query_params = []
    for attr, wire_name in (('pretty', 'pretty'),
                            ('dry_run', 'dryRun'),
                            ('field_manager', 'fieldManager'),
                            ('field_validation', 'fieldValidation')):
        if params.get(attr) is not None:
            query_params.append((wire_name, params[attr]))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/storage.k8s.io/v1/volumeattachments/{name}/status', 'PUT',
        {'name': params['name']},
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1VolumeAttachment',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/version_api.py b/contrib/python/kubernetes/kubernetes/client/api/version_api.py
new file mode 100644
index 0000000000..afbaed5d13
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/version_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class VersionApi(object):
    """Accessor for the Kubernetes ``/version/`` endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller supplies none.
        self.api_client = ApiClient() if api_client is None else api_client

    def get_code(self, **kwargs):  # noqa: E501
        """get_code  # noqa: E501

        get the code version  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_code(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :param _request_timeout: timeout for this request. A single number
            is a total timeout; a (connection, read) tuple sets both.
        :return: VersionInfo
            If the method is called asynchronously,
            returns the request thread.
        """
        # Delegate to the *_with_http_info variant, forcing data-only return.
        return self.get_code_with_http_info(
            **dict(kwargs, _return_http_data_only=True))  # noqa: E501

    def get_code_with_http_info(self, **kwargs):  # noqa: E501
        """get_code  # noqa: E501

        get the code version  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_code_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status
            code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :param _request_timeout: timeout for this request. A single number
            is a total timeout; a (connection, read) tuple sets both.
        :return: tuple(VersionInfo, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """
        recognized = frozenset([
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        ])
        for key in kwargs:
            if key not in recognized:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_code" % key
                )

        # HTTP header `Accept`
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),  # noqa: E501
        }

        return self.api_client.call_api(
            '/version/', 'GET',
            {},    # no path parameters for this endpoint
            [],    # no query parameters either
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='VersionInfo',  # noqa: E501
            auth_settings=['BearerToken'],  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api/well_known_api.py b/contrib/python/kubernetes/kubernetes/client/api/well_known_api.py
new file mode 100644
index 0000000000..8de41208c0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api/well_known_api.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import re # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from kubernetes.client.api_client import ApiClient
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+ ApiValueError
+)
+
+
class WellKnownApi(object):
    """Accessor for the ``/.well-known/openid-configuration`` endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller supplies none.
        self.api_client = ApiClient() if api_client is None else api_client

    def get_service_account_issuer_open_id_configuration(self, **kwargs):  # noqa: E501
        """get_service_account_issuer_open_id_configuration  # noqa: E501

        get service account issuer OpenID configuration, also known as the 'OIDC discovery doc'  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_service_account_issuer_open_id_configuration(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :param _request_timeout: timeout for this request. A single number
            is a total timeout; a (connection, read) tuple sets both.
        :return: str
            If the method is called asynchronously,
            returns the request thread.
        """
        # Delegate to the *_with_http_info variant, forcing data-only return.
        return self.get_service_account_issuer_open_id_configuration_with_http_info(
            **dict(kwargs, _return_http_data_only=True))  # noqa: E501

    def get_service_account_issuer_open_id_configuration_with_http_info(self, **kwargs):  # noqa: E501
        """get_service_account_issuer_open_id_configuration  # noqa: E501

        get service account issuer OpenID configuration, also known as the 'OIDC discovery doc'  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_service_account_issuer_open_id_configuration_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status
            code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :param _request_timeout: timeout for this request. A single number
            is a total timeout; a (connection, read) tuple sets both.
        :return: tuple(str, status_code(int), headers(HTTPHeaderDict))
            If the method is called asynchronously,
            returns the request thread.
        """
        recognized = frozenset([
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
        ])
        for key in kwargs:
            if key not in recognized:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_service_account_issuer_open_id_configuration" % key
                )

        # HTTP header `Accept`
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),  # noqa: E501
        }

        return self.api_client.call_api(
            '/.well-known/openid-configuration', 'GET',
            {},    # no path parameters for this endpoint
            [],    # no query parameters either
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='str',  # noqa: E501
            auth_settings=['BearerToken'],  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
diff --git a/contrib/python/kubernetes/kubernetes/client/api_client.py b/contrib/python/kubernetes/kubernetes/client/api_client.py
new file mode 100644
index 0000000000..870d0939e9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/api_client.py
@@ -0,0 +1,647 @@
+# coding: utf-8
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+from __future__ import absolute_import
+
+import atexit
+import datetime
+from dateutil.parser import parse
+import json
+import mimetypes
+from multiprocessing.pool import ThreadPool
+import os
+import re
+import tempfile
+
+# python 2 and python 3 compatibility library
+import six
+from six.moves.urllib.parse import quote
+
+from kubernetes.client.configuration import Configuration
+import kubernetes.client.models
+from kubernetes.client import rest
+from kubernetes.client.exceptions import ApiValueError
+
+
+class ApiClient(object):
+ """Generic API client for OpenAPI client library builds.
+
+ OpenAPI generic API client. This client handles the client-
+ server communication, and is invariant across implementations. Specifics of
+ the methods and models for each application are generated from the OpenAPI
+ templates.
+
+ NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+ Do not edit the class manually.
+
+ :param configuration: .Configuration object for this client
+ :param header_name: a header to pass when making calls to the API.
+ :param header_value: a header value to pass when making calls to
+ the API.
+ :param cookie: a cookie to include in the header when making calls
+ to the API
+ :param pool_threads: The number of threads to use for async requests
+ to the API. More threads means more concurrent API requests.
+ """
+
# Types that serialize/deserialize as-is (no model conversion needed).
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
# Map from OpenAPI type-name strings to native Python types.
NATIVE_TYPES_MAPPING = {
    'int': int,
    'long': int if six.PY3 else long, # noqa: F821  (`long` only exists on Python 2)
    'float': float,
    'str': str,
    'bool': bool,
    'date': datetime.date,
    'datetime': datetime.datetime,
    'object': object,
}
# Thread pool for async requests; created lazily by the `pool` property.
_pool = None
+
def __init__(self, configuration=None, header_name=None, header_value=None,
             cookie=None, pool_threads=1):
    """Create an API client.

    :param configuration: Configuration object for this client; a copy of
        the global default configuration is used when omitted.
    :param header_name: optional header name sent with every call.
    :param header_value: value for ``header_name``.
    :param cookie: optional Cookie header value sent with every call.
    :param pool_threads: thread-pool size used for ``async_req`` requests.
    """
    self.configuration = (Configuration.get_default_copy()
                          if configuration is None else configuration)
    self.pool_threads = pool_threads

    self.rest_client = rest.RESTClientObject(self.configuration)
    self.default_headers = ({} if header_name is None
                            else {header_name: header_value})
    self.cookie = cookie
    # Assigning through the property stores the default User-Agent header.
    self.user_agent = 'OpenAPI-Generator/28.1.0/python'
    self.client_side_validation = self.configuration.client_side_validation
+
def __enter__(self):
    """Enter the context manager; the client itself is the managed object."""
    return self
+
def __exit__(self, exc_type, exc_value, traceback):
    """Close the client when the ``with`` block exits (even on error)."""
    self.close()
+
def close(self):
    """Shut down the async thread pool (if any) and drop the atexit hook."""
    pool = self._pool
    if pool:
        pool.close()
        pool.join()
        self._pool = None
    # atexit.unregister does not exist on Python 2; guard for it.
    if hasattr(atexit, 'unregister'):
        atexit.unregister(self.close)
+
@property
def pool(self):
    """Thread pool used for ``async_req`` requests, created on first access.

    Lazy creation avoids instantiating an unused ThreadPool for purely
    blocking clients; ``close`` is registered to run at interpreter exit
    so the pool is always torn down.
    """
    if self._pool is not None:
        return self._pool
    atexit.register(self.close)
    self._pool = ThreadPool(self.pool_threads)
    return self._pool
+
@property
def user_agent(self):
    """str: value of the ``User-Agent`` header sent with every request."""
    return self.default_headers['User-Agent']

@user_agent.setter
def user_agent(self, value):
    # Stored in default_headers so it is applied to every outgoing call.
    self.default_headers['User-Agent'] = value
+
def set_default_header(self, header_name, header_value):
    """Set (or overwrite) a header sent with every subsequent request."""
    self.default_headers[header_name] = header_value
+
def __call_api(
        self, resource_path, method, path_params=None,
        query_params=None, header_params=None, body=None, post_params=None,
        files=None, response_type=None, auth_settings=None,
        _return_http_data_only=None, collection_formats=None,
        _preload_content=True, _request_timeout=None, _host=None):
    """Synchronous worker behind ``call_api``: build and perform one request.

    Sanitizes and encodes every parameter group (header, path, query,
    post/form), applies authentication, performs the HTTP call via
    ``self.request`` and (when ``_preload_content`` is true) deserializes
    the response body.

    :return: ``return_data`` alone when ``_return_http_data_only`` is
        truthy, otherwise the tuple ``(return_data, status, headers)``.
    """

    config = self.configuration

    # header parameters: caller headers + client defaults (+ cookie)
    header_params = header_params or {}
    header_params.update(self.default_headers)
    if self.cookie:
        header_params['Cookie'] = self.cookie
    if header_params:
        header_params = self.sanitize_for_serialization(header_params)
        header_params = dict(self.parameters_to_tuples(header_params,
                                                       collection_formats))

    # path parameters: substituted into the resource path, URL-quoted
    if path_params:
        path_params = self.sanitize_for_serialization(path_params)
        path_params = self.parameters_to_tuples(path_params,
                                                collection_formats)
        for k, v in path_params:
            # specified safe chars, encode everything
            resource_path = resource_path.replace(
                '{%s}' % k,
                quote(str(v), safe=config.safe_chars_for_path_param)
            )

    # query parameters
    if query_params:
        query_params = self.sanitize_for_serialization(query_params)
        query_params = self.parameters_to_tuples(query_params,
                                                 collection_formats)

    # post parameters (form fields and file uploads)
    if post_params or files:
        post_params = post_params if post_params else []
        post_params = self.sanitize_for_serialization(post_params)
        post_params = self.parameters_to_tuples(post_params,
                                                collection_formats)
        post_params.extend(self.files_parameters(files))

    # auth setting: may add auth headers/query params in place
    self.update_params_for_auth(header_params, query_params, auth_settings)

    # body
    if body:
        body = self.sanitize_for_serialization(body)

    # request url: _host (per-operation server) overrides the configured host
    if _host is None:
        url = self.configuration.host + resource_path
    else:
        # use server/host defined in path or operation instead
        url = _host + resource_path

    # perform request and return response
    response_data = self.request(
        method, url, query_params=query_params, headers=header_params,
        post_params=post_params, body=body,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout)

    # Keep the raw response around for callers that want to inspect it.
    self.last_response = response_data

    return_data = response_data
    if _preload_content:
        # deserialize response data
        if response_type:
            return_data = self.deserialize(response_data, response_type)
        else:
            return_data = None

    if _return_http_data_only:
        return (return_data)
    else:
        return (return_data, response_data.status,
                response_data.getheaders())
+
def sanitize_for_serialization(self, obj):
    """Builds a JSON POST object.

    If obj is None, return None.
    If obj is str, int, long, float, bool, return directly.
    If obj is datetime.datetime, datetime.date
    convert to string in iso8601 format.
    If obj is list, sanitize each element in the list.
    If obj is dict, return the dict.
    If obj is OpenAPI model, return the properties dict.

    :param obj: The data to serialize.
    :return: The serialized form of data.
    """
    sanitize = self.sanitize_for_serialization
    if obj is None:
        return None
    if isinstance(obj, self.PRIMITIVE_TYPES):
        return obj
    if isinstance(obj, list):
        return [sanitize(item) for item in obj]
    if isinstance(obj, tuple):
        return tuple(sanitize(item) for item in obj)
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()

    if isinstance(obj, dict):
        source = obj
    else:
        # OpenAPI model: build a dict of its non-None attributes, keyed by
        # the JSON names from attribute_map.
        source = {obj.attribute_map[name]: getattr(obj, name)
                  for name, _ in six.iteritems(obj.openapi_types)
                  if getattr(obj, name) is not None}

    return {key: sanitize(value)
            for key, value in six.iteritems(source)}
+
def deserialize(self, response, response_type):
    """Deserializes response into an object.

    :param response: RESTResponse object to be deserialized.
    :param response_type: class literal for
        deserialized object, or string of class name.

    :return: deserialized object.
    """
    # handle file downloading
    # save response body into a tmp file and return the instance
    if response_type == "file":
        return self.__deserialize_file(response)

    # fetch data from response object
    try:
        data = json.loads(response.data)
    except ValueError:
        # Non-JSON payload: hand the raw body to the type-driven decoder.
        data = response.data

    return self.__deserialize(data, response_type)
+
def __deserialize(self, data, klass):
    """Deserializes dict, list, str into an object.

    :param data: dict, list or str.
    :param klass: class literal, or string of class name. Container
        types arrive as ``'list[X]'`` / ``'dict(K, V)'`` strings.

    :return: deserialized object.
    """
    if data is None:
        return None

    # Use isinstance rather than `type(klass) == str` (flake8 E721):
    # equivalent here and the idiomatic type check.
    if isinstance(klass, str):
        if klass.startswith('list['):
            sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
            return [self.__deserialize(sub_data, sub_kls)
                    for sub_data in data]

        if klass.startswith('dict('):
            sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
            return {k: self.__deserialize(v, sub_kls)
                    for k, v in six.iteritems(data)}

        # convert str to class
        if klass in self.NATIVE_TYPES_MAPPING:
            klass = self.NATIVE_TYPES_MAPPING[klass]
        else:
            # Model classes are resolved by name from the generated package.
            klass = getattr(kubernetes.client.models, klass)

    if klass in self.PRIMITIVE_TYPES:
        return self.__deserialize_primitive(data, klass)
    elif klass == object:
        return self.__deserialize_object(data)
    elif klass == datetime.date:
        return self.__deserialize_date(data)
    elif klass == datetime.datetime:
        return self.__deserialize_datetime(data)
    else:
        return self.__deserialize_model(data, klass)
+
def call_api(self, resource_path, method,
             path_params=None, query_params=None, header_params=None,
             body=None, post_params=None, files=None,
             response_type=None, auth_settings=None, async_req=None,
             _return_http_data_only=None, collection_formats=None,
             _preload_content=True, _request_timeout=None, _host=None):
    """Makes the HTTP request (synchronous) and returns deserialized data.

    To make an async_req request, set the async_req parameter.

    :param resource_path: Path to method endpoint.
    :param method: Method to call.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters to be
        placed in the request header.
    :param body: Request body.
    :param post_params dict: Request post form parameters,
        for `application/x-www-form-urlencoded`, `multipart/form-data`.
    :param auth_settings list: Auth Settings names for the request.
    :param response_type: Response data type.
    :param files dict: key -> filename, value -> filepath,
        for `multipart/form-data`.
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
        and headers
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout setting for this request. A single
        number is a total timeout; a (connection, read) tuple sets both.
    :return: the response directly when async_req is false/absent;
        otherwise the request thread.
    """
    call_args = (resource_path, method, path_params, query_params,
                 header_params, body, post_params, files, response_type,
                 auth_settings, _return_http_data_only, collection_formats,
                 _preload_content, _request_timeout, _host)
    if async_req:
        # Hand the identical argument list to a worker thread.
        return self.pool.apply_async(self.__call_api, call_args)
    return self.__call_api(*call_args)
+
+    def request(self, method, url, query_params=None, headers=None,
+                post_params=None, body=None, _preload_content=True,
+                _request_timeout=None):
+        """Makes the HTTP request using RESTClient."""
+        # Dispatch to the RESTClient helper for the given verb. Note that
+        # `body` is not forwarded for GET/HEAD/OPTIONS requests and
+        # `post_params` only for POST/PUT/PATCH.
+        if method == "GET":
+            return self.rest_client.GET(url,
+                                        query_params=query_params,
+                                        _preload_content=_preload_content,
+                                        _request_timeout=_request_timeout,
+                                        headers=headers)
+        elif method == "HEAD":
+            return self.rest_client.HEAD(url,
+                                         query_params=query_params,
+                                         _preload_content=_preload_content,
+                                         _request_timeout=_request_timeout,
+                                         headers=headers)
+        elif method == "OPTIONS":
+            return self.rest_client.OPTIONS(url,
+                                            query_params=query_params,
+                                            headers=headers,
+                                            _preload_content=_preload_content,
+                                            _request_timeout=_request_timeout)
+        elif method == "POST":
+            return self.rest_client.POST(url,
+                                         query_params=query_params,
+                                         headers=headers,
+                                         post_params=post_params,
+                                         _preload_content=_preload_content,
+                                         _request_timeout=_request_timeout,
+                                         body=body)
+        elif method == "PUT":
+            return self.rest_client.PUT(url,
+                                        query_params=query_params,
+                                        headers=headers,
+                                        post_params=post_params,
+                                        _preload_content=_preload_content,
+                                        _request_timeout=_request_timeout,
+                                        body=body)
+        elif method == "PATCH":
+            return self.rest_client.PATCH(url,
+                                          query_params=query_params,
+                                          headers=headers,
+                                          post_params=post_params,
+                                          _preload_content=_preload_content,
+                                          _request_timeout=_request_timeout,
+                                          body=body)
+        elif method == "DELETE":
+            return self.rest_client.DELETE(url,
+                                           query_params=query_params,
+                                           headers=headers,
+                                           _preload_content=_preload_content,
+                                           _request_timeout=_request_timeout,
+                                           body=body)
+        else:
+            # Unknown verb: fail loudly rather than guess at semantics.
+            raise ApiValueError(
+                "http method must be `GET`, `HEAD`, `OPTIONS`,"
+                " `POST`, `PATCH`, `PUT` or `DELETE`."
+            )
+
+    def parameters_to_tuples(self, params, collection_formats):
+        """Get parameters as list of tuples, formatting collections.
+
+        Collection formats: 'multi' repeats the key once per value; 'ssv',
+        'tsv', 'pipes' and the default 'csv' join the values with a space,
+        tab, '|' or ',' respectively into one (key, string) tuple.
+
+        :param params: Parameters as dict or list of two-tuples
+        :param dict collection_formats: Parameter collection formats
+        :return: Parameters as list of tuples, collections formatted
+        """
+        new_params = []
+        if collection_formats is None:
+            collection_formats = {}
+        for k, v in six.iteritems(params) if isinstance(params, dict) else params:  # noqa: E501
+            if k in collection_formats:
+                collection_format = collection_formats[k]
+                if collection_format == 'multi':
+                    # One (key, value) tuple per element of the collection.
+                    new_params.extend((k, value) for value in v)
+                else:
+                    if collection_format == 'ssv':
+                        delimiter = ' '
+                    elif collection_format == 'tsv':
+                        delimiter = '\t'
+                    elif collection_format == 'pipes':
+                        delimiter = '|'
+                    else:  # csv is the default
+                        delimiter = ','
+                    new_params.append(
+                        (k, delimiter.join(str(value) for value in v)))
+            else:
+                # Not a collection parameter: pass through unchanged.
+                new_params.append((k, v))
+        return new_params
+
+    def files_parameters(self, files=None):
+        """Builds form parameters.
+
+        Each value may be a single file path or a list of paths; every file
+        is read fully into memory and emitted as a
+        (key, (filename, data, mimetype)) tuple for multipart encoding.
+
+        :param files: File parameters.
+        :return: Form parameters with files.
+        """
+        params = []
+
+        if files:
+            for k, v in six.iteritems(files):
+                # Skip empty/None entries so optional file params can be
+                # passed through without special-casing by the caller.
+                if not v:
+                    continue
+                file_names = v if type(v) is list else [v]
+                for n in file_names:
+                    with open(n, 'rb') as f:
+                        filename = os.path.basename(f.name)
+                        filedata = f.read()
+                        # Fall back to a generic binary type when the
+                        # extension is unknown.
+                        mimetype = (mimetypes.guess_type(filename)[0] or
+                                    'application/octet-stream')
+                        params.append(
+                            tuple([k, tuple([filename, filedata, mimetype])]))
+
+        return params
+
+    def select_header_accept(self, accepts):
+        """Returns `Accept` based on an array of accepts provided.
+
+        Prefers 'application/json' when offered; otherwise joins all the
+        (lower-cased) options into one comma-separated header value.
+        Returns None when `accepts` is empty.
+
+        :param accepts: List of headers.
+        :return: Accept (e.g. application/json).
+        """
+        if not accepts:
+            return
+
+        accepts = [x.lower() for x in accepts]
+
+        if 'application/json' in accepts:
+            return 'application/json'
+        else:
+            return ', '.join(accepts)
+
+    def select_header_content_type(self, content_types):
+        """Returns `Content-Type` based on an array of content_types provided.
+
+        Defaults to 'application/json' when the list is empty or when JSON
+        (or the wildcard '*/*') is among the options; otherwise picks the
+        first listed type.
+
+        :param content_types: List of content-types.
+        :return: Content-Type (e.g. application/json).
+        """
+        if not content_types:
+            return 'application/json'
+
+        content_types = [x.lower() for x in content_types]
+
+        if 'application/json' in content_types or '*/*' in content_types:
+            return 'application/json'
+        else:
+            return content_types[0]
+
+    def update_params_for_auth(self, headers, querys, auth_settings):
+        """Updates header and query params based on authentication setting.
+
+        Mutates `headers` and `querys` in place, injecting each configured
+        credential where its setting says it belongs ('cookie', 'header'
+        or 'query').
+
+        :param headers: Header parameters dict to be updated.
+        :param querys: Query parameters tuple list to be updated.
+        :param auth_settings: Authentication setting identifiers list.
+        """
+        if not auth_settings:
+            return
+
+        for auth in auth_settings:
+            auth_setting = self.configuration.auth_settings().get(auth)
+            # Settings that are not configured resolve to None and are
+            # silently skipped.
+            if auth_setting:
+                if auth_setting['in'] == 'cookie':
+                    headers['Cookie'] = auth_setting['value']
+                elif auth_setting['in'] == 'header':
+                    headers[auth_setting['key']] = auth_setting['value']
+                elif auth_setting['in'] == 'query':
+                    querys.append((auth_setting['key'], auth_setting['value']))
+                else:
+                    raise ApiValueError(
+                        'Authentication token must be in `query` or `header`'
+                    )
+
+    def __deserialize_file(self, response):
+        """Deserializes body to file
+
+        Saves response body into a file in a temporary folder,
+        using the filename from the `Content-Disposition` header if provided.
+
+        :param response: RESTResponse.
+        :return: file path.
+        """
+        # Reserve a unique path via mkstemp, then drop the placeholder file
+        # so the final name (possibly header-derived) can be created below.
+        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
+        os.close(fd)
+        os.remove(path)
+
+        content_disposition = response.getheader("Content-Disposition")
+        if content_disposition:
+            # NOTE(review): the server-supplied filename is joined under the
+            # temp directory without further sanitization; confirm the regex
+            # (which stops at whitespace/quotes) is sufficient against
+            # hostile Content-Disposition values.
+            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
+                                 content_disposition).group(1)
+            path = os.path.join(os.path.dirname(path), filename)
+
+        with open(path, "wb") as f:
+            f.write(response.data)
+
+        return path
+
+    def __deserialize_primitive(self, data, klass):
+        """Deserializes string to primitive type.
+
+        :param data: str.
+        :param klass: class literal.
+
+        :return: int, long, float, str, bool.
+        """
+        try:
+            return klass(data)
+        except UnicodeEncodeError:
+            # Python 2 str() on non-ASCII input: fall back to unicode text.
+            return six.text_type(data)
+        except TypeError:
+            # Value is not coercible to the target type; return it untouched.
+            return data
+
+    def __deserialize_object(self, value):
+        """Return an original value.
+
+        Identity deserializer used for the untyped `object` schema.
+
+        :return: object.
+        """
+        return value
+
+    def __deserialize_date(self, string):
+        """Deserializes string to date.
+
+        :param string: str.
+        :return: date.
+        """
+        try:
+            # `parse` is presumably python-dateutil's parser (imported at
+            # module level, outside this view) — TODO confirm.
+            return parse(string).date()
+        except ImportError:
+            # Optional parser dependency missing: return the raw string.
+            return string
+        except ValueError:
+            raise rest.ApiException(
+                status=0,
+                reason="Failed to parse `{0}` as date object".format(string)
+            )
+
+    def __deserialize_datetime(self, string):
+        """Deserializes string to datetime.
+
+        The string should be in iso8601 datetime format.
+
+        :param string: str.
+        :return: datetime.
+        """
+        try:
+            # Same optional `parse` helper as __deserialize_date.
+            return parse(string)
+        except ImportError:
+            # Optional parser dependency missing: return the raw string.
+            return string
+        except ValueError:
+            raise rest.ApiException(
+                status=0,
+                reason=(
+                    "Failed to parse `{0}` as datetime object"
+                    .format(string)
+                )
+            )
+
+    def __deserialize_model(self, data, klass):
+        """Deserializes list or dict to model.
+
+        :param data: dict, list.
+        :param klass: class literal.
+        :return: model object.
+        """
+
+        # Models with no declared attributes and no polymorphism hook carry
+        # no structure to map — hand the payload back unchanged.
+        if not klass.openapi_types and not hasattr(klass,
+                                                   'get_real_child_model'):
+            return data
+
+        kwargs = {}
+        if (data is not None and
+                klass.openapi_types is not None and
+                isinstance(data, (list, dict))):
+            # Recursively deserialize every declared attribute present in
+            # the payload, translating wire names via attribute_map.
+            for attr, attr_type in six.iteritems(klass.openapi_types):
+                if klass.attribute_map[attr] in data:
+                    value = data[klass.attribute_map[attr]]
+                    kwargs[attr] = self.__deserialize(value, attr_type)
+
+        instance = klass(**kwargs)
+
+        # Polymorphic models: re-deserialize as the concrete child class
+        # the instance reports for this payload.
+        if hasattr(instance, 'get_real_child_model'):
+            klass_name = instance.get_real_child_model(data)
+            if klass_name:
+                instance = self.__deserialize(data, klass_name)
+        return instance
diff --git a/contrib/python/kubernetes/kubernetes/client/apis/__init__.py b/contrib/python/kubernetes/kubernetes/client/apis/__init__.py
new file mode 100644
index 0000000000..ca4b321de2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/apis/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import absolute_import
+import warnings
+
+# flake8: noqa
+
+# alias kubernetes.client.api package and print deprecation warning
+from kubernetes.client.api import *
+
+# Re-enable DeprecationWarning (ignored by default in user code) for this
+# module so importers actually see the rename notice below.
+warnings.filterwarnings('default', module='kubernetes.client.apis')
+warnings.warn(
+    "The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
+    DeprecationWarning
+)
diff --git a/contrib/python/kubernetes/kubernetes/client/configuration.py b/contrib/python/kubernetes/kubernetes/client/configuration.py
new file mode 100644
index 0000000000..a7fc5e93c7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/configuration.py
@@ -0,0 +1,405 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import copy
+import logging
+import multiprocessing
+import sys
+import urllib3
+
+import six
+from six.moves import http_client as httplib
+
+
+class Configuration(object):
+    """NOTE: This class is auto generated by OpenAPI Generator
+
+    Ref: https://openapi-generator.tech
+    Do not edit the class manually.
+
+    :param host: Base url
+    :param api_key: Dict to store API key(s).
+      Each entry in the dict specifies an API key.
+      The dict key is the name of the security scheme in the OAS specification.
+      The dict value is the API key secret.
+    :param api_key_prefix: Dict to store API prefix (e.g. Bearer)
+      The dict key is the name of the security scheme in the OAS specification.
+      The dict value is an API key prefix when generating the auth data.
+    :param username: Username for HTTP basic authentication
+    :param password: Password for HTTP basic authentication
+    :param discard_unknown_keys: Boolean value indicating whether to discard
+      unknown properties. A server may send a response that includes additional
+      properties that are not known by the client in the following scenarios:
+      1. The OpenAPI document is incomplete, i.e. it does not match the server
+         implementation.
+      2. The client was generated using an older version of the OpenAPI document
+         and the server has been upgraded since then.
+      If a schema in the OpenAPI document defines the additionalProperties attribute,
+      then all undeclared properties received by the server are injected into the
+      additional properties map. In that case, there are undeclared properties, and
+      nothing to discard.
+
+    :Example:
+
+    API Key Authentication Example.
+    Given the following security scheme in the OpenAPI specification:
+      components:
+        securitySchemes:
+          cookieAuth:         # name for the security scheme
+            type: apiKey
+            in: cookie
+            name: JSESSIONID  # cookie name
+
+    You can programmatically set the cookie:
+      conf = client.Configuration(
+        api_key={'cookieAuth': 'abc123'}
+        api_key_prefix={'cookieAuth': 'JSESSIONID'}
+      )
+    The following cookie will be added to the HTTP request:
+       Cookie: JSESSIONID abc123
+    """
+
+    # Class-level default instance managed by set_default()/get_default_copy().
+    _default = None
+
+    def __init__(self, host="http://localhost",
+                 api_key=None, api_key_prefix=None,
+                 username=None, password=None,
+                 discard_unknown_keys=False,
+                 ):
+        """Constructor
+        """
+        # NOTE: the bare strings after assignments below are
+        # generator-produced "attribute docstrings"; they are no-op
+        # statements at runtime.
+        self.host = host
+        """Default Base url
+        """
+        self.temp_folder_path = None
+        """Temp file folder for downloading files
+        """
+        # Authentication Settings
+        self.api_key = {}
+        if api_key:
+            self.api_key = api_key
+        """dict to store API key(s)
+        """
+        self.api_key_prefix = {}
+        if api_key_prefix:
+            self.api_key_prefix = api_key_prefix
+        """dict to store API prefix (e.g. Bearer)
+        """
+        self.refresh_api_key_hook = None
+        """function hook to refresh API key if expired
+        """
+        self.username = username
+        """Username for HTTP basic authentication
+        """
+        self.password = password
+        """Password for HTTP basic authentication
+        """
+        self.discard_unknown_keys = discard_unknown_keys
+        self.logger = {}
+        """Logging Settings
+        """
+        self.logger["package_logger"] = logging.getLogger("client")
+        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
+        # Assigning logger_format also builds self.logger_formatter via the
+        # property setter below.
+        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
+        """Log format
+        """
+        self.logger_stream_handler = None
+        """Log stream handler
+        """
+        self.logger_file_handler = None
+        """Log file handler
+        """
+        # These two go through property setters that (re)configure the
+        # loggers above.
+        self.logger_file = None
+        """Debug file location
+        """
+        self.debug = False
+        """Debug switch
+        """
+
+        self.verify_ssl = True
+        """SSL/TLS verification
+           Set this to false to skip verifying SSL certificate when calling API
+           from https server.
+        """
+        self.ssl_ca_cert = None
+        """Set this to customize the certificate file to verify the peer.
+        """
+        self.cert_file = None
+        """client certificate file
+        """
+        self.key_file = None
+        """client key file
+        """
+        self.assert_hostname = None
+        """Set this to True/False to enable/disable SSL hostname verification.
+        """
+        self.tls_server_name = None
+        """SSL/TLS Server Name Indication (SNI)
+           Set this to the SNI value expected by the server.
+        """
+
+        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
+        """urllib3 connection pool's maximum number of connections saved
+           per pool. urllib3 uses 1 connection as default value, but this is
+           not the best value when you are making a lot of possibly parallel
+           requests to the same host, which is often the case here.
+           cpu_count * 5 is used as default value to increase performance.
+        """
+
+        self.proxy = None
+        """Proxy URL
+        """
+        self.no_proxy = None
+        """bypass proxy for host in the no_proxy list.
+        """
+        self.proxy_headers = None
+        """Proxy headers
+        """
+        self.safe_chars_for_path_param = ''
+        """Safe chars for path_param
+        """
+        self.retries = None
+        """Adding retries to override urllib3 default value 3
+        """
+        # Disable client side validation
+        self.client_side_validation = True
+
+    def __deepcopy__(self, memo):
+        # Custom deepcopy: logger objects and open file handlers must not be
+        # duplicated, so they are excluded and re-wired via their setters.
+        cls = self.__class__
+        result = cls.__new__(cls)
+        memo[id(self)] = result
+        for k, v in self.__dict__.items():
+            if k not in ('logger', 'logger_file_handler'):
+                setattr(result, k, copy.deepcopy(v, memo))
+        # shallow copy of loggers
+        result.logger = copy.copy(self.logger)
+        # use setters to configure loggers
+        result.logger_file = self.logger_file
+        result.debug = self.debug
+        return result
+
+    @classmethod
+    def set_default(cls, default):
+        """Set default instance of configuration.
+
+        It stores default configuration, which can be
+        returned by get_default_copy method.
+
+        :param default: object of Configuration
+        """
+        # A deep copy is stored so later mutations of `default` do not leak
+        # into the stored default.
+        cls._default = copy.deepcopy(default)
+
+    @classmethod
+    def get_default_copy(cls):
+        """Return new instance of configuration.
+
+        This method returns newly created, based on default constructor,
+        object of Configuration class or returns a copy of default
+        configuration passed by the set_default method.
+
+        :return: The configuration object.
+        """
+        if cls._default is not None:
+            return copy.deepcopy(cls._default)
+        return Configuration()
+
+    @property
+    def logger_file(self):
+        """The logger file.
+
+        If the logger_file is None, then add stream handler and remove file
+        handler. Otherwise, add file handler and remove stream handler.
+
+        :param value: The logger_file path.
+        :type: str
+        """
+        return self.__logger_file
+
+    @logger_file.setter
+    def logger_file(self, value):
+        """The logger file.
+
+        If the logger_file is None, then add stream handler and remove file
+        handler. Otherwise, add file handler and remove stream handler.
+
+        :param value: The logger_file path.
+        :type: str
+        """
+        # NOTE(review): despite the docstring, nothing is removed here —
+        # setting a non-None value only attaches a new FileHandler to every
+        # configured logger; repeated assignments accumulate handlers.
+        self.__logger_file = value
+        if self.__logger_file:
+            # If set logging file,
+            # then add file handler and remove stream handler.
+            self.logger_file_handler = logging.FileHandler(self.__logger_file)
+            self.logger_file_handler.setFormatter(self.logger_formatter)
+            for _, logger in six.iteritems(self.logger):
+                logger.addHandler(self.logger_file_handler)
+
+    @property
+    def debug(self):
+        """Debug status
+
+        :param value: The debug status, True or False.
+        :type: bool
+        """
+        return self.__debug
+
+    @debug.setter
+    def debug(self, value):
+        """Debug status
+
+        :param value: The debug status, True or False.
+        :type: bool
+        """
+        self.__debug = value
+        if self.__debug:
+            # if debug status is True, turn on debug logging
+            for _, logger in six.iteritems(self.logger):
+                logger.setLevel(logging.DEBUG)
+            # turn on httplib debug
+            httplib.HTTPConnection.debuglevel = 1
+        else:
+            # if debug status is False, turn off debug logging,
+            # setting log level to default `logging.WARNING`
+            for _, logger in six.iteritems(self.logger):
+                logger.setLevel(logging.WARNING)
+            # turn off httplib debug
+            httplib.HTTPConnection.debuglevel = 0
+
+    @property
+    def logger_format(self):
+        """The logger format.
+
+        The logger_formatter will be updated when sets logger_format.
+
+        :param value: The format string.
+        :type: str
+        """
+        return self.__logger_format
+
+    @logger_format.setter
+    def logger_format(self, value):
+        """The logger format.
+
+        The logger_formatter will be updated when sets logger_format.
+
+        :param value: The format string.
+        :type: str
+        """
+        self.__logger_format = value
+        self.logger_formatter = logging.Formatter(self.__logger_format)
+
+    def get_api_key_with_prefix(self, identifier):
+        """Gets API key (with prefix if set).
+
+        Returns None implicitly when no key is configured for `identifier`.
+
+        :param identifier: The identifier of apiKey.
+        :return: The token for api key authentication.
+        """
+        # Give the hook a chance to refresh an expired key before reading it.
+        if self.refresh_api_key_hook is not None:
+            self.refresh_api_key_hook(self)
+        key = self.api_key.get(identifier)
+        if key:
+            prefix = self.api_key_prefix.get(identifier)
+            if prefix:
+                return "%s %s" % (prefix, key)
+            else:
+                return key
+
+    def get_basic_auth_token(self):
+        """Gets HTTP basic authentication header (string).
+
+        :return: The token for basic HTTP authentication.
+        """
+        # Missing username/password default to empty strings so the header
+        # can always be built.
+        username = ""
+        if self.username is not None:
+            username = self.username
+        password = ""
+        if self.password is not None:
+            password = self.password
+        return urllib3.util.make_headers(
+            basic_auth=username + ':' + password
+        ).get('authorization')
+
+    def auth_settings(self):
+        """Gets Auth Settings dict for api client.
+
+        Only bearer-token auth (api_key under 'authorization') is emitted.
+
+        :return: The Auth Settings information dict.
+        """
+        auth = {}
+        if 'authorization' in self.api_key:
+            auth['BearerToken'] = {
+                'type': 'api_key',
+                'in': 'header',
+                'key': 'authorization',
+                'value': self.get_api_key_with_prefix('authorization')
+            }
+        return auth
+
+    def to_debug_report(self):
+        """Gets the essential information for debugging.
+
+        :return: The report for debugging.
+        """
+        return "Python SDK Debug Report:\n"\
+               "OS: {env}\n"\
+               "Python Version: {pyversion}\n"\
+               "Version of the API: release-1.28\n"\
+               "SDK Package Version: 28.1.0".\
+            format(env=sys.platform, pyversion=sys.version)
+
+    def get_host_settings(self):
+        """Gets an array of host settings
+
+        :return: An array of host settings
+        """
+        return [
+            {
+                'url': "/",
+                'description': "No description provided",
+            }
+        ]
+
+    def get_host_from_settings(self, index, variables=None):
+        """Gets host URL based on the index and variables
+        :param index: array index of the host settings
+        :param variables: hash of variable and the corresponding value
+        :return: URL based on host settings
+        """
+        variables = {} if variables is None else variables
+        servers = self.get_host_settings()
+
+        try:
+            server = servers[index]
+        except IndexError:
+            raise ValueError(
+                "Invalid index {0} when selecting the host settings. "
+                "Must be less than {1}".format(index, len(servers)))
+
+        url = server['url']
+
+        # NOTE(review): the entries returned by get_host_settings() above
+        # carry no 'variables' key, so this loop would raise KeyError with
+        # the default settings — presumably only reachable when subclasses
+        # override get_host_settings; confirm.
+        # go through variables and replace placeholders
+        for variable_name, variable in server['variables'].items():
+            used_value = variables.get(
+                variable_name, variable['default_value'])
+
+            if 'enum_values' in variable \
+                    and used_value not in variable['enum_values']:
+                raise ValueError(
+                    "The variable `{0}` in the host URL has invalid value "
+                    "{1}. Must be {2}.".format(
+                        variable_name, variables[variable_name],
+                        variable['enum_values']))
+
+            url = url.replace("{" + variable_name + "}", used_value)
+
+        return url
diff --git a/contrib/python/kubernetes/kubernetes/client/exceptions.py b/contrib/python/kubernetes/kubernetes/client/exceptions.py
new file mode 100644
index 0000000000..a87db80eee
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/exceptions.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import six
+
+
+# Root of the generated client's exception hierarchy; catch this to handle
+# any error the OpenAPI layer raises.
+class OpenApiException(Exception):
+    """The base exception class for all OpenAPIExceptions"""
+
+
+# Inherits TypeError too, so callers may catch it as a plain TypeError.
+class ApiTypeError(OpenApiException, TypeError):
+    def __init__(self, msg, path_to_item=None, valid_classes=None,
+                 key_type=None):
+        """ Raises an exception for TypeErrors
+
+        Args:
+            msg (str): the exception message
+
+        Keyword Args:
+            path_to_item (list): a list of keys an indices to get to the
+                                 current_item
+                                 None if unset
+            valid_classes (tuple): the primitive classes that current item
+                                   should be an instance of
+                                   None if unset
+            key_type (bool): False if our value is a value in a dict
+                             True if it is a key in a dict
+                             False if our item is an item in a list
+                             None if unset
+        """
+        self.path_to_item = path_to_item
+        self.valid_classes = valid_classes
+        self.key_type = key_type
+        full_msg = msg
+        # Append the rendered item path (e.g. "['a'][0]") when one is given.
+        if path_to_item:
+            full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
+        super(ApiTypeError, self).__init__(full_msg)
+
+
+# Inherits ValueError too, so callers may catch it as a plain ValueError.
+class ApiValueError(OpenApiException, ValueError):
+    def __init__(self, msg, path_to_item=None):
+        """
+        Args:
+            msg (str): the exception message
+
+        Keyword Args:
+            path_to_item (list) the path to the exception in the
+                received_data dict. None if unset
+        """
+
+        self.path_to_item = path_to_item
+        full_msg = msg
+        # Append the rendered item path (e.g. "['a'][0]") when one is given.
+        if path_to_item:
+            full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
+        super(ApiValueError, self).__init__(full_msg)
+
+
+# Inherits KeyError too, so callers may catch it as a plain KeyError.
+class ApiKeyError(OpenApiException, KeyError):
+    def __init__(self, msg, path_to_item=None):
+        """
+        Args:
+            msg (str): the exception message
+
+        Keyword Args:
+            path_to_item (None/list) the path to the exception in the
+                received_data dict
+        """
+        self.path_to_item = path_to_item
+        full_msg = msg
+        # Append the rendered item path (e.g. "['a'][0]") when one is given.
+        if path_to_item:
+            full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
+        super(ApiKeyError, self).__init__(full_msg)
+
+
+class ApiException(OpenApiException):
+    """Raised for HTTP-level API failures.
+
+    When an `http_resp` is supplied its status, reason, body and headers
+    are captured; otherwise the explicit `status`/`reason` are used and
+    body/headers stay None.
+    """
+
+    def __init__(self, status=None, reason=None, http_resp=None):
+        if http_resp:
+            self.status = http_resp.status
+            self.reason = http_resp.reason
+            self.body = http_resp.data
+            self.headers = http_resp.getheaders()
+        else:
+            self.status = status
+            self.reason = reason
+            self.body = None
+            self.headers = None
+
+    def __str__(self):
+        """Custom error messages for exception"""
+        error_message = "({0})\n"\
+                        "Reason: {1}\n".format(self.status, self.reason)
+        # Headers and body are only included when they were captured.
+        if self.headers:
+            error_message += "HTTP response headers: {0}\n".format(
+                self.headers)
+
+        if self.body:
+            error_message += "HTTP response body: {0}\n".format(self.body)
+
+        return error_message
+
+
+def render_path(path_to_item):
+    """Returns a string representation of a path"""
+    # Integers render as list indices ("[0]"), everything else as quoted
+    # dict keys ("['key']").
+    result = ""
+    for pth in path_to_item:
+        if isinstance(pth, six.integer_types):
+            result += "[{0}]".format(pth)
+        else:
+            result += "['{0}']".format(pth)
+    return result
diff --git a/contrib/python/kubernetes/kubernetes/client/models/__init__.py b/contrib/python/kubernetes/kubernetes/client/models/__init__.py
new file mode 100644
index 0000000000..b7fa8c636e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/__init__.py
@@ -0,0 +1,573 @@
+# coding: utf-8
+
+# flake8: noqa
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+# import models into model package
+from kubernetes.client.models.admissionregistration_v1_service_reference import AdmissionregistrationV1ServiceReference
+from kubernetes.client.models.admissionregistration_v1_webhook_client_config import AdmissionregistrationV1WebhookClientConfig
+from kubernetes.client.models.apiextensions_v1_service_reference import ApiextensionsV1ServiceReference
+from kubernetes.client.models.apiextensions_v1_webhook_client_config import ApiextensionsV1WebhookClientConfig
+from kubernetes.client.models.apiregistration_v1_service_reference import ApiregistrationV1ServiceReference
+from kubernetes.client.models.authentication_v1_token_request import AuthenticationV1TokenRequest
+from kubernetes.client.models.core_v1_endpoint_port import CoreV1EndpointPort
+from kubernetes.client.models.core_v1_event import CoreV1Event
+from kubernetes.client.models.core_v1_event_list import CoreV1EventList
+from kubernetes.client.models.core_v1_event_series import CoreV1EventSeries
+from kubernetes.client.models.discovery_v1_endpoint_port import DiscoveryV1EndpointPort
+from kubernetes.client.models.events_v1_event import EventsV1Event
+from kubernetes.client.models.events_v1_event_list import EventsV1EventList
+from kubernetes.client.models.events_v1_event_series import EventsV1EventSeries
+from kubernetes.client.models.storage_v1_token_request import StorageV1TokenRequest
+from kubernetes.client.models.v1_api_group import V1APIGroup
+from kubernetes.client.models.v1_api_group_list import V1APIGroupList
+from kubernetes.client.models.v1_api_resource import V1APIResource
+from kubernetes.client.models.v1_api_resource_list import V1APIResourceList
+from kubernetes.client.models.v1_api_service import V1APIService
+from kubernetes.client.models.v1_api_service_condition import V1APIServiceCondition
+from kubernetes.client.models.v1_api_service_list import V1APIServiceList
+from kubernetes.client.models.v1_api_service_spec import V1APIServiceSpec
+from kubernetes.client.models.v1_api_service_status import V1APIServiceStatus
+from kubernetes.client.models.v1_api_versions import V1APIVersions
+from kubernetes.client.models.v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource
+from kubernetes.client.models.v1_affinity import V1Affinity
+from kubernetes.client.models.v1_aggregation_rule import V1AggregationRule
+from kubernetes.client.models.v1_attached_volume import V1AttachedVolume
+from kubernetes.client.models.v1_azure_disk_volume_source import V1AzureDiskVolumeSource
+from kubernetes.client.models.v1_azure_file_persistent_volume_source import V1AzureFilePersistentVolumeSource
+from kubernetes.client.models.v1_azure_file_volume_source import V1AzureFileVolumeSource
+from kubernetes.client.models.v1_binding import V1Binding
+from kubernetes.client.models.v1_bound_object_reference import V1BoundObjectReference
+from kubernetes.client.models.v1_csi_driver import V1CSIDriver
+from kubernetes.client.models.v1_csi_driver_list import V1CSIDriverList
+from kubernetes.client.models.v1_csi_driver_spec import V1CSIDriverSpec
+from kubernetes.client.models.v1_csi_node import V1CSINode
+from kubernetes.client.models.v1_csi_node_driver import V1CSINodeDriver
+from kubernetes.client.models.v1_csi_node_list import V1CSINodeList
+from kubernetes.client.models.v1_csi_node_spec import V1CSINodeSpec
+from kubernetes.client.models.v1_csi_persistent_volume_source import V1CSIPersistentVolumeSource
+from kubernetes.client.models.v1_csi_storage_capacity import V1CSIStorageCapacity
+from kubernetes.client.models.v1_csi_storage_capacity_list import V1CSIStorageCapacityList
+from kubernetes.client.models.v1_csi_volume_source import V1CSIVolumeSource
+from kubernetes.client.models.v1_capabilities import V1Capabilities
+from kubernetes.client.models.v1_ceph_fs_persistent_volume_source import V1CephFSPersistentVolumeSource
+from kubernetes.client.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
+from kubernetes.client.models.v1_certificate_signing_request import V1CertificateSigningRequest
+from kubernetes.client.models.v1_certificate_signing_request_condition import V1CertificateSigningRequestCondition
+from kubernetes.client.models.v1_certificate_signing_request_list import V1CertificateSigningRequestList
+from kubernetes.client.models.v1_certificate_signing_request_spec import V1CertificateSigningRequestSpec
+from kubernetes.client.models.v1_certificate_signing_request_status import V1CertificateSigningRequestStatus
+from kubernetes.client.models.v1_cinder_persistent_volume_source import V1CinderPersistentVolumeSource
+from kubernetes.client.models.v1_cinder_volume_source import V1CinderVolumeSource
+from kubernetes.client.models.v1_claim_source import V1ClaimSource
+from kubernetes.client.models.v1_client_ip_config import V1ClientIPConfig
+from kubernetes.client.models.v1_cluster_role import V1ClusterRole
+from kubernetes.client.models.v1_cluster_role_binding import V1ClusterRoleBinding
+from kubernetes.client.models.v1_cluster_role_binding_list import V1ClusterRoleBindingList
+from kubernetes.client.models.v1_cluster_role_list import V1ClusterRoleList
+from kubernetes.client.models.v1_component_condition import V1ComponentCondition
+from kubernetes.client.models.v1_component_status import V1ComponentStatus
+from kubernetes.client.models.v1_component_status_list import V1ComponentStatusList
+from kubernetes.client.models.v1_condition import V1Condition
+from kubernetes.client.models.v1_config_map import V1ConfigMap
+from kubernetes.client.models.v1_config_map_env_source import V1ConfigMapEnvSource
+from kubernetes.client.models.v1_config_map_key_selector import V1ConfigMapKeySelector
+from kubernetes.client.models.v1_config_map_list import V1ConfigMapList
+from kubernetes.client.models.v1_config_map_node_config_source import V1ConfigMapNodeConfigSource
+from kubernetes.client.models.v1_config_map_projection import V1ConfigMapProjection
+from kubernetes.client.models.v1_config_map_volume_source import V1ConfigMapVolumeSource
+from kubernetes.client.models.v1_container import V1Container
+from kubernetes.client.models.v1_container_image import V1ContainerImage
+from kubernetes.client.models.v1_container_port import V1ContainerPort
+from kubernetes.client.models.v1_container_resize_policy import V1ContainerResizePolicy
+from kubernetes.client.models.v1_container_state import V1ContainerState
+from kubernetes.client.models.v1_container_state_running import V1ContainerStateRunning
+from kubernetes.client.models.v1_container_state_terminated import V1ContainerStateTerminated
+from kubernetes.client.models.v1_container_state_waiting import V1ContainerStateWaiting
+from kubernetes.client.models.v1_container_status import V1ContainerStatus
+from kubernetes.client.models.v1_controller_revision import V1ControllerRevision
+from kubernetes.client.models.v1_controller_revision_list import V1ControllerRevisionList
+from kubernetes.client.models.v1_cron_job import V1CronJob
+from kubernetes.client.models.v1_cron_job_list import V1CronJobList
+from kubernetes.client.models.v1_cron_job_spec import V1CronJobSpec
+from kubernetes.client.models.v1_cron_job_status import V1CronJobStatus
+from kubernetes.client.models.v1_cross_version_object_reference import V1CrossVersionObjectReference
+from kubernetes.client.models.v1_custom_resource_column_definition import V1CustomResourceColumnDefinition
+from kubernetes.client.models.v1_custom_resource_conversion import V1CustomResourceConversion
+from kubernetes.client.models.v1_custom_resource_definition import V1CustomResourceDefinition
+from kubernetes.client.models.v1_custom_resource_definition_condition import V1CustomResourceDefinitionCondition
+from kubernetes.client.models.v1_custom_resource_definition_list import V1CustomResourceDefinitionList
+from kubernetes.client.models.v1_custom_resource_definition_names import V1CustomResourceDefinitionNames
+from kubernetes.client.models.v1_custom_resource_definition_spec import V1CustomResourceDefinitionSpec
+from kubernetes.client.models.v1_custom_resource_definition_status import V1CustomResourceDefinitionStatus
+from kubernetes.client.models.v1_custom_resource_definition_version import V1CustomResourceDefinitionVersion
+from kubernetes.client.models.v1_custom_resource_subresource_scale import V1CustomResourceSubresourceScale
+from kubernetes.client.models.v1_custom_resource_subresources import V1CustomResourceSubresources
+from kubernetes.client.models.v1_custom_resource_validation import V1CustomResourceValidation
+from kubernetes.client.models.v1_daemon_endpoint import V1DaemonEndpoint
+from kubernetes.client.models.v1_daemon_set import V1DaemonSet
+from kubernetes.client.models.v1_daemon_set_condition import V1DaemonSetCondition
+from kubernetes.client.models.v1_daemon_set_list import V1DaemonSetList
+from kubernetes.client.models.v1_daemon_set_spec import V1DaemonSetSpec
+from kubernetes.client.models.v1_daemon_set_status import V1DaemonSetStatus
+from kubernetes.client.models.v1_daemon_set_update_strategy import V1DaemonSetUpdateStrategy
+from kubernetes.client.models.v1_delete_options import V1DeleteOptions
+from kubernetes.client.models.v1_deployment import V1Deployment
+from kubernetes.client.models.v1_deployment_condition import V1DeploymentCondition
+from kubernetes.client.models.v1_deployment_list import V1DeploymentList
+from kubernetes.client.models.v1_deployment_spec import V1DeploymentSpec
+from kubernetes.client.models.v1_deployment_status import V1DeploymentStatus
+from kubernetes.client.models.v1_deployment_strategy import V1DeploymentStrategy
+from kubernetes.client.models.v1_downward_api_projection import V1DownwardAPIProjection
+from kubernetes.client.models.v1_downward_api_volume_file import V1DownwardAPIVolumeFile
+from kubernetes.client.models.v1_downward_api_volume_source import V1DownwardAPIVolumeSource
+from kubernetes.client.models.v1_empty_dir_volume_source import V1EmptyDirVolumeSource
+from kubernetes.client.models.v1_endpoint import V1Endpoint
+from kubernetes.client.models.v1_endpoint_address import V1EndpointAddress
+from kubernetes.client.models.v1_endpoint_conditions import V1EndpointConditions
+from kubernetes.client.models.v1_endpoint_hints import V1EndpointHints
+from kubernetes.client.models.v1_endpoint_slice import V1EndpointSlice
+from kubernetes.client.models.v1_endpoint_slice_list import V1EndpointSliceList
+from kubernetes.client.models.v1_endpoint_subset import V1EndpointSubset
+from kubernetes.client.models.v1_endpoints import V1Endpoints
+from kubernetes.client.models.v1_endpoints_list import V1EndpointsList
+from kubernetes.client.models.v1_env_from_source import V1EnvFromSource
+from kubernetes.client.models.v1_env_var import V1EnvVar
+from kubernetes.client.models.v1_env_var_source import V1EnvVarSource
+from kubernetes.client.models.v1_ephemeral_container import V1EphemeralContainer
+from kubernetes.client.models.v1_ephemeral_volume_source import V1EphemeralVolumeSource
+from kubernetes.client.models.v1_event_source import V1EventSource
+from kubernetes.client.models.v1_eviction import V1Eviction
+from kubernetes.client.models.v1_exec_action import V1ExecAction
+from kubernetes.client.models.v1_external_documentation import V1ExternalDocumentation
+from kubernetes.client.models.v1_fc_volume_source import V1FCVolumeSource
+from kubernetes.client.models.v1_flex_persistent_volume_source import V1FlexPersistentVolumeSource
+from kubernetes.client.models.v1_flex_volume_source import V1FlexVolumeSource
+from kubernetes.client.models.v1_flocker_volume_source import V1FlockerVolumeSource
+from kubernetes.client.models.v1_for_zone import V1ForZone
+from kubernetes.client.models.v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource
+from kubernetes.client.models.v1_grpc_action import V1GRPCAction
+from kubernetes.client.models.v1_git_repo_volume_source import V1GitRepoVolumeSource
+from kubernetes.client.models.v1_glusterfs_persistent_volume_source import V1GlusterfsPersistentVolumeSource
+from kubernetes.client.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource
+from kubernetes.client.models.v1_group_version_for_discovery import V1GroupVersionForDiscovery
+from kubernetes.client.models.v1_http_get_action import V1HTTPGetAction
+from kubernetes.client.models.v1_http_header import V1HTTPHeader
+from kubernetes.client.models.v1_http_ingress_path import V1HTTPIngressPath
+from kubernetes.client.models.v1_http_ingress_rule_value import V1HTTPIngressRuleValue
+from kubernetes.client.models.v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler
+from kubernetes.client.models.v1_horizontal_pod_autoscaler_list import V1HorizontalPodAutoscalerList
+from kubernetes.client.models.v1_horizontal_pod_autoscaler_spec import V1HorizontalPodAutoscalerSpec
+from kubernetes.client.models.v1_horizontal_pod_autoscaler_status import V1HorizontalPodAutoscalerStatus
+from kubernetes.client.models.v1_host_alias import V1HostAlias
+from kubernetes.client.models.v1_host_ip import V1HostIP
+from kubernetes.client.models.v1_host_path_volume_source import V1HostPathVolumeSource
+from kubernetes.client.models.v1_ip_block import V1IPBlock
+from kubernetes.client.models.v1_iscsi_persistent_volume_source import V1ISCSIPersistentVolumeSource
+from kubernetes.client.models.v1_iscsi_volume_source import V1ISCSIVolumeSource
+from kubernetes.client.models.v1_ingress import V1Ingress
+from kubernetes.client.models.v1_ingress_backend import V1IngressBackend
+from kubernetes.client.models.v1_ingress_class import V1IngressClass
+from kubernetes.client.models.v1_ingress_class_list import V1IngressClassList
+from kubernetes.client.models.v1_ingress_class_parameters_reference import V1IngressClassParametersReference
+from kubernetes.client.models.v1_ingress_class_spec import V1IngressClassSpec
+from kubernetes.client.models.v1_ingress_list import V1IngressList
+from kubernetes.client.models.v1_ingress_load_balancer_ingress import V1IngressLoadBalancerIngress
+from kubernetes.client.models.v1_ingress_load_balancer_status import V1IngressLoadBalancerStatus
+from kubernetes.client.models.v1_ingress_port_status import V1IngressPortStatus
+from kubernetes.client.models.v1_ingress_rule import V1IngressRule
+from kubernetes.client.models.v1_ingress_service_backend import V1IngressServiceBackend
+from kubernetes.client.models.v1_ingress_spec import V1IngressSpec
+from kubernetes.client.models.v1_ingress_status import V1IngressStatus
+from kubernetes.client.models.v1_ingress_tls import V1IngressTLS
+from kubernetes.client.models.v1_json_schema_props import V1JSONSchemaProps
+from kubernetes.client.models.v1_job import V1Job
+from kubernetes.client.models.v1_job_condition import V1JobCondition
+from kubernetes.client.models.v1_job_list import V1JobList
+from kubernetes.client.models.v1_job_spec import V1JobSpec
+from kubernetes.client.models.v1_job_status import V1JobStatus
+from kubernetes.client.models.v1_job_template_spec import V1JobTemplateSpec
+from kubernetes.client.models.v1_key_to_path import V1KeyToPath
+from kubernetes.client.models.v1_label_selector import V1LabelSelector
+from kubernetes.client.models.v1_label_selector_requirement import V1LabelSelectorRequirement
+from kubernetes.client.models.v1_lease import V1Lease
+from kubernetes.client.models.v1_lease_list import V1LeaseList
+from kubernetes.client.models.v1_lease_spec import V1LeaseSpec
+from kubernetes.client.models.v1_lifecycle import V1Lifecycle
+from kubernetes.client.models.v1_lifecycle_handler import V1LifecycleHandler
+from kubernetes.client.models.v1_limit_range import V1LimitRange
+from kubernetes.client.models.v1_limit_range_item import V1LimitRangeItem
+from kubernetes.client.models.v1_limit_range_list import V1LimitRangeList
+from kubernetes.client.models.v1_limit_range_spec import V1LimitRangeSpec
+from kubernetes.client.models.v1_list_meta import V1ListMeta
+from kubernetes.client.models.v1_load_balancer_ingress import V1LoadBalancerIngress
+from kubernetes.client.models.v1_load_balancer_status import V1LoadBalancerStatus
+from kubernetes.client.models.v1_local_object_reference import V1LocalObjectReference
+from kubernetes.client.models.v1_local_subject_access_review import V1LocalSubjectAccessReview
+from kubernetes.client.models.v1_local_volume_source import V1LocalVolumeSource
+from kubernetes.client.models.v1_managed_fields_entry import V1ManagedFieldsEntry
+from kubernetes.client.models.v1_match_condition import V1MatchCondition
+from kubernetes.client.models.v1_mutating_webhook import V1MutatingWebhook
+from kubernetes.client.models.v1_mutating_webhook_configuration import V1MutatingWebhookConfiguration
+from kubernetes.client.models.v1_mutating_webhook_configuration_list import V1MutatingWebhookConfigurationList
+from kubernetes.client.models.v1_nfs_volume_source import V1NFSVolumeSource
+from kubernetes.client.models.v1_namespace import V1Namespace
+from kubernetes.client.models.v1_namespace_condition import V1NamespaceCondition
+from kubernetes.client.models.v1_namespace_list import V1NamespaceList
+from kubernetes.client.models.v1_namespace_spec import V1NamespaceSpec
+from kubernetes.client.models.v1_namespace_status import V1NamespaceStatus
+from kubernetes.client.models.v1_network_policy import V1NetworkPolicy
+from kubernetes.client.models.v1_network_policy_egress_rule import V1NetworkPolicyEgressRule
+from kubernetes.client.models.v1_network_policy_ingress_rule import V1NetworkPolicyIngressRule
+from kubernetes.client.models.v1_network_policy_list import V1NetworkPolicyList
+from kubernetes.client.models.v1_network_policy_peer import V1NetworkPolicyPeer
+from kubernetes.client.models.v1_network_policy_port import V1NetworkPolicyPort
+from kubernetes.client.models.v1_network_policy_spec import V1NetworkPolicySpec
+from kubernetes.client.models.v1_node import V1Node
+from kubernetes.client.models.v1_node_address import V1NodeAddress
+from kubernetes.client.models.v1_node_affinity import V1NodeAffinity
+from kubernetes.client.models.v1_node_condition import V1NodeCondition
+from kubernetes.client.models.v1_node_config_source import V1NodeConfigSource
+from kubernetes.client.models.v1_node_config_status import V1NodeConfigStatus
+from kubernetes.client.models.v1_node_daemon_endpoints import V1NodeDaemonEndpoints
+from kubernetes.client.models.v1_node_list import V1NodeList
+from kubernetes.client.models.v1_node_selector import V1NodeSelector
+from kubernetes.client.models.v1_node_selector_requirement import V1NodeSelectorRequirement
+from kubernetes.client.models.v1_node_selector_term import V1NodeSelectorTerm
+from kubernetes.client.models.v1_node_spec import V1NodeSpec
+from kubernetes.client.models.v1_node_status import V1NodeStatus
+from kubernetes.client.models.v1_node_system_info import V1NodeSystemInfo
+from kubernetes.client.models.v1_non_resource_attributes import V1NonResourceAttributes
+from kubernetes.client.models.v1_non_resource_rule import V1NonResourceRule
+from kubernetes.client.models.v1_object_field_selector import V1ObjectFieldSelector
+from kubernetes.client.models.v1_object_meta import V1ObjectMeta
+from kubernetes.client.models.v1_object_reference import V1ObjectReference
+from kubernetes.client.models.v1_overhead import V1Overhead
+from kubernetes.client.models.v1_owner_reference import V1OwnerReference
+from kubernetes.client.models.v1_persistent_volume import V1PersistentVolume
+from kubernetes.client.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
+from kubernetes.client.models.v1_persistent_volume_claim_condition import V1PersistentVolumeClaimCondition
+from kubernetes.client.models.v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
+from kubernetes.client.models.v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
+from kubernetes.client.models.v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
+from kubernetes.client.models.v1_persistent_volume_claim_template import V1PersistentVolumeClaimTemplate
+from kubernetes.client.models.v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
+from kubernetes.client.models.v1_persistent_volume_list import V1PersistentVolumeList
+from kubernetes.client.models.v1_persistent_volume_spec import V1PersistentVolumeSpec
+from kubernetes.client.models.v1_persistent_volume_status import V1PersistentVolumeStatus
+from kubernetes.client.models.v1_photon_persistent_disk_volume_source import V1PhotonPersistentDiskVolumeSource
+from kubernetes.client.models.v1_pod import V1Pod
+from kubernetes.client.models.v1_pod_affinity import V1PodAffinity
+from kubernetes.client.models.v1_pod_affinity_term import V1PodAffinityTerm
+from kubernetes.client.models.v1_pod_anti_affinity import V1PodAntiAffinity
+from kubernetes.client.models.v1_pod_condition import V1PodCondition
+from kubernetes.client.models.v1_pod_dns_config import V1PodDNSConfig
+from kubernetes.client.models.v1_pod_dns_config_option import V1PodDNSConfigOption
+from kubernetes.client.models.v1_pod_disruption_budget import V1PodDisruptionBudget
+from kubernetes.client.models.v1_pod_disruption_budget_list import V1PodDisruptionBudgetList
+from kubernetes.client.models.v1_pod_disruption_budget_spec import V1PodDisruptionBudgetSpec
+from kubernetes.client.models.v1_pod_disruption_budget_status import V1PodDisruptionBudgetStatus
+from kubernetes.client.models.v1_pod_failure_policy import V1PodFailurePolicy
+from kubernetes.client.models.v1_pod_failure_policy_on_exit_codes_requirement import V1PodFailurePolicyOnExitCodesRequirement
+from kubernetes.client.models.v1_pod_failure_policy_on_pod_conditions_pattern import V1PodFailurePolicyOnPodConditionsPattern
+from kubernetes.client.models.v1_pod_failure_policy_rule import V1PodFailurePolicyRule
+from kubernetes.client.models.v1_pod_ip import V1PodIP
+from kubernetes.client.models.v1_pod_list import V1PodList
+from kubernetes.client.models.v1_pod_os import V1PodOS
+from kubernetes.client.models.v1_pod_readiness_gate import V1PodReadinessGate
+from kubernetes.client.models.v1_pod_resource_claim import V1PodResourceClaim
+from kubernetes.client.models.v1_pod_resource_claim_status import V1PodResourceClaimStatus
+from kubernetes.client.models.v1_pod_scheduling_gate import V1PodSchedulingGate
+from kubernetes.client.models.v1_pod_security_context import V1PodSecurityContext
+from kubernetes.client.models.v1_pod_spec import V1PodSpec
+from kubernetes.client.models.v1_pod_status import V1PodStatus
+from kubernetes.client.models.v1_pod_template import V1PodTemplate
+from kubernetes.client.models.v1_pod_template_list import V1PodTemplateList
+from kubernetes.client.models.v1_pod_template_spec import V1PodTemplateSpec
+from kubernetes.client.models.v1_policy_rule import V1PolicyRule
+from kubernetes.client.models.v1_port_status import V1PortStatus
+from kubernetes.client.models.v1_portworx_volume_source import V1PortworxVolumeSource
+from kubernetes.client.models.v1_preconditions import V1Preconditions
+from kubernetes.client.models.v1_preferred_scheduling_term import V1PreferredSchedulingTerm
+from kubernetes.client.models.v1_priority_class import V1PriorityClass
+from kubernetes.client.models.v1_priority_class_list import V1PriorityClassList
+from kubernetes.client.models.v1_probe import V1Probe
+from kubernetes.client.models.v1_projected_volume_source import V1ProjectedVolumeSource
+from kubernetes.client.models.v1_quobyte_volume_source import V1QuobyteVolumeSource
+from kubernetes.client.models.v1_rbd_persistent_volume_source import V1RBDPersistentVolumeSource
+from kubernetes.client.models.v1_rbd_volume_source import V1RBDVolumeSource
+from kubernetes.client.models.v1_replica_set import V1ReplicaSet
+from kubernetes.client.models.v1_replica_set_condition import V1ReplicaSetCondition
+from kubernetes.client.models.v1_replica_set_list import V1ReplicaSetList
+from kubernetes.client.models.v1_replica_set_spec import V1ReplicaSetSpec
+from kubernetes.client.models.v1_replica_set_status import V1ReplicaSetStatus
+from kubernetes.client.models.v1_replication_controller import V1ReplicationController
+from kubernetes.client.models.v1_replication_controller_condition import V1ReplicationControllerCondition
+from kubernetes.client.models.v1_replication_controller_list import V1ReplicationControllerList
+from kubernetes.client.models.v1_replication_controller_spec import V1ReplicationControllerSpec
+from kubernetes.client.models.v1_replication_controller_status import V1ReplicationControllerStatus
+from kubernetes.client.models.v1_resource_attributes import V1ResourceAttributes
+from kubernetes.client.models.v1_resource_claim import V1ResourceClaim
+from kubernetes.client.models.v1_resource_field_selector import V1ResourceFieldSelector
+from kubernetes.client.models.v1_resource_quota import V1ResourceQuota
+from kubernetes.client.models.v1_resource_quota_list import V1ResourceQuotaList
+from kubernetes.client.models.v1_resource_quota_spec import V1ResourceQuotaSpec
+from kubernetes.client.models.v1_resource_quota_status import V1ResourceQuotaStatus
+from kubernetes.client.models.v1_resource_requirements import V1ResourceRequirements
+from kubernetes.client.models.v1_resource_rule import V1ResourceRule
+from kubernetes.client.models.v1_role import V1Role
+from kubernetes.client.models.v1_role_binding import V1RoleBinding
+from kubernetes.client.models.v1_role_binding_list import V1RoleBindingList
+from kubernetes.client.models.v1_role_list import V1RoleList
+from kubernetes.client.models.v1_role_ref import V1RoleRef
+from kubernetes.client.models.v1_rolling_update_daemon_set import V1RollingUpdateDaemonSet
+from kubernetes.client.models.v1_rolling_update_deployment import V1RollingUpdateDeployment
+from kubernetes.client.models.v1_rolling_update_stateful_set_strategy import V1RollingUpdateStatefulSetStrategy
+from kubernetes.client.models.v1_rule_with_operations import V1RuleWithOperations
+from kubernetes.client.models.v1_runtime_class import V1RuntimeClass
+from kubernetes.client.models.v1_runtime_class_list import V1RuntimeClassList
+from kubernetes.client.models.v1_se_linux_options import V1SELinuxOptions
+from kubernetes.client.models.v1_scale import V1Scale
+from kubernetes.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
+from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
+from kubernetes.client.models.v1_scale_spec import V1ScaleSpec
+from kubernetes.client.models.v1_scale_status import V1ScaleStatus
+from kubernetes.client.models.v1_scheduling import V1Scheduling
+from kubernetes.client.models.v1_scope_selector import V1ScopeSelector
+from kubernetes.client.models.v1_scoped_resource_selector_requirement import V1ScopedResourceSelectorRequirement
+from kubernetes.client.models.v1_seccomp_profile import V1SeccompProfile
+from kubernetes.client.models.v1_secret import V1Secret
+from kubernetes.client.models.v1_secret_env_source import V1SecretEnvSource
+from kubernetes.client.models.v1_secret_key_selector import V1SecretKeySelector
+from kubernetes.client.models.v1_secret_list import V1SecretList
+from kubernetes.client.models.v1_secret_projection import V1SecretProjection
+from kubernetes.client.models.v1_secret_reference import V1SecretReference
+from kubernetes.client.models.v1_secret_volume_source import V1SecretVolumeSource
+from kubernetes.client.models.v1_security_context import V1SecurityContext
+from kubernetes.client.models.v1_self_subject_access_review import V1SelfSubjectAccessReview
+from kubernetes.client.models.v1_self_subject_access_review_spec import V1SelfSubjectAccessReviewSpec
+from kubernetes.client.models.v1_self_subject_review import V1SelfSubjectReview
+from kubernetes.client.models.v1_self_subject_review_status import V1SelfSubjectReviewStatus
+from kubernetes.client.models.v1_self_subject_rules_review import V1SelfSubjectRulesReview
+from kubernetes.client.models.v1_self_subject_rules_review_spec import V1SelfSubjectRulesReviewSpec
+from kubernetes.client.models.v1_server_address_by_client_cidr import V1ServerAddressByClientCIDR
+from kubernetes.client.models.v1_service import V1Service
+from kubernetes.client.models.v1_service_account import V1ServiceAccount
+from kubernetes.client.models.v1_service_account_list import V1ServiceAccountList
+from kubernetes.client.models.v1_service_account_token_projection import V1ServiceAccountTokenProjection
+from kubernetes.client.models.v1_service_backend_port import V1ServiceBackendPort
+from kubernetes.client.models.v1_service_list import V1ServiceList
+from kubernetes.client.models.v1_service_port import V1ServicePort
+from kubernetes.client.models.v1_service_spec import V1ServiceSpec
+from kubernetes.client.models.v1_service_status import V1ServiceStatus
+from kubernetes.client.models.v1_session_affinity_config import V1SessionAffinityConfig
+from kubernetes.client.models.v1_stateful_set import V1StatefulSet
+from kubernetes.client.models.v1_stateful_set_condition import V1StatefulSetCondition
+from kubernetes.client.models.v1_stateful_set_list import V1StatefulSetList
+from kubernetes.client.models.v1_stateful_set_ordinals import V1StatefulSetOrdinals
+from kubernetes.client.models.v1_stateful_set_persistent_volume_claim_retention_policy import V1StatefulSetPersistentVolumeClaimRetentionPolicy
+from kubernetes.client.models.v1_stateful_set_spec import V1StatefulSetSpec
+from kubernetes.client.models.v1_stateful_set_status import V1StatefulSetStatus
+from kubernetes.client.models.v1_stateful_set_update_strategy import V1StatefulSetUpdateStrategy
+from kubernetes.client.models.v1_status import V1Status
+from kubernetes.client.models.v1_status_cause import V1StatusCause
+from kubernetes.client.models.v1_status_details import V1StatusDetails
+from kubernetes.client.models.v1_storage_class import V1StorageClass
+from kubernetes.client.models.v1_storage_class_list import V1StorageClassList
+from kubernetes.client.models.v1_storage_os_persistent_volume_source import V1StorageOSPersistentVolumeSource
+from kubernetes.client.models.v1_storage_os_volume_source import V1StorageOSVolumeSource
+from kubernetes.client.models.v1_subject import V1Subject
+from kubernetes.client.models.v1_subject_access_review import V1SubjectAccessReview
+from kubernetes.client.models.v1_subject_access_review_spec import V1SubjectAccessReviewSpec
+from kubernetes.client.models.v1_subject_access_review_status import V1SubjectAccessReviewStatus
+from kubernetes.client.models.v1_subject_rules_review_status import V1SubjectRulesReviewStatus
+from kubernetes.client.models.v1_sysctl import V1Sysctl
+from kubernetes.client.models.v1_tcp_socket_action import V1TCPSocketAction
+from kubernetes.client.models.v1_taint import V1Taint
+from kubernetes.client.models.v1_token_request_spec import V1TokenRequestSpec
+from kubernetes.client.models.v1_token_request_status import V1TokenRequestStatus
+from kubernetes.client.models.v1_token_review import V1TokenReview
+from kubernetes.client.models.v1_token_review_spec import V1TokenReviewSpec
+from kubernetes.client.models.v1_token_review_status import V1TokenReviewStatus
+from kubernetes.client.models.v1_toleration import V1Toleration
+from kubernetes.client.models.v1_topology_selector_label_requirement import V1TopologySelectorLabelRequirement
+from kubernetes.client.models.v1_topology_selector_term import V1TopologySelectorTerm
+from kubernetes.client.models.v1_topology_spread_constraint import V1TopologySpreadConstraint
+from kubernetes.client.models.v1_typed_local_object_reference import V1TypedLocalObjectReference
+from kubernetes.client.models.v1_typed_object_reference import V1TypedObjectReference
+from kubernetes.client.models.v1_uncounted_terminated_pods import V1UncountedTerminatedPods
+from kubernetes.client.models.v1_user_info import V1UserInfo
+from kubernetes.client.models.v1_validating_webhook import V1ValidatingWebhook
+from kubernetes.client.models.v1_validating_webhook_configuration import V1ValidatingWebhookConfiguration
+from kubernetes.client.models.v1_validating_webhook_configuration_list import V1ValidatingWebhookConfigurationList
+from kubernetes.client.models.v1_validation_rule import V1ValidationRule
+from kubernetes.client.models.v1_volume import V1Volume
+from kubernetes.client.models.v1_volume_attachment import V1VolumeAttachment
+from kubernetes.client.models.v1_volume_attachment_list import V1VolumeAttachmentList
+from kubernetes.client.models.v1_volume_attachment_source import V1VolumeAttachmentSource
+from kubernetes.client.models.v1_volume_attachment_spec import V1VolumeAttachmentSpec
+from kubernetes.client.models.v1_volume_attachment_status import V1VolumeAttachmentStatus
+from kubernetes.client.models.v1_volume_device import V1VolumeDevice
+from kubernetes.client.models.v1_volume_error import V1VolumeError
+from kubernetes.client.models.v1_volume_mount import V1VolumeMount
+from kubernetes.client.models.v1_volume_node_affinity import V1VolumeNodeAffinity
+from kubernetes.client.models.v1_volume_node_resources import V1VolumeNodeResources
+from kubernetes.client.models.v1_volume_projection import V1VolumeProjection
+from kubernetes.client.models.v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource
+from kubernetes.client.models.v1_watch_event import V1WatchEvent
+from kubernetes.client.models.v1_webhook_conversion import V1WebhookConversion
+from kubernetes.client.models.v1_weighted_pod_affinity_term import V1WeightedPodAffinityTerm
+from kubernetes.client.models.v1_windows_security_context_options import V1WindowsSecurityContextOptions
+from kubernetes.client.models.v1alpha1_audit_annotation import V1alpha1AuditAnnotation
+from kubernetes.client.models.v1alpha1_cluster_cidr import V1alpha1ClusterCIDR
+from kubernetes.client.models.v1alpha1_cluster_cidr_list import V1alpha1ClusterCIDRList
+from kubernetes.client.models.v1alpha1_cluster_cidr_spec import V1alpha1ClusterCIDRSpec
+from kubernetes.client.models.v1alpha1_cluster_trust_bundle import V1alpha1ClusterTrustBundle
+from kubernetes.client.models.v1alpha1_cluster_trust_bundle_list import V1alpha1ClusterTrustBundleList
+from kubernetes.client.models.v1alpha1_cluster_trust_bundle_spec import V1alpha1ClusterTrustBundleSpec
+from kubernetes.client.models.v1alpha1_expression_warning import V1alpha1ExpressionWarning
+from kubernetes.client.models.v1alpha1_ip_address import V1alpha1IPAddress
+from kubernetes.client.models.v1alpha1_ip_address_list import V1alpha1IPAddressList
+from kubernetes.client.models.v1alpha1_ip_address_spec import V1alpha1IPAddressSpec
+from kubernetes.client.models.v1alpha1_match_condition import V1alpha1MatchCondition
+from kubernetes.client.models.v1alpha1_match_resources import V1alpha1MatchResources
+from kubernetes.client.models.v1alpha1_named_rule_with_operations import V1alpha1NamedRuleWithOperations
+from kubernetes.client.models.v1alpha1_param_kind import V1alpha1ParamKind
+from kubernetes.client.models.v1alpha1_param_ref import V1alpha1ParamRef
+from kubernetes.client.models.v1alpha1_parent_reference import V1alpha1ParentReference
+from kubernetes.client.models.v1alpha1_self_subject_review import V1alpha1SelfSubjectReview
+from kubernetes.client.models.v1alpha1_self_subject_review_status import V1alpha1SelfSubjectReviewStatus
+from kubernetes.client.models.v1alpha1_server_storage_version import V1alpha1ServerStorageVersion
+from kubernetes.client.models.v1alpha1_storage_version import V1alpha1StorageVersion
+from kubernetes.client.models.v1alpha1_storage_version_condition import V1alpha1StorageVersionCondition
+from kubernetes.client.models.v1alpha1_storage_version_list import V1alpha1StorageVersionList
+from kubernetes.client.models.v1alpha1_storage_version_status import V1alpha1StorageVersionStatus
+from kubernetes.client.models.v1alpha1_type_checking import V1alpha1TypeChecking
+from kubernetes.client.models.v1alpha1_validating_admission_policy import V1alpha1ValidatingAdmissionPolicy
+from kubernetes.client.models.v1alpha1_validating_admission_policy_binding import V1alpha1ValidatingAdmissionPolicyBinding
+from kubernetes.client.models.v1alpha1_validating_admission_policy_binding_list import V1alpha1ValidatingAdmissionPolicyBindingList
+from kubernetes.client.models.v1alpha1_validating_admission_policy_binding_spec import V1alpha1ValidatingAdmissionPolicyBindingSpec
+from kubernetes.client.models.v1alpha1_validating_admission_policy_list import V1alpha1ValidatingAdmissionPolicyList
+from kubernetes.client.models.v1alpha1_validating_admission_policy_spec import V1alpha1ValidatingAdmissionPolicySpec
+from kubernetes.client.models.v1alpha1_validating_admission_policy_status import V1alpha1ValidatingAdmissionPolicyStatus
+from kubernetes.client.models.v1alpha1_validation import V1alpha1Validation
+from kubernetes.client.models.v1alpha1_variable import V1alpha1Variable
+from kubernetes.client.models.v1alpha2_allocation_result import V1alpha2AllocationResult
+from kubernetes.client.models.v1alpha2_pod_scheduling_context import V1alpha2PodSchedulingContext
+from kubernetes.client.models.v1alpha2_pod_scheduling_context_list import V1alpha2PodSchedulingContextList
+from kubernetes.client.models.v1alpha2_pod_scheduling_context_spec import V1alpha2PodSchedulingContextSpec
+from kubernetes.client.models.v1alpha2_pod_scheduling_context_status import V1alpha2PodSchedulingContextStatus
+from kubernetes.client.models.v1alpha2_resource_claim import V1alpha2ResourceClaim
+from kubernetes.client.models.v1alpha2_resource_claim_consumer_reference import V1alpha2ResourceClaimConsumerReference
+from kubernetes.client.models.v1alpha2_resource_claim_list import V1alpha2ResourceClaimList
+from kubernetes.client.models.v1alpha2_resource_claim_parameters_reference import V1alpha2ResourceClaimParametersReference
+from kubernetes.client.models.v1alpha2_resource_claim_scheduling_status import V1alpha2ResourceClaimSchedulingStatus
+from kubernetes.client.models.v1alpha2_resource_claim_spec import V1alpha2ResourceClaimSpec
+from kubernetes.client.models.v1alpha2_resource_claim_status import V1alpha2ResourceClaimStatus
+from kubernetes.client.models.v1alpha2_resource_claim_template import V1alpha2ResourceClaimTemplate
+from kubernetes.client.models.v1alpha2_resource_claim_template_list import V1alpha2ResourceClaimTemplateList
+from kubernetes.client.models.v1alpha2_resource_claim_template_spec import V1alpha2ResourceClaimTemplateSpec
+from kubernetes.client.models.v1alpha2_resource_class import V1alpha2ResourceClass
+from kubernetes.client.models.v1alpha2_resource_class_list import V1alpha2ResourceClassList
+from kubernetes.client.models.v1alpha2_resource_class_parameters_reference import V1alpha2ResourceClassParametersReference
+from kubernetes.client.models.v1alpha2_resource_handle import V1alpha2ResourceHandle
+from kubernetes.client.models.v1beta1_audit_annotation import V1beta1AuditAnnotation
+from kubernetes.client.models.v1beta1_expression_warning import V1beta1ExpressionWarning
+from kubernetes.client.models.v1beta1_match_condition import V1beta1MatchCondition
+from kubernetes.client.models.v1beta1_match_resources import V1beta1MatchResources
+from kubernetes.client.models.v1beta1_named_rule_with_operations import V1beta1NamedRuleWithOperations
+from kubernetes.client.models.v1beta1_param_kind import V1beta1ParamKind
+from kubernetes.client.models.v1beta1_param_ref import V1beta1ParamRef
+from kubernetes.client.models.v1beta1_self_subject_review import V1beta1SelfSubjectReview
+from kubernetes.client.models.v1beta1_self_subject_review_status import V1beta1SelfSubjectReviewStatus
+from kubernetes.client.models.v1beta1_type_checking import V1beta1TypeChecking
+from kubernetes.client.models.v1beta1_validating_admission_policy import V1beta1ValidatingAdmissionPolicy
+from kubernetes.client.models.v1beta1_validating_admission_policy_binding import V1beta1ValidatingAdmissionPolicyBinding
+from kubernetes.client.models.v1beta1_validating_admission_policy_binding_list import V1beta1ValidatingAdmissionPolicyBindingList
+from kubernetes.client.models.v1beta1_validating_admission_policy_binding_spec import V1beta1ValidatingAdmissionPolicyBindingSpec
+from kubernetes.client.models.v1beta1_validating_admission_policy_list import V1beta1ValidatingAdmissionPolicyList
+from kubernetes.client.models.v1beta1_validating_admission_policy_spec import V1beta1ValidatingAdmissionPolicySpec
+from kubernetes.client.models.v1beta1_validating_admission_policy_status import V1beta1ValidatingAdmissionPolicyStatus
+from kubernetes.client.models.v1beta1_validation import V1beta1Validation
+from kubernetes.client.models.v1beta1_variable import V1beta1Variable
+from kubernetes.client.models.v1beta2_exempt_priority_level_configuration import V1beta2ExemptPriorityLevelConfiguration
+from kubernetes.client.models.v1beta2_flow_distinguisher_method import V1beta2FlowDistinguisherMethod
+from kubernetes.client.models.v1beta2_flow_schema import V1beta2FlowSchema
+from kubernetes.client.models.v1beta2_flow_schema_condition import V1beta2FlowSchemaCondition
+from kubernetes.client.models.v1beta2_flow_schema_list import V1beta2FlowSchemaList
+from kubernetes.client.models.v1beta2_flow_schema_spec import V1beta2FlowSchemaSpec
+from kubernetes.client.models.v1beta2_flow_schema_status import V1beta2FlowSchemaStatus
+from kubernetes.client.models.v1beta2_group_subject import V1beta2GroupSubject
+from kubernetes.client.models.v1beta2_limit_response import V1beta2LimitResponse
+from kubernetes.client.models.v1beta2_limited_priority_level_configuration import V1beta2LimitedPriorityLevelConfiguration
+from kubernetes.client.models.v1beta2_non_resource_policy_rule import V1beta2NonResourcePolicyRule
+from kubernetes.client.models.v1beta2_policy_rules_with_subjects import V1beta2PolicyRulesWithSubjects
+from kubernetes.client.models.v1beta2_priority_level_configuration import V1beta2PriorityLevelConfiguration
+from kubernetes.client.models.v1beta2_priority_level_configuration_condition import V1beta2PriorityLevelConfigurationCondition
+from kubernetes.client.models.v1beta2_priority_level_configuration_list import V1beta2PriorityLevelConfigurationList
+from kubernetes.client.models.v1beta2_priority_level_configuration_reference import V1beta2PriorityLevelConfigurationReference
+from kubernetes.client.models.v1beta2_priority_level_configuration_spec import V1beta2PriorityLevelConfigurationSpec
+from kubernetes.client.models.v1beta2_priority_level_configuration_status import V1beta2PriorityLevelConfigurationStatus
+from kubernetes.client.models.v1beta2_queuing_configuration import V1beta2QueuingConfiguration
+from kubernetes.client.models.v1beta2_resource_policy_rule import V1beta2ResourcePolicyRule
+from kubernetes.client.models.v1beta2_service_account_subject import V1beta2ServiceAccountSubject
+from kubernetes.client.models.v1beta2_subject import V1beta2Subject
+from kubernetes.client.models.v1beta2_user_subject import V1beta2UserSubject
+from kubernetes.client.models.v1beta3_exempt_priority_level_configuration import V1beta3ExemptPriorityLevelConfiguration
+from kubernetes.client.models.v1beta3_flow_distinguisher_method import V1beta3FlowDistinguisherMethod
+from kubernetes.client.models.v1beta3_flow_schema import V1beta3FlowSchema
+from kubernetes.client.models.v1beta3_flow_schema_condition import V1beta3FlowSchemaCondition
+from kubernetes.client.models.v1beta3_flow_schema_list import V1beta3FlowSchemaList
+from kubernetes.client.models.v1beta3_flow_schema_spec import V1beta3FlowSchemaSpec
+from kubernetes.client.models.v1beta3_flow_schema_status import V1beta3FlowSchemaStatus
+from kubernetes.client.models.v1beta3_group_subject import V1beta3GroupSubject
+from kubernetes.client.models.v1beta3_limit_response import V1beta3LimitResponse
+from kubernetes.client.models.v1beta3_limited_priority_level_configuration import V1beta3LimitedPriorityLevelConfiguration
+from kubernetes.client.models.v1beta3_non_resource_policy_rule import V1beta3NonResourcePolicyRule
+from kubernetes.client.models.v1beta3_policy_rules_with_subjects import V1beta3PolicyRulesWithSubjects
+from kubernetes.client.models.v1beta3_priority_level_configuration import V1beta3PriorityLevelConfiguration
+from kubernetes.client.models.v1beta3_priority_level_configuration_condition import V1beta3PriorityLevelConfigurationCondition
+from kubernetes.client.models.v1beta3_priority_level_configuration_list import V1beta3PriorityLevelConfigurationList
+from kubernetes.client.models.v1beta3_priority_level_configuration_reference import V1beta3PriorityLevelConfigurationReference
+from kubernetes.client.models.v1beta3_priority_level_configuration_spec import V1beta3PriorityLevelConfigurationSpec
+from kubernetes.client.models.v1beta3_priority_level_configuration_status import V1beta3PriorityLevelConfigurationStatus
+from kubernetes.client.models.v1beta3_queuing_configuration import V1beta3QueuingConfiguration
+from kubernetes.client.models.v1beta3_resource_policy_rule import V1beta3ResourcePolicyRule
+from kubernetes.client.models.v1beta3_service_account_subject import V1beta3ServiceAccountSubject
+from kubernetes.client.models.v1beta3_subject import V1beta3Subject
+from kubernetes.client.models.v1beta3_user_subject import V1beta3UserSubject
+from kubernetes.client.models.v2_container_resource_metric_source import V2ContainerResourceMetricSource
+from kubernetes.client.models.v2_container_resource_metric_status import V2ContainerResourceMetricStatus
+from kubernetes.client.models.v2_cross_version_object_reference import V2CrossVersionObjectReference
+from kubernetes.client.models.v2_external_metric_source import V2ExternalMetricSource
+from kubernetes.client.models.v2_external_metric_status import V2ExternalMetricStatus
+from kubernetes.client.models.v2_hpa_scaling_policy import V2HPAScalingPolicy
+from kubernetes.client.models.v2_hpa_scaling_rules import V2HPAScalingRules
+from kubernetes.client.models.v2_horizontal_pod_autoscaler import V2HorizontalPodAutoscaler
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_behavior import V2HorizontalPodAutoscalerBehavior
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_condition import V2HorizontalPodAutoscalerCondition
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_list import V2HorizontalPodAutoscalerList
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_spec import V2HorizontalPodAutoscalerSpec
+from kubernetes.client.models.v2_horizontal_pod_autoscaler_status import V2HorizontalPodAutoscalerStatus
+from kubernetes.client.models.v2_metric_identifier import V2MetricIdentifier
+from kubernetes.client.models.v2_metric_spec import V2MetricSpec
+from kubernetes.client.models.v2_metric_status import V2MetricStatus
+from kubernetes.client.models.v2_metric_target import V2MetricTarget
+from kubernetes.client.models.v2_metric_value_status import V2MetricValueStatus
+from kubernetes.client.models.v2_object_metric_source import V2ObjectMetricSource
+from kubernetes.client.models.v2_object_metric_status import V2ObjectMetricStatus
+from kubernetes.client.models.v2_pods_metric_source import V2PodsMetricSource
+from kubernetes.client.models.v2_pods_metric_status import V2PodsMetricStatus
+from kubernetes.client.models.v2_resource_metric_source import V2ResourceMetricSource
+from kubernetes.client.models.v2_resource_metric_status import V2ResourceMetricStatus
+from kubernetes.client.models.version_info import VersionInfo
diff --git a/contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_service_reference.py b/contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_service_reference.py
new file mode 100644
index 0000000000..dc03c5b79d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_service_reference.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class AdmissionregistrationV1ServiceReference(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps each python attribute name to its declared OpenAPI type.
+    openapi_types = {
+        'name': 'str',
+        'namespace': 'str',
+        'path': 'str',
+        'port': 'int'
+    }
+
+    # Maps each python attribute name to the JSON key used on the wire.
+    attribute_map = {
+        'name': 'name',
+        'namespace': 'namespace',
+        'path': 'path',
+        'port': 'port'
+    }
+
+    def __init__(self, name=None, namespace=None, path=None, port=None, local_vars_configuration=None):  # noqa: E501
+        """AdmissionregistrationV1ServiceReference - a model defined in OpenAPI"""  # noqa: E501
+        # Fall back to a default Configuration so client_side_validation has
+        # a defined value even when the caller supplies none.
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._name = None
+        self._namespace = None
+        self._path = None
+        self._port = None
+        self.discriminator = None
+
+        # name and namespace are required: their setters below reject None
+        # when client_side_validation is enabled.
+        self.name = name
+        self.namespace = namespace
+        # path and port are optional; the backing fields stay None unless
+        # values are supplied.
+        if path is not None:
+            self.path = path
+        if port is not None:
+            self.port = port
+
+    @property
+    def name(self):
+        """Gets the name of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+
+        `name` is the name of the service. Required  # noqa: E501
+
+        :return: The name of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :rtype: str
+        """
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        """Sets the name of this AdmissionregistrationV1ServiceReference.
+
+        `name` is the name of the service. Required  # noqa: E501
+
+        :param name: The name of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :type: str
+        """
+        # Required field: reject None when client-side validation is on.
+        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
+            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
+
+        self._name = name
+
+    @property
+    def namespace(self):
+        """Gets the namespace of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+
+        `namespace` is the namespace of the service. Required  # noqa: E501
+
+        :return: The namespace of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :rtype: str
+        """
+        return self._namespace
+
+    @namespace.setter
+    def namespace(self, namespace):
+        """Sets the namespace of this AdmissionregistrationV1ServiceReference.
+
+        `namespace` is the namespace of the service. Required  # noqa: E501
+
+        :param namespace: The namespace of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :type: str
+        """
+        # Required field: reject None when client-side validation is on.
+        if self.local_vars_configuration.client_side_validation and namespace is None:  # noqa: E501
+            raise ValueError("Invalid value for `namespace`, must not be `None`")  # noqa: E501
+
+        self._namespace = namespace
+
+    @property
+    def path(self):
+        """Gets the path of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+
+        `path` is an optional URL path which will be sent in any request to this service.  # noqa: E501
+
+        :return: The path of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :rtype: str
+        """
+        return self._path
+
+    @path.setter
+    def path(self, path):
+        """Sets the path of this AdmissionregistrationV1ServiceReference.
+
+        `path` is an optional URL path which will be sent in any request to this service.  # noqa: E501
+
+        :param path: The path of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :type: str
+        """
+        # Optional field: no client-side validation performed.
+        self._path = path
+
+    @property
+    def port(self):
+        """Gets the port of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+
+        If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).  # noqa: E501
+
+        :return: The port of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :rtype: int
+        """
+        return self._port
+
+    @port.setter
+    def port(self, port):
+        """Sets the port of this AdmissionregistrationV1ServiceReference.
+
+        If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).  # noqa: E501
+
+        :param port: The port of this AdmissionregistrationV1ServiceReference.  # noqa: E501
+        :type: int
+        """
+        # Optional field: no client-side validation performed.
+        self._port = port
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                # Convert each nested model element; pass primitives through.
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                # Convert nested model values inside dicts as well.
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Equality is structural: compare the dict forms of both models.
+        if not isinstance(other, AdmissionregistrationV1ServiceReference):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, AdmissionregistrationV1ServiceReference):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_webhook_client_config.py b/contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_webhook_client_config.py
new file mode 100644
index 0000000000..697929a5d5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/admissionregistration_v1_webhook_client_config.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class AdmissionregistrationV1WebhookClientConfig(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps each python attribute name to its declared OpenAPI type.
+    openapi_types = {
+        'ca_bundle': 'str',
+        'service': 'AdmissionregistrationV1ServiceReference',
+        'url': 'str'
+    }
+
+    # Maps each python attribute name to the JSON key used on the wire.
+    attribute_map = {
+        'ca_bundle': 'caBundle',
+        'service': 'service',
+        'url': 'url'
+    }
+
+    def __init__(self, ca_bundle=None, service=None, url=None, local_vars_configuration=None):  # noqa: E501
+        """AdmissionregistrationV1WebhookClientConfig - a model defined in OpenAPI"""  # noqa: E501
+        # Fall back to a default Configuration so client_side_validation has
+        # a defined value even when the caller supplies none.
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._ca_bundle = None
+        self._service = None
+        self._url = None
+        self.discriminator = None
+
+        # All three fields are optional; the backing fields stay None unless
+        # values are supplied.
+        if ca_bundle is not None:
+            self.ca_bundle = ca_bundle
+        if service is not None:
+            self.service = service
+        if url is not None:
+            self.url = url
+
+    @property
+    def ca_bundle(self):
+        """Gets the ca_bundle of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+
+        `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.  # noqa: E501
+
+        :return: The ca_bundle of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+        :rtype: str
+        """
+        return self._ca_bundle
+
+    @ca_bundle.setter
+    def ca_bundle(self, ca_bundle):
+        """Sets the ca_bundle of this AdmissionregistrationV1WebhookClientConfig.
+
+        `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.  # noqa: E501
+
+        :param ca_bundle: The ca_bundle of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+        :type: str
+        """
+        # When client-side validation is on, a non-None value must match the
+        # base64 pattern below (None is accepted: the field is optional).
+        if (self.local_vars_configuration.client_side_validation and
+                ca_bundle is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle)):  # noqa: E501
+            raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")  # noqa: E501
+
+        self._ca_bundle = ca_bundle
+
+    @property
+    def service(self):
+        """Gets the service of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+
+
+        :return: The service of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+        :rtype: AdmissionregistrationV1ServiceReference
+        """
+        return self._service
+
+    @service.setter
+    def service(self, service):
+        """Sets the service of this AdmissionregistrationV1WebhookClientConfig.
+
+
+        :param service: The service of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+        :type: AdmissionregistrationV1ServiceReference
+        """
+        # Optional field: no client-side validation performed.
+        self._service = service
+
+    @property
+    def url(self):
+        """Gets the url of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+
+        `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. The scheme must be \"https\"; the URL must begin with \"https://\". A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. Attempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.  # noqa: E501
+
+        :return: The url of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+        :rtype: str
+        """
+        return self._url
+
+    @url.setter
+    def url(self, url):
+        """Sets the url of this AdmissionregistrationV1WebhookClientConfig.
+
+        `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. The scheme must be \"https\"; the URL must begin with \"https://\". A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. Attempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.  # noqa: E501
+
+        :param url: The url of this AdmissionregistrationV1WebhookClientConfig.  # noqa: E501
+        :type: str
+        """
+        # Optional field: no client-side validation performed.
+        self._url = url
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                # Convert each nested model element; pass primitives through.
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                # Convert nested model values inside dicts as well.
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Equality is structural: compare the dict forms of both models.
+        if not isinstance(other, AdmissionregistrationV1WebhookClientConfig):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, AdmissionregistrationV1WebhookClientConfig):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_service_reference.py b/contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_service_reference.py
new file mode 100644
index 0000000000..5110085bb3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_service_reference.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class ApiextensionsV1ServiceReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str',
+ 'path': 'str',
+ 'port': 'int'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'path': 'path',
+ 'port': 'port'
+ }
+
+ def __init__(self, name=None, namespace=None, path=None, port=None, local_vars_configuration=None): # noqa: E501
+ """ApiextensionsV1ServiceReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self._path = None
+ self._port = None
+ self.discriminator = None
+
+ self.name = name
+ self.namespace = namespace
+ if path is not None:
+ self.path = path
+ if port is not None:
+ self.port = port
+
+ @property
+ def name(self):
+ """Gets the name of this ApiextensionsV1ServiceReference. # noqa: E501
+
+ name is the name of the service. Required # noqa: E501
+
+ :return: The name of this ApiextensionsV1ServiceReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this ApiextensionsV1ServiceReference.
+
+ name is the name of the service. Required # noqa: E501
+
+ :param name: The name of this ApiextensionsV1ServiceReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this ApiextensionsV1ServiceReference. # noqa: E501
+
+ namespace is the namespace of the service. Required # noqa: E501
+
+ :return: The namespace of this ApiextensionsV1ServiceReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this ApiextensionsV1ServiceReference.
+
+ namespace is the namespace of the service. Required # noqa: E501
+
+ :param namespace: The namespace of this ApiextensionsV1ServiceReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501
+ raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
+
+ self._namespace = namespace
+
+ @property
+ def path(self):
+ """Gets the path of this ApiextensionsV1ServiceReference. # noqa: E501
+
+ path is an optional URL path at which the webhook will be contacted. # noqa: E501
+
+ :return: The path of this ApiextensionsV1ServiceReference. # noqa: E501
+ :rtype: str
+ """
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ """Sets the path of this ApiextensionsV1ServiceReference.
+
+ path is an optional URL path at which the webhook will be contacted. # noqa: E501
+
+ :param path: The path of this ApiextensionsV1ServiceReference. # noqa: E501
+ :type: str
+ """
+
+ self._path = path
+
+ @property
+ def port(self):
+ """Gets the port of this ApiextensionsV1ServiceReference. # noqa: E501
+
+ port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility. # noqa: E501
+
+ :return: The port of this ApiextensionsV1ServiceReference. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this ApiextensionsV1ServiceReference.
+
+ port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility. # noqa: E501
+
+ :param port: The port of this ApiextensionsV1ServiceReference. # noqa: E501
+ :type: int
+ """
+
+ self._port = port
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, ApiextensionsV1ServiceReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, ApiextensionsV1ServiceReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_webhook_client_config.py b/contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_webhook_client_config.py
new file mode 100644
index 0000000000..6b18deaa30
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/apiextensions_v1_webhook_client_config.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class ApiextensionsV1WebhookClientConfig(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'ca_bundle': 'str',
+ 'service': 'ApiextensionsV1ServiceReference',
+ 'url': 'str'
+ }
+
+ attribute_map = {
+ 'ca_bundle': 'caBundle',
+ 'service': 'service',
+ 'url': 'url'
+ }
+
+ def __init__(self, ca_bundle=None, service=None, url=None, local_vars_configuration=None): # noqa: E501
+ """ApiextensionsV1WebhookClientConfig - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._ca_bundle = None
+ self._service = None
+ self._url = None
+ self.discriminator = None
+
+ if ca_bundle is not None:
+ self.ca_bundle = ca_bundle
+ if service is not None:
+ self.service = service
+ if url is not None:
+ self.url = url
+
+ @property
+ def ca_bundle(self):
+ """Gets the ca_bundle of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+
+ caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. # noqa: E501
+
+ :return: The ca_bundle of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+ :rtype: str
+ """
+ return self._ca_bundle
+
+ @ca_bundle.setter
+ def ca_bundle(self, ca_bundle):
+ """Sets the ca_bundle of this ApiextensionsV1WebhookClientConfig.
+
+ caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. # noqa: E501
+
+ :param ca_bundle: The ca_bundle of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+ :type: str
+ """
+ if (self.local_vars_configuration.client_side_validation and
+ ca_bundle is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle)): # noqa: E501
+ raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501
+
+ self._ca_bundle = ca_bundle
+
+ @property
+ def service(self):
+ """Gets the service of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+
+
+ :return: The service of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+ :rtype: ApiextensionsV1ServiceReference
+ """
+ return self._service
+
+ @service.setter
+ def service(self, service):
+ """Sets the service of this ApiextensionsV1WebhookClientConfig.
+
+
+ :param service: The service of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+ :type: ApiextensionsV1ServiceReference
+ """
+
+ self._service = service
+
+ @property
+ def url(self):
+ """Gets the url of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+
+ url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. The scheme must be \"https\"; the URL must begin with \"https://\". A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. Attempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either. # noqa: E501
+
+ :return: The url of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+ :rtype: str
+ """
+ return self._url
+
+ @url.setter
+ def url(self, url):
+ """Sets the url of this ApiextensionsV1WebhookClientConfig.
+
+ url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. The scheme must be \"https\"; the URL must begin with \"https://\". A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. Attempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either. # noqa: E501
+
+ :param url: The url of this ApiextensionsV1WebhookClientConfig. # noqa: E501
+ :type: str
+ """
+
+ self._url = url
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, ApiextensionsV1WebhookClientConfig):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, ApiextensionsV1WebhookClientConfig):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/apiregistration_v1_service_reference.py b/contrib/python/kubernetes/kubernetes/client/models/apiregistration_v1_service_reference.py
new file mode 100644
index 0000000000..7f12b0b8c5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/apiregistration_v1_service_reference.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class ApiregistrationV1ServiceReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str',
+ 'port': 'int'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'port': 'port'
+ }
+
+ def __init__(self, name=None, namespace=None, port=None, local_vars_configuration=None): # noqa: E501
+ """ApiregistrationV1ServiceReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self._port = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if port is not None:
+ self.port = port
+
+ @property
+ def name(self):
+ """Gets the name of this ApiregistrationV1ServiceReference. # noqa: E501
+
+ Name is the name of the service # noqa: E501
+
+ :return: The name of this ApiregistrationV1ServiceReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this ApiregistrationV1ServiceReference.
+
+ Name is the name of the service # noqa: E501
+
+ :param name: The name of this ApiregistrationV1ServiceReference. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this ApiregistrationV1ServiceReference. # noqa: E501
+
+ Namespace is the namespace of the service # noqa: E501
+
+ :return: The namespace of this ApiregistrationV1ServiceReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this ApiregistrationV1ServiceReference.
+
+ Namespace is the namespace of the service # noqa: E501
+
+ :param namespace: The namespace of this ApiregistrationV1ServiceReference. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def port(self):
+ """Gets the port of this ApiregistrationV1ServiceReference. # noqa: E501
+
+ If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). # noqa: E501
+
+ :return: The port of this ApiregistrationV1ServiceReference. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this ApiregistrationV1ServiceReference.
+
+ If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive). # noqa: E501
+
+ :param port: The port of this ApiregistrationV1ServiceReference. # noqa: E501
+ :type: int
+ """
+
+ self._port = port
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, ApiregistrationV1ServiceReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, ApiregistrationV1ServiceReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/authentication_v1_token_request.py b/contrib/python/kubernetes/kubernetes/client/models/authentication_v1_token_request.py
new file mode 100644
index 0000000000..84baec4840
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/authentication_v1_token_request.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class AuthenticationV1TokenRequest(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1TokenRequestSpec',
+ 'status': 'V1TokenRequestStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """AuthenticationV1TokenRequest - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this AuthenticationV1TokenRequest. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this AuthenticationV1TokenRequest. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this AuthenticationV1TokenRequest.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this AuthenticationV1TokenRequest. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this AuthenticationV1TokenRequest. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this AuthenticationV1TokenRequest. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this AuthenticationV1TokenRequest.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this AuthenticationV1TokenRequest. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this AuthenticationV1TokenRequest. # noqa: E501
+
+
+ :return: The metadata of this AuthenticationV1TokenRequest. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this AuthenticationV1TokenRequest.
+
+
+ :param metadata: The metadata of this AuthenticationV1TokenRequest. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this AuthenticationV1TokenRequest. # noqa: E501
+
+
+ :return: The spec of this AuthenticationV1TokenRequest. # noqa: E501
+ :rtype: V1TokenRequestSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this AuthenticationV1TokenRequest.
+
+
+ :param spec: The spec of this AuthenticationV1TokenRequest. # noqa: E501
+ :type: V1TokenRequestSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this AuthenticationV1TokenRequest. # noqa: E501
+
+
+ :return: The status of this AuthenticationV1TokenRequest. # noqa: E501
+ :rtype: V1TokenRequestStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this AuthenticationV1TokenRequest.
+
+
+ :param status: The status of this AuthenticationV1TokenRequest. # noqa: E501
+ :type: V1TokenRequestStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, AuthenticationV1TokenRequest):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, AuthenticationV1TokenRequest):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/core_v1_endpoint_port.py b/contrib/python/kubernetes/kubernetes/client/models/core_v1_endpoint_port.py
new file mode 100644
index 0000000000..cd0ed7238d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/core_v1_endpoint_port.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class CoreV1EndpointPort(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'app_protocol': 'str',
+ 'name': 'str',
+ 'port': 'int',
+ 'protocol': 'str'
+ }
+
+ attribute_map = {
+ 'app_protocol': 'appProtocol',
+ 'name': 'name',
+ 'port': 'port',
+ 'protocol': 'protocol'
+ }
+
+ def __init__(self, app_protocol=None, name=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501
+ """CoreV1EndpointPort - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._app_protocol = None
+ self._name = None
+ self._port = None
+ self._protocol = None
+ self.discriminator = None
+
+ if app_protocol is not None:
+ self.app_protocol = app_protocol
+ if name is not None:
+ self.name = name
+ self.port = port
+ if protocol is not None:
+ self.protocol = protocol
+
+ @property
+ def app_protocol(self):
+ """Gets the app_protocol of this CoreV1EndpointPort. # noqa: E501
+
+ The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
+
+ :return: The app_protocol of this CoreV1EndpointPort. # noqa: E501
+ :rtype: str
+ """
+ return self._app_protocol
+
+ @app_protocol.setter
+ def app_protocol(self, app_protocol):
+ """Sets the app_protocol of this CoreV1EndpointPort.
+
+ The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
+
+ :param app_protocol: The app_protocol of this CoreV1EndpointPort. # noqa: E501
+ :type: str
+ """
+
+ self._app_protocol = app_protocol
+
+ @property
+ def name(self):
+ """Gets the name of this CoreV1EndpointPort. # noqa: E501
+
+ The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. # noqa: E501
+
+ :return: The name of this CoreV1EndpointPort. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this CoreV1EndpointPort.
+
+ The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. # noqa: E501
+
+ :param name: The name of this CoreV1EndpointPort. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def port(self):
+ """Gets the port of this CoreV1EndpointPort. # noqa: E501
+
+ The port number of the endpoint. # noqa: E501
+
+ :return: The port of this CoreV1EndpointPort. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this CoreV1EndpointPort.
+
+ The port number of the endpoint. # noqa: E501
+
+ :param port: The port of this CoreV1EndpointPort. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
+ raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
+
+ self._port = port
+
+ @property
+ def protocol(self):
+ """Gets the protocol of this CoreV1EndpointPort. # noqa: E501
+
+ The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501
+
+ :return: The protocol of this CoreV1EndpointPort. # noqa: E501
+ :rtype: str
+ """
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, protocol):
+ """Sets the protocol of this CoreV1EndpointPort.
+
+ The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501
+
+ :param protocol: The protocol of this CoreV1EndpointPort. # noqa: E501
+ :type: str
+ """
+
+ self._protocol = protocol
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, CoreV1EndpointPort):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, CoreV1EndpointPort):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/core_v1_event.py b/contrib/python/kubernetes/kubernetes/client/models/core_v1_event.py
new file mode 100644
index 0000000000..2906780298
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/core_v1_event.py
@@ -0,0 +1,562 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class CoreV1Event(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'action': 'str',
+ 'api_version': 'str',
+ 'count': 'int',
+ 'event_time': 'datetime',
+ 'first_timestamp': 'datetime',
+ 'involved_object': 'V1ObjectReference',
+ 'kind': 'str',
+ 'last_timestamp': 'datetime',
+ 'message': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'reason': 'str',
+ 'related': 'V1ObjectReference',
+ 'reporting_component': 'str',
+ 'reporting_instance': 'str',
+ 'series': 'CoreV1EventSeries',
+ 'source': 'V1EventSource',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'action': 'action',
+ 'api_version': 'apiVersion',
+ 'count': 'count',
+ 'event_time': 'eventTime',
+ 'first_timestamp': 'firstTimestamp',
+ 'involved_object': 'involvedObject',
+ 'kind': 'kind',
+ 'last_timestamp': 'lastTimestamp',
+ 'message': 'message',
+ 'metadata': 'metadata',
+ 'reason': 'reason',
+ 'related': 'related',
+ 'reporting_component': 'reportingComponent',
+ 'reporting_instance': 'reportingInstance',
+ 'series': 'series',
+ 'source': 'source',
+ 'type': 'type'
+ }
+
+ def __init__(self, action=None, api_version=None, count=None, event_time=None, first_timestamp=None, involved_object=None, kind=None, last_timestamp=None, message=None, metadata=None, reason=None, related=None, reporting_component=None, reporting_instance=None, series=None, source=None, type=None, local_vars_configuration=None): # noqa: E501
+ """CoreV1Event - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._action = None
+ self._api_version = None
+ self._count = None
+ self._event_time = None
+ self._first_timestamp = None
+ self._involved_object = None
+ self._kind = None
+ self._last_timestamp = None
+ self._message = None
+ self._metadata = None
+ self._reason = None
+ self._related = None
+ self._reporting_component = None
+ self._reporting_instance = None
+ self._series = None
+ self._source = None
+ self._type = None
+ self.discriminator = None
+
+ if action is not None:
+ self.action = action
+ if api_version is not None:
+ self.api_version = api_version
+ if count is not None:
+ self.count = count
+ if event_time is not None:
+ self.event_time = event_time
+ if first_timestamp is not None:
+ self.first_timestamp = first_timestamp
+ self.involved_object = involved_object
+ if kind is not None:
+ self.kind = kind
+ if last_timestamp is not None:
+ self.last_timestamp = last_timestamp
+ if message is not None:
+ self.message = message
+ self.metadata = metadata
+ if reason is not None:
+ self.reason = reason
+ if related is not None:
+ self.related = related
+ if reporting_component is not None:
+ self.reporting_component = reporting_component
+ if reporting_instance is not None:
+ self.reporting_instance = reporting_instance
+ if series is not None:
+ self.series = series
+ if source is not None:
+ self.source = source
+ if type is not None:
+ self.type = type
+
+ @property
+ def action(self):
+ """Gets the action of this CoreV1Event. # noqa: E501
+
+        What action was taken/failed regarding the Regarding object. # noqa: E501
+
+ :return: The action of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._action
+
+ @action.setter
+ def action(self, action):
+ """Sets the action of this CoreV1Event.
+
+        What action was taken/failed regarding the Regarding object. # noqa: E501
+
+ :param action: The action of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._action = action
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this CoreV1Event. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this CoreV1Event.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def count(self):
+ """Gets the count of this CoreV1Event. # noqa: E501
+
+ The number of times this event has occurred. # noqa: E501
+
+ :return: The count of this CoreV1Event. # noqa: E501
+ :rtype: int
+ """
+ return self._count
+
+ @count.setter
+ def count(self, count):
+ """Sets the count of this CoreV1Event.
+
+ The number of times this event has occurred. # noqa: E501
+
+ :param count: The count of this CoreV1Event. # noqa: E501
+ :type: int
+ """
+
+ self._count = count
+
+ @property
+ def event_time(self):
+ """Gets the event_time of this CoreV1Event. # noqa: E501
+
+ Time when this Event was first observed. # noqa: E501
+
+ :return: The event_time of this CoreV1Event. # noqa: E501
+ :rtype: datetime
+ """
+ return self._event_time
+
+ @event_time.setter
+ def event_time(self, event_time):
+ """Sets the event_time of this CoreV1Event.
+
+ Time when this Event was first observed. # noqa: E501
+
+ :param event_time: The event_time of this CoreV1Event. # noqa: E501
+ :type: datetime
+ """
+
+ self._event_time = event_time
+
+ @property
+ def first_timestamp(self):
+ """Gets the first_timestamp of this CoreV1Event. # noqa: E501
+
+ The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) # noqa: E501
+
+ :return: The first_timestamp of this CoreV1Event. # noqa: E501
+ :rtype: datetime
+ """
+ return self._first_timestamp
+
+ @first_timestamp.setter
+ def first_timestamp(self, first_timestamp):
+ """Sets the first_timestamp of this CoreV1Event.
+
+ The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) # noqa: E501
+
+ :param first_timestamp: The first_timestamp of this CoreV1Event. # noqa: E501
+ :type: datetime
+ """
+
+ self._first_timestamp = first_timestamp
+
+ @property
+ def involved_object(self):
+ """Gets the involved_object of this CoreV1Event. # noqa: E501
+
+
+ :return: The involved_object of this CoreV1Event. # noqa: E501
+ :rtype: V1ObjectReference
+ """
+ return self._involved_object
+
+ @involved_object.setter
+ def involved_object(self, involved_object):
+ """Sets the involved_object of this CoreV1Event.
+
+
+ :param involved_object: The involved_object of this CoreV1Event. # noqa: E501
+ :type: V1ObjectReference
+ """
+ if self.local_vars_configuration.client_side_validation and involved_object is None: # noqa: E501
+ raise ValueError("Invalid value for `involved_object`, must not be `None`") # noqa: E501
+
+ self._involved_object = involved_object
+
+ @property
+ def kind(self):
+ """Gets the kind of this CoreV1Event. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this CoreV1Event.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def last_timestamp(self):
+ """Gets the last_timestamp of this CoreV1Event. # noqa: E501
+
+ The time at which the most recent occurrence of this event was recorded. # noqa: E501
+
+ :return: The last_timestamp of this CoreV1Event. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_timestamp
+
+ @last_timestamp.setter
+ def last_timestamp(self, last_timestamp):
+ """Sets the last_timestamp of this CoreV1Event.
+
+ The time at which the most recent occurrence of this event was recorded. # noqa: E501
+
+ :param last_timestamp: The last_timestamp of this CoreV1Event. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_timestamp = last_timestamp
+
+ @property
+ def message(self):
+ """Gets the message of this CoreV1Event. # noqa: E501
+
+ A human-readable description of the status of this operation. # noqa: E501
+
+ :return: The message of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this CoreV1Event.
+
+ A human-readable description of the status of this operation. # noqa: E501
+
+ :param message: The message of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this CoreV1Event. # noqa: E501
+
+
+ :return: The metadata of this CoreV1Event. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this CoreV1Event.
+
+
+ :param metadata: The metadata of this CoreV1Event. # noqa: E501
+ :type: V1ObjectMeta
+ """
+ if self.local_vars_configuration.client_side_validation and metadata is None: # noqa: E501
+ raise ValueError("Invalid value for `metadata`, must not be `None`") # noqa: E501
+
+ self._metadata = metadata
+
+ @property
+ def reason(self):
+ """Gets the reason of this CoreV1Event. # noqa: E501
+
+ This should be a short, machine understandable string that gives the reason for the transition into the object's current status. # noqa: E501
+
+ :return: The reason of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this CoreV1Event.
+
+ This should be a short, machine understandable string that gives the reason for the transition into the object's current status. # noqa: E501
+
+ :param reason: The reason of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def related(self):
+ """Gets the related of this CoreV1Event. # noqa: E501
+
+
+ :return: The related of this CoreV1Event. # noqa: E501
+ :rtype: V1ObjectReference
+ """
+ return self._related
+
+ @related.setter
+ def related(self, related):
+ """Sets the related of this CoreV1Event.
+
+
+ :param related: The related of this CoreV1Event. # noqa: E501
+ :type: V1ObjectReference
+ """
+
+ self._related = related
+
+ @property
+ def reporting_component(self):
+ """Gets the reporting_component of this CoreV1Event. # noqa: E501
+
+ Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. # noqa: E501
+
+ :return: The reporting_component of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._reporting_component
+
+ @reporting_component.setter
+ def reporting_component(self, reporting_component):
+ """Sets the reporting_component of this CoreV1Event.
+
+ Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. # noqa: E501
+
+ :param reporting_component: The reporting_component of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._reporting_component = reporting_component
+
+ @property
+ def reporting_instance(self):
+ """Gets the reporting_instance of this CoreV1Event. # noqa: E501
+
+ ID of the controller instance, e.g. `kubelet-xyzf`. # noqa: E501
+
+ :return: The reporting_instance of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._reporting_instance
+
+ @reporting_instance.setter
+ def reporting_instance(self, reporting_instance):
+ """Sets the reporting_instance of this CoreV1Event.
+
+ ID of the controller instance, e.g. `kubelet-xyzf`. # noqa: E501
+
+ :param reporting_instance: The reporting_instance of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._reporting_instance = reporting_instance
+
+ @property
+ def series(self):
+ """Gets the series of this CoreV1Event. # noqa: E501
+
+
+ :return: The series of this CoreV1Event. # noqa: E501
+ :rtype: CoreV1EventSeries
+ """
+ return self._series
+
+ @series.setter
+ def series(self, series):
+ """Sets the series of this CoreV1Event.
+
+
+ :param series: The series of this CoreV1Event. # noqa: E501
+ :type: CoreV1EventSeries
+ """
+
+ self._series = series
+
+ @property
+ def source(self):
+ """Gets the source of this CoreV1Event. # noqa: E501
+
+
+ :return: The source of this CoreV1Event. # noqa: E501
+ :rtype: V1EventSource
+ """
+ return self._source
+
+ @source.setter
+ def source(self, source):
+ """Sets the source of this CoreV1Event.
+
+
+ :param source: The source of this CoreV1Event. # noqa: E501
+ :type: V1EventSource
+ """
+
+ self._source = source
+
+ @property
+ def type(self):
+ """Gets the type of this CoreV1Event. # noqa: E501
+
+ Type of this event (Normal, Warning), new types could be added in the future # noqa: E501
+
+ :return: The type of this CoreV1Event. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this CoreV1Event.
+
+ Type of this event (Normal, Warning), new types could be added in the future # noqa: E501
+
+ :param type: The type of this CoreV1Event. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, CoreV1Event):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, CoreV1Event):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/core_v1_event_list.py b/contrib/python/kubernetes/kubernetes/client/models/core_v1_event_list.py
new file mode 100644
index 0000000000..8fa1d27e75
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/core_v1_event_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class CoreV1EventList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[CoreV1Event]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """CoreV1EventList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this CoreV1EventList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this CoreV1EventList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this CoreV1EventList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this CoreV1EventList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this CoreV1EventList. # noqa: E501
+
+ List of events # noqa: E501
+
+ :return: The items of this CoreV1EventList. # noqa: E501
+ :rtype: list[CoreV1Event]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this CoreV1EventList.
+
+ List of events # noqa: E501
+
+ :param items: The items of this CoreV1EventList. # noqa: E501
+ :type: list[CoreV1Event]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this CoreV1EventList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this CoreV1EventList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this CoreV1EventList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this CoreV1EventList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this CoreV1EventList. # noqa: E501
+
+
+ :return: The metadata of this CoreV1EventList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this CoreV1EventList.
+
+
+ :param metadata: The metadata of this CoreV1EventList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, CoreV1EventList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, CoreV1EventList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/core_v1_event_series.py b/contrib/python/kubernetes/kubernetes/client/models/core_v1_event_series.py
new file mode 100644
index 0000000000..75d49ee6f4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/core_v1_event_series.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class CoreV1EventSeries(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'count': 'int',
+ 'last_observed_time': 'datetime'
+ }
+
+ attribute_map = {
+ 'count': 'count',
+ 'last_observed_time': 'lastObservedTime'
+ }
+
+ def __init__(self, count=None, last_observed_time=None, local_vars_configuration=None): # noqa: E501
+ """CoreV1EventSeries - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._count = None
+ self._last_observed_time = None
+ self.discriminator = None
+
+ if count is not None:
+ self.count = count
+ if last_observed_time is not None:
+ self.last_observed_time = last_observed_time
+
+ @property
+ def count(self):
+ """Gets the count of this CoreV1EventSeries. # noqa: E501
+
+ Number of occurrences in this series up to the last heartbeat time # noqa: E501
+
+ :return: The count of this CoreV1EventSeries. # noqa: E501
+ :rtype: int
+ """
+ return self._count
+
+ @count.setter
+ def count(self, count):
+ """Sets the count of this CoreV1EventSeries.
+
+ Number of occurrences in this series up to the last heartbeat time # noqa: E501
+
+ :param count: The count of this CoreV1EventSeries. # noqa: E501
+ :type: int
+ """
+
+ self._count = count
+
+ @property
+ def last_observed_time(self):
+ """Gets the last_observed_time of this CoreV1EventSeries. # noqa: E501
+
+ Time of the last occurrence observed # noqa: E501
+
+ :return: The last_observed_time of this CoreV1EventSeries. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_observed_time
+
+ @last_observed_time.setter
+ def last_observed_time(self, last_observed_time):
+ """Sets the last_observed_time of this CoreV1EventSeries.
+
+ Time of the last occurrence observed # noqa: E501
+
+ :param last_observed_time: The last_observed_time of this CoreV1EventSeries. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_observed_time = last_observed_time
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, CoreV1EventSeries):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, CoreV1EventSeries):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/discovery_v1_endpoint_port.py b/contrib/python/kubernetes/kubernetes/client/models/discovery_v1_endpoint_port.py
new file mode 100644
index 0000000000..4fb9fccbc1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/discovery_v1_endpoint_port.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class DiscoveryV1EndpointPort(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'app_protocol': 'str',
+ 'name': 'str',
+ 'port': 'int',
+ 'protocol': 'str'
+ }
+
+ attribute_map = {
+ 'app_protocol': 'appProtocol',
+ 'name': 'name',
+ 'port': 'port',
+ 'protocol': 'protocol'
+ }
+
+ def __init__(self, app_protocol=None, name=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501
+ """DiscoveryV1EndpointPort - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._app_protocol = None
+ self._name = None
+ self._port = None
+ self._protocol = None
+ self.discriminator = None
+
+ if app_protocol is not None:
+ self.app_protocol = app_protocol
+ if name is not None:
+ self.name = name
+ if port is not None:
+ self.port = port
+ if protocol is not None:
+ self.protocol = protocol
+
+ @property
+ def app_protocol(self):
+ """Gets the app_protocol of this DiscoveryV1EndpointPort. # noqa: E501
+
+ The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
+
+ :return: The app_protocol of this DiscoveryV1EndpointPort. # noqa: E501
+ :rtype: str
+ """
+ return self._app_protocol
+
+ @app_protocol.setter
+ def app_protocol(self, app_protocol):
+ """Sets the app_protocol of this DiscoveryV1EndpointPort.
+
+ The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol. # noqa: E501
+
+ :param app_protocol: The app_protocol of this DiscoveryV1EndpointPort. # noqa: E501
+ :type: str
+ """
+
+ self._app_protocol = app_protocol
+
+ @property
+ def name(self):
+ """Gets the name of this DiscoveryV1EndpointPort. # noqa: E501
+
+        name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string. # noqa: E501
+
+ :return: The name of this DiscoveryV1EndpointPort. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this DiscoveryV1EndpointPort.
+
+        name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string. # noqa: E501
+
+ :param name: The name of this DiscoveryV1EndpointPort. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def port(self):
+ """Gets the port of this DiscoveryV1EndpointPort. # noqa: E501
+
+ port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer. # noqa: E501
+
+ :return: The port of this DiscoveryV1EndpointPort. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this DiscoveryV1EndpointPort.
+
+ port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer. # noqa: E501
+
+ :param port: The port of this DiscoveryV1EndpointPort. # noqa: E501
+ :type: int
+ """
+
+ self._port = port
+
+ @property
+ def protocol(self):
+ """Gets the protocol of this DiscoveryV1EndpointPort. # noqa: E501
+
+ protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501
+
+ :return: The protocol of this DiscoveryV1EndpointPort. # noqa: E501
+ :rtype: str
+ """
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, protocol):
+ """Sets the protocol of this DiscoveryV1EndpointPort.
+
+ protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. # noqa: E501
+
+ :param protocol: The protocol of this DiscoveryV1EndpointPort. # noqa: E501
+ :type: str
+ """
+
+ self._protocol = protocol
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, DiscoveryV1EndpointPort):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, DiscoveryV1EndpointPort):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/events_v1_event.py b/contrib/python/kubernetes/kubernetes/client/models/events_v1_event.py
new file mode 100644
index 0000000000..04e41f3832
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/events_v1_event.py
@@ -0,0 +1,561 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class EventsV1Event(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'action': 'str',
+        'api_version': 'str',
+        'deprecated_count': 'int',
+        'deprecated_first_timestamp': 'datetime',
+        'deprecated_last_timestamp': 'datetime',
+        'deprecated_source': 'V1EventSource',
+        'event_time': 'datetime',
+        'kind': 'str',
+        'metadata': 'V1ObjectMeta',
+        'note': 'str',
+        'reason': 'str',
+        'regarding': 'V1ObjectReference',
+        'related': 'V1ObjectReference',
+        'reporting_controller': 'str',
+        'reporting_instance': 'str',
+        'series': 'EventsV1EventSeries',
+        'type': 'str'
+    }
+
+    attribute_map = {
+        'action': 'action',
+        'api_version': 'apiVersion',
+        'deprecated_count': 'deprecatedCount',
+        'deprecated_first_timestamp': 'deprecatedFirstTimestamp',
+        'deprecated_last_timestamp': 'deprecatedLastTimestamp',
+        'deprecated_source': 'deprecatedSource',
+        'event_time': 'eventTime',
+        'kind': 'kind',
+        'metadata': 'metadata',
+        'note': 'note',
+        'reason': 'reason',
+        'regarding': 'regarding',
+        'related': 'related',
+        'reporting_controller': 'reportingController',
+        'reporting_instance': 'reportingInstance',
+        'series': 'series',
+        'type': 'type'
+    }
+
+    def __init__(self, action=None, api_version=None, deprecated_count=None, deprecated_first_timestamp=None, deprecated_last_timestamp=None, deprecated_source=None, event_time=None, kind=None, metadata=None, note=None, reason=None, regarding=None, related=None, reporting_controller=None, reporting_instance=None, series=None, type=None, local_vars_configuration=None):  # noqa: E501
+        """EventsV1Event - a model defined in OpenAPI (event_time is the only required field)"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._action = None
+        self._api_version = None
+        self._deprecated_count = None
+        self._deprecated_first_timestamp = None
+        self._deprecated_last_timestamp = None
+        self._deprecated_source = None
+        self._event_time = None
+        self._kind = None
+        self._metadata = None
+        self._note = None
+        self._reason = None
+        self._regarding = None
+        self._related = None
+        self._reporting_controller = None
+        self._reporting_instance = None
+        self._series = None
+        self._type = None
+        self.discriminator = None
+
+        if action is not None:
+            self.action = action
+        if api_version is not None:
+            self.api_version = api_version
+        if deprecated_count is not None:
+            self.deprecated_count = deprecated_count
+        if deprecated_first_timestamp is not None:
+            self.deprecated_first_timestamp = deprecated_first_timestamp
+        if deprecated_last_timestamp is not None:
+            self.deprecated_last_timestamp = deprecated_last_timestamp
+        if deprecated_source is not None:
+            self.deprecated_source = deprecated_source
+        self.event_time = event_time  # required: setter rejects None under client-side validation
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+        if note is not None:
+            self.note = note
+        if reason is not None:
+            self.reason = reason
+        if regarding is not None:
+            self.regarding = regarding
+        if related is not None:
+            self.related = related
+        if reporting_controller is not None:
+            self.reporting_controller = reporting_controller
+        if reporting_instance is not None:
+            self.reporting_instance = reporting_instance
+        if series is not None:
+            self.series = series
+        if type is not None:
+            self.type = type
+
+    @property
+    def action(self):
+        """Gets the action of this EventsV1Event.  # noqa: E501
+
+        action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters. # noqa: E501
+
+        :return: The action of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._action
+
+    @action.setter
+    def action(self, action):
+        """Sets the action of this EventsV1Event.
+
+        action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters. # noqa: E501
+
+        :param action: The action of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._action = action
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this EventsV1Event.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+        :return: The api_version of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this EventsV1Event.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+        :param api_version: The api_version of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def deprecated_count(self):
+        """Gets the deprecated_count of this EventsV1Event.  # noqa: E501
+
+        deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. # noqa: E501
+
+        :return: The deprecated_count of this EventsV1Event. # noqa: E501
+        :rtype: int
+        """
+        return self._deprecated_count
+
+    @deprecated_count.setter
+    def deprecated_count(self, deprecated_count):
+        """Sets the deprecated_count of this EventsV1Event.
+
+        deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. # noqa: E501
+
+        :param deprecated_count: The deprecated_count of this EventsV1Event. # noqa: E501
+        :type: int
+        """
+
+        self._deprecated_count = deprecated_count
+
+    @property
+    def deprecated_first_timestamp(self):
+        """Gets the deprecated_first_timestamp of this EventsV1Event.  # noqa: E501
+
+        deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. # noqa: E501
+
+        :return: The deprecated_first_timestamp of this EventsV1Event. # noqa: E501
+        :rtype: datetime
+        """
+        return self._deprecated_first_timestamp
+
+    @deprecated_first_timestamp.setter
+    def deprecated_first_timestamp(self, deprecated_first_timestamp):
+        """Sets the deprecated_first_timestamp of this EventsV1Event.
+
+        deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. # noqa: E501
+
+        :param deprecated_first_timestamp: The deprecated_first_timestamp of this EventsV1Event. # noqa: E501
+        :type: datetime
+        """
+
+        self._deprecated_first_timestamp = deprecated_first_timestamp
+
+    @property
+    def deprecated_last_timestamp(self):
+        """Gets the deprecated_last_timestamp of this EventsV1Event.  # noqa: E501
+
+        deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. # noqa: E501
+
+        :return: The deprecated_last_timestamp of this EventsV1Event. # noqa: E501
+        :rtype: datetime
+        """
+        return self._deprecated_last_timestamp
+
+    @deprecated_last_timestamp.setter
+    def deprecated_last_timestamp(self, deprecated_last_timestamp):
+        """Sets the deprecated_last_timestamp of this EventsV1Event.
+
+        deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. # noqa: E501
+
+        :param deprecated_last_timestamp: The deprecated_last_timestamp of this EventsV1Event. # noqa: E501
+        :type: datetime
+        """
+
+        self._deprecated_last_timestamp = deprecated_last_timestamp
+
+    @property
+    def deprecated_source(self):
+        """Gets the deprecated_source of this EventsV1Event.  # noqa: E501
+
+        Nested V1EventSource model (no field description generated). # noqa: E501
+
+        :return: The deprecated_source of this EventsV1Event. # noqa: E501
+        :rtype: V1EventSource
+        """
+        return self._deprecated_source
+
+    @deprecated_source.setter
+    def deprecated_source(self, deprecated_source):
+        """Sets the deprecated_source of this EventsV1Event.
+
+
+        :param deprecated_source: The deprecated_source of this EventsV1Event. # noqa: E501
+        :type: V1EventSource
+        """
+
+        self._deprecated_source = deprecated_source
+
+    @property
+    def event_time(self):
+        """Gets the event_time of this EventsV1Event.  # noqa: E501
+
+        eventTime is the time when this Event was first observed. It is required. # noqa: E501
+
+        :return: The event_time of this EventsV1Event. # noqa: E501
+        :rtype: datetime
+        """
+        return self._event_time
+
+    @event_time.setter
+    def event_time(self, event_time):
+        """Sets the event_time of this EventsV1Event.
+
+        eventTime is the time when this Event was first observed. It is required. # noqa: E501
+
+        :param event_time: The event_time of this EventsV1Event. # noqa: E501
+        :type: datetime
+        """
+        if self.local_vars_configuration.client_side_validation and event_time is None:  # noqa: E501
+            raise ValueError("Invalid value for `event_time`, must not be `None`")  # noqa: E501
+
+        self._event_time = event_time
+
+    @property
+    def kind(self):
+        """Gets the kind of this EventsV1Event.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+        :return: The kind of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this EventsV1Event.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+        :param kind: The kind of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this EventsV1Event.  # noqa: E501
+
+        Nested V1ObjectMeta model (no field description generated). # noqa: E501
+
+        :return: The metadata of this EventsV1Event. # noqa: E501
+        :rtype: V1ObjectMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this EventsV1Event.
+
+
+        :param metadata: The metadata of this EventsV1Event. # noqa: E501
+        :type: V1ObjectMeta
+        """
+
+        self._metadata = metadata
+
+    @property
+    def note(self):
+        """Gets the note of this EventsV1Event.  # noqa: E501
+
+        note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB. # noqa: E501
+
+        :return: The note of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._note
+
+    @note.setter
+    def note(self, note):
+        """Sets the note of this EventsV1Event.
+
+        note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB. # noqa: E501
+
+        :param note: The note of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._note = note
+
+    @property
+    def reason(self):
+        """Gets the reason of this EventsV1Event.  # noqa: E501
+
+        reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters. # noqa: E501
+
+        :return: The reason of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._reason
+
+    @reason.setter
+    def reason(self, reason):
+        """Sets the reason of this EventsV1Event.
+
+        reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters. # noqa: E501
+
+        :param reason: The reason of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._reason = reason
+
+    @property
+    def regarding(self):
+        """Gets the regarding of this EventsV1Event.  # noqa: E501
+
+        Nested V1ObjectReference model (no field description generated). # noqa: E501
+
+        :return: The regarding of this EventsV1Event. # noqa: E501
+        :rtype: V1ObjectReference
+        """
+        return self._regarding
+
+    @regarding.setter
+    def regarding(self, regarding):
+        """Sets the regarding of this EventsV1Event.
+
+
+        :param regarding: The regarding of this EventsV1Event. # noqa: E501
+        :type: V1ObjectReference
+        """
+
+        self._regarding = regarding
+
+    @property
+    def related(self):
+        """Gets the related of this EventsV1Event.  # noqa: E501
+
+        Nested V1ObjectReference model (no field description generated). # noqa: E501
+
+        :return: The related of this EventsV1Event. # noqa: E501
+        :rtype: V1ObjectReference
+        """
+        return self._related
+
+    @related.setter
+    def related(self, related):
+        """Sets the related of this EventsV1Event.
+
+
+        :param related: The related of this EventsV1Event. # noqa: E501
+        :type: V1ObjectReference
+        """
+
+        self._related = related
+
+    @property
+    def reporting_controller(self):
+        """Gets the reporting_controller of this EventsV1Event.  # noqa: E501
+
+        reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events. # noqa: E501
+
+        :return: The reporting_controller of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._reporting_controller
+
+    @reporting_controller.setter
+    def reporting_controller(self, reporting_controller):
+        """Sets the reporting_controller of this EventsV1Event.
+
+        reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events. # noqa: E501
+
+        :param reporting_controller: The reporting_controller of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._reporting_controller = reporting_controller
+
+    @property
+    def reporting_instance(self):
+        """Gets the reporting_instance of this EventsV1Event.  # noqa: E501
+
+        reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters. # noqa: E501
+
+        :return: The reporting_instance of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._reporting_instance
+
+    @reporting_instance.setter
+    def reporting_instance(self, reporting_instance):
+        """Sets the reporting_instance of this EventsV1Event.
+
+        reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters. # noqa: E501
+
+        :param reporting_instance: The reporting_instance of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._reporting_instance = reporting_instance
+
+    @property
+    def series(self):
+        """Gets the series of this EventsV1Event.  # noqa: E501
+
+        Nested EventsV1EventSeries model (no field description generated). # noqa: E501
+
+        :return: The series of this EventsV1Event. # noqa: E501
+        :rtype: EventsV1EventSeries
+        """
+        return self._series
+
+    @series.setter
+    def series(self, series):
+        """Sets the series of this EventsV1Event.
+
+
+        :param series: The series of this EventsV1Event. # noqa: E501
+        :type: EventsV1EventSeries
+        """
+
+        self._series = series
+
+    @property
+    def type(self):
+        """Gets the type of this EventsV1Event.  # noqa: E501
+
+        type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events. # noqa: E501
+
+        :return: The type of this EventsV1Event. # noqa: E501
+        :rtype: str
+        """
+        return self._type
+
+    @type.setter
+    def type(self, type):
+        """Sets the type of this EventsV1Event.
+
+        type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events. # noqa: E501
+
+        :param type: The type of this EventsV1Event. # noqa: E501
+        :type: str
+        """
+
+        self._type = type
+
+    def to_dict(self):
+        """Returns the model properties as a dict, recursing into nested models via to_dict()"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):  # serialize each element that is itself a model
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):  # serialize model-typed dict values, keep keys as-is
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model (pretty-printed to_dict() output)"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`; delegates to to_str()"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal (compared field-by-field via to_dict())"""
+        if not isinstance(other, EventsV1Event):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal (non-EventsV1Event objects are always unequal)"""
+        if not isinstance(other, EventsV1Event):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/events_v1_event_list.py b/contrib/python/kubernetes/kubernetes/client/models/events_v1_event_list.py
new file mode 100644
index 0000000000..05323c8a95
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/events_v1_event_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class EventsV1EventList(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'items': 'list[EventsV1Event]',
+        'kind': 'str',
+        'metadata': 'V1ListMeta'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'items': 'items',
+        'kind': 'kind',
+        'metadata': 'metadata'
+    }
+
+    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
+        """EventsV1EventList - a model defined in OpenAPI (items is the only required field)"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._items = None
+        self._kind = None
+        self._metadata = None
+        self.discriminator = None
+
+        if api_version is not None:
+            self.api_version = api_version
+        self.items = items  # required: setter rejects None under client-side validation
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this EventsV1EventList.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+        :return: The api_version of this EventsV1EventList. # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this EventsV1EventList.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+        :param api_version: The api_version of this EventsV1EventList. # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def items(self):
+        """Gets the items of this EventsV1EventList.  # noqa: E501
+
+        items is a list of schema objects. # noqa: E501
+
+        :return: The items of this EventsV1EventList. # noqa: E501
+        :rtype: list[EventsV1Event]
+        """
+        return self._items
+
+    @items.setter
+    def items(self, items):
+        """Sets the items of this EventsV1EventList.
+
+        items is a list of schema objects. # noqa: E501
+
+        :param items: The items of this EventsV1EventList. # noqa: E501
+        :type: list[EventsV1Event]
+        """
+        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
+            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
+
+        self._items = items
+
+    @property
+    def kind(self):
+        """Gets the kind of this EventsV1EventList.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+        :return: The kind of this EventsV1EventList. # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this EventsV1EventList.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+        :param kind: The kind of this EventsV1EventList. # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this EventsV1EventList.  # noqa: E501
+
+        Nested V1ListMeta model (no field description generated). # noqa: E501
+
+        :return: The metadata of this EventsV1EventList. # noqa: E501
+        :rtype: V1ListMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this EventsV1EventList.
+
+
+        :param metadata: The metadata of this EventsV1EventList. # noqa: E501
+        :type: V1ListMeta
+        """
+
+        self._metadata = metadata
+
+    def to_dict(self):
+        """Returns the model properties as a dict, recursing into nested models via to_dict()"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):  # serialize each element that is itself a model
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):  # serialize model-typed dict values, keep keys as-is
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model (pretty-printed to_dict() output)"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`; delegates to to_str()"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal (compared field-by-field via to_dict())"""
+        if not isinstance(other, EventsV1EventList):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal (non-EventsV1EventList objects are always unequal)"""
+        if not isinstance(other, EventsV1EventList):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/events_v1_event_series.py b/contrib/python/kubernetes/kubernetes/client/models/events_v1_event_series.py
new file mode 100644
index 0000000000..e7e4d88946
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/events_v1_event_series.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class EventsV1EventSeries(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'count': 'int',
+        'last_observed_time': 'datetime'
+    }
+
+    attribute_map = {
+        'count': 'count',
+        'last_observed_time': 'lastObservedTime'
+    }
+
+    def __init__(self, count=None, last_observed_time=None, local_vars_configuration=None):  # noqa: E501
+        """EventsV1EventSeries - a model defined in OpenAPI (both fields are required)"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._count = None
+        self._last_observed_time = None
+        self.discriminator = None
+
+        self.count = count  # required: setter rejects None under client-side validation
+        self.last_observed_time = last_observed_time  # required: setter rejects None under client-side validation
+
+    @property
+    def count(self):
+        """Gets the count of this EventsV1EventSeries.  # noqa: E501
+
+        count is the number of occurrences in this series up to the last heartbeat time. # noqa: E501
+
+        :return: The count of this EventsV1EventSeries. # noqa: E501
+        :rtype: int
+        """
+        return self._count
+
+    @count.setter
+    def count(self, count):
+        """Sets the count of this EventsV1EventSeries.
+
+        count is the number of occurrences in this series up to the last heartbeat time. # noqa: E501
+
+        :param count: The count of this EventsV1EventSeries. # noqa: E501
+        :type: int
+        """
+        if self.local_vars_configuration.client_side_validation and count is None:  # noqa: E501
+            raise ValueError("Invalid value for `count`, must not be `None`")  # noqa: E501
+
+        self._count = count
+
+    @property
+    def last_observed_time(self):
+        """Gets the last_observed_time of this EventsV1EventSeries.  # noqa: E501
+
+        lastObservedTime is the time when last Event from the series was seen before last heartbeat. # noqa: E501
+
+        :return: The last_observed_time of this EventsV1EventSeries. # noqa: E501
+        :rtype: datetime
+        """
+        return self._last_observed_time
+
+    @last_observed_time.setter
+    def last_observed_time(self, last_observed_time):
+        """Sets the last_observed_time of this EventsV1EventSeries.
+
+        lastObservedTime is the time when last Event from the series was seen before last heartbeat. # noqa: E501
+
+        :param last_observed_time: The last_observed_time of this EventsV1EventSeries. # noqa: E501
+        :type: datetime
+        """
+        if self.local_vars_configuration.client_side_validation and last_observed_time is None:  # noqa: E501
+            raise ValueError("Invalid value for `last_observed_time`, must not be `None`")  # noqa: E501
+
+        self._last_observed_time = last_observed_time
+
+    def to_dict(self):
+        """Returns the model properties as a dict, recursing into nested models via to_dict()"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):  # serialize each element that is itself a model
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):  # serialize model-typed dict values, keep keys as-is
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model (pretty-printed to_dict() output)"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`; delegates to to_str()"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal (compared field-by-field via to_dict())"""
+        if not isinstance(other, EventsV1EventSeries):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal (non-EventsV1EventSeries objects are always unequal)"""
+        if not isinstance(other, EventsV1EventSeries):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/storage_v1_token_request.py b/contrib/python/kubernetes/kubernetes/client/models/storage_v1_token_request.py
new file mode 100644
index 0000000000..d12cb8d8fa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/storage_v1_token_request.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class StorageV1TokenRequest(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'audience': 'str',
+        'expiration_seconds': 'int'
+    }
+
+    attribute_map = {
+        'audience': 'audience',
+        'expiration_seconds': 'expirationSeconds'
+    }
+
+    def __init__(self, audience=None, expiration_seconds=None, local_vars_configuration=None):  # noqa: E501
+        """StorageV1TokenRequest - a model defined in OpenAPI (audience is the only required field)"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._audience = None
+        self._expiration_seconds = None
+        self.discriminator = None
+
+        self.audience = audience  # required: setter rejects None under client-side validation
+        if expiration_seconds is not None:
+            self.expiration_seconds = expiration_seconds
+
+    @property
+    def audience(self):
+        """Gets the audience of this StorageV1TokenRequest.  # noqa: E501
+
+        audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver. # noqa: E501
+
+        :return: The audience of this StorageV1TokenRequest. # noqa: E501
+        :rtype: str
+        """
+        return self._audience
+
+    @audience.setter
+    def audience(self, audience):
+        """Sets the audience of this StorageV1TokenRequest.
+
+        audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver. # noqa: E501
+
+        :param audience: The audience of this StorageV1TokenRequest. # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and audience is None:  # noqa: E501
+            raise ValueError("Invalid value for `audience`, must not be `None`")  # noqa: E501
+
+        self._audience = audience
+
+    @property
+    def expiration_seconds(self):
+        """Gets the expiration_seconds of this StorageV1TokenRequest.  # noqa: E501
+
+        expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\". # noqa: E501
+
+        :return: The expiration_seconds of this StorageV1TokenRequest. # noqa: E501
+        :rtype: int
+        """
+        return self._expiration_seconds
+
+    @expiration_seconds.setter
+    def expiration_seconds(self, expiration_seconds):
+        """Sets the expiration_seconds of this StorageV1TokenRequest.
+
+        expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\". # noqa: E501
+
+        :param expiration_seconds: The expiration_seconds of this StorageV1TokenRequest. # noqa: E501
+        :type: int
+        """
+
+        self._expiration_seconds = expiration_seconds
+
+    def to_dict(self):
+        """Returns the model properties as a dict, recursing into nested models via to_dict()"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):  # serialize each element that is itself a model
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):  # serialize model-typed dict values, keep keys as-is
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model (pretty-printed to_dict() output)"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`; delegates to to_str()"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal (compared field-by-field via to_dict())"""
+        if not isinstance(other, StorageV1TokenRequest):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal (non-StorageV1TokenRequest objects are always unequal)"""
+        if not isinstance(other, StorageV1TokenRequest):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_affinity.py b/contrib/python/kubernetes/kubernetes/client/models/v1_affinity.py
new file mode 100644
index 0000000000..f5ac4307e0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_affinity.py
@@ -0,0 +1,172 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Affinity(object):
    """Model grouping node / pod / pod-anti affinity scheduling rules.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type of the attribute.
    openapi_types = {
        'node_affinity': 'V1NodeAffinity',
        'pod_affinity': 'V1PodAffinity',
        'pod_anti_affinity': 'V1PodAntiAffinity'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'node_affinity': 'nodeAffinity',
        'pod_affinity': 'podAffinity',
        'pod_anti_affinity': 'podAntiAffinity'
    }

    def __init__(self, node_affinity=None, pod_affinity=None, pod_anti_affinity=None, local_vars_configuration=None):  # noqa: E501
        """V1Affinity - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._node_affinity = None
        self._pod_affinity = None
        self._pod_anti_affinity = None
        self.discriminator = None

        # Assign only the optional fields that were explicitly provided,
        # routing through the property setters.
        for field, arg in (('node_affinity', node_affinity),
                           ('pod_affinity', pod_affinity),
                           ('pod_anti_affinity', pod_anti_affinity)):
            if arg is not None:
                setattr(self, field, arg)

    @property
    def node_affinity(self):
        """Get the node_affinity of this V1Affinity.  # noqa: E501

        :return: The node_affinity of this V1Affinity.  # noqa: E501
        :rtype: V1NodeAffinity
        """
        return self._node_affinity

    @node_affinity.setter
    def node_affinity(self, node_affinity):
        """Set the node_affinity of this V1Affinity.

        :param node_affinity: The node_affinity of this V1Affinity.  # noqa: E501
        :type: V1NodeAffinity
        """
        self._node_affinity = node_affinity

    @property
    def pod_affinity(self):
        """Get the pod_affinity of this V1Affinity.  # noqa: E501

        :return: The pod_affinity of this V1Affinity.  # noqa: E501
        :rtype: V1PodAffinity
        """
        return self._pod_affinity

    @pod_affinity.setter
    def pod_affinity(self, pod_affinity):
        """Set the pod_affinity of this V1Affinity.

        :param pod_affinity: The pod_affinity of this V1Affinity.  # noqa: E501
        :type: V1PodAffinity
        """
        self._pod_affinity = pod_affinity

    @property
    def pod_anti_affinity(self):
        """Get the pod_anti_affinity of this V1Affinity.  # noqa: E501

        :return: The pod_anti_affinity of this V1Affinity.  # noqa: E501
        :rtype: V1PodAntiAffinity
        """
        return self._pod_anti_affinity

    @pod_anti_affinity.setter
    def pod_anti_affinity(self, pod_anti_affinity):
        """Set the pod_anti_affinity of this V1Affinity.

        :param pod_anti_affinity: The pod_anti_affinity of this V1Affinity.  # noqa: E501
        :type: V1PodAntiAffinity
        """
        self._pod_anti_affinity = pod_anti_affinity

    def to_dict(self):
        """Return the model's properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep, inside both list and dict valued attributes.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is an equal V1Affinity."""
        return (isinstance(other, V1Affinity)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when the objects are not equal (negation of __eq__)."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_aggregation_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_aggregation_rule.py
new file mode 100644
index 0000000000..974a834efb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_aggregation_rule.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1AggregationRule(object):
    """Model holding selectors used to aggregate ClusterRole rules.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type of the attribute.
    openapi_types = {
        'cluster_role_selectors': 'list[V1LabelSelector]'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'cluster_role_selectors': 'clusterRoleSelectors'
    }

    def __init__(self, cluster_role_selectors=None, local_vars_configuration=None):  # noqa: E501
        """V1AggregationRule - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._cluster_role_selectors = None
        self.discriminator = None

        # Optional field: only assign when explicitly provided.
        if cluster_role_selectors is not None:
            self.cluster_role_selectors = cluster_role_selectors

    @property
    def cluster_role_selectors(self):
        """Get the cluster_role_selectors of this V1AggregationRule.  # noqa: E501

        ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added  # noqa: E501

        :return: The cluster_role_selectors of this V1AggregationRule.  # noqa: E501
        :rtype: list[V1LabelSelector]
        """
        return self._cluster_role_selectors

    @cluster_role_selectors.setter
    def cluster_role_selectors(self, cluster_role_selectors):
        """Set the cluster_role_selectors of this V1AggregationRule.

        ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added  # noqa: E501

        :param cluster_role_selectors: The cluster_role_selectors of this V1AggregationRule.  # noqa: E501
        :type: list[V1LabelSelector]
        """
        self._cluster_role_selectors = cluster_role_selectors

    def to_dict(self):
        """Return the model's properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep, inside both list and dict valued attributes.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is an equal V1AggregationRule."""
        return (isinstance(other, V1AggregationRule)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when the objects are not equal (negation of __eq__)."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_group.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_group.py
new file mode 100644
index 0000000000..e1ac4aa7ca
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_group.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIGroup(object):
    """Model describing one API group: its name, versions and server CIDR map.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type of the attribute.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'name': 'str',
        'preferred_version': 'V1GroupVersionForDiscovery',
        'server_address_by_client_cid_rs': 'list[V1ServerAddressByClientCIDR]',
        'versions': 'list[V1GroupVersionForDiscovery]'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'name': 'name',
        'preferred_version': 'preferredVersion',
        'server_address_by_client_cid_rs': 'serverAddressByClientCIDRs',
        'versions': 'versions'
    }

    def __init__(self, api_version=None, kind=None, name=None, preferred_version=None, server_address_by_client_cid_rs=None, versions=None, local_vars_configuration=None):  # noqa: E501
        """V1APIGroup - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._name = None
        self._preferred_version = None
        self._server_address_by_client_cid_rs = None
        self._versions = None
        self.discriminator = None

        # Optional fields are assigned only when provided; required fields
        # (`name`, `versions`) always pass through their validating setters.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        self.name = name
        if preferred_version is not None:
            self.preferred_version = preferred_version
        if server_address_by_client_cid_rs is not None:
            self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
        self.versions = versions

    @property
    def api_version(self):
        """Get the api_version of this V1APIGroup.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1APIGroup.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1APIGroup.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1APIGroup.  # noqa: E501
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Get the kind of this V1APIGroup.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1APIGroup.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1APIGroup.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1APIGroup.  # noqa: E501
        :type: str
        """
        self._kind = kind

    @property
    def name(self):
        """Get the name of this V1APIGroup.  # noqa: E501

        name is the name of the group.  # noqa: E501

        :return: The name of this V1APIGroup.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this V1APIGroup.

        name is the name of the group.  # noqa: E501

        :param name: The name of this V1APIGroup.  # noqa: E501
        :type: str
        :raises ValueError: when client-side validation is on and name is None
        """
        cfg = self.local_vars_configuration
        if cfg.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def preferred_version(self):
        """Get the preferred_version of this V1APIGroup.  # noqa: E501

        :return: The preferred_version of this V1APIGroup.  # noqa: E501
        :rtype: V1GroupVersionForDiscovery
        """
        return self._preferred_version

    @preferred_version.setter
    def preferred_version(self, preferred_version):
        """Set the preferred_version of this V1APIGroup.

        :param preferred_version: The preferred_version of this V1APIGroup.  # noqa: E501
        :type: V1GroupVersionForDiscovery
        """
        self._preferred_version = preferred_version

    @property
    def server_address_by_client_cid_rs(self):
        """Get the server_address_by_client_cid_rs of this V1APIGroup.  # noqa: E501

        a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.  # noqa: E501

        :return: The server_address_by_client_cid_rs of this V1APIGroup.  # noqa: E501
        :rtype: list[V1ServerAddressByClientCIDR]
        """
        return self._server_address_by_client_cid_rs

    @server_address_by_client_cid_rs.setter
    def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
        """Set the server_address_by_client_cid_rs of this V1APIGroup.

        a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.  # noqa: E501

        :param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup.  # noqa: E501
        :type: list[V1ServerAddressByClientCIDR]
        """
        self._server_address_by_client_cid_rs = server_address_by_client_cid_rs

    @property
    def versions(self):
        """Get the versions of this V1APIGroup.  # noqa: E501

        versions are the versions supported in this group.  # noqa: E501

        :return: The versions of this V1APIGroup.  # noqa: E501
        :rtype: list[V1GroupVersionForDiscovery]
        """
        return self._versions

    @versions.setter
    def versions(self, versions):
        """Set the versions of this V1APIGroup.

        versions are the versions supported in this group.  # noqa: E501

        :param versions: The versions of this V1APIGroup.  # noqa: E501
        :type: list[V1GroupVersionForDiscovery]
        :raises ValueError: when client-side validation is on and versions is None
        """
        cfg = self.local_vars_configuration
        if cfg.client_side_validation and versions is None:  # noqa: E501
            raise ValueError("Invalid value for `versions`, must not be `None`")  # noqa: E501
        self._versions = versions

    def to_dict(self):
        """Return the model's properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep, inside both list and dict valued attributes.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is an equal V1APIGroup."""
        return (isinstance(other, V1APIGroup)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when the objects are not equal (negation of __eq__)."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_group_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_group_list.py
new file mode 100644
index 0000000000..3b9e66a2f0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_group_list.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIGroupList(object):
    """Model wrapping a list of V1APIGroup entries plus kind/apiVersion.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type of the attribute.
    openapi_types = {
        'api_version': 'str',
        'groups': 'list[V1APIGroup]',
        'kind': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'groups': 'groups',
        'kind': 'kind'
    }

    def __init__(self, api_version=None, groups=None, kind=None, local_vars_configuration=None):  # noqa: E501
        """V1APIGroupList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._groups = None
        self._kind = None
        self.discriminator = None

        # `groups` is required and always routed through its validating
        # setter; the other fields are assigned only when provided.
        if api_version is not None:
            self.api_version = api_version
        self.groups = groups
        if kind is not None:
            self.kind = kind

    @property
    def api_version(self):
        """Get the api_version of this V1APIGroupList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1APIGroupList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1APIGroupList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1APIGroupList.  # noqa: E501
        :type: str
        """
        self._api_version = api_version

    @property
    def groups(self):
        """Get the groups of this V1APIGroupList.  # noqa: E501

        groups is a list of APIGroup.  # noqa: E501

        :return: The groups of this V1APIGroupList.  # noqa: E501
        :rtype: list[V1APIGroup]
        """
        return self._groups

    @groups.setter
    def groups(self, groups):
        """Set the groups of this V1APIGroupList.

        groups is a list of APIGroup.  # noqa: E501

        :param groups: The groups of this V1APIGroupList.  # noqa: E501
        :type: list[V1APIGroup]
        :raises ValueError: when client-side validation is on and groups is None
        """
        cfg = self.local_vars_configuration
        if cfg.client_side_validation and groups is None:  # noqa: E501
            raise ValueError("Invalid value for `groups`, must not be `None`")  # noqa: E501
        self._groups = groups

    @property
    def kind(self):
        """Get the kind of this V1APIGroupList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1APIGroupList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1APIGroupList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1APIGroupList.  # noqa: E501
        :type: str
        """
        self._kind = kind

    def to_dict(self):
        """Return the model's properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep, inside both list and dict valued attributes.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is an equal V1APIGroupList."""
        return (isinstance(other, V1APIGroupList)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when the objects are not equal (negation of __eq__)."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_resource.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_resource.py
new file mode 100644
index 0000000000..9a087eee01
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_resource.py
@@ -0,0 +1,379 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1APIResource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'categories': 'list[str]',
+ 'group': 'str',
+ 'kind': 'str',
+ 'name': 'str',
+ 'namespaced': 'bool',
+ 'short_names': 'list[str]',
+ 'singular_name': 'str',
+ 'storage_version_hash': 'str',
+ 'verbs': 'list[str]',
+ 'version': 'str'
+ }
+
+ attribute_map = {
+ 'categories': 'categories',
+ 'group': 'group',
+ 'kind': 'kind',
+ 'name': 'name',
+ 'namespaced': 'namespaced',
+ 'short_names': 'shortNames',
+ 'singular_name': 'singularName',
+ 'storage_version_hash': 'storageVersionHash',
+ 'verbs': 'verbs',
+ 'version': 'version'
+ }
+
+ def __init__(self, categories=None, group=None, kind=None, name=None, namespaced=None, short_names=None, singular_name=None, storage_version_hash=None, verbs=None, version=None, local_vars_configuration=None): # noqa: E501
+ """V1APIResource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._categories = None
+ self._group = None
+ self._kind = None
+ self._name = None
+ self._namespaced = None
+ self._short_names = None
+ self._singular_name = None
+ self._storage_version_hash = None
+ self._verbs = None
+ self._version = None
+ self.discriminator = None
+
+ if categories is not None:
+ self.categories = categories
+ if group is not None:
+ self.group = group
+ self.kind = kind
+ self.name = name
+ self.namespaced = namespaced
+ if short_names is not None:
+ self.short_names = short_names
+ self.singular_name = singular_name
+ if storage_version_hash is not None:
+ self.storage_version_hash = storage_version_hash
+ self.verbs = verbs
+ if version is not None:
+ self.version = version
+
+ @property
+ def categories(self):
+ """Gets the categories of this V1APIResource. # noqa: E501
+
+ categories is a list of the grouped resources this resource belongs to (e.g. 'all') # noqa: E501
+
+ :return: The categories of this V1APIResource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._categories
+
+ @categories.setter
+ def categories(self, categories):
+ """Sets the categories of this V1APIResource.
+
+ categories is a list of the grouped resources this resource belongs to (e.g. 'all') # noqa: E501
+
+ :param categories: The categories of this V1APIResource. # noqa: E501
+ :type: list[str]
+ """
+
+ self._categories = categories
+
+ @property
+ def group(self):
+ """Gets the group of this V1APIResource. # noqa: E501
+
+ group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\". # noqa: E501
+
+ :return: The group of this V1APIResource. # noqa: E501
+ :rtype: str
+ """
+ return self._group
+
+ @group.setter
+ def group(self, group):
+ """Sets the group of this V1APIResource.
+
+ group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\". # noqa: E501
+
+ :param group: The group of this V1APIResource. # noqa: E501
+ :type: str
+ """
+
+ self._group = group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1APIResource. # noqa: E501
+
+ kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') # noqa: E501
+
+ :return: The kind of this V1APIResource. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1APIResource.
+
+ kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') # noqa: E501
+
+ :param kind: The kind of this V1APIResource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1APIResource. # noqa: E501
+
+ name is the plural name of the resource. # noqa: E501
+
+ :return: The name of this V1APIResource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1APIResource.
+
+ name is the plural name of the resource. # noqa: E501
+
+ :param name: The name of this V1APIResource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespaced(self):
+ """Gets the namespaced of this V1APIResource. # noqa: E501
+
+ namespaced indicates if a resource is namespaced or not. # noqa: E501
+
+ :return: The namespaced of this V1APIResource. # noqa: E501
+ :rtype: bool
+ """
+ return self._namespaced
+
+ @namespaced.setter
+ def namespaced(self, namespaced):
+ """Sets the namespaced of this V1APIResource.
+
+ namespaced indicates if a resource is namespaced or not. # noqa: E501
+
+ :param namespaced: The namespaced of this V1APIResource. # noqa: E501
+ :type: bool
+ """
+ if self.local_vars_configuration.client_side_validation and namespaced is None: # noqa: E501
+ raise ValueError("Invalid value for `namespaced`, must not be `None`") # noqa: E501
+
+ self._namespaced = namespaced
+
+ @property
+ def short_names(self):
+ """Gets the short_names of this V1APIResource. # noqa: E501
+
+ shortNames is a list of suggested short names of the resource. # noqa: E501
+
+ :return: The short_names of this V1APIResource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._short_names
+
+ @short_names.setter
+ def short_names(self, short_names):
+ """Sets the short_names of this V1APIResource.
+
+ shortNames is a list of suggested short names of the resource. # noqa: E501
+
+ :param short_names: The short_names of this V1APIResource. # noqa: E501
+ :type: list[str]
+ """
+
+ self._short_names = short_names
+
+ @property
+ def singular_name(self):
+ """Gets the singular_name of this V1APIResource. # noqa: E501
+
+ singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface. # noqa: E501
+
+ :return: The singular_name of this V1APIResource. # noqa: E501
+ :rtype: str
+ """
+ return self._singular_name
+
+ @singular_name.setter
+ def singular_name(self, singular_name):
+ """Sets the singular_name of this V1APIResource.
+
+ singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface. # noqa: E501
+
+ :param singular_name: The singular_name of this V1APIResource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and singular_name is None: # noqa: E501
+ raise ValueError("Invalid value for `singular_name`, must not be `None`") # noqa: E501
+
+ self._singular_name = singular_name
+
+ @property
+ def storage_version_hash(self):
+ """Gets the storage_version_hash of this V1APIResource. # noqa: E501
+
+ The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates. # noqa: E501
+
+ :return: The storage_version_hash of this V1APIResource. # noqa: E501
+ :rtype: str
+ """
+ return self._storage_version_hash
+
+ @storage_version_hash.setter
+ def storage_version_hash(self, storage_version_hash):
+ """Sets the storage_version_hash of this V1APIResource.
+
+ The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates. # noqa: E501
+
+ :param storage_version_hash: The storage_version_hash of this V1APIResource. # noqa: E501
+ :type: str
+ """
+
+ self._storage_version_hash = storage_version_hash
+
+ @property
+ def verbs(self):
+ """Gets the verbs of this V1APIResource. # noqa: E501
+
+ verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy) # noqa: E501
+
+ :return: The verbs of this V1APIResource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._verbs
+
+ @verbs.setter
+ def verbs(self, verbs):
+ """Sets the verbs of this V1APIResource.
+
+ verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy) # noqa: E501
+
+ :param verbs: The verbs of this V1APIResource. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
+ raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
+
+ self._verbs = verbs
+
+ @property
+ def version(self):
+ """Gets the version of this V1APIResource. # noqa: E501
+
+ version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\". # noqa: E501
+
+ :return: The version of this V1APIResource. # noqa: E501
+ :rtype: str
+ """
+ return self._version
+
+ @version.setter
+ def version(self, version):
+ """Sets the version of this V1APIResource.
+
+ version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\". # noqa: E501
+
+ :param version: The version of this V1APIResource. # noqa: E501
+ :type: str
+ """
+
+ self._version = version
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1APIResource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1APIResource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_resource_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_resource_list.py
new file mode 100644
index 0000000000..d344354a86
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_resource_list.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIResourceList(object):
    """Model for the Kubernetes V1APIResourceList object.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Python attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'api_version': 'str',
        'group_version': 'str',
        'kind': 'str',
        'resources': 'list[V1APIResource]'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'group_version': 'groupVersion',
        'kind': 'kind',
        'resources': 'resources'
    }

    def __init__(self, api_version=None, group_version=None, kind=None, resources=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1APIResourceList.

        ``group_version`` and ``resources`` are required; they are always
        routed through their setters so client-side validation can reject a
        missing value.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._group_version = None
        self._kind = None
        self._resources = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.group_version = group_version
        if kind is not None:
            self.kind = kind
        self.resources = resources

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object (see the Kubernetes API conventions).

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1APIResourceList.

        :type: str
        """
        self._api_version = api_version

    @property
    def group_version(self):
        """groupVersion is the group and version this APIResourceList is for.

        :rtype: str
        """
        return self._group_version

    @group_version.setter
    def group_version(self, group_version):
        """Set the (required) group_version of this V1APIResourceList.

        :type: str
        :raises ValueError: if client-side validation is on and value is None.
        """
        if self.local_vars_configuration.client_side_validation:
            if group_version is None:
                raise ValueError("Invalid value for `group_version`, must not be `None`")  # noqa: E501
        self._group_version = group_version

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents (see the Kubernetes API conventions).

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1APIResourceList.

        :type: str
        """
        self._kind = kind

    @property
    def resources(self):
        """resources contains the name of the resources and if they are
        namespaced.

        :rtype: list[V1APIResource]
        """
        return self._resources

    @resources.setter
    def resources(self, resources):
        """Set the (required) resources of this V1APIResourceList.

        :type: list[V1APIResource]
        :raises ValueError: if client-side validation is on and value is None.
        """
        if self.local_vars_configuration.client_side_validation:
            if resources is None:
                raise ValueError("Invalid value for `resources`, must not be `None`")  # noqa: E501
        self._resources = resources

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into any
        nested objects that expose ``to_dict``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to ``to_str``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their dict forms match."""
        if isinstance(other, V1APIResourceList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1APIResourceList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_service.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service.py
new file mode 100644
index 0000000000..84d6e7ccc5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIService(object):
    """Model for the Kubernetes V1APIService object.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Python attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1APIServiceSpec',
        'status': 'V1APIServiceStatus'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1APIService; every field is optional."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object (see the Kubernetes API conventions).

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1APIService.

        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents (see the Kubernetes API conventions).

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1APIService.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard object metadata.

        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1APIService.

        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """The spec of this V1APIService.

        :rtype: V1APIServiceSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the spec of this V1APIService.

        :type: V1APIServiceSpec
        """
        self._spec = spec

    @property
    def status(self):
        """The status of this V1APIService.

        :rtype: V1APIServiceStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V1APIService.

        :type: V1APIServiceStatus
        """
        self._status = status

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into any
        nested objects that expose ``to_dict``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to ``to_str``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their dict forms match."""
        if isinstance(other, V1APIService):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1APIService):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_condition.py
new file mode 100644
index 0000000000..e103164109
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIServiceCondition(object):
    """Model for the Kubernetes V1APIServiceCondition object.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Python attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1APIServiceCondition.

        ``status`` and ``type`` are required; they are always routed through
        their setters so client-side validation can reject a missing value.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """Last time the condition transitioned from one status to another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set the last_transition_time of this V1APIServiceCondition.

        :type: datetime
        """
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """Human-readable message indicating details about last transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set the message of this V1APIServiceCondition.

        :type: str
        """
        self._message = message

    @property
    def reason(self):
        """Unique, one-word, CamelCase reason for the condition's last
        transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason of this V1APIServiceCondition.

        :type: str
        """
        self._reason = reason

    @property
    def status(self):
        """Status is the status of the condition. Can be True, False, Unknown.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the (required) status of this V1APIServiceCondition.

        :type: str
        :raises ValueError: if client-side validation is on and value is None.
        """
        if self.local_vars_configuration.client_side_validation:
            if status is None:
                raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status

    @property
    def type(self):
        """Type is the type of the condition.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the (required) type of this V1APIServiceCondition.

        :type: str
        :raises ValueError: if client-side validation is on and value is None.
        """
        if self.local_vars_configuration.client_side_validation:
            if type is None:
                raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into any
        nested objects that expose ``to_dict``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to ``to_str``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their dict forms match."""
        if isinstance(other, V1APIServiceCondition):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1APIServiceCondition):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_list.py
new file mode 100644
index 0000000000..2e1eef0514
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIServiceList(object):
    """Model for the Kubernetes V1APIServiceList object.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Python attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1APIService]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1APIServiceList.

        ``items`` is required; it is always routed through its setter so
        client-side validation can reject a missing value.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object (see the Kubernetes API conventions).

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1APIServiceList.

        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """Items is the list of APIService.

        :rtype: list[V1APIService]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the (required) items of this V1APIServiceList.

        :type: list[V1APIService]
        :raises ValueError: if client-side validation is on and value is None.
        """
        if self.local_vars_configuration.client_side_validation:
            if items is None:
                raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents (see the Kubernetes API conventions).

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1APIServiceList.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard list metadata.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1APIServiceList.

        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into any
        nested objects that expose ``to_dict``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of ``to_dict()``."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to ``to_str``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their dict forms match."""
        if isinstance(other, V1APIServiceList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1APIServiceList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_spec.py
new file mode 100644
index 0000000000..56b1922ed3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_spec.py
@@ -0,0 +1,293 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIServiceSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'ca_bundle': 'str',
        'group': 'str',
        'group_priority_minimum': 'int',
        'insecure_skip_tls_verify': 'bool',
        'service': 'ApiregistrationV1ServiceReference',
        'version': 'str',
        'version_priority': 'int'
    }

    attribute_map = {
        'ca_bundle': 'caBundle',
        'group': 'group',
        'group_priority_minimum': 'groupPriorityMinimum',
        'insecure_skip_tls_verify': 'insecureSkipTLSVerify',
        'service': 'service',
        'version': 'version',
        'version_priority': 'versionPriority'
    }

    def __init__(self, ca_bundle=None, group=None, group_priority_minimum=None, insecure_skip_tls_verify=None, service=None, version=None, version_priority=None, local_vars_configuration=None):  # noqa: E501
        """V1APIServiceSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._ca_bundle = None
        self._group = None
        self._group_priority_minimum = None
        self._insecure_skip_tls_verify = None
        self._service = None
        self._version = None
        self._version_priority = None
        self.discriminator = None

        # Optional fields are assigned only when provided; the two required
        # fields (group_priority_minimum, version_priority) are assigned
        # unconditionally so their setters can reject None.
        if ca_bundle is not None:
            self.ca_bundle = ca_bundle
        if group is not None:
            self.group = group
        self.group_priority_minimum = group_priority_minimum
        if insecure_skip_tls_verify is not None:
            self.insecure_skip_tls_verify = insecure_skip_tls_verify
        if service is not None:
            self.service = service
        if version is not None:
            self.version = version
        self.version_priority = version_priority

    @property
    def ca_bundle(self):
        """Gets the ca_bundle of this V1APIServiceSpec.  # noqa: E501

        CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.  # noqa: E501

        :return: The ca_bundle of this V1APIServiceSpec.  # noqa: E501
        :rtype: str
        """
        return self._ca_bundle

    @ca_bundle.setter
    def ca_bundle(self, ca_bundle):
        """Sets the ca_bundle of this V1APIServiceSpec.

        CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.  # noqa: E501

        :param ca_bundle: The ca_bundle of this V1APIServiceSpec.  # noqa: E501
        :type: str
        """
        # Pattern accepts standard base64 text (presumably the JSON wire
        # encoding of the byte field — verify against the API schema).
        if (self.local_vars_configuration.client_side_validation and
                ca_bundle is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle)):  # noqa: E501
            raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")  # noqa: E501

        self._ca_bundle = ca_bundle

    @property
    def group(self):
        """Gets the group of this V1APIServiceSpec.  # noqa: E501

        Group is the API group name this server hosts  # noqa: E501

        :return: The group of this V1APIServiceSpec.  # noqa: E501
        :rtype: str
        """
        return self._group

    @group.setter
    def group(self, group):
        """Sets the group of this V1APIServiceSpec.

        Group is the API group name this server hosts  # noqa: E501

        :param group: The group of this V1APIServiceSpec.  # noqa: E501
        :type: str
        """

        self._group = group

    @property
    def group_priority_minimum(self):
        """Gets the group_priority_minimum of this V1APIServiceSpec.  # noqa: E501

        GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s  # noqa: E501

        :return: The group_priority_minimum of this V1APIServiceSpec.  # noqa: E501
        :rtype: int
        """
        return self._group_priority_minimum

    @group_priority_minimum.setter
    def group_priority_minimum(self, group_priority_minimum):
        """Sets the group_priority_minimum of this V1APIServiceSpec.

        GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s  # noqa: E501

        :param group_priority_minimum: The group_priority_minimum of this V1APIServiceSpec.  # noqa: E501
        :type: int
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and group_priority_minimum is None:  # noqa: E501
            raise ValueError("Invalid value for `group_priority_minimum`, must not be `None`")  # noqa: E501

        self._group_priority_minimum = group_priority_minimum

    @property
    def insecure_skip_tls_verify(self):
        """Gets the insecure_skip_tls_verify of this V1APIServiceSpec.  # noqa: E501

        InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.  # noqa: E501

        :return: The insecure_skip_tls_verify of this V1APIServiceSpec.  # noqa: E501
        :rtype: bool
        """
        return self._insecure_skip_tls_verify

    @insecure_skip_tls_verify.setter
    def insecure_skip_tls_verify(self, insecure_skip_tls_verify):
        """Sets the insecure_skip_tls_verify of this V1APIServiceSpec.

        InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.  # noqa: E501

        :param insecure_skip_tls_verify: The insecure_skip_tls_verify of this V1APIServiceSpec.  # noqa: E501
        :type: bool
        """

        self._insecure_skip_tls_verify = insecure_skip_tls_verify

    @property
    def service(self):
        """Gets the service of this V1APIServiceSpec.  # noqa: E501


        :return: The service of this V1APIServiceSpec.  # noqa: E501
        :rtype: ApiregistrationV1ServiceReference
        """
        return self._service

    @service.setter
    def service(self, service):
        """Sets the service of this V1APIServiceSpec.


        :param service: The service of this V1APIServiceSpec.  # noqa: E501
        :type: ApiregistrationV1ServiceReference
        """

        self._service = service

    @property
    def version(self):
        """Gets the version of this V1APIServiceSpec.  # noqa: E501

        Version is the API version this server hosts. For example, \"v1\"  # noqa: E501

        :return: The version of this V1APIServiceSpec.  # noqa: E501
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this V1APIServiceSpec.

        Version is the API version this server hosts. For example, \"v1\"  # noqa: E501

        :param version: The version of this V1APIServiceSpec.  # noqa: E501
        :type: str
        """

        self._version = version

    @property
    def version_priority(self):
        """Gets the version_priority of this V1APIServiceSpec.  # noqa: E501

        VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.  # noqa: E501

        :return: The version_priority of this V1APIServiceSpec.  # noqa: E501
        :rtype: int
        """
        return self._version_priority

    @version_priority.setter
    def version_priority(self, version_priority):
        """Sets the version_priority of this V1APIServiceSpec.

        VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.  # noqa: E501

        :param version_priority: The version_priority of this V1APIServiceSpec.  # noqa: E501
        :type: int
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and version_priority is None:  # noqa: E501
            raise ValueError("Invalid value for `version_priority`, must not be `None`")  # noqa: E501

        self._version_priority = version_priority

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (structural, via to_dict)."""
        if not isinstance(other, V1APIServiceSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (py2-compatible inverse of __eq__)."""
        if not isinstance(other, V1APIServiceSpec):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_status.py
new file mode 100644
index 0000000000..c7372aab8c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_service_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIServiceStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'conditions': 'list[V1APIServiceCondition]'
    }

    attribute_map = {
        'conditions': 'conditions'
    }

    def __init__(self, conditions=None, local_vars_configuration=None):  # noqa: E501
        """V1APIServiceStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._conditions = None
        self.discriminator = None

        # Single optional field; left as None unless supplied.
        if conditions is not None:
            self.conditions = conditions

    @property
    def conditions(self):
        """Gets the conditions of this V1APIServiceStatus.  # noqa: E501

        Current service state of apiService.  # noqa: E501

        :return: The conditions of this V1APIServiceStatus.  # noqa: E501
        :rtype: list[V1APIServiceCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Sets the conditions of this V1APIServiceStatus.

        Current service state of apiService.  # noqa: E501

        :param conditions: The conditions of this V1APIServiceStatus.  # noqa: E501
        :type: list[V1APIServiceCondition]
        """

        self._conditions = conditions

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (structural, via to_dict)."""
        if not isinstance(other, V1APIServiceStatus):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (py2-compatible inverse of __eq__)."""
        if not isinstance(other, V1APIServiceStatus):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_api_versions.py b/contrib/python/kubernetes/kubernetes/client/models/v1_api_versions.py
new file mode 100644
index 0000000000..9b1bfdcc57
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_api_versions.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1APIVersions(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE(review): 'server_address_by_client_cid_rs' is the generator's
    # snake_casing of 'serverAddressByClientCIDRs'; awkward but must match
    # the generated attribute name.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'server_address_by_client_cid_rs': 'list[V1ServerAddressByClientCIDR]',
        'versions': 'list[str]'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'server_address_by_client_cid_rs': 'serverAddressByClientCIDRs',
        'versions': 'versions'
    }

    def __init__(self, api_version=None, kind=None, server_address_by_client_cid_rs=None, versions=None, local_vars_configuration=None):  # noqa: E501
        """V1APIVersions - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._server_address_by_client_cid_rs = None
        self._versions = None
        self.discriminator = None

        # api_version/kind are optional; the last two fields are required and
        # assigned unconditionally so their setters can reject None.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
        self.versions = versions

    @property
    def api_version(self):
        """Gets the api_version of this V1APIVersions.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1APIVersions.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1APIVersions.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1APIVersions.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1APIVersions.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1APIVersions.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1APIVersions.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1APIVersions.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def server_address_by_client_cid_rs(self):
        """Gets the server_address_by_client_cid_rs of this V1APIVersions.  # noqa: E501

        a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.  # noqa: E501

        :return: The server_address_by_client_cid_rs of this V1APIVersions.  # noqa: E501
        :rtype: list[V1ServerAddressByClientCIDR]
        """
        return self._server_address_by_client_cid_rs

    @server_address_by_client_cid_rs.setter
    def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
        """Sets the server_address_by_client_cid_rs of this V1APIVersions.

        a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.  # noqa: E501

        :param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIVersions.  # noqa: E501
        :type: list[V1ServerAddressByClientCIDR]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and server_address_by_client_cid_rs is None:  # noqa: E501
            raise ValueError("Invalid value for `server_address_by_client_cid_rs`, must not be `None`")  # noqa: E501

        self._server_address_by_client_cid_rs = server_address_by_client_cid_rs

    @property
    def versions(self):
        """Gets the versions of this V1APIVersions.  # noqa: E501

        versions are the api versions that are available.  # noqa: E501

        :return: The versions of this V1APIVersions.  # noqa: E501
        :rtype: list[str]
        """
        return self._versions

    @versions.setter
    def versions(self, versions):
        """Sets the versions of this V1APIVersions.

        versions are the api versions that are available.  # noqa: E501

        :param versions: The versions of this V1APIVersions.  # noqa: E501
        :type: list[str]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and versions is None:  # noqa: E501
            raise ValueError("Invalid value for `versions`, must not be `None`")  # noqa: E501

        self._versions = versions

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (structural, via to_dict)."""
        if not isinstance(other, V1APIVersions):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (py2-compatible inverse of __eq__)."""
        if not isinstance(other, V1APIVersions):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_attached_volume.py b/contrib/python/kubernetes/kubernetes/client/models/v1_attached_volume.py
new file mode 100644
index 0000000000..bf7d3a1390
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_attached_volume.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1AttachedVolume(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'device_path': 'str',
        'name': 'str'
    }

    attribute_map = {
        'device_path': 'devicePath',
        'name': 'name'
    }

    def __init__(self, device_path=None, name=None, local_vars_configuration=None):  # noqa: E501
        """V1AttachedVolume - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._device_path = None
        self._name = None
        self.discriminator = None

        # Both fields are required: assigned unconditionally so the setters
        # can reject None when client-side validation is enabled.
        self.device_path = device_path
        self.name = name

    @property
    def device_path(self):
        """Gets the device_path of this V1AttachedVolume.  # noqa: E501

        DevicePath represents the device path where the volume should be available  # noqa: E501

        :return: The device_path of this V1AttachedVolume.  # noqa: E501
        :rtype: str
        """
        return self._device_path

    @device_path.setter
    def device_path(self, device_path):
        """Sets the device_path of this V1AttachedVolume.

        DevicePath represents the device path where the volume should be available  # noqa: E501

        :param device_path: The device_path of this V1AttachedVolume.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and device_path is None:  # noqa: E501
            raise ValueError("Invalid value for `device_path`, must not be `None`")  # noqa: E501

        self._device_path = device_path

    @property
    def name(self):
        """Gets the name of this V1AttachedVolume.  # noqa: E501

        Name of the attached volume  # noqa: E501

        :return: The name of this V1AttachedVolume.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1AttachedVolume.

        Name of the attached volume  # noqa: E501

        :param name: The name of this V1AttachedVolume.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (structural, via to_dict)."""
        if not isinstance(other, V1AttachedVolume):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (py2-compatible inverse of __eq__)."""
        if not isinstance(other, V1AttachedVolume):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py
new file mode 100644
index 0000000000..48c116c956
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1AWSElasticBlockStoreVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'fs_type': 'str',
        'partition': 'int',
        'read_only': 'bool',
        'volume_id': 'str'
    }

    attribute_map = {
        'fs_type': 'fsType',
        'partition': 'partition',
        'read_only': 'readOnly',
        'volume_id': 'volumeID'
    }

    def __init__(self, fs_type=None, partition=None, read_only=None, volume_id=None, local_vars_configuration=None):  # noqa: E501
        """V1AWSElasticBlockStoreVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._fs_type = None
        self._partition = None
        self._read_only = None
        self._volume_id = None
        self.discriminator = None

        # volume_id is the only required field: assigned unconditionally so
        # its setter can reject None.
        if fs_type is not None:
            self.fs_type = fs_type
        if partition is not None:
            self.partition = partition
        if read_only is not None:
            self.read_only = read_only
        self.volume_id = volume_id

    @property
    def fs_type(self):
        """Gets the fs_type of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501

        fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore  # noqa: E501

        :return: The fs_type of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Sets the fs_type of this V1AWSElasticBlockStoreVolumeSource.

        fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore  # noqa: E501

        :param fs_type: The fs_type of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :type: str
        """

        self._fs_type = fs_type

    @property
    def partition(self):
        """Gets the partition of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501

        partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).  # noqa: E501

        :return: The partition of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :rtype: int
        """
        return self._partition

    @partition.setter
    def partition(self, partition):
        """Sets the partition of this V1AWSElasticBlockStoreVolumeSource.

        partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).  # noqa: E501

        :param partition: The partition of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :type: int
        """

        self._partition = partition

    @property
    def read_only(self):
        """Gets the read_only of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501

        readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore  # noqa: E501

        :return: The read_only of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1AWSElasticBlockStoreVolumeSource.

        readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore  # noqa: E501

        :param read_only: The read_only of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :type: bool
        """

        self._read_only = read_only

    @property
    def volume_id(self):
        """Gets the volume_id of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501

        volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore  # noqa: E501

        :return: The volume_id of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._volume_id

    @volume_id.setter
    def volume_id(self, volume_id):
        """Sets the volume_id of this V1AWSElasticBlockStoreVolumeSource.

        volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore  # noqa: E501

        :param volume_id: The volume_id of this V1AWSElasticBlockStoreVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and volume_id is None:  # noqa: E501
            raise ValueError("Invalid value for `volume_id`, must not be `None`")  # noqa: E501

        self._volume_id = volume_id

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (structural, via to_dict)."""
        if not isinstance(other, V1AWSElasticBlockStoreVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (py2-compatible inverse of __eq__)."""
        if not isinstance(other, V1AWSElasticBlockStoreVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_azure_disk_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_azure_disk_volume_source.py
new file mode 100644
index 0000000000..1153269e1d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_azure_disk_volume_source.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1AzureDiskVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string; drives to_dict()
    # serialization order and the generated (de)serializers.
    openapi_types = {
        'caching_mode': 'str',
        'disk_name': 'str',
        'disk_uri': 'str',
        'fs_type': 'str',
        'kind': 'str',
        'read_only': 'bool'
    }

    # Python attribute name -> JSON key used on the wire (camelCase).
    attribute_map = {
        'caching_mode': 'cachingMode',
        'disk_name': 'diskName',
        'disk_uri': 'diskURI',
        'fs_type': 'fsType',
        'kind': 'kind',
        'read_only': 'readOnly'
    }

    def __init__(self, caching_mode=None, disk_name=None, disk_uri=None, fs_type=None, kind=None, read_only=None, local_vars_configuration=None):  # noqa: E501
        """V1AzureDiskVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._caching_mode = None
        self._disk_name = None
        self._disk_uri = None
        self._fs_type = None
        self._kind = None
        self._read_only = None
        self.discriminator = None  # this model has no polymorphic subtypes

        # Optional fields are assigned only when provided, so unset stays None.
        if caching_mode is not None:
            self.caching_mode = caching_mode
        # disk_name and disk_uri are assigned unconditionally: their setters
        # reject None when client-side validation is enabled (required fields).
        self.disk_name = disk_name
        self.disk_uri = disk_uri
        if fs_type is not None:
            self.fs_type = fs_type
        if kind is not None:
            self.kind = kind
        if read_only is not None:
            self.read_only = read_only

    @property
    def caching_mode(self):
        """Gets the caching_mode of this V1AzureDiskVolumeSource.  # noqa: E501

        cachingMode is the Host Caching mode: None, Read Only, Read Write.  # noqa: E501

        :return: The caching_mode of this V1AzureDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._caching_mode

    @caching_mode.setter
    def caching_mode(self, caching_mode):
        """Sets the caching_mode of this V1AzureDiskVolumeSource.

        cachingMode is the Host Caching mode: None, Read Only, Read Write.  # noqa: E501

        :param caching_mode: The caching_mode of this V1AzureDiskVolumeSource.  # noqa: E501
        :type: str
        """

        self._caching_mode = caching_mode

    @property
    def disk_name(self):
        """Gets the disk_name of this V1AzureDiskVolumeSource.  # noqa: E501

        diskName is the Name of the data disk in the blob storage  # noqa: E501

        :return: The disk_name of this V1AzureDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._disk_name

    @disk_name.setter
    def disk_name(self, disk_name):
        """Sets the disk_name of this V1AzureDiskVolumeSource.

        diskName is the Name of the data disk in the blob storage  # noqa: E501

        :param disk_name: The disk_name of this V1AzureDiskVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and disk_name is None:  # noqa: E501
            raise ValueError("Invalid value for `disk_name`, must not be `None`")  # noqa: E501

        self._disk_name = disk_name

    @property
    def disk_uri(self):
        """Gets the disk_uri of this V1AzureDiskVolumeSource.  # noqa: E501

        diskURI is the URI of data disk in the blob storage  # noqa: E501

        :return: The disk_uri of this V1AzureDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._disk_uri

    @disk_uri.setter
    def disk_uri(self, disk_uri):
        """Sets the disk_uri of this V1AzureDiskVolumeSource.

        diskURI is the URI of data disk in the blob storage  # noqa: E501

        :param disk_uri: The disk_uri of this V1AzureDiskVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and disk_uri is None:  # noqa: E501
            raise ValueError("Invalid value for `disk_uri`, must not be `None`")  # noqa: E501

        self._disk_uri = disk_uri

    @property
    def fs_type(self):
        """Gets the fs_type of this V1AzureDiskVolumeSource.  # noqa: E501

        fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.  # noqa: E501

        :return: The fs_type of this V1AzureDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Sets the fs_type of this V1AzureDiskVolumeSource.

        fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.  # noqa: E501

        :param fs_type: The fs_type of this V1AzureDiskVolumeSource.  # noqa: E501
        :type: str
        """

        self._fs_type = fs_type

    @property
    def kind(self):
        """Gets the kind of this V1AzureDiskVolumeSource.  # noqa: E501

        kind expected values are Shared: multiple blob disks per storage account  Dedicated: single blob disk per storage account  Managed: azure managed data disk (only in managed availability set). defaults to shared  # noqa: E501

        :return: The kind of this V1AzureDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1AzureDiskVolumeSource.

        kind expected values are Shared: multiple blob disks per storage account  Dedicated: single blob disk per storage account  Managed: azure managed data disk (only in managed availability set). defaults to shared  # noqa: E501

        :param kind: The kind of this V1AzureDiskVolumeSource.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def read_only(self):
        """Gets the read_only of this V1AzureDiskVolumeSource.  # noqa: E501

        readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501

        :return: The read_only of this V1AzureDiskVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1AzureDiskVolumeSource.

        readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501

        :param read_only: The read_only of this V1AzureDiskVolumeSource.  # noqa: E501
        :type: bool
        """

        self._read_only = read_only

    def to_dict(self):
        """Returns the model properties as a dict (nested models are
        recursively converted via their own ``to_dict``)."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same class, identical
        ``to_dict()`` serialization)."""
        if not isinstance(other, V1AzureDiskVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (explicit for py2)."""
        if not isinstance(other, V1AzureDiskVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_persistent_volume_source.py
new file mode 100644
index 0000000000..35c1359f07
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_persistent_volume_source.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1AzureFilePersistentVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string; drives to_dict()
    # serialization order and the generated (de)serializers.
    openapi_types = {
        'read_only': 'bool',
        'secret_name': 'str',
        'secret_namespace': 'str',
        'share_name': 'str'
    }

    # Python attribute name -> JSON key used on the wire (camelCase).
    attribute_map = {
        'read_only': 'readOnly',
        'secret_name': 'secretName',
        'secret_namespace': 'secretNamespace',
        'share_name': 'shareName'
    }

    def __init__(self, read_only=None, secret_name=None, secret_namespace=None, share_name=None, local_vars_configuration=None):  # noqa: E501
        """V1AzureFilePersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._read_only = None
        self._secret_name = None
        self._secret_namespace = None
        self._share_name = None
        self.discriminator = None  # this model has no polymorphic subtypes

        # Optional fields are assigned only when provided; secret_name and
        # share_name are required (their setters reject None when client-side
        # validation is enabled).
        if read_only is not None:
            self.read_only = read_only
        self.secret_name = secret_name
        if secret_namespace is not None:
            self.secret_namespace = secret_namespace
        self.share_name = share_name

    @property
    def read_only(self):
        """Gets the read_only of this V1AzureFilePersistentVolumeSource.  # noqa: E501

        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501

        :return: The read_only of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1AzureFilePersistentVolumeSource.

        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501

        :param read_only: The read_only of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :type: bool
        """

        self._read_only = read_only

    @property
    def secret_name(self):
        """Gets the secret_name of this V1AzureFilePersistentVolumeSource.  # noqa: E501

        secretName is the name of secret that contains Azure Storage Account Name and Key  # noqa: E501

        :return: The secret_name of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._secret_name

    @secret_name.setter
    def secret_name(self, secret_name):
        """Sets the secret_name of this V1AzureFilePersistentVolumeSource.

        secretName is the name of secret that contains Azure Storage Account Name and Key  # noqa: E501

        :param secret_name: The secret_name of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and secret_name is None:  # noqa: E501
            raise ValueError("Invalid value for `secret_name`, must not be `None`")  # noqa: E501

        self._secret_name = secret_name

    @property
    def secret_namespace(self):
        """Gets the secret_namespace of this V1AzureFilePersistentVolumeSource.  # noqa: E501

        secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod  # noqa: E501

        :return: The secret_namespace of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._secret_namespace

    @secret_namespace.setter
    def secret_namespace(self, secret_namespace):
        """Sets the secret_namespace of this V1AzureFilePersistentVolumeSource.

        secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod  # noqa: E501

        :param secret_namespace: The secret_namespace of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :type: str
        """

        self._secret_namespace = secret_namespace

    @property
    def share_name(self):
        """Gets the share_name of this V1AzureFilePersistentVolumeSource.  # noqa: E501

        shareName is the azure Share Name  # noqa: E501

        :return: The share_name of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._share_name

    @share_name.setter
    def share_name(self, share_name):
        """Sets the share_name of this V1AzureFilePersistentVolumeSource.

        shareName is the azure Share Name  # noqa: E501

        :param share_name: The share_name of this V1AzureFilePersistentVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and share_name is None:  # noqa: E501
            raise ValueError("Invalid value for `share_name`, must not be `None`")  # noqa: E501

        self._share_name = share_name

    def to_dict(self):
        """Returns the model properties as a dict (nested models are
        recursively converted via their own ``to_dict``)."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same class, identical
        ``to_dict()`` serialization)."""
        if not isinstance(other, V1AzureFilePersistentVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (explicit for py2)."""
        if not isinstance(other, V1AzureFilePersistentVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_volume_source.py
new file mode 100644
index 0000000000..0b79a022fc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_azure_file_volume_source.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1AzureFileVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string; drives to_dict()
    # serialization order and the generated (de)serializers.
    openapi_types = {
        'read_only': 'bool',
        'secret_name': 'str',
        'share_name': 'str'
    }

    # Python attribute name -> JSON key used on the wire (camelCase).
    attribute_map = {
        'read_only': 'readOnly',
        'secret_name': 'secretName',
        'share_name': 'shareName'
    }

    def __init__(self, read_only=None, secret_name=None, share_name=None, local_vars_configuration=None):  # noqa: E501
        """V1AzureFileVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._read_only = None
        self._secret_name = None
        self._share_name = None
        self.discriminator = None  # this model has no polymorphic subtypes

        # read_only is optional; secret_name and share_name are required
        # (their setters reject None when client-side validation is enabled).
        if read_only is not None:
            self.read_only = read_only
        self.secret_name = secret_name
        self.share_name = share_name

    @property
    def read_only(self):
        """Gets the read_only of this V1AzureFileVolumeSource.  # noqa: E501

        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501

        :return: The read_only of this V1AzureFileVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1AzureFileVolumeSource.

        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501

        :param read_only: The read_only of this V1AzureFileVolumeSource.  # noqa: E501
        :type: bool
        """

        self._read_only = read_only

    @property
    def secret_name(self):
        """Gets the secret_name of this V1AzureFileVolumeSource.  # noqa: E501

        secretName is the name of secret that contains Azure Storage Account Name and Key  # noqa: E501

        :return: The secret_name of this V1AzureFileVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._secret_name

    @secret_name.setter
    def secret_name(self, secret_name):
        """Sets the secret_name of this V1AzureFileVolumeSource.

        secretName is the name of secret that contains Azure Storage Account Name and Key  # noqa: E501

        :param secret_name: The secret_name of this V1AzureFileVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and secret_name is None:  # noqa: E501
            raise ValueError("Invalid value for `secret_name`, must not be `None`")  # noqa: E501

        self._secret_name = secret_name

    @property
    def share_name(self):
        """Gets the share_name of this V1AzureFileVolumeSource.  # noqa: E501

        shareName is the azure share Name  # noqa: E501

        :return: The share_name of this V1AzureFileVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._share_name

    @share_name.setter
    def share_name(self, share_name):
        """Sets the share_name of this V1AzureFileVolumeSource.

        shareName is the azure share Name  # noqa: E501

        :param share_name: The share_name of this V1AzureFileVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and share_name is None:  # noqa: E501
            raise ValueError("Invalid value for `share_name`, must not be `None`")  # noqa: E501

        self._share_name = share_name

    def to_dict(self):
        """Returns the model properties as a dict (nested models are
        recursively converted via their own ``to_dict``)."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same class, identical
        ``to_dict()`` serialization)."""
        if not isinstance(other, V1AzureFileVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (explicit for py2)."""
        if not isinstance(other, V1AzureFileVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_binding.py b/contrib/python/kubernetes/kubernetes/client/models/v1_binding.py
new file mode 100644
index 0000000000..f776f3e0cb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_binding.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Binding(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string; drives to_dict()
    # serialization order and the generated (de)serializers.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'target': 'V1ObjectReference'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'target': 'target'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, target=None, local_vars_configuration=None):  # noqa: E501
        """V1Binding - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._target = None
        self.discriminator = None  # this model has no polymorphic subtypes

        # Optional fields are assigned only when provided; target is required
        # (its setter rejects None when client-side validation is enabled).
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        self.target = target

    @property
    def api_version(self):
        """Gets the api_version of this V1Binding.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1Binding.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1Binding.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1Binding.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1Binding.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1Binding.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Binding.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1Binding.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1Binding.  # noqa: E501


        :return: The metadata of this V1Binding.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1Binding.


        :param metadata: The metadata of this V1Binding.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def target(self):
        """Gets the target of this V1Binding.  # noqa: E501


        :return: The target of this V1Binding.  # noqa: E501
        :rtype: V1ObjectReference
        """
        return self._target

    @target.setter
    def target(self, target):
        """Sets the target of this V1Binding.


        :param target: The target of this V1Binding.  # noqa: E501
        :type: V1ObjectReference
        """
        # Required field: None is rejected under client-side validation.
        if self.local_vars_configuration.client_side_validation and target is None:  # noqa: E501
            raise ValueError("Invalid value for `target`, must not be `None`")  # noqa: E501

        self._target = target

    def to_dict(self):
        """Returns the model properties as a dict (nested models are
        recursively converted via their own ``to_dict``)."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same class, identical
        ``to_dict()`` serialization)."""
        if not isinstance(other, V1Binding):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (explicit for py2)."""
        if not isinstance(other, V1Binding):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_bound_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_bound_object_reference.py
new file mode 100644
index 0000000000..7707566b8e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_bound_object_reference.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1BoundObjectReference(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string; drives to_dict()
    # serialization order and the generated (de)serializers.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'name': 'str',
        'uid': 'str'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'name': 'name',
        'uid': 'uid'
    }

    def __init__(self, api_version=None, kind=None, name=None, uid=None, local_vars_configuration=None):  # noqa: E501
        """V1BoundObjectReference - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._name = None
        self._uid = None
        self.discriminator = None  # this model has no polymorphic subtypes

        # All fields are optional: each is assigned only when provided,
        # so unset attributes stay None and no setter validation fires.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if name is not None:
            self.name = name
        if uid is not None:
            self.uid = uid

    @property
    def api_version(self):
        """Gets the api_version of this V1BoundObjectReference.  # noqa: E501

        API version of the referent.  # noqa: E501

        :return: The api_version of this V1BoundObjectReference.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1BoundObjectReference.

        API version of the referent.  # noqa: E501

        :param api_version: The api_version of this V1BoundObjectReference.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1BoundObjectReference.  # noqa: E501

        Kind of the referent. Valid kinds are 'Pod' and 'Secret'.  # noqa: E501

        :return: The kind of this V1BoundObjectReference.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1BoundObjectReference.

        Kind of the referent. Valid kinds are 'Pod' and 'Secret'.  # noqa: E501

        :param kind: The kind of this V1BoundObjectReference.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def name(self):
        """Gets the name of this V1BoundObjectReference.  # noqa: E501

        Name of the referent.  # noqa: E501

        :return: The name of this V1BoundObjectReference.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1BoundObjectReference.

        Name of the referent.  # noqa: E501

        :param name: The name of this V1BoundObjectReference.  # noqa: E501
        :type: str
        """

        self._name = name

    @property
    def uid(self):
        """Gets the uid of this V1BoundObjectReference.  # noqa: E501

        UID of the referent.  # noqa: E501

        :return: The uid of this V1BoundObjectReference.  # noqa: E501
        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Sets the uid of this V1BoundObjectReference.

        UID of the referent.  # noqa: E501

        :param uid: The uid of this V1BoundObjectReference.  # noqa: E501
        :type: str
        """

        self._uid = uid

    def to_dict(self):
        """Returns the model properties as a dict (nested models are
        recursively converted via their own ``to_dict``)."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (same class, identical
        ``to_dict()`` serialization)."""
        if not isinstance(other, V1BoundObjectReference):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal (explicit for py2)."""
        if not isinstance(other, V1BoundObjectReference):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_capabilities.py b/contrib/python/kubernetes/kubernetes/client/models/v1_capabilities.py
new file mode 100644
index 0000000000..2c32afb1d7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_capabilities.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Capabilities(object):
    """Auto-generated model for the Kubernetes ``Capabilities`` schema.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the release-1.28 OpenAPI
    document.  Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'add': 'list[str]',
        'drop': 'list[str]'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'add': 'add',
        'drop': 'drop'
    }

    def __init__(self, add=None, drop=None, local_vars_configuration=None):  # noqa: E501
        """V1Capabilities - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._add = None
        self._drop = None
        self.discriminator = None

        # Route values through the property setters; optional fields are
        # assigned only when the caller supplied them explicitly.
        if add is not None:
            self.add = add
        if drop is not None:
            self.drop = drop

    @property
    def add(self):
        """Gets the add of this V1Capabilities.

        Added capabilities.

        :return: The add of this V1Capabilities.
        :rtype: list[str]
        """
        return self._add

    @add.setter
    def add(self, add):
        """Sets the add of this V1Capabilities.

        Added capabilities.

        :param add: The add of this V1Capabilities.
        :type: list[str]
        """
        self._add = add

    @property
    def drop(self):
        """Gets the drop of this V1Capabilities.

        Removed capabilities.

        :return: The drop of this V1Capabilities.
        :rtype: list[str]
        """
        return self._drop

    @drop.setter
    def drop(self, drop):
        """Sets the drop of this V1Capabilities.

        Removed capabilities.

        :param drop: The drop of this V1Capabilities.
        :type: list[str]
        """
        self._drop = drop

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models are converted via their own ``to_dict``.
        """
        def _item(value):
            # Convert one element, delegating to ``to_dict`` when available.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_item(element) for element in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _item(val) for key, val in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return the ``pprint``-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1Capabilities with equal contents."""
        return isinstance(other, V1Capabilities) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not isinstance(other, V1Capabilities) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_persistent_volume_source.py
new file mode 100644
index 0000000000..278b83c8cf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_persistent_volume_source.py
@@ -0,0 +1,261 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CephFSPersistentVolumeSource(object):
    """Auto-generated model for the Kubernetes ``CephFSPersistentVolumeSource`` schema.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the release-1.28 OpenAPI
    document.  Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'monitors': 'list[str]',
        'path': 'str',
        'read_only': 'bool',
        'secret_file': 'str',
        'secret_ref': 'V1SecretReference',
        'user': 'str'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'monitors': 'monitors',
        'path': 'path',
        'read_only': 'readOnly',
        'secret_file': 'secretFile',
        'secret_ref': 'secretRef',
        'user': 'user'
    }

    def __init__(self, monitors=None, path=None, read_only=None, secret_file=None, secret_ref=None, user=None, local_vars_configuration=None):  # noqa: E501
        """V1CephFSPersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._monitors = None
        self._path = None
        self._read_only = None
        self._secret_file = None
        self._secret_ref = None
        self._user = None
        self.discriminator = None

        # ``monitors`` is required, so it always goes through its setter
        # (which validates it); optional fields are assigned only when given.
        self.monitors = monitors
        if path is not None:
            self.path = path
        if read_only is not None:
            self.read_only = read_only
        if secret_file is not None:
            self.secret_file = secret_file
        if secret_ref is not None:
            self.secret_ref = secret_ref
        if user is not None:
            self.user = user

    @property
    def monitors(self):
        """Gets the monitors of this V1CephFSPersistentVolumeSource.

        monitors is Required: a collection of Ceph monitors.
        More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it

        :return: The monitors of this V1CephFSPersistentVolumeSource.
        :rtype: list[str]
        """
        return self._monitors

    @monitors.setter
    def monitors(self, monitors):
        """Sets the monitors of this V1CephFSPersistentVolumeSource.

        :param monitors: The monitors of this V1CephFSPersistentVolumeSource.
        :type: list[str]
        :raises ValueError: when client-side validation is enabled and the
            required value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and monitors is None:  # noqa: E501
            raise ValueError("Invalid value for `monitors`, must not be `None`")  # noqa: E501
        self._monitors = monitors

    @property
    def path(self):
        """Gets the path of this V1CephFSPersistentVolumeSource.

        path is Optional: used as the mounted root rather than the full
        Ceph tree; default is /.

        :return: The path of this V1CephFSPersistentVolumeSource.
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1CephFSPersistentVolumeSource.

        :param path: The path of this V1CephFSPersistentVolumeSource.
        :type: str
        """
        self._path = path

    @property
    def read_only(self):
        """Gets the read_only of this V1CephFSPersistentVolumeSource.

        readOnly is Optional: defaults to false (read/write).  ReadOnly here
        will force the ReadOnly setting in VolumeMounts.

        :return: The read_only of this V1CephFSPersistentVolumeSource.
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1CephFSPersistentVolumeSource.

        :param read_only: The read_only of this V1CephFSPersistentVolumeSource.
        :type: bool
        """
        self._read_only = read_only

    @property
    def secret_file(self):
        """Gets the secret_file of this V1CephFSPersistentVolumeSource.

        secretFile is Optional: path to the key ring for User; default is
        /etc/ceph/user.secret.

        :return: The secret_file of this V1CephFSPersistentVolumeSource.
        :rtype: str
        """
        return self._secret_file

    @secret_file.setter
    def secret_file(self, secret_file):
        """Sets the secret_file of this V1CephFSPersistentVolumeSource.

        :param secret_file: The secret_file of this V1CephFSPersistentVolumeSource.
        :type: str
        """
        self._secret_file = secret_file

    @property
    def secret_ref(self):
        """Gets the secret_ref of this V1CephFSPersistentVolumeSource.

        :return: The secret_ref of this V1CephFSPersistentVolumeSource.
        :rtype: V1SecretReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Sets the secret_ref of this V1CephFSPersistentVolumeSource.

        :param secret_ref: The secret_ref of this V1CephFSPersistentVolumeSource.
        :type: V1SecretReference
        """
        self._secret_ref = secret_ref

    @property
    def user(self):
        """Gets the user of this V1CephFSPersistentVolumeSource.

        user is Optional: the rados user name; default is admin.

        :return: The user of this V1CephFSPersistentVolumeSource.
        :rtype: str
        """
        return self._user

    @user.setter
    def user(self, user):
        """Sets the user of this V1CephFSPersistentVolumeSource.

        :param user: The user of this V1CephFSPersistentVolumeSource.
        :type: str
        """
        self._user = user

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models are converted via their own ``to_dict``.
        """
        def _item(value):
            # Convert one element, delegating to ``to_dict`` when available.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_item(element) for element in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _item(val) for key, val in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return the ``pprint``-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with equal contents."""
        return isinstance(other, V1CephFSPersistentVolumeSource) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not isinstance(other, V1CephFSPersistentVolumeSource) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_volume_source.py
new file mode 100644
index 0000000000..d499f06d72
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ceph_fs_volume_source.py
@@ -0,0 +1,261 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CephFSVolumeSource(object):
    """Auto-generated model for the Kubernetes ``CephFSVolumeSource`` schema.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the release-1.28 OpenAPI
    document.  Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'monitors': 'list[str]',
        'path': 'str',
        'read_only': 'bool',
        'secret_file': 'str',
        'secret_ref': 'V1LocalObjectReference',
        'user': 'str'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'monitors': 'monitors',
        'path': 'path',
        'read_only': 'readOnly',
        'secret_file': 'secretFile',
        'secret_ref': 'secretRef',
        'user': 'user'
    }

    def __init__(self, monitors=None, path=None, read_only=None, secret_file=None, secret_ref=None, user=None, local_vars_configuration=None):  # noqa: E501
        """V1CephFSVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._monitors = None
        self._path = None
        self._read_only = None
        self._secret_file = None
        self._secret_ref = None
        self._user = None
        self.discriminator = None

        # ``monitors`` is required, so it always goes through its setter
        # (which validates it); optional fields are assigned only when given.
        self.monitors = monitors
        if path is not None:
            self.path = path
        if read_only is not None:
            self.read_only = read_only
        if secret_file is not None:
            self.secret_file = secret_file
        if secret_ref is not None:
            self.secret_ref = secret_ref
        if user is not None:
            self.user = user

    @property
    def monitors(self):
        """Gets the monitors of this V1CephFSVolumeSource.

        monitors is Required: a collection of Ceph monitors.
        More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it

        :return: The monitors of this V1CephFSVolumeSource.
        :rtype: list[str]
        """
        return self._monitors

    @monitors.setter
    def monitors(self, monitors):
        """Sets the monitors of this V1CephFSVolumeSource.

        :param monitors: The monitors of this V1CephFSVolumeSource.
        :type: list[str]
        :raises ValueError: when client-side validation is enabled and the
            required value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and monitors is None:  # noqa: E501
            raise ValueError("Invalid value for `monitors`, must not be `None`")  # noqa: E501
        self._monitors = monitors

    @property
    def path(self):
        """Gets the path of this V1CephFSVolumeSource.

        path is Optional: used as the mounted root rather than the full
        Ceph tree; default is /.

        :return: The path of this V1CephFSVolumeSource.
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1CephFSVolumeSource.

        :param path: The path of this V1CephFSVolumeSource.
        :type: str
        """
        self._path = path

    @property
    def read_only(self):
        """Gets the read_only of this V1CephFSVolumeSource.

        readOnly is Optional: defaults to false (read/write).  ReadOnly here
        will force the ReadOnly setting in VolumeMounts.

        :return: The read_only of this V1CephFSVolumeSource.
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1CephFSVolumeSource.

        :param read_only: The read_only of this V1CephFSVolumeSource.
        :type: bool
        """
        self._read_only = read_only

    @property
    def secret_file(self):
        """Gets the secret_file of this V1CephFSVolumeSource.

        secretFile is Optional: path to the key ring for User; default is
        /etc/ceph/user.secret.

        :return: The secret_file of this V1CephFSVolumeSource.
        :rtype: str
        """
        return self._secret_file

    @secret_file.setter
    def secret_file(self, secret_file):
        """Sets the secret_file of this V1CephFSVolumeSource.

        :param secret_file: The secret_file of this V1CephFSVolumeSource.
        :type: str
        """
        self._secret_file = secret_file

    @property
    def secret_ref(self):
        """Gets the secret_ref of this V1CephFSVolumeSource.

        :return: The secret_ref of this V1CephFSVolumeSource.
        :rtype: V1LocalObjectReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Sets the secret_ref of this V1CephFSVolumeSource.

        :param secret_ref: The secret_ref of this V1CephFSVolumeSource.
        :type: V1LocalObjectReference
        """
        self._secret_ref = secret_ref

    @property
    def user(self):
        """Gets the user of this V1CephFSVolumeSource.

        user is optional: the rados user name; default is admin.

        :return: The user of this V1CephFSVolumeSource.
        :rtype: str
        """
        return self._user

    @user.setter
    def user(self, user):
        """Sets the user of this V1CephFSVolumeSource.

        :param user: The user of this V1CephFSVolumeSource.
        :type: str
        """
        self._user = user

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models are converted via their own ``to_dict``.
        """
        def _item(value):
            # Convert one element, delegating to ``to_dict`` when available.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_item(element) for element in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _item(val) for key, val in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return the ``pprint``-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with equal contents."""
        return isinstance(other, V1CephFSVolumeSource) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not isinstance(other, V1CephFSVolumeSource) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request.py b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request.py
new file mode 100644
index 0000000000..88c4a41c46
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CertificateSigningRequest(object):
    """Auto-generated model for the Kubernetes ``CertificateSigningRequest`` schema.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the release-1.28 OpenAPI
    document.  Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1CertificateSigningRequestSpec',
        'status': 'V1CertificateSigningRequestStatus'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1CertificateSigningRequest - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # ``spec`` is required, so it always goes through its setter
        # (which validates it); optional fields are assigned only when given.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Gets the api_version of this V1CertificateSigningRequest.

        APIVersion defines the versioned schema of this representation of an
        object.  Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values.  More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1CertificateSigningRequest.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1CertificateSigningRequest.

        :param api_version: The api_version of this V1CertificateSigningRequest.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1CertificateSigningRequest.

        Kind is a string value representing the REST resource this object
        represents.  Servers may infer this from the endpoint the client
        submits requests to.  Cannot be updated.  In CamelCase.  More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1CertificateSigningRequest.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1CertificateSigningRequest.

        :param kind: The kind of this V1CertificateSigningRequest.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1CertificateSigningRequest.

        :return: The metadata of this V1CertificateSigningRequest.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1CertificateSigningRequest.

        :param metadata: The metadata of this V1CertificateSigningRequest.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1CertificateSigningRequest.

        :return: The spec of this V1CertificateSigningRequest.
        :rtype: V1CertificateSigningRequestSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1CertificateSigningRequest.

        :param spec: The spec of this V1CertificateSigningRequest.
        :type: V1CertificateSigningRequestSpec
        :raises ValueError: when client-side validation is enabled and the
            required value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
        self._spec = spec

    @property
    def status(self):
        """Gets the status of this V1CertificateSigningRequest.

        :return: The status of this V1CertificateSigningRequest.
        :rtype: V1CertificateSigningRequestStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1CertificateSigningRequest.

        :param status: The status of this V1CertificateSigningRequest.
        :type: V1CertificateSigningRequestStatus
        """
        self._status = status

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models are converted via their own ``to_dict``.
        """
        def _item(value):
            # Convert one element, delegating to ``to_dict`` when available.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_item(element) for element in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _item(val) for key, val in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return the ``pprint``-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with equal contents."""
        return isinstance(other, V1CertificateSigningRequest) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not isinstance(other, V1CertificateSigningRequest) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_condition.py
new file mode 100644
index 0000000000..eb9b505aa6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_condition.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CertificateSigningRequestCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'last_update_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'last_update_time': 'lastUpdateTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1CertificateSigningRequestCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._last_update_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if last_update_time is not None:
+ self.last_update_time = last_update_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
+
+ lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. # noqa: E501
+
+ :return: The last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1CertificateSigningRequestCondition.
+
+ lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def last_update_time(self):
+ """Gets the last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
+
+ lastUpdateTime is the time of the last update to this condition # noqa: E501
+
+ :return: The last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_update_time
+
+ @last_update_time.setter
+ def last_update_time(self, last_update_time):
+ """Sets the last_update_time of this V1CertificateSigningRequestCondition.
+
+ lastUpdateTime is the time of the last update to this condition # noqa: E501
+
+ :param last_update_time: The last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_update_time = last_update_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1CertificateSigningRequestCondition. # noqa: E501
+
+ message contains a human readable message with details about the request state # noqa: E501
+
+ :return: The message of this V1CertificateSigningRequestCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1CertificateSigningRequestCondition.
+
+ message contains a human readable message with details about the request state # noqa: E501
+
+ :param message: The message of this V1CertificateSigningRequestCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1CertificateSigningRequestCondition. # noqa: E501
+
+ reason indicates a brief reason for the request state # noqa: E501
+
+ :return: The reason of this V1CertificateSigningRequestCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1CertificateSigningRequestCondition.
+
+ reason indicates a brief reason for the request state # noqa: E501
+
+ :param reason: The reason of this V1CertificateSigningRequestCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1CertificateSigningRequestCondition. # noqa: E501
+
+ status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". # noqa: E501
+
+ :return: The status of this V1CertificateSigningRequestCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1CertificateSigningRequestCondition.
+
+ status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". # noqa: E501
+
+ :param status: The status of this V1CertificateSigningRequestCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1CertificateSigningRequestCondition. # noqa: E501
+
+ type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\". An \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer. A \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer. A \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate. Approved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added. Only one condition of a given type is allowed. # noqa: E501
+
+ :return: The type of this V1CertificateSigningRequestCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1CertificateSigningRequestCondition.
+
+ type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\". An \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer. A \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer. A \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate. Approved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added. Only one condition of a given type is allowed. # noqa: E501
+
+ :param type: The type of this V1CertificateSigningRequestCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CertificateSigningRequestCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CertificateSigningRequestCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_list.py
new file mode 100644
index 0000000000..d92d24073e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CertificateSigningRequestList(object):
    """A collection of CertificateSigningRequest objects
    (certificates.k8s.io/v1 list resource).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate rather than editing by hand.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1CertificateSigningRequest]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1CertificateSigningRequestList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the declared properties.
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # Optional fields only pass through their setters when supplied;
        # `items` is required and always routed through its validating setter.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Return the apiVersion of this list.

        APIVersion defines the versioned schema of this representation of
        an object.

        :return: The api_version of this V1CertificateSigningRequestList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the apiVersion of this list.

        :param api_version: The api_version of this V1CertificateSigningRequestList.
        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """Return the collection of CertificateSigningRequest objects.

        :return: The items of this V1CertificateSigningRequestList.
        :rtype: list[V1CertificateSigningRequest]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the collection of CertificateSigningRequest objects.

        :param items: The items of this V1CertificateSigningRequestList.
        :type: list[V1CertificateSigningRequest]
        :raises ValueError: when client-side validation is on and ``items``
            is ``None`` (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Return the kind of this list resource.

        Kind is a string value representing the REST resource this object
        represents.

        :return: The kind of this V1CertificateSigningRequestList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this list resource.

        :param kind: The kind of this V1CertificateSigningRequestList.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Return the standard list metadata.

        :return: The metadata of this V1CertificateSigningRequestList.
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the standard list metadata.

        :param metadata: The metadata of this V1CertificateSigningRequestList.
        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Return the model properties as a dict, serializing nested models."""
        def _unwrap(value):
            # Serialize nested models; pass plain values through unchanged.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_unwrap(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: _unwrap(element)
                                for key, element in value.items()}
            else:
                result[name] = value

        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two list models are equal iff their dict forms match."""
        return (isinstance(other, V1CertificateSigningRequestList)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1CertificateSigningRequestList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_spec.py
new file mode 100644
index 0000000000..af740de998
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_spec.py
@@ -0,0 +1,323 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CertificateSigningRequestSpec(object):
    """Spec of a CertificateSigningRequest: the PEM-encoded request itself,
    the requested signer, and requester identity recorded by the API server.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate rather than editing by hand.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'expiration_seconds': 'int',
        'extra': 'dict(str, list[str])',
        'groups': 'list[str]',
        'request': 'str',
        'signer_name': 'str',
        'uid': 'str',
        'usages': 'list[str]',
        'username': 'str'
    }

    attribute_map = {
        'expiration_seconds': 'expirationSeconds',
        'extra': 'extra',
        'groups': 'groups',
        'request': 'request',
        'signer_name': 'signerName',
        'uid': 'uid',
        'usages': 'usages',
        'username': 'username'
    }

    def __init__(self, expiration_seconds=None, extra=None, groups=None, request=None, signer_name=None, uid=None, usages=None, username=None, local_vars_configuration=None):  # noqa: E501
        """V1CertificateSigningRequestSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the declared properties.
        self._expiration_seconds = None
        self._extra = None
        self._groups = None
        self._request = None
        self._signer_name = None
        self._uid = None
        self._usages = None
        self._username = None
        self.discriminator = None

        # Optional fields only pass through their setters when supplied;
        # `request` and `signer_name` are required and always validated.
        if expiration_seconds is not None:
            self.expiration_seconds = expiration_seconds
        if extra is not None:
            self.extra = extra
        if groups is not None:
            self.groups = groups
        self.request = request
        self.signer_name = signer_name
        if uid is not None:
            self.uid = uid
        if usages is not None:
            self.usages = usages
        if username is not None:
            self.username = username

    @property
    def expiration_seconds(self):
        """Return the requested validity duration of the issued certificate.

        Signers may not honor the request; clients must check the issued
        certificate's notBefore/notAfter for the actual duration. The
        minimum valid value is 600 (10 minutes).

        :return: The expiration_seconds of this V1CertificateSigningRequestSpec.
        :rtype: int
        """
        return self._expiration_seconds

    @expiration_seconds.setter
    def expiration_seconds(self, expiration_seconds):
        """Set the requested validity duration, in seconds.

        :param expiration_seconds: The expiration_seconds of this V1CertificateSigningRequestSpec.
        :type: int
        """
        self._expiration_seconds = expiration_seconds

    @property
    def extra(self):
        """Return extra attributes of the creating user.

        Populated by the API server on creation and immutable.

        :return: The extra of this V1CertificateSigningRequestSpec.
        :rtype: dict(str, list[str])
        """
        return self._extra

    @extra.setter
    def extra(self, extra):
        """Set extra attributes of the creating user.

        :param extra: The extra of this V1CertificateSigningRequestSpec.
        :type: dict(str, list[str])
        """
        self._extra = extra

    @property
    def groups(self):
        """Return the group memberships of the creating user.

        Populated by the API server on creation and immutable.

        :return: The groups of this V1CertificateSigningRequestSpec.
        :rtype: list[str]
        """
        return self._groups

    @groups.setter
    def groups(self, groups):
        """Set the group memberships of the creating user.

        :param groups: The groups of this V1CertificateSigningRequestSpec.
        :type: list[str]
        """
        self._groups = groups

    @property
    def request(self):
        """Return the x509 CSR, a "CERTIFICATE REQUEST" PEM block.

        When serialized as JSON or YAML, the data is additionally
        base64-encoded.

        :return: The request of this V1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._request

    @request.setter
    def request(self, request):
        """Set the base64-encoded PEM certificate signing request.

        :param request: The request of this V1CertificateSigningRequestSpec.
        :type: str
        :raises ValueError: when client-side validation is on and the value
            is ``None`` or does not look like base64 data.
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and request is None:  # noqa: E501
            raise ValueError("Invalid value for `request`, must not be `None`")  # noqa: E501
        if (validating and request is not None and
                not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', request)):  # noqa: E501
            raise ValueError(r"Invalid value for `request`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")  # noqa: E501

        self._request = request

    @property
    def signer_name(self):
        """Return the requested signer (a qualified name).

        Well-known Kubernetes signers include
        "kubernetes.io/kube-apiserver-client",
        "kubernetes.io/kube-apiserver-client-kubelet", and
        "kubernetes.io/kubelet-serving"; custom signer names may also be
        used. See
        https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers

        :return: The signer_name of this V1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._signer_name

    @signer_name.setter
    def signer_name(self, signer_name):
        """Set the requested signer name.

        :param signer_name: The signer_name of this V1CertificateSigningRequestSpec.
        :type: str
        :raises ValueError: when client-side validation is on and the value
            is ``None`` (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and signer_name is None:  # noqa: E501
            raise ValueError("Invalid value for `signer_name`, must not be `None`")  # noqa: E501

        self._signer_name = signer_name

    @property
    def uid(self):
        """Return the uid of the creating user.

        Populated by the API server on creation and immutable.

        :return: The uid of this V1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Set the uid of the creating user.

        :param uid: The uid of this V1CertificateSigningRequestSpec.
        :type: str
        """
        self._uid = uid

    @property
    def usages(self):
        """Return the key usages requested in the issued certificate.

        TLS client certs typically request "digital signature",
        "key encipherment", "client auth"; serving certs typically request
        "key encipherment", "digital signature", "server auth".

        :return: The usages of this V1CertificateSigningRequestSpec.
        :rtype: list[str]
        """
        return self._usages

    @usages.setter
    def usages(self, usages):
        """Set the requested key usages.

        :param usages: The usages of this V1CertificateSigningRequestSpec.
        :type: list[str]
        """
        self._usages = usages

    @property
    def username(self):
        """Return the name of the creating user.

        Populated by the API server on creation and immutable.

        :return: The username of this V1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._username

    @username.setter
    def username(self, username):
        """Set the name of the creating user.

        :param username: The username of this V1CertificateSigningRequestSpec.
        :type: str
        """
        self._username = username

    def to_dict(self):
        """Return the model properties as a dict, serializing nested models."""
        def _unwrap(value):
            # Serialize nested models; pass plain values through unchanged.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_unwrap(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: _unwrap(element)
                                for key, element in value.items()}
            else:
                result[name] = value

        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two spec models are equal iff their dict forms match."""
        return (isinstance(other, V1CertificateSigningRequestSpec)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1CertificateSigningRequestSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_status.py
new file mode 100644
index 0000000000..03cd1d9296
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_certificate_signing_request_status.py
@@ -0,0 +1,153 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CertificateSigningRequestStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'certificate': 'str',
+ 'conditions': 'list[V1CertificateSigningRequestCondition]'
+ }
+
+ attribute_map = {
+ 'certificate': 'certificate',
+ 'conditions': 'conditions'
+ }
+
+ def __init__(self, certificate=None, conditions=None, local_vars_configuration=None): # noqa: E501
+ """V1CertificateSigningRequestStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._certificate = None
+ self._conditions = None
+ self.discriminator = None
+
+ if certificate is not None:
+ self.certificate = certificate
+ if conditions is not None:
+ self.conditions = conditions
+
+ @property
+ def certificate(self):
+ """Gets the certificate of this V1CertificateSigningRequestStatus. # noqa: E501
+
+ certificate is populated with an issued certificate by the signer after an Approved condition is present. This field is set via the /status subresource. Once populated, this field is immutable. If the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty. Validation requirements: 1. certificate must contain one or more PEM blocks. 2. All PEM blocks must have the \"CERTIFICATE\" label, contain no headers, and the encoded data must be a BER-encoded ASN.1 Certificate structure as described in section 4 of RFC5280. 3. Non-PEM content may appear before or after the \"CERTIFICATE\" PEM blocks and is unvalidated, to allow for explanatory text as described in section 5.2 of RFC7468. If more than one PEM block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. The certificate is encoded in PEM format. When serialized as JSON or YAML, the data is additionally base64-encoded, so it consists of: base64( -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- ) # noqa: E501
+
+ :return: The certificate of this V1CertificateSigningRequestStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._certificate
+
+ @certificate.setter
+ def certificate(self, certificate):
+ """Sets the certificate of this V1CertificateSigningRequestStatus.
+
+ certificate is populated with an issued certificate by the signer after an Approved condition is present. This field is set via the /status subresource. Once populated, this field is immutable. If the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty. Validation requirements: 1. certificate must contain one or more PEM blocks. 2. All PEM blocks must have the \"CERTIFICATE\" label, contain no headers, and the encoded data must be a BER-encoded ASN.1 Certificate structure as described in section 4 of RFC5280. 3. Non-PEM content may appear before or after the \"CERTIFICATE\" PEM blocks and is unvalidated, to allow for explanatory text as described in section 5.2 of RFC7468. If more than one PEM block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. The certificate is encoded in PEM format. When serialized as JSON or YAML, the data is additionally base64-encoded, so it consists of: base64( -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- ) # noqa: E501
+
+ :param certificate: The certificate of this V1CertificateSigningRequestStatus. # noqa: E501
+ :type: str
+ """
+ if (self.local_vars_configuration.client_side_validation and
+ certificate is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', certificate)): # noqa: E501
+ raise ValueError(r"Invalid value for `certificate`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501
+
+ self._certificate = certificate
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1CertificateSigningRequestStatus. # noqa: E501
+
+ conditions applied to the request. Known conditions are \"Approved\", \"Denied\", and \"Failed\". # noqa: E501
+
+ :return: The conditions of this V1CertificateSigningRequestStatus. # noqa: E501
+ :rtype: list[V1CertificateSigningRequestCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1CertificateSigningRequestStatus.
+
+ conditions applied to the request. Known conditions are \"Approved\", \"Denied\", and \"Failed\". # noqa: E501
+
+ :param conditions: The conditions of this V1CertificateSigningRequestStatus. # noqa: E501
+ :type: list[V1CertificateSigningRequestCondition]
+ """
+
+ self._conditions = conditions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CertificateSigningRequestStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CertificateSigningRequestStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cinder_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cinder_persistent_volume_source.py
new file mode 100644
index 0000000000..0ac7814577
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cinder_persistent_volume_source.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CinderPersistentVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'read_only': 'bool',
+ 'secret_ref': 'V1SecretReference',
+ 'volume_id': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'read_only': 'readOnly',
+ 'secret_ref': 'secretRef',
+ 'volume_id': 'volumeID'
+ }
+
+ def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_id=None, local_vars_configuration=None): # noqa: E501
+ """V1CinderPersistentVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._read_only = None
+ self._secret_ref = None
+ self._volume_id = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ if read_only is not None:
+ self.read_only = read_only
+ if secret_ref is not None:
+ self.secret_ref = secret_ref
+ self.volume_id = volume_id
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1CinderPersistentVolumeSource. # noqa: E501
+
+ fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :return: The fs_type of this V1CinderPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1CinderPersistentVolumeSource.
+
+ fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :param fs_type: The fs_type of this V1CinderPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1CinderPersistentVolumeSource. # noqa: E501
+
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :return: The read_only of this V1CinderPersistentVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1CinderPersistentVolumeSource.
+
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :param read_only: The read_only of this V1CinderPersistentVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1CinderPersistentVolumeSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1CinderPersistentVolumeSource. # noqa: E501
+ :rtype: V1SecretReference
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1CinderPersistentVolumeSource.
+
+
+ :param secret_ref: The secret_ref of this V1CinderPersistentVolumeSource. # noqa: E501
+ :type: V1SecretReference
+ """
+
+ self._secret_ref = secret_ref
+
+ @property
+ def volume_id(self):
+ """Gets the volume_id of this V1CinderPersistentVolumeSource. # noqa: E501
+
+ volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :return: The volume_id of this V1CinderPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._volume_id
+
+ @volume_id.setter
+ def volume_id(self, volume_id):
+ """Sets the volume_id of this V1CinderPersistentVolumeSource.
+
+ volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :param volume_id: The volume_id of this V1CinderPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and volume_id is None: # noqa: E501
+ raise ValueError("Invalid value for `volume_id`, must not be `None`") # noqa: E501
+
+ self._volume_id = volume_id
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CinderPersistentVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CinderPersistentVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cinder_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cinder_volume_source.py
new file mode 100644
index 0000000000..0fe5277e66
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cinder_volume_source.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CinderVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'read_only': 'bool',
+ 'secret_ref': 'V1LocalObjectReference',
+ 'volume_id': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'read_only': 'readOnly',
+ 'secret_ref': 'secretRef',
+ 'volume_id': 'volumeID'
+ }
+
+ def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_id=None, local_vars_configuration=None): # noqa: E501
+ """V1CinderVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._read_only = None
+ self._secret_ref = None
+ self._volume_id = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ if read_only is not None:
+ self.read_only = read_only
+ if secret_ref is not None:
+ self.secret_ref = secret_ref
+ self.volume_id = volume_id
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1CinderVolumeSource. # noqa: E501
+
+ fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :return: The fs_type of this V1CinderVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1CinderVolumeSource.
+
+ fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :param fs_type: The fs_type of this V1CinderVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1CinderVolumeSource. # noqa: E501
+
+ readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :return: The read_only of this V1CinderVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1CinderVolumeSource.
+
+ readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :param read_only: The read_only of this V1CinderVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1CinderVolumeSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1CinderVolumeSource. # noqa: E501
+ :rtype: V1LocalObjectReference
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1CinderVolumeSource.
+
+
+ :param secret_ref: The secret_ref of this V1CinderVolumeSource. # noqa: E501
+ :type: V1LocalObjectReference
+ """
+
+ self._secret_ref = secret_ref
+
+ @property
+ def volume_id(self):
+ """Gets the volume_id of this V1CinderVolumeSource. # noqa: E501
+
+ volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :return: The volume_id of this V1CinderVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._volume_id
+
+ @volume_id.setter
+ def volume_id(self, volume_id):
+ """Sets the volume_id of this V1CinderVolumeSource.
+
+ volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md # noqa: E501
+
+ :param volume_id: The volume_id of this V1CinderVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and volume_id is None: # noqa: E501
+ raise ValueError("Invalid value for `volume_id`, must not be `None`") # noqa: E501
+
+ self._volume_id = volume_id
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CinderVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CinderVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_claim_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_claim_source.py
new file mode 100644
index 0000000000..d1464b1051
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_claim_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ClaimSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'resource_claim_name': 'str',
+ 'resource_claim_template_name': 'str'
+ }
+
+ attribute_map = {
+ 'resource_claim_name': 'resourceClaimName',
+ 'resource_claim_template_name': 'resourceClaimTemplateName'
+ }
+
+ def __init__(self, resource_claim_name=None, resource_claim_template_name=None, local_vars_configuration=None): # noqa: E501
+ """V1ClaimSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._resource_claim_name = None
+ self._resource_claim_template_name = None
+ self.discriminator = None
+
+ if resource_claim_name is not None:
+ self.resource_claim_name = resource_claim_name
+ if resource_claim_template_name is not None:
+ self.resource_claim_template_name = resource_claim_template_name
+
+ @property
+ def resource_claim_name(self):
+ """Gets the resource_claim_name of this V1ClaimSource. # noqa: E501
+
+ ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. # noqa: E501
+
+ :return: The resource_claim_name of this V1ClaimSource. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_claim_name
+
+ @resource_claim_name.setter
+ def resource_claim_name(self, resource_claim_name):
+ """Sets the resource_claim_name of this V1ClaimSource.
+
+ ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. # noqa: E501
+
+ :param resource_claim_name: The resource_claim_name of this V1ClaimSource. # noqa: E501
+ :type: str
+ """
+
+ self._resource_claim_name = resource_claim_name
+
+ @property
+ def resource_claim_template_name(self):
+ """Gets the resource_claim_template_name of this V1ClaimSource. # noqa: E501
+
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. # noqa: E501
+
+ :return: The resource_claim_template_name of this V1ClaimSource. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_claim_template_name
+
+ @resource_claim_template_name.setter
+ def resource_claim_template_name(self, resource_claim_template_name):
+ """Sets the resource_claim_template_name of this V1ClaimSource.
+
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. # noqa: E501
+
+ :param resource_claim_template_name: The resource_claim_template_name of this V1ClaimSource. # noqa: E501
+ :type: str
+ """
+
+ self._resource_claim_template_name = resource_claim_template_name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ClaimSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ClaimSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_client_ip_config.py b/contrib/python/kubernetes/kubernetes/client/models/v1_client_ip_config.py
new file mode 100644
index 0000000000..579afe954a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_client_ip_config.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ClientIPConfig(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'timeout_seconds': 'int'
+ }
+
+ attribute_map = {
+ 'timeout_seconds': 'timeoutSeconds'
+ }
+
+ def __init__(self, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
+ """V1ClientIPConfig - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._timeout_seconds = None
+ self.discriminator = None
+
+ if timeout_seconds is not None:
+ self.timeout_seconds = timeout_seconds
+
+ @property
+ def timeout_seconds(self):
+ """Gets the timeout_seconds of this V1ClientIPConfig. # noqa: E501
+
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours). # noqa: E501
+
+ :return: The timeout_seconds of this V1ClientIPConfig. # noqa: E501
+ :rtype: int
+ """
+ return self._timeout_seconds
+
+ @timeout_seconds.setter
+ def timeout_seconds(self, timeout_seconds):
+ """Sets the timeout_seconds of this V1ClientIPConfig.
+
+ timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours). # noqa: E501
+
+ :param timeout_seconds: The timeout_seconds of this V1ClientIPConfig. # noqa: E501
+ :type: int
+ """
+
+ self._timeout_seconds = timeout_seconds
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ClientIPConfig):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ClientIPConfig):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role.py
new file mode 100644
index 0000000000..eb3a56c1a9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role.py
@@ -0,0 +1,230 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ClusterRole(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'aggregation_rule': 'V1AggregationRule',
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'rules': 'list[V1PolicyRule]'
+ }
+
+ attribute_map = {
+ 'aggregation_rule': 'aggregationRule',
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'rules': 'rules'
+ }
+
+ def __init__(self, aggregation_rule=None, api_version=None, kind=None, metadata=None, rules=None, local_vars_configuration=None): # noqa: E501
+ """V1ClusterRole - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._aggregation_rule = None
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._rules = None
+ self.discriminator = None
+
+ if aggregation_rule is not None:
+ self.aggregation_rule = aggregation_rule
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if rules is not None:
+ self.rules = rules
+
+ @property
+ def aggregation_rule(self):
+ """Gets the aggregation_rule of this V1ClusterRole. # noqa: E501
+
+
+ :return: The aggregation_rule of this V1ClusterRole. # noqa: E501
+ :rtype: V1AggregationRule
+ """
+ return self._aggregation_rule
+
+ @aggregation_rule.setter
+ def aggregation_rule(self, aggregation_rule):
+ """Sets the aggregation_rule of this V1ClusterRole.
+
+
+ :param aggregation_rule: The aggregation_rule of this V1ClusterRole. # noqa: E501
+ :type: V1AggregationRule
+ """
+
+ self._aggregation_rule = aggregation_rule
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ClusterRole. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ClusterRole. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ClusterRole.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ClusterRole. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ClusterRole. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ClusterRole. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ClusterRole.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ClusterRole. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ClusterRole. # noqa: E501
+
+
+ :return: The metadata of this V1ClusterRole. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ClusterRole.
+
+
+ :param metadata: The metadata of this V1ClusterRole. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def rules(self):
+ """Gets the rules of this V1ClusterRole. # noqa: E501
+
+ Rules holds all the PolicyRules for this ClusterRole # noqa: E501
+
+ :return: The rules of this V1ClusterRole. # noqa: E501
+ :rtype: list[V1PolicyRule]
+ """
+ return self._rules
+
+ @rules.setter
+ def rules(self, rules):
+ """Sets the rules of this V1ClusterRole.
+
+ Rules holds all the PolicyRules for this ClusterRole # noqa: E501
+
+ :param rules: The rules of this V1ClusterRole. # noqa: E501
+ :type: list[V1PolicyRule]
+ """
+
+ self._rules = rules
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ClusterRole):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ClusterRole):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding.py
new file mode 100644
index 0000000000..45c4aa03ea
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding.py
@@ -0,0 +1,231 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ClusterRoleBinding(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'role_ref': 'V1RoleRef',
+ 'subjects': 'list[V1Subject]'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'role_ref': 'roleRef',
+ 'subjects': 'subjects'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None, local_vars_configuration=None): # noqa: E501
+ """V1ClusterRoleBinding - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._role_ref = None
+ self._subjects = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.role_ref = role_ref
+ if subjects is not None:
+ self.subjects = subjects
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ClusterRoleBinding. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ClusterRoleBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ClusterRoleBinding.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ClusterRoleBinding. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ClusterRoleBinding. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ClusterRoleBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ClusterRoleBinding.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ClusterRoleBinding. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ClusterRoleBinding. # noqa: E501
+
+
+ :return: The metadata of this V1ClusterRoleBinding. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ClusterRoleBinding.
+
+
+ :param metadata: The metadata of this V1ClusterRoleBinding. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def role_ref(self):
+ """Gets the role_ref of this V1ClusterRoleBinding. # noqa: E501
+
+
+ :return: The role_ref of this V1ClusterRoleBinding. # noqa: E501
+ :rtype: V1RoleRef
+ """
+ return self._role_ref
+
+ @role_ref.setter
+ def role_ref(self, role_ref):
+ """Sets the role_ref of this V1ClusterRoleBinding.
+
+
+ :param role_ref: The role_ref of this V1ClusterRoleBinding. # noqa: E501
+ :type: V1RoleRef
+ """
+ if self.local_vars_configuration.client_side_validation and role_ref is None: # noqa: E501
+ raise ValueError("Invalid value for `role_ref`, must not be `None`") # noqa: E501
+
+ self._role_ref = role_ref
+
+ @property
+ def subjects(self):
+ """Gets the subjects of this V1ClusterRoleBinding. # noqa: E501
+
+ Subjects holds references to the objects the role applies to. # noqa: E501
+
+ :return: The subjects of this V1ClusterRoleBinding. # noqa: E501
+ :rtype: list[V1Subject]
+ """
+ return self._subjects
+
+ @subjects.setter
+ def subjects(self, subjects):
+ """Sets the subjects of this V1ClusterRoleBinding.
+
+ Subjects holds references to the objects the role applies to. # noqa: E501
+
+ :param subjects: The subjects of this V1ClusterRoleBinding. # noqa: E501
+ :type: list[V1Subject]
+ """
+
+ self._subjects = subjects
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ClusterRoleBinding):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ClusterRoleBinding):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding_list.py
new file mode 100644
index 0000000000..6dfa9ad49b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_binding_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ClusterRoleBindingList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1ClusterRoleBinding]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1ClusterRoleBindingList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ClusterRoleBindingList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ClusterRoleBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ClusterRoleBindingList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ClusterRoleBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1ClusterRoleBindingList. # noqa: E501
+
+ Items is a list of ClusterRoleBindings # noqa: E501
+
+ :return: The items of this V1ClusterRoleBindingList. # noqa: E501
+ :rtype: list[V1ClusterRoleBinding]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ClusterRoleBindingList.
+
+ Items is a list of ClusterRoleBindings # noqa: E501
+
+ :param items: The items of this V1ClusterRoleBindingList. # noqa: E501
+ :type: list[V1ClusterRoleBinding]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ClusterRoleBindingList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ClusterRoleBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ClusterRoleBindingList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ClusterRoleBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ClusterRoleBindingList. # noqa: E501
+
+
+ :return: The metadata of this V1ClusterRoleBindingList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ClusterRoleBindingList.
+
+
+ :param metadata: The metadata of this V1ClusterRoleBindingList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ClusterRoleBindingList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ClusterRoleBindingList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_list.py
new file mode 100644
index 0000000000..d396ab17f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cluster_role_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ClusterRoleList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1ClusterRole]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1ClusterRoleList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ClusterRoleList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ClusterRoleList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ClusterRoleList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ClusterRoleList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1ClusterRoleList. # noqa: E501
+
+ Items is a list of ClusterRoles # noqa: E501
+
+ :return: The items of this V1ClusterRoleList. # noqa: E501
+ :rtype: list[V1ClusterRole]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ClusterRoleList.
+
+ Items is a list of ClusterRoles # noqa: E501
+
+ :param items: The items of this V1ClusterRoleList. # noqa: E501
+ :type: list[V1ClusterRole]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ClusterRoleList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ClusterRoleList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ClusterRoleList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ClusterRoleList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ClusterRoleList. # noqa: E501
+
+
+ :return: The metadata of this V1ClusterRoleList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ClusterRoleList.
+
+
+ :param metadata: The metadata of this V1ClusterRoleList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ClusterRoleList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ClusterRoleList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_component_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_component_condition.py
new file mode 100644
index 0000000000..aacd4a685c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_component_condition.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ComponentCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'error': 'str',
+ 'message': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'error': 'error',
+ 'message': 'message',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, error=None, message=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1ComponentCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._error = None
+ self._message = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if error is not None:
+ self.error = error
+ if message is not None:
+ self.message = message
+ self.status = status
+ self.type = type
+
+ @property
+ def error(self):
+ """Gets the error of this V1ComponentCondition. # noqa: E501
+
+ Condition error code for a component. For example, a health check error code. # noqa: E501
+
+ :return: The error of this V1ComponentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._error
+
+ @error.setter
+ def error(self, error):
+ """Sets the error of this V1ComponentCondition.
+
+ Condition error code for a component. For example, a health check error code. # noqa: E501
+
+ :param error: The error of this V1ComponentCondition. # noqa: E501
+ :type: str
+ """
+
+ self._error = error
+
+ @property
+ def message(self):
+ """Gets the message of this V1ComponentCondition. # noqa: E501
+
+ Message about the condition for a component. For example, information about a health check. # noqa: E501
+
+ :return: The message of this V1ComponentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1ComponentCondition.
+
+ Message about the condition for a component. For example, information about a health check. # noqa: E501
+
+ :param message: The message of this V1ComponentCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def status(self):
+ """Gets the status of this V1ComponentCondition. # noqa: E501
+
+ Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\". # noqa: E501
+
+ :return: The status of this V1ComponentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1ComponentCondition.
+
+ Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\". # noqa: E501
+
+ :param status: The status of this V1ComponentCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1ComponentCondition. # noqa: E501
+
+ Type of condition for a component. Valid value: \"Healthy\" # noqa: E501
+
+ :return: The type of this V1ComponentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1ComponentCondition.
+
+ Type of condition for a component. Valid value: \"Healthy\" # noqa: E501
+
+ :param type: The type of this V1ComponentCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ComponentCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ComponentCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_component_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_component_status.py
new file mode 100644
index 0000000000..f091fc7745
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_component_status.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ComponentStatus(object):
    """ComponentStatus (and ComponentStatusList) holds the cluster validation info.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'conditions': 'list[V1ComponentCondition]',
        'kind': 'str',
        'metadata': 'V1ObjectMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'conditions': 'conditions',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, conditions=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ComponentStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._conditions = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # Every field is optional: only route values that were actually
        # supplied through the property setters.
        for field, value in (('api_version', api_version),
                             ('conditions', conditions),
                             ('kind', kind),
                             ('metadata', metadata)):
            if value is not None:
                setattr(self, field, value)

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of an
        object. Servers should convert recognized schemas to the latest internal
        value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1ComponentStatus.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set the api_version of this V1ComponentStatus (str)."""
        self._api_version = value

    @property
    def conditions(self):
        """List of component conditions observed.

        :return: The conditions of this V1ComponentStatus.
        :rtype: list[V1ComponentCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, value):
        """Set the conditions of this V1ComponentStatus (list[V1ComponentCondition])."""
        self._conditions = value

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client submits
        requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1ComponentStatus.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set the kind of this V1ComponentStatus (str)."""
        self._kind = value

    @property
    def metadata(self):
        """Standard object metadata.

        :return: The metadata of this V1ComponentStatus.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set the metadata of this V1ComponentStatus (V1ObjectMeta)."""
        self._metadata = value

    def to_dict(self):
        """Serialize the model into a dict of plain builtin values."""
        def _plain(val):
            # Recursively convert nested models, lists and dicts of models.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in val.items()}
            return val

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string rendering of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their dict serializations match."""
        if isinstance(other, V1ComponentStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; non-model operands are always unequal."""
        if isinstance(other, V1ComponentStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_component_status_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_component_status_list.py
new file mode 100644
index 0000000000..99375b785e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_component_status_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ComponentStatusList(object):
    """Status of all the conditions for the component as a list of ComponentStatus objects.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1ComponentStatus]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ComponentStatusList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is required: it always goes through the validating setter,
        # even when no value was supplied.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of an
        object. Servers should convert recognized schemas to the latest internal
        value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1ComponentStatusList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set the api_version of this V1ComponentStatusList (str)."""
        self._api_version = value

    @property
    def items(self):
        """List of ComponentStatus objects.

        :return: The items of this V1ComponentStatusList.
        :rtype: list[V1ComponentStatus]
        """
        return self._items

    @items.setter
    def items(self, value):
        """Set the items of this V1ComponentStatusList (required).

        :param value: list[V1ComponentStatus]
        :raises ValueError: when client-side validation is enabled and the
            value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = value

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client submits
        requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1ComponentStatusList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set the kind of this V1ComponentStatusList (str)."""
        self._kind = value

    @property
    def metadata(self):
        """Standard list metadata.

        :return: The metadata of this V1ComponentStatusList.
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set the metadata of this V1ComponentStatusList (V1ListMeta)."""
        self._metadata = value

    def to_dict(self):
        """Serialize the model into a dict of plain builtin values."""
        def _plain(val):
            # Recursively convert nested models, lists and dicts of models.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in val.items()}
            return val

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string rendering of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their dict serializations match."""
        if isinstance(other, V1ComponentStatusList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; non-model operands are always unequal."""
        if isinstance(other, V1ComponentStatusList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_condition.py
new file mode 100644
index 0000000000..3fe76b1190
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_condition.py
@@ -0,0 +1,267 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Condition(object):
    """Condition contains details for one aspect of the current state of this API Resource.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'observed_generation': 'int',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'observed_generation': 'observedGeneration',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, observed_generation=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1Condition - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._last_transition_time = None
        self._message = None
        self._observed_generation = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Required fields always pass through their validating setters;
        # observed_generation is the only optional one.
        self.last_transition_time = last_transition_time
        self.message = message
        if observed_generation is not None:
            self.observed_generation = observed_generation
        self.reason = reason
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """lastTransitionTime is the last time the condition transitioned from
        one status to another. This should be when the underlying condition
        changed. If that is not known, then using the time when the API field
        changed is acceptable.

        :return: The last_transition_time of this V1Condition.
        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, value):
        """Set last_transition_time (datetime, required).

        :raises ValueError: when client-side validation is on and value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `last_transition_time`, must not be `None`")  # noqa: E501
        self._last_transition_time = value

    @property
    def message(self):
        """message is a human readable message indicating details about the
        transition. This may be an empty string.

        :return: The message of this V1Condition.
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, value):
        """Set message (str, required).

        :raises ValueError: when client-side validation is on and value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `message`, must not be `None`")  # noqa: E501
        self._message = value

    @property
    def observed_generation(self):
        """observedGeneration represents the .metadata.generation that the
        condition was set based upon. For instance, if .metadata.generation is
        currently 12, but the .status.conditions[x].observedGeneration is 9,
        the condition is out of date with respect to the current state of the
        instance.

        :return: The observed_generation of this V1Condition.
        :rtype: int
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, value):
        """Set observed_generation (int, optional)."""
        self._observed_generation = value

    @property
    def reason(self):
        """reason contains a programmatic identifier indicating the reason for
        the condition's last transition. Producers of specific condition types
        may define expected values and meanings for this field, and whether the
        values are considered a guaranteed API. The value should be a CamelCase
        string. This field may not be empty.

        :return: The reason of this V1Condition.
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, value):
        """Set reason (str, required).

        :raises ValueError: when client-side validation is on and value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `reason`, must not be `None`")  # noqa: E501
        self._reason = value

    @property
    def status(self):
        """status of the condition, one of True, False, Unknown.

        :return: The status of this V1Condition.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, value):
        """Set status (str, required).

        :raises ValueError: when client-side validation is on and value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = value

    @property
    def type(self):
        """type of condition in CamelCase or in foo.example.com/CamelCase.

        :return: The type of this V1Condition.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set type (str, required).

        :raises ValueError: when client-side validation is on and value is ``None``.
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Serialize the model into a dict of plain builtin values."""
        def _plain(val):
            # Recursively convert nested models, lists and dicts of models.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in val.items()}
            return val

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string rendering of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their dict serializations match."""
        if isinstance(other, V1Condition):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; non-model operands are always unequal."""
        if isinstance(other, V1Condition):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map.py
new file mode 100644
index 0000000000..dc77d92db2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map.py
@@ -0,0 +1,260 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ConfigMap(object):
    """ConfigMap holds configuration data for pods to consume.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'binary_data': 'dict(str, str)',
        'data': 'dict(str, str)',
        'immutable': 'bool',
        'kind': 'str',
        'metadata': 'V1ObjectMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'binary_data': 'binaryData',
        'data': 'data',
        'immutable': 'immutable',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, binary_data=None, data=None, immutable=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ConfigMap - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._binary_data = None
        self._data = None
        self._immutable = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # Every field is optional: only route values that were actually
        # supplied through the property setters.
        for field, value in (('api_version', api_version),
                             ('binary_data', binary_data),
                             ('data', data),
                             ('immutable', immutable),
                             ('kind', kind),
                             ('metadata', metadata)):
            if value is not None:
                setattr(self, field, value)

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of an
        object. Servers should convert recognized schemas to the latest internal
        value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1ConfigMap.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set the api_version of this V1ConfigMap (str)."""
        self._api_version = value

    @property
    def binary_data(self):
        """BinaryData contains the binary data. Each key must consist of
        alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte
        sequences that are not in the UTF-8 range. The keys stored in
        BinaryData must not overlap with the ones in the Data field, this is
        enforced during validation process. Using this field will require
        1.10+ apiserver and kubelet.

        :return: The binary_data of this V1ConfigMap.
        :rtype: dict(str, str)
        """
        return self._binary_data

    @binary_data.setter
    def binary_data(self, value):
        """Set the binary_data of this V1ConfigMap (dict(str, str))."""
        self._binary_data = value

    @property
    def data(self):
        """Data contains the configuration data. Each key must consist of
        alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte
        sequences must use the BinaryData field. The keys stored in Data must
        not overlap with the keys in the BinaryData field, this is enforced
        during validation process.

        :return: The data of this V1ConfigMap.
        :rtype: dict(str, str)
        """
        return self._data

    @data.setter
    def data(self, value):
        """Set the data of this V1ConfigMap (dict(str, str))."""
        self._data = value

    @property
    def immutable(self):
        """Immutable, if set to true, ensures that data stored in the ConfigMap
        cannot be updated (only object metadata can be modified). If not set to
        true, the field can be modified at any time. Defaulted to nil.

        :return: The immutable of this V1ConfigMap.
        :rtype: bool
        """
        return self._immutable

    @immutable.setter
    def immutable(self, value):
        """Set the immutable flag of this V1ConfigMap (bool)."""
        self._immutable = value

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client submits
        requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1ConfigMap.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set the kind of this V1ConfigMap (str)."""
        self._kind = value

    @property
    def metadata(self):
        """Standard object metadata.

        :return: The metadata of this V1ConfigMap.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set the metadata of this V1ConfigMap (V1ObjectMeta)."""
        self._metadata = value

    def to_dict(self):
        """Serialize the model into a dict of plain builtin values."""
        def _plain(val):
            # Recursively convert nested models, lists and dicts of models.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in val.items()}
            return val

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string rendering of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their dict serializations match."""
        if isinstance(other, V1ConfigMap):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; non-model operands are always unequal."""
        if isinstance(other, V1ConfigMap):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_env_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_env_source.py
new file mode 100644
index 0000000000..7a23432b2f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_env_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ConfigMapEnvSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1ConfigMapEnvSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def name(self):
+ """Gets the name of this V1ConfigMapEnvSource. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1ConfigMapEnvSource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ConfigMapEnvSource.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1ConfigMapEnvSource. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1ConfigMapEnvSource. # noqa: E501
+
+ Specify whether the ConfigMap must be defined # noqa: E501
+
+ :return: The optional of this V1ConfigMapEnvSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1ConfigMapEnvSource.
+
+ Specify whether the ConfigMap must be defined # noqa: E501
+
+ :param optional: The optional of this V1ConfigMapEnvSource. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ConfigMapEnvSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ConfigMapEnvSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_key_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_key_selector.py
new file mode 100644
index 0000000000..15ba01fe45
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_key_selector.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ConfigMapKeySelector(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'key': 'str',
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'key': 'key',
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, key=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1ConfigMapKeySelector - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._key = None
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ self.key = key
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def key(self):
+ """Gets the key of this V1ConfigMapKeySelector. # noqa: E501
+
+ The key to select. # noqa: E501
+
+ :return: The key of this V1ConfigMapKeySelector. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1ConfigMapKeySelector.
+
+ The key to select. # noqa: E501
+
+ :param key: The key of this V1ConfigMapKeySelector. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
+ raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
+
+ self._key = key
+
+ @property
+ def name(self):
+ """Gets the name of this V1ConfigMapKeySelector. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1ConfigMapKeySelector. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ConfigMapKeySelector.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1ConfigMapKeySelector. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1ConfigMapKeySelector. # noqa: E501
+
+ Specify whether the ConfigMap or its key must be defined # noqa: E501
+
+ :return: The optional of this V1ConfigMapKeySelector. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1ConfigMapKeySelector.
+
+ Specify whether the ConfigMap or its key must be defined # noqa: E501
+
+ :param optional: The optional of this V1ConfigMapKeySelector. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ConfigMapKeySelector):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ConfigMapKeySelector):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_list.py
new file mode 100644
index 0000000000..557da86d44
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ConfigMapList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1ConfigMap]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1ConfigMapList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ConfigMapList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ConfigMapList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ConfigMapList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ConfigMapList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1ConfigMapList. # noqa: E501
+
+ Items is the list of ConfigMaps. # noqa: E501
+
+ :return: The items of this V1ConfigMapList. # noqa: E501
+ :rtype: list[V1ConfigMap]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ConfigMapList.
+
+ Items is the list of ConfigMaps. # noqa: E501
+
+ :param items: The items of this V1ConfigMapList. # noqa: E501
+ :type: list[V1ConfigMap]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ConfigMapList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ConfigMapList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ConfigMapList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ConfigMapList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ConfigMapList. # noqa: E501
+
+
+ :return: The metadata of this V1ConfigMapList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ConfigMapList.
+
+
+ :param metadata: The metadata of this V1ConfigMapList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ConfigMapList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ConfigMapList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_node_config_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_node_config_source.py
new file mode 100644
index 0000000000..9c1b89e3c6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_node_config_source.py
@@ -0,0 +1,237 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ConfigMapNodeConfigSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'kubelet_config_key': 'str',
+ 'name': 'str',
+ 'namespace': 'str',
+ 'resource_version': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'kubelet_config_key': 'kubeletConfigKey',
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'resource_version': 'resourceVersion',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, kubelet_config_key=None, name=None, namespace=None, resource_version=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1ConfigMapNodeConfigSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._kubelet_config_key = None
+ self._name = None
+ self._namespace = None
+ self._resource_version = None
+ self._uid = None
+ self.discriminator = None
+
+ self.kubelet_config_key = kubelet_config_key
+ self.name = name
+ self.namespace = namespace
+ if resource_version is not None:
+ self.resource_version = resource_version
+ if uid is not None:
+ self.uid = uid
+
+ @property
+ def kubelet_config_key(self):
+ """Gets the kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
+
+ KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. # noqa: E501
+
+ :return: The kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :rtype: str
+ """
+ return self._kubelet_config_key
+
+ @kubelet_config_key.setter
+ def kubelet_config_key(self, kubelet_config_key):
+ """Sets the kubelet_config_key of this V1ConfigMapNodeConfigSource.
+
+ KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. # noqa: E501
+
+ :param kubelet_config_key: The kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kubelet_config_key is None: # noqa: E501
+ raise ValueError("Invalid value for `kubelet_config_key`, must not be `None`") # noqa: E501
+
+ self._kubelet_config_key = kubelet_config_key
+
+ @property
+ def name(self):
+ """Gets the name of this V1ConfigMapNodeConfigSource. # noqa: E501
+
+ Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. # noqa: E501
+
+ :return: The name of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ConfigMapNodeConfigSource.
+
+ Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. # noqa: E501
+
+ :param name: The name of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
+
+ Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. # noqa: E501
+
+ :return: The namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1ConfigMapNodeConfigSource.
+
+ Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. # noqa: E501
+
+ :param namespace: The namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501
+ raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
+
+ self._namespace = namespace
+
+ @property
+ def resource_version(self):
+ """Gets the resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
+
+ ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
+
+ :return: The resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_version
+
+ @resource_version.setter
+ def resource_version(self, resource_version):
+ """Sets the resource_version of this V1ConfigMapNodeConfigSource.
+
+ ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
+
+ :param resource_version: The resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :type: str
+ """
+
+ self._resource_version = resource_version
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1ConfigMapNodeConfigSource. # noqa: E501
+
+ UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
+
+ :return: The uid of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1ConfigMapNodeConfigSource.
+
+ UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
+
+ :param uid: The uid of this V1ConfigMapNodeConfigSource. # noqa: E501
+ :type: str
+ """
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ConfigMapNodeConfigSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ConfigMapNodeConfigSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_projection.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_projection.py
new file mode 100644
index 0000000000..8b7cbc83cf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_projection.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ConfigMapProjection(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'items': 'list[V1KeyToPath]',
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'items': 'items',
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, items=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1ConfigMapProjection - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._items = None
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ if items is not None:
+ self.items = items
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def items(self):
+ """Gets the items of this V1ConfigMapProjection. # noqa: E501
+
+ items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :return: The items of this V1ConfigMapProjection. # noqa: E501
+ :rtype: list[V1KeyToPath]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ConfigMapProjection.
+
+ items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :param items: The items of this V1ConfigMapProjection. # noqa: E501
+ :type: list[V1KeyToPath]
+ """
+
+ self._items = items
+
+ @property
+ def name(self):
+ """Gets the name of this V1ConfigMapProjection. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1ConfigMapProjection. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ConfigMapProjection.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1ConfigMapProjection. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1ConfigMapProjection. # noqa: E501
+
+ optional specify whether the ConfigMap or its keys must be defined # noqa: E501
+
+ :return: The optional of this V1ConfigMapProjection. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1ConfigMapProjection.
+
+ optional specify whether the ConfigMap or its keys must be defined # noqa: E501
+
+ :param optional: The optional of this V1ConfigMapProjection. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ConfigMapProjection):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ConfigMapProjection):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_volume_source.py
new file mode 100644
index 0000000000..95d4951052
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_config_map_volume_source.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ConfigMapVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'default_mode': 'int',
+ 'items': 'list[V1KeyToPath]',
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'default_mode': 'defaultMode',
+ 'items': 'items',
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, default_mode=None, items=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1ConfigMapVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._default_mode = None
+ self._items = None
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ if default_mode is not None:
+ self.default_mode = default_mode
+ if items is not None:
+ self.items = items
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def default_mode(self):
+ """Gets the default_mode of this V1ConfigMapVolumeSource. # noqa: E501
+
+ defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
+
+ :return: The default_mode of this V1ConfigMapVolumeSource. # noqa: E501
+ :rtype: int
+ """
+ return self._default_mode
+
+ @default_mode.setter
+ def default_mode(self, default_mode):
+ """Sets the default_mode of this V1ConfigMapVolumeSource.
+
+ defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
+
+ :param default_mode: The default_mode of this V1ConfigMapVolumeSource. # noqa: E501
+ :type: int
+ """
+
+ self._default_mode = default_mode
+
+ @property
+ def items(self):
+ """Gets the items of this V1ConfigMapVolumeSource. # noqa: E501
+
+ items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :return: The items of this V1ConfigMapVolumeSource. # noqa: E501
+ :rtype: list[V1KeyToPath]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ConfigMapVolumeSource.
+
+ items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :param items: The items of this V1ConfigMapVolumeSource. # noqa: E501
+ :type: list[V1KeyToPath]
+ """
+
+ self._items = items
+
+ @property
+ def name(self):
+ """Gets the name of this V1ConfigMapVolumeSource. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1ConfigMapVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ConfigMapVolumeSource.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1ConfigMapVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1ConfigMapVolumeSource. # noqa: E501
+
+ optional specify whether the ConfigMap or its keys must be defined # noqa: E501
+
+ :return: The optional of this V1ConfigMapVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1ConfigMapVolumeSource.
+
+ optional specify whether the ConfigMap or its keys must be defined # noqa: E501
+
+ :param optional: The optional of this V1ConfigMapVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ConfigMapVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ConfigMapVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container.py
new file mode 100644
index 0000000000..b461da7fdb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container.py
@@ -0,0 +1,755 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Container(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'args': 'list[str]',
+ 'command': 'list[str]',
+ 'env': 'list[V1EnvVar]',
+ 'env_from': 'list[V1EnvFromSource]',
+ 'image': 'str',
+ 'image_pull_policy': 'str',
+ 'lifecycle': 'V1Lifecycle',
+ 'liveness_probe': 'V1Probe',
+ 'name': 'str',
+ 'ports': 'list[V1ContainerPort]',
+ 'readiness_probe': 'V1Probe',
+ 'resize_policy': 'list[V1ContainerResizePolicy]',
+ 'resources': 'V1ResourceRequirements',
+ 'restart_policy': 'str',
+ 'security_context': 'V1SecurityContext',
+ 'startup_probe': 'V1Probe',
+ 'stdin': 'bool',
+ 'stdin_once': 'bool',
+ 'termination_message_path': 'str',
+ 'termination_message_policy': 'str',
+ 'tty': 'bool',
+ 'volume_devices': 'list[V1VolumeDevice]',
+ 'volume_mounts': 'list[V1VolumeMount]',
+ 'working_dir': 'str'
+ }
+
+ attribute_map = {
+ 'args': 'args',
+ 'command': 'command',
+ 'env': 'env',
+ 'env_from': 'envFrom',
+ 'image': 'image',
+ 'image_pull_policy': 'imagePullPolicy',
+ 'lifecycle': 'lifecycle',
+ 'liveness_probe': 'livenessProbe',
+ 'name': 'name',
+ 'ports': 'ports',
+ 'readiness_probe': 'readinessProbe',
+ 'resize_policy': 'resizePolicy',
+ 'resources': 'resources',
+ 'restart_policy': 'restartPolicy',
+ 'security_context': 'securityContext',
+ 'startup_probe': 'startupProbe',
+ 'stdin': 'stdin',
+ 'stdin_once': 'stdinOnce',
+ 'termination_message_path': 'terminationMessagePath',
+ 'termination_message_policy': 'terminationMessagePolicy',
+ 'tty': 'tty',
+ 'volume_devices': 'volumeDevices',
+ 'volume_mounts': 'volumeMounts',
+ 'working_dir': 'workingDir'
+ }
+
+ def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resize_policy=None, resources=None, restart_policy=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
+ """V1Container - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._args = None
+ self._command = None
+ self._env = None
+ self._env_from = None
+ self._image = None
+ self._image_pull_policy = None
+ self._lifecycle = None
+ self._liveness_probe = None
+ self._name = None
+ self._ports = None
+ self._readiness_probe = None
+ self._resize_policy = None
+ self._resources = None
+ self._restart_policy = None
+ self._security_context = None
+ self._startup_probe = None
+ self._stdin = None
+ self._stdin_once = None
+ self._termination_message_path = None
+ self._termination_message_policy = None
+ self._tty = None
+ self._volume_devices = None
+ self._volume_mounts = None
+ self._working_dir = None
+ self.discriminator = None
+
+ if args is not None:
+ self.args = args
+ if command is not None:
+ self.command = command
+ if env is not None:
+ self.env = env
+ if env_from is not None:
+ self.env_from = env_from
+ if image is not None:
+ self.image = image
+ if image_pull_policy is not None:
+ self.image_pull_policy = image_pull_policy
+ if lifecycle is not None:
+ self.lifecycle = lifecycle
+ if liveness_probe is not None:
+ self.liveness_probe = liveness_probe
+ self.name = name
+ if ports is not None:
+ self.ports = ports
+ if readiness_probe is not None:
+ self.readiness_probe = readiness_probe
+ if resize_policy is not None:
+ self.resize_policy = resize_policy
+ if resources is not None:
+ self.resources = resources
+ if restart_policy is not None:
+ self.restart_policy = restart_policy
+ if security_context is not None:
+ self.security_context = security_context
+ if startup_probe is not None:
+ self.startup_probe = startup_probe
+ if stdin is not None:
+ self.stdin = stdin
+ if stdin_once is not None:
+ self.stdin_once = stdin_once
+ if termination_message_path is not None:
+ self.termination_message_path = termination_message_path
+ if termination_message_policy is not None:
+ self.termination_message_policy = termination_message_policy
+ if tty is not None:
+ self.tty = tty
+ if volume_devices is not None:
+ self.volume_devices = volume_devices
+ if volume_mounts is not None:
+ self.volume_mounts = volume_mounts
+ if working_dir is not None:
+ self.working_dir = working_dir
+
+ @property
+ def args(self):
+ """Gets the args of this V1Container. # noqa: E501
+
+ Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :return: The args of this V1Container. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._args
+
+ @args.setter
+ def args(self, args):
+ """Sets the args of this V1Container.
+
+ Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :param args: The args of this V1Container. # noqa: E501
+ :type: list[str]
+ """
+
+ self._args = args
+
+ @property
+ def command(self):
+ """Gets the command of this V1Container. # noqa: E501
+
+ Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :return: The command of this V1Container. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._command
+
+ @command.setter
+ def command(self, command):
+ """Sets the command of this V1Container.
+
+ Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :param command: The command of this V1Container. # noqa: E501
+ :type: list[str]
+ """
+
+ self._command = command
+
+ @property
+ def env(self):
+ """Gets the env of this V1Container. # noqa: E501
+
+ List of environment variables to set in the container. Cannot be updated. # noqa: E501
+
+ :return: The env of this V1Container. # noqa: E501
+ :rtype: list[V1EnvVar]
+ """
+ return self._env
+
+ @env.setter
+ def env(self, env):
+ """Sets the env of this V1Container.
+
+ List of environment variables to set in the container. Cannot be updated. # noqa: E501
+
+ :param env: The env of this V1Container. # noqa: E501
+ :type: list[V1EnvVar]
+ """
+
+ self._env = env
+
+ @property
+ def env_from(self):
+ """Gets the env_from of this V1Container. # noqa: E501
+
+ List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
+
+ :return: The env_from of this V1Container. # noqa: E501
+ :rtype: list[V1EnvFromSource]
+ """
+ return self._env_from
+
+ @env_from.setter
+ def env_from(self, env_from):
+ """Sets the env_from of this V1Container.
+
+ List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
+
+ :param env_from: The env_from of this V1Container. # noqa: E501
+ :type: list[V1EnvFromSource]
+ """
+
+ self._env_from = env_from
+
+ @property
+ def image(self):
+ """Gets the image of this V1Container. # noqa: E501
+
+ Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
+
+ :return: The image of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._image
+
+ @image.setter
+ def image(self, image):
+ """Sets the image of this V1Container.
+
+ Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
+
+ :param image: The image of this V1Container. # noqa: E501
+ :type: str
+ """
+
+ self._image = image
+
+ @property
+ def image_pull_policy(self):
+ """Gets the image_pull_policy of this V1Container. # noqa: E501
+
+ Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
+
+ :return: The image_pull_policy of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._image_pull_policy
+
+ @image_pull_policy.setter
+ def image_pull_policy(self, image_pull_policy):
+ """Sets the image_pull_policy of this V1Container.
+
+ Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
+
+ :param image_pull_policy: The image_pull_policy of this V1Container. # noqa: E501
+ :type: str
+ """
+
+ self._image_pull_policy = image_pull_policy
+
+ @property
+ def lifecycle(self):
+ """Gets the lifecycle of this V1Container. # noqa: E501
+
+
+ :return: The lifecycle of this V1Container. # noqa: E501
+ :rtype: V1Lifecycle
+ """
+ return self._lifecycle
+
+ @lifecycle.setter
+ def lifecycle(self, lifecycle):
+ """Sets the lifecycle of this V1Container.
+
+
+ :param lifecycle: The lifecycle of this V1Container. # noqa: E501
+ :type: V1Lifecycle
+ """
+
+ self._lifecycle = lifecycle
+
+ @property
+ def liveness_probe(self):
+ """Gets the liveness_probe of this V1Container. # noqa: E501
+
+
+ :return: The liveness_probe of this V1Container. # noqa: E501
+ :rtype: V1Probe
+ """
+ return self._liveness_probe
+
+ @liveness_probe.setter
+ def liveness_probe(self, liveness_probe):
+ """Sets the liveness_probe of this V1Container.
+
+
+ :param liveness_probe: The liveness_probe of this V1Container. # noqa: E501
+ :type: V1Probe
+ """
+
+ self._liveness_probe = liveness_probe
+
+ @property
+ def name(self):
+ """Gets the name of this V1Container. # noqa: E501
+
+ Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
+
+ :return: The name of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1Container.
+
+ Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
+
+ :param name: The name of this V1Container. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def ports(self):
+ """Gets the ports of this V1Container. # noqa: E501
+
+ List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. # noqa: E501
+
+ :return: The ports of this V1Container. # noqa: E501
+ :rtype: list[V1ContainerPort]
+ """
+ return self._ports
+
+ @ports.setter
+ def ports(self, ports):
+ """Sets the ports of this V1Container.
+
+ List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. # noqa: E501
+
+ :param ports: The ports of this V1Container. # noqa: E501
+ :type: list[V1ContainerPort]
+ """
+
+ self._ports = ports
+
+ @property
+ def readiness_probe(self):
+ """Gets the readiness_probe of this V1Container. # noqa: E501
+
+
+ :return: The readiness_probe of this V1Container. # noqa: E501
+ :rtype: V1Probe
+ """
+ return self._readiness_probe
+
+ @readiness_probe.setter
+ def readiness_probe(self, readiness_probe):
+ """Sets the readiness_probe of this V1Container.
+
+
+ :param readiness_probe: The readiness_probe of this V1Container. # noqa: E501
+ :type: V1Probe
+ """
+
+ self._readiness_probe = readiness_probe
+
+ @property
+ def resize_policy(self):
+ """Gets the resize_policy of this V1Container. # noqa: E501
+
+ Resources resize policy for the container. # noqa: E501
+
+ :return: The resize_policy of this V1Container. # noqa: E501
+ :rtype: list[V1ContainerResizePolicy]
+ """
+ return self._resize_policy
+
+ @resize_policy.setter
+ def resize_policy(self, resize_policy):
+ """Sets the resize_policy of this V1Container.
+
+ Resources resize policy for the container. # noqa: E501
+
+ :param resize_policy: The resize_policy of this V1Container. # noqa: E501
+ :type: list[V1ContainerResizePolicy]
+ """
+
+ self._resize_policy = resize_policy
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1Container. # noqa: E501
+
+
+ :return: The resources of this V1Container. # noqa: E501
+ :rtype: V1ResourceRequirements
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1Container.
+
+
+ :param resources: The resources of this V1Container. # noqa: E501
+ :type: V1ResourceRequirements
+ """
+
+ self._resources = resources
+
+ @property
+ def restart_policy(self):
+ """Gets the restart_policy of this V1Container. # noqa: E501
+
+ RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. # noqa: E501
+
+ :return: The restart_policy of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._restart_policy
+
+ @restart_policy.setter
+ def restart_policy(self, restart_policy):
+ """Sets the restart_policy of this V1Container.
+
+ RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. # noqa: E501
+
+ :param restart_policy: The restart_policy of this V1Container. # noqa: E501
+ :type: str
+ """
+
+ self._restart_policy = restart_policy
+
+ @property
+ def security_context(self):
+ """Gets the security_context of this V1Container. # noqa: E501
+
+
+ :return: The security_context of this V1Container. # noqa: E501
+ :rtype: V1SecurityContext
+ """
+ return self._security_context
+
+ @security_context.setter
+ def security_context(self, security_context):
+ """Sets the security_context of this V1Container.
+
+
+ :param security_context: The security_context of this V1Container. # noqa: E501
+ :type: V1SecurityContext
+ """
+
+ self._security_context = security_context
+
+ @property
+ def startup_probe(self):
+ """Gets the startup_probe of this V1Container. # noqa: E501
+
+
+ :return: The startup_probe of this V1Container. # noqa: E501
+ :rtype: V1Probe
+ """
+ return self._startup_probe
+
+ @startup_probe.setter
+ def startup_probe(self, startup_probe):
+ """Sets the startup_probe of this V1Container.
+
+
+ :param startup_probe: The startup_probe of this V1Container. # noqa: E501
+ :type: V1Probe
+ """
+
+ self._startup_probe = startup_probe
+
+ @property
+ def stdin(self):
+ """Gets the stdin of this V1Container. # noqa: E501
+
+ Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
+
+ :return: The stdin of this V1Container. # noqa: E501
+ :rtype: bool
+ """
+ return self._stdin
+
+ @stdin.setter
+ def stdin(self, stdin):
+ """Sets the stdin of this V1Container.
+
+ Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
+
+ :param stdin: The stdin of this V1Container. # noqa: E501
+ :type: bool
+ """
+
+ self._stdin = stdin
+
+ @property
+ def stdin_once(self):
+ """Gets the stdin_once of this V1Container. # noqa: E501
+
+ Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
+
+ :return: The stdin_once of this V1Container. # noqa: E501
+ :rtype: bool
+ """
+ return self._stdin_once
+
+ @stdin_once.setter
+ def stdin_once(self, stdin_once):
+ """Sets the stdin_once of this V1Container.
+
+ Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
+
+ :param stdin_once: The stdin_once of this V1Container. # noqa: E501
+ :type: bool
+ """
+
+ self._stdin_once = stdin_once
+
+ @property
+ def termination_message_path(self):
+ """Gets the termination_message_path of this V1Container. # noqa: E501
+
+ Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
+
+ :return: The termination_message_path of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._termination_message_path
+
+ @termination_message_path.setter
+ def termination_message_path(self, termination_message_path):
+ """Sets the termination_message_path of this V1Container.
+
+ Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
+
+ :param termination_message_path: The termination_message_path of this V1Container. # noqa: E501
+ :type: str
+ """
+
+ self._termination_message_path = termination_message_path
+
+ @property
+ def termination_message_policy(self):
+ """Gets the termination_message_policy of this V1Container. # noqa: E501
+
+ Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
+
+ :return: The termination_message_policy of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._termination_message_policy
+
+ @termination_message_policy.setter
+ def termination_message_policy(self, termination_message_policy):
+ """Sets the termination_message_policy of this V1Container.
+
+ Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
+
+ :param termination_message_policy: The termination_message_policy of this V1Container. # noqa: E501
+ :type: str
+ """
+
+ self._termination_message_policy = termination_message_policy
+
+ @property
+ def tty(self):
+ """Gets the tty of this V1Container. # noqa: E501
+
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
+
+ :return: The tty of this V1Container. # noqa: E501
+ :rtype: bool
+ """
+ return self._tty
+
+ @tty.setter
+ def tty(self, tty):
+ """Sets the tty of this V1Container.
+
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
+
+ :param tty: The tty of this V1Container. # noqa: E501
+ :type: bool
+ """
+
+ self._tty = tty
+
+ @property
+ def volume_devices(self):
+ """Gets the volume_devices of this V1Container. # noqa: E501
+
+ volumeDevices is the list of block devices to be used by the container. # noqa: E501
+
+ :return: The volume_devices of this V1Container. # noqa: E501
+ :rtype: list[V1VolumeDevice]
+ """
+ return self._volume_devices
+
+ @volume_devices.setter
+ def volume_devices(self, volume_devices):
+ """Sets the volume_devices of this V1Container.
+
+ volumeDevices is the list of block devices to be used by the container. # noqa: E501
+
+ :param volume_devices: The volume_devices of this V1Container. # noqa: E501
+ :type: list[V1VolumeDevice]
+ """
+
+ self._volume_devices = volume_devices
+
+ @property
+ def volume_mounts(self):
+ """Gets the volume_mounts of this V1Container. # noqa: E501
+
+ Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
+
+ :return: The volume_mounts of this V1Container. # noqa: E501
+ :rtype: list[V1VolumeMount]
+ """
+ return self._volume_mounts
+
+ @volume_mounts.setter
+ def volume_mounts(self, volume_mounts):
+ """Sets the volume_mounts of this V1Container.
+
+ Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
+
+ :param volume_mounts: The volume_mounts of this V1Container. # noqa: E501
+ :type: list[V1VolumeMount]
+ """
+
+ self._volume_mounts = volume_mounts
+
+ @property
+ def working_dir(self):
+ """Gets the working_dir of this V1Container. # noqa: E501
+
+ Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
+
+ :return: The working_dir of this V1Container. # noqa: E501
+ :rtype: str
+ """
+ return self._working_dir
+
+ @working_dir.setter
+ def working_dir(self, working_dir):
+ """Sets the working_dir of this V1Container.
+
+ Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
+
+ :param working_dir: The working_dir of this V1Container. # noqa: E501
+ :type: str
+ """
+
+ self._working_dir = working_dir
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Container):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Container):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_image.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_image.py
new file mode 100644
index 0000000000..826de40b86
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_image.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ContainerImage(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'names': 'list[str]',
+ 'size_bytes': 'int'
+ }
+
+ attribute_map = {
+ 'names': 'names',
+ 'size_bytes': 'sizeBytes'
+ }
+
+ def __init__(self, names=None, size_bytes=None, local_vars_configuration=None): # noqa: E501
+ """V1ContainerImage - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._names = None
+ self._size_bytes = None
+ self.discriminator = None
+
+ if names is not None:
+ self.names = names
+ if size_bytes is not None:
+ self.size_bytes = size_bytes
+
+ @property
+ def names(self):
+ """Gets the names of this V1ContainerImage. # noqa: E501
+
+ Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"] # noqa: E501
+
+ :return: The names of this V1ContainerImage. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._names
+
+ @names.setter
+ def names(self, names):
+ """Sets the names of this V1ContainerImage.
+
+ Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"] # noqa: E501
+
+ :param names: The names of this V1ContainerImage. # noqa: E501
+ :type: list[str]
+ """
+
+ self._names = names
+
+ @property
+ def size_bytes(self):
+ """Gets the size_bytes of this V1ContainerImage. # noqa: E501
+
+ The size of the image in bytes. # noqa: E501
+
+ :return: The size_bytes of this V1ContainerImage. # noqa: E501
+ :rtype: int
+ """
+ return self._size_bytes
+
+ @size_bytes.setter
+ def size_bytes(self, size_bytes):
+ """Sets the size_bytes of this V1ContainerImage.
+
+ The size of the image in bytes. # noqa: E501
+
+ :param size_bytes: The size_bytes of this V1ContainerImage. # noqa: E501
+ :type: int
+ """
+
+ self._size_bytes = size_bytes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ContainerImage):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ContainerImage):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_port.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_port.py
new file mode 100644
index 0000000000..4c71f5bfda
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_port.py
@@ -0,0 +1,235 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ContainerPort(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'container_port': 'int',
+ 'host_ip': 'str',
+ 'host_port': 'int',
+ 'name': 'str',
+ 'protocol': 'str'
+ }
+
+ attribute_map = {
+ 'container_port': 'containerPort',
+ 'host_ip': 'hostIP',
+ 'host_port': 'hostPort',
+ 'name': 'name',
+ 'protocol': 'protocol'
+ }
+
+ def __init__(self, container_port=None, host_ip=None, host_port=None, name=None, protocol=None, local_vars_configuration=None): # noqa: E501
+ """V1ContainerPort - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._container_port = None
+ self._host_ip = None
+ self._host_port = None
+ self._name = None
+ self._protocol = None
+ self.discriminator = None
+
+ self.container_port = container_port
+ if host_ip is not None:
+ self.host_ip = host_ip
+ if host_port is not None:
+ self.host_port = host_port
+ if name is not None:
+ self.name = name
+ if protocol is not None:
+ self.protocol = protocol
+
+ @property
+ def container_port(self):
+ """Gets the container_port of this V1ContainerPort. # noqa: E501
+
+ Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. # noqa: E501
+
+ :return: The container_port of this V1ContainerPort. # noqa: E501
+ :rtype: int
+ """
+ return self._container_port
+
+ @container_port.setter
+ def container_port(self, container_port):
+ """Sets the container_port of this V1ContainerPort.
+
+ Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. # noqa: E501
+
+ :param container_port: The container_port of this V1ContainerPort. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and container_port is None: # noqa: E501
+ raise ValueError("Invalid value for `container_port`, must not be `None`") # noqa: E501
+
+ self._container_port = container_port
+
+ @property
+ def host_ip(self):
+ """Gets the host_ip of this V1ContainerPort. # noqa: E501
+
+ What host IP to bind the external port to. # noqa: E501
+
+ :return: The host_ip of this V1ContainerPort. # noqa: E501
+ :rtype: str
+ """
+ return self._host_ip
+
+ @host_ip.setter
+ def host_ip(self, host_ip):
+ """Sets the host_ip of this V1ContainerPort.
+
+ What host IP to bind the external port to. # noqa: E501
+
+ :param host_ip: The host_ip of this V1ContainerPort. # noqa: E501
+ :type: str
+ """
+
+ self._host_ip = host_ip
+
+ @property
+ def host_port(self):
+ """Gets the host_port of this V1ContainerPort. # noqa: E501
+
+ Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. # noqa: E501
+
+ :return: The host_port of this V1ContainerPort. # noqa: E501
+ :rtype: int
+ """
+ return self._host_port
+
+ @host_port.setter
+ def host_port(self, host_port):
+ """Sets the host_port of this V1ContainerPort.
+
+ Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. # noqa: E501
+
+ :param host_port: The host_port of this V1ContainerPort. # noqa: E501
+ :type: int
+ """
+
+ self._host_port = host_port
+
+ @property
+ def name(self):
+ """Gets the name of this V1ContainerPort. # noqa: E501
+
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. # noqa: E501
+
+ :return: The name of this V1ContainerPort. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ContainerPort.
+
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. # noqa: E501
+
+ :param name: The name of this V1ContainerPort. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def protocol(self):
+ """Gets the protocol of this V1ContainerPort. # noqa: E501
+
+ Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\". # noqa: E501
+
+ :return: The protocol of this V1ContainerPort. # noqa: E501
+ :rtype: str
+ """
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, protocol):
+ """Sets the protocol of this V1ContainerPort.
+
+ Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\". # noqa: E501
+
+ :param protocol: The protocol of this V1ContainerPort. # noqa: E501
+ :type: str
+ """
+
+ self._protocol = protocol
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ContainerPort):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ContainerPort):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_resize_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_resize_policy.py
new file mode 100644
index 0000000000..74c5d02ab3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_resize_policy.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ContainerResizePolicy(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'resource_name': 'str',
+ 'restart_policy': 'str'
+ }
+
+ attribute_map = {
+ 'resource_name': 'resourceName',
+ 'restart_policy': 'restartPolicy'
+ }
+
+ def __init__(self, resource_name=None, restart_policy=None, local_vars_configuration=None): # noqa: E501
+ """V1ContainerResizePolicy - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._resource_name = None
+ self._restart_policy = None
+ self.discriminator = None
+
+ self.resource_name = resource_name
+ self.restart_policy = restart_policy
+
+ @property
+ def resource_name(self):
+ """Gets the resource_name of this V1ContainerResizePolicy. # noqa: E501
+
+ Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. # noqa: E501
+
+ :return: The resource_name of this V1ContainerResizePolicy. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_name
+
+ @resource_name.setter
+ def resource_name(self, resource_name):
+ """Sets the resource_name of this V1ContainerResizePolicy.
+
+ Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. # noqa: E501
+
+ :param resource_name: The resource_name of this V1ContainerResizePolicy. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and resource_name is None: # noqa: E501
+ raise ValueError("Invalid value for `resource_name`, must not be `None`") # noqa: E501
+
+ self._resource_name = resource_name
+
+ @property
+ def restart_policy(self):
+ """Gets the restart_policy of this V1ContainerResizePolicy. # noqa: E501
+
+ Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. # noqa: E501
+
+ :return: The restart_policy of this V1ContainerResizePolicy. # noqa: E501
+ :rtype: str
+ """
+ return self._restart_policy
+
+ @restart_policy.setter
+ def restart_policy(self, restart_policy):
+ """Sets the restart_policy of this V1ContainerResizePolicy.
+
+ Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. # noqa: E501
+
+ :param restart_policy: The restart_policy of this V1ContainerResizePolicy. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and restart_policy is None: # noqa: E501
+ raise ValueError("Invalid value for `restart_policy`, must not be `None`") # noqa: E501
+
+ self._restart_policy = restart_policy
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ContainerResizePolicy):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ContainerResizePolicy):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_state.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state.py
new file mode 100644
index 0000000000..28349c5cea
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state.py
@@ -0,0 +1,172 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ContainerState(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'running': 'V1ContainerStateRunning',
+ 'terminated': 'V1ContainerStateTerminated',
+ 'waiting': 'V1ContainerStateWaiting'
+ }
+
+ attribute_map = {
+ 'running': 'running',
+ 'terminated': 'terminated',
+ 'waiting': 'waiting'
+ }
+
+ def __init__(self, running=None, terminated=None, waiting=None, local_vars_configuration=None): # noqa: E501
+ """V1ContainerState - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._running = None
+ self._terminated = None
+ self._waiting = None
+ self.discriminator = None
+
+ if running is not None:
+ self.running = running
+ if terminated is not None:
+ self.terminated = terminated
+ if waiting is not None:
+ self.waiting = waiting
+
+ @property
+ def running(self):
+ """Gets the running of this V1ContainerState. # noqa: E501
+
+
+ :return: The running of this V1ContainerState. # noqa: E501
+ :rtype: V1ContainerStateRunning
+ """
+ return self._running
+
+ @running.setter
+ def running(self, running):
+ """Sets the running of this V1ContainerState.
+
+
+ :param running: The running of this V1ContainerState. # noqa: E501
+ :type: V1ContainerStateRunning
+ """
+
+ self._running = running
+
+ @property
+ def terminated(self):
+ """Gets the terminated of this V1ContainerState. # noqa: E501
+
+
+ :return: The terminated of this V1ContainerState. # noqa: E501
+ :rtype: V1ContainerStateTerminated
+ """
+ return self._terminated
+
+ @terminated.setter
+ def terminated(self, terminated):
+ """Sets the terminated of this V1ContainerState.
+
+
+ :param terminated: The terminated of this V1ContainerState. # noqa: E501
+ :type: V1ContainerStateTerminated
+ """
+
+ self._terminated = terminated
+
+ @property
+ def waiting(self):
+ """Gets the waiting of this V1ContainerState. # noqa: E501
+
+
+ :return: The waiting of this V1ContainerState. # noqa: E501
+ :rtype: V1ContainerStateWaiting
+ """
+ return self._waiting
+
+ @waiting.setter
+ def waiting(self, waiting):
+ """Sets the waiting of this V1ContainerState.
+
+
+ :param waiting: The waiting of this V1ContainerState. # noqa: E501
+ :type: V1ContainerStateWaiting
+ """
+
+ self._waiting = waiting
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ContainerState):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ContainerState):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_running.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_running.py
new file mode 100644
index 0000000000..9712060a68
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_running.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ContainerStateRunning(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'started_at': 'datetime'
+ }
+
+ attribute_map = {
+ 'started_at': 'startedAt'
+ }
+
+ def __init__(self, started_at=None, local_vars_configuration=None): # noqa: E501
+ """V1ContainerStateRunning - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._started_at = None
+ self.discriminator = None
+
+ if started_at is not None:
+ self.started_at = started_at
+
+ @property
+ def started_at(self):
+ """Gets the started_at of this V1ContainerStateRunning. # noqa: E501
+
+ Time at which the container was last (re-)started # noqa: E501
+
+ :return: The started_at of this V1ContainerStateRunning. # noqa: E501
+ :rtype: datetime
+ """
+ return self._started_at
+
+ @started_at.setter
+ def started_at(self, started_at):
+ """Sets the started_at of this V1ContainerStateRunning.
+
+ Time at which the container was last (re-)started # noqa: E501
+
+ :param started_at: The started_at of this V1ContainerStateRunning. # noqa: E501
+ :type: datetime
+ """
+
+ self._started_at = started_at
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ContainerStateRunning):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ContainerStateRunning):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_terminated.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_terminated.py
new file mode 100644
index 0000000000..cc2621818d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_terminated.py
@@ -0,0 +1,291 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ContainerStateTerminated(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # NOTE(review): the string below is a plain expression statement emitted by
    # the generator (not a docstring); it documents the two class-level dicts.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type name (drives to_dict serialization).
    openapi_types = {
        'container_id': 'str',
        'exit_code': 'int',
        'finished_at': 'datetime',
        'message': 'str',
        'reason': 'str',
        'signal': 'int',
        'started_at': 'datetime'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'container_id': 'containerID',
        'exit_code': 'exitCode',
        'finished_at': 'finishedAt',
        'message': 'message',
        'reason': 'reason',
        'signal': 'signal',
        'started_at': 'startedAt'
    }

    def __init__(self, container_id=None, exit_code=None, finished_at=None, message=None, reason=None, signal=None, started_at=None, local_vars_configuration=None):  # noqa: E501
        """V1ContainerStateTerminated - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._container_id = None
        self._exit_code = None
        self._finished_at = None
        self._message = None
        self._reason = None
        self._signal = None
        self._started_at = None
        self.discriminator = None

        if container_id is not None:
            self.container_id = container_id
        # exit_code is a required field in the schema: assigned unconditionally
        # so its setter can reject None when client-side validation is enabled.
        self.exit_code = exit_code
        if finished_at is not None:
            self.finished_at = finished_at
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        if signal is not None:
            self.signal = signal
        if started_at is not None:
            self.started_at = started_at

    @property
    def container_id(self):
        """Gets the container_id of this V1ContainerStateTerminated.  # noqa: E501

        Container's ID in the format '<type>://<container_id>'  # noqa: E501

        :return: The container_id of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: str
        """
        return self._container_id

    @container_id.setter
    def container_id(self, container_id):
        """Sets the container_id of this V1ContainerStateTerminated.

        Container's ID in the format '<type>://<container_id>'  # noqa: E501

        :param container_id: The container_id of this V1ContainerStateTerminated.  # noqa: E501
        :type: str
        """

        self._container_id = container_id

    @property
    def exit_code(self):
        """Gets the exit_code of this V1ContainerStateTerminated.  # noqa: E501

        Exit status from the last termination of the container  # noqa: E501

        :return: The exit_code of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: int
        """
        return self._exit_code

    @exit_code.setter
    def exit_code(self, exit_code):
        """Sets the exit_code of this V1ContainerStateTerminated.

        Exit status from the last termination of the container  # noqa: E501

        :param exit_code: The exit_code of this V1ContainerStateTerminated.  # noqa: E501
        :type: int
        :raises ValueError: if client-side validation is on and exit_code is None
        """
        if self.local_vars_configuration.client_side_validation and exit_code is None:  # noqa: E501
            raise ValueError("Invalid value for `exit_code`, must not be `None`")  # noqa: E501

        self._exit_code = exit_code

    @property
    def finished_at(self):
        """Gets the finished_at of this V1ContainerStateTerminated.  # noqa: E501

        Time at which the container last terminated  # noqa: E501

        :return: The finished_at of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: datetime
        """
        return self._finished_at

    @finished_at.setter
    def finished_at(self, finished_at):
        """Sets the finished_at of this V1ContainerStateTerminated.

        Time at which the container last terminated  # noqa: E501

        :param finished_at: The finished_at of this V1ContainerStateTerminated.  # noqa: E501
        :type: datetime
        """

        self._finished_at = finished_at

    @property
    def message(self):
        """Gets the message of this V1ContainerStateTerminated.  # noqa: E501

        Message regarding the last termination of the container  # noqa: E501

        :return: The message of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1ContainerStateTerminated.

        Message regarding the last termination of the container  # noqa: E501

        :param message: The message of this V1ContainerStateTerminated.  # noqa: E501
        :type: str
        """

        self._message = message

    @property
    def reason(self):
        """Gets the reason of this V1ContainerStateTerminated.  # noqa: E501

        (brief) reason from the last termination of the container  # noqa: E501

        :return: The reason of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1ContainerStateTerminated.

        (brief) reason from the last termination of the container  # noqa: E501

        :param reason: The reason of this V1ContainerStateTerminated.  # noqa: E501
        :type: str
        """

        self._reason = reason

    @property
    def signal(self):
        """Gets the signal of this V1ContainerStateTerminated.  # noqa: E501

        Signal from the last termination of the container  # noqa: E501

        :return: The signal of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: int
        """
        return self._signal

    @signal.setter
    def signal(self, signal):
        """Sets the signal of this V1ContainerStateTerminated.

        Signal from the last termination of the container  # noqa: E501

        :param signal: The signal of this V1ContainerStateTerminated.  # noqa: E501
        :type: int
        """

        self._signal = signal

    @property
    def started_at(self):
        """Gets the started_at of this V1ContainerStateTerminated.  # noqa: E501

        Time at which previous execution of the container started  # noqa: E501

        :return: The started_at of this V1ContainerStateTerminated.  # noqa: E501
        :rtype: datetime
        """
        return self._started_at

    @started_at.setter
    def started_at(self, started_at):
        """Sets the started_at of this V1ContainerStateTerminated.

        Time at which previous execution of the container started  # noqa: E501

        :param started_at: The started_at of this V1ContainerStateTerminated.  # noqa: E501
        :type: datetime
        """

        self._started_at = started_at

    def to_dict(self):
        """Returns the model properties as a dict"""
        # NOTE(review): keys are the python attribute names from openapi_types,
        # not the JSON names from attribute_map; nested models are serialized
        # one level deep inside lists and dicts.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ContainerStateTerminated):
            return False

        # Equality is structural: compares serialized dicts, not identity.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ContainerStateTerminated):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_waiting.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_waiting.py
new file mode 100644
index 0000000000..c9b27d60ca
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_state_waiting.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ContainerStateWaiting(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Python attribute name -> OpenAPI type name.
    openapi_types = {'message': 'str', 'reason': 'str'}

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {'message': 'message', 'reason': 'reason'}

    def __init__(self, message=None, reason=None, local_vars_configuration=None):  # noqa: E501
        """V1ContainerStateWaiting - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._message = None
        self._reason = None
        self.discriminator = None

        # Route provided values through the setters; omitted values keep the
        # private fields at their None defaults.
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason

    @property
    def message(self):
        """Message regarding why the container is not yet running.

        :return: The message of this V1ContainerStateWaiting.
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set the message of this V1ContainerStateWaiting.

        :param message: Message regarding why the container is not yet running.
        :type: str
        """
        self._message = message

    @property
    def reason(self):
        """(brief) reason the container is not yet running.

        :return: The reason of this V1ContainerStateWaiting.
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason of this V1ContainerStateWaiting.

        :param reason: (brief) reason the container is not yet running.
        :type: str
        """
        self._reason = reason

    def to_dict(self):
        """Return the model properties as a dict.

        Keys are python attribute names; nested models inside lists and
        dicts are serialized one level deep via their own to_dict().
        """
        serialized = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                serialized[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                serialized[name] = val.to_dict()
            elif isinstance(val, dict):
                serialized[name] = {
                    key: sub.to_dict() if hasattr(sub, "to_dict") else sub
                    for key, sub in val.items()
                }
            else:
                serialized[name] = val
        return serialized

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects serialize to the same dict."""
        if isinstance(other, V1ContainerStateWaiting):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Return True if the objects serialize to different dicts."""
        if isinstance(other, V1ContainerStateWaiting):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_container_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_container_status.py
new file mode 100644
index 0000000000..4fae190d13
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_container_status.py
@@ -0,0 +1,401 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ContainerStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # NOTE(review): the string below is a plain expression statement emitted by
    # the generator (not a docstring); it documents the two class-level dicts.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type name (drives to_dict serialization).
    openapi_types = {
        'allocated_resources': 'dict(str, str)',
        'container_id': 'str',
        'image': 'str',
        'image_id': 'str',
        'last_state': 'V1ContainerState',
        'name': 'str',
        'ready': 'bool',
        'resources': 'V1ResourceRequirements',
        'restart_count': 'int',
        'started': 'bool',
        'state': 'V1ContainerState'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'allocated_resources': 'allocatedResources',
        'container_id': 'containerID',
        'image': 'image',
        'image_id': 'imageID',
        'last_state': 'lastState',
        'name': 'name',
        'ready': 'ready',
        'resources': 'resources',
        'restart_count': 'restartCount',
        'started': 'started',
        'state': 'state'
    }

    def __init__(self, allocated_resources=None, container_id=None, image=None, image_id=None, last_state=None, name=None, ready=None, resources=None, restart_count=None, started=None, state=None, local_vars_configuration=None):  # noqa: E501
        """V1ContainerStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._allocated_resources = None
        self._container_id = None
        self._image = None
        self._image_id = None
        self._last_state = None
        self._name = None
        self._ready = None
        self._resources = None
        self._restart_count = None
        self._started = None
        self._state = None
        self.discriminator = None

        if allocated_resources is not None:
            self.allocated_resources = allocated_resources
        if container_id is not None:
            self.container_id = container_id
        # image, image_id, name, ready and restart_count are required fields:
        # assigned unconditionally so their setters can reject None when
        # client-side validation is enabled.
        self.image = image
        self.image_id = image_id
        if last_state is not None:
            self.last_state = last_state
        self.name = name
        self.ready = ready
        if resources is not None:
            self.resources = resources
        self.restart_count = restart_count
        if started is not None:
            self.started = started
        if state is not None:
            self.state = state

    @property
    def allocated_resources(self):
        """Gets the allocated_resources of this V1ContainerStatus.  # noqa: E501

        AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.  # noqa: E501

        :return: The allocated_resources of this V1ContainerStatus.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._allocated_resources

    @allocated_resources.setter
    def allocated_resources(self, allocated_resources):
        """Sets the allocated_resources of this V1ContainerStatus.

        AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.  # noqa: E501

        :param allocated_resources: The allocated_resources of this V1ContainerStatus.  # noqa: E501
        :type: dict(str, str)
        """

        self._allocated_resources = allocated_resources

    @property
    def container_id(self):
        """Gets the container_id of this V1ContainerStatus.  # noqa: E501

        ContainerID is the ID of the container in the format '<type>://<container_id>'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").  # noqa: E501

        :return: The container_id of this V1ContainerStatus.  # noqa: E501
        :rtype: str
        """
        return self._container_id

    @container_id.setter
    def container_id(self, container_id):
        """Sets the container_id of this V1ContainerStatus.

        ContainerID is the ID of the container in the format '<type>://<container_id>'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").  # noqa: E501

        :param container_id: The container_id of this V1ContainerStatus.  # noqa: E501
        :type: str
        """

        self._container_id = container_id

    @property
    def image(self):
        """Gets the image of this V1ContainerStatus.  # noqa: E501

        Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.  # noqa: E501

        :return: The image of this V1ContainerStatus.  # noqa: E501
        :rtype: str
        """
        return self._image

    @image.setter
    def image(self, image):
        """Sets the image of this V1ContainerStatus.

        Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.  # noqa: E501

        :param image: The image of this V1ContainerStatus.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and image is None
        """
        if self.local_vars_configuration.client_side_validation and image is None:  # noqa: E501
            raise ValueError("Invalid value for `image`, must not be `None`")  # noqa: E501

        self._image = image

    @property
    def image_id(self):
        """Gets the image_id of this V1ContainerStatus.  # noqa: E501

        ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.  # noqa: E501

        :return: The image_id of this V1ContainerStatus.  # noqa: E501
        :rtype: str
        """
        return self._image_id

    @image_id.setter
    def image_id(self, image_id):
        """Sets the image_id of this V1ContainerStatus.

        ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.  # noqa: E501

        :param image_id: The image_id of this V1ContainerStatus.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and image_id is None
        """
        if self.local_vars_configuration.client_side_validation and image_id is None:  # noqa: E501
            raise ValueError("Invalid value for `image_id`, must not be `None`")  # noqa: E501

        self._image_id = image_id

    @property
    def last_state(self):
        """Gets the last_state of this V1ContainerStatus.  # noqa: E501


        :return: The last_state of this V1ContainerStatus.  # noqa: E501
        :rtype: V1ContainerState
        """
        return self._last_state

    @last_state.setter
    def last_state(self, last_state):
        """Sets the last_state of this V1ContainerStatus.


        :param last_state: The last_state of this V1ContainerStatus.  # noqa: E501
        :type: V1ContainerState
        """

        self._last_state = last_state

    @property
    def name(self):
        """Gets the name of this V1ContainerStatus.  # noqa: E501

        Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.  # noqa: E501

        :return: The name of this V1ContainerStatus.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1ContainerStatus.

        Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.  # noqa: E501

        :param name: The name of this V1ContainerStatus.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and name is None
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    @property
    def ready(self):
        """Gets the ready of this V1ContainerStatus.  # noqa: E501

        Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field). The value is typically used to determine whether a container is ready to accept traffic.  # noqa: E501

        :return: The ready of this V1ContainerStatus.  # noqa: E501
        :rtype: bool
        """
        return self._ready

    @ready.setter
    def ready(self, ready):
        """Sets the ready of this V1ContainerStatus.

        Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field). The value is typically used to determine whether a container is ready to accept traffic.  # noqa: E501

        :param ready: The ready of this V1ContainerStatus.  # noqa: E501
        :type: bool
        :raises ValueError: if client-side validation is on and ready is None
        """
        if self.local_vars_configuration.client_side_validation and ready is None:  # noqa: E501
            raise ValueError("Invalid value for `ready`, must not be `None`")  # noqa: E501

        self._ready = ready

    @property
    def resources(self):
        """Gets the resources of this V1ContainerStatus.  # noqa: E501


        :return: The resources of this V1ContainerStatus.  # noqa: E501
        :rtype: V1ResourceRequirements
        """
        return self._resources

    @resources.setter
    def resources(self, resources):
        """Sets the resources of this V1ContainerStatus.


        :param resources: The resources of this V1ContainerStatus.  # noqa: E501
        :type: V1ResourceRequirements
        """

        self._resources = resources

    @property
    def restart_count(self):
        """Gets the restart_count of this V1ContainerStatus.  # noqa: E501

        RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.  # noqa: E501

        :return: The restart_count of this V1ContainerStatus.  # noqa: E501
        :rtype: int
        """
        return self._restart_count

    @restart_count.setter
    def restart_count(self, restart_count):
        """Sets the restart_count of this V1ContainerStatus.

        RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.  # noqa: E501

        :param restart_count: The restart_count of this V1ContainerStatus.  # noqa: E501
        :type: int
        :raises ValueError: if client-side validation is on and restart_count is None
        """
        if self.local_vars_configuration.client_side_validation and restart_count is None:  # noqa: E501
            raise ValueError("Invalid value for `restart_count`, must not be `None`")  # noqa: E501

        self._restart_count = restart_count

    @property
    def started(self):
        """Gets the started of this V1ContainerStatus.  # noqa: E501

        Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.  # noqa: E501

        :return: The started of this V1ContainerStatus.  # noqa: E501
        :rtype: bool
        """
        return self._started

    @started.setter
    def started(self, started):
        """Sets the started of this V1ContainerStatus.

        Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.  # noqa: E501

        :param started: The started of this V1ContainerStatus.  # noqa: E501
        :type: bool
        """

        self._started = started

    @property
    def state(self):
        """Gets the state of this V1ContainerStatus.  # noqa: E501


        :return: The state of this V1ContainerStatus.  # noqa: E501
        :rtype: V1ContainerState
        """
        return self._state

    @state.setter
    def state(self, state):
        """Sets the state of this V1ContainerStatus.


        :param state: The state of this V1ContainerStatus.  # noqa: E501
        :type: V1ContainerState
        """

        self._state = state

    def to_dict(self):
        """Returns the model properties as a dict"""
        # NOTE(review): keys are the python attribute names from openapi_types,
        # not the JSON names from attribute_map; nested models are serialized
        # one level deep inside lists and dicts.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ContainerStatus):
            return False

        # Equality is structural: compares serialized dicts, not identity.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ContainerStatus):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision.py b/contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision.py
new file mode 100644
index 0000000000..d25ac2c75a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision.py
@@ -0,0 +1,233 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ControllerRevision(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # NOTE(review): the string below is a plain expression statement emitted by
    # the generator (not a docstring); it documents the two class-level dicts.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type name (drives to_dict serialization).
    openapi_types = {
        'api_version': 'str',
        'data': 'object',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'revision': 'int'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'data': 'data',
        'kind': 'kind',
        'metadata': 'metadata',
        'revision': 'revision'
    }

    def __init__(self, api_version=None, data=None, kind=None, metadata=None, revision=None, local_vars_configuration=None):  # noqa: E501
        """V1ControllerRevision - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._data = None
        self._kind = None
        self._metadata = None
        self._revision = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if data is not None:
            self.data = data
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        # revision is a required field in the schema: assigned unconditionally
        # so its setter can reject None when client-side validation is enabled.
        self.revision = revision

    @property
    def api_version(self):
        """Gets the api_version of this V1ControllerRevision.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1ControllerRevision.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ControllerRevision.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1ControllerRevision.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def data(self):
        """Gets the data of this V1ControllerRevision.  # noqa: E501

        Data is the serialized representation of the state.  # noqa: E501

        :return: The data of this V1ControllerRevision.  # noqa: E501
        :rtype: object
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this V1ControllerRevision.

        Data is the serialized representation of the state.  # noqa: E501

        :param data: The data of this V1ControllerRevision.  # noqa: E501
        :type: object
        """

        self._data = data

    @property
    def kind(self):
        """Gets the kind of this V1ControllerRevision.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1ControllerRevision.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1ControllerRevision.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1ControllerRevision.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1ControllerRevision.  # noqa: E501


        :return: The metadata of this V1ControllerRevision.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1ControllerRevision.


        :param metadata: The metadata of this V1ControllerRevision.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def revision(self):
        """Gets the revision of this V1ControllerRevision.  # noqa: E501

        Revision indicates the revision of the state represented by Data.  # noqa: E501

        :return: The revision of this V1ControllerRevision.  # noqa: E501
        :rtype: int
        """
        return self._revision

    @revision.setter
    def revision(self, revision):
        """Sets the revision of this V1ControllerRevision.

        Revision indicates the revision of the state represented by Data.  # noqa: E501

        :param revision: The revision of this V1ControllerRevision.  # noqa: E501
        :type: int
        :raises ValueError: if client-side validation is on and revision is None
        """
        if self.local_vars_configuration.client_side_validation and revision is None:  # noqa: E501
            raise ValueError("Invalid value for `revision`, must not be `None`")  # noqa: E501

        self._revision = revision

    def to_dict(self):
        """Returns the model properties as a dict"""
        # NOTE(review): keys are the python attribute names from openapi_types,
        # not the JSON names from attribute_map; nested models are serialized
        # one level deep inside lists and dicts.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ControllerRevision):
            return False

        # Equality is structural: compares serialized dicts, not identity.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ControllerRevision):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision_list.py
new file mode 100644
index 0000000000..10e327a84a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_controller_revision_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ControllerRevisionList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1ControllerRevision]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1ControllerRevisionList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ControllerRevisionList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ControllerRevisionList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ControllerRevisionList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ControllerRevisionList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1ControllerRevisionList. # noqa: E501
+
+ Items is the list of ControllerRevisions # noqa: E501
+
+ :return: The items of this V1ControllerRevisionList. # noqa: E501
+ :rtype: list[V1ControllerRevision]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ControllerRevisionList.
+
+ Items is the list of ControllerRevisions # noqa: E501
+
+ :param items: The items of this V1ControllerRevisionList. # noqa: E501
+ :type: list[V1ControllerRevision]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ControllerRevisionList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ControllerRevisionList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ControllerRevisionList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ControllerRevisionList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ControllerRevisionList. # noqa: E501
+
+
+ :return: The metadata of this V1ControllerRevisionList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ControllerRevisionList.
+
+
+ :param metadata: The metadata of this V1ControllerRevisionList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ControllerRevisionList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ControllerRevisionList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job.py
new file mode 100644
index 0000000000..6193c6afe7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CronJob(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1CronJobSpec',
+ 'status': 'V1CronJobStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1CronJob - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1CronJob. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1CronJob. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1CronJob.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1CronJob. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1CronJob. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1CronJob. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1CronJob.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1CronJob. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1CronJob. # noqa: E501
+
+
+ :return: The metadata of this V1CronJob. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1CronJob.
+
+
+ :param metadata: The metadata of this V1CronJob. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1CronJob. # noqa: E501
+
+
+ :return: The spec of this V1CronJob. # noqa: E501
+ :rtype: V1CronJobSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1CronJob.
+
+
+ :param spec: The spec of this V1CronJob. # noqa: E501
+ :type: V1CronJobSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1CronJob. # noqa: E501
+
+
+ :return: The status of this V1CronJob. # noqa: E501
+ :rtype: V1CronJobStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1CronJob.
+
+
+ :param status: The status of this V1CronJob. # noqa: E501
+ :type: V1CronJobStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CronJob):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CronJob):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_list.py
new file mode 100644
index 0000000000..0bf9a792db
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CronJobList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1CronJob]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1CronJobList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1CronJobList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1CronJobList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1CronJobList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1CronJobList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1CronJobList. # noqa: E501
+
+ items is the list of CronJobs. # noqa: E501
+
+ :return: The items of this V1CronJobList. # noqa: E501
+ :rtype: list[V1CronJob]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1CronJobList.
+
+ items is the list of CronJobs. # noqa: E501
+
+ :param items: The items of this V1CronJobList. # noqa: E501
+ :type: list[V1CronJob]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1CronJobList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1CronJobList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1CronJobList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1CronJobList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1CronJobList. # noqa: E501
+
+
+ :return: The metadata of this V1CronJobList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1CronJobList.
+
+
+ :param metadata: The metadata of this V1CronJobList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CronJobList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CronJobList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_spec.py
new file mode 100644
index 0000000000..35bcfac1ad
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_spec.py
@@ -0,0 +1,318 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CronJobSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'concurrency_policy': 'str',
+ 'failed_jobs_history_limit': 'int',
+ 'job_template': 'V1JobTemplateSpec',
+ 'schedule': 'str',
+ 'starting_deadline_seconds': 'int',
+ 'successful_jobs_history_limit': 'int',
+ 'suspend': 'bool',
+ 'time_zone': 'str'
+ }
+
+ attribute_map = {
+ 'concurrency_policy': 'concurrencyPolicy',
+ 'failed_jobs_history_limit': 'failedJobsHistoryLimit',
+ 'job_template': 'jobTemplate',
+ 'schedule': 'schedule',
+ 'starting_deadline_seconds': 'startingDeadlineSeconds',
+ 'successful_jobs_history_limit': 'successfulJobsHistoryLimit',
+ 'suspend': 'suspend',
+ 'time_zone': 'timeZone'
+ }
+
+ def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, job_template=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, time_zone=None, local_vars_configuration=None): # noqa: E501
+ """V1CronJobSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._concurrency_policy = None
+ self._failed_jobs_history_limit = None
+ self._job_template = None
+ self._schedule = None
+ self._starting_deadline_seconds = None
+ self._successful_jobs_history_limit = None
+ self._suspend = None
+ self._time_zone = None
+ self.discriminator = None
+
+ if concurrency_policy is not None:
+ self.concurrency_policy = concurrency_policy
+ if failed_jobs_history_limit is not None:
+ self.failed_jobs_history_limit = failed_jobs_history_limit
+ self.job_template = job_template
+ self.schedule = schedule
+ if starting_deadline_seconds is not None:
+ self.starting_deadline_seconds = starting_deadline_seconds
+ if successful_jobs_history_limit is not None:
+ self.successful_jobs_history_limit = successful_jobs_history_limit
+ if suspend is not None:
+ self.suspend = suspend
+ if time_zone is not None:
+ self.time_zone = time_zone
+
+ @property
+ def concurrency_policy(self):
+ """Gets the concurrency_policy of this V1CronJobSpec. # noqa: E501
+
+ Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one # noqa: E501
+
+ :return: The concurrency_policy of this V1CronJobSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._concurrency_policy
+
+ @concurrency_policy.setter
+ def concurrency_policy(self, concurrency_policy):
+ """Sets the concurrency_policy of this V1CronJobSpec.
+
+ Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one # noqa: E501
+
+ :param concurrency_policy: The concurrency_policy of this V1CronJobSpec. # noqa: E501
+ :type: str
+ """
+
+ self._concurrency_policy = concurrency_policy
+
+ @property
+ def failed_jobs_history_limit(self):
+ """Gets the failed_jobs_history_limit of this V1CronJobSpec. # noqa: E501
+
+ The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1. # noqa: E501
+
+ :return: The failed_jobs_history_limit of this V1CronJobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._failed_jobs_history_limit
+
+ @failed_jobs_history_limit.setter
+ def failed_jobs_history_limit(self, failed_jobs_history_limit):
+ """Sets the failed_jobs_history_limit of this V1CronJobSpec.
+
+ The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1. # noqa: E501
+
+ :param failed_jobs_history_limit: The failed_jobs_history_limit of this V1CronJobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._failed_jobs_history_limit = failed_jobs_history_limit
+
+ @property
+ def job_template(self):
+ """Gets the job_template of this V1CronJobSpec. # noqa: E501
+
+
+ :return: The job_template of this V1CronJobSpec. # noqa: E501
+ :rtype: V1JobTemplateSpec
+ """
+ return self._job_template
+
+ @job_template.setter
+ def job_template(self, job_template):
+ """Sets the job_template of this V1CronJobSpec.
+
+
+ :param job_template: The job_template of this V1CronJobSpec. # noqa: E501
+ :type: V1JobTemplateSpec
+ """
+ if self.local_vars_configuration.client_side_validation and job_template is None: # noqa: E501
+ raise ValueError("Invalid value for `job_template`, must not be `None`") # noqa: E501
+
+ self._job_template = job_template
+
+ @property
+ def schedule(self):
+ """Gets the schedule of this V1CronJobSpec. # noqa: E501
+
+ The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. # noqa: E501
+
+ :return: The schedule of this V1CronJobSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._schedule
+
+ @schedule.setter
+ def schedule(self, schedule):
+ """Sets the schedule of this V1CronJobSpec.
+
+ The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. # noqa: E501
+
+ :param schedule: The schedule of this V1CronJobSpec. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and schedule is None: # noqa: E501
+ raise ValueError("Invalid value for `schedule`, must not be `None`") # noqa: E501
+
+ self._schedule = schedule
+
+ @property
+ def starting_deadline_seconds(self):
+ """Gets the starting_deadline_seconds of this V1CronJobSpec. # noqa: E501
+
+ Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones. # noqa: E501
+
+ :return: The starting_deadline_seconds of this V1CronJobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._starting_deadline_seconds
+
+ @starting_deadline_seconds.setter
+ def starting_deadline_seconds(self, starting_deadline_seconds):
+ """Sets the starting_deadline_seconds of this V1CronJobSpec.
+
+ Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones. # noqa: E501
+
+ :param starting_deadline_seconds: The starting_deadline_seconds of this V1CronJobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._starting_deadline_seconds = starting_deadline_seconds
+
+ @property
+ def successful_jobs_history_limit(self):
+ """Gets the successful_jobs_history_limit of this V1CronJobSpec. # noqa: E501
+
+ The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3. # noqa: E501
+
+ :return: The successful_jobs_history_limit of this V1CronJobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._successful_jobs_history_limit
+
+ @successful_jobs_history_limit.setter
+ def successful_jobs_history_limit(self, successful_jobs_history_limit):
+ """Sets the successful_jobs_history_limit of this V1CronJobSpec.
+
+ The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3. # noqa: E501
+
+ :param successful_jobs_history_limit: The successful_jobs_history_limit of this V1CronJobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._successful_jobs_history_limit = successful_jobs_history_limit
+
+ @property
+ def suspend(self):
+ """Gets the suspend of this V1CronJobSpec. # noqa: E501
+
+ This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. # noqa: E501
+
+ :return: The suspend of this V1CronJobSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._suspend
+
+ @suspend.setter
+ def suspend(self, suspend):
+ """Sets the suspend of this V1CronJobSpec.
+
+ This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. # noqa: E501
+
+ :param suspend: The suspend of this V1CronJobSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._suspend = suspend
+
+ @property
+ def time_zone(self):
+ """Gets the time_zone of this V1CronJobSpec. # noqa: E501
+
+        The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones  # noqa: E501
+
+ :return: The time_zone of this V1CronJobSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._time_zone
+
+ @time_zone.setter
+ def time_zone(self, time_zone):
+ """Sets the time_zone of this V1CronJobSpec.
+
+        The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones  # noqa: E501
+
+ :param time_zone: The time_zone of this V1CronJobSpec. # noqa: E501
+ :type: str
+ """
+
+ self._time_zone = time_zone
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CronJobSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CronJobSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_status.py
new file mode 100644
index 0000000000..dcb3d212d6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cron_job_status.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CronJobStatus(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'active': 'list[V1ObjectReference]',
+        'last_schedule_time': 'datetime',
+        'last_successful_time': 'datetime'
+    }
+
+    attribute_map = {
+        'active': 'active',
+        'last_schedule_time': 'lastScheduleTime',
+        'last_successful_time': 'lastSuccessfulTime'
+    }
+
+    def __init__(self, active=None, last_schedule_time=None, last_successful_time=None, local_vars_configuration=None):  # noqa: E501
+        """V1CronJobStatus - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._active = None
+        self._last_schedule_time = None
+        self._last_successful_time = None
+        self.discriminator = None
+
+        # All status fields are optional; assign through the property
+        # setters only when the caller supplied a value.
+        if active is not None:
+            self.active = active
+        if last_schedule_time is not None:
+            self.last_schedule_time = last_schedule_time
+        if last_successful_time is not None:
+            self.last_successful_time = last_successful_time
+
+    @property
+    def active(self):
+        """Gets the active of this V1CronJobStatus.  # noqa: E501
+
+        A list of pointers to currently running jobs.  # noqa: E501
+
+        :return: The active of this V1CronJobStatus.  # noqa: E501
+        :rtype: list[V1ObjectReference]
+        """
+        return self._active
+
+    @active.setter
+    def active(self, active):
+        """Sets the active of this V1CronJobStatus.
+
+        A list of pointers to currently running jobs.  # noqa: E501
+
+        :param active: The active of this V1CronJobStatus.  # noqa: E501
+        :type: list[V1ObjectReference]
+        """
+
+        self._active = active
+
+    @property
+    def last_schedule_time(self):
+        """Gets the last_schedule_time of this V1CronJobStatus.  # noqa: E501
+
+        Information about when the job was last successfully scheduled.  # noqa: E501
+
+        :return: The last_schedule_time of this V1CronJobStatus.  # noqa: E501
+        :rtype: datetime
+        """
+        return self._last_schedule_time
+
+    @last_schedule_time.setter
+    def last_schedule_time(self, last_schedule_time):
+        """Sets the last_schedule_time of this V1CronJobStatus.
+
+        Information about when the job was last successfully scheduled.  # noqa: E501
+
+        :param last_schedule_time: The last_schedule_time of this V1CronJobStatus.  # noqa: E501
+        :type: datetime
+        """
+
+        self._last_schedule_time = last_schedule_time
+
+    @property
+    def last_successful_time(self):
+        """Gets the last_successful_time of this V1CronJobStatus.  # noqa: E501
+
+        Information about when the job last successfully completed.  # noqa: E501
+
+        :return: The last_successful_time of this V1CronJobStatus.  # noqa: E501
+        :rtype: datetime
+        """
+        return self._last_successful_time
+
+    @last_successful_time.setter
+    def last_successful_time(self, last_successful_time):
+        """Sets the last_successful_time of this V1CronJobStatus.
+
+        Information about when the job last successfully completed.  # noqa: E501
+
+        :param last_successful_time: The last_successful_time of this V1CronJobStatus.  # noqa: E501
+        :type: datetime
+        """
+
+        self._last_successful_time = last_successful_time
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1CronJobStatus):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1CronJobStatus):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_cross_version_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_cross_version_object_reference.py
new file mode 100644
index 0000000000..edfacdcfdd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_cross_version_object_reference.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CrossVersionObjectReference(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'kind': 'str',
+        'name': 'str'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'kind': 'kind',
+        'name': 'name'
+    }
+
+    def __init__(self, api_version=None, kind=None, name=None, local_vars_configuration=None):  # noqa: E501
+        """V1CrossVersionObjectReference - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._kind = None
+        self._name = None
+        self.discriminator = None
+
+        if api_version is not None:
+            self.api_version = api_version
+        self.kind = kind  # required; setter rejects None when client-side validation is enabled
+        self.name = name  # required; setter rejects None when client-side validation is enabled
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1CrossVersionObjectReference.  # noqa: E501
+
+        apiVersion is the API version of the referent  # noqa: E501
+
+        :return: The api_version of this V1CrossVersionObjectReference.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1CrossVersionObjectReference.
+
+        apiVersion is the API version of the referent  # noqa: E501
+
+        :param api_version: The api_version of this V1CrossVersionObjectReference.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1CrossVersionObjectReference.  # noqa: E501
+
+        kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1CrossVersionObjectReference.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1CrossVersionObjectReference.
+
+        kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1CrossVersionObjectReference.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and kind is None:  # noqa: E501
+            raise ValueError("Invalid value for `kind`, must not be `None`")  # noqa: E501
+
+        self._kind = kind
+
+    @property
+    def name(self):
+        """Gets the name of this V1CrossVersionObjectReference.  # noqa: E501
+
+        name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501
+
+        :return: The name of this V1CrossVersionObjectReference.  # noqa: E501
+        :rtype: str
+        """
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        """Sets the name of this V1CrossVersionObjectReference.
+
+        name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names  # noqa: E501
+
+        :param name: The name of this V1CrossVersionObjectReference.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
+            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
+
+        self._name = name
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1CrossVersionObjectReference):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1CrossVersionObjectReference):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver.py
new file mode 100644
index 0000000000..9511b6d653
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSIDriver(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'kind': 'str',
+        'metadata': 'V1ObjectMeta',
+        'spec': 'V1CSIDriverSpec'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'kind': 'kind',
+        'metadata': 'metadata',
+        'spec': 'spec'
+    }
+
+    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
+        """V1CSIDriver - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._kind = None
+        self._metadata = None
+        self._spec = None
+        self.discriminator = None
+
+        if api_version is not None:
+            self.api_version = api_version
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+        self.spec = spec  # required; setter rejects None when client-side validation is enabled
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1CSIDriver.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1CSIDriver.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1CSIDriver.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1CSIDriver.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1CSIDriver.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1CSIDriver.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1CSIDriver.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1CSIDriver.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1CSIDriver.  # noqa: E501
+
+
+        :return: The metadata of this V1CSIDriver.  # noqa: E501
+        :rtype: V1ObjectMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1CSIDriver.
+
+
+        :param metadata: The metadata of this V1CSIDriver.  # noqa: E501
+        :type: V1ObjectMeta
+        """
+
+        self._metadata = metadata
+
+    @property
+    def spec(self):
+        """Gets the spec of this V1CSIDriver.  # noqa: E501
+
+
+        :return: The spec of this V1CSIDriver.  # noqa: E501
+        :rtype: V1CSIDriverSpec
+        """
+        return self._spec
+
+    @spec.setter
+    def spec(self, spec):
+        """Sets the spec of this V1CSIDriver.
+
+
+        :param spec: The spec of this V1CSIDriver.  # noqa: E501
+        :type: V1CSIDriverSpec
+        """
+        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
+            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
+
+        self._spec = spec
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1CSIDriver):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1CSIDriver):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_list.py
new file mode 100644
index 0000000000..c464e6e740
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSIDriverList(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'items': 'list[V1CSIDriver]',
+        'kind': 'str',
+        'metadata': 'V1ListMeta'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'items': 'items',
+        'kind': 'kind',
+        'metadata': 'metadata'
+    }
+
+    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
+        """V1CSIDriverList - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._items = None
+        self._kind = None
+        self._metadata = None
+        self.discriminator = None
+
+        if api_version is not None:
+            self.api_version = api_version
+        self.items = items  # required; setter rejects None when client-side validation is enabled
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1CSIDriverList.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1CSIDriverList.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1CSIDriverList.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1CSIDriverList.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def items(self):
+        """Gets the items of this V1CSIDriverList.  # noqa: E501
+
+        items is the list of CSIDriver  # noqa: E501
+
+        :return: The items of this V1CSIDriverList.  # noqa: E501
+        :rtype: list[V1CSIDriver]
+        """
+        return self._items
+
+    @items.setter
+    def items(self, items):
+        """Sets the items of this V1CSIDriverList.
+
+        items is the list of CSIDriver  # noqa: E501
+
+        :param items: The items of this V1CSIDriverList.  # noqa: E501
+        :type: list[V1CSIDriver]
+        """
+        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
+            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
+
+        self._items = items
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1CSIDriverList.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1CSIDriverList.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1CSIDriverList.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1CSIDriverList.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1CSIDriverList.  # noqa: E501
+
+
+        :return: The metadata of this V1CSIDriverList.  # noqa: E501
+        :rtype: V1ListMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1CSIDriverList.
+
+
+        :param metadata: The metadata of this V1CSIDriverList.  # noqa: E501
+        :type: V1ListMeta
+        """
+
+        self._metadata = metadata
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1CSIDriverList):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1CSIDriverList):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_spec.py
new file mode 100644
index 0000000000..e232b14aea
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_driver_spec.py
@@ -0,0 +1,318 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSIDriverSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'attach_required': 'bool',
+ 'fs_group_policy': 'str',
+ 'pod_info_on_mount': 'bool',
+ 'requires_republish': 'bool',
+ 'se_linux_mount': 'bool',
+ 'storage_capacity': 'bool',
+ 'token_requests': 'list[StorageV1TokenRequest]',
+ 'volume_lifecycle_modes': 'list[str]'
+ }
+
+ attribute_map = {
+ 'attach_required': 'attachRequired',
+ 'fs_group_policy': 'fsGroupPolicy',
+ 'pod_info_on_mount': 'podInfoOnMount',
+ 'requires_republish': 'requiresRepublish',
+ 'se_linux_mount': 'seLinuxMount',
+ 'storage_capacity': 'storageCapacity',
+ 'token_requests': 'tokenRequests',
+ 'volume_lifecycle_modes': 'volumeLifecycleModes'
+ }
+
+ def __init__(self, attach_required=None, fs_group_policy=None, pod_info_on_mount=None, requires_republish=None, se_linux_mount=None, storage_capacity=None, token_requests=None, volume_lifecycle_modes=None, local_vars_configuration=None): # noqa: E501
+ """V1CSIDriverSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._attach_required = None
+ self._fs_group_policy = None
+ self._pod_info_on_mount = None
+ self._requires_republish = None
+ self._se_linux_mount = None
+ self._storage_capacity = None
+ self._token_requests = None
+ self._volume_lifecycle_modes = None
+ self.discriminator = None
+
+ if attach_required is not None:
+ self.attach_required = attach_required
+ if fs_group_policy is not None:
+ self.fs_group_policy = fs_group_policy
+ if pod_info_on_mount is not None:
+ self.pod_info_on_mount = pod_info_on_mount
+ if requires_republish is not None:
+ self.requires_republish = requires_republish
+ if se_linux_mount is not None:
+ self.se_linux_mount = se_linux_mount
+ if storage_capacity is not None:
+ self.storage_capacity = storage_capacity
+ if token_requests is not None:
+ self.token_requests = token_requests
+ if volume_lifecycle_modes is not None:
+ self.volume_lifecycle_modes = volume_lifecycle_modes
+
+ @property
+ def attach_required(self):
+ """Gets the attach_required of this V1CSIDriverSpec. # noqa: E501
+
+ attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. This field is immutable. # noqa: E501
+
+ :return: The attach_required of this V1CSIDriverSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._attach_required
+
+ @attach_required.setter
+ def attach_required(self, attach_required):
+ """Sets the attach_required of this V1CSIDriverSpec.
+
+ attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. This field is immutable. # noqa: E501
+
+ :param attach_required: The attach_required of this V1CSIDriverSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._attach_required = attach_required
+
+ @property
+ def fs_group_policy(self):
+ """Gets the fs_group_policy of this V1CSIDriverSpec. # noqa: E501
+
+ fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is immutable. Defaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce. # noqa: E501
+
+ :return: The fs_group_policy of this V1CSIDriverSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_group_policy
+
+ @fs_group_policy.setter
+ def fs_group_policy(self, fs_group_policy):
+ """Sets the fs_group_policy of this V1CSIDriverSpec.
+
+ fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is immutable. Defaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce. # noqa: E501
+
+ :param fs_group_policy: The fs_group_policy of this V1CSIDriverSpec. # noqa: E501
+ :type: str
+ """
+
+ self._fs_group_policy = fs_group_policy
+
+ @property
+ def pod_info_on_mount(self):
+ """Gets the pod_info_on_mount of this V1CSIDriverSpec. # noqa: E501
+
+        podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume defined by a CSIVolumeSource, otherwise \"false\" \"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. This field is immutable. # noqa: E501
+
+ :return: The pod_info_on_mount of this V1CSIDriverSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._pod_info_on_mount
+
+ @pod_info_on_mount.setter
+ def pod_info_on_mount(self, pod_info_on_mount):
+ """Sets the pod_info_on_mount of this V1CSIDriverSpec.
+
+        podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume defined by a CSIVolumeSource, otherwise \"false\" \"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. This field is immutable. # noqa: E501
+
+ :param pod_info_on_mount: The pod_info_on_mount of this V1CSIDriverSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._pod_info_on_mount = pod_info_on_mount
+
+ @property
+ def requires_republish(self):
+ """Gets the requires_republish of this V1CSIDriverSpec. # noqa: E501
+
+ requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. # noqa: E501
+
+ :return: The requires_republish of this V1CSIDriverSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._requires_republish
+
+ @requires_republish.setter
+ def requires_republish(self, requires_republish):
+ """Sets the requires_republish of this V1CSIDriverSpec.
+
+ requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. # noqa: E501
+
+ :param requires_republish: The requires_republish of this V1CSIDriverSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._requires_republish = requires_republish
+
+ @property
+ def se_linux_mount(self):
+ """Gets the se_linux_mount of this V1CSIDriverSpec. # noqa: E501
+
+ seLinuxMount specifies if the CSI driver supports \"-o context\" mount option. When \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context. When \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem. Default is \"false\". # noqa: E501
+
+ :return: The se_linux_mount of this V1CSIDriverSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._se_linux_mount
+
+ @se_linux_mount.setter
+ def se_linux_mount(self, se_linux_mount):
+ """Sets the se_linux_mount of this V1CSIDriverSpec.
+
+ seLinuxMount specifies if the CSI driver supports \"-o context\" mount option. When \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context. When \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem. Default is \"false\". # noqa: E501
+
+ :param se_linux_mount: The se_linux_mount of this V1CSIDriverSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._se_linux_mount = se_linux_mount
+
+ @property
+ def storage_capacity(self):
+ """Gets the storage_capacity of this V1CSIDriverSpec. # noqa: E501
+
+ storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true. The check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object. Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. This field was immutable in Kubernetes <= 1.22 and now is mutable. # noqa: E501
+
+ :return: The storage_capacity of this V1CSIDriverSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._storage_capacity
+
+ @storage_capacity.setter
+ def storage_capacity(self, storage_capacity):
+ """Sets the storage_capacity of this V1CSIDriverSpec.
+
+ storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true. The check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object. Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. This field was immutable in Kubernetes <= 1.22 and now is mutable. # noqa: E501
+
+ :param storage_capacity: The storage_capacity of this V1CSIDriverSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._storage_capacity = storage_capacity
+
+ @property
+ def token_requests(self):
+ """Gets the token_requests of this V1CSIDriverSpec. # noqa: E501
+
+ tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": { \"<audience>\": { \"token\": <token>, \"expirationTimestamp\": <expiration timestamp in RFC3339>, }, ... } Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. # noqa: E501
+
+ :return: The token_requests of this V1CSIDriverSpec. # noqa: E501
+ :rtype: list[StorageV1TokenRequest]
+ """
+ return self._token_requests
+
+ @token_requests.setter
+ def token_requests(self, token_requests):
+ """Sets the token_requests of this V1CSIDriverSpec.
+
+ tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": { \"<audience>\": { \"token\": <token>, \"expirationTimestamp\": <expiration timestamp in RFC3339>, }, ... } Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. # noqa: E501
+
+ :param token_requests: The token_requests of this V1CSIDriverSpec. # noqa: E501
+ :type: list[StorageV1TokenRequest]
+ """
+
+ self._token_requests = token_requests
+
+ @property
+ def volume_lifecycle_modes(self):
+ """Gets the volume_lifecycle_modes of this V1CSIDriverSpec. # noqa: E501
+
+ volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta. This field is immutable. # noqa: E501
+
+ :return: The volume_lifecycle_modes of this V1CSIDriverSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._volume_lifecycle_modes
+
+ @volume_lifecycle_modes.setter
+ def volume_lifecycle_modes(self, volume_lifecycle_modes):
+ """Sets the volume_lifecycle_modes of this V1CSIDriverSpec.
+
+ volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta. This field is immutable. # noqa: E501
+
+ :param volume_lifecycle_modes: The volume_lifecycle_modes of this V1CSIDriverSpec. # noqa: E501
+ :type: list[str]
+ """
+
+ self._volume_lifecycle_modes = volume_lifecycle_modes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CSIDriverSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CSIDriverSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node.py
new file mode 100644
index 0000000000..08762340cc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSINode(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1CSINodeSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1CSINode - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1CSINode. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1CSINode. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1CSINode.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1CSINode. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1CSINode. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1CSINode. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1CSINode.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1CSINode. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1CSINode. # noqa: E501
+
+
+ :return: The metadata of this V1CSINode. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1CSINode.
+
+
+ :param metadata: The metadata of this V1CSINode. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1CSINode. # noqa: E501
+
+
+ :return: The spec of this V1CSINode. # noqa: E501
+ :rtype: V1CSINodeSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1CSINode.
+
+
+ :param spec: The spec of this V1CSINode. # noqa: E501
+ :type: V1CSINodeSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CSINode):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CSINode):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_driver.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_driver.py
new file mode 100644
index 0000000000..7cdc825f57
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_driver.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSINodeDriver(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'allocatable': 'V1VolumeNodeResources',
+ 'name': 'str',
+ 'node_id': 'str',
+ 'topology_keys': 'list[str]'
+ }
+
+ attribute_map = {
+ 'allocatable': 'allocatable',
+ 'name': 'name',
+ 'node_id': 'nodeID',
+ 'topology_keys': 'topologyKeys'
+ }
+
+ def __init__(self, allocatable=None, name=None, node_id=None, topology_keys=None, local_vars_configuration=None): # noqa: E501
+ """V1CSINodeDriver - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._allocatable = None
+ self._name = None
+ self._node_id = None
+ self._topology_keys = None
+ self.discriminator = None
+
+ if allocatable is not None:
+ self.allocatable = allocatable
+ self.name = name
+ self.node_id = node_id
+ if topology_keys is not None:
+ self.topology_keys = topology_keys
+
+ @property
+ def allocatable(self):
+ """Gets the allocatable of this V1CSINodeDriver. # noqa: E501
+
+
+ :return: The allocatable of this V1CSINodeDriver. # noqa: E501
+ :rtype: V1VolumeNodeResources
+ """
+ return self._allocatable
+
+ @allocatable.setter
+ def allocatable(self, allocatable):
+ """Sets the allocatable of this V1CSINodeDriver.
+
+
+ :param allocatable: The allocatable of this V1CSINodeDriver. # noqa: E501
+ :type: V1VolumeNodeResources
+ """
+
+ self._allocatable = allocatable
+
+ @property
+ def name(self):
+ """Gets the name of this V1CSINodeDriver. # noqa: E501
+
+ name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver. # noqa: E501
+
+ :return: The name of this V1CSINodeDriver. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1CSINodeDriver.
+
+ name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver. # noqa: E501
+
+ :param name: The name of this V1CSINodeDriver. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def node_id(self):
+ """Gets the node_id of this V1CSINodeDriver. # noqa: E501
+
+ nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required. # noqa: E501
+
+ :return: The node_id of this V1CSINodeDriver. # noqa: E501
+ :rtype: str
+ """
+ return self._node_id
+
+ @node_id.setter
+ def node_id(self, node_id):
+ """Sets the node_id of this V1CSINodeDriver.
+
+ nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required. # noqa: E501
+
+ :param node_id: The node_id of this V1CSINodeDriver. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and node_id is None: # noqa: E501
+ raise ValueError("Invalid value for `node_id`, must not be `None`") # noqa: E501
+
+ self._node_id = node_id
+
+ @property
+ def topology_keys(self):
+ """Gets the topology_keys of this V1CSINodeDriver. # noqa: E501
+
+ topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology. # noqa: E501
+
+ :return: The topology_keys of this V1CSINodeDriver. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._topology_keys
+
+ @topology_keys.setter
+ def topology_keys(self, topology_keys):
+ """Sets the topology_keys of this V1CSINodeDriver.
+
+ topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology. # noqa: E501
+
+ :param topology_keys: The topology_keys of this V1CSINodeDriver. # noqa: E501
+ :type: list[str]
+ """
+
+ self._topology_keys = topology_keys
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CSINodeDriver):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CSINodeDriver):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_list.py
new file mode 100644
index 0000000000..415e578470
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CSINodeList(object):
    """Collection model wrapping a list of ``V1CSINode`` objects.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # ``openapi_types`` maps python attribute name -> declared OpenAPI type,
    # ``attribute_map`` maps python attribute name -> JSON key on the wire.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1CSINode]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """Create a V1CSINodeList.

        :param api_version: versioned schema identifier (optional)
        :param items: list of ``V1CSINode`` entries (required)
        :param kind: REST resource kind string (optional)
        :param metadata: ``V1ListMeta`` for this list (optional)
        :param local_vars_configuration: client configuration; a fresh
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # ``items`` is mandatory and always routed through its validating
        # setter; the optional fields are only assigned when supplied.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Versioned schema of this representation of an object.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version string of this V1CSINodeList."""
        self._api_version = api_version

    @property
    def items(self):
        """The list of CSINode entries.

        :rtype: list[V1CSINode]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set items; ``None`` is rejected under client-side validation."""
        if items is None and self.local_vars_configuration.client_side_validation:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """String naming the REST resource this object represents.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind string of this V1CSINodeList."""
        self._kind = kind

    @property
    def metadata(self):
        """The ``V1ListMeta`` attached to this list.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata (``V1ListMeta``) of this V1CSINodeList."""
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model recursively into a plain ``dict``."""
        def convert(entry):
            return entry.to_dict() if hasattr(entry, "to_dict") else entry

        serialized = {}
        for name in self.openapi_types:
            field = getattr(self, name)
            if isinstance(field, list):
                serialized[name] = [convert(element) for element in field]
            elif hasattr(field, "to_dict"):
                serialized[name] = field.to_dict()
            elif isinstance(field, dict):
                serialized[name] = {key: convert(value) for key, value in field.items()}
            else:
                serialized[name] = field
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with equal properties."""
        return isinstance(other, V1CSINodeList) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not isinstance(other, V1CSINodeList) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_spec.py
new file mode 100644
index 0000000000..5fafa17924
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_node_spec.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CSINodeSpec(object):
    """Spec model holding the CSI driver information of a node.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # ``openapi_types`` maps python attribute name -> declared OpenAPI type,
    # ``attribute_map`` maps python attribute name -> JSON key on the wire.
    openapi_types = {
        'drivers': 'list[V1CSINodeDriver]'
    }

    attribute_map = {
        'drivers': 'drivers'
    }

    def __init__(self, drivers=None, local_vars_configuration=None):  # noqa: E501
        """Create a V1CSINodeSpec.

        :param drivers: list of ``V1CSINodeDriver`` entries (required)
        :param local_vars_configuration: client configuration; a fresh
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._drivers = None
        self.discriminator = None

        # ``drivers`` is mandatory and therefore always goes through the
        # validating setter.
        self.drivers = drivers

    @property
    def drivers(self):
        """List of information about all CSI drivers on the node.

        Can become empty once every driver in the list is uninstalled.

        :rtype: list[V1CSINodeDriver]
        """
        return self._drivers

    @drivers.setter
    def drivers(self, drivers):
        """Set drivers; ``None`` is rejected under client-side validation."""
        if drivers is None and self.local_vars_configuration.client_side_validation:  # noqa: E501
            raise ValueError("Invalid value for `drivers`, must not be `None`")  # noqa: E501

        self._drivers = drivers

    def to_dict(self):
        """Serialize the model recursively into a plain ``dict``."""
        def convert(entry):
            return entry.to_dict() if hasattr(entry, "to_dict") else entry

        serialized = {}
        for name in self.openapi_types:
            field = getattr(self, name)
            if isinstance(field, list):
                serialized[name] = [convert(element) for element in field]
            elif hasattr(field, "to_dict"):
                serialized[name] = field.to_dict()
            elif isinstance(field, dict):
                serialized[name] = {key: convert(value) for key, value in field.items()}
            else:
                serialized[name] = field
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with equal properties."""
        return isinstance(other, V1CSINodeSpec) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not isinstance(other, V1CSINodeSpec) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_persistent_volume_source.py
new file mode 100644
index 0000000000..aadf8488bc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_persistent_volume_source.py
@@ -0,0 +1,366 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CSIPersistentVolumeSource(object):
    """Model describing a CSI-backed persistent volume source.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # ``openapi_types`` maps python attribute name -> declared OpenAPI type,
    # ``attribute_map`` maps python attribute name -> JSON key on the wire.
    openapi_types = {
        'controller_expand_secret_ref': 'V1SecretReference',
        'controller_publish_secret_ref': 'V1SecretReference',
        'driver': 'str',
        'fs_type': 'str',
        'node_expand_secret_ref': 'V1SecretReference',
        'node_publish_secret_ref': 'V1SecretReference',
        'node_stage_secret_ref': 'V1SecretReference',
        'read_only': 'bool',
        'volume_attributes': 'dict(str, str)',
        'volume_handle': 'str'
    }

    attribute_map = {
        'controller_expand_secret_ref': 'controllerExpandSecretRef',
        'controller_publish_secret_ref': 'controllerPublishSecretRef',
        'driver': 'driver',
        'fs_type': 'fsType',
        'node_expand_secret_ref': 'nodeExpandSecretRef',
        'node_publish_secret_ref': 'nodePublishSecretRef',
        'node_stage_secret_ref': 'nodeStageSecretRef',
        'read_only': 'readOnly',
        'volume_attributes': 'volumeAttributes',
        'volume_handle': 'volumeHandle'
    }

    def __init__(self, controller_expand_secret_ref=None, controller_publish_secret_ref=None, driver=None, fs_type=None, node_expand_secret_ref=None, node_publish_secret_ref=None, node_stage_secret_ref=None, read_only=None, volume_attributes=None, volume_handle=None, local_vars_configuration=None):  # noqa: E501
        """Create a V1CSIPersistentVolumeSource.

        ``driver`` and ``volume_handle`` are required; every other field is
        optional and only assigned when supplied.

        :param local_vars_configuration: client configuration; a fresh
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._controller_expand_secret_ref = None
        self._controller_publish_secret_ref = None
        self._driver = None
        self._fs_type = None
        self._node_expand_secret_ref = None
        self._node_publish_secret_ref = None
        self._node_stage_secret_ref = None
        self._read_only = None
        self._volume_attributes = None
        self._volume_handle = None
        self.discriminator = None

        if controller_expand_secret_ref is not None:
            self.controller_expand_secret_ref = controller_expand_secret_ref
        if controller_publish_secret_ref is not None:
            self.controller_publish_secret_ref = controller_publish_secret_ref
        self.driver = driver
        if fs_type is not None:
            self.fs_type = fs_type
        if node_expand_secret_ref is not None:
            self.node_expand_secret_ref = node_expand_secret_ref
        if node_publish_secret_ref is not None:
            self.node_publish_secret_ref = node_publish_secret_ref
        if node_stage_secret_ref is not None:
            self.node_stage_secret_ref = node_stage_secret_ref
        if read_only is not None:
            self.read_only = read_only
        if volume_attributes is not None:
            self.volume_attributes = volume_attributes
        self.volume_handle = volume_handle

    @property
    def controller_expand_secret_ref(self):
        """Get controller_expand_secret_ref (``V1SecretReference``)."""
        return self._controller_expand_secret_ref

    @controller_expand_secret_ref.setter
    def controller_expand_secret_ref(self, controller_expand_secret_ref):
        """Set controller_expand_secret_ref (``V1SecretReference``)."""
        self._controller_expand_secret_ref = controller_expand_secret_ref

    @property
    def controller_publish_secret_ref(self):
        """Get controller_publish_secret_ref (``V1SecretReference``)."""
        return self._controller_publish_secret_ref

    @controller_publish_secret_ref.setter
    def controller_publish_secret_ref(self, controller_publish_secret_ref):
        """Set controller_publish_secret_ref (``V1SecretReference``)."""
        self._controller_publish_secret_ref = controller_publish_secret_ref

    @property
    def driver(self):
        """Name of the driver to use for this volume. Required.

        :rtype: str
        """
        return self._driver

    @driver.setter
    def driver(self, driver):
        """Set driver; ``None`` is rejected under client-side validation."""
        if driver is None and self.local_vars_configuration.client_side_validation:  # noqa: E501
            raise ValueError("Invalid value for `driver`, must not be `None`")  # noqa: E501

        self._driver = driver

    @property
    def fs_type(self):
        """Filesystem type to mount, e.g. \"ext4\", \"xfs\", \"ntfs\".

        Must be supported by the host operating system.

        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set the fs_type string of this source."""
        self._fs_type = fs_type

    @property
    def node_expand_secret_ref(self):
        """Get node_expand_secret_ref (``V1SecretReference``)."""
        return self._node_expand_secret_ref

    @node_expand_secret_ref.setter
    def node_expand_secret_ref(self, node_expand_secret_ref):
        """Set node_expand_secret_ref (``V1SecretReference``)."""
        self._node_expand_secret_ref = node_expand_secret_ref

    @property
    def node_publish_secret_ref(self):
        """Get node_publish_secret_ref (``V1SecretReference``)."""
        return self._node_publish_secret_ref

    @node_publish_secret_ref.setter
    def node_publish_secret_ref(self, node_publish_secret_ref):
        """Set node_publish_secret_ref (``V1SecretReference``)."""
        self._node_publish_secret_ref = node_publish_secret_ref

    @property
    def node_stage_secret_ref(self):
        """Get node_stage_secret_ref (``V1SecretReference``)."""
        return self._node_stage_secret_ref

    @node_stage_secret_ref.setter
    def node_stage_secret_ref(self, node_stage_secret_ref):
        """Set node_stage_secret_ref (``V1SecretReference``)."""
        self._node_stage_secret_ref = node_stage_secret_ref

    @property
    def read_only(self):
        """readOnly value passed to ControllerPublishVolumeRequest.

        Defaults to false (read/write).

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set the read_only flag of this source."""
        self._read_only = read_only

    @property
    def volume_attributes(self):
        """Attributes of the volume to publish.

        :rtype: dict(str, str)
        """
        return self._volume_attributes

    @volume_attributes.setter
    def volume_attributes(self, volume_attributes):
        """Set the volume_attributes mapping of this source."""
        self._volume_attributes = volume_attributes

    @property
    def volume_handle(self):
        """Unique volume name returned by the CSI plugin's CreateVolume.

        Used to refer to the volume on all subsequent calls. Required.

        :rtype: str
        """
        return self._volume_handle

    @volume_handle.setter
    def volume_handle(self, volume_handle):
        """Set volume_handle; ``None`` is rejected under client-side validation."""
        if volume_handle is None and self.local_vars_configuration.client_side_validation:  # noqa: E501
            raise ValueError("Invalid value for `volume_handle`, must not be `None`")  # noqa: E501

        self._volume_handle = volume_handle

    def to_dict(self):
        """Serialize the model recursively into a plain ``dict``."""
        def convert(entry):
            return entry.to_dict() if hasattr(entry, "to_dict") else entry

        serialized = {}
        for name in self.openapi_types:
            field = getattr(self, name)
            if isinstance(field, list):
                serialized[name] = [convert(element) for element in field]
            elif hasattr(field, "to_dict"):
                serialized[name] = field.to_dict()
            elif isinstance(field, dict):
                serialized[name] = {key: convert(value) for key, value in field.items()}
            else:
                serialized[name] = field
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with equal properties."""
        return isinstance(other, V1CSIPersistentVolumeSource) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not isinstance(other, V1CSIPersistentVolumeSource) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity.py
new file mode 100644
index 0000000000..3dfc126a92
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity.py
@@ -0,0 +1,287 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CSIStorageCapacity(object):
    """Model reporting the storage capacity a CSI driver has available.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # ``openapi_types`` maps python attribute name -> declared OpenAPI type,
    # ``attribute_map`` maps python attribute name -> JSON key on the wire.
    openapi_types = {
        'api_version': 'str',
        'capacity': 'str',
        'kind': 'str',
        'maximum_volume_size': 'str',
        'metadata': 'V1ObjectMeta',
        'node_topology': 'V1LabelSelector',
        'storage_class_name': 'str'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'capacity': 'capacity',
        'kind': 'kind',
        'maximum_volume_size': 'maximumVolumeSize',
        'metadata': 'metadata',
        'node_topology': 'nodeTopology',
        'storage_class_name': 'storageClassName'
    }

    def __init__(self, api_version=None, capacity=None, kind=None, maximum_volume_size=None, metadata=None, node_topology=None, storage_class_name=None, local_vars_configuration=None):  # noqa: E501
        """Create a V1CSIStorageCapacity.

        ``storage_class_name`` is required; every other field is optional
        and only assigned when supplied.

        :param local_vars_configuration: client configuration; a fresh
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._capacity = None
        self._kind = None
        self._maximum_volume_size = None
        self._metadata = None
        self._node_topology = None
        self._storage_class_name = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if capacity is not None:
            self.capacity = capacity
        if kind is not None:
            self.kind = kind
        if maximum_volume_size is not None:
            self.maximum_volume_size = maximum_volume_size
        if metadata is not None:
            self.metadata = metadata
        if node_topology is not None:
            self.node_topology = node_topology
        self.storage_class_name = storage_class_name

    @property
    def api_version(self):
        """Versioned schema of this representation of an object.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version string of this V1CSIStorageCapacity."""
        self._api_version = api_version

    @property
    def capacity(self):
        """Capacity reported by the CSI driver in its GetCapacityResponse.

        Per CSI spec 1.2: available capacity, in bytes, of the storage that
        can be used to provision volumes; unset means currently unavailable.

        :rtype: str
        """
        return self._capacity

    @capacity.setter
    def capacity(self, capacity):
        """Set the capacity quantity string of this V1CSIStorageCapacity."""
        self._capacity = capacity

    @property
    def kind(self):
        """String naming the REST resource this object represents.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind string of this V1CSIStorageCapacity."""
        self._kind = kind

    @property
    def maximum_volume_size(self):
        """Largest size usable in CreateVolumeRequest.capacity_range.

        Reported by the CSI driver (defined since CSI spec 1.4.0); the
        Kubernetes counterpart is ResourceRequirements.Requests in a claim.

        :rtype: str
        """
        return self._maximum_volume_size

    @maximum_volume_size.setter
    def maximum_volume_size(self, maximum_volume_size):
        """Set the maximum_volume_size quantity string."""
        self._maximum_volume_size = maximum_volume_size

    @property
    def metadata(self):
        """The ``V1ObjectMeta`` attached to this object.

        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata (``V1ObjectMeta``) of this V1CSIStorageCapacity."""
        self._metadata = metadata

    @property
    def node_topology(self):
        """Get node_topology (``V1LabelSelector``).

        :rtype: V1LabelSelector
        """
        return self._node_topology

    @node_topology.setter
    def node_topology(self, node_topology):
        """Set node_topology (``V1LabelSelector``)."""
        self._node_topology = node_topology

    @property
    def storage_class_name(self):
        """Name of the StorageClass this capacity applies to. Immutable.

        Must meet StorageClass name requirements (non-empty, DNS subdomain);
        if that object no longer exists this capacity object is obsolete.

        :rtype: str
        """
        return self._storage_class_name

    @storage_class_name.setter
    def storage_class_name(self, storage_class_name):
        """Set storage_class_name; ``None`` is rejected under client-side validation."""
        if storage_class_name is None and self.local_vars_configuration.client_side_validation:  # noqa: E501
            raise ValueError("Invalid value for `storage_class_name`, must not be `None`")  # noqa: E501

        self._storage_class_name = storage_class_name

    def to_dict(self):
        """Serialize the model recursively into a plain ``dict``."""
        def convert(entry):
            return entry.to_dict() if hasattr(entry, "to_dict") else entry

        serialized = {}
        for name in self.openapi_types:
            field = getattr(self, name)
            if isinstance(field, list):
                serialized[name] = [convert(element) for element in field]
            elif hasattr(field, "to_dict"):
                serialized[name] = field.to_dict()
            elif isinstance(field, dict):
                serialized[name] = {key: convert(value) for key, value in field.items()}
            else:
                serialized[name] = field
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with equal properties."""
        return isinstance(other, V1CSIStorageCapacity) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not isinstance(other, V1CSIStorageCapacity) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity_list.py
new file mode 100644
index 0000000000..cf2525f705
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_storage_capacity_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSIStorageCapacityList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1CSIStorageCapacity]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1CSIStorageCapacityList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1CSIStorageCapacityList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1CSIStorageCapacityList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1CSIStorageCapacityList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1CSIStorageCapacityList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1CSIStorageCapacityList. # noqa: E501
+
+ items is the list of CSIStorageCapacity objects. # noqa: E501
+
+ :return: The items of this V1CSIStorageCapacityList. # noqa: E501
+ :rtype: list[V1CSIStorageCapacity]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1CSIStorageCapacityList.
+
+ items is the list of CSIStorageCapacity objects. # noqa: E501
+
+ :param items: The items of this V1CSIStorageCapacityList. # noqa: E501
+ :type: list[V1CSIStorageCapacity]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1CSIStorageCapacityList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1CSIStorageCapacityList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1CSIStorageCapacityList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1CSIStorageCapacityList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1CSIStorageCapacityList. # noqa: E501
+
+
+ :return: The metadata of this V1CSIStorageCapacityList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1CSIStorageCapacityList.
+
+
+ :param metadata: The metadata of this V1CSIStorageCapacityList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CSIStorageCapacityList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CSIStorageCapacityList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_csi_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_volume_source.py
new file mode 100644
index 0000000000..3064f97d37
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_csi_volume_source.py
@@ -0,0 +1,233 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CSIVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'driver': 'str',
+ 'fs_type': 'str',
+ 'node_publish_secret_ref': 'V1LocalObjectReference',
+ 'read_only': 'bool',
+ 'volume_attributes': 'dict(str, str)'
+ }
+
+ attribute_map = {
+ 'driver': 'driver',
+ 'fs_type': 'fsType',
+ 'node_publish_secret_ref': 'nodePublishSecretRef',
+ 'read_only': 'readOnly',
+ 'volume_attributes': 'volumeAttributes'
+ }
+
+ def __init__(self, driver=None, fs_type=None, node_publish_secret_ref=None, read_only=None, volume_attributes=None, local_vars_configuration=None): # noqa: E501
+ """V1CSIVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._driver = None
+ self._fs_type = None
+ self._node_publish_secret_ref = None
+ self._read_only = None
+ self._volume_attributes = None
+ self.discriminator = None
+
+ self.driver = driver
+ if fs_type is not None:
+ self.fs_type = fs_type
+ if node_publish_secret_ref is not None:
+ self.node_publish_secret_ref = node_publish_secret_ref
+ if read_only is not None:
+ self.read_only = read_only
+ if volume_attributes is not None:
+ self.volume_attributes = volume_attributes
+
+ @property
+ def driver(self):
+ """Gets the driver of this V1CSIVolumeSource. # noqa: E501
+
+ driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. # noqa: E501
+
+ :return: The driver of this V1CSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._driver
+
+ @driver.setter
+ def driver(self, driver):
+ """Sets the driver of this V1CSIVolumeSource.
+
+ driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. # noqa: E501
+
+ :param driver: The driver of this V1CSIVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
+ raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
+
+ self._driver = driver
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1CSIVolumeSource. # noqa: E501
+
+ fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. # noqa: E501
+
+ :return: The fs_type of this V1CSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1CSIVolumeSource.
+
+ fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. # noqa: E501
+
+ :param fs_type: The fs_type of this V1CSIVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def node_publish_secret_ref(self):
+ """Gets the node_publish_secret_ref of this V1CSIVolumeSource. # noqa: E501
+
+
+ :return: The node_publish_secret_ref of this V1CSIVolumeSource. # noqa: E501
+ :rtype: V1LocalObjectReference
+ """
+ return self._node_publish_secret_ref
+
+ @node_publish_secret_ref.setter
+ def node_publish_secret_ref(self, node_publish_secret_ref):
+ """Sets the node_publish_secret_ref of this V1CSIVolumeSource.
+
+
+ :param node_publish_secret_ref: The node_publish_secret_ref of this V1CSIVolumeSource. # noqa: E501
+ :type: V1LocalObjectReference
+ """
+
+ self._node_publish_secret_ref = node_publish_secret_ref
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1CSIVolumeSource. # noqa: E501
+
+ readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). # noqa: E501
+
+ :return: The read_only of this V1CSIVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1CSIVolumeSource.
+
+ readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). # noqa: E501
+
+ :param read_only: The read_only of this V1CSIVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def volume_attributes(self):
+ """Gets the volume_attributes of this V1CSIVolumeSource. # noqa: E501
+
+ volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. # noqa: E501
+
+ :return: The volume_attributes of this V1CSIVolumeSource. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._volume_attributes
+
+ @volume_attributes.setter
+ def volume_attributes(self, volume_attributes):
+ """Sets the volume_attributes of this V1CSIVolumeSource.
+
+ volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. # noqa: E501
+
+ :param volume_attributes: The volume_attributes of this V1CSIVolumeSource. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._volume_attributes = volume_attributes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CSIVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CSIVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_column_definition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_column_definition.py
new file mode 100644
index 0000000000..d996148cfc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_column_definition.py
@@ -0,0 +1,265 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceColumnDefinition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'description': 'str',
+ 'format': 'str',
+ 'json_path': 'str',
+ 'name': 'str',
+ 'priority': 'int',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'description': 'description',
+ 'format': 'format',
+ 'json_path': 'jsonPath',
+ 'name': 'name',
+ 'priority': 'priority',
+ 'type': 'type'
+ }
+
+ def __init__(self, description=None, format=None, json_path=None, name=None, priority=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceColumnDefinition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._description = None
+ self._format = None
+ self._json_path = None
+ self._name = None
+ self._priority = None
+ self._type = None
+ self.discriminator = None
+
+ if description is not None:
+ self.description = description
+ if format is not None:
+ self.format = format
+ self.json_path = json_path
+ self.name = name
+ if priority is not None:
+ self.priority = priority
+ self.type = type
+
+ @property
+ def description(self):
+ """Gets the description of this V1CustomResourceColumnDefinition. # noqa: E501
+
+ description is a human readable description of this column. # noqa: E501
+
+ :return: The description of this V1CustomResourceColumnDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._description
+
+ @description.setter
+ def description(self, description):
+ """Sets the description of this V1CustomResourceColumnDefinition.
+
+ description is a human readable description of this column. # noqa: E501
+
+ :param description: The description of this V1CustomResourceColumnDefinition. # noqa: E501
+ :type: str
+ """
+
+ self._description = description
+
+ @property
+ def format(self):
+ """Gets the format of this V1CustomResourceColumnDefinition. # noqa: E501
+
+ format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. # noqa: E501
+
+ :return: The format of this V1CustomResourceColumnDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._format
+
+ @format.setter
+ def format(self, format):
+ """Sets the format of this V1CustomResourceColumnDefinition.
+
+ format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. # noqa: E501
+
+ :param format: The format of this V1CustomResourceColumnDefinition. # noqa: E501
+ :type: str
+ """
+
+ self._format = format
+
+ @property
+ def json_path(self):
+ """Gets the json_path of this V1CustomResourceColumnDefinition. # noqa: E501
+
+ jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column. # noqa: E501
+
+ :return: The json_path of this V1CustomResourceColumnDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._json_path
+
+ @json_path.setter
+ def json_path(self, json_path):
+ """Sets the json_path of this V1CustomResourceColumnDefinition.
+
+ jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column. # noqa: E501
+
+ :param json_path: The json_path of this V1CustomResourceColumnDefinition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and json_path is None: # noqa: E501
+ raise ValueError("Invalid value for `json_path`, must not be `None`") # noqa: E501
+
+ self._json_path = json_path
+
+ @property
+ def name(self):
+ """Gets the name of this V1CustomResourceColumnDefinition. # noqa: E501
+
+ name is a human readable name for the column. # noqa: E501
+
+ :return: The name of this V1CustomResourceColumnDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1CustomResourceColumnDefinition.
+
+ name is a human readable name for the column. # noqa: E501
+
+ :param name: The name of this V1CustomResourceColumnDefinition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def priority(self):
+ """Gets the priority of this V1CustomResourceColumnDefinition. # noqa: E501
+
+ priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0. # noqa: E501
+
+ :return: The priority of this V1CustomResourceColumnDefinition. # noqa: E501
+ :rtype: int
+ """
+ return self._priority
+
+ @priority.setter
+ def priority(self, priority):
+ """Sets the priority of this V1CustomResourceColumnDefinition.
+
+ priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0. # noqa: E501
+
+ :param priority: The priority of this V1CustomResourceColumnDefinition. # noqa: E501
+ :type: int
+ """
+
+ self._priority = priority
+
+ @property
+ def type(self):
+ """Gets the type of this V1CustomResourceColumnDefinition. # noqa: E501
+
+ type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. # noqa: E501
+
+ :return: The type of this V1CustomResourceColumnDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1CustomResourceColumnDefinition.
+
+ type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. # noqa: E501
+
+ :param type: The type of this V1CustomResourceColumnDefinition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceColumnDefinition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceColumnDefinition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_conversion.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_conversion.py
new file mode 100644
index 0000000000..cf9b524f8d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_conversion.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceConversion(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'strategy': 'str',
+ 'webhook': 'V1WebhookConversion'
+ }
+
+ attribute_map = {
+ 'strategy': 'strategy',
+ 'webhook': 'webhook'
+ }
+
+ def __init__(self, strategy=None, webhook=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceConversion - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._strategy = None
+ self._webhook = None
+ self.discriminator = None
+
+ self.strategy = strategy
+ if webhook is not None:
+ self.webhook = webhook
+
+ @property
+ def strategy(self):
+ """Gets the strategy of this V1CustomResourceConversion. # noqa: E501
+
+ strategy specifies how custom resources are converted between versions. Allowed values are: - `\"None\"`: The converter only change the apiVersion and would not touch any other field in the custom resource. - `\"Webhook\"`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. # noqa: E501
+
+ :return: The strategy of this V1CustomResourceConversion. # noqa: E501
+ :rtype: str
+ """
+ return self._strategy
+
+ @strategy.setter
+ def strategy(self, strategy):
+ """Sets the strategy of this V1CustomResourceConversion.
+
+ strategy specifies how custom resources are converted between versions. Allowed values are: - `\"None\"`: The converter only change the apiVersion and would not touch any other field in the custom resource. - `\"Webhook\"`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. # noqa: E501
+
+ :param strategy: The strategy of this V1CustomResourceConversion. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and strategy is None: # noqa: E501
+ raise ValueError("Invalid value for `strategy`, must not be `None`") # noqa: E501
+
+ self._strategy = strategy
+
+ @property
+ def webhook(self):
+ """Gets the webhook of this V1CustomResourceConversion. # noqa: E501
+
+
+ :return: The webhook of this V1CustomResourceConversion. # noqa: E501
+ :rtype: V1WebhookConversion
+ """
+ return self._webhook
+
+ @webhook.setter
+ def webhook(self, webhook):
+ """Sets the webhook of this V1CustomResourceConversion.
+
+
+ :param webhook: The webhook of this V1CustomResourceConversion. # noqa: E501
+ :type: V1WebhookConversion
+ """
+
+ self._webhook = webhook
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceConversion):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceConversion):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition.py
new file mode 100644
index 0000000000..c97a36d6f3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceDefinition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1CustomResourceDefinitionSpec',
+ 'status': 'V1CustomResourceDefinitionStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceDefinition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1CustomResourceDefinition. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1CustomResourceDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1CustomResourceDefinition.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1CustomResourceDefinition. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1CustomResourceDefinition. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1CustomResourceDefinition. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1CustomResourceDefinition.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1CustomResourceDefinition. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1CustomResourceDefinition. # noqa: E501
+
+
+ :return: The metadata of this V1CustomResourceDefinition. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1CustomResourceDefinition.
+
+
+ :param metadata: The metadata of this V1CustomResourceDefinition. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1CustomResourceDefinition. # noqa: E501
+
+
+ :return: The spec of this V1CustomResourceDefinition. # noqa: E501
+ :rtype: V1CustomResourceDefinitionSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1CustomResourceDefinition.
+
+
+ :param spec: The spec of this V1CustomResourceDefinition. # noqa: E501
+ :type: V1CustomResourceDefinitionSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1CustomResourceDefinition. # noqa: E501
+
+
+ :return: The status of this V1CustomResourceDefinition. # noqa: E501
+ :rtype: V1CustomResourceDefinitionStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1CustomResourceDefinition.
+
+
+ :param status: The status of this V1CustomResourceDefinition. # noqa: E501
+ :type: V1CustomResourceDefinitionStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceDefinition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceDefinition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_condition.py
new file mode 100644
index 0000000000..f1a9d663de
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CustomResourceDefinitionCondition(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string used by the (de)serializer.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Python attribute name -> JSON key in the wire representation.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1CustomResourceDefinitionCondition - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Optional fields are only assigned when provided, so their backing
        # attributes stay None otherwise.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        # status and type are required by the schema: assigned unconditionally
        # so their property setters can reject None when client-side
        # validation is enabled.
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """Gets the last_transition_time of this V1CustomResourceDefinitionCondition.  # noqa: E501

        lastTransitionTime last time the condition transitioned from one status to another.  # noqa: E501

        :return: The last_transition_time of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Sets the last_transition_time of this V1CustomResourceDefinitionCondition.

        lastTransitionTime last time the condition transitioned from one status to another.  # noqa: E501

        :param last_transition_time: The last_transition_time of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :type: datetime
        """

        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """Gets the message of this V1CustomResourceDefinitionCondition.  # noqa: E501

        message is a human-readable message indicating details about last transition.  # noqa: E501

        :return: The message of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1CustomResourceDefinitionCondition.

        message is a human-readable message indicating details about last transition.  # noqa: E501

        :param message: The message of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :type: str
        """

        self._message = message

    @property
    def reason(self):
        """Gets the reason of this V1CustomResourceDefinitionCondition.  # noqa: E501

        reason is a unique, one-word, CamelCase reason for the condition's last transition.  # noqa: E501

        :return: The reason of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1CustomResourceDefinitionCondition.

        reason is a unique, one-word, CamelCase reason for the condition's last transition.  # noqa: E501

        :param reason: The reason of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :type: str
        """

        self._reason = reason

    @property
    def status(self):
        """Gets the status of this V1CustomResourceDefinitionCondition.  # noqa: E501

        status is the status of the condition. Can be True, False, Unknown.  # noqa: E501

        :return: The status of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1CustomResourceDefinitionCondition.

        status is the status of the condition. Can be True, False, Unknown.  # noqa: E501

        :param status: The status of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501

        self._status = status

    @property
    def type(self):
        """Gets the type of this V1CustomResourceDefinitionCondition.  # noqa: E501

        type is the type of the condition. Types include Established, NamesAccepted and Terminating.  # noqa: E501

        :return: The type of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1CustomResourceDefinitionCondition.

        type is the type of the condition. Types include Established, NamesAccepted and Terminating.  # noqa: E501

        :param type: The type of this V1CustomResourceDefinitionCondition.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element, recursing into nested models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values, recursing into nested models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1CustomResourceDefinitionCondition):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1CustomResourceDefinitionCondition):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_list.py
new file mode 100644
index 0000000000..e172683d4a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CustomResourceDefinitionList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string used by the (de)serializer.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1CustomResourceDefinition]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Python attribute name -> JSON key in the wire representation.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1CustomResourceDefinitionList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # items is required by the schema: assigned unconditionally so its
        # property setter can reject None when client-side validation is
        # enabled.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1CustomResourceDefinitionList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1CustomResourceDefinitionList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1CustomResourceDefinitionList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1CustomResourceDefinitionList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1CustomResourceDefinitionList.  # noqa: E501

        items list individual CustomResourceDefinition objects  # noqa: E501

        :return: The items of this V1CustomResourceDefinitionList.  # noqa: E501
        :rtype: list[V1CustomResourceDefinition]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1CustomResourceDefinitionList.

        items list individual CustomResourceDefinition objects  # noqa: E501

        :param items: The items of this V1CustomResourceDefinitionList.  # noqa: E501
        :type: list[V1CustomResourceDefinition]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1CustomResourceDefinitionList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1CustomResourceDefinitionList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1CustomResourceDefinitionList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1CustomResourceDefinitionList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1CustomResourceDefinitionList.  # noqa: E501


        :return: The metadata of this V1CustomResourceDefinitionList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1CustomResourceDefinitionList.


        :param metadata: The metadata of this V1CustomResourceDefinitionList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element, recursing into nested models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values, recursing into nested models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1CustomResourceDefinitionList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1CustomResourceDefinitionList):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_names.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_names.py
new file mode 100644
index 0000000000..d4818e7410
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_names.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CustomResourceDefinitionNames(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string used by the (de)serializer.
    openapi_types = {
        'categories': 'list[str]',
        'kind': 'str',
        'list_kind': 'str',
        'plural': 'str',
        'short_names': 'list[str]',
        'singular': 'str'
    }

    # Python attribute name -> JSON key in the wire representation.
    attribute_map = {
        'categories': 'categories',
        'kind': 'kind',
        'list_kind': 'listKind',
        'plural': 'plural',
        'short_names': 'shortNames',
        'singular': 'singular'
    }

    def __init__(self, categories=None, kind=None, list_kind=None, plural=None, short_names=None, singular=None, local_vars_configuration=None):  # noqa: E501
        """V1CustomResourceDefinitionNames - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._categories = None
        self._kind = None
        self._list_kind = None
        self._plural = None
        self._short_names = None
        self._singular = None
        self.discriminator = None

        if categories is not None:
            self.categories = categories
        # kind and plural are required by the schema: assigned unconditionally
        # so their property setters can reject None when client-side
        # validation is enabled.
        self.kind = kind
        if list_kind is not None:
            self.list_kind = list_kind
        self.plural = plural
        if short_names is not None:
            self.short_names = short_names
        if singular is not None:
            self.singular = singular

    @property
    def categories(self):
        """Gets the categories of this V1CustomResourceDefinitionNames.  # noqa: E501

        categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). This is published in API discovery documents, and used by clients to support invocations like `kubectl get all`.  # noqa: E501

        :return: The categories of this V1CustomResourceDefinitionNames.  # noqa: E501
        :rtype: list[str]
        """
        return self._categories

    @categories.setter
    def categories(self, categories):
        """Sets the categories of this V1CustomResourceDefinitionNames.

        categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). This is published in API discovery documents, and used by clients to support invocations like `kubectl get all`.  # noqa: E501

        :param categories: The categories of this V1CustomResourceDefinitionNames.  # noqa: E501
        :type: list[str]
        """

        self._categories = categories

    @property
    def kind(self):
        """Gets the kind of this V1CustomResourceDefinitionNames.  # noqa: E501

        kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.  # noqa: E501

        :return: The kind of this V1CustomResourceDefinitionNames.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1CustomResourceDefinitionNames.

        kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.  # noqa: E501

        :param kind: The kind of this V1CustomResourceDefinitionNames.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and kind is None:  # noqa: E501
            raise ValueError("Invalid value for `kind`, must not be `None`")  # noqa: E501

        self._kind = kind

    @property
    def list_kind(self):
        """Gets the list_kind of this V1CustomResourceDefinitionNames.  # noqa: E501

        listKind is the serialized kind of the list for this resource. Defaults to \"`kind`List\".  # noqa: E501

        :return: The list_kind of this V1CustomResourceDefinitionNames.  # noqa: E501
        :rtype: str
        """
        return self._list_kind

    @list_kind.setter
    def list_kind(self, list_kind):
        """Sets the list_kind of this V1CustomResourceDefinitionNames.

        listKind is the serialized kind of the list for this resource. Defaults to \"`kind`List\".  # noqa: E501

        :param list_kind: The list_kind of this V1CustomResourceDefinitionNames.  # noqa: E501
        :type: str
        """

        self._list_kind = list_kind

    @property
    def plural(self):
        """Gets the plural of this V1CustomResourceDefinitionNames.  # noqa: E501

        plural is the plural name of the resource to serve. The custom resources are served under `/apis/<group>/<version>/.../<plural>`. Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). Must be all lowercase.  # noqa: E501

        :return: The plural of this V1CustomResourceDefinitionNames.  # noqa: E501
        :rtype: str
        """
        return self._plural

    @plural.setter
    def plural(self, plural):
        """Sets the plural of this V1CustomResourceDefinitionNames.

        plural is the plural name of the resource to serve. The custom resources are served under `/apis/<group>/<version>/.../<plural>`. Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). Must be all lowercase.  # noqa: E501

        :param plural: The plural of this V1CustomResourceDefinitionNames.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and plural is None:  # noqa: E501
            raise ValueError("Invalid value for `plural`, must not be `None`")  # noqa: E501

        self._plural = plural

    @property
    def short_names(self):
        """Gets the short_names of this V1CustomResourceDefinitionNames.  # noqa: E501

        shortNames are short names for the resource, exposed in API discovery documents, and used by clients to support invocations like `kubectl get <shortname>`. It must be all lowercase.  # noqa: E501

        :return: The short_names of this V1CustomResourceDefinitionNames.  # noqa: E501
        :rtype: list[str]
        """
        return self._short_names

    @short_names.setter
    def short_names(self, short_names):
        """Sets the short_names of this V1CustomResourceDefinitionNames.

        shortNames are short names for the resource, exposed in API discovery documents, and used by clients to support invocations like `kubectl get <shortname>`. It must be all lowercase.  # noqa: E501

        :param short_names: The short_names of this V1CustomResourceDefinitionNames.  # noqa: E501
        :type: list[str]
        """

        self._short_names = short_names

    @property
    def singular(self):
        """Gets the singular of this V1CustomResourceDefinitionNames.  # noqa: E501

        singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.  # noqa: E501

        :return: The singular of this V1CustomResourceDefinitionNames.  # noqa: E501
        :rtype: str
        """
        return self._singular

    @singular.setter
    def singular(self, singular):
        """Sets the singular of this V1CustomResourceDefinitionNames.

        singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.  # noqa: E501

        :param singular: The singular of this V1CustomResourceDefinitionNames.  # noqa: E501
        :type: str
        """

        self._singular = singular

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element, recursing into nested models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values, recursing into nested models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1CustomResourceDefinitionNames):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1CustomResourceDefinitionNames):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_spec.py
new file mode 100644
index 0000000000..51aa9dd923
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_spec.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1CustomResourceDefinitionSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string used by the (de)serializer.
    openapi_types = {
        'conversion': 'V1CustomResourceConversion',
        'group': 'str',
        'names': 'V1CustomResourceDefinitionNames',
        'preserve_unknown_fields': 'bool',
        'scope': 'str',
        'versions': 'list[V1CustomResourceDefinitionVersion]'
    }

    # Python attribute name -> JSON key in the wire representation.
    attribute_map = {
        'conversion': 'conversion',
        'group': 'group',
        'names': 'names',
        'preserve_unknown_fields': 'preserveUnknownFields',
        'scope': 'scope',
        'versions': 'versions'
    }

    def __init__(self, conversion=None, group=None, names=None, preserve_unknown_fields=None, scope=None, versions=None, local_vars_configuration=None):  # noqa: E501
        """V1CustomResourceDefinitionSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._conversion = None
        self._group = None
        self._names = None
        self._preserve_unknown_fields = None
        self._scope = None
        self._versions = None
        self.discriminator = None

        if conversion is not None:
            self.conversion = conversion
        # group, names, scope and versions are required by the schema:
        # assigned unconditionally so their property setters can reject None
        # when client-side validation is enabled.
        self.group = group
        self.names = names
        if preserve_unknown_fields is not None:
            self.preserve_unknown_fields = preserve_unknown_fields
        self.scope = scope
        self.versions = versions

    @property
    def conversion(self):
        """Gets the conversion of this V1CustomResourceDefinitionSpec.  # noqa: E501


        :return: The conversion of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :rtype: V1CustomResourceConversion
        """
        return self._conversion

    @conversion.setter
    def conversion(self, conversion):
        """Sets the conversion of this V1CustomResourceDefinitionSpec.


        :param conversion: The conversion of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :type: V1CustomResourceConversion
        """

        self._conversion = conversion

    @property
    def group(self):
        """Gets the group of this V1CustomResourceDefinitionSpec.  # noqa: E501

        group is the API group of the defined custom resource. The custom resources are served under `/apis/<group>/...`. Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).  # noqa: E501

        :return: The group of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :rtype: str
        """
        return self._group

    @group.setter
    def group(self, group):
        """Sets the group of this V1CustomResourceDefinitionSpec.

        group is the API group of the defined custom resource. The custom resources are served under `/apis/<group>/...`. Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).  # noqa: E501

        :param group: The group of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and group is None:  # noqa: E501
            raise ValueError("Invalid value for `group`, must not be `None`")  # noqa: E501

        self._group = group

    @property
    def names(self):
        """Gets the names of this V1CustomResourceDefinitionSpec.  # noqa: E501


        :return: The names of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :rtype: V1CustomResourceDefinitionNames
        """
        return self._names

    @names.setter
    def names(self, names):
        """Sets the names of this V1CustomResourceDefinitionSpec.


        :param names: The names of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :type: V1CustomResourceDefinitionNames
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and names is None:  # noqa: E501
            raise ValueError("Invalid value for `names`, must not be `None`")  # noqa: E501

        self._names = names

    @property
    def preserve_unknown_fields(self):
        """Gets the preserve_unknown_fields of this V1CustomResourceDefinitionSpec.  # noqa: E501

        preserveUnknownFields indicates that object fields which are not specified in the OpenAPI schema should be preserved when persisting to storage. apiVersion, kind, metadata and known fields inside metadata are always preserved. This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.  # noqa: E501

        :return: The preserve_unknown_fields of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :rtype: bool
        """
        return self._preserve_unknown_fields

    @preserve_unknown_fields.setter
    def preserve_unknown_fields(self, preserve_unknown_fields):
        """Sets the preserve_unknown_fields of this V1CustomResourceDefinitionSpec.

        preserveUnknownFields indicates that object fields which are not specified in the OpenAPI schema should be preserved when persisting to storage. apiVersion, kind, metadata and known fields inside metadata are always preserved. This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.  # noqa: E501

        :param preserve_unknown_fields: The preserve_unknown_fields of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :type: bool
        """

        self._preserve_unknown_fields = preserve_unknown_fields

    @property
    def scope(self):
        """Gets the scope of this V1CustomResourceDefinitionSpec.  # noqa: E501

        scope indicates whether the defined custom resource is cluster- or namespace-scoped. Allowed values are `Cluster` and `Namespaced`.  # noqa: E501

        :return: The scope of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :rtype: str
        """
        return self._scope

    @scope.setter
    def scope(self, scope):
        """Sets the scope of this V1CustomResourceDefinitionSpec.

        scope indicates whether the defined custom resource is cluster- or namespace-scoped. Allowed values are `Cluster` and `Namespaced`.  # noqa: E501

        :param scope: The scope of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and scope is None:  # noqa: E501
            raise ValueError("Invalid value for `scope`, must not be `None`")  # noqa: E501

        self._scope = scope

    @property
    def versions(self):
        """Gets the versions of this V1CustomResourceDefinitionSpec.  # noqa: E501

        versions is the list of all API versions of the defined custom resource. Version names are used to compute the order in which served versions are listed in API discovery. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.  # noqa: E501

        :return: The versions of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :rtype: list[V1CustomResourceDefinitionVersion]
        """
        return self._versions

    @versions.setter
    def versions(self, versions):
        """Sets the versions of this V1CustomResourceDefinitionSpec.

        versions is the list of all API versions of the defined custom resource. Version names are used to compute the order in which served versions are listed in API discovery. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.  # noqa: E501

        :param versions: The versions of this V1CustomResourceDefinitionSpec.  # noqa: E501
        :type: list[V1CustomResourceDefinitionVersion]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and versions is None:  # noqa: E501
            raise ValueError("Invalid value for `versions`, must not be `None`")  # noqa: E501

        self._versions = versions

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element, recursing into nested models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values, recursing into nested models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1CustomResourceDefinitionSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1CustomResourceDefinitionSpec):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_status.py
new file mode 100644
index 0000000000..9a072154f0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_status.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceDefinitionStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'accepted_names': 'V1CustomResourceDefinitionNames',
+ 'conditions': 'list[V1CustomResourceDefinitionCondition]',
+ 'stored_versions': 'list[str]'
+ }
+
+ attribute_map = {
+ 'accepted_names': 'acceptedNames',
+ 'conditions': 'conditions',
+ 'stored_versions': 'storedVersions'
+ }
+
+ def __init__(self, accepted_names=None, conditions=None, stored_versions=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceDefinitionStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._accepted_names = None
+ self._conditions = None
+ self._stored_versions = None
+ self.discriminator = None
+
+ if accepted_names is not None:
+ self.accepted_names = accepted_names
+ if conditions is not None:
+ self.conditions = conditions
+ if stored_versions is not None:
+ self.stored_versions = stored_versions
+
+ @property
+ def accepted_names(self):
+ """Gets the accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
+
+
+ :return: The accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
+ :rtype: V1CustomResourceDefinitionNames
+ """
+ return self._accepted_names
+
+ @accepted_names.setter
+ def accepted_names(self, accepted_names):
+ """Sets the accepted_names of this V1CustomResourceDefinitionStatus.
+
+
+ :param accepted_names: The accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
+ :type: V1CustomResourceDefinitionNames
+ """
+
+ self._accepted_names = accepted_names
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1CustomResourceDefinitionStatus. # noqa: E501
+
+ conditions indicate state for particular aspects of a CustomResourceDefinition # noqa: E501
+
+ :return: The conditions of this V1CustomResourceDefinitionStatus. # noqa: E501
+ :rtype: list[V1CustomResourceDefinitionCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1CustomResourceDefinitionStatus.
+
+ conditions indicate state for particular aspects of a CustomResourceDefinition # noqa: E501
+
+ :param conditions: The conditions of this V1CustomResourceDefinitionStatus. # noqa: E501
+ :type: list[V1CustomResourceDefinitionCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def stored_versions(self):
+ """Gets the stored_versions of this V1CustomResourceDefinitionStatus. # noqa: E501
+
+ storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list. # noqa: E501
+
+ :return: The stored_versions of this V1CustomResourceDefinitionStatus. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._stored_versions
+
+ @stored_versions.setter
+ def stored_versions(self, stored_versions):
+ """Sets the stored_versions of this V1CustomResourceDefinitionStatus.
+
+ storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list. # noqa: E501
+
+ :param stored_versions: The stored_versions of this V1CustomResourceDefinitionStatus. # noqa: E501
+ :type: list[str]
+ """
+
+ self._stored_versions = stored_versions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceDefinitionStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceDefinitionStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_version.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_version.py
new file mode 100644
index 0000000000..03ee87eb65
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_definition_version.py
@@ -0,0 +1,317 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceDefinitionVersion(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'additional_printer_columns': 'list[V1CustomResourceColumnDefinition]',
+ 'deprecated': 'bool',
+ 'deprecation_warning': 'str',
+ 'name': 'str',
+ 'schema': 'V1CustomResourceValidation',
+ 'served': 'bool',
+ 'storage': 'bool',
+ 'subresources': 'V1CustomResourceSubresources'
+ }
+
+ attribute_map = {
+ 'additional_printer_columns': 'additionalPrinterColumns',
+ 'deprecated': 'deprecated',
+ 'deprecation_warning': 'deprecationWarning',
+ 'name': 'name',
+ 'schema': 'schema',
+ 'served': 'served',
+ 'storage': 'storage',
+ 'subresources': 'subresources'
+ }
+
+ def __init__(self, additional_printer_columns=None, deprecated=None, deprecation_warning=None, name=None, schema=None, served=None, storage=None, subresources=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceDefinitionVersion - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._additional_printer_columns = None
+ self._deprecated = None
+ self._deprecation_warning = None
+ self._name = None
+ self._schema = None
+ self._served = None
+ self._storage = None
+ self._subresources = None
+ self.discriminator = None
+
+ if additional_printer_columns is not None:
+ self.additional_printer_columns = additional_printer_columns
+ if deprecated is not None:
+ self.deprecated = deprecated
+ if deprecation_warning is not None:
+ self.deprecation_warning = deprecation_warning
+ self.name = name
+ if schema is not None:
+ self.schema = schema
+ self.served = served
+ self.storage = storage
+ if subresources is not None:
+ self.subresources = subresources
+
+ @property
+ def additional_printer_columns(self):
+ """Gets the additional_printer_columns of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+ additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used. # noqa: E501
+
+ :return: The additional_printer_columns of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: list[V1CustomResourceColumnDefinition]
+ """
+ return self._additional_printer_columns
+
+ @additional_printer_columns.setter
+ def additional_printer_columns(self, additional_printer_columns):
+ """Sets the additional_printer_columns of this V1CustomResourceDefinitionVersion.
+
+ additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. If no columns are specified, a single column displaying the age of the custom resource is used. # noqa: E501
+
+ :param additional_printer_columns: The additional_printer_columns of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: list[V1CustomResourceColumnDefinition]
+ """
+
+ self._additional_printer_columns = additional_printer_columns
+
+ @property
+ def deprecated(self):
+ """Gets the deprecated of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+ deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false. # noqa: E501
+
+ :return: The deprecated of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: bool
+ """
+ return self._deprecated
+
+ @deprecated.setter
+ def deprecated(self, deprecated):
+ """Sets the deprecated of this V1CustomResourceDefinitionVersion.
+
+ deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false. # noqa: E501
+
+ :param deprecated: The deprecated of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: bool
+ """
+
+ self._deprecated = deprecated
+
+ @property
+ def deprecation_warning(self):
+ """Gets the deprecation_warning of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+ deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists. # noqa: E501
+
+ :return: The deprecation_warning of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: str
+ """
+ return self._deprecation_warning
+
+ @deprecation_warning.setter
+ def deprecation_warning(self, deprecation_warning):
+ """Sets the deprecation_warning of this V1CustomResourceDefinitionVersion.
+
+ deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists. # noqa: E501
+
+ :param deprecation_warning: The deprecation_warning of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: str
+ """
+
+ self._deprecation_warning = deprecation_warning
+
+ @property
+ def name(self):
+ """Gets the name of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+        name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. # noqa: E501
+
+ :return: The name of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1CustomResourceDefinitionVersion.
+
+        name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. # noqa: E501
+
+ :param name: The name of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def schema(self):
+ """Gets the schema of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+
+ :return: The schema of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: V1CustomResourceValidation
+ """
+ return self._schema
+
+ @schema.setter
+ def schema(self, schema):
+ """Sets the schema of this V1CustomResourceDefinitionVersion.
+
+
+ :param schema: The schema of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: V1CustomResourceValidation
+ """
+
+ self._schema = schema
+
+ @property
+ def served(self):
+ """Gets the served of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+ served is a flag enabling/disabling this version from being served via REST APIs # noqa: E501
+
+ :return: The served of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: bool
+ """
+ return self._served
+
+ @served.setter
+ def served(self, served):
+ """Sets the served of this V1CustomResourceDefinitionVersion.
+
+ served is a flag enabling/disabling this version from being served via REST APIs # noqa: E501
+
+ :param served: The served of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: bool
+ """
+ if self.local_vars_configuration.client_side_validation and served is None: # noqa: E501
+ raise ValueError("Invalid value for `served`, must not be `None`") # noqa: E501
+
+ self._served = served
+
+ @property
+ def storage(self):
+ """Gets the storage of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+ storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true. # noqa: E501
+
+ :return: The storage of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: bool
+ """
+ return self._storage
+
+ @storage.setter
+ def storage(self, storage):
+ """Sets the storage of this V1CustomResourceDefinitionVersion.
+
+ storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true. # noqa: E501
+
+ :param storage: The storage of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: bool
+ """
+ if self.local_vars_configuration.client_side_validation and storage is None: # noqa: E501
+ raise ValueError("Invalid value for `storage`, must not be `None`") # noqa: E501
+
+ self._storage = storage
+
+ @property
+ def subresources(self):
+ """Gets the subresources of this V1CustomResourceDefinitionVersion. # noqa: E501
+
+
+ :return: The subresources of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :rtype: V1CustomResourceSubresources
+ """
+ return self._subresources
+
+ @subresources.setter
+ def subresources(self, subresources):
+ """Sets the subresources of this V1CustomResourceDefinitionVersion.
+
+
+ :param subresources: The subresources of this V1CustomResourceDefinitionVersion. # noqa: E501
+ :type: V1CustomResourceSubresources
+ """
+
+ self._subresources = subresources
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceDefinitionVersion):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceDefinitionVersion):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresource_scale.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresource_scale.py
new file mode 100644
index 0000000000..8251e12953
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresource_scale.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceSubresourceScale(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'label_selector_path': 'str',
+ 'spec_replicas_path': 'str',
+ 'status_replicas_path': 'str'
+ }
+
+ attribute_map = {
+ 'label_selector_path': 'labelSelectorPath',
+ 'spec_replicas_path': 'specReplicasPath',
+ 'status_replicas_path': 'statusReplicasPath'
+ }
+
+ def __init__(self, label_selector_path=None, spec_replicas_path=None, status_replicas_path=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceSubresourceScale - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._label_selector_path = None
+ self._spec_replicas_path = None
+ self._status_replicas_path = None
+ self.discriminator = None
+
+ if label_selector_path is not None:
+ self.label_selector_path = label_selector_path
+ self.spec_replicas_path = spec_replicas_path
+ self.status_replicas_path = status_replicas_path
+
+ @property
+ def label_selector_path(self):
+ """Gets the label_selector_path of this V1CustomResourceSubresourceScale. # noqa: E501
+
+ labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string. # noqa: E501
+
+ :return: The label_selector_path of this V1CustomResourceSubresourceScale. # noqa: E501
+ :rtype: str
+ """
+ return self._label_selector_path
+
+ @label_selector_path.setter
+ def label_selector_path(self, label_selector_path):
+ """Sets the label_selector_path of this V1CustomResourceSubresourceScale.
+
+ labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string. # noqa: E501
+
+ :param label_selector_path: The label_selector_path of this V1CustomResourceSubresourceScale. # noqa: E501
+ :type: str
+ """
+
+ self._label_selector_path = label_selector_path
+
+ @property
+ def spec_replicas_path(self):
+ """Gets the spec_replicas_path of this V1CustomResourceSubresourceScale. # noqa: E501
+
+ specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. # noqa: E501
+
+ :return: The spec_replicas_path of this V1CustomResourceSubresourceScale. # noqa: E501
+ :rtype: str
+ """
+ return self._spec_replicas_path
+
+ @spec_replicas_path.setter
+ def spec_replicas_path(self, spec_replicas_path):
+ """Sets the spec_replicas_path of this V1CustomResourceSubresourceScale.
+
+ specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. # noqa: E501
+
+ :param spec_replicas_path: The spec_replicas_path of this V1CustomResourceSubresourceScale. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and spec_replicas_path is None: # noqa: E501
+ raise ValueError("Invalid value for `spec_replicas_path`, must not be `None`") # noqa: E501
+
+ self._spec_replicas_path = spec_replicas_path
+
+ @property
+ def status_replicas_path(self):
+ """Gets the status_replicas_path of this V1CustomResourceSubresourceScale. # noqa: E501
+
+ statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0. # noqa: E501
+
+ :return: The status_replicas_path of this V1CustomResourceSubresourceScale. # noqa: E501
+ :rtype: str
+ """
+ return self._status_replicas_path
+
+ @status_replicas_path.setter
+ def status_replicas_path(self, status_replicas_path):
+ """Sets the status_replicas_path of this V1CustomResourceSubresourceScale.
+
+ statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0. # noqa: E501
+
+ :param status_replicas_path: The status_replicas_path of this V1CustomResourceSubresourceScale. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status_replicas_path is None: # noqa: E501
+ raise ValueError("Invalid value for `status_replicas_path`, must not be `None`") # noqa: E501
+
+ self._status_replicas_path = status_replicas_path
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceSubresourceScale):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceSubresourceScale):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresources.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresources.py
new file mode 100644
index 0000000000..697241e150
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_subresources.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceSubresources(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'scale': 'V1CustomResourceSubresourceScale',
+ 'status': 'object'
+ }
+
+ attribute_map = {
+ 'scale': 'scale',
+ 'status': 'status'
+ }
+
+ def __init__(self, scale=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceSubresources - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._scale = None
+ self._status = None
+ self.discriminator = None
+
+ if scale is not None:
+ self.scale = scale
+ if status is not None:
+ self.status = status
+
+ @property
+ def scale(self):
+ """Gets the scale of this V1CustomResourceSubresources. # noqa: E501
+
+
+ :return: The scale of this V1CustomResourceSubresources. # noqa: E501
+ :rtype: V1CustomResourceSubresourceScale
+ """
+ return self._scale
+
+ @scale.setter
+ def scale(self, scale):
+ """Sets the scale of this V1CustomResourceSubresources.
+
+
+ :param scale: The scale of this V1CustomResourceSubresources. # noqa: E501
+ :type: V1CustomResourceSubresourceScale
+ """
+
+ self._scale = scale
+
+ @property
+ def status(self):
+ """Gets the status of this V1CustomResourceSubresources. # noqa: E501
+
+ status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. # noqa: E501
+
+ :return: The status of this V1CustomResourceSubresources. # noqa: E501
+ :rtype: object
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1CustomResourceSubresources.
+
+ status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. # noqa: E501
+
+ :param status: The status of this V1CustomResourceSubresources. # noqa: E501
+ :type: object
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceSubresources):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceSubresources):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_validation.py b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_validation.py
new file mode 100644
index 0000000000..070a4b6223
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_custom_resource_validation.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1CustomResourceValidation(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'open_apiv3_schema': 'V1JSONSchemaProps'
+ }
+
+ attribute_map = {
+ 'open_apiv3_schema': 'openAPIV3Schema'
+ }
+
+ def __init__(self, open_apiv3_schema=None, local_vars_configuration=None): # noqa: E501
+ """V1CustomResourceValidation - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._open_apiv3_schema = None
+ self.discriminator = None
+
+ if open_apiv3_schema is not None:
+ self.open_apiv3_schema = open_apiv3_schema
+
+ @property
+ def open_apiv3_schema(self):
+ """Gets the open_apiv3_schema of this V1CustomResourceValidation. # noqa: E501
+
+
+ :return: The open_apiv3_schema of this V1CustomResourceValidation. # noqa: E501
+ :rtype: V1JSONSchemaProps
+ """
+ return self._open_apiv3_schema
+
+ @open_apiv3_schema.setter
+ def open_apiv3_schema(self, open_apiv3_schema):
+ """Sets the open_apiv3_schema of this V1CustomResourceValidation.
+
+
+ :param open_apiv3_schema: The open_apiv3_schema of this V1CustomResourceValidation. # noqa: E501
+ :type: V1JSONSchemaProps
+ """
+
+ self._open_apiv3_schema = open_apiv3_schema
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1CustomResourceValidation):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1CustomResourceValidation):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_endpoint.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_endpoint.py
new file mode 100644
index 0000000000..f43c0e1149
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_endpoint.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonEndpoint(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'port': 'int'
+ }
+
+ attribute_map = {
+ 'port': 'Port'
+ }
+
+ def __init__(self, port=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonEndpoint - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._port = None
+ self.discriminator = None
+
+ self.port = port
+
+ @property
+ def port(self):
+ """Gets the port of this V1DaemonEndpoint. # noqa: E501
+
+ Port number of the given endpoint. # noqa: E501
+
+ :return: The port of this V1DaemonEndpoint. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this V1DaemonEndpoint.
+
+ Port number of the given endpoint. # noqa: E501
+
+ :param port: The port of this V1DaemonEndpoint. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
+ raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
+
+ self._port = port
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonEndpoint):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonEndpoint):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set.py
new file mode 100644
index 0000000000..91927c4f83
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonSet(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1DaemonSetSpec',
+ 'status': 'V1DaemonSetStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonSet - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1DaemonSet. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1DaemonSet. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1DaemonSet.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1DaemonSet. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1DaemonSet. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1DaemonSet. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1DaemonSet.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1DaemonSet. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1DaemonSet. # noqa: E501
+
+
+ :return: The metadata of this V1DaemonSet. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1DaemonSet.
+
+
+ :param metadata: The metadata of this V1DaemonSet. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1DaemonSet. # noqa: E501
+
+
+ :return: The spec of this V1DaemonSet. # noqa: E501
+ :rtype: V1DaemonSetSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1DaemonSet.
+
+
+ :param spec: The spec of this V1DaemonSet. # noqa: E501
+ :type: V1DaemonSetSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1DaemonSet. # noqa: E501
+
+
+ :return: The status of this V1DaemonSet. # noqa: E501
+ :rtype: V1DaemonSetStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1DaemonSet.
+
+
+ :param status: The status of this V1DaemonSet. # noqa: E501
+ :type: V1DaemonSetStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonSet):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonSet):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_condition.py
new file mode 100644
index 0000000000..8fefd9a8a9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonSetCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonSetCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1DaemonSetCondition. # noqa: E501
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1DaemonSetCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1DaemonSetCondition.
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1DaemonSetCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1DaemonSetCondition. # noqa: E501
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :return: The message of this V1DaemonSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1DaemonSetCondition.
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :param message: The message of this V1DaemonSetCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1DaemonSetCondition. # noqa: E501
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1DaemonSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1DaemonSetCondition.
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1DaemonSetCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1DaemonSetCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1DaemonSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1DaemonSetCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1DaemonSetCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1DaemonSetCondition. # noqa: E501
+
+ Type of DaemonSet condition. # noqa: E501
+
+ :return: The type of this V1DaemonSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1DaemonSetCondition.
+
+ Type of DaemonSet condition. # noqa: E501
+
+ :param type: The type of this V1DaemonSetCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonSetCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonSetCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_list.py
new file mode 100644
index 0000000000..ba42af7a6b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonSetList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1DaemonSet]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonSetList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1DaemonSetList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1DaemonSetList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1DaemonSetList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1DaemonSetList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1DaemonSetList. # noqa: E501
+
+ A list of daemon sets. # noqa: E501
+
+ :return: The items of this V1DaemonSetList. # noqa: E501
+ :rtype: list[V1DaemonSet]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1DaemonSetList.
+
+ A list of daemon sets. # noqa: E501
+
+ :param items: The items of this V1DaemonSetList. # noqa: E501
+ :type: list[V1DaemonSet]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1DaemonSetList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1DaemonSetList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1DaemonSetList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1DaemonSetList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1DaemonSetList. # noqa: E501
+
+
+ :return: The metadata of this V1DaemonSetList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1DaemonSetList.
+
+
+ :param metadata: The metadata of this V1DaemonSetList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonSetList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonSetList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_spec.py
new file mode 100644
index 0000000000..87571f5bbb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_spec.py
@@ -0,0 +1,230 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonSetSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'min_ready_seconds': 'int',
+ 'revision_history_limit': 'int',
+ 'selector': 'V1LabelSelector',
+ 'template': 'V1PodTemplateSpec',
+ 'update_strategy': 'V1DaemonSetUpdateStrategy'
+ }
+
+ attribute_map = {
+ 'min_ready_seconds': 'minReadySeconds',
+ 'revision_history_limit': 'revisionHistoryLimit',
+ 'selector': 'selector',
+ 'template': 'template',
+ 'update_strategy': 'updateStrategy'
+ }
+
+ def __init__(self, min_ready_seconds=None, revision_history_limit=None, selector=None, template=None, update_strategy=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonSetSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._min_ready_seconds = None
+ self._revision_history_limit = None
+ self._selector = None
+ self._template = None
+ self._update_strategy = None
+ self.discriminator = None
+
+ if min_ready_seconds is not None:
+ self.min_ready_seconds = min_ready_seconds
+ if revision_history_limit is not None:
+ self.revision_history_limit = revision_history_limit
+ self.selector = selector
+ self.template = template
+ if update_strategy is not None:
+ self.update_strategy = update_strategy
+
+ @property
+ def min_ready_seconds(self):
+ """Gets the min_ready_seconds of this V1DaemonSetSpec. # noqa: E501
+
+ The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). # noqa: E501
+
+ :return: The min_ready_seconds of this V1DaemonSetSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._min_ready_seconds
+
+ @min_ready_seconds.setter
+ def min_ready_seconds(self, min_ready_seconds):
+ """Sets the min_ready_seconds of this V1DaemonSetSpec.
+
+ The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). # noqa: E501
+
+ :param min_ready_seconds: The min_ready_seconds of this V1DaemonSetSpec. # noqa: E501
+ :type: int
+ """
+
+ self._min_ready_seconds = min_ready_seconds
+
+ @property
+ def revision_history_limit(self):
+ """Gets the revision_history_limit of this V1DaemonSetSpec. # noqa: E501
+
+ The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
+
+ :return: The revision_history_limit of this V1DaemonSetSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._revision_history_limit
+
+ @revision_history_limit.setter
+ def revision_history_limit(self, revision_history_limit):
+ """Sets the revision_history_limit of this V1DaemonSetSpec.
+
+ The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
+
+ :param revision_history_limit: The revision_history_limit of this V1DaemonSetSpec. # noqa: E501
+ :type: int
+ """
+
+ self._revision_history_limit = revision_history_limit
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1DaemonSetSpec. # noqa: E501
+
+
+ :return: The selector of this V1DaemonSetSpec. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1DaemonSetSpec.
+
+
+ :param selector: The selector of this V1DaemonSetSpec. # noqa: E501
+ :type: V1LabelSelector
+ """
+ if self.local_vars_configuration.client_side_validation and selector is None: # noqa: E501
+ raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
+
+ self._selector = selector
+
+ @property
+ def template(self):
+ """Gets the template of this V1DaemonSetSpec. # noqa: E501
+
+
+ :return: The template of this V1DaemonSetSpec. # noqa: E501
+ :rtype: V1PodTemplateSpec
+ """
+ return self._template
+
+ @template.setter
+ def template(self, template):
+ """Sets the template of this V1DaemonSetSpec.
+
+
+ :param template: The template of this V1DaemonSetSpec. # noqa: E501
+ :type: V1PodTemplateSpec
+ """
+ if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
+ raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
+
+ self._template = template
+
+ @property
+ def update_strategy(self):
+ """Gets the update_strategy of this V1DaemonSetSpec. # noqa: E501
+
+
+ :return: The update_strategy of this V1DaemonSetSpec. # noqa: E501
+ :rtype: V1DaemonSetUpdateStrategy
+ """
+ return self._update_strategy
+
+ @update_strategy.setter
+ def update_strategy(self, update_strategy):
+ """Sets the update_strategy of this V1DaemonSetSpec.
+
+
+ :param update_strategy: The update_strategy of this V1DaemonSetSpec. # noqa: E501
+ :type: V1DaemonSetUpdateStrategy
+ """
+
+ self._update_strategy = update_strategy
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonSetSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonSetSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_status.py
new file mode 100644
index 0000000000..386fc38422
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_status.py
@@ -0,0 +1,378 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonSetStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'collision_count': 'int',
+ 'conditions': 'list[V1DaemonSetCondition]',
+ 'current_number_scheduled': 'int',
+ 'desired_number_scheduled': 'int',
+ 'number_available': 'int',
+ 'number_misscheduled': 'int',
+ 'number_ready': 'int',
+ 'number_unavailable': 'int',
+ 'observed_generation': 'int',
+ 'updated_number_scheduled': 'int'
+ }
+
+ attribute_map = {
+ 'collision_count': 'collisionCount',
+ 'conditions': 'conditions',
+ 'current_number_scheduled': 'currentNumberScheduled',
+ 'desired_number_scheduled': 'desiredNumberScheduled',
+ 'number_available': 'numberAvailable',
+ 'number_misscheduled': 'numberMisscheduled',
+ 'number_ready': 'numberReady',
+ 'number_unavailable': 'numberUnavailable',
+ 'observed_generation': 'observedGeneration',
+ 'updated_number_scheduled': 'updatedNumberScheduled'
+ }
+
+ def __init__(self, collision_count=None, conditions=None, current_number_scheduled=None, desired_number_scheduled=None, number_available=None, number_misscheduled=None, number_ready=None, number_unavailable=None, observed_generation=None, updated_number_scheduled=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonSetStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._collision_count = None
+ self._conditions = None
+ self._current_number_scheduled = None
+ self._desired_number_scheduled = None
+ self._number_available = None
+ self._number_misscheduled = None
+ self._number_ready = None
+ self._number_unavailable = None
+ self._observed_generation = None
+ self._updated_number_scheduled = None
+ self.discriminator = None
+
+ if collision_count is not None:
+ self.collision_count = collision_count
+ if conditions is not None:
+ self.conditions = conditions
+ self.current_number_scheduled = current_number_scheduled
+ self.desired_number_scheduled = desired_number_scheduled
+ if number_available is not None:
+ self.number_available = number_available
+ self.number_misscheduled = number_misscheduled
+ self.number_ready = number_ready
+ if number_unavailable is not None:
+ self.number_unavailable = number_unavailable
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+ if updated_number_scheduled is not None:
+ self.updated_number_scheduled = updated_number_scheduled
+
+ @property
+ def collision_count(self):
+ """Gets the collision_count of this V1DaemonSetStatus. # noqa: E501
+
+ Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
+
+ :return: The collision_count of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._collision_count
+
+ @collision_count.setter
+ def collision_count(self, collision_count):
+ """Sets the collision_count of this V1DaemonSetStatus.
+
+ Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
+
+ :param collision_count: The collision_count of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._collision_count = collision_count
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1DaemonSetStatus. # noqa: E501
+
+ Represents the latest available observations of a DaemonSet's current state. # noqa: E501
+
+ :return: The conditions of this V1DaemonSetStatus. # noqa: E501
+ :rtype: list[V1DaemonSetCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1DaemonSetStatus.
+
+ Represents the latest available observations of a DaemonSet's current state. # noqa: E501
+
+ :param conditions: The conditions of this V1DaemonSetStatus. # noqa: E501
+ :type: list[V1DaemonSetCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def current_number_scheduled(self):
+ """Gets the current_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+
+ The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
+
+ :return: The current_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._current_number_scheduled
+
+ @current_number_scheduled.setter
+ def current_number_scheduled(self, current_number_scheduled):
+ """Sets the current_number_scheduled of this V1DaemonSetStatus.
+
+ The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
+
+ :param current_number_scheduled: The current_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and current_number_scheduled is None: # noqa: E501
+ raise ValueError("Invalid value for `current_number_scheduled`, must not be `None`") # noqa: E501
+
+ self._current_number_scheduled = current_number_scheduled
+
+ @property
+ def desired_number_scheduled(self):
+ """Gets the desired_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+
+ The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
+
+ :return: The desired_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._desired_number_scheduled
+
+ @desired_number_scheduled.setter
+ def desired_number_scheduled(self, desired_number_scheduled):
+ """Sets the desired_number_scheduled of this V1DaemonSetStatus.
+
+ The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
+
+ :param desired_number_scheduled: The desired_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and desired_number_scheduled is None: # noqa: E501
+ raise ValueError("Invalid value for `desired_number_scheduled`, must not be `None`") # noqa: E501
+
+ self._desired_number_scheduled = desired_number_scheduled
+
+ @property
+ def number_available(self):
+ """Gets the number_available of this V1DaemonSetStatus. # noqa: E501
+
+ The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
+
+ :return: The number_available of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._number_available
+
+ @number_available.setter
+ def number_available(self, number_available):
+ """Sets the number_available of this V1DaemonSetStatus.
+
+ The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
+
+ :param number_available: The number_available of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._number_available = number_available
+
+ @property
+ def number_misscheduled(self):
+ """Gets the number_misscheduled of this V1DaemonSetStatus. # noqa: E501
+
+ The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
+
+ :return: The number_misscheduled of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._number_misscheduled
+
+ @number_misscheduled.setter
+ def number_misscheduled(self, number_misscheduled):
+ """Sets the number_misscheduled of this V1DaemonSetStatus.
+
+ The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
+
+ :param number_misscheduled: The number_misscheduled of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and number_misscheduled is None: # noqa: E501
+ raise ValueError("Invalid value for `number_misscheduled`, must not be `None`") # noqa: E501
+
+ self._number_misscheduled = number_misscheduled
+
+ @property
+ def number_ready(self):
+ """Gets the number_ready of this V1DaemonSetStatus. # noqa: E501
+
+ numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition. # noqa: E501
+
+ :return: The number_ready of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._number_ready
+
+ @number_ready.setter
+ def number_ready(self, number_ready):
+ """Sets the number_ready of this V1DaemonSetStatus.
+
+ numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition. # noqa: E501
+
+ :param number_ready: The number_ready of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and number_ready is None: # noqa: E501
+ raise ValueError("Invalid value for `number_ready`, must not be `None`") # noqa: E501
+
+ self._number_ready = number_ready
+
+ @property
+ def number_unavailable(self):
+ """Gets the number_unavailable of this V1DaemonSetStatus. # noqa: E501
+
+ The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
+
+ :return: The number_unavailable of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._number_unavailable
+
+ @number_unavailable.setter
+ def number_unavailable(self, number_unavailable):
+ """Sets the number_unavailable of this V1DaemonSetStatus.
+
+ The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
+
+ :param number_unavailable: The number_unavailable of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._number_unavailable = number_unavailable
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1DaemonSetStatus. # noqa: E501
+
+ The most recent generation observed by the daemon set controller. # noqa: E501
+
+ :return: The observed_generation of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1DaemonSetStatus.
+
+ The most recent generation observed by the daemon set controller. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ @property
+ def updated_number_scheduled(self):
+ """Gets the updated_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+
+ The total number of nodes that are running updated daemon pod # noqa: E501
+
+ :return: The updated_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._updated_number_scheduled
+
+ @updated_number_scheduled.setter
+ def updated_number_scheduled(self, updated_number_scheduled):
+ """Sets the updated_number_scheduled of this V1DaemonSetStatus.
+
+ The total number of nodes that are running updated daemon pod # noqa: E501
+
+ :param updated_number_scheduled: The updated_number_scheduled of this V1DaemonSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._updated_number_scheduled = updated_number_scheduled
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonSetStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonSetStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_update_strategy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_update_strategy.py
new file mode 100644
index 0000000000..a03f548fff
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_daemon_set_update_strategy.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DaemonSetUpdateStrategy(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'rolling_update': 'V1RollingUpdateDaemonSet',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'rolling_update': 'rollingUpdate',
+ 'type': 'type'
+ }
+
+ def __init__(self, rolling_update=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1DaemonSetUpdateStrategy - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._rolling_update = None
+ self._type = None
+ self.discriminator = None
+
+ if rolling_update is not None:
+ self.rolling_update = rolling_update
+ if type is not None:
+ self.type = type
+
+ @property
+ def rolling_update(self):
+ """Gets the rolling_update of this V1DaemonSetUpdateStrategy. # noqa: E501
+
+
+ :return: The rolling_update of this V1DaemonSetUpdateStrategy. # noqa: E501
+ :rtype: V1RollingUpdateDaemonSet
+ """
+ return self._rolling_update
+
+ @rolling_update.setter
+ def rolling_update(self, rolling_update):
+ """Sets the rolling_update of this V1DaemonSetUpdateStrategy.
+
+
+ :param rolling_update: The rolling_update of this V1DaemonSetUpdateStrategy. # noqa: E501
+ :type: V1RollingUpdateDaemonSet
+ """
+
+ self._rolling_update = rolling_update
+
+ @property
+ def type(self):
+ """Gets the type of this V1DaemonSetUpdateStrategy. # noqa: E501
+
+ Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate. # noqa: E501
+
+ :return: The type of this V1DaemonSetUpdateStrategy. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1DaemonSetUpdateStrategy.
+
+ Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate. # noqa: E501
+
+ :param type: The type of this V1DaemonSetUpdateStrategy. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DaemonSetUpdateStrategy):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DaemonSetUpdateStrategy):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_delete_options.py b/contrib/python/kubernetes/kubernetes/client/models/v1_delete_options.py
new file mode 100644
index 0000000000..3feda04c4e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_delete_options.py
@@ -0,0 +1,288 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DeleteOptions(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'dry_run': 'list[str]',
+ 'grace_period_seconds': 'int',
+ 'kind': 'str',
+ 'orphan_dependents': 'bool',
+ 'preconditions': 'V1Preconditions',
+ 'propagation_policy': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'dry_run': 'dryRun',
+ 'grace_period_seconds': 'gracePeriodSeconds',
+ 'kind': 'kind',
+ 'orphan_dependents': 'orphanDependents',
+ 'preconditions': 'preconditions',
+ 'propagation_policy': 'propagationPolicy'
+ }
+
+ def __init__(self, api_version=None, dry_run=None, grace_period_seconds=None, kind=None, orphan_dependents=None, preconditions=None, propagation_policy=None, local_vars_configuration=None): # noqa: E501
+ """V1DeleteOptions - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._dry_run = None
+ self._grace_period_seconds = None
+ self._kind = None
+ self._orphan_dependents = None
+ self._preconditions = None
+ self._propagation_policy = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if dry_run is not None:
+ self.dry_run = dry_run
+ if grace_period_seconds is not None:
+ self.grace_period_seconds = grace_period_seconds
+ if kind is not None:
+ self.kind = kind
+ if orphan_dependents is not None:
+ self.orphan_dependents = orphan_dependents
+ if preconditions is not None:
+ self.preconditions = preconditions
+ if propagation_policy is not None:
+ self.propagation_policy = propagation_policy
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1DeleteOptions. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1DeleteOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1DeleteOptions.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1DeleteOptions. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def dry_run(self):
+ """Gets the dry_run of this V1DeleteOptions. # noqa: E501
+
+ When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed # noqa: E501
+
+ :return: The dry_run of this V1DeleteOptions. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._dry_run
+
+ @dry_run.setter
+ def dry_run(self, dry_run):
+ """Sets the dry_run of this V1DeleteOptions.
+
+ When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed # noqa: E501
+
+ :param dry_run: The dry_run of this V1DeleteOptions. # noqa: E501
+ :type: list[str]
+ """
+
+ self._dry_run = dry_run
+
+ @property
+ def grace_period_seconds(self):
+ """Gets the grace_period_seconds of this V1DeleteOptions. # noqa: E501
+
+ The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. # noqa: E501
+
+ :return: The grace_period_seconds of this V1DeleteOptions. # noqa: E501
+ :rtype: int
+ """
+ return self._grace_period_seconds
+
+ @grace_period_seconds.setter
+ def grace_period_seconds(self, grace_period_seconds):
+ """Sets the grace_period_seconds of this V1DeleteOptions.
+
+ The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. # noqa: E501
+
+ :param grace_period_seconds: The grace_period_seconds of this V1DeleteOptions. # noqa: E501
+ :type: int
+ """
+
+ self._grace_period_seconds = grace_period_seconds
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1DeleteOptions. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1DeleteOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1DeleteOptions.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1DeleteOptions. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def orphan_dependents(self):
+ """Gets the orphan_dependents of this V1DeleteOptions. # noqa: E501
+
+ Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. # noqa: E501
+
+ :return: The orphan_dependents of this V1DeleteOptions. # noqa: E501
+ :rtype: bool
+ """
+ return self._orphan_dependents
+
+ @orphan_dependents.setter
+ def orphan_dependents(self, orphan_dependents):
+ """Sets the orphan_dependents of this V1DeleteOptions.
+
+ Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. # noqa: E501
+
+ :param orphan_dependents: The orphan_dependents of this V1DeleteOptions. # noqa: E501
+ :type: bool
+ """
+
+ self._orphan_dependents = orphan_dependents
+
+ @property
+ def preconditions(self):
+ """Gets the preconditions of this V1DeleteOptions. # noqa: E501
+
+
+ :return: The preconditions of this V1DeleteOptions. # noqa: E501
+ :rtype: V1Preconditions
+ """
+ return self._preconditions
+
+ @preconditions.setter
+ def preconditions(self, preconditions):
+ """Sets the preconditions of this V1DeleteOptions.
+
+
+ :param preconditions: The preconditions of this V1DeleteOptions. # noqa: E501
+ :type: V1Preconditions
+ """
+
+ self._preconditions = preconditions
+
+ @property
+ def propagation_policy(self):
+ """Gets the propagation_policy of this V1DeleteOptions. # noqa: E501
+
+ Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. # noqa: E501
+
+ :return: The propagation_policy of this V1DeleteOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._propagation_policy
+
+ @propagation_policy.setter
+ def propagation_policy(self, propagation_policy):
+ """Sets the propagation_policy of this V1DeleteOptions.
+
+ Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. # noqa: E501
+
+ :param propagation_policy: The propagation_policy of this V1DeleteOptions. # noqa: E501
+ :type: str
+ """
+
+ self._propagation_policy = propagation_policy
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DeleteOptions):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DeleteOptions):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_deployment.py b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment.py
new file mode 100644
index 0000000000..a52e630082
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Deployment(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1DeploymentSpec',
+ 'status': 'V1DeploymentStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1Deployment - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Deployment. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Deployment. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Deployment.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Deployment. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Deployment. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Deployment. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Deployment.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Deployment. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Deployment. # noqa: E501
+
+
+ :return: The metadata of this V1Deployment. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Deployment.
+
+
+ :param metadata: The metadata of this V1Deployment. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1Deployment. # noqa: E501
+
+
+ :return: The spec of this V1Deployment. # noqa: E501
+ :rtype: V1DeploymentSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1Deployment.
+
+
+ :param spec: The spec of this V1Deployment. # noqa: E501
+ :type: V1DeploymentSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1Deployment. # noqa: E501
+
+
+ :return: The status of this V1Deployment. # noqa: E501
+ :rtype: V1DeploymentStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1Deployment.
+
+
+ :param status: The status of this V1Deployment. # noqa: E501
+ :type: V1DeploymentStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Deployment):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Deployment):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_condition.py
new file mode 100644
index 0000000000..86c57b596d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_condition.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DeploymentCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'last_update_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'last_update_time': 'lastUpdateTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1DeploymentCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._last_update_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if last_update_time is not None:
+ self.last_update_time = last_update_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1DeploymentCondition. # noqa: E501
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1DeploymentCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1DeploymentCondition.
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1DeploymentCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def last_update_time(self):
+ """Gets the last_update_time of this V1DeploymentCondition. # noqa: E501
+
+ The last time this condition was updated. # noqa: E501
+
+ :return: The last_update_time of this V1DeploymentCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_update_time
+
+ @last_update_time.setter
+ def last_update_time(self, last_update_time):
+ """Sets the last_update_time of this V1DeploymentCondition.
+
+ The last time this condition was updated. # noqa: E501
+
+ :param last_update_time: The last_update_time of this V1DeploymentCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_update_time = last_update_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1DeploymentCondition. # noqa: E501
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :return: The message of this V1DeploymentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1DeploymentCondition.
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :param message: The message of this V1DeploymentCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1DeploymentCondition. # noqa: E501
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1DeploymentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1DeploymentCondition.
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1DeploymentCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1DeploymentCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1DeploymentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1DeploymentCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1DeploymentCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1DeploymentCondition. # noqa: E501
+
+ Type of deployment condition. # noqa: E501
+
+ :return: The type of this V1DeploymentCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1DeploymentCondition.
+
+ Type of deployment condition. # noqa: E501
+
+ :param type: The type of this V1DeploymentCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DeploymentCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DeploymentCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_list.py
new file mode 100644
index 0000000000..304338008e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DeploymentList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1Deployment]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1DeploymentList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1DeploymentList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1DeploymentList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1DeploymentList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1DeploymentList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1DeploymentList. # noqa: E501
+
+ Items is the list of Deployments. # noqa: E501
+
+ :return: The items of this V1DeploymentList. # noqa: E501
+ :rtype: list[V1Deployment]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1DeploymentList.
+
+ Items is the list of Deployments. # noqa: E501
+
+ :param items: The items of this V1DeploymentList. # noqa: E501
+ :type: list[V1Deployment]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1DeploymentList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1DeploymentList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1DeploymentList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1DeploymentList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1DeploymentList. # noqa: E501
+
+
+ :return: The metadata of this V1DeploymentList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1DeploymentList.
+
+
+ :param metadata: The metadata of this V1DeploymentList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DeploymentList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DeploymentList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_spec.py
new file mode 100644
index 0000000000..9bfaaf9523
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_spec.py
@@ -0,0 +1,314 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1DeploymentSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'min_ready_seconds': 'int',
+ 'paused': 'bool',
+ 'progress_deadline_seconds': 'int',
+ 'replicas': 'int',
+ 'revision_history_limit': 'int',
+ 'selector': 'V1LabelSelector',
+ 'strategy': 'V1DeploymentStrategy',
+ 'template': 'V1PodTemplateSpec'
+ }
+
+ attribute_map = {
+ 'min_ready_seconds': 'minReadySeconds',
+ 'paused': 'paused',
+ 'progress_deadline_seconds': 'progressDeadlineSeconds',
+ 'replicas': 'replicas',
+ 'revision_history_limit': 'revisionHistoryLimit',
+ 'selector': 'selector',
+ 'strategy': 'strategy',
+ 'template': 'template'
+ }
+
+ def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, selector=None, strategy=None, template=None, local_vars_configuration=None): # noqa: E501
+ """V1DeploymentSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._min_ready_seconds = None
+ self._paused = None
+ self._progress_deadline_seconds = None
+ self._replicas = None
+ self._revision_history_limit = None
+ self._selector = None
+ self._strategy = None
+ self._template = None
+ self.discriminator = None
+
+ if min_ready_seconds is not None:
+ self.min_ready_seconds = min_ready_seconds
+ if paused is not None:
+ self.paused = paused
+ if progress_deadline_seconds is not None:
+ self.progress_deadline_seconds = progress_deadline_seconds
+ if replicas is not None:
+ self.replicas = replicas
+ if revision_history_limit is not None:
+ self.revision_history_limit = revision_history_limit
+ self.selector = selector
+ if strategy is not None:
+ self.strategy = strategy
+ self.template = template
+
+ @property
+ def min_ready_seconds(self):
+ """Gets the min_ready_seconds of this V1DeploymentSpec. # noqa: E501
+
+ Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
+
+ :return: The min_ready_seconds of this V1DeploymentSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._min_ready_seconds
+
+ @min_ready_seconds.setter
+ def min_ready_seconds(self, min_ready_seconds):
+ """Sets the min_ready_seconds of this V1DeploymentSpec.
+
+ Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
+
+ :param min_ready_seconds: The min_ready_seconds of this V1DeploymentSpec. # noqa: E501
+ :type: int
+ """
+
+ self._min_ready_seconds = min_ready_seconds
+
+ @property
+ def paused(self):
+ """Gets the paused of this V1DeploymentSpec. # noqa: E501
+
+ Indicates that the deployment is paused. # noqa: E501
+
+ :return: The paused of this V1DeploymentSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._paused
+
+ @paused.setter
+ def paused(self, paused):
+ """Sets the paused of this V1DeploymentSpec.
+
+ Indicates that the deployment is paused. # noqa: E501
+
+ :param paused: The paused of this V1DeploymentSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._paused = paused
+
+ @property
+ def progress_deadline_seconds(self):
+ """Gets the progress_deadline_seconds of this V1DeploymentSpec. # noqa: E501
+
+ The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s. # noqa: E501
+
+ :return: The progress_deadline_seconds of this V1DeploymentSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._progress_deadline_seconds
+
+ @progress_deadline_seconds.setter
+ def progress_deadline_seconds(self, progress_deadline_seconds):
+ """Sets the progress_deadline_seconds of this V1DeploymentSpec.
+
+ The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s. # noqa: E501
+
+ :param progress_deadline_seconds: The progress_deadline_seconds of this V1DeploymentSpec. # noqa: E501
+ :type: int
+ """
+
+ self._progress_deadline_seconds = progress_deadline_seconds
+
+ @property
+ def replicas(self):
+ """Gets the replicas of this V1DeploymentSpec. # noqa: E501
+
+ Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
+
+ :return: The replicas of this V1DeploymentSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._replicas
+
+ @replicas.setter
+ def replicas(self, replicas):
+ """Sets the replicas of this V1DeploymentSpec.
+
+ Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
+
+ :param replicas: The replicas of this V1DeploymentSpec. # noqa: E501
+ :type: int
+ """
+
+ self._replicas = replicas
+
+ @property
+ def revision_history_limit(self):
+ """Gets the revision_history_limit of this V1DeploymentSpec. # noqa: E501
+
+ The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
+
+ :return: The revision_history_limit of this V1DeploymentSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._revision_history_limit
+
+ @revision_history_limit.setter
+ def revision_history_limit(self, revision_history_limit):
+ """Sets the revision_history_limit of this V1DeploymentSpec.
+
+ The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
+
+ :param revision_history_limit: The revision_history_limit of this V1DeploymentSpec. # noqa: E501
+ :type: int
+ """
+
+ self._revision_history_limit = revision_history_limit
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1DeploymentSpec. # noqa: E501
+
+
+ :return: The selector of this V1DeploymentSpec. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1DeploymentSpec.
+
+
+ :param selector: The selector of this V1DeploymentSpec. # noqa: E501
+ :type: V1LabelSelector
+ """
+ if self.local_vars_configuration.client_side_validation and selector is None: # noqa: E501
+ raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
+
+ self._selector = selector
+
+ @property
+ def strategy(self):
+ """Gets the strategy of this V1DeploymentSpec. # noqa: E501
+
+
+ :return: The strategy of this V1DeploymentSpec. # noqa: E501
+ :rtype: V1DeploymentStrategy
+ """
+ return self._strategy
+
+ @strategy.setter
+ def strategy(self, strategy):
+ """Sets the strategy of this V1DeploymentSpec.
+
+
+ :param strategy: The strategy of this V1DeploymentSpec. # noqa: E501
+ :type: V1DeploymentStrategy
+ """
+
+ self._strategy = strategy
+
+ @property
+ def template(self):
+ """Gets the template of this V1DeploymentSpec. # noqa: E501
+
+
+ :return: The template of this V1DeploymentSpec. # noqa: E501
+ :rtype: V1PodTemplateSpec
+ """
+ return self._template
+
+ @template.setter
+ def template(self, template):
+ """Sets the template of this V1DeploymentSpec.
+
+
+ :param template: The template of this V1DeploymentSpec. # noqa: E501
+ :type: V1PodTemplateSpec
+ """
+ if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
+ raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
+
+ self._template = template
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1DeploymentSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1DeploymentSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_status.py
new file mode 100644
index 0000000000..61fad37c66
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_status.py
@@ -0,0 +1,318 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1DeploymentStatus(object):
    """Observed status of a Deployment: replica counts, conditions,
    collision count and the generation seen by the deployment controller.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate instead of editing by hand.

    Class attributes:
        openapi_types: attribute name -> OpenAPI type of that attribute.
        attribute_map: attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'available_replicas': 'int',
        'collision_count': 'int',
        'conditions': 'list[V1DeploymentCondition]',
        'observed_generation': 'int',
        'ready_replicas': 'int',
        'replicas': 'int',
        'unavailable_replicas': 'int',
        'updated_replicas': 'int'
    }

    attribute_map = {
        'available_replicas': 'availableReplicas',
        'collision_count': 'collisionCount',
        'conditions': 'conditions',
        'observed_generation': 'observedGeneration',
        'ready_replicas': 'readyReplicas',
        'replicas': 'replicas',
        'unavailable_replicas': 'unavailableReplicas',
        'updated_replicas': 'updatedReplicas'
    }

    def __init__(self, available_replicas=None, collision_count=None, conditions=None, observed_generation=None, ready_replicas=None, replicas=None, unavailable_replicas=None, updated_replicas=None, local_vars_configuration=None):  # noqa: E501
        """V1DeploymentStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Every declared attribute starts out as None ...
        for attr_name in self.openapi_types:
            setattr(self, '_' + attr_name, None)

        # ... and only explicitly supplied values go through the setters.
        if available_replicas is not None:
            self.available_replicas = available_replicas
        if collision_count is not None:
            self.collision_count = collision_count
        if conditions is not None:
            self.conditions = conditions
        if observed_generation is not None:
            self.observed_generation = observed_generation
        if ready_replicas is not None:
            self.ready_replicas = ready_replicas
        if replicas is not None:
            self.replicas = replicas
        if unavailable_replicas is not None:
            self.unavailable_replicas = unavailable_replicas
        if updated_replicas is not None:
            self.updated_replicas = updated_replicas

    @property
    def available_replicas(self):
        """int: total number of available pods (ready for at least
        minReadySeconds) targeted by this deployment."""
        return self._available_replicas

    @available_replicas.setter
    def available_replicas(self, value):
        self._available_replicas = value

    @property
    def collision_count(self):
        """int: count of hash collisions for the Deployment; used by the
        controller as a collision-avoidance mechanism when it needs to
        create the name for the newest ReplicaSet."""
        return self._collision_count

    @collision_count.setter
    def collision_count(self, value):
        self._collision_count = value

    @property
    def conditions(self):
        """list[V1DeploymentCondition]: latest available observations of
        the deployment's current state."""
        return self._conditions

    @conditions.setter
    def conditions(self, value):
        self._conditions = value

    @property
    def observed_generation(self):
        """int: the generation observed by the deployment controller."""
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, value):
        self._observed_generation = value

    @property
    def ready_replicas(self):
        """int: number of pods targeted by this Deployment with a Ready
        condition."""
        return self._ready_replicas

    @ready_replicas.setter
    def ready_replicas(self, value):
        self._ready_replicas = value

    @property
    def replicas(self):
        """int: total number of non-terminated pods targeted by this
        deployment (their labels match the selector)."""
        return self._replicas

    @replicas.setter
    def replicas(self, value):
        self._replicas = value

    @property
    def unavailable_replicas(self):
        """int: total number of unavailable pods still required for the
        deployment to have 100% available capacity; either running but not
        yet available, or not yet created."""
        return self._unavailable_replicas

    @unavailable_replicas.setter
    def unavailable_replicas(self, value):
        self._unavailable_replicas = value

    @property
    def updated_replicas(self):
        """int: total number of non-terminated pods targeted by this
        deployment that have the desired template spec."""
        return self._updated_replicas

    @updated_replicas.setter
    def updated_replicas(self, value):
        self._updated_replicas = value

    def to_dict(self):
        """Return the model's declared properties as a plain ``dict``,
        serializing nested models recursively."""
        def _convert(val):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror ``to_str`` so ``print``/``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are V1DeploymentStatus and their
        dict forms match."""
        if isinstance(other, V1DeploymentStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1DeploymentStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_strategy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_strategy.py
new file mode 100644
index 0000000000..5fb06af860
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_deployment_strategy.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1DeploymentStrategy(object):
    """Deployment strategy model: an update ``type`` plus optional
    rolling-update parameters.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate instead of editing by hand.

    Class attributes:
        openapi_types: attribute name -> OpenAPI type of that attribute.
        attribute_map: attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'rolling_update': 'V1RollingUpdateDeployment',
        'type': 'str'
    }

    attribute_map = {
        'rolling_update': 'rollingUpdate',
        'type': 'type'
    }

    def __init__(self, rolling_update=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1DeploymentStrategy - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Backing fields default to None; supplied values go through setters.
        self._rolling_update = None
        self._type = None

        if rolling_update is not None:
            self.rolling_update = rolling_update
        if type is not None:
            self.type = type

    @property
    def rolling_update(self):
        """V1RollingUpdateDeployment: rolling-update configuration of this
        strategy."""
        return self._rolling_update

    @rolling_update.setter
    def rolling_update(self, value):
        self._rolling_update = value

    @property
    def type(self):
        """str: type of deployment; can be "Recreate" or "RollingUpdate".
        Default is RollingUpdate."""
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_dict(self):
        """Return the model's declared properties as a plain ``dict``,
        serializing nested models recursively."""
        def _convert(val):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror ``to_str`` so ``print``/``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are V1DeploymentStrategy and their
        dict forms match."""
        if isinstance(other, V1DeploymentStrategy):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1DeploymentStrategy):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_projection.py b/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_projection.py
new file mode 100644
index 0000000000..5d797cd3e6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_projection.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1DownwardAPIProjection(object):
    """Projection model holding a list of downward-API volume files.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate instead of editing by hand.

    Class attributes:
        openapi_types: attribute name -> OpenAPI type of that attribute.
        attribute_map: attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'items': 'list[V1DownwardAPIVolumeFile]'
    }

    attribute_map = {
        'items': 'items'
    }

    def __init__(self, items=None, local_vars_configuration=None):  # noqa: E501
        """V1DownwardAPIProjection - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Backing field defaults to None; a supplied value goes through the setter.
        self._items = None
        if items is not None:
            self.items = items

    @property
    def items(self):
        """list[V1DownwardAPIVolumeFile]: list of DownwardAPIVolume files."""
        return self._items

    @items.setter
    def items(self, value):
        self._items = value

    def to_dict(self):
        """Return the model's declared properties as a plain ``dict``,
        serializing nested models recursively."""
        def _convert(val):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror ``to_str`` so ``print``/``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are V1DownwardAPIProjection and
        their dict forms match."""
        if isinstance(other, V1DownwardAPIProjection):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1DownwardAPIProjection):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_file.py b/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_file.py
new file mode 100644
index 0000000000..e9e9afb3a9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_file.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1DownwardAPIVolumeFile(object):
    """Model mapping a pod field (or resource field) to a file path,
    with optional permission mode bits.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate instead of editing by hand.

    Class attributes:
        openapi_types: attribute name -> OpenAPI type of that attribute.
        attribute_map: attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'field_ref': 'V1ObjectFieldSelector',
        'mode': 'int',
        'path': 'str',
        'resource_field_ref': 'V1ResourceFieldSelector'
    }

    attribute_map = {
        'field_ref': 'fieldRef',
        'mode': 'mode',
        'path': 'path',
        'resource_field_ref': 'resourceFieldRef'
    }

    def __init__(self, field_ref=None, mode=None, path=None, resource_field_ref=None, local_vars_configuration=None):  # noqa: E501
        """V1DownwardAPIVolumeFile - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Backing fields default to None; supplied values go through setters.
        for attr_name in self.openapi_types:
            setattr(self, '_' + attr_name, None)

        if field_ref is not None:
            self.field_ref = field_ref
        if mode is not None:
            self.mode = mode
        # `path` is required, so it is always routed through its
        # validating setter, even when None.
        self.path = path
        if resource_field_ref is not None:
            self.resource_field_ref = resource_field_ref

    @property
    def field_ref(self):
        """V1ObjectFieldSelector: pod field selector for this file."""
        return self._field_ref

    @field_ref.setter
    def field_ref(self, value):
        self._field_ref = value

    @property
    def mode(self):
        """int: optional mode bits used to set permissions on this file;
        an octal value between 0000 and 0777 or a decimal value between 0
        and 511 (JSON requires decimal).  If unspecified, the volume
        defaultMode is used; may conflict with other options that affect
        the file mode, like fsGroup."""
        return self._mode

    @mode.setter
    def mode(self, value):
        self._mode = value

    @property
    def path(self):
        """str: required relative path name of the file to be created;
        must not be absolute, must not contain the '..' path, must be
        utf-8 encoded, and its first item must not start with '..'."""
        return self._path

    @path.setter
    def path(self, value):
        """Validating setter: rejects None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501
        self._path = value

    @property
    def resource_field_ref(self):
        """V1ResourceFieldSelector: container resource selector for this file."""
        return self._resource_field_ref

    @resource_field_ref.setter
    def resource_field_ref(self, value):
        self._resource_field_ref = value

    def to_dict(self):
        """Return the model's declared properties as a plain ``dict``,
        serializing nested models recursively."""
        def _convert(val):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror ``to_str`` so ``print``/``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are V1DownwardAPIVolumeFile and
        their dict forms match."""
        if isinstance(other, V1DownwardAPIVolumeFile):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1DownwardAPIVolumeFile):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_source.py
new file mode 100644
index 0000000000..ca4a6193ba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_downward_api_volume_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1DownwardAPIVolumeSource(object):
    """Volume-source model for downward-API files with an optional
    default permission mode.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate instead of editing by hand.

    Class attributes:
        openapi_types: attribute name -> OpenAPI type of that attribute.
        attribute_map: attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'default_mode': 'int',
        'items': 'list[V1DownwardAPIVolumeFile]'
    }

    attribute_map = {
        'default_mode': 'defaultMode',
        'items': 'items'
    }

    def __init__(self, default_mode=None, items=None, local_vars_configuration=None):  # noqa: E501
        """V1DownwardAPIVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Backing fields default to None; supplied values go through setters.
        self._default_mode = None
        self._items = None

        if default_mode is not None:
            self.default_mode = default_mode
        if items is not None:
            self.items = items

    @property
    def default_mode(self):
        """int: optional mode bits used to set permissions on created
        files by default; an octal value between 0000 and 0777 or a
        decimal value between 0 and 511 (JSON requires decimal).
        Defaults to 0644.  Directories within the path are not affected;
        may conflict with other options that affect the file mode, like
        fsGroup."""
        return self._default_mode

    @default_mode.setter
    def default_mode(self, value):
        self._default_mode = value

    @property
    def items(self):
        """list[V1DownwardAPIVolumeFile]: list of downward API volume files."""
        return self._items

    @items.setter
    def items(self, value):
        self._items = value

    def to_dict(self):
        """Return the model's declared properties as a plain ``dict``,
        serializing nested models recursively."""
        def _convert(val):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Mirror ``to_str`` so ``print``/``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are V1DownwardAPIVolumeSource and
        their dict forms match."""
        if isinstance(other, V1DownwardAPIVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if isinstance(other, V1DownwardAPIVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_empty_dir_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_empty_dir_volume_source.py
new file mode 100644
index 0000000000..abd1a37e86
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_empty_dir_volume_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EmptyDirVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string, used by the client
    # (de)serialization machinery.
    openapi_types = {
        'medium': 'str',
        'size_limit': 'str'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'medium': 'medium',
        'size_limit': 'sizeLimit'
    }

    def __init__(self, medium=None, size_limit=None, local_vars_configuration=None):  # noqa: E501
        """V1EmptyDirVolumeSource - a model defined in OpenAPI

        :param medium: storage medium backing this directory (see property doc).
        :param size_limit: total local storage limit as a quantity string.
        :param local_vars_configuration: optional Configuration controlling
            client-side validation; a default Configuration is created if None.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._medium = None
        self._size_limit = None
        self.discriminator = None

        # Both fields are optional; they stay None when not supplied.
        if medium is not None:
            self.medium = medium
        if size_limit is not None:
            self.size_limit = size_limit

    @property
    def medium(self):
        """Gets the medium of this V1EmptyDirVolumeSource.  # noqa: E501

        medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir  # noqa: E501

        :return: The medium of this V1EmptyDirVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._medium

    @medium.setter
    def medium(self, medium):
        """Sets the medium of this V1EmptyDirVolumeSource.

        medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir  # noqa: E501

        :param medium: The medium of this V1EmptyDirVolumeSource.  # noqa: E501
        :type: str
        """

        self._medium = medium

    @property
    def size_limit(self):
        """Gets the size_limit of this V1EmptyDirVolumeSource.  # noqa: E501

        sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir  # noqa: E501

        :return: The size_limit of this V1EmptyDirVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._size_limit

    @size_limit.setter
    def size_limit(self, size_limit):
        """Sets the size_limit of this V1EmptyDirVolumeSource.

        sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir  # noqa: E501

        :param size_limit: The size_limit of this V1EmptyDirVolumeSource.  # noqa: E501
        :type: str
        """

        self._size_limit = size_limit

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting nested model
        # objects (anything exposing to_dict) found as direct values, inside
        # lists, or as dict values. Unset (None) attributes are included.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: models compare via their dict representation.
        if not isinstance(other, V1EmptyDirVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1EmptyDirVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint.py
new file mode 100644
index 0000000000..4db17da168
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint.py
@@ -0,0 +1,313 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Endpoint(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string, used by the client
    # (de)serialization machinery.
    openapi_types = {
        'addresses': 'list[str]',
        'conditions': 'V1EndpointConditions',
        'deprecated_topology': 'dict(str, str)',
        'hints': 'V1EndpointHints',
        'hostname': 'str',
        'node_name': 'str',
        'target_ref': 'V1ObjectReference',
        'zone': 'str'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'addresses': 'addresses',
        'conditions': 'conditions',
        'deprecated_topology': 'deprecatedTopology',
        'hints': 'hints',
        'hostname': 'hostname',
        'node_name': 'nodeName',
        'target_ref': 'targetRef',
        'zone': 'zone'
    }

    def __init__(self, addresses=None, conditions=None, deprecated_topology=None, hints=None, hostname=None, node_name=None, target_ref=None, zone=None, local_vars_configuration=None):  # noqa: E501
        """V1Endpoint - a model defined in OpenAPI

        :param addresses: required list of endpoint addresses (see setter:
            raises ValueError on None when client-side validation is enabled).
        :param local_vars_configuration: optional Configuration controlling
            client-side validation; a default Configuration is created if None.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._addresses = None
        self._conditions = None
        self._deprecated_topology = None
        self._hints = None
        self._hostname = None
        self._node_name = None
        self._target_ref = None
        self._zone = None
        self.discriminator = None

        # 'addresses' is required: assigned unconditionally so the setter can
        # validate it. All other fields are optional and stay None when unset.
        self.addresses = addresses
        if conditions is not None:
            self.conditions = conditions
        if deprecated_topology is not None:
            self.deprecated_topology = deprecated_topology
        if hints is not None:
            self.hints = hints
        if hostname is not None:
            self.hostname = hostname
        if node_name is not None:
            self.node_name = node_name
        if target_ref is not None:
            self.target_ref = target_ref
        if zone is not None:
            self.zone = zone

    @property
    def addresses(self):
        """Gets the addresses of this V1Endpoint.  # noqa: E501

        addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267  # noqa: E501

        :return: The addresses of this V1Endpoint.  # noqa: E501
        :rtype: list[str]
        """
        return self._addresses

    @addresses.setter
    def addresses(self, addresses):
        """Sets the addresses of this V1Endpoint.

        addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267  # noqa: E501

        :param addresses: The addresses of this V1Endpoint.  # noqa: E501
        :type: list[str]
        :raises ValueError: if addresses is None and client-side validation is on
        """
        if self.local_vars_configuration.client_side_validation and addresses is None:  # noqa: E501
            raise ValueError("Invalid value for `addresses`, must not be `None`")  # noqa: E501

        self._addresses = addresses

    @property
    def conditions(self):
        """Gets the conditions of this V1Endpoint.  # noqa: E501


        :return: The conditions of this V1Endpoint.  # noqa: E501
        :rtype: V1EndpointConditions
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Sets the conditions of this V1Endpoint.


        :param conditions: The conditions of this V1Endpoint.  # noqa: E501
        :type: V1EndpointConditions
        """

        self._conditions = conditions

    @property
    def deprecated_topology(self):
        """Gets the deprecated_topology of this V1Endpoint.  # noqa: E501

        deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.  # noqa: E501

        :return: The deprecated_topology of this V1Endpoint.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._deprecated_topology

    @deprecated_topology.setter
    def deprecated_topology(self, deprecated_topology):
        """Sets the deprecated_topology of this V1Endpoint.

        deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.  # noqa: E501

        :param deprecated_topology: The deprecated_topology of this V1Endpoint.  # noqa: E501
        :type: dict(str, str)
        """

        self._deprecated_topology = deprecated_topology

    @property
    def hints(self):
        """Gets the hints of this V1Endpoint.  # noqa: E501


        :return: The hints of this V1Endpoint.  # noqa: E501
        :rtype: V1EndpointHints
        """
        return self._hints

    @hints.setter
    def hints(self, hints):
        """Sets the hints of this V1Endpoint.


        :param hints: The hints of this V1Endpoint.  # noqa: E501
        :type: V1EndpointHints
        """

        self._hints = hints

    @property
    def hostname(self):
        """Gets the hostname of this V1Endpoint.  # noqa: E501

        hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.  # noqa: E501

        :return: The hostname of this V1Endpoint.  # noqa: E501
        :rtype: str
        """
        return self._hostname

    @hostname.setter
    def hostname(self, hostname):
        """Sets the hostname of this V1Endpoint.

        hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.  # noqa: E501

        :param hostname: The hostname of this V1Endpoint.  # noqa: E501
        :type: str
        """

        self._hostname = hostname

    @property
    def node_name(self):
        """Gets the node_name of this V1Endpoint.  # noqa: E501

        nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.  # noqa: E501

        :return: The node_name of this V1Endpoint.  # noqa: E501
        :rtype: str
        """
        return self._node_name

    @node_name.setter
    def node_name(self, node_name):
        """Sets the node_name of this V1Endpoint.

        nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.  # noqa: E501

        :param node_name: The node_name of this V1Endpoint.  # noqa: E501
        :type: str
        """

        self._node_name = node_name

    @property
    def target_ref(self):
        """Gets the target_ref of this V1Endpoint.  # noqa: E501


        :return: The target_ref of this V1Endpoint.  # noqa: E501
        :rtype: V1ObjectReference
        """
        return self._target_ref

    @target_ref.setter
    def target_ref(self, target_ref):
        """Sets the target_ref of this V1Endpoint.


        :param target_ref: The target_ref of this V1Endpoint.  # noqa: E501
        :type: V1ObjectReference
        """

        self._target_ref = target_ref

    @property
    def zone(self):
        """Gets the zone of this V1Endpoint.  # noqa: E501

        zone is the name of the Zone this endpoint exists in.  # noqa: E501

        :return: The zone of this V1Endpoint.  # noqa: E501
        :rtype: str
        """
        return self._zone

    @zone.setter
    def zone(self, zone):
        """Sets the zone of this V1Endpoint.

        zone is the name of the Zone this endpoint exists in.  # noqa: E501

        :param zone: The zone of this V1Endpoint.  # noqa: E501
        :type: str
        """

        self._zone = zone

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting nested model
        # objects (anything exposing to_dict) found as direct values, inside
        # lists, or as dict values. Unset (None) attributes are included.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: models compare via their dict representation.
        if not isinstance(other, V1Endpoint):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Endpoint):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_address.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_address.py
new file mode 100644
index 0000000000..38fca92a04
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_address.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointAddress(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string, used by the client
    # (de)serialization machinery.
    openapi_types = {
        'hostname': 'str',
        'ip': 'str',
        'node_name': 'str',
        'target_ref': 'V1ObjectReference'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'hostname': 'hostname',
        'ip': 'ip',
        'node_name': 'nodeName',
        'target_ref': 'targetRef'
    }

    def __init__(self, hostname=None, ip=None, node_name=None, target_ref=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointAddress - a model defined in OpenAPI

        :param ip: required endpoint IP (see setter: raises ValueError on None
            when client-side validation is enabled).
        :param local_vars_configuration: optional Configuration controlling
            client-side validation; a default Configuration is created if None.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._hostname = None
        self._ip = None
        self._node_name = None
        self._target_ref = None
        self.discriminator = None

        # 'ip' is required: assigned unconditionally so the setter can
        # validate it. The other fields are optional and stay None when unset.
        if hostname is not None:
            self.hostname = hostname
        self.ip = ip
        if node_name is not None:
            self.node_name = node_name
        if target_ref is not None:
            self.target_ref = target_ref

    @property
    def hostname(self):
        """Gets the hostname of this V1EndpointAddress.  # noqa: E501

        The Hostname of this endpoint  # noqa: E501

        :return: The hostname of this V1EndpointAddress.  # noqa: E501
        :rtype: str
        """
        return self._hostname

    @hostname.setter
    def hostname(self, hostname):
        """Sets the hostname of this V1EndpointAddress.

        The Hostname of this endpoint  # noqa: E501

        :param hostname: The hostname of this V1EndpointAddress.  # noqa: E501
        :type: str
        """

        self._hostname = hostname

    @property
    def ip(self):
        """Gets the ip of this V1EndpointAddress.  # noqa: E501

        The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).  # noqa: E501

        :return: The ip of this V1EndpointAddress.  # noqa: E501
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Sets the ip of this V1EndpointAddress.

        The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).  # noqa: E501

        :param ip: The ip of this V1EndpointAddress.  # noqa: E501
        :type: str
        :raises ValueError: if ip is None and client-side validation is on
        """
        if self.local_vars_configuration.client_side_validation and ip is None:  # noqa: E501
            raise ValueError("Invalid value for `ip`, must not be `None`")  # noqa: E501

        self._ip = ip

    @property
    def node_name(self):
        """Gets the node_name of this V1EndpointAddress.  # noqa: E501

        Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.  # noqa: E501

        :return: The node_name of this V1EndpointAddress.  # noqa: E501
        :rtype: str
        """
        return self._node_name

    @node_name.setter
    def node_name(self, node_name):
        """Sets the node_name of this V1EndpointAddress.

        Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.  # noqa: E501

        :param node_name: The node_name of this V1EndpointAddress.  # noqa: E501
        :type: str
        """

        self._node_name = node_name

    @property
    def target_ref(self):
        """Gets the target_ref of this V1EndpointAddress.  # noqa: E501


        :return: The target_ref of this V1EndpointAddress.  # noqa: E501
        :rtype: V1ObjectReference
        """
        return self._target_ref

    @target_ref.setter
    def target_ref(self, target_ref):
        """Sets the target_ref of this V1EndpointAddress.


        :param target_ref: The target_ref of this V1EndpointAddress.  # noqa: E501
        :type: V1ObjectReference
        """

        self._target_ref = target_ref

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting nested model
        # objects (anything exposing to_dict) found as direct values, inside
        # lists, or as dict values. Unset (None) attributes are included.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: models compare via their dict representation.
        if not isinstance(other, V1EndpointAddress):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1EndpointAddress):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_conditions.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_conditions.py
new file mode 100644
index 0000000000..334c703e37
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_conditions.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointConditions(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string, used by the client
    # (de)serialization machinery.
    openapi_types = {
        'ready': 'bool',
        'serving': 'bool',
        'terminating': 'bool'
    }

    # Python attribute name -> JSON key on the wire (all identical here).
    attribute_map = {
        'ready': 'ready',
        'serving': 'serving',
        'terminating': 'terminating'
    }

    def __init__(self, ready=None, serving=None, terminating=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointConditions - a model defined in OpenAPI

        All three condition fields are optional booleans; None means unknown
        (see the property docstrings for how consumers should interpret nil).

        :param local_vars_configuration: optional Configuration controlling
            client-side validation; a default Configuration is created if None.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._ready = None
        self._serving = None
        self._terminating = None
        self.discriminator = None

        if ready is not None:
            self.ready = ready
        if serving is not None:
            self.serving = serving
        if terminating is not None:
            self.terminating = terminating

    @property
    def ready(self):
        """Gets the ready of this V1EndpointConditions.  # noqa: E501

        ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.  # noqa: E501

        :return: The ready of this V1EndpointConditions.  # noqa: E501
        :rtype: bool
        """
        return self._ready

    @ready.setter
    def ready(self, ready):
        """Sets the ready of this V1EndpointConditions.

        ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.  # noqa: E501

        :param ready: The ready of this V1EndpointConditions.  # noqa: E501
        :type: bool
        """

        self._ready = ready

    @property
    def serving(self):
        """Gets the serving of this V1EndpointConditions.  # noqa: E501

        serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.  # noqa: E501

        :return: The serving of this V1EndpointConditions.  # noqa: E501
        :rtype: bool
        """
        return self._serving

    @serving.setter
    def serving(self, serving):
        """Sets the serving of this V1EndpointConditions.

        serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.  # noqa: E501

        :param serving: The serving of this V1EndpointConditions.  # noqa: E501
        :type: bool
        """

        self._serving = serving

    @property
    def terminating(self):
        """Gets the terminating of this V1EndpointConditions.  # noqa: E501

        terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.  # noqa: E501

        :return: The terminating of this V1EndpointConditions.  # noqa: E501
        :rtype: bool
        """
        return self._terminating

    @terminating.setter
    def terminating(self, terminating):
        """Sets the terminating of this V1EndpointConditions.

        terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.  # noqa: E501

        :param terminating: The terminating of this V1EndpointConditions.  # noqa: E501
        :type: bool
        """

        self._terminating = terminating

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting nested model
        # objects (anything exposing to_dict) found as direct values, inside
        # lists, or as dict values. Unset (None) attributes are included.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: models compare via their dict representation.
        if not isinstance(other, V1EndpointConditions):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1EndpointConditions):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_hints.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_hints.py
new file mode 100644
index 0000000000..1c3f9916c6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_hints.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointHints(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string, used by the client
    # (de)serialization machinery.
    openapi_types = {
        'for_zones': 'list[V1ForZone]'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'for_zones': 'forZones'
    }

    def __init__(self, for_zones=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointHints - a model defined in OpenAPI

        :param for_zones: optional list of V1ForZone hints; stays None when
            not supplied.
        :param local_vars_configuration: optional Configuration controlling
            client-side validation; a default Configuration is created if None.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._for_zones = None
        self.discriminator = None

        if for_zones is not None:
            self.for_zones = for_zones

    @property
    def for_zones(self):
        """Gets the for_zones of this V1EndpointHints.  # noqa: E501

        forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.  # noqa: E501

        :return: The for_zones of this V1EndpointHints.  # noqa: E501
        :rtype: list[V1ForZone]
        """
        return self._for_zones

    @for_zones.setter
    def for_zones(self, for_zones):
        """Sets the for_zones of this V1EndpointHints.

        forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.  # noqa: E501

        :param for_zones: The for_zones of this V1EndpointHints.  # noqa: E501
        :type: list[V1ForZone]
        """

        self._for_zones = for_zones

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively converting nested model
        # objects (anything exposing to_dict) found as direct values, inside
        # lists, or as dict values. Unset (None) attributes are included.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: models compare via their dict representation.
        if not isinstance(other, V1EndpointHints):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1EndpointHints):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice.py
new file mode 100644
index 0000000000..1e552f3847
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointSlice(object):
    """EndpointSlice model from the Kubernetes OpenAPI spec (release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # openapi_types: attribute name -> OpenAPI type string.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'address_type': 'str',
        'api_version': 'str',
        'endpoints': 'list[V1Endpoint]',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'ports': 'list[DiscoveryV1EndpointPort]'
    }

    attribute_map = {
        'address_type': 'addressType',
        'api_version': 'apiVersion',
        'endpoints': 'endpoints',
        'kind': 'kind',
        'metadata': 'metadata',
        'ports': 'ports'
    }

    def __init__(self, address_type=None, api_version=None, endpoints=None, kind=None, metadata=None, ports=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointSlice - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._address_type = None
        self._api_version = None
        self._endpoints = None
        self._kind = None
        self._metadata = None
        self._ports = None
        self.discriminator = None

        # Required fields always go through their validating setters so that
        # client-side validation can reject None.
        self.address_type = address_type
        self.endpoints = endpoints
        # Optional fields are assigned only when a value was supplied.
        for attr_name, supplied in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
            ('ports', ports),
        ):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def address_type(self):
        """str: the type of address carried by this EndpointSlice -- one of
        ``IPv4``, ``IPv6`` or ``FQDN``. All addresses in the slice must share
        this type; the field is immutable after creation."""
        return self._address_type

    @address_type.setter
    def address_type(self, address_type):
        """Set address_type; raises ValueError for None under client-side
        validation (the field is required)."""
        if self.local_vars_configuration.client_side_validation and address_type is None:  # noqa: E501
            raise ValueError("Invalid value for `address_type`, must not be `None`")  # noqa: E501
        self._address_type = address_type

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set api_version (optional, no validation)."""
        self._api_version = api_version

    @property
    def endpoints(self):
        """list[V1Endpoint]: the unique endpoints in this slice (each slice
        may include a maximum of 1000 endpoints)."""
        return self._endpoints

    @endpoints.setter
    def endpoints(self, endpoints):
        """Set endpoints; raises ValueError for None under client-side
        validation (the field is required)."""
        if self.local_vars_configuration.client_side_validation and endpoints is None:  # noqa: E501
            raise ValueError("Invalid value for `endpoints`, must not be `None`")  # noqa: E501
        self._endpoints = endpoints

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set kind (optional, no validation)."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ObjectMeta: object metadata for this EndpointSlice."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set metadata (optional, no validation)."""
        self._metadata = metadata

    @property
    def ports(self):
        """list[DiscoveryV1EndpointPort]: network ports exposed by each
        endpoint in this slice (at most 100); an empty list means no defined
        ports and a nil port value indicates \"all ports\"."""
        return self._ports

    @ports.setter
    def ports(self, ports):
        """Set ports (optional, no validation)."""
        self._ports = ports

    def to_dict(self):
        """Serialize the model (recursively) into a plain dict."""
        def render(value):
            # Nested models expose to_dict(); lists and dicts are walked one
            # level deep, exactly like the generated serializer.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: render(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        if isinstance(other, V1EndpointSlice):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Models differ when their serialized dicts differ (or types do)."""
        if isinstance(other, V1EndpointSlice):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice_list.py
new file mode 100644
index 0000000000..4f7a1f8c9b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_slice_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointSliceList(object):
    """EndpointSliceList model from the Kubernetes OpenAPI spec
    (release-1.28): a collection of V1EndpointSlice items.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # openapi_types: attribute name -> OpenAPI type string.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1EndpointSlice]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointSliceList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # items is required and always routed through its validating setter;
        # the remaining fields stay None unless a value was supplied.
        self.items = items
        for attr_name, supplied in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
        ):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set api_version (optional, no validation)."""
        self._api_version = api_version

    @property
    def items(self):
        """list[V1EndpointSlice]: the list of endpoint slices."""
        return self._items

    @items.setter
    def items(self, items):
        """Set items; raises ValueError for None under client-side
        validation (the field is required)."""
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set kind (optional, no validation)."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ListMeta: list metadata for this collection."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set metadata (optional, no validation)."""
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model (recursively) into a plain dict."""
        def render(value):
            # Nested models expose to_dict(); lists and dicts are walked one
            # level deep, exactly like the generated serializer.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: render(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        if isinstance(other, V1EndpointSliceList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Models differ when their serialized dicts differ (or types do)."""
        if isinstance(other, V1EndpointSliceList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_subset.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_subset.py
new file mode 100644
index 0000000000..518ca86f3e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoint_subset.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointSubset(object):
    """EndpointSubset model from the Kubernetes OpenAPI spec (release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # openapi_types: attribute name -> OpenAPI type string.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'addresses': 'list[V1EndpointAddress]',
        'not_ready_addresses': 'list[V1EndpointAddress]',
        'ports': 'list[CoreV1EndpointPort]'
    }

    attribute_map = {
        'addresses': 'addresses',
        'not_ready_addresses': 'notReadyAddresses',
        'ports': 'ports'
    }

    def __init__(self, addresses=None, not_ready_addresses=None, ports=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointSubset - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._addresses = None
        self._not_ready_addresses = None
        self._ports = None
        self.discriminator = None

        # Every field is optional; assign only those actually supplied.
        for attr_name, supplied in (
            ('addresses', addresses),
            ('not_ready_addresses', not_ready_addresses),
            ('ports', ports),
        ):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def addresses(self):
        """list[V1EndpointAddress]: IP addresses which offer the related
        ports and are marked as ready -- safe for load balancers and clients
        to utilize."""
        return self._addresses

    @addresses.setter
    def addresses(self, addresses):
        """Set addresses (optional, no validation)."""
        self._addresses = addresses

    @property
    def not_ready_addresses(self):
        """list[V1EndpointAddress]: IP addresses which offer the related
        ports but are not currently marked as ready (still starting, or a
        recent readiness/liveness check failed)."""
        return self._not_ready_addresses

    @not_ready_addresses.setter
    def not_ready_addresses(self, not_ready_addresses):
        """Set not_ready_addresses (optional, no validation)."""
        self._not_ready_addresses = not_ready_addresses

    @property
    def ports(self):
        """list[CoreV1EndpointPort]: port numbers available on the related
        IP addresses."""
        return self._ports

    @ports.setter
    def ports(self, ports):
        """Set ports (optional, no validation)."""
        self._ports = ports

    def to_dict(self):
        """Serialize the model (recursively) into a plain dict."""
        def render(value):
            # Nested models expose to_dict(); lists and dicts are walked one
            # level deep, exactly like the generated serializer.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: render(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        if isinstance(other, V1EndpointSubset):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Models differ when their serialized dicts differ (or types do)."""
        if isinstance(other, V1EndpointSubset):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoints.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoints.py
new file mode 100644
index 0000000000..b1711ad333
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoints.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Endpoints(object):
    """Endpoints model from the Kubernetes OpenAPI spec (release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # openapi_types: attribute name -> OpenAPI type string.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'subsets': 'list[V1EndpointSubset]'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'subsets': 'subsets'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, subsets=None, local_vars_configuration=None):  # noqa: E501
        """V1Endpoints - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._subsets = None
        self.discriminator = None

        # Every field is optional; assign only those actually supplied.
        for attr_name, supplied in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
            ('subsets', subsets),
        ):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set api_version (optional, no validation)."""
        self._api_version = api_version

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set kind (optional, no validation)."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ObjectMeta: object metadata for this Endpoints resource."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set metadata (optional, no validation)."""
        self._metadata = metadata

    @property
    def subsets(self):
        """list[V1EndpointSubset]: sets of addresses and ports comprising a
        service; the set of all endpoints is the union of all subsets, with
        addresses placed into subsets according to the IPs they share."""
        return self._subsets

    @subsets.setter
    def subsets(self, subsets):
        """Set subsets (optional, no validation)."""
        self._subsets = subsets

    def to_dict(self):
        """Serialize the model (recursively) into a plain dict."""
        def render(value):
            # Nested models expose to_dict(); lists and dicts are walked one
            # level deep, exactly like the generated serializer.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: render(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        if isinstance(other, V1Endpoints):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Models differ when their serialized dicts differ (or types do)."""
        if isinstance(other, V1Endpoints):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_endpoints_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoints_list.py
new file mode 100644
index 0000000000..5971cf5986
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_endpoints_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EndpointsList(object):
    """EndpointsList model from the Kubernetes OpenAPI spec (release-1.28):
    a collection of V1Endpoints items.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.
    """

    # openapi_types: attribute name -> OpenAPI type string.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Endpoints]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1EndpointsList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # items is required and always routed through its validating setter;
        # the remaining fields stay None unless a value was supplied.
        self.items = items
        for attr_name, supplied in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
        ):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set api_version (optional, no validation)."""
        self._api_version = api_version

    @property
    def items(self):
        """list[V1Endpoints]: the list of endpoints."""
        return self._items

    @items.setter
    def items(self, items):
        """Set items; raises ValueError for None under client-side
        validation (the field is required)."""
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set kind (optional, no validation)."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ListMeta: list metadata for this collection."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set metadata (optional, no validation)."""
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model (recursively) into a plain dict."""
        def render(value):
            # Nested models expose to_dict(); lists and dicts are walked one
            # level deep, exactly like the generated serializer.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: render(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        if isinstance(other, V1EndpointsList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Models differ when their serialized dicts differ (or types do)."""
        if isinstance(other, V1EndpointsList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_env_from_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_env_from_source.py
new file mode 100644
index 0000000000..46732ac1b5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_env_from_source.py
@@ -0,0 +1,174 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1EnvFromSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'config_map_ref': 'V1ConfigMapEnvSource',
+ 'prefix': 'str',
+ 'secret_ref': 'V1SecretEnvSource'
+ }
+
+ attribute_map = {
+ 'config_map_ref': 'configMapRef',
+ 'prefix': 'prefix',
+ 'secret_ref': 'secretRef'
+ }
+
+ def __init__(self, config_map_ref=None, prefix=None, secret_ref=None, local_vars_configuration=None): # noqa: E501
+ """V1EnvFromSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._config_map_ref = None
+ self._prefix = None
+ self._secret_ref = None
+ self.discriminator = None
+
+ if config_map_ref is not None:
+ self.config_map_ref = config_map_ref
+ if prefix is not None:
+ self.prefix = prefix
+ if secret_ref is not None:
+ self.secret_ref = secret_ref
+
+ @property
+ def config_map_ref(self):
+ """Gets the config_map_ref of this V1EnvFromSource. # noqa: E501
+
+
+ :return: The config_map_ref of this V1EnvFromSource. # noqa: E501
+ :rtype: V1ConfigMapEnvSource
+ """
+ return self._config_map_ref
+
+ @config_map_ref.setter
+ def config_map_ref(self, config_map_ref):
+ """Sets the config_map_ref of this V1EnvFromSource.
+
+
+ :param config_map_ref: The config_map_ref of this V1EnvFromSource. # noqa: E501
+ :type: V1ConfigMapEnvSource
+ """
+
+ self._config_map_ref = config_map_ref
+
+ @property
+ def prefix(self):
+ """Gets the prefix of this V1EnvFromSource. # noqa: E501
+
+ An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. # noqa: E501
+
+ :return: The prefix of this V1EnvFromSource. # noqa: E501
+ :rtype: str
+ """
+ return self._prefix
+
+ @prefix.setter
+ def prefix(self, prefix):
+ """Sets the prefix of this V1EnvFromSource.
+
+ An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. # noqa: E501
+
+ :param prefix: The prefix of this V1EnvFromSource. # noqa: E501
+ :type: str
+ """
+
+ self._prefix = prefix
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1EnvFromSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1EnvFromSource. # noqa: E501
+ :rtype: V1SecretEnvSource
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1EnvFromSource.
+
+
+ :param secret_ref: The secret_ref of this V1EnvFromSource. # noqa: E501
+ :type: V1SecretEnvSource
+ """
+
+ self._secret_ref = secret_ref
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1EnvFromSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1EnvFromSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_env_var.py b/contrib/python/kubernetes/kubernetes/client/models/v1_env_var.py
new file mode 100644
index 0000000000..defd8a18c9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_env_var.py
@@ -0,0 +1,177 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1EnvVar(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'value': 'str',
+ 'value_from': 'V1EnvVarSource'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'value': 'value',
+ 'value_from': 'valueFrom'
+ }
+
+ def __init__(self, name=None, value=None, value_from=None, local_vars_configuration=None): # noqa: E501
+ """V1EnvVar - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._value = None
+ self._value_from = None
+ self.discriminator = None
+
+ self.name = name
+ if value is not None:
+ self.value = value
+ if value_from is not None:
+ self.value_from = value_from
+
+ @property
+ def name(self):
+ """Gets the name of this V1EnvVar. # noqa: E501
+
+ Name of the environment variable. Must be a C_IDENTIFIER. # noqa: E501
+
+ :return: The name of this V1EnvVar. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1EnvVar.
+
+ Name of the environment variable. Must be a C_IDENTIFIER. # noqa: E501
+
+ :param name: The name of this V1EnvVar. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def value(self):
+ """Gets the value of this V1EnvVar. # noqa: E501
+
+ Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". # noqa: E501
+
+ :return: The value of this V1EnvVar. # noqa: E501
+ :rtype: str
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ """Sets the value of this V1EnvVar.
+
+ Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\". # noqa: E501
+
+ :param value: The value of this V1EnvVar. # noqa: E501
+ :type: str
+ """
+
+ self._value = value
+
+ @property
+ def value_from(self):
+ """Gets the value_from of this V1EnvVar. # noqa: E501
+
+
+ :return: The value_from of this V1EnvVar. # noqa: E501
+ :rtype: V1EnvVarSource
+ """
+ return self._value_from
+
+ @value_from.setter
+ def value_from(self, value_from):
+ """Sets the value_from of this V1EnvVar.
+
+
+ :param value_from: The value_from of this V1EnvVar. # noqa: E501
+ :type: V1EnvVarSource
+ """
+
+ self._value_from = value_from
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1EnvVar):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1EnvVar):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_env_var_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_env_var_source.py
new file mode 100644
index 0000000000..d8023520ed
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_env_var_source.py
@@ -0,0 +1,198 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1EnvVarSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'config_map_key_ref': 'V1ConfigMapKeySelector',
+ 'field_ref': 'V1ObjectFieldSelector',
+ 'resource_field_ref': 'V1ResourceFieldSelector',
+ 'secret_key_ref': 'V1SecretKeySelector'
+ }
+
+ attribute_map = {
+ 'config_map_key_ref': 'configMapKeyRef',
+ 'field_ref': 'fieldRef',
+ 'resource_field_ref': 'resourceFieldRef',
+ 'secret_key_ref': 'secretKeyRef'
+ }
+
+ def __init__(self, config_map_key_ref=None, field_ref=None, resource_field_ref=None, secret_key_ref=None, local_vars_configuration=None): # noqa: E501
+ """V1EnvVarSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._config_map_key_ref = None
+ self._field_ref = None
+ self._resource_field_ref = None
+ self._secret_key_ref = None
+ self.discriminator = None
+
+ if config_map_key_ref is not None:
+ self.config_map_key_ref = config_map_key_ref
+ if field_ref is not None:
+ self.field_ref = field_ref
+ if resource_field_ref is not None:
+ self.resource_field_ref = resource_field_ref
+ if secret_key_ref is not None:
+ self.secret_key_ref = secret_key_ref
+
+ @property
+ def config_map_key_ref(self):
+ """Gets the config_map_key_ref of this V1EnvVarSource. # noqa: E501
+
+
+ :return: The config_map_key_ref of this V1EnvVarSource. # noqa: E501
+ :rtype: V1ConfigMapKeySelector
+ """
+ return self._config_map_key_ref
+
+ @config_map_key_ref.setter
+ def config_map_key_ref(self, config_map_key_ref):
+ """Sets the config_map_key_ref of this V1EnvVarSource.
+
+
+ :param config_map_key_ref: The config_map_key_ref of this V1EnvVarSource. # noqa: E501
+ :type: V1ConfigMapKeySelector
+ """
+
+ self._config_map_key_ref = config_map_key_ref
+
+ @property
+ def field_ref(self):
+ """Gets the field_ref of this V1EnvVarSource. # noqa: E501
+
+
+ :return: The field_ref of this V1EnvVarSource. # noqa: E501
+ :rtype: V1ObjectFieldSelector
+ """
+ return self._field_ref
+
+ @field_ref.setter
+ def field_ref(self, field_ref):
+ """Sets the field_ref of this V1EnvVarSource.
+
+
+ :param field_ref: The field_ref of this V1EnvVarSource. # noqa: E501
+ :type: V1ObjectFieldSelector
+ """
+
+ self._field_ref = field_ref
+
+ @property
+ def resource_field_ref(self):
+ """Gets the resource_field_ref of this V1EnvVarSource. # noqa: E501
+
+
+ :return: The resource_field_ref of this V1EnvVarSource. # noqa: E501
+ :rtype: V1ResourceFieldSelector
+ """
+ return self._resource_field_ref
+
+ @resource_field_ref.setter
+ def resource_field_ref(self, resource_field_ref):
+ """Sets the resource_field_ref of this V1EnvVarSource.
+
+
+ :param resource_field_ref: The resource_field_ref of this V1EnvVarSource. # noqa: E501
+ :type: V1ResourceFieldSelector
+ """
+
+ self._resource_field_ref = resource_field_ref
+
+ @property
+ def secret_key_ref(self):
+ """Gets the secret_key_ref of this V1EnvVarSource. # noqa: E501
+
+
+ :return: The secret_key_ref of this V1EnvVarSource. # noqa: E501
+ :rtype: V1SecretKeySelector
+ """
+ return self._secret_key_ref
+
+ @secret_key_ref.setter
+ def secret_key_ref(self, secret_key_ref):
+ """Sets the secret_key_ref of this V1EnvVarSource.
+
+
+ :param secret_key_ref: The secret_key_ref of this V1EnvVarSource. # noqa: E501
+ :type: V1SecretKeySelector
+ """
+
+ self._secret_key_ref = secret_key_ref
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1EnvVarSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1EnvVarSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_container.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_container.py
new file mode 100644
index 0000000000..d07af400a5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_container.py
@@ -0,0 +1,783 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1EphemeralContainer(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'args': 'list[str]',
+ 'command': 'list[str]',
+ 'env': 'list[V1EnvVar]',
+ 'env_from': 'list[V1EnvFromSource]',
+ 'image': 'str',
+ 'image_pull_policy': 'str',
+ 'lifecycle': 'V1Lifecycle',
+ 'liveness_probe': 'V1Probe',
+ 'name': 'str',
+ 'ports': 'list[V1ContainerPort]',
+ 'readiness_probe': 'V1Probe',
+ 'resize_policy': 'list[V1ContainerResizePolicy]',
+ 'resources': 'V1ResourceRequirements',
+ 'restart_policy': 'str',
+ 'security_context': 'V1SecurityContext',
+ 'startup_probe': 'V1Probe',
+ 'stdin': 'bool',
+ 'stdin_once': 'bool',
+ 'target_container_name': 'str',
+ 'termination_message_path': 'str',
+ 'termination_message_policy': 'str',
+ 'tty': 'bool',
+ 'volume_devices': 'list[V1VolumeDevice]',
+ 'volume_mounts': 'list[V1VolumeMount]',
+ 'working_dir': 'str'
+ }
+
+ attribute_map = {
+ 'args': 'args',
+ 'command': 'command',
+ 'env': 'env',
+ 'env_from': 'envFrom',
+ 'image': 'image',
+ 'image_pull_policy': 'imagePullPolicy',
+ 'lifecycle': 'lifecycle',
+ 'liveness_probe': 'livenessProbe',
+ 'name': 'name',
+ 'ports': 'ports',
+ 'readiness_probe': 'readinessProbe',
+ 'resize_policy': 'resizePolicy',
+ 'resources': 'resources',
+ 'restart_policy': 'restartPolicy',
+ 'security_context': 'securityContext',
+ 'startup_probe': 'startupProbe',
+ 'stdin': 'stdin',
+ 'stdin_once': 'stdinOnce',
+ 'target_container_name': 'targetContainerName',
+ 'termination_message_path': 'terminationMessagePath',
+ 'termination_message_policy': 'terminationMessagePolicy',
+ 'tty': 'tty',
+ 'volume_devices': 'volumeDevices',
+ 'volume_mounts': 'volumeMounts',
+ 'working_dir': 'workingDir'
+ }
+
+ def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resize_policy=None, resources=None, restart_policy=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, target_container_name=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
+ """V1EphemeralContainer - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._args = None
+ self._command = None
+ self._env = None
+ self._env_from = None
+ self._image = None
+ self._image_pull_policy = None
+ self._lifecycle = None
+ self._liveness_probe = None
+ self._name = None
+ self._ports = None
+ self._readiness_probe = None
+ self._resize_policy = None
+ self._resources = None
+ self._restart_policy = None
+ self._security_context = None
+ self._startup_probe = None
+ self._stdin = None
+ self._stdin_once = None
+ self._target_container_name = None
+ self._termination_message_path = None
+ self._termination_message_policy = None
+ self._tty = None
+ self._volume_devices = None
+ self._volume_mounts = None
+ self._working_dir = None
+ self.discriminator = None
+
+ if args is not None:
+ self.args = args
+ if command is not None:
+ self.command = command
+ if env is not None:
+ self.env = env
+ if env_from is not None:
+ self.env_from = env_from
+ if image is not None:
+ self.image = image
+ if image_pull_policy is not None:
+ self.image_pull_policy = image_pull_policy
+ if lifecycle is not None:
+ self.lifecycle = lifecycle
+ if liveness_probe is not None:
+ self.liveness_probe = liveness_probe
+ self.name = name
+ if ports is not None:
+ self.ports = ports
+ if readiness_probe is not None:
+ self.readiness_probe = readiness_probe
+ if resize_policy is not None:
+ self.resize_policy = resize_policy
+ if resources is not None:
+ self.resources = resources
+ if restart_policy is not None:
+ self.restart_policy = restart_policy
+ if security_context is not None:
+ self.security_context = security_context
+ if startup_probe is not None:
+ self.startup_probe = startup_probe
+ if stdin is not None:
+ self.stdin = stdin
+ if stdin_once is not None:
+ self.stdin_once = stdin_once
+ if target_container_name is not None:
+ self.target_container_name = target_container_name
+ if termination_message_path is not None:
+ self.termination_message_path = termination_message_path
+ if termination_message_policy is not None:
+ self.termination_message_policy = termination_message_policy
+ if tty is not None:
+ self.tty = tty
+ if volume_devices is not None:
+ self.volume_devices = volume_devices
+ if volume_mounts is not None:
+ self.volume_mounts = volume_mounts
+ if working_dir is not None:
+ self.working_dir = working_dir
+
+ @property
+ def args(self):
+ """Gets the args of this V1EphemeralContainer. # noqa: E501
+
+ Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :return: The args of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._args
+
+ @args.setter
+ def args(self, args):
+ """Sets the args of this V1EphemeralContainer.
+
+ Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :param args: The args of this V1EphemeralContainer. # noqa: E501
+ :type: list[str]
+ """
+
+ self._args = args
+
+ @property
+ def command(self):
+ """Gets the command of this V1EphemeralContainer. # noqa: E501
+
+ Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :return: The command of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._command
+
+ @command.setter
+ def command(self, command):
+ """Sets the command of this V1EphemeralContainer.
+
+ Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
+
+ :param command: The command of this V1EphemeralContainer. # noqa: E501
+ :type: list[str]
+ """
+
+ self._command = command
+
+ @property
+ def env(self):
+ """Gets the env of this V1EphemeralContainer. # noqa: E501
+
+ List of environment variables to set in the container. Cannot be updated. # noqa: E501
+
+ :return: The env of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[V1EnvVar]
+ """
+ return self._env
+
+ @env.setter
+ def env(self, env):
+ """Sets the env of this V1EphemeralContainer.
+
+ List of environment variables to set in the container. Cannot be updated. # noqa: E501
+
+ :param env: The env of this V1EphemeralContainer. # noqa: E501
+ :type: list[V1EnvVar]
+ """
+
+ self._env = env
+
+ @property
+ def env_from(self):
+ """Gets the env_from of this V1EphemeralContainer. # noqa: E501
+
+ List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
+
+ :return: The env_from of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[V1EnvFromSource]
+ """
+ return self._env_from
+
+ @env_from.setter
+ def env_from(self, env_from):
+ """Sets the env_from of this V1EphemeralContainer.
+
+ List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
+
+ :param env_from: The env_from of this V1EphemeralContainer. # noqa: E501
+ :type: list[V1EnvFromSource]
+ """
+
+ self._env_from = env_from
+
+ @property
+ def image(self):
+ """Gets the image of this V1EphemeralContainer. # noqa: E501
+
+ Container image name. More info: https://kubernetes.io/docs/concepts/containers/images # noqa: E501
+
+ :return: The image of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._image
+
+ @image.setter
+ def image(self, image):
+ """Sets the image of this V1EphemeralContainer.
+
+ Container image name. More info: https://kubernetes.io/docs/concepts/containers/images # noqa: E501
+
+ :param image: The image of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._image = image
+
+ @property
+ def image_pull_policy(self):
+ """Gets the image_pull_policy of this V1EphemeralContainer. # noqa: E501
+
+ Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
+
+ :return: The image_pull_policy of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._image_pull_policy
+
+ @image_pull_policy.setter
+ def image_pull_policy(self, image_pull_policy):
+ """Sets the image_pull_policy of this V1EphemeralContainer.
+
+ Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
+
+ :param image_pull_policy: The image_pull_policy of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._image_pull_policy = image_pull_policy
+
+ @property
+ def lifecycle(self):
+ """Gets the lifecycle of this V1EphemeralContainer. # noqa: E501
+
+
+ :return: The lifecycle of this V1EphemeralContainer. # noqa: E501
+ :rtype: V1Lifecycle
+ """
+ return self._lifecycle
+
+ @lifecycle.setter
+ def lifecycle(self, lifecycle):
+ """Sets the lifecycle of this V1EphemeralContainer.
+
+
+ :param lifecycle: The lifecycle of this V1EphemeralContainer. # noqa: E501
+ :type: V1Lifecycle
+ """
+
+ self._lifecycle = lifecycle
+
+ @property
+ def liveness_probe(self):
+ """Gets the liveness_probe of this V1EphemeralContainer. # noqa: E501
+
+
+ :return: The liveness_probe of this V1EphemeralContainer. # noqa: E501
+ :rtype: V1Probe
+ """
+ return self._liveness_probe
+
+ @liveness_probe.setter
+ def liveness_probe(self, liveness_probe):
+ """Sets the liveness_probe of this V1EphemeralContainer.
+
+
+ :param liveness_probe: The liveness_probe of this V1EphemeralContainer. # noqa: E501
+ :type: V1Probe
+ """
+
+ self._liveness_probe = liveness_probe
+
+ @property
+ def name(self):
+ """Gets the name of this V1EphemeralContainer. # noqa: E501
+
+ Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. # noqa: E501
+
+ :return: The name of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1EphemeralContainer.
+
+ Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. # noqa: E501
+
+ :param name: The name of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def ports(self):
+ """Gets the ports of this V1EphemeralContainer. # noqa: E501
+
+ Ports are not allowed for ephemeral containers. # noqa: E501
+
+ :return: The ports of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[V1ContainerPort]
+ """
+ return self._ports
+
+ @ports.setter
+ def ports(self, ports):
+ """Sets the ports of this V1EphemeralContainer.
+
+ Ports are not allowed for ephemeral containers. # noqa: E501
+
+ :param ports: The ports of this V1EphemeralContainer. # noqa: E501
+ :type: list[V1ContainerPort]
+ """
+
+ self._ports = ports
+
+ @property
+ def readiness_probe(self):
+ """Gets the readiness_probe of this V1EphemeralContainer. # noqa: E501
+
+
+ :return: The readiness_probe of this V1EphemeralContainer. # noqa: E501
+ :rtype: V1Probe
+ """
+ return self._readiness_probe
+
+ @readiness_probe.setter
+ def readiness_probe(self, readiness_probe):
+ """Sets the readiness_probe of this V1EphemeralContainer.
+
+
+ :param readiness_probe: The readiness_probe of this V1EphemeralContainer. # noqa: E501
+ :type: V1Probe
+ """
+
+ self._readiness_probe = readiness_probe
+
+ @property
+ def resize_policy(self):
+ """Gets the resize_policy of this V1EphemeralContainer. # noqa: E501
+
+ Resources resize policy for the container. # noqa: E501
+
+ :return: The resize_policy of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[V1ContainerResizePolicy]
+ """
+ return self._resize_policy
+
+ @resize_policy.setter
+ def resize_policy(self, resize_policy):
+ """Sets the resize_policy of this V1EphemeralContainer.
+
+ Resources resize policy for the container. # noqa: E501
+
+ :param resize_policy: The resize_policy of this V1EphemeralContainer. # noqa: E501
+ :type: list[V1ContainerResizePolicy]
+ """
+
+ self._resize_policy = resize_policy
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1EphemeralContainer. # noqa: E501
+
+
+ :return: The resources of this V1EphemeralContainer. # noqa: E501
+ :rtype: V1ResourceRequirements
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1EphemeralContainer.
+
+
+ :param resources: The resources of this V1EphemeralContainer. # noqa: E501
+ :type: V1ResourceRequirements
+ """
+
+ self._resources = resources
+
+ @property
+ def restart_policy(self):
+ """Gets the restart_policy of this V1EphemeralContainer. # noqa: E501
+
+ Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers. # noqa: E501
+
+ :return: The restart_policy of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._restart_policy
+
+ @restart_policy.setter
+ def restart_policy(self, restart_policy):
+ """Sets the restart_policy of this V1EphemeralContainer.
+
+ Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers. # noqa: E501
+
+ :param restart_policy: The restart_policy of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._restart_policy = restart_policy
+
+ @property
+ def security_context(self):
+ """Gets the security_context of this V1EphemeralContainer. # noqa: E501
+
+
+ :return: The security_context of this V1EphemeralContainer. # noqa: E501
+ :rtype: V1SecurityContext
+ """
+ return self._security_context
+
+ @security_context.setter
+ def security_context(self, security_context):
+ """Sets the security_context of this V1EphemeralContainer.
+
+
+ :param security_context: The security_context of this V1EphemeralContainer. # noqa: E501
+ :type: V1SecurityContext
+ """
+
+ self._security_context = security_context
+
+ @property
+ def startup_probe(self):
+ """Gets the startup_probe of this V1EphemeralContainer. # noqa: E501
+
+
+ :return: The startup_probe of this V1EphemeralContainer. # noqa: E501
+ :rtype: V1Probe
+ """
+ return self._startup_probe
+
+ @startup_probe.setter
+ def startup_probe(self, startup_probe):
+ """Sets the startup_probe of this V1EphemeralContainer.
+
+
+ :param startup_probe: The startup_probe of this V1EphemeralContainer. # noqa: E501
+ :type: V1Probe
+ """
+
+ self._startup_probe = startup_probe
+
+ @property
+ def stdin(self):
+ """Gets the stdin of this V1EphemeralContainer. # noqa: E501
+
+ Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
+
+ :return: The stdin of this V1EphemeralContainer. # noqa: E501
+ :rtype: bool
+ """
+ return self._stdin
+
+ @stdin.setter
+ def stdin(self, stdin):
+ """Sets the stdin of this V1EphemeralContainer.
+
+ Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
+
+ :param stdin: The stdin of this V1EphemeralContainer. # noqa: E501
+ :type: bool
+ """
+
+ self._stdin = stdin
+
+ @property
+ def stdin_once(self):
+ """Gets the stdin_once of this V1EphemeralContainer. # noqa: E501
+
+ Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
+
+ :return: The stdin_once of this V1EphemeralContainer. # noqa: E501
+ :rtype: bool
+ """
+ return self._stdin_once
+
+ @stdin_once.setter
+ def stdin_once(self, stdin_once):
+ """Sets the stdin_once of this V1EphemeralContainer.
+
+ Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
+
+ :param stdin_once: The stdin_once of this V1EphemeralContainer. # noqa: E501
+ :type: bool
+ """
+
+ self._stdin_once = stdin_once
+
+ @property
+ def target_container_name(self):
+ """Gets the target_container_name of this V1EphemeralContainer. # noqa: E501
+
+ If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. # noqa: E501
+
+ :return: The target_container_name of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._target_container_name
+
+ @target_container_name.setter
+ def target_container_name(self, target_container_name):
+ """Sets the target_container_name of this V1EphemeralContainer.
+
+ If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. # noqa: E501
+
+ :param target_container_name: The target_container_name of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._target_container_name = target_container_name
+
+ @property
+ def termination_message_path(self):
+ """Gets the termination_message_path of this V1EphemeralContainer. # noqa: E501
+
+ Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
+
+ :return: The termination_message_path of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._termination_message_path
+
+ @termination_message_path.setter
+ def termination_message_path(self, termination_message_path):
+ """Sets the termination_message_path of this V1EphemeralContainer.
+
+ Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
+
+ :param termination_message_path: The termination_message_path of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._termination_message_path = termination_message_path
+
+ @property
+ def termination_message_policy(self):
+ """Gets the termination_message_policy of this V1EphemeralContainer. # noqa: E501
+
+ Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
+
+ :return: The termination_message_policy of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._termination_message_policy
+
+ @termination_message_policy.setter
+ def termination_message_policy(self, termination_message_policy):
+ """Sets the termination_message_policy of this V1EphemeralContainer.
+
+ Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
+
+ :param termination_message_policy: The termination_message_policy of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._termination_message_policy = termination_message_policy
+
+ @property
+ def tty(self):
+ """Gets the tty of this V1EphemeralContainer. # noqa: E501
+
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
+
+ :return: The tty of this V1EphemeralContainer. # noqa: E501
+ :rtype: bool
+ """
+ return self._tty
+
+ @tty.setter
+ def tty(self, tty):
+ """Sets the tty of this V1EphemeralContainer.
+
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
+
+ :param tty: The tty of this V1EphemeralContainer. # noqa: E501
+ :type: bool
+ """
+
+ self._tty = tty
+
+ @property
+ def volume_devices(self):
+ """Gets the volume_devices of this V1EphemeralContainer. # noqa: E501
+
+ volumeDevices is the list of block devices to be used by the container. # noqa: E501
+
+ :return: The volume_devices of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[V1VolumeDevice]
+ """
+ return self._volume_devices
+
+ @volume_devices.setter
+ def volume_devices(self, volume_devices):
+ """Sets the volume_devices of this V1EphemeralContainer.
+
+ volumeDevices is the list of block devices to be used by the container. # noqa: E501
+
+ :param volume_devices: The volume_devices of this V1EphemeralContainer. # noqa: E501
+ :type: list[V1VolumeDevice]
+ """
+
+ self._volume_devices = volume_devices
+
+ @property
+ def volume_mounts(self):
+ """Gets the volume_mounts of this V1EphemeralContainer. # noqa: E501
+
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. # noqa: E501
+
+ :return: The volume_mounts of this V1EphemeralContainer. # noqa: E501
+ :rtype: list[V1VolumeMount]
+ """
+ return self._volume_mounts
+
+ @volume_mounts.setter
+ def volume_mounts(self, volume_mounts):
+ """Sets the volume_mounts of this V1EphemeralContainer.
+
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. # noqa: E501
+
+ :param volume_mounts: The volume_mounts of this V1EphemeralContainer. # noqa: E501
+ :type: list[V1VolumeMount]
+ """
+
+ self._volume_mounts = volume_mounts
+
+ @property
+ def working_dir(self):
+ """Gets the working_dir of this V1EphemeralContainer. # noqa: E501
+
+ Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
+
+ :return: The working_dir of this V1EphemeralContainer. # noqa: E501
+ :rtype: str
+ """
+ return self._working_dir
+
+ @working_dir.setter
+ def working_dir(self, working_dir):
+ """Sets the working_dir of this V1EphemeralContainer.
+
+ Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
+
+ :param working_dir: The working_dir of this V1EphemeralContainer. # noqa: E501
+ :type: str
+ """
+
+ self._working_dir = working_dir
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1EphemeralContainer):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1EphemeralContainer):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_volume_source.py
new file mode 100644
index 0000000000..2d9ba9a2cd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ephemeral_volume_source.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EphemeralVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'volume_claim_template': 'V1PersistentVolumeClaimTemplate'
    }

    attribute_map = {
        'volume_claim_template': 'volumeClaimTemplate'
    }

    def __init__(self, volume_claim_template=None, local_vars_configuration=None):  # noqa: E501
        """V1EphemeralVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._volume_claim_template = None
        self.discriminator = None

        if volume_claim_template is not None:
            self.volume_claim_template = volume_claim_template

    @property
    def volume_claim_template(self):
        """The volume_claim_template of this V1EphemeralVolumeSource.

        :return: The volume_claim_template of this V1EphemeralVolumeSource.  # noqa: E501
        :rtype: V1PersistentVolumeClaimTemplate
        """
        return self._volume_claim_template

    @volume_claim_template.setter
    def volume_claim_template(self, volume_claim_template):
        """Assign the volume_claim_template of this V1EphemeralVolumeSource.

        :param volume_claim_template: The volume_claim_template of this V1EphemeralVolumeSource.  # noqa: E501
        :type: V1PersistentVolumeClaimTemplate
        """
        self._volume_claim_template = volume_claim_template

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into any
        nested generated models (anything exposing ``to_dict``)."""
        def _plain(value):
            # Generated models expose ``to_dict``; primitives pass through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the ``pprint``-formatted string of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Debug representation; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized dicts match."""
        return (
            isinstance(other, V1EphemeralVolumeSource)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_event_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_event_source.py
new file mode 100644
index 0000000000..631e452613
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_event_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1EventSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'component': 'str',
        'host': 'str'
    }

    attribute_map = {
        'component': 'component',
        'host': 'host'
    }

    def __init__(self, component=None, host=None, local_vars_configuration=None):  # noqa: E501
        """V1EventSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._component = None
        self._host = None
        self.discriminator = None

        if component is not None:
            self.component = component
        if host is not None:
            self.host = host

    @property
    def component(self):
        """Component from which the event is generated.

        :return: The component of this V1EventSource.  # noqa: E501
        :rtype: str
        """
        return self._component

    @component.setter
    def component(self, component):
        """Assign the component of this V1EventSource.

        :param component: Component from which the event is generated.  # noqa: E501
        :type: str
        """
        self._component = component

    @property
    def host(self):
        """Node name on which the event is generated.

        :return: The host of this V1EventSource.  # noqa: E501
        :rtype: str
        """
        return self._host

    @host.setter
    def host(self, host):
        """Assign the host of this V1EventSource.

        :param host: Node name on which the event is generated.  # noqa: E501
        :type: str
        """
        self._host = host

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into any
        nested generated models (anything exposing ``to_dict``)."""
        def _plain(value):
            # Generated models expose ``to_dict``; primitives pass through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the ``pprint``-formatted string of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Debug representation; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized dicts match."""
        return (
            isinstance(other, V1EventSource)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_eviction.py b/contrib/python/kubernetes/kubernetes/client/models/v1_eviction.py
new file mode 100644
index 0000000000..b5cee4aec9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_eviction.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Eviction(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'delete_options': 'V1DeleteOptions',
        'kind': 'str',
        'metadata': 'V1ObjectMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'delete_options': 'deleteOptions',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, delete_options=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1Eviction - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._api_version = None
        self._delete_options = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # Only route non-None arguments through the property setters so the
        # backing fields keep their None defaults otherwise.
        if api_version is not None:
            self.api_version = api_version
        if delete_options is not None:
            self.delete_options = delete_options
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation
        of an object.

        :return: The api_version of this V1Eviction.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Assign the api_version of this V1Eviction.

        :param api_version: The api_version of this V1Eviction.  # noqa: E501
        :type: str
        """
        self._api_version = api_version

    @property
    def delete_options(self):
        """The delete_options of this V1Eviction.

        :return: The delete_options of this V1Eviction.  # noqa: E501
        :rtype: V1DeleteOptions
        """
        return self._delete_options

    @delete_options.setter
    def delete_options(self, delete_options):
        """Assign the delete_options of this V1Eviction.

        :param delete_options: The delete_options of this V1Eviction.  # noqa: E501
        :type: V1DeleteOptions
        """
        self._delete_options = delete_options

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this
        object represents.

        :return: The kind of this V1Eviction.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Assign the kind of this V1Eviction.

        :param kind: The kind of this V1Eviction.  # noqa: E501
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """The metadata of this V1Eviction.

        :return: The metadata of this V1Eviction.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Assign the metadata of this V1Eviction.

        :param metadata: The metadata of this V1Eviction.  # noqa: E501
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into any
        nested generated models (anything exposing ``to_dict``)."""
        def _plain(value):
            # Generated models expose ``to_dict``; primitives pass through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the ``pprint``-formatted string of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Debug representation; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized dicts match."""
        return (
            isinstance(other, V1Eviction)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_exec_action.py b/contrib/python/kubernetes/kubernetes/client/models/v1_exec_action.py
new file mode 100644
index 0000000000..659fbd6973
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_exec_action.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ExecAction(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'command': 'list[str]'
    }

    attribute_map = {
        'command': 'command'
    }

    def __init__(self, command=None, local_vars_configuration=None):  # noqa: E501
        """V1ExecAction - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._command = None
        self.discriminator = None

        if command is not None:
            self.command = command

    @property
    def command(self):
        """Command line to execute inside the container.

        The command is exec'd directly (not run in a shell); exit status 0
        is treated as live/healthy and non-zero as unhealthy.

        :return: The command of this V1ExecAction.  # noqa: E501
        :rtype: list[str]
        """
        return self._command

    @command.setter
    def command(self, command):
        """Assign the command of this V1ExecAction.

        :param command: The command of this V1ExecAction.  # noqa: E501
        :type: list[str]
        """
        self._command = command

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into any
        nested generated models (anything exposing ``to_dict``)."""
        def _plain(value):
            # Generated models expose ``to_dict``; primitives pass through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the ``pprint``-formatted string of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Debug representation; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized dicts match."""
        return (
            isinstance(other, V1ExecAction)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_external_documentation.py b/contrib/python/kubernetes/kubernetes/client/models/v1_external_documentation.py
new file mode 100644
index 0000000000..ae8cf8afd2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_external_documentation.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ExternalDocumentation(object):
    """OpenAPI model for Kubernetes ``ExternalDocumentation``.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'description': 'str',
        'url': 'str'
    }

    # Maps each python attribute name to its JSON key in the definition.
    attribute_map = {
        'description': 'description',
        'url': 'url'
    }

    def __init__(self, description=None, url=None, local_vars_configuration=None):  # noqa: E501
        """V1ExternalDocumentation - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the declared properties.
        self._description = None
        self._url = None
        # This model has no polymorphic discriminator.
        self.discriminator = None

        # Route initial values through the property setters.
        if description is not None:
            self.description = description
        if url is not None:
            self.url = url

    @property
    def description(self):
        """Get the description of this V1ExternalDocumentation.

        :return: The description of this V1ExternalDocumentation.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Set the description of this V1ExternalDocumentation.

        :param description: The description of this V1ExternalDocumentation.
        :type: str
        """
        self._description = description

    @property
    def url(self):
        """Get the url of this V1ExternalDocumentation.

        :return: The url of this V1ExternalDocumentation.
        :rtype: str
        """
        return self._url

    @url.setter
    def url(self, url):
        """Set the url of this V1ExternalDocumentation.

        :param url: The url of this V1ExternalDocumentation.
        :type: str
        """
        self._url = url

    def to_dict(self):
        """Serialize the model's declared properties into a plain ``dict``.

        Values exposing a ``to_dict`` method are converted one level deep,
        including elements of list values and values of dict values.
        """
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string rendering of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for readable output."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1ExternalDocumentation with an equal dict."""
        if isinstance(other, V1ExternalDocumentation):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        if isinstance(other, V1ExternalDocumentation):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_fc_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_fc_volume_source.py
new file mode 100644
index 0000000000..ed7005fc55
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_fc_volume_source.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1FCVolumeSource(object):
    """OpenAPI model for Kubernetes ``FCVolumeSource`` (Fibre Channel volume).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'fs_type': 'str',
        'lun': 'int',
        'read_only': 'bool',
        'target_ww_ns': 'list[str]',
        'wwids': 'list[str]'
    }

    # Maps each python attribute name to its JSON key in the definition.
    attribute_map = {
        'fs_type': 'fsType',
        'lun': 'lun',
        'read_only': 'readOnly',
        'target_ww_ns': 'targetWWNs',
        'wwids': 'wwids'
    }

    def __init__(self, fs_type=None, lun=None, read_only=None, target_ww_ns=None, wwids=None, local_vars_configuration=None):  # noqa: E501
        """V1FCVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the declared properties.
        self._fs_type = None
        self._lun = None
        self._read_only = None
        self._target_ww_ns = None
        self._wwids = None
        # This model has no polymorphic discriminator.
        self.discriminator = None

        # Route initial values through the property setters; all fields
        # are optional.
        if fs_type is not None:
            self.fs_type = fs_type
        if lun is not None:
            self.lun = lun
        if read_only is not None:
            self.read_only = read_only
        if target_ww_ns is not None:
            self.target_ww_ns = target_ww_ns
        if wwids is not None:
            self.wwids = wwids

    @property
    def fs_type(self):
        """Get the fs_type of this V1FCVolumeSource.

        fsType is the filesystem type to mount. Must be a filesystem type
        supported by the host operating system. Ex. \"ext4\", \"xfs\",
        \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.

        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set the fs_type of this V1FCVolumeSource.

        :param fs_type: filesystem type to mount (e.g. \"ext4\")
        :type: str
        """
        self._fs_type = fs_type

    @property
    def lun(self):
        """Get the lun of this V1FCVolumeSource.

        lun is Optional: FC target lun number.

        :rtype: int
        """
        return self._lun

    @lun.setter
    def lun(self, lun):
        """Set the lun of this V1FCVolumeSource.

        :param lun: FC target lun number
        :type: int
        """
        self._lun = lun

    @property
    def read_only(self):
        """Get the read_only of this V1FCVolumeSource.

        readOnly is Optional: Defaults to false (read/write). ReadOnly here
        will force the ReadOnly setting in VolumeMounts.

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set the read_only of this V1FCVolumeSource.

        :param read_only: force the ReadOnly setting in VolumeMounts
        :type: bool
        """
        self._read_only = read_only

    @property
    def target_ww_ns(self):
        """Get the target_ww_ns of this V1FCVolumeSource.

        targetWWNs is Optional: FC target worldwide names (WWNs).

        :rtype: list[str]
        """
        return self._target_ww_ns

    @target_ww_ns.setter
    def target_ww_ns(self, target_ww_ns):
        """Set the target_ww_ns of this V1FCVolumeSource.

        :param target_ww_ns: FC target worldwide names (WWNs)
        :type: list[str]
        """
        self._target_ww_ns = target_ww_ns

    @property
    def wwids(self):
        """Get the wwids of this V1FCVolumeSource.

        wwids Optional: FC volume world wide identifiers (wwids). Either
        wwids or combination of targetWWNs and lun must be set, but not
        both simultaneously.

        :rtype: list[str]
        """
        return self._wwids

    @wwids.setter
    def wwids(self, wwids):
        """Set the wwids of this V1FCVolumeSource.

        :param wwids: FC volume world wide identifiers
        :type: list[str]
        """
        self._wwids = wwids

    def to_dict(self):
        """Serialize the model's declared properties into a plain ``dict``.

        Values exposing a ``to_dict`` method are converted one level deep,
        including elements of list values and values of dict values.
        """
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string rendering of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for readable output."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1FCVolumeSource with an equal dict."""
        if isinstance(other, V1FCVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        if isinstance(other, V1FCVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_flex_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_flex_persistent_volume_source.py
new file mode 100644
index 0000000000..c80fdcf180
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_flex_persistent_volume_source.py
@@ -0,0 +1,233 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1FlexPersistentVolumeSource(object):
    """OpenAPI model for Kubernetes ``FlexPersistentVolumeSource``.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'driver': 'str',
        'fs_type': 'str',
        'options': 'dict(str, str)',
        'read_only': 'bool',
        'secret_ref': 'V1SecretReference'
    }

    # Maps each python attribute name to its JSON key in the definition.
    attribute_map = {
        'driver': 'driver',
        'fs_type': 'fsType',
        'options': 'options',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef'
    }

    def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None, local_vars_configuration=None):  # noqa: E501
        """V1FlexPersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the declared properties.
        self._driver = None
        self._fs_type = None
        self._options = None
        self._read_only = None
        self._secret_ref = None
        # This model has no polymorphic discriminator.
        self.discriminator = None

        # `driver` is required: assign unconditionally so the setter can
        # validate it; the remaining fields are optional.
        self.driver = driver
        if fs_type is not None:
            self.fs_type = fs_type
        if options is not None:
            self.options = options
        if read_only is not None:
            self.read_only = read_only
        if secret_ref is not None:
            self.secret_ref = secret_ref

    @property
    def driver(self):
        """Get the driver of this V1FlexPersistentVolumeSource.

        driver is the name of the driver to use for this volume.

        :rtype: str
        """
        return self._driver

    @driver.setter
    def driver(self, driver):
        """Set the driver of this V1FlexPersistentVolumeSource.

        :param driver: name of the driver to use for this volume (required)
        :type: str
        :raises ValueError: if client-side validation is on and *driver* is None
        """
        if self.local_vars_configuration.client_side_validation and driver is None:  # noqa: E501
            raise ValueError("Invalid value for `driver`, must not be `None`")  # noqa: E501

        self._driver = driver

    @property
    def fs_type(self):
        """Get the fs_type of this V1FlexPersistentVolumeSource.

        fsType is the Filesystem type to mount. Must be a filesystem type
        supported by the host operating system. Ex. \"ext4\", \"xfs\",
        \"ntfs\". The default filesystem depends on FlexVolume script.

        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set the fs_type of this V1FlexPersistentVolumeSource.

        :param fs_type: filesystem type to mount (e.g. \"ext4\")
        :type: str
        """
        self._fs_type = fs_type

    @property
    def options(self):
        """Get the options of this V1FlexPersistentVolumeSource.

        options is Optional: this field holds extra command options if any.

        :rtype: dict(str, str)
        """
        return self._options

    @options.setter
    def options(self, options):
        """Set the options of this V1FlexPersistentVolumeSource.

        :param options: extra command options, if any
        :type: dict(str, str)
        """
        self._options = options

    @property
    def read_only(self):
        """Get the read_only of this V1FlexPersistentVolumeSource.

        readOnly is Optional: defaults to false (read/write). ReadOnly here
        will force the ReadOnly setting in VolumeMounts.

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set the read_only of this V1FlexPersistentVolumeSource.

        :param read_only: force the ReadOnly setting in VolumeMounts
        :type: bool
        """
        self._read_only = read_only

    @property
    def secret_ref(self):
        """Get the secret_ref of this V1FlexPersistentVolumeSource.

        :rtype: V1SecretReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Set the secret_ref of this V1FlexPersistentVolumeSource.

        :param secret_ref: reference to the secret for this volume
        :type: V1SecretReference
        """
        self._secret_ref = secret_ref

    def to_dict(self):
        """Serialize the model's declared properties into a plain ``dict``.

        Values exposing a ``to_dict`` method are converted one level deep,
        including elements of list values and values of dict values.
        """
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string rendering of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for readable output."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1FlexPersistentVolumeSource with an equal dict."""
        if isinstance(other, V1FlexPersistentVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        if isinstance(other, V1FlexPersistentVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_flex_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_flex_volume_source.py
new file mode 100644
index 0000000000..6a282f6c28
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_flex_volume_source.py
@@ -0,0 +1,233 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1FlexVolumeSource(object):
    """OpenAPI model for Kubernetes ``FlexVolumeSource``.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'driver': 'str',
        'fs_type': 'str',
        'options': 'dict(str, str)',
        'read_only': 'bool',
        'secret_ref': 'V1LocalObjectReference'
    }

    # Maps each python attribute name to its JSON key in the definition.
    attribute_map = {
        'driver': 'driver',
        'fs_type': 'fsType',
        'options': 'options',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef'
    }

    def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None, local_vars_configuration=None):  # noqa: E501
        """V1FlexVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the declared properties.
        self._driver = None
        self._fs_type = None
        self._options = None
        self._read_only = None
        self._secret_ref = None
        # This model has no polymorphic discriminator.
        self.discriminator = None

        # `driver` is required: assign unconditionally so the setter can
        # validate it; the remaining fields are optional.
        self.driver = driver
        if fs_type is not None:
            self.fs_type = fs_type
        if options is not None:
            self.options = options
        if read_only is not None:
            self.read_only = read_only
        if secret_ref is not None:
            self.secret_ref = secret_ref

    @property
    def driver(self):
        """Get the driver of this V1FlexVolumeSource.

        driver is the name of the driver to use for this volume.

        :rtype: str
        """
        return self._driver

    @driver.setter
    def driver(self, driver):
        """Set the driver of this V1FlexVolumeSource.

        :param driver: name of the driver to use for this volume (required)
        :type: str
        :raises ValueError: if client-side validation is on and *driver* is None
        """
        if self.local_vars_configuration.client_side_validation and driver is None:  # noqa: E501
            raise ValueError("Invalid value for `driver`, must not be `None`")  # noqa: E501

        self._driver = driver

    @property
    def fs_type(self):
        """Get the fs_type of this V1FlexVolumeSource.

        fsType is the filesystem type to mount. Must be a filesystem type
        supported by the host operating system. Ex. \"ext4\", \"xfs\",
        \"ntfs\". The default filesystem depends on FlexVolume script.

        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set the fs_type of this V1FlexVolumeSource.

        :param fs_type: filesystem type to mount (e.g. \"ext4\")
        :type: str
        """
        self._fs_type = fs_type

    @property
    def options(self):
        """Get the options of this V1FlexVolumeSource.

        options is Optional: this field holds extra command options if any.

        :rtype: dict(str, str)
        """
        return self._options

    @options.setter
    def options(self, options):
        """Set the options of this V1FlexVolumeSource.

        :param options: extra command options, if any
        :type: dict(str, str)
        """
        self._options = options

    @property
    def read_only(self):
        """Get the read_only of this V1FlexVolumeSource.

        readOnly is Optional: defaults to false (read/write). ReadOnly here
        will force the ReadOnly setting in VolumeMounts.

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set the read_only of this V1FlexVolumeSource.

        :param read_only: force the ReadOnly setting in VolumeMounts
        :type: bool
        """
        self._read_only = read_only

    @property
    def secret_ref(self):
        """Get the secret_ref of this V1FlexVolumeSource.

        :rtype: V1LocalObjectReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Set the secret_ref of this V1FlexVolumeSource.

        :param secret_ref: reference to the secret for this volume
        :type: V1LocalObjectReference
        """
        self._secret_ref = secret_ref

    def to_dict(self):
        """Serialize the model's declared properties into a plain ``dict``.

        Values exposing a ``to_dict`` method are converted one level deep,
        including elements of list values and values of dict values.
        """
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string rendering of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for readable output."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1FlexVolumeSource with an equal dict."""
        if isinstance(other, V1FlexVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        if isinstance(other, V1FlexVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_flocker_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_flocker_volume_source.py
new file mode 100644
index 0000000000..71111f9fdc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_flocker_volume_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1FlockerVolumeSource(object):
    """OpenAPI model for Kubernetes ``FlockerVolumeSource``.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'dataset_name': 'str',
        'dataset_uuid': 'str'
    }

    # Maps each python attribute name to its JSON key in the definition.
    attribute_map = {
        'dataset_name': 'datasetName',
        'dataset_uuid': 'datasetUUID'
    }

    def __init__(self, dataset_name=None, dataset_uuid=None, local_vars_configuration=None):  # noqa: E501
        """V1FlockerVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the declared properties.
        self._dataset_name = None
        self._dataset_uuid = None
        # This model has no polymorphic discriminator.
        self.discriminator = None

        # Route initial values through the property setters.
        if dataset_name is not None:
            self.dataset_name = dataset_name
        if dataset_uuid is not None:
            self.dataset_uuid = dataset_uuid

    @property
    def dataset_name(self):
        """Get the dataset_name of this V1FlockerVolumeSource.

        datasetName is Name of the dataset stored as metadata -> name on
        the dataset for Flocker should be considered as deprecated.

        :rtype: str
        """
        return self._dataset_name

    @dataset_name.setter
    def dataset_name(self, dataset_name):
        """Set the dataset_name of this V1FlockerVolumeSource.

        :param dataset_name: name of the Flocker dataset (deprecated)
        :type: str
        """
        self._dataset_name = dataset_name

    @property
    def dataset_uuid(self):
        """Get the dataset_uuid of this V1FlockerVolumeSource.

        datasetUUID is the UUID of the dataset. This is unique identifier
        of a Flocker dataset.

        :rtype: str
        """
        return self._dataset_uuid

    @dataset_uuid.setter
    def dataset_uuid(self, dataset_uuid):
        """Set the dataset_uuid of this V1FlockerVolumeSource.

        :param dataset_uuid: UUID of the Flocker dataset
        :type: str
        """
        self._dataset_uuid = dataset_uuid

    def to_dict(self):
        """Serialize the model's declared properties into a plain ``dict``.

        Values exposing a ``to_dict`` method are converted one level deep,
        including elements of list values and values of dict values.
        """
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string rendering of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for readable output."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1FlockerVolumeSource with an equal dict."""
        if isinstance(other, V1FlockerVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        if isinstance(other, V1FlockerVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_for_zone.py b/contrib/python/kubernetes/kubernetes/client/models/v1_for_zone.py
new file mode 100644
index 0000000000..3a01095f93
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_for_zone.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ForZone(object):
    """OpenAPI model for Kubernetes ``ForZone``.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'name': 'str'
    }

    # Maps each python attribute name to its JSON key in the definition.
    attribute_map = {
        'name': 'name'
    }

    def __init__(self, name=None, local_vars_configuration=None):  # noqa: E501
        """V1ForZone - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing field for the declared property.
        self._name = None
        # This model has no polymorphic discriminator.
        self.discriminator = None

        # `name` is required: assign unconditionally so the setter can
        # validate it.
        self.name = name

    @property
    def name(self):
        """Get the name of this V1ForZone.

        name represents the name of the zone.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this V1ForZone.

        :param name: name of the zone (required)
        :type: str
        :raises ValueError: if client-side validation is on and *name* is None
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    def to_dict(self):
        """Serialize the model's declared properties into a plain ``dict``.

        Values exposing a ``to_dict`` method are converted one level deep,
        including elements of list values and values of dict values.
        """
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string rendering of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for readable output."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a V1ForZone with an equal dict."""
        if isinstance(other, V1ForZone):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        if isinstance(other, V1ForZone):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_gce_persistent_disk_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_gce_persistent_disk_volume_source.py
new file mode 100644
index 0000000000..2c446ea6f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_gce_persistent_disk_volume_source.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1GCEPersistentDiskVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'fs_type': 'str',
        'partition': 'int',
        'pd_name': 'str',
        'read_only': 'bool'
    }

    attribute_map = {
        'fs_type': 'fsType',
        'partition': 'partition',
        'pd_name': 'pdName',
        'read_only': 'readOnly'
    }

    def __init__(self, fs_type=None, partition=None, pd_name=None, read_only=None, local_vars_configuration=None):  # noqa: E501
        """V1GCEPersistentDiskVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._fs_type = None
        self._partition = None
        self._pd_name = None
        self._read_only = None
        self.discriminator = None

        # Optional fields only pass through their setters when supplied;
        # the required field (pd_name) is always assigned so validation runs.
        if fs_type is not None:
            self.fs_type = fs_type
        if partition is not None:
            self.partition = partition
        self.pd_name = pd_name
        if read_only is not None:
            self.read_only = read_only

    @property
    def fs_type(self):
        """str: fsType is the filesystem type of the volume to mount.
        Implicitly inferred to be "ext4" if unspecified. More info:
        https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set fs_type (optional; no client-side validation)."""
        self._fs_type = fs_type

    @property
    def partition(self):
        """int: partition is the partition in the volume to mount.
        If omitted, the default is to mount by volume name.
        """
        return self._partition

    @partition.setter
    def partition(self, partition):
        """Set partition (optional; no client-side validation)."""
        self._partition = partition

    @property
    def pd_name(self):
        """str: pdName is the unique name of the PD resource in GCE (required).
        Used to identify the disk in GCE.
        """
        return self._pd_name

    @pd_name.setter
    def pd_name(self, pd_name):
        """Set pd_name; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and pd_name is None:  # noqa: E501
            raise ValueError("Invalid value for `pd_name`, must not be `None`")  # noqa: E501
        self._pd_name = pd_name

    @property
    def read_only(self):
        """bool: readOnly forces the ReadOnly setting in VolumeMounts.
        Defaults to false.
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set read_only (optional; no client-side validation)."""
        self._read_only = read_only

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _plain(value):
            # Nested generated models expose to_dict(); unwrap them inside
            # lists and dicts as well.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with an equal to_dict()."""
        if not isinstance(other, V1GCEPersistentDiskVolumeSource):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1GCEPersistentDiskVolumeSource):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_git_repo_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_git_repo_volume_source.py
new file mode 100644
index 0000000000..9ae8d1cc5f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_git_repo_volume_source.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1GitRepoVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'directory': 'str',
        'repository': 'str',
        'revision': 'str'
    }

    attribute_map = {
        'directory': 'directory',
        'repository': 'repository',
        'revision': 'revision'
    }

    def __init__(self, directory=None, repository=None, revision=None, local_vars_configuration=None):  # noqa: E501
        """V1GitRepoVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._directory = None
        self._repository = None
        self._revision = None
        self.discriminator = None

        # Optional fields only pass through their setters when supplied;
        # the required field (repository) is always assigned so validation runs.
        if directory is not None:
            self.directory = directory
        self.repository = repository
        if revision is not None:
            self.revision = revision

    @property
    def directory(self):
        """str: directory is the target directory name. Must not contain or
        start with '..'. If '.' is supplied, the volume directory will be the
        git repository; otherwise the repository goes in the named subdirectory.
        """
        return self._directory

    @directory.setter
    def directory(self, directory):
        """Set directory (optional; no client-side validation)."""
        self._directory = directory

    @property
    def repository(self):
        """str: repository is the URL (required)."""
        return self._repository

    @repository.setter
    def repository(self, repository):
        """Set repository; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and repository is None:  # noqa: E501
            raise ValueError("Invalid value for `repository`, must not be `None`")  # noqa: E501
        self._repository = repository

    @property
    def revision(self):
        """str: revision is the commit hash for the specified revision."""
        return self._revision

    @revision.setter
    def revision(self, revision):
        """Set revision (optional; no client-side validation)."""
        self._revision = revision

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _plain(value):
            # Nested generated models expose to_dict(); unwrap them inside
            # lists and dicts as well.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with an equal to_dict()."""
        if not isinstance(other, V1GitRepoVolumeSource):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1GitRepoVolumeSource):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_persistent_volume_source.py
new file mode 100644
index 0000000000..676ec74e8f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_persistent_volume_source.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1GlusterfsPersistentVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'endpoints': 'str',
        'endpoints_namespace': 'str',
        'path': 'str',
        'read_only': 'bool'
    }

    attribute_map = {
        'endpoints': 'endpoints',
        'endpoints_namespace': 'endpointsNamespace',
        'path': 'path',
        'read_only': 'readOnly'
    }

    def __init__(self, endpoints=None, endpoints_namespace=None, path=None, read_only=None, local_vars_configuration=None):  # noqa: E501
        """V1GlusterfsPersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._endpoints = None
        self._endpoints_namespace = None
        self._path = None
        self._read_only = None
        self.discriminator = None

        # Optional fields only pass through their setters when supplied; the
        # required fields (endpoints, path) are always assigned so validation runs.
        self.endpoints = endpoints
        if endpoints_namespace is not None:
            self.endpoints_namespace = endpoints_namespace
        self.path = path
        if read_only is not None:
            self.read_only = read_only

    @property
    def endpoints(self):
        """str: endpoints is the endpoint name that details Glusterfs topology
        (required). More info:
        https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
        """
        return self._endpoints

    @endpoints.setter
    def endpoints(self, endpoints):
        """Set endpoints; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and endpoints is None:  # noqa: E501
            raise ValueError("Invalid value for `endpoints`, must not be `None`")  # noqa: E501
        self._endpoints = endpoints

    @property
    def endpoints_namespace(self):
        """str: endpointsNamespace is the namespace that contains the Glusterfs
        endpoint. If empty, defaults to the same namespace as the bound PVC.
        """
        return self._endpoints_namespace

    @endpoints_namespace.setter
    def endpoints_namespace(self, endpoints_namespace):
        """Set endpoints_namespace (optional; no client-side validation)."""
        self._endpoints_namespace = endpoints_namespace

    @property
    def path(self):
        """str: path is the Glusterfs volume path (required)."""
        return self._path

    @path.setter
    def path(self, path):
        """Set path; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501
        self._path = path

    @property
    def read_only(self):
        """bool: readOnly forces the Glusterfs volume to be mounted read-only.
        Defaults to false.
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set read_only (optional; no client-side validation)."""
        self._read_only = read_only

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _plain(value):
            # Nested generated models expose to_dict(); unwrap them inside
            # lists and dicts as well.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with an equal to_dict()."""
        if not isinstance(other, V1GlusterfsPersistentVolumeSource):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1GlusterfsPersistentVolumeSource):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_volume_source.py
new file mode 100644
index 0000000000..79cf8a48a3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_glusterfs_volume_source.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1GlusterfsVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'endpoints': 'str',
        'path': 'str',
        'read_only': 'bool'
    }

    attribute_map = {
        'endpoints': 'endpoints',
        'path': 'path',
        'read_only': 'readOnly'
    }

    def __init__(self, endpoints=None, path=None, read_only=None, local_vars_configuration=None):  # noqa: E501
        """V1GlusterfsVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._endpoints = None
        self._path = None
        self._read_only = None
        self.discriminator = None

        # Required fields (endpoints, path) always go through their setters so
        # validation runs; read_only only does when supplied.
        self.endpoints = endpoints
        self.path = path
        if read_only is not None:
            self.read_only = read_only

    @property
    def endpoints(self):
        """str: endpoints is the endpoint name that details Glusterfs topology
        (required). More info:
        https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
        """
        return self._endpoints

    @endpoints.setter
    def endpoints(self, endpoints):
        """Set endpoints; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and endpoints is None:  # noqa: E501
            raise ValueError("Invalid value for `endpoints`, must not be `None`")  # noqa: E501
        self._endpoints = endpoints

    @property
    def path(self):
        """str: path is the Glusterfs volume path (required)."""
        return self._path

    @path.setter
    def path(self, path):
        """Set path; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501
        self._path = path

    @property
    def read_only(self):
        """bool: readOnly forces the Glusterfs volume to be mounted read-only.
        Defaults to false.
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set read_only (optional; no client-side validation)."""
        self._read_only = read_only

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _plain(value):
            # Nested generated models expose to_dict(); unwrap them inside
            # lists and dicts as well.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with an equal to_dict()."""
        if not isinstance(other, V1GlusterfsVolumeSource):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1GlusterfsVolumeSource):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_group_version_for_discovery.py b/contrib/python/kubernetes/kubernetes/client/models/v1_group_version_for_discovery.py
new file mode 100644
index 0000000000..43a5db2001
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_group_version_for_discovery.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1GroupVersionForDiscovery(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'group_version': 'str',
        'version': 'str'
    }

    attribute_map = {
        'group_version': 'groupVersion',
        'version': 'version'
    }

    def __init__(self, group_version=None, version=None, local_vars_configuration=None):  # noqa: E501
        """V1GroupVersionForDiscovery - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._group_version = None
        self._version = None
        self.discriminator = None

        # Both fields are required: always route through the validating setters.
        self.group_version = group_version
        self.version = version

    @property
    def group_version(self):
        """str: groupVersion specifies the API group and version in the form
        "group/version" (required).
        """
        return self._group_version

    @group_version.setter
    def group_version(self, group_version):
        """Set group_version; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and group_version is None:  # noqa: E501
            raise ValueError("Invalid value for `group_version`, must not be `None`")  # noqa: E501
        self._group_version = group_version

    @property
    def version(self):
        """str: version specifies the version in the form "version", saving
        clients the trouble of splitting the GroupVersion (required).
        """
        return self._version

    @version.setter
    def version(self, version):
        """Set version; required, so ``None`` is rejected when validating."""
        if self.local_vars_configuration.client_side_validation and version is None:  # noqa: E501
            raise ValueError("Invalid value for `version`, must not be `None`")  # noqa: E501
        self._version = version

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _plain(value):
            # Nested generated models expose to_dict(); unwrap them inside
            # lists and dicts as well.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pprint-formatted string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with an equal to_dict()."""
        if not isinstance(other, V1GroupVersionForDiscovery):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if not isinstance(other, V1GroupVersionForDiscovery):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_grpc_action.py b/contrib/python/kubernetes/kubernetes/client/models/v1_grpc_action.py
new file mode 100644
index 0000000000..d2dc5032bb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_grpc_action.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1GRPCAction(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'port': 'int',
+ 'service': 'str'
+ }
+
+ attribute_map = {
+ 'port': 'port',
+ 'service': 'service'
+ }
+
+ def __init__(self, port=None, service=None, local_vars_configuration=None): # noqa: E501
+ """V1GRPCAction - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._port = None
+ self._service = None
+ self.discriminator = None
+
+ self.port = port
+ if service is not None:
+ self.service = service
+
+ @property
+ def port(self):
+ """Gets the port of this V1GRPCAction. # noqa: E501
+
+ Port number of the gRPC service. Number must be in the range 1 to 65535. # noqa: E501
+
+ :return: The port of this V1GRPCAction. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this V1GRPCAction.
+
+ Port number of the gRPC service. Number must be in the range 1 to 65535. # noqa: E501
+
+ :param port: The port of this V1GRPCAction. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
+ raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
+
+ self._port = port
+
+ @property
+ def service(self):
+ """Gets the service of this V1GRPCAction. # noqa: E501
+
+ Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. # noqa: E501
+
+ :return: The service of this V1GRPCAction. # noqa: E501
+ :rtype: str
+ """
+ return self._service
+
+ @service.setter
+ def service(self, service):
+ """Sets the service of this V1GRPCAction.
+
+ Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). If this is not specified, the default behavior is defined by gRPC. # noqa: E501
+
+ :param service: The service of this V1GRPCAction. # noqa: E501
+ :type: str
+ """
+
+ self._service = service
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1GRPCAction):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1GRPCAction):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler.py b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler.py
new file mode 100644
index 0000000000..85d14edf98
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1HorizontalPodAutoscaler(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1HorizontalPodAutoscalerSpec',
+ 'status': 'V1HorizontalPodAutoscalerStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1HorizontalPodAutoscaler - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1HorizontalPodAutoscaler. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1HorizontalPodAutoscaler. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1HorizontalPodAutoscaler.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1HorizontalPodAutoscaler. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1HorizontalPodAutoscaler. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1HorizontalPodAutoscaler. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1HorizontalPodAutoscaler.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1HorizontalPodAutoscaler. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1HorizontalPodAutoscaler. # noqa: E501
+
+
+ :return: The metadata of this V1HorizontalPodAutoscaler. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1HorizontalPodAutoscaler.
+
+
+ :param metadata: The metadata of this V1HorizontalPodAutoscaler. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1HorizontalPodAutoscaler. # noqa: E501
+
+
+ :return: The spec of this V1HorizontalPodAutoscaler. # noqa: E501
+ :rtype: V1HorizontalPodAutoscalerSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1HorizontalPodAutoscaler.
+
+
+ :param spec: The spec of this V1HorizontalPodAutoscaler. # noqa: E501
+ :type: V1HorizontalPodAutoscalerSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1HorizontalPodAutoscaler. # noqa: E501
+
+
+ :return: The status of this V1HorizontalPodAutoscaler. # noqa: E501
+ :rtype: V1HorizontalPodAutoscalerStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1HorizontalPodAutoscaler.
+
+
+ :param status: The status of this V1HorizontalPodAutoscaler. # noqa: E501
+ :type: V1HorizontalPodAutoscalerStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1HorizontalPodAutoscaler):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1HorizontalPodAutoscaler):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_list.py
new file mode 100644
index 0000000000..8f417f4084
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1HorizontalPodAutoscalerList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1HorizontalPodAutoscaler]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1HorizontalPodAutoscalerList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1HorizontalPodAutoscalerList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1HorizontalPodAutoscalerList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1HorizontalPodAutoscalerList. # noqa: E501
+
+ items is the list of horizontal pod autoscaler objects. # noqa: E501
+
+ :return: The items of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :rtype: list[V1HorizontalPodAutoscaler]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1HorizontalPodAutoscalerList.
+
+ items is the list of horizontal pod autoscaler objects. # noqa: E501
+
+ :param items: The items of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :type: list[V1HorizontalPodAutoscaler]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1HorizontalPodAutoscalerList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1HorizontalPodAutoscalerList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1HorizontalPodAutoscalerList. # noqa: E501
+
+
+ :return: The metadata of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1HorizontalPodAutoscalerList.
+
+
+ :param metadata: The metadata of this V1HorizontalPodAutoscalerList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1HorizontalPodAutoscalerList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1HorizontalPodAutoscalerList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py
new file mode 100644
index 0000000000..a0a2bc0e87
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1HorizontalPodAutoscalerSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'max_replicas': 'int',
+ 'min_replicas': 'int',
+ 'scale_target_ref': 'V1CrossVersionObjectReference',
+ 'target_cpu_utilization_percentage': 'int'
+ }
+
+ attribute_map = {
+ 'max_replicas': 'maxReplicas',
+ 'min_replicas': 'minReplicas',
+ 'scale_target_ref': 'scaleTargetRef',
+ 'target_cpu_utilization_percentage': 'targetCPUUtilizationPercentage'
+ }
+
+ def __init__(self, max_replicas=None, min_replicas=None, scale_target_ref=None, target_cpu_utilization_percentage=None, local_vars_configuration=None): # noqa: E501
+ """V1HorizontalPodAutoscalerSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._max_replicas = None
+ self._min_replicas = None
+ self._scale_target_ref = None
+ self._target_cpu_utilization_percentage = None
+ self.discriminator = None
+
+ self.max_replicas = max_replicas
+ if min_replicas is not None:
+ self.min_replicas = min_replicas
+ self.scale_target_ref = scale_target_ref
+ if target_cpu_utilization_percentage is not None:
+ self.target_cpu_utilization_percentage = target_cpu_utilization_percentage
+
+ @property
+ def max_replicas(self):
+ """Gets the max_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+
+ maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. # noqa: E501
+
+ :return: The max_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._max_replicas
+
+ @max_replicas.setter
+ def max_replicas(self, max_replicas):
+ """Sets the max_replicas of this V1HorizontalPodAutoscalerSpec.
+
+ maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. # noqa: E501
+
+ :param max_replicas: The max_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and max_replicas is None: # noqa: E501
+ raise ValueError("Invalid value for `max_replicas`, must not be `None`") # noqa: E501
+
+ self._max_replicas = max_replicas
+
+ @property
+ def min_replicas(self):
+ """Gets the min_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+
+ minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. # noqa: E501
+
+ :return: The min_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._min_replicas
+
+ @min_replicas.setter
+ def min_replicas(self, min_replicas):
+ """Sets the min_replicas of this V1HorizontalPodAutoscalerSpec.
+
+ minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. # noqa: E501
+
+ :param min_replicas: The min_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :type: int
+ """
+
+ self._min_replicas = min_replicas
+
+ @property
+ def scale_target_ref(self):
+ """Gets the scale_target_ref of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+
+
+ :return: The scale_target_ref of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :rtype: V1CrossVersionObjectReference
+ """
+ return self._scale_target_ref
+
+ @scale_target_ref.setter
+ def scale_target_ref(self, scale_target_ref):
+ """Sets the scale_target_ref of this V1HorizontalPodAutoscalerSpec.
+
+
+ :param scale_target_ref: The scale_target_ref of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :type: V1CrossVersionObjectReference
+ """
+ if self.local_vars_configuration.client_side_validation and scale_target_ref is None: # noqa: E501
+ raise ValueError("Invalid value for `scale_target_ref`, must not be `None`") # noqa: E501
+
+ self._scale_target_ref = scale_target_ref
+
+ @property
+ def target_cpu_utilization_percentage(self):
+ """Gets the target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+
+ targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used. # noqa: E501
+
+ :return: The target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._target_cpu_utilization_percentage
+
+ @target_cpu_utilization_percentage.setter
+ def target_cpu_utilization_percentage(self, target_cpu_utilization_percentage):
+ """Sets the target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec.
+
+ targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used. # noqa: E501
+
+ :param target_cpu_utilization_percentage: The target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec. # noqa: E501
+ :type: int
+ """
+
+ self._target_cpu_utilization_percentage = target_cpu_utilization_percentage
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1HorizontalPodAutoscalerSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1HorizontalPodAutoscalerSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_status.py
new file mode 100644
index 0000000000..72e7dc53a7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_horizontal_pod_autoscaler_status.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1HorizontalPodAutoscalerStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'current_cpu_utilization_percentage': 'int',
+ 'current_replicas': 'int',
+ 'desired_replicas': 'int',
+ 'last_scale_time': 'datetime',
+ 'observed_generation': 'int'
+ }
+
+ attribute_map = {
+ 'current_cpu_utilization_percentage': 'currentCPUUtilizationPercentage',
+ 'current_replicas': 'currentReplicas',
+ 'desired_replicas': 'desiredReplicas',
+ 'last_scale_time': 'lastScaleTime',
+ 'observed_generation': 'observedGeneration'
+ }
+
+ def __init__(self, current_cpu_utilization_percentage=None, current_replicas=None, desired_replicas=None, last_scale_time=None, observed_generation=None, local_vars_configuration=None): # noqa: E501
+ """V1HorizontalPodAutoscalerStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._current_cpu_utilization_percentage = None
+ self._current_replicas = None
+ self._desired_replicas = None
+ self._last_scale_time = None
+ self._observed_generation = None
+ self.discriminator = None
+
+ if current_cpu_utilization_percentage is not None:
+ self.current_cpu_utilization_percentage = current_cpu_utilization_percentage
+ self.current_replicas = current_replicas
+ self.desired_replicas = desired_replicas
+ if last_scale_time is not None:
+ self.last_scale_time = last_scale_time
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+
+ @property
+ def current_cpu_utilization_percentage(self):
+ """Gets the current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+
+ currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU. # noqa: E501
+
+ :return: The current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._current_cpu_utilization_percentage
+
+ @current_cpu_utilization_percentage.setter
+ def current_cpu_utilization_percentage(self, current_cpu_utilization_percentage):
+ """Sets the current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus.
+
+ currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU. # noqa: E501
+
+ :param current_cpu_utilization_percentage: The current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :type: int
+ """
+
+ self._current_cpu_utilization_percentage = current_cpu_utilization_percentage
+
+ @property
+ def current_replicas(self):
+ """Gets the current_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+
+ currentReplicas is the current number of replicas of pods managed by this autoscaler. # noqa: E501
+
+ :return: The current_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._current_replicas
+
+ @current_replicas.setter
+ def current_replicas(self, current_replicas):
+ """Sets the current_replicas of this V1HorizontalPodAutoscalerStatus.
+
+ currentReplicas is the current number of replicas of pods managed by this autoscaler. # noqa: E501
+
+ :param current_replicas: The current_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and current_replicas is None: # noqa: E501
+ raise ValueError("Invalid value for `current_replicas`, must not be `None`") # noqa: E501
+
+ self._current_replicas = current_replicas
+
+ @property
+ def desired_replicas(self):
+ """Gets the desired_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+
+ desiredReplicas is the desired number of replicas of pods managed by this autoscaler. # noqa: E501
+
+ :return: The desired_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._desired_replicas
+
+ @desired_replicas.setter
+ def desired_replicas(self, desired_replicas):
+ """Sets the desired_replicas of this V1HorizontalPodAutoscalerStatus.
+
+ desiredReplicas is the desired number of replicas of pods managed by this autoscaler. # noqa: E501
+
+ :param desired_replicas: The desired_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and desired_replicas is None: # noqa: E501
+ raise ValueError("Invalid value for `desired_replicas`, must not be `None`") # noqa: E501
+
+ self._desired_replicas = desired_replicas
+
+ @property
+ def last_scale_time(self):
+ """Gets the last_scale_time of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+
+ lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed. # noqa: E501
+
+ :return: The last_scale_time of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_scale_time
+
+ @last_scale_time.setter
+ def last_scale_time(self, last_scale_time):
+ """Sets the last_scale_time of this V1HorizontalPodAutoscalerStatus.
+
+ lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed. # noqa: E501
+
+ :param last_scale_time: The last_scale_time of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_scale_time = last_scale_time
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+
+ observedGeneration is the most recent generation observed by this autoscaler. # noqa: E501
+
+ :return: The observed_generation of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1HorizontalPodAutoscalerStatus.
+
+ observedGeneration is the most recent generation observed by this autoscaler. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1HorizontalPodAutoscalerStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1HorizontalPodAutoscalerStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1HorizontalPodAutoscalerStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_host_alias.py b/contrib/python/kubernetes/kubernetes/client/models/v1_host_alias.py
new file mode 100644
index 0000000000..3bc7105252
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_host_alias.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HostAlias(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'hostnames': 'list[str]',
        'ip': 'str'
    }

    attribute_map = {
        'hostnames': 'hostnames',
        'ip': 'ip'
    }

    def __init__(self, hostnames=None, ip=None, local_vars_configuration=None):  # noqa: E501
        """V1HostAlias - a model defined in OpenAPI

        :param hostnames: Hostnames for the above IP address (list[str], optional).
        :param ip: IP address of the host file entry (str, optional).
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._hostnames = None
        self._ip = None
        self.discriminator = None

        # Only assign through the property setters when a value was given,
        # so unset fields stay None.
        if hostnames is not None:
            self.hostnames = hostnames
        if ip is not None:
            self.ip = ip

    @property
    def hostnames(self):
        """Gets the hostnames of this V1HostAlias.  # noqa: E501

        Hostnames for the above IP address.  # noqa: E501

        :return: The hostnames of this V1HostAlias.  # noqa: E501
        :rtype: list[str]
        """
        return self._hostnames

    @hostnames.setter
    def hostnames(self, hostnames):
        """Sets the hostnames of this V1HostAlias.

        Hostnames for the above IP address.  # noqa: E501

        :param hostnames: The hostnames of this V1HostAlias.  # noqa: E501
        :type: list[str]
        """

        self._hostnames = hostnames

    @property
    def ip(self):
        """Gets the ip of this V1HostAlias.  # noqa: E501

        IP address of the host file entry.  # noqa: E501

        :return: The ip of this V1HostAlias.  # noqa: E501
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Sets the ip of this V1HostAlias.

        IP address of the host file entry.  # noqa: E501

        :param ip: The ip of this V1HostAlias.  # noqa: E501
        :type: str
        """

        self._ip = ip

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HostAlias):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HostAlias):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_host_ip.py b/contrib/python/kubernetes/kubernetes/client/models/v1_host_ip.py
new file mode 100644
index 0000000000..6e491b3080
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_host_ip.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HostIP(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'ip': 'str'
    }

    attribute_map = {
        'ip': 'ip'
    }

    def __init__(self, ip=None, local_vars_configuration=None):  # noqa: E501
        """V1HostIP - a model defined in OpenAPI

        :param ip: IP address assigned to the host (str, optional).
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._ip = None
        self.discriminator = None

        # Optional field: only set when explicitly provided.
        if ip is not None:
            self.ip = ip

    @property
    def ip(self):
        """Gets the ip of this V1HostIP.  # noqa: E501

        IP is the IP address assigned to the host  # noqa: E501

        :return: The ip of this V1HostIP.  # noqa: E501
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Sets the ip of this V1HostIP.

        IP is the IP address assigned to the host  # noqa: E501

        :param ip: The ip of this V1HostIP.  # noqa: E501
        :type: str
        """

        self._ip = ip

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HostIP):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HostIP):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_host_path_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_host_path_volume_source.py
new file mode 100644
index 0000000000..1b8d3d2fcb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_host_path_volume_source.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HostPathVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'path': 'str',
        'type': 'str'
    }

    attribute_map = {
        'path': 'path',
        'type': 'type'
    }

    def __init__(self, path=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1HostPathVolumeSource - a model defined in OpenAPI

        :param path: Required path of the directory on the host (str); raises
            ValueError via the setter when None and validation is enabled.
        :param type: Optional HostPath volume type (str).
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._path = None
        self._type = None
        self.discriminator = None

        # `path` is required by the schema, so it is always assigned (the
        # setter validates non-None); `type` is optional.
        self.path = path
        if type is not None:
            self.type = type

    @property
    def path(self):
        """Gets the path of this V1HostPathVolumeSource.  # noqa: E501

        path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :return: The path of this V1HostPathVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1HostPathVolumeSource.

        path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :param path: The path of this V1HostPathVolumeSource.  # noqa: E501
        :type: str
        :raises ValueError: when `path` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501

        self._path = path

    @property
    def type(self):
        """Gets the type of this V1HostPathVolumeSource.  # noqa: E501

        type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :return: The type of this V1HostPathVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1HostPathVolumeSource.

        type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :param type: The type of this V1HostPathVolumeSource.  # noqa: E501
        :type: str
        """

        self._type = type

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HostPathVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HostPathVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_http_get_action.py b/contrib/python/kubernetes/kubernetes/client/models/v1_http_get_action.py
new file mode 100644
index 0000000000..81b21dcc49
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_http_get_action.py
@@ -0,0 +1,235 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HTTPGetAction(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'host': 'str',
        'http_headers': 'list[V1HTTPHeader]',
        'path': 'str',
        'port': 'object',
        'scheme': 'str'
    }

    attribute_map = {
        'host': 'host',
        'http_headers': 'httpHeaders',
        'path': 'path',
        'port': 'port',
        'scheme': 'scheme'
    }

    def __init__(self, host=None, http_headers=None, path=None, port=None, scheme=None, local_vars_configuration=None):  # noqa: E501
        """V1HTTPGetAction - a model defined in OpenAPI

        :param host: Host name to connect to (str, optional).
        :param http_headers: Custom request headers (list[V1HTTPHeader], optional).
        :param path: Path to access on the HTTP server (str, optional).
        :param port: Required port name or number; raises ValueError via the
            setter when None and validation is enabled.
        :param scheme: Scheme for connecting to the host (str, optional).
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._host = None
        self._http_headers = None
        self._path = None
        self._port = None
        self._scheme = None
        self.discriminator = None

        # Optional fields are only set when provided; `port` is required by
        # the schema and always goes through its validating setter.
        if host is not None:
            self.host = host
        if http_headers is not None:
            self.http_headers = http_headers
        if path is not None:
            self.path = path
        self.port = port
        if scheme is not None:
            self.scheme = scheme

    @property
    def host(self):
        """Gets the host of this V1HTTPGetAction.  # noqa: E501

        Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.  # noqa: E501

        :return: The host of this V1HTTPGetAction.  # noqa: E501
        :rtype: str
        """
        return self._host

    @host.setter
    def host(self, host):
        """Sets the host of this V1HTTPGetAction.

        Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.  # noqa: E501

        :param host: The host of this V1HTTPGetAction.  # noqa: E501
        :type: str
        """

        self._host = host

    @property
    def http_headers(self):
        """Gets the http_headers of this V1HTTPGetAction.  # noqa: E501

        Custom headers to set in the request. HTTP allows repeated headers.  # noqa: E501

        :return: The http_headers of this V1HTTPGetAction.  # noqa: E501
        :rtype: list[V1HTTPHeader]
        """
        return self._http_headers

    @http_headers.setter
    def http_headers(self, http_headers):
        """Sets the http_headers of this V1HTTPGetAction.

        Custom headers to set in the request. HTTP allows repeated headers.  # noqa: E501

        :param http_headers: The http_headers of this V1HTTPGetAction.  # noqa: E501
        :type: list[V1HTTPHeader]
        """

        self._http_headers = http_headers

    @property
    def path(self):
        """Gets the path of this V1HTTPGetAction.  # noqa: E501

        Path to access on the HTTP server.  # noqa: E501

        :return: The path of this V1HTTPGetAction.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1HTTPGetAction.

        Path to access on the HTTP server.  # noqa: E501

        :param path: The path of this V1HTTPGetAction.  # noqa: E501
        :type: str
        """

        self._path = path

    @property
    def port(self):
        """Gets the port of this V1HTTPGetAction.  # noqa: E501

        Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.  # noqa: E501

        :return: The port of this V1HTTPGetAction.  # noqa: E501
        :rtype: object
        """
        return self._port

    @port.setter
    def port(self, port):
        """Sets the port of this V1HTTPGetAction.

        Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.  # noqa: E501

        :param port: The port of this V1HTTPGetAction.  # noqa: E501
        :type: object
        :raises ValueError: when `port` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and port is None:  # noqa: E501
            raise ValueError("Invalid value for `port`, must not be `None`")  # noqa: E501

        self._port = port

    @property
    def scheme(self):
        """Gets the scheme of this V1HTTPGetAction.  # noqa: E501

        Scheme to use for connecting to the host. Defaults to HTTP.  # noqa: E501

        :return: The scheme of this V1HTTPGetAction.  # noqa: E501
        :rtype: str
        """
        return self._scheme

    @scheme.setter
    def scheme(self, scheme):
        """Sets the scheme of this V1HTTPGetAction.

        Scheme to use for connecting to the host. Defaults to HTTP.  # noqa: E501

        :param scheme: The scheme of this V1HTTPGetAction.  # noqa: E501
        :type: str
        """

        self._scheme = scheme

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HTTPGetAction):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HTTPGetAction):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_http_header.py b/contrib/python/kubernetes/kubernetes/client/models/v1_http_header.py
new file mode 100644
index 0000000000..c4b3c62036
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_http_header.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HTTPHeader(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'name': 'str',
        'value': 'str'
    }

    attribute_map = {
        'name': 'name',
        'value': 'value'
    }

    def __init__(self, name=None, value=None, local_vars_configuration=None):  # noqa: E501
        """V1HTTPHeader - a model defined in OpenAPI

        :param name: Required header field name (str); raises ValueError via
            the setter when None and validation is enabled.
        :param value: Required header field value (str); same validation.
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self._value = None
        self.discriminator = None

        # Both fields are required by the schema and therefore always go
        # through their validating setters.
        self.name = name
        self.value = value

    @property
    def name(self):
        """Gets the name of this V1HTTPHeader.  # noqa: E501

        The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.  # noqa: E501

        :return: The name of this V1HTTPHeader.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1HTTPHeader.

        The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.  # noqa: E501

        :param name: The name of this V1HTTPHeader.  # noqa: E501
        :type: str
        :raises ValueError: when `name` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    @property
    def value(self):
        """Gets the value of this V1HTTPHeader.  # noqa: E501

        The header field value  # noqa: E501

        :return: The value of this V1HTTPHeader.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this V1HTTPHeader.

        The header field value  # noqa: E501

        :param value: The value of this V1HTTPHeader.  # noqa: E501
        :type: str
        :raises ValueError: when `value` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `value`, must not be `None`")  # noqa: E501

        self._value = value

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HTTPHeader):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HTTPHeader):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_path.py b/contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_path.py
new file mode 100644
index 0000000000..620d9c83a5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_path.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HTTPIngressPath(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'backend': 'V1IngressBackend',
        'path': 'str',
        'path_type': 'str'
    }

    attribute_map = {
        'backend': 'backend',
        'path': 'path',
        'path_type': 'pathType'
    }

    def __init__(self, backend=None, path=None, path_type=None, local_vars_configuration=None):  # noqa: E501
        """V1HTTPIngressPath - a model defined in OpenAPI

        :param backend: Required backend (V1IngressBackend); raises ValueError
            via the setter when None and validation is enabled.
        :param path: Optional URL path to match (str).
        :param path_type: Required path-match semantics (str); same validation
            as `backend`.
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._backend = None
        self._path = None
        self._path_type = None
        self.discriminator = None

        # `backend` and `path_type` are required by the schema and always go
        # through their validating setters; `path` is optional.
        self.backend = backend
        if path is not None:
            self.path = path
        self.path_type = path_type

    @property
    def backend(self):
        """Gets the backend of this V1HTTPIngressPath.  # noqa: E501


        :return: The backend of this V1HTTPIngressPath.  # noqa: E501
        :rtype: V1IngressBackend
        """
        return self._backend

    @backend.setter
    def backend(self, backend):
        """Sets the backend of this V1HTTPIngressPath.


        :param backend: The backend of this V1HTTPIngressPath.  # noqa: E501
        :type: V1IngressBackend
        :raises ValueError: when `backend` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and backend is None:  # noqa: E501
            raise ValueError("Invalid value for `backend`, must not be `None`")  # noqa: E501

        self._backend = backend

    @property
    def path(self):
        """Gets the path of this V1HTTPIngressPath.  # noqa: E501

        path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".  # noqa: E501

        :return: The path of this V1HTTPIngressPath.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1HTTPIngressPath.

        path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".  # noqa: E501

        :param path: The path of this V1HTTPIngressPath.  # noqa: E501
        :type: str
        """

        self._path = path

    @property
    def path_type(self):
        """Gets the path_type of this V1HTTPIngressPath.  # noqa: E501

        pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is done on a path element by element basis. A path element refers is the list of labels in the path split by the '/' separator. A request is a match for path p if every p is an element-wise prefix of p of the request path. Note that if the last element of the path is a substring of the last element in request path, it is not a match (e.g. /foo/bar matches /foo/bar/baz, but does not match /foo/barbaz). * ImplementationSpecific: Interpretation of the Path matching is up to the IngressClass. Implementations can treat this as a separate PathType or treat it identically to Prefix or Exact path types. Implementations are required to support all path types.  # noqa: E501

        :return: The path_type of this V1HTTPIngressPath.  # noqa: E501
        :rtype: str
        """
        return self._path_type

    @path_type.setter
    def path_type(self, path_type):
        """Sets the path_type of this V1HTTPIngressPath.

        pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is done on a path element by element basis. A path element refers is the list of labels in the path split by the '/' separator. A request is a match for path p if every p is an element-wise prefix of p of the request path. Note that if the last element of the path is a substring of the last element in request path, it is not a match (e.g. /foo/bar matches /foo/bar/baz, but does not match /foo/barbaz). * ImplementationSpecific: Interpretation of the Path matching is up to the IngressClass. Implementations can treat this as a separate PathType or treat it identically to Prefix or Exact path types. Implementations are required to support all path types.  # noqa: E501

        :param path_type: The path_type of this V1HTTPIngressPath.  # noqa: E501
        :type: str
        :raises ValueError: when `path_type` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and path_type is None:  # noqa: E501
            raise ValueError("Invalid value for `path_type`, must not be `None`")  # noqa: E501

        self._path_type = path_type

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HTTPIngressPath):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HTTPIngressPath):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_rule_value.py b/contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_rule_value.py
new file mode 100644
index 0000000000..71dd6d7d4d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_http_ingress_rule_value.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1HTTPIngressRuleValue(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'paths': 'list[V1HTTPIngressPath]'
    }

    attribute_map = {
        'paths': 'paths'
    }

    def __init__(self, paths=None, local_vars_configuration=None):  # noqa: E501
        """V1HTTPIngressRuleValue - a model defined in OpenAPI

        :param paths: Required collection of paths mapping requests to
            backends (list[V1HTTPIngressPath]); raises ValueError via the
            setter when None and validation is enabled.
        :param local_vars_configuration: Configuration controlling client-side
            validation; a default one is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._paths = None
        self.discriminator = None

        # `paths` is required by the schema and always goes through its
        # validating setter.
        self.paths = paths

    @property
    def paths(self):
        """Gets the paths of this V1HTTPIngressRuleValue.  # noqa: E501

        paths is a collection of paths that map requests to backends.  # noqa: E501

        :return: The paths of this V1HTTPIngressRuleValue.  # noqa: E501
        :rtype: list[V1HTTPIngressPath]
        """
        return self._paths

    @paths.setter
    def paths(self, paths):
        """Sets the paths of this V1HTTPIngressRuleValue.

        paths is a collection of paths that map requests to backends.  # noqa: E501

        :param paths: The paths of this V1HTTPIngressRuleValue.  # noqa: E501
        :type: list[V1HTTPIngressPath]
        :raises ValueError: when `paths` is None and client-side validation is on.
        """
        if self.local_vars_configuration.client_side_validation and paths is None:  # noqa: E501
            raise ValueError("Invalid value for `paths`, must not be `None`")  # noqa: E501

        self._paths = paths

    def to_dict(self):
        """Return the model's properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Iterate keys directly rather than unpacking (key, value) pairs via
        # six.iteritems: the values are unused (PERF102) and this drops the
        # py2-era six dependency from the method.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HTTPIngressRuleValue):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HTTPIngressRuleValue):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress.py
new file mode 100644
index 0000000000..a9ee3ebf1e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Ingress(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps each attribute name to its declared OpenAPI type;
    # attribute_map maps each attribute name to its JSON key in the API
    # wire representation.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1IngressSpec',
        'status': 'V1IngressStatus'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1Ingress - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Route supplied (non-None) values through the property setters so
        # that any setter-side logic is applied uniformly.
        for attr, value in (('api_version', api_version), ('kind', kind),
                            ('metadata', metadata), ('spec', spec),
                            ('status', status)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def api_version(self):
        """Return the api_version of this V1Ingress.

        APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1Ingress.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1Ingress.

        :param api_version: The api_version of this V1Ingress.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Return the kind of this V1Ingress.

        Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1Ingress.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1Ingress.

        :param kind: The kind of this V1Ingress.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Return the metadata of this V1Ingress.

        :return: The metadata of this V1Ingress.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1Ingress.

        :param metadata: The metadata of this V1Ingress.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Return the spec of this V1Ingress.

        :return: The spec of this V1Ingress.
        :rtype: V1IngressSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the spec of this V1Ingress.

        :param spec: The spec of this V1Ingress.
        :type: V1IngressSpec
        """
        self._spec = spec

    @property
    def status(self):
        """Return the status of this V1Ingress.

        :return: The status of this V1Ingress.
        :rtype: V1IngressStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V1Ingress.

        :param status: The status of this V1Ingress.
        :type: V1IngressStatus
        """
        self._status = status

    def to_dict(self):
        """Serialize this model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep inside lists and dicts, matching the OpenAPI
        generator's standard behaviour.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for `print` and `pprint` output."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1Ingress with equal properties."""
        return (isinstance(other, V1Ingress)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when *other* is not an equal V1Ingress."""
        return (not isinstance(other, V1Ingress)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_backend.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_backend.py
new file mode 100644
index 0000000000..9346668911
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_backend.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressBackend(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps each attribute name to its declared OpenAPI type;
    # attribute_map maps each attribute name to its JSON key in the API
    # wire representation.
    openapi_types = {
        'resource': 'V1TypedLocalObjectReference',
        'service': 'V1IngressServiceBackend'
    }

    attribute_map = {
        'resource': 'resource',
        'service': 'service'
    }

    def __init__(self, resource=None, service=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressBackend - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._resource = None
        self._service = None
        self.discriminator = None

        # Route supplied (non-None) values through the property setters.
        for attr, value in (('resource', resource), ('service', service)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def resource(self):
        """Return the resource of this V1IngressBackend.

        :return: The resource of this V1IngressBackend.
        :rtype: V1TypedLocalObjectReference
        """
        return self._resource

    @resource.setter
    def resource(self, resource):
        """Set the resource of this V1IngressBackend.

        :param resource: The resource of this V1IngressBackend.
        :type: V1TypedLocalObjectReference
        """
        self._resource = resource

    @property
    def service(self):
        """Return the service of this V1IngressBackend.

        :return: The service of this V1IngressBackend.
        :rtype: V1IngressServiceBackend
        """
        return self._service

    @service.setter
    def service(self, service):
        """Set the service of this V1IngressBackend.

        :param service: The service of this V1IngressBackend.
        :type: V1IngressServiceBackend
        """
        self._service = service

    def to_dict(self):
        """Serialize this model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep inside lists and dicts, matching the OpenAPI
        generator's standard behaviour.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for `print` and `pprint` output."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1IngressBackend with equal properties."""
        return (isinstance(other, V1IngressBackend)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when *other* is not an equal V1IngressBackend."""
        return (not isinstance(other, V1IngressBackend)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class.py
new file mode 100644
index 0000000000..d027c19ca4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressClass(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps each attribute name to its declared OpenAPI type;
    # attribute_map maps each attribute name to its JSON key in the API
    # wire representation.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1IngressClassSpec'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressClass - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self.discriminator = None

        # Route supplied (non-None) values through the property setters.
        for attr, value in (('api_version', api_version), ('kind', kind),
                            ('metadata', metadata), ('spec', spec)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def api_version(self):
        """Return the api_version of this V1IngressClass.

        APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1IngressClass.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1IngressClass.

        :param api_version: The api_version of this V1IngressClass.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Return the kind of this V1IngressClass.

        Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1IngressClass.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1IngressClass.

        :param kind: The kind of this V1IngressClass.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Return the metadata of this V1IngressClass.

        :return: The metadata of this V1IngressClass.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1IngressClass.

        :param metadata: The metadata of this V1IngressClass.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Return the spec of this V1IngressClass.

        :return: The spec of this V1IngressClass.
        :rtype: V1IngressClassSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the spec of this V1IngressClass.

        :param spec: The spec of this V1IngressClass.
        :type: V1IngressClassSpec
        """
        self._spec = spec

    def to_dict(self):
        """Serialize this model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep inside lists and dicts, matching the OpenAPI
        generator's standard behaviour.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for `print` and `pprint` output."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1IngressClass with equal properties."""
        return (isinstance(other, V1IngressClass)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when *other* is not an equal V1IngressClass."""
        return (not isinstance(other, V1IngressClass)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_list.py
new file mode 100644
index 0000000000..ef4b312b13
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressClassList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps each attribute name to its declared OpenAPI type;
    # attribute_map maps each attribute name to its JSON key in the API
    # wire representation.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1IngressClass]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressClassList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is required by the schema, so it is always routed through
        # the setter (which enforces the None check when validation is on).
        self.items = items
        for attr, value in (('kind', kind), ('metadata', metadata)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def api_version(self):
        """Return the api_version of this V1IngressClassList.

        APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1IngressClassList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1IngressClassList.

        :param api_version: The api_version of this V1IngressClassList.
        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """Return the items of this V1IngressClassList.

        items is the list of IngressClasses.

        :return: The items of this V1IngressClassList.
        :rtype: list[V1IngressClass]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the items of this V1IngressClassList.

        :param items: The items of this V1IngressClassList.
        :type: list[V1IngressClass]
        :raises ValueError: if client-side validation is enabled and
            ``items`` is ``None`` (required field).
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Return the kind of this V1IngressClassList.

        Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1IngressClassList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1IngressClassList.

        :param kind: The kind of this V1IngressClassList.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Return the metadata of this V1IngressClassList.

        :return: The metadata of this V1IngressClassList.
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1IngressClassList.

        :param metadata: The metadata of this V1IngressClassList.
        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Serialize this model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep inside lists and dicts, matching the OpenAPI
        generator's standard behaviour.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for `print` and `pprint` output."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1IngressClassList with equal properties."""
        return (isinstance(other, V1IngressClassList)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when *other* is not an equal V1IngressClassList."""
        return (not isinstance(other, V1IngressClassList)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_parameters_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_parameters_reference.py
new file mode 100644
index 0000000000..ce1c3c9202
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_parameters_reference.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressClassParametersReference(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps each attribute name to its declared OpenAPI type;
    # attribute_map maps each attribute name to its JSON key in the API
    # wire representation.
    openapi_types = {
        'api_group': 'str',
        'kind': 'str',
        'name': 'str',
        'namespace': 'str',
        'scope': 'str'
    }

    attribute_map = {
        'api_group': 'apiGroup',
        'kind': 'kind',
        'name': 'name',
        'namespace': 'namespace',
        'scope': 'scope'
    }

    def __init__(self, api_group=None, kind=None, name=None, namespace=None, scope=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressClassParametersReference - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_group = None
        self._kind = None
        self._name = None
        self._namespace = None
        self._scope = None
        self.discriminator = None

        if api_group is not None:
            self.api_group = api_group
        # `kind` and `name` are required by the schema, so they are always
        # routed through their setters (which enforce the None check when
        # client-side validation is enabled).
        self.kind = kind
        self.name = name
        for attr, value in (('namespace', namespace), ('scope', scope)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def api_group(self):
        """Return the api_group of this V1IngressClassParametersReference.

        apiGroup is the group for the resource being referenced. If
        APIGroup is not specified, the specified Kind must be in the core
        API group. For any other third-party types, APIGroup is required.

        :return: The api_group of this V1IngressClassParametersReference.
        :rtype: str
        """
        return self._api_group

    @api_group.setter
    def api_group(self, api_group):
        """Set the api_group of this V1IngressClassParametersReference.

        :param api_group: The api_group of this V1IngressClassParametersReference.
        :type: str
        """
        self._api_group = api_group

    @property
    def kind(self):
        """Return the kind of this V1IngressClassParametersReference.

        kind is the type of resource being referenced.

        :return: The kind of this V1IngressClassParametersReference.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1IngressClassParametersReference.

        :param kind: The kind of this V1IngressClassParametersReference.
        :type: str
        :raises ValueError: if client-side validation is enabled and
            ``kind`` is ``None`` (required field).
        """
        if self.local_vars_configuration.client_side_validation and kind is None:  # noqa: E501
            raise ValueError("Invalid value for `kind`, must not be `None`")  # noqa: E501

        self._kind = kind

    @property
    def name(self):
        """Return the name of this V1IngressClassParametersReference.

        name is the name of resource being referenced.

        :return: The name of this V1IngressClassParametersReference.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this V1IngressClassParametersReference.

        :param name: The name of this V1IngressClassParametersReference.
        :type: str
        :raises ValueError: if client-side validation is enabled and
            ``name`` is ``None`` (required field).
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    @property
    def namespace(self):
        """Return the namespace of this V1IngressClassParametersReference.

        namespace is the namespace of the resource being referenced. This
        field is required when scope is set to \"Namespace\" and must be
        unset when scope is set to \"Cluster\".

        :return: The namespace of this V1IngressClassParametersReference.
        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Set the namespace of this V1IngressClassParametersReference.

        :param namespace: The namespace of this V1IngressClassParametersReference.
        :type: str
        """
        self._namespace = namespace

    @property
    def scope(self):
        """Return the scope of this V1IngressClassParametersReference.

        scope represents if this refers to a cluster or namespace scoped
        resource. This may be set to \"Cluster\" (default) or \"Namespace\".

        :return: The scope of this V1IngressClassParametersReference.
        :rtype: str
        """
        return self._scope

    @scope.setter
    def scope(self, scope):
        """Set the scope of this V1IngressClassParametersReference.

        :param scope: The scope of this V1IngressClassParametersReference.
        :type: str
        """
        self._scope = scope

    def to_dict(self):
        """Serialize this model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized one
        level deep inside lists and dicts, matching the OpenAPI
        generator's standard behaviour.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for `print` and `pprint` output."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1IngressClassParametersReference with equal properties."""
        return (isinstance(other, V1IngressClassParametersReference)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when *other* is not an equal V1IngressClassParametersReference."""
        return (not isinstance(other, V1IngressClassParametersReference)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_spec.py
new file mode 100644
index 0000000000..869dbacd76
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_class_spec.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1IngressClassSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'controller': 'str',
+ 'parameters': 'V1IngressClassParametersReference'
+ }
+
+ attribute_map = {
+ 'controller': 'controller',
+ 'parameters': 'parameters'
+ }
+
+ def __init__(self, controller=None, parameters=None, local_vars_configuration=None): # noqa: E501
+ """V1IngressClassSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._controller = None
+ self._parameters = None
+ self.discriminator = None
+
+ if controller is not None:
+ self.controller = controller
+ if parameters is not None:
+ self.parameters = parameters
+
+ @property
+ def controller(self):
+ """Gets the controller of this V1IngressClassSpec. # noqa: E501
+
+ controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
+
+ :return: The controller of this V1IngressClassSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._controller
+
+ @controller.setter
+ def controller(self, controller):
+ """Sets the controller of this V1IngressClassSpec.
+
+ controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable. # noqa: E501
+
+ :param controller: The controller of this V1IngressClassSpec. # noqa: E501
+ :type: str
+ """
+
+ self._controller = controller
+
+ @property
+ def parameters(self):
+ """Gets the parameters of this V1IngressClassSpec. # noqa: E501
+
+
+ :return: The parameters of this V1IngressClassSpec. # noqa: E501
+ :rtype: V1IngressClassParametersReference
+ """
+ return self._parameters
+
+ @parameters.setter
+ def parameters(self, parameters):
+ """Sets the parameters of this V1IngressClassSpec.
+
+
+ :param parameters: The parameters of this V1IngressClassSpec. # noqa: E501
+ :type: V1IngressClassParametersReference
+ """
+
+ self._parameters = parameters
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1IngressClassSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1IngressClassSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_list.py
new file mode 100644
index 0000000000..ea76125435
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1IngressList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1Ingress]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1IngressList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1IngressList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1IngressList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1IngressList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1IngressList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1IngressList. # noqa: E501
+
+ items is the list of Ingress. # noqa: E501
+
+ :return: The items of this V1IngressList. # noqa: E501
+ :rtype: list[V1Ingress]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1IngressList.
+
+ items is the list of Ingress. # noqa: E501
+
+ :param items: The items of this V1IngressList. # noqa: E501
+ :type: list[V1Ingress]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1IngressList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1IngressList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1IngressList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1IngressList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1IngressList. # noqa: E501
+
+
+ :return: The metadata of this V1IngressList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1IngressList.
+
+
+ :param metadata: The metadata of this V1IngressList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1IngressList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1IngressList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_ingress.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_ingress.py
new file mode 100644
index 0000000000..abdd8b906b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_ingress.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1IngressLoadBalancerIngress(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'hostname': 'str',
+ 'ip': 'str',
+ 'ports': 'list[V1IngressPortStatus]'
+ }
+
+ attribute_map = {
+ 'hostname': 'hostname',
+ 'ip': 'ip',
+ 'ports': 'ports'
+ }
+
+ def __init__(self, hostname=None, ip=None, ports=None, local_vars_configuration=None): # noqa: E501
+ """V1IngressLoadBalancerIngress - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._hostname = None
+ self._ip = None
+ self._ports = None
+ self.discriminator = None
+
+ if hostname is not None:
+ self.hostname = hostname
+ if ip is not None:
+ self.ip = ip
+ if ports is not None:
+ self.ports = ports
+
+ @property
+ def hostname(self):
+ """Gets the hostname of this V1IngressLoadBalancerIngress. # noqa: E501
+
+ hostname is set for load-balancer ingress points that are DNS based. # noqa: E501
+
+ :return: The hostname of this V1IngressLoadBalancerIngress. # noqa: E501
+ :rtype: str
+ """
+ return self._hostname
+
+ @hostname.setter
+ def hostname(self, hostname):
+ """Sets the hostname of this V1IngressLoadBalancerIngress.
+
+ hostname is set for load-balancer ingress points that are DNS based. # noqa: E501
+
+ :param hostname: The hostname of this V1IngressLoadBalancerIngress. # noqa: E501
+ :type: str
+ """
+
+ self._hostname = hostname
+
+ @property
+ def ip(self):
+ """Gets the ip of this V1IngressLoadBalancerIngress. # noqa: E501
+
+ ip is set for load-balancer ingress points that are IP based. # noqa: E501
+
+ :return: The ip of this V1IngressLoadBalancerIngress. # noqa: E501
+ :rtype: str
+ """
+ return self._ip
+
+ @ip.setter
+ def ip(self, ip):
+ """Sets the ip of this V1IngressLoadBalancerIngress.
+
+ ip is set for load-balancer ingress points that are IP based. # noqa: E501
+
+ :param ip: The ip of this V1IngressLoadBalancerIngress. # noqa: E501
+ :type: str
+ """
+
+ self._ip = ip
+
+ @property
+ def ports(self):
+ """Gets the ports of this V1IngressLoadBalancerIngress. # noqa: E501
+
+ ports provides information about the ports exposed by this LoadBalancer. # noqa: E501
+
+ :return: The ports of this V1IngressLoadBalancerIngress. # noqa: E501
+ :rtype: list[V1IngressPortStatus]
+ """
+ return self._ports
+
+ @ports.setter
+ def ports(self, ports):
+ """Sets the ports of this V1IngressLoadBalancerIngress.
+
+ ports provides information about the ports exposed by this LoadBalancer. # noqa: E501
+
+ :param ports: The ports of this V1IngressLoadBalancerIngress. # noqa: E501
+ :type: list[V1IngressPortStatus]
+ """
+
+ self._ports = ports
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1IngressLoadBalancerIngress):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1IngressLoadBalancerIngress):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_status.py
new file mode 100644
index 0000000000..467dae8093
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_load_balancer_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1IngressLoadBalancerStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'ingress': 'list[V1IngressLoadBalancerIngress]'
+ }
+
+ attribute_map = {
+ 'ingress': 'ingress'
+ }
+
+ def __init__(self, ingress=None, local_vars_configuration=None): # noqa: E501
+ """V1IngressLoadBalancerStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._ingress = None
+ self.discriminator = None
+
+ if ingress is not None:
+ self.ingress = ingress
+
+ @property
+ def ingress(self):
+ """Gets the ingress of this V1IngressLoadBalancerStatus. # noqa: E501
+
+ ingress is a list containing ingress points for the load-balancer. # noqa: E501
+
+ :return: The ingress of this V1IngressLoadBalancerStatus. # noqa: E501
+ :rtype: list[V1IngressLoadBalancerIngress]
+ """
+ return self._ingress
+
+ @ingress.setter
+ def ingress(self, ingress):
+ """Sets the ingress of this V1IngressLoadBalancerStatus.
+
+ ingress is a list containing ingress points for the load-balancer. # noqa: E501
+
+ :param ingress: The ingress of this V1IngressLoadBalancerStatus. # noqa: E501
+ :type: list[V1IngressLoadBalancerIngress]
+ """
+
+ self._ingress = ingress
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1IngressLoadBalancerStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1IngressLoadBalancerStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_port_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_port_status.py
new file mode 100644
index 0000000000..de9105eda1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_port_status.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1IngressPortStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'error': 'str',
+ 'port': 'int',
+ 'protocol': 'str'
+ }
+
+ attribute_map = {
+ 'error': 'error',
+ 'port': 'port',
+ 'protocol': 'protocol'
+ }
+
+ def __init__(self, error=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501
+ """V1IngressPortStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._error = None
+ self._port = None
+ self._protocol = None
+ self.discriminator = None
+
+ if error is not None:
+ self.error = error
+ self.port = port
+ self.protocol = protocol
+
+ @property
+ def error(self):
+ """Gets the error of this V1IngressPortStatus. # noqa: E501
+
+ error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. # noqa: E501
+
+ :return: The error of this V1IngressPortStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._error
+
+ @error.setter
+ def error(self, error):
+ """Sets the error of this V1IngressPortStatus.
+
+ error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. # noqa: E501
+
+ :param error: The error of this V1IngressPortStatus. # noqa: E501
+ :type: str
+ """
+
+ self._error = error
+
+ @property
+ def port(self):
+ """Gets the port of this V1IngressPortStatus. # noqa: E501
+
+ port is the port number of the ingress port. # noqa: E501
+
+ :return: The port of this V1IngressPortStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this V1IngressPortStatus.
+
+ port is the port number of the ingress port. # noqa: E501
+
+ :param port: The port of this V1IngressPortStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
+ raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
+
+ self._port = port
+
+ @property
+ def protocol(self):
+ """Gets the protocol of this V1IngressPortStatus. # noqa: E501
+
+ protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\" # noqa: E501
+
+ :return: The protocol of this V1IngressPortStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, protocol):
+ """Sets the protocol of this V1IngressPortStatus.
+
+ protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\" # noqa: E501
+
+ :param protocol: The protocol of this V1IngressPortStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and protocol is None: # noqa: E501
+ raise ValueError("Invalid value for `protocol`, must not be `None`") # noqa: E501
+
+ self._protocol = protocol
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1IngressPortStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1IngressPortStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_rule.py
new file mode 100644
index 0000000000..536a16a904
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_rule.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1IngressRule(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'host': 'str',
+ 'http': 'V1HTTPIngressRuleValue'
+ }
+
+ attribute_map = {
+ 'host': 'host',
+ 'http': 'http'
+ }
+
+ def __init__(self, host=None, http=None, local_vars_configuration=None): # noqa: E501
+ """V1IngressRule - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._host = None
+ self._http = None
+ self.discriminator = None
+
+ if host is not None:
+ self.host = host
+ if http is not None:
+ self.http = http
+
+ @property
+ def host(self):
+ """Gets the host of this V1IngressRule. # noqa: E501
+
+ host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the IP in the Spec of the parent Ingress. 2. The `:` delimiter is not respected because ports are not allowed. Currently the port of an Ingress is implicitly :80 for http and :443 for https. Both these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue. host can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule. # noqa: E501
+
+ :return: The host of this V1IngressRule. # noqa: E501
+ :rtype: str
+ """
+ return self._host
+
+ @host.setter
+ def host(self, host):
+ """Sets the host of this V1IngressRule.
+
+ host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the IP in the Spec of the parent Ingress. 2. The `:` delimiter is not respected because ports are not allowed. Currently the port of an Ingress is implicitly :80 for http and :443 for https. Both these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue. host can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule. # noqa: E501
+
+ :param host: The host of this V1IngressRule. # noqa: E501
+ :type: str
+ """
+
+ self._host = host
+
+ @property
+ def http(self):
+ """Gets the http of this V1IngressRule. # noqa: E501
+
+
+ :return: The http of this V1IngressRule. # noqa: E501
+ :rtype: V1HTTPIngressRuleValue
+ """
+ return self._http
+
+ @http.setter
+ def http(self, http):
+ """Sets the http of this V1IngressRule.
+
+
+ :param http: The http of this V1IngressRule. # noqa: E501
+ :type: V1HTTPIngressRuleValue
+ """
+
+ self._http = http
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1IngressRule):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1IngressRule):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_service_backend.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_service_backend.py
new file mode 100644
index 0000000000..d7785ddeee
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_service_backend.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressServiceBackend(object):
    """Auto-generated OpenAPI model (Kubernetes release-1.28).

    References a Kubernetes Service as an Ingress backend: a required
    Service ``name`` (same namespace as the Ingress) and an optional
    ``port`` (V1ServiceBackendPort). The public interface mirrors the
    OpenAPI Generator original and must stay in sync with it.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'name': 'str',
        'port': 'V1ServiceBackendPort'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'name': 'name',
        'port': 'port'
    }

    def __init__(self, name=None, port=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressServiceBackend - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._name = None
        self._port = None
        self.discriminator = None

        # `name` is required: always route through the validating setter.
        self.name = name
        if port is not None:
            self.port = port

    @property
    def name(self):
        """str: name of the referenced Service (must exist in the same
        namespace as the Ingress object)."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the Service name; None is rejected while client-side
        validation is enabled."""
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def port(self):
        """V1ServiceBackendPort: port of the referenced Service."""
        return self._port

    @port.setter
    def port(self, port):
        """Set the Service port."""
        self._port = port

    def to_dict(self):
        """Return the model's properties as a plain dict (recursively)."""
        def _plain(value):
            # Unwrap nested models exposing to_dict(), inside containers too.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the model rendered as a pretty-printed string."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff `other` is the same model type with an identical dict."""
        return isinstance(other, V1IngressServiceBackend) and \
            self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_spec.py
new file mode 100644
index 0000000000..fbf0f24116
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_spec.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressSpec(object):
    """Auto-generated OpenAPI model (Kubernetes release-1.28).

    Spec of an Ingress: an optional default backend, an optional
    IngressClass name, host rules, and TLS configuration. The public
    interface mirrors the OpenAPI Generator original and must stay in
    sync with it.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'default_backend': 'V1IngressBackend',
        'ingress_class_name': 'str',
        'rules': 'list[V1IngressRule]',
        'tls': 'list[V1IngressTLS]'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'default_backend': 'defaultBackend',
        'ingress_class_name': 'ingressClassName',
        'rules': 'rules',
        'tls': 'tls'
    }

    def __init__(self, default_backend=None, ingress_class_name=None, rules=None, tls=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressSpec - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._default_backend = None
        self._ingress_class_name = None
        self._rules = None
        self._tls = None
        self.discriminator = None

        # All fields are optional; only assign the ones the caller supplied.
        if default_backend is not None:
            self.default_backend = default_backend
        if ingress_class_name is not None:
            self.ingress_class_name = ingress_class_name
        if rules is not None:
            self.rules = rules
        if tls is not None:
            self.tls = tls

    @property
    def default_backend(self):
        """V1IngressBackend: backend used when no rule matches."""
        return self._default_backend

    @default_backend.setter
    def default_backend(self, default_backend):
        """Set the default backend."""
        self._default_backend = default_backend

    @property
    def ingress_class_name(self):
        """str: name of the IngressClass cluster resource that should serve
        this Ingress (successor of the deprecated but still honored
        `kubernetes.io/ingress.class` annotation)."""
        return self._ingress_class_name

    @ingress_class_name.setter
    def ingress_class_name(self, ingress_class_name):
        """Set the IngressClass name."""
        self._ingress_class_name = ingress_class_name

    @property
    def rules(self):
        """list[V1IngressRule]: host rules; when unspecified or nothing
        matches, traffic goes to the default backend."""
        return self._rules

    @rules.setter
    def rules(self, rules):
        """Set the host rules."""
        self._rules = rules

    @property
    def tls(self):
        """list[V1IngressTLS]: TLS configuration (single TLS port, 443;
        hosts are multiplexed via SNI when the controller supports it)."""
        return self._tls

    @tls.setter
    def tls(self, tls):
        """Set the TLS configuration."""
        self._tls = tls

    def to_dict(self):
        """Return the model's properties as a plain dict (recursively)."""
        def _plain(value):
            # Unwrap nested models exposing to_dict(), inside containers too.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the model rendered as a pretty-printed string."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff `other` is the same model type with an identical dict."""
        return isinstance(other, V1IngressSpec) and \
            self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_status.py
new file mode 100644
index 0000000000..9166d09b4a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_status.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressStatus(object):
    """Auto-generated OpenAPI model (Kubernetes release-1.28).

    Status of an Ingress: a single optional load-balancer status field.
    The public interface mirrors the OpenAPI Generator original and must
    stay in sync with it.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'load_balancer': 'V1IngressLoadBalancerStatus'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'load_balancer': 'loadBalancer'
    }

    def __init__(self, load_balancer=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressStatus - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._load_balancer = None
        self.discriminator = None

        # Optional field; only assign when supplied.
        if load_balancer is not None:
            self.load_balancer = load_balancer

    @property
    def load_balancer(self):
        """V1IngressLoadBalancerStatus: load-balancer status of the Ingress."""
        return self._load_balancer

    @load_balancer.setter
    def load_balancer(self, load_balancer):
        """Set the load-balancer status."""
        self._load_balancer = load_balancer

    def to_dict(self):
        """Return the model's properties as a plain dict (recursively)."""
        def _plain(value):
            # Unwrap nested models exposing to_dict(), inside containers too.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the model rendered as a pretty-printed string."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff `other` is the same model type with an identical dict."""
        return isinstance(other, V1IngressStatus) and \
            self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_tls.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_tls.py
new file mode 100644
index 0000000000..d81c326c98
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ingress_tls.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IngressTLS(object):
    """Auto-generated OpenAPI model (Kubernetes release-1.28).

    TLS configuration for an Ingress: the hosts covered by the TLS
    certificate and the Secret that terminates traffic on port 443.
    The public interface mirrors the OpenAPI Generator original and must
    stay in sync with it.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'hosts': 'list[str]',
        'secret_name': 'str'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'hosts': 'hosts',
        'secret_name': 'secretName'
    }

    def __init__(self, hosts=None, secret_name=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressTLS - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._hosts = None
        self._secret_name = None
        self.discriminator = None

        # Both fields are optional; only assign when supplied.
        if hosts is not None:
            self.hosts = hosts
        if secret_name is not None:
            self.secret_name = secret_name

    @property
    def hosts(self):
        """list[str]: hosts included in the TLS certificate; values must
        match the names used in the TLS secret. Defaults to the controller's
        wildcard host setting when left unspecified."""
        return self._hosts

    @hosts.setter
    def hosts(self, hosts):
        """Set the certificate host list."""
        self._hosts = hosts

    @property
    def secret_name(self):
        """str: name of the secret used to terminate TLS traffic on port
        443. Optional, to allow SNI-only TLS routing; on a conflict between
        the SNI host and the "Host" header, SNI wins for termination and the
        header is used for routing."""
        return self._secret_name

    @secret_name.setter
    def secret_name(self, secret_name):
        """Set the TLS secret name."""
        self._secret_name = secret_name

    def to_dict(self):
        """Return the model's properties as a plain dict (recursively)."""
        def _plain(value):
            # Unwrap nested models exposing to_dict(), inside containers too.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the model rendered as a pretty-printed string."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff `other` is the same model type with an identical dict."""
        return isinstance(other, V1IngressTLS) and \
            self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_ip_block.py b/contrib/python/kubernetes/kubernetes/client/models/v1_ip_block.py
new file mode 100644
index 0000000000..37f5a9373f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_ip_block.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1IPBlock(object):
    """Auto-generated OpenAPI model (Kubernetes release-1.28).

    Describes an IP block: a required `cidr` (e.g. "192.168.1.0/24" or
    "2001:db8::/64") and an optional `_except` list of CIDRs excluded
    from the block. The Python attribute is `_except` because `except`
    is a keyword; its JSON key is still "except". The public interface
    mirrors the OpenAPI Generator original and must stay in sync with it.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'cidr': 'str',
        '_except': 'list[str]'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'cidr': 'cidr',
        '_except': 'except'
    }

    def __init__(self, cidr=None, _except=None, local_vars_configuration=None):  # noqa: E501
        """V1IPBlock - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._cidr = None
        self.__except = None
        self.discriminator = None

        # `cidr` is required: always route through the validating setter.
        self.cidr = cidr
        if _except is not None:
            self._except = _except

    @property
    def cidr(self):
        """str: CIDR representing the IP block, e.g. "192.168.1.0/24" or
        "2001:db8::/64"."""
        return self._cidr

    @cidr.setter
    def cidr(self, cidr):
        """Set the CIDR; None is rejected while client-side validation is
        enabled."""
        if self.local_vars_configuration.client_side_validation and cidr is None:  # noqa: E501
            raise ValueError("Invalid value for `cidr`, must not be `None`")  # noqa: E501
        self._cidr = cidr

    @property
    def _except(self):
        """list[str]: CIDRs excluded from the block; values outside the
        `cidr` range will be rejected by the API server."""
        return self.__except

    @_except.setter
    def _except(self, _except):
        """Set the excluded CIDR list."""
        self.__except = _except

    def to_dict(self):
        """Return the model's properties as a plain dict (recursively)."""
        def _plain(value):
            # Unwrap nested models exposing to_dict(), inside containers too.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the model rendered as a pretty-printed string."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff `other` is the same model type with an identical dict."""
        return isinstance(other, V1IPBlock) and \
            self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_persistent_volume_source.py
new file mode 100644
index 0000000000..cd692e9712
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_persistent_volume_source.py
@@ -0,0 +1,403 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ISCSIPersistentVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'chap_auth_discovery': 'bool',
+ 'chap_auth_session': 'bool',
+ 'fs_type': 'str',
+ 'initiator_name': 'str',
+ 'iqn': 'str',
+ 'iscsi_interface': 'str',
+ 'lun': 'int',
+ 'portals': 'list[str]',
+ 'read_only': 'bool',
+ 'secret_ref': 'V1SecretReference',
+ 'target_portal': 'str'
+ }
+
+ attribute_map = {
+ 'chap_auth_discovery': 'chapAuthDiscovery',
+ 'chap_auth_session': 'chapAuthSession',
+ 'fs_type': 'fsType',
+ 'initiator_name': 'initiatorName',
+ 'iqn': 'iqn',
+ 'iscsi_interface': 'iscsiInterface',
+ 'lun': 'lun',
+ 'portals': 'portals',
+ 'read_only': 'readOnly',
+ 'secret_ref': 'secretRef',
+ 'target_portal': 'targetPortal'
+ }
+
    def __init__(self, chap_auth_discovery=None, chap_auth_session=None, fs_type=None, initiator_name=None, iqn=None, iscsi_interface=None, lun=None, portals=None, read_only=None, secret_ref=None, target_portal=None, local_vars_configuration=None):  # noqa: E501
        """V1ISCSIPersistentVolumeSource - a model defined in OpenAPI

        All arguments default to None; `iqn`, `lun` and `target_portal` are
        required by the schema and are always routed through their setters
        (which may raise ValueError under client-side validation).
        """  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Private backing fields for all declared attributes.
        self._chap_auth_discovery = None
        self._chap_auth_session = None
        self._fs_type = None
        self._initiator_name = None
        self._iqn = None
        self._iscsi_interface = None
        self._lun = None
        self._portals = None
        self._read_only = None
        self._secret_ref = None
        self._target_portal = None
        self.discriminator = None

        # Optional fields are assigned only when provided; the required
        # fields (iqn, lun, target_portal) are assigned unconditionally so
        # their validating setters can reject None.
        if chap_auth_discovery is not None:
            self.chap_auth_discovery = chap_auth_discovery
        if chap_auth_session is not None:
            self.chap_auth_session = chap_auth_session
        if fs_type is not None:
            self.fs_type = fs_type
        if initiator_name is not None:
            self.initiator_name = initiator_name
        self.iqn = iqn
        if iscsi_interface is not None:
            self.iscsi_interface = iscsi_interface
        self.lun = lun
        if portals is not None:
            self.portals = portals
        if read_only is not None:
            self.read_only = read_only
        if secret_ref is not None:
            self.secret_ref = secret_ref
        self.target_portal = target_portal
+
+ @property
+ def chap_auth_discovery(self):
+ """Gets the chap_auth_discovery of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication # noqa: E501
+
+ :return: The chap_auth_discovery of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._chap_auth_discovery
+
+ @chap_auth_discovery.setter
+ def chap_auth_discovery(self, chap_auth_discovery):
+ """Sets the chap_auth_discovery of this V1ISCSIPersistentVolumeSource.
+
+ chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication # noqa: E501
+
+ :param chap_auth_discovery: The chap_auth_discovery of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._chap_auth_discovery = chap_auth_discovery
+
+ @property
+ def chap_auth_session(self):
+ """Gets the chap_auth_session of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ chapAuthSession defines whether support iSCSI Session CHAP authentication # noqa: E501
+
+ :return: The chap_auth_session of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._chap_auth_session
+
+ @chap_auth_session.setter
+ def chap_auth_session(self, chap_auth_session):
+ """Sets the chap_auth_session of this V1ISCSIPersistentVolumeSource.
+
+ chapAuthSession defines whether support iSCSI Session CHAP authentication # noqa: E501
+
+ :param chap_auth_session: The chap_auth_session of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._chap_auth_session = chap_auth_session
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi # noqa: E501
+
+ :return: The fs_type of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1ISCSIPersistentVolumeSource.
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi # noqa: E501
+
+ :param fs_type: The fs_type of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def initiator_name(self):
+ """Gets the initiator_name of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. # noqa: E501
+
+ :return: The initiator_name of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._initiator_name
+
+ @initiator_name.setter
+ def initiator_name(self, initiator_name):
+ """Sets the initiator_name of this V1ISCSIPersistentVolumeSource.
+
+ initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. # noqa: E501
+
+ :param initiator_name: The initiator_name of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._initiator_name = initiator_name
+
+ @property
+ def iqn(self):
+ """Gets the iqn of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ iqn is Target iSCSI Qualified Name. # noqa: E501
+
+ :return: The iqn of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._iqn
+
+ @iqn.setter
+ def iqn(self, iqn):
+ """Sets the iqn of this V1ISCSIPersistentVolumeSource.
+
+ iqn is Target iSCSI Qualified Name. # noqa: E501
+
+ :param iqn: The iqn of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and iqn is None: # noqa: E501
+ raise ValueError("Invalid value for `iqn`, must not be `None`") # noqa: E501
+
+ self._iqn = iqn
+
+ @property
+ def iscsi_interface(self):
+ """Gets the iscsi_interface of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). # noqa: E501
+
+ :return: The iscsi_interface of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._iscsi_interface
+
+ @iscsi_interface.setter
+ def iscsi_interface(self, iscsi_interface):
+ """Sets the iscsi_interface of this V1ISCSIPersistentVolumeSource.
+
+ iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). # noqa: E501
+
+ :param iscsi_interface: The iscsi_interface of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._iscsi_interface = iscsi_interface
+
+ @property
+ def lun(self):
+ """Gets the lun of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ lun is iSCSI Target Lun number. # noqa: E501
+
+ :return: The lun of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: int
+ """
+ return self._lun
+
+ @lun.setter
+ def lun(self, lun):
+ """Sets the lun of this V1ISCSIPersistentVolumeSource.
+
+ lun is iSCSI Target Lun number. # noqa: E501
+
+ :param lun: The lun of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and lun is None: # noqa: E501
+ raise ValueError("Invalid value for `lun`, must not be `None`") # noqa: E501
+
+ self._lun = lun
+
+ @property
+ def portals(self):
+ """Gets the portals of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :return: The portals of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._portals
+
+ @portals.setter
+ def portals(self, portals):
+ """Sets the portals of this V1ISCSIPersistentVolumeSource.
+
+ portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :param portals: The portals of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: list[str]
+ """
+
+ self._portals = portals
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. # noqa: E501
+
+ :return: The read_only of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1ISCSIPersistentVolumeSource.
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. # noqa: E501
+
+ :param read_only: The read_only of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: V1SecretReference
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1ISCSIPersistentVolumeSource.
+
+
+ :param secret_ref: The secret_ref of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: V1SecretReference
+ """
+
+ self._secret_ref = secret_ref
+
+ @property
+ def target_portal(self):
+ """Gets the target_portal of this V1ISCSIPersistentVolumeSource. # noqa: E501
+
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :return: The target_portal of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._target_portal
+
+ @target_portal.setter
+ def target_portal(self, target_portal):
+ """Sets the target_portal of this V1ISCSIPersistentVolumeSource.
+
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :param target_portal: The target_portal of this V1ISCSIPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and target_portal is None: # noqa: E501
+ raise ValueError("Invalid value for `target_portal`, must not be `None`") # noqa: E501
+
+ self._target_portal = target_portal
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ISCSIPersistentVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ISCSIPersistentVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_volume_source.py
new file mode 100644
index 0000000000..bc1645effd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_iscsi_volume_source.py
@@ -0,0 +1,403 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ISCSIVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'chap_auth_discovery': 'bool',
+ 'chap_auth_session': 'bool',
+ 'fs_type': 'str',
+ 'initiator_name': 'str',
+ 'iqn': 'str',
+ 'iscsi_interface': 'str',
+ 'lun': 'int',
+ 'portals': 'list[str]',
+ 'read_only': 'bool',
+ 'secret_ref': 'V1LocalObjectReference',
+ 'target_portal': 'str'
+ }
+
+ attribute_map = {
+ 'chap_auth_discovery': 'chapAuthDiscovery',
+ 'chap_auth_session': 'chapAuthSession',
+ 'fs_type': 'fsType',
+ 'initiator_name': 'initiatorName',
+ 'iqn': 'iqn',
+ 'iscsi_interface': 'iscsiInterface',
+ 'lun': 'lun',
+ 'portals': 'portals',
+ 'read_only': 'readOnly',
+ 'secret_ref': 'secretRef',
+ 'target_portal': 'targetPortal'
+ }
+
+ def __init__(self, chap_auth_discovery=None, chap_auth_session=None, fs_type=None, initiator_name=None, iqn=None, iscsi_interface=None, lun=None, portals=None, read_only=None, secret_ref=None, target_portal=None, local_vars_configuration=None): # noqa: E501
+ """V1ISCSIVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._chap_auth_discovery = None
+ self._chap_auth_session = None
+ self._fs_type = None
+ self._initiator_name = None
+ self._iqn = None
+ self._iscsi_interface = None
+ self._lun = None
+ self._portals = None
+ self._read_only = None
+ self._secret_ref = None
+ self._target_portal = None
+ self.discriminator = None
+
+ if chap_auth_discovery is not None:
+ self.chap_auth_discovery = chap_auth_discovery
+ if chap_auth_session is not None:
+ self.chap_auth_session = chap_auth_session
+ if fs_type is not None:
+ self.fs_type = fs_type
+ if initiator_name is not None:
+ self.initiator_name = initiator_name
+ self.iqn = iqn
+ if iscsi_interface is not None:
+ self.iscsi_interface = iscsi_interface
+ self.lun = lun
+ if portals is not None:
+ self.portals = portals
+ if read_only is not None:
+ self.read_only = read_only
+ if secret_ref is not None:
+ self.secret_ref = secret_ref
+ self.target_portal = target_portal
+
+ @property
+ def chap_auth_discovery(self):
+ """Gets the chap_auth_discovery of this V1ISCSIVolumeSource. # noqa: E501
+
+ chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication # noqa: E501
+
+ :return: The chap_auth_discovery of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._chap_auth_discovery
+
+ @chap_auth_discovery.setter
+ def chap_auth_discovery(self, chap_auth_discovery):
+ """Sets the chap_auth_discovery of this V1ISCSIVolumeSource.
+
+ chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication # noqa: E501
+
+ :param chap_auth_discovery: The chap_auth_discovery of this V1ISCSIVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._chap_auth_discovery = chap_auth_discovery
+
+ @property
+ def chap_auth_session(self):
+ """Gets the chap_auth_session of this V1ISCSIVolumeSource. # noqa: E501
+
+ chapAuthSession defines whether support iSCSI Session CHAP authentication # noqa: E501
+
+ :return: The chap_auth_session of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._chap_auth_session
+
+ @chap_auth_session.setter
+ def chap_auth_session(self, chap_auth_session):
+ """Sets the chap_auth_session of this V1ISCSIVolumeSource.
+
+ chapAuthSession defines whether support iSCSI Session CHAP authentication # noqa: E501
+
+ :param chap_auth_session: The chap_auth_session of this V1ISCSIVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._chap_auth_session = chap_auth_session
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1ISCSIVolumeSource. # noqa: E501
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi # noqa: E501
+
+ :return: The fs_type of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1ISCSIVolumeSource.
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi # noqa: E501
+
+ :param fs_type: The fs_type of this V1ISCSIVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def initiator_name(self):
+ """Gets the initiator_name of this V1ISCSIVolumeSource. # noqa: E501
+
+ initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. # noqa: E501
+
+ :return: The initiator_name of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._initiator_name
+
+ @initiator_name.setter
+ def initiator_name(self, initiator_name):
+ """Sets the initiator_name of this V1ISCSIVolumeSource.
+
+ initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. # noqa: E501
+
+ :param initiator_name: The initiator_name of this V1ISCSIVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._initiator_name = initiator_name
+
+ @property
+ def iqn(self):
+ """Gets the iqn of this V1ISCSIVolumeSource. # noqa: E501
+
+ iqn is the target iSCSI Qualified Name. # noqa: E501
+
+ :return: The iqn of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._iqn
+
+ @iqn.setter
+ def iqn(self, iqn):
+ """Sets the iqn of this V1ISCSIVolumeSource.
+
+ iqn is the target iSCSI Qualified Name. # noqa: E501
+
+ :param iqn: The iqn of this V1ISCSIVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and iqn is None: # noqa: E501
+ raise ValueError("Invalid value for `iqn`, must not be `None`") # noqa: E501
+
+ self._iqn = iqn
+
+ @property
+ def iscsi_interface(self):
+ """Gets the iscsi_interface of this V1ISCSIVolumeSource. # noqa: E501
+
+ iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). # noqa: E501
+
+ :return: The iscsi_interface of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._iscsi_interface
+
+ @iscsi_interface.setter
+ def iscsi_interface(self, iscsi_interface):
+ """Sets the iscsi_interface of this V1ISCSIVolumeSource.
+
+ iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). # noqa: E501
+
+ :param iscsi_interface: The iscsi_interface of this V1ISCSIVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._iscsi_interface = iscsi_interface
+
+ @property
+ def lun(self):
+ """Gets the lun of this V1ISCSIVolumeSource. # noqa: E501
+
+ lun represents iSCSI Target Lun number. # noqa: E501
+
+ :return: The lun of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: int
+ """
+ return self._lun
+
+ @lun.setter
+ def lun(self, lun):
+ """Sets the lun of this V1ISCSIVolumeSource.
+
+ lun represents iSCSI Target Lun number. # noqa: E501
+
+ :param lun: The lun of this V1ISCSIVolumeSource. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and lun is None: # noqa: E501
+ raise ValueError("Invalid value for `lun`, must not be `None`") # noqa: E501
+
+ self._lun = lun
+
+ @property
+ def portals(self):
+ """Gets the portals of this V1ISCSIVolumeSource. # noqa: E501
+
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :return: The portals of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._portals
+
+ @portals.setter
+ def portals(self, portals):
+ """Sets the portals of this V1ISCSIVolumeSource.
+
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :param portals: The portals of this V1ISCSIVolumeSource. # noqa: E501
+ :type: list[str]
+ """
+
+ self._portals = portals
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1ISCSIVolumeSource. # noqa: E501
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. # noqa: E501
+
+ :return: The read_only of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1ISCSIVolumeSource.
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. # noqa: E501
+
+ :param read_only: The read_only of this V1ISCSIVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1ISCSIVolumeSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: V1LocalObjectReference
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1ISCSIVolumeSource.
+
+
+ :param secret_ref: The secret_ref of this V1ISCSIVolumeSource. # noqa: E501
+ :type: V1LocalObjectReference
+ """
+
+ self._secret_ref = secret_ref
+
+ @property
+ def target_portal(self):
+ """Gets the target_portal of this V1ISCSIVolumeSource. # noqa: E501
+
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :return: The target_portal of this V1ISCSIVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._target_portal
+
+ @target_portal.setter
+ def target_portal(self, target_portal):
+ """Sets the target_portal of this V1ISCSIVolumeSource.
+
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). # noqa: E501
+
+ :param target_portal: The target_portal of this V1ISCSIVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and target_portal is None: # noqa: E501
+ raise ValueError("Invalid value for `target_portal`, must not be `None`") # noqa: E501
+
+ self._target_portal = target_portal
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ISCSIVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ISCSIVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_job.py b/contrib/python/kubernetes/kubernetes/client/models/v1_job.py
new file mode 100644
index 0000000000..861161f191
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_job.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Job(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1JobSpec',
+ 'status': 'V1JobStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1Job - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Job. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Job. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Job.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Job. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Job. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Job. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Job.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Job. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Job. # noqa: E501
+
+
+ :return: The metadata of this V1Job. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Job.
+
+
+ :param metadata: The metadata of this V1Job. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1Job. # noqa: E501
+
+
+ :return: The spec of this V1Job. # noqa: E501
+ :rtype: V1JobSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1Job.
+
+
+ :param spec: The spec of this V1Job. # noqa: E501
+ :type: V1JobSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1Job. # noqa: E501
+
+
+ :return: The status of this V1Job. # noqa: E501
+ :rtype: V1JobStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1Job.
+
+
+ :param status: The status of this V1Job. # noqa: E501
+ :type: V1JobStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Job):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Job):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_job_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_job_condition.py
new file mode 100644
index 0000000000..150111229c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_job_condition.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1JobCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_probe_time': 'datetime',
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_probe_time': 'lastProbeTime',
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_probe_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1JobCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_probe_time = None
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_probe_time is not None:
+ self.last_probe_time = last_probe_time
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_probe_time(self):
+ """Gets the last_probe_time of this V1JobCondition. # noqa: E501
+
+ Last time the condition was checked. # noqa: E501
+
+ :return: The last_probe_time of this V1JobCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_probe_time
+
+ @last_probe_time.setter
+ def last_probe_time(self, last_probe_time):
+ """Sets the last_probe_time of this V1JobCondition.
+
+ Last time the condition was checked. # noqa: E501
+
+ :param last_probe_time: The last_probe_time of this V1JobCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_probe_time = last_probe_time
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1JobCondition. # noqa: E501
+
+ Last time the condition transit from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1JobCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1JobCondition.
+
+ Last time the condition transit from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1JobCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1JobCondition. # noqa: E501
+
+ Human readable message indicating details about last transition. # noqa: E501
+
+ :return: The message of this V1JobCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1JobCondition.
+
+ Human readable message indicating details about last transition. # noqa: E501
+
+ :param message: The message of this V1JobCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1JobCondition. # noqa: E501
+
+ (brief) reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1JobCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1JobCondition.
+
+ (brief) reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1JobCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1JobCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1JobCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1JobCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1JobCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1JobCondition. # noqa: E501
+
+ Type of job condition, Complete or Failed. # noqa: E501
+
+ :return: The type of this V1JobCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1JobCondition.
+
+ Type of job condition, Complete or Failed. # noqa: E501
+
+ :param type: The type of this V1JobCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1JobCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1JobCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_job_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_job_list.py
new file mode 100644
index 0000000000..eea74b9ac8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_job_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1JobList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1Job]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1JobList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1JobList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1JobList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1JobList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1JobList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1JobList. # noqa: E501
+
+ items is the list of Jobs. # noqa: E501
+
+ :return: The items of this V1JobList. # noqa: E501
+ :rtype: list[V1Job]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1JobList.
+
+ items is the list of Jobs. # noqa: E501
+
+ :param items: The items of this V1JobList. # noqa: E501
+ :type: list[V1Job]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1JobList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1JobList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1JobList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1JobList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1JobList. # noqa: E501
+
+
+ :return: The metadata of this V1JobList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1JobList.
+
+
+ :param metadata: The metadata of this V1JobList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1JobList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1JobList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_job_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_job_spec.py
new file mode 100644
index 0000000000..3a05454037
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_job_spec.py
@@ -0,0 +1,481 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1JobSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'active_deadline_seconds': 'int',
+ 'backoff_limit': 'int',
+ 'backoff_limit_per_index': 'int',
+ 'completion_mode': 'str',
+ 'completions': 'int',
+ 'manual_selector': 'bool',
+ 'max_failed_indexes': 'int',
+ 'parallelism': 'int',
+ 'pod_failure_policy': 'V1PodFailurePolicy',
+ 'pod_replacement_policy': 'str',
+ 'selector': 'V1LabelSelector',
+ 'suspend': 'bool',
+ 'template': 'V1PodTemplateSpec',
+ 'ttl_seconds_after_finished': 'int'
+ }
+
+ attribute_map = {
+ 'active_deadline_seconds': 'activeDeadlineSeconds',
+ 'backoff_limit': 'backoffLimit',
+ 'backoff_limit_per_index': 'backoffLimitPerIndex',
+ 'completion_mode': 'completionMode',
+ 'completions': 'completions',
+ 'manual_selector': 'manualSelector',
+ 'max_failed_indexes': 'maxFailedIndexes',
+ 'parallelism': 'parallelism',
+ 'pod_failure_policy': 'podFailurePolicy',
+ 'pod_replacement_policy': 'podReplacementPolicy',
+ 'selector': 'selector',
+ 'suspend': 'suspend',
+ 'template': 'template',
+ 'ttl_seconds_after_finished': 'ttlSecondsAfterFinished'
+ }
+
+ def __init__(self, active_deadline_seconds=None, backoff_limit=None, backoff_limit_per_index=None, completion_mode=None, completions=None, manual_selector=None, max_failed_indexes=None, parallelism=None, pod_failure_policy=None, pod_replacement_policy=None, selector=None, suspend=None, template=None, ttl_seconds_after_finished=None, local_vars_configuration=None): # noqa: E501
+ """V1JobSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._active_deadline_seconds = None
+ self._backoff_limit = None
+ self._backoff_limit_per_index = None
+ self._completion_mode = None
+ self._completions = None
+ self._manual_selector = None
+ self._max_failed_indexes = None
+ self._parallelism = None
+ self._pod_failure_policy = None
+ self._pod_replacement_policy = None
+ self._selector = None
+ self._suspend = None
+ self._template = None
+ self._ttl_seconds_after_finished = None
+ self.discriminator = None
+
+ if active_deadline_seconds is not None:
+ self.active_deadline_seconds = active_deadline_seconds
+ if backoff_limit is not None:
+ self.backoff_limit = backoff_limit
+ if backoff_limit_per_index is not None:
+ self.backoff_limit_per_index = backoff_limit_per_index
+ if completion_mode is not None:
+ self.completion_mode = completion_mode
+ if completions is not None:
+ self.completions = completions
+ if manual_selector is not None:
+ self.manual_selector = manual_selector
+ if max_failed_indexes is not None:
+ self.max_failed_indexes = max_failed_indexes
+ if parallelism is not None:
+ self.parallelism = parallelism
+ if pod_failure_policy is not None:
+ self.pod_failure_policy = pod_failure_policy
+ if pod_replacement_policy is not None:
+ self.pod_replacement_policy = pod_replacement_policy
+ if selector is not None:
+ self.selector = selector
+ if suspend is not None:
+ self.suspend = suspend
+ self.template = template
+ if ttl_seconds_after_finished is not None:
+ self.ttl_seconds_after_finished = ttl_seconds_after_finished
+
+ @property
+ def active_deadline_seconds(self):
+ """Gets the active_deadline_seconds of this V1JobSpec. # noqa: E501
+
+ Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again. # noqa: E501
+
+ :return: The active_deadline_seconds of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._active_deadline_seconds
+
+ @active_deadline_seconds.setter
+ def active_deadline_seconds(self, active_deadline_seconds):
+ """Sets the active_deadline_seconds of this V1JobSpec.
+
+ Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again. # noqa: E501
+
+ :param active_deadline_seconds: The active_deadline_seconds of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._active_deadline_seconds = active_deadline_seconds
+
+ @property
+ def backoff_limit(self):
+ """Gets the backoff_limit of this V1JobSpec. # noqa: E501
+
+ Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
+
+ :return: The backoff_limit of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._backoff_limit
+
+ @backoff_limit.setter
+ def backoff_limit(self, backoff_limit):
+ """Sets the backoff_limit of this V1JobSpec.
+
+ Specifies the number of retries before marking this job failed. Defaults to 6 # noqa: E501
+
+ :param backoff_limit: The backoff_limit of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._backoff_limit = backoff_limit
+
+ @property
+ def backoff_limit_per_index(self):
+ """Gets the backoff_limit_per_index of this V1JobSpec. # noqa: E501
+
+ Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). # noqa: E501
+
+ :return: The backoff_limit_per_index of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._backoff_limit_per_index
+
+ @backoff_limit_per_index.setter
+ def backoff_limit_per_index(self, backoff_limit_per_index):
+ """Sets the backoff_limit_per_index of this V1JobSpec.
+
+ Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). # noqa: E501
+
+ :param backoff_limit_per_index: The backoff_limit_per_index of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._backoff_limit_per_index = backoff_limit_per_index
+
+ @property
+ def completion_mode(self):
+ """Gets the completion_mode of this V1JobSpec. # noqa: E501
+
+ completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job. # noqa: E501
+
+ :return: The completion_mode of this V1JobSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._completion_mode
+
+ @completion_mode.setter
+ def completion_mode(self, completion_mode):
+ """Sets the completion_mode of this V1JobSpec.
+
+ completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job. # noqa: E501
+
+ :param completion_mode: The completion_mode of this V1JobSpec. # noqa: E501
+ :type: str
+ """
+
+ self._completion_mode = completion_mode
+
+ @property
+ def completions(self):
+ """Gets the completions of this V1JobSpec. # noqa: E501
+
+ Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
+
+ :return: The completions of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._completions
+
+ @completions.setter
+ def completions(self, completions):
+ """Sets the completions of this V1JobSpec.
+
+ Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
+
+ :param completions: The completions of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._completions = completions
+
+ @property
+ def manual_selector(self):
+ """Gets the manual_selector of this V1JobSpec. # noqa: E501
+
+ manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
+
+ :return: The manual_selector of this V1JobSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._manual_selector
+
+ @manual_selector.setter
+ def manual_selector(self, manual_selector):
+ """Sets the manual_selector of this V1JobSpec.
+
+ manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector # noqa: E501
+
+ :param manual_selector: The manual_selector of this V1JobSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._manual_selector = manual_selector
+
+ @property
+ def max_failed_indexes(self):
+ """Gets the max_failed_indexes of this V1JobSpec. # noqa: E501
+
+ Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). # noqa: E501
+
+ :return: The max_failed_indexes of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._max_failed_indexes
+
+ @max_failed_indexes.setter
+ def max_failed_indexes(self, max_failed_indexes):
+ """Sets the max_failed_indexes of this V1JobSpec.
+
+ Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). # noqa: E501
+
+ :param max_failed_indexes: The max_failed_indexes of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._max_failed_indexes = max_failed_indexes
+
+ @property
+ def parallelism(self):
+ """Gets the parallelism of this V1JobSpec. # noqa: E501
+
+ Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
+
+ :return: The parallelism of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._parallelism
+
+ @parallelism.setter
+ def parallelism(self, parallelism):
+ """Sets the parallelism of this V1JobSpec.
+
+ Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
+
+ :param parallelism: The parallelism of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._parallelism = parallelism
+
+ @property
+ def pod_failure_policy(self):
+ """Gets the pod_failure_policy of this V1JobSpec. # noqa: E501
+
+
+ :return: The pod_failure_policy of this V1JobSpec. # noqa: E501
+ :rtype: V1PodFailurePolicy
+ """
+ return self._pod_failure_policy
+
+ @pod_failure_policy.setter
+ def pod_failure_policy(self, pod_failure_policy):
+ """Sets the pod_failure_policy of this V1JobSpec.
+
+
+ :param pod_failure_policy: The pod_failure_policy of this V1JobSpec. # noqa: E501
+ :type: V1PodFailurePolicy
+ """
+
+ self._pod_failure_policy = pod_failure_policy
+
+ @property
+ def pod_replacement_policy(self):
+ """Gets the pod_replacement_policy of this V1JobSpec. # noqa: E501
+
+ podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods when they are terminating (has a metadata.deletionTimestamp) or failed. - Failed means to wait until a previously created Pod is fully terminated (has phase Failed or Succeeded) before creating a replacement Pod. When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. # noqa: E501
+
+ :return: The pod_replacement_policy of this V1JobSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._pod_replacement_policy
+
+ @pod_replacement_policy.setter
+ def pod_replacement_policy(self, pod_replacement_policy):
+ """Sets the pod_replacement_policy of this V1JobSpec.
+
+ podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods when they are terminating (has a metadata.deletionTimestamp) or failed. - Failed means to wait until a previously created Pod is fully terminated (has phase Failed or Succeeded) before creating a replacement Pod. When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. # noqa: E501
+
+ :param pod_replacement_policy: The pod_replacement_policy of this V1JobSpec. # noqa: E501
+ :type: str
+ """
+
+ self._pod_replacement_policy = pod_replacement_policy
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1JobSpec. # noqa: E501
+
+
+ :return: The selector of this V1JobSpec. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1JobSpec.
+
+
+ :param selector: The selector of this V1JobSpec. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._selector = selector
+
+ @property
+ def suspend(self):
+ """Gets the suspend of this V1JobSpec. # noqa: E501
+
+ suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false. # noqa: E501
+
+ :return: The suspend of this V1JobSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._suspend
+
+ @suspend.setter
+ def suspend(self, suspend):
+ """Sets the suspend of this V1JobSpec.
+
+ suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false. # noqa: E501
+
+ :param suspend: The suspend of this V1JobSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._suspend = suspend
+
+ @property
+ def template(self):
+ """Gets the template of this V1JobSpec. # noqa: E501
+
+
+ :return: The template of this V1JobSpec. # noqa: E501
+ :rtype: V1PodTemplateSpec
+ """
+ return self._template
+
+ @template.setter
+ def template(self, template):
+ """Sets the template of this V1JobSpec.
+
+
+ :param template: The template of this V1JobSpec. # noqa: E501
+ :type: V1PodTemplateSpec
+ """
+ if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
+ raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
+
+ self._template = template
+
+ @property
+ def ttl_seconds_after_finished(self):
+ """Gets the ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
+
+ ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. # noqa: E501
+
+ :return: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._ttl_seconds_after_finished
+
+ @ttl_seconds_after_finished.setter
+ def ttl_seconds_after_finished(self, ttl_seconds_after_finished):
+ """Sets the ttl_seconds_after_finished of this V1JobSpec.
+
+ ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. # noqa: E501
+
+ :param ttl_seconds_after_finished: The ttl_seconds_after_finished of this V1JobSpec. # noqa: E501
+ :type: int
+ """
+
+ self._ttl_seconds_after_finished = ttl_seconds_after_finished
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1JobSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1JobSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_job_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_job_status.py
new file mode 100644
index 0000000000..33cf942163
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_job_status.py
@@ -0,0 +1,400 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1JobStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'active': 'int',
+ 'completed_indexes': 'str',
+ 'completion_time': 'datetime',
+ 'conditions': 'list[V1JobCondition]',
+ 'failed': 'int',
+ 'failed_indexes': 'str',
+ 'ready': 'int',
+ 'start_time': 'datetime',
+ 'succeeded': 'int',
+ 'terminating': 'int',
+ 'uncounted_terminated_pods': 'V1UncountedTerminatedPods'
+ }
+
+ attribute_map = {
+ 'active': 'active',
+ 'completed_indexes': 'completedIndexes',
+ 'completion_time': 'completionTime',
+ 'conditions': 'conditions',
+ 'failed': 'failed',
+ 'failed_indexes': 'failedIndexes',
+ 'ready': 'ready',
+ 'start_time': 'startTime',
+ 'succeeded': 'succeeded',
+ 'terminating': 'terminating',
+ 'uncounted_terminated_pods': 'uncountedTerminatedPods'
+ }
+
+ def __init__(self, active=None, completed_indexes=None, completion_time=None, conditions=None, failed=None, failed_indexes=None, ready=None, start_time=None, succeeded=None, terminating=None, uncounted_terminated_pods=None, local_vars_configuration=None): # noqa: E501
+ """V1JobStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._active = None
+ self._completed_indexes = None
+ self._completion_time = None
+ self._conditions = None
+ self._failed = None
+ self._failed_indexes = None
+ self._ready = None
+ self._start_time = None
+ self._succeeded = None
+ self._terminating = None
+ self._uncounted_terminated_pods = None
+ self.discriminator = None
+
+ if active is not None:
+ self.active = active
+ if completed_indexes is not None:
+ self.completed_indexes = completed_indexes
+ if completion_time is not None:
+ self.completion_time = completion_time
+ if conditions is not None:
+ self.conditions = conditions
+ if failed is not None:
+ self.failed = failed
+ if failed_indexes is not None:
+ self.failed_indexes = failed_indexes
+ if ready is not None:
+ self.ready = ready
+ if start_time is not None:
+ self.start_time = start_time
+ if succeeded is not None:
+ self.succeeded = succeeded
+ if terminating is not None:
+ self.terminating = terminating
+ if uncounted_terminated_pods is not None:
+ self.uncounted_terminated_pods = uncounted_terminated_pods
+
+ @property
+ def active(self):
+ """Gets the active of this V1JobStatus. # noqa: E501
+
+ The number of pending and running pods. # noqa: E501
+
+ :return: The active of this V1JobStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._active
+
+ @active.setter
+ def active(self, active):
+ """Sets the active of this V1JobStatus.
+
+ The number of pending and running pods. # noqa: E501
+
+ :param active: The active of this V1JobStatus. # noqa: E501
+ :type: int
+ """
+
+ self._active = active
+
+ @property
+ def completed_indexes(self):
+ """Gets the completed_indexes of this V1JobStatus. # noqa: E501
+
+ completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". # noqa: E501
+
+ :return: The completed_indexes of this V1JobStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._completed_indexes
+
+ @completed_indexes.setter
+ def completed_indexes(self, completed_indexes):
+ """Sets the completed_indexes of this V1JobStatus.
+
+ completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". # noqa: E501
+
+ :param completed_indexes: The completed_indexes of this V1JobStatus. # noqa: E501
+ :type: str
+ """
+
+ self._completed_indexes = completed_indexes
+
+ @property
+ def completion_time(self):
+ """Gets the completion_time of this V1JobStatus. # noqa: E501
+
+ Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully. # noqa: E501
+
+ :return: The completion_time of this V1JobStatus. # noqa: E501
+ :rtype: datetime
+ """
+ return self._completion_time
+
+ @completion_time.setter
+ def completion_time(self, completion_time):
+ """Sets the completion_time of this V1JobStatus.
+
+ Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully. # noqa: E501
+
+ :param completion_time: The completion_time of this V1JobStatus. # noqa: E501
+ :type: datetime
+ """
+
+ self._completion_time = completion_time
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1JobStatus. # noqa: E501
+
+ The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
+
+ :return: The conditions of this V1JobStatus. # noqa: E501
+ :rtype: list[V1JobCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1JobStatus.
+
+ The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ # noqa: E501
+
+ :param conditions: The conditions of this V1JobStatus. # noqa: E501
+ :type: list[V1JobCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def failed(self):
+ """Gets the failed of this V1JobStatus. # noqa: E501
+
+ The number of pods which reached phase Failed. # noqa: E501
+
+ :return: The failed of this V1JobStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._failed
+
+ @failed.setter
+ def failed(self, failed):
+ """Sets the failed of this V1JobStatus.
+
+ The number of pods which reached phase Failed. # noqa: E501
+
+ :param failed: The failed of this V1JobStatus. # noqa: E501
+ :type: int
+ """
+
+ self._failed = failed
+
+ @property
+ def failed_indexes(self):
+ """Gets the failed_indexes of this V1JobStatus. # noqa: E501
+
+ FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). # noqa: E501
+
+ :return: The failed_indexes of this V1JobStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._failed_indexes
+
+ @failed_indexes.setter
+ def failed_indexes(self, failed_indexes):
+ """Sets the failed_indexes of this V1JobStatus.
+
+ FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). # noqa: E501
+
+ :param failed_indexes: The failed_indexes of this V1JobStatus. # noqa: E501
+ :type: str
+ """
+
+ self._failed_indexes = failed_indexes
+
+ @property
+ def ready(self):
+ """Gets the ready of this V1JobStatus. # noqa: E501
+
+ The number of pods which have a Ready condition. This field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default). # noqa: E501
+
+ :return: The ready of this V1JobStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._ready
+
+ @ready.setter
+ def ready(self, ready):
+ """Sets the ready of this V1JobStatus.
+
+ The number of pods which have a Ready condition. This field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default). # noqa: E501
+
+ :param ready: The ready of this V1JobStatus. # noqa: E501
+ :type: int
+ """
+
+ self._ready = ready
+
+ @property
+ def start_time(self):
+ """Gets the start_time of this V1JobStatus. # noqa: E501
+
+ Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC. # noqa: E501
+
+ :return: The start_time of this V1JobStatus. # noqa: E501
+ :rtype: datetime
+ """
+ return self._start_time
+
+ @start_time.setter
+ def start_time(self, start_time):
+ """Sets the start_time of this V1JobStatus.
+
+ Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC. # noqa: E501
+
+ :param start_time: The start_time of this V1JobStatus. # noqa: E501
+ :type: datetime
+ """
+
+ self._start_time = start_time
+
+ @property
+ def succeeded(self):
+ """Gets the succeeded of this V1JobStatus. # noqa: E501
+
+ The number of pods which reached phase Succeeded. # noqa: E501
+
+ :return: The succeeded of this V1JobStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._succeeded
+
+ @succeeded.setter
+ def succeeded(self, succeeded):
+ """Sets the succeeded of this V1JobStatus.
+
+ The number of pods which reached phase Succeeded. # noqa: E501
+
+ :param succeeded: The succeeded of this V1JobStatus. # noqa: E501
+ :type: int
+ """
+
+ self._succeeded = succeeded
+
+ @property
+ def terminating(self):
+ """Gets the terminating of this V1JobStatus. # noqa: E501
+
+ The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp). This field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default). # noqa: E501
+
+ :return: The terminating of this V1JobStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._terminating
+
+ @terminating.setter
+ def terminating(self, terminating):
+ """Sets the terminating of this V1JobStatus.
+
+ The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp). This field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default). # noqa: E501
+
+ :param terminating: The terminating of this V1JobStatus. # noqa: E501
+ :type: int
+ """
+
+ self._terminating = terminating
+
+ @property
+ def uncounted_terminated_pods(self):
+ """Gets the uncounted_terminated_pods of this V1JobStatus. # noqa: E501
+
+
+ :return: The uncounted_terminated_pods of this V1JobStatus. # noqa: E501
+ :rtype: V1UncountedTerminatedPods
+ """
+ return self._uncounted_terminated_pods
+
+ @uncounted_terminated_pods.setter
+ def uncounted_terminated_pods(self, uncounted_terminated_pods):
+ """Sets the uncounted_terminated_pods of this V1JobStatus.
+
+
+ :param uncounted_terminated_pods: The uncounted_terminated_pods of this V1JobStatus. # noqa: E501
+ :type: V1UncountedTerminatedPods
+ """
+
+ self._uncounted_terminated_pods = uncounted_terminated_pods
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1JobStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1JobStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_job_template_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_job_template_spec.py
new file mode 100644
index 0000000000..336f89fe1c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_job_template_spec.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1JobTemplateSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1JobSpec'
+ }
+
+ attribute_map = {
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1JobTemplateSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1JobTemplateSpec. # noqa: E501
+
+
+ :return: The metadata of this V1JobTemplateSpec. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1JobTemplateSpec.
+
+
+ :param metadata: The metadata of this V1JobTemplateSpec. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1JobTemplateSpec. # noqa: E501
+
+
+ :return: The spec of this V1JobTemplateSpec. # noqa: E501
+ :rtype: V1JobSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1JobTemplateSpec.
+
+
+ :param spec: The spec of this V1JobTemplateSpec. # noqa: E501
+ :type: V1JobSpec
+ """
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1JobTemplateSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1JobTemplateSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_json_schema_props.py b/contrib/python/kubernetes/kubernetes/client/models/v1_json_schema_props.py
new file mode 100644
index 0000000000..c1f90f93c3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_json_schema_props.py
@@ -0,0 +1,1264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1JSONSchemaProps(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'ref': 'str',
+ 'schema': 'str',
+ 'additional_items': 'object',
+ 'additional_properties': 'object',
+ 'all_of': 'list[V1JSONSchemaProps]',
+ 'any_of': 'list[V1JSONSchemaProps]',
+ 'default': 'object',
+ 'definitions': 'dict(str, V1JSONSchemaProps)',
+ 'dependencies': 'dict(str, object)',
+ 'description': 'str',
+ 'enum': 'list[object]',
+ 'example': 'object',
+ 'exclusive_maximum': 'bool',
+ 'exclusive_minimum': 'bool',
+ 'external_docs': 'V1ExternalDocumentation',
+ 'format': 'str',
+ 'id': 'str',
+ 'items': 'object',
+ 'max_items': 'int',
+ 'max_length': 'int',
+ 'max_properties': 'int',
+ 'maximum': 'float',
+ 'min_items': 'int',
+ 'min_length': 'int',
+ 'min_properties': 'int',
+ 'minimum': 'float',
+ 'multiple_of': 'float',
+ '_not': 'V1JSONSchemaProps',
+ 'nullable': 'bool',
+ 'one_of': 'list[V1JSONSchemaProps]',
+ 'pattern': 'str',
+ 'pattern_properties': 'dict(str, V1JSONSchemaProps)',
+ 'properties': 'dict(str, V1JSONSchemaProps)',
+ 'required': 'list[str]',
+ 'title': 'str',
+ 'type': 'str',
+ 'unique_items': 'bool',
+ 'x_kubernetes_embedded_resource': 'bool',
+ 'x_kubernetes_int_or_string': 'bool',
+ 'x_kubernetes_list_map_keys': 'list[str]',
+ 'x_kubernetes_list_type': 'str',
+ 'x_kubernetes_map_type': 'str',
+ 'x_kubernetes_preserve_unknown_fields': 'bool',
+ 'x_kubernetes_validations': 'list[V1ValidationRule]'
+ }
+
+ attribute_map = {
+ 'ref': '$ref',
+ 'schema': '$schema',
+ 'additional_items': 'additionalItems',
+ 'additional_properties': 'additionalProperties',
+ 'all_of': 'allOf',
+ 'any_of': 'anyOf',
+ 'default': 'default',
+ 'definitions': 'definitions',
+ 'dependencies': 'dependencies',
+ 'description': 'description',
+ 'enum': 'enum',
+ 'example': 'example',
+ 'exclusive_maximum': 'exclusiveMaximum',
+ 'exclusive_minimum': 'exclusiveMinimum',
+ 'external_docs': 'externalDocs',
+ 'format': 'format',
+ 'id': 'id',
+ 'items': 'items',
+ 'max_items': 'maxItems',
+ 'max_length': 'maxLength',
+ 'max_properties': 'maxProperties',
+ 'maximum': 'maximum',
+ 'min_items': 'minItems',
+ 'min_length': 'minLength',
+ 'min_properties': 'minProperties',
+ 'minimum': 'minimum',
+ 'multiple_of': 'multipleOf',
+ '_not': 'not',
+ 'nullable': 'nullable',
+ 'one_of': 'oneOf',
+ 'pattern': 'pattern',
+ 'pattern_properties': 'patternProperties',
+ 'properties': 'properties',
+ 'required': 'required',
+ 'title': 'title',
+ 'type': 'type',
+ 'unique_items': 'uniqueItems',
+ 'x_kubernetes_embedded_resource': 'x-kubernetes-embedded-resource',
+ 'x_kubernetes_int_or_string': 'x-kubernetes-int-or-string',
+ 'x_kubernetes_list_map_keys': 'x-kubernetes-list-map-keys',
+ 'x_kubernetes_list_type': 'x-kubernetes-list-type',
+ 'x_kubernetes_map_type': 'x-kubernetes-map-type',
+ 'x_kubernetes_preserve_unknown_fields': 'x-kubernetes-preserve-unknown-fields',
+ 'x_kubernetes_validations': 'x-kubernetes-validations'
+ }
+
+ def __init__(self, ref=None, schema=None, additional_items=None, additional_properties=None, all_of=None, any_of=None, default=None, definitions=None, dependencies=None, description=None, enum=None, example=None, exclusive_maximum=None, exclusive_minimum=None, external_docs=None, format=None, id=None, items=None, max_items=None, max_length=None, max_properties=None, maximum=None, min_items=None, min_length=None, min_properties=None, minimum=None, multiple_of=None, _not=None, nullable=None, one_of=None, pattern=None, pattern_properties=None, properties=None, required=None, title=None, type=None, unique_items=None, x_kubernetes_embedded_resource=None, x_kubernetes_int_or_string=None, x_kubernetes_list_map_keys=None, x_kubernetes_list_type=None, x_kubernetes_map_type=None, x_kubernetes_preserve_unknown_fields=None, x_kubernetes_validations=None, local_vars_configuration=None): # noqa: E501
+ """V1JSONSchemaProps - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._ref = None
+ self._schema = None
+ self._additional_items = None
+ self._additional_properties = None
+ self._all_of = None
+ self._any_of = None
+ self._default = None
+ self._definitions = None
+ self._dependencies = None
+ self._description = None
+ self._enum = None
+ self._example = None
+ self._exclusive_maximum = None
+ self._exclusive_minimum = None
+ self._external_docs = None
+ self._format = None
+ self._id = None
+ self._items = None
+ self._max_items = None
+ self._max_length = None
+ self._max_properties = None
+ self._maximum = None
+ self._min_items = None
+ self._min_length = None
+ self._min_properties = None
+ self._minimum = None
+ self._multiple_of = None
+ self.__not = None
+ self._nullable = None
+ self._one_of = None
+ self._pattern = None
+ self._pattern_properties = None
+ self._properties = None
+ self._required = None
+ self._title = None
+ self._type = None
+ self._unique_items = None
+ self._x_kubernetes_embedded_resource = None
+ self._x_kubernetes_int_or_string = None
+ self._x_kubernetes_list_map_keys = None
+ self._x_kubernetes_list_type = None
+ self._x_kubernetes_map_type = None
+ self._x_kubernetes_preserve_unknown_fields = None
+ self._x_kubernetes_validations = None
+ self.discriminator = None
+
+ if ref is not None:
+ self.ref = ref
+ if schema is not None:
+ self.schema = schema
+ if additional_items is not None:
+ self.additional_items = additional_items
+ if additional_properties is not None:
+ self.additional_properties = additional_properties
+ if all_of is not None:
+ self.all_of = all_of
+ if any_of is not None:
+ self.any_of = any_of
+ if default is not None:
+ self.default = default
+ if definitions is not None:
+ self.definitions = definitions
+ if dependencies is not None:
+ self.dependencies = dependencies
+ if description is not None:
+ self.description = description
+ if enum is not None:
+ self.enum = enum
+ if example is not None:
+ self.example = example
+ if exclusive_maximum is not None:
+ self.exclusive_maximum = exclusive_maximum
+ if exclusive_minimum is not None:
+ self.exclusive_minimum = exclusive_minimum
+ if external_docs is not None:
+ self.external_docs = external_docs
+ if format is not None:
+ self.format = format
+ if id is not None:
+ self.id = id
+ if items is not None:
+ self.items = items
+ if max_items is not None:
+ self.max_items = max_items
+ if max_length is not None:
+ self.max_length = max_length
+ if max_properties is not None:
+ self.max_properties = max_properties
+ if maximum is not None:
+ self.maximum = maximum
+ if min_items is not None:
+ self.min_items = min_items
+ if min_length is not None:
+ self.min_length = min_length
+ if min_properties is not None:
+ self.min_properties = min_properties
+ if minimum is not None:
+ self.minimum = minimum
+ if multiple_of is not None:
+ self.multiple_of = multiple_of
+ if _not is not None:
+ self._not = _not
+ if nullable is not None:
+ self.nullable = nullable
+ if one_of is not None:
+ self.one_of = one_of
+ if pattern is not None:
+ self.pattern = pattern
+ if pattern_properties is not None:
+ self.pattern_properties = pattern_properties
+ if properties is not None:
+ self.properties = properties
+ if required is not None:
+ self.required = required
+ if title is not None:
+ self.title = title
+ if type is not None:
+ self.type = type
+ if unique_items is not None:
+ self.unique_items = unique_items
+ if x_kubernetes_embedded_resource is not None:
+ self.x_kubernetes_embedded_resource = x_kubernetes_embedded_resource
+ if x_kubernetes_int_or_string is not None:
+ self.x_kubernetes_int_or_string = x_kubernetes_int_or_string
+ if x_kubernetes_list_map_keys is not None:
+ self.x_kubernetes_list_map_keys = x_kubernetes_list_map_keys
+ if x_kubernetes_list_type is not None:
+ self.x_kubernetes_list_type = x_kubernetes_list_type
+ if x_kubernetes_map_type is not None:
+ self.x_kubernetes_map_type = x_kubernetes_map_type
+ if x_kubernetes_preserve_unknown_fields is not None:
+ self.x_kubernetes_preserve_unknown_fields = x_kubernetes_preserve_unknown_fields
+ if x_kubernetes_validations is not None:
+ self.x_kubernetes_validations = x_kubernetes_validations
+
+ @property
+ def ref(self):
+ """Gets the ref of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The ref of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._ref
+
+ @ref.setter
+ def ref(self, ref):
+ """Sets the ref of this V1JSONSchemaProps.
+
+
+ :param ref: The ref of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._ref = ref
+
+ @property
+ def schema(self):
+ """Gets the schema of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The schema of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._schema
+
+ @schema.setter
+ def schema(self, schema):
+ """Sets the schema of this V1JSONSchemaProps.
+
+
+ :param schema: The schema of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._schema = schema
+
+ @property
+ def additional_items(self):
+ """Gets the additional_items of this V1JSONSchemaProps. # noqa: E501
+
+ JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property. # noqa: E501
+
+ :return: The additional_items of this V1JSONSchemaProps. # noqa: E501
+ :rtype: object
+ """
+ return self._additional_items
+
+ @additional_items.setter
+ def additional_items(self, additional_items):
+ """Sets the additional_items of this V1JSONSchemaProps.
+
+ JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property. # noqa: E501
+
+ :param additional_items: The additional_items of this V1JSONSchemaProps. # noqa: E501
+ :type: object
+ """
+
+ self._additional_items = additional_items
+
+ @property
+ def additional_properties(self):
+ """Gets the additional_properties of this V1JSONSchemaProps. # noqa: E501
+
+ JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property. # noqa: E501
+
+ :return: The additional_properties of this V1JSONSchemaProps. # noqa: E501
+ :rtype: object
+ """
+ return self._additional_properties
+
+ @additional_properties.setter
+ def additional_properties(self, additional_properties):
+ """Sets the additional_properties of this V1JSONSchemaProps.
+
+ JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property. # noqa: E501
+
+ :param additional_properties: The additional_properties of this V1JSONSchemaProps. # noqa: E501
+ :type: object
+ """
+
+ self._additional_properties = additional_properties
+
+ @property
+ def all_of(self):
+ """Gets the all_of of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The all_of of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[V1JSONSchemaProps]
+ """
+ return self._all_of
+
+ @all_of.setter
+ def all_of(self, all_of):
+ """Sets the all_of of this V1JSONSchemaProps.
+
+
+ :param all_of: The all_of of this V1JSONSchemaProps. # noqa: E501
+ :type: list[V1JSONSchemaProps]
+ """
+
+ self._all_of = all_of
+
+ @property
+ def any_of(self):
+ """Gets the any_of of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The any_of of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[V1JSONSchemaProps]
+ """
+ return self._any_of
+
+ @any_of.setter
+ def any_of(self, any_of):
+ """Sets the any_of of this V1JSONSchemaProps.
+
+
+ :param any_of: The any_of of this V1JSONSchemaProps. # noqa: E501
+ :type: list[V1JSONSchemaProps]
+ """
+
+ self._any_of = any_of
+
+ @property
+ def default(self):
+ """Gets the default of this V1JSONSchemaProps. # noqa: E501
+
+ default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. # noqa: E501
+
+ :return: The default of this V1JSONSchemaProps. # noqa: E501
+ :rtype: object
+ """
+ return self._default
+
+ @default.setter
+ def default(self, default):
+ """Sets the default of this V1JSONSchemaProps.
+
+ default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. # noqa: E501
+
+ :param default: The default of this V1JSONSchemaProps. # noqa: E501
+ :type: object
+ """
+
+ self._default = default
+
+ @property
+ def definitions(self):
+ """Gets the definitions of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The definitions of this V1JSONSchemaProps. # noqa: E501
+ :rtype: dict(str, V1JSONSchemaProps)
+ """
+ return self._definitions
+
+ @definitions.setter
+ def definitions(self, definitions):
+ """Sets the definitions of this V1JSONSchemaProps.
+
+
+ :param definitions: The definitions of this V1JSONSchemaProps. # noqa: E501
+ :type: dict(str, V1JSONSchemaProps)
+ """
+
+ self._definitions = definitions
+
+ @property
+ def dependencies(self):
+ """Gets the dependencies of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The dependencies of this V1JSONSchemaProps. # noqa: E501
+ :rtype: dict(str, object)
+ """
+ return self._dependencies
+
+ @dependencies.setter
+ def dependencies(self, dependencies):
+ """Sets the dependencies of this V1JSONSchemaProps.
+
+
+ :param dependencies: The dependencies of this V1JSONSchemaProps. # noqa: E501
+ :type: dict(str, object)
+ """
+
+ self._dependencies = dependencies
+
+ @property
+ def description(self):
+ """Gets the description of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The description of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._description
+
+ @description.setter
+ def description(self, description):
+ """Sets the description of this V1JSONSchemaProps.
+
+
+ :param description: The description of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._description = description
+
+ @property
+ def enum(self):
+ """Gets the enum of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The enum of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[object]
+ """
+ return self._enum
+
+ @enum.setter
+ def enum(self, enum):
+ """Sets the enum of this V1JSONSchemaProps.
+
+
+ :param enum: The enum of this V1JSONSchemaProps. # noqa: E501
+ :type: list[object]
+ """
+
+ self._enum = enum
+
+ @property
+ def example(self):
+ """Gets the example of this V1JSONSchemaProps. # noqa: E501
+
+ JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. # noqa: E501
+
+ :return: The example of this V1JSONSchemaProps. # noqa: E501
+ :rtype: object
+ """
+ return self._example
+
+ @example.setter
+ def example(self, example):
+ """Sets the example of this V1JSONSchemaProps.
+
+ JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. # noqa: E501
+
+ :param example: The example of this V1JSONSchemaProps. # noqa: E501
+ :type: object
+ """
+
+ self._example = example
+
+ @property
+ def exclusive_maximum(self):
+ """Gets the exclusive_maximum of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The exclusive_maximum of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._exclusive_maximum
+
+ @exclusive_maximum.setter
+ def exclusive_maximum(self, exclusive_maximum):
+ """Sets the exclusive_maximum of this V1JSONSchemaProps.
+
+
+ :param exclusive_maximum: The exclusive_maximum of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._exclusive_maximum = exclusive_maximum
+
+ @property
+ def exclusive_minimum(self):
+ """Gets the exclusive_minimum of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The exclusive_minimum of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._exclusive_minimum
+
+ @exclusive_minimum.setter
+ def exclusive_minimum(self, exclusive_minimum):
+ """Sets the exclusive_minimum of this V1JSONSchemaProps.
+
+
+ :param exclusive_minimum: The exclusive_minimum of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._exclusive_minimum = exclusive_minimum
+
+ @property
+ def external_docs(self):
+ """Gets the external_docs of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The external_docs of this V1JSONSchemaProps. # noqa: E501
+ :rtype: V1ExternalDocumentation
+ """
+ return self._external_docs
+
+ @external_docs.setter
+ def external_docs(self, external_docs):
+ """Sets the external_docs of this V1JSONSchemaProps.
+
+
+ :param external_docs: The external_docs of this V1JSONSchemaProps. # noqa: E501
+ :type: V1ExternalDocumentation
+ """
+
+ self._external_docs = external_docs
+
+ @property
+ def format(self):
+ """Gets the format of this V1JSONSchemaProps. # noqa: E501
+
+ format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\" - isbn10: an ISBN10 number string like \"0321751043\" - isbn13: an ISBN13 number string like \"978-0321751041\" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like \"#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like \"rgb(255,255,2559\" - byte: base64 encoded binary data - password: any kind of string - date: a date string like \"2006-01-02\" as defined by full-date in RFC3339 - duration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339. # noqa: E501
+
+ :return: The format of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._format
+
+ @format.setter
+ def format(self, format):
+ """Sets the format of this V1JSONSchemaProps.
+
+ format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\" - isbn10: an ISBN10 number string like \"0321751043\" - isbn13: an ISBN13 number string like \"978-0321751041\" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like \"#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like \"rgb(255,255,2559\" - byte: base64 encoded binary data - password: any kind of string - date: a date string like \"2006-01-02\" as defined by full-date in RFC3339 - duration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339. # noqa: E501
+
+ :param format: The format of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._format = format
+
+ @property
+ def id(self):
+ """Gets the id of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The id of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._id
+
+ @id.setter
+ def id(self, id):
+ """Sets the id of this V1JSONSchemaProps.
+
+
+ :param id: The id of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._id = id
+
+ @property
+ def items(self):
+ """Gets the items of this V1JSONSchemaProps. # noqa: E501
+
+ JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes. # noqa: E501
+
+ :return: The items of this V1JSONSchemaProps. # noqa: E501
+ :rtype: object
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1JSONSchemaProps.
+
+ JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes. # noqa: E501
+
+ :param items: The items of this V1JSONSchemaProps. # noqa: E501
+ :type: object
+ """
+
+ self._items = items
+
+ @property
+ def max_items(self):
+ """Gets the max_items of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The max_items of this V1JSONSchemaProps. # noqa: E501
+ :rtype: int
+ """
+ return self._max_items
+
+ @max_items.setter
+ def max_items(self, max_items):
+ """Sets the max_items of this V1JSONSchemaProps.
+
+
+ :param max_items: The max_items of this V1JSONSchemaProps. # noqa: E501
+ :type: int
+ """
+
+ self._max_items = max_items
+
+ @property
+ def max_length(self):
+ """Gets the max_length of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The max_length of this V1JSONSchemaProps. # noqa: E501
+ :rtype: int
+ """
+ return self._max_length
+
+ @max_length.setter
+ def max_length(self, max_length):
+ """Sets the max_length of this V1JSONSchemaProps.
+
+
+ :param max_length: The max_length of this V1JSONSchemaProps. # noqa: E501
+ :type: int
+ """
+
+ self._max_length = max_length
+
+ @property
+ def max_properties(self):
+ """Gets the max_properties of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The max_properties of this V1JSONSchemaProps. # noqa: E501
+ :rtype: int
+ """
+ return self._max_properties
+
+ @max_properties.setter
+ def max_properties(self, max_properties):
+ """Sets the max_properties of this V1JSONSchemaProps.
+
+
+ :param max_properties: The max_properties of this V1JSONSchemaProps. # noqa: E501
+ :type: int
+ """
+
+ self._max_properties = max_properties
+
+ @property
+ def maximum(self):
+ """Gets the maximum of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The maximum of this V1JSONSchemaProps. # noqa: E501
+ :rtype: float
+ """
+ return self._maximum
+
+ @maximum.setter
+ def maximum(self, maximum):
+ """Sets the maximum of this V1JSONSchemaProps.
+
+
+ :param maximum: The maximum of this V1JSONSchemaProps. # noqa: E501
+ :type: float
+ """
+
+ self._maximum = maximum
+
+ @property
+ def min_items(self):
+ """Gets the min_items of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The min_items of this V1JSONSchemaProps. # noqa: E501
+ :rtype: int
+ """
+ return self._min_items
+
+ @min_items.setter
+ def min_items(self, min_items):
+ """Sets the min_items of this V1JSONSchemaProps.
+
+
+ :param min_items: The min_items of this V1JSONSchemaProps. # noqa: E501
+ :type: int
+ """
+
+ self._min_items = min_items
+
+ @property
+ def min_length(self):
+ """Gets the min_length of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The min_length of this V1JSONSchemaProps. # noqa: E501
+ :rtype: int
+ """
+ return self._min_length
+
+ @min_length.setter
+ def min_length(self, min_length):
+ """Sets the min_length of this V1JSONSchemaProps.
+
+
+ :param min_length: The min_length of this V1JSONSchemaProps. # noqa: E501
+ :type: int
+ """
+
+ self._min_length = min_length
+
+ @property
+ def min_properties(self):
+ """Gets the min_properties of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The min_properties of this V1JSONSchemaProps. # noqa: E501
+ :rtype: int
+ """
+ return self._min_properties
+
+ @min_properties.setter
+ def min_properties(self, min_properties):
+ """Sets the min_properties of this V1JSONSchemaProps.
+
+
+ :param min_properties: The min_properties of this V1JSONSchemaProps. # noqa: E501
+ :type: int
+ """
+
+ self._min_properties = min_properties
+
+ @property
+ def minimum(self):
+ """Gets the minimum of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The minimum of this V1JSONSchemaProps. # noqa: E501
+ :rtype: float
+ """
+ return self._minimum
+
+ @minimum.setter
+ def minimum(self, minimum):
+ """Sets the minimum of this V1JSONSchemaProps.
+
+
+ :param minimum: The minimum of this V1JSONSchemaProps. # noqa: E501
+ :type: float
+ """
+
+ self._minimum = minimum
+
+ @property
+ def multiple_of(self):
+ """Gets the multiple_of of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The multiple_of of this V1JSONSchemaProps. # noqa: E501
+ :rtype: float
+ """
+ return self._multiple_of
+
+ @multiple_of.setter
+ def multiple_of(self, multiple_of):
+ """Sets the multiple_of of this V1JSONSchemaProps.
+
+
+ :param multiple_of: The multiple_of of this V1JSONSchemaProps. # noqa: E501
+ :type: float
+ """
+
+ self._multiple_of = multiple_of
+
+ @property
+ def _not(self):
+ """Gets the _not of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The _not of this V1JSONSchemaProps. # noqa: E501
+ :rtype: V1JSONSchemaProps
+ """
+ return self.__not
+
+ @_not.setter
+ def _not(self, _not):
+ """Sets the _not of this V1JSONSchemaProps.
+
+
+ :param _not: The _not of this V1JSONSchemaProps. # noqa: E501
+ :type: V1JSONSchemaProps
+ """
+
+ self.__not = _not
+
+ @property
+ def nullable(self):
+ """Gets the nullable of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The nullable of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._nullable
+
+ @nullable.setter
+ def nullable(self, nullable):
+ """Sets the nullable of this V1JSONSchemaProps.
+
+
+ :param nullable: The nullable of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._nullable = nullable
+
+ @property
+ def one_of(self):
+ """Gets the one_of of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The one_of of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[V1JSONSchemaProps]
+ """
+ return self._one_of
+
+ @one_of.setter
+ def one_of(self, one_of):
+ """Sets the one_of of this V1JSONSchemaProps.
+
+
+ :param one_of: The one_of of this V1JSONSchemaProps. # noqa: E501
+ :type: list[V1JSONSchemaProps]
+ """
+
+ self._one_of = one_of
+
+ @property
+ def pattern(self):
+ """Gets the pattern of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The pattern of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._pattern
+
+ @pattern.setter
+ def pattern(self, pattern):
+ """Sets the pattern of this V1JSONSchemaProps.
+
+
+ :param pattern: The pattern of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._pattern = pattern
+
+ @property
+ def pattern_properties(self):
+ """Gets the pattern_properties of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The pattern_properties of this V1JSONSchemaProps. # noqa: E501
+ :rtype: dict(str, V1JSONSchemaProps)
+ """
+ return self._pattern_properties
+
+ @pattern_properties.setter
+ def pattern_properties(self, pattern_properties):
+ """Sets the pattern_properties of this V1JSONSchemaProps.
+
+
+ :param pattern_properties: The pattern_properties of this V1JSONSchemaProps. # noqa: E501
+ :type: dict(str, V1JSONSchemaProps)
+ """
+
+ self._pattern_properties = pattern_properties
+
+ @property
+ def properties(self):
+ """Gets the properties of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The properties of this V1JSONSchemaProps. # noqa: E501
+ :rtype: dict(str, V1JSONSchemaProps)
+ """
+ return self._properties
+
+ @properties.setter
+ def properties(self, properties):
+ """Sets the properties of this V1JSONSchemaProps.
+
+
+ :param properties: The properties of this V1JSONSchemaProps. # noqa: E501
+ :type: dict(str, V1JSONSchemaProps)
+ """
+
+ self._properties = properties
+
+ @property
+ def required(self):
+ """Gets the required of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The required of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._required
+
+ @required.setter
+ def required(self, required):
+ """Sets the required of this V1JSONSchemaProps.
+
+
+ :param required: The required of this V1JSONSchemaProps. # noqa: E501
+ :type: list[str]
+ """
+
+ self._required = required
+
+ @property
+ def title(self):
+ """Gets the title of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The title of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._title
+
+ @title.setter
+ def title(self, title):
+ """Sets the title of this V1JSONSchemaProps.
+
+
+ :param title: The title of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._title = title
+
+ @property
+ def type(self):
+ """Gets the type of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The type of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1JSONSchemaProps.
+
+
+ :param type: The type of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ @property
+ def unique_items(self):
+ """Gets the unique_items of this V1JSONSchemaProps. # noqa: E501
+
+
+ :return: The unique_items of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._unique_items
+
+ @unique_items.setter
+ def unique_items(self, unique_items):
+ """Sets the unique_items of this V1JSONSchemaProps.
+
+
+ :param unique_items: The unique_items of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._unique_items = unique_items
+
+ @property
+ def x_kubernetes_embedded_resource(self):
+ """Gets the x_kubernetes_embedded_resource of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata). # noqa: E501
+
+ :return: The x_kubernetes_embedded_resource of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._x_kubernetes_embedded_resource
+
+ @x_kubernetes_embedded_resource.setter
+ def x_kubernetes_embedded_resource(self, x_kubernetes_embedded_resource):
+ """Sets the x_kubernetes_embedded_resource of this V1JSONSchemaProps.
+
+ x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata). # noqa: E501
+
+ :param x_kubernetes_embedded_resource: The x_kubernetes_embedded_resource of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._x_kubernetes_embedded_resource = x_kubernetes_embedded_resource
+
+ @property
+ def x_kubernetes_int_or_string(self):
+ """Gets the x_kubernetes_int_or_string of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: 1) anyOf: - type: integer - type: string 2) allOf: - anyOf: - type: integer - type: string - ... zero or more # noqa: E501
+
+ :return: The x_kubernetes_int_or_string of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._x_kubernetes_int_or_string
+
+ @x_kubernetes_int_or_string.setter
+ def x_kubernetes_int_or_string(self, x_kubernetes_int_or_string):
+ """Sets the x_kubernetes_int_or_string of this V1JSONSchemaProps.
+
+ x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: 1) anyOf: - type: integer - type: string 2) allOf: - anyOf: - type: integer - type: string - ... zero or more # noqa: E501
+
+ :param x_kubernetes_int_or_string: The x_kubernetes_int_or_string of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._x_kubernetes_int_or_string = x_kubernetes_int_or_string
+
+ @property
+ def x_kubernetes_list_map_keys(self):
+ """Gets the x_kubernetes_list_map_keys of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. This tag MUST only be used on lists that have the \"x-kubernetes-list-type\" extension set to \"map\". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). The properties specified must either be required or have a default value, to ensure those properties are present for all list items. # noqa: E501
+
+ :return: The x_kubernetes_list_map_keys of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._x_kubernetes_list_map_keys
+
+ @x_kubernetes_list_map_keys.setter
+ def x_kubernetes_list_map_keys(self, x_kubernetes_list_map_keys):
+ """Sets the x_kubernetes_list_map_keys of this V1JSONSchemaProps.
+
+ x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. This tag MUST only be used on lists that have the \"x-kubernetes-list-type\" extension set to \"map\". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). The properties specified must either be required or have a default value, to ensure those properties are present for all list items. # noqa: E501
+
+ :param x_kubernetes_list_map_keys: The x_kubernetes_list_map_keys of this V1JSONSchemaProps. # noqa: E501
+ :type: list[str]
+ """
+
+ self._x_kubernetes_list_map_keys = x_kubernetes_list_map_keys
+
+ @property
+ def x_kubernetes_list_type(self):
+ """Gets the x_kubernetes_list_type of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: 1) `atomic`: the list is treated as a single entity, like a scalar. Atomic lists will be entirely replaced when updated. This extension may be used on any type of list (struct, scalar, ...). 2) `set`: Sets are lists that must not have multiple items with the same value. Each value must be a scalar, an object with x-kubernetes-map-type `atomic` or an array with x-kubernetes-list-type `atomic`. 3) `map`: These lists are like maps in that their elements have a non-index key used to identify them. Order is preserved upon merge. The map tag must only be used on a list with elements of type object. Defaults to atomic for arrays. # noqa: E501
+
+ :return: The x_kubernetes_list_type of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._x_kubernetes_list_type
+
+ @x_kubernetes_list_type.setter
+ def x_kubernetes_list_type(self, x_kubernetes_list_type):
+ """Sets the x_kubernetes_list_type of this V1JSONSchemaProps.
+
+ x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: 1) `atomic`: the list is treated as a single entity, like a scalar. Atomic lists will be entirely replaced when updated. This extension may be used on any type of list (struct, scalar, ...). 2) `set`: Sets are lists that must not have multiple items with the same value. Each value must be a scalar, an object with x-kubernetes-map-type `atomic` or an array with x-kubernetes-list-type `atomic`. 3) `map`: These lists are like maps in that their elements have a non-index key used to identify them. Order is preserved upon merge. The map tag must only be used on a list with elements of type object. Defaults to atomic for arrays. # noqa: E501
+
+ :param x_kubernetes_list_type: The x_kubernetes_list_type of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._x_kubernetes_list_type = x_kubernetes_list_type
+
+ @property
+ def x_kubernetes_map_type(self):
+ """Gets the x_kubernetes_map_type of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: 1) `granular`: These maps are actual maps (key-value pairs) and each fields are independent from each other (they can each be manipulated by separate actors). This is the default behaviour for all maps. 2) `atomic`: the list is treated as a single entity, like a scalar. Atomic maps will be entirely replaced when updated. # noqa: E501
+
+ :return: The x_kubernetes_map_type of this V1JSONSchemaProps. # noqa: E501
+ :rtype: str
+ """
+ return self._x_kubernetes_map_type
+
+ @x_kubernetes_map_type.setter
+ def x_kubernetes_map_type(self, x_kubernetes_map_type):
+ """Sets the x_kubernetes_map_type of this V1JSONSchemaProps.
+
+ x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: 1) `granular`: These maps are actual maps (key-value pairs) and each fields are independent from each other (they can each be manipulated by separate actors). This is the default behaviour for all maps. 2) `atomic`: the list is treated as a single entity, like a scalar. Atomic maps will be entirely replaced when updated. # noqa: E501
+
+ :param x_kubernetes_map_type: The x_kubernetes_map_type of this V1JSONSchemaProps. # noqa: E501
+ :type: str
+ """
+
+ self._x_kubernetes_map_type = x_kubernetes_map_type
+
+ @property
+ def x_kubernetes_preserve_unknown_fields(self):
+ """Gets the x_kubernetes_preserve_unknown_fields of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. # noqa: E501
+
+ :return: The x_kubernetes_preserve_unknown_fields of this V1JSONSchemaProps. # noqa: E501
+ :rtype: bool
+ """
+ return self._x_kubernetes_preserve_unknown_fields
+
+ @x_kubernetes_preserve_unknown_fields.setter
+ def x_kubernetes_preserve_unknown_fields(self, x_kubernetes_preserve_unknown_fields):
+ """Sets the x_kubernetes_preserve_unknown_fields of this V1JSONSchemaProps.
+
+ x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. # noqa: E501
+
+ :param x_kubernetes_preserve_unknown_fields: The x_kubernetes_preserve_unknown_fields of this V1JSONSchemaProps. # noqa: E501
+ :type: bool
+ """
+
+ self._x_kubernetes_preserve_unknown_fields = x_kubernetes_preserve_unknown_fields
+
+ @property
+ def x_kubernetes_validations(self):
+ """Gets the x_kubernetes_validations of this V1JSONSchemaProps. # noqa: E501
+
+ x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. # noqa: E501
+
+ :return: The x_kubernetes_validations of this V1JSONSchemaProps. # noqa: E501
+ :rtype: list[V1ValidationRule]
+ """
+ return self._x_kubernetes_validations
+
+ @x_kubernetes_validations.setter
+ def x_kubernetes_validations(self, x_kubernetes_validations):
+ """Sets the x_kubernetes_validations of this V1JSONSchemaProps.
+
+ x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. # noqa: E501
+
+ :param x_kubernetes_validations: The x_kubernetes_validations of this V1JSONSchemaProps. # noqa: E501
+ :type: list[V1ValidationRule]
+ """
+
+ self._x_kubernetes_validations = x_kubernetes_validations
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1JSONSchemaProps):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1JSONSchemaProps):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_key_to_path.py b/contrib/python/kubernetes/kubernetes/client/models/v1_key_to_path.py
new file mode 100644
index 0000000000..cc4ffb322c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_key_to_path.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1KeyToPath(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'key': 'str',
        'mode': 'int',
        'path': 'str'
    }

    attribute_map = {
        'key': 'key',
        'mode': 'mode',
        'path': 'path'
    }

    def __init__(self, key=None, mode=None, path=None, local_vars_configuration=None):  # noqa: E501
        """V1KeyToPath - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._key = None
        self._mode = None
        self._path = None
        self.discriminator = None

        self.key = key
        if mode is not None:
            self.mode = mode
        self.path = path

    @property
    def key(self):
        """Gets the key of this V1KeyToPath.  # noqa: E501

        key is the key to project.  # noqa: E501

        :return: The key of this V1KeyToPath.  # noqa: E501
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """Sets the key of this V1KeyToPath.

        key is the key to project.  # noqa: E501

        :param key: The key of this V1KeyToPath.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and key is None:  # noqa: E501
            raise ValueError("Invalid value for `key`, must not be `None`")  # noqa: E501

        self._key = key

    @property
    def mode(self):
        """Gets the mode of this V1KeyToPath.  # noqa: E501

        mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.  # noqa: E501

        :return: The mode of this V1KeyToPath.  # noqa: E501
        :rtype: int
        """
        return self._mode

    @mode.setter
    def mode(self, mode):
        """Sets the mode of this V1KeyToPath.

        mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.  # noqa: E501

        :param mode: The mode of this V1KeyToPath.  # noqa: E501
        :type: int
        """

        self._mode = mode

    @property
    def path(self):
        """Gets the path of this V1KeyToPath.  # noqa: E501

        path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.  # noqa: E501

        :return: The path of this V1KeyToPath.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1KeyToPath.

        path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.  # noqa: E501

        :param path: The path of this V1KeyToPath.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501

        self._path = path

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict iteration replaces the legacy six.iteritems() py2 shim;
        # nested models are serialized recursively via their to_dict().
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1KeyToPath):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1KeyToPath):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_label_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_label_selector.py
new file mode 100644
index 0000000000..d52382b806
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_label_selector.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1LabelSelector(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'match_expressions': 'list[V1LabelSelectorRequirement]',
        'match_labels': 'dict(str, str)'
    }

    attribute_map = {
        'match_expressions': 'matchExpressions',
        'match_labels': 'matchLabels'
    }

    def __init__(self, match_expressions=None, match_labels=None, local_vars_configuration=None):  # noqa: E501
        """V1LabelSelector - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._match_expressions = None
        self._match_labels = None
        self.discriminator = None

        if match_expressions is not None:
            self.match_expressions = match_expressions
        if match_labels is not None:
            self.match_labels = match_labels

    @property
    def match_expressions(self):
        """Gets the match_expressions of this V1LabelSelector.  # noqa: E501

        matchExpressions is a list of label selector requirements. The requirements are ANDed.  # noqa: E501

        :return: The match_expressions of this V1LabelSelector.  # noqa: E501
        :rtype: list[V1LabelSelectorRequirement]
        """
        return self._match_expressions

    @match_expressions.setter
    def match_expressions(self, match_expressions):
        """Sets the match_expressions of this V1LabelSelector.

        matchExpressions is a list of label selector requirements. The requirements are ANDed.  # noqa: E501

        :param match_expressions: The match_expressions of this V1LabelSelector.  # noqa: E501
        :type: list[V1LabelSelectorRequirement]
        """

        self._match_expressions = match_expressions

    @property
    def match_labels(self):
        """Gets the match_labels of this V1LabelSelector.  # noqa: E501

        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.  # noqa: E501

        :return: The match_labels of this V1LabelSelector.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._match_labels

    @match_labels.setter
    def match_labels(self, match_labels):
        """Sets the match_labels of this V1LabelSelector.

        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.  # noqa: E501

        :param match_labels: The match_labels of this V1LabelSelector.  # noqa: E501
        :type: dict(str, str)
        """

        self._match_labels = match_labels

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict iteration replaces the legacy six.iteritems() py2 shim;
        # nested models are serialized recursively via their to_dict().
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1LabelSelector):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1LabelSelector):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_label_selector_requirement.py b/contrib/python/kubernetes/kubernetes/client/models/v1_label_selector_requirement.py
new file mode 100644
index 0000000000..600b186dca
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_label_selector_requirement.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1LabelSelectorRequirement(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'key': 'str',
        'operator': 'str',
        'values': 'list[str]'
    }

    attribute_map = {
        'key': 'key',
        'operator': 'operator',
        'values': 'values'
    }

    def __init__(self, key=None, operator=None, values=None, local_vars_configuration=None):  # noqa: E501
        """V1LabelSelectorRequirement - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._key = None
        self._operator = None
        self._values = None
        self.discriminator = None

        self.key = key
        self.operator = operator
        if values is not None:
            self.values = values

    @property
    def key(self):
        """Gets the key of this V1LabelSelectorRequirement.  # noqa: E501

        key is the label key that the selector applies to.  # noqa: E501

        :return: The key of this V1LabelSelectorRequirement.  # noqa: E501
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """Sets the key of this V1LabelSelectorRequirement.

        key is the label key that the selector applies to.  # noqa: E501

        :param key: The key of this V1LabelSelectorRequirement.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and key is None:  # noqa: E501
            raise ValueError("Invalid value for `key`, must not be `None`")  # noqa: E501

        self._key = key

    @property
    def operator(self):
        """Gets the operator of this V1LabelSelectorRequirement.  # noqa: E501

        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.  # noqa: E501

        :return: The operator of this V1LabelSelectorRequirement.  # noqa: E501
        :rtype: str
        """
        return self._operator

    @operator.setter
    def operator(self, operator):
        """Sets the operator of this V1LabelSelectorRequirement.

        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.  # noqa: E501

        :param operator: The operator of this V1LabelSelectorRequirement.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and operator is None:  # noqa: E501
            raise ValueError("Invalid value for `operator`, must not be `None`")  # noqa: E501

        self._operator = operator

    @property
    def values(self):
        """Gets the values of this V1LabelSelectorRequirement.  # noqa: E501

        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.  # noqa: E501

        :return: The values of this V1LabelSelectorRequirement.  # noqa: E501
        :rtype: list[str]
        """
        return self._values

    @values.setter
    def values(self, values):
        """Sets the values of this V1LabelSelectorRequirement.

        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.  # noqa: E501

        :param values: The values of this V1LabelSelectorRequirement.  # noqa: E501
        :type: list[str]
        """

        self._values = values

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict iteration replaces the legacy six.iteritems() py2 shim;
        # nested models are serialized recursively via their to_dict().
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1LabelSelectorRequirement):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1LabelSelectorRequirement):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_lease.py b/contrib/python/kubernetes/kubernetes/client/models/v1_lease.py
new file mode 100644
index 0000000000..c7d3aa60d6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_lease.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Lease(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1LeaseSpec'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1Lease - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec

    @property
    def api_version(self):
        """Gets the api_version of this V1Lease.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1Lease.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1Lease.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1Lease.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1Lease.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1Lease.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Lease.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1Lease.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1Lease.  # noqa: E501


        :return: The metadata of this V1Lease.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1Lease.


        :param metadata: The metadata of this V1Lease.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1Lease.  # noqa: E501


        :return: The spec of this V1Lease.  # noqa: E501
        :rtype: V1LeaseSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1Lease.


        :param spec: The spec of this V1Lease.  # noqa: E501
        :type: V1LeaseSpec
        """

        self._spec = spec

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict iteration replaces the legacy six.iteritems() py2 shim;
        # nested models are serialized recursively via their to_dict().
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Lease):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Lease):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_lease_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_lease_list.py
new file mode 100644
index 0000000000..7d9c424dd6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_lease_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1LeaseList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Lease]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1LeaseList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1LeaseList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1LeaseList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1LeaseList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1LeaseList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1LeaseList.  # noqa: E501

        items is a list of schema objects.  # noqa: E501

        :return: The items of this V1LeaseList.  # noqa: E501
        :rtype: list[V1Lease]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1LeaseList.

        items is a list of schema objects.  # noqa: E501

        :param items: The items of this V1LeaseList.  # noqa: E501
        :type: list[V1Lease]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1LeaseList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1LeaseList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1LeaseList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1LeaseList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1LeaseList.  # noqa: E501


        :return: The metadata of this V1LeaseList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1LeaseList.


        :param metadata: The metadata of this V1LeaseList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict iteration replaces the legacy six.iteritems() py2 shim;
        # nested models are serialized recursively via their to_dict().
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1LeaseList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1LeaseList):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_lease_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_lease_spec.py
new file mode 100644
index 0000000000..cc0fb6ff3b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_lease_spec.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1LeaseSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'acquire_time': 'datetime',
        'holder_identity': 'str',
        'lease_duration_seconds': 'int',
        'lease_transitions': 'int',
        'renew_time': 'datetime'
    }

    attribute_map = {
        'acquire_time': 'acquireTime',
        'holder_identity': 'holderIdentity',
        'lease_duration_seconds': 'leaseDurationSeconds',
        'lease_transitions': 'leaseTransitions',
        'renew_time': 'renewTime'
    }

    def __init__(self, acquire_time=None, holder_identity=None, lease_duration_seconds=None, lease_transitions=None, renew_time=None, local_vars_configuration=None):  # noqa: E501
        """V1LeaseSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._acquire_time = None
        self._holder_identity = None
        self._lease_duration_seconds = None
        self._lease_transitions = None
        self._renew_time = None
        self.discriminator = None

        if acquire_time is not None:
            self.acquire_time = acquire_time
        if holder_identity is not None:
            self.holder_identity = holder_identity
        if lease_duration_seconds is not None:
            self.lease_duration_seconds = lease_duration_seconds
        if lease_transitions is not None:
            self.lease_transitions = lease_transitions
        if renew_time is not None:
            self.renew_time = renew_time

    @property
    def acquire_time(self):
        """Gets the acquire_time of this V1LeaseSpec.  # noqa: E501

        acquireTime is a time when the current lease was acquired.  # noqa: E501

        :return: The acquire_time of this V1LeaseSpec.  # noqa: E501
        :rtype: datetime
        """
        return self._acquire_time

    @acquire_time.setter
    def acquire_time(self, acquire_time):
        """Sets the acquire_time of this V1LeaseSpec.

        acquireTime is a time when the current lease was acquired.  # noqa: E501

        :param acquire_time: The acquire_time of this V1LeaseSpec.  # noqa: E501
        :type: datetime
        """

        self._acquire_time = acquire_time

    @property
    def holder_identity(self):
        """Gets the holder_identity of this V1LeaseSpec.  # noqa: E501

        holderIdentity contains the identity of the holder of a current lease.  # noqa: E501

        :return: The holder_identity of this V1LeaseSpec.  # noqa: E501
        :rtype: str
        """
        return self._holder_identity

    @holder_identity.setter
    def holder_identity(self, holder_identity):
        """Sets the holder_identity of this V1LeaseSpec.

        holderIdentity contains the identity of the holder of a current lease.  # noqa: E501

        :param holder_identity: The holder_identity of this V1LeaseSpec.  # noqa: E501
        :type: str
        """

        self._holder_identity = holder_identity

    @property
    def lease_duration_seconds(self):
        """Gets the lease_duration_seconds of this V1LeaseSpec.  # noqa: E501

        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.  # noqa: E501

        :return: The lease_duration_seconds of this V1LeaseSpec.  # noqa: E501
        :rtype: int
        """
        return self._lease_duration_seconds

    @lease_duration_seconds.setter
    def lease_duration_seconds(self, lease_duration_seconds):
        """Sets the lease_duration_seconds of this V1LeaseSpec.

        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.  # noqa: E501

        :param lease_duration_seconds: The lease_duration_seconds of this V1LeaseSpec.  # noqa: E501
        :type: int
        """

        self._lease_duration_seconds = lease_duration_seconds

    @property
    def lease_transitions(self):
        """Gets the lease_transitions of this V1LeaseSpec.  # noqa: E501

        leaseTransitions is the number of transitions of a lease between holders.  # noqa: E501

        :return: The lease_transitions of this V1LeaseSpec.  # noqa: E501
        :rtype: int
        """
        return self._lease_transitions

    @lease_transitions.setter
    def lease_transitions(self, lease_transitions):
        """Sets the lease_transitions of this V1LeaseSpec.

        leaseTransitions is the number of transitions of a lease between holders.  # noqa: E501

        :param lease_transitions: The lease_transitions of this V1LeaseSpec.  # noqa: E501
        :type: int
        """

        self._lease_transitions = lease_transitions

    @property
    def renew_time(self):
        """Gets the renew_time of this V1LeaseSpec.  # noqa: E501

        renewTime is a time when the current holder of a lease has last updated the lease.  # noqa: E501

        :return: The renew_time of this V1LeaseSpec.  # noqa: E501
        :rtype: datetime
        """
        return self._renew_time

    @renew_time.setter
    def renew_time(self, renew_time):
        """Sets the renew_time of this V1LeaseSpec.

        renewTime is a time when the current holder of a lease has last updated the lease.  # noqa: E501

        :param renew_time: The renew_time of this V1LeaseSpec.  # noqa: E501
        :type: datetime
        """

        self._renew_time = renew_time

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Native dict iteration replaces the legacy six.iteritems() py2 shim;
        # nested models are serialized recursively via their to_dict().
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1LeaseSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1LeaseSpec):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle.py b/contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle.py
new file mode 100644
index 0000000000..fd01c97ce6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Lifecycle(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'post_start': 'V1LifecycleHandler',
+ 'pre_stop': 'V1LifecycleHandler'
+ }
+
+ attribute_map = {
+ 'post_start': 'postStart',
+ 'pre_stop': 'preStop'
+ }
+
+ def __init__(self, post_start=None, pre_stop=None, local_vars_configuration=None): # noqa: E501
+ """V1Lifecycle - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._post_start = None
+ self._pre_stop = None
+ self.discriminator = None
+
+ if post_start is not None:
+ self.post_start = post_start
+ if pre_stop is not None:
+ self.pre_stop = pre_stop
+
+ @property
+ def post_start(self):
+ """Gets the post_start of this V1Lifecycle. # noqa: E501
+
+
+ :return: The post_start of this V1Lifecycle. # noqa: E501
+ :rtype: V1LifecycleHandler
+ """
+ return self._post_start
+
+ @post_start.setter
+ def post_start(self, post_start):
+ """Sets the post_start of this V1Lifecycle.
+
+
+ :param post_start: The post_start of this V1Lifecycle. # noqa: E501
+ :type: V1LifecycleHandler
+ """
+
+ self._post_start = post_start
+
+ @property
+ def pre_stop(self):
+ """Gets the pre_stop of this V1Lifecycle. # noqa: E501
+
+
+ :return: The pre_stop of this V1Lifecycle. # noqa: E501
+ :rtype: V1LifecycleHandler
+ """
+ return self._pre_stop
+
+ @pre_stop.setter
+ def pre_stop(self, pre_stop):
+ """Sets the pre_stop of this V1Lifecycle.
+
+
+ :param pre_stop: The pre_stop of this V1Lifecycle. # noqa: E501
+ :type: V1LifecycleHandler
+ """
+
+ self._pre_stop = pre_stop
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Lifecycle):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Lifecycle):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle_handler.py b/contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle_handler.py
new file mode 100644
index 0000000000..233a9973f8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_lifecycle_handler.py
@@ -0,0 +1,172 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LifecycleHandler(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ '_exec': 'V1ExecAction',
+ 'http_get': 'V1HTTPGetAction',
+ 'tcp_socket': 'V1TCPSocketAction'
+ }
+
+ attribute_map = {
+ '_exec': 'exec',
+ 'http_get': 'httpGet',
+ 'tcp_socket': 'tcpSocket'
+ }
+
+ def __init__(self, _exec=None, http_get=None, tcp_socket=None, local_vars_configuration=None): # noqa: E501
+ """V1LifecycleHandler - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self.__exec = None
+ self._http_get = None
+ self._tcp_socket = None
+ self.discriminator = None
+
+ if _exec is not None:
+ self._exec = _exec
+ if http_get is not None:
+ self.http_get = http_get
+ if tcp_socket is not None:
+ self.tcp_socket = tcp_socket
+
+ @property
+ def _exec(self):
+ """Gets the _exec of this V1LifecycleHandler. # noqa: E501
+
+
+ :return: The _exec of this V1LifecycleHandler. # noqa: E501
+ :rtype: V1ExecAction
+ """
+ return self.__exec
+
+ @_exec.setter
+ def _exec(self, _exec):
+ """Sets the _exec of this V1LifecycleHandler.
+
+
+ :param _exec: The _exec of this V1LifecycleHandler. # noqa: E501
+ :type: V1ExecAction
+ """
+
+ self.__exec = _exec
+
+ @property
+ def http_get(self):
+ """Gets the http_get of this V1LifecycleHandler. # noqa: E501
+
+
+ :return: The http_get of this V1LifecycleHandler. # noqa: E501
+ :rtype: V1HTTPGetAction
+ """
+ return self._http_get
+
+ @http_get.setter
+ def http_get(self, http_get):
+ """Sets the http_get of this V1LifecycleHandler.
+
+
+ :param http_get: The http_get of this V1LifecycleHandler. # noqa: E501
+ :type: V1HTTPGetAction
+ """
+
+ self._http_get = http_get
+
+ @property
+ def tcp_socket(self):
+ """Gets the tcp_socket of this V1LifecycleHandler. # noqa: E501
+
+
+ :return: The tcp_socket of this V1LifecycleHandler. # noqa: E501
+ :rtype: V1TCPSocketAction
+ """
+ return self._tcp_socket
+
+ @tcp_socket.setter
+ def tcp_socket(self, tcp_socket):
+ """Sets the tcp_socket of this V1LifecycleHandler.
+
+
+ :param tcp_socket: The tcp_socket of this V1LifecycleHandler. # noqa: E501
+ :type: V1TCPSocketAction
+ """
+
+ self._tcp_socket = tcp_socket
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LifecycleHandler):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LifecycleHandler):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range.py b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range.py
new file mode 100644
index 0000000000..16d0a80cae
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LimitRange(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1LimitRangeSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1LimitRange - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1LimitRange. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1LimitRange. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1LimitRange.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1LimitRange. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1LimitRange. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1LimitRange. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1LimitRange.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1LimitRange. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1LimitRange. # noqa: E501
+
+
+ :return: The metadata of this V1LimitRange. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1LimitRange.
+
+
+ :param metadata: The metadata of this V1LimitRange. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1LimitRange. # noqa: E501
+
+
+ :return: The spec of this V1LimitRange. # noqa: E501
+ :rtype: V1LimitRangeSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1LimitRange.
+
+
+ :param spec: The spec of this V1LimitRange. # noqa: E501
+ :type: V1LimitRangeSpec
+ """
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LimitRange):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LimitRange):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_item.py b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_item.py
new file mode 100644
index 0000000000..7e98cd5789
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_item.py
@@ -0,0 +1,263 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LimitRangeItem(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'default': 'dict(str, str)',
+ 'default_request': 'dict(str, str)',
+ 'max': 'dict(str, str)',
+ 'max_limit_request_ratio': 'dict(str, str)',
+ 'min': 'dict(str, str)',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'default': 'default',
+ 'default_request': 'defaultRequest',
+ 'max': 'max',
+ 'max_limit_request_ratio': 'maxLimitRequestRatio',
+ 'min': 'min',
+ 'type': 'type'
+ }
+
+ def __init__(self, default=None, default_request=None, max=None, max_limit_request_ratio=None, min=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1LimitRangeItem - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._default = None
+ self._default_request = None
+ self._max = None
+ self._max_limit_request_ratio = None
+ self._min = None
+ self._type = None
+ self.discriminator = None
+
+ if default is not None:
+ self.default = default
+ if default_request is not None:
+ self.default_request = default_request
+ if max is not None:
+ self.max = max
+ if max_limit_request_ratio is not None:
+ self.max_limit_request_ratio = max_limit_request_ratio
+ if min is not None:
+ self.min = min
+ self.type = type
+
+ @property
+ def default(self):
+ """Gets the default of this V1LimitRangeItem. # noqa: E501
+
+ Default resource requirement limit value by resource name if resource limit is omitted. # noqa: E501
+
+ :return: The default of this V1LimitRangeItem. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._default
+
+ @default.setter
+ def default(self, default):
+ """Sets the default of this V1LimitRangeItem.
+
+ Default resource requirement limit value by resource name if resource limit is omitted. # noqa: E501
+
+ :param default: The default of this V1LimitRangeItem. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._default = default
+
+ @property
+ def default_request(self):
+ """Gets the default_request of this V1LimitRangeItem. # noqa: E501
+
+ DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. # noqa: E501
+
+ :return: The default_request of this V1LimitRangeItem. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._default_request
+
+ @default_request.setter
+ def default_request(self, default_request):
+ """Sets the default_request of this V1LimitRangeItem.
+
+ DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. # noqa: E501
+
+ :param default_request: The default_request of this V1LimitRangeItem. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._default_request = default_request
+
+ @property
+ def max(self):
+ """Gets the max of this V1LimitRangeItem. # noqa: E501
+
+ Max usage constraints on this kind by resource name. # noqa: E501
+
+ :return: The max of this V1LimitRangeItem. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._max
+
+ @max.setter
+ def max(self, max):
+ """Sets the max of this V1LimitRangeItem.
+
+ Max usage constraints on this kind by resource name. # noqa: E501
+
+ :param max: The max of this V1LimitRangeItem. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._max = max
+
+ @property
+ def max_limit_request_ratio(self):
+ """Gets the max_limit_request_ratio of this V1LimitRangeItem. # noqa: E501
+
+ MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. # noqa: E501
+
+ :return: The max_limit_request_ratio of this V1LimitRangeItem. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._max_limit_request_ratio
+
+ @max_limit_request_ratio.setter
+ def max_limit_request_ratio(self, max_limit_request_ratio):
+ """Sets the max_limit_request_ratio of this V1LimitRangeItem.
+
+ MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. # noqa: E501
+
+ :param max_limit_request_ratio: The max_limit_request_ratio of this V1LimitRangeItem. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._max_limit_request_ratio = max_limit_request_ratio
+
+ @property
+ def min(self):
+ """Gets the min of this V1LimitRangeItem. # noqa: E501
+
+ Min usage constraints on this kind by resource name. # noqa: E501
+
+ :return: The min of this V1LimitRangeItem. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._min
+
+ @min.setter
+ def min(self, min):
+ """Sets the min of this V1LimitRangeItem.
+
+ Min usage constraints on this kind by resource name. # noqa: E501
+
+ :param min: The min of this V1LimitRangeItem. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._min = min
+
+ @property
+ def type(self):
+ """Gets the type of this V1LimitRangeItem. # noqa: E501
+
+ Type of resource that this limit applies to. # noqa: E501
+
+ :return: The type of this V1LimitRangeItem. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1LimitRangeItem.
+
+ Type of resource that this limit applies to. # noqa: E501
+
+ :param type: The type of this V1LimitRangeItem. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LimitRangeItem):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LimitRangeItem):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_list.py
new file mode 100644
index 0000000000..969318bbe6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LimitRangeList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1LimitRange]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1LimitRangeList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1LimitRangeList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1LimitRangeList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1LimitRangeList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1LimitRangeList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1LimitRangeList. # noqa: E501
+
+ Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
+
+ :return: The items of this V1LimitRangeList. # noqa: E501
+ :rtype: list[V1LimitRange]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1LimitRangeList.
+
+ Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
+
+ :param items: The items of this V1LimitRangeList. # noqa: E501
+ :type: list[V1LimitRange]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1LimitRangeList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1LimitRangeList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1LimitRangeList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1LimitRangeList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1LimitRangeList. # noqa: E501
+
+
+ :return: The metadata of this V1LimitRangeList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1LimitRangeList.
+
+
+ :param metadata: The metadata of this V1LimitRangeList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LimitRangeList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LimitRangeList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_spec.py
new file mode 100644
index 0000000000..f3d0f86268
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_limit_range_spec.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LimitRangeSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'limits': 'list[V1LimitRangeItem]'
+ }
+
+ attribute_map = {
+ 'limits': 'limits'
+ }
+
+ def __init__(self, limits=None, local_vars_configuration=None): # noqa: E501
+ """V1LimitRangeSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._limits = None
+ self.discriminator = None
+
+ self.limits = limits
+
+ @property
+ def limits(self):
+ """Gets the limits of this V1LimitRangeSpec. # noqa: E501
+
+ Limits is the list of LimitRangeItem objects that are enforced. # noqa: E501
+
+ :return: The limits of this V1LimitRangeSpec. # noqa: E501
+ :rtype: list[V1LimitRangeItem]
+ """
+ return self._limits
+
+ @limits.setter
+ def limits(self, limits):
+ """Sets the limits of this V1LimitRangeSpec.
+
+ Limits is the list of LimitRangeItem objects that are enforced. # noqa: E501
+
+ :param limits: The limits of this V1LimitRangeSpec. # noqa: E501
+ :type: list[V1LimitRangeItem]
+ """
+ if self.local_vars_configuration.client_side_validation and limits is None: # noqa: E501
+ raise ValueError("Invalid value for `limits`, must not be `None`") # noqa: E501
+
+ self._limits = limits
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LimitRangeSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LimitRangeSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_list_meta.py b/contrib/python/kubernetes/kubernetes/client/models/v1_list_meta.py
new file mode 100644
index 0000000000..c13f5ffc65
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_list_meta.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ListMeta(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ '_continue': 'str',
+ 'remaining_item_count': 'int',
+ 'resource_version': 'str',
+ 'self_link': 'str'
+ }
+
+ attribute_map = {
+ '_continue': 'continue',
+ 'remaining_item_count': 'remainingItemCount',
+ 'resource_version': 'resourceVersion',
+ 'self_link': 'selfLink'
+ }
+
+ def __init__(self, _continue=None, remaining_item_count=None, resource_version=None, self_link=None, local_vars_configuration=None): # noqa: E501
+ """V1ListMeta - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self.__continue = None
+ self._remaining_item_count = None
+ self._resource_version = None
+ self._self_link = None
+ self.discriminator = None
+
+ if _continue is not None:
+ self._continue = _continue
+ if remaining_item_count is not None:
+ self.remaining_item_count = remaining_item_count
+ if resource_version is not None:
+ self.resource_version = resource_version
+ if self_link is not None:
+ self.self_link = self_link
+
+ @property
+ def _continue(self):
+ """Gets the _continue of this V1ListMeta. # noqa: E501
+
+ continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. # noqa: E501
+
+ :return: The _continue of this V1ListMeta. # noqa: E501
+ :rtype: str
+ """
+ return self.__continue
+
+ @_continue.setter
+ def _continue(self, _continue):
+ """Sets the _continue of this V1ListMeta.
+
+ continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. # noqa: E501
+
+ :param _continue: The _continue of this V1ListMeta. # noqa: E501
+ :type: str
+ """
+
+ self.__continue = _continue
+
+ @property
+ def remaining_item_count(self):
+ """Gets the remaining_item_count of this V1ListMeta. # noqa: E501
+
+ remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact. # noqa: E501
+
+ :return: The remaining_item_count of this V1ListMeta. # noqa: E501
+ :rtype: int
+ """
+ return self._remaining_item_count
+
+ @remaining_item_count.setter
+ def remaining_item_count(self, remaining_item_count):
+ """Sets the remaining_item_count of this V1ListMeta.
+
+ remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact. # noqa: E501
+
+ :param remaining_item_count: The remaining_item_count of this V1ListMeta. # noqa: E501
+ :type: int
+ """
+
+ self._remaining_item_count = remaining_item_count
+
+ @property
+ def resource_version(self):
+ """Gets the resource_version of this V1ListMeta. # noqa: E501
+
+ String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
+
+ :return: The resource_version of this V1ListMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_version
+
+ @resource_version.setter
+ def resource_version(self, resource_version):
+ """Sets the resource_version of this V1ListMeta.
+
+ String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
+
+ :param resource_version: The resource_version of this V1ListMeta. # noqa: E501
+ :type: str
+ """
+
+ self._resource_version = resource_version
+
+ @property
+ def self_link(self):
+ """Gets the self_link of this V1ListMeta. # noqa: E501
+
+ Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. # noqa: E501
+
+ :return: The self_link of this V1ListMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._self_link
+
+ @self_link.setter
+ def self_link(self, self_link):
+ """Sets the self_link of this V1ListMeta.
+
+ Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. # noqa: E501
+
+ :param self_link: The self_link of this V1ListMeta. # noqa: E501
+ :type: str
+ """
+
+ self._self_link = self_link
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ListMeta):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ListMeta):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_ingress.py b/contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_ingress.py
new file mode 100644
index 0000000000..744cd02587
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_ingress.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LoadBalancerIngress(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'hostname': 'str',
+ 'ip': 'str',
+ 'ports': 'list[V1PortStatus]'
+ }
+
+ attribute_map = {
+ 'hostname': 'hostname',
+ 'ip': 'ip',
+ 'ports': 'ports'
+ }
+
+ def __init__(self, hostname=None, ip=None, ports=None, local_vars_configuration=None): # noqa: E501
+ """V1LoadBalancerIngress - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._hostname = None
+ self._ip = None
+ self._ports = None
+ self.discriminator = None
+
+ if hostname is not None:
+ self.hostname = hostname
+ if ip is not None:
+ self.ip = ip
+ if ports is not None:
+ self.ports = ports
+
+ @property
+ def hostname(self):
+ """Gets the hostname of this V1LoadBalancerIngress. # noqa: E501
+
+ Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers) # noqa: E501
+
+ :return: The hostname of this V1LoadBalancerIngress. # noqa: E501
+ :rtype: str
+ """
+ return self._hostname
+
+ @hostname.setter
+ def hostname(self, hostname):
+ """Sets the hostname of this V1LoadBalancerIngress.
+
+ Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers) # noqa: E501
+
+ :param hostname: The hostname of this V1LoadBalancerIngress. # noqa: E501
+ :type: str
+ """
+
+ self._hostname = hostname
+
+ @property
+ def ip(self):
+ """Gets the ip of this V1LoadBalancerIngress. # noqa: E501
+
+ IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers) # noqa: E501
+
+ :return: The ip of this V1LoadBalancerIngress. # noqa: E501
+ :rtype: str
+ """
+ return self._ip
+
+ @ip.setter
+ def ip(self, ip):
+ """Sets the ip of this V1LoadBalancerIngress.
+
+ IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers) # noqa: E501
+
+ :param ip: The ip of this V1LoadBalancerIngress. # noqa: E501
+ :type: str
+ """
+
+ self._ip = ip
+
+ @property
+ def ports(self):
+ """Gets the ports of this V1LoadBalancerIngress. # noqa: E501
+
+ Ports is a list of records of service ports If used, every port defined in the service should have an entry in it # noqa: E501
+
+ :return: The ports of this V1LoadBalancerIngress. # noqa: E501
+ :rtype: list[V1PortStatus]
+ """
+ return self._ports
+
+ @ports.setter
+ def ports(self, ports):
+ """Sets the ports of this V1LoadBalancerIngress.
+
+ Ports is a list of records of service ports If used, every port defined in the service should have an entry in it # noqa: E501
+
+ :param ports: The ports of this V1LoadBalancerIngress. # noqa: E501
+ :type: list[V1PortStatus]
+ """
+
+ self._ports = ports
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LoadBalancerIngress):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LoadBalancerIngress):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_status.py
new file mode 100644
index 0000000000..702e77ecdb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_load_balancer_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LoadBalancerStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'ingress': 'list[V1LoadBalancerIngress]'
+ }
+
+ attribute_map = {
+ 'ingress': 'ingress'
+ }
+
+ def __init__(self, ingress=None, local_vars_configuration=None): # noqa: E501
+ """V1LoadBalancerStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._ingress = None
+ self.discriminator = None
+
+ if ingress is not None:
+ self.ingress = ingress
+
+ @property
+ def ingress(self):
+ """Gets the ingress of this V1LoadBalancerStatus. # noqa: E501
+
+ Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points. # noqa: E501
+
+ :return: The ingress of this V1LoadBalancerStatus. # noqa: E501
+ :rtype: list[V1LoadBalancerIngress]
+ """
+ return self._ingress
+
+ @ingress.setter
+ def ingress(self, ingress):
+ """Sets the ingress of this V1LoadBalancerStatus.
+
+ Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points. # noqa: E501
+
+ :param ingress: The ingress of this V1LoadBalancerStatus. # noqa: E501
+ :type: list[V1LoadBalancerIngress]
+ """
+
+ self._ingress = ingress
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LoadBalancerStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LoadBalancerStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_local_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_local_object_reference.py
new file mode 100644
index 0000000000..9b97a7a1a7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_local_object_reference.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LocalObjectReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1LocalObjectReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1LocalObjectReference. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1LocalObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1LocalObjectReference.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1LocalObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LocalObjectReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LocalObjectReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_local_subject_access_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1_local_subject_access_review.py
new file mode 100644
index 0000000000..65bd106c37
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_local_subject_access_review.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LocalSubjectAccessReview(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1SubjectAccessReviewSpec',
+ 'status': 'V1SubjectAccessReviewStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1LocalSubjectAccessReview - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1LocalSubjectAccessReview. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1LocalSubjectAccessReview. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1LocalSubjectAccessReview.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1LocalSubjectAccessReview. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1LocalSubjectAccessReview. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1LocalSubjectAccessReview. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1LocalSubjectAccessReview.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1LocalSubjectAccessReview. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1LocalSubjectAccessReview. # noqa: E501
+
+
+ :return: The metadata of this V1LocalSubjectAccessReview. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1LocalSubjectAccessReview.
+
+
+ :param metadata: The metadata of this V1LocalSubjectAccessReview. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1LocalSubjectAccessReview. # noqa: E501
+
+
+ :return: The spec of this V1LocalSubjectAccessReview. # noqa: E501
+ :rtype: V1SubjectAccessReviewSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1LocalSubjectAccessReview.
+
+
+ :param spec: The spec of this V1LocalSubjectAccessReview. # noqa: E501
+ :type: V1SubjectAccessReviewSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1LocalSubjectAccessReview. # noqa: E501
+
+
+ :return: The status of this V1LocalSubjectAccessReview. # noqa: E501
+ :rtype: V1SubjectAccessReviewStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1LocalSubjectAccessReview.
+
+
+ :param status: The status of this V1LocalSubjectAccessReview. # noqa: E501
+ :type: V1SubjectAccessReviewStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LocalSubjectAccessReview):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LocalSubjectAccessReview):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_local_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_local_volume_source.py
new file mode 100644
index 0000000000..0442d001bf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_local_volume_source.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1LocalVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'path': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'path': 'path'
+ }
+
+ def __init__(self, fs_type=None, path=None, local_vars_configuration=None): # noqa: E501
+ """V1LocalVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._path = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ self.path = path
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1LocalVolumeSource. # noqa: E501
+
+ fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified. # noqa: E501
+
+ :return: The fs_type of this V1LocalVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1LocalVolumeSource.
+
+ fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified. # noqa: E501
+
+ :param fs_type: The fs_type of this V1LocalVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def path(self):
+ """Gets the path of this V1LocalVolumeSource. # noqa: E501
+
+ path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...). # noqa: E501
+
+ :return: The path of this V1LocalVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ """Sets the path of this V1LocalVolumeSource.
+
+ path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...). # noqa: E501
+
+ :param path: The path of this V1LocalVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and path is None: # noqa: E501
+ raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501
+
+ self._path = path
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1LocalVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1LocalVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_managed_fields_entry.py b/contrib/python/kubernetes/kubernetes/client/models/v1_managed_fields_entry.py
new file mode 100644
index 0000000000..e9e0cd5b0c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_managed_fields_entry.py
@@ -0,0 +1,290 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ManagedFieldsEntry(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'fields_type': 'str',
+ 'fields_v1': 'object',
+ 'manager': 'str',
+ 'operation': 'str',
+ 'subresource': 'str',
+ 'time': 'datetime'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'fields_type': 'fieldsType',
+ 'fields_v1': 'fieldsV1',
+ 'manager': 'manager',
+ 'operation': 'operation',
+ 'subresource': 'subresource',
+ 'time': 'time'
+ }
+
+ def __init__(self, api_version=None, fields_type=None, fields_v1=None, manager=None, operation=None, subresource=None, time=None, local_vars_configuration=None): # noqa: E501
+ """V1ManagedFieldsEntry - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._fields_type = None
+ self._fields_v1 = None
+ self._manager = None
+ self._operation = None
+ self._subresource = None
+ self._time = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if fields_type is not None:
+ self.fields_type = fields_type
+ if fields_v1 is not None:
+ self.fields_v1 = fields_v1
+ if manager is not None:
+ self.manager = manager
+ if operation is not None:
+ self.operation = operation
+ if subresource is not None:
+ self.subresource = subresource
+ if time is not None:
+ self.time = time
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ManagedFieldsEntry. # noqa: E501
+
+ APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted. # noqa: E501
+
+ :return: The api_version of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ManagedFieldsEntry.
+
+ APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted. # noqa: E501
+
+ :param api_version: The api_version of this V1ManagedFieldsEntry. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def fields_type(self):
+ """Gets the fields_type of this V1ManagedFieldsEntry. # noqa: E501
+
+ FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\" # noqa: E501
+
+ :return: The fields_type of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: str
+ """
+ return self._fields_type
+
+ @fields_type.setter
+ def fields_type(self, fields_type):
+ """Sets the fields_type of this V1ManagedFieldsEntry.
+
+ FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\" # noqa: E501
+
+ :param fields_type: The fields_type of this V1ManagedFieldsEntry. # noqa: E501
+ :type: str
+ """
+
+ self._fields_type = fields_type
+
+ @property
+ def fields_v1(self):
+ """Gets the fields_v1 of this V1ManagedFieldsEntry. # noqa: E501
+
+ FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type. # noqa: E501
+
+ :return: The fields_v1 of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: object
+ """
+ return self._fields_v1
+
+ @fields_v1.setter
+ def fields_v1(self, fields_v1):
+ """Sets the fields_v1 of this V1ManagedFieldsEntry.
+
+ FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type. # noqa: E501
+
+ :param fields_v1: The fields_v1 of this V1ManagedFieldsEntry. # noqa: E501
+ :type: object
+ """
+
+ self._fields_v1 = fields_v1
+
+ @property
+ def manager(self):
+ """Gets the manager of this V1ManagedFieldsEntry. # noqa: E501
+
+ Manager is an identifier of the workflow managing these fields. # noqa: E501
+
+ :return: The manager of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: str
+ """
+ return self._manager
+
+ @manager.setter
+ def manager(self, manager):
+ """Sets the manager of this V1ManagedFieldsEntry.
+
+ Manager is an identifier of the workflow managing these fields. # noqa: E501
+
+ :param manager: The manager of this V1ManagedFieldsEntry. # noqa: E501
+ :type: str
+ """
+
+ self._manager = manager
+
+ @property
+ def operation(self):
+ """Gets the operation of this V1ManagedFieldsEntry. # noqa: E501
+
+ Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. # noqa: E501
+
+ :return: The operation of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: str
+ """
+ return self._operation
+
+ @operation.setter
+ def operation(self, operation):
+ """Sets the operation of this V1ManagedFieldsEntry.
+
+ Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. # noqa: E501
+
+ :param operation: The operation of this V1ManagedFieldsEntry. # noqa: E501
+ :type: str
+ """
+
+ self._operation = operation
+
+ @property
+ def subresource(self):
+ """Gets the subresource of this V1ManagedFieldsEntry. # noqa: E501
+
+ Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. # noqa: E501
+
+ :return: The subresource of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: str
+ """
+ return self._subresource
+
+ @subresource.setter
+ def subresource(self, subresource):
+ """Sets the subresource of this V1ManagedFieldsEntry.
+
+ Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. # noqa: E501
+
+ :param subresource: The subresource of this V1ManagedFieldsEntry. # noqa: E501
+ :type: str
+ """
+
+ self._subresource = subresource
+
+ @property
+ def time(self):
+ """Gets the time of this V1ManagedFieldsEntry. # noqa: E501
+
+ Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over. # noqa: E501
+
+ :return: The time of this V1ManagedFieldsEntry. # noqa: E501
+ :rtype: datetime
+ """
+ return self._time
+
+ @time.setter
+ def time(self, time):
+ """Sets the time of this V1ManagedFieldsEntry.
+
+ Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over. # noqa: E501
+
+ :param time: The time of this V1ManagedFieldsEntry. # noqa: E501
+ :type: datetime
+ """
+
+ self._time = time
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ManagedFieldsEntry):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ManagedFieldsEntry):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_match_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_match_condition.py
new file mode 100644
index 0000000000..15801108d6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_match_condition.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1MatchCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'name': 'name'
+ }
+
+ def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1MatchCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._name = None
+ self.discriminator = None
+
+ self.expression = expression
+ self.name = name
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1MatchCondition. # noqa: E501
+
+ Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
+
+ :return: The expression of this V1MatchCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1MatchCondition.
+
+ Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
+
+ :param expression: The expression of this V1MatchCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def name(self):
+ """Gets the name of this V1MatchCondition. # noqa: E501
+
+ Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
+
+ :return: The name of this V1MatchCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1MatchCondition.
+
+ Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
+
+ :param name: The name of this V1MatchCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1MatchCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1MatchCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook.py b/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook.py
new file mode 100644
index 0000000000..17dd2b9ccc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook.py
@@ -0,0 +1,428 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1MutatingWebhook(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'admission_review_versions': 'list[str]',
+ 'client_config': 'AdmissionregistrationV1WebhookClientConfig',
+ 'failure_policy': 'str',
+ 'match_conditions': 'list[V1MatchCondition]',
+ 'match_policy': 'str',
+ 'name': 'str',
+ 'namespace_selector': 'V1LabelSelector',
+ 'object_selector': 'V1LabelSelector',
+ 'reinvocation_policy': 'str',
+ 'rules': 'list[V1RuleWithOperations]',
+ 'side_effects': 'str',
+ 'timeout_seconds': 'int'
+ }
+
+ attribute_map = {
+ 'admission_review_versions': 'admissionReviewVersions',
+ 'client_config': 'clientConfig',
+ 'failure_policy': 'failurePolicy',
+ 'match_conditions': 'matchConditions',
+ 'match_policy': 'matchPolicy',
+ 'name': 'name',
+ 'namespace_selector': 'namespaceSelector',
+ 'object_selector': 'objectSelector',
+ 'reinvocation_policy': 'reinvocationPolicy',
+ 'rules': 'rules',
+ 'side_effects': 'sideEffects',
+ 'timeout_seconds': 'timeoutSeconds'
+ }
+
+ def __init__(self, admission_review_versions=None, client_config=None, failure_policy=None, match_conditions=None, match_policy=None, name=None, namespace_selector=None, object_selector=None, reinvocation_policy=None, rules=None, side_effects=None, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
+ """V1MutatingWebhook - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._admission_review_versions = None
+ self._client_config = None
+ self._failure_policy = None
+ self._match_conditions = None
+ self._match_policy = None
+ self._name = None
+ self._namespace_selector = None
+ self._object_selector = None
+ self._reinvocation_policy = None
+ self._rules = None
+ self._side_effects = None
+ self._timeout_seconds = None
+ self.discriminator = None
+
+ self.admission_review_versions = admission_review_versions
+ self.client_config = client_config
+ if failure_policy is not None:
+ self.failure_policy = failure_policy
+ if match_conditions is not None:
+ self.match_conditions = match_conditions
+ if match_policy is not None:
+ self.match_policy = match_policy
+ self.name = name
+ if namespace_selector is not None:
+ self.namespace_selector = namespace_selector
+ if object_selector is not None:
+ self.object_selector = object_selector
+ if reinvocation_policy is not None:
+ self.reinvocation_policy = reinvocation_policy
+ if rules is not None:
+ self.rules = rules
+ self.side_effects = side_effects
+ if timeout_seconds is not None:
+ self.timeout_seconds = timeout_seconds
+
+ @property
+ def admission_review_versions(self):
+ """Gets the admission_review_versions of this V1MutatingWebhook. # noqa: E501
+
+ AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. # noqa: E501
+
+ :return: The admission_review_versions of this V1MutatingWebhook. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._admission_review_versions
+
+ @admission_review_versions.setter
+ def admission_review_versions(self, admission_review_versions):
+ """Sets the admission_review_versions of this V1MutatingWebhook.
+
+ AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. # noqa: E501
+
+ :param admission_review_versions: The admission_review_versions of this V1MutatingWebhook. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and admission_review_versions is None: # noqa: E501
+ raise ValueError("Invalid value for `admission_review_versions`, must not be `None`") # noqa: E501
+
+ self._admission_review_versions = admission_review_versions
+
+ @property
+ def client_config(self):
+ """Gets the client_config of this V1MutatingWebhook. # noqa: E501
+
+
+ :return: The client_config of this V1MutatingWebhook. # noqa: E501
+ :rtype: AdmissionregistrationV1WebhookClientConfig
+ """
+ return self._client_config
+
+ @client_config.setter
+ def client_config(self, client_config):
+ """Sets the client_config of this V1MutatingWebhook.
+
+
+ :param client_config: The client_config of this V1MutatingWebhook. # noqa: E501
+ :type: AdmissionregistrationV1WebhookClientConfig
+ """
+ if self.local_vars_configuration.client_side_validation and client_config is None: # noqa: E501
+ raise ValueError("Invalid value for `client_config`, must not be `None`") # noqa: E501
+
+ self._client_config = client_config
+
+ @property
+ def failure_policy(self):
+ """Gets the failure_policy of this V1MutatingWebhook. # noqa: E501
+
+ FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
+
+ :return: The failure_policy of this V1MutatingWebhook. # noqa: E501
+ :rtype: str
+ """
+ return self._failure_policy
+
+ @failure_policy.setter
+ def failure_policy(self, failure_policy):
+ """Sets the failure_policy of this V1MutatingWebhook.
+
+ FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
+
+ :param failure_policy: The failure_policy of this V1MutatingWebhook. # noqa: E501
+ :type: str
+ """
+
+ self._failure_policy = failure_policy
+
+ @property
+ def match_conditions(self):
+ """Gets the match_conditions of this V1MutatingWebhook. # noqa: E501
+
+ MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. 2. If ALL matchConditions evaluate to TRUE, the webhook is called. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the error is ignored and the webhook is skipped This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. # noqa: E501
+
+ :return: The match_conditions of this V1MutatingWebhook. # noqa: E501
+ :rtype: list[V1MatchCondition]
+ """
+ return self._match_conditions
+
+ @match_conditions.setter
+ def match_conditions(self, match_conditions):
+ """Sets the match_conditions of this V1MutatingWebhook.
+
+ MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. 2. If ALL matchConditions evaluate to TRUE, the webhook is called. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the error is ignored and the webhook is skipped This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. # noqa: E501
+
+ :param match_conditions: The match_conditions of this V1MutatingWebhook. # noqa: E501
+ :type: list[V1MatchCondition]
+ """
+
+ self._match_conditions = match_conditions
+
+ @property
+ def match_policy(self):
+ """Gets the match_policy of this V1MutatingWebhook. # noqa: E501
+
+ matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. Defaults to \"Equivalent\" # noqa: E501
+
+ :return: The match_policy of this V1MutatingWebhook. # noqa: E501
+ :rtype: str
+ """
+ return self._match_policy
+
+ @match_policy.setter
+ def match_policy(self, match_policy):
+ """Sets the match_policy of this V1MutatingWebhook.
+
+ matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. Defaults to \"Equivalent\" # noqa: E501
+
+ :param match_policy: The match_policy of this V1MutatingWebhook. # noqa: E501
+ :type: str
+ """
+
+ self._match_policy = match_policy
+
+ @property
+ def name(self):
+ """Gets the name of this V1MutatingWebhook. # noqa: E501
+
+ The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required. # noqa: E501
+
+ :return: The name of this V1MutatingWebhook. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1MutatingWebhook.
+
+ The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required. # noqa: E501
+
+ :param name: The name of this V1MutatingWebhook. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace_selector(self):
+ """Gets the namespace_selector of this V1MutatingWebhook. # noqa: E501
+
+
+ :return: The namespace_selector of this V1MutatingWebhook. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._namespace_selector
+
+ @namespace_selector.setter
+ def namespace_selector(self, namespace_selector):
+ """Sets the namespace_selector of this V1MutatingWebhook.
+
+
+ :param namespace_selector: The namespace_selector of this V1MutatingWebhook. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._namespace_selector = namespace_selector
+
+ @property
+ def object_selector(self):
+ """Gets the object_selector of this V1MutatingWebhook. # noqa: E501
+
+
+ :return: The object_selector of this V1MutatingWebhook. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._object_selector
+
+ @object_selector.setter
+ def object_selector(self, object_selector):
+ """Sets the object_selector of this V1MutatingWebhook.
+
+
+ :param object_selector: The object_selector of this V1MutatingWebhook. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._object_selector = object_selector
+
+ @property
+ def reinvocation_policy(self):
+ """Gets the reinvocation_policy of this V1MutatingWebhook. # noqa: E501
+
+ reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\". Never: the webhook will not be called more than once in a single admission evaluation. IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. Defaults to \"Never\". # noqa: E501
+
+ :return: The reinvocation_policy of this V1MutatingWebhook. # noqa: E501
+ :rtype: str
+ """
+ return self._reinvocation_policy
+
+ @reinvocation_policy.setter
+ def reinvocation_policy(self, reinvocation_policy):
+ """Sets the reinvocation_policy of this V1MutatingWebhook.
+
+ reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\". Never: the webhook will not be called more than once in a single admission evaluation. IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. Defaults to \"Never\". # noqa: E501
+
+ :param reinvocation_policy: The reinvocation_policy of this V1MutatingWebhook. # noqa: E501
+ :type: str
+ """
+
+ self._reinvocation_policy = reinvocation_policy
+
+ @property
+ def rules(self):
+ """Gets the rules of this V1MutatingWebhook. # noqa: E501
+
+ Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. # noqa: E501
+
+ :return: The rules of this V1MutatingWebhook. # noqa: E501
+ :rtype: list[V1RuleWithOperations]
+ """
+ return self._rules
+
+ @rules.setter
+ def rules(self, rules):
+ """Sets the rules of this V1MutatingWebhook.
+
+ Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. # noqa: E501
+
+ :param rules: The rules of this V1MutatingWebhook. # noqa: E501
+ :type: list[V1RuleWithOperations]
+ """
+
+ self._rules = rules
+
+ @property
+ def side_effects(self):
+ """Gets the side_effects of this V1MutatingWebhook. # noqa: E501
+
+ SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. # noqa: E501
+
+ :return: The side_effects of this V1MutatingWebhook. # noqa: E501
+ :rtype: str
+ """
+ return self._side_effects
+
+ @side_effects.setter
+ def side_effects(self, side_effects):
+ """Sets the side_effects of this V1MutatingWebhook.
+
+ SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. # noqa: E501
+
+ :param side_effects: The side_effects of this V1MutatingWebhook. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and side_effects is None: # noqa: E501
+ raise ValueError("Invalid value for `side_effects`, must not be `None`") # noqa: E501
+
+ self._side_effects = side_effects
+
+ @property
+ def timeout_seconds(self):
+ """Gets the timeout_seconds of this V1MutatingWebhook. # noqa: E501
+
+ TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
+
+ :return: The timeout_seconds of this V1MutatingWebhook. # noqa: E501
+ :rtype: int
+ """
+ return self._timeout_seconds
+
+ @timeout_seconds.setter
+ def timeout_seconds(self, timeout_seconds):
+ """Sets the timeout_seconds of this V1MutatingWebhook.
+
+ TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
+
+ :param timeout_seconds: The timeout_seconds of this V1MutatingWebhook. # noqa: E501
+ :type: int
+ """
+
+ self._timeout_seconds = timeout_seconds
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1MutatingWebhook):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1MutatingWebhook):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration.py
new file mode 100644
index 0000000000..53c123cdec
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1MutatingWebhookConfiguration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'webhooks': 'list[V1MutatingWebhook]'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'webhooks': 'webhooks'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, webhooks=None, local_vars_configuration=None): # noqa: E501
+ """V1MutatingWebhookConfiguration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._webhooks = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if webhooks is not None:
+ self.webhooks = webhooks
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1MutatingWebhookConfiguration. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1MutatingWebhookConfiguration. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1MutatingWebhookConfiguration.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1MutatingWebhookConfiguration. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1MutatingWebhookConfiguration. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1MutatingWebhookConfiguration. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1MutatingWebhookConfiguration.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1MutatingWebhookConfiguration. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1MutatingWebhookConfiguration. # noqa: E501
+
+
+ :return: The metadata of this V1MutatingWebhookConfiguration. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1MutatingWebhookConfiguration.
+
+
+ :param metadata: The metadata of this V1MutatingWebhookConfiguration. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def webhooks(self):
+ """Gets the webhooks of this V1MutatingWebhookConfiguration. # noqa: E501
+
+ Webhooks is a list of webhooks and the affected resources and operations. # noqa: E501
+
+ :return: The webhooks of this V1MutatingWebhookConfiguration. # noqa: E501
+ :rtype: list[V1MutatingWebhook]
+ """
+ return self._webhooks
+
+ @webhooks.setter
+ def webhooks(self, webhooks):
+ """Sets the webhooks of this V1MutatingWebhookConfiguration.
+
+ Webhooks is a list of webhooks and the affected resources and operations. # noqa: E501
+
+ :param webhooks: The webhooks of this V1MutatingWebhookConfiguration. # noqa: E501
+ :type: list[V1MutatingWebhook]
+ """
+
+ self._webhooks = webhooks
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1MutatingWebhookConfiguration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1MutatingWebhookConfiguration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration_list.py
new file mode 100644
index 0000000000..17051269e2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_mutating_webhook_configuration_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1MutatingWebhookConfigurationList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1MutatingWebhookConfiguration]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1MutatingWebhookConfigurationList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1MutatingWebhookConfigurationList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1MutatingWebhookConfigurationList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1MutatingWebhookConfigurationList. # noqa: E501
+
+ List of MutatingWebhookConfiguration. # noqa: E501
+
+ :return: The items of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :rtype: list[V1MutatingWebhookConfiguration]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1MutatingWebhookConfigurationList.
+
+ List of MutatingWebhookConfiguration. # noqa: E501
+
+ :param items: The items of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :type: list[V1MutatingWebhookConfiguration]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1MutatingWebhookConfigurationList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1MutatingWebhookConfigurationList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1MutatingWebhookConfigurationList. # noqa: E501
+
+
+ :return: The metadata of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1MutatingWebhookConfigurationList.
+
+
+ :param metadata: The metadata of this V1MutatingWebhookConfigurationList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1MutatingWebhookConfigurationList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1MutatingWebhookConfigurationList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_namespace.py b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace.py
new file mode 100644
index 0000000000..abe621e719
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Namespace(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1NamespaceSpec',
+ 'status': 'V1NamespaceStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1Namespace - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Namespace. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Namespace. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Namespace.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Namespace. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Namespace. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Namespace. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Namespace.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Namespace. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Namespace. # noqa: E501
+
+
+ :return: The metadata of this V1Namespace. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Namespace.
+
+
+ :param metadata: The metadata of this V1Namespace. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1Namespace. # noqa: E501
+
+
+ :return: The spec of this V1Namespace. # noqa: E501
+ :rtype: V1NamespaceSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1Namespace.
+
+
+ :param spec: The spec of this V1Namespace. # noqa: E501
+ :type: V1NamespaceSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1Namespace. # noqa: E501
+
+
+ :return: The status of this V1Namespace. # noqa: E501
+ :rtype: V1NamespaceStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1Namespace.
+
+
+ :param status: The status of this V1Namespace. # noqa: E501
+ :type: V1NamespaceStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Namespace):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Namespace):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_condition.py
new file mode 100644
index 0000000000..4ece7de424
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_condition.py
@@ -0,0 +1,232 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NamespaceCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1NamespaceCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1NamespaceCondition. # noqa: E501
+
+ Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. # noqa: E501
+
+ :return: The last_transition_time of this V1NamespaceCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1NamespaceCondition.
+
+ Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1NamespaceCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1NamespaceCondition. # noqa: E501
+
+
+ :return: The message of this V1NamespaceCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1NamespaceCondition.
+
+
+ :param message: The message of this V1NamespaceCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1NamespaceCondition. # noqa: E501
+
+
+ :return: The reason of this V1NamespaceCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1NamespaceCondition.
+
+
+ :param reason: The reason of this V1NamespaceCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1NamespaceCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1NamespaceCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1NamespaceCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1NamespaceCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1NamespaceCondition. # noqa: E501
+
+ Type of namespace controller condition. # noqa: E501
+
+ :return: The type of this V1NamespaceCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1NamespaceCondition.
+
+ Type of namespace controller condition. # noqa: E501
+
+ :param type: The type of this V1NamespaceCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NamespaceCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NamespaceCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_list.py
new file mode 100644
index 0000000000..1c567ba503
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NamespaceList(object):
    """NamespaceList is a list of Namespace objects.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Namespace]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):
        """V1NamespaceList - a model defined in OpenAPI.

        :param api_version: versioned schema identifier of this object
        :param items: list of V1Namespace objects (required)
        :param kind: REST resource kind string
        :param metadata: V1ListMeta for the collection
        :param local_vars_configuration: client configuration; a default
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # `items` is the only required field; the rest stay None unless given.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        self._api_version = value

    @property
    def items(self):
        """list[V1Namespace]: Items is the list of Namespace objects in the
        list. More info:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
        """
        return self._items

    @items.setter
    def items(self, value):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and value is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = value

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, value):
        self._kind = value

    @property
    def metadata(self):
        """V1ListMeta: the metadata of this V1NamespaceList."""
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        self._metadata = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(obj):
            # Recurse into generated models, pass primitives through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_plain(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _plain(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        return isinstance(other, V1NamespaceList) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not isinstance(other, V1NamespaceList) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_spec.py
new file mode 100644
index 0000000000..a86dd478f6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_spec.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NamespaceSpec(object):
    """Spec model for a Namespace: carries only the finalizer list.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'finalizers': 'list[str]'
    }

    attribute_map = {
        'finalizers': 'finalizers'
    }

    def __init__(self, finalizers=None, local_vars_configuration=None):
        """V1NamespaceSpec - a model defined in OpenAPI.

        :param finalizers: optional list of finalizer strings
        :param local_vars_configuration: client configuration; a default
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._finalizers = None
        self.discriminator = None

        if finalizers is not None:
            self.finalizers = finalizers

    @property
    def finalizers(self):
        """list[str]: Finalizers is an opaque list of values that must be
        empty to permanently remove object from storage. More info:
        https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
        """
        return self._finalizers

    @finalizers.setter
    def finalizers(self, value):
        self._finalizers = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(obj):
            # Recurse into generated models, pass primitives through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_plain(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _plain(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        return isinstance(other, V1NamespaceSpec) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not isinstance(other, V1NamespaceSpec) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_status.py
new file mode 100644
index 0000000000..65f05818d4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_namespace_status.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NamespaceStatus(object):
    """Status model for a Namespace: observed conditions plus phase.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'conditions': 'list[V1NamespaceCondition]',
        'phase': 'str'
    }

    attribute_map = {
        'conditions': 'conditions',
        'phase': 'phase'
    }

    def __init__(self, conditions=None, phase=None, local_vars_configuration=None):
        """V1NamespaceStatus - a model defined in OpenAPI.

        :param conditions: optional list of V1NamespaceCondition objects
        :param phase: optional lifecycle phase string
        :param local_vars_configuration: client configuration; a default
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._conditions = None
        self._phase = None
        self.discriminator = None

        if conditions is not None:
            self.conditions = conditions
        if phase is not None:
            self.phase = phase

    @property
    def conditions(self):
        """list[V1NamespaceCondition]: Represents the latest available
        observations of a namespace's current state.
        """
        return self._conditions

    @conditions.setter
    def conditions(self, value):
        self._conditions = value

    @property
    def phase(self):
        """str: Phase is the current lifecycle phase of the namespace.
        More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
        """
        return self._phase

    @phase.setter
    def phase(self, value):
        self._phase = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(obj):
            # Recurse into generated models, pass primitives through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_plain(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _plain(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        return isinstance(other, V1NamespaceStatus) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not isinstance(other, V1NamespaceStatus) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy.py
new file mode 100644
index 0000000000..33a9fee88c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicy(object):
    """NetworkPolicy top-level object: metadata plus a NetworkPolicySpec.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1NetworkPolicySpec'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):
        """V1NetworkPolicy - a model defined in OpenAPI.

        :param api_version: versioned schema identifier of this object
        :param kind: REST resource kind string
        :param metadata: V1ObjectMeta for this object
        :param spec: V1NetworkPolicySpec describing the policy
        :param local_vars_configuration: client configuration; a default
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self.discriminator = None

        # All fields are optional; only assign what the caller provided.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        self._api_version = value

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, value):
        self._kind = value

    @property
    def metadata(self):
        """V1ObjectMeta: the metadata of this V1NetworkPolicy."""
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        self._metadata = value

    @property
    def spec(self):
        """V1NetworkPolicySpec: the spec of this V1NetworkPolicy."""
        return self._spec

    @spec.setter
    def spec(self, value):
        self._spec = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(obj):
            # Recurse into generated models, pass primitives through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_plain(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _plain(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        return isinstance(other, V1NetworkPolicy) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not isinstance(other, V1NetworkPolicy) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_egress_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_egress_rule.py
new file mode 100644
index 0000000000..40a4369c5a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_egress_rule.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicyEgressRule(object):
    """Egress rule: allowed destination ports and peers.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'ports': 'list[V1NetworkPolicyPort]',
        'to': 'list[V1NetworkPolicyPeer]'
    }

    attribute_map = {
        'ports': 'ports',
        'to': 'to'
    }

    def __init__(self, ports=None, to=None, local_vars_configuration=None):
        """V1NetworkPolicyEgressRule - a model defined in OpenAPI.

        :param ports: optional list of V1NetworkPolicyPort objects
        :param to: optional list of V1NetworkPolicyPeer objects
        :param local_vars_configuration: client configuration; a default
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._ports = None
        self._to = None
        self.discriminator = None

        if ports is not None:
            self.ports = ports
        if to is not None:
            self.to = to

    @property
    def ports(self):
        """list[V1NetworkPolicyPort]: ports is a list of destination ports
        for outgoing traffic. Each item in this list is combined using a
        logical OR. If this field is empty or missing, this rule matches all
        ports; if present and non-empty, traffic must match at least one
        port in the list.
        """
        return self._ports

    @ports.setter
    def ports(self, value):
        self._ports = value

    @property
    def to(self):
        """list[V1NetworkPolicyPeer]: to is a list of destinations for
        outgoing traffic of pods selected for this rule. Items are combined
        using a logical OR. If this field is empty or missing, this rule
        matches all destinations; if present and non-empty, traffic must
        match at least one item in the list.
        """
        return self._to

    @to.setter
    def to(self, value):
        self._to = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(obj):
            # Recurse into generated models, pass primitives through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_plain(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _plain(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        return isinstance(other, V1NetworkPolicyEgressRule) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not isinstance(other, V1NetworkPolicyEgressRule) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_ingress_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_ingress_rule.py
new file mode 100644
index 0000000000..75fc74766f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_ingress_rule.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicyIngressRule(object):
    """Ingress rule: allowed traffic sources and ports.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit it manually.

    The JSON key ``from`` is a Python keyword, so the generator exposes it
    as the ``_from`` attribute (see ``attribute_map``).

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        '_from': 'list[V1NetworkPolicyPeer]',
        'ports': 'list[V1NetworkPolicyPort]'
    }

    attribute_map = {
        '_from': 'from',
        'ports': 'ports'
    }

    def __init__(self, _from=None, ports=None, local_vars_configuration=None):
        """V1NetworkPolicyIngressRule - a model defined in OpenAPI.

        :param _from: optional list of V1NetworkPolicyPeer objects
        :param ports: optional list of V1NetworkPolicyPort objects
        :param local_vars_configuration: client configuration; a default
            ``Configuration`` is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing slot for the `_from` property (name-mangled to
        # _V1NetworkPolicyIngressRule__from, since `_from` is the property).
        self.__from = None
        self._ports = None
        self.discriminator = None

        if _from is not None:
            self._from = _from
        if ports is not None:
            self.ports = ports

    @property
    def _from(self):
        """list[V1NetworkPolicyPeer]: from is a list of sources which should
        be able to access the pods selected for this rule. Items are
        combined using a logical OR. If this field is empty or missing, this
        rule matches all sources; if present and non-empty, traffic must
        match at least one item in the list.
        """
        return self.__from

    @_from.setter
    def _from(self, value):
        self.__from = value

    @property
    def ports(self):
        """list[V1NetworkPolicyPort]: ports is a list of ports which should
        be made accessible on the pods selected for this rule. Each item is
        combined using a logical OR. If this field is empty or missing, this
        rule matches all ports; if present and non-empty, traffic must match
        at least one port in the list.
        """
        return self._ports

    @ports.setter
    def ports(self, value):
        self._ports = value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(obj):
            # Recurse into generated models, pass primitives through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_plain(elem) for elem in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _plain(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts are equal."""
        return isinstance(other, V1NetworkPolicyIngressRule) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not isinstance(other, V1NetworkPolicyIngressRule) or self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_list.py
new file mode 100644
index 0000000000..3b00d5c610
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicyList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1NetworkPolicy]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1NetworkPolicyList - a model defined in OpenAPI.

        ``items`` is required; the remaining fields are optional.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the generated properties.
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # Optional values only pass through their setters when supplied;
        # `items` always does, so its None-check can fire.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1NetworkPolicyList (str)."""
        self._api_version = api_version

    @property
    def items(self):
        """items is a list of schema objects.

        :rtype: list[V1NetworkPolicy]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the items of this V1NetworkPolicyList.

        :raises ValueError: if client-side validation is on and *items* is None.
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1NetworkPolicyList (str)."""
        self._kind = kind

    @property
    def metadata(self):
        """The metadata of this V1NetworkPolicyList.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1NetworkPolicyList (V1ListMeta)."""
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including elements of lists and dict values.
        """
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        serialized = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                serialized[name] = [_convert(elem) for elem in val]
            elif hasattr(val, "to_dict"):
                serialized[name] = val.to_dict()
            elif isinstance(val, dict):
                serialized[name] = {k: _convert(v) for k, v in val.items()}
            else:
                serialized[name] = val
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a V1NetworkPolicyList with the same content."""
        if not isinstance(other, V1NetworkPolicyList):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Negation of ``__eq__``; non-instances are always unequal."""
        if not isinstance(other, V1NetworkPolicyList):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_peer.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_peer.py
new file mode 100644
index 0000000000..a759ba5d89
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_peer.py
@@ -0,0 +1,172 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicyPeer(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the definition.
    """
    openapi_types = {
        'ip_block': 'V1IPBlock',
        'namespace_selector': 'V1LabelSelector',
        'pod_selector': 'V1LabelSelector'
    }

    attribute_map = {
        'ip_block': 'ipBlock',
        'namespace_selector': 'namespaceSelector',
        'pod_selector': 'podSelector'
    }

    def __init__(self, ip_block=None, namespace_selector=None, pod_selector=None, local_vars_configuration=None):  # noqa: E501
        """V1NetworkPolicyPeer - a model defined in OpenAPI.

        All three fields are optional.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the generated properties.
        self._ip_block = None
        self._namespace_selector = None
        self._pod_selector = None
        self.discriminator = None

        # Only route values through the setters when actually supplied.
        if ip_block is not None:
            self.ip_block = ip_block
        if namespace_selector is not None:
            self.namespace_selector = namespace_selector
        if pod_selector is not None:
            self.pod_selector = pod_selector

    @property
    def ip_block(self):
        """The ip_block of this V1NetworkPolicyPeer.

        :rtype: V1IPBlock
        """
        return self._ip_block

    @ip_block.setter
    def ip_block(self, ip_block):
        """Set the ip_block of this V1NetworkPolicyPeer (V1IPBlock)."""
        self._ip_block = ip_block

    @property
    def namespace_selector(self):
        """The namespace_selector of this V1NetworkPolicyPeer.

        :rtype: V1LabelSelector
        """
        return self._namespace_selector

    @namespace_selector.setter
    def namespace_selector(self, namespace_selector):
        """Set the namespace_selector of this V1NetworkPolicyPeer (V1LabelSelector)."""
        self._namespace_selector = namespace_selector

    @property
    def pod_selector(self):
        """The pod_selector of this V1NetworkPolicyPeer.

        :rtype: V1LabelSelector
        """
        return self._pod_selector

    @pod_selector.setter
    def pod_selector(self, pod_selector):
        """Set the pod_selector of this V1NetworkPolicyPeer (V1LabelSelector)."""
        self._pod_selector = pod_selector

    def to_dict(self):
        """Serialize the model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including elements of lists and dict values.
        """
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        serialized = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                serialized[name] = [_convert(elem) for elem in val]
            elif hasattr(val, "to_dict"):
                serialized[name] = val.to_dict()
            elif isinstance(val, dict):
                serialized[name] = {k: _convert(v) for k, v in val.items()}
            else:
                serialized[name] = val
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a V1NetworkPolicyPeer with the same content."""
        if not isinstance(other, V1NetworkPolicyPeer):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Negation of ``__eq__``; non-instances are always unequal."""
        if not isinstance(other, V1NetworkPolicyPeer):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_port.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_port.py
new file mode 100644
index 0000000000..621f3a52a2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_port.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicyPort(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the definition.
    """
    openapi_types = {
        'end_port': 'int',
        'port': 'object',
        'protocol': 'str'
    }

    attribute_map = {
        'end_port': 'endPort',
        'port': 'port',
        'protocol': 'protocol'
    }

    def __init__(self, end_port=None, port=None, protocol=None, local_vars_configuration=None):  # noqa: E501
        """V1NetworkPolicyPort - a model defined in OpenAPI.

        All three fields are optional.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the generated properties.
        self._end_port = None
        self._port = None
        self._protocol = None
        self.discriminator = None

        # Only route values through the setters when actually supplied.
        if end_port is not None:
            self.end_port = end_port
        if port is not None:
            self.port = port
        if protocol is not None:
            self.protocol = protocol

    @property
    def end_port(self):
        """endPort indicates that the range of ports from port to endPort if
        set, inclusive, should be allowed by the policy. This field cannot be
        defined if the port field is not defined or if the port field is
        defined as a named (string) port. The endPort must be equal or
        greater than port.

        :rtype: int
        """
        return self._end_port

    @end_port.setter
    def end_port(self, end_port):
        """Set the end_port of this V1NetworkPolicyPort (int)."""
        self._end_port = end_port

    @property
    def port(self):
        """port represents the port on the given protocol. This can either be
        a numerical or named port on a pod. If this field is not provided,
        this matches all port names and numbers. If present, only traffic on
        the specified protocol AND port will be matched.

        :rtype: object
        """
        return self._port

    @port.setter
    def port(self, port):
        """Set the port of this V1NetworkPolicyPort (int or named port)."""
        self._port = port

    @property
    def protocol(self):
        """protocol represents the protocol (TCP, UDP, or SCTP) which traffic
        must match. If not specified, this field defaults to TCP.

        :rtype: str
        """
        return self._protocol

    @protocol.setter
    def protocol(self, protocol):
        """Set the protocol of this V1NetworkPolicyPort (str)."""
        self._protocol = protocol

    def to_dict(self):
        """Serialize the model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including elements of lists and dict values.
        """
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        serialized = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                serialized[name] = [_convert(elem) for elem in val]
            elif hasattr(val, "to_dict"):
                serialized[name] = val.to_dict()
            elif isinstance(val, dict):
                serialized[name] = {k: _convert(v) for k, v in val.items()}
            else:
                serialized[name] = val
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a V1NetworkPolicyPort with the same content."""
        if not isinstance(other, V1NetworkPolicyPort):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Negation of ``__eq__``; non-instances are always unequal."""
        if not isinstance(other, V1NetworkPolicyPort):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_spec.py
new file mode 100644
index 0000000000..031b4e6d4d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_spec.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicySpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the definition.
    """
    openapi_types = {
        'egress': 'list[V1NetworkPolicyEgressRule]',
        'ingress': 'list[V1NetworkPolicyIngressRule]',
        'pod_selector': 'V1LabelSelector',
        'policy_types': 'list[str]'
    }

    attribute_map = {
        'egress': 'egress',
        'ingress': 'ingress',
        'pod_selector': 'podSelector',
        'policy_types': 'policyTypes'
    }

    def __init__(self, egress=None, ingress=None, pod_selector=None, policy_types=None, local_vars_configuration=None):  # noqa: E501
        """V1NetworkPolicySpec - a model defined in OpenAPI.

        ``pod_selector`` is required; the remaining fields are optional.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the generated properties.
        self._egress = None
        self._ingress = None
        self._pod_selector = None
        self._policy_types = None
        self.discriminator = None

        # Optional values only pass through their setters when supplied;
        # `pod_selector` always does, so its None-check can fire.
        if egress is not None:
            self.egress = egress
        if ingress is not None:
            self.ingress = ingress
        self.pod_selector = pod_selector
        if policy_types is not None:
            self.policy_types = policy_types

    @property
    def egress(self):
        """egress is a list of egress rules to be applied to the selected
        pods. Outgoing traffic is allowed if there are no NetworkPolicies
        selecting the pod (and cluster policy otherwise allows the traffic),
        OR if the traffic matches at least one egress rule across all of the
        NetworkPolicy objects whose podSelector matches the pod. If this
        field is empty then this NetworkPolicy limits all outgoing traffic
        (and serves solely to ensure that the pods it selects are isolated
        by default). This field is beta-level in 1.8

        :rtype: list[V1NetworkPolicyEgressRule]
        """
        return self._egress

    @egress.setter
    def egress(self, egress):
        """Set the egress rules of this V1NetworkPolicySpec."""
        self._egress = egress

    @property
    def ingress(self):
        """ingress is a list of ingress rules to be applied to the selected
        pods. Traffic is allowed to a pod if there are no NetworkPolicies
        selecting the pod (and cluster policy otherwise allows the traffic),
        OR if the traffic source is the pod's local node, OR if the traffic
        matches at least one ingress rule across all of the NetworkPolicy
        objects whose podSelector matches the pod. If this field is empty
        then this NetworkPolicy does not allow any traffic (and serves
        solely to ensure that the pods it selects are isolated by default)

        :rtype: list[V1NetworkPolicyIngressRule]
        """
        return self._ingress

    @ingress.setter
    def ingress(self, ingress):
        """Set the ingress rules of this V1NetworkPolicySpec."""
        self._ingress = ingress

    @property
    def pod_selector(self):
        """The pod_selector of this V1NetworkPolicySpec.

        :rtype: V1LabelSelector
        """
        return self._pod_selector

    @pod_selector.setter
    def pod_selector(self, pod_selector):
        """Set the pod_selector of this V1NetworkPolicySpec.

        :raises ValueError: if client-side validation is on and
            *pod_selector* is None.
        """
        if self.local_vars_configuration.client_side_validation and pod_selector is None:  # noqa: E501
            raise ValueError("Invalid value for `pod_selector`, must not be `None`")  # noqa: E501
        self._pod_selector = pod_selector

    @property
    def policy_types(self):
        """policyTypes is a list of rule types that the NetworkPolicy relates
        to. Valid options are [\"Ingress\"], [\"Egress\"], or
        [\"Ingress\", \"Egress\"]. If this field is not specified, it will
        default based on the existence of ingress or egress rules; policies
        that contain an egress section are assumed to affect egress, and all
        policies (whether or not they contain an ingress section) are
        assumed to affect ingress. This field is beta-level in 1.8

        :rtype: list[str]
        """
        return self._policy_types

    @policy_types.setter
    def policy_types(self, policy_types):
        """Set the policy_types of this V1NetworkPolicySpec (list[str])."""
        self._policy_types = policy_types

    def to_dict(self):
        """Serialize the model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including elements of lists and dict values.
        """
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        serialized = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                serialized[name] = [_convert(elem) for elem in val]
            elif hasattr(val, "to_dict"):
                serialized[name] = val.to_dict()
            elif isinstance(val, dict):
                serialized[name] = {k: _convert(v) for k, v in val.items()}
            else:
                serialized[name] = val
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a V1NetworkPolicySpec with the same content."""
        if not isinstance(other, V1NetworkPolicySpec):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Negation of ``__eq__``; non-instances are always unequal."""
        if not isinstance(other, V1NetworkPolicySpec):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_status.py
new file mode 100644
index 0000000000..ec371ffbde
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_network_policy_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.27
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NetworkPolicyStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): attribute name -> attribute type.
      attribute_map (dict): attribute name -> JSON key in the definition.
    """
    openapi_types = {
        'conditions': 'list[V1Condition]'
    }

    attribute_map = {
        'conditions': 'conditions'
    }

    def __init__(self, conditions=None, local_vars_configuration=None):  # noqa: E501
        """V1NetworkPolicyStatus - a model defined in OpenAPI.

        ``conditions`` is optional.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing field for the generated property.
        self._conditions = None
        self.discriminator = None

        # Only route the value through the setter when actually supplied.
        if conditions is not None:
            self.conditions = conditions

    @property
    def conditions(self):
        """conditions holds an array of metav1.Condition that describe the
        state of the NetworkPolicy. Current service state

        :rtype: list[V1Condition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Set the conditions of this V1NetworkPolicyStatus (list[V1Condition])."""
        self._conditions = conditions

    def to_dict(self):
        """Serialize the model into a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including elements of lists and dict values.
        """
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        serialized = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                serialized[name] = [_convert(elem) for elem in val]
            elif hasattr(val, "to_dict"):
                serialized[name] = val.to_dict()
            elif isinstance(val, dict):
                serialized[name] = {k: _convert(v) for k, v in val.items()}
            else:
                serialized[name] = val
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a V1NetworkPolicyStatus with the same content."""
        if not isinstance(other, V1NetworkPolicyStatus):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Negation of ``__eq__``; non-instances are always unequal."""
        if not isinstance(other, V1NetworkPolicyStatus):
            return True
        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_nfs_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_nfs_volume_source.py
new file mode 100644
index 0000000000..04b166803d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_nfs_volume_source.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NFSVolumeSource(object):
    """Model for an NFS volume source (server, exported path, readOnly flag).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'path': 'str',
        'read_only': 'bool',
        'server': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'path': 'path',
        'read_only': 'readOnly',
        'server': 'server'
    }

    def __init__(self, path=None, read_only=None, server=None, local_vars_configuration=None):  # noqa: E501
        """V1NFSVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._path = None
        self._read_only = None
        self._server = None
        self.discriminator = None

        # Required fields go through their validating setters; the optional
        # flag is assigned only when the caller actually supplied it.
        self.path = path
        if read_only is not None:
            self.read_only = read_only
        self.server = server

    @property
    def path(self):
        """Path that is exported by the NFS server (required).

        More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs

        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Set ``path``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501
        self._path = path

    @property
    def read_only(self):
        """Whether the NFS export is mounted read-only; defaults to false.

        More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set ``read_only`` (no validation — field is optional)."""
        self._read_only = read_only

    @property
    def server(self):
        """Hostname or IP address of the NFS server (required).

        More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs

        :rtype: str
        """
        return self._server

    @server.setter
    def server(self, server):
        """Set ``server``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and server is None:  # noqa: E501
            raise ValueError("Invalid value for `server`, must not be `None`")  # noqa: E501
        self._server = server

    def to_dict(self):
        """Return the model's properties as a plain ``dict``."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal properties."""
        if isinstance(other, V1NFSVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NFSVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node.py
new file mode 100644
index 0000000000..a88f47677e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Node(object):
    """Top-level Node API object (apiVersion/kind/metadata/spec/status).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1NodeSpec',
        'status': 'V1NodeStatus'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1Node - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Every field is optional: assign through the setter only when the
        # caller supplied a value.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Versioned schema of this representation of an object.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set ``api_version`` (optional, no validation)."""
        self._api_version = api_version

    @property
    def kind(self):
        """REST resource this object represents, in CamelCase.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set ``kind`` (optional, no validation)."""
        self._kind = kind

    @property
    def metadata(self):
        """Standard object metadata.

        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set ``metadata`` (optional, no validation)."""
        self._metadata = metadata

    @property
    def spec(self):
        """Desired behaviour of the node.

        :rtype: V1NodeSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set ``spec`` (optional, no validation)."""
        self._spec = spec

    @property
    def status(self):
        """Most recently observed status of the node.

        :rtype: V1NodeStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set ``status`` (optional, no validation)."""
        self._status = status

    def to_dict(self):
        """Return the model's properties as a plain ``dict``."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal properties."""
        if isinstance(other, V1Node):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1Node):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_address.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_address.py
new file mode 100644
index 0000000000..617e5f27d1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_address.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeAddress(object):
    """Model for a single node address entry (address + type).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'address': 'str',
        'type': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'address': 'address',
        'type': 'type'
    }

    def __init__(self, address=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeAddress - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._address = None
        self._type = None
        self.discriminator = None

        # Both fields are required and validated by their setters.
        self.address = address
        self.type = type

    @property
    def address(self):
        """The node address (required).

        :rtype: str
        """
        return self._address

    @address.setter
    def address(self, address):
        """Set ``address``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and address is None:  # noqa: E501
            raise ValueError("Invalid value for `address`, must not be `None`")  # noqa: E501
        self._address = address

    @property
    def type(self):
        """Node address type, one of Hostname, ExternalIP or InternalIP (required).

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set ``type``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model's properties as a plain ``dict``."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal properties."""
        if isinstance(other, V1NodeAddress):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeAddress):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_affinity.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_affinity.py
new file mode 100644
index 0000000000..46706c0847
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_affinity.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeAffinity(object):
    """Node affinity scheduling rules (preferred and required terms).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'preferred_during_scheduling_ignored_during_execution': 'list[V1PreferredSchedulingTerm]',
        'required_during_scheduling_ignored_during_execution': 'V1NodeSelector'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'preferred_during_scheduling_ignored_during_execution': 'preferredDuringSchedulingIgnoredDuringExecution',
        'required_during_scheduling_ignored_during_execution': 'requiredDuringSchedulingIgnoredDuringExecution'
    }

    def __init__(self, preferred_during_scheduling_ignored_during_execution=None, required_during_scheduling_ignored_during_execution=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeAffinity - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._preferred_during_scheduling_ignored_during_execution = None
        self._required_during_scheduling_ignored_during_execution = None
        self.discriminator = None

        # Both fields are optional: assign only when supplied.
        if preferred_during_scheduling_ignored_during_execution is not None:
            self.preferred_during_scheduling_ignored_during_execution = preferred_during_scheduling_ignored_during_execution
        if required_during_scheduling_ignored_during_execution is not None:
            self.required_during_scheduling_ignored_during_execution = required_during_scheduling_ignored_during_execution

    @property
    def preferred_during_scheduling_ignored_during_execution(self):
        """Soft affinity terms: the scheduler prefers nodes matching these
        weighted expressions but may pick a node that violates them; the node
        with the greatest sum of matching weights is most preferred.

        :rtype: list[V1PreferredSchedulingTerm]
        """
        return self._preferred_during_scheduling_ignored_during_execution

    @preferred_during_scheduling_ignored_during_execution.setter
    def preferred_during_scheduling_ignored_during_execution(self, preferred_during_scheduling_ignored_during_execution):
        """Set the preferred (soft) scheduling terms (optional, no validation)."""
        self._preferred_during_scheduling_ignored_during_execution = preferred_during_scheduling_ignored_during_execution

    @property
    def required_during_scheduling_ignored_during_execution(self):
        """Hard affinity selector that nodes must satisfy at scheduling time.

        :rtype: V1NodeSelector
        """
        return self._required_during_scheduling_ignored_during_execution

    @required_during_scheduling_ignored_during_execution.setter
    def required_during_scheduling_ignored_during_execution(self, required_during_scheduling_ignored_during_execution):
        """Set the required (hard) scheduling selector (optional, no validation)."""
        self._required_during_scheduling_ignored_during_execution = required_during_scheduling_ignored_during_execution

    def to_dict(self):
        """Return the model's properties as a plain ``dict``."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal properties."""
        if isinstance(other, V1NodeAffinity):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeAffinity):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_condition.py
new file mode 100644
index 0000000000..c17556ff9f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_condition.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeCondition(object):
    """One observed condition of a node (type/status plus transition details).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'last_heartbeat_time': 'datetime',
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'last_heartbeat_time': 'lastHeartbeatTime',
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_heartbeat_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeCondition - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._last_heartbeat_time = None
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Optional fields are assigned only when supplied; status and type
        # are required and always go through their validating setters.
        if last_heartbeat_time is not None:
            self.last_heartbeat_time = last_heartbeat_time
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type

    @property
    def last_heartbeat_time(self):
        """Last time we got an update on a given condition.

        :rtype: datetime
        """
        return self._last_heartbeat_time

    @last_heartbeat_time.setter
    def last_heartbeat_time(self, last_heartbeat_time):
        """Set ``last_heartbeat_time`` (optional, no validation)."""
        self._last_heartbeat_time = last_heartbeat_time

    @property
    def last_transition_time(self):
        """Last time the condition transit from one status to another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set ``last_transition_time`` (optional, no validation)."""
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """Human readable message indicating details about last transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set ``message`` (optional, no validation)."""
        self._message = message

    @property
    def reason(self):
        """(brief) reason for the condition's last transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set ``reason`` (optional, no validation)."""
        self._reason = reason

    @property
    def status(self):
        """Status of the condition, one of True, False, Unknown (required).

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set ``status``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status

    @property
    def type(self):
        """Type of node condition (required).

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set ``type``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model's properties as a plain ``dict``."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal properties."""
        if isinstance(other, V1NodeCondition):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeCondition):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_config_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_config_source.py
new file mode 100644
index 0000000000..c5cb745337
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_config_source.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeConfigSource(object):
    """Source of a node's configuration (currently only a ConfigMap ref).

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'config_map': 'V1ConfigMapNodeConfigSource'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'config_map': 'configMap'
    }

    def __init__(self, config_map=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeConfigSource - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )

        self._config_map = None
        self.discriminator = None

        # Optional field: assign only when supplied.
        if config_map is not None:
            self.config_map = config_map

    @property
    def config_map(self):
        """The ConfigMap reference backing this config source.

        :rtype: V1ConfigMapNodeConfigSource
        """
        return self._config_map

    @config_map.setter
    def config_map(self, config_map):
        """Set ``config_map`` (optional, no validation)."""
        self._config_map = config_map

    def to_dict(self):
        """Return the model's properties as a plain ``dict``."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal properties."""
        if isinstance(other, V1NodeConfigSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeConfigSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_config_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_config_status.py
new file mode 100644
index 0000000000..7d589c797d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_config_status.py
@@ -0,0 +1,200 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NodeConfigStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'active': 'V1NodeConfigSource',
+ 'assigned': 'V1NodeConfigSource',
+ 'error': 'str',
+ 'last_known_good': 'V1NodeConfigSource'
+ }
+
+ attribute_map = {
+ 'active': 'active',
+ 'assigned': 'assigned',
+ 'error': 'error',
+ 'last_known_good': 'lastKnownGood'
+ }
+
+ def __init__(self, active=None, assigned=None, error=None, last_known_good=None, local_vars_configuration=None): # noqa: E501
+ """V1NodeConfigStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._active = None
+ self._assigned = None
+ self._error = None
+ self._last_known_good = None
+ self.discriminator = None
+
+ if active is not None:
+ self.active = active
+ if assigned is not None:
+ self.assigned = assigned
+ if error is not None:
+ self.error = error
+ if last_known_good is not None:
+ self.last_known_good = last_known_good
+
+ @property
+ def active(self):
+ """Gets the active of this V1NodeConfigStatus. # noqa: E501
+
+
+ :return: The active of this V1NodeConfigStatus. # noqa: E501
+ :rtype: V1NodeConfigSource
+ """
+ return self._active
+
+ @active.setter
+ def active(self, active):
+ """Sets the active of this V1NodeConfigStatus.
+
+
+ :param active: The active of this V1NodeConfigStatus. # noqa: E501
+ :type: V1NodeConfigSource
+ """
+
+ self._active = active
+
+ @property
+ def assigned(self):
+ """Gets the assigned of this V1NodeConfigStatus. # noqa: E501
+
+
+ :return: The assigned of this V1NodeConfigStatus. # noqa: E501
+ :rtype: V1NodeConfigSource
+ """
+ return self._assigned
+
+ @assigned.setter
+ def assigned(self, assigned):
+ """Sets the assigned of this V1NodeConfigStatus.
+
+
+ :param assigned: The assigned of this V1NodeConfigStatus. # noqa: E501
+ :type: V1NodeConfigSource
+ """
+
+ self._assigned = assigned
+
+ @property
+ def error(self):
+ """Gets the error of this V1NodeConfigStatus. # noqa: E501
+
+ Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions. # noqa: E501
+
+ :return: The error of this V1NodeConfigStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._error
+
+ @error.setter
+ def error(self, error):
+ """Sets the error of this V1NodeConfigStatus.
+
+ Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions. # noqa: E501
+
+ :param error: The error of this V1NodeConfigStatus. # noqa: E501
+ :type: str
+ """
+
+ self._error = error
+
+ @property
+ def last_known_good(self):
+ """Gets the last_known_good of this V1NodeConfigStatus. # noqa: E501
+
+
+ :return: The last_known_good of this V1NodeConfigStatus. # noqa: E501
+ :rtype: V1NodeConfigSource
+ """
+ return self._last_known_good
+
+ @last_known_good.setter
+ def last_known_good(self, last_known_good):
+ """Sets the last_known_good of this V1NodeConfigStatus.
+
+
+ :param last_known_good: The last_known_good of this V1NodeConfigStatus. # noqa: E501
+ :type: V1NodeConfigSource
+ """
+
+ self._last_known_good = last_known_good
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NodeConfigStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NodeConfigStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_daemon_endpoints.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_daemon_endpoints.py
new file mode 100644
index 0000000000..9d996cd0f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_daemon_endpoints.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NodeDaemonEndpoints(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'kubelet_endpoint': 'V1DaemonEndpoint'
+ }
+
+ attribute_map = {
+ 'kubelet_endpoint': 'kubeletEndpoint'
+ }
+
+ def __init__(self, kubelet_endpoint=None, local_vars_configuration=None): # noqa: E501
+ """V1NodeDaemonEndpoints - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._kubelet_endpoint = None
+ self.discriminator = None
+
+ if kubelet_endpoint is not None:
+ self.kubelet_endpoint = kubelet_endpoint
+
+ @property
+ def kubelet_endpoint(self):
+ """Gets the kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
+
+
+ :return: The kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
+ :rtype: V1DaemonEndpoint
+ """
+ return self._kubelet_endpoint
+
+ @kubelet_endpoint.setter
+ def kubelet_endpoint(self, kubelet_endpoint):
+ """Sets the kubelet_endpoint of this V1NodeDaemonEndpoints.
+
+
+ :param kubelet_endpoint: The kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
+ :type: V1DaemonEndpoint
+ """
+
+ self._kubelet_endpoint = kubelet_endpoint
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NodeDaemonEndpoints):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NodeDaemonEndpoints):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_list.py
new file mode 100644
index 0000000000..9125563a86
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NodeList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1Node]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1NodeList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1NodeList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1NodeList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1NodeList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1NodeList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1NodeList. # noqa: E501
+
+ List of nodes # noqa: E501
+
+ :return: The items of this V1NodeList. # noqa: E501
+ :rtype: list[V1Node]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1NodeList.
+
+ List of nodes # noqa: E501
+
+ :param items: The items of this V1NodeList. # noqa: E501
+ :type: list[V1Node]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1NodeList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1NodeList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1NodeList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1NodeList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1NodeList. # noqa: E501
+
+
+ :return: The metadata of this V1NodeList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1NodeList.
+
+
+ :param metadata: The metadata of this V1NodeList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NodeList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NodeList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector.py
new file mode 100644
index 0000000000..6dc9e3e675
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NodeSelector(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'node_selector_terms': 'list[V1NodeSelectorTerm]'
+ }
+
+ attribute_map = {
+ 'node_selector_terms': 'nodeSelectorTerms'
+ }
+
+ def __init__(self, node_selector_terms=None, local_vars_configuration=None): # noqa: E501
+ """V1NodeSelector - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._node_selector_terms = None
+ self.discriminator = None
+
+ self.node_selector_terms = node_selector_terms
+
+ @property
+ def node_selector_terms(self):
+ """Gets the node_selector_terms of this V1NodeSelector. # noqa: E501
+
+ Required. A list of node selector terms. The terms are ORed. # noqa: E501
+
+ :return: The node_selector_terms of this V1NodeSelector. # noqa: E501
+ :rtype: list[V1NodeSelectorTerm]
+ """
+ return self._node_selector_terms
+
+ @node_selector_terms.setter
+ def node_selector_terms(self, node_selector_terms):
+ """Sets the node_selector_terms of this V1NodeSelector.
+
+ Required. A list of node selector terms. The terms are ORed. # noqa: E501
+
+ :param node_selector_terms: The node_selector_terms of this V1NodeSelector. # noqa: E501
+ :type: list[V1NodeSelectorTerm]
+ """
+ if self.local_vars_configuration.client_side_validation and node_selector_terms is None: # noqa: E501
+ raise ValueError("Invalid value for `node_selector_terms`, must not be `None`") # noqa: E501
+
+ self._node_selector_terms = node_selector_terms
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NodeSelector):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NodeSelector):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_requirement.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_requirement.py
new file mode 100644
index 0000000000..2edce207d0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_requirement.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NodeSelectorRequirement(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'key': 'str',
+ 'operator': 'str',
+ 'values': 'list[str]'
+ }
+
+ attribute_map = {
+ 'key': 'key',
+ 'operator': 'operator',
+ 'values': 'values'
+ }
+
+ def __init__(self, key=None, operator=None, values=None, local_vars_configuration=None): # noqa: E501
+ """V1NodeSelectorRequirement - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._key = None
+ self._operator = None
+ self._values = None
+ self.discriminator = None
+
+ self.key = key
+ self.operator = operator
+ if values is not None:
+ self.values = values
+
+ @property
+ def key(self):
+ """Gets the key of this V1NodeSelectorRequirement. # noqa: E501
+
+ The label key that the selector applies to. # noqa: E501
+
+ :return: The key of this V1NodeSelectorRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1NodeSelectorRequirement.
+
+ The label key that the selector applies to. # noqa: E501
+
+ :param key: The key of this V1NodeSelectorRequirement. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
+ raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
+
+ self._key = key
+
+ @property
+ def operator(self):
+ """Gets the operator of this V1NodeSelectorRequirement. # noqa: E501
+
+ Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. # noqa: E501
+
+ :return: The operator of this V1NodeSelectorRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._operator
+
+ @operator.setter
+ def operator(self, operator):
+ """Sets the operator of this V1NodeSelectorRequirement.
+
+ Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. # noqa: E501
+
+ :param operator: The operator of this V1NodeSelectorRequirement. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and operator is None: # noqa: E501
+ raise ValueError("Invalid value for `operator`, must not be `None`") # noqa: E501
+
+ self._operator = operator
+
+ @property
+ def values(self):
+ """Gets the values of this V1NodeSelectorRequirement. # noqa: E501
+
+ An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. # noqa: E501
+
+ :return: The values of this V1NodeSelectorRequirement. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._values
+
+ @values.setter
+ def values(self, values):
+ """Sets the values of this V1NodeSelectorRequirement.
+
+ An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. # noqa: E501
+
+ :param values: The values of this V1NodeSelectorRequirement. # noqa: E501
+ :type: list[str]
+ """
+
+ self._values = values
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NodeSelectorRequirement):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NodeSelectorRequirement):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_term.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_term.py
new file mode 100644
index 0000000000..3b8e8399e2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_selector_term.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1NodeSelectorTerm(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'match_expressions': 'list[V1NodeSelectorRequirement]',
+ 'match_fields': 'list[V1NodeSelectorRequirement]'
+ }
+
+ attribute_map = {
+ 'match_expressions': 'matchExpressions',
+ 'match_fields': 'matchFields'
+ }
+
+ def __init__(self, match_expressions=None, match_fields=None, local_vars_configuration=None): # noqa: E501
+ """V1NodeSelectorTerm - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._match_expressions = None
+ self._match_fields = None
+ self.discriminator = None
+
+ if match_expressions is not None:
+ self.match_expressions = match_expressions
+ if match_fields is not None:
+ self.match_fields = match_fields
+
+ @property
+ def match_expressions(self):
+ """Gets the match_expressions of this V1NodeSelectorTerm. # noqa: E501
+
+ A list of node selector requirements by node's labels. # noqa: E501
+
+ :return: The match_expressions of this V1NodeSelectorTerm. # noqa: E501
+ :rtype: list[V1NodeSelectorRequirement]
+ """
+ return self._match_expressions
+
+ @match_expressions.setter
+ def match_expressions(self, match_expressions):
+ """Sets the match_expressions of this V1NodeSelectorTerm.
+
+ A list of node selector requirements by node's labels. # noqa: E501
+
+ :param match_expressions: The match_expressions of this V1NodeSelectorTerm. # noqa: E501
+ :type: list[V1NodeSelectorRequirement]
+ """
+
+ self._match_expressions = match_expressions
+
+ @property
+ def match_fields(self):
+ """Gets the match_fields of this V1NodeSelectorTerm. # noqa: E501
+
+ A list of node selector requirements by node's fields. # noqa: E501
+
+ :return: The match_fields of this V1NodeSelectorTerm. # noqa: E501
+ :rtype: list[V1NodeSelectorRequirement]
+ """
+ return self._match_fields
+
+ @match_fields.setter
+ def match_fields(self, match_fields):
+ """Sets the match_fields of this V1NodeSelectorTerm.
+
+ A list of node selector requirements by node's fields. # noqa: E501
+
+ :param match_fields: The match_fields of this V1NodeSelectorTerm. # noqa: E501
+ :type: list[V1NodeSelectorRequirement]
+ """
+
+ self._match_fields = match_fields
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1NodeSelectorTerm):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1NodeSelectorTerm):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_spec.py
new file mode 100644
index 0000000000..d59ca66d61
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_spec.py
@@ -0,0 +1,288 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeSpec(object):
    """OpenAPI model for the Kubernetes ``NodeSpec`` object.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'config_source': 'V1NodeConfigSource',
        'external_id': 'str',
        'pod_cidr': 'str',
        'pod_cid_rs': 'list[str]',
        'provider_id': 'str',
        'taints': 'list[V1Taint]',
        'unschedulable': 'bool'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'config_source': 'configSource',
        'external_id': 'externalID',
        'pod_cidr': 'podCIDR',
        'pod_cid_rs': 'podCIDRs',
        'provider_id': 'providerID',
        'taints': 'taints',
        'unschedulable': 'unschedulable'
    }

    def __init__(self, config_source=None, external_id=None, pod_cidr=None, pod_cid_rs=None, provider_id=None, taints=None, unschedulable=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing storage for every declared property.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        # Route each non-None constructor argument through its property setter.
        for attr, arg in (
                ('config_source', config_source),
                ('external_id', external_id),
                ('pod_cidr', pod_cidr),
                ('pod_cid_rs', pod_cid_rs),
                ('provider_id', provider_id),
                ('taints', taints),
                ('unschedulable', unschedulable)):
            if arg is not None:
                setattr(self, attr, arg)

    @property
    def config_source(self):
        """The config_source of this V1NodeSpec.

        :rtype: V1NodeConfigSource
        """
        return self._config_source

    @config_source.setter
    def config_source(self, value):
        """Set the config_source of this V1NodeSpec (V1NodeConfigSource)."""
        self._config_source = value

    @property
    def external_id(self):
        """Deprecated external node ID; not all kubelets set this field.
        See https://issues.k8s.io/61966.

        :rtype: str
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """Set the external_id of this V1NodeSpec (str, deprecated)."""
        self._external_id = value

    @property
    def pod_cidr(self):
        """PodCIDR represents the pod IP range assigned to the node.

        :rtype: str
        """
        return self._pod_cidr

    @pod_cidr.setter
    def pod_cidr(self, value):
        """Set the pod_cidr of this V1NodeSpec (str)."""
        self._pod_cidr = value

    @property
    def pod_cid_rs(self):
        """podCIDRs: IP ranges assigned to the node for use by Pods. If set,
        the 0th entry must match podCIDR; at most one value each for IPv4
        and IPv6.

        :rtype: list[str]
        """
        return self._pod_cid_rs

    @pod_cid_rs.setter
    def pod_cid_rs(self, value):
        """Set the pod_cid_rs of this V1NodeSpec (list[str])."""
        self._pod_cid_rs = value

    @property
    def provider_id(self):
        """Cloud-provider node ID in the format
        <ProviderName>://<ProviderSpecificNodeID>.

        :rtype: str
        """
        return self._provider_id

    @provider_id.setter
    def provider_id(self, value):
        """Set the provider_id of this V1NodeSpec (str)."""
        self._provider_id = value

    @property
    def taints(self):
        """If specified, the node's taints.

        :rtype: list[V1Taint]
        """
        return self._taints

    @taints.setter
    def taints(self, value):
        """Set the taints of this V1NodeSpec (list[V1Taint])."""
        self._taints = value

    @property
    def unschedulable(self):
        """Unschedulable controls node schedulability of new pods; nodes are
        schedulable by default.

        :rtype: bool
        """
        return self._unschedulable

    @unschedulable.setter
    def unschedulable(self, value):
        """Set the unschedulable flag of this V1NodeSpec (bool)."""
        self._unschedulable = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into nested
        models inside lists and dict values."""
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        output = {}
        for name in self.openapi_types:
            raw = getattr(self, name)
            if isinstance(raw, list):
                output[name] = [_convert(item) for item in raw]
            elif hasattr(raw, "to_dict"):
                output[name] = raw.to_dict()
            elif isinstance(raw, dict):
                output[name] = {key: _convert(val) for key, val in raw.items()}
            else:
                output[name] = raw

        return output

    def to_str(self):
        """Return a pretty-printed string form of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr`` to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is a V1NodeSpec with matching serialized form."""
        if isinstance(other, V1NodeSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_status.py
new file mode 100644
index 0000000000..e189bbdd23
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_status.py
@@ -0,0 +1,396 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeStatus(object):
    """OpenAPI model for the Kubernetes ``NodeStatus`` object.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'addresses': 'list[V1NodeAddress]',
        'allocatable': 'dict(str, str)',
        'capacity': 'dict(str, str)',
        'conditions': 'list[V1NodeCondition]',
        'config': 'V1NodeConfigStatus',
        'daemon_endpoints': 'V1NodeDaemonEndpoints',
        'images': 'list[V1ContainerImage]',
        'node_info': 'V1NodeSystemInfo',
        'phase': 'str',
        'volumes_attached': 'list[V1AttachedVolume]',
        'volumes_in_use': 'list[str]'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'addresses': 'addresses',
        'allocatable': 'allocatable',
        'capacity': 'capacity',
        'conditions': 'conditions',
        'config': 'config',
        'daemon_endpoints': 'daemonEndpoints',
        'images': 'images',
        'node_info': 'nodeInfo',
        'phase': 'phase',
        'volumes_attached': 'volumesAttached',
        'volumes_in_use': 'volumesInUse'
    }

    def __init__(self, addresses=None, allocatable=None, capacity=None, conditions=None, config=None, daemon_endpoints=None, images=None, node_info=None, phase=None, volumes_attached=None, volumes_in_use=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing storage for every declared property.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        # Route each non-None constructor argument through its property setter.
        for attr, arg in (
                ('addresses', addresses),
                ('allocatable', allocatable),
                ('capacity', capacity),
                ('conditions', conditions),
                ('config', config),
                ('daemon_endpoints', daemon_endpoints),
                ('images', images),
                ('node_info', node_info),
                ('phase', phase),
                ('volumes_attached', volumes_attached),
                ('volumes_in_use', volumes_in_use)):
            if arg is not None:
                setattr(self, attr, arg)

    @property
    def addresses(self):
        """Addresses reachable to the node, queried from the cloud provider
        when available. Declared mergeable upstream, but the merge key is not
        sufficiently unique — use full-replacement patches (see
        https://pr.k8s.io/79391). Addresses may change during the Node's
        lifetime.

        :rtype: list[V1NodeAddress]
        """
        return self._addresses

    @addresses.setter
    def addresses(self, value):
        """Set the addresses of this V1NodeStatus (list[V1NodeAddress])."""
        self._addresses = value

    @property
    def allocatable(self):
        """Resources of the node available for scheduling; defaults to
        Capacity.

        :rtype: dict(str, str)
        """
        return self._allocatable

    @allocatable.setter
    def allocatable(self, value):
        """Set the allocatable resources of this V1NodeStatus (dict(str, str))."""
        self._allocatable = value

    @property
    def capacity(self):
        """Total resources of the node. More info:
        https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity

        :rtype: dict(str, str)
        """
        return self._capacity

    @capacity.setter
    def capacity(self, value):
        """Set the capacity of this V1NodeStatus (dict(str, str))."""
        self._capacity = value

    @property
    def conditions(self):
        """Currently observed node conditions. More info:
        https://kubernetes.io/docs/concepts/nodes/node/#condition

        :rtype: list[V1NodeCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, value):
        """Set the conditions of this V1NodeStatus (list[V1NodeCondition])."""
        self._conditions = value

    @property
    def config(self):
        """The config of this V1NodeStatus.

        :rtype: V1NodeConfigStatus
        """
        return self._config

    @config.setter
    def config(self, value):
        """Set the config of this V1NodeStatus (V1NodeConfigStatus)."""
        self._config = value

    @property
    def daemon_endpoints(self):
        """The daemon_endpoints of this V1NodeStatus.

        :rtype: V1NodeDaemonEndpoints
        """
        return self._daemon_endpoints

    @daemon_endpoints.setter
    def daemon_endpoints(self, value):
        """Set the daemon_endpoints of this V1NodeStatus (V1NodeDaemonEndpoints)."""
        self._daemon_endpoints = value

    @property
    def images(self):
        """Container images present on this node.

        :rtype: list[V1ContainerImage]
        """
        return self._images

    @images.setter
    def images(self, value):
        """Set the images of this V1NodeStatus (list[V1ContainerImage])."""
        self._images = value

    @property
    def node_info(self):
        """The node_info of this V1NodeStatus.

        :rtype: V1NodeSystemInfo
        """
        return self._node_info

    @node_info.setter
    def node_info(self, value):
        """Set the node_info of this V1NodeStatus (V1NodeSystemInfo)."""
        self._node_info = value

    @property
    def phase(self):
        """Recently observed lifecycle phase of the node. Never populated and
        now deprecated. More info:
        https://kubernetes.io/docs/concepts/nodes/node/#phase

        :rtype: str
        """
        return self._phase

    @phase.setter
    def phase(self, value):
        """Set the phase of this V1NodeStatus (str, deprecated)."""
        self._phase = value

    @property
    def volumes_attached(self):
        """Volumes attached to the node.

        :rtype: list[V1AttachedVolume]
        """
        return self._volumes_attached

    @volumes_attached.setter
    def volumes_attached(self, value):
        """Set the volumes_attached of this V1NodeStatus (list[V1AttachedVolume])."""
        self._volumes_attached = value

    @property
    def volumes_in_use(self):
        """Attachable volumes currently in use (mounted) by the node.

        :rtype: list[str]
        """
        return self._volumes_in_use

    @volumes_in_use.setter
    def volumes_in_use(self, value):
        """Set the volumes_in_use of this V1NodeStatus (list[str])."""
        self._volumes_in_use = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into nested
        models inside lists and dict values."""
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        output = {}
        for name in self.openapi_types:
            raw = getattr(self, name)
            if isinstance(raw, list):
                output[name] = [_convert(item) for item in raw]
            elif hasattr(raw, "to_dict"):
                output[name] = raw.to_dict()
            elif isinstance(raw, dict):
                output[name] = {key: _convert(val) for key, val in raw.items()}
            else:
                output[name] = raw

        return output

    def to_str(self):
        """Return a pretty-printed string form of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr`` to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is a V1NodeStatus with matching serialized form."""
        if isinstance(other, V1NodeStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_node_system_info.py b/contrib/python/kubernetes/kubernetes/client/models/v1_node_system_info.py
new file mode 100644
index 0000000000..876743a1dd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_node_system_info.py
@@ -0,0 +1,384 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NodeSystemInfo(object):
    """OpenAPI model for the Kubernetes ``NodeSystemInfo`` object.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'architecture': 'str',
        'boot_id': 'str',
        'container_runtime_version': 'str',
        'kernel_version': 'str',
        'kube_proxy_version': 'str',
        'kubelet_version': 'str',
        'machine_id': 'str',
        'operating_system': 'str',
        'os_image': 'str',
        'system_uuid': 'str'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'architecture': 'architecture',
        'boot_id': 'bootID',
        'container_runtime_version': 'containerRuntimeVersion',
        'kernel_version': 'kernelVersion',
        'kube_proxy_version': 'kubeProxyVersion',
        'kubelet_version': 'kubeletVersion',
        'machine_id': 'machineID',
        'operating_system': 'operatingSystem',
        'os_image': 'osImage',
        'system_uuid': 'systemUUID'
    }

    def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, system_uuid=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeSystemInfo - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing storage for every declared property.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        # Every field is required, so assign unconditionally; the property
        # setters reject None when client-side validation is enabled.
        for attr, arg in (
                ('architecture', architecture),
                ('boot_id', boot_id),
                ('container_runtime_version', container_runtime_version),
                ('kernel_version', kernel_version),
                ('kube_proxy_version', kube_proxy_version),
                ('kubelet_version', kubelet_version),
                ('machine_id', machine_id),
                ('operating_system', operating_system),
                ('os_image', os_image),
                ('system_uuid', system_uuid)):
            setattr(self, attr, arg)

    @property
    def architecture(self):
        """The Architecture reported by the node.

        :rtype: str
        """
        return self._architecture

    @architecture.setter
    def architecture(self, value):
        """Set architecture (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `architecture`, must not be `None`")  # noqa: E501
        self._architecture = value

    @property
    def boot_id(self):
        """Boot ID reported by the node.

        :rtype: str
        """
        return self._boot_id

    @boot_id.setter
    def boot_id(self, value):
        """Set boot_id (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `boot_id`, must not be `None`")  # noqa: E501
        self._boot_id = value

    @property
    def container_runtime_version(self):
        """Container runtime version reported through the runtime remote API
        (e.g. containerd://1.4.2).

        :rtype: str
        """
        return self._container_runtime_version

    @container_runtime_version.setter
    def container_runtime_version(self, value):
        """Set container_runtime_version (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `container_runtime_version`, must not be `None`")  # noqa: E501
        self._container_runtime_version = value

    @property
    def kernel_version(self):
        """Kernel version from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).

        :rtype: str
        """
        return self._kernel_version

    @kernel_version.setter
    def kernel_version(self, value):
        """Set kernel_version (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `kernel_version`, must not be `None`")  # noqa: E501
        self._kernel_version = value

    @property
    def kube_proxy_version(self):
        """KubeProxy version reported by the node.

        :rtype: str
        """
        return self._kube_proxy_version

    @kube_proxy_version.setter
    def kube_proxy_version(self, value):
        """Set kube_proxy_version (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`")  # noqa: E501
        self._kube_proxy_version = value

    @property
    def kubelet_version(self):
        """Kubelet version reported by the node.

        :rtype: str
        """
        return self._kubelet_version

    @kubelet_version.setter
    def kubelet_version(self, value):
        """Set kubelet_version (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `kubelet_version`, must not be `None`")  # noqa: E501
        self._kubelet_version = value

    @property
    def machine_id(self):
        """MachineID reported by the node; preferred for unique machine
        identification in the cluster. See man(5) machine-id:
        http://man7.org/linux/man-pages/man5/machine-id.5.html

        :rtype: str
        """
        return self._machine_id

    @machine_id.setter
    def machine_id(self, value):
        """Set machine_id (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `machine_id`, must not be `None`")  # noqa: E501
        self._machine_id = value

    @property
    def operating_system(self):
        """The Operating System reported by the node.

        :rtype: str
        """
        return self._operating_system

    @operating_system.setter
    def operating_system(self, value):
        """Set operating_system (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `operating_system`, must not be `None`")  # noqa: E501
        self._operating_system = value

    @property
    def os_image(self):
        """OS image from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).

        :rtype: str
        """
        return self._os_image

    @os_image.setter
    def os_image(self, value):
        """Set os_image (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `os_image`, must not be `None`")  # noqa: E501
        self._os_image = value

    @property
    def system_uuid(self):
        """SystemUUID reported by the node; MachineID is preferred for unique
        identification. Specific to Red Hat hosts:
        https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid

        :rtype: str
        """
        return self._system_uuid

    @system_uuid.setter
    def system_uuid(self, value):
        """Set system_uuid (str, required)."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `system_uuid`, must not be `None`")  # noqa: E501
        self._system_uuid = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into nested
        models inside lists and dict values."""
        def _convert(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        output = {}
        for name in self.openapi_types:
            raw = getattr(self, name)
            if isinstance(raw, list):
                output[name] = [_convert(item) for item in raw]
            elif hasattr(raw, "to_dict"):
                output[name] = raw.to_dict()
            elif isinstance(raw, dict):
                output[name] = {key: _convert(val) for key, val in raw.items()}
            else:
                output[name] = raw

        return output

    def to_str(self):
        """Return a pretty-printed string form of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr`` to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is a V1NodeSystemInfo with matching serialized form."""
        if isinstance(other, V1NodeSystemInfo):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1NodeSystemInfo):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_attributes.py b/contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_attributes.py
new file mode 100644
index 0000000000..0b8ced17f5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_attributes.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NonResourceAttributes(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> attribute type.
    openapi_types = {
        'path': 'str',
        'verb': 'str'
    }

    # attribute_map: attribute name -> json key in the API definition.
    attribute_map = {
        'path': 'path',
        'verb': 'verb'
    }

    def __init__(self, path=None, verb=None, local_vars_configuration=None):  # noqa: E501
        """V1NonResourceAttributes - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._path = None
        self._verb = None
        self.discriminator = None

        # Both fields are optional; only route provided values through
        # the property setters.
        if path is not None:
            self.path = path
        if verb is not None:
            self.verb = verb

    @property
    def path(self):
        """Gets the path of this V1NonResourceAttributes.  # noqa: E501

        Path is the URL path of the request  # noqa: E501

        :return: The path of this V1NonResourceAttributes.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1NonResourceAttributes.

        Path is the URL path of the request  # noqa: E501

        :param path: The path of this V1NonResourceAttributes.  # noqa: E501
        :type: str
        """
        self._path = path

    @property
    def verb(self):
        """Gets the verb of this V1NonResourceAttributes.  # noqa: E501

        Verb is the standard HTTP verb  # noqa: E501

        :return: The verb of this V1NonResourceAttributes.  # noqa: E501
        :rtype: str
        """
        return self._verb

    @verb.setter
    def verb(self, verb):
        """Sets the verb of this V1NonResourceAttributes.

        Verb is the standard HTTP verb  # noqa: E501

        :param verb: The verb of this V1NonResourceAttributes.  # noqa: E501
        :type: str
        """
        self._verb = verb

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for elem in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, V1NonResourceAttributes)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, V1NonResourceAttributes)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_rule.py
new file mode 100644
index 0000000000..a094eff573
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_non_resource_rule.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1NonResourceRule(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> attribute type.
    openapi_types = {
        'non_resource_ur_ls': 'list[str]',
        'verbs': 'list[str]'
    }

    # attribute_map: attribute name -> json key in the API definition.
    attribute_map = {
        'non_resource_ur_ls': 'nonResourceURLs',
        'verbs': 'verbs'
    }

    def __init__(self, non_resource_ur_ls=None, verbs=None, local_vars_configuration=None):  # noqa: E501
        """V1NonResourceRule - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._non_resource_ur_ls = None
        self._verbs = None
        self.discriminator = None

        if non_resource_ur_ls is not None:
            self.non_resource_ur_ls = non_resource_ur_ls
        # `verbs` is required, so it is always routed through the
        # validating setter, even when None.
        self.verbs = verbs

    @property
    def non_resource_ur_ls(self):
        """Gets the non_resource_ur_ls of this V1NonResourceRule.  # noqa: E501

        NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path.  \"*\" means all.  # noqa: E501

        :return: The non_resource_ur_ls of this V1NonResourceRule.  # noqa: E501
        :rtype: list[str]
        """
        return self._non_resource_ur_ls

    @non_resource_ur_ls.setter
    def non_resource_ur_ls(self, non_resource_ur_ls):
        """Sets the non_resource_ur_ls of this V1NonResourceRule.

        NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path.  \"*\" means all.  # noqa: E501

        :param non_resource_ur_ls: The non_resource_ur_ls of this V1NonResourceRule.  # noqa: E501
        :type: list[str]
        """
        self._non_resource_ur_ls = non_resource_ur_ls

    @property
    def verbs(self):
        """Gets the verbs of this V1NonResourceRule.  # noqa: E501

        Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  \"*\" means all.  # noqa: E501

        :return: The verbs of this V1NonResourceRule.  # noqa: E501
        :rtype: list[str]
        """
        return self._verbs

    @verbs.setter
    def verbs(self, verbs):
        """Sets the verbs of this V1NonResourceRule.

        Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options.  \"*\" means all.  # noqa: E501

        :param verbs: The verbs of this V1NonResourceRule.  # noqa: E501
        :type: list[str]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and verbs is None:  # noqa: E501
            raise ValueError("Invalid value for `verbs`, must not be `None`")  # noqa: E501

        self._verbs = verbs

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for elem in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, V1NonResourceRule)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, V1NonResourceRule)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_object_field_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_object_field_selector.py
new file mode 100644
index 0000000000..49c6bafa48
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_object_field_selector.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ObjectFieldSelector(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> attribute type.
    openapi_types = {
        'api_version': 'str',
        'field_path': 'str'
    }

    # attribute_map: attribute name -> json key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'field_path': 'fieldPath'
    }

    def __init__(self, api_version=None, field_path=None, local_vars_configuration=None):  # noqa: E501
        """V1ObjectFieldSelector - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._field_path = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `field_path` is required, so it is always routed through the
        # validating setter, even when None.
        self.field_path = field_path

    @property
    def api_version(self):
        """Gets the api_version of this V1ObjectFieldSelector.  # noqa: E501

        Version of the schema the FieldPath is written in terms of, defaults to \"v1\".  # noqa: E501

        :return: The api_version of this V1ObjectFieldSelector.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ObjectFieldSelector.

        Version of the schema the FieldPath is written in terms of, defaults to \"v1\".  # noqa: E501

        :param api_version: The api_version of this V1ObjectFieldSelector.  # noqa: E501
        :type: str
        """
        self._api_version = api_version

    @property
    def field_path(self):
        """Gets the field_path of this V1ObjectFieldSelector.  # noqa: E501

        Path of the field to select in the specified API version.  # noqa: E501

        :return: The field_path of this V1ObjectFieldSelector.  # noqa: E501
        :rtype: str
        """
        return self._field_path

    @field_path.setter
    def field_path(self, field_path):
        """Sets the field_path of this V1ObjectFieldSelector.

        Path of the field to select in the specified API version.  # noqa: E501

        :param field_path: The field_path of this V1ObjectFieldSelector.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and field_path is None:  # noqa: E501
            raise ValueError("Invalid value for `field_path`, must not be `None`")  # noqa: E501

        self._field_path = field_path

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for elem in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, V1ObjectFieldSelector)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (not isinstance(other, V1ObjectFieldSelector)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_object_meta.py b/contrib/python/kubernetes/kubernetes/client/models/v1_object_meta.py
new file mode 100644
index 0000000000..55379325e4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_object_meta.py
@@ -0,0 +1,514 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ObjectMeta(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'annotations': 'dict(str, str)',
+ 'creation_timestamp': 'datetime',
+ 'deletion_grace_period_seconds': 'int',
+ 'deletion_timestamp': 'datetime',
+ 'finalizers': 'list[str]',
+ 'generate_name': 'str',
+ 'generation': 'int',
+ 'labels': 'dict(str, str)',
+ 'managed_fields': 'list[V1ManagedFieldsEntry]',
+ 'name': 'str',
+ 'namespace': 'str',
+ 'owner_references': 'list[V1OwnerReference]',
+ 'resource_version': 'str',
+ 'self_link': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'annotations': 'annotations',
+ 'creation_timestamp': 'creationTimestamp',
+ 'deletion_grace_period_seconds': 'deletionGracePeriodSeconds',
+ 'deletion_timestamp': 'deletionTimestamp',
+ 'finalizers': 'finalizers',
+ 'generate_name': 'generateName',
+ 'generation': 'generation',
+ 'labels': 'labels',
+ 'managed_fields': 'managedFields',
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'owner_references': 'ownerReferences',
+ 'resource_version': 'resourceVersion',
+ 'self_link': 'selfLink',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, annotations=None, creation_timestamp=None, deletion_grace_period_seconds=None, deletion_timestamp=None, finalizers=None, generate_name=None, generation=None, labels=None, managed_fields=None, name=None, namespace=None, owner_references=None, resource_version=None, self_link=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1ObjectMeta - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._annotations = None
+ self._creation_timestamp = None
+ self._deletion_grace_period_seconds = None
+ self._deletion_timestamp = None
+ self._finalizers = None
+ self._generate_name = None
+ self._generation = None
+ self._labels = None
+ self._managed_fields = None
+ self._name = None
+ self._namespace = None
+ self._owner_references = None
+ self._resource_version = None
+ self._self_link = None
+ self._uid = None
+ self.discriminator = None
+
+ if annotations is not None:
+ self.annotations = annotations
+ if creation_timestamp is not None:
+ self.creation_timestamp = creation_timestamp
+ if deletion_grace_period_seconds is not None:
+ self.deletion_grace_period_seconds = deletion_grace_period_seconds
+ if deletion_timestamp is not None:
+ self.deletion_timestamp = deletion_timestamp
+ if finalizers is not None:
+ self.finalizers = finalizers
+ if generate_name is not None:
+ self.generate_name = generate_name
+ if generation is not None:
+ self.generation = generation
+ if labels is not None:
+ self.labels = labels
+ if managed_fields is not None:
+ self.managed_fields = managed_fields
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if owner_references is not None:
+ self.owner_references = owner_references
+ if resource_version is not None:
+ self.resource_version = resource_version
+ if self_link is not None:
+ self.self_link = self_link
+ if uid is not None:
+ self.uid = uid
+
+ @property
+ def annotations(self):
+ """Gets the annotations of this V1ObjectMeta. # noqa: E501
+
+ Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations # noqa: E501
+
+ :return: The annotations of this V1ObjectMeta. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._annotations
+
+ @annotations.setter
+ def annotations(self, annotations):
+ """Sets the annotations of this V1ObjectMeta.
+
+ Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations # noqa: E501
+
+ :param annotations: The annotations of this V1ObjectMeta. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._annotations = annotations
+
+ @property
+ def creation_timestamp(self):
+ """Gets the creation_timestamp of this V1ObjectMeta. # noqa: E501
+
+ CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
+
+ :return: The creation_timestamp of this V1ObjectMeta. # noqa: E501
+ :rtype: datetime
+ """
+ return self._creation_timestamp
+
+ @creation_timestamp.setter
+ def creation_timestamp(self, creation_timestamp):
+ """Sets the creation_timestamp of this V1ObjectMeta.
+
+ CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
+
+ :param creation_timestamp: The creation_timestamp of this V1ObjectMeta. # noqa: E501
+ :type: datetime
+ """
+
+ self._creation_timestamp = creation_timestamp
+
+ @property
+ def deletion_grace_period_seconds(self):
+ """Gets the deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
+
+ Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. # noqa: E501
+
+ :return: The deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
+ :rtype: int
+ """
+ return self._deletion_grace_period_seconds
+
+ @deletion_grace_period_seconds.setter
+ def deletion_grace_period_seconds(self, deletion_grace_period_seconds):
+ """Sets the deletion_grace_period_seconds of this V1ObjectMeta.
+
+ Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. # noqa: E501
+
+ :param deletion_grace_period_seconds: The deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
+ :type: int
+ """
+
+ self._deletion_grace_period_seconds = deletion_grace_period_seconds
+
+ @property
+ def deletion_timestamp(self):
+ """Gets the deletion_timestamp of this V1ObjectMeta. # noqa: E501
+
+ DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
+
+ :return: The deletion_timestamp of this V1ObjectMeta. # noqa: E501
+ :rtype: datetime
+ """
+ return self._deletion_timestamp
+
+ @deletion_timestamp.setter
+ def deletion_timestamp(self, deletion_timestamp):
+ """Sets the deletion_timestamp of this V1ObjectMeta.
+
+ DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
+
+ :param deletion_timestamp: The deletion_timestamp of this V1ObjectMeta. # noqa: E501
+ :type: datetime
+ """
+
+ self._deletion_timestamp = deletion_timestamp
+
+ @property
+ def finalizers(self):
+ """Gets the finalizers of this V1ObjectMeta. # noqa: E501
+
+ Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. # noqa: E501
+
+ :return: The finalizers of this V1ObjectMeta. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._finalizers
+
+ @finalizers.setter
+ def finalizers(self, finalizers):
+ """Sets the finalizers of this V1ObjectMeta.
+
+ Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. # noqa: E501
+
+ :param finalizers: The finalizers of this V1ObjectMeta. # noqa: E501
+ :type: list[str]
+ """
+
+ self._finalizers = finalizers
+
+ @property
+ def generate_name(self):
+ """Gets the generate_name of this V1ObjectMeta. # noqa: E501
+
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency # noqa: E501
+
+ :return: The generate_name of this V1ObjectMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._generate_name
+
+ @generate_name.setter
+ def generate_name(self, generate_name):
+ """Sets the generate_name of this V1ObjectMeta.
+
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency # noqa: E501
+
+ :param generate_name: The generate_name of this V1ObjectMeta. # noqa: E501
+ :type: str
+ """
+
+ self._generate_name = generate_name
+
+ @property
+ def generation(self):
+ """Gets the generation of this V1ObjectMeta. # noqa: E501
+
+ A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. # noqa: E501
+
+ :return: The generation of this V1ObjectMeta. # noqa: E501
+ :rtype: int
+ """
+ return self._generation
+
+ @generation.setter
+ def generation(self, generation):
+ """Sets the generation of this V1ObjectMeta.
+
+ A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. # noqa: E501
+
+ :param generation: The generation of this V1ObjectMeta. # noqa: E501
+ :type: int
+ """
+
+ self._generation = generation
+
+ @property
+ def labels(self):
+ """Gets the labels of this V1ObjectMeta. # noqa: E501
+
+ Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels # noqa: E501
+
+ :return: The labels of this V1ObjectMeta. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._labels
+
+ @labels.setter
+ def labels(self, labels):
+ """Sets the labels of this V1ObjectMeta.
+
+ Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels # noqa: E501
+
+ :param labels: The labels of this V1ObjectMeta. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._labels = labels
+
+ @property
+ def managed_fields(self):
+ """Gets the managed_fields of this V1ObjectMeta. # noqa: E501
+
+ ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. # noqa: E501
+
+ :return: The managed_fields of this V1ObjectMeta. # noqa: E501
+ :rtype: list[V1ManagedFieldsEntry]
+ """
+ return self._managed_fields
+
+ @managed_fields.setter
+ def managed_fields(self, managed_fields):
+ """Sets the managed_fields of this V1ObjectMeta.
+
+ ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. # noqa: E501
+
+ :param managed_fields: The managed_fields of this V1ObjectMeta. # noqa: E501
+ :type: list[V1ManagedFieldsEntry]
+ """
+
+ self._managed_fields = managed_fields
+
+ @property
+ def name(self):
+ """Gets the name of this V1ObjectMeta. # noqa: E501
+
+ Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
+
+ :return: The name of this V1ObjectMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ObjectMeta.
+
+ Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
+
+ :param name: The name of this V1ObjectMeta. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1ObjectMeta. # noqa: E501
+
+ Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces # noqa: E501
+
+ :return: The namespace of this V1ObjectMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1ObjectMeta.
+
+ Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces # noqa: E501
+
+ :param namespace: The namespace of this V1ObjectMeta. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def owner_references(self):
+ """Gets the owner_references of this V1ObjectMeta. # noqa: E501
+
+ List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. # noqa: E501
+
+ :return: The owner_references of this V1ObjectMeta. # noqa: E501
+ :rtype: list[V1OwnerReference]
+ """
+ return self._owner_references
+
+ @owner_references.setter
+ def owner_references(self, owner_references):
+ """Sets the owner_references of this V1ObjectMeta.
+
+ List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. # noqa: E501
+
+ :param owner_references: The owner_references of this V1ObjectMeta. # noqa: E501
+ :type: list[V1OwnerReference]
+ """
+
+ self._owner_references = owner_references
+
+ @property
+ def resource_version(self):
+ """Gets the resource_version of this V1ObjectMeta. # noqa: E501
+
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
+
+ :return: The resource_version of this V1ObjectMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_version
+
+ @resource_version.setter
+ def resource_version(self, resource_version):
+ """Sets the resource_version of this V1ObjectMeta.
+
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
+
+ :param resource_version: The resource_version of this V1ObjectMeta. # noqa: E501
+ :type: str
+ """
+
+ self._resource_version = resource_version
+
+ @property
+ def self_link(self):
+ """Gets the self_link of this V1ObjectMeta. # noqa: E501
+
+ Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. # noqa: E501
+
+ :return: The self_link of this V1ObjectMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._self_link
+
+ @self_link.setter
+ def self_link(self, self_link):
+ """Sets the self_link of this V1ObjectMeta.
+
+ Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. # noqa: E501
+
+ :param self_link: The self_link of this V1ObjectMeta. # noqa: E501
+ :type: str
+ """
+
+ self._self_link = self_link
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1ObjectMeta. # noqa: E501
+
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
+
+ :return: The uid of this V1ObjectMeta. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1ObjectMeta.
+
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
+
+ :param uid: The uid of this V1ObjectMeta. # noqa: E501
+ :type: str
+ """
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ObjectMeta):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ObjectMeta):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_object_reference.py
new file mode 100644
index 0000000000..55b9ddd76a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_object_reference.py
@@ -0,0 +1,290 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ObjectReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'field_path': 'str',
+ 'kind': 'str',
+ 'name': 'str',
+ 'namespace': 'str',
+ 'resource_version': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'field_path': 'fieldPath',
+ 'kind': 'kind',
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'resource_version': 'resourceVersion',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, api_version=None, field_path=None, kind=None, name=None, namespace=None, resource_version=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1ObjectReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._field_path = None
+ self._kind = None
+ self._name = None
+ self._namespace = None
+ self._resource_version = None
+ self._uid = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if field_path is not None:
+ self.field_path = field_path
+ if kind is not None:
+ self.kind = kind
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if resource_version is not None:
+ self.resource_version = resource_version
+ if uid is not None:
+ self.uid = uid
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ObjectReference. # noqa: E501
+
+ API version of the referent. # noqa: E501
+
+ :return: The api_version of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ObjectReference.
+
+ API version of the referent. # noqa: E501
+
+ :param api_version: The api_version of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def field_path(self):
+ """Gets the field_path of this V1ObjectReference. # noqa: E501
+
+ If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. # noqa: E501
+
+ :return: The field_path of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._field_path
+
+ @field_path.setter
+ def field_path(self, field_path):
+ """Sets the field_path of this V1ObjectReference.
+
+ If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. # noqa: E501
+
+ :param field_path: The field_path of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._field_path = field_path
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ObjectReference. # noqa: E501
+
+ Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ObjectReference.
+
+ Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1ObjectReference. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ObjectReference.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1ObjectReference. # noqa: E501
+
+ Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ # noqa: E501
+
+ :return: The namespace of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1ObjectReference.
+
+ Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ # noqa: E501
+
+ :param namespace: The namespace of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def resource_version(self):
+ """Gets the resource_version of this V1ObjectReference. # noqa: E501
+
+ Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
+
+ :return: The resource_version of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_version
+
+ @resource_version.setter
+ def resource_version(self, resource_version):
+ """Sets the resource_version of this V1ObjectReference.
+
+ Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
+
+ :param resource_version: The resource_version of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._resource_version = resource_version
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1ObjectReference. # noqa: E501
+
+ UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids # noqa: E501
+
+ :return: The uid of this V1ObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1ObjectReference.
+
+ UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids # noqa: E501
+
+ :param uid: The uid of this V1ObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ObjectReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ObjectReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_overhead.py b/contrib/python/kubernetes/kubernetes/client/models/v1_overhead.py
new file mode 100644
index 0000000000..cf26bec655
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_overhead.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Overhead(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'pod_fixed': 'dict(str, str)'
+ }
+
+ attribute_map = {
+ 'pod_fixed': 'podFixed'
+ }
+
+ def __init__(self, pod_fixed=None, local_vars_configuration=None): # noqa: E501
+ """V1Overhead - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._pod_fixed = None
+ self.discriminator = None
+
+ if pod_fixed is not None:
+ self.pod_fixed = pod_fixed
+
+ @property
+ def pod_fixed(self):
+ """Gets the pod_fixed of this V1Overhead. # noqa: E501
+
+ podFixed represents the fixed resource overhead associated with running a pod. # noqa: E501
+
+ :return: The pod_fixed of this V1Overhead. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._pod_fixed
+
+ @pod_fixed.setter
+ def pod_fixed(self, pod_fixed):
+ """Sets the pod_fixed of this V1Overhead.
+
+ podFixed represents the fixed resource overhead associated with running a pod. # noqa: E501
+
+ :param pod_fixed: The pod_fixed of this V1Overhead. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._pod_fixed = pod_fixed
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Overhead):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Overhead):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_owner_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_owner_reference.py
new file mode 100644
index 0000000000..e9173d6c05
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_owner_reference.py
@@ -0,0 +1,266 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1OwnerReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'block_owner_deletion': 'bool',
+ 'controller': 'bool',
+ 'kind': 'str',
+ 'name': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'block_owner_deletion': 'blockOwnerDeletion',
+ 'controller': 'controller',
+ 'kind': 'kind',
+ 'name': 'name',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, api_version=None, block_owner_deletion=None, controller=None, kind=None, name=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1OwnerReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._block_owner_deletion = None
+ self._controller = None
+ self._kind = None
+ self._name = None
+ self._uid = None
+ self.discriminator = None
+
+ self.api_version = api_version
+ if block_owner_deletion is not None:
+ self.block_owner_deletion = block_owner_deletion
+ if controller is not None:
+ self.controller = controller
+ self.kind = kind
+ self.name = name
+ self.uid = uid
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1OwnerReference. # noqa: E501
+
+ API version of the referent. # noqa: E501
+
+ :return: The api_version of this V1OwnerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1OwnerReference.
+
+ API version of the referent. # noqa: E501
+
+ :param api_version: The api_version of this V1OwnerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and api_version is None: # noqa: E501
+ raise ValueError("Invalid value for `api_version`, must not be `None`") # noqa: E501
+
+ self._api_version = api_version
+
+ @property
+ def block_owner_deletion(self):
+ """Gets the block_owner_deletion of this V1OwnerReference. # noqa: E501
+
+ If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. # noqa: E501
+
+ :return: The block_owner_deletion of this V1OwnerReference. # noqa: E501
+ :rtype: bool
+ """
+ return self._block_owner_deletion
+
+ @block_owner_deletion.setter
+ def block_owner_deletion(self, block_owner_deletion):
+ """Sets the block_owner_deletion of this V1OwnerReference.
+
+ If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. # noqa: E501
+
+ :param block_owner_deletion: The block_owner_deletion of this V1OwnerReference. # noqa: E501
+ :type: bool
+ """
+
+ self._block_owner_deletion = block_owner_deletion
+
+ @property
+ def controller(self):
+ """Gets the controller of this V1OwnerReference. # noqa: E501
+
+ If true, this reference points to the managing controller. # noqa: E501
+
+ :return: The controller of this V1OwnerReference. # noqa: E501
+ :rtype: bool
+ """
+ return self._controller
+
+ @controller.setter
+ def controller(self, controller):
+ """Sets the controller of this V1OwnerReference.
+
+ If true, this reference points to the managing controller. # noqa: E501
+
+ :param controller: The controller of this V1OwnerReference. # noqa: E501
+ :type: bool
+ """
+
+ self._controller = controller
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1OwnerReference. # noqa: E501
+
+ Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1OwnerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1OwnerReference.
+
+ Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1OwnerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1OwnerReference. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
+
+ :return: The name of this V1OwnerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1OwnerReference.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
+
+ :param name: The name of this V1OwnerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1OwnerReference. # noqa: E501
+
+ UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
+
+ :return: The uid of this V1OwnerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1OwnerReference.
+
+ UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
+
+ :param uid: The uid of this V1OwnerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
+ raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1OwnerReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1OwnerReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume.py
new file mode 100644
index 0000000000..bdbef18c02
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PersistentVolume(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1PersistentVolumeSpec',
+ 'status': 'V1PersistentVolumeStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1PersistentVolume - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PersistentVolume. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PersistentVolume. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PersistentVolume.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PersistentVolume. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PersistentVolume. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PersistentVolume. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PersistentVolume.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PersistentVolume. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PersistentVolume. # noqa: E501
+
+
+ :return: The metadata of this V1PersistentVolume. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PersistentVolume.
+
+
+ :param metadata: The metadata of this V1PersistentVolume. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1PersistentVolume. # noqa: E501
+
+
+ :return: The spec of this V1PersistentVolume. # noqa: E501
+ :rtype: V1PersistentVolumeSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1PersistentVolume.
+
+
+ :param spec: The spec of this V1PersistentVolume. # noqa: E501
+ :type: V1PersistentVolumeSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1PersistentVolume. # noqa: E501
+
+
+ :return: The status of this V1PersistentVolume. # noqa: E501
+ :rtype: V1PersistentVolumeStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1PersistentVolume.
+
+
+ :param status: The status of this V1PersistentVolume. # noqa: E501
+ :type: V1PersistentVolumeStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PersistentVolume):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PersistentVolume):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim.py
new file mode 100644
index 0000000000..b848aef7cb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PersistentVolumeClaim(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1PersistentVolumeClaimSpec',
+ 'status': 'V1PersistentVolumeClaimStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1PersistentVolumeClaim - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PersistentVolumeClaim. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PersistentVolumeClaim. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PersistentVolumeClaim.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PersistentVolumeClaim. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PersistentVolumeClaim. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PersistentVolumeClaim. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PersistentVolumeClaim.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PersistentVolumeClaim. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PersistentVolumeClaim. # noqa: E501
+
+
+ :return: The metadata of this V1PersistentVolumeClaim. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PersistentVolumeClaim.
+
+
+ :param metadata: The metadata of this V1PersistentVolumeClaim. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1PersistentVolumeClaim. # noqa: E501
+
+
+ :return: The spec of this V1PersistentVolumeClaim. # noqa: E501
+ :rtype: V1PersistentVolumeClaimSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1PersistentVolumeClaim.
+
+
+ :param spec: The spec of this V1PersistentVolumeClaim. # noqa: E501
+ :type: V1PersistentVolumeClaimSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1PersistentVolumeClaim. # noqa: E501
+
+
+ :return: The status of this V1PersistentVolumeClaim. # noqa: E501
+ :rtype: V1PersistentVolumeClaimStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1PersistentVolumeClaim.
+
+
+ :param status: The status of this V1PersistentVolumeClaim. # noqa: E501
+ :type: V1PersistentVolumeClaimStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PersistentVolumeClaim):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PersistentVolumeClaim):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_condition.py
new file mode 100644
index 0000000000..3c5923d0f5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_condition.py
@@ -0,0 +1,260 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PersistentVolumeClaimCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_probe_time': 'datetime',
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_probe_time': 'lastProbeTime',
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_probe_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1PersistentVolumeClaimCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_probe_time = None
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_probe_time is not None:
+ self.last_probe_time = last_probe_time
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_probe_time(self):
+ """Gets the last_probe_time of this V1PersistentVolumeClaimCondition. # noqa: E501
+
+ lastProbeTime is the time we probed the condition. # noqa: E501
+
+ :return: The last_probe_time of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_probe_time
+
+ @last_probe_time.setter
+ def last_probe_time(self, last_probe_time):
+ """Sets the last_probe_time of this V1PersistentVolumeClaimCondition.
+
+ lastProbeTime is the time we probed the condition. # noqa: E501
+
+ :param last_probe_time: The last_probe_time of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_probe_time = last_probe_time
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1PersistentVolumeClaimCondition. # noqa: E501
+
+ lastTransitionTime is the time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1PersistentVolumeClaimCondition.
+
+ lastTransitionTime is the time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1PersistentVolumeClaimCondition. # noqa: E501
+
+ message is the human-readable message indicating details about last transition. # noqa: E501
+
+ :return: The message of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1PersistentVolumeClaimCondition.
+
+ message is the human-readable message indicating details about last transition. # noqa: E501
+
+ :param message: The message of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1PersistentVolumeClaimCondition. # noqa: E501
+
+ reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized. # noqa: E501
+
+ :return: The reason of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1PersistentVolumeClaimCondition.
+
+ reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized. # noqa: E501
+
+ :param reason: The reason of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1PersistentVolumeClaimCondition. # noqa: E501
+
+
+ :return: The status of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1PersistentVolumeClaimCondition.
+
+
+ :param status: The status of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1PersistentVolumeClaimCondition. # noqa: E501
+
+
+ :return: The type of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1PersistentVolumeClaimCondition.
+
+
+ :param type: The type of this V1PersistentVolumeClaimCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PersistentVolumeClaimCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PersistentVolumeClaimCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_list.py
new file mode 100644
index 0000000000..246f2d37ce
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PersistentVolumeClaimList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1PersistentVolumeClaim]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1PersistentVolumeClaimList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PersistentVolumeClaimList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PersistentVolumeClaimList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PersistentVolumeClaimList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PersistentVolumeClaimList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1PersistentVolumeClaimList. # noqa: E501
+
+ items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims # noqa: E501
+
+ :return: The items of this V1PersistentVolumeClaimList. # noqa: E501
+ :rtype: list[V1PersistentVolumeClaim]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1PersistentVolumeClaimList.
+
+ items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims # noqa: E501
+
+ :param items: The items of this V1PersistentVolumeClaimList. # noqa: E501
+ :type: list[V1PersistentVolumeClaim]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PersistentVolumeClaimList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PersistentVolumeClaimList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PersistentVolumeClaimList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PersistentVolumeClaimList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PersistentVolumeClaimList. # noqa: E501
+
+
+ :return: The metadata of this V1PersistentVolumeClaimList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PersistentVolumeClaimList.
+
+
+ :param metadata: The metadata of this V1PersistentVolumeClaimList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PersistentVolumeClaimList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PersistentVolumeClaimList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_spec.py
new file mode 100644
index 0000000000..3bf0e0b5cf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_spec.py
@@ -0,0 +1,310 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1PersistentVolumeClaimSpec(object):
    """Spec of a PersistentVolumeClaim: desired access modes, data sources,
    resource requests, selector, storage class, volume mode and volume name.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'access_modes': 'list[str]',
        'data_source': 'V1TypedLocalObjectReference',
        'data_source_ref': 'V1TypedObjectReference',
        'resources': 'V1ResourceRequirements',
        'selector': 'V1LabelSelector',
        'storage_class_name': 'str',
        'volume_mode': 'str',
        'volume_name': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'access_modes': 'accessModes',
        'data_source': 'dataSource',
        'data_source_ref': 'dataSourceRef',
        'resources': 'resources',
        'selector': 'selector',
        'storage_class_name': 'storageClassName',
        'volume_mode': 'volumeMode',
        'volume_name': 'volumeName'
    }

    def __init__(self, access_modes=None, data_source=None, data_source_ref=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeClaimSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for all declared properties start out unset.
        for attr_name in self.openapi_types:
            setattr(self, '_' + attr_name, None)
        self.discriminator = None

        # Route every argument that was actually supplied through its setter.
        supplied = (
            ('access_modes', access_modes),
            ('data_source', data_source),
            ('data_source_ref', data_source_ref),
            ('resources', resources),
            ('selector', selector),
            ('storage_class_name', storage_class_name),
            ('volume_mode', volume_mode),
            ('volume_name', volume_name),
        )
        for attr_name, value in supplied:
            if value is not None:
                setattr(self, attr_name, value)

    @property
    def access_modes(self):
        """list[str]: desired access modes the volume should have.

        More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
        """
        return self._access_modes

    @access_modes.setter
    def access_modes(self, access_modes):
        self._access_modes = access_modes

    @property
    def data_source(self):
        """V1TypedLocalObjectReference: data source of this claim spec."""
        return self._data_source

    @data_source.setter
    def data_source(self, data_source):
        self._data_source = data_source

    @property
    def data_source_ref(self):
        """V1TypedObjectReference: data source reference of this claim spec."""
        return self._data_source_ref

    @data_source_ref.setter
    def data_source_ref(self, data_source_ref):
        self._data_source_ref = data_source_ref

    @property
    def resources(self):
        """V1ResourceRequirements: resource requests of this claim spec."""
        return self._resources

    @resources.setter
    def resources(self, resources):
        self._resources = resources

    @property
    def selector(self):
        """V1LabelSelector: label selector of this claim spec."""
        return self._selector

    @selector.setter
    def selector(self, selector):
        self._selector = selector

    @property
    def storage_class_name(self):
        """str: name of the StorageClass required by the claim.

        More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
        """
        return self._storage_class_name

    @storage_class_name.setter
    def storage_class_name(self, storage_class_name):
        self._storage_class_name = storage_class_name

    @property
    def volume_mode(self):
        """str: type of volume required by the claim (Filesystem when omitted)."""
        return self._volume_mode

    @volume_mode.setter
    def volume_mode(self, volume_mode):
        self._volume_mode = volume_mode

    @property
    def volume_name(self):
        """str: binding reference to the PersistentVolume backing this claim."""
        return self._volume_name

    @volume_name.setter
    def volume_name(self, volume_name):
        self._volume_name = volume_name

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into nested models."""
        def _convert(value):
            # Generated sibling models expose to_dict(); anything else passes through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_convert(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _convert(item) for key, item in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print``/``pprint`` show the contents."""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal when their serialized dicts match."""
        return (
            isinstance(other, V1PersistentVolumeClaimSpec)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_status.py
new file mode 100644
index 0000000000..af4b0d3741
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_status.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1PersistentVolumeClaimStatus(object):
    """Current status of a PersistentVolumeClaim: actual access modes,
    resize bookkeeping, capacity, conditions and phase.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'access_modes': 'list[str]',
        'allocated_resource_statuses': 'dict(str, str)',
        'allocated_resources': 'dict(str, str)',
        'capacity': 'dict(str, str)',
        'conditions': 'list[V1PersistentVolumeClaimCondition]',
        'phase': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'access_modes': 'accessModes',
        'allocated_resource_statuses': 'allocatedResourceStatuses',
        'allocated_resources': 'allocatedResources',
        'capacity': 'capacity',
        'conditions': 'conditions',
        'phase': 'phase'
    }

    def __init__(self, access_modes=None, allocated_resource_statuses=None, allocated_resources=None, capacity=None, conditions=None, phase=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeClaimStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for all declared properties start out unset.
        for attr_name in self.openapi_types:
            setattr(self, '_' + attr_name, None)
        self.discriminator = None

        # Route every argument that was actually supplied through its setter.
        supplied = (
            ('access_modes', access_modes),
            ('allocated_resource_statuses', allocated_resource_statuses),
            ('allocated_resources', allocated_resources),
            ('capacity', capacity),
            ('conditions', conditions),
            ('phase', phase),
        )
        for attr_name, value in supplied:
            if value is not None:
                setattr(self, attr_name, value)

    @property
    def access_modes(self):
        """list[str]: actual access modes the volume backing the PVC has.

        More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
        """
        return self._access_modes

    @access_modes.setter
    def access_modes(self, access_modes):
        self._access_modes = access_modes

    @property
    def allocated_resource_statuses(self):
        """dict(str, str): per-resource resize status for this PVC.

        Keys follow Kubernetes label syntax (e.g. ``storage`` or a prefixed
        custom resource name); values are claim resize states such as
        ControllerResizeInProgress / ControllerResizeFailed /
        NodeResizePending / NodeResizeInProgress / NodeResizeFailed.
        Unset means no resize operation is in progress. Alpha field gated by
        the RecoverVolumeExpansionFailure feature.
        """
        return self._allocated_resource_statuses

    @allocated_resource_statuses.setter
    def allocated_resource_statuses(self, allocated_resource_statuses):
        self._allocated_resource_statuses = allocated_resource_statuses

    @property
    def allocated_resources(self):
        """dict(str, str): resources allocated to the PVC, including capacity.

        May exceed actual capacity while an expansion is requested; quota uses
        the larger of this and ``spec.resources``. Alpha field gated by the
        RecoverVolumeExpansionFailure feature.
        """
        return self._allocated_resources

    @allocated_resources.setter
    def allocated_resources(self, allocated_resources):
        self._allocated_resources = allocated_resources

    @property
    def capacity(self):
        """dict(str, str): actual resources of the underlying volume."""
        return self._capacity

    @capacity.setter
    def capacity(self, capacity):
        self._capacity = capacity

    @property
    def conditions(self):
        """list[V1PersistentVolumeClaimCondition]: current PVC conditions.

        Set to 'ResizeStarted' while the underlying volume is being resized.
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        self._conditions = conditions

    @property
    def phase(self):
        """str: current phase of the PersistentVolumeClaim."""
        return self._phase

    @phase.setter
    def phase(self, phase):
        self._phase = phase

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into nested models."""
        def _convert(value):
            # Generated sibling models expose to_dict(); anything else passes through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_convert(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _convert(item) for key, item in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print``/``pprint`` show the contents."""
        return self.to_str()

    def __eq__(self, other):
        """Two statuses are equal when their serialized dicts match."""
        return (
            isinstance(other, V1PersistentVolumeClaimStatus)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_template.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_template.py
new file mode 100644
index 0000000000..75619ce8ee
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_template.py
@@ -0,0 +1,147 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1PersistentVolumeClaimTemplate(object):
    """Template from which PersistentVolumeClaims are produced: optional
    metadata plus a required claim spec.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'metadata': 'V1ObjectMeta',
        'spec': 'V1PersistentVolumeClaimSpec'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeClaimTemplate - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._metadata = None
        self._spec = None
        self.discriminator = None

        if metadata is not None:
            self.metadata = metadata
        # spec is required: always assigned, so the setter can enforce non-None.
        self.spec = spec

    @property
    def metadata(self):
        """V1ObjectMeta: metadata of this template, or None when unset."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata

    @property
    def spec(self):
        """V1PersistentVolumeClaimSpec: the required claim spec."""
        return self._spec

    @spec.setter
    def spec(self, spec):
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
        self._spec = spec

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into nested models."""
        def _convert(value):
            # Generated sibling models expose to_dict(); anything else passes through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_convert(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _convert(item) for key, item in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print``/``pprint`` show the contents."""
        return self.to_str()

    def __eq__(self, other):
        """Two templates are equal when their serialized dicts match."""
        return (
            isinstance(other, V1PersistentVolumeClaimTemplate)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_volume_source.py
new file mode 100644
index 0000000000..6dbc304ebf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_claim_volume_source.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1PersistentVolumeClaimVolumeSource(object):
    """Volume source referencing a PersistentVolumeClaim in the pod's
    namespace: a required claim name plus an optional read-only flag.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'claim_name': 'str',
        'read_only': 'bool'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'claim_name': 'claimName',
        'read_only': 'readOnly'
    }

    def __init__(self, claim_name=None, read_only=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeClaimVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._claim_name = None
        self._read_only = None
        self.discriminator = None

        # claim_name is required: always assigned, so the setter can enforce non-None.
        self.claim_name = claim_name
        if read_only is not None:
            self.read_only = read_only

    @property
    def claim_name(self):
        """str: name of a PersistentVolumeClaim in the same namespace as the pod.

        More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
        """
        return self._claim_name

    @claim_name.setter
    def claim_name(self, claim_name):
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and claim_name is None:  # noqa: E501
            raise ValueError("Invalid value for `claim_name`, must not be `None`")  # noqa: E501
        self._claim_name = claim_name

    @property
    def read_only(self):
        """bool: force the ReadOnly setting in VolumeMounts (default False)."""
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        self._read_only = read_only

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into nested models."""
        def _convert(value):
            # Generated sibling models expose to_dict(); anything else passes through.
            return value.to_dict() if hasattr(value, "to_dict") else value

        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [_convert(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {key: _convert(item) for key, item in value.items()}
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print``/``pprint`` show the contents."""
        return self.to_str()

    def __eq__(self, other):
        """Two volume sources are equal when their serialized dicts match."""
        return (
            isinstance(other, V1PersistentVolumeClaimVolumeSource)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_list.py
new file mode 100644
index 0000000000..85712bdf83
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1PersistentVolumeList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1PersistentVolume]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # Backing fields for the properties below.
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is the only required field, so it is always routed through
        # its (validating) setter, even when None was passed.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """APIVersion of this V1PersistentVolumeList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1PersistentVolumeList.

        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """The list of persistent volumes held by this V1PersistentVolumeList.

        items is a list of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes # noqa: E501

        :rtype: list[V1PersistentVolume]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the items of this V1PersistentVolumeList.

        Rejects None when client-side validation is enabled, because `items`
        is a required field of the list type.

        :type: list[V1PersistentVolume]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """Kind of this V1PersistentVolumeList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1PersistentVolumeList.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard list metadata of this V1PersistentVolumeList.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1PersistentVolumeList.

        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        # Nested models are detected duck-typed via their to_dict() method.
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        model_as_dict = self.to_dict()
        return pprint.pformat(model_as_dict)

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, V1PersistentVolumeList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, V1PersistentVolumeList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_spec.py
new file mode 100644
index 0000000000..2c7930426e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_spec.py
@@ -0,0 +1,886 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1PersistentVolumeSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> OpenAPI type of that attribute.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'access_modes': 'list[str]',
        'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource',
        'azure_disk': 'V1AzureDiskVolumeSource',
        'azure_file': 'V1AzureFilePersistentVolumeSource',
        'capacity': 'dict(str, str)',
        'cephfs': 'V1CephFSPersistentVolumeSource',
        'cinder': 'V1CinderPersistentVolumeSource',
        'claim_ref': 'V1ObjectReference',
        'csi': 'V1CSIPersistentVolumeSource',
        'fc': 'V1FCVolumeSource',
        'flex_volume': 'V1FlexPersistentVolumeSource',
        'flocker': 'V1FlockerVolumeSource',
        'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource',
        'glusterfs': 'V1GlusterfsPersistentVolumeSource',
        'host_path': 'V1HostPathVolumeSource',
        'iscsi': 'V1ISCSIPersistentVolumeSource',
        'local': 'V1LocalVolumeSource',
        'mount_options': 'list[str]',
        'nfs': 'V1NFSVolumeSource',
        'node_affinity': 'V1VolumeNodeAffinity',
        'persistent_volume_reclaim_policy': 'str',
        'photon_persistent_disk': 'V1PhotonPersistentDiskVolumeSource',
        'portworx_volume': 'V1PortworxVolumeSource',
        'quobyte': 'V1QuobyteVolumeSource',
        'rbd': 'V1RBDPersistentVolumeSource',
        'scale_io': 'V1ScaleIOPersistentVolumeSource',
        'storage_class_name': 'str',
        'storageos': 'V1StorageOSPersistentVolumeSource',
        'volume_mode': 'str',
        'vsphere_volume': 'V1VsphereVirtualDiskVolumeSource'
    }

    attribute_map = {
        'access_modes': 'accessModes',
        'aws_elastic_block_store': 'awsElasticBlockStore',
        'azure_disk': 'azureDisk',
        'azure_file': 'azureFile',
        'capacity': 'capacity',
        'cephfs': 'cephfs',
        'cinder': 'cinder',
        'claim_ref': 'claimRef',
        'csi': 'csi',
        'fc': 'fc',
        'flex_volume': 'flexVolume',
        'flocker': 'flocker',
        'gce_persistent_disk': 'gcePersistentDisk',
        'glusterfs': 'glusterfs',
        'host_path': 'hostPath',
        'iscsi': 'iscsi',
        'local': 'local',
        'mount_options': 'mountOptions',
        'nfs': 'nfs',
        'node_affinity': 'nodeAffinity',
        'persistent_volume_reclaim_policy': 'persistentVolumeReclaimPolicy',
        'photon_persistent_disk': 'photonPersistentDisk',
        'portworx_volume': 'portworxVolume',
        'quobyte': 'quobyte',
        'rbd': 'rbd',
        'scale_io': 'scaleIO',
        'storage_class_name': 'storageClassName',
        'storageos': 'storageos',
        'volume_mode': 'volumeMode',
        'vsphere_volume': 'vsphereVolume'
    }

    def __init__(self, access_modes=None, aws_elastic_block_store=None, azure_disk=None, azure_file=None, capacity=None, cephfs=None, cinder=None, claim_ref=None, csi=None, fc=None, flex_volume=None, flocker=None, gce_persistent_disk=None, glusterfs=None, host_path=None, iscsi=None, local=None, mount_options=None, nfs=None, node_affinity=None, persistent_volume_reclaim_policy=None, photon_persistent_disk=None, portworx_volume=None, quobyte=None, rbd=None, scale_io=None, storage_class_name=None, storageos=None, volume_mode=None, vsphere_volume=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeSpec - a model defined in OpenAPI"""  # noqa: E501
        # Snapshot the constructor arguments before any new locals appear;
        # the snapshot drives the generic assignment loop below.
        _given = dict(locals())

        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Every declared attribute starts with a None backing field.
        for _field in self.openapi_types:
            setattr(self, '_' + _field, None)

        # All fields of this spec are optional: assign only the non-None
        # arguments, going through the public property setters.
        for _field in self.openapi_types:
            _value = _given[_field]
            if _value is not None:
                setattr(self, _field, _value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        # Nested models are detected duck-typed via their to_dict() method.
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        model_as_dict = self.to_dict()
        return pprint.pformat(model_as_dict)

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, V1PersistentVolumeSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, V1PersistentVolumeSpec):
            return self.to_dict() != other.to_dict()
        return True


def _pv_spec_accessor(name):
    """Build the plain get/set property for spec attribute *name*."""
    field = '_' + name

    def _get(self):
        return getattr(self, field)

    def _set(self, value):
        setattr(self, field, value)

    doc = (
        "The `%s` field of this V1PersistentVolumeSpec "
        "(JSON key: `%s`, OpenAPI type: `%s`)."
        % (name,
           V1PersistentVolumeSpec.attribute_map[name],
           V1PersistentVolumeSpec.openapi_types[name])
    )
    return property(_get, _set, doc=doc)


# Every attribute of V1PersistentVolumeSpec is an unvalidated get/set pair,
# so the thirty identical properties are generated instead of spelled out.
for _pv_spec_name in V1PersistentVolumeSpec.openapi_types:
    setattr(V1PersistentVolumeSpec, _pv_spec_name,
            _pv_spec_accessor(_pv_spec_name))
del _pv_spec_name, _pv_spec_accessor
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_status.py
new file mode 100644
index 0000000000..23f1e52a7e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_persistent_volume_status.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PersistentVolumeStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_phase_transition_time': 'datetime',
+ 'message': 'str',
+ 'phase': 'str',
+ 'reason': 'str'
+ }
+
+ attribute_map = {
+ 'last_phase_transition_time': 'lastPhaseTransitionTime',
+ 'message': 'message',
+ 'phase': 'phase',
+ 'reason': 'reason'
+ }
+
+ def __init__(self, last_phase_transition_time=None, message=None, phase=None, reason=None, local_vars_configuration=None): # noqa: E501
+ """V1PersistentVolumeStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_phase_transition_time = None
+ self._message = None
+ self._phase = None
+ self._reason = None
+ self.discriminator = None
+
+ if last_phase_transition_time is not None:
+ self.last_phase_transition_time = last_phase_transition_time
+ if message is not None:
+ self.message = message
+ if phase is not None:
+ self.phase = phase
+ if reason is not None:
+ self.reason = reason
+
+ @property
+ def last_phase_transition_time(self):
+ """Gets the last_phase_transition_time of this V1PersistentVolumeStatus. # noqa: E501
+
+ lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. # noqa: E501
+
+ :return: The last_phase_transition_time of this V1PersistentVolumeStatus. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_phase_transition_time
+
+ @last_phase_transition_time.setter
+ def last_phase_transition_time(self, last_phase_transition_time):
+ """Sets the last_phase_transition_time of this V1PersistentVolumeStatus.
+
+ lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. # noqa: E501
+
+ :param last_phase_transition_time: The last_phase_transition_time of this V1PersistentVolumeStatus. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_phase_transition_time = last_phase_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1PersistentVolumeStatus. # noqa: E501
+
+ message is a human-readable message indicating details about why the volume is in this state. # noqa: E501
+
+ :return: The message of this V1PersistentVolumeStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1PersistentVolumeStatus.
+
+ message is a human-readable message indicating details about why the volume is in this state. # noqa: E501
+
+ :param message: The message of this V1PersistentVolumeStatus. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def phase(self):
+ """Gets the phase of this V1PersistentVolumeStatus. # noqa: E501
+
+ phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase # noqa: E501
+
+ :return: The phase of this V1PersistentVolumeStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._phase
+
+ @phase.setter
+ def phase(self, phase):
+ """Sets the phase of this V1PersistentVolumeStatus.
+
+ phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase # noqa: E501
+
+ :param phase: The phase of this V1PersistentVolumeStatus. # noqa: E501
+ :type: str
+ """
+
+ self._phase = phase
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1PersistentVolumeStatus. # noqa: E501
+
+ reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. # noqa: E501
+
+ :return: The reason of this V1PersistentVolumeStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1PersistentVolumeStatus.
+
+ reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. # noqa: E501
+
+ :param reason: The reason of this V1PersistentVolumeStatus. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PersistentVolumeStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PersistentVolumeStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_photon_persistent_disk_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_photon_persistent_disk_volume_source.py
new file mode 100644
index 0000000000..15a89fe2e3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_photon_persistent_disk_volume_source.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PhotonPersistentDiskVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'pd_id': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'pd_id': 'pdID'
+ }
+
+ def __init__(self, fs_type=None, pd_id=None, local_vars_configuration=None): # noqa: E501
+ """V1PhotonPersistentDiskVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._pd_id = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ self.pd_id = pd_id
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
+
+ fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
+
+ :return: The fs_type of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1PhotonPersistentDiskVolumeSource.
+
+ fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
+
+ :param fs_type: The fs_type of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def pd_id(self):
+ """Gets the pd_id of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
+
+ pdID is the ID that identifies Photon Controller persistent disk # noqa: E501
+
+ :return: The pd_id of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._pd_id
+
+ @pd_id.setter
+ def pd_id(self, pd_id):
+ """Sets the pd_id of this V1PhotonPersistentDiskVolumeSource.
+
+ pdID is the ID that identifies Photon Controller persistent disk # noqa: E501
+
+ :param pd_id: The pd_id of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and pd_id is None: # noqa: E501
+ raise ValueError("Invalid value for `pd_id`, must not be `None`") # noqa: E501
+
+ self._pd_id = pd_id
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PhotonPersistentDiskVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PhotonPersistentDiskVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod.py
new file mode 100644
index 0000000000..e4350815f4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Pod(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1PodSpec',
+ 'status': 'V1PodStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1Pod - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Pod. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Pod. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Pod.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Pod. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Pod. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Pod. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Pod.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Pod. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Pod. # noqa: E501
+
+
+ :return: The metadata of this V1Pod. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Pod.
+
+
+ :param metadata: The metadata of this V1Pod. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1Pod. # noqa: E501
+
+
+ :return: The spec of this V1Pod. # noqa: E501
+ :rtype: V1PodSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1Pod.
+
+
+ :param spec: The spec of this V1Pod. # noqa: E501
+ :type: V1PodSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1Pod. # noqa: E501
+
+
+ :return: The status of this V1Pod. # noqa: E501
+ :rtype: V1PodStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1Pod.
+
+
+ :param status: The status of this V1Pod. # noqa: E501
+ :type: V1PodStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Pod):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Pod):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity.py
new file mode 100644
index 0000000000..7724e6c691
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodAffinity(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'preferred_during_scheduling_ignored_during_execution': 'list[V1WeightedPodAffinityTerm]',
+ 'required_during_scheduling_ignored_during_execution': 'list[V1PodAffinityTerm]'
+ }
+
+ attribute_map = {
+ 'preferred_during_scheduling_ignored_during_execution': 'preferredDuringSchedulingIgnoredDuringExecution',
+ 'required_during_scheduling_ignored_during_execution': 'requiredDuringSchedulingIgnoredDuringExecution'
+ }
+
+ def __init__(self, preferred_during_scheduling_ignored_during_execution=None, required_during_scheduling_ignored_during_execution=None, local_vars_configuration=None): # noqa: E501
+ """V1PodAffinity - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._preferred_during_scheduling_ignored_during_execution = None
+ self._required_during_scheduling_ignored_during_execution = None
+ self.discriminator = None
+
+ if preferred_during_scheduling_ignored_during_execution is not None:
+ self.preferred_during_scheduling_ignored_during_execution = preferred_during_scheduling_ignored_during_execution
+ if required_during_scheduling_ignored_during_execution is not None:
+ self.required_during_scheduling_ignored_during_execution = required_during_scheduling_ignored_during_execution
+
+ @property
+ def preferred_during_scheduling_ignored_during_execution(self):
+ """Gets the preferred_during_scheduling_ignored_during_execution of this V1PodAffinity. # noqa: E501
+
+ The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. # noqa: E501
+
+ :return: The preferred_during_scheduling_ignored_during_execution of this V1PodAffinity. # noqa: E501
+ :rtype: list[V1WeightedPodAffinityTerm]
+ """
+ return self._preferred_during_scheduling_ignored_during_execution
+
+ @preferred_during_scheduling_ignored_during_execution.setter
+ def preferred_during_scheduling_ignored_during_execution(self, preferred_during_scheduling_ignored_during_execution):
+ """Sets the preferred_during_scheduling_ignored_during_execution of this V1PodAffinity.
+
+ The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. # noqa: E501
+
+ :param preferred_during_scheduling_ignored_during_execution: The preferred_during_scheduling_ignored_during_execution of this V1PodAffinity. # noqa: E501
+ :type: list[V1WeightedPodAffinityTerm]
+ """
+
+ self._preferred_during_scheduling_ignored_during_execution = preferred_during_scheduling_ignored_during_execution
+
+ @property
+ def required_during_scheduling_ignored_during_execution(self):
+ """Gets the required_during_scheduling_ignored_during_execution of this V1PodAffinity. # noqa: E501
+
+ If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. # noqa: E501
+
+ :return: The required_during_scheduling_ignored_during_execution of this V1PodAffinity. # noqa: E501
+ :rtype: list[V1PodAffinityTerm]
+ """
+ return self._required_during_scheduling_ignored_during_execution
+
+ @required_during_scheduling_ignored_during_execution.setter
+ def required_during_scheduling_ignored_during_execution(self, required_during_scheduling_ignored_during_execution):
+ """Sets the required_during_scheduling_ignored_during_execution of this V1PodAffinity.
+
+ If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. # noqa: E501
+
+ :param required_during_scheduling_ignored_during_execution: The required_during_scheduling_ignored_during_execution of this V1PodAffinity. # noqa: E501
+ :type: list[V1PodAffinityTerm]
+ """
+
+ self._required_during_scheduling_ignored_during_execution = required_during_scheduling_ignored_during_execution
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodAffinity):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodAffinity):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity_term.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity_term.py
new file mode 100644
index 0000000000..97534c91f9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_affinity_term.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodAffinityTerm(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'label_selector': 'V1LabelSelector',
+ 'namespace_selector': 'V1LabelSelector',
+ 'namespaces': 'list[str]',
+ 'topology_key': 'str'
+ }
+
+ attribute_map = {
+ 'label_selector': 'labelSelector',
+ 'namespace_selector': 'namespaceSelector',
+ 'namespaces': 'namespaces',
+ 'topology_key': 'topologyKey'
+ }
+
+ def __init__(self, label_selector=None, namespace_selector=None, namespaces=None, topology_key=None, local_vars_configuration=None): # noqa: E501
+ """V1PodAffinityTerm - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._label_selector = None
+ self._namespace_selector = None
+ self._namespaces = None
+ self._topology_key = None
+ self.discriminator = None
+
+ if label_selector is not None:
+ self.label_selector = label_selector
+ if namespace_selector is not None:
+ self.namespace_selector = namespace_selector
+ if namespaces is not None:
+ self.namespaces = namespaces
+ self.topology_key = topology_key
+
+ @property
+ def label_selector(self):
+ """Gets the label_selector of this V1PodAffinityTerm. # noqa: E501
+
+
+ :return: The label_selector of this V1PodAffinityTerm. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._label_selector
+
+ @label_selector.setter
+ def label_selector(self, label_selector):
+ """Sets the label_selector of this V1PodAffinityTerm.
+
+
+ :param label_selector: The label_selector of this V1PodAffinityTerm. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._label_selector = label_selector
+
+ @property
+ def namespace_selector(self):
+ """Gets the namespace_selector of this V1PodAffinityTerm. # noqa: E501
+
+
+ :return: The namespace_selector of this V1PodAffinityTerm. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._namespace_selector
+
+ @namespace_selector.setter
+ def namespace_selector(self, namespace_selector):
+ """Sets the namespace_selector of this V1PodAffinityTerm.
+
+
+ :param namespace_selector: The namespace_selector of this V1PodAffinityTerm. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._namespace_selector = namespace_selector
+
+ @property
+ def namespaces(self):
+ """Gets the namespaces of this V1PodAffinityTerm. # noqa: E501
+
+ namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\". # noqa: E501
+
+ :return: The namespaces of this V1PodAffinityTerm. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._namespaces
+
+ @namespaces.setter
+ def namespaces(self, namespaces):
+ """Sets the namespaces of this V1PodAffinityTerm.
+
+ namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\". # noqa: E501
+
+ :param namespaces: The namespaces of this V1PodAffinityTerm. # noqa: E501
+ :type: list[str]
+ """
+
+ self._namespaces = namespaces
+
+ @property
+ def topology_key(self):
+ """Gets the topology_key of this V1PodAffinityTerm. # noqa: E501
+
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. # noqa: E501
+
+ :return: The topology_key of this V1PodAffinityTerm. # noqa: E501
+ :rtype: str
+ """
+ return self._topology_key
+
+ @topology_key.setter
+ def topology_key(self, topology_key):
+ """Sets the topology_key of this V1PodAffinityTerm.
+
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. # noqa: E501
+
+ :param topology_key: The topology_key of this V1PodAffinityTerm. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and topology_key is None: # noqa: E501
+ raise ValueError("Invalid value for `topology_key`, must not be `None`") # noqa: E501
+
+ self._topology_key = topology_key
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodAffinityTerm):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodAffinityTerm):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_anti_affinity.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_anti_affinity.py
new file mode 100644
index 0000000000..d79bf3118f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_anti_affinity.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodAntiAffinity(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'preferred_during_scheduling_ignored_during_execution': 'list[V1WeightedPodAffinityTerm]',
+ 'required_during_scheduling_ignored_during_execution': 'list[V1PodAffinityTerm]'
+ }
+
+ attribute_map = {
+ 'preferred_during_scheduling_ignored_during_execution': 'preferredDuringSchedulingIgnoredDuringExecution',
+ 'required_during_scheduling_ignored_during_execution': 'requiredDuringSchedulingIgnoredDuringExecution'
+ }
+
+ def __init__(self, preferred_during_scheduling_ignored_during_execution=None, required_during_scheduling_ignored_during_execution=None, local_vars_configuration=None): # noqa: E501
+ """V1PodAntiAffinity - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._preferred_during_scheduling_ignored_during_execution = None
+ self._required_during_scheduling_ignored_during_execution = None
+ self.discriminator = None
+
+ if preferred_during_scheduling_ignored_during_execution is not None:
+ self.preferred_during_scheduling_ignored_during_execution = preferred_during_scheduling_ignored_during_execution
+ if required_during_scheduling_ignored_during_execution is not None:
+ self.required_during_scheduling_ignored_during_execution = required_during_scheduling_ignored_during_execution
+
+ @property
+ def preferred_during_scheduling_ignored_during_execution(self):
+ """Gets the preferred_during_scheduling_ignored_during_execution of this V1PodAntiAffinity. # noqa: E501
+
+ The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. # noqa: E501
+
+ :return: The preferred_during_scheduling_ignored_during_execution of this V1PodAntiAffinity. # noqa: E501
+ :rtype: list[V1WeightedPodAffinityTerm]
+ """
+ return self._preferred_during_scheduling_ignored_during_execution
+
+ @preferred_during_scheduling_ignored_during_execution.setter
+ def preferred_during_scheduling_ignored_during_execution(self, preferred_during_scheduling_ignored_during_execution):
+ """Sets the preferred_during_scheduling_ignored_during_execution of this V1PodAntiAffinity.
+
+ The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. # noqa: E501
+
+ :param preferred_during_scheduling_ignored_during_execution: The preferred_during_scheduling_ignored_during_execution of this V1PodAntiAffinity. # noqa: E501
+ :type: list[V1WeightedPodAffinityTerm]
+ """
+
+ self._preferred_during_scheduling_ignored_during_execution = preferred_during_scheduling_ignored_during_execution
+
+ @property
+ def required_during_scheduling_ignored_during_execution(self):
+ """Gets the required_during_scheduling_ignored_during_execution of this V1PodAntiAffinity. # noqa: E501
+
+ If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. # noqa: E501
+
+ :return: The required_during_scheduling_ignored_during_execution of this V1PodAntiAffinity. # noqa: E501
+ :rtype: list[V1PodAffinityTerm]
+ """
+ return self._required_during_scheduling_ignored_during_execution
+
+ @required_during_scheduling_ignored_during_execution.setter
+ def required_during_scheduling_ignored_during_execution(self, required_during_scheduling_ignored_during_execution):
+ """Sets the required_during_scheduling_ignored_during_execution of this V1PodAntiAffinity.
+
+ If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. # noqa: E501
+
+ :param required_during_scheduling_ignored_during_execution: The required_during_scheduling_ignored_during_execution of this V1PodAntiAffinity. # noqa: E501
+ :type: list[V1PodAffinityTerm]
+ """
+
+ self._required_during_scheduling_ignored_during_execution = required_during_scheduling_ignored_during_execution
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodAntiAffinity):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodAntiAffinity):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_condition.py
new file mode 100644
index 0000000000..356a0ba698
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_condition.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_probe_time': 'datetime',
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_probe_time': 'lastProbeTime',
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_probe_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1PodCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_probe_time = None
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_probe_time is not None:
+ self.last_probe_time = last_probe_time
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_probe_time(self):
+ """Gets the last_probe_time of this V1PodCondition. # noqa: E501
+
+ Last time we probed the condition. # noqa: E501
+
+ :return: The last_probe_time of this V1PodCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_probe_time
+
+ @last_probe_time.setter
+ def last_probe_time(self, last_probe_time):
+ """Sets the last_probe_time of this V1PodCondition.
+
+ Last time we probed the condition. # noqa: E501
+
+ :param last_probe_time: The last_probe_time of this V1PodCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_probe_time = last_probe_time
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1PodCondition. # noqa: E501
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1PodCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1PodCondition.
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1PodCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1PodCondition. # noqa: E501
+
+ Human-readable message indicating details about last transition. # noqa: E501
+
+ :return: The message of this V1PodCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1PodCondition.
+
+ Human-readable message indicating details about last transition. # noqa: E501
+
+ :param message: The message of this V1PodCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1PodCondition. # noqa: E501
+
+ Unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1PodCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1PodCondition.
+
+ Unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1PodCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1PodCondition. # noqa: E501
+
+ Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions # noqa: E501
+
+ :return: The status of this V1PodCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1PodCondition.
+
+ Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions # noqa: E501
+
+ :param status: The status of this V1PodCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1PodCondition. # noqa: E501
+
+ Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions # noqa: E501
+
+ :return: The type of this V1PodCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1PodCondition.
+
+ Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions # noqa: E501
+
+ :param type: The type of this V1PodCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget.py
new file mode 100644
index 0000000000..a3c11c9830
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodDisruptionBudget(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1PodDisruptionBudgetSpec',
+ 'status': 'V1PodDisruptionBudgetStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1PodDisruptionBudget - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PodDisruptionBudget. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PodDisruptionBudget. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PodDisruptionBudget.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PodDisruptionBudget. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PodDisruptionBudget. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PodDisruptionBudget. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PodDisruptionBudget.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PodDisruptionBudget. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PodDisruptionBudget. # noqa: E501
+
+
+ :return: The metadata of this V1PodDisruptionBudget. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PodDisruptionBudget.
+
+
+ :param metadata: The metadata of this V1PodDisruptionBudget. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1PodDisruptionBudget. # noqa: E501
+
+
+ :return: The spec of this V1PodDisruptionBudget. # noqa: E501
+ :rtype: V1PodDisruptionBudgetSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1PodDisruptionBudget.
+
+
+ :param spec: The spec of this V1PodDisruptionBudget. # noqa: E501
+ :type: V1PodDisruptionBudgetSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1PodDisruptionBudget. # noqa: E501
+
+
+ :return: The status of this V1PodDisruptionBudget. # noqa: E501
+ :rtype: V1PodDisruptionBudgetStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1PodDisruptionBudget.
+
+
+ :param status: The status of this V1PodDisruptionBudget. # noqa: E501
+ :type: V1PodDisruptionBudgetStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodDisruptionBudget):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodDisruptionBudget):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_list.py
new file mode 100644
index 0000000000..b228aa4d4c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodDisruptionBudgetList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1PodDisruptionBudget]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1PodDisruptionBudgetList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PodDisruptionBudgetList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PodDisruptionBudgetList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PodDisruptionBudgetList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PodDisruptionBudgetList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1PodDisruptionBudgetList. # noqa: E501
+
+ Items is a list of PodDisruptionBudgets # noqa: E501
+
+ :return: The items of this V1PodDisruptionBudgetList. # noqa: E501
+ :rtype: list[V1PodDisruptionBudget]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1PodDisruptionBudgetList.
+
+ Items is a list of PodDisruptionBudgets # noqa: E501
+
+ :param items: The items of this V1PodDisruptionBudgetList. # noqa: E501
+ :type: list[V1PodDisruptionBudget]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PodDisruptionBudgetList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PodDisruptionBudgetList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PodDisruptionBudgetList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PodDisruptionBudgetList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PodDisruptionBudgetList. # noqa: E501
+
+
+ :return: The metadata of this V1PodDisruptionBudgetList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PodDisruptionBudgetList.
+
+
+ :param metadata: The metadata of this V1PodDisruptionBudgetList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodDisruptionBudgetList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodDisruptionBudgetList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_spec.py
new file mode 100644
index 0000000000..db1139935d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_spec.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodDisruptionBudgetSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'max_unavailable': 'object',
+ 'min_available': 'object',
+ 'selector': 'V1LabelSelector',
+ 'unhealthy_pod_eviction_policy': 'str'
+ }
+
+ attribute_map = {
+ 'max_unavailable': 'maxUnavailable',
+ 'min_available': 'minAvailable',
+ 'selector': 'selector',
+ 'unhealthy_pod_eviction_policy': 'unhealthyPodEvictionPolicy'
+ }
+
+ def __init__(self, max_unavailable=None, min_available=None, selector=None, unhealthy_pod_eviction_policy=None, local_vars_configuration=None): # noqa: E501
+ """V1PodDisruptionBudgetSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._max_unavailable = None
+ self._min_available = None
+ self._selector = None
+ self._unhealthy_pod_eviction_policy = None
+ self.discriminator = None
+
+ if max_unavailable is not None:
+ self.max_unavailable = max_unavailable
+ if min_available is not None:
+ self.min_available = min_available
+ if selector is not None:
+ self.selector = selector
+ if unhealthy_pod_eviction_policy is not None:
+ self.unhealthy_pod_eviction_policy = unhealthy_pod_eviction_policy
+
+ @property
+ def max_unavailable(self):
+ """Gets the max_unavailable of this V1PodDisruptionBudgetSpec. # noqa: E501
+
+ An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\". # noqa: E501
+
+ :return: The max_unavailable of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :rtype: object
+ """
+ return self._max_unavailable
+
+ @max_unavailable.setter
+ def max_unavailable(self, max_unavailable):
+ """Sets the max_unavailable of this V1PodDisruptionBudgetSpec.
+
+ An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\". # noqa: E501
+
+ :param max_unavailable: The max_unavailable of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :type: object
+ """
+
+ self._max_unavailable = max_unavailable
+
+ @property
+ def min_available(self):
+ """Gets the min_available of this V1PodDisruptionBudgetSpec. # noqa: E501
+
+ An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\". # noqa: E501
+
+ :return: The min_available of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :rtype: object
+ """
+ return self._min_available
+
+ @min_available.setter
+ def min_available(self, min_available):
+ """Sets the min_available of this V1PodDisruptionBudgetSpec.
+
+ An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\". # noqa: E501
+
+ :param min_available: The min_available of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :type: object
+ """
+
+ self._min_available = min_available
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1PodDisruptionBudgetSpec. # noqa: E501
+
+
+ :return: The selector of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1PodDisruptionBudgetSpec.
+
+
+ :param selector: The selector of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._selector = selector
+
+ @property
+ def unhealthy_pod_eviction_policy(self):
+ """Gets the unhealthy_pod_eviction_policy of this V1PodDisruptionBudgetSpec. # noqa: E501
+
+        UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). # noqa: E501
+
+ :return: The unhealthy_pod_eviction_policy of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._unhealthy_pod_eviction_policy
+
+ @unhealthy_pod_eviction_policy.setter
+ def unhealthy_pod_eviction_policy(self, unhealthy_pod_eviction_policy):
+ """Sets the unhealthy_pod_eviction_policy of this V1PodDisruptionBudgetSpec.
+
+        UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). # noqa: E501
+
+ :param unhealthy_pod_eviction_policy: The unhealthy_pod_eviction_policy of this V1PodDisruptionBudgetSpec. # noqa: E501
+ :type: str
+ """
+
+ self._unhealthy_pod_eviction_policy = unhealthy_pod_eviction_policy
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodDisruptionBudgetSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodDisruptionBudgetSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_status.py
new file mode 100644
index 0000000000..17e4149f61
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_disruption_budget_status.py
@@ -0,0 +1,294 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodDisruptionBudgetStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1Condition]',
+ 'current_healthy': 'int',
+ 'desired_healthy': 'int',
+ 'disrupted_pods': 'dict(str, datetime)',
+ 'disruptions_allowed': 'int',
+ 'expected_pods': 'int',
+ 'observed_generation': 'int'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions',
+ 'current_healthy': 'currentHealthy',
+ 'desired_healthy': 'desiredHealthy',
+ 'disrupted_pods': 'disruptedPods',
+ 'disruptions_allowed': 'disruptionsAllowed',
+ 'expected_pods': 'expectedPods',
+ 'observed_generation': 'observedGeneration'
+ }
+
+ def __init__(self, conditions=None, current_healthy=None, desired_healthy=None, disrupted_pods=None, disruptions_allowed=None, expected_pods=None, observed_generation=None, local_vars_configuration=None): # noqa: E501
+ """V1PodDisruptionBudgetStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self._current_healthy = None
+ self._desired_healthy = None
+ self._disrupted_pods = None
+ self._disruptions_allowed = None
+ self._expected_pods = None
+ self._observed_generation = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+ self.current_healthy = current_healthy
+ self.desired_healthy = desired_healthy
+ if disrupted_pods is not None:
+ self.disrupted_pods = disrupted_pods
+ self.disruptions_allowed = disruptions_allowed
+ self.expected_pods = expected_pods
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+ Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute the number of allowed disruptions. Therefore no disruptions are allowed and the status of the condition will be False. - InsufficientPods: The number of pods are either at or below the number required by the PodDisruptionBudget. No disruptions are allowed and the status of the condition will be False. - SufficientPods: There are more pods than required by the PodDisruptionBudget. The condition will be True, and the number of allowed disruptions are provided by the disruptionsAllowed property. # noqa: E501
+
+ :return: The conditions of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: list[V1Condition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1PodDisruptionBudgetStatus.
+
+ Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute the number of allowed disruptions. Therefore no disruptions are allowed and the status of the condition will be False. - InsufficientPods: The number of pods are either at or below the number required by the PodDisruptionBudget. No disruptions are allowed and the status of the condition will be False. - SufficientPods: There are more pods than required by the PodDisruptionBudget. The condition will be True, and the number of allowed disruptions are provided by the disruptionsAllowed property. # noqa: E501
+
+ :param conditions: The conditions of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: list[V1Condition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def current_healthy(self):
+ """Gets the current_healthy of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+ current number of healthy pods # noqa: E501
+
+ :return: The current_healthy of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._current_healthy
+
+ @current_healthy.setter
+ def current_healthy(self, current_healthy):
+ """Sets the current_healthy of this V1PodDisruptionBudgetStatus.
+
+ current number of healthy pods # noqa: E501
+
+ :param current_healthy: The current_healthy of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and current_healthy is None: # noqa: E501
+ raise ValueError("Invalid value for `current_healthy`, must not be `None`") # noqa: E501
+
+ self._current_healthy = current_healthy
+
+ @property
+ def desired_healthy(self):
+ """Gets the desired_healthy of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+ minimum desired number of healthy pods # noqa: E501
+
+ :return: The desired_healthy of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._desired_healthy
+
+ @desired_healthy.setter
+ def desired_healthy(self, desired_healthy):
+ """Sets the desired_healthy of this V1PodDisruptionBudgetStatus.
+
+ minimum desired number of healthy pods # noqa: E501
+
+ :param desired_healthy: The desired_healthy of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and desired_healthy is None: # noqa: E501
+ raise ValueError("Invalid value for `desired_healthy`, must not be `None`") # noqa: E501
+
+ self._desired_healthy = desired_healthy
+
+ @property
+ def disrupted_pods(self):
+ """Gets the disrupted_pods of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+        DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions. # noqa: E501
+
+ :return: The disrupted_pods of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: dict(str, datetime)
+ """
+ return self._disrupted_pods
+
+ @disrupted_pods.setter
+ def disrupted_pods(self, disrupted_pods):
+ """Sets the disrupted_pods of this V1PodDisruptionBudgetStatus.
+
+        DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions. # noqa: E501
+
+ :param disrupted_pods: The disrupted_pods of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: dict(str, datetime)
+ """
+
+ self._disrupted_pods = disrupted_pods
+
+ @property
+ def disruptions_allowed(self):
+ """Gets the disruptions_allowed of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+ Number of pod disruptions that are currently allowed. # noqa: E501
+
+ :return: The disruptions_allowed of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._disruptions_allowed
+
+ @disruptions_allowed.setter
+ def disruptions_allowed(self, disruptions_allowed):
+ """Sets the disruptions_allowed of this V1PodDisruptionBudgetStatus.
+
+ Number of pod disruptions that are currently allowed. # noqa: E501
+
+ :param disruptions_allowed: The disruptions_allowed of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and disruptions_allowed is None: # noqa: E501
+ raise ValueError("Invalid value for `disruptions_allowed`, must not be `None`") # noqa: E501
+
+ self._disruptions_allowed = disruptions_allowed
+
+ @property
+ def expected_pods(self):
+ """Gets the expected_pods of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+ total number of pods counted by this disruption budget # noqa: E501
+
+ :return: The expected_pods of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._expected_pods
+
+ @expected_pods.setter
+ def expected_pods(self, expected_pods):
+ """Sets the expected_pods of this V1PodDisruptionBudgetStatus.
+
+ total number of pods counted by this disruption budget # noqa: E501
+
+ :param expected_pods: The expected_pods of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and expected_pods is None: # noqa: E501
+ raise ValueError("Invalid value for `expected_pods`, must not be `None`") # noqa: E501
+
+ self._expected_pods = expected_pods
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1PodDisruptionBudgetStatus. # noqa: E501
+
+ Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation. # noqa: E501
+
+ :return: The observed_generation of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1PodDisruptionBudgetStatus.
+
+ Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1PodDisruptionBudgetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodDisruptionBudgetStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodDisruptionBudgetStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config.py
new file mode 100644
index 0000000000..91fbf7d4f3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodDNSConfig(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'nameservers': 'list[str]',
+ 'options': 'list[V1PodDNSConfigOption]',
+ 'searches': 'list[str]'
+ }
+
+ attribute_map = {
+ 'nameservers': 'nameservers',
+ 'options': 'options',
+ 'searches': 'searches'
+ }
+
+ def __init__(self, nameservers=None, options=None, searches=None, local_vars_configuration=None): # noqa: E501
+ """V1PodDNSConfig - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._nameservers = None
+ self._options = None
+ self._searches = None
+ self.discriminator = None
+
+ if nameservers is not None:
+ self.nameservers = nameservers
+ if options is not None:
+ self.options = options
+ if searches is not None:
+ self.searches = searches
+
+ @property
+ def nameservers(self):
+ """Gets the nameservers of this V1PodDNSConfig. # noqa: E501
+
+ A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. # noqa: E501
+
+ :return: The nameservers of this V1PodDNSConfig. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._nameservers
+
+ @nameservers.setter
+ def nameservers(self, nameservers):
+ """Sets the nameservers of this V1PodDNSConfig.
+
+ A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. # noqa: E501
+
+ :param nameservers: The nameservers of this V1PodDNSConfig. # noqa: E501
+ :type: list[str]
+ """
+
+ self._nameservers = nameservers
+
+ @property
+ def options(self):
+ """Gets the options of this V1PodDNSConfig. # noqa: E501
+
+ A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. # noqa: E501
+
+ :return: The options of this V1PodDNSConfig. # noqa: E501
+ :rtype: list[V1PodDNSConfigOption]
+ """
+ return self._options
+
+ @options.setter
+ def options(self, options):
+ """Sets the options of this V1PodDNSConfig.
+
+ A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. # noqa: E501
+
+ :param options: The options of this V1PodDNSConfig. # noqa: E501
+ :type: list[V1PodDNSConfigOption]
+ """
+
+ self._options = options
+
+ @property
+ def searches(self):
+ """Gets the searches of this V1PodDNSConfig. # noqa: E501
+
+ A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. # noqa: E501
+
+ :return: The searches of this V1PodDNSConfig. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._searches
+
+ @searches.setter
+ def searches(self, searches):
+ """Sets the searches of this V1PodDNSConfig.
+
+ A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. # noqa: E501
+
+ :param searches: The searches of this V1PodDNSConfig. # noqa: E501
+ :type: list[str]
+ """
+
+ self._searches = searches
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodDNSConfig):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodDNSConfig):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config_option.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config_option.py
new file mode 100644
index 0000000000..a41a1976af
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_dns_config_option.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodDNSConfigOption(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'value': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'value': 'value'
+ }
+
+ def __init__(self, name=None, value=None, local_vars_configuration=None): # noqa: E501
+ """V1PodDNSConfigOption - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._value = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if value is not None:
+ self.value = value
+
+ @property
+ def name(self):
+ """Gets the name of this V1PodDNSConfigOption. # noqa: E501
+
+ Required. # noqa: E501
+
+ :return: The name of this V1PodDNSConfigOption. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1PodDNSConfigOption.
+
+ Required. # noqa: E501
+
+ :param name: The name of this V1PodDNSConfigOption. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def value(self):
+ """Gets the value of this V1PodDNSConfigOption. # noqa: E501
+
+
+ :return: The value of this V1PodDNSConfigOption. # noqa: E501
+ :rtype: str
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ """Sets the value of this V1PodDNSConfigOption.
+
+
+ :param value: The value of this V1PodDNSConfigOption. # noqa: E501
+ :type: str
+ """
+
+ self._value = value
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodDNSConfigOption):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodDNSConfigOption):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy.py
new file mode 100644
index 0000000000..b8b98d1298
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodFailurePolicy(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'rules': 'list[V1PodFailurePolicyRule]'
+ }
+
+ attribute_map = {
+ 'rules': 'rules'
+ }
+
+ def __init__(self, rules=None, local_vars_configuration=None): # noqa: E501
+ """V1PodFailurePolicy - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._rules = None
+ self.discriminator = None
+
+ self.rules = rules
+
+ @property
+ def rules(self):
+ """Gets the rules of this V1PodFailurePolicy. # noqa: E501
+
+ A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed. # noqa: E501
+
+ :return: The rules of this V1PodFailurePolicy. # noqa: E501
+ :rtype: list[V1PodFailurePolicyRule]
+ """
+ return self._rules
+
+ @rules.setter
+ def rules(self, rules):
+ """Sets the rules of this V1PodFailurePolicy.
+
+ A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed. # noqa: E501
+
+ :param rules: The rules of this V1PodFailurePolicy. # noqa: E501
+ :type: list[V1PodFailurePolicyRule]
+ """
+ if self.local_vars_configuration.client_side_validation and rules is None: # noqa: E501
+ raise ValueError("Invalid value for `rules`, must not be `None`") # noqa: E501
+
+ self._rules = rules
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodFailurePolicy):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodFailurePolicy):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_exit_codes_requirement.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_exit_codes_requirement.py
new file mode 100644
index 0000000000..aa81a74ccd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_exit_codes_requirement.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodFailurePolicyOnExitCodesRequirement(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'container_name': 'str',
+ 'operator': 'str',
+ 'values': 'list[int]'
+ }
+
+ attribute_map = {
+ 'container_name': 'containerName',
+ 'operator': 'operator',
+ 'values': 'values'
+ }
+
+ def __init__(self, container_name=None, operator=None, values=None, local_vars_configuration=None): # noqa: E501
+ """V1PodFailurePolicyOnExitCodesRequirement - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._container_name = None
+ self._operator = None
+ self._values = None
+ self.discriminator = None
+
+ if container_name is not None:
+ self.container_name = container_name
+ self.operator = operator
+ self.values = values
+
+ @property
+ def container_name(self):
+ """Gets the container_name of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+
+ Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template. # noqa: E501
+
+ :return: The container_name of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._container_name
+
+ @container_name.setter
+ def container_name(self, container_name):
+ """Sets the container_name of this V1PodFailurePolicyOnExitCodesRequirement.
+
+ Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template. # noqa: E501
+
+ :param container_name: The container_name of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+ :type: str
+ """
+
+ self._container_name = container_name
+
+ @property
+ def operator(self):
+ """Gets the operator of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+
+ Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: - In: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is in the set of specified values. - NotIn: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is not in the set of specified values. Additional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied. # noqa: E501
+
+ :return: The operator of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._operator
+
+ @operator.setter
+ def operator(self, operator):
+ """Sets the operator of this V1PodFailurePolicyOnExitCodesRequirement.
+
+ Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: - In: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is in the set of specified values. - NotIn: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is not in the set of specified values. Additional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied. # noqa: E501
+
+ :param operator: The operator of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and operator is None: # noqa: E501
+ raise ValueError("Invalid value for `operator`, must not be `None`") # noqa: E501
+
+ self._operator = operator
+
+ @property
+ def values(self):
+ """Gets the values of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+
+ Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed. # noqa: E501
+
+ :return: The values of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+ :rtype: list[int]
+ """
+ return self._values
+
+ @values.setter
+ def values(self, values):
+ """Sets the values of this V1PodFailurePolicyOnExitCodesRequirement.
+
+ Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed. # noqa: E501
+
+ :param values: The values of this V1PodFailurePolicyOnExitCodesRequirement. # noqa: E501
+ :type: list[int]
+ """
+ if self.local_vars_configuration.client_side_validation and values is None: # noqa: E501
+ raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
+
+ self._values = values
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodFailurePolicyOnExitCodesRequirement):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodFailurePolicyOnExitCodesRequirement):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_pod_conditions_pattern.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_pod_conditions_pattern.py
new file mode 100644
index 0000000000..86384b76e3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_on_pod_conditions_pattern.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodFailurePolicyOnPodConditionsPattern(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1PodFailurePolicyOnPodConditionsPattern - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ self.status = status
+ self.type = type
+
+ @property
+ def status(self):
+ """Gets the status of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501
+
+ Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True. # noqa: E501
+
+ :return: The status of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1PodFailurePolicyOnPodConditionsPattern.
+
+ Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True. # noqa: E501
+
+ :param status: The status of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501
+
+ Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type. # noqa: E501
+
+ :return: The type of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1PodFailurePolicyOnPodConditionsPattern.
+
+ Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type. # noqa: E501
+
+ :param type: The type of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodFailurePolicyOnPodConditionsPattern):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodFailurePolicyOnPodConditionsPattern):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_rule.py
new file mode 100644
index 0000000000..5dc4cbc6bd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_failure_policy_rule.py
@@ -0,0 +1,177 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodFailurePolicyRule(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'action': 'str',
+ 'on_exit_codes': 'V1PodFailurePolicyOnExitCodesRequirement',
+ 'on_pod_conditions': 'list[V1PodFailurePolicyOnPodConditionsPattern]'
+ }
+
+ attribute_map = {
+ 'action': 'action',
+ 'on_exit_codes': 'onExitCodes',
+ 'on_pod_conditions': 'onPodConditions'
+ }
+
+ def __init__(self, action=None, on_exit_codes=None, on_pod_conditions=None, local_vars_configuration=None): # noqa: E501
+ """V1PodFailurePolicyRule - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._action = None
+ self._on_exit_codes = None
+ self._on_pod_conditions = None
+ self.discriminator = None
+
+ self.action = action
+ if on_exit_codes is not None:
+ self.on_exit_codes = on_exit_codes
+ if on_pod_conditions is not None:
+ self.on_pod_conditions = on_pod_conditions
+
+ @property
+ def action(self):
+ """Gets the action of this V1PodFailurePolicyRule. # noqa: E501
+
+ Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. This value is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. # noqa: E501
+
+ :return: The action of this V1PodFailurePolicyRule. # noqa: E501
+ :rtype: str
+ """
+ return self._action
+
+ @action.setter
+ def action(self, action):
+ """Sets the action of this V1PodFailurePolicyRule.
+
+ Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. This value is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. # noqa: E501
+
+ :param action: The action of this V1PodFailurePolicyRule. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and action is None: # noqa: E501
+ raise ValueError("Invalid value for `action`, must not be `None`") # noqa: E501
+
+ self._action = action
+
+ @property
+ def on_exit_codes(self):
+ """Gets the on_exit_codes of this V1PodFailurePolicyRule. # noqa: E501
+
+
+ :return: The on_exit_codes of this V1PodFailurePolicyRule. # noqa: E501
+ :rtype: V1PodFailurePolicyOnExitCodesRequirement
+ """
+ return self._on_exit_codes
+
+ @on_exit_codes.setter
+ def on_exit_codes(self, on_exit_codes):
+ """Sets the on_exit_codes of this V1PodFailurePolicyRule.
+
+
+ :param on_exit_codes: The on_exit_codes of this V1PodFailurePolicyRule. # noqa: E501
+ :type: V1PodFailurePolicyOnExitCodesRequirement
+ """
+
+ self._on_exit_codes = on_exit_codes
+
+ @property
+ def on_pod_conditions(self):
+ """Gets the on_pod_conditions of this V1PodFailurePolicyRule. # noqa: E501
+
+ Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed. # noqa: E501
+
+ :return: The on_pod_conditions of this V1PodFailurePolicyRule. # noqa: E501
+ :rtype: list[V1PodFailurePolicyOnPodConditionsPattern]
+ """
+ return self._on_pod_conditions
+
+ @on_pod_conditions.setter
+ def on_pod_conditions(self, on_pod_conditions):
+ """Sets the on_pod_conditions of this V1PodFailurePolicyRule.
+
+ Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed. # noqa: E501
+
+ :param on_pod_conditions: The on_pod_conditions of this V1PodFailurePolicyRule. # noqa: E501
+ :type: list[V1PodFailurePolicyOnPodConditionsPattern]
+ """
+
+ self._on_pod_conditions = on_pod_conditions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodFailurePolicyRule):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodFailurePolicyRule):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_ip.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_ip.py
new file mode 100644
index 0000000000..202f9684a2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_ip.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodIP(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'ip': 'str'
+ }
+
+ attribute_map = {
+ 'ip': 'ip'
+ }
+
+ def __init__(self, ip=None, local_vars_configuration=None): # noqa: E501
+ """V1PodIP - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._ip = None
+ self.discriminator = None
+
+ if ip is not None:
+ self.ip = ip
+
+ @property
+ def ip(self):
+ """Gets the ip of this V1PodIP. # noqa: E501
+
+ IP is the IP address assigned to the pod # noqa: E501
+
+ :return: The ip of this V1PodIP. # noqa: E501
+ :rtype: str
+ """
+ return self._ip
+
+ @ip.setter
+ def ip(self, ip):
+ """Sets the ip of this V1PodIP.
+
+ IP is the IP address assigned to the pod # noqa: E501
+
+ :param ip: The ip of this V1PodIP. # noqa: E501
+ :type: str
+ """
+
+ self._ip = ip
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodIP):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodIP):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_list.py
new file mode 100644
index 0000000000..4f0e87243a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1Pod]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1PodList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PodList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PodList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PodList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PodList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1PodList. # noqa: E501
+
+ List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md # noqa: E501
+
+ :return: The items of this V1PodList. # noqa: E501
+ :rtype: list[V1Pod]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1PodList.
+
+ List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md # noqa: E501
+
+ :param items: The items of this V1PodList. # noqa: E501
+ :type: list[V1Pod]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PodList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PodList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PodList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PodList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PodList. # noqa: E501
+
+
+ :return: The metadata of this V1PodList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PodList.
+
+
+ :param metadata: The metadata of this V1PodList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_os.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_os.py
new file mode 100644
index 0000000000..d209f06287
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_os.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodOS(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1PodOS - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1PodOS. # noqa: E501
+
+ Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null # noqa: E501
+
+ :return: The name of this V1PodOS. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1PodOS.
+
+ Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null # noqa: E501
+
+ :param name: The name of this V1PodOS. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodOS):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodOS):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_readiness_gate.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_readiness_gate.py
new file mode 100644
index 0000000000..b76e691646
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_readiness_gate.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodReadinessGate(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'condition_type': 'str'
+ }
+
+ attribute_map = {
+ 'condition_type': 'conditionType'
+ }
+
+ def __init__(self, condition_type=None, local_vars_configuration=None): # noqa: E501
+ """V1PodReadinessGate - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._condition_type = None
+ self.discriminator = None
+
+ self.condition_type = condition_type
+
+ @property
+ def condition_type(self):
+ """Gets the condition_type of this V1PodReadinessGate. # noqa: E501
+
+ ConditionType refers to a condition in the pod's condition list with matching type. # noqa: E501
+
+ :return: The condition_type of this V1PodReadinessGate. # noqa: E501
+ :rtype: str
+ """
+ return self._condition_type
+
+ @condition_type.setter
+ def condition_type(self, condition_type):
+ """Sets the condition_type of this V1PodReadinessGate.
+
+ ConditionType refers to a condition in the pod's condition list with matching type. # noqa: E501
+
+ :param condition_type: The condition_type of this V1PodReadinessGate. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and condition_type is None: # noqa: E501
+ raise ValueError("Invalid value for `condition_type`, must not be `None`") # noqa: E501
+
+ self._condition_type = condition_type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodReadinessGate):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodReadinessGate):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim.py
new file mode 100644
index 0000000000..0d178d44f9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodResourceClaim(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'source': 'V1ClaimSource'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'source': 'source'
+ }
+
+ def __init__(self, name=None, source=None, local_vars_configuration=None): # noqa: E501
+ """V1PodResourceClaim - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._source = None
+ self.discriminator = None
+
+ self.name = name
+ if source is not None:
+ self.source = source
+
+ @property
+ def name(self):
+ """Gets the name of this V1PodResourceClaim. # noqa: E501
+
+ Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. # noqa: E501
+
+ :return: The name of this V1PodResourceClaim. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1PodResourceClaim.
+
+ Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. # noqa: E501
+
+ :param name: The name of this V1PodResourceClaim. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def source(self):
+ """Gets the source of this V1PodResourceClaim. # noqa: E501
+
+
+ :return: The source of this V1PodResourceClaim. # noqa: E501
+ :rtype: V1ClaimSource
+ """
+ return self._source
+
+ @source.setter
+ def source(self, source):
+ """Sets the source of this V1PodResourceClaim.
+
+
+ :param source: The source of this V1PodResourceClaim. # noqa: E501
+ :type: V1ClaimSource
+ """
+
+ self._source = source
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodResourceClaim):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodResourceClaim):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim_status.py
new file mode 100644
index 0000000000..9d044995a6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_resource_claim_status.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodResourceClaimStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'resource_claim_name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'resource_claim_name': 'resourceClaimName'
+ }
+
+ def __init__(self, name=None, resource_claim_name=None, local_vars_configuration=None): # noqa: E501
+ """V1PodResourceClaimStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._resource_claim_name = None
+ self.discriminator = None
+
+ self.name = name
+ if resource_claim_name is not None:
+ self.resource_claim_name = resource_claim_name
+
+ @property
+ def name(self):
+ """Gets the name of this V1PodResourceClaimStatus. # noqa: E501
+
+ Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL. # noqa: E501
+
+ :return: The name of this V1PodResourceClaimStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1PodResourceClaimStatus.
+
+ Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL. # noqa: E501
+
+ :param name: The name of this V1PodResourceClaimStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def resource_claim_name(self):
+ """Gets the resource_claim_name of this V1PodResourceClaimStatus. # noqa: E501
+
+        ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.  # noqa: E501
+
+ :return: The resource_claim_name of this V1PodResourceClaimStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_claim_name
+
+ @resource_claim_name.setter
+ def resource_claim_name(self, resource_claim_name):
+ """Sets the resource_claim_name of this V1PodResourceClaimStatus.
+
+        ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.  # noqa: E501
+
+ :param resource_claim_name: The resource_claim_name of this V1PodResourceClaimStatus. # noqa: E501
+ :type: str
+ """
+
+ self._resource_claim_name = resource_claim_name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodResourceClaimStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodResourceClaimStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_scheduling_gate.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_scheduling_gate.py
new file mode 100644
index 0000000000..716f57477f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_scheduling_gate.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodSchedulingGate(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1PodSchedulingGate - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1PodSchedulingGate. # noqa: E501
+
+ Name of the scheduling gate. Each scheduling gate must have a unique name field. # noqa: E501
+
+ :return: The name of this V1PodSchedulingGate. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1PodSchedulingGate.
+
+ Name of the scheduling gate. Each scheduling gate must have a unique name field. # noqa: E501
+
+ :param name: The name of this V1PodSchedulingGate. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodSchedulingGate):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodSchedulingGate):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_security_context.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_security_context.py
new file mode 100644
index 0000000000..168b25eeb4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_security_context.py
@@ -0,0 +1,368 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodSecurityContext(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_group': 'int',
+ 'fs_group_change_policy': 'str',
+ 'run_as_group': 'int',
+ 'run_as_non_root': 'bool',
+ 'run_as_user': 'int',
+ 'se_linux_options': 'V1SELinuxOptions',
+ 'seccomp_profile': 'V1SeccompProfile',
+ 'supplemental_groups': 'list[int]',
+ 'sysctls': 'list[V1Sysctl]',
+ 'windows_options': 'V1WindowsSecurityContextOptions'
+ }
+
+ attribute_map = {
+ 'fs_group': 'fsGroup',
+ 'fs_group_change_policy': 'fsGroupChangePolicy',
+ 'run_as_group': 'runAsGroup',
+ 'run_as_non_root': 'runAsNonRoot',
+ 'run_as_user': 'runAsUser',
+ 'se_linux_options': 'seLinuxOptions',
+ 'seccomp_profile': 'seccompProfile',
+ 'supplemental_groups': 'supplementalGroups',
+ 'sysctls': 'sysctls',
+ 'windows_options': 'windowsOptions'
+ }
+
+ def __init__(self, fs_group=None, fs_group_change_policy=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_options=None, seccomp_profile=None, supplemental_groups=None, sysctls=None, windows_options=None, local_vars_configuration=None): # noqa: E501
+ """V1PodSecurityContext - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_group = None
+ self._fs_group_change_policy = None
+ self._run_as_group = None
+ self._run_as_non_root = None
+ self._run_as_user = None
+ self._se_linux_options = None
+ self._seccomp_profile = None
+ self._supplemental_groups = None
+ self._sysctls = None
+ self._windows_options = None
+ self.discriminator = None
+
+ if fs_group is not None:
+ self.fs_group = fs_group
+ if fs_group_change_policy is not None:
+ self.fs_group_change_policy = fs_group_change_policy
+ if run_as_group is not None:
+ self.run_as_group = run_as_group
+ if run_as_non_root is not None:
+ self.run_as_non_root = run_as_non_root
+ if run_as_user is not None:
+ self.run_as_user = run_as_user
+ if se_linux_options is not None:
+ self.se_linux_options = se_linux_options
+ if seccomp_profile is not None:
+ self.seccomp_profile = seccomp_profile
+ if supplemental_groups is not None:
+ self.supplemental_groups = supplemental_groups
+ if sysctls is not None:
+ self.sysctls = sysctls
+ if windows_options is not None:
+ self.windows_options = windows_options
+
+ @property
+ def fs_group(self):
+ """Gets the fs_group of this V1PodSecurityContext. # noqa: E501
+
+ A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The fs_group of this V1PodSecurityContext. # noqa: E501
+ :rtype: int
+ """
+ return self._fs_group
+
+ @fs_group.setter
+ def fs_group(self, fs_group):
+ """Sets the fs_group of this V1PodSecurityContext.
+
+ A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param fs_group: The fs_group of this V1PodSecurityContext. # noqa: E501
+ :type: int
+ """
+
+ self._fs_group = fs_group
+
+ @property
+ def fs_group_change_policy(self):
+ """Gets the fs_group_change_policy of this V1PodSecurityContext. # noqa: E501
+
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The fs_group_change_policy of this V1PodSecurityContext. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_group_change_policy
+
+ @fs_group_change_policy.setter
+ def fs_group_change_policy(self, fs_group_change_policy):
+ """Sets the fs_group_change_policy of this V1PodSecurityContext.
+
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param fs_group_change_policy: The fs_group_change_policy of this V1PodSecurityContext. # noqa: E501
+ :type: str
+ """
+
+ self._fs_group_change_policy = fs_group_change_policy
+
+ @property
+ def run_as_group(self):
+ """Gets the run_as_group of this V1PodSecurityContext. # noqa: E501
+
+ The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The run_as_group of this V1PodSecurityContext. # noqa: E501
+ :rtype: int
+ """
+ return self._run_as_group
+
+ @run_as_group.setter
+ def run_as_group(self, run_as_group):
+ """Sets the run_as_group of this V1PodSecurityContext.
+
+ The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param run_as_group: The run_as_group of this V1PodSecurityContext. # noqa: E501
+ :type: int
+ """
+
+ self._run_as_group = run_as_group
+
+ @property
+ def run_as_non_root(self):
+ """Gets the run_as_non_root of this V1PodSecurityContext. # noqa: E501
+
+ Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501
+
+ :return: The run_as_non_root of this V1PodSecurityContext. # noqa: E501
+ :rtype: bool
+ """
+ return self._run_as_non_root
+
+ @run_as_non_root.setter
+ def run_as_non_root(self, run_as_non_root):
+ """Sets the run_as_non_root of this V1PodSecurityContext.
+
+ Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501
+
+ :param run_as_non_root: The run_as_non_root of this V1PodSecurityContext. # noqa: E501
+ :type: bool
+ """
+
+ self._run_as_non_root = run_as_non_root
+
+ @property
+ def run_as_user(self):
+ """Gets the run_as_user of this V1PodSecurityContext. # noqa: E501
+
+ The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The run_as_user of this V1PodSecurityContext. # noqa: E501
+ :rtype: int
+ """
+ return self._run_as_user
+
+ @run_as_user.setter
+ def run_as_user(self, run_as_user):
+ """Sets the run_as_user of this V1PodSecurityContext.
+
+ The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param run_as_user: The run_as_user of this V1PodSecurityContext. # noqa: E501
+ :type: int
+ """
+
+ self._run_as_user = run_as_user
+
+ @property
+ def se_linux_options(self):
+ """Gets the se_linux_options of this V1PodSecurityContext. # noqa: E501
+
+
+ :return: The se_linux_options of this V1PodSecurityContext. # noqa: E501
+ :rtype: V1SELinuxOptions
+ """
+ return self._se_linux_options
+
+ @se_linux_options.setter
+ def se_linux_options(self, se_linux_options):
+ """Sets the se_linux_options of this V1PodSecurityContext.
+
+
+ :param se_linux_options: The se_linux_options of this V1PodSecurityContext. # noqa: E501
+ :type: V1SELinuxOptions
+ """
+
+ self._se_linux_options = se_linux_options
+
+ @property
+ def seccomp_profile(self):
+ """Gets the seccomp_profile of this V1PodSecurityContext. # noqa: E501
+
+
+ :return: The seccomp_profile of this V1PodSecurityContext. # noqa: E501
+ :rtype: V1SeccompProfile
+ """
+ return self._seccomp_profile
+
+ @seccomp_profile.setter
+ def seccomp_profile(self, seccomp_profile):
+ """Sets the seccomp_profile of this V1PodSecurityContext.
+
+
+ :param seccomp_profile: The seccomp_profile of this V1PodSecurityContext. # noqa: E501
+ :type: V1SeccompProfile
+ """
+
+ self._seccomp_profile = seccomp_profile
+
+ @property
+ def supplemental_groups(self):
+ """Gets the supplemental_groups of this V1PodSecurityContext. # noqa: E501
+
+ A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The supplemental_groups of this V1PodSecurityContext. # noqa: E501
+ :rtype: list[int]
+ """
+ return self._supplemental_groups
+
+ @supplemental_groups.setter
+ def supplemental_groups(self, supplemental_groups):
+ """Sets the supplemental_groups of this V1PodSecurityContext.
+
+ A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param supplemental_groups: The supplemental_groups of this V1PodSecurityContext. # noqa: E501
+ :type: list[int]
+ """
+
+ self._supplemental_groups = supplemental_groups
+
+ @property
+ def sysctls(self):
+ """Gets the sysctls of this V1PodSecurityContext. # noqa: E501
+
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The sysctls of this V1PodSecurityContext. # noqa: E501
+ :rtype: list[V1Sysctl]
+ """
+ return self._sysctls
+
+ @sysctls.setter
+ def sysctls(self, sysctls):
+ """Sets the sysctls of this V1PodSecurityContext.
+
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param sysctls: The sysctls of this V1PodSecurityContext. # noqa: E501
+ :type: list[V1Sysctl]
+ """
+
+ self._sysctls = sysctls
+
+ @property
+ def windows_options(self):
+ """Gets the windows_options of this V1PodSecurityContext. # noqa: E501
+
+
+ :return: The windows_options of this V1PodSecurityContext. # noqa: E501
+ :rtype: V1WindowsSecurityContextOptions
+ """
+ return self._windows_options
+
+ @windows_options.setter
+ def windows_options(self, windows_options):
+ """Sets the windows_options of this V1PodSecurityContext.
+
+
+ :param windows_options: The windows_options of this V1PodSecurityContext. # noqa: E501
+ :type: V1WindowsSecurityContextOptions
+ """
+
+ self._windows_options = windows_options
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodSecurityContext):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodSecurityContext):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_spec.py
new file mode 100644
index 0000000000..6ecb60e2e7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_spec.py
@@ -0,0 +1,1179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
    # Maps each python attribute name to its OpenAPI type string; used by
    # to_dict() and the client (de)serializer.
    openapi_types = {
        'active_deadline_seconds': 'int',
        'affinity': 'V1Affinity',
        'automount_service_account_token': 'bool',
        'containers': 'list[V1Container]',
        'dns_config': 'V1PodDNSConfig',
        'dns_policy': 'str',
        'enable_service_links': 'bool',
        'ephemeral_containers': 'list[V1EphemeralContainer]',
        'host_aliases': 'list[V1HostAlias]',
        'host_ipc': 'bool',
        'host_network': 'bool',
        'host_pid': 'bool',
        'host_users': 'bool',
        'hostname': 'str',
        'image_pull_secrets': 'list[V1LocalObjectReference]',
        'init_containers': 'list[V1Container]',
        'node_name': 'str',
        'node_selector': 'dict(str, str)',
        'os': 'V1PodOS',
        'overhead': 'dict(str, str)',
        'preemption_policy': 'str',
        'priority': 'int',
        'priority_class_name': 'str',
        'readiness_gates': 'list[V1PodReadinessGate]',
        'resource_claims': 'list[V1PodResourceClaim]',
        'restart_policy': 'str',
        'runtime_class_name': 'str',
        'scheduler_name': 'str',
        'scheduling_gates': 'list[V1PodSchedulingGate]',
        'security_context': 'V1PodSecurityContext',
        'service_account': 'str',
        'service_account_name': 'str',
        'set_hostname_as_fqdn': 'bool',
        'share_process_namespace': 'bool',
        'subdomain': 'str',
        'termination_grace_period_seconds': 'int',
        'tolerations': 'list[V1Toleration]',
        'topology_spread_constraints': 'list[V1TopologySpreadConstraint]',
        'volumes': 'list[V1Volume]'
    }

    # Maps each python attribute name to the camelCase JSON field name used
    # on the wire by the Kubernetes API.
    attribute_map = {
        'active_deadline_seconds': 'activeDeadlineSeconds',
        'affinity': 'affinity',
        'automount_service_account_token': 'automountServiceAccountToken',
        'containers': 'containers',
        'dns_config': 'dnsConfig',
        'dns_policy': 'dnsPolicy',
        'enable_service_links': 'enableServiceLinks',
        'ephemeral_containers': 'ephemeralContainers',
        'host_aliases': 'hostAliases',
        'host_ipc': 'hostIPC',
        'host_network': 'hostNetwork',
        'host_pid': 'hostPID',
        'host_users': 'hostUsers',
        'hostname': 'hostname',
        'image_pull_secrets': 'imagePullSecrets',
        'init_containers': 'initContainers',
        'node_name': 'nodeName',
        'node_selector': 'nodeSelector',
        'os': 'os',
        'overhead': 'overhead',
        'preemption_policy': 'preemptionPolicy',
        'priority': 'priority',
        'priority_class_name': 'priorityClassName',
        'readiness_gates': 'readinessGates',
        'resource_claims': 'resourceClaims',
        'restart_policy': 'restartPolicy',
        'runtime_class_name': 'runtimeClassName',
        'scheduler_name': 'schedulerName',
        'scheduling_gates': 'schedulingGates',
        'security_context': 'securityContext',
        'service_account': 'serviceAccount',
        'service_account_name': 'serviceAccountName',
        'set_hostname_as_fqdn': 'setHostnameAsFQDN',
        'share_process_namespace': 'shareProcessNamespace',
        'subdomain': 'subdomain',
        'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
        'tolerations': 'tolerations',
        'topology_spread_constraints': 'topologySpreadConstraints',
        'volumes': 'volumes'
    }
+
    def __init__(self, active_deadline_seconds=None, affinity=None, automount_service_account_token=None, containers=None, dns_config=None, dns_policy=None, enable_service_links=None, ephemeral_containers=None, host_aliases=None, host_ipc=None, host_network=None, host_pid=None, host_users=None, hostname=None, image_pull_secrets=None, init_containers=None, node_name=None, node_selector=None, os=None, overhead=None, preemption_policy=None, priority=None, priority_class_name=None, readiness_gates=None, resource_claims=None, restart_policy=None, runtime_class_name=None, scheduler_name=None, scheduling_gates=None, security_context=None, service_account=None, service_account_name=None, set_hostname_as_fqdn=None, share_process_namespace=None, subdomain=None, termination_grace_period_seconds=None, tolerations=None, topology_spread_constraints=None, volumes=None, local_vars_configuration=None):  # noqa: E501
        """V1PodSpec - a model defined in OpenAPI

        All fields are optional keyword arguments except ``containers``,
        which is assigned unconditionally and validated by its property
        setter.
        """  # noqa: E501
        # Fall back to a default client Configuration when none is supplied;
        # it controls client-side validation in the setters.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the generated properties below.
        self._active_deadline_seconds = None
        self._affinity = None
        self._automount_service_account_token = None
        self._containers = None
        self._dns_config = None
        self._dns_policy = None
        self._enable_service_links = None
        self._ephemeral_containers = None
        self._host_aliases = None
        self._host_ipc = None
        self._host_network = None
        self._host_pid = None
        self._host_users = None
        self._hostname = None
        self._image_pull_secrets = None
        self._init_containers = None
        self._node_name = None
        self._node_selector = None
        self._os = None
        self._overhead = None
        self._preemption_policy = None
        self._priority = None
        self._priority_class_name = None
        self._readiness_gates = None
        self._resource_claims = None
        self._restart_policy = None
        self._runtime_class_name = None
        self._scheduler_name = None
        self._scheduling_gates = None
        self._security_context = None
        self._service_account = None
        self._service_account_name = None
        self._set_hostname_as_fqdn = None
        self._share_process_namespace = None
        self._subdomain = None
        self._termination_grace_period_seconds = None
        self._tolerations = None
        self._topology_spread_constraints = None
        self._volumes = None
        self.discriminator = None

        # Assign through the properties so any setter-side validation runs.
        # Optional fields are only set when explicitly provided so that
        # "unset" stays distinguishable from an explicit None.
        if active_deadline_seconds is not None:
            self.active_deadline_seconds = active_deadline_seconds
        if affinity is not None:
            self.affinity = affinity
        if automount_service_account_token is not None:
            self.automount_service_account_token = automount_service_account_token
        # Required field: assigned unconditionally so the setter can reject None.
        self.containers = containers
        if dns_config is not None:
            self.dns_config = dns_config
        if dns_policy is not None:
            self.dns_policy = dns_policy
        if enable_service_links is not None:
            self.enable_service_links = enable_service_links
        if ephemeral_containers is not None:
            self.ephemeral_containers = ephemeral_containers
        if host_aliases is not None:
            self.host_aliases = host_aliases
        if host_ipc is not None:
            self.host_ipc = host_ipc
        if host_network is not None:
            self.host_network = host_network
        if host_pid is not None:
            self.host_pid = host_pid
        if host_users is not None:
            self.host_users = host_users
        if hostname is not None:
            self.hostname = hostname
        if image_pull_secrets is not None:
            self.image_pull_secrets = image_pull_secrets
        if init_containers is not None:
            self.init_containers = init_containers
        if node_name is not None:
            self.node_name = node_name
        if node_selector is not None:
            self.node_selector = node_selector
        if os is not None:
            self.os = os
        if overhead is not None:
            self.overhead = overhead
        if preemption_policy is not None:
            self.preemption_policy = preemption_policy
        if priority is not None:
            self.priority = priority
        if priority_class_name is not None:
            self.priority_class_name = priority_class_name
        if readiness_gates is not None:
            self.readiness_gates = readiness_gates
        if resource_claims is not None:
            self.resource_claims = resource_claims
        if restart_policy is not None:
            self.restart_policy = restart_policy
        if runtime_class_name is not None:
            self.runtime_class_name = runtime_class_name
        if scheduler_name is not None:
            self.scheduler_name = scheduler_name
        if scheduling_gates is not None:
            self.scheduling_gates = scheduling_gates
        if security_context is not None:
            self.security_context = security_context
        if service_account is not None:
            self.service_account = service_account
        if service_account_name is not None:
            self.service_account_name = service_account_name
        if set_hostname_as_fqdn is not None:
            self.set_hostname_as_fqdn = set_hostname_as_fqdn
        if share_process_namespace is not None:
            self.share_process_namespace = share_process_namespace
        if subdomain is not None:
            self.subdomain = subdomain
        if termination_grace_period_seconds is not None:
            self.termination_grace_period_seconds = termination_grace_period_seconds
        if tolerations is not None:
            self.tolerations = tolerations
        if topology_spread_constraints is not None:
            self.topology_spread_constraints = topology_spread_constraints
        if volumes is not None:
            self.volumes = volumes
+
+ @property
+ def active_deadline_seconds(self):
+ """Gets the active_deadline_seconds of this V1PodSpec. # noqa: E501
+
+ Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. # noqa: E501
+
+ :return: The active_deadline_seconds of this V1PodSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._active_deadline_seconds
+
+ @active_deadline_seconds.setter
+ def active_deadline_seconds(self, active_deadline_seconds):
+ """Sets the active_deadline_seconds of this V1PodSpec.
+
+ Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. # noqa: E501
+
+ :param active_deadline_seconds: The active_deadline_seconds of this V1PodSpec. # noqa: E501
+ :type: int
+ """
+
+ self._active_deadline_seconds = active_deadline_seconds
+
+ @property
+ def affinity(self):
+ """Gets the affinity of this V1PodSpec. # noqa: E501
+
+
+ :return: The affinity of this V1PodSpec. # noqa: E501
+ :rtype: V1Affinity
+ """
+ return self._affinity
+
+ @affinity.setter
+ def affinity(self, affinity):
+ """Sets the affinity of this V1PodSpec.
+
+
+ :param affinity: The affinity of this V1PodSpec. # noqa: E501
+ :type: V1Affinity
+ """
+
+ self._affinity = affinity
+
+ @property
+ def automount_service_account_token(self):
+ """Gets the automount_service_account_token of this V1PodSpec. # noqa: E501
+
+ AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. # noqa: E501
+
+ :return: The automount_service_account_token of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._automount_service_account_token
+
+ @automount_service_account_token.setter
+ def automount_service_account_token(self, automount_service_account_token):
+ """Sets the automount_service_account_token of this V1PodSpec.
+
+ AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. # noqa: E501
+
+ :param automount_service_account_token: The automount_service_account_token of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._automount_service_account_token = automount_service_account_token
+
+ @property
+ def containers(self):
+ """Gets the containers of this V1PodSpec. # noqa: E501
+
+ List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. # noqa: E501
+
+ :return: The containers of this V1PodSpec. # noqa: E501
+ :rtype: list[V1Container]
+ """
+ return self._containers
+
+ @containers.setter
+ def containers(self, containers):
+ """Sets the containers of this V1PodSpec.
+
+ List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. # noqa: E501
+
+ :param containers: The containers of this V1PodSpec. # noqa: E501
+ :type: list[V1Container]
+ """
+ if self.local_vars_configuration.client_side_validation and containers is None: # noqa: E501
+ raise ValueError("Invalid value for `containers`, must not be `None`") # noqa: E501
+
+ self._containers = containers
+
+ @property
+ def dns_config(self):
+ """Gets the dns_config of this V1PodSpec. # noqa: E501
+
+
+ :return: The dns_config of this V1PodSpec. # noqa: E501
+ :rtype: V1PodDNSConfig
+ """
+ return self._dns_config
+
+ @dns_config.setter
+ def dns_config(self, dns_config):
+ """Sets the dns_config of this V1PodSpec.
+
+
+ :param dns_config: The dns_config of this V1PodSpec. # noqa: E501
+ :type: V1PodDNSConfig
+ """
+
+ self._dns_config = dns_config
+
+ @property
+ def dns_policy(self):
+ """Gets the dns_policy of this V1PodSpec. # noqa: E501
+
+ Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501
+
+ :return: The dns_policy of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._dns_policy
+
+ @dns_policy.setter
+ def dns_policy(self, dns_policy):
+ """Sets the dns_policy of this V1PodSpec.
+
+ Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501
+
+ :param dns_policy: The dns_policy of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._dns_policy = dns_policy
+
+ @property
+ def enable_service_links(self):
+ """Gets the enable_service_links of this V1PodSpec. # noqa: E501
+
+ EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501
+
+ :return: The enable_service_links of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._enable_service_links
+
+ @enable_service_links.setter
+ def enable_service_links(self, enable_service_links):
+ """Sets the enable_service_links of this V1PodSpec.
+
+ EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501
+
+ :param enable_service_links: The enable_service_links of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._enable_service_links = enable_service_links
+
+ @property
+ def ephemeral_containers(self):
+ """Gets the ephemeral_containers of this V1PodSpec. # noqa: E501
+
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. # noqa: E501
+
+ :return: The ephemeral_containers of this V1PodSpec. # noqa: E501
+ :rtype: list[V1EphemeralContainer]
+ """
+ return self._ephemeral_containers
+
+ @ephemeral_containers.setter
+ def ephemeral_containers(self, ephemeral_containers):
+ """Sets the ephemeral_containers of this V1PodSpec.
+
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. # noqa: E501
+
+ :param ephemeral_containers: The ephemeral_containers of this V1PodSpec. # noqa: E501
+ :type: list[V1EphemeralContainer]
+ """
+
+ self._ephemeral_containers = ephemeral_containers
+
+ @property
+ def host_aliases(self):
+ """Gets the host_aliases of this V1PodSpec. # noqa: E501
+
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. # noqa: E501
+
+ :return: The host_aliases of this V1PodSpec. # noqa: E501
+ :rtype: list[V1HostAlias]
+ """
+ return self._host_aliases
+
+ @host_aliases.setter
+ def host_aliases(self, host_aliases):
+ """Sets the host_aliases of this V1PodSpec.
+
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. # noqa: E501
+
+ :param host_aliases: The host_aliases of this V1PodSpec. # noqa: E501
+ :type: list[V1HostAlias]
+ """
+
+ self._host_aliases = host_aliases
+
+ @property
+ def host_ipc(self):
+ """Gets the host_ipc of this V1PodSpec. # noqa: E501
+
+ Use the host's ipc namespace. Optional: Default to false. # noqa: E501
+
+ :return: The host_ipc of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._host_ipc
+
+ @host_ipc.setter
+ def host_ipc(self, host_ipc):
+ """Sets the host_ipc of this V1PodSpec.
+
+ Use the host's ipc namespace. Optional: Default to false. # noqa: E501
+
+ :param host_ipc: The host_ipc of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._host_ipc = host_ipc
+
+ @property
+ def host_network(self):
+ """Gets the host_network of this V1PodSpec. # noqa: E501
+
+ Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. # noqa: E501
+
+ :return: The host_network of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._host_network
+
+ @host_network.setter
+ def host_network(self, host_network):
+ """Sets the host_network of this V1PodSpec.
+
+ Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. # noqa: E501
+
+ :param host_network: The host_network of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._host_network = host_network
+
+ @property
+ def host_pid(self):
+ """Gets the host_pid of this V1PodSpec. # noqa: E501
+
+ Use the host's pid namespace. Optional: Default to false. # noqa: E501
+
+ :return: The host_pid of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._host_pid
+
+ @host_pid.setter
+ def host_pid(self, host_pid):
+ """Sets the host_pid of this V1PodSpec.
+
+ Use the host's pid namespace. Optional: Default to false. # noqa: E501
+
+ :param host_pid: The host_pid of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._host_pid = host_pid
+
+ @property
+ def host_users(self):
+ """Gets the host_users of this V1PodSpec. # noqa: E501
+
+ Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. # noqa: E501
+
+ :return: The host_users of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._host_users
+
+ @host_users.setter
+ def host_users(self, host_users):
+ """Sets the host_users of this V1PodSpec.
+
+ Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. # noqa: E501
+
+ :param host_users: The host_users of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._host_users = host_users
+
+ @property
+ def hostname(self):
+ """Gets the hostname of this V1PodSpec. # noqa: E501
+
+ Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. # noqa: E501
+
+ :return: The hostname of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._hostname
+
+ @hostname.setter
+ def hostname(self, hostname):
+ """Sets the hostname of this V1PodSpec.
+
+ Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. # noqa: E501
+
+ :param hostname: The hostname of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._hostname = hostname
+
+ @property
+ def image_pull_secrets(self):
+ """Gets the image_pull_secrets of this V1PodSpec. # noqa: E501
+
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod # noqa: E501
+
+ :return: The image_pull_secrets of this V1PodSpec. # noqa: E501
+ :rtype: list[V1LocalObjectReference]
+ """
+ return self._image_pull_secrets
+
+ @image_pull_secrets.setter
+ def image_pull_secrets(self, image_pull_secrets):
+ """Sets the image_pull_secrets of this V1PodSpec.
+
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod # noqa: E501
+
+ :param image_pull_secrets: The image_pull_secrets of this V1PodSpec. # noqa: E501
+ :type: list[V1LocalObjectReference]
+ """
+
+ self._image_pull_secrets = image_pull_secrets
+
+ @property
+ def init_containers(self):
+ """Gets the init_containers of this V1PodSpec. # noqa: E501
+
+ List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ # noqa: E501
+
+ :return: The init_containers of this V1PodSpec. # noqa: E501
+ :rtype: list[V1Container]
+ """
+ return self._init_containers
+
+ @init_containers.setter
+ def init_containers(self, init_containers):
+ """Sets the init_containers of this V1PodSpec.
+
+ List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ # noqa: E501
+
+ :param init_containers: The init_containers of this V1PodSpec. # noqa: E501
+ :type: list[V1Container]
+ """
+
+ self._init_containers = init_containers
+
+ @property
+ def node_name(self):
+ """Gets the node_name of this V1PodSpec. # noqa: E501
+
+ NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. # noqa: E501
+
+ :return: The node_name of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._node_name
+
+ @node_name.setter
+ def node_name(self, node_name):
+ """Sets the node_name of this V1PodSpec.
+
+ NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. # noqa: E501
+
+ :param node_name: The node_name of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._node_name = node_name
+
+ @property
+ def node_selector(self):
+ """Gets the node_selector of this V1PodSpec. # noqa: E501
+
+ NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501
+
+ :return: The node_selector of this V1PodSpec. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._node_selector
+
+ @node_selector.setter
+ def node_selector(self, node_selector):
+ """Sets the node_selector of this V1PodSpec.
+
+ NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501
+
+ :param node_selector: The node_selector of this V1PodSpec. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._node_selector = node_selector
+
+ @property
+ def os(self):
+ """Gets the os of this V1PodSpec. # noqa: E501
+
+
+ :return: The os of this V1PodSpec. # noqa: E501
+ :rtype: V1PodOS
+ """
+ return self._os
+
+ @os.setter
+ def os(self, os):
+ """Sets the os of this V1PodSpec.
+
+
+ :param os: The os of this V1PodSpec. # noqa: E501
+ :type: V1PodOS
+ """
+
+ self._os = os
+
+ @property
+ def overhead(self):
+ """Gets the overhead of this V1PodSpec. # noqa: E501
+
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md # noqa: E501
+
+ :return: The overhead of this V1PodSpec. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._overhead
+
+ @overhead.setter
+ def overhead(self, overhead):
+ """Sets the overhead of this V1PodSpec.
+
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md # noqa: E501
+
+ :param overhead: The overhead of this V1PodSpec. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._overhead = overhead
+
+ @property
+ def preemption_policy(self):
+ """Gets the preemption_policy of this V1PodSpec. # noqa: E501
+
+ PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
+
+ :return: The preemption_policy of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._preemption_policy
+
+ @preemption_policy.setter
+ def preemption_policy(self, preemption_policy):
+ """Sets the preemption_policy of this V1PodSpec.
+
+ PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
+
+ :param preemption_policy: The preemption_policy of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._preemption_policy = preemption_policy
+
+ @property
+ def priority(self):
+ """Gets the priority of this V1PodSpec. # noqa: E501
+
+ The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501
+
+ :return: The priority of this V1PodSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._priority
+
+ @priority.setter
+ def priority(self, priority):
+ """Sets the priority of this V1PodSpec.
+
+ The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501
+
+ :param priority: The priority of this V1PodSpec. # noqa: E501
+ :type: int
+ """
+
+ self._priority = priority
+
+ @property
+ def priority_class_name(self):
+ """Gets the priority_class_name of this V1PodSpec. # noqa: E501
+
+ If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
+
+ :return: The priority_class_name of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._priority_class_name
+
+ @priority_class_name.setter
+ def priority_class_name(self, priority_class_name):
+ """Sets the priority_class_name of this V1PodSpec.
+
+ If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
+
+ :param priority_class_name: The priority_class_name of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._priority_class_name = priority_class_name
+
+ @property
+ def readiness_gates(self):
+ """Gets the readiness_gates of this V1PodSpec. # noqa: E501
+
+ If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates # noqa: E501
+
+ :return: The readiness_gates of this V1PodSpec. # noqa: E501
+ :rtype: list[V1PodReadinessGate]
+ """
+ return self._readiness_gates
+
+ @readiness_gates.setter
+ def readiness_gates(self, readiness_gates):
+ """Sets the readiness_gates of this V1PodSpec.
+
+ If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates # noqa: E501
+
+ :param readiness_gates: The readiness_gates of this V1PodSpec. # noqa: E501
+ :type: list[V1PodReadinessGate]
+ """
+
+ self._readiness_gates = readiness_gates
+
+ @property
+ def resource_claims(self):
+ """Gets the resource_claims of this V1PodSpec. # noqa: E501
+
+ ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. # noqa: E501
+
+ :return: The resource_claims of this V1PodSpec. # noqa: E501
+ :rtype: list[V1PodResourceClaim]
+ """
+ return self._resource_claims
+
+ @resource_claims.setter
+ def resource_claims(self, resource_claims):
+ """Sets the resource_claims of this V1PodSpec.
+
+ ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. # noqa: E501
+
+ :param resource_claims: The resource_claims of this V1PodSpec. # noqa: E501
+ :type: list[V1PodResourceClaim]
+ """
+
+ self._resource_claims = resource_claims
+
+ @property
+ def restart_policy(self):
+ """Gets the restart_policy of this V1PodSpec. # noqa: E501
+
+ Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501
+
+ :return: The restart_policy of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._restart_policy
+
+ @restart_policy.setter
+ def restart_policy(self, restart_policy):
+ """Sets the restart_policy of this V1PodSpec.
+
+ Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy # noqa: E501
+
+ :param restart_policy: The restart_policy of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._restart_policy = restart_policy
+
+ @property
+ def runtime_class_name(self):
+ """Gets the runtime_class_name of this V1PodSpec. # noqa: E501
+
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class # noqa: E501
+
+ :return: The runtime_class_name of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._runtime_class_name
+
+ @runtime_class_name.setter
+ def runtime_class_name(self, runtime_class_name):
+ """Sets the runtime_class_name of this V1PodSpec.
+
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class # noqa: E501
+
+ :param runtime_class_name: The runtime_class_name of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._runtime_class_name = runtime_class_name
+
+ @property
+ def scheduler_name(self):
+ """Gets the scheduler_name of this V1PodSpec. # noqa: E501
+
+ If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. # noqa: E501
+
+ :return: The scheduler_name of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._scheduler_name
+
+ @scheduler_name.setter
+ def scheduler_name(self, scheduler_name):
+ """Sets the scheduler_name of this V1PodSpec.
+
+ If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. # noqa: E501
+
+ :param scheduler_name: The scheduler_name of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._scheduler_name = scheduler_name
+
+ @property
+ def scheduling_gates(self):
+ """Gets the scheduling_gates of this V1PodSpec. # noqa: E501
+
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. SchedulingGates can only be set at pod creation time, and be removed only afterwards. This is a beta feature enabled by the PodSchedulingReadiness feature gate. # noqa: E501
+
+ :return: The scheduling_gates of this V1PodSpec. # noqa: E501
+ :rtype: list[V1PodSchedulingGate]
+ """
+ return self._scheduling_gates
+
+ @scheduling_gates.setter
+ def scheduling_gates(self, scheduling_gates):
+ """Sets the scheduling_gates of this V1PodSpec.
+
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. SchedulingGates can only be set at pod creation time, and be removed only afterwards. This is a beta feature enabled by the PodSchedulingReadiness feature gate. # noqa: E501
+
+ :param scheduling_gates: The scheduling_gates of this V1PodSpec. # noqa: E501
+ :type: list[V1PodSchedulingGate]
+ """
+
+ self._scheduling_gates = scheduling_gates
+
+ @property
+ def security_context(self):
+ """Gets the security_context of this V1PodSpec. # noqa: E501
+
+
+ :return: The security_context of this V1PodSpec. # noqa: E501
+ :rtype: V1PodSecurityContext
+ """
+ return self._security_context
+
+ @security_context.setter
+ def security_context(self, security_context):
+ """Sets the security_context of this V1PodSpec.
+
+
+ :param security_context: The security_context of this V1PodSpec. # noqa: E501
+ :type: V1PodSecurityContext
+ """
+
+ self._security_context = security_context
+
+ @property
+ def service_account(self):
+ """Gets the service_account of this V1PodSpec. # noqa: E501
+
+ DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. # noqa: E501
+
+ :return: The service_account of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._service_account
+
+ @service_account.setter
+ def service_account(self, service_account):
+ """Sets the service_account of this V1PodSpec.
+
+ DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. # noqa: E501
+
+ :param service_account: The service_account of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._service_account = service_account
+
+ @property
+ def service_account_name(self):
+ """Gets the service_account_name of this V1PodSpec. # noqa: E501
+
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501
+
+ :return: The service_account_name of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._service_account_name
+
+ @service_account_name.setter
+ def service_account_name(self, service_account_name):
+ """Sets the service_account_name of this V1PodSpec.
+
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ # noqa: E501
+
+ :param service_account_name: The service_account_name of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._service_account_name = service_account_name
+
+ @property
+ def set_hostname_as_fqdn(self):
+ """Gets the set_hostname_as_fqdn of this V1PodSpec. # noqa: E501
+
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. # noqa: E501
+
+ :return: The set_hostname_as_fqdn of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._set_hostname_as_fqdn
+
+ @set_hostname_as_fqdn.setter
+ def set_hostname_as_fqdn(self, set_hostname_as_fqdn):
+ """Sets the set_hostname_as_fqdn of this V1PodSpec.
+
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. # noqa: E501
+
+ :param set_hostname_as_fqdn: The set_hostname_as_fqdn of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._set_hostname_as_fqdn = set_hostname_as_fqdn
+
+ @property
+ def share_process_namespace(self):
+ """Gets the share_process_namespace of this V1PodSpec. # noqa: E501
+
+ Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. # noqa: E501
+
+ :return: The share_process_namespace of this V1PodSpec. # noqa: E501
+ :rtype: bool
+ """
+ return self._share_process_namespace
+
+ @share_process_namespace.setter
+ def share_process_namespace(self, share_process_namespace):
+ """Sets the share_process_namespace of this V1PodSpec.
+
+ Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. # noqa: E501
+
+ :param share_process_namespace: The share_process_namespace of this V1PodSpec. # noqa: E501
+ :type: bool
+ """
+
+ self._share_process_namespace = share_process_namespace
+
+ @property
+ def subdomain(self):
+ """Gets the subdomain of this V1PodSpec. # noqa: E501
+
+ If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all. # noqa: E501
+
+ :return: The subdomain of this V1PodSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._subdomain
+
+ @subdomain.setter
+ def subdomain(self, subdomain):
+ """Sets the subdomain of this V1PodSpec.
+
+ If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all. # noqa: E501
+
+ :param subdomain: The subdomain of this V1PodSpec. # noqa: E501
+ :type: str
+ """
+
+ self._subdomain = subdomain
+
+ @property
+ def termination_grace_period_seconds(self):
+ """Gets the termination_grace_period_seconds of this V1PodSpec. # noqa: E501
+
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. # noqa: E501
+
+ :return: The termination_grace_period_seconds of this V1PodSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._termination_grace_period_seconds
+
+ @termination_grace_period_seconds.setter
+ def termination_grace_period_seconds(self, termination_grace_period_seconds):
+ """Sets the termination_grace_period_seconds of this V1PodSpec.
+
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. # noqa: E501
+
+ :param termination_grace_period_seconds: The termination_grace_period_seconds of this V1PodSpec. # noqa: E501
+ :type: int
+ """
+
+ self._termination_grace_period_seconds = termination_grace_period_seconds
+
+ @property
+ def tolerations(self):
+ """Gets the tolerations of this V1PodSpec. # noqa: E501
+
+ If specified, the pod's tolerations. # noqa: E501
+
+ :return: The tolerations of this V1PodSpec. # noqa: E501
+ :rtype: list[V1Toleration]
+ """
+ return self._tolerations
+
+ @tolerations.setter
+ def tolerations(self, tolerations):
+ """Sets the tolerations of this V1PodSpec.
+
+ If specified, the pod's tolerations. # noqa: E501
+
+ :param tolerations: The tolerations of this V1PodSpec. # noqa: E501
+ :type: list[V1Toleration]
+ """
+
+ self._tolerations = tolerations
+
+ @property
+ def topology_spread_constraints(self):
+ """Gets the topology_spread_constraints of this V1PodSpec. # noqa: E501
+
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. # noqa: E501
+
+ :return: The topology_spread_constraints of this V1PodSpec. # noqa: E501
+ :rtype: list[V1TopologySpreadConstraint]
+ """
+ return self._topology_spread_constraints
+
+ @topology_spread_constraints.setter
+ def topology_spread_constraints(self, topology_spread_constraints):
+ """Sets the topology_spread_constraints of this V1PodSpec.
+
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. # noqa: E501
+
+ :param topology_spread_constraints: The topology_spread_constraints of this V1PodSpec. # noqa: E501
+ :type: list[V1TopologySpreadConstraint]
+ """
+
+ self._topology_spread_constraints = topology_spread_constraints
+
+ @property
+ def volumes(self):
+ """Gets the volumes of this V1PodSpec. # noqa: E501
+
+ List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501
+
+ :return: The volumes of this V1PodSpec. # noqa: E501
+ :rtype: list[V1Volume]
+ """
+ return self._volumes
+
+ @volumes.setter
+ def volumes(self, volumes):
+ """Sets the volumes of this V1PodSpec.
+
+ List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501
+
+ :param volumes: The volumes of this V1PodSpec. # noqa: E501
+ :type: list[V1Volume]
+ """
+
+ self._volumes = volumes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_status.py
new file mode 100644
index 0000000000..0a77f639e8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_status.py
@@ -0,0 +1,542 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1PodCondition]',
+ 'container_statuses': 'list[V1ContainerStatus]',
+ 'ephemeral_container_statuses': 'list[V1ContainerStatus]',
+ 'host_ip': 'str',
+ 'host_i_ps': 'list[V1HostIP]',
+ 'init_container_statuses': 'list[V1ContainerStatus]',
+ 'message': 'str',
+ 'nominated_node_name': 'str',
+ 'phase': 'str',
+ 'pod_ip': 'str',
+ 'pod_i_ps': 'list[V1PodIP]',
+ 'qos_class': 'str',
+ 'reason': 'str',
+ 'resize': 'str',
+ 'resource_claim_statuses': 'list[V1PodResourceClaimStatus]',
+ 'start_time': 'datetime'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions',
+ 'container_statuses': 'containerStatuses',
+ 'ephemeral_container_statuses': 'ephemeralContainerStatuses',
+ 'host_ip': 'hostIP',
+ 'host_i_ps': 'hostIPs',
+ 'init_container_statuses': 'initContainerStatuses',
+ 'message': 'message',
+ 'nominated_node_name': 'nominatedNodeName',
+ 'phase': 'phase',
+ 'pod_ip': 'podIP',
+ 'pod_i_ps': 'podIPs',
+ 'qos_class': 'qosClass',
+ 'reason': 'reason',
+ 'resize': 'resize',
+ 'resource_claim_statuses': 'resourceClaimStatuses',
+ 'start_time': 'startTime'
+ }
+
+ def __init__(self, conditions=None, container_statuses=None, ephemeral_container_statuses=None, host_ip=None, host_i_ps=None, init_container_statuses=None, message=None, nominated_node_name=None, phase=None, pod_ip=None, pod_i_ps=None, qos_class=None, reason=None, resize=None, resource_claim_statuses=None, start_time=None, local_vars_configuration=None): # noqa: E501
+ """V1PodStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self._container_statuses = None
+ self._ephemeral_container_statuses = None
+ self._host_ip = None
+ self._host_i_ps = None
+ self._init_container_statuses = None
+ self._message = None
+ self._nominated_node_name = None
+ self._phase = None
+ self._pod_ip = None
+ self._pod_i_ps = None
+ self._qos_class = None
+ self._reason = None
+ self._resize = None
+ self._resource_claim_statuses = None
+ self._start_time = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+ if container_statuses is not None:
+ self.container_statuses = container_statuses
+ if ephemeral_container_statuses is not None:
+ self.ephemeral_container_statuses = ephemeral_container_statuses
+ if host_ip is not None:
+ self.host_ip = host_ip
+ if host_i_ps is not None:
+ self.host_i_ps = host_i_ps
+ if init_container_statuses is not None:
+ self.init_container_statuses = init_container_statuses
+ if message is not None:
+ self.message = message
+ if nominated_node_name is not None:
+ self.nominated_node_name = nominated_node_name
+ if phase is not None:
+ self.phase = phase
+ if pod_ip is not None:
+ self.pod_ip = pod_ip
+ if pod_i_ps is not None:
+ self.pod_i_ps = pod_i_ps
+ if qos_class is not None:
+ self.qos_class = qos_class
+ if reason is not None:
+ self.reason = reason
+ if resize is not None:
+ self.resize = resize
+ if resource_claim_statuses is not None:
+ self.resource_claim_statuses = resource_claim_statuses
+ if start_time is not None:
+ self.start_time = start_time
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1PodStatus. # noqa: E501
+
+ Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions # noqa: E501
+
+ :return: The conditions of this V1PodStatus. # noqa: E501
+ :rtype: list[V1PodCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1PodStatus.
+
+ Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions # noqa: E501
+
+ :param conditions: The conditions of this V1PodStatus. # noqa: E501
+ :type: list[V1PodCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def container_statuses(self):
+ """Gets the container_statuses of this V1PodStatus. # noqa: E501
+
+ The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status # noqa: E501
+
+ :return: The container_statuses of this V1PodStatus. # noqa: E501
+ :rtype: list[V1ContainerStatus]
+ """
+ return self._container_statuses
+
+ @container_statuses.setter
+ def container_statuses(self, container_statuses):
+ """Sets the container_statuses of this V1PodStatus.
+
+ The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status # noqa: E501
+
+ :param container_statuses: The container_statuses of this V1PodStatus. # noqa: E501
+ :type: list[V1ContainerStatus]
+ """
+
+ self._container_statuses = container_statuses
+
+ @property
+ def ephemeral_container_statuses(self):
+ """Gets the ephemeral_container_statuses of this V1PodStatus. # noqa: E501
+
+ Status for any ephemeral containers that have run in this pod. # noqa: E501
+
+ :return: The ephemeral_container_statuses of this V1PodStatus. # noqa: E501
+ :rtype: list[V1ContainerStatus]
+ """
+ return self._ephemeral_container_statuses
+
+ @ephemeral_container_statuses.setter
+ def ephemeral_container_statuses(self, ephemeral_container_statuses):
+ """Sets the ephemeral_container_statuses of this V1PodStatus.
+
+ Status for any ephemeral containers that have run in this pod. # noqa: E501
+
+ :param ephemeral_container_statuses: The ephemeral_container_statuses of this V1PodStatus. # noqa: E501
+ :type: list[V1ContainerStatus]
+ """
+
+ self._ephemeral_container_statuses = ephemeral_container_statuses
+
+ @property
+ def host_ip(self):
+ """Gets the host_ip of this V1PodStatus. # noqa: E501
+
+ hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod # noqa: E501
+
+ :return: The host_ip of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._host_ip
+
+ @host_ip.setter
+ def host_ip(self, host_ip):
+ """Sets the host_ip of this V1PodStatus.
+
+ hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod # noqa: E501
+
+ :param host_ip: The host_ip of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._host_ip = host_ip
+
+ @property
+ def host_i_ps(self):
+ """Gets the host_i_ps of this V1PodStatus. # noqa: E501
+
+ hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod. # noqa: E501
+
+ :return: The host_i_ps of this V1PodStatus. # noqa: E501
+ :rtype: list[V1HostIP]
+ """
+ return self._host_i_ps
+
+ @host_i_ps.setter
+ def host_i_ps(self, host_i_ps):
+ """Sets the host_i_ps of this V1PodStatus.
+
+ hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod. # noqa: E501
+
+ :param host_i_ps: The host_i_ps of this V1PodStatus. # noqa: E501
+ :type: list[V1HostIP]
+ """
+
+ self._host_i_ps = host_i_ps
+
+ @property
+ def init_container_statuses(self):
+ """Gets the init_container_statuses of this V1PodStatus. # noqa: E501
+
+ The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status # noqa: E501
+
+ :return: The init_container_statuses of this V1PodStatus. # noqa: E501
+ :rtype: list[V1ContainerStatus]
+ """
+ return self._init_container_statuses
+
+ @init_container_statuses.setter
+ def init_container_statuses(self, init_container_statuses):
+ """Sets the init_container_statuses of this V1PodStatus.
+
+ The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status # noqa: E501
+
+ :param init_container_statuses: The init_container_statuses of this V1PodStatus. # noqa: E501
+ :type: list[V1ContainerStatus]
+ """
+
+ self._init_container_statuses = init_container_statuses
+
+ @property
+ def message(self):
+ """Gets the message of this V1PodStatus. # noqa: E501
+
+ A human readable message indicating details about why the pod is in this condition. # noqa: E501
+
+ :return: The message of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1PodStatus.
+
+ A human readable message indicating details about why the pod is in this condition. # noqa: E501
+
+ :param message: The message of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def nominated_node_name(self):
+ """Gets the nominated_node_name of this V1PodStatus. # noqa: E501
+
+ nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled. # noqa: E501
+
+ :return: The nominated_node_name of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._nominated_node_name
+
+ @nominated_node_name.setter
+ def nominated_node_name(self, nominated_node_name):
+ """Sets the nominated_node_name of this V1PodStatus.
+
+ nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled. # noqa: E501
+
+ :param nominated_node_name: The nominated_node_name of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._nominated_node_name = nominated_node_name
+
+ @property
+ def phase(self):
+ """Gets the phase of this V1PodStatus. # noqa: E501
+
+ The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values: Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase # noqa: E501
+
+ :return: The phase of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._phase
+
+ @phase.setter
+ def phase(self, phase):
+ """Sets the phase of this V1PodStatus.
+
+ The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values: Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase # noqa: E501
+
+ :param phase: The phase of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._phase = phase
+
+ @property
+ def pod_ip(self):
+ """Gets the pod_ip of this V1PodStatus. # noqa: E501
+
+ podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated. # noqa: E501
+
+ :return: The pod_ip of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._pod_ip
+
+ @pod_ip.setter
+ def pod_ip(self, pod_ip):
+ """Sets the pod_ip of this V1PodStatus.
+
+ podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated. # noqa: E501
+
+ :param pod_ip: The pod_ip of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._pod_ip = pod_ip
+
+ @property
+ def pod_i_ps(self):
+ """Gets the pod_i_ps of this V1PodStatus. # noqa: E501
+
+ podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet. # noqa: E501
+
+ :return: The pod_i_ps of this V1PodStatus. # noqa: E501
+ :rtype: list[V1PodIP]
+ """
+ return self._pod_i_ps
+
+ @pod_i_ps.setter
+ def pod_i_ps(self, pod_i_ps):
+ """Sets the pod_i_ps of this V1PodStatus.
+
+ podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet. # noqa: E501
+
+ :param pod_i_ps: The pod_i_ps of this V1PodStatus. # noqa: E501
+ :type: list[V1PodIP]
+ """
+
+ self._pod_i_ps = pod_i_ps
+
+ @property
+ def qos_class(self):
+ """Gets the qos_class of this V1PodStatus. # noqa: E501
+
+ The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes # noqa: E501
+
+ :return: The qos_class of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._qos_class
+
+ @qos_class.setter
+ def qos_class(self, qos_class):
+ """Sets the qos_class of this V1PodStatus.
+
+ The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes # noqa: E501
+
+ :param qos_class: The qos_class of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._qos_class = qos_class
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1PodStatus. # noqa: E501
+
+ A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' # noqa: E501
+
+ :return: The reason of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1PodStatus.
+
+ A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' # noqa: E501
+
+ :param reason: The reason of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def resize(self):
+ """Gets the resize of this V1PodStatus. # noqa: E501
+
+ Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" # noqa: E501
+
+ :return: The resize of this V1PodStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._resize
+
+ @resize.setter
+ def resize(self, resize):
+ """Sets the resize of this V1PodStatus.
+
+ Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" # noqa: E501
+
+ :param resize: The resize of this V1PodStatus. # noqa: E501
+ :type: str
+ """
+
+ self._resize = resize
+
+ @property
+ def resource_claim_statuses(self):
+ """Gets the resource_claim_statuses of this V1PodStatus. # noqa: E501
+
+ Status of resource claims. # noqa: E501
+
+ :return: The resource_claim_statuses of this V1PodStatus. # noqa: E501
+ :rtype: list[V1PodResourceClaimStatus]
+ """
+ return self._resource_claim_statuses
+
+ @resource_claim_statuses.setter
+ def resource_claim_statuses(self, resource_claim_statuses):
+ """Sets the resource_claim_statuses of this V1PodStatus.
+
+ Status of resource claims. # noqa: E501
+
+ :param resource_claim_statuses: The resource_claim_statuses of this V1PodStatus. # noqa: E501
+ :type: list[V1PodResourceClaimStatus]
+ """
+
+ self._resource_claim_statuses = resource_claim_statuses
+
+ @property
+ def start_time(self):
+ """Gets the start_time of this V1PodStatus. # noqa: E501
+
+ RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod. # noqa: E501
+
+ :return: The start_time of this V1PodStatus. # noqa: E501
+ :rtype: datetime
+ """
+ return self._start_time
+
+ @start_time.setter
+ def start_time(self, start_time):
+ """Sets the start_time of this V1PodStatus.
+
+ RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod. # noqa: E501
+
+ :param start_time: The start_time of this V1PodStatus. # noqa: E501
+ :type: datetime
+ """
+
+ self._start_time = start_time
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template.py
new file mode 100644
index 0000000000..8edae92ac8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodTemplate(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'template': 'V1PodTemplateSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'template': 'template'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, template=None, local_vars_configuration=None): # noqa: E501
+ """V1PodTemplate - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._template = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if template is not None:
+ self.template = template
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PodTemplate. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PodTemplate. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PodTemplate.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PodTemplate. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PodTemplate. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PodTemplate. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PodTemplate.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PodTemplate. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PodTemplate. # noqa: E501
+
+
+ :return: The metadata of this V1PodTemplate. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PodTemplate.
+
+
+ :param metadata: The metadata of this V1PodTemplate. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def template(self):
+ """Gets the template of this V1PodTemplate. # noqa: E501
+
+
+ :return: The template of this V1PodTemplate. # noqa: E501
+ :rtype: V1PodTemplateSpec
+ """
+ return self._template
+
+ @template.setter
+ def template(self, template):
+ """Sets the template of this V1PodTemplate.
+
+
+ :param template: The template of this V1PodTemplate. # noqa: E501
+ :type: V1PodTemplateSpec
+ """
+
+ self._template = template
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodTemplate):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodTemplate):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_list.py
new file mode 100644
index 0000000000..d2d56367fa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodTemplateList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1PodTemplate]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1PodTemplateList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PodTemplateList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PodTemplateList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PodTemplateList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PodTemplateList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1PodTemplateList. # noqa: E501
+
+ List of pod templates # noqa: E501
+
+ :return: The items of this V1PodTemplateList. # noqa: E501
+ :rtype: list[V1PodTemplate]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1PodTemplateList.
+
+ List of pod templates # noqa: E501
+
+ :param items: The items of this V1PodTemplateList. # noqa: E501
+ :type: list[V1PodTemplate]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PodTemplateList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PodTemplateList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PodTemplateList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PodTemplateList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PodTemplateList. # noqa: E501
+
+
+ :return: The metadata of this V1PodTemplateList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PodTemplateList.
+
+
+ :param metadata: The metadata of this V1PodTemplateList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodTemplateList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodTemplateList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_spec.py
new file mode 100644
index 0000000000..0c8b9a7aba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_pod_template_spec.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PodTemplateSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1PodSpec'
+ }
+
+ attribute_map = {
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1PodTemplateSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PodTemplateSpec. # noqa: E501
+
+
+ :return: The metadata of this V1PodTemplateSpec. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PodTemplateSpec.
+
+
+ :param metadata: The metadata of this V1PodTemplateSpec. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1PodTemplateSpec. # noqa: E501
+
+
+ :return: The spec of this V1PodTemplateSpec. # noqa: E501
+ :rtype: V1PodSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1PodTemplateSpec.
+
+
+ :param spec: The spec of this V1PodTemplateSpec. # noqa: E501
+ :type: V1PodSpec
+ """
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PodTemplateSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PodTemplateSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_policy_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_policy_rule.py
new file mode 100644
index 0000000000..a1616ba37c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_policy_rule.py
@@ -0,0 +1,235 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PolicyRule(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'non_resource_ur_ls': 'list[str]',
+ 'resource_names': 'list[str]',
+ 'resources': 'list[str]',
+ 'verbs': 'list[str]'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'non_resource_ur_ls': 'nonResourceURLs',
+ 'resource_names': 'resourceNames',
+ 'resources': 'resources',
+ 'verbs': 'verbs'
+ }
+
+ def __init__(self, api_groups=None, non_resource_ur_ls=None, resource_names=None, resources=None, verbs=None, local_vars_configuration=None): # noqa: E501
+ """V1PolicyRule - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._non_resource_ur_ls = None
+ self._resource_names = None
+ self._resources = None
+ self._verbs = None
+ self.discriminator = None
+
+ if api_groups is not None:
+ self.api_groups = api_groups
+ if non_resource_ur_ls is not None:
+ self.non_resource_ur_ls = non_resource_ur_ls
+ if resource_names is not None:
+ self.resource_names = resource_names
+ if resources is not None:
+ self.resources = resources
+ self.verbs = verbs
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1PolicyRule. # noqa: E501
+
+ APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups. # noqa: E501
+
+ :return: The api_groups of this V1PolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1PolicyRule.
+
+ APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups. # noqa: E501
+
+ :param api_groups: The api_groups of this V1PolicyRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_groups = api_groups
+
+ @property
+ def non_resource_ur_ls(self):
+ """Gets the non_resource_ur_ls of this V1PolicyRule. # noqa: E501
+
+ NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both. # noqa: E501
+
+ :return: The non_resource_ur_ls of this V1PolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._non_resource_ur_ls
+
+ @non_resource_ur_ls.setter
+ def non_resource_ur_ls(self, non_resource_ur_ls):
+ """Sets the non_resource_ur_ls of this V1PolicyRule.
+
+ NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both. # noqa: E501
+
+ :param non_resource_ur_ls: The non_resource_ur_ls of this V1PolicyRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._non_resource_ur_ls = non_resource_ur_ls
+
+ @property
+ def resource_names(self):
+ """Gets the resource_names of this V1PolicyRule. # noqa: E501
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
+
+ :return: The resource_names of this V1PolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resource_names
+
+ @resource_names.setter
+ def resource_names(self, resource_names):
+ """Sets the resource_names of this V1PolicyRule.
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
+
+ :param resource_names: The resource_names of this V1PolicyRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resource_names = resource_names
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1PolicyRule. # noqa: E501
+
+ Resources is a list of resources this rule applies to. '*' represents all resources. # noqa: E501
+
+ :return: The resources of this V1PolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1PolicyRule.
+
+ Resources is a list of resources this rule applies to. '*' represents all resources. # noqa: E501
+
+ :param resources: The resources of this V1PolicyRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resources = resources
+
+ @property
+ def verbs(self):
+ """Gets the verbs of this V1PolicyRule. # noqa: E501
+
+ Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. # noqa: E501
+
+ :return: The verbs of this V1PolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._verbs
+
+ @verbs.setter
+ def verbs(self, verbs):
+ """Sets the verbs of this V1PolicyRule.
+
+ Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. # noqa: E501
+
+ :param verbs: The verbs of this V1PolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
+ raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
+
+ self._verbs = verbs
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PolicyRule):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PolicyRule):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_port_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_port_status.py
new file mode 100644
index 0000000000..620b4f2aae
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_port_status.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PortStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'error': 'str',
+ 'port': 'int',
+ 'protocol': 'str'
+ }
+
+ attribute_map = {
+ 'error': 'error',
+ 'port': 'port',
+ 'protocol': 'protocol'
+ }
+
+ def __init__(self, error=None, port=None, protocol=None, local_vars_configuration=None): # noqa: E501
+ """V1PortStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._error = None
+ self._port = None
+ self._protocol = None
+ self.discriminator = None
+
+ if error is not None:
+ self.error = error
+ self.port = port
+ self.protocol = protocol
+
+ @property
+ def error(self):
+ """Gets the error of this V1PortStatus. # noqa: E501
+
+ Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. # noqa: E501
+
+ :return: The error of this V1PortStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._error
+
+ @error.setter
+ def error(self, error):
+ """Sets the error of this V1PortStatus.
+
+ Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. # noqa: E501
+
+ :param error: The error of this V1PortStatus. # noqa: E501
+ :type: str
+ """
+
+ self._error = error
+
+ @property
+ def port(self):
+ """Gets the port of this V1PortStatus. # noqa: E501
+
+ Port is the port number of the service port of which status is recorded here # noqa: E501
+
+ :return: The port of this V1PortStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this V1PortStatus.
+
+ Port is the port number of the service port of which status is recorded here # noqa: E501
+
+ :param port: The port of this V1PortStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
+ raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
+
+ self._port = port
+
+ @property
+ def protocol(self):
+ """Gets the protocol of this V1PortStatus. # noqa: E501
+
+ Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\" # noqa: E501
+
+ :return: The protocol of this V1PortStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._protocol
+
+ @protocol.setter
+ def protocol(self, protocol):
+ """Sets the protocol of this V1PortStatus.
+
+ Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\" # noqa: E501
+
+ :param protocol: The protocol of this V1PortStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and protocol is None: # noqa: E501
+ raise ValueError("Invalid value for `protocol`, must not be `None`") # noqa: E501
+
+ self._protocol = protocol
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PortStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PortStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_portworx_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_portworx_volume_source.py
new file mode 100644
index 0000000000..9528f59033
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_portworx_volume_source.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PortworxVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'read_only': 'bool',
+ 'volume_id': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'read_only': 'readOnly',
+ 'volume_id': 'volumeID'
+ }
+
+ def __init__(self, fs_type=None, read_only=None, volume_id=None, local_vars_configuration=None): # noqa: E501
+ """V1PortworxVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._read_only = None
+ self._volume_id = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ if read_only is not None:
+ self.read_only = read_only
+ self.volume_id = volume_id
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1PortworxVolumeSource. # noqa: E501
+
+ fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
+
+ :return: The fs_type of this V1PortworxVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1PortworxVolumeSource.
+
+ fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
+
+ :param fs_type: The fs_type of this V1PortworxVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1PortworxVolumeSource. # noqa: E501
+
+ readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
+
+ :return: The read_only of this V1PortworxVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1PortworxVolumeSource.
+
+ readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
+
+ :param read_only: The read_only of this V1PortworxVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def volume_id(self):
+ """Gets the volume_id of this V1PortworxVolumeSource. # noqa: E501
+
+ volumeID uniquely identifies a Portworx volume # noqa: E501
+
+ :return: The volume_id of this V1PortworxVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._volume_id
+
+ @volume_id.setter
+ def volume_id(self, volume_id):
+ """Sets the volume_id of this V1PortworxVolumeSource.
+
+ volumeID uniquely identifies a Portworx volume # noqa: E501
+
+ :param volume_id: The volume_id of this V1PortworxVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and volume_id is None: # noqa: E501
+ raise ValueError("Invalid value for `volume_id`, must not be `None`") # noqa: E501
+
+ self._volume_id = volume_id
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PortworxVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PortworxVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_preconditions.py b/contrib/python/kubernetes/kubernetes/client/models/v1_preconditions.py
new file mode 100644
index 0000000000..6e8fdf4a99
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_preconditions.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Preconditions(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'resource_version': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'resource_version': 'resourceVersion',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, resource_version=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1Preconditions - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._resource_version = None
+ self._uid = None
+ self.discriminator = None
+
+ if resource_version is not None:
+ self.resource_version = resource_version
+ if uid is not None:
+ self.uid = uid
+
+ @property
+ def resource_version(self):
+ """Gets the resource_version of this V1Preconditions. # noqa: E501
+
+ Specifies the target ResourceVersion # noqa: E501
+
+ :return: The resource_version of this V1Preconditions. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_version
+
+ @resource_version.setter
+ def resource_version(self, resource_version):
+ """Sets the resource_version of this V1Preconditions.
+
+ Specifies the target ResourceVersion # noqa: E501
+
+ :param resource_version: The resource_version of this V1Preconditions. # noqa: E501
+ :type: str
+ """
+
+ self._resource_version = resource_version
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1Preconditions. # noqa: E501
+
+ Specifies the target UID. # noqa: E501
+
+ :return: The uid of this V1Preconditions. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1Preconditions.
+
+ Specifies the target UID. # noqa: E501
+
+ :param uid: The uid of this V1Preconditions. # noqa: E501
+ :type: str
+ """
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Preconditions):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Preconditions):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_preferred_scheduling_term.py b/contrib/python/kubernetes/kubernetes/client/models/v1_preferred_scheduling_term.py
new file mode 100644
index 0000000000..2537f3845a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_preferred_scheduling_term.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PreferredSchedulingTerm(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'preference': 'V1NodeSelectorTerm',
+ 'weight': 'int'
+ }
+
+ attribute_map = {
+ 'preference': 'preference',
+ 'weight': 'weight'
+ }
+
+ def __init__(self, preference=None, weight=None, local_vars_configuration=None): # noqa: E501
+ """V1PreferredSchedulingTerm - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._preference = None
+ self._weight = None
+ self.discriminator = None
+
+ self.preference = preference
+ self.weight = weight
+
+ @property
+ def preference(self):
+ """Gets the preference of this V1PreferredSchedulingTerm. # noqa: E501
+
+
+ :return: The preference of this V1PreferredSchedulingTerm. # noqa: E501
+ :rtype: V1NodeSelectorTerm
+ """
+ return self._preference
+
+ @preference.setter
+ def preference(self, preference):
+ """Sets the preference of this V1PreferredSchedulingTerm.
+
+
+ :param preference: The preference of this V1PreferredSchedulingTerm. # noqa: E501
+ :type: V1NodeSelectorTerm
+ """
+ if self.local_vars_configuration.client_side_validation and preference is None: # noqa: E501
+ raise ValueError("Invalid value for `preference`, must not be `None`") # noqa: E501
+
+ self._preference = preference
+
+ @property
+ def weight(self):
+ """Gets the weight of this V1PreferredSchedulingTerm. # noqa: E501
+
+ Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. # noqa: E501
+
+ :return: The weight of this V1PreferredSchedulingTerm. # noqa: E501
+ :rtype: int
+ """
+ return self._weight
+
+ @weight.setter
+ def weight(self, weight):
+ """Sets the weight of this V1PreferredSchedulingTerm.
+
+ Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. # noqa: E501
+
+ :param weight: The weight of this V1PreferredSchedulingTerm. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and weight is None: # noqa: E501
+ raise ValueError("Invalid value for `weight`, must not be `None`") # noqa: E501
+
+ self._weight = weight
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PreferredSchedulingTerm):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PreferredSchedulingTerm):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_priority_class.py b/contrib/python/kubernetes/kubernetes/client/models/v1_priority_class.py
new file mode 100644
index 0000000000..81e6fc0e58
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_priority_class.py
@@ -0,0 +1,289 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PriorityClass(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'description': 'str',
+ 'global_default': 'bool',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'preemption_policy': 'str',
+ 'value': 'int'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'description': 'description',
+ 'global_default': 'globalDefault',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'preemption_policy': 'preemptionPolicy',
+ 'value': 'value'
+ }
+
+ def __init__(self, api_version=None, description=None, global_default=None, kind=None, metadata=None, preemption_policy=None, value=None, local_vars_configuration=None): # noqa: E501
+ """V1PriorityClass - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._description = None
+ self._global_default = None
+ self._kind = None
+ self._metadata = None
+ self._preemption_policy = None
+ self._value = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if description is not None:
+ self.description = description
+ if global_default is not None:
+ self.global_default = global_default
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if preemption_policy is not None:
+ self.preemption_policy = preemption_policy
+ self.value = value
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PriorityClass. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PriorityClass. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PriorityClass.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PriorityClass. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def description(self):
+ """Gets the description of this V1PriorityClass. # noqa: E501
+
+ description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
+
+ :return: The description of this V1PriorityClass. # noqa: E501
+ :rtype: str
+ """
+ return self._description
+
+ @description.setter
+ def description(self, description):
+ """Sets the description of this V1PriorityClass.
+
+ description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
+
+ :param description: The description of this V1PriorityClass. # noqa: E501
+ :type: str
+ """
+
+ self._description = description
+
+ @property
+ def global_default(self):
+ """Gets the global_default of this V1PriorityClass. # noqa: E501
+
+ globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
+
+ :return: The global_default of this V1PriorityClass. # noqa: E501
+ :rtype: bool
+ """
+ return self._global_default
+
+ @global_default.setter
+ def global_default(self, global_default):
+ """Sets the global_default of this V1PriorityClass.
+
+ globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
+
+ :param global_default: The global_default of this V1PriorityClass. # noqa: E501
+ :type: bool
+ """
+
+ self._global_default = global_default
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PriorityClass. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PriorityClass. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PriorityClass.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PriorityClass. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PriorityClass. # noqa: E501
+
+
+ :return: The metadata of this V1PriorityClass. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PriorityClass.
+
+
+ :param metadata: The metadata of this V1PriorityClass. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def preemption_policy(self):
+ """Gets the preemption_policy of this V1PriorityClass. # noqa: E501
+
+ preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
+
+ :return: The preemption_policy of this V1PriorityClass. # noqa: E501
+ :rtype: str
+ """
+ return self._preemption_policy
+
+ @preemption_policy.setter
+ def preemption_policy(self, preemption_policy):
+ """Sets the preemption_policy of this V1PriorityClass.
+
+ preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. # noqa: E501
+
+ :param preemption_policy: The preemption_policy of this V1PriorityClass. # noqa: E501
+ :type: str
+ """
+
+ self._preemption_policy = preemption_policy
+
+ @property
+ def value(self):
+ """Gets the value of this V1PriorityClass. # noqa: E501
+
+ value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
+
+ :return: The value of this V1PriorityClass. # noqa: E501
+ :rtype: int
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ """Sets the value of this V1PriorityClass.
+
+ value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
+
+ :param value: The value of this V1PriorityClass. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
+ raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
+
+ self._value = value
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PriorityClass):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PriorityClass):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_priority_class_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_priority_class_list.py
new file mode 100644
index 0000000000..43634572c8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_priority_class_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1PriorityClassList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1PriorityClass]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1PriorityClassList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1PriorityClassList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1PriorityClassList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1PriorityClassList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1PriorityClassList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1PriorityClassList. # noqa: E501
+
+ items is the list of PriorityClasses # noqa: E501
+
+ :return: The items of this V1PriorityClassList. # noqa: E501
+ :rtype: list[V1PriorityClass]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1PriorityClassList.
+
+ items is the list of PriorityClasses # noqa: E501
+
+ :param items: The items of this V1PriorityClassList. # noqa: E501
+ :type: list[V1PriorityClass]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1PriorityClassList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1PriorityClassList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1PriorityClassList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1PriorityClassList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1PriorityClassList. # noqa: E501
+
+
+ :return: The metadata of this V1PriorityClassList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1PriorityClassList.
+
+
+ :param metadata: The metadata of this V1PriorityClassList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1PriorityClassList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1PriorityClassList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_probe.py b/contrib/python/kubernetes/kubernetes/client/models/v1_probe.py
new file mode 100644
index 0000000000..bc08b3e840
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_probe.py
@@ -0,0 +1,366 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Probe(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ '_exec': 'V1ExecAction',
+ 'failure_threshold': 'int',
+ 'grpc': 'V1GRPCAction',
+ 'http_get': 'V1HTTPGetAction',
+ 'initial_delay_seconds': 'int',
+ 'period_seconds': 'int',
+ 'success_threshold': 'int',
+ 'tcp_socket': 'V1TCPSocketAction',
+ 'termination_grace_period_seconds': 'int',
+ 'timeout_seconds': 'int'
+ }
+
+ attribute_map = {
+ '_exec': 'exec',
+ 'failure_threshold': 'failureThreshold',
+ 'grpc': 'grpc',
+ 'http_get': 'httpGet',
+ 'initial_delay_seconds': 'initialDelaySeconds',
+ 'period_seconds': 'periodSeconds',
+ 'success_threshold': 'successThreshold',
+ 'tcp_socket': 'tcpSocket',
+ 'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
+ 'timeout_seconds': 'timeoutSeconds'
+ }
+
+ def __init__(self, _exec=None, failure_threshold=None, grpc=None, http_get=None, initial_delay_seconds=None, period_seconds=None, success_threshold=None, tcp_socket=None, termination_grace_period_seconds=None, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
+ """V1Probe - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self.__exec = None
+ self._failure_threshold = None
+ self._grpc = None
+ self._http_get = None
+ self._initial_delay_seconds = None
+ self._period_seconds = None
+ self._success_threshold = None
+ self._tcp_socket = None
+ self._termination_grace_period_seconds = None
+ self._timeout_seconds = None
+ self.discriminator = None
+
+ if _exec is not None:
+ self._exec = _exec
+ if failure_threshold is not None:
+ self.failure_threshold = failure_threshold
+ if grpc is not None:
+ self.grpc = grpc
+ if http_get is not None:
+ self.http_get = http_get
+ if initial_delay_seconds is not None:
+ self.initial_delay_seconds = initial_delay_seconds
+ if period_seconds is not None:
+ self.period_seconds = period_seconds
+ if success_threshold is not None:
+ self.success_threshold = success_threshold
+ if tcp_socket is not None:
+ self.tcp_socket = tcp_socket
+ if termination_grace_period_seconds is not None:
+ self.termination_grace_period_seconds = termination_grace_period_seconds
+ if timeout_seconds is not None:
+ self.timeout_seconds = timeout_seconds
+
+ @property
+ def _exec(self):
+ """Gets the _exec of this V1Probe. # noqa: E501
+
+
+ :return: The _exec of this V1Probe. # noqa: E501
+ :rtype: V1ExecAction
+ """
+ return self.__exec
+
+ @_exec.setter
+ def _exec(self, _exec):
+ """Sets the _exec of this V1Probe.
+
+
+ :param _exec: The _exec of this V1Probe. # noqa: E501
+ :type: V1ExecAction
+ """
+
+ self.__exec = _exec
+
+ @property
+ def failure_threshold(self):
+ """Gets the failure_threshold of this V1Probe. # noqa: E501
+
+ Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. # noqa: E501
+
+ :return: The failure_threshold of this V1Probe. # noqa: E501
+ :rtype: int
+ """
+ return self._failure_threshold
+
+ @failure_threshold.setter
+ def failure_threshold(self, failure_threshold):
+ """Sets the failure_threshold of this V1Probe.
+
+ Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. # noqa: E501
+
+ :param failure_threshold: The failure_threshold of this V1Probe. # noqa: E501
+ :type: int
+ """
+
+ self._failure_threshold = failure_threshold
+
+ @property
+ def grpc(self):
+ """Gets the grpc of this V1Probe. # noqa: E501
+
+
+ :return: The grpc of this V1Probe. # noqa: E501
+ :rtype: V1GRPCAction
+ """
+ return self._grpc
+
+ @grpc.setter
+ def grpc(self, grpc):
+ """Sets the grpc of this V1Probe.
+
+
+ :param grpc: The grpc of this V1Probe. # noqa: E501
+ :type: V1GRPCAction
+ """
+
+ self._grpc = grpc
+
+ @property
+ def http_get(self):
+ """Gets the http_get of this V1Probe. # noqa: E501
+
+
+ :return: The http_get of this V1Probe. # noqa: E501
+ :rtype: V1HTTPGetAction
+ """
+ return self._http_get
+
+ @http_get.setter
+ def http_get(self, http_get):
+ """Sets the http_get of this V1Probe.
+
+
+ :param http_get: The http_get of this V1Probe. # noqa: E501
+ :type: V1HTTPGetAction
+ """
+
+ self._http_get = http_get
+
+ @property
+ def initial_delay_seconds(self):
+ """Gets the initial_delay_seconds of this V1Probe. # noqa: E501
+
+ Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
+
+ :return: The initial_delay_seconds of this V1Probe. # noqa: E501
+ :rtype: int
+ """
+ return self._initial_delay_seconds
+
+ @initial_delay_seconds.setter
+ def initial_delay_seconds(self, initial_delay_seconds):
+ """Sets the initial_delay_seconds of this V1Probe.
+
+ Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
+
+ :param initial_delay_seconds: The initial_delay_seconds of this V1Probe. # noqa: E501
+ :type: int
+ """
+
+ self._initial_delay_seconds = initial_delay_seconds
+
+ @property
+ def period_seconds(self):
+ """Gets the period_seconds of this V1Probe. # noqa: E501
+
+ How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. # noqa: E501
+
+ :return: The period_seconds of this V1Probe. # noqa: E501
+ :rtype: int
+ """
+ return self._period_seconds
+
+ @period_seconds.setter
+ def period_seconds(self, period_seconds):
+ """Sets the period_seconds of this V1Probe.
+
+ How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. # noqa: E501
+
+ :param period_seconds: The period_seconds of this V1Probe. # noqa: E501
+ :type: int
+ """
+
+ self._period_seconds = period_seconds
+
+ @property
+ def success_threshold(self):
+ """Gets the success_threshold of this V1Probe. # noqa: E501
+
+ Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. # noqa: E501
+
+ :return: The success_threshold of this V1Probe. # noqa: E501
+ :rtype: int
+ """
+ return self._success_threshold
+
+ @success_threshold.setter
+ def success_threshold(self, success_threshold):
+ """Sets the success_threshold of this V1Probe.
+
+ Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. # noqa: E501
+
+ :param success_threshold: The success_threshold of this V1Probe. # noqa: E501
+ :type: int
+ """
+
+ self._success_threshold = success_threshold
+
+ @property
+ def tcp_socket(self):
+ """Gets the tcp_socket of this V1Probe. # noqa: E501
+
+
+ :return: The tcp_socket of this V1Probe. # noqa: E501
+ :rtype: V1TCPSocketAction
+ """
+ return self._tcp_socket
+
+ @tcp_socket.setter
+ def tcp_socket(self, tcp_socket):
+ """Sets the tcp_socket of this V1Probe.
+
+
+ :param tcp_socket: The tcp_socket of this V1Probe. # noqa: E501
+ :type: V1TCPSocketAction
+ """
+
+ self._tcp_socket = tcp_socket
+
+ @property
+ def termination_grace_period_seconds(self):
+ """Gets the termination_grace_period_seconds of this V1Probe. # noqa: E501
+
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. # noqa: E501
+
+ :return: The termination_grace_period_seconds of this V1Probe. # noqa: E501
+ :rtype: int
+ """
+ return self._termination_grace_period_seconds
+
+ @termination_grace_period_seconds.setter
+ def termination_grace_period_seconds(self, termination_grace_period_seconds):
+ """Sets the termination_grace_period_seconds of this V1Probe.
+
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. # noqa: E501
+
+ :param termination_grace_period_seconds: The termination_grace_period_seconds of this V1Probe. # noqa: E501
+ :type: int
+ """
+
+ self._termination_grace_period_seconds = termination_grace_period_seconds
+
+ @property
+ def timeout_seconds(self):
+ """Gets the timeout_seconds of this V1Probe. # noqa: E501
+
+ Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
+
+ :return: The timeout_seconds of this V1Probe. # noqa: E501
+ :rtype: int
+ """
+ return self._timeout_seconds
+
+ @timeout_seconds.setter
+ def timeout_seconds(self, timeout_seconds):
+ """Sets the timeout_seconds of this V1Probe.
+
+ Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
+
+ :param timeout_seconds: The timeout_seconds of this V1Probe. # noqa: E501
+ :type: int
+ """
+
+ self._timeout_seconds = timeout_seconds
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Probe):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Probe):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_projected_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_projected_volume_source.py
new file mode 100644
index 0000000000..0d97847580
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_projected_volume_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ProjectedVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'default_mode': 'int',
+ 'sources': 'list[V1VolumeProjection]'
+ }
+
+ attribute_map = {
+ 'default_mode': 'defaultMode',
+ 'sources': 'sources'
+ }
+
+ def __init__(self, default_mode=None, sources=None, local_vars_configuration=None): # noqa: E501
+ """V1ProjectedVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._default_mode = None
+ self._sources = None
+ self.discriminator = None
+
+ if default_mode is not None:
+ self.default_mode = default_mode
+ if sources is not None:
+ self.sources = sources
+
+ @property
+ def default_mode(self):
+ """Gets the default_mode of this V1ProjectedVolumeSource. # noqa: E501
+
+ defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
+
+ :return: The default_mode of this V1ProjectedVolumeSource. # noqa: E501
+ :rtype: int
+ """
+ return self._default_mode
+
+ @default_mode.setter
+ def default_mode(self, default_mode):
+ """Sets the default_mode of this V1ProjectedVolumeSource.
+
+ defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
+
+ :param default_mode: The default_mode of this V1ProjectedVolumeSource. # noqa: E501
+ :type: int
+ """
+
+ self._default_mode = default_mode
+
+ @property
+ def sources(self):
+ """Gets the sources of this V1ProjectedVolumeSource. # noqa: E501
+
+ sources is the list of volume projections # noqa: E501
+
+ :return: The sources of this V1ProjectedVolumeSource. # noqa: E501
+ :rtype: list[V1VolumeProjection]
+ """
+ return self._sources
+
+ @sources.setter
+ def sources(self, sources):
+ """Sets the sources of this V1ProjectedVolumeSource.
+
+ sources is the list of volume projections # noqa: E501
+
+ :param sources: The sources of this V1ProjectedVolumeSource. # noqa: E501
+ :type: list[V1VolumeProjection]
+ """
+
+ self._sources = sources
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ProjectedVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ProjectedVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_quobyte_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_quobyte_volume_source.py
new file mode 100644
index 0000000000..0b87b362b0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_quobyte_volume_source.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1QuobyteVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'group': 'str',
+ 'read_only': 'bool',
+ 'registry': 'str',
+ 'tenant': 'str',
+ 'user': 'str',
+ 'volume': 'str'
+ }
+
+ attribute_map = {
+ 'group': 'group',
+ 'read_only': 'readOnly',
+ 'registry': 'registry',
+ 'tenant': 'tenant',
+ 'user': 'user',
+ 'volume': 'volume'
+ }
+
+ def __init__(self, group=None, read_only=None, registry=None, tenant=None, user=None, volume=None, local_vars_configuration=None): # noqa: E501
+ """V1QuobyteVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._group = None
+ self._read_only = None
+ self._registry = None
+ self._tenant = None
+ self._user = None
+ self._volume = None
+ self.discriminator = None
+
+ if group is not None:
+ self.group = group
+ if read_only is not None:
+ self.read_only = read_only
+ self.registry = registry
+ if tenant is not None:
+ self.tenant = tenant
+ if user is not None:
+ self.user = user
+ self.volume = volume
+
+ @property
+ def group(self):
+ """Gets the group of this V1QuobyteVolumeSource. # noqa: E501
+
+ group to map volume access to Default is no group # noqa: E501
+
+ :return: The group of this V1QuobyteVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._group
+
+ @group.setter
+ def group(self, group):
+ """Sets the group of this V1QuobyteVolumeSource.
+
+ group to map volume access to Default is no group # noqa: E501
+
+ :param group: The group of this V1QuobyteVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._group = group
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1QuobyteVolumeSource. # noqa: E501
+
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. # noqa: E501
+
+ :return: The read_only of this V1QuobyteVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1QuobyteVolumeSource.
+
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. # noqa: E501
+
+ :param read_only: The read_only of this V1QuobyteVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def registry(self):
+ """Gets the registry of this V1QuobyteVolumeSource. # noqa: E501
+
+ registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes # noqa: E501
+
+ :return: The registry of this V1QuobyteVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._registry
+
+ @registry.setter
+ def registry(self, registry):
+ """Sets the registry of this V1QuobyteVolumeSource.
+
+ registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes # noqa: E501
+
+ :param registry: The registry of this V1QuobyteVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and registry is None: # noqa: E501
+ raise ValueError("Invalid value for `registry`, must not be `None`") # noqa: E501
+
+ self._registry = registry
+
+ @property
+ def tenant(self):
+ """Gets the tenant of this V1QuobyteVolumeSource. # noqa: E501
+
+ tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin # noqa: E501
+
+ :return: The tenant of this V1QuobyteVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._tenant
+
+ @tenant.setter
+ def tenant(self, tenant):
+ """Sets the tenant of this V1QuobyteVolumeSource.
+
+ tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin # noqa: E501
+
+ :param tenant: The tenant of this V1QuobyteVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._tenant = tenant
+
+ @property
+ def user(self):
+ """Gets the user of this V1QuobyteVolumeSource. # noqa: E501
+
+        user to map volume access to Defaults to serviceaccount user  # noqa: E501
+
+ :return: The user of this V1QuobyteVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1QuobyteVolumeSource.
+
+        user to map volume access to Defaults to serviceaccount user  # noqa: E501
+
+ :param user: The user of this V1QuobyteVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._user = user
+
+ @property
+ def volume(self):
+ """Gets the volume of this V1QuobyteVolumeSource. # noqa: E501
+
+ volume is a string that references an already created Quobyte volume by name. # noqa: E501
+
+ :return: The volume of this V1QuobyteVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._volume
+
+ @volume.setter
+ def volume(self, volume):
+ """Sets the volume of this V1QuobyteVolumeSource.
+
+ volume is a string that references an already created Quobyte volume by name. # noqa: E501
+
+ :param volume: The volume of this V1QuobyteVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and volume is None: # noqa: E501
+ raise ValueError("Invalid value for `volume`, must not be `None`") # noqa: E501
+
+ self._volume = volume
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1QuobyteVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1QuobyteVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_rbd_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_rbd_persistent_volume_source.py
new file mode 100644
index 0000000000..3e3380488a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_rbd_persistent_volume_source.py
@@ -0,0 +1,318 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RBDPersistentVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'image': 'str',
+ 'keyring': 'str',
+ 'monitors': 'list[str]',
+ 'pool': 'str',
+ 'read_only': 'bool',
+ 'secret_ref': 'V1SecretReference',
+ 'user': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'image': 'image',
+ 'keyring': 'keyring',
+ 'monitors': 'monitors',
+ 'pool': 'pool',
+ 'read_only': 'readOnly',
+ 'secret_ref': 'secretRef',
+ 'user': 'user'
+ }
+
+ def __init__(self, fs_type=None, image=None, keyring=None, monitors=None, pool=None, read_only=None, secret_ref=None, user=None, local_vars_configuration=None): # noqa: E501
+ """V1RBDPersistentVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._image = None
+ self._keyring = None
+ self._monitors = None
+ self._pool = None
+ self._read_only = None
+ self._secret_ref = None
+ self._user = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ self.image = image
+ if keyring is not None:
+ self.keyring = keyring
+ self.monitors = monitors
+ if pool is not None:
+ self.pool = pool
+ if read_only is not None:
+ self.read_only = read_only
+ if secret_ref is not None:
+ self.secret_ref = secret_ref
+ if user is not None:
+ self.user = user
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
+
+ :return: The fs_type of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1RBDPersistentVolumeSource.
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
+
+ :param fs_type: The fs_type of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def image(self):
+ """Gets the image of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The image of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._image
+
+ @image.setter
+ def image(self, image):
+ """Sets the image of this V1RBDPersistentVolumeSource.
+
+ image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param image: The image of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and image is None: # noqa: E501
+ raise ValueError("Invalid value for `image`, must not be `None`") # noqa: E501
+
+ self._image = image
+
+ @property
+ def keyring(self):
+ """Gets the keyring of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The keyring of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._keyring
+
+ @keyring.setter
+ def keyring(self, keyring):
+ """Sets the keyring of this V1RBDPersistentVolumeSource.
+
+ keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param keyring: The keyring of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._keyring = keyring
+
+ @property
+ def monitors(self):
+ """Gets the monitors of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The monitors of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._monitors
+
+ @monitors.setter
+ def monitors(self, monitors):
+ """Sets the monitors of this V1RBDPersistentVolumeSource.
+
+ monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param monitors: The monitors of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and monitors is None: # noqa: E501
+ raise ValueError("Invalid value for `monitors`, must not be `None`") # noqa: E501
+
+ self._monitors = monitors
+
+ @property
+ def pool(self):
+ """Gets the pool of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The pool of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._pool
+
+ @pool.setter
+ def pool(self, pool):
+ """Sets the pool of this V1RBDPersistentVolumeSource.
+
+ pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param pool: The pool of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._pool = pool
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The read_only of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1RBDPersistentVolumeSource.
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param read_only: The read_only of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1RBDPersistentVolumeSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: V1SecretReference
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1RBDPersistentVolumeSource.
+
+
+ :param secret_ref: The secret_ref of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: V1SecretReference
+ """
+
+ self._secret_ref = secret_ref
+
+ @property
+ def user(self):
+ """Gets the user of this V1RBDPersistentVolumeSource. # noqa: E501
+
+ user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The user of this V1RBDPersistentVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1RBDPersistentVolumeSource.
+
+ user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param user: The user of this V1RBDPersistentVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._user = user
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RBDPersistentVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RBDPersistentVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_rbd_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_rbd_volume_source.py
new file mode 100644
index 0000000000..3c0045f6f9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_rbd_volume_source.py
@@ -0,0 +1,318 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RBDVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'image': 'str',
+ 'keyring': 'str',
+ 'monitors': 'list[str]',
+ 'pool': 'str',
+ 'read_only': 'bool',
+ 'secret_ref': 'V1LocalObjectReference',
+ 'user': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'image': 'image',
+ 'keyring': 'keyring',
+ 'monitors': 'monitors',
+ 'pool': 'pool',
+ 'read_only': 'readOnly',
+ 'secret_ref': 'secretRef',
+ 'user': 'user'
+ }
+
+ def __init__(self, fs_type=None, image=None, keyring=None, monitors=None, pool=None, read_only=None, secret_ref=None, user=None, local_vars_configuration=None): # noqa: E501
+ """V1RBDVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._image = None
+ self._keyring = None
+ self._monitors = None
+ self._pool = None
+ self._read_only = None
+ self._secret_ref = None
+ self._user = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ self.image = image
+ if keyring is not None:
+ self.keyring = keyring
+ self.monitors = monitors
+ if pool is not None:
+ self.pool = pool
+ if read_only is not None:
+ self.read_only = read_only
+ if secret_ref is not None:
+ self.secret_ref = secret_ref
+ if user is not None:
+ self.user = user
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1RBDVolumeSource. # noqa: E501
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
+
+ :return: The fs_type of this V1RBDVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1RBDVolumeSource.
+
+ fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
+
+ :param fs_type: The fs_type of this V1RBDVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def image(self):
+ """Gets the image of this V1RBDVolumeSource. # noqa: E501
+
+ image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The image of this V1RBDVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._image
+
+ @image.setter
+ def image(self, image):
+ """Sets the image of this V1RBDVolumeSource.
+
+ image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param image: The image of this V1RBDVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and image is None: # noqa: E501
+ raise ValueError("Invalid value for `image`, must not be `None`") # noqa: E501
+
+ self._image = image
+
+ @property
+ def keyring(self):
+ """Gets the keyring of this V1RBDVolumeSource. # noqa: E501
+
+ keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The keyring of this V1RBDVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._keyring
+
+ @keyring.setter
+ def keyring(self, keyring):
+ """Sets the keyring of this V1RBDVolumeSource.
+
+ keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param keyring: The keyring of this V1RBDVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._keyring = keyring
+
+ @property
+ def monitors(self):
+ """Gets the monitors of this V1RBDVolumeSource. # noqa: E501
+
+ monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The monitors of this V1RBDVolumeSource. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._monitors
+
+ @monitors.setter
+ def monitors(self, monitors):
+ """Sets the monitors of this V1RBDVolumeSource.
+
+ monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param monitors: The monitors of this V1RBDVolumeSource. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and monitors is None: # noqa: E501
+ raise ValueError("Invalid value for `monitors`, must not be `None`") # noqa: E501
+
+ self._monitors = monitors
+
+ @property
+ def pool(self):
+ """Gets the pool of this V1RBDVolumeSource. # noqa: E501
+
+ pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The pool of this V1RBDVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._pool
+
+ @pool.setter
+ def pool(self, pool):
+ """Sets the pool of this V1RBDVolumeSource.
+
+ pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param pool: The pool of this V1RBDVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._pool = pool
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1RBDVolumeSource. # noqa: E501
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The read_only of this V1RBDVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1RBDVolumeSource.
+
+ readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param read_only: The read_only of this V1RBDVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def secret_ref(self):
+ """Gets the secret_ref of this V1RBDVolumeSource. # noqa: E501
+
+
+ :return: The secret_ref of this V1RBDVolumeSource. # noqa: E501
+ :rtype: V1LocalObjectReference
+ """
+ return self._secret_ref
+
+ @secret_ref.setter
+ def secret_ref(self, secret_ref):
+ """Sets the secret_ref of this V1RBDVolumeSource.
+
+
+ :param secret_ref: The secret_ref of this V1RBDVolumeSource. # noqa: E501
+ :type: V1LocalObjectReference
+ """
+
+ self._secret_ref = secret_ref
+
+ @property
+ def user(self):
+ """Gets the user of this V1RBDVolumeSource. # noqa: E501
+
+ user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :return: The user of this V1RBDVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1RBDVolumeSource.
+
+ user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
+
+ :param user: The user of this V1RBDVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._user = user
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RBDVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RBDVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set.py
new file mode 100644
index 0000000000..86b66372cb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ReplicaSet(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1ReplicaSetSpec',
+ 'status': 'V1ReplicaSetStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1ReplicaSet - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ReplicaSet. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ReplicaSet. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ReplicaSet.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ReplicaSet. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ReplicaSet. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ReplicaSet. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ReplicaSet.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ReplicaSet. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ReplicaSet. # noqa: E501
+
+
+ :return: The metadata of this V1ReplicaSet. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ReplicaSet.
+
+
+ :param metadata: The metadata of this V1ReplicaSet. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1ReplicaSet. # noqa: E501
+
+
+ :return: The spec of this V1ReplicaSet. # noqa: E501
+ :rtype: V1ReplicaSetSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1ReplicaSet.
+
+
+ :param spec: The spec of this V1ReplicaSet. # noqa: E501
+ :type: V1ReplicaSetSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1ReplicaSet. # noqa: E501
+
+
+ :return: The status of this V1ReplicaSet. # noqa: E501
+ :rtype: V1ReplicaSetStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1ReplicaSet.
+
+
+ :param status: The status of this V1ReplicaSet. # noqa: E501
+ :type: V1ReplicaSetStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ReplicaSet):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ReplicaSet):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_condition.py
new file mode 100644
index 0000000000..0a19b7936c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ReplicaSetCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1ReplicaSetCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1ReplicaSetCondition. # noqa: E501
+
+ The last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1ReplicaSetCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1ReplicaSetCondition.
+
+ The last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1ReplicaSetCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1ReplicaSetCondition. # noqa: E501
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :return: The message of this V1ReplicaSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1ReplicaSetCondition.
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :param message: The message of this V1ReplicaSetCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1ReplicaSetCondition. # noqa: E501
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1ReplicaSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1ReplicaSetCondition.
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1ReplicaSetCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1ReplicaSetCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1ReplicaSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1ReplicaSetCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1ReplicaSetCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1ReplicaSetCondition. # noqa: E501
+
+ Type of replica set condition. # noqa: E501
+
+ :return: The type of this V1ReplicaSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1ReplicaSetCondition.
+
+ Type of replica set condition. # noqa: E501
+
+ :param type: The type of this V1ReplicaSetCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ReplicaSetCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ReplicaSetCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_list.py
new file mode 100644
index 0000000000..a8626d2246
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ReplicaSetList(object):
    """A collection of ReplicaSets returned by the apps/v1 list endpoint.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> OpenAPI type;
    # attribute_map maps attribute name -> JSON key in the wire payload.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1ReplicaSet]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicaSetList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the generated properties.
        for _name in self.openapi_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None

        # `items` is required and always routed through its validating setter;
        # the remaining fields are optional and assigned only when provided.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this representation
        of an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        # Optional field: no client-side validation is required.
        self._api_version = api_version

    @property
    def items(self):
        """list[V1ReplicaSet]: List of ReplicaSets. More info:
        https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
        """
        return self._items

    @items.setter
    def items(self, items):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        # Optional field: no client-side validation is required.
        self._kind = kind

    @property
    def metadata(self):
        """V1ListMeta: Standard list metadata for this collection."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        # Optional field: no client-side validation is required.
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _convert(val):
            # Nested generated models expose to_dict(); lists and dicts are
            # converted element-by-element, everything else passes through.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to to_str() so print()/pprint() show the model contents."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts match."""
        if isinstance(other, V1ReplicaSetList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        if isinstance(other, V1ReplicaSetList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_spec.py
new file mode 100644
index 0000000000..2f4ca0d9cf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_spec.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ReplicaSetSpec(object):
    """The specification of a ReplicaSet (desired replica count, selector, template).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> OpenAPI type;
    # attribute_map maps attribute name -> JSON key in the wire payload.
    openapi_types = {
        'min_ready_seconds': 'int',
        'replicas': 'int',
        'selector': 'V1LabelSelector',
        'template': 'V1PodTemplateSpec'
    }

    attribute_map = {
        'min_ready_seconds': 'minReadySeconds',
        'replicas': 'replicas',
        'selector': 'selector',
        'template': 'template'
    }

    def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicaSetSpec - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the generated properties.
        for _name in self.openapi_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None

        # `selector` is required and always routed through its validating
        # setter; the remaining fields are optional.
        if min_ready_seconds is not None:
            self.min_ready_seconds = min_ready_seconds
        if replicas is not None:
            self.replicas = replicas
        self.selector = selector
        if template is not None:
            self.template = template

    @property
    def min_ready_seconds(self):
        """int: Minimum number of seconds for which a newly created pod should
        be ready without any of its container crashing, for it to be considered
        available. Defaults to 0 (pod will be considered available as soon as
        it is ready).
        """
        return self._min_ready_seconds

    @min_ready_seconds.setter
    def min_ready_seconds(self, min_ready_seconds):
        # Optional field: no client-side validation is required.
        self._min_ready_seconds = min_ready_seconds

    @property
    def replicas(self):
        """int: Replicas is the number of desired replicas. This is a pointer
        to distinguish between explicit zero and unspecified. Defaults to 1.
        More info:
        https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        # Optional field: no client-side validation is required.
        self._replicas = replicas

    @property
    def selector(self):
        """V1LabelSelector: Label selector identifying the pods this
        ReplicaSet manages.
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and selector is None:  # noqa: E501
            raise ValueError("Invalid value for `selector`, must not be `None`")  # noqa: E501

        self._selector = selector

    @property
    def template(self):
        """V1PodTemplateSpec: Template describing the pods that will be
        created.
        """
        return self._template

    @template.setter
    def template(self, template):
        # Optional field: no client-side validation is required.
        self._template = template

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _convert(val):
            # Nested generated models expose to_dict(); lists and dicts are
            # converted element-by-element, everything else passes through.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to to_str() so print()/pprint() show the model contents."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts match."""
        if isinstance(other, V1ReplicaSetSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        if isinstance(other, V1ReplicaSetSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_status.py
new file mode 100644
index 0000000000..d3e866463a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replica_set_status.py
@@ -0,0 +1,263 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ReplicaSetStatus(object):
    """The most recently observed status of a ReplicaSet.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> OpenAPI type;
    # attribute_map maps attribute name -> JSON key in the wire payload.
    openapi_types = {
        'available_replicas': 'int',
        'conditions': 'list[V1ReplicaSetCondition]',
        'fully_labeled_replicas': 'int',
        'observed_generation': 'int',
        'ready_replicas': 'int',
        'replicas': 'int'
    }

    attribute_map = {
        'available_replicas': 'availableReplicas',
        'conditions': 'conditions',
        'fully_labeled_replicas': 'fullyLabeledReplicas',
        'observed_generation': 'observedGeneration',
        'ready_replicas': 'readyReplicas',
        'replicas': 'replicas'
    }

    def __init__(self, available_replicas=None, conditions=None, fully_labeled_replicas=None, observed_generation=None, ready_replicas=None, replicas=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicaSetStatus - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the generated properties.
        for _name in self.openapi_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None

        # `replicas` is required and always routed through its validating
        # setter; the remaining fields are optional.
        if available_replicas is not None:
            self.available_replicas = available_replicas
        if conditions is not None:
            self.conditions = conditions
        if fully_labeled_replicas is not None:
            self.fully_labeled_replicas = fully_labeled_replicas
        if observed_generation is not None:
            self.observed_generation = observed_generation
        if ready_replicas is not None:
            self.ready_replicas = ready_replicas
        self.replicas = replicas

    @property
    def available_replicas(self):
        """int: The number of available replicas (ready for at least
        minReadySeconds) for this replica set.
        """
        return self._available_replicas

    @available_replicas.setter
    def available_replicas(self, available_replicas):
        # Optional field: no client-side validation is required.
        self._available_replicas = available_replicas

    @property
    def conditions(self):
        """list[V1ReplicaSetCondition]: Represents the latest available
        observations of a replica set's current state.
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        # Optional field: no client-side validation is required.
        self._conditions = conditions

    @property
    def fully_labeled_replicas(self):
        """int: The number of pods that have labels matching the labels of the
        pod template of the replicaset.
        """
        return self._fully_labeled_replicas

    @fully_labeled_replicas.setter
    def fully_labeled_replicas(self, fully_labeled_replicas):
        # Optional field: no client-side validation is required.
        self._fully_labeled_replicas = fully_labeled_replicas

    @property
    def observed_generation(self):
        """int: ObservedGeneration reflects the generation of the most
        recently observed ReplicaSet.
        """
        return self._observed_generation

    @observed_generation.setter
    def observed_generation(self, observed_generation):
        # Optional field: no client-side validation is required.
        self._observed_generation = observed_generation

    @property
    def ready_replicas(self):
        """int: readyReplicas is the number of pods targeted by this
        ReplicaSet with a Ready Condition.
        """
        return self._ready_replicas

    @ready_replicas.setter
    def ready_replicas(self, ready_replicas):
        # Optional field: no client-side validation is required.
        self._ready_replicas = ready_replicas

    @property
    def replicas(self):
        """int: Replicas is the most recently observed number of replicas.
        More info:
        https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `replicas`, must not be `None`")  # noqa: E501

        self._replicas = replicas

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _convert(val):
            # Nested generated models expose to_dict(); lists and dicts are
            # converted element-by-element, everything else passes through.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to to_str() so print()/pprint() show the model contents."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts match."""
        if isinstance(other, V1ReplicaSetStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        if isinstance(other, V1ReplicaSetStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller.py
new file mode 100644
index 0000000000..2f5480a2fa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ReplicationController(object):
    """A ReplicationController resource (metadata, spec, status).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> OpenAPI type;
    # attribute_map maps attribute name -> JSON key in the wire payload.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1ReplicationControllerSpec',
        'status': 'V1ReplicationControllerStatus'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicationController - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the generated properties.
        for _name in self.openapi_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None

        # All fields are optional; only assign those that were provided.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this representation
        of an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        # Optional field: no client-side validation is required.
        self._api_version = api_version

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        # Optional field: no client-side validation is required.
        self._kind = kind

    @property
    def metadata(self):
        """V1ObjectMeta: Standard object metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        # Optional field: no client-side validation is required.
        self._metadata = metadata

    @property
    def spec(self):
        """V1ReplicationControllerSpec: Desired behavior of this controller."""
        return self._spec

    @spec.setter
    def spec(self, spec):
        # Optional field: no client-side validation is required.
        self._spec = spec

    @property
    def status(self):
        """V1ReplicationControllerStatus: Most recently observed status."""
        return self._status

    @status.setter
    def status(self, status):
        # Optional field: no client-side validation is required.
        self._status = status

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _convert(val):
            # Nested generated models expose to_dict(); lists and dicts are
            # converted element-by-element, everything else passes through.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to to_str() so print()/pprint() show the model contents."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts match."""
        if isinstance(other, V1ReplicationController):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        if isinstance(other, V1ReplicationController):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_condition.py
new file mode 100644
index 0000000000..b33ac61682
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ReplicationControllerCondition(object):
    """A single observed condition of a ReplicationController.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> OpenAPI type;
    # attribute_map maps attribute name -> JSON key in the wire payload.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicationControllerCondition - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        # Backing fields for the generated properties.
        for _name in self.openapi_types:
            setattr(self, '_' + _name, None)
        self.discriminator = None

        # `status` and `type` are required and always routed through their
        # validating setters; the remaining fields are optional.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """datetime: The last time the condition transitioned from one status
        to another.
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        # Optional field: no client-side validation is required.
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """str: A human readable message indicating details about the
        transition.
        """
        return self._message

    @message.setter
    def message(self, message):
        # Optional field: no client-side validation is required.
        self._message = message

    @property
    def reason(self):
        """str: The reason for the condition's last transition."""
        return self._reason

    @reason.setter
    def reason(self, reason):
        # Optional field: no client-side validation is required.
        self._reason = reason

    @property
    def status(self):
        """str: Status of the condition, one of True, False, Unknown."""
        return self._status

    @status.setter
    def status(self, status):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501

        self._status = status

    @property
    def type(self):
        """str: Type of replication controller condition."""
        return self._type

    @type.setter
    def type(self, type):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        def _convert(val):
            # Nested generated models expose to_dict(); lists and dicts are
            # converted element-by-element, everything else passes through.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to to_str() so print()/pprint() show the model contents."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their serialized dicts match."""
        if isinstance(other, V1ReplicationControllerCondition):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        if isinstance(other, V1ReplicationControllerCondition):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_list.py
new file mode 100644
index 0000000000..aa0757cae0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ReplicationControllerList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1ReplicationController]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1ReplicationControllerList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ReplicationControllerList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ReplicationControllerList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ReplicationControllerList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ReplicationControllerList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1ReplicationControllerList. # noqa: E501
+
+ List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller # noqa: E501
+
+ :return: The items of this V1ReplicationControllerList. # noqa: E501
+ :rtype: list[V1ReplicationController]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ReplicationControllerList.
+
+ List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller # noqa: E501
+
+ :param items: The items of this V1ReplicationControllerList. # noqa: E501
+ :type: list[V1ReplicationController]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ReplicationControllerList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ReplicationControllerList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ReplicationControllerList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ReplicationControllerList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ReplicationControllerList. # noqa: E501
+
+
+ :return: The metadata of this V1ReplicationControllerList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ReplicationControllerList.
+
+
+ :param metadata: The metadata of this V1ReplicationControllerList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ReplicationControllerList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ReplicationControllerList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_spec.py
new file mode 100644
index 0000000000..c7b850fd4f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_spec.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ReplicationControllerSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'min_ready_seconds': 'int',
+ 'replicas': 'int',
+ 'selector': 'dict(str, str)',
+ 'template': 'V1PodTemplateSpec'
+ }
+
+ attribute_map = {
+ 'min_ready_seconds': 'minReadySeconds',
+ 'replicas': 'replicas',
+ 'selector': 'selector',
+ 'template': 'template'
+ }
+
+ def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None, local_vars_configuration=None): # noqa: E501
+ """V1ReplicationControllerSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._min_ready_seconds = None
+ self._replicas = None
+ self._selector = None
+ self._template = None
+ self.discriminator = None
+
+ if min_ready_seconds is not None:
+ self.min_ready_seconds = min_ready_seconds
+ if replicas is not None:
+ self.replicas = replicas
+ if selector is not None:
+ self.selector = selector
+ if template is not None:
+ self.template = template
+
+ @property
+ def min_ready_seconds(self):
+ """Gets the min_ready_seconds of this V1ReplicationControllerSpec. # noqa: E501
+
+ Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
+
+ :return: The min_ready_seconds of this V1ReplicationControllerSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._min_ready_seconds
+
+ @min_ready_seconds.setter
+ def min_ready_seconds(self, min_ready_seconds):
+ """Sets the min_ready_seconds of this V1ReplicationControllerSpec.
+
+ Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
+
+ :param min_ready_seconds: The min_ready_seconds of this V1ReplicationControllerSpec. # noqa: E501
+ :type: int
+ """
+
+ self._min_ready_seconds = min_ready_seconds
+
+ @property
+ def replicas(self):
+ """Gets the replicas of this V1ReplicationControllerSpec. # noqa: E501
+
+ Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller # noqa: E501
+
+ :return: The replicas of this V1ReplicationControllerSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._replicas
+
+ @replicas.setter
+ def replicas(self, replicas):
+ """Sets the replicas of this V1ReplicationControllerSpec.
+
+ Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller # noqa: E501
+
+ :param replicas: The replicas of this V1ReplicationControllerSpec. # noqa: E501
+ :type: int
+ """
+
+ self._replicas = replicas
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1ReplicationControllerSpec. # noqa: E501
+
+ Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors # noqa: E501
+
+ :return: The selector of this V1ReplicationControllerSpec. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1ReplicationControllerSpec.
+
+ Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors # noqa: E501
+
+ :param selector: The selector of this V1ReplicationControllerSpec. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._selector = selector
+
+ @property
+ def template(self):
+ """Gets the template of this V1ReplicationControllerSpec. # noqa: E501
+
+
+ :return: The template of this V1ReplicationControllerSpec. # noqa: E501
+ :rtype: V1PodTemplateSpec
+ """
+ return self._template
+
+ @template.setter
+ def template(self, template):
+ """Sets the template of this V1ReplicationControllerSpec.
+
+
+ :param template: The template of this V1ReplicationControllerSpec. # noqa: E501
+ :type: V1PodTemplateSpec
+ """
+
+ self._template = template
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ReplicationControllerSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ReplicationControllerSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_status.py
new file mode 100644
index 0000000000..66c39e7a65
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_replication_controller_status.py
@@ -0,0 +1,263 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ReplicationControllerStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'available_replicas': 'int',
+ 'conditions': 'list[V1ReplicationControllerCondition]',
+ 'fully_labeled_replicas': 'int',
+ 'observed_generation': 'int',
+ 'ready_replicas': 'int',
+ 'replicas': 'int'
+ }
+
+ attribute_map = {
+ 'available_replicas': 'availableReplicas',
+ 'conditions': 'conditions',
+ 'fully_labeled_replicas': 'fullyLabeledReplicas',
+ 'observed_generation': 'observedGeneration',
+ 'ready_replicas': 'readyReplicas',
+ 'replicas': 'replicas'
+ }
+
+ def __init__(self, available_replicas=None, conditions=None, fully_labeled_replicas=None, observed_generation=None, ready_replicas=None, replicas=None, local_vars_configuration=None): # noqa: E501
+ """V1ReplicationControllerStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._available_replicas = None
+ self._conditions = None
+ self._fully_labeled_replicas = None
+ self._observed_generation = None
+ self._ready_replicas = None
+ self._replicas = None
+ self.discriminator = None
+
+ if available_replicas is not None:
+ self.available_replicas = available_replicas
+ if conditions is not None:
+ self.conditions = conditions
+ if fully_labeled_replicas is not None:
+ self.fully_labeled_replicas = fully_labeled_replicas
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+ if ready_replicas is not None:
+ self.ready_replicas = ready_replicas
+ self.replicas = replicas
+
+ @property
+ def available_replicas(self):
+ """Gets the available_replicas of this V1ReplicationControllerStatus. # noqa: E501
+
+ The number of available replicas (ready for at least minReadySeconds) for this replication controller. # noqa: E501
+
+ :return: The available_replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._available_replicas
+
+ @available_replicas.setter
+ def available_replicas(self, available_replicas):
+ """Sets the available_replicas of this V1ReplicationControllerStatus.
+
+ The number of available replicas (ready for at least minReadySeconds) for this replication controller. # noqa: E501
+
+ :param available_replicas: The available_replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :type: int
+ """
+
+ self._available_replicas = available_replicas
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1ReplicationControllerStatus. # noqa: E501
+
+ Represents the latest available observations of a replication controller's current state. # noqa: E501
+
+ :return: The conditions of this V1ReplicationControllerStatus. # noqa: E501
+ :rtype: list[V1ReplicationControllerCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1ReplicationControllerStatus.
+
+ Represents the latest available observations of a replication controller's current state. # noqa: E501
+
+ :param conditions: The conditions of this V1ReplicationControllerStatus. # noqa: E501
+ :type: list[V1ReplicationControllerCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def fully_labeled_replicas(self):
+ """Gets the fully_labeled_replicas of this V1ReplicationControllerStatus. # noqa: E501
+
+ The number of pods that have labels matching the labels of the pod template of the replication controller. # noqa: E501
+
+ :return: The fully_labeled_replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._fully_labeled_replicas
+
+ @fully_labeled_replicas.setter
+ def fully_labeled_replicas(self, fully_labeled_replicas):
+ """Sets the fully_labeled_replicas of this V1ReplicationControllerStatus.
+
+ The number of pods that have labels matching the labels of the pod template of the replication controller. # noqa: E501
+
+ :param fully_labeled_replicas: The fully_labeled_replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :type: int
+ """
+
+ self._fully_labeled_replicas = fully_labeled_replicas
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1ReplicationControllerStatus. # noqa: E501
+
+ ObservedGeneration reflects the generation of the most recently observed replication controller. # noqa: E501
+
+ :return: The observed_generation of this V1ReplicationControllerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1ReplicationControllerStatus.
+
+ ObservedGeneration reflects the generation of the most recently observed replication controller. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1ReplicationControllerStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ @property
+ def ready_replicas(self):
+ """Gets the ready_replicas of this V1ReplicationControllerStatus. # noqa: E501
+
+ The number of ready replicas for this replication controller. # noqa: E501
+
+ :return: The ready_replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._ready_replicas
+
+ @ready_replicas.setter
+ def ready_replicas(self, ready_replicas):
+ """Sets the ready_replicas of this V1ReplicationControllerStatus.
+
+ The number of ready replicas for this replication controller. # noqa: E501
+
+ :param ready_replicas: The ready_replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :type: int
+ """
+
+ self._ready_replicas = ready_replicas
+
+ @property
+ def replicas(self):
+ """Gets the replicas of this V1ReplicationControllerStatus. # noqa: E501
+
+ Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller # noqa: E501
+
+ :return: The replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._replicas
+
+ @replicas.setter
+ def replicas(self, replicas):
+ """Sets the replicas of this V1ReplicationControllerStatus.
+
+ Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller # noqa: E501
+
+ :param replicas: The replicas of this V1ReplicationControllerStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and replicas is None: # noqa: E501
+ raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
+
+ self._replicas = replicas
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ReplicationControllerStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ReplicationControllerStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_attributes.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_attributes.py
new file mode 100644
index 0000000000..4bf6a21ab2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_attributes.py
@@ -0,0 +1,290 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceAttributes(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'group': 'str',
+ 'name': 'str',
+ 'namespace': 'str',
+ 'resource': 'str',
+ 'subresource': 'str',
+ 'verb': 'str',
+ 'version': 'str'
+ }
+
+ attribute_map = {
+ 'group': 'group',
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'resource': 'resource',
+ 'subresource': 'subresource',
+ 'verb': 'verb',
+ 'version': 'version'
+ }
+
+ def __init__(self, group=None, name=None, namespace=None, resource=None, subresource=None, verb=None, version=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceAttributes - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._group = None
+ self._name = None
+ self._namespace = None
+ self._resource = None
+ self._subresource = None
+ self._verb = None
+ self._version = None
+ self.discriminator = None
+
+ if group is not None:
+ self.group = group
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if resource is not None:
+ self.resource = resource
+ if subresource is not None:
+ self.subresource = subresource
+ if verb is not None:
+ self.verb = verb
+ if version is not None:
+ self.version = version
+
+ @property
+ def group(self):
+ """Gets the group of this V1ResourceAttributes. # noqa: E501
+
+ Group is the API Group of the Resource. \"*\" means all. # noqa: E501
+
+ :return: The group of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._group
+
+ @group.setter
+ def group(self, group):
+ """Sets the group of this V1ResourceAttributes.
+
+ Group is the API Group of the Resource. \"*\" means all. # noqa: E501
+
+ :param group: The group of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._group = group
+
+ @property
+ def name(self):
+ """Gets the name of this V1ResourceAttributes. # noqa: E501
+
+ Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all. # noqa: E501
+
+ :return: The name of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ResourceAttributes.
+
+ Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all. # noqa: E501
+
+ :param name: The name of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1ResourceAttributes. # noqa: E501
+
+ Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview # noqa: E501
+
+ :return: The namespace of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1ResourceAttributes.
+
+ Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview # noqa: E501
+
+ :param namespace: The namespace of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def resource(self):
+ """Gets the resource of this V1ResourceAttributes. # noqa: E501
+
+ Resource is one of the existing resource types. \"*\" means all. # noqa: E501
+
+ :return: The resource of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._resource
+
+ @resource.setter
+ def resource(self, resource):
+ """Sets the resource of this V1ResourceAttributes.
+
+ Resource is one of the existing resource types. \"*\" means all. # noqa: E501
+
+ :param resource: The resource of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._resource = resource
+
+ @property
+ def subresource(self):
+ """Gets the subresource of this V1ResourceAttributes. # noqa: E501
+
+ Subresource is one of the existing resource types. \"\" means none. # noqa: E501
+
+ :return: The subresource of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._subresource
+
+ @subresource.setter
+ def subresource(self, subresource):
+ """Sets the subresource of this V1ResourceAttributes.
+
+ Subresource is one of the existing resource types. \"\" means none. # noqa: E501
+
+ :param subresource: The subresource of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._subresource = subresource
+
+ @property
+ def verb(self):
+ """Gets the verb of this V1ResourceAttributes. # noqa: E501
+
+ Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all. # noqa: E501
+
+ :return: The verb of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._verb
+
+ @verb.setter
+ def verb(self, verb):
+ """Sets the verb of this V1ResourceAttributes.
+
+ Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all. # noqa: E501
+
+ :param verb: The verb of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._verb = verb
+
+ @property
+ def version(self):
+ """Gets the version of this V1ResourceAttributes. # noqa: E501
+
+ Version is the API Version of the Resource. \"*\" means all. # noqa: E501
+
+ :return: The version of this V1ResourceAttributes. # noqa: E501
+ :rtype: str
+ """
+ return self._version
+
+ @version.setter
+ def version(self, version):
+ """Sets the version of this V1ResourceAttributes.
+
+ Version is the API Version of the Resource. \"*\" means all. # noqa: E501
+
+ :param version: The version of this V1ResourceAttributes. # noqa: E501
+ :type: str
+ """
+
+ self._version = version
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceAttributes):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceAttributes):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_claim.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_claim.py
new file mode 100644
index 0000000000..bbe7dfc71a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_claim.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceClaim(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceClaim - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1ResourceClaim. # noqa: E501
+
+ Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. # noqa: E501
+
+ :return: The name of this V1ResourceClaim. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1ResourceClaim.
+
+ Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. # noqa: E501
+
+ :param name: The name of this V1ResourceClaim. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceClaim):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceClaim):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_field_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_field_selector.py
new file mode 100644
index 0000000000..4b097b829c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_field_selector.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceFieldSelector(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'container_name': 'str',
+ 'divisor': 'str',
+ 'resource': 'str'
+ }
+
+ attribute_map = {
+ 'container_name': 'containerName',
+ 'divisor': 'divisor',
+ 'resource': 'resource'
+ }
+
+ def __init__(self, container_name=None, divisor=None, resource=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceFieldSelector - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._container_name = None
+ self._divisor = None
+ self._resource = None
+ self.discriminator = None
+
+ if container_name is not None:
+ self.container_name = container_name
+ if divisor is not None:
+ self.divisor = divisor
+ self.resource = resource
+
+ @property
+ def container_name(self):
+ """Gets the container_name of this V1ResourceFieldSelector. # noqa: E501
+
+ Container name: required for volumes, optional for env vars # noqa: E501
+
+ :return: The container_name of this V1ResourceFieldSelector. # noqa: E501
+ :rtype: str
+ """
+ return self._container_name
+
+ @container_name.setter
+ def container_name(self, container_name):
+ """Sets the container_name of this V1ResourceFieldSelector.
+
+ Container name: required for volumes, optional for env vars # noqa: E501
+
+ :param container_name: The container_name of this V1ResourceFieldSelector. # noqa: E501
+ :type: str
+ """
+
+ self._container_name = container_name
+
+ @property
+ def divisor(self):
+ """Gets the divisor of this V1ResourceFieldSelector. # noqa: E501
+
+ Specifies the output format of the exposed resources, defaults to \"1\" # noqa: E501
+
+ :return: The divisor of this V1ResourceFieldSelector. # noqa: E501
+ :rtype: str
+ """
+ return self._divisor
+
+ @divisor.setter
+ def divisor(self, divisor):
+ """Sets the divisor of this V1ResourceFieldSelector.
+
+ Specifies the output format of the exposed resources, defaults to \"1\" # noqa: E501
+
+ :param divisor: The divisor of this V1ResourceFieldSelector. # noqa: E501
+ :type: str
+ """
+
+ self._divisor = divisor
+
+ @property
+ def resource(self):
+ """Gets the resource of this V1ResourceFieldSelector. # noqa: E501
+
+ Required: resource to select # noqa: E501
+
+ :return: The resource of this V1ResourceFieldSelector. # noqa: E501
+ :rtype: str
+ """
+ return self._resource
+
+ @resource.setter
+ def resource(self, resource):
+ """Sets the resource of this V1ResourceFieldSelector.
+
+ Required: resource to select # noqa: E501
+
+ :param resource: The resource of this V1ResourceFieldSelector. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
+ raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
+
+ self._resource = resource
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceFieldSelector):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceFieldSelector):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota.py
new file mode 100644
index 0000000000..5f2a31d6a6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceQuota(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1ResourceQuotaSpec',
+ 'status': 'V1ResourceQuotaStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceQuota - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ResourceQuota. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ResourceQuota. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ResourceQuota.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ResourceQuota. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ResourceQuota. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ResourceQuota. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ResourceQuota.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ResourceQuota. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ResourceQuota. # noqa: E501
+
+
+ :return: The metadata of this V1ResourceQuota. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ResourceQuota.
+
+
+ :param metadata: The metadata of this V1ResourceQuota. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1ResourceQuota. # noqa: E501
+
+
+ :return: The spec of this V1ResourceQuota. # noqa: E501
+ :rtype: V1ResourceQuotaSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1ResourceQuota.
+
+
+ :param spec: The spec of this V1ResourceQuota. # noqa: E501
+ :type: V1ResourceQuotaSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1ResourceQuota. # noqa: E501
+
+
+ :return: The status of this V1ResourceQuota. # noqa: E501
+ :rtype: V1ResourceQuotaStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1ResourceQuota.
+
+
+ :param status: The status of this V1ResourceQuota. # noqa: E501
+ :type: V1ResourceQuotaStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceQuota):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceQuota):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_list.py
new file mode 100644
index 0000000000..eb1e177cde
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceQuotaList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1ResourceQuota]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceQuotaList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1ResourceQuotaList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1ResourceQuotaList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1ResourceQuotaList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1ResourceQuotaList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1ResourceQuotaList. # noqa: E501
+
+ Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
+
+ :return: The items of this V1ResourceQuotaList. # noqa: E501
+ :rtype: list[V1ResourceQuota]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1ResourceQuotaList.
+
+ Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
+
+ :param items: The items of this V1ResourceQuotaList. # noqa: E501
+ :type: list[V1ResourceQuota]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1ResourceQuotaList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1ResourceQuotaList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1ResourceQuotaList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1ResourceQuotaList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1ResourceQuotaList. # noqa: E501
+
+
+ :return: The metadata of this V1ResourceQuotaList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1ResourceQuotaList.
+
+
+ :param metadata: The metadata of this V1ResourceQuotaList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceQuotaList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceQuotaList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_spec.py
new file mode 100644
index 0000000000..f2c49c3893
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_spec.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceQuotaSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'hard': 'dict(str, str)',
+ 'scope_selector': 'V1ScopeSelector',
+ 'scopes': 'list[str]'
+ }
+
+ attribute_map = {
+ 'hard': 'hard',
+ 'scope_selector': 'scopeSelector',
+ 'scopes': 'scopes'
+ }
+
+ def __init__(self, hard=None, scope_selector=None, scopes=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceQuotaSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._hard = None
+ self._scope_selector = None
+ self._scopes = None
+ self.discriminator = None
+
+ if hard is not None:
+ self.hard = hard
+ if scope_selector is not None:
+ self.scope_selector = scope_selector
+ if scopes is not None:
+ self.scopes = scopes
+
+ @property
+ def hard(self):
+ """Gets the hard of this V1ResourceQuotaSpec. # noqa: E501
+
+ hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
+
+ :return: The hard of this V1ResourceQuotaSpec. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._hard
+
+ @hard.setter
+ def hard(self, hard):
+ """Sets the hard of this V1ResourceQuotaSpec.
+
+ hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
+
+ :param hard: The hard of this V1ResourceQuotaSpec. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._hard = hard
+
+ @property
+ def scope_selector(self):
+ """Gets the scope_selector of this V1ResourceQuotaSpec. # noqa: E501
+
+
+ :return: The scope_selector of this V1ResourceQuotaSpec. # noqa: E501
+ :rtype: V1ScopeSelector
+ """
+ return self._scope_selector
+
+ @scope_selector.setter
+ def scope_selector(self, scope_selector):
+ """Sets the scope_selector of this V1ResourceQuotaSpec.
+
+
+ :param scope_selector: The scope_selector of this V1ResourceQuotaSpec. # noqa: E501
+ :type: V1ScopeSelector
+ """
+
+ self._scope_selector = scope_selector
+
+ @property
+ def scopes(self):
+ """Gets the scopes of this V1ResourceQuotaSpec. # noqa: E501
+
+ A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. # noqa: E501
+
+ :return: The scopes of this V1ResourceQuotaSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._scopes
+
+ @scopes.setter
+ def scopes(self, scopes):
+ """Sets the scopes of this V1ResourceQuotaSpec.
+
+ A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. # noqa: E501
+
+ :param scopes: The scopes of this V1ResourceQuotaSpec. # noqa: E501
+ :type: list[str]
+ """
+
+ self._scopes = scopes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceQuotaSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceQuotaSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_status.py
new file mode 100644
index 0000000000..c7fcdbe429
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_quota_status.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceQuotaStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'hard': 'dict(str, str)',
+ 'used': 'dict(str, str)'
+ }
+
+ attribute_map = {
+ 'hard': 'hard',
+ 'used': 'used'
+ }
+
+ def __init__(self, hard=None, used=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceQuotaStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._hard = None
+ self._used = None
+ self.discriminator = None
+
+ if hard is not None:
+ self.hard = hard
+ if used is not None:
+ self.used = used
+
+ @property
+ def hard(self):
+ """Gets the hard of this V1ResourceQuotaStatus. # noqa: E501
+
+ Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
+
+ :return: The hard of this V1ResourceQuotaStatus. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._hard
+
+ @hard.setter
+ def hard(self, hard):
+ """Sets the hard of this V1ResourceQuotaStatus.
+
+ Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ # noqa: E501
+
+ :param hard: The hard of this V1ResourceQuotaStatus. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._hard = hard
+
+ @property
+ def used(self):
+ """Gets the used of this V1ResourceQuotaStatus. # noqa: E501
+
+ Used is the current observed total usage of the resource in the namespace. # noqa: E501
+
+ :return: The used of this V1ResourceQuotaStatus. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._used
+
+ @used.setter
+ def used(self, used):
+ """Sets the used of this V1ResourceQuotaStatus.
+
+ Used is the current observed total usage of the resource in the namespace. # noqa: E501
+
+ :param used: The used of this V1ResourceQuotaStatus. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._used = used
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceQuotaStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceQuotaStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_requirements.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_requirements.py
new file mode 100644
index 0000000000..c1cade49e2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_requirements.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceRequirements(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'claims': 'list[V1ResourceClaim]',
+ 'limits': 'dict(str, str)',
+ 'requests': 'dict(str, str)'
+ }
+
+ attribute_map = {
+ 'claims': 'claims',
+ 'limits': 'limits',
+ 'requests': 'requests'
+ }
+
+ def __init__(self, claims=None, limits=None, requests=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceRequirements - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._claims = None
+ self._limits = None
+ self._requests = None
+ self.discriminator = None
+
+ if claims is not None:
+ self.claims = claims
+ if limits is not None:
+ self.limits = limits
+ if requests is not None:
+ self.requests = requests
+
+ @property
+ def claims(self):
+ """Gets the claims of this V1ResourceRequirements. # noqa: E501
+
+ Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. # noqa: E501
+
+ :return: The claims of this V1ResourceRequirements. # noqa: E501
+ :rtype: list[V1ResourceClaim]
+ """
+ return self._claims
+
+ @claims.setter
+ def claims(self, claims):
+ """Sets the claims of this V1ResourceRequirements.
+
+ Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. # noqa: E501
+
+ :param claims: The claims of this V1ResourceRequirements. # noqa: E501
+ :type: list[V1ResourceClaim]
+ """
+
+ self._claims = claims
+
+ @property
+ def limits(self):
+ """Gets the limits of this V1ResourceRequirements. # noqa: E501
+
+ Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
+
+ :return: The limits of this V1ResourceRequirements. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._limits
+
+ @limits.setter
+ def limits(self, limits):
+ """Sets the limits of this V1ResourceRequirements.
+
+ Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
+
+ :param limits: The limits of this V1ResourceRequirements. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._limits = limits
+
+ @property
+ def requests(self):
+ """Gets the requests of this V1ResourceRequirements. # noqa: E501
+
+ Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
+
+ :return: The requests of this V1ResourceRequirements. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._requests
+
+ @requests.setter
+ def requests(self, requests):
+ """Sets the requests of this V1ResourceRequirements.
+
+ Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501
+
+ :param requests: The requests of this V1ResourceRequirements. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._requests = requests
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceRequirements):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceRequirements):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_resource_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_rule.py
new file mode 100644
index 0000000000..399e6723f6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_resource_rule.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ResourceRule(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'resource_names': 'list[str]',
+ 'resources': 'list[str]',
+ 'verbs': 'list[str]'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'resource_names': 'resourceNames',
+ 'resources': 'resources',
+ 'verbs': 'verbs'
+ }
+
+ def __init__(self, api_groups=None, resource_names=None, resources=None, verbs=None, local_vars_configuration=None): # noqa: E501
+ """V1ResourceRule - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._resource_names = None
+ self._resources = None
+ self._verbs = None
+ self.discriminator = None
+
+ if api_groups is not None:
+ self.api_groups = api_groups
+ if resource_names is not None:
+ self.resource_names = resource_names
+ if resources is not None:
+ self.resources = resources
+ self.verbs = verbs
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1ResourceRule. # noqa: E501
+
+ APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all. # noqa: E501
+
+ :return: The api_groups of this V1ResourceRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1ResourceRule.
+
+ APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all. # noqa: E501
+
+ :param api_groups: The api_groups of this V1ResourceRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_groups = api_groups
+
+ @property
+ def resource_names(self):
+ """Gets the resource_names of this V1ResourceRule. # noqa: E501
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all. # noqa: E501
+
+ :return: The resource_names of this V1ResourceRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resource_names
+
+ @resource_names.setter
+ def resource_names(self, resource_names):
+ """Sets the resource_names of this V1ResourceRule.
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all. # noqa: E501
+
+ :param resource_names: The resource_names of this V1ResourceRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resource_names = resource_names
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1ResourceRule. # noqa: E501
+
+ Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups. \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups. # noqa: E501
+
+ :return: The resources of this V1ResourceRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1ResourceRule.
+
+ Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups. \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups. # noqa: E501
+
+ :param resources: The resources of this V1ResourceRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resources = resources
+
+ @property
+ def verbs(self):
+ """Gets the verbs of this V1ResourceRule. # noqa: E501
+
+ Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all. # noqa: E501
+
+ :return: The verbs of this V1ResourceRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._verbs
+
+ @verbs.setter
+ def verbs(self, verbs):
+ """Sets the verbs of this V1ResourceRule.
+
+ Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all. # noqa: E501
+
+ :param verbs: The verbs of this V1ResourceRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
+ raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
+
+ self._verbs = verbs
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ResourceRule):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ResourceRule):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_role.py b/contrib/python/kubernetes/kubernetes/client/models/v1_role.py
new file mode 100644
index 0000000000..0a944358fa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_role.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Role(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'rules': 'list[V1PolicyRule]'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'rules': 'rules'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, rules=None, local_vars_configuration=None): # noqa: E501
+ """V1Role - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._rules = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if rules is not None:
+ self.rules = rules
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Role. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Role. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Role.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Role. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Role. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Role. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Role.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Role. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Role. # noqa: E501
+
+
+ :return: The metadata of this V1Role. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Role.
+
+
+ :param metadata: The metadata of this V1Role. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def rules(self):
+ """Gets the rules of this V1Role. # noqa: E501
+
+ Rules holds all the PolicyRules for this Role # noqa: E501
+
+ :return: The rules of this V1Role. # noqa: E501
+ :rtype: list[V1PolicyRule]
+ """
+ return self._rules
+
+ @rules.setter
+ def rules(self, rules):
+ """Sets the rules of this V1Role.
+
+ Rules holds all the PolicyRules for this Role # noqa: E501
+
+ :param rules: The rules of this V1Role. # noqa: E501
+ :type: list[V1PolicyRule]
+ """
+
+ self._rules = rules
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Role):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Role):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_role_binding.py b/contrib/python/kubernetes/kubernetes/client/models/v1_role_binding.py
new file mode 100644
index 0000000000..d44c610f74
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_role_binding.py
@@ -0,0 +1,231 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RoleBinding(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'role_ref': 'V1RoleRef',
+ 'subjects': 'list[V1Subject]'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'role_ref': 'roleRef',
+ 'subjects': 'subjects'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None, local_vars_configuration=None): # noqa: E501
+ """V1RoleBinding - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._role_ref = None
+ self._subjects = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.role_ref = role_ref
+ if subjects is not None:
+ self.subjects = subjects
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1RoleBinding. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1RoleBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1RoleBinding.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1RoleBinding. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1RoleBinding. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1RoleBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1RoleBinding.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1RoleBinding. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1RoleBinding. # noqa: E501
+
+
+ :return: The metadata of this V1RoleBinding. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1RoleBinding.
+
+
+ :param metadata: The metadata of this V1RoleBinding. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def role_ref(self):
+ """Gets the role_ref of this V1RoleBinding. # noqa: E501
+
+
+ :return: The role_ref of this V1RoleBinding. # noqa: E501
+ :rtype: V1RoleRef
+ """
+ return self._role_ref
+
+ @role_ref.setter
+ def role_ref(self, role_ref):
+ """Sets the role_ref of this V1RoleBinding.
+
+
+ :param role_ref: The role_ref of this V1RoleBinding. # noqa: E501
+ :type: V1RoleRef
+ """
+ if self.local_vars_configuration.client_side_validation and role_ref is None: # noqa: E501
+ raise ValueError("Invalid value for `role_ref`, must not be `None`") # noqa: E501
+
+ self._role_ref = role_ref
+
+ @property
+ def subjects(self):
+ """Gets the subjects of this V1RoleBinding. # noqa: E501
+
+ Subjects holds references to the objects the role applies to. # noqa: E501
+
+ :return: The subjects of this V1RoleBinding. # noqa: E501
+ :rtype: list[V1Subject]
+ """
+ return self._subjects
+
+ @subjects.setter
+ def subjects(self, subjects):
+ """Sets the subjects of this V1RoleBinding.
+
+ Subjects holds references to the objects the role applies to. # noqa: E501
+
+ :param subjects: The subjects of this V1RoleBinding. # noqa: E501
+ :type: list[V1Subject]
+ """
+
+ self._subjects = subjects
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RoleBinding):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RoleBinding):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_role_binding_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_role_binding_list.py
new file mode 100644
index 0000000000..ef6dab66cb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_role_binding_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RoleBindingList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1RoleBinding]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1RoleBindingList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1RoleBindingList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1RoleBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1RoleBindingList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1RoleBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1RoleBindingList. # noqa: E501
+
+ Items is a list of RoleBindings # noqa: E501
+
+ :return: The items of this V1RoleBindingList. # noqa: E501
+ :rtype: list[V1RoleBinding]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1RoleBindingList.
+
+ Items is a list of RoleBindings # noqa: E501
+
+ :param items: The items of this V1RoleBindingList. # noqa: E501
+ :type: list[V1RoleBinding]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1RoleBindingList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1RoleBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1RoleBindingList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1RoleBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1RoleBindingList. # noqa: E501
+
+
+ :return: The metadata of this V1RoleBindingList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1RoleBindingList.
+
+
+ :param metadata: The metadata of this V1RoleBindingList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RoleBindingList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RoleBindingList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_role_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_role_list.py
new file mode 100644
index 0000000000..80fd06c9b1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_role_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1RoleList(object):
    """RoleList is a collection of Roles.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate it rather than editing
    by hand.
    """

    # openapi_types maps each python attribute to its OpenAPI type string;
    # attribute_map maps each python attribute to its JSON field name.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Role]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1RoleList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # `items` is required by the schema, so it always goes through its
        # setter; the optional fields are only assigned when provided.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1RoleList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1RoleList.

        :param api_version: Versioned schema identifier of the object.
        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """Items is a list of Roles.

        :return: The items of this V1RoleList.
        :rtype: list[V1Role]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the Roles contained in this list.

        :param items: The items of this V1RoleList (required by the API).
        :type: list[V1Role]
        :raises ValueError: if client-side validation is on and items is None.
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1RoleList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1RoleList.

        :param kind: REST resource kind, in CamelCase.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard list metadata of this V1RoleList.

        :return: The metadata of this V1RoleList.
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the standard list metadata of this V1RoleList.

        :param metadata: The metadata of this V1RoleList.
        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Return the model's properties as a dict (nested models included)."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        as_dict = self.to_dict()
        return pprint.pformat(as_dict)

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Two V1RoleList objects are equal when their dict forms match."""
        if isinstance(other, V1RoleList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1RoleList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_role_ref.py b/contrib/python/kubernetes/kubernetes/client/models/v1_role_ref.py
new file mode 100644
index 0000000000..aea998cb94
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_role_ref.py
@@ -0,0 +1,181 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1RoleRef(object):
    """RoleRef contains information that points to the role being used.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate it rather than editing
    by hand.
    """

    # openapi_types maps each python attribute to its OpenAPI type string;
    # attribute_map maps each python attribute to its JSON field name.
    openapi_types = {
        'api_group': 'str',
        'kind': 'str',
        'name': 'str'
    }

    attribute_map = {
        'api_group': 'apiGroup',
        'kind': 'kind',
        'name': 'name'
    }

    def __init__(self, api_group=None, kind=None, name=None, local_vars_configuration=None):  # noqa: E501
        """V1RoleRef - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._api_group = None
        self._kind = None
        self._name = None
        self.discriminator = None

        # All three fields are required by the schema, so each goes through
        # its validating setter unconditionally.
        self.api_group = api_group
        self.kind = kind
        self.name = name

    @property
    def api_group(self):
        """APIGroup is the group for the resource being referenced.

        :return: The api_group of this V1RoleRef.
        :rtype: str
        """
        return self._api_group

    @api_group.setter
    def api_group(self, api_group):
        """Set the api_group of this V1RoleRef.

        :param api_group: Group of the referenced resource (required).
        :type: str
        :raises ValueError: if validation is on and api_group is None.
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and api_group is None:
            raise ValueError("Invalid value for `api_group`, must not be `None`")  # noqa: E501

        self._api_group = api_group

    @property
    def kind(self):
        """Kind is the type of resource being referenced.

        :return: The kind of this V1RoleRef.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1RoleRef.

        :param kind: Type of the referenced resource (required).
        :type: str
        :raises ValueError: if validation is on and kind is None.
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and kind is None:
            raise ValueError("Invalid value for `kind`, must not be `None`")  # noqa: E501

        self._kind = kind

    @property
    def name(self):
        """Name is the name of resource being referenced.

        :return: The name of this V1RoleRef.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this V1RoleRef.

        :param name: Name of the referenced resource (required).
        :type: str
        :raises ValueError: if validation is on and name is None.
        """
        validating = self.local_vars_configuration.client_side_validation
        if validating and name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    def to_dict(self):
        """Return the model's properties as a dict (nested models included)."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        as_dict = self.to_dict()
        return pprint.pformat(as_dict)

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Two V1RoleRef objects are equal when their dict forms match."""
        if isinstance(other, V1RoleRef):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1RoleRef):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_daemon_set.py b/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_daemon_set.py
new file mode 100644
index 0000000000..f921823daf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_daemon_set.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1RollingUpdateDaemonSet(object):
    """Spec to control the desired behavior of daemon set rolling update.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'max_surge': 'object',
        'max_unavailable': 'object'
    }

    attribute_map = {
        'max_surge': 'maxSurge',
        'max_unavailable': 'maxUnavailable'
    }

    def __init__(self, max_surge=None, max_unavailable=None, local_vars_configuration=None):  # noqa: E501
        """V1RollingUpdateDaemonSet - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._max_surge = None
        self._max_unavailable = None
        self.discriminator = None

        # Both fields are optional; the API server applies defaults.
        if max_surge is not None:
            self.max_surge = max_surge
        if max_unavailable is not None:
            self.max_unavailable = max_unavailable

    @property
    def max_surge(self):
        """Gets the max_surge of this V1RollingUpdateDaemonSet.  # noqa: E501

        The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.  # noqa: E501

        :return: The max_surge of this V1RollingUpdateDaemonSet.  # noqa: E501
        :rtype: object
        """
        return self._max_surge

    @max_surge.setter
    def max_surge(self, max_surge):
        """Sets the max_surge of this V1RollingUpdateDaemonSet.

        Accepts an absolute number (ex: 5) or a percentage string (ex: "10%").

        :param max_surge: The max_surge of this V1RollingUpdateDaemonSet.  # noqa: E501
        :type: object
        """

        self._max_surge = max_surge

    @property
    def max_unavailable(self):
        """Gets the max_unavailable of this V1RollingUpdateDaemonSet.  # noqa: E501

        The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.  # noqa: E501

        :return: The max_unavailable of this V1RollingUpdateDaemonSet.  # noqa: E501
        :rtype: object
        """
        return self._max_unavailable

    @max_unavailable.setter
    def max_unavailable(self, max_unavailable):
        """Sets the max_unavailable of this V1RollingUpdateDaemonSet.

        Accepts an absolute number (ex: 5) or a percentage string (ex: "10%").

        :param max_unavailable: The max_unavailable of this V1RollingUpdateDaemonSet.  # noqa: E501
        :type: object
        """

        self._max_unavailable = max_unavailable

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested generated models (anything exposing ``to_dict``) are
        serialized recursively; lists and dicts one level deep.
        """
        result = {}

        # This py3-only client does not need six; plain dict iteration
        # replaces six.iteritems with identical semantics.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1RollingUpdateDaemonSet):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1RollingUpdateDaemonSet):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_deployment.py b/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_deployment.py
new file mode 100644
index 0000000000..d4686e39ba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_deployment.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1RollingUpdateDeployment(object):
    """Spec to control the desired behavior of rolling update.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate it rather than editing
    by hand.
    """

    # openapi_types maps each python attribute to its OpenAPI type string;
    # attribute_map maps each python attribute to its JSON field name.
    openapi_types = {
        'max_surge': 'object',
        'max_unavailable': 'object'
    }

    attribute_map = {
        'max_surge': 'maxSurge',
        'max_unavailable': 'maxUnavailable'
    }

    def __init__(self, max_surge=None, max_unavailable=None, local_vars_configuration=None):  # noqa: E501
        """V1RollingUpdateDeployment - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._max_surge = None
        self._max_unavailable = None
        self.discriminator = None

        # Both fields are optional; only assign the ones that were supplied.
        if max_surge is not None:
            self.max_surge = max_surge
        if max_unavailable is not None:
            self.max_unavailable = max_unavailable

    @property
    def max_surge(self):
        """The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.  # noqa: E501

        :return: The max_surge of this V1RollingUpdateDeployment.
        :rtype: object
        """
        return self._max_surge

    @max_surge.setter
    def max_surge(self, max_surge):
        """Set the max_surge of this V1RollingUpdateDeployment.

        :param max_surge: Absolute number (ex: 5) or percentage (ex: "10%").
        :type: object
        """
        self._max_surge = max_surge

    @property
    def max_unavailable(self):
        """The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.  # noqa: E501

        :return: The max_unavailable of this V1RollingUpdateDeployment.
        :rtype: object
        """
        return self._max_unavailable

    @max_unavailable.setter
    def max_unavailable(self, max_unavailable):
        """Set the max_unavailable of this V1RollingUpdateDeployment.

        :param max_unavailable: Absolute number (ex: 5) or percentage (ex: "10%").
        :type: object
        """
        self._max_unavailable = max_unavailable

    def to_dict(self):
        """Return the model's properties as a dict (nested models included)."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        as_dict = self.to_dict()
        return pprint.pformat(as_dict)

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Two V1RollingUpdateDeployment objects are equal when their dicts match."""
        if isinstance(other, V1RollingUpdateDeployment):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1RollingUpdateDeployment):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
new file mode 100644
index 0000000000..1b9f3ad2e2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1RollingUpdateStatefulSetStrategy(object):
    """RollingUpdateStatefulSetStrategy is used to communicate parameters
    for RollingUpdateStatefulSetStrategyType.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); regenerate it rather than editing
    by hand.
    """

    # openapi_types maps each python attribute to its OpenAPI type string;
    # attribute_map maps each python attribute to its JSON field name.
    openapi_types = {
        'max_unavailable': 'object',
        'partition': 'int'
    }

    attribute_map = {
        'max_unavailable': 'maxUnavailable',
        'partition': 'partition'
    }

    def __init__(self, max_unavailable=None, partition=None, local_vars_configuration=None):  # noqa: E501
        """V1RollingUpdateStatefulSetStrategy - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        self._max_unavailable = None
        self._partition = None
        self.discriminator = None

        # Both fields are optional; only assign the ones that were supplied.
        if max_unavailable is not None:
            self.max_unavailable = max_unavailable
        if partition is not None:
            self.partition = partition

    @property
    def max_unavailable(self):
        """The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.  # noqa: E501

        :return: The max_unavailable of this V1RollingUpdateStatefulSetStrategy.
        :rtype: object
        """
        return self._max_unavailable

    @max_unavailable.setter
    def max_unavailable(self, max_unavailable):
        """Set the max_unavailable of this V1RollingUpdateStatefulSetStrategy.

        :param max_unavailable: Absolute number (ex: 5) or percentage (ex: "10%").
        :type: object
        """
        self._max_unavailable = max_unavailable

    @property
    def partition(self):
        """Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.  # noqa: E501

        :return: The partition of this V1RollingUpdateStatefulSetStrategy.
        :rtype: int
        """
        return self._partition

    @partition.setter
    def partition(self, partition):
        """Set the partition of this V1RollingUpdateStatefulSetStrategy.

        :param partition: Ordinal at which the StatefulSet is partitioned.
        :type: int
        """
        self._partition = partition

    def to_dict(self):
        """Return the model's properties as a dict (nested models included)."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        as_dict = self.to_dict()
        return pprint.pformat(as_dict)

    def __repr__(self):
        """Used by `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their dict forms match."""
        if isinstance(other, V1RollingUpdateStatefulSetStrategy):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        if isinstance(other, V1RollingUpdateStatefulSetStrategy):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_rule_with_operations.py b/contrib/python/kubernetes/kubernetes/client/models/v1_rule_with_operations.py
new file mode 100644
index 0000000000..5342daed6c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_rule_with_operations.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RuleWithOperations(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'api_versions': 'list[str]',
+ 'operations': 'list[str]',
+ 'resources': 'list[str]',
+ 'scope': 'str'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'api_versions': 'apiVersions',
+ 'operations': 'operations',
+ 'resources': 'resources',
+ 'scope': 'scope'
+ }
+
+ def __init__(self, api_groups=None, api_versions=None, operations=None, resources=None, scope=None, local_vars_configuration=None): # noqa: E501
+ """V1RuleWithOperations - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._api_versions = None
+ self._operations = None
+ self._resources = None
+ self._scope = None
+ self.discriminator = None
+
+ if api_groups is not None:
+ self.api_groups = api_groups
+ if api_versions is not None:
+ self.api_versions = api_versions
+ if operations is not None:
+ self.operations = operations
+ if resources is not None:
+ self.resources = resources
+ if scope is not None:
+ self.scope = scope
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1RuleWithOperations. # noqa: E501
+
+ APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The api_groups of this V1RuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1RuleWithOperations.
+
+ APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param api_groups: The api_groups of this V1RuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_groups = api_groups
+
+ @property
+ def api_versions(self):
+ """Gets the api_versions of this V1RuleWithOperations. # noqa: E501
+
+ APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The api_versions of this V1RuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_versions
+
+ @api_versions.setter
+ def api_versions(self, api_versions):
+ """Sets the api_versions of this V1RuleWithOperations.
+
+ APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param api_versions: The api_versions of this V1RuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_versions = api_versions
+
+ @property
+ def operations(self):
+ """Gets the operations of this V1RuleWithOperations. # noqa: E501
+
+ Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The operations of this V1RuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._operations
+
+ @operations.setter
+ def operations(self, operations):
+ """Sets the operations of this V1RuleWithOperations.
+
+ Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param operations: The operations of this V1RuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._operations = operations
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1RuleWithOperations. # noqa: E501
+
+ Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501
+
+ :return: The resources of this V1RuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1RuleWithOperations.
+
+ Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501
+
+ :param resources: The resources of this V1RuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resources = resources
+
+ @property
+ def scope(self):
+ """Gets the scope of this V1RuleWithOperations. # noqa: E501
+
+ scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501
+
+ :return: The scope of this V1RuleWithOperations. # noqa: E501
+ :rtype: str
+ """
+ return self._scope
+
+ @scope.setter
+ def scope(self, scope):
+ """Sets the scope of this V1RuleWithOperations.
+
+ scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501
+
+ :param scope: The scope of this V1RuleWithOperations. # noqa: E501
+ :type: str
+ """
+
+ self._scope = scope
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RuleWithOperations):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RuleWithOperations):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class.py b/contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class.py
new file mode 100644
index 0000000000..4b391d8929
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class.py
@@ -0,0 +1,257 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RuntimeClass(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'handler': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'overhead': 'V1Overhead',
+ 'scheduling': 'V1Scheduling'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'handler': 'handler',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'overhead': 'overhead',
+ 'scheduling': 'scheduling'
+ }
+
+ def __init__(self, api_version=None, handler=None, kind=None, metadata=None, overhead=None, scheduling=None, local_vars_configuration=None): # noqa: E501
+ """V1RuntimeClass - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._handler = None
+ self._kind = None
+ self._metadata = None
+ self._overhead = None
+ self._scheduling = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.handler = handler
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if overhead is not None:
+ self.overhead = overhead
+ if scheduling is not None:
+ self.scheduling = scheduling
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1RuntimeClass. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1RuntimeClass. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1RuntimeClass.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1RuntimeClass. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def handler(self):
+ """Gets the handler of this V1RuntimeClass. # noqa: E501
+
+ handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable. # noqa: E501
+
+ :return: The handler of this V1RuntimeClass. # noqa: E501
+ :rtype: str
+ """
+ return self._handler
+
+ @handler.setter
+ def handler(self, handler):
+ """Sets the handler of this V1RuntimeClass.
+
+ handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable. # noqa: E501
+
+ :param handler: The handler of this V1RuntimeClass. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and handler is None: # noqa: E501
+ raise ValueError("Invalid value for `handler`, must not be `None`") # noqa: E501
+
+ self._handler = handler
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1RuntimeClass. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1RuntimeClass. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1RuntimeClass.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1RuntimeClass. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1RuntimeClass. # noqa: E501
+
+
+ :return: The metadata of this V1RuntimeClass. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1RuntimeClass.
+
+
+ :param metadata: The metadata of this V1RuntimeClass. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def overhead(self):
+ """Gets the overhead of this V1RuntimeClass. # noqa: E501
+
+
+ :return: The overhead of this V1RuntimeClass. # noqa: E501
+ :rtype: V1Overhead
+ """
+ return self._overhead
+
+ @overhead.setter
+ def overhead(self, overhead):
+ """Sets the overhead of this V1RuntimeClass.
+
+
+ :param overhead: The overhead of this V1RuntimeClass. # noqa: E501
+ :type: V1Overhead
+ """
+
+ self._overhead = overhead
+
+ @property
+ def scheduling(self):
+ """Gets the scheduling of this V1RuntimeClass. # noqa: E501
+
+
+ :return: The scheduling of this V1RuntimeClass. # noqa: E501
+ :rtype: V1Scheduling
+ """
+ return self._scheduling
+
+ @scheduling.setter
+ def scheduling(self, scheduling):
+ """Sets the scheduling of this V1RuntimeClass.
+
+
+ :param scheduling: The scheduling of this V1RuntimeClass. # noqa: E501
+ :type: V1Scheduling
+ """
+
+ self._scheduling = scheduling
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RuntimeClass):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RuntimeClass):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class_list.py
new file mode 100644
index 0000000000..9a856b90f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_runtime_class_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1RuntimeClassList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1RuntimeClass]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1RuntimeClassList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1RuntimeClassList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1RuntimeClassList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1RuntimeClassList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1RuntimeClassList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1RuntimeClassList. # noqa: E501
+
+ items is a list of schema objects. # noqa: E501
+
+ :return: The items of this V1RuntimeClassList. # noqa: E501
+ :rtype: list[V1RuntimeClass]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1RuntimeClassList.
+
+ items is a list of schema objects. # noqa: E501
+
+ :param items: The items of this V1RuntimeClassList. # noqa: E501
+ :type: list[V1RuntimeClass]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1RuntimeClassList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1RuntimeClassList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1RuntimeClassList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1RuntimeClassList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1RuntimeClassList. # noqa: E501
+
+
+ :return: The metadata of this V1RuntimeClassList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1RuntimeClassList.
+
+
+ :param metadata: The metadata of this V1RuntimeClassList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1RuntimeClassList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1RuntimeClassList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scale.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scale.py
new file mode 100644
index 0000000000..6f95eee735
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scale.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Scale(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1ScaleSpec',
+ 'status': 'V1ScaleStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1Scale - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Scale. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Scale. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Scale.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Scale. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Scale. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Scale. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Scale.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Scale. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Scale. # noqa: E501
+
+
+ :return: The metadata of this V1Scale. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Scale.
+
+
+ :param metadata: The metadata of this V1Scale. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1Scale. # noqa: E501
+
+
+ :return: The spec of this V1Scale. # noqa: E501
+ :rtype: V1ScaleSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1Scale.
+
+
+ :param spec: The spec of this V1Scale. # noqa: E501
+ :type: V1ScaleSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1Scale. # noqa: E501
+
+
+ :return: The status of this V1Scale. # noqa: E501
+ :rtype: V1ScaleStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1Scale.
+
+
+ :param status: The status of this V1Scale. # noqa: E501
+ :type: V1ScaleStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Scale):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Scale):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_persistent_volume_source.py
new file mode 100644
index 0000000000..d86a2c26e6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_persistent_volume_source.py
@@ -0,0 +1,375 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ScaleIOPersistentVolumeSource(object):
    """ScaleIO persistent volume source model (OpenAPI release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'fs_type': 'str',
        'gateway': 'str',
        'protection_domain': 'str',
        'read_only': 'bool',
        'secret_ref': 'V1SecretReference',
        'ssl_enabled': 'bool',
        'storage_mode': 'str',
        'storage_pool': 'str',
        'system': 'str',
        'volume_name': 'str'
    }

    attribute_map = {
        'fs_type': 'fsType',
        'gateway': 'gateway',
        'protection_domain': 'protectionDomain',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef',
        'ssl_enabled': 'sslEnabled',
        'storage_mode': 'storageMode',
        'storage_pool': 'storagePool',
        'system': 'system',
        'volume_name': 'volumeName'
    }

    def __init__(self, fs_type=None, gateway=None, protection_domain=None, read_only=None, secret_ref=None, ssl_enabled=None, storage_mode=None, storage_pool=None, system=None, volume_name=None, local_vars_configuration=None):  # noqa: E501
        """V1ScaleIOPersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._fs_type = None
        self._gateway = None
        self._protection_domain = None
        self._read_only = None
        self._secret_ref = None
        self._ssl_enabled = None
        self._storage_mode = None
        self._storage_pool = None
        self._system = None
        self._volume_name = None
        self.discriminator = None

        # gateway, secret_ref and system are required fields: they are always
        # routed through their validating setters.  Optional fields are
        # assigned only when a value was supplied.
        if fs_type is not None:
            self.fs_type = fs_type
        self.gateway = gateway
        if protection_domain is not None:
            self.protection_domain = protection_domain
        if read_only is not None:
            self.read_only = read_only
        self.secret_ref = secret_ref
        if ssl_enabled is not None:
            self.ssl_enabled = ssl_enabled
        if storage_mode is not None:
            self.storage_mode = storage_mode
        if storage_pool is not None:
            self.storage_pool = storage_pool
        self.system = system
        if volume_name is not None:
            self.volume_name = volume_name

    @property
    def fs_type(self):
        """Filesystem type to mount, e.g. "ext4", "xfs", "ntfs"; default "xfs".

        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set fs_type (str)."""
        self._fs_type = fs_type

    @property
    def gateway(self):
        """Host address of the ScaleIO API Gateway (required).

        :rtype: str
        """
        return self._gateway

    @gateway.setter
    def gateway(self, gateway):
        """Set gateway (str); must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and gateway is None:  # noqa: E501
            raise ValueError("Invalid value for `gateway`, must not be `None`")  # noqa: E501
        self._gateway = gateway

    @property
    def protection_domain(self):
        """Name of the ScaleIO Protection Domain for the configured storage.

        :rtype: str
        """
        return self._protection_domain

    @protection_domain.setter
    def protection_domain(self, protection_domain):
        """Set protection_domain (str)."""
        self._protection_domain = protection_domain

    @property
    def read_only(self):
        """Force the ReadOnly setting in VolumeMounts; defaults to false.

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set read_only (bool)."""
        self._read_only = read_only

    @property
    def secret_ref(self):
        """Reference to the secret for ScaleIO user/other sensitive info (required).

        :rtype: V1SecretReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Set secret_ref; must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and secret_ref is None:  # noqa: E501
            raise ValueError("Invalid value for `secret_ref`, must not be `None`")  # noqa: E501
        self._secret_ref = secret_ref

    @property
    def ssl_enabled(self):
        """Enable/disable SSL communication with the Gateway; default false.

        :rtype: bool
        """
        return self._ssl_enabled

    @ssl_enabled.setter
    def ssl_enabled(self, ssl_enabled):
        """Set ssl_enabled (bool)."""
        self._ssl_enabled = ssl_enabled

    @property
    def storage_mode(self):
        """ThickProvisioned or ThinProvisioned; default ThinProvisioned.

        :rtype: str
        """
        return self._storage_mode

    @storage_mode.setter
    def storage_mode(self, storage_mode):
        """Set storage_mode (str)."""
        self._storage_mode = storage_mode

    @property
    def storage_pool(self):
        """ScaleIO Storage Pool associated with the protection domain.

        :rtype: str
        """
        return self._storage_pool

    @storage_pool.setter
    def storage_pool(self, storage_pool):
        """Set storage_pool (str)."""
        self._storage_pool = storage_pool

    @property
    def system(self):
        """Name of the storage system as configured in ScaleIO (required).

        :rtype: str
        """
        return self._system

    @system.setter
    def system(self, system):
        """Set system (str); must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and system is None:  # noqa: E501
            raise ValueError("Invalid value for `system`, must not be `None`")  # noqa: E501
        self._system = system

    @property
    def volume_name(self):
        """Name of a volume already created in the ScaleIO system.

        :rtype: str
        """
        return self._volume_name

    @volume_name.setter
    def volume_name(self, volume_name):
        """Set volume_name (str)."""
        self._volume_name = volume_name

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr()`` (used by `print` and `pprint`) to to_str()."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their dict forms match."""
        if isinstance(other, V1ScaleIOPersistentVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; foreign types always compare unequal."""
        if isinstance(other, V1ScaleIOPersistentVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_volume_source.py
new file mode 100644
index 0000000000..135c39e039
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_io_volume_source.py
@@ -0,0 +1,375 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ScaleIOVolumeSource(object):
    """ScaleIO volume source model (OpenAPI release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'fs_type': 'str',
        'gateway': 'str',
        'protection_domain': 'str',
        'read_only': 'bool',
        'secret_ref': 'V1LocalObjectReference',
        'ssl_enabled': 'bool',
        'storage_mode': 'str',
        'storage_pool': 'str',
        'system': 'str',
        'volume_name': 'str'
    }

    attribute_map = {
        'fs_type': 'fsType',
        'gateway': 'gateway',
        'protection_domain': 'protectionDomain',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef',
        'ssl_enabled': 'sslEnabled',
        'storage_mode': 'storageMode',
        'storage_pool': 'storagePool',
        'system': 'system',
        'volume_name': 'volumeName'
    }

    def __init__(self, fs_type=None, gateway=None, protection_domain=None, read_only=None, secret_ref=None, ssl_enabled=None, storage_mode=None, storage_pool=None, system=None, volume_name=None, local_vars_configuration=None):  # noqa: E501
        """V1ScaleIOVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._fs_type = None
        self._gateway = None
        self._protection_domain = None
        self._read_only = None
        self._secret_ref = None
        self._ssl_enabled = None
        self._storage_mode = None
        self._storage_pool = None
        self._system = None
        self._volume_name = None
        self.discriminator = None

        # gateway, secret_ref and system are required fields: they are always
        # routed through their validating setters.  Optional fields are
        # assigned only when a value was supplied.
        if fs_type is not None:
            self.fs_type = fs_type
        self.gateway = gateway
        if protection_domain is not None:
            self.protection_domain = protection_domain
        if read_only is not None:
            self.read_only = read_only
        self.secret_ref = secret_ref
        if ssl_enabled is not None:
            self.ssl_enabled = ssl_enabled
        if storage_mode is not None:
            self.storage_mode = storage_mode
        if storage_pool is not None:
            self.storage_pool = storage_pool
        self.system = system
        if volume_name is not None:
            self.volume_name = volume_name

    @property
    def fs_type(self):
        """Filesystem type to mount, e.g. "ext4", "xfs", "ntfs"; default "xfs".

        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Set fs_type (str)."""
        self._fs_type = fs_type

    @property
    def gateway(self):
        """Host address of the ScaleIO API Gateway (required).

        :rtype: str
        """
        return self._gateway

    @gateway.setter
    def gateway(self, gateway):
        """Set gateway (str); must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and gateway is None:  # noqa: E501
            raise ValueError("Invalid value for `gateway`, must not be `None`")  # noqa: E501
        self._gateway = gateway

    @property
    def protection_domain(self):
        """Name of the ScaleIO Protection Domain for the configured storage.

        :rtype: str
        """
        return self._protection_domain

    @protection_domain.setter
    def protection_domain(self, protection_domain):
        """Set protection_domain (str)."""
        self._protection_domain = protection_domain

    @property
    def read_only(self):
        """Force the ReadOnly setting in VolumeMounts; defaults to false.

        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Set read_only (bool)."""
        self._read_only = read_only

    @property
    def secret_ref(self):
        """Reference to the secret for ScaleIO user/other sensitive info (required).

        :rtype: V1LocalObjectReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Set secret_ref; must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and secret_ref is None:  # noqa: E501
            raise ValueError("Invalid value for `secret_ref`, must not be `None`")  # noqa: E501
        self._secret_ref = secret_ref

    @property
    def ssl_enabled(self):
        """Enable/disable SSL communication with the Gateway; default false.

        :rtype: bool
        """
        return self._ssl_enabled

    @ssl_enabled.setter
    def ssl_enabled(self, ssl_enabled):
        """Set ssl_enabled (bool)."""
        self._ssl_enabled = ssl_enabled

    @property
    def storage_mode(self):
        """ThickProvisioned or ThinProvisioned; default ThinProvisioned.

        :rtype: str
        """
        return self._storage_mode

    @storage_mode.setter
    def storage_mode(self, storage_mode):
        """Set storage_mode (str)."""
        self._storage_mode = storage_mode

    @property
    def storage_pool(self):
        """ScaleIO Storage Pool associated with the protection domain.

        :rtype: str
        """
        return self._storage_pool

    @storage_pool.setter
    def storage_pool(self, storage_pool):
        """Set storage_pool (str)."""
        self._storage_pool = storage_pool

    @property
    def system(self):
        """Name of the storage system as configured in ScaleIO (required).

        :rtype: str
        """
        return self._system

    @system.setter
    def system(self, system):
        """Set system (str); must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and system is None:  # noqa: E501
            raise ValueError("Invalid value for `system`, must not be `None`")  # noqa: E501
        self._system = system

    @property
    def volume_name(self):
        """Name of a volume already created in the ScaleIO system.

        :rtype: str
        """
        return self._volume_name

    @volume_name.setter
    def volume_name(self, volume_name):
        """Set volume_name (str)."""
        self._volume_name = volume_name

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr()`` (used by `print` and `pprint`) to to_str()."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their dict forms match."""
        if isinstance(other, V1ScaleIOVolumeSource):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; foreign types always compare unequal."""
        if isinstance(other, V1ScaleIOVolumeSource):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scale_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_spec.py
new file mode 100644
index 0000000000..4000fde6db
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_spec.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ScaleSpec(object):
    """ScaleSpec: desired state of a scale subresource (OpenAPI release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'replicas': 'int'
    }

    attribute_map = {
        'replicas': 'replicas'
    }

    def __init__(self, replicas=None, local_vars_configuration=None):  # noqa: E501
        """V1ScaleSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._replicas = None
        self.discriminator = None

        # replicas is optional: assign through the setter only when supplied.
        if replicas is not None:
            self.replicas = replicas

    @property
    def replicas(self):
        """Desired number of instances for the scaled object.

        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set replicas (int)."""
        self._replicas = replicas

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr()`` (used by `print` and `pprint`) to to_str()."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their dict forms match."""
        if isinstance(other, V1ScaleSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; foreign types always compare unequal."""
        if isinstance(other, V1ScaleSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scale_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_status.py
new file mode 100644
index 0000000000..6ea5f054e7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scale_status.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ScaleStatus(object):
    """ScaleStatus: current status of a scale subresource (OpenAPI release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {
        'replicas': 'int',
        'selector': 'str'
    }

    attribute_map = {
        'replicas': 'replicas',
        'selector': 'selector'
    }

    def __init__(self, replicas=None, selector=None, local_vars_configuration=None):  # noqa: E501
        """V1ScaleStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._replicas = None
        self._selector = None
        self.discriminator = None

        # replicas is required and always routed through its validating
        # setter; selector is assigned only when supplied.
        self.replicas = replicas
        if selector is not None:
            self.selector = selector

    @property
    def replicas(self):
        """Actual number of observed instances of the scaled object (required).

        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set replicas (int); must not be None when validation is enabled."""
        if self.local_vars_configuration.client_side_validation and replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `replicas`, must not be `None`")  # noqa: E501
        self._replicas = replicas

    @property
    def selector(self):
        """Label query over pods that should match the replicas count, in
        query-param string syntax. More info:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/

        :rtype: str
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Set selector (str)."""
        self._selector = selector

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr()`` (used by `print` and `pprint`) to to_str()."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their dict forms match."""
        if isinstance(other, V1ScaleStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; foreign types always compare unequal."""
        if isinstance(other, V1ScaleStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scheduling.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scheduling.py
new file mode 100644
index 0000000000..5fbb408d27
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scheduling.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Scheduling(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'node_selector': 'dict(str, str)',
+ 'tolerations': 'list[V1Toleration]'
+ }
+
+ attribute_map = {
+ 'node_selector': 'nodeSelector',
+ 'tolerations': 'tolerations'
+ }
+
+ def __init__(self, node_selector=None, tolerations=None, local_vars_configuration=None): # noqa: E501
+ """V1Scheduling - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._node_selector = None
+ self._tolerations = None
+ self.discriminator = None
+
+ if node_selector is not None:
+ self.node_selector = node_selector
+ if tolerations is not None:
+ self.tolerations = tolerations
+
+ @property
+ def node_selector(self):
+ """Gets the node_selector of this V1Scheduling. # noqa: E501
+
+ nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission. # noqa: E501
+
+ :return: The node_selector of this V1Scheduling. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._node_selector
+
+ @node_selector.setter
+ def node_selector(self, node_selector):
+ """Sets the node_selector of this V1Scheduling.
+
+ nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission. # noqa: E501
+
+ :param node_selector: The node_selector of this V1Scheduling. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._node_selector = node_selector
+
+ @property
+ def tolerations(self):
+ """Gets the tolerations of this V1Scheduling. # noqa: E501
+
+ tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. # noqa: E501
+
+ :return: The tolerations of this V1Scheduling. # noqa: E501
+ :rtype: list[V1Toleration]
+ """
+ return self._tolerations
+
+ @tolerations.setter
+ def tolerations(self, tolerations):
+ """Sets the tolerations of this V1Scheduling.
+
+ tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. # noqa: E501
+
+ :param tolerations: The tolerations of this V1Scheduling. # noqa: E501
+ :type: list[V1Toleration]
+ """
+
+ self._tolerations = tolerations
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Scheduling):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Scheduling):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scope_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scope_selector.py
new file mode 100644
index 0000000000..8a34e84aa0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scope_selector.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ScopeSelector(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'match_expressions': 'list[V1ScopedResourceSelectorRequirement]'
+ }
+
+ attribute_map = {
+ 'match_expressions': 'matchExpressions'
+ }
+
+ def __init__(self, match_expressions=None, local_vars_configuration=None): # noqa: E501
+ """V1ScopeSelector - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._match_expressions = None
+ self.discriminator = None
+
+ if match_expressions is not None:
+ self.match_expressions = match_expressions
+
+ @property
+ def match_expressions(self):
+ """Gets the match_expressions of this V1ScopeSelector. # noqa: E501
+
+ A list of scope selector requirements by scope of the resources. # noqa: E501
+
+ :return: The match_expressions of this V1ScopeSelector. # noqa: E501
+ :rtype: list[V1ScopedResourceSelectorRequirement]
+ """
+ return self._match_expressions
+
+ @match_expressions.setter
+ def match_expressions(self, match_expressions):
+ """Sets the match_expressions of this V1ScopeSelector.
+
+ A list of scope selector requirements by scope of the resources. # noqa: E501
+
+ :param match_expressions: The match_expressions of this V1ScopeSelector. # noqa: E501
+ :type: list[V1ScopedResourceSelectorRequirement]
+ """
+
+ self._match_expressions = match_expressions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ScopeSelector):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ScopeSelector):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_scoped_resource_selector_requirement.py b/contrib/python/kubernetes/kubernetes/client/models/v1_scoped_resource_selector_requirement.py
new file mode 100644
index 0000000000..55b51726f4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_scoped_resource_selector_requirement.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ScopedResourceSelectorRequirement(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'operator': 'str',
+ 'scope_name': 'str',
+ 'values': 'list[str]'
+ }
+
+ attribute_map = {
+ 'operator': 'operator',
+ 'scope_name': 'scopeName',
+ 'values': 'values'
+ }
+
+ def __init__(self, operator=None, scope_name=None, values=None, local_vars_configuration=None): # noqa: E501
+ """V1ScopedResourceSelectorRequirement - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._operator = None
+ self._scope_name = None
+ self._values = None
+ self.discriminator = None
+
+ self.operator = operator
+ self.scope_name = scope_name
+ if values is not None:
+ self.values = values
+
+ @property
+ def operator(self):
+ """Gets the operator of this V1ScopedResourceSelectorRequirement. # noqa: E501
+
+ Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. # noqa: E501
+
+ :return: The operator of this V1ScopedResourceSelectorRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._operator
+
+ @operator.setter
+ def operator(self, operator):
+ """Sets the operator of this V1ScopedResourceSelectorRequirement.
+
+ Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. # noqa: E501
+
+ :param operator: The operator of this V1ScopedResourceSelectorRequirement. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and operator is None: # noqa: E501
+ raise ValueError("Invalid value for `operator`, must not be `None`") # noqa: E501
+
+ self._operator = operator
+
+ @property
+ def scope_name(self):
+ """Gets the scope_name of this V1ScopedResourceSelectorRequirement. # noqa: E501
+
+ The name of the scope that the selector applies to. # noqa: E501
+
+ :return: The scope_name of this V1ScopedResourceSelectorRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._scope_name
+
+ @scope_name.setter
+ def scope_name(self, scope_name):
+ """Sets the scope_name of this V1ScopedResourceSelectorRequirement.
+
+ The name of the scope that the selector applies to. # noqa: E501
+
+ :param scope_name: The scope_name of this V1ScopedResourceSelectorRequirement. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and scope_name is None: # noqa: E501
+ raise ValueError("Invalid value for `scope_name`, must not be `None`") # noqa: E501
+
+ self._scope_name = scope_name
+
+ @property
+ def values(self):
+ """Gets the values of this V1ScopedResourceSelectorRequirement. # noqa: E501
+
+ An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. # noqa: E501
+
+ :return: The values of this V1ScopedResourceSelectorRequirement. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._values
+
+ @values.setter
+ def values(self, values):
+ """Sets the values of this V1ScopedResourceSelectorRequirement.
+
+ An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. # noqa: E501
+
+ :param values: The values of this V1ScopedResourceSelectorRequirement. # noqa: E501
+ :type: list[str]
+ """
+
+ self._values = values
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ScopedResourceSelectorRequirement):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ScopedResourceSelectorRequirement):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_se_linux_options.py b/contrib/python/kubernetes/kubernetes/client/models/v1_se_linux_options.py
new file mode 100644
index 0000000000..fdb6bbdea6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_se_linux_options.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SELinuxOptions(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'level': 'str',
+ 'role': 'str',
+ 'type': 'str',
+ 'user': 'str'
+ }
+
+ attribute_map = {
+ 'level': 'level',
+ 'role': 'role',
+ 'type': 'type',
+ 'user': 'user'
+ }
+
+ def __init__(self, level=None, role=None, type=None, user=None, local_vars_configuration=None): # noqa: E501
+ """V1SELinuxOptions - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._level = None
+ self._role = None
+ self._type = None
+ self._user = None
+ self.discriminator = None
+
+ if level is not None:
+ self.level = level
+ if role is not None:
+ self.role = role
+ if type is not None:
+ self.type = type
+ if user is not None:
+ self.user = user
+
+ @property
+ def level(self):
+ """Gets the level of this V1SELinuxOptions. # noqa: E501
+
+ Level is SELinux level label that applies to the container. # noqa: E501
+
+ :return: The level of this V1SELinuxOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._level
+
+ @level.setter
+ def level(self, level):
+ """Sets the level of this V1SELinuxOptions.
+
+ Level is SELinux level label that applies to the container. # noqa: E501
+
+ :param level: The level of this V1SELinuxOptions. # noqa: E501
+ :type: str
+ """
+
+ self._level = level
+
+ @property
+ def role(self):
+ """Gets the role of this V1SELinuxOptions. # noqa: E501
+
+ Role is a SELinux role label that applies to the container. # noqa: E501
+
+ :return: The role of this V1SELinuxOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._role
+
+ @role.setter
+ def role(self, role):
+ """Sets the role of this V1SELinuxOptions.
+
+ Role is a SELinux role label that applies to the container. # noqa: E501
+
+ :param role: The role of this V1SELinuxOptions. # noqa: E501
+ :type: str
+ """
+
+ self._role = role
+
+ @property
+ def type(self):
+ """Gets the type of this V1SELinuxOptions. # noqa: E501
+
+ Type is a SELinux type label that applies to the container. # noqa: E501
+
+ :return: The type of this V1SELinuxOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1SELinuxOptions.
+
+ Type is a SELinux type label that applies to the container. # noqa: E501
+
+ :param type: The type of this V1SELinuxOptions. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ @property
+ def user(self):
+ """Gets the user of this V1SELinuxOptions. # noqa: E501
+
+ User is a SELinux user label that applies to the container. # noqa: E501
+
+ :return: The user of this V1SELinuxOptions. # noqa: E501
+ :rtype: str
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1SELinuxOptions.
+
+ User is a SELinux user label that applies to the container. # noqa: E501
+
+ :param user: The user of this V1SELinuxOptions. # noqa: E501
+ :type: str
+ """
+
+ self._user = user
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SELinuxOptions):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SELinuxOptions):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_seccomp_profile.py b/contrib/python/kubernetes/kubernetes/client/models/v1_seccomp_profile.py
new file mode 100644
index 0000000000..01dc363d6b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_seccomp_profile.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SeccompProfile(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'localhost_profile': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'localhost_profile': 'localhostProfile',
+ 'type': 'type'
+ }
+
+ def __init__(self, localhost_profile=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1SeccompProfile - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._localhost_profile = None
+ self._type = None
+ self.discriminator = None
+
+ if localhost_profile is not None:
+ self.localhost_profile = localhost_profile
+ self.type = type
+
+ @property
+ def localhost_profile(self):
+ """Gets the localhost_profile of this V1SeccompProfile. # noqa: E501
+
+ localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type. # noqa: E501
+
+ :return: The localhost_profile of this V1SeccompProfile. # noqa: E501
+ :rtype: str
+ """
+ return self._localhost_profile
+
+ @localhost_profile.setter
+ def localhost_profile(self, localhost_profile):
+ """Sets the localhost_profile of this V1SeccompProfile.
+
+ localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type. # noqa: E501
+
+ :param localhost_profile: The localhost_profile of this V1SeccompProfile. # noqa: E501
+ :type: str
+ """
+
+ self._localhost_profile = localhost_profile
+
+ @property
+ def type(self):
+ """Gets the type of this V1SeccompProfile. # noqa: E501
+
+ type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. # noqa: E501
+
+ :return: The type of this V1SeccompProfile. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1SeccompProfile.
+
+ type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. # noqa: E501
+
+ :param type: The type of this V1SeccompProfile. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SeccompProfile):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SeccompProfile):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret.py
new file mode 100644
index 0000000000..b28bb66dbf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret.py
@@ -0,0 +1,288 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Secret(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'data': 'dict(str, str)',
+ 'immutable': 'bool',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'string_data': 'dict(str, str)',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'data': 'data',
+ 'immutable': 'immutable',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'string_data': 'stringData',
+ 'type': 'type'
+ }
+
+ def __init__(self, api_version=None, data=None, immutable=None, kind=None, metadata=None, string_data=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1Secret - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._data = None
+ self._immutable = None
+ self._kind = None
+ self._metadata = None
+ self._string_data = None
+ self._type = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if data is not None:
+ self.data = data
+ if immutable is not None:
+ self.immutable = immutable
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if string_data is not None:
+ self.string_data = string_data
+ if type is not None:
+ self.type = type
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1Secret. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1Secret. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1Secret.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1Secret. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def data(self):
+ """Gets the data of this V1Secret. # noqa: E501
+
+ Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 # noqa: E501
+
+ :return: The data of this V1Secret. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._data
+
+ @data.setter
+ def data(self, data):
+ """Sets the data of this V1Secret.
+
+ Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 # noqa: E501
+
+ :param data: The data of this V1Secret. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._data = data
+
+ @property
+ def immutable(self):
+ """Gets the immutable of this V1Secret. # noqa: E501
+
+ Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. # noqa: E501
+
+ :return: The immutable of this V1Secret. # noqa: E501
+ :rtype: bool
+ """
+ return self._immutable
+
+ @immutable.setter
+ def immutable(self, immutable):
+ """Sets the immutable of this V1Secret.
+
+ Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. # noqa: E501
+
+ :param immutable: The immutable of this V1Secret. # noqa: E501
+ :type: bool
+ """
+
+ self._immutable = immutable
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1Secret. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1Secret. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1Secret.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1Secret. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1Secret. # noqa: E501
+
+
+ :return: The metadata of this V1Secret. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1Secret.
+
+
+ :param metadata: The metadata of this V1Secret. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def string_data(self):
+ """Gets the string_data of this V1Secret. # noqa: E501
+
+ stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API. # noqa: E501
+
+ :return: The string_data of this V1Secret. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._string_data
+
+ @string_data.setter
+ def string_data(self, string_data):
+ """Sets the string_data of this V1Secret.
+
+ stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API. # noqa: E501
+
+ :param string_data: The string_data of this V1Secret. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._string_data = string_data
+
+ @property
+ def type(self):
+ """Gets the type of this V1Secret. # noqa: E501
+
+ Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types # noqa: E501
+
+ :return: The type of this V1Secret. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1Secret.
+
+ Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types # noqa: E501
+
+ :param type: The type of this V1Secret. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Secret):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Secret):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret_env_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_env_source.py
new file mode 100644
index 0000000000..19a6fdd93c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_env_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecretEnvSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1SecretEnvSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def name(self):
+ """Gets the name of this V1SecretEnvSource. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1SecretEnvSource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1SecretEnvSource.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1SecretEnvSource. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1SecretEnvSource. # noqa: E501
+
+ Specify whether the Secret must be defined # noqa: E501
+
+ :return: The optional of this V1SecretEnvSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1SecretEnvSource.
+
+ Specify whether the Secret must be defined # noqa: E501
+
+ :param optional: The optional of this V1SecretEnvSource. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecretEnvSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecretEnvSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret_key_selector.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_key_selector.py
new file mode 100644
index 0000000000..a2e324e3b5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_key_selector.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecretKeySelector(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'key': 'str',
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'key': 'key',
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, key=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1SecretKeySelector - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._key = None
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ self.key = key
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def key(self):
+ """Gets the key of this V1SecretKeySelector. # noqa: E501
+
+ The key of the secret to select from. Must be a valid secret key. # noqa: E501
+
+ :return: The key of this V1SecretKeySelector. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1SecretKeySelector.
+
+ The key of the secret to select from. Must be a valid secret key. # noqa: E501
+
+ :param key: The key of this V1SecretKeySelector. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
+ raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
+
+ self._key = key
+
+ @property
+ def name(self):
+ """Gets the name of this V1SecretKeySelector. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1SecretKeySelector. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1SecretKeySelector.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1SecretKeySelector. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1SecretKeySelector. # noqa: E501
+
+ Specify whether the Secret or its key must be defined # noqa: E501
+
+ :return: The optional of this V1SecretKeySelector. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1SecretKeySelector.
+
+ Specify whether the Secret or its key must be defined # noqa: E501
+
+ :param optional: The optional of this V1SecretKeySelector. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecretKeySelector):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecretKeySelector):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_list.py
new file mode 100644
index 0000000000..46a7145fb3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecretList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1Secret]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1SecretList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1SecretList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1SecretList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1SecretList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1SecretList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1SecretList. # noqa: E501
+
+ Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret # noqa: E501
+
+ :return: The items of this V1SecretList. # noqa: E501
+ :rtype: list[V1Secret]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1SecretList.
+
+ Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret # noqa: E501
+
+ :param items: The items of this V1SecretList. # noqa: E501
+ :type: list[V1Secret]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1SecretList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1SecretList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1SecretList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1SecretList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1SecretList. # noqa: E501
+
+
+ :return: The metadata of this V1SecretList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1SecretList.
+
+
+ :param metadata: The metadata of this V1SecretList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecretList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecretList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret_projection.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_projection.py
new file mode 100644
index 0000000000..53c9385fec
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_projection.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecretProjection(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'items': 'list[V1KeyToPath]',
+ 'name': 'str',
+ 'optional': 'bool'
+ }
+
+ attribute_map = {
+ 'items': 'items',
+ 'name': 'name',
+ 'optional': 'optional'
+ }
+
+ def __init__(self, items=None, name=None, optional=None, local_vars_configuration=None): # noqa: E501
+ """V1SecretProjection - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._items = None
+ self._name = None
+ self._optional = None
+ self.discriminator = None
+
+ if items is not None:
+ self.items = items
+ if name is not None:
+ self.name = name
+ if optional is not None:
+ self.optional = optional
+
+ @property
+ def items(self):
+ """Gets the items of this V1SecretProjection. # noqa: E501
+
+ items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :return: The items of this V1SecretProjection. # noqa: E501
+ :rtype: list[V1KeyToPath]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1SecretProjection.
+
+ items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :param items: The items of this V1SecretProjection. # noqa: E501
+ :type: list[V1KeyToPath]
+ """
+
+ self._items = items
+
+ @property
+ def name(self):
+ """Gets the name of this V1SecretProjection. # noqa: E501
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1SecretProjection. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1SecretProjection.
+
+ Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1SecretProjection. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1SecretProjection. # noqa: E501
+
+ optional field specify whether the Secret or its key must be defined # noqa: E501
+
+ :return: The optional of this V1SecretProjection. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1SecretProjection.
+
+ optional field specify whether the Secret or its key must be defined # noqa: E501
+
+ :param optional: The optional of this V1SecretProjection. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecretProjection):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecretProjection):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_reference.py
new file mode 100644
index 0000000000..45ab4eaae8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_reference.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecretReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace'
+ }
+
+ def __init__(self, name=None, namespace=None, local_vars_configuration=None): # noqa: E501
+ """V1SecretReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+
+ @property
+ def name(self):
+ """Gets the name of this V1SecretReference. # noqa: E501
+
+ name is unique within a namespace to reference a secret resource. # noqa: E501
+
+ :return: The name of this V1SecretReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1SecretReference.
+
+ name is unique within a namespace to reference a secret resource. # noqa: E501
+
+ :param name: The name of this V1SecretReference. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1SecretReference. # noqa: E501
+
+ namespace defines the space within which the secret name must be unique. # noqa: E501
+
+ :return: The namespace of this V1SecretReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1SecretReference.
+
+ namespace defines the space within which the secret name must be unique. # noqa: E501
+
+ :param namespace: The namespace of this V1SecretReference. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecretReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecretReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_secret_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_volume_source.py
new file mode 100644
index 0000000000..e2be32ec42
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_secret_volume_source.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecretVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'default_mode': 'int',
+ 'items': 'list[V1KeyToPath]',
+ 'optional': 'bool',
+ 'secret_name': 'str'
+ }
+
+ attribute_map = {
+ 'default_mode': 'defaultMode',
+ 'items': 'items',
+ 'optional': 'optional',
+ 'secret_name': 'secretName'
+ }
+
+ def __init__(self, default_mode=None, items=None, optional=None, secret_name=None, local_vars_configuration=None): # noqa: E501
+ """V1SecretVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._default_mode = None
+ self._items = None
+ self._optional = None
+ self._secret_name = None
+ self.discriminator = None
+
+ if default_mode is not None:
+ self.default_mode = default_mode
+ if items is not None:
+ self.items = items
+ if optional is not None:
+ self.optional = optional
+ if secret_name is not None:
+ self.secret_name = secret_name
+
+ @property
+ def default_mode(self):
+ """Gets the default_mode of this V1SecretVolumeSource. # noqa: E501
+
+ defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
+
+ :return: The default_mode of this V1SecretVolumeSource. # noqa: E501
+ :rtype: int
+ """
+ return self._default_mode
+
+ @default_mode.setter
+ def default_mode(self, default_mode):
+ """Sets the default_mode of this V1SecretVolumeSource.
+
+ defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
+
+ :param default_mode: The default_mode of this V1SecretVolumeSource. # noqa: E501
+ :type: int
+ """
+
+ self._default_mode = default_mode
+
+ @property
+ def items(self):
+ """Gets the items of this V1SecretVolumeSource. # noqa: E501
+
+ items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :return: The items of this V1SecretVolumeSource. # noqa: E501
+ :rtype: list[V1KeyToPath]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1SecretVolumeSource.
+
+ items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
+
+ :param items: The items of this V1SecretVolumeSource. # noqa: E501
+ :type: list[V1KeyToPath]
+ """
+
+ self._items = items
+
+ @property
+ def optional(self):
+ """Gets the optional of this V1SecretVolumeSource. # noqa: E501
+
+ optional field specify whether the Secret or its keys must be defined # noqa: E501
+
+ :return: The optional of this V1SecretVolumeSource. # noqa: E501
+ :rtype: bool
+ """
+ return self._optional
+
+ @optional.setter
+ def optional(self, optional):
+ """Sets the optional of this V1SecretVolumeSource.
+
+ optional field specify whether the Secret or its keys must be defined # noqa: E501
+
+ :param optional: The optional of this V1SecretVolumeSource. # noqa: E501
+ :type: bool
+ """
+
+ self._optional = optional
+
+ @property
+ def secret_name(self):
+ """Gets the secret_name of this V1SecretVolumeSource. # noqa: E501
+
+ secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret # noqa: E501
+
+ :return: The secret_name of this V1SecretVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._secret_name
+
+ @secret_name.setter
+ def secret_name(self, secret_name):
+ """Sets the secret_name of this V1SecretVolumeSource.
+
+ secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret # noqa: E501
+
+ :param secret_name: The secret_name of this V1SecretVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._secret_name = secret_name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecretVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecretVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_security_context.py b/contrib/python/kubernetes/kubernetes/client/models/v1_security_context.py
new file mode 100644
index 0000000000..bd5c8c1037
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_security_context.py
@@ -0,0 +1,394 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SecurityContext(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'allow_privilege_escalation': 'bool',
+ 'capabilities': 'V1Capabilities',
+ 'privileged': 'bool',
+ 'proc_mount': 'str',
+ 'read_only_root_filesystem': 'bool',
+ 'run_as_group': 'int',
+ 'run_as_non_root': 'bool',
+ 'run_as_user': 'int',
+ 'se_linux_options': 'V1SELinuxOptions',
+ 'seccomp_profile': 'V1SeccompProfile',
+ 'windows_options': 'V1WindowsSecurityContextOptions'
+ }
+
+ attribute_map = {
+ 'allow_privilege_escalation': 'allowPrivilegeEscalation',
+ 'capabilities': 'capabilities',
+ 'privileged': 'privileged',
+ 'proc_mount': 'procMount',
+ 'read_only_root_filesystem': 'readOnlyRootFilesystem',
+ 'run_as_group': 'runAsGroup',
+ 'run_as_non_root': 'runAsNonRoot',
+ 'run_as_user': 'runAsUser',
+ 'se_linux_options': 'seLinuxOptions',
+ 'seccomp_profile': 'seccompProfile',
+ 'windows_options': 'windowsOptions'
+ }
+
+ def __init__(self, allow_privilege_escalation=None, capabilities=None, privileged=None, proc_mount=None, read_only_root_filesystem=None, run_as_group=None, run_as_non_root=None, run_as_user=None, se_linux_options=None, seccomp_profile=None, windows_options=None, local_vars_configuration=None): # noqa: E501
+ """V1SecurityContext - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._allow_privilege_escalation = None
+ self._capabilities = None
+ self._privileged = None
+ self._proc_mount = None
+ self._read_only_root_filesystem = None
+ self._run_as_group = None
+ self._run_as_non_root = None
+ self._run_as_user = None
+ self._se_linux_options = None
+ self._seccomp_profile = None
+ self._windows_options = None
+ self.discriminator = None
+
+ if allow_privilege_escalation is not None:
+ self.allow_privilege_escalation = allow_privilege_escalation
+ if capabilities is not None:
+ self.capabilities = capabilities
+ if privileged is not None:
+ self.privileged = privileged
+ if proc_mount is not None:
+ self.proc_mount = proc_mount
+ if read_only_root_filesystem is not None:
+ self.read_only_root_filesystem = read_only_root_filesystem
+ if run_as_group is not None:
+ self.run_as_group = run_as_group
+ if run_as_non_root is not None:
+ self.run_as_non_root = run_as_non_root
+ if run_as_user is not None:
+ self.run_as_user = run_as_user
+ if se_linux_options is not None:
+ self.se_linux_options = se_linux_options
+ if seccomp_profile is not None:
+ self.seccomp_profile = seccomp_profile
+ if windows_options is not None:
+ self.windows_options = windows_options
+
+ @property
+ def allow_privilege_escalation(self):
+ """Gets the allow_privilege_escalation of this V1SecurityContext. # noqa: E501
+
+ AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The allow_privilege_escalation of this V1SecurityContext. # noqa: E501
+ :rtype: bool
+ """
+ return self._allow_privilege_escalation
+
+ @allow_privilege_escalation.setter
+ def allow_privilege_escalation(self, allow_privilege_escalation):
+ """Sets the allow_privilege_escalation of this V1SecurityContext.
+
+ AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param allow_privilege_escalation: The allow_privilege_escalation of this V1SecurityContext. # noqa: E501
+ :type: bool
+ """
+
+ self._allow_privilege_escalation = allow_privilege_escalation
+
+ @property
+ def capabilities(self):
+ """Gets the capabilities of this V1SecurityContext. # noqa: E501
+
+
+ :return: The capabilities of this V1SecurityContext. # noqa: E501
+ :rtype: V1Capabilities
+ """
+ return self._capabilities
+
+ @capabilities.setter
+ def capabilities(self, capabilities):
+ """Sets the capabilities of this V1SecurityContext.
+
+
+ :param capabilities: The capabilities of this V1SecurityContext. # noqa: E501
+ :type: V1Capabilities
+ """
+
+ self._capabilities = capabilities
+
+ @property
+ def privileged(self):
+ """Gets the privileged of this V1SecurityContext. # noqa: E501
+
+ Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The privileged of this V1SecurityContext. # noqa: E501
+ :rtype: bool
+ """
+ return self._privileged
+
+ @privileged.setter
+ def privileged(self, privileged):
+ """Sets the privileged of this V1SecurityContext.
+
+ Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param privileged: The privileged of this V1SecurityContext. # noqa: E501
+ :type: bool
+ """
+
+ self._privileged = privileged
+
+ @property
+ def proc_mount(self):
+ """Gets the proc_mount of this V1SecurityContext. # noqa: E501
+
+ procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The proc_mount of this V1SecurityContext. # noqa: E501
+ :rtype: str
+ """
+ return self._proc_mount
+
+ @proc_mount.setter
+ def proc_mount(self, proc_mount):
+ """Sets the proc_mount of this V1SecurityContext.
+
+ procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param proc_mount: The proc_mount of this V1SecurityContext. # noqa: E501
+ :type: str
+ """
+
+ self._proc_mount = proc_mount
+
+ @property
+ def read_only_root_filesystem(self):
+ """Gets the read_only_root_filesystem of this V1SecurityContext. # noqa: E501
+
+ Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The read_only_root_filesystem of this V1SecurityContext. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only_root_filesystem
+
+ @read_only_root_filesystem.setter
+ def read_only_root_filesystem(self, read_only_root_filesystem):
+ """Sets the read_only_root_filesystem of this V1SecurityContext.
+
+ Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param read_only_root_filesystem: The read_only_root_filesystem of this V1SecurityContext. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only_root_filesystem = read_only_root_filesystem
+
+ @property
+ def run_as_group(self):
+ """Gets the run_as_group of this V1SecurityContext. # noqa: E501
+
+ The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The run_as_group of this V1SecurityContext. # noqa: E501
+ :rtype: int
+ """
+ return self._run_as_group
+
+ @run_as_group.setter
+ def run_as_group(self, run_as_group):
+ """Sets the run_as_group of this V1SecurityContext.
+
+ The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param run_as_group: The run_as_group of this V1SecurityContext. # noqa: E501
+ :type: int
+ """
+
+ self._run_as_group = run_as_group
+
+ @property
+ def run_as_non_root(self):
+ """Gets the run_as_non_root of this V1SecurityContext. # noqa: E501
+
+ Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501
+
+ :return: The run_as_non_root of this V1SecurityContext. # noqa: E501
+ :rtype: bool
+ """
+ return self._run_as_non_root
+
+ @run_as_non_root.setter
+ def run_as_non_root(self, run_as_non_root):
+ """Sets the run_as_non_root of this V1SecurityContext.
+
+ Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501
+
+ :param run_as_non_root: The run_as_non_root of this V1SecurityContext. # noqa: E501
+ :type: bool
+ """
+
+ self._run_as_non_root = run_as_non_root
+
+ @property
+ def run_as_user(self):
+ """Gets the run_as_user of this V1SecurityContext. # noqa: E501
+
+ The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :return: The run_as_user of this V1SecurityContext. # noqa: E501
+ :rtype: int
+ """
+ return self._run_as_user
+
+ @run_as_user.setter
+ def run_as_user(self, run_as_user):
+ """Sets the run_as_user of this V1SecurityContext.
+
+ The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. # noqa: E501
+
+ :param run_as_user: The run_as_user of this V1SecurityContext. # noqa: E501
+ :type: int
+ """
+
+ self._run_as_user = run_as_user
+
+ @property
+ def se_linux_options(self):
+ """Gets the se_linux_options of this V1SecurityContext. # noqa: E501
+
+
+ :return: The se_linux_options of this V1SecurityContext. # noqa: E501
+ :rtype: V1SELinuxOptions
+ """
+ return self._se_linux_options
+
+ @se_linux_options.setter
+ def se_linux_options(self, se_linux_options):
+ """Sets the se_linux_options of this V1SecurityContext.
+
+
+ :param se_linux_options: The se_linux_options of this V1SecurityContext. # noqa: E501
+ :type: V1SELinuxOptions
+ """
+
+ self._se_linux_options = se_linux_options
+
+ @property
+ def seccomp_profile(self):
+ """Gets the seccomp_profile of this V1SecurityContext. # noqa: E501
+
+
+ :return: The seccomp_profile of this V1SecurityContext. # noqa: E501
+ :rtype: V1SeccompProfile
+ """
+ return self._seccomp_profile
+
+ @seccomp_profile.setter
+ def seccomp_profile(self, seccomp_profile):
+ """Sets the seccomp_profile of this V1SecurityContext.
+
+
+ :param seccomp_profile: The seccomp_profile of this V1SecurityContext. # noqa: E501
+ :type: V1SeccompProfile
+ """
+
+ self._seccomp_profile = seccomp_profile
+
+ @property
+ def windows_options(self):
+ """Gets the windows_options of this V1SecurityContext. # noqa: E501
+
+
+ :return: The windows_options of this V1SecurityContext. # noqa: E501
+ :rtype: V1WindowsSecurityContextOptions
+ """
+ return self._windows_options
+
+ @windows_options.setter
+ def windows_options(self, windows_options):
+ """Sets the windows_options of this V1SecurityContext.
+
+
+ :param windows_options: The windows_options of this V1SecurityContext. # noqa: E501
+ :type: V1WindowsSecurityContextOptions
+ """
+
+ self._windows_options = windows_options
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SecurityContext):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SecurityContext):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review.py
new file mode 100644
index 0000000000..433ffb1ab1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SelfSubjectAccessReview(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1SelfSubjectAccessReviewSpec',
+ 'status': 'V1SubjectAccessReviewStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1SelfSubjectAccessReview - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1SelfSubjectAccessReview. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1SelfSubjectAccessReview. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1SelfSubjectAccessReview.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1SelfSubjectAccessReview. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1SelfSubjectAccessReview. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1SelfSubjectAccessReview. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1SelfSubjectAccessReview.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1SelfSubjectAccessReview. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1SelfSubjectAccessReview. # noqa: E501
+
+
+ :return: The metadata of this V1SelfSubjectAccessReview. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1SelfSubjectAccessReview.
+
+
+ :param metadata: The metadata of this V1SelfSubjectAccessReview. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1SelfSubjectAccessReview. # noqa: E501
+
+
+ :return: The spec of this V1SelfSubjectAccessReview. # noqa: E501
+ :rtype: V1SelfSubjectAccessReviewSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1SelfSubjectAccessReview.
+
+
+ :param spec: The spec of this V1SelfSubjectAccessReview. # noqa: E501
+ :type: V1SelfSubjectAccessReviewSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1SelfSubjectAccessReview. # noqa: E501
+
+
+ :return: The status of this V1SelfSubjectAccessReview. # noqa: E501
+ :rtype: V1SubjectAccessReviewStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1SelfSubjectAccessReview.
+
+
+ :param status: The status of this V1SelfSubjectAccessReview. # noqa: E501
+ :type: V1SubjectAccessReviewStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SelfSubjectAccessReview):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SelfSubjectAccessReview):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review_spec.py
new file mode 100644
index 0000000000..70a20078cf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_access_review_spec.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SelfSubjectAccessReviewSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'non_resource_attributes': 'V1NonResourceAttributes',
+ 'resource_attributes': 'V1ResourceAttributes'
+ }
+
+ attribute_map = {
+ 'non_resource_attributes': 'nonResourceAttributes',
+ 'resource_attributes': 'resourceAttributes'
+ }
+
+ def __init__(self, non_resource_attributes=None, resource_attributes=None, local_vars_configuration=None): # noqa: E501
+ """V1SelfSubjectAccessReviewSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._non_resource_attributes = None
+ self._resource_attributes = None
+ self.discriminator = None
+
+ if non_resource_attributes is not None:
+ self.non_resource_attributes = non_resource_attributes
+ if resource_attributes is not None:
+ self.resource_attributes = resource_attributes
+
+ @property
+ def non_resource_attributes(self):
+ """Gets the non_resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
+
+
+ :return: The non_resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
+ :rtype: V1NonResourceAttributes
+ """
+ return self._non_resource_attributes
+
+ @non_resource_attributes.setter
+ def non_resource_attributes(self, non_resource_attributes):
+ """Sets the non_resource_attributes of this V1SelfSubjectAccessReviewSpec.
+
+
+ :param non_resource_attributes: The non_resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
+ :type: V1NonResourceAttributes
+ """
+
+ self._non_resource_attributes = non_resource_attributes
+
+ @property
+ def resource_attributes(self):
+ """Gets the resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
+
+
+ :return: The resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
+ :rtype: V1ResourceAttributes
+ """
+ return self._resource_attributes
+
+ @resource_attributes.setter
+ def resource_attributes(self, resource_attributes):
+ """Sets the resource_attributes of this V1SelfSubjectAccessReviewSpec.
+
+
+ :param resource_attributes: The resource_attributes of this V1SelfSubjectAccessReviewSpec. # noqa: E501
+ :type: V1ResourceAttributes
+ """
+
+ self._resource_attributes = resource_attributes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SelfSubjectAccessReviewSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SelfSubjectAccessReviewSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review.py
new file mode 100644
index 0000000000..167abf6e75
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SelfSubjectReview(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'status': 'V1SelfSubjectReviewStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1SelfSubjectReview - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1SelfSubjectReview. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1SelfSubjectReview. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1SelfSubjectReview.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1SelfSubjectReview. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1SelfSubjectReview. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1SelfSubjectReview. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1SelfSubjectReview.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1SelfSubjectReview. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1SelfSubjectReview. # noqa: E501
+
+
+ :return: The metadata of this V1SelfSubjectReview. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1SelfSubjectReview.
+
+
+ :param metadata: The metadata of this V1SelfSubjectReview. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def status(self):
+ """Gets the status of this V1SelfSubjectReview. # noqa: E501
+
+
+ :return: The status of this V1SelfSubjectReview. # noqa: E501
+ :rtype: V1SelfSubjectReviewStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1SelfSubjectReview.
+
+
+ :param status: The status of this V1SelfSubjectReview. # noqa: E501
+ :type: V1SelfSubjectReviewStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SelfSubjectReview):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SelfSubjectReview):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review_status.py
new file mode 100644
index 0000000000..92868951f4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_review_status.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SelfSubjectReviewStatus(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'user_info': 'V1UserInfo'
+    }
+
+    attribute_map = {
+        'user_info': 'userInfo'
+    }
+
+    def __init__(self, user_info=None, local_vars_configuration=None):  # noqa: E501
+        """V1SelfSubjectReviewStatus - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._user_info = None
+        self.discriminator = None  # no discriminator: this model is not polymorphic
+
+        if user_info is not None:  # optional field: assigned only when provided
+            self.user_info = user_info
+
+    @property
+    def user_info(self):
+        """Gets the user_info of this V1SelfSubjectReviewStatus.  # noqa: E501
+
+
+        :return: The user_info of this V1SelfSubjectReviewStatus.  # noqa: E501
+        :rtype: V1UserInfo
+        """
+        return self._user_info
+
+    @user_info.setter
+    def user_info(self, user_info):
+        """Sets the user_info of this V1SelfSubjectReviewStatus.
+
+
+        :param user_info: The user_info of this V1SelfSubjectReviewStatus.  # noqa: E501
+        :type: V1UserInfo
+        """
+
+        self._user_info = user_info
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # recurse into nested models, lists and dicts
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1SelfSubjectReviewStatus):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1SelfSubjectReviewStatus):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review.py
new file mode 100644
index 0000000000..936efe480f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SelfSubjectRulesReview(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'kind': 'str',
+        'metadata': 'V1ObjectMeta',
+        'spec': 'V1SelfSubjectRulesReviewSpec',
+        'status': 'V1SubjectRulesReviewStatus'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'kind': 'kind',
+        'metadata': 'metadata',
+        'spec': 'spec',
+        'status': 'status'
+    }
+
+    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
+        """V1SelfSubjectRulesReview - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._kind = None
+        self._metadata = None
+        self._spec = None
+        self._status = None
+        self.discriminator = None  # no discriminator: this model is not polymorphic
+
+        if api_version is not None:  # optional fields: assigned only when provided
+            self.api_version = api_version
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+        self.spec = spec  # required field; setter rejects None under client-side validation
+        if status is not None:
+            self.status = status
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1SelfSubjectRulesReview.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1SelfSubjectRulesReview.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1SelfSubjectRulesReview.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1SelfSubjectRulesReview.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1SelfSubjectRulesReview.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1SelfSubjectRulesReview.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1SelfSubjectRulesReview.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1SelfSubjectRulesReview.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1SelfSubjectRulesReview.  # noqa: E501
+
+
+        :return: The metadata of this V1SelfSubjectRulesReview.  # noqa: E501
+        :rtype: V1ObjectMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1SelfSubjectRulesReview.
+
+
+        :param metadata: The metadata of this V1SelfSubjectRulesReview.  # noqa: E501
+        :type: V1ObjectMeta
+        """
+
+        self._metadata = metadata
+
+    @property
+    def spec(self):
+        """Gets the spec of this V1SelfSubjectRulesReview.  # noqa: E501
+
+
+        :return: The spec of this V1SelfSubjectRulesReview.  # noqa: E501
+        :rtype: V1SelfSubjectRulesReviewSpec
+        """
+        return self._spec
+
+    @spec.setter
+    def spec(self, spec):
+        """Sets the spec of this V1SelfSubjectRulesReview.
+
+
+        :param spec: The spec of this V1SelfSubjectRulesReview.  # noqa: E501
+        :type: V1SelfSubjectRulesReviewSpec
+        """
+        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
+            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
+
+        self._spec = spec
+
+    @property
+    def status(self):
+        """Gets the status of this V1SelfSubjectRulesReview.  # noqa: E501
+
+
+        :return: The status of this V1SelfSubjectRulesReview.  # noqa: E501
+        :rtype: V1SubjectRulesReviewStatus
+        """
+        return self._status
+
+    @status.setter
+    def status(self, status):
+        """Sets the status of this V1SelfSubjectRulesReview.
+
+
+        :param status: The status of this V1SelfSubjectRulesReview.  # noqa: E501
+        :type: V1SubjectRulesReviewStatus
+        """
+
+        self._status = status
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # recurse into nested models, lists and dicts
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1SelfSubjectRulesReview):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1SelfSubjectRulesReview):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review_spec.py
new file mode 100644
index 0000000000..504f2836e9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_self_subject_rules_review_spec.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SelfSubjectRulesReviewSpec(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'namespace': 'str'
+    }
+
+    attribute_map = {
+        'namespace': 'namespace'
+    }
+
+    def __init__(self, namespace=None, local_vars_configuration=None):  # noqa: E501
+        """V1SelfSubjectRulesReviewSpec - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._namespace = None
+        self.discriminator = None  # no discriminator: this model is not polymorphic
+
+        if namespace is not None:  # optional in the constructor despite "Required." in the API doc text
+            self.namespace = namespace
+
+    @property
+    def namespace(self):
+        """Gets the namespace of this V1SelfSubjectRulesReviewSpec.  # noqa: E501
+
+        Namespace to evaluate rules for. Required.  # noqa: E501
+
+        :return: The namespace of this V1SelfSubjectRulesReviewSpec.  # noqa: E501
+        :rtype: str
+        """
+        return self._namespace
+
+    @namespace.setter
+    def namespace(self, namespace):
+        """Sets the namespace of this V1SelfSubjectRulesReviewSpec.
+
+        Namespace to evaluate rules for. Required.  # noqa: E501
+
+        :param namespace: The namespace of this V1SelfSubjectRulesReviewSpec.  # noqa: E501
+        :type: str
+        """
+
+        self._namespace = namespace
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # recurse into nested models, lists and dicts
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1SelfSubjectRulesReviewSpec):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1SelfSubjectRulesReviewSpec):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_server_address_by_client_cidr.py b/contrib/python/kubernetes/kubernetes/client/models/v1_server_address_by_client_cidr.py
new file mode 100644
index 0000000000..d90be5cd75
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_server_address_by_client_cidr.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServerAddressByClientCIDR(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'client_cidr': 'str',
+        'server_address': 'str'
+    }
+
+    attribute_map = {
+        'client_cidr': 'clientCIDR',
+        'server_address': 'serverAddress'
+    }
+
+    def __init__(self, client_cidr=None, server_address=None, local_vars_configuration=None):  # noqa: E501
+        """V1ServerAddressByClientCIDR - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._client_cidr = None
+        self._server_address = None
+        self.discriminator = None  # no discriminator: this model is not polymorphic
+
+        self.client_cidr = client_cidr  # required; setter rejects None under client-side validation
+        self.server_address = server_address  # required; setter rejects None under client-side validation
+
+    @property
+    def client_cidr(self):
+        """Gets the client_cidr of this V1ServerAddressByClientCIDR.  # noqa: E501
+
+        The CIDR with which clients can match their IP to figure out the server address that they should use.  # noqa: E501
+
+        :return: The client_cidr of this V1ServerAddressByClientCIDR.  # noqa: E501
+        :rtype: str
+        """
+        return self._client_cidr
+
+    @client_cidr.setter
+    def client_cidr(self, client_cidr):
+        """Sets the client_cidr of this V1ServerAddressByClientCIDR.
+
+        The CIDR with which clients can match their IP to figure out the server address that they should use.  # noqa: E501
+
+        :param client_cidr: The client_cidr of this V1ServerAddressByClientCIDR.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and client_cidr is None:  # noqa: E501
+            raise ValueError("Invalid value for `client_cidr`, must not be `None`")  # noqa: E501
+
+        self._client_cidr = client_cidr
+
+    @property
+    def server_address(self):
+        """Gets the server_address of this V1ServerAddressByClientCIDR.  # noqa: E501
+
+        Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.  # noqa: E501
+
+        :return: The server_address of this V1ServerAddressByClientCIDR.  # noqa: E501
+        :rtype: str
+        """
+        return self._server_address
+
+    @server_address.setter
+    def server_address(self, server_address):
+        """Sets the server_address of this V1ServerAddressByClientCIDR.
+
+        Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.  # noqa: E501
+
+        :param server_address: The server_address of this V1ServerAddressByClientCIDR.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and server_address is None:  # noqa: E501
+            raise ValueError("Invalid value for `server_address`, must not be `None`")  # noqa: E501
+
+        self._server_address = server_address
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # recurse into nested models, lists and dicts
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1ServerAddressByClientCIDR):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1ServerAddressByClientCIDR):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service.py
new file mode 100644
index 0000000000..165ee33ca0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Service(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'kind': 'str',
+        'metadata': 'V1ObjectMeta',
+        'spec': 'V1ServiceSpec',
+        'status': 'V1ServiceStatus'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'kind': 'kind',
+        'metadata': 'metadata',
+        'spec': 'spec',
+        'status': 'status'
+    }
+
+    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
+        """V1Service - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._kind = None
+        self._metadata = None
+        self._spec = None
+        self._status = None
+        self.discriminator = None  # no discriminator: this model is not polymorphic
+
+        if api_version is not None:  # all fields optional: assigned only when provided
+            self.api_version = api_version
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+        if spec is not None:
+            self.spec = spec
+        if status is not None:
+            self.status = status
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1Service.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1Service.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1Service.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1Service.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1Service.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1Service.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1Service.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1Service.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1Service.  # noqa: E501
+
+
+        :return: The metadata of this V1Service.  # noqa: E501
+        :rtype: V1ObjectMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1Service.
+
+
+        :param metadata: The metadata of this V1Service.  # noqa: E501
+        :type: V1ObjectMeta
+        """
+
+        self._metadata = metadata
+
+    @property
+    def spec(self):
+        """Gets the spec of this V1Service.  # noqa: E501
+
+
+        :return: The spec of this V1Service.  # noqa: E501
+        :rtype: V1ServiceSpec
+        """
+        return self._spec
+
+    @spec.setter
+    def spec(self, spec):
+        """Sets the spec of this V1Service.
+
+
+        :param spec: The spec of this V1Service.  # noqa: E501
+        :type: V1ServiceSpec
+        """
+
+        self._spec = spec
+
+    @property
+    def status(self):
+        """Gets the status of this V1Service.  # noqa: E501
+
+
+        :return: The status of this V1Service.  # noqa: E501
+        :rtype: V1ServiceStatus
+        """
+        return self._status
+
+    @status.setter
+    def status(self, status):
+        """Sets the status of this V1Service.
+
+
+        :param status: The status of this V1Service.  # noqa: E501
+        :type: V1ServiceStatus
+        """
+
+        self._status = status
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # recurse into nested models, lists and dicts
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1Service):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1Service):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_account.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_account.py
new file mode 100644
index 0000000000..5400810148
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_account.py
@@ -0,0 +1,260 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceAccount(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'automount_service_account_token': 'bool',
+        'image_pull_secrets': 'list[V1LocalObjectReference]',
+        'kind': 'str',
+        'metadata': 'V1ObjectMeta',
+        'secrets': 'list[V1ObjectReference]'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'automount_service_account_token': 'automountServiceAccountToken',
+        'image_pull_secrets': 'imagePullSecrets',
+        'kind': 'kind',
+        'metadata': 'metadata',
+        'secrets': 'secrets'
+    }
+
+    def __init__(self, api_version=None, automount_service_account_token=None, image_pull_secrets=None, kind=None, metadata=None, secrets=None, local_vars_configuration=None):  # noqa: E501
+        """V1ServiceAccount - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._automount_service_account_token = None
+        self._image_pull_secrets = None
+        self._kind = None
+        self._metadata = None
+        self._secrets = None
+        self.discriminator = None  # no discriminator: this model is not polymorphic
+
+        if api_version is not None:  # all fields optional: assigned only when provided
+            self.api_version = api_version
+        if automount_service_account_token is not None:
+            self.automount_service_account_token = automount_service_account_token
+        if image_pull_secrets is not None:
+            self.image_pull_secrets = image_pull_secrets
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+        if secrets is not None:
+            self.secrets = secrets
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1ServiceAccount.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1ServiceAccount.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1ServiceAccount.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1ServiceAccount.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def automount_service_account_token(self):
+        """Gets the automount_service_account_token of this V1ServiceAccount.  # noqa: E501
+
+        AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.  # noqa: E501
+
+        :return: The automount_service_account_token of this V1ServiceAccount.  # noqa: E501
+        :rtype: bool
+        """
+        return self._automount_service_account_token
+
+    @automount_service_account_token.setter
+    def automount_service_account_token(self, automount_service_account_token):
+        """Sets the automount_service_account_token of this V1ServiceAccount.
+
+        AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.  # noqa: E501
+
+        :param automount_service_account_token: The automount_service_account_token of this V1ServiceAccount.  # noqa: E501
+        :type: bool
+        """
+
+        self._automount_service_account_token = automount_service_account_token
+
+    @property
+    def image_pull_secrets(self):
+        """Gets the image_pull_secrets of this V1ServiceAccount.  # noqa: E501
+
+        ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod  # noqa: E501
+
+        :return: The image_pull_secrets of this V1ServiceAccount.  # noqa: E501
+        :rtype: list[V1LocalObjectReference]
+        """
+        return self._image_pull_secrets
+
+    @image_pull_secrets.setter
+    def image_pull_secrets(self, image_pull_secrets):
+        """Sets the image_pull_secrets of this V1ServiceAccount.
+
+        ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod  # noqa: E501
+
+        :param image_pull_secrets: The image_pull_secrets of this V1ServiceAccount.  # noqa: E501
+        :type: list[V1LocalObjectReference]
+        """
+
+        self._image_pull_secrets = image_pull_secrets
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1ServiceAccount.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1ServiceAccount.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1ServiceAccount.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1ServiceAccount.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1ServiceAccount.  # noqa: E501
+
+
+        :return: The metadata of this V1ServiceAccount.  # noqa: E501
+        :rtype: V1ObjectMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1ServiceAccount.
+
+
+        :param metadata: The metadata of this V1ServiceAccount.  # noqa: E501
+        :type: V1ObjectMeta
+        """
+
+        self._metadata = metadata
+
+    @property
+    def secrets(self):
+        """Gets the secrets of this V1ServiceAccount.  # noqa: E501
+
+        Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret  # noqa: E501
+
+        :return: The secrets of this V1ServiceAccount.  # noqa: E501
+        :rtype: list[V1ObjectReference]
+        """
+        return self._secrets
+
+    @secrets.setter
+    def secrets(self, secrets):
+        """Sets the secrets of this V1ServiceAccount.
+
+        Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret  # noqa: E501
+
+        :param secrets: The secrets of this V1ServiceAccount.  # noqa: E501
+        :type: list[V1ObjectReference]
+        """
+
+        self._secrets = secrets
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # recurse into nested models, lists and dicts
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1ServiceAccount):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1ServiceAccount):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_account_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_account_list.py
new file mode 100644
index 0000000000..46ca88ae6b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_account_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceAccountList(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'items': 'list[V1ServiceAccount]',
+        'kind': 'str',
+        'metadata': 'V1ListMeta'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'items': 'items',
+        'kind': 'kind',
+        'metadata': 'metadata'
+    }
+
+    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
+        """Initialize V1ServiceAccountList; `items` must not be None when client-side validation is enabled."""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._items = None
+        self._kind = None
+        self._metadata = None
+        self.discriminator = None
+
+        if api_version is not None:
+            self.api_version = api_version
+        self.items = items
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1ServiceAccountList.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1ServiceAccountList.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1ServiceAccountList.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1ServiceAccountList.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def items(self):
+        """Gets the items of this V1ServiceAccountList.  # noqa: E501
+
+        List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/  # noqa: E501
+
+        :return: The items of this V1ServiceAccountList.  # noqa: E501
+        :rtype: list[V1ServiceAccount]
+        """
+        return self._items
+
+    @items.setter
+    def items(self, items):
+        """Sets the items of this V1ServiceAccountList.
+
+        List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/  # noqa: E501
+
+        :param items: The items of this V1ServiceAccountList.  # noqa: E501
+        :type: list[V1ServiceAccount]
+        """
+        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
+            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
+
+        self._items = items
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1ServiceAccountList.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1ServiceAccountList.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1ServiceAccountList.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1ServiceAccountList.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1ServiceAccountList.  # noqa: E501
+
+
+        :return: The metadata of this V1ServiceAccountList.  # noqa: E501
+        :rtype: V1ListMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1ServiceAccountList.
+
+
+        :param metadata: The metadata of this V1ServiceAccountList.  # noqa: E501
+        :type: V1ListMeta
+        """
+
+        self._metadata = metadata
+
+    def to_dict(self):
+        """Serialize this model to a dict, converting nested models via to_dict()."""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Return a pretty-printed string of the model's to_dict() output."""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """Return the developer representation; delegates to to_str()."""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Return True if `other` is a V1ServiceAccountList with equal field values."""
+        if not isinstance(other, V1ServiceAccountList):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Return True if the models differ in type or in any field value."""
+        if not isinstance(other, V1ServiceAccountList):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_account_token_projection.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_account_token_projection.py
new file mode 100644
index 0000000000..e2911bb89a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_account_token_projection.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceAccountTokenProjection(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'audience': 'str',
+        'expiration_seconds': 'int',
+        'path': 'str'
+    }
+
+    attribute_map = {
+        'audience': 'audience',
+        'expiration_seconds': 'expirationSeconds',
+        'path': 'path'
+    }
+
+    def __init__(self, audience=None, expiration_seconds=None, path=None, local_vars_configuration=None):  # noqa: E501
+        """Initialize V1ServiceAccountTokenProjection; `path` must not be None when client-side validation is enabled."""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._audience = None
+        self._expiration_seconds = None
+        self._path = None
+        self.discriminator = None
+
+        if audience is not None:
+            self.audience = audience
+        if expiration_seconds is not None:
+            self.expiration_seconds = expiration_seconds
+        self.path = path
+
+    @property
+    def audience(self):
+        """Gets the audience of this V1ServiceAccountTokenProjection.  # noqa: E501
+
+        audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.  # noqa: E501
+
+        :return: The audience of this V1ServiceAccountTokenProjection.  # noqa: E501
+        :rtype: str
+        """
+        return self._audience
+
+    @audience.setter
+    def audience(self, audience):
+        """Sets the audience of this V1ServiceAccountTokenProjection.
+
+        audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.  # noqa: E501
+
+        :param audience: The audience of this V1ServiceAccountTokenProjection.  # noqa: E501
+        :type: str
+        """
+
+        self._audience = audience
+
+    @property
+    def expiration_seconds(self):
+        """Gets the expiration_seconds of this V1ServiceAccountTokenProjection.  # noqa: E501
+
+        expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.  # noqa: E501
+
+        :return: The expiration_seconds of this V1ServiceAccountTokenProjection.  # noqa: E501
+        :rtype: int
+        """
+        return self._expiration_seconds
+
+    @expiration_seconds.setter
+    def expiration_seconds(self, expiration_seconds):
+        """Sets the expiration_seconds of this V1ServiceAccountTokenProjection.
+
+        expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.  # noqa: E501
+
+        :param expiration_seconds: The expiration_seconds of this V1ServiceAccountTokenProjection.  # noqa: E501
+        :type: int
+        """
+
+        self._expiration_seconds = expiration_seconds
+
+    @property
+    def path(self):
+        """Gets the path of this V1ServiceAccountTokenProjection.  # noqa: E501
+
+        path is the path relative to the mount point of the file to project the token into.  # noqa: E501
+
+        :return: The path of this V1ServiceAccountTokenProjection.  # noqa: E501
+        :rtype: str
+        """
+        return self._path
+
+    @path.setter
+    def path(self, path):
+        """Sets the path of this V1ServiceAccountTokenProjection.
+
+        path is the path relative to the mount point of the file to project the token into.  # noqa: E501
+
+        :param path: The path of this V1ServiceAccountTokenProjection.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
+            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501
+
+        self._path = path
+
+    def to_dict(self):
+        """Serialize this model to a dict, converting nested models via to_dict()."""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Return a pretty-printed string of the model's to_dict() output."""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """Return the developer representation; delegates to to_str()."""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Return True if `other` is a V1ServiceAccountTokenProjection with equal field values."""
+        if not isinstance(other, V1ServiceAccountTokenProjection):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Return True if the models differ in type or in any field value."""
+        if not isinstance(other, V1ServiceAccountTokenProjection):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_backend_port.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_backend_port.py
new file mode 100644
index 0000000000..b43358631e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_backend_port.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceBackendPort(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'name': 'str',
+        'number': 'int'
+    }
+
+    attribute_map = {
+        'name': 'name',
+        'number': 'number'
+    }
+
+    def __init__(self, name=None, number=None, local_vars_configuration=None):  # noqa: E501
+        """Initialize V1ServiceBackendPort; per the API, `name` and `number` are mutually exclusive."""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._name = None
+        self._number = None
+        self.discriminator = None
+
+        if name is not None:
+            self.name = name
+        if number is not None:
+            self.number = number
+
+    @property
+    def name(self):
+        """Gets the name of this V1ServiceBackendPort.  # noqa: E501
+
+        name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".  # noqa: E501
+
+        :return: The name of this V1ServiceBackendPort.  # noqa: E501
+        :rtype: str
+        """
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        """Sets the name of this V1ServiceBackendPort.
+
+        name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".  # noqa: E501
+
+        :param name: The name of this V1ServiceBackendPort.  # noqa: E501
+        :type: str
+        """
+
+        self._name = name
+
+    @property
+    def number(self):
+        """Gets the number of this V1ServiceBackendPort.  # noqa: E501
+
+        number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".  # noqa: E501
+
+        :return: The number of this V1ServiceBackendPort.  # noqa: E501
+        :rtype: int
+        """
+        return self._number
+
+    @number.setter
+    def number(self, number):
+        """Sets the number of this V1ServiceBackendPort.
+
+        number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".  # noqa: E501
+
+        :param number: The number of this V1ServiceBackendPort.  # noqa: E501
+        :type: int
+        """
+
+        self._number = number
+
+    def to_dict(self):
+        """Serialize this model to a dict, converting nested models via to_dict()."""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Return a pretty-printed string of the model's to_dict() output."""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """Return the developer representation; delegates to to_str()."""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Return True if `other` is a V1ServiceBackendPort with equal field values."""
+        if not isinstance(other, V1ServiceBackendPort):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Return True if the models differ in type or in any field value."""
+        if not isinstance(other, V1ServiceBackendPort):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_list.py
new file mode 100644
index 0000000000..dce0113144
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceList(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'items': 'list[V1Service]',
+        'kind': 'str',
+        'metadata': 'V1ListMeta'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'items': 'items',
+        'kind': 'kind',
+        'metadata': 'metadata'
+    }
+
+    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
+        """Initialize V1ServiceList; `items` must not be None when client-side validation is enabled."""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._api_version = None
+        self._items = None
+        self._kind = None
+        self._metadata = None
+        self.discriminator = None
+
+        if api_version is not None:
+            self.api_version = api_version
+        self.items = items
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1ServiceList.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1ServiceList.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1ServiceList.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1ServiceList.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def items(self):
+        """Gets the items of this V1ServiceList.  # noqa: E501
+
+        List of services  # noqa: E501
+
+        :return: The items of this V1ServiceList.  # noqa: E501
+        :rtype: list[V1Service]
+        """
+        return self._items
+
+    @items.setter
+    def items(self, items):
+        """Sets the items of this V1ServiceList.
+
+        List of services  # noqa: E501
+
+        :param items: The items of this V1ServiceList.  # noqa: E501
+        :type: list[V1Service]
+        """
+        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
+            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
+
+        self._items = items
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1ServiceList.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1ServiceList.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1ServiceList.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1ServiceList.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1ServiceList.  # noqa: E501
+
+
+        :return: The metadata of this V1ServiceList.  # noqa: E501
+        :rtype: V1ListMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1ServiceList.
+
+
+        :param metadata: The metadata of this V1ServiceList.  # noqa: E501
+        :type: V1ListMeta
+        """
+
+        self._metadata = metadata
+
+    def to_dict(self):
+        """Serialize this model to a dict, converting nested models via to_dict()."""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Return a pretty-printed string of the model's to_dict() output."""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """Return the developer representation; delegates to to_str()."""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Return True if `other` is a V1ServiceList with equal field values."""
+        if not isinstance(other, V1ServiceList):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Return True if the models differ in type or in any field value."""
+        if not isinstance(other, V1ServiceList):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_port.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_port.py
new file mode 100644
index 0000000000..8aee95d92c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_port.py
@@ -0,0 +1,263 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServicePort(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'app_protocol': 'str',
+        'name': 'str',
+        'node_port': 'int',
+        'port': 'int',
+        'protocol': 'str',
+        'target_port': 'object'
+    }
+
+    attribute_map = {
+        'app_protocol': 'appProtocol',
+        'name': 'name',
+        'node_port': 'nodePort',
+        'port': 'port',
+        'protocol': 'protocol',
+        'target_port': 'targetPort'
+    }
+
+    def __init__(self, app_protocol=None, name=None, node_port=None, port=None, protocol=None, target_port=None, local_vars_configuration=None):  # noqa: E501
+        """Initialize V1ServicePort; `port` must not be None when client-side validation is enabled."""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._app_protocol = None
+        self._name = None
+        self._node_port = None
+        self._port = None
+        self._protocol = None
+        self._target_port = None
+        self.discriminator = None
+
+        if app_protocol is not None:
+            self.app_protocol = app_protocol
+        if name is not None:
+            self.name = name
+        if node_port is not None:
+            self.node_port = node_port
+        self.port = port
+        if protocol is not None:
+            self.protocol = protocol
+        if target_port is not None:
+            self.target_port = target_port
+
+    @property
+    def app_protocol(self):
+        """Gets the app_protocol of this V1ServicePort.  # noqa: E501
+
+        The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.  # noqa: E501
+
+        :return: The app_protocol of this V1ServicePort.  # noqa: E501
+        :rtype: str
+        """
+        return self._app_protocol
+
+    @app_protocol.setter
+    def app_protocol(self, app_protocol):
+        """Sets the app_protocol of this V1ServicePort.
+
+        The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either: * Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 * Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.  # noqa: E501
+
+        :param app_protocol: The app_protocol of this V1ServicePort.  # noqa: E501
+        :type: str
+        """
+
+        self._app_protocol = app_protocol
+
+    @property
+    def name(self):
+        """Gets the name of this V1ServicePort.  # noqa: E501
+
+        The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.  # noqa: E501
+
+        :return: The name of this V1ServicePort.  # noqa: E501
+        :rtype: str
+        """
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        """Sets the name of this V1ServicePort.
+
+        The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.  # noqa: E501
+
+        :param name: The name of this V1ServicePort.  # noqa: E501
+        :type: str
+        """
+
+        self._name = name
+
+    @property
+    def node_port(self):
+        """Gets the node_port of this V1ServicePort.  # noqa: E501
+
+        The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport  # noqa: E501
+
+        :return: The node_port of this V1ServicePort.  # noqa: E501
+        :rtype: int
+        """
+        return self._node_port
+
+    @node_port.setter
+    def node_port(self, node_port):
+        """Sets the node_port of this V1ServicePort.
+
+        The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport  # noqa: E501
+
+        :param node_port: The node_port of this V1ServicePort.  # noqa: E501
+        :type: int
+        """
+
+        self._node_port = node_port
+
+    @property
+    def port(self):
+        """Gets the port of this V1ServicePort.  # noqa: E501
+
+        The port that will be exposed by this service.  # noqa: E501
+
+        :return: The port of this V1ServicePort.  # noqa: E501
+        :rtype: int
+        """
+        return self._port
+
+    @port.setter
+    def port(self, port):
+        """Sets the port of this V1ServicePort.
+
+        The port that will be exposed by this service.  # noqa: E501
+
+        :param port: The port of this V1ServicePort.  # noqa: E501
+        :type: int
+        """
+        if self.local_vars_configuration.client_side_validation and port is None:  # noqa: E501
+            raise ValueError("Invalid value for `port`, must not be `None`")  # noqa: E501
+
+        self._port = port
+
+    @property
+    def protocol(self):
+        """Gets the protocol of this V1ServicePort.  # noqa: E501
+
+        The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.  # noqa: E501
+
+        :return: The protocol of this V1ServicePort.  # noqa: E501
+        :rtype: str
+        """
+        return self._protocol
+
+    @protocol.setter
+    def protocol(self, protocol):
+        """Sets the protocol of this V1ServicePort.
+
+        The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.  # noqa: E501
+
+        :param protocol: The protocol of this V1ServicePort.  # noqa: E501
+        :type: str
+        """
+
+        self._protocol = protocol
+
+    @property
+    def target_port(self):
+        """Gets the target_port of this V1ServicePort.  # noqa: E501
+
+        Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service  # noqa: E501
+
+        :return: The target_port of this V1ServicePort.  # noqa: E501
+        :rtype: object
+        """
+        return self._target_port
+
+    @target_port.setter
+    def target_port(self, target_port):
+        """Sets the target_port of this V1ServicePort.
+
+        Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service  # noqa: E501
+
+        :param target_port: The target_port of this V1ServicePort.  # noqa: E501
+        :type: object
+        """
+
+        self._target_port = target_port
+
+    def to_dict(self):
+        """Serialize this model to a dict, converting nested models via to_dict()."""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Return a pretty-printed string of the model's to_dict() output."""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """Return the developer representation; delegates to to_str()."""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Return True if `other` is a V1ServicePort with equal field values."""
+        if not isinstance(other, V1ServicePort):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Return True if the models differ in type or in any field value."""
+        if not isinstance(other, V1ServicePort):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_spec.py
new file mode 100644
index 0000000000..5a8c83a9c6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_spec.py
@@ -0,0 +1,624 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Python attribute name -> OpenAPI type string. Attribute names are
+    # derived mechanically from the JSON keys by the generator, which is
+    # why e.g. clusterIPs becomes cluster_i_ps below.
+    openapi_types = {
+        'allocate_load_balancer_node_ports': 'bool',
+        'cluster_ip': 'str',
+        'cluster_i_ps': 'list[str]',
+        'external_i_ps': 'list[str]',
+        'external_name': 'str',
+        'external_traffic_policy': 'str',
+        'health_check_node_port': 'int',
+        'internal_traffic_policy': 'str',
+        'ip_families': 'list[str]',
+        'ip_family_policy': 'str',
+        'load_balancer_class': 'str',
+        'load_balancer_ip': 'str',
+        'load_balancer_source_ranges': 'list[str]',
+        'ports': 'list[V1ServicePort]',
+        'publish_not_ready_addresses': 'bool',
+        'selector': 'dict(str, str)',
+        'session_affinity': 'str',
+        'session_affinity_config': 'V1SessionAffinityConfig',
+        'type': 'str'
+    }
+
+    # Python attribute name -> JSON key in the serialized API payload.
+    attribute_map = {
+        'allocate_load_balancer_node_ports': 'allocateLoadBalancerNodePorts',
+        'cluster_ip': 'clusterIP',
+        'cluster_i_ps': 'clusterIPs',
+        'external_i_ps': 'externalIPs',
+        'external_name': 'externalName',
+        'external_traffic_policy': 'externalTrafficPolicy',
+        'health_check_node_port': 'healthCheckNodePort',
+        'internal_traffic_policy': 'internalTrafficPolicy',
+        'ip_families': 'ipFamilies',
+        'ip_family_policy': 'ipFamilyPolicy',
+        'load_balancer_class': 'loadBalancerClass',
+        'load_balancer_ip': 'loadBalancerIP',
+        'load_balancer_source_ranges': 'loadBalancerSourceRanges',
+        'ports': 'ports',
+        'publish_not_ready_addresses': 'publishNotReadyAddresses',
+        'selector': 'selector',
+        'session_affinity': 'sessionAffinity',
+        'session_affinity_config': 'sessionAffinityConfig',
+        'type': 'type'
+    }
+
+    def __init__(self, allocate_load_balancer_node_ports=None, cluster_ip=None, cluster_i_ps=None, external_i_ps=None, external_name=None, external_traffic_policy=None, health_check_node_port=None, internal_traffic_policy=None, ip_families=None, ip_family_policy=None, load_balancer_class=None, load_balancer_ip=None, load_balancer_source_ranges=None, ports=None, publish_not_ready_addresses=None, selector=None, session_affinity=None, session_affinity_config=None, type=None, local_vars_configuration=None):  # noqa: E501
+        """V1ServiceSpec - a model defined in OpenAPI""" # noqa: E501
+        # Fall back to a default client Configuration when none is supplied.
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        # Private backing fields for the generated property accessors;
+        # every optional field starts out unset (None).
+        self._allocate_load_balancer_node_ports = None
+        self._cluster_ip = None
+        self._cluster_i_ps = None
+        self._external_i_ps = None
+        self._external_name = None
+        self._external_traffic_policy = None
+        self._health_check_node_port = None
+        self._internal_traffic_policy = None
+        self._ip_families = None
+        self._ip_family_policy = None
+        self._load_balancer_class = None
+        self._load_balancer_ip = None
+        self._load_balancer_source_ranges = None
+        self._ports = None
+        self._publish_not_ready_addresses = None
+        self._selector = None
+        self._session_affinity = None
+        self._session_affinity_config = None
+        self._type = None
+        # No discriminator: this model is not polymorphic.
+        self.discriminator = None
+
+        # Route constructor arguments through the property setters, assigning
+        # only arguments that were explicitly provided so omitted optional
+        # fields remain None.
+        if allocate_load_balancer_node_ports is not None:
+            self.allocate_load_balancer_node_ports = allocate_load_balancer_node_ports
+        if cluster_ip is not None:
+            self.cluster_ip = cluster_ip
+        if cluster_i_ps is not None:
+            self.cluster_i_ps = cluster_i_ps
+        if external_i_ps is not None:
+            self.external_i_ps = external_i_ps
+        if external_name is not None:
+            self.external_name = external_name
+        if external_traffic_policy is not None:
+            self.external_traffic_policy = external_traffic_policy
+        if health_check_node_port is not None:
+            self.health_check_node_port = health_check_node_port
+        if internal_traffic_policy is not None:
+            self.internal_traffic_policy = internal_traffic_policy
+        if ip_families is not None:
+            self.ip_families = ip_families
+        if ip_family_policy is not None:
+            self.ip_family_policy = ip_family_policy
+        if load_balancer_class is not None:
+            self.load_balancer_class = load_balancer_class
+        if load_balancer_ip is not None:
+            self.load_balancer_ip = load_balancer_ip
+        if load_balancer_source_ranges is not None:
+            self.load_balancer_source_ranges = load_balancer_source_ranges
+        if ports is not None:
+            self.ports = ports
+        if publish_not_ready_addresses is not None:
+            self.publish_not_ready_addresses = publish_not_ready_addresses
+        if selector is not None:
+            self.selector = selector
+        if session_affinity is not None:
+            self.session_affinity = session_affinity
+        if session_affinity_config is not None:
+            self.session_affinity_config = session_affinity_config
+        if type is not None:
+            self.type = type
+
+    # --- Generated accessors -------------------------------------------
+    # Each field gets a trivial @property/setter pair over its private
+    # backing attribute; no validation is performed in this model.
+
+    @property
+    def allocate_load_balancer_node_ports(self):
+        """Gets the allocate_load_balancer_node_ports of this V1ServiceSpec. # noqa: E501
+
+        allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. # noqa: E501
+
+        :return: The allocate_load_balancer_node_ports of this V1ServiceSpec. # noqa: E501
+        :rtype: bool
+        """
+        return self._allocate_load_balancer_node_ports
+
+    @allocate_load_balancer_node_ports.setter
+    def allocate_load_balancer_node_ports(self, allocate_load_balancer_node_ports):
+        """Sets the allocate_load_balancer_node_ports of this V1ServiceSpec.
+
+        See the ``allocate_load_balancer_node_ports`` getter above for the full field description. # noqa: E501
+
+        :param allocate_load_balancer_node_ports: The allocate_load_balancer_node_ports of this V1ServiceSpec. # noqa: E501
+        :type: bool
+        """
+
+        self._allocate_load_balancer_node_ports = allocate_load_balancer_node_ports
+
+    @property
+    def cluster_ip(self):
+        """Gets the cluster_ip of this V1ServiceSpec. # noqa: E501
+
+        clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
+
+        :return: The cluster_ip of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._cluster_ip
+
+    @cluster_ip.setter
+    def cluster_ip(self, cluster_ip):
+        """Sets the cluster_ip of this V1ServiceSpec.
+
+        See the ``cluster_ip`` getter above for the full field description. # noqa: E501
+
+        :param cluster_ip: The cluster_ip of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._cluster_ip = cluster_ip
+
+    @property
+    def cluster_i_ps(self):
+        """Gets the cluster_i_ps of this V1ServiceSpec. # noqa: E501
+
+        ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value. This field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
+
+        :return: The cluster_i_ps of this V1ServiceSpec. # noqa: E501
+        :rtype: list[str]
+        """
+        return self._cluster_i_ps
+
+    @cluster_i_ps.setter
+    def cluster_i_ps(self, cluster_i_ps):
+        """Sets the cluster_i_ps of this V1ServiceSpec.
+
+        See the ``cluster_i_ps`` getter above for the full field description. # noqa: E501
+
+        :param cluster_i_ps: The cluster_i_ps of this V1ServiceSpec. # noqa: E501
+        :type: list[str]
+        """
+
+        self._cluster_i_ps = cluster_i_ps
+
+    @property
+    def external_i_ps(self):
+        """Gets the external_i_ps of this V1ServiceSpec. # noqa: E501
+
+        externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. # noqa: E501
+
+        :return: The external_i_ps of this V1ServiceSpec. # noqa: E501
+        :rtype: list[str]
+        """
+        return self._external_i_ps
+
+    @external_i_ps.setter
+    def external_i_ps(self, external_i_ps):
+        """Sets the external_i_ps of this V1ServiceSpec.
+
+        See the ``external_i_ps`` getter above for the full field description. # noqa: E501
+
+        :param external_i_ps: The external_i_ps of this V1ServiceSpec. # noqa: E501
+        :type: list[str]
+        """
+
+        self._external_i_ps = external_i_ps
+
+    @property
+    def external_name(self):
+        """Gets the external_name of this V1ServiceSpec. # noqa: E501
+
+        externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\". # noqa: E501
+
+        :return: The external_name of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._external_name
+
+    @external_name.setter
+    def external_name(self, external_name):
+        """Sets the external_name of this V1ServiceSpec.
+
+        See the ``external_name`` getter above for the full field description. # noqa: E501
+
+        :param external_name: The external_name of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._external_name = external_name
+
+    @property
+    def external_traffic_policy(self):
+        """Gets the external_traffic_policy of this V1ServiceSpec. # noqa: E501
+
+        externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node. # noqa: E501
+
+        :return: The external_traffic_policy of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._external_traffic_policy
+
+    @external_traffic_policy.setter
+    def external_traffic_policy(self, external_traffic_policy):
+        """Sets the external_traffic_policy of this V1ServiceSpec.
+
+        See the ``external_traffic_policy`` getter above for the full field description. # noqa: E501
+
+        :param external_traffic_policy: The external_traffic_policy of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._external_traffic_policy = external_traffic_policy
+
+    @property
+    def health_check_node_port(self):
+        """Gets the health_check_node_port of this V1ServiceSpec. # noqa: E501
+
+        healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set. # noqa: E501
+
+        :return: The health_check_node_port of this V1ServiceSpec. # noqa: E501
+        :rtype: int
+        """
+        return self._health_check_node_port
+
+    @health_check_node_port.setter
+    def health_check_node_port(self, health_check_node_port):
+        """Sets the health_check_node_port of this V1ServiceSpec.
+
+        See the ``health_check_node_port`` getter above for the full field description. # noqa: E501
+
+        :param health_check_node_port: The health_check_node_port of this V1ServiceSpec. # noqa: E501
+        :type: int
+        """
+
+        self._health_check_node_port = health_check_node_port
+
+    @property
+    def internal_traffic_policy(self):
+        """Gets the internal_traffic_policy of this V1ServiceSpec. # noqa: E501
+
+        InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). # noqa: E501
+
+        :return: The internal_traffic_policy of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._internal_traffic_policy
+
+    @internal_traffic_policy.setter
+    def internal_traffic_policy(self, internal_traffic_policy):
+        """Sets the internal_traffic_policy of this V1ServiceSpec.
+
+        See the ``internal_traffic_policy`` getter above for the full field description. # noqa: E501
+
+        :param internal_traffic_policy: The internal_traffic_policy of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._internal_traffic_policy = internal_traffic_policy
+
+    @property
+    def ip_families(self):
+        """Gets the ip_families of this V1ServiceSpec. # noqa: E501
+
+        IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName. This field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. # noqa: E501
+
+        :return: The ip_families of this V1ServiceSpec. # noqa: E501
+        :rtype: list[str]
+        """
+        return self._ip_families
+
+    @ip_families.setter
+    def ip_families(self, ip_families):
+        """Sets the ip_families of this V1ServiceSpec.
+
+        See the ``ip_families`` getter above for the full field description. # noqa: E501
+
+        :param ip_families: The ip_families of this V1ServiceSpec. # noqa: E501
+        :type: list[str]
+        """
+
+        self._ip_families = ip_families
+
+    @property
+    def ip_family_policy(self):
+        """Gets the ip_family_policy of this V1ServiceSpec. # noqa: E501
+
+        IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName. # noqa: E501
+
+        :return: The ip_family_policy of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._ip_family_policy
+
+    @ip_family_policy.setter
+    def ip_family_policy(self, ip_family_policy):
+        """Sets the ip_family_policy of this V1ServiceSpec.
+
+        See the ``ip_family_policy`` getter above for the full field description. # noqa: E501
+
+        :param ip_family_policy: The ip_family_policy of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._ip_family_policy = ip_family_policy
+
+    @property
+    def load_balancer_class(self):
+        """Gets the load_balancer_class of this V1ServiceSpec. # noqa: E501
+
+        loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. # noqa: E501
+
+        :return: The load_balancer_class of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._load_balancer_class
+
+    @load_balancer_class.setter
+    def load_balancer_class(self, load_balancer_class):
+        """Sets the load_balancer_class of this V1ServiceSpec.
+
+        See the ``load_balancer_class`` getter above for the full field description. # noqa: E501
+
+        :param load_balancer_class: The load_balancer_class of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._load_balancer_class = load_balancer_class
+
+    @property
+    def load_balancer_ip(self):
+        """Gets the load_balancer_ip of this V1ServiceSpec. # noqa: E501
+
+        Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available. # noqa: E501
+
+        :return: The load_balancer_ip of this V1ServiceSpec. # noqa: E501
+        :rtype: str
+        """
+        return self._load_balancer_ip
+
+    @load_balancer_ip.setter
+    def load_balancer_ip(self, load_balancer_ip):
+        """Sets the load_balancer_ip of this V1ServiceSpec.
+
+        See the ``load_balancer_ip`` getter above for the full field description (note: deprecated upstream). # noqa: E501
+
+        :param load_balancer_ip: The load_balancer_ip of this V1ServiceSpec. # noqa: E501
+        :type: str
+        """
+
+        self._load_balancer_ip = load_balancer_ip
+
+    @property
+    def load_balancer_source_ranges(self):
+        """Gets the load_balancer_source_ranges of this V1ServiceSpec. # noqa: E501
+
+        If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ # noqa: E501
+
+        :return: The load_balancer_source_ranges of this V1ServiceSpec. # noqa: E501
+        :rtype: list[str]
+        """
+        return self._load_balancer_source_ranges
+
+    @load_balancer_source_ranges.setter
+    def load_balancer_source_ranges(self, load_balancer_source_ranges):
+        """Sets the load_balancer_source_ranges of this V1ServiceSpec.
+
+        See the ``load_balancer_source_ranges`` getter above for the full field description. # noqa: E501
+
+        :param load_balancer_source_ranges: The load_balancer_source_ranges of this V1ServiceSpec. # noqa: E501
+        :type: list[str]
+        """
+
+        self._load_balancer_source_ranges = load_balancer_source_ranges
+
+    @property
+    def ports(self):
+        """Gets the ports of this V1ServiceSpec. # noqa: E501
+
+        The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
+
+        :return: The ports of this V1ServiceSpec. # noqa: E501
+        :rtype: list[V1ServicePort]
+        """
+        return self._ports
+
+    @ports.setter
+    def ports(self, ports):
+        """Sets the ports of this V1ServiceSpec.
+
+        See the ``ports`` getter above for the full field description. # noqa: E501
+
+        :param ports: The ports of this V1ServiceSpec. # noqa: E501
+        :type: list[V1ServicePort]
+        """
+
+        self._ports = ports
+
+    @property
+    def publish_not_ready_addresses(self):
+        """Gets the publish_not_ready_addresses of this V1ServiceSpec. # noqa: E501
+
+        publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior. # noqa: E501
+
+        :return: The publish_not_ready_addresses of this V1ServiceSpec. # noqa: E501
+        :rtype: bool
+        """
+        return self._publish_not_ready_addresses
+
+    @publish_not_ready_addresses.setter
+    def publish_not_ready_addresses(self, publish_not_ready_addresses):
+        """Sets the publish_not_ready_addresses of this V1ServiceSpec.
+
+        See the ``publish_not_ready_addresses`` getter above for the full field description. # noqa: E501
+
+        :param publish_not_ready_addresses: The publish_not_ready_addresses of this V1ServiceSpec. # noqa: E501
+        :type: bool
+        """
+
+        self._publish_not_ready_addresses = publish_not_ready_addresses
+
+    @property
+    def selector(self):
+        """Gets the selector of this V1ServiceSpec. # noqa: E501
+
+        Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ # noqa: E501
+
+        :return: The selector of this V1ServiceSpec. # noqa: E501
+        :rtype: dict(str, str)
+        """
+        return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1ServiceSpec.
+
+ Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ # noqa: E501
+
+ :param selector: The selector of this V1ServiceSpec. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._selector = selector
+
+ @property
+ def session_affinity(self):
+ """Gets the session_affinity of this V1ServiceSpec. # noqa: E501
+
+ Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
+
+ :return: The session_affinity of this V1ServiceSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._session_affinity
+
+ @session_affinity.setter
+ def session_affinity(self, session_affinity):
+ """Sets the session_affinity of this V1ServiceSpec.
+
+ Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
+
+ :param session_affinity: The session_affinity of this V1ServiceSpec. # noqa: E501
+ :type: str
+ """
+
+ self._session_affinity = session_affinity
+
+ @property
+ def session_affinity_config(self):
+ """Gets the session_affinity_config of this V1ServiceSpec. # noqa: E501
+
+
+ :return: The session_affinity_config of this V1ServiceSpec. # noqa: E501
+ :rtype: V1SessionAffinityConfig
+ """
+ return self._session_affinity_config
+
+ @session_affinity_config.setter
+ def session_affinity_config(self, session_affinity_config):
+ """Sets the session_affinity_config of this V1ServiceSpec.
+
+
+ :param session_affinity_config: The session_affinity_config of this V1ServiceSpec. # noqa: E501
+ :type: V1SessionAffinityConfig
+ """
+
+ self._session_affinity_config = session_affinity_config
+
+ @property
+ def type(self):
+ """Gets the type of this V1ServiceSpec. # noqa: E501
+
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types # noqa: E501
+
+ :return: The type of this V1ServiceSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1ServiceSpec.
+
+ type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types # noqa: E501
+
+ :param type: The type of this V1ServiceSpec. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ServiceSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ServiceSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_service_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_service_status.py
new file mode 100644
index 0000000000..a2e4654d2e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_service_status.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1ServiceStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1Condition]',
+ 'load_balancer': 'V1LoadBalancerStatus'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions',
+ 'load_balancer': 'loadBalancer'
+ }
+
+ def __init__(self, conditions=None, load_balancer=None, local_vars_configuration=None): # noqa: E501
+ """V1ServiceStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self._load_balancer = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+ if load_balancer is not None:
+ self.load_balancer = load_balancer
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1ServiceStatus. # noqa: E501
+
+ Current service state # noqa: E501
+
+ :return: The conditions of this V1ServiceStatus. # noqa: E501
+ :rtype: list[V1Condition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1ServiceStatus.
+
+ Current service state # noqa: E501
+
+ :param conditions: The conditions of this V1ServiceStatus. # noqa: E501
+ :type: list[V1Condition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def load_balancer(self):
+ """Gets the load_balancer of this V1ServiceStatus. # noqa: E501
+
+
+ :return: The load_balancer of this V1ServiceStatus. # noqa: E501
+ :rtype: V1LoadBalancerStatus
+ """
+ return self._load_balancer
+
+ @load_balancer.setter
+ def load_balancer(self, load_balancer):
+ """Sets the load_balancer of this V1ServiceStatus.
+
+
+ :param load_balancer: The load_balancer of this V1ServiceStatus. # noqa: E501
+ :type: V1LoadBalancerStatus
+ """
+
+ self._load_balancer = load_balancer
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1ServiceStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1ServiceStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_session_affinity_config.py b/contrib/python/kubernetes/kubernetes/client/models/v1_session_affinity_config.py
new file mode 100644
index 0000000000..eb1eb4dc36
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_session_affinity_config.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1SessionAffinityConfig(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'client_ip': 'V1ClientIPConfig'
+ }
+
+ attribute_map = {
+ 'client_ip': 'clientIP'
+ }
+
+ def __init__(self, client_ip=None, local_vars_configuration=None): # noqa: E501
+ """V1SessionAffinityConfig - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._client_ip = None
+ self.discriminator = None
+
+ if client_ip is not None:
+ self.client_ip = client_ip
+
+ @property
+ def client_ip(self):
+ """Gets the client_ip of this V1SessionAffinityConfig. # noqa: E501
+
+
+ :return: The client_ip of this V1SessionAffinityConfig. # noqa: E501
+ :rtype: V1ClientIPConfig
+ """
+ return self._client_ip
+
+ @client_ip.setter
+ def client_ip(self, client_ip):
+ """Sets the client_ip of this V1SessionAffinityConfig.
+
+
+ :param client_ip: The client_ip of this V1SessionAffinityConfig. # noqa: E501
+ :type: V1ClientIPConfig
+ """
+
+ self._client_ip = client_ip
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1SessionAffinityConfig):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1SessionAffinityConfig):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set.py
new file mode 100644
index 0000000000..0ce6093cbe
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StatefulSet(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1StatefulSetSpec',
+ 'status': 'V1StatefulSetStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1StatefulSet - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1StatefulSet. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1StatefulSet. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1StatefulSet.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1StatefulSet. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1StatefulSet. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1StatefulSet. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1StatefulSet.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1StatefulSet. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1StatefulSet. # noqa: E501
+
+
+ :return: The metadata of this V1StatefulSet. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1StatefulSet.
+
+
+ :param metadata: The metadata of this V1StatefulSet. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1StatefulSet. # noqa: E501
+
+
+ :return: The spec of this V1StatefulSet. # noqa: E501
+ :rtype: V1StatefulSetSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1StatefulSet.
+
+
+ :param spec: The spec of this V1StatefulSet. # noqa: E501
+ :type: V1StatefulSetSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1StatefulSet. # noqa: E501
+
+
+ :return: The status of this V1StatefulSet. # noqa: E501
+ :rtype: V1StatefulSetStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1StatefulSet.
+
+
+ :param status: The status of this V1StatefulSet. # noqa: E501
+ :type: V1StatefulSetStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1StatefulSet):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1StatefulSet):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_condition.py
new file mode 100644
index 0000000000..b17a056758
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StatefulSetCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1StatefulSetCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1StatefulSetCondition. # noqa: E501
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1StatefulSetCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1StatefulSetCondition.
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1StatefulSetCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1StatefulSetCondition. # noqa: E501
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :return: The message of this V1StatefulSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1StatefulSetCondition.
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :param message: The message of this V1StatefulSetCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1StatefulSetCondition. # noqa: E501
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1StatefulSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1StatefulSetCondition.
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1StatefulSetCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1StatefulSetCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1StatefulSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1StatefulSetCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1StatefulSetCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1StatefulSetCondition. # noqa: E501
+
+ Type of statefulset condition. # noqa: E501
+
+ :return: The type of this V1StatefulSetCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1StatefulSetCondition.
+
+ Type of statefulset condition. # noqa: E501
+
+ :param type: The type of this V1StatefulSetCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1StatefulSetCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1StatefulSetCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_list.py
new file mode 100644
index 0000000000..0e392dcecf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StatefulSetList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1StatefulSet]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1StatefulSetList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1StatefulSetList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1StatefulSetList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1StatefulSetList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1StatefulSetList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1StatefulSetList. # noqa: E501
+
+ Items is the list of stateful sets. # noqa: E501
+
+ :return: The items of this V1StatefulSetList. # noqa: E501
+ :rtype: list[V1StatefulSet]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1StatefulSetList.
+
+ Items is the list of stateful sets. # noqa: E501
+
+ :param items: The items of this V1StatefulSetList. # noqa: E501
+ :type: list[V1StatefulSet]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1StatefulSetList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1StatefulSetList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1StatefulSetList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1StatefulSetList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1StatefulSetList. # noqa: E501
+
+
+ :return: The metadata of this V1StatefulSetList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1StatefulSetList.
+
+
+ :param metadata: The metadata of this V1StatefulSetList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1StatefulSetList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1StatefulSetList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_ordinals.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_ordinals.py
new file mode 100644
index 0000000000..969ff4b744
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_ordinals.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1StatefulSetOrdinals(object):
    """StatefulSet ordinals configuration model.

    Auto-generated by the OpenAPI Generator (https://openapi-generator.tech)
    from the Kubernetes release-1.28 API schema; do not edit manually.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'start': 'int'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'start': 'start'
    }

    def __init__(self, start=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1StatefulSetOrdinals model.

        :param start: optional first replica index.
        :param local_vars_configuration: client Configuration; a default
            one is created when omitted.
        """
        self.local_vars_configuration = (
            local_vars_configuration if local_vars_configuration is not None
            else Configuration()
        )

        self._start = None
        self.discriminator = None

        if start is not None:
            self.start = start

    @property
    def start(self):
        """Number representing the first replica's index.

        May be used to number replicas from an alternate index (e.g.
        1-indexed) or to orchestrate progressive movement of replicas
        between StatefulSets. If set, replica indices are in
        [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas);
        if unset, defaults to 0 and indices are in [0, .spec.replicas).

        :rtype: int
        """
        return self._start

    @start.setter
    def start(self, start):
        """Set the first replica index of this V1StatefulSetOrdinals.

        :type: int
        """
        self._start = start

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for key, elem in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string of the model's dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print`` and ``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is also a V1StatefulSetOrdinals with the same dict form."""
        if isinstance(other, V1StatefulSetOrdinals):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__ (kept explicit for Python 2 compatibility)."""
        if isinstance(other, V1StatefulSetOrdinals):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_persistent_volume_claim_retention_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_persistent_volume_claim_retention_policy.py
new file mode 100644
index 0000000000..6108752824
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_persistent_volume_claim_retention_policy.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1StatefulSetPersistentVolumeClaimRetentionPolicy(object):
    """Retention policy for PVCs created from StatefulSet volumeClaimTemplates.

    Auto-generated by the OpenAPI Generator (https://openapi-generator.tech)
    from the Kubernetes release-1.28 API schema; do not edit manually.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'when_deleted': 'str',
        'when_scaled': 'str'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'when_deleted': 'whenDeleted',
        'when_scaled': 'whenScaled'
    }

    def __init__(self, when_deleted=None, when_scaled=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1StatefulSetPersistentVolumeClaimRetentionPolicy model.

        Both fields are optional; a default Configuration is created when
        ``local_vars_configuration`` is omitted.
        """
        self.local_vars_configuration = (
            local_vars_configuration if local_vars_configuration is not None
            else Configuration()
        )

        self._when_deleted = None
        self._when_scaled = None
        self.discriminator = None

        if when_deleted is not None:
            self.when_deleted = when_deleted
        if when_scaled is not None:
            self.when_scaled = when_scaled

    @property
    def when_deleted(self):
        """What happens to PVCs created from StatefulSet VolumeClaimTemplates
        when the StatefulSet is deleted: the default `Retain` leaves the PVCs
        unaffected, `Delete` causes them to be deleted.

        :rtype: str
        """
        return self._when_deleted

    @when_deleted.setter
    def when_deleted(self, when_deleted):
        """Set the StatefulSet-deletion retention policy (`Retain` or `Delete`).

        :type: str
        """
        self._when_deleted = when_deleted

    @property
    def when_scaled(self):
        """What happens to PVCs created from StatefulSet VolumeClaimTemplates
        when the StatefulSet is scaled down: the default `Retain` keeps the
        PVCs, `Delete` removes those belonging to excess pods above the
        replica count.

        :rtype: str
        """
        return self._when_scaled

    @when_scaled.setter
    def when_scaled(self, when_scaled):
        """Set the scale-down retention policy (`Retain` or `Delete`).

        :type: str
        """
        self._when_scaled = when_scaled

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for key, elem in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string of the model's dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print`` and ``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with the same dict form."""
        if isinstance(other, V1StatefulSetPersistentVolumeClaimRetentionPolicy):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__ (kept explicit for Python 2 compatibility)."""
        if isinstance(other, V1StatefulSetPersistentVolumeClaimRetentionPolicy):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_spec.py
new file mode 100644
index 0000000000..5927a128d5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_spec.py
@@ -0,0 +1,395 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1StatefulSetSpec(object):
    """Specification of the desired state of a StatefulSet.

    Auto-generated by the OpenAPI Generator (https://openapi-generator.tech)
    from the Kubernetes release-1.28 API schema; do not edit manually.
    """

    # attribute name -> OpenAPI type
    openapi_types = {
        'min_ready_seconds': 'int',
        'ordinals': 'V1StatefulSetOrdinals',
        'persistent_volume_claim_retention_policy': 'V1StatefulSetPersistentVolumeClaimRetentionPolicy',
        'pod_management_policy': 'str',
        'replicas': 'int',
        'revision_history_limit': 'int',
        'selector': 'V1LabelSelector',
        'service_name': 'str',
        'template': 'V1PodTemplateSpec',
        'update_strategy': 'V1StatefulSetUpdateStrategy',
        'volume_claim_templates': 'list[V1PersistentVolumeClaim]'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'min_ready_seconds': 'minReadySeconds',
        'ordinals': 'ordinals',
        'persistent_volume_claim_retention_policy': 'persistentVolumeClaimRetentionPolicy',
        'pod_management_policy': 'podManagementPolicy',
        'replicas': 'replicas',
        'revision_history_limit': 'revisionHistoryLimit',
        'selector': 'selector',
        'service_name': 'serviceName',
        'template': 'template',
        'update_strategy': 'updateStrategy',
        'volume_claim_templates': 'volumeClaimTemplates'
    }

    def __init__(self, min_ready_seconds=None, ordinals=None, persistent_volume_claim_retention_policy=None, pod_management_policy=None, replicas=None, revision_history_limit=None, selector=None, service_name=None, template=None, update_strategy=None, volume_claim_templates=None, local_vars_configuration=None):  # noqa: E501
        """Build a V1StatefulSetSpec model.

        ``selector``, ``service_name`` and ``template`` are assigned
        unconditionally; their setters raise ValueError when client-side
        validation is enabled and the value is None.  All other fields
        are optional.
        """
        self.local_vars_configuration = (
            local_vars_configuration if local_vars_configuration is not None
            else Configuration()
        )

        # Backing storage for the public properties.
        self._min_ready_seconds = None
        self._ordinals = None
        self._persistent_volume_claim_retention_policy = None
        self._pod_management_policy = None
        self._replicas = None
        self._revision_history_limit = None
        self._selector = None
        self._service_name = None
        self._template = None
        self._update_strategy = None
        self._volume_claim_templates = None
        self.discriminator = None

        if min_ready_seconds is not None:
            self.min_ready_seconds = min_ready_seconds
        if ordinals is not None:
            self.ordinals = ordinals
        if persistent_volume_claim_retention_policy is not None:
            self.persistent_volume_claim_retention_policy = persistent_volume_claim_retention_policy
        if pod_management_policy is not None:
            self.pod_management_policy = pod_management_policy
        if replicas is not None:
            self.replicas = replicas
        if revision_history_limit is not None:
            self.revision_history_limit = revision_history_limit
        # Required fields: always routed through their validating setters.
        self.selector = selector
        self.service_name = service_name
        self.template = template
        if update_strategy is not None:
            self.update_strategy = update_strategy
        if volume_claim_templates is not None:
            self.volume_claim_templates = volume_claim_templates

    @property
    def min_ready_seconds(self):
        """Minimum number of seconds for which a newly created pod should be
        ready, without any of its containers crashing, for it to be
        considered available.  Defaults to 0 (available as soon as ready).

        :rtype: int
        """
        return self._min_ready_seconds

    @min_ready_seconds.setter
    def min_ready_seconds(self, min_ready_seconds):
        """Set the minimum readiness duration in seconds.

        :type: int
        """
        self._min_ready_seconds = min_ready_seconds

    @property
    def ordinals(self):
        """The ordinals configuration of this V1StatefulSetSpec.

        :rtype: V1StatefulSetOrdinals
        """
        return self._ordinals

    @ordinals.setter
    def ordinals(self, ordinals):
        """Set the ordinals configuration.

        :type: V1StatefulSetOrdinals
        """
        self._ordinals = ordinals

    @property
    def persistent_volume_claim_retention_policy(self):
        """The PVC retention policy of this V1StatefulSetSpec.

        :rtype: V1StatefulSetPersistentVolumeClaimRetentionPolicy
        """
        return self._persistent_volume_claim_retention_policy

    @persistent_volume_claim_retention_policy.setter
    def persistent_volume_claim_retention_policy(self, persistent_volume_claim_retention_policy):
        """Set the PVC retention policy.

        :type: V1StatefulSetPersistentVolumeClaimRetentionPolicy
        """
        self._persistent_volume_claim_retention_policy = persistent_volume_claim_retention_policy

    @property
    def pod_management_policy(self):
        """How pods are created during initial scale up, replacement, and
        scale down.  Default `OrderedReady` creates pods one at a time in
        increasing order and removes them in reverse; `Parallel` creates
        and deletes pods without waiting.

        :rtype: str
        """
        return self._pod_management_policy

    @pod_management_policy.setter
    def pod_management_policy(self, pod_management_policy):
        """Set the pod management policy (`OrderedReady` or `Parallel`).

        :type: str
        """
        self._pod_management_policy = pod_management_policy

    @property
    def replicas(self):
        """Desired number of replicas of the given template.  Each replica
        is an instantiation of the same template with a consistent
        identity.  Defaults to 1 when unspecified.

        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set the desired replica count.

        :type: int
        """
        self._replicas = replicas

    @property
    def revision_history_limit(self):
        """Maximum number of revisions maintained in the StatefulSet's
        revision history (revisions not represented by the currently
        applied spec).  Default is 10.

        :rtype: int
        """
        return self._revision_history_limit

    @revision_history_limit.setter
    def revision_history_limit(self, revision_history_limit):
        """Set the revision history limit.

        :type: int
        """
        self._revision_history_limit = revision_history_limit

    @property
    def selector(self):
        """The label selector of this V1StatefulSetSpec.

        :rtype: V1LabelSelector
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Set the label selector; required — may not be None when
        client-side validation is enabled.

        :type: V1LabelSelector
        """
        if self.local_vars_configuration.client_side_validation and selector is None:  # noqa: E501
            raise ValueError("Invalid value for `selector`, must not be `None`")  # noqa: E501
        self._selector = selector

    @property
    def service_name(self):
        """Name of the service that governs this StatefulSet.  The service
        must exist before the StatefulSet and is responsible for the
        network identity of the set; pods get DNS/hostnames of the form
        pod-specific-string.serviceName.default.svc.cluster.local.

        :rtype: str
        """
        return self._service_name

    @service_name.setter
    def service_name(self, service_name):
        """Set the governing service name; required — may not be None when
        client-side validation is enabled.

        :type: str
        """
        if self.local_vars_configuration.client_side_validation and service_name is None:  # noqa: E501
            raise ValueError("Invalid value for `service_name`, must not be `None`")  # noqa: E501
        self._service_name = service_name

    @property
    def template(self):
        """The pod template of this V1StatefulSetSpec.

        :rtype: V1PodTemplateSpec
        """
        return self._template

    @template.setter
    def template(self, template):
        """Set the pod template; required — may not be None when
        client-side validation is enabled.

        :type: V1PodTemplateSpec
        """
        if self.local_vars_configuration.client_side_validation and template is None:  # noqa: E501
            raise ValueError("Invalid value for `template`, must not be `None`")  # noqa: E501
        self._template = template

    @property
    def update_strategy(self):
        """The update strategy of this V1StatefulSetSpec.

        :rtype: V1StatefulSetUpdateStrategy
        """
        return self._update_strategy

    @update_strategy.setter
    def update_strategy(self, update_strategy):
        """Set the update strategy.

        :type: V1StatefulSetUpdateStrategy
        """
        self._update_strategy = update_strategy

    @property
    def volume_claim_templates(self):
        """List of claims that pods are allowed to reference.  Every claim
        must have at least one matching (by name) volumeMount in one
        container of the template; a claim here takes precedence over any
        same-named volume in the template.

        :rtype: list[V1PersistentVolumeClaim]
        """
        return self._volume_claim_templates

    @volume_claim_templates.setter
    def volume_claim_templates(self, volume_claim_templates):
        """Set the volume claim templates.

        :type: list[V1PersistentVolumeClaim]
        """
        self._volume_claim_templates = volume_claim_templates

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        serialized = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                serialized[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[attr] = value.to_dict()
            elif isinstance(value, dict):
                serialized[attr] = {
                    key: elem.to_dict() if hasattr(elem, "to_dict") else elem
                    for key, elem in value.items()
                }
            else:
                serialized[attr] = value
        return serialized

    def to_str(self):
        """Return a pretty-printed string of the model's dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so ``print`` and ``pprint`` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is also a V1StatefulSetSpec with the same dict form."""
        if isinstance(other, V1StatefulSetSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of __eq__ (kept explicit for Python 2 compatibility)."""
        if isinstance(other, V1StatefulSetSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_status.py
new file mode 100644
index 0000000000..a9770a288b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_status.py
@@ -0,0 +1,375 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StatefulSetStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'available_replicas': 'int',
+ 'collision_count': 'int',
+ 'conditions': 'list[V1StatefulSetCondition]',
+ 'current_replicas': 'int',
+ 'current_revision': 'str',
+ 'observed_generation': 'int',
+ 'ready_replicas': 'int',
+ 'replicas': 'int',
+ 'update_revision': 'str',
+ 'updated_replicas': 'int'
+ }
+
+ attribute_map = {
+ 'available_replicas': 'availableReplicas',
+ 'collision_count': 'collisionCount',
+ 'conditions': 'conditions',
+ 'current_replicas': 'currentReplicas',
+ 'current_revision': 'currentRevision',
+ 'observed_generation': 'observedGeneration',
+ 'ready_replicas': 'readyReplicas',
+ 'replicas': 'replicas',
+ 'update_revision': 'updateRevision',
+ 'updated_replicas': 'updatedReplicas'
+ }
+
+ def __init__(self, available_replicas=None, collision_count=None, conditions=None, current_replicas=None, current_revision=None, observed_generation=None, ready_replicas=None, replicas=None, update_revision=None, updated_replicas=None, local_vars_configuration=None): # noqa: E501
+ """V1StatefulSetStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._available_replicas = None
+ self._collision_count = None
+ self._conditions = None
+ self._current_replicas = None
+ self._current_revision = None
+ self._observed_generation = None
+ self._ready_replicas = None
+ self._replicas = None
+ self._update_revision = None
+ self._updated_replicas = None
+ self.discriminator = None
+
+ if available_replicas is not None:
+ self.available_replicas = available_replicas
+ if collision_count is not None:
+ self.collision_count = collision_count
+ if conditions is not None:
+ self.conditions = conditions
+ if current_replicas is not None:
+ self.current_replicas = current_replicas
+ if current_revision is not None:
+ self.current_revision = current_revision
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+ if ready_replicas is not None:
+ self.ready_replicas = ready_replicas
+ self.replicas = replicas
+ if update_revision is not None:
+ self.update_revision = update_revision
+ if updated_replicas is not None:
+ self.updated_replicas = updated_replicas
+
+ @property
+ def available_replicas(self):
+ """Gets the available_replicas of this V1StatefulSetStatus. # noqa: E501
+
+ Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. # noqa: E501
+
+ :return: The available_replicas of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._available_replicas
+
+ @available_replicas.setter
+ def available_replicas(self, available_replicas):
+ """Sets the available_replicas of this V1StatefulSetStatus.
+
+ Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. # noqa: E501
+
+ :param available_replicas: The available_replicas of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._available_replicas = available_replicas
+
+ @property
+ def collision_count(self):
+ """Gets the collision_count of this V1StatefulSetStatus. # noqa: E501
+
+ collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
+
+ :return: The collision_count of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._collision_count
+
+ @collision_count.setter
+ def collision_count(self, collision_count):
+ """Sets the collision_count of this V1StatefulSetStatus.
+
+ collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
+
+ :param collision_count: The collision_count of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._collision_count = collision_count
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1StatefulSetStatus. # noqa: E501
+
+ Represents the latest available observations of a statefulset's current state. # noqa: E501
+
+ :return: The conditions of this V1StatefulSetStatus. # noqa: E501
+ :rtype: list[V1StatefulSetCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1StatefulSetStatus.
+
+ Represents the latest available observations of a statefulset's current state. # noqa: E501
+
+ :param conditions: The conditions of this V1StatefulSetStatus. # noqa: E501
+ :type: list[V1StatefulSetCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def current_replicas(self):
+ """Gets the current_replicas of this V1StatefulSetStatus. # noqa: E501
+
+ currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
+
+ :return: The current_replicas of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._current_replicas
+
+ @current_replicas.setter
+ def current_replicas(self, current_replicas):
+ """Sets the current_replicas of this V1StatefulSetStatus.
+
+ currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
+
+ :param current_replicas: The current_replicas of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._current_replicas = current_replicas
+
+ @property
+ def current_revision(self):
+ """Gets the current_revision of this V1StatefulSetStatus. # noqa: E501
+
+ currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
+
+ :return: The current_revision of this V1StatefulSetStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._current_revision
+
+ @current_revision.setter
+ def current_revision(self, current_revision):
+ """Sets the current_revision of this V1StatefulSetStatus.
+
+ currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
+
+ :param current_revision: The current_revision of this V1StatefulSetStatus. # noqa: E501
+ :type: str
+ """
+
+ self._current_revision = current_revision
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1StatefulSetStatus. # noqa: E501
+
+ observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
+
+ :return: The observed_generation of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1StatefulSetStatus.
+
+ observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ @property
+ def ready_replicas(self):
+ """Gets the ready_replicas of this V1StatefulSetStatus. # noqa: E501
+
+ readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. # noqa: E501
+
+ :return: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._ready_replicas
+
+ @ready_replicas.setter
+ def ready_replicas(self, ready_replicas):
+ """Sets the ready_replicas of this V1StatefulSetStatus.
+
+ readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. # noqa: E501
+
+ :param ready_replicas: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._ready_replicas = ready_replicas
+
+ @property
+ def replicas(self):
+ """Gets the replicas of this V1StatefulSetStatus. # noqa: E501
+
+ replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
+
+ :return: The replicas of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._replicas
+
+ @replicas.setter
+ def replicas(self, replicas):
+ """Sets the replicas of this V1StatefulSetStatus.
+
+ replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
+
+ :param replicas: The replicas of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and replicas is None: # noqa: E501
+ raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
+
+ self._replicas = replicas
+
+ @property
+ def update_revision(self):
+ """Gets the update_revision of this V1StatefulSetStatus. # noqa: E501
+
+ updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
+
+ :return: The update_revision of this V1StatefulSetStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._update_revision
+
+ @update_revision.setter
+ def update_revision(self, update_revision):
+ """Sets the update_revision of this V1StatefulSetStatus.
+
+ updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
+
+ :param update_revision: The update_revision of this V1StatefulSetStatus. # noqa: E501
+ :type: str
+ """
+
+ self._update_revision = update_revision
+
+ @property
+ def updated_replicas(self):
+ """Gets the updated_replicas of this V1StatefulSetStatus. # noqa: E501
+
+ updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
+
+ :return: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._updated_replicas
+
+ @updated_replicas.setter
+ def updated_replicas(self, updated_replicas):
+ """Sets the updated_replicas of this V1StatefulSetStatus.
+
+ updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
+
+ :param updated_replicas: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
+ :type: int
+ """
+
+ self._updated_replicas = updated_replicas
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1StatefulSetStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1StatefulSetStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_update_strategy.py b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_update_strategy.py
new file mode 100644
index 0000000000..18054e44f2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_stateful_set_update_strategy.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1StatefulSetUpdateStrategy(object):
    """StatefulSetUpdateStrategy model (Kubernetes API, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type; consumed by to_dict() for serialization.
    openapi_types = {
        'rolling_update': 'V1RollingUpdateStatefulSetStrategy',
        'type': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'rolling_update': 'rollingUpdate',
        'type': 'type'
    }

    def __init__(self, rolling_update=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1StatefulSetUpdateStrategy - a model defined in OpenAPI.

        :param rolling_update: optional V1RollingUpdateStatefulSetStrategy
        :param type: optional str, the update strategy type
        :param local_vars_configuration: client Configuration; a default
            Configuration() is created when omitted
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._rolling_update = None
        self._type = None
        self.discriminator = None

        # Route values through the property setters; omitted fields stay None.
        if rolling_update is not None:
            self.rolling_update = rolling_update
        if type is not None:
            self.type = type

    @property
    def rolling_update(self):
        """Gets the rolling_update of this V1StatefulSetUpdateStrategy.

        :rtype: V1RollingUpdateStatefulSetStrategy
        """
        return self._rolling_update

    @rolling_update.setter
    def rolling_update(self, rolling_update):
        """Sets the rolling_update of this V1StatefulSetUpdateStrategy.

        :param rolling_update: V1RollingUpdateStatefulSetStrategy
        """
        self._rolling_update = rolling_update

    @property
    def type(self):
        """Gets the type of this V1StatefulSetUpdateStrategy.

        Type indicates the type of the StatefulSetUpdateStrategy.
        Default is RollingUpdate.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1StatefulSetUpdateStrategy.

        :param type: str
        """
        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested generated models (anything exposing ``to_dict``) are
        serialized recursively, including inside lists and dicts.
        """
        def _serialize(value):
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        # Plain dict iteration replaces the former six.iteritems() call:
        # this client runs on Python 3, so the six shim is unnecessary.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(v) for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (by dict representation)."""
        if not isinstance(other, V1StatefulSetUpdateStrategy):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1StatefulSetUpdateStrategy):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_status.py
new file mode 100644
index 0000000000..da6ffe1aa0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_status.py
@@ -0,0 +1,314 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Status(object):
    """Status model: return value for API calls that don't return other
    objects (Kubernetes API, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type; consumed by to_dict() for serialization.
    openapi_types = {
        'api_version': 'str',
        'code': 'int',
        'details': 'V1StatusDetails',
        'kind': 'str',
        'message': 'str',
        'metadata': 'V1ListMeta',
        'reason': 'str',
        'status': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'code': 'code',
        'details': 'details',
        'kind': 'kind',
        'message': 'message',
        'metadata': 'metadata',
        'reason': 'reason',
        'status': 'status'
    }

    def __init__(self, api_version=None, code=None, details=None, kind=None, message=None, metadata=None, reason=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1Status - a model defined in OpenAPI.

        All fields are optional; unset fields remain None. A default
        Configuration() is used when local_vars_configuration is omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._code = None
        self._details = None
        self._kind = None
        self._message = None
        self._metadata = None
        self._reason = None
        self._status = None
        self.discriminator = None

        # Route provided values through the property setters; omitted
        # fields stay None.
        if api_version is not None:
            self.api_version = api_version
        if code is not None:
            self.code = code
        if details is not None:
            self.details = details
        if kind is not None:
            self.kind = kind
        if message is not None:
            self.message = message
        if metadata is not None:
            self.metadata = metadata
        if reason is not None:
            self.reason = reason
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Gets the api_version of this V1Status.

        APIVersion defines the versioned schema of this representation of
        an object. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1Status.

        :param api_version: str
        """
        self._api_version = api_version

    @property
    def code(self):
        """Gets the code of this V1Status.

        Suggested HTTP return code for this status, 0 if not set.

        :rtype: int
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this V1Status.

        :param code: int
        """
        self._code = code

    @property
    def details(self):
        """Gets the details of this V1Status.

        :rtype: V1StatusDetails
        """
        return self._details

    @details.setter
    def details(self, details):
        """Sets the details of this V1Status.

        :param details: V1StatusDetails
        """
        self._details = details

    @property
    def kind(self):
        """Gets the kind of this V1Status.

        Kind is a string value representing the REST resource this object
        represents. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Status.

        :param kind: str
        """
        self._kind = kind

    @property
    def message(self):
        """Gets the message of this V1Status.

        A human-readable description of the status of this operation.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1Status.

        :param message: str
        """
        self._message = message

    @property
    def metadata(self):
        """Gets the metadata of this V1Status.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1Status.

        :param metadata: V1ListMeta
        """
        self._metadata = metadata

    @property
    def reason(self):
        """Gets the reason of this V1Status.

        A machine-readable description of why this operation is in the
        "Failure" status. A Reason clarifies an HTTP status code but does
        not override it.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1Status.

        :param reason: str
        """
        self._reason = reason

    @property
    def status(self):
        """Gets the status of this V1Status.

        Status of the operation. One of: "Success" or "Failure". More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1Status.

        :param status: str
        """
        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested generated models (anything exposing ``to_dict``) are
        serialized recursively, including inside lists and dicts.
        """
        def _serialize(value):
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        # Plain dict iteration replaces the former six.iteritems() call:
        # this client runs on Python 3, so the six shim is unnecessary.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(v) for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (by dict representation)."""
        if not isinstance(other, V1Status):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1Status):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_status_cause.py b/contrib/python/kubernetes/kubernetes/client/models/v1_status_cause.py
new file mode 100644
index 0000000000..adf559a4d3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_status_cause.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1StatusCause(object):
    """StatusCause model: one detailed cause attached to a Status failure
    (Kubernetes API, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type; consumed by to_dict() for serialization.
    openapi_types = {
        'field': 'str',
        'message': 'str',
        'reason': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'field': 'field',
        'message': 'message',
        'reason': 'reason'
    }

    def __init__(self, field=None, message=None, reason=None, local_vars_configuration=None):  # noqa: E501
        """V1StatusCause - a model defined in OpenAPI.

        All fields are optional; unset fields remain None. A default
        Configuration() is used when local_vars_configuration is omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._field = None
        self._message = None
        self._reason = None
        self.discriminator = None

        # Route provided values through the property setters; omitted
        # fields stay None.
        if field is not None:
            self.field = field
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason

    @property
    def field(self):
        """Gets the field of this V1StatusCause.

        The field of the resource that has caused this error, as named by
        its JSON serialization. May include dot and postfix notation for
        nested attributes; arrays are zero-indexed. Fields may appear more
        than once in an array of causes. Optional. Examples: "name" - the
        field "name" on the current resource; "items[0].name" - the field
        "name" on the first array entry in "items".

        :rtype: str
        """
        return self._field

    @field.setter
    def field(self, field):
        """Sets the field of this V1StatusCause.

        :param field: str
        """
        self._field = field

    @property
    def message(self):
        """Gets the message of this V1StatusCause.

        A human-readable description of the cause of the error. This field
        may be presented as-is to a reader.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1StatusCause.

        :param message: str
        """
        self._message = message

    @property
    def reason(self):
        """Gets the reason of this V1StatusCause.

        A machine-readable description of the cause of the error. If this
        value is empty there is no information available.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1StatusCause.

        :param reason: str
        """
        self._reason = reason

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested generated models (anything exposing ``to_dict``) are
        serialized recursively, including inside lists and dicts.
        """
        def _serialize(value):
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        # Plain dict iteration replaces the former six.iteritems() call:
        # this client runs on Python 3, so the six shim is unnecessary.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(v) for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (by dict representation)."""
        if not isinstance(other, V1StatusCause):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1StatusCause):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_status_details.py b/contrib/python/kubernetes/kubernetes/client/models/v1_status_details.py
new file mode 100644
index 0000000000..47310193bb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_status_details.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1StatusDetails(object):
    """StatusDetails model: extended data attached to a Status failure
    (Kubernetes API, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type; consumed by to_dict() for serialization.
    openapi_types = {
        'causes': 'list[V1StatusCause]',
        'group': 'str',
        'kind': 'str',
        'name': 'str',
        'retry_after_seconds': 'int',
        'uid': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'causes': 'causes',
        'group': 'group',
        'kind': 'kind',
        'name': 'name',
        'retry_after_seconds': 'retryAfterSeconds',
        'uid': 'uid'
    }

    def __init__(self, causes=None, group=None, kind=None, name=None, retry_after_seconds=None, uid=None, local_vars_configuration=None):  # noqa: E501
        """V1StatusDetails - a model defined in OpenAPI.

        All fields are optional; unset fields remain None. A default
        Configuration() is used when local_vars_configuration is omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._causes = None
        self._group = None
        self._kind = None
        self._name = None
        self._retry_after_seconds = None
        self._uid = None
        self.discriminator = None

        # Route provided values through the property setters; omitted
        # fields stay None.
        if causes is not None:
            self.causes = causes
        if group is not None:
            self.group = group
        if kind is not None:
            self.kind = kind
        if name is not None:
            self.name = name
        if retry_after_seconds is not None:
            self.retry_after_seconds = retry_after_seconds
        if uid is not None:
            self.uid = uid

    @property
    def causes(self):
        """Gets the causes of this V1StatusDetails.

        The Causes array includes more details associated with the
        StatusReason failure. Not all StatusReasons may provide detailed
        causes.

        :rtype: list[V1StatusCause]
        """
        return self._causes

    @causes.setter
    def causes(self, causes):
        """Sets the causes of this V1StatusDetails.

        :param causes: list[V1StatusCause]
        """
        self._causes = causes

    @property
    def group(self):
        """Gets the group of this V1StatusDetails.

        The group attribute of the resource associated with the status
        StatusReason.

        :rtype: str
        """
        return self._group

    @group.setter
    def group(self, group):
        """Sets the group of this V1StatusDetails.

        :param group: str
        """
        self._group = group

    @property
    def kind(self):
        """Gets the kind of this V1StatusDetails.

        The kind attribute of the resource associated with the status
        StatusReason. On some operations may differ from the requested
        resource Kind. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1StatusDetails.

        :param kind: str
        """
        self._kind = kind

    @property
    def name(self):
        """Gets the name of this V1StatusDetails.

        The name attribute of the resource associated with the status
        StatusReason (when there is a single name which can be described).

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1StatusDetails.

        :param name: str
        """
        self._name = name

    @property
    def retry_after_seconds(self):
        """Gets the retry_after_seconds of this V1StatusDetails.

        If specified, the time in seconds before the operation should be
        retried. For errors that require an alternate client action this
        field may indicate how long to wait before taking it.

        :rtype: int
        """
        return self._retry_after_seconds

    @retry_after_seconds.setter
    def retry_after_seconds(self, retry_after_seconds):
        """Sets the retry_after_seconds of this V1StatusDetails.

        :param retry_after_seconds: int
        """
        self._retry_after_seconds = retry_after_seconds

    @property
    def uid(self):
        """Gets the uid of this V1StatusDetails.

        UID of the resource (when there is a single resource which can be
        described). More info:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids

        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Sets the uid of this V1StatusDetails.

        :param uid: str
        """
        self._uid = uid

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested generated models (anything exposing ``to_dict``) are
        serialized recursively, including inside lists and dicts.
        """
        def _serialize(value):
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        # Plain dict iteration replaces the former six.iteritems() call:
        # this client runs on Python 3, so the six shim is unnecessary.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(v) for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal (by dict representation)."""
        if not isinstance(other, V1StatusDetails):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1StatusDetails):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_storage_class.py b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_class.py
new file mode 100644
index 0000000000..7981ba1c94
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_class.py
@@ -0,0 +1,373 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StorageClass(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'allow_volume_expansion': 'bool',
+        'allowed_topologies': 'list[V1TopologySelectorTerm]',
+        'api_version': 'str',
+        'kind': 'str',
+        'metadata': 'V1ObjectMeta',
+        'mount_options': 'list[str]',
+        'parameters': 'dict(str, str)',
+        'provisioner': 'str',
+        'reclaim_policy': 'str',
+        'volume_binding_mode': 'str'
+    }
+
+    attribute_map = {
+        'allow_volume_expansion': 'allowVolumeExpansion',
+        'allowed_topologies': 'allowedTopologies',
+        'api_version': 'apiVersion',
+        'kind': 'kind',
+        'metadata': 'metadata',
+        'mount_options': 'mountOptions',
+        'parameters': 'parameters',
+        'provisioner': 'provisioner',
+        'reclaim_policy': 'reclaimPolicy',
+        'volume_binding_mode': 'volumeBindingMode'
+    }
+
+    def __init__(self, allow_volume_expansion=None, allowed_topologies=None, api_version=None, kind=None, metadata=None, mount_options=None, parameters=None, provisioner=None, reclaim_policy=None, volume_binding_mode=None, local_vars_configuration=None):  # noqa: E501
+        """V1StorageClass - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:  # fall back to the default client Configuration
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration  # governs client-side validation in the setters
+
+        self._allow_volume_expansion = None
+        self._allowed_topologies = None
+        self._api_version = None
+        self._kind = None
+        self._metadata = None
+        self._mount_options = None
+        self._parameters = None
+        self._provisioner = None
+        self._reclaim_policy = None
+        self._volume_binding_mode = None
+        self.discriminator = None  # no polymorphic discriminator for this model
+
+        if allow_volume_expansion is not None:
+            self.allow_volume_expansion = allow_volume_expansion
+        if allowed_topologies is not None:
+            self.allowed_topologies = allowed_topologies
+        if api_version is not None:
+            self.api_version = api_version
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+        if mount_options is not None:
+            self.mount_options = mount_options
+        if parameters is not None:
+            self.parameters = parameters
+        self.provisioner = provisioner  # required field; setter raises ValueError on None
+        if reclaim_policy is not None:
+            self.reclaim_policy = reclaim_policy
+        if volume_binding_mode is not None:
+            self.volume_binding_mode = volume_binding_mode
+
+    @property
+    def allow_volume_expansion(self):
+        """Gets the allow_volume_expansion of this V1StorageClass.  # noqa: E501
+
+        allowVolumeExpansion shows whether the storage class allow volume expand.  # noqa: E501
+
+        :return: The allow_volume_expansion of this V1StorageClass.  # noqa: E501
+        :rtype: bool
+        """
+        return self._allow_volume_expansion
+
+    @allow_volume_expansion.setter
+    def allow_volume_expansion(self, allow_volume_expansion):
+        """Sets the allow_volume_expansion of this V1StorageClass.
+
+        allowVolumeExpansion shows whether the storage class allow volume expand.  # noqa: E501
+
+        :param allow_volume_expansion: The allow_volume_expansion of this V1StorageClass.  # noqa: E501
+        :type: bool
+        """
+
+        self._allow_volume_expansion = allow_volume_expansion
+
+    @property
+    def allowed_topologies(self):
+        """Gets the allowed_topologies of this V1StorageClass.  # noqa: E501
+
+        allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
+
+        :return: The allowed_topologies of this V1StorageClass.  # noqa: E501
+        :rtype: list[V1TopologySelectorTerm]
+        """
+        return self._allowed_topologies
+
+    @allowed_topologies.setter
+    def allowed_topologies(self, allowed_topologies):
+        """Sets the allowed_topologies of this V1StorageClass.
+
+        allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
+
+        :param allowed_topologies: The allowed_topologies of this V1StorageClass.  # noqa: E501
+        :type: list[V1TopologySelectorTerm]
+        """
+
+        self._allowed_topologies = allowed_topologies
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1StorageClass.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1StorageClass.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1StorageClass.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1StorageClass.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1StorageClass.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1StorageClass.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1StorageClass.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1StorageClass.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1StorageClass.  # noqa: E501
+
+
+        :return: The metadata of this V1StorageClass.  # noqa: E501
+        :rtype: V1ObjectMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1StorageClass.
+
+
+        :param metadata: The metadata of this V1StorageClass.  # noqa: E501
+        :type: V1ObjectMeta
+        """
+
+        self._metadata = metadata
+
+    @property
+    def mount_options(self):
+        """Gets the mount_options of this V1StorageClass.  # noqa: E501
+
+        mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.  # noqa: E501
+
+        :return: The mount_options of this V1StorageClass.  # noqa: E501
+        :rtype: list[str]
+        """
+        return self._mount_options
+
+    @mount_options.setter
+    def mount_options(self, mount_options):
+        """Sets the mount_options of this V1StorageClass.
+
+        mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.  # noqa: E501
+
+        :param mount_options: The mount_options of this V1StorageClass.  # noqa: E501
+        :type: list[str]
+        """
+
+        self._mount_options = mount_options
+
+    @property
+    def parameters(self):
+        """Gets the parameters of this V1StorageClass.  # noqa: E501
+
+        parameters holds the parameters for the provisioner that should create volumes of this storage class.  # noqa: E501
+
+        :return: The parameters of this V1StorageClass.  # noqa: E501
+        :rtype: dict(str, str)
+        """
+        return self._parameters
+
+    @parameters.setter
+    def parameters(self, parameters):
+        """Sets the parameters of this V1StorageClass.
+
+        parameters holds the parameters for the provisioner that should create volumes of this storage class.  # noqa: E501
+
+        :param parameters: The parameters of this V1StorageClass.  # noqa: E501
+        :type: dict(str, str)
+        """
+
+        self._parameters = parameters
+
+    @property
+    def provisioner(self):
+        """Gets the provisioner of this V1StorageClass.  # noqa: E501
+
+        provisioner indicates the type of the provisioner.  # noqa: E501
+
+        :return: The provisioner of this V1StorageClass.  # noqa: E501
+        :rtype: str
+        """
+        return self._provisioner
+
+    @provisioner.setter
+    def provisioner(self, provisioner):
+        """Sets the provisioner of this V1StorageClass.
+
+        provisioner indicates the type of the provisioner.  # noqa: E501
+
+        :param provisioner: The provisioner of this V1StorageClass.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and provisioner is None:  # noqa: E501
+            raise ValueError("Invalid value for `provisioner`, must not be `None`")  # noqa: E501
+
+        self._provisioner = provisioner
+
+    @property
+    def reclaim_policy(self):
+        """Gets the reclaim_policy of this V1StorageClass.  # noqa: E501
+
+        reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.  # noqa: E501
+
+        :return: The reclaim_policy of this V1StorageClass.  # noqa: E501
+        :rtype: str
+        """
+        return self._reclaim_policy
+
+    @reclaim_policy.setter
+    def reclaim_policy(self, reclaim_policy):
+        """Sets the reclaim_policy of this V1StorageClass.
+
+        reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.  # noqa: E501
+
+        :param reclaim_policy: The reclaim_policy of this V1StorageClass.  # noqa: E501
+        :type: str
+        """
+
+        self._reclaim_policy = reclaim_policy
+
+    @property
+    def volume_binding_mode(self):
+        """Gets the volume_binding_mode of this V1StorageClass.  # noqa: E501
+
+        volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
+
+        :return: The volume_binding_mode of this V1StorageClass.  # noqa: E501
+        :rtype: str
+        """
+        return self._volume_binding_mode
+
+    @volume_binding_mode.setter
+    def volume_binding_mode(self, volume_binding_mode):
+        """Sets the volume_binding_mode of this V1StorageClass.
+
+        volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
+
+        :param volume_binding_mode: The volume_binding_mode of this V1StorageClass.  # noqa: E501
+        :type: str
+        """
+
+        self._volume_binding_mode = volume_binding_mode
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # walk every declared model attribute
+            value = getattr(self, attr)  # reads via the property getter
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()  # recurse into nested model objects
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1StorageClass):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1StorageClass):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_storage_class_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_class_list.py
new file mode 100644
index 0000000000..14649b989f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_class_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StorageClassList(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'api_version': 'str',
+        'items': 'list[V1StorageClass]',
+        'kind': 'str',
+        'metadata': 'V1ListMeta'
+    }
+
+    attribute_map = {
+        'api_version': 'apiVersion',
+        'items': 'items',
+        'kind': 'kind',
+        'metadata': 'metadata'
+    }
+
+    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
+        """V1StorageClassList - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:  # fall back to the default client Configuration
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration  # governs client-side validation in the setters
+
+        self._api_version = None
+        self._items = None
+        self._kind = None
+        self._metadata = None
+        self.discriminator = None  # no polymorphic discriminator for this model
+
+        if api_version is not None:
+            self.api_version = api_version
+        self.items = items  # required field; setter raises ValueError on None
+        if kind is not None:
+            self.kind = kind
+        if metadata is not None:
+            self.metadata = metadata
+
+    @property
+    def api_version(self):
+        """Gets the api_version of this V1StorageClassList.  # noqa: E501
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :return: The api_version of this V1StorageClassList.  # noqa: E501
+        :rtype: str
+        """
+        return self._api_version
+
+    @api_version.setter
+    def api_version(self, api_version):
+        """Sets the api_version of this V1StorageClassList.
+
+        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
+
+        :param api_version: The api_version of this V1StorageClassList.  # noqa: E501
+        :type: str
+        """
+
+        self._api_version = api_version
+
+    @property
+    def items(self):
+        """Gets the items of this V1StorageClassList.  # noqa: E501
+
+        items is the list of StorageClasses  # noqa: E501
+
+        :return: The items of this V1StorageClassList.  # noqa: E501
+        :rtype: list[V1StorageClass]
+        """
+        return self._items
+
+    @items.setter
+    def items(self, items):
+        """Sets the items of this V1StorageClassList.
+
+        items is the list of StorageClasses  # noqa: E501
+
+        :param items: The items of this V1StorageClassList.  # noqa: E501
+        :type: list[V1StorageClass]
+        """
+        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
+            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
+
+        self._items = items
+
+    @property
+    def kind(self):
+        """Gets the kind of this V1StorageClassList.  # noqa: E501
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :return: The kind of this V1StorageClassList.  # noqa: E501
+        :rtype: str
+        """
+        return self._kind
+
+    @kind.setter
+    def kind(self, kind):
+        """Sets the kind of this V1StorageClassList.
+
+        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
+
+        :param kind: The kind of this V1StorageClassList.  # noqa: E501
+        :type: str
+        """
+
+        self._kind = kind
+
+    @property
+    def metadata(self):
+        """Gets the metadata of this V1StorageClassList.  # noqa: E501
+
+
+        :return: The metadata of this V1StorageClassList.  # noqa: E501
+        :rtype: V1ListMeta
+        """
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        """Sets the metadata of this V1StorageClassList.
+
+
+        :param metadata: The metadata of this V1StorageClassList.  # noqa: E501
+        :type: V1ListMeta
+        """
+
+        self._metadata = metadata
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # walk every declared model attribute
+            value = getattr(self, attr)  # reads via the property getter
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()  # recurse into nested model objects
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1StorageClassList):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1StorageClassList):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_persistent_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_persistent_volume_source.py
new file mode 100644
index 0000000000..99c7608551
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_persistent_volume_source.py
@@ -0,0 +1,232 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StorageOSPersistentVolumeSource(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'fs_type': 'str',
+        'read_only': 'bool',
+        'secret_ref': 'V1ObjectReference',
+        'volume_name': 'str',
+        'volume_namespace': 'str'
+    }
+
+    attribute_map = {
+        'fs_type': 'fsType',
+        'read_only': 'readOnly',
+        'secret_ref': 'secretRef',
+        'volume_name': 'volumeName',
+        'volume_namespace': 'volumeNamespace'
+    }
+
+    def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_name=None, volume_namespace=None, local_vars_configuration=None):  # noqa: E501
+        """V1StorageOSPersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:  # fall back to the default client Configuration
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration  # kept for parity with other models; no validation used here
+
+        self._fs_type = None
+        self._read_only = None
+        self._secret_ref = None
+        self._volume_name = None
+        self._volume_namespace = None
+        self.discriminator = None  # no polymorphic discriminator for this model
+
+        if fs_type is not None:
+            self.fs_type = fs_type
+        if read_only is not None:
+            self.read_only = read_only
+        if secret_ref is not None:
+            self.secret_ref = secret_ref
+        if volume_name is not None:
+            self.volume_name = volume_name
+        if volume_namespace is not None:
+            self.volume_namespace = volume_namespace
+
+    @property
+    def fs_type(self):
+        """Gets the fs_type of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+
+        fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.  # noqa: E501
+
+        :return: The fs_type of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :rtype: str
+        """
+        return self._fs_type
+
+    @fs_type.setter
+    def fs_type(self, fs_type):
+        """Sets the fs_type of this V1StorageOSPersistentVolumeSource.
+
+        fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.  # noqa: E501
+
+        :param fs_type: The fs_type of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :type: str
+        """
+
+        self._fs_type = fs_type
+
+    @property
+    def read_only(self):
+        """Gets the read_only of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+
+        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501
+
+        :return: The read_only of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :rtype: bool
+        """
+        return self._read_only
+
+    @read_only.setter
+    def read_only(self, read_only):
+        """Sets the read_only of this V1StorageOSPersistentVolumeSource.
+
+        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501
+
+        :param read_only: The read_only of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :type: bool
+        """
+
+        self._read_only = read_only
+
+    @property
+    def secret_ref(self):
+        """Gets the secret_ref of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+
+
+        :return: The secret_ref of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :rtype: V1ObjectReference
+        """
+        return self._secret_ref
+
+    @secret_ref.setter
+    def secret_ref(self, secret_ref):
+        """Sets the secret_ref of this V1StorageOSPersistentVolumeSource.
+
+
+        :param secret_ref: The secret_ref of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :type: V1ObjectReference
+        """
+
+        self._secret_ref = secret_ref
+
+    @property
+    def volume_name(self):
+        """Gets the volume_name of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+
+        volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.  # noqa: E501
+
+        :return: The volume_name of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :rtype: str
+        """
+        return self._volume_name
+
+    @volume_name.setter
+    def volume_name(self, volume_name):
+        """Sets the volume_name of this V1StorageOSPersistentVolumeSource.
+
+        volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.  # noqa: E501
+
+        :param volume_name: The volume_name of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :type: str
+        """
+
+        self._volume_name = volume_name
+
+    @property
+    def volume_namespace(self):
+        """Gets the volume_namespace of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+
+        volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.  # noqa: E501
+
+        :return: The volume_namespace of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :rtype: str
+        """
+        return self._volume_namespace
+
+    @volume_namespace.setter
+    def volume_namespace(self, volume_namespace):
+        """Sets the volume_namespace of this V1StorageOSPersistentVolumeSource.
+
+        volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.  # noqa: E501
+
+        :param volume_namespace: The volume_namespace of this V1StorageOSPersistentVolumeSource.  # noqa: E501
+        :type: str
+        """
+
+        self._volume_namespace = volume_namespace
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # walk every declared model attribute
+            value = getattr(self, attr)  # reads via the property getter
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()  # recurse into nested model objects
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1StorageOSPersistentVolumeSource):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1StorageOSPersistentVolumeSource):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_volume_source.py
new file mode 100644
index 0000000000..a468b6c3d2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_storage_os_volume_source.py
@@ -0,0 +1,232 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1StorageOSVolumeSource(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'fs_type': 'str',
+        'read_only': 'bool',
+        'secret_ref': 'V1LocalObjectReference',
+        'volume_name': 'str',
+        'volume_namespace': 'str'
+    }
+
+    attribute_map = {
+        'fs_type': 'fsType',
+        'read_only': 'readOnly',
+        'secret_ref': 'secretRef',
+        'volume_name': 'volumeName',
+        'volume_namespace': 'volumeNamespace'
+    }
+
+    def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_name=None, volume_namespace=None, local_vars_configuration=None):  # noqa: E501
+        """V1StorageOSVolumeSource - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:  # fall back to the default client Configuration
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration  # kept for parity with other models; no validation used here
+
+        self._fs_type = None
+        self._read_only = None
+        self._secret_ref = None
+        self._volume_name = None
+        self._volume_namespace = None
+        self.discriminator = None  # no polymorphic discriminator for this model
+
+        if fs_type is not None:
+            self.fs_type = fs_type
+        if read_only is not None:
+            self.read_only = read_only
+        if secret_ref is not None:
+            self.secret_ref = secret_ref
+        if volume_name is not None:
+            self.volume_name = volume_name
+        if volume_namespace is not None:
+            self.volume_namespace = volume_namespace
+
+    @property
+    def fs_type(self):
+        """Gets the fs_type of this V1StorageOSVolumeSource.  # noqa: E501
+
+        fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.  # noqa: E501
+
+        :return: The fs_type of this V1StorageOSVolumeSource.  # noqa: E501
+        :rtype: str
+        """
+        return self._fs_type
+
+    @fs_type.setter
+    def fs_type(self, fs_type):
+        """Sets the fs_type of this V1StorageOSVolumeSource.
+
+        fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.  # noqa: E501
+
+        :param fs_type: The fs_type of this V1StorageOSVolumeSource.  # noqa: E501
+        :type: str
+        """
+
+        self._fs_type = fs_type
+
+    @property
+    def read_only(self):
+        """Gets the read_only of this V1StorageOSVolumeSource.  # noqa: E501
+
+        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501
+
+        :return: The read_only of this V1StorageOSVolumeSource.  # noqa: E501
+        :rtype: bool
+        """
+        return self._read_only
+
+    @read_only.setter
+    def read_only(self, read_only):
+        """Sets the read_only of this V1StorageOSVolumeSource.
+
+        readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501
+
+        :param read_only: The read_only of this V1StorageOSVolumeSource.  # noqa: E501
+        :type: bool
+        """
+
+        self._read_only = read_only
+
+    @property
+    def secret_ref(self):
+        """Gets the secret_ref of this V1StorageOSVolumeSource.  # noqa: E501
+
+
+        :return: The secret_ref of this V1StorageOSVolumeSource.  # noqa: E501
+        :rtype: V1LocalObjectReference
+        """
+        return self._secret_ref
+
+    @secret_ref.setter
+    def secret_ref(self, secret_ref):
+        """Sets the secret_ref of this V1StorageOSVolumeSource.
+
+
+        :param secret_ref: The secret_ref of this V1StorageOSVolumeSource.  # noqa: E501
+        :type: V1LocalObjectReference
+        """
+
+        self._secret_ref = secret_ref
+
+    @property
+    def volume_name(self):
+        """Gets the volume_name of this V1StorageOSVolumeSource.  # noqa: E501
+
+        volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.  # noqa: E501
+
+        :return: The volume_name of this V1StorageOSVolumeSource.  # noqa: E501
+        :rtype: str
+        """
+        return self._volume_name
+
+    @volume_name.setter
+    def volume_name(self, volume_name):
+        """Sets the volume_name of this V1StorageOSVolumeSource.
+
+        volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.  # noqa: E501
+
+        :param volume_name: The volume_name of this V1StorageOSVolumeSource.  # noqa: E501
+        :type: str
+        """
+
+        self._volume_name = volume_name
+
+    @property
+    def volume_namespace(self):
+        """Gets the volume_namespace of this V1StorageOSVolumeSource.  # noqa: E501
+
+        volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.  # noqa: E501
+
+        :return: The volume_namespace of this V1StorageOSVolumeSource.  # noqa: E501
+        :rtype: str
+        """
+        return self._volume_namespace
+
+    @volume_namespace.setter
+    def volume_namespace(self, volume_namespace):
+        """Sets the volume_namespace of this V1StorageOSVolumeSource.
+
+        volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.  # noqa: E501
+
+        :param volume_namespace: The volume_namespace of this V1StorageOSVolumeSource.  # noqa: E501
+        :type: str
+        """
+
+        self._volume_namespace = volume_namespace
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):  # walk every declared model attribute
+            value = getattr(self, attr)  # reads via the property getter
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()  # recurse into nested model objects
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, V1StorageOSVolumeSource):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V1StorageOSVolumeSource):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1_subject.py
new file mode 100644
index 0000000000..c493d8bd89
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_subject.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1Subject(object):
    """Subject referenced by an RBAC binding: a user, group, or service account.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'api_group': 'str',
        'kind': 'str',
        'name': 'str',
        'namespace': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_group': 'apiGroup',
        'kind': 'kind',
        'name': 'name',
        'namespace': 'namespace'
    }

    def __init__(self, api_group=None, kind=None, name=None, namespace=None, local_vars_configuration=None):  # noqa: E501
        """V1Subject - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_group = None
        self._kind = None
        self._name = None
        self._namespace = None
        self.discriminator = None

        # Optional fields are assigned only when supplied; the required
        # fields (kind, name) always go through their validating setters.
        if api_group is not None:
            self.api_group = api_group
        self.kind = kind
        self.name = name
        if namespace is not None:
            self.namespace = namespace

    @property
    def api_group(self):
        """APIGroup of the referenced subject.

        Defaults to "" for ServiceAccount subjects and to
        "rbac.authorization.k8s.io" for User and Group subjects.

        :return: The api_group of this V1Subject.
        :rtype: str
        """
        return self._api_group

    @api_group.setter
    def api_group(self, api_group):
        """Set the api_group of this V1Subject.

        :param api_group: The api_group of this V1Subject.
        :type: str
        """
        self._api_group = api_group

    @property
    def kind(self):
        """Kind of object being referenced: "User", "Group", or "ServiceAccount".

        :return: The kind of this V1Subject.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1Subject.

        Required field; rejects ``None`` when client-side validation is on.

        :param kind: The kind of this V1Subject.
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and kind is None:  # noqa: E501
            raise ValueError("Invalid value for `kind`, must not be `None`")  # noqa: E501
        self._kind = kind

    @property
    def name(self):
        """Name of the object being referenced.

        :return: The name of this V1Subject.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this V1Subject.

        Required field; rejects ``None`` when client-side validation is on.

        :param name: The name of this V1Subject.
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def namespace(self):
        """Namespace of the referenced object (only for namespaced kinds).

        :return: The namespace of this V1Subject.
        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Set the namespace of this V1Subject.

        :param namespace: The namespace of this V1Subject.
        :type: str
        """
        self._namespace = namespace

    def to_dict(self):
        """Return the model properties as a plain dict, serializing nested models."""
        def serialize(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: serialize(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1Subject with equal properties."""
        if isinstance(other, V1Subject):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Return True when *other* differs from this V1Subject."""
        if isinstance(other, V1Subject):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review.py
new file mode 100644
index 0000000000..446adcd222
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1SubjectAccessReview(object):
    """Review object asking whether a given subject can perform an action.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1SubjectAccessReviewSpec',
        'status': 'V1SubjectAccessReviewStatus'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1SubjectAccessReview - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Only `spec` is required; it is always routed through its
        # validating setter. The rest are assigned when supplied.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """APIVersion: the versioned schema of this representation of an object.

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1SubjectAccessReview.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1SubjectAccessReview.

        :param api_version: The api_version of this V1SubjectAccessReview.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Kind: the REST resource this object represents (CamelCase).

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1SubjectAccessReview.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1SubjectAccessReview.

        :param kind: The kind of this V1SubjectAccessReview.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard object metadata.

        :return: The metadata of this V1SubjectAccessReview.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1SubjectAccessReview.

        :param metadata: The metadata of this V1SubjectAccessReview.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Spec holding the request being evaluated.

        :return: The spec of this V1SubjectAccessReview.
        :rtype: V1SubjectAccessReviewSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the spec of this V1SubjectAccessReview.

        Required field; rejects ``None`` when client-side validation is on.

        :param spec: The spec of this V1SubjectAccessReview.
        :type: V1SubjectAccessReviewSpec
        """
        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501
        self._spec = spec

    @property
    def status(self):
        """Status filled in by the server with the authorization decision.

        :return: The status of this V1SubjectAccessReview.
        :rtype: V1SubjectAccessReviewStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V1SubjectAccessReview.

        :param status: The status of this V1SubjectAccessReview.
        :type: V1SubjectAccessReviewStatus
        """
        self._status = status

    def to_dict(self):
        """Return the model properties as a plain dict, serializing nested models."""
        def serialize(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: serialize(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1SubjectAccessReview with equal properties."""
        if isinstance(other, V1SubjectAccessReview):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Return True when *other* differs from this V1SubjectAccessReview."""
        if isinstance(other, V1SubjectAccessReview):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_spec.py
new file mode 100644
index 0000000000..832dcb74bd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_spec.py
@@ -0,0 +1,258 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1SubjectAccessReviewSpec(object):
    """Description of the access request being evaluated: who, and what action.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'extra': 'dict(str, list[str])',
        'groups': 'list[str]',
        'non_resource_attributes': 'V1NonResourceAttributes',
        'resource_attributes': 'V1ResourceAttributes',
        'uid': 'str',
        'user': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'extra': 'extra',
        'groups': 'groups',
        'non_resource_attributes': 'nonResourceAttributes',
        'resource_attributes': 'resourceAttributes',
        'uid': 'uid',
        'user': 'user'
    }

    def __init__(self, extra=None, groups=None, non_resource_attributes=None, resource_attributes=None, uid=None, user=None, local_vars_configuration=None):  # noqa: E501
        """V1SubjectAccessReviewSpec - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._extra = None
        self._groups = None
        self._non_resource_attributes = None
        self._resource_attributes = None
        self._uid = None
        self._user = None
        self.discriminator = None

        # Every field is optional; assign only the ones supplied.
        if extra is not None:
            self.extra = extra
        if groups is not None:
            self.groups = groups
        if non_resource_attributes is not None:
            self.non_resource_attributes = non_resource_attributes
        if resource_attributes is not None:
            self.resource_attributes = resource_attributes
        if uid is not None:
            self.uid = uid
        if user is not None:
            self.user = user

    @property
    def extra(self):
        """Extra user information, mirroring user.Info.GetExtra() from the authenticator.

        :return: The extra of this V1SubjectAccessReviewSpec.
        :rtype: dict(str, list[str])
        """
        return self._extra

    @extra.setter
    def extra(self, extra):
        """Set the extra of this V1SubjectAccessReviewSpec.

        :param extra: The extra of this V1SubjectAccessReviewSpec.
        :type: dict(str, list[str])
        """
        self._extra = extra

    @property
    def groups(self):
        """Groups is the groups you're testing for.

        :return: The groups of this V1SubjectAccessReviewSpec.
        :rtype: list[str]
        """
        return self._groups

    @groups.setter
    def groups(self, groups):
        """Set the groups of this V1SubjectAccessReviewSpec.

        :param groups: The groups of this V1SubjectAccessReviewSpec.
        :type: list[str]
        """
        self._groups = groups

    @property
    def non_resource_attributes(self):
        """Attributes of a non-resource request (e.g. a raw URL path).

        :return: The non_resource_attributes of this V1SubjectAccessReviewSpec.
        :rtype: V1NonResourceAttributes
        """
        return self._non_resource_attributes

    @non_resource_attributes.setter
    def non_resource_attributes(self, non_resource_attributes):
        """Set the non_resource_attributes of this V1SubjectAccessReviewSpec.

        :param non_resource_attributes: The non_resource_attributes of this V1SubjectAccessReviewSpec.
        :type: V1NonResourceAttributes
        """
        self._non_resource_attributes = non_resource_attributes

    @property
    def resource_attributes(self):
        """Attributes of a resource request (group/resource/verb/...).

        :return: The resource_attributes of this V1SubjectAccessReviewSpec.
        :rtype: V1ResourceAttributes
        """
        return self._resource_attributes

    @resource_attributes.setter
    def resource_attributes(self, resource_attributes):
        """Set the resource_attributes of this V1SubjectAccessReviewSpec.

        :param resource_attributes: The resource_attributes of this V1SubjectAccessReviewSpec.
        :type: V1ResourceAttributes
        """
        self._resource_attributes = resource_attributes

    @property
    def uid(self):
        """UID information about the requesting user.

        :return: The uid of this V1SubjectAccessReviewSpec.
        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Set the uid of this V1SubjectAccessReviewSpec.

        :param uid: The uid of this V1SubjectAccessReviewSpec.
        :type: str
        """
        self._uid = uid

    @property
    def user(self):
        """User is the user you're testing for.

        If "User" is given without "Groups", it is interpreted as
        "what if User were not a member of any groups".

        :return: The user of this V1SubjectAccessReviewSpec.
        :rtype: str
        """
        return self._user

    @user.setter
    def user(self, user):
        """Set the user of this V1SubjectAccessReviewSpec.

        :param user: The user of this V1SubjectAccessReviewSpec.
        :type: str
        """
        self._user = user

    def to_dict(self):
        """Return the model properties as a plain dict, serializing nested models."""
        def serialize(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: serialize(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1SubjectAccessReviewSpec with equal properties."""
        if isinstance(other, V1SubjectAccessReviewSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Return True when *other* differs from this V1SubjectAccessReviewSpec."""
        if isinstance(other, V1SubjectAccessReviewSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_status.py
new file mode 100644
index 0000000000..2f45a2b259
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_access_review_status.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1SubjectAccessReviewStatus(object):
    """Authorizer's answer to a subject access review: allowed/denied plus context.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'allowed': 'bool',
        'denied': 'bool',
        'evaluation_error': 'str',
        'reason': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'allowed': 'allowed',
        'denied': 'denied',
        'evaluation_error': 'evaluationError',
        'reason': 'reason'
    }

    def __init__(self, allowed=None, denied=None, evaluation_error=None, reason=None, local_vars_configuration=None):  # noqa: E501
        """V1SubjectAccessReviewStatus - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._allowed = None
        self._denied = None
        self._evaluation_error = None
        self._reason = None
        self.discriminator = None

        # `allowed` is required and always validated; the rest are
        # assigned only when supplied.
        self.allowed = allowed
        if denied is not None:
            self.denied = denied
        if evaluation_error is not None:
            self.evaluation_error = evaluation_error
        if reason is not None:
            self.reason = reason

    @property
    def allowed(self):
        """Required. True if the action would be allowed, false otherwise.

        :return: The allowed of this V1SubjectAccessReviewStatus.
        :rtype: bool
        """
        return self._allowed

    @allowed.setter
    def allowed(self, allowed):
        """Set the allowed of this V1SubjectAccessReviewStatus.

        Required field; rejects ``None`` when client-side validation is on.

        :param allowed: The allowed of this V1SubjectAccessReviewStatus.
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and allowed is None:  # noqa: E501
            raise ValueError("Invalid value for `allowed`, must not be `None`")  # noqa: E501
        self._allowed = allowed

    @property
    def denied(self):
        """Optional. True if the action would be denied.

        If both allowed and denied are false, the authorizer has no opinion.
        Denied may not be true when allowed is true.

        :return: The denied of this V1SubjectAccessReviewStatus.
        :rtype: bool
        """
        return self._denied

    @denied.setter
    def denied(self, denied):
        """Set the denied of this V1SubjectAccessReviewStatus.

        :param denied: The denied of this V1SubjectAccessReviewStatus.
        :type: bool
        """
        self._denied = denied

    @property
    def evaluation_error(self):
        """Indication that some error occurred during the authorization check.

        An error does not preclude a decision: e.g. RBAC may be missing one
        role but still have enough bound roles to evaluate the request.

        :return: The evaluation_error of this V1SubjectAccessReviewStatus.
        :rtype: str
        """
        return self._evaluation_error

    @evaluation_error.setter
    def evaluation_error(self, evaluation_error):
        """Set the evaluation_error of this V1SubjectAccessReviewStatus.

        :param evaluation_error: The evaluation_error of this V1SubjectAccessReviewStatus.
        :type: str
        """
        self._evaluation_error = evaluation_error

    @property
    def reason(self):
        """Optional. Why the request was allowed or denied.

        :return: The reason of this V1SubjectAccessReviewStatus.
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason of this V1SubjectAccessReviewStatus.

        :param reason: The reason of this V1SubjectAccessReviewStatus.
        :type: str
        """
        self._reason = reason

    def to_dict(self):
        """Return the model properties as a plain dict, serializing nested models."""
        def serialize(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: serialize(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1SubjectAccessReviewStatus with equal properties."""
        if isinstance(other, V1SubjectAccessReviewStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Return True when *other* differs from this V1SubjectAccessReviewStatus."""
        if isinstance(other, V1SubjectAccessReviewStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_subject_rules_review_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_rules_review_status.py
new file mode 100644
index 0000000000..136bed6118
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_subject_rules_review_status.py
@@ -0,0 +1,209 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1SubjectRulesReviewStatus(object):
    """Result of a rules review: the actions a subject may perform, possibly incomplete.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type of that attribute.
    openapi_types = {
        'evaluation_error': 'str',
        'incomplete': 'bool',
        'non_resource_rules': 'list[V1NonResourceRule]',
        'resource_rules': 'list[V1ResourceRule]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'evaluation_error': 'evaluationError',
        'incomplete': 'incomplete',
        'non_resource_rules': 'nonResourceRules',
        'resource_rules': 'resourceRules'
    }

    def __init__(self, evaluation_error=None, incomplete=None, non_resource_rules=None, resource_rules=None, local_vars_configuration=None):  # noqa: E501
        """V1SubjectRulesReviewStatus - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._evaluation_error = None
        self._incomplete = None
        self._non_resource_rules = None
        self._resource_rules = None
        self.discriminator = None

        # All but `evaluation_error` are required and always pass through
        # their validating setters.
        if evaluation_error is not None:
            self.evaluation_error = evaluation_error
        self.incomplete = incomplete
        self.non_resource_rules = non_resource_rules
        self.resource_rules = resource_rules

    @property
    def evaluation_error(self):
        """Error encountered during rule evaluation, if any.

        May appear in combination with rules, signalling that ResourceRules
        and/or NonResourceRules may be incomplete.

        :return: The evaluation_error of this V1SubjectRulesReviewStatus.
        :rtype: str
        """
        return self._evaluation_error

    @evaluation_error.setter
    def evaluation_error(self, evaluation_error):
        """Set the evaluation_error of this V1SubjectRulesReviewStatus.

        :param evaluation_error: The evaluation_error of this V1SubjectRulesReviewStatus.
        :type: str
        """
        self._evaluation_error = evaluation_error

    @property
    def incomplete(self):
        """True when the returned rules are incomplete.

        Most commonly seen when an authorizer (e.g. an external one) does
        not support rules evaluation.

        :return: The incomplete of this V1SubjectRulesReviewStatus.
        :rtype: bool
        """
        return self._incomplete

    @incomplete.setter
    def incomplete(self, incomplete):
        """Set the incomplete of this V1SubjectRulesReviewStatus.

        Required field; rejects ``None`` when client-side validation is on.

        :param incomplete: The incomplete of this V1SubjectRulesReviewStatus.
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and incomplete is None:  # noqa: E501
            raise ValueError("Invalid value for `incomplete`, must not be `None`")  # noqa: E501
        self._incomplete = incomplete

    @property
    def non_resource_rules(self):
        """Actions the subject may perform on non-resources.

        Ordering is not significant; the list may contain duplicates and
        may be incomplete.

        :return: The non_resource_rules of this V1SubjectRulesReviewStatus.
        :rtype: list[V1NonResourceRule]
        """
        return self._non_resource_rules

    @non_resource_rules.setter
    def non_resource_rules(self, non_resource_rules):
        """Set the non_resource_rules of this V1SubjectRulesReviewStatus.

        Required field; rejects ``None`` when client-side validation is on.

        :param non_resource_rules: The non_resource_rules of this V1SubjectRulesReviewStatus.
        :type: list[V1NonResourceRule]
        """
        if self.local_vars_configuration.client_side_validation and non_resource_rules is None:  # noqa: E501
            raise ValueError("Invalid value for `non_resource_rules`, must not be `None`")  # noqa: E501
        self._non_resource_rules = non_resource_rules

    @property
    def resource_rules(self):
        """Actions the subject may perform on resources.

        Ordering is not significant; the list may contain duplicates and
        may be incomplete.

        :return: The resource_rules of this V1SubjectRulesReviewStatus.
        :rtype: list[V1ResourceRule]
        """
        return self._resource_rules

    @resource_rules.setter
    def resource_rules(self, resource_rules):
        """Set the resource_rules of this V1SubjectRulesReviewStatus.

        Required field; rejects ``None`` when client-side validation is on.

        :param resource_rules: The resource_rules of this V1SubjectRulesReviewStatus.
        :type: list[V1ResourceRule]
        """
        if self.local_vars_configuration.client_side_validation and resource_rules is None:  # noqa: E501
            raise ValueError("Invalid value for `resource_rules`, must not be `None`")  # noqa: E501
        self._resource_rules = resource_rules

    def to_dict(self):
        """Return the model properties as a plain dict, serializing nested models."""
        def serialize(obj):
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: serialize(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a V1SubjectRulesReviewStatus with equal properties."""
        if isinstance(other, V1SubjectRulesReviewStatus):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Return True when *other* differs from this V1SubjectRulesReviewStatus."""
        if isinstance(other, V1SubjectRulesReviewStatus):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_sysctl.py b/contrib/python/kubernetes/kubernetes/client/models/v1_sysctl.py
new file mode 100644
index 0000000000..b482e28a3b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_sysctl.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Sysctl(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'value': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'value': 'value'
+ }
+
+ def __init__(self, name=None, value=None, local_vars_configuration=None): # noqa: E501
+ """V1Sysctl - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._value = None
+ self.discriminator = None
+
+ self.name = name
+ self.value = value
+
+ @property
+ def name(self):
+ """Gets the name of this V1Sysctl. # noqa: E501
+
+ Name of a property to set # noqa: E501
+
+ :return: The name of this V1Sysctl. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1Sysctl.
+
+ Name of a property to set # noqa: E501
+
+ :param name: The name of this V1Sysctl. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def value(self):
+ """Gets the value of this V1Sysctl. # noqa: E501
+
+ Value of a property to set # noqa: E501
+
+ :return: The value of this V1Sysctl. # noqa: E501
+ :rtype: str
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ """Sets the value of this V1Sysctl.
+
+ Value of a property to set # noqa: E501
+
+ :param value: The value of this V1Sysctl. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
+ raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
+
+ self._value = value
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Sysctl):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Sysctl):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_taint.py b/contrib/python/kubernetes/kubernetes/client/models/v1_taint.py
new file mode 100644
index 0000000000..2db7e4c9aa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_taint.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Taint(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'effect': 'str',
+ 'key': 'str',
+ 'time_added': 'datetime',
+ 'value': 'str'
+ }
+
+ attribute_map = {
+ 'effect': 'effect',
+ 'key': 'key',
+ 'time_added': 'timeAdded',
+ 'value': 'value'
+ }
+
+ def __init__(self, effect=None, key=None, time_added=None, value=None, local_vars_configuration=None): # noqa: E501
+ """V1Taint - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._effect = None
+ self._key = None
+ self._time_added = None
+ self._value = None
+ self.discriminator = None
+
+ self.effect = effect
+ self.key = key
+ if time_added is not None:
+ self.time_added = time_added
+ if value is not None:
+ self.value = value
+
+ @property
+ def effect(self):
+ """Gets the effect of this V1Taint. # noqa: E501
+
+ Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. # noqa: E501
+
+ :return: The effect of this V1Taint. # noqa: E501
+ :rtype: str
+ """
+ return self._effect
+
+ @effect.setter
+ def effect(self, effect):
+ """Sets the effect of this V1Taint.
+
+ Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. # noqa: E501
+
+ :param effect: The effect of this V1Taint. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and effect is None: # noqa: E501
+ raise ValueError("Invalid value for `effect`, must not be `None`") # noqa: E501
+
+ self._effect = effect
+
+ @property
+ def key(self):
+ """Gets the key of this V1Taint. # noqa: E501
+
+ Required. The taint key to be applied to a node. # noqa: E501
+
+ :return: The key of this V1Taint. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1Taint.
+
+ Required. The taint key to be applied to a node. # noqa: E501
+
+ :param key: The key of this V1Taint. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
+ raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
+
+ self._key = key
+
+ @property
+ def time_added(self):
+ """Gets the time_added of this V1Taint. # noqa: E501
+
+ TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. # noqa: E501
+
+ :return: The time_added of this V1Taint. # noqa: E501
+ :rtype: datetime
+ """
+ return self._time_added
+
+ @time_added.setter
+ def time_added(self, time_added):
+ """Sets the time_added of this V1Taint.
+
+ TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. # noqa: E501
+
+ :param time_added: The time_added of this V1Taint. # noqa: E501
+ :type: datetime
+ """
+
+ self._time_added = time_added
+
+ @property
+ def value(self):
+ """Gets the value of this V1Taint. # noqa: E501
+
+ The taint value corresponding to the taint key. # noqa: E501
+
+ :return: The value of this V1Taint. # noqa: E501
+ :rtype: str
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ """Sets the value of this V1Taint.
+
+ The taint value corresponding to the taint key. # noqa: E501
+
+ :param value: The value of this V1Taint. # noqa: E501
+ :type: str
+ """
+
+ self._value = value
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Taint):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Taint):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_tcp_socket_action.py b/contrib/python/kubernetes/kubernetes/client/models/v1_tcp_socket_action.py
new file mode 100644
index 0000000000..c470f7db89
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_tcp_socket_action.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TCPSocketAction(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'host': 'str',
+ 'port': 'object'
+ }
+
+ attribute_map = {
+ 'host': 'host',
+ 'port': 'port'
+ }
+
+ def __init__(self, host=None, port=None, local_vars_configuration=None): # noqa: E501
+ """V1TCPSocketAction - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._host = None
+ self._port = None
+ self.discriminator = None
+
+ if host is not None:
+ self.host = host
+ self.port = port
+
+ @property
+ def host(self):
+ """Gets the host of this V1TCPSocketAction. # noqa: E501
+
+ Optional: Host name to connect to, defaults to the pod IP. # noqa: E501
+
+ :return: The host of this V1TCPSocketAction. # noqa: E501
+ :rtype: str
+ """
+ return self._host
+
+ @host.setter
+ def host(self, host):
+ """Sets the host of this V1TCPSocketAction.
+
+ Optional: Host name to connect to, defaults to the pod IP. # noqa: E501
+
+ :param host: The host of this V1TCPSocketAction. # noqa: E501
+ :type: str
+ """
+
+ self._host = host
+
+ @property
+ def port(self):
+ """Gets the port of this V1TCPSocketAction. # noqa: E501
+
+ Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. # noqa: E501
+
+ :return: The port of this V1TCPSocketAction. # noqa: E501
+ :rtype: object
+ """
+ return self._port
+
+ @port.setter
+ def port(self, port):
+ """Sets the port of this V1TCPSocketAction.
+
+ Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. # noqa: E501
+
+ :param port: The port of this V1TCPSocketAction. # noqa: E501
+ :type: object
+ """
+ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501
+ raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
+
+ self._port = port
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TCPSocketAction):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TCPSocketAction):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_token_request_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_token_request_spec.py
new file mode 100644
index 0000000000..d85d232d34
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_token_request_spec.py
@@ -0,0 +1,177 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TokenRequestSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'audiences': 'list[str]',
+ 'bound_object_ref': 'V1BoundObjectReference',
+ 'expiration_seconds': 'int'
+ }
+
+ attribute_map = {
+ 'audiences': 'audiences',
+ 'bound_object_ref': 'boundObjectRef',
+ 'expiration_seconds': 'expirationSeconds'
+ }
+
+ def __init__(self, audiences=None, bound_object_ref=None, expiration_seconds=None, local_vars_configuration=None): # noqa: E501
+ """V1TokenRequestSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._audiences = None
+ self._bound_object_ref = None
+ self._expiration_seconds = None
+ self.discriminator = None
+
+ self.audiences = audiences
+ if bound_object_ref is not None:
+ self.bound_object_ref = bound_object_ref
+ if expiration_seconds is not None:
+ self.expiration_seconds = expiration_seconds
+
+ @property
+ def audiences(self):
+ """Gets the audiences of this V1TokenRequestSpec. # noqa: E501
+
+ Audiences are the intended audiences of the token. A recipient of a token must identify themselves with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. # noqa: E501
+
+ :return: The audiences of this V1TokenRequestSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._audiences
+
+ @audiences.setter
+ def audiences(self, audiences):
+ """Sets the audiences of this V1TokenRequestSpec.
+
+ Audiences are the intended audiences of the token. A recipient of a token must identify themselves with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences. # noqa: E501
+
+ :param audiences: The audiences of this V1TokenRequestSpec. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and audiences is None: # noqa: E501
+ raise ValueError("Invalid value for `audiences`, must not be `None`") # noqa: E501
+
+ self._audiences = audiences
+
+ @property
+ def bound_object_ref(self):
+ """Gets the bound_object_ref of this V1TokenRequestSpec. # noqa: E501
+
+
+ :return: The bound_object_ref of this V1TokenRequestSpec. # noqa: E501
+ :rtype: V1BoundObjectReference
+ """
+ return self._bound_object_ref
+
+ @bound_object_ref.setter
+ def bound_object_ref(self, bound_object_ref):
+ """Sets the bound_object_ref of this V1TokenRequestSpec.
+
+
+ :param bound_object_ref: The bound_object_ref of this V1TokenRequestSpec. # noqa: E501
+ :type: V1BoundObjectReference
+ """
+
+ self._bound_object_ref = bound_object_ref
+
+ @property
+ def expiration_seconds(self):
+ """Gets the expiration_seconds of this V1TokenRequestSpec. # noqa: E501
+
+ ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. # noqa: E501
+
+ :return: The expiration_seconds of this V1TokenRequestSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._expiration_seconds
+
+ @expiration_seconds.setter
+ def expiration_seconds(self, expiration_seconds):
+ """Sets the expiration_seconds of this V1TokenRequestSpec.
+
+ ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response. # noqa: E501
+
+ :param expiration_seconds: The expiration_seconds of this V1TokenRequestSpec. # noqa: E501
+ :type: int
+ """
+
+ self._expiration_seconds = expiration_seconds
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TokenRequestSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TokenRequestSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_token_request_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_token_request_status.py
new file mode 100644
index 0000000000..6a8c30eaa1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_token_request_status.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TokenRequestStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expiration_timestamp': 'datetime',
+ 'token': 'str'
+ }
+
+ attribute_map = {
+ 'expiration_timestamp': 'expirationTimestamp',
+ 'token': 'token'
+ }
+
+ def __init__(self, expiration_timestamp=None, token=None, local_vars_configuration=None): # noqa: E501
+ """V1TokenRequestStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expiration_timestamp = None
+ self._token = None
+ self.discriminator = None
+
+ self.expiration_timestamp = expiration_timestamp
+ self.token = token
+
+ @property
+ def expiration_timestamp(self):
+ """Gets the expiration_timestamp of this V1TokenRequestStatus. # noqa: E501
+
+ ExpirationTimestamp is the time of expiration of the returned token. # noqa: E501
+
+ :return: The expiration_timestamp of this V1TokenRequestStatus. # noqa: E501
+ :rtype: datetime
+ """
+ return self._expiration_timestamp
+
+ @expiration_timestamp.setter
+ def expiration_timestamp(self, expiration_timestamp):
+ """Sets the expiration_timestamp of this V1TokenRequestStatus.
+
+ ExpirationTimestamp is the time of expiration of the returned token. # noqa: E501
+
+ :param expiration_timestamp: The expiration_timestamp of this V1TokenRequestStatus. # noqa: E501
+ :type: datetime
+ """
+ if self.local_vars_configuration.client_side_validation and expiration_timestamp is None: # noqa: E501
+ raise ValueError("Invalid value for `expiration_timestamp`, must not be `None`") # noqa: E501
+
+ self._expiration_timestamp = expiration_timestamp
+
+ @property
+ def token(self):
+ """Gets the token of this V1TokenRequestStatus. # noqa: E501
+
+ Token is the opaque bearer token. # noqa: E501
+
+ :return: The token of this V1TokenRequestStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._token
+
+ @token.setter
+ def token(self, token):
+ """Sets the token of this V1TokenRequestStatus.
+
+ Token is the opaque bearer token. # noqa: E501
+
+ :param token: The token of this V1TokenRequestStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and token is None: # noqa: E501
+ raise ValueError("Invalid value for `token`, must not be `None`") # noqa: E501
+
+ self._token = token
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TokenRequestStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TokenRequestStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_token_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1_token_review.py
new file mode 100644
index 0000000000..b70084f07d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_token_review.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TokenReview(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1TokenReviewSpec',
+ 'status': 'V1TokenReviewStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1TokenReview - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1TokenReview. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1TokenReview. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1TokenReview.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1TokenReview. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1TokenReview. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1TokenReview. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1TokenReview.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1TokenReview. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1TokenReview. # noqa: E501
+
+
+ :return: The metadata of this V1TokenReview. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1TokenReview.
+
+
+ :param metadata: The metadata of this V1TokenReview. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1TokenReview. # noqa: E501
+
+
+ :return: The spec of this V1TokenReview. # noqa: E501
+ :rtype: V1TokenReviewSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1TokenReview.
+
+
+ :param spec: The spec of this V1TokenReview. # noqa: E501
+ :type: V1TokenReviewSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1TokenReview. # noqa: E501
+
+
+ :return: The status of this V1TokenReview. # noqa: E501
+ :rtype: V1TokenReviewStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1TokenReview.
+
+
+ :param status: The status of this V1TokenReview. # noqa: E501
+ :type: V1TokenReviewStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TokenReview):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TokenReview):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_token_review_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_token_review_spec.py
new file mode 100644
index 0000000000..4b8a6d7968
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_token_review_spec.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TokenReviewSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'audiences': 'list[str]',
+ 'token': 'str'
+ }
+
+ attribute_map = {
+ 'audiences': 'audiences',
+ 'token': 'token'
+ }
+
+ def __init__(self, audiences=None, token=None, local_vars_configuration=None): # noqa: E501
+ """V1TokenReviewSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._audiences = None
+ self._token = None
+ self.discriminator = None
+
+ if audiences is not None:
+ self.audiences = audiences
+ if token is not None:
+ self.token = token
+
+ @property
+ def audiences(self):
+ """Gets the audiences of this V1TokenReviewSpec. # noqa: E501
+
+ Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver. # noqa: E501
+
+ :return: The audiences of this V1TokenReviewSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._audiences
+
+ @audiences.setter
+ def audiences(self, audiences):
+ """Sets the audiences of this V1TokenReviewSpec.
+
+ Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver. # noqa: E501
+
+ :param audiences: The audiences of this V1TokenReviewSpec. # noqa: E501
+ :type: list[str]
+ """
+
+ self._audiences = audiences
+
+ @property
+ def token(self):
+ """Gets the token of this V1TokenReviewSpec. # noqa: E501
+
+ Token is the opaque bearer token. # noqa: E501
+
+ :return: The token of this V1TokenReviewSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._token
+
+ @token.setter
+ def token(self, token):
+ """Sets the token of this V1TokenReviewSpec.
+
+ Token is the opaque bearer token. # noqa: E501
+
+ :param token: The token of this V1TokenReviewSpec. # noqa: E501
+ :type: str
+ """
+
+ self._token = token
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TokenReviewSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TokenReviewSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_token_review_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_token_review_status.py
new file mode 100644
index 0000000000..884589059f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_token_review_status.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TokenReviewStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'audiences': 'list[str]',
+ 'authenticated': 'bool',
+ 'error': 'str',
+ 'user': 'V1UserInfo'
+ }
+
+ attribute_map = {
+ 'audiences': 'audiences',
+ 'authenticated': 'authenticated',
+ 'error': 'error',
+ 'user': 'user'
+ }
+
+ def __init__(self, audiences=None, authenticated=None, error=None, user=None, local_vars_configuration=None): # noqa: E501
+ """V1TokenReviewStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._audiences = None
+ self._authenticated = None
+ self._error = None
+ self._user = None
+ self.discriminator = None
+
+ if audiences is not None:
+ self.audiences = audiences
+ if authenticated is not None:
+ self.authenticated = authenticated
+ if error is not None:
+ self.error = error
+ if user is not None:
+ self.user = user
+
+ @property
+ def audiences(self):
+ """Gets the audiences of this V1TokenReviewStatus. # noqa: E501
+
+ Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server. # noqa: E501
+
+ :return: The audiences of this V1TokenReviewStatus. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._audiences
+
+ @audiences.setter
+ def audiences(self, audiences):
+ """Sets the audiences of this V1TokenReviewStatus.
+
+ Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server. # noqa: E501
+
+ :param audiences: The audiences of this V1TokenReviewStatus. # noqa: E501
+ :type: list[str]
+ """
+
+ self._audiences = audiences
+
+ @property
+ def authenticated(self):
+ """Gets the authenticated of this V1TokenReviewStatus. # noqa: E501
+
+ Authenticated indicates that the token was associated with a known user. # noqa: E501
+
+ :return: The authenticated of this V1TokenReviewStatus. # noqa: E501
+ :rtype: bool
+ """
+ return self._authenticated
+
+ @authenticated.setter
+ def authenticated(self, authenticated):
+ """Sets the authenticated of this V1TokenReviewStatus.
+
+ Authenticated indicates that the token was associated with a known user. # noqa: E501
+
+ :param authenticated: The authenticated of this V1TokenReviewStatus. # noqa: E501
+ :type: bool
+ """
+
+ self._authenticated = authenticated
+
+ @property
+ def error(self):
+ """Gets the error of this V1TokenReviewStatus. # noqa: E501
+
+ Error indicates that the token couldn't be checked # noqa: E501
+
+ :return: The error of this V1TokenReviewStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._error
+
+ @error.setter
+ def error(self, error):
+ """Sets the error of this V1TokenReviewStatus.
+
+ Error indicates that the token couldn't be checked # noqa: E501
+
+ :param error: The error of this V1TokenReviewStatus. # noqa: E501
+ :type: str
+ """
+
+ self._error = error
+
+ @property
+ def user(self):
+ """Gets the user of this V1TokenReviewStatus. # noqa: E501
+
+
+ :return: The user of this V1TokenReviewStatus. # noqa: E501
+ :rtype: V1UserInfo
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1TokenReviewStatus.
+
+
+ :param user: The user of this V1TokenReviewStatus. # noqa: E501
+ :type: V1UserInfo
+ """
+
+ self._user = user
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TokenReviewStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TokenReviewStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_toleration.py b/contrib/python/kubernetes/kubernetes/client/models/v1_toleration.py
new file mode 100644
index 0000000000..e38614d713
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_toleration.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Toleration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'effect': 'str',
+ 'key': 'str',
+ 'operator': 'str',
+ 'toleration_seconds': 'int',
+ 'value': 'str'
+ }
+
+ attribute_map = {
+ 'effect': 'effect',
+ 'key': 'key',
+ 'operator': 'operator',
+ 'toleration_seconds': 'tolerationSeconds',
+ 'value': 'value'
+ }
+
+ def __init__(self, effect=None, key=None, operator=None, toleration_seconds=None, value=None, local_vars_configuration=None): # noqa: E501
+ """V1Toleration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._effect = None
+ self._key = None
+ self._operator = None
+ self._toleration_seconds = None
+ self._value = None
+ self.discriminator = None
+
+ if effect is not None:
+ self.effect = effect
+ if key is not None:
+ self.key = key
+ if operator is not None:
+ self.operator = operator
+ if toleration_seconds is not None:
+ self.toleration_seconds = toleration_seconds
+ if value is not None:
+ self.value = value
+
+ @property
+ def effect(self):
+ """Gets the effect of this V1Toleration. # noqa: E501
+
+ Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. # noqa: E501
+
+ :return: The effect of this V1Toleration. # noqa: E501
+ :rtype: str
+ """
+ return self._effect
+
+ @effect.setter
+ def effect(self, effect):
+ """Sets the effect of this V1Toleration.
+
+ Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. # noqa: E501
+
+ :param effect: The effect of this V1Toleration. # noqa: E501
+ :type: str
+ """
+
+ self._effect = effect
+
+ @property
+ def key(self):
+ """Gets the key of this V1Toleration. # noqa: E501
+
+ Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. # noqa: E501
+
+ :return: The key of this V1Toleration. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1Toleration.
+
+ Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. # noqa: E501
+
+ :param key: The key of this V1Toleration. # noqa: E501
+ :type: str
+ """
+
+ self._key = key
+
+ @property
+ def operator(self):
+ """Gets the operator of this V1Toleration. # noqa: E501
+
+ Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. # noqa: E501
+
+ :return: The operator of this V1Toleration. # noqa: E501
+ :rtype: str
+ """
+ return self._operator
+
+ @operator.setter
+ def operator(self, operator):
+ """Sets the operator of this V1Toleration.
+
+ Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. # noqa: E501
+
+ :param operator: The operator of this V1Toleration. # noqa: E501
+ :type: str
+ """
+
+ self._operator = operator
+
+ @property
+ def toleration_seconds(self):
+ """Gets the toleration_seconds of this V1Toleration. # noqa: E501
+
+ TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. # noqa: E501
+
+ :return: The toleration_seconds of this V1Toleration. # noqa: E501
+ :rtype: int
+ """
+ return self._toleration_seconds
+
+ @toleration_seconds.setter
+ def toleration_seconds(self, toleration_seconds):
+ """Sets the toleration_seconds of this V1Toleration.
+
+ TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. # noqa: E501
+
+ :param toleration_seconds: The toleration_seconds of this V1Toleration. # noqa: E501
+ :type: int
+ """
+
+ self._toleration_seconds = toleration_seconds
+
+ @property
+ def value(self):
+ """Gets the value of this V1Toleration. # noqa: E501
+
+ Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. # noqa: E501
+
+ :return: The value of this V1Toleration. # noqa: E501
+ :rtype: str
+ """
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ """Sets the value of this V1Toleration.
+
+ Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. # noqa: E501
+
+ :param value: The value of this V1Toleration. # noqa: E501
+ :type: str
+ """
+
+ self._value = value
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Toleration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Toleration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_label_requirement.py b/contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_label_requirement.py
new file mode 100644
index 0000000000..6fcbd1af63
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_label_requirement.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TopologySelectorLabelRequirement(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'key': 'str',
+ 'values': 'list[str]'
+ }
+
+ attribute_map = {
+ 'key': 'key',
+ 'values': 'values'
+ }
+
+ def __init__(self, key=None, values=None, local_vars_configuration=None): # noqa: E501
+ """V1TopologySelectorLabelRequirement - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._key = None
+ self._values = None
+ self.discriminator = None
+
+ self.key = key
+ self.values = values
+
+ @property
+ def key(self):
+ """Gets the key of this V1TopologySelectorLabelRequirement. # noqa: E501
+
+ The label key that the selector applies to. # noqa: E501
+
+ :return: The key of this V1TopologySelectorLabelRequirement. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1TopologySelectorLabelRequirement.
+
+ The label key that the selector applies to. # noqa: E501
+
+ :param key: The key of this V1TopologySelectorLabelRequirement. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
+ raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
+
+ self._key = key
+
+ @property
+ def values(self):
+ """Gets the values of this V1TopologySelectorLabelRequirement. # noqa: E501
+
+ An array of string values. One value must match the label to be selected. Each entry in Values is ORed. # noqa: E501
+
+ :return: The values of this V1TopologySelectorLabelRequirement. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._values
+
+ @values.setter
+ def values(self, values):
+ """Sets the values of this V1TopologySelectorLabelRequirement.
+
+ An array of string values. One value must match the label to be selected. Each entry in Values is ORed. # noqa: E501
+
+ :param values: The values of this V1TopologySelectorLabelRequirement. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and values is None: # noqa: E501
+ raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
+
+ self._values = values
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TopologySelectorLabelRequirement):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TopologySelectorLabelRequirement):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_term.py b/contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_term.py
new file mode 100644
index 0000000000..81aec9a1b8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_topology_selector_term.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TopologySelectorTerm(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'match_label_expressions': 'list[V1TopologySelectorLabelRequirement]'
+ }
+
+ attribute_map = {
+ 'match_label_expressions': 'matchLabelExpressions'
+ }
+
+ def __init__(self, match_label_expressions=None, local_vars_configuration=None): # noqa: E501
+ """V1TopologySelectorTerm - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._match_label_expressions = None
+ self.discriminator = None
+
+ if match_label_expressions is not None:
+ self.match_label_expressions = match_label_expressions
+
+ @property
+ def match_label_expressions(self):
+ """Gets the match_label_expressions of this V1TopologySelectorTerm. # noqa: E501
+
+ A list of topology selector requirements by labels. # noqa: E501
+
+ :return: The match_label_expressions of this V1TopologySelectorTerm. # noqa: E501
+ :rtype: list[V1TopologySelectorLabelRequirement]
+ """
+ return self._match_label_expressions
+
+ @match_label_expressions.setter
+ def match_label_expressions(self, match_label_expressions):
+ """Sets the match_label_expressions of this V1TopologySelectorTerm.
+
+ A list of topology selector requirements by labels. # noqa: E501
+
+ :param match_label_expressions: The match_label_expressions of this V1TopologySelectorTerm. # noqa: E501
+ :type: list[V1TopologySelectorLabelRequirement]
+ """
+
+ self._match_label_expressions = match_label_expressions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TopologySelectorTerm):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TopologySelectorTerm):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_topology_spread_constraint.py b/contrib/python/kubernetes/kubernetes/client/models/v1_topology_spread_constraint.py
new file mode 100644
index 0000000000..e896115861
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_topology_spread_constraint.py
@@ -0,0 +1,319 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TopologySpreadConstraint(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'label_selector': 'V1LabelSelector',
+ 'match_label_keys': 'list[str]',
+ 'max_skew': 'int',
+ 'min_domains': 'int',
+ 'node_affinity_policy': 'str',
+ 'node_taints_policy': 'str',
+ 'topology_key': 'str',
+ 'when_unsatisfiable': 'str'
+ }
+
+ attribute_map = {
+ 'label_selector': 'labelSelector',
+ 'match_label_keys': 'matchLabelKeys',
+ 'max_skew': 'maxSkew',
+ 'min_domains': 'minDomains',
+ 'node_affinity_policy': 'nodeAffinityPolicy',
+ 'node_taints_policy': 'nodeTaintsPolicy',
+ 'topology_key': 'topologyKey',
+ 'when_unsatisfiable': 'whenUnsatisfiable'
+ }
+
+ def __init__(self, label_selector=None, match_label_keys=None, max_skew=None, min_domains=None, node_affinity_policy=None, node_taints_policy=None, topology_key=None, when_unsatisfiable=None, local_vars_configuration=None): # noqa: E501
+ """V1TopologySpreadConstraint - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._label_selector = None
+ self._match_label_keys = None
+ self._max_skew = None
+ self._min_domains = None
+ self._node_affinity_policy = None
+ self._node_taints_policy = None
+ self._topology_key = None
+ self._when_unsatisfiable = None
+ self.discriminator = None
+
+ if label_selector is not None:
+ self.label_selector = label_selector
+ if match_label_keys is not None:
+ self.match_label_keys = match_label_keys
+ self.max_skew = max_skew
+ if min_domains is not None:
+ self.min_domains = min_domains
+ if node_affinity_policy is not None:
+ self.node_affinity_policy = node_affinity_policy
+ if node_taints_policy is not None:
+ self.node_taints_policy = node_taints_policy
+ self.topology_key = topology_key
+ self.when_unsatisfiable = when_unsatisfiable
+
+ @property
+ def label_selector(self):
+ """Gets the label_selector of this V1TopologySpreadConstraint. # noqa: E501
+
+
+ :return: The label_selector of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._label_selector
+
+ @label_selector.setter
+ def label_selector(self, label_selector):
+ """Sets the label_selector of this V1TopologySpreadConstraint.
+
+
+ :param label_selector: The label_selector of this V1TopologySpreadConstraint. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._label_selector = label_selector
+
+ @property
+ def match_label_keys(self):
+ """Gets the match_label_keys of this V1TopologySpreadConstraint. # noqa: E501
+
+ MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). # noqa: E501
+
+ :return: The match_label_keys of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._match_label_keys
+
+ @match_label_keys.setter
+ def match_label_keys(self, match_label_keys):
+ """Sets the match_label_keys of this V1TopologySpreadConstraint.
+
+ MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). # noqa: E501
+
+ :param match_label_keys: The match_label_keys of this V1TopologySpreadConstraint. # noqa: E501
+ :type: list[str]
+ """
+
+ self._match_label_keys = match_label_keys
+
+ @property
+ def max_skew(self):
+ """Gets the max_skew of this V1TopologySpreadConstraint. # noqa: E501
+
+ MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. # noqa: E501
+
+ :return: The max_skew of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: int
+ """
+ return self._max_skew
+
+ @max_skew.setter
+ def max_skew(self, max_skew):
+ """Sets the max_skew of this V1TopologySpreadConstraint.
+
+ MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. # noqa: E501
+
+ :param max_skew: The max_skew of this V1TopologySpreadConstraint. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and max_skew is None: # noqa: E501
+ raise ValueError("Invalid value for `max_skew`, must not be `None`") # noqa: E501
+
+ self._max_skew = max_skew
+
+ @property
+ def min_domains(self):
+ """Gets the min_domains of this V1TopologySpreadConstraint. # noqa: E501
+
+ MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). # noqa: E501
+
+ :return: The min_domains of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: int
+ """
+ return self._min_domains
+
+ @min_domains.setter
+ def min_domains(self, min_domains):
+ """Sets the min_domains of this V1TopologySpreadConstraint.
+
+ MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). # noqa: E501
+
+ :param min_domains: The min_domains of this V1TopologySpreadConstraint. # noqa: E501
+ :type: int
+ """
+
+ self._min_domains = min_domains
+
+ @property
+ def node_affinity_policy(self):
+ """Gets the node_affinity_policy of this V1TopologySpreadConstraint. # noqa: E501
+
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. # noqa: E501
+
+ :return: The node_affinity_policy of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: str
+ """
+ return self._node_affinity_policy
+
+ @node_affinity_policy.setter
+ def node_affinity_policy(self, node_affinity_policy):
+ """Sets the node_affinity_policy of this V1TopologySpreadConstraint.
+
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. # noqa: E501
+
+ :param node_affinity_policy: The node_affinity_policy of this V1TopologySpreadConstraint. # noqa: E501
+ :type: str
+ """
+
+ self._node_affinity_policy = node_affinity_policy
+
+ @property
+ def node_taints_policy(self):
+ """Gets the node_taints_policy of this V1TopologySpreadConstraint. # noqa: E501
+
+ NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. # noqa: E501
+
+ :return: The node_taints_policy of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: str
+ """
+ return self._node_taints_policy
+
+ @node_taints_policy.setter
+ def node_taints_policy(self, node_taints_policy):
+ """Sets the node_taints_policy of this V1TopologySpreadConstraint.
+
+ NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. # noqa: E501
+
+ :param node_taints_policy: The node_taints_policy of this V1TopologySpreadConstraint. # noqa: E501
+ :type: str
+ """
+
+ self._node_taints_policy = node_taints_policy
+
+ @property
+ def topology_key(self):
+ """Gets the topology_key of this V1TopologySpreadConstraint. # noqa: E501
+
+ TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field. # noqa: E501
+
+ :return: The topology_key of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: str
+ """
+ return self._topology_key
+
+ @topology_key.setter
+ def topology_key(self, topology_key):
+ """Sets the topology_key of this V1TopologySpreadConstraint.
+
+ TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field. # noqa: E501
+
+ :param topology_key: The topology_key of this V1TopologySpreadConstraint. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and topology_key is None: # noqa: E501
+ raise ValueError("Invalid value for `topology_key`, must not be `None`") # noqa: E501
+
+ self._topology_key = topology_key
+
+ @property
+ def when_unsatisfiable(self):
+ """Gets the when_unsatisfiable of this V1TopologySpreadConstraint. # noqa: E501
+
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. # noqa: E501
+
+ :return: The when_unsatisfiable of this V1TopologySpreadConstraint. # noqa: E501
+ :rtype: str
+ """
+ return self._when_unsatisfiable
+
+ @when_unsatisfiable.setter
+ def when_unsatisfiable(self, when_unsatisfiable):
+ """Sets the when_unsatisfiable of this V1TopologySpreadConstraint.
+
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. # noqa: E501
+
+ :param when_unsatisfiable: The when_unsatisfiable of this V1TopologySpreadConstraint. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and when_unsatisfiable is None: # noqa: E501
+ raise ValueError("Invalid value for `when_unsatisfiable`, must not be `None`") # noqa: E501
+
+ self._when_unsatisfiable = when_unsatisfiable
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TopologySpreadConstraint):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TopologySpreadConstraint):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_typed_local_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_typed_local_object_reference.py
new file mode 100644
index 0000000000..16cdb33f12
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_typed_local_object_reference.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TypedLocalObjectReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_group': 'str',
+ 'kind': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'api_group': 'apiGroup',
+ 'kind': 'kind',
+ 'name': 'name'
+ }
+
+ def __init__(self, api_group=None, kind=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1TypedLocalObjectReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_group = None
+ self._kind = None
+ self._name = None
+ self.discriminator = None
+
+ if api_group is not None:
+ self.api_group = api_group
+ self.kind = kind
+ self.name = name
+
+ @property
+ def api_group(self):
+ """Gets the api_group of this V1TypedLocalObjectReference. # noqa: E501
+
+ APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
+
+ :return: The api_group of this V1TypedLocalObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_group
+
+ @api_group.setter
+ def api_group(self, api_group):
+ """Sets the api_group of this V1TypedLocalObjectReference.
+
+ APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
+
+ :param api_group: The api_group of this V1TypedLocalObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_group = api_group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1TypedLocalObjectReference. # noqa: E501
+
+ Kind is the type of resource being referenced # noqa: E501
+
+ :return: The kind of this V1TypedLocalObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1TypedLocalObjectReference.
+
+ Kind is the type of resource being referenced # noqa: E501
+
+ :param kind: The kind of this V1TypedLocalObjectReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1TypedLocalObjectReference. # noqa: E501
+
+ Name is the name of resource being referenced # noqa: E501
+
+ :return: The name of this V1TypedLocalObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1TypedLocalObjectReference.
+
+ Name is the name of resource being referenced # noqa: E501
+
+ :param name: The name of this V1TypedLocalObjectReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TypedLocalObjectReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TypedLocalObjectReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_typed_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1_typed_object_reference.py
new file mode 100644
index 0000000000..cd86c9537e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_typed_object_reference.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1TypedObjectReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_group': 'str',
+ 'kind': 'str',
+ 'name': 'str',
+ 'namespace': 'str'
+ }
+
+ attribute_map = {
+ 'api_group': 'apiGroup',
+ 'kind': 'kind',
+ 'name': 'name',
+ 'namespace': 'namespace'
+ }
+
+ def __init__(self, api_group=None, kind=None, name=None, namespace=None, local_vars_configuration=None): # noqa: E501
+ """V1TypedObjectReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_group = None
+ self._kind = None
+ self._name = None
+ self._namespace = None
+ self.discriminator = None
+
+ if api_group is not None:
+ self.api_group = api_group
+ self.kind = kind
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+
+ @property
+ def api_group(self):
+ """Gets the api_group of this V1TypedObjectReference. # noqa: E501
+
+ APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
+
+ :return: The api_group of this V1TypedObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_group
+
+ @api_group.setter
+ def api_group(self, api_group):
+ """Sets the api_group of this V1TypedObjectReference.
+
+ APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. # noqa: E501
+
+ :param api_group: The api_group of this V1TypedObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_group = api_group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1TypedObjectReference. # noqa: E501
+
+ Kind is the type of resource being referenced # noqa: E501
+
+ :return: The kind of this V1TypedObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1TypedObjectReference.
+
+ Kind is the type of resource being referenced # noqa: E501
+
+ :param kind: The kind of this V1TypedObjectReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1TypedObjectReference. # noqa: E501
+
+ Name is the name of resource being referenced # noqa: E501
+
+ :return: The name of this V1TypedObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1TypedObjectReference.
+
+ Name is the name of resource being referenced # noqa: E501
+
+ :param name: The name of this V1TypedObjectReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1TypedObjectReference. # noqa: E501
+
+ Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. # noqa: E501
+
+ :return: The namespace of this V1TypedObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1TypedObjectReference.
+
+ Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. # noqa: E501
+
+ :param namespace: The namespace of this V1TypedObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1TypedObjectReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1TypedObjectReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_uncounted_terminated_pods.py b/contrib/python/kubernetes/kubernetes/client/models/v1_uncounted_terminated_pods.py
new file mode 100644
index 0000000000..7d0ce08b70
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_uncounted_terminated_pods.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1UncountedTerminatedPods(object):
    """Model for UIDs of Pods that have terminated but are not yet
    reflected in Job status counters (Kubernetes OpenAPI, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI declared type.
    openapi_types = {
        'failed': 'list[str]',
        'succeeded': 'list[str]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'failed': 'failed',
        'succeeded': 'succeeded'
    }

    def __init__(self, failed=None, succeeded=None, local_vars_configuration=None):  # noqa: E501
        """V1UncountedTerminatedPods - a model defined in OpenAPI.

        :param failed: UIDs of failed Pods (optional).
        :param succeeded: UIDs of succeeded Pods (optional).
        :param local_vars_configuration: client configuration; a default
            ``Configuration()`` is created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._failed = None
        self._succeeded = None
        self.discriminator = None

        # Both fields are optional: assign only what the caller provided,
        # going through the property setters.
        if failed is not None:
            self.failed = failed
        if succeeded is not None:
            self.succeeded = succeeded

    @property
    def failed(self):
        """list[str]: failed holds UIDs of failed Pods."""
        return self._failed

    @failed.setter
    def failed(self, value):
        """Set the UIDs of failed Pods (no client-side validation)."""
        self._failed = value

    @property
    def succeeded(self):
        """list[str]: succeeded holds UIDs of succeeded Pods."""
        return self._succeeded

    @succeeded.setter
    def succeeded(self, value):
        """Set the UIDs of succeeded Pods (no client-side validation)."""
        self._succeeded = value

    def to_dict(self):
        """Return the model properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and inside dict values.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so `print`/`pprint` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both are this type and serialize alike."""
        if isinstance(other, V1UncountedTerminatedPods):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; any non-model value compares unequal."""
        if isinstance(other, V1UncountedTerminatedPods):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_user_info.py b/contrib/python/kubernetes/kubernetes/client/models/v1_user_info.py
new file mode 100644
index 0000000000..e98d59c8bb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_user_info.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1UserInfo(object):
    """Model describing the user that made a request, as reported by the
    authenticator (Kubernetes OpenAPI, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI declared type.
    openapi_types = {
        'extra': 'dict(str, list[str])',
        'groups': 'list[str]',
        'uid': 'str',
        'username': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'extra': 'extra',
        'groups': 'groups',
        'uid': 'uid',
        'username': 'username'
    }

    def __init__(self, extra=None, groups=None, uid=None, username=None, local_vars_configuration=None):  # noqa: E501
        """V1UserInfo - a model defined in OpenAPI.

        All fields are optional; only those the caller supplied are
        assigned, going through the property setters.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._extra = None
        self._groups = None
        self._uid = None
        self._username = None
        self.discriminator = None

        if extra is not None:
            self.extra = extra
        if groups is not None:
            self.groups = groups
        if uid is not None:
            self.uid = uid
        if username is not None:
            self.username = username

    @property
    def extra(self):
        """dict(str, list[str]): Any additional information provided by the
        authenticator."""
        return self._extra

    @extra.setter
    def extra(self, value):
        """Set the authenticator-provided extra data (no validation)."""
        self._extra = value

    @property
    def groups(self):
        """list[str]: The names of groups this user is a part of."""
        return self._groups

    @groups.setter
    def groups(self, value):
        """Set the user's group names (no validation)."""
        self._groups = value

    @property
    def uid(self):
        """str: A unique value identifying this user across time; a deleted
        and re-created user of the same name gets a different UID."""
        return self._uid

    @uid.setter
    def uid(self, value):
        """Set the unique user identifier (no validation)."""
        self._uid = value

    @property
    def username(self):
        """str: The name that uniquely identifies this user among all
        active users."""
        return self._username

    @username.setter
    def username(self, value):
        """Set the unique username (no validation)."""
        self._username = value

    def to_dict(self):
        """Return the model properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and inside dict values.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so `print`/`pprint` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both are this type and serialize alike."""
        if isinstance(other, V1UserInfo):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; any non-model value compares unequal."""
        if isinstance(other, V1UserInfo):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook.py b/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook.py
new file mode 100644
index 0000000000..22f50562e0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook.py
@@ -0,0 +1,400 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ValidatingWebhook(object):
    """Model describing an admission validating webhook and the resources
    and operations it applies to (Kubernetes OpenAPI, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI declared type.
    openapi_types = {
        'admission_review_versions': 'list[str]',
        'client_config': 'AdmissionregistrationV1WebhookClientConfig',
        'failure_policy': 'str',
        'match_conditions': 'list[V1MatchCondition]',
        'match_policy': 'str',
        'name': 'str',
        'namespace_selector': 'V1LabelSelector',
        'object_selector': 'V1LabelSelector',
        'rules': 'list[V1RuleWithOperations]',
        'side_effects': 'str',
        'timeout_seconds': 'int'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'admission_review_versions': 'admissionReviewVersions',
        'client_config': 'clientConfig',
        'failure_policy': 'failurePolicy',
        'match_conditions': 'matchConditions',
        'match_policy': 'matchPolicy',
        'name': 'name',
        'namespace_selector': 'namespaceSelector',
        'object_selector': 'objectSelector',
        'rules': 'rules',
        'side_effects': 'sideEffects',
        'timeout_seconds': 'timeoutSeconds'
    }

    def __init__(self, admission_review_versions=None, client_config=None, failure_policy=None, match_conditions=None, match_policy=None, name=None, namespace_selector=None, object_selector=None, rules=None, side_effects=None, timeout_seconds=None, local_vars_configuration=None):  # noqa: E501
        """V1ValidatingWebhook - a model defined in OpenAPI.

        ``admission_review_versions``, ``client_config``, ``name`` and
        ``side_effects`` are required: their setters raise ``ValueError``
        on ``None`` when client-side validation is enabled.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._admission_review_versions = None
        self._client_config = None
        self._failure_policy = None
        self._match_conditions = None
        self._match_policy = None
        self._name = None
        self._namespace_selector = None
        self._object_selector = None
        self._rules = None
        self._side_effects = None
        self._timeout_seconds = None
        self.discriminator = None

        # Required fields are assigned unconditionally so their setters can
        # enforce presence; optional fields only when the caller gave them.
        self.admission_review_versions = admission_review_versions
        self.client_config = client_config
        if failure_policy is not None:
            self.failure_policy = failure_policy
        if match_conditions is not None:
            self.match_conditions = match_conditions
        if match_policy is not None:
            self.match_policy = match_policy
        self.name = name
        if namespace_selector is not None:
            self.namespace_selector = namespace_selector
        if object_selector is not None:
            self.object_selector = object_selector
        if rules is not None:
            self.rules = rules
        self.side_effects = side_effects
        if timeout_seconds is not None:
            self.timeout_seconds = timeout_seconds

    @property
    def admission_review_versions(self):
        """list[str]: Ordered list of preferred ``AdmissionReview`` versions
        the webhook expects; the API server uses the first one it supports,
        and the call fails (subject to the failure policy) if none match."""
        return self._admission_review_versions

    @admission_review_versions.setter
    def admission_review_versions(self, value):
        """Set the preferred AdmissionReview versions; required field."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `admission_review_versions`, must not be `None`")  # noqa: E501
        self._admission_review_versions = value

    @property
    def client_config(self):
        """AdmissionregistrationV1WebhookClientConfig: The client_config of
        this webhook."""
        return self._client_config

    @client_config.setter
    def client_config(self, value):
        """Set the webhook client configuration; required field."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `client_config`, must not be `None`")  # noqa: E501
        self._client_config = value

    @property
    def failure_policy(self):
        """str: How unrecognized errors from the admission endpoint are
        handled — allowed values are Ignore or Fail; defaults to Fail."""
        return self._failure_policy

    @failure_policy.setter
    def failure_policy(self, value):
        """Set the failure policy (no client-side validation)."""
        self._failure_policy = value

    @property
    def match_conditions(self):
        """list[V1MatchCondition]: Conditions that must all evaluate to TRUE
        for a request (already matched by rules and selectors) to be sent to
        this webhook; an empty list matches all requests, 64 maximum."""
        return self._match_conditions

    @match_conditions.setter
    def match_conditions(self, value):
        """Set the match conditions (no client-side validation)."""
        self._match_conditions = value

    @property
    def match_policy(self):
        """str: How the \"rules\" list matches incoming requests — \"Exact\"
        or \"Equivalent\"; defaults to \"Equivalent\"."""
        return self._match_policy

    @match_policy.setter
    def match_policy(self, value):
        """Set the match policy (no client-side validation)."""
        self._match_policy = value

    @property
    def name(self):
        """str: Fully qualified name of the admission webhook, e.g.
        imagepolicy.kubernetes.io. Required."""
        return self._name

    @name.setter
    def name(self, value):
        """Set the webhook name; required field."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = value

    @property
    def namespace_selector(self):
        """V1LabelSelector: The namespace_selector of this webhook."""
        return self._namespace_selector

    @namespace_selector.setter
    def namespace_selector(self, value):
        """Set the namespace selector (no client-side validation)."""
        self._namespace_selector = value

    @property
    def object_selector(self):
        """V1LabelSelector: The object_selector of this webhook."""
        return self._object_selector

    @object_selector.setter
    def object_selector(self, value):
        """Set the object selector (no client-side validation)."""
        self._object_selector = value

    @property
    def rules(self):
        """list[V1RuleWithOperations]: What operations on which
        resources/subresources the webhook cares about; a request matching
        any rule triggers the webhook."""
        return self._rules

    @rules.setter
    def rules(self, value):
        """Set the matching rules (no client-side validation)."""
        self._rules = value

    @property
    def side_effects(self):
        """str: Whether this webhook has side effects — acceptable values
        are None and NoneOnDryRun. Required."""
        return self._side_effects

    @side_effects.setter
    def side_effects(self, value):
        """Set the side-effects declaration; required field."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `side_effects`, must not be `None`")  # noqa: E501
        self._side_effects = value

    @property
    def timeout_seconds(self):
        """int: Timeout for the webhook call, between 1 and 30 seconds;
        defaults to 10 seconds."""
        return self._timeout_seconds

    @timeout_seconds.setter
    def timeout_seconds(self, value):
        """Set the call timeout in seconds (no client-side validation)."""
        self._timeout_seconds = value

    def to_dict(self):
        """Return the model properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and inside dict values.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so `print`/`pprint` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both are this type and serialize alike."""
        if isinstance(other, V1ValidatingWebhook):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; any non-model value compares unequal."""
        if isinstance(other, V1ValidatingWebhook):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration.py
new file mode 100644
index 0000000000..7dbf76a7cb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ValidatingWebhookConfiguration(object):
    """Model for a ValidatingWebhookConfiguration object holding a list of
    validating webhooks (Kubernetes OpenAPI, release-1.28).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI declared type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'webhooks': 'list[V1ValidatingWebhook]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'webhooks': 'webhooks'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, webhooks=None, local_vars_configuration=None):  # noqa: E501
        """V1ValidatingWebhookConfiguration - a model defined in OpenAPI.

        All fields are optional; only those the caller supplied are
        assigned, going through the property setters.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._webhooks = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if webhooks is not None:
            self.webhooks = webhooks

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object."""
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set the API version string (no client-side validation)."""
        self._api_version = value

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents; CamelCase, cannot be updated."""
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set the resource kind string (no client-side validation)."""
        self._kind = value

    @property
    def metadata(self):
        """V1ObjectMeta: The metadata of this configuration."""
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set the object metadata (no client-side validation)."""
        self._metadata = value

    @property
    def webhooks(self):
        """list[V1ValidatingWebhook]: The webhooks and the affected
        resources and operations."""
        return self._webhooks

    @webhooks.setter
    def webhooks(self, value):
        """Set the list of webhooks (no client-side validation)."""
        self._webhooks = value

    def to_dict(self):
        """Return the model properties as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and inside dict values.
        """
        def _serialize(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate to :meth:`to_str` so `print`/`pprint` show the model."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both are this type and serialize alike."""
        if isinstance(other, V1ValidatingWebhookConfiguration):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``; any non-model value compares unequal."""
        if isinstance(other, V1ValidatingWebhookConfiguration):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration_list.py
new file mode 100644
index 0000000000..2a3544eebd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_validating_webhook_configuration_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ValidatingWebhookConfigurationList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1ValidatingWebhookConfiguration]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Python snake_case attribute name -> camelCase JSON key.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ValidatingWebhookConfigurationList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is required by the schema, so it is assigned
        # unconditionally; the setter rejects None when
        # client_side_validation is enabled.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1ValidatingWebhookConfigurationList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ValidatingWebhookConfigurationList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1ValidatingWebhookConfigurationList.  # noqa: E501

        List of ValidatingWebhookConfiguration.  # noqa: E501

        :return: The items of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :rtype: list[V1ValidatingWebhookConfiguration]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1ValidatingWebhookConfigurationList.

        List of ValidatingWebhookConfiguration.  # noqa: E501

        :param items: The items of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :type: list[V1ValidatingWebhookConfiguration]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1ValidatingWebhookConfigurationList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1ValidatingWebhookConfigurationList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1ValidatingWebhookConfigurationList.  # noqa: E501


        :return: The metadata of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1ValidatingWebhookConfigurationList.


        :param metadata: The metadata of this V1ValidatingWebhookConfigurationList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Serialize every declared attribute, recursing into nested
        # generated models (anything with a to_dict() method).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Structural equality via the serialized dict form.
        if not isinstance(other, V1ValidatingWebhookConfigurationList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ValidatingWebhookConfigurationList):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_validation_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1_validation_rule.py
new file mode 100644
index 0000000000..a7aafcd387
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_validation_rule.py
@@ -0,0 +1,235 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1ValidationRule(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'field_path': 'str',
        'message': 'str',
        'message_expression': 'str',
        'reason': 'str',
        'rule': 'str'
    }

    # Python snake_case attribute name -> camelCase JSON key.
    attribute_map = {
        'field_path': 'fieldPath',
        'message': 'message',
        'message_expression': 'messageExpression',
        'reason': 'reason',
        'rule': 'rule'
    }

    def __init__(self, field_path=None, message=None, message_expression=None, reason=None, rule=None, local_vars_configuration=None):  # noqa: E501
        """V1ValidationRule - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._field_path = None
        self._message = None
        self._message_expression = None
        self._reason = None
        self._rule = None
        self.discriminator = None

        if field_path is not None:
            self.field_path = field_path
        if message is not None:
            self.message = message
        if message_expression is not None:
            self.message_expression = message_expression
        if reason is not None:
            self.reason = reason
        # `rule` is required by the schema, so it is assigned
        # unconditionally; the setter rejects None when
        # client_side_validation is enabled.
        self.rule = rule

    @property
    def field_path(self):
        """Gets the field_path of this V1ValidationRule.  # noqa: E501

        fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`  # noqa: E501

        :return: The field_path of this V1ValidationRule.  # noqa: E501
        :rtype: str
        """
        return self._field_path

    @field_path.setter
    def field_path(self, field_path):
        """Sets the field_path of this V1ValidationRule.

        fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`  # noqa: E501

        :param field_path: The field_path of this V1ValidationRule.  # noqa: E501
        :type: str
        """

        self._field_path = field_path

    @property
    def message(self):
        """Gets the message of this V1ValidationRule.  # noqa: E501

        Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\"  # noqa: E501

        :return: The message of this V1ValidationRule.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1ValidationRule.

        Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\"  # noqa: E501

        :param message: The message of this V1ValidationRule.  # noqa: E501
        :type: str
        """

        self._message = message

    @property
    def message_expression(self):
        """Gets the message_expression of this V1ValidationRule.  # noqa: E501

        MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: \"x must be less than max (\"+string(self.max)+\")\"  # noqa: E501

        :return: The message_expression of this V1ValidationRule.  # noqa: E501
        :rtype: str
        """
        return self._message_expression

    @message_expression.setter
    def message_expression(self, message_expression):
        """Sets the message_expression of this V1ValidationRule.

        MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: \"x must be less than max (\"+string(self.max)+\")\"  # noqa: E501

        :param message_expression: The message_expression of this V1ValidationRule.  # noqa: E501
        :type: str
        """

        self._message_expression = message_expression

    @property
    def reason(self):
        """Gets the reason of this V1ValidationRule.  # noqa: E501

        reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the first failed validation rule. The currently supported reasons are: \"FieldValueInvalid\", \"FieldValueForbidden\", \"FieldValueRequired\", \"FieldValueDuplicate\". If not set, default to use \"FieldValueInvalid\". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.  # noqa: E501

        :return: The reason of this V1ValidationRule.  # noqa: E501
        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1ValidationRule.

        reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the first failed validation rule. The currently supported reasons are: \"FieldValueInvalid\", \"FieldValueForbidden\", \"FieldValueRequired\", \"FieldValueDuplicate\". If not set, default to use \"FieldValueInvalid\". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.  # noqa: E501

        :param reason: The reason of this V1ValidationRule.  # noqa: E501
        :type: str
        """

        self._reason = reason

    @property
    def rule(self):
        """Gets the rule of this V1ValidationRule.  # noqa: E501

        Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {\"rule\": \"self.status.actual <= self.spec.maxDesired\"} If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {\"rule\": \"self.components['Widget'].priority < 10\"} - Rule scoped to a list of integers: {\"rule\": \"self.values.all(value, value >= 0 && value < 100)\"} - Rule scoped to a string value: {\"rule\": \"self.startsWith('kube')\"} The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an \"unknown type\". An \"unknown type\" is recursively defined as: - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - An array where the items schema is of an \"unknown type\" - An object where the additionalProperties schema is of an \"unknown type\" Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Rule accessing a property named \"namespace\": {\"rule\": \"self.__namespace__ > 0\"} - Rule accessing a property named \"x-prop\": {\"rule\": \"self.x__dash__prop > 0\"} - Rule accessing a property named \"redact__d\": {\"rule\": \"self.redact__underscores__d > 0\"} Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order.  # noqa: E501

        :return: The rule of this V1ValidationRule.  # noqa: E501
        :rtype: str
        """
        return self._rule

    @rule.setter
    def rule(self, rule):
        """Sets the rule of this V1ValidationRule.

        Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {\"rule\": \"self.status.actual <= self.spec.maxDesired\"} If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {\"rule\": \"self.components['Widget'].priority < 10\"} - Rule scoped to a list of integers: {\"rule\": \"self.values.all(value, value >= 0 && value < 100)\"} - Rule scoped to a string value: {\"rule\": \"self.startsWith('kube')\"} The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an \"unknown type\". An \"unknown type\" is recursively defined as: - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - An array where the items schema is of an \"unknown type\" - An object where the additionalProperties schema is of an \"unknown type\" Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Rule accessing a property named \"namespace\": {\"rule\": \"self.__namespace__ > 0\"} - Rule accessing a property named \"x-prop\": {\"rule\": \"self.x__dash__prop > 0\"} - Rule accessing a property named \"redact__d\": {\"rule\": \"self.redact__underscores__d > 0\"} Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order.  # noqa: E501

        :param rule: The rule of this V1ValidationRule.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and rule is None:  # noqa: E501
            raise ValueError("Invalid value for `rule`, must not be `None`")  # noqa: E501

        self._rule = rule

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Serialize every declared attribute, recursing into nested
        # generated models (anything with a to_dict() method).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Structural equality via the serialized dict form.
        if not isinstance(other, V1ValidationRule):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ValidationRule):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume.py
new file mode 100644
index 0000000000..cd4a7932d0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume.py
@@ -0,0 +1,877 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1Volume(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource',
+ 'azure_disk': 'V1AzureDiskVolumeSource',
+ 'azure_file': 'V1AzureFileVolumeSource',
+ 'cephfs': 'V1CephFSVolumeSource',
+ 'cinder': 'V1CinderVolumeSource',
+ 'config_map': 'V1ConfigMapVolumeSource',
+ 'csi': 'V1CSIVolumeSource',
+ 'downward_api': 'V1DownwardAPIVolumeSource',
+ 'empty_dir': 'V1EmptyDirVolumeSource',
+ 'ephemeral': 'V1EphemeralVolumeSource',
+ 'fc': 'V1FCVolumeSource',
+ 'flex_volume': 'V1FlexVolumeSource',
+ 'flocker': 'V1FlockerVolumeSource',
+ 'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource',
+ 'git_repo': 'V1GitRepoVolumeSource',
+ 'glusterfs': 'V1GlusterfsVolumeSource',
+ 'host_path': 'V1HostPathVolumeSource',
+ 'iscsi': 'V1ISCSIVolumeSource',
+ 'name': 'str',
+ 'nfs': 'V1NFSVolumeSource',
+ 'persistent_volume_claim': 'V1PersistentVolumeClaimVolumeSource',
+ 'photon_persistent_disk': 'V1PhotonPersistentDiskVolumeSource',
+ 'portworx_volume': 'V1PortworxVolumeSource',
+ 'projected': 'V1ProjectedVolumeSource',
+ 'quobyte': 'V1QuobyteVolumeSource',
+ 'rbd': 'V1RBDVolumeSource',
+ 'scale_io': 'V1ScaleIOVolumeSource',
+ 'secret': 'V1SecretVolumeSource',
+ 'storageos': 'V1StorageOSVolumeSource',
+ 'vsphere_volume': 'V1VsphereVirtualDiskVolumeSource'
+ }
+
+ attribute_map = {
+ 'aws_elastic_block_store': 'awsElasticBlockStore',
+ 'azure_disk': 'azureDisk',
+ 'azure_file': 'azureFile',
+ 'cephfs': 'cephfs',
+ 'cinder': 'cinder',
+ 'config_map': 'configMap',
+ 'csi': 'csi',
+ 'downward_api': 'downwardAPI',
+ 'empty_dir': 'emptyDir',
+ 'ephemeral': 'ephemeral',
+ 'fc': 'fc',
+ 'flex_volume': 'flexVolume',
+ 'flocker': 'flocker',
+ 'gce_persistent_disk': 'gcePersistentDisk',
+ 'git_repo': 'gitRepo',
+ 'glusterfs': 'glusterfs',
+ 'host_path': 'hostPath',
+ 'iscsi': 'iscsi',
+ 'name': 'name',
+ 'nfs': 'nfs',
+ 'persistent_volume_claim': 'persistentVolumeClaim',
+ 'photon_persistent_disk': 'photonPersistentDisk',
+ 'portworx_volume': 'portworxVolume',
+ 'projected': 'projected',
+ 'quobyte': 'quobyte',
+ 'rbd': 'rbd',
+ 'scale_io': 'scaleIO',
+ 'secret': 'secret',
+ 'storageos': 'storageos',
+ 'vsphere_volume': 'vsphereVolume'
+ }
+
    def __init__(self, aws_elastic_block_store=None, azure_disk=None, azure_file=None, cephfs=None, cinder=None, config_map=None, csi=None, downward_api=None, empty_dir=None, ephemeral=None, fc=None, flex_volume=None, flocker=None, gce_persistent_disk=None, git_repo=None, glusterfs=None, host_path=None, iscsi=None, name=None, nfs=None, persistent_volume_claim=None, photon_persistent_disk=None, portworx_volume=None, projected=None, quobyte=None, rbd=None, scale_io=None, secret=None, storageos=None, vsphere_volume=None, local_vars_configuration=None):  # noqa: E501
        """V1Volume - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # One private slot per volume-source field; all start unset.
        self._aws_elastic_block_store = None
        self._azure_disk = None
        self._azure_file = None
        self._cephfs = None
        self._cinder = None
        self._config_map = None
        self._csi = None
        self._downward_api = None
        self._empty_dir = None
        self._ephemeral = None
        self._fc = None
        self._flex_volume = None
        self._flocker = None
        self._gce_persistent_disk = None
        self._git_repo = None
        self._glusterfs = None
        self._host_path = None
        self._iscsi = None
        self._name = None
        self._nfs = None
        self._persistent_volume_claim = None
        self._photon_persistent_disk = None
        self._portworx_volume = None
        self._projected = None
        self._quobyte = None
        self._rbd = None
        self._scale_io = None
        self._secret = None
        self._storageos = None
        self._vsphere_volume = None
        self.discriminator = None

        # Optional volume sources are only assigned when provided.
        if aws_elastic_block_store is not None:
            self.aws_elastic_block_store = aws_elastic_block_store
        if azure_disk is not None:
            self.azure_disk = azure_disk
        if azure_file is not None:
            self.azure_file = azure_file
        if cephfs is not None:
            self.cephfs = cephfs
        if cinder is not None:
            self.cinder = cinder
        if config_map is not None:
            self.config_map = config_map
        if csi is not None:
            self.csi = csi
        if downward_api is not None:
            self.downward_api = downward_api
        if empty_dir is not None:
            self.empty_dir = empty_dir
        if ephemeral is not None:
            self.ephemeral = ephemeral
        if fc is not None:
            self.fc = fc
        if flex_volume is not None:
            self.flex_volume = flex_volume
        if flocker is not None:
            self.flocker = flocker
        if gce_persistent_disk is not None:
            self.gce_persistent_disk = gce_persistent_disk
        if git_repo is not None:
            self.git_repo = git_repo
        if glusterfs is not None:
            self.glusterfs = glusterfs
        if host_path is not None:
            self.host_path = host_path
        if iscsi is not None:
            self.iscsi = iscsi
        # `name` is required by the schema, so it is assigned
        # unconditionally (its setter enforces non-None when
        # client-side validation is enabled).
        self.name = name
        if nfs is not None:
            self.nfs = nfs
        if persistent_volume_claim is not None:
            self.persistent_volume_claim = persistent_volume_claim
        if photon_persistent_disk is not None:
            self.photon_persistent_disk = photon_persistent_disk
        if portworx_volume is not None:
            self.portworx_volume = portworx_volume
        if projected is not None:
            self.projected = projected
        if quobyte is not None:
            self.quobyte = quobyte
        if rbd is not None:
            self.rbd = rbd
        if scale_io is not None:
            self.scale_io = scale_io
        if secret is not None:
            self.secret = secret
        if storageos is not None:
            self.storageos = storageos
        if vsphere_volume is not None:
            self.vsphere_volume = vsphere_volume
+
+ @property
+ def aws_elastic_block_store(self):
+ """Gets the aws_elastic_block_store of this V1Volume. # noqa: E501
+
+
+ :return: The aws_elastic_block_store of this V1Volume. # noqa: E501
+ :rtype: V1AWSElasticBlockStoreVolumeSource
+ """
+ return self._aws_elastic_block_store
+
+ @aws_elastic_block_store.setter
+ def aws_elastic_block_store(self, aws_elastic_block_store):
+ """Sets the aws_elastic_block_store of this V1Volume.
+
+
+ :param aws_elastic_block_store: The aws_elastic_block_store of this V1Volume. # noqa: E501
+ :type: V1AWSElasticBlockStoreVolumeSource
+ """
+
+ self._aws_elastic_block_store = aws_elastic_block_store
+
+ @property
+ def azure_disk(self):
+ """Gets the azure_disk of this V1Volume. # noqa: E501
+
+
+ :return: The azure_disk of this V1Volume. # noqa: E501
+ :rtype: V1AzureDiskVolumeSource
+ """
+ return self._azure_disk
+
+ @azure_disk.setter
+ def azure_disk(self, azure_disk):
+ """Sets the azure_disk of this V1Volume.
+
+
+ :param azure_disk: The azure_disk of this V1Volume. # noqa: E501
+ :type: V1AzureDiskVolumeSource
+ """
+
+ self._azure_disk = azure_disk
+
+ @property
+ def azure_file(self):
+ """Gets the azure_file of this V1Volume. # noqa: E501
+
+
+ :return: The azure_file of this V1Volume. # noqa: E501
+ :rtype: V1AzureFileVolumeSource
+ """
+ return self._azure_file
+
+ @azure_file.setter
+ def azure_file(self, azure_file):
+ """Sets the azure_file of this V1Volume.
+
+
+ :param azure_file: The azure_file of this V1Volume. # noqa: E501
+ :type: V1AzureFileVolumeSource
+ """
+
+ self._azure_file = azure_file
+
+ @property
+ def cephfs(self):
+ """Gets the cephfs of this V1Volume. # noqa: E501
+
+
+ :return: The cephfs of this V1Volume. # noqa: E501
+ :rtype: V1CephFSVolumeSource
+ """
+ return self._cephfs
+
+ @cephfs.setter
+ def cephfs(self, cephfs):
+ """Sets the cephfs of this V1Volume.
+
+
+ :param cephfs: The cephfs of this V1Volume. # noqa: E501
+ :type: V1CephFSVolumeSource
+ """
+
+ self._cephfs = cephfs
+
+ @property
+ def cinder(self):
+ """Gets the cinder of this V1Volume. # noqa: E501
+
+
+ :return: The cinder of this V1Volume. # noqa: E501
+ :rtype: V1CinderVolumeSource
+ """
+ return self._cinder
+
+ @cinder.setter
+ def cinder(self, cinder):
+ """Sets the cinder of this V1Volume.
+
+
+ :param cinder: The cinder of this V1Volume. # noqa: E501
+ :type: V1CinderVolumeSource
+ """
+
+ self._cinder = cinder
+
+ @property
+ def config_map(self):
+ """Gets the config_map of this V1Volume. # noqa: E501
+
+
+ :return: The config_map of this V1Volume. # noqa: E501
+ :rtype: V1ConfigMapVolumeSource
+ """
+ return self._config_map
+
+ @config_map.setter
+ def config_map(self, config_map):
+ """Sets the config_map of this V1Volume.
+
+
+ :param config_map: The config_map of this V1Volume. # noqa: E501
+ :type: V1ConfigMapVolumeSource
+ """
+
+ self._config_map = config_map
+
+ @property
+ def csi(self):
+ """Gets the csi of this V1Volume. # noqa: E501
+
+
+ :return: The csi of this V1Volume. # noqa: E501
+ :rtype: V1CSIVolumeSource
+ """
+ return self._csi
+
+ @csi.setter
+ def csi(self, csi):
+ """Sets the csi of this V1Volume.
+
+
+ :param csi: The csi of this V1Volume. # noqa: E501
+ :type: V1CSIVolumeSource
+ """
+
+ self._csi = csi
+
+ @property
+ def downward_api(self):
+ """Gets the downward_api of this V1Volume. # noqa: E501
+
+
+ :return: The downward_api of this V1Volume. # noqa: E501
+ :rtype: V1DownwardAPIVolumeSource
+ """
+ return self._downward_api
+
+ @downward_api.setter
+ def downward_api(self, downward_api):
+ """Sets the downward_api of this V1Volume.
+
+
+ :param downward_api: The downward_api of this V1Volume. # noqa: E501
+ :type: V1DownwardAPIVolumeSource
+ """
+
+ self._downward_api = downward_api
+
+ @property
+ def empty_dir(self):
+ """Gets the empty_dir of this V1Volume. # noqa: E501
+
+
+ :return: The empty_dir of this V1Volume. # noqa: E501
+ :rtype: V1EmptyDirVolumeSource
+ """
+ return self._empty_dir
+
+ @empty_dir.setter
+ def empty_dir(self, empty_dir):
+ """Sets the empty_dir of this V1Volume.
+
+
+ :param empty_dir: The empty_dir of this V1Volume. # noqa: E501
+ :type: V1EmptyDirVolumeSource
+ """
+
+ self._empty_dir = empty_dir
+
+ @property
+ def ephemeral(self):
+ """Gets the ephemeral of this V1Volume. # noqa: E501
+
+
+ :return: The ephemeral of this V1Volume. # noqa: E501
+ :rtype: V1EphemeralVolumeSource
+ """
+ return self._ephemeral
+
+ @ephemeral.setter
+ def ephemeral(self, ephemeral):
+ """Sets the ephemeral of this V1Volume.
+
+
+ :param ephemeral: The ephemeral of this V1Volume. # noqa: E501
+ :type: V1EphemeralVolumeSource
+ """
+
+ self._ephemeral = ephemeral
+
+ @property
+ def fc(self):
+ """Gets the fc of this V1Volume. # noqa: E501
+
+
+ :return: The fc of this V1Volume. # noqa: E501
+ :rtype: V1FCVolumeSource
+ """
+ return self._fc
+
+ @fc.setter
+ def fc(self, fc):
+ """Sets the fc of this V1Volume.
+
+
+ :param fc: The fc of this V1Volume. # noqa: E501
+ :type: V1FCVolumeSource
+ """
+
+ self._fc = fc
+
+ @property
+ def flex_volume(self):
+ """Gets the flex_volume of this V1Volume. # noqa: E501
+
+
+ :return: The flex_volume of this V1Volume. # noqa: E501
+ :rtype: V1FlexVolumeSource
+ """
+ return self._flex_volume
+
+ @flex_volume.setter
+ def flex_volume(self, flex_volume):
+ """Sets the flex_volume of this V1Volume.
+
+
+ :param flex_volume: The flex_volume of this V1Volume. # noqa: E501
+ :type: V1FlexVolumeSource
+ """
+
+ self._flex_volume = flex_volume
+
+ @property
+ def flocker(self):
+ """Gets the flocker of this V1Volume. # noqa: E501
+
+
+ :return: The flocker of this V1Volume. # noqa: E501
+ :rtype: V1FlockerVolumeSource
+ """
+ return self._flocker
+
+ @flocker.setter
+ def flocker(self, flocker):
+ """Sets the flocker of this V1Volume.
+
+
+ :param flocker: The flocker of this V1Volume. # noqa: E501
+ :type: V1FlockerVolumeSource
+ """
+
+ self._flocker = flocker
+
+ @property
+ def gce_persistent_disk(self):
+ """Gets the gce_persistent_disk of this V1Volume. # noqa: E501
+
+
+ :return: The gce_persistent_disk of this V1Volume. # noqa: E501
+ :rtype: V1GCEPersistentDiskVolumeSource
+ """
+ return self._gce_persistent_disk
+
+ @gce_persistent_disk.setter
+ def gce_persistent_disk(self, gce_persistent_disk):
+ """Sets the gce_persistent_disk of this V1Volume.
+
+
+ :param gce_persistent_disk: The gce_persistent_disk of this V1Volume. # noqa: E501
+ :type: V1GCEPersistentDiskVolumeSource
+ """
+
+ self._gce_persistent_disk = gce_persistent_disk
+
+ @property
+ def git_repo(self):
+ """Gets the git_repo of this V1Volume. # noqa: E501
+
+
+ :return: The git_repo of this V1Volume. # noqa: E501
+ :rtype: V1GitRepoVolumeSource
+ """
+ return self._git_repo
+
+ @git_repo.setter
+ def git_repo(self, git_repo):
+ """Sets the git_repo of this V1Volume.
+
+
+ :param git_repo: The git_repo of this V1Volume. # noqa: E501
+ :type: V1GitRepoVolumeSource
+ """
+
+ self._git_repo = git_repo
+
+ @property
+ def glusterfs(self):
+ """Gets the glusterfs of this V1Volume. # noqa: E501
+
+
+ :return: The glusterfs of this V1Volume. # noqa: E501
+ :rtype: V1GlusterfsVolumeSource
+ """
+ return self._glusterfs
+
+ @glusterfs.setter
+ def glusterfs(self, glusterfs):
+ """Sets the glusterfs of this V1Volume.
+
+
+ :param glusterfs: The glusterfs of this V1Volume. # noqa: E501
+ :type: V1GlusterfsVolumeSource
+ """
+
+ self._glusterfs = glusterfs
+
+ @property
+ def host_path(self):
+ """Gets the host_path of this V1Volume. # noqa: E501
+
+
+ :return: The host_path of this V1Volume. # noqa: E501
+ :rtype: V1HostPathVolumeSource
+ """
+ return self._host_path
+
+ @host_path.setter
+ def host_path(self, host_path):
+ """Sets the host_path of this V1Volume.
+
+
+ :param host_path: The host_path of this V1Volume. # noqa: E501
+ :type: V1HostPathVolumeSource
+ """
+
+ self._host_path = host_path
+
+ @property
+ def iscsi(self):
+ """Gets the iscsi of this V1Volume. # noqa: E501
+
+
+ :return: The iscsi of this V1Volume. # noqa: E501
+ :rtype: V1ISCSIVolumeSource
+ """
+ return self._iscsi
+
+ @iscsi.setter
+ def iscsi(self, iscsi):
+ """Sets the iscsi of this V1Volume.
+
+
+ :param iscsi: The iscsi of this V1Volume. # noqa: E501
+ :type: V1ISCSIVolumeSource
+ """
+
+ self._iscsi = iscsi
+
+ @property
+ def name(self):
+ """Gets the name of this V1Volume. # noqa: E501
+
+ name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V1Volume. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1Volume.
+
+ name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V1Volume. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def nfs(self):
+ """Gets the nfs of this V1Volume. # noqa: E501
+
+
+ :return: The nfs of this V1Volume. # noqa: E501
+ :rtype: V1NFSVolumeSource
+ """
+ return self._nfs
+
+ @nfs.setter
+ def nfs(self, nfs):
+ """Sets the nfs of this V1Volume.
+
+
+ :param nfs: The nfs of this V1Volume. # noqa: E501
+ :type: V1NFSVolumeSource
+ """
+
+ self._nfs = nfs
+
+ @property
+ def persistent_volume_claim(self):
+ """Gets the persistent_volume_claim of this V1Volume. # noqa: E501
+
+
+ :return: The persistent_volume_claim of this V1Volume. # noqa: E501
+ :rtype: V1PersistentVolumeClaimVolumeSource
+ """
+ return self._persistent_volume_claim
+
+ @persistent_volume_claim.setter
+ def persistent_volume_claim(self, persistent_volume_claim):
+ """Sets the persistent_volume_claim of this V1Volume.
+
+
+ :param persistent_volume_claim: The persistent_volume_claim of this V1Volume. # noqa: E501
+ :type: V1PersistentVolumeClaimVolumeSource
+ """
+
+ self._persistent_volume_claim = persistent_volume_claim
+
+ @property
+ def photon_persistent_disk(self):
+ """Gets the photon_persistent_disk of this V1Volume. # noqa: E501
+
+
+ :return: The photon_persistent_disk of this V1Volume. # noqa: E501
+ :rtype: V1PhotonPersistentDiskVolumeSource
+ """
+ return self._photon_persistent_disk
+
+ @photon_persistent_disk.setter
+ def photon_persistent_disk(self, photon_persistent_disk):
+ """Sets the photon_persistent_disk of this V1Volume.
+
+
+ :param photon_persistent_disk: The photon_persistent_disk of this V1Volume. # noqa: E501
+ :type: V1PhotonPersistentDiskVolumeSource
+ """
+
+ self._photon_persistent_disk = photon_persistent_disk
+
+ @property
+ def portworx_volume(self):
+ """Gets the portworx_volume of this V1Volume. # noqa: E501
+
+
+ :return: The portworx_volume of this V1Volume. # noqa: E501
+ :rtype: V1PortworxVolumeSource
+ """
+ return self._portworx_volume
+
+ @portworx_volume.setter
+ def portworx_volume(self, portworx_volume):
+ """Sets the portworx_volume of this V1Volume.
+
+
+ :param portworx_volume: The portworx_volume of this V1Volume. # noqa: E501
+ :type: V1PortworxVolumeSource
+ """
+
+ self._portworx_volume = portworx_volume
+
+ @property
+ def projected(self):
+ """Gets the projected of this V1Volume. # noqa: E501
+
+
+ :return: The projected of this V1Volume. # noqa: E501
+ :rtype: V1ProjectedVolumeSource
+ """
+ return self._projected
+
+ @projected.setter
+ def projected(self, projected):
+ """Sets the projected of this V1Volume.
+
+
+ :param projected: The projected of this V1Volume. # noqa: E501
+ :type: V1ProjectedVolumeSource
+ """
+
+ self._projected = projected
+
+ @property
+ def quobyte(self):
+ """Gets the quobyte of this V1Volume. # noqa: E501
+
+
+ :return: The quobyte of this V1Volume. # noqa: E501
+ :rtype: V1QuobyteVolumeSource
+ """
+ return self._quobyte
+
+ @quobyte.setter
+ def quobyte(self, quobyte):
+ """Sets the quobyte of this V1Volume.
+
+
+ :param quobyte: The quobyte of this V1Volume. # noqa: E501
+ :type: V1QuobyteVolumeSource
+ """
+
+ self._quobyte = quobyte
+
+ @property
+ def rbd(self):
+ """Gets the rbd of this V1Volume. # noqa: E501
+
+
+ :return: The rbd of this V1Volume. # noqa: E501
+ :rtype: V1RBDVolumeSource
+ """
+ return self._rbd
+
+ @rbd.setter
+ def rbd(self, rbd):
+ """Sets the rbd of this V1Volume.
+
+
+ :param rbd: The rbd of this V1Volume. # noqa: E501
+ :type: V1RBDVolumeSource
+ """
+
+ self._rbd = rbd
+
+ @property
+ def scale_io(self):
+ """Gets the scale_io of this V1Volume. # noqa: E501
+
+
+ :return: The scale_io of this V1Volume. # noqa: E501
+ :rtype: V1ScaleIOVolumeSource
+ """
+ return self._scale_io
+
+ @scale_io.setter
+ def scale_io(self, scale_io):
+ """Sets the scale_io of this V1Volume.
+
+
+ :param scale_io: The scale_io of this V1Volume. # noqa: E501
+ :type: V1ScaleIOVolumeSource
+ """
+
+ self._scale_io = scale_io
+
+ @property
+ def secret(self):
+ """Gets the secret of this V1Volume. # noqa: E501
+
+
+ :return: The secret of this V1Volume. # noqa: E501
+ :rtype: V1SecretVolumeSource
+ """
+ return self._secret
+
+ @secret.setter
+ def secret(self, secret):
+ """Sets the secret of this V1Volume.
+
+
+ :param secret: The secret of this V1Volume. # noqa: E501
+ :type: V1SecretVolumeSource
+ """
+
+ self._secret = secret
+
+ @property
+ def storageos(self):
+ """Gets the storageos of this V1Volume. # noqa: E501
+
+
+ :return: The storageos of this V1Volume. # noqa: E501
+ :rtype: V1StorageOSVolumeSource
+ """
+ return self._storageos
+
+ @storageos.setter
+ def storageos(self, storageos):
+ """Sets the storageos of this V1Volume.
+
+
+ :param storageos: The storageos of this V1Volume. # noqa: E501
+ :type: V1StorageOSVolumeSource
+ """
+
+ self._storageos = storageos
+
+ @property
+ def vsphere_volume(self):
+ """Gets the vsphere_volume of this V1Volume. # noqa: E501
+
+
+ :return: The vsphere_volume of this V1Volume. # noqa: E501
+ :rtype: V1VsphereVirtualDiskVolumeSource
+ """
+ return self._vsphere_volume
+
+ @vsphere_volume.setter
+ def vsphere_volume(self, vsphere_volume):
+ """Sets the vsphere_volume of this V1Volume.
+
+
+ :param vsphere_volume: The vsphere_volume of this V1Volume. # noqa: E501
+ :type: V1VsphereVirtualDiskVolumeSource
+ """
+
+ self._vsphere_volume = vsphere_volume
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1Volume):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1Volume):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment.py
new file mode 100644
index 0000000000..d74cb9a42d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeAttachment(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1VolumeAttachmentSpec',
+ 'status': 'V1VolumeAttachmentStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeAttachment - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1VolumeAttachment. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1VolumeAttachment. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1VolumeAttachment.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1VolumeAttachment. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1VolumeAttachment. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1VolumeAttachment. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1VolumeAttachment.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1VolumeAttachment. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1VolumeAttachment. # noqa: E501
+
+
+ :return: The metadata of this V1VolumeAttachment. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1VolumeAttachment.
+
+
+ :param metadata: The metadata of this V1VolumeAttachment. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1VolumeAttachment. # noqa: E501
+
+
+ :return: The spec of this V1VolumeAttachment. # noqa: E501
+ :rtype: V1VolumeAttachmentSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1VolumeAttachment.
+
+
+ :param spec: The spec of this V1VolumeAttachment. # noqa: E501
+ :type: V1VolumeAttachmentSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1VolumeAttachment. # noqa: E501
+
+
+ :return: The status of this V1VolumeAttachment. # noqa: E501
+ :rtype: V1VolumeAttachmentStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1VolumeAttachment.
+
+
+ :param status: The status of this V1VolumeAttachment. # noqa: E501
+ :type: V1VolumeAttachmentStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeAttachment):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeAttachment):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_list.py
new file mode 100644
index 0000000000..2563ca139c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeAttachmentList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1VolumeAttachment]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeAttachmentList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1VolumeAttachmentList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1VolumeAttachmentList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1VolumeAttachmentList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1VolumeAttachmentList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1VolumeAttachmentList. # noqa: E501
+
+ items is the list of VolumeAttachments # noqa: E501
+
+ :return: The items of this V1VolumeAttachmentList. # noqa: E501
+ :rtype: list[V1VolumeAttachment]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1VolumeAttachmentList.
+
+ items is the list of VolumeAttachments # noqa: E501
+
+ :param items: The items of this V1VolumeAttachmentList. # noqa: E501
+ :type: list[V1VolumeAttachment]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1VolumeAttachmentList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1VolumeAttachmentList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1VolumeAttachmentList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1VolumeAttachmentList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1VolumeAttachmentList. # noqa: E501
+
+
+ :return: The metadata of this V1VolumeAttachmentList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1VolumeAttachmentList.
+
+
+ :param metadata: The metadata of this V1VolumeAttachmentList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeAttachmentList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeAttachmentList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_source.py
new file mode 100644
index 0000000000..d16fab6d35
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_source.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeAttachmentSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'inline_volume_spec': 'V1PersistentVolumeSpec',
+ 'persistent_volume_name': 'str'
+ }
+
+ attribute_map = {
+ 'inline_volume_spec': 'inlineVolumeSpec',
+ 'persistent_volume_name': 'persistentVolumeName'
+ }
+
+ def __init__(self, inline_volume_spec=None, persistent_volume_name=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeAttachmentSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._inline_volume_spec = None
+ self._persistent_volume_name = None
+ self.discriminator = None
+
+ if inline_volume_spec is not None:
+ self.inline_volume_spec = inline_volume_spec
+ if persistent_volume_name is not None:
+ self.persistent_volume_name = persistent_volume_name
+
+ @property
+ def inline_volume_spec(self):
+ """Gets the inline_volume_spec of this V1VolumeAttachmentSource. # noqa: E501
+
+
+ :return: The inline_volume_spec of this V1VolumeAttachmentSource. # noqa: E501
+ :rtype: V1PersistentVolumeSpec
+ """
+ return self._inline_volume_spec
+
+ @inline_volume_spec.setter
+ def inline_volume_spec(self, inline_volume_spec):
+ """Sets the inline_volume_spec of this V1VolumeAttachmentSource.
+
+
+ :param inline_volume_spec: The inline_volume_spec of this V1VolumeAttachmentSource. # noqa: E501
+ :type: V1PersistentVolumeSpec
+ """
+
+ self._inline_volume_spec = inline_volume_spec
+
+ @property
+ def persistent_volume_name(self):
+ """Gets the persistent_volume_name of this V1VolumeAttachmentSource. # noqa: E501
+
+ persistentVolumeName represents the name of the persistent volume to attach. # noqa: E501
+
+ :return: The persistent_volume_name of this V1VolumeAttachmentSource. # noqa: E501
+ :rtype: str
+ """
+ return self._persistent_volume_name
+
+ @persistent_volume_name.setter
+ def persistent_volume_name(self, persistent_volume_name):
+ """Sets the persistent_volume_name of this V1VolumeAttachmentSource.
+
+ persistentVolumeName represents the name of the persistent volume to attach. # noqa: E501
+
+ :param persistent_volume_name: The persistent_volume_name of this V1VolumeAttachmentSource. # noqa: E501
+ :type: str
+ """
+
+ self._persistent_volume_name = persistent_volume_name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeAttachmentSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeAttachmentSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_spec.py
new file mode 100644
index 0000000000..ea17388e6e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_spec.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeAttachmentSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'attacher': 'str',
+ 'node_name': 'str',
+ 'source': 'V1VolumeAttachmentSource'
+ }
+
+ attribute_map = {
+ 'attacher': 'attacher',
+ 'node_name': 'nodeName',
+ 'source': 'source'
+ }
+
+ def __init__(self, attacher=None, node_name=None, source=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeAttachmentSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._attacher = None
+ self._node_name = None
+ self._source = None
+ self.discriminator = None
+
+ self.attacher = attacher
+ self.node_name = node_name
+ self.source = source
+
+ @property
+ def attacher(self):
+ """Gets the attacher of this V1VolumeAttachmentSpec. # noqa: E501
+
+ attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName(). # noqa: E501
+
+ :return: The attacher of this V1VolumeAttachmentSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._attacher
+
+ @attacher.setter
+ def attacher(self, attacher):
+ """Sets the attacher of this V1VolumeAttachmentSpec.
+
+ attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName(). # noqa: E501
+
+ :param attacher: The attacher of this V1VolumeAttachmentSpec. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and attacher is None: # noqa: E501
+ raise ValueError("Invalid value for `attacher`, must not be `None`") # noqa: E501
+
+ self._attacher = attacher
+
+ @property
+ def node_name(self):
+ """Gets the node_name of this V1VolumeAttachmentSpec. # noqa: E501
+
+ nodeName represents the node that the volume should be attached to. # noqa: E501
+
+ :return: The node_name of this V1VolumeAttachmentSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._node_name
+
+ @node_name.setter
+ def node_name(self, node_name):
+ """Sets the node_name of this V1VolumeAttachmentSpec.
+
+ nodeName represents the node that the volume should be attached to. # noqa: E501
+
+ :param node_name: The node_name of this V1VolumeAttachmentSpec. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and node_name is None: # noqa: E501
+ raise ValueError("Invalid value for `node_name`, must not be `None`") # noqa: E501
+
+ self._node_name = node_name
+
+ @property
+ def source(self):
+ """Gets the source of this V1VolumeAttachmentSpec. # noqa: E501
+
+
+ :return: The source of this V1VolumeAttachmentSpec. # noqa: E501
+ :rtype: V1VolumeAttachmentSource
+ """
+ return self._source
+
+ @source.setter
+ def source(self, source):
+ """Sets the source of this V1VolumeAttachmentSpec.
+
+
+ :param source: The source of this V1VolumeAttachmentSpec. # noqa: E501
+ :type: V1VolumeAttachmentSource
+ """
+ if self.local_vars_configuration.client_side_validation and source is None: # noqa: E501
+ raise ValueError("Invalid value for `source`, must not be `None`") # noqa: E501
+
+ self._source = source
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeAttachmentSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeAttachmentSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_status.py
new file mode 100644
index 0000000000..fa2e3452e4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_attachment_status.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeAttachmentStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'attach_error': 'V1VolumeError',
+ 'attached': 'bool',
+ 'attachment_metadata': 'dict(str, str)',
+ 'detach_error': 'V1VolumeError'
+ }
+
+ attribute_map = {
+ 'attach_error': 'attachError',
+ 'attached': 'attached',
+ 'attachment_metadata': 'attachmentMetadata',
+ 'detach_error': 'detachError'
+ }
+
+ def __init__(self, attach_error=None, attached=None, attachment_metadata=None, detach_error=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeAttachmentStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._attach_error = None
+ self._attached = None
+ self._attachment_metadata = None
+ self._detach_error = None
+ self.discriminator = None
+
+ if attach_error is not None:
+ self.attach_error = attach_error
+ self.attached = attached
+ if attachment_metadata is not None:
+ self.attachment_metadata = attachment_metadata
+ if detach_error is not None:
+ self.detach_error = detach_error
+
+ @property
+ def attach_error(self):
+ """Gets the attach_error of this V1VolumeAttachmentStatus. # noqa: E501
+
+
+ :return: The attach_error of this V1VolumeAttachmentStatus. # noqa: E501
+ :rtype: V1VolumeError
+ """
+ return self._attach_error
+
+ @attach_error.setter
+ def attach_error(self, attach_error):
+ """Sets the attach_error of this V1VolumeAttachmentStatus.
+
+
+ :param attach_error: The attach_error of this V1VolumeAttachmentStatus. # noqa: E501
+ :type: V1VolumeError
+ """
+
+ self._attach_error = attach_error
+
+ @property
+ def attached(self):
+ """Gets the attached of this V1VolumeAttachmentStatus. # noqa: E501
+
+ attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
+
+ :return: The attached of this V1VolumeAttachmentStatus. # noqa: E501
+ :rtype: bool
+ """
+ return self._attached
+
+ @attached.setter
+ def attached(self, attached):
+ """Sets the attached of this V1VolumeAttachmentStatus.
+
+ attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
+
+ :param attached: The attached of this V1VolumeAttachmentStatus. # noqa: E501
+ :type: bool
+ """
+ if self.local_vars_configuration.client_side_validation and attached is None: # noqa: E501
+ raise ValueError("Invalid value for `attached`, must not be `None`") # noqa: E501
+
+ self._attached = attached
+
+ @property
+ def attachment_metadata(self):
+ """Gets the attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
+
+ attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
+
+ :return: The attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
+ :rtype: dict(str, str)
+ """
+ return self._attachment_metadata
+
+ @attachment_metadata.setter
+ def attachment_metadata(self, attachment_metadata):
+ """Sets the attachment_metadata of this V1VolumeAttachmentStatus.
+
+ attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
+
+ :param attachment_metadata: The attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
+ :type: dict(str, str)
+ """
+
+ self._attachment_metadata = attachment_metadata
+
+ @property
+ def detach_error(self):
+ """Gets the detach_error of this V1VolumeAttachmentStatus. # noqa: E501
+
+
+ :return: The detach_error of this V1VolumeAttachmentStatus. # noqa: E501
+ :rtype: V1VolumeError
+ """
+ return self._detach_error
+
+ @detach_error.setter
+ def detach_error(self, detach_error):
+ """Sets the detach_error of this V1VolumeAttachmentStatus.
+
+
+ :param detach_error: The detach_error of this V1VolumeAttachmentStatus. # noqa: E501
+ :type: V1VolumeError
+ """
+
+ self._detach_error = detach_error
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeAttachmentStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeAttachmentStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_device.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_device.py
new file mode 100644
index 0000000000..7682a51d5f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_device.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeDevice(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'device_path': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'device_path': 'devicePath',
+ 'name': 'name'
+ }
+
+ def __init__(self, device_path=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeDevice - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._device_path = None
+ self._name = None
+ self.discriminator = None
+
+ self.device_path = device_path
+ self.name = name
+
+ @property
+ def device_path(self):
+ """Gets the device_path of this V1VolumeDevice. # noqa: E501
+
+ devicePath is the path inside of the container that the device will be mapped to. # noqa: E501
+
+ :return: The device_path of this V1VolumeDevice. # noqa: E501
+ :rtype: str
+ """
+ return self._device_path
+
+ @device_path.setter
+ def device_path(self, device_path):
+ """Sets the device_path of this V1VolumeDevice.
+
+ devicePath is the path inside of the container that the device will be mapped to. # noqa: E501
+
+ :param device_path: The device_path of this V1VolumeDevice. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and device_path is None: # noqa: E501
+ raise ValueError("Invalid value for `device_path`, must not be `None`") # noqa: E501
+
+ self._device_path = device_path
+
+ @property
+ def name(self):
+ """Gets the name of this V1VolumeDevice. # noqa: E501
+
+ name must match the name of a persistentVolumeClaim in the pod # noqa: E501
+
+ :return: The name of this V1VolumeDevice. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1VolumeDevice.
+
+ name must match the name of a persistentVolumeClaim in the pod # noqa: E501
+
+ :param name: The name of this V1VolumeDevice. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeDevice):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeDevice):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_error.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_error.py
new file mode 100644
index 0000000000..75f7dfb302
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_error.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeError(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'message': 'str',
+ 'time': 'datetime'
+ }
+
+ attribute_map = {
+ 'message': 'message',
+ 'time': 'time'
+ }
+
+ def __init__(self, message=None, time=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeError - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._message = None
+ self._time = None
+ self.discriminator = None
+
+ if message is not None:
+ self.message = message
+ if time is not None:
+ self.time = time
+
+ @property
+ def message(self):
+ """Gets the message of this V1VolumeError. # noqa: E501
+
+ message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information. # noqa: E501
+
+ :return: The message of this V1VolumeError. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1VolumeError.
+
+ message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information. # noqa: E501
+
+ :param message: The message of this V1VolumeError. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def time(self):
+ """Gets the time of this V1VolumeError. # noqa: E501
+
+ time represents the time the error was encountered. # noqa: E501
+
+ :return: The time of this V1VolumeError. # noqa: E501
+ :rtype: datetime
+ """
+ return self._time
+
+ @time.setter
+ def time(self, time):
+ """Sets the time of this V1VolumeError.
+
+ time represents the time the error was encountered. # noqa: E501
+
+ :param time: The time of this V1VolumeError. # noqa: E501
+ :type: datetime
+ """
+
+ self._time = time
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeError):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeError):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_mount.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_mount.py
new file mode 100644
index 0000000000..ecd34859e9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_mount.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeMount(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'mount_path': 'str',
+ 'mount_propagation': 'str',
+ 'name': 'str',
+ 'read_only': 'bool',
+ 'sub_path': 'str',
+ 'sub_path_expr': 'str'
+ }
+
+ attribute_map = {
+ 'mount_path': 'mountPath',
+ 'mount_propagation': 'mountPropagation',
+ 'name': 'name',
+ 'read_only': 'readOnly',
+ 'sub_path': 'subPath',
+ 'sub_path_expr': 'subPathExpr'
+ }
+
+ def __init__(self, mount_path=None, mount_propagation=None, name=None, read_only=None, sub_path=None, sub_path_expr=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeMount - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._mount_path = None
+ self._mount_propagation = None
+ self._name = None
+ self._read_only = None
+ self._sub_path = None
+ self._sub_path_expr = None
+ self.discriminator = None
+
+ self.mount_path = mount_path
+ if mount_propagation is not None:
+ self.mount_propagation = mount_propagation
+ self.name = name
+ if read_only is not None:
+ self.read_only = read_only
+ if sub_path is not None:
+ self.sub_path = sub_path
+ if sub_path_expr is not None:
+ self.sub_path_expr = sub_path_expr
+
+ @property
+ def mount_path(self):
+ """Gets the mount_path of this V1VolumeMount. # noqa: E501
+
+ Path within the container at which the volume should be mounted. Must not contain ':'. # noqa: E501
+
+ :return: The mount_path of this V1VolumeMount. # noqa: E501
+ :rtype: str
+ """
+ return self._mount_path
+
+ @mount_path.setter
+ def mount_path(self, mount_path):
+ """Sets the mount_path of this V1VolumeMount.
+
+ Path within the container at which the volume should be mounted. Must not contain ':'. # noqa: E501
+
+ :param mount_path: The mount_path of this V1VolumeMount. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and mount_path is None: # noqa: E501
+ raise ValueError("Invalid value for `mount_path`, must not be `None`") # noqa: E501
+
+ self._mount_path = mount_path
+
+ @property
+ def mount_propagation(self):
+ """Gets the mount_propagation of this V1VolumeMount. # noqa: E501
+
+ mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. # noqa: E501
+
+ :return: The mount_propagation of this V1VolumeMount. # noqa: E501
+ :rtype: str
+ """
+ return self._mount_propagation
+
+ @mount_propagation.setter
+ def mount_propagation(self, mount_propagation):
+ """Sets the mount_propagation of this V1VolumeMount.
+
+ mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. # noqa: E501
+
+ :param mount_propagation: The mount_propagation of this V1VolumeMount. # noqa: E501
+ :type: str
+ """
+
+ self._mount_propagation = mount_propagation
+
+ @property
+ def name(self):
+ """Gets the name of this V1VolumeMount. # noqa: E501
+
+ This must match the Name of a Volume. # noqa: E501
+
+ :return: The name of this V1VolumeMount. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1VolumeMount.
+
+ This must match the Name of a Volume. # noqa: E501
+
+ :param name: The name of this V1VolumeMount. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def read_only(self):
+ """Gets the read_only of this V1VolumeMount. # noqa: E501
+
+ Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. # noqa: E501
+
+ :return: The read_only of this V1VolumeMount. # noqa: E501
+ :rtype: bool
+ """
+ return self._read_only
+
+ @read_only.setter
+ def read_only(self, read_only):
+ """Sets the read_only of this V1VolumeMount.
+
+ Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. # noqa: E501
+
+ :param read_only: The read_only of this V1VolumeMount. # noqa: E501
+ :type: bool
+ """
+
+ self._read_only = read_only
+
+ @property
+ def sub_path(self):
+ """Gets the sub_path of this V1VolumeMount. # noqa: E501
+
+ Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root). # noqa: E501
+
+ :return: The sub_path of this V1VolumeMount. # noqa: E501
+ :rtype: str
+ """
+ return self._sub_path
+
+ @sub_path.setter
+ def sub_path(self, sub_path):
+ """Sets the sub_path of this V1VolumeMount.
+
+ Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root). # noqa: E501
+
+ :param sub_path: The sub_path of this V1VolumeMount. # noqa: E501
+ :type: str
+ """
+
+ self._sub_path = sub_path
+
+ @property
+ def sub_path_expr(self):
+ """Gets the sub_path_expr of this V1VolumeMount. # noqa: E501
+
+ Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. # noqa: E501
+
+ :return: The sub_path_expr of this V1VolumeMount. # noqa: E501
+ :rtype: str
+ """
+ return self._sub_path_expr
+
+ @sub_path_expr.setter
+ def sub_path_expr(self, sub_path_expr):
+ """Sets the sub_path_expr of this V1VolumeMount.
+
+ Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. # noqa: E501
+
+ :param sub_path_expr: The sub_path_expr of this V1VolumeMount. # noqa: E501
+ :type: str
+ """
+
+ self._sub_path_expr = sub_path_expr
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeMount):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeMount):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_affinity.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_affinity.py
new file mode 100644
index 0000000000..18423f70ed
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_affinity.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeNodeAffinity(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'required': 'V1NodeSelector'
+ }
+
+ attribute_map = {
+ 'required': 'required'
+ }
+
+ def __init__(self, required=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeNodeAffinity - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._required = None
+ self.discriminator = None
+
+ if required is not None:
+ self.required = required
+
+ @property
+ def required(self):
+ """Gets the required of this V1VolumeNodeAffinity. # noqa: E501
+
+
+ :return: The required of this V1VolumeNodeAffinity. # noqa: E501
+ :rtype: V1NodeSelector
+ """
+ return self._required
+
+ @required.setter
+ def required(self, required):
+ """Sets the required of this V1VolumeNodeAffinity.
+
+
+ :param required: The required of this V1VolumeNodeAffinity. # noqa: E501
+ :type: V1NodeSelector
+ """
+
+ self._required = required
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeNodeAffinity):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeNodeAffinity):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_resources.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_resources.py
new file mode 100644
index 0000000000..eb1ae8317a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_node_resources.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeNodeResources(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'count': 'int'
+ }
+
+ attribute_map = {
+ 'count': 'count'
+ }
+
+ def __init__(self, count=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeNodeResources - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._count = None
+ self.discriminator = None
+
+ if count is not None:
+ self.count = count
+
+ @property
+ def count(self):
+ """Gets the count of this V1VolumeNodeResources. # noqa: E501
+
+ count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded. # noqa: E501
+
+ :return: The count of this V1VolumeNodeResources. # noqa: E501
+ :rtype: int
+ """
+ return self._count
+
+ @count.setter
+ def count(self, count):
+ """Sets the count of this V1VolumeNodeResources.
+
+ count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded. # noqa: E501
+
+ :param count: The count of this V1VolumeNodeResources. # noqa: E501
+ :type: int
+ """
+
+ self._count = count
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeNodeResources):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeNodeResources):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_volume_projection.py b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_projection.py
new file mode 100644
index 0000000000..1fae42d9c8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_volume_projection.py
@@ -0,0 +1,198 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VolumeProjection(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'config_map': 'V1ConfigMapProjection',
+ 'downward_api': 'V1DownwardAPIProjection',
+ 'secret': 'V1SecretProjection',
+ 'service_account_token': 'V1ServiceAccountTokenProjection'
+ }
+
+ attribute_map = {
+ 'config_map': 'configMap',
+ 'downward_api': 'downwardAPI',
+ 'secret': 'secret',
+ 'service_account_token': 'serviceAccountToken'
+ }
+
+ def __init__(self, config_map=None, downward_api=None, secret=None, service_account_token=None, local_vars_configuration=None): # noqa: E501
+ """V1VolumeProjection - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._config_map = None
+ self._downward_api = None
+ self._secret = None
+ self._service_account_token = None
+ self.discriminator = None
+
+ if config_map is not None:
+ self.config_map = config_map
+ if downward_api is not None:
+ self.downward_api = downward_api
+ if secret is not None:
+ self.secret = secret
+ if service_account_token is not None:
+ self.service_account_token = service_account_token
+
+ @property
+ def config_map(self):
+ """Gets the config_map of this V1VolumeProjection. # noqa: E501
+
+
+ :return: The config_map of this V1VolumeProjection. # noqa: E501
+ :rtype: V1ConfigMapProjection
+ """
+ return self._config_map
+
+ @config_map.setter
+ def config_map(self, config_map):
+ """Sets the config_map of this V1VolumeProjection.
+
+
+ :param config_map: The config_map of this V1VolumeProjection. # noqa: E501
+ :type: V1ConfigMapProjection
+ """
+
+ self._config_map = config_map
+
+ @property
+ def downward_api(self):
+ """Gets the downward_api of this V1VolumeProjection. # noqa: E501
+
+
+ :return: The downward_api of this V1VolumeProjection. # noqa: E501
+ :rtype: V1DownwardAPIProjection
+ """
+ return self._downward_api
+
+ @downward_api.setter
+ def downward_api(self, downward_api):
+ """Sets the downward_api of this V1VolumeProjection.
+
+
+ :param downward_api: The downward_api of this V1VolumeProjection. # noqa: E501
+ :type: V1DownwardAPIProjection
+ """
+
+ self._downward_api = downward_api
+
+ @property
+ def secret(self):
+ """Gets the secret of this V1VolumeProjection. # noqa: E501
+
+
+ :return: The secret of this V1VolumeProjection. # noqa: E501
+ :rtype: V1SecretProjection
+ """
+ return self._secret
+
+ @secret.setter
+ def secret(self, secret):
+ """Sets the secret of this V1VolumeProjection.
+
+
+ :param secret: The secret of this V1VolumeProjection. # noqa: E501
+ :type: V1SecretProjection
+ """
+
+ self._secret = secret
+
+ @property
+ def service_account_token(self):
+ """Gets the service_account_token of this V1VolumeProjection. # noqa: E501
+
+
+ :return: The service_account_token of this V1VolumeProjection. # noqa: E501
+ :rtype: V1ServiceAccountTokenProjection
+ """
+ return self._service_account_token
+
+ @service_account_token.setter
+ def service_account_token(self, service_account_token):
+ """Sets the service_account_token of this V1VolumeProjection.
+
+
+ :param service_account_token: The service_account_token of this V1VolumeProjection. # noqa: E501
+ :type: V1ServiceAccountTokenProjection
+ """
+
+ self._service_account_token = service_account_token
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VolumeProjection):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VolumeProjection):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_vsphere_virtual_disk_volume_source.py b/contrib/python/kubernetes/kubernetes/client/models/v1_vsphere_virtual_disk_volume_source.py
new file mode 100644
index 0000000000..0826256617
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_vsphere_virtual_disk_volume_source.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1VsphereVirtualDiskVolumeSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'fs_type': 'str',
+ 'storage_policy_id': 'str',
+ 'storage_policy_name': 'str',
+ 'volume_path': 'str'
+ }
+
+ attribute_map = {
+ 'fs_type': 'fsType',
+ 'storage_policy_id': 'storagePolicyID',
+ 'storage_policy_name': 'storagePolicyName',
+ 'volume_path': 'volumePath'
+ }
+
+ def __init__(self, fs_type=None, storage_policy_id=None, storage_policy_name=None, volume_path=None, local_vars_configuration=None): # noqa: E501
+ """V1VsphereVirtualDiskVolumeSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._fs_type = None
+ self._storage_policy_id = None
+ self._storage_policy_name = None
+ self._volume_path = None
+ self.discriminator = None
+
+ if fs_type is not None:
+ self.fs_type = fs_type
+ if storage_policy_id is not None:
+ self.storage_policy_id = storage_policy_id
+ if storage_policy_name is not None:
+ self.storage_policy_name = storage_policy_name
+ self.volume_path = volume_path
+
+ @property
+ def fs_type(self):
+ """Gets the fs_type of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+
+ fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
+
+ :return: The fs_type of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._fs_type
+
+ @fs_type.setter
+ def fs_type(self, fs_type):
+ """Sets the fs_type of this V1VsphereVirtualDiskVolumeSource.
+
+ fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
+
+ :param fs_type: The fs_type of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._fs_type = fs_type
+
+ @property
+ def storage_policy_id(self):
+ """Gets the storage_policy_id of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+
+ storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. # noqa: E501
+
+ :return: The storage_policy_id of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._storage_policy_id
+
+ @storage_policy_id.setter
+ def storage_policy_id(self, storage_policy_id):
+ """Sets the storage_policy_id of this V1VsphereVirtualDiskVolumeSource.
+
+ storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. # noqa: E501
+
+ :param storage_policy_id: The storage_policy_id of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._storage_policy_id = storage_policy_id
+
+ @property
+ def storage_policy_name(self):
+ """Gets the storage_policy_name of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+
+ storagePolicyName is the storage Policy Based Management (SPBM) profile name. # noqa: E501
+
+ :return: The storage_policy_name of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._storage_policy_name
+
+ @storage_policy_name.setter
+ def storage_policy_name(self, storage_policy_name):
+ """Sets the storage_policy_name of this V1VsphereVirtualDiskVolumeSource.
+
+ storagePolicyName is the storage Policy Based Management (SPBM) profile name. # noqa: E501
+
+ :param storage_policy_name: The storage_policy_name of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :type: str
+ """
+
+ self._storage_policy_name = storage_policy_name
+
+ @property
+ def volume_path(self):
+ """Gets the volume_path of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+
+ volumePath is the path that identifies vSphere volume vmdk # noqa: E501
+
+ :return: The volume_path of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :rtype: str
+ """
+ return self._volume_path
+
+ @volume_path.setter
+ def volume_path(self, volume_path):
+ """Sets the volume_path of this V1VsphereVirtualDiskVolumeSource.
+
+ volumePath is the path that identifies vSphere volume vmdk # noqa: E501
+
+ :param volume_path: The volume_path of this V1VsphereVirtualDiskVolumeSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and volume_path is None: # noqa: E501
+ raise ValueError("Invalid value for `volume_path`, must not be `None`") # noqa: E501
+
+ self._volume_path = volume_path
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1VsphereVirtualDiskVolumeSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1VsphereVirtualDiskVolumeSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_watch_event.py b/contrib/python/kubernetes/kubernetes/client/models/v1_watch_event.py
new file mode 100644
index 0000000000..c9c6f62dba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_watch_event.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1WatchEvent(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'object': 'object',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'object': 'object',
+ 'type': 'type'
+ }
+
+ def __init__(self, object=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1WatchEvent - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._object = None
+ self._type = None
+ self.discriminator = None
+
+ self.object = object
+ self.type = type
+
+ @property
+ def object(self):
+ """Gets the object of this V1WatchEvent. # noqa: E501
+
+ Object is: * If Type is Added or Modified: the new state of the object. * If Type is Deleted: the state of the object immediately before deletion. * If Type is Error: *Status is recommended; other types may make sense depending on context. # noqa: E501
+
+ :return: The object of this V1WatchEvent. # noqa: E501
+ :rtype: object
+ """
+ return self._object
+
+ @object.setter
+ def object(self, object):
+ """Sets the object of this V1WatchEvent.
+
+ Object is: * If Type is Added or Modified: the new state of the object. * If Type is Deleted: the state of the object immediately before deletion. * If Type is Error: *Status is recommended; other types may make sense depending on context. # noqa: E501
+
+ :param object: The object of this V1WatchEvent. # noqa: E501
+ :type: object
+ """
+ if self.local_vars_configuration.client_side_validation and object is None: # noqa: E501
+ raise ValueError("Invalid value for `object`, must not be `None`") # noqa: E501
+
+ self._object = object
+
+ @property
+ def type(self):
+ """Gets the type of this V1WatchEvent. # noqa: E501
+
+
+ :return: The type of this V1WatchEvent. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1WatchEvent.
+
+
+ :param type: The type of this V1WatchEvent. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1WatchEvent):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1WatchEvent):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_webhook_conversion.py b/contrib/python/kubernetes/kubernetes/client/models/v1_webhook_conversion.py
new file mode 100644
index 0000000000..7fccacd6cd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_webhook_conversion.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1WebhookConversion(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'client_config': 'ApiextensionsV1WebhookClientConfig',
+ 'conversion_review_versions': 'list[str]'
+ }
+
+ attribute_map = {
+ 'client_config': 'clientConfig',
+ 'conversion_review_versions': 'conversionReviewVersions'
+ }
+
+ def __init__(self, client_config=None, conversion_review_versions=None, local_vars_configuration=None): # noqa: E501
+ """V1WebhookConversion - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._client_config = None
+ self._conversion_review_versions = None
+ self.discriminator = None
+
+ if client_config is not None:
+ self.client_config = client_config
+ self.conversion_review_versions = conversion_review_versions
+
+ @property
+ def client_config(self):
+ """Gets the client_config of this V1WebhookConversion. # noqa: E501
+
+
+ :return: The client_config of this V1WebhookConversion. # noqa: E501
+ :rtype: ApiextensionsV1WebhookClientConfig
+ """
+ return self._client_config
+
+ @client_config.setter
+ def client_config(self, client_config):
+ """Sets the client_config of this V1WebhookConversion.
+
+
+ :param client_config: The client_config of this V1WebhookConversion. # noqa: E501
+ :type: ApiextensionsV1WebhookClientConfig
+ """
+
+ self._client_config = client_config
+
+ @property
+ def conversion_review_versions(self):
+ """Gets the conversion_review_versions of this V1WebhookConversion. # noqa: E501
+
+ conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. # noqa: E501
+
+ :return: The conversion_review_versions of this V1WebhookConversion. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._conversion_review_versions
+
+ @conversion_review_versions.setter
+ def conversion_review_versions(self, conversion_review_versions):
+ """Sets the conversion_review_versions of this V1WebhookConversion.
+
+ conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. # noqa: E501
+
+ :param conversion_review_versions: The conversion_review_versions of this V1WebhookConversion. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and conversion_review_versions is None: # noqa: E501
+ raise ValueError("Invalid value for `conversion_review_versions`, must not be `None`") # noqa: E501
+
+ self._conversion_review_versions = conversion_review_versions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1WebhookConversion):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1WebhookConversion):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_weighted_pod_affinity_term.py b/contrib/python/kubernetes/kubernetes/client/models/v1_weighted_pod_affinity_term.py
new file mode 100644
index 0000000000..7dc43fbd15
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_weighted_pod_affinity_term.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1WeightedPodAffinityTerm(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'pod_affinity_term': 'V1PodAffinityTerm',
+ 'weight': 'int'
+ }
+
+ attribute_map = {
+ 'pod_affinity_term': 'podAffinityTerm',
+ 'weight': 'weight'
+ }
+
+ def __init__(self, pod_affinity_term=None, weight=None, local_vars_configuration=None): # noqa: E501
+ """V1WeightedPodAffinityTerm - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._pod_affinity_term = None
+ self._weight = None
+ self.discriminator = None
+
+ self.pod_affinity_term = pod_affinity_term
+ self.weight = weight
+
+ @property
+ def pod_affinity_term(self):
+ """Gets the pod_affinity_term of this V1WeightedPodAffinityTerm. # noqa: E501
+
+
+ :return: The pod_affinity_term of this V1WeightedPodAffinityTerm. # noqa: E501
+ :rtype: V1PodAffinityTerm
+ """
+ return self._pod_affinity_term
+
+ @pod_affinity_term.setter
+ def pod_affinity_term(self, pod_affinity_term):
+ """Sets the pod_affinity_term of this V1WeightedPodAffinityTerm.
+
+
+ :param pod_affinity_term: The pod_affinity_term of this V1WeightedPodAffinityTerm. # noqa: E501
+ :type: V1PodAffinityTerm
+ """
+ if self.local_vars_configuration.client_side_validation and pod_affinity_term is None: # noqa: E501
+ raise ValueError("Invalid value for `pod_affinity_term`, must not be `None`") # noqa: E501
+
+ self._pod_affinity_term = pod_affinity_term
+
+ @property
+ def weight(self):
+ """Gets the weight of this V1WeightedPodAffinityTerm. # noqa: E501
+
+ weight associated with matching the corresponding podAffinityTerm, in the range 1-100. # noqa: E501
+
+ :return: The weight of this V1WeightedPodAffinityTerm. # noqa: E501
+ :rtype: int
+ """
+ return self._weight
+
+ @weight.setter
+ def weight(self, weight):
+ """Sets the weight of this V1WeightedPodAffinityTerm.
+
+ weight associated with matching the corresponding podAffinityTerm, in the range 1-100. # noqa: E501
+
+ :param weight: The weight of this V1WeightedPodAffinityTerm. # noqa: E501
+ :type: int
+ """
+ if self.local_vars_configuration.client_side_validation and weight is None: # noqa: E501
+ raise ValueError("Invalid value for `weight`, must not be `None`") # noqa: E501
+
+ self._weight = weight
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1WeightedPodAffinityTerm):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1WeightedPodAffinityTerm):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1_windows_security_context_options.py b/contrib/python/kubernetes/kubernetes/client/models/v1_windows_security_context_options.py
new file mode 100644
index 0000000000..7ea2bb0b8b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1_windows_security_context_options.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1WindowsSecurityContextOptions(object):
    """Windows-specific security settings for a container.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the Kubernetes OpenAPI
    document (release-1.28). Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'gmsa_credential_spec': 'str',
        'gmsa_credential_spec_name': 'str',
        'host_process': 'bool',
        'run_as_user_name': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'gmsa_credential_spec': 'gmsaCredentialSpec',
        'gmsa_credential_spec_name': 'gmsaCredentialSpecName',
        'host_process': 'hostProcess',
        'run_as_user_name': 'runAsUserName'
    }

    def __init__(self, gmsa_credential_spec=None, gmsa_credential_spec_name=None, host_process=None, run_as_user_name=None, local_vars_configuration=None):  # noqa: E501
        """Create the model; every field is optional.

        :param local_vars_configuration: client ``Configuration``
            controlling client-side validation; a fresh default one is
            created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._gmsa_credential_spec = None
        self._gmsa_credential_spec_name = None
        self._host_process = None
        self._run_as_user_name = None
        self.discriminator = None

        # Route provided values through the property setters; fields
        # that were omitted stay None.
        for name, value in (
            ('gmsa_credential_spec', gmsa_credential_spec),
            ('gmsa_credential_spec_name', gmsa_credential_spec_name),
            ('host_process', host_process),
            ('run_as_user_name', run_as_user_name),
        ):
            if value is not None:
                setattr(self, name, value)

    @property
    def gmsa_credential_spec(self):
        """str: contents of the GMSA credential spec (named by
        ``gmsa_credential_spec_name``) inlined by the GMSA admission
        webhook (https://github.com/kubernetes-sigs/windows-gmsa)."""
        return self._gmsa_credential_spec

    @gmsa_credential_spec.setter
    def gmsa_credential_spec(self, value):
        """Set ``gmsa_credential_spec``."""
        self._gmsa_credential_spec = value

    @property
    def gmsa_credential_spec_name(self):
        """str: name of the GMSA credential spec to use."""
        return self._gmsa_credential_spec_name

    @gmsa_credential_spec_name.setter
    def gmsa_credential_spec_name(self, value):
        """Set ``gmsa_credential_spec_name``."""
        self._gmsa_credential_spec_name = value

    @property
    def host_process(self):
        """bool: whether the container runs as a 'Host Process'
        container. All of a Pod's containers must share the same
        effective HostProcess value, and HostProcess=true requires
        HostNetwork=true."""
        return self._host_process

    @host_process.setter
    def host_process(self, value):
        """Set ``host_process``."""
        self._host_process = value

    @property
    def run_as_user_name(self):
        """str: Windows UserName for the container entrypoint. Defaults
        to the image metadata user; SecurityContext takes precedence
        over PodSecurityContext when both are set."""
        return self._run_as_user_name

    @run_as_user_name.setter
    def run_as_user_name(self, value):
        """Set ``run_as_user_name``."""
        self._run_as_user_name = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _plain(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the ``pprint``-formatted string of the dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr``/``print`` output to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is the same model type and dict forms match."""
        if isinstance(other, V1WindowsSecurityContextOptions):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1WindowsSecurityContextOptions):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_audit_annotation.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_audit_annotation.py
new file mode 100644
index 0000000000..fb0cc64879
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_audit_annotation.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha1AuditAnnotation(object):
    """Describes how to produce an audit annotation for an API request.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the Kubernetes OpenAPI
    document (release-1.28). Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'key': 'str',
        'value_expression': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'key': 'key',
        'value_expression': 'valueExpression'
    }

    def __init__(self, key=None, value_expression=None, local_vars_configuration=None):  # noqa: E501
        """Create the model.

        Both ``key`` and ``value_expression`` are required; assignment
        goes through the validating property setters.

        :param local_vars_configuration: client ``Configuration``
            controlling client-side validation; a fresh default one is
            created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._key = None
        self._value_expression = None
        self.discriminator = None

        self.key = key
        self.value_expression = value_expression

    @property
    def key(self):
        """str: the audit annotation key — a qualified name
        ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes, unique
        within the ValidatingAdmissionPolicy. Combined with the policy
        resource name to form
        "{ValidatingAdmissionPolicy name}/{key}"; on collision with a
        webhook annotation, the first annotation written wins. Required."""
        return self._key

    @key.setter
    def key(self, value):
        """Set ``key``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `key`, must not be `None`")  # noqa: E501
        self._key = value

    @property
    def value_expression(self):
        """str: CEL expression producing the annotation value. Must
        evaluate to a string (annotation recorded) or to null/empty
        string (annotation omitted). At most 5kb long; results over
        10kb are truncated. With multiple matching bindings, unique
        values are joined into a comma-separated list. Required."""
        return self._value_expression

    @value_expression.setter
    def value_expression(self, value):
        """Set ``value_expression``; rejects ``None`` when client-side
        validation is on."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `value_expression`, must not be `None`")  # noqa: E501
        self._value_expression = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _plain(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the ``pprint``-formatted string of the dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr``/``print`` output to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is the same model type and dict forms match."""
        if isinstance(other, V1alpha1AuditAnnotation):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1alpha1AuditAnnotation):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr.py
new file mode 100644
index 0000000000..a69ca4be92
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha1ClusterCIDR(object):
    """ClusterCIDR resource (networking.k8s.io/v1alpha1).

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the Kubernetes OpenAPI
    document (release-1.28). Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1alpha1ClusterCIDRSpec'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """Create the model; every field is optional.

        :param local_vars_configuration: client ``Configuration``
            controlling client-side validation; a fresh default one is
            created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self.discriminator = None

        # Route provided values through the property setters; fields
        # that were omitted stay None.
        for name, value in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
            ('spec', spec),
        ):
            if value is not None:
                setattr(self, name, value)

    @property
    def api_version(self):
        """str: versioned schema of this object representation. See
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"""
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set ``api_version``."""
        self._api_version = value

    @property
    def kind(self):
        """str: REST resource this object represents (CamelCase,
        cannot be updated). See
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"""
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set ``kind``."""
        self._kind = value

    @property
    def metadata(self):
        """V1ObjectMeta: standard object metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set ``metadata``."""
        self._metadata = value

    @property
    def spec(self):
        """V1alpha1ClusterCIDRSpec: the desired state of this ClusterCIDR."""
        return self._spec

    @spec.setter
    def spec(self, value):
        """Set ``spec``."""
        self._spec = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _plain(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the ``pprint``-formatted string of the dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr``/``print`` output to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is the same model type and dict forms match."""
        if isinstance(other, V1alpha1ClusterCIDR):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1alpha1ClusterCIDR):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_list.py
new file mode 100644
index 0000000000..b9dbf5930f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha1ClusterCIDRList(object):
    """List of ClusterCIDR resources.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the Kubernetes OpenAPI
    document (release-1.28). Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1alpha1ClusterCIDR]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """Create the model.

        ``items`` is required and validated by its setter; the other
        fields are optional.

        :param local_vars_configuration: client ``Configuration``
            controlling client-side validation; a fresh default one is
            created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: versioned schema of this object representation. See
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"""
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set ``api_version``."""
        self._api_version = value

    @property
    def items(self):
        """list[V1alpha1ClusterCIDR]: the list of ClusterCIDRs. Required."""
        return self._items

    @items.setter
    def items(self, value):
        """Set ``items``; rejects ``None`` when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = value

    @property
    def kind(self):
        """str: REST resource this object represents (CamelCase,
        cannot be updated). See
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"""
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set ``kind``."""
        self._kind = value

    @property
    def metadata(self):
        """V1ListMeta: standard list metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set ``metadata``."""
        self._metadata = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _plain(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the ``pprint``-formatted string of the dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr``/``print`` output to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is the same model type and dict forms match."""
        if isinstance(other, V1alpha1ClusterCIDRList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1alpha1ClusterCIDRList):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_spec.py
new file mode 100644
index 0000000000..4f0b372c13
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_cidr_spec.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha1ClusterCIDRSpec(object):
    """Desired state of a ClusterCIDR.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech) from the Kubernetes OpenAPI
    document (release-1.28). Do not edit the class manually.
    """

    # Maps attribute name -> OpenAPI type.
    openapi_types = {
        'ipv4': 'str',
        'ipv6': 'str',
        'node_selector': 'V1NodeSelector',
        'per_node_host_bits': 'int'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'ipv4': 'ipv4',
        'ipv6': 'ipv6',
        'node_selector': 'nodeSelector',
        'per_node_host_bits': 'perNodeHostBits'
    }

    def __init__(self, ipv4=None, ipv6=None, node_selector=None, per_node_host_bits=None, local_vars_configuration=None):  # noqa: E501
        """Create the model.

        ``per_node_host_bits`` is required and validated by its setter;
        the other fields are optional.

        :param local_vars_configuration: client ``Configuration``
            controlling client-side validation; a fresh default one is
            created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._ipv4 = None
        self._ipv6 = None
        self._node_selector = None
        self._per_node_host_bits = None
        self.discriminator = None

        # Optional fields are only assigned when provided; the required
        # field always goes through its validating setter.
        for name, value in (
            ('ipv4', ipv4),
            ('ipv6', ipv6),
            ('node_selector', node_selector),
        ):
            if value is not None:
                setattr(self, name, value)
        self.per_node_host_bits = per_node_host_bits

    @property
    def ipv4(self):
        """str: an IPv4 block in CIDR notation (e.g. \"10.0.0.0/8\").
        At least one of ipv4 and ipv6 must be specified. Immutable."""
        return self._ipv4

    @ipv4.setter
    def ipv4(self, value):
        """Set ``ipv4``."""
        self._ipv4 = value

    @property
    def ipv6(self):
        """str: an IPv6 block in CIDR notation (e.g. \"2001:db8::/64\").
        At least one of ipv4 and ipv6 must be specified. Immutable."""
        return self._ipv6

    @ipv6.setter
    def ipv6(self, value):
        """Set ``ipv6``."""
        self._ipv6 = value

    @property
    def node_selector(self):
        """V1NodeSelector: selector for the nodes this spec applies to."""
        return self._node_selector

    @node_selector.setter
    def node_selector(self, value):
        """Set ``node_selector``."""
        self._node_selector = value

    @property
    def per_node_host_bits(self):
        """int: number of host bits configured per node (e.g. 8 yields
        a /24 for IPv4 or a /120 for IPv6). Minimum value is 4
        (16 IPs). Immutable. Required."""
        return self._per_node_host_bits

    @per_node_host_bits.setter
    def per_node_host_bits(self, value):
        """Set ``per_node_host_bits``; rejects ``None`` when client-side
        validation is on."""
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `per_node_host_bits`, must not be `None`")  # noqa: E501
        self._per_node_host_bits = value

    def to_dict(self):
        """Serialize the model to a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _plain(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the ``pprint``-formatted string of the dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr``/``print`` output to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff ``other`` is the same model type and dict forms match."""
        if isinstance(other, V1alpha1ClusterCIDRSpec):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of :meth:`__eq__` (explicit for Python 2 semantics)."""
        if isinstance(other, V1alpha1ClusterCIDRSpec):
            return self.to_dict() != other.to_dict()
        return True
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle.py
new file mode 100644
index 0000000000..c7f9eda9df
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ClusterTrustBundle(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1alpha1ClusterTrustBundleSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ClusterTrustBundle - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ClusterTrustBundle. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ClusterTrustBundle.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ClusterTrustBundle. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ClusterTrustBundle.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1ClusterTrustBundle. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1ClusterTrustBundle.
+
+
+ :param metadata: The metadata of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha1ClusterTrustBundle. # noqa: E501
+
+
+ :return: The spec of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :rtype: V1alpha1ClusterTrustBundleSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha1ClusterTrustBundle.
+
+
+ :param spec: The spec of this V1alpha1ClusterTrustBundle. # noqa: E501
+ :type: V1alpha1ClusterTrustBundleSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ClusterTrustBundle):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ClusterTrustBundle):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_list.py
new file mode 100644
index 0000000000..3968cd03b3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ClusterTrustBundleList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha1ClusterTrustBundle]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ClusterTrustBundleList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ClusterTrustBundleList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ClusterTrustBundleList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha1ClusterTrustBundleList. # noqa: E501
+
+ items is a collection of ClusterTrustBundle objects # noqa: E501
+
+ :return: The items of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :rtype: list[V1alpha1ClusterTrustBundle]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha1ClusterTrustBundleList.
+
+ items is a collection of ClusterTrustBundle objects # noqa: E501
+
+ :param items: The items of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :type: list[V1alpha1ClusterTrustBundle]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ClusterTrustBundleList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ClusterTrustBundleList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1ClusterTrustBundleList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1ClusterTrustBundleList.
+
+
+ :param metadata: The metadata of this V1alpha1ClusterTrustBundleList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ClusterTrustBundleList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ClusterTrustBundleList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_spec.py
new file mode 100644
index 0000000000..456404e7f4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_cluster_trust_bundle_spec.py
@@ -0,0 +1,151 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ClusterTrustBundleSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'signer_name': 'str',
+ 'trust_bundle': 'str'
+ }
+
+ attribute_map = {
+ 'signer_name': 'signerName',
+ 'trust_bundle': 'trustBundle'
+ }
+
+ def __init__(self, signer_name=None, trust_bundle=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ClusterTrustBundleSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._signer_name = None
+ self._trust_bundle = None
+ self.discriminator = None
+
+ if signer_name is not None:
+ self.signer_name = signer_name
+ self.trust_bundle = trust_bundle
+
+ @property
+ def signer_name(self):
+ """Gets the signer_name of this V1alpha1ClusterTrustBundleSpec. # noqa: E501
+
+ signerName indicates the associated signer, if any. In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest. If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`. If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix. List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector. # noqa: E501
+
+ :return: The signer_name of this V1alpha1ClusterTrustBundleSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._signer_name
+
+ @signer_name.setter
+ def signer_name(self, signer_name):
+ """Sets the signer_name of this V1alpha1ClusterTrustBundleSpec.
+
+ signerName indicates the associated signer, if any. In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest. If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`. If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix. List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector. # noqa: E501
+
+ :param signer_name: The signer_name of this V1alpha1ClusterTrustBundleSpec. # noqa: E501
+ :type: str
+ """
+
+ self._signer_name = signer_name
+
+ @property
+ def trust_bundle(self):
+ """Gets the trust_bundle of this V1alpha1ClusterTrustBundleSpec. # noqa: E501
+
+ trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers. Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data. # noqa: E501
+
+ :return: The trust_bundle of this V1alpha1ClusterTrustBundleSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._trust_bundle
+
+ @trust_bundle.setter
+ def trust_bundle(self, trust_bundle):
+ """Sets the trust_bundle of this V1alpha1ClusterTrustBundleSpec.
+
+ trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers. Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data. # noqa: E501
+
+ :param trust_bundle: The trust_bundle of this V1alpha1ClusterTrustBundleSpec. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and trust_bundle is None: # noqa: E501
+ raise ValueError("Invalid value for `trust_bundle`, must not be `None`") # noqa: E501
+
+ self._trust_bundle = trust_bundle
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ClusterTrustBundleSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ClusterTrustBundleSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_expression_warning.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_expression_warning.py
new file mode 100644
index 0000000000..9985be5707
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_expression_warning.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ExpressionWarning(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'field_ref': 'str',
+ 'warning': 'str'
+ }
+
+ attribute_map = {
+ 'field_ref': 'fieldRef',
+ 'warning': 'warning'
+ }
+
+ def __init__(self, field_ref=None, warning=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ExpressionWarning - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._field_ref = None
+ self._warning = None
+ self.discriminator = None
+
+ self.field_ref = field_ref
+ self.warning = warning
+
+ @property
+ def field_ref(self):
+ """Gets the field_ref of this V1alpha1ExpressionWarning. # noqa: E501
+
+ The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\" # noqa: E501
+
+ :return: The field_ref of this V1alpha1ExpressionWarning. # noqa: E501
+ :rtype: str
+ """
+ return self._field_ref
+
+ @field_ref.setter
+ def field_ref(self, field_ref):
+ """Sets the field_ref of this V1alpha1ExpressionWarning.
+
+ The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\" # noqa: E501
+
+ :param field_ref: The field_ref of this V1alpha1ExpressionWarning. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and field_ref is None: # noqa: E501
+ raise ValueError("Invalid value for `field_ref`, must not be `None`") # noqa: E501
+
+ self._field_ref = field_ref
+
+ @property
+ def warning(self):
+ """Gets the warning of this V1alpha1ExpressionWarning. # noqa: E501
+
+ The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler. # noqa: E501
+
+ :return: The warning of this V1alpha1ExpressionWarning. # noqa: E501
+ :rtype: str
+ """
+ return self._warning
+
+ @warning.setter
+ def warning(self, warning):
+ """Sets the warning of this V1alpha1ExpressionWarning.
+
+ The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler. # noqa: E501
+
+ :param warning: The warning of this V1alpha1ExpressionWarning. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and warning is None: # noqa: E501
+ raise ValueError("Invalid value for `warning`, must not be `None`") # noqa: E501
+
+ self._warning = warning
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ExpressionWarning):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ExpressionWarning):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address.py
new file mode 100644
index 0000000000..dc87e13300
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1IPAddress(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1alpha1IPAddressSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1IPAddress - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1IPAddress. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1IPAddress. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1IPAddress.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1IPAddress. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1IPAddress. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1IPAddress. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1IPAddress.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1IPAddress. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1IPAddress. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1IPAddress. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1IPAddress.
+
+
+ :param metadata: The metadata of this V1alpha1IPAddress. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha1IPAddress. # noqa: E501
+
+
+ :return: The spec of this V1alpha1IPAddress. # noqa: E501
+ :rtype: V1alpha1IPAddressSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha1IPAddress.
+
+
+ :param spec: The spec of this V1alpha1IPAddress. # noqa: E501
+ :type: V1alpha1IPAddressSpec
+ """
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1IPAddress):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1IPAddress):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_list.py
new file mode 100644
index 0000000000..bbb146d704
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1IPAddressList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha1IPAddress]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1IPAddressList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1IPAddressList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1IPAddressList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1IPAddressList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1IPAddressList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha1IPAddressList. # noqa: E501
+
+ items is the list of IPAddresses. # noqa: E501
+
+ :return: The items of this V1alpha1IPAddressList. # noqa: E501
+ :rtype: list[V1alpha1IPAddress]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha1IPAddressList.
+
+ items is the list of IPAddresses. # noqa: E501
+
+ :param items: The items of this V1alpha1IPAddressList. # noqa: E501
+ :type: list[V1alpha1IPAddress]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1IPAddressList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1IPAddressList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1IPAddressList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1IPAddressList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1IPAddressList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1IPAddressList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1IPAddressList.
+
+
+ :param metadata: The metadata of this V1alpha1IPAddressList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1IPAddressList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1IPAddressList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_spec.py
new file mode 100644
index 0000000000..396784c637
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_ip_address_spec.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1IPAddressSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'parent_ref': 'V1alpha1ParentReference'
+ }
+
+ attribute_map = {
+ 'parent_ref': 'parentRef'
+ }
+
+ def __init__(self, parent_ref=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1IPAddressSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._parent_ref = None
+ self.discriminator = None
+
+ if parent_ref is not None:
+ self.parent_ref = parent_ref
+
+ @property
+ def parent_ref(self):
+ """Gets the parent_ref of this V1alpha1IPAddressSpec. # noqa: E501
+
+
+ :return: The parent_ref of this V1alpha1IPAddressSpec. # noqa: E501
+ :rtype: V1alpha1ParentReference
+ """
+ return self._parent_ref
+
+ @parent_ref.setter
+ def parent_ref(self, parent_ref):
+ """Sets the parent_ref of this V1alpha1IPAddressSpec.
+
+
+ :param parent_ref: The parent_ref of this V1alpha1IPAddressSpec. # noqa: E501
+ :type: V1alpha1ParentReference
+ """
+
+ self._parent_ref = parent_ref
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1IPAddressSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1IPAddressSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_condition.py
new file mode 100644
index 0000000000..c7bc8e72f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_condition.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1MatchCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'name': 'name'
+ }
+
+ def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1MatchCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._name = None
+ self.discriminator = None
+
+ self.expression = expression
+ self.name = name
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1alpha1MatchCondition. # noqa: E501
+
+ Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
+
+ :return: The expression of this V1alpha1MatchCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1alpha1MatchCondition.
+
+ Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
+
+ :param expression: The expression of this V1alpha1MatchCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha1MatchCondition. # noqa: E501
+
+ Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
+
+ :return: The name of this V1alpha1MatchCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha1MatchCondition.
+
+ Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
+
+ :param name: The name of this V1alpha1MatchCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1MatchCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1MatchCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_resources.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_resources.py
new file mode 100644
index 0000000000..8b77d6250b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_match_resources.py
@@ -0,0 +1,230 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1MatchResources(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'exclude_resource_rules': 'list[V1alpha1NamedRuleWithOperations]',
+ 'match_policy': 'str',
+ 'namespace_selector': 'V1LabelSelector',
+ 'object_selector': 'V1LabelSelector',
+ 'resource_rules': 'list[V1alpha1NamedRuleWithOperations]'
+ }
+
+ attribute_map = {
+ 'exclude_resource_rules': 'excludeResourceRules',
+ 'match_policy': 'matchPolicy',
+ 'namespace_selector': 'namespaceSelector',
+ 'object_selector': 'objectSelector',
+ 'resource_rules': 'resourceRules'
+ }
+
+ def __init__(self, exclude_resource_rules=None, match_policy=None, namespace_selector=None, object_selector=None, resource_rules=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1MatchResources - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._exclude_resource_rules = None
+ self._match_policy = None
+ self._namespace_selector = None
+ self._object_selector = None
+ self._resource_rules = None
+ self.discriminator = None
+
+ if exclude_resource_rules is not None:
+ self.exclude_resource_rules = exclude_resource_rules
+ if match_policy is not None:
+ self.match_policy = match_policy
+ if namespace_selector is not None:
+ self.namespace_selector = namespace_selector
+ if object_selector is not None:
+ self.object_selector = object_selector
+ if resource_rules is not None:
+ self.resource_rules = resource_rules
+
+ @property
+ def exclude_resource_rules(self):
+ """Gets the exclude_resource_rules of this V1alpha1MatchResources. # noqa: E501
+
+ ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded) # noqa: E501
+
+ :return: The exclude_resource_rules of this V1alpha1MatchResources. # noqa: E501
+ :rtype: list[V1alpha1NamedRuleWithOperations]
+ """
+ return self._exclude_resource_rules
+
+ @exclude_resource_rules.setter
+ def exclude_resource_rules(self, exclude_resource_rules):
+ """Sets the exclude_resource_rules of this V1alpha1MatchResources.
+
+ ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded) # noqa: E501
+
+ :param exclude_resource_rules: The exclude_resource_rules of this V1alpha1MatchResources. # noqa: E501
+ :type: list[V1alpha1NamedRuleWithOperations]
+ """
+
+ self._exclude_resource_rules = exclude_resource_rules
+
+ @property
+ def match_policy(self):
+ """Gets the match_policy of this V1alpha1MatchResources. # noqa: E501
+
+ matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. Defaults to \"Equivalent\" # noqa: E501
+
+ :return: The match_policy of this V1alpha1MatchResources. # noqa: E501
+ :rtype: str
+ """
+ return self._match_policy
+
+ @match_policy.setter
+ def match_policy(self, match_policy):
+ """Sets the match_policy of this V1alpha1MatchResources.
+
+ matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. Defaults to \"Equivalent\" # noqa: E501
+
+ :param match_policy: The match_policy of this V1alpha1MatchResources. # noqa: E501
+ :type: str
+ """
+
+ self._match_policy = match_policy
+
+ @property
+ def namespace_selector(self):
+ """Gets the namespace_selector of this V1alpha1MatchResources. # noqa: E501
+
+
+ :return: The namespace_selector of this V1alpha1MatchResources. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._namespace_selector
+
+ @namespace_selector.setter
+ def namespace_selector(self, namespace_selector):
+ """Sets the namespace_selector of this V1alpha1MatchResources.
+
+
+ :param namespace_selector: The namespace_selector of this V1alpha1MatchResources. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._namespace_selector = namespace_selector
+
+ @property
+ def object_selector(self):
+ """Gets the object_selector of this V1alpha1MatchResources. # noqa: E501
+
+
+ :return: The object_selector of this V1alpha1MatchResources. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._object_selector
+
+ @object_selector.setter
+ def object_selector(self, object_selector):
+ """Sets the object_selector of this V1alpha1MatchResources.
+
+
+ :param object_selector: The object_selector of this V1alpha1MatchResources. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._object_selector = object_selector
+
+ @property
+ def resource_rules(self):
+ """Gets the resource_rules of this V1alpha1MatchResources. # noqa: E501
+
+ ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule. # noqa: E501
+
+ :return: The resource_rules of this V1alpha1MatchResources. # noqa: E501
+ :rtype: list[V1alpha1NamedRuleWithOperations]
+ """
+ return self._resource_rules
+
+ @resource_rules.setter
+ def resource_rules(self, resource_rules):
+ """Sets the resource_rules of this V1alpha1MatchResources.
+
+ ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule. # noqa: E501
+
+ :param resource_rules: The resource_rules of this V1alpha1MatchResources. # noqa: E501
+ :type: list[V1alpha1NamedRuleWithOperations]
+ """
+
+ self._resource_rules = resource_rules
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1MatchResources):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1MatchResources):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_named_rule_with_operations.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_named_rule_with_operations.py
new file mode 100644
index 0000000000..00857180f6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_named_rule_with_operations.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1NamedRuleWithOperations(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'api_versions': 'list[str]',
+ 'operations': 'list[str]',
+ 'resource_names': 'list[str]',
+ 'resources': 'list[str]',
+ 'scope': 'str'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'api_versions': 'apiVersions',
+ 'operations': 'operations',
+ 'resource_names': 'resourceNames',
+ 'resources': 'resources',
+ 'scope': 'scope'
+ }
+
+ def __init__(self, api_groups=None, api_versions=None, operations=None, resource_names=None, resources=None, scope=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1NamedRuleWithOperations - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._api_versions = None
+ self._operations = None
+ self._resource_names = None
+ self._resources = None
+ self._scope = None
+ self.discriminator = None
+
+ if api_groups is not None:
+ self.api_groups = api_groups
+ if api_versions is not None:
+ self.api_versions = api_versions
+ if operations is not None:
+ self.operations = operations
+ if resource_names is not None:
+ self.resource_names = resource_names
+ if resources is not None:
+ self.resources = resources
+ if scope is not None:
+ self.scope = scope
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1alpha1NamedRuleWithOperations. # noqa: E501
+
+ APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The api_groups of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1alpha1NamedRuleWithOperations.
+
+ APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param api_groups: The api_groups of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_groups = api_groups
+
+ @property
+ def api_versions(self):
+ """Gets the api_versions of this V1alpha1NamedRuleWithOperations. # noqa: E501
+
+ APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The api_versions of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_versions
+
+ @api_versions.setter
+ def api_versions(self, api_versions):
+ """Sets the api_versions of this V1alpha1NamedRuleWithOperations.
+
+ APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param api_versions: The api_versions of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_versions = api_versions
+
+ @property
+ def operations(self):
+ """Gets the operations of this V1alpha1NamedRuleWithOperations. # noqa: E501
+
+ Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The operations of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._operations
+
+ @operations.setter
+ def operations(self, operations):
+ """Sets the operations of this V1alpha1NamedRuleWithOperations.
+
+ Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param operations: The operations of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._operations = operations
+
+ @property
+ def resource_names(self):
+ """Gets the resource_names of this V1alpha1NamedRuleWithOperations. # noqa: E501
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
+
+ :return: The resource_names of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resource_names
+
+ @resource_names.setter
+ def resource_names(self, resource_names):
+ """Sets the resource_names of this V1alpha1NamedRuleWithOperations.
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
+
+ :param resource_names: The resource_names of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resource_names = resource_names
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1alpha1NamedRuleWithOperations. # noqa: E501
+
+ Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501
+
+ :return: The resources of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1alpha1NamedRuleWithOperations.
+
+ Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501
+
+ :param resources: The resources of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resources = resources
+
+ @property
+ def scope(self):
+ """Gets the scope of this V1alpha1NamedRuleWithOperations. # noqa: E501
+
+ scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501
+
+ :return: The scope of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :rtype: str
+ """
+ return self._scope
+
+ @scope.setter
+ def scope(self, scope):
+ """Sets the scope of this V1alpha1NamedRuleWithOperations.
+
+ scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501
+
+ :param scope: The scope of this V1alpha1NamedRuleWithOperations. # noqa: E501
+ :type: str
+ """
+
+ self._scope = scope
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1NamedRuleWithOperations):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1NamedRuleWithOperations):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_kind.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_kind.py
new file mode 100644
index 0000000000..802c688783
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_kind.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ParamKind(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind'
+ }
+
+ def __init__(self, api_version=None, kind=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ParamKind - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ParamKind. # noqa: E501
+
+ APIVersion is the API group version the resources belong to. In format of \"group/version\". Required. # noqa: E501
+
+ :return: The api_version of this V1alpha1ParamKind. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ParamKind.
+
+ APIVersion is the API group version the resources belong to. In format of \"group/version\". Required. # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ParamKind. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ParamKind. # noqa: E501
+
+ Kind is the API kind the resources belong to. Required. # noqa: E501
+
+ :return: The kind of this V1alpha1ParamKind. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ParamKind.
+
+ Kind is the API kind the resources belong to. Required. # noqa: E501
+
+ :param kind: The kind of this V1alpha1ParamKind. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ParamKind):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ParamKind):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_ref.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_ref.py
new file mode 100644
index 0000000000..b4a5f3af9f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_param_ref.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ParamRef(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str',
+ 'parameter_not_found_action': 'str',
+ 'selector': 'V1LabelSelector'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'parameter_not_found_action': 'parameterNotFoundAction',
+ 'selector': 'selector'
+ }
+
+ def __init__(self, name=None, namespace=None, parameter_not_found_action=None, selector=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ParamRef - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self._parameter_not_found_action = None
+ self._selector = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if parameter_not_found_action is not None:
+ self.parameter_not_found_action = parameter_not_found_action
+ if selector is not None:
+ self.selector = selector
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha1ParamRef. # noqa: E501
+
+ `name` is the name of the resource being referenced. `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. # noqa: E501
+
+ :return: The name of this V1alpha1ParamRef. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha1ParamRef.
+
+ `name` is the name of the resource being referenced. `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. # noqa: E501
+
+ :param name: The name of this V1alpha1ParamRef. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1alpha1ParamRef. # noqa: E501
+
+ namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
+
+ :return: The namespace of this V1alpha1ParamRef. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1alpha1ParamRef.
+
+ namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
+
+ :param namespace: The namespace of this V1alpha1ParamRef. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def parameter_not_found_action(self):
+ """Gets the parameter_not_found_action of this V1alpha1ParamRef. # noqa: E501
+
+ `parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Default to `Deny` # noqa: E501
+
+ :return: The parameter_not_found_action of this V1alpha1ParamRef. # noqa: E501
+ :rtype: str
+ """
+ return self._parameter_not_found_action
+
+ @parameter_not_found_action.setter
+ def parameter_not_found_action(self, parameter_not_found_action):
+ """Sets the parameter_not_found_action of this V1alpha1ParamRef.
+
+ `parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Default to `Deny` # noqa: E501
+
+ :param parameter_not_found_action: The parameter_not_found_action of this V1alpha1ParamRef. # noqa: E501
+ :type: str
+ """
+
+ self._parameter_not_found_action = parameter_not_found_action
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1alpha1ParamRef. # noqa: E501
+
+
+ :return: The selector of this V1alpha1ParamRef. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1alpha1ParamRef.
+
+
+ :param selector: The selector of this V1alpha1ParamRef. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._selector = selector
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ParamRef):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ParamRef):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_parent_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_parent_reference.py
new file mode 100644
index 0000000000..a1309677d1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_parent_reference.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ParentReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'group': 'str',
+ 'name': 'str',
+ 'namespace': 'str',
+ 'resource': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'group': 'group',
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'resource': 'resource',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, group=None, name=None, namespace=None, resource=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ParentReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._group = None
+ self._name = None
+ self._namespace = None
+ self._resource = None
+ self._uid = None
+ self.discriminator = None
+
+ if group is not None:
+ self.group = group
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if resource is not None:
+ self.resource = resource
+ if uid is not None:
+ self.uid = uid
+
+ @property
+ def group(self):
+ """Gets the group of this V1alpha1ParentReference. # noqa: E501
+
+ Group is the group of the object being referenced. # noqa: E501
+
+ :return: The group of this V1alpha1ParentReference. # noqa: E501
+ :rtype: str
+ """
+ return self._group
+
+ @group.setter
+ def group(self, group):
+ """Sets the group of this V1alpha1ParentReference.
+
+ Group is the group of the object being referenced. # noqa: E501
+
+ :param group: The group of this V1alpha1ParentReference. # noqa: E501
+ :type: str
+ """
+
+ self._group = group
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha1ParentReference. # noqa: E501
+
+ Name is the name of the object being referenced. # noqa: E501
+
+ :return: The name of this V1alpha1ParentReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha1ParentReference.
+
+ Name is the name of the object being referenced. # noqa: E501
+
+ :param name: The name of this V1alpha1ParentReference. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1alpha1ParentReference. # noqa: E501
+
+ Namespace is the namespace of the object being referenced. # noqa: E501
+
+ :return: The namespace of this V1alpha1ParentReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1alpha1ParentReference.
+
+ Namespace is the namespace of the object being referenced. # noqa: E501
+
+ :param namespace: The namespace of this V1alpha1ParentReference. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def resource(self):
+ """Gets the resource of this V1alpha1ParentReference. # noqa: E501
+
+ Resource is the resource of the object being referenced. # noqa: E501
+
+ :return: The resource of this V1alpha1ParentReference. # noqa: E501
+ :rtype: str
+ """
+ return self._resource
+
+ @resource.setter
+ def resource(self, resource):
+ """Sets the resource of this V1alpha1ParentReference.
+
+ Resource is the resource of the object being referenced. # noqa: E501
+
+ :param resource: The resource of this V1alpha1ParentReference. # noqa: E501
+ :type: str
+ """
+
+ self._resource = resource
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1alpha1ParentReference. # noqa: E501
+
+ UID is the uid of the object being referenced. # noqa: E501
+
+ :return: The uid of this V1alpha1ParentReference. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1alpha1ParentReference.
+
+ UID is the uid of the object being referenced. # noqa: E501
+
+ :param uid: The uid of this V1alpha1ParentReference. # noqa: E501
+ :type: str
+ """
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ParentReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ParentReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review.py
new file mode 100644
index 0000000000..c1475dacb2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1SelfSubjectReview(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'status': 'V1alpha1SelfSubjectReviewStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1SelfSubjectReview - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1SelfSubjectReview. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1SelfSubjectReview. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1SelfSubjectReview.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1SelfSubjectReview. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1SelfSubjectReview. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1SelfSubjectReview. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1SelfSubjectReview.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1SelfSubjectReview. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1SelfSubjectReview. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1SelfSubjectReview. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1SelfSubjectReview.
+
+
+ :param metadata: The metadata of this V1alpha1SelfSubjectReview. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def status(self):
+ """Gets the status of this V1alpha1SelfSubjectReview. # noqa: E501
+
+
+ :return: The status of this V1alpha1SelfSubjectReview. # noqa: E501
+ :rtype: V1alpha1SelfSubjectReviewStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1alpha1SelfSubjectReview.
+
+
+ :param status: The status of this V1alpha1SelfSubjectReview. # noqa: E501
+ :type: V1alpha1SelfSubjectReviewStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1SelfSubjectReview):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1SelfSubjectReview):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review_status.py
new file mode 100644
index 0000000000..fd2f133980
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_self_subject_review_status.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1SelfSubjectReviewStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'user_info': 'V1UserInfo'
+ }
+
+ attribute_map = {
+ 'user_info': 'userInfo'
+ }
+
+ def __init__(self, user_info=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1SelfSubjectReviewStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._user_info = None
+ self.discriminator = None
+
+ if user_info is not None:
+ self.user_info = user_info
+
+ @property
+ def user_info(self):
+ """Gets the user_info of this V1alpha1SelfSubjectReviewStatus. # noqa: E501
+
+
+ :return: The user_info of this V1alpha1SelfSubjectReviewStatus. # noqa: E501
+ :rtype: V1UserInfo
+ """
+ return self._user_info
+
+ @user_info.setter
+ def user_info(self, user_info):
+ """Sets the user_info of this V1alpha1SelfSubjectReviewStatus.
+
+
+ :param user_info: The user_info of this V1alpha1SelfSubjectReviewStatus. # noqa: E501
+ :type: V1UserInfo
+ """
+
+ self._user_info = user_info
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1SelfSubjectReviewStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1SelfSubjectReviewStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_server_storage_version.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_server_storage_version.py
new file mode 100644
index 0000000000..c465bcb581
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_server_storage_version.py
@@ -0,0 +1,206 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ServerStorageVersion(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_server_id': 'str',
+ 'decodable_versions': 'list[str]',
+ 'encoding_version': 'str',
+ 'served_versions': 'list[str]'
+ }
+
+ attribute_map = {
+ 'api_server_id': 'apiServerID',
+ 'decodable_versions': 'decodableVersions',
+ 'encoding_version': 'encodingVersion',
+ 'served_versions': 'servedVersions'
+ }
+
+ def __init__(self, api_server_id=None, decodable_versions=None, encoding_version=None, served_versions=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ServerStorageVersion - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_server_id = None
+ self._decodable_versions = None
+ self._encoding_version = None
+ self._served_versions = None
+ self.discriminator = None
+
+ if api_server_id is not None:
+ self.api_server_id = api_server_id
+ if decodable_versions is not None:
+ self.decodable_versions = decodable_versions
+ if encoding_version is not None:
+ self.encoding_version = encoding_version
+ if served_versions is not None:
+ self.served_versions = served_versions
+
+ @property
+ def api_server_id(self):
+ """Gets the api_server_id of this V1alpha1ServerStorageVersion. # noqa: E501
+
+ The ID of the reporting API server. # noqa: E501
+
+ :return: The api_server_id of this V1alpha1ServerStorageVersion. # noqa: E501
+ :rtype: str
+ """
+ return self._api_server_id
+
+ @api_server_id.setter
+ def api_server_id(self, api_server_id):
+ """Sets the api_server_id of this V1alpha1ServerStorageVersion.
+
+ The ID of the reporting API server. # noqa: E501
+
+ :param api_server_id: The api_server_id of this V1alpha1ServerStorageVersion. # noqa: E501
+ :type: str
+ """
+
+ self._api_server_id = api_server_id
+
+ @property
+ def decodable_versions(self):
+ """Gets the decodable_versions of this V1alpha1ServerStorageVersion. # noqa: E501
+
+ The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions. # noqa: E501
+
+ :return: The decodable_versions of this V1alpha1ServerStorageVersion. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._decodable_versions
+
+ @decodable_versions.setter
+ def decodable_versions(self, decodable_versions):
+ """Sets the decodable_versions of this V1alpha1ServerStorageVersion.
+
+ The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions. # noqa: E501
+
+ :param decodable_versions: The decodable_versions of this V1alpha1ServerStorageVersion. # noqa: E501
+ :type: list[str]
+ """
+
+ self._decodable_versions = decodable_versions
+
+ @property
+ def encoding_version(self):
+ """Gets the encoding_version of this V1alpha1ServerStorageVersion. # noqa: E501
+
+ The API server encodes the object to this version when persisting it in the backend (e.g., etcd). # noqa: E501
+
+ :return: The encoding_version of this V1alpha1ServerStorageVersion. # noqa: E501
+ :rtype: str
+ """
+ return self._encoding_version
+
+ @encoding_version.setter
+ def encoding_version(self, encoding_version):
+ """Sets the encoding_version of this V1alpha1ServerStorageVersion.
+
+ The API server encodes the object to this version when persisting it in the backend (e.g., etcd). # noqa: E501
+
+ :param encoding_version: The encoding_version of this V1alpha1ServerStorageVersion. # noqa: E501
+ :type: str
+ """
+
+ self._encoding_version = encoding_version
+
+ @property
+ def served_versions(self):
+ """Gets the served_versions of this V1alpha1ServerStorageVersion. # noqa: E501
+
+ The API server can serve these versions. DecodableVersions must include all ServedVersions. # noqa: E501
+
+ :return: The served_versions of this V1alpha1ServerStorageVersion. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._served_versions
+
+ @served_versions.setter
+ def served_versions(self, served_versions):
+ """Sets the served_versions of this V1alpha1ServerStorageVersion.
+
+ The API server can serve these versions. DecodableVersions must include all ServedVersions. # noqa: E501
+
+ :param served_versions: The served_versions of this V1alpha1ServerStorageVersion. # noqa: E501
+ :type: list[str]
+ """
+
+ self._served_versions = served_versions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ServerStorageVersion):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ServerStorageVersion):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version.py
new file mode 100644
index 0000000000..9c48db1758
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version.py
@@ -0,0 +1,232 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1StorageVersion(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'object',
+ 'status': 'V1alpha1StorageVersionStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1StorageVersion - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1StorageVersion. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1StorageVersion. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1StorageVersion.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1StorageVersion. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1StorageVersion. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1StorageVersion. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1StorageVersion.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1StorageVersion. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1StorageVersion. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1StorageVersion. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1StorageVersion.
+
+
+ :param metadata: The metadata of this V1alpha1StorageVersion. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha1StorageVersion. # noqa: E501
+
+ Spec is an empty spec. It is here to comply with Kubernetes API style. # noqa: E501
+
+ :return: The spec of this V1alpha1StorageVersion. # noqa: E501
+ :rtype: object
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha1StorageVersion.
+
+ Spec is an empty spec. It is here to comply with Kubernetes API style. # noqa: E501
+
+ :param spec: The spec of this V1alpha1StorageVersion. # noqa: E501
+ :type: object
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1alpha1StorageVersion. # noqa: E501
+
+
+ :return: The status of this V1alpha1StorageVersion. # noqa: E501
+ :rtype: V1alpha1StorageVersionStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1alpha1StorageVersion.
+
+
+ :param status: The status of this V1alpha1StorageVersion. # noqa: E501
+ :type: V1alpha1StorageVersionStatus
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1StorageVersion):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1StorageVersion):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_condition.py
new file mode 100644
index 0000000000..db4063f553
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_condition.py
@@ -0,0 +1,265 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1StorageVersionCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'observed_generation': 'int',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'observed_generation': 'observedGeneration',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, message=None, observed_generation=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1StorageVersionCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._message = None
+ self._observed_generation = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+ self.reason = reason
+ self.status = status
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1alpha1StorageVersionCondition. # noqa: E501
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1alpha1StorageVersionCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1alpha1StorageVersionCondition.
+
+ Last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1alpha1StorageVersionCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1alpha1StorageVersionCondition. # noqa: E501
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :return: The message of this V1alpha1StorageVersionCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1alpha1StorageVersionCondition.
+
+ A human readable message indicating details about the transition. # noqa: E501
+
+ :param message: The message of this V1alpha1StorageVersionCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1alpha1StorageVersionCondition. # noqa: E501
+
+ If set, this represents the .metadata.generation that the condition was set based upon. # noqa: E501
+
+ :return: The observed_generation of this V1alpha1StorageVersionCondition. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1alpha1StorageVersionCondition.
+
+ If set, this represents the .metadata.generation that the condition was set based upon. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1alpha1StorageVersionCondition. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1alpha1StorageVersionCondition. # noqa: E501
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1alpha1StorageVersionCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1alpha1StorageVersionCondition.
+
+ The reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1alpha1StorageVersionCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and reason is None: # noqa: E501
+ raise ValueError("Invalid value for `reason`, must not be `None`") # noqa: E501
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1alpha1StorageVersionCondition. # noqa: E501
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :return: The status of this V1alpha1StorageVersionCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1alpha1StorageVersionCondition.
+
+ Status of the condition, one of True, False, Unknown. # noqa: E501
+
+ :param status: The status of this V1alpha1StorageVersionCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
+ raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1alpha1StorageVersionCondition. # noqa: E501
+
+ Type of the condition. # noqa: E501
+
+ :return: The type of this V1alpha1StorageVersionCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1alpha1StorageVersionCondition.
+
+ Type of the condition. # noqa: E501
+
+ :param type: The type of this V1alpha1StorageVersionCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1StorageVersionCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1StorageVersionCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_list.py
new file mode 100644
index 0000000000..7da8501af5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1StorageVersionList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha1StorageVersion]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1StorageVersionList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1StorageVersionList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1StorageVersionList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1StorageVersionList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1StorageVersionList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha1StorageVersionList. # noqa: E501
+
+ Items holds a list of StorageVersion # noqa: E501
+
+ :return: The items of this V1alpha1StorageVersionList. # noqa: E501
+ :rtype: list[V1alpha1StorageVersion]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha1StorageVersionList.
+
+ Items holds a list of StorageVersion # noqa: E501
+
+ :param items: The items of this V1alpha1StorageVersionList. # noqa: E501
+ :type: list[V1alpha1StorageVersion]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1StorageVersionList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1StorageVersionList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1StorageVersionList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1StorageVersionList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1StorageVersionList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1StorageVersionList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1StorageVersionList.
+
+
+ :param metadata: The metadata of this V1alpha1StorageVersionList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1StorageVersionList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1StorageVersionList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_status.py
new file mode 100644
index 0000000000..06b426ca14
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_storage_version_status.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1StorageVersionStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'common_encoding_version': 'str',
+ 'conditions': 'list[V1alpha1StorageVersionCondition]',
+ 'storage_versions': 'list[V1alpha1ServerStorageVersion]'
+ }
+
+ attribute_map = {
+ 'common_encoding_version': 'commonEncodingVersion',
+ 'conditions': 'conditions',
+ 'storage_versions': 'storageVersions'
+ }
+
+ def __init__(self, common_encoding_version=None, conditions=None, storage_versions=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1StorageVersionStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._common_encoding_version = None
+ self._conditions = None
+ self._storage_versions = None
+ self.discriminator = None
+
+ if common_encoding_version is not None:
+ self.common_encoding_version = common_encoding_version
+ if conditions is not None:
+ self.conditions = conditions
+ if storage_versions is not None:
+ self.storage_versions = storage_versions
+
+ @property
+ def common_encoding_version(self):
+ """Gets the common_encoding_version of this V1alpha1StorageVersionStatus. # noqa: E501
+
+ If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality. # noqa: E501
+
+ :return: The common_encoding_version of this V1alpha1StorageVersionStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._common_encoding_version
+
+ @common_encoding_version.setter
+ def common_encoding_version(self, common_encoding_version):
+ """Sets the common_encoding_version of this V1alpha1StorageVersionStatus.
+
+ If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating its storageVersionStatus entry before serving write operations, so that this field will be in sync with the reality. # noqa: E501
+
+ :param common_encoding_version: The common_encoding_version of this V1alpha1StorageVersionStatus. # noqa: E501
+ :type: str
+ """
+
+ self._common_encoding_version = common_encoding_version
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1alpha1StorageVersionStatus. # noqa: E501
+
+ The latest available observations of the storageVersion's state. # noqa: E501
+
+ :return: The conditions of this V1alpha1StorageVersionStatus. # noqa: E501
+ :rtype: list[V1alpha1StorageVersionCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1alpha1StorageVersionStatus.
+
+ The latest available observations of the storageVersion's state. # noqa: E501
+
+ :param conditions: The conditions of this V1alpha1StorageVersionStatus. # noqa: E501
+ :type: list[V1alpha1StorageVersionCondition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def storage_versions(self):
+ """Gets the storage_versions of this V1alpha1StorageVersionStatus. # noqa: E501
+
+ The reported versions per API server instance. # noqa: E501
+
+ :return: The storage_versions of this V1alpha1StorageVersionStatus. # noqa: E501
+ :rtype: list[V1alpha1ServerStorageVersion]
+ """
+ return self._storage_versions
+
+ @storage_versions.setter
+ def storage_versions(self, storage_versions):
+ """Sets the storage_versions of this V1alpha1StorageVersionStatus.
+
+ The reported versions per API server instance. # noqa: E501
+
+ :param storage_versions: The storage_versions of this V1alpha1StorageVersionStatus. # noqa: E501
+ :type: list[V1alpha1ServerStorageVersion]
+ """
+
+ self._storage_versions = storage_versions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1StorageVersionStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1StorageVersionStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_type_checking.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_type_checking.py
new file mode 100644
index 0000000000..3212c98fd1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_type_checking.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1TypeChecking(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression_warnings': 'list[V1alpha1ExpressionWarning]'
+ }
+
+ attribute_map = {
+ 'expression_warnings': 'expressionWarnings'
+ }
+
+ def __init__(self, expression_warnings=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1TypeChecking - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression_warnings = None
+ self.discriminator = None
+
+ if expression_warnings is not None:
+ self.expression_warnings = expression_warnings
+
+ @property
+ def expression_warnings(self):
+ """Gets the expression_warnings of this V1alpha1TypeChecking. # noqa: E501
+
+ The type checking warnings for each expression. # noqa: E501
+
+ :return: The expression_warnings of this V1alpha1TypeChecking. # noqa: E501
+ :rtype: list[V1alpha1ExpressionWarning]
+ """
+ return self._expression_warnings
+
+ @expression_warnings.setter
+ def expression_warnings(self, expression_warnings):
+ """Sets the expression_warnings of this V1alpha1TypeChecking.
+
+ The type checking warnings for each expression. # noqa: E501
+
+ :param expression_warnings: The expression_warnings of this V1alpha1TypeChecking. # noqa: E501
+ :type: list[V1alpha1ExpressionWarning]
+ """
+
+ self._expression_warnings = expression_warnings
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1TypeChecking):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1TypeChecking):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy.py
new file mode 100644
index 0000000000..15846379a7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicy(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1alpha1ValidatingAdmissionPolicySpec',
+ 'status': 'V1alpha1ValidatingAdmissionPolicyStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicy - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ValidatingAdmissionPolicy.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ValidatingAdmissionPolicy.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1ValidatingAdmissionPolicy.
+
+
+ :param metadata: The metadata of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+
+
+ :return: The spec of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: V1alpha1ValidatingAdmissionPolicySpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha1ValidatingAdmissionPolicy.
+
+
+ :param spec: The spec of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :type: V1alpha1ValidatingAdmissionPolicySpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+
+
+ :return: The status of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: V1alpha1ValidatingAdmissionPolicyStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1alpha1ValidatingAdmissionPolicy.
+
+
+ :param status: The status of this V1alpha1ValidatingAdmissionPolicy. # noqa: E501
+ :type: V1alpha1ValidatingAdmissionPolicyStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicy):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicy):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding.py
new file mode 100644
index 0000000000..aa0673a550
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicyBinding(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1alpha1ValidatingAdmissionPolicyBindingSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicyBinding - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ValidatingAdmissionPolicyBinding.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ValidatingAdmissionPolicyBinding.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1ValidatingAdmissionPolicyBinding.
+
+
+ :param metadata: The metadata of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+
+ :return: The spec of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: V1alpha1ValidatingAdmissionPolicyBindingSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha1ValidatingAdmissionPolicyBinding.
+
+
+ :param spec: The spec of this V1alpha1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: V1alpha1ValidatingAdmissionPolicyBindingSpec
+ """
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyBinding):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyBinding):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_list.py
new file mode 100644
index 0000000000..8ee3fedb1b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_list.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicyBindingList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha1ValidatingAdmissionPolicyBinding]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicyBindingList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if items is not None:
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ValidatingAdmissionPolicyBindingList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+ List of PolicyBinding. # noqa: E501
+
+ :return: The items of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: list[V1alpha1ValidatingAdmissionPolicyBinding]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha1ValidatingAdmissionPolicyBindingList.
+
+ List of PolicyBinding. # noqa: E501
+
+ :param items: The items of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: list[V1alpha1ValidatingAdmissionPolicyBinding]
+ """
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ValidatingAdmissionPolicyBindingList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1ValidatingAdmissionPolicyBindingList.
+
+
+ :param metadata: The metadata of this V1alpha1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyBindingList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyBindingList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_spec.py
new file mode 100644
index 0000000000..58619f0d15
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_binding_spec.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicyBindingSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'match_resources': 'V1alpha1MatchResources',
+ 'param_ref': 'V1alpha1ParamRef',
+ 'policy_name': 'str',
+ 'validation_actions': 'list[str]'
+ }
+
+ attribute_map = {
+ 'match_resources': 'matchResources',
+ 'param_ref': 'paramRef',
+ 'policy_name': 'policyName',
+ 'validation_actions': 'validationActions'
+ }
+
+ def __init__(self, match_resources=None, param_ref=None, policy_name=None, validation_actions=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicyBindingSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._match_resources = None
+ self._param_ref = None
+ self._policy_name = None
+ self._validation_actions = None
+ self.discriminator = None
+
+ if match_resources is not None:
+ self.match_resources = match_resources
+ if param_ref is not None:
+ self.param_ref = param_ref
+ if policy_name is not None:
+ self.policy_name = policy_name
+ if validation_actions is not None:
+ self.validation_actions = validation_actions
+
+ @property
+ def match_resources(self):
+ """Gets the match_resources of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+
+ :return: The match_resources of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: V1alpha1MatchResources
+ """
+ return self._match_resources
+
+ @match_resources.setter
+ def match_resources(self, match_resources):
+ """Sets the match_resources of this V1alpha1ValidatingAdmissionPolicyBindingSpec.
+
+
+ :param match_resources: The match_resources of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: V1alpha1MatchResources
+ """
+
+ self._match_resources = match_resources
+
+ @property
+ def param_ref(self):
+ """Gets the param_ref of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+
+ :return: The param_ref of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: V1alpha1ParamRef
+ """
+ return self._param_ref
+
+ @param_ref.setter
+ def param_ref(self, param_ref):
+ """Sets the param_ref of this V1alpha1ValidatingAdmissionPolicyBindingSpec.
+
+
+ :param param_ref: The param_ref of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: V1alpha1ParamRef
+ """
+
+ self._param_ref = param_ref
+
+ @property
+ def policy_name(self):
+ """Gets the policy_name of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+ PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required. # noqa: E501
+
+ :return: The policy_name of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._policy_name
+
+ @policy_name.setter
+ def policy_name(self, policy_name):
+ """Sets the policy_name of this V1alpha1ValidatingAdmissionPolicyBindingSpec.
+
+ PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required. # noqa: E501
+
+ :param policy_name: The policy_name of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: str
+ """
+
+ self._policy_name = policy_name
+
+ @property
+ def validation_actions(self):
+ """Gets the validation_actions of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+ validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions. Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. validationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action. The supported actions values are: \"Deny\" specifies that a validation failure results in a denied request. \"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses. \"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"` Clients should expect to handle additional values by ignoring any values not recognized. 
\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers. Required. # noqa: E501
+
+ :return: The validation_actions of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._validation_actions
+
+ @validation_actions.setter
+ def validation_actions(self, validation_actions):
+ """Sets the validation_actions of this V1alpha1ValidatingAdmissionPolicyBindingSpec.
+
+ validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions. Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. validationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action. The supported actions values are: \"Deny\" specifies that a validation failure results in a denied request. \"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses. \"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"` Clients should expect to handle additional values by ignoring any values not recognized. 
\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers. Required. # noqa: E501
+
+ :param validation_actions: The validation_actions of this V1alpha1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: list[str]
+ """
+
+ self._validation_actions = validation_actions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyBindingSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyBindingSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_list.py
new file mode 100644
index 0000000000..d3edb68a86
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_list.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicyList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha1ValidatingAdmissionPolicy]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicyList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if items is not None:
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha1ValidatingAdmissionPolicyList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+
+ List of ValidatingAdmissionPolicy. # noqa: E501
+
+ :return: The items of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: list[V1alpha1ValidatingAdmissionPolicy]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha1ValidatingAdmissionPolicyList.
+
+ List of ValidatingAdmissionPolicy. # noqa: E501
+
+ :param items: The items of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: list[V1alpha1ValidatingAdmissionPolicy]
+ """
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha1ValidatingAdmissionPolicyList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha1ValidatingAdmissionPolicyList.
+
+
+ :param metadata: The metadata of this V1alpha1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_spec.py
new file mode 100644
index 0000000000..ca0e30880d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_spec.py
@@ -0,0 +1,286 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicySpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'audit_annotations': 'list[V1alpha1AuditAnnotation]',
+ 'failure_policy': 'str',
+ 'match_conditions': 'list[V1alpha1MatchCondition]',
+ 'match_constraints': 'V1alpha1MatchResources',
+ 'param_kind': 'V1alpha1ParamKind',
+ 'validations': 'list[V1alpha1Validation]',
+ 'variables': 'list[V1alpha1Variable]'
+ }
+
+ attribute_map = {
+ 'audit_annotations': 'auditAnnotations',
+ 'failure_policy': 'failurePolicy',
+ 'match_conditions': 'matchConditions',
+ 'match_constraints': 'matchConstraints',
+ 'param_kind': 'paramKind',
+ 'validations': 'validations',
+ 'variables': 'variables'
+ }
+
+ def __init__(self, audit_annotations=None, failure_policy=None, match_conditions=None, match_constraints=None, param_kind=None, validations=None, variables=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicySpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._audit_annotations = None
+ self._failure_policy = None
+ self._match_conditions = None
+ self._match_constraints = None
+ self._param_kind = None
+ self._validations = None
+ self._variables = None
+ self.discriminator = None
+
+ if audit_annotations is not None:
+ self.audit_annotations = audit_annotations
+ if failure_policy is not None:
+ self.failure_policy = failure_policy
+ if match_conditions is not None:
+ self.match_conditions = match_conditions
+ if match_constraints is not None:
+ self.match_constraints = match_constraints
+ if param_kind is not None:
+ self.param_kind = param_kind
+ if validations is not None:
+ self.validations = validations
+ if variables is not None:
+ self.variables = variables
+
+ @property
+ def audit_annotations(self):
+ """Gets the audit_annotations of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required. # noqa: E501
+
+ :return: The audit_annotations of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1alpha1AuditAnnotation]
+ """
+ return self._audit_annotations
+
+ @audit_annotations.setter
+ def audit_annotations(self, audit_annotations):
+ """Sets the audit_annotations of this V1alpha1ValidatingAdmissionPolicySpec.
+
+ auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required. # noqa: E501
+
+ :param audit_annotations: The audit_annotations of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1alpha1AuditAnnotation]
+ """
+
+ self._audit_annotations = audit_annotations
+
+ @property
+ def failure_policy(self):
+ """Gets the failure_policy of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. failurePolicy does not define how validations that evaluate to false are handled. When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced. Allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
+
+ :return: The failure_policy of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: str
+ """
+ return self._failure_policy
+
+ @failure_policy.setter
+ def failure_policy(self, failure_policy):
+ """Sets the failure_policy of this V1alpha1ValidatingAdmissionPolicySpec.
+
+ failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. failurePolicy does not define how validations that evaluate to false are handled. When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced. Allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
+
+ :param failure_policy: The failure_policy of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: str
+ """
+
+ self._failure_policy = failure_policy
+
+ @property
+ def match_conditions(self):
+ """Gets the match_conditions of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. If a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the policy is skipped # noqa: E501
+
+ :return: The match_conditions of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1alpha1MatchCondition]
+ """
+ return self._match_conditions
+
+ @match_conditions.setter
+ def match_conditions(self, match_conditions):
+ """Sets the match_conditions of this V1alpha1ValidatingAdmissionPolicySpec.
+
+ MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. If a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the policy is skipped # noqa: E501
+
+ :param match_conditions: The match_conditions of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1alpha1MatchCondition]
+ """
+
+ self._match_conditions = match_conditions
+
+ @property
+ def match_constraints(self):
+ """Gets the match_constraints of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+
+ :return: The match_constraints of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: V1alpha1MatchResources
+ """
+ return self._match_constraints
+
+ @match_constraints.setter
+ def match_constraints(self, match_constraints):
+ """Sets the match_constraints of this V1alpha1ValidatingAdmissionPolicySpec.
+
+
+ :param match_constraints: The match_constraints of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: V1alpha1MatchResources
+ """
+
+ self._match_constraints = match_constraints
+
+ @property
+ def param_kind(self):
+ """Gets the param_kind of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+
+ :return: The param_kind of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: V1alpha1ParamKind
+ """
+ return self._param_kind
+
+ @param_kind.setter
+ def param_kind(self, param_kind):
+ """Sets the param_kind of this V1alpha1ValidatingAdmissionPolicySpec.
+
+
+ :param param_kind: The param_kind of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: V1alpha1ParamKind
+ """
+
+ self._param_kind = param_kind
+
+ @property
+ def validations(self):
+ """Gets the validations of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required. # noqa: E501
+
+ :return: The validations of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1alpha1Validation]
+ """
+ return self._validations
+
+ @validations.setter
+ def validations(self, validations):
+ """Sets the validations of this V1alpha1ValidatingAdmissionPolicySpec.
+
+ Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required. # noqa: E501
+
+ :param validations: The validations of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1alpha1Validation]
+ """
+
+ self._validations = validations
+
+ @property
+ def variables(self):
+ """Gets the variables of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy. The expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic. # noqa: E501
+
+ :return: The variables of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1alpha1Variable]
+ """
+ return self._variables
+
+ @variables.setter
+ def variables(self, variables):
+ """Sets the variables of this V1alpha1ValidatingAdmissionPolicySpec.
+
+ Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy. The expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic. # noqa: E501
+
+ :param variables: The variables of this V1alpha1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1alpha1Variable]
+ """
+
+ self._variables = variables
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicySpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicySpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_status.py
new file mode 100644
index 0000000000..a0d2d08b6e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validating_admission_policy_status.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1ValidatingAdmissionPolicyStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1Condition]',
+ 'observed_generation': 'int',
+ 'type_checking': 'V1alpha1TypeChecking'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions',
+ 'observed_generation': 'observedGeneration',
+ 'type_checking': 'typeChecking'
+ }
+
+ def __init__(self, conditions=None, observed_generation=None, type_checking=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1ValidatingAdmissionPolicyStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self._observed_generation = None
+ self._type_checking = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+ if type_checking is not None:
+ self.type_checking = type_checking
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+
+ The conditions represent the latest available observations of a policy's current state. # noqa: E501
+
+ :return: The conditions of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :rtype: list[V1Condition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1alpha1ValidatingAdmissionPolicyStatus.
+
+ The conditions represent the latest available observations of a policy's current state. # noqa: E501
+
+ :param conditions: The conditions of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :type: list[V1Condition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+
+ The generation observed by the controller. # noqa: E501
+
+ :return: The observed_generation of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1alpha1ValidatingAdmissionPolicyStatus.
+
+ The generation observed by the controller. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ @property
+ def type_checking(self):
+ """Gets the type_checking of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+
+
+ :return: The type_checking of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :rtype: V1alpha1TypeChecking
+ """
+ return self._type_checking
+
+ @type_checking.setter
+ def type_checking(self, type_checking):
+ """Sets the type_checking of this V1alpha1ValidatingAdmissionPolicyStatus.
+
+
+ :param type_checking: The type_checking of this V1alpha1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :type: V1alpha1TypeChecking
+ """
+
+ self._type_checking = type_checking
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1ValidatingAdmissionPolicyStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validation.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validation.py
new file mode 100644
index 0000000000..7f5d3a0a74
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_validation.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1Validation(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'message': 'str',
+ 'message_expression': 'str',
+ 'reason': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'message': 'message',
+ 'message_expression': 'messageExpression',
+ 'reason': 'reason'
+ }
+
+ def __init__(self, expression=None, message=None, message_expression=None, reason=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1Validation - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._message = None
+ self._message_expression = None
+ self._reason = None
+ self.discriminator = None
+
+ self.expression = expression
+ if message is not None:
+ self.message = message
+ if message_expression is not None:
+ self.message_expression = message_expression
+ if reason is not None:
+ self.reason = reason
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1alpha1Validation. # noqa: E501
+
+ Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"} - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"} - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"} Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. Required. # noqa: E501
+
+ :return: The expression of this V1alpha1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1alpha1Validation.
+
+ Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"} - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"} - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"} Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. Required. # noqa: E501
+
+ :param expression: The expression of this V1alpha1Validation. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def message(self):
+ """Gets the message of this V1alpha1Validation. # noqa: E501
+
+ Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\". # noqa: E501
+
+ :return: The message of this V1alpha1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1alpha1Validation.
+
+ Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\". # noqa: E501
+
+ :param message: The message of this V1alpha1Validation. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def message_expression(self):
+ """Gets the message_expression of this V1alpha1Validation. # noqa: E501
+
+ messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\" # noqa: E501
+
+ :return: The message_expression of this V1alpha1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._message_expression
+
+ @message_expression.setter
+ def message_expression(self, message_expression):
+ """Sets the message_expression of this V1alpha1Validation.
+
+ messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\" # noqa: E501
+
+ :param message_expression: The message_expression of this V1alpha1Validation. # noqa: E501
+ :type: str
+ """
+
+ self._message_expression = message_expression
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1alpha1Validation. # noqa: E501
+
+ Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client. # noqa: E501
+
+ :return: The reason of this V1alpha1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1alpha1Validation.
+
+ Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client. # noqa: E501
+
+ :param reason: The reason of this V1alpha1Validation. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1Validation):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1Validation):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_variable.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_variable.py
new file mode 100644
index 0000000000..c4c56df2cc
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha1_variable.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha1Variable(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'name': 'name'
+ }
+
+ def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha1Variable - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._name = None
+ self.discriminator = None
+
+ self.expression = expression
+ self.name = name
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1alpha1Variable. # noqa: E501
+
+ Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation. # noqa: E501
+
+ :return: The expression of this V1alpha1Variable. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1alpha1Variable.
+
+ Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation. # noqa: E501
+
+ :param expression: The expression of this V1alpha1Variable. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha1Variable. # noqa: E501
+
+ Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo` # noqa: E501
+
+ :return: The name of this V1alpha1Variable. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha1Variable.
+
+ Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo` # noqa: E501
+
+ :param name: The name of this V1alpha1Variable. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha1Variable):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha1Variable):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_allocation_result.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_allocation_result.py
new file mode 100644
index 0000000000..791d1d06d4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_allocation_result.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2AllocationResult(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'available_on_nodes': 'V1NodeSelector',
+ 'resource_handles': 'list[V1alpha2ResourceHandle]',
+ 'shareable': 'bool'
+ }
+
+ attribute_map = {
+ 'available_on_nodes': 'availableOnNodes',
+ 'resource_handles': 'resourceHandles',
+ 'shareable': 'shareable'
+ }
+
+ def __init__(self, available_on_nodes=None, resource_handles=None, shareable=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2AllocationResult - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._available_on_nodes = None
+ self._resource_handles = None
+ self._shareable = None
+ self.discriminator = None
+
+ if available_on_nodes is not None:
+ self.available_on_nodes = available_on_nodes
+ if resource_handles is not None:
+ self.resource_handles = resource_handles
+ if shareable is not None:
+ self.shareable = shareable
+
+ @property
+ def available_on_nodes(self):
+ """Gets the available_on_nodes of this V1alpha2AllocationResult. # noqa: E501
+
+
+ :return: The available_on_nodes of this V1alpha2AllocationResult. # noqa: E501
+ :rtype: V1NodeSelector
+ """
+ return self._available_on_nodes
+
+ @available_on_nodes.setter
+ def available_on_nodes(self, available_on_nodes):
+ """Sets the available_on_nodes of this V1alpha2AllocationResult.
+
+
+ :param available_on_nodes: The available_on_nodes of this V1alpha2AllocationResult. # noqa: E501
+ :type: V1NodeSelector
+ """
+
+ self._available_on_nodes = available_on_nodes
+
+ @property
+ def resource_handles(self):
+ """Gets the resource_handles of this V1alpha2AllocationResult. # noqa: E501
+
+ ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed. Setting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in. # noqa: E501
+
+ :return: The resource_handles of this V1alpha2AllocationResult. # noqa: E501
+ :rtype: list[V1alpha2ResourceHandle]
+ """
+ return self._resource_handles
+
+ @resource_handles.setter
+ def resource_handles(self, resource_handles):
+ """Sets the resource_handles of this V1alpha2AllocationResult.
+
+ ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed. Setting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in. # noqa: E501
+
+ :param resource_handles: The resource_handles of this V1alpha2AllocationResult. # noqa: E501
+ :type: list[V1alpha2ResourceHandle]
+ """
+
+ self._resource_handles = resource_handles
+
+ @property
+ def shareable(self):
+ """Gets the shareable of this V1alpha2AllocationResult. # noqa: E501
+
+ Shareable determines whether the resource supports more than one consumer at a time. # noqa: E501
+
+ :return: The shareable of this V1alpha2AllocationResult. # noqa: E501
+ :rtype: bool
+ """
+ return self._shareable
+
+ @shareable.setter
+ def shareable(self, shareable):
+ """Sets the shareable of this V1alpha2AllocationResult.
+
+ Shareable determines whether the resource supports more than one consumer at a time. # noqa: E501
+
+ :param shareable: The shareable of this V1alpha2AllocationResult. # noqa: E501
+ :type: bool
+ """
+
+ self._shareable = shareable
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2AllocationResult):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2AllocationResult):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context.py
new file mode 100644
index 0000000000..8d7c6e6951
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2PodSchedulingContext(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1alpha2PodSchedulingContextSpec',
+ 'status': 'V1alpha2PodSchedulingContextStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2PodSchedulingContext - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha2PodSchedulingContext. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha2PodSchedulingContext. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha2PodSchedulingContext.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha2PodSchedulingContext. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha2PodSchedulingContext. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha2PodSchedulingContext. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha2PodSchedulingContext.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha2PodSchedulingContext. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha2PodSchedulingContext. # noqa: E501
+
+
+ :return: The metadata of this V1alpha2PodSchedulingContext. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha2PodSchedulingContext.
+
+
+ :param metadata: The metadata of this V1alpha2PodSchedulingContext. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha2PodSchedulingContext. # noqa: E501
+
+
+ :return: The spec of this V1alpha2PodSchedulingContext. # noqa: E501
+ :rtype: V1alpha2PodSchedulingContextSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha2PodSchedulingContext.
+
+
+ :param spec: The spec of this V1alpha2PodSchedulingContext. # noqa: E501
+ :type: V1alpha2PodSchedulingContextSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1alpha2PodSchedulingContext. # noqa: E501
+
+
+ :return: The status of this V1alpha2PodSchedulingContext. # noqa: E501
+ :rtype: V1alpha2PodSchedulingContextStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1alpha2PodSchedulingContext.
+
+
+ :param status: The status of this V1alpha2PodSchedulingContext. # noqa: E501
+ :type: V1alpha2PodSchedulingContextStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContext):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContext):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_list.py
new file mode 100644
index 0000000000..ff056c1976
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2PodSchedulingContextList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha2PodSchedulingContext]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2PodSchedulingContextList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha2PodSchedulingContextList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha2PodSchedulingContextList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha2PodSchedulingContextList. # noqa: E501
+
+ Items is the list of PodSchedulingContext objects. # noqa: E501
+
+ :return: The items of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :rtype: list[V1alpha2PodSchedulingContext]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha2PodSchedulingContextList.
+
+ Items is the list of PodSchedulingContext objects. # noqa: E501
+
+ :param items: The items of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :type: list[V1alpha2PodSchedulingContext]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha2PodSchedulingContextList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha2PodSchedulingContextList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha2PodSchedulingContextList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha2PodSchedulingContextList.
+
+
+ :param metadata: The metadata of this V1alpha2PodSchedulingContextList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContextList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContextList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_spec.py
new file mode 100644
index 0000000000..6e3823c428
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_spec.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2PodSchedulingContextSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'potential_nodes': 'list[str]',
+ 'selected_node': 'str'
+ }
+
+ attribute_map = {
+ 'potential_nodes': 'potentialNodes',
+ 'selected_node': 'selectedNode'
+ }
+
+ def __init__(self, potential_nodes=None, selected_node=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2PodSchedulingContextSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._potential_nodes = None
+ self._selected_node = None
+ self.discriminator = None
+
+ if potential_nodes is not None:
+ self.potential_nodes = potential_nodes
+ if selected_node is not None:
+ self.selected_node = selected_node
+
+ @property
+ def potential_nodes(self):
+ """Gets the potential_nodes of this V1alpha2PodSchedulingContextSpec. # noqa: E501
+
+ PotentialNodes lists nodes where the Pod might be able to run. The size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced. # noqa: E501
+
+ :return: The potential_nodes of this V1alpha2PodSchedulingContextSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._potential_nodes
+
+ @potential_nodes.setter
+ def potential_nodes(self, potential_nodes):
+ """Sets the potential_nodes of this V1alpha2PodSchedulingContextSpec.
+
+ PotentialNodes lists nodes where the Pod might be able to run. The size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced. # noqa: E501
+
+ :param potential_nodes: The potential_nodes of this V1alpha2PodSchedulingContextSpec. # noqa: E501
+ :type: list[str]
+ """
+
+ self._potential_nodes = potential_nodes
+
+ @property
+ def selected_node(self):
+ """Gets the selected_node of this V1alpha2PodSchedulingContextSpec. # noqa: E501
+
+ SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted. # noqa: E501
+
+ :return: The selected_node of this V1alpha2PodSchedulingContextSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._selected_node
+
+ @selected_node.setter
+ def selected_node(self, selected_node):
+ """Sets the selected_node of this V1alpha2PodSchedulingContextSpec.
+
+ SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted. # noqa: E501
+
+ :param selected_node: The selected_node of this V1alpha2PodSchedulingContextSpec. # noqa: E501
+ :type: str
+ """
+
+ self._selected_node = selected_node
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContextSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContextSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_status.py
new file mode 100644
index 0000000000..1f3bfe4344
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_pod_scheduling_context_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2PodSchedulingContextStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'resource_claims': 'list[V1alpha2ResourceClaimSchedulingStatus]'
+ }
+
+ attribute_map = {
+ 'resource_claims': 'resourceClaims'
+ }
+
+ def __init__(self, resource_claims=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2PodSchedulingContextStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._resource_claims = None
+ self.discriminator = None
+
+ if resource_claims is not None:
+ self.resource_claims = resource_claims
+
+ @property
+ def resource_claims(self):
+ """Gets the resource_claims of this V1alpha2PodSchedulingContextStatus. # noqa: E501
+
+ ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode. # noqa: E501
+
+ :return: The resource_claims of this V1alpha2PodSchedulingContextStatus. # noqa: E501
+ :rtype: list[V1alpha2ResourceClaimSchedulingStatus]
+ """
+ return self._resource_claims
+
+ @resource_claims.setter
+ def resource_claims(self, resource_claims):
+ """Sets the resource_claims of this V1alpha2PodSchedulingContextStatus.
+
+ ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode. # noqa: E501
+
+ :param resource_claims: The resource_claims of this V1alpha2PodSchedulingContextStatus. # noqa: E501
+ :type: list[V1alpha2ResourceClaimSchedulingStatus]
+ """
+
+ self._resource_claims = resource_claims
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContextStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2PodSchedulingContextStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim.py
new file mode 100644
index 0000000000..8ddb2c7f59
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaim(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1alpha2ResourceClaimSpec',
+ 'status': 'V1alpha2ResourceClaimStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaim - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha2ResourceClaim. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha2ResourceClaim. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha2ResourceClaim.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha2ResourceClaim. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha2ResourceClaim. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha2ResourceClaim. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha2ResourceClaim.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha2ResourceClaim. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha2ResourceClaim. # noqa: E501
+
+
+ :return: The metadata of this V1alpha2ResourceClaim. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha2ResourceClaim.
+
+
+ :param metadata: The metadata of this V1alpha2ResourceClaim. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1alpha2ResourceClaim. # noqa: E501
+
+
+ :return: The spec of this V1alpha2ResourceClaim. # noqa: E501
+ :rtype: V1alpha2ResourceClaimSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1alpha2ResourceClaim.
+
+
+ :param spec: The spec of this V1alpha2ResourceClaim. # noqa: E501
+ :type: V1alpha2ResourceClaimSpec
+ """
+ if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
+ raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1alpha2ResourceClaim. # noqa: E501
+
+
+ :return: The status of this V1alpha2ResourceClaim. # noqa: E501
+ :rtype: V1alpha2ResourceClaimStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1alpha2ResourceClaim.
+
+
+ :param status: The status of this V1alpha2ResourceClaim. # noqa: E501
+ :type: V1alpha2ResourceClaimStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaim):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaim):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_consumer_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_consumer_reference.py
new file mode 100644
index 0000000000..b02f4b5699
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_consumer_reference.py
@@ -0,0 +1,209 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaimConsumerReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_group': 'str',
+ 'name': 'str',
+ 'resource': 'str',
+ 'uid': 'str'
+ }
+
+ attribute_map = {
+ 'api_group': 'apiGroup',
+ 'name': 'name',
+ 'resource': 'resource',
+ 'uid': 'uid'
+ }
+
+ def __init__(self, api_group=None, name=None, resource=None, uid=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaimConsumerReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_group = None
+ self._name = None
+ self._resource = None
+ self._uid = None
+ self.discriminator = None
+
+ if api_group is not None:
+ self.api_group = api_group
+ self.name = name
+ self.resource = resource
+ self.uid = uid
+
+ @property
+ def api_group(self):
+ """Gets the api_group of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+
+ APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
+
+ :return: The api_group of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_group
+
+ @api_group.setter
+ def api_group(self, api_group):
+ """Sets the api_group of this V1alpha2ResourceClaimConsumerReference.
+
+ APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
+
+ :param api_group: The api_group of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_group = api_group
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+
+ Name is the name of resource being referenced. # noqa: E501
+
+ :return: The name of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha2ResourceClaimConsumerReference.
+
+ Name is the name of resource being referenced. # noqa: E501
+
+ :param name: The name of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def resource(self):
+ """Gets the resource of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+
+ Resource is the type of resource being referenced, for example \"pods\". # noqa: E501
+
+ :return: The resource of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._resource
+
+ @resource.setter
+ def resource(self, resource):
+ """Sets the resource of this V1alpha2ResourceClaimConsumerReference.
+
+ Resource is the type of resource being referenced, for example \"pods\". # noqa: E501
+
+ :param resource: The resource of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
+ raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
+
+ self._resource = resource
+
+ @property
+ def uid(self):
+ """Gets the uid of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+
+ UID identifies exactly one incarnation of the resource. # noqa: E501
+
+ :return: The uid of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :rtype: str
+ """
+ return self._uid
+
+ @uid.setter
+ def uid(self, uid):
+ """Sets the uid of this V1alpha2ResourceClaimConsumerReference.
+
+ UID identifies exactly one incarnation of the resource. # noqa: E501
+
+ :param uid: The uid of this V1alpha2ResourceClaimConsumerReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
+ raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
+
+ self._uid = uid
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaimConsumerReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaimConsumerReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_list.py
new file mode 100644
index 0000000000..e8117f1976
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaimList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1alpha2ResourceClaim]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaimList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1alpha2ResourceClaimList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1alpha2ResourceClaimList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1alpha2ResourceClaimList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1alpha2ResourceClaimList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1alpha2ResourceClaimList. # noqa: E501
+
+ Items is the list of resource claims. # noqa: E501
+
+ :return: The items of this V1alpha2ResourceClaimList. # noqa: E501
+ :rtype: list[V1alpha2ResourceClaim]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1alpha2ResourceClaimList.
+
+ Items is the list of resource claims. # noqa: E501
+
+ :param items: The items of this V1alpha2ResourceClaimList. # noqa: E501
+ :type: list[V1alpha2ResourceClaim]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha2ResourceClaimList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1alpha2ResourceClaimList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha2ResourceClaimList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1alpha2ResourceClaimList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1alpha2ResourceClaimList. # noqa: E501
+
+
+ :return: The metadata of this V1alpha2ResourceClaimList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1alpha2ResourceClaimList.
+
+
+ :param metadata: The metadata of this V1alpha2ResourceClaimList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaimList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaimList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_parameters_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_parameters_reference.py
new file mode 100644
index 0000000000..fca2dafed5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_parameters_reference.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaimParametersReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_group': 'str',
+ 'kind': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'api_group': 'apiGroup',
+ 'kind': 'kind',
+ 'name': 'name'
+ }
+
+ def __init__(self, api_group=None, kind=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaimParametersReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_group = None
+ self._kind = None
+ self._name = None
+ self.discriminator = None
+
+ if api_group is not None:
+ self.api_group = api_group
+ self.kind = kind
+ self.name = name
+
+ @property
+ def api_group(self):
+ """Gets the api_group of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+
+ APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
+
+ :return: The api_group of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_group
+
+ @api_group.setter
+ def api_group(self, api_group):
+ """Sets the api_group of this V1alpha2ResourceClaimParametersReference.
+
+ APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
+
+ :param api_group: The api_group of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_group = api_group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+
+ Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\". # noqa: E501
+
+ :return: The kind of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha2ResourceClaimParametersReference.
+
+ Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\". # noqa: E501
+
+ :param kind: The kind of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+
+ Name is the name of resource being referenced. # noqa: E501
+
+ :return: The name of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha2ResourceClaimParametersReference.
+
+ Name is the name of resource being referenced. # noqa: E501
+
+ :param name: The name of this V1alpha2ResourceClaimParametersReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaimParametersReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaimParametersReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_scheduling_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_scheduling_status.py
new file mode 100644
index 0000000000..4809a9d687
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_scheduling_status.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaimSchedulingStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'unsuitable_nodes': 'list[str]'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'unsuitable_nodes': 'unsuitableNodes'
+ }
+
+ def __init__(self, name=None, unsuitable_nodes=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaimSchedulingStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._unsuitable_nodes = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if unsuitable_nodes is not None:
+ self.unsuitable_nodes = unsuitable_nodes
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha2ResourceClaimSchedulingStatus. # noqa: E501
+
+ Name matches the pod.spec.resourceClaims[*].Name field. # noqa: E501
+
+ :return: The name of this V1alpha2ResourceClaimSchedulingStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha2ResourceClaimSchedulingStatus.
+
+ Name matches the pod.spec.resourceClaims[*].Name field. # noqa: E501
+
+ :param name: The name of this V1alpha2ResourceClaimSchedulingStatus. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def unsuitable_nodes(self):
+ """Gets the unsuitable_nodes of this V1alpha2ResourceClaimSchedulingStatus. # noqa: E501
+
+ UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for. The size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced. # noqa: E501
+
+ :return: The unsuitable_nodes of this V1alpha2ResourceClaimSchedulingStatus. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._unsuitable_nodes
+
+ @unsuitable_nodes.setter
+ def unsuitable_nodes(self, unsuitable_nodes):
+ """Sets the unsuitable_nodes of this V1alpha2ResourceClaimSchedulingStatus.
+
+ UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for. The size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced. # noqa: E501
+
+ :param unsuitable_nodes: The unsuitable_nodes of this V1alpha2ResourceClaimSchedulingStatus. # noqa: E501
+ :type: list[str]
+ """
+
+ self._unsuitable_nodes = unsuitable_nodes
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaimSchedulingStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaimSchedulingStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_spec.py
new file mode 100644
index 0000000000..c724980fc9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_spec.py
@@ -0,0 +1,177 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaimSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'allocation_mode': 'str',
+ 'parameters_ref': 'V1alpha2ResourceClaimParametersReference',
+ 'resource_class_name': 'str'
+ }
+
+ attribute_map = {
+ 'allocation_mode': 'allocationMode',
+ 'parameters_ref': 'parametersRef',
+ 'resource_class_name': 'resourceClassName'
+ }
+
+ def __init__(self, allocation_mode=None, parameters_ref=None, resource_class_name=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaimSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._allocation_mode = None
+ self._parameters_ref = None
+ self._resource_class_name = None
+ self.discriminator = None
+
+ if allocation_mode is not None:
+ self.allocation_mode = allocation_mode
+ if parameters_ref is not None:
+ self.parameters_ref = parameters_ref
+ self.resource_class_name = resource_class_name
+
+ @property
+ def allocation_mode(self):
+ """Gets the allocation_mode of this V1alpha2ResourceClaimSpec. # noqa: E501
+
+ Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default. # noqa: E501
+
+ :return: The allocation_mode of this V1alpha2ResourceClaimSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._allocation_mode
+
+ @allocation_mode.setter
+ def allocation_mode(self, allocation_mode):
+ """Sets the allocation_mode of this V1alpha2ResourceClaimSpec.
+
+ Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default. # noqa: E501
+
+ :param allocation_mode: The allocation_mode of this V1alpha2ResourceClaimSpec. # noqa: E501
+ :type: str
+ """
+
+ self._allocation_mode = allocation_mode
+
+ @property
+ def parameters_ref(self):
+ """Gets the parameters_ref of this V1alpha2ResourceClaimSpec. # noqa: E501
+
+
+ :return: The parameters_ref of this V1alpha2ResourceClaimSpec. # noqa: E501
+ :rtype: V1alpha2ResourceClaimParametersReference
+ """
+ return self._parameters_ref
+
+ @parameters_ref.setter
+ def parameters_ref(self, parameters_ref):
+ """Sets the parameters_ref of this V1alpha2ResourceClaimSpec.
+
+
+ :param parameters_ref: The parameters_ref of this V1alpha2ResourceClaimSpec. # noqa: E501
+ :type: V1alpha2ResourceClaimParametersReference
+ """
+
+ self._parameters_ref = parameters_ref
+
+ @property
+ def resource_class_name(self):
+ """Gets the resource_class_name of this V1alpha2ResourceClaimSpec. # noqa: E501
+
+ ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment. # noqa: E501
+
+ :return: The resource_class_name of this V1alpha2ResourceClaimSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._resource_class_name
+
+ @resource_class_name.setter
+ def resource_class_name(self, resource_class_name):
+ """Sets the resource_class_name of this V1alpha2ResourceClaimSpec.
+
+ ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment. # noqa: E501
+
+ :param resource_class_name: The resource_class_name of this V1alpha2ResourceClaimSpec. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and resource_class_name is None: # noqa: E501
+ raise ValueError("Invalid value for `resource_class_name`, must not be `None`") # noqa: E501
+
+ self._resource_class_name = resource_class_name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaimSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaimSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_status.py
new file mode 100644
index 0000000000..c893f71469
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_status.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClaimStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'allocation': 'V1alpha2AllocationResult',
+ 'deallocation_requested': 'bool',
+ 'driver_name': 'str',
+ 'reserved_for': 'list[V1alpha2ResourceClaimConsumerReference]'
+ }
+
+ attribute_map = {
+ 'allocation': 'allocation',
+ 'deallocation_requested': 'deallocationRequested',
+ 'driver_name': 'driverName',
+ 'reserved_for': 'reservedFor'
+ }
+
+ def __init__(self, allocation=None, deallocation_requested=None, driver_name=None, reserved_for=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClaimStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._allocation = None
+ self._deallocation_requested = None
+ self._driver_name = None
+ self._reserved_for = None
+ self.discriminator = None
+
+ if allocation is not None:
+ self.allocation = allocation
+ if deallocation_requested is not None:
+ self.deallocation_requested = deallocation_requested
+ if driver_name is not None:
+ self.driver_name = driver_name
+ if reserved_for is not None:
+ self.reserved_for = reserved_for
+
+ @property
+ def allocation(self):
+ """Gets the allocation of this V1alpha2ResourceClaimStatus. # noqa: E501
+
+
+ :return: The allocation of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :rtype: V1alpha2AllocationResult
+ """
+ return self._allocation
+
+ @allocation.setter
+ def allocation(self, allocation):
+ """Sets the allocation of this V1alpha2ResourceClaimStatus.
+
+
+ :param allocation: The allocation of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :type: V1alpha2AllocationResult
+ """
+
+ self._allocation = allocation
+
+ @property
+ def deallocation_requested(self):
+ """Gets the deallocation_requested of this V1alpha2ResourceClaimStatus. # noqa: E501
+
+ DeallocationRequested indicates that a ResourceClaim is to be deallocated. The driver then must deallocate this claim and reset the field together with clearing the Allocation field. While DeallocationRequested is set, no new consumers may be added to ReservedFor. # noqa: E501
+
+ :return: The deallocation_requested of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :rtype: bool
+ """
+ return self._deallocation_requested
+
+ @deallocation_requested.setter
+ def deallocation_requested(self, deallocation_requested):
+ """Sets the deallocation_requested of this V1alpha2ResourceClaimStatus.
+
+ DeallocationRequested indicates that a ResourceClaim is to be deallocated. The driver then must deallocate this claim and reset the field together with clearing the Allocation field. While DeallocationRequested is set, no new consumers may be added to ReservedFor. # noqa: E501
+
+ :param deallocation_requested: The deallocation_requested of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :type: bool
+ """
+
+ self._deallocation_requested = deallocation_requested
+
+ @property
+ def driver_name(self):
+ """Gets the driver_name of this V1alpha2ResourceClaimStatus. # noqa: E501
+
+ DriverName is a copy of the driver name from the ResourceClass at the time when allocation started. # noqa: E501
+
+ :return: The driver_name of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._driver_name
+
+ @driver_name.setter
+ def driver_name(self, driver_name):
+ """Sets the driver_name of this V1alpha2ResourceClaimStatus.
+
+ DriverName is a copy of the driver name from the ResourceClass at the time when allocation started. # noqa: E501
+
+ :param driver_name: The driver_name of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :type: str
+ """
+
+ self._driver_name = driver_name
+
+ @property
+ def reserved_for(self):
+ """Gets the reserved_for of this V1alpha2ResourceClaimStatus. # noqa: E501
+
+ ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. There can be at most 32 such reservations. This may get increased in the future, but not reduced. # noqa: E501
+
+ :return: The reserved_for of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :rtype: list[V1alpha2ResourceClaimConsumerReference]
+ """
+ return self._reserved_for
+
+ @reserved_for.setter
+ def reserved_for(self, reserved_for):
+ """Sets the reserved_for of this V1alpha2ResourceClaimStatus.
+
+ ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. There can be at most 32 such reservations. This may get increased in the future, but not reduced. # noqa: E501
+
+ :param reserved_for: The reserved_for of this V1alpha2ResourceClaimStatus. # noqa: E501
+ :type: list[V1alpha2ResourceClaimConsumerReference]
+ """
+
+ self._reserved_for = reserved_for
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClaimStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClaimStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template.py
new file mode 100644
index 0000000000..be03a3f946
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha2ResourceClaimTemplate(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1alpha2ResourceClaimTemplateSpec'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha2ResourceClaimTemplate - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        # `spec` is a required field: always assigned so the setter can validate.
        self.spec = spec

    @property
    def api_version(self):
        """Gets the api_version of this V1alpha2ResourceClaimTemplate.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha2ResourceClaimTemplate.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1alpha2ResourceClaimTemplate.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha2ResourceClaimTemplate.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha2ResourceClaimTemplate.  # noqa: E501


        :return: The metadata of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha2ResourceClaimTemplate.


        :param metadata: The metadata of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1alpha2ResourceClaimTemplate.  # noqa: E501


        :return: The spec of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :rtype: V1alpha2ResourceClaimTemplateSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1alpha2ResourceClaimTemplate.


        :param spec: The spec of this V1alpha2ResourceClaimTemplate.  # noqa: E501
        :type: V1alpha2ResourceClaimTemplateSpec
        """
        # `spec` is required by the OpenAPI schema; reject None when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501

        self._spec = spec

    def to_dict(self):
        """Returns the model properties as a dict, serializing nested models recursively."""
        result = {}

        # Only the declared attribute names matter; the type strings do not.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha2ResourceClaimTemplate):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha2ResourceClaimTemplate):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_list.py
new file mode 100644
index 0000000000..44581be68f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha2ResourceClaimTemplateList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1alpha2ResourceClaimTemplate]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha2ResourceClaimTemplateList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is a required field: always assigned so the setter can validate.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1alpha2ResourceClaimTemplateList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha2ResourceClaimTemplateList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1alpha2ResourceClaimTemplateList.  # noqa: E501

        Items is the list of resource claim templates.  # noqa: E501

        :return: The items of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :rtype: list[V1alpha2ResourceClaimTemplate]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1alpha2ResourceClaimTemplateList.

        Items is the list of resource claim templates.  # noqa: E501

        :param items: The items of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :type: list[V1alpha2ResourceClaimTemplate]
        """
        # `items` is required by the OpenAPI schema; reject None when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1alpha2ResourceClaimTemplateList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha2ResourceClaimTemplateList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha2ResourceClaimTemplateList.  # noqa: E501


        :return: The metadata of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha2ResourceClaimTemplateList.


        :param metadata: The metadata of this V1alpha2ResourceClaimTemplateList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict, serializing nested models recursively."""
        result = {}

        # Only the declared attribute names matter; the type strings do not.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha2ResourceClaimTemplateList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha2ResourceClaimTemplateList):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_spec.py
new file mode 100644
index 0000000000..114a38aeab
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_claim_template_spec.py
@@ -0,0 +1,147 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha2ResourceClaimTemplateSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'metadata': 'V1ObjectMeta',
        'spec': 'V1alpha2ResourceClaimSpec'
    }

    attribute_map = {
        'metadata': 'metadata',
        'spec': 'spec'
    }

    def __init__(self, metadata=None, spec=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha2ResourceClaimTemplateSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._metadata = None
        self._spec = None
        self.discriminator = None

        if metadata is not None:
            self.metadata = metadata
        # `spec` is a required field: always assigned so the setter can validate.
        self.spec = spec

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha2ResourceClaimTemplateSpec.  # noqa: E501


        :return: The metadata of this V1alpha2ResourceClaimTemplateSpec.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha2ResourceClaimTemplateSpec.


        :param metadata: The metadata of this V1alpha2ResourceClaimTemplateSpec.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1alpha2ResourceClaimTemplateSpec.  # noqa: E501


        :return: The spec of this V1alpha2ResourceClaimTemplateSpec.  # noqa: E501
        :rtype: V1alpha2ResourceClaimSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1alpha2ResourceClaimTemplateSpec.


        :param spec: The spec of this V1alpha2ResourceClaimTemplateSpec.  # noqa: E501
        :type: V1alpha2ResourceClaimSpec
        """
        # `spec` is required by the OpenAPI schema; reject None when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
            raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501

        self._spec = spec

    def to_dict(self):
        """Returns the model properties as a dict, serializing nested models recursively."""
        result = {}

        # Only the declared attribute names matter; the type strings do not.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha2ResourceClaimTemplateSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha2ResourceClaimTemplateSpec):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class.py
new file mode 100644
index 0000000000..b1f29343b7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class.py
@@ -0,0 +1,257 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha2ResourceClass(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'driver_name': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'parameters_ref': 'V1alpha2ResourceClassParametersReference',
        'suitable_nodes': 'V1NodeSelector'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'driver_name': 'driverName',
        'kind': 'kind',
        'metadata': 'metadata',
        'parameters_ref': 'parametersRef',
        'suitable_nodes': 'suitableNodes'
    }

    def __init__(self, api_version=None, driver_name=None, kind=None, metadata=None, parameters_ref=None, suitable_nodes=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha2ResourceClass - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._driver_name = None
        self._kind = None
        self._metadata = None
        self._parameters_ref = None
        self._suitable_nodes = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `driver_name` is a required field: always assigned so the setter can validate.
        self.driver_name = driver_name
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if parameters_ref is not None:
            self.parameters_ref = parameters_ref
        if suitable_nodes is not None:
            self.suitable_nodes = suitable_nodes

    @property
    def api_version(self):
        """Gets the api_version of this V1alpha2ResourceClass.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1alpha2ResourceClass.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha2ResourceClass.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1alpha2ResourceClass.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def driver_name(self):
        """Gets the driver_name of this V1alpha2ResourceClass.  # noqa: E501

        DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class. Resource drivers have a unique name in forward domain order (acme.example.com).  # noqa: E501

        :return: The driver_name of this V1alpha2ResourceClass.  # noqa: E501
        :rtype: str
        """
        return self._driver_name

    @driver_name.setter
    def driver_name(self, driver_name):
        """Sets the driver_name of this V1alpha2ResourceClass.

        DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class. Resource drivers have a unique name in forward domain order (acme.example.com).  # noqa: E501

        :param driver_name: The driver_name of this V1alpha2ResourceClass.  # noqa: E501
        :type: str
        """
        # `driver_name` is required by the OpenAPI schema; reject None when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and driver_name is None:  # noqa: E501
            raise ValueError("Invalid value for `driver_name`, must not be `None`")  # noqa: E501

        self._driver_name = driver_name

    @property
    def kind(self):
        """Gets the kind of this V1alpha2ResourceClass.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1alpha2ResourceClass.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha2ResourceClass.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1alpha2ResourceClass.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha2ResourceClass.  # noqa: E501


        :return: The metadata of this V1alpha2ResourceClass.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha2ResourceClass.


        :param metadata: The metadata of this V1alpha2ResourceClass.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def parameters_ref(self):
        """Gets the parameters_ref of this V1alpha2ResourceClass.  # noqa: E501


        :return: The parameters_ref of this V1alpha2ResourceClass.  # noqa: E501
        :rtype: V1alpha2ResourceClassParametersReference
        """
        return self._parameters_ref

    @parameters_ref.setter
    def parameters_ref(self, parameters_ref):
        """Sets the parameters_ref of this V1alpha2ResourceClass.


        :param parameters_ref: The parameters_ref of this V1alpha2ResourceClass.  # noqa: E501
        :type: V1alpha2ResourceClassParametersReference
        """

        self._parameters_ref = parameters_ref

    @property
    def suitable_nodes(self):
        """Gets the suitable_nodes of this V1alpha2ResourceClass.  # noqa: E501


        :return: The suitable_nodes of this V1alpha2ResourceClass.  # noqa: E501
        :rtype: V1NodeSelector
        """
        return self._suitable_nodes

    @suitable_nodes.setter
    def suitable_nodes(self, suitable_nodes):
        """Sets the suitable_nodes of this V1alpha2ResourceClass.


        :param suitable_nodes: The suitable_nodes of this V1alpha2ResourceClass.  # noqa: E501
        :type: V1NodeSelector
        """

        self._suitable_nodes = suitable_nodes

    def to_dict(self):
        """Returns the model properties as a dict, serializing nested models recursively."""
        result = {}

        # Only the declared attribute names matter; the type strings do not.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha2ResourceClass):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha2ResourceClass):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_list.py
new file mode 100644
index 0000000000..2868c42df8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1alpha2ResourceClassList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1alpha2ResourceClass]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha2ResourceClassList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is a required field: always assigned so the setter can validate.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1alpha2ResourceClassList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1alpha2ResourceClassList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1alpha2ResourceClassList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1alpha2ResourceClassList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1alpha2ResourceClassList.  # noqa: E501

        Items is the list of resource classes.  # noqa: E501

        :return: The items of this V1alpha2ResourceClassList.  # noqa: E501
        :rtype: list[V1alpha2ResourceClass]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1alpha2ResourceClassList.

        Items is the list of resource classes.  # noqa: E501

        :param items: The items of this V1alpha2ResourceClassList.  # noqa: E501
        :type: list[V1alpha2ResourceClass]
        """
        # `items` is required by the OpenAPI schema; reject None when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1alpha2ResourceClassList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1alpha2ResourceClassList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1alpha2ResourceClassList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1alpha2ResourceClassList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1alpha2ResourceClassList.  # noqa: E501


        :return: The metadata of this V1alpha2ResourceClassList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1alpha2ResourceClassList.


        :param metadata: The metadata of this V1alpha2ResourceClassList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict, serializing nested models recursively."""
        result = {}

        # Only the declared attribute names matter; the type strings do not.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha2ResourceClassList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha2ResourceClassList):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_parameters_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_parameters_reference.py
new file mode 100644
index 0000000000..8e7b57dde2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_class_parameters_reference.py
@@ -0,0 +1,208 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceClassParametersReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_group': 'str',
+ 'kind': 'str',
+ 'name': 'str',
+ 'namespace': 'str'
+ }
+
+ attribute_map = {
+ 'api_group': 'apiGroup',
+ 'kind': 'kind',
+ 'name': 'name',
+ 'namespace': 'namespace'
+ }
+
+ def __init__(self, api_group=None, kind=None, name=None, namespace=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceClassParametersReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_group = None
+ self._kind = None
+ self._name = None
+ self._namespace = None
+ self.discriminator = None
+
+ if api_group is not None:
+ self.api_group = api_group
+ self.kind = kind
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+
+ @property
+ def api_group(self):
+ """Gets the api_group of this V1alpha2ResourceClassParametersReference. # noqa: E501
+
+ APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
+
+ :return: The api_group of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_group
+
+ @api_group.setter
+ def api_group(self, api_group):
+ """Sets the api_group of this V1alpha2ResourceClassParametersReference.
+
+ APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
+
+ :param api_group: The api_group of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_group = api_group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1alpha2ResourceClassParametersReference. # noqa: E501
+
+ Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata. # noqa: E501
+
+ :return: The kind of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1alpha2ResourceClassParametersReference.
+
+ Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata. # noqa: E501
+
+ :param kind: The kind of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V1alpha2ResourceClassParametersReference. # noqa: E501
+
+ Name is the name of resource being referenced. # noqa: E501
+
+ :return: The name of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1alpha2ResourceClassParametersReference.
+
+ Name is the name of resource being referenced. # noqa: E501
+
+ :param name: The name of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1alpha2ResourceClassParametersReference. # noqa: E501
+
+ Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources. # noqa: E501
+
+ :return: The namespace of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1alpha2ResourceClassParametersReference.
+
+ Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources. # noqa: E501
+
+ :param namespace: The namespace of this V1alpha2ResourceClassParametersReference. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceClassParametersReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceClassParametersReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_handle.py b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_handle.py
new file mode 100644
index 0000000000..3db2dbf8f1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1alpha2_resource_handle.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1alpha2ResourceHandle(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'data': 'str',
+ 'driver_name': 'str'
+ }
+
+ attribute_map = {
+ 'data': 'data',
+ 'driver_name': 'driverName'
+ }
+
+ def __init__(self, data=None, driver_name=None, local_vars_configuration=None): # noqa: E501
+ """V1alpha2ResourceHandle - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._data = None
+ self._driver_name = None
+ self.discriminator = None
+
+ if data is not None:
+ self.data = data
+ if driver_name is not None:
+ self.driver_name = driver_name
+
+ @property
+ def data(self):
+ """Gets the data of this V1alpha2ResourceHandle. # noqa: E501
+
+ Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle. The maximum size of this field is 16KiB. This may get increased in the future, but not reduced. # noqa: E501
+
+ :return: The data of this V1alpha2ResourceHandle. # noqa: E501
+ :rtype: str
+ """
+ return self._data
+
+ @data.setter
+ def data(self, data):
+ """Sets the data of this V1alpha2ResourceHandle.
+
+ Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle. The maximum size of this field is 16KiB. This may get increased in the future, but not reduced. # noqa: E501
+
+ :param data: The data of this V1alpha2ResourceHandle. # noqa: E501
+ :type: str
+ """
+
+ self._data = data
+
+ @property
+ def driver_name(self):
+ """Gets the driver_name of this V1alpha2ResourceHandle. # noqa: E501
+
+ DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in. # noqa: E501
+
+ :return: The driver_name of this V1alpha2ResourceHandle. # noqa: E501
+ :rtype: str
+ """
+ return self._driver_name
+
+ @driver_name.setter
+ def driver_name(self, driver_name):
+ """Sets the driver_name of this V1alpha2ResourceHandle.
+
+ DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in. # noqa: E501
+
+ :param driver_name: The driver_name of this V1alpha2ResourceHandle. # noqa: E501
+ :type: str
+ """
+
+ self._driver_name = driver_name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1alpha2ResourceHandle):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1alpha2ResourceHandle):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_audit_annotation.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_audit_annotation.py
new file mode 100644
index 0000000000..43900cfd6b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_audit_annotation.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1AuditAnnotation(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'key': 'str',
+ 'value_expression': 'str'
+ }
+
+ attribute_map = {
+ 'key': 'key',
+ 'value_expression': 'valueExpression'
+ }
+
+ def __init__(self, key=None, value_expression=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1AuditAnnotation - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._key = None
+ self._value_expression = None
+ self.discriminator = None
+
+ self.key = key
+ self.value_expression = value_expression
+
+ @property
+ def key(self):
+ """Gets the key of this V1beta1AuditAnnotation. # noqa: E501
+
+ key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. The key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\". If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded. Required. # noqa: E501
+
+ :return: The key of this V1beta1AuditAnnotation. # noqa: E501
+ :rtype: str
+ """
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ """Sets the key of this V1beta1AuditAnnotation.
+
+ key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. The key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\". If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded. Required. # noqa: E501
+
+ :param key: The key of this V1beta1AuditAnnotation. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
+ raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
+
+ self._key = key
+
+ @property
+ def value_expression(self):
+ """Gets the value_expression of this V1beta1AuditAnnotation. # noqa: E501
+
+ valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb. If multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list. Required. # noqa: E501
+
+ :return: The value_expression of this V1beta1AuditAnnotation. # noqa: E501
+ :rtype: str
+ """
+ return self._value_expression
+
+ @value_expression.setter
+ def value_expression(self, value_expression):
+ """Sets the value_expression of this V1beta1AuditAnnotation.
+
+ valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb. If multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list. Required. # noqa: E501
+
+ :param value_expression: The value_expression of this V1beta1AuditAnnotation. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and value_expression is None: # noqa: E501
+ raise ValueError("Invalid value for `value_expression`, must not be `None`") # noqa: E501
+
+ self._value_expression = value_expression
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1AuditAnnotation):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1AuditAnnotation):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_expression_warning.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_expression_warning.py
new file mode 100644
index 0000000000..7148964498
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_expression_warning.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ExpressionWarning(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'field_ref': 'str',
+ 'warning': 'str'
+ }
+
+ attribute_map = {
+ 'field_ref': 'fieldRef',
+ 'warning': 'warning'
+ }
+
+ def __init__(self, field_ref=None, warning=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ExpressionWarning - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._field_ref = None
+ self._warning = None
+ self.discriminator = None
+
+ self.field_ref = field_ref
+ self.warning = warning
+
+ @property
+ def field_ref(self):
+ """Gets the field_ref of this V1beta1ExpressionWarning. # noqa: E501
+
+ The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\" # noqa: E501
+
+ :return: The field_ref of this V1beta1ExpressionWarning. # noqa: E501
+ :rtype: str
+ """
+ return self._field_ref
+
+ @field_ref.setter
+ def field_ref(self, field_ref):
+ """Sets the field_ref of this V1beta1ExpressionWarning.
+
+ The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\" # noqa: E501
+
+ :param field_ref: The field_ref of this V1beta1ExpressionWarning. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and field_ref is None: # noqa: E501
+ raise ValueError("Invalid value for `field_ref`, must not be `None`") # noqa: E501
+
+ self._field_ref = field_ref
+
+ @property
+ def warning(self):
+ """Gets the warning of this V1beta1ExpressionWarning. # noqa: E501
+
+ The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler. # noqa: E501
+
+ :return: The warning of this V1beta1ExpressionWarning. # noqa: E501
+ :rtype: str
+ """
+ return self._warning
+
+ @warning.setter
+ def warning(self, warning):
+ """Sets the warning of this V1beta1ExpressionWarning.
+
+ The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler. # noqa: E501
+
+ :param warning: The warning of this V1beta1ExpressionWarning. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and warning is None: # noqa: E501
+ raise ValueError("Invalid value for `warning`, must not be `None`") # noqa: E501
+
+ self._warning = warning
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ExpressionWarning):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ExpressionWarning):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_condition.py
new file mode 100644
index 0000000000..e5723129d0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_condition.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1MatchCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'name': 'name'
+ }
+
+ def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1MatchCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._name = None
+ self.discriminator = None
+
+ self.expression = expression
+ self.name = name
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1beta1MatchCondition. # noqa: E501
+
+ Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
+
+ :return: The expression of this V1beta1MatchCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1beta1MatchCondition.
+
+ Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
+
+ :param expression: The expression of this V1beta1MatchCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta1MatchCondition. # noqa: E501
+
+ Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
+
+ :return: The name of this V1beta1MatchCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta1MatchCondition.
+
+ Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
+
+ :param name: The name of this V1beta1MatchCondition. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1MatchCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1MatchCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_resources.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_resources.py
new file mode 100644
index 0000000000..3742deb626
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_match_resources.py
@@ -0,0 +1,230 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1MatchResources(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'exclude_resource_rules': 'list[V1beta1NamedRuleWithOperations]',
+ 'match_policy': 'str',
+ 'namespace_selector': 'V1LabelSelector',
+ 'object_selector': 'V1LabelSelector',
+ 'resource_rules': 'list[V1beta1NamedRuleWithOperations]'
+ }
+
+ attribute_map = {
+ 'exclude_resource_rules': 'excludeResourceRules',
+ 'match_policy': 'matchPolicy',
+ 'namespace_selector': 'namespaceSelector',
+ 'object_selector': 'objectSelector',
+ 'resource_rules': 'resourceRules'
+ }
+
+ def __init__(self, exclude_resource_rules=None, match_policy=None, namespace_selector=None, object_selector=None, resource_rules=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1MatchResources - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._exclude_resource_rules = None
+ self._match_policy = None
+ self._namespace_selector = None
+ self._object_selector = None
+ self._resource_rules = None
+ self.discriminator = None
+
+ if exclude_resource_rules is not None:
+ self.exclude_resource_rules = exclude_resource_rules
+ if match_policy is not None:
+ self.match_policy = match_policy
+ if namespace_selector is not None:
+ self.namespace_selector = namespace_selector
+ if object_selector is not None:
+ self.object_selector = object_selector
+ if resource_rules is not None:
+ self.resource_rules = resource_rules
+
+ @property
+ def exclude_resource_rules(self):
+ """Gets the exclude_resource_rules of this V1beta1MatchResources. # noqa: E501
+
+ ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded) # noqa: E501
+
+ :return: The exclude_resource_rules of this V1beta1MatchResources. # noqa: E501
+ :rtype: list[V1beta1NamedRuleWithOperations]
+ """
+ return self._exclude_resource_rules
+
+ @exclude_resource_rules.setter
+ def exclude_resource_rules(self, exclude_resource_rules):
+ """Sets the exclude_resource_rules of this V1beta1MatchResources.
+
+ ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded) # noqa: E501
+
+ :param exclude_resource_rules: The exclude_resource_rules of this V1beta1MatchResources. # noqa: E501
+ :type: list[V1beta1NamedRuleWithOperations]
+ """
+
+ self._exclude_resource_rules = exclude_resource_rules
+
+ @property
+ def match_policy(self):
+ """Gets the match_policy of this V1beta1MatchResources. # noqa: E501
+
+ matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. Defaults to \"Equivalent\" # noqa: E501
+
+ :return: The match_policy of this V1beta1MatchResources. # noqa: E501
+ :rtype: str
+ """
+ return self._match_policy
+
+ @match_policy.setter
+ def match_policy(self, match_policy):
+ """Sets the match_policy of this V1beta1MatchResources.
+
+ matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. Defaults to \"Equivalent\" # noqa: E501
+
+ :param match_policy: The match_policy of this V1beta1MatchResources. # noqa: E501
+ :type: str
+ """
+
+ self._match_policy = match_policy
+
+ @property
+ def namespace_selector(self):
+ """Gets the namespace_selector of this V1beta1MatchResources. # noqa: E501
+
+
+ :return: The namespace_selector of this V1beta1MatchResources. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._namespace_selector
+
+ @namespace_selector.setter
+ def namespace_selector(self, namespace_selector):
+ """Sets the namespace_selector of this V1beta1MatchResources.
+
+
+ :param namespace_selector: The namespace_selector of this V1beta1MatchResources. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._namespace_selector = namespace_selector
+
+ @property
+ def object_selector(self):
+ """Gets the object_selector of this V1beta1MatchResources. # noqa: E501
+
+
+ :return: The object_selector of this V1beta1MatchResources. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._object_selector
+
+ @object_selector.setter
+ def object_selector(self, object_selector):
+ """Sets the object_selector of this V1beta1MatchResources.
+
+
+ :param object_selector: The object_selector of this V1beta1MatchResources. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._object_selector = object_selector
+
+ @property
+ def resource_rules(self):
+ """Gets the resource_rules of this V1beta1MatchResources. # noqa: E501
+
+ ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule. # noqa: E501
+
+ :return: The resource_rules of this V1beta1MatchResources. # noqa: E501
+ :rtype: list[V1beta1NamedRuleWithOperations]
+ """
+ return self._resource_rules
+
+ @resource_rules.setter
+ def resource_rules(self, resource_rules):
+ """Sets the resource_rules of this V1beta1MatchResources.
+
+ ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule. # noqa: E501
+
+ :param resource_rules: The resource_rules of this V1beta1MatchResources. # noqa: E501
+ :type: list[V1beta1NamedRuleWithOperations]
+ """
+
+ self._resource_rules = resource_rules
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1MatchResources):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1MatchResources):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_named_rule_with_operations.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_named_rule_with_operations.py
new file mode 100644
index 0000000000..d3c3f40023
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_named_rule_with_operations.py
@@ -0,0 +1,262 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1NamedRuleWithOperations(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'api_versions': 'list[str]',
+ 'operations': 'list[str]',
+ 'resource_names': 'list[str]',
+ 'resources': 'list[str]',
+ 'scope': 'str'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'api_versions': 'apiVersions',
+ 'operations': 'operations',
+ 'resource_names': 'resourceNames',
+ 'resources': 'resources',
+ 'scope': 'scope'
+ }
+
+ def __init__(self, api_groups=None, api_versions=None, operations=None, resource_names=None, resources=None, scope=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1NamedRuleWithOperations - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._api_versions = None
+ self._operations = None
+ self._resource_names = None
+ self._resources = None
+ self._scope = None
+ self.discriminator = None
+
+ if api_groups is not None:
+ self.api_groups = api_groups
+ if api_versions is not None:
+ self.api_versions = api_versions
+ if operations is not None:
+ self.operations = operations
+ if resource_names is not None:
+ self.resource_names = resource_names
+ if resources is not None:
+ self.resources = resources
+ if scope is not None:
+ self.scope = scope
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1beta1NamedRuleWithOperations. # noqa: E501
+
+ APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The api_groups of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1beta1NamedRuleWithOperations.
+
+ APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param api_groups: The api_groups of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_groups = api_groups
+
+ @property
+ def api_versions(self):
+ """Gets the api_versions of this V1beta1NamedRuleWithOperations. # noqa: E501
+
+ APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The api_versions of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_versions
+
+ @api_versions.setter
+ def api_versions(self, api_versions):
+ """Sets the api_versions of this V1beta1NamedRuleWithOperations.
+
+ APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param api_versions: The api_versions of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._api_versions = api_versions
+
+ @property
+ def operations(self):
+ """Gets the operations of this V1beta1NamedRuleWithOperations. # noqa: E501
+
+ Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :return: The operations of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._operations
+
+ @operations.setter
+ def operations(self, operations):
+ """Sets the operations of this V1beta1NamedRuleWithOperations.
+
+ Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501
+
+ :param operations: The operations of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._operations = operations
+
+ @property
+ def resource_names(self):
+ """Gets the resource_names of this V1beta1NamedRuleWithOperations. # noqa: E501
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
+
+ :return: The resource_names of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resource_names
+
+ @resource_names.setter
+ def resource_names(self, resource_names):
+ """Sets the resource_names of this V1beta1NamedRuleWithOperations.
+
+ ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
+
+ :param resource_names: The resource_names of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resource_names = resource_names
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1beta1NamedRuleWithOperations. # noqa: E501
+
+ Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501
+
+ :return: The resources of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1beta1NamedRuleWithOperations.
+
+ Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501
+
+ :param resources: The resources of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :type: list[str]
+ """
+
+ self._resources = resources
+
+ @property
+ def scope(self):
+ """Gets the scope of this V1beta1NamedRuleWithOperations. # noqa: E501
+
+ scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501
+
+ :return: The scope of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :rtype: str
+ """
+ return self._scope
+
+ @scope.setter
+ def scope(self, scope):
+ """Sets the scope of this V1beta1NamedRuleWithOperations.
+
+ scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501
+
+ :param scope: The scope of this V1beta1NamedRuleWithOperations. # noqa: E501
+ :type: str
+ """
+
+ self._scope = scope
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1NamedRuleWithOperations):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1NamedRuleWithOperations):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_kind.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_kind.py
new file mode 100644
index 0000000000..0fd42f2fa8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_kind.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ParamKind(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind'
+ }
+
+ def __init__(self, api_version=None, kind=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ParamKind - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta1ParamKind. # noqa: E501
+
+ APIVersion is the API group version the resources belong to. In format of \"group/version\". Required. # noqa: E501
+
+ :return: The api_version of this V1beta1ParamKind. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta1ParamKind.
+
+ APIVersion is the API group version the resources belong to. In format of \"group/version\". Required. # noqa: E501
+
+ :param api_version: The api_version of this V1beta1ParamKind. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta1ParamKind. # noqa: E501
+
+ Kind is the API kind the resources belong to. Required. # noqa: E501
+
+ :return: The kind of this V1beta1ParamKind. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta1ParamKind.
+
+ Kind is the API kind the resources belong to. Required. # noqa: E501
+
+ :param kind: The kind of this V1beta1ParamKind. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ParamKind):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ParamKind):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_ref.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_ref.py
new file mode 100644
index 0000000000..29a2be20ac
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_param_ref.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ParamRef(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str',
+ 'parameter_not_found_action': 'str',
+ 'selector': 'V1LabelSelector'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace',
+ 'parameter_not_found_action': 'parameterNotFoundAction',
+ 'selector': 'selector'
+ }
+
+ def __init__(self, name=None, namespace=None, parameter_not_found_action=None, selector=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ParamRef - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self._parameter_not_found_action = None
+ self._selector = None
+ self.discriminator = None
+
+ if name is not None:
+ self.name = name
+ if namespace is not None:
+ self.namespace = namespace
+ if parameter_not_found_action is not None:
+ self.parameter_not_found_action = parameter_not_found_action
+ if selector is not None:
+ self.selector = selector
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta1ParamRef. # noqa: E501
+
+ name is the name of the resource being referenced. One of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. A single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped. # noqa: E501
+
+ :return: The name of this V1beta1ParamRef. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta1ParamRef.
+
+ name is the name of the resource being referenced. One of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. A single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped. # noqa: E501
+
+ :param name: The name of this V1beta1ParamRef. # noqa: E501
+ :type: str
+ """
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1beta1ParamRef. # noqa: E501
+
+ namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
+
+ :return: The namespace of this V1beta1ParamRef. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1beta1ParamRef.
+
+ namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
+
+ :param namespace: The namespace of this V1beta1ParamRef. # noqa: E501
+ :type: str
+ """
+
+ self._namespace = namespace
+
+ @property
+ def parameter_not_found_action(self):
+ """Gets the parameter_not_found_action of this V1beta1ParamRef. # noqa: E501
+
+ `parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Required # noqa: E501
+
+ :return: The parameter_not_found_action of this V1beta1ParamRef. # noqa: E501
+ :rtype: str
+ """
+ return self._parameter_not_found_action
+
+ @parameter_not_found_action.setter
+ def parameter_not_found_action(self, parameter_not_found_action):
+ """Sets the parameter_not_found_action of this V1beta1ParamRef.
+
+ `parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Required # noqa: E501
+
+ :param parameter_not_found_action: The parameter_not_found_action of this V1beta1ParamRef. # noqa: E501
+ :type: str
+ """
+
+ self._parameter_not_found_action = parameter_not_found_action
+
+ @property
+ def selector(self):
+ """Gets the selector of this V1beta1ParamRef. # noqa: E501
+
+
+ :return: The selector of this V1beta1ParamRef. # noqa: E501
+ :rtype: V1LabelSelector
+ """
+ return self._selector
+
+ @selector.setter
+ def selector(self, selector):
+ """Sets the selector of this V1beta1ParamRef.
+
+
+ :param selector: The selector of this V1beta1ParamRef. # noqa: E501
+ :type: V1LabelSelector
+ """
+
+ self._selector = selector
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ParamRef):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ParamRef):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review.py
new file mode 100644
index 0000000000..814014f544
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1SelfSubjectReview(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'status': 'V1beta1SelfSubjectReviewStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1SelfSubjectReview - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta1SelfSubjectReview. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta1SelfSubjectReview. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta1SelfSubjectReview.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta1SelfSubjectReview. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta1SelfSubjectReview. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta1SelfSubjectReview. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta1SelfSubjectReview.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta1SelfSubjectReview. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta1SelfSubjectReview. # noqa: E501
+
+
+ :return: The metadata of this V1beta1SelfSubjectReview. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta1SelfSubjectReview.
+
+
+ :param metadata: The metadata of this V1beta1SelfSubjectReview. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def status(self):
+ """Gets the status of this V1beta1SelfSubjectReview. # noqa: E501
+
+
+ :return: The status of this V1beta1SelfSubjectReview. # noqa: E501
+ :rtype: V1beta1SelfSubjectReviewStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1beta1SelfSubjectReview.
+
+
+ :param status: The status of this V1beta1SelfSubjectReview. # noqa: E501
+ :type: V1beta1SelfSubjectReviewStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1SelfSubjectReview):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1SelfSubjectReview):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review_status.py
new file mode 100644
index 0000000000..cb6e5b6b21
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_self_subject_review_status.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1SelfSubjectReviewStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'user_info': 'V1UserInfo'
+ }
+
+ attribute_map = {
+ 'user_info': 'userInfo'
+ }
+
+ def __init__(self, user_info=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1SelfSubjectReviewStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._user_info = None
+ self.discriminator = None
+
+ if user_info is not None:
+ self.user_info = user_info
+
+ @property
+ def user_info(self):
+ """Gets the user_info of this V1beta1SelfSubjectReviewStatus. # noqa: E501
+
+
+ :return: The user_info of this V1beta1SelfSubjectReviewStatus. # noqa: E501
+ :rtype: V1UserInfo
+ """
+ return self._user_info
+
+ @user_info.setter
+ def user_info(self, user_info):
+ """Sets the user_info of this V1beta1SelfSubjectReviewStatus.
+
+
+ :param user_info: The user_info of this V1beta1SelfSubjectReviewStatus. # noqa: E501
+ :type: V1UserInfo
+ """
+
+ self._user_info = user_info
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1SelfSubjectReviewStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1SelfSubjectReviewStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_type_checking.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_type_checking.py
new file mode 100644
index 0000000000..ad0f8bc22e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_type_checking.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1TypeChecking(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression_warnings': 'list[V1beta1ExpressionWarning]'
+ }
+
+ attribute_map = {
+ 'expression_warnings': 'expressionWarnings'
+ }
+
+ def __init__(self, expression_warnings=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1TypeChecking - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression_warnings = None
+ self.discriminator = None
+
+ if expression_warnings is not None:
+ self.expression_warnings = expression_warnings
+
+ @property
+ def expression_warnings(self):
+ """Gets the expression_warnings of this V1beta1TypeChecking. # noqa: E501
+
+ The type checking warnings for each expression. # noqa: E501
+
+ :return: The expression_warnings of this V1beta1TypeChecking. # noqa: E501
+ :rtype: list[V1beta1ExpressionWarning]
+ """
+ return self._expression_warnings
+
+ @expression_warnings.setter
+ def expression_warnings(self, expression_warnings):
+ """Sets the expression_warnings of this V1beta1TypeChecking.
+
+ The type checking warnings for each expression. # noqa: E501
+
+ :param expression_warnings: The expression_warnings of this V1beta1TypeChecking. # noqa: E501
+ :type: list[V1beta1ExpressionWarning]
+ """
+
+ self._expression_warnings = expression_warnings
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1TypeChecking):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1TypeChecking):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy.py
new file mode 100644
index 0000000000..b995d722e7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicy(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1beta1ValidatingAdmissionPolicySpec',
+ 'status': 'V1beta1ValidatingAdmissionPolicyStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicy - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta1ValidatingAdmissionPolicy.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta1ValidatingAdmissionPolicy.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+
+
+ :return: The metadata of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta1ValidatingAdmissionPolicy.
+
+
+ :param metadata: The metadata of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+
+
+ :return: The spec of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: V1beta1ValidatingAdmissionPolicySpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1beta1ValidatingAdmissionPolicy.
+
+
+ :param spec: The spec of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :type: V1beta1ValidatingAdmissionPolicySpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+
+
+ :return: The status of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :rtype: V1beta1ValidatingAdmissionPolicyStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1beta1ValidatingAdmissionPolicy.
+
+
+ :param status: The status of this V1beta1ValidatingAdmissionPolicy. # noqa: E501
+ :type: V1beta1ValidatingAdmissionPolicyStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicy):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicy):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding.py
new file mode 100644
index 0000000000..bcd251fd5c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicyBinding(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1beta1ValidatingAdmissionPolicyBindingSpec'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicyBinding - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta1ValidatingAdmissionPolicyBinding.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta1ValidatingAdmissionPolicyBinding.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+
+ :return: The metadata of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta1ValidatingAdmissionPolicyBinding.
+
+
+ :param metadata: The metadata of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+
+
+ :return: The spec of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :rtype: V1beta1ValidatingAdmissionPolicyBindingSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1beta1ValidatingAdmissionPolicyBinding.
+
+
+ :param spec: The spec of this V1beta1ValidatingAdmissionPolicyBinding. # noqa: E501
+ :type: V1beta1ValidatingAdmissionPolicyBindingSpec
+ """
+
+ self._spec = spec
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyBinding):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyBinding):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_list.py
new file mode 100644
index 0000000000..c8013e9b06
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_list.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicyBindingList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1beta1ValidatingAdmissionPolicyBinding]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicyBindingList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if items is not None:
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta1ValidatingAdmissionPolicyBindingList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+ List of PolicyBinding. # noqa: E501
+
+ :return: The items of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: list[V1beta1ValidatingAdmissionPolicyBinding]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1beta1ValidatingAdmissionPolicyBindingList.
+
+ List of PolicyBinding. # noqa: E501
+
+ :param items: The items of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: list[V1beta1ValidatingAdmissionPolicyBinding]
+ """
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta1ValidatingAdmissionPolicyBindingList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+
+
+ :return: The metadata of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta1ValidatingAdmissionPolicyBindingList.
+
+
+ :param metadata: The metadata of this V1beta1ValidatingAdmissionPolicyBindingList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyBindingList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyBindingList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_spec.py
new file mode 100644
index 0000000000..75cc79f760
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_binding_spec.py
@@ -0,0 +1,202 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicyBindingSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'match_resources': 'V1beta1MatchResources',
+ 'param_ref': 'V1beta1ParamRef',
+ 'policy_name': 'str',
+ 'validation_actions': 'list[str]'
+ }
+
+ attribute_map = {
+ 'match_resources': 'matchResources',
+ 'param_ref': 'paramRef',
+ 'policy_name': 'policyName',
+ 'validation_actions': 'validationActions'
+ }
+
+ def __init__(self, match_resources=None, param_ref=None, policy_name=None, validation_actions=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicyBindingSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._match_resources = None
+ self._param_ref = None
+ self._policy_name = None
+ self._validation_actions = None
+ self.discriminator = None
+
+ if match_resources is not None:
+ self.match_resources = match_resources
+ if param_ref is not None:
+ self.param_ref = param_ref
+ if policy_name is not None:
+ self.policy_name = policy_name
+ if validation_actions is not None:
+ self.validation_actions = validation_actions
+
+ @property
+ def match_resources(self):
+ """Gets the match_resources of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+
+ :return: The match_resources of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: V1beta1MatchResources
+ """
+ return self._match_resources
+
+ @match_resources.setter
+ def match_resources(self, match_resources):
+ """Sets the match_resources of this V1beta1ValidatingAdmissionPolicyBindingSpec.
+
+
+ :param match_resources: The match_resources of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: V1beta1MatchResources
+ """
+
+ self._match_resources = match_resources
+
+ @property
+ def param_ref(self):
+ """Gets the param_ref of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+
+ :return: The param_ref of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: V1beta1ParamRef
+ """
+ return self._param_ref
+
+ @param_ref.setter
+ def param_ref(self, param_ref):
+ """Sets the param_ref of this V1beta1ValidatingAdmissionPolicyBindingSpec.
+
+
+ :param param_ref: The param_ref of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: V1beta1ParamRef
+ """
+
+ self._param_ref = param_ref
+
+ @property
+ def policy_name(self):
+ """Gets the policy_name of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+ PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required. # noqa: E501
+
+ :return: The policy_name of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._policy_name
+
+ @policy_name.setter
+ def policy_name(self, policy_name):
+ """Sets the policy_name of this V1beta1ValidatingAdmissionPolicyBindingSpec.
+
+ PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required. # noqa: E501
+
+ :param policy_name: The policy_name of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: str
+ """
+
+ self._policy_name = policy_name
+
+ @property
+ def validation_actions(self):
+ """Gets the validation_actions of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+
+ validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions. Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. validationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action. The supported actions values are: \"Deny\" specifies that a validation failure results in a denied request. \"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses. \"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"` Clients should expect to handle additional values by ignoring any values not recognized. \"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers. Required. # noqa: E501
+
+ :return: The validation_actions of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._validation_actions
+
+ @validation_actions.setter
+ def validation_actions(self, validation_actions):
+ """Sets the validation_actions of this V1beta1ValidatingAdmissionPolicyBindingSpec.
+
+ validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions. Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. validationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action. The supported actions values are: \"Deny\" specifies that a validation failure results in a denied request. \"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses. \"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"` Clients should expect to handle additional values by ignoring any values not recognized. \"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers. Required. # noqa: E501
+
+ :param validation_actions: The validation_actions of this V1beta1ValidatingAdmissionPolicyBindingSpec. # noqa: E501
+ :type: list[str]
+ """
+
+ self._validation_actions = validation_actions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyBindingSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyBindingSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_list.py
new file mode 100644
index 0000000000..206fce4be6
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_list.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicyList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1beta1ValidatingAdmissionPolicy]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicyList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if items is not None:
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta1ValidatingAdmissionPolicyList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+
+ List of ValidatingAdmissionPolicy. # noqa: E501
+
+ :return: The items of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: list[V1beta1ValidatingAdmissionPolicy]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1beta1ValidatingAdmissionPolicyList.
+
+ List of ValidatingAdmissionPolicy. # noqa: E501
+
+ :param items: The items of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: list[V1beta1ValidatingAdmissionPolicy]
+ """
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta1ValidatingAdmissionPolicyList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+
+
+ :return: The metadata of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta1ValidatingAdmissionPolicyList.
+
+
+ :param metadata: The metadata of this V1beta1ValidatingAdmissionPolicyList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_spec.py
new file mode 100644
index 0000000000..9eba45198f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_spec.py
@@ -0,0 +1,286 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicySpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'audit_annotations': 'list[V1beta1AuditAnnotation]',
+ 'failure_policy': 'str',
+ 'match_conditions': 'list[V1beta1MatchCondition]',
+ 'match_constraints': 'V1beta1MatchResources',
+ 'param_kind': 'V1beta1ParamKind',
+ 'validations': 'list[V1beta1Validation]',
+ 'variables': 'list[V1beta1Variable]'
+ }
+
+ attribute_map = {
+ 'audit_annotations': 'auditAnnotations',
+ 'failure_policy': 'failurePolicy',
+ 'match_conditions': 'matchConditions',
+ 'match_constraints': 'matchConstraints',
+ 'param_kind': 'paramKind',
+ 'validations': 'validations',
+ 'variables': 'variables'
+ }
+
+ def __init__(self, audit_annotations=None, failure_policy=None, match_conditions=None, match_constraints=None, param_kind=None, validations=None, variables=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicySpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._audit_annotations = None
+ self._failure_policy = None
+ self._match_conditions = None
+ self._match_constraints = None
+ self._param_kind = None
+ self._validations = None
+ self._variables = None
+ self.discriminator = None
+
+ if audit_annotations is not None:
+ self.audit_annotations = audit_annotations
+ if failure_policy is not None:
+ self.failure_policy = failure_policy
+ if match_conditions is not None:
+ self.match_conditions = match_conditions
+ if match_constraints is not None:
+ self.match_constraints = match_constraints
+ if param_kind is not None:
+ self.param_kind = param_kind
+ if validations is not None:
+ self.validations = validations
+ if variables is not None:
+ self.variables = variables
+
+ @property
+ def audit_annotations(self):
+ """Gets the audit_annotations of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required. # noqa: E501
+
+ :return: The audit_annotations of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1beta1AuditAnnotation]
+ """
+ return self._audit_annotations
+
+ @audit_annotations.setter
+ def audit_annotations(self, audit_annotations):
+ """Sets the audit_annotations of this V1beta1ValidatingAdmissionPolicySpec.
+
+ auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required. # noqa: E501
+
+ :param audit_annotations: The audit_annotations of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1beta1AuditAnnotation]
+ """
+
+ self._audit_annotations = audit_annotations
+
+ @property
+ def failure_policy(self):
+ """Gets the failure_policy of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. failurePolicy does not define how validations that evaluate to false are handled. When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced. Allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
+
+ :return: The failure_policy of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: str
+ """
+ return self._failure_policy
+
+ @failure_policy.setter
+ def failure_policy(self, failure_policy):
+ """Sets the failure_policy of this V1beta1ValidatingAdmissionPolicySpec.
+
+ failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. failurePolicy does not define how validations that evaluate to false are handled. When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced. Allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
+
+ :param failure_policy: The failure_policy of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: str
+ """
+
+ self._failure_policy = failure_policy
+
+ @property
+ def match_conditions(self):
+ """Gets the match_conditions of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. If a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the policy is skipped # noqa: E501
+
+ :return: The match_conditions of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1beta1MatchCondition]
+ """
+ return self._match_conditions
+
+ @match_conditions.setter
+ def match_conditions(self, match_conditions):
+ """Sets the match_conditions of this V1beta1ValidatingAdmissionPolicySpec.
+
+ MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. If a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the policy is skipped # noqa: E501
+
+ :param match_conditions: The match_conditions of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1beta1MatchCondition]
+ """
+
+ self._match_conditions = match_conditions
+
+ @property
+ def match_constraints(self):
+ """Gets the match_constraints of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+
+ :return: The match_constraints of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: V1beta1MatchResources
+ """
+ return self._match_constraints
+
+ @match_constraints.setter
+ def match_constraints(self, match_constraints):
+ """Sets the match_constraints of this V1beta1ValidatingAdmissionPolicySpec.
+
+
+ :param match_constraints: The match_constraints of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: V1beta1MatchResources
+ """
+
+ self._match_constraints = match_constraints
+
+ @property
+ def param_kind(self):
+ """Gets the param_kind of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+
+ :return: The param_kind of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: V1beta1ParamKind
+ """
+ return self._param_kind
+
+ @param_kind.setter
+ def param_kind(self, param_kind):
+ """Sets the param_kind of this V1beta1ValidatingAdmissionPolicySpec.
+
+
+ :param param_kind: The param_kind of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: V1beta1ParamKind
+ """
+
+ self._param_kind = param_kind
+
+ @property
+ def validations(self):
+ """Gets the validations of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required. # noqa: E501
+
+ :return: The validations of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1beta1Validation]
+ """
+ return self._validations
+
+ @validations.setter
+ def validations(self, validations):
+ """Sets the validations of this V1beta1ValidatingAdmissionPolicySpec.
+
+ Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required. # noqa: E501
+
+ :param validations: The validations of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1beta1Validation]
+ """
+
+ self._validations = validations
+
+ @property
+ def variables(self):
+ """Gets the variables of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+
+ Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy. The expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic. # noqa: E501
+
+ :return: The variables of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :rtype: list[V1beta1Variable]
+ """
+ return self._variables
+
+ @variables.setter
+ def variables(self, variables):
+ """Sets the variables of this V1beta1ValidatingAdmissionPolicySpec.
+
+ Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy. The expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic. # noqa: E501
+
+ :param variables: The variables of this V1beta1ValidatingAdmissionPolicySpec. # noqa: E501
+ :type: list[V1beta1Variable]
+ """
+
+ self._variables = variables
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicySpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicySpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_status.py
new file mode 100644
index 0000000000..9cd435186b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validating_admission_policy_status.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1ValidatingAdmissionPolicyStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1Condition]',
+ 'observed_generation': 'int',
+ 'type_checking': 'V1beta1TypeChecking'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions',
+ 'observed_generation': 'observedGeneration',
+ 'type_checking': 'typeChecking'
+ }
+
+ def __init__(self, conditions=None, observed_generation=None, type_checking=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1ValidatingAdmissionPolicyStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self._observed_generation = None
+ self._type_checking = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+ if observed_generation is not None:
+ self.observed_generation = observed_generation
+ if type_checking is not None:
+ self.type_checking = type_checking
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+
+ The conditions represent the latest available observations of a policy's current state. # noqa: E501
+
+ :return: The conditions of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :rtype: list[V1Condition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1beta1ValidatingAdmissionPolicyStatus.
+
+ The conditions represent the latest available observations of a policy's current state. # noqa: E501
+
+ :param conditions: The conditions of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :type: list[V1Condition]
+ """
+
+ self._conditions = conditions
+
+ @property
+ def observed_generation(self):
+ """Gets the observed_generation of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+
+ The generation observed by the controller. # noqa: E501
+
+ :return: The observed_generation of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :rtype: int
+ """
+ return self._observed_generation
+
+ @observed_generation.setter
+ def observed_generation(self, observed_generation):
+ """Sets the observed_generation of this V1beta1ValidatingAdmissionPolicyStatus.
+
+ The generation observed by the controller. # noqa: E501
+
+ :param observed_generation: The observed_generation of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :type: int
+ """
+
+ self._observed_generation = observed_generation
+
+ @property
+ def type_checking(self):
+ """Gets the type_checking of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+
+
+ :return: The type_checking of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :rtype: V1beta1TypeChecking
+ """
+ return self._type_checking
+
+ @type_checking.setter
+ def type_checking(self, type_checking):
+ """Sets the type_checking of this V1beta1ValidatingAdmissionPolicyStatus.
+
+
+ :param type_checking: The type_checking of this V1beta1ValidatingAdmissionPolicyStatus. # noqa: E501
+ :type: V1beta1TypeChecking
+ """
+
+ self._type_checking = type_checking
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1ValidatingAdmissionPolicyStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validation.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validation.py
new file mode 100644
index 0000000000..bfa8a6676a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_validation.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1Validation(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'message': 'str',
+ 'message_expression': 'str',
+ 'reason': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'message': 'message',
+ 'message_expression': 'messageExpression',
+ 'reason': 'reason'
+ }
+
+ def __init__(self, expression=None, message=None, message_expression=None, reason=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1Validation - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._message = None
+ self._message_expression = None
+ self._reason = None
+ self.discriminator = None
+
+ self.expression = expression
+ if message is not None:
+ self.message = message
+ if message_expression is not None:
+ self.message_expression = message_expression
+ if reason is not None:
+ self.reason = reason
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1beta1Validation. # noqa: E501
+
+        Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"} - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"} - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"} Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. Required. # noqa: E501
+
+ :return: The expression of this V1beta1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1beta1Validation.
+
+        Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"} - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"} - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"} Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. Required. # noqa: E501
+
+ :param expression: The expression of this V1beta1Validation. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def message(self):
+ """Gets the message of this V1beta1Validation. # noqa: E501
+
+ Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\". # noqa: E501
+
+ :return: The message of this V1beta1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1beta1Validation.
+
+ Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\". # noqa: E501
+
+ :param message: The message of this V1beta1Validation. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def message_expression(self):
+ """Gets the message_expression of this V1beta1Validation. # noqa: E501
+
+ messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\" # noqa: E501
+
+ :return: The message_expression of this V1beta1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._message_expression
+
+ @message_expression.setter
+ def message_expression(self, message_expression):
+ """Sets the message_expression of this V1beta1Validation.
+
+ messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\" # noqa: E501
+
+ :param message_expression: The message_expression of this V1beta1Validation. # noqa: E501
+ :type: str
+ """
+
+ self._message_expression = message_expression
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1beta1Validation. # noqa: E501
+
+ Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client. # noqa: E501
+
+ :return: The reason of this V1beta1Validation. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1beta1Validation.
+
+ Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client. # noqa: E501
+
+ :param reason: The reason of this V1beta1Validation. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1Validation):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1Validation):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta1_variable.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_variable.py
new file mode 100644
index 0000000000..7492779dab
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta1_variable.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta1Variable(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'expression': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'expression': 'expression',
+ 'name': 'name'
+ }
+
+ def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V1beta1Variable - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._expression = None
+ self._name = None
+ self.discriminator = None
+
+ self.expression = expression
+ self.name = name
+
+ @property
+ def expression(self):
+ """Gets the expression of this V1beta1Variable. # noqa: E501
+
+ Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation. # noqa: E501
+
+ :return: The expression of this V1beta1Variable. # noqa: E501
+ :rtype: str
+ """
+ return self._expression
+
+ @expression.setter
+ def expression(self, expression):
+ """Sets the expression of this V1beta1Variable.
+
+ Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation. # noqa: E501
+
+ :param expression: The expression of this V1beta1Variable. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
+ raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
+
+ self._expression = expression
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta1Variable. # noqa: E501
+
+ Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo` # noqa: E501
+
+ :return: The name of this V1beta1Variable. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta1Variable.
+
+ Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo` # noqa: E501
+
+ :param name: The name of this V1beta1Variable. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta1Variable):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta1Variable):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_exempt_priority_level_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_exempt_priority_level_configuration.py
new file mode 100644
index 0000000000..02c9c8b223
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_exempt_priority_level_configuration.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2ExemptPriorityLevelConfiguration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'lendable_percent': 'int',
+ 'nominal_concurrency_shares': 'int'
+ }
+
+ attribute_map = {
+ 'lendable_percent': 'lendablePercent',
+ 'nominal_concurrency_shares': 'nominalConcurrencyShares'
+ }
+
+ def __init__(self, lendable_percent=None, nominal_concurrency_shares=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2ExemptPriorityLevelConfiguration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._lendable_percent = None
+ self._nominal_concurrency_shares = None
+ self.discriminator = None
+
+ if lendable_percent is not None:
+ self.lendable_percent = lendable_percent
+ if nominal_concurrency_shares is not None:
+ self.nominal_concurrency_shares = nominal_concurrency_shares
+
+ @property
+ def lendable_percent(self):
+ """Gets the lendable_percent of this V1beta2ExemptPriorityLevelConfiguration. # noqa: E501
+
+ `lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) # noqa: E501
+
+ :return: The lendable_percent of this V1beta2ExemptPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._lendable_percent
+
+ @lendable_percent.setter
+ def lendable_percent(self, lendable_percent):
+ """Sets the lendable_percent of this V1beta2ExemptPriorityLevelConfiguration.
+
+ `lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) # noqa: E501
+
+ :param lendable_percent: The lendable_percent of this V1beta2ExemptPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._lendable_percent = lendable_percent
+
+ @property
+ def nominal_concurrency_shares(self):
+ """Gets the nominal_concurrency_shares of this V1beta2ExemptPriorityLevelConfiguration. # noqa: E501
+
+ `nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values: NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k) Bigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero. # noqa: E501
+
+ :return: The nominal_concurrency_shares of this V1beta2ExemptPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._nominal_concurrency_shares
+
+ @nominal_concurrency_shares.setter
+ def nominal_concurrency_shares(self, nominal_concurrency_shares):
+ """Sets the nominal_concurrency_shares of this V1beta2ExemptPriorityLevelConfiguration.
+
+ `nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values: NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k) Bigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero. # noqa: E501
+
+ :param nominal_concurrency_shares: The nominal_concurrency_shares of this V1beta2ExemptPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._nominal_concurrency_shares = nominal_concurrency_shares
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2ExemptPriorityLevelConfiguration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2ExemptPriorityLevelConfiguration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_distinguisher_method.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_distinguisher_method.py
new file mode 100644
index 0000000000..5e1c83bb24
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_distinguisher_method.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2FlowDistinguisherMethod(object):
    """FlowDistinguisherMethod model (flowcontrol v1beta2).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each attribute name to its OpenAPI type.
    openapi_types = {
        'type': 'str'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'type': 'type'
    }

    def __init__(self, type=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2FlowDistinguisherMethod - a model defined in OpenAPI."""
        # Fall back to the default client configuration when none is given.
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._type = None
        self.discriminator = None

        # Assign through the property so validation runs.
        self.type = type

    @property
    def type(self):
        """`type` is the type of flow distinguisher method. The supported
        types are "ByUser" and "ByNamespace". Required.

        :return: The type of this V1beta2FlowDistinguisherMethod.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set `type`, rejecting ``None`` when client-side validation is on.

        :param type: The type of this V1beta2FlowDistinguisherMethod.
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model's properties serialized into a plain dict."""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {
                    key: (item.to_dict() if hasattr(item, "to_dict") else item)
                    for key, item in val.items()
                }
            else:
                out[name] = val
        return out

    def to_str(self):
        """Render the model as a pretty-printed string."""
        state = self.to_dict()
        return pprint.pformat(state)

    def __repr__(self):
        """Delegate ``repr`` (used by `print` and `pprint`) to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both the type and the serialized properties match."""
        return (
            isinstance(other, V1beta2FlowDistinguisherMethod)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        return (
            not isinstance(other, V1beta2FlowDistinguisherMethod)
            or self.to_dict() != other.to_dict()
        )
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema.py
new file mode 100644
index 0000000000..27289f8959
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2FlowSchema(object):
    """FlowSchema model (flowcontrol v1beta2).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each attribute name to its OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1beta2FlowSchemaSpec',
        'status': 'V1beta2FlowSchemaStatus'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2FlowSchema - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Assign only the values actually provided so unset attributes
        # remain None; setattr routes through the property setters.
        for attr_name, attr_value in (
            ("api_version", api_version),
            ("kind", kind),
            ("metadata", metadata),
            ("spec", spec),
            ("status", status),
        ):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1beta2FlowSchema.

        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1beta2FlowSchema.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard object metadata.

        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1beta2FlowSchema.

        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Desired behavior of the FlowSchema.

        :rtype: V1beta2FlowSchemaSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the spec of this V1beta2FlowSchema.

        :type: V1beta2FlowSchemaSpec
        """
        self._spec = spec

    @property
    def status(self):
        """Current status of the FlowSchema.

        :rtype: V1beta2FlowSchemaStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V1beta2FlowSchema.

        :type: V1beta2FlowSchemaStatus
        """
        self._status = status

    def to_dict(self):
        """Return the model's properties serialized into a plain dict."""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {
                    key: (item.to_dict() if hasattr(item, "to_dict") else item)
                    for key, item in val.items()
                }
            else:
                out[name] = val
        return out

    def to_str(self):
        """Render the model as a pretty-printed string."""
        state = self.to_dict()
        return pprint.pformat(state)

    def __repr__(self):
        """Delegate ``repr`` (used by `print` and `pprint`) to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both the type and the serialized properties match."""
        return (
            isinstance(other, V1beta2FlowSchema)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        return (
            not isinstance(other, V1beta2FlowSchema)
            or self.to_dict() != other.to_dict()
        )
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_condition.py
new file mode 100644
index 0000000000..2df0890e8d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_condition.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2FlowSchemaCondition(object):
    """FlowSchemaCondition model (flowcontrol v1beta2).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each attribute name to its OpenAPI type.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2FlowSchemaCondition - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Assign only the values actually provided so unset attributes
        # remain None; setattr routes through the property setters.
        for attr_name, attr_value in (
            ("last_transition_time", last_transition_time),
            ("message", message),
            ("reason", reason),
            ("status", status),
            ("type", type),
        ):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def last_transition_time(self):
        """`lastTransitionTime` is the last time the condition transitioned
        from one status to another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set the last_transition_time of this V1beta2FlowSchemaCondition.

        :type: datetime
        """
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """`message` is a human-readable message indicating details about the
        last transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set the message of this V1beta2FlowSchemaCondition.

        :type: str
        """
        self._message = message

    @property
    def reason(self):
        """`reason` is a unique, one-word, CamelCase reason for the
        condition's last transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason of this V1beta2FlowSchemaCondition.

        :type: str
        """
        self._reason = reason

    @property
    def status(self):
        """`status` is the status of the condition. Can be True, False,
        Unknown. Required.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V1beta2FlowSchemaCondition.

        :type: str
        """
        self._status = status

    @property
    def type(self):
        """`type` is the type of the condition. Required.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the type of this V1beta2FlowSchemaCondition.

        :type: str
        """
        self._type = type

    def to_dict(self):
        """Return the model's properties serialized into a plain dict."""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {
                    key: (item.to_dict() if hasattr(item, "to_dict") else item)
                    for key, item in val.items()
                }
            else:
                out[name] = val
        return out

    def to_str(self):
        """Render the model as a pretty-printed string."""
        state = self.to_dict()
        return pprint.pformat(state)

    def __repr__(self):
        """Delegate ``repr`` (used by `print` and `pprint`) to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both the type and the serialized properties match."""
        return (
            isinstance(other, V1beta2FlowSchemaCondition)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        return (
            not isinstance(other, V1beta2FlowSchemaCondition)
            or self.to_dict() != other.to_dict()
        )
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_list.py
new file mode 100644
index 0000000000..8d2d11ae25
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2FlowSchemaList(object):
    """FlowSchemaList model (flowcontrol v1beta2).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each attribute name to its OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1beta2FlowSchema]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2FlowSchemaList - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # `items` is required; its setter enforces the not-None rule.
        self.items = items
        # Optional attributes are assigned only when provided.
        for attr_name, attr_value in (
            ("api_version", api_version),
            ("kind", kind),
            ("metadata", metadata),
        ):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1beta2FlowSchemaList.

        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """`items` is a list of FlowSchemas.

        :rtype: list[V1beta2FlowSchema]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set `items`, rejecting ``None`` when client-side validation is on.

        :type: list[V1beta2FlowSchema]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1beta2FlowSchemaList.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard list metadata.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1beta2FlowSchemaList.

        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Return the model's properties serialized into a plain dict."""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {
                    key: (item.to_dict() if hasattr(item, "to_dict") else item)
                    for key, item in val.items()
                }
            else:
                out[name] = val
        return out

    def to_str(self):
        """Render the model as a pretty-printed string."""
        state = self.to_dict()
        return pprint.pformat(state)

    def __repr__(self):
        """Delegate ``repr`` (used by `print` and `pprint`) to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both the type and the serialized properties match."""
        return (
            isinstance(other, V1beta2FlowSchemaList)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        return (
            not isinstance(other, V1beta2FlowSchemaList)
            or self.to_dict() != other.to_dict()
        )
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_spec.py
new file mode 100644
index 0000000000..9250e0d913
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_spec.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2FlowSchemaSpec(object):
    """FlowSchemaSpec model (flowcontrol v1beta2).

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each attribute name to its OpenAPI type.
    openapi_types = {
        'distinguisher_method': 'V1beta2FlowDistinguisherMethod',
        'matching_precedence': 'int',
        'priority_level_configuration': 'V1beta2PriorityLevelConfigurationReference',
        'rules': 'list[V1beta2PolicyRulesWithSubjects]'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'distinguisher_method': 'distinguisherMethod',
        'matching_precedence': 'matchingPrecedence',
        'priority_level_configuration': 'priorityLevelConfiguration',
        'rules': 'rules'
    }

    def __init__(self, distinguisher_method=None, matching_precedence=None, priority_level_configuration=None, rules=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2FlowSchemaSpec - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._distinguisher_method = None
        self._matching_precedence = None
        self._priority_level_configuration = None
        self._rules = None
        self.discriminator = None

        # `priority_level_configuration` is required; its setter enforces
        # the not-None rule.
        self.priority_level_configuration = priority_level_configuration
        # Optional attributes are assigned only when provided.
        for attr_name, attr_value in (
            ("distinguisher_method", distinguisher_method),
            ("matching_precedence", matching_precedence),
            ("rules", rules),
        ):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def distinguisher_method(self):
        """Method for distinguishing flows within this schema.

        :rtype: V1beta2FlowDistinguisherMethod
        """
        return self._distinguisher_method

    @distinguisher_method.setter
    def distinguisher_method(self, distinguisher_method):
        """Set the distinguisher_method of this V1beta2FlowSchemaSpec.

        :type: V1beta2FlowDistinguisherMethod
        """
        self._distinguisher_method = distinguisher_method

    @property
    def matching_precedence(self):
        """`matchingPrecedence` is used to choose among the FlowSchemas that
        match a given request. The chosen FlowSchema is among those with the
        numerically lowest (which we take to be logically highest)
        MatchingPrecedence. Each MatchingPrecedence value must be ranged in
        [1,10000]. Note that if the precedence is not specified, it will be
        set to 1000 as default.

        :rtype: int
        """
        return self._matching_precedence

    @matching_precedence.setter
    def matching_precedence(self, matching_precedence):
        """Set the matching_precedence of this V1beta2FlowSchemaSpec.

        :type: int
        """
        self._matching_precedence = matching_precedence

    @property
    def priority_level_configuration(self):
        """Reference to the priority level this schema maps requests to.

        :rtype: V1beta2PriorityLevelConfigurationReference
        """
        return self._priority_level_configuration

    @priority_level_configuration.setter
    def priority_level_configuration(self, priority_level_configuration):
        """Set `priority_level_configuration`, rejecting ``None`` when
        client-side validation is on.

        :type: V1beta2PriorityLevelConfigurationReference
        """
        if self.local_vars_configuration.client_side_validation and priority_level_configuration is None:  # noqa: E501
            raise ValueError("Invalid value for `priority_level_configuration`, must not be `None`")  # noqa: E501
        self._priority_level_configuration = priority_level_configuration

    @property
    def rules(self):
        """`rules` describes which requests will match this flow schema. This
        FlowSchema matches a request if and only if at least one member of
        rules matches the request. if it is an empty slice, there will be no
        requests matching the FlowSchema.

        :rtype: list[V1beta2PolicyRulesWithSubjects]
        """
        return self._rules

    @rules.setter
    def rules(self, rules):
        """Set the rules of this V1beta2FlowSchemaSpec.

        :type: list[V1beta2PolicyRulesWithSubjects]
        """
        self._rules = rules

    def to_dict(self):
        """Return the model's properties serialized into a plain dict."""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {
                    key: (item.to_dict() if hasattr(item, "to_dict") else item)
                    for key, item in val.items()
                }
            else:
                out[name] = val
        return out

    def to_str(self):
        """Render the model as a pretty-printed string."""
        state = self.to_dict()
        return pprint.pformat(state)

    def __repr__(self):
        """Delegate ``repr`` (used by `print` and `pprint`) to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both the type and the serialized properties match."""
        return (
            isinstance(other, V1beta2FlowSchemaSpec)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Logical inverse of :meth:`__eq__`."""
        return (
            not isinstance(other, V1beta2FlowSchemaSpec)
            or self.to_dict() != other.to_dict()
        )
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_status.py
new file mode 100644
index 0000000000..625e4ffe36
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_flow_schema_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2FlowSchemaStatus(object):
    """OpenAPI model for the observed status of a FlowSchema.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'conditions': 'list[V1beta2FlowSchemaCondition]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'conditions': 'conditions'
    }

    def __init__(self, conditions=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2FlowSchemaStatus - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._conditions = None
        self.discriminator = None

        # Optional field: only store an explicitly supplied value.
        if conditions is not None:
            self.conditions = conditions

    @property
    def conditions(self):
        """Get `conditions`, the list of current states of the FlowSchema.

        :return: The conditions of this V1beta2FlowSchemaStatus.  # noqa: E501
        :rtype: list[V1beta2FlowSchemaCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """Set `conditions`, the list of current states of the FlowSchema.

        :param conditions: The conditions of this V1beta2FlowSchemaStatus.  # noqa: E501
        :type: list[V1beta2FlowSchemaCondition]
        """
        # Optional field: no client-side validation is applied.
        self._conditions = conditions

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _serialize(value):
            # Order mirrors the generated serializer: list -> model -> dict.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both are V1beta2FlowSchemaStatus with equal dicts."""
        return (isinstance(other, V1beta2FlowSchemaStatus)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not isinstance(other, V1beta2FlowSchemaStatus)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_group_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_group_subject.py
new file mode 100644
index 0000000000..c5932db440
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_group_subject.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2GroupSubject(object):
    """OpenAPI model for a flow-control group subject.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'name': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'name'
    }

    def __init__(self, name=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2GroupSubject - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._name = None
        self.discriminator = None

        # Required field: routed through the setter so validation applies.
        self.name = name

    @property
    def name(self):
        """Get `name`: the user group that matches, or "*" for all groups.

        See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go
        for some well-known group names. Required.

        :return: The name of this V1beta2GroupSubject.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set `name`; rejects None under client-side validation.

        :param name: The name of this V1beta2GroupSubject.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _serialize(value):
            # Order mirrors the generated serializer: list -> model -> dict.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both are V1beta2GroupSubject with equal dicts."""
        return (isinstance(other, V1beta2GroupSubject)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not isinstance(other, V1beta2GroupSubject)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_limit_response.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_limit_response.py
new file mode 100644
index 0000000000..301aa55c13
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_limit_response.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2LimitResponse(object):
    """OpenAPI model describing how to handle requests that exceed a limit.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'queuing': 'V1beta2QueuingConfiguration',
        'type': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'queuing': 'queuing',
        'type': 'type'
    }

    def __init__(self, queuing=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2LimitResponse - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._queuing = None
        self._type = None
        self.discriminator = None

        # `queuing` is optional; `type` is required and validated in its setter.
        if queuing is not None:
            self.queuing = queuing
        self.type = type

    @property
    def queuing(self):
        """Get `queuing`.

        :return: The queuing of this V1beta2LimitResponse.  # noqa: E501
        :rtype: V1beta2QueuingConfiguration
        """
        return self._queuing

    @queuing.setter
    def queuing(self, queuing):
        """Set `queuing`.

        :param queuing: The queuing of this V1beta2LimitResponse.  # noqa: E501
        :type: V1beta2QueuingConfiguration
        """
        # Optional field: no client-side validation is applied.
        self._queuing = queuing

    @property
    def type(self):
        """Get `type`: "Queue" or "Reject".

        "Queue" means requests that cannot execute on arrival are queued
        until they can execute or a queuing limit is reached; "Reject"
        means such requests are rejected. Required.

        :return: The type of this V1beta2LimitResponse.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set `type`; rejects None under client-side validation.

        :param type: The type of this V1beta2LimitResponse.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _serialize(value):
            # Order mirrors the generated serializer: list -> model -> dict.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both are V1beta2LimitResponse with equal dicts."""
        return (isinstance(other, V1beta2LimitResponse)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not isinstance(other, V1beta2LimitResponse)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_limited_priority_level_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_limited_priority_level_configuration.py
new file mode 100644
index 0000000000..5527259bcb
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_limited_priority_level_configuration.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2LimitedPriorityLevelConfiguration(object):
    """OpenAPI model for the configuration of a limited priority level.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'assured_concurrency_shares': 'int',
        'borrowing_limit_percent': 'int',
        'lendable_percent': 'int',
        'limit_response': 'V1beta2LimitResponse'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'assured_concurrency_shares': 'assuredConcurrencyShares',
        'borrowing_limit_percent': 'borrowingLimitPercent',
        'lendable_percent': 'lendablePercent',
        'limit_response': 'limitResponse'
    }

    def __init__(self, assured_concurrency_shares=None, borrowing_limit_percent=None, lendable_percent=None, limit_response=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2LimitedPriorityLevelConfiguration - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._assured_concurrency_shares = None
        self._borrowing_limit_percent = None
        self._lendable_percent = None
        self._limit_response = None
        self.discriminator = None

        # All fields are optional; only explicitly supplied values are stored.
        if assured_concurrency_shares is not None:
            self.assured_concurrency_shares = assured_concurrency_shares
        if borrowing_limit_percent is not None:
            self.borrowing_limit_percent = borrowing_limit_percent
        if lendable_percent is not None:
            self.lendable_percent = lendable_percent
        if limit_response is not None:
            self.limit_response = limit_response

    @property
    def assured_concurrency_shares(self):
        """Get `assuredConcurrencyShares` (ACS), the execution limit shares.

        ACS must be positive. The server's concurrency limit (SCL) is
        divided among concurrency-controlled priority levels in proportion
        to their assured concurrency shares, giving each level's assured
        concurrency value: ACV(l) = ceil( SCL * ACS(l) / sum_k ACS(k) ).
        Larger ACS reserves more concurrent requests (at the expense of
        every other priority level). Defaults to 30.

        :return: The assured_concurrency_shares of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :rtype: int
        """
        return self._assured_concurrency_shares

    @assured_concurrency_shares.setter
    def assured_concurrency_shares(self, assured_concurrency_shares):
        """Set `assuredConcurrencyShares` (see the property docstring).

        :param assured_concurrency_shares: The assured_concurrency_shares of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :type: int
        """
        # Optional field: no client-side validation is applied.
        self._assured_concurrency_shares = assured_concurrency_shares

    @property
    def borrowing_limit_percent(self):
        """Get `borrowingLimitPercent`, the cap on seats borrowed from others.

        When present it must be a non-negative integer and
        BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ).
        Values above 100 allow borrowing beyond the level's own nominal
        concurrency limit; when nil, the limit is effectively infinite.

        :return: The borrowing_limit_percent of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :rtype: int
        """
        return self._borrowing_limit_percent

    @borrowing_limit_percent.setter
    def borrowing_limit_percent(self, borrowing_limit_percent):
        """Set `borrowingLimitPercent` (see the property docstring).

        :param borrowing_limit_percent: The borrowing_limit_percent of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :type: int
        """
        # Optional field: no client-side validation is applied.
        self._borrowing_limit_percent = borrowing_limit_percent

    @property
    def lendable_percent(self):
        """Get `lendablePercent`, the share of NominalCL lendable to others.

        Must be between 0 and 100 inclusive (default 0);
        LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ).

        :return: The lendable_percent of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :rtype: int
        """
        return self._lendable_percent

    @lendable_percent.setter
    def lendable_percent(self, lendable_percent):
        """Set `lendablePercent` (see the property docstring).

        :param lendable_percent: The lendable_percent of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :type: int
        """
        # Optional field: no client-side validation is applied.
        self._lendable_percent = lendable_percent

    @property
    def limit_response(self):
        """Get `limitResponse`.

        :return: The limit_response of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :rtype: V1beta2LimitResponse
        """
        return self._limit_response

    @limit_response.setter
    def limit_response(self, limit_response):
        """Set `limitResponse`.

        :param limit_response: The limit_response of this V1beta2LimitedPriorityLevelConfiguration.  # noqa: E501
        :type: V1beta2LimitResponse
        """
        # Optional field: no client-side validation is applied.
        self._limit_response = limit_response

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _serialize(value):
            # Order mirrors the generated serializer: list -> model -> dict.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both are V1beta2LimitedPriorityLevelConfiguration with equal dicts."""
        return (isinstance(other, V1beta2LimitedPriorityLevelConfiguration)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not isinstance(other, V1beta2LimitedPriorityLevelConfiguration)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_non_resource_policy_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_non_resource_policy_rule.py
new file mode 100644
index 0000000000..aadefcf716
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_non_resource_policy_rule.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2NonResourcePolicyRule(object):
    """OpenAPI model matching requests by verb and non-resource URL.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'non_resource_ur_ls': 'list[str]',
        'verbs': 'list[str]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'non_resource_ur_ls': 'nonResourceURLs',
        'verbs': 'verbs'
    }

    def __init__(self, non_resource_ur_ls=None, verbs=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2NonResourcePolicyRule - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._non_resource_ur_ls = None
        self._verbs = None
        self.discriminator = None

        # Both fields are required: route through setters so validation applies.
        self.non_resource_ur_ls = non_resource_ur_ls
        self.verbs = verbs

    @property
    def non_resource_ur_ls(self):
        """Get `nonResourceURLs`, a non-empty set of url prefixes.

        For example: "/healthz" is legal, "/hea*" is illegal, "/hea" is
        legal but matches nothing, "/hea/*" also matches nothing, and
        "/healthz/*" matches all per-component health checks. "*" matches
        all non-resource urls and, if present, must be the only entry.
        Required.

        :return: The non_resource_ur_ls of this V1beta2NonResourcePolicyRule.  # noqa: E501
        :rtype: list[str]
        """
        return self._non_resource_ur_ls

    @non_resource_ur_ls.setter
    def non_resource_ur_ls(self, non_resource_ur_ls):
        """Set `nonResourceURLs`; rejects None under client-side validation.

        :param non_resource_ur_ls: The non_resource_ur_ls of this V1beta2NonResourcePolicyRule.  # noqa: E501
        :type: list[str]
        """
        if self.local_vars_configuration.client_side_validation and non_resource_ur_ls is None:  # noqa: E501
            raise ValueError("Invalid value for `non_resource_ur_ls`, must not be `None`")  # noqa: E501
        self._non_resource_ur_ls = non_resource_ur_ls

    @property
    def verbs(self):
        """Get `verbs`, a non-empty list of matching verbs.

        "*" matches all verbs and, if present, must be the only entry.
        Required.

        :return: The verbs of this V1beta2NonResourcePolicyRule.  # noqa: E501
        :rtype: list[str]
        """
        return self._verbs

    @verbs.setter
    def verbs(self, verbs):
        """Set `verbs`; rejects None under client-side validation.

        :param verbs: The verbs of this V1beta2NonResourcePolicyRule.  # noqa: E501
        :type: list[str]
        """
        if self.local_vars_configuration.client_side_validation and verbs is None:  # noqa: E501
            raise ValueError("Invalid value for `verbs`, must not be `None`")  # noqa: E501
        self._verbs = verbs

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _serialize(value):
            # Order mirrors the generated serializer: list -> model -> dict.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both are V1beta2NonResourcePolicyRule with equal dicts."""
        return (isinstance(other, V1beta2NonResourcePolicyRule)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not isinstance(other, V1beta2NonResourcePolicyRule)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_policy_rules_with_subjects.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_policy_rules_with_subjects.py
new file mode 100644
index 0000000000..de25768d5c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_policy_rules_with_subjects.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2PolicyRulesWithSubjects(object):
    """OpenAPI model pairing request-matching rules with subjects.

    NOTE: auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'non_resource_rules': 'list[V1beta2NonResourcePolicyRule]',
        'resource_rules': 'list[V1beta2ResourcePolicyRule]',
        'subjects': 'list[V1beta2Subject]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'non_resource_rules': 'nonResourceRules',
        'resource_rules': 'resourceRules',
        'subjects': 'subjects'
    }

    def __init__(self, non_resource_rules=None, resource_rules=None, subjects=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2PolicyRulesWithSubjects - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._non_resource_rules = None
        self._resource_rules = None
        self._subjects = None
        self.discriminator = None

        # Rule lists are optional; `subjects` is required and validated.
        if non_resource_rules is not None:
            self.non_resource_rules = non_resource_rules
        if resource_rules is not None:
            self.resource_rules = resource_rules
        self.subjects = subjects

    @property
    def non_resource_rules(self):
        """Get `nonResourceRules`: NonResourcePolicyRules matched by verb
        and target non-resource URL.

        :return: The non_resource_rules of this V1beta2PolicyRulesWithSubjects.  # noqa: E501
        :rtype: list[V1beta2NonResourcePolicyRule]
        """
        return self._non_resource_rules

    @non_resource_rules.setter
    def non_resource_rules(self, non_resource_rules):
        """Set `nonResourceRules`.

        :param non_resource_rules: The non_resource_rules of this V1beta2PolicyRulesWithSubjects.  # noqa: E501
        :type: list[V1beta2NonResourcePolicyRule]
        """
        # Optional field: no client-side validation is applied.
        self._non_resource_rules = non_resource_rules

    @property
    def resource_rules(self):
        """Get `resourceRules`: ResourcePolicyRules matched by verb and
        target resource. At least one of `resourceRules` and
        `nonResourceRules` has to be non-empty.

        :return: The resource_rules of this V1beta2PolicyRulesWithSubjects.  # noqa: E501
        :rtype: list[V1beta2ResourcePolicyRule]
        """
        return self._resource_rules

    @resource_rules.setter
    def resource_rules(self, resource_rules):
        """Set `resourceRules`.

        :param resource_rules: The resource_rules of this V1beta2PolicyRulesWithSubjects.  # noqa: E501
        :type: list[V1beta2ResourcePolicyRule]
        """
        # Optional field: no client-side validation is applied.
        self._resource_rules = resource_rules

    @property
    def subjects(self):
        """Get `subjects`: the normal users, serviceaccounts, or groups this
        rule cares about. Must have at least one member; a slice including
        both the system:authenticated and system:unauthenticated user
        groups matches every request. Required.

        :return: The subjects of this V1beta2PolicyRulesWithSubjects.  # noqa: E501
        :rtype: list[V1beta2Subject]
        """
        return self._subjects

    @subjects.setter
    def subjects(self, subjects):
        """Set `subjects`; rejects None under client-side validation.

        :param subjects: The subjects of this V1beta2PolicyRulesWithSubjects.  # noqa: E501
        :type: list[V1beta2Subject]
        """
        if self.local_vars_configuration.client_side_validation and subjects is None:  # noqa: E501
            raise ValueError("Invalid value for `subjects`, must not be `None`")  # noqa: E501
        self._subjects = subjects

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _serialize(value):
            # Order mirrors the generated serializer: list -> model -> dict.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when both are V1beta2PolicyRulesWithSubjects with equal dicts."""
        return (isinstance(other, V1beta2PolicyRulesWithSubjects)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not isinstance(other, V1beta2PolicyRulesWithSubjects)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration.py
new file mode 100644
index 0000000000..4b0f0fc1e9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2PriorityLevelConfiguration(object):
    """Model for a flowcontrol ``v1beta2`` PriorityLevelConfiguration object.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1beta2PriorityLevelConfigurationSpec',
        'status': 'V1beta2PriorityLevelConfigurationStatus',
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status',
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None,
                 status=None, local_vars_configuration=None):
        """Create the model; every field is an optional keyword argument."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Route each supplied value through its property setter.
        for field, supplied in (('api_version', api_version),
                                ('kind', kind),
                                ('metadata', metadata),
                                ('spec', spec),
                                ('status', status)):
            if supplied is not None:
                setattr(self, field, supplied)

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this representation
        of an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        self._api_version = api_version

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        self._kind = kind

    @property
    def metadata(self):
        """V1ObjectMeta: the ``metadata`` of this V1beta2PriorityLevelConfiguration."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata

    @property
    def spec(self):
        """V1beta2PriorityLevelConfigurationSpec: the ``spec`` of this object."""
        return self._spec

    @spec.setter
    def spec(self, spec):
        self._spec = spec

    @property
    def status(self):
        """V1beta2PriorityLevelConfigurationStatus: the ``status`` of this object."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    def to_dict(self):
        """Serialize the model into a plain ``dict`` of builtin values."""
        def _plain(obj):
            # Nested generated models expose ``to_dict``; other values pass through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        out = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                out[attr] = [_plain(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                out[attr] = value.to_dict()
            elif isinstance(value, dict):
                out[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                out[attr] = value
        return out

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized contents match."""
        return (isinstance(other, V1beta2PriorityLevelConfiguration)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return (not isinstance(other, V1beta2PriorityLevelConfiguration)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_condition.py
new file mode 100644
index 0000000000..41489fdecf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_condition.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2PriorityLevelConfigurationCondition(object):
    """Model for a single condition of a ``v1beta2`` PriorityLevelConfiguration.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str',
    }

    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type',
    }

    def __init__(self, last_transition_time=None, message=None, reason=None,
                 status=None, type=None, local_vars_configuration=None):
        """Create the model; every field is an optional keyword argument."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Route each supplied value through its property setter.
        for field, supplied in (('last_transition_time', last_transition_time),
                                ('message', message),
                                ('reason', reason),
                                ('status', status),
                                ('type', type)):
            if supplied is not None:
                setattr(self, field, supplied)

    @property
    def last_transition_time(self):
        """datetime: ``lastTransitionTime`` is the last time the condition
        transitioned from one status to another."""
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """str: ``message`` is a human-readable message indicating details
        about last transition."""
        return self._message

    @message.setter
    def message(self, message):
        self._message = message

    @property
    def reason(self):
        """str: ``reason`` is a unique, one-word, CamelCase reason for the
        condition's last transition."""
        return self._reason

    @reason.setter
    def reason(self, reason):
        self._reason = reason

    @property
    def status(self):
        """str: ``status`` is the status of the condition. Can be True, False,
        Unknown. Required by the API."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def type(self):
        """str: ``type`` is the type of the condition. Required by the API."""
        return self._type

    @type.setter
    def type(self, type):
        self._type = type

    def to_dict(self):
        """Serialize the model into a plain ``dict`` of builtin values."""
        def _plain(obj):
            # Nested generated models expose ``to_dict``; other values pass through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        out = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                out[attr] = [_plain(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                out[attr] = value.to_dict()
            elif isinstance(value, dict):
                out[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                out[attr] = value
        return out

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized contents match."""
        return (isinstance(other, V1beta2PriorityLevelConfigurationCondition)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return (not isinstance(other, V1beta2PriorityLevelConfigurationCondition)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_list.py
new file mode 100644
index 0000000000..b6228ae078
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2PriorityLevelConfigurationList(object):
    """Model for a list of ``v1beta2`` PriorityLevelConfiguration objects.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1beta2PriorityLevelConfiguration]',
        'kind': 'str',
        'metadata': 'V1ListMeta',
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata',
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None,
                 local_vars_configuration=None):
        """Create the model; ``items`` is required, the rest are optional."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # Required field: always routed through the validating setter.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this representation
        of an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        self._api_version = api_version

    @property
    def items(self):
        """list[V1beta2PriorityLevelConfiguration]: ``items`` is a list of
        request-priorities. Required."""
        return self._items

    @items.setter
    def items(self, items):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """str: Kind is a string value representing the REST resource this
        object represents. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        self._kind = kind

    @property
    def metadata(self):
        """V1ListMeta: the ``metadata`` of this list object."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model into a plain ``dict`` of builtin values."""
        def _plain(obj):
            # Nested generated models expose ``to_dict``; other values pass through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        out = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                out[attr] = [_plain(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                out[attr] = value.to_dict()
            elif isinstance(value, dict):
                out[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                out[attr] = value
        return out

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized contents match."""
        return (isinstance(other, V1beta2PriorityLevelConfigurationList)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return (not isinstance(other, V1beta2PriorityLevelConfigurationList)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_reference.py
new file mode 100644
index 0000000000..4912fd0c79
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_reference.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2PriorityLevelConfigurationReference(object):
    """Model referencing a ``v1beta2`` PriorityLevelConfiguration by name.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'name': 'str',
    }

    attribute_map = {
        'name': 'name',
    }

    def __init__(self, name=None, local_vars_configuration=None):
        """Create the model; ``name`` is required."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self.discriminator = None

        # Required field: always routed through the validating setter.
        self.name = name

    @property
    def name(self):
        """str: ``name`` is the name of the priority level configuration being
        referenced. Required."""
        return self._name

    @name.setter
    def name(self, name):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    def to_dict(self):
        """Serialize the model into a plain ``dict`` of builtin values."""
        def _plain(obj):
            # Nested generated models expose ``to_dict``; other values pass through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        out = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                out[attr] = [_plain(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                out[attr] = value.to_dict()
            elif isinstance(value, dict):
                out[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                out[attr] = value
        return out

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized contents match."""
        return (isinstance(other, V1beta2PriorityLevelConfigurationReference)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return (not isinstance(other, V1beta2PriorityLevelConfigurationReference)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_spec.py
new file mode 100644
index 0000000000..f5381810d9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_spec.py
@@ -0,0 +1,175 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta2PriorityLevelConfigurationSpec(object):
    """Model for the spec of a ``v1beta2`` PriorityLevelConfiguration.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'exempt': 'V1beta2ExemptPriorityLevelConfiguration',
        'limited': 'V1beta2LimitedPriorityLevelConfiguration',
        'type': 'str',
    }

    attribute_map = {
        'exempt': 'exempt',
        'limited': 'limited',
        'type': 'type',
    }

    def __init__(self, exempt=None, limited=None, type=None,
                 local_vars_configuration=None):
        """Create the model; ``type`` is required, the rest are optional."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._exempt = None
        self._limited = None
        self._type = None
        self.discriminator = None

        if exempt is not None:
            self.exempt = exempt
        if limited is not None:
            self.limited = limited
        # Required field: always routed through the validating setter.
        self.type = type

    @property
    def exempt(self):
        """V1beta2ExemptPriorityLevelConfiguration: the ``exempt`` configuration."""
        return self._exempt

    @exempt.setter
    def exempt(self, exempt):
        self._exempt = exempt

    @property
    def limited(self):
        """V1beta2LimitedPriorityLevelConfiguration: the ``limited`` configuration."""
        return self._limited

    @limited.setter
    def limited(self, limited):
        self._limited = limited

    @property
    def type(self):
        """str: ``type`` indicates whether this priority level is subject to
        limitation on request execution. \"Exempt\" means requests of this
        priority level are not subject to a limit (and thus are never queued)
        and do not detract from the capacity made available to other priority
        levels. \"Limited\" means requests of this priority level are subject
        to limits and some of the server's limited capacity is made available
        exclusively to this priority level. Required."""
        return self._type

    @type.setter
    def type(self, type):
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Serialize the model into a plain ``dict`` of builtin values."""
        def _plain(obj):
            # Nested generated models expose ``to_dict``; other values pass through.
            return obj.to_dict() if hasattr(obj, "to_dict") else obj

        out = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                out[attr] = [_plain(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                out[attr] = value.to_dict()
            elif isinstance(value, dict):
                out[attr] = {key: _plain(val) for key, val in value.items()}
            else:
                out[attr] = value
        return out

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by ``print`` and ``pprint``."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when their serialized contents match."""
        return (isinstance(other, V1beta2PriorityLevelConfigurationSpec)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return (not isinstance(other, V1beta2PriorityLevelConfigurationSpec)
                or self.to_dict() != other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_status.py
new file mode 100644
index 0000000000..f536745890
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_priority_level_configuration_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2PriorityLevelConfigurationStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1beta2PriorityLevelConfigurationCondition]'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions'
+ }
+
+ def __init__(self, conditions=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2PriorityLevelConfigurationStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1beta2PriorityLevelConfigurationStatus. # noqa: E501
+
+ `conditions` is the current state of \"request-priority\". # noqa: E501
+
+ :return: The conditions of this V1beta2PriorityLevelConfigurationStatus. # noqa: E501
+ :rtype: list[V1beta2PriorityLevelConfigurationCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1beta2PriorityLevelConfigurationStatus.
+
+ `conditions` is the current state of \"request-priority\". # noqa: E501
+
+ :param conditions: The conditions of this V1beta2PriorityLevelConfigurationStatus. # noqa: E501
+ :type: list[V1beta2PriorityLevelConfigurationCondition]
+ """
+
+ self._conditions = conditions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2PriorityLevelConfigurationStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2PriorityLevelConfigurationStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_queuing_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_queuing_configuration.py
new file mode 100644
index 0000000000..0ebd0d4d4b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_queuing_configuration.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2QueuingConfiguration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'hand_size': 'int',
+ 'queue_length_limit': 'int',
+ 'queues': 'int'
+ }
+
+ attribute_map = {
+ 'hand_size': 'handSize',
+ 'queue_length_limit': 'queueLengthLimit',
+ 'queues': 'queues'
+ }
+
+ def __init__(self, hand_size=None, queue_length_limit=None, queues=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2QueuingConfiguration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._hand_size = None
+ self._queue_length_limit = None
+ self._queues = None
+ self.discriminator = None
+
+ if hand_size is not None:
+ self.hand_size = hand_size
+ if queue_length_limit is not None:
+ self.queue_length_limit = queue_length_limit
+ if queues is not None:
+ self.queues = queues
+
+ @property
+ def hand_size(self):
+ """Gets the hand_size of this V1beta2QueuingConfiguration. # noqa: E501
+
+ `handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8. # noqa: E501
+
+ :return: The hand_size of this V1beta2QueuingConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._hand_size
+
+ @hand_size.setter
+ def hand_size(self, hand_size):
+ """Sets the hand_size of this V1beta2QueuingConfiguration.
+
+ `handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8. # noqa: E501
+
+ :param hand_size: The hand_size of this V1beta2QueuingConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._hand_size = hand_size
+
+ @property
+ def queue_length_limit(self):
+ """Gets the queue_length_limit of this V1beta2QueuingConfiguration. # noqa: E501
+
+ `queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50. # noqa: E501
+
+ :return: The queue_length_limit of this V1beta2QueuingConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._queue_length_limit
+
+ @queue_length_limit.setter
+ def queue_length_limit(self, queue_length_limit):
+ """Sets the queue_length_limit of this V1beta2QueuingConfiguration.
+
+ `queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50. # noqa: E501
+
+ :param queue_length_limit: The queue_length_limit of this V1beta2QueuingConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._queue_length_limit = queue_length_limit
+
+ @property
+ def queues(self):
+ """Gets the queues of this V1beta2QueuingConfiguration. # noqa: E501
+
+ `queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64. # noqa: E501
+
+ :return: The queues of this V1beta2QueuingConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._queues
+
+ @queues.setter
+ def queues(self, queues):
+ """Sets the queues of this V1beta2QueuingConfiguration.
+
+ `queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64. # noqa: E501
+
+ :param queues: The queues of this V1beta2QueuingConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._queues = queues
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2QueuingConfiguration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2QueuingConfiguration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_resource_policy_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_resource_policy_rule.py
new file mode 100644
index 0000000000..e48263820e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_resource_policy_rule.py
@@ -0,0 +1,237 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2ResourcePolicyRule(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'cluster_scope': 'bool',
+ 'namespaces': 'list[str]',
+ 'resources': 'list[str]',
+ 'verbs': 'list[str]'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'cluster_scope': 'clusterScope',
+ 'namespaces': 'namespaces',
+ 'resources': 'resources',
+ 'verbs': 'verbs'
+ }
+
+ def __init__(self, api_groups=None, cluster_scope=None, namespaces=None, resources=None, verbs=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2ResourcePolicyRule - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._cluster_scope = None
+ self._namespaces = None
+ self._resources = None
+ self._verbs = None
+ self.discriminator = None
+
+ self.api_groups = api_groups
+ if cluster_scope is not None:
+ self.cluster_scope = cluster_scope
+ if namespaces is not None:
+ self.namespaces = namespaces
+ self.resources = resources
+ self.verbs = verbs
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1beta2ResourcePolicyRule. # noqa: E501
+
+ `apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required. # noqa: E501
+
+ :return: The api_groups of this V1beta2ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1beta2ResourcePolicyRule.
+
+ `apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required. # noqa: E501
+
+ :param api_groups: The api_groups of this V1beta2ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and api_groups is None: # noqa: E501
+ raise ValueError("Invalid value for `api_groups`, must not be `None`") # noqa: E501
+
+ self._api_groups = api_groups
+
+ @property
+ def cluster_scope(self):
+ """Gets the cluster_scope of this V1beta2ResourcePolicyRule. # noqa: E501
+
+ `clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list. # noqa: E501
+
+ :return: The cluster_scope of this V1beta2ResourcePolicyRule. # noqa: E501
+ :rtype: bool
+ """
+ return self._cluster_scope
+
+ @cluster_scope.setter
+ def cluster_scope(self, cluster_scope):
+ """Sets the cluster_scope of this V1beta2ResourcePolicyRule.
+
+ `clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list. # noqa: E501
+
+ :param cluster_scope: The cluster_scope of this V1beta2ResourcePolicyRule. # noqa: E501
+ :type: bool
+ """
+
+ self._cluster_scope = cluster_scope
+
+ @property
+ def namespaces(self):
+ """Gets the namespaces of this V1beta2ResourcePolicyRule. # noqa: E501
+
+ `namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true. # noqa: E501
+
+ :return: The namespaces of this V1beta2ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._namespaces
+
+ @namespaces.setter
+ def namespaces(self, namespaces):
+ """Sets the namespaces of this V1beta2ResourcePolicyRule.
+
+ `namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true. # noqa: E501
+
+ :param namespaces: The namespaces of this V1beta2ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._namespaces = namespaces
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1beta2ResourcePolicyRule. # noqa: E501
+
+ `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required. # noqa: E501
+
+ :return: The resources of this V1beta2ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1beta2ResourcePolicyRule.
+
+ `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required. # noqa: E501
+
+ :param resources: The resources of this V1beta2ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and resources is None: # noqa: E501
+ raise ValueError("Invalid value for `resources`, must not be `None`") # noqa: E501
+
+ self._resources = resources
+
+ @property
+ def verbs(self):
+ """Gets the verbs of this V1beta2ResourcePolicyRule. # noqa: E501
+
+ `verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required. # noqa: E501
+
+ :return: The verbs of this V1beta2ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._verbs
+
+ @verbs.setter
+ def verbs(self, verbs):
+ """Sets the verbs of this V1beta2ResourcePolicyRule.
+
+ `verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required. # noqa: E501
+
+ :param verbs: The verbs of this V1beta2ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
+ raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
+
+ self._verbs = verbs
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2ResourcePolicyRule):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2ResourcePolicyRule):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_service_account_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_service_account_subject.py
new file mode 100644
index 0000000000..2cd6d9dd84
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_service_account_subject.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2ServiceAccountSubject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace'
+ }
+
+ def __init__(self, name=None, namespace=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2ServiceAccountSubject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self.discriminator = None
+
+ self.name = name
+ self.namespace = namespace
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta2ServiceAccountSubject. # noqa: E501
+
+ `name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required. # noqa: E501
+
+ :return: The name of this V1beta2ServiceAccountSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta2ServiceAccountSubject.
+
+ `name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required. # noqa: E501
+
+ :param name: The name of this V1beta2ServiceAccountSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1beta2ServiceAccountSubject. # noqa: E501
+
+ `namespace` is the namespace of matching ServiceAccount objects. Required. # noqa: E501
+
+ :return: The namespace of this V1beta2ServiceAccountSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1beta2ServiceAccountSubject.
+
+ `namespace` is the namespace of matching ServiceAccount objects. Required. # noqa: E501
+
+ :param namespace: The namespace of this V1beta2ServiceAccountSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501
+ raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
+
+ self._namespace = namespace
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2ServiceAccountSubject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2ServiceAccountSubject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_subject.py
new file mode 100644
index 0000000000..873d97be84
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_subject.py
@@ -0,0 +1,201 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2Subject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'group': 'V1beta2GroupSubject',
+ 'kind': 'str',
+ 'service_account': 'V1beta2ServiceAccountSubject',
+ 'user': 'V1beta2UserSubject'
+ }
+
+ attribute_map = {
+ 'group': 'group',
+ 'kind': 'kind',
+ 'service_account': 'serviceAccount',
+ 'user': 'user'
+ }
+
+ def __init__(self, group=None, kind=None, service_account=None, user=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2Subject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._group = None
+ self._kind = None
+ self._service_account = None
+ self._user = None
+ self.discriminator = None
+
+ if group is not None:
+ self.group = group
+ self.kind = kind
+ if service_account is not None:
+ self.service_account = service_account
+ if user is not None:
+ self.user = user
+
+ @property
+ def group(self):
+ """Gets the group of this V1beta2Subject. # noqa: E501
+
+
+ :return: The group of this V1beta2Subject. # noqa: E501
+ :rtype: V1beta2GroupSubject
+ """
+ return self._group
+
+ @group.setter
+ def group(self, group):
+ """Sets the group of this V1beta2Subject.
+
+
+ :param group: The group of this V1beta2Subject. # noqa: E501
+ :type: V1beta2GroupSubject
+ """
+
+ self._group = group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta2Subject. # noqa: E501
+
+ `kind` indicates which one of the other fields is non-empty. Required # noqa: E501
+
+ :return: The kind of this V1beta2Subject. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta2Subject.
+
+ `kind` indicates which one of the other fields is non-empty. Required # noqa: E501
+
+ :param kind: The kind of this V1beta2Subject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def service_account(self):
+ """Gets the service_account of this V1beta2Subject. # noqa: E501
+
+
+ :return: The service_account of this V1beta2Subject. # noqa: E501
+ :rtype: V1beta2ServiceAccountSubject
+ """
+ return self._service_account
+
+ @service_account.setter
+ def service_account(self, service_account):
+ """Sets the service_account of this V1beta2Subject.
+
+
+ :param service_account: The service_account of this V1beta2Subject. # noqa: E501
+ :type: V1beta2ServiceAccountSubject
+ """
+
+ self._service_account = service_account
+
+ @property
+ def user(self):
+ """Gets the user of this V1beta2Subject. # noqa: E501
+
+
+ :return: The user of this V1beta2Subject. # noqa: E501
+ :rtype: V1beta2UserSubject
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1beta2Subject.
+
+
+ :param user: The user of this V1beta2Subject. # noqa: E501
+ :type: V1beta2UserSubject
+ """
+
+ self._user = user
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2Subject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2Subject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta2_user_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_user_subject.py
new file mode 100644
index 0000000000..8e8e8096b4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta2_user_subject.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta2UserSubject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1beta2UserSubject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta2UserSubject. # noqa: E501
+
+ `name` is the username that matches, or \"*\" to match all usernames. Required. # noqa: E501
+
+ :return: The name of this V1beta2UserSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta2UserSubject.
+
+ `name` is the username that matches, or \"*\" to match all usernames. Required. # noqa: E501
+
+ :param name: The name of this V1beta2UserSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta2UserSubject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta2UserSubject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_exempt_priority_level_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_exempt_priority_level_configuration.py
new file mode 100644
index 0000000000..ec84b8b363
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_exempt_priority_level_configuration.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3ExemptPriorityLevelConfiguration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'lendable_percent': 'int',
+ 'nominal_concurrency_shares': 'int'
+ }
+
+ attribute_map = {
+ 'lendable_percent': 'lendablePercent',
+ 'nominal_concurrency_shares': 'nominalConcurrencyShares'
+ }
+
+ def __init__(self, lendable_percent=None, nominal_concurrency_shares=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3ExemptPriorityLevelConfiguration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._lendable_percent = None
+ self._nominal_concurrency_shares = None
+ self.discriminator = None
+
+ if lendable_percent is not None:
+ self.lendable_percent = lendable_percent
+ if nominal_concurrency_shares is not None:
+ self.nominal_concurrency_shares = nominal_concurrency_shares
+
+ @property
+ def lendable_percent(self):
+ """Gets the lendable_percent of this V1beta3ExemptPriorityLevelConfiguration. # noqa: E501
+
+ `lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) # noqa: E501
+
+ :return: The lendable_percent of this V1beta3ExemptPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._lendable_percent
+
+ @lendable_percent.setter
+ def lendable_percent(self, lendable_percent):
+ """Sets the lendable_percent of this V1beta3ExemptPriorityLevelConfiguration.
+
+ `lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) # noqa: E501
+
+ :param lendable_percent: The lendable_percent of this V1beta3ExemptPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._lendable_percent = lendable_percent
+
+ @property
+ def nominal_concurrency_shares(self):
+ """Gets the nominal_concurrency_shares of this V1beta3ExemptPriorityLevelConfiguration. # noqa: E501
+
+ `nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values: NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k) Bigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero. # noqa: E501
+
+ :return: The nominal_concurrency_shares of this V1beta3ExemptPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._nominal_concurrency_shares
+
+ @nominal_concurrency_shares.setter
+ def nominal_concurrency_shares(self, nominal_concurrency_shares):
+ """Sets the nominal_concurrency_shares of this V1beta3ExemptPriorityLevelConfiguration.
+
+ `nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values: NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k) Bigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero. # noqa: E501
+
+ :param nominal_concurrency_shares: The nominal_concurrency_shares of this V1beta3ExemptPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._nominal_concurrency_shares = nominal_concurrency_shares
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3ExemptPriorityLevelConfiguration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3ExemptPriorityLevelConfiguration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_distinguisher_method.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_distinguisher_method.py
new file mode 100644
index 0000000000..1ce4faee1d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_distinguisher_method.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3FlowDistinguisherMethod(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'type': 'type'
+ }
+
+ def __init__(self, type=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3FlowDistinguisherMethod - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._type = None
+ self.discriminator = None
+
+ self.type = type
+
+ @property
+ def type(self):
+ """Gets the type of this V1beta3FlowDistinguisherMethod. # noqa: E501
+
+ `type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required. # noqa: E501
+
+ :return: The type of this V1beta3FlowDistinguisherMethod. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1beta3FlowDistinguisherMethod.
+
+ `type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required. # noqa: E501
+
+ :param type: The type of this V1beta3FlowDistinguisherMethod. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3FlowDistinguisherMethod):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3FlowDistinguisherMethod):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema.py
new file mode 100644
index 0000000000..d149173ff3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3FlowSchema(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'metadata': 'V1ObjectMeta',
+ 'spec': 'V1beta3FlowSchemaSpec',
+ 'status': 'V1beta3FlowSchemaStatus'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'metadata': 'metadata',
+ 'spec': 'spec',
+ 'status': 'status'
+ }
+
+ def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3FlowSchema - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._metadata = None
+ self._spec = None
+ self._status = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+ if spec is not None:
+ self.spec = spec
+ if status is not None:
+ self.status = status
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta3FlowSchema. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta3FlowSchema. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta3FlowSchema.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta3FlowSchema. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta3FlowSchema. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta3FlowSchema. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta3FlowSchema.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta3FlowSchema. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta3FlowSchema. # noqa: E501
+
+
+ :return: The metadata of this V1beta3FlowSchema. # noqa: E501
+ :rtype: V1ObjectMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta3FlowSchema.
+
+
+ :param metadata: The metadata of this V1beta3FlowSchema. # noqa: E501
+ :type: V1ObjectMeta
+ """
+
+ self._metadata = metadata
+
+ @property
+ def spec(self):
+ """Gets the spec of this V1beta3FlowSchema. # noqa: E501
+
+
+ :return: The spec of this V1beta3FlowSchema. # noqa: E501
+ :rtype: V1beta3FlowSchemaSpec
+ """
+ return self._spec
+
+ @spec.setter
+ def spec(self, spec):
+ """Sets the spec of this V1beta3FlowSchema.
+
+
+ :param spec: The spec of this V1beta3FlowSchema. # noqa: E501
+ :type: V1beta3FlowSchemaSpec
+ """
+
+ self._spec = spec
+
+ @property
+ def status(self):
+ """Gets the status of this V1beta3FlowSchema. # noqa: E501
+
+
+ :return: The status of this V1beta3FlowSchema. # noqa: E501
+ :rtype: V1beta3FlowSchemaStatus
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1beta3FlowSchema.
+
+
+ :param status: The status of this V1beta3FlowSchema. # noqa: E501
+ :type: V1beta3FlowSchemaStatus
+ """
+
+ self._status = status
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3FlowSchema):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3FlowSchema):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_condition.py
new file mode 100644
index 0000000000..cda753fb83
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_condition.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3FlowSchemaCondition(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'last_transition_time': 'datetime',
+ 'message': 'str',
+ 'reason': 'str',
+ 'status': 'str',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'last_transition_time': 'lastTransitionTime',
+ 'message': 'message',
+ 'reason': 'reason',
+ 'status': 'status',
+ 'type': 'type'
+ }
+
+ def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3FlowSchemaCondition - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._last_transition_time = None
+ self._message = None
+ self._reason = None
+ self._status = None
+ self._type = None
+ self.discriminator = None
+
+ if last_transition_time is not None:
+ self.last_transition_time = last_transition_time
+ if message is not None:
+ self.message = message
+ if reason is not None:
+ self.reason = reason
+ if status is not None:
+ self.status = status
+ if type is not None:
+ self.type = type
+
+ @property
+ def last_transition_time(self):
+ """Gets the last_transition_time of this V1beta3FlowSchemaCondition. # noqa: E501
+
+ `lastTransitionTime` is the last time the condition transitioned from one status to another. # noqa: E501
+
+ :return: The last_transition_time of this V1beta3FlowSchemaCondition. # noqa: E501
+ :rtype: datetime
+ """
+ return self._last_transition_time
+
+ @last_transition_time.setter
+ def last_transition_time(self, last_transition_time):
+ """Sets the last_transition_time of this V1beta3FlowSchemaCondition.
+
+ `lastTransitionTime` is the last time the condition transitioned from one status to another. # noqa: E501
+
+ :param last_transition_time: The last_transition_time of this V1beta3FlowSchemaCondition. # noqa: E501
+ :type: datetime
+ """
+
+ self._last_transition_time = last_transition_time
+
+ @property
+ def message(self):
+ """Gets the message of this V1beta3FlowSchemaCondition. # noqa: E501
+
+ `message` is a human-readable message indicating details about last transition. # noqa: E501
+
+ :return: The message of this V1beta3FlowSchemaCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._message
+
+ @message.setter
+ def message(self, message):
+ """Sets the message of this V1beta3FlowSchemaCondition.
+
+ `message` is a human-readable message indicating details about last transition. # noqa: E501
+
+ :param message: The message of this V1beta3FlowSchemaCondition. # noqa: E501
+ :type: str
+ """
+
+ self._message = message
+
+ @property
+ def reason(self):
+ """Gets the reason of this V1beta3FlowSchemaCondition. # noqa: E501
+
+ `reason` is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
+
+ :return: The reason of this V1beta3FlowSchemaCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._reason
+
+ @reason.setter
+ def reason(self, reason):
+ """Sets the reason of this V1beta3FlowSchemaCondition.
+
+ `reason` is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
+
+ :param reason: The reason of this V1beta3FlowSchemaCondition. # noqa: E501
+ :type: str
+ """
+
+ self._reason = reason
+
+ @property
+ def status(self):
+ """Gets the status of this V1beta3FlowSchemaCondition. # noqa: E501
+
+ `status` is the status of the condition. Can be True, False, Unknown. Required. # noqa: E501
+
+ :return: The status of this V1beta3FlowSchemaCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._status
+
+ @status.setter
+ def status(self, status):
+ """Sets the status of this V1beta3FlowSchemaCondition.
+
+ `status` is the status of the condition. Can be True, False, Unknown. Required. # noqa: E501
+
+ :param status: The status of this V1beta3FlowSchemaCondition. # noqa: E501
+ :type: str
+ """
+
+ self._status = status
+
+ @property
+ def type(self):
+ """Gets the type of this V1beta3FlowSchemaCondition. # noqa: E501
+
+ `type` is the type of the condition. Required. # noqa: E501
+
+ :return: The type of this V1beta3FlowSchemaCondition. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1beta3FlowSchemaCondition.
+
+ `type` is the type of the condition. Required. # noqa: E501
+
+ :param type: The type of this V1beta3FlowSchemaCondition. # noqa: E501
+ :type: str
+ """
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3FlowSchemaCondition):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3FlowSchemaCondition):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_list.py
new file mode 100644
index 0000000000..56b06e363b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3FlowSchemaList(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'items': 'list[V1beta3FlowSchema]',
+ 'kind': 'str',
+ 'metadata': 'V1ListMeta'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'items': 'items',
+ 'kind': 'kind',
+ 'metadata': 'metadata'
+ }
+
+ def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3FlowSchemaList - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._items = None
+ self._kind = None
+ self._metadata = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.items = items
+ if kind is not None:
+ self.kind = kind
+ if metadata is not None:
+ self.metadata = metadata
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V1beta3FlowSchemaList. # noqa: E501
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :return: The api_version of this V1beta3FlowSchemaList. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V1beta3FlowSchemaList.
+
+ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
+
+ :param api_version: The api_version of this V1beta3FlowSchemaList. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def items(self):
+ """Gets the items of this V1beta3FlowSchemaList. # noqa: E501
+
+ `items` is a list of FlowSchemas. # noqa: E501
+
+ :return: The items of this V1beta3FlowSchemaList. # noqa: E501
+ :rtype: list[V1beta3FlowSchema]
+ """
+ return self._items
+
+ @items.setter
+ def items(self, items):
+ """Sets the items of this V1beta3FlowSchemaList.
+
+ `items` is a list of FlowSchemas. # noqa: E501
+
+ :param items: The items of this V1beta3FlowSchemaList. # noqa: E501
+ :type: list[V1beta3FlowSchema]
+ """
+ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
+ raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
+
+ self._items = items
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta3FlowSchemaList. # noqa: E501
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V1beta3FlowSchemaList. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta3FlowSchemaList.
+
+ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V1beta3FlowSchemaList. # noqa: E501
+ :type: str
+ """
+
+ self._kind = kind
+
+ @property
+ def metadata(self):
+ """Gets the metadata of this V1beta3FlowSchemaList. # noqa: E501
+
+
+ :return: The metadata of this V1beta3FlowSchemaList. # noqa: E501
+ :rtype: V1ListMeta
+ """
+ return self._metadata
+
+ @metadata.setter
+ def metadata(self, metadata):
+ """Sets the metadata of this V1beta3FlowSchemaList.
+
+
+ :param metadata: The metadata of this V1beta3FlowSchemaList. # noqa: E501
+ :type: V1ListMeta
+ """
+
+ self._metadata = metadata
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3FlowSchemaList):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3FlowSchemaList):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_spec.py
new file mode 100644
index 0000000000..649490e9f8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_spec.py
@@ -0,0 +1,203 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3FlowSchemaSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'distinguisher_method': 'V1beta3FlowDistinguisherMethod',
+ 'matching_precedence': 'int',
+ 'priority_level_configuration': 'V1beta3PriorityLevelConfigurationReference',
+ 'rules': 'list[V1beta3PolicyRulesWithSubjects]'
+ }
+
+ attribute_map = {
+ 'distinguisher_method': 'distinguisherMethod',
+ 'matching_precedence': 'matchingPrecedence',
+ 'priority_level_configuration': 'priorityLevelConfiguration',
+ 'rules': 'rules'
+ }
+
+ def __init__(self, distinguisher_method=None, matching_precedence=None, priority_level_configuration=None, rules=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3FlowSchemaSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._distinguisher_method = None
+ self._matching_precedence = None
+ self._priority_level_configuration = None
+ self._rules = None
+ self.discriminator = None
+
+ if distinguisher_method is not None:
+ self.distinguisher_method = distinguisher_method
+ if matching_precedence is not None:
+ self.matching_precedence = matching_precedence
+ self.priority_level_configuration = priority_level_configuration
+ if rules is not None:
+ self.rules = rules
+
+ @property
+ def distinguisher_method(self):
+ """Gets the distinguisher_method of this V1beta3FlowSchemaSpec. # noqa: E501
+
+
+ :return: The distinguisher_method of this V1beta3FlowSchemaSpec. # noqa: E501
+ :rtype: V1beta3FlowDistinguisherMethod
+ """
+ return self._distinguisher_method
+
+ @distinguisher_method.setter
+ def distinguisher_method(self, distinguisher_method):
+ """Sets the distinguisher_method of this V1beta3FlowSchemaSpec.
+
+
+ :param distinguisher_method: The distinguisher_method of this V1beta3FlowSchemaSpec. # noqa: E501
+ :type: V1beta3FlowDistinguisherMethod
+ """
+
+ self._distinguisher_method = distinguisher_method
+
+ @property
+ def matching_precedence(self):
+ """Gets the matching_precedence of this V1beta3FlowSchemaSpec. # noqa: E501
+
+ `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501
+
+ :return: The matching_precedence of this V1beta3FlowSchemaSpec. # noqa: E501
+ :rtype: int
+ """
+ return self._matching_precedence
+
+ @matching_precedence.setter
+ def matching_precedence(self, matching_precedence):
+ """Sets the matching_precedence of this V1beta3FlowSchemaSpec.
+
+ `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501
+
+ :param matching_precedence: The matching_precedence of this V1beta3FlowSchemaSpec. # noqa: E501
+ :type: int
+ """
+
+ self._matching_precedence = matching_precedence
+
+ @property
+ def priority_level_configuration(self):
+ """Gets the priority_level_configuration of this V1beta3FlowSchemaSpec. # noqa: E501
+
+
+ :return: The priority_level_configuration of this V1beta3FlowSchemaSpec. # noqa: E501
+ :rtype: V1beta3PriorityLevelConfigurationReference
+ """
+ return self._priority_level_configuration
+
+ @priority_level_configuration.setter
+ def priority_level_configuration(self, priority_level_configuration):
+ """Sets the priority_level_configuration of this V1beta3FlowSchemaSpec.
+
+
+ :param priority_level_configuration: The priority_level_configuration of this V1beta3FlowSchemaSpec. # noqa: E501
+ :type: V1beta3PriorityLevelConfigurationReference
+ """
+ if self.local_vars_configuration.client_side_validation and priority_level_configuration is None: # noqa: E501
+ raise ValueError("Invalid value for `priority_level_configuration`, must not be `None`") # noqa: E501
+
+ self._priority_level_configuration = priority_level_configuration
+
+ @property
+ def rules(self):
+ """Gets the rules of this V1beta3FlowSchemaSpec. # noqa: E501
+
+ `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501
+
+ :return: The rules of this V1beta3FlowSchemaSpec. # noqa: E501
+ :rtype: list[V1beta3PolicyRulesWithSubjects]
+ """
+ return self._rules
+
+ @rules.setter
+ def rules(self, rules):
+ """Sets the rules of this V1beta3FlowSchemaSpec.
+
+ `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501
+
+ :param rules: The rules of this V1beta3FlowSchemaSpec. # noqa: E501
+ :type: list[V1beta3PolicyRulesWithSubjects]
+ """
+
+ self._rules = rules
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3FlowSchemaSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3FlowSchemaSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_status.py
new file mode 100644
index 0000000000..cf6b2e083f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_flow_schema_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3FlowSchemaStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1beta3FlowSchemaCondition]'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions'
+ }
+
+ def __init__(self, conditions=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3FlowSchemaStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1beta3FlowSchemaStatus. # noqa: E501
+
+ `conditions` is a list of the current states of FlowSchema. # noqa: E501
+
+ :return: The conditions of this V1beta3FlowSchemaStatus. # noqa: E501
+ :rtype: list[V1beta3FlowSchemaCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1beta3FlowSchemaStatus.
+
+ `conditions` is a list of the current states of FlowSchema. # noqa: E501
+
+ :param conditions: The conditions of this V1beta3FlowSchemaStatus. # noqa: E501
+ :type: list[V1beta3FlowSchemaCondition]
+ """
+
+ self._conditions = conditions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3FlowSchemaStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3FlowSchemaStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_group_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_group_subject.py
new file mode 100644
index 0000000000..fbd54a9b00
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_group_subject.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3GroupSubject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3GroupSubject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta3GroupSubject. # noqa: E501
+
+ name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. # noqa: E501
+
+ :return: The name of this V1beta3GroupSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta3GroupSubject.
+
+ name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. # noqa: E501
+
+ :param name: The name of this V1beta3GroupSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3GroupSubject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3GroupSubject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_limit_response.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_limit_response.py
new file mode 100644
index 0000000000..9a28a855c5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_limit_response.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3LimitResponse(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'queuing': 'V1beta3QueuingConfiguration',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'queuing': 'queuing',
+ 'type': 'type'
+ }
+
+ def __init__(self, queuing=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3LimitResponse - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._queuing = None
+ self._type = None
+ self.discriminator = None
+
+ if queuing is not None:
+ self.queuing = queuing
+ self.type = type
+
+ @property
+ def queuing(self):
+ """Gets the queuing of this V1beta3LimitResponse. # noqa: E501
+
+
+ :return: The queuing of this V1beta3LimitResponse. # noqa: E501
+ :rtype: V1beta3QueuingConfiguration
+ """
+ return self._queuing
+
+ @queuing.setter
+ def queuing(self, queuing):
+ """Sets the queuing of this V1beta3LimitResponse.
+
+
+ :param queuing: The queuing of this V1beta3LimitResponse. # noqa: E501
+ :type: V1beta3QueuingConfiguration
+ """
+
+ self._queuing = queuing
+
+ @property
+ def type(self):
+ """Gets the type of this V1beta3LimitResponse. # noqa: E501
+
+ `type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required. # noqa: E501
+
+ :return: The type of this V1beta3LimitResponse. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1beta3LimitResponse.
+
+ `type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required. # noqa: E501
+
+ :param type: The type of this V1beta3LimitResponse. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3LimitResponse):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3LimitResponse):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_limited_priority_level_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_limited_priority_level_configuration.py
new file mode 100644
index 0000000000..cf9a4aeac5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_limited_priority_level_configuration.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3LimitedPriorityLevelConfiguration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'borrowing_limit_percent': 'int',
+ 'lendable_percent': 'int',
+ 'limit_response': 'V1beta3LimitResponse',
+ 'nominal_concurrency_shares': 'int'
+ }
+
+ attribute_map = {
+ 'borrowing_limit_percent': 'borrowingLimitPercent',
+ 'lendable_percent': 'lendablePercent',
+ 'limit_response': 'limitResponse',
+ 'nominal_concurrency_shares': 'nominalConcurrencyShares'
+ }
+
+ def __init__(self, borrowing_limit_percent=None, lendable_percent=None, limit_response=None, nominal_concurrency_shares=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3LimitedPriorityLevelConfiguration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._borrowing_limit_percent = None
+ self._lendable_percent = None
+ self._limit_response = None
+ self._nominal_concurrency_shares = None
+ self.discriminator = None
+
+ if borrowing_limit_percent is not None:
+ self.borrowing_limit_percent = borrowing_limit_percent
+ if lendable_percent is not None:
+ self.lendable_percent = lendable_percent
+ if limit_response is not None:
+ self.limit_response = limit_response
+ if nominal_concurrency_shares is not None:
+ self.nominal_concurrency_shares = nominal_concurrency_shares
+
+ @property
+ def borrowing_limit_percent(self):
+ """Gets the borrowing_limit_percent of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+
+ `borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows. BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) The value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite. # noqa: E501
+
+ :return: The borrowing_limit_percent of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._borrowing_limit_percent
+
+ @borrowing_limit_percent.setter
+ def borrowing_limit_percent(self, borrowing_limit_percent):
+ """Sets the borrowing_limit_percent of this V1beta3LimitedPriorityLevelConfiguration.
+
+ `borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows. BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) The value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite. # noqa: E501
+
+ :param borrowing_limit_percent: The borrowing_limit_percent of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._borrowing_limit_percent = borrowing_limit_percent
+
+ @property
+ def lendable_percent(self):
+ """Gets the lendable_percent of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+
+ `lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) # noqa: E501
+
+ :return: The lendable_percent of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._lendable_percent
+
+ @lendable_percent.setter
+ def lendable_percent(self, lendable_percent):
+ """Sets the lendable_percent of this V1beta3LimitedPriorityLevelConfiguration.
+
+ `lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) # noqa: E501
+
+ :param lendable_percent: The lendable_percent of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._lendable_percent = lendable_percent
+
+ @property
+ def limit_response(self):
+ """Gets the limit_response of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+
+
+ :return: The limit_response of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :rtype: V1beta3LimitResponse
+ """
+ return self._limit_response
+
+ @limit_response.setter
+ def limit_response(self, limit_response):
+ """Sets the limit_response of this V1beta3LimitedPriorityLevelConfiguration.
+
+
+ :param limit_response: The limit_response of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :type: V1beta3LimitResponse
+ """
+
+ self._limit_response = limit_response
+
+ @property
+ def nominal_concurrency_shares(self):
+ """Gets the nominal_concurrency_shares of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+
+ `nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values: NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k) Bigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30. # noqa: E501
+
+ :return: The nominal_concurrency_shares of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._nominal_concurrency_shares
+
+ @nominal_concurrency_shares.setter
+ def nominal_concurrency_shares(self, nominal_concurrency_shares):
+ """Sets the nominal_concurrency_shares of this V1beta3LimitedPriorityLevelConfiguration.
+
+ `nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values: NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k) Bigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30. # noqa: E501
+
+ :param nominal_concurrency_shares: The nominal_concurrency_shares of this V1beta3LimitedPriorityLevelConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._nominal_concurrency_shares = nominal_concurrency_shares
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3LimitedPriorityLevelConfiguration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3LimitedPriorityLevelConfiguration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_non_resource_policy_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_non_resource_policy_rule.py
new file mode 100644
index 0000000000..dc280e0cf8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_non_resource_policy_rule.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta3NonResourcePolicyRule(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type.
    # attribute_map maps attribute name -> JSON key in the wire format.
    openapi_types = {
        'non_resource_ur_ls': 'list[str]',
        'verbs': 'list[str]'
    }

    attribute_map = {
        'non_resource_ur_ls': 'nonResourceURLs',
        'verbs': 'verbs'
    }

    def __init__(self, non_resource_ur_ls=None, verbs=None, local_vars_configuration=None):  # noqa: E501
        """V1beta3NonResourcePolicyRule - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._non_resource_ur_ls = None
        self._verbs = None
        self.discriminator = None

        # Both fields are required; the setters enforce non-None values.
        self.non_resource_ur_ls = non_resource_ur_ls
        self.verbs = verbs

    @property
    def non_resource_ur_ls(self):
        """Gets the non_resource_ur_ls of this V1beta3NonResourcePolicyRule.

        `nonResourceURLs` is a set of url prefixes that a user should have
        access to and may not be empty. "*" matches all non-resource urls;
        if it is present, it must be the only entry. Required.

        :rtype: list[str]
        """
        return self._non_resource_ur_ls

    @non_resource_ur_ls.setter
    def non_resource_ur_ls(self, non_resource_ur_ls):
        """Sets the non_resource_ur_ls of this V1beta3NonResourcePolicyRule.

        :type: list[str]
        :raises ValueError: if client-side validation is on and the value is None
        """
        if (self.local_vars_configuration.client_side_validation
                and non_resource_ur_ls is None):
            raise ValueError("Invalid value for `non_resource_ur_ls`, must not be `None`")  # noqa: E501

        self._non_resource_ur_ls = non_resource_ur_ls

    @property
    def verbs(self):
        """Gets the verbs of this V1beta3NonResourcePolicyRule.

        `verbs` is a list of matching verbs and may not be empty. "*"
        matches all verbs; if it is present, it must be the only entry.
        Required.

        :rtype: list[str]
        """
        return self._verbs

    @verbs.setter
    def verbs(self, verbs):
        """Sets the verbs of this V1beta3NonResourcePolicyRule.

        :type: list[str]
        :raises ValueError: if client-side validation is on and the value is None
        """
        if (self.local_vars_configuration.client_side_validation
                and verbs is None):
            raise ValueError("Invalid value for `verbs`, must not be `None`")  # noqa: E501

        self._verbs = verbs

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(value):
            # Recurse into nested generated models only.
            return value.to_dict() if hasattr(value, "to_dict") else value

        output = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                output[name] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                output[name] = value.to_dict()
            elif isinstance(value, dict):
                output[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                output[name] = value

        return output

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their serialized dicts match."""
        return (isinstance(other, V1beta3NonResourcePolicyRule)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not (isinstance(other, V1beta3NonResourcePolicyRule)
                    and self.to_dict() == other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_policy_rules_with_subjects.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_policy_rules_with_subjects.py
new file mode 100644
index 0000000000..489c60d8ad
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_policy_rules_with_subjects.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta3PolicyRulesWithSubjects(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type.
    # attribute_map maps attribute name -> JSON key in the wire format.
    openapi_types = {
        'non_resource_rules': 'list[V1beta3NonResourcePolicyRule]',
        'resource_rules': 'list[V1beta3ResourcePolicyRule]',
        'subjects': 'list[V1beta3Subject]'
    }

    attribute_map = {
        'non_resource_rules': 'nonResourceRules',
        'resource_rules': 'resourceRules',
        'subjects': 'subjects'
    }

    def __init__(self, non_resource_rules=None, resource_rules=None, subjects=None, local_vars_configuration=None):  # noqa: E501
        """V1beta3PolicyRulesWithSubjects - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._non_resource_rules = None
        self._resource_rules = None
        self._subjects = None
        self.discriminator = None

        # The rule lists are optional; subjects is required and its
        # setter validates it.
        if non_resource_rules is not None:
            self.non_resource_rules = non_resource_rules
        if resource_rules is not None:
            self.resource_rules = resource_rules
        self.subjects = subjects

    @property
    def non_resource_rules(self):
        """Gets the non_resource_rules of this V1beta3PolicyRulesWithSubjects.

        `nonResourceRules` is a list of NonResourcePolicyRules that identify
        matching requests by their verb and the target non-resource URL.

        :rtype: list[V1beta3NonResourcePolicyRule]
        """
        return self._non_resource_rules

    @non_resource_rules.setter
    def non_resource_rules(self, non_resource_rules):
        """Sets the non_resource_rules of this V1beta3PolicyRulesWithSubjects.

        :type: list[V1beta3NonResourcePolicyRule]
        """
        self._non_resource_rules = non_resource_rules

    @property
    def resource_rules(self):
        """Gets the resource_rules of this V1beta3PolicyRulesWithSubjects.

        `resourceRules` is a slice of ResourcePolicyRules that identify
        matching requests by their verb and the target resource. At least
        one of `resourceRules` and `nonResourceRules` has to be non-empty.

        :rtype: list[V1beta3ResourcePolicyRule]
        """
        return self._resource_rules

    @resource_rules.setter
    def resource_rules(self, resource_rules):
        """Sets the resource_rules of this V1beta3PolicyRulesWithSubjects.

        :type: list[V1beta3ResourcePolicyRule]
        """
        self._resource_rules = resource_rules

    @property
    def subjects(self):
        """Gets the subjects of this V1beta3PolicyRulesWithSubjects.

        subjects is the list of normal user, serviceaccount, or group that
        this rule cares about. There must be at least one member in this
        slice. A slice including both the system:authenticated and
        system:unauthenticated user groups matches every request. Required.

        :rtype: list[V1beta3Subject]
        """
        return self._subjects

    @subjects.setter
    def subjects(self, subjects):
        """Sets the subjects of this V1beta3PolicyRulesWithSubjects.

        :type: list[V1beta3Subject]
        :raises ValueError: if client-side validation is on and the value is None
        """
        if (self.local_vars_configuration.client_side_validation
                and subjects is None):
            raise ValueError("Invalid value for `subjects`, must not be `None`")  # noqa: E501

        self._subjects = subjects

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(value):
            # Recurse into nested generated models only.
            return value.to_dict() if hasattr(value, "to_dict") else value

        output = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                output[name] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                output[name] = value.to_dict()
            elif isinstance(value, dict):
                output[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                output[name] = value

        return output

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their serialized dicts match."""
        return (isinstance(other, V1beta3PolicyRulesWithSubjects)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not (isinstance(other, V1beta3PolicyRulesWithSubjects)
                    and self.to_dict() == other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration.py
new file mode 100644
index 0000000000..1bcd38269f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta3PriorityLevelConfiguration(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type.
    # attribute_map maps attribute name -> JSON key in the wire format.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1beta3PriorityLevelConfigurationSpec',
        'status': 'V1beta3PriorityLevelConfigurationStatus'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1beta3PriorityLevelConfiguration - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Every field is optional; only explicitly supplied values are set.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Gets the api_version of this V1beta3PriorityLevelConfiguration.

        APIVersion defines the versioned schema of this representation of an
        object. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1beta3PriorityLevelConfiguration.

        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1beta3PriorityLevelConfiguration.

        Kind is a string value representing the REST resource this object
        represents. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1beta3PriorityLevelConfiguration.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1beta3PriorityLevelConfiguration.

        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1beta3PriorityLevelConfiguration.

        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1beta3PriorityLevelConfiguration.

        :rtype: V1beta3PriorityLevelConfigurationSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1beta3PriorityLevelConfiguration.

        :type: V1beta3PriorityLevelConfigurationSpec
        """
        self._spec = spec

    @property
    def status(self):
        """Gets the status of this V1beta3PriorityLevelConfiguration.

        :rtype: V1beta3PriorityLevelConfigurationStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1beta3PriorityLevelConfiguration.

        :type: V1beta3PriorityLevelConfigurationStatus
        """
        self._status = status

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(value):
            # Recurse into nested generated models only.
            return value.to_dict() if hasattr(value, "to_dict") else value

        output = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                output[name] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                output[name] = value.to_dict()
            elif isinstance(value, dict):
                output[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                output[name] = value

        return output

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their serialized dicts match."""
        return (isinstance(other, V1beta3PriorityLevelConfiguration)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not (isinstance(other, V1beta3PriorityLevelConfiguration)
                    and self.to_dict() == other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_condition.py
new file mode 100644
index 0000000000..f887574601
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_condition.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta3PriorityLevelConfigurationCondition(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type.
    # attribute_map maps attribute name -> JSON key in the wire format.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1beta3PriorityLevelConfigurationCondition - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Every field is optional; only explicitly supplied values are set.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        if status is not None:
            self.status = status
        if type is not None:
            self.type = type

    @property
    def last_transition_time(self):
        """Gets the last_transition_time of this V1beta3PriorityLevelConfigurationCondition.

        `lastTransitionTime` is the last time the condition transitioned
        from one status to another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Sets the last_transition_time of this V1beta3PriorityLevelConfigurationCondition.

        :type: datetime
        """
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """Gets the message of this V1beta3PriorityLevelConfigurationCondition.

        `message` is a human-readable message indicating details about last
        transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this V1beta3PriorityLevelConfigurationCondition.

        :type: str
        """
        self._message = message

    @property
    def reason(self):
        """Gets the reason of this V1beta3PriorityLevelConfigurationCondition.

        `reason` is a unique, one-word, CamelCase reason for the condition's
        last transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1beta3PriorityLevelConfigurationCondition.

        :type: str
        """
        self._reason = reason

    @property
    def status(self):
        """Gets the status of this V1beta3PriorityLevelConfigurationCondition.

        `status` is the status of the condition. Can be True, False,
        Unknown. Required.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1beta3PriorityLevelConfigurationCondition.

        :type: str
        """
        self._status = status

    @property
    def type(self):
        """Gets the type of this V1beta3PriorityLevelConfigurationCondition.

        `type` is the type of the condition. Required.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1beta3PriorityLevelConfigurationCondition.

        :type: str
        """
        self._type = type

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(value):
            # Recurse into nested generated models only.
            return value.to_dict() if hasattr(value, "to_dict") else value

        output = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                output[name] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                output[name] = value.to_dict()
            elif isinstance(value, dict):
                output[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                output[name] = value

        return output

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their serialized dicts match."""
        return (isinstance(other, V1beta3PriorityLevelConfigurationCondition)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not (isinstance(other, V1beta3PriorityLevelConfigurationCondition)
                    and self.to_dict() == other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_list.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_list.py
new file mode 100644
index 0000000000..f4910773d2
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V1beta3PriorityLevelConfigurationList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type.
    # attribute_map maps attribute name -> JSON key in the wire format.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1beta3PriorityLevelConfiguration]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1beta3PriorityLevelConfigurationList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration
        )

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # items is required (its setter validates); the rest are optional.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1beta3PriorityLevelConfigurationList.

        APIVersion defines the versioned schema of this representation of an
        object. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1beta3PriorityLevelConfigurationList.

        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1beta3PriorityLevelConfigurationList.

        `items` is a list of request-priorities.

        :rtype: list[V1beta3PriorityLevelConfiguration]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1beta3PriorityLevelConfigurationList.

        :type: list[V1beta3PriorityLevelConfiguration]
        :raises ValueError: if client-side validation is on and the value is None
        """
        if (self.local_vars_configuration.client_side_validation
                and items is None):
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1beta3PriorityLevelConfigurationList.

        Kind is a string value representing the REST resource this object
        represents. Cannot be updated. In CamelCase. More info:
        https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1beta3PriorityLevelConfigurationList.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1beta3PriorityLevelConfigurationList.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1beta3PriorityLevelConfigurationList.

        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model into a plain ``dict``, recursing into
        nested models (anything exposing ``to_dict``)."""
        def _serialize(value):
            # Recurse into nested generated models only.
            return value.to_dict() if hasattr(value, "to_dict") else value

        output = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                output[name] = [_serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                output[name] = value.to_dict()
            elif isinstance(value, dict):
                output[name] = {k: _serialize(v) for k, v in value.items()}
            else:
                output[name] = value

        return output

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their serialized dicts match."""
        return (isinstance(other, V1beta3PriorityLevelConfigurationList)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not (isinstance(other, V1beta3PriorityLevelConfigurationList)
                    and self.to_dict() == other.to_dict())
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_reference.py
new file mode 100644
index 0000000000..e8b87edb9a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_reference.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3PriorityLevelConfigurationReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3PriorityLevelConfigurationReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta3PriorityLevelConfigurationReference. # noqa: E501
+
+ `name` is the name of the priority level configuration being referenced Required. # noqa: E501
+
+ :return: The name of this V1beta3PriorityLevelConfigurationReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta3PriorityLevelConfigurationReference.
+
+ `name` is the name of the priority level configuration being referenced Required. # noqa: E501
+
+ :param name: The name of this V1beta3PriorityLevelConfigurationReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3PriorityLevelConfigurationReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3PriorityLevelConfigurationReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_spec.py
new file mode 100644
index 0000000000..aa85c0a905
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_spec.py
@@ -0,0 +1,175 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3PriorityLevelConfigurationSpec(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'exempt': 'V1beta3ExemptPriorityLevelConfiguration',
+ 'limited': 'V1beta3LimitedPriorityLevelConfiguration',
+ 'type': 'str'
+ }
+
+ attribute_map = {
+ 'exempt': 'exempt',
+ 'limited': 'limited',
+ 'type': 'type'
+ }
+
+ def __init__(self, exempt=None, limited=None, type=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3PriorityLevelConfigurationSpec - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._exempt = None
+ self._limited = None
+ self._type = None
+ self.discriminator = None
+
+ if exempt is not None:
+ self.exempt = exempt
+ if limited is not None:
+ self.limited = limited
+ self.type = type
+
+ @property
+ def exempt(self):
+ """Gets the exempt of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+
+
+ :return: The exempt of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+ :rtype: V1beta3ExemptPriorityLevelConfiguration
+ """
+ return self._exempt
+
+ @exempt.setter
+ def exempt(self, exempt):
+ """Sets the exempt of this V1beta3PriorityLevelConfigurationSpec.
+
+
+ :param exempt: The exempt of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+ :type: V1beta3ExemptPriorityLevelConfiguration
+ """
+
+ self._exempt = exempt
+
+ @property
+ def limited(self):
+ """Gets the limited of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+
+
+ :return: The limited of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+ :rtype: V1beta3LimitedPriorityLevelConfiguration
+ """
+ return self._limited
+
+ @limited.setter
+ def limited(self, limited):
+ """Sets the limited of this V1beta3PriorityLevelConfigurationSpec.
+
+
+ :param limited: The limited of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+ :type: V1beta3LimitedPriorityLevelConfiguration
+ """
+
+ self._limited = limited
+
+ @property
+ def type(self):
+ """Gets the type of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+
+ `type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required. # noqa: E501
+
+ :return: The type of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+ :rtype: str
+ """
+ return self._type
+
+ @type.setter
+ def type(self, type):
+ """Sets the type of this V1beta3PriorityLevelConfigurationSpec.
+
+ `type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required. # noqa: E501
+
+ :param type: The type of this V1beta3PriorityLevelConfigurationSpec. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
+ raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
+
+ self._type = type
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3PriorityLevelConfigurationSpec):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3PriorityLevelConfigurationSpec):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_status.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_status.py
new file mode 100644
index 0000000000..85e7725440
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_priority_level_configuration_status.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3PriorityLevelConfigurationStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'conditions': 'list[V1beta3PriorityLevelConfigurationCondition]'
+ }
+
+ attribute_map = {
+ 'conditions': 'conditions'
+ }
+
+ def __init__(self, conditions=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3PriorityLevelConfigurationStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._conditions = None
+ self.discriminator = None
+
+ if conditions is not None:
+ self.conditions = conditions
+
+ @property
+ def conditions(self):
+ """Gets the conditions of this V1beta3PriorityLevelConfigurationStatus. # noqa: E501
+
+ `conditions` is the current state of \"request-priority\". # noqa: E501
+
+ :return: The conditions of this V1beta3PriorityLevelConfigurationStatus. # noqa: E501
+ :rtype: list[V1beta3PriorityLevelConfigurationCondition]
+ """
+ return self._conditions
+
+ @conditions.setter
+ def conditions(self, conditions):
+ """Sets the conditions of this V1beta3PriorityLevelConfigurationStatus.
+
+ `conditions` is the current state of \"request-priority\". # noqa: E501
+
+ :param conditions: The conditions of this V1beta3PriorityLevelConfigurationStatus. # noqa: E501
+ :type: list[V1beta3PriorityLevelConfigurationCondition]
+ """
+
+ self._conditions = conditions
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3PriorityLevelConfigurationStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3PriorityLevelConfigurationStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_queuing_configuration.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_queuing_configuration.py
new file mode 100644
index 0000000000..3f1150b46e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_queuing_configuration.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3QueuingConfiguration(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'hand_size': 'int',
+ 'queue_length_limit': 'int',
+ 'queues': 'int'
+ }
+
+ attribute_map = {
+ 'hand_size': 'handSize',
+ 'queue_length_limit': 'queueLengthLimit',
+ 'queues': 'queues'
+ }
+
+ def __init__(self, hand_size=None, queue_length_limit=None, queues=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3QueuingConfiguration - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._hand_size = None
+ self._queue_length_limit = None
+ self._queues = None
+ self.discriminator = None
+
+ if hand_size is not None:
+ self.hand_size = hand_size
+ if queue_length_limit is not None:
+ self.queue_length_limit = queue_length_limit
+ if queues is not None:
+ self.queues = queues
+
+ @property
+ def hand_size(self):
+ """Gets the hand_size of this V1beta3QueuingConfiguration. # noqa: E501
+
+ `handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8. # noqa: E501
+
+ :return: The hand_size of this V1beta3QueuingConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._hand_size
+
+ @hand_size.setter
+ def hand_size(self, hand_size):
+ """Sets the hand_size of this V1beta3QueuingConfiguration.
+
+ `handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8. # noqa: E501
+
+ :param hand_size: The hand_size of this V1beta3QueuingConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._hand_size = hand_size
+
+ @property
+ def queue_length_limit(self):
+ """Gets the queue_length_limit of this V1beta3QueuingConfiguration. # noqa: E501
+
+ `queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50. # noqa: E501
+
+ :return: The queue_length_limit of this V1beta3QueuingConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._queue_length_limit
+
+ @queue_length_limit.setter
+ def queue_length_limit(self, queue_length_limit):
+ """Sets the queue_length_limit of this V1beta3QueuingConfiguration.
+
+ `queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50. # noqa: E501
+
+ :param queue_length_limit: The queue_length_limit of this V1beta3QueuingConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._queue_length_limit = queue_length_limit
+
+ @property
+ def queues(self):
+ """Gets the queues of this V1beta3QueuingConfiguration. # noqa: E501
+
+ `queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64. # noqa: E501
+
+ :return: The queues of this V1beta3QueuingConfiguration. # noqa: E501
+ :rtype: int
+ """
+ return self._queues
+
+ @queues.setter
+ def queues(self, queues):
+ """Sets the queues of this V1beta3QueuingConfiguration.
+
+ `queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64. # noqa: E501
+
+ :param queues: The queues of this V1beta3QueuingConfiguration. # noqa: E501
+ :type: int
+ """
+
+ self._queues = queues
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3QueuingConfiguration):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3QueuingConfiguration):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_resource_policy_rule.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_resource_policy_rule.py
new file mode 100644
index 0000000000..e949815ac5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_resource_policy_rule.py
@@ -0,0 +1,237 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3ResourcePolicyRule(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_groups': 'list[str]',
+ 'cluster_scope': 'bool',
+ 'namespaces': 'list[str]',
+ 'resources': 'list[str]',
+ 'verbs': 'list[str]'
+ }
+
+ attribute_map = {
+ 'api_groups': 'apiGroups',
+ 'cluster_scope': 'clusterScope',
+ 'namespaces': 'namespaces',
+ 'resources': 'resources',
+ 'verbs': 'verbs'
+ }
+
+ def __init__(self, api_groups=None, cluster_scope=None, namespaces=None, resources=None, verbs=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3ResourcePolicyRule - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_groups = None
+ self._cluster_scope = None
+ self._namespaces = None
+ self._resources = None
+ self._verbs = None
+ self.discriminator = None
+
+ self.api_groups = api_groups
+ if cluster_scope is not None:
+ self.cluster_scope = cluster_scope
+ if namespaces is not None:
+ self.namespaces = namespaces
+ self.resources = resources
+ self.verbs = verbs
+
+ @property
+ def api_groups(self):
+ """Gets the api_groups of this V1beta3ResourcePolicyRule. # noqa: E501
+
+ `apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required. # noqa: E501
+
+ :return: The api_groups of this V1beta3ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._api_groups
+
+ @api_groups.setter
+ def api_groups(self, api_groups):
+ """Sets the api_groups of this V1beta3ResourcePolicyRule.
+
+ `apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required. # noqa: E501
+
+ :param api_groups: The api_groups of this V1beta3ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and api_groups is None: # noqa: E501
+ raise ValueError("Invalid value for `api_groups`, must not be `None`") # noqa: E501
+
+ self._api_groups = api_groups
+
+ @property
+ def cluster_scope(self):
+ """Gets the cluster_scope of this V1beta3ResourcePolicyRule. # noqa: E501
+
+ `clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list. # noqa: E501
+
+ :return: The cluster_scope of this V1beta3ResourcePolicyRule. # noqa: E501
+ :rtype: bool
+ """
+ return self._cluster_scope
+
+ @cluster_scope.setter
+ def cluster_scope(self, cluster_scope):
+ """Sets the cluster_scope of this V1beta3ResourcePolicyRule.
+
+ `clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list. # noqa: E501
+
+ :param cluster_scope: The cluster_scope of this V1beta3ResourcePolicyRule. # noqa: E501
+ :type: bool
+ """
+
+ self._cluster_scope = cluster_scope
+
+ @property
+ def namespaces(self):
+ """Gets the namespaces of this V1beta3ResourcePolicyRule. # noqa: E501
+
+ `namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true. # noqa: E501
+
+ :return: The namespaces of this V1beta3ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._namespaces
+
+ @namespaces.setter
+ def namespaces(self, namespaces):
+ """Sets the namespaces of this V1beta3ResourcePolicyRule.
+
+ `namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true. # noqa: E501
+
+ :param namespaces: The namespaces of this V1beta3ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+
+ self._namespaces = namespaces
+
+ @property
+ def resources(self):
+ """Gets the resources of this V1beta3ResourcePolicyRule. # noqa: E501
+
+ `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required. # noqa: E501
+
+ :return: The resources of this V1beta3ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._resources
+
+ @resources.setter
+ def resources(self, resources):
+ """Sets the resources of this V1beta3ResourcePolicyRule.
+
+ `resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required. # noqa: E501
+
+ :param resources: The resources of this V1beta3ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and resources is None: # noqa: E501
+ raise ValueError("Invalid value for `resources`, must not be `None`") # noqa: E501
+
+ self._resources = resources
+
+ @property
+ def verbs(self):
+ """Gets the verbs of this V1beta3ResourcePolicyRule. # noqa: E501
+
+ `verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required. # noqa: E501
+
+ :return: The verbs of this V1beta3ResourcePolicyRule. # noqa: E501
+ :rtype: list[str]
+ """
+ return self._verbs
+
+ @verbs.setter
+ def verbs(self, verbs):
+ """Sets the verbs of this V1beta3ResourcePolicyRule.
+
+ `verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required. # noqa: E501
+
+ :param verbs: The verbs of this V1beta3ResourcePolicyRule. # noqa: E501
+ :type: list[str]
+ """
+ if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
+ raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
+
+ self._verbs = verbs
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3ResourcePolicyRule):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3ResourcePolicyRule):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_service_account_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_service_account_subject.py
new file mode 100644
index 0000000000..52759393c7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_service_account_subject.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3ServiceAccountSubject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'namespace': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'namespace': 'namespace'
+ }
+
+ def __init__(self, name=None, namespace=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3ServiceAccountSubject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._namespace = None
+ self.discriminator = None
+
+ self.name = name
+ self.namespace = namespace
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta3ServiceAccountSubject. # noqa: E501
+
+ `name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required. # noqa: E501
+
+ :return: The name of this V1beta3ServiceAccountSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta3ServiceAccountSubject.
+
+ `name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required. # noqa: E501
+
+ :param name: The name of this V1beta3ServiceAccountSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def namespace(self):
+ """Gets the namespace of this V1beta3ServiceAccountSubject. # noqa: E501
+
+ `namespace` is the namespace of matching ServiceAccount objects. Required. # noqa: E501
+
+ :return: The namespace of this V1beta3ServiceAccountSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._namespace
+
+ @namespace.setter
+ def namespace(self, namespace):
+ """Sets the namespace of this V1beta3ServiceAccountSubject.
+
+ `namespace` is the namespace of matching ServiceAccount objects. Required. # noqa: E501
+
+ :param namespace: The namespace of this V1beta3ServiceAccountSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501
+ raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
+
+ self._namespace = namespace
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3ServiceAccountSubject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3ServiceAccountSubject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_subject.py
new file mode 100644
index 0000000000..48ad69d26c
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_subject.py
@@ -0,0 +1,201 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3Subject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'group': 'V1beta3GroupSubject',
+ 'kind': 'str',
+ 'service_account': 'V1beta3ServiceAccountSubject',
+ 'user': 'V1beta3UserSubject'
+ }
+
+ attribute_map = {
+ 'group': 'group',
+ 'kind': 'kind',
+ 'service_account': 'serviceAccount',
+ 'user': 'user'
+ }
+
+ def __init__(self, group=None, kind=None, service_account=None, user=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3Subject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._group = None
+ self._kind = None
+ self._service_account = None
+ self._user = None
+ self.discriminator = None
+
+ if group is not None:
+ self.group = group
+ self.kind = kind
+ if service_account is not None:
+ self.service_account = service_account
+ if user is not None:
+ self.user = user
+
+ @property
+ def group(self):
+ """Gets the group of this V1beta3Subject. # noqa: E501
+
+
+ :return: The group of this V1beta3Subject. # noqa: E501
+ :rtype: V1beta3GroupSubject
+ """
+ return self._group
+
+ @group.setter
+ def group(self, group):
+ """Sets the group of this V1beta3Subject.
+
+
+ :param group: The group of this V1beta3Subject. # noqa: E501
+ :type: V1beta3GroupSubject
+ """
+
+ self._group = group
+
+ @property
+ def kind(self):
+ """Gets the kind of this V1beta3Subject. # noqa: E501
+
+ `kind` indicates which one of the other fields is non-empty. Required # noqa: E501
+
+ :return: The kind of this V1beta3Subject. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V1beta3Subject.
+
+ `kind` indicates which one of the other fields is non-empty. Required # noqa: E501
+
+ :param kind: The kind of this V1beta3Subject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def service_account(self):
+ """Gets the service_account of this V1beta3Subject. # noqa: E501
+
+
+ :return: The service_account of this V1beta3Subject. # noqa: E501
+ :rtype: V1beta3ServiceAccountSubject
+ """
+ return self._service_account
+
+ @service_account.setter
+ def service_account(self, service_account):
+ """Sets the service_account of this V1beta3Subject.
+
+
+ :param service_account: The service_account of this V1beta3Subject. # noqa: E501
+ :type: V1beta3ServiceAccountSubject
+ """
+
+ self._service_account = service_account
+
+ @property
+ def user(self):
+ """Gets the user of this V1beta3Subject. # noqa: E501
+
+
+ :return: The user of this V1beta3Subject. # noqa: E501
+ :rtype: V1beta3UserSubject
+ """
+ return self._user
+
+ @user.setter
+ def user(self, user):
+ """Sets the user of this V1beta3Subject.
+
+
+ :param user: The user of this V1beta3Subject. # noqa: E501
+ :type: V1beta3UserSubject
+ """
+
+ self._user = user
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3Subject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3Subject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v1beta3_user_subject.py b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_user_subject.py
new file mode 100644
index 0000000000..0241749a90
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v1beta3_user_subject.py
@@ -0,0 +1,123 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V1beta3UserSubject(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'name': 'name'
+ }
+
+ def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
+ """V1beta3UserSubject - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self.discriminator = None
+
+ self.name = name
+
+ @property
+ def name(self):
+ """Gets the name of this V1beta3UserSubject. # noqa: E501
+
+ `name` is the username that matches, or \"*\" to match all usernames. Required. # noqa: E501
+
+ :return: The name of this V1beta3UserSubject. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V1beta3UserSubject.
+
+ `name` is the username that matches, or \"*\" to match all usernames. Required. # noqa: E501
+
+ :param name: The name of this V1beta3UserSubject. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V1beta3UserSubject):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V1beta3UserSubject):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_source.py b/contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_source.py
new file mode 100644
index 0000000000..86f643ffce
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_source.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2ContainerResourceMetricSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'container': 'str',
+ 'name': 'str',
+ 'target': 'V2MetricTarget'
+ }
+
+ attribute_map = {
+ 'container': 'container',
+ 'name': 'name',
+ 'target': 'target'
+ }
+
+ def __init__(self, container=None, name=None, target=None, local_vars_configuration=None): # noqa: E501
+ """V2ContainerResourceMetricSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._container = None
+ self._name = None
+ self._target = None
+ self.discriminator = None
+
+ self.container = container
+ self.name = name
+ self.target = target
+
+ @property
+ def container(self):
+ """Gets the container of this V2ContainerResourceMetricSource. # noqa: E501
+
+ container is the name of the container in the pods of the scaling target # noqa: E501
+
+ :return: The container of this V2ContainerResourceMetricSource. # noqa: E501
+ :rtype: str
+ """
+ return self._container
+
+ @container.setter
+ def container(self, container):
+ """Sets the container of this V2ContainerResourceMetricSource.
+
+ container is the name of the container in the pods of the scaling target # noqa: E501
+
+ :param container: The container of this V2ContainerResourceMetricSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and container is None: # noqa: E501
+ raise ValueError("Invalid value for `container`, must not be `None`") # noqa: E501
+
+ self._container = container
+
+ @property
+ def name(self):
+ """Gets the name of this V2ContainerResourceMetricSource. # noqa: E501
+
+ name is the name of the resource in question. # noqa: E501
+
+ :return: The name of this V2ContainerResourceMetricSource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V2ContainerResourceMetricSource.
+
+ name is the name of the resource in question. # noqa: E501
+
+ :param name: The name of this V2ContainerResourceMetricSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def target(self):
+ """Gets the target of this V2ContainerResourceMetricSource. # noqa: E501
+
+
+ :return: The target of this V2ContainerResourceMetricSource. # noqa: E501
+ :rtype: V2MetricTarget
+ """
+ return self._target
+
+ @target.setter
+ def target(self, target):
+ """Sets the target of this V2ContainerResourceMetricSource.
+
+
+ :param target: The target of this V2ContainerResourceMetricSource. # noqa: E501
+ :type: V2MetricTarget
+ """
+ if self.local_vars_configuration.client_side_validation and target is None: # noqa: E501
+ raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
+
+ self._target = target
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2ContainerResourceMetricSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2ContainerResourceMetricSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_status.py
new file mode 100644
index 0000000000..1157831915
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_container_resource_metric_status.py
@@ -0,0 +1,179 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2ContainerResourceMetricStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'container': 'str',
+ 'current': 'V2MetricValueStatus',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'container': 'container',
+ 'current': 'current',
+ 'name': 'name'
+ }
+
+ def __init__(self, container=None, current=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V2ContainerResourceMetricStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._container = None
+ self._current = None
+ self._name = None
+ self.discriminator = None
+
+ self.container = container
+ self.current = current
+ self.name = name
+
+ @property
+ def container(self):
+ """Gets the container of this V2ContainerResourceMetricStatus. # noqa: E501
+
+ container is the name of the container in the pods of the scaling target # noqa: E501
+
+ :return: The container of this V2ContainerResourceMetricStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._container
+
+ @container.setter
+ def container(self, container):
+ """Sets the container of this V2ContainerResourceMetricStatus.
+
+ container is the name of the container in the pods of the scaling target # noqa: E501
+
+ :param container: The container of this V2ContainerResourceMetricStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and container is None: # noqa: E501
+ raise ValueError("Invalid value for `container`, must not be `None`") # noqa: E501
+
+ self._container = container
+
+ @property
+ def current(self):
+ """Gets the current of this V2ContainerResourceMetricStatus. # noqa: E501
+
+
+ :return: The current of this V2ContainerResourceMetricStatus. # noqa: E501
+ :rtype: V2MetricValueStatus
+ """
+ return self._current
+
+ @current.setter
+ def current(self, current):
+ """Sets the current of this V2ContainerResourceMetricStatus.
+
+
+ :param current: The current of this V2ContainerResourceMetricStatus. # noqa: E501
+ :type: V2MetricValueStatus
+ """
+ if self.local_vars_configuration.client_side_validation and current is None: # noqa: E501
+ raise ValueError("Invalid value for `current`, must not be `None`") # noqa: E501
+
+ self._current = current
+
+ @property
+ def name(self):
+ """Gets the name of this V2ContainerResourceMetricStatus. # noqa: E501
+
+ name is the name of the resource in question. # noqa: E501
+
+ :return: The name of this V2ContainerResourceMetricStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V2ContainerResourceMetricStatus.
+
+ name is the name of the resource in question. # noqa: E501
+
+ :param name: The name of this V2ContainerResourceMetricStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2ContainerResourceMetricStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2ContainerResourceMetricStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_cross_version_object_reference.py b/contrib/python/kubernetes/kubernetes/client/models/v2_cross_version_object_reference.py
new file mode 100644
index 0000000000..aa44b905ba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_cross_version_object_reference.py
@@ -0,0 +1,180 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2CrossVersionObjectReference(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'api_version': 'str',
+ 'kind': 'str',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'api_version': 'apiVersion',
+ 'kind': 'kind',
+ 'name': 'name'
+ }
+
+ def __init__(self, api_version=None, kind=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V2CrossVersionObjectReference - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._api_version = None
+ self._kind = None
+ self._name = None
+ self.discriminator = None
+
+ if api_version is not None:
+ self.api_version = api_version
+ self.kind = kind
+ self.name = name
+
+ @property
+ def api_version(self):
+ """Gets the api_version of this V2CrossVersionObjectReference. # noqa: E501
+
+ apiVersion is the API version of the referent # noqa: E501
+
+ :return: The api_version of this V2CrossVersionObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._api_version
+
+ @api_version.setter
+ def api_version(self, api_version):
+ """Sets the api_version of this V2CrossVersionObjectReference.
+
+ apiVersion is the API version of the referent # noqa: E501
+
+ :param api_version: The api_version of this V2CrossVersionObjectReference. # noqa: E501
+ :type: str
+ """
+
+ self._api_version = api_version
+
+ @property
+ def kind(self):
+ """Gets the kind of this V2CrossVersionObjectReference. # noqa: E501
+
+ kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :return: The kind of this V2CrossVersionObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._kind
+
+ @kind.setter
+ def kind(self, kind):
+ """Sets the kind of this V2CrossVersionObjectReference.
+
+ kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
+
+ :param kind: The kind of this V2CrossVersionObjectReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
+ raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
+
+ self._kind = kind
+
+ @property
+ def name(self):
+ """Gets the name of this V2CrossVersionObjectReference. # noqa: E501
+
+ name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :return: The name of this V2CrossVersionObjectReference. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V2CrossVersionObjectReference.
+
+ name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
+
+ :param name: The name of this V2CrossVersionObjectReference. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2CrossVersionObjectReference):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2CrossVersionObjectReference):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_source.py b/contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_source.py
new file mode 100644
index 0000000000..54fff57ba0
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_source.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2ExternalMetricSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'metric': 'V2MetricIdentifier',
+ 'target': 'V2MetricTarget'
+ }
+
+ attribute_map = {
+ 'metric': 'metric',
+ 'target': 'target'
+ }
+
+ def __init__(self, metric=None, target=None, local_vars_configuration=None): # noqa: E501
+ """V2ExternalMetricSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._metric = None
+ self._target = None
+ self.discriminator = None
+
+ self.metric = metric
+ self.target = target
+
+ @property
+ def metric(self):
+ """Gets the metric of this V2ExternalMetricSource. # noqa: E501
+
+
+ :return: The metric of this V2ExternalMetricSource. # noqa: E501
+ :rtype: V2MetricIdentifier
+ """
+ return self._metric
+
+ @metric.setter
+ def metric(self, metric):
+ """Sets the metric of this V2ExternalMetricSource.
+
+
+ :param metric: The metric of this V2ExternalMetricSource. # noqa: E501
+ :type: V2MetricIdentifier
+ """
+ if self.local_vars_configuration.client_side_validation and metric is None: # noqa: E501
+ raise ValueError("Invalid value for `metric`, must not be `None`") # noqa: E501
+
+ self._metric = metric
+
+ @property
+ def target(self):
+ """Gets the target of this V2ExternalMetricSource. # noqa: E501
+
+
+ :return: The target of this V2ExternalMetricSource. # noqa: E501
+ :rtype: V2MetricTarget
+ """
+ return self._target
+
+ @target.setter
+ def target(self, target):
+ """Sets the target of this V2ExternalMetricSource.
+
+
+ :param target: The target of this V2ExternalMetricSource. # noqa: E501
+ :type: V2MetricTarget
+ """
+ if self.local_vars_configuration.client_side_validation and target is None: # noqa: E501
+ raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
+
+ self._target = target
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2ExternalMetricSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2ExternalMetricSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_status.py
new file mode 100644
index 0000000000..73625778c9
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_external_metric_status.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2ExternalMetricStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'current': 'V2MetricValueStatus',
+ 'metric': 'V2MetricIdentifier'
+ }
+
+ attribute_map = {
+ 'current': 'current',
+ 'metric': 'metric'
+ }
+
+ def __init__(self, current=None, metric=None, local_vars_configuration=None): # noqa: E501
+ """V2ExternalMetricStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._current = None
+ self._metric = None
+ self.discriminator = None
+
+ self.current = current
+ self.metric = metric
+
+ @property
+ def current(self):
+ """Gets the current of this V2ExternalMetricStatus. # noqa: E501
+
+
+ :return: The current of this V2ExternalMetricStatus. # noqa: E501
+ :rtype: V2MetricValueStatus
+ """
+ return self._current
+
+ @current.setter
+ def current(self, current):
+ """Sets the current of this V2ExternalMetricStatus.
+
+
+ :param current: The current of this V2ExternalMetricStatus. # noqa: E501
+ :type: V2MetricValueStatus
+ """
+ if self.local_vars_configuration.client_side_validation and current is None: # noqa: E501
+ raise ValueError("Invalid value for `current`, must not be `None`") # noqa: E501
+
+ self._current = current
+
+ @property
+ def metric(self):
+ """Gets the metric of this V2ExternalMetricStatus. # noqa: E501
+
+
+ :return: The metric of this V2ExternalMetricStatus. # noqa: E501
+ :rtype: V2MetricIdentifier
+ """
+ return self._metric
+
+ @metric.setter
+ def metric(self, metric):
+ """Sets the metric of this V2ExternalMetricStatus.
+
+
+ :param metric: The metric of this V2ExternalMetricStatus. # noqa: E501
+ :type: V2MetricIdentifier
+ """
+ if self.local_vars_configuration.client_side_validation and metric is None: # noqa: E501
+ raise ValueError("Invalid value for `metric`, must not be `None`") # noqa: E501
+
+ self._metric = metric
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2ExternalMetricStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2ExternalMetricStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler.py b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler.py
new file mode 100644
index 0000000000..397092177e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2HorizontalPodAutoscaler(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V2HorizontalPodAutoscalerSpec',
        'status': 'V2HorizontalPodAutoscalerStatus'
    }

    # Maps each python attribute name to its JSON key on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V2HorizontalPodAutoscaler - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # One private backing field per declared attribute.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Every attribute is optional; route supplied values through the
        # property setters so future validation hooks stay in one place.
        for name, value in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
            ('spec', spec),
            ('status', status),
        ):
            if value is not None:
                setattr(self, name, value)

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values.

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V2HorizontalPodAutoscaler.

        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase.

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V2HorizontalPodAutoscaler.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard object metadata of this V2HorizontalPodAutoscaler.

        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V2HorizontalPodAutoscaler.

        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """Desired behaviour of this V2HorizontalPodAutoscaler.

        :rtype: V2HorizontalPodAutoscalerSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the spec of this V2HorizontalPodAutoscaler.

        :type: V2HorizontalPodAutoscalerSpec
        """
        self._spec = spec

    @property
    def status(self):
        """Most recently observed status of this V2HorizontalPodAutoscaler.

        :rtype: V2HorizontalPodAutoscalerStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V2HorizontalPodAutoscaler.

        :type: V2HorizontalPodAutoscalerStatus
        """
        self._status = status

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def plain(v):
            # Serialize nested models; pass scalars through untouched.
            return v.to_dict() if hasattr(v, "to_dict") else v

        result = {}
        for attr in six.iterkeys(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: plain(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their serialized dicts match."""
        return isinstance(other, V2HorizontalPodAutoscaler) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_behavior.py b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_behavior.py
new file mode 100644
index 0000000000..240a8a1bcd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_behavior.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2HorizontalPodAutoscalerBehavior(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'scale_down': 'V2HPAScalingRules',
        'scale_up': 'V2HPAScalingRules'
    }

    # Maps each python attribute name to its JSON key on the wire.
    attribute_map = {
        'scale_down': 'scaleDown',
        'scale_up': 'scaleUp'
    }

    def __init__(self, scale_down=None, scale_up=None, local_vars_configuration=None):  # noqa: E501
        """V2HorizontalPodAutoscalerBehavior - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # One private backing field per declared attribute.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Both attributes are optional; only assign supplied values.
        for name, value in (('scale_down', scale_down), ('scale_up', scale_up)):
            if value is not None:
                setattr(self, name, value)

    @property
    def scale_down(self):
        """Scaling rules applied when shrinking the replica count.

        :rtype: V2HPAScalingRules
        """
        return self._scale_down

    @scale_down.setter
    def scale_down(self, scale_down):
        """Set the scale_down of this V2HorizontalPodAutoscalerBehavior.

        :type: V2HPAScalingRules
        """
        self._scale_down = scale_down

    @property
    def scale_up(self):
        """Scaling rules applied when growing the replica count.

        :rtype: V2HPAScalingRules
        """
        return self._scale_up

    @scale_up.setter
    def scale_up(self, scale_up):
        """Set the scale_up of this V2HorizontalPodAutoscalerBehavior.

        :type: V2HPAScalingRules
        """
        self._scale_up = scale_up

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def plain(v):
            # Serialize nested models; pass scalars through untouched.
            return v.to_dict() if hasattr(v, "to_dict") else v

        result = {}
        for attr in six.iterkeys(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: plain(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their serialized dicts match."""
        return isinstance(other, V2HorizontalPodAutoscalerBehavior) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_condition.py b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_condition.py
new file mode 100644
index 0000000000..05b441eb46
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_condition.py
@@ -0,0 +1,236 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2HorizontalPodAutoscalerCondition(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    # Maps each python attribute name to its JSON key on the wire.
    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V2HorizontalPodAutoscalerCondition - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # One private backing field per declared attribute.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Optional fields are only assigned when provided; the required
        # `status` and `type` always pass through their validating setters.
        for name, value in (
            ('last_transition_time', last_transition_time),
            ('message', message),
            ('reason', reason),
        ):
            if value is not None:
                setattr(self, name, value)
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """lastTransitionTime is the last time the condition transitioned
        from one status to another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set the last_transition_time of this condition.

        :type: datetime
        """
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """message is a human-readable explanation containing details about
        the transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set the message of this condition.

        :type: str
        """
        self._message = message

    @property
    def reason(self):
        """reason is the reason for the condition's last transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason of this condition.

        :type: str
        """
        self._reason = reason

    @property
    def status(self):
        """status is the status of the condition (True, False, Unknown).

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this condition (required).

        :type: str
        """
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status

    @property
    def type(self):
        """type describes the current condition.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the type of this condition (required).

        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def plain(v):
            # Serialize nested models; pass scalars through untouched.
            return v.to_dict() if hasattr(v, "to_dict") else v

        result = {}
        for attr in six.iterkeys(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: plain(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their serialized dicts match."""
        return isinstance(other, V2HorizontalPodAutoscalerCondition) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_list.py b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_list.py
new file mode 100644
index 0000000000..128eba7140
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_list.py
@@ -0,0 +1,205 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2HorizontalPodAutoscalerList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V2HorizontalPodAutoscaler]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Maps each python attribute name to its JSON key on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V2HorizontalPodAutoscalerList - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # One private backing field per declared attribute.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Optional fields are only assigned when provided; the required
        # `items` always passes through its validating setter.
        for name, value in (
            ('api_version', api_version),
            ('kind', kind),
            ('metadata', metadata),
        ):
            if value is not None:
                setattr(self, name, value)
        self.items = items

    @property
    def api_version(self):
        """APIVersion defines the versioned schema of this representation of
        an object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values.

        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V2HorizontalPodAutoscalerList.

        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """items is the list of horizontal pod autoscaler objects.

        :rtype: list[V2HorizontalPodAutoscaler]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the items of this V2HorizontalPodAutoscalerList (required).

        :type: list[V2HorizontalPodAutoscaler]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase.

        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V2HorizontalPodAutoscalerList.

        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Standard list metadata of this V2HorizontalPodAutoscalerList.

        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V2HorizontalPodAutoscalerList.

        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def plain(v):
            # Serialize nested models; pass scalars through untouched.
            return v.to_dict() if hasattr(v, "to_dict") else v

        result = {}
        for attr in six.iterkeys(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: plain(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their serialized dicts match."""
        return isinstance(other, V2HorizontalPodAutoscalerList) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_spec.py
new file mode 100644
index 0000000000..0636a0c5f7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_spec.py
@@ -0,0 +1,232 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2HorizontalPodAutoscalerSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps each python attribute name to its declared OpenAPI type.
    openapi_types = {
        'behavior': 'V2HorizontalPodAutoscalerBehavior',
        'max_replicas': 'int',
        'metrics': 'list[V2MetricSpec]',
        'min_replicas': 'int',
        'scale_target_ref': 'V2CrossVersionObjectReference'
    }

    # Maps each python attribute name to its JSON key on the wire.
    attribute_map = {
        'behavior': 'behavior',
        'max_replicas': 'maxReplicas',
        'metrics': 'metrics',
        'min_replicas': 'minReplicas',
        'scale_target_ref': 'scaleTargetRef'
    }

    def __init__(self, behavior=None, max_replicas=None, metrics=None, min_replicas=None, scale_target_ref=None, local_vars_configuration=None):  # noqa: E501
        """V2HorizontalPodAutoscalerSpec - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )

        # One private backing field per declared attribute.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Optional fields are only assigned when provided; the required
        # `max_replicas` and `scale_target_ref` always pass through their
        # validating setters.
        for name, value in (
            ('behavior', behavior),
            ('metrics', metrics),
            ('min_replicas', min_replicas),
        ):
            if value is not None:
                setattr(self, name, value)
        self.max_replicas = max_replicas
        self.scale_target_ref = scale_target_ref

    @property
    def behavior(self):
        """Scaling behavior configuration of this spec.

        :rtype: V2HorizontalPodAutoscalerBehavior
        """
        return self._behavior

    @behavior.setter
    def behavior(self, behavior):
        """Set the behavior of this V2HorizontalPodAutoscalerSpec.

        :type: V2HorizontalPodAutoscalerBehavior
        """
        self._behavior = behavior

    @property
    def max_replicas(self):
        """maxReplicas is the upper limit for the number of replicas to which
        the autoscaler can scale up. It cannot be less that minReplicas.

        :rtype: int
        """
        return self._max_replicas

    @max_replicas.setter
    def max_replicas(self, max_replicas):
        """Set the max_replicas of this spec (required).

        :type: int
        """
        if self.local_vars_configuration.client_side_validation and max_replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `max_replicas`, must not be `None`")  # noqa: E501
        self._max_replicas = max_replicas

    @property
    def metrics(self):
        """metrics contains the specifications used to calculate the desired
        replica count (the maximum replica count across all metrics is used).
        If not set, the default metric is 80% average CPU utilization.

        :rtype: list[V2MetricSpec]
        """
        return self._metrics

    @metrics.setter
    def metrics(self, metrics):
        """Set the metrics of this V2HorizontalPodAutoscalerSpec.

        :type: list[V2MetricSpec]
        """
        self._metrics = metrics

    @property
    def min_replicas(self):
        """minReplicas is the lower limit for the number of replicas to which
        the autoscaler can scale down. It defaults to 1 pod. minReplicas may
        be 0 when the HPAScaleToZero feature gate is enabled and at least one
        Object or External metric is configured.

        :rtype: int
        """
        return self._min_replicas

    @min_replicas.setter
    def min_replicas(self, min_replicas):
        """Set the min_replicas of this V2HorizontalPodAutoscalerSpec.

        :type: int
        """
        self._min_replicas = min_replicas

    @property
    def scale_target_ref(self):
        """Reference to the scaled resource of this spec.

        :rtype: V2CrossVersionObjectReference
        """
        return self._scale_target_ref

    @scale_target_ref.setter
    def scale_target_ref(self, scale_target_ref):
        """Set the scale_target_ref of this spec (required).

        :type: V2CrossVersionObjectReference
        """
        if self.local_vars_configuration.client_side_validation and scale_target_ref is None:  # noqa: E501
            raise ValueError("Invalid value for `scale_target_ref`, must not be `None`")  # noqa: E501
        self._scale_target_ref = scale_target_ref

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def plain(v):
            # Serialize nested models; pass scalars through untouched.
            return v.to_dict() if hasattr(v, "to_dict") else v

        result = {}
        for attr in six.iterkeys(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [plain(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: plain(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their serialized dicts match."""
        return isinstance(other, V2HorizontalPodAutoscalerSpec) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_status.py
new file mode 100644
index 0000000000..542dbc4e9b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_horizontal_pod_autoscaler_status.py
@@ -0,0 +1,263 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2HorizontalPodAutoscalerStatus(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps python attribute name -> declared OpenAPI type (used by to_dict()).
+    openapi_types = {
+        'conditions': 'list[V2HorizontalPodAutoscalerCondition]',
+        'current_metrics': 'list[V2MetricStatus]',
+        'current_replicas': 'int',
+        'desired_replicas': 'int',
+        'last_scale_time': 'datetime',
+        'observed_generation': 'int'
+    }
+
+    # Maps python attribute name -> JSON key in the API payload.
+    attribute_map = {
+        'conditions': 'conditions',
+        'current_metrics': 'currentMetrics',
+        'current_replicas': 'currentReplicas',
+        'desired_replicas': 'desiredReplicas',
+        'last_scale_time': 'lastScaleTime',
+        'observed_generation': 'observedGeneration'
+    }
+
+    def __init__(self, conditions=None, current_metrics=None, current_replicas=None, desired_replicas=None, last_scale_time=None, observed_generation=None, local_vars_configuration=None):  # noqa: E501
+        """V2HorizontalPodAutoscalerStatus - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._conditions = None
+        self._current_metrics = None
+        self._current_replicas = None
+        self._desired_replicas = None
+        self._last_scale_time = None
+        self._observed_generation = None
+        # Set to None; this model does not use a polymorphism discriminator.
+        self.discriminator = None
+
+        if conditions is not None:
+            self.conditions = conditions
+        if current_metrics is not None:
+            self.current_metrics = current_metrics
+        if current_replicas is not None:
+            self.current_replicas = current_replicas
+        # desired_replicas is assigned unconditionally because it is required;
+        # its setter raises when None under client-side validation.
+        self.desired_replicas = desired_replicas
+        if last_scale_time is not None:
+            self.last_scale_time = last_scale_time
+        if observed_generation is not None:
+            self.observed_generation = observed_generation
+
+    @property
+    def conditions(self):
+        """Gets the conditions of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+
+        conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.  # noqa: E501
+
+        :return: The conditions of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :rtype: list[V2HorizontalPodAutoscalerCondition]
+        """
+        return self._conditions
+
+    @conditions.setter
+    def conditions(self, conditions):
+        """Sets the conditions of this V2HorizontalPodAutoscalerStatus.
+
+        conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.  # noqa: E501
+
+        :param conditions: The conditions of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :type: list[V2HorizontalPodAutoscalerCondition]
+        """
+
+        self._conditions = conditions
+
+    @property
+    def current_metrics(self):
+        """Gets the current_metrics of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+
+        currentMetrics is the last read state of the metrics used by this autoscaler.  # noqa: E501
+
+        :return: The current_metrics of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :rtype: list[V2MetricStatus]
+        """
+        return self._current_metrics
+
+    @current_metrics.setter
+    def current_metrics(self, current_metrics):
+        """Sets the current_metrics of this V2HorizontalPodAutoscalerStatus.
+
+        currentMetrics is the last read state of the metrics used by this autoscaler.  # noqa: E501
+
+        :param current_metrics: The current_metrics of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :type: list[V2MetricStatus]
+        """
+
+        self._current_metrics = current_metrics
+
+    @property
+    def current_replicas(self):
+        """Gets the current_replicas of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+
+        currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.  # noqa: E501
+
+        :return: The current_replicas of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :rtype: int
+        """
+        return self._current_replicas
+
+    @current_replicas.setter
+    def current_replicas(self, current_replicas):
+        """Sets the current_replicas of this V2HorizontalPodAutoscalerStatus.
+
+        currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.  # noqa: E501
+
+        :param current_replicas: The current_replicas of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :type: int
+        """
+
+        self._current_replicas = current_replicas
+
+    @property
+    def desired_replicas(self):
+        """Gets the desired_replicas of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+
+        desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.  # noqa: E501
+
+        :return: The desired_replicas of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :rtype: int
+        """
+        return self._desired_replicas
+
+    @desired_replicas.setter
+    def desired_replicas(self, desired_replicas):
+        """Sets the desired_replicas of this V2HorizontalPodAutoscalerStatus.
+
+        desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.  # noqa: E501
+
+        :param desired_replicas: The desired_replicas of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :type: int
+        """
+        # Required field: reject None when client-side validation is enabled.
+        if self.local_vars_configuration.client_side_validation and desired_replicas is None:  # noqa: E501
+            raise ValueError("Invalid value for `desired_replicas`, must not be `None`")  # noqa: E501
+
+        self._desired_replicas = desired_replicas
+
+    @property
+    def last_scale_time(self):
+        """Gets the last_scale_time of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+
+        lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.  # noqa: E501
+
+        :return: The last_scale_time of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :rtype: datetime
+        """
+        return self._last_scale_time
+
+    @last_scale_time.setter
+    def last_scale_time(self, last_scale_time):
+        """Sets the last_scale_time of this V2HorizontalPodAutoscalerStatus.
+
+        lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.  # noqa: E501
+
+        :param last_scale_time: The last_scale_time of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :type: datetime
+        """
+
+        self._last_scale_time = last_scale_time
+
+    @property
+    def observed_generation(self):
+        """Gets the observed_generation of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+
+        observedGeneration is the most recent generation observed by this autoscaler.  # noqa: E501
+
+        :return: The observed_generation of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :rtype: int
+        """
+        return self._observed_generation
+
+    @observed_generation.setter
+    def observed_generation(self, observed_generation):
+        """Sets the observed_generation of this V2HorizontalPodAutoscalerStatus.
+
+        observedGeneration is the most recent generation observed by this autoscaler.  # noqa: E501
+
+        :param observed_generation: The observed_generation of this V2HorizontalPodAutoscalerStatus.  # noqa: E501
+        :type: int
+        """
+
+        self._observed_generation = observed_generation
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        # Recursively serialize declared attributes; nested models (anything
+        # exposing to_dict) are converted in place.
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Structural equality via the serialized dict form.
+        if not isinstance(other, V2HorizontalPodAutoscalerStatus):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V2HorizontalPodAutoscalerStatus):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_policy.py b/contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_policy.py
new file mode 100644
index 0000000000..1963f6e90a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_policy.py
@@ -0,0 +1,181 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2HPAScalingPolicy(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps python attribute name -> declared OpenAPI type (used by to_dict()).
+    openapi_types = {
+        'period_seconds': 'int',
+        'type': 'str',
+        'value': 'int'
+    }
+
+    # Maps python attribute name -> JSON key in the API payload.
+    attribute_map = {
+        'period_seconds': 'periodSeconds',
+        'type': 'type',
+        'value': 'value'
+    }
+
+    def __init__(self, period_seconds=None, type=None, value=None, local_vars_configuration=None):  # noqa: E501
+        """V2HPAScalingPolicy - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._period_seconds = None
+        self._type = None
+        self._value = None
+        # Set to None; this model does not use a polymorphism discriminator.
+        self.discriminator = None
+
+        # All three fields are required: each setter raises when given None
+        # under client-side validation.
+        self.period_seconds = period_seconds
+        self.type = type
+        self.value = value
+
+    @property
+    def period_seconds(self):
+        """Gets the period_seconds of this V2HPAScalingPolicy.  # noqa: E501
+
+        periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).  # noqa: E501
+
+        :return: The period_seconds of this V2HPAScalingPolicy.  # noqa: E501
+        :rtype: int
+        """
+        return self._period_seconds
+
+    @period_seconds.setter
+    def period_seconds(self, period_seconds):
+        """Sets the period_seconds of this V2HPAScalingPolicy.
+
+        periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).  # noqa: E501
+
+        :param period_seconds: The period_seconds of this V2HPAScalingPolicy.  # noqa: E501
+        :type: int
+        """
+        if self.local_vars_configuration.client_side_validation and period_seconds is None:  # noqa: E501
+            raise ValueError("Invalid value for `period_seconds`, must not be `None`")  # noqa: E501
+
+        self._period_seconds = period_seconds
+
+    @property
+    def type(self):
+        """Gets the type of this V2HPAScalingPolicy.  # noqa: E501
+
+        type is used to specify the scaling policy.  # noqa: E501
+
+        :return: The type of this V2HPAScalingPolicy.  # noqa: E501
+        :rtype: str
+        """
+        return self._type
+
+    @type.setter
+    def type(self, type):
+        """Sets the type of this V2HPAScalingPolicy.
+
+        type is used to specify the scaling policy.  # noqa: E501
+
+        :param type: The type of this V2HPAScalingPolicy.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
+            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
+
+        self._type = type
+
+    @property
+    def value(self):
+        """Gets the value of this V2HPAScalingPolicy.  # noqa: E501
+
+        value contains the amount of change which is permitted by the policy. It must be greater than zero  # noqa: E501
+
+        :return: The value of this V2HPAScalingPolicy.  # noqa: E501
+        :rtype: int
+        """
+        return self._value
+
+    @value.setter
+    def value(self, value):
+        """Sets the value of this V2HPAScalingPolicy.
+
+        value contains the amount of change which is permitted by the policy. It must be greater than zero  # noqa: E501
+
+        :param value: The value of this V2HPAScalingPolicy.  # noqa: E501
+        :type: int
+        """
+        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
+            raise ValueError("Invalid value for `value`, must not be `None`")  # noqa: E501
+
+        self._value = value
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        # Recursively serialize declared attributes; nested models (anything
+        # exposing to_dict) are converted in place.
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Structural equality via the serialized dict form.
+        if not isinstance(other, V2HPAScalingPolicy):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V2HPAScalingPolicy):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_rules.py b/contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_rules.py
new file mode 100644
index 0000000000..ead292e19f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_hpa_scaling_rules.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2HPAScalingRules(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps python attribute name -> declared OpenAPI type (used by to_dict()).
+    openapi_types = {
+        'policies': 'list[V2HPAScalingPolicy]',
+        'select_policy': 'str',
+        'stabilization_window_seconds': 'int'
+    }
+
+    # Maps python attribute name -> JSON key in the API payload.
+    attribute_map = {
+        'policies': 'policies',
+        'select_policy': 'selectPolicy',
+        'stabilization_window_seconds': 'stabilizationWindowSeconds'
+    }
+
+    def __init__(self, policies=None, select_policy=None, stabilization_window_seconds=None, local_vars_configuration=None):  # noqa: E501
+        """V2HPAScalingRules - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._policies = None
+        self._select_policy = None
+        self._stabilization_window_seconds = None
+        # Set to None; this model does not use a polymorphism discriminator.
+        self.discriminator = None
+
+        # All fields are optional: assign only when a value was supplied.
+        if policies is not None:
+            self.policies = policies
+        if select_policy is not None:
+            self.select_policy = select_policy
+        if stabilization_window_seconds is not None:
+            self.stabilization_window_seconds = stabilization_window_seconds
+
+    @property
+    def policies(self):
+        """Gets the policies of this V2HPAScalingRules.  # noqa: E501
+
+        policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid  # noqa: E501
+
+        :return: The policies of this V2HPAScalingRules.  # noqa: E501
+        :rtype: list[V2HPAScalingPolicy]
+        """
+        return self._policies
+
+    @policies.setter
+    def policies(self, policies):
+        """Sets the policies of this V2HPAScalingRules.
+
+        policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid  # noqa: E501
+
+        :param policies: The policies of this V2HPAScalingRules.  # noqa: E501
+        :type: list[V2HPAScalingPolicy]
+        """
+
+        self._policies = policies
+
+    @property
+    def select_policy(self):
+        """Gets the select_policy of this V2HPAScalingRules.  # noqa: E501
+
+        selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.  # noqa: E501
+
+        :return: The select_policy of this V2HPAScalingRules.  # noqa: E501
+        :rtype: str
+        """
+        return self._select_policy
+
+    @select_policy.setter
+    def select_policy(self, select_policy):
+        """Sets the select_policy of this V2HPAScalingRules.
+
+        selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.  # noqa: E501
+
+        :param select_policy: The select_policy of this V2HPAScalingRules.  # noqa: E501
+        :type: str
+        """
+
+        self._select_policy = select_policy
+
+    @property
+    def stabilization_window_seconds(self):
+        """Gets the stabilization_window_seconds of this V2HPAScalingRules.  # noqa: E501
+
+        stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).  # noqa: E501
+
+        :return: The stabilization_window_seconds of this V2HPAScalingRules.  # noqa: E501
+        :rtype: int
+        """
+        return self._stabilization_window_seconds
+
+    @stabilization_window_seconds.setter
+    def stabilization_window_seconds(self, stabilization_window_seconds):
+        """Sets the stabilization_window_seconds of this V2HPAScalingRules.
+
+        stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).  # noqa: E501
+
+        :param stabilization_window_seconds: The stabilization_window_seconds of this V2HPAScalingRules.  # noqa: E501
+        :type: int
+        """
+
+        self._stabilization_window_seconds = stabilization_window_seconds
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        # Recursively serialize declared attributes; nested models (anything
+        # exposing to_dict) are converted in place.
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Structural equality via the serialized dict form.
+        if not isinstance(other, V2HPAScalingRules):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V2HPAScalingRules):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_metric_identifier.py b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_identifier.py
new file mode 100644
index 0000000000..4eb014d66b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_identifier.py
@@ -0,0 +1,149 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2MetricIdentifier(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps python attribute name -> declared OpenAPI type (used by to_dict()).
+    openapi_types = {
+        'name': 'str',
+        'selector': 'V1LabelSelector'
+    }
+
+    # Maps python attribute name -> JSON key in the API payload.
+    attribute_map = {
+        'name': 'name',
+        'selector': 'selector'
+    }
+
+    def __init__(self, name=None, selector=None, local_vars_configuration=None):  # noqa: E501
+        """V2MetricIdentifier - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._name = None
+        self._selector = None
+        # Set to None; this model does not use a polymorphism discriminator.
+        self.discriminator = None
+
+        # name is required (its setter rejects None under validation);
+        # selector is optional and only assigned when supplied.
+        self.name = name
+        if selector is not None:
+            self.selector = selector
+
+    @property
+    def name(self):
+        """Gets the name of this V2MetricIdentifier.  # noqa: E501
+
+        name is the name of the given metric  # noqa: E501
+
+        :return: The name of this V2MetricIdentifier.  # noqa: E501
+        :rtype: str
+        """
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        """Sets the name of this V2MetricIdentifier.
+
+        name is the name of the given metric  # noqa: E501
+
+        :param name: The name of this V2MetricIdentifier.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
+            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
+
+        self._name = name
+
+    @property
+    def selector(self):
+        """Gets the selector of this V2MetricIdentifier.  # noqa: E501
+
+
+        :return: The selector of this V2MetricIdentifier.  # noqa: E501
+        :rtype: V1LabelSelector
+        """
+        return self._selector
+
+    @selector.setter
+    def selector(self, selector):
+        """Sets the selector of this V2MetricIdentifier.
+
+
+        :param selector: The selector of this V2MetricIdentifier.  # noqa: E501
+        :type: V1LabelSelector
+        """
+
+        self._selector = selector
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        # Recursively serialize declared attributes; nested models (anything
+        # exposing to_dict) are converted in place.
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Structural equality via the serialized dict form.
+        if not isinstance(other, V2MetricIdentifier):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V2MetricIdentifier):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_metric_spec.py b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_spec.py
new file mode 100644
index 0000000000..3a1a013a6a
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_spec.py
@@ -0,0 +1,253 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2MetricSpec(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    # Maps python attribute name -> declared OpenAPI type (used by to_dict()).
+    openapi_types = {
+        'container_resource': 'V2ContainerResourceMetricSource',
+        'external': 'V2ExternalMetricSource',
+        'object': 'V2ObjectMetricSource',
+        'pods': 'V2PodsMetricSource',
+        'resource': 'V2ResourceMetricSource',
+        'type': 'str'
+    }
+
+    # Maps python attribute name -> JSON key in the API payload.
+    attribute_map = {
+        'container_resource': 'containerResource',
+        'external': 'external',
+        'object': 'object',
+        'pods': 'pods',
+        'resource': 'resource',
+        'type': 'type'
+    }
+
+    def __init__(self, container_resource=None, external=None, object=None, pods=None, resource=None, type=None, local_vars_configuration=None):  # noqa: E501
+        """V2MetricSpec - a model defined in OpenAPI"""  # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._container_resource = None
+        self._external = None
+        self._object = None
+        self._pods = None
+        self._resource = None
+        self._type = None
+        # Set to None; this model does not use a polymorphism discriminator.
+        self.discriminator = None
+
+        # Per-source fields are optional; only `type` is required
+        # (its setter rejects None under client-side validation).
+        if container_resource is not None:
+            self.container_resource = container_resource
+        if external is not None:
+            self.external = external
+        if object is not None:
+            self.object = object
+        if pods is not None:
+            self.pods = pods
+        if resource is not None:
+            self.resource = resource
+        self.type = type
+
+    @property
+    def container_resource(self):
+        """Gets the container_resource of this V2MetricSpec.  # noqa: E501
+
+
+        :return: The container_resource of this V2MetricSpec.  # noqa: E501
+        :rtype: V2ContainerResourceMetricSource
+        """
+        return self._container_resource
+
+    @container_resource.setter
+    def container_resource(self, container_resource):
+        """Sets the container_resource of this V2MetricSpec.
+
+
+        :param container_resource: The container_resource of this V2MetricSpec.  # noqa: E501
+        :type: V2ContainerResourceMetricSource
+        """
+
+        self._container_resource = container_resource
+
+    @property
+    def external(self):
+        """Gets the external of this V2MetricSpec.  # noqa: E501
+
+
+        :return: The external of this V2MetricSpec.  # noqa: E501
+        :rtype: V2ExternalMetricSource
+        """
+        return self._external
+
+    @external.setter
+    def external(self, external):
+        """Sets the external of this V2MetricSpec.
+
+
+        :param external: The external of this V2MetricSpec.  # noqa: E501
+        :type: V2ExternalMetricSource
+        """
+
+        self._external = external
+
+    @property
+    def object(self):
+        """Gets the object of this V2MetricSpec.  # noqa: E501
+
+
+        :return: The object of this V2MetricSpec.  # noqa: E501
+        :rtype: V2ObjectMetricSource
+        """
+        return self._object
+
+    @object.setter
+    def object(self, object):
+        """Sets the object of this V2MetricSpec.
+
+
+        :param object: The object of this V2MetricSpec.  # noqa: E501
+        :type: V2ObjectMetricSource
+        """
+
+        self._object = object
+
+    @property
+    def pods(self):
+        """Gets the pods of this V2MetricSpec.  # noqa: E501
+
+
+        :return: The pods of this V2MetricSpec.  # noqa: E501
+        :rtype: V2PodsMetricSource
+        """
+        return self._pods
+
+    @pods.setter
+    def pods(self, pods):
+        """Sets the pods of this V2MetricSpec.
+
+
+        :param pods: The pods of this V2MetricSpec.  # noqa: E501
+        :type: V2PodsMetricSource
+        """
+
+        self._pods = pods
+
+    @property
+    def resource(self):
+        """Gets the resource of this V2MetricSpec.  # noqa: E501
+
+
+        :return: The resource of this V2MetricSpec.  # noqa: E501
+        :rtype: V2ResourceMetricSource
+        """
+        return self._resource
+
+    @resource.setter
+    def resource(self, resource):
+        """Sets the resource of this V2MetricSpec.
+
+
+        :param resource: The resource of this V2MetricSpec.  # noqa: E501
+        :type: V2ResourceMetricSource
+        """
+
+        self._resource = resource
+
+    @property
+    def type(self):
+        """Gets the type of this V2MetricSpec.  # noqa: E501
+
+        type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled  # noqa: E501
+
+        :return: The type of this V2MetricSpec.  # noqa: E501
+        :rtype: str
+        """
+        return self._type
+
+    @type.setter
+    def type(self, type):
+        """Sets the type of this V2MetricSpec.
+
+        type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled  # noqa: E501
+
+        :param type: The type of this V2MetricSpec.  # noqa: E501
+        :type: str
+        """
+        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
+            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
+
+        self._type = type
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        # Recursively serialize declared attributes; nested models (anything
+        # exposing to_dict) are converted in place.
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        # Structural equality via the serialized dict form.
+        if not isinstance(other, V2MetricSpec):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, V2MetricSpec):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_metric_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_status.py
new file mode 100644
index 0000000000..9b23432954
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_status.py
@@ -0,0 +1,253 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2MetricStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI model type, consumed by the
    # client (de)serializer and by to_dict() below.
    openapi_types = {
        'container_resource': 'V2ContainerResourceMetricStatus',
        'external': 'V2ExternalMetricStatus',
        'object': 'V2ObjectMetricStatus',
        'pods': 'V2PodsMetricStatus',
        'resource': 'V2ResourceMetricStatus',
        'type': 'str'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'container_resource': 'containerResource',
        'external': 'external',
        'object': 'object',
        'pods': 'pods',
        'resource': 'resource',
        'type': 'type'
    }

    def __init__(self, container_resource=None, external=None, object=None, pods=None, resource=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V2MetricStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._container_resource = None
        self._external = None
        self._object = None
        self._pods = None
        self._resource = None
        self._type = None
        self.discriminator = None

        # The metric-source fields are optional and only assigned when
        # provided; `type` is always assigned, and its setter raises when
        # it is None under client-side validation.
        if container_resource is not None:
            self.container_resource = container_resource
        if external is not None:
            self.external = external
        if object is not None:
            self.object = object
        if pods is not None:
            self.pods = pods
        if resource is not None:
            self.resource = resource
        self.type = type

    @property
    def container_resource(self):
        """Gets the container_resource of this V2MetricStatus.  # noqa: E501


        :return: The container_resource of this V2MetricStatus.  # noqa: E501
        :rtype: V2ContainerResourceMetricStatus
        """
        return self._container_resource

    @container_resource.setter
    def container_resource(self, container_resource):
        """Sets the container_resource of this V2MetricStatus.


        :param container_resource: The container_resource of this V2MetricStatus.  # noqa: E501
        :type: V2ContainerResourceMetricStatus
        """

        self._container_resource = container_resource

    @property
    def external(self):
        """Gets the external of this V2MetricStatus.  # noqa: E501


        :return: The external of this V2MetricStatus.  # noqa: E501
        :rtype: V2ExternalMetricStatus
        """
        return self._external

    @external.setter
    def external(self, external):
        """Sets the external of this V2MetricStatus.


        :param external: The external of this V2MetricStatus.  # noqa: E501
        :type: V2ExternalMetricStatus
        """

        self._external = external

    @property
    def object(self):
        """Gets the object of this V2MetricStatus.  # noqa: E501


        :return: The object of this V2MetricStatus.  # noqa: E501
        :rtype: V2ObjectMetricStatus
        """
        return self._object

    @object.setter
    def object(self, object):
        """Sets the object of this V2MetricStatus.


        :param object: The object of this V2MetricStatus.  # noqa: E501
        :type: V2ObjectMetricStatus
        """

        self._object = object

    @property
    def pods(self):
        """Gets the pods of this V2MetricStatus.  # noqa: E501


        :return: The pods of this V2MetricStatus.  # noqa: E501
        :rtype: V2PodsMetricStatus
        """
        return self._pods

    @pods.setter
    def pods(self, pods):
        """Sets the pods of this V2MetricStatus.


        :param pods: The pods of this V2MetricStatus.  # noqa: E501
        :type: V2PodsMetricStatus
        """

        self._pods = pods

    @property
    def resource(self):
        """Gets the resource of this V2MetricStatus.  # noqa: E501


        :return: The resource of this V2MetricStatus.  # noqa: E501
        :rtype: V2ResourceMetricStatus
        """
        return self._resource

    @resource.setter
    def resource(self, resource):
        """Sets the resource of this V2MetricStatus.


        :param resource: The resource of this V2MetricStatus.  # noqa: E501
        :type: V2ResourceMetricStatus
        """

        self._resource = resource

    @property
    def type(self):
        """Gets the type of this V2MetricStatus.  # noqa: E501

        type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled  # noqa: E501

        :return: The type of this V2MetricStatus.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V2MetricStatus.

        type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled  # noqa: E501

        :param type: The type of this V2MetricStatus.  # noqa: E501
        :type: str
        :raises ValueError: if ``type`` is None and client-side validation
            is enabled (``type`` is a required field).
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: any value (or element of a list/dict value)
        # exposing a ``to_dict`` method is converted; plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2MetricStatus):
            return False

        # Equality is structural: compare serialized dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2MetricStatus):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_metric_target.py b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_target.py
new file mode 100644
index 0000000000..0c058ce6bf
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_target.py
@@ -0,0 +1,207 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2MetricTarget(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type, consumed by the client
    # (de)serializer and by to_dict() below.
    openapi_types = {
        'average_utilization': 'int',
        'average_value': 'str',
        'type': 'str',
        'value': 'str'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'average_utilization': 'averageUtilization',
        'average_value': 'averageValue',
        'type': 'type',
        'value': 'value'
    }

    def __init__(self, average_utilization=None, average_value=None, type=None, value=None, local_vars_configuration=None):  # noqa: E501
        """V2MetricTarget - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._average_utilization = None
        self._average_value = None
        self._type = None
        self._value = None
        self.discriminator = None

        # Only `type` is required (its setter raises on None under
        # client-side validation); the target-value fields are optional.
        if average_utilization is not None:
            self.average_utilization = average_utilization
        if average_value is not None:
            self.average_value = average_value
        self.type = type
        if value is not None:
            self.value = value

    @property
    def average_utilization(self):
        """Gets the average_utilization of this V2MetricTarget.  # noqa: E501

        averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type  # noqa: E501

        :return: The average_utilization of this V2MetricTarget.  # noqa: E501
        :rtype: int
        """
        return self._average_utilization

    @average_utilization.setter
    def average_utilization(self, average_utilization):
        """Sets the average_utilization of this V2MetricTarget.

        averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type  # noqa: E501

        :param average_utilization: The average_utilization of this V2MetricTarget.  # noqa: E501
        :type: int
        """

        self._average_utilization = average_utilization

    @property
    def average_value(self):
        """Gets the average_value of this V2MetricTarget.  # noqa: E501

        averageValue is the target value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :return: The average_value of this V2MetricTarget.  # noqa: E501
        :rtype: str
        """
        return self._average_value

    @average_value.setter
    def average_value(self, average_value):
        """Sets the average_value of this V2MetricTarget.

        averageValue is the target value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :param average_value: The average_value of this V2MetricTarget.  # noqa: E501
        :type: str
        """

        self._average_value = average_value

    @property
    def type(self):
        """Gets the type of this V2MetricTarget.  # noqa: E501

        type represents whether the metric type is Utilization, Value, or AverageValue  # noqa: E501

        :return: The type of this V2MetricTarget.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V2MetricTarget.

        type represents whether the metric type is Utilization, Value, or AverageValue  # noqa: E501

        :param type: The type of this V2MetricTarget.  # noqa: E501
        :type: str
        :raises ValueError: if ``type`` is None and client-side validation
            is enabled (``type`` is a required field).
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501

        self._type = type

    @property
    def value(self):
        """Gets the value of this V2MetricTarget.  # noqa: E501

        value is the target value of the metric (as a quantity).  # noqa: E501

        :return: The value of this V2MetricTarget.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this V2MetricTarget.

        value is the target value of the metric (as a quantity).  # noqa: E501

        :param value: The value of this V2MetricTarget.  # noqa: E501
        :type: str
        """

        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: any value (or element of a list/dict value)
        # exposing a ``to_dict`` method is converted; plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2MetricTarget):
            return False

        # Equality is structural: compare serialized dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2MetricTarget):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_metric_value_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_value_status.py
new file mode 100644
index 0000000000..575d9eb7d3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_metric_value_status.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2MetricValueStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type, consumed by the client
    # (de)serializer and by to_dict() below.
    openapi_types = {
        'average_utilization': 'int',
        'average_value': 'str',
        'value': 'str'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'average_utilization': 'averageUtilization',
        'average_value': 'averageValue',
        'value': 'value'
    }

    def __init__(self, average_utilization=None, average_value=None, value=None, local_vars_configuration=None):  # noqa: E501
        """V2MetricValueStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._average_utilization = None
        self._average_value = None
        self._value = None
        self.discriminator = None

        # All fields of this model are optional; attributes are only
        # assigned when a non-None value is supplied.
        if average_utilization is not None:
            self.average_utilization = average_utilization
        if average_value is not None:
            self.average_value = average_value
        if value is not None:
            self.value = value

    @property
    def average_utilization(self):
        """Gets the average_utilization of this V2MetricValueStatus.  # noqa: E501

        currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.  # noqa: E501

        :return: The average_utilization of this V2MetricValueStatus.  # noqa: E501
        :rtype: int
        """
        return self._average_utilization

    @average_utilization.setter
    def average_utilization(self, average_utilization):
        """Sets the average_utilization of this V2MetricValueStatus.

        currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.  # noqa: E501

        :param average_utilization: The average_utilization of this V2MetricValueStatus.  # noqa: E501
        :type: int
        """

        self._average_utilization = average_utilization

    @property
    def average_value(self):
        """Gets the average_value of this V2MetricValueStatus.  # noqa: E501

        averageValue is the current value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :return: The average_value of this V2MetricValueStatus.  # noqa: E501
        :rtype: str
        """
        return self._average_value

    @average_value.setter
    def average_value(self, average_value):
        """Sets the average_value of this V2MetricValueStatus.

        averageValue is the current value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :param average_value: The average_value of this V2MetricValueStatus.  # noqa: E501
        :type: str
        """

        self._average_value = average_value

    @property
    def value(self):
        """Gets the value of this V2MetricValueStatus.  # noqa: E501

        value is the current value of the metric (as a quantity).  # noqa: E501

        :return: The value of this V2MetricValueStatus.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this V2MetricValueStatus.

        value is the current value of the metric (as a quantity).  # noqa: E501

        :param value: The value of this V2MetricValueStatus.  # noqa: E501
        :type: str
        """

        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: any value (or element of a list/dict value)
        # exposing a ``to_dict`` method is converted; plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2MetricValueStatus):
            return False

        # Equality is structural: compare serialized dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2MetricValueStatus):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_source.py b/contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_source.py
new file mode 100644
index 0000000000..2429dada40
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_source.py
@@ -0,0 +1,175 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2ObjectMetricSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI model type, consumed by the
    # client (de)serializer and by to_dict() below.
    openapi_types = {
        'described_object': 'V2CrossVersionObjectReference',
        'metric': 'V2MetricIdentifier',
        'target': 'V2MetricTarget'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'described_object': 'describedObject',
        'metric': 'metric',
        'target': 'target'
    }

    def __init__(self, described_object=None, metric=None, target=None, local_vars_configuration=None):  # noqa: E501
        """V2ObjectMetricSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._described_object = None
        self._metric = None
        self._target = None
        self.discriminator = None

        # All three fields are required: each setter raises on None when
        # client-side validation is enabled.
        self.described_object = described_object
        self.metric = metric
        self.target = target

    @property
    def described_object(self):
        """Gets the described_object of this V2ObjectMetricSource.  # noqa: E501


        :return: The described_object of this V2ObjectMetricSource.  # noqa: E501
        :rtype: V2CrossVersionObjectReference
        """
        return self._described_object

    @described_object.setter
    def described_object(self, described_object):
        """Sets the described_object of this V2ObjectMetricSource.


        :param described_object: The described_object of this V2ObjectMetricSource.  # noqa: E501
        :type: V2CrossVersionObjectReference
        :raises ValueError: if ``described_object`` is None and client-side
            validation is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and described_object is None:  # noqa: E501
            raise ValueError("Invalid value for `described_object`, must not be `None`")  # noqa: E501

        self._described_object = described_object

    @property
    def metric(self):
        """Gets the metric of this V2ObjectMetricSource.  # noqa: E501


        :return: The metric of this V2ObjectMetricSource.  # noqa: E501
        :rtype: V2MetricIdentifier
        """
        return self._metric

    @metric.setter
    def metric(self, metric):
        """Sets the metric of this V2ObjectMetricSource.


        :param metric: The metric of this V2ObjectMetricSource.  # noqa: E501
        :type: V2MetricIdentifier
        :raises ValueError: if ``metric`` is None and client-side validation
            is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and metric is None:  # noqa: E501
            raise ValueError("Invalid value for `metric`, must not be `None`")  # noqa: E501

        self._metric = metric

    @property
    def target(self):
        """Gets the target of this V2ObjectMetricSource.  # noqa: E501


        :return: The target of this V2ObjectMetricSource.  # noqa: E501
        :rtype: V2MetricTarget
        """
        return self._target

    @target.setter
    def target(self, target):
        """Sets the target of this V2ObjectMetricSource.


        :param target: The target of this V2ObjectMetricSource.  # noqa: E501
        :type: V2MetricTarget
        :raises ValueError: if ``target`` is None and client-side validation
            is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and target is None:  # noqa: E501
            raise ValueError("Invalid value for `target`, must not be `None`")  # noqa: E501

        self._target = target

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: any value (or element of a list/dict value)
        # exposing a ``to_dict`` method is converted; plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2ObjectMetricSource):
            return False

        # Equality is structural: compare serialized dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2ObjectMetricSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_status.py
new file mode 100644
index 0000000000..0296b6d890
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_object_metric_status.py
@@ -0,0 +1,175 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2ObjectMetricStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI model type, consumed by the
    # client (de)serializer and by to_dict() below.
    openapi_types = {
        'current': 'V2MetricValueStatus',
        'described_object': 'V2CrossVersionObjectReference',
        'metric': 'V2MetricIdentifier'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'current': 'current',
        'described_object': 'describedObject',
        'metric': 'metric'
    }

    def __init__(self, current=None, described_object=None, metric=None, local_vars_configuration=None):  # noqa: E501
        """V2ObjectMetricStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._current = None
        self._described_object = None
        self._metric = None
        self.discriminator = None

        # All three fields are required: each setter raises on None when
        # client-side validation is enabled.
        self.current = current
        self.described_object = described_object
        self.metric = metric

    @property
    def current(self):
        """Gets the current of this V2ObjectMetricStatus.  # noqa: E501


        :return: The current of this V2ObjectMetricStatus.  # noqa: E501
        :rtype: V2MetricValueStatus
        """
        return self._current

    @current.setter
    def current(self, current):
        """Sets the current of this V2ObjectMetricStatus.


        :param current: The current of this V2ObjectMetricStatus.  # noqa: E501
        :type: V2MetricValueStatus
        :raises ValueError: if ``current`` is None and client-side validation
            is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and current is None:  # noqa: E501
            raise ValueError("Invalid value for `current`, must not be `None`")  # noqa: E501

        self._current = current

    @property
    def described_object(self):
        """Gets the described_object of this V2ObjectMetricStatus.  # noqa: E501


        :return: The described_object of this V2ObjectMetricStatus.  # noqa: E501
        :rtype: V2CrossVersionObjectReference
        """
        return self._described_object

    @described_object.setter
    def described_object(self, described_object):
        """Sets the described_object of this V2ObjectMetricStatus.


        :param described_object: The described_object of this V2ObjectMetricStatus.  # noqa: E501
        :type: V2CrossVersionObjectReference
        :raises ValueError: if ``described_object`` is None and client-side
            validation is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and described_object is None:  # noqa: E501
            raise ValueError("Invalid value for `described_object`, must not be `None`")  # noqa: E501

        self._described_object = described_object

    @property
    def metric(self):
        """Gets the metric of this V2ObjectMetricStatus.  # noqa: E501


        :return: The metric of this V2ObjectMetricStatus.  # noqa: E501
        :rtype: V2MetricIdentifier
        """
        return self._metric

    @metric.setter
    def metric(self, metric):
        """Sets the metric of this V2ObjectMetricStatus.


        :param metric: The metric of this V2ObjectMetricStatus.  # noqa: E501
        :type: V2MetricIdentifier
        :raises ValueError: if ``metric`` is None and client-side validation
            is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and metric is None:  # noqa: E501
            raise ValueError("Invalid value for `metric`, must not be `None`")  # noqa: E501

        self._metric = metric

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: any value (or element of a list/dict value)
        # exposing a ``to_dict`` method is converted; plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2ObjectMetricStatus):
            return False

        # Equality is structural: compare serialized dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2ObjectMetricStatus):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_source.py b/contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_source.py
new file mode 100644
index 0000000000..95abcad403
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_source.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
class V2PodsMetricSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI model type, consumed by the
    # client (de)serializer and by to_dict() below.
    openapi_types = {
        'metric': 'V2MetricIdentifier',
        'target': 'V2MetricTarget'
    }

    # Maps python attribute name -> JSON key in the API definition.
    attribute_map = {
        'metric': 'metric',
        'target': 'target'
    }

    def __init__(self, metric=None, target=None, local_vars_configuration=None):  # noqa: E501
        """V2PodsMetricSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._metric = None
        self._target = None
        self.discriminator = None

        # Both fields are required: each setter raises on None when
        # client-side validation is enabled.
        self.metric = metric
        self.target = target

    @property
    def metric(self):
        """Gets the metric of this V2PodsMetricSource.  # noqa: E501


        :return: The metric of this V2PodsMetricSource.  # noqa: E501
        :rtype: V2MetricIdentifier
        """
        return self._metric

    @metric.setter
    def metric(self, metric):
        """Sets the metric of this V2PodsMetricSource.


        :param metric: The metric of this V2PodsMetricSource.  # noqa: E501
        :type: V2MetricIdentifier
        :raises ValueError: if ``metric`` is None and client-side validation
            is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and metric is None:  # noqa: E501
            raise ValueError("Invalid value for `metric`, must not be `None`")  # noqa: E501

        self._metric = metric

    @property
    def target(self):
        """Gets the target of this V2PodsMetricSource.  # noqa: E501


        :return: The target of this V2PodsMetricSource.  # noqa: E501
        :rtype: V2MetricTarget
        """
        return self._target

    @target.setter
    def target(self, target):
        """Sets the target of this V2PodsMetricSource.


        :param target: The target of this V2PodsMetricSource.  # noqa: E501
        :type: V2MetricTarget
        :raises ValueError: if ``target`` is None and client-side validation
            is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and target is None:  # noqa: E501
            raise ValueError("Invalid value for `target`, must not be `None`")  # noqa: E501

        self._target = target

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: any value (or element of a list/dict value)
        # exposing a ``to_dict`` method is converted; plain values pass through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2PodsMetricSource):
            return False

        # Equality is structural: compare serialized dicts.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2PodsMetricSource):
            return True

        return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_status.py
new file mode 100644
index 0000000000..809975249f
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_pods_metric_status.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2PodsMetricStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'current': 'V2MetricValueStatus',
+ 'metric': 'V2MetricIdentifier'
+ }
+
+ attribute_map = {
+ 'current': 'current',
+ 'metric': 'metric'
+ }
+
+ def __init__(self, current=None, metric=None, local_vars_configuration=None): # noqa: E501
+ """V2PodsMetricStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._current = None
+ self._metric = None
+ self.discriminator = None
+
+ self.current = current
+ self.metric = metric
+
+ @property
+ def current(self):
+ """Gets the current of this V2PodsMetricStatus. # noqa: E501
+
+
+ :return: The current of this V2PodsMetricStatus. # noqa: E501
+ :rtype: V2MetricValueStatus
+ """
+ return self._current
+
+ @current.setter
+ def current(self, current):
+ """Sets the current of this V2PodsMetricStatus.
+
+
+ :param current: The current of this V2PodsMetricStatus. # noqa: E501
+ :type: V2MetricValueStatus
+ """
+ if self.local_vars_configuration.client_side_validation and current is None: # noqa: E501
+ raise ValueError("Invalid value for `current`, must not be `None`") # noqa: E501
+
+ self._current = current
+
+ @property
+ def metric(self):
+ """Gets the metric of this V2PodsMetricStatus. # noqa: E501
+
+
+ :return: The metric of this V2PodsMetricStatus. # noqa: E501
+ :rtype: V2MetricIdentifier
+ """
+ return self._metric
+
+ @metric.setter
+ def metric(self, metric):
+ """Sets the metric of this V2PodsMetricStatus.
+
+
+ :param metric: The metric of this V2PodsMetricStatus. # noqa: E501
+ :type: V2MetricIdentifier
+ """
+ if self.local_vars_configuration.client_side_validation and metric is None: # noqa: E501
+ raise ValueError("Invalid value for `metric`, must not be `None`") # noqa: E501
+
+ self._metric = metric
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2PodsMetricStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2PodsMetricStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_source.py b/contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_source.py
new file mode 100644
index 0000000000..5454e2a666
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_source.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2ResourceMetricSource(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'name': 'str',
+ 'target': 'V2MetricTarget'
+ }
+
+ attribute_map = {
+ 'name': 'name',
+ 'target': 'target'
+ }
+
+ def __init__(self, name=None, target=None, local_vars_configuration=None): # noqa: E501
+ """V2ResourceMetricSource - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._name = None
+ self._target = None
+ self.discriminator = None
+
+ self.name = name
+ self.target = target
+
+ @property
+ def name(self):
+ """Gets the name of this V2ResourceMetricSource. # noqa: E501
+
+ name is the name of the resource in question. # noqa: E501
+
+ :return: The name of this V2ResourceMetricSource. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V2ResourceMetricSource.
+
+ name is the name of the resource in question. # noqa: E501
+
+ :param name: The name of this V2ResourceMetricSource. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ @property
+ def target(self):
+ """Gets the target of this V2ResourceMetricSource. # noqa: E501
+
+
+ :return: The target of this V2ResourceMetricSource. # noqa: E501
+ :rtype: V2MetricTarget
+ """
+ return self._target
+
+ @target.setter
+ def target(self, target):
+ """Sets the target of this V2ResourceMetricSource.
+
+
+ :param target: The target of this V2ResourceMetricSource. # noqa: E501
+ :type: V2MetricTarget
+ """
+ if self.local_vars_configuration.client_side_validation and target is None: # noqa: E501
+ raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
+
+ self._target = target
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2ResourceMetricSource):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2ResourceMetricSource):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_status.py b/contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_status.py
new file mode 100644
index 0000000000..845a749d16
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/v2_resource_metric_status.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class V2ResourceMetricStatus(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'current': 'V2MetricValueStatus',
+ 'name': 'str'
+ }
+
+ attribute_map = {
+ 'current': 'current',
+ 'name': 'name'
+ }
+
+ def __init__(self, current=None, name=None, local_vars_configuration=None): # noqa: E501
+ """V2ResourceMetricStatus - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._current = None
+ self._name = None
+ self.discriminator = None
+
+ self.current = current
+ self.name = name
+
+ @property
+ def current(self):
+ """Gets the current of this V2ResourceMetricStatus. # noqa: E501
+
+
+ :return: The current of this V2ResourceMetricStatus. # noqa: E501
+ :rtype: V2MetricValueStatus
+ """
+ return self._current
+
+ @current.setter
+ def current(self, current):
+ """Sets the current of this V2ResourceMetricStatus.
+
+
+ :param current: The current of this V2ResourceMetricStatus. # noqa: E501
+ :type: V2MetricValueStatus
+ """
+ if self.local_vars_configuration.client_side_validation and current is None: # noqa: E501
+ raise ValueError("Invalid value for `current`, must not be `None`") # noqa: E501
+
+ self._current = current
+
+ @property
+ def name(self):
+ """Gets the name of this V2ResourceMetricStatus. # noqa: E501
+
+ name is the name of the resource in question. # noqa: E501
+
+ :return: The name of this V2ResourceMetricStatus. # noqa: E501
+ :rtype: str
+ """
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ """Sets the name of this V2ResourceMetricStatus.
+
+ name is the name of the resource in question. # noqa: E501
+
+ :param name: The name of this V2ResourceMetricStatus. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
+ raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
+
+ self._name = name
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, V2ResourceMetricStatus):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, V2ResourceMetricStatus):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/models/version_info.py b/contrib/python/kubernetes/kubernetes/client/models/version_info.py
new file mode 100644
index 0000000000..b5f35d8a3d
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/models/version_info.py
@@ -0,0 +1,337 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from kubernetes.client.configuration import Configuration
+
+
+class VersionInfo(object):
+ """NOTE: This class is auto generated by OpenAPI Generator.
+ Ref: https://openapi-generator.tech
+
+ Do not edit the class manually.
+ """
+
+ """
+ Attributes:
+ openapi_types (dict): The key is attribute name
+ and the value is attribute type.
+ attribute_map (dict): The key is attribute name
+ and the value is json key in definition.
+ """
+ openapi_types = {
+ 'build_date': 'str',
+ 'compiler': 'str',
+ 'git_commit': 'str',
+ 'git_tree_state': 'str',
+ 'git_version': 'str',
+ 'go_version': 'str',
+ 'major': 'str',
+ 'minor': 'str',
+ 'platform': 'str'
+ }
+
+ attribute_map = {
+ 'build_date': 'buildDate',
+ 'compiler': 'compiler',
+ 'git_commit': 'gitCommit',
+ 'git_tree_state': 'gitTreeState',
+ 'git_version': 'gitVersion',
+ 'go_version': 'goVersion',
+ 'major': 'major',
+ 'minor': 'minor',
+ 'platform': 'platform'
+ }
+
+ def __init__(self, build_date=None, compiler=None, git_commit=None, git_tree_state=None, git_version=None, go_version=None, major=None, minor=None, platform=None, local_vars_configuration=None): # noqa: E501
+ """VersionInfo - a model defined in OpenAPI""" # noqa: E501
+ if local_vars_configuration is None:
+ local_vars_configuration = Configuration()
+ self.local_vars_configuration = local_vars_configuration
+
+ self._build_date = None
+ self._compiler = None
+ self._git_commit = None
+ self._git_tree_state = None
+ self._git_version = None
+ self._go_version = None
+ self._major = None
+ self._minor = None
+ self._platform = None
+ self.discriminator = None
+
+ self.build_date = build_date
+ self.compiler = compiler
+ self.git_commit = git_commit
+ self.git_tree_state = git_tree_state
+ self.git_version = git_version
+ self.go_version = go_version
+ self.major = major
+ self.minor = minor
+ self.platform = platform
+
+ @property
+ def build_date(self):
+ """Gets the build_date of this VersionInfo. # noqa: E501
+
+
+ :return: The build_date of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._build_date
+
+ @build_date.setter
+ def build_date(self, build_date):
+ """Sets the build_date of this VersionInfo.
+
+
+ :param build_date: The build_date of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and build_date is None: # noqa: E501
+ raise ValueError("Invalid value for `build_date`, must not be `None`") # noqa: E501
+
+ self._build_date = build_date
+
+ @property
+ def compiler(self):
+ """Gets the compiler of this VersionInfo. # noqa: E501
+
+
+ :return: The compiler of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._compiler
+
+ @compiler.setter
+ def compiler(self, compiler):
+ """Sets the compiler of this VersionInfo.
+
+
+ :param compiler: The compiler of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and compiler is None: # noqa: E501
+ raise ValueError("Invalid value for `compiler`, must not be `None`") # noqa: E501
+
+ self._compiler = compiler
+
+ @property
+ def git_commit(self):
+ """Gets the git_commit of this VersionInfo. # noqa: E501
+
+
+ :return: The git_commit of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._git_commit
+
+ @git_commit.setter
+ def git_commit(self, git_commit):
+ """Sets the git_commit of this VersionInfo.
+
+
+ :param git_commit: The git_commit of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and git_commit is None: # noqa: E501
+ raise ValueError("Invalid value for `git_commit`, must not be `None`") # noqa: E501
+
+ self._git_commit = git_commit
+
+ @property
+ def git_tree_state(self):
+ """Gets the git_tree_state of this VersionInfo. # noqa: E501
+
+
+ :return: The git_tree_state of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._git_tree_state
+
+ @git_tree_state.setter
+ def git_tree_state(self, git_tree_state):
+ """Sets the git_tree_state of this VersionInfo.
+
+
+ :param git_tree_state: The git_tree_state of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and git_tree_state is None: # noqa: E501
+ raise ValueError("Invalid value for `git_tree_state`, must not be `None`") # noqa: E501
+
+ self._git_tree_state = git_tree_state
+
+ @property
+ def git_version(self):
+ """Gets the git_version of this VersionInfo. # noqa: E501
+
+
+ :return: The git_version of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._git_version
+
+ @git_version.setter
+ def git_version(self, git_version):
+ """Sets the git_version of this VersionInfo.
+
+
+ :param git_version: The git_version of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and git_version is None: # noqa: E501
+ raise ValueError("Invalid value for `git_version`, must not be `None`") # noqa: E501
+
+ self._git_version = git_version
+
+ @property
+ def go_version(self):
+ """Gets the go_version of this VersionInfo. # noqa: E501
+
+
+ :return: The go_version of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._go_version
+
+ @go_version.setter
+ def go_version(self, go_version):
+ """Sets the go_version of this VersionInfo.
+
+
+ :param go_version: The go_version of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and go_version is None: # noqa: E501
+ raise ValueError("Invalid value for `go_version`, must not be `None`") # noqa: E501
+
+ self._go_version = go_version
+
+ @property
+ def major(self):
+ """Gets the major of this VersionInfo. # noqa: E501
+
+
+ :return: The major of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._major
+
+ @major.setter
+ def major(self, major):
+ """Sets the major of this VersionInfo.
+
+
+ :param major: The major of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and major is None: # noqa: E501
+ raise ValueError("Invalid value for `major`, must not be `None`") # noqa: E501
+
+ self._major = major
+
+ @property
+ def minor(self):
+ """Gets the minor of this VersionInfo. # noqa: E501
+
+
+ :return: The minor of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._minor
+
+ @minor.setter
+ def minor(self, minor):
+ """Sets the minor of this VersionInfo.
+
+
+ :param minor: The minor of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and minor is None: # noqa: E501
+ raise ValueError("Invalid value for `minor`, must not be `None`") # noqa: E501
+
+ self._minor = minor
+
+ @property
+ def platform(self):
+ """Gets the platform of this VersionInfo. # noqa: E501
+
+
+ :return: The platform of this VersionInfo. # noqa: E501
+ :rtype: str
+ """
+ return self._platform
+
+ @platform.setter
+ def platform(self, platform):
+ """Sets the platform of this VersionInfo.
+
+
+ :param platform: The platform of this VersionInfo. # noqa: E501
+ :type: str
+ """
+ if self.local_vars_configuration.client_side_validation and platform is None: # noqa: E501
+ raise ValueError("Invalid value for `platform`, must not be `None`") # noqa: E501
+
+ self._platform = platform
+
+ def to_dict(self):
+ """Returns the model properties as a dict"""
+ result = {}
+
+ for attr, _ in six.iteritems(self.openapi_types):
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ result[attr] = list(map(
+ lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+ value
+ ))
+ elif hasattr(value, "to_dict"):
+ result[attr] = value.to_dict()
+ elif isinstance(value, dict):
+ result[attr] = dict(map(
+ lambda item: (item[0], item[1].to_dict())
+ if hasattr(item[1], "to_dict") else item,
+ value.items()
+ ))
+ else:
+ result[attr] = value
+
+ return result
+
+ def to_str(self):
+ """Returns the string representation of the model"""
+ return pprint.pformat(self.to_dict())
+
+ def __repr__(self):
+ """For `print` and `pprint`"""
+ return self.to_str()
+
+ def __eq__(self, other):
+ """Returns true if both objects are equal"""
+ if not isinstance(other, VersionInfo):
+ return False
+
+ return self.to_dict() == other.to_dict()
+
+ def __ne__(self, other):
+ """Returns true if both objects are not equal"""
+ if not isinstance(other, VersionInfo):
+ return True
+
+ return self.to_dict() != other.to_dict()
diff --git a/contrib/python/kubernetes/kubernetes/client/rest.py b/contrib/python/kubernetes/kubernetes/client/rest.py
new file mode 100644
index 0000000000..5678ad3861
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/client/rest.py
@@ -0,0 +1,305 @@
+# coding: utf-8
+
+"""
+ Kubernetes
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+ The version of the OpenAPI document: release-1.28
+ Generated by: https://openapi-generator.tech
+"""
+
+
+from __future__ import absolute_import
+
+import io
+import json
+import logging
+import re
+import ssl
+
+import certifi
+# python 2 and python 3 compatibility library
+import six
+from six.moves.urllib.parse import urlencode
+import urllib3
+
+from kubernetes.client.exceptions import ApiException, ApiValueError
+from requests.utils import should_bypass_proxies
+
+
+logger = logging.getLogger(__name__)
+
+
+class RESTResponse(io.IOBase):
+
+ def __init__(self, resp):
+ self.urllib3_response = resp
+ self.status = resp.status
+ self.reason = resp.reason
+ self.data = resp.data
+
+ def getheaders(self):
+ """Returns a dictionary of the response headers."""
+ return self.urllib3_response.getheaders()
+
+ def getheader(self, name, default=None):
+ """Returns a given response header."""
+ return self.urllib3_response.getheader(name, default)
+
+
+class RESTClientObject(object):
+
+ def __init__(self, configuration, pools_size=4, maxsize=None):
+ # urllib3.PoolManager will pass all kw parameters to connectionpool
+ # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
+ # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
+ # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
+ # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
+
+ # cert_reqs
+ if configuration.verify_ssl:
+ cert_reqs = ssl.CERT_REQUIRED
+ else:
+ cert_reqs = ssl.CERT_NONE
+
+ # ca_certs
+ if configuration.ssl_ca_cert:
+ ca_certs = configuration.ssl_ca_cert
+ else:
+ # if not set certificate file, use Mozilla's root certificates.
+ ca_certs = certifi.where()
+
+ addition_pool_args = {}
+ if configuration.assert_hostname is not None:
+ addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
+
+ if configuration.retries is not None:
+ addition_pool_args['retries'] = configuration.retries
+
+ if configuration.tls_server_name:
+ addition_pool_args['server_hostname'] = configuration.tls_server_name
+
+ if maxsize is None:
+ if configuration.connection_pool_maxsize is not None:
+ maxsize = configuration.connection_pool_maxsize
+ else:
+ maxsize = 4
+
+ # https pool manager
+ if configuration.proxy and not should_bypass_proxies(configuration.host, no_proxy=configuration.no_proxy or ''):
+ self.pool_manager = urllib3.ProxyManager(
+ num_pools=pools_size,
+ maxsize=maxsize,
+ cert_reqs=cert_reqs,
+ ca_certs=ca_certs,
+ cert_file=configuration.cert_file,
+ key_file=configuration.key_file,
+ proxy_url=configuration.proxy,
+ proxy_headers=configuration.proxy_headers,
+ **addition_pool_args
+ )
+ else:
+ self.pool_manager = urllib3.PoolManager(
+ num_pools=pools_size,
+ maxsize=maxsize,
+ cert_reqs=cert_reqs,
+ ca_certs=ca_certs,
+ cert_file=configuration.cert_file,
+ key_file=configuration.key_file,
+ **addition_pool_args
+ )
+
+ def request(self, method, url, query_params=None, headers=None,
+ body=None, post_params=None, _preload_content=True,
+ _request_timeout=None):
+ """Perform requests.
+
+ :param method: http request method
+ :param url: http request url
+ :param query_params: query parameters in the url
+ :param headers: http request headers
+ :param body: request json body, for `application/json`
+ :param post_params: request post parameters,
+ `application/x-www-form-urlencoded`
+ and `multipart/form-data`
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ """
+ method = method.upper()
+ assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
+ 'PATCH', 'OPTIONS']
+
+ if post_params and body:
+ raise ApiValueError(
+ "body parameter cannot be used with post_params parameter."
+ )
+
+ post_params = post_params or {}
+ headers = headers or {}
+
+ timeout = None
+ if _request_timeout:
+ if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
+ timeout = urllib3.Timeout(total=_request_timeout)
+ elif (isinstance(_request_timeout, tuple) and
+ len(_request_timeout) == 2):
+ timeout = urllib3.Timeout(
+ connect=_request_timeout[0], read=_request_timeout[1])
+
+ if 'Content-Type' not in headers:
+ headers['Content-Type'] = 'application/json'
+
+ try:
+ # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
+ if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
+ if query_params:
+ url += '?' + urlencode(query_params)
+ if (re.search('json', headers['Content-Type'], re.IGNORECASE) or
+ headers['Content-Type'] == 'application/apply-patch+yaml'):
+ if headers['Content-Type'] == 'application/json-patch+json':
+ if not isinstance(body, list):
+ headers['Content-Type'] = \
+ 'application/strategic-merge-patch+json'
+ request_body = None
+ if body is not None:
+ request_body = json.dumps(body)
+ r = self.pool_manager.request(
+ method, url,
+ body=request_body,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
+ r = self.pool_manager.request(
+ method, url,
+ fields=post_params,
+ encode_multipart=False,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ elif headers['Content-Type'] == 'multipart/form-data':
+ # must del headers['Content-Type'], or the correct
+ # Content-Type which generated by urllib3 will be
+ # overwritten.
+ del headers['Content-Type']
+ r = self.pool_manager.request(
+ method, url,
+ fields=post_params,
+ encode_multipart=True,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ # Pass a `string` parameter directly in the body to support
+ # other content types than Json when `body` argument is
+ # provided in serialized form
+ elif isinstance(body, str) or isinstance(body, bytes):
+ request_body = body
+ r = self.pool_manager.request(
+ method, url,
+ body=request_body,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ else:
+ # Cannot generate the request from given parameters
+ msg = """Cannot prepare a request message for provided
+ arguments. Please check that your arguments match
+ declared content type."""
+ raise ApiException(status=0, reason=msg)
+ # For `GET`, `HEAD`
+ else:
+ r = self.pool_manager.request(method, url,
+ fields=query_params,
+ preload_content=_preload_content,
+ timeout=timeout,
+ headers=headers)
+ except urllib3.exceptions.SSLError as e:
+ msg = "{0}\n{1}".format(type(e).__name__, str(e))
+ raise ApiException(status=0, reason=msg)
+
+ if _preload_content:
+ r = RESTResponse(r)
+
+ # In the python 3, the response.data is bytes.
+ # we need to decode it to string.
+ if six.PY3:
+ r.data = r.data.decode('utf8')
+
+ # log response body
+ logger.debug("response body: %s", r.data)
+
+ if not 200 <= r.status <= 299:
+ raise ApiException(http_resp=r)
+
+ return r
+
+ def GET(self, url, headers=None, query_params=None, _preload_content=True,
+ _request_timeout=None):
+ return self.request("GET", url,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ query_params=query_params)
+
+ def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
+ _request_timeout=None):
+ return self.request("HEAD", url,
+ headers=headers,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ query_params=query_params)
+
+ def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("OPTIONS", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def DELETE(self, url, headers=None, query_params=None, body=None,
+ _preload_content=True, _request_timeout=None):
+ return self.request("DELETE", url,
+ headers=headers,
+ query_params=query_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def POST(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("POST", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def PUT(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("PUT", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
+
+ def PATCH(self, url, headers=None, query_params=None, post_params=None,
+ body=None, _preload_content=True, _request_timeout=None):
+ return self.request("PATCH", url,
+ headers=headers,
+ query_params=query_params,
+ post_params=post_params,
+ _preload_content=_preload_content,
+ _request_timeout=_request_timeout,
+ body=body)
diff --git a/contrib/python/kubernetes/kubernetes/config/__init__.py b/contrib/python/kubernetes/kubernetes/config/__init__.py
new file mode 100644
index 0000000000..3f49ce0e91
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/config/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os.path import exists, expanduser
+
+from .config_exception import ConfigException
+from .incluster_config import load_incluster_config
+from .kube_config import (KUBE_CONFIG_DEFAULT_LOCATION,
+ list_kube_config_contexts, load_kube_config,
+ load_kube_config_from_dict, new_client_from_config, new_client_from_config_dict)
+
+
def load_config(**kwargs):
    """
    Wrapper function to load the kube_config.
    It will initially try to load_kube_config from provided path,
    then check if the KUBE_CONFIG_DEFAULT_LOCATION exists
    If neither exists, it will fall back to load_incluster_config
    and inform the user accordingly.

    :param kwargs: A combination of all possible kwargs that
        can be passed to either load_kube_config or
        load_incluster_config functions.
    """
    # Membership tests go straight against the dict; the original used the
    # redundant `in kwargs.keys()` form.
    if "config_file" in kwargs:
        load_kube_config(**kwargs)
    elif "kube_config_path" in kwargs:
        # Translate the alias to load_kube_config's parameter name. The key
        # is known to be present here, so pop() needs no default (the
        # original passed a dead `None` default).
        kwargs["config_file"] = kwargs.pop("kube_config_path")
        load_kube_config(**kwargs)
    elif exists(expanduser(KUBE_CONFIG_DEFAULT_LOCATION)):
        load_kube_config(**kwargs)
    else:
        print(
            "kube_config_path not provided and "
            "default location ({0}) does not exist. "
            "Using inCluster Config. "
            "This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION))
        load_incluster_config(**kwargs)
diff --git a/contrib/python/kubernetes/kubernetes/config/config_exception.py b/contrib/python/kubernetes/kubernetes/config/config_exception.py
new file mode 100644
index 0000000000..23fab022c7
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/config/config_exception.py
@@ -0,0 +1,17 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
class ConfigException(Exception):
    # Raised throughout the config package when a kubernetes client
    # configuration is missing, malformed, or cannot be loaded.
    pass
diff --git a/contrib/python/kubernetes/kubernetes/config/dateutil.py b/contrib/python/kubernetes/kubernetes/config/dateutil.py
new file mode 100644
index 0000000000..972e003eba
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/config/dateutil.py
@@ -0,0 +1,84 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import math
+import re
+
+
class TimezoneInfo(datetime.tzinfo):
    """Fixed-offset tzinfo (no DST) for RFC 3339 time offsets."""

    def __init__(self, h, m):
        """h: offset hours (signed); m: offset minutes (magnitude,
        takes the sign of h for the actual delta)."""
        self._name = "UTC"
        # Label any nonzero offset, e.g. "UTC+05:30". The original used
        # `h != 0 and m != 0`, which mislabelled whole-hour offsets such
        # as +05:00 as plain "UTC"; `or` covers any nonzero component.
        # "%02d" zero-pads the minutes ("%2d" space-padded them).
        if h != 0 or m != 0:
            self._name += "%+03d:%02d" % (h, m)
        # Minutes inherit the sign of the hours so (-5, 30) means -05:30.
        self._delta = datetime.timedelta(hours=h, minutes=math.copysign(m, h))

    def utcoffset(self, dt):
        return self._delta

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        # Fixed offsets never observe daylight saving time.
        return datetime.timedelta(0)
+
+
# Shared zero-offset instance; used as the default/fallback timezone below.
UTC = TimezoneInfo(0, 0)

# ref https://www.ietf.org/rfc/rfc3339.txt
_re_rfc3339 = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)"            # full-date
                         r"[ Tt]"                               # Separator
                         r"(\d\d):(\d\d):(\d\d)([.,]\d+)?"      # partial-time
                         r"([zZ ]|[-+]\d\d?:\d\d)?",            # time-offset
                         re.VERBOSE + re.IGNORECASE)
# Numeric offset part, e.g. "+05:30", "-0530", "+5" (minutes optional).
_re_timezone = re.compile(r"([-+])(\d\d?):?(\d\d)?")

MICROSEC_PER_SEC = 1000000
+
+
def parse_rfc3339(s):
    """Parse an RFC 3339 timestamp string into an aware datetime.

    datetime instances are passed through untouched, except that naive
    ones are stamped with UTC. A missing fraction means 0 microseconds;
    a missing or 'Z'/'z' offset means UTC.
    """
    if isinstance(s, datetime.datetime):
        # Nothing to parse; just guarantee a timezone is attached.
        return s if s.tzinfo else s.replace(tzinfo=UTC)
    fields = _re_rfc3339.search(s).groups()
    year, month, day, hour, minute, second = (int(p) for p in fields[:6])
    frac = fields[6]
    micros = 0
    if frac is not None:
        # Both '.' and ',' are accepted as the decimal separator.
        micros = int(MICROSEC_PER_SEC * float(frac.replace(",", ".")))
    offset = fields[7]
    tz = UTC
    if offset is not None and offset != 'Z' and offset != 'z':
        sign, off_hh, off_mm = _re_timezone.search(offset).groups()
        off_hour = int(off_hh)
        if sign == "-":
            off_hour *= -1
        off_minute = int(off_mm) if off_mm else 0
        tz = TimezoneInfo(off_hour, off_minute)
    return datetime.datetime(
        year=year, month=month, day=day,
        hour=hour, minute=minute, second=second,
        microsecond=micros, tzinfo=tz)
+
+
def format_rfc3339(date_time):
    """Render *date_time* as an RFC 3339 UTC string (second precision).

    Naive datetimes are assumed to already be in UTC.
    """
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=UTC)
    return date_time.astimezone(UTC).strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/contrib/python/kubernetes/kubernetes/config/exec_provider.py b/contrib/python/kubernetes/kubernetes/config/exec_provider.py
new file mode 100644
index 0000000000..9dd3827609
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/config/exec_provider.py
@@ -0,0 +1,100 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import os
+import subprocess
+import sys
+
+from .config_exception import ConfigException
+
+
class ExecProvider(object):
    """
    Implementation of the proposal for out-of-tree client
    authentication providers as described here --
    https://github.com/kubernetes/community/blob/master/contributors/design-proposals/auth/kubectl-exec-plugins.md

    Missing from implementation:

    * TLS cert support
    * caching
    """

    def __init__(self, exec_config, cwd):
        """
        exec_config must be of type ConfigNode because we depend on
        safe_get(self, key) to correctly handle optional exec provider
        config parameters.

        Raises ConfigException when 'command' or 'apiVersion' is missing.
        """
        for key in ['command', 'apiVersion']:
            if key not in exec_config:
                raise ConfigException(
                    'exec: malformed request. missing key \'%s\'' % key)
        self.api_version = exec_config['apiVersion']
        self.args = [exec_config['command']]
        if exec_config.safe_get('args'):
            self.args.extend(exec_config['args'])
        # Start from the parent environment so the plugin inherits PATH etc.
        self.env = os.environ.copy()
        if exec_config.safe_get('env'):
            additional_vars = {}
            for item in exec_config['env']:
                name = item['name']
                value = item['value']
                additional_vars[name] = value
            self.env.update(additional_vars)

        self.cwd = cwd or None

    def run(self, previous_response=None):
        """Execute the credential plugin and return its 'status' object.

        Raises ConfigException when the plugin exits nonzero, emits
        non-JSON output, or returns a malformed/mismatched ExecCredential.
        """
        is_interactive = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
        kubernetes_exec_info = {
            'apiVersion': self.api_version,
            'kind': 'ExecCredential',
            'spec': {
                'interactive': is_interactive
            }
        }
        if previous_response:
            kubernetes_exec_info['spec']['response'] = previous_response
        self.env['KUBERNETES_EXEC_INFO'] = json.dumps(kubernetes_exec_info)
        process = subprocess.Popen(
            self.args,
            stdout=subprocess.PIPE,
            stderr=sys.stderr if is_interactive else subprocess.PIPE,
            stdin=sys.stdin if is_interactive else None,
            cwd=self.cwd,
            env=self.env,
            universal_newlines=True)
        (stdout, stderr) = process.communicate()
        exit_code = process.wait()
        if exit_code != 0:
            msg = 'exec: process returned %d' % exit_code
            # BUG FIX: in interactive mode stderr is inherited (not piped),
            # so communicate() returns None for it; the original called
            # .strip() unconditionally and raised AttributeError instead of
            # the intended ConfigException.
            stderr = (stderr or '').strip()
            if stderr:
                msg += '. %s' % stderr
            raise ConfigException(msg)
        try:
            data = json.loads(stdout)
        except ValueError as de:
            raise ConfigException(
                'exec: failed to decode process output: %s' % de)
        for key in ('apiVersion', 'kind', 'status'):
            if key not in data:
                raise ConfigException(
                    'exec: malformed response. missing key \'%s\'' % key)
        if data['apiVersion'] != self.api_version:
            raise ConfigException(
                'exec: plugin api version %s does not match %s' %
                (data['apiVersion'], self.api_version))
        return data['status']
diff --git a/contrib/python/kubernetes/kubernetes/config/incluster_config.py b/contrib/python/kubernetes/kubernetes/config/incluster_config.py
new file mode 100644
index 0000000000..86070df43b
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/config/incluster_config.py
@@ -0,0 +1,121 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import os
+
+from kubernetes.client import Configuration
+
+from .config_exception import ConfigException
+
# Env vars kubernetes injects into every pod pointing at the API server.
SERVICE_HOST_ENV_NAME = "KUBERNETES_SERVICE_HOST"
SERVICE_PORT_ENV_NAME = "KUBERNETES_SERVICE_PORT"
# Default in-pod service-account credential mount locations.
SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token"
SERVICE_CERT_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+
+
+def _join_host_port(host, port):
+ """Adapted golang's net.JoinHostPort"""
+ template = "%s:%s"
+ host_requires_bracketing = ':' in host or '%' in host
+ if host_requires_bracketing:
+ template = "[%s]:%s"
+ return template % (host, port)
+
+
class InClusterConfigLoader(object):
    """Loads in-cluster service-account credentials (token file + CA cert,
    host/port from the environment) into a client Configuration."""

    def __init__(self,
                 token_filename,
                 cert_filename,
                 try_refresh_token=True,
                 environ=os.environ):
        # environ is injectable so tests can supply a fake environment.
        self._token_filename = token_filename
        self._cert_filename = cert_filename
        self._environ = environ
        self._try_refresh_token = try_refresh_token
        # The token file is re-read at most once per this period.
        self._token_refresh_period = datetime.timedelta(minutes=1)

    def load_and_set(self, client_configuration=None):
        """Load credentials and apply them to *client_configuration*;
        when none is given, create one and install it as the default."""
        try_set_default = False
        if client_configuration is None:
            # type.__call__ bypasses any custom Configuration metaclass
            # __call__ so a genuinely new instance is created.
            client_configuration = type.__call__(Configuration)
            try_set_default = True
        self._load_config()
        self._set_config(client_configuration)
        if try_set_default:
            Configuration.set_default(client_configuration)

    def _load_config(self):
        # Validate env vars and credential files; populate host, token and
        # CA path. Raises ConfigException on anything missing or empty.
        if (SERVICE_HOST_ENV_NAME not in self._environ
                or SERVICE_PORT_ENV_NAME not in self._environ):
            raise ConfigException("Service host/port is not set.")

        if (not self._environ[SERVICE_HOST_ENV_NAME]
                or not self._environ[SERVICE_PORT_ENV_NAME]):
            raise ConfigException("Service host/port is set but empty.")

        self.host = ("https://" +
                     _join_host_port(self._environ[SERVICE_HOST_ENV_NAME],
                                     self._environ[SERVICE_PORT_ENV_NAME]))

        if not os.path.isfile(self._token_filename):
            raise ConfigException("Service token file does not exist.")

        self._read_token_file()

        if not os.path.isfile(self._cert_filename):
            raise ConfigException(
                "Service certification file does not exist.")

        with open(self._cert_filename) as f:
            if not f.read():
                raise ConfigException("Cert file exists but empty.")

        self.ssl_ca_cert = self._cert_filename

    def _set_config(self, client_configuration):
        client_configuration.host = self.host
        client_configuration.ssl_ca_cert = self.ssl_ca_cert
        if self.token is not None:
            client_configuration.api_key['authorization'] = self.token
        if not self._try_refresh_token:
            return

        def _refresh_api_key(client_configuration):
            # Re-read the token file once the cached token is stale.
            if self.token_expires_at <= datetime.datetime.now():
                self._read_token_file()
                self._set_config(client_configuration)

        client_configuration.refresh_api_key_hook = _refresh_api_key

    def _read_token_file(self):
        # Reads the mounted token and stamps a local refresh deadline.
        with open(self._token_filename) as f:
            content = f.read()
            if not content:
                raise ConfigException("Token file exists but empty.")
            self.token = "bearer " + content
            self.token_expires_at = datetime.datetime.now(
            ) + self._token_refresh_period
+
+
def load_incluster_config(client_configuration=None, try_refresh_token=True):
    """
    Use the service account kubernetes gives to pods to connect to kubernetes
    cluster. It's intended for clients that expect to be running inside a pod
    running on kubernetes. It will raise an exception if called from a process
    not running in a kubernetes environment."""
    loader = InClusterConfigLoader(
        token_filename=SERVICE_TOKEN_FILENAME,
        cert_filename=SERVICE_CERT_FILENAME,
        try_refresh_token=try_refresh_token)
    loader.load_and_set(client_configuration)
diff --git a/contrib/python/kubernetes/kubernetes/config/kube_config.py b/contrib/python/kubernetes/kubernetes/config/kube_config.py
new file mode 100644
index 0000000000..d8c63a8261
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/config/kube_config.py
@@ -0,0 +1,893 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import atexit
+import base64
+import copy
+import datetime
+import json
+import logging
+import os
+import platform
+import subprocess
+import tempfile
+import time
+from collections import namedtuple
+
+import google.auth
+import google.auth.transport.requests
+import oauthlib.oauth2
+import urllib3
+import yaml
+from requests_oauthlib import OAuth2Session
+from six import PY3
+
+from kubernetes.client import ApiClient, Configuration
+from kubernetes.config.exec_provider import ExecProvider
+
+from .config_exception import ConfigException
+from .dateutil import UTC, format_rfc3339, parse_rfc3339
+
+try:
+ import adal
+except ImportError:
+ pass
+
# Treat tokens as expired this long before their stated expiry to absorb
# clock skew between client and issuer.
EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5)
KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config')
# $KUBECONFIG may list several files; the list separator is OS-specific.
ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':'
# Maps file content -> temp file path so identical content reuses one file.
_temp_files = {}
+
+
def _cleanup_temp_files():
    """atexit hook: best-effort removal of the temp files created below."""
    global _temp_files
    for path in _temp_files.values():
        try:
            os.remove(path)
        except OSError:
            # The file may already be gone; cleanup is best-effort.
            pass
    _temp_files = {}
+
+
def _create_temp_file_with_content(content, temp_file_path=None):
    """Write *content* (str or bytes) to a temp file and return its path,
    caching by content so repeated writes reuse one file."""
    if len(_temp_files) == 0:
        # First temp file created: arrange cleanup at interpreter exit.
        atexit.register(_cleanup_temp_files)
    # Because we may change context several times, try to remember files we
    # created and reuse them at a small memory cost.
    content_key = str(content)
    cached = _temp_files.get(content_key)
    if cached is not None:
        return cached
    if temp_file_path and not os.path.isdir(temp_file_path):
        os.makedirs(name=temp_file_path)
    handle, name = tempfile.mkstemp(dir=temp_file_path)
    os.close(handle)
    _temp_files[content_key] = name
    with open(name, 'wb') as out:
        out.write(content.encode() if isinstance(content, str) else content)
    return name
+
+
def _is_expired(expiry):
    """Return True when *expiry* (RFC 3339 string or datetime) falls within
    the skew-prevention window of the current UTC time."""
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    return parse_rfc3339(expiry) - EXPIRY_SKEW_PREVENTION_DELAY <= now
+
+
class FileOrData(object):
    """Utility class to read content of obj[%data_key_name] or file's
    content of obj[%file_key_name] and represent it as file or data.
    Note that the data is preferred. The obj[%file_key_name] will be used iff
    obj['%data_key_name'] is not set or empty. Assumption is file content is
    raw data and data field is base64 string. The assumption can be changed
    with base64_file_content flag. If set to False, the content of the file
    will assumed to be base64 and read as is. The default True value will
    result in base64 encode of the file content after read."""

    def __init__(self, obj, file_key_name, data_key_name=None,
                 file_base_path="", base64_file_content=True,
                 temp_file_path=None):
        # kubeconfig convention: the data key defaults to "<file key>-data"
        # (e.g. "certificate-authority" / "certificate-authority-data").
        if not data_key_name:
            data_key_name = file_key_name + "-data"
        self._file = None
        self._data = None
        self._base64_file_content = base64_file_content
        self._temp_file_path = temp_file_path
        if not obj:
            return
        # Inline data wins over a file reference when both are present.
        if data_key_name in obj:
            self._data = obj[data_key_name]
        elif file_key_name in obj:
            # File paths are resolved relative to the kubeconfig location.
            self._file = os.path.normpath(
                os.path.join(file_base_path, obj[file_key_name]))

    def as_file(self):
        """If obj[%data_key_name] exists, return name of a file with base64
        decoded obj[%data_key_name] content otherwise obj[%file_key_name]."""
        use_data_if_no_file = not self._file and self._data
        if use_data_if_no_file:
            if self._base64_file_content:
                if isinstance(self._data, str):
                    content = self._data.encode()
                else:
                    content = self._data
                self._file = _create_temp_file_with_content(
                    base64.standard_b64decode(content), self._temp_file_path)
            else:
                self._file = _create_temp_file_with_content(
                    self._data, self._temp_file_path)
        if self._file and not os.path.isfile(self._file):
            raise ConfigException("File does not exist: %s" % self._file)
        return self._file

    def as_data(self):
        """If obj[%data_key_name] exists, Return obj[%data_key_name] otherwise
        base64 encoded string of obj[%file_key_name] file content."""
        use_file_if_no_data = not self._data and self._file
        if use_file_if_no_data:
            with open(self._file) as f:
                if self._base64_file_content:
                    self._data = bytes.decode(
                        base64.standard_b64encode(str.encode(f.read())))
                else:
                    self._data = f.read()
        return self._data
+
+
class CommandTokenSource(object):
    """Obtains a bearer token by running an external command (kubeconfig
    auth-provider 'cmd-path' mechanism)."""

    def __init__(self, cmd, args, tokenKey, expiryKey):
        self._cmd = cmd
        self._args = args
        # NOTE(review): the token/expiry key paths below are stored but
        # token() reads fixed 'credential' fields from the command's JSON
        # output instead — confirm whether these defaults are ever consumed.
        if not tokenKey:
            self._tokenKey = '{.access_token}'
        else:
            self._tokenKey = tokenKey
        if not expiryKey:
            self._expiryKey = '{.token_expiry}'
        else:
            self._expiryKey = expiryKey

    def token(self):
        # Runs the command and returns namedtuple(token, expiry); raises
        # ConfigException on nonzero exit or non-JSON output.
        fullCmd = self._cmd + (" ") + " ".join(self._args)  # for error text only
        process = subprocess.Popen(
            [self._cmd] + self._args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True)
        (stdout, stderr) = process.communicate()
        exit_code = process.wait()
        if exit_code != 0:
            msg = 'cmd-path: process returned %d' % exit_code
            msg += "\nCmd: %s" % fullCmd
            stderr = stderr.strip()
            if stderr:
                msg += '\nStderr: %s' % stderr
            raise ConfigException(msg)
        try:
            data = json.loads(stdout)
        except ValueError as de:
            raise ConfigException(
                'exec: failed to decode process output: %s' % de)
        A = namedtuple('A', ['token', 'expiry'])
        return A(
            token=data['credential']['access_token'],
            expiry=parse_rfc3339(data['credential']['token_expiry']))
+
+
+class KubeConfigLoader(object):
+
+ def __init__(self, config_dict, active_context=None,
+ get_google_credentials=None,
+ config_base_path="",
+ config_persister=None,
+ temp_file_path=None):
+
+ if config_dict is None:
+ raise ConfigException(
+ 'Invalid kube-config. '
+ 'Expected config_dict to not be None.')
+ elif isinstance(config_dict, ConfigNode):
+ self._config = config_dict
+ else:
+ self._config = ConfigNode('kube-config', config_dict)
+
+ self._current_context = None
+ self._user = None
+ self._cluster = None
+ self.set_active_context(active_context)
+ self._config_base_path = config_base_path
+ self._config_persister = config_persister
+ self._temp_file_path = temp_file_path
+
+ def _refresh_credentials_with_cmd_path():
+ config = self._user['auth-provider']['config']
+ cmd = config['cmd-path']
+ if len(cmd) == 0:
+ raise ConfigException(
+ 'missing access token cmd '
+ '(cmd-path is an empty string in your kubeconfig file)')
+ if 'scopes' in config and config['scopes'] != "":
+ raise ConfigException(
+ 'scopes can only be used '
+ 'when kubectl is using a gcp service account key')
+ args = []
+ if 'cmd-args' in config:
+ args = config['cmd-args'].split()
+ else:
+ fields = config['cmd-path'].split()
+ cmd = fields[0]
+ args = fields[1:]
+
+ commandTokenSource = CommandTokenSource(
+ cmd, args,
+ config.safe_get('token-key'),
+ config.safe_get('expiry-key'))
+ return commandTokenSource.token()
+
+ def _refresh_credentials():
+ # Refresh credentials using cmd-path
+ if ('auth-provider' in self._user and
+ 'config' in self._user['auth-provider'] and
+ 'cmd-path' in self._user['auth-provider']['config']):
+ return _refresh_credentials_with_cmd_path()
+
+ credentials, project_id = google.auth.default(scopes=[
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/userinfo.email'
+ ])
+ request = google.auth.transport.requests.Request()
+ credentials.refresh(request)
+ return credentials
+
+ if get_google_credentials:
+ self._get_google_credentials = get_google_credentials
+ else:
+ self._get_google_credentials = _refresh_credentials
+
+ def set_active_context(self, context_name=None):
+ if context_name is None:
+ context_name = self._config['current-context']
+ self._current_context = self._config['contexts'].get_with_name(
+ context_name)
+ if (self._current_context['context'].safe_get('user') and
+ self._config.safe_get('users')):
+ user = self._config['users'].get_with_name(
+ self._current_context['context']['user'], safe=True)
+ if user:
+ self._user = user['user']
+ else:
+ self._user = None
+ else:
+ self._user = None
+ self._cluster = self._config['clusters'].get_with_name(
+ self._current_context['context']['cluster'])['cluster']
+
+ def _load_authentication(self):
+ """Read authentication from kube-config user section if exists.
+
+ This function goes through various authentication methods in user
+ section of kube-config and stops if it finds a valid authentication
+ method. The order of authentication methods is:
+
+ 1. auth-provider (gcp, azure, oidc)
+ 2. token field (point to a token file)
+ 3. exec provided plugin
+ 4. username/password
+ """
+ if not self._user:
+ return
+ if self._load_auth_provider_token():
+ return
+ if self._load_user_token():
+ return
+ if self._load_from_exec_plugin():
+ return
+ self._load_user_pass_token()
+
+ def _load_auth_provider_token(self):
+ if 'auth-provider' not in self._user:
+ return
+ provider = self._user['auth-provider']
+ if 'name' not in provider:
+ return
+ if provider['name'] == 'gcp':
+ return self._load_gcp_token(provider)
+ if provider['name'] == 'azure':
+ return self._load_azure_token(provider)
+ if provider['name'] == 'oidc':
+ return self._load_oid_token(provider)
+
+ def _azure_is_expired(self, provider):
+ expires_on = provider['config']['expires-on']
+ if expires_on.isdigit():
+ return int(expires_on) < time.time()
+ else:
+ exp_time = time.strptime(expires_on, '%Y-%m-%d %H:%M:%S.%f')
+ return exp_time < time.gmtime()
+
+ def _load_azure_token(self, provider):
+ if 'config' not in provider:
+ return
+ if 'access-token' not in provider['config']:
+ return
+ if 'expires-on' in provider['config']:
+ if self._azure_is_expired(provider):
+ self._refresh_azure_token(provider['config'])
+ self.token = 'Bearer %s' % provider['config']['access-token']
+ return self.token
+
+ def _refresh_azure_token(self, config):
+ if 'adal' not in globals():
+ raise ImportError('refresh token error, adal library not imported')
+
+ tenant = config['tenant-id']
+ authority = 'https://login.microsoftonline.com/{}'.format(tenant)
+ context = adal.AuthenticationContext(
+ authority, validate_authority=True, api_version='1.0'
+ )
+ refresh_token = config['refresh-token']
+ client_id = config['client-id']
+ apiserver_id = '00000002-0000-0000-c000-000000000000'
+ try:
+ apiserver_id = config['apiserver-id']
+ except ConfigException:
+ # We've already set a default above
+ pass
+ token_response = context.acquire_token_with_refresh_token(
+ refresh_token, client_id, apiserver_id)
+
+ provider = self._user['auth-provider']['config']
+ provider.value['access-token'] = token_response['accessToken']
+ provider.value['expires-on'] = token_response['expiresOn']
+ if self._config_persister:
+ self._config_persister()
+
+ def _load_gcp_token(self, provider):
+ if (('config' not in provider) or
+ ('access-token' not in provider['config']) or
+ ('expiry' in provider['config'] and
+ _is_expired(provider['config']['expiry']))):
+ # token is not available or expired, refresh it
+ self._refresh_gcp_token()
+
+ self.token = "Bearer %s" % provider['config']['access-token']
+ if 'expiry' in provider['config']:
+ self.expiry = parse_rfc3339(provider['config']['expiry'])
+ return self.token
+
+ def _refresh_gcp_token(self):
+ if 'config' not in self._user['auth-provider']:
+ self._user['auth-provider'].value['config'] = {}
+ provider = self._user['auth-provider']['config']
+ credentials = self._get_google_credentials()
+ provider.value['access-token'] = credentials.token
+ provider.value['expiry'] = format_rfc3339(credentials.expiry)
+ if self._config_persister:
+ self._config_persister()
+
+ def _load_oid_token(self, provider):
+ if 'config' not in provider:
+ return
+
+ reserved_characters = frozenset(["=", "+", "/"])
+ token = provider['config']['id-token']
+
+ if any(char in token for char in reserved_characters):
+ # Invalid jwt, as it contains url-unsafe chars
+ return
+
+ parts = token.split('.')
+ if len(parts) != 3: # Not a valid JWT
+ return
+
+ padding = (4 - len(parts[1]) % 4) * '='
+ if len(padding) == 3:
+ # According to spec, 3 padding characters cannot occur
+ # in a valid jwt
+ # https://tools.ietf.org/html/rfc7515#appendix-C
+ return
+
+ if PY3:
+ jwt_attributes = json.loads(
+ base64.urlsafe_b64decode(parts[1] + padding).decode('utf-8')
+ )
+ else:
+ jwt_attributes = json.loads(
+ base64.b64decode(parts[1] + padding)
+ )
+
+ expire = jwt_attributes.get('exp')
+
+ if ((expire is not None) and
+ (_is_expired(datetime.datetime.fromtimestamp(expire,
+ tz=UTC)))):
+ self._refresh_oidc(provider)
+
+ if self._config_persister:
+ self._config_persister()
+
+ self.token = "Bearer %s" % provider['config']['id-token']
+
+ return self.token
+
+ def _refresh_oidc(self, provider):
+ config = Configuration()
+
+ if 'idp-certificate-authority-data' in provider['config']:
+ ca_cert = tempfile.NamedTemporaryFile(delete=True)
+
+ if PY3:
+ cert = base64.b64decode(
+ provider['config']['idp-certificate-authority-data']
+ ).decode('utf-8')
+ else:
+ cert = base64.b64decode(
+ provider['config']['idp-certificate-authority-data'] + "=="
+ )
+
+ with open(ca_cert.name, 'w') as fh:
+ fh.write(cert)
+
+ config.ssl_ca_cert = ca_cert.name
+
+ elif 'idp-certificate-authority' in provider['config']:
+ config.ssl_ca_cert = provider['config']['idp-certificate-authority']
+
+ else:
+ config.verify_ssl = False
+
+ client = ApiClient(configuration=config)
+
+ response = client.request(
+ method="GET",
+ url="%s/.well-known/openid-configuration"
+ % provider['config']['idp-issuer-url']
+ )
+
+ if response.status != 200:
+ return
+
+ response = json.loads(response.data)
+
+ request = OAuth2Session(
+ client_id=provider['config']['client-id'],
+ token=provider['config']['refresh-token'],
+ auto_refresh_kwargs={
+ 'client_id': provider['config']['client-id'],
+ 'client_secret': provider['config']['client-secret']
+ },
+ auto_refresh_url=response['token_endpoint']
+ )
+
+ try:
+ refresh = request.refresh_token(
+ token_url=response['token_endpoint'],
+ refresh_token=provider['config']['refresh-token'],
+ auth=(provider['config']['client-id'],
+ provider['config']['client-secret']),
+ verify=config.ssl_ca_cert if config.verify_ssl else None
+ )
+ except oauthlib.oauth2.rfc6749.errors.InvalidClientIdError:
+ return
+
+ provider['config'].value['id-token'] = refresh['id_token']
+ provider['config'].value['refresh-token'] = refresh['refresh_token']
+
+ def _load_from_exec_plugin(self):
+ if 'exec' not in self._user:
+ return
+ try:
+ base_path = self._get_base_path(self._cluster.path)
+ status = ExecProvider(self._user['exec'], base_path).run()
+ if 'token' in status:
+ self.token = "Bearer %s" % status['token']
+ elif 'clientCertificateData' in status:
+ # https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats
+ # Plugin has provided certificates instead of a token.
+ if 'clientKeyData' not in status:
+ logging.error('exec: missing clientKeyData field in '
+ 'plugin output')
+ return None
+ self.cert_file = FileOrData(
+ status, None,
+ data_key_name='clientCertificateData',
+ file_base_path=base_path,
+ base64_file_content=False,
+ temp_file_path=self._temp_file_path).as_file()
+ self.key_file = FileOrData(
+ status, None,
+ data_key_name='clientKeyData',
+ file_base_path=base_path,
+ base64_file_content=False,
+ temp_file_path=self._temp_file_path).as_file()
+ else:
+ logging.error('exec: missing token or clientCertificateData '
+ 'field in plugin output')
+ return None
+ if 'expirationTimestamp' in status:
+ self.expiry = parse_rfc3339(status['expirationTimestamp'])
+ return True
+ except Exception as e:
+ logging.error(str(e))
+
+ def _load_user_token(self):
+ base_path = self._get_base_path(self._user.path)
+ token = FileOrData(
+ self._user, 'tokenFile', 'token',
+ file_base_path=base_path,
+ base64_file_content=False,
+ temp_file_path=self._temp_file_path).as_data()
+ if token:
+ self.token = "Bearer %s" % token
+ return True
+
+ def _load_user_pass_token(self):
+ if 'username' in self._user and 'password' in self._user:
+ self.token = urllib3.util.make_headers(
+ basic_auth=(self._user['username'] + ':' +
+ self._user['password'])).get('authorization')
+ return True
+
+ def _get_base_path(self, config_path):
+ if self._config_base_path is not None:
+ return self._config_base_path
+ if config_path is not None:
+ return os.path.abspath(os.path.dirname(config_path))
+ return ""
+
+ def _load_cluster_info(self):
+ if 'server' in self._cluster:
+ self.host = self._cluster['server'].rstrip('/')
+ if self.host.startswith("https"):
+ base_path = self._get_base_path(self._cluster.path)
+ self.ssl_ca_cert = FileOrData(
+ self._cluster, 'certificate-authority',
+ file_base_path=base_path,
+ temp_file_path=self._temp_file_path).as_file()
+ if 'cert_file' not in self.__dict__:
+ # cert_file could have been provided by
+ # _load_from_exec_plugin; only load from the _user
+ # section if we need it.
+ self.cert_file = FileOrData(
+ self._user, 'client-certificate',
+ file_base_path=base_path,
+ temp_file_path=self._temp_file_path).as_file()
+ self.key_file = FileOrData(
+ self._user, 'client-key',
+ file_base_path=base_path,
+ temp_file_path=self._temp_file_path).as_file()
+ if 'insecure-skip-tls-verify' in self._cluster:
+ self.verify_ssl = not self._cluster['insecure-skip-tls-verify']
+ if 'tls-server-name' in self._cluster:
+ self.tls_server_name = self._cluster['tls-server-name']
+
+    def _set_config(self, client_configuration):
+        """Apply the loaded settings to *client_configuration* and install a
+        hook that re-runs authentication once the token expires."""
+        if 'token' in self.__dict__:
+            client_configuration.api_key['authorization'] = self.token
+
+        def _refresh_api_key(client_configuration):
+            # Closure over self: re-authenticate only when the cached
+            # credential carries an expiry stamp that has passed, then
+            # re-apply the (possibly refreshed) settings.
+            if ('expiry' in self.__dict__ and _is_expired(self.expiry)):
+                self._load_authentication()
+            self._set_config(client_configuration)
+        client_configuration.refresh_api_key_hook = _refresh_api_key
+        # copy these keys directly from self to configuration object
+        keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl','tls_server_name']
+        for key in keys:
+            if key in self.__dict__:
+                setattr(client_configuration, key, getattr(self, key))
+
+    def load_and_set(self, client_configuration):
+        """Load authentication and cluster information, then apply both to
+        the given client configuration object (order matters: auth may
+        pre-populate cert_file consumed by _load_cluster_info)."""
+        self._load_authentication()
+        self._load_cluster_info()
+        self._set_config(client_configuration)
+
+ def list_contexts(self):
+ return [context.value for context in self._config['contexts']]
+
+    @property
+    def current_context(self):
+        # Raw dict of the active context entry (unwrapped from ConfigNode).
+        return self._current_context.value
+
+
+class ConfigNode(object):
+    """Remembers each config key's path and construct a relevant exception
+    message in case of missing keys. The assumption is all access keys are
+    present in a well-formed kube-config."""
+
+    def __init__(self, name, value, path=None):
+        # name: human-readable lookup path (e.g. "file/clusters[name=x]")
+        #       used only to build error messages.
+        # value: the wrapped dict or list from the parsed YAML.
+        # path: filesystem path of the kubeconfig this node came from, used
+        #       later to resolve relative certificate/key file references.
+        self.name = name
+        self.value = value
+        self.path = path
+
+    def __contains__(self, key):
+        return key in self.value
+
+    def __len__(self):
+        return len(self.value)
+
+    def safe_get(self, key):
+        """Return value[key] when present, None otherwise (never raises a
+        ConfigException; an out-of-range int index still raises IndexError
+        from the underlying list)."""
+        if (isinstance(self.value, list) and isinstance(key, int) or
+                key in self.value):
+            return self.value[key]
+
+    def __getitem__(self, key):
+        """Strict lookup: raise ConfigException for a missing key, and
+        re-wrap container values so nested errors keep the full path."""
+        v = self.safe_get(key)
+        if v is None:
+            raise ConfigException(
+                'Invalid kube-config file. Expected key %s in %s'
+                % (key, self.name))
+        if isinstance(v, dict) or isinstance(v, list):
+            return ConfigNode('%s/%s' % (self.name, key), v, self.path)
+        else:
+            return v
+
+    def get_with_name(self, name, safe=False):
+        """Return the single list entry whose 'name' equals *name*.
+
+        With safe=True a miss returns None instead of raising; duplicate
+        names always raise ConfigException."""
+        if not isinstance(self.value, list):
+            raise ConfigException(
+                'Invalid kube-config file. Expected %s to be a list'
+                % self.name)
+        result = None
+        for v in self.value:
+            if 'name' not in v:
+                raise ConfigException(
+                    'Invalid kube-config file. '
+                    'Expected all values in %s list to have \'name\' key'
+                    % self.name)
+            if v['name'] == name:
+                if result is None:
+                    result = v
+                else:
+                    raise ConfigException(
+                        'Invalid kube-config file. '
+                        'Expected only one object with name %s in %s list'
+                        % (name, self.name))
+        if result is not None:
+            # Entries merged by KubeConfigMerger are already ConfigNodes;
+            # plain dicts are wrapped here.
+            if isinstance(result, ConfigNode):
+                return result
+            else:
+                return ConfigNode(
+                    '%s[name=%s]' %
+                    (self.name, name), result, self.path)
+        if safe:
+            return None
+        raise ConfigException(
+            'Invalid kube-config file. '
+            'Expected object with name %s in %s list' % (name, self.name))
+
+
+class KubeConfigMerger:
+
+    """Reads and merges configuration from one or more kube-config's.
+    The property `config` can be passed to the KubeConfigLoader as config_dict.
+
+    It uses a path attribute from ConfigNode to store the path to kubeconfig.
+    This path is required to load certs from relative paths.
+
+    A method `save_changes` updates changed kubeconfig's (it compares current
+    state of dicts with).
+    """
+
+    def __init__(self, paths):
+        # paths may be a file-like object (duck-typed via .read) or a string
+        # of one or more paths separated KUBECONFIG-style.
+        self.paths = []
+        self.config_files = {}
+        self.config_merged = None
+        if hasattr(paths, 'read'):
+            self._load_config_from_file_like_object(paths)
+        else:
+            self._load_config_from_file_path(paths)
+
+    @property
+    def config(self):
+        # Merged view across all loaded kubeconfig files.
+        return self.config_merged
+
+    def _load_config_from_file_like_object(self, string):
+        """Load a single config from a stream; no merging is performed."""
+        if hasattr(string, 'getvalue'):
+            config = yaml.safe_load(string.getvalue())
+        else:
+            config = yaml.safe_load(string.read())
+
+        if config is None:
+            raise ConfigException(
+                'Invalid kube-config.')
+        if self.config_merged is None:
+            self.config_merged = copy.deepcopy(config)
+        # doesn't need to do any further merging
+
+    def _load_config_from_file_path(self, string):
+        """Load and merge every existing file named in *string* (paths are
+        split on the platform's KUBECONFIG separator; missing files are
+        silently skipped)."""
+        for path in string.split(ENV_KUBECONFIG_PATH_SEPARATOR):
+            if path:
+                path = os.path.expanduser(path)
+                if os.path.exists(path):
+                    self.paths.append(path)
+                    self.load_config(path)
+        # Snapshot used by save_changes() to detect later modifications.
+        self.config_saved = copy.deepcopy(self.config_files)
+
+    def load_config(self, path):
+        """Parse one kubeconfig file and merge its named lists into the
+        merged view; the first file loaded provides the top-level scaffold
+        (current-context etc.)."""
+        with open(path) as f:
+            config = yaml.safe_load(f)
+
+        if config is None:
+            raise ConfigException(
+                'Invalid kube-config. '
+                '%s file is empty' % path)
+
+        if self.config_merged is None:
+            config_merged = copy.deepcopy(config)
+            for item in ('clusters', 'contexts', 'users'):
+                config_merged[item] = []
+            self.config_merged = ConfigNode(path, config_merged, path)
+        for item in ('clusters', 'contexts', 'users'):
+            self._merge(item, config.get(item, []) or [], path)
+        self.config_files[path] = config
+
+    def _merge(self, item, add_cfg, path):
+        """Append entries whose 'name' is not already present; earlier
+        files win on name clashes (for-else fires only when no break)."""
+        for new_item in add_cfg:
+            for exists in self.config_merged.value[item]:
+                if exists['name'] == new_item['name']:
+                    break
+            else:
+                self.config_merged.value[item].append(ConfigNode(
+                    '{}/{}'.format(path, new_item), new_item, path))
+
+    def save_changes(self):
+        """Write back only the files whose dicts changed since the last
+        snapshot, then refresh the snapshot."""
+        for path in self.paths:
+            if self.config_saved[path] != self.config_files[path]:
+                self.save_config(path)
+        self.config_saved = copy.deepcopy(self.config_files)
+
+    def save_config(self, path):
+        """Dump one tracked kubeconfig file back to disk as block-style YAML."""
+        with open(path, 'w') as f:
+            yaml.safe_dump(self.config_files[path], f,
+                           default_flow_style=False)
+
+
+def _get_kube_config_loader_for_yaml_file(
+ filename, persist_config=False, **kwargs):
+ return _get_kube_config_loader(
+ filename=filename,
+ persist_config=persist_config,
+ **kwargs)
+
+
+def _get_kube_config_loader(
+ filename=None,
+ config_dict=None,
+ persist_config=False,
+ **kwargs):
+ if config_dict is None:
+ kcfg = KubeConfigMerger(filename)
+ if persist_config and 'config_persister' not in kwargs:
+ kwargs['config_persister'] = kcfg.save_changes
+
+ if kcfg.config is None:
+ raise ConfigException(
+ 'Invalid kube-config file. '
+ 'No configuration found.')
+ return KubeConfigLoader(
+ config_dict=kcfg.config,
+ config_base_path=None,
+ **kwargs)
+ else:
+ return KubeConfigLoader(
+ config_dict=config_dict,
+ config_base_path=None,
+ **kwargs)
+
+
+def list_kube_config_contexts(config_file=None):
+
+ if config_file is None:
+ config_file = KUBE_CONFIG_DEFAULT_LOCATION
+
+ loader = _get_kube_config_loader(filename=config_file)
+ return loader.list_contexts(), loader.current_context
+
+
+def load_kube_config(config_file=None, context=None,
+ client_configuration=None,
+ persist_config=True,
+ temp_file_path=None):
+ """Loads authentication and cluster information from kube-config file
+ and stores them in kubernetes.client.configuration.
+
+ :param config_file: Name of the kube-config file.
+ :param context: set the active context. If is set to None, current_context
+ from config file will be used.
+ :param client_configuration: The kubernetes.client.Configuration to
+ set configs to.
+ :param persist_config: If True, config file will be updated when changed
+ (e.g GCP token refresh).
+ :param temp_file_path: store temp files path.
+ """
+
+ if config_file is None:
+ config_file = KUBE_CONFIG_DEFAULT_LOCATION
+
+ loader = _get_kube_config_loader(
+ filename=config_file, active_context=context,
+ persist_config=persist_config,
+ temp_file_path=temp_file_path)
+
+ if client_configuration is None:
+ config = type.__call__(Configuration)
+ loader.load_and_set(config)
+ Configuration.set_default(config)
+ else:
+ loader.load_and_set(client_configuration)
+
+
+def load_kube_config_from_dict(config_dict, context=None,
+ client_configuration=None,
+ persist_config=True,
+ temp_file_path=None):
+ """Loads authentication and cluster information from config_dict file
+ and stores them in kubernetes.client.configuration.
+
+ :param config_dict: Takes the config file as a dict.
+ :param context: set the active context. If is set to None, current_context
+ from config file will be used.
+ :param client_configuration: The kubernetes.client.Configuration to
+ set configs to.
+ :param persist_config: If True, config file will be updated when changed
+ (e.g GCP token refresh).
+ :param temp_file_path: store temp files path.
+ """
+ if config_dict is None:
+ raise ConfigException(
+ 'Invalid kube-config dict. '
+ 'No configuration found.')
+
+ loader = _get_kube_config_loader(
+ config_dict=config_dict, active_context=context,
+ persist_config=persist_config,
+ temp_file_path=temp_file_path)
+
+ if client_configuration is None:
+ config = type.__call__(Configuration)
+ loader.load_and_set(config)
+ Configuration.set_default(config)
+ else:
+ loader.load_and_set(client_configuration)
+
+
+def new_client_from_config(
+ config_file=None,
+ context=None,
+ persist_config=True):
+ """
+ Loads configuration the same as load_kube_config but returns an ApiClient
+ to be used with any API object. This will allow the caller to concurrently
+ talk with multiple clusters.
+ """
+ client_config = type.__call__(Configuration)
+ load_kube_config(config_file=config_file, context=context,
+ client_configuration=client_config,
+ persist_config=persist_config)
+ return ApiClient(configuration=client_config)
+
+
+def new_client_from_config_dict(
+ config_dict=None,
+ context=None,
+ persist_config=True,
+ temp_file_path=None):
+ """
+ Loads configuration the same as load_kube_config_from_dict but returns an ApiClient
+ to be used with any API object. This will allow the caller to concurrently
+ talk with multiple clusters.
+ """
+ client_config = type.__call__(Configuration)
+ load_kube_config_from_dict(config_dict=config_dict, context=context,
+ client_configuration=client_config,
+ persist_config=persist_config,
+ temp_file_path=temp_file_path)
+ return ApiClient(configuration=client_config)
diff --git a/contrib/python/kubernetes/kubernetes/dynamic/__init__.py b/contrib/python/kubernetes/kubernetes/dynamic/__init__.py
new file mode 100644
index 0000000000..a1d3d8f8ea
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/dynamic/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .client import * # NOQA
diff --git a/contrib/python/kubernetes/kubernetes/dynamic/client.py b/contrib/python/kubernetes/kubernetes/dynamic/client.py
new file mode 100644
index 0000000000..e4f2e1487e
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/dynamic/client.py
@@ -0,0 +1,320 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import json
+
+from kubernetes import watch
+from kubernetes.client.rest import ApiException
+
+from .discovery import EagerDiscoverer, LazyDiscoverer
+from .exceptions import api_exception, KubernetesValidateMissing
+from .resource import Resource, ResourceList, Subresource, ResourceInstance, ResourceField
+
+try:
+ import kubernetes_validate
+ HAS_KUBERNETES_VALIDATE = True
+except ImportError:
+ HAS_KUBERNETES_VALIDATE = False
+
+try:
+ from kubernetes_validate.utils import VersionNotSupportedError
+except ImportError:
+ class VersionNotSupportedError(NotImplementedError):
+ pass
+
+__all__ = [
+ 'DynamicClient',
+ 'ResourceInstance',
+ 'Resource',
+ 'ResourceList',
+ 'Subresource',
+ 'EagerDiscoverer',
+ 'LazyDiscoverer',
+ 'ResourceField',
+]
+
+
+def meta_request(func):
+    """ Handles parsing response structure and translating API Exceptions """
+    def inner(self, *args, **kwargs):
+        # serialize=False returns the raw urllib3 response untouched;
+        # serializer controls the wrapper type (default ResourceInstance).
+        serialize_response = kwargs.pop('serialize', True)
+        serializer = kwargs.pop('serializer', ResourceInstance)
+        try:
+            resp = func(self, *args, **kwargs)
+        except ApiException as e:
+            # Translate into the status-specific dynamic exception type.
+            raise api_exception(e)
+        if serialize_response:
+            try:
+                if six.PY2:
+                    return serializer(self, json.loads(resp.data))
+                return serializer(self, json.loads(resp.data.decode('utf8')))
+            except ValueError:
+                # Body was not valid JSON; hand back the decoded raw data.
+                if six.PY2:
+                    return resp.data
+                return resp.data.decode('utf8')
+        return resp
+
+    return inner
+
+
+class DynamicClient(object):
+    """ A kubernetes client that dynamically discovers and interacts with
+    the kubernetes API
+    """
+
+    def __init__(self, client, cache_file=None, discoverer=None):
+        # Setting default here to delay evaluation of LazyDiscoverer class
+        # until constructor is called
+        discoverer = discoverer or LazyDiscoverer
+
+        self.client = client
+        self.configuration = client.configuration
+        self.__discoverer = discoverer(self, cache_file)
+
+    @property
+    def resources(self):
+        # Discoverer acting as a searchable catalogue of API resources.
+        return self.__discoverer
+
+    @property
+    def version(self):
+        # Server version info cached by the discoverer.
+        return self.__discoverer.version
+
+    def ensure_namespace(self, resource, namespace, body):
+        """Return the effective namespace (explicit argument wins over the
+        body's metadata); raise ValueError when neither provides one."""
+        namespace = namespace or body.get('metadata', {}).get('namespace')
+        if not namespace:
+            raise ValueError("Namespace is required for {}.{}".format(resource.group_version, resource.kind))
+        return namespace
+
+    def serialize_body(self, body):
+        """Serialize body to raw dict so apiserver can handle it
+
+        :param body: kubernetes resource body, current support: Union[Dict, ResourceInstance]
+        """
+        # This should match any `ResourceInstance` instances
+        if callable(getattr(body, 'to_dict', None)):
+            return body.to_dict()
+        return body or {}
+
+    def get(self, resource, name=None, namespace=None, **kwargs):
+        """GET a single object (or a collection when name is None)."""
+        path = resource.path(name=name, namespace=namespace)
+        return self.request('get', path, **kwargs)
+
+    def create(self, resource, body=None, namespace=None, **kwargs):
+        """POST a new object of the given resource kind."""
+        body = self.serialize_body(body)
+        if resource.namespaced:
+            namespace = self.ensure_namespace(resource, namespace, body)
+        path = resource.path(namespace=namespace)
+        return self.request('post', path, body=body, **kwargs)
+
+    def delete(self, resource, name=None, namespace=None, body=None, label_selector=None, field_selector=None, **kwargs):
+        """DELETE by name, or by label/field selector for bulk deletion."""
+        if not (name or label_selector or field_selector):
+            raise ValueError("At least one of name|label_selector|field_selector is required")
+        if resource.namespaced and not (label_selector or field_selector or namespace):
+            raise ValueError("At least one of namespace|label_selector|field_selector is required")
+        path = resource.path(name=name, namespace=namespace)
+        return self.request('delete', path, body=body, label_selector=label_selector, field_selector=field_selector, **kwargs)
+
+    def replace(self, resource, body=None, name=None, namespace=None, **kwargs):
+        """PUT (full replacement) of an existing object."""
+        body = self.serialize_body(body)
+        name = name or body.get('metadata', {}).get('name')
+        if not name:
+            raise ValueError("name is required to replace {}.{}".format(resource.group_version, resource.kind))
+        if resource.namespaced:
+            namespace = self.ensure_namespace(resource, namespace, body)
+        path = resource.path(name=name, namespace=namespace)
+        return self.request('put', path, body=body, **kwargs)
+
+    def patch(self, resource, body=None, name=None, namespace=None, **kwargs):
+        """PATCH an existing object; content_type selects the patch
+        strategy (strategic merge by default)."""
+        body = self.serialize_body(body)
+        name = name or body.get('metadata', {}).get('name')
+        if not name:
+            raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind))
+        if resource.namespaced:
+            namespace = self.ensure_namespace(resource, namespace, body)
+
+        content_type = kwargs.pop('content_type', 'application/strategic-merge-patch+json')
+        path = resource.path(name=name, namespace=namespace)
+
+        return self.request('patch', path, body=body, content_type=content_type, **kwargs)
+
+    def server_side_apply(self, resource, body=None, name=None, namespace=None, force_conflicts=None, **kwargs):
+        """Server-side apply: a PATCH with the apply-patch+yaml content
+        type, optionally forcing ownership of conflicting fields."""
+        body = self.serialize_body(body)
+        name = name or body.get('metadata', {}).get('name')
+        if not name:
+            raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind))
+        if resource.namespaced:
+            namespace = self.ensure_namespace(resource, namespace, body)
+
+        # force content type to 'application/apply-patch+yaml'
+        kwargs.update({'content_type': 'application/apply-patch+yaml'})
+        path = resource.path(name=name, namespace=namespace)
+
+        return self.request('patch', path, body=body, force_conflicts=force_conflicts, **kwargs)
+
+    def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None, watcher=None):
+        """
+        Stream events for a resource from the Kubernetes API
+
+        :param resource: The API resource object that will be used to query the API
+        :param namespace: The namespace to query
+        :param name: The name of the resource instance to query
+        :param label_selector: The label selector with which to filter results
+        :param field_selector: The field selector with which to filter results
+        :param resource_version: The version with which to filter results. Only events with
+                                 a resource_version greater than this value will be returned
+        :param timeout: The amount of time in seconds to wait before terminating the stream
+        :param watcher: The Watcher object that will be used to stream the resource
+
+        :return: Event object with these keys:
+                   'type': The type of event such as "ADDED", "DELETED", etc.
+                   'raw_object': a dict representing the watched object.
+                   'object': A ResourceInstance wrapping raw_object.
+
+        Example:
+            client = DynamicClient(k8s_client)
+            watcher = watch.Watch()
+            v1_pods = client.resources.get(api_version='v1', kind='Pod')
+
+            for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5, watcher=watcher):
+                print(e['type'])
+                print(e['object'].metadata)
+                # If you want to gracefully stop the stream watcher
+                watcher.stop()
+        """
+        if not watcher: watcher = watch.Watch()
+
+        # serialize=False: the Watch machinery yields raw dicts which are
+        # wrapped into ResourceInstance here.
+        for event in watcher.stream(
+            resource.get,
+            namespace=namespace,
+            name=name,
+            field_selector=field_selector,
+            label_selector=label_selector,
+            resource_version=resource_version,
+            serialize=False,
+            timeout_seconds=timeout
+        ):
+            event['object'] = ResourceInstance(resource, event['object'])
+            yield event
+
+    @meta_request
+    def request(self, method, path, body=None, **params):
+        """Low-level request: translate snake_case kwargs into Kubernetes
+        query parameters and dispatch through the generated ApiClient."""
+        if not path.startswith('/'):
+            path = '/' + path
+
+        path_params = params.get('path_params', {})
+        query_params = params.get('query_params', [])
+        if params.get('pretty') is not None:
+            query_params.append(('pretty', params['pretty']))
+        if params.get('_continue') is not None:
+            query_params.append(('continue', params['_continue']))
+        if params.get('include_uninitialized') is not None:
+            query_params.append(('includeUninitialized', params['include_uninitialized']))
+        if params.get('field_selector') is not None:
+            query_params.append(('fieldSelector', params['field_selector']))
+        if params.get('label_selector') is not None:
+            query_params.append(('labelSelector', params['label_selector']))
+        if params.get('limit') is not None:
+            query_params.append(('limit', params['limit']))
+        if params.get('resource_version') is not None:
+            query_params.append(('resourceVersion', params['resource_version']))
+        if params.get('timeout_seconds') is not None:
+            query_params.append(('timeoutSeconds', params['timeout_seconds']))
+        if params.get('watch') is not None:
+            query_params.append(('watch', params['watch']))
+        if params.get('grace_period_seconds') is not None:
+            query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
+        if params.get('propagation_policy') is not None:
+            query_params.append(('propagationPolicy', params['propagation_policy']))
+        if params.get('orphan_dependents') is not None:
+            query_params.append(('orphanDependents', params['orphan_dependents']))
+        if params.get('dry_run') is not None:
+            query_params.append(('dryRun', params['dry_run']))
+        if params.get('field_manager') is not None:
+            query_params.append(('fieldManager', params['field_manager']))
+        if params.get('force_conflicts') is not None:
+            query_params.append(('force', params['force_conflicts']))
+
+        header_params = params.get('header_params', {})
+        form_params = []
+        local_var_files = {}
+
+        # Checking Accept header.
+        new_header_params = dict((key.lower(), value) for key, value in header_params.items())
+        if not 'accept' in new_header_params:
+            header_params['Accept'] = self.client.select_header_accept([
+                'application/json',
+                'application/yaml',
+            ])
+
+        # HTTP header `Content-Type`
+        if params.get('content_type'):
+            header_params['Content-Type'] = params['content_type']
+        else:
+            header_params['Content-Type'] = self.client.select_header_content_type(['*/*'])
+
+        # Authentication setting
+        auth_settings = ['BearerToken']
+
+        # _preload_content=False: meta_request deserializes the raw body
+        # itself (optionally into ResourceInstance).
+        api_response = self.client.call_api(
+            path,
+            method.upper(),
+            path_params,
+            query_params,
+            header_params,
+            body=body,
+            post_params=form_params,
+            async_req=params.get('async_req'),
+            files=local_var_files,
+            auth_settings=auth_settings,
+            _preload_content=False,
+            _return_http_data_only=params.get('_return_http_data_only', True),
+            _request_timeout=params.get('_request_timeout')
+        )
+        if params.get('async_req'):
+            return api_response.get()
+        else:
+            return api_response
+
+    def validate(self, definition, version=None, strict=False):
+        """validate checks a kubernetes resource definition
+
+        Args:
+            definition (dict): resource definition
+            version (str): version of kubernetes to validate against
+            strict (bool): whether unexpected additional properties should be considered errors
+
+        Returns:
+            warnings (list), errors (list): warnings are missing validations, errors are validation failures
+        """
+        if not HAS_KUBERNETES_VALIDATE:
+            raise KubernetesValidateMissing()
+
+        errors = list()
+        warnings = list()
+        try:
+            if version is None:
+                try:
+                    version = self.version['kubernetes']['gitVersion']
+                except KeyError:
+                    version = kubernetes_validate.latest_version()
+            kubernetes_validate.validate(definition, version, strict)
+        except kubernetes_validate.utils.ValidationError as e:
+            errors.append("resource definition validation error at %s: %s" % ('.'.join([str(item) for item in e.path]), e.message))  # noqa: B306
+        except VersionNotSupportedError:
+            errors.append("Kubernetes version %s is not supported by kubernetes-validate" % version)
+        except kubernetes_validate.utils.SchemaNotFoundError as e:
+            warnings.append("Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" %
+                            (e.kind, e.api_version, e.version))
+        return warnings, errors
diff --git a/contrib/python/kubernetes/kubernetes/dynamic/discovery.py b/contrib/python/kubernetes/kubernetes/dynamic/discovery.py
new file mode 100644
index 0000000000..c00dfa3ef8
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/dynamic/discovery.py
@@ -0,0 +1,433 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+import json
+import logging
+import hashlib
+import tempfile
+from functools import partial
+from collections import defaultdict
+from abc import abstractmethod, abstractproperty
+
+from urllib3.exceptions import ProtocolError, MaxRetryError
+
+from kubernetes import __version__
+from .exceptions import NotFoundError, ResourceNotFoundError, ResourceNotUniqueError, ApiException, ServiceUnavailableError
+from .resource import Resource, ResourceList
+
+
+# URL prefix under which named API groups are discovered; the legacy core
+# group lives under "api" and is seeded separately in default_groups().
+DISCOVERY_PREFIX = 'apis'
+
+
+class Discoverer(object):
+    """
+    A convenient container for storing discovered API resources. Allows
+    easy searching and retrieval of specific resources.
+
+    Subclasses implement the abstract methods with different loading strategies.
+    """
+
+    def __init__(self, client, cache_file):
+        self.client = client
+        # Cache filename is derived from the API host so different clusters
+        # get separate on-disk caches.
+        default_cache_id = self.client.configuration.host
+        if six.PY3:
+            default_cache_id = default_cache_id.encode('utf-8')
+        try:
+            default_cachefile_name = 'osrcp-{0}.json'.format(hashlib.md5(default_cache_id, usedforsecurity=False).hexdigest())
+        except TypeError:
+            # usedforsecurity is only supported in 3.9+
+            default_cachefile_name = 'osrcp-{0}.json'.format(hashlib.md5(default_cache_id).hexdigest())
+        self.__cache_file = cache_file or os.path.join(tempfile.gettempdir(), default_cachefile_name)
+        self.__init_cache()
+
+    def __init_cache(self, refresh=False):
+        """Load the on-disk discovery cache (or start fresh), fetch server
+        info, and run discovery. A library version mismatch or an
+        unreadable cache file forces a refresh."""
+        if refresh or not os.path.exists(self.__cache_file):
+            self._cache = {'library_version': __version__}
+            refresh = True
+        else:
+            try:
+                with open(self.__cache_file, 'r') as f:
+                    # partial(CacheDecoder, client) stands in for the decoder
+                    # class so entries can be re-bound to this client.
+                    self._cache = json.load(f, cls=partial(CacheDecoder, self.client))
+                if self._cache.get('library_version') != __version__:
+                    # Version mismatch, need to refresh cache
+                    self.invalidate_cache()
+            except Exception as e:
+                logging.error("load cache error: %s", e)
+                self.invalidate_cache()
+        self._load_server_info()
+        self.discover()
+        if refresh:
+            self._write_cache()
+
+    def _write_cache(self):
+        try:
+            with open(self.__cache_file, 'w') as f:
+                json.dump(self._cache, f, cls=CacheEncoder)
+        except Exception:
+            # Failing to write the cache isn't a big enough error to crash on
+            pass
+
+    def invalidate_cache(self):
+        """Discard the cached discovery data and rebuild it from scratch."""
+        self.__init_cache(refresh=True)
+
+    @abstractproperty
+    def api_groups(self):
+        pass
+
+    @abstractmethod
+    def search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
+        pass
+
+    @abstractmethod
+    def discover(self):
+        pass
+
+    @property
+    def version(self):
+        # Populated by _load_server_info() during __init_cache().
+        return self.__version
+
+    def default_groups(self, request_resources=False):
+        """Seed the group map with the legacy core 'api' group and the
+        synthetic List resource under the 'apis' prefix."""
+        groups = {}
+        groups['api'] = { '': {
+            'v1': (ResourceGroup( True, resources=self.get_resources_for_api_version('api', '', 'v1', True) )
+                   if request_resources else ResourceGroup(True))
+        }}
+
+        groups[DISCOVERY_PREFIX] = {'': {
+            'v1': ResourceGroup(True, resources = {"List": [ResourceList(self.client)]})
+        }}
+        return groups
+
+    def parse_api_groups(self, request_resources=False, update=False):
+        """ Discovers all API groups present in the cluster """
+        if not self._cache.get('resources') or update:
+            self._cache['resources'] = self._cache.get('resources', {})
+            groups_response = self.client.request('GET', '/{}'.format(DISCOVERY_PREFIX)).groups
+
+            groups = self.default_groups(request_resources=request_resources)
+
+            for group in groups_response:
+                new_group = {}
+                for version_raw in group['versions']:
+                    version = version_raw['version']
+                    # Re-use any resources already cached for this version
+                    # unless the caller asked us to fetch them eagerly.
+                    resource_group = self._cache.get('resources', {}).get(DISCOVERY_PREFIX, {}).get(group['name'], {}).get(version)
+                    preferred = version_raw == group['preferredVersion']
+                    resources = resource_group.resources if resource_group else {}
+                    if request_resources:
+                        resources = self.get_resources_for_api_version(DISCOVERY_PREFIX, group['name'], version, preferred)
+                    new_group[version] = ResourceGroup(preferred, resources=resources)
+                groups[DISCOVERY_PREFIX][group['name']] = new_group
+            self._cache['resources'].update(groups)
+            self._write_cache()
+
+        return self._cache['resources']
+
+    def _load_server_info(self):
+        """Fetch and cache the server's /version payload, raising a clearer
+        error when a plain-http host answers an HTTPS-style failure."""
+        def just_json(_, serialized):
+            # Pass-through serializer: keep the parsed JSON dict as-is.
+            return serialized
+
+        if not self._cache.get('version'):
+            try:
+                self._cache['version'] = {
+                    'kubernetes': self.client.request('get', '/version', serializer=just_json)
+                }
+            except (ValueError, MaxRetryError) as e:
+                if isinstance(e, MaxRetryError) and not isinstance(e.reason, ProtocolError):
+                    raise
+                if not self.client.configuration.host.startswith("https://"):
+                    raise ValueError("Host value %s should start with https:// when talking to HTTPS endpoint" %
+                                     self.client.configuration.host)
+                else:
+                    raise
+
+        self.__version = self._cache['version']
+
+    def get_resources_for_api_version(self, prefix, group, version, preferred):
+        """ returns a dictionary of resources associated with provided (prefix, group, version)"""
+
+        resources = defaultdict(list)
+        subresources = {}
+
+        path = '/'.join(filter(None, [prefix, group, version]))
+        try:
+            resources_response = self.client.request('GET', path).resources or []
+        except ServiceUnavailableError:
+            resources_response = []
+
+        # Subresources have names like "pods/status"; index them under the
+        # parent resource's name.
+        resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
+        subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
+        for subresource in subresources_raw:
+            resource, name = subresource['name'].split('/', 1)
+            if not subresources.get(resource):
+                subresources[resource] = {}
+            subresources[resource][name] = subresource
+
+        for resource in resources_raw:
+            # Prevent duplicate keys
+            for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
+                resource.pop(key, None)
+
+            resourceobj = Resource(
+                prefix=prefix,
+                group=group,
+                api_version=version,
+                client=self.client,
+                preferred=preferred,
+                subresources=subresources.get(resource['name']),
+                **resource
+            )
+            resources[resource['kind']].append(resourceobj)
+
+            resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'])
+            resources[resource_list.kind].append(resource_list)
+        return resources
+
+    def get(self, **kwargs):
+        """ Same as search, but will throw an error if there are multiple or no
+        results. If there are multiple results and only one is an exact match
+        on api_version, that resource will be returned.
+        """
+        results = self.search(**kwargs)
+        # If there are multiple matches, prefer exact matches on api_version
+        if len(results) > 1 and kwargs.get('api_version'):
+            results = [
+                result for result in results if result.group_version == kwargs['api_version']
+            ]
+        # If there are multiple matches, prefer non-List kinds
+        if len(results) > 1 and not all([isinstance(x, ResourceList) for x in results]):
+            results = [result for result in results if not isinstance(result, ResourceList)]
+        if len(results) == 1:
+            return results[0]
+        elif not results:
+            raise ResourceNotFoundError('No matches found for {}'.format(kwargs))
+        else:
+            raise ResourceNotUniqueError('Multiple matches found for {}: {}'.format(kwargs, results))
+
+
+class LazyDiscoverer(Discoverer):
+    """ A convenient container for storing discovered API resources. Allows
+    easy searching and retrieval of specific resources.
+
+    Resources for the cluster are loaded lazily.
+    """
+
+    def __init__(self, client, cache_file):
+        Discoverer.__init__(self, client, cache_file)
+        # Set when a lazy resource fetch dirties the cache; flushed to
+        # disk by __maybe_write_cache().
+        self.__update_cache = False
+
+    def discover(self):
+        # Only the group/version skeleton is fetched up front; resource
+        # lists are filled in on demand by __search/__iter__.
+        self.__resources = self.parse_api_groups(request_resources=False)
+
+    def __maybe_write_cache(self):
+        if self.__update_cache:
+            self._write_cache()
+            self.__update_cache = False
+
+    @property
+    def api_groups(self):
+        return self.parse_api_groups(request_resources=False, update=True)['apis'].keys()
+
+    def search(self, **kwargs):
+        # In first call, ignore ResourceNotFoundError and set default value for results
+        try:
+            results = self.__search(self.__build_search(**kwargs), self.__resources, [])
+        except ResourceNotFoundError:
+            results = []
+        if not results:
+            # Nothing matched: the cache may be stale, so rebuild it and
+            # retry once (errors propagate this time).
+            self.invalidate_cache()
+            results = self.__search(self.__build_search(**kwargs), self.__resources, [])
+        self.__maybe_write_cache()
+        return results
+
+    def __search(self, parts, resources, reqParams):
+        """Recursively match [prefix, group, api_version, kind, attrs];
+        '*' matches every key at that level."""
+        part = parts[0]
+        if part != '*':
+
+            resourcePart = resources.get(part)
+            if not resourcePart:
+                return []
+            elif isinstance(resourcePart, ResourceGroup):
+                if len(reqParams) != 2:
+                    raise ValueError("prefix and group params should be present, have %s" % reqParams)
+                # Check if we've requested resources for this group
+                if not resourcePart.resources:
+                    prefix, group, version = reqParams[0], reqParams[1], part
+                    try:
+                        resourcePart.resources = self.get_resources_for_api_version(
+                            prefix, group, part, resourcePart.preferred)
+                    except NotFoundError:
+                        raise ResourceNotFoundError
+
+                    self._cache['resources'][prefix][group][version] = resourcePart
+                    self.__update_cache = True
+                return self.__search(parts[1:], resourcePart.resources, reqParams)
+            elif isinstance(resourcePart, dict):
+                # In this case parts [0] will be a specified prefix, group, version
+                # as we recurse
+                return self.__search(parts[1:], resourcePart, reqParams + [part] )
+            else:
+                # Leaf level: a list of Resource objects, optionally
+                # filtered by the attribute dict in parts[1].
+                if parts[1] != '*' and isinstance(parts[1], dict):
+                    for _resource in resourcePart:
+                        for term, value in parts[1].items():
+                            if getattr(_resource, term) == value:
+                                return [_resource]
+
+                    return []
+                else:
+                    return resourcePart
+        else:
+            matches = []
+            for key in resources.keys():
+                matches.extend(self.__search([key] + parts[1:], resources, reqParams))
+            return matches
+
+    def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
+        # "group/version" style api_version is split into its components.
+        if not group and api_version and '/' in api_version:
+            group, api_version = api_version.split('/')
+
+        items = [prefix, group, api_version, kind, kwargs]
+        return list(map(lambda x: x or '*', items))
+
+    def __iter__(self):
+        for prefix, groups in self.__resources.items():
+            for group, versions in groups.items():
+                for version, rg in versions.items():
+                    # Request resources for this groupVersion if we haven't yet
+                    if not rg.resources:
+                        rg.resources = self.get_resources_for_api_version(
+                            prefix, group, version, rg.preferred)
+                        self._cache['resources'][prefix][group][version] = rg
+                        self.__update_cache = True
+                    for _, resource in six.iteritems(rg.resources):
+                        yield resource
+        self.__maybe_write_cache()
+
+
+class EagerDiscoverer(Discoverer):
+ """ A convenient container for storing discovered API resources. Allows
+ easy searching and retrieval of specific resources.
+
+ All resources are discovered for the cluster upon object instantiation.
+ """
+
+ def update(self, resources):
+ self.__resources = resources
+
+ def __init__(self, client, cache_file):
+ Discoverer.__init__(self, client, cache_file)
+
+ def discover(self):
+ self.__resources = self.parse_api_groups(request_resources=True)
+
+ @property
+ def api_groups(self):
+ """ list available api groups """
+ return self.parse_api_groups(request_resources=True, update=True)['apis'].keys()
+
+
+ def search(self, **kwargs):
+ """ Takes keyword arguments and returns matching resources. The search
+ will happen in the following order:
+ prefix: The api prefix for a resource, ie, /api, /oapi, /apis. Can usually be ignored
+ group: The api group of a resource. Will also be extracted from api_version if it is present there
+ api_version: The api version of a resource
+ kind: The kind of the resource
+ arbitrary arguments (see below), in random order
+
+ The arbitrary arguments can be any valid attribute for an Resource object
+ """
+ results = self.__search(self.__build_search(**kwargs), self.__resources)
+ if not results:
+ self.invalidate_cache()
+ results = self.__search(self.__build_search(**kwargs), self.__resources)
+ return results
+
+ def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
+ if not group and api_version and '/' in api_version:
+ group, api_version = api_version.split('/')
+
+ items = [prefix, group, api_version, kind, kwargs]
+ return list(map(lambda x: x or '*', items))
+
+ def __search(self, parts, resources):
+ part = parts[0]
+ resourcePart = resources.get(part)
+
+ if part != '*' and resourcePart:
+ if isinstance(resourcePart, ResourceGroup):
+ return self.__search(parts[1:], resourcePart.resources)
+ elif isinstance(resourcePart, dict):
+ return self.__search(parts[1:], resourcePart)
+ else:
+ if parts[1] != '*' and isinstance(parts[1], dict):
+ for _resource in resourcePart:
+ for term, value in parts[1].items():
+ if getattr(_resource, term) == value:
+ return [_resource]
+ return []
+ else:
+ return resourcePart
+ elif part == '*':
+ matches = []
+ for key in resources.keys():
+ matches.extend(self.__search([key] + parts[1:], resources))
+ return matches
+ return []
+
+ def __iter__(self):
+ for _, groups in self.__resources.items():
+ for _, versions in groups.items():
+ for _, resources in versions.items():
+ for _, resource in resources.items():
+ yield resource
+
+
+class ResourceGroup(object):
+ """Helper class for Discoverer container"""
+ def __init__(self, preferred, resources=None):
+ self.preferred = preferred
+ self.resources = resources or {}
+
+ def to_dict(self):
+ return {
+ '_type': 'ResourceGroup',
+ 'preferred': self.preferred,
+ 'resources': self.resources,
+ }
+
+
+class CacheEncoder(json.JSONEncoder):
+
+ def default(self, o):
+ return o.to_dict()
+
+
+class CacheDecoder(json.JSONDecoder):
+ def __init__(self, client, *args, **kwargs):
+ self.client = client
+ json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
+
+ def object_hook(self, obj):
+ if '_type' not in obj:
+ return obj
+ _type = obj.pop('_type')
+ if _type == 'Resource':
+ return Resource(client=self.client, **obj)
+ elif _type == 'ResourceList':
+ return ResourceList(self.client, **obj)
+ elif _type == 'ResourceGroup':
+ return ResourceGroup(obj['preferred'], resources=self.object_hook(obj['resources']))
+ return obj
diff --git a/contrib/python/kubernetes/kubernetes/dynamic/exceptions.py b/contrib/python/kubernetes/kubernetes/dynamic/exceptions.py
new file mode 100644
index 0000000000..c8b908e7d5
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/dynamic/exceptions.py
@@ -0,0 +1,110 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import sys
+import traceback
+
+from kubernetes.client.rest import ApiException
+
+
+def api_exception(e):
+ """
+ Returns the proper Exception class for the given kubernetes.client.rest.ApiException object
+ https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#success-codes
+ """
+ _, _, exc_traceback = sys.exc_info()
+ tb = '\n'.join(traceback.format_tb(exc_traceback))
+ return {
+ 400: BadRequestError,
+ 401: UnauthorizedError,
+ 403: ForbiddenError,
+ 404: NotFoundError,
+ 405: MethodNotAllowedError,
+ 409: ConflictError,
+ 410: GoneError,
+ 422: UnprocessibleEntityError,
+ 429: TooManyRequestsError,
+ 500: InternalServerError,
+ 503: ServiceUnavailableError,
+ 504: ServerTimeoutError,
+ }.get(e.status, DynamicApiError)(e, tb)
+
+
+class DynamicApiError(ApiException):
+ """ Generic API Error for the dynamic client """
+ def __init__(self, e, tb=None):
+ self.status = e.status
+ self.reason = e.reason
+ self.body = e.body
+ self.headers = e.headers
+ self.original_traceback = tb
+
+ def __str__(self):
+ error_message = [str(self.status), "Reason: {}".format(self.reason)]
+ if self.headers:
+ error_message.append("HTTP response headers: {}".format(self.headers))
+
+ if self.body:
+ error_message.append("HTTP response body: {}".format(self.body))
+
+ if self.original_traceback:
+ error_message.append("Original traceback: \n{}".format(self.original_traceback))
+
+ return '\n'.join(error_message)
+
+ def summary(self):
+ if self.body:
+ if self.headers and self.headers.get('Content-Type') == 'application/json':
+ message = json.loads(self.body).get('message')
+ if message:
+ return message
+
+ return self.body
+ else:
+ return "{} Reason: {}".format(self.status, self.reason)
+
+class ResourceNotFoundError(Exception):
+ """ Resource was not found in available APIs """
+class ResourceNotUniqueError(Exception):
+ """ Parameters given matched multiple API resources """
+
+class KubernetesValidateMissing(Exception):
+ """ kubernetes-validate is not installed """
+
+# HTTP Errors
+class BadRequestError(DynamicApiError):
+ """ 400: StatusBadRequest """
+class UnauthorizedError(DynamicApiError):
+ """ 401: StatusUnauthorized """
+class ForbiddenError(DynamicApiError):
+ """ 403: StatusForbidden """
+class NotFoundError(DynamicApiError):
+ """ 404: StatusNotFound """
+class MethodNotAllowedError(DynamicApiError):
+ """ 405: StatusMethodNotAllowed """
+class ConflictError(DynamicApiError):
+ """ 409: StatusConflict """
+class GoneError(DynamicApiError):
+ """ 410: StatusGone """
+class UnprocessibleEntityError(DynamicApiError):
+ """ 422: StatusUnprocessibleEntity """
+class TooManyRequestsError(DynamicApiError):
+ """ 429: StatusTooManyRequests """
+class InternalServerError(DynamicApiError):
+ """ 500: StatusInternalServer """
+class ServiceUnavailableError(DynamicApiError):
+ """ 503: StatusServiceUnavailable """
+class ServerTimeoutError(DynamicApiError):
+ """ 504: StatusServerTimeout """
diff --git a/contrib/python/kubernetes/kubernetes/dynamic/resource.py b/contrib/python/kubernetes/kubernetes/dynamic/resource.py
new file mode 100644
index 0000000000..1586b23199
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/dynamic/resource.py
@@ -0,0 +1,403 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import yaml
+from functools import partial
+
+from pprint import pformat
+
+
+class Resource(object):
+ """ Represents an API resource type, containing the information required to build urls for requests """
+
+ def __init__(self, prefix=None, group=None, api_version=None, kind=None,
+ namespaced=False, verbs=None, name=None, preferred=False, client=None,
+ singularName=None, shortNames=None, categories=None, subresources=None, **kwargs):
+
+ if None in (api_version, kind, prefix):
+ raise ValueError("At least prefix, kind, and api_version must be provided")
+
+ self.prefix = prefix
+ self.group = group
+ self.api_version = api_version
+ self.kind = kind
+ self.namespaced = namespaced
+ self.verbs = verbs
+ self.name = name
+ self.preferred = preferred
+ self.client = client
+ self.singular_name = singularName or (name[:-1] if name else "")
+ self.short_names = shortNames
+ self.categories = categories
+ self.subresources = {
+ k: Subresource(self, **v) for k, v in (subresources or {}).items()
+ }
+
+ self.extra_args = kwargs
+
+ def to_dict(self):
+ d = {
+ '_type': 'Resource',
+ 'prefix': self.prefix,
+ 'group': self.group,
+ 'api_version': self.api_version,
+ 'kind': self.kind,
+ 'namespaced': self.namespaced,
+ 'verbs': self.verbs,
+ 'name': self.name,
+ 'preferred': self.preferred,
+ 'singularName': self.singular_name,
+ 'shortNames': self.short_names,
+ 'categories': self.categories,
+ 'subresources': {k: sr.to_dict() for k, sr in self.subresources.items()},
+ }
+ d.update(self.extra_args)
+ return d
+
+ @property
+ def group_version(self):
+ if self.group:
+ return '{}/{}'.format(self.group, self.api_version)
+ return self.api_version
+
+ def __repr__(self):
+ return '<{}({}/{})>'.format(self.__class__.__name__, self.group_version, self.name)
+
+ @property
+ def urls(self):
+ full_prefix = '{}/{}'.format(self.prefix, self.group_version)
+ resource_name = self.name.lower()
+ return {
+ 'base': '/{}/{}'.format(full_prefix, resource_name),
+ 'namespaced_base': '/{}/namespaces/{{namespace}}/{}'.format(full_prefix, resource_name),
+ 'full': '/{}/{}/{{name}}'.format(full_prefix, resource_name),
+ 'namespaced_full': '/{}/namespaces/{{namespace}}/{}/{{name}}'.format(full_prefix, resource_name)
+ }
+
+ def path(self, name=None, namespace=None):
+ url_type = []
+ path_params = {}
+ if self.namespaced and namespace:
+ url_type.append('namespaced')
+ path_params['namespace'] = namespace
+ if name:
+ url_type.append('full')
+ path_params['name'] = name
+ else:
+ url_type.append('base')
+ return self.urls['_'.join(url_type)].format(**path_params)
+
+ def __getattr__(self, name):
+ if name in self.subresources:
+ return self.subresources[name]
+ return partial(getattr(self.client, name), self)
+
+
+class ResourceList(Resource):
+ """ Represents a list of API objects """
+
+ def __init__(self, client, group='', api_version='v1', base_kind='', kind=None, base_resource_lookup=None):
+ self.client = client
+ self.group = group
+ self.api_version = api_version
+ self.kind = kind or '{}List'.format(base_kind)
+ self.base_kind = base_kind
+ self.base_resource_lookup = base_resource_lookup
+ self.__base_resource = None
+
+ def base_resource(self):
+ if self.__base_resource:
+ return self.__base_resource
+ elif self.base_resource_lookup:
+ self.__base_resource = self.client.resources.get(**self.base_resource_lookup)
+ return self.__base_resource
+ elif self.base_kind:
+ self.__base_resource = self.client.resources.get(group=self.group, api_version=self.api_version, kind=self.base_kind)
+ return self.__base_resource
+ return None
+
+ def _items_to_resources(self, body):
+ """ Takes a List body and return a dictionary with the following structure:
+ {
+ 'api_version': str,
+ 'kind': str,
+ 'items': [{
+ 'resource': Resource,
+ 'name': str,
+ 'namespace': str,
+ }]
+ }
+ """
+ if body is None:
+ raise ValueError("You must provide a body when calling methods on a ResourceList")
+
+ api_version = body['apiVersion']
+ kind = body['kind']
+ items = body.get('items')
+ if not items:
+ raise ValueError('The `items` field in the body must be populated when calling methods on a ResourceList')
+
+ if self.kind != kind:
+ raise ValueError('Methods on a {} must be called with a body containing the same kind. Received {} instead'.format(self.kind, kind))
+
+ return {
+ 'api_version': api_version,
+ 'kind': kind,
+ 'items': [self._item_to_resource(item) for item in items]
+ }
+
+ def _item_to_resource(self, item):
+ metadata = item.get('metadata', {})
+ resource = self.base_resource()
+ if not resource:
+ api_version = item.get('apiVersion', self.api_version)
+ kind = item.get('kind', self.base_kind)
+ resource = self.client.resources.get(api_version=api_version, kind=kind)
+ return {
+ 'resource': resource,
+ 'definition': item,
+ 'name': metadata.get('name'),
+ 'namespace': metadata.get('namespace')
+ }
+
+ def get(self, body, name=None, namespace=None, **kwargs):
+ if name:
+ raise ValueError('Operations on ResourceList objects do not support the `name` argument')
+ resource_list = self._items_to_resources(body)
+ response = copy.deepcopy(body)
+
+ response['items'] = [
+ item['resource'].get(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict()
+ for item in resource_list['items']
+ ]
+ return ResourceInstance(self, response)
+
+ def delete(self, body, name=None, namespace=None, **kwargs):
+ if name:
+ raise ValueError('Operations on ResourceList objects do not support the `name` argument')
+ resource_list = self._items_to_resources(body)
+ response = copy.deepcopy(body)
+
+ response['items'] = [
+ item['resource'].delete(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict()
+ for item in resource_list['items']
+ ]
+ return ResourceInstance(self, response)
+
+ def verb_mapper(self, verb, body, **kwargs):
+ resource_list = self._items_to_resources(body)
+ response = copy.deepcopy(body)
+ response['items'] = [
+ getattr(item['resource'], verb)(body=item['definition'], **kwargs).to_dict()
+ for item in resource_list['items']
+ ]
+ return ResourceInstance(self, response)
+
+ def create(self, *args, **kwargs):
+ return self.verb_mapper('create', *args, **kwargs)
+
+ def replace(self, *args, **kwargs):
+ return self.verb_mapper('replace', *args, **kwargs)
+
+ def patch(self, *args, **kwargs):
+ return self.verb_mapper('patch', *args, **kwargs)
+
+ def to_dict(self):
+ return {
+ '_type': 'ResourceList',
+ 'group': self.group,
+ 'api_version': self.api_version,
+ 'kind': self.kind,
+ 'base_kind': self.base_kind
+ }
+
+ def __getattr__(self, name):
+ if self.base_resource():
+ return getattr(self.base_resource(), name)
+ return None
+
+
+class Subresource(Resource):
+ """ Represents a subresource of an API resource. This generally includes operations
+ like scale, as well as status objects for an instantiated resource
+ """
+
+ def __init__(self, parent, **kwargs):
+ self.parent = parent
+ self.prefix = parent.prefix
+ self.group = parent.group
+ self.api_version = parent.api_version
+ self.kind = kwargs.pop('kind')
+ self.name = kwargs.pop('name')
+ self.subresource = kwargs.pop('subresource', None) or self.name.split('/')[1]
+ self.namespaced = kwargs.pop('namespaced', False)
+ self.verbs = kwargs.pop('verbs', None)
+ self.extra_args = kwargs
+
+ #TODO(fabianvf): Determine proper way to handle differences between resources + subresources
+ def create(self, body=None, name=None, namespace=None, **kwargs):
+ name = name or body.get('metadata', {}).get('name')
+ body = self.parent.client.serialize_body(body)
+ if self.parent.namespaced:
+ namespace = self.parent.client.ensure_namespace(self.parent, namespace, body)
+ path = self.path(name=name, namespace=namespace)
+ return self.parent.client.request('post', path, body=body, **kwargs)
+
+ @property
+ def urls(self):
+ full_prefix = '{}/{}'.format(self.prefix, self.group_version)
+ return {
+ 'full': '/{}/{}/{{name}}/{}'.format(full_prefix, self.parent.name, self.subresource),
+ 'namespaced_full': '/{}/namespaces/{{namespace}}/{}/{{name}}/{}'.format(full_prefix, self.parent.name, self.subresource)
+ }
+
+ def __getattr__(self, name):
+ return partial(getattr(self.parent.client, name), self)
+
+ def to_dict(self):
+ d = {
+ 'kind': self.kind,
+ 'name': self.name,
+ 'subresource': self.subresource,
+ 'namespaced': self.namespaced,
+ 'verbs': self.verbs
+ }
+ d.update(self.extra_args)
+ return d
+
+
+class ResourceInstance(object):
+ """ A parsed instance of an API resource. It exists solely to
+ ease interaction with API objects by allowing attributes to
+ be accessed with '.' notation.
+ """
+
+ def __init__(self, client, instance):
+ self.client = client
+ # If we have a list of resources, then set the apiVersion and kind of
+ # each resource in 'items'
+ kind = instance['kind']
+ if kind.endswith('List') and 'items' in instance:
+ kind = instance['kind'][:-4]
+ for item in instance['items']:
+ if 'apiVersion' not in item:
+ item['apiVersion'] = instance['apiVersion']
+ if 'kind' not in item:
+ item['kind'] = kind
+
+ self.attributes = self.__deserialize(instance)
+ self.__initialised = True
+
+ def __deserialize(self, field):
+ if isinstance(field, dict):
+ return ResourceField(params={
+ k: self.__deserialize(v) for k, v in field.items()
+ })
+ elif isinstance(field, (list, tuple)):
+ return [self.__deserialize(item) for item in field]
+ else:
+ return field
+
+ def __serialize(self, field):
+ if isinstance(field, ResourceField):
+ return {
+ k: self.__serialize(v) for k, v in field.__dict__.items()
+ }
+ elif isinstance(field, (list, tuple)):
+ return [self.__serialize(item) for item in field]
+ elif isinstance(field, ResourceInstance):
+ return field.to_dict()
+ else:
+ return field
+
+ def to_dict(self):
+ return self.__serialize(self.attributes)
+
+ def to_str(self):
+ return repr(self)
+
+ def __repr__(self):
+ return "ResourceInstance[{}]:\n {}".format(
+ self.attributes.kind,
+ ' '.join(yaml.safe_dump(self.to_dict()).splitlines(True))
+ )
+
+ def __getattr__(self, name):
+ if not '_ResourceInstance__initialised' in self.__dict__:
+ return super(ResourceInstance, self).__getattr__(name)
+ return getattr(self.attributes, name)
+
+ def __setattr__(self, name, value):
+ if not '_ResourceInstance__initialised' in self.__dict__:
+ return super(ResourceInstance, self).__setattr__(name, value)
+ elif name in self.__dict__:
+ return super(ResourceInstance, self).__setattr__(name, value)
+ else:
+ self.attributes[name] = value
+
+ def __getitem__(self, name):
+ return self.attributes[name]
+
+ def __setitem__(self, name, value):
+ self.attributes[name] = value
+
+ def __dir__(self):
+ return dir(type(self)) + list(self.attributes.__dict__.keys())
+
+
+class ResourceField(object):
+ """ A parsed instance of an API resource attribute. It exists
+ solely to ease interaction with API objects by allowing
+ attributes to be accessed with '.' notation
+ """
+
+ def __init__(self, params):
+ self.__dict__.update(**params)
+
+ def __repr__(self):
+ return pformat(self.__dict__)
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __getitem__(self, name):
+ return self.__dict__.get(name)
+
+ # Here resource.items will return items if available or resource.__dict__.items function if not
+ # resource.get will call resource.__dict__.get after attempting resource.__dict__.get('get')
+ def __getattr__(self, name):
+ return self.__dict__.get(name, getattr(self.__dict__, name, None))
+
+ def __setattr__(self, name, value):
+ self.__dict__[name] = value
+
+ def __dir__(self):
+ return dir(type(self)) + list(self.__dict__.keys())
+
+ def __iter__(self):
+ for k, v in self.__dict__.items():
+ yield (k, v)
+
+ def to_dict(self):
+ return self.__serialize(self)
+
+ def __serialize(self, field):
+ if isinstance(field, ResourceField):
+ return {
+ k: self.__serialize(v) for k, v in field.__dict__.items()
+ }
+ if isinstance(field, (list, tuple)):
+ return [self.__serialize(item) for item in field]
+ return field
diff --git a/contrib/python/kubernetes/kubernetes/leaderelection/__init__.py b/contrib/python/kubernetes/kubernetes/leaderelection/__init__.py
new file mode 100644
index 0000000000..37da225cfa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/leaderelection/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/kubernetes/kubernetes/leaderelection/electionconfig.py b/contrib/python/kubernetes/kubernetes/leaderelection/electionconfig.py
new file mode 100644
index 0000000000..7b0db639b4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/leaderelection/electionconfig.py
@@ -0,0 +1,59 @@
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import logging
+logging.basicConfig(level=logging.INFO)
+
+
+class Config:
+ # Validate config, exit if an error is detected
+ def __init__(self, lock, lease_duration, renew_deadline, retry_period, onstarted_leading, onstopped_leading):
+ self.jitter_factor = 1.2
+
+ if lock is None:
+ sys.exit("lock cannot be None")
+ self.lock = lock
+
+ if lease_duration <= renew_deadline:
+ sys.exit("lease_duration must be greater than renew_deadline")
+
+ if renew_deadline <= self.jitter_factor * retry_period:
+ sys.exit("renewDeadline must be greater than retry_period*jitter_factor")
+
+ if lease_duration < 1:
+ sys.exit("lease_duration must be greater than one")
+
+ if renew_deadline < 1:
+ sys.exit("renew_deadline must be greater than one")
+
+ if retry_period < 1:
+ sys.exit("retry_period must be greater than one")
+
+ self.lease_duration = lease_duration
+ self.renew_deadline = renew_deadline
+ self.retry_period = retry_period
+
+ if onstarted_leading is None:
+ sys.exit("callback onstarted_leading cannot be None")
+ self.onstarted_leading = onstarted_leading
+
+ if onstopped_leading is None:
+ self.onstopped_leading = self.on_stoppedleading_callback
+ else:
+ self.onstopped_leading = onstopped_leading
+
+ # Default callback for when the current candidate if a leader, stops leading
+ def on_stoppedleading_callback(self):
+        logging.info("{} stopped leading".format(self.lock.identity))
diff --git a/contrib/python/kubernetes/kubernetes/leaderelection/leaderelection.py b/contrib/python/kubernetes/kubernetes/leaderelection/leaderelection.py
new file mode 100644
index 0000000000..a707fbaccd
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/leaderelection/leaderelection.py
@@ -0,0 +1,191 @@
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import sys
+import time
+import json
+import threading
+from .leaderelectionrecord import LeaderElectionRecord
+import logging
+# if condition to be removed when support for python2 will be removed
+if sys.version_info > (3, 0):
+ from http import HTTPStatus
+else:
+ import httplib
+logging.basicConfig(level=logging.INFO)
+
+"""
+This package implements leader election using an annotation in a Kubernetes object.
+The onstarted_leading function is run in a thread and when it returns, if it does
+it might not be safe to run it again in a process.
+
+At first all candidates are considered followers. The one to create a lock or update
+an existing lock first becomes the leader and remains so until it keeps renewing its
+lease.
+"""
+
+
+class LeaderElection:
+ def __init__(self, election_config):
+ if election_config is None:
+ sys.exit("argument config not passed")
+
+ # Latest record observed in the created lock object
+ self.observed_record = None
+
+ # The configuration set for this candidate
+ self.election_config = election_config
+
+ # Latest update time of the lock
+ self.observed_time_milliseconds = 0
+
+ # Point of entry to Leader election
+ def run(self):
+ # Try to create/ acquire a lock
+ if self.acquire():
+ logging.info("{} successfully acquired lease".format(self.election_config.lock.identity))
+
+ # Start leading and call OnStartedLeading()
+            leader_thread = threading.Thread(target=self.election_config.onstarted_leading, daemon=True)
+            leader_thread.start()
+
+ self.renew_loop()
+
+ # Failed to update lease, run OnStoppedLeading callback
+ self.election_config.onstopped_leading()
+
+ def acquire(self):
+ # Follower
+ logging.info("{} is a follower".format(self.election_config.lock.identity))
+ retry_period = self.election_config.retry_period
+
+ while True:
+ succeeded = self.try_acquire_or_renew()
+
+ if succeeded:
+ return True
+
+ time.sleep(retry_period)
+
+ def renew_loop(self):
+ # Leader
+ logging.info("Leader has entered renew loop and will try to update lease continuously")
+
+ retry_period = self.election_config.retry_period
+ renew_deadline = self.election_config.renew_deadline * 1000
+
+ while True:
+ timeout = int(time.time() * 1000) + renew_deadline
+ succeeded = False
+
+ while int(time.time() * 1000) < timeout:
+ succeeded = self.try_acquire_or_renew()
+
+ if succeeded:
+ break
+ time.sleep(retry_period)
+
+ if succeeded:
+ time.sleep(retry_period)
+ continue
+
+ # failed to renew, return
+ return
+
+ def try_acquire_or_renew(self):
+ now_timestamp = time.time()
+ now = datetime.datetime.fromtimestamp(now_timestamp)
+
+ # Check if lock is created
+ lock_status, old_election_record = self.election_config.lock.get(self.election_config.lock.name,
+ self.election_config.lock.namespace)
+
+ # create a default Election record for this candidate
+ leader_election_record = LeaderElectionRecord(self.election_config.lock.identity,
+ str(self.election_config.lease_duration), str(now), str(now))
+
+ # A lock is not created with that name, try to create one
+ if not lock_status:
+ # To be removed when support for python2 will be removed
+ if sys.version_info > (3, 0):
+ if json.loads(old_election_record.body)['code'] != HTTPStatus.NOT_FOUND:
+ logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name,
+ old_election_record.reason))
+ return False
+ else:
+ if json.loads(old_election_record.body)['code'] != httplib.NOT_FOUND:
+ logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name,
+ old_election_record.reason))
+ return False
+
+ logging.info("{} is trying to create a lock".format(leader_election_record.holder_identity))
+ create_status = self.election_config.lock.create(name=self.election_config.lock.name,
+ namespace=self.election_config.lock.namespace,
+ election_record=leader_election_record)
+
+ if create_status is False:
+ logging.info("{} Failed to create lock".format(leader_election_record.holder_identity))
+ return False
+
+ self.observed_record = leader_election_record
+ self.observed_time_milliseconds = int(time.time() * 1000)
+ return True
+
+ # A lock exists with that name
+ # Validate old_election_record
+ if old_election_record is None:
+ # try to update lock with proper annotation and election record
+ return self.update_lock(leader_election_record)
+
+ if (old_election_record.holder_identity is None or old_election_record.lease_duration is None
+ or old_election_record.acquire_time is None or old_election_record.renew_time is None):
+ # try to update lock with proper annotation and election record
+ return self.update_lock(leader_election_record)
+
+ # Report transitions
+ if self.observed_record and self.observed_record.holder_identity != old_election_record.holder_identity:
+ logging.info("Leader has switched to {}".format(old_election_record.holder_identity))
+
+ if self.observed_record is None or old_election_record.__dict__ != self.observed_record.__dict__:
+ self.observed_record = old_election_record
+ self.observed_time_milliseconds = int(time.time() * 1000)
+
+ # If This candidate is not the leader and lease duration is yet to finish
+ if (self.election_config.lock.identity != self.observed_record.holder_identity
+ and self.observed_time_milliseconds + self.election_config.lease_duration * 1000 > int(now_timestamp * 1000)):
+ logging.info("yet to finish lease_duration, lease held by {} and has not expired".format(old_election_record.holder_identity))
+ return False
+
+ # If this candidate is the Leader
+ if self.election_config.lock.identity == self.observed_record.holder_identity:
+ # Leader updates renewTime, but keeps acquire_time unchanged
+ leader_election_record.acquire_time = self.observed_record.acquire_time
+
+ return self.update_lock(leader_election_record)
+
+ def update_lock(self, leader_election_record):
+ # Update object with latest election record
+ update_status = self.election_config.lock.update(self.election_config.lock.name,
+ self.election_config.lock.namespace,
+ leader_election_record)
+
+ if update_status is False:
+ logging.info("{} failed to acquire lease".format(leader_election_record.holder_identity))
+ return False
+
+ self.observed_record = leader_election_record
+ self.observed_time_milliseconds = int(time.time() * 1000)
+ logging.info("leader {} has successfully acquired lease".format(leader_election_record.holder_identity))
+ return True
diff --git a/contrib/python/kubernetes/kubernetes/leaderelection/leaderelectionrecord.py b/contrib/python/kubernetes/kubernetes/leaderelection/leaderelectionrecord.py
new file mode 100644
index 0000000000..ebb550d4d1
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/leaderelection/leaderelectionrecord.py
@@ -0,0 +1,22 @@
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class LeaderElectionRecord:
+ # Annotation used in the lock object
+ def __init__(self, holder_identity, lease_duration, acquire_time, renew_time):
+ self.holder_identity = holder_identity
+ self.lease_duration = lease_duration
+ self.acquire_time = acquire_time
+ self.renew_time = renew_time
diff --git a/contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/__init__.py b/contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/__init__.py
new file mode 100644
index 0000000000..37da225cfa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/configmaplock.py b/contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/configmaplock.py
new file mode 100644
index 0000000000..a4ccf49d27
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/leaderelection/resourcelock/configmaplock.py
@@ -0,0 +1,129 @@
+# Copyright 2021 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kubernetes.client.rest import ApiException
+from kubernetes import client, config
+from kubernetes.client.api_client import ApiClient
+from ..leaderelectionrecord import LeaderElectionRecord
+import json
+import logging
+logging.basicConfig(level=logging.INFO)
+
+
+class ConfigMapLock:
+ def __init__(self, name, namespace, identity):
+ """
+ :param name: name of the lock
+ :param namespace: namespace
+ :param identity: A unique identifier that the candidate is using
+ """
+ self.api_instance = client.CoreV1Api()
+ self.leader_electionrecord_annotationkey = 'control-plane.alpha.kubernetes.io/leader'
+ self.name = name
+ self.namespace = namespace
+ self.identity = str(identity)
+ self.configmap_reference = None
+ self.lock_record = {
+ 'holderIdentity': None,
+ 'leaseDurationSeconds': None,
+ 'acquireTime': None,
+ 'renewTime': None
+ }
+
+ # get returns the election record from a ConfigMap Annotation
+ def get(self, name, namespace):
+ """
+ :param name: Name of the configmap object information to get
+ :param namespace: Namespace in which the configmap object is to be searched
+ :return: 'True, election record' if object found else 'False, exception response'
+ """
+ try:
+ api_response = self.api_instance.read_namespaced_config_map(name, namespace)
+
+ # If an annotation does not exist - add the leader_electionrecord_annotationkey
+ annotations = api_response.metadata.annotations
+ if annotations is None or annotations == '':
+ api_response.metadata.annotations = {self.leader_electionrecord_annotationkey: ''}
+ self.configmap_reference = api_response
+ return True, None
+
+ # If an annotation exists but, the leader_electionrecord_annotationkey does not then add it as a key
+ if not annotations.get(self.leader_electionrecord_annotationkey):
+ api_response.metadata.annotations = {self.leader_electionrecord_annotationkey: ''}
+ self.configmap_reference = api_response
+ return True, None
+
+ lock_record = self.get_lock_object(json.loads(annotations[self.leader_electionrecord_annotationkey]))
+
+ self.configmap_reference = api_response
+ return True, lock_record
+ except ApiException as e:
+ return False, e
+
+ def create(self, name, namespace, election_record):
+ """
+        :param election_record: Annotation string
+ :param name: Name of the configmap object to be created
+ :param namespace: Namespace in which the configmap object is to be created
+ :return: 'True' if object is created else 'False' if failed
+ """
+ body = client.V1ConfigMap(
+ metadata={"name": name,
+ "annotations": {self.leader_electionrecord_annotationkey: json.dumps(self.get_lock_dict(election_record))}})
+
+ try:
+ api_response = self.api_instance.create_namespaced_config_map(namespace, body, pretty=True)
+ return True
+ except ApiException as e:
+ logging.info("Failed to create lock as {}".format(e))
+ return False
+
+ def update(self, name, namespace, updated_record):
+ """
+ :param name: name of the lock to be updated
+ :param namespace: namespace the lock is in
+ :param updated_record: the updated election record
+ :return: True if update is successful False if it fails
+ """
+ try:
+ # Set the updated record
+ self.configmap_reference.metadata.annotations[self.leader_electionrecord_annotationkey] = json.dumps(self.get_lock_dict(updated_record))
+ api_response = self.api_instance.replace_namespaced_config_map(name=name, namespace=namespace,
+ body=self.configmap_reference)
+ return True
+ except ApiException as e:
+ logging.info("Failed to update lock as {}".format(e))
+ return False
+
+ def get_lock_object(self, lock_record):
+ leader_election_record = LeaderElectionRecord(None, None, None, None)
+
+ if lock_record.get('holderIdentity'):
+ leader_election_record.holder_identity = lock_record['holderIdentity']
+ if lock_record.get('leaseDurationSeconds'):
+ leader_election_record.lease_duration = lock_record['leaseDurationSeconds']
+ if lock_record.get('acquireTime'):
+ leader_election_record.acquire_time = lock_record['acquireTime']
+ if lock_record.get('renewTime'):
+ leader_election_record.renew_time = lock_record['renewTime']
+
+ return leader_election_record
+
+ def get_lock_dict(self, leader_election_record):
+ self.lock_record['holderIdentity'] = leader_election_record.holder_identity
+ self.lock_record['leaseDurationSeconds'] = leader_election_record.lease_duration
+ self.lock_record['acquireTime'] = leader_election_record.acquire_time
+ self.lock_record['renewTime'] = leader_election_record.renew_time
+
+ return self.lock_record \ No newline at end of file
diff --git a/contrib/python/kubernetes/kubernetes/stream/__init__.py b/contrib/python/kubernetes/kubernetes/stream/__init__.py
new file mode 100644
index 0000000000..cd34652811
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/stream/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .stream import stream, portforward
diff --git a/contrib/python/kubernetes/kubernetes/stream/stream.py b/contrib/python/kubernetes/kubernetes/stream/stream.py
new file mode 100644
index 0000000000..115a899b50
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/stream/stream.py
@@ -0,0 +1,41 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+
+from . import ws_client
+
+
+def _websocket_request(websocket_request, force_kwargs, api_method, *args, **kwargs):
+ """Override the ApiClient.request method with an alternative websocket based
+ method and call the supplied Kubernetes API method with that in place."""
+ if force_kwargs:
+ for kwarg, value in force_kwargs.items():
+ kwargs[kwarg] = value
+ api_client = api_method.__self__.api_client
+ # old generated code's api client has config. new ones has configuration
+ try:
+ configuration = api_client.configuration
+ except AttributeError:
+ configuration = api_client.config
+ prev_request = api_client.request
+ try:
+ api_client.request = functools.partial(websocket_request, configuration)
+ return api_method(*args, **kwargs)
+ finally:
+ api_client.request = prev_request
+
+
+stream = functools.partial(_websocket_request, ws_client.websocket_call, None)
+portforward = functools.partial(_websocket_request, ws_client.portforward_call, {'_preload_content':False})
diff --git a/contrib/python/kubernetes/kubernetes/stream/ws_client.py b/contrib/python/kubernetes/kubernetes/stream/ws_client.py
new file mode 100644
index 0000000000..5ec8e7d4aa
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/stream/ws_client.py
@@ -0,0 +1,562 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+
+from kubernetes.client.rest import ApiException, ApiValueError
+
+import certifi
+import collections
+import select
+import socket
+import ssl
+import threading
+import time
+
+import six
+import yaml
+
+from six.moves.urllib.parse import urlencode, urlparse, urlunparse
+from six import StringIO
+
+from websocket import WebSocket, ABNF, enableTrace
+from base64 import urlsafe_b64decode
+from requests.utils import should_bypass_proxies
+
+STDIN_CHANNEL = 0
+STDOUT_CHANNEL = 1
+STDERR_CHANNEL = 2
+ERROR_CHANNEL = 3
+RESIZE_CHANNEL = 4
+
+class _IgnoredIO:
+ def write(self, _x):
+ pass
+
+ def getvalue(self):
+ raise TypeError("Tried to read_all() from a WSClient configured to not capture. Did you mean `capture_all=True`?")
+
+
+class WSClient:
+ def __init__(self, configuration, url, headers, capture_all):
+ """A websocket client with support for channels.
+
+ Exec command uses different channels for different streams. for
+ example, 0 is stdin, 1 is stdout and 2 is stderr. Some other API calls
+ like port forwarding can forward different pods' streams to different
+ channels.
+ """
+ self._connected = False
+ self._channels = {}
+ if capture_all:
+ self._all = StringIO()
+ else:
+ self._all = _IgnoredIO()
+ self.sock = create_websocket(configuration, url, headers)
+ self._connected = True
+ self._returncode = None
+
+ def peek_channel(self, channel, timeout=0):
+ """Peek a channel and return part of the input,
+ empty string otherwise."""
+ self.update(timeout=timeout)
+ if channel in self._channels:
+ return self._channels[channel]
+ return ""
+
+ def read_channel(self, channel, timeout=0):
+ """Read data from a channel."""
+ if channel not in self._channels:
+ ret = self.peek_channel(channel, timeout)
+ else:
+ ret = self._channels[channel]
+ if channel in self._channels:
+ del self._channels[channel]
+ return ret
+
+ def readline_channel(self, channel, timeout=None):
+ """Read a line from a channel."""
+ if timeout is None:
+ timeout = float("inf")
+ start = time.time()
+ while self.is_open() and time.time() - start < timeout:
+ if channel in self._channels:
+ data = self._channels[channel]
+ if "\n" in data:
+ index = data.find("\n")
+ ret = data[:index]
+ data = data[index+1:]
+ if data:
+ self._channels[channel] = data
+ else:
+ del self._channels[channel]
+ return ret
+ self.update(timeout=(timeout - time.time() + start))
+
+ def write_channel(self, channel, data):
+ """Write data to a channel."""
+ # check if we're writing binary data or not
+ binary = six.PY3 and type(data) == six.binary_type
+ opcode = ABNF.OPCODE_BINARY if binary else ABNF.OPCODE_TEXT
+
+ channel_prefix = chr(channel)
+ if binary:
+ channel_prefix = six.binary_type(channel_prefix, "ascii")
+
+ payload = channel_prefix + data
+ self.sock.send(payload, opcode=opcode)
+
+ def peek_stdout(self, timeout=0):
+ """Same as peek_channel with channel=1."""
+ return self.peek_channel(STDOUT_CHANNEL, timeout=timeout)
+
+ def read_stdout(self, timeout=None):
+ """Same as read_channel with channel=1."""
+ return self.read_channel(STDOUT_CHANNEL, timeout=timeout)
+
+ def readline_stdout(self, timeout=None):
+ """Same as readline_channel with channel=1."""
+ return self.readline_channel(STDOUT_CHANNEL, timeout=timeout)
+
+ def peek_stderr(self, timeout=0):
+ """Same as peek_channel with channel=2."""
+ return self.peek_channel(STDERR_CHANNEL, timeout=timeout)
+
+ def read_stderr(self, timeout=None):
+ """Same as read_channel with channel=2."""
+ return self.read_channel(STDERR_CHANNEL, timeout=timeout)
+
+ def readline_stderr(self, timeout=None):
+ """Same as readline_channel with channel=2."""
+ return self.readline_channel(STDERR_CHANNEL, timeout=timeout)
+
+ def read_all(self):
+ """Return buffered data received on stdout and stderr channels.
+        This is useful for non-interactive calls where a set of commands is
+        passed to the API call and their results are needed after the call concludes.
+ Should be called after run_forever() or update()
+
+ TODO: Maybe we can process this and return a more meaningful map with
+ channels mapped for each input.
+ """
+ out = self._all.getvalue()
+ self._all = self._all.__class__()
+ self._channels = {}
+ return out
+
+ def is_open(self):
+ """True if the connection is still alive."""
+ return self._connected
+
+ def write_stdin(self, data):
+ """The same as write_channel with channel=0."""
+ self.write_channel(STDIN_CHANNEL, data)
+
+ def update(self, timeout=0):
+ """Update channel buffers with at most one complete frame of input."""
+ if not self.is_open():
+ return
+ if not self.sock.connected:
+ self._connected = False
+ return
+
+ # The options here are:
+ # select.select() - this will work on most OS, however, it has a
+ # limitation of only able to read fd numbers up to 1024.
+ # i.e. does not scale well. This was the original
+ # implementation.
+ # select.poll() - this will work on most unix based OS, but not as
+ # efficient as epoll. Will work for fd numbers above 1024.
+ # select.epoll() - newest and most efficient way of polling.
+ # However, only works on linux.
+ if hasattr(select, "poll"):
+ poll = select.poll()
+ poll.register(self.sock.sock, select.POLLIN)
+ if timeout is not None:
+ timeout *= 1_000 # poll method uses milliseconds as the time unit
+ r = poll.poll(timeout)
+ poll.unregister(self.sock.sock)
+ else:
+ r, _, _ = select.select(
+ (self.sock.sock, ), (), (), timeout)
+
+ if r:
+ op_code, frame = self.sock.recv_data_frame(True)
+ if op_code == ABNF.OPCODE_CLOSE:
+ self._connected = False
+ return
+ elif op_code == ABNF.OPCODE_BINARY or op_code == ABNF.OPCODE_TEXT:
+ data = frame.data
+ if six.PY3:
+ data = data.decode("utf-8", "replace")
+ if len(data) > 1:
+ channel = ord(data[0])
+ data = data[1:]
+ if data:
+ if channel in [STDOUT_CHANNEL, STDERR_CHANNEL]:
+ # keeping all messages in the order they received
+ # for non-blocking call.
+ self._all.write(data)
+ if channel not in self._channels:
+ self._channels[channel] = data
+ else:
+ self._channels[channel] += data
+
+ def run_forever(self, timeout=None):
+ """Wait till connection is closed or timeout reached. Buffer any input
+ received during this time."""
+ if timeout:
+ start = time.time()
+ while self.is_open() and time.time() - start < timeout:
+ self.update(timeout=(timeout - time.time() + start))
+ else:
+ while self.is_open():
+ self.update(timeout=None)
+ @property
+ def returncode(self):
+ """
+        The return code. A None value indicates that the process hasn't
+ terminated yet.
+ """
+ if self.is_open():
+ return None
+ else:
+ if self._returncode is None:
+ err = self.read_channel(ERROR_CHANNEL)
+ err = yaml.safe_load(err)
+ if err['status'] == "Success":
+ self._returncode = 0
+ else:
+ self._returncode = int(err['details']['causes'][0]['message'])
+ return self._returncode
+
+ def close(self, **kwargs):
+ """
+ close websocket connection.
+ """
+ self._connected = False
+ if self.sock:
+ self.sock.close(**kwargs)
+
+
+WSResponse = collections.namedtuple('WSResponse', ['data'])
+
+
+class PortForward:
+ def __init__(self, websocket, ports):
+ """A websocket client with support for port forwarding.
+
+ Port Forward command sends on 2 channels per port, a read/write
+ data channel and a read only error channel. Both channels are sent an
+ initial frame containing the port number that channel is associated with.
+ """
+
+ self.websocket = websocket
+ self.local_ports = {}
+ for ix, port_number in enumerate(ports):
+ self.local_ports[port_number] = self._Port(ix, port_number)
+ # There is a thread run per PortForward instance which performs the translation between the
+ # raw socket data sent by the python application and the websocket protocol. This thread
+ # terminates after either side has closed all ports, and after flushing all pending data.
+ proxy = threading.Thread(
+ name="Kubernetes port forward proxy: %s" % ', '.join([str(port) for port in ports]),
+ target=self._proxy
+ )
+ proxy.daemon = True
+ proxy.start()
+
+ @property
+ def connected(self):
+ return self.websocket.connected
+
+ def socket(self, port_number):
+ if port_number not in self.local_ports:
+ raise ValueError("Invalid port number")
+ return self.local_ports[port_number].socket
+
+ def error(self, port_number):
+ if port_number not in self.local_ports:
+ raise ValueError("Invalid port number")
+ return self.local_ports[port_number].error
+
+ def close(self):
+ for port in self.local_ports.values():
+ port.socket.close()
+
+ class _Port:
+ def __init__(self, ix, port_number):
+ # The remote port number
+ self.port_number = port_number
+ # The websocket channel byte number for this port
+ self.channel = six.int2byte(ix * 2)
+ # A socket pair is created to provide a means of translating the data flow
+ # between the python application and the kubernetes websocket. The self.python
+ # half of the socket pair is used by the _proxy method to receive and send data
+ # to the running python application.
+ s, self.python = socket.socketpair()
+ # The self.socket half of the pair is used by the python application to send
+ # and receive data to the eventual pod port. It is wrapped in the _Socket class
+ # because a socket pair is an AF_UNIX socket, not a AF_INET socket. This allows
+ # intercepting setting AF_INET socket options that would error against an AF_UNIX
+ # socket.
+ self.socket = self._Socket(s)
+ # Data accumulated from the websocket to be sent to the python application.
+ self.data = b''
+ # All data sent from kubernetes on the port error channel.
+ self.error = None
+
+ class _Socket:
+ def __init__(self, socket):
+ self._socket = socket
+
+ def __getattr__(self, name):
+ return getattr(self._socket, name)
+
+ def setsockopt(self, level, optname, value):
+ # The following socket option is not valid with a socket created from socketpair,
+ # and is set by the http.client.HTTPConnection.connect method.
+ if level == socket.IPPROTO_TCP and optname == socket.TCP_NODELAY:
+ return
+ self._socket.setsockopt(level, optname, value)
+
+ # Proxy all socket data between the python code and the kubernetes websocket.
+ def _proxy(self):
+ channel_ports = []
+ channel_initialized = []
+ local_ports = {}
+ for port in self.local_ports.values():
+ # Setup the data channel for this port number
+ channel_ports.append(port)
+ channel_initialized.append(False)
+ # Setup the error channel for this port number
+ channel_ports.append(port)
+ channel_initialized.append(False)
+ port.python.setblocking(True)
+ local_ports[port.python] = port
+ # The data to send on the websocket socket
+ kubernetes_data = b''
+ while True:
+ rlist = [] # List of sockets to read from
+ wlist = [] # List of sockets to write to
+ if self.websocket.connected:
+ rlist.append(self.websocket)
+ if kubernetes_data:
+ wlist.append(self.websocket)
+ local_all_closed = True
+ for port in self.local_ports.values():
+ if port.python.fileno() != -1:
+ if self.websocket.connected:
+ rlist.append(port.python)
+ if port.data:
+ wlist.append(port.python)
+ local_all_closed = False
+ else:
+ if port.data:
+ wlist.append(port.python)
+ local_all_closed = False
+ else:
+ port.python.close()
+ if local_all_closed and not (self.websocket.connected and kubernetes_data):
+ self.websocket.close()
+ return
+ r, w, _ = select.select(rlist, wlist, [])
+ for sock in r:
+ if sock == self.websocket:
+ pending = True
+ while pending:
+ opcode, frame = self.websocket.recv_data_frame(True)
+ if opcode == ABNF.OPCODE_BINARY:
+ if not frame.data:
+ raise RuntimeError("Unexpected frame data size")
+ channel = six.byte2int(frame.data)
+ if channel >= len(channel_ports):
+ raise RuntimeError("Unexpected channel number: %s" % channel)
+ port = channel_ports[channel]
+ if channel_initialized[channel]:
+ if channel % 2:
+ if port.error is None:
+ port.error = ''
+ port.error += frame.data[1:].decode()
+ port.python.close()
+ else:
+ port.data += frame.data[1:]
+ else:
+ if len(frame.data) != 3:
+ raise RuntimeError(
+ "Unexpected initial channel frame data size"
+ )
+ port_number = six.byte2int(frame.data[1:2]) + (six.byte2int(frame.data[2:3]) * 256)
+ if port_number != port.port_number:
+ raise RuntimeError(
+ "Unexpected port number in initial channel frame: %s" % port_number
+ )
+ channel_initialized[channel] = True
+ elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG, ABNF.OPCODE_CLOSE):
+ raise RuntimeError("Unexpected websocket opcode: %s" % opcode)
+ if not (isinstance(self.websocket.sock, ssl.SSLSocket) and self.websocket.sock.pending()):
+ pending = False
+ else:
+ port = local_ports[sock]
+ if port.python.fileno() != -1:
+ data = port.python.recv(1024 * 1024)
+ if data:
+ kubernetes_data += ABNF.create_frame(
+ port.channel + data,
+ ABNF.OPCODE_BINARY,
+ ).format()
+ else:
+ port.python.close()
+ for sock in w:
+ if sock == self.websocket:
+ sent = self.websocket.sock.send(kubernetes_data)
+ kubernetes_data = kubernetes_data[sent:]
+ else:
+ port = local_ports[sock]
+ if port.python.fileno() != -1:
+ sent = port.python.send(port.data)
+ port.data = port.data[sent:]
+
+
+def get_websocket_url(url, query_params=None):
+ parsed_url = urlparse(url)
+ parts = list(parsed_url)
+ if parsed_url.scheme == 'http':
+ parts[0] = 'ws'
+ elif parsed_url.scheme == 'https':
+ parts[0] = 'wss'
+ if query_params:
+ query = []
+ for key, value in query_params:
+ if key == 'command' and isinstance(value, list):
+ for command in value:
+ query.append((key, command))
+ else:
+ query.append((key, value))
+ if query:
+ parts[4] = urlencode(query)
+ return urlunparse(parts)
+
+
+def create_websocket(configuration, url, headers=None):
+ enableTrace(False)
+
+ # We just need to pass the Authorization, ignore all the other
+ # http headers we get from the generated code
+ header = []
+ if headers and 'authorization' in headers:
+ header.append("authorization: %s" % headers['authorization'])
+ if headers and 'sec-websocket-protocol' in headers:
+ header.append("sec-websocket-protocol: %s" %
+ headers['sec-websocket-protocol'])
+ else:
+ header.append("sec-websocket-protocol: v4.channel.k8s.io")
+
+ if url.startswith('wss://') and configuration.verify_ssl:
+ ssl_opts = {
+ 'cert_reqs': ssl.CERT_REQUIRED,
+ 'ca_certs': configuration.ssl_ca_cert or certifi.where(),
+ }
+ if configuration.assert_hostname is not None:
+ ssl_opts['check_hostname'] = configuration.assert_hostname
+ else:
+ ssl_opts = {'cert_reqs': ssl.CERT_NONE}
+
+ if configuration.cert_file:
+ ssl_opts['certfile'] = configuration.cert_file
+ if configuration.key_file:
+ ssl_opts['keyfile'] = configuration.key_file
+ if configuration.tls_server_name:
+ ssl_opts['server_hostname'] = configuration.tls_server_name
+
+ websocket = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False)
+ connect_opt = {
+ 'header': header
+ }
+
+ if configuration.proxy or configuration.proxy_headers:
+ connect_opt = websocket_proxycare(connect_opt, configuration, url, headers)
+
+ websocket.connect(url, **connect_opt)
+ return websocket
+
+def websocket_proxycare(connect_opt, configuration, url, headers):
+ """ An internal function to be called in api-client when a websocket
+ create is requested.
+ """
+ if configuration.no_proxy:
+ connect_opt.update({ 'http_no_proxy': configuration.no_proxy.split(',') })
+
+ if configuration.proxy:
+ proxy_url = urlparse(configuration.proxy)
+ connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port})
+ if configuration.proxy_headers:
+ for key,value in configuration.proxy_headers.items():
+ if key == 'proxy-authorization' and value.startswith('Basic'):
+ b64value = value.split()[1]
+ auth = urlsafe_b64decode(b64value).decode().split(':')
+ connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) })
+ return(connect_opt)
+
+
+def websocket_call(configuration, _method, url, **kwargs):
+ """An internal function to be called in api-client when a websocket
+ connection is required. method, url, and kwargs are the parameters of
+ apiClient.request method."""
+
+ url = get_websocket_url(url, kwargs.get("query_params"))
+ headers = kwargs.get("headers")
+ _request_timeout = kwargs.get("_request_timeout", 60)
+ _preload_content = kwargs.get("_preload_content", True)
+ capture_all = kwargs.get("capture_all", True)
+
+ try:
+ client = WSClient(configuration, url, headers, capture_all)
+ if not _preload_content:
+ return client
+ client.run_forever(timeout=_request_timeout)
+ return WSResponse('%s' % ''.join(client.read_all()))
+ except (Exception, KeyboardInterrupt, SystemExit) as e:
+ raise ApiException(status=0, reason=str(e))
+
+
+def portforward_call(configuration, _method, url, **kwargs):
+ """An internal function to be called in api-client when a websocket
+ connection is required for port forwarding. args and kwargs are the
+ parameters of apiClient.request method."""
+
+ query_params = kwargs.get("query_params")
+
+ ports = []
+ for param, value in query_params:
+ if param == 'ports':
+ for port in value.split(','):
+ try:
+ port_number = int(port)
+ except ValueError:
+ raise ApiValueError("Invalid port number: %s" % port)
+ if not (0 < port_number < 65536):
+ raise ApiValueError("Port number must be between 0 and 65536: %s" % port)
+ if port_number in ports:
+ raise ApiValueError("Duplicate port numbers: %s" % port)
+ ports.append(port_number)
+ if not ports:
+ raise ApiValueError("Missing required parameter `ports`")
+
+ url = get_websocket_url(url, query_params)
+ headers = kwargs.get("headers")
+
+ try:
+ websocket = create_websocket(configuration, url, headers)
+ return PortForward(websocket, ports)
+ except (Exception, KeyboardInterrupt, SystemExit) as e:
+ raise ApiException(status=0, reason=str(e))
diff --git a/contrib/python/kubernetes/kubernetes/utils/__init__.py b/contrib/python/kubernetes/kubernetes/utils/__init__.py
new file mode 100644
index 0000000000..217d9ccbc4
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/utils/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+from .create_from_yaml import (FailToCreateError, create_from_dict,
+ create_from_yaml, create_from_directory)
+from .quantity import parse_quantity
diff --git a/contrib/python/kubernetes/kubernetes/utils/create_from_yaml.py b/contrib/python/kubernetes/kubernetes/utils/create_from_yaml.py
new file mode 100644
index 0000000000..459c291ef3
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/utils/create_from_yaml.py
@@ -0,0 +1,287 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import re
+import os
+
+import yaml
+
+from kubernetes import client
+
# Regex pair used by create_from_yaml_single_item to convert a CamelCased
# "kind" (e.g. "CustomResourceDefinition") into the snake_case fragment of
# the client method name (e.g. "custom_resource_definition").
UPPER_FOLLOWED_BY_LOWER_RE = re.compile('(.)([A-Z][a-z]+)')
LOWER_OR_NUM_FOLLOWED_BY_UPPER_RE = re.compile('([a-z0-9])([A-Z])')
+
+
def create_from_directory(
        k8s_client,
        yaml_dir=None,
        verbose=False,
        namespace="default",
        **kwargs):
    """Create Kubernetes objects from every file in a directory.

    Each regular file directly inside ``yaml_dir`` (no recursion) is
    processed with :func:`create_from_yaml`; failures are collected so
    that one bad file does not stop the rest.

    Input:
    k8s_client: an ApiClient object, initialized with the client args.
    yaml_dir: string. Contains the path to directory.
    verbose: If True, print confirmation from the create action.
        Default is False.
    namespace: string. Namespace to create all resources in. It must
        already exist; an explicit ``metadata.namespace`` in a manifest
        takes precedence over this argument.

    Returns:
    The list containing the created kubernetes API objects.

    Raises:
    FailToCreateError which holds list of `client.rest.ApiException`
    instances for each object that failed to create.
    """
    # Validate the directory argument up front (guard clauses).
    if not yaml_dir:
        raise ValueError(
            '`yaml_dir` argument must be provided')
    if not os.path.isdir(yaml_dir):
        raise ValueError(
            '`yaml_dir` argument must be a path to directory')

    # Keep only regular files; sub-directories are ignored.
    candidates = (os.path.join(yaml_dir, name)
                  for name in os.listdir(yaml_dir))
    manifest_files = [path for path in candidates if os.path.isfile(path)]
    if not manifest_files:
        raise ValueError(
            '`yaml_dir` contains no files')

    collected_errors = []
    created_objects = []
    for path in manifest_files:
        try:
            created_objects.append(
                create_from_yaml(k8s_client, path,
                                 verbose=verbose,
                                 namespace=namespace,
                                 **kwargs))
        except FailToCreateError as err:
            # Defer: aggregate per-object failures across all files.
            collected_errors.extend(err.api_exceptions)

    if collected_errors:
        raise FailToCreateError(collected_errors)
    return created_objects
+
+
def create_from_yaml(
        k8s_client,
        yaml_file=None,
        yaml_objects=None,
        verbose=False,
        namespace="default",
        **kwargs):
    """Create Kubernetes objects from a YAML file or pre-parsed documents.

    Input:
    k8s_client: an ApiClient object, initialized with the client args.
    yaml_file: string. Contains the path to yaml file.
    yaml_objects: List[dict]. Optional list of YAML objects; used instead
        of reading the `yaml_file`. Default is None.
    verbose: If True, print confirmation from the create action.
        Default is False.
    namespace: string. Namespace to create all resources in. It must
        already exist; an explicit ``metadata.namespace`` in a manifest
        takes precedence over this argument.

    Returns:
    The created kubernetes API objects.

    Raises:
    FailToCreateError which holds list of `client.rest.ApiException`
    instances for each object that failed to create.
    """

    class _Loader(yaml.loader.SafeLoader):
        # SafeLoader minus the implicit resolver for the legacy "=" value
        # tag, which otherwise mis-parses scalar "=" values.
        yaml_implicit_resolvers = \
            yaml.loader.SafeLoader.yaml_implicit_resolvers.copy()
        if "=" in yaml_implicit_resolvers:
            yaml_implicit_resolvers.pop("=")

    def _create_all(documents):
        # Create each document, collecting (not short-circuiting on)
        # per-object API failures.
        errors = []
        created = []
        for document in documents:
            if document is None:
                # Empty YAML documents (e.g. trailing "---") are skipped.
                continue
            try:
                created.append(create_from_dict(k8s_client, document,
                                                verbose,
                                                namespace=namespace,
                                                **kwargs))
            except FailToCreateError as err:
                errors.extend(err.api_exceptions)
        if errors:
            raise FailToCreateError(errors)
        return created

    # Pre-parsed objects take precedence over a file path.
    if yaml_objects:
        return _create_all(yaml_objects)
    if yaml_file:
        with open(os.path.abspath(yaml_file)) as stream:
            return _create_all(yaml.load_all(stream, Loader=_Loader))
    raise ValueError(
        'One of `yaml_file` or `yaml_objects` arguments must be provided')
+
+
def create_from_dict(k8s_client, data, verbose=False, namespace='default',
                     **kwargs):
    """Create Kubernetes object(s) from a single manifest dictionary.

    ``data`` may describe one object, or a ``*List`` object whose
    ``items`` are created individually.

    Input:
    k8s_client: an ApiClient object, initialized with the client args.
    data: a dictionary holding valid kubernetes objects
    verbose: If True, print confirmation from the create action.
        Default is False.
    namespace: string. Namespace to create all resources in. It must
        already exist; an explicit ``metadata.namespace`` in a manifest
        takes precedence over this argument.

    Returns:
    The created kubernetes API objects.

    Raises:
    FailToCreateError which holds list of `client.rest.ApiException`
    instances for each object that failed to create.
    """
    errors = []
    created = []

    manifest_kind = data["kind"]
    if "List" in manifest_kind:
        # Could be "List" or "Pod/Service/...List": create each item.
        item_kind = manifest_kind.replace("List", "")
        for item in data["items"]:
            # Mitigate cases when server returns a xxxList object
            # (see kubernetes-client/python#586): propagate the list's
            # apiVersion/kind onto each item.
            if item_kind != "":
                item["apiVersion"] = data["apiVersion"]
                item["kind"] = item_kind
            try:
                created.append(create_from_yaml_single_item(
                    k8s_client, item, verbose, namespace=namespace,
                    **kwargs))
            except client.rest.ApiException as exc:
                errors.append(exc)
    else:
        # A single (non-List) object.
        try:
            created.append(create_from_yaml_single_item(
                k8s_client, data, verbose, namespace=namespace, **kwargs))
        except client.rest.ApiException as exc:
            errors.append(exc)

    # Surface all accumulated API failures at once.
    if errors:
        raise FailToCreateError(errors)

    return created
+
+
def create_from_yaml_single_item(
        k8s_client, yml_object, verbose=False, **kwargs):
    """Create one Kubernetes object from its dict representation.

    The client API class and its ``create_*`` method are derived from the
    object's ``apiVersion`` and ``kind`` fields.

    Input:
    k8s_client: an ApiClient object, initialized with the client args.
    yml_object: dict. One valid kubernetes object.
    verbose: If True, print confirmation from the create action.

    Returns:
    The created kubernetes API object.
    """
    # "apps/v1" -> ("apps", "v1"); a bare "v1" means the core group.
    api_group, _, api_version = yml_object["apiVersion"].partition("/")
    if api_version == "":
        api_version = api_group
        api_group = "core"
    # Take care for the case e.g. api_type is "apiextensions.k8s.io":
    # only strip the last ".k8s.io" occurrence.
    api_group = "".join(api_group.rsplit(".k8s.io", 1))
    # Convert the DNS-subdomain group name to Python class-name
    # convention, then build e.g. "AppsV1Api" and instantiate it.
    api_group = "".join(part.capitalize() for part in api_group.split('.'))
    api_class_name = "{0}{1}Api".format(api_group, api_version.capitalize())
    k8s_api = getattr(client, api_class_name)(k8s_client)

    # CamelCased kind -> snake_case method-name fragment.
    kind = yml_object["kind"]
    kind = UPPER_FOLLOWED_BY_LOWER_RE.sub(r'\1_\2', kind)
    kind = LOWER_OR_NUM_FOLLOWED_BY_UPPER_RE.sub(r'\1_\2', kind).lower()

    # Expect the user to create namespaced objects more often.
    if hasattr(k8s_api, "create_namespaced_{0}".format(kind)):
        # A namespace embedded in the object wins over the kwarg.
        if "namespace" in yml_object["metadata"]:
            kwargs['namespace'] = yml_object["metadata"]["namespace"]
        resp = getattr(k8s_api, "create_namespaced_{0}".format(kind))(
            body=yml_object, **kwargs)
    else:
        # Cluster-scoped resource: the create call takes no namespace.
        kwargs.pop('namespace', None)
        resp = getattr(k8s_api, "create_{0}".format(kind))(
            body=yml_object, **kwargs)

    if verbose:
        msg = "{0} created.".format(kind)
        if hasattr(resp, 'status'):
            msg += " status='{0}'".format(str(resp.status))
        print(msg)
    return resp
+
+
class FailToCreateError(Exception):
    """Aggregate error raised when one or more objects in a YAML
    manifest could not be created.

    Attributes:
    api_exceptions: list of `client.rest.ApiException`, one per object
        that failed to create.
    """

    def __init__(self, api_exceptions):
        self.api_exceptions = api_exceptions

    def __str__(self):
        # Concatenate one "Error from server" line per failed object.
        return "".join(
            "Error from server ({0}): {1}".format(exc.reason, exc.body)
            for exc in self.api_exceptions)
diff --git a/contrib/python/kubernetes/kubernetes/utils/quantity.py b/contrib/python/kubernetes/kubernetes/utils/quantity.py
new file mode 100644
index 0000000000..68e57d9807
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/utils/quantity.py
@@ -0,0 +1,75 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from decimal import Decimal, InvalidOperation
+
+
def parse_quantity(quantity):
    """Parse a Kubernetes canonical-form quantity (e.g. "200Mi") into a
    Decimal.

    Supported SI suffixes:
    base1024: Ki | Mi | Gi | Ti | Pi | Ei
    base1000: n | u | m | "" | k | M | G | T | P | E

    See https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go

    Input:
    quantity: string. kubernetes canonical form quantity

    Returns:
    Decimal

    Raises:
    ValueError on invalid or unknown input
    """
    # Plain numbers pass straight through.
    if isinstance(quantity, (int, float, Decimal)):
        return Decimal(quantity)

    # Decimal exponent of each suffix letter ("M" -> 1000**2 / 1024**2).
    exponents = {"n": -3, "u": -2, "m": -1, "K": 1, "k": 1, "M": 2,
                 "G": 3, "T": 4, "P": 5, "E": 6}

    text = str(quantity)
    numeric_part = text
    si_suffix = None
    # Binary suffixes are two characters ending in "i" (e.g. "Mi");
    # decimal suffixes are one character.
    if text.endswith("i") and len(text) >= 2 and text[-2] in exponents:
        numeric_part = text[:-2]
        si_suffix = text[-2:]
    elif text and text[-1] in exponents:
        numeric_part = text[:-1]
        si_suffix = text[-1:]

    try:
        value = Decimal(numeric_part)
    except InvalidOperation:
        raise ValueError("Invalid number format: {}".format(numeric_part))

    if si_suffix is None:
        return value

    if si_suffix.endswith("i"):
        base = 1024
    elif len(si_suffix) == 1:
        base = 1000
    else:
        raise ValueError("{} has unknown suffix".format(text))

    # Handle SI inconsistency: lowercase "ki" is not a valid binary
    # suffix (only "Ki" is).
    if si_suffix == "ki":
        raise ValueError("{} has unknown suffix".format(text))

    # Defensive; suffix detection above only accepts known letters.
    if si_suffix[0] not in exponents:
        raise ValueError("{} has unknown suffix".format(text))

    exponent = Decimal(exponents[si_suffix[0]])
    return value * (base ** exponent)
diff --git a/contrib/python/kubernetes/kubernetes/watch/__init__.py b/contrib/python/kubernetes/kubernetes/watch/__init__.py
new file mode 100644
index 0000000000..ca9ac06987
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/watch/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .watch import Watch
diff --git a/contrib/python/kubernetes/kubernetes/watch/watch.py b/contrib/python/kubernetes/kubernetes/watch/watch.py
new file mode 100644
index 0000000000..71fd459191
--- /dev/null
+++ b/contrib/python/kubernetes/kubernetes/watch/watch.py
@@ -0,0 +1,200 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import pydoc
+import sys
+
+from kubernetes import client
+
# Docstring markers: _find_return_type scans for PYDOC_RETURN_LABEL and
# get_watch_argument_name checks for PYDOC_FOLLOW_PARAM.
PYDOC_RETURN_LABEL = ":return:"
PYDOC_FOLLOW_PARAM = ":param bool follow:"

# Removing this suffix from return type name should give us event's object
# type. e.g., if list_namespaces() returns "NamespaceList" type,
# then list_namespaces(watch=true) returns a stream of events with objects
# of type "Namespace". In case this assumption is not true, user should
# provide return_type to Watch class's __init__.
TYPE_LIST_SUFFIX = "List"


# HTTP 410 "Gone", resolved across the Python 2/3 stdlib layouts
# (httplib on py2, http.HTTPStatus on py3).
PY2 = sys.version_info[0] == 2
if PY2:
    import httplib
    HTTP_STATUS_GONE = httplib.GONE
else:
    import http
    HTTP_STATUS_GONE = http.HTTPStatus.GONE
+
+
class SimpleNamespace:
    """Minimal attribute container: every keyword argument becomes an
    instance attribute (local stand-in for types.SimpleNamespace)."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
+
+
def _find_return_type(func):
    """Return the value of the first ":return:" line in *func*'s
    docstring, or "" when no such line exists."""
    for doc_line in pydoc.getdoc(func).splitlines():
        if not doc_line.startswith(PYDOC_RETURN_LABEL):
            continue
        return doc_line[len(PYDOC_RETURN_LABEL):].strip()
    return ""
+
+
def iter_resp_lines(resp):
    """Yield complete, non-empty lines from a streaming urllib3 response.

    Chunks from ``resp.stream()`` may split lines arbitrarily, so a
    trailing partial line is buffered and prepended to the next chunk.

    Input:
    resp: a urllib3 response-like object exposing
        ``stream(amt, decode_content)`` yielding str or utf-8 bytes.

    Yields:
    str: each newline-terminated line, with empty lines skipped.
    """
    pending = ""
    for chunk in resp.stream(amt=None, decode_content=False):
        if isinstance(chunk, bytes):
            chunk = chunk.decode('utf8')
        chunk = pending + chunk
        lines = chunk.split("\n")
        if chunk.endswith("\n"):
            pending = ""
        else:
            # Last element is an incomplete line; hold it for later.
            pending = lines.pop()
        for line in lines:
            if line:
                yield line
    # Bug fix: previously a final line with no trailing newline stayed in
    # the buffer and was silently dropped (e.g. the tail of a log stream).
    if pending:
        yield pending
+
+
class Watch(object):
    """Stream watch events (or raw log lines) from a list/log API call,
    retrying once on HTTP 410 ("Gone") when allowed."""

    def __init__(self, return_type=None):
        """Create a watcher.

        :param return_type: optional model name for deserializing event
            objects; when None it is inferred from the watched function's
            docstring (see get_return_type).
        """
        self._raw_return_type = return_type
        # Set by stop() (or internally) to end the stream() loop.
        self._stop = False
        self._api_client = client.ApiClient()
        # Last resourceVersion seen; used to resume an expired watch.
        self.resource_version = None

    def stop(self):
        """Request that an in-progress stream() terminate."""
        self._stop = True

    def get_return_type(self, func):
        """Return the event object type for *func*.

        Uses the explicitly provided return_type when given; otherwise
        reads the ":return:" line of func's docstring and strips a
        trailing "List" suffix (list_X returns "XList", events carry "X").
        """
        if self._raw_return_type:
            return self._raw_return_type
        return_type = _find_return_type(func)
        if return_type.endswith(TYPE_LIST_SUFFIX):
            return return_type[:-len(TYPE_LIST_SUFFIX)]
        return return_type

    def get_watch_argument_name(self, func):
        """Return the keyword that makes *func* stream: 'follow' for log
        endpoints (detected via their docstring), 'watch' otherwise."""
        if PYDOC_FOLLOW_PARAM in pydoc.getdoc(func):
            return 'follow'
        else:
            return 'watch'

    def unmarshal_event(self, data, return_type):
        """Decode one watch-event JSON line into a dict with keys
        'type', 'object' (deserialized when possible) and 'raw_object'
        (the original dict); updates self.resource_version from the
        event's object when it carries one.
        """
        js = json.loads(data)
        js['raw_object'] = js['object']
        # BOOKMARK event is treated the same as ERROR for a quick fix of
        # decoding exception
        # TODO: make use of the resource_version in BOOKMARK event for more
        # efficient WATCH
        if return_type and js['type'] != 'ERROR' and js['type'] != 'BOOKMARK':
            obj = SimpleNamespace(data=json.dumps(js['raw_object']))
            js['object'] = self._api_client.deserialize(obj, return_type)
            if hasattr(js['object'], 'metadata'):
                self.resource_version = js['object'].metadata.resource_version
            # For custom objects that we don't have model defined, json
            # deserialization results in dictionary
            elif (isinstance(js['object'], dict) and 'metadata' in js['object']
                  and 'resourceVersion' in js['object']['metadata']):
                self.resource_version = js['object']['metadata'][
                    'resourceVersion']
        return js

    def stream(self, func, *args, **kwargs):
        """Watch an API resource and stream the result back via a generator.

        Note that watching an API resource can expire. The method tries to
        resume automatically once from the last result, but if that last result
        is too old as well, an `ApiException` exception will be thrown with
        ``code`` 410. In that case you have to recover yourself, probably
        by listing the API resource to obtain the latest state and then
        watching from that state on by setting ``resource_version`` to
        one returned from listing.

        :param func: The API function pointer. Any parameter to the function
                     can be passed after this parameter.

        :return: Event object with these keys:
                   'type': The type of event such as "ADDED", "DELETED", etc.
                   'raw_object': a dict representing the watched object.
                   'object': A model representation of raw_object. The name of
                             model will be determined based on
                             the func's doc string. If it cannot be determined,
                             'object' value will be the same as 'raw_object'.

        Example:
            v1 = kubernetes.client.CoreV1Api()
            watch = kubernetes.watch.Watch()
            for e in watch.stream(v1.list_namespace, resource_version=1127):
                type = e['type']
                object = e['object']  # object is one of type return_type
                raw_object = e['raw_object']  # raw_object is a dict
                ...
                if should_stop:
                    watch.stop()
        """

        self._stop = False
        return_type = self.get_return_type(func)
        watch_arg = self.get_watch_argument_name(func)
        kwargs[watch_arg] = True
        # Stream the raw response instead of preloading it into memory.
        kwargs['_preload_content'] = False
        if 'resource_version' in kwargs:
            self.resource_version = kwargs['resource_version']

        # Do not attempt retries if user specifies a timeout.
        # We want to ensure we are returning within that timeout.
        disable_retries = ('timeout_seconds' in kwargs)
        retry_after_410 = False
        while True:
            resp = func(*args, **kwargs)
            try:
                for line in iter_resp_lines(resp):
                    # unmarshal when we are receiving events from watch,
                    # return raw string when we are streaming log
                    if watch_arg == "watch":
                        event = self.unmarshal_event(line, return_type)
                        if isinstance(event, dict) \
                                and event['type'] == 'ERROR':
                            obj = event['raw_object']
                            # Current request expired, let's retry, (if enabled)
                            # but only if we have not already retried.
                            if not disable_retries and not retry_after_410 and \
                                    obj['code'] == HTTP_STATUS_GONE:
                                retry_after_410 = True
                                break
                            else:
                                reason = "%s: %s" % (
                                    obj['reason'], obj['message'])
                                raise client.rest.ApiException(
                                    status=obj['code'], reason=reason)
                        else:
                            # Any successful event clears the one-shot
                            # 410-retry flag.
                            retry_after_410 = False
                            yield event
                    else:
                        yield line
                    if self._stop:
                        break
            finally:
                # Always release the HTTP connection; remember where to
                # resume, or stop if no resourceVersion was ever seen.
                resp.close()
                resp.release_conn()
                if self.resource_version is not None:
                    kwargs['resource_version'] = self.resource_version
                else:
                    self._stop = True

            if self._stop or disable_retries:
                break
diff --git a/contrib/python/kubernetes/ya.make b/contrib/python/kubernetes/ya.make
new file mode 100644
index 0000000000..66c5faec12
--- /dev/null
+++ b/contrib/python/kubernetes/ya.make
@@ -0,0 +1,684 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(28.1.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/PyYAML
+ contrib/python/certifi
+ contrib/python/google-auth
+ contrib/python/oauthlib
+ contrib/python/python-dateutil
+ contrib/python/requests
+ contrib/python/requests-oauthlib
+ contrib/python/six
+ contrib/python/urllib3
+ contrib/python/websocket-client
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ kubernetes/__init__.py
+ kubernetes/client/__init__.py
+ kubernetes/client/api/__init__.py
+ kubernetes/client/api/admissionregistration_api.py
+ kubernetes/client/api/admissionregistration_v1_api.py
+ kubernetes/client/api/admissionregistration_v1alpha1_api.py
+ kubernetes/client/api/admissionregistration_v1beta1_api.py
+ kubernetes/client/api/apiextensions_api.py
+ kubernetes/client/api/apiextensions_v1_api.py
+ kubernetes/client/api/apiregistration_api.py
+ kubernetes/client/api/apiregistration_v1_api.py
+ kubernetes/client/api/apis_api.py
+ kubernetes/client/api/apps_api.py
+ kubernetes/client/api/apps_v1_api.py
+ kubernetes/client/api/authentication_api.py
+ kubernetes/client/api/authentication_v1_api.py
+ kubernetes/client/api/authentication_v1alpha1_api.py
+ kubernetes/client/api/authentication_v1beta1_api.py
+ kubernetes/client/api/authorization_api.py
+ kubernetes/client/api/authorization_v1_api.py
+ kubernetes/client/api/autoscaling_api.py
+ kubernetes/client/api/autoscaling_v1_api.py
+ kubernetes/client/api/autoscaling_v2_api.py
+ kubernetes/client/api/batch_api.py
+ kubernetes/client/api/batch_v1_api.py
+ kubernetes/client/api/certificates_api.py
+ kubernetes/client/api/certificates_v1_api.py
+ kubernetes/client/api/certificates_v1alpha1_api.py
+ kubernetes/client/api/coordination_api.py
+ kubernetes/client/api/coordination_v1_api.py
+ kubernetes/client/api/core_api.py
+ kubernetes/client/api/core_v1_api.py
+ kubernetes/client/api/custom_objects_api.py
+ kubernetes/client/api/discovery_api.py
+ kubernetes/client/api/discovery_v1_api.py
+ kubernetes/client/api/events_api.py
+ kubernetes/client/api/events_v1_api.py
+ kubernetes/client/api/flowcontrol_apiserver_api.py
+ kubernetes/client/api/flowcontrol_apiserver_v1beta2_api.py
+ kubernetes/client/api/flowcontrol_apiserver_v1beta3_api.py
+ kubernetes/client/api/internal_apiserver_api.py
+ kubernetes/client/api/internal_apiserver_v1alpha1_api.py
+ kubernetes/client/api/logs_api.py
+ kubernetes/client/api/networking_api.py
+ kubernetes/client/api/networking_v1_api.py
+ kubernetes/client/api/networking_v1alpha1_api.py
+ kubernetes/client/api/node_api.py
+ kubernetes/client/api/node_v1_api.py
+ kubernetes/client/api/openid_api.py
+ kubernetes/client/api/policy_api.py
+ kubernetes/client/api/policy_v1_api.py
+ kubernetes/client/api/rbac_authorization_api.py
+ kubernetes/client/api/rbac_authorization_v1_api.py
+ kubernetes/client/api/resource_api.py
+ kubernetes/client/api/resource_v1alpha2_api.py
+ kubernetes/client/api/scheduling_api.py
+ kubernetes/client/api/scheduling_v1_api.py
+ kubernetes/client/api/storage_api.py
+ kubernetes/client/api/storage_v1_api.py
+ kubernetes/client/api/version_api.py
+ kubernetes/client/api/well_known_api.py
+ kubernetes/client/api_client.py
+ kubernetes/client/apis/__init__.py
+ kubernetes/client/configuration.py
+ kubernetes/client/exceptions.py
+ kubernetes/client/models/__init__.py
+ kubernetes/client/models/admissionregistration_v1_service_reference.py
+ kubernetes/client/models/admissionregistration_v1_webhook_client_config.py
+ kubernetes/client/models/apiextensions_v1_service_reference.py
+ kubernetes/client/models/apiextensions_v1_webhook_client_config.py
+ kubernetes/client/models/apiregistration_v1_service_reference.py
+ kubernetes/client/models/authentication_v1_token_request.py
+ kubernetes/client/models/core_v1_endpoint_port.py
+ kubernetes/client/models/core_v1_event.py
+ kubernetes/client/models/core_v1_event_list.py
+ kubernetes/client/models/core_v1_event_series.py
+ kubernetes/client/models/discovery_v1_endpoint_port.py
+ kubernetes/client/models/events_v1_event.py
+ kubernetes/client/models/events_v1_event_list.py
+ kubernetes/client/models/events_v1_event_series.py
+ kubernetes/client/models/storage_v1_token_request.py
+ kubernetes/client/models/v1_affinity.py
+ kubernetes/client/models/v1_aggregation_rule.py
+ kubernetes/client/models/v1_api_group.py
+ kubernetes/client/models/v1_api_group_list.py
+ kubernetes/client/models/v1_api_resource.py
+ kubernetes/client/models/v1_api_resource_list.py
+ kubernetes/client/models/v1_api_service.py
+ kubernetes/client/models/v1_api_service_condition.py
+ kubernetes/client/models/v1_api_service_list.py
+ kubernetes/client/models/v1_api_service_spec.py
+ kubernetes/client/models/v1_api_service_status.py
+ kubernetes/client/models/v1_api_versions.py
+ kubernetes/client/models/v1_attached_volume.py
+ kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py
+ kubernetes/client/models/v1_azure_disk_volume_source.py
+ kubernetes/client/models/v1_azure_file_persistent_volume_source.py
+ kubernetes/client/models/v1_azure_file_volume_source.py
+ kubernetes/client/models/v1_binding.py
+ kubernetes/client/models/v1_bound_object_reference.py
+ kubernetes/client/models/v1_capabilities.py
+ kubernetes/client/models/v1_ceph_fs_persistent_volume_source.py
+ kubernetes/client/models/v1_ceph_fs_volume_source.py
+ kubernetes/client/models/v1_certificate_signing_request.py
+ kubernetes/client/models/v1_certificate_signing_request_condition.py
+ kubernetes/client/models/v1_certificate_signing_request_list.py
+ kubernetes/client/models/v1_certificate_signing_request_spec.py
+ kubernetes/client/models/v1_certificate_signing_request_status.py
+ kubernetes/client/models/v1_cinder_persistent_volume_source.py
+ kubernetes/client/models/v1_cinder_volume_source.py
+ kubernetes/client/models/v1_claim_source.py
+ kubernetes/client/models/v1_client_ip_config.py
+ kubernetes/client/models/v1_cluster_role.py
+ kubernetes/client/models/v1_cluster_role_binding.py
+ kubernetes/client/models/v1_cluster_role_binding_list.py
+ kubernetes/client/models/v1_cluster_role_list.py
+ kubernetes/client/models/v1_component_condition.py
+ kubernetes/client/models/v1_component_status.py
+ kubernetes/client/models/v1_component_status_list.py
+ kubernetes/client/models/v1_condition.py
+ kubernetes/client/models/v1_config_map.py
+ kubernetes/client/models/v1_config_map_env_source.py
+ kubernetes/client/models/v1_config_map_key_selector.py
+ kubernetes/client/models/v1_config_map_list.py
+ kubernetes/client/models/v1_config_map_node_config_source.py
+ kubernetes/client/models/v1_config_map_projection.py
+ kubernetes/client/models/v1_config_map_volume_source.py
+ kubernetes/client/models/v1_container.py
+ kubernetes/client/models/v1_container_image.py
+ kubernetes/client/models/v1_container_port.py
+ kubernetes/client/models/v1_container_resize_policy.py
+ kubernetes/client/models/v1_container_state.py
+ kubernetes/client/models/v1_container_state_running.py
+ kubernetes/client/models/v1_container_state_terminated.py
+ kubernetes/client/models/v1_container_state_waiting.py
+ kubernetes/client/models/v1_container_status.py
+ kubernetes/client/models/v1_controller_revision.py
+ kubernetes/client/models/v1_controller_revision_list.py
+ kubernetes/client/models/v1_cron_job.py
+ kubernetes/client/models/v1_cron_job_list.py
+ kubernetes/client/models/v1_cron_job_spec.py
+ kubernetes/client/models/v1_cron_job_status.py
+ kubernetes/client/models/v1_cross_version_object_reference.py
+ kubernetes/client/models/v1_csi_driver.py
+ kubernetes/client/models/v1_csi_driver_list.py
+ kubernetes/client/models/v1_csi_driver_spec.py
+ kubernetes/client/models/v1_csi_node.py
+ kubernetes/client/models/v1_csi_node_driver.py
+ kubernetes/client/models/v1_csi_node_list.py
+ kubernetes/client/models/v1_csi_node_spec.py
+ kubernetes/client/models/v1_csi_persistent_volume_source.py
+ kubernetes/client/models/v1_csi_storage_capacity.py
+ kubernetes/client/models/v1_csi_storage_capacity_list.py
+ kubernetes/client/models/v1_csi_volume_source.py
+ kubernetes/client/models/v1_custom_resource_column_definition.py
+ kubernetes/client/models/v1_custom_resource_conversion.py
+ kubernetes/client/models/v1_custom_resource_definition.py
+ kubernetes/client/models/v1_custom_resource_definition_condition.py
+ kubernetes/client/models/v1_custom_resource_definition_list.py
+ kubernetes/client/models/v1_custom_resource_definition_names.py
+ kubernetes/client/models/v1_custom_resource_definition_spec.py
+ kubernetes/client/models/v1_custom_resource_definition_status.py
+ kubernetes/client/models/v1_custom_resource_definition_version.py
+ kubernetes/client/models/v1_custom_resource_subresource_scale.py
+ kubernetes/client/models/v1_custom_resource_subresources.py
+ kubernetes/client/models/v1_custom_resource_validation.py
+ kubernetes/client/models/v1_daemon_endpoint.py
+ kubernetes/client/models/v1_daemon_set.py
+ kubernetes/client/models/v1_daemon_set_condition.py
+ kubernetes/client/models/v1_daemon_set_list.py
+ kubernetes/client/models/v1_daemon_set_spec.py
+ kubernetes/client/models/v1_daemon_set_status.py
+ kubernetes/client/models/v1_daemon_set_update_strategy.py
+ kubernetes/client/models/v1_delete_options.py
+ kubernetes/client/models/v1_deployment.py
+ kubernetes/client/models/v1_deployment_condition.py
+ kubernetes/client/models/v1_deployment_list.py
+ kubernetes/client/models/v1_deployment_spec.py
+ kubernetes/client/models/v1_deployment_status.py
+ kubernetes/client/models/v1_deployment_strategy.py
+ kubernetes/client/models/v1_downward_api_projection.py
+ kubernetes/client/models/v1_downward_api_volume_file.py
+ kubernetes/client/models/v1_downward_api_volume_source.py
+ kubernetes/client/models/v1_empty_dir_volume_source.py
+ kubernetes/client/models/v1_endpoint.py
+ kubernetes/client/models/v1_endpoint_address.py
+ kubernetes/client/models/v1_endpoint_conditions.py
+ kubernetes/client/models/v1_endpoint_hints.py
+ kubernetes/client/models/v1_endpoint_slice.py
+ kubernetes/client/models/v1_endpoint_slice_list.py
+ kubernetes/client/models/v1_endpoint_subset.py
+ kubernetes/client/models/v1_endpoints.py
+ kubernetes/client/models/v1_endpoints_list.py
+ kubernetes/client/models/v1_env_from_source.py
+ kubernetes/client/models/v1_env_var.py
+ kubernetes/client/models/v1_env_var_source.py
+ kubernetes/client/models/v1_ephemeral_container.py
+ kubernetes/client/models/v1_ephemeral_volume_source.py
+ kubernetes/client/models/v1_event_source.py
+ kubernetes/client/models/v1_eviction.py
+ kubernetes/client/models/v1_exec_action.py
+ kubernetes/client/models/v1_external_documentation.py
+ kubernetes/client/models/v1_fc_volume_source.py
+ kubernetes/client/models/v1_flex_persistent_volume_source.py
+ kubernetes/client/models/v1_flex_volume_source.py
+ kubernetes/client/models/v1_flocker_volume_source.py
+ kubernetes/client/models/v1_for_zone.py
+ kubernetes/client/models/v1_gce_persistent_disk_volume_source.py
+ kubernetes/client/models/v1_git_repo_volume_source.py
+ kubernetes/client/models/v1_glusterfs_persistent_volume_source.py
+ kubernetes/client/models/v1_glusterfs_volume_source.py
+ kubernetes/client/models/v1_group_version_for_discovery.py
+ kubernetes/client/models/v1_grpc_action.py
+ kubernetes/client/models/v1_horizontal_pod_autoscaler.py
+ kubernetes/client/models/v1_horizontal_pod_autoscaler_list.py
+ kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py
+ kubernetes/client/models/v1_horizontal_pod_autoscaler_status.py
+ kubernetes/client/models/v1_host_alias.py
+ kubernetes/client/models/v1_host_ip.py
+ kubernetes/client/models/v1_host_path_volume_source.py
+ kubernetes/client/models/v1_http_get_action.py
+ kubernetes/client/models/v1_http_header.py
+ kubernetes/client/models/v1_http_ingress_path.py
+ kubernetes/client/models/v1_http_ingress_rule_value.py
+ kubernetes/client/models/v1_ingress.py
+ kubernetes/client/models/v1_ingress_backend.py
+ kubernetes/client/models/v1_ingress_class.py
+ kubernetes/client/models/v1_ingress_class_list.py
+ kubernetes/client/models/v1_ingress_class_parameters_reference.py
+ kubernetes/client/models/v1_ingress_class_spec.py
+ kubernetes/client/models/v1_ingress_list.py
+ kubernetes/client/models/v1_ingress_load_balancer_ingress.py
+ kubernetes/client/models/v1_ingress_load_balancer_status.py
+ kubernetes/client/models/v1_ingress_port_status.py
+ kubernetes/client/models/v1_ingress_rule.py
+ kubernetes/client/models/v1_ingress_service_backend.py
+ kubernetes/client/models/v1_ingress_spec.py
+ kubernetes/client/models/v1_ingress_status.py
+ kubernetes/client/models/v1_ingress_tls.py
+ kubernetes/client/models/v1_ip_block.py
+ kubernetes/client/models/v1_iscsi_persistent_volume_source.py
+ kubernetes/client/models/v1_iscsi_volume_source.py
+ kubernetes/client/models/v1_job.py
+ kubernetes/client/models/v1_job_condition.py
+ kubernetes/client/models/v1_job_list.py
+ kubernetes/client/models/v1_job_spec.py
+ kubernetes/client/models/v1_job_status.py
+ kubernetes/client/models/v1_job_template_spec.py
+ kubernetes/client/models/v1_json_schema_props.py
+ kubernetes/client/models/v1_key_to_path.py
+ kubernetes/client/models/v1_label_selector.py
+ kubernetes/client/models/v1_label_selector_requirement.py
+ kubernetes/client/models/v1_lease.py
+ kubernetes/client/models/v1_lease_list.py
+ kubernetes/client/models/v1_lease_spec.py
+ kubernetes/client/models/v1_lifecycle.py
+ kubernetes/client/models/v1_lifecycle_handler.py
+ kubernetes/client/models/v1_limit_range.py
+ kubernetes/client/models/v1_limit_range_item.py
+ kubernetes/client/models/v1_limit_range_list.py
+ kubernetes/client/models/v1_limit_range_spec.py
+ kubernetes/client/models/v1_list_meta.py
+ kubernetes/client/models/v1_load_balancer_ingress.py
+ kubernetes/client/models/v1_load_balancer_status.py
+ kubernetes/client/models/v1_local_object_reference.py
+ kubernetes/client/models/v1_local_subject_access_review.py
+ kubernetes/client/models/v1_local_volume_source.py
+ kubernetes/client/models/v1_managed_fields_entry.py
+ kubernetes/client/models/v1_match_condition.py
+ kubernetes/client/models/v1_mutating_webhook.py
+ kubernetes/client/models/v1_mutating_webhook_configuration.py
+ kubernetes/client/models/v1_mutating_webhook_configuration_list.py
+ kubernetes/client/models/v1_namespace.py
+ kubernetes/client/models/v1_namespace_condition.py
+ kubernetes/client/models/v1_namespace_list.py
+ kubernetes/client/models/v1_namespace_spec.py
+ kubernetes/client/models/v1_namespace_status.py
+ kubernetes/client/models/v1_network_policy.py
+ kubernetes/client/models/v1_network_policy_egress_rule.py
+ kubernetes/client/models/v1_network_policy_ingress_rule.py
+ kubernetes/client/models/v1_network_policy_list.py
+ kubernetes/client/models/v1_network_policy_peer.py
+ kubernetes/client/models/v1_network_policy_port.py
+ kubernetes/client/models/v1_network_policy_spec.py
+ kubernetes/client/models/v1_network_policy_status.py
+ kubernetes/client/models/v1_nfs_volume_source.py
+ kubernetes/client/models/v1_node.py
+ kubernetes/client/models/v1_node_address.py
+ kubernetes/client/models/v1_node_affinity.py
+ kubernetes/client/models/v1_node_condition.py
+ kubernetes/client/models/v1_node_config_source.py
+ kubernetes/client/models/v1_node_config_status.py
+ kubernetes/client/models/v1_node_daemon_endpoints.py
+ kubernetes/client/models/v1_node_list.py
+ kubernetes/client/models/v1_node_selector.py
+ kubernetes/client/models/v1_node_selector_requirement.py
+ kubernetes/client/models/v1_node_selector_term.py
+ kubernetes/client/models/v1_node_spec.py
+ kubernetes/client/models/v1_node_status.py
+ kubernetes/client/models/v1_node_system_info.py
+ kubernetes/client/models/v1_non_resource_attributes.py
+ kubernetes/client/models/v1_non_resource_rule.py
+ kubernetes/client/models/v1_object_field_selector.py
+ kubernetes/client/models/v1_object_meta.py
+ kubernetes/client/models/v1_object_reference.py
+ kubernetes/client/models/v1_overhead.py
+ kubernetes/client/models/v1_owner_reference.py
+ kubernetes/client/models/v1_persistent_volume.py
+ kubernetes/client/models/v1_persistent_volume_claim.py
+ kubernetes/client/models/v1_persistent_volume_claim_condition.py
+ kubernetes/client/models/v1_persistent_volume_claim_list.py
+ kubernetes/client/models/v1_persistent_volume_claim_spec.py
+ kubernetes/client/models/v1_persistent_volume_claim_status.py
+ kubernetes/client/models/v1_persistent_volume_claim_template.py
+ kubernetes/client/models/v1_persistent_volume_claim_volume_source.py
+ kubernetes/client/models/v1_persistent_volume_list.py
+ kubernetes/client/models/v1_persistent_volume_spec.py
+ kubernetes/client/models/v1_persistent_volume_status.py
+ kubernetes/client/models/v1_photon_persistent_disk_volume_source.py
+ kubernetes/client/models/v1_pod.py
+ kubernetes/client/models/v1_pod_affinity.py
+ kubernetes/client/models/v1_pod_affinity_term.py
+ kubernetes/client/models/v1_pod_anti_affinity.py
+ kubernetes/client/models/v1_pod_condition.py
+ kubernetes/client/models/v1_pod_disruption_budget.py
+ kubernetes/client/models/v1_pod_disruption_budget_list.py
+ kubernetes/client/models/v1_pod_disruption_budget_spec.py
+ kubernetes/client/models/v1_pod_disruption_budget_status.py
+ kubernetes/client/models/v1_pod_dns_config.py
+ kubernetes/client/models/v1_pod_dns_config_option.py
+ kubernetes/client/models/v1_pod_failure_policy.py
+ kubernetes/client/models/v1_pod_failure_policy_on_exit_codes_requirement.py
+ kubernetes/client/models/v1_pod_failure_policy_on_pod_conditions_pattern.py
+ kubernetes/client/models/v1_pod_failure_policy_rule.py
+ kubernetes/client/models/v1_pod_ip.py
+ kubernetes/client/models/v1_pod_list.py
+ kubernetes/client/models/v1_pod_os.py
+ kubernetes/client/models/v1_pod_readiness_gate.py
+ kubernetes/client/models/v1_pod_resource_claim.py
+ kubernetes/client/models/v1_pod_resource_claim_status.py
+ kubernetes/client/models/v1_pod_scheduling_gate.py
+ kubernetes/client/models/v1_pod_security_context.py
+ kubernetes/client/models/v1_pod_spec.py
+ kubernetes/client/models/v1_pod_status.py
+ kubernetes/client/models/v1_pod_template.py
+ kubernetes/client/models/v1_pod_template_list.py
+ kubernetes/client/models/v1_pod_template_spec.py
+ kubernetes/client/models/v1_policy_rule.py
+ kubernetes/client/models/v1_port_status.py
+ kubernetes/client/models/v1_portworx_volume_source.py
+ kubernetes/client/models/v1_preconditions.py
+ kubernetes/client/models/v1_preferred_scheduling_term.py
+ kubernetes/client/models/v1_priority_class.py
+ kubernetes/client/models/v1_priority_class_list.py
+ kubernetes/client/models/v1_probe.py
+ kubernetes/client/models/v1_projected_volume_source.py
+ kubernetes/client/models/v1_quobyte_volume_source.py
+ kubernetes/client/models/v1_rbd_persistent_volume_source.py
+ kubernetes/client/models/v1_rbd_volume_source.py
+ kubernetes/client/models/v1_replica_set.py
+ kubernetes/client/models/v1_replica_set_condition.py
+ kubernetes/client/models/v1_replica_set_list.py
+ kubernetes/client/models/v1_replica_set_spec.py
+ kubernetes/client/models/v1_replica_set_status.py
+ kubernetes/client/models/v1_replication_controller.py
+ kubernetes/client/models/v1_replication_controller_condition.py
+ kubernetes/client/models/v1_replication_controller_list.py
+ kubernetes/client/models/v1_replication_controller_spec.py
+ kubernetes/client/models/v1_replication_controller_status.py
+ kubernetes/client/models/v1_resource_attributes.py
+ kubernetes/client/models/v1_resource_claim.py
+ kubernetes/client/models/v1_resource_field_selector.py
+ kubernetes/client/models/v1_resource_quota.py
+ kubernetes/client/models/v1_resource_quota_list.py
+ kubernetes/client/models/v1_resource_quota_spec.py
+ kubernetes/client/models/v1_resource_quota_status.py
+ kubernetes/client/models/v1_resource_requirements.py
+ kubernetes/client/models/v1_resource_rule.py
+ kubernetes/client/models/v1_role.py
+ kubernetes/client/models/v1_role_binding.py
+ kubernetes/client/models/v1_role_binding_list.py
+ kubernetes/client/models/v1_role_list.py
+ kubernetes/client/models/v1_role_ref.py
+ kubernetes/client/models/v1_rolling_update_daemon_set.py
+ kubernetes/client/models/v1_rolling_update_deployment.py
+ kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
+ kubernetes/client/models/v1_rule_with_operations.py
+ kubernetes/client/models/v1_runtime_class.py
+ kubernetes/client/models/v1_runtime_class_list.py
+ kubernetes/client/models/v1_scale.py
+ kubernetes/client/models/v1_scale_io_persistent_volume_source.py
+ kubernetes/client/models/v1_scale_io_volume_source.py
+ kubernetes/client/models/v1_scale_spec.py
+ kubernetes/client/models/v1_scale_status.py
+ kubernetes/client/models/v1_scheduling.py
+ kubernetes/client/models/v1_scope_selector.py
+ kubernetes/client/models/v1_scoped_resource_selector_requirement.py
+ kubernetes/client/models/v1_se_linux_options.py
+ kubernetes/client/models/v1_seccomp_profile.py
+ kubernetes/client/models/v1_secret.py
+ kubernetes/client/models/v1_secret_env_source.py
+ kubernetes/client/models/v1_secret_key_selector.py
+ kubernetes/client/models/v1_secret_list.py
+ kubernetes/client/models/v1_secret_projection.py
+ kubernetes/client/models/v1_secret_reference.py
+ kubernetes/client/models/v1_secret_volume_source.py
+ kubernetes/client/models/v1_security_context.py
+ kubernetes/client/models/v1_self_subject_access_review.py
+ kubernetes/client/models/v1_self_subject_access_review_spec.py
+ kubernetes/client/models/v1_self_subject_review.py
+ kubernetes/client/models/v1_self_subject_review_status.py
+ kubernetes/client/models/v1_self_subject_rules_review.py
+ kubernetes/client/models/v1_self_subject_rules_review_spec.py
+ kubernetes/client/models/v1_server_address_by_client_cidr.py
+ kubernetes/client/models/v1_service.py
+ kubernetes/client/models/v1_service_account.py
+ kubernetes/client/models/v1_service_account_list.py
+ kubernetes/client/models/v1_service_account_token_projection.py
+ kubernetes/client/models/v1_service_backend_port.py
+ kubernetes/client/models/v1_service_list.py
+ kubernetes/client/models/v1_service_port.py
+ kubernetes/client/models/v1_service_spec.py
+ kubernetes/client/models/v1_service_status.py
+ kubernetes/client/models/v1_session_affinity_config.py
+ kubernetes/client/models/v1_stateful_set.py
+ kubernetes/client/models/v1_stateful_set_condition.py
+ kubernetes/client/models/v1_stateful_set_list.py
+ kubernetes/client/models/v1_stateful_set_ordinals.py
+ kubernetes/client/models/v1_stateful_set_persistent_volume_claim_retention_policy.py
+ kubernetes/client/models/v1_stateful_set_spec.py
+ kubernetes/client/models/v1_stateful_set_status.py
+ kubernetes/client/models/v1_stateful_set_update_strategy.py
+ kubernetes/client/models/v1_status.py
+ kubernetes/client/models/v1_status_cause.py
+ kubernetes/client/models/v1_status_details.py
+ kubernetes/client/models/v1_storage_class.py
+ kubernetes/client/models/v1_storage_class_list.py
+ kubernetes/client/models/v1_storage_os_persistent_volume_source.py
+ kubernetes/client/models/v1_storage_os_volume_source.py
+ kubernetes/client/models/v1_subject.py
+ kubernetes/client/models/v1_subject_access_review.py
+ kubernetes/client/models/v1_subject_access_review_spec.py
+ kubernetes/client/models/v1_subject_access_review_status.py
+ kubernetes/client/models/v1_subject_rules_review_status.py
+ kubernetes/client/models/v1_sysctl.py
+ kubernetes/client/models/v1_taint.py
+ kubernetes/client/models/v1_tcp_socket_action.py
+ kubernetes/client/models/v1_token_request_spec.py
+ kubernetes/client/models/v1_token_request_status.py
+ kubernetes/client/models/v1_token_review.py
+ kubernetes/client/models/v1_token_review_spec.py
+ kubernetes/client/models/v1_token_review_status.py
+ kubernetes/client/models/v1_toleration.py
+ kubernetes/client/models/v1_topology_selector_label_requirement.py
+ kubernetes/client/models/v1_topology_selector_term.py
+ kubernetes/client/models/v1_topology_spread_constraint.py
+ kubernetes/client/models/v1_typed_local_object_reference.py
+ kubernetes/client/models/v1_typed_object_reference.py
+ kubernetes/client/models/v1_uncounted_terminated_pods.py
+ kubernetes/client/models/v1_user_info.py
+ kubernetes/client/models/v1_validating_webhook.py
+ kubernetes/client/models/v1_validating_webhook_configuration.py
+ kubernetes/client/models/v1_validating_webhook_configuration_list.py
+ kubernetes/client/models/v1_validation_rule.py
+ kubernetes/client/models/v1_volume.py
+ kubernetes/client/models/v1_volume_attachment.py
+ kubernetes/client/models/v1_volume_attachment_list.py
+ kubernetes/client/models/v1_volume_attachment_source.py
+ kubernetes/client/models/v1_volume_attachment_spec.py
+ kubernetes/client/models/v1_volume_attachment_status.py
+ kubernetes/client/models/v1_volume_device.py
+ kubernetes/client/models/v1_volume_error.py
+ kubernetes/client/models/v1_volume_mount.py
+ kubernetes/client/models/v1_volume_node_affinity.py
+ kubernetes/client/models/v1_volume_node_resources.py
+ kubernetes/client/models/v1_volume_projection.py
+ kubernetes/client/models/v1_vsphere_virtual_disk_volume_source.py
+ kubernetes/client/models/v1_watch_event.py
+ kubernetes/client/models/v1_webhook_conversion.py
+ kubernetes/client/models/v1_weighted_pod_affinity_term.py
+ kubernetes/client/models/v1_windows_security_context_options.py
+ kubernetes/client/models/v1alpha1_audit_annotation.py
+ kubernetes/client/models/v1alpha1_cluster_cidr.py
+ kubernetes/client/models/v1alpha1_cluster_cidr_list.py
+ kubernetes/client/models/v1alpha1_cluster_cidr_spec.py
+ kubernetes/client/models/v1alpha1_cluster_trust_bundle.py
+ kubernetes/client/models/v1alpha1_cluster_trust_bundle_list.py
+ kubernetes/client/models/v1alpha1_cluster_trust_bundle_spec.py
+ kubernetes/client/models/v1alpha1_expression_warning.py
+ kubernetes/client/models/v1alpha1_ip_address.py
+ kubernetes/client/models/v1alpha1_ip_address_list.py
+ kubernetes/client/models/v1alpha1_ip_address_spec.py
+ kubernetes/client/models/v1alpha1_match_condition.py
+ kubernetes/client/models/v1alpha1_match_resources.py
+ kubernetes/client/models/v1alpha1_named_rule_with_operations.py
+ kubernetes/client/models/v1alpha1_param_kind.py
+ kubernetes/client/models/v1alpha1_param_ref.py
+ kubernetes/client/models/v1alpha1_parent_reference.py
+ kubernetes/client/models/v1alpha1_self_subject_review.py
+ kubernetes/client/models/v1alpha1_self_subject_review_status.py
+ kubernetes/client/models/v1alpha1_server_storage_version.py
+ kubernetes/client/models/v1alpha1_storage_version.py
+ kubernetes/client/models/v1alpha1_storage_version_condition.py
+ kubernetes/client/models/v1alpha1_storage_version_list.py
+ kubernetes/client/models/v1alpha1_storage_version_status.py
+ kubernetes/client/models/v1alpha1_type_checking.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy_binding.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy_binding_list.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy_binding_spec.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy_list.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy_spec.py
+ kubernetes/client/models/v1alpha1_validating_admission_policy_status.py
+ kubernetes/client/models/v1alpha1_validation.py
+ kubernetes/client/models/v1alpha1_variable.py
+ kubernetes/client/models/v1alpha2_allocation_result.py
+ kubernetes/client/models/v1alpha2_pod_scheduling_context.py
+ kubernetes/client/models/v1alpha2_pod_scheduling_context_list.py
+ kubernetes/client/models/v1alpha2_pod_scheduling_context_spec.py
+ kubernetes/client/models/v1alpha2_pod_scheduling_context_status.py
+ kubernetes/client/models/v1alpha2_resource_claim.py
+ kubernetes/client/models/v1alpha2_resource_claim_consumer_reference.py
+ kubernetes/client/models/v1alpha2_resource_claim_list.py
+ kubernetes/client/models/v1alpha2_resource_claim_parameters_reference.py
+ kubernetes/client/models/v1alpha2_resource_claim_scheduling_status.py
+ kubernetes/client/models/v1alpha2_resource_claim_spec.py
+ kubernetes/client/models/v1alpha2_resource_claim_status.py
+ kubernetes/client/models/v1alpha2_resource_claim_template.py
+ kubernetes/client/models/v1alpha2_resource_claim_template_list.py
+ kubernetes/client/models/v1alpha2_resource_claim_template_spec.py
+ kubernetes/client/models/v1alpha2_resource_class.py
+ kubernetes/client/models/v1alpha2_resource_class_list.py
+ kubernetes/client/models/v1alpha2_resource_class_parameters_reference.py
+ kubernetes/client/models/v1alpha2_resource_handle.py
+ kubernetes/client/models/v1beta1_audit_annotation.py
+ kubernetes/client/models/v1beta1_expression_warning.py
+ kubernetes/client/models/v1beta1_match_condition.py
+ kubernetes/client/models/v1beta1_match_resources.py
+ kubernetes/client/models/v1beta1_named_rule_with_operations.py
+ kubernetes/client/models/v1beta1_param_kind.py
+ kubernetes/client/models/v1beta1_param_ref.py
+ kubernetes/client/models/v1beta1_self_subject_review.py
+ kubernetes/client/models/v1beta1_self_subject_review_status.py
+ kubernetes/client/models/v1beta1_type_checking.py
+ kubernetes/client/models/v1beta1_validating_admission_policy.py
+ kubernetes/client/models/v1beta1_validating_admission_policy_binding.py
+ kubernetes/client/models/v1beta1_validating_admission_policy_binding_list.py
+ kubernetes/client/models/v1beta1_validating_admission_policy_binding_spec.py
+ kubernetes/client/models/v1beta1_validating_admission_policy_list.py
+ kubernetes/client/models/v1beta1_validating_admission_policy_spec.py
+ kubernetes/client/models/v1beta1_validating_admission_policy_status.py
+ kubernetes/client/models/v1beta1_validation.py
+ kubernetes/client/models/v1beta1_variable.py
+ kubernetes/client/models/v1beta2_exempt_priority_level_configuration.py
+ kubernetes/client/models/v1beta2_flow_distinguisher_method.py
+ kubernetes/client/models/v1beta2_flow_schema.py
+ kubernetes/client/models/v1beta2_flow_schema_condition.py
+ kubernetes/client/models/v1beta2_flow_schema_list.py
+ kubernetes/client/models/v1beta2_flow_schema_spec.py
+ kubernetes/client/models/v1beta2_flow_schema_status.py
+ kubernetes/client/models/v1beta2_group_subject.py
+ kubernetes/client/models/v1beta2_limit_response.py
+ kubernetes/client/models/v1beta2_limited_priority_level_configuration.py
+ kubernetes/client/models/v1beta2_non_resource_policy_rule.py
+ kubernetes/client/models/v1beta2_policy_rules_with_subjects.py
+ kubernetes/client/models/v1beta2_priority_level_configuration.py
+ kubernetes/client/models/v1beta2_priority_level_configuration_condition.py
+ kubernetes/client/models/v1beta2_priority_level_configuration_list.py
+ kubernetes/client/models/v1beta2_priority_level_configuration_reference.py
+ kubernetes/client/models/v1beta2_priority_level_configuration_spec.py
+ kubernetes/client/models/v1beta2_priority_level_configuration_status.py
+ kubernetes/client/models/v1beta2_queuing_configuration.py
+ kubernetes/client/models/v1beta2_resource_policy_rule.py
+ kubernetes/client/models/v1beta2_service_account_subject.py
+ kubernetes/client/models/v1beta2_subject.py
+ kubernetes/client/models/v1beta2_user_subject.py
+ kubernetes/client/models/v1beta3_exempt_priority_level_configuration.py
+ kubernetes/client/models/v1beta3_flow_distinguisher_method.py
+ kubernetes/client/models/v1beta3_flow_schema.py
+ kubernetes/client/models/v1beta3_flow_schema_condition.py
+ kubernetes/client/models/v1beta3_flow_schema_list.py
+ kubernetes/client/models/v1beta3_flow_schema_spec.py
+ kubernetes/client/models/v1beta3_flow_schema_status.py
+ kubernetes/client/models/v1beta3_group_subject.py
+ kubernetes/client/models/v1beta3_limit_response.py
+ kubernetes/client/models/v1beta3_limited_priority_level_configuration.py
+ kubernetes/client/models/v1beta3_non_resource_policy_rule.py
+ kubernetes/client/models/v1beta3_policy_rules_with_subjects.py
+ kubernetes/client/models/v1beta3_priority_level_configuration.py
+ kubernetes/client/models/v1beta3_priority_level_configuration_condition.py
+ kubernetes/client/models/v1beta3_priority_level_configuration_list.py
+ kubernetes/client/models/v1beta3_priority_level_configuration_reference.py
+ kubernetes/client/models/v1beta3_priority_level_configuration_spec.py
+ kubernetes/client/models/v1beta3_priority_level_configuration_status.py
+ kubernetes/client/models/v1beta3_queuing_configuration.py
+ kubernetes/client/models/v1beta3_resource_policy_rule.py
+ kubernetes/client/models/v1beta3_service_account_subject.py
+ kubernetes/client/models/v1beta3_subject.py
+ kubernetes/client/models/v1beta3_user_subject.py
+ kubernetes/client/models/v2_container_resource_metric_source.py
+ kubernetes/client/models/v2_container_resource_metric_status.py
+ kubernetes/client/models/v2_cross_version_object_reference.py
+ kubernetes/client/models/v2_external_metric_source.py
+ kubernetes/client/models/v2_external_metric_status.py
+ kubernetes/client/models/v2_horizontal_pod_autoscaler.py
+ kubernetes/client/models/v2_horizontal_pod_autoscaler_behavior.py
+ kubernetes/client/models/v2_horizontal_pod_autoscaler_condition.py
+ kubernetes/client/models/v2_horizontal_pod_autoscaler_list.py
+ kubernetes/client/models/v2_horizontal_pod_autoscaler_spec.py
+ kubernetes/client/models/v2_horizontal_pod_autoscaler_status.py
+ kubernetes/client/models/v2_hpa_scaling_policy.py
+ kubernetes/client/models/v2_hpa_scaling_rules.py
+ kubernetes/client/models/v2_metric_identifier.py
+ kubernetes/client/models/v2_metric_spec.py
+ kubernetes/client/models/v2_metric_status.py
+ kubernetes/client/models/v2_metric_target.py
+ kubernetes/client/models/v2_metric_value_status.py
+ kubernetes/client/models/v2_object_metric_source.py
+ kubernetes/client/models/v2_object_metric_status.py
+ kubernetes/client/models/v2_pods_metric_source.py
+ kubernetes/client/models/v2_pods_metric_status.py
+ kubernetes/client/models/v2_resource_metric_source.py
+ kubernetes/client/models/v2_resource_metric_status.py
+ kubernetes/client/models/version_info.py
+ kubernetes/client/rest.py
+ kubernetes/config/__init__.py
+ kubernetes/config/config_exception.py
+ kubernetes/config/dateutil.py
+ kubernetes/config/exec_provider.py
+ kubernetes/config/incluster_config.py
+ kubernetes/config/kube_config.py
+ kubernetes/dynamic/__init__.py
+ kubernetes/dynamic/client.py
+ kubernetes/dynamic/discovery.py
+ kubernetes/dynamic/exceptions.py
+ kubernetes/dynamic/resource.py
+ kubernetes/leaderelection/__init__.py
+ kubernetes/leaderelection/electionconfig.py
+ kubernetes/leaderelection/leaderelection.py
+ kubernetes/leaderelection/leaderelectionrecord.py
+ kubernetes/leaderelection/resourcelock/__init__.py
+ kubernetes/leaderelection/resourcelock/configmaplock.py
+ kubernetes/stream/__init__.py
+ kubernetes/stream/stream.py
+ kubernetes/stream/ws_client.py
+ kubernetes/utils/__init__.py
+ kubernetes/utils/create_from_yaml.py
+ kubernetes/utils/quantity.py
+ kubernetes/watch/__init__.py
+ kubernetes/watch/watch.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/kubernetes/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/monotonic/py2/.dist-info/METADATA b/contrib/python/monotonic/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..d8e0265dd6
--- /dev/null
+++ b/contrib/python/monotonic/py2/.dist-info/METADATA
@@ -0,0 +1,40 @@
+Metadata-Version: 2.1
+Name: monotonic
+Version: 1.6
+Summary: An implementation of time.monotonic() for Python 2 & < 3.3
+Home-page: https://github.com/atdt/monotonic
+Author: Ori Livneh
+Author-email: ori@wikimedia.org
+License: Apache
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+
+monotonic
+~~~~~~~~~
+
+This module provides a ``monotonic()`` function which returns the
+value (in fractional seconds) of a clock which never goes backwards.
+
+On Python 3.3 or newer, ``monotonic`` will be an alias of
+``time.monotonic`` from the standard library. On older versions,
+it will fall back to an equivalent implementation:
+
++------------------+----------------------------------------+
+| Linux, BSD, AIX | ``clock_gettime(3)`` |
++------------------+----------------------------------------+
+| Windows | ``GetTickCount`` or ``GetTickCount64`` |
++------------------+----------------------------------------+
+| OS X | ``mach_absolute_time`` |
++------------------+----------------------------------------+
+
+If no suitable implementation exists for the current platform,
+attempting to import this module (or to import from it) will
+cause a ``RuntimeError`` exception to be raised.
+
+
+
diff --git a/contrib/python/monotonic/py2/.dist-info/top_level.txt b/contrib/python/monotonic/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..9f2595e0a4
--- /dev/null
+++ b/contrib/python/monotonic/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+monotonic
diff --git a/contrib/python/monotonic/py2/LICENSE b/contrib/python/monotonic/py2/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/contrib/python/monotonic/py2/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/contrib/python/monotonic/py2/README.md b/contrib/python/monotonic/py2/README.md
new file mode 100644
index 0000000000..1fa8565973
--- /dev/null
+++ b/contrib/python/monotonic/py2/README.md
@@ -0,0 +1,49 @@
+monotonic
+=========
+NOTE: **This library is considered stable and complete, and will not receive
+any further updates. Python versions 3.3 and newer include
+[``time.monotonic()``][0] in the standard library.**
+
+
+This module provides a ``monotonic()`` function which returns the
+value (in fractional seconds) of a clock which never goes backwards.
+It is compatible with Python 2 and Python 3.
+
+On Python 3.3 or newer, ``monotonic`` will be an alias of
+[``time.monotonic``][0] from the standard library. On older versions,
+it will fall back to an equivalent implementation:
+
+ OS | Implementation
+-----------------|-----------------------------------------
+ Linux, BSD, AIX | [clock_gettime][1]
+ Windows | [GetTickCount][2] or [GetTickCount64][3]
+ OS X            | [mach_absolute_time][4]
+
+If no suitable implementation exists for the current platform,
+attempting to import this module (or to import from it) will
+cause a RuntimeError exception to be raised.
+
+monotonic is available via the Python Cheese Shop (PyPI):
+ https://pypi.python.org/pypi/monotonic/
+
+License
+-------
+Copyright 2014, 2015, 2016, 2017 Ori Livneh <ori@wikimedia.org>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+[0]: https://docs.python.org/3/library/time.html#time.monotonic
+[1]: http://linux.die.net/man/3/clock_gettime
+[2]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724408
+[3]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724411
+[4]: https://developer.apple.com/library/mac/qa/qa1398/
diff --git a/contrib/python/monotonic/py2/monotonic.py b/contrib/python/monotonic/py2/monotonic.py
new file mode 100644
index 0000000000..c4372a3ac8
--- /dev/null
+++ b/contrib/python/monotonic/py2/monotonic.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+"""
+ monotonic
+ ~~~~~~~~~
+
+ This module provides a ``monotonic()`` function which returns the
+ value (in fractional seconds) of a clock which never goes backwards.
+
+ On Python 3.3 or newer, ``monotonic`` will be an alias of
+ ``time.monotonic`` from the standard library. On older versions,
+ it will fall back to an equivalent implementation:
+
+ +-------------+----------------------------------------+
+ | Linux, BSD | ``clock_gettime(3)`` |
+ +-------------+----------------------------------------+
+ | Windows | ``GetTickCount`` or ``GetTickCount64`` |
+ +-------------+----------------------------------------+
+ | OS X | ``mach_absolute_time`` |
+ +-------------+----------------------------------------+
+
+ If no suitable implementation exists for the current platform,
+ attempting to import this module (or to import from it) will
+ cause a ``RuntimeError`` exception to be raised.
+
+
+ Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+ monotonic = time.monotonic
+except AttributeError:
+ import ctypes
+ import ctypes.util
+ import os
+ import sys
+ import threading
+ try:
+ if sys.platform == 'darwin': # OS X, iOS
+ # See Technical Q&A QA1398 of the Mac Developer Library:
+ # <https://developer.apple.com/library/mac/qa/qa1398/>
+ libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ """System timebase info. Defined in <mach/mach_time.h>."""
+ _fields_ = (('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32))
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ timebase = mach_timebase_info_data_t()
+ libc.mach_timebase_info(ctypes.byref(timebase))
+ nanoseconds_in_second = 1.0e9
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ nanoseconds = mach_absolute_time() * timebase.numer / timebase.denom
+ return nanoseconds / nanoseconds_in_second
+
+ elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+ if sys.platform.startswith('cygwin'):
+ # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+ # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+ # Ugly hack using the wrong calling convention (in 32-bit mode)
+ # because ctypes has no windll under cygwin (and it also seems that
+ # the code letting you select stdcall in _ctypes doesn't exist under
+ # the preprocessor definitions relevant to cygwin).
+ # This is 'safe' because:
+ # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+ # both calling conventions because they both have no parameters.
+ # 2. libffi masks the problem because after making the call it doesn't
+ # touch anything through esp and epilogue code restores a correct
+ # esp from ebp afterwards.
+ try:
+ kernel32 = ctypes.cdll.kernel32
+ except OSError: # 'No such file or directory'
+ kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+ else:
+ kernel32 = ctypes.windll.kernel32
+
+ GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+ if GetTickCount64:
+ # Windows Vista / Windows Server 2008 or newer.
+ GetTickCount64.restype = ctypes.c_ulonglong
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return GetTickCount64() / 1000.0
+
+ else:
+ # Before Windows Vista.
+ GetTickCount = kernel32.GetTickCount
+ GetTickCount.restype = ctypes.c_uint32
+
+ get_tick_count_lock = threading.Lock()
+ get_tick_count_last_sample = 0
+ get_tick_count_wraparounds = 0
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ global get_tick_count_last_sample
+ global get_tick_count_wraparounds
+
+ with get_tick_count_lock:
+ current_sample = GetTickCount()
+ if current_sample < get_tick_count_last_sample:
+ get_tick_count_wraparounds += 1
+ get_tick_count_last_sample = current_sample
+
+ final_milliseconds = get_tick_count_wraparounds << 32
+ final_milliseconds += get_tick_count_last_sample
+ return final_milliseconds / 1000.0
+
+ else:
+ try:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
+ use_errno=True).clock_gettime
+ except Exception:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
+ use_errno=True).clock_gettime
+
+ class timespec(ctypes.Structure):
+ """Time specification, as described in clock_gettime(3)."""
+ _fields_ = (('tv_sec', ctypes.c_long),
+ ('tv_nsec', ctypes.c_long))
+
+ if sys.platform.startswith('linux'):
+ CLOCK_MONOTONIC = 1
+ elif sys.platform.startswith('freebsd'):
+ CLOCK_MONOTONIC = 4
+ elif sys.platform.startswith('sunos5'):
+ CLOCK_MONOTONIC = 4
+ elif 'bsd' in sys.platform:
+ CLOCK_MONOTONIC = 3
+ elif sys.platform.startswith('aix'):
+ CLOCK_MONOTONIC = ctypes.c_longlong(10)
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ ts = timespec()
+ if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+ # Perform a sanity-check.
+ if monotonic() - monotonic() > 0:
+ raise ValueError('monotonic() is not monotonic!')
+
+ except Exception as e:
+ raise RuntimeError('no suitable implementation for this system: ' + repr(e))
diff --git a/contrib/python/monotonic/py2/ya.make b/contrib/python/monotonic/py2/ya.make
new file mode 100644
index 0000000000..bfee7f6907
--- /dev/null
+++ b/contrib/python/monotonic/py2/ya.make
@@ -0,0 +1,22 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(1.6)
+
+LICENSE(Apache-2.0)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ monotonic.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/monotonic/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/monotonic/py3/LICENSE b/contrib/python/monotonic/py3/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/contrib/python/monotonic/py3/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/contrib/python/monotonic/py3/README.md b/contrib/python/monotonic/py3/README.md
new file mode 100644
index 0000000000..1fa8565973
--- /dev/null
+++ b/contrib/python/monotonic/py3/README.md
@@ -0,0 +1,49 @@
+monotonic
+=========
+NOTE: **This library is considered stable and complete, and will not receive
+any further updates. Python versions 3.3 and newer include
+[``time.monotonic()``][0] in the standard library.**
+
+
+This module provides a ``monotonic()`` function which returns the
+value (in fractional seconds) of a clock which never goes backwards.
+It is compatible with Python 2 and Python 3.
+
+On Python 3.3 or newer, ``monotonic`` will be an alias of
+[``time.monotonic``][0] from the standard library. On older versions,
+it will fall back to an equivalent implementation:
+
+ OS | Implementation
+-----------------|-----------------------------------------
+ Linux, BSD, AIX | [clock_gettime][1]
+ Windows | [GetTickCount][2] or [GetTickCount64][3]
+ OS X            | [mach_absolute_time][4]
+
+If no suitable implementation exists for the current platform,
+attempting to import this module (or to import from it) will
+cause a RuntimeError exception to be raised.
+
+monotonic is available via the Python Cheese Shop (PyPI):
+ https://pypi.python.org/pypi/monotonic/
+
+License
+-------
+Copyright 2014, 2015, 2016, 2017 Ori Livneh <ori@wikimedia.org>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+[0]: https://docs.python.org/3/library/time.html#time.monotonic
+[1]: http://linux.die.net/man/3/clock_gettime
+[2]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724408
+[3]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724411
+[4]: https://developer.apple.com/library/mac/qa/qa1398/
diff --git a/contrib/python/monotonic/ya.make b/contrib/python/monotonic/ya.make
new file mode 100644
index 0000000000..8cdec1c354
--- /dev/null
+++ b/contrib/python/monotonic/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/monotonic/py2)
+ELSE()
+ PEERDIR(contrib/python/monotonic/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/oauth2client/py2/.dist-info/METADATA b/contrib/python/oauth2client/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..b4b28000b1
--- /dev/null
+++ b/contrib/python/oauth2client/py2/.dist-info/METADATA
@@ -0,0 +1,34 @@
+Metadata-Version: 2.1
+Name: oauth2client
+Version: 4.1.3
+Summary: OAuth 2.0 client library
+Home-page: http://github.com/google/oauth2client/
+Author: Google Inc.
+Author-email: jonwayne+oauth2client@google.com
+License: Apache 2.0
+Keywords: google oauth 2.0 http client
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Development Status :: 7 - Inactive
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Dist: httplib2 (>=0.9.1)
+Requires-Dist: pyasn1 (>=0.1.7)
+Requires-Dist: pyasn1-modules (>=0.0.5)
+Requires-Dist: rsa (>=3.1.4)
+Requires-Dist: six (>=1.6.1)
+
+oauth2client is a client library for OAuth 2.0.
+
+Note: oauth2client is now deprecated. No more features will be added to the
+ libraries and the core team is turning down support. We recommend you use
+ `google-auth <https://google-auth.readthedocs.io>`__ and
+ `oauthlib <http://oauthlib.readthedocs.io/>`__.
+
+
diff --git a/contrib/python/oauth2client/py2/.dist-info/top_level.txt b/contrib/python/oauth2client/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..c636bd5953
--- /dev/null
+++ b/contrib/python/oauth2client/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+oauth2client
diff --git a/contrib/python/oauth2client/py2/LICENSE b/contrib/python/oauth2client/py2/LICENSE
new file mode 100644
index 0000000000..c8d76dfc54
--- /dev/null
+++ b/contrib/python/oauth2client/py2/LICENSE
@@ -0,0 +1,210 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Dependent Modules
+=================
+
+This code has the following dependencies
+above and beyond the Python standard library:
+
+httplib2 - MIT License
diff --git a/contrib/python/oauth2client/py2/README.md b/contrib/python/oauth2client/py2/README.md
new file mode 100644
index 0000000000..5e7aade714
--- /dev/null
+++ b/contrib/python/oauth2client/py2/README.md
@@ -0,0 +1,33 @@
+[![Build Status](https://travis-ci.org/google/oauth2client.svg?branch=master)](https://travis-ci.org/google/oauth2client)
+[![Coverage Status](https://coveralls.io/repos/google/oauth2client/badge.svg?branch=master&service=github)](https://coveralls.io/github/google/oauth2client?branch=master)
+[![Documentation Status](https://readthedocs.org/projects/oauth2client/badge/?version=latest)](https://oauth2client.readthedocs.io/)
+
+This is a client library for accessing resources protected by OAuth 2.0.
+
+**Note**: oauth2client is now deprecated. No more features will be added to the
+libraries and the core team is turning down support. We recommend you use
+[google-auth](https://google-auth.readthedocs.io) and [oauthlib](http://oauthlib.readthedocs.io/). For more details on the deprecation, see [oauth2client deprecation](https://google-auth.readthedocs.io/en/latest/oauth2client-deprecation.html).
+
+Installation
+============
+
+To install, simply run the following command in your terminal:
+
+```bash
+$ pip install --upgrade oauth2client
+```
+
+Contributing
+============
+
+Please see the [CONTRIBUTING page][1] for more information. In particular, we
+love pull requests -- but please make sure to sign the contributor license
+agreement.
+
+Supported Python Versions
+=========================
+
+We support Python 2.7 and 3.4+. More information [in the docs][2].
+
+[1]: https://github.com/google/oauth2client/blob/master/CONTRIBUTING.md
+[2]: https://oauth2client.readthedocs.io/#supported-python-versions
diff --git a/contrib/python/oauth2client/py2/oauth2client/__init__.py b/contrib/python/oauth2client/py2/oauth2client/__init__.py
new file mode 100644
index 0000000000..92bc191d43
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/__init__.py
@@ -0,0 +1,24 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client library for using OAuth2, especially with Google APIs."""
+
+__version__ = '4.1.3'
+
+GOOGLE_AUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'
+GOOGLE_DEVICE_URI = 'https://oauth2.googleapis.com/device/code'
+GOOGLE_REVOKE_URI = 'https://oauth2.googleapis.com/revoke'
+GOOGLE_TOKEN_URI = 'https://oauth2.googleapis.com/token'
+GOOGLE_TOKEN_INFO_URI = 'https://oauth2.googleapis.com/tokeninfo'
+
diff --git a/contrib/python/oauth2client/py2/oauth2client/_helpers.py b/contrib/python/oauth2client/py2/oauth2client/_helpers.py
new file mode 100644
index 0000000000..e9123971bc
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/_helpers.py
@@ -0,0 +1,341 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for commonly used utilities."""
+
+import base64
+import functools
+import inspect
+import json
+import logging
+import os
+import warnings
+
+import six
+from six.moves import urllib
+
+
+logger = logging.getLogger(__name__)
+
+POSITIONAL_WARNING = 'WARNING'
+POSITIONAL_EXCEPTION = 'EXCEPTION'
+POSITIONAL_IGNORE = 'IGNORE'
+POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
+ POSITIONAL_IGNORE])
+
+positional_parameters_enforcement = POSITIONAL_WARNING
+
+_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
+_IS_DIR_MESSAGE = '{0}: Is a directory'
+_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
+
+
+def positional(max_positional_args):
+    """A decorator to declare that only the first N arguments may be positional.
+
+ This decorator makes it easy to support Python 3 style keyword-only
+ parameters. For example, in Python 3 it is possible to write::
+
+        def fn(pos1, *, kwonly1=None, kwonly2=None):
+ ...
+
+ All named parameters after ``*`` must be a keyword::
+
+ fn(10, 'kw1', 'kw2') # Raises exception.
+ fn(10, kwonly1='kw1') # Ok.
+
+ Example
+ ^^^^^^^
+
+ To define a function like above, do::
+
+ @positional(1)
+ def fn(pos1, kwonly1=None, kwonly2=None):
+ ...
+
+ If no default value is provided to a keyword argument, it becomes a
+ required keyword argument::
+
+ @positional(0)
+ def fn(required_kw):
+ ...
+
+ This must be called with the keyword parameter::
+
+ fn() # Raises exception.
+ fn(10) # Raises exception.
+ fn(required_kw=10) # Ok.
+
+ When defining instance or class methods always remember to account for
+ ``self`` and ``cls``::
+
+ class MyClass(object):
+
+ @positional(2)
+ def my_method(self, pos1, kwonly1=None):
+ ...
+
+ @classmethod
+ @positional(2)
+ def my_method(cls, pos1, kwonly1=None):
+ ...
+
+ The positional decorator behavior is controlled by
+ ``_helpers.positional_parameters_enforcement``, which may be set to
+ ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
+ ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
+ nothing, respectively, if a declaration is violated.
+
+ Args:
+        max_positional_args: Maximum number of positional arguments. All
+                             parameters after this index must be
+ keyword only.
+
+ Returns:
+ A decorator that prevents using arguments after max_positional_args
+ from being used as positional parameters.
+
+ Raises:
+ TypeError: if a key-word only argument is provided as a positional
+ parameter, but only if
+ _helpers.positional_parameters_enforcement is set to
+ POSITIONAL_EXCEPTION.
+ """
+
+ def positional_decorator(wrapped):
+ @functools.wraps(wrapped)
+ def positional_wrapper(*args, **kwargs):
+ if len(args) > max_positional_args:
+ plural_s = ''
+ if max_positional_args != 1:
+ plural_s = 's'
+ message = ('{function}() takes at most {args_max} positional '
+ 'argument{plural} ({args_given} given)'.format(
+ function=wrapped.__name__,
+ args_max=max_positional_args,
+ args_given=len(args),
+ plural=plural_s))
+ if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
+ raise TypeError(message)
+ elif positional_parameters_enforcement == POSITIONAL_WARNING:
+ logger.warning(message)
+ return wrapped(*args, **kwargs)
+ return positional_wrapper
+
+ if isinstance(max_positional_args, six.integer_types):
+ return positional_decorator
+ else:
+ args, _, _, defaults = inspect.getargspec(max_positional_args)
+ return positional(len(args) - len(defaults))(max_positional_args)
+
+
+def scopes_to_string(scopes):
+ """Converts scope value to a string.
+
+ If scopes is a string then it is simply passed through. If scopes is an
+ iterable then a string is returned that is all the individual scopes
+ concatenated with spaces.
+
+ Args:
+ scopes: string or iterable of strings, the scopes.
+
+ Returns:
+ The scopes formatted as a single string.
+ """
+ if isinstance(scopes, six.string_types):
+ return scopes
+ else:
+ return ' '.join(scopes)
+
+
+def string_to_scopes(scopes):
+    """Converts stringified scope value to a list.
+
+    If scopes is a list then it is simply passed through. If scopes is a
+ string then a list of each individual scope is returned.
+
+ Args:
+ scopes: a string or iterable of strings, the scopes.
+
+ Returns:
+ The scopes in a list.
+ """
+ if not scopes:
+ return []
+ elif isinstance(scopes, six.string_types):
+ return scopes.split(' ')
+ else:
+ return scopes
+
+
+def parse_unique_urlencoded(content):
+ """Parses unique key-value parameters from urlencoded content.
+
+ Args:
+ content: string, URL-encoded key-value pairs.
+
+ Returns:
+ dict, The key-value pairs from ``content``.
+
+ Raises:
+ ValueError: if one of the keys is repeated.
+ """
+ urlencoded_params = urllib.parse.parse_qs(content)
+ params = {}
+ for key, value in six.iteritems(urlencoded_params):
+ if len(value) != 1:
+ msg = ('URL-encoded content contains a repeated value:'
+ '%s -> %s' % (key, ', '.join(value)))
+ raise ValueError(msg)
+ params[key] = value[0]
+ return params
+
+
+def update_query_params(uri, params):
+ """Updates a URI with new query parameters.
+
+ If a given key from ``params`` is repeated in the ``uri``, then
+ the URI will be considered invalid and an error will occur.
+
+ If the URI is valid, then each value from ``params`` will
+ replace the corresponding value in the query parameters (if
+ it exists).
+
+ Args:
+ uri: string, A valid URI, with potential existing query parameters.
+ params: dict, A dictionary of query parameters.
+
+ Returns:
+ The same URI but with the new query parameters added.
+ """
+ parts = urllib.parse.urlparse(uri)
+ query_params = parse_unique_urlencoded(parts.query)
+ query_params.update(params)
+ new_query = urllib.parse.urlencode(query_params)
+ new_parts = parts._replace(query=new_query)
+ return urllib.parse.urlunparse(new_parts)
+
+
+def _add_query_parameter(url, name, value):
+ """Adds a query parameter to a url.
+
+ Replaces the current value if it already exists in the URL.
+
+ Args:
+ url: string, url to add the query parameter to.
+ name: string, query parameter name.
+ value: string, query parameter value.
+
+ Returns:
+ Updated query parameter. Does not update the url if value is None.
+ """
+ if value is None:
+ return url
+ else:
+ return update_query_params(url, {name: value})
+
+
+def validate_file(filename):
+ if os.path.islink(filename):
+ raise IOError(_SYM_LINK_MESSAGE.format(filename))
+ elif os.path.isdir(filename):
+ raise IOError(_IS_DIR_MESSAGE.format(filename))
+ elif not os.path.isfile(filename):
+ warnings.warn(_MISSING_FILE_MESSAGE.format(filename))
+
+
+def _parse_pem_key(raw_key_input):
+ """Identify and extract PEM keys.
+
+ Determines whether the given key is in the format of PEM key, and extracts
+ the relevant part of the key if it is.
+
+ Args:
+ raw_key_input: The contents of a private key file (either PEM or
+ PKCS12).
+
+ Returns:
+ string, The actual key if the contents are from a PEM file, or
+ else None.
+ """
+ offset = raw_key_input.find(b'-----BEGIN ')
+ if offset != -1:
+ return raw_key_input[offset:]
+
+
+def _json_encode(data):
+ return json.dumps(data, separators=(',', ':'))
+
+
+def _to_bytes(value, encoding='ascii'):
+ """Converts a string value to bytes, if necessary.
+
+ Unfortunately, ``six.b`` is insufficient for this task since in
+ Python2 it does not modify ``unicode`` objects.
+
+ Args:
+ value: The string/bytes value to be converted.
+ encoding: The encoding to use to convert unicode to bytes. Defaults
+ to "ascii", which will not allow any characters from ordinals
+ larger than 127. Other useful values are "latin-1", which
+            will only allow byte ordinals (up to 255) and "utf-8",
+ which will encode any unicode that needs to be.
+
+ Returns:
+ The original value converted to bytes (if unicode) or as passed in
+ if it started out as bytes.
+
+ Raises:
+ ValueError if the value could not be converted to bytes.
+ """
+ result = (value.encode(encoding)
+ if isinstance(value, six.text_type) else value)
+ if isinstance(result, six.binary_type):
+ return result
+ else:
+ raise ValueError('{0!r} could not be converted to bytes'.format(value))
+
+
+def _from_bytes(value):
+ """Converts bytes to a string value, if necessary.
+
+ Args:
+ value: The string/bytes value to be converted.
+
+ Returns:
+ The original value converted to unicode (if bytes) or as passed in
+ if it started out as unicode.
+
+ Raises:
+ ValueError if the value could not be converted to unicode.
+ """
+ result = (value.decode('utf-8')
+ if isinstance(value, six.binary_type) else value)
+ if isinstance(result, six.text_type):
+ return result
+ else:
+ raise ValueError(
+ '{0!r} could not be converted to unicode'.format(value))
+
+
+def _urlsafe_b64encode(raw_bytes):
+ raw_bytes = _to_bytes(raw_bytes, encoding='utf-8')
+ return base64.urlsafe_b64encode(raw_bytes).rstrip(b'=')
+
+
+def _urlsafe_b64decode(b64string):
+ # Guard against unicode strings, which base64 can't handle.
+ b64string = _to_bytes(b64string)
+ padded = b64string + b'=' * (4 - len(b64string) % 4)
+ return base64.urlsafe_b64decode(padded)
diff --git a/contrib/python/oauth2client/py2/oauth2client/_openssl_crypt.py b/contrib/python/oauth2client/py2/oauth2client/_openssl_crypt.py
new file mode 100644
index 0000000000..77fac74354
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/_openssl_crypt.py
@@ -0,0 +1,136 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""OpenSSL Crypto-related routines for oauth2client."""
+
+from OpenSSL import crypto
+
+from oauth2client import _helpers
+
+
+class OpenSSLVerifier(object):
+ """Verifies the signature on a message."""
+
+ def __init__(self, pubkey):
+ """Constructor.
+
+ Args:
+ pubkey: OpenSSL.crypto.PKey, The public key to verify with.
+ """
+ self._pubkey = pubkey
+
+ def verify(self, message, signature):
+ """Verifies a message against a signature.
+
+ Args:
+ message: string or bytes, The message to verify. If string, will be
+ encoded to bytes as utf-8.
+ signature: string or bytes, The signature on the message. If string,
+ will be encoded to bytes as utf-8.
+
+ Returns:
+ True if message was signed by the private key associated with the
+ public key that this object was constructed with.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ signature = _helpers._to_bytes(signature, encoding='utf-8')
+ try:
+ crypto.verify(self._pubkey, signature, message, 'sha256')
+ return True
+ except crypto.Error:
+ return False
+
+ @staticmethod
+ def from_string(key_pem, is_x509_cert):
+        """Construct a Verifier instance from a string.
+
+ Args:
+ key_pem: string, public key in PEM format.
+ is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+ is expected to be an RSA key in PEM format.
+
+ Returns:
+ Verifier instance.
+
+ Raises:
+ OpenSSL.crypto.Error: if the key_pem can't be parsed.
+ """
+ key_pem = _helpers._to_bytes(key_pem)
+ if is_x509_cert:
+ pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
+ else:
+ pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
+ return OpenSSLVerifier(pubkey)
+
+
+class OpenSSLSigner(object):
+ """Signs messages with a private key."""
+
+ def __init__(self, pkey):
+ """Constructor.
+
+ Args:
+ pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
+ """
+ self._key = pkey
+
+ def sign(self, message):
+ """Signs a message.
+
+ Args:
+ message: bytes, Message to be signed.
+
+ Returns:
+ string, The signature of the message for the given key.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return crypto.sign(self._key, message, 'sha256')
+
+ @staticmethod
+ def from_string(key, password=b'notasecret'):
+ """Construct a Signer instance from a string.
+
+ Args:
+ key: string, private key in PKCS12 or PEM format.
+ password: string, password for the private key file.
+
+ Returns:
+ Signer instance.
+
+ Raises:
+ OpenSSL.crypto.Error if the key can't be parsed.
+ """
+ key = _helpers._to_bytes(key)
+ parsed_pem_key = _helpers._parse_pem_key(key)
+ if parsed_pem_key:
+ pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
+ else:
+ password = _helpers._to_bytes(password, encoding='utf-8')
+ pkey = crypto.load_pkcs12(key, password).get_privatekey()
+ return OpenSSLSigner(pkey)
+
+
+def pkcs12_key_as_pem(private_key_bytes, private_key_password):
+ """Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.
+
+ Args:
+ private_key_bytes: Bytes. PKCS#12 key in DER format.
+ private_key_password: String. Password for PKCS#12 key.
+
+ Returns:
+ String. PEM contents of ``private_key_bytes``.
+ """
+ private_key_password = _helpers._to_bytes(private_key_password)
+ pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)
+ return crypto.dump_privatekey(crypto.FILETYPE_PEM,
+ pkcs12.get_privatekey())
diff --git a/contrib/python/oauth2client/py2/oauth2client/_pkce.py b/contrib/python/oauth2client/py2/oauth2client/_pkce.py
new file mode 100644
index 0000000000..e4952d8c2f
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/_pkce.py
@@ -0,0 +1,67 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utility functions for implementing Proof Key for Code Exchange (PKCE) by OAuth
+Public Clients
+
+See RFC7636.
+"""
+
+import base64
+import hashlib
+import os
+
+
+def code_verifier(n_bytes=64):
+ """
+ Generates a 'code_verifier' as described in section 4.1 of RFC 7636.
+
+ This is a 'high-entropy cryptographic random string' that will be
+ impractical for an attacker to guess.
+
+ Args:
+ n_bytes: integer between 31 and 96, inclusive. default: 64
+ number of bytes of entropy to include in verifier.
+
+ Returns:
+ Bytestring, representing urlsafe base64-encoded random data.
+ """
+ verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')
+ # https://tools.ietf.org/html/rfc7636#section-4.1
+ # minimum length of 43 characters and a maximum length of 128 characters.
+ if len(verifier) < 43:
+ raise ValueError("Verifier too short. n_bytes must be > 30.")
+ elif len(verifier) > 128:
+ raise ValueError("Verifier too long. n_bytes must be < 97.")
+ else:
+ return verifier
+
+
+def code_challenge(verifier):
+ """
+ Creates a 'code_challenge' as described in section 4.2 of RFC 7636
+ by taking the sha256 hash of the verifier and then urlsafe
+ base64-encoding it.
+
+ Args:
+ verifier: bytestring, representing a code_verifier as generated by
+ code_verifier().
+
+ Returns:
+ Bytestring, representing a urlsafe base64-encoded sha256 hash digest,
+ without '=' padding.
+ """
+ digest = hashlib.sha256(verifier).digest()
+ return base64.urlsafe_b64encode(digest).rstrip(b'=')
diff --git a/contrib/python/oauth2client/py2/oauth2client/_pure_python_crypt.py b/contrib/python/oauth2client/py2/oauth2client/_pure_python_crypt.py
new file mode 100644
index 0000000000..2c5d43aae9
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/_pure_python_crypt.py
@@ -0,0 +1,184 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pure Python crypto-related routines for oauth2client.
+
+Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
+to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
+certificates.
+"""
+
+from pyasn1.codec.der import decoder
+from pyasn1_modules import pem
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+import rsa
+import six
+
+from oauth2client import _helpers
+
+
+_PKCS12_ERROR = r"""\
+PKCS12 format is not supported by the RSA library.
+Either install PyOpenSSL, or please convert .p12 format
+to .pem format:
+ $ cat key.p12 | \
+ > openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
+ > openssl rsa > key.pem
+"""
+
+_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
+_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
+ '-----END RSA PRIVATE KEY-----')
+_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
+ '-----END PRIVATE KEY-----')
+_PKCS8_SPEC = PrivateKeyInfo()
+
+
+def _bit_list_to_bytes(bit_list):
+ """Converts an iterable of 1's and 0's to bytes.
+
+ Combines the list 8 at a time, treating each group of 8 bits
+ as a single byte.
+ """
+ num_bits = len(bit_list)
+ byte_vals = bytearray()
+ for start in six.moves.xrange(0, num_bits, 8):
+ curr_bits = bit_list[start:start + 8]
+ char_val = sum(val * digit
+ for val, digit in zip(_POW2, curr_bits))
+ byte_vals.append(char_val)
+ return bytes(byte_vals)
+
+
+class RsaVerifier(object):
+ """Verifies the signature on a message.
+
+ Args:
+ pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
+ """
+
+ def __init__(self, pubkey):
+ self._pubkey = pubkey
+
+ def verify(self, message, signature):
+ """Verifies a message against a signature.
+
+ Args:
+ message: string or bytes, The message to verify. If string, will be
+ encoded to bytes as utf-8.
+ signature: string or bytes, The signature on the message. If
+ string, will be encoded to bytes as utf-8.
+
+ Returns:
+ True if message was signed by the private key associated with the
+ public key that this object was constructed with.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ try:
+ return rsa.pkcs1.verify(message, signature, self._pubkey)
+ except (ValueError, rsa.pkcs1.VerificationError):
+ return False
+
+ @classmethod
+ def from_string(cls, key_pem, is_x509_cert):
+ """Construct an RsaVerifier instance from a string.
+
+ Args:
+ key_pem: string, public key in PEM format.
+ is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+ is expected to be an RSA key in PEM format.
+
+ Returns:
+ RsaVerifier instance.
+
+ Raises:
+ ValueError: if the key_pem can't be parsed. In either case, error
+ will begin with 'No PEM start marker'. If
+ ``is_x509_cert`` is True, will fail to find the
+ "-----BEGIN CERTIFICATE-----" error, otherwise fails
+ to find "-----BEGIN RSA PUBLIC KEY-----".
+ """
+ key_pem = _helpers._to_bytes(key_pem)
+ if is_x509_cert:
+ der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
+ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
+ if remaining != b'':
+ raise ValueError('Unused bytes', remaining)
+
+ cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
+ key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
+ pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
+ else:
+ pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
+ return cls(pubkey)
+
+
+class RsaSigner(object):
+ """Signs messages with a private key.
+
+ Args:
+ pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
+ """
+
+ def __init__(self, pkey):
+ self._key = pkey
+
+ def sign(self, message):
+ """Signs a message.
+
+ Args:
+ message: bytes, Message to be signed.
+
+ Returns:
+ string, The signature of the message for the given key.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return rsa.pkcs1.sign(message, self._key, 'SHA-256')
+
+ @classmethod
+ def from_string(cls, key, password='notasecret'):
+ """Construct an RsaSigner instance from a string.
+
+ Args:
+ key: string, private key in PEM format.
+ password: string, password for private key file. Unused for PEM
+ files.
+
+ Returns:
+ RsaSigner instance.
+
+ Raises:
+ ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
+ PEM format.
+ """
+ key = _helpers._from_bytes(key) # pem expects str in Py3
+ marker_id, key_bytes = pem.readPemBlocksFromFile(
+ six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
+
+ if marker_id == 0:
+ pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
+ format='DER')
+ elif marker_id == 1:
+ key_info, remaining = decoder.decode(
+ key_bytes, asn1Spec=_PKCS8_SPEC)
+ if remaining != b'':
+ raise ValueError('Unused bytes', remaining)
+ pkey_info = key_info.getComponentByName('privateKey')
+ pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
+ format='DER')
+ else:
+ raise ValueError('No key could be detected.')
+
+ return cls(pkey)
diff --git a/contrib/python/oauth2client/py2/oauth2client/_pycrypto_crypt.py b/contrib/python/oauth2client/py2/oauth2client/_pycrypto_crypt.py
new file mode 100644
index 0000000000..fd2ce0cd72
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/_pycrypto_crypt.py
@@ -0,0 +1,124 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""pyCrypto Crypto-related routines for oauth2client."""
+
+from Crypto.Hash import SHA256
+from Crypto.PublicKey import RSA
+from Crypto.Signature import PKCS1_v1_5
+from Crypto.Util.asn1 import DerSequence
+
+from oauth2client import _helpers
+
+
+class PyCryptoVerifier(object):
+ """Verifies the signature on a message."""
+
+ def __init__(self, pubkey):
+ """Constructor.
+
+ Args:
+ pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify
+ with.
+ """
+ self._pubkey = pubkey
+
+ def verify(self, message, signature):
+ """Verifies a message against a signature.
+
+ Args:
+ message: string or bytes, The message to verify. If string, will be
+ encoded to bytes as utf-8.
+ signature: string or bytes, The signature on the message.
+
+ Returns:
+ True if message was signed by the private key associated with the
+ public key that this object was constructed with.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return PKCS1_v1_5.new(self._pubkey).verify(
+ SHA256.new(message), signature)
+
+ @staticmethod
+ def from_string(key_pem, is_x509_cert):
+        """Construct a Verifier instance from a string.
+
+ Args:
+ key_pem: string, public key in PEM format.
+ is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+ is expected to be an RSA key in PEM format.
+
+ Returns:
+ Verifier instance.
+ """
+ if is_x509_cert:
+ key_pem = _helpers._to_bytes(key_pem)
+ pemLines = key_pem.replace(b' ', b'').split()
+ certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[1:-1]))
+ certSeq = DerSequence()
+ certSeq.decode(certDer)
+ tbsSeq = DerSequence()
+ tbsSeq.decode(certSeq[0])
+ pubkey = RSA.importKey(tbsSeq[6])
+ else:
+ pubkey = RSA.importKey(key_pem)
+ return PyCryptoVerifier(pubkey)
+
+
+class PyCryptoSigner(object):
+ """Signs messages with a private key."""
+
+ def __init__(self, pkey):
+ """Constructor.
+
+ Args:
+ pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
+ """
+ self._key = pkey
+
+ def sign(self, message):
+ """Signs a message.
+
+ Args:
+ message: string, Message to be signed.
+
+ Returns:
+ string, The signature of the message for the given key.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
+
+ @staticmethod
+ def from_string(key, password='notasecret'):
+ """Construct a Signer instance from a string.
+
+ Args:
+ key: string, private key in PEM format.
+ password: string, password for private key file. Unused for PEM
+ files.
+
+ Returns:
+ Signer instance.
+
+ Raises:
+ NotImplementedError if the key isn't in PEM format.
+ """
+ parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))
+ if parsed_pem_key:
+ pkey = RSA.importKey(parsed_pem_key)
+ else:
+ raise NotImplementedError(
+ 'No key in PEM format was detected. This implementation '
+ 'can only use the PyCrypto library for keys in PEM '
+ 'format.')
+ return PyCryptoSigner(pkey)
diff --git a/contrib/python/oauth2client/py2/oauth2client/client.py b/contrib/python/oauth2client/py2/oauth2client/client.py
new file mode 100644
index 0000000000..7618960e44
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/client.py
@@ -0,0 +1,2170 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""An OAuth 2.0 client.
+
+Tools for interacting with OAuth 2.0 protected resources.
+"""
+
+import collections
+import copy
+import datetime
+import json
+import logging
+import os
+import shutil
+import socket
+import sys
+import tempfile
+
+import six
+from six.moves import http_client
+from six.moves import urllib
+
+import oauth2client
+from oauth2client import _helpers
+from oauth2client import _pkce
+from oauth2client import clientsecrets
+from oauth2client import transport
+
+
# Optional crypto support: record whether the `crypt` module could be
# imported at all, and whether its OpenSSL-backed verifier is present.
HAS_OPENSSL = False
HAS_CRYPTO = False
try:
    from oauth2client import crypt
    HAS_CRYPTO = True
    # crypt.OpenSSLVerifier is None when the OpenSSL backend is missing.
    HAS_OPENSSL = crypt.OpenSSLVerifier is not None
except ImportError:  # pragma: NO COVER
    pass
+
+
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

# Which certs to use to validate id_tokens received.
ID_TOKEN_VERIFICATION_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
# This symbol previously had a typo in the name; we keep the old name
# around for now, but will remove it in the future.
ID_TOKEN_VERIFICATON_CERTS = ID_TOKEN_VERIFICATION_CERTS

# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'

# The value representing user credentials.
AUTHORIZED_USER = 'authorized_user'

# The value representing service account credentials.
SERVICE_ACCOUNT = 'service_account'

# The environment variable pointing to the file with local
# Application Default Credentials.
GOOGLE_APPLICATION_CREDENTIALS = 'GOOGLE_APPLICATION_CREDENTIALS'
# The ~/.config subdirectory containing gcloud credentials. Intended
# to be swapped out in tests.
_CLOUDSDK_CONFIG_DIRECTORY = 'gcloud'
# The environment variable name which can replace ~/.config if set.
_CLOUDSDK_CONFIG_ENV_VAR = 'CLOUDSDK_CONFIG'

# The error message we show users when we can't find the Application
# Default Credentials.
ADC_HELP_MSG = (
    'The Application Default Credentials are not available. They are '
    'available if running in Google Compute Engine. Otherwise, the '
    'environment variable ' +
    GOOGLE_APPLICATION_CREDENTIALS +
    ' must be defined pointing to a file defining the credentials. See '
    'https://developers.google.com/accounts/docs/'
    'application-default-credentials for more information.')

# File name of the "well-known" Application Default Credentials file.
_WELL_KNOWN_CREDENTIALS_FILE = 'application_default_credentials.json'

# The access token along with the seconds in which it expires.
AccessTokenInfo = collections.namedtuple(
    'AccessTokenInfo', ['access_token', 'expires_in'])

DEFAULT_ENV_NAME = 'UNKNOWN'

# If set to 'True', the GCE check (_detect_gce_environment) is skipped
# when determining the environment.
NO_GCE_CHECK = os.getenv('NO_GCE_CHECK', 'False')

# Timeout in seconds to wait for the GCE metadata server when detecting the
# GCE environment.
try:
    GCE_METADATA_TIMEOUT = int(os.getenv('GCE_METADATA_TIMEOUT', 3))
except ValueError:  # pragma: NO COVER
    # A non-integer override falls back to the 3-second default.
    GCE_METADATA_TIMEOUT = 3

# Environment variable inspected to recognize an App Engine runtime.
_SERVER_SOFTWARE = 'SERVER_SOFTWARE'
_GCE_METADATA_URI = 'http://' + os.getenv('GCE_METADATA_IP', '169.254.169.254')
_METADATA_FLAVOR_HEADER = 'metadata-flavor'  # lowercase header
_DESIRED_METADATA_FLAVOR = 'Google'
_GCE_HEADERS = {_METADATA_FLAVOR_HEADER: _DESIRED_METADATA_FLAVOR}

# Expose utcnow() at module level to allow for
# easier testing (by replacing with a stub).
_UTCNOW = datetime.datetime.utcnow

# NOTE: These names were previously defined in this module but have been
# moved into `oauth2client.transport`.
clean_headers = transport.clean_headers
MemoryCache = transport.MemoryCache
REFRESH_STATUS_CODES = transport.REFRESH_STATUS_CODES
+
+
class SETTINGS(object):
    """Settings namespace for globally defined values."""
    # Cached name of the detected runtime environment (e.g.
    # 'GAE_PRODUCTION', 'GAE_LOCAL', 'GCE_PRODUCTION'); None until one of
    # the detection helpers has run.
    env_name = None
+
+
class Error(Exception):
    """Base error for this module.

    Catching this type also catches every more specific error class
    defined below.
    """
+
+
class FlowExchangeError(Error):
    """Error trying to exchange an authorization grant for an access token.

    The grant (e.g. an authorization code) could not be traded for an
    access token.
    """
+
+
class AccessTokenRefreshError(Error):
    """Error trying to refresh an expired access token.

    See HttpAccessTokenRefreshError for the variant that also carries
    the HTTP status of the failed refresh request.
    """
+
+
class HttpAccessTokenRefreshError(AccessTokenRefreshError):
    """Error (with HTTP status) trying to refresh an expired access token."""

    def __init__(self, *args, **kwargs):
        # Keep the HTTP status on the instance; the base exception only
        # carries the positional message arguments.
        self.status = kwargs.get('status')
        super(HttpAccessTokenRefreshError, self).__init__(*args)
+
+
class TokenRevokeError(Error):
    """Error trying to revoke a token.

    Raised when a revoke request does not come back with a 200 OK.
    """
+
+
class UnknownClientSecretsFlowError(Error):
    """The client secrets file called for an unknown type of OAuth 2.0 flow.

    Raised when a client secrets file requests a flow type this module
    does not recognize.
    """
+
+
class AccessTokenCredentialsError(Error):
    """Having only the access_token means no refresh is possible.

    Raised when a refresh is attempted on credentials that carry only an
    access token (see AccessTokenCredentials._refresh).
    """
+
+
class VerifyJwtTokenError(Error):
    """Could not retrieve certificates for validation.

    The certificates needed to validate a JWT could not be fetched.
    """
+
+
class NonAsciiHeaderError(Error):
    """Header names and values must be ASCII strings.

    Raised when a non-ASCII HTTP header name or value is encountered.
    """
+
+
class ApplicationDefaultCredentialsError(Error):
    """Error retrieving the Application Default Credentials.

    See ADC_HELP_MSG for the guidance shown to users in this case.
    """
+
+
class OAuth2DeviceCodeError(Error):
    """Error trying to retrieve a device code.

    The device-flow request for a device code did not succeed.
    """
+
+
class CryptoUnavailableError(Error, NotImplementedError):
    """Raised when a crypto library is required, but none is available.

    Also subclasses NotImplementedError, so it can be caught as either
    exception type.
    """
+
+
+def _parse_expiry(expiry):
+ if expiry and isinstance(expiry, datetime.datetime):
+ return expiry.strftime(EXPIRY_FORMAT)
+ else:
+ return None
+
+
class Credentials(object):
    """Base class for all Credentials objects.

    Subclasses must define an authorize() method that applies the credentials
    to an HTTP transport.

    Subclasses must also specify a classmethod named 'from_json' that takes a
    JSON string as input and returns an instantiated Credentials object.
    """

    # Instance attributes that to_json() strips from the serialized form;
    # the store is runtime state, not part of the credential itself.
    NON_SERIALIZED_MEMBERS = frozenset(['store'])

    def authorize(self, http):
        """Take an httplib2.Http instance (or equivalent) and authorizes it.

        Authorizes it for the set of credentials, usually by replacing
        http.request() with a method that adds in the appropriate headers and
        then delegates to the original Http.request() method.

        Args:
            http: httplib2.Http, an http object to be used to make the refresh
                  request.
        """
        raise NotImplementedError

    def refresh(self, http):
        """Forces a refresh of the access_token.

        Args:
            http: httplib2.Http, an http object to be used to make the refresh
                  request.
        """
        raise NotImplementedError

    def revoke(self, http):
        """Revokes a refresh_token and makes the credentials void.

        Args:
            http: httplib2.Http, an http object to be used to make the revoke
                  request.
        """
        raise NotImplementedError

    def apply(self, headers):
        """Add the authorization to the headers.

        Args:
            headers: dict, the headers to add the Authorization header to.
        """
        raise NotImplementedError

    def _to_json(self, strip, to_serialize=None):
        """Utility function that creates JSON repr. of a Credentials object.

        Args:
            strip: array, An array of names of members to exclude from the
                   JSON.
            to_serialize: dict, (Optional) The properties for this object
                          that will be serialized. This allows callers to
                          modify before serializing.

        Returns:
            string, a JSON representation of this instance, suitable to pass
            to from_json().
        """
        curr_type = self.__class__
        if to_serialize is None:
            to_serialize = copy.copy(self.__dict__)
        else:
            # Assumes it is a str->str dictionary, so we don't deep copy.
            to_serialize = copy.copy(to_serialize)
        for member in strip:
            if member in to_serialize:
                del to_serialize[member]
        # datetime instances are not JSON-serializable; store the expiry as
        # a formatted string (or None).
        to_serialize['token_expiry'] = _parse_expiry(
            to_serialize.get('token_expiry'))
        # Add in information we will need later to reconstitute this instance.
        to_serialize['_class'] = curr_type.__name__
        to_serialize['_module'] = curr_type.__module__
        # Normalize values json.dumps cannot handle: bytes -> text and
        # set -> list. Only values are rewritten (no keys added/removed),
        # so mutating during items() iteration is safe.
        for key, val in to_serialize.items():
            if isinstance(val, bytes):
                to_serialize[key] = val.decode('utf-8')
            if isinstance(val, set):
                to_serialize[key] = list(val)
        return json.dumps(to_serialize)

    def to_json(self):
        """Creating a JSON representation of an instance of Credentials.

        Returns:
            string, a JSON representation of this instance, suitable to pass
            to from_json().
        """
        return self._to_json(self.NON_SERIALIZED_MEMBERS)

    @classmethod
    def new_from_json(cls, json_data):
        """Utility class method to instantiate a Credentials subclass from
        JSON.

        Expects the JSON string to have been produced by to_json().

        Args:
            json_data: string or bytes, JSON from to_json().

        Returns:
            An instance of the subclass of Credentials that was serialized
            with to_json().
        """
        json_data_as_unicode = _helpers._from_bytes(json_data)
        data = json.loads(json_data_as_unicode)
        # Find and call the right classmethod from_json() to restore
        # the object.
        module_name = data['_module']
        try:
            module_obj = __import__(module_name)
        except ImportError:
            # In case there's an object from the old package structure,
            # update it
            module_name = module_name.replace('.googleapiclient', '')
            module_obj = __import__(module_name)

        # Import again with fromlist so module_obj is the leaf module
        # rather than the top-level package plain __import__ returns.
        module_obj = __import__(module_name,
                                fromlist=module_name.split('.')[:-1])
        kls = getattr(module_obj, data['_class'])
        return kls.from_json(json_data_as_unicode)

    @classmethod
    def from_json(cls, unused_data):
        """Instantiate a Credentials object from a JSON description of it.

        The JSON should have been produced by calling .to_json() on the
        object.

        Args:
            unused_data: dict, A deserialized JSON object.

        Returns:
            An instance of a Credentials subclass.
        """
        return Credentials()
+
+
class Flow(object):
    """Base class for all Flow objects.

    Serves only as a common ancestor for concrete OAuth 2.0 flow
    implementations; it defines no behavior of its own.
    """
    pass
+
+
class Storage(object):
    """Base class for all Storage objects.

    Store and retrieve a single credential. This class supports locking
    such that multiple processes and threads can operate on a single
    store.

    Subclasses implement the locked_* methods; the public get/put/delete
    wrappers handle lock acquisition and release.
    """
    def __init__(self, lock=None):
        """Create a Storage instance.

        Args:
            lock: An optional threading.Lock-like object. Must implement at
                  least acquire() and release(). Does not need to be
                  re-entrant.
        """
        self._lock = lock

    def acquire_lock(self):
        """Acquires any lock necessary to access this Storage.

        This lock is not reentrant.
        """
        if self._lock is not None:
            self._lock.acquire()

    def release_lock(self):
        """Release the Storage lock.

        Trying to release a lock that isn't held will result in a
        RuntimeError in the case of a threading.Lock or multiprocessing.Lock.
        """
        if self._lock is not None:
            self._lock.release()

    def locked_get(self):
        """Retrieve credential.

        The Storage lock must be held when this is called.

        Returns:
            oauth2client.client.Credentials
        """
        raise NotImplementedError

    def locked_put(self, credentials):
        """Write a credential.

        The Storage lock must be held when this is called.

        Args:
            credentials: Credentials, the credentials to store.
        """
        raise NotImplementedError

    def locked_delete(self):
        """Delete a credential.

        The Storage lock must be held when this is called.
        """
        raise NotImplementedError

    def get(self):
        """Retrieve credential.

        The Storage lock must *not* be held when this is called.

        Returns:
            oauth2client.client.Credentials
        """
        self.acquire_lock()
        try:
            return self.locked_get()
        finally:
            self.release_lock()

    def put(self, credentials):
        """Write a credential.

        The Storage lock must *not* be held when this is called; this
        method acquires and releases the lock itself.

        Args:
            credentials: Credentials, the credentials to store.
        """
        self.acquire_lock()
        try:
            self.locked_put(credentials)
        finally:
            self.release_lock()

    def delete(self):
        """Delete credential.

        Frees any resources associated with storing the credential.
        The Storage lock must *not* be held when this is called.

        Returns:
            None
        """
        self.acquire_lock()
        try:
            return self.locked_delete()
        finally:
            self.release_lock()
+
+
class OAuth2Credentials(Credentials):
    """Credentials object for OAuth 2.0.

    Credentials can be applied to an httplib2.Http object using the
    authorize() method, which then adds the OAuth 2.0 access token to each
    request.

    OAuth2Credentials objects may be safely pickled and unpickled.
    """

    @_helpers.positional(8)
    def __init__(self, access_token, client_id, client_secret, refresh_token,
                 token_expiry, token_uri, user_agent, revoke_uri=None,
                 id_token=None, token_response=None, scopes=None,
                 token_info_uri=None, id_token_jwt=None):
        """Create an instance of OAuth2Credentials.

        This constructor is not usually called by the user, instead
        OAuth2Credentials objects are instantiated by the
        OAuth2WebServerFlow.

        Args:
            access_token: string, access token.
            client_id: string, client identifier.
            client_secret: string, client secret.
            refresh_token: string, refresh token.
            token_expiry: datetime, when the access_token expires.
            token_uri: string, URI of token endpoint.
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            revoke_uri: string, URI for revoke endpoint. Defaults to None; a
                        token can't be revoked if this is None.
            id_token: object, The identity of the resource owner.
            token_response: dict, the decoded response to the token request.
                            None if a token hasn't been requested yet. Stored
                            because some providers (e.g. wordpress.com)
                            include extra fields that clients may want.
            scopes: list, authorized scopes for these credentials.
            token_info_uri: string, the URI for the token info endpoint.
                            Defaults to None; scopes can not be refreshed if
                            this is None.
            id_token_jwt: string, the encoded and signed identity JWT. The
                          decoded version of this is stored in id_token.

        Notes:
            store: callable, A callable that when passed a Credential
                   will store the credential back to where it came from.
                   This is needed to store the latest access_token if it
                   has expired and been refreshed.
        """
        self.access_token = access_token
        self.client_id = client_id
        self.client_secret = client_secret
        self.refresh_token = refresh_token
        self.store = None
        self.token_expiry = token_expiry
        self.token_uri = token_uri
        self.user_agent = user_agent
        self.revoke_uri = revoke_uri
        self.id_token = id_token
        self.id_token_jwt = id_token_jwt
        self.token_response = token_response
        self.scopes = set(_helpers.string_to_scopes(scopes or []))
        self.token_info_uri = token_info_uri

        # True if the credentials have been revoked or expired and can't be
        # refreshed.
        self.invalid = False

    def authorize(self, http):
        """Authorize an httplib2.Http instance with these credentials.

        The modified http.request method will add authentication headers to
        each request and will refresh access_tokens when a 401 is received
        on a request. In addition the http.request method has a credentials
        property, http.request.credentials, which is the Credentials object
        that authorized it.

        Args:
            http: An instance of ``httplib2.Http`` or something that acts
                  like it.

        Returns:
            A modified instance of http that was passed in.

        Example::

            h = httplib2.Http()
            h = credentials.authorize(h)

        You can't create a new OAuth subclass of httplib2.Authentication
        because it never gets passed the absolute URI, which is needed for
        signing. So instead we have to overload 'request' with a closure
        that adds in the Authorization header and then calls the original
        version of 'request()'.
        """
        transport.wrap_http_for_auth(self, http)
        return http

    def refresh(self, http):
        """Forces a refresh of the access_token.

        Args:
            http: httplib2.Http, an http object to be used to make the
                  refresh request.
        """
        self._refresh(http)

    def revoke(self, http):
        """Revokes a refresh_token and makes the credentials void.

        Args:
            http: httplib2.Http, an http object to be used to make the
                  revoke request.
        """
        self._revoke(http)

    def apply(self, headers):
        """Add the authorization to the headers.

        Args:
            headers: dict, the headers to add the Authorization header to.
        """
        headers['Authorization'] = 'Bearer ' + self.access_token

    def has_scopes(self, scopes):
        """Verify that the credentials are authorized for the given scopes.

        Returns True if the credentials authorized scopes contain all of the
        scopes given.

        Args:
            scopes: list or string, the scopes to check.

        Notes:
            There are cases where the credentials are unaware of which scopes
            are authorized. Notably, credentials obtained and stored before
            this code was added will not have scopes,
            AccessTokenCredentials do not have scopes. In both cases, you
            can use refresh_scopes() to obtain the canonical set of scopes.
        """
        scopes = _helpers.string_to_scopes(scopes)
        return set(scopes).issubset(self.scopes)

    def retrieve_scopes(self, http):
        """Retrieves the canonical list of scopes for this access token.

        Gets the scopes from the OAuth2 provider.

        Args:
            http: httplib2.Http, an http object to be used to make the
                  refresh request.

        Returns:
            A set of strings containing the canonical list of scopes.
        """
        self._retrieve_scopes(http)
        return self.scopes

    @classmethod
    def from_json(cls, json_data):
        """Instantiate a Credentials object from a JSON description of it.

        The JSON should have been produced by calling .to_json() on the
        object.

        Args:
            json_data: string or bytes, JSON to deserialize.

        Returns:
            An instance of a Credentials subclass.
        """
        data = json.loads(_helpers._from_bytes(json_data))
        if (data.get('token_expiry') and
                not isinstance(data['token_expiry'], datetime.datetime)):
            try:
                data['token_expiry'] = datetime.datetime.strptime(
                    data['token_expiry'], EXPIRY_FORMAT)
            except ValueError:
                # An unparseable expiry is treated as "never expires"
                # (see access_token_expired).
                data['token_expiry'] = None
        retval = cls(
            data['access_token'],
            data['client_id'],
            data['client_secret'],
            data['refresh_token'],
            data['token_expiry'],
            data['token_uri'],
            data['user_agent'],
            revoke_uri=data.get('revoke_uri', None),
            id_token=data.get('id_token', None),
            id_token_jwt=data.get('id_token_jwt', None),
            token_response=data.get('token_response', None),
            scopes=data.get('scopes', None),
            token_info_uri=data.get('token_info_uri', None))
        retval.invalid = data['invalid']
        return retval

    @property
    def access_token_expired(self):
        """True if the credential is expired or invalid.

        If the token_expiry isn't set, we assume the token doesn't expire.
        """
        if self.invalid:
            return True

        if not self.token_expiry:
            return False

        now = _UTCNOW()
        if now >= self.token_expiry:
            logger.info('access_token is expired. Now: %s, token_expiry: %s',
                        now, self.token_expiry)
            return True
        return False

    def get_access_token(self, http=None):
        """Return the access token and its expiration information.

        If the token does not exist, get one.
        If the token expired, refresh it.
        """
        if not self.access_token or self.access_token_expired:
            if not http:
                http = transport.get_http_object()
            self.refresh(http)
        return AccessTokenInfo(access_token=self.access_token,
                               expires_in=self._expires_in())

    def set_store(self, store):
        """Set the Storage for the credential.

        Args:
            store: Storage, an implementation of Storage object.
                   This is needed to store the latest access_token if it
                   has expired and been refreshed. This implementation uses
                   locking to check for updates before updating the
                   access_token.
        """
        self.store = store

    def _expires_in(self):
        """Return the number of seconds until this token expires.

        If token_expiry is in the past, this method will return 0, meaning
        the token has already expired.

        If token_expiry is None, this method will return None. Note that
        returning 0 in such a case would not be fair: the token may still be
        valid; we just don't know anything about it.
        """
        if self.token_expiry:
            now = _UTCNOW()
            if self.token_expiry > now:
                time_delta = self.token_expiry - now
                # TODO(orestica): return time_delta.total_seconds()
                # once dropping support for Python 2.6
                return time_delta.days * 86400 + time_delta.seconds
            else:
                return 0
        # Implicitly returns None when token_expiry is unset.

    def _updateFromCredential(self, other):
        """Update this Credential from another instance."""
        self.__dict__.update(other.__getstate__())

    def __getstate__(self):
        """Trim the state down to something that can be pickled."""
        # The store is dropped from the pickled state; __setstate__
        # restores it as None.
        d = copy.copy(self.__dict__)
        del d['store']
        return d

    def __setstate__(self, state):
        """Reconstitute the state of the object from being pickled."""
        self.__dict__.update(state)
        self.store = None

    def _generate_refresh_request_body(self):
        """Generate the body that will be used in the refresh request."""
        body = urllib.parse.urlencode({
            'grant_type': 'refresh_token',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token': self.refresh_token,
        })
        return body

    def _generate_refresh_request_headers(self):
        """Generate the headers that will be used in the refresh request."""
        headers = {
            'content-type': 'application/x-www-form-urlencoded',
        }

        if self.user_agent is not None:
            headers['user-agent'] = self.user_agent

        return headers

    def _refresh(self, http):
        """Refreshes the access_token.

        This method first checks by reading the Storage object if available.
        If a refresh is still needed, it holds the Storage lock until the
        refresh is completed.

        Args:
            http: an object to be used to make HTTP requests.

        Raises:
            HttpAccessTokenRefreshError: When the refresh fails.
        """
        if not self.store:
            self._do_refresh_request(http)
        else:
            self.store.acquire_lock()
            try:
                new_cred = self.store.locked_get()

                # Another process/thread may already have refreshed; prefer
                # the stored credential when it holds a different, unexpired
                # access token.
                if (new_cred and not new_cred.invalid and
                        new_cred.access_token != self.access_token and
                        not new_cred.access_token_expired):
                    logger.info('Updated access_token read from Storage')
                    self._updateFromCredential(new_cred)
                else:
                    self._do_refresh_request(http)
            finally:
                self.store.release_lock()

    def _do_refresh_request(self, http):
        """Refresh the access_token using the refresh_token.

        Args:
            http: an object to be used to make HTTP requests.

        Raises:
            HttpAccessTokenRefreshError: When the refresh fails.
        """
        body = self._generate_refresh_request_body()
        headers = self._generate_refresh_request_headers()

        logger.info('Refreshing access_token')
        resp, content = transport.request(
            http, self.token_uri, method='POST',
            body=body, headers=headers)
        content = _helpers._from_bytes(content)
        if resp.status == http_client.OK:
            d = json.loads(content)
            self.token_response = d
            self.access_token = d['access_token']
            self.refresh_token = d.get('refresh_token', self.refresh_token)
            if 'expires_in' in d:
                delta = datetime.timedelta(seconds=int(d['expires_in']))
                self.token_expiry = delta + _UTCNOW()
            else:
                self.token_expiry = None
            if 'id_token' in d:
                self.id_token = _extract_id_token(d['id_token'])
                self.id_token_jwt = d['id_token']
            else:
                self.id_token = None
                self.id_token_jwt = None
            # On temporary refresh errors, the user does not actually have to
            # re-authorize, so we unflag here.
            self.invalid = False
            if self.store:
                self.store.locked_put(self)
        else:
            # An {'error':...} response body means the token is expired or
            # revoked, so we flag the credentials as such.
            logger.info('Failed to retrieve access token: %s', content)
            error_msg = 'Invalid response {0}.'.format(resp.status)
            try:
                d = json.loads(content)
                if 'error' in d:
                    error_msg = d['error']
                    if 'error_description' in d:
                        error_msg += ': ' + d['error_description']
                    self.invalid = True
                    if self.store is not None:
                        self.store.locked_put(self)
            except (TypeError, ValueError):
                # Non-JSON error body: keep the generic status message.
                pass
            raise HttpAccessTokenRefreshError(error_msg, status=resp.status)

    def _revoke(self, http):
        """Revokes this credential and deletes the stored copy (if it
        exists).

        Args:
            http: an object to be used to make HTTP requests.
        """
        self._do_revoke(http, self.refresh_token or self.access_token)

    def _do_revoke(self, http, token):
        """Revokes this credential and deletes the stored copy (if it
        exists).

        Args:
            http: an object to be used to make HTTP requests.
            token: A string used as the token to be revoked. Can be either
                   an access_token or refresh_token.

        Raises:
            TokenRevokeError: If the revoke request does not return with a
                              200 OK.
        """
        logger.info('Revoking token')
        query_params = {'token': token}
        token_revoke_uri = _helpers.update_query_params(
            self.revoke_uri, query_params)
        resp, content = transport.request(http, token_revoke_uri)
        if resp.status == http_client.METHOD_NOT_ALLOWED:
            # Some endpoints reject GET; retry the revoke as a POST.
            body = urllib.parse.urlencode(query_params)
            resp, content = transport.request(http, token_revoke_uri,
                                              method='POST', body=body)
        if resp.status == http_client.OK:
            self.invalid = True
        else:
            error_msg = 'Invalid response {0}.'.format(resp.status)
            try:
                d = json.loads(_helpers._from_bytes(content))
                if 'error' in d:
                    error_msg = d['error']
            except (TypeError, ValueError):
                pass
            raise TokenRevokeError(error_msg)

        if self.store:
            self.store.delete()

    def _retrieve_scopes(self, http):
        """Retrieves the list of authorized scopes from the OAuth2 provider.

        Args:
            http: an object to be used to make HTTP requests.
        """
        self._do_retrieve_scopes(http, self.access_token)

    def _do_retrieve_scopes(self, http, token):
        """Retrieves the list of authorized scopes from the OAuth2 provider.

        Args:
            http: an object to be used to make HTTP requests.
            token: A string used as the token to identify the credentials to
                   the provider.

        Raises:
            Error: When refresh fails, indicating that the access token is
                   invalid.
        """
        logger.info('Refreshing scopes')
        query_params = {'access_token': token, 'fields': 'scope'}
        token_info_uri = _helpers.update_query_params(
            self.token_info_uri, query_params)
        resp, content = transport.request(http, token_info_uri)
        content = _helpers._from_bytes(content)
        if resp.status == http_client.OK:
            d = json.loads(content)
            self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))
        else:
            error_msg = 'Invalid response {0}.'.format(resp.status)
            try:
                d = json.loads(content)
                if 'error_description' in d:
                    error_msg = d['error_description']
            except (TypeError, ValueError):
                pass
            raise Error(error_msg)
+
+
class AccessTokenCredentials(OAuth2Credentials):
    """Credentials object for OAuth 2.0.

    Credentials can be applied to an httplib2.Http object using the
    authorize() method, which then signs each request from that object
    with the OAuth 2.0 access token. This set of credentials is for the
    use case where you have acquired an OAuth 2.0 access_token from
    another place such as a JavaScript client or another web
    application, and wish to use it from Python. Because only the
    access_token is present it can not be refreshed and will in time
    expire.

    AccessTokenCredentials objects may be safely pickled and unpickled.

    Usage::

        credentials = AccessTokenCredentials('<an access token>',
            'my-user-agent/1.0')
        http = httplib2.Http()
        http = credentials.authorize(http)

    Raises:
        AccessTokenCredentialsError: raised when a refresh is attempted,
            i.e. when the access_token has expired or been revoked.
    """

    def __init__(self, access_token, user_agent, revoke_uri=None):
        """Create an instance of AccessTokenCredentials.

        This is one of the few types of Credentials that you should
        construct directly; Credentials objects are usually instantiated
        by a Flow.

        Args:
            access_token: string, access token.
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            revoke_uri: string, URI for revoke endpoint. Defaults to None; a
                        token can't be revoked if this is None.
        """
        # Only the token itself is known, so every field needed for a
        # refresh is left as None.
        super(AccessTokenCredentials, self).__init__(
            access_token,
            None,
            None,
            None,
            None,
            None,
            user_agent,
            revoke_uri=revoke_uri)

    @classmethod
    def from_json(cls, json_data):
        """Instantiate from JSON produced by to_json()."""
        data = json.loads(_helpers._from_bytes(json_data))
        retval = AccessTokenCredentials(
            data['access_token'],
            data['user_agent'])
        return retval

    def _refresh(self, http):
        """Refreshes the access token.

        Args:
            http: unused HTTP object.

        Raises:
            AccessTokenCredentialsError: always
        """
        raise AccessTokenCredentialsError(
            'The access_token is expired or invalid and can\'t be refreshed.')

    def _revoke(self, http):
        """Revokes the access_token and deletes the store if available.

        Args:
            http: an object to be used to make HTTP requests.
        """
        self._do_revoke(http, self.access_token)
+
+
def _detect_gce_environment():
    """Check whether the current environment is Google Compute Engine.

    Probes the GCE metadata server and inspects the response's
    metadata-flavor header.

    Returns:
        Boolean indicating whether or not the current environment is
        Google Compute Engine.
    """
    # The short, explicit timeout works around slow resolution of an
    # unknown host on some networks (20-30 seconds). It could produce a
    # false negative on GCE if metadata resolution is unusually slow,
    # but that is considered unlikely.
    probe_http = transport.get_http_object(timeout=GCE_METADATA_TIMEOUT)
    try:
        response, _ = transport.request(
            probe_http, _GCE_METADATA_URI, headers=_GCE_HEADERS)
    except socket.error:  # socket.timeout or socket.error(64, 'Host is down')
        logger.info('Timeout attempting to reach GCE metadata service.')
        return False
    status_ok = response.status == http_client.OK
    flavor = response.get(_METADATA_FLAVOR_HEADER)
    return status_ok and flavor == _DESIRED_METADATA_FLAVOR
+
+
def _in_gae_environment():
    """Detect if the code is running in the App Engine environment.

    Returns:
        True if running in the GAE environment, False otherwise.
    """
    if SETTINGS.env_name is not None:
        # A previous detection already cached the environment name.
        return SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')

    try:
        import google.appengine  # noqa: unused import
    except ImportError:
        return False

    server_software = os.environ.get(_SERVER_SOFTWARE, '')
    if server_software.startswith('Google App Engine/'):
        SETTINGS.env_name = 'GAE_PRODUCTION'
        return True
    if server_software.startswith('Development/'):
        SETTINGS.env_name = 'GAE_LOCAL'
        return True
    return False
+
+
def _in_gce_environment():
    """Detect if the code is running in the Compute Engine environment.

    Returns:
        True if running in the GCE environment, False otherwise.
    """
    if SETTINGS.env_name is not None:
        # Use the cached detection result.
        return SETTINGS.env_name == 'GCE_PRODUCTION'

    if NO_GCE_CHECK == 'True':
        # Probing the metadata server is explicitly disabled.
        return False

    if _detect_gce_environment():
        SETTINGS.env_name = 'GCE_PRODUCTION'
        return True
    return False
+
+
class GoogleCredentials(OAuth2Credentials):
    """Application Default Credentials for use in calling Google APIs.

    The Application Default Credentials are being constructed as a function of
    the environment where the code is being run.
    More details can be found on this page:
    https://developers.google.com/accounts/docs/application-default-credentials

    Here is an example of how to use the Application Default Credentials for a
    service that requires authentication::

        from googleapiclient.discovery import build
        from oauth2client.client import GoogleCredentials

        credentials = GoogleCredentials.get_application_default()
        service = build('compute', 'v1', credentials=credentials)

        PROJECT = 'bamboo-machine-422'
        ZONE = 'us-central1-a'
        request = service.instances().list(project=PROJECT, zone=ZONE)
        response = request.execute()

        print(response)
    """

    NON_SERIALIZED_MEMBERS = (
        frozenset(['_private_key']) |
        OAuth2Credentials.NON_SERIALIZED_MEMBERS)
    """Members that aren't serialized when object is converted to JSON."""

    def __init__(self, access_token, client_id, client_secret, refresh_token,
                 token_expiry, token_uri, user_agent,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Create an instance of GoogleCredentials.

        This constructor is not usually called by the user, instead
        GoogleCredentials objects are instantiated by
        GoogleCredentials.from_stream() or
        GoogleCredentials.get_application_default().

        Args:
            access_token: string, access token.
            client_id: string, client identifier.
            client_secret: string, client secret.
            refresh_token: string, refresh token.
            token_expiry: datetime, when the access_token expires.
            token_uri: string, URI of token endpoint.
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            revoke_uri: string, URI for revoke endpoint. Defaults to
                        oauth2client.GOOGLE_REVOKE_URI; a token can't be
                        revoked if this is None.
        """
        super(GoogleCredentials, self).__init__(
            access_token, client_id, client_secret, refresh_token,
            token_expiry, token_uri, user_agent, revoke_uri=revoke_uri)

    def create_scoped_required(self):
        """Whether this Credentials object is scopeless.

        create_scoped(scopes) method needs to be called in order to create
        a Credentials object for API calls.
        """
        # This base class never requires scoping; scope-aware subclasses
        # (e.g. service accounts) are expected to override this.
        return False

    def create_scoped(self, scopes):
        """Create a Credentials object for the given scopes.

        The Credentials type is preserved.
        """
        # Since create_scoped_required() is False here, the requested scopes
        # are ignored and the same object is returned.
        return self

    @classmethod
    def from_json(cls, json_data):
        """Deserialize a credentials instance from its JSON representation.

        Dispatches to the service-account credential classes when the
        serialized '_module'/'_class' markers identify one of them; otherwise
        reconstructs an instance of cls from the stored fields.

        Args:
            json_data: string or bytes, JSON produced by a credentials
                       object's to_json().

        Returns:
            A credentials object rebuilt from json_data.
        """
        # TODO(issue 388): eliminate the circularity that is the reason for
        # this non-top-level import.
        from oauth2client import service_account
        data = json.loads(_helpers._from_bytes(json_data))

        # We handle service_account.ServiceAccountCredentials since it is a
        # possible return type of GoogleCredentials.get_application_default()
        if (data['_module'] == 'oauth2client.service_account' and
                data['_class'] == 'ServiceAccountCredentials'):
            return service_account.ServiceAccountCredentials.from_json(data)
        elif (data['_module'] == 'oauth2client.service_account' and
                data['_class'] == '_JWTAccessCredentials'):
            return service_account._JWTAccessCredentials.from_json(data)

        # token_expiry is stored as a string; None stays None.
        token_expiry = _parse_expiry(data.get('token_expiry'))
        google_credentials = cls(
            data['access_token'],
            data['client_id'],
            data['client_secret'],
            data['refresh_token'],
            token_expiry,
            data['token_uri'],
            data['user_agent'],
            revoke_uri=data.get('revoke_uri', None))
        google_credentials.invalid = data['invalid']
        return google_credentials

    @property
    def serialization_data(self):
        """Get the fields and values identifying the current credentials."""
        # Mirrors the 'authorized_user' key-file layout that
        # _get_application_default_credential_from_file() consumes.
        return {
            'type': 'authorized_user',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token': self.refresh_token
        }

    @staticmethod
    def _implicit_credentials_from_gae():
        """Attempts to get implicit credentials in Google App Engine env.

        If the current environment is not detected as App Engine, returns None,
        indicating no Google App Engine credentials can be detected from the
        current environment.

        Returns:
            None, if not in GAE, else an appengine.AppAssertionCredentials
            object.
        """
        if not _in_gae_environment():
            return None

        return _get_application_default_credential_GAE()

    @staticmethod
    def _implicit_credentials_from_gce():
        """Attempts to get implicit credentials in Google Compute Engine env.

        If the current environment is not detected as Compute Engine, returns
        None, indicating no Google Compute Engine credentials can be detected
        from the current environment.

        Returns:
            None, if not in GCE, else a gce.AppAssertionCredentials object.
        """
        if not _in_gce_environment():
            return None

        return _get_application_default_credential_GCE()

    @staticmethod
    def _implicit_credentials_from_files():
        """Attempts to get implicit credentials from local credential files.

        First checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS
        is set with a filename and then falls back to a configuration file (the
        "well known" file) associated with the 'gcloud' command line tool.

        Returns:
            Credentials object associated with the
            GOOGLE_APPLICATION_CREDENTIALS file or the "well known" file if
            either exist. If neither file is defined, returns None, indicating
            no credentials from a file can detected from the current
            environment.
        """
        credentials_filename = _get_environment_variable_file()
        if not credentials_filename:
            credentials_filename = _get_well_known_file()
            if os.path.isfile(credentials_filename):
                extra_help = (' (produced automatically when running'
                              ' "gcloud auth login" command)')
            else:
                credentials_filename = None
        else:
            extra_help = (' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS +
                          ' environment variable)')

        if not credentials_filename:
            # Implicit None: the caller falls through to the next checker.
            return

        # If we can read the credentials from a file, we don't need to know
        # what environment we are in.
        SETTINGS.env_name = DEFAULT_ENV_NAME

        try:
            return _get_application_default_credential_from_file(
                credentials_filename)
        except (ApplicationDefaultCredentialsError, ValueError) as error:
            # extra_help is always bound here: having a filename implies one
            # of the two branches above assigned it.
            _raise_exception_for_reading_json(credentials_filename,
                                              extra_help, error)

    @classmethod
    def _get_implicit_credentials(cls):
        """Gets credentials implicitly from the environment.

        Checks environment in order of precedence:
        - Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to
          a file with stored credentials information.
        - Stored "well known" file associated with `gcloud` command line tool.
        - Google App Engine (production and testing)
        - Google Compute Engine production environment.

        Raises:
            ApplicationDefaultCredentialsError: raised when the credentials
                                                fail to be retrieved.
        """
        # Environ checks (in order).
        environ_checkers = [
            cls._implicit_credentials_from_files,
            cls._implicit_credentials_from_gae,
            cls._implicit_credentials_from_gce,
        ]

        for checker in environ_checkers:
            credentials = checker()
            if credentials is not None:
                return credentials

        # If no credentials, fail.
        raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)

    @staticmethod
    def get_application_default():
        """Get the Application Default Credentials for the current environment.

        Raises:
            ApplicationDefaultCredentialsError: raised when the credentials
                                                fail to be retrieved.
        """
        return GoogleCredentials._get_implicit_credentials()

    @staticmethod
    def from_stream(credential_filename):
        """Create a Credentials object by reading information from a file.

        It returns an object of type GoogleCredentials.

        Args:
            credential_filename: the path to the file from where the
                                 credentials are to be read

        Raises:
            ApplicationDefaultCredentialsError: raised when the credentials
                                                fail to be retrieved.
        """
        if credential_filename and os.path.isfile(credential_filename):
            try:
                return _get_application_default_credential_from_file(
                    credential_filename)
            except (ApplicationDefaultCredentialsError, ValueError) as error:
                extra_help = (' (provided as parameter to the '
                              'from_stream() method)')
                _raise_exception_for_reading_json(credential_filename,
                                                  extra_help,
                                                  error)
        else:
            # Covers both a None/empty argument and a non-existent path.
            raise ApplicationDefaultCredentialsError(
                'The parameter passed to the from_stream() '
                'method should point to a file.')
+
+
+def _save_private_file(filename, json_contents):
+ """Saves a file with read-write permissions on for the owner.
+
+ Args:
+ filename: String. Absolute path to file.
+ json_contents: JSON serializable object to be saved.
+ """
+ temp_filename = tempfile.mktemp()
+ file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, 0o600)
+ with os.fdopen(file_desc, 'w') as file_handle:
+ json.dump(json_contents, file_handle, sort_keys=True,
+ indent=2, separators=(',', ': '))
+ shutil.move(temp_filename, filename)
+
+
def save_to_well_known_file(credentials, well_known_file=None):
    """Save the provided GoogleCredentials to the well known file.

    Args:
        credentials: the credentials to be saved to the well known file;
                     it should be an instance of GoogleCredentials
        well_known_file: the name of the file where the credentials are to be
                         saved; this parameter is supposed to be used for
                         testing only
    """
    # TODO(orestica): move this method to tools.py
    # once the argparse import gets fixed (it is not present in Python 2.6)

    target = (well_known_file if well_known_file is not None
              else _get_well_known_file())

    # Refuse to write if the gcloud config directory is missing.
    parent_dir = os.path.dirname(target)
    if not os.path.isdir(parent_dir):
        raise OSError(
            'Config directory does not exist: {0}'.format(parent_dir))

    _save_private_file(target, credentials.serialization_data)
+
+
def _get_environment_variable_file():
    """Return the path named by GOOGLE_APPLICATION_CREDENTIALS, if usable.

    Returns:
        The filename held in the environment variable when it points at an
        existing file; None (implicitly) when the variable is unset/empty.

    Raises:
        ApplicationDefaultCredentialsError: the variable is set but the file
                                            does not exist.
    """
    filename = os.environ.get(GOOGLE_APPLICATION_CREDENTIALS, None)

    if not filename:
        # Variable unset or empty: nothing to report.
        return None

    if os.path.isfile(filename):
        return filename

    raise ApplicationDefaultCredentialsError(
        'File ' + filename +
        ' (pointed by ' +
        GOOGLE_APPLICATION_CREDENTIALS +
        ' environment variable) does not exist!')
+
+
def _get_well_known_file():
    """Get the well known file produced by command 'gcloud auth login'."""
    # TODO(orestica): Revisit this method once gcloud provides a better way
    # of pinpointing the exact location of the file.
    config_dir = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR)
    if config_dir is None:
        if os.name == 'nt':
            appdata = os.environ.get('APPDATA')
            if appdata is not None:
                config_dir = os.path.join(appdata,
                                          _CLOUDSDK_CONFIG_DIRECTORY)
            else:
                # This should never happen unless someone is really
                # messing with things.
                drive = os.environ.get('SystemDrive', 'C:')
                config_dir = os.path.join(drive, '\\',
                                          _CLOUDSDK_CONFIG_DIRECTORY)
        else:
            # POSIX default: ~/.config/<sdk dir>.
            config_dir = os.path.join(os.path.expanduser('~'),
                                      '.config',
                                      _CLOUDSDK_CONFIG_DIRECTORY)

    return os.path.join(config_dir, _WELL_KNOWN_CREDENTIALS_FILE)
+
+
def _get_application_default_credential_from_file(filename):
    """Build the Application Default Credentials from file."""
    # Load the JSON key file from disk.
    with open(filename) as file_obj:
        client_credentials = json.load(file_obj)

    # Each supported key-file type requires a fixed set of fields.
    required_by_type = {
        AUTHORIZED_USER: set(
            ['client_id', 'client_secret', 'refresh_token']),
        SERVICE_ACCOUNT: set(
            ['client_id', 'client_email', 'private_key_id', 'private_key']),
    }

    credentials_type = client_credentials.get('type')
    if credentials_type not in required_by_type:
        raise ApplicationDefaultCredentialsError(
            "'type' field should be defined (and have one of the '" +
            AUTHORIZED_USER + "' or '" + SERVICE_ACCOUNT + "' values)")

    missing_fields = required_by_type[credentials_type].difference(
        client_credentials.keys())
    if missing_fields:
        _raise_exception_for_missing_fields(missing_fields)

    if credentials_type == AUTHORIZED_USER:
        return GoogleCredentials(
            access_token=None,
            client_id=client_credentials['client_id'],
            client_secret=client_credentials['client_secret'],
            refresh_token=client_credentials['refresh_token'],
            token_expiry=None,
            token_uri=oauth2client.GOOGLE_TOKEN_URI,
            user_agent='Python client library')

    # credentials_type == SERVICE_ACCOUNT
    from oauth2client import service_account
    return service_account._JWTAccessCredentials.from_json_keyfile_dict(
        client_credentials)
+
+
def _raise_exception_for_missing_fields(missing_fields):
    """Raise ApplicationDefaultCredentialsError naming the absent keys."""
    fields_list = ', '.join(missing_fields)
    raise ApplicationDefaultCredentialsError(
        'The following field(s) must be defined: ' + fields_list)
+
+
def _raise_exception_for_reading_json(credential_file,
                                      extra_help,
                                      error):
    """Raise a uniform error for a credentials file that failed to parse."""
    message = ('An error was encountered while reading json file: ' +
               credential_file + extra_help + ': ' + str(error))
    raise ApplicationDefaultCredentialsError(message)
+
+
def _get_application_default_credential_GAE():
    """Build default credentials for the App Engine environment."""
    # Imported lazily: the appengine contrib module is only importable
    # inside the App Engine runtime.
    from oauth2client.contrib.appengine import AppAssertionCredentials

    credentials = AppAssertionCredentials([])
    return credentials
+
+
def _get_application_default_credential_GCE():
    """Build default credentials for the Compute Engine environment."""
    # Imported lazily to avoid pulling in the GCE contrib module unless it
    # is actually needed.
    from oauth2client.contrib.gce import AppAssertionCredentials

    credentials = AppAssertionCredentials()
    return credentials
+
+
class AssertionCredentials(GoogleCredentials):
    """Abstract Credentials object used for OAuth 2.0 assertion grants.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens. It must
    be subclassed to generate the appropriate assertion string.

    AssertionCredentials objects may be safely pickled and unpickled.
    """

    @_helpers.positional(2)
    def __init__(self, assertion_type, user_agent=None,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 **unused_kwargs):
        """Constructor for AssertionFlowCredentials.

        Args:
            assertion_type: string, assertion type that will be declared to the
                            auth server
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            token_uri: string, URI for token endpoint. For convenience defaults
                       to Google's endpoints but any OAuth 2.0 provider can be
                       used.
            revoke_uri: string, URI for revoke endpoint.
        """
        # A two-legged flow starts with no issued tokens and no client
        # identity, hence the five leading None positional arguments
        # (access_token, client_id, client_secret, refresh_token,
        # token_expiry) passed to the parent constructor.
        super(AssertionCredentials, self).__init__(
            None,
            None,
            None,
            None,
            None,
            token_uri,
            user_agent,
            revoke_uri=revoke_uri)
        self.assertion_type = assertion_type

    def _generate_refresh_request_body(self):
        """Build the urlencoded body of a JWT-bearer token request."""
        assertion = self._generate_assertion()

        body = urllib.parse.urlencode({
            'assertion': assertion,
            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
        })

        return body

    def _generate_assertion(self):
        """Generate assertion string to be used in the access token request."""
        # Abstract hook: concrete subclasses produce the signed assertion.
        raise NotImplementedError

    def _revoke(self, http):
        """Revokes the access_token and deletes the store if available.

        Args:
            http: an object to be used to make HTTP requests.
        """
        # There is no refresh token in a two-legged flow, so the access
        # token itself is revoked.
        self._do_revoke(http, self.access_token)

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        Args:
            blob: bytes, Message to be signed.

        Returns:
            tuple, A pair of the private key ID used to sign the blob and
            the signed contents.
        """
        raise NotImplementedError('This method is abstract.')
+
+
def _require_crypto_or_die():
    """Ensure we have a crypto library, or throw CryptoUnavailableError.

    The oauth2client.crypt module requires either PyCrypto or PyOpenSSL
    to be available in order to function, but these are optional
    dependencies.
    """
    if HAS_CRYPTO:
        return
    raise CryptoUnavailableError('No crypto library available')
+
+
@_helpers.positional(2)
def verify_id_token(id_token, audience, http=None,
                    cert_uri=ID_TOKEN_VERIFICATION_CERTS):
    """Verifies a signed JWT id_token.

    This function requires PyOpenSSL and because of that it does not work on
    App Engine.

    Args:
        id_token: string, A Signed JWT.
        audience: string, The audience 'aud' that the token should be for.
        http: httplib2.Http, instance to use to make the HTTP request. Callers
              should supply an instance that has caching enabled.
        cert_uri: string, URI of the certificates in JSON format to
                  verify the JWT against.

    Returns:
        The deserialized JSON in the JWT.

    Raises:
        oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
        CryptoUnavailableError: if no crypto library is available.
    """
    _require_crypto_or_die()
    if http is None:
        http = transport.get_cached_http()

    # Fetch the current public certificates before verifying.
    resp, content = transport.request(http, cert_uri)
    if resp.status != http_client.OK:
        raise VerifyJwtTokenError('Status code: {0}'.format(resp.status))

    certs = json.loads(_helpers._from_bytes(content))
    return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)
+
+
def _extract_id_token(id_token):
    """Extract the JSON payload from a JWT.

    Does the extraction w/o checking the signature.

    Args:
        id_token: string or bytestring, OAuth 2.0 id_token.

    Returns:
        object, The deserialized JSON payload.
    """
    # Match the separator to the token's type so bytes stay bytes.
    separator = b'.' if type(id_token) == bytes else u'.'
    segments = id_token.split(separator)

    if len(segments) != 3:
        raise VerifyJwtTokenError(
            'Wrong number of segments in token: {0}'.format(id_token))

    # The payload is the (base64url-encoded) middle segment.
    payload = _helpers._urlsafe_b64decode(segments[1])
    return json.loads(_helpers._from_bytes(payload))
+
+
def _parse_exchange_token_response(content):
    """Parses response of an exchange token request.

    Most providers return JSON but some (e.g. Facebook) return a
    url-encoded string.

    Args:
        content: The body of a response

    Returns:
        Content as a dictionary object. Note that the dict could be empty,
        i.e. {}. That basically indicates a failure.
    """
    text = _helpers._from_bytes(content)
    try:
        resp = json.loads(text)
    except Exception:
        # different JSON libs raise different exceptions,
        # so we just do a catch-all here
        resp = _helpers.parse_unique_urlencoded(text)

    # some providers respond with 'expires', others with 'expires_in'
    if resp and 'expires' in resp:
        resp['expires_in'] = resp.pop('expires')

    return resp
+
+
@_helpers.positional(4)
def credentials_from_code(client_id, client_secret, scope, code,
                          redirect_uri='postmessage', http=None,
                          user_agent=None,
                          token_uri=oauth2client.GOOGLE_TOKEN_URI,
                          auth_uri=oauth2client.GOOGLE_AUTH_URI,
                          revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                          device_uri=oauth2client.GOOGLE_DEVICE_URI,
                          token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,
                          pkce=False,
                          code_verifier=None):
    """Exchanges an authorization code for an OAuth2Credentials object.

    Builds a one-shot OAuth2WebServerFlow from the given endpoints and runs
    only its exchange step.

    Args:
        client_id: string, client identifier.
        client_secret: string, client secret.
        scope: string or iterable of strings, scope(s) to request.
        code: string, An authorization code, most likely passed down from
              the client.
        redirect_uri: string, this is generally set to 'postmessage' to match
                      the redirect_uri that the client specified.
        http: httplib2.Http, optional http instance to use to do the fetch.
        user_agent: string, optional HTTP User-Agent for this application.
        token_uri: string, URI for token endpoint. For convenience defaults
                   to Google's endpoints but any OAuth 2.0 provider can be
                   used.
        auth_uri: string, URI for authorization endpoint. For convenience
                  defaults to Google's endpoints but any OAuth 2.0 provider
                  can be used.
        revoke_uri: string, URI for revoke endpoint. For convenience
                    defaults to Google's endpoints but any OAuth 2.0 provider
                    can be used.
        device_uri: string, URI for device authorization endpoint. For
                    convenience defaults to Google's endpoints but any OAuth
                    2.0 provider can be used.
        token_info_uri: string, URI for the token introspection endpoint.
        pkce: boolean, default: False, Generate and include a "Proof Key
              for Code Exchange" (PKCE) with your authorization and token
              requests. This adds security for installed applications that
              cannot protect a client_secret. See RFC 7636 for details.
        code_verifier: bytestring or None, default: None, parameter passed
                       as part of the code exchange when pkce=True. If
                       None, a code_verifier will automatically be
                       generated as part of step1_get_authorize_url(). See
                       RFC 7636 for details.

    Returns:
        An OAuth2Credentials object.

    Raises:
        FlowExchangeError if the authorization code cannot be exchanged for an
        access token
    """
    flow_options = dict(
        redirect_uri=redirect_uri,
        user_agent=user_agent,
        auth_uri=auth_uri,
        token_uri=token_uri,
        revoke_uri=revoke_uri,
        device_uri=device_uri,
        token_info_uri=token_info_uri,
        pkce=pkce,
        code_verifier=code_verifier)
    flow = OAuth2WebServerFlow(client_id, client_secret, scope,
                               **flow_options)
    return flow.step2_exchange(code, http=http)
+
+
@_helpers.positional(3)
def credentials_from_clientsecrets_and_code(filename, scope, code,
                                            message=None,
                                            redirect_uri='postmessage',
                                            http=None,
                                            cache=None,
                                            device_uri=None):
    """Returns OAuth2Credentials from a clientsecrets file and an auth code.

    Will create the right kind of Flow based on the contents of the
    clientsecrets file or will raise InvalidClientSecretsError for unknown
    types of Flows.

    Args:
        filename: string, File name of clientsecrets.
        scope: string or iterable of strings, scope(s) to request.
        code: string, An authorization code, most likely passed down from
              the client
        message: string, A friendly string to display to the user if the
                 clientsecrets file is missing or invalid. If message is
                 provided then sys.exit will be called in the case of an error.
                 If message in not provided then
                 clientsecrets.InvalidClientSecretsError will be raised.
        redirect_uri: string, this is generally set to 'postmessage' to match
                      the redirect_uri that the client specified
        http: httplib2.Http, optional http instance to use to do the fetch
        cache: An optional cache service client that implements get() and set()
               methods. See clientsecrets.loadfile() for details.
        device_uri: string, OAuth 2.0 device authorization endpoint

    Returns:
        An OAuth2Credentials object.

    Raises:
        FlowExchangeError: if the authorization code cannot be exchanged for an
                           access token
        UnknownClientSecretsFlowError: if the file describes an unknown kind
                                       of Flow.
        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
                                                 invalid.
    """
    # Build the flow from the clientsecrets file, then run only the exchange
    # step with the supplied authorization code.
    flow = flow_from_clientsecrets(filename, scope, message=message,
                                   cache=cache, redirect_uri=redirect_uri,
                                   device_uri=device_uri)
    credentials = flow.step2_exchange(code, http=http)
    return credentials
+
+
class DeviceFlowInfo(collections.namedtuple('DeviceFlowInfo', (
        'device_code', 'user_code', 'interval', 'verification_url',
        'user_code_expiry'))):
    """Intermediate information the OAuth2 for devices flow."""

    @classmethod
    def FromResponse(cls, response):
        """Create a DeviceFlowInfo from a server response.

        The response should be a dict containing entries as described here:

        http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1
        """
        # device_code and user_code are required; a KeyError propagates if
        # the server omitted either of them.
        device_code = response['device_code']
        user_code = response['user_code']

        # The response may list the verification address as either
        # verification_url or verification_uri, so we check for both.
        verification_url = response.get(
            'verification_url', response.get('verification_uri'))
        if verification_url is None:
            raise OAuth2DeviceCodeError(
                'No verification_url provided in server response')

        # expires_in is optional; when present it fixes the user-code expiry.
        expiry = None
        if 'expires_in' in response:
            expiry = _UTCNOW() + datetime.timedelta(
                seconds=int(response['expires_in']))

        # interval is optional as well and defaults to None.
        return cls(
            device_code=device_code,
            user_code=user_code,
            verification_url=verification_url,
            interval=response.get('interval'),
            user_code_expiry=expiry)
+
+
+def _oauth2_web_server_flow_params(kwargs):
+ """Configures redirect URI parameters for OAuth2WebServerFlow."""
+ params = {
+ 'access_type': 'offline',
+ 'response_type': 'code',
+ }
+
+ params.update(kwargs)
+
+ # Check for the presence of the deprecated approval_prompt param and
+ # warn appropriately.
+ approval_prompt = params.get('approval_prompt')
+ if approval_prompt is not None:
+ logger.warning(
+ 'The approval_prompt parameter for OAuth2WebServerFlow is '
+ 'deprecated. Please use the prompt parameter instead.')
+
+ if approval_prompt == 'force':
+ logger.warning(
+ 'approval_prompt="force" has been adjusted to '
+ 'prompt="consent"')
+ params['prompt'] = 'consent'
+ del params['approval_prompt']
+
+ return params
+
+
+class OAuth2WebServerFlow(Flow):
+ """Does the Web Server Flow for OAuth 2.0.
+
+ OAuth2WebServerFlow objects may be safely pickled and unpickled.
+ """
+
+ @_helpers.positional(4)
+ def __init__(self, client_id,
+ client_secret=None,
+ scope=None,
+ redirect_uri=None,
+ user_agent=None,
+ auth_uri=oauth2client.GOOGLE_AUTH_URI,
+ token_uri=oauth2client.GOOGLE_TOKEN_URI,
+ revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
+ login_hint=None,
+ device_uri=oauth2client.GOOGLE_DEVICE_URI,
+ token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,
+ authorization_header=None,
+ pkce=False,
+ code_verifier=None,
+ **kwargs):
+ """Constructor for OAuth2WebServerFlow.
+
+ The kwargs argument is used to set extra query parameters on the
+ auth_uri. For example, the access_type and prompt
+ query parameters can be set via kwargs.
+
+ Args:
+ client_id: string, client identifier.
+ client_secret: string client secret.
+ scope: string or iterable of strings, scope(s) of the credentials
+ being requested.
+ redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
+ for a non-web-based application, or a URI that
+ handles the callback from the authorization server.
+ user_agent: string, HTTP User-Agent to provide for this
+ application.
+ auth_uri: string, URI for authorization endpoint. For convenience
+ defaults to Google's endpoints but any OAuth 2.0 provider
+ can be used.
+ token_uri: string, URI for token endpoint. For convenience
+ defaults to Google's endpoints but any OAuth 2.0
+ provider can be used.
+ revoke_uri: string, URI for revoke endpoint. For convenience
+ defaults to Google's endpoints but any OAuth 2.0
+ provider can be used.
+ login_hint: string, Either an email address or domain. Passing this
+ hint will either pre-fill the email box on the sign-in
+ form or select the proper multi-login session, thereby
+ simplifying the login flow.
+ device_uri: string, URI for device authorization endpoint. For
+ convenience defaults to Google's endpoints but any
+ OAuth 2.0 provider can be used.
+ authorization_header: string, For use with OAuth 2.0 providers that
+ require a client to authenticate using a
+ header value instead of passing client_secret
+ in the POST body.
+ pkce: boolean, default: False, Generate and include a "Proof Key
+ for Code Exchange" (PKCE) with your authorization and token
+ requests. This adds security for installed applications that
+ cannot protect a client_secret. See RFC 7636 for details.
+ code_verifier: bytestring or None, default: None, parameter passed
+ as part of the code exchange when pkce=True. If
+ None, a code_verifier will automatically be
+ generated as part of step1_get_authorize_url(). See
+ RFC 7636 for details.
+ **kwargs: dict, The keyword arguments are all optional and required
+ parameters for the OAuth calls.
+ """
+ # scope is a required argument, but to preserve backwards-compatibility
+ # we don't want to rearrange the positional arguments
+ if scope is None:
+ raise TypeError("The value of scope must not be None")
+ self.client_id = client_id
+ self.client_secret = client_secret
+ self.scope = _helpers.scopes_to_string(scope)
+ self.redirect_uri = redirect_uri
+ self.login_hint = login_hint
+ self.user_agent = user_agent
+ self.auth_uri = auth_uri
+ self.token_uri = token_uri
+ self.revoke_uri = revoke_uri
+ self.device_uri = device_uri
+ self.token_info_uri = token_info_uri
+ self.authorization_header = authorization_header
+ self._pkce = pkce
+ self.code_verifier = code_verifier
+ self.params = _oauth2_web_server_flow_params(kwargs)
+
+ @_helpers.positional(1)
+ def step1_get_authorize_url(self, redirect_uri=None, state=None):
+ """Returns a URI to redirect to the provider.
+
+ Args:
+ redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
+ for a non-web-based application, or a URI that
+ handles the callback from the authorization server.
+ This parameter is deprecated, please move to passing
+ the redirect_uri in via the constructor.
+ state: string, Opaque state string which is passed through the
+ OAuth2 flow and returned to the client as a query parameter
+ in the callback.
+
+ Returns:
+ A URI as a string to redirect the user to begin the authorization
+ flow.
+ """
+ if redirect_uri is not None:
+ logger.warning((
+ 'The redirect_uri parameter for '
+ 'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '
+ 'Please move to passing the redirect_uri in via the '
+ 'constructor.'))
+ self.redirect_uri = redirect_uri
+
+ if self.redirect_uri is None:
+ raise ValueError('The value of redirect_uri must not be None.')
+
+ query_params = {
+ 'client_id': self.client_id,
+ 'redirect_uri': self.redirect_uri,
+ 'scope': self.scope,
+ }
+ if state is not None:
+ query_params['state'] = state
+ if self.login_hint is not None:
+ query_params['login_hint'] = self.login_hint
+ if self._pkce:
+ if not self.code_verifier:
+ self.code_verifier = _pkce.code_verifier()
+ challenge = _pkce.code_challenge(self.code_verifier)
+ query_params['code_challenge'] = challenge
+ query_params['code_challenge_method'] = 'S256'
+
+ query_params.update(self.params)
+ return _helpers.update_query_params(self.auth_uri, query_params)
+
+ @_helpers.positional(1)
+ def step1_get_device_and_user_codes(self, http=None):
+ """Returns a user code and the verification URL where to enter it
+
+ Returns:
+ A user code as a string for the user to authorize the application
+ An URL as a string where the user has to enter the code
+ """
+ if self.device_uri is None:
+ raise ValueError('The value of device_uri must not be None.')
+
+ body = urllib.parse.urlencode({
+ 'client_id': self.client_id,
+ 'scope': self.scope,
+ })
+ headers = {
+ 'content-type': 'application/x-www-form-urlencoded',
+ }
+
+ if self.user_agent is not None:
+ headers['user-agent'] = self.user_agent
+
+ if http is None:
+ http = transport.get_http_object()
+
+ resp, content = transport.request(
+ http, self.device_uri, method='POST', body=body, headers=headers)
+ content = _helpers._from_bytes(content)
+ if resp.status == http_client.OK:
+ try:
+ flow_info = json.loads(content)
+ except ValueError as exc:
+ raise OAuth2DeviceCodeError(
+ 'Could not parse server response as JSON: "{0}", '
+ 'error: "{1}"'.format(content, exc))
+ return DeviceFlowInfo.FromResponse(flow_info)
+ else:
+ error_msg = 'Invalid response {0}.'.format(resp.status)
+ try:
+ error_dict = json.loads(content)
+ if 'error' in error_dict:
+ error_msg += ' Error: {0}'.format(error_dict['error'])
+ except ValueError:
+ # Couldn't decode a JSON response, stick with the
+ # default message.
+ pass
+ raise OAuth2DeviceCodeError(error_msg)
+
+ @_helpers.positional(2)
+ def step2_exchange(self, code=None, http=None, device_flow_info=None):
+ """Exchanges a code for OAuth2Credentials.
+
+ Args:
+ code: string, a dict-like object, or None. For a non-device
+ flow, this is either the response code as a string, or a
+ dictionary of query parameters to the redirect_uri. For a
+ device flow, this should be None.
+ http: httplib2.Http, optional http instance to use when fetching
+ credentials.
+ device_flow_info: DeviceFlowInfo, return value from step1 in the
+ case of a device flow.
+
+ Returns:
+ An OAuth2Credentials object that can be used to authorize requests.
+
+ Raises:
+ FlowExchangeError: if a problem occurred exchanging the code for a
+ refresh_token.
+ ValueError: if code and device_flow_info are both provided or both
+ missing.
+ """
+ if code is None and device_flow_info is None:
+ raise ValueError('No code or device_flow_info provided.')
+ if code is not None and device_flow_info is not None:
+ raise ValueError('Cannot provide both code and device_flow_info.')
+
+ if code is None:
+ code = device_flow_info.device_code
+ elif not isinstance(code, (six.string_types, six.binary_type)):
+ if 'code' not in code:
+ raise FlowExchangeError(code.get(
+ 'error', 'No code was supplied in the query parameters.'))
+ code = code['code']
+
+ post_data = {
+ 'client_id': self.client_id,
+ 'code': code,
+ 'scope': self.scope,
+ }
+ if self.client_secret is not None:
+ post_data['client_secret'] = self.client_secret
+ if self._pkce:
+ post_data['code_verifier'] = self.code_verifier
+ if device_flow_info is not None:
+ post_data['grant_type'] = 'http://oauth.net/grant_type/device/1.0'
+ else:
+ post_data['grant_type'] = 'authorization_code'
+ post_data['redirect_uri'] = self.redirect_uri
+ body = urllib.parse.urlencode(post_data)
+ headers = {
+ 'content-type': 'application/x-www-form-urlencoded',
+ }
+ if self.authorization_header is not None:
+ headers['Authorization'] = self.authorization_header
+ if self.user_agent is not None:
+ headers['user-agent'] = self.user_agent
+
+ if http is None:
+ http = transport.get_http_object()
+
+ resp, content = transport.request(
+ http, self.token_uri, method='POST', body=body, headers=headers)
+ d = _parse_exchange_token_response(content)
+ if resp.status == http_client.OK and 'access_token' in d:
+ access_token = d['access_token']
+ refresh_token = d.get('refresh_token', None)
+ if not refresh_token:
+ logger.info(
+ 'Received token response with no refresh_token. Consider '
+ "reauthenticating with prompt='consent'.")
+ token_expiry = None
+ if 'expires_in' in d:
+ delta = datetime.timedelta(seconds=int(d['expires_in']))
+ token_expiry = delta + _UTCNOW()
+
+ extracted_id_token = None
+ id_token_jwt = None
+ if 'id_token' in d:
+ extracted_id_token = _extract_id_token(d['id_token'])
+ id_token_jwt = d['id_token']
+
+ logger.info('Successfully retrieved access token')
+ return OAuth2Credentials(
+ access_token, self.client_id, self.client_secret,
+ refresh_token, token_expiry, self.token_uri, self.user_agent,
+ revoke_uri=self.revoke_uri, id_token=extracted_id_token,
+ id_token_jwt=id_token_jwt, token_response=d, scopes=self.scope,
+ token_info_uri=self.token_info_uri)
+ else:
+ logger.info('Failed to retrieve access token: %s', content)
+ if 'error' in d:
+ # you never know what those providers got to say
+ error_msg = (str(d['error']) +
+ str(d.get('error_description', '')))
+ else:
+ error_msg = 'Invalid response: {0}.'.format(str(resp.status))
+ raise FlowExchangeError(error_msg)
+
+
+@_helpers.positional(2)
+def flow_from_clientsecrets(filename, scope, redirect_uri=None,
+ message=None, cache=None, login_hint=None,
+ device_uri=None, pkce=None, code_verifier=None,
+ prompt=None):
+ """Create a Flow from a clientsecrets file.
+
+ Will create the right kind of Flow based on the contents of the
+ clientsecrets file or will raise InvalidClientSecretsError for unknown
+ types of Flows.
+
+ Args:
+ filename: string, File name of client secrets.
+ scope: string or iterable of strings, scope(s) to request.
+ redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
+ a non-web-based application, or a URI that handles the
+ callback from the authorization server.
+ message: string, A friendly string to display to the user if the
+ clientsecrets file is missing or invalid. If message is
+ provided then sys.exit will be called in the case of an error.
+ If message is not provided then
+ clientsecrets.InvalidClientSecretsError will be raised.
+ cache: An optional cache service client that implements get() and set()
+ methods. See clientsecrets.loadfile() for details.
+ login_hint: string, Either an email address or domain. Passing this
+ hint will either pre-fill the email box on the sign-in form
+ or select the proper multi-login session, thereby
+ simplifying the login flow.
+ device_uri: string, URI for device authorization endpoint. For
+ convenience defaults to Google's endpoints but any
+ OAuth 2.0 provider can be used.
+
+ Returns:
+ A Flow object.
+
+ Raises:
+ UnknownClientSecretsFlowError: if the file describes an unknown kind of
+ Flow.
+ clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
+ invalid.
+ """
+ try:
+ client_type, client_info = clientsecrets.loadfile(filename,
+ cache=cache)
+ if client_type in (clientsecrets.TYPE_WEB,
+ clientsecrets.TYPE_INSTALLED):
+ constructor_kwargs = {
+ 'redirect_uri': redirect_uri,
+ 'auth_uri': client_info['auth_uri'],
+ 'token_uri': client_info['token_uri'],
+ 'login_hint': login_hint,
+ }
+ revoke_uri = client_info.get('revoke_uri')
+ optional = (
+ 'revoke_uri',
+ 'device_uri',
+ 'pkce',
+ 'code_verifier',
+ 'prompt'
+ )
+ for param in optional:
+ if locals()[param] is not None:
+ constructor_kwargs[param] = locals()[param]
+
+ return OAuth2WebServerFlow(
+ client_info['client_id'], client_info['client_secret'],
+ scope, **constructor_kwargs)
+
+ except clientsecrets.InvalidClientSecretsError as e:
+ if message is not None:
+ if e.args:
+ message = ('The client secrets were invalid: '
+ '\n{0}\n{1}'.format(e, message))
+ sys.exit(message)
+ else:
+ raise
+ else:
+ raise UnknownClientSecretsFlowError(
+ 'This OAuth 2.0 flow is unsupported: {0!r}'.format(client_type))
diff --git a/contrib/python/oauth2client/py2/oauth2client/clientsecrets.py b/contrib/python/oauth2client/py2/oauth2client/clientsecrets.py
new file mode 100644
index 0000000000..1598142e87
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/clientsecrets.py
@@ -0,0 +1,173 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for reading OAuth 2.0 client secret files.
+
+A client_secrets.json file contains all the information needed to interact with
+an OAuth 2.0 protected service.
+"""
+
+import json
+
+import six
+
+
+# Properties that make a client_secrets.json file valid.
+TYPE_WEB = 'web'
+TYPE_INSTALLED = 'installed'
+
+VALID_CLIENT = {
+ TYPE_WEB: {
+ 'required': [
+ 'client_id',
+ 'client_secret',
+ 'redirect_uris',
+ 'auth_uri',
+ 'token_uri',
+ ],
+ 'string': [
+ 'client_id',
+ 'client_secret',
+ ],
+ },
+ TYPE_INSTALLED: {
+ 'required': [
+ 'client_id',
+ 'client_secret',
+ 'redirect_uris',
+ 'auth_uri',
+ 'token_uri',
+ ],
+ 'string': [
+ 'client_id',
+ 'client_secret',
+ ],
+ },
+}
+
+
+class Error(Exception):
+ """Base error for this module."""
+
+
+class InvalidClientSecretsError(Error):
+ """Format of ClientSecrets file is invalid."""
+
+
+def _validate_clientsecrets(clientsecrets_dict):
+ """Validate parsed client secrets from a file.
+
+ Args:
+ clientsecrets_dict: dict, a dictionary holding the client secrets.
+
+ Returns:
+ tuple, a string of the client type and the information parsed
+ from the file.
+ """
+ _INVALID_FILE_FORMAT_MSG = (
+ 'Invalid file format. See '
+ 'https://developers.google.com/api-client-library/'
+ 'python/guide/aaa_client_secrets')
+
+ if clientsecrets_dict is None:
+ raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)
+ try:
+ (client_type, client_info), = clientsecrets_dict.items()
+ except (ValueError, AttributeError):
+ raise InvalidClientSecretsError(
+ _INVALID_FILE_FORMAT_MSG + ' '
+ 'Expected a JSON object with a single property for a "web" or '
+ '"installed" application')
+
+ if client_type not in VALID_CLIENT:
+ raise InvalidClientSecretsError(
+ 'Unknown client type: {0}.'.format(client_type))
+
+ for prop_name in VALID_CLIENT[client_type]['required']:
+ if prop_name not in client_info:
+ raise InvalidClientSecretsError(
+ 'Missing property "{0}" in a client type of "{1}".'.format(
+ prop_name, client_type))
+ for prop_name in VALID_CLIENT[client_type]['string']:
+ if client_info[prop_name].startswith('[['):
+ raise InvalidClientSecretsError(
+ 'Property "{0}" is not configured.'.format(prop_name))
+ return client_type, client_info
+
+
+def load(fp):
+ obj = json.load(fp)
+ return _validate_clientsecrets(obj)
+
+
+def loads(s):
+ obj = json.loads(s)
+ return _validate_clientsecrets(obj)
+
+
+def _loadfile(filename):
+ try:
+ with open(filename, 'r') as fp:
+ obj = json.load(fp)
+ except IOError as exc:
+ raise InvalidClientSecretsError('Error opening file', exc.filename,
+ exc.strerror, exc.errno)
+ return _validate_clientsecrets(obj)
+
+
+def loadfile(filename, cache=None):
+ """Loading of client_secrets JSON file, optionally backed by a cache.
+
+ Typical cache storage would be App Engine memcache service,
+ but you can pass in any other cache client that implements
+ these methods:
+
+ * ``get(key, namespace=ns)``
+ * ``set(key, value, namespace=ns)``
+
+ Usage::
+
+ # without caching
+ client_type, client_info = loadfile('secrets.json')
+ # using App Engine memcache service
+ from google.appengine.api import memcache
+ client_type, client_info = loadfile('secrets.json', cache=memcache)
+
+ Args:
+ filename: string, Path to a client_secrets.json file on a filesystem.
+ cache: An optional cache service client that implements get() and set()
+ methods. If not specified, the file is always being loaded from
+ a filesystem.
+
+ Raises:
+ InvalidClientSecretsError: In case of a validation error or some
+ I/O failure. Can happen only on cache miss.
+
+ Returns:
+ (client_type, client_info) tuple, as _loadfile() normally would.
+ JSON contents is validated only during first load. Cache hits are not
+ validated.
+ """
+ _SECRET_NAMESPACE = 'oauth2client:secrets#ns'
+
+ if not cache:
+ return _loadfile(filename)
+
+ obj = cache.get(filename, namespace=_SECRET_NAMESPACE)
+ if obj is None:
+ client_type, client_info = _loadfile(filename)
+ obj = {client_type: client_info}
+ cache.set(filename, obj, namespace=_SECRET_NAMESPACE)
+
+ return next(six.iteritems(obj))
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/__init__.py b/contrib/python/oauth2client/py2/oauth2client/contrib/__init__.py
new file mode 100644
index 0000000000..ecfd06c968
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/__init__.py
@@ -0,0 +1,6 @@
+"""Contributed modules.
+
+Contrib contains modules that are not considered part of the core oauth2client
+library but provide additional functionality. These modules are intended to
+make it easier to use oauth2client.
+"""
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/_appengine_ndb.py b/contrib/python/oauth2client/py2/oauth2client/contrib/_appengine_ndb.py
new file mode 100644
index 0000000000..c863e8f4e7
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/_appengine_ndb.py
@@ -0,0 +1,163 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google App Engine utilities helper.
+
+Classes that directly require App Engine's ndb library. Provided
+as a separate module in case of failure to import ndb while
+other App Engine libraries are present.
+"""
+
+import logging
+
+from google.appengine.ext import ndb
+
+from oauth2client import client
+
+
+NDB_KEY = ndb.Key
+"""Key constant used by :mod:`oauth2client.contrib.appengine`."""
+
+NDB_MODEL = ndb.Model
+"""Model constant used by :mod:`oauth2client.contrib.appengine`."""
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class SiteXsrfSecretKeyNDB(ndb.Model):
+ """NDB Model for storage for the sites XSRF secret key.
+
+ Since this model uses the same kind as SiteXsrfSecretKey, it can be
+ used interchangeably. This simply provides an NDB model for interacting
+ with the same data the DB model interacts with.
+
+ There should only be one instance stored of this model, the one used
+ for the site.
+ """
+ secret = ndb.StringProperty()
+
+ @classmethod
+ def _get_kind(cls):
+ """Return the kind name for this class."""
+ return 'SiteXsrfSecretKey'
+
+
+class FlowNDBProperty(ndb.PickleProperty):
+ """App Engine NDB datastore Property for Flow.
+
+ Serves the same purpose as the DB FlowProperty, but for NDB models.
+ Since PickleProperty inherits from BlobProperty, the underlying
+ representation of the data in the datastore will be the same as in the
+ DB case.
+
+ Utility property that allows easy storage and retrieval of an
+ oauth2client.Flow
+ """
+
+ def _validate(self, value):
+ """Validates a value as a proper Flow object.
+
+ Args:
+ value: A value to be set on the property.
+
+ Raises:
+ TypeError if the value is not an instance of Flow.
+ """
+ _LOGGER.info('validate: Got type %s', type(value))
+ if value is not None and not isinstance(value, client.Flow):
+ raise TypeError(
+ 'Property {0} must be convertible to a flow '
+ 'instance; received: {1}.'.format(self._name, value))
+
+
+class CredentialsNDBProperty(ndb.BlobProperty):
+ """App Engine NDB datastore Property for Credentials.
+
+ Serves the same purpose as the DB CredentialsProperty, but for NDB
+ models. Since CredentialsProperty stores data as a blob and this
+ inherits from BlobProperty, the data in the datastore will be the same
+ as in the DB case.
+
+ Utility property that allows easy storage and retrieval of Credentials
+ and subclasses.
+ """
+
+ def _validate(self, value):
+ """Validates a value as a proper credentials object.
+
+ Args:
+ value: A value to be set on the property.
+
+ Raises:
+ TypeError if the value is not an instance of Credentials.
+ """
+ _LOGGER.info('validate: Got type %s', type(value))
+ if value is not None and not isinstance(value, client.Credentials):
+ raise TypeError(
+ 'Property {0} must be convertible to a credentials '
+ 'instance; received: {1}.'.format(self._name, value))
+
+ def _to_base_type(self, value):
+ """Converts our validated value to a JSON serialized string.
+
+ Args:
+ value: A value to be set in the datastore.
+
+ Returns:
+ A JSON serialized version of the credential, else '' if value
+ is None.
+ """
+ if value is None:
+ return ''
+ else:
+ return value.to_json()
+
+ def _from_base_type(self, value):
+ """Converts our stored JSON string back to the desired type.
+
+ Args:
+ value: A value from the datastore to be converted to the
+ desired type.
+
+ Returns:
+ A deserialized Credentials (or subclass) object, else None if
+ the value can't be parsed.
+ """
+ if not value:
+ return None
+ try:
+ # Uses the from_json method of the implied class of value
+ credentials = client.Credentials.new_from_json(value)
+ except ValueError:
+ credentials = None
+ return credentials
+
+
+class CredentialsNDBModel(ndb.Model):
+ """NDB Model for storage of OAuth 2.0 Credentials
+
+ Since this model uses the same kind as CredentialsModel and has a
+ property which can serialize and deserialize Credentials correctly, it
+ can be used interchangeably with a CredentialsModel to access, insert
+ and delete the same entities. This simply provides an NDB model for
+ interacting with the same data the DB model interacts with.
+
+ Storage of the model is keyed by the user.user_id().
+ """
+ credentials = CredentialsNDBProperty()
+
+ @classmethod
+ def _get_kind(cls):
+ """Return the kind name for this class."""
+ return 'CredentialsModel'
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/_metadata.py b/contrib/python/oauth2client/py2/oauth2client/contrib/_metadata.py
new file mode 100644
index 0000000000..564cd398da
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/_metadata.py
@@ -0,0 +1,118 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides helper methods for talking to the Compute Engine metadata server.
+
+See https://cloud.google.com/compute/docs/metadata
+"""
+
+import datetime
+import json
+import os
+
+from six.moves import http_client
+from six.moves.urllib import parse as urlparse
+
+from oauth2client import _helpers
+from oauth2client import client
+from oauth2client import transport
+
+
+METADATA_ROOT = 'http://{}/computeMetadata/v1/'.format(
+ os.getenv('GCE_METADATA_ROOT', 'metadata.google.internal'))
+METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
+
+
+def get(http, path, root=METADATA_ROOT, recursive=None):
+ """Fetch a resource from the metadata server.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ path: A string indicating the resource to retrieve. For example,
+ 'instance/service-accounts/default'
+ root: A string indicating the full path to the metadata server root.
+ recursive: A boolean indicating whether to do a recursive query of
+ metadata. See
+ https://cloud.google.com/compute/docs/metadata#aggcontents
+
+ Returns:
+ A dictionary if the metadata server returns JSON, otherwise a string.
+
+ Raises:
+ http_client.HTTPException if an error occurred while
+ retrieving metadata.
+ """
+ url = urlparse.urljoin(root, path)
+ url = _helpers._add_query_parameter(url, 'recursive', recursive)
+
+ response, content = transport.request(
+ http, url, headers=METADATA_HEADERS)
+
+ if response.status == http_client.OK:
+ decoded = _helpers._from_bytes(content)
+ if response['content-type'] == 'application/json':
+ return json.loads(decoded)
+ else:
+ return decoded
+ else:
+ raise http_client.HTTPException(
+ 'Failed to retrieve {0} from the Google Compute Engine '
+ 'metadata service. Response:\n{1}'.format(url, response))
+
+
+def get_service_account_info(http, service_account='default'):
+ """Get information about a service account from the metadata server.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ service_account: An email specifying the service account for which to
+ look up information. Default will be information for the "default"
+ service account of the current compute engine instance.
+
+ Returns:
+ A dictionary with information about the specified service account,
+ for example:
+
+ {
+ 'email': '...',
+ 'scopes': ['scope', ...],
+ 'aliases': ['default', '...']
+ }
+ """
+ return get(
+ http,
+ 'instance/service-accounts/{0}/'.format(service_account),
+ recursive=True)
+
+
+def get_token(http, service_account='default'):
+ """Fetch an oauth token for the
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ service_account: An email specifying the service account this token
+ should represent. Default will be a token for the "default" service
+ account of the current compute engine instance.
+
+ Returns:
+ A tuple of (access token, token expiration), where access token is the
+ access token as a string and token expiration is a datetime object
+ that indicates when the access token will expire.
+ """
+ token_json = get(
+ http,
+ 'instance/service-accounts/{0}/token'.format(service_account))
+ token_expiry = client._UTCNOW() + datetime.timedelta(
+ seconds=token_json['expires_in'])
+ return token_json['access_token'], token_expiry
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/appengine.py b/contrib/python/oauth2client/py2/oauth2client/contrib/appengine.py
new file mode 100644
index 0000000000..c1326eeb57
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/appengine.py
@@ -0,0 +1,910 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for Google App Engine
+
+Utilities for making it easier to use OAuth 2.0 on Google App Engine.
+"""
+
+import cgi
+import json
+import logging
+import os
+import pickle
+import threading
+
+from google.appengine.api import app_identity
+from google.appengine.api import memcache
+from google.appengine.api import users
+from google.appengine.ext import db
+from google.appengine.ext.webapp.util import login_required
+import webapp2 as webapp
+
+import oauth2client
+from oauth2client import _helpers
+from oauth2client import client
+from oauth2client import clientsecrets
+from oauth2client import transport
+from oauth2client.contrib import xsrfutil
+
+# This is a temporary fix for a Google internal issue.
+try:
+ from oauth2client.contrib import _appengine_ndb
+except ImportError: # pragma: NO COVER
+ _appengine_ndb = None
+
+
+logger = logging.getLogger(__name__)
+
+OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
+
+XSRF_MEMCACHE_ID = 'xsrf_secret_key'
+
+if _appengine_ndb is None: # pragma: NO COVER
+ CredentialsNDBModel = None
+ CredentialsNDBProperty = None
+ FlowNDBProperty = None
+ _NDB_KEY = None
+ _NDB_MODEL = None
+ SiteXsrfSecretKeyNDB = None
+else:
+ CredentialsNDBModel = _appengine_ndb.CredentialsNDBModel
+ CredentialsNDBProperty = _appengine_ndb.CredentialsNDBProperty
+ FlowNDBProperty = _appengine_ndb.FlowNDBProperty
+ _NDB_KEY = _appengine_ndb.NDB_KEY
+ _NDB_MODEL = _appengine_ndb.NDB_MODEL
+ SiteXsrfSecretKeyNDB = _appengine_ndb.SiteXsrfSecretKeyNDB
+
+
+def _safe_html(s):
+ """Escape text to make it safe to display.
+
+ Args:
+ s: string, The text to escape.
+
+ Returns:
+ The escaped text as a string.
+ """
+ return cgi.escape(s, quote=1).replace("'", '&#39;')
+
+
+class SiteXsrfSecretKey(db.Model):
+ """Storage for the sites XSRF secret key.
+
+ There will only be one instance stored of this model, the one used for the
+ site.
+ """
+ secret = db.StringProperty()
+
+
+def _generate_new_xsrf_secret_key():
+ """Returns a random XSRF secret key."""
+ return os.urandom(16).encode("hex")
+
+
+def xsrf_secret_key():
+ """Return the secret key for use for XSRF protection.
+
+ If the Site entity does not have a secret key, this method will also create
+ one and persist it.
+
+ Returns:
+ The secret key.
+ """
+ secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
+ if not secret:
+ # Load the one and only instance of SiteXsrfSecretKey.
+ model = SiteXsrfSecretKey.get_or_insert(key_name='site')
+ if not model.secret:
+ model.secret = _generate_new_xsrf_secret_key()
+ model.put()
+ secret = model.secret
+ memcache.add(XSRF_MEMCACHE_ID, secret,
+ namespace=OAUTH2CLIENT_NAMESPACE)
+
+ return str(secret)
+
+
+class AppAssertionCredentials(client.AssertionCredentials):
+ """Credentials object for App Engine Assertion Grants
+
+ This object will allow an App Engine application to identify itself to
+ Google and other OAuth 2.0 servers that can verify assertions. It can be
+ used for the purpose of accessing data stored under an account assigned to
+ the App Engine application itself.
+
+ This credential does not require a flow to instantiate because it
+ represents a two legged flow, and therefore has all of the required
+ information to generate and refresh its own access tokens.
+ """
+
+ @_helpers.positional(2)
+ def __init__(self, scope, **kwargs):
+ """Constructor for AppAssertionCredentials
+
+ Args:
+ scope: string or iterable of strings, scope(s) of the credentials
+ being requested.
+ **kwargs: optional keyword args, including:
+ service_account_id: service account id of the application. If None
+ or unspecified, the default service account for
+ the app is used.
+ """
+ self.scope = _helpers.scopes_to_string(scope)
+ self._kwargs = kwargs
+ self.service_account_id = kwargs.get('service_account_id', None)
+ self._service_account_email = None
+
+ # Assertion type is no longer used, but still in the
+ # parent class signature.
+ super(AppAssertionCredentials, self).__init__(None)
+
+ @classmethod
+ def from_json(cls, json_data):
+ data = json.loads(json_data)
+ return AppAssertionCredentials(data['scope'])
+
+ def _refresh(self, http):
+ """Refreshes the access token.
+
+ Since the underlying App Engine app_identity implementation does its
+ own caching we can skip all the storage hoops and just to a refresh
+ using the API.
+
+ Args:
+ http: unused HTTP object
+
+ Raises:
+ AccessTokenRefreshError: When the refresh fails.
+ """
+ try:
+ scopes = self.scope.split()
+ (token, _) = app_identity.get_access_token(
+ scopes, service_account_id=self.service_account_id)
+ except app_identity.Error as e:
+ raise client.AccessTokenRefreshError(str(e))
+ self.access_token = token
+
+ @property
+ def serialization_data(self):
+ raise NotImplementedError('Cannot serialize credentials '
+ 'for Google App Engine.')
+
+ def create_scoped_required(self):
+ return not self.scope
+
+ def create_scoped(self, scopes):
+ return AppAssertionCredentials(scopes, **self._kwargs)
+
+ def sign_blob(self, blob):
+ """Cryptographically sign a blob (of bytes).
+
+ Implements abstract method
+ :meth:`oauth2client.client.AssertionCredentials.sign_blob`.
+
+ Args:
+ blob: bytes, Message to be signed.
+
+ Returns:
+ tuple, A pair of the private key ID used to sign the blob and
+ the signed contents.
+ """
+ return app_identity.sign_blob(blob)
+
+ @property
+ def service_account_email(self):
+ """Get the email for the current service account.
+
+ Returns:
+ string, The email associated with the Google App Engine
+ service account.
+ """
+ if self._service_account_email is None:
+ self._service_account_email = (
+ app_identity.get_service_account_name())
+ return self._service_account_email
+
+
+class FlowProperty(db.Property):
+ """App Engine datastore Property for Flow.
+
+ Utility property that allows easy storage and retrieval of an
+ oauth2client.Flow
+ """
+
+ # Tell what the user type is.
+ data_type = client.Flow
+
+ # For writing to datastore.
+ def get_value_for_datastore(self, model_instance):
+ flow = super(FlowProperty, self).get_value_for_datastore(
+ model_instance)
+ return db.Blob(pickle.dumps(flow))
+
+ # For reading from datastore.
+ def make_value_from_datastore(self, value):
+ if value is None:
+ return None
+ return pickle.loads(value)
+
+ def validate(self, value):
+ if value is not None and not isinstance(value, client.Flow):
+ raise db.BadValueError(
+ 'Property {0} must be convertible '
+ 'to a FlowThreeLegged instance ({1})'.format(self.name, value))
+ return super(FlowProperty, self).validate(value)
+
+ def empty(self, value):
+ return not value
+
+
+class CredentialsProperty(db.Property):
+ """App Engine datastore Property for Credentials.
+
+ Utility property that allows easy storage and retrieval of
+ oauth2client.Credentials
+ """
+
+ # Tell what the user type is.
+ data_type = client.Credentials
+
+ # For writing to datastore.
+ def get_value_for_datastore(self, model_instance):
+ logger.info("get: Got type " + str(type(model_instance)))
+ cred = super(CredentialsProperty, self).get_value_for_datastore(
+ model_instance)
+ if cred is None:
+ cred = ''
+ else:
+ cred = cred.to_json()
+ return db.Blob(cred)
+
+ # For reading from datastore.
+ def make_value_from_datastore(self, value):
+ logger.info("make: Got type " + str(type(value)))
+ if value is None:
+ return None
+ if len(value) == 0:
+ return None
+ try:
+ credentials = client.Credentials.new_from_json(value)
+ except ValueError:
+ credentials = None
+ return credentials
+
+ def validate(self, value):
+ value = super(CredentialsProperty, self).validate(value)
+ logger.info("validate: Got type " + str(type(value)))
+ if value is not None and not isinstance(value, client.Credentials):
+ raise db.BadValueError(
+ 'Property {0} must be convertible '
+ 'to a Credentials instance ({1})'.format(self.name, value))
+ return value
+
+
+class StorageByKeyName(client.Storage):
+ """Store and retrieve a credential to and from the App Engine datastore.
+
+ This Storage helper presumes the Credentials have been stored as a
+ CredentialsProperty or CredentialsNDBProperty on a datastore model class,
+ and that entities are stored by key_name.
+ """
+
+ @_helpers.positional(4)
+ def __init__(self, model, key_name, property_name, cache=None, user=None):
+ """Constructor for Storage.
+
+ Args:
+ model: db.Model or ndb.Model, model class
+ key_name: string, key name for the entity that has the credentials
+ property_name: string, name of the property that is a
+ CredentialsProperty or CredentialsNDBProperty.
+ cache: memcache, a write-through cache to put in front of the
+ datastore. If the model you are using is an NDB model, using
+ a cache will be redundant since the model uses an instance
+ cache and memcache for you.
+ user: users.User object, optional. Can be used to grab user ID as a
+ key_name if no key name is specified.
+ """
+ super(StorageByKeyName, self).__init__()
+
+ if key_name is None:
+ if user is None:
+ raise ValueError('StorageByKeyName called with no '
+ 'key name or user.')
+ key_name = user.user_id()
+
+ self._model = model
+ self._key_name = key_name
+ self._property_name = property_name
+ self._cache = cache
+
+ def _is_ndb(self):
+ """Determine whether the model of the instance is an NDB model.
+
+ Returns:
+ Boolean indicating whether or not the model is an NDB or DB model.
+ """
+ # issubclass will fail if one of the arguments is not a class, only
+ # need worry about new-style classes since ndb and db models are
+ # new-style
+ if isinstance(self._model, type):
+ if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
+ return True
+ elif issubclass(self._model, db.Model):
+ return False
+
+ raise TypeError(
+ 'Model class not an NDB or DB model: {0}.'.format(self._model))
+
+ def _get_entity(self):
+ """Retrieve entity from datastore.
+
+ Uses a different model method for db or ndb models.
+
+ Returns:
+ Instance of the model corresponding to the current storage object
+ and stored using the key name of the storage object.
+ """
+ if self._is_ndb():
+ return self._model.get_by_id(self._key_name)
+ else:
+ return self._model.get_by_key_name(self._key_name)
+
+ def _delete_entity(self):
+ """Delete entity from datastore.
+
+ Attempts to delete using the key_name stored on the object, whether or
+ not the given key is in the datastore.
+ """
+ if self._is_ndb():
+ _NDB_KEY(self._model, self._key_name).delete()
+ else:
+ entity_key = db.Key.from_path(self._model.kind(), self._key_name)
+ db.delete(entity_key)
+
+ @db.non_transactional(allow_existing=True)
+ def locked_get(self):
+ """Retrieve Credential from datastore.
+
+ Returns:
+ oauth2client.Credentials
+ """
+ credentials = None
+ if self._cache:
+ json = self._cache.get(self._key_name)
+ if json:
+ credentials = client.Credentials.new_from_json(json)
+ if credentials is None:
+ entity = self._get_entity()
+ if entity is not None:
+ credentials = getattr(entity, self._property_name)
+ if self._cache:
+ self._cache.set(self._key_name, credentials.to_json())
+
+ if credentials and hasattr(credentials, 'set_store'):
+ credentials.set_store(self)
+ return credentials
+
+ @db.non_transactional(allow_existing=True)
+ def locked_put(self, credentials):
+ """Write a Credentials to the datastore.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ entity = self._model.get_or_insert(self._key_name)
+ setattr(entity, self._property_name, credentials)
+ entity.put()
+ if self._cache:
+ self._cache.set(self._key_name, credentials.to_json())
+
+ @db.non_transactional(allow_existing=True)
+ def locked_delete(self):
+ """Delete Credential from datastore."""
+
+ if self._cache:
+ self._cache.delete(self._key_name)
+
+ self._delete_entity()
+
+
class CredentialsModel(db.Model):
    """Storage for OAuth 2.0 Credentials

    Storage of the model is keyed by the user.user_id().
    """
    # Single datastore property holding the serialized Credentials object.
    credentials = CredentialsProperty()
+
+
def _build_state_value(request_handler, user):
    """Composes the value for the 'state' parameter.

    Packs the current request URI together with an XSRF token into an opaque
    string that can be passed to the authentication server via the 'state'
    parameter.

    Args:
        request_handler: webapp.RequestHandler, The request.
        user: google.appengine.api.users.User, The current user.

    Returns:
        The state value as a string.
    """
    request_uri = request_handler.request.url
    xsrf_token = xsrfutil.generate_token(
        xsrf_secret_key(), user.user_id(), action_id=str(request_uri))
    return request_uri + ':' + xsrf_token
+
+
def _parse_state_value(state, user):
    """Parse the value of the 'state' parameter.

    Splits the state into the original request URI and the XSRF token, then
    validates the token for the given user.

    Args:
        state: string, The value of the state parameter.
        user: google.appengine.api.users.User, The current user.

    Returns:
        The redirect URI, or None if XSRF token is not valid.
    """
    redirect_uri, xsrf_token = state.rsplit(':', 1)
    token_is_valid = xsrfutil.validate_token(
        xsrf_secret_key(), xsrf_token, user.user_id(),
        action_id=redirect_uri)
    return redirect_uri if token_is_valid else None
+
+
class OAuth2Decorator(object):
    """Utility for making OAuth 2.0 easier.

    Instantiate and then use with oauth_required or oauth_aware
    as decorators on webapp.RequestHandler methods.

    ::

        decorator = OAuth2Decorator(
            client_id='837...ent.com',
            client_secret='Qh...wwI',
            scope='https://www.googleapis.com/auth/plus')

        class MainHandler(webapp.RequestHandler):
            @decorator.oauth_required
            def get(self):
                http = decorator.http()
                # http is authorized with the user's Credentials and can be
                # used in API calls

    """

    def set_credentials(self, credentials):
        # Setter half of the thread-local ``credentials`` property below.
        self._tls.credentials = credentials

    def get_credentials(self):
        """A thread local Credentials object.

        Returns:
            A client.Credentials object, or None if credentials hasn't been set
            in this thread yet, which may happen when calling has_credentials
            inside oauth_aware.
        """
        return getattr(self._tls, 'credentials', None)

    credentials = property(get_credentials, set_credentials)

    def set_flow(self, flow):
        # Setter half of the thread-local ``flow`` property below.
        self._tls.flow = flow

    def get_flow(self):
        """A thread local Flow object.

        Returns:
            A credentials.Flow object, or None if the flow hasn't been set in
            this thread yet, which happens in _create_flow() since Flows are
            created lazily.
        """
        return getattr(self._tls, 'flow', None)

    flow = property(get_flow, set_flow)

    @_helpers.positional(4)
    def __init__(self, client_id, client_secret, scope,
                 auth_uri=oauth2client.GOOGLE_AUTH_URI,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 user_agent=None,
                 message=None,
                 callback_path='/oauth2callback',
                 token_response_param=None,
                 _storage_class=StorageByKeyName,
                 _credentials_class=CredentialsModel,
                 _credentials_property_name='credentials',
                 **kwargs):
        """Constructor for OAuth2Decorator

        Args:
            client_id: string, client identifier.
            client_secret: string client secret.
            scope: string or iterable of strings, scope(s) of the credentials
                   being requested.
            auth_uri: string, URI for authorization endpoint. For convenience
                      defaults to Google's endpoints but any OAuth 2.0 provider
                      can be used.
            token_uri: string, URI for token endpoint. For convenience defaults
                       to Google's endpoints but any OAuth 2.0 provider can be
                       used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                        defaults to Google's endpoints but any OAuth 2.0
                        provider can be used.
            user_agent: string, User agent of your application, default to
                        None.
            message: Message to display if there are problems with the
                     OAuth 2.0 configuration. The message may contain HTML and
                     will be presented on the web interface for any method that
                     uses the decorator.
            callback_path: string, The absolute path to use as the callback
                           URI. Note that this must match up with the URI given
                           when registering the application in the APIs
                           Console.
            token_response_param: string. If provided, the full JSON response
                                  to the access token request will be encoded
                                  and included in this query parameter in the
                                  callback URI. This is useful with providers
                                  (e.g. wordpress.com) that include extra
                                  fields that the client may want.
            _storage_class: "Protected" keyword argument not typically provided
                            to this constructor. A storage class to aid in
                            storing a Credentials object for a user in the
                            datastore. Defaults to StorageByKeyName.
            _credentials_class: "Protected" keyword argument not typically
                                provided to this constructor. A db or ndb Model
                                class to hold credentials. Defaults to
                                CredentialsModel.
            _credentials_property_name: "Protected" keyword argument not
                                        typically provided to this constructor.
                                        A string indicating the name of the
                                        field on the _credentials_class where a
                                        Credentials object will be stored.
                                        Defaults to 'credentials'.
            **kwargs: dict, Keyword arguments are passed along as kwargs to
                      the OAuth2WebServerFlow constructor.
        """
        self._tls = threading.local()
        # These two assignments route through the thread-local property
        # setters defined above.
        self.flow = None
        self.credentials = None
        self._client_id = client_id
        self._client_secret = client_secret
        self._scope = _helpers.scopes_to_string(scope)
        self._auth_uri = auth_uri
        self._token_uri = token_uri
        self._revoke_uri = revoke_uri
        self._user_agent = user_agent
        self._kwargs = kwargs
        self._message = message
        self._in_error = False
        self._callback_path = callback_path
        self._token_response_param = token_response_param
        self._storage_class = _storage_class
        self._credentials_class = _credentials_class
        self._credentials_property_name = _credentials_property_name

    def _display_error_message(self, request_handler):
        """Write the configured error message as a minimal HTML page."""
        request_handler.response.out.write('<html><body>')
        request_handler.response.out.write(_safe_html(self._message))
        request_handler.response.out.write('</body></html>')

    def oauth_required(self, method):
        """Decorator that starts the OAuth 2.0 dance.

        Starts the OAuth dance for the logged in user if they haven't already
        granted access for this application.

        Args:
            method: callable, to be decorated method of a webapp.RequestHandler
                    instance.
        """

        def check_oauth(request_handler, *args, **kwargs):
            if self._in_error:
                self._display_error_message(request_handler)
                return

            user = users.get_current_user()
            # Don't use @login_decorator as this could be used in a
            # POST request.
            if not user:
                request_handler.redirect(users.create_login_url(
                    request_handler.request.uri))
                return

            self._create_flow(request_handler)

            # Store the request URI in 'state' so we can use it later
            self.flow.params['state'] = _build_state_value(
                request_handler, user)
            self.credentials = self._storage_class(
                self._credentials_class, None,
                self._credentials_property_name, user=user).get()

            if not self.has_credentials():
                return request_handler.redirect(self.authorize_url())
            try:
                resp = method(request_handler, *args, **kwargs)
            except client.AccessTokenRefreshError:
                return request_handler.redirect(self.authorize_url())
            finally:
                # Always clear the thread-local credentials once the wrapped
                # handler is done so they cannot leak across requests.
                self.credentials = None
            return resp

        return check_oauth

    def _create_flow(self, request_handler):
        """Create the Flow object.

        The Flow is calculated lazily since we don't know where this app is
        running until it receives a request, at which point redirect_uri can be
        calculated and then the Flow object can be constructed.

        Args:
            request_handler: webapp.RequestHandler, the request handler.
        """
        if self.flow is None:
            redirect_uri = request_handler.request.relative_url(
                self._callback_path)  # Usually /oauth2callback
            self.flow = client.OAuth2WebServerFlow(
                self._client_id, self._client_secret, self._scope,
                redirect_uri=redirect_uri, user_agent=self._user_agent,
                auth_uri=self._auth_uri, token_uri=self._token_uri,
                revoke_uri=self._revoke_uri, **self._kwargs)

    def oauth_aware(self, method):
        """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

        Does all the setup for the OAuth dance, but doesn't initiate it.
        This decorator is useful if you want to create a page that knows
        whether or not the user has granted access to this application.
        From within a method decorated with @oauth_aware the has_credentials()
        and authorize_url() methods can be called.

        Args:
            method: callable, to be decorated method of a webapp.RequestHandler
                    instance.
        """

        def setup_oauth(request_handler, *args, **kwargs):
            if self._in_error:
                self._display_error_message(request_handler)
                return

            user = users.get_current_user()
            # Don't use @login_decorator as this could be used in a
            # POST request.
            if not user:
                request_handler.redirect(users.create_login_url(
                    request_handler.request.uri))
                return

            self._create_flow(request_handler)

            self.flow.params['state'] = _build_state_value(request_handler,
                                                           user)
            self.credentials = self._storage_class(
                self._credentials_class, None,
                self._credentials_property_name, user=user).get()
            try:
                resp = method(request_handler, *args, **kwargs)
            finally:
                # Clear thread-local credentials after the handler runs.
                self.credentials = None
            return resp
        return setup_oauth

    def has_credentials(self):
        """True if for the logged in user there are valid access Credentials.

        Must only be called from within a webapp.RequestHandler subclassed
        method that had been decorated with either @oauth_required or
        @oauth_aware.
        """
        return self.credentials is not None and not self.credentials.invalid

    def authorize_url(self):
        """Returns the URL to start the OAuth dance.

        Must only be called from within a webapp.RequestHandler subclassed
        method that had been decorated with either @oauth_required or
        @oauth_aware.
        """
        url = self.flow.step1_get_authorize_url()
        return str(url)

    def http(self, *args, **kwargs):
        """Returns an authorized http instance.

        Must only be called from within an @oauth_required decorated method, or
        from within an @oauth_aware decorated method where has_credentials()
        returns True.

        Args:
            *args: Positional arguments passed to httplib2.Http constructor.
            **kwargs: Keyword arguments passed to httplib2.Http constructor.
        """
        return self.credentials.authorize(
            transport.get_http_object(*args, **kwargs))

    @property
    def callback_path(self):
        """The absolute path where the callback will occur.

        Note this is the absolute path, not the absolute URI, that will be
        calculated by the decorator at runtime. See callback_handler() for how
        this should be used.

        Returns:
            The callback path as a string.
        """
        return self._callback_path

    def callback_handler(self):
        """RequestHandler for the OAuth 2.0 redirect callback.

        Usage::

            app = webapp.WSGIApplication([
                ('/index', MyIndexHandler),
                ...,
                (decorator.callback_path, decorator.callback_handler())
            ])

        Returns:
            A webapp.RequestHandler that handles the redirect back from the
            server during the OAuth 2.0 dance.
        """
        # Captured so the nested handler class can reach the decorator state.
        decorator = self

        class OAuth2Handler(webapp.RequestHandler):
            """Handler for the redirect_uri of the OAuth 2.0 dance."""

            @login_required
            def get(self):
                error = self.request.get('error')
                if error:
                    errormsg = self.request.get('error_description', error)
                    self.response.out.write(
                        'The authorization request failed: {0}'.format(
                            _safe_html(errormsg)))
                else:
                    user = users.get_current_user()
                    decorator._create_flow(self)
                    credentials = decorator.flow.step2_exchange(
                        self.request.params)
                    decorator._storage_class(
                        decorator._credentials_class, None,
                        decorator._credentials_property_name,
                        user=user).put(credentials)
                    # The 'state' carries the original request URI plus an
                    # XSRF token; a None result means validation failed.
                    redirect_uri = _parse_state_value(
                        str(self.request.get('state')), user)
                    if redirect_uri is None:
                        self.response.out.write(
                            'The authorization request failed')
                        return

                    if (decorator._token_response_param and
                            credentials.token_response):
                        resp_json = json.dumps(credentials.token_response)
                        redirect_uri = _helpers._add_query_parameter(
                            redirect_uri, decorator._token_response_param,
                            resp_json)

                    self.redirect(redirect_uri)

        return OAuth2Handler

    def callback_application(self):
        """WSGI application for handling the OAuth 2.0 redirect callback.

        If you need finer grained control use `callback_handler` which returns
        just the webapp.RequestHandler.

        Returns:
            A webapp.WSGIApplication that handles the redirect back from the
            server during the OAuth 2.0 dance.
        """
        return webapp.WSGIApplication([
            (self.callback_path, self.callback_handler())
        ])
+
+
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
    """An OAuth2Decorator that builds from a clientsecrets file.

    Uses a clientsecrets file as the source for all the information when
    constructing an OAuth2Decorator.

    ::

        decorator = OAuth2DecoratorFromClientSecrets(
            os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
            scope='https://www.googleapis.com/auth/plus')

        class MainHandler(webapp.RequestHandler):
            @decorator.oauth_required
            def get(self):
                http = decorator.http()
                # http is authorized with the user's Credentials and can be
                # used in API calls

    """

    @_helpers.positional(3)
    def __init__(self, filename, scope, message=None, cache=None, **kwargs):
        """Constructor

        Args:
            filename: string, File name of client secrets.
            scope: string or iterable of strings, scope(s) of the credentials
                   being requested.
            message: string, A friendly string to display to the user if the
                     clientsecrets file is missing or invalid. The message may
                     contain HTML and will be presented on the web interface
                     for any method that uses the decorator.
            cache: An optional cache service client that implements get() and
                   set() methods. See clientsecrets.loadfile() for details.
            **kwargs: dict, Keyword arguments are passed along as kwargs to
                      the OAuth2WebServerFlow constructor.
        """
        client_type, client_info = clientsecrets.loadfile(filename,
                                                          cache=cache)
        supported_types = (clientsecrets.TYPE_WEB,
                           clientsecrets.TYPE_INSTALLED)
        if client_type not in supported_types:
            raise clientsecrets.InvalidClientSecretsError(
                "OAuth2Decorator doesn't support this OAuth 2.0 flow.")

        # Merge the caller's kwargs with values read from the secrets file;
        # revoke_uri is only forwarded when the file provides one.
        decorator_kwargs = dict(
            kwargs,
            auth_uri=client_info['auth_uri'],
            token_uri=client_info['token_uri'],
            message=message,
        )
        revoke_uri = client_info.get('revoke_uri')
        if revoke_uri is not None:
            decorator_kwargs['revoke_uri'] = revoke_uri
        super(OAuth2DecoratorFromClientSecrets, self).__init__(
            client_info['client_id'], client_info['client_secret'],
            scope, **decorator_kwargs)
        # Fall back to a generic configuration prompt when no message given.
        if message is None:
            self._message = 'Please configure your application for OAuth 2.0.'
        else:
            self._message = message
+
+
@_helpers.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
                                       message=None, cache=None):
    """Creates an OAuth2Decorator populated from a clientsecrets file.

    Convenience wrapper around OAuth2DecoratorFromClientSecrets.

    Args:
        filename: string, File name of client secrets.
        scope: string or list of strings, scope(s) of the credentials being
               requested.
        message: string, A friendly string to display to the user if the
                 clientsecrets file is missing or invalid. The message may
                 contain HTML and will be presented on the web interface for
                 any method that uses the decorator.
        cache: An optional cache service client that implements get() and
               set() methods. See clientsecrets.loadfile() for details.

    Returns: An OAuth2Decorator
    """
    return OAuth2DecoratorFromClientSecrets(
        filename, scope, message=message, cache=cache)
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/devshell.py b/contrib/python/oauth2client/py2/oauth2client/contrib/devshell.py
new file mode 100644
index 0000000000..691765f097
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/devshell.py
@@ -0,0 +1,152 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 utitilies for Google Developer Shell environment."""
+
+import datetime
+import json
+import os
+import socket
+
+from oauth2client import _helpers
+from oauth2client import client
+
+DEVSHELL_ENV = 'DEVSHELL_CLIENT_PORT'
+
+
class Error(Exception):
    """Errors for this module."""
    # Root of the module's exception hierarchy; catch this to handle any
    # Developer Shell failure.
    pass


class CommunicationError(Error):
    """Errors for communication with the Developer Shell server."""


class NoDevshellServer(Error):
    """Error when no Developer Shell server can be contacted."""
+
+
+# The request for credential information to the Developer Shell client socket
+# is always an empty PBLite-formatted JSON object, so just define it as a
+# constant.
+CREDENTIAL_INFO_REQUEST_JSON = '[]'
+
+
class CredentialInfoResponse(object):
    """Credential information response from Developer Shell server.

    The credential information response from Developer Shell socket is a
    PBLite-formatted JSON array with fields encoded by their index in the
    array:

    * Index 0 - user email
    * Index 1 - default project ID. None if the project context is not known.
    * Index 2 - OAuth2 access token. None if there is no valid auth context.
    * Index 3 - Seconds until the access token expires. None if not present.
    """

    def __init__(self, json_string):
        """Initialize the response data from JSON PBLite array."""
        parsed = json.loads(json_string)
        if not isinstance(parsed, list):
            raise ValueError('Not a list: ' + str(parsed))
        # Pad to four entries so missing trailing fields default to None;
        # any extra entries beyond index 3 are ignored.
        padded = (parsed + [None] * 4)[:4]
        self.user_email = padded[0]
        self.project_id = padded[1]
        self.access_token = padded[2]
        self.expires_in = padded[3]
+
+
def _SendRecv():
    """Communicate with the Developer Shell server socket.

    Sends the constant credential-info request and parses the reply. The
    wire format in both directions is the decimal payload length, a newline,
    then the JSON payload.

    Returns:
        A CredentialInfoResponse built from the server's JSON payload.

    Raises:
        NoDevshellServer: if the DEVSHELL_CLIENT_PORT environment variable
            is unset or zero.
        CommunicationError: if the response framing is malformed.
    """

    port = int(os.getenv(DEVSHELL_ENV, 0))
    if port == 0:
        raise NoDevshellServer()

    sock = socket.socket()
    sock.connect(('localhost', port))

    data = CREDENTIAL_INFO_REQUEST_JSON
    msg = '{0}\n{1}'.format(len(data), data)
    sock.sendall(_helpers._to_bytes(msg, encoding='utf-8'))

    # Read enough bytes to cover the length prefix and its terminating
    # newline; anything after the newline is already payload. This assumes
    # the prefix plus newline fits in 6 bytes (payload < 100000 bytes).
    header = sock.recv(6).decode()
    if '\n' not in header:
        raise CommunicationError('saw no newline in the first 6 bytes')
    len_str, json_str = header.split('\n', 1)
    to_read = int(len_str) - len(json_str)
    if to_read > 0:
        # NOTE(review): MSG_WAITALL is not available on every platform
        # (e.g. some Windows versions) -- confirm supported environments.
        json_str += sock.recv(to_read, socket.MSG_WAITALL).decode()

    return CredentialInfoResponse(json_str)
+
+
class DevshellCredentials(client.GoogleCredentials):
    """Credentials object for Google Developer Shell environment.

    This object will allow a Google Developer Shell session to identify its
    user to Google and other OAuth 2.0 servers that can verify assertions. It
    can be used for the purpose of accessing data stored under the user
    account.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.
    """

    def __init__(self, user_agent=None):
        """Create credentials by querying the Developer Shell server.

        Args:
            user_agent: string, User agent for the credentials, or None.
        """
        # All token state comes from the local Developer Shell server, so
        # the usual OAuth client fields are not needed.
        super(DevshellCredentials, self).__init__(
            None,  # access_token, initialized below
            None,  # client_id
            None,  # client_secret
            None,  # refresh_token
            None,  # token_expiry
            None,  # token_uri
            user_agent)
        # Fetch the initial access token immediately.
        self._refresh(None)

    def _refresh(self, http):
        """Refreshes the access token.

        Queries the Developer Shell server and updates access_token and
        token_expiry from its response.

        Args:
            http: unused HTTP object
        """
        self.devshell_response = _SendRecv()
        self.access_token = self.devshell_response.access_token
        expires_in = self.devshell_response.expires_in
        if expires_in is not None:
            delta = datetime.timedelta(seconds=expires_in)
            self.token_expiry = client._UTCNOW() + delta
        else:
            # No expiry information from the server: token has no recorded
            # expiration time.
            self.token_expiry = None

    @property
    def user_email(self):
        # Email of the user, from the most recent server response.
        return self.devshell_response.user_email

    @property
    def project_id(self):
        # Default project ID, from the most recent server response.
        return self.devshell_response.project_id

    @classmethod
    def from_json(cls, json_data):
        # Deserialization is deliberately unsupported for this type.
        raise NotImplementedError(
            'Cannot load Developer Shell credentials from JSON.')

    @property
    def serialization_data(self):
        # Serialization is deliberately unsupported for this type.
        raise NotImplementedError(
            'Cannot serialize Developer Shell credentials.')
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/dictionary_storage.py b/contrib/python/oauth2client/py2/oauth2client/contrib/dictionary_storage.py
new file mode 100644
index 0000000000..6ee333fa7c
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/dictionary_storage.py
@@ -0,0 +1,65 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Dictionary storage for OAuth2 Credentials."""
+
+from oauth2client import client
+
+
class DictionaryStorage(client.Storage):
    """Store and retrieve credentials to and from a dictionary-like object.

    Credentials are serialized to JSON and kept under a single key of the
    supplied mapping.

    Args:
        dictionary: A dictionary or dictionary-like object.
        key: A string or other hashable. The credentials will be stored in
             ``dictionary[key]``.
        lock: An optional threading.Lock-like object. The lock will be
              acquired before anything is written or read from the
              dictionary.
    """

    def __init__(self, dictionary, key, lock=None):
        """Construct a DictionaryStorage instance."""
        super(DictionaryStorage, self).__init__(lock=lock)
        self._dictionary = dictionary
        self._key = key

    def locked_get(self):
        """Retrieve the credentials from the dictionary, if they exist.

        Returns: A :class:`oauth2client.client.OAuth2Credentials` instance.
        """
        stored_json = self._dictionary.get(self._key)
        if stored_json is None:
            return None
        restored = client.OAuth2Credentials.from_json(stored_json)
        restored.set_store(self)
        return restored

    def locked_put(self, credentials):
        """Save the credentials to the dictionary.

        Args:
            credentials: A :class:`oauth2client.client.OAuth2Credentials`
                         instance.
        """
        self._dictionary[self._key] = credentials.to_json()

    def locked_delete(self):
        """Remove the credentials from the dictionary, if they exist."""
        self._dictionary.pop(self._key, None)
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/__init__.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/__init__.py
new file mode 100644
index 0000000000..644a8f9fb7
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/__init__.py
@@ -0,0 +1,489 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for the Django web framework.
+
+Provides Django views and helpers that make using the OAuth2 web server
+flow easier. It includes an ``oauth_required`` decorator to automatically
+ensure that user credentials are available, and an ``oauth_enabled`` decorator
+to check if the user has authorized, and helper shortcuts to create the
+authorization URL otherwise.
+
+There are two basic use cases supported. The first is using Google OAuth as the
+primary form of authentication, which is the simpler approach recommended
+for applications without their own user system.
+
+The second use case is adding Google OAuth credentials to an
+existing Django model containing a Django user field. Most of the
+configuration is the same, except for `GOOGLE_OAUTH_MODEL_STORAGE` in
+settings.py. See "Adding Credentials To An Existing Django User System" for
+usage differences.
+
+Only Django versions 1.8+ are supported.
+
+Configuration
+===============
+
+To configure, you'll need a set of OAuth2 web application credentials from
+`Google Developer's Console <https://console.developers.google.com/project/_/apiui/credential>`.
+
+Add the helper to your INSTALLED_APPS:
+
+.. code-block:: python
+ :caption: settings.py
+ :name: installed_apps
+
+ INSTALLED_APPS = (
+ # other apps
+ "django.contrib.sessions.middleware"
+ "oauth2client.contrib.django_util"
+ )
+
+This helper also requires the Django Session Middleware, so
+``django.contrib.sessions.middleware`` should be in INSTALLED_APPS as well.
+MIDDLEWARE or MIDDLEWARE_CLASSES (in Django versions <1.10) should also
+contain the string 'django.contrib.sessions.middleware.SessionMiddleware'.
+
+
+Add the client secrets created earlier to the settings. You can either
+specify the path to the credentials file in JSON format
+
+.. code-block:: python
+ :caption: settings.py
+ :name: secrets_file
+
+    GOOGLE_OAUTH2_CLIENT_SECRETS_JSON = '/path/to/client-secret.json'
+
+Or, directly configure the client Id and client secret.
+
+
+.. code-block:: python
+ :caption: settings.py
+ :name: secrets_config
+
+ GOOGLE_OAUTH2_CLIENT_ID=client-id-field
+ GOOGLE_OAUTH2_CLIENT_SECRET=client-secret-field
+
+By default, the default scopes for the required decorator only contains the
+``email`` scopes. You can change that default in the settings.
+
+.. code-block:: python
+ :caption: settings.py
+ :name: scopes
+
+ GOOGLE_OAUTH2_SCOPES = ('email', 'https://www.googleapis.com/auth/calendar',)
+
+By default, the decorators will add an `oauth` object to the Django request
+object, and include all of its state and helpers inside that object. If the
+`oauth` name conflicts with another usage, it can be changed
+
+.. code-block:: python
+ :caption: settings.py
+ :name: request_prefix
+
+ # changes request.oauth to request.google_oauth
+ GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'google_oauth'
+
+Add the oauth2 routes to your application's urls.py urlpatterns.
+
+.. code-block:: python
+ :caption: urls.py
+ :name: urls
+
+ from oauth2client.contrib.django_util.site import urls as oauth2_urls
+
+ urlpatterns += [url(r'^oauth2/', include(oauth2_urls))]
+
+To require OAuth2 credentials for a view, use the `oauth2_required` decorator.
+This creates a credentials object with an id_token, and allows you to create
+an `http` object to build service clients with. These are all attached to the
+request.oauth
+
+.. code-block:: python
+ :caption: views.py
+ :name: views_required
+
+ from oauth2client.contrib.django_util.decorators import oauth_required
+
+ @oauth_required
+ def requires_default_scopes(request):
+ email = request.oauth.credentials.id_token['email']
+ service = build(serviceName='calendar', version='v3',
+ http=request.oauth.http,
+ developerKey=API_KEY)
+ events = service.events().list(calendarId='primary').execute()['items']
+ return HttpResponse("email: {0} , calendar: {1}".format(
+ email,str(events)))
+ return HttpResponse(
+ "email: {0} , calendar: {1}".format(email, str(events)))
+
+To make OAuth2 optional and provide an authorization link in your own views.
+
+.. code-block:: python
+ :caption: views.py
+ :name: views_enabled2
+
+ from oauth2client.contrib.django_util.decorators import oauth_enabled
+
+ @oauth_enabled
+ def optional_oauth2(request):
+ if request.oauth.has_credentials():
+ # this could be passed into a view
+ # request.oauth.http is also initialized
+ return HttpResponse("User email: {0}".format(
+ request.oauth.credentials.id_token['email']))
+ else:
+ return HttpResponse(
+ 'Here is an OAuth Authorize link: <a href="{0}">Authorize'
+ '</a>'.format(request.oauth.get_authorize_redirect()))
+
+If a view needs a scope not included in the default scopes specified in
+the settings, you can use [incremental auth](https://developers.google.com/identity/sign-in/web/incremental-auth)
+and specify additional scopes in the decorator arguments.
+
+.. code-block:: python
+ :caption: views.py
+ :name: views_required_additional_scopes
+
+ @oauth_enabled(scopes=['https://www.googleapis.com/auth/drive'])
+ def drive_required(request):
+ if request.oauth.has_credentials():
+ service = build(serviceName='drive', version='v2',
+ http=request.oauth.http,
+ developerKey=API_KEY)
+ events = service.files().list().execute()['items']
+ return HttpResponse(str(events))
+ else:
+ return HttpResponse(
+ 'Here is an OAuth Authorize link: <a href="{0}">Authorize'
+ '</a>'.format(request.oauth.get_authorize_redirect()))
+
+
+To provide a callback on authorization being completed, use the
+oauth2_authorized signal:
+
+.. code-block:: python
+ :caption: views.py
+ :name: signals
+
+ from oauth2client.contrib.django_util.signals import oauth2_authorized
+
+ def test_callback(sender, request, credentials, **kwargs):
+ print("Authorization Signal Received {0}".format(
+ credentials.id_token['email']))
+
+ oauth2_authorized.connect(test_callback)
+
+Adding Credentials To An Existing Django User System
+=====================================================
+
+As an alternative to storing the credentials in the session, the helper
+can be configured to store the fields on a Django model. This might be useful
+if you need to use the credentials outside the context of a user request. It
+also prevents the need for a logged in user to repeat the OAuth flow when
+starting a new session.
+
+To use, change ``settings.py``
+
+.. code-block:: python
+ :caption: settings.py
+ :name: storage_model_config
+
+ GOOGLE_OAUTH2_STORAGE_MODEL = {
+ 'model': 'path.to.model.MyModel',
+ 'user_property': 'user_id',
+ 'credentials_property': 'credential'
+ }
+
+Where ``path.to.model`` class is the fully qualified name of a
+``django.db.model`` class containing a ``django.contrib.auth.models.User``
+field with the name specified by `user_property` and a
+:class:`oauth2client.contrib.django_util.models.CredentialsField` with the name
+specified by `credentials_property`. For the sample configuration given,
+our model would look like
+
+.. code-block:: python
+ :caption: models.py
+ :name: storage_model_model
+
+ from django.contrib.auth.models import User
+ from oauth2client.contrib.django_util.models import CredentialsField
+
+ class MyModel(models.Model):
+ # ... other fields here ...
+ user = models.OneToOneField(User)
+ credential = CredentialsField()
+"""
+
+import importlib
+
+import django.conf
+from django.core import exceptions
+from django.core import urlresolvers
+from six.moves.urllib import parse
+
+from oauth2client import clientsecrets
+from oauth2client import transport
+from oauth2client.contrib import dictionary_storage
+from oauth2client.contrib.django_util import storage
+
+GOOGLE_OAUTH2_DEFAULT_SCOPES = ('email',)
+GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'oauth'
+
+
def _load_client_secrets(filename):
    """Loads the OAuth2 client id and secret from a client secrets file.

    Args:
        filename: The name of the file containing the JSON secret key.

    Returns:
        A 2-tuple, the first item containing the client id, and the second
        item containing a client secret.

    Raises:
        ValueError: If the secrets file describes anything other than a
            web application flow.
    """
    secrets_type, secrets_info = clientsecrets.loadfile(filename)

    # Only the three-legged web-server flow is supported by this helper.
    if secrets_type != clientsecrets.TYPE_WEB:
        raise ValueError(
            'The flow specified in {} is not supported, only the WEB flow '
            'type is supported.'.format(secrets_type))

    return secrets_info['client_id'], secrets_info['client_secret']
+
+
def _get_oauth2_client_id_and_secret(settings_instance):
    """Initializes client id and client secret based on the settings.

    Args:
        settings_instance: An instance of ``django.conf.settings``.

    Returns:
        A 2-tuple, the first item is the client id and the second
        item is the client secret.

    Raises:
        django.core.exceptions.ImproperlyConfigured: If neither a secrets
            file nor an explicit id/secret pair is configured.
    """
    # A client-secrets JSON file takes precedence over explicit settings.
    secret_json = getattr(settings_instance,
                          'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)
    if secret_json is not None:
        return _load_client_secrets(secret_json)

    client_id = getattr(settings_instance, "GOOGLE_OAUTH2_CLIENT_ID", None)
    client_secret = getattr(settings_instance,
                            "GOOGLE_OAUTH2_CLIENT_SECRET", None)
    if client_id is None or client_secret is None:
        raise exceptions.ImproperlyConfigured(
            "Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or "
            "both GOOGLE_OAUTH2_CLIENT_ID and "
            "GOOGLE_OAUTH2_CLIENT_SECRET in settings.py")
    return client_id, client_secret
+
+
def _get_storage_model():
    """Determines whether credentials go in the session or the Django ORM.

    By default, the credentials will be stored in the session, unless
    ``GOOGLE_OAUTH2_STORAGE_MODEL`` is found in the settings. Usually, the
    ORM storage is used to integrate credentials into an existing Django
    user system.

    Returns:
        A 3-tuple of strings when ``GOOGLE_OAUTH2_STORAGE_MODEL`` is
        configured: the fully qualified path of the ``django.db.model``
        class, the name of the ``django.contrib.auth.models.User`` field on
        the model, and the name of the
        :class:`oauth2client.contrib.django_util.models.CredentialsField`
        field on the model. If Django ORM storage is not configured,
        returns ``(None, None, None)``.
    """
    config = getattr(django.conf.settings,
                     'GOOGLE_OAUTH2_STORAGE_MODEL', None)
    if config is None:
        return None, None, None
    return (config['model'],
            config['user_property'],
            config['credentials_property'])
+
+
class OAuth2Settings(object):
    """Initializes Django OAuth2 Helper Settings.

    This class loads the OAuth2 Settings from the Django settings, and then
    provides those settings as attributes to the rest of the views and
    decorators in the module.

    Attributes:
        scopes: A list of OAuth2 scopes that the decorators and views will
            use as defaults.
        request_prefix: The name of the attribute that the decorators use to
            attach the UserOAuth2 object to the Django request object.
        client_id: The OAuth2 Client ID.
        client_secret: The OAuth2 Client Secret.
    """

    def __init__(self, settings_instance):
        self.scopes = getattr(settings_instance, 'GOOGLE_OAUTH2_SCOPES',
                              GOOGLE_OAUTH2_DEFAULT_SCOPES)
        self.request_prefix = getattr(settings_instance,
                                      'GOOGLE_OAUTH2_REQUEST_ATTRIBUTE',
                                      GOOGLE_OAUTH2_REQUEST_ATTRIBUTE)
        self.client_id, self.client_secret = (
            _get_oauth2_client_id_and_secret(settings_instance))

        # Django 1.10 deprecated MIDDLEWARE_CLASSES in favor of MIDDLEWARE
        middleware_settings = getattr(settings_instance, 'MIDDLEWARE', None)
        if middleware_settings is None:
            middleware_settings = getattr(
                settings_instance, 'MIDDLEWARE_CLASSES', None)
        if middleware_settings is None:
            # BUG FIX: the original message had no space between the two
            # concatenated literals ("...MIDDLEWARE_CLASSESconfigured").
            raise exceptions.ImproperlyConfigured(
                'Django settings has neither MIDDLEWARE nor '
                'MIDDLEWARE_CLASSES configured')

        # Session middleware is mandatory: both the CSRF token and the
        # (default) credentials storage live in the session.
        if ('django.contrib.sessions.middleware.SessionMiddleware' not in
                middleware_settings):
            raise exceptions.ImproperlyConfigured(
                'The Google OAuth2 Helper requires session middleware to '
                'be installed. Edit your MIDDLEWARE_CLASSES or MIDDLEWARE '
                'setting to include \'django.contrib.sessions.middleware.'
                'SessionMiddleware\'.')

        (self.storage_model, self.storage_model_user_property,
         self.storage_model_credentials_property) = _get_storage_model()
+
+
+# Module-level singleton: Django settings are parsed once at import time
+# and shared by all views and decorators in this package.
+oauth2_settings = OAuth2Settings(django.conf.settings)
+
+# Session key under which serialized credentials are kept when the
+# session-backed (non-ORM) storage is used; see get_storage().
+_CREDENTIALS_KEY = 'google_oauth2_credentials'
+
+
def get_storage(request):
    """Gets a Credentials storage object for the Django OAuth2 Helper.

    Args:
        request: Reference to the current request object.

    Returns:
        An :class:`oauth2.client.Storage` object: ORM-backed when
        ``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, otherwise
        session-backed.
    """
    if oauth2_settings.storage_model:
        # Resolve the dotted model path configured in settings.
        module_name, class_name = (
            oauth2_settings.storage_model.rsplit('.', 1))
        model = getattr(importlib.import_module(module_name), class_name)
        return storage.DjangoORMStorage(
            model,
            oauth2_settings.storage_model_user_property,
            request.user,
            oauth2_settings.storage_model_credentials_property)
    # Default: keep credentials in the user's session.
    return dictionary_storage.DictionaryStorage(
        request.session, key=_CREDENTIALS_KEY)
+
+
def _redirect_with_params(url_name, *args, **kwargs):
    """Helper method to create a redirect response with URL params.

    This builds a redirect string that converts kwargs into a
    query string.

    Args:
        url_name: The name of the url to redirect to.
        kwargs: the query string param and their values to build.

    Returns:
        A properly formatted redirect string.
    """
    base_url = urlresolvers.reverse(url_name, args=args)
    # doseq=True so list-valued params (e.g. scopes) become repeated keys.
    query_string = parse.urlencode(kwargs, True)
    return "{0}?{1}".format(base_url, query_string)
+
+
def _credentials_from_request(request):
    """Gets the authorized credentials for this flow, if they exist."""
    # ORM storage requires a logged in user; session storage does not.
    if oauth2_settings.storage_model is not None:
        if not request.user.is_authenticated():
            return None
    return get_storage(request).get()
+
+
class UserOAuth2(object):
    """Class to create oauth2 objects on Django request objects containing
    credentials and helper methods.
    """

    def __init__(self, request, scopes=None, return_url=None):
        """Initialize the Oauth2 Object.

        Args:
            request: Django request object.
            scopes: Scopes desired for this OAuth2 flow.
            return_url: The url to return to after the OAuth flow is
                complete, defaults to the request's current URL path.
        """
        self.request = request
        self.return_url = return_url or request.get_full_path()
        # Requested scopes always include the configured defaults.
        self._scopes = set(oauth2_settings.scopes)
        if scopes:
            self._scopes |= set(scopes)

    def get_authorize_redirect(self):
        """Creates a URL to start the OAuth2 authorization flow."""
        get_params = {
            'return_url': self.return_url,
            'scopes': self._get_scopes()
        }
        return _redirect_with_params('google_oauth:authorize', **get_params)

    def has_credentials(self):
        """Returns True if there are valid credentials for the current user
        and required scopes."""
        credentials = _credentials_from_request(self.request)
        return (credentials and not credentials.invalid and
                credentials.has_scopes(self._get_scopes()))

    def _get_scopes(self):
        """Returns the scopes associated with this object, kept up to
        date for incremental auth."""
        # BUG FIX: the original called _credentials_from_request() twice,
        # hitting the storage backend a second time for the same value.
        credentials = _credentials_from_request(self.request)
        if credentials:
            return self._scopes | credentials.scopes
        return self._scopes

    @property
    def scopes(self):
        """Returns the scopes associated with this OAuth2 object."""
        # make sure previously requested custom scopes are maintained
        # in future authorizations
        return self._get_scopes()

    @property
    def credentials(self):
        """Gets the authorized credentials for this flow, if they exist."""
        return _credentials_from_request(self.request)

    @property
    def http(self):
        """Helper: create HTTP client authorized with OAuth2 credentials."""
        if self.has_credentials():
            return self.credentials.authorize(transport.get_http_object())
        return None
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/apps.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/apps.py
new file mode 100644
index 0000000000..86676b91a8
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/apps.py
@@ -0,0 +1,32 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Application Config For Django OAuth2 Helper.
+
+Django 1.7+ provides an
+[applications](https://docs.djangoproject.com/en/1.8/ref/applications/)
+API so that Django projects can introspect on installed applications using a
+stable API. This module exists to follow that convention.
+"""
+
+import sys
+
# Django 1.7+ only supports Python 2.7+
if sys.hexversion >= 0x02070000:  # pragma: NO COVER
    from django.apps import AppConfig

    class GoogleOAuth2HelperConfig(AppConfig):
        """App Config for the Django OAuth2 Helper."""
        name = 'oauth2client.django_util'
        verbose_name = "Google OAuth2 Django Helper"
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/decorators.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/decorators.py
new file mode 100644
index 0000000000..e62e171071
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/decorators.py
@@ -0,0 +1,145 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Decorators for Django OAuth2 Flow.
+
+Contains two decorators, ``oauth_required`` and ``oauth_enabled``.
+
+``oauth_required`` will ensure that a user has an oauth object containing
+credentials associated with the request, and if not, redirect to the
+authorization flow.
+
+``oauth_enabled`` will attach the oauth2 object containing credentials if it
+exists. If it doesn't, the view will still render, but helper methods will be
+attached to start the oauth2 flow.
+"""
+
+from django import shortcuts
+import django.conf
+from six import wraps
+from six.moves.urllib import parse
+
+from oauth2client.contrib import django_util
+
+
def oauth_required(decorated_function=None, scopes=None, **decorator_kwargs):
    """Decorator to require OAuth2 credentials for a view.


    .. code-block:: python
       :caption: views.py
       :name: views_required_2


       from oauth2client.contrib.django_util.decorators import oauth_required

       @oauth_required
       def requires_default_scopes(request):
           email = request.credentials.id_token['email']
           service = build(serviceName='calendar', version='v3',
                           http=request.oauth.http,
                           developerKey=API_KEY)
           events = service.events().list(
               calendarId='primary').execute()['items']
           return HttpResponse(
               "email: {0}, calendar: {1}".format(email, str(events)))

    Args:
        decorated_function: View function to decorate, must have the Django
            request object as the first argument.
        scopes: Scopes to require; the settings defaults are always added.
        decorator_kwargs: Can include ``return_url`` to specify the URL to
            return to after OAuth2 authorization is complete.

    Returns:
        An OAuth2 Authorize view if credentials are not found or if the
        credentials are missing the required scopes. Otherwise,
        the decorated view.
    """
    def curry_wrapper(wrapped_function):
        @wraps(wrapped_function)
        def required_wrapper(request, *args, **kwargs):
            # ORM storage requires a logged-in user; send anonymous users
            # to the login page first.
            if not (django_util.oauth2_settings.storage_model is None or
                    request.user.is_authenticated()):
                redirect_str = '{0}?next={1}'.format(
                    django.conf.settings.LOGIN_URL,
                    parse.quote(request.path))
                return shortcuts.redirect(redirect_str)

            # BUG FIX: use .get() rather than .pop(). decorator_kwargs is
            # shared across every request served by this view, so popping
            # discarded a configured return_url after the first request.
            return_url = decorator_kwargs.get('return_url',
                                              request.get_full_path())
            user_oauth = django_util.UserOAuth2(request, scopes, return_url)
            if not user_oauth.has_credentials():
                return shortcuts.redirect(user_oauth.get_authorize_redirect())
            setattr(request, django_util.oauth2_settings.request_prefix,
                    user_oauth)
            return wrapped_function(request, *args, **kwargs)

        return required_wrapper

    # Support both bare (@oauth_required) and parameterized
    # (@oauth_required(scopes=...)) decoration.
    if decorated_function:
        return curry_wrapper(decorated_function)
    else:
        return curry_wrapper
+
+
def oauth_enabled(decorated_function=None, scopes=None, **decorator_kwargs):
    """Decorator to enable OAuth Credentials if authorized, and setup
    the oauth object on the request object to provide helper functions
    to start the flow otherwise.

    .. code-block:: python
       :caption: views.py
       :name: views_enabled3

       from oauth2client.contrib.django_util.decorators import oauth_enabled

       @oauth_enabled
       def optional_oauth2(request):
           if request.oauth.has_credentials():
               # this could be passed into a view
               # request.oauth.http is also initialized
               return HttpResponse("User email: {0}".format(
                   request.oauth.credentials.id_token['email']))
           else:
               return HttpResponse(
                   'Here is an OAuth Authorize link: '
                   '<a href="{0}">Authorize</a>'.format(
                       request.oauth.get_authorize_redirect()))


    Args:
        decorated_function: View function to decorate.
        scopes: Scopes to require; the settings defaults are always added.
        decorator_kwargs: Can include ``return_url`` to specify the URL to
            return to after OAuth2 authorization is complete.

    Returns:
        The decorated view function.
    """
    def curry_wrapper(wrapped_function):
        @wraps(wrapped_function)
        def enabled_wrapper(request, *args, **kwargs):
            # BUG FIX: use .get() rather than .pop(). decorator_kwargs is
            # shared across every request served by this view, so popping
            # discarded a configured return_url after the first request.
            return_url = decorator_kwargs.get('return_url',
                                              request.get_full_path())
            user_oauth = django_util.UserOAuth2(request, scopes, return_url)
            setattr(request, django_util.oauth2_settings.request_prefix,
                    user_oauth)
            return wrapped_function(request, *args, **kwargs)

        return enabled_wrapper

    # Support both bare (@oauth_enabled) and parameterized
    # (@oauth_enabled(scopes=...)) decoration.
    if decorated_function:
        return curry_wrapper(decorated_function)
    else:
        return curry_wrapper
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/models.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/models.py
new file mode 100644
index 0000000000..37cc697054
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/models.py
@@ -0,0 +1,82 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains classes used for the Django ORM storage."""
+
+import base64
+import pickle
+
+from django.db import models
+from django.utils import encoding
+import jsonpickle
+
+import oauth2client
+
+
class CredentialsField(models.Field):
    """Django ORM field for storing OAuth2 Credentials.

    Values are serialized with jsonpickle (base64-encoded) on the way into
    the database; legacy base64-pickle payloads are still accepted when
    reading.
    """

    def __init__(self, *args, **kwargs):
        # Credentials are optional by default.
        kwargs.setdefault('null', True)
        super(CredentialsField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Stored as raw bytes in the database.
        return 'BinaryField'

    def from_db_value(self, value, expression, connection, context):
        """Overrides ``models.Field`` method. This converts the value
        returned from the database to an instance of this class.
        """
        return self.to_python(value)

    def to_python(self, value):
        """Overrides ``models.Field`` method. This is used to convert
        bytes (from serialization etc) to an instance of this class."""
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Credentials):
            # Already deserialized — pass straight through.
            return value
        try:
            return jsonpickle.decode(
                base64.b64decode(encoding.smart_bytes(value)).decode())
        except ValueError:
            # Fall back to the legacy pickle serialization format.
            return pickle.loads(
                base64.b64decode(encoding.smart_bytes(value)))

    def get_prep_value(self, value):
        """Overrides ``models.Field`` method. This is used to convert
        the value from an instances of this class to bytes that can be
        inserted into the database.
        """
        if value is None:
            return None
        return encoding.smart_text(
            base64.b64encode(jsonpickle.encode(value).encode()))

    def value_to_string(self, obj):
        """Convert the field value from the provided model to a string.

        Used during model serialization.

        Args:
            obj: db.Model, model object

        Returns:
            string, the serialized field value
        """
        return self.get_prep_value(self._get_val_from_obj(obj))
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/signals.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/signals.py
new file mode 100644
index 0000000000..e9356b4dcb
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/signals.py
@@ -0,0 +1,28 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Signals for Google OAuth2 Helper.
+
+This module contains signals for Google OAuth2 Helper. Currently it only
+contains one, which fires when an OAuth2 authorization flow has completed.
+"""
+
+import django.dispatch
+
+# NOTE(review): the triple-quoted string below is a bare expression, not a
+# docstring attached to the signal object; it documents the signal for
+# readers only.
+"""Signal that fires when OAuth2 Flow has completed.
+It passes the Django request object and the OAuth2 credentials object to the
+ receiver.
+"""
+# Sent by views.oauth2_callback after a successful token exchange.
+oauth2_authorized = django.dispatch.Signal(
+ providing_args=["request", "credentials"])
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/site.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/site.py
new file mode 100644
index 0000000000..631f79bef4
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/site.py
@@ -0,0 +1,26 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains Django URL patterns used for OAuth2 flow."""
+
+from django.conf import urls
+
+from oauth2client.contrib.django_util import views
+
+# URL patterns for the two OAuth2 helper views. They are reversed
+# elsewhere as "google_oauth:callback" and "google_oauth:authorize".
+urlpatterns = [
+ urls.url(r'oauth2callback/', views.oauth2_callback, name="callback"),
+ urls.url(r'oauth2authorize/', views.oauth2_authorize, name="authorize")
+]
+
+# 3-tuple (patterns, app namespace, instance namespace) suitable for
+# django.conf.urls.include(). Note this rebinding shadows the
+# `from django.conf import urls` import above.
+urls = (urlpatterns, "google_oauth", "google_oauth")
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/storage.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/storage.py
new file mode 100644
index 0000000000..5682919bc0
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/storage.py
@@ -0,0 +1,81 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains a storage module that stores credentials using the Django ORM."""
+
+from oauth2client import client
+
+
class DjangoORMStorage(client.Storage):
    """Store and retrieve a single credential to and from the Django
    datastore.

    This Storage helper presumes the Credentials have been stored as a
    CredentialsField on a db model class.
    """

    def __init__(self, model_class, key_name, key_value, property_name):
        """Constructor for Storage.

        Args:
            model_class: db.Model, model class holding the credentials.
            key_name: string, key name for the entity that has the
                credentials.
            key_value: string, key value for the entity that has the
                credentials.
            property_name: string, name of the CredentialsField property on
                the model.
        """
        super(DjangoORMStorage, self).__init__()
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name

    def locked_get(self):
        """Retrieve the stored credential from the Django ORM.

        Returns:
            An oauth2client.Credentials instance found by filtering
            ``model_class`` on ``key_name == key_value`` and reading
            ``property_name``, or None when no matching entity exists.
        """
        lookup = {self.key_name: self.key_value}
        matches = self.model_class.objects.filter(**lookup)
        if len(matches) == 0:
            return None
        credential = getattr(matches[0], self.property_name)
        # Re-attach this storage so future refreshes persist automatically.
        if getattr(credential, 'set_store', None) is not None:
            credential.set_store(self)
        return credential

    def locked_put(self, credentials):
        """Write a Credentials to the Django datastore.

        Args:
            credentials: Credentials, the credentials to store.
        """
        entity, _ = self.model_class.objects.get_or_create(
            **{self.key_name: self.key_value})
        setattr(entity, self.property_name, credentials)
        entity.save()

    def locked_delete(self):
        """Delete Credentials from the datastore."""
        self.model_class.objects.filter(
            **{self.key_name: self.key_value}).delete()
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/views.py b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/views.py
new file mode 100644
index 0000000000..1835208a96
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/django_util/views.py
@@ -0,0 +1,193 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains the views used by the OAuth2 flows.
+
+There are two views used by the OAuth2 flow, the authorize and the callback
+view. The authorize view kicks off the three-legged OAuth flow, and the
+callback view validates the flow and if successful stores the credentials
+in the configured storage."""
+
+import hashlib
+import json
+import os
+
+from django import http
+from django import shortcuts
+from django.conf import settings
+from django.core import urlresolvers
+from django.shortcuts import redirect
+from django.utils import html
+import jsonpickle
+from six.moves.urllib import parse
+
+from oauth2client import client
+from oauth2client.contrib import django_util
+from oauth2client.contrib.django_util import get_storage
+from oauth2client.contrib.django_util import signals
+
+# Session key holding the CSRF token for the OAuth2 flow in progress.
+_CSRF_KEY = 'google_oauth2_csrf_token'
+# Session-key template (formatted with the CSRF token) under which the
+# jsonpickle-serialized flow object is stored; see _make_flow.
+_FLOW_KEY = 'google_oauth2_flow_{0}'
+
+
def _make_flow(request, scopes, return_url=None):
    """Creates a Web Server Flow and stores it in the session.

    Args:
        request: A Django request object.
        scopes: the request oauth2 scopes.
        return_url: The URL to return to after the flow is complete.
            Defaults to the path of the current request.

    Returns:
        An OAuth2 flow object that has been stored in the session.
    """
    # A random CSRF token ties the eventual callback to this session.
    csrf_token = hashlib.sha256(os.urandom(1024)).hexdigest()
    request.session[_CSRF_KEY] = csrf_token

    # The token and return URL ride along in the OAuth2 state parameter.
    state = json.dumps({
        'csrf_token': csrf_token,
        'return_url': return_url,
    })

    callback_uri = request.build_absolute_uri(
        urlresolvers.reverse("google_oauth:callback"))
    flow = client.OAuth2WebServerFlow(
        client_id=django_util.oauth2_settings.client_id,
        client_secret=django_util.oauth2_settings.client_secret,
        scope=scopes,
        state=state,
        redirect_uri=callback_uri)

    # Persist the flow so the callback view can recover requested scopes.
    request.session[_FLOW_KEY.format(csrf_token)] = jsonpickle.encode(flow)
    return flow
+
+
def _get_flow_for_token(csrf_token, request):
    """Looks up the flow in session to recover information about requested
    scopes.

    Args:
        csrf_token: The token passed in the callback request that should
            match the one previously generated and stored in the request on
            the initial authorization view.
        request: The Django request whose session holds the flow.

    Returns:
        The OAuth2 Flow object associated with this flow based on the
        CSRF token, or None when no flow was stored for the token.
    """
    serialized_flow = request.session.get(_FLOW_KEY.format(csrf_token), None)
    if serialized_flow is None:
        return None
    return jsonpickle.decode(serialized_flow)
+
+
+def oauth2_callback(request):
+ """ View that handles the user's return from OAuth2 provider.
+
+ This view verifies the CSRF state and OAuth authorization code, and on
+ success stores the credentials obtained in the storage provider,
+ and redirects to the return_url specified in the authorize view and
+ stored in the session.
+
+ Args:
+ request: Django request.
+
+ Returns:
+ A redirect response back to the return_url, or an
+ HttpResponseBadRequest describing the failure.
+ """
+ # The provider reported an error (e.g. the user denied consent).
+ if 'error' in request.GET:
+ reason = request.GET.get(
+ 'error_description', request.GET.get('error', ''))
+ # Escape the provider-supplied reason before echoing it back (XSS).
+ reason = html.escape(reason)
+ return http.HttpResponseBadRequest(
+ 'Authorization failed {0}'.format(reason))
+
+ try:
+ encoded_state = request.GET['state']
+ code = request.GET['code']
+ except KeyError:
+ return http.HttpResponseBadRequest(
+ 'Request missing state or authorization code')
+
+ # The CSRF token stored by _make_flow must exist in this session.
+ try:
+ server_csrf = request.session[_CSRF_KEY]
+ except KeyError:
+ return http.HttpResponseBadRequest(
+ 'No existing session for this flow.')
+
+ # state is the JSON blob _make_flow serialized into the authorize URL.
+ try:
+ state = json.loads(encoded_state)
+ client_csrf = state['csrf_token']
+ return_url = state['return_url']
+ except (ValueError, KeyError):
+ return http.HttpResponseBadRequest('Invalid state parameter.')
+
+ # The round-tripped token must match the one kept server-side.
+ if client_csrf != server_csrf:
+ return http.HttpResponseBadRequest('Invalid CSRF token.')
+
+ flow = _get_flow_for_token(client_csrf, request)
+
+ if not flow:
+ return http.HttpResponseBadRequest('Missing Oauth2 flow.')
+
+ # Exchange the authorization code for credentials.
+ try:
+ credentials = flow.step2_exchange(code)
+ except client.FlowExchangeError as exchange_error:
+ return http.HttpResponseBadRequest(
+ 'An error has occurred: {0}'.format(exchange_error))
+
+ get_storage(request).put(credentials)
+
+ # Notify listeners that the flow completed (see signals module).
+ signals.oauth2_authorized.send(sender=signals.oauth2_authorized,
+ request=request, credentials=credentials)
+
+ return shortcuts.redirect(return_url)
+
+
def oauth2_authorize(request):
    """View to start the OAuth2 Authorization flow.

    This view starts the OAuth2 authorization flow. If scopes is passed in
    as a GET URL parameter, it will authorize those scopes, otherwise the
    default scopes specified in settings. The return_url can also be
    specified as a GET parameter, otherwise the referer header will be
    checked, and if that isn't found it will return to the root path.

    Args:
        request: The Django request object.

    Returns:
        A redirect to Google OAuth2 Authorization.
    """
    return_url = request.GET.get('return_url', None)
    if not return_url:
        return_url = request.META.get('HTTP_REFERER', '/')

    scopes = request.GET.getlist('scopes', django_util.oauth2_settings.scopes)

    # Model storage (but not session storage) requires a logged in user
    if django_util.oauth2_settings.storage_model:
        if not request.user.is_authenticated():
            return redirect('{0}?next={1}'.format(
                settings.LOGIN_URL, parse.quote(request.get_full_path())))
        # The user is logged in (e.g. we got here after a previous logout):
        # if valid credentials already exist there is nothing to authorize.
        user_oauth = django_util.UserOAuth2(request, scopes, return_url)
        if user_oauth.has_credentials():
            return redirect(return_url)

    flow = _make_flow(request=request, scopes=scopes, return_url=return_url)
    return shortcuts.redirect(flow.step1_get_authorize_url())
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/flask_util.py b/contrib/python/oauth2client/py2/oauth2client/contrib/flask_util.py
new file mode 100644
index 0000000000..fabd613b46
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/flask_util.py
@@ -0,0 +1,557 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for the Flask web framework
+
+Provides a Flask extension that makes using OAuth2 web server flow easier.
+The extension includes views that handle the entire auth flow and a
+``@required`` decorator to automatically ensure that user credentials are
+available.
+
+
+Configuration
+=============
+
+To configure, you'll need a set of OAuth2 web application credentials from the
+`Google Developer's Console <https://console.developers.google.com/project/_/\
+apiui/credential>`__.
+
+.. code-block:: python
+
+ from oauth2client.contrib.flask_util import UserOAuth2
+
+ app = Flask(__name__)
+
+ app.config['SECRET_KEY'] = 'your-secret-key'
+
+ app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'] = 'client_secrets.json'
+
+ # or, specify the client id and secret separately
+ app.config['GOOGLE_OAUTH2_CLIENT_ID'] = 'your-client-id'
+ app.config['GOOGLE_OAUTH2_CLIENT_SECRET'] = 'your-client-secret'
+
+ oauth2 = UserOAuth2(app)
+
+
+Usage
+=====
+
+Once configured, you can use the :meth:`UserOAuth2.required` decorator to
+ensure that credentials are available within a view.
+
+.. code-block:: python
+ :emphasize-lines: 3,7,10
+
+ # Note that app.route should be the outermost decorator.
+ @app.route('/needs_credentials')
+ @oauth2.required
+ def example():
+ # http is authorized with the user's credentials and can be used
+ # to make http calls.
+ http = oauth2.http()
+
+ # Or, you can access the credentials directly
+ credentials = oauth2.credentials
+
+If you want credentials to be optional for a view, you can leave the decorator
+off and use :meth:`UserOAuth2.has_credentials` to check.
+
+.. code-block:: python
+ :emphasize-lines: 3
+
+ @app.route('/optional')
+ def optional():
+ if oauth2.has_credentials():
+ return 'Credentials found!'
+ else:
+ return 'No credentials!'
+
+
+When credentials are available, you can use :attr:`UserOAuth2.email` and
+:attr:`UserOAuth2.user_id` to access information from the `ID Token
+<https://developers.google.com/identity/protocols/OpenIDConnect?hl=en>`__, if
+available.
+
+.. code-block:: python
+ :emphasize-lines: 4
+
+ @app.route('/info')
+ @oauth2.required
+ def info():
+ return "Hello, {} ({})".format(oauth2.email, oauth2.user_id)
+
+
+URLs & Triggering Authorization
+===============================
+
+The extension will add two new routes to your application:
+
+ * ``"oauth2.authorize"`` -> ``/oauth2authorize``
+ * ``"oauth2.callback"`` -> ``/oauth2callback``
+
+When configuring your OAuth2 credentials on the Google Developer's Console, be
+sure to add ``http[s]://[your-app-url]/oauth2callback`` as an authorized
+callback url.
+
+Typically you don't need to use these routes directly; just be sure to
+decorate any views that require credentials with ``@oauth2.required``. If
+needed, you can trigger authorization at any time by redirecting the user
+to the URL returned by :meth:`UserOAuth2.authorize_url`.
+
+.. code-block:: python
+ :emphasize-lines: 3
+
+ @app.route('/login')
+ def login():
+ return oauth2.authorize_url("/")
+
+
+Incremental Auth
+================
+
+This extension also supports `Incremental Auth <https://developers.google.com\
+/identity/protocols/OAuth2WebServer?hl=en#incrementalAuth>`__. To enable it,
+configure the extension with ``include_granted_scopes``.
+
+.. code-block:: python
+
+ oauth2 = UserOAuth2(app, include_granted_scopes=True)
+
+Then specify any additional scopes needed on the decorator, for example:
+
+.. code-block:: python
+ :emphasize-lines: 2,7
+
+ @app.route('/drive')
+ @oauth2.required(scopes=["https://www.googleapis.com/auth/drive"])
+ def requires_drive():
+ ...
+
+ @app.route('/calendar')
+ @oauth2.required(scopes=["https://www.googleapis.com/auth/calendar"])
+ def requires_calendar():
+ ...
+
+The decorator will ensure that the user has authorized all specified scopes
+before allowing them to access the view, and will also ensure that credentials
+do not lose any previously authorized scopes.
+
+
+Storage
+=======
+
+By default, the extension uses a Flask session-based storage solution. This
+means that credentials are only available for the duration of a session. It
+also means that with Flask's default configuration, the credentials will be
+visible in the session cookie. It's highly recommended to use database-backed
+session and to use https whenever handling user credentials.
+
+If you need the credentials to be available longer than a user session or
+available outside of a request context, you will need to implement your own
+:class:`oauth2client.Storage`.
+"""
+
+from functools import wraps
+import hashlib
+import json
+import os
+import pickle
+
+try:
+ from flask import Blueprint
+ from flask import _app_ctx_stack
+ from flask import current_app
+ from flask import redirect
+ from flask import request
+ from flask import session
+ from flask import url_for
+ import markupsafe
+except ImportError: # pragma: NO COVER
+ raise ImportError('The flask utilities require flask 0.9 or newer.')
+
+import six.moves.http_client as httplib
+
+from oauth2client import client
+from oauth2client import clientsecrets
+from oauth2client import transport
+from oauth2client.contrib import dictionary_storage
+
+
+_DEFAULT_SCOPES = ('email',)
+_CREDENTIALS_KEY = 'google_oauth2_credentials'
+_FLOW_KEY = 'google_oauth2_flow_{0}'
+_CSRF_KEY = 'google_oauth2_csrf_token'
+
+
+def _get_flow_for_token(csrf_token):
+ """Retrieves the flow instance associated with a given CSRF token from
+ the Flask session."""
+ flow_pickle = session.pop(
+ _FLOW_KEY.format(csrf_token), None)
+
+ if flow_pickle is None:
+ return None
+ else:
+ return pickle.loads(flow_pickle)
+
+
+class UserOAuth2(object):
+ """Flask extension for making OAuth 2.0 easier.
+
+ Configuration values:
+
+ * ``GOOGLE_OAUTH2_CLIENT_SECRETS_FILE`` path to a client secrets json
+ file, obtained from the credentials screen in the Google Developers
+ console.
+ * ``GOOGLE_OAUTH2_CLIENT_ID`` the oauth2 credentials' client ID. This
+ is only needed if ``GOOGLE_OAUTH2_CLIENT_SECRETS_FILE`` is not
+ specified.
+ * ``GOOGLE_OAUTH2_CLIENT_SECRET`` the oauth2 credentials' client
+ secret. This is only needed if ``GOOGLE_OAUTH2_CLIENT_SECRETS_FILE``
+ is not specified.
+
+ If app is specified, all arguments will be passed along to init_app.
+
+ If no app is specified, then you should call init_app in your application
+ factory to finish initialization.
+ """
+
+ def __init__(self, app=None, *args, **kwargs):
+ self.app = app
+ if app is not None:
+ self.init_app(app, *args, **kwargs)
+
+ def init_app(self, app, scopes=None, client_secrets_file=None,
+ client_id=None, client_secret=None, authorize_callback=None,
+ storage=None, **kwargs):
+ """Initialize this extension for the given app.
+
+ Arguments:
+ app: A Flask application.
+ scopes: Optional list of scopes to authorize.
+ client_secrets_file: Path to a file containing client secrets. You
+ can also specify the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE config
+ value.
+ client_id: If not specifying a client secrets file, specify the
+ OAuth2 client id. You can also specify the
+ GOOGLE_OAUTH2_CLIENT_ID config value. You must also provide a
+ client secret.
+ client_secret: The OAuth2 client secret. You can also specify the
+ GOOGLE_OAUTH2_CLIENT_SECRET config value.
+ authorize_callback: A function that is executed after successful
+ user authorization.
+ storage: A oauth2client.client.Storage subclass for storing the
+ credentials. By default, this is a Flask session based storage.
+ kwargs: Any additional args are passed along to the Flow
+ constructor.
+ """
+ self.app = app
+ self.authorize_callback = authorize_callback
+ self.flow_kwargs = kwargs
+
+ if storage is None:
+ storage = dictionary_storage.DictionaryStorage(
+ session, key=_CREDENTIALS_KEY)
+ self.storage = storage
+
+ if scopes is None:
+ scopes = app.config.get('GOOGLE_OAUTH2_SCOPES', _DEFAULT_SCOPES)
+ self.scopes = scopes
+
+ self._load_config(client_secrets_file, client_id, client_secret)
+
+ app.register_blueprint(self._create_blueprint())
+
+ def _load_config(self, client_secrets_file, client_id, client_secret):
+ """Loads oauth2 configuration in order of priority.
+
+ Priority:
+ 1. Config passed to the constructor or init_app.
+ 2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app
+ config.
+ 3. Config passed via the GOOGLE_OAUTH2_CLIENT_ID and
+ GOOGLE_OAUTH2_CLIENT_SECRET app config.
+
+ Raises:
+ ValueError if no config could be found.
+ """
+ if client_id and client_secret:
+ self.client_id, self.client_secret = client_id, client_secret
+ return
+
+ if client_secrets_file:
+ self._load_client_secrets(client_secrets_file)
+ return
+
+ if 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE' in self.app.config:
+ self._load_client_secrets(
+ self.app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'])
+ return
+
+ try:
+ self.client_id, self.client_secret = (
+ self.app.config['GOOGLE_OAUTH2_CLIENT_ID'],
+ self.app.config['GOOGLE_OAUTH2_CLIENT_SECRET'])
+ except KeyError:
+ raise ValueError(
+ 'OAuth2 configuration could not be found. Either specify the '
+ 'client_secrets_file or client_id and client_secret or set '
+ 'the app configuration variables '
+ 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE or '
+ 'GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET.')
+
+ def _load_client_secrets(self, filename):
+ """Loads client secrets from the given filename."""
+ client_type, client_info = clientsecrets.loadfile(filename)
+ if client_type != clientsecrets.TYPE_WEB:
+ raise ValueError(
+ 'The flow specified in {0} is not supported.'.format(
+ client_type))
+
+ self.client_id = client_info['client_id']
+ self.client_secret = client_info['client_secret']
+
+ def _make_flow(self, return_url=None, **kwargs):
+ """Creates a Web Server Flow"""
+ # Generate a CSRF token to prevent malicious requests.
+ csrf_token = hashlib.sha256(os.urandom(1024)).hexdigest()
+
+ session[_CSRF_KEY] = csrf_token
+
+ state = json.dumps({
+ 'csrf_token': csrf_token,
+ 'return_url': return_url
+ })
+
+ kw = self.flow_kwargs.copy()
+ kw.update(kwargs)
+
+ extra_scopes = kw.pop('scopes', [])
+ scopes = set(self.scopes).union(set(extra_scopes))
+
+ flow = client.OAuth2WebServerFlow(
+ client_id=self.client_id,
+ client_secret=self.client_secret,
+ scope=scopes,
+ state=state,
+ redirect_uri=url_for('oauth2.callback', _external=True),
+ **kw)
+
+ flow_key = _FLOW_KEY.format(csrf_token)
+ session[flow_key] = pickle.dumps(flow)
+
+ return flow
+
+ def _create_blueprint(self):
+ bp = Blueprint('oauth2', __name__)
+ bp.add_url_rule('/oauth2authorize', 'authorize', self.authorize_view)
+ bp.add_url_rule('/oauth2callback', 'callback', self.callback_view)
+
+ return bp
+
+ def authorize_view(self):
+ """Flask view that starts the authorization flow.
+
+ Starts flow by redirecting the user to the OAuth2 provider.
+ """
+ args = request.args.to_dict()
+
+ # Scopes will be passed as multiple args, and to_dict() will only
+ # return one. So, we use getlist() to get all of the scopes.
+ args['scopes'] = request.args.getlist('scopes')
+
+ return_url = args.pop('return_url', None)
+ if return_url is None:
+ return_url = request.referrer or '/'
+
+ flow = self._make_flow(return_url=return_url, **args)
+ auth_url = flow.step1_get_authorize_url()
+
+ return redirect(auth_url)
+
+ def callback_view(self):
+ """Flask view that handles the user's return from OAuth2 provider.
+
+ On return, exchanges the authorization code for credentials and stores
+ the credentials.
+ """
+ if 'error' in request.args:
+ reason = request.args.get(
+ 'error_description', request.args.get('error', ''))
+ reason = markupsafe.escape(reason)
+ return ('Authorization failed: {0}'.format(reason),
+ httplib.BAD_REQUEST)
+
+ try:
+ encoded_state = request.args['state']
+ server_csrf = session[_CSRF_KEY]
+ code = request.args['code']
+ except KeyError:
+ return 'Invalid request', httplib.BAD_REQUEST
+
+ try:
+ state = json.loads(encoded_state)
+ client_csrf = state['csrf_token']
+ return_url = state['return_url']
+ except (ValueError, KeyError):
+ return 'Invalid request state', httplib.BAD_REQUEST
+
+ if client_csrf != server_csrf:
+ return 'Invalid request state', httplib.BAD_REQUEST
+
+ flow = _get_flow_for_token(server_csrf)
+
+ if flow is None:
+ return 'Invalid request state', httplib.BAD_REQUEST
+
+ # Exchange the auth code for credentials.
+ try:
+ credentials = flow.step2_exchange(code)
+ except client.FlowExchangeError as exchange_error:
+ current_app.logger.exception(exchange_error)
+ content = 'An error occurred: {0}'.format(exchange_error)
+ return content, httplib.BAD_REQUEST
+
+ # Save the credentials to the storage.
+ self.storage.put(credentials)
+
+ if self.authorize_callback:
+ self.authorize_callback(credentials)
+
+ return redirect(return_url)
+
+ @property
+ def credentials(self):
+ """The credentials for the current user or None if unavailable."""
+ ctx = _app_ctx_stack.top
+
+ if not hasattr(ctx, _CREDENTIALS_KEY):
+ ctx.google_oauth2_credentials = self.storage.get()
+
+ return ctx.google_oauth2_credentials
+
+ def has_credentials(self):
+ """Returns True if there are valid credentials for the current user."""
+ if not self.credentials:
+ return False
+ # Is the access token expired? If so, do we have a refresh token?
+ elif (self.credentials.access_token_expired and
+ not self.credentials.refresh_token):
+ return False
+ else:
+ return True
+
+ @property
+ def email(self):
+ """Returns the user's email address or None if there are no credentials.
+
+ The email address is provided by the current credentials' id_token.
+ This should not be used as unique identifier as the user can change
+ their email. If you need a unique identifier, use user_id.
+ """
+ if not self.credentials:
+ return None
+ try:
+ return self.credentials.id_token['email']
+ except KeyError:
+ current_app.logger.error(
+ 'Invalid id_token {0}'.format(self.credentials.id_token))
+
+ @property
+ def user_id(self):
+ """Returns the a unique identifier for the user
+
+ Returns None if there are no credentials.
+
+ The id is provided by the current credentials' id_token.
+ """
+ if not self.credentials:
+ return None
+ try:
+ return self.credentials.id_token['sub']
+ except KeyError:
+ current_app.logger.error(
+ 'Invalid id_token {0}'.format(self.credentials.id_token))
+
+ def authorize_url(self, return_url, **kwargs):
+ """Creates a URL that can be used to start the authorization flow.
+
+ When the user is directed to the URL, the authorization flow will
+ begin. Once complete, the user will be redirected to the specified
+ return URL.
+
+ Any kwargs are passed into the flow constructor.
+ """
+ return url_for('oauth2.authorize', return_url=return_url, **kwargs)
+
+ def required(self, decorated_function=None, scopes=None,
+ **decorator_kwargs):
+ """Decorator to require OAuth2 credentials for a view.
+
+ If credentials are not available for the current user, then they will
+ be redirected to the authorization flow. Once complete, the user will
+ be redirected back to the original page.
+ """
+
+ def curry_wrapper(wrapped_function):
+ @wraps(wrapped_function)
+ def required_wrapper(*args, **kwargs):
+ return_url = decorator_kwargs.pop('return_url', request.url)
+
+ requested_scopes = set(self.scopes)
+ if scopes is not None:
+ requested_scopes |= set(scopes)
+ if self.has_credentials():
+ requested_scopes |= self.credentials.scopes
+
+ requested_scopes = list(requested_scopes)
+
+ # Does the user have credentials and does the credentials have
+ # all of the needed scopes?
+ if (self.has_credentials() and
+ self.credentials.has_scopes(requested_scopes)):
+ return wrapped_function(*args, **kwargs)
+ # Otherwise, redirect to authorization
+ else:
+ auth_url = self.authorize_url(
+ return_url,
+ scopes=requested_scopes,
+ **decorator_kwargs)
+
+ return redirect(auth_url)
+
+ return required_wrapper
+
+ if decorated_function:
+ return curry_wrapper(decorated_function)
+ else:
+ return curry_wrapper
+
+ def http(self, *args, **kwargs):
+ """Returns an authorized http instance.
+
+ Can only be called if there are valid credentials for the user, such
+ as inside of a view that is decorated with @required.
+
+ Args:
+ *args: Positional arguments passed to httplib2.Http constructor.
+ **kwargs: Keyword arguments passed to httplib2.Http constructor.
+
+ Raises:
+ ValueError if no credentials are available.
+ """
+ if not self.credentials:
+ raise ValueError('No credentials available.')
+ return self.credentials.authorize(
+ transport.get_http_object(*args, **kwargs))
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/gce.py b/contrib/python/oauth2client/py2/oauth2client/contrib/gce.py
new file mode 100644
index 0000000000..aaab15ffce
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/gce.py
@@ -0,0 +1,156 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for Google Compute Engine
+
+Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
+"""
+
+import logging
+import warnings
+
+from six.moves import http_client
+
+from oauth2client import client
+from oauth2client.contrib import _metadata
+
+
+logger = logging.getLogger(__name__)
+
+_SCOPES_WARNING = """\
+You have requested explicit scopes to be used with a GCE service account.
+Using this argument will have no effect on the actual scopes for tokens
+requested. These scopes are set at VM instance creation time and
+can't be overridden in the request.
+"""
+
+
+class AppAssertionCredentials(client.AssertionCredentials):
+ """Credentials object for Compute Engine Assertion Grants
+
+ This object will allow a Compute Engine instance to identify itself to
+ Google and other OAuth 2.0 servers that can verify assertions. It can be
+ used for the purpose of accessing data stored under an account assigned to
+ the Compute Engine instance itself.
+
+ This credential does not require a flow to instantiate because it
+ represents a two legged flow, and therefore has all of the required
+ information to generate and refresh its own access tokens.
+
+ Note that :attr:`service_account_email` and :attr:`scopes`
+ will both return None until the credentials have been refreshed.
+ To check whether credentials have previously been refreshed use
+ :attr:`invalid`.
+ """
+
+ def __init__(self, email=None, *args, **kwargs):
+ """Constructor for AppAssertionCredentials
+
+ Args:
+ email: an email that specifies the service account to use.
+ Only necessary if using custom service accounts
+ (see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).
+ """
+ if 'scopes' in kwargs:
+ warnings.warn(_SCOPES_WARNING)
+ kwargs['scopes'] = None
+
+ # Assertion type is no longer used, but still in the
+ # parent class signature.
+ super(AppAssertionCredentials, self).__init__(None, *args, **kwargs)
+
+ self.service_account_email = email
+ self.scopes = None
+ self.invalid = True
+
+ @classmethod
+ def from_json(cls, json_data):
+ raise NotImplementedError(
+ 'Cannot serialize credentials for GCE service accounts.')
+
+ def to_json(self):
+ raise NotImplementedError(
+ 'Cannot serialize credentials for GCE service accounts.')
+
+ def retrieve_scopes(self, http):
+ """Retrieves the canonical list of scopes for this access token.
+
+ Overrides client.Credentials.retrieve_scopes. Fetches scopes info
+ from the metadata server.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the refresh
+ request.
+
+ Returns:
+ A set of strings containing the canonical list of scopes.
+ """
+ self._retrieve_info(http)
+ return self.scopes
+
+ def _retrieve_info(self, http):
+ """Retrieves service account info for invalid credentials.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ """
+ if self.invalid:
+ info = _metadata.get_service_account_info(
+ http,
+ service_account=self.service_account_email or 'default')
+ self.invalid = False
+ self.service_account_email = info['email']
+ self.scopes = info['scopes']
+
+ def _refresh(self, http):
+ """Refreshes the access token.
+
+ Skip all the storage hoops and just refresh using the API.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+
+ Raises:
+ HttpAccessTokenRefreshError: When the refresh fails.
+ """
+ try:
+ self._retrieve_info(http)
+ self.access_token, self.token_expiry = _metadata.get_token(
+ http, service_account=self.service_account_email)
+ except http_client.HTTPException as err:
+ raise client.HttpAccessTokenRefreshError(str(err))
+
+ @property
+ def serialization_data(self):
+ raise NotImplementedError(
+ 'Cannot serialize credentials for GCE service accounts.')
+
+ def create_scoped_required(self):
+ return False
+
+ def sign_blob(self, blob):
+ """Cryptographically sign a blob (of bytes).
+
+ This method is provided to support a common interface, but
+ the actual key used for a Google Compute Engine service account
+ is not available, so it can't be used to sign content.
+
+ Args:
+ blob: bytes, Message to be signed.
+
+ Raises:
+ NotImplementedError, always.
+ """
+ raise NotImplementedError(
+ 'Compute Engine service accounts cannot sign blobs')
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/keyring_storage.py b/contrib/python/oauth2client/py2/oauth2client/contrib/keyring_storage.py
new file mode 100644
index 0000000000..4af944881a
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/keyring_storage.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A keyring based Storage.
+
+A Storage for Credentials that uses the keyring module.
+"""
+
+import threading
+
+import keyring
+
+from oauth2client import client
+
+
+class Storage(client.Storage):
+ """Store and retrieve a single credential to and from the keyring.
+
+ To use this module you must have the keyring module installed. See
+ <http://pypi.python.org/pypi/keyring/>. This is an optional module and is
+ not installed with oauth2client by default because it does not work on all
+ the platforms that oauth2client supports, such as Google App Engine.
+
+ The keyring module <http://pypi.python.org/pypi/keyring/> is a
+ cross-platform library for accessing the keyring capabilities of the local
+ system. The user will be prompted for their keyring password when this
+ module is used, and the manner in which the user is prompted will vary per
+ platform.
+
+ Usage::
+
+ from oauth2client import keyring_storage
+
+ s = keyring_storage.Storage('name_of_application', 'user1')
+ credentials = s.get()
+
+ """
+
+ def __init__(self, service_name, user_name):
+ """Constructor.
+
+ Args:
+ service_name: string, The name of the service under which the
+ credentials are stored.
+ user_name: string, The name of the user to store credentials for.
+ """
+ super(Storage, self).__init__(lock=threading.Lock())
+ self._service_name = service_name
+ self._user_name = user_name
+
+ def locked_get(self):
+ """Retrieve Credential from file.
+
+ Returns:
+ oauth2client.client.Credentials
+ """
+ credentials = None
+ content = keyring.get_password(self._service_name, self._user_name)
+
+ if content is not None:
+ try:
+ credentials = client.Credentials.new_from_json(content)
+ credentials.set_store(self)
+ except ValueError:
+ pass
+
+ return credentials
+
+ def locked_put(self, credentials):
+ """Write Credentials to file.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ keyring.set_password(self._service_name, self._user_name,
+ credentials.to_json())
+
+ def locked_delete(self):
+ """Delete Credentials file.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ keyring.set_password(self._service_name, self._user_name, '')
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/multiprocess_file_storage.py b/contrib/python/oauth2client/py2/oauth2client/contrib/multiprocess_file_storage.py
new file mode 100644
index 0000000000..e9e8c8cd1d
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/multiprocess_file_storage.py
@@ -0,0 +1,355 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Multiprocess file credential storage.
+
+This module provides file-based storage that supports multiple credentials and
+cross-thread and process access.
+
+This module supersedes the functionality previously found in `multistore_file`.
+
+This module provides :class:`MultiprocessFileStorage` which:
+ * Is tied to a single credential via a user-specified key. This key can be
+ used to distinguish between multiple users, client ids, and/or scopes.
+ * Can be safely accessed and refreshed across threads and processes.
+
+Process & thread safety guarantees the following behavior:
+ * If one thread or process refreshes a credential, subsequent refreshes
+ from other processes will re-fetch the credentials from the file instead
+ of performing an http request.
+ * If two processes or threads attempt to refresh concurrently, only one
+ will be able to acquire the lock and refresh, with the deadlock caveat
+ below.
+ * The interprocess lock will not deadlock; instead, if a process can
+ not acquire the interprocess lock within ``INTERPROCESS_LOCK_DEADLINE``
+ it will allow refreshing the credential but will not write the updated
+ credential to disk. This logic happens during every lock cycle - if the
+ credentials are refreshed again it will retry locking and writing as
+ normal.
+
+Usage
+=====
+
+Before using the storage, you need to decide how you want to key the
+credentials. A few common strategies include:
+
+ * If you're storing credentials for multiple users in a single file, use
+ a unique identifier for each user as the key.
+ * If you're storing credentials for multiple client IDs in a single file,
+ use the client ID as the key.
+ * If you're storing multiple credentials for one user, use the scopes as
+ the key.
+ * If you have a complicated setup, use a compound key. For example, you
+ can use a combination of the client ID and scopes as the key.
+
+Create an instance of :class:`MultiprocessFileStorage` for each credential you
+want to store, for example::
+
+ filename = 'credentials'
+ key = '{}-{}'.format(client_id, user_id)
+ storage = MultiprocessFileStorage(filename, key)
+
+To store the credentials::
+
+ storage.put(credentials)
+
+If you're going to continue to use the credentials after storing them, be sure
+to call :func:`set_store`::
+
+ credentials.set_store(storage)
+
+To retrieve the credentials::
+
+ storage.get(credentials)
+
+"""
+
+import base64
+import json
+import logging
+import os
+import threading
+
+import fasteners
+from six import iteritems
+
+from oauth2client import _helpers
+from oauth2client import client
+
+
+#: The maximum amount of time, in seconds, to wait when acquire the
+#: interprocess lock before falling back to read-only mode.
+INTERPROCESS_LOCK_DEADLINE = 1
+
+logger = logging.getLogger(__name__)
+_backends = {}
+_backends_lock = threading.Lock()
+
+
+def _create_file_if_needed(filename):
+ """Creates the an empty file if it does not already exist.
+
+ Returns:
+ True if the file was created, False otherwise.
+ """
+ if os.path.exists(filename):
+ return False
+ else:
+ # Equivalent to "touch".
+ open(filename, 'a+b').close()
+ logger.info('Credential file {0} created'.format(filename))
+ return True
+
+
+def _load_credentials_file(credentials_file):
+ """Load credentials from the given file handle.
+
+ The file is expected to be in this format:
+
+ {
+ "file_version": 2,
+ "credentials": {
+ "key": "base64 encoded json representation of credentials."
+ }
+ }
+
+ This function will warn and return empty credentials instead of raising
+ exceptions.
+
+ Args:
+ credentials_file: An open file handle.
+
+ Returns:
+ A dictionary mapping user-defined keys to an instance of
+ :class:`oauth2client.client.Credentials`.
+ """
+ try:
+ credentials_file.seek(0)
+ data = json.load(credentials_file)
+ except Exception:
+ logger.warning(
+ 'Credentials file could not be loaded, will ignore and '
+ 'overwrite.')
+ return {}
+
+ if data.get('file_version') != 2:
+ logger.warning(
+ 'Credentials file is not version 2, will ignore and '
+ 'overwrite.')
+ return {}
+
+ credentials = {}
+
+ for key, encoded_credential in iteritems(data.get('credentials', {})):
+ try:
+ credential_json = base64.b64decode(encoded_credential)
+ credential = client.Credentials.new_from_json(credential_json)
+ credentials[key] = credential
+ except:
+ logger.warning(
+ 'Invalid credential {0} in file, ignoring.'.format(key))
+
+ return credentials
+
+
+def _write_credentials_file(credentials_file, credentials):
+ """Writes credentials to a file.
+
+ Refer to :func:`_load_credentials_file` for the format.
+
+ Args:
+ credentials_file: An open file handle, must be read/write.
+ credentials: A dictionary mapping user-defined keys to an instance of
+ :class:`oauth2client.client.Credentials`.
+ """
+ data = {'file_version': 2, 'credentials': {}}
+
+ for key, credential in iteritems(credentials):
+ credential_json = credential.to_json()
+ encoded_credential = _helpers._from_bytes(base64.b64encode(
+ _helpers._to_bytes(credential_json)))
+ data['credentials'][key] = encoded_credential
+
+ credentials_file.seek(0)
+ json.dump(data, credentials_file)
+ credentials_file.truncate()
+
+
class _MultiprocessStorageBackend(object):
    """Thread-local backend for multiprocess storage.

    Each process has only one instance of this backend per file. All threads
    share a single instance of this backend. This ensures that all threads
    use the same thread lock and process lock when accessing the file.
    """

    def __init__(self, filename):
        # File handle; only open while the locks are held.
        self._file = None
        self._filename = filename
        # Cross-process lock backed by a sibling ``<filename>.lock`` file.
        self._process_lock = fasteners.InterProcessLock(
            '{0}.lock'.format(filename))
        # Serializes access among threads within this process.
        self._thread_lock = threading.Lock()
        self._read_only = False
        # In-memory cache mapping key -> Credentials.
        self._credentials = {}

    def _load_credentials(self):
        """(Re-)loads the credentials from the file."""
        if not self._file:
            return

        loaded_credentials = _load_credentials_file(self._file)
        self._credentials.update(loaded_credentials)

        logger.debug('Read credential file')

    def _write_credentials(self):
        """Writes the in-memory credentials to the file.

        No-op when the backend fell back to read-only mode because the
        interprocess lock could not be acquired.
        """
        if self._read_only:
            logger.debug('In read-only mode, not writing credentials.')
            return

        _write_credentials_file(self._file, self._credentials)
        logger.debug('Wrote credential file {0}.'.format(self._filename))

    def acquire_lock(self):
        """Acquires the thread lock and, if possible, the process lock.

        If the interprocess lock can not be obtained within
        ``INTERPROCESS_LOCK_DEADLINE`` seconds the backend degrades to
        read-only access so credential reads still work.
        """
        self._thread_lock.acquire()
        locked = self._process_lock.acquire(timeout=INTERPROCESS_LOCK_DEADLINE)

        if locked:
            _create_file_if_needed(self._filename)
            self._file = open(self._filename, 'r+')
            self._read_only = False

        else:
            # Fix: logger.warn is a deprecated alias of logger.warning.
            logger.warning(
                'Failed to obtain interprocess lock for credentials. '
                'If a credential is being refreshed, other processes may '
                'not see the updated access token and refresh as well.')
            if os.path.exists(self._filename):
                self._file = open(self._filename, 'r')
            else:
                self._file = None
            self._read_only = True

        self._load_credentials()

    def release_lock(self):
        """Closes the file and releases both locks (reverse of acquire)."""
        if self._file is not None:
            self._file.close()
            self._file = None

        # The process lock is only held when we are not in read-only mode.
        if not self._read_only:
            self._process_lock.release()

        self._thread_lock.release()

    def _refresh_predicate(self, credentials):
        """Returns True when the cached credentials should be re-read."""
        if credentials is None:
            return True
        elif credentials.invalid:
            return True
        elif credentials.access_token_expired:
            return True
        else:
            return False

    def locked_get(self, key):
        """Returns the credentials for ``key``, reloading the file if stale."""
        # Check if the credential is already in memory.
        credentials = self._credentials.get(key, None)

        # Use the refresh predicate to determine if the entire store should be
        # reloaded. This basically checks if the credentials are invalid
        # or expired. This covers the situation where another process has
        # refreshed the credentials and this process doesn't know about it yet.
        # In that case, this process won't needlessly refresh the credentials.
        if self._refresh_predicate(credentials):
            self._load_credentials()
            credentials = self._credentials.get(key, None)

        return credentials

    def locked_put(self, key, credentials):
        """Stores ``credentials`` under ``key`` and persists the whole map."""
        # Reload first so concurrent writers' entries are not clobbered.
        self._load_credentials()
        self._credentials[key] = credentials
        self._write_credentials()

    def locked_delete(self, key):
        """Removes ``key`` (if present) and persists the whole map."""
        self._load_credentials()
        self._credentials.pop(key, None)
        self._write_credentials()
+
+
def _get_backend(filename):
    """A helper method to get or create a backend with thread locking.

    This ensures that only one backend is used per-file per-process, so that
    thread and process locks are appropriately shared.

    Args:
        filename: The full path to the credential storage file.

    Returns:
        An instance of :class:`_MultiprocessStorageBackend`.
    """
    # Normalize so different spellings of the same path share one backend.
    canonical_path = os.path.abspath(filename)

    with _backends_lock:
        backend = _backends.get(canonical_path)
        if backend is None:
            backend = _MultiprocessStorageBackend(canonical_path)
            _backends[canonical_path] = backend
    return backend
+
+
class MultiprocessFileStorage(client.Storage):
    """Multiprocess file credential storage.

    Args:
        filename: The path to the file where credentials will be stored.
        key: An arbitrary string used to uniquely identify this set of
            credentials. For example, you may use the user's ID as the key or
            a combination of the client ID and user ID.
    """

    def __init__(self, filename, key):
        self._key = key
        # All storages for the same file share one backend (and its locks).
        self._backend = _get_backend(filename)

    def acquire_lock(self):
        """Acquires the shared backend's thread and process locks."""
        self._backend.acquire_lock()

    def release_lock(self):
        """Releases the shared backend's thread and process locks."""
        self._backend.release_lock()

    def locked_get(self):
        """Retrieves the current credentials from the store.

        Returns:
            An instance of :class:`oauth2client.client.Credentials` or `None`.
        """
        credential = self._backend.locked_get(self._key)
        if credential is None:
            return None
        # Attach this storage so refreshed tokens are written back here.
        credential.set_store(self)
        return credential

    def locked_put(self, credentials):
        """Writes the given credentials to the store.

        Args:
            credentials: an instance of
                :class:`oauth2client.client.Credentials`.
        """
        return self._backend.locked_put(self._key, credentials)

    def locked_delete(self):
        """Deletes the current credentials from the store."""
        return self._backend.locked_delete(self._key)
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/sqlalchemy.py b/contrib/python/oauth2client/py2/oauth2client/contrib/sqlalchemy.py
new file mode 100644
index 0000000000..7d9fd4b23f
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/sqlalchemy.py
@@ -0,0 +1,173 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 utilities for SQLAlchemy.
+
+Utilities for using OAuth 2.0 in conjunction with SQLAlchemy.
+
+Configuration
+=============
+
+In order to use this storage, you'll need to create a table
+with :class:`oauth2client.contrib.sqlalchemy.CredentialsType` column.
+It's recommended to either put this column on some sort of user info
+table or put the column in a table with a belongs-to relationship to
+a user info table.
+
+Here's an example of a simple table with a :class:`CredentialsType`
+column that's related to a user table by the `user_id` key.
+
+.. code-block:: python
+
+ from sqlalchemy import Column, ForeignKey, Integer
+ from sqlalchemy.ext.declarative import declarative_base
+ from sqlalchemy.orm import relationship
+
+ from oauth2client.contrib.sqlalchemy import CredentialsType
+
+
+ Base = declarative_base()
+
+
+ class Credentials(Base):
+ __tablename__ = 'credentials'
+
+ user_id = Column(Integer, ForeignKey('user.id'))
+ credentials = Column(CredentialsType)
+
+
+ class User(Base):
+ id = Column(Integer, primary_key=True)
+ # bunch of other columns
+ credentials = relationship('Credentials')
+
+
+Usage
+=====
+
+With tables ready, you are now able to store credentials in database.
+We will reuse tables defined above.
+
+.. code-block:: python
+
+ from sqlalchemy.orm import Session
+
+ from oauth2client.client import OAuth2Credentials
+    from oauth2client.contrib.sqlalchemy import Storage
+
+ session = Session()
+ user = session.query(User).first()
+ storage = Storage(
+ session=session,
+ model_class=Credentials,
+ # This is the key column used to identify
+ # the row that stores the credentials.
+ key_name='user_id',
+ key_value=user.id,
+ property_name='credentials',
+ )
+
+ # Store
+ credentials = OAuth2Credentials(...)
+ storage.put(credentials)
+
+ # Retrieve
+ credentials = storage.get()
+
+ # Delete
+ storage.delete()
+
+"""
+
+from __future__ import absolute_import
+
+import sqlalchemy.types
+
+from oauth2client import client
+
+
class CredentialsType(sqlalchemy.types.PickleType):
    """Column type for storing :class:`oauth2client.client.Credentials`.

    Alias for :class:`sqlalchemy.types.PickleType`; credential objects are
    pickled into the column.
    """
+
+
class Storage(client.Storage):
    """Store and retrieve a single credential to and from SQLAlchemy.

    This helper presumes the Credentials have been stored as a Credentials
    column on a db model class.
    """

    def __init__(self, session, model_class, key_name,
                 key_value, property_name):
        """Constructor for Storage.

        Args:
            session: An instance of :class:`sqlalchemy.orm.Session`.
            model_class: SQLAlchemy declarative mapping.
            key_name: string, key name for the entity that has the credentials
            key_value: key value for the entity that has the credentials
            property_name: A string indicating which property on the
                           ``model_class`` to store the credentials.
                           This property must be a
                           :class:`CredentialsType` column.
        """
        super(Storage, self).__init__()

        self.session = session
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name

    def _row_query(self):
        # The credentials row is identified by ``key_name == key_value``.
        return self.session.query(self.model_class).filter_by(
            **{self.key_name: self.key_value})

    def locked_get(self):
        """Retrieve stored credential.

        Returns:
            A :class:`oauth2client.Credentials` instance or `None`.
        """
        row = self._row_query().first()
        if not row:
            return None

        credential = getattr(row, self.property_name)
        if credential and hasattr(credential, 'set_store'):
            # Attach this storage so refreshed tokens are written back.
            credential.set_store(self)
        return credential

    def locked_put(self, credentials):
        """Write a credentials to the SQLAlchemy datastore.

        Args:
            credentials: :class:`oauth2client.Credentials`
        """
        row = self._row_query().first()
        if not row:
            # No row yet for this key; create one.
            row = self.model_class(**{self.key_name: self.key_value})

        setattr(row, self.property_name, credentials)
        self.session.add(row)

    def locked_delete(self):
        """Delete credentials from the SQLAlchemy datastore."""
        self._row_query().delete()
diff --git a/contrib/python/oauth2client/py2/oauth2client/contrib/xsrfutil.py b/contrib/python/oauth2client/py2/oauth2client/contrib/xsrfutil.py
new file mode 100644
index 0000000000..7c3ec0353a
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/contrib/xsrfutil.py
@@ -0,0 +1,101 @@
+# Copyright 2014 the Melange authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper methods for creating & verifying XSRF tokens."""
+
+import base64
+import binascii
+import hmac
+import time
+
+from oauth2client import _helpers
+
+
+# Delimiter character
+DELIMITER = b':'
+
+# 1 hour in seconds
+DEFAULT_TIMEOUT_SECS = 60 * 60
+
+
@_helpers.positional(2)
def generate_token(key, user_id, action_id='', when=None):
    """Generates a URL-safe token for the given user, action, time tuple.

    Args:
        key: secret key to use.
        user_id: the user ID of the authenticated user.
        action_id: a string identifier of the action they requested
                   authorization for.
        when: the time in seconds since the epoch at which the user was
              authorized for this action. If not set the current time is used.

    Returns:
        A string XSRF protection token.
    """
    import hashlib
    # Fix: hmac.new() implicitly defaulted to MD5 on Python 2 and that
    # default was removed in Python 3.8. Spelling it out keeps generated
    # tokens byte-identical while remaining usable on modern interpreters.
    # NOTE(review): MD5 is retained only for compatibility with tokens
    # already issued by this code; it is not collision-resistant.
    digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'),
                        digestmod=hashlib.md5)
    digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
    digester.update(DELIMITER)
    digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
    digester.update(DELIMITER)
    when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
    digester.update(when)
    digest = digester.digest()

    # The token carries the digest plus the plaintext timestamp so that
    # validate_token can recompute the digest for the same time.
    token = base64.urlsafe_b64encode(digest + DELIMITER + when)
    return token
+
+
@_helpers.positional(3)
def validate_token(key, token, user_id, action_id="", current_time=None):
    """Validates that the given token authorizes the user for the action.

    Tokens are invalid if the time of issue is too old or if the token
    does not match what generateToken outputs (i.e. the token was forged).

    Args:
        key: secret key to use.
        token: a string of the token generated by generateToken.
        user_id: the user ID of the authenticated user.
        action_id: a string identifier of the action they requested
                   authorization for.

    Returns:
        A boolean - True if the user is authorized for the action, False
        otherwise.
    """
    if not token:
        return False

    # The issue time travels in plaintext as the last DELIMITER-separated
    # field of the decoded token.
    try:
        decoded_token = base64.urlsafe_b64decode(token)
        token_time = int(decoded_token.split(DELIMITER)[-1])
    except (TypeError, ValueError, binascii.Error):
        return False

    now = time.time() if current_time is None else current_time
    if now - token_time > DEFAULT_TIMEOUT_SECS:
        # Token issued too long ago; reject.
        return False

    # Regenerate the token for the same (user, action, time) tuple; a
    # forged token will not reproduce the HMAC digest.
    expected_token = generate_token(key, user_id, action_id=action_id,
                                    when=token_time)
    if len(token) != len(expected_token):
        return False

    # Constant-time comparison to avoid timing attacks.
    mismatch = 0
    for lhs, rhs in zip(bytearray(token), bytearray(expected_token)):
        mismatch |= lhs ^ rhs
    return not mismatch
diff --git a/contrib/python/oauth2client/py2/oauth2client/crypt.py b/contrib/python/oauth2client/py2/oauth2client/crypt.py
new file mode 100644
index 0000000000..13260982a6
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/crypt.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Crypto-related routines for oauth2client."""
+
+import json
+import logging
+import time
+
+from oauth2client import _helpers
+from oauth2client import _pure_python_crypt
+
+
+RsaSigner = _pure_python_crypt.RsaSigner
+RsaVerifier = _pure_python_crypt.RsaVerifier
+
+CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
+AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
+MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
+
+logger = logging.getLogger(__name__)
+
+
class AppIdentityError(Exception):
    """Raised when a cryptographic operation (sign/verify) fails."""
+
+
+def _bad_pkcs12_key_as_pem(*args, **kwargs):
+ raise NotImplementedError('pkcs12_key_as_pem requires OpenSSL.')
+
+
# Backend discovery: try each optional crypto library in order of
# preference. pyOpenSSL is first because it is the only backend that can
# read PKCS#12 (.p12) keys.
try:
    from oauth2client import _openssl_crypt
    OpenSSLSigner = _openssl_crypt.OpenSSLSigner
    OpenSSLVerifier = _openssl_crypt.OpenSSLVerifier
    pkcs12_key_as_pem = _openssl_crypt.pkcs12_key_as_pem
except ImportError: # pragma: NO COVER
    OpenSSLVerifier = None
    OpenSSLSigner = None
    # Without pyOpenSSL, pkcs12_key_as_pem raises NotImplementedError.
    pkcs12_key_as_pem = _bad_pkcs12_key_as_pem

# PyCrypto is the second choice; it handles PEM keys but not PKCS#12.
try:
    from oauth2client import _pycrypto_crypt
    PyCryptoSigner = _pycrypto_crypt.PyCryptoSigner
    PyCryptoVerifier = _pycrypto_crypt.PyCryptoVerifier
except ImportError: # pragma: NO COVER
    PyCryptoVerifier = None
    PyCryptoSigner = None

# Pick the module-wide default Signer/Verifier:
# OpenSSL > PyCrypto > pure-Python rsa (always available).
if OpenSSLSigner:
    Signer = OpenSSLSigner
    Verifier = OpenSSLVerifier
elif PyCryptoSigner: # pragma: NO COVER
    Signer = PyCryptoSigner
    Verifier = PyCryptoVerifier
else: # pragma: NO COVER
    Signer = RsaSigner
    Verifier = RsaVerifier
+
+
def make_signed_jwt(signer, payload, key_id=None):
    """Make a signed JWT.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
        signer: crypt.Signer, Cryptographic signer.
        payload: dict, Dictionary of data to convert to JSON and then sign.
        key_id: string, (Optional) Key ID header.

    Returns:
        string, The JWT for the payload.
    """
    header = {'typ': 'JWT', 'alg': 'RS256'}
    if key_id is not None:
        header['kid'] = key_id

    # JWT wire format: b64(header).b64(payload).b64(signature)
    segments = [
        _helpers._urlsafe_b64encode(_helpers._json_encode(header)),
        _helpers._urlsafe_b64encode(_helpers._json_encode(payload)),
    ]
    signing_input = b'.'.join(segments)

    segments.append(_helpers._urlsafe_b64encode(signer.sign(signing_input)))

    logger.debug(str(segments))

    return b'.'.join(segments)
+
+
def _verify_signature(message, signature, certs):
    """Verifies signed content using a list of certificates.

    Args:
        message: string or bytes, The message to verify.
        signature: string or bytes, The signature on the message.
        certs: iterable, certificates in PEM format.

    Raises:
        AppIdentityError: If none of the certificates can verify the message
                          against the signature.
    """
    # Accept the message as soon as any certificate verifies it.
    verified = any(
        Verifier.from_string(pem, is_x509_cert=True).verify(message,
                                                            signature)
        for pem in certs)
    if not verified:
        raise AppIdentityError('Invalid token signature')
+
+
def _check_audience(payload_dict, audience):
    """Checks audience field from a JWT payload.

    Does nothing if the passed in ``audience`` is null.

    Args:
        payload_dict: dict, A dictionary containing a JWT payload.
        audience: string or NoneType, an audience to check for in
                  the JWT payload.

    Raises:
        AppIdentityError: If there is no ``'aud'`` field in the payload
                          dictionary but there is an ``audience`` to check.
        AppIdentityError: If the ``'aud'`` field in the payload dictionary
                          does not match the ``audience``.
    """
    if audience is None:
        # Caller opted out of audience verification.
        return

    claimed_audience = payload_dict.get('aud')
    if claimed_audience is None:
        raise AppIdentityError(
            'No aud field in token: {0}'.format(payload_dict))
    if claimed_audience != audience:
        raise AppIdentityError('Wrong recipient, {0} != {1}: {2}'.format(
            claimed_audience, audience, payload_dict))
+
+
def _verify_time_range(payload_dict):
    """Verifies the issued at and expiration from a JWT payload.

    Makes sure the current time (in UTC) falls between the issued at and
    expiration for the JWT (with some skew allowed for via
    ``CLOCK_SKEW_SECS``).

    Args:
        payload_dict: dict, A dictionary containing a JWT payload.

    Raises:
        AppIdentityError: If there is no ``'iat'`` field in the payload
                          dictionary.
        AppIdentityError: If there is no ``'exp'`` field in the payload
                          dictionary.
        AppIdentityError: If the JWT expiration is too far in the future (i.e.
                          if the expiration would imply a token lifetime
                          longer than what is allowed.)
        AppIdentityError: If the token appears to have been issued in the
                          future (up to clock skew).
        AppIdentityError: If the token appears to have expired in the past
                          (up to clock skew).
    """
    # Get the current time to use throughout.
    now = int(time.time())

    # Make sure issued at and expiration are in the payload.
    issued_at = payload_dict.get('iat')
    if issued_at is None:
        raise AppIdentityError(
            'No iat field in token: {0}'.format(payload_dict))
    expiration = payload_dict.get('exp')
    if expiration is None:
        raise AppIdentityError(
            'No exp field in token: {0}'.format(payload_dict))

    # Make sure the expiration gives an acceptable token lifetime
    # (MAX_TOKEN_LIFETIME_SECS is one day).
    if expiration >= now + MAX_TOKEN_LIFETIME_SECS:
        raise AppIdentityError(
            'exp field too far in future: {0}'.format(payload_dict))

    # Make sure (up to clock skew) that the token wasn't issued in the future.
    earliest = issued_at - CLOCK_SKEW_SECS
    if now < earliest:
        raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(
            now, earliest, payload_dict))
    # Make sure (up to clock skew) that the token isn't already expired.
    latest = expiration + CLOCK_SKEW_SECS
    if now > latest:
        raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(
            now, latest, payload_dict))
+
+
def verify_signed_jwt_with_certs(jwt, certs, audience=None):
    """Verify a JWT against public certs.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
        jwt: string, A JWT.
        certs: dict, Dictionary where values of public keys in PEM format.
        audience: string, The audience, 'aud', that this JWT should contain. If
                  None then the JWT's 'aud' parameter is not verified.

    Returns:
        dict, The deserialized JSON payload in the JWT.

    Raises:
        AppIdentityError: if any checks are failed.
    """
    jwt = _helpers._to_bytes(jwt)

    if jwt.count(b'.') != 2:
        raise AppIdentityError(
            'Wrong number of segments in token: {0}'.format(jwt))

    header, payload, signature = jwt.split(b'.')
    message_to_sign = header + b'.' + payload
    signature = _helpers._urlsafe_b64decode(signature)

    # Parse token.
    payload_bytes = _helpers._urlsafe_b64decode(payload)
    try:
        payload_dict = json.loads(_helpers._from_bytes(payload_bytes))
    except ValueError:
        # Fix: narrowed from a bare ``except:``. json.loads raises
        # ValueError (JSONDecodeError is a subclass) and byte decoding
        # raises UnicodeDecodeError (also a ValueError); the bare except
        # would additionally have swallowed SystemExit/KeyboardInterrupt.
        raise AppIdentityError('Can\'t parse token: {0}'.format(payload_bytes))

    # Verify that the signature matches the message.
    _verify_signature(message_to_sign, signature, certs.values())

    # Verify the issued at and created times in the payload.
    _verify_time_range(payload_dict)

    # Check audience.
    _check_audience(payload_dict, audience)

    return payload_dict
diff --git a/contrib/python/oauth2client/py2/oauth2client/file.py b/contrib/python/oauth2client/py2/oauth2client/file.py
new file mode 100644
index 0000000000..3551c80d47
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/file.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for OAuth.
+
+Utilities for making it easier to work with OAuth 2.0
+credentials.
+"""
+
+import os
+import threading
+
+from oauth2client import _helpers
+from oauth2client import client
+
+
class Storage(client.Storage):
    """Store and retrieve a single credential to and from a file."""

    def __init__(self, filename):
        super(Storage, self).__init__(lock=threading.Lock())
        self._filename = filename

    def locked_get(self):
        """Retrieve Credential from file.

        Returns:
            oauth2client.client.Credentials

        Raises:
            IOError if the file is a symbolic link.
        """
        credentials = None
        _helpers.validate_file(self._filename)
        try:
            # Fix: use a context manager so the handle is closed even when
            # read() raises (the original leaked the handle in that case).
            with open(self._filename, 'rb') as f:
                content = f.read()
        except IOError:
            # Missing/unreadable file means "no stored credentials".
            return credentials

        try:
            credentials = client.Credentials.new_from_json(content)
            credentials.set_store(self)
        except ValueError:
            # File contents are not valid credentials JSON; behave as if
            # nothing is stored.
            pass

        return credentials

    def _create_file_if_needed(self):
        """Create an empty file if necessary.

        This method will not initialize the file. Instead it implements a
        simple version of "touch" to ensure the file has been created.
        """
        if not os.path.exists(self._filename):
            # Restrict the new file to owner read/write only (0600).
            old_umask = os.umask(0o177)
            try:
                open(self._filename, 'a+b').close()
            finally:
                os.umask(old_umask)

    def locked_put(self, credentials):
        """Write Credentials to file.

        Args:
            credentials: Credentials, the credentials to store.

        Raises:
            IOError if the file is a symbolic link.
        """
        self._create_file_if_needed()
        _helpers.validate_file(self._filename)
        # Fix: context manager guarantees flush/close even if write fails.
        with open(self._filename, 'w') as f:
            f.write(credentials.to_json())

    def locked_delete(self):
        """Delete the credentials file from disk."""
        # Fix: removed the incorrect docstring Args section copied from
        # locked_put; this method takes no arguments.
        os.unlink(self._filename)
diff --git a/contrib/python/oauth2client/py2/oauth2client/service_account.py b/contrib/python/oauth2client/py2/oauth2client/service_account.py
new file mode 100644
index 0000000000..540bfaaa1b
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/service_account.py
@@ -0,0 +1,685 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""oauth2client Service account credentials class."""
+
+import base64
+import copy
+import datetime
+import json
+import time
+
+import oauth2client
+from oauth2client import _helpers
+from oauth2client import client
+from oauth2client import crypt
+from oauth2client import transport
+
+
+_PASSWORD_DEFAULT = 'notasecret'
+_PKCS12_KEY = '_private_key_pkcs12'
+_PKCS12_ERROR = r"""
+This library only implements PKCS#12 support via the pyOpenSSL library.
+Either install pyOpenSSL, or please convert the .p12 file
+to .pem format:
+ $ cat key.p12 | \
+ > openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
+ > openssl rsa > key.pem
+"""
+
+
+class ServiceAccountCredentials(client.AssertionCredentials):
+ """Service Account credential for OAuth 2.0 signed JWT grants.
+
+ Supports
+
+ * JSON keyfile (typically contains a PKCS8 key stored as
+ PEM text)
+ * ``.p12`` key (stores PKCS12 key and certificate)
+
+ Makes an assertion to server using a signed JWT assertion in exchange
+ for an access token.
+
+ This credential does not require a flow to instantiate because it
+ represents a two legged flow, and therefore has all of the required
+ information to generate and refresh its own access tokens.
+
+ Args:
+ service_account_email: string, The email associated with the
+ service account.
+ signer: ``crypt.Signer``, A signer which can be used to sign content.
+ scopes: List or string, (Optional) Scopes to use when acquiring
+ an access token.
+ private_key_id: string, (Optional) Private key identifier. Typically
+ only used with a JSON keyfile. Can be sent in the
+ header of a JWT token assertion.
+ client_id: string, (Optional) Client ID for the project that owns the
+ service account.
+ user_agent: string, (Optional) User agent to use when sending
+ request.
+ token_uri: string, URI for token endpoint. For convenience defaults
+ to Google's endpoints but any OAuth 2.0 provider can be
+ used.
+ revoke_uri: string, URI for revoke endpoint. For convenience defaults
+ to Google's endpoints but any OAuth 2.0 provider can be
+ used.
+ kwargs: dict, Extra key-value pairs (both strings) to send in the
+ payload body when making an assertion.
+ """
+
+ MAX_TOKEN_LIFETIME_SECS = 3600
+ """Max lifetime of the token (one hour, in seconds)."""
+
+ NON_SERIALIZED_MEMBERS = (
+ frozenset(['_signer']) |
+ client.AssertionCredentials.NON_SERIALIZED_MEMBERS)
+ """Members that aren't serialized when object is converted to JSON."""
+
+ # Can be over-ridden by factory constructors. Used for
+ # serialization/deserialization purposes.
+ _private_key_pkcs8_pem = None
+ _private_key_pkcs12 = None
+ _private_key_password = None
+
    def __init__(self,
                 service_account_email,
                 signer,
                 scopes='',
                 private_key_id=None,
                 client_id=None,
                 user_agent=None,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 **kwargs):
        """Constructor; see the class docstring for parameter semantics."""

        # access_token starts as None (first positional argument); it is
        # obtained later via the signed-JWT assertion flow.
        super(ServiceAccountCredentials, self).__init__(
            None, user_agent=user_agent, token_uri=token_uri,
            revoke_uri=revoke_uri)

        self._service_account_email = service_account_email
        self._signer = signer
        # Scopes may be a list or string; normalized to a space-joined string.
        self._scopes = _helpers.scopes_to_string(scopes)
        self._private_key_id = private_key_id
        self.client_id = client_id
        self._user_agent = user_agent
        # Extra assertion payload entries, sent as-is in the JWT claims.
        self._kwargs = kwargs
+
+ def _to_json(self, strip, to_serialize=None):
+ """Utility function that creates JSON repr. of a credentials object.
+
+ Over-ride is needed since PKCS#12 keys will not in general be JSON
+ serializable.
+
+ Args:
+ strip: array, An array of names of members to exclude from the
+ JSON.
+ to_serialize: dict, (Optional) The properties for this object
+ that will be serialized. This allows callers to
+ modify before serializing.
+
+ Returns:
+ string, a JSON representation of this instance, suitable to pass to
+ from_json().
+ """
+ if to_serialize is None:
+ to_serialize = copy.copy(self.__dict__)
+ pkcs12_val = to_serialize.get(_PKCS12_KEY)
+ if pkcs12_val is not None:
+ to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)
+ return super(ServiceAccountCredentials, self)._to_json(
+ strip, to_serialize=to_serialize)
+
+ @classmethod
+ def _from_parsed_json_keyfile(cls, keyfile_dict, scopes,
+ token_uri=None, revoke_uri=None):
+ """Helper for factory constructors from JSON keyfile.
+
+ Args:
+ keyfile_dict: dict-like object, The parsed dictionary-like object
+ containing the contents of the JSON keyfile.
+ scopes: List or string, Scopes to use when acquiring an
+ access token.
+ token_uri: string, URI for OAuth 2.0 provider token endpoint.
+ If unset and not present in keyfile_dict, defaults
+ to Google's endpoints.
+ revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
+ If unset and not present in keyfile_dict, defaults
+ to Google's endpoints.
+
+ Returns:
+ ServiceAccountCredentials, a credentials object created from
+ the keyfile contents.
+
+ Raises:
+ ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
+ KeyError, if one of the expected keys is not present in
+ the keyfile.
+ """
+ creds_type = keyfile_dict.get('type')
+ if creds_type != client.SERVICE_ACCOUNT:
+ raise ValueError('Unexpected credentials type', creds_type,
+ 'Expected', client.SERVICE_ACCOUNT)
+
+ service_account_email = keyfile_dict['client_email']
+ private_key_pkcs8_pem = keyfile_dict['private_key']
+ private_key_id = keyfile_dict['private_key_id']
+ client_id = keyfile_dict['client_id']
+ if not token_uri:
+ token_uri = keyfile_dict.get('token_uri',
+ oauth2client.GOOGLE_TOKEN_URI)
+ if not revoke_uri:
+ revoke_uri = keyfile_dict.get('revoke_uri',
+ oauth2client.GOOGLE_REVOKE_URI)
+
+ signer = crypt.Signer.from_string(private_key_pkcs8_pem)
+ credentials = cls(service_account_email, signer, scopes=scopes,
+ private_key_id=private_key_id,
+ client_id=client_id, token_uri=token_uri,
+ revoke_uri=revoke_uri)
+ credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
+ return credentials
+
+ @classmethod
+ def from_json_keyfile_name(cls, filename, scopes='',
+ token_uri=None, revoke_uri=None):
+
+ """Factory constructor from JSON keyfile by name.
+
+ Args:
+ filename: string, The location of the keyfile.
+ scopes: List or string, (Optional) Scopes to use when acquiring an
+ access token.
+ token_uri: string, URI for OAuth 2.0 provider token endpoint.
+ If unset and not present in the key file, defaults
+ to Google's endpoints.
+ revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
+ If unset and not present in the key file, defaults
+ to Google's endpoints.
+
+ Returns:
+ ServiceAccountCredentials, a credentials object created from
+ the keyfile.
+
+ Raises:
+ ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
+ KeyError, if one of the expected keys is not present in
+ the keyfile.
+ """
+ with open(filename, 'r') as file_obj:
+ client_credentials = json.load(file_obj)
+ return cls._from_parsed_json_keyfile(client_credentials, scopes,
+ token_uri=token_uri,
+ revoke_uri=revoke_uri)
+
+ @classmethod
+ def from_json_keyfile_dict(cls, keyfile_dict, scopes='',
+ token_uri=None, revoke_uri=None):
+ """Factory constructor from parsed JSON keyfile.
+
+ Args:
+ keyfile_dict: dict-like object, The parsed dictionary-like object
+ containing the contents of the JSON keyfile.
+ scopes: List or string, (Optional) Scopes to use when acquiring an
+ access token.
+ token_uri: string, URI for OAuth 2.0 provider token endpoint.
+ If unset and not present in keyfile_dict, defaults
+ to Google's endpoints.
+ revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
+ If unset and not present in keyfile_dict, defaults
+ to Google's endpoints.
+
+ Returns:
+ ServiceAccountCredentials, a credentials object created from
+ the keyfile.
+
+ Raises:
+ ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
+ KeyError, if one of the expected keys is not present in
+ the keyfile.
+ """
+ return cls._from_parsed_json_keyfile(keyfile_dict, scopes,
+ token_uri=token_uri,
+ revoke_uri=revoke_uri)
+
    @classmethod
    def _from_p12_keyfile_contents(cls, service_account_email,
                                   private_key_pkcs12,
                                   private_key_password=None, scopes='',
                                   token_uri=oauth2client.GOOGLE_TOKEN_URI,
                                   revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Factory constructor from PKCS#12 keyfile contents.

        Args:
            service_account_email: string, The email associated with the
                service account.
            private_key_pkcs12: string, The contents of a PKCS#12 keyfile.
            private_key_password: string, (Optional) Password for PKCS#12
                private key. Defaults to ``notasecret``.
            scopes: List or string, (Optional) Scopes to use when acquiring
                an access token.
            token_uri: string, URI for token endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider
                can be used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0
                provider can be used.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            NotImplementedError if pyOpenSSL is not installed / not the
            active crypto library.
        """
        if private_key_password is None:
            private_key_password = _PASSWORD_DEFAULT
        # Only the pyOpenSSL-backed signer can load PKCS#12 key material.
        if crypt.Signer is not crypt.OpenSSLSigner:
            raise NotImplementedError(_PKCS12_ERROR)
        signer = crypt.Signer.from_string(private_key_pkcs12,
                                          private_key_password)
        credentials = cls(service_account_email, signer, scopes=scopes,
                          token_uri=token_uri, revoke_uri=revoke_uri)
        # Keep the raw key material for serialization / copying methods.
        credentials._private_key_pkcs12 = private_key_pkcs12
        credentials._private_key_password = private_key_password
        return credentials
+
    @classmethod
    def from_p12_keyfile(cls, service_account_email, filename,
                         private_key_password=None, scopes='',
                         token_uri=oauth2client.GOOGLE_TOKEN_URI,
                         revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Factory constructor from PKCS#12 keyfile.

        Args:
            service_account_email: string, The email associated with the
                service account.
            filename: string, The location of the PKCS#12 keyfile.
            private_key_password: string, (Optional) Password for PKCS#12
                private key. Defaults to ``notasecret``.
            scopes: List or string, (Optional) Scopes to use when acquiring
                an access token.
            token_uri: string, URI for token endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider
                can be used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0
                provider can be used.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            NotImplementedError if pyOpenSSL is not installed / not the
            active crypto library.
        """
        # Binary read: PKCS#12 is a binary container format.
        with open(filename, 'rb') as file_obj:
            private_key_pkcs12 = file_obj.read()
        return cls._from_p12_keyfile_contents(
            service_account_email, private_key_pkcs12,
            private_key_password=private_key_password, scopes=scopes,
            token_uri=token_uri, revoke_uri=revoke_uri)
+
    @classmethod
    def from_p12_keyfile_buffer(cls, service_account_email, file_buffer,
                                private_key_password=None, scopes='',
                                token_uri=oauth2client.GOOGLE_TOKEN_URI,
                                revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Factory constructor from PKCS#12 keyfile contents in a buffer.

        Args:
            service_account_email: string, The email associated with the
                service account.
            file_buffer: stream, A buffer that implements ``read()``
                and contains the PKCS#12 key contents.
            private_key_password: string, (Optional) Password for PKCS#12
                private key. Defaults to ``notasecret``.
            scopes: List or string, (Optional) Scopes to use when acquiring
                an access token.
            token_uri: string, URI for token endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider
                can be used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0
                provider can be used.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            NotImplementedError if pyOpenSSL is not installed / not the
            active crypto library.
        """
        # Drain the stream and delegate to the shared PKCS#12 helper.
        private_key_pkcs12 = file_buffer.read()
        return cls._from_p12_keyfile_contents(
            service_account_email, private_key_pkcs12,
            private_key_password=private_key_password, scopes=scopes,
            token_uri=token_uri, revoke_uri=revoke_uri)
+
    def _generate_assertion(self):
        """Generate the signed JWT assertion used in the token request.

        The claims are the token endpoint as audience, the requested
        scopes, issue/expiry timestamps and the service account email as
        issuer, merged with any extra claims held in ``self._kwargs``.
        """
        now = int(time.time())
        payload = {
            'aud': self.token_uri,
            'scope': self._scopes,
            'iat': now,
            'exp': now + self.MAX_TOKEN_LIFETIME_SECS,
            'iss': self._service_account_email,
        }
        # Extra claims (e.g. 'sub' from create_delegated) override or
        # extend the defaults above.
        payload.update(self._kwargs)
        return crypt.make_signed_jwt(self._signer, payload,
                                     key_id=self._private_key_id)
+
    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        Implements abstract method
        :meth:`oauth2client.client.AssertionCredentials.sign_blob`.

        Args:
            blob: bytes, Message to be signed.

        Returns:
            tuple, A pair of the private key ID used to sign the blob and
            the signed contents.
        """
        # Signing itself is delegated to the crypt.Signer instance.
        return self._private_key_id, self._signer.sign(blob)
+
    @property
    def service_account_email(self):
        """Get the email for the current service account.

        Returns:
            string, The email associated with the service account.
        """
        # Read-only view of the constructor-provided email.
        return self._service_account_email
+
    @property
    def serialization_data(self):
        """Dict of the fields used to serialize this credential.

        Mirrors the JSON-keyfile layout, including the PKCS#8 PEM private
        key; it is therefore sensitive data.
        """
        # NOTE: This is only useful for JSON keyfile.
        return {
            'type': 'service_account',
            'client_email': self._service_account_email,
            'private_key_id': self._private_key_id,
            'private_key': self._private_key_pkcs8_pem,
            'client_id': self.client_id,
        }
+
    @classmethod
    def from_json(cls, json_data):
        """Deserialize a JSON-serialized instance.

        Inverse to :meth:`to_json`.

        Args:
            json_data: dict or string, Serialized JSON (as a string or an
                already parsed dictionary) representing a credential.

        Returns:
            ServiceAccountCredentials from the serialized data.
        """
        if not isinstance(json_data, dict):
            json_data = json.loads(_helpers._from_bytes(json_data))

        private_key_pkcs8_pem = None
        pkcs12_val = json_data.get(_PKCS12_KEY)
        password = None
        if pkcs12_val is None:
            # JSON-keyfile credential: rebuild the signer from the PEM key.
            private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']
            signer = crypt.Signer.from_string(private_key_pkcs8_pem)
        else:
            # NOTE: This assumes that private_key_pkcs8_pem is not also
            #       in the serialized data. This would be very incorrect
            #       state.
            # PKCS#12 bytes were serialized base64-encoded; undo that.
            pkcs12_val = base64.b64decode(pkcs12_val)
            password = json_data['_private_key_password']
            signer = crypt.Signer.from_string(pkcs12_val, password)

        credentials = cls(
            json_data['_service_account_email'],
            signer,
            scopes=json_data['_scopes'],
            private_key_id=json_data['_private_key_id'],
            client_id=json_data['client_id'],
            user_agent=json_data['_user_agent'],
            **json_data['_kwargs']
        )
        # Restore whichever flavor of raw key material was serialized.
        if private_key_pkcs8_pem is not None:
            credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
        if pkcs12_val is not None:
            credentials._private_key_pkcs12 = pkcs12_val
        if password is not None:
            credentials._private_key_password = password
        credentials.invalid = json_data['invalid']
        credentials.access_token = json_data['access_token']
        credentials.token_uri = json_data['token_uri']
        credentials.revoke_uri = json_data['revoke_uri']
        # token_expiry is optional in the serialized form.
        token_expiry = json_data.get('token_expiry', None)
        if token_expiry is not None:
            credentials.token_expiry = datetime.datetime.strptime(
                token_expiry, client.EXPIRY_FORMAT)
        return credentials
+
    def create_scoped_required(self):
        """Whether these credentials still need scopes applied.

        Returns:
            bool, True when no scopes were provided at construction.
        """
        return not self._scopes
+
    def create_scoped(self, scopes):
        """Create a copy of these credentials with the given scopes.

        Args:
            scopes: List or string, scopes to use when acquiring an
                access token.

        Returns:
            ServiceAccountCredentials, a copy of the current credentials
            rebuilt with ``scopes``.
        """
        result = self.__class__(self._service_account_email,
                                self._signer,
                                scopes=scopes,
                                private_key_id=self._private_key_id,
                                client_id=self.client_id,
                                user_agent=self._user_agent,
                                **self._kwargs)
        # Preserve endpoints and raw key material on the copy.
        result.token_uri = self.token_uri
        result.revoke_uri = self.revoke_uri
        result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
        result._private_key_pkcs12 = self._private_key_pkcs12
        result._private_key_password = self._private_key_password
        return result
+
    def create_with_claims(self, claims):
        """Create credentials that specify additional claims.

        Args:
            claims: dict, key-value pairs for claims.

        Returns:
            ServiceAccountCredentials, a copy of the current service account
            credentials with updated claims to use when obtaining access
            tokens.
        """
        # Merge the new claims over existing ones on a copy, so the
        # current credentials are left untouched.
        new_kwargs = dict(self._kwargs)
        new_kwargs.update(claims)
        result = self.__class__(self._service_account_email,
                                self._signer,
                                scopes=self._scopes,
                                private_key_id=self._private_key_id,
                                client_id=self.client_id,
                                user_agent=self._user_agent,
                                **new_kwargs)
        # Preserve endpoints and raw key material on the copy.
        result.token_uri = self.token_uri
        result.revoke_uri = self.revoke_uri
        result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
        result._private_key_pkcs12 = self._private_key_pkcs12
        result._private_key_password = self._private_key_password
        return result
+
    def create_delegated(self, sub):
        """Create credentials that act as domain-wide delegation of authority.

        Use the ``sub`` parameter as the subject to delegate on behalf of
        that user.

        For example::

          >>> account_sub = 'foo@email.com'
          >>> delegate_creds = creds.create_delegated(account_sub)

        Args:
            sub: string, An email address that this service account will
                act on behalf of (via domain-wide delegation).

        Returns:
            ServiceAccountCredentials, a copy of the current service account
            updated to act on behalf of ``sub``.
        """
        # Delegation is expressed as the standard JWT 'sub' claim.
        return self.create_with_claims({'sub': sub})
+
+
+def _datetime_to_secs(utc_time):
+ # TODO(issue 298): use time_delta.total_seconds()
+ # time_delta.total_seconds() not supported in Python 2.6
+ epoch = datetime.datetime(1970, 1, 1)
+ time_delta = utc_time - epoch
+ return time_delta.days * 86400 + time_delta.seconds
+
+
class _JWTAccessCredentials(ServiceAccountCredentials):
    """Self signed JWT credentials.

    Makes an assertion to server using a self signed JWT from service account
    credentials. These credentials do NOT use OAuth 2.0 and instead
    authenticate directly.
    """
    _MAX_TOKEN_LIFETIME_SECS = 3600
    """Max lifetime of the token (one hour, in seconds)."""

    def __init__(self,
                 service_account_email,
                 signer,
                 scopes=None,
                 private_key_id=None,
                 client_id=None,
                 user_agent=None,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 additional_claims=None):
        # NOTE(review): ``scopes`` is accepted for signature compatibility
        # with the parent class but is not forwarded -- these credentials
        # are unscoped by definition (see create_scoped_required).
        if additional_claims is None:
            additional_claims = {}
        super(_JWTAccessCredentials, self).__init__(
            service_account_email,
            signer,
            private_key_id=private_key_id,
            client_id=client_id,
            user_agent=user_agent,
            token_uri=token_uri,
            revoke_uri=revoke_uri,
            **additional_claims)

    def authorize(self, http):
        """Authorize an httplib2.Http instance with a JWT assertion.

        Unless specified, the 'aud' of the assertion will be the base
        uri of the request.

        Args:
            http: An instance of ``httplib2.Http`` or something that acts
                like it.
        Returns:
            A modified instance of http that was passed in.
        Example::
            h = httplib2.Http()
            h = credentials.authorize(h)
        """
        transport.wrap_http_for_jwt_access(self, http)
        return http

    def get_access_token(self, http=None, additional_claims=None):
        """Create a signed jwt.

        Args:
            http: unused
            additional_claims: dict, additional claims to add to
                the payload of the JWT.
        Returns:
            An AccessTokenInfo with the signed jwt
        """
        if additional_claims is None:
            # Reuse the cached token, regenerating it locally if missing
            # or expired.
            if self.access_token is None or self.access_token_expired:
                self.refresh(None)
            return client.AccessTokenInfo(
                access_token=self.access_token, expires_in=self._expires_in())
        else:
            # Create a 1 time token
            token, unused_expiry = self._create_token(additional_claims)
            return client.AccessTokenInfo(
                access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)

    def revoke(self, http):
        """Cannot revoke JWTAccessCredentials tokens."""
        pass

    def create_scoped_required(self):
        """Always True; see create_scoped for obtaining scoped credentials."""
        # JWTAccessCredentials are unscoped by definition
        return True

    def create_scoped(self, scopes, token_uri=oauth2client.GOOGLE_TOKEN_URI,
                      revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Return plain OAuth 2.0 ServiceAccountCredentials with scopes."""
        # Returns an OAuth2 credentials with the given scope
        result = ServiceAccountCredentials(self._service_account_email,
                                           self._signer,
                                           scopes=scopes,
                                           private_key_id=self._private_key_id,
                                           client_id=self.client_id,
                                           user_agent=self._user_agent,
                                           token_uri=token_uri,
                                           revoke_uri=revoke_uri,
                                           **self._kwargs)
        # Copy over whichever flavor of raw key material is present.
        if self._private_key_pkcs8_pem is not None:
            result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
        if self._private_key_pkcs12 is not None:
            result._private_key_pkcs12 = self._private_key_pkcs12
        if self._private_key_password is not None:
            result._private_key_password = self._private_key_password
        return result

    def refresh(self, http):
        """Refreshes the access_token.

        The HTTP object is unused since no request needs to be made to
        get a new token, it can just be generated locally.

        Args:
            http: unused HTTP object
        """
        self._refresh(None)

    def _refresh(self, http):
        """Refreshes the access_token.

        Args:
            http: unused HTTP object
        """
        self.access_token, self.token_expiry = self._create_token()

    def _create_token(self, additional_claims=None):
        """Sign a fresh JWT locally.

        Args:
            additional_claims: dict, (Optional) claims merged over the
                defaults and over ``self._kwargs``.

        Returns:
            tuple, (jwt string, expiry datetime).
        """
        now = client._UTCNOW()
        lifetime = datetime.timedelta(seconds=self._MAX_TOKEN_LIFETIME_SECS)
        expiry = now + lifetime
        payload = {
            'iat': _datetime_to_secs(now),
            'exp': _datetime_to_secs(expiry),
            'iss': self._service_account_email,
            'sub': self._service_account_email
        }
        payload.update(self._kwargs)
        if additional_claims is not None:
            payload.update(additional_claims)
        jwt = crypt.make_signed_jwt(self._signer, payload,
                                    key_id=self._private_key_id)
        # The signed JWT comes back as bytes; decode to native text.
        return jwt.decode('ascii'), expiry
diff --git a/contrib/python/oauth2client/py2/oauth2client/tools.py b/contrib/python/oauth2client/py2/oauth2client/tools.py
new file mode 100644
index 0000000000..51669934df
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/tools.py
@@ -0,0 +1,256 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Command-line tools for authenticating via OAuth 2.0
+
+Do the OAuth 2.0 Web Server dance for a command line application. Stores the
+generated credentials in a common file that is used by other example apps in
+the same directory.
+"""
+
+from __future__ import print_function
+
+import logging
+import socket
+import sys
+
+from six.moves import BaseHTTPServer
+from six.moves import http_client
+from six.moves import input
+from six.moves import urllib
+
+from oauth2client import _helpers
+from oauth2client import client
+
+
+__all__ = ['argparser', 'run_flow', 'message_if_missing']
+
+_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
+
+To make this sample run you will need to populate the client_secrets.json file
+found at:
+
+ {file_path}
+
+with information from the APIs Console <https://code.google.com/apis/console>.
+
+"""
+
+_FAILED_START_MESSAGE = """
+Failed to start a local webserver listening on either port 8080
+or port 8090. Please check your firewall settings and locally
+running programs that may be blocking or using those ports.
+
+Falling back to --noauth_local_webserver and continuing with
+authorization.
+"""
+
+_BROWSER_OPENED_MESSAGE = """
+Your browser has been opened to visit:
+
+ {address}
+
+If your browser is on a different machine then exit and re-run this
+application with the command-line parameter
+
+ --noauth_local_webserver
+"""
+
+_GO_TO_LINK_MESSAGE = """
+Go to the following link in your browser:
+
+ {address}
+"""
+
+
+def _CreateArgumentParser():
+ try:
+ import argparse
+ except ImportError: # pragma: NO COVER
+ return None
+ parser = argparse.ArgumentParser(add_help=False)
+ parser.add_argument('--auth_host_name', default='localhost',
+ help='Hostname when running a local web server.')
+ parser.add_argument('--noauth_local_webserver', action='store_true',
+ default=False, help='Do not run a local web server.')
+ parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
+ nargs='*', help='Port web server should listen on.')
+ parser.add_argument(
+ '--logging_level', default='ERROR',
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+ help='Set the logging level of detail.')
+ return parser
+
+
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run_flow(). Pass it in as part of the 'parents' argument to your
# own ArgumentParser.
argparser = _CreateArgumentParser()
+
+
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Class-level default; ClientRedirectHandler.do_GET replaces it on the
    # instance with the parsed query parameters of the redirect request.
    query_params = {}
+
+
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into the servers query_params and then stops serving.
    """

    def do_GET(self):
        """Handle a GET request.

        Parses the query parameters and prints a message
        if the flow has completed. Note that we can't detect
        if an error occurred.
        """
        self.send_response(http_client.OK)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Stash the parsed query parameters on the server so run_flow()
        # can read them after handle_request() returns.
        parts = urllib.parse.urlparse(self.path)
        query = _helpers.parse_unique_urlencoded(parts.query)
        self.server.query_params = query
        self.wfile.write(
            b'<html><head><title>Authentication Status</title></head>')
        self.wfile.write(
            b'<body><p>The authentication flow has completed.</p>')
        self.wfile.write(b'</body></html>')

    def log_message(self, format, *args):
        """Do not log messages to stdout while running as cmd. line program."""
+
+
@_helpers.positional(3)
def run_flow(flow, storage, flags=None, http=None):
    """Core code for a command-line application.

    The ``run_flow()`` function is called from your application and runs
    through all the steps to obtain credentials. It takes a ``Flow``
    argument and attempts to open an authorization server page in the
    user's default web browser. The server asks the user to grant your
    application access to the user's data. If the user grants access,
    the ``run_flow()`` function returns new credentials. The new credentials
    are also stored in the ``storage`` argument, which updates the file
    associated with the ``Storage`` object.

    It presumes it is run from a command-line application and supports the
    following flags:

    ``--auth_host_name`` (string, default: ``localhost``)
       Host name to use when running a local web server to handle
       redirects during OAuth authorization.

    ``--auth_host_port`` (integer, default: ``[8080, 8090]``)
       Port to use when running a local web server to handle redirects
       during OAuth authorization. Repeat this option to specify a list
       of values.

    ``--[no]auth_local_webserver`` (boolean, default: ``True``)
       Run a local web server to handle redirects during OAuth
       authorization.

    The tools module defines an ``ArgumentParser`` that already contains the
    flag definitions that ``run_flow()`` requires. You can pass that
    ``ArgumentParser`` to your ``ArgumentParser`` constructor::

        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            parents=[tools.argparser])
        flags = parser.parse_args(argv)

    Args:
        flow: Flow, an OAuth 2.0 Flow to step through.
        storage: Storage, a ``Storage`` to store the credential in.
        flags: ``argparse.Namespace``, (Optional) The command-line flags. This
            is the object returned from calling ``parse_args()`` on
            ``argparse.ArgumentParser`` as described above. Defaults
            to ``argparser.parse_args()``.
        http: An instance of ``httplib2.Http.request`` or something that
            acts like it.

    Returns:
        Credentials, the obtained credential.
    """
    if flags is None:
        flags = argparser.parse_args()
    logging.getLogger().setLevel(getattr(logging, flags.logging_level))
    if not flags.noauth_local_webserver:
        success = False
        port_number = 0
        # Try each candidate port until one binds; fall back to the
        # manual (out-of-band) flow when none is available.
        for port in flags.auth_host_port:
            port_number = port
            try:
                httpd = ClientRedirectServer((flags.auth_host_name, port),
                                             ClientRedirectHandler)
            except socket.error:
                pass
            else:
                success = True
                break
        flags.noauth_local_webserver = not success
        if not success:
            print(_FAILED_START_MESSAGE)

    if not flags.noauth_local_webserver:
        oauth_callback = 'http://{host}:{port}/'.format(
            host=flags.auth_host_name, port=port_number)
    else:
        # No local server: the provider shows the verification code to the
        # user, who pastes it in below.
        oauth_callback = client.OOB_CALLBACK_URN
    flow.redirect_uri = oauth_callback
    authorize_url = flow.step1_get_authorize_url()

    if not flags.noauth_local_webserver:
        import webbrowser
        webbrowser.open(authorize_url, new=1, autoraise=True)
        print(_BROWSER_OPENED_MESSAGE.format(address=authorize_url))
    else:
        print(_GO_TO_LINK_MESSAGE.format(address=authorize_url))

    code = None
    if not flags.noauth_local_webserver:
        # Block until the provider redirects back to the local server.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'code' in httpd.query_params:
            code = httpd.query_params['code']
        else:
            print('Failed to find "code" in the query parameters '
                  'of the redirect.')
            sys.exit('Try running with --noauth_local_webserver.')
    else:
        code = input('Enter verification code: ').strip()

    try:
        credential = flow.step2_exchange(code, http=http)
    except client.FlowExchangeError as e:
        sys.exit('Authentication has failed: {0}'.format(e))

    storage.put(credential)
    credential.set_store(storage)
    print('Authentication successful.')

    return credential
+
+
def message_if_missing(filename):
    """Helpful message to display if the CLIENT_SECRETS file is missing.

    Args:
        filename: string, path to the missing client-secrets file.

    Returns:
        string, the warning message with ``filename`` substituted in.
    """
    return _CLIENT_SECRETS_MESSAGE.format(file_path=filename)
diff --git a/contrib/python/oauth2client/py2/oauth2client/transport.py b/contrib/python/oauth2client/py2/oauth2client/transport.py
new file mode 100644
index 0000000000..79a61f1c1b
--- /dev/null
+++ b/contrib/python/oauth2client/py2/oauth2client/transport.py
@@ -0,0 +1,285 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import httplib2
+import six
+from six.moves import http_client
+
+from oauth2client import _helpers
+
+
+_LOGGER = logging.getLogger(__name__)
+# Properties present in file-like streams / buffers.
+_STREAM_PROPERTIES = ('read', 'seek', 'tell')
+
+# Google Data client libraries may need to set this to [401, 403].
+REFRESH_STATUS_CODES = (http_client.UNAUTHORIZED,)
+
+
class MemoryCache(object):
    """httplib2-compatible cache that stores entries in a local dict."""

    def __init__(self):
        # Backing store for cached entries.
        self.cache = {}

    def get(self, key):
        """Return the cached value for ``key``, or None when absent."""
        try:
            return self.cache[key]
        except KeyError:
            return None

    def set(self, key, value):
        """Store ``value`` under ``key``, replacing any previous entry."""
        self.cache[key] = value

    def delete(self, key):
        """Remove ``key`` if present; a missing key is not an error."""
        if key in self.cache:
            del self.cache[key]
+
+
def get_cached_http():
    """Return an HTTP object which caches results returned.

    This is intended to be used in methods like
    oauth2client.client.verify_id_token(), which calls to the same URI
    to retrieve certs.

    Returns:
        httplib2.Http, an HTTP object with a MemoryCache
    """
    # Module-level singleton created at import time (see end of module).
    return _CACHED_HTTP
+
+
def get_http_object(*args, **kwargs):
    """Return a new HTTP object.

    Args:
        *args: tuple, The positional arguments to be passed when
            constructing a new HTTP object.
        **kwargs: dict, The keyword arguments to be passed when
            constructing a new HTTP object.

    Returns:
        httplib2.Http, an HTTP object.
    """
    return httplib2.Http(*args, **kwargs)
+
+
+def _initialize_headers(headers):
+ """Creates a copy of the headers.
+
+ Args:
+ headers: dict, request headers to copy.
+
+ Returns:
+ dict, the copied headers or a new dictionary if the headers
+ were None.
+ """
+ return {} if headers is None else dict(headers)
+
+
+def _apply_user_agent(headers, user_agent):
+ """Adds a user-agent to the headers.
+
+ Args:
+ headers: dict, request headers to add / modify user
+ agent within.
+ user_agent: str, the user agent to add.
+
+ Returns:
+ dict, the original headers passed in, but modified if the
+ user agent is not None.
+ """
+ if user_agent is not None:
+ if 'user-agent' in headers:
+ headers['user-agent'] = (user_agent + ' ' + headers['user-agent'])
+ else:
+ headers['user-agent'] = user_agent
+
+ return headers
+
+
def clean_headers(headers):
    """Forces header keys and values to be strings, i.e not unicode.

    The httplib module just concats the header keys and values in a way that
    may make the message header a unicode string, which, if it then tries to
    concatenate to a binary request body may result in a unicode decode error.

    Args:
        headers: dict, A dictionary of headers.

    Returns:
        The same dictionary but with all the keys converted to strings.

    Raises:
        NonAsciiHeaderError, if a header key or value cannot be encoded.
    """
    clean = {}
    try:
        for k, v in six.iteritems(headers):
            # Coerce non-bytes keys/values via str() before byte-encoding.
            if not isinstance(k, six.binary_type):
                k = str(k)
            if not isinstance(v, six.binary_type):
                v = str(v)
            clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)
    except UnicodeEncodeError:
        # Imported locally, presumably to avoid a circular import with
        # oauth2client.client -- confirm before hoisting to module level.
        from oauth2client.client import NonAsciiHeaderError
        raise NonAsciiHeaderError(k, ': ', v)
    return clean
+
+
def wrap_http_for_auth(credentials, http):
    """Prepares an HTTP object's request method for auth.

    Wraps HTTP requests with logic to catch auth failures (typically
    identified via a 401 status code). In the event of failure, tries
    to refresh the token used and then retry the original request.

    Args:
        credentials: Credentials, the credentials used to identify
                     the authenticated user.
        http: httplib2.Http, an http object to be used to make
              auth requests.
    """
    orig_request_method = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        if not credentials.access_token:
            _LOGGER.info('Attempting refresh to obtain '
                         'initial access_token')
            credentials._refresh(orig_request_method)

        # Clone and modify the request headers to add the appropriate
        # Authorization header.
        headers = _initialize_headers(headers)
        credentials.apply(headers)
        _apply_user_agent(headers, credentials.user_agent)

        # Remember the stream position of a file-like body so it can be
        # rewound before a retry (the first attempt consumes it).
        body_stream_position = None
        # Check if the body is a file-like stream.
        if all(getattr(body, stream_prop, None) for stream_prop in
               _STREAM_PROPERTIES):
            body_stream_position = body.tell()

        resp, content = request(orig_request_method, uri, method, body,
                                clean_headers(headers),
                                redirections, connection_type)

        # A stored token may expire between the time it is retrieved and
        # the time the request is made, so we may need to try twice.
        max_refresh_attempts = 2
        for refresh_attempt in range(max_refresh_attempts):
            if resp.status not in REFRESH_STATUS_CODES:
                break
            _LOGGER.info('Refreshing due to a %s (attempt %s/%s)',
                         resp.status, refresh_attempt + 1,
                         max_refresh_attempts)
            credentials._refresh(orig_request_method)
            credentials.apply(headers)
            if body_stream_position is not None:
                body.seek(body_stream_position)

            resp, content = request(orig_request_method, uri, method, body,
                                    clean_headers(headers),
                                    redirections, connection_type)

        return resp, content

    # Replace the request method with our own closure.
    http.request = new_request

    # Set credentials as a property of the request method.
    http.request.credentials = credentials
+
+
def wrap_http_for_jwt_access(credentials, http):
    """Prepares an HTTP object's request method for JWT access.

    Wraps HTTP requests with logic to catch auth failures (typically
    identified via a 401 status code). In the event of failure, tries
    to refresh the token used and then retry the original request.

    Args:
        credentials: _JWTAccessCredentials, the credentials used to identify
                     a service account that uses JWT access tokens.
        http: httplib2.Http, an http object to be used to make
              auth requests.
    """
    orig_request_method = http.request
    wrap_http_for_auth(credentials, http)
    # The new value of ``http.request`` set by ``wrap_http_for_auth``.
    authenticated_request_method = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        if 'aud' in credentials._kwargs:
            # Preemptively refresh token, this is not done for OAuth2
            if (credentials.access_token is None or
                    credentials.access_token_expired):
                credentials.refresh(None)
            # Fixed audience: go through the auth-wrapped request method,
            # which applies the cached token.
            return request(authenticated_request_method, uri,
                           method, body, headers, redirections,
                           connection_type)
        else:
            # If we don't have an 'aud' (audience) claim,
            # create a 1-time token with the uri root as the audience
            headers = _initialize_headers(headers)
            _apply_user_agent(headers, credentials.user_agent)
            uri_root = uri.split('?', 1)[0]
            token, unused_expiry = credentials._create_token({'aud': uri_root})

            headers['Authorization'] = 'Bearer ' + token
            return request(orig_request_method, uri, method, body,
                           clean_headers(headers),
                           redirections, connection_type)

    # Replace the request method with our own closure.
    http.request = new_request

    # Set credentials as a property of the request method.
    http.request.credentials = credentials
+
+
def request(http, uri, method='GET', body=None, headers=None,
            redirections=httplib2.DEFAULT_MAX_REDIRECTS,
            connection_type=None):
    """Make an HTTP request with an HTTP object and arguments.

    Args:
        http: httplib2.Http, an http object to be used to make requests.
        uri: string, The URI to be requested.
        method: string, The HTTP method to use for the request. Defaults
            to 'GET'.
        body: string, The payload / body in HTTP request. By default
            there is no payload.
        headers: dict, Key-value pairs of request headers. By default
            there are no headers.
        redirections: int, The number of allowed redirects for the
            request. Defaults to httplib2.DEFAULT_MAX_REDIRECTS.
        connection_type: httplib.HTTPConnection, a subclass to be used for
            establishing connection. If not set, the type
            will be determined from the ``uri``.

    Returns:
        tuple, a pair of a httplib2.Response with the status code and other
        headers and the bytes of the content returned.
    """
    # NOTE: Allowing http or http.request is temporary (See Issue 601).
    http_callable = getattr(http, 'request', http)
    return http_callable(uri, method=method, body=body, headers=headers,
                         redirections=redirections,
                         connection_type=connection_type)
+
+
# Shared Http instance returned by get_cached_http(); defined here so that
# MemoryCache is available when it is constructed.
_CACHED_HTTP = httplib2.Http(MemoryCache())
diff --git a/contrib/python/oauth2client/py2/ya.make b/contrib/python/oauth2client/py2/ya.make
new file mode 100644
index 0000000000..73c0e3882b
--- /dev/null
+++ b/contrib/python/oauth2client/py2/ya.make
@@ -0,0 +1,68 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(4.1.3)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/httplib2
+ contrib/python/pyasn1
+ contrib/python/pyasn1-modules
+ contrib/python/rsa
+ contrib/python/six
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ oauth2client._openssl_crypt
+ oauth2client._pycrypto_crypt
+ oauth2client.contrib.*
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ oauth2client/__init__.py
+ oauth2client/_helpers.py
+ oauth2client/_openssl_crypt.py
+ oauth2client/_pkce.py
+ oauth2client/_pure_python_crypt.py
+ oauth2client/_pycrypto_crypt.py
+ oauth2client/client.py
+ oauth2client/clientsecrets.py
+ oauth2client/contrib/__init__.py
+ oauth2client/contrib/_appengine_ndb.py
+ oauth2client/contrib/_metadata.py
+ oauth2client/contrib/appengine.py
+ oauth2client/contrib/devshell.py
+ oauth2client/contrib/dictionary_storage.py
+ oauth2client/contrib/django_util/__init__.py
+ oauth2client/contrib/django_util/apps.py
+ oauth2client/contrib/django_util/decorators.py
+ oauth2client/contrib/django_util/models.py
+ oauth2client/contrib/django_util/signals.py
+ oauth2client/contrib/django_util/site.py
+ oauth2client/contrib/django_util/storage.py
+ oauth2client/contrib/django_util/views.py
+ oauth2client/contrib/flask_util.py
+ oauth2client/contrib/gce.py
+ oauth2client/contrib/keyring_storage.py
+ oauth2client/contrib/multiprocess_file_storage.py
+ oauth2client/contrib/sqlalchemy.py
+ oauth2client/contrib/xsrfutil.py
+ oauth2client/crypt.py
+ oauth2client/file.py
+ oauth2client/service_account.py
+ oauth2client/tools.py
+ oauth2client/transport.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/oauth2client/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/oauth2client/py3/.dist-info/METADATA b/contrib/python/oauth2client/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..b4b28000b1
--- /dev/null
+++ b/contrib/python/oauth2client/py3/.dist-info/METADATA
@@ -0,0 +1,34 @@
+Metadata-Version: 2.1
+Name: oauth2client
+Version: 4.1.3
+Summary: OAuth 2.0 client library
+Home-page: http://github.com/google/oauth2client/
+Author: Google Inc.
+Author-email: jonwayne+oauth2client@google.com
+License: Apache 2.0
+Keywords: google oauth 2.0 http client
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Development Status :: 7 - Inactive
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Dist: httplib2 (>=0.9.1)
+Requires-Dist: pyasn1 (>=0.1.7)
+Requires-Dist: pyasn1-modules (>=0.0.5)
+Requires-Dist: rsa (>=3.1.4)
+Requires-Dist: six (>=1.6.1)
+
+oauth2client is a client library for OAuth 2.0.
+
+Note: oauth2client is now deprecated. No more features will be added to the
+ libraries and the core team is turning down support. We recommend you use
+ `google-auth <https://google-auth.readthedocs.io>`__ and
+ `oauthlib <http://oauthlib.readthedocs.io/>`__.
+
+
diff --git a/contrib/python/oauth2client/py3/.dist-info/top_level.txt b/contrib/python/oauth2client/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..c636bd5953
--- /dev/null
+++ b/contrib/python/oauth2client/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+oauth2client
diff --git a/contrib/python/oauth2client/py3/LICENSE b/contrib/python/oauth2client/py3/LICENSE
new file mode 100644
index 0000000000..c8d76dfc54
--- /dev/null
+++ b/contrib/python/oauth2client/py3/LICENSE
@@ -0,0 +1,210 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Dependent Modules
+=================
+
+This code has the following dependencies
+above and beyond the Python standard library:
+
+httplib2 - MIT License
diff --git a/contrib/python/oauth2client/py3/README.md b/contrib/python/oauth2client/py3/README.md
new file mode 100644
index 0000000000..5e7aade714
--- /dev/null
+++ b/contrib/python/oauth2client/py3/README.md
@@ -0,0 +1,33 @@
+[![Build Status](https://travis-ci.org/google/oauth2client.svg?branch=master)](https://travis-ci.org/google/oauth2client)
+[![Coverage Status](https://coveralls.io/repos/google/oauth2client/badge.svg?branch=master&service=github)](https://coveralls.io/github/google/oauth2client?branch=master)
+[![Documentation Status](https://readthedocs.org/projects/oauth2client/badge/?version=latest)](https://oauth2client.readthedocs.io/)
+
+This is a client library for accessing resources protected by OAuth 2.0.
+
+**Note**: oauth2client is now deprecated. No more features will be added to the
+libraries and the core team is turning down support. We recommend you use
+[google-auth](https://google-auth.readthedocs.io) and [oauthlib](http://oauthlib.readthedocs.io/). For more details on the deprecation, see [oauth2client deprecation](https://google-auth.readthedocs.io/en/latest/oauth2client-deprecation.html).
+
+Installation
+============
+
+To install, simply run the following command in your terminal:
+
+```bash
+$ pip install --upgrade oauth2client
+```
+
+Contributing
+============
+
+Please see the [CONTRIBUTING page][1] for more information. In particular, we
+love pull requests -- but please make sure to sign the contributor license
+agreement.
+
+Supported Python Versions
+=========================
+
+We support Python 2.7 and 3.4+. More information [in the docs][2].
+
+[1]: https://github.com/google/oauth2client/blob/master/CONTRIBUTING.md
+[2]: https://oauth2client.readthedocs.io/#supported-python-versions
diff --git a/contrib/python/oauth2client/py3/oauth2client/__init__.py b/contrib/python/oauth2client/py3/oauth2client/__init__.py
new file mode 100644
index 0000000000..92bc191d43
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/__init__.py
@@ -0,0 +1,24 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client library for using OAuth2, especially with Google APIs."""
+
+__version__ = '4.1.3'
+
+GOOGLE_AUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'
+GOOGLE_DEVICE_URI = 'https://oauth2.googleapis.com/device/code'
+GOOGLE_REVOKE_URI = 'https://oauth2.googleapis.com/revoke'
+GOOGLE_TOKEN_URI = 'https://oauth2.googleapis.com/token'
+GOOGLE_TOKEN_INFO_URI = 'https://oauth2.googleapis.com/tokeninfo'
+
diff --git a/contrib/python/oauth2client/py3/oauth2client/_helpers.py b/contrib/python/oauth2client/py3/oauth2client/_helpers.py
new file mode 100644
index 0000000000..e9123971bc
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/_helpers.py
@@ -0,0 +1,341 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for commonly used utilities."""
+
+import base64
+import functools
+import inspect
+import json
+import logging
+import os
+import warnings
+
+import six
+from six.moves import urllib
+
+
+logger = logging.getLogger(__name__)
+
+POSITIONAL_WARNING = 'WARNING'
+POSITIONAL_EXCEPTION = 'EXCEPTION'
+POSITIONAL_IGNORE = 'IGNORE'
+POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
+ POSITIONAL_IGNORE])
+
+positional_parameters_enforcement = POSITIONAL_WARNING
+
+_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
+_IS_DIR_MESSAGE = '{0}: Is a directory'
+_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
+
+
+def positional(max_positional_args):
+ """A decorator to declare that only the first N arguments my be positional.
+
+ This decorator makes it easy to support Python 3 style keyword-only
+ parameters. For example, in Python 3 it is possible to write::
+
+ def fn(pos1, *, kwonly1=None, kwonly2=None):
+ ...
+
+ All named parameters after ``*`` must be a keyword::
+
+ fn(10, 'kw1', 'kw2') # Raises exception.
+ fn(10, kwonly1='kw1') # Ok.
+
+ Example
+ ^^^^^^^
+
+ To define a function like above, do::
+
+ @positional(1)
+ def fn(pos1, kwonly1=None, kwonly2=None):
+ ...
+
+ If no default value is provided to a keyword argument, it becomes a
+ required keyword argument::
+
+ @positional(0)
+ def fn(required_kw):
+ ...
+
+ This must be called with the keyword parameter::
+
+ fn() # Raises exception.
+ fn(10) # Raises exception.
+ fn(required_kw=10) # Ok.
+
+ When defining instance or class methods always remember to account for
+ ``self`` and ``cls``::
+
+ class MyClass(object):
+
+ @positional(2)
+ def my_method(self, pos1, kwonly1=None):
+ ...
+
+ @classmethod
+ @positional(2)
+ def my_method(cls, pos1, kwonly1=None):
+ ...
+
+ The positional decorator behavior is controlled by
+ ``_helpers.positional_parameters_enforcement``, which may be set to
+ ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
+ ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
+ nothing, respectively, if a declaration is violated.
+
+ Args:
+ max_positional_args: Maximum number of positional arguments. All
+ parameters after this index must be
+ keyword only.
+
+ Returns:
+ A decorator that prevents using arguments after max_positional_args
+ from being used as positional parameters.
+
+ Raises:
+ TypeError: if a key-word only argument is provided as a positional
+ parameter, but only if
+ _helpers.positional_parameters_enforcement is set to
+ POSITIONAL_EXCEPTION.
+ """
+
+ def positional_decorator(wrapped):
+ @functools.wraps(wrapped)
+ def positional_wrapper(*args, **kwargs):
+ if len(args) > max_positional_args:
+ plural_s = ''
+ if max_positional_args != 1:
+ plural_s = 's'
+ message = ('{function}() takes at most {args_max} positional '
+ 'argument{plural} ({args_given} given)'.format(
+ function=wrapped.__name__,
+ args_max=max_positional_args,
+ args_given=len(args),
+ plural=plural_s))
+ if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
+ raise TypeError(message)
+ elif positional_parameters_enforcement == POSITIONAL_WARNING:
+ logger.warning(message)
+ return wrapped(*args, **kwargs)
+ return positional_wrapper
+
+ if isinstance(max_positional_args, six.integer_types):
+ return positional_decorator
+ else:
+ args, _, _, defaults = inspect.getargspec(max_positional_args)
+ return positional(len(args) - len(defaults))(max_positional_args)
+
+
+def scopes_to_string(scopes):
+ """Converts scope value to a string.
+
+ If scopes is a string then it is simply passed through. If scopes is an
+ iterable then a string is returned that is all the individual scopes
+ concatenated with spaces.
+
+ Args:
+ scopes: string or iterable of strings, the scopes.
+
+ Returns:
+ The scopes formatted as a single string.
+ """
+ if isinstance(scopes, six.string_types):
+ return scopes
+ else:
+ return ' '.join(scopes)
+
+
+def string_to_scopes(scopes):
+ """Converts stringified scope value to a list.
+
+ If scopes is a list then it is simply passed through. If scopes is a
+ string then a list of each individual scope is returned.
+
+ Args:
+ scopes: a string or iterable of strings, the scopes.
+
+ Returns:
+ The scopes in a list.
+ """
+ if not scopes:
+ return []
+ elif isinstance(scopes, six.string_types):
+ return scopes.split(' ')
+ else:
+ return scopes
+
+
+def parse_unique_urlencoded(content):
+ """Parses unique key-value parameters from urlencoded content.
+
+ Args:
+ content: string, URL-encoded key-value pairs.
+
+ Returns:
+ dict, The key-value pairs from ``content``.
+
+ Raises:
+ ValueError: if one of the keys is repeated.
+ """
+ urlencoded_params = urllib.parse.parse_qs(content)
+ params = {}
+ for key, value in six.iteritems(urlencoded_params):
+ if len(value) != 1:
+ msg = ('URL-encoded content contains a repeated value:'
+ '%s -> %s' % (key, ', '.join(value)))
+ raise ValueError(msg)
+ params[key] = value[0]
+ return params
+
+
+def update_query_params(uri, params):
+ """Updates a URI with new query parameters.
+
+ If a given key from ``params`` is repeated in the ``uri``, then
+ the URI will be considered invalid and an error will occur.
+
+ If the URI is valid, then each value from ``params`` will
+ replace the corresponding value in the query parameters (if
+ it exists).
+
+ Args:
+ uri: string, A valid URI, with potential existing query parameters.
+ params: dict, A dictionary of query parameters.
+
+ Returns:
+ The same URI but with the new query parameters added.
+ """
+ parts = urllib.parse.urlparse(uri)
+ query_params = parse_unique_urlencoded(parts.query)
+ query_params.update(params)
+ new_query = urllib.parse.urlencode(query_params)
+ new_parts = parts._replace(query=new_query)
+ return urllib.parse.urlunparse(new_parts)
+
+
+def _add_query_parameter(url, name, value):
+ """Adds a query parameter to a url.
+
+ Replaces the current value if it already exists in the URL.
+
+ Args:
+ url: string, url to add the query parameter to.
+ name: string, query parameter name.
+ value: string, query parameter value.
+
+ Returns:
+ Updated query parameter. Does not update the url if value is None.
+ """
+ if value is None:
+ return url
+ else:
+ return update_query_params(url, {name: value})
+
+
+def validate_file(filename):
+ if os.path.islink(filename):
+ raise IOError(_SYM_LINK_MESSAGE.format(filename))
+ elif os.path.isdir(filename):
+ raise IOError(_IS_DIR_MESSAGE.format(filename))
+ elif not os.path.isfile(filename):
+ warnings.warn(_MISSING_FILE_MESSAGE.format(filename))
+
+
+def _parse_pem_key(raw_key_input):
+ """Identify and extract PEM keys.
+
+ Determines whether the given key is in the format of PEM key, and extracts
+ the relevant part of the key if it is.
+
+ Args:
+ raw_key_input: The contents of a private key file (either PEM or
+ PKCS12).
+
+ Returns:
+ string, The actual key if the contents are from a PEM file, or
+ else None.
+ """
+ offset = raw_key_input.find(b'-----BEGIN ')
+ if offset != -1:
+ return raw_key_input[offset:]
+
+
+def _json_encode(data):
+ return json.dumps(data, separators=(',', ':'))
+
+
+def _to_bytes(value, encoding='ascii'):
+ """Converts a string value to bytes, if necessary.
+
+ Unfortunately, ``six.b`` is insufficient for this task since in
+ Python2 it does not modify ``unicode`` objects.
+
+ Args:
+ value: The string/bytes value to be converted.
+ encoding: The encoding to use to convert unicode to bytes. Defaults
+ to "ascii", which will not allow any characters from ordinals
+ larger than 127. Other useful values are "latin-1", which
+ will only allow byte ordinals (up to 255) and "utf-8",
+ which will encode any unicode that needs to be.
+
+ Returns:
+ The original value converted to bytes (if unicode) or as passed in
+ if it started out as bytes.
+
+ Raises:
+ ValueError if the value could not be converted to bytes.
+ """
+ result = (value.encode(encoding)
+ if isinstance(value, six.text_type) else value)
+ if isinstance(result, six.binary_type):
+ return result
+ else:
+ raise ValueError('{0!r} could not be converted to bytes'.format(value))
+
+
+def _from_bytes(value):
+ """Converts bytes to a string value, if necessary.
+
+ Args:
+ value: The string/bytes value to be converted.
+
+ Returns:
+ The original value converted to unicode (if bytes) or as passed in
+ if it started out as unicode.
+
+ Raises:
+ ValueError if the value could not be converted to unicode.
+ """
+ result = (value.decode('utf-8')
+ if isinstance(value, six.binary_type) else value)
+ if isinstance(result, six.text_type):
+ return result
+ else:
+ raise ValueError(
+ '{0!r} could not be converted to unicode'.format(value))
+
+
+def _urlsafe_b64encode(raw_bytes):
+ raw_bytes = _to_bytes(raw_bytes, encoding='utf-8')
+ return base64.urlsafe_b64encode(raw_bytes).rstrip(b'=')
+
+
+def _urlsafe_b64decode(b64string):
+ # Guard against unicode strings, which base64 can't handle.
+ b64string = _to_bytes(b64string)
+ padded = b64string + b'=' * (4 - len(b64string) % 4)
+ return base64.urlsafe_b64decode(padded)
diff --git a/contrib/python/oauth2client/py3/oauth2client/_openssl_crypt.py b/contrib/python/oauth2client/py3/oauth2client/_openssl_crypt.py
new file mode 100644
index 0000000000..77fac74354
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/_openssl_crypt.py
@@ -0,0 +1,136 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""OpenSSL Crypto-related routines for oauth2client."""
+
+from OpenSSL import crypto
+
+from oauth2client import _helpers
+
+
+class OpenSSLVerifier(object):
+ """Verifies the signature on a message."""
+
+ def __init__(self, pubkey):
+ """Constructor.
+
+ Args:
+ pubkey: OpenSSL.crypto.PKey, The public key to verify with.
+ """
+ self._pubkey = pubkey
+
+ def verify(self, message, signature):
+ """Verifies a message against a signature.
+
+ Args:
+ message: string or bytes, The message to verify. If string, will be
+ encoded to bytes as utf-8.
+ signature: string or bytes, The signature on the message. If string,
+ will be encoded to bytes as utf-8.
+
+ Returns:
+ True if message was signed by the private key associated with the
+ public key that this object was constructed with.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ signature = _helpers._to_bytes(signature, encoding='utf-8')
+ try:
+ crypto.verify(self._pubkey, signature, message, 'sha256')
+ return True
+ except crypto.Error:
+ return False
+
+ @staticmethod
+ def from_string(key_pem, is_x509_cert):
+ """Construct a Verifier instance from a string.
+
+ Args:
+ key_pem: string, public key in PEM format.
+ is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+ is expected to be an RSA key in PEM format.
+
+ Returns:
+ Verifier instance.
+
+ Raises:
+ OpenSSL.crypto.Error: if the key_pem can't be parsed.
+ """
+ key_pem = _helpers._to_bytes(key_pem)
+ if is_x509_cert:
+ pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
+ else:
+ pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
+ return OpenSSLVerifier(pubkey)
+
+
+class OpenSSLSigner(object):
+ """Signs messages with a private key."""
+
+ def __init__(self, pkey):
+ """Constructor.
+
+ Args:
+ pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
+ """
+ self._key = pkey
+
+ def sign(self, message):
+ """Signs a message.
+
+ Args:
+ message: bytes, Message to be signed.
+
+ Returns:
+ string, The signature of the message for the given key.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return crypto.sign(self._key, message, 'sha256')
+
+ @staticmethod
+ def from_string(key, password=b'notasecret'):
+ """Construct a Signer instance from a string.
+
+ Args:
+ key: string, private key in PKCS12 or PEM format.
+ password: string, password for the private key file.
+
+ Returns:
+ Signer instance.
+
+ Raises:
+ OpenSSL.crypto.Error if the key can't be parsed.
+ """
+ key = _helpers._to_bytes(key)
+ parsed_pem_key = _helpers._parse_pem_key(key)
+ if parsed_pem_key:
+ pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
+ else:
+ password = _helpers._to_bytes(password, encoding='utf-8')
+ pkey = crypto.load_pkcs12(key, password).get_privatekey()
+ return OpenSSLSigner(pkey)
+
+
+def pkcs12_key_as_pem(private_key_bytes, private_key_password):
+ """Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.
+
+ Args:
+ private_key_bytes: Bytes. PKCS#12 key in DER format.
+ private_key_password: String. Password for PKCS#12 key.
+
+ Returns:
+ String. PEM contents of ``private_key_bytes``.
+ """
+ private_key_password = _helpers._to_bytes(private_key_password)
+ pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)
+ return crypto.dump_privatekey(crypto.FILETYPE_PEM,
+ pkcs12.get_privatekey())
diff --git a/contrib/python/oauth2client/py3/oauth2client/_pkce.py b/contrib/python/oauth2client/py3/oauth2client/_pkce.py
new file mode 100644
index 0000000000..e4952d8c2f
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/_pkce.py
@@ -0,0 +1,67 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utility functions for implementing Proof Key for Code Exchange (PKCE) by OAuth
+Public Clients
+
+See RFC7636.
+"""
+
+import base64
+import hashlib
+import os
+
+
+def code_verifier(n_bytes=64):
+ """
+ Generates a 'code_verifier' as described in section 4.1 of RFC 7636.
+
+ This is a 'high-entropy cryptographic random string' that will be
+ impractical for an attacker to guess.
+
+ Args:
+ n_bytes: integer between 31 and 96, inclusive. default: 64
+ number of bytes of entropy to include in verifier.
+
+ Returns:
+ Bytestring, representing urlsafe base64-encoded random data.
+ """
+ verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')
+ # https://tools.ietf.org/html/rfc7636#section-4.1
+ # minimum length of 43 characters and a maximum length of 128 characters.
+ if len(verifier) < 43:
+ raise ValueError("Verifier too short. n_bytes must be > 30.")
+ elif len(verifier) > 128:
+ raise ValueError("Verifier too long. n_bytes must be < 97.")
+ else:
+ return verifier
+
+
+def code_challenge(verifier):
+ """
+ Creates a 'code_challenge' as described in section 4.2 of RFC 7636
+ by taking the sha256 hash of the verifier and then urlsafe
+ base64-encoding it.
+
+ Args:
+ verifier: bytestring, representing a code_verifier as generated by
+ code_verifier().
+
+ Returns:
+ Bytestring, representing a urlsafe base64-encoded sha256 hash digest,
+ without '=' padding.
+ """
+ digest = hashlib.sha256(verifier).digest()
+ return base64.urlsafe_b64encode(digest).rstrip(b'=')
diff --git a/contrib/python/oauth2client/py3/oauth2client/_pure_python_crypt.py b/contrib/python/oauth2client/py3/oauth2client/_pure_python_crypt.py
new file mode 100644
index 0000000000..2c5d43aae9
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/_pure_python_crypt.py
@@ -0,0 +1,184 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pure Python crypto-related routines for oauth2client.
+
+Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
+to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
+certificates.
+"""
+
+from pyasn1.codec.der import decoder
+from pyasn1_modules import pem
+from pyasn1_modules.rfc2459 import Certificate
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+import rsa
+import six
+
+from oauth2client import _helpers
+
+
+_PKCS12_ERROR = r"""\
+PKCS12 format is not supported by the RSA library.
+Either install PyOpenSSL, or please convert .p12 format
+to .pem format:
+ $ cat key.p12 | \
+ > openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
+ > openssl rsa > key.pem
+"""
+
+_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
+_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
+ '-----END RSA PRIVATE KEY-----')
+_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
+ '-----END PRIVATE KEY-----')
+_PKCS8_SPEC = PrivateKeyInfo()
+
+
+def _bit_list_to_bytes(bit_list):
+ """Converts an iterable of 1's and 0's to bytes.
+
+ Combines the list 8 at a time, treating each group of 8 bits
+ as a single byte.
+ """
+ num_bits = len(bit_list)
+ byte_vals = bytearray()
+ for start in six.moves.xrange(0, num_bits, 8):
+ curr_bits = bit_list[start:start + 8]
+ char_val = sum(val * digit
+ for val, digit in zip(_POW2, curr_bits))
+ byte_vals.append(char_val)
+ return bytes(byte_vals)
+
+
+class RsaVerifier(object):
+ """Verifies the signature on a message.
+
+ Args:
+ pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
+ """
+
+ def __init__(self, pubkey):
+ self._pubkey = pubkey
+
+ def verify(self, message, signature):
+ """Verifies a message against a signature.
+
+ Args:
+ message: string or bytes, The message to verify. If string, will be
+ encoded to bytes as utf-8.
+ signature: string or bytes, The signature on the message. If
+ string, will be encoded to bytes as utf-8.
+
+ Returns:
+ True if message was signed by the private key associated with the
+ public key that this object was constructed with.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ try:
+ return rsa.pkcs1.verify(message, signature, self._pubkey)
+ except (ValueError, rsa.pkcs1.VerificationError):
+ return False
+
+ @classmethod
+ def from_string(cls, key_pem, is_x509_cert):
+ """Construct an RsaVerifier instance from a string.
+
+ Args:
+ key_pem: string, public key in PEM format.
+ is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+ is expected to be an RSA key in PEM format.
+
+ Returns:
+ RsaVerifier instance.
+
+ Raises:
+ ValueError: if the key_pem can't be parsed. In either case, error
+ will begin with 'No PEM start marker'. If
+ ``is_x509_cert`` is True, will fail to find the
+ "-----BEGIN CERTIFICATE-----" error, otherwise fails
+ to find "-----BEGIN RSA PUBLIC KEY-----".
+ """
+ key_pem = _helpers._to_bytes(key_pem)
+ if is_x509_cert:
+ der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
+ asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
+ if remaining != b'':
+ raise ValueError('Unused bytes', remaining)
+
+ cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
+ key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
+ pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
+ else:
+ pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
+ return cls(pubkey)
+
+
+class RsaSigner(object):
+ """Signs messages with a private key.
+
+ Args:
+ pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
+ """
+
+ def __init__(self, pkey):
+ self._key = pkey
+
+ def sign(self, message):
+ """Signs a message.
+
+ Args:
+ message: bytes, Message to be signed.
+
+ Returns:
+ string, The signature of the message for the given key.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return rsa.pkcs1.sign(message, self._key, 'SHA-256')
+
+ @classmethod
+ def from_string(cls, key, password='notasecret'):
+ """Construct an RsaSigner instance from a string.
+
+ Args:
+ key: string, private key in PEM format.
+ password: string, password for private key file. Unused for PEM
+ files.
+
+ Returns:
+ RsaSigner instance.
+
+ Raises:
+ ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
+ PEM format.
+ """
+ key = _helpers._from_bytes(key) # pem expects str in Py3
+ marker_id, key_bytes = pem.readPemBlocksFromFile(
+ six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
+
+ if marker_id == 0:
+ pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
+ format='DER')
+ elif marker_id == 1:
+ key_info, remaining = decoder.decode(
+ key_bytes, asn1Spec=_PKCS8_SPEC)
+ if remaining != b'':
+ raise ValueError('Unused bytes', remaining)
+ pkey_info = key_info.getComponentByName('privateKey')
+ pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
+ format='DER')
+ else:
+ raise ValueError('No key could be detected.')
+
+ return cls(pkey)
diff --git a/contrib/python/oauth2client/py3/oauth2client/_pycrypto_crypt.py b/contrib/python/oauth2client/py3/oauth2client/_pycrypto_crypt.py
new file mode 100644
index 0000000000..fd2ce0cd72
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/_pycrypto_crypt.py
@@ -0,0 +1,124 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""pyCrypto Crypto-related routines for oauth2client."""
+
+from Crypto.Hash import SHA256
+from Crypto.PublicKey import RSA
+from Crypto.Signature import PKCS1_v1_5
+from Crypto.Util.asn1 import DerSequence
+
+from oauth2client import _helpers
+
+
+class PyCryptoVerifier(object):
+ """Verifies the signature on a message."""
+
+ def __init__(self, pubkey):
+ """Constructor.
+
+ Args:
+ pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify
+ with.
+ """
+ self._pubkey = pubkey
+
+ def verify(self, message, signature):
+ """Verifies a message against a signature.
+
+ Args:
+ message: string or bytes, The message to verify. If string, will be
+ encoded to bytes as utf-8.
+ signature: string or bytes, The signature on the message.
+
+ Returns:
+ True if message was signed by the private key associated with the
+ public key that this object was constructed with.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return PKCS1_v1_5.new(self._pubkey).verify(
+ SHA256.new(message), signature)
+
+ @staticmethod
+ def from_string(key_pem, is_x509_cert):
+ """Construct a Verifier instance from a string.
+
+ Args:
+ key_pem: string, public key in PEM format.
+ is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+ is expected to be an RSA key in PEM format.
+
+ Returns:
+ Verifier instance.
+ """
+ if is_x509_cert:
+ key_pem = _helpers._to_bytes(key_pem)
+ pemLines = key_pem.replace(b' ', b'').split()
+ certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[1:-1]))
+ certSeq = DerSequence()
+ certSeq.decode(certDer)
+ tbsSeq = DerSequence()
+ tbsSeq.decode(certSeq[0])
+ pubkey = RSA.importKey(tbsSeq[6])
+ else:
+ pubkey = RSA.importKey(key_pem)
+ return PyCryptoVerifier(pubkey)
+
+
+class PyCryptoSigner(object):
+ """Signs messages with a private key."""
+
+ def __init__(self, pkey):
+ """Constructor.
+
+ Args:
+ pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
+ """
+ self._key = pkey
+
+ def sign(self, message):
+ """Signs a message.
+
+ Args:
+ message: string, Message to be signed.
+
+ Returns:
+ string, The signature of the message for the given key.
+ """
+ message = _helpers._to_bytes(message, encoding='utf-8')
+ return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
+
+ @staticmethod
+ def from_string(key, password='notasecret'):
+ """Construct a Signer instance from a string.
+
+ Args:
+ key: string, private key in PEM format.
+ password: string, password for private key file. Unused for PEM
+ files.
+
+ Returns:
+ Signer instance.
+
+ Raises:
+ NotImplementedError if the key isn't in PEM format.
+ """
+ parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))
+ if parsed_pem_key:
+ pkey = RSA.importKey(parsed_pem_key)
+ else:
+ raise NotImplementedError(
+ 'No key in PEM format was detected. This implementation '
+ 'can only use the PyCrypto library for keys in PEM '
+ 'format.')
+ return PyCryptoSigner(pkey)
diff --git a/contrib/python/oauth2client/py3/oauth2client/client.py b/contrib/python/oauth2client/py3/oauth2client/client.py
new file mode 100644
index 0000000000..7618960e44
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/client.py
@@ -0,0 +1,2170 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""An OAuth 2.0 client.
+
+Tools for interacting with OAuth 2.0 protected resources.
+"""
+
+import collections
+import copy
+import datetime
+import json
+import logging
+import os
+import shutil
+import socket
+import sys
+import tempfile
+
+import six
+from six.moves import http_client
+from six.moves import urllib
+
+import oauth2client
+from oauth2client import _helpers
+from oauth2client import _pkce
+from oauth2client import clientsecrets
+from oauth2client import transport
+
+
+HAS_OPENSSL = False
+HAS_CRYPTO = False
+try:
+ from oauth2client import crypt
+ HAS_CRYPTO = True
+ HAS_OPENSSL = crypt.OpenSSLVerifier is not None
+except ImportError: # pragma: NO COVER
+ pass
+
+
+logger = logging.getLogger(__name__)
+
+# Expiry is stored in RFC3339 UTC format
+EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
+
+# Which certs to use to validate id_tokens received.
+ID_TOKEN_VERIFICATION_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
+# This symbol previously had a typo in the name; we keep the old name
+# around for now, but will remove it in the future.
+ID_TOKEN_VERIFICATON_CERTS = ID_TOKEN_VERIFICATION_CERTS
+
+# Constant to use for the out of band OAuth 2.0 flow.
+OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
+
+# The value representing user credentials.
+AUTHORIZED_USER = 'authorized_user'
+
+# The value representing service account credentials.
+SERVICE_ACCOUNT = 'service_account'
+
+# The environment variable pointing the file with local
+# Application Default Credentials.
+GOOGLE_APPLICATION_CREDENTIALS = 'GOOGLE_APPLICATION_CREDENTIALS'
+# The ~/.config subdirectory containing gcloud credentials. Intended
+# to be swapped out in tests.
+_CLOUDSDK_CONFIG_DIRECTORY = 'gcloud'
+# The environment variable name which can replace ~/.config if set.
+_CLOUDSDK_CONFIG_ENV_VAR = 'CLOUDSDK_CONFIG'
+
+# The error message we show users when we can't find the Application
+# Default Credentials.
+ADC_HELP_MSG = (
+ 'The Application Default Credentials are not available. They are '
+ 'available if running in Google Compute Engine. Otherwise, the '
+ 'environment variable ' +
+ GOOGLE_APPLICATION_CREDENTIALS +
+ ' must be defined pointing to a file defining the credentials. See '
+ 'https://developers.google.com/accounts/docs/'
+ 'application-default-credentials for more information.')
+
+_WELL_KNOWN_CREDENTIALS_FILE = 'application_default_credentials.json'
+
+# The access token along with the seconds in which it expires.
+AccessTokenInfo = collections.namedtuple(
+ 'AccessTokenInfo', ['access_token', 'expires_in'])
+
+DEFAULT_ENV_NAME = 'UNKNOWN'
+
+ # If set to True, _get_environment avoids the GCE check (_detect_gce_environment)
+NO_GCE_CHECK = os.getenv('NO_GCE_CHECK', 'False')
+
+# Timeout in seconds to wait for the GCE metadata server when detecting the
+# GCE environment.
+try:
+ GCE_METADATA_TIMEOUT = int(os.getenv('GCE_METADATA_TIMEOUT', 3))
+except ValueError: # pragma: NO COVER
+ GCE_METADATA_TIMEOUT = 3
+
+_SERVER_SOFTWARE = 'SERVER_SOFTWARE'
+_GCE_METADATA_URI = 'http://' + os.getenv('GCE_METADATA_IP', '169.254.169.254')
+_METADATA_FLAVOR_HEADER = 'metadata-flavor' # lowercase header
+_DESIRED_METADATA_FLAVOR = 'Google'
+_GCE_HEADERS = {_METADATA_FLAVOR_HEADER: _DESIRED_METADATA_FLAVOR}
+
+# Expose utcnow() at module level to allow for
+# easier testing (by replacing with a stub).
+_UTCNOW = datetime.datetime.utcnow
+
+# NOTE: These names were previously defined in this module but have been
+ # moved into `oauth2client.transport`.
+clean_headers = transport.clean_headers
+MemoryCache = transport.MemoryCache
+REFRESH_STATUS_CODES = transport.REFRESH_STATUS_CODES
+
+
+class SETTINGS(object):
+ """Settings namespace for globally defined values."""
+ env_name = None
+
+
+class Error(Exception):
+ """Base error for this module."""
+
+
+class FlowExchangeError(Error):
+ """Error trying to exchange an authorization grant for an access token."""
+
+
+class AccessTokenRefreshError(Error):
+ """Error trying to refresh an expired access token."""
+
+
+class HttpAccessTokenRefreshError(AccessTokenRefreshError):
+ """Error (with HTTP status) trying to refresh an expired access token."""
+ def __init__(self, *args, **kwargs):
+ super(HttpAccessTokenRefreshError, self).__init__(*args)
+ self.status = kwargs.get('status')
+
+
+class TokenRevokeError(Error):
+ """Error trying to revoke a token."""
+
+
+class UnknownClientSecretsFlowError(Error):
+ """The client secrets file called for an unknown type of OAuth 2.0 flow."""
+
+
+class AccessTokenCredentialsError(Error):
+ """Having only the access_token means no refresh is possible."""
+
+
+class VerifyJwtTokenError(Error):
+ """Could not retrieve certificates for validation."""
+
+
+class NonAsciiHeaderError(Error):
+ """Header names and values must be ASCII strings."""
+
+
+class ApplicationDefaultCredentialsError(Error):
+ """Error retrieving the Application Default Credentials."""
+
+
+class OAuth2DeviceCodeError(Error):
+ """Error trying to retrieve a device code."""
+
+
+class CryptoUnavailableError(Error, NotImplementedError):
+ """Raised when a crypto library is required, but none is available."""
+
+
+def _parse_expiry(expiry):
+ if expiry and isinstance(expiry, datetime.datetime):
+ return expiry.strftime(EXPIRY_FORMAT)
+ else:
+ return None
+
+
+class Credentials(object):
+ """Base class for all Credentials objects.
+
+ Subclasses must define an authorize() method that applies the credentials
+ to an HTTP transport.
+
+ Subclasses must also specify a classmethod named 'from_json' that takes a
+ JSON string as input and returns an instantiated Credentials object.
+ """
+
+ NON_SERIALIZED_MEMBERS = frozenset(['store'])
+
+ def authorize(self, http):
+ """Take an httplib2.Http instance (or equivalent) and authorizes it.
+
+ Authorizes it for the set of credentials, usually by replacing
+ http.request() with a method that adds in the appropriate headers and
+ then delegates to the original Http.request() method.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the refresh
+ request.
+ """
+ raise NotImplementedError
+
+ def refresh(self, http):
+ """Forces a refresh of the access_token.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the refresh
+ request.
+ """
+ raise NotImplementedError
+
+ def revoke(self, http):
+ """Revokes a refresh_token and makes the credentials void.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the revoke
+ request.
+ """
+ raise NotImplementedError
+
+ def apply(self, headers):
+ """Add the authorization to the headers.
+
+ Args:
+ headers: dict, the headers to add the Authorization header to.
+ """
+ raise NotImplementedError
+
+ def _to_json(self, strip, to_serialize=None):
+ """Utility function that creates JSON repr. of a Credentials object.
+
+ Args:
+ strip: array, An array of names of members to exclude from the
+ JSON.
+ to_serialize: dict, (Optional) The properties for this object
+ that will be serialized. This allows callers to
+ modify before serializing.
+
+ Returns:
+ string, a JSON representation of this instance, suitable to pass to
+ from_json().
+ """
+ curr_type = self.__class__
+ if to_serialize is None:
+ to_serialize = copy.copy(self.__dict__)
+ else:
+ # Assumes it is a str->str dictionary, so we don't deep copy.
+ to_serialize = copy.copy(to_serialize)
+ for member in strip:
+ if member in to_serialize:
+ del to_serialize[member]
+ to_serialize['token_expiry'] = _parse_expiry(
+ to_serialize.get('token_expiry'))
+ # Add in information we will need later to reconstitute this instance.
+ to_serialize['_class'] = curr_type.__name__
+ to_serialize['_module'] = curr_type.__module__
+ for key, val in to_serialize.items():
+ if isinstance(val, bytes):
+ to_serialize[key] = val.decode('utf-8')
+ if isinstance(val, set):
+ to_serialize[key] = list(val)
+ return json.dumps(to_serialize)
+
+ def to_json(self):
+ """Creating a JSON representation of an instance of Credentials.
+
+ Returns:
+ string, a JSON representation of this instance, suitable to pass to
+ from_json().
+ """
+ return self._to_json(self.NON_SERIALIZED_MEMBERS)
+
+ @classmethod
+ def new_from_json(cls, json_data):
+ """Utility class method to instantiate a Credentials subclass from JSON.
+
+ Expects the JSON string to have been produced by to_json().
+
+ Args:
+ json_data: string or bytes, JSON from to_json().
+
+ Returns:
+ An instance of the subclass of Credentials that was serialized with
+ to_json().
+ """
+ json_data_as_unicode = _helpers._from_bytes(json_data)
+ data = json.loads(json_data_as_unicode)
+ # Find and call the right classmethod from_json() to restore
+ # the object.
+ module_name = data['_module']
+ try:
+ module_obj = __import__(module_name)
+ except ImportError:
+ # In case there's an object from the old package structure,
+ # update it
+ module_name = module_name.replace('.googleapiclient', '')
+ module_obj = __import__(module_name)
+
+ module_obj = __import__(module_name,
+ fromlist=module_name.split('.')[:-1])
+ kls = getattr(module_obj, data['_class'])
+ return kls.from_json(json_data_as_unicode)
+
+ @classmethod
+ def from_json(cls, unused_data):
+ """Instantiate a Credentials object from a JSON description of it.
+
+ The JSON should have been produced by calling .to_json() on the object.
+
+ Args:
+ unused_data: dict, A deserialized JSON object.
+
+ Returns:
+ An instance of a Credentials subclass.
+ """
+ return Credentials()
+
+
+class Flow(object):
+ """Base class for all Flow objects."""
+ pass
+
+
+class Storage(object):
+ """Base class for all Storage objects.
+
+ Store and retrieve a single credential. This class supports locking
+ such that multiple processes and threads can operate on a single
+ store.
+ """
+ def __init__(self, lock=None):
+ """Create a Storage instance.
+
+ Args:
+ lock: An optional threading.Lock-like object. Must implement at
+ least acquire() and release(). Does not need to be
+ re-entrant.
+ """
+ self._lock = lock
+
+ def acquire_lock(self):
+ """Acquires any lock necessary to access this Storage.
+
+ This lock is not reentrant.
+ """
+ if self._lock is not None:
+ self._lock.acquire()
+
+ def release_lock(self):
+ """Release the Storage lock.
+
+ Trying to release a lock that isn't held will result in a
+ RuntimeError in the case of a threading.Lock or multiprocessing.Lock.
+ """
+ if self._lock is not None:
+ self._lock.release()
+
+ def locked_get(self):
+ """Retrieve credential.
+
+ The Storage lock must be held when this is called.
+
+ Returns:
+ oauth2client.client.Credentials
+ """
+ raise NotImplementedError
+
+ def locked_put(self, credentials):
+ """Write a credential.
+
+ The Storage lock must be held when this is called.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ raise NotImplementedError
+
+ def locked_delete(self):
+ """Delete a credential.
+
+ The Storage lock must be held when this is called.
+ """
+ raise NotImplementedError
+
+ def get(self):
+ """Retrieve credential.
+
+ The Storage lock must *not* be held when this is called.
+
+ Returns:
+ oauth2client.client.Credentials
+ """
+ self.acquire_lock()
+ try:
+ return self.locked_get()
+ finally:
+ self.release_lock()
+
+ def put(self, credentials):
+ """Write a credential.
+
+ The Storage lock must *not* be held when this is called.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ self.acquire_lock()
+ try:
+ self.locked_put(credentials)
+ finally:
+ self.release_lock()
+
+ def delete(self):
+ """Delete credential.
+
+ Frees any resources associated with storing the credential.
+ The Storage lock must *not* be held when this is called.
+
+ Returns:
+ None
+ """
+ self.acquire_lock()
+ try:
+ return self.locked_delete()
+ finally:
+ self.release_lock()
+
+
+class OAuth2Credentials(Credentials):
+ """Credentials object for OAuth 2.0.
+
+ Credentials can be applied to an httplib2.Http object using the authorize()
+ method, which then adds the OAuth 2.0 access token to each request.
+
+ OAuth2Credentials objects may be safely pickled and unpickled.
+ """
+
+ @_helpers.positional(8)
+ def __init__(self, access_token, client_id, client_secret, refresh_token,
+ token_expiry, token_uri, user_agent, revoke_uri=None,
+ id_token=None, token_response=None, scopes=None,
+ token_info_uri=None, id_token_jwt=None):
+ """Create an instance of OAuth2Credentials.
+
+ This constructor is not usually called by the user, instead
+ OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
+
+ Args:
+ access_token: string, access token.
+ client_id: string, client identifier.
+ client_secret: string, client secret.
+ refresh_token: string, refresh token.
+ token_expiry: datetime, when the access_token expires.
+ token_uri: string, URI of token endpoint.
+ user_agent: string, The HTTP User-Agent to provide for this
+ application.
+ revoke_uri: string, URI for revoke endpoint. Defaults to None; a
+ token can't be revoked if this is None.
+ id_token: object, The identity of the resource owner.
+ token_response: dict, the decoded response to the token request.
+ None if a token hasn't been requested yet. Stored
+ because some providers (e.g. wordpress.com) include
+ extra fields that clients may want.
+ scopes: list, authorized scopes for these credentials.
+ token_info_uri: string, the URI for the token info endpoint.
+ Defaults to None; scopes can not be refreshed if
+ this is None.
+ id_token_jwt: string, the encoded and signed identity JWT. The
+ decoded version of this is stored in id_token.
+
+ Notes:
+ store: callable, A callable that when passed a Credential
+ will store the credential back to where it came from.
+ This is needed to store the latest access_token if it
+ has expired and been refreshed.
+ """
+ self.access_token = access_token
+ self.client_id = client_id
+ self.client_secret = client_secret
+ self.refresh_token = refresh_token
+ self.store = None
+ self.token_expiry = token_expiry
+ self.token_uri = token_uri
+ self.user_agent = user_agent
+ self.revoke_uri = revoke_uri
+ self.id_token = id_token
+ self.id_token_jwt = id_token_jwt
+ self.token_response = token_response
+ self.scopes = set(_helpers.string_to_scopes(scopes or []))
+ self.token_info_uri = token_info_uri
+
+ # True if the credentials have been revoked or expired and can't be
+ # refreshed.
+ self.invalid = False
+
+ def authorize(self, http):
+ """Authorize an httplib2.Http instance with these credentials.
+
+ The modified http.request method will add authentication headers to
+ each request and will refresh access_tokens when a 401 is received on a
+ request. In addition the http.request method has a credentials
+ property, http.request.credentials, which is the Credentials object
+ that authorized it.
+
+ Args:
+ http: An instance of ``httplib2.Http`` or something that acts
+ like it.
+
+ Returns:
+ A modified instance of http that was passed in.
+
+ Example::
+
+ h = httplib2.Http()
+ h = credentials.authorize(h)
+
+ You can't create a new OAuth subclass of httplib2.Authentication
+ because it never gets passed the absolute URI, which is needed for
+ signing. So instead we have to overload 'request' with a closure
+ that adds in the Authorization header and then calls the original
+ version of 'request()'.
+ """
+ transport.wrap_http_for_auth(self, http)
+ return http
+
+ def refresh(self, http):
+ """Forces a refresh of the access_token.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the refresh
+ request.
+ """
+ self._refresh(http)
+
+ def revoke(self, http):
+ """Revokes a refresh_token and makes the credentials void.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the revoke
+ request.
+ """
+ self._revoke(http)
+
+ def apply(self, headers):
+ """Add the authorization to the headers.
+
+ Args:
+ headers: dict, the headers to add the Authorization header to.
+ """
+ headers['Authorization'] = 'Bearer ' + self.access_token
+
+ def has_scopes(self, scopes):
+ """Verify that the credentials are authorized for the given scopes.
+
+ Returns True if the credentials authorized scopes contain all of the
+ scopes given.
+
+ Args:
+ scopes: list or string, the scopes to check.
+
+ Notes:
+ There are cases where the credentials are unaware of which scopes
+ are authorized. Notably, credentials obtained and stored before
+ this code was added will not have scopes, AccessTokenCredentials do
+ not have scopes. In both cases, you can use refresh_scopes() to
+ obtain the canonical set of scopes.
+ """
+ scopes = _helpers.string_to_scopes(scopes)
+ return set(scopes).issubset(self.scopes)
+
+ def retrieve_scopes(self, http):
+ """Retrieves the canonical list of scopes for this access token.
+
+ Gets the scopes from the OAuth2 provider.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the refresh
+ request.
+
+ Returns:
+ A set of strings containing the canonical list of scopes.
+ """
+ self._retrieve_scopes(http)
+ return self.scopes
+
+ @classmethod
+ def from_json(cls, json_data):
+ """Instantiate a Credentials object from a JSON description of it.
+
+ The JSON should have been produced by calling .to_json() on the object.
+
+ Args:
+ json_data: string or bytes, JSON to deserialize.
+
+ Returns:
+ An instance of a Credentials subclass.
+ """
+ data = json.loads(_helpers._from_bytes(json_data))
+ if (data.get('token_expiry') and
+ not isinstance(data['token_expiry'], datetime.datetime)):
+ try:
+ data['token_expiry'] = datetime.datetime.strptime(
+ data['token_expiry'], EXPIRY_FORMAT)
+ except ValueError:
+ data['token_expiry'] = None
+ retval = cls(
+ data['access_token'],
+ data['client_id'],
+ data['client_secret'],
+ data['refresh_token'],
+ data['token_expiry'],
+ data['token_uri'],
+ data['user_agent'],
+ revoke_uri=data.get('revoke_uri', None),
+ id_token=data.get('id_token', None),
+ id_token_jwt=data.get('id_token_jwt', None),
+ token_response=data.get('token_response', None),
+ scopes=data.get('scopes', None),
+ token_info_uri=data.get('token_info_uri', None))
+ retval.invalid = data['invalid']
+ return retval
+
+ @property
+ def access_token_expired(self):
+ """True if the credential is expired or invalid.
+
+ If the token_expiry isn't set, we assume the token doesn't expire.
+ """
+ if self.invalid:
+ return True
+
+ if not self.token_expiry:
+ return False
+
+ now = _UTCNOW()
+ if now >= self.token_expiry:
+ logger.info('access_token is expired. Now: %s, token_expiry: %s',
+ now, self.token_expiry)
+ return True
+ return False
+
+ def get_access_token(self, http=None):
+ """Return the access token and its expiration information.
+
+ If the token does not exist, get one.
+ If the token expired, refresh it.
+ """
+ if not self.access_token or self.access_token_expired:
+ if not http:
+ http = transport.get_http_object()
+ self.refresh(http)
+ return AccessTokenInfo(access_token=self.access_token,
+ expires_in=self._expires_in())
+
+ def set_store(self, store):
+ """Set the Storage for the credential.
+
+ Args:
+ store: Storage, an implementation of Storage object.
+ This is needed to store the latest access_token if it
+ has expired and been refreshed. This implementation uses
+ locking to check for updates before updating the
+ access_token.
+ """
+ self.store = store
+
+ def _expires_in(self):
+ """Return the number of seconds until this token expires.
+
+ If token_expiry is in the past, this method will return 0, meaning the
+ token has already expired.
+
+ If token_expiry is None, this method will return None. Note that
+ returning 0 in such a case would not be fair: the token may still be
+ valid; we just don't know anything about it.
+ """
+ if self.token_expiry:
+ now = _UTCNOW()
+ if self.token_expiry > now:
+ time_delta = self.token_expiry - now
+ # TODO(orestica): return time_delta.total_seconds()
+ # once dropping support for Python 2.6
+ return time_delta.days * 86400 + time_delta.seconds
+ else:
+ return 0
+
+ def _updateFromCredential(self, other):
+ """Update this Credential from another instance."""
+ self.__dict__.update(other.__getstate__())
+
+ def __getstate__(self):
+ """Trim the state down to something that can be pickled."""
+ d = copy.copy(self.__dict__)
+ del d['store']
+ return d
+
+ def __setstate__(self, state):
+ """Reconstitute the state of the object from being pickled."""
+ self.__dict__.update(state)
+ self.store = None
+
+ def _generate_refresh_request_body(self):
+ """Generate the body that will be used in the refresh request."""
+ body = urllib.parse.urlencode({
+ 'grant_type': 'refresh_token',
+ 'client_id': self.client_id,
+ 'client_secret': self.client_secret,
+ 'refresh_token': self.refresh_token,
+ })
+ return body
+
+ def _generate_refresh_request_headers(self):
+ """Generate the headers that will be used in the refresh request."""
+ headers = {
+ 'content-type': 'application/x-www-form-urlencoded',
+ }
+
+ if self.user_agent is not None:
+ headers['user-agent'] = self.user_agent
+
+ return headers
+
    def _refresh(self, http):
        """Refreshes the access_token.

        This method first checks by reading the Storage object if available.
        If a refresh is still needed, it holds the Storage lock until the
        refresh is completed.

        Args:
            http: an object to be used to make HTTP requests.

        Raises:
            HttpAccessTokenRefreshError: When the refresh fails.
        """
        if not self.store:
            # No shared storage: nothing to coordinate with, refresh directly.
            self._do_refresh_request(http)
        else:
            # Hold the store's lock for the whole read-check-refresh sequence
            # so concurrent holders of the same store don't refresh twice.
            self.store.acquire_lock()
            try:
                new_cred = self.store.locked_get()

                # Adopt a credential another process already refreshed and
                # stored (its token differs from ours and is still valid);
                # otherwise perform our own refresh request.
                if (new_cred and not new_cred.invalid and
                        new_cred.access_token != self.access_token and
                        not new_cred.access_token_expired):
                    logger.info('Updated access_token read from Storage')
                    self._updateFromCredential(new_cred)
                else:
                    self._do_refresh_request(http)
            finally:
                self.store.release_lock()
+
    def _do_refresh_request(self, http):
        """Refresh the access_token using the refresh_token.

        On success the token fields (access_token, refresh_token,
        token_expiry, id_token) are updated in place and, when a store is
        attached, the refreshed credential is persisted.

        Args:
            http: an object to be used to make HTTP requests.

        Raises:
            HttpAccessTokenRefreshError: When the refresh fails.
        """
        body = self._generate_refresh_request_body()
        headers = self._generate_refresh_request_headers()

        logger.info('Refreshing access_token')
        resp, content = transport.request(
            http, self.token_uri, method='POST',
            body=body, headers=headers)
        content = _helpers._from_bytes(content)
        if resp.status == http_client.OK:
            d = json.loads(content)
            self.token_response = d
            self.access_token = d['access_token']
            # Providers may rotate the refresh token; keep ours otherwise.
            self.refresh_token = d.get('refresh_token', self.refresh_token)
            if 'expires_in' in d:
                delta = datetime.timedelta(seconds=int(d['expires_in']))
                self.token_expiry = delta + _UTCNOW()
            else:
                self.token_expiry = None
            if 'id_token' in d:
                self.id_token = _extract_id_token(d['id_token'])
                self.id_token_jwt = d['id_token']
            else:
                self.id_token = None
                self.id_token_jwt = None
            # On temporary refresh errors, the user does not actually have to
            # re-authorize, so we unflag here.
            self.invalid = False
            if self.store:
                self.store.locked_put(self)
        else:
            # An {'error':...} response body means the token is expired or
            # revoked, so we flag the credentials as such.
            logger.info('Failed to retrieve access token: %s', content)
            error_msg = 'Invalid response {0}.'.format(resp.status)
            try:
                d = json.loads(content)
                if 'error' in d:
                    error_msg = d['error']
                    if 'error_description' in d:
                        error_msg += ': ' + d['error_description']
                    self.invalid = True
                    if self.store is not None:
                        self.store.locked_put(self)
            except (TypeError, ValueError):
                # Body was not JSON; fall through with the generic message.
                pass
            raise HttpAccessTokenRefreshError(error_msg, status=resp.status)
+
+ def _revoke(self, http):
+ """Revokes this credential and deletes the stored copy (if it exists).
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ """
+ self._do_revoke(http, self.refresh_token or self.access_token)
+
    def _do_revoke(self, http, token):
        """Revokes this credential and deletes the stored copy (if it exists).

        Args:
            http: an object to be used to make HTTP requests.
            token: A string used as the token to be revoked. Can be either an
                   access_token or refresh_token.

        Raises:
            TokenRevokeError: If the revoke request does not return with a
                              200 OK.
        """
        logger.info('Revoking token')
        query_params = {'token': token}
        token_revoke_uri = _helpers.update_query_params(
            self.revoke_uri, query_params)
        resp, content = transport.request(http, token_revoke_uri)
        # A 405 means the endpoint rejected the GET; retry the revocation
        # as a form-encoded POST before giving up.
        if resp.status == http_client.METHOD_NOT_ALLOWED:
            body = urllib.parse.urlencode(query_params)
            resp, content = transport.request(http, token_revoke_uri,
                                              method='POST', body=body)
        if resp.status == http_client.OK:
            self.invalid = True
        else:
            error_msg = 'Invalid response {0}.'.format(resp.status)
            try:
                d = json.loads(_helpers._from_bytes(content))
                if 'error' in d:
                    error_msg = d['error']
            except (TypeError, ValueError):
                # Non-JSON error body; keep the generic status message.
                pass
            raise TokenRevokeError(error_msg)

        # Only reached on success: drop the now-useless stored credential.
        if self.store:
            self.store.delete()
+
+ def _retrieve_scopes(self, http):
+ """Retrieves the list of authorized scopes from the OAuth2 provider.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ """
+ self._do_retrieve_scopes(http, self.access_token)
+
    def _do_retrieve_scopes(self, http, token):
        """Retrieves the list of authorized scopes from the OAuth2 provider.

        On success, ``self.scopes`` is replaced with the set of scopes the
        provider reports for ``token``.

        Args:
            http: an object to be used to make HTTP requests.
            token: A string used as the token to identify the credentials to
                   the provider.

        Raises:
            Error: When refresh fails, indicating that the access token is
                invalid.
        """
        logger.info('Refreshing scopes')
        query_params = {'access_token': token, 'fields': 'scope'}
        token_info_uri = _helpers.update_query_params(
            self.token_info_uri, query_params)
        resp, content = transport.request(http, token_info_uri)
        content = _helpers._from_bytes(content)
        if resp.status == http_client.OK:
            d = json.loads(content)
            # The provider returns a space-delimited scope string.
            self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))
        else:
            error_msg = 'Invalid response {0}.'.format(resp.status)
            try:
                d = json.loads(content)
                if 'error_description' in d:
                    error_msg = d['error_description']
            except (TypeError, ValueError):
                # Non-JSON error body; keep the generic status message.
                pass
            raise Error(error_msg)
+
+
class AccessTokenCredentials(OAuth2Credentials):
    """Credentials object for OAuth 2.0.

    Credentials can be applied to an httplib2.Http object using the
    authorize() method, which then signs each request from that object
    with the OAuth 2.0 access token. This set of credentials is for the
    use case where you have acquired an OAuth 2.0 access_token from
    another place such as a JavaScript client or another web
    application, and wish to use it from Python. Because only the
    access_token is present it can not be refreshed and will in time
    expire.

    AccessTokenCredentials objects may be safely pickled and unpickled.

    Usage::

        credentials = AccessTokenCredentials('<an access token>',
            'my-user-agent/1.0')
        http = httplib2.Http()
        http = credentials.authorize(http)

    Raises:
        AccessTokenCredentialsExpired: raised when the access_token expires or
            is revoked.
    """

    def __init__(self, access_token, user_agent, revoke_uri=None):
        """Create an instance of AccessTokenCredentials.

        This is one of the few types of Credentials that you should
        construct directly; Credentials objects are usually instantiated
        by a Flow.

        Args:
            access_token: string, access token.
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            revoke_uri: string, URI for revoke endpoint. Defaults to None; a
                        token can't be revoked if this is None.
        """
        # All refresh-related fields are None: an externally acquired
        # access token carries no client id/secret or refresh token.
        super(AccessTokenCredentials, self).__init__(
            access_token,
            None,
            None,
            None,
            None,
            None,
            user_agent,
            revoke_uri=revoke_uri)

    @classmethod
    def from_json(cls, json_data):
        """Instantiate from previously serialized JSON.

        Args:
            json_data: string or bytes, serialized credential JSON.

        Returns:
            An AccessTokenCredentials (or subclass) instance.
        """
        data = json.loads(_helpers._from_bytes(json_data))
        # Use cls (not a hard-coded class name) so subclasses deserialize
        # to themselves, and restore revoke_uri so a serialization
        # round-trip does not lose the revoke endpoint.
        retval = cls(
            data['access_token'],
            data['user_agent'],
            revoke_uri=data.get('revoke_uri', None))
        return retval

    def _refresh(self, http):
        """Refreshes the access token.

        Args:
            http: unused HTTP object.

        Raises:
            AccessTokenCredentialsError: always
        """
        raise AccessTokenCredentialsError(
            'The access_token is expired or invalid and can\'t be refreshed.')

    def _revoke(self, http):
        """Revokes the access_token and deletes the store if available.

        Args:
            http: an object to be used to make HTTP requests.
        """
        self._do_revoke(http, self.access_token)
+
+
def _detect_gce_environment():
    """Determine if the current environment is Compute Engine.

    Returns:
        Boolean indicating whether or not the current environment is Google
        Compute Engine.
    """
    # NOTE: The explicit ``timeout`` is a workaround. The underlying
    # issue is that resolving an unknown host on some networks will take
    # 20-30 seconds; making this timeout short fixes the issue, but
    # could lead to false negatives in the event that we are on GCE, but
    # the metadata resolution was particularly slow. The latter case is
    # "unlikely".
    http = transport.get_http_object(timeout=GCE_METADATA_TIMEOUT)
    try:
        response, _ = transport.request(
            http, _GCE_METADATA_URI, headers=_GCE_HEADERS)
    except socket.error:  # socket.timeout or socket.error(64, 'Host is down')
        logger.info('Timeout attempting to reach GCE metadata service.')
        return False
    # We are on GCE only when the metadata server answered OK *and*
    # identified itself via the expected flavor header.
    status_ok = response.status == http_client.OK
    flavor_ok = (
        response.get(_METADATA_FLAVOR_HEADER) == _DESIRED_METADATA_FLAVOR)
    return status_ok and flavor_ok
+
+
def _in_gae_environment():
    """Detects if the code is running in the App Engine environment.

    Returns:
        True if running in the GAE environment, False otherwise.
    """
    if SETTINGS.env_name is not None:
        # A previous detection already classified this environment.
        return SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')

    try:
        import google.appengine  # noqa: unused import
    except ImportError:
        return False

    server_software = os.environ.get(_SERVER_SOFTWARE, '')
    if server_software.startswith('Google App Engine/'):
        SETTINGS.env_name = 'GAE_PRODUCTION'
        return True
    if server_software.startswith('Development/'):
        SETTINGS.env_name = 'GAE_LOCAL'
        return True

    return False
+
+
def _in_gce_environment():
    """Detect if the code is running in the Compute Engine environment.

    Returns:
        True if running in the GCE environment, False otherwise.
    """
    if SETTINGS.env_name is not None:
        # A previous detection already classified this environment.
        return SETTINGS.env_name == 'GCE_PRODUCTION'

    if NO_GCE_CHECK == 'True' or not _detect_gce_environment():
        return False
    SETTINGS.env_name = 'GCE_PRODUCTION'
    return True
+
+
class GoogleCredentials(OAuth2Credentials):
    """Application Default Credentials for use in calling Google APIs.

    The Application Default Credentials are being constructed as a function of
    the environment where the code is being run.
    More details can be found on this page:
    https://developers.google.com/accounts/docs/application-default-credentials

    Here is an example of how to use the Application Default Credentials for a
    service that requires authentication::

        from googleapiclient.discovery import build
        from oauth2client.client import GoogleCredentials

        credentials = GoogleCredentials.get_application_default()
        service = build('compute', 'v1', credentials=credentials)

        PROJECT = 'bamboo-machine-422'
        ZONE = 'us-central1-a'
        request = service.instances().list(project=PROJECT, zone=ZONE)
        response = request.execute()

        print(response)
    """

    NON_SERIALIZED_MEMBERS = (
        frozenset(['_private_key']) |
        OAuth2Credentials.NON_SERIALIZED_MEMBERS)
    """Members that aren't serialized when object is converted to JSON."""

    def __init__(self, access_token, client_id, client_secret, refresh_token,
                 token_expiry, token_uri, user_agent,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Create an instance of GoogleCredentials.

        This constructor is not usually called by the user, instead
        GoogleCredentials objects are instantiated by
        GoogleCredentials.from_stream() or
        GoogleCredentials.get_application_default().

        Args:
            access_token: string, access token.
            client_id: string, client identifier.
            client_secret: string, client secret.
            refresh_token: string, refresh token.
            token_expiry: datetime, when the access_token expires.
            token_uri: string, URI of token endpoint.
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            revoke_uri: string, URI for revoke endpoint. Defaults to
                        oauth2client.GOOGLE_REVOKE_URI; a token can't be
                        revoked if this is None.
        """
        super(GoogleCredentials, self).__init__(
            access_token, client_id, client_secret, refresh_token,
            token_expiry, token_uri, user_agent, revoke_uri=revoke_uri)

    def create_scoped_required(self):
        """Whether this Credentials object is scopeless.

        create_scoped(scopes) method needs to be called in order to create
        a Credentials object for API calls.
        """
        # This base class carries no scope requirement; subclasses may differ.
        return False

    def create_scoped(self, scopes):
        """Create a Credentials object for the given scopes.

        The Credentials type is preserved.
        """
        # Scopeless by default (see create_scoped_required), so the same
        # instance is returned unchanged.
        return self

    @classmethod
    def from_json(cls, json_data):
        """Instantiate a credential from serialized JSON.

        Dispatches to the service-account classes when the serialized data
        identifies one of them; otherwise builds an instance of ``cls``.

        Args:
            json_data: string or bytes, JSON produced by ``to_json``.

        Returns:
            A GoogleCredentials, ServiceAccountCredentials or
            _JWTAccessCredentials instance, depending on the serialized data.
        """
        # TODO(issue 388): eliminate the circularity that is the reason for
        # this non-top-level import.
        from oauth2client import service_account
        data = json.loads(_helpers._from_bytes(json_data))

        # We handle service_account.ServiceAccountCredentials since it is a
        # possible return type of GoogleCredentials.get_application_default()
        if (data['_module'] == 'oauth2client.service_account' and
                data['_class'] == 'ServiceAccountCredentials'):
            return service_account.ServiceAccountCredentials.from_json(data)
        elif (data['_module'] == 'oauth2client.service_account' and
                data['_class'] == '_JWTAccessCredentials'):
            return service_account._JWTAccessCredentials.from_json(data)

        token_expiry = _parse_expiry(data.get('token_expiry'))
        google_credentials = cls(
            data['access_token'],
            data['client_id'],
            data['client_secret'],
            data['refresh_token'],
            token_expiry,
            data['token_uri'],
            data['user_agent'],
            revoke_uri=data.get('revoke_uri', None))
        google_credentials.invalid = data['invalid']
        return google_credentials

    @property
    def serialization_data(self):
        """Get the fields and values identifying the current credentials."""
        return {
            'type': 'authorized_user',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token': self.refresh_token
        }

    @staticmethod
    def _implicit_credentials_from_gae():
        """Attempts to get implicit credentials in Google App Engine env.

        If the current environment is not detected as App Engine, returns None,
        indicating no Google App Engine credentials can be detected from the
        current environment.

        Returns:
            None, if not in GAE, else an appengine.AppAssertionCredentials
            object.
        """
        if not _in_gae_environment():
            return None

        return _get_application_default_credential_GAE()

    @staticmethod
    def _implicit_credentials_from_gce():
        """Attempts to get implicit credentials in Google Compute Engine env.

        If the current environment is not detected as Compute Engine, returns
        None, indicating no Google Compute Engine credentials can be detected
        from the current environment.

        Returns:
            None, if not in GCE, else a gce.AppAssertionCredentials object.
        """
        if not _in_gce_environment():
            return None

        return _get_application_default_credential_GCE()

    @staticmethod
    def _implicit_credentials_from_files():
        """Attempts to get implicit credentials from local credential files.

        First checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS
        is set with a filename and then falls back to a configuration file (the
        "well known" file) associated with the 'gcloud' command line tool.

        Returns:
            Credentials object associated with the
            GOOGLE_APPLICATION_CREDENTIALS file or the "well known" file if
            either exist. If neither file is defined, returns None, indicating
            no credentials from a file can be detected from the current
            environment.
        """
        credentials_filename = _get_environment_variable_file()
        if not credentials_filename:
            credentials_filename = _get_well_known_file()
            if os.path.isfile(credentials_filename):
                extra_help = (' (produced automatically when running'
                              ' "gcloud auth login" command)')
            else:
                credentials_filename = None
        else:
            extra_help = (' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS +
                          ' environment variable)')

        if not credentials_filename:
            return

        # If we can read the credentials from a file, we don't need to know
        # what environment we are in.
        SETTINGS.env_name = DEFAULT_ENV_NAME

        try:
            return _get_application_default_credential_from_file(
                credentials_filename)
        except (ApplicationDefaultCredentialsError, ValueError) as error:
            # extra_help records which mechanism produced the bad file, so
            # the error message tells the user what to fix.
            _raise_exception_for_reading_json(credentials_filename,
                                              extra_help, error)

    @classmethod
    def _get_implicit_credentials(cls):
        """Gets credentials implicitly from the environment.

        Checks environment in order of precedence:
        - Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to
          a file with stored credentials information.
        - Stored "well known" file associated with `gcloud` command line tool.
        - Google App Engine (production and testing)
        - Google Compute Engine production environment.

        Returns:
            Credentials object from the first checker that finds any.

        Raises:
            ApplicationDefaultCredentialsError: raised when the credentials
                fail to be retrieved.
        """
        # Environ checks (in order).
        environ_checkers = [
            cls._implicit_credentials_from_files,
            cls._implicit_credentials_from_gae,
            cls._implicit_credentials_from_gce,
        ]

        for checker in environ_checkers:
            credentials = checker()
            if credentials is not None:
                return credentials

        # If no credentials, fail.
        raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)

    @staticmethod
    def get_application_default():
        """Get the Application Default Credentials for the current environment.

        Raises:
            ApplicationDefaultCredentialsError: raised when the credentials
                fail to be retrieved.
        """
        return GoogleCredentials._get_implicit_credentials()

    @staticmethod
    def from_stream(credential_filename):
        """Create a Credentials object by reading information from a file.

        It returns an object of type GoogleCredentials.

        Args:
            credential_filename: the path to the file from where the
                                 credentials are to be read

        Raises:
            ApplicationDefaultCredentialsError: raised when the credentials
                fail to be retrieved.
        """
        if credential_filename and os.path.isfile(credential_filename):
            try:
                return _get_application_default_credential_from_file(
                    credential_filename)
            except (ApplicationDefaultCredentialsError, ValueError) as error:
                extra_help = (' (provided as parameter to the '
                              'from_stream() method)')
                _raise_exception_for_reading_json(credential_filename,
                                                  extra_help,
                                                  error)
        else:
            raise ApplicationDefaultCredentialsError(
                'The parameter passed to the from_stream() '
                'method should point to a file.')
+
+
def _save_private_file(filename, json_contents):
    """Saves a file with read-write permissions on for the owner.

    The contents are written to a private temporary file first and then
    moved into place.

    Args:
        filename: String. Absolute path to file.
        json_contents: JSON serializable object to be saved.
    """
    # mkstemp (unlike the deprecated, race-prone mktemp) atomically
    # creates the file with owner-only (0o600) permissions, so there is
    # no window in which another process can claim or pre-create the name.
    file_desc, temp_filename = tempfile.mkstemp()
    with os.fdopen(file_desc, 'w') as file_handle:
        json.dump(json_contents, file_handle, sort_keys=True,
                  indent=2, separators=(',', ': '))
    shutil.move(temp_filename, filename)
+
+
def save_to_well_known_file(credentials, well_known_file=None):
    """Save the provided GoogleCredentials to the well known file.

    Args:
        credentials: the credentials to be saved to the well known file;
                     it should be an instance of GoogleCredentials
        well_known_file: the name of the file where the credentials are to be
                         saved; this parameter is supposed to be used for
                         testing only
    """
    # TODO(orestica): move this method to tools.py
    # once the argparse import gets fixed (it is not present in Python 2.6)

    if well_known_file is None:
        well_known_file = _get_well_known_file()

    config_dir = os.path.dirname(well_known_file)
    if not os.path.isdir(config_dir):
        raise OSError(
            'Config directory does not exist: {0}'.format(config_dir))

    _save_private_file(well_known_file, credentials.serialization_data)
+
+
def _get_environment_variable_file():
    """Return the credentials path named by GOOGLE_APPLICATION_CREDENTIALS.

    Returns:
        The filename the environment variable points to when it names an
        existing file; None when the variable is unset or empty.

    Raises:
        ApplicationDefaultCredentialsError: when the variable is set but
            the file it points to does not exist.
    """
    filename = os.environ.get(GOOGLE_APPLICATION_CREDENTIALS, None)
    if not filename:
        return None
    if os.path.isfile(filename):
        return filename
    raise ApplicationDefaultCredentialsError(
        'File ' + filename +
        ' (pointed by ' +
        GOOGLE_APPLICATION_CREDENTIALS +
        ' environment variable) does not exist!')
+
+
def _get_well_known_file():
    """Get the well known file produced by command 'gcloud auth login'."""
    # TODO(orestica): Revisit this method once gcloud provides a better way
    # of pinpointing the exact location of the file.
    config_dir = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR)
    if config_dir is None:
        if os.name == 'nt':
            appdata = os.environ.get('APPDATA')
            if appdata is not None:
                config_dir = os.path.join(appdata,
                                          _CLOUDSDK_CONFIG_DIRECTORY)
            else:
                # This should never happen unless someone is really
                # messing with things.
                drive = os.environ.get('SystemDrive', 'C:')
                config_dir = os.path.join(drive, '\\',
                                          _CLOUDSDK_CONFIG_DIRECTORY)
        else:
            config_dir = os.path.join(os.path.expanduser('~'), '.config',
                                      _CLOUDSDK_CONFIG_DIRECTORY)

    return os.path.join(config_dir, _WELL_KNOWN_CREDENTIALS_FILE)
+
+
def _get_application_default_credential_from_file(filename):
    """Build the Application Default Credentials from file.

    Args:
        filename: path to a JSON credentials file of type 'authorized_user'
                  or 'service_account'.

    Raises:
        ApplicationDefaultCredentialsError: when the 'type' field is missing
            or unrecognized, or required fields are absent.
    """
    with open(filename) as file_obj:
        client_credentials = json.load(file_obj)

    credentials_type = client_credentials.get('type')
    if credentials_type == AUTHORIZED_USER:
        required_fields = {'client_id', 'client_secret', 'refresh_token'}
    elif credentials_type == SERVICE_ACCOUNT:
        required_fields = {'client_id', 'client_email', 'private_key_id',
                           'private_key'}
    else:
        raise ApplicationDefaultCredentialsError(
            "'type' field should be defined (and have one of the '" +
            AUTHORIZED_USER + "' or '" + SERVICE_ACCOUNT + "' values)")

    missing_fields = required_fields.difference(client_credentials.keys())
    if missing_fields:
        _raise_exception_for_missing_fields(missing_fields)

    if credentials_type == AUTHORIZED_USER:
        return GoogleCredentials(
            access_token=None,
            client_id=client_credentials['client_id'],
            client_secret=client_credentials['client_secret'],
            refresh_token=client_credentials['refresh_token'],
            token_expiry=None,
            token_uri=oauth2client.GOOGLE_TOKEN_URI,
            user_agent='Python client library')

    # credentials_type == SERVICE_ACCOUNT
    from oauth2client import service_account
    return service_account._JWTAccessCredentials.from_json_keyfile_dict(
        client_credentials)
+
+
def _raise_exception_for_missing_fields(missing_fields):
    """Fail with a message naming every required-but-absent field."""
    raise ApplicationDefaultCredentialsError(
        'The following field(s) must be defined: ' + ', '.join(missing_fields))
+
+
def _raise_exception_for_reading_json(credential_file,
                                      extra_help,
                                      error):
    """Wrap a JSON-reading failure in ApplicationDefaultCredentialsError."""
    message = ('An error was encountered while reading json file: ' +
               credential_file + extra_help + ': ' + str(error))
    raise ApplicationDefaultCredentialsError(message)
+
+
def _get_application_default_credential_GAE():
    """Build default credentials for the App Engine environment."""
    # Deferred import keeps the appengine contrib dependency optional.
    from oauth2client.contrib.appengine import AppAssertionCredentials

    return AppAssertionCredentials([])
+
+
def _get_application_default_credential_GCE():
    """Build default credentials for the Compute Engine environment."""
    # Deferred import keeps the gce contrib dependency optional.
    from oauth2client.contrib.gce import AppAssertionCredentials

    return AppAssertionCredentials()
+
+
class AssertionCredentials(GoogleCredentials):
    """Abstract Credentials object used for OAuth 2.0 assertion grants.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens. It must
    be subclassed to generate the appropriate assertion string.

    AssertionCredentials objects may be safely pickled and unpickled.
    """

    @_helpers.positional(2)
    def __init__(self, assertion_type, user_agent=None,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 **unused_kwargs):
        """Constructor for AssertionFlowCredentials.

        Args:
            assertion_type: string, assertion type that will be declared to the
                            auth server
            user_agent: string, The HTTP User-Agent to provide for this
                        application.
            token_uri: string, URI for token endpoint. For convenience defaults
                       to Google's endpoints but any OAuth 2.0 provider can be
                       used.
            revoke_uri: string, URI for revoke endpoint.
        """
        # No access/refresh token or client id/secret up front: the
        # assertion grant mints tokens on demand.
        super(AssertionCredentials, self).__init__(
            None,
            None,
            None,
            None,
            None,
            token_uri,
            user_agent,
            revoke_uri=revoke_uri)
        self.assertion_type = assertion_type

    def _generate_refresh_request_body(self):
        """Generate a JWT-bearer grant body carrying a fresh assertion."""
        assertion = self._generate_assertion()

        body = urllib.parse.urlencode({
            'assertion': assertion,
            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
        })

        return body

    def _generate_assertion(self):
        """Generate assertion string to be used in the access token request."""
        raise NotImplementedError

    def _revoke(self, http):
        """Revokes the access_token and deletes the store if available.

        Args:
            http: an object to be used to make HTTP requests.
        """
        self._do_revoke(http, self.access_token)

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        Args:
            blob: bytes, Message to be signed.

        Returns:
            tuple, A pair of the private key ID used to sign the blob and
            the signed contents.
        """
        raise NotImplementedError('This method is abstract.')
+
+
def _require_crypto_or_die():
    """Ensure we have a crypto library, or throw CryptoUnavailableError.

    The oauth2client.crypt module requires either PyCrypto or PyOpenSSL
    to be available in order to function, but these are optional
    dependencies.
    """
    if HAS_CRYPTO:
        return
    raise CryptoUnavailableError('No crypto library available')
+
+
@_helpers.positional(2)
def verify_id_token(id_token, audience, http=None,
                    cert_uri=ID_TOKEN_VERIFICATION_CERTS):
    """Verifies a signed JWT id_token.

    This function requires PyOpenSSL and because of that it does not work on
    App Engine.

    Args:
        id_token: string, A Signed JWT.
        audience: string, The audience 'aud' that the token should be for.
        http: httplib2.Http, instance to use to make the HTTP request. Callers
              should supply an instance that has caching enabled.
        cert_uri: string, URI of the certificates in JSON format to
                  verify the JWT against.

    Returns:
        The deserialized JSON in the JWT.

    Raises:
        oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
        CryptoUnavailableError: if no crypto library is available.
    """
    _require_crypto_or_die()
    if http is None:
        http = transport.get_cached_http()

    resp, content = transport.request(http, cert_uri)
    if resp.status != http_client.OK:
        raise VerifyJwtTokenError('Status code: {0}'.format(resp.status))
    certs = json.loads(_helpers._from_bytes(content))
    return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)
+
+
def _extract_id_token(id_token):
    """Extract the JSON payload from a JWT.

    Does the extraction w/o checking the signature.

    Args:
        id_token: string or bytestring, OAuth 2.0 id_token.

    Returns:
        object, The deserialized JSON payload.

    Raises:
        VerifyJwtTokenError: if the token does not consist of exactly
            three '.'-separated segments.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # bytes subclasses; the split separator must match the token's type.
    if isinstance(id_token, bytes):
        segments = id_token.split(b'.')
    else:
        segments = id_token.split(u'.')

    if len(segments) != 3:
        raise VerifyJwtTokenError(
            'Wrong number of segments in token: {0}'.format(id_token))

    # The payload is the middle segment, base64url-encoded.
    return json.loads(
        _helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1])))
+
+
def _parse_exchange_token_response(content):
    """Parses response of an exchange token request.

    Most providers return JSON but some (e.g. Facebook) return a
    url-encoded string.

    Args:
        content: The body of a response

    Returns:
        Content as a dictionary object. Note that the dict could be empty,
        i.e. {}. That basically indicates a failure.
    """
    content = _helpers._from_bytes(content)
    try:
        resp = json.loads(content)
    except Exception:
        # different JSON libs raise different exceptions,
        # so we just do a catch-all here
        resp = _helpers.parse_unique_urlencoded(content)

    # some providers respond with 'expires', others with 'expires_in'
    if resp and 'expires' in resp:
        resp['expires_in'] = resp.pop('expires')

    return resp
+
+
@_helpers.positional(4)
def credentials_from_code(client_id, client_secret, scope, code,
                          redirect_uri='postmessage', http=None,
                          user_agent=None,
                          token_uri=oauth2client.GOOGLE_TOKEN_URI,
                          auth_uri=oauth2client.GOOGLE_AUTH_URI,
                          revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                          device_uri=oauth2client.GOOGLE_DEVICE_URI,
                          token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,
                          pkce=False,
                          code_verifier=None):
    """Exchanges an authorization code for an OAuth2Credentials object.

    Args:
        client_id: string, client identifier.
        client_secret: string, client secret.
        scope: string or iterable of strings, scope(s) to request.
        code: string, An authorization code, most likely passed down from
              the client
        redirect_uri: string, this is generally set to 'postmessage' to match
                      the redirect_uri that the client specified
        http: httplib2.Http, optional http instance to use to do the fetch
        user_agent: string, The HTTP User-Agent to provide for the created
                    flow.
        token_uri: string, URI for token endpoint. For convenience defaults
                   to Google's endpoints but any OAuth 2.0 provider can be
                   used.
        auth_uri: string, URI for authorization endpoint. For convenience
                  defaults to Google's endpoints but any OAuth 2.0 provider
                  can be used.
        revoke_uri: string, URI for revoke endpoint. For convenience
                    defaults to Google's endpoints but any OAuth 2.0 provider
                    can be used.
        device_uri: string, URI for device authorization endpoint. For
                    convenience defaults to Google's endpoints but any OAuth
                    2.0 provider can be used.
        token_info_uri: string, URI for the token info endpoint. For
                        convenience defaults to Google's endpoints but any
                        OAuth 2.0 provider can be used.
        pkce: boolean, default: False, Generate and include a "Proof Key
              for Code Exchange" (PKCE) with your authorization and token
              requests. This adds security for installed applications that
              cannot protect a client_secret. See RFC 7636 for details.
        code_verifier: bytestring or None, default: None, parameter passed
                       as part of the code exchange when pkce=True. If
                       None, a code_verifier will automatically be
                       generated as part of step1_get_authorize_url(). See
                       RFC 7636 for details.

    Returns:
        An OAuth2Credentials object.

    Raises:
        FlowExchangeError if the authorization code cannot be exchanged for an
        access token
    """
    # All endpoint/PKCE settings are simply forwarded to the web-server flow;
    # the exchange itself happens in step2_exchange.
    flow = OAuth2WebServerFlow(client_id, client_secret, scope,
                               redirect_uri=redirect_uri,
                               user_agent=user_agent,
                               auth_uri=auth_uri,
                               token_uri=token_uri,
                               revoke_uri=revoke_uri,
                               device_uri=device_uri,
                               token_info_uri=token_info_uri,
                               pkce=pkce,
                               code_verifier=code_verifier)

    credentials = flow.step2_exchange(code, http=http)
    return credentials
+
+
@_helpers.positional(3)
def credentials_from_clientsecrets_and_code(filename, scope, code,
                                            message=None,
                                            redirect_uri='postmessage',
                                            http=None,
                                            cache=None,
                                            device_uri=None):
    """Returns OAuth2Credentials from a clientsecrets file and an auth code.

    Will create the right kind of Flow based on the contents of the
    clientsecrets file or will raise InvalidClientSecretsError for unknown
    types of Flows.

    Args:
        filename: string, File name of clientsecrets.
        scope: string or iterable of strings, scope(s) to request.
        code: string, An authorization code, most likely passed down from
              the client
        message: string, A friendly string to display to the user if the
                 clientsecrets file is missing or invalid. If message is
                 provided then sys.exit will be called in the case of an error.
                 If message in not provided then
                 clientsecrets.InvalidClientSecretsError will be raised.
        redirect_uri: string, this is generally set to 'postmessage' to match
                      the redirect_uri that the client specified
        http: httplib2.Http, optional http instance to use to do the fetch
        cache: An optional cache service client that implements get() and set()
               methods. See clientsecrets.loadfile() for details.
        device_uri: string, OAuth 2.0 device authorization endpoint

    Returns:
        An OAuth2Credentials object.

    Raises:
        FlowExchangeError: if the authorization code cannot be exchanged for an
            access token
        UnknownClientSecretsFlowError: if the file describes an unknown kind
            of Flow.
        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
            invalid.
    """
    # The flow type (and its endpoints) is derived from the clientsecrets
    # file; this function only performs the final code exchange.
    flow = flow_from_clientsecrets(filename, scope, message=message,
                                   cache=cache, redirect_uri=redirect_uri,
                                   device_uri=device_uri)
    credentials = flow.step2_exchange(code, http=http)
    return credentials
+
+
+class DeviceFlowInfo(collections.namedtuple('DeviceFlowInfo', (
+    'device_code', 'user_code', 'interval', 'verification_url',
+    'user_code_expiry'))):
+    """Intermediate information for the OAuth 2.0 for-devices flow.
+
+    Fields:
+        device_code: code the client later presents at the token endpoint.
+        user_code: code the end user enters at verification_url.
+        interval: suggested polling interval from the server, or None.
+        verification_url: URL where the user authorizes the application.
+        user_code_expiry: datetime when the user code expires, or None if
+                          the server did not report an expires_in value.
+    """
+
+    @classmethod
+    def FromResponse(cls, response):
+        """Create a DeviceFlowInfo from a server response.
+
+        The response should be a dict containing entries as described here:
+
+        http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1
+
+        Args:
+            response: dict, parsed JSON body of the device-authorization
+                      response.
+
+        Returns:
+            A DeviceFlowInfo instance.
+
+        Raises:
+            OAuth2DeviceCodeError: if neither verification_url nor
+                verification_uri is present in the response.
+            KeyError: if device_code or user_code is missing.
+        """
+        # device_code, user_code, and verification_url are required.
+        kwargs = {
+            'device_code': response['device_code'],
+            'user_code': response['user_code'],
+        }
+        # The response may list the verification address as either
+        # verification_url or verification_uri, so we check for both.
+        verification_url = response.get(
+            'verification_url', response.get('verification_uri'))
+        if verification_url is None:
+            raise OAuth2DeviceCodeError(
+                'No verification_url provided in server response')
+        kwargs['verification_url'] = verification_url
+        # expires_in and interval are optional.
+        kwargs.update({
+            'interval': response.get('interval'),
+            'user_code_expiry': None,
+        })
+        if 'expires_in' in response:
+            # Convert the relative expires_in (seconds) to an absolute time.
+            kwargs['user_code_expiry'] = (
+                _UTCNOW() +
+                datetime.timedelta(seconds=int(response['expires_in'])))
+        return cls(**kwargs)
+
+
+def _oauth2_web_server_flow_params(kwargs):
+    """Configures redirect URI parameters for OAuth2WebServerFlow.
+
+    Args:
+        kwargs: dict, extra query parameters passed through from the
+                OAuth2WebServerFlow constructor; entries here override the
+                access_type/response_type defaults and may include the
+                deprecated approval_prompt key.
+
+    Returns:
+        dict, the merged query parameters, with approval_prompt='force'
+        translated to prompt='consent'.
+    """
+    params = {
+        'access_type': 'offline',
+        'response_type': 'code',
+    }
+
+    params.update(kwargs)
+
+    # Check for the presence of the deprecated approval_prompt param and
+    # warn appropriately.
+    approval_prompt = params.get('approval_prompt')
+    if approval_prompt is not None:
+        logger.warning(
+            'The approval_prompt parameter for OAuth2WebServerFlow is '
+            'deprecated. Please use the prompt parameter instead.')
+
+        # NOTE(review): only the 'force' value is rewritten/removed; any
+        # other approval_prompt value is warned about but left in params.
+        if approval_prompt == 'force':
+            logger.warning(
+                'approval_prompt="force" has been adjusted to '
+                'prompt="consent"')
+            params['prompt'] = 'consent'
+            del params['approval_prompt']
+
+    return params
+
+
+class OAuth2WebServerFlow(Flow):
+    """Does the Web Server Flow for OAuth 2.0.
+
+    OAuth2WebServerFlow objects may be safely pickled and unpickled.
+    """
+
+    @_helpers.positional(4)
+    def __init__(self, client_id,
+                 client_secret=None,
+                 scope=None,
+                 redirect_uri=None,
+                 user_agent=None,
+                 auth_uri=oauth2client.GOOGLE_AUTH_URI,
+                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
+                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
+                 login_hint=None,
+                 device_uri=oauth2client.GOOGLE_DEVICE_URI,
+                 token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,
+                 authorization_header=None,
+                 pkce=False,
+                 code_verifier=None,
+                 **kwargs):
+        """Constructor for OAuth2WebServerFlow.
+
+        The kwargs argument is used to set extra query parameters on the
+        auth_uri. For example, the access_type and prompt
+        query parameters can be set via kwargs.
+
+        Args:
+            client_id: string, client identifier.
+            client_secret: string client secret.
+            scope: string or iterable of strings, scope(s) of the credentials
+                   being requested.
+            redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
+                          for a non-web-based application, or a URI that
+                          handles the callback from the authorization server.
+            user_agent: string, HTTP User-Agent to provide for this
+                        application.
+            auth_uri: string, URI for authorization endpoint. For convenience
+                      defaults to Google's endpoints but any OAuth 2.0 provider
+                      can be used.
+            token_uri: string, URI for token endpoint. For convenience
+                       defaults to Google's endpoints but any OAuth 2.0
+                       provider can be used.
+            revoke_uri: string, URI for revoke endpoint. For convenience
+                        defaults to Google's endpoints but any OAuth 2.0
+                        provider can be used.
+            login_hint: string, Either an email address or domain. Passing this
+                        hint will either pre-fill the email box on the sign-in
+                        form or select the proper multi-login session, thereby
+                        simplifying the login flow.
+            device_uri: string, URI for device authorization endpoint. For
+                        convenience defaults to Google's endpoints but any
+                        OAuth 2.0 provider can be used.
+            token_info_uri: string, URI for token info endpoint. For
+                            convenience defaults to Google's endpoints but
+                            any OAuth 2.0 provider can be used.
+            authorization_header: string, For use with OAuth 2.0 providers that
+                                  require a client to authenticate using a
+                                  header value instead of passing client_secret
+                                  in the POST body.
+            pkce: boolean, default: False, Generate and include a "Proof Key
+                  for Code Exchange" (PKCE) with your authorization and token
+                  requests. This adds security for installed applications that
+                  cannot protect a client_secret. See RFC 7636 for details.
+            code_verifier: bytestring or None, default: None, parameter passed
+                           as part of the code exchange when pkce=True. If
+                           None, a code_verifier will automatically be
+                           generated as part of step1_get_authorize_url(). See
+                           RFC 7636 for details.
+            **kwargs: dict, The keyword arguments are all optional and required
+                      parameters for the OAuth calls.
+        """
+        # scope is a required argument, but to preserve backwards-compatibility
+        # we don't want to rearrange the positional arguments
+        if scope is None:
+            raise TypeError("The value of scope must not be None")
+        self.client_id = client_id
+        self.client_secret = client_secret
+        self.scope = _helpers.scopes_to_string(scope)
+        self.redirect_uri = redirect_uri
+        self.login_hint = login_hint
+        self.user_agent = user_agent
+        self.auth_uri = auth_uri
+        self.token_uri = token_uri
+        self.revoke_uri = revoke_uri
+        self.device_uri = device_uri
+        self.token_info_uri = token_info_uri
+        self.authorization_header = authorization_header
+        self._pkce = pkce
+        self.code_verifier = code_verifier
+        self.params = _oauth2_web_server_flow_params(kwargs)
+
+    @_helpers.positional(1)
+    def step1_get_authorize_url(self, redirect_uri=None, state=None):
+        """Returns a URI to redirect to the provider.
+
+        Args:
+            redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
+                          for a non-web-based application, or a URI that
+                          handles the callback from the authorization server.
+                          This parameter is deprecated, please move to passing
+                          the redirect_uri in via the constructor.
+            state: string, Opaque state string which is passed through the
+                   OAuth2 flow and returned to the client as a query parameter
+                   in the callback.
+
+        Returns:
+            A URI as a string to redirect the user to begin the authorization
+            flow.
+
+        Raises:
+            ValueError: if no redirect_uri was supplied either here or in
+                the constructor.
+        """
+        if redirect_uri is not None:
+            logger.warning((
+                'The redirect_uri parameter for '
+                'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '
+                'Please move to passing the redirect_uri in via the '
+                'constructor.'))
+            self.redirect_uri = redirect_uri
+
+        if self.redirect_uri is None:
+            raise ValueError('The value of redirect_uri must not be None.')
+
+        query_params = {
+            'client_id': self.client_id,
+            'redirect_uri': self.redirect_uri,
+            'scope': self.scope,
+        }
+        if state is not None:
+            query_params['state'] = state
+        if self.login_hint is not None:
+            query_params['login_hint'] = self.login_hint
+        if self._pkce:
+            # Generate a verifier lazily so it is available for the later
+            # step2_exchange() call; only the derived challenge is sent here.
+            if not self.code_verifier:
+                self.code_verifier = _pkce.code_verifier()
+            challenge = _pkce.code_challenge(self.code_verifier)
+            query_params['code_challenge'] = challenge
+            query_params['code_challenge_method'] = 'S256'
+
+        query_params.update(self.params)
+        return _helpers.update_query_params(self.auth_uri, query_params)
+
+    @_helpers.positional(1)
+    def step1_get_device_and_user_codes(self, http=None):
+        """Returns a user code and the verification URL where to enter it
+
+        Returns:
+            A DeviceFlowInfo carrying the user code the user must authorize
+            the application with and the URL where the user has to enter
+            that code.
+
+        Raises:
+            ValueError: if device_uri is None.
+            OAuth2DeviceCodeError: if the request fails or the response
+                cannot be parsed as JSON.
+        """
+        if self.device_uri is None:
+            raise ValueError('The value of device_uri must not be None.')
+
+        body = urllib.parse.urlencode({
+            'client_id': self.client_id,
+            'scope': self.scope,
+        })
+        headers = {
+            'content-type': 'application/x-www-form-urlencoded',
+        }
+
+        if self.user_agent is not None:
+            headers['user-agent'] = self.user_agent
+
+        if http is None:
+            http = transport.get_http_object()
+
+        resp, content = transport.request(
+            http, self.device_uri, method='POST', body=body, headers=headers)
+        content = _helpers._from_bytes(content)
+        if resp.status == http_client.OK:
+            try:
+                flow_info = json.loads(content)
+            except ValueError as exc:
+                raise OAuth2DeviceCodeError(
+                    'Could not parse server response as JSON: "{0}", '
+                    'error: "{1}"'.format(content, exc))
+            return DeviceFlowInfo.FromResponse(flow_info)
+        else:
+            error_msg = 'Invalid response {0}.'.format(resp.status)
+            try:
+                error_dict = json.loads(content)
+                if 'error' in error_dict:
+                    error_msg += ' Error: {0}'.format(error_dict['error'])
+            except ValueError:
+                # Couldn't decode a JSON response, stick with the
+                # default message.
+                pass
+            raise OAuth2DeviceCodeError(error_msg)
+
+    @_helpers.positional(2)
+    def step2_exchange(self, code=None, http=None, device_flow_info=None):
+        """Exchanges a code for OAuth2Credentials.
+
+        Args:
+            code: string, a dict-like object, or None. For a non-device
+                  flow, this is either the response code as a string, or a
+                  dictionary of query parameters to the redirect_uri. For a
+                  device flow, this should be None.
+            http: httplib2.Http, optional http instance to use when fetching
+                  credentials.
+            device_flow_info: DeviceFlowInfo, return value from step1 in the
+                              case of a device flow.
+
+        Returns:
+            An OAuth2Credentials object that can be used to authorize requests.
+
+        Raises:
+            FlowExchangeError: if a problem occurred exchanging the code for a
+                               refresh_token.
+            ValueError: if code and device_flow_info are both provided or both
+                        missing.
+        """
+        if code is None and device_flow_info is None:
+            raise ValueError('No code or device_flow_info provided.')
+        if code is not None and device_flow_info is not None:
+            raise ValueError('Cannot provide both code and device_flow_info.')
+
+        if code is None:
+            code = device_flow_info.device_code
+        elif not isinstance(code, (six.string_types, six.binary_type)):
+            if 'code' not in code:
+                raise FlowExchangeError(code.get(
+                    'error', 'No code was supplied in the query parameters.'))
+            code = code['code']
+
+        post_data = {
+            'client_id': self.client_id,
+            'code': code,
+            'scope': self.scope,
+        }
+        if self.client_secret is not None:
+            post_data['client_secret'] = self.client_secret
+        if self._pkce:
+            post_data['code_verifier'] = self.code_verifier
+        if device_flow_info is not None:
+            post_data['grant_type'] = 'http://oauth.net/grant_type/device/1.0'
+        else:
+            post_data['grant_type'] = 'authorization_code'
+            post_data['redirect_uri'] = self.redirect_uri
+        body = urllib.parse.urlencode(post_data)
+        headers = {
+            'content-type': 'application/x-www-form-urlencoded',
+        }
+        if self.authorization_header is not None:
+            headers['Authorization'] = self.authorization_header
+        if self.user_agent is not None:
+            headers['user-agent'] = self.user_agent
+
+        if http is None:
+            http = transport.get_http_object()
+
+        resp, content = transport.request(
+            http, self.token_uri, method='POST', body=body, headers=headers)
+        d = _parse_exchange_token_response(content)
+        if resp.status == http_client.OK and 'access_token' in d:
+            access_token = d['access_token']
+            refresh_token = d.get('refresh_token', None)
+            if not refresh_token:
+                logger.info(
+                    'Received token response with no refresh_token. Consider '
+                    "reauthenticating with prompt='consent'.")
+            token_expiry = None
+            if 'expires_in' in d:
+                delta = datetime.timedelta(seconds=int(d['expires_in']))
+                token_expiry = delta + _UTCNOW()
+
+            extracted_id_token = None
+            id_token_jwt = None
+            if 'id_token' in d:
+                extracted_id_token = _extract_id_token(d['id_token'])
+                id_token_jwt = d['id_token']
+
+            logger.info('Successfully retrieved access token')
+            return OAuth2Credentials(
+                access_token, self.client_id, self.client_secret,
+                refresh_token, token_expiry, self.token_uri, self.user_agent,
+                revoke_uri=self.revoke_uri, id_token=extracted_id_token,
+                id_token_jwt=id_token_jwt, token_response=d, scopes=self.scope,
+                token_info_uri=self.token_info_uri)
+        else:
+            logger.info('Failed to retrieve access token: %s', content)
+            if 'error' in d:
+                # you never know what those providers got to say
+                error_msg = (str(d['error']) +
+                             str(d.get('error_description', '')))
+            else:
+                error_msg = 'Invalid response: {0}.'.format(str(resp.status))
+            raise FlowExchangeError(error_msg)
+
+
+@_helpers.positional(2)
+def flow_from_clientsecrets(filename, scope, redirect_uri=None,
+                            message=None, cache=None, login_hint=None,
+                            device_uri=None, pkce=None, code_verifier=None,
+                            prompt=None):
+    """Create a Flow from a clientsecrets file.
+
+    Will create the right kind of Flow based on the contents of the
+    clientsecrets file or will raise InvalidClientSecretsError for unknown
+    types of Flows.
+
+    Args:
+        filename: string, File name of client secrets.
+        scope: string or iterable of strings, scope(s) to request.
+        redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
+                      a non-web-based application, or a URI that handles the
+                      callback from the authorization server.
+        message: string, A friendly string to display to the user if the
+                 clientsecrets file is missing or invalid. If message is
+                 provided then sys.exit will be called in the case of an error.
+                 If message is not provided then
+                 clientsecrets.InvalidClientSecretsError will be raised.
+        cache: An optional cache service client that implements get() and set()
+               methods. See clientsecrets.loadfile() for details.
+        login_hint: string, Either an email address or domain. Passing this
+                    hint will either pre-fill the email box on the sign-in form
+                    or select the proper multi-login session, thereby
+                    simplifying the login flow.
+        device_uri: string, URI for device authorization endpoint. For
+                    convenience defaults to Google's endpoints but any
+                    OAuth 2.0 provider can be used.
+        pkce: boolean or None, forwarded to OAuth2WebServerFlow when set;
+              enables "Proof Key for Code Exchange". See RFC 7636.
+        code_verifier: bytestring or None, PKCE code verifier forwarded to
+                       OAuth2WebServerFlow when set; generated automatically
+                       during step1_get_authorize_url() if omitted.
+        prompt: string or None, 'prompt' query parameter forwarded to
+                OAuth2WebServerFlow when set (e.g. 'consent').
+
+    Returns:
+        A Flow object.
+
+    Raises:
+        UnknownClientSecretsFlowError: if the file describes an unknown kind of
+                                       Flow.
+        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
+                                                 invalid.
+    """
+    try:
+        client_type, client_info = clientsecrets.loadfile(filename,
+                                                          cache=cache)
+        if client_type in (clientsecrets.TYPE_WEB,
+                           clientsecrets.TYPE_INSTALLED):
+            constructor_kwargs = {
+                'redirect_uri': redirect_uri,
+                'auth_uri': client_info['auth_uri'],
+                'token_uri': client_info['token_uri'],
+                'login_hint': login_hint,
+            }
+            revoke_uri = client_info.get('revoke_uri')
+            optional = (
+                'revoke_uri',
+                'device_uri',
+                'pkce',
+                'code_verifier',
+                'prompt'
+            )
+            # Forward each optional value (looked up by name via locals())
+            # to the flow constructor only when it was actually supplied.
+            for param in optional:
+                if locals()[param] is not None:
+                    constructor_kwargs[param] = locals()[param]
+
+            return OAuth2WebServerFlow(
+                client_info['client_id'], client_info['client_secret'],
+                scope, **constructor_kwargs)
+
+    except clientsecrets.InvalidClientSecretsError as e:
+        if message is not None:
+            if e.args:
+                message = ('The client secrets were invalid: '
+                           '\n{0}\n{1}'.format(e, message))
+            sys.exit(message)
+        else:
+            raise
+    else:
+        # Reached only when the try body fell through without returning,
+        # i.e. the client type was neither 'web' nor 'installed'.
+        raise UnknownClientSecretsFlowError(
+            'This OAuth 2.0 flow is unsupported: {0!r}'.format(client_type))
diff --git a/contrib/python/oauth2client/py3/oauth2client/clientsecrets.py b/contrib/python/oauth2client/py3/oauth2client/clientsecrets.py
new file mode 100644
index 0000000000..1598142e87
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/clientsecrets.py
@@ -0,0 +1,173 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for reading OAuth 2.0 client secret files.
+
+A client_secrets.json file contains all the information needed to interact with
+an OAuth 2.0 protected service.
+"""
+
+import json
+
+import six
+
+
+# Properties that make a client_secrets.json file valid.
+TYPE_WEB = 'web'
+TYPE_INSTALLED = 'installed'
+
+# Validation schema per client type: 'required' lists properties that must be
+# present; 'string' lists properties that must not be left as template
+# placeholders (checked via a '[[' prefix in _validate_clientsecrets).
+VALID_CLIENT = {
+    TYPE_WEB: {
+        'required': [
+            'client_id',
+            'client_secret',
+            'redirect_uris',
+            'auth_uri',
+            'token_uri',
+        ],
+        'string': [
+            'client_id',
+            'client_secret',
+        ],
+    },
+    TYPE_INSTALLED: {
+        'required': [
+            'client_id',
+            'client_secret',
+            'redirect_uris',
+            'auth_uri',
+            'token_uri',
+        ],
+        'string': [
+            'client_id',
+            'client_secret',
+        ],
+    },
+}
+
+
+class Error(Exception):
+    """Base error for this module.
+
+    Callers can catch this type to handle any clientsecrets failure.
+    """
+
+
+class InvalidClientSecretsError(Error):
+    """Format of ClientSecrets file is invalid.
+
+    Also raised by _loadfile() when the file cannot be opened at all.
+    """
+
+
+def _validate_clientsecrets(clientsecrets_dict):
+    """Validate parsed client secrets from a file.
+
+    Args:
+        clientsecrets_dict: dict, a dictionary holding the client secrets.
+
+    Returns:
+        tuple, a string of the client type and the information parsed
+        from the file.
+
+    Raises:
+        InvalidClientSecretsError: if the dict is None, does not hold exactly
+            one client description, names an unknown client type, or is
+            missing/has placeholder values for required properties.
+    """
+    _INVALID_FILE_FORMAT_MSG = (
+        'Invalid file format. See '
+        'https://developers.google.com/api-client-library/'
+        'python/guide/aaa_client_secrets')
+
+    if clientsecrets_dict is None:
+        raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)
+    try:
+        # Unpack exactly one (type, info) pair; the trailing comma enforces
+        # that the JSON object has a single top-level property.
+        (client_type, client_info), = clientsecrets_dict.items()
+    except (ValueError, AttributeError):
+        raise InvalidClientSecretsError(
+            _INVALID_FILE_FORMAT_MSG + ' '
+            'Expected a JSON object with a single property for a "web" or '
+            '"installed" application')
+
+    if client_type not in VALID_CLIENT:
+        raise InvalidClientSecretsError(
+            'Unknown client type: {0}.'.format(client_type))
+
+    for prop_name in VALID_CLIENT[client_type]['required']:
+        if prop_name not in client_info:
+            raise InvalidClientSecretsError(
+                'Missing property "{0}" in a client type of "{1}".'.format(
+                    prop_name, client_type))
+    for prop_name in VALID_CLIENT[client_type]['string']:
+        # A '[[' prefix means the template placeholder was never filled in.
+        if client_info[prop_name].startswith('[['):
+            raise InvalidClientSecretsError(
+                'Property "{0}" is not configured.'.format(prop_name))
+    return client_type, client_info
+
+
+def load(fp):
+    """Read client secrets from an open file object and validate them.
+
+    Args:
+        fp: file-like object containing the client_secrets JSON document.
+
+    Returns:
+        (client_type, client_info) tuple, as from _validate_clientsecrets().
+    """
+    obj = json.load(fp)
+    return _validate_clientsecrets(obj)
+
+
+def loads(s):
+    """Parse client secrets from a JSON string and validate them.
+
+    Args:
+        s: string (or bytes), the client_secrets JSON document.
+
+    Returns:
+        (client_type, client_info) tuple, as from _validate_clientsecrets().
+    """
+    obj = json.loads(s)
+    return _validate_clientsecrets(obj)
+
+
+def _loadfile(filename):
+    """Read client secrets from the given file path and validate them.
+
+    Args:
+        filename: string, path to the client_secrets JSON file.
+
+    Returns:
+        (client_type, client_info) tuple, as from _validate_clientsecrets().
+
+    Raises:
+        InvalidClientSecretsError: if the file cannot be opened or the parsed
+            contents do not validate. (A malformed JSON body surfaces as the
+            ValueError raised by json.load.)
+    """
+    try:
+        with open(filename, 'r') as fp:
+            obj = json.load(fp)
+    except IOError as exc:
+        # Wrap I/O failures so callers only need to handle module errors.
+        raise InvalidClientSecretsError('Error opening file', exc.filename,
+                                        exc.strerror, exc.errno)
+    return _validate_clientsecrets(obj)
+
+
+def loadfile(filename, cache=None):
+    """Loading of client_secrets JSON file, optionally backed by a cache.
+
+    Typical cache storage would be App Engine memcache service,
+    but you can pass in any other cache client that implements
+    these methods:
+
+    * ``get(key, namespace=ns)``
+    * ``set(key, value, namespace=ns)``
+
+    Usage::
+
+        # without caching
+        client_type, client_info = loadfile('secrets.json')
+        # using App Engine memcache service
+        from google.appengine.api import memcache
+        client_type, client_info = loadfile('secrets.json', cache=memcache)
+
+    Args:
+        filename: string, Path to a client_secrets.json file on a filesystem.
+        cache: An optional cache service client that implements get() and set()
+               methods. If not specified, the file is always being loaded from
+               a filesystem.
+
+    Raises:
+        InvalidClientSecretsError: In case of a validation error or some
+                                   I/O failure. Can happen only on cache miss.
+
+    Returns:
+        (client_type, client_info) tuple, as _loadfile() normally would.
+        JSON contents is validated only during first load. Cache hits are not
+        validated.
+    """
+    _SECRET_NAMESPACE = 'oauth2client:secrets#ns'
+
+    if not cache:
+        return _loadfile(filename)
+
+    obj = cache.get(filename, namespace=_SECRET_NAMESPACE)
+    if obj is None:
+        # Cache miss: load and validate from disk, then store the already
+        # validated result keyed by filename.
+        client_type, client_info = _loadfile(filename)
+        obj = {client_type: client_info}
+        cache.set(filename, obj, namespace=_SECRET_NAMESPACE)
+
+    # The cached value is a single-entry dict; unpack it back to a tuple.
+    return next(six.iteritems(obj))
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/__init__.py b/contrib/python/oauth2client/py3/oauth2client/contrib/__init__.py
new file mode 100644
index 0000000000..ecfd06c968
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/__init__.py
@@ -0,0 +1,6 @@
+"""Contributed modules.
+
+Contrib contains modules that are not considered part of the core oauth2client
+library but provide additional functionality. These modules are intended to
+make it easier to use oauth2client.
+"""
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/_appengine_ndb.py b/contrib/python/oauth2client/py3/oauth2client/contrib/_appengine_ndb.py
new file mode 100644
index 0000000000..c863e8f4e7
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/_appengine_ndb.py
@@ -0,0 +1,163 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google App Engine utilities helper.
+
+Classes that directly require App Engine's ndb library. Provided
+as a separate module in case of failure to import ndb while
+other App Engine libraries are present.
+"""
+
+import logging
+
+from google.appengine.ext import ndb
+
+from oauth2client import client
+
+
+NDB_KEY = ndb.Key
+"""Key constant used by :mod:`oauth2client.contrib.appengine`."""
+
+NDB_MODEL = ndb.Model
+"""Model constant used by :mod:`oauth2client.contrib.appengine`."""
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class SiteXsrfSecretKeyNDB(ndb.Model):
+    """NDB Model for storage for the sites XSRF secret key.
+
+    Since this model uses the same kind as SiteXsrfSecretKey, it can be
+    used interchangeably. This simply provides an NDB model for interacting
+    with the same data the DB model interacts with.
+
+    There should only be one instance stored of this model, the one used
+    for the site.
+    """
+    # The site-wide XSRF secret; a single entity of this kind is expected.
+    secret = ndb.StringProperty()
+
+    @classmethod
+    def _get_kind(cls):
+        """Return the kind name for this class.
+
+        Overridden so this NDB model shares the datastore kind of the
+        legacy DB model SiteXsrfSecretKey.
+        """
+        return 'SiteXsrfSecretKey'
+
+
+class FlowNDBProperty(ndb.PickleProperty):
+    """App Engine NDB datastore Property for Flow.
+
+    Serves the same purpose as the DB FlowProperty, but for NDB models.
+    Since PickleProperty inherits from BlobProperty, the underlying
+    representation of the data in the datastore will be the same as in the
+    DB case.
+
+    Utility property that allows easy storage and retrieval of an
+    oauth2client.Flow
+    """
+
+    def _validate(self, value):
+        """Validates a value as a proper Flow object.
+
+        Args:
+            value: A value to be set on the property; None is accepted and
+                means "no flow stored".
+
+        Raises:
+            TypeError if the value is not an instance of Flow.
+        """
+        _LOGGER.info('validate: Got type %s', type(value))
+        if value is not None and not isinstance(value, client.Flow):
+            raise TypeError(
+                'Property {0} must be convertible to a flow '
+                'instance; received: {1}.'.format(self._name, value))
+
+
+class CredentialsNDBProperty(ndb.BlobProperty):
+    """App Engine NDB datastore Property for Credentials.
+
+    Serves the same purpose as the DB CredentialsProperty, but for NDB
+    models. Since CredentialsProperty stores data as a blob and this
+    inherits from BlobProperty, the data in the datastore will be the same
+    as in the DB case.
+
+    Utility property that allows easy storage and retrieval of Credentials
+    and subclasses.
+    """
+
+    def _validate(self, value):
+        """Validates a value as a proper credentials object.
+
+        Args:
+            value: A value to be set on the property; None is accepted and
+                means "no credentials stored".
+
+        Raises:
+            TypeError if the value is not an instance of Credentials.
+        """
+        _LOGGER.info('validate: Got type %s', type(value))
+        if value is not None and not isinstance(value, client.Credentials):
+            raise TypeError(
+                'Property {0} must be convertible to a credentials '
+                'instance; received: {1}.'.format(self._name, value))
+
+    def _to_base_type(self, value):
+        """Converts our validated value to a JSON serialized string.
+
+        Args:
+            value: A value to be set in the datastore.
+
+        Returns:
+            A JSON serialized version of the credential, else '' if value
+            is None.
+        """
+        if value is None:
+            # The empty string round-trips back to None in _from_base_type.
+            return ''
+        else:
+            return value.to_json()
+
+    def _from_base_type(self, value):
+        """Converts our stored JSON string back to the desired type.
+
+        Args:
+            value: A value from the datastore to be converted to the
+                desired type.
+
+        Returns:
+            A deserialized Credentials (or subclass) object, else None if
+            the value can't be parsed.
+        """
+        if not value:
+            return None
+        try:
+            # Uses the from_json method of the implied class of value
+            credentials = client.Credentials.new_from_json(value)
+        except ValueError:
+            # Unparseable blobs deserialize to None rather than raising.
+            credentials = None
+        return credentials
+
+
+class CredentialsNDBModel(ndb.Model):
+    """NDB Model for storage of OAuth 2.0 Credentials
+
+    Since this model uses the same kind as CredentialsModel and has a
+    property which can serialize and deserialize Credentials correctly, it
+    can be used interchangeably with a CredentialsModel to access, insert
+    and delete the same entities. This simply provides an NDB model for
+    interacting with the same data the DB model interacts with.
+
+    Storage of the model is keyed by the user.user_id().
+    """
+    # OAuth 2.0 credentials, stored as a JSON blob (see CredentialsNDBProperty).
+    credentials = CredentialsNDBProperty()
+
+    @classmethod
+    def _get_kind(cls):
+        """Return the kind name for this class.
+
+        Overridden so this NDB model shares the datastore kind of the
+        legacy DB CredentialsModel.
+        """
+        return 'CredentialsModel'
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/_metadata.py b/contrib/python/oauth2client/py3/oauth2client/contrib/_metadata.py
new file mode 100644
index 0000000000..564cd398da
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/_metadata.py
@@ -0,0 +1,118 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides helper methods for talking to the Compute Engine metadata server.
+
+See https://cloud.google.com/compute/docs/metadata
+"""
+
+import datetime
+import json
+import os
+
+from six.moves import http_client
+from six.moves.urllib import parse as urlparse
+
+from oauth2client import _helpers
+from oauth2client import client
+from oauth2client import transport
+
+
+# Base URL of the metadata server; the host can be overridden (e.g. for
+# testing) via the GCE_METADATA_ROOT environment variable.
+METADATA_ROOT = 'http://{}/computeMetadata/v1/'.format(
+    os.getenv('GCE_METADATA_ROOT', 'metadata.google.internal'))
+# Header the metadata server requires on every request.
+METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
+
+
+def get(http, path, root=METADATA_ROOT, recursive=None):
+    """Fetch a resource from the metadata server.
+
+    Args:
+        http: an object to be used to make HTTP requests.
+        path: A string indicating the resource to retrieve. For example,
+            'instance/service-accounts/default'
+        root: A string indicating the full path to the metadata server root.
+        recursive: A boolean indicating whether to do a recursive query of
+            metadata. See
+            https://cloud.google.com/compute/docs/metadata#aggcontents
+
+    Returns:
+        A dictionary if the metadata server returns JSON, otherwise a string.
+
+    Raises:
+        http_client.HTTPException if an error occurred while
+        retrieving metadata.
+    """
+    url = urlparse.urljoin(root, path)
+    url = _helpers._add_query_parameter(url, 'recursive', recursive)
+
+    response, content = transport.request(
+        http, url, headers=METADATA_HEADERS)
+
+    if response.status == http_client.OK:
+        decoded = _helpers._from_bytes(content)
+        # JSON responses are parsed; anything else is returned verbatim.
+        if response['content-type'] == 'application/json':
+            return json.loads(decoded)
+        else:
+            return decoded
+    else:
+        # NOTE(review): the two adjacent string literals below concatenate
+        # without a space ("...Compute Enginemetadata service...") — runtime
+        # message left unchanged in this documentation-only pass.
+        raise http_client.HTTPException(
+            'Failed to retrieve {0} from the Google Compute Engine'
+            'metadata service. Response:\n{1}'.format(url, response))
+
+
+def get_service_account_info(http, service_account='default'):
+    """Get information about a service account from the metadata server.
+
+    Args:
+        http: an object to be used to make HTTP requests.
+        service_account: An email specifying the service account for which to
+            look up information. Default will be information for the "default"
+            service account of the current compute engine instance.
+
+    Returns:
+        A dictionary with information about the specified service account,
+        for example:
+
+        {
+            'email': '...',
+            'scopes': ['scope', ...],
+            'aliases': ['default', '...']
+        }
+    """
+    # recursive=True makes the server return the whole subtree as a single
+    # JSON document instead of a directory-style listing.
+    return get(
+        http,
+        'instance/service-accounts/{0}/'.format(service_account),
+        recursive=True)
+
+
+def get_token(http, service_account='default'):
+    """Fetch an OAuth access token for the given service account.
+
+    Args:
+        http: an object to be used to make HTTP requests.
+        service_account: An email specifying the service account this token
+            should represent. Default will be a token for the "default" service
+            account of the current compute engine instance.
+
+    Returns:
+        A tuple of (access token, token expiration), where access token is the
+        access token as a string and token expiration is a datetime object
+        that indicates when the access token will expire.
+    """
+    token_json = get(
+        http,
+        'instance/service-accounts/{0}/token'.format(service_account))
+    # expires_in is a relative value in seconds; convert it to an absolute
+    # expiry datetime.
+    token_expiry = client._UTCNOW() + datetime.timedelta(
+        seconds=token_json['expires_in'])
+    return token_json['access_token'], token_expiry
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/appengine.py b/contrib/python/oauth2client/py3/oauth2client/contrib/appengine.py
new file mode 100644
index 0000000000..c1326eeb57
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/appengine.py
@@ -0,0 +1,910 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for Google App Engine
+
+Utilities for making it easier to use OAuth 2.0 on Google App Engine.
+"""
+
+import cgi
+import json
+import logging
+import os
+import pickle
+import threading
+
+from google.appengine.api import app_identity
+from google.appengine.api import memcache
+from google.appengine.api import users
+from google.appengine.ext import db
+from google.appengine.ext.webapp.util import login_required
+import webapp2 as webapp
+
+import oauth2client
+from oauth2client import _helpers
+from oauth2client import client
+from oauth2client import clientsecrets
+from oauth2client import transport
+from oauth2client.contrib import xsrfutil
+
+# This is a temporary fix for a Google internal issue.
+try:
+ from oauth2client.contrib import _appengine_ndb
+except ImportError: # pragma: NO COVER
+ _appengine_ndb = None
+
+
logger = logging.getLogger(__name__)

# Memcache namespace for every value this module caches.
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'

# Memcache key (within OAUTH2CLIENT_NAMESPACE) for the site XSRF secret;
# see xsrf_secret_key() below.
XSRF_MEMCACHE_ID = 'xsrf_secret_key'
+
# Re-export the ndb-based helpers at module scope.  When the ndb library was
# unavailable (the _appengine_ndb import above failed), publish None
# placeholders instead so this module still imports cleanly.
if _appengine_ndb is None:  # pragma: NO COVER
    CredentialsNDBModel = None
    CredentialsNDBProperty = None
    FlowNDBProperty = None
    _NDB_KEY = None
    _NDB_MODEL = None
    SiteXsrfSecretKeyNDB = None
else:
    CredentialsNDBModel = _appengine_ndb.CredentialsNDBModel
    CredentialsNDBProperty = _appengine_ndb.CredentialsNDBProperty
    FlowNDBProperty = _appengine_ndb.FlowNDBProperty
    _NDB_KEY = _appengine_ndb.NDB_KEY
    _NDB_MODEL = _appengine_ndb.NDB_MODEL
    SiteXsrfSecretKeyNDB = _appengine_ndb.SiteXsrfSecretKeyNDB
+
+
+def _safe_html(s):
+ """Escape text to make it safe to display.
+
+ Args:
+ s: string, The text to escape.
+
+ Returns:
+ The escaped text as a string.
+ """
+ return cgi.escape(s, quote=1).replace("'", '&#39;')
+
+
class SiteXsrfSecretKey(db.Model):
    """Storage for the sites XSRF secret key.

    There will only be one instance stored of this model, the one used for the
    site.
    """
    # Hex-encoded random secret; written by xsrf_secret_key() using
    # _generate_new_xsrf_secret_key() when no key exists yet.
    secret = db.StringProperty()
+
+
+def _generate_new_xsrf_secret_key():
+ """Returns a random XSRF secret key."""
+ return os.urandom(16).encode("hex")
+
+
def xsrf_secret_key():
    """Return the secret key for use for XSRF protection.

    If the Site entity does not have a secret key, this method will also create
    one and persist it.

    Returns:
        The secret key.
    """
    # Fast path: the key is cached in memcache under this module's namespace.
    secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
    if not secret:
        # Load the one and only instance of SiteXsrfSecretKey.
        model = SiteXsrfSecretKey.get_or_insert(key_name='site')
        if not model.secret:
            # First ever use for this site: mint and persist a new secret.
            model.secret = _generate_new_xsrf_secret_key()
            model.put()
        secret = model.secret
        # Best-effort write-through; memcache.add() is a no-op if another
        # request populated the cache first.
        memcache.add(XSRF_MEMCACHE_ID, secret,
                     namespace=OAUTH2CLIENT_NAMESPACE)

    return str(secret)
+
+
class AppAssertionCredentials(client.AssertionCredentials):
    """Credentials object for App Engine Assertion Grants

    This object will allow an App Engine application to identify itself to
    Google and other OAuth 2.0 servers that can verify assertions. It can be
    used for the purpose of accessing data stored under an account assigned to
    the App Engine application itself.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.
    """

    @_helpers.positional(2)
    def __init__(self, scope, **kwargs):
        """Constructor for AppAssertionCredentials

        Args:
            scope: string or iterable of strings, scope(s) of the credentials
                being requested.
            **kwargs: optional keyword args, including:
                service_account_id: service account id of the application. If
                    None or unspecified, the default service account for the
                    app is used.
        """
        self.scope = _helpers.scopes_to_string(scope)
        # Keep the raw kwargs so create_scoped() can clone this credential
        # with the same service account settings.
        self._kwargs = kwargs
        self.service_account_id = kwargs.get('service_account_id', None)
        # Resolved lazily by the service_account_email property below.
        self._service_account_email = None

        # Assertion type is no longer used, but still in the
        # parent class signature.
        super(AppAssertionCredentials, self).__init__(None)

    @classmethod
    def from_json(cls, json_data):
        # Only the scope is restored; service_account_id and other kwargs are
        # not round-tripped through JSON.
        data = json.loads(json_data)
        return AppAssertionCredentials(data['scope'])

    def _refresh(self, http):
        """Refreshes the access token.

        Since the underlying App Engine app_identity implementation does its
        own caching we can skip all the storage hoops and just to a refresh
        using the API.

        Args:
            http: unused HTTP object

        Raises:
            AccessTokenRefreshError: When the refresh fails.
        """
        try:
            scopes = self.scope.split()
            (token, _) = app_identity.get_access_token(
                scopes, service_account_id=self.service_account_id)
        except app_identity.Error as e:
            # Re-raise as the oauth2client error type callers expect.
            raise client.AccessTokenRefreshError(str(e))
        self.access_token = token

    @property
    def serialization_data(self):
        raise NotImplementedError('Cannot serialize credentials '
                                  'for Google App Engine.')

    def create_scoped_required(self):
        # True when this credential was constructed without any scope.
        return not self.scope

    def create_scoped(self, scopes):
        return AppAssertionCredentials(scopes, **self._kwargs)

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        Implements abstract method
        :meth:`oauth2client.client.AssertionCredentials.sign_blob`.

        Args:
            blob: bytes, Message to be signed.

        Returns:
            tuple, A pair of the private key ID used to sign the blob and
            the signed contents.
        """
        return app_identity.sign_blob(blob)

    @property
    def service_account_email(self):
        """Get the email for the current service account.

        Returns:
            string, The email associated with the Google App Engine
            service account.
        """
        if self._service_account_email is None:
            # Cache the app_identity lookup for subsequent accesses.
            self._service_account_email = (
                app_identity.get_service_account_name())
        return self._service_account_email
+
+
class FlowProperty(db.Property):
    """App Engine datastore Property for Flow.

    Utility property that allows easy storage and retrieval of an
    oauth2client.Flow
    """

    # The user-visible value type held by this property.
    data_type = client.Flow

    def get_value_for_datastore(self, model_instance):
        # Serialize the Flow with pickle before handing it to the datastore.
        flow = super(FlowProperty, self).get_value_for_datastore(
            model_instance)
        return db.Blob(pickle.dumps(flow))

    def make_value_from_datastore(self, value):
        # Inverse of get_value_for_datastore(); None passes through.
        return None if value is None else pickle.loads(value)

    def validate(self, value):
        if value is None or isinstance(value, client.Flow):
            return super(FlowProperty, self).validate(value)
        raise db.BadValueError(
            'Property {0} must be convertible '
            'to a FlowThreeLegged instance ({1})'.format(self.name, value))

    def empty(self, value):
        # Any falsy value counts as empty.
        return not value
+
+
class CredentialsProperty(db.Property):
    """App Engine datastore Property for Credentials.

    Utility property that allows easy storage and retrieval of
    oauth2client.Credentials
    """

    # Tell what the user type is.
    data_type = client.Credentials

    # For writing to datastore.
    def get_value_for_datastore(self, model_instance):
        # Lazy %-style args: the repr is only built if the record is emitted.
        logger.info("get: Got type %s", type(model_instance))
        cred = super(CredentialsProperty, self).get_value_for_datastore(
            model_instance)
        # Store absent credentials as an empty blob.
        if cred is None:
            cred = ''
        else:
            cred = cred.to_json()
        return db.Blob(cred)

    # For reading from datastore.
    def make_value_from_datastore(self, value):
        logger.info("make: Got type %s", type(value))
        # Both missing (None) and empty serialized values mean "no
        # credentials stored".
        if not value:
            return None
        try:
            credentials = client.Credentials.new_from_json(value)
        except ValueError:
            # Unparseable JSON is treated as absent credentials rather than
            # an error.
            credentials = None
        return credentials

    def validate(self, value):
        value = super(CredentialsProperty, self).validate(value)
        logger.info("validate: Got type %s", type(value))
        if value is not None and not isinstance(value, client.Credentials):
            raise db.BadValueError(
                'Property {0} must be convertible '
                'to a Credentials instance ({1})'.format(self.name, value))
        return value
+
+
class StorageByKeyName(client.Storage):
    """Store and retrieve a credential to and from the App Engine datastore.

    This Storage helper presumes the Credentials have been stored as a
    CredentialsProperty or CredentialsNDBProperty on a datastore model class,
    and that entities are stored by key_name.
    """

    @_helpers.positional(4)
    def __init__(self, model, key_name, property_name, cache=None, user=None):
        """Constructor for Storage.

        Args:
            model: db.Model or ndb.Model, model class
            key_name: string, key name for the entity that has the credentials
            property_name: string, name of the property that is a
                           CredentialsProperty or CredentialsNDBProperty.
            cache: memcache, a write-through cache to put in front of the
                   datastore. If the model you are using is an NDB model, using
                   a cache will be redundant since the model uses an instance
                   cache and memcache for you.
            user: users.User object, optional. Can be used to grab user ID as a
                  key_name if no key name is specified.
        """
        super(StorageByKeyName, self).__init__()

        if key_name is None:
            if user is None:
                raise ValueError('StorageByKeyName called with no '
                                 'key name or user.')
            # Fall back to the user's ID as the entity key name.
            key_name = user.user_id()

        self._model = model
        self._key_name = key_name
        self._property_name = property_name
        self._cache = cache

    def _is_ndb(self):
        """Determine whether the model of the instance is an NDB model.

        Returns:
            Boolean indicating whether or not the model is an NDB or DB model.
        """
        # issubclass will fail if one of the arguments is not a class, only
        # need worry about new-style classes since ndb and db models are
        # new-style
        if isinstance(self._model, type):
            if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
                return True
            elif issubclass(self._model, db.Model):
                return False

        raise TypeError(
            'Model class not an NDB or DB model: {0}.'.format(self._model))

    def _get_entity(self):
        """Retrieve entity from datastore.

        Uses a different model method for db or ndb models.

        Returns:
            Instance of the model corresponding to the current storage object
            and stored using the key name of the storage object.
        """
        if self._is_ndb():
            return self._model.get_by_id(self._key_name)
        else:
            return self._model.get_by_key_name(self._key_name)

    def _delete_entity(self):
        """Delete entity from datastore.

        Attempts to delete using the key_name stored on the object, whether or
        not the given key is in the datastore.
        """
        if self._is_ndb():
            _NDB_KEY(self._model, self._key_name).delete()
        else:
            entity_key = db.Key.from_path(self._model.kind(), self._key_name)
            db.delete(entity_key)

    @db.non_transactional(allow_existing=True)
    def locked_get(self):
        """Retrieve Credential from datastore.

        Returns:
            oauth2client.Credentials
        """
        credentials = None
        if self._cache:
            # NOTE: this local name shadows the module-level json import for
            # the rest of this method.
            json = self._cache.get(self._key_name)
            if json:
                credentials = client.Credentials.new_from_json(json)
        if credentials is None:
            entity = self._get_entity()
            if entity is not None:
                credentials = getattr(entity, self._property_name)
                # BUGFIX: the write-through below must only run once an entity
                # (and hence credentials) was actually found; previously it was
                # a sibling of the entity check, so a missing entity with a
                # cache configured crashed on None.to_json().
                if self._cache:
                    self._cache.set(self._key_name, credentials.to_json())

        if credentials and hasattr(credentials, 'set_store'):
            credentials.set_store(self)
        return credentials

    @db.non_transactional(allow_existing=True)
    def locked_put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
            credentials: Credentials, the credentials to store.
        """
        entity = self._model.get_or_insert(self._key_name)
        setattr(entity, self._property_name, credentials)
        entity.put()
        if self._cache:
            # Keep the cached JSON in sync with the datastore.
            self._cache.set(self._key_name, credentials.to_json())

    @db.non_transactional(allow_existing=True)
    def locked_delete(self):
        """Delete Credential from datastore."""

        if self._cache:
            self._cache.delete(self._key_name)

        self._delete_entity()
+
+
class CredentialsModel(db.Model):
    """Storage for OAuth 2.0 Credentials

    Storage of the model is keyed by the user.user_id().
    """
    # Serialized to/from JSON in the datastore by CredentialsProperty.
    credentials = CredentialsProperty()
+
+
def _build_state_value(request_handler, user):
    """Composes the value for the 'state' parameter.

    Packs the current request URI and an XSRF token into an opaque string that
    can be passed to the authentication server via the 'state' parameter.

    Args:
        request_handler: webapp.RequestHandler, The request.
        user: google.appengine.api.users.User, The current user.

    Returns:
        The state value as a string.
    """
    request_uri = request_handler.request.url
    # The XSRF token binds this user to this exact request URI.
    xsrf_token = xsrfutil.generate_token(
        xsrf_secret_key(), user.user_id(), action_id=str(request_uri))
    return request_uri + ':' + xsrf_token
+
+
def _parse_state_value(state, user):
    """Parse the value of the 'state' parameter.

    Parses the value and validates the XSRF token in the state parameter.

    Args:
        state: string, The value of the state parameter.
        user: google.appengine.api.users.User, The current user.

    Returns:
        The redirect URI, or None if XSRF token is not valid.
    """
    # The token follows the last ':'; the URI itself may contain ':'.
    uri, token = state.rsplit(':', 1)
    token_is_valid = xsrfutil.validate_token(
        xsrf_secret_key(), token, user.user_id(), action_id=uri)
    return uri if token_is_valid else None
+
+
class OAuth2Decorator(object):
    """Utility for making OAuth 2.0 easier.

    Instantiate and then use with oauth_required or oauth_aware
    as decorators on webapp.RequestHandler methods.

    ::

        decorator = OAuth2Decorator(
            client_id='837...ent.com',
            client_secret='Qh...wwI',
            scope='https://www.googleapis.com/auth/plus')

        class MainHandler(webapp.RequestHandler):
            @decorator.oauth_required
            def get(self):
                http = decorator.http()
                # http is authorized with the user's Credentials and can be
                # used in API calls

    """

    def set_credentials(self, credentials):
        # Credentials live in thread-local storage so one decorator instance
        # can serve concurrent requests without them seeing each other's
        # state.
        self._tls.credentials = credentials

    def get_credentials(self):
        """A thread local Credentials object.

        Returns:
            A client.Credentials object, or None if credentials hasn't been set
            in this thread yet, which may happen when calling has_credentials
            inside oauth_aware.
        """
        return getattr(self._tls, 'credentials', None)

    credentials = property(get_credentials, set_credentials)

    def set_flow(self, flow):
        # Thread-local for the same reason as set_credentials() above.
        self._tls.flow = flow

    def get_flow(self):
        """A thread local Flow object.

        Returns:
            A credentials.Flow object, or None if the flow hasn't been set in
            this thread yet, which happens in _create_flow() since Flows are
            created lazily.
        """
        return getattr(self._tls, 'flow', None)

    flow = property(get_flow, set_flow)

    @_helpers.positional(4)
    def __init__(self, client_id, client_secret, scope,
                 auth_uri=oauth2client.GOOGLE_AUTH_URI,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 user_agent=None,
                 message=None,
                 callback_path='/oauth2callback',
                 token_response_param=None,
                 _storage_class=StorageByKeyName,
                 _credentials_class=CredentialsModel,
                 _credentials_property_name='credentials',
                 **kwargs):
        """Constructor for OAuth2Decorator

        Args:
            client_id: string, client identifier.
            client_secret: string client secret.
            scope: string or iterable of strings, scope(s) of the credentials
                being requested.
            auth_uri: string, URI for authorization endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider
                can be used.
            token_uri: string, URI for token endpoint. For convenience defaults
                to Google's endpoints but any OAuth 2.0 provider can be
                used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0
                provider can be used.
            user_agent: string, User agent of your application, default to
                None.
            message: Message to display if there are problems with the
                OAuth 2.0 configuration. The message may contain HTML and
                will be presented on the web interface for any method that
                uses the decorator.
            callback_path: string, The absolute path to use as the callback
                URI. Note that this must match up with the URI given
                when registering the application in the APIs
                Console.
            token_response_param: string. If provided, the full JSON response
                to the access token request will be encoded
                and included in this query parameter in the
                callback URI. This is useful with providers
                (e.g. wordpress.com) that include extra
                fields that the client may want.
            _storage_class: "Protected" keyword argument not typically provided
                to this constructor. A storage class to aid in
                storing a Credentials object for a user in the
                datastore. Defaults to StorageByKeyName.
            _credentials_class: "Protected" keyword argument not typically
                provided to this constructor. A db or ndb Model
                class to hold credentials. Defaults to
                CredentialsModel.
            _credentials_property_name: "Protected" keyword argument not
                typically provided to this constructor.
                A string indicating the name of the
                field on the _credentials_class where a
                Credentials object will be stored.
                Defaults to 'credentials'.
            **kwargs: dict, Keyword arguments are passed along as kwargs to
                the OAuth2WebServerFlow constructor.
        """
        self._tls = threading.local()
        # These two assignments go through the flow/credentials properties
        # above, i.e. they initialize the thread-local slots.
        self.flow = None
        self.credentials = None
        self._client_id = client_id
        self._client_secret = client_secret
        self._scope = _helpers.scopes_to_string(scope)
        self._auth_uri = auth_uri
        self._token_uri = token_uri
        self._revoke_uri = revoke_uri
        self._user_agent = user_agent
        self._kwargs = kwargs
        self._message = message
        self._in_error = False
        self._callback_path = callback_path
        self._token_response_param = token_response_param
        self._storage_class = _storage_class
        self._credentials_class = _credentials_class
        self._credentials_property_name = _credentials_property_name

    def _display_error_message(self, request_handler):
        # Render the configured message (escaped) as a minimal HTML page.
        request_handler.response.out.write('<html><body>')
        request_handler.response.out.write(_safe_html(self._message))
        request_handler.response.out.write('</body></html>')

    def oauth_required(self, method):
        """Decorator that starts the OAuth 2.0 dance.

        Starts the OAuth dance for the logged in user if they haven't already
        granted access for this application.

        Args:
            method: callable, to be decorated method of a webapp.RequestHandler
                instance.
        """

        def check_oauth(request_handler, *args, **kwargs):
            if self._in_error:
                self._display_error_message(request_handler)
                return

            user = users.get_current_user()
            # Don't use @login_decorator as this could be used in a
            # POST request.
            if not user:
                request_handler.redirect(users.create_login_url(
                    request_handler.request.uri))
                return

            self._create_flow(request_handler)

            # Store the request URI in 'state' so we can use it later
            self.flow.params['state'] = _build_state_value(
                request_handler, user)
            self.credentials = self._storage_class(
                self._credentials_class, None,
                self._credentials_property_name, user=user).get()

            if not self.has_credentials():
                return request_handler.redirect(self.authorize_url())
            try:
                resp = method(request_handler, *args, **kwargs)
            except client.AccessTokenRefreshError:
                # Token refresh failed; restart the OAuth dance.
                return request_handler.redirect(self.authorize_url())
            finally:
                # Always clear the thread-local credentials once the wrapped
                # handler is done with them.
                self.credentials = None
            return resp

        return check_oauth

    def _create_flow(self, request_handler):
        """Create the Flow object.

        The Flow is calculated lazily since we don't know where this app is
        running until it receives a request, at which point redirect_uri can be
        calculated and then the Flow object can be constructed.

        Args:
            request_handler: webapp.RequestHandler, the request handler.
        """
        if self.flow is None:
            redirect_uri = request_handler.request.relative_url(
                self._callback_path)  # Usually /oauth2callback
            self.flow = client.OAuth2WebServerFlow(
                self._client_id, self._client_secret, self._scope,
                redirect_uri=redirect_uri, user_agent=self._user_agent,
                auth_uri=self._auth_uri, token_uri=self._token_uri,
                revoke_uri=self._revoke_uri, **self._kwargs)

    def oauth_aware(self, method):
        """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

        Does all the setup for the OAuth dance, but doesn't initiate it.
        This decorator is useful if you want to create a page that knows
        whether or not the user has granted access to this application.
        From within a method decorated with @oauth_aware the has_credentials()
        and authorize_url() methods can be called.

        Args:
            method: callable, to be decorated method of a webapp.RequestHandler
                instance.
        """

        def setup_oauth(request_handler, *args, **kwargs):
            if self._in_error:
                self._display_error_message(request_handler)
                return

            user = users.get_current_user()
            # Don't use @login_decorator as this could be used in a
            # POST request.
            if not user:
                request_handler.redirect(users.create_login_url(
                    request_handler.request.uri))
                return

            self._create_flow(request_handler)

            self.flow.params['state'] = _build_state_value(request_handler,
                                                           user)
            self.credentials = self._storage_class(
                self._credentials_class, None,
                self._credentials_property_name, user=user).get()
            try:
                # Unlike oauth_required, the handler runs whether or not
                # credentials exist.
                resp = method(request_handler, *args, **kwargs)
            finally:
                self.credentials = None
            return resp
        return setup_oauth

    def has_credentials(self):
        """True if for the logged in user there are valid access Credentials.

        Must only be called from within a webapp.RequestHandler subclassed
        method that had been decorated with either @oauth_required or
        @oauth_aware.
        """
        return self.credentials is not None and not self.credentials.invalid

    def authorize_url(self):
        """Returns the URL to start the OAuth dance.

        Must only be called from within a webapp.RequestHandler subclassed
        method that had been decorated with either @oauth_required or
        @oauth_aware.
        """
        url = self.flow.step1_get_authorize_url()
        return str(url)

    def http(self, *args, **kwargs):
        """Returns an authorized http instance.

        Must only be called from within an @oauth_required decorated method, or
        from within an @oauth_aware decorated method where has_credentials()
        returns True.

        Args:
            *args: Positional arguments passed to httplib2.Http constructor.
            **kwargs: Positional arguments passed to httplib2.Http constructor.
        """
        return self.credentials.authorize(
            transport.get_http_object(*args, **kwargs))

    @property
    def callback_path(self):
        """The absolute path where the callback will occur.

        Note this is the absolute path, not the absolute URI, that will be
        calculated by the decorator at runtime. See callback_handler() for how
        this should be used.

        Returns:
            The callback path as a string.
        """
        return self._callback_path

    def callback_handler(self):
        """RequestHandler for the OAuth 2.0 redirect callback.

        Usage::

            app = webapp.WSGIApplication([
                ('/index', MyIndexHandler),
                ...,
                (decorator.callback_path, decorator.callback_handler())
            ])

        Returns:
            A webapp.RequestHandler that handles the redirect back from the
            server during the OAuth 2.0 dance.
        """
        # Capture the decorator in the closure so the handler class below can
        # reach its flow/storage configuration.
        decorator = self

        class OAuth2Handler(webapp.RequestHandler):
            """Handler for the redirect_uri of the OAuth 2.0 dance."""

            @login_required
            def get(self):
                error = self.request.get('error')
                if error:
                    errormsg = self.request.get('error_description', error)
                    self.response.out.write(
                        'The authorization request failed: {0}'.format(
                            _safe_html(errormsg)))
                else:
                    user = users.get_current_user()
                    decorator._create_flow(self)
                    credentials = decorator.flow.step2_exchange(
                        self.request.params)
                    decorator._storage_class(
                        decorator._credentials_class, None,
                        decorator._credentials_property_name,
                        user=user).put(credentials)
                    # Recover (and XSRF-validate) the URI the user originally
                    # requested from the 'state' parameter.
                    redirect_uri = _parse_state_value(
                        str(self.request.get('state')), user)
                    if redirect_uri is None:
                        self.response.out.write(
                            'The authorization request failed')
                        return

                    if (decorator._token_response_param and
                            credentials.token_response):
                        resp_json = json.dumps(credentials.token_response)
                        redirect_uri = _helpers._add_query_parameter(
                            redirect_uri, decorator._token_response_param,
                            resp_json)

                    self.redirect(redirect_uri)

        return OAuth2Handler

    def callback_application(self):
        """WSGI application for handling the OAuth 2.0 redirect callback.

        If you need finer grained control use `callback_handler` which returns
        just the webapp.RequestHandler.

        Returns:
            A webapp.WSGIApplication that handles the redirect back from the
            server during the OAuth 2.0 dance.
        """
        return webapp.WSGIApplication([
            (self.callback_path, self.callback_handler())
        ])
+
+
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
    """An OAuth2Decorator that builds from a clientsecrets file.

    Uses a clientsecrets file as the source for all the information when
    constructing an OAuth2Decorator.

    ::

        decorator = OAuth2DecoratorFromClientSecrets(
            os.path.join(os.path.dirname(__file__), 'client_secrets.json')
            scope='https://www.googleapis.com/auth/plus')

        class MainHandler(webapp.RequestHandler):
            @decorator.oauth_required
            def get(self):
                http = decorator.http()
                # http is authorized with the user's Credentials and can be
                # used in API calls

    """

    @_helpers.positional(3)
    def __init__(self, filename, scope, message=None, cache=None, **kwargs):
        """Constructor

        Args:
            filename: string, File name of client secrets.
            scope: string or iterable of strings, scope(s) of the credentials
                being requested.
            message: string, A friendly string to display to the user if the
                clientsecrets file is missing or invalid. The message may
                contain HTML and will be presented on the web interface
                for any method that uses the decorator.
            cache: An optional cache service client that implements get() and
                set() methods. See clientsecrets.loadfile() for details.
            **kwargs: dict, Keyword arguments are passed along as kwargs to
                the OAuth2WebServerFlow constructor.
        """
        client_type, client_info = clientsecrets.loadfile(filename,
                                                          cache=cache)
        # Only web and installed application secrets are supported here.
        if client_type not in (clientsecrets.TYPE_WEB,
                               clientsecrets.TYPE_INSTALLED):
            raise clientsecrets.InvalidClientSecretsError(
                "OAuth2Decorator doesn't support this OAuth 2.0 flow.")

        # File-provided endpoints (and the message) override any same-named
        # entries in kwargs.
        constructor_kwargs = dict(kwargs)
        constructor_kwargs.update({
            'auth_uri': client_info['auth_uri'],
            'token_uri': client_info['token_uri'],
            'message': message,
        })
        revoke_uri = client_info.get('revoke_uri')
        if revoke_uri is not None:
            constructor_kwargs['revoke_uri'] = revoke_uri
        super(OAuth2DecoratorFromClientSecrets, self).__init__(
            client_info['client_id'], client_info['client_secret'],
            scope, **constructor_kwargs)
        # Fall back to a generic configuration message when none was given.
        if message is not None:
            self._message = message
        else:
            self._message = 'Please configure your application for OAuth 2.0.'
+
+
@_helpers.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
                                       message=None, cache=None):
    """Creates an OAuth2Decorator populated from a clientsecrets file.

    Args:
        filename: string, File name of client secrets.
        scope: string or list of strings, scope(s) of the credentials being
            requested.
        message: string, A friendly string to display to the user if the
            clientsecrets file is missing or invalid. The message may
            contain HTML and will be presented on the web interface for
            any method that uses the decorator.
        cache: An optional cache service client that implements get() and
            set() methods. See clientsecrets.loadfile() for details.

    Returns: An OAuth2Decorator
    """
    # Thin functional wrapper around the class-based constructor.
    return OAuth2DecoratorFromClientSecrets(
        filename, scope, message=message, cache=cache)
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/devshell.py b/contrib/python/oauth2client/py3/oauth2client/contrib/devshell.py
new file mode 100644
index 0000000000..691765f097
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/devshell.py
@@ -0,0 +1,152 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 utitilies for Google Developer Shell environment."""
+
+import datetime
+import json
+import os
+import socket
+
+from oauth2client import _helpers
+from oauth2client import client
+
# Environment variable holding the port of the local Developer Shell
# credential server; unset or 0 means no server (see _SendRecv()).
DEVSHELL_ENV = 'DEVSHELL_CLIENT_PORT'
+
+
class Error(Exception):
    """Errors for this module.

    Root of this module's exception hierarchy; catch this to handle any
    Developer Shell credential error.
    """
    pass
+
+
class CommunicationError(Error):
    """Errors for communication with the Developer Shell server.

    Raised by _SendRecv() when the server's response is malformed.
    """
+
+
class NoDevshellServer(Error):
    """Error when no Developer Shell server can be contacted.

    Raised when the DEVSHELL_CLIENT_PORT environment variable is unset or 0.
    """
+
+
# The request for credential information sent to the Developer Shell client
# socket is always an empty PBLite-formatted JSON array, so just define it as
# a constant.
CREDENTIAL_INFO_REQUEST_JSON = '[]'
+
+
class CredentialInfoResponse(object):
    """Credential information response from Developer Shell server.

    The credential information response from Developer Shell socket is a
    PBLite-formatted JSON array with fields encoded by their index in the
    array:

    * Index 0 - user email
    * Index 1 - default project ID. None if the project context is not known.
    * Index 2 - OAuth2 access token. None if there is no valid auth context.
    * Index 3 - Seconds until the access token expires. None if not present.
    """

    def __init__(self, json_string):
        """Initialize the response data from JSON PBLite array."""
        fields = json.loads(json_string)
        if not isinstance(fields, list):
            raise ValueError('Not a list: ' + str(fields))
        # Entries missing from the array default to None.
        padded = fields + [None] * (4 - len(fields))
        self.user_email = padded[0]
        self.project_id = padded[1]
        self.access_token = padded[2]
        self.expires_in = padded[3]
+
+
def _SendRecv():
    """Communicate with the Developer Shell server socket.

    Returns:
        A CredentialInfoResponse built from the server's reply.

    Raises:
        NoDevshellServer: if DEVSHELL_CLIENT_PORT is unset or 0.
        CommunicationError: if the reply does not carry a length prefix in
            its first six bytes.
    """

    port = int(os.getenv(DEVSHELL_ENV, 0))
    if port == 0:
        raise NoDevshellServer()

    sock = socket.socket()
    sock.connect(('localhost', port))

    # Wire format in both directions: "<payload length>\n<payload>".
    data = CREDENTIAL_INFO_REQUEST_JSON
    msg = '{0}\n{1}'.format(len(data), data)
    sock.sendall(_helpers._to_bytes(msg, encoding='utf-8'))

    # Assumes the ASCII length prefix and its newline arrive within the first
    # 6 bytes; anything after the newline is already payload.
    header = sock.recv(6).decode()
    if '\n' not in header:
        raise CommunicationError('saw no newline in the first 6 bytes')
    len_str, json_str = header.split('\n', 1)
    to_read = int(len_str) - len(json_str)
    if to_read > 0:
        # MSG_WAITALL blocks until the rest of the payload has arrived.
        json_str += sock.recv(to_read, socket.MSG_WAITALL).decode()

    return CredentialInfoResponse(json_str)
+
+
class DevshellCredentials(client.GoogleCredentials):
    """Credentials object for Google Developer Shell environment.

    This object will allow a Google Developer Shell session to identify its
    user to Google and other OAuth 2.0 servers that can verify assertions. It
    can be used for the purpose of accessing data stored under the user
    account.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.
    """

    def __init__(self, user_agent=None):
        super(DevshellCredentials, self).__init__(
            None,  # access_token, initialized below
            None,  # client_id
            None,  # client_secret
            None,  # refresh_token
            None,  # token_expiry
            None,  # token_uri
            user_agent)
        # Populate access_token/token_expiry immediately from the Developer
        # Shell server.
        self._refresh(None)

    def _refresh(self, http):
        """Refreshes the access token.

        Args:
            http: unused HTTP object
        """
        # The full server reply is kept so user_email/project_id below can
        # report the values from the most recent refresh.
        self.devshell_response = _SendRecv()
        self.access_token = self.devshell_response.access_token
        expires_in = self.devshell_response.expires_in
        if expires_in is not None:
            # Convert the relative lifetime into an absolute expiry.
            delta = datetime.timedelta(seconds=expires_in)
            self.token_expiry = client._UTCNOW() + delta
        else:
            self.token_expiry = None

    @property
    def user_email(self):
        # Email reported by the Developer Shell server on the last refresh.
        return self.devshell_response.user_email

    @property
    def project_id(self):
        # Default project ID from the last refresh; may be None.
        return self.devshell_response.project_id

    @classmethod
    def from_json(cls, json_data):
        raise NotImplementedError(
            'Cannot load Developer Shell credentials from JSON.')

    @property
    def serialization_data(self):
        raise NotImplementedError(
            'Cannot serialize Developer Shell credentials.')
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/dictionary_storage.py b/contrib/python/oauth2client/py3/oauth2client/contrib/dictionary_storage.py
new file mode 100644
index 0000000000..6ee333fa7c
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/dictionary_storage.py
@@ -0,0 +1,65 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Dictionary storage for OAuth2 Credentials."""
+
+from oauth2client import client
+
+
+class DictionaryStorage(client.Storage):
+    """Store and retrieve credentials to and from a dictionary-like object.
+
+    Args:
+        dictionary: A dictionary or dictionary-like object.
+        key: A string or other hashable. The credentials will be stored in
+             ``dictionary[key]``.
+        lock: An optional threading.Lock-like object. The lock will be
+              acquired before anything is written or read from the
+              dictionary.
+    """
+
+    def __init__(self, dictionary, key, lock=None):
+        """Construct a DictionaryStorage instance."""
+        super(DictionaryStorage, self).__init__(lock=lock)
+        self._dictionary = dictionary
+        self._key = key
+
+    def locked_get(self):
+        """Retrieve the credentials from the dictionary, if they exist.
+
+        Returns: A :class:`oauth2client.client.OAuth2Credentials` instance,
+            or None if nothing is stored under the key.
+        """
+        serialized = self._dictionary.get(self._key)
+
+        if serialized is None:
+            return None
+
+        credentials = client.OAuth2Credentials.from_json(serialized)
+        # Point the credentials back at this store so refreshed tokens are
+        # written through automatically.
+        credentials.set_store(self)
+
+        return credentials
+
+    def locked_put(self, credentials):
+        """Save the credentials to the dictionary.
+
+        Args:
+            credentials: A :class:`oauth2client.client.OAuth2Credentials`
+                instance.
+        """
+        # Stored as the JSON text produced by to_json(), matching the
+        # from_json() call in locked_get().
+        serialized = credentials.to_json()
+        self._dictionary[self._key] = serialized
+
+    def locked_delete(self):
+        """Remove the credentials from the dictionary, if they exist."""
+        self._dictionary.pop(self._key, None)
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/__init__.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/__init__.py
new file mode 100644
index 0000000000..644a8f9fb7
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/__init__.py
@@ -0,0 +1,489 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for the Django web framework.
+
+Provides Django views and helpers that make using the OAuth2 web server
+flow easier. It includes an ``oauth_required`` decorator to automatically
+ensure that user credentials are available, and an ``oauth_enabled`` decorator
+to check if the user has authorized, and helper shortcuts to create the
+authorization URL otherwise.
+
+There are two basic use cases supported. The first is using Google OAuth as the
+primary form of authentication, which is the simpler approach recommended
+for applications without their own user system.
+
+The second use case is adding Google OAuth credentials to an
+existing Django model containing a Django user field. Most of the
+configuration is the same, except for `GOOGLE_OAUTH_MODEL_STORAGE` in
+settings.py. See "Adding Credentials To An Existing Django User System" for
+usage differences.
+
+Only Django versions 1.8+ are supported.
+
+Configuration
+===============
+
+To configure, you'll need a set of OAuth2 web application credentials from
+`Google Developer's Console <https://console.developers.google.com/project/_/apiui/credential>`.
+
+Add the helper to your INSTALLED_APPS:
+
+.. code-block:: python
+ :caption: settings.py
+ :name: installed_apps
+
+    INSTALLED_APPS = (
+        # other apps
+        "django.contrib.sessions.middleware",
+        "oauth2client.contrib.django_util",
+    )
+
+This helper also requires the Django Session Middleware, so
+``django.contrib.sessions.middleware`` should be in INSTALLED_APPS as well.
+MIDDLEWARE or MIDDLEWARE_CLASSES (in Django versions <1.10) should also
+contain the string 'django.contrib.sessions.middleware.SessionMiddleware'.
+
+
+Add the client secrets created earlier to the settings. You can either
+specify the path to the credentials file in JSON format
+
+.. code-block:: python
+ :caption: settings.py
+ :name: secrets_file
+
+ GOOGLE_OAUTH2_CLIENT_SECRETS_JSON=/path/to/client-secret.json
+
+Or, directly configure the client Id and client secret.
+
+
+.. code-block:: python
+ :caption: settings.py
+ :name: secrets_config
+
+ GOOGLE_OAUTH2_CLIENT_ID=client-id-field
+ GOOGLE_OAUTH2_CLIENT_SECRET=client-secret-field
+
+By default, the default scopes for the required decorator only contains the
+``email`` scopes. You can change that default in the settings.
+
+.. code-block:: python
+ :caption: settings.py
+ :name: scopes
+
+ GOOGLE_OAUTH2_SCOPES = ('email', 'https://www.googleapis.com/auth/calendar',)
+
+By default, the decorators will add an `oauth` object to the Django request
+object, and include all of its state and helpers inside that object. If the
+`oauth` name conflicts with another usage, it can be changed
+
+.. code-block:: python
+ :caption: settings.py
+ :name: request_prefix
+
+ # changes request.oauth to request.google_oauth
+ GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'google_oauth'
+
+Add the oauth2 routes to your application's urls.py urlpatterns.
+
+.. code-block:: python
+ :caption: urls.py
+ :name: urls
+
+ from oauth2client.contrib.django_util.site import urls as oauth2_urls
+
+ urlpatterns += [url(r'^oauth2/', include(oauth2_urls))]
+
+To require OAuth2 credentials for a view, use the `oauth2_required` decorator.
+This creates a credentials object with an id_token, and allows you to create
+an `http` object to build service clients with. These are all attached to the
+request.oauth
+
+.. code-block:: python
+ :caption: views.py
+ :name: views_required
+
+ from oauth2client.contrib.django_util.decorators import oauth_required
+
+ @oauth_required
+ def requires_default_scopes(request):
+ email = request.oauth.credentials.id_token['email']
+ service = build(serviceName='calendar', version='v3',
+ http=request.oauth.http,
+ developerKey=API_KEY)
+ events = service.events().list(calendarId='primary').execute()['items']
+        return HttpResponse(
+            "email: {0} , calendar: {1}".format(email, str(events)))
+
+To make OAuth2 optional and provide an authorization link in your own views.
+
+.. code-block:: python
+ :caption: views.py
+ :name: views_enabled2
+
+ from oauth2client.contrib.django_util.decorators import oauth_enabled
+
+ @oauth_enabled
+ def optional_oauth2(request):
+ if request.oauth.has_credentials():
+ # this could be passed into a view
+ # request.oauth.http is also initialized
+ return HttpResponse("User email: {0}".format(
+ request.oauth.credentials.id_token['email']))
+ else:
+ return HttpResponse(
+ 'Here is an OAuth Authorize link: <a href="{0}">Authorize'
+ '</a>'.format(request.oauth.get_authorize_redirect()))
+
+If a view needs a scope not included in the default scopes specified in
+the settings, you can use [incremental auth](https://developers.google.com/identity/sign-in/web/incremental-auth)
+and specify additional scopes in the decorator arguments.
+
+.. code-block:: python
+ :caption: views.py
+ :name: views_required_additional_scopes
+
+ @oauth_enabled(scopes=['https://www.googleapis.com/auth/drive'])
+ def drive_required(request):
+ if request.oauth.has_credentials():
+ service = build(serviceName='drive', version='v2',
+ http=request.oauth.http,
+ developerKey=API_KEY)
+ events = service.files().list().execute()['items']
+ return HttpResponse(str(events))
+ else:
+ return HttpResponse(
+ 'Here is an OAuth Authorize link: <a href="{0}">Authorize'
+ '</a>'.format(request.oauth.get_authorize_redirect()))
+
+
+To provide a callback on authorization being completed, use the
+oauth2_authorized signal:
+
+.. code-block:: python
+ :caption: views.py
+ :name: signals
+
+ from oauth2client.contrib.django_util.signals import oauth2_authorized
+
+ def test_callback(sender, request, credentials, **kwargs):
+ print("Authorization Signal Received {0}".format(
+ credentials.id_token['email']))
+
+ oauth2_authorized.connect(test_callback)
+
+Adding Credentials To An Existing Django User System
+=====================================================
+
+As an alternative to storing the credentials in the session, the helper
+can be configured to store the fields on a Django model. This might be useful
+if you need to use the credentials outside the context of a user request. It
+also prevents the need for a logged in user to repeat the OAuth flow when
+starting a new session.
+
+To use, change ``settings.py``
+
+.. code-block:: python
+ :caption: settings.py
+ :name: storage_model_config
+
+ GOOGLE_OAUTH2_STORAGE_MODEL = {
+ 'model': 'path.to.model.MyModel',
+ 'user_property': 'user_id',
+ 'credentials_property': 'credential'
+ }
+
+Where ``path.to.model`` class is the fully qualified name of a
+``django.db.model`` class containing a ``django.contrib.auth.models.User``
+field with the name specified by `user_property` and a
+:class:`oauth2client.contrib.django_util.models.CredentialsField` with the name
+specified by `credentials_property`. For the sample configuration given,
+our model would look like
+
+.. code-block:: python
+ :caption: models.py
+ :name: storage_model_model
+
+ from django.contrib.auth.models import User
+ from oauth2client.contrib.django_util.models import CredentialsField
+
+ class MyModel(models.Model):
+ # ... other fields here ...
+ user = models.OneToOneField(User)
+ credential = CredentialsField()
+"""
+
+import importlib
+
+import django.conf
+from django.core import exceptions
+from django.core import urlresolvers
+from six.moves.urllib import parse
+
+from oauth2client import clientsecrets
+from oauth2client import transport
+from oauth2client.contrib import dictionary_storage
+from oauth2client.contrib.django_util import storage
+
+GOOGLE_OAUTH2_DEFAULT_SCOPES = ('email',)
+GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'oauth'
+
+
+def _load_client_secrets(filename):
+    """Loads client secrets from the given filename.
+
+    Args:
+        filename: The name of the file containing the JSON secret key.
+
+    Returns:
+        A 2-tuple, the first item containing the client id, and the second
+        item containing a client secret.
+
+    Raises:
+        ValueError: If the secrets file describes any flow type other than
+            the 'web' application type.
+    """
+    client_type, client_info = clientsecrets.loadfile(filename)
+
+    # Only web-application credentials support the redirect-based flow this
+    # Django helper implements; reject other client types early.
+    if client_type != clientsecrets.TYPE_WEB:
+        raise ValueError(
+            'The flow specified in {} is not supported, only the WEB flow '
+            'type is supported.'.format(client_type))
+    return client_info['client_id'], client_info['client_secret']
+
+
+def _get_oauth2_client_id_and_secret(settings_instance):
+    """Initializes client id and client secret based on the settings.
+
+    Args:
+        settings_instance: An instance of ``django.conf.settings``.
+
+    Returns:
+        A 2-tuple, the first item is the client id and the second
+        item is the client secret.
+
+    Raises:
+        exceptions.ImproperlyConfigured: If GOOGLE_OAUTH2_CLIENT_SECRETS_JSON
+            is not set and either GOOGLE_OAUTH2_CLIENT_ID or
+            GOOGLE_OAUTH2_CLIENT_SECRET is missing.
+    """
+    # A client-secrets JSON file takes precedence over inline settings.
+    secret_json = getattr(settings_instance,
+                          'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)
+    if secret_json is not None:
+        return _load_client_secrets(secret_json)
+    else:
+        client_id = getattr(settings_instance, "GOOGLE_OAUTH2_CLIENT_ID",
+                            None)
+        client_secret = getattr(settings_instance,
+                                "GOOGLE_OAUTH2_CLIENT_SECRET", None)
+        if client_id is not None and client_secret is not None:
+            return client_id, client_secret
+        else:
+            raise exceptions.ImproperlyConfigured(
+                "Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or "
+                "both GOOGLE_OAUTH2_CLIENT_ID and "
+                "GOOGLE_OAUTH2_CLIENT_SECRET in settings.py")
+
+
+def _get_storage_model():
+    """This configures whether the credentials will be stored in the session
+    or the Django ORM based on the settings. By default, the credentials
+    will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`
+    is found in the settings. Usually, the ORM storage is used to integrate
+    credentials into an existing Django user system.
+
+    Returns:
+        A tuple containing three strings, or None. If
+        ``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple
+        will contain the fully qualifed path of the `django.db.model`,
+        the name of the ``django.contrib.auth.models.User`` field on the
+        model, and the name of the
+        :class:`oauth2client.contrib.django_util.models.CredentialsField`
+        field on the model. If Django ORM storage is not configured,
+        this function returns None.
+    """
+    storage_model_settings = getattr(django.conf.settings,
+                                     'GOOGLE_OAUTH2_STORAGE_MODEL', None)
+    if storage_model_settings is not None:
+        # All three keys are required; a KeyError here indicates a
+        # misconfigured GOOGLE_OAUTH2_STORAGE_MODEL dict.
+        return (storage_model_settings['model'],
+                storage_model_settings['user_property'],
+                storage_model_settings['credentials_property'])
+    else:
+        # Session storage: callers unpack this as (None, None, None).
+        return None, None, None
+
+
+class OAuth2Settings(object):
+ """Initializes Django OAuth2 Helper Settings
+
+ This class loads the OAuth2 Settings from the Django settings, and then
+ provides those settings as attributes to the rest of the views and
+ decorators in the module.
+
+ Attributes:
+ scopes: A list of OAuth2 scopes that the decorators and views will use
+ as defaults.
+ request_prefix: The name of the attribute that the decorators use to
+ attach the UserOAuth2 object to the Django request object.
+ client_id: The OAuth2 Client ID.
+ client_secret: The OAuth2 Client Secret.
+ """
+
+ def __init__(self, settings_instance):
+ self.scopes = getattr(settings_instance, 'GOOGLE_OAUTH2_SCOPES',
+ GOOGLE_OAUTH2_DEFAULT_SCOPES)
+ self.request_prefix = getattr(settings_instance,
+ 'GOOGLE_OAUTH2_REQUEST_ATTRIBUTE',
+ GOOGLE_OAUTH2_REQUEST_ATTRIBUTE)
+ info = _get_oauth2_client_id_and_secret(settings_instance)
+ self.client_id, self.client_secret = info
+
+ # Django 1.10 deprecated MIDDLEWARE_CLASSES in favor of MIDDLEWARE
+ middleware_settings = getattr(settings_instance, 'MIDDLEWARE', None)
+ if middleware_settings is None:
+ middleware_settings = getattr(
+ settings_instance, 'MIDDLEWARE_CLASSES', None)
+ if middleware_settings is None:
+ raise exceptions.ImproperlyConfigured(
+ 'Django settings has neither MIDDLEWARE nor MIDDLEWARE_CLASSES'
+ 'configured')
+
+ if ('django.contrib.sessions.middleware.SessionMiddleware' not in
+ middleware_settings):
+ raise exceptions.ImproperlyConfigured(
+ 'The Google OAuth2 Helper requires session middleware to '
+ 'be installed. Edit your MIDDLEWARE_CLASSES or MIDDLEWARE '
+ 'setting to include \'django.contrib.sessions.middleware.'
+ 'SessionMiddleware\'.')
+ (self.storage_model, self.storage_model_user_property,
+ self.storage_model_credentials_property) = _get_storage_model()
+
+
+oauth2_settings = OAuth2Settings(django.conf.settings)
+
+_CREDENTIALS_KEY = 'google_oauth2_credentials'
+
+
+def get_storage(request):
+    """ Gets a Credentials storage object provided by the Django OAuth2 Helper
+    object.
+
+    Args:
+        request: Reference to the current request object.
+
+    Returns:
+        An :class:`oauth2.client.Storage` object.
+    """
+    storage_model = oauth2_settings.storage_model
+    user_property = oauth2_settings.storage_model_user_property
+    credentials_property = oauth2_settings.storage_model_credentials_property
+
+    if storage_model:
+        # ORM storage: resolve the dotted model path to a class at call time
+        # so the model module is only imported when actually needed.
+        module_name, class_name = storage_model.rsplit('.', 1)
+        module = importlib.import_module(module_name)
+        storage_model_class = getattr(module, class_name)
+        return storage.DjangoORMStorage(storage_model_class,
+                                        user_property,
+                                        request.user,
+                                        credentials_property)
+    else:
+        # use session
+        return dictionary_storage.DictionaryStorage(
+            request.session, key=_CREDENTIALS_KEY)
+
+
+def _redirect_with_params(url_name, *args, **kwargs):
+    """Helper method to create a redirect response with URL params.
+
+    This builds a redirect string that converts kwargs into a
+    query string.
+
+    Args:
+        url_name: The name of the url to redirect to.
+        args: Positional arguments forwarded to ``urlresolvers.reverse``.
+        kwargs: the query string param and their values to build.
+
+    Returns:
+        A properly formatted redirect string.
+    """
+    url = urlresolvers.reverse(url_name, args=args)
+    # doseq=True: sequence values (e.g. scope collections) are encoded as
+    # repeated query parameters rather than a single stringified sequence.
+    params = parse.urlencode(kwargs, True)
+    return "{0}?{1}".format(url, params)
+
+
+def _credentials_from_request(request):
+    """Gets the authorized credentials for this flow, if they exist.
+
+    Args:
+        request: The Django request object.
+
+    Returns:
+        The stored credentials, or None when ORM storage is configured but
+        the user is anonymous.
+    """
+    # ORM storage requires a logged in user
+    # NOTE(review): is_authenticated is called as a method (Django <1.10
+    # style); in Django 1.10+ it is a property -- confirm target version.
+    if (oauth2_settings.storage_model is None or
+            request.user.is_authenticated()):
+        return get_storage(request).get()
+    else:
+        return None
+
+
+class UserOAuth2(object):
+ """Class to create oauth2 objects on Django request objects containing
+ credentials and helper methods.
+ """
+
+ def __init__(self, request, scopes=None, return_url=None):
+ """Initialize the Oauth2 Object.
+
+ Args:
+ request: Django request object.
+ scopes: Scopes desired for this OAuth2 flow.
+ return_url: The url to return to after the OAuth flow is complete,
+ defaults to the request's current URL path.
+ """
+ self.request = request
+ self.return_url = return_url or request.get_full_path()
+ if scopes:
+ self._scopes = set(oauth2_settings.scopes) | set(scopes)
+ else:
+ self._scopes = set(oauth2_settings.scopes)
+
+ def get_authorize_redirect(self):
+ """Creates a URl to start the OAuth2 authorization flow."""
+ get_params = {
+ 'return_url': self.return_url,
+ 'scopes': self._get_scopes()
+ }
+
+ return _redirect_with_params('google_oauth:authorize', **get_params)
+
+ def has_credentials(self):
+ """Returns True if there are valid credentials for the current user
+ and required scopes."""
+ credentials = _credentials_from_request(self.request)
+ return (credentials and not credentials.invalid and
+ credentials.has_scopes(self._get_scopes()))
+
+ def _get_scopes(self):
+ """Returns the scopes associated with this object, kept up to
+ date for incremental auth."""
+ if _credentials_from_request(self.request):
+ return (self._scopes |
+ _credentials_from_request(self.request).scopes)
+ else:
+ return self._scopes
+
+ @property
+ def scopes(self):
+ """Returns the scopes associated with this OAuth2 object."""
+ # make sure previously requested custom scopes are maintained
+ # in future authorizations
+ return self._get_scopes()
+
+ @property
+ def credentials(self):
+ """Gets the authorized credentials for this flow, if they exist."""
+ return _credentials_from_request(self.request)
+
+ @property
+ def http(self):
+ """Helper: create HTTP client authorized with OAuth2 credentials."""
+ if self.has_credentials():
+ return self.credentials.authorize(transport.get_http_object())
+ return None
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/apps.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/apps.py
new file mode 100644
index 0000000000..86676b91a8
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/apps.py
@@ -0,0 +1,32 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Application Config For Django OAuth2 Helper.
+
+Django 1.7+ provides an
+[applications](https://docs.djangoproject.com/en/1.8/ref/applications/)
+API so that Django projects can introspect on installed applications using a
+stable API. This module exists to follow that convention.
+"""
+
+import sys
+
+# Django 1.7+ only supports Python 2.7+
+if sys.hexversion >= 0x02070000:  # pragma: NO COVER
+    from django.apps import AppConfig
+
+    class GoogleOAuth2HelperConfig(AppConfig):
+        """ App Config for Django Helper"""
+        # NOTE(review): this module actually lives at
+        # 'oauth2client.contrib.django_util' -- confirm the dotted path here
+        # matches what Django expects for this app.
+        name = 'oauth2client.django_util'
+        verbose_name = "Google OAuth2 Django Helper"
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/decorators.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/decorators.py
new file mode 100644
index 0000000000..e62e171071
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/decorators.py
@@ -0,0 +1,145 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Decorators for Django OAuth2 Flow.
+
+Contains two decorators, ``oauth_required`` and ``oauth_enabled``.
+
+``oauth_required`` will ensure that a user has an oauth object containing
+credentials associated with the request, and if not, redirect to the
+authorization flow.
+
+``oauth_enabled`` will attach the oauth2 object containing credentials if it
+exists. If it doesn't, the view will still render, but helper methods will be
+attached to start the oauth2 flow.
+"""
+
+from django import shortcuts
+import django.conf
+from six import wraps
+from six.moves.urllib import parse
+
+from oauth2client.contrib import django_util
+
+
+def oauth_required(decorated_function=None, scopes=None, **decorator_kwargs):
+ """ Decorator to require OAuth2 credentials for a view.
+
+
+ .. code-block:: python
+ :caption: views.py
+ :name: views_required_2
+
+
+ from oauth2client.django_util.decorators import oauth_required
+
+ @oauth_required
+ def requires_default_scopes(request):
+ email = request.credentials.id_token['email']
+ service = build(serviceName='calendar', version='v3',
+ http=request.oauth.http,
+ developerKey=API_KEY)
+ events = service.events().list(
+ calendarId='primary').execute()['items']
+ return HttpResponse(
+ "email: {0}, calendar: {1}".format(email, str(events)))
+
+ Args:
+ decorated_function: View function to decorate, must have the Django
+ request object as the first argument.
+ scopes: Scopes to require, will default.
+ decorator_kwargs: Can include ``return_url`` to specify the URL to
+ return to after OAuth2 authorization is complete.
+
+ Returns:
+ An OAuth2 Authorize view if credentials are not found or if the
+ credentials are missing the required scopes. Otherwise,
+ the decorated view.
+ """
+ def curry_wrapper(wrapped_function):
+ @wraps(wrapped_function)
+ def required_wrapper(request, *args, **kwargs):
+ if not (django_util.oauth2_settings.storage_model is None or
+ request.user.is_authenticated()):
+ redirect_str = '{0}?next={1}'.format(
+ django.conf.settings.LOGIN_URL,
+ parse.quote(request.path))
+ return shortcuts.redirect(redirect_str)
+
+ return_url = decorator_kwargs.pop('return_url',
+ request.get_full_path())
+ user_oauth = django_util.UserOAuth2(request, scopes, return_url)
+ if not user_oauth.has_credentials():
+ return shortcuts.redirect(user_oauth.get_authorize_redirect())
+ setattr(request, django_util.oauth2_settings.request_prefix,
+ user_oauth)
+ return wrapped_function(request, *args, **kwargs)
+
+ return required_wrapper
+
+ if decorated_function:
+ return curry_wrapper(decorated_function)
+ else:
+ return curry_wrapper
+
+
+def oauth_enabled(decorated_function=None, scopes=None, **decorator_kwargs):
+ """ Decorator to enable OAuth Credentials if authorized, and setup
+ the oauth object on the request object to provide helper functions
+ to start the flow otherwise.
+
+ .. code-block:: python
+ :caption: views.py
+ :name: views_enabled3
+
+ from oauth2client.django_util.decorators import oauth_enabled
+
+ @oauth_enabled
+ def optional_oauth2(request):
+ if request.oauth.has_credentials():
+ # this could be passed into a view
+ # request.oauth.http is also initialized
+ return HttpResponse("User email: {0}".format(
+ request.oauth.credentials.id_token['email'])
+ else:
+ return HttpResponse('Here is an OAuth Authorize link:
+ <a href="{0}">Authorize</a>'.format(
+ request.oauth.get_authorize_redirect()))
+
+
+ Args:
+ decorated_function: View function to decorate.
+ scopes: Scopes to require, will default.
+ decorator_kwargs: Can include ``return_url`` to specify the URL to
+ return to after OAuth2 authorization is complete.
+
+ Returns:
+ The decorated view function.
+ """
+ def curry_wrapper(wrapped_function):
+ @wraps(wrapped_function)
+ def enabled_wrapper(request, *args, **kwargs):
+ return_url = decorator_kwargs.pop('return_url',
+ request.get_full_path())
+ user_oauth = django_util.UserOAuth2(request, scopes, return_url)
+ setattr(request, django_util.oauth2_settings.request_prefix,
+ user_oauth)
+ return wrapped_function(request, *args, **kwargs)
+
+ return enabled_wrapper
+
+ if decorated_function:
+ return curry_wrapper(decorated_function)
+ else:
+ return curry_wrapper
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/models.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/models.py
new file mode 100644
index 0000000000..37cc697054
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/models.py
@@ -0,0 +1,82 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains classes used for the Django ORM storage."""
+
+import base64
+import pickle
+
+from django.db import models
+from django.utils import encoding
+import jsonpickle
+
+import oauth2client
+
+
+class CredentialsField(models.Field):
+    """Django ORM field for storing OAuth2 Credentials."""
+
+    def __init__(self, *args, **kwargs):
+        # Credentials are optional by default; allow NULL unless the caller
+        # explicitly overrides it.
+        if 'null' not in kwargs:
+            kwargs['null'] = True
+        super(CredentialsField, self).__init__(*args, **kwargs)
+
+    def get_internal_type(self):
+        # Stored in the database as binary data (base64 text, see
+        # get_prep_value below).
+        return 'BinaryField'
+
+    def from_db_value(self, value, expression, connection, context):
+        """Overrides ``models.Field`` method. This converts the value
+        returned from the database to an instance of this class.
+        """
+        # NOTE(review): the ``context`` argument was dropped from this hook
+        # in newer Django versions -- confirm supported Django range.
+        return self.to_python(value)
+
+    def to_python(self, value):
+        """Overrides ``models.Field`` method. This is used to convert
+        bytes (from serialization etc) to an instance of this class"""
+        if value is None:
+            return None
+        elif isinstance(value, oauth2client.client.Credentials):
+            # Already deserialized; pass through unchanged.
+            return value
+        else:
+            try:
+                # Preferred path: values written by get_prep_value() are
+                # base64-encoded jsonpickle documents.
+                return jsonpickle.decode(
+                    base64.b64decode(encoding.smart_bytes(value)).decode())
+            except ValueError:
+                # Fallback for rows written by older versions that used
+                # pickle. SECURITY NOTE: pickle.loads() can execute
+                # arbitrary code; this assumes database contents are
+                # trusted.
+                return pickle.loads(
+                    base64.b64decode(encoding.smart_bytes(value)))
+
+    def get_prep_value(self, value):
+        """Overrides ``models.Field`` method. This is used to convert
+        the value from an instances of this class to bytes that can be
+        inserted into the database.
+        """
+        if value is None:
+            return None
+        else:
+            # jsonpickle -> bytes -> base64 -> text, the inverse of the
+            # primary branch in to_python().
+            return encoding.smart_text(
+                base64.b64encode(jsonpickle.encode(value).encode()))
+
+    def value_to_string(self, obj):
+        """Convert the field value from the provided model to a string.
+
+        Used during model serialization.
+
+        Args:
+            obj: db.Model, model object
+
+        Returns:
+            string, the serialized field value
+        """
+        # NOTE(review): _get_val_from_obj was removed in Django 2.0
+        # (value_from_object is the replacement) -- confirm target Django.
+        value = self._get_val_from_obj(obj)
+        return self.get_prep_value(value)
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/signals.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/signals.py
new file mode 100644
index 0000000000..e9356b4dcb
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/signals.py
@@ -0,0 +1,28 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Signals for Google OAuth2 Helper.
+
+This module contains signals for Google OAuth2 Helper. Currently it only
+contains one, which fires when an OAuth2 authorization flow has completed.
+"""
+
+import django.dispatch
+
+"""Signal that fires when OAuth2 Flow has completed.
+It passes the Django request object and the OAuth2 credentials object to the
+ receiver.
+"""
+# Receivers connect via oauth2_authorized.connect(callback) and are invoked
+# with (sender, request, credentials).
+# NOTE(review): Signal's providing_args argument was deprecated in Django
+# 3.1 and removed in 4.0 -- confirm the supported Django range.
+oauth2_authorized = django.dispatch.Signal(
+    providing_args=["request", "credentials"])
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/site.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/site.py
new file mode 100644
index 0000000000..631f79bef4
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/site.py
@@ -0,0 +1,26 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains Django URL patterns used for OAuth2 flow."""
+
+from django.conf import urls
+
+from oauth2client.contrib.django_util import views
+
+# Named routes referenced elsewhere in the helper as
+# 'google_oauth:callback' and 'google_oauth:authorize'.
+urlpatterns = [
+    urls.url(r'oauth2callback/', views.oauth2_callback, name="callback"),
+    urls.url(r'oauth2authorize/', views.oauth2_authorize, name="authorize")
+]
+
+# (urlpatterns, app_namespace, instance_namespace) triple for include().
+urls = (urlpatterns, "google_oauth", "google_oauth")
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/storage.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/storage.py
new file mode 100644
index 0000000000..5682919bc0
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/storage.py
@@ -0,0 +1,81 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains a storage module that stores credentials using the Django ORM."""
+
+from oauth2client import client
+
+
+class DjangoORMStorage(client.Storage):
+ """Store and retrieve a single credential to and from the Django datastore.
+
+ This Storage helper presumes the Credentials
+ have been stored as a CredentialsField
+ on a db model class.
+ """
+
+ def __init__(self, model_class, key_name, key_value, property_name):
+ """Constructor for Storage.
+
+ Args:
+ model: string, fully qualified name of db.Model model class.
+ key_name: string, key name for the entity that has the credentials
+ key_value: string, key value for the entity that has the
+ credentials.
+ property_name: string, name of the property that is an
+ CredentialsProperty.
+ """
+ super(DjangoORMStorage, self).__init__()
+ self.model_class = model_class
+ self.key_name = key_name
+ self.key_value = key_value
+ self.property_name = property_name
+
+ def locked_get(self):
+ """Retrieve stored credential from the Django ORM.
+
+ Returns:
+ oauth2client.Credentials retrieved from the Django ORM, associated
+ with the ``model``, ``key_value``->``key_name`` pair used to query
+ for the model, and ``property_name`` identifying the
+ ``CredentialsProperty`` field, all of which are defined in the
+ constructor for this Storage object.
+
+ """
+ query = {self.key_name: self.key_value}
+ entities = self.model_class.objects.filter(**query)
+ if len(entities) > 0:
+ credential = getattr(entities[0], self.property_name)
+ if getattr(credential, 'set_store', None) is not None:
+ credential.set_store(self)
+ return credential
+ else:
+ return None
+
+ def locked_put(self, credentials):
+ """Write a Credentials to the Django datastore.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ entity, _ = self.model_class.objects.get_or_create(
+ **{self.key_name: self.key_value})
+
+ setattr(entity, self.property_name, credentials)
+ entity.save()
+
+ def locked_delete(self):
+ """Delete Credentials from the datastore."""
+ query = {self.key_name: self.key_value}
+ self.model_class.objects.filter(**query).delete()
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/views.py b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/views.py
new file mode 100644
index 0000000000..1835208a96
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/django_util/views.py
@@ -0,0 +1,193 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains the views used by the OAuth2 flows.
+
+There are two views used by the OAuth2 flow, the authorize and the callback
+view. The authorize view kicks off the three-legged OAuth flow, and the
+callback view validates the flow and if successful stores the credentials
+in the configured storage."""
+
+import hashlib
+import json
+import os
+
+from django import http
+from django import shortcuts
+from django.conf import settings
+from django.core import urlresolvers
+from django.shortcuts import redirect
+from django.utils import html
+import jsonpickle
+from six.moves.urllib import parse
+
+from oauth2client import client
+from oauth2client.contrib import django_util
+from oauth2client.contrib.django_util import get_storage
+from oauth2client.contrib.django_util import signals
+
+_CSRF_KEY = 'google_oauth2_csrf_token'
+_FLOW_KEY = 'google_oauth2_flow_{0}'
+
+
+def _make_flow(request, scopes, return_url=None):
+ """Creates a Web Server Flow
+
+ Args:
+ request: A Django request object.
+ scopes: the request oauth2 scopes.
+ return_url: The URL to return to after the flow is complete. Defaults
+ to the path of the current request.
+
+ Returns:
+ An OAuth2 flow object that has been stored in the session.
+ """
+ # Generate a CSRF token to prevent malicious requests.
+ csrf_token = hashlib.sha256(os.urandom(1024)).hexdigest()
+
+ request.session[_CSRF_KEY] = csrf_token
+
+ state = json.dumps({
+ 'csrf_token': csrf_token,
+ 'return_url': return_url,
+ })
+
+ flow = client.OAuth2WebServerFlow(
+ client_id=django_util.oauth2_settings.client_id,
+ client_secret=django_util.oauth2_settings.client_secret,
+ scope=scopes,
+ state=state,
+ redirect_uri=request.build_absolute_uri(
+ urlresolvers.reverse("google_oauth:callback")))
+
+ flow_key = _FLOW_KEY.format(csrf_token)
+ request.session[flow_key] = jsonpickle.encode(flow)
+ return flow
+
+
+def _get_flow_for_token(csrf_token, request):
+ """ Looks up the flow in session to recover information about requested
+ scopes.
+
+ Args:
+ csrf_token: The token passed in the callback request that should
+ match the one previously generated and stored in the request on the
+ initial authorization view.
+
+ Returns:
+ The OAuth2 Flow object associated with this flow based on the
+ CSRF token.
+ """
+ flow_pickle = request.session.get(_FLOW_KEY.format(csrf_token), None)
+ return None if flow_pickle is None else jsonpickle.decode(flow_pickle)
+
+
+def oauth2_callback(request):
+ """ View that handles the user's return from OAuth2 provider.
+
+ This view verifies the CSRF state and OAuth authorization code, and on
+ success stores the credentials obtained in the storage provider,
+ and redirects to the return_url specified in the authorize view and
+ stored in the session.
+
+ Args:
+ request: Django request.
+
+ Returns:
+ A redirect response back to the return_url.
+ """
+ if 'error' in request.GET:
+ reason = request.GET.get(
+ 'error_description', request.GET.get('error', ''))
+ reason = html.escape(reason)
+ return http.HttpResponseBadRequest(
+ 'Authorization failed {0}'.format(reason))
+
+ try:
+ encoded_state = request.GET['state']
+ code = request.GET['code']
+ except KeyError:
+ return http.HttpResponseBadRequest(
+ 'Request missing state or authorization code')
+
+ try:
+ server_csrf = request.session[_CSRF_KEY]
+ except KeyError:
+ return http.HttpResponseBadRequest(
+ 'No existing session for this flow.')
+
+ try:
+ state = json.loads(encoded_state)
+ client_csrf = state['csrf_token']
+ return_url = state['return_url']
+ except (ValueError, KeyError):
+ return http.HttpResponseBadRequest('Invalid state parameter.')
+
+ if client_csrf != server_csrf:
+ return http.HttpResponseBadRequest('Invalid CSRF token.')
+
+ flow = _get_flow_for_token(client_csrf, request)
+
+ if not flow:
+ return http.HttpResponseBadRequest('Missing Oauth2 flow.')
+
+ try:
+ credentials = flow.step2_exchange(code)
+ except client.FlowExchangeError as exchange_error:
+ return http.HttpResponseBadRequest(
+ 'An error has occurred: {0}'.format(exchange_error))
+
+ get_storage(request).put(credentials)
+
+ signals.oauth2_authorized.send(sender=signals.oauth2_authorized,
+ request=request, credentials=credentials)
+
+ return shortcuts.redirect(return_url)
+
+
+def oauth2_authorize(request):
+ """ View to start the OAuth2 Authorization flow.
+
+ This view starts the OAuth2 authorization flow. If scopes is passed in
+ as a GET URL parameter, it will authorize those scopes, otherwise the
+ default scopes specified in settings. The return_url can also be
+ specified as a GET parameter, otherwise the referer header will be
+ checked, and if that isn't found it will return to the root path.
+
+ Args:
+ request: The Django request object.
+
+ Returns:
+ A redirect to Google OAuth2 Authorization.
+ """
+ return_url = request.GET.get('return_url', None)
+ if not return_url:
+ return_url = request.META.get('HTTP_REFERER', '/')
+
+ scopes = request.GET.getlist('scopes', django_util.oauth2_settings.scopes)
+ # Model storage (but not session storage) requires a logged in user
+ if django_util.oauth2_settings.storage_model:
+ if not request.user.is_authenticated():
+ return redirect('{0}?next={1}'.format(
+ settings.LOGIN_URL, parse.quote(request.get_full_path())))
+ # This checks for the case where we ended up here because of a logged
+ # out user but we had credentials for it in the first place
+ else:
+ user_oauth = django_util.UserOAuth2(request, scopes, return_url)
+ if user_oauth.has_credentials():
+ return redirect(return_url)
+
+ flow = _make_flow(request=request, scopes=scopes, return_url=return_url)
+ auth_url = flow.step1_get_authorize_url()
+ return shortcuts.redirect(auth_url)
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/flask_util.py b/contrib/python/oauth2client/py3/oauth2client/contrib/flask_util.py
new file mode 100644
index 0000000000..fabd613b46
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/flask_util.py
@@ -0,0 +1,557 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for the Flask web framework
+
+Provides a Flask extension that makes using OAuth2 web server flow easier.
+The extension includes views that handle the entire auth flow and a
+``@required`` decorator to automatically ensure that user credentials are
+available.
+
+
+Configuration
+=============
+
+To configure, you'll need a set of OAuth2 web application credentials from the
+`Google Developer's Console <https://console.developers.google.com/project/_/\
+apiui/credential>`__.
+
+.. code-block:: python
+
+ from oauth2client.contrib.flask_util import UserOAuth2
+
+ app = Flask(__name__)
+
+ app.config['SECRET_KEY'] = 'your-secret-key'
+
+ app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'] = 'client_secrets.json'
+
+ # or, specify the client id and secret separately
+ app.config['GOOGLE_OAUTH2_CLIENT_ID'] = 'your-client-id'
+ app.config['GOOGLE_OAUTH2_CLIENT_SECRET'] = 'your-client-secret'
+
+ oauth2 = UserOAuth2(app)
+
+
+Usage
+=====
+
+Once configured, you can use the :meth:`UserOAuth2.required` decorator to
+ensure that credentials are available within a view.
+
+.. code-block:: python
+ :emphasize-lines: 3,7,10
+
+ # Note that app.route should be the outermost decorator.
+ @app.route('/needs_credentials')
+ @oauth2.required
+ def example():
+ # http is authorized with the user's credentials and can be used
+ # to make http calls.
+ http = oauth2.http()
+
+ # Or, you can access the credentials directly
+ credentials = oauth2.credentials
+
+If you want credentials to be optional for a view, you can leave the decorator
+off and use :meth:`UserOAuth2.has_credentials` to check.
+
+.. code-block:: python
+ :emphasize-lines: 3
+
+ @app.route('/optional')
+ def optional():
+ if oauth2.has_credentials():
+ return 'Credentials found!'
+ else:
+ return 'No credentials!'
+
+
+When credentials are available, you can use :attr:`UserOAuth2.email` and
+:attr:`UserOAuth2.user_id` to access information from the `ID Token
+<https://developers.google.com/identity/protocols/OpenIDConnect?hl=en>`__, if
+available.
+
+.. code-block:: python
+ :emphasize-lines: 4
+
+ @app.route('/info')
+ @oauth2.required
+ def info():
+ return "Hello, {} ({})".format(oauth2.email, oauth2.user_id)
+
+
+URLs & Triggering Authorization
+===============================
+
+The extension will add two new routes to your application:
+
+ * ``"oauth2.authorize"`` -> ``/oauth2authorize``
+ * ``"oauth2.callback"`` -> ``/oauth2callback``
+
+When configuring your OAuth2 credentials on the Google Developer's Console, be
+sure to add ``http[s]://[your-app-url]/oauth2callback`` as an authorized
+callback url.
+
+Typically you don't need to use these routes directly, just be sure to
+decorate any views that require credentials with ``@oauth2.required``. If
+needed, you can trigger authorization at any time by redirecting the user
+to the URL returned by :meth:`UserOAuth2.authorize_url`.
+
+.. code-block:: python
+ :emphasize-lines: 3
+
+ @app.route('/login')
+ def login():
+ return oauth2.authorize_url("/")
+
+
+Incremental Auth
+================
+
+This extension also supports `Incremental Auth <https://developers.google.com\
+/identity/protocols/OAuth2WebServer?hl=en#incrementalAuth>`__. To enable it,
+configure the extension with ``include_granted_scopes``.
+
+.. code-block:: python
+
+ oauth2 = UserOAuth2(app, include_granted_scopes=True)
+
+Then specify any additional scopes needed on the decorator, for example:
+
+.. code-block:: python
+ :emphasize-lines: 2,7
+
+ @app.route('/drive')
+ @oauth2.required(scopes=["https://www.googleapis.com/auth/drive"])
+ def requires_drive():
+ ...
+
+ @app.route('/calendar')
+ @oauth2.required(scopes=["https://www.googleapis.com/auth/calendar"])
+ def requires_calendar():
+ ...
+
+The decorator will ensure that the user has authorized all specified scopes
+before allowing them to access the view, and will also ensure that credentials
+do not lose any previously authorized scopes.
+
+
+Storage
+=======
+
+By default, the extension uses a Flask session-based storage solution. This
+means that credentials are only available for the duration of a session. It
+also means that with Flask's default configuration, the credentials will be
+visible in the session cookie. It's highly recommended to use a database-backed
+session and to use https whenever handling user credentials.
+
+If you need the credentials to be available longer than a user session or
+available outside of a request context, you will need to implement your own
+:class:`oauth2client.Storage`.
+"""
+
+from functools import wraps
+import hashlib
+import json
+import os
+import pickle
+
+try:
+ from flask import Blueprint
+ from flask import _app_ctx_stack
+ from flask import current_app
+ from flask import redirect
+ from flask import request
+ from flask import session
+ from flask import url_for
+ import markupsafe
+except ImportError: # pragma: NO COVER
+ raise ImportError('The flask utilities require flask 0.9 or newer.')
+
+import six.moves.http_client as httplib
+
+from oauth2client import client
+from oauth2client import clientsecrets
+from oauth2client import transport
+from oauth2client.contrib import dictionary_storage
+
+
+_DEFAULT_SCOPES = ('email',)
+_CREDENTIALS_KEY = 'google_oauth2_credentials'
+_FLOW_KEY = 'google_oauth2_flow_{0}'
+_CSRF_KEY = 'google_oauth2_csrf_token'
+
+
+def _get_flow_for_token(csrf_token):
+ """Retrieves the flow instance associated with a given CSRF token from
+ the Flask session."""
+ flow_pickle = session.pop(
+ _FLOW_KEY.format(csrf_token), None)
+
+ if flow_pickle is None:
+ return None
+ else:
+ return pickle.loads(flow_pickle)
+
+
+class UserOAuth2(object):
+ """Flask extension for making OAuth 2.0 easier.
+
+ Configuration values:
+
+ * ``GOOGLE_OAUTH2_CLIENT_SECRETS_FILE`` path to a client secrets json
+ file, obtained from the credentials screen in the Google Developers
+ console.
+ * ``GOOGLE_OAUTH2_CLIENT_ID`` the oauth2 credentials' client ID. This
+ is only needed if ``GOOGLE_OAUTH2_CLIENT_SECRETS_FILE`` is not
+ specified.
+ * ``GOOGLE_OAUTH2_CLIENT_SECRET`` the oauth2 credentials' client
+ secret. This is only needed if ``GOOGLE_OAUTH2_CLIENT_SECRETS_FILE``
+ is not specified.
+
+ If app is specified, all arguments will be passed along to init_app.
+
+ If no app is specified, then you should call init_app in your application
+ factory to finish initialization.
+ """
+
+ def __init__(self, app=None, *args, **kwargs):
+ self.app = app
+ if app is not None:
+ self.init_app(app, *args, **kwargs)
+
+ def init_app(self, app, scopes=None, client_secrets_file=None,
+ client_id=None, client_secret=None, authorize_callback=None,
+ storage=None, **kwargs):
+ """Initialize this extension for the given app.
+
+ Arguments:
+ app: A Flask application.
+ scopes: Optional list of scopes to authorize.
+ client_secrets_file: Path to a file containing client secrets. You
+ can also specify the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE config
+ value.
+ client_id: If not specifying a client secrets file, specify the
+ OAuth2 client id. You can also specify the
+ GOOGLE_OAUTH2_CLIENT_ID config value. You must also provide a
+ client secret.
+ client_secret: The OAuth2 client secret. You can also specify the
+ GOOGLE_OAUTH2_CLIENT_SECRET config value.
+ authorize_callback: A function that is executed after successful
+ user authorization.
+ storage: A oauth2client.client.Storage subclass for storing the
+ credentials. By default, this is a Flask session based storage.
+ kwargs: Any additional args are passed along to the Flow
+ constructor.
+ """
+ self.app = app
+ self.authorize_callback = authorize_callback
+ self.flow_kwargs = kwargs
+
+ if storage is None:
+ storage = dictionary_storage.DictionaryStorage(
+ session, key=_CREDENTIALS_KEY)
+ self.storage = storage
+
+ if scopes is None:
+ scopes = app.config.get('GOOGLE_OAUTH2_SCOPES', _DEFAULT_SCOPES)
+ self.scopes = scopes
+
+ self._load_config(client_secrets_file, client_id, client_secret)
+
+ app.register_blueprint(self._create_blueprint())
+
+ def _load_config(self, client_secrets_file, client_id, client_secret):
+ """Loads oauth2 configuration in order of priority.
+
+ Priority:
+ 1. Config passed to the constructor or init_app.
+ 2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app
+ config.
+ 3. Config passed via the GOOGLE_OAUTH2_CLIENT_ID and
+ GOOGLE_OAUTH2_CLIENT_SECRET app config.
+
+ Raises:
+ ValueError if no config could be found.
+ """
+ if client_id and client_secret:
+ self.client_id, self.client_secret = client_id, client_secret
+ return
+
+ if client_secrets_file:
+ self._load_client_secrets(client_secrets_file)
+ return
+
+ if 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE' in self.app.config:
+ self._load_client_secrets(
+ self.app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'])
+ return
+
+ try:
+ self.client_id, self.client_secret = (
+ self.app.config['GOOGLE_OAUTH2_CLIENT_ID'],
+ self.app.config['GOOGLE_OAUTH2_CLIENT_SECRET'])
+ except KeyError:
+ raise ValueError(
+ 'OAuth2 configuration could not be found. Either specify the '
+ 'client_secrets_file or client_id and client_secret or set '
+ 'the app configuration variables '
+ 'GOOGLE_OAUTH2_CLIENT_SECRETS_FILE or '
+ 'GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET.')
+
+ def _load_client_secrets(self, filename):
+ """Loads client secrets from the given filename."""
+ client_type, client_info = clientsecrets.loadfile(filename)
+ if client_type != clientsecrets.TYPE_WEB:
+ raise ValueError(
+ 'The flow specified in {0} is not supported.'.format(
+ client_type))
+
+ self.client_id = client_info['client_id']
+ self.client_secret = client_info['client_secret']
+
+ def _make_flow(self, return_url=None, **kwargs):
+ """Creates a Web Server Flow"""
+ # Generate a CSRF token to prevent malicious requests.
+ csrf_token = hashlib.sha256(os.urandom(1024)).hexdigest()
+
+ session[_CSRF_KEY] = csrf_token
+
+ state = json.dumps({
+ 'csrf_token': csrf_token,
+ 'return_url': return_url
+ })
+
+ kw = self.flow_kwargs.copy()
+ kw.update(kwargs)
+
+ extra_scopes = kw.pop('scopes', [])
+ scopes = set(self.scopes).union(set(extra_scopes))
+
+ flow = client.OAuth2WebServerFlow(
+ client_id=self.client_id,
+ client_secret=self.client_secret,
+ scope=scopes,
+ state=state,
+ redirect_uri=url_for('oauth2.callback', _external=True),
+ **kw)
+
+ flow_key = _FLOW_KEY.format(csrf_token)
+ session[flow_key] = pickle.dumps(flow)
+
+ return flow
+
+ def _create_blueprint(self):
+ bp = Blueprint('oauth2', __name__)
+ bp.add_url_rule('/oauth2authorize', 'authorize', self.authorize_view)
+ bp.add_url_rule('/oauth2callback', 'callback', self.callback_view)
+
+ return bp
+
+ def authorize_view(self):
+ """Flask view that starts the authorization flow.
+
+ Starts flow by redirecting the user to the OAuth2 provider.
+ """
+ args = request.args.to_dict()
+
+ # Scopes will be passed as multiple args, and to_dict() will only
+ # return one. So, we use getlist() to get all of the scopes.
+ args['scopes'] = request.args.getlist('scopes')
+
+ return_url = args.pop('return_url', None)
+ if return_url is None:
+ return_url = request.referrer or '/'
+
+ flow = self._make_flow(return_url=return_url, **args)
+ auth_url = flow.step1_get_authorize_url()
+
+ return redirect(auth_url)
+
+ def callback_view(self):
+ """Flask view that handles the user's return from OAuth2 provider.
+
+ On return, exchanges the authorization code for credentials and stores
+ the credentials.
+ """
+ if 'error' in request.args:
+ reason = request.args.get(
+ 'error_description', request.args.get('error', ''))
+ reason = markupsafe.escape(reason)
+ return ('Authorization failed: {0}'.format(reason),
+ httplib.BAD_REQUEST)
+
+ try:
+ encoded_state = request.args['state']
+ server_csrf = session[_CSRF_KEY]
+ code = request.args['code']
+ except KeyError:
+ return 'Invalid request', httplib.BAD_REQUEST
+
+ try:
+ state = json.loads(encoded_state)
+ client_csrf = state['csrf_token']
+ return_url = state['return_url']
+ except (ValueError, KeyError):
+ return 'Invalid request state', httplib.BAD_REQUEST
+
+ if client_csrf != server_csrf:
+ return 'Invalid request state', httplib.BAD_REQUEST
+
+ flow = _get_flow_for_token(server_csrf)
+
+ if flow is None:
+ return 'Invalid request state', httplib.BAD_REQUEST
+
+ # Exchange the auth code for credentials.
+ try:
+ credentials = flow.step2_exchange(code)
+ except client.FlowExchangeError as exchange_error:
+ current_app.logger.exception(exchange_error)
+ content = 'An error occurred: {0}'.format(exchange_error)
+ return content, httplib.BAD_REQUEST
+
+ # Save the credentials to the storage.
+ self.storage.put(credentials)
+
+ if self.authorize_callback:
+ self.authorize_callback(credentials)
+
+ return redirect(return_url)
+
+ @property
+ def credentials(self):
+ """The credentials for the current user or None if unavailable."""
+ ctx = _app_ctx_stack.top
+
+ if not hasattr(ctx, _CREDENTIALS_KEY):
+ ctx.google_oauth2_credentials = self.storage.get()
+
+ return ctx.google_oauth2_credentials
+
+ def has_credentials(self):
+ """Returns True if there are valid credentials for the current user."""
+ if not self.credentials:
+ return False
+ # Is the access token expired? If so, do we have a refresh token?
+ elif (self.credentials.access_token_expired and
+ not self.credentials.refresh_token):
+ return False
+ else:
+ return True
+
+ @property
+ def email(self):
+ """Returns the user's email address or None if there are no credentials.
+
+ The email address is provided by the current credentials' id_token.
+ This should not be used as unique identifier as the user can change
+ their email. If you need a unique identifier, use user_id.
+ """
+ if not self.credentials:
+ return None
+ try:
+ return self.credentials.id_token['email']
+ except KeyError:
+ current_app.logger.error(
+ 'Invalid id_token {0}'.format(self.credentials.id_token))
+
+ @property
+ def user_id(self):
+ """Returns a unique identifier for the user
+
+ Returns None if there are no credentials.
+
+ The id is provided by the current credentials' id_token.
+ """
+ if not self.credentials:
+ return None
+ try:
+ return self.credentials.id_token['sub']
+ except KeyError:
+ current_app.logger.error(
+ 'Invalid id_token {0}'.format(self.credentials.id_token))
+
+ def authorize_url(self, return_url, **kwargs):
+ """Creates a URL that can be used to start the authorization flow.
+
+ When the user is directed to the URL, the authorization flow will
+ begin. Once complete, the user will be redirected to the specified
+ return URL.
+
+ Any kwargs are passed into the flow constructor.
+ """
+ return url_for('oauth2.authorize', return_url=return_url, **kwargs)
+
+ def required(self, decorated_function=None, scopes=None,
+ **decorator_kwargs):
+ """Decorator to require OAuth2 credentials for a view.
+
+ If credentials are not available for the current user, then they will
+ be redirected to the authorization flow. Once complete, the user will
+ be redirected back to the original page.
+ """
+
+ def curry_wrapper(wrapped_function):
+ @wraps(wrapped_function)
+ def required_wrapper(*args, **kwargs):
+ return_url = decorator_kwargs.pop('return_url', request.url)
+
+ requested_scopes = set(self.scopes)
+ if scopes is not None:
+ requested_scopes |= set(scopes)
+ if self.has_credentials():
+ requested_scopes |= self.credentials.scopes
+
+ requested_scopes = list(requested_scopes)
+
+ # Does the user have credentials and does the credentials have
+ # all of the needed scopes?
+ if (self.has_credentials() and
+ self.credentials.has_scopes(requested_scopes)):
+ return wrapped_function(*args, **kwargs)
+ # Otherwise, redirect to authorization
+ else:
+ auth_url = self.authorize_url(
+ return_url,
+ scopes=requested_scopes,
+ **decorator_kwargs)
+
+ return redirect(auth_url)
+
+ return required_wrapper
+
+ if decorated_function:
+ return curry_wrapper(decorated_function)
+ else:
+ return curry_wrapper
+
+ def http(self, *args, **kwargs):
+ """Returns an authorized http instance.
+
+ Can only be called if there are valid credentials for the user, such
+ as inside of a view that is decorated with @required.
+
+ Args:
+ *args: Positional arguments passed to httplib2.Http constructor.
+ **kwargs: Positional arguments passed to httplib2.Http constructor.
+
+ Raises:
+ ValueError if no credentials are available.
+ """
+ if not self.credentials:
+ raise ValueError('No credentials available.')
+ return self.credentials.authorize(
+ transport.get_http_object(*args, **kwargs))
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/gce.py b/contrib/python/oauth2client/py3/oauth2client/contrib/gce.py
new file mode 100644
index 0000000000..aaab15ffce
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/gce.py
@@ -0,0 +1,156 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for Google Compute Engine
+
+Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
+"""
+
+import logging
+import warnings
+
+from six.moves import http_client
+
+from oauth2client import client
+from oauth2client.contrib import _metadata
+
+
+logger = logging.getLogger(__name__)
+
+_SCOPES_WARNING = """\
+You have requested explicit scopes to be used with a GCE service account.
+Using this argument will have no effect on the actual scopes for tokens
+requested. These scopes are set at VM instance creation time and
+can't be overridden in the request.
+"""
+
+
+class AppAssertionCredentials(client.AssertionCredentials):
+ """Credentials object for Compute Engine Assertion Grants
+
+ This object will allow a Compute Engine instance to identify itself to
+ Google and other OAuth 2.0 servers that can verify assertions. It can be
+ used for the purpose of accessing data stored under an account assigned to
+ the Compute Engine instance itself.
+
+ This credential does not require a flow to instantiate because it
+ represents a two legged flow, and therefore has all of the required
+ information to generate and refresh its own access tokens.
+
+ Note that :attr:`service_account_email` and :attr:`scopes`
+ will both return None until the credentials have been refreshed.
+ To check whether credentials have previously been refreshed use
+ :attr:`invalid`.
+ """
+
+ def __init__(self, email=None, *args, **kwargs):
+ """Constructor for AppAssertionCredentials
+
+ Args:
+ email: an email that specifies the service account to use.
+ Only necessary if using custom service accounts
+ (see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).
+ """
+ if 'scopes' in kwargs:
+ warnings.warn(_SCOPES_WARNING)
+ kwargs['scopes'] = None
+
+ # Assertion type is no longer used, but still in the
+ # parent class signature.
+ super(AppAssertionCredentials, self).__init__(None, *args, **kwargs)
+
+ self.service_account_email = email
+ self.scopes = None
+ self.invalid = True
+
+ @classmethod
+ def from_json(cls, json_data):
+ raise NotImplementedError(
+ 'Cannot serialize credentials for GCE service accounts.')
+
+ def to_json(self):
+ raise NotImplementedError(
+ 'Cannot serialize credentials for GCE service accounts.')
+
+ def retrieve_scopes(self, http):
+ """Retrieves the canonical list of scopes for this access token.
+
+ Overrides client.Credentials.retrieve_scopes. Fetches scopes info
+ from the metadata server.
+
+ Args:
+ http: httplib2.Http, an http object to be used to make the refresh
+ request.
+
+ Returns:
+ A set of strings containing the canonical list of scopes.
+ """
+ self._retrieve_info(http)
+ return self.scopes
+
+ def _retrieve_info(self, http):
+ """Retrieves service account info for invalid credentials.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+ """
+ if self.invalid:
+ info = _metadata.get_service_account_info(
+ http,
+ service_account=self.service_account_email or 'default')
+ self.invalid = False
+ self.service_account_email = info['email']
+ self.scopes = info['scopes']
+
+ def _refresh(self, http):
+ """Refreshes the access token.
+
+ Skip all the storage hoops and just refresh using the API.
+
+ Args:
+ http: an object to be used to make HTTP requests.
+
+ Raises:
+ HttpAccessTokenRefreshError: When the refresh fails.
+ """
+ try:
+ self._retrieve_info(http)
+ self.access_token, self.token_expiry = _metadata.get_token(
+ http, service_account=self.service_account_email)
+ except http_client.HTTPException as err:
+ raise client.HttpAccessTokenRefreshError(str(err))
+
+ @property
+ def serialization_data(self):
+ raise NotImplementedError(
+ 'Cannot serialize credentials for GCE service accounts.')
+
+ def create_scoped_required(self):
+ return False
+
+ def sign_blob(self, blob):
+ """Cryptographically sign a blob (of bytes).
+
+ This method is provided to support a common interface, but
+ the actual key used for a Google Compute Engine service account
+ is not available, so it can't be used to sign content.
+
+ Args:
+ blob: bytes, Message to be signed.
+
+ Raises:
+ NotImplementedError, always.
+ """
+ raise NotImplementedError(
+ 'Compute Engine service accounts cannot sign blobs')
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/keyring_storage.py b/contrib/python/oauth2client/py3/oauth2client/contrib/keyring_storage.py
new file mode 100644
index 0000000000..4af944881a
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/keyring_storage.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A keyring based Storage.
+
+A Storage for Credentials that uses the keyring module.
+"""
+
+import threading
+
+import keyring
+
+from oauth2client import client
+
+
+class Storage(client.Storage):
+ """Store and retrieve a single credential to and from the keyring.
+
+ To use this module you must have the keyring module installed. See
+ <http://pypi.python.org/pypi/keyring/>. This is an optional module and is
+ not installed with oauth2client by default because it does not work on all
+ the platforms that oauth2client supports, such as Google App Engine.
+
+ The keyring module <http://pypi.python.org/pypi/keyring/> is a
+ cross-platform library for access the keyring capabilities of the local
+ system. The user will be prompted for their keyring password when this
+ module is used, and the manner in which the user is prompted will vary per
+ platform.
+
+ Usage::
+
+        from oauth2client.contrib import keyring_storage
+
+ s = keyring_storage.Storage('name_of_application', 'user1')
+ credentials = s.get()
+
+ """
+
+ def __init__(self, service_name, user_name):
+ """Constructor.
+
+ Args:
+ service_name: string, The name of the service under which the
+ credentials are stored.
+ user_name: string, The name of the user to store credentials for.
+ """
+ super(Storage, self).__init__(lock=threading.Lock())
+ self._service_name = service_name
+ self._user_name = user_name
+
+ def locked_get(self):
+        """Retrieve Credential from the keyring.
+
+ Returns:
+ oauth2client.client.Credentials
+ """
+ credentials = None
+ content = keyring.get_password(self._service_name, self._user_name)
+
+ if content is not None:
+ try:
+ credentials = client.Credentials.new_from_json(content)
+ credentials.set_store(self)
+ except ValueError:
+ pass
+
+ return credentials
+
+ def locked_put(self, credentials):
+        """Write Credentials to the keyring.
+
+ Args:
+ credentials: Credentials, the credentials to store.
+ """
+ keyring.set_password(self._service_name, self._user_name,
+ credentials.to_json())
+
+ def locked_delete(self):
+        """Delete Credentials from the keyring.
+
+        Overwrites the stored entry with an empty string rather than
+        removing it from the keyring.
+ """
+ keyring.set_password(self._service_name, self._user_name, '')
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/multiprocess_file_storage.py b/contrib/python/oauth2client/py3/oauth2client/contrib/multiprocess_file_storage.py
new file mode 100644
index 0000000000..e9e8c8cd1d
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/multiprocess_file_storage.py
@@ -0,0 +1,355 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Multiprocess file credential storage.
+
+This module provides file-based storage that supports multiple credentials and
+cross-thread and process access.
+
+This module supersedes the functionality previously found in `multistore_file`.
+
+This module provides :class:`MultiprocessFileStorage` which:
+ * Is tied to a single credential via a user-specified key. This key can be
+ used to distinguish between multiple users, client ids, and/or scopes.
+ * Can be safely accessed and refreshed across threads and processes.
+
+Process & thread safety guarantees the following behavior:
+ * If one thread or process refreshes a credential, subsequent refreshes
+ from other processes will re-fetch the credentials from the file instead
+ of performing an http request.
+ * If two processes or threads attempt to refresh concurrently, only one
+ will be able to acquire the lock and refresh, with the deadlock caveat
+ below.
+    * The interprocess lock will not deadlock; instead, if a process can
+ not acquire the interprocess lock within ``INTERPROCESS_LOCK_DEADLINE``
+ it will allow refreshing the credential but will not write the updated
+      credential to disk. This logic happens during every lock cycle - if the
+ credentials are refreshed again it will retry locking and writing as
+ normal.
+
+Usage
+=====
+
+Before using the storage, you need to decide how you want to key the
+credentials. A few common strategies include:
+
+ * If you're storing credentials for multiple users in a single file, use
+ a unique identifier for each user as the key.
+ * If you're storing credentials for multiple client IDs in a single file,
+ use the client ID as the key.
+ * If you're storing multiple credentials for one user, use the scopes as
+ the key.
+ * If you have a complicated setup, use a compound key. For example, you
+ can use a combination of the client ID and scopes as the key.
+
+Create an instance of :class:`MultiprocessFileStorage` for each credential you
+want to store, for example::
+
+ filename = 'credentials'
+ key = '{}-{}'.format(client_id, user_id)
+ storage = MultiprocessFileStorage(filename, key)
+
+To store the credentials::
+
+ storage.put(credentials)
+
+If you're going to continue to use the credentials after storing them, be sure
+to call :func:`set_store`::
+
+ credentials.set_store(storage)
+
+To retrieve the credentials::
+
+    credentials = storage.get()
+
+"""
+
+import base64
+import json
+import logging
+import os
+import threading
+
+import fasteners
+from six import iteritems
+
+from oauth2client import _helpers
+from oauth2client import client
+
+
+#: The maximum amount of time, in seconds, to wait when acquiring the
+#: interprocess lock before falling back to read-only mode.
+INTERPROCESS_LOCK_DEADLINE = 1
+
+logger = logging.getLogger(__name__)
+_backends = {}
+_backends_lock = threading.Lock()
+
+
+def _create_file_if_needed(filename):
+    """Creates an empty file if it does not already exist.
+
+ Returns:
+ True if the file was created, False otherwise.
+ """
+ if os.path.exists(filename):
+ return False
+ else:
+ # Equivalent to "touch".
+ open(filename, 'a+b').close()
+ logger.info('Credential file {0} created'.format(filename))
+ return True
+
+
+def _load_credentials_file(credentials_file):
+ """Load credentials from the given file handle.
+
+ The file is expected to be in this format:
+
+ {
+ "file_version": 2,
+ "credentials": {
+ "key": "base64 encoded json representation of credentials."
+ }
+ }
+
+ This function will warn and return empty credentials instead of raising
+ exceptions.
+
+ Args:
+ credentials_file: An open file handle.
+
+ Returns:
+ A dictionary mapping user-defined keys to an instance of
+ :class:`oauth2client.client.Credentials`.
+ """
+ try:
+ credentials_file.seek(0)
+ data = json.load(credentials_file)
+ except Exception:
+ logger.warning(
+ 'Credentials file could not be loaded, will ignore and '
+ 'overwrite.')
+ return {}
+
+ if data.get('file_version') != 2:
+ logger.warning(
+ 'Credentials file is not version 2, will ignore and '
+ 'overwrite.')
+ return {}
+
+ credentials = {}
+
+ for key, encoded_credential in iteritems(data.get('credentials', {})):
+ try:
+ credential_json = base64.b64decode(encoded_credential)
+ credential = client.Credentials.new_from_json(credential_json)
+ credentials[key] = credential
+ except:
+ logger.warning(
+ 'Invalid credential {0} in file, ignoring.'.format(key))
+
+ return credentials
+
+
+def _write_credentials_file(credentials_file, credentials):
+ """Writes credentials to a file.
+
+ Refer to :func:`_load_credentials_file` for the format.
+
+ Args:
+ credentials_file: An open file handle, must be read/write.
+ credentials: A dictionary mapping user-defined keys to an instance of
+ :class:`oauth2client.client.Credentials`.
+ """
+ data = {'file_version': 2, 'credentials': {}}
+
+ for key, credential in iteritems(credentials):
+ credential_json = credential.to_json()
+ encoded_credential = _helpers._from_bytes(base64.b64encode(
+ _helpers._to_bytes(credential_json)))
+ data['credentials'][key] = encoded_credential
+
+ credentials_file.seek(0)
+ json.dump(data, credentials_file)
+ credentials_file.truncate()
+
+
+class _MultiprocessStorageBackend(object):
+    """Per-process backend for multiprocess storage.
+
+ Each process has only one instance of this backend per file. All threads
+ share a single instance of this backend. This ensures that all threads
+ use the same thread lock and process lock when accessing the file.
+ """
+
+ def __init__(self, filename):
+ self._file = None
+ self._filename = filename
+ self._process_lock = fasteners.InterProcessLock(
+ '{0}.lock'.format(filename))
+ self._thread_lock = threading.Lock()
+ self._read_only = False
+ self._credentials = {}
+
+ def _load_credentials(self):
+ """(Re-)loads the credentials from the file."""
+ if not self._file:
+ return
+
+ loaded_credentials = _load_credentials_file(self._file)
+ self._credentials.update(loaded_credentials)
+
+ logger.debug('Read credential file')
+
+ def _write_credentials(self):
+ if self._read_only:
+ logger.debug('In read-only mode, not writing credentials.')
+ return
+
+ _write_credentials_file(self._file, self._credentials)
+ logger.debug('Wrote credential file {0}.'.format(self._filename))
+
+ def acquire_lock(self):
+ self._thread_lock.acquire()
+ locked = self._process_lock.acquire(timeout=INTERPROCESS_LOCK_DEADLINE)
+
+ if locked:
+ _create_file_if_needed(self._filename)
+ self._file = open(self._filename, 'r+')
+ self._read_only = False
+
+ else:
+ logger.warn(
+ 'Failed to obtain interprocess lock for credentials. '
+ 'If a credential is being refreshed, other processes may '
+ 'not see the updated access token and refresh as well.')
+ if os.path.exists(self._filename):
+ self._file = open(self._filename, 'r')
+ else:
+ self._file = None
+ self._read_only = True
+
+ self._load_credentials()
+
+ def release_lock(self):
+ if self._file is not None:
+ self._file.close()
+ self._file = None
+
+ if not self._read_only:
+ self._process_lock.release()
+
+ self._thread_lock.release()
+
+ def _refresh_predicate(self, credentials):
+ if credentials is None:
+ return True
+ elif credentials.invalid:
+ return True
+ elif credentials.access_token_expired:
+ return True
+ else:
+ return False
+
+ def locked_get(self, key):
+ # Check if the credential is already in memory.
+ credentials = self._credentials.get(key, None)
+
+ # Use the refresh predicate to determine if the entire store should be
+ # reloaded. This basically checks if the credentials are invalid
+ # or expired. This covers the situation where another process has
+ # refreshed the credentials and this process doesn't know about it yet.
+ # In that case, this process won't needlessly refresh the credentials.
+ if self._refresh_predicate(credentials):
+ self._load_credentials()
+ credentials = self._credentials.get(key, None)
+
+ return credentials
+
+ def locked_put(self, key, credentials):
+ self._load_credentials()
+ self._credentials[key] = credentials
+ self._write_credentials()
+
+ def locked_delete(self, key):
+ self._load_credentials()
+ self._credentials.pop(key, None)
+ self._write_credentials()
+
+
+def _get_backend(filename):
+ """A helper method to get or create a backend with thread locking.
+
+ This ensures that only one backend is used per-file per-process, so that
+ thread and process locks are appropriately shared.
+
+ Args:
+ filename: The full path to the credential storage file.
+
+ Returns:
+ An instance of :class:`_MultiprocessStorageBackend`.
+ """
+ filename = os.path.abspath(filename)
+
+ with _backends_lock:
+ if filename not in _backends:
+ _backends[filename] = _MultiprocessStorageBackend(filename)
+ return _backends[filename]
+
+
+class MultiprocessFileStorage(client.Storage):
+ """Multiprocess file credential storage.
+
+ Args:
+ filename: The path to the file where credentials will be stored.
+ key: An arbitrary string used to uniquely identify this set of
+ credentials. For example, you may use the user's ID as the key or
+ a combination of the client ID and user ID.
+ """
+ def __init__(self, filename, key):
+ self._key = key
+ self._backend = _get_backend(filename)
+
+ def acquire_lock(self):
+ self._backend.acquire_lock()
+
+ def release_lock(self):
+ self._backend.release_lock()
+
+ def locked_get(self):
+ """Retrieves the current credentials from the store.
+
+ Returns:
+ An instance of :class:`oauth2client.client.Credentials` or `None`.
+ """
+ credential = self._backend.locked_get(self._key)
+
+ if credential is not None:
+ credential.set_store(self)
+
+ return credential
+
+ def locked_put(self, credentials):
+ """Writes the given credentials to the store.
+
+ Args:
+ credentials: an instance of
+ :class:`oauth2client.client.Credentials`.
+ """
+ return self._backend.locked_put(self._key, credentials)
+
+ def locked_delete(self):
+ """Deletes the current credentials from the store."""
+ return self._backend.locked_delete(self._key)
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/sqlalchemy.py b/contrib/python/oauth2client/py3/oauth2client/contrib/sqlalchemy.py
new file mode 100644
index 0000000000..7d9fd4b23f
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/sqlalchemy.py
@@ -0,0 +1,173 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 utilities for SQLAlchemy.
+
+Utilities for using OAuth 2.0 in conjunction with a SQLAlchemy.
+
+Configuration
+=============
+
+In order to use this storage, you'll need to create table
+with :class:`oauth2client.contrib.sqlalchemy.CredentialsType` column.
+It's recommended to either put this column on some sort of user info
+table or put the column in a table with a belongs-to relationship to
+a user info table.
+
+Here's an example of a simple table with a :class:`CredentialsType`
+column that's related to a user table by the `user_id` key.
+
+.. code-block:: python
+
+ from sqlalchemy import Column, ForeignKey, Integer
+ from sqlalchemy.ext.declarative import declarative_base
+ from sqlalchemy.orm import relationship
+
+ from oauth2client.contrib.sqlalchemy import CredentialsType
+
+
+ Base = declarative_base()
+
+
+ class Credentials(Base):
+ __tablename__ = 'credentials'
+
+ user_id = Column(Integer, ForeignKey('user.id'))
+ credentials = Column(CredentialsType)
+
+
+ class User(Base):
+ id = Column(Integer, primary_key=True)
+ # bunch of other columns
+ credentials = relationship('Credentials')
+
+
+Usage
+=====
+
+With tables ready, you are now able to store credentials in database.
+We will reuse tables defined above.
+
+.. code-block:: python
+
+ from sqlalchemy.orm import Session
+
+ from oauth2client.client import OAuth2Credentials
+    from oauth2client.contrib.sqlalchemy import Storage
+
+ session = Session()
+ user = session.query(User).first()
+ storage = Storage(
+ session=session,
+ model_class=Credentials,
+ # This is the key column used to identify
+ # the row that stores the credentials.
+ key_name='user_id',
+ key_value=user.id,
+ property_name='credentials',
+ )
+
+ # Store
+ credentials = OAuth2Credentials(...)
+ storage.put(credentials)
+
+ # Retrieve
+ credentials = storage.get()
+
+ # Delete
+ storage.delete()
+
+"""
+
+from __future__ import absolute_import
+
+import sqlalchemy.types
+
+from oauth2client import client
+
+
+class CredentialsType(sqlalchemy.types.PickleType):
+ """Type representing credentials.
+
+ Alias for :class:`sqlalchemy.types.PickleType`.
+ """
+
+
+class Storage(client.Storage):
+ """Store and retrieve a single credential to and from SQLAlchemy.
+ This helper presumes the Credentials
+ have been stored as a Credentials column
+ on a db model class.
+ """
+
+ def __init__(self, session, model_class, key_name,
+ key_value, property_name):
+ """Constructor for Storage.
+
+ Args:
+ session: An instance of :class:`sqlalchemy.orm.Session`.
+ model_class: SQLAlchemy declarative mapping.
+ key_name: string, key name for the entity that has the credentials
+ key_value: key value for the entity that has the credentials
+ property_name: A string indicating which property on the
+ ``model_class`` to store the credentials.
+ This property must be a
+ :class:`CredentialsType` column.
+ """
+ super(Storage, self).__init__()
+
+ self.session = session
+ self.model_class = model_class
+ self.key_name = key_name
+ self.key_value = key_value
+ self.property_name = property_name
+
+ def locked_get(self):
+ """Retrieve stored credential.
+
+ Returns:
+ A :class:`oauth2client.Credentials` instance or `None`.
+ """
+ filters = {self.key_name: self.key_value}
+ query = self.session.query(self.model_class).filter_by(**filters)
+ entity = query.first()
+
+ if entity:
+ credential = getattr(entity, self.property_name)
+ if credential and hasattr(credential, 'set_store'):
+ credential.set_store(self)
+ return credential
+ else:
+ return None
+
+ def locked_put(self, credentials):
+        """Write credentials to the SQLAlchemy datastore.
+
+ Args:
+ credentials: :class:`oauth2client.Credentials`
+ """
+ filters = {self.key_name: self.key_value}
+ query = self.session.query(self.model_class).filter_by(**filters)
+ entity = query.first()
+
+ if not entity:
+ entity = self.model_class(**filters)
+
+ setattr(entity, self.property_name, credentials)
+ self.session.add(entity)
+
+ def locked_delete(self):
+ """Delete credentials from the SQLAlchemy datastore."""
+ filters = {self.key_name: self.key_value}
+ self.session.query(self.model_class).filter_by(**filters).delete()
diff --git a/contrib/python/oauth2client/py3/oauth2client/contrib/xsrfutil.py b/contrib/python/oauth2client/py3/oauth2client/contrib/xsrfutil.py
new file mode 100644
index 0000000000..7c3ec0353a
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/contrib/xsrfutil.py
@@ -0,0 +1,101 @@
+# Copyright 2014 the Melange authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper methods for creating & verifying XSRF tokens."""
+
+import base64
+import binascii
+import hmac
+import time
+
+from oauth2client import _helpers
+
+
+# Delimiter character
+DELIMITER = b':'
+
+# 1 hour in seconds
+DEFAULT_TIMEOUT_SECS = 60 * 60
+
+
+@_helpers.positional(2)
+def generate_token(key, user_id, action_id='', when=None):
+ """Generates a URL-safe token for the given user, action, time tuple.
+
+ Args:
+ key: secret key to use.
+ user_id: the user ID of the authenticated user.
+ action_id: a string identifier of the action they requested
+ authorization for.
+ when: the time in seconds since the epoch at which the user was
+ authorized for this action. If not set the current time is used.
+
+ Returns:
+ A string XSRF protection token.
+ """
+ digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))
+ digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
+ digester.update(DELIMITER)
+ digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
+ digester.update(DELIMITER)
+ when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
+ digester.update(when)
+ digest = digester.digest()
+
+ token = base64.urlsafe_b64encode(digest + DELIMITER + when)
+ return token
+
+
+@_helpers.positional(3)
+def validate_token(key, token, user_id, action_id="", current_time=None):
+ """Validates that the given token authorizes the user for the action.
+
+ Tokens are invalid if the time of issue is too old or if the token
+ does not match what generateToken outputs (i.e. the token was forged).
+
+ Args:
+ key: secret key to use.
+ token: a string of the token generated by generateToken.
+ user_id: the user ID of the authenticated user.
+ action_id: a string identifier of the action they requested
+ authorization for.
+
+ Returns:
+ A boolean - True if the user is authorized for the action, False
+ otherwise.
+ """
+ if not token:
+ return False
+ try:
+ decoded = base64.urlsafe_b64decode(token)
+ token_time = int(decoded.split(DELIMITER)[-1])
+ except (TypeError, ValueError, binascii.Error):
+ return False
+ if current_time is None:
+ current_time = time.time()
+ # If the token is too old it's not valid.
+ if current_time - token_time > DEFAULT_TIMEOUT_SECS:
+ return False
+
+ # The given token should match the generated one with the same time.
+ expected_token = generate_token(key, user_id, action_id=action_id,
+ when=token_time)
+ if len(token) != len(expected_token):
+ return False
+
+ # Perform constant time comparison to avoid timing attacks
+ different = 0
+ for x, y in zip(bytearray(token), bytearray(expected_token)):
+ different |= x ^ y
+ return not different
diff --git a/contrib/python/oauth2client/py3/oauth2client/crypt.py b/contrib/python/oauth2client/py3/oauth2client/crypt.py
new file mode 100644
index 0000000000..13260982a6
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/crypt.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Crypto-related routines for oauth2client."""
+
+import json
+import logging
+import time
+
+from oauth2client import _helpers
+from oauth2client import _pure_python_crypt
+
+
+RsaSigner = _pure_python_crypt.RsaSigner
+RsaVerifier = _pure_python_crypt.RsaVerifier
+
+CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
+AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
+MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
+
+logger = logging.getLogger(__name__)
+
+
+class AppIdentityError(Exception):
+ """Error to indicate crypto failure."""
+
+
+def _bad_pkcs12_key_as_pem(*args, **kwargs):
+ raise NotImplementedError('pkcs12_key_as_pem requires OpenSSL.')
+
+
+try:
+ from oauth2client import _openssl_crypt
+ OpenSSLSigner = _openssl_crypt.OpenSSLSigner
+ OpenSSLVerifier = _openssl_crypt.OpenSSLVerifier
+ pkcs12_key_as_pem = _openssl_crypt.pkcs12_key_as_pem
+except ImportError: # pragma: NO COVER
+ OpenSSLVerifier = None
+ OpenSSLSigner = None
+ pkcs12_key_as_pem = _bad_pkcs12_key_as_pem
+
+try:
+ from oauth2client import _pycrypto_crypt
+ PyCryptoSigner = _pycrypto_crypt.PyCryptoSigner
+ PyCryptoVerifier = _pycrypto_crypt.PyCryptoVerifier
+except ImportError: # pragma: NO COVER
+ PyCryptoVerifier = None
+ PyCryptoSigner = None
+
+
+if OpenSSLSigner:
+ Signer = OpenSSLSigner
+ Verifier = OpenSSLVerifier
+elif PyCryptoSigner: # pragma: NO COVER
+ Signer = PyCryptoSigner
+ Verifier = PyCryptoVerifier
+else: # pragma: NO COVER
+ Signer = RsaSigner
+ Verifier = RsaVerifier
+
+
+def make_signed_jwt(signer, payload, key_id=None):
+ """Make a signed JWT.
+
+ See http://self-issued.info/docs/draft-jones-json-web-token.html.
+
+ Args:
+ signer: crypt.Signer, Cryptographic signer.
+ payload: dict, Dictionary of data to convert to JSON and then sign.
+ key_id: string, (Optional) Key ID header.
+
+ Returns:
+ string, The JWT for the payload.
+ """
+ header = {'typ': 'JWT', 'alg': 'RS256'}
+ if key_id is not None:
+ header['kid'] = key_id
+
+ segments = [
+ _helpers._urlsafe_b64encode(_helpers._json_encode(header)),
+ _helpers._urlsafe_b64encode(_helpers._json_encode(payload)),
+ ]
+ signing_input = b'.'.join(segments)
+
+ signature = signer.sign(signing_input)
+ segments.append(_helpers._urlsafe_b64encode(signature))
+
+ logger.debug(str(segments))
+
+ return b'.'.join(segments)
+
+
+def _verify_signature(message, signature, certs):
+ """Verifies signed content using a list of certificates.
+
+ Args:
+ message: string or bytes, The message to verify.
+ signature: string or bytes, The signature on the message.
+ certs: iterable, certificates in PEM format.
+
+ Raises:
+ AppIdentityError: If none of the certificates can verify the message
+ against the signature.
+ """
+ for pem in certs:
+ verifier = Verifier.from_string(pem, is_x509_cert=True)
+ if verifier.verify(message, signature):
+ return
+
+ # If we have not returned, no certificate confirms the signature.
+ raise AppIdentityError('Invalid token signature')
+
+
+def _check_audience(payload_dict, audience):
+ """Checks audience field from a JWT payload.
+
+ Does nothing if the passed in ``audience`` is null.
+
+ Args:
+ payload_dict: dict, A dictionary containing a JWT payload.
+ audience: string or NoneType, an audience to check for in
+ the JWT payload.
+
+ Raises:
+ AppIdentityError: If there is no ``'aud'`` field in the payload
+ dictionary but there is an ``audience`` to check.
+ AppIdentityError: If the ``'aud'`` field in the payload dictionary
+ does not match the ``audience``.
+ """
+ if audience is None:
+ return
+
+ audience_in_payload = payload_dict.get('aud')
+ if audience_in_payload is None:
+ raise AppIdentityError(
+ 'No aud field in token: {0}'.format(payload_dict))
+ if audience_in_payload != audience:
+ raise AppIdentityError('Wrong recipient, {0} != {1}: {2}'.format(
+ audience_in_payload, audience, payload_dict))
+
+
+def _verify_time_range(payload_dict):
+ """Verifies the issued at and expiration from a JWT payload.
+
+ Makes sure the current time (in UTC) falls between the issued at and
+ expiration for the JWT (with some skew allowed for via
+ ``CLOCK_SKEW_SECS``).
+
+ Args:
+ payload_dict: dict, A dictionary containing a JWT payload.
+
+ Raises:
+ AppIdentityError: If there is no ``'iat'`` field in the payload
+ dictionary.
+ AppIdentityError: If there is no ``'exp'`` field in the payload
+ dictionary.
+ AppIdentityError: If the JWT expiration is too far in the future (i.e.
+ if the expiration would imply a token lifetime
+ longer than what is allowed.)
+ AppIdentityError: If the token appears to have been issued in the
+ future (up to clock skew).
+ AppIdentityError: If the token appears to have expired in the past
+ (up to clock skew).
+ """
+ # Get the current time to use throughout.
+ now = int(time.time())
+
+ # Make sure issued at and expiration are in the payload.
+ issued_at = payload_dict.get('iat')
+ if issued_at is None:
+ raise AppIdentityError(
+ 'No iat field in token: {0}'.format(payload_dict))
+ expiration = payload_dict.get('exp')
+ if expiration is None:
+ raise AppIdentityError(
+ 'No exp field in token: {0}'.format(payload_dict))
+
+ # Make sure the expiration gives an acceptable token lifetime.
+ if expiration >= now + MAX_TOKEN_LIFETIME_SECS:
+ raise AppIdentityError(
+ 'exp field too far in future: {0}'.format(payload_dict))
+
+ # Make sure (up to clock skew) that the token wasn't issued in the future.
+ earliest = issued_at - CLOCK_SKEW_SECS
+ if now < earliest:
+ raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(
+ now, earliest, payload_dict))
+ # Make sure (up to clock skew) that the token isn't already expired.
+ latest = expiration + CLOCK_SKEW_SECS
+ if now > latest:
+ raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(
+ now, latest, payload_dict))
+
+
+def verify_signed_jwt_with_certs(jwt, certs, audience=None):
+ """Verify a JWT against public certs.
+
+ See http://self-issued.info/docs/draft-jones-json-web-token.html.
+
+ Args:
+ jwt: string, A JWT.
+ certs: dict, Dictionary where values of public keys in PEM format.
+ audience: string, The audience, 'aud', that this JWT should contain. If
+ None then the JWT's 'aud' parameter is not verified.
+
+ Returns:
+ dict, The deserialized JSON payload in the JWT.
+
+ Raises:
+ AppIdentityError: if any checks are failed.
+ """
+ jwt = _helpers._to_bytes(jwt)
+
+ if jwt.count(b'.') != 2:
+ raise AppIdentityError(
+ 'Wrong number of segments in token: {0}'.format(jwt))
+
+ header, payload, signature = jwt.split(b'.')
+ message_to_sign = header + b'.' + payload
+ signature = _helpers._urlsafe_b64decode(signature)
+
+ # Parse token.
+ payload_bytes = _helpers._urlsafe_b64decode(payload)
+ try:
+ payload_dict = json.loads(_helpers._from_bytes(payload_bytes))
+ except:
+ raise AppIdentityError('Can\'t parse token: {0}'.format(payload_bytes))
+
+ # Verify that the signature matches the message.
+ _verify_signature(message_to_sign, signature, certs.values())
+
+ # Verify the issued at and created times in the payload.
+ _verify_time_range(payload_dict)
+
+ # Check audience.
+ _check_audience(payload_dict, audience)
+
+ return payload_dict
diff --git a/contrib/python/oauth2client/py3/oauth2client/file.py b/contrib/python/oauth2client/py3/oauth2client/file.py
new file mode 100644
index 0000000000..3551c80d47
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/file.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for OAuth.
+
+Utilities for making it easier to work with OAuth 2.0
+credentials.
+"""
+
+import os
+import threading
+
+from oauth2client import _helpers
+from oauth2client import client
+
+
class Storage(client.Storage):
    """Store and retrieve a single credential to and from a file."""

    def __init__(self, filename):
        super(Storage, self).__init__(lock=threading.Lock())
        self._filename = filename

    def locked_get(self):
        """Retrieve Credential from file.

        Returns:
            oauth2client.client.Credentials

        Raises:
            IOError if the file is a symbolic link.
        """
        credentials = None
        _helpers.validate_file(self._filename)
        try:
            # ``with`` guarantees the handle is closed even if read() raises;
            # the previous open()/read()/close() sequence leaked on error.
            with open(self._filename, 'rb') as f:
                content = f.read()
        except IOError:
            # Missing/unreadable file means "no stored credential".
            return credentials

        try:
            credentials = client.Credentials.new_from_json(content)
            credentials.set_store(self)
        except ValueError:
            # Corrupt or non-JSON content: behave as if nothing is stored
            # rather than crashing the caller.
            pass

        return credentials

    def _create_file_if_needed(self):
        """Create an empty file if necessary.

        This method will not initialize the file. Instead it implements a
        simple version of "touch" to ensure the file has been created.
        """
        if not os.path.exists(self._filename):
            # umask 0o177 ensures the credentials file is created with
            # owner-only permissions (0600) since it holds secrets.
            old_umask = os.umask(0o177)
            try:
                open(self._filename, 'a+b').close()
            finally:
                os.umask(old_umask)

    def locked_put(self, credentials):
        """Write Credentials to file.

        Args:
            credentials: Credentials, the credentials to store.

        Raises:
            IOError if the file is a symbolic link.
        """
        self._create_file_if_needed()
        _helpers.validate_file(self._filename)
        # ``with`` ensures the file is flushed and closed even on error.
        with open(self._filename, 'w') as f:
            f.write(credentials.to_json())

    def locked_delete(self):
        """Delete the Credentials file from disk."""
        os.unlink(self._filename)
diff --git a/contrib/python/oauth2client/py3/oauth2client/service_account.py b/contrib/python/oauth2client/py3/oauth2client/service_account.py
new file mode 100644
index 0000000000..540bfaaa1b
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/service_account.py
@@ -0,0 +1,685 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""oauth2client Service account credentials class."""
+
+import base64
+import copy
+import datetime
+import json
+import time
+
+import oauth2client
+from oauth2client import _helpers
+from oauth2client import client
+from oauth2client import crypt
+from oauth2client import transport
+
+
# Default password Google uses for PKCS#12 service-account key files.
_PASSWORD_DEFAULT = 'notasecret'
# Instance-attribute name used to (de)serialize the raw PKCS#12 key bytes.
_PKCS12_KEY = '_private_key_pkcs12'
# Message raised when a .p12 key is supplied but pyOpenSSL is not the
# active crypto backend (only OpenSSLSigner understands PKCS#12).
_PKCS12_ERROR = r"""
This library only implements PKCS#12 support via the pyOpenSSL library.
Either install pyOpenSSL, or please convert the .p12 file
to .pem format:
    $ cat key.p12 | \
    > openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
    > openssl rsa > key.pem
"""
+
+
class ServiceAccountCredentials(client.AssertionCredentials):
    """Service Account credential for OAuth 2.0 signed JWT grants.

    Supports

    * JSON keyfile (typically contains a PKCS8 key stored as
      PEM text)
    * ``.p12`` key (stores PKCS12 key and certificate)

    Makes an assertion to server using a signed JWT assertion in exchange
    for an access token.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.

    Args:
        service_account_email: string, The email associated with the
                               service account.
        signer: ``crypt.Signer``, A signer which can be used to sign content.
        scopes: List or string, (Optional) Scopes to use when acquiring
                an access token.
        private_key_id: string, (Optional) Private key identifier. Typically
                        only used with a JSON keyfile. Can be sent in the
                        header of a JWT token assertion.
        client_id: string, (Optional) Client ID for the project that owns the
                   service account.
        user_agent: string, (Optional) User agent to use when sending
                    request.
        token_uri: string, URI for token endpoint. For convenience defaults
                   to Google's endpoints but any OAuth 2.0 provider can be
                   used.
        revoke_uri: string, URI for revoke endpoint. For convenience defaults
                    to Google's endpoints but any OAuth 2.0 provider can be
                    used.
        kwargs: dict, Extra key-value pairs (both strings) to send in the
                payload body when making an assertion.
    """

    MAX_TOKEN_LIFETIME_SECS = 3600
    """Max lifetime of the token (one hour, in seconds)."""

    NON_SERIALIZED_MEMBERS = (
        frozenset(['_signer']) |
        client.AssertionCredentials.NON_SERIALIZED_MEMBERS)
    """Members that aren't serialized when object is converted to JSON."""

    # Can be over-ridden by factory constructors. Used for
    # serialization/deserialization purposes.
    _private_key_pkcs8_pem = None
    _private_key_pkcs12 = None
    _private_key_password = None

    def __init__(self,
                 service_account_email,
                 signer,
                 scopes='',
                 private_key_id=None,
                 client_id=None,
                 user_agent=None,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 **kwargs):

        super(ServiceAccountCredentials, self).__init__(
            None, user_agent=user_agent, token_uri=token_uri,
            revoke_uri=revoke_uri)

        self._service_account_email = service_account_email
        self._signer = signer
        # Scopes are normalized to a single space-delimited string.
        self._scopes = _helpers.scopes_to_string(scopes)
        self._private_key_id = private_key_id
        self.client_id = client_id
        self._user_agent = user_agent
        # Extra claims merged into the JWT payload by _generate_assertion().
        self._kwargs = kwargs

    def _to_json(self, strip, to_serialize=None):
        """Utility function that creates JSON repr. of a credentials object.

        Over-ride is needed since PKCS#12 keys will not in general be JSON
        serializable.

        Args:
            strip: array, An array of names of members to exclude from the
                   JSON.
            to_serialize: dict, (Optional) The properties for this object
                          that will be serialized. This allows callers to
                          modify before serializing.

        Returns:
            string, a JSON representation of this instance, suitable to pass to
            from_json().
        """
        if to_serialize is None:
            to_serialize = copy.copy(self.__dict__)
        pkcs12_val = to_serialize.get(_PKCS12_KEY)
        if pkcs12_val is not None:
            # PKCS#12 keys are raw bytes; base64-encode so JSON can carry
            # them. from_json() performs the inverse decode.
            to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)
        return super(ServiceAccountCredentials, self)._to_json(
            strip, to_serialize=to_serialize)

    @classmethod
    def _from_parsed_json_keyfile(cls, keyfile_dict, scopes,
                                  token_uri=None, revoke_uri=None):
        """Helper for factory constructors from JSON keyfile.

        Args:
            keyfile_dict: dict-like object, The parsed dictionary-like object
                          containing the contents of the JSON keyfile.
            scopes: List or string, Scopes to use when acquiring an
                    access token.
            token_uri: string, URI for OAuth 2.0 provider token endpoint.
                       If unset and not present in keyfile_dict, defaults
                       to Google's endpoints.
            revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
                        If unset and not present in keyfile_dict, defaults
                        to Google's endpoints.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile contents.

        Raises:
            ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
            KeyError, if one of the expected keys is not present in
                the keyfile.
        """
        creds_type = keyfile_dict.get('type')
        if creds_type != client.SERVICE_ACCOUNT:
            raise ValueError('Unexpected credentials type', creds_type,
                             'Expected', client.SERVICE_ACCOUNT)

        service_account_email = keyfile_dict['client_email']
        private_key_pkcs8_pem = keyfile_dict['private_key']
        private_key_id = keyfile_dict['private_key_id']
        client_id = keyfile_dict['client_id']
        # Explicit arguments win over URIs stored in the keyfile.
        if not token_uri:
            token_uri = keyfile_dict.get('token_uri',
                                         oauth2client.GOOGLE_TOKEN_URI)
        if not revoke_uri:
            revoke_uri = keyfile_dict.get('revoke_uri',
                                          oauth2client.GOOGLE_REVOKE_URI)

        signer = crypt.Signer.from_string(private_key_pkcs8_pem)
        credentials = cls(service_account_email, signer, scopes=scopes,
                          private_key_id=private_key_id,
                          client_id=client_id, token_uri=token_uri,
                          revoke_uri=revoke_uri)
        # Retain the PEM text so the credential can be re-serialized.
        credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
        return credentials

    @classmethod
    def from_json_keyfile_name(cls, filename, scopes='',
                               token_uri=None, revoke_uri=None):

        """Factory constructor from JSON keyfile by name.

        Args:
            filename: string, The location of the keyfile.
            scopes: List or string, (Optional) Scopes to use when acquiring an
                    access token.
            token_uri: string, URI for OAuth 2.0 provider token endpoint.
                       If unset and not present in the key file, defaults
                       to Google's endpoints.
            revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
                        If unset and not present in the key file, defaults
                        to Google's endpoints.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
            KeyError, if one of the expected keys is not present in
                the keyfile.
        """
        with open(filename, 'r') as file_obj:
            client_credentials = json.load(file_obj)
        return cls._from_parsed_json_keyfile(client_credentials, scopes,
                                             token_uri=token_uri,
                                             revoke_uri=revoke_uri)

    @classmethod
    def from_json_keyfile_dict(cls, keyfile_dict, scopes='',
                               token_uri=None, revoke_uri=None):
        """Factory constructor from parsed JSON keyfile.

        Args:
            keyfile_dict: dict-like object, The parsed dictionary-like object
                          containing the contents of the JSON keyfile.
            scopes: List or string, (Optional) Scopes to use when acquiring an
                    access token.
            token_uri: string, URI for OAuth 2.0 provider token endpoint.
                       If unset and not present in keyfile_dict, defaults
                       to Google's endpoints.
            revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.
                        If unset and not present in keyfile_dict, defaults
                        to Google's endpoints.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.
            KeyError, if one of the expected keys is not present in
                the keyfile.
        """
        return cls._from_parsed_json_keyfile(keyfile_dict, scopes,
                                             token_uri=token_uri,
                                             revoke_uri=revoke_uri)

    @classmethod
    def _from_p12_keyfile_contents(cls, service_account_email,
                                   private_key_pkcs12,
                                   private_key_password=None, scopes='',
                                   token_uri=oauth2client.GOOGLE_TOKEN_URI,
                                   revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Factory constructor from PKCS#12 key contents.

        Args:
            service_account_email: string, The email associated with the
                                   service account.
            private_key_pkcs12: string, The contents of a PKCS#12 keyfile.
            private_key_password: string, (Optional) Password for PKCS#12
                                  private key. Defaults to ``notasecret``.
            scopes: List or string, (Optional) Scopes to use when acquiring an
                    access token.
            token_uri: string, URI for token endpoint. For convenience defaults
                       to Google's endpoints but any OAuth 2.0 provider can be
                       used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                        defaults to Google's endpoints but any OAuth 2.0
                        provider can be used.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            NotImplementedError if pyOpenSSL is not installed / not the
            active crypto library.
        """
        if private_key_password is None:
            private_key_password = _PASSWORD_DEFAULT
        # Only the pyOpenSSL-backed signer can load PKCS#12 blobs.
        if crypt.Signer is not crypt.OpenSSLSigner:
            raise NotImplementedError(_PKCS12_ERROR)
        signer = crypt.Signer.from_string(private_key_pkcs12,
                                          private_key_password)
        credentials = cls(service_account_email, signer, scopes=scopes,
                          token_uri=token_uri, revoke_uri=revoke_uri)
        # Retain raw key material/password for later re-serialization.
        credentials._private_key_pkcs12 = private_key_pkcs12
        credentials._private_key_password = private_key_password
        return credentials

    @classmethod
    def from_p12_keyfile(cls, service_account_email, filename,
                         private_key_password=None, scopes='',
                         token_uri=oauth2client.GOOGLE_TOKEN_URI,
                         revoke_uri=oauth2client.GOOGLE_REVOKE_URI):

        """Factory constructor from a PKCS#12 keyfile by name.

        Args:
            service_account_email: string, The email associated with the
                                   service account.
            filename: string, The location of the PKCS#12 keyfile.
            private_key_password: string, (Optional) Password for PKCS#12
                                  private key. Defaults to ``notasecret``.
            scopes: List or string, (Optional) Scopes to use when acquiring an
                    access token.
            token_uri: string, URI for token endpoint. For convenience defaults
                       to Google's endpoints but any OAuth 2.0 provider can be
                       used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                        defaults to Google's endpoints but any OAuth 2.0
                        provider can be used.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            NotImplementedError if pyOpenSSL is not installed / not the
            active crypto library.
        """
        with open(filename, 'rb') as file_obj:
            private_key_pkcs12 = file_obj.read()
        return cls._from_p12_keyfile_contents(
            service_account_email, private_key_pkcs12,
            private_key_password=private_key_password, scopes=scopes,
            token_uri=token_uri, revoke_uri=revoke_uri)

    @classmethod
    def from_p12_keyfile_buffer(cls, service_account_email, file_buffer,
                                private_key_password=None, scopes='',
                                token_uri=oauth2client.GOOGLE_TOKEN_URI,
                                revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        """Factory constructor from a PKCS#12 key in a file-like buffer.

        Args:
            service_account_email: string, The email associated with the
                                   service account.
            file_buffer: stream, A buffer that implements ``read()``
                         and contains the PKCS#12 key contents.
            private_key_password: string, (Optional) Password for PKCS#12
                                  private key. Defaults to ``notasecret``.
            scopes: List or string, (Optional) Scopes to use when acquiring an
                    access token.
            token_uri: string, URI for token endpoint. For convenience defaults
                       to Google's endpoints but any OAuth 2.0 provider can be
                       used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                        defaults to Google's endpoints but any OAuth 2.0
                        provider can be used.

        Returns:
            ServiceAccountCredentials, a credentials object created from
            the keyfile.

        Raises:
            NotImplementedError if pyOpenSSL is not installed / not the
            active crypto library.
        """
        private_key_pkcs12 = file_buffer.read()
        return cls._from_p12_keyfile_contents(
            service_account_email, private_key_pkcs12,
            private_key_password=private_key_password, scopes=scopes,
            token_uri=token_uri, revoke_uri=revoke_uri)

    def _generate_assertion(self):
        """Generate the assertion that will be used in the request."""
        now = int(time.time())
        payload = {
            'aud': self.token_uri,
            'scope': self._scopes,
            'iat': now,
            'exp': now + self.MAX_TOKEN_LIFETIME_SECS,
            'iss': self._service_account_email,
        }
        # Extra constructor kwargs (e.g. 'sub') may override base claims.
        payload.update(self._kwargs)
        return crypt.make_signed_jwt(self._signer, payload,
                                     key_id=self._private_key_id)

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        Implements abstract method
        :meth:`oauth2client.client.AssertionCredentials.sign_blob`.

        Args:
            blob: bytes, Message to be signed.

        Returns:
            tuple, A pair of the private key ID used to sign the blob and
            the signed contents.
        """
        return self._private_key_id, self._signer.sign(blob)

    @property
    def service_account_email(self):
        """Get the email for the current service account.

        Returns:
            string, The email associated with the service account.
        """
        return self._service_account_email

    @property
    def serialization_data(self):
        # NOTE: This is only useful for JSON keyfile.
        return {
            'type': 'service_account',
            'client_email': self._service_account_email,
            'private_key_id': self._private_key_id,
            'private_key': self._private_key_pkcs8_pem,
            'client_id': self.client_id,
        }

    @classmethod
    def from_json(cls, json_data):
        """Deserialize a JSON-serialized instance.

        Inverse to :meth:`to_json`.

        Args:
            json_data: dict or string, Serialized JSON (as a string or an
                       already parsed dictionary) representing a credential.

        Returns:
            ServiceAccountCredentials from the serialized data.
        """
        if not isinstance(json_data, dict):
            json_data = json.loads(_helpers._from_bytes(json_data))

        # Rebuild the signer from whichever key representation was
        # serialized: PKCS#8 PEM text or base64-encoded PKCS#12 bytes.
        private_key_pkcs8_pem = None
        pkcs12_val = json_data.get(_PKCS12_KEY)
        password = None
        if pkcs12_val is None:
            private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']
            signer = crypt.Signer.from_string(private_key_pkcs8_pem)
        else:
            # NOTE: This assumes that private_key_pkcs8_pem is not also
            #       in the serialized data. This would be very incorrect
            #       state.
            pkcs12_val = base64.b64decode(pkcs12_val)
            password = json_data['_private_key_password']
            signer = crypt.Signer.from_string(pkcs12_val, password)

        credentials = cls(
            json_data['_service_account_email'],
            signer,
            scopes=json_data['_scopes'],
            private_key_id=json_data['_private_key_id'],
            client_id=json_data['client_id'],
            user_agent=json_data['_user_agent'],
            **json_data['_kwargs']
        )
        if private_key_pkcs8_pem is not None:
            credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
        if pkcs12_val is not None:
            credentials._private_key_pkcs12 = pkcs12_val
        if password is not None:
            credentials._private_key_password = password
        # Restore the mutable token state alongside the static key data.
        credentials.invalid = json_data['invalid']
        credentials.access_token = json_data['access_token']
        credentials.token_uri = json_data['token_uri']
        credentials.revoke_uri = json_data['revoke_uri']
        token_expiry = json_data.get('token_expiry', None)
        if token_expiry is not None:
            credentials.token_expiry = datetime.datetime.strptime(
                token_expiry, client.EXPIRY_FORMAT)
        return credentials

    def create_scoped_required(self):
        # Scoping is required whenever no scopes were provided.
        return not self._scopes

    def create_scoped(self, scopes):
        # Build a sibling credential with new scopes, copying over the
        # endpoint URIs and private key material the constructor omits.
        result = self.__class__(self._service_account_email,
                                self._signer,
                                scopes=scopes,
                                private_key_id=self._private_key_id,
                                client_id=self.client_id,
                                user_agent=self._user_agent,
                                **self._kwargs)
        result.token_uri = self.token_uri
        result.revoke_uri = self.revoke_uri
        result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
        result._private_key_pkcs12 = self._private_key_pkcs12
        result._private_key_password = self._private_key_password
        return result

    def create_with_claims(self, claims):
        """Create credentials that specify additional claims.

        Args:
            claims: dict, key-value pairs for claims.

        Returns:
            ServiceAccountCredentials, a copy of the current service account
            credentials with updated claims to use when obtaining access
            tokens.
        """
        new_kwargs = dict(self._kwargs)
        new_kwargs.update(claims)
        result = self.__class__(self._service_account_email,
                                self._signer,
                                scopes=self._scopes,
                                private_key_id=self._private_key_id,
                                client_id=self.client_id,
                                user_agent=self._user_agent,
                                **new_kwargs)
        result.token_uri = self.token_uri
        result.revoke_uri = self.revoke_uri
        result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
        result._private_key_pkcs12 = self._private_key_pkcs12
        result._private_key_password = self._private_key_password
        return result

    def create_delegated(self, sub):
        """Create credentials that act as domain-wide delegation of authority.

        Use the ``sub`` parameter as the subject to delegate on behalf of
        that user.

        For example::

          >>> account_sub = 'foo@email.com'
          >>> delegate_creds = creds.create_delegated(account_sub)

        Args:
            sub: string, An email address that this service account will
                 act on behalf of (via domain-wide delegation).

        Returns:
            ServiceAccountCredentials, a copy of the current service account
            updated to act on behalf of ``sub``.
        """
        return self.create_with_claims({'sub': sub})
+
+
+def _datetime_to_secs(utc_time):
+ # TODO(issue 298): use time_delta.total_seconds()
+ # time_delta.total_seconds() not supported in Python 2.6
+ epoch = datetime.datetime(1970, 1, 1)
+ time_delta = utc_time - epoch
+ return time_delta.days * 86400 + time_delta.seconds
+
+
class _JWTAccessCredentials(ServiceAccountCredentials):
    """Self signed JWT credentials.

    Makes an assertion to server using a self signed JWT from service account
    credentials.  These credentials do NOT use OAuth 2.0 and instead
    authenticate directly.
    """
    _MAX_TOKEN_LIFETIME_SECS = 3600
    """Max lifetime of the token (one hour, in seconds)."""

    def __init__(self,
                 service_account_email,
                 signer,
                 scopes=None,
                 private_key_id=None,
                 client_id=None,
                 user_agent=None,
                 token_uri=oauth2client.GOOGLE_TOKEN_URI,
                 revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
                 additional_claims=None):
        # ``scopes`` is accepted for signature compatibility but is not
        # forwarded: JWT access credentials are unscoped by definition
        # (see create_scoped_required below).
        if additional_claims is None:
            additional_claims = {}
        super(_JWTAccessCredentials, self).__init__(
            service_account_email,
            signer,
            private_key_id=private_key_id,
            client_id=client_id,
            user_agent=user_agent,
            token_uri=token_uri,
            revoke_uri=revoke_uri,
            **additional_claims)

    def authorize(self, http):
        """Authorize an httplib2.Http instance with a JWT assertion.

        Unless specified, the 'aud' of the assertion will be the base
        uri of the request.

        Args:
            http: An instance of ``httplib2.Http`` or something that acts
                  like it.
        Returns:
            A modified instance of http that was passed in.
        Example::
            h = httplib2.Http()
            h = credentials.authorize(h)
        """
        transport.wrap_http_for_jwt_access(self, http)
        return http

    def get_access_token(self, http=None, additional_claims=None):
        """Create a signed jwt.

        Args:
            http: unused
            additional_claims: dict, additional claims to add to
                the payload of the JWT.
        Returns:
            An AccessTokenInfo with the signed jwt
        """
        if additional_claims is None:
            # Reuse (or refresh) the cached token when no extra claims
            # are requested.
            if self.access_token is None or self.access_token_expired:
                self.refresh(None)
            return client.AccessTokenInfo(
                access_token=self.access_token, expires_in=self._expires_in())
        else:
            # Create a 1 time token
            token, unused_expiry = self._create_token(additional_claims)
            return client.AccessTokenInfo(
                access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)

    def revoke(self, http):
        """Cannot revoke JWTAccessCredentials tokens."""
        pass

    def create_scoped_required(self):
        # JWTAccessCredentials are unscoped by definition
        return True

    def create_scoped(self, scopes, token_uri=oauth2client.GOOGLE_TOKEN_URI,
                      revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
        # Returns an OAuth2 credentials with the given scope
        result = ServiceAccountCredentials(self._service_account_email,
                                           self._signer,
                                           scopes=scopes,
                                           private_key_id=self._private_key_id,
                                           client_id=self.client_id,
                                           user_agent=self._user_agent,
                                           token_uri=token_uri,
                                           revoke_uri=revoke_uri,
                                           **self._kwargs)
        # Copy over private key material the constructor does not accept.
        if self._private_key_pkcs8_pem is not None:
            result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
        if self._private_key_pkcs12 is not None:
            result._private_key_pkcs12 = self._private_key_pkcs12
        if self._private_key_password is not None:
            result._private_key_password = self._private_key_password
        return result

    def refresh(self, http):
        """Refreshes the access_token.

        The HTTP object is unused since no request needs to be made to
        get a new token, it can just be generated locally.

        Args:
            http: unused HTTP object
        """
        self._refresh(None)

    def _refresh(self, http):
        """Refreshes the access_token.

        Args:
            http: unused HTTP object
        """
        self.access_token, self.token_expiry = self._create_token()

    def _create_token(self, additional_claims=None):
        """Sign a fresh JWT locally; returns (token_text, expiry_datetime)."""
        now = client._UTCNOW()
        lifetime = datetime.timedelta(seconds=self._MAX_TOKEN_LIFETIME_SECS)
        expiry = now + lifetime
        payload = {
            'iat': _datetime_to_secs(now),
            'exp': _datetime_to_secs(expiry),
            'iss': self._service_account_email,
            'sub': self._service_account_email
        }
        payload.update(self._kwargs)
        if additional_claims is not None:
            payload.update(additional_claims)
        jwt = crypt.make_signed_jwt(self._signer, payload,
                                    key_id=self._private_key_id)
        # make_signed_jwt returns bytes; decode to text for callers.
        return jwt.decode('ascii'), expiry
diff --git a/contrib/python/oauth2client/py3/oauth2client/tools.py b/contrib/python/oauth2client/py3/oauth2client/tools.py
new file mode 100644
index 0000000000..51669934df
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/tools.py
@@ -0,0 +1,256 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Command-line tools for authenticating via OAuth 2.0
+
+Do the OAuth 2.0 Web Server dance for a command line application. Stores the
+generated credentials in a common file that is used by other example apps in
+the same directory.
+"""
+
+from __future__ import print_function
+
+import logging
+import socket
+import sys
+
+from six.moves import BaseHTTPServer
+from six.moves import http_client
+from six.moves import input
+from six.moves import urllib
+
+from oauth2client import _helpers
+from oauth2client import client
+
+
# Public API of this module.
__all__ = ['argparser', 'run_flow', 'message_if_missing']

# Returned by message_if_missing() when the client secrets file is absent.
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0

To make this sample run you will need to populate the client_secrets.json file
found at:

   {file_path}

with information from the APIs Console <https://code.google.com/apis/console>.

"""

# Printed by run_flow() when no local redirect webserver could be started.
_FAILED_START_MESSAGE = """
Failed to start a local webserver listening on either port 8080
or port 8090. Please check your firewall settings and locally
running programs that may be blocking or using those ports.

Falling back to --noauth_local_webserver and continuing with
authorization.
"""

# Printed after run_flow() opens the authorization URL in a browser.
_BROWSER_OPENED_MESSAGE = """
Your browser has been opened to visit:

    {address}

If your browser is on a different machine then exit and re-run this
application with the command-line parameter

  --noauth_local_webserver
"""

# Printed when the user must visit the authorization URL manually.
_GO_TO_LINK_MESSAGE = """
Go to the following link in your browser:

    {address}
"""
+
+
+def _CreateArgumentParser():
+ try:
+ import argparse
+ except ImportError: # pragma: NO COVER
+ return None
+ parser = argparse.ArgumentParser(add_help=False)
+ parser.add_argument('--auth_host_name', default='localhost',
+ help='Hostname when running a local web server.')
+ parser.add_argument('--noauth_local_webserver', action='store_true',
+ default=False, help='Do not run a local web server.')
+ parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
+ nargs='*', help='Port web server should listen on.')
+ parser.add_argument(
+ '--logging_level', default='ERROR',
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+ help='Set the logging level of detail.')
+ return parser
+
+
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
# Built once at import time; None if argparse could not be imported.
argparser = _CreateArgumentParser()
+
+
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Overwritten per-instance by ClientRedirectHandler.do_GET with the
    # parsed redirect query parameters (e.g. 'code' or 'error').
    query_params = {}
+
+
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into the servers query_params and then stops serving.
    """

    def do_GET(self):
        """Handle a GET request.

        Parses the query parameters and prints a message
        if the flow has completed. Note that we can't detect
        if an error occurred.
        """
        self.send_response(http_client.OK)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        parts = urllib.parse.urlparse(self.path)
        query = _helpers.parse_unique_urlencoded(parts.query)
        # Hand the parsed parameters back to run_flow() via the server.
        self.server.query_params = query
        # wfile is a binary stream, hence the bytes literals.
        self.wfile.write(
            b'<html><head><title>Authentication Status</title></head>')
        self.wfile.write(
            b'<body><p>The authentication flow has completed.</p>')
        self.wfile.write(b'</body></html>')

    def log_message(self, format, *args):
        """Do not log messages to stdout while running as cmd. line program."""
        # Intentionally empty: suppresses BaseHTTPRequestHandler's default
        # per-request stderr logging.
+
+
@_helpers.positional(3)
def run_flow(flow, storage, flags=None, http=None):
    """Core code for a command-line application.

    The ``run()`` function is called from your application and runs
    through all the steps to obtain credentials. It takes a ``Flow``
    argument and attempts to open an authorization server page in the
    user's default web browser. The server asks the user to grant your
    application access to the user's data. If the user grants access,
    the ``run()`` function returns new credentials. The new credentials
    are also stored in the ``storage`` argument, which updates the file
    associated with the ``Storage`` object.

    It presumes it is run from a command-line application and supports the
    following flags:

        ``--auth_host_name`` (string, default: ``localhost``)
           Host name to use when running a local web server to handle
           redirects during OAuth authorization.

        ``--auth_host_port`` (integer, default: ``[8080, 8090]``)
           Port to use when running a local web server to handle redirects
           during OAuth authorization. Repeat this option to specify a list
           of values.

        ``--[no]auth_local_webserver`` (boolean, default: ``True``)
           Run a local web server to handle redirects during OAuth
           authorization.

    The tools module defines an ``ArgumentParser`` the already contains the
    flag definitions that ``run()`` requires. You can pass that
    ``ArgumentParser`` to your ``ArgumentParser`` constructor::

        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            parents=[tools.argparser])
        flags = parser.parse_args(argv)

    Args:
        flow: Flow, an OAuth 2.0 Flow to step through.
        storage: Storage, a ``Storage`` to store the credential in.
        flags: ``argparse.Namespace``, (Optional) The command-line flags. This
               is the object returned from calling ``parse_args()`` on
               ``argparse.ArgumentParser`` as described above. Defaults
               to ``argparser.parse_args()``.
        http: An instance of ``httplib2.Http.request`` or something that
              acts like it.

    Returns:
        Credentials, the obtained credential.
    """
    if flags is None:
        flags = argparser.parse_args()
    logging.getLogger().setLevel(getattr(logging, flags.logging_level))
    if not flags.noauth_local_webserver:
        # Try each configured port in turn; fall back to the manual
        # copy/paste flow if none can be bound.
        success = False
        port_number = 0
        for port in flags.auth_host_port:
            port_number = port
            try:
                httpd = ClientRedirectServer((flags.auth_host_name, port),
                                             ClientRedirectHandler)
            except socket.error:
                pass
            else:
                success = True
                break
        flags.noauth_local_webserver = not success
        if not success:
            print(_FAILED_START_MESSAGE)

    if not flags.noauth_local_webserver:
        oauth_callback = 'http://{host}:{port}/'.format(
            host=flags.auth_host_name, port=port_number)
    else:
        # Out-of-band flow: the provider displays the code for the user
        # to type in manually.
        oauth_callback = client.OOB_CALLBACK_URN
    flow.redirect_uri = oauth_callback
    authorize_url = flow.step1_get_authorize_url()

    if not flags.noauth_local_webserver:
        import webbrowser
        webbrowser.open(authorize_url, new=1, autoraise=True)
        print(_BROWSER_OPENED_MESSAGE.format(address=authorize_url))
    else:
        print(_GO_TO_LINK_MESSAGE.format(address=authorize_url))

    code = None
    if not flags.noauth_local_webserver:
        # Block until the provider redirects back with 'code' or 'error'.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'code' in httpd.query_params:
            code = httpd.query_params['code']
        else:
            print('Failed to find "code" in the query parameters '
                  'of the redirect.')
            sys.exit('Try running with --noauth_local_webserver.')
    else:
        code = input('Enter verification code: ').strip()

    try:
        credential = flow.step2_exchange(code, http=http)
    except client.FlowExchangeError as e:
        sys.exit('Authentication has failed: {0}'.format(e))

    # Persist the new credential and attach the store so future refreshes
    # are written back automatically.
    storage.put(credential)
    credential.set_store(storage)
    print('Authentication successful.')

    return credential
+
+
def message_if_missing(filename):
    """Helpful message to display if the CLIENT_SECRETS file is missing."""
    # Point the user at the exact path the application looked up.
    message = _CLIENT_SECRETS_MESSAGE.format(file_path=filename)
    return message
diff --git a/contrib/python/oauth2client/py3/oauth2client/transport.py b/contrib/python/oauth2client/py3/oauth2client/transport.py
new file mode 100644
index 0000000000..79a61f1c1b
--- /dev/null
+++ b/contrib/python/oauth2client/py3/oauth2client/transport.py
@@ -0,0 +1,285 @@
+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import httplib2
+import six
+from six.moves import http_client
+
+from oauth2client import _helpers
+
+
+_LOGGER = logging.getLogger(__name__)
+# Properties present in file-like streams / buffers.
+_STREAM_PROPERTIES = ('read', 'seek', 'tell')
+
+# Google Data client libraries may need to set this to [401, 403].
+REFRESH_STATUS_CODES = (http_client.UNAUTHORIZED,)
+
+
class MemoryCache(object):
    """httplib2 cache backend that keeps entries in a local dict.

    Entries live only in this process; nothing is persisted.
    """

    def __init__(self):
        # Mapping of cache key -> cached value.
        self.cache = {}

    def get(self, key):
        """Return the cached value for ``key``, or ``None`` if absent."""
        return self.cache.get(key)

    def set(self, key, value):
        """Store ``value`` under ``key``, replacing any previous entry."""
        self.cache[key] = value

    def delete(self, key):
        """Remove ``key`` from the cache; deleting a missing key is a no-op."""
        self.cache.pop(key, None)
+
+
def get_cached_http():
    """Return an HTTP object which caches results returned.

    This is intended to be used in methods like
    oauth2client.client.verify_id_token(), which calls to the same URI
    to retrieve certs.

    Returns:
        httplib2.Http, an HTTP object with a MemoryCache
    """
    # Module-level singleton (created at the bottom of this module); all
    # callers share one cache for the lifetime of the process.
    return _CACHED_HTTP
+
+
def get_http_object(*args, **kwargs):
    """Return a new HTTP object.

    Args:
        *args: tuple, The positional arguments to be passed when
               constructing a new HTTP object.
        **kwargs: dict, The keyword arguments to be passed when
                  constructing a new HTTP object.

    Returns:
        httplib2.Http, an HTTP object.
    """
    # Thin factory around httplib2.Http so callers don't import httplib2
    # directly; arguments are forwarded unchanged.
    return httplib2.Http(*args, **kwargs)
+
+
+def _initialize_headers(headers):
+ """Creates a copy of the headers.
+
+ Args:
+ headers: dict, request headers to copy.
+
+ Returns:
+ dict, the copied headers or a new dictionary if the headers
+ were None.
+ """
+ return {} if headers is None else dict(headers)
+
+
+def _apply_user_agent(headers, user_agent):
+ """Adds a user-agent to the headers.
+
+ Args:
+ headers: dict, request headers to add / modify user
+ agent within.
+ user_agent: str, the user agent to add.
+
+ Returns:
+ dict, the original headers passed in, but modified if the
+ user agent is not None.
+ """
+ if user_agent is not None:
+ if 'user-agent' in headers:
+ headers['user-agent'] = (user_agent + ' ' + headers['user-agent'])
+ else:
+ headers['user-agent'] = user_agent
+
+ return headers
+
+
def clean_headers(headers):
    """Forces header keys and values to be strings, i.e not unicode.

    The httplib module just concatenates the header keys and values in a
    way that may make the message header a unicode string, which, if it
    then tries to concatenate to a binary request body, may result in a
    unicode decode error.

    Args:
        headers: dict, A dictionary of headers.

    Returns:
        The same dictionary but with all the keys converted to strings.

    Raises:
        NonAsciiHeaderError: if a key or value cannot be encoded as ASCII.
    """
    cleaned = {}
    try:
        for key, value in six.iteritems(headers):
            # Stringify anything that is not already bytes before encoding.
            if not isinstance(key, six.binary_type):
                key = str(key)
            if not isinstance(value, six.binary_type):
                value = str(value)
            cleaned[_helpers._to_bytes(key)] = _helpers._to_bytes(value)
    except UnicodeEncodeError:
        # Imported here to avoid a circular import with oauth2client.client.
        from oauth2client.client import NonAsciiHeaderError
        raise NonAsciiHeaderError(key, ': ', value)
    return cleaned
+
+
def wrap_http_for_auth(credentials, http):
    """Prepares an HTTP object's request method for auth.

    Wraps HTTP requests with logic to catch auth failures (typically
    identified via a 401 status code). In the event of failure, tries
    to refresh the token used and then retry the original request.

    Args:
        credentials: Credentials, the credentials used to identify
                     the authenticated user.
        http: httplib2.Http, an http object to be used to make
              auth requests.
    """
    # Keep the unwrapped method so refreshes bypass our own wrapper.
    orig_request_method = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        # No token yet (None or empty): refresh up front so the
        # Authorization header applied below has something to carry.
        if not credentials.access_token:
            _LOGGER.info('Attempting refresh to obtain '
                         'initial access_token')
            credentials._refresh(orig_request_method)

        # Clone and modify the request headers to add the appropriate
        # Authorization header.
        headers = _initialize_headers(headers)
        credentials.apply(headers)
        _apply_user_agent(headers, credentials.user_agent)

        # Remember the stream position so a retried request can rewind and
        # resend the payload from the same point.
        body_stream_position = None
        # Check if the body is a file-like stream.
        if all(getattr(body, stream_prop, None) for stream_prop in
               _STREAM_PROPERTIES):
            body_stream_position = body.tell()

        resp, content = request(orig_request_method, uri, method, body,
                                clean_headers(headers),
                                redirections, connection_type)

        # A stored token may expire between the time it is retrieved and
        # the time the request is made, so we may need to try twice.
        max_refresh_attempts = 2
        for refresh_attempt in range(max_refresh_attempts):
            # Only retry while the response status signals an auth
            # failure (401 by default, see REFRESH_STATUS_CODES).
            if resp.status not in REFRESH_STATUS_CODES:
                break
            _LOGGER.info('Refreshing due to a %s (attempt %s/%s)',
                         resp.status, refresh_attempt + 1,
                         max_refresh_attempts)
            # Refresh the token, re-apply the (new) Authorization header
            # and rewind the body before resending.
            credentials._refresh(orig_request_method)
            credentials.apply(headers)
            if body_stream_position is not None:
                body.seek(body_stream_position)

            resp, content = request(orig_request_method, uri, method, body,
                                    clean_headers(headers),
                                    redirections, connection_type)

        return resp, content

    # Replace the request method with our own closure.
    http.request = new_request

    # Set credentials as a property of the request method.
    http.request.credentials = credentials
+
+
def wrap_http_for_jwt_access(credentials, http):
    """Prepares an HTTP object's request method for JWT access.

    When the credentials carry an ``aud`` claim, requests go through the
    auth-wrapped method installed by ``wrap_http_for_auth`` (which
    refreshes and retries on auth failures); the token is also refreshed
    preemptively when missing or expired. Without an ``aud`` claim, a
    one-time JWT scoped to the request's URI root is minted per request
    and sent via the Authorization header, bypassing the retry wrapper.

    Args:
        credentials: _JWTAccessCredentials, the credentials used to identify
                     a service account that uses JWT access tokens.
        http: httplib2.Http, an http object to be used to make
              auth requests.
    """
    # Keep the unwrapped method for the per-request-token path below.
    orig_request_method = http.request
    wrap_http_for_auth(credentials, http)
    # The new value of ``http.request`` set by ``wrap_http_for_auth``.
    authenticated_request_method = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        if 'aud' in credentials._kwargs:
            # Preemptively refresh token, this is not done for OAuth2
            if (credentials.access_token is None or
                    credentials.access_token_expired):
                credentials.refresh(None)
            return request(authenticated_request_method, uri,
                           method, body, headers, redirections,
                           connection_type)
        else:
            # If we don't have an 'aud' (audience) claim,
            # create a 1-time token with the uri root as the audience
            headers = _initialize_headers(headers)
            _apply_user_agent(headers, credentials.user_agent)
            # Strip the query string: the audience is the bare resource URI.
            uri_root = uri.split('?', 1)[0]
            token, unused_expiry = credentials._create_token({'aud': uri_root})

            headers['Authorization'] = 'Bearer ' + token
            return request(orig_request_method, uri, method, body,
                           clean_headers(headers),
                           redirections, connection_type)

    # Replace the request method with our own closure.
    http.request = new_request

    # Set credentials as a property of the request method.
    http.request.credentials = credentials
+
+
def request(http, uri, method='GET', body=None, headers=None,
            redirections=httplib2.DEFAULT_MAX_REDIRECTS,
            connection_type=None):
    """Make an HTTP request with an HTTP object and arguments.

    Args:
        http: httplib2.Http, an http object (or a bare request callable)
              to be used to make requests.
        uri: string, The URI to be requested.
        method: string, The HTTP method to use for the request. Defaults
                to 'GET'.
        body: string, The payload / body in HTTP request. By default
              there is no payload.
        headers: dict, Key-value pairs of request headers. By default
                 there are no headers.
        redirections: int, The number of allowed redirects for
                      the request. Defaults to 5.
        connection_type: httplib.HTTPConnection, a subclass to be used for
                         establishing connection. If not set, the type
                         will be determined from the ``uri``.

    Returns:
        tuple, a pair of a httplib2.Response with the status code and other
        headers and the bytes of the content returned.
    """
    # NOTE: Allowing http or http.request is temporary (See Issue 601).
    transport_request = getattr(http, 'request', http)
    return transport_request(uri, method=method, body=body, headers=headers,
                             redirections=redirections,
                             connection_type=connection_type)
+
+
+_CACHED_HTTP = httplib2.Http(MemoryCache())
diff --git a/contrib/python/oauth2client/py3/ya.make b/contrib/python/oauth2client/py3/ya.make
new file mode 100644
index 0000000000..644e4afed7
--- /dev/null
+++ b/contrib/python/oauth2client/py3/ya.make
@@ -0,0 +1,68 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(4.1.3)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/httplib2
+ contrib/python/pyasn1
+ contrib/python/pyasn1-modules
+ contrib/python/rsa
+ contrib/python/six
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ oauth2client._openssl_crypt
+ oauth2client._pycrypto_crypt
+ oauth2client.contrib.*
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ oauth2client/__init__.py
+ oauth2client/_helpers.py
+ oauth2client/_openssl_crypt.py
+ oauth2client/_pkce.py
+ oauth2client/_pure_python_crypt.py
+ oauth2client/_pycrypto_crypt.py
+ oauth2client/client.py
+ oauth2client/clientsecrets.py
+ oauth2client/contrib/__init__.py
+ oauth2client/contrib/_appengine_ndb.py
+ oauth2client/contrib/_metadata.py
+ oauth2client/contrib/appengine.py
+ oauth2client/contrib/devshell.py
+ oauth2client/contrib/dictionary_storage.py
+ oauth2client/contrib/django_util/__init__.py
+ oauth2client/contrib/django_util/apps.py
+ oauth2client/contrib/django_util/decorators.py
+ oauth2client/contrib/django_util/models.py
+ oauth2client/contrib/django_util/signals.py
+ oauth2client/contrib/django_util/site.py
+ oauth2client/contrib/django_util/storage.py
+ oauth2client/contrib/django_util/views.py
+ oauth2client/contrib/flask_util.py
+ oauth2client/contrib/gce.py
+ oauth2client/contrib/keyring_storage.py
+ oauth2client/contrib/multiprocess_file_storage.py
+ oauth2client/contrib/sqlalchemy.py
+ oauth2client/contrib/xsrfutil.py
+ oauth2client/crypt.py
+ oauth2client/file.py
+ oauth2client/service_account.py
+ oauth2client/tools.py
+ oauth2client/transport.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/oauth2client/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/oauth2client/ya.make b/contrib/python/oauth2client/ya.make
new file mode 100644
index 0000000000..8934e6dd84
--- /dev/null
+++ b/contrib/python/oauth2client/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/oauth2client/py2)
+ELSE()
+ PEERDIR(contrib/python/oauth2client/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/oauthlib/.dist-info/METADATA b/contrib/python/oauthlib/.dist-info/METADATA
new file mode 100644
index 0000000000..5bb339b0bd
--- /dev/null
+++ b/contrib/python/oauthlib/.dist-info/METADATA
@@ -0,0 +1,179 @@
+Metadata-Version: 2.1
+Name: oauthlib
+Version: 3.2.2
+Summary: A generic, spec-compliant, thorough implementation of the OAuth request-signing logic
+Home-page: https://github.com/oauthlib/oauthlib
+Author: The OAuthlib Community
+Author-email: idan@gazit.me
+Maintainer: Ib Lundgren
+Maintainer-email: ib.lundgren@gmail.com
+License: BSD
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: rsa
+Requires-Dist: cryptography (>=3.0.0) ; extra == 'rsa'
+Provides-Extra: signals
+Requires-Dist: blinker (>=1.4.0) ; extra == 'signals'
+Provides-Extra: signedtoken
+Requires-Dist: cryptography (>=3.0.0) ; extra == 'signedtoken'
+Requires-Dist: pyjwt (<3,>=2.0.0) ; extra == 'signedtoken'
+
+OAuthLib - Python Framework for OAuth1 & OAuth2
+===============================================
+
+*A generic, spec-compliant, thorough implementation of the OAuth request-signing
+logic for Python 3.6+.*
+
+.. image:: https://app.travis-ci.com/oauthlib/oauthlib.svg?branch=master
+ :target: https://app.travis-ci.com/oauthlib/oauthlib
+ :alt: Travis
+.. image:: https://coveralls.io/repos/oauthlib/oauthlib/badge.svg?branch=master
+ :target: https://coveralls.io/r/oauthlib/oauthlib
+ :alt: Coveralls
+.. image:: https://img.shields.io/pypi/pyversions/oauthlib.svg
+ :target: https://pypi.org/project/oauthlib/
+ :alt: Download from PyPI
+.. image:: https://img.shields.io/pypi/l/oauthlib.svg
+ :target: https://pypi.org/project/oauthlib/
+ :alt: License
+.. image:: https://app.fossa.io/api/projects/git%2Bgithub.com%2Foauthlib%2Foauthlib.svg?type=shield
+ :target: https://app.fossa.io/projects/git%2Bgithub.com%2Foauthlib%2Foauthlib?ref=badge_shield
+ :alt: FOSSA Status
+.. image:: https://img.shields.io/readthedocs/oauthlib.svg
+ :target: https://oauthlib.readthedocs.io/en/latest/index.html
+ :alt: Read the Docs
+.. image:: https://badges.gitter.im/oauthlib/oauthlib.svg
+ :target: https://gitter.im/oauthlib/Lobby
+ :alt: Chat on Gitter
+
+
+.. image:: https://raw.githubusercontent.com/oauthlib/oauthlib/8d71b161fd145d11c40d55c9ab66ac134a303253/docs/logo/oauthlib-banner-700x192.png
+ :target: https://github.com/oauthlib/oauthlib/
+ :alt: OAuth + Python = OAuthlib Python Framework
+
+
+OAuth often seems complicated and difficult-to-implement. There are several
+prominent libraries for handling OAuth requests, but they all suffer from one or
+both of the following:
+
+1. They predate the `OAuth 1.0 spec`_, AKA RFC 5849.
+2. They predate the `OAuth 2.0 spec`_, AKA RFC 6749.
+3. They assume the usage of a specific HTTP request library.
+
+.. _`OAuth 1.0 spec`: https://tools.ietf.org/html/rfc5849
+.. _`OAuth 2.0 spec`: https://tools.ietf.org/html/rfc6749
+
+OAuthLib is a framework which implements the logic of OAuth1 or OAuth2 without
+assuming a specific HTTP request object or web framework. Use it to graft OAuth
+client support onto your favorite HTTP library, or provide support onto your
+favourite web framework. If you're a maintainer of such a library, write a thin
+veneer on top of OAuthLib and get OAuth support for very little effort.
+
+
+Documentation
+--------------
+
+Full documentation is available on `Read the Docs`_. All contributions are very
+welcome! The documentation is still quite sparse, please open an issue for what
+you'd like to know, or discuss it in our `Gitter community`_, or even better, send a
+pull request!
+
+.. _`Gitter community`: https://gitter.im/oauthlib/Lobby
+.. _`Read the Docs`: https://oauthlib.readthedocs.io/en/latest/index.html
+
+Interested in making OAuth requests?
+------------------------------------
+
+Then you might be more interested in using `requests`_ which has OAuthLib
+powered OAuth support provided by the `requests-oauthlib`_ library.
+
+.. _`requests`: https://github.com/requests/requests
+.. _`requests-oauthlib`: https://github.com/requests/requests-oauthlib
+
+Which web frameworks are supported?
+-----------------------------------
+
+The following packages provide OAuth support using OAuthLib.
+
+- For Django there is `django-oauth-toolkit`_, which includes `Django REST framework`_ support.
+- For Flask there is `flask-oauthlib`_ and `Flask-Dance`_.
+- For Pyramid there is `pyramid-oauthlib`_.
+- For Bottle there is `bottle-oauthlib`_.
+
+If you have written an OAuthLib package that supports your favorite framework,
+please open a Pull Request, updating the documentation.
+
+.. _`django-oauth-toolkit`: https://github.com/evonove/django-oauth-toolkit
+.. _`flask-oauthlib`: https://github.com/lepture/flask-oauthlib
+.. _`Django REST framework`: http://django-rest-framework.org
+.. _`Flask-Dance`: https://github.com/singingwolfboy/flask-dance
+.. _`pyramid-oauthlib`: https://github.com/tilgovi/pyramid-oauthlib
+.. _`bottle-oauthlib`: https://github.com/thomsonreuters/bottle-oauthlib
+
+Using OAuthLib? Please get in touch!
+------------------------------------
+Patching OAuth support onto an http request framework? Creating an OAuth
+provider extension for a web framework? Simply using OAuthLib to Get Things Done
+or to learn?
+
+No matter which, we'd love to hear from you in our `Gitter community`_; or, if you have
+anything in particular you would like to have, change or comment on don't
+hesitate for a second to send a pull request or open an issue. We might be quite
+busy and therefore slow to reply but we love feedback!
+
+Chances are you have run into something annoying that you wish there was
+documentation for, if you wish to gain eternal fame and glory, and a drink if we
+have the pleasure to run into each other, please send a docs pull request =)
+
+.. _`Gitter community`: https://gitter.im/oauthlib/Lobby
+
+License
+-------
+
+OAuthLib is yours to use and abuse according to the terms of the BSD license.
+Check the LICENSE file for full details.
+
+Credits
+-------
+
+OAuthLib has been started and maintained several years by Idan Gazit and other
+amazing `AUTHORS`_. Thanks to their wonderful work, the open-source `community`_
+creation has been possible and the project can stay active and reactive to users
+requests.
+
+
+.. _`AUTHORS`: https://github.com/oauthlib/oauthlib/blob/master/AUTHORS
+.. _`community`: https://github.com/oauthlib/
+
+Changelog
+---------
+
+*OAuthLib is in active development, with the core of both OAuth1 and OAuth2
+completed, for providers as well as clients.* See `supported features`_ for
+details.
+
+.. _`supported features`: https://oauthlib.readthedocs.io/en/latest/feature_matrix.html
+
+For a full changelog see ``CHANGELOG.rst``.
diff --git a/contrib/python/oauthlib/.dist-info/top_level.txt b/contrib/python/oauthlib/.dist-info/top_level.txt
new file mode 100644
index 0000000000..b5f3f0e345
--- /dev/null
+++ b/contrib/python/oauthlib/.dist-info/top_level.txt
@@ -0,0 +1 @@
+oauthlib
diff --git a/contrib/python/oauthlib/LICENSE b/contrib/python/oauthlib/LICENSE
new file mode 100644
index 0000000000..d5a9e9acd0
--- /dev/null
+++ b/contrib/python/oauthlib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2019 The OAuthlib Community
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of this project nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/oauthlib/README.rst b/contrib/python/oauthlib/README.rst
new file mode 100644
index 0000000000..eb8c452d00
--- /dev/null
+++ b/contrib/python/oauthlib/README.rst
@@ -0,0 +1,137 @@
+OAuthLib - Python Framework for OAuth1 & OAuth2
+===============================================
+
+*A generic, spec-compliant, thorough implementation of the OAuth request-signing
+logic for Python 3.6+.*
+
+.. image:: https://app.travis-ci.com/oauthlib/oauthlib.svg?branch=master
+ :target: https://app.travis-ci.com/oauthlib/oauthlib
+ :alt: Travis
+.. image:: https://coveralls.io/repos/oauthlib/oauthlib/badge.svg?branch=master
+ :target: https://coveralls.io/r/oauthlib/oauthlib
+ :alt: Coveralls
+.. image:: https://img.shields.io/pypi/pyversions/oauthlib.svg
+ :target: https://pypi.org/project/oauthlib/
+ :alt: Download from PyPI
+.. image:: https://img.shields.io/pypi/l/oauthlib.svg
+ :target: https://pypi.org/project/oauthlib/
+ :alt: License
+.. image:: https://app.fossa.io/api/projects/git%2Bgithub.com%2Foauthlib%2Foauthlib.svg?type=shield
+ :target: https://app.fossa.io/projects/git%2Bgithub.com%2Foauthlib%2Foauthlib?ref=badge_shield
+ :alt: FOSSA Status
+.. image:: https://img.shields.io/readthedocs/oauthlib.svg
+ :target: https://oauthlib.readthedocs.io/en/latest/index.html
+ :alt: Read the Docs
+.. image:: https://badges.gitter.im/oauthlib/oauthlib.svg
+ :target: https://gitter.im/oauthlib/Lobby
+ :alt: Chat on Gitter
+
+
+.. image:: https://raw.githubusercontent.com/oauthlib/oauthlib/8d71b161fd145d11c40d55c9ab66ac134a303253/docs/logo/oauthlib-banner-700x192.png
+ :target: https://github.com/oauthlib/oauthlib/
+ :alt: OAuth + Python = OAuthlib Python Framework
+
+
+OAuth often seems complicated and difficult-to-implement. There are several
+prominent libraries for handling OAuth requests, but they all suffer from one or
+both of the following:
+
+1. They predate the `OAuth 1.0 spec`_, AKA RFC 5849.
+2. They predate the `OAuth 2.0 spec`_, AKA RFC 6749.
+3. They assume the usage of a specific HTTP request library.
+
+.. _`OAuth 1.0 spec`: https://tools.ietf.org/html/rfc5849
+.. _`OAuth 2.0 spec`: https://tools.ietf.org/html/rfc6749
+
+OAuthLib is a framework which implements the logic of OAuth1 or OAuth2 without
+assuming a specific HTTP request object or web framework. Use it to graft OAuth
+client support onto your favorite HTTP library, or provide support onto your
+favourite web framework. If you're a maintainer of such a library, write a thin
+veneer on top of OAuthLib and get OAuth support for very little effort.
+
+
+Documentation
+--------------
+
+Full documentation is available on `Read the Docs`_. All contributions are very
+welcome! The documentation is still quite sparse, please open an issue for what
+you'd like to know, or discuss it in our `Gitter community`_, or even better, send a
+pull request!
+
+.. _`Gitter community`: https://gitter.im/oauthlib/Lobby
+.. _`Read the Docs`: https://oauthlib.readthedocs.io/en/latest/index.html
+
+Interested in making OAuth requests?
+------------------------------------
+
+Then you might be more interested in using `requests`_ which has OAuthLib
+powered OAuth support provided by the `requests-oauthlib`_ library.
+
+.. _`requests`: https://github.com/requests/requests
+.. _`requests-oauthlib`: https://github.com/requests/requests-oauthlib
+
+Which web frameworks are supported?
+-----------------------------------
+
+The following packages provide OAuth support using OAuthLib.
+
+- For Django there is `django-oauth-toolkit`_, which includes `Django REST framework`_ support.
+- For Flask there is `flask-oauthlib`_ and `Flask-Dance`_.
+- For Pyramid there is `pyramid-oauthlib`_.
+- For Bottle there is `bottle-oauthlib`_.
+
+If you have written an OAuthLib package that supports your favorite framework,
+please open a Pull Request, updating the documentation.
+
+.. _`django-oauth-toolkit`: https://github.com/evonove/django-oauth-toolkit
+.. _`flask-oauthlib`: https://github.com/lepture/flask-oauthlib
+.. _`Django REST framework`: http://django-rest-framework.org
+.. _`Flask-Dance`: https://github.com/singingwolfboy/flask-dance
+.. _`pyramid-oauthlib`: https://github.com/tilgovi/pyramid-oauthlib
+.. _`bottle-oauthlib`: https://github.com/thomsonreuters/bottle-oauthlib
+
+Using OAuthLib? Please get in touch!
+------------------------------------
+Patching OAuth support onto an http request framework? Creating an OAuth
+provider extension for a web framework? Simply using OAuthLib to Get Things Done
+or to learn?
+
+No matter which, we'd love to hear from you in our `Gitter community`_; or, if you have
+anything in particular you would like to have, change or comment on don't
+hesitate for a second to send a pull request or open an issue. We might be quite
+busy and therefore slow to reply but we love feedback!
+
+Chances are you have run into something annoying that you wish there was
+documentation for, if you wish to gain eternal fame and glory, and a drink if we
+have the pleasure to run into each other, please send a docs pull request =)
+
+.. _`Gitter community`: https://gitter.im/oauthlib/Lobby
+
+License
+-------
+
+OAuthLib is yours to use and abuse according to the terms of the BSD license.
+Check the LICENSE file for full details.
+
+Credits
+-------
+
+OAuthLib has been started and maintained several years by Idan Gazit and other
+amazing `AUTHORS`_. Thanks to their wonderful work, the open-source `community`_
+creation has been possible and the project can stay active and reactive to users
+requests.
+
+
+.. _`AUTHORS`: https://github.com/oauthlib/oauthlib/blob/master/AUTHORS
+.. _`community`: https://github.com/oauthlib/
+
+Changelog
+---------
+
+*OAuthLib is in active development, with the core of both OAuth1 and OAuth2
+completed, for providers as well as clients.* See `supported features`_ for
+details.
+
+.. _`supported features`: https://oauthlib.readthedocs.io/en/latest/feature_matrix.html
+
+For a full changelog see ``CHANGELOG.rst``.
diff --git a/contrib/python/oauthlib/oauthlib/__init__.py b/contrib/python/oauthlib/oauthlib/__init__.py
new file mode 100644
index 0000000000..d9a5e38ea0
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/__init__.py
@@ -0,0 +1,34 @@
+"""
+ oauthlib
+ ~~~~~~~~
+
+ A generic, spec-compliant, thorough implementation of the OAuth
+ request-signing logic.
+
+ :copyright: (c) 2019 by The OAuthlib Community
+ :license: BSD, see LICENSE for details.
+"""
+import logging
+from logging import NullHandler
+
+__author__ = 'The OAuthlib Community'
+__version__ = '3.2.2'
+
+logging.getLogger('oauthlib').addHandler(NullHandler())
+
+_DEBUG = False
+
def set_debug(debug_val):
    """Toggle the module-wide debug flag.

    :param debug_val: Value to set. Must be a bool value.
    """
    global _DEBUG
    _DEBUG = debug_val


def get_debug():
    """Report the current debug mode value.

    :return: `True` if debug mode is on, `False` otherwise
    """
    return _DEBUG
diff --git a/contrib/python/oauthlib/oauthlib/common.py b/contrib/python/oauthlib/oauthlib/common.py
new file mode 100644
index 0000000000..395e75efc9
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/common.py
@@ -0,0 +1,432 @@
+"""
+oauthlib.common
+~~~~~~~~~~~~~~~
+
+This module provides data structures and utilities common
+to all implementations of OAuth.
+"""
+import collections
+import datetime
+import logging
+import re
+import time
+import urllib.parse as urlparse
+from urllib.parse import (
+ quote as _quote, unquote as _unquote, urlencode as _urlencode,
+)
+
+from . import get_debug
+
+try:
+ from secrets import SystemRandom, randbits
+except ImportError:
+ from random import SystemRandom, getrandbits as randbits
+
+UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ '0123456789')
+
+CLIENT_ID_CHARACTER_SET = (r' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMN'
+ 'OPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}')
+
+SANITIZE_PATTERN = re.compile(r'([^&;]*(?:password|token)[^=]*=)[^&;]+', re.IGNORECASE)
+INVALID_HEX_PATTERN = re.compile(r'%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]')
+
+always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ 'abcdefghijklmnopqrstuvwxyz'
+ '0123456789' '_.-')
+
+log = logging.getLogger('oauthlib')
+
+
+# 'safe' must be bytes (Python 2.6 requires bytes, other versions allow either)
+def quote(s, safe=b'/'):
+ s = s.encode('utf-8') if isinstance(s, str) else s
+ s = _quote(s, safe)
+ # PY3 always returns unicode. PY2 may return either, depending on whether
+ # it had to modify the string.
+ if isinstance(s, bytes):
+ s = s.decode('utf-8')
+ return s
+
+
+def unquote(s):
+ s = _unquote(s)
+ # PY3 always returns unicode. PY2 seems to always return what you give it,
+ # which differs from quote's behavior. Just to be safe, make sure it is
+ # unicode before we return.
+ if isinstance(s, bytes):
+ s = s.decode('utf-8')
+ return s
+
+
+def urlencode(params):
+ utf8_params = encode_params_utf8(params)
+ urlencoded = _urlencode(utf8_params)
+ if isinstance(urlencoded, str):
+ return urlencoded
+ else:
+ return urlencoded.decode("utf-8")
+
+
+def encode_params_utf8(params):
+ """Ensures that all parameters in a list of 2-element tuples are encoded to
+ bytestrings using UTF-8
+ """
+ encoded = []
+ for k, v in params:
+ encoded.append((
+ k.encode('utf-8') if isinstance(k, str) else k,
+ v.encode('utf-8') if isinstance(v, str) else v))
+ return encoded
+
+
+def decode_params_utf8(params):
+ """Ensures that all parameters in a list of 2-element tuples are decoded to
+ unicode using UTF-8.
+ """
+ decoded = []
+ for k, v in params:
+ decoded.append((
+ k.decode('utf-8') if isinstance(k, bytes) else k,
+ v.decode('utf-8') if isinstance(v, bytes) else v))
+ return decoded
+
+
+urlencoded = set(always_safe) | set('=&;:%+~,*@!()/?\'$')
+
+
+def urldecode(query):
+ """Decode a query string in x-www-form-urlencoded format into a sequence
+ of two-element tuples.
+
+ Unlike urlparse.parse_qsl(..., strict_parsing=True) urldecode will enforce
+ correct formatting of the query string by validation. If validation fails
+ a ValueError will be raised. urllib.parse_qsl will only raise errors if
+ any of name-value pairs omits the equals sign.
+ """
+ # Check if query contains invalid characters
+ if query and not set(query) <= urlencoded:
+ error = ("Error trying to decode a non urlencoded string. "
+ "Found invalid characters: %s "
+ "in the string: '%s'. "
+ "Please ensure the request/response body is "
+ "x-www-form-urlencoded.")
+ raise ValueError(error % (set(query) - urlencoded, query))
+
+ # Check for correctly hex encoded values using a regular expression
+ # All encoded values begin with % followed by two hex characters
+ # correct = %00, %A0, %0A, %FF
+ # invalid = %G0, %5H, %PO
+ if INVALID_HEX_PATTERN.search(query):
+ raise ValueError('Invalid hex encoding in query string.')
+
+ # We want to allow queries such as "c2" whereas urlparse.parse_qsl
+ # with the strict_parsing flag will not.
+ params = urlparse.parse_qsl(query, keep_blank_values=True)
+
+ # unicode all the things
+ return decode_params_utf8(params)
+
+
+def extract_params(raw):
+ """Extract parameters and return them as a list of 2-tuples.
+
+ Will successfully extract parameters from urlencoded query strings,
+ dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
+ empty list of parameters. Any other input will result in a return
+ value of None.
+ """
+ if isinstance(raw, (bytes, str)):
+ try:
+ params = urldecode(raw)
+ except ValueError:
+ params = None
+ elif hasattr(raw, '__iter__'):
+ try:
+ dict(raw)
+ except ValueError:
+ params = None
+ except TypeError:
+ params = None
+ else:
+ params = list(raw.items() if isinstance(raw, dict) else raw)
+ params = decode_params_utf8(params)
+ else:
+ params = None
+
+ return params
+
+
+def generate_nonce():
+ """Generate pseudorandom nonce that is unlikely to repeat.
+
+ Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
+ Per `section 3.2.1`_ of the MAC Access Authentication spec.
+
+ A random 64-bit number is appended to the epoch timestamp for both
+ randomness and to decrease the likelihood of collisions.
+
+ .. _`section 3.2.1`: https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
+ .. _`section 3.3`: https://tools.ietf.org/html/rfc5849#section-3.3
+ """
+ return str(str(randbits(64)) + generate_timestamp())
+
+
+def generate_timestamp():
+ """Get seconds since epoch (UTC).
+
+ Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
+ Per `section 3.2.1`_ of the MAC Access Authentication spec.
+
+ .. _`section 3.2.1`: https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
+ .. _`section 3.3`: https://tools.ietf.org/html/rfc5849#section-3.3
+ """
+ return str(int(time.time()))
+
+
+def generate_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET):
+ """Generates a non-guessable OAuth token
+
+ OAuth (1 and 2) does not specify the format of tokens except that they
+ should be strings of random characters. Tokens should not be guessable
+ and entropy when generating the random characters is important. Which is
+ why SystemRandom is used instead of the default random.choice method.
+ """
+ rand = SystemRandom()
+ return ''.join(rand.choice(chars) for x in range(length))
+
+
+def generate_signed_token(private_pem, request):
+ import jwt
+
+ now = datetime.datetime.utcnow()
+
+ claims = {
+ 'scope': request.scope,
+ 'exp': now + datetime.timedelta(seconds=request.expires_in)
+ }
+
+ claims.update(request.claims)
+
+ token = jwt.encode(claims, private_pem, 'RS256')
+ token = to_unicode(token, "UTF-8")
+
+ return token
+
+
+def verify_signed_token(public_pem, token):
+ import jwt
+
+ return jwt.decode(token, public_pem, algorithms=['RS256'])
+
+
+def generate_client_id(length=30, chars=CLIENT_ID_CHARACTER_SET):
+ """Generates an OAuth client_id
+
+ OAuth 2 specify the format of client_id in
+ https://tools.ietf.org/html/rfc6749#appendix-A.
+ """
+ return generate_token(length, chars)
+
+
+def add_params_to_qs(query, params):
+ """Extend a query with a list of two-tuples."""
+ if isinstance(params, dict):
+ params = params.items()
+ queryparams = urlparse.parse_qsl(query, keep_blank_values=True)
+ queryparams.extend(params)
+ return urlencode(queryparams)
+
+
+def add_params_to_uri(uri, params, fragment=False):
+ """Add a list of two-tuples to the uri query components."""
+ sch, net, path, par, query, fra = urlparse.urlparse(uri)
+ if fragment:
+ fra = add_params_to_qs(fra, params)
+ else:
+ query = add_params_to_qs(query, params)
+ return urlparse.urlunparse((sch, net, path, par, query, fra))
+
+
+def safe_string_equals(a, b):
+ """ Near-constant time string comparison.
+
+ Used in order to avoid timing attacks on sensitive information such
+ as secret keys during request verification (`rootLabs`_).
+
+ .. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/
+
+ """
+ if len(a) != len(b):
+ return False
+
+ result = 0
+ for x, y in zip(a, b):
+ result |= ord(x) ^ ord(y)
+ return result == 0
+
+
+def to_unicode(data, encoding='UTF-8'):
+ """Convert a number of different types of objects to unicode."""
+ if isinstance(data, str):
+ return data
+
+ if isinstance(data, bytes):
+ return str(data, encoding=encoding)
+
+ if hasattr(data, '__iter__'):
+ try:
+ dict(data)
+ except TypeError:
+ pass
+ except ValueError:
+ # Assume it's a one dimensional data structure
+ return (to_unicode(i, encoding) for i in data)
+ else:
+        # Treat the input as a mapping: convert keys and values recursively.
+ if hasattr(data, 'items'):
+ data = data.items()
+ return {to_unicode(k, encoding): to_unicode(v, encoding) for k, v in data}
+
+ return data
+
+
+class CaseInsensitiveDict(dict):
+
+ """Basic case insensitive dict with strings only keys."""
+
+ proxy = {}
+
+ def __init__(self, data):
+ self.proxy = {k.lower(): k for k in data}
+ for k in data:
+ self[k] = data[k]
+
+ def __contains__(self, k):
+ return k.lower() in self.proxy
+
+ def __delitem__(self, k):
+ key = self.proxy[k.lower()]
+ super().__delitem__(key)
+ del self.proxy[k.lower()]
+
+ def __getitem__(self, k):
+ key = self.proxy[k.lower()]
+ return super().__getitem__(key)
+
+ def get(self, k, default=None):
+ return self[k] if k in self else default
+
+ def __setitem__(self, k, v):
+ super().__setitem__(k, v)
+ self.proxy[k.lower()] = k
+
+ def update(self, *args, **kwargs):
+ super().update(*args, **kwargs)
+ for k in dict(*args, **kwargs):
+ self.proxy[k.lower()] = k
+
+
+class Request:
+
+ """A malleable representation of a signable HTTP request.
+
+ Body argument may contain any data, but parameters will only be decoded if
+ they are one of:
+
+ * urlencoded query string
+ * dict
+ * list of 2-tuples
+
+ Anything else will be treated as raw body data to be passed through
+ unmolested.
+ """
+
+ def __init__(self, uri, http_method='GET', body=None, headers=None,
+ encoding='utf-8'):
+ # Convert to unicode using encoding if given, else assume unicode
+ encode = lambda x: to_unicode(x, encoding) if encoding else x
+
+ self.uri = encode(uri)
+ self.http_method = encode(http_method)
+ self.headers = CaseInsensitiveDict(encode(headers or {}))
+ self.body = encode(body)
+ self.decoded_body = extract_params(self.body)
+ self.oauth_params = []
+ self.validator_log = {}
+
+ self._params = {
+ "access_token": None,
+ "client": None,
+ "client_id": None,
+ "client_secret": None,
+ "code": None,
+ "code_challenge": None,
+ "code_challenge_method": None,
+ "code_verifier": None,
+ "extra_credentials": None,
+ "grant_type": None,
+ "redirect_uri": None,
+ "refresh_token": None,
+ "request_token": None,
+ "response_type": None,
+ "scope": None,
+ "scopes": None,
+ "state": None,
+ "token": None,
+ "user": None,
+ "token_type_hint": None,
+
+ # OpenID Connect
+ "response_mode": None,
+ "nonce": None,
+ "display": None,
+ "prompt": None,
+ "claims": None,
+ "max_age": None,
+ "ui_locales": None,
+ "id_token_hint": None,
+ "login_hint": None,
+ "acr_values": None
+ }
+ self._params.update(dict(urldecode(self.uri_query)))
+ self._params.update(dict(self.decoded_body or []))
+
+ def __getattr__(self, name):
+ if name in self._params:
+ return self._params[name]
+ else:
+ raise AttributeError(name)
+
+ def __repr__(self):
+ if not get_debug():
+ return "<oauthlib.Request SANITIZED>"
+ body = self.body
+ headers = self.headers.copy()
+ if body:
+ body = SANITIZE_PATTERN.sub('\1<SANITIZED>', str(body))
+ if 'Authorization' in headers:
+ headers['Authorization'] = '<SANITIZED>'
+ return '<oauthlib.Request url="{}", http_method="{}", headers="{}", body="{}">'.format(
+ self.uri, self.http_method, headers, body)
+
+ @property
+ def uri_query(self):
+ return urlparse.urlparse(self.uri).query
+
+ @property
+ def uri_query_params(self):
+ if not self.uri_query:
+ return []
+ return urlparse.parse_qsl(self.uri_query, keep_blank_values=True,
+ strict_parsing=True)
+
+ @property
+ def duplicate_params(self):
+ seen_keys = collections.defaultdict(int)
+ all_keys = (p[0]
+ for p in (self.decoded_body or []) + self.uri_query_params)
+ for k in all_keys:
+ seen_keys[k] += 1
+ return [k for k, c in seen_keys.items() if c > 1]
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/__init__.py b/contrib/python/oauthlib/oauthlib/oauth1/__init__.py
new file mode 100644
index 0000000000..9caf12a90d
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/__init__.py
@@ -0,0 +1,23 @@
+"""
+oauthlib.oauth1
+~~~~~~~~~~~~~~~
+
+This module is a wrapper for the most recent implementation of OAuth 1.0 Client
+and Server classes.
+"""
+from .rfc5849 import (
+ SIGNATURE_HMAC, SIGNATURE_HMAC_SHA1, SIGNATURE_HMAC_SHA256,
+ SIGNATURE_HMAC_SHA512, SIGNATURE_PLAINTEXT, SIGNATURE_RSA,
+ SIGNATURE_RSA_SHA1, SIGNATURE_RSA_SHA256, SIGNATURE_RSA_SHA512,
+ SIGNATURE_TYPE_AUTH_HEADER, SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_QUERY,
+ Client,
+)
+from .rfc5849.endpoints import (
+ AccessTokenEndpoint, AuthorizationEndpoint, RequestTokenEndpoint,
+ ResourceEndpoint, SignatureOnlyEndpoint, WebApplicationServer,
+)
+from .rfc5849.errors import (
+ InsecureTransportError, InvalidClientError, InvalidRequestError,
+ InvalidSignatureMethodError, OAuth1Error,
+)
+from .rfc5849.request_validator import RequestValidator
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/__init__.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/__init__.py
new file mode 100644
index 0000000000..c559251fed
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/__init__.py
@@ -0,0 +1,365 @@
+"""
+oauthlib.oauth1.rfc5849
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for signing and checking OAuth 1.0 RFC 5849 requests.
+
+It supports all three standard signature methods defined in RFC 5849:
+
+- HMAC-SHA1
+- RSA-SHA1
+- PLAINTEXT
+
+It also supports signature methods that are not defined in RFC 5849. These are
+based on the standard ones but replace SHA-1 with the more secure SHA-256:
+
+- HMAC-SHA256
+- RSA-SHA256
+
+"""
+import base64
+import hashlib
+import logging
+import urllib.parse as urlparse
+
+from oauthlib.common import (
+ Request, generate_nonce, generate_timestamp, to_unicode, urlencode,
+)
+
+from . import parameters, signature
+
+log = logging.getLogger(__name__)
+
+# Available signature methods
+#
+# Note: SIGNATURE_HMAC and SIGNATURE_RSA are kept for backward compatibility
+# with previous versions of this library, when the only HMAC-based and
+# RSA-based signature methods supported were HMAC-SHA1 and RSA-SHA1. But now that it
+# supports other hashing algorithms besides SHA1, explicitly identifying which
+# hashing algorithm is being used is recommended.
+#
+# Note: if additional values are defined here, don't forget to update the
+# imports in "../__init__.py" so they are available outside this module.
+
+SIGNATURE_HMAC_SHA1 = "HMAC-SHA1"
+SIGNATURE_HMAC_SHA256 = "HMAC-SHA256"
+SIGNATURE_HMAC_SHA512 = "HMAC-SHA512"
+SIGNATURE_HMAC = SIGNATURE_HMAC_SHA1 # deprecated variable for HMAC-SHA1
+
+SIGNATURE_RSA_SHA1 = "RSA-SHA1"
+SIGNATURE_RSA_SHA256 = "RSA-SHA256"
+SIGNATURE_RSA_SHA512 = "RSA-SHA512"
+SIGNATURE_RSA = SIGNATURE_RSA_SHA1 # deprecated variable for RSA-SHA1
+
+SIGNATURE_PLAINTEXT = "PLAINTEXT"
+
+SIGNATURE_METHODS = (
+ SIGNATURE_HMAC_SHA1,
+ SIGNATURE_HMAC_SHA256,
+ SIGNATURE_HMAC_SHA512,
+ SIGNATURE_RSA_SHA1,
+ SIGNATURE_RSA_SHA256,
+ SIGNATURE_RSA_SHA512,
+ SIGNATURE_PLAINTEXT
+)
+
+SIGNATURE_TYPE_AUTH_HEADER = 'AUTH_HEADER'
+SIGNATURE_TYPE_QUERY = 'QUERY'
+SIGNATURE_TYPE_BODY = 'BODY'
+
+CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
+
+
+class Client:
+
+ """A client used to sign OAuth 1.0 RFC 5849 requests."""
+ SIGNATURE_METHODS = {
+ SIGNATURE_HMAC_SHA1: signature.sign_hmac_sha1_with_client,
+ SIGNATURE_HMAC_SHA256: signature.sign_hmac_sha256_with_client,
+ SIGNATURE_HMAC_SHA512: signature.sign_hmac_sha512_with_client,
+ SIGNATURE_RSA_SHA1: signature.sign_rsa_sha1_with_client,
+ SIGNATURE_RSA_SHA256: signature.sign_rsa_sha256_with_client,
+ SIGNATURE_RSA_SHA512: signature.sign_rsa_sha512_with_client,
+ SIGNATURE_PLAINTEXT: signature.sign_plaintext_with_client
+ }
+
+ @classmethod
+ def register_signature_method(cls, method_name, method_callback):
+ cls.SIGNATURE_METHODS[method_name] = method_callback
+
+ def __init__(self, client_key,
+ client_secret=None,
+ resource_owner_key=None,
+ resource_owner_secret=None,
+ callback_uri=None,
+ signature_method=SIGNATURE_HMAC_SHA1,
+ signature_type=SIGNATURE_TYPE_AUTH_HEADER,
+ rsa_key=None, verifier=None, realm=None,
+ encoding='utf-8', decoding=None,
+ nonce=None, timestamp=None):
+ """Create an OAuth 1 client.
+
+ :param client_key: Client key (consumer key), mandatory.
+ :param resource_owner_key: Resource owner key (oauth token).
+ :param resource_owner_secret: Resource owner secret (oauth token secret).
+ :param callback_uri: Callback used when obtaining request token.
+ :param signature_method: SIGNATURE_HMAC, SIGNATURE_RSA or SIGNATURE_PLAINTEXT.
+ :param signature_type: SIGNATURE_TYPE_AUTH_HEADER (default),
+ SIGNATURE_TYPE_QUERY or SIGNATURE_TYPE_BODY
+ depending on where you want to embed the oauth
+ credentials.
+ :param rsa_key: RSA key used with SIGNATURE_RSA.
+ :param verifier: Verifier used when obtaining an access token.
+ :param realm: Realm (scope) to which access is being requested.
+ :param encoding: If you provide non-unicode input you may use this
+ to have oauthlib automatically convert.
+ :param decoding: If you wish that the returned uri, headers and body
+ from sign be encoded back from unicode, then set
+ decoding to your preferred encoding, i.e. utf-8.
+ :param nonce: Use this nonce instead of generating one. (Mainly for testing)
+ :param timestamp: Use this timestamp instead of using current. (Mainly for testing)
+ """
+ # Convert to unicode using encoding if given, else assume unicode
+ encode = lambda x: to_unicode(x, encoding) if encoding else x
+
+ self.client_key = encode(client_key)
+ self.client_secret = encode(client_secret)
+ self.resource_owner_key = encode(resource_owner_key)
+ self.resource_owner_secret = encode(resource_owner_secret)
+ self.signature_method = encode(signature_method)
+ self.signature_type = encode(signature_type)
+ self.callback_uri = encode(callback_uri)
+ self.rsa_key = encode(rsa_key)
+ self.verifier = encode(verifier)
+ self.realm = encode(realm)
+ self.encoding = encode(encoding)
+ self.decoding = encode(decoding)
+ self.nonce = encode(nonce)
+ self.timestamp = encode(timestamp)
+
+ def __repr__(self):
+ attrs = vars(self).copy()
+ attrs['client_secret'] = '****' if attrs['client_secret'] else None
+ attrs['rsa_key'] = '****' if attrs['rsa_key'] else None
+ attrs[
+ 'resource_owner_secret'] = '****' if attrs['resource_owner_secret'] else None
+ attribute_str = ', '.join('{}={}'.format(k, v) for k, v in attrs.items())
+ return '<{} {}>'.format(self.__class__.__name__, attribute_str)
+
+ def get_oauth_signature(self, request):
+ """Get an OAuth signature to be used in signing a request
+
+ To satisfy `section 3.4.1.2`_ item 2, if the request argument's
+ headers dict attribute contains a Host item, its value will
+ replace any netloc part of the request argument's uri attribute
+ value.
+
+ .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
+ """
+ if self.signature_method == SIGNATURE_PLAINTEXT:
+ # fast-path
+ return signature.sign_plaintext(self.client_secret,
+ self.resource_owner_secret)
+
+ uri, headers, body = self._render(request)
+
+ collected_params = signature.collect_parameters(
+ uri_query=urlparse.urlparse(uri).query,
+ body=body,
+ headers=headers)
+ log.debug("Collected params: {}".format(collected_params))
+
+ normalized_params = signature.normalize_parameters(collected_params)
+ normalized_uri = signature.base_string_uri(uri, headers.get('Host', None))
+ log.debug("Normalized params: {}".format(normalized_params))
+ log.debug("Normalized URI: {}".format(normalized_uri))
+
+ base_string = signature.signature_base_string(request.http_method,
+ normalized_uri, normalized_params)
+
+ log.debug("Signing: signature base string: {}".format(base_string))
+
+ if self.signature_method not in self.SIGNATURE_METHODS:
+ raise ValueError('Invalid signature method.')
+
+ sig = self.SIGNATURE_METHODS[self.signature_method](base_string, self)
+
+ log.debug("Signature: {}".format(sig))
+ return sig
+
+ def get_oauth_params(self, request):
+ """Get the basic OAuth parameters to be used in generating a signature.
+ """
+ nonce = (generate_nonce()
+ if self.nonce is None else self.nonce)
+ timestamp = (generate_timestamp()
+ if self.timestamp is None else self.timestamp)
+ params = [
+ ('oauth_nonce', nonce),
+ ('oauth_timestamp', timestamp),
+ ('oauth_version', '1.0'),
+ ('oauth_signature_method', self.signature_method),
+ ('oauth_consumer_key', self.client_key),
+ ]
+ if self.resource_owner_key:
+ params.append(('oauth_token', self.resource_owner_key))
+ if self.callback_uri:
+ params.append(('oauth_callback', self.callback_uri))
+ if self.verifier:
+ params.append(('oauth_verifier', self.verifier))
+
+ # providing body hash for requests other than x-www-form-urlencoded
+ # as described in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-4.1.1
+ # 4.1.1. When to include the body hash
+ # * [...] MUST NOT include an oauth_body_hash parameter on requests with form-encoded request bodies
+ # * [...] SHOULD include the oauth_body_hash parameter on all other requests.
+ # Note that SHA-1 is vulnerable. The spec acknowledges that in https://tools.ietf.org/html/draft-eaton-oauth-bodyhash-00#section-6.2
+ # At this time, no further effort has been made to replace SHA-1 for the OAuth Request Body Hash extension.
+ content_type = request.headers.get('Content-Type', None)
+ content_type_eligible = content_type and content_type.find('application/x-www-form-urlencoded') < 0
+ if request.body is not None and content_type_eligible:
+ params.append(('oauth_body_hash', base64.b64encode(hashlib.sha1(request.body.encode('utf-8')).digest()).decode('utf-8')))
+
+ return params
+
+ def _render(self, request, formencode=False, realm=None):
+ """Render a signed request according to signature type
+
+ Returns a 3-tuple containing the request URI, headers, and body.
+
+ If the formencode argument is True and the body contains parameters, it
+ is escaped and returned as a valid formencoded string.
+ """
+ # TODO what if there are body params on a header-type auth?
+ # TODO what if there are query params on a body-type auth?
+
+ uri, headers, body = request.uri, request.headers, request.body
+
+ # TODO: right now these prepare_* methods are very narrow in scope--they
+ # only affect their little thing. In some cases (for example, with
+ # header auth) it might be advantageous to allow these methods to touch
+ # other parts of the request, like the headers—so the prepare_headers
+ # method could also set the Content-Type header to x-www-form-urlencoded
+ # like the spec requires. This would be a fundamental change though, and
+ # I'm not sure how I feel about it.
+ if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER:
+ headers = parameters.prepare_headers(
+ request.oauth_params, request.headers, realm=realm)
+ elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None:
+ body = parameters.prepare_form_encoded_body(
+ request.oauth_params, request.decoded_body)
+ if formencode:
+ body = urlencode(body)
+ headers['Content-Type'] = 'application/x-www-form-urlencoded'
+ elif self.signature_type == SIGNATURE_TYPE_QUERY:
+ uri = parameters.prepare_request_uri_query(
+ request.oauth_params, request.uri)
+ else:
+ raise ValueError('Unknown signature type specified.')
+
+ return uri, headers, body
+
+ def sign(self, uri, http_method='GET', body=None, headers=None, realm=None):
+ """Sign a request
+
+ Signs an HTTP request with the specified parts.
+
+ Returns a 3-tuple of the signed request's URI, headers, and body.
+ Note that http_method is not returned as it is unaffected by the OAuth
+ signing process. Also worth noting is that duplicate parameters
+ will be included in the signature, regardless of where they are
+ specified (query, body).
+
+ The body argument may be a dict, a list of 2-tuples, or a formencoded
+ string. The Content-Type header must be 'application/x-www-form-urlencoded'
+ if it is present.
+
+ If the body argument is not one of the above, it will be returned
+ verbatim as it is unaffected by the OAuth signing process. Attempting to
+ sign a request with non-formencoded data using the OAuth body signature
+ type is invalid and will raise an exception.
+
+ If the body does contain parameters, it will be returned as a properly-
+ formatted formencoded string.
+
+ Body may not be included if the http_method is either GET or HEAD as
+ this changes the semantic meaning of the request.
+
+ All string data MUST be unicode or be encoded with the same encoding
+ scheme supplied to the Client constructor, default utf-8. This includes
+ strings inside body dicts, for example.
+ """
+ # normalize request data
+ request = Request(uri, http_method, body, headers,
+ encoding=self.encoding)
+
+ # sanity check
+ content_type = request.headers.get('Content-Type', None)
+ multipart = content_type and content_type.startswith('multipart/')
+ should_have_params = content_type == CONTENT_TYPE_FORM_URLENCODED
+ has_params = request.decoded_body is not None
+ # 3.4.1.3.1. Parameter Sources
+ # [Parameters are collected from the HTTP request entity-body, but only
+ # if [...]:
+ # * The entity-body is single-part.
+ if multipart and has_params:
+ raise ValueError(
+ "Headers indicate a multipart body but body contains parameters.")
+ # * The entity-body follows the encoding requirements of the
+ # "application/x-www-form-urlencoded" content-type as defined by
+ # [W3C.REC-html40-19980424].
+ elif should_have_params and not has_params:
+ raise ValueError(
+ "Headers indicate a formencoded body but body was not decodable.")
+ # * The HTTP request entity-header includes the "Content-Type"
+ # header field set to "application/x-www-form-urlencoded".
+ elif not should_have_params and has_params:
+ raise ValueError(
+ "Body contains parameters but Content-Type header was {} "
+ "instead of {}".format(content_type or "not set",
+ CONTENT_TYPE_FORM_URLENCODED))
+
+ # 3.5.2. Form-Encoded Body
+ # Protocol parameters can be transmitted in the HTTP request entity-
+ # body, but only if the following REQUIRED conditions are met:
+ # o The entity-body is single-part.
+ # o The entity-body follows the encoding requirements of the
+ # "application/x-www-form-urlencoded" content-type as defined by
+ # [W3C.REC-html40-19980424].
+ # o The HTTP request entity-header includes the "Content-Type" header
+ # field set to "application/x-www-form-urlencoded".
+ elif self.signature_type == SIGNATURE_TYPE_BODY and not (
+ should_have_params and has_params and not multipart):
+ raise ValueError(
+ 'Body signatures may only be used with form-urlencoded content')
+
+ # We amend https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
+ # with the clause that parameters from body should only be included
+ # in non GET or HEAD requests. Extracting the request body parameters
+ # and including them in the signature base string would give semantic
+ # meaning to the body, which it should not have according to the
+ # HTTP 1.1 spec.
+ elif http_method.upper() in ('GET', 'HEAD') and has_params:
+ raise ValueError('GET/HEAD requests should not include body.')
+
+ # generate the basic OAuth parameters
+ request.oauth_params = self.get_oauth_params(request)
+
+ # generate the signature
+ request.oauth_params.append(
+ ('oauth_signature', self.get_oauth_signature(request)))
+
+ # render the signed request and return it
+ uri, headers, body = self._render(request, formencode=True,
+ realm=(realm or self.realm))
+
+ if self.decoding:
+ log.debug('Encoding URI, headers and body to %s.', self.decoding)
+ uri = uri.encode(self.decoding)
+ body = body.encode(self.decoding) if body else body
+ new_headers = {}
+ for k, v in headers.items():
+ new_headers[k.encode(self.decoding)] = v.encode(self.decoding)
+ headers = new_headers
+ return uri, headers, body
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/__init__.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/__init__.py
new file mode 100644
index 0000000000..9f30389f23
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/__init__.py
@@ -0,0 +1,8 @@
+from .access_token import AccessTokenEndpoint
+from .authorization import AuthorizationEndpoint
+from .base import BaseEndpoint
+from .request_token import RequestTokenEndpoint
+from .resource import ResourceEndpoint
+from .signature_only import SignatureOnlyEndpoint
+
+from .pre_configured import WebApplicationServer # isort:skip
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/access_token.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/access_token.py
new file mode 100644
index 0000000000..13665db08f
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/access_token.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth1.rfc5849.endpoints.access_token
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of the access token provider logic of
+OAuth 1.0 RFC 5849. It validates the correctness of access token requests,
+creates and persists tokens as well as create the proper response to be
+returned to the client.
+"""
+import logging
+
+from oauthlib.common import urlencode
+
+from .. import errors
+from .base import BaseEndpoint
+
+log = logging.getLogger(__name__)
+
+
+class AccessTokenEndpoint(BaseEndpoint):
+
+ """An endpoint responsible for providing OAuth 1 access tokens.
+
+ Typical use is to instantiate with a request validator and invoke the
+ ``create_access_token_response`` from a view function. The tuple returned
+ has all information necessary (body, status, headers) to quickly form
+ and return a proper response. See :doc:`/oauth1/validator` for details on which
+ validator methods to implement for this endpoint.
+ """
+
+ def create_access_token(self, request, credentials):
+ """Create and save a new access token.
+
+ Similar to OAuth 2, indication of granted scopes will be included as a
+ space separated list in ``oauth_authorized_realms``.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The token as an urlencoded string.
+ """
+ request.realms = self.request_validator.get_realms(
+ request.resource_owner_key, request)
+ token = {
+ 'oauth_token': self.token_generator(),
+ 'oauth_token_secret': self.token_generator(),
+ # Backport the authorized scopes indication used in OAuth2
+ 'oauth_authorized_realms': ' '.join(request.realms)
+ }
+ token.update(credentials)
+ self.request_validator.save_access_token(token, request)
+ return urlencode(token.items())
+
+ def create_access_token_response(self, uri, http_method='GET', body=None,
+ headers=None, credentials=None):
+ """Create an access token response, with a new request token if valid.
+
+ :param uri: The full URI of the token request.
+ :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
+ :param body: The request body as a string.
+ :param headers: The request headers as a dict.
+ :param credentials: A list of extra credentials to include in the token.
+ :returns: A tuple of 3 elements.
+ 1. A dict of headers to set on the response.
+ 2. The response body as a string.
+ 3. The response status code as an integer.
+
+ An example of a valid request::
+
+ >>> from your_validator import your_validator
+ >>> from oauthlib.oauth1 import AccessTokenEndpoint
+ >>> endpoint = AccessTokenEndpoint(your_validator)
+ >>> h, b, s = endpoint.create_access_token_response(
+ ... 'https://your.provider/access_token?foo=bar',
+ ... headers={
+ ... 'Authorization': 'OAuth oauth_token=234lsdkf....'
+ ... },
+ ... credentials={
+ ... 'my_specific': 'argument',
+ ... })
+ >>> h
+ {'Content-Type': 'application/x-www-form-urlencoded'}
+ >>> b
+ 'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
+ >>> s
+ 200
+
+ A response to an invalid request would have a different body and status::
+
+ >>> b
+ 'error=invalid_request&description=missing+resource+owner+key'
+ >>> s
+ 400
+
+ The same goes for an unauthorized request:
+
+ >>> b
+ ''
+ >>> s
+ 401
+ """
+ resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ try:
+ request = self._create_request(uri, http_method, body, headers)
+ valid, processed_request = self.validate_access_token_request(
+ request)
+ if valid:
+ token = self.create_access_token(request, credentials or {})
+ self.request_validator.invalidate_request_token(
+ request.client_key,
+ request.resource_owner_key,
+ request)
+ return resp_headers, token, 200
+ else:
+ return {}, None, 401
+ except errors.OAuth1Error as e:
+ return resp_headers, e.urlencoded, e.status_code
+
+ def validate_access_token_request(self, request):
+ """Validate an access token request.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :raises: OAuth1Error if the request is invalid.
+ :returns: A tuple of 2 elements.
+ 1. The validation result (True or False).
+ 2. The request object.
+ """
+ self._check_transport_security(request)
+ self._check_mandatory_parameters(request)
+
+ if not request.resource_owner_key:
+ raise errors.InvalidRequestError(
+ description='Missing resource owner.')
+
+ if not self.request_validator.check_request_token(
+ request.resource_owner_key):
+ raise errors.InvalidRequestError(
+ description='Invalid resource owner key format.')
+
+ if not request.verifier:
+ raise errors.InvalidRequestError(
+ description='Missing verifier.')
+
+ if not self.request_validator.check_verifier(request.verifier):
+ raise errors.InvalidRequestError(
+ description='Invalid verifier format.')
+
+ if not self.request_validator.validate_timestamp_and_nonce(
+ request.client_key, request.timestamp, request.nonce, request,
+ request_token=request.resource_owner_key):
+ return False, request
+
+ # The server SHOULD return a 401 (Unauthorized) status code when
+ # receiving a request with invalid client credentials.
+ # Note: This is postponed in order to avoid timing attacks, instead
+ # a dummy client is assigned and used to maintain near constant
+ # time request verification.
+ #
+ # Note that early exit would enable client enumeration
+ valid_client = self.request_validator.validate_client_key(
+ request.client_key, request)
+ if not valid_client:
+ request.client_key = self.request_validator.dummy_client
+
+ # The server SHOULD return a 401 (Unauthorized) status code when
+ # receiving a request with invalid or expired token.
+ # Note: This is postponed in order to avoid timing attacks, instead
+ # a dummy token is assigned and used to maintain near constant
+ # time request verification.
+ #
+ # Note that early exit would enable resource owner enumeration
+ valid_resource_owner = self.request_validator.validate_request_token(
+ request.client_key, request.resource_owner_key, request)
+ if not valid_resource_owner:
+ request.resource_owner_key = self.request_validator.dummy_request_token
+
+ # The server MUST verify (Section 3.2) the validity of the request,
+ # ensure that the resource owner has authorized the provisioning of
+ # token credentials to the client, and ensure that the temporary
+ # credentials have not expired or been used before. The server MUST
+ # also verify the verification code received from the client.
+ # .. _`Section 3.2`: https://tools.ietf.org/html/rfc5849#section-3.2
+ #
+ # Note that early exit would enable resource owner authorization
+ # verifier enumeration.
+ valid_verifier = self.request_validator.validate_verifier(
+ request.client_key,
+ request.resource_owner_key,
+ request.verifier,
+ request)
+
+ valid_signature = self._check_signature(request, is_token_request=True)
+
+ # log the results to the validator_log
+ # this lets us handle internal reporting and analysis
+ request.validator_log['client'] = valid_client
+ request.validator_log['resource_owner'] = valid_resource_owner
+ request.validator_log['verifier'] = valid_verifier
+ request.validator_log['signature'] = valid_signature
+
+ # We delay checking validity until the very end, using dummy values for
+ # calculations and fetching secrets/keys to ensure the flow of every
+ # request remains almost identical regardless of whether valid values
+ # have been supplied. This ensures near constant time execution and
+ # prevents malicious users from guessing sensitive information
+ v = all((valid_client, valid_resource_owner, valid_verifier,
+ valid_signature))
+ if not v:
+ log.info("[Failure] request verification failed.")
+ log.info("Valid client:, %s", valid_client)
+ log.info("Valid token:, %s", valid_resource_owner)
+ log.info("Valid verifier:, %s", valid_verifier)
+ log.info("Valid signature:, %s", valid_signature)
+ return v, request
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/authorization.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/authorization.py
new file mode 100644
index 0000000000..00d9576b01
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/authorization.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth1.rfc5849.endpoints.authorization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for signing and checking OAuth 1.0 RFC 5849 requests.
+"""
+from urllib.parse import urlencode
+
+from oauthlib.common import add_params_to_uri
+
+from .. import errors
+from .base import BaseEndpoint
+
+
+class AuthorizationEndpoint(BaseEndpoint):
+
+ """An endpoint responsible for letting authenticated users authorize access
+ to their protected resources to a client.
+
+ Typical use would be to have two views, one for displaying the authorization
+ form and one to process said form on submission.
+
+ The first view will want to utilize ``get_realms_and_credentials`` to fetch
+ requested realms and useful client credentials, such as name and
+ description, to be used when creating the authorization form.
+
+ During form processing you can use ``create_authorization_response`` to
+ validate the request, create a verifier as well as prepare the final
+ redirection URI used to send the user back to the client.
+
+ See :doc:`/oauth1/validator` for details on which validator methods to implement
+ for this endpoint.
+ """
+
+ def create_verifier(self, request, credentials):
+ """Create and save a new request token.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :param credentials: A dict of extra token credentials.
+ :returns: The verifier as a dict.
+ """
+ verifier = {
+ 'oauth_token': request.resource_owner_key,
+ 'oauth_verifier': self.token_generator(),
+ }
+ verifier.update(credentials)
+ self.request_validator.save_verifier(
+ request.resource_owner_key, verifier, request)
+ return verifier
+
+ def create_authorization_response(self, uri, http_method='GET', body=None,
+ headers=None, realms=None, credentials=None):
+ """Create an authorization response, with a new request token if valid.
+
+ :param uri: The full URI of the token request.
+ :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
+ :param body: The request body as a string.
+ :param headers: The request headers as a dict.
+ :param credentials: A list of credentials to include in the verifier.
+ :returns: A tuple of 3 elements.
+ 1. A dict of headers to set on the response.
+ 2. The response body as a string.
+ 3. The response status code as an integer.
+
+ If the callback URI tied to the current token is "oob", a response with
+ a 200 status code will be returned. In this case, it may be desirable to
+ modify the response to better display the verifier to the client.
+
+ An example of an authorization request::
+
+ >>> from your_validator import your_validator
+ >>> from oauthlib.oauth1 import AuthorizationEndpoint
+ >>> endpoint = AuthorizationEndpoint(your_validator)
+ >>> h, b, s = endpoint.create_authorization_response(
+ ... 'https://your.provider/authorize?oauth_token=...',
+ ... credentials={
+ ... 'extra': 'argument',
+ ... })
+ >>> h
+ {'Location': 'https://the.client/callback?oauth_verifier=...&extra=argument'}
+ >>> b
+ None
+ >>> s
+ 302
+
+ An example of a request with an "oob" callback::
+
+ >>> from your_validator import your_validator
+ >>> from oauthlib.oauth1 import AuthorizationEndpoint
+ >>> endpoint = AuthorizationEndpoint(your_validator)
+ >>> h, b, s = endpoint.create_authorization_response(
+ ... 'https://your.provider/authorize?foo=bar',
+ ... credentials={
+ ... 'extra': 'argument',
+ ... })
+ >>> h
+ {'Content-Type': 'application/x-www-form-urlencoded'}
+ >>> b
+ 'oauth_verifier=...&extra=argument'
+ >>> s
+ 200
+ """
+ request = self._create_request(uri, http_method=http_method, body=body,
+ headers=headers)
+
+ if not request.resource_owner_key:
+ raise errors.InvalidRequestError(
+ 'Missing mandatory parameter oauth_token.')
+ if not self.request_validator.verify_request_token(
+ request.resource_owner_key, request):
+ raise errors.InvalidClientError()
+
+ request.realms = realms
+ if (request.realms and not self.request_validator.verify_realms(
+ request.resource_owner_key, request.realms, request)):
+ raise errors.InvalidRequestError(
+ description=('User granted access to realms outside of '
+ 'what the client may request.'))
+
+ verifier = self.create_verifier(request, credentials or {})
+ redirect_uri = self.request_validator.get_redirect_uri(
+ request.resource_owner_key, request)
+ if redirect_uri == 'oob':
+ response_headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded'}
+ response_body = urlencode(verifier)
+ return response_headers, response_body, 200
+ else:
+ populated_redirect = add_params_to_uri(
+ redirect_uri, verifier.items())
+ return {'Location': populated_redirect}, None, 302
+
+ def get_realms_and_credentials(self, uri, http_method='GET', body=None,
+ headers=None):
+ """Fetch realms and credentials for the presented request token.
+
+ :param uri: The full URI of the token request.
+ :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
+ :param body: The request body as a string.
+ :param headers: The request headers as a dict.
+ :returns: A tuple of 2 elements.
+ 1. A list of request realms.
+ 2. A dict of credentials which may be useful in creating the
+ authorization form.
+ """
+ request = self._create_request(uri, http_method=http_method, body=body,
+ headers=headers)
+
+ if not self.request_validator.verify_request_token(
+ request.resource_owner_key, request):
+ raise errors.InvalidClientError()
+
+ realms = self.request_validator.get_realms(
+ request.resource_owner_key, request)
+ return realms, {'resource_owner_key': request.resource_owner_key}
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/base.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/base.py
new file mode 100644
index 0000000000..7831be7c5e
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/base.py
@@ -0,0 +1,244 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth1.rfc5849.endpoints.base
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for signing and checking OAuth 1.0 RFC 5849 requests.
+"""
+import time
+
+from oauthlib.common import CaseInsensitiveDict, Request, generate_token
+
+from .. import (
+ CONTENT_TYPE_FORM_URLENCODED, SIGNATURE_HMAC_SHA1, SIGNATURE_HMAC_SHA256,
+ SIGNATURE_HMAC_SHA512, SIGNATURE_PLAINTEXT, SIGNATURE_RSA_SHA1,
+ SIGNATURE_RSA_SHA256, SIGNATURE_RSA_SHA512, SIGNATURE_TYPE_AUTH_HEADER,
+ SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_QUERY, errors, signature, utils,
+)
+
+
+class BaseEndpoint:
+
+ def __init__(self, request_validator, token_generator=None):
+ self.request_validator = request_validator
+ self.token_generator = token_generator or generate_token
+
+ def _get_signature_type_and_params(self, request):
+ """Extracts parameters from query, headers and body. Signature type
+ is set to the source in which parameters were found.
+ """
+ # Per RFC5849, only the Authorization header may contain the 'realm'
+ # optional parameter.
+ header_params = signature.collect_parameters(headers=request.headers,
+ exclude_oauth_signature=False, with_realm=True)
+ body_params = signature.collect_parameters(body=request.body,
+ exclude_oauth_signature=False)
+ query_params = signature.collect_parameters(uri_query=request.uri_query,
+ exclude_oauth_signature=False)
+
+ params = []
+ params.extend(header_params)
+ params.extend(body_params)
+ params.extend(query_params)
+ signature_types_with_oauth_params = list(filter(lambda s: s[2], (
+ (SIGNATURE_TYPE_AUTH_HEADER, params,
+ utils.filter_oauth_params(header_params)),
+ (SIGNATURE_TYPE_BODY, params,
+ utils.filter_oauth_params(body_params)),
+ (SIGNATURE_TYPE_QUERY, params,
+ utils.filter_oauth_params(query_params))
+ )))
+
+ if len(signature_types_with_oauth_params) > 1:
+ found_types = [s[0] for s in signature_types_with_oauth_params]
+ raise errors.InvalidRequestError(
+ description=('oauth_ params must come from only 1 signature'
+ 'type but were found in %s',
+ ', '.join(found_types)))
+
+ try:
+ signature_type, params, oauth_params = signature_types_with_oauth_params[
+ 0]
+ except IndexError:
+ raise errors.InvalidRequestError(
+ description='Missing mandatory OAuth parameters.')
+
+ return signature_type, params, oauth_params
+
+ def _create_request(self, uri, http_method, body, headers):
+ # Only include body data from x-www-form-urlencoded requests
+ headers = CaseInsensitiveDict(headers or {})
+ if ("Content-Type" in headers and
+ CONTENT_TYPE_FORM_URLENCODED in headers["Content-Type"]):
+ request = Request(uri, http_method, body, headers)
+ else:
+ request = Request(uri, http_method, '', headers)
+
+ signature_type, params, oauth_params = (
+ self._get_signature_type_and_params(request))
+
+ # The server SHOULD return a 400 (Bad Request) status code when
+ # receiving a request with duplicated protocol parameters.
+ if len(dict(oauth_params)) != len(oauth_params):
+ raise errors.InvalidRequestError(
+ description='Duplicate OAuth1 entries.')
+
+ oauth_params = dict(oauth_params)
+ request.signature = oauth_params.get('oauth_signature')
+ request.client_key = oauth_params.get('oauth_consumer_key')
+ request.resource_owner_key = oauth_params.get('oauth_token')
+ request.nonce = oauth_params.get('oauth_nonce')
+ request.timestamp = oauth_params.get('oauth_timestamp')
+ request.redirect_uri = oauth_params.get('oauth_callback')
+ request.verifier = oauth_params.get('oauth_verifier')
+ request.signature_method = oauth_params.get('oauth_signature_method')
+ request.realm = dict(params).get('realm')
+ request.oauth_params = oauth_params
+
+ # Parameters to Client depend on signature method which may vary
+ # for each request. Note that HMAC-SHA1 and PLAINTEXT share parameters
+ request.params = [(k, v) for k, v in params if k != "oauth_signature"]
+
+ if 'realm' in request.headers.get('Authorization', ''):
+ request.params = [(k, v)
+ for k, v in request.params if k != "realm"]
+
+ return request
+
+ def _check_transport_security(self, request):
+ # TODO: move into oauthlib.common from oauth2.utils
+ if (self.request_validator.enforce_ssl and
+ not request.uri.lower().startswith("https://")):
+ raise errors.InsecureTransportError()
+
+ def _check_mandatory_parameters(self, request):
+ # The server SHOULD return a 400 (Bad Request) status code when
+ # receiving a request with missing parameters.
+ if not all((request.signature, request.client_key,
+ request.nonce, request.timestamp,
+ request.signature_method)):
+ raise errors.InvalidRequestError(
+ description='Missing mandatory OAuth parameters.')
+
+ # OAuth does not mandate a particular signature method, as each
+ # implementation can have its own unique requirements. Servers are
+ # free to implement and document their own custom methods.
+ # Recommending any particular method is beyond the scope of this
+ # specification. Implementers should review the Security
+ # Considerations section (`Section 4`_) before deciding on which
+ # method to support.
+ # .. _`Section 4`: https://tools.ietf.org/html/rfc5849#section-4
+ if (not request.signature_method in
+ self.request_validator.allowed_signature_methods):
+ raise errors.InvalidSignatureMethodError(
+ description="Invalid signature, {} not in {!r}.".format(
+ request.signature_method,
+ self.request_validator.allowed_signature_methods))
+
+ # Servers receiving an authenticated request MUST validate it by:
+ # If the "oauth_version" parameter is present, ensuring its value is
+ # "1.0".
+ if ('oauth_version' in request.oauth_params and
+ request.oauth_params['oauth_version'] != '1.0'):
+ raise errors.InvalidRequestError(
+ description='Invalid OAuth version.')
+
+ # The timestamp value MUST be a positive integer. Unless otherwise
+ # specified by the server's documentation, the timestamp is expressed
+ # in the number of seconds since January 1, 1970 00:00:00 GMT.
+ if len(request.timestamp) != 10:
+ raise errors.InvalidRequestError(
+ description='Invalid timestamp size')
+
+ try:
+ ts = int(request.timestamp)
+
+ except ValueError:
+ raise errors.InvalidRequestError(
+ description='Timestamp must be an integer.')
+
+ else:
+ # To avoid the need to retain an infinite number of nonce values for
+ # future checks, servers MAY choose to restrict the time period after
+ # which a request with an old timestamp is rejected.
+ if abs(time.time() - ts) > self.request_validator.timestamp_lifetime:
+ raise errors.InvalidRequestError(
+ description=('Timestamp given is invalid, differ from '
+ 'allowed by over %s seconds.' % (
+ self.request_validator.timestamp_lifetime)))
+
+ # Provider specific validation of parameters, used to enforce
+ # restrictions such as character set and length.
+ if not self.request_validator.check_client_key(request.client_key):
+ raise errors.InvalidRequestError(
+ description='Invalid client key format.')
+
+ if not self.request_validator.check_nonce(request.nonce):
+ raise errors.InvalidRequestError(
+ description='Invalid nonce format.')
+
+ def _check_signature(self, request, is_token_request=False):
+ # ---- RSA Signature verification ----
+ if request.signature_method == SIGNATURE_RSA_SHA1 or \
+ request.signature_method == SIGNATURE_RSA_SHA256 or \
+ request.signature_method == SIGNATURE_RSA_SHA512:
+ # RSA-based signature method
+
+ # The server verifies the signature per `[RFC3447] section 8.2.2`_
+ # .. _`[RFC3447] section 8.2.2`: https://tools.ietf.org/html/rfc3447#section-8.2.1
+
+ rsa_key = self.request_validator.get_rsa_key(
+ request.client_key, request)
+
+ if request.signature_method == SIGNATURE_RSA_SHA1:
+ valid_signature = signature.verify_rsa_sha1(request, rsa_key)
+ elif request.signature_method == SIGNATURE_RSA_SHA256:
+ valid_signature = signature.verify_rsa_sha256(request, rsa_key)
+ elif request.signature_method == SIGNATURE_RSA_SHA512:
+ valid_signature = signature.verify_rsa_sha512(request, rsa_key)
+ else:
+ valid_signature = False
+
+ # ---- HMAC or Plaintext Signature verification ----
+ else:
+ # Non-RSA based signature method
+
+ # Servers receiving an authenticated request MUST validate it by:
+ # Recalculating the request signature independently as described in
+ # `Section 3.4`_ and comparing it to the value received from the
+ # client via the "oauth_signature" parameter.
+ # .. _`Section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
+
+ client_secret = self.request_validator.get_client_secret(
+ request.client_key, request)
+
+ resource_owner_secret = None
+ if request.resource_owner_key:
+ if is_token_request:
+ resource_owner_secret = \
+ self.request_validator.get_request_token_secret(
+ request.client_key, request.resource_owner_key,
+ request)
+ else:
+ resource_owner_secret = \
+ self.request_validator.get_access_token_secret(
+ request.client_key, request.resource_owner_key,
+ request)
+
+ if request.signature_method == SIGNATURE_HMAC_SHA1:
+ valid_signature = signature.verify_hmac_sha1(
+ request, client_secret, resource_owner_secret)
+ elif request.signature_method == SIGNATURE_HMAC_SHA256:
+ valid_signature = signature.verify_hmac_sha256(
+ request, client_secret, resource_owner_secret)
+ elif request.signature_method == SIGNATURE_HMAC_SHA512:
+ valid_signature = signature.verify_hmac_sha512(
+ request, client_secret, resource_owner_secret)
+ elif request.signature_method == SIGNATURE_PLAINTEXT:
+ valid_signature = signature.verify_plaintext(
+ request, client_secret, resource_owner_secret)
+ else:
+ valid_signature = False
+
+ return valid_signature
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/pre_configured.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/pre_configured.py
new file mode 100644
index 0000000000..23e3cfc84e
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/pre_configured.py
@@ -0,0 +1,14 @@
+from . import (
+ AccessTokenEndpoint, AuthorizationEndpoint, RequestTokenEndpoint,
+ ResourceEndpoint,
+)
+
+
+class WebApplicationServer(RequestTokenEndpoint, AuthorizationEndpoint,
+ AccessTokenEndpoint, ResourceEndpoint):
+
+ def __init__(self, request_validator):
+ RequestTokenEndpoint.__init__(self, request_validator)
+ AuthorizationEndpoint.__init__(self, request_validator)
+ AccessTokenEndpoint.__init__(self, request_validator)
+ ResourceEndpoint.__init__(self, request_validator)
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/request_token.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/request_token.py
new file mode 100644
index 0000000000..0323cfb845
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/request_token.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth1.rfc5849.endpoints.request_token
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of the request token provider logic of
+OAuth 1.0 RFC 5849. It validates the correctness of request token requests,
+creates and persists tokens as well as create the proper response to be
+returned to the client.
+"""
+import logging
+
+from oauthlib.common import urlencode
+
+from .. import errors
+from .base import BaseEndpoint
+
+log = logging.getLogger(__name__)
+
+
+class RequestTokenEndpoint(BaseEndpoint):
+
+ """An endpoint responsible for providing OAuth 1 request tokens.
+
+ Typical use is to instantiate with a request validator and invoke the
+ ``create_request_token_response`` from a view function. The tuple returned
+ has all information necessary (body, status, headers) to quickly form
+ and return a proper response. See :doc:`/oauth1/validator` for details on which
+ validator methods to implement for this endpoint.
+ """
+
+ def create_request_token(self, request, credentials):
+ """Create and save a new request token.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :param credentials: A dict of extra token credentials.
+ :returns: The token as an urlencoded string.
+ """
+ token = {
+ 'oauth_token': self.token_generator(),
+ 'oauth_token_secret': self.token_generator(),
+ 'oauth_callback_confirmed': 'true'
+ }
+ token.update(credentials)
+ self.request_validator.save_request_token(token, request)
+ return urlencode(token.items())
+
+ def create_request_token_response(self, uri, http_method='GET', body=None,
+ headers=None, credentials=None):
+ """Create a request token response, with a new request token if valid.
+
+ :param uri: The full URI of the token request.
+ :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
+ :param body: The request body as a string.
+ :param headers: The request headers as a dict.
+ :param credentials: A list of extra credentials to include in the token.
+ :returns: A tuple of 3 elements.
+ 1. A dict of headers to set on the response.
+ 2. The response body as a string.
+ 3. The response status code as an integer.
+
+ An example of a valid request::
+
+ >>> from your_validator import your_validator
+ >>> from oauthlib.oauth1 import RequestTokenEndpoint
+ >>> endpoint = RequestTokenEndpoint(your_validator)
+ >>> h, b, s = endpoint.create_request_token_response(
+ ... 'https://your.provider/request_token?foo=bar',
+ ... headers={
+ ... 'Authorization': 'OAuth realm=movies user, oauth_....'
+ ... },
+ ... credentials={
+ ... 'my_specific': 'argument',
+ ... })
+ >>> h
+ {'Content-Type': 'application/x-www-form-urlencoded'}
+ >>> b
+ 'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_callback_confirmed=true&my_specific=argument'
+ >>> s
+ 200
+
+ A response to an invalid request would have a different body and status::
+
+ >>> b
+ 'error=invalid_request&description=missing+callback+uri'
+ >>> s
+ 400
+
+ The same goes for an unauthorized request:
+
+ >>> b
+ ''
+ >>> s
+ 401
+ """
+ resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ try:
+ request = self._create_request(uri, http_method, body, headers)
+ valid, processed_request = self.validate_request_token_request(
+ request)
+ if valid:
+ token = self.create_request_token(request, credentials or {})
+ return resp_headers, token, 200
+ else:
+ return {}, None, 401
+ except errors.OAuth1Error as e:
+ return resp_headers, e.urlencoded, e.status_code
+
+ def validate_request_token_request(self, request):
+ """Validate a request token request.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :raises: OAuth1Error if the request is invalid.
+ :returns: A tuple of 2 elements.
+ 1. The validation result (True or False).
+ 2. The request object.
+ """
+ self._check_transport_security(request)
+ self._check_mandatory_parameters(request)
+
+ if request.realm:
+ request.realms = request.realm.split(' ')
+ else:
+ request.realms = self.request_validator.get_default_realms(
+ request.client_key, request)
+ if not self.request_validator.check_realms(request.realms):
+ raise errors.InvalidRequestError(
+ description='Invalid realm {}. Allowed are {!r}.'.format(
+ request.realms, self.request_validator.realms))
+
+ if not request.redirect_uri:
+ raise errors.InvalidRequestError(
+ description='Missing callback URI.')
+
+ if not self.request_validator.validate_timestamp_and_nonce(
+ request.client_key, request.timestamp, request.nonce, request,
+ request_token=request.resource_owner_key):
+ return False, request
+
+ # The server SHOULD return a 401 (Unauthorized) status code when
+ # receiving a request with invalid client credentials.
+ # Note: This is postponed in order to avoid timing attacks, instead
+ # a dummy client is assigned and used to maintain near constant
+ # time request verification.
+ #
+ # Note that early exit would enable client enumeration
+ valid_client = self.request_validator.validate_client_key(
+ request.client_key, request)
+ if not valid_client:
+ request.client_key = self.request_validator.dummy_client
+
+ # Note that `realm`_ is only used in authorization headers and how
+ # it should be interpreted is not included in the OAuth spec.
+ # However they could be seen as a scope or realm to which the
+ # client has access and as such every client should be checked
+ # to ensure it is authorized access to that scope or realm.
+ # .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
+ #
+ # Note that early exit would enable client realm access enumeration.
+ #
+ # The require_realm indicates this is the first step in the OAuth
+ # workflow where a client requests access to a specific realm.
+ # This first step (obtaining request token) need not require a realm
+ # and can then be identified by checking the require_resource_owner
+ # flag and absence of realm.
+ #
+ # Clients obtaining an access token will not supply a realm and it will
+ # not be checked. Instead the previously requested realm should be
+ # transferred from the request token to the access token.
+ #
+ # Access to protected resources will always validate the realm but note
+ # that the realm is now tied to the access token and not provided by
+ # the client.
+ valid_realm = self.request_validator.validate_requested_realms(
+ request.client_key, request.realms, request)
+
+ # Callback is normally never required, except for requests for
+ # a Temporary Credential as described in `Section 2.1`_
+ # .._`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
+ valid_redirect = self.request_validator.validate_redirect_uri(
+ request.client_key, request.redirect_uri, request)
+ if not request.redirect_uri:
+ raise NotImplementedError('Redirect URI must either be provided '
+ 'or set to a default during validation.')
+
+ valid_signature = self._check_signature(request)
+
+ # log the results to the validator_log
+ # this lets us handle internal reporting and analysis
+ request.validator_log['client'] = valid_client
+ request.validator_log['realm'] = valid_realm
+ request.validator_log['callback'] = valid_redirect
+ request.validator_log['signature'] = valid_signature
+
+ # We delay checking validity until the very end, using dummy values for
+ # calculations and fetching secrets/keys to ensure the flow of every
+ # request remains almost identical regardless of whether valid values
+ # have been supplied. This ensures near constant time execution and
+ # prevents malicious users from guessing sensitive information
+ v = all((valid_client, valid_realm, valid_redirect, valid_signature))
+ if not v:
+ log.info("[Failure] request verification failed.")
+ log.info("Valid client: %s.", valid_client)
+ log.info("Valid realm: %s.", valid_realm)
+ log.info("Valid callback: %s.", valid_redirect)
+ log.info("Valid signature: %s.", valid_signature)
+ return v, request
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/resource.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/resource.py
new file mode 100644
index 0000000000..8641152e4e
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/resource.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth1.rfc5849.endpoints.resource
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of the resource protection provider logic of
+OAuth 1.0 RFC 5849.
+"""
+import logging
+
+from .. import errors
+from .base import BaseEndpoint
+
+log = logging.getLogger(__name__)
+
+
+class ResourceEndpoint(BaseEndpoint):
+
+ """An endpoint responsible for protecting resources.
+
+ Typical use is to instantiate with a request validator and invoke the
+ ``validate_protected_resource_request`` in a decorator around a view
+ function. If the request is valid, invoke and return the response of the
+ view. If invalid create and return an error response directly from the
+ decorator.
+
+ See :doc:`/oauth1/validator` for details on which validator methods to implement
+ for this endpoint.
+
+ An example decorator::
+
+ from functools import wraps
+ from your_validator import your_validator
+ from oauthlib.oauth1 import ResourceEndpoint
+ endpoint = ResourceEndpoint(your_validator)
+
+ def require_oauth(realms=None):
+ def decorator(f):
+ @wraps(f)
+ def wrapper(request, *args, **kwargs):
+ v, r = provider.validate_protected_resource_request(
+ request.url,
+ http_method=request.method,
+ body=request.data,
+ headers=request.headers,
+ realms=realms or [])
+ if v:
+ return f(*args, **kwargs)
+ else:
+ return abort(403)
+ """
+
+ def validate_protected_resource_request(self, uri, http_method='GET',
+ body=None, headers=None, realms=None):
+ """Create a request token response, with a new request token if valid.
+
+ :param uri: The full URI of the token request.
+ :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
+ :param body: The request body as a string.
+ :param headers: The request headers as a dict.
+ :param realms: A list of realms the resource is protected under.
+ This will be supplied to the ``validate_realms``
+ method of the request validator.
+ :returns: A tuple of 2 elements.
+ 1. True if valid, False otherwise.
+ 2. An oauthlib.common.Request object.
+ """
+ try:
+ request = self._create_request(uri, http_method, body, headers)
+ except errors.OAuth1Error:
+ return False, None
+
+ try:
+ self._check_transport_security(request)
+ self._check_mandatory_parameters(request)
+ except errors.OAuth1Error:
+ return False, request
+
+ if not request.resource_owner_key:
+ return False, request
+
+ if not self.request_validator.check_access_token(
+ request.resource_owner_key):
+ return False, request
+
+ if not self.request_validator.validate_timestamp_and_nonce(
+ request.client_key, request.timestamp, request.nonce, request,
+ access_token=request.resource_owner_key):
+ return False, request
+
+ # The server SHOULD return a 401 (Unauthorized) status code when
+ # receiving a request with invalid client credentials.
+ # Note: This is postponed in order to avoid timing attacks, instead
+ # a dummy client is assigned and used to maintain near constant
+ # time request verification.
+ #
+ # Note that early exit would enable client enumeration
+ valid_client = self.request_validator.validate_client_key(
+ request.client_key, request)
+ if not valid_client:
+ request.client_key = self.request_validator.dummy_client
+
+ # The server SHOULD return a 401 (Unauthorized) status code when
+ # receiving a request with invalid or expired token.
+ # Note: This is postponed in order to avoid timing attacks, instead
+ # a dummy token is assigned and used to maintain near constant
+ # time request verification.
+ #
+ # Note that early exit would enable resource owner enumeration
+ valid_resource_owner = self.request_validator.validate_access_token(
+ request.client_key, request.resource_owner_key, request)
+ if not valid_resource_owner:
+ request.resource_owner_key = self.request_validator.dummy_access_token
+
+ # Note that `realm`_ is only used in authorization headers and how
+ # it should be interpreted is not included in the OAuth spec.
+ # However they could be seen as a scope or realm to which the
+ # client has access and as such every client should be checked
+ # to ensure it is authorized access to that scope or realm.
+ # .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
+ #
+ # Note that early exit would enable client realm access enumeration.
+ #
+ # The require_realm indicates this is the first step in the OAuth
+ # workflow where a client requests access to a specific realm.
+ # This first step (obtaining request token) need not require a realm
+ # and can then be identified by checking the require_resource_owner
+ # flag and absence of realm.
+ #
+ # Clients obtaining an access token will not supply a realm and it will
+ # not be checked. Instead the previously requested realm should be
+ # transferred from the request token to the access token.
+ #
+ # Access to protected resources will always validate the realm but note
+ # that the realm is now tied to the access token and not provided by
+ # the client.
+ valid_realm = self.request_validator.validate_realms(request.client_key,
+ request.resource_owner_key, request, uri=request.uri,
+ realms=realms)
+
+ valid_signature = self._check_signature(request)
+
+ # log the results to the validator_log
+ # this lets us handle internal reporting and analysis
+ request.validator_log['client'] = valid_client
+ request.validator_log['resource_owner'] = valid_resource_owner
+ request.validator_log['realm'] = valid_realm
+ request.validator_log['signature'] = valid_signature
+
+ # We delay checking validity until the very end, using dummy values for
+ # calculations and fetching secrets/keys to ensure the flow of every
+ # request remains almost identical regardless of whether valid values
+ # have been supplied. This ensures near constant time execution and
+ # prevents malicious users from guessing sensitive information
+ v = all((valid_client, valid_resource_owner, valid_realm,
+ valid_signature))
+ if not v:
+ log.info("[Failure] request verification failed.")
+ log.info("Valid client: %s", valid_client)
+ log.info("Valid token: %s", valid_resource_owner)
+ log.info("Valid realm: %s", valid_realm)
+ log.info("Valid signature: %s", valid_signature)
+ return v, request
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/signature_only.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/signature_only.py
new file mode 100644
index 0000000000..d693ccb7f6
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/endpoints/signature_only.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth1.rfc5849.endpoints.signature_only
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of the signing logic of OAuth 1.0 RFC 5849.
+"""
+
+import logging
+
+from .. import errors
+from .base import BaseEndpoint
+
+log = logging.getLogger(__name__)
+
+
+class SignatureOnlyEndpoint(BaseEndpoint):
+
+ """An endpoint only responsible for verifying an oauth signature."""
+
+ def validate_request(self, uri, http_method='GET',
+ body=None, headers=None):
+ """Validate a signed OAuth request.
+
+ :param uri: The full URI of the token request.
+ :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
+ :param body: The request body as a string.
+ :param headers: The request headers as a dict.
+ :returns: A tuple of 2 elements.
+ 1. True if valid, False otherwise.
+ 2. An oauthlib.common.Request object.
+ """
+ try:
+ request = self._create_request(uri, http_method, body, headers)
+ except errors.OAuth1Error as err:
+ log.info(
+ 'Exception caught while validating request, %s.' % err)
+ return False, None
+
+ try:
+ self._check_transport_security(request)
+ self._check_mandatory_parameters(request)
+ except errors.OAuth1Error as err:
+ log.info(
+ 'Exception caught while validating request, %s.' % err)
+ return False, request
+
+ if not self.request_validator.validate_timestamp_and_nonce(
+ request.client_key, request.timestamp, request.nonce, request):
+ log.debug('[Failure] verification failed: timestamp/nonce')
+ return False, request
+
+ # The server SHOULD return a 401 (Unauthorized) status code when
+ # receiving a request with invalid client credentials.
+ # Note: This is postponed in order to avoid timing attacks, instead
+ # a dummy client is assigned and used to maintain near constant
+ # time request verification.
+ #
+ # Note that early exit would enable client enumeration
+ valid_client = self.request_validator.validate_client_key(
+ request.client_key, request)
+ if not valid_client:
+ request.client_key = self.request_validator.dummy_client
+
+ valid_signature = self._check_signature(request)
+
+ # log the results to the validator_log
+ # this lets us handle internal reporting and analysis
+ request.validator_log['client'] = valid_client
+ request.validator_log['signature'] = valid_signature
+
+ # We delay checking validity until the very end, using dummy values for
+ # calculations and fetching secrets/keys to ensure the flow of every
+ # request remains almost identical regardless of whether valid values
+ # have been supplied. This ensures near constant time execution and
+ # prevents malicious users from guessing sensitive information
+ v = all((valid_client, valid_signature))
+ if not v:
+ log.info("[Failure] request verification failed.")
+ log.info("Valid client: %s", valid_client)
+ log.info("Valid signature: %s", valid_signature)
+ return v, request
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/errors.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/errors.py
new file mode 100644
index 0000000000..8774d40741
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/errors.py
@@ -0,0 +1,76 @@
+"""
+oauthlib.oauth1.rfc5849.errors
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Errors used both by OAuth 1 clients and providers to represent the spec
+defined error responses for all four core grant types.
+"""
+from oauthlib.common import add_params_to_uri, urlencode
+
+
+class OAuth1Error(Exception):
+ error = None
+ description = ''
+
+ def __init__(self, description=None, uri=None, status_code=400,
+ request=None):
+ """
+ description: A human-readable ASCII [USASCII] text providing
+ additional information, used to assist the client
+ developer in understanding the error that occurred.
+ Values for the "error_description" parameter MUST NOT
+ include characters outside the set
+ x20-21 / x23-5B / x5D-7E.
+
+ uri: A URI identifying a human-readable web page with information
+ about the error, used to provide the client developer with
+ additional information about the error. Values for the
+ "error_uri" parameter MUST conform to the URI- Reference
+ syntax, and thus MUST NOT include characters outside the set
+ x21 / x23-5B / x5D-7E.
+
+ state: A CSRF protection value received from the client.
+
+ request: Oauthlib Request object
+ """
+ self.description = description or self.description
+ message = '({}) {}'.format(self.error, self.description)
+ if request:
+ message += ' ' + repr(request)
+ super().__init__(message)
+
+ self.uri = uri
+ self.status_code = status_code
+
+ def in_uri(self, uri):
+ return add_params_to_uri(uri, self.twotuples)
+
+ @property
+ def twotuples(self):
+ error = [('error', self.error)]
+ if self.description:
+ error.append(('error_description', self.description))
+ if self.uri:
+ error.append(('error_uri', self.uri))
+ return error
+
+ @property
+ def urlencoded(self):
+ return urlencode(self.twotuples)
+
+
+class InsecureTransportError(OAuth1Error):
+ error = 'insecure_transport_protocol'
+ description = 'Only HTTPS connections are permitted.'
+
+
+class InvalidSignatureMethodError(OAuth1Error):
+ error = 'invalid_signature_method'
+
+
+class InvalidRequestError(OAuth1Error):
+ error = 'invalid_request'
+
+
+class InvalidClientError(OAuth1Error):
+ error = 'invalid_client'
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/parameters.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/parameters.py
new file mode 100644
index 0000000000..2163772df3
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/parameters.py
@@ -0,0 +1,133 @@
+"""
+oauthlib.parameters
+~~~~~~~~~~~~~~~~~~~
+
+This module contains methods related to `section 3.5`_ of the OAuth 1.0a spec.
+
+.. _`section 3.5`: https://tools.ietf.org/html/rfc5849#section-3.5
+"""
+from urllib.parse import urlparse, urlunparse
+
+from oauthlib.common import extract_params, urlencode
+
+from . import utils
+
+
+# TODO: do we need filter_params now that oauth_params are handled by Request?
+# We can easily pass in just oauth protocol params.
+@utils.filter_params
+def prepare_headers(oauth_params, headers=None, realm=None):
+ """**Prepare the Authorization header.**
+ Per `section 3.5.1`_ of the spec.
+
+ Protocol parameters can be transmitted using the HTTP "Authorization"
+ header field as defined by `RFC2617`_ with the auth-scheme name set to
+ "OAuth" (case insensitive).
+
+ For example::
+
+ Authorization: OAuth realm="Example",
+ oauth_consumer_key="0685bd9184jfhq22",
+ oauth_token="ad180jjd733klru7",
+ oauth_signature_method="HMAC-SHA1",
+ oauth_signature="wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
+ oauth_timestamp="137131200",
+ oauth_nonce="4572616e48616d6d65724c61686176",
+ oauth_version="1.0"
+
+
+ .. _`section 3.5.1`: https://tools.ietf.org/html/rfc5849#section-3.5.1
+ .. _`RFC2617`: https://tools.ietf.org/html/rfc2617
+ """
+ headers = headers or {}
+
+ # Protocol parameters SHALL be included in the "Authorization" header
+ # field as follows:
+ authorization_header_parameters_parts = []
+ for oauth_parameter_name, value in oauth_params:
+ # 1. Parameter names and values are encoded per Parameter Encoding
+ # (`Section 3.6`_)
+ #
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ escaped_name = utils.escape(oauth_parameter_name)
+ escaped_value = utils.escape(value)
+
+ # 2. Each parameter's name is immediately followed by an "=" character
+ # (ASCII code 61), a """ character (ASCII code 34), the parameter
+ # value (MAY be empty), and another """ character (ASCII code 34).
+ part = '{}="{}"'.format(escaped_name, escaped_value)
+
+ authorization_header_parameters_parts.append(part)
+
+ # 3. Parameters are separated by a "," character (ASCII code 44) and
+ # OPTIONAL linear whitespace per `RFC2617`_.
+ #
+ # .. _`RFC2617`: https://tools.ietf.org/html/rfc2617
+ authorization_header_parameters = ', '.join(
+ authorization_header_parameters_parts)
+
+ # 4. The OPTIONAL "realm" parameter MAY be added and interpreted per
+ # `RFC2617 section 1.2`_.
+ #
+ # .. _`RFC2617 section 1.2`: https://tools.ietf.org/html/rfc2617#section-1.2
+ if realm:
+ # NOTE: realm should *not* be escaped
+ authorization_header_parameters = ('realm="%s", ' % realm +
+ authorization_header_parameters)
+
+ # the auth-scheme name set to "OAuth" (case insensitive).
+ authorization_header = 'OAuth %s' % authorization_header_parameters
+
+ # contribute the Authorization header to the given headers
+ full_headers = {}
+ full_headers.update(headers)
+ full_headers['Authorization'] = authorization_header
+ return full_headers
+
+
+def _append_params(oauth_params, params):
+ """Append OAuth params to an existing set of parameters.
+
+    Both params and oauth_params must be lists of 2-tuples.
+
+ Per `section 3.5.2`_ and `3.5.3`_ of the spec.
+
+ .. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
+ .. _`3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3
+
+ """
+ merged = list(params)
+ merged.extend(oauth_params)
+ # The request URI / entity-body MAY include other request-specific
+ # parameters, in which case, the protocol parameters SHOULD be appended
+ # following the request-specific parameters, properly separated by an "&"
+ # character (ASCII code 38)
+ merged.sort(key=lambda i: i[0].startswith('oauth_'))
+ return merged
+
+
+def prepare_form_encoded_body(oauth_params, body):
+ """Prepare the Form-Encoded Body.
+
+ Per `section 3.5.2`_ of the spec.
+
+ .. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
+
+ """
+ # append OAuth params to the existing body
+ return _append_params(oauth_params, body)
+
+
+def prepare_request_uri_query(oauth_params, uri):
+ """Prepare the Request URI Query.
+
+ Per `section 3.5.3`_ of the spec.
+
+ .. _`section 3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3
+
+ """
+ # append OAuth params to the existing set of query components
+ sch, net, path, par, query, fra = urlparse(uri)
+ query = urlencode(
+ _append_params(oauth_params, extract_params(query) or []))
+ return urlunparse((sch, net, path, par, query, fra))
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/request_validator.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/request_validator.py
new file mode 100644
index 0000000000..e937aabf40
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/request_validator.py
@@ -0,0 +1,849 @@
+"""
+oauthlib.oauth1.rfc5849
+~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for signing and checking OAuth 1.0 RFC 5849 requests.
+"""
+from . import SIGNATURE_METHODS, utils
+
+
+class RequestValidator:
+
+ """A validator/datastore interaction base class for OAuth 1 providers.
+
+ OAuth providers should inherit from RequestValidator and implement the
+ methods and properties outlined below. Further details are provided in the
+ documentation for each method and property.
+
+ Methods used to check the format of input parameters. Common tests include
+ length, character set, membership, range or pattern. These tests are
+ referred to as `whitelisting or blacklisting`_. Whitelisting is better
+ but blacklisting can be useful to spot malicious activity.
+    The following methods have a default implementation:
+
+ - check_client_key
+ - check_request_token
+ - check_access_token
+ - check_nonce
+ - check_verifier
+ - check_realms
+
+ The methods above default to whitelist input parameters, checking that they
+ are alphanumerical and between a minimum and maximum length. Rather than
+ overloading the methods a few properties can be used to configure these
+ methods.
+
+ * @safe_characters -> (character set)
+ * @client_key_length -> (min, max)
+ * @request_token_length -> (min, max)
+ * @access_token_length -> (min, max)
+ * @nonce_length -> (min, max)
+ * @verifier_length -> (min, max)
+ * @realms -> [list, of, realms]
+
+ Methods used to validate/invalidate input parameters. These checks usually
+ hit either persistent or temporary storage such as databases or the
+ filesystem. See each methods documentation for detailed usage.
+ The following methods must be implemented:
+
+ - validate_client_key
+ - validate_request_token
+ - validate_access_token
+ - validate_timestamp_and_nonce
+ - validate_redirect_uri
+ - validate_requested_realms
+ - validate_realms
+ - validate_verifier
+ - invalidate_request_token
+
+ Methods used to retrieve sensitive information from storage.
+ The following methods must be implemented:
+
+ - get_client_secret
+ - get_request_token_secret
+ - get_access_token_secret
+ - get_rsa_key
+ - get_realms
+ - get_default_realms
+ - get_redirect_uri
+
+ Methods used to save credentials.
+ The following methods must be implemented:
+
+ - save_request_token
+ - save_verifier
+ - save_access_token
+
+    Methods used to verify input parameters. These methods are used during
+ authorizing request token by user (AuthorizationEndpoint), to check if
+    parameters are valid. During token authorization the request is not signed,
+ thus 'validation' methods can not be used. The following methods must be
+ implemented:
+
+ - verify_realms
+ - verify_request_token
+
+ To prevent timing attacks it is necessary to not exit early even if the
+ client key or resource owner key is invalid. Instead dummy values should
+ be used during the remaining verification process. It is very important
+ that the dummy client and token are valid input parameters to the methods
+ get_client_secret, get_rsa_key and get_(access/request)_token_secret and
+ that the running time of those methods when given a dummy value remain
+ equivalent to the running time when given a valid client/resource owner.
+ The following properties must be implemented:
+
+ * @dummy_client
+ * @dummy_request_token
+ * @dummy_access_token
+
+ Example implementations have been provided, note that the database used is
+ a simple dictionary and serves only an illustrative purpose. Use whichever
+ database suits your project and how to access it is entirely up to you.
+ The methods are introduced in an order which should make understanding
+ their use more straightforward and as such it could be worth reading what
+ follows in chronological order.
+
+ .. _`whitelisting or blacklisting`: https://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
+ """
+
+ def __init__(self):
+ pass
+
+ @property
+ def allowed_signature_methods(self):
+ return SIGNATURE_METHODS
+
+ @property
+ def safe_characters(self):
+ return set(utils.UNICODE_ASCII_CHARACTER_SET)
+
+ @property
+ def client_key_length(self):
+ return 20, 30
+
+ @property
+ def request_token_length(self):
+ return 20, 30
+
+ @property
+ def access_token_length(self):
+ return 20, 30
+
+ @property
+ def timestamp_lifetime(self):
+ return 600
+
+ @property
+ def nonce_length(self):
+ return 20, 30
+
+ @property
+ def verifier_length(self):
+ return 20, 30
+
+ @property
+ def realms(self):
+ return []
+
+ @property
+ def enforce_ssl(self):
+ return True
+
+ def check_client_key(self, client_key):
+ """Check that the client key only contains safe characters
+ and is no shorter than lower and no longer than upper.
+ """
+ lower, upper = self.client_key_length
+ return (set(client_key) <= self.safe_characters and
+ lower <= len(client_key) <= upper)
+
+ def check_request_token(self, request_token):
+ """Checks that the request token contains only safe characters
+ and is no shorter than lower and no longer than upper.
+ """
+ lower, upper = self.request_token_length
+ return (set(request_token) <= self.safe_characters and
+ lower <= len(request_token) <= upper)
+
+ def check_access_token(self, request_token):
+ """Checks that the token contains only safe characters
+ and is no shorter than lower and no longer than upper.
+ """
+ lower, upper = self.access_token_length
+ return (set(request_token) <= self.safe_characters and
+ lower <= len(request_token) <= upper)
+
+ def check_nonce(self, nonce):
+ """Checks that the nonce only contains only safe characters
+ and is no shorter than lower and no longer than upper.
+ """
+ lower, upper = self.nonce_length
+ return (set(nonce) <= self.safe_characters and
+ lower <= len(nonce) <= upper)
+
+ def check_verifier(self, verifier):
+ """Checks that the verifier contains only safe characters
+ and is no shorter than lower and no longer than upper.
+ """
+ lower, upper = self.verifier_length
+ return (set(verifier) <= self.safe_characters and
+ lower <= len(verifier) <= upper)
+
+ def check_realms(self, realms):
+ """Check that the realm is one of a set allowed realms."""
+ return all(r in self.realms for r in realms)
+
+ def _subclass_must_implement(self, fn):
+ """
+ Returns a NotImplementedError for a function that should be implemented.
+ :param fn: name of the function
+ """
+ m = "Missing function implementation in {}: {}".format(type(self), fn)
+ return NotImplementedError(m)
+
+ @property
+ def dummy_client(self):
+ """Dummy client used when an invalid client key is supplied.
+
+ :returns: The dummy client key string.
+
+ The dummy client should be associated with either a client secret,
+ a rsa key or both depending on which signature methods are supported.
+ Providers should make sure that
+
+ get_client_secret(dummy_client)
+ get_rsa_key(dummy_client)
+
+ return a valid secret or key for the dummy client.
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ * RequestTokenEndpoint
+ * ResourceEndpoint
+ * SignatureOnlyEndpoint
+ """
+ raise self._subclass_must_implement("dummy_client")
+
+ @property
+ def dummy_request_token(self):
+ """Dummy request token used when an invalid token was supplied.
+
+ :returns: The dummy request token string.
+
+ The dummy request token should be associated with a request token
+ secret such that get_request_token_secret(.., dummy_request_token)
+ returns a valid secret.
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement("dummy_request_token")
+
+ @property
+ def dummy_access_token(self):
+ """Dummy access token used when an invalid token was supplied.
+
+ :returns: The dummy access token string.
+
+ The dummy access token should be associated with an access token
+ secret such that get_access_token_secret(.., dummy_access_token)
+ returns a valid secret.
+
+ This method is used by
+
+ * ResourceEndpoint
+ """
+ raise self._subclass_must_implement("dummy_access_token")
+
+ def get_client_secret(self, client_key, request):
+ """Retrieves the client secret associated with the client key.
+
+ :param client_key: The client/consumer key.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The client secret as a string.
+
+ This method must allow the use of a dummy client_key value.
+ Fetching the secret using the dummy key must take the same amount of
+ time as fetching a secret for a valid client::
+
+ # Unlikely to be near constant time as it uses two database
+ # lookups for a valid client, and only one for an invalid.
+ from your_datastore import ClientSecret
+ if ClientSecret.has(client_key):
+ return ClientSecret.get(client_key)
+ else:
+ return 'dummy'
+
+ # Aim to mimic number of latency inducing operations no matter
+ # whether the client is valid or not.
+ from your_datastore import ClientSecret
+ return ClientSecret.get(client_key, 'dummy')
+
+ Note that the returned key must be in plaintext.
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ * RequestTokenEndpoint
+ * ResourceEndpoint
+ * SignatureOnlyEndpoint
+ """
+ raise self._subclass_must_implement('get_client_secret')
+
+ def get_request_token_secret(self, client_key, token, request):
+ """Retrieves the shared secret associated with the request token.
+
+ :param client_key: The client/consumer key.
+ :param token: The request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The token secret as a string.
+
+        This method must allow the use of dummy values and the running time
+ must be roughly equivalent to that of the running time of valid values::
+
+ # Unlikely to be near constant time as it uses two database
+ # lookups for a valid client, and only one for an invalid.
+ from your_datastore import RequestTokenSecret
+ if RequestTokenSecret.has(client_key):
+ return RequestTokenSecret.get((client_key, request_token))
+ else:
+ return 'dummy'
+
+ # Aim to mimic number of latency inducing operations no matter
+ # whether the client is valid or not.
+ from your_datastore import RequestTokenSecret
+ return ClientSecret.get((client_key, request_token), 'dummy')
+
+ Note that the returned key must be in plaintext.
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement('get_request_token_secret')
+
+ def get_access_token_secret(self, client_key, token, request):
+ """Retrieves the shared secret associated with the access token.
+
+ :param client_key: The client/consumer key.
+ :param token: The access token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The token secret as a string.
+
+        This method must allow the use of dummy values and the running time
+ must be roughly equivalent to that of the running time of valid values::
+
+ # Unlikely to be near constant time as it uses two database
+ # lookups for a valid client, and only one for an invalid.
+ from your_datastore import AccessTokenSecret
+ if AccessTokenSecret.has(client_key):
+ return AccessTokenSecret.get((client_key, request_token))
+ else:
+ return 'dummy'
+
+ # Aim to mimic number of latency inducing operations no matter
+ # whether the client is valid or not.
+ from your_datastore import AccessTokenSecret
+ return ClientSecret.get((client_key, request_token), 'dummy')
+
+ Note that the returned key must be in plaintext.
+
+ This method is used by
+
+ * ResourceEndpoint
+ """
+ raise self._subclass_must_implement("get_access_token_secret")
+
+ def get_default_realms(self, client_key, request):
+ """Get the default realms for a client.
+
+ :param client_key: The client/consumer key.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The list of default realms associated with the client.
+
+ The list of default realms will be set during client registration and
+ is outside the scope of OAuthLib.
+
+ This method is used by
+
+ * RequestTokenEndpoint
+ """
+ raise self._subclass_must_implement("get_default_realms")
+
+ def get_realms(self, token, request):
+ """Get realms associated with a request token.
+
+ :param token: The request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The list of realms associated with the request token.
+
+ This method is used by
+
+ * AuthorizationEndpoint
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement("get_realms")
+
+ def get_redirect_uri(self, token, request):
+ """Get the redirect URI associated with a request token.
+
+ :param token: The request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The redirect URI associated with the request token.
+
+ It may be desirable to return a custom URI if the redirect is set to "oob".
+ In this case, the user will be redirected to the returned URI and at that
+ endpoint the verifier can be displayed.
+
+ This method is used by
+
+ * AuthorizationEndpoint
+ """
+ raise self._subclass_must_implement("get_redirect_uri")
+
+ def get_rsa_key(self, client_key, request):
+ """Retrieves a previously stored client provided RSA key.
+
+ :param client_key: The client/consumer key.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: The rsa public key as a string.
+
+ This method must allow the use of a dummy client_key value. Fetching
+ the rsa key using the dummy key must take the same amount of time
+ as fetching a key for a valid client. The dummy key must also be of
+ the same bit length as client keys.
+
+ Note that the key must be returned in plaintext.
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ * RequestTokenEndpoint
+ * ResourceEndpoint
+ * SignatureOnlyEndpoint
+ """
+ raise self._subclass_must_implement("get_rsa_key")
+
+ def invalidate_request_token(self, client_key, request_token, request):
+ """Invalidates a used request token.
+
+ :param client_key: The client/consumer key.
+ :param request_token: The request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: None
+
+ Per `Section 2.3`_ of the spec:
+
+ "The server MUST (...) ensure that the temporary
+ credentials have not expired or been used before."
+
+ .. _`Section 2.3`: https://tools.ietf.org/html/rfc5849#section-2.3
+
+ This method should ensure that provided token won't validate anymore.
+ It can be simply removing RequestToken from storage or setting
+ specific flag that makes it invalid (note that such flag should be
+ also validated during request token validation).
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement("invalidate_request_token")
+
+ def validate_client_key(self, client_key, request):
+ """Validates that supplied client key is a registered and valid client.
+
+ :param client_key: The client/consumer key.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+        Note that if the dummy client is supplied it should validate in the same
+ or nearly the same amount of time as a valid one.
+
+ Ensure latency inducing tasks are mimicked even for dummy clients.
+ For example, use::
+
+ from your_datastore import Client
+ try:
+ return Client.exists(client_key, access_token)
+ except DoesNotExist:
+ return False
+
+ Rather than::
+
+ from your_datastore import Client
+ if access_token == self.dummy_access_token:
+ return False
+ else:
+ return Client.exists(client_key, access_token)
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ * RequestTokenEndpoint
+ * ResourceEndpoint
+ * SignatureOnlyEndpoint
+ """
+ raise self._subclass_must_implement("validate_client_key")
+
+ def validate_request_token(self, client_key, token, request):
+ """Validates that supplied request token is registered and valid.
+
+ :param client_key: The client/consumer key.
+ :param token: The request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ Note that if the dummy request_token is supplied it should validate in
+ the same or nearly the same amount of time as a valid one.
+
+ Ensure latency inducing tasks are mimicked even for dummy clients.
+ For example, use::
+
+ from your_datastore import RequestToken
+ try:
+ return RequestToken.exists(client_key, access_token)
+ except DoesNotExist:
+ return False
+
+ Rather than::
+
+ from your_datastore import RequestToken
+ if access_token == self.dummy_access_token:
+ return False
+ else:
+ return RequestToken.exists(client_key, access_token)
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement("validate_request_token")
+
+ def validate_access_token(self, client_key, token, request):
+ """Validates that supplied access token is registered and valid.
+
+ :param client_key: The client/consumer key.
+ :param token: The access token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ Note that if the dummy access token is supplied it should validate in
+ the same or nearly the same amount of time as a valid one.
+
+ Ensure latency inducing tasks are mimicked even for dummy clients.
+ For example, use::
+
+ from your_datastore import AccessToken
+ try:
+ return AccessToken.exists(client_key, access_token)
+ except DoesNotExist:
+ return False
+
+ Rather than::
+
+ from your_datastore import AccessToken
+ if access_token == self.dummy_access_token:
+ return False
+ else:
+ return AccessToken.exists(client_key, access_token)
+
+ This method is used by
+
+ * ResourceEndpoint
+ """
+ raise self._subclass_must_implement("validate_access_token")
+
+ def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
+ request, request_token=None, access_token=None):
+ """Validates that the nonce has not been used before.
+
+ :param client_key: The client/consumer key.
+ :param timestamp: The ``oauth_timestamp`` parameter.
+ :param nonce: The ``oauth_nonce`` parameter.
+ :param request_token: Request token string, if any.
+ :param access_token: Access token string, if any.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ Per `Section 3.3`_ of the spec.
+
+ "A nonce is a random string, uniquely generated by the client to allow
+ the server to verify that a request has never been made before and
+ helps prevent replay attacks when requests are made over a non-secure
+ channel. The nonce value MUST be unique across all requests with the
+ same timestamp, client credentials, and token combinations."
+
+ .. _`Section 3.3`: https://tools.ietf.org/html/rfc5849#section-3.3
+
+ One of the first validation checks that will be made is for the validity
+ of the nonce and timestamp, which are associated with a client key and
+ possibly a token. If invalid then immediately fail the request
+ by returning False. If the nonce/timestamp pair has been used before and
+ you may just have detected a replay attack. Therefore it is an essential
+ part of OAuth security that you not allow nonce/timestamp reuse.
+ Note that this validation check is done before checking the validity of
+ the client and token.::
+
+ nonces_and_timestamps_database = [
+ (u'foo', 1234567890, u'rannoMstrInghere', u'bar')
+ ]
+
+ def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
+ request_token=None, access_token=None):
+
+ return ((client_key, timestamp, nonce, request_token or access_token)
+ not in self.nonces_and_timestamps_database)
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ * RequestTokenEndpoint
+ * ResourceEndpoint
+ * SignatureOnlyEndpoint
+ """
+ raise self._subclass_must_implement("validate_timestamp_and_nonce")
+
+ def validate_redirect_uri(self, client_key, redirect_uri, request):
+ """Validates the client supplied redirection URI.
+
+ :param client_key: The client/consumer key.
+ :param redirect_uri: The URI the client which to redirect back to after
+ authorization is successful.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ It is highly recommended that OAuth providers require their clients
+ to register all redirection URIs prior to using them in requests and
+ register them as absolute URIs. See `CWE-601`_ for more information
+ about open redirection attacks.
+
+ By requiring registration of all redirection URIs it should be
+ straightforward for the provider to verify whether the supplied
+ redirect_uri is valid or not.
+
+ Alternatively per `Section 2.1`_ of the spec:
+
+ "If the client is unable to receive callbacks or a callback URI has
+ been established via other means, the parameter value MUST be set to
+ "oob" (case sensitive), to indicate an out-of-band configuration."
+
+ .. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
+ .. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
+
+ This method is used by
+
+ * RequestTokenEndpoint
+ """
+ raise self._subclass_must_implement("validate_redirect_uri")
+
+ def validate_requested_realms(self, client_key, realms, request):
+ """Validates that the client may request access to the realm.
+
+ :param client_key: The client/consumer key.
+ :param realms: The list of realms that client is requesting access to.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ This method is invoked when obtaining a request token and should
+ tie a realm to the request token and after user authorization
+ this realm restriction should transfer to the access token.
+
+ This method is used by
+
+ * RequestTokenEndpoint
+ """
+ raise self._subclass_must_implement("validate_requested_realms")
+
+ def validate_realms(self, client_key, token, request, uri=None,
+ realms=None):
+ """Validates access to the request realm.
+
+ :param client_key: The client/consumer key.
+ :param token: A request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :param uri: The URI the realms is protecting.
+ :param realms: A list of realms that must have been granted to
+ the access token.
+ :returns: True or False
+
+ How providers choose to use the realm parameter is outside the OAuth
+ specification but it is commonly used to restrict access to a subset
+ of protected resources such as "photos".
+
+ realms is a convenience parameter which can be used to provide
+ a per view method pre-defined list of allowed realms.
+
+ Can be as simple as::
+
+ from your_datastore import RequestToken
+ request_token = RequestToken.get(token, None)
+
+ if not request_token:
+ return False
+ return set(request_token.realms).issuperset(set(realms))
+
+ This method is used by
+
+ * ResourceEndpoint
+ """
+ raise self._subclass_must_implement("validate_realms")
+
+ def validate_verifier(self, client_key, token, verifier, request):
+ """Validates a verification code.
+
+ :param client_key: The client/consumer key.
+ :param token: A request token string.
+ :param verifier: The authorization verifier string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ OAuth providers issue a verification code to clients after the
+ resource owner authorizes access. This code is used by the client to
+ obtain token credentials and the provider must verify that the
+ verifier is valid and associated with the client as well as the
+ resource owner.
+
+ Verifier validation should be done in near constant time
+ (to avoid verifier enumeration). To achieve this we need a
+ constant time string comparison which is provided by OAuthLib
+ in ``oauthlib.common.safe_string_equals``::
+
+ from your_datastore import Verifier
+ correct_verifier = Verifier.get(client_key, request_token)
+ from oauthlib.common import safe_string_equals
+ return safe_string_equals(verifier, correct_verifier)
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement("validate_verifier")
+
+ def verify_request_token(self, token, request):
+ """Verify that the given OAuth1 request token is valid.
+
+ :param token: A request token string.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ This method is used only in AuthorizationEndpoint to check whether the
+ oauth_token given in the authorization URL is valid or not.
+ This request is not signed and thus similar ``validate_request_token``
+ method can not be used.
+
+ This method is used by
+
+ * AuthorizationEndpoint
+ """
+ raise self._subclass_must_implement("verify_request_token")
+
+ def verify_realms(self, token, realms, request):
+ """Verify authorized realms to see if they match those given to token.
+
+ :param token: An access token string.
+ :param realms: A list of realms the client attempts to access.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :returns: True or False
+
+ This prevents the list of authorized realms sent by the client during
+ the authorization step to be altered to include realms outside what
+ was bound with the request token.
+
+ Can be as simple as::
+
+ valid_realms = self.get_realms(token)
+ return all((r in valid_realms for r in realms))
+
+ This method is used by
+
+ * AuthorizationEndpoint
+ """
+ raise self._subclass_must_implement("verify_realms")
+
+ def save_access_token(self, token, request):
+ """Save an OAuth1 access token.
+
+ :param token: A dict with token credentials.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+
+ The token dictionary will at minimum include
+
+ * ``oauth_token`` the access token string.
+ * ``oauth_token_secret`` the token specific secret used in signing.
+ * ``oauth_authorized_realms`` a space separated list of realms.
+
+ Client key can be obtained from ``request.client_key``.
+
+ The list of realms (not joined string) can be obtained from
+ ``request.realm``.
+
+ This method is used by
+
+ * AccessTokenEndpoint
+ """
+ raise self._subclass_must_implement("save_access_token")
+
+ def save_request_token(self, token, request):
+ """Save an OAuth1 request token.
+
+ :param token: A dict with token credentials.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+
+ The token dictionary will at minimum include
+
+ * ``oauth_token`` the request token string.
+ * ``oauth_token_secret`` the token specific secret used in signing.
+ * ``oauth_callback_confirmed`` the string ``true``.
+
+ Client key can be obtained from ``request.client_key``.
+
+ This method is used by
+
+ * RequestTokenEndpoint
+ """
+ raise self._subclass_must_implement("save_request_token")
+
+ def save_verifier(self, token, verifier, request):
+ """Associate an authorization verifier with a request token.
+
+ :param token: A request token string.
+ :param verifier: A dictionary containing the oauth_verifier and
+ oauth_token
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+
+ We need to associate verifiers with tokens for validation during the
+ access token request.
+
+ Note that unlike save_x_token token here is the ``oauth_token`` token
+ string from the request token saved previously.
+
+ This method is used by
+
+ * AuthorizationEndpoint
+ """
+ raise self._subclass_must_implement("save_verifier")
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/signature.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/signature.py
new file mode 100644
index 0000000000..9cb1a517ee
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/signature.py
@@ -0,0 +1,852 @@
+"""
+This module is an implementation of `section 3.4`_ of RFC 5849.
+
+**Usage**
+
+Steps for signing a request:
+
+1. Collect parameters from the request using ``collect_parameters``.
+2. Normalize those parameters using ``normalize_parameters``.
+3. Create the *base string URI* using ``base_string_uri``.
+4. Create the *signature base string* from the above three components
+ using ``signature_base_string``.
+5. Pass the *signature base string* and the client credentials to one of the
+ sign-with-client functions. The HMAC-based signing functions need
+ client credentials with secrets. The RSA-based signing functions need
+ client credentials with an RSA private key.
+
+To verify a request, pass the request and credentials to one of the verify
+functions. The HMAC-based signing functions need the shared secrets. The
+RSA-based verify functions need the RSA public key.
+
+**Scope**
+
+All of the functions in this module should be considered internal to OAuthLib,
+since they are not imported into the "oauthlib.oauth1" module. Programs using
+OAuthLib should not directly invoke any of the functions in this module.
+
+**Deprecated functions**
+
+The "sign_" methods that are not "_with_client" have been deprecated. They may
+be removed in a future release. Since they are all internal functions, this
+should have no impact on properly behaving programs.
+
+.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
+"""
+
+import binascii
+import hashlib
+import hmac
+import ipaddress
+import logging
+import urllib.parse as urlparse
+import warnings
+
+from oauthlib.common import extract_params, safe_string_equals, urldecode
+
+from . import utils
+
+log = logging.getLogger(__name__)
+
+
+# ==== Common functions ==========================================
+
+def signature_base_string(
+ http_method: str,
+ base_str_uri: str,
+ normalized_encoded_request_parameters: str) -> str:
+ """
+ Construct the signature base string.
+
+ The *signature base string* is the value that is calculated and signed by
+ the client. It is also independently calculated by the server to verify
+ the signature, and therefore must produce the exact same value at both
+ ends or the signature won't verify.
+
+ The rules for calculating the *signature base string* are defined in
+ `section 3.4.1.1`_ of RFC 5849.
+
+ .. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
+ """
+
+ # The signature base string is constructed by concatenating together,
+ # in order, the following HTTP request elements:
+
+ # 1. The HTTP request method in uppercase. For example: "HEAD",
+ # "GET", "POST", etc. If the request uses a custom HTTP method, it
+ # MUST be encoded (`Section 3.6`_).
+ #
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ base_string = utils.escape(http_method.upper())
+
+ # 2. An "&" character (ASCII code 38).
+ base_string += '&'
+
+ # 3. The base string URI from `Section 3.4.1.2`_, after being encoded
+ # (`Section 3.6`_).
+ #
+ # .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ base_string += utils.escape(base_str_uri)
+
+ # 4. An "&" character (ASCII code 38).
+ base_string += '&'
+
+ # 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
+ # being encoded (`Section 3.6`).
+ #
+ # .. _`Sec 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ base_string += utils.escape(normalized_encoded_request_parameters)
+
+ return base_string
+
+
+def base_string_uri(uri: str, host: str = None) -> str:
+ """
+ Calculates the _base string URI_.
+
+ The *base string URI* is one of the components that make up the
+ *signature base string*.
+
+ The ``host`` is optional. If provided, it is used to override any host and
+ port values in the ``uri``. The value for ``host`` is usually extracted from
+ the "Host" request header from the HTTP request. Its value may be just the
+ hostname, or the hostname followed by a colon and a TCP/IP port number
+ (hostname:port). If a value for the ``host`` is provided but it does not
+ contain a port number, the default port number is used (i.e. if the ``uri``
+ contained a port number, it will be discarded).
+
+ The rules for calculating the *base string URI* are defined in
+ `section 3.4.1.2`_ of RFC 5849.
+
+ .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
+
+ :param uri: URI
+ :param host: hostname with optional port number, separated by a colon
+ :return: base string URI
+ """
+
+ if not isinstance(uri, str):
+ raise ValueError('uri must be a string.')
+
+ # FIXME: urlparse does not support unicode
+ output = urlparse.urlparse(uri)
+ scheme = output.scheme
+ hostname = output.hostname
+ port = output.port
+ path = output.path
+ params = output.params
+
+ # The scheme, authority, and path of the request resource URI `RFC3986`
+ # are included by constructing an "http" or "https" URI representing
+ # the request resource (without the query or fragment) as follows:
+ #
+ # .. _`RFC3986`: https://tools.ietf.org/html/rfc3986
+
+ if not scheme:
+ raise ValueError('missing scheme')
+
+ # Per `RFC 2616 section 5.1.2`_:
+ #
+ # Note that the absolute path cannot be empty; if none is present in
+ # the original URI, it MUST be given as "/" (the server root).
+ #
+ # .. _`RFC 2616 5.1.2`: https://tools.ietf.org/html/rfc2616#section-5.1.2
+ if not path:
+ path = '/'
+
+ # 1. The scheme and host MUST be in lowercase.
+ scheme = scheme.lower()
+ # Note: if ``host`` is used, it will be converted to lowercase below
+ if hostname is not None:
+ hostname = hostname.lower()
+
+ # 2. The host and port values MUST match the content of the HTTP
+ # request "Host" header field.
+ if host is not None:
+ # NOTE: override value in uri with provided host
+ # Host argument is equal to netloc. It means it's missing scheme.
+ # Add it back, before parsing.
+
+ host = host.lower()
+ host = f"{scheme}://{host}"
+ output = urlparse.urlparse(host)
+ hostname = output.hostname
+ port = output.port
+
+ # 3. The port MUST be included if it is not the default port for the
+ # scheme, and MUST be excluded if it is the default. Specifically,
+ # the port MUST be excluded when making an HTTP request `RFC2616`_
+ # to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
+ # All other non-default port numbers MUST be included.
+ #
+ # .. _`RFC2616`: https://tools.ietf.org/html/rfc2616
+ # .. _`RFC2818`: https://tools.ietf.org/html/rfc2818
+
+ if hostname is None:
+ raise ValueError('missing host')
+
+ # NOTE: Try guessing if we're dealing with IP or hostname
+ try:
+ hostname = ipaddress.ip_address(hostname)
+ except ValueError:
+ pass
+
+ if isinstance(hostname, ipaddress.IPv6Address):
+ hostname = f"[{hostname}]"
+ elif isinstance(hostname, ipaddress.IPv4Address):
+ hostname = f"{hostname}"
+
+ if port is not None and not (0 < port <= 65535):
+ raise ValueError('port out of range') # 16-bit unsigned ints
+ if (scheme, port) in (('http', 80), ('https', 443)):
+ netloc = hostname # default port for scheme: exclude port num
+ elif port:
+ netloc = f"{hostname}:{port}" # use hostname:port
+ else:
+ netloc = hostname
+
+ v = urlparse.urlunparse((scheme, netloc, path, params, '', ''))
+
+ # RFC 5849 does not specify which characters are encoded in the
+ # "base string URI", nor how they are encoded - which is very bad, since
+ # the signatures won't match if there are any differences. Fortunately,
+ # most URIs only use characters that are clearly not encoded (e.g. digits
+ # and A-Z, a-z), so have avoided any differences between implementations.
+ #
+ # The example from its section 3.4.1.2 illustrates that spaces in
+ # the path are percent encoded. But it provides no guidance as to what other
+ # characters (if any) must be encoded (nor how); nor if characters in the
+ # other components are to be encoded or not.
+ #
+ # This implementation **assumes** that **only** the space is percent-encoded
+ # and it is done to the entire value (not just to spaces in the path).
+ #
+ # This code may need to be changed if it is discovered that other characters
+ # are expected to be encoded.
+ #
+ # Note: the "base string URI" returned by this function will be encoded
+ # again before being concatenated into the "signature base string". So any
+ # spaces in the URI will actually appear in the "signature base string"
+ # as "%2520" (the "%20" further encoded according to section 3.6).
+
+ return v.replace(' ', '%20')
+
+
+def collect_parameters(uri_query='', body=None, headers=None,
+ exclude_oauth_signature=True, with_realm=False):
+ """
+ Gather the request parameters from all the parameter sources.
+
+ This function is used to extract all the parameters, which are then passed
+ to ``normalize_parameters`` to produce one of the components that make up
+ the *signature base string*.
+
+ Parameters starting with `oauth_` will be unescaped.
+
+ Body parameters must be supplied as a dict, a list of 2-tuples, or a
+ form encoded query string.
+
+ Headers must be supplied as a dict.
+
+ The rules where the parameters must be sourced from are defined in
+ `section 3.4.1.3.1`_ of RFC 5849.
+
+ .. _`Sec 3.4.1.3.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
+ """
+ if body is None:
+ body = []
+ headers = headers or {}
+ params = []
+
+ # The parameters from the following sources are collected into a single
+ # list of name/value pairs:
+
+ # * The query component of the HTTP request URI as defined by
+ # `RFC3986, Section 3.4`_. The query component is parsed into a list
+ # of name/value pairs by treating it as an
+ # "application/x-www-form-urlencoded" string, separating the names
+ # and values and decoding them as defined by W3C.REC-html40-19980424
+ # `W3C-HTML-4.0`_, Section 17.13.4.
+ #
+ # .. _`RFC3986, Sec 3.4`: https://tools.ietf.org/html/rfc3986#section-3.4
+ # .. _`W3C-HTML-4.0`: https://www.w3.org/TR/1998/REC-html40-19980424/
+ if uri_query:
+ params.extend(urldecode(uri_query))
+
+ # * The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
+ # present. The header's content is parsed into a list of name/value
+ # pairs excluding the "realm" parameter if present. The parameter
+ # values are decoded as defined by `Section 3.5.1`_.
+ #
+ # .. _`Section 3.5.1`: https://tools.ietf.org/html/rfc5849#section-3.5.1
+ if headers:
+ headers_lower = {k.lower(): v for k, v in headers.items()}
+ authorization_header = headers_lower.get('authorization')
+ if authorization_header is not None:
+ params.extend([i for i in utils.parse_authorization_header(
+ authorization_header) if with_realm or i[0] != 'realm'])
+
+ # * The HTTP request entity-body, but only if all of the following
+ # conditions are met:
+ # * The entity-body is single-part.
+ #
+ # * The entity-body follows the encoding requirements of the
+ # "application/x-www-form-urlencoded" content-type as defined by
+ # W3C.REC-html40-19980424 `W3C-HTML-4.0`_.
+
+ # * The HTTP request entity-header includes the "Content-Type"
+ # header field set to "application/x-www-form-urlencoded".
+ #
+ # .. _`W3C-HTML-4.0`: https://www.w3.org/TR/1998/REC-html40-19980424/
+
+ # TODO: enforce header param inclusion conditions
+ bodyparams = extract_params(body) or []
+ params.extend(bodyparams)
+
+ # ensure all oauth params are unescaped
+ unescaped_params = []
+ for k, v in params:
+ if k.startswith('oauth_'):
+ v = utils.unescape(v)
+ unescaped_params.append((k, v))
+
+ # The "oauth_signature" parameter MUST be excluded from the signature
+ # base string if present.
+ if exclude_oauth_signature:
+ unescaped_params = list(filter(lambda i: i[0] != 'oauth_signature',
+ unescaped_params))
+
+ return unescaped_params
+
+
+def normalize_parameters(params) -> str:
+ """
+ Calculate the normalized request parameters.
+
+ The *normalized request parameters* is one of the components that make up
+ the *signature base string*.
+
+ The rules for parameter normalization are defined in `section 3.4.1.3.2`_ of
+ RFC 5849.
+
+ .. _`Sec 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
+ """
+
+ # The parameters collected in `Section 3.4.1.3`_ are normalized into a
+ # single string as follows:
+ #
+ # .. _`Section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3
+
+ # 1. First, the name and value of each parameter are encoded
+ # (`Section 3.6`_).
+ #
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]
+
+ # 2. The parameters are sorted by name, using ascending byte value
+ # ordering. If two or more parameters share the same name, they
+ # are sorted by their value.
+ key_values.sort()
+
+ # 3. The name of each parameter is concatenated to its corresponding
+ # value using an "=" character (ASCII code 61) as a separator, even
+ # if the value is empty.
+ parameter_parts = ['{}={}'.format(k, v) for k, v in key_values]
+
+ # 4. The sorted name/value pairs are concatenated together into a
+ # single string by using an "&" character (ASCII code 38) as
+ # separator.
+ return '&'.join(parameter_parts)
+
+
+# ==== Common functions for HMAC-based signature methods =========
+
+def _sign_hmac(hash_algorithm_name: str,
+ sig_base_str: str,
+ client_secret: str,
+ resource_owner_secret: str):
+ """
+ **HMAC-SHA256**
+
+ The "HMAC-SHA256" signature method uses the HMAC-SHA256 signature
+ algorithm as defined in `RFC4634`_::
+
+ digest = HMAC-SHA256 (key, text)
+
+ Per `section 3.4.2`_ of the spec.
+
+ .. _`RFC4634`: https://tools.ietf.org/html/rfc4634
+ .. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
+ """
+
+ # The HMAC-SHA256 function variables are used in following way:
+
+ # text is set to the value of the signature base string from
+ # `Section 3.4.1.1`_.
+ #
+ # .. _`Section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
+ text = sig_base_str
+
+ # key is set to the concatenated values of:
+ # 1. The client shared-secret, after being encoded (`Section 3.6`_).
+ #
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ key = utils.escape(client_secret or '')
+
+ # 2. An "&" character (ASCII code 38), which MUST be included
+ # even when either secret is empty.
+ key += '&'
+
+ # 3. The token shared-secret, after being encoded (`Section 3.6`_).
+ #
+ # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+ key += utils.escape(resource_owner_secret or '')
+
+ # Get the hashing algorithm to use
+
+ m = {
+ 'SHA-1': hashlib.sha1,
+ 'SHA-256': hashlib.sha256,
+ 'SHA-512': hashlib.sha512,
+ }
+ hash_alg = m[hash_algorithm_name]
+
+ # Calculate the signature
+
+ # FIXME: HMAC does not support unicode!
+ key_utf8 = key.encode('utf-8')
+ text_utf8 = text.encode('utf-8')
+ signature = hmac.new(key_utf8, text_utf8, hash_alg)
+
+ # digest is used to set the value of the "oauth_signature" protocol
+ # parameter, after the result octet string is base64-encoded
+ # per `RFC2045, Section 6.8`.
+ #
+ # .. _`RFC2045, Sec 6.8`: https://tools.ietf.org/html/rfc2045#section-6.8
+ return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
+
+
+def _verify_hmac(hash_algorithm_name: str,
+ request,
+ client_secret=None,
+ resource_owner_secret=None):
+ """Verify a HMAC-SHA1 signature.
+
+ Per `section 3.4`_ of the spec.
+
+ .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
+
+ To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
+ attribute MUST be an absolute URI whose netloc part identifies the
+ origin server or gateway on which the resource resides. Any Host
+ item of the request argument's headers dict attribute will be
+ ignored.
+
+ .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
+
+ """
+ norm_params = normalize_parameters(request.params)
+ bs_uri = base_string_uri(request.uri)
+ sig_base_str = signature_base_string(request.http_method, bs_uri,
+ norm_params)
+ signature = _sign_hmac(hash_algorithm_name, sig_base_str,
+ client_secret, resource_owner_secret)
+ match = safe_string_equals(signature, request.signature)
+ if not match:
+ log.debug('Verify HMAC failed: signature base string: %s', sig_base_str)
+ return match
+
+
+# ==== HMAC-SHA1 =================================================
+
+def sign_hmac_sha1_with_client(sig_base_str, client):
+ return _sign_hmac('SHA-1', sig_base_str,
+ client.client_secret, client.resource_owner_secret)
+
+
+def verify_hmac_sha1(request, client_secret=None, resource_owner_secret=None):
+ return _verify_hmac('SHA-1', request, client_secret, resource_owner_secret)
+
+
+def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
+ """
+ Deprecated function for calculating a HMAC-SHA1 signature.
+
+ This function has been replaced by invoking ``sign_hmac`` with "SHA-1"
+ as the hash algorithm name.
+
+ This function was invoked by sign_hmac_sha1_with_client and
+ test_signatures.py, but does any application invoke it directly? If not,
+ it can be removed.
+ """
+ warnings.warn('use sign_hmac_sha1_with_client instead of sign_hmac_sha1',
+ DeprecationWarning)
+
+ # For some unknown reason, the original implementation assumed base_string
+ # could either be bytes or str. The signature base string calculating
+ # function always returned a str, so the new ``sign_rsa`` only expects that.
+
+ base_string = base_string.decode('ascii') \
+ if isinstance(base_string, bytes) else base_string
+
+ return _sign_hmac('SHA-1', base_string,
+ client_secret, resource_owner_secret)
+
+
+# ==== HMAC-SHA256 ===============================================
+
+def sign_hmac_sha256_with_client(sig_base_str, client):
+ return _sign_hmac('SHA-256', sig_base_str,
+ client.client_secret, client.resource_owner_secret)
+
+
+def verify_hmac_sha256(request, client_secret=None, resource_owner_secret=None):
+ return _verify_hmac('SHA-256', request,
+ client_secret, resource_owner_secret)
+
+
+def sign_hmac_sha256(base_string, client_secret, resource_owner_secret):
+ """
+ Deprecated function for calculating a HMAC-SHA256 signature.
+
+ This function has been replaced by invoking ``sign_hmac`` with "SHA-256"
+ as the hash algorithm name.
+
+ This function was invoked by sign_hmac_sha256_with_client and
+ test_signatures.py, but does any application invoke it directly? If not,
+ it can be removed.
+ """
+ warnings.warn(
+ 'use sign_hmac_sha256_with_client instead of sign_hmac_sha256',
+ DeprecationWarning)
+
+ # For some unknown reason, the original implementation assumed base_string
+ # could either be bytes or str. The signature base string calculating
+ # function always returned a str, so the new ``sign_rsa`` only expects that.
+
+ base_string = base_string.decode('ascii') \
+ if isinstance(base_string, bytes) else base_string
+
+ return _sign_hmac('SHA-256', base_string,
+ client_secret, resource_owner_secret)
+
+
+# ==== HMAC-SHA512 ===============================================
+
+def sign_hmac_sha512_with_client(sig_base_str: str,
+ client):
+ return _sign_hmac('SHA-512', sig_base_str,
+ client.client_secret, client.resource_owner_secret)
+
+
+def verify_hmac_sha512(request,
+ client_secret: str = None,
+ resource_owner_secret: str = None):
+ return _verify_hmac('SHA-512', request,
+ client_secret, resource_owner_secret)
+
+
+# ==== Common functions for RSA-based signature methods ==========
+
+_jwt_rsa = {}  # cache of RSA-hash implementations from PyJWT jwt.algorithms
+
+
+def _get_jwt_rsa_algorithm(hash_algorithm_name: str):
+    """
+    Obtains an RSAAlgorithm object that implements RSA with the hash algorithm.
+
+    This method maintains the ``_jwt_rsa`` cache.
+
+    :param hash_algorithm_name: one of 'SHA-1', 'SHA-256' or 'SHA-512'
+        (any other name raises ``KeyError`` from the lookup table below).
+
+    Returns a jwt.algorithm.RSAAlgorithm.
+    """
+    if hash_algorithm_name in _jwt_rsa:
+        # Found in cache: return it
+        return _jwt_rsa[hash_algorithm_name]
+    else:
+        # Not in cache: instantiate a new RSAAlgorithm
+
+        # PyJWT has some nice pycrypto/cryptography abstractions
+        # (imported lazily so PyJWT is only required for RSA-based methods)
+        import jwt.algorithms as jwt_algorithms
+        m = {
+            'SHA-1': jwt_algorithms.hashes.SHA1,
+            'SHA-256': jwt_algorithms.hashes.SHA256,
+            'SHA-512': jwt_algorithms.hashes.SHA512,
+        }
+        v = jwt_algorithms.RSAAlgorithm(m[hash_algorithm_name])
+
+        _jwt_rsa[hash_algorithm_name] = v  # populate cache
+
+        return v
+
+
+def _prepare_key_plus(alg, keystr):
+    """
+    Prepare a PEM encoded key (public or private), by invoking the `prepare_key`
+    method on alg with the keystr.
+
+    The keystr should be a string or bytes. If the keystr is bytes, it is
+    decoded as UTF-8 before being passed to prepare_key. Otherwise, it
+    is passed directly.
+    """
+    if isinstance(keystr, bytes):
+        keystr = keystr.decode('utf-8')
+    return alg.prepare_key(keystr)
+
+
+def _sign_rsa(hash_algorithm_name: str,
+              sig_base_str: str,
+              rsa_private_key: str):
+    """
+    Calculate the signature for an RSA-based signature method.
+
+    The ``alg`` is used to calculate the digest over the signature base string.
+    For the "RSA_SHA1" signature method, the alg must be SHA-1. While OAuth 1.0a
+    only defines the RSA-SHA1 signature method, this function can be used for
+    other non-standard signature methods that only differ from RSA-SHA1 by the
+    digest algorithm.
+
+    Signing for the RSA-SHA1 signature method is defined in
+    `section 3.4.3`_ of RFC 5849.
+
+    The RSASSA-PKCS1-v1_5 signature algorithm used defined by
+    `RFC3447, Section 8.2`_ (also known as PKCS#1), with the `alg` as the
+    hash function for EMSA-PKCS1-v1_5. To
+    use this method, the client MUST have established client credentials
+    with the server that included its RSA public key (in a manner that is
+    beyond the scope of this specification).
+
+    :raises ValueError: if ``rsa_private_key`` is empty or None.
+
+    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
+    .. _`RFC3447, Section 8.2`: https://tools.ietf.org/html/rfc3447#section-8.2
+    """
+
+    # Get the implementation of RSA-hash
+
+    alg = _get_jwt_rsa_algorithm(hash_algorithm_name)
+
+    # Check private key
+
+    if not rsa_private_key:
+        raise ValueError('rsa_private_key required for RSA with ' +
+                         alg.hash_alg.name + ' signature method')
+
+    # Convert the "signature base string" into a sequence of bytes (M)
+    #
+    # The signature base string, by definition, only contains printable
+    # US-ASCII characters. So encoding it as 'ascii' will always work. It will
+    # raise a ``UnicodeError`` if it can't encode the value, which will never
+    # happen if the signature base string was created correctly. Therefore,
+    # using 'ascii' encoding provides an extra level of error checking.
+
+    m = sig_base_str.encode('ascii')
+
+    # Perform signing: S = RSASSA-PKCS1-V1_5-SIGN (K, M)
+
+    key = _prepare_key_plus(alg, rsa_private_key)
+    s = alg.sign(m, key)
+
+    # base64-encoded per RFC2045 section 6.8.
+    #
+    # 1. While b2a_base64 implements base64 defined by RFC 3548. As used here,
+    #    it is the same as base64 defined by RFC 2045.
+    # 2. b2a_base64 includes a "\n" at the end of its result ([:-1] removes it)
+    # 3. b2a_base64 produces a binary string. Use decode to produce a str.
+    #    It should contain only printable US-ASCII characters.
+
+    return binascii.b2a_base64(s)[:-1].decode('ascii')
+
+
+def _verify_rsa(hash_algorithm_name: str,
+                request,
+                rsa_public_key: str):
+    """
+    Verify a base64 encoded signature for a RSA-based signature method.
+
+    The ``alg`` is used to calculate the digest over the signature base string.
+    For the "RSA_SHA1" signature method, the alg must be SHA-1. While OAuth 1.0a
+    only defines the RSA-SHA1 signature method, this function can be used for
+    other non-standard signature methods that only differ from RSA-SHA1 by the
+    digest algorithm.
+
+    Verification for the RSA-SHA1 signature method is defined in
+    `section 3.4.3`_ of RFC 5849.
+
+    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
+
+    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
+    attribute MUST be an absolute URI whose netloc part identifies the
+    origin server or gateway on which the resource resides. Any Host
+    item of the request argument's headers dict attribute will be
+    ignored.
+
+    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
+    """
+
+    try:
+        # Calculate the *signature base string* of the actual received request
+
+        norm_params = normalize_parameters(request.params)
+        bs_uri = base_string_uri(request.uri)
+        sig_base_str = signature_base_string(
+            request.http_method, bs_uri, norm_params)
+
+        # Obtain the signature that was received in the request
+
+        sig = binascii.a2b_base64(request.signature.encode('ascii'))
+
+        # Get the implementation of RSA-with-hash algorithm to use
+
+        alg = _get_jwt_rsa_algorithm(hash_algorithm_name)
+
+        # Verify the received signature was produced by the private key
+        # corresponding to the `rsa_public_key`, signing exact same
+        # *signature base string*.
+        #
+        #     RSASSA-PKCS1-V1_5-VERIFY ((n, e), M, S)
+
+        key = _prepare_key_plus(alg, rsa_public_key)
+
+        # The signature base string only contains printable US-ASCII
+        # characters. The ``encode`` method with the default "strict" error
+        # handling will raise a ``UnicodeError`` if it can't encode the value.
+        # So using "ascii" will always work.
+
+        verify_ok = alg.verify(sig_base_str.encode('ascii'), key, sig)
+
+        if not verify_ok:
+            # NOTE(review): this message concatenates sig_base_str onto a
+            # format string containing '%s' instead of passing it as a lazy
+            # logging argument, so the '%s' is never substituted. It should
+            # read log.debug('...: signature base string=%s', sig_base_str).
+            log.debug('Verify failed: RSA with ' + alg.hash_alg.name +
+                      ': signature base string=%s' + sig_base_str)
+        return verify_ok
+
+    except UnicodeError:
+        # A properly encoded signature will only contain printable US-ASCII
+        # characters. The ``encode`` method with the default "strict" error
+        # handling will raise a ``UnicodeError`` if it can't decode the value.
+        # So using "ascii" will work with all valid signatures. But an
+        # incorrectly or maliciously produced signature could contain other
+        # bytes.
+        #
+        # This implementation treats that situation as equivalent to the
+        # signature verification having failed.
+        #
+        # Note: simply changing the encode to use 'utf-8' will not remove this
+        # case, since an incorrect or malicious request can contain bytes which
+        # are invalid as UTF-8.
+        return False
+
+
+# ==== RSA-SHA1 ==================================================
+
+def sign_rsa_sha1_with_client(sig_base_str, client):
+    # Sign the signature base string with the client's RSA private key
+    # (client.rsa_key) using RSA-SHA1.
+    #
+    # For some reason, this function originally accepts both str and bytes.
+    # This behaviour is preserved here. But won't be done for the newer
+    # sign_rsa_sha256_with_client and sign_rsa_sha512_with_client functions,
+    # which will only accept strings. The function to calculate a
+    # "signature base string" always produces a string, so it is not clear
+    # why support for bytes would ever be needed.
+    sig_base_str = sig_base_str.decode('ascii')\
+        if isinstance(sig_base_str, bytes) else sig_base_str
+
+    return _sign_rsa('SHA-1', sig_base_str, client.rsa_key)
+
+
+def verify_rsa_sha1(request, rsa_public_key: str):
+    # Verify an RSA-SHA1 signature on the received request; returns a bool.
+    return _verify_rsa('SHA-1', request, rsa_public_key)
+
+
+def sign_rsa_sha1(base_string, rsa_private_key):
+    """
+    Deprecated function for calculating a RSA-SHA1 signature.
+
+    This function has been replaced by invoking ``sign_rsa`` with "SHA-1"
+    as the hash algorithm name.
+
+    This function was invoked by sign_rsa_sha1_with_client and
+    test_signatures.py, but does any application invoke it directly? If not,
+    it can be removed.
+    """
+    warnings.warn('use _sign_rsa("SHA-1", ...) instead of sign_rsa_sha1',
+                  DeprecationWarning)
+
+    # Accept bytes for backwards compatibility (see sign_rsa_sha1_with_client).
+    if isinstance(base_string, bytes):
+        base_string = base_string.decode('ascii')
+
+    return _sign_rsa('SHA-1', base_string, rsa_private_key)
+
+
+# ==== RSA-SHA256 ================================================
+
+def sign_rsa_sha256_with_client(sig_base_str: str, client):
+    # Sign the signature base string with the client's RSA private key
+    # using RSA-SHA256. Unlike the SHA-1 variant, only str is accepted.
+    return _sign_rsa('SHA-256', sig_base_str, client.rsa_key)
+
+
+def verify_rsa_sha256(request, rsa_public_key: str):
+    # Verify an RSA-SHA256 signature on the received request; returns a bool.
+    return _verify_rsa('SHA-256', request, rsa_public_key)
+
+
+# ==== RSA-SHA512 ================================================
+
+def sign_rsa_sha512_with_client(sig_base_str: str, client):
+    # Sign the signature base string with the client's RSA private key
+    # using RSA-SHA512. Unlike the SHA-1 variant, only str is accepted.
+    return _sign_rsa('SHA-512', sig_base_str, client.rsa_key)
+
+
+def verify_rsa_sha512(request, rsa_public_key: str):
+    # Verify an RSA-SHA512 signature on the received request; returns a bool.
+    return _verify_rsa('SHA-512', request, rsa_public_key)
+
+
+# ==== PLAINTEXT =================================================
+
+def sign_plaintext_with_client(_signature_base_string, client):
+    # _signature_base_string is not used because the signature with PLAINTEXT
+    # is just the secret: it isn't a real signature.
+    # Argument order to sign_plaintext: client secret first, then the
+    # resource owner (token) secret.
+    return sign_plaintext(client.client_secret, client.resource_owner_secret)
+
+
+def sign_plaintext(client_secret, resource_owner_secret):
+    """Sign a request using plaintext.
+
+    Per `section 3.4.4`_ of the spec.
+
+    The "PLAINTEXT" method does not employ a signature algorithm. It
+    MUST be used with a transport-layer mechanism such as TLS or SSL (or
+    sent over a secure channel with equivalent protections). It does not
+    utilize the signature base string or the "oauth_timestamp" and
+    "oauth_nonce" parameters.
+
+    Returns the two percent-encoded secrets joined by an "&"; ``None``
+    secrets are treated as empty strings.
+
+    .. _`section 3.4.4`: https://tools.ietf.org/html/rfc5849#section-3.4.4
+
+    """
+
+    # The "oauth_signature" protocol parameter is set to the concatenated
+    # value of:
+
+    # 1. The client shared-secret, after being encoded (`Section 3.6`_).
+    #
+    #    .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+    signature = utils.escape(client_secret or '')
+
+    # 2. An "&" character (ASCII code 38), which MUST be included even
+    #    when either secret is empty.
+    signature += '&'
+
+    # 3. The token shared-secret, after being encoded (`Section 3.6`_).
+    #
+    #    .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+    signature += utils.escape(resource_owner_secret or '')
+
+    return signature
+
+
+def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
+    """Verify a PLAINTEXT signature.
+
+    Per `section 3.4`_ of the spec.
+
+    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
+    """
+    # Recompute the expected "signature" (just the escaped secrets) and
+    # compare it with what the request carried. safe_string_equals is
+    # defined elsewhere; presumably a timing-safe comparison — confirm.
+    signature = sign_plaintext(client_secret, resource_owner_secret)
+    match = safe_string_equals(signature, request.signature)
+    if not match:
+        log.debug('Verify PLAINTEXT failed')
+    return match
diff --git a/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/utils.py b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/utils.py
new file mode 100644
index 0000000000..8fb8302e30
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth1/rfc5849/utils.py
@@ -0,0 +1,83 @@
+"""
+oauthlib.utils
+~~~~~~~~~~~~~~
+
+This module contains utility methods used by various parts of the OAuth
+spec.
+"""
+import urllib.request as urllib2
+
+from oauthlib.common import quote, unquote
+
+UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ '0123456789')
+
+
+def filter_params(target):
+    """Decorator which filters params to remove non-oauth_* parameters
+
+    Assumes the decorated method takes a params dict or list of tuples as its
+    first argument.
+    """
+    def wrapper(params, *args, **kwargs):
+        params = filter_oauth_params(params)
+        return target(params, *args, **kwargs)
+
+    # NOTE(review): only __doc__ is copied onto the wrapper; __name__,
+    # __module__ etc. are not preserved (functools.wraps would do both).
+    wrapper.__doc__ = target.__doc__
+    return wrapper
+
+
+def filter_oauth_params(params):
+    """Removes all non oauth parameters from a dict or a list of params.
+
+    Always returns a list of (key, value) tuples, regardless of whether a
+    dict or a list was passed in.
+    """
+    is_oauth = lambda kv: kv[0].startswith("oauth_")
+    if isinstance(params, dict):
+        return list(filter(is_oauth, list(params.items())))
+    else:
+        return list(filter(is_oauth, params))
+
+
+def escape(u):
+    """Escape a unicode string in an OAuth-compatible fashion.
+
+    Per `section 3.6`_ of the spec.
+
+    :raises ValueError: if ``u`` is not a str.
+
+    .. _`section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
+
+    """
+    if not isinstance(u, str):
+        raise ValueError('Only unicode objects are escapable. ' +
+                         'Got {!r} of type {}.'.format(u, type(u)))
+    # Letters, digits, and the characters '_.-' are already treated as safe
+    # by urllib.quote(). We need to add '~' to fully support rfc5849.
+    return quote(u, safe=b'~')
+
+
+def unescape(u):
+    """Percent-decode a string escaped by :func:`escape`.
+
+    :raises ValueError: if ``u`` is not a str.
+    """
+    if not isinstance(u, str):
+        raise ValueError('Only unicode objects are unescapable.')
+    return unquote(u)
+
+
+def parse_keqv_list(l):
+    """A unicode-safe version of urllib2.parse_keqv_list"""
+    # Historical wrapper: on Python 3 this delegates directly to
+    # urllib.request.parse_keqv_list, which handles unicode fine.
+    return urllib2.parse_keqv_list(l)
+
+
+def parse_http_list(u):
+    """A unicode-safe version of urllib2.parse_http_list"""
+    # Historical wrapper: on Python 3 this delegates directly to
+    # urllib.request.parse_http_list, which handles unicode fine.
+    return urllib2.parse_http_list(u)
+
+
+def parse_authorization_header(authorization_header):
+    """Parse an OAuth authorization header into a list of 2-tuples
+
+    :raises ValueError: if the header does not start with the ``OAuth ``
+        scheme or its parameter list cannot be parsed.
+    """
+    auth_scheme = 'OAuth '.lower()
+    # NOTE(review): slicing to len(auth_scheme) and then calling startswith
+    # on the slice is redundant — the slice has exactly that length, so a
+    # simple equality test (or startswith on the full string) would do.
+    if authorization_header[:len(auth_scheme)].lower().startswith(auth_scheme):
+        items = parse_http_list(authorization_header[len(auth_scheme):])
+        try:
+            return list(parse_keqv_list(items).items())
+        except (IndexError, ValueError):
+            pass
+    raise ValueError('Malformed authorization header')
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/__init__.py
new file mode 100644
index 0000000000..deefb1af78
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/__init__.py
@@ -0,0 +1,36 @@
+"""
+oauthlib.oauth2
+~~~~~~~~~~~~~~
+
+This module is a wrapper for the most recent implementation of OAuth 2.0 Client
+and Server classes.
+"""
+from .rfc6749.clients import (
+ BackendApplicationClient, Client, LegacyApplicationClient,
+ MobileApplicationClient, ServiceApplicationClient, WebApplicationClient,
+)
+from .rfc6749.endpoints import (
+ AuthorizationEndpoint, BackendApplicationServer, IntrospectEndpoint,
+ LegacyApplicationServer, MetadataEndpoint, MobileApplicationServer,
+ ResourceEndpoint, RevocationEndpoint, Server, TokenEndpoint,
+ WebApplicationServer,
+)
+from .rfc6749.errors import (
+ AccessDeniedError, FatalClientError, InsecureTransportError,
+ InvalidClientError, InvalidClientIdError, InvalidGrantError,
+ InvalidRedirectURIError, InvalidRequestError, InvalidRequestFatalError,
+ InvalidScopeError, MismatchingRedirectURIError, MismatchingStateError,
+ MissingClientIdError, MissingCodeError, MissingRedirectURIError,
+ MissingResponseTypeError, MissingTokenError, MissingTokenTypeError,
+ OAuth2Error, ServerError, TemporarilyUnavailableError, TokenExpiredError,
+ UnauthorizedClientError, UnsupportedGrantTypeError,
+ UnsupportedResponseTypeError, UnsupportedTokenTypeError,
+)
+from .rfc6749.grant_types import (
+ AuthorizationCodeGrant, ClientCredentialsGrant, ImplicitGrant,
+ RefreshTokenGrant, ResourceOwnerPasswordCredentialsGrant,
+)
+from .rfc6749.request_validator import RequestValidator
+from .rfc6749.tokens import BearerToken, OAuth2Token
+from .rfc6749.utils import is_secure_transport
+from .rfc8628.clients import DeviceClient
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/__init__.py
new file mode 100644
index 0000000000..4b75a8a196
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/__init__.py
@@ -0,0 +1,16 @@
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import functools
+import logging
+
+from .endpoints.base import BaseEndpoint, catch_errors_and_unavailability
+from .errors import (
+ FatalClientError, OAuth2Error, ServerError, TemporarilyUnavailableError,
+)
+
+# Module-level logger shared by the rfc6749 package internals.
+log = logging.getLogger(__name__)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/__init__.py
new file mode 100644
index 0000000000..8fc6c955a2
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming OAuth 2.0 RFC6749.
+"""
+from .backend_application import BackendApplicationClient
+from .base import AUTH_HEADER, BODY, URI_QUERY, Client
+from .legacy_application import LegacyApplicationClient
+from .mobile_application import MobileApplicationClient
+from .service_application import ServiceApplicationClient
+from .web_application import WebApplicationClient
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/backend_application.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/backend_application.py
new file mode 100644
index 0000000000..e11e8fae38
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/backend_application.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+from ..parameters import prepare_token_request
+from .base import Client
+
+
+class BackendApplicationClient(Client):
+
+    """A public client utilizing the client credentials grant workflow.
+
+    The client can request an access token using only its client
+    credentials (or other supported means of authentication) when the
+    client is requesting access to the protected resources under its
+    control, or those of another resource owner which has been previously
+    arranged with the authorization server (the method of which is beyond
+    the scope of this specification).
+
+    The client credentials grant type MUST only be used by confidential
+    clients.
+
+    Since the client authentication is used as the authorization grant,
+    no additional authorization request is needed.
+    """
+
+    grant_type = 'client_credentials'
+
+    def prepare_request_body(self, body='', scope=None,
+                             include_client_id=False, **kwargs):
+        """Add the client credentials to the request body.
+
+        The client makes a request to the token endpoint by adding the
+        following parameters using the "application/x-www-form-urlencoded"
+        format per `Appendix B`_ in the HTTP request entity-body:
+
+        :param body: Existing request body (URL encoded string) to embed parameters
+                     into. This may contain extra parameters. Default ''.
+        :param scope: The scope of the access request as described by
+                      `Section 3.3`_.
+
+        :param include_client_id: `True` to send the `client_id` in the
+                                  body of the upstream request. This is required
+                                  if the client is not authenticating with the
+                                  authorization server as described in
+                                  `Section 3.2.1`_. False otherwise (default).
+        :type include_client_id: Boolean
+
+        :param kwargs: Extra credentials to include in the token request.
+
+        The client MUST authenticate with the authorization server as
+        described in `Section 3.2.1`_.
+
+        The prepared body will include all provided credentials as well as
+        the ``grant_type`` parameter set to ``client_credentials``::
+
+            >>> from oauthlib.oauth2 import BackendApplicationClient
+            >>> client = BackendApplicationClient('your_id')
+            >>> client.prepare_request_body(scope=['hello', 'world'])
+            'grant_type=client_credentials&scope=hello+world'
+
+        .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
+        .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
+        .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
+        """
+        # client_id and include_client_id travel through kwargs into the
+        # generic token-request builder; scope falls back to the instance
+        # default when not explicitly supplied.
+        kwargs['client_id'] = self.client_id
+        kwargs['include_client_id'] = include_client_id
+        scope = self.scope if scope is None else scope
+        return prepare_token_request(self.grant_type, body=body,
+                                     scope=scope, **kwargs)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/base.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/base.py
new file mode 100644
index 0000000000..d5eb0cc15f
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/base.py
@@ -0,0 +1,604 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming OAuth 2.0 RFC6749.
+"""
+import base64
+import hashlib
+import re
+import secrets
+import time
+import warnings
+
+from oauthlib.common import generate_token
+from oauthlib.oauth2.rfc6749 import tokens
+from oauthlib.oauth2.rfc6749.errors import (
+ InsecureTransportError, TokenExpiredError,
+)
+from oauthlib.oauth2.rfc6749.parameters import (
+ parse_token_response, prepare_token_request,
+ prepare_token_revocation_request,
+)
+from oauthlib.oauth2.rfc6749.utils import is_secure_transport
+
+AUTH_HEADER = 'auth_header'
+URI_QUERY = 'query'
+BODY = 'body'
+
+FORM_ENC_HEADERS = {
+ 'Content-Type': 'application/x-www-form-urlencoded'
+}
+
+
+class Client:
+ """Base OAuth2 client responsible for access token management.
+
+ This class also acts as a generic interface providing methods common to all
+ client types such as ``prepare_authorization_request`` and
+ ``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
+ the recommended way of interacting with clients (as opposed to the abstract
+ prepare uri/body/etc methods). They are recommended over the older set
+ because they are easier to use (more consistent) and add a few additional
+ security checks, such as HTTPS and state checking.
+
+ Some of these methods require further implementation only provided by the
+ specific purpose clients such as
+ :py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
+ seek to use the client class matching the OAuth workflow you need. For
+ Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
+
+ """
+ refresh_token_key = 'refresh_token'
+
+    def __init__(self, client_id,
+                 default_token_placement=AUTH_HEADER,
+                 token_type='Bearer',
+                 access_token=None,
+                 refresh_token=None,
+                 mac_key=None,
+                 mac_algorithm=None,
+                 token=None,
+                 scope=None,
+                 state=None,
+                 redirect_url=None,
+                 state_generator=generate_token,
+                 code_verifier=None,
+                 code_challenge=None,
+                 code_challenge_method=None,
+                 **kwargs):
+        """Initialize a client with commonly used attributes.
+
+        :param client_id: Client identifier given by the OAuth provider upon
+        registration.
+
+        :param default_token_placement: Tokens can be supplied in the Authorization
+        header (default), the URL query component (``query``) or the request
+        body (``body``).
+
+        :param token_type: OAuth 2 token type. Defaults to Bearer. Change this
+        if you specify the ``access_token`` parameter and know it is of a
+        different token type, such as a MAC, JWT or SAML token. Can
+        also be supplied as ``token_type`` inside the ``token`` dict parameter.
+
+        :param access_token: An access token (string) used to authenticate
+        requests to protected resources. Can also be supplied inside the
+        ``token`` dict parameter.
+
+        :param refresh_token: A refresh token (string) used to refresh expired
+        tokens. Can also be supplied inside the ``token`` dict parameter.
+
+        :param mac_key: Encryption key used with MAC tokens.
+
+        :param mac_algorithm: Hashing algorithm for MAC tokens.
+
+        :param token: A dict of token attributes such as ``access_token``,
+        ``token_type`` and ``expires_at``.
+
+        :param scope: A list of default scopes to request authorization for.
+
+        :param state: A CSRF protection string used during authorization.
+
+        :param redirect_url: The redirection endpoint on the client side to which
+        the user returns after authorization.
+
+        :param state_generator: A no argument state generation callable. Defaults
+        to :py:meth:`oauthlib.common.generate_token`.
+
+        :param code_verifier: PKCE parameter. A cryptographically random string that is used to correlate the
+        authorization request to the token request.
+
+        :param code_challenge: PKCE parameter. A challenge derived from the code verifier that is sent in the
+        authorization request, to be verified against later.
+
+        :param code_challenge_method: PKCE parameter. A method that was used to derive code challenge.
+        Defaults to "plain" if not present in the request.
+        """
+
+        self.client_id = client_id
+        self.default_token_placement = default_token_placement
+        self.token_type = token_type
+        self.access_token = access_token
+        self.refresh_token = refresh_token
+        self.mac_key = mac_key
+        self.mac_algorithm = mac_algorithm
+        self.token = token or {}
+        self.scope = scope
+        self.state_generator = state_generator
+        self.state = state
+        self.redirect_url = redirect_url
+        self.code_verifier = code_verifier
+        self.code_challenge = code_challenge
+        self.code_challenge_method = code_challenge_method
+        self.code = None
+        self.expires_in = None
+        self._expires_at = None
+        # NOTE(review): populate_token_attributes is defined elsewhere in this
+        # class; presumably it mirrors entries of the ``token`` dict (e.g.
+        # access_token, expires_at) onto instance attributes — confirm.
+        self.populate_token_attributes(self.token)
+
+    @property
+    def token_types(self):
+        """Supported token types and their respective methods
+
+        Additional tokens can be supported by extending this dictionary.
+
+        The Bearer token spec is stable and safe to use.
+
+        The MAC token spec is not yet stable and support for MAC tokens
+        is experimental and currently matching version 00 of the spec.
+        """
+        # Maps a token type name to the bound method that attaches such a
+        # token to an outgoing request (see add_token).
+        return {
+            'Bearer': self._add_bearer_token,
+            'MAC': self._add_mac_token
+        }
+
+    # The following three methods form the abstract interface that each
+    # concrete grant-specific client (web, mobile, legacy, backend) must
+    # implement.
+
+    def prepare_request_uri(self, *args, **kwargs):
+        """Abstract method used to create request URIs."""
+        raise NotImplementedError("Must be implemented by inheriting classes.")
+
+    def prepare_request_body(self, *args, **kwargs):
+        """Abstract method used to create request bodies."""
+        raise NotImplementedError("Must be implemented by inheriting classes.")
+
+    def parse_request_uri_response(self, *args, **kwargs):
+        """Abstract method used to parse redirection responses."""
+        raise NotImplementedError("Must be implemented by inheriting classes.")
+
+    def add_token(self, uri, http_method='GET', body=None, headers=None,
+                  token_placement=None, **kwargs):
+        """Add token to the request uri, body or authorization header.
+
+        The access token type provides the client with the information
+        required to successfully utilize the access token to make a protected
+        resource request (along with type-specific attributes). The client
+        MUST NOT use an access token if it does not understand the token
+        type.
+
+        For example, the "bearer" token type defined in
+        [`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
+        token string in the request:
+
+        .. code-block:: http
+
+            GET /resource/1 HTTP/1.1
+            Host: example.com
+            Authorization: Bearer mF_9.B5f-4.1JqM
+
+        while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
+        utilized by issuing a MAC key together with the access token which is
+        used to sign certain components of the HTTP requests:
+
+        .. code-block:: http
+
+            GET /resource/1 HTTP/1.1
+            Host: example.com
+            Authorization: MAC id="h480djs93hd8",
+                               nonce="274312:dj83hs9s",
+                               mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
+
+        :raises InsecureTransportError: if the uri is not HTTPS (or otherwise
+            deemed insecure by is_secure_transport).
+        :raises ValueError: if the token type is unsupported or no access
+            token is available.
+        :raises TokenExpiredError: if the stored token has expired.
+
+        .. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
+        .. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
+        """
+        if not is_secure_transport(uri):
+            raise InsecureTransportError()
+
+        token_placement = token_placement or self.default_token_placement
+
+        # Token type matching is case-insensitive ('bearer' == 'Bearer').
+        case_insensitive_token_types = {
+            k.lower(): v for k, v in self.token_types.items()}
+        if not self.token_type.lower() in case_insensitive_token_types:
+            raise ValueError("Unsupported token type: %s" % self.token_type)
+
+        if not (self.access_token or self.token.get('access_token')):
+            raise ValueError("Missing access token.")
+
+        if self._expires_at and self._expires_at < time.time():
+            raise TokenExpiredError()
+
+        # Delegate to the type-specific handler (e.g. _add_bearer_token).
+        return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
+                                                                     headers, token_placement, **kwargs)
+
+    def prepare_authorization_request(self, authorization_url, state=None,
+                                      redirect_url=None, scope=None, **kwargs):
+        """Prepare the authorization request.
+
+        This is the first step in many OAuth flows in which the user is
+        redirected to a certain authorization URL. This method adds
+        required parameters to the authorization URL.
+
+        :param authorization_url: Provider authorization endpoint URL.
+        :param state: CSRF protection string. Will be automatically created if
+            not provided. The generated state is available via the ``state``
+            attribute. Clients should verify that the state is unchanged and
+            present in the authorization response. This verification is done
+            automatically if using the ``authorization_response`` parameter
+            with ``prepare_token_request``.
+        :param redirect_url: Redirect URL to which the user will be returned
+            after authorization. Must be provided unless previously setup with
+            the provider. If provided then it must also be provided in the
+            token request.
+        :param scope: List of scopes to request. Must be equal to
+            or a subset of the scopes granted when obtaining the refresh
+            token. If none is provided, the ones provided in the constructor are
+            used.
+        :param kwargs: Additional parameters to included in the request.
+        :raises InsecureTransportError: if the authorization_url is insecure.
+        :returns: The prepared request tuple with (url, headers, body).
+        """
+        if not is_secure_transport(authorization_url):
+            raise InsecureTransportError()
+
+        # Remember state and redirect_url for verification in the token step.
+        self.state = state or self.state_generator()
+        self.redirect_url = redirect_url or self.redirect_url
+        # do not assign scope to self automatically anymore
+        scope = self.scope if scope is None else scope
+        auth_url = self.prepare_request_uri(
+            authorization_url, redirect_uri=self.redirect_url,
+            scope=scope, state=self.state, **kwargs)
+        return auth_url, FORM_ENC_HEADERS, ''
+
+    def prepare_token_request(self, token_url, authorization_response=None,
+                              redirect_url=None, state=None, body='', **kwargs):
+        """Prepare a token creation request.
+
+        Note that these requests usually require client authentication, either
+        by including client_id or a set of provider specific authentication
+        credentials.
+
+        :param token_url: Provider token creation endpoint URL.
+        :param authorization_response: The full redirection URL string, i.e.
+            the location to which the user was redirected after successful
+            authorization. Used to mine credentials needed to obtain a token
+            in this step, such as authorization code.
+        :param redirect_url: The redirect_url supplied with the authorization
+            request (if there was one).
+        :param state:
+        :param body: Existing request body (URL encoded string) to embed parameters
+            into. This may contain extra parameters. Default ''.
+        :param kwargs: Additional parameters to included in the request.
+        :raises InsecureTransportError: if the token_url is insecure.
+        :returns: The prepared request tuple with (url, headers, body).
+        """
+        if not is_secure_transport(token_url):
+            raise InsecureTransportError()
+
+        state = state or self.state
+        if authorization_response:
+            # Extract code (and verify state) from the redirect back.
+            self.parse_request_uri_response(
+                authorization_response, state=state)
+        self.redirect_url = redirect_url or self.redirect_url
+        body = self.prepare_request_body(body=body,
+                                         redirect_uri=self.redirect_url, **kwargs)
+
+        return token_url, FORM_ENC_HEADERS, body
+
+    def prepare_refresh_token_request(self, token_url, refresh_token=None,
+                                      body='', scope=None, **kwargs):
+        """Prepare an access token refresh request.
+
+        Expired access tokens can be replaced by new access tokens without
+        going through the OAuth dance if the client obtained a refresh token.
+        This refresh token and authentication credentials can be used to
+        obtain a new access token, and possibly a new refresh token.
+
+        :param token_url: Provider token refresh endpoint URL.
+        :param refresh_token: Refresh token string.
+        :param body: Existing request body (URL encoded string) to embed parameters
+            into. This may contain extra parameters. Default ''.
+        :param scope: List of scopes to request. Must be equal to
+            or a subset of the scopes granted when obtaining the refresh
+            token. If none is provided, the ones provided in the constructor are
+            used.
+        :param kwargs: Additional parameters to included in the request.
+        :raises InsecureTransportError: if the token_url is insecure.
+        :returns: The prepared request tuple with (url, headers, body).
+        """
+        if not is_secure_transport(token_url):
+            raise InsecureTransportError()
+
+        # do not assign scope to self automatically anymore
+        scope = self.scope if scope is None else scope
+        body = self.prepare_refresh_body(body=body,
+                                         refresh_token=refresh_token, scope=scope, **kwargs)
+        return token_url, FORM_ENC_HEADERS, body
+
+ def prepare_token_revocation_request(self, revocation_url, token,
+ token_type_hint="access_token", body='', callback=None, **kwargs):
+ """Prepare a token revocation request.
+
+ :param revocation_url: Provider token revocation endpoint URL.
+ :param token: The access or refresh token to be revoked (string).
+ :param token_type_hint: ``"access_token"`` (default) or
+ ``"refresh_token"``. This is optional and if you wish to not pass it you
+ must provide ``token_type_hint=None``.
+ :param body: Existing request body (URL encoded string) to embed
+ parameters into. This may contain extra parameters. Default ''.
+ :param callback: A jsonp callback such as ``package.callback`` to be invoked
+ upon receiving the response. Note that it should not include a () suffix.
+ :param kwargs: Additional parameters to include in the request.
+ :returns: The prepared request tuple with (url, headers, body).
+ :raises: InsecureTransportError if ``revocation_url`` fails the
+ secure-transport check.
+
+ Note that JSONP request may use GET requests as the parameters will
+ be added to the request URL query as opposed to the request body.
+
+ An example of a revocation request
+
+ .. code-block:: http
+
+ POST /revoke HTTP/1.1
+ Host: server.example.com
+ Content-Type: application/x-www-form-urlencoded
+ Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
+
+ token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
+
+ An example of a jsonp revocation request
+
+ .. code-block:: http
+
+ GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
+ Host: server.example.com
+ Content-Type: application/x-www-form-urlencoded
+ Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
+
+ and an error response
+
+ .. code-block:: javascript
+
+ package.myCallback({"error":"unsupported_token_type"});
+
+ Note that these requests usually require client credentials, client_id in
+ the case for public clients and provider specific authentication
+ credentials for confidential clients.
+ """
+ if not is_secure_transport(revocation_url):
+ raise InsecureTransportError()
+
+ # All request construction is delegated to the module-level helper of
+ # the same name.
+ return prepare_token_revocation_request(revocation_url, token,
+ token_type_hint=token_type_hint, body=body, callback=callback,
+ **kwargs)
+
+ def parse_request_body_response(self, body, scope=None, **kwargs):
+ """Parse the JSON response body.
+
+ If the access token request is valid and authorized, the
+ authorization server issues an access token as described in
+ `Section 5.1`_. A refresh token SHOULD NOT be included. If the request
+ failed client authentication or is invalid, the authorization server
+ returns an error response as described in `Section 5.2`_.
+
+ :param body: The response body from the token request.
+ :param scope: Scopes originally requested. If none is provided, the ones
+ provided in the constructor are used.
+ :return: Dictionary of token parameters.
+ :raises: Warning if scope has changed. :py:class:`oauthlib.oauth2.errors.OAuth2Error`
+ if response is invalid.
+
+ These responses are json encoded and could easily be parsed without
+ the assistance of OAuthLib. However, there are a few subtle issues
+ to be aware of regarding the response which are helpfully addressed
+ through the raising of various errors.
+
+ A successful response should always contain
+
+ **access_token**
+ The access token issued by the authorization server. Often
+ a random string.
+
+ **token_type**
+ The type of the token issued as described in `Section 7.1`_.
+ Commonly ``Bearer``.
+
+ While it is not mandated it is recommended that the provider include
+
+ **expires_in**
+ The lifetime in seconds of the access token. For
+ example, the value "3600" denotes that the access token will
+ expire in one hour from the time the response was generated.
+ If omitted, the authorization server SHOULD provide the
+ expiration time via other means or document the default value.
+
+ **scope**
+ Providers may supply this in all responses but are required to only
+ if it has changed since the authorization request.
+
+ .. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
+ .. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
+ .. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
+ """
+ # Pass the originally requested scopes so a scope change in the
+ # response can be detected during parsing.
+ scope = self.scope if scope is None else scope
+ self.token = parse_token_response(body, scope=scope)
+ # Mirror token fields (access_token, expires_in, ...) onto this client.
+ self.populate_token_attributes(self.token)
+ return self.token
+
+ def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
+ """Prepare an access token request, using a refresh token.
+
+ If the authorization server issued a refresh token to the client, the
+ client makes a refresh request to the token endpoint by adding the
+ following parameters using the `application/x-www-form-urlencoded`
+ format in the HTTP request entity-body:
+
+ :param body: Existing request body (URL encoded string) to embed
+ parameters into. This may contain extra parameters. Default ''.
+ :param refresh_token: REQUIRED. The refresh token issued to the client.
+ If none is provided, the token stored on this client
+ (``self.refresh_token``) is used.
+ :param scope: OPTIONAL. The scope of the access request as described by
+ Section 3.3. The requested scope MUST NOT include any scope
+ not originally granted by the resource owner, and if omitted is
+ treated as equal to the scope originally granted by the
+ resource owner. Note that if none is provided, the ones provided
+ in the constructor are used if any.
+ :param kwargs: Additional parameters to embed in the request body.
+ """
+ refresh_token = refresh_token or self.refresh_token
+ scope = self.scope if scope is None else scope
+ # self.refresh_token_key is used as the grant type value; presumably
+ # 'refresh_token', defined on the base client — confirm there.
+ return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
+ refresh_token=refresh_token, **kwargs)
+
+ def _add_bearer_token(self, uri, http_method='GET', body=None,
+ headers=None, token_placement=None):
+ """Add a bearer token to the request uri, body or authorization header.
+
+ :param uri: Request URI; the token is appended to it when
+ ``token_placement`` is ``URI_QUERY``.
+ :param http_method: Unused by this method; presumably kept for
+ signature parity with ``_add_mac_token``.
+ :param body: Request body; the token is embedded into it when
+ ``token_placement`` is ``BODY``.
+ :param headers: Request headers; an authorization header carrying the
+ token is added when ``token_placement`` is ``AUTH_HEADER``.
+ :param token_placement: One of ``AUTH_HEADER``, ``URI_QUERY`` or ``BODY``.
+ :returns: The (uri, headers, body) triple with the token inserted.
+ :raises ValueError: If ``token_placement`` is not one of the above.
+ """
+ if token_placement == AUTH_HEADER:
+ headers = tokens.prepare_bearer_headers(self.access_token, headers)
+
+ elif token_placement == URI_QUERY:
+ uri = tokens.prepare_bearer_uri(self.access_token, uri)
+
+ elif token_placement == BODY:
+ body = tokens.prepare_bearer_body(self.access_token, body)
+
+ else:
+ raise ValueError("Invalid token placement.")
+ return uri, headers, body
+
+ def create_code_verifier(self, length):
+ """Create PKCE **code_verifier** used in computing **code_challenge**.
+ See `RFC7636 Section 4.1`_
+
+ :param length: REQUIRED. The length of the code_verifier.
+
+ The client first creates a code verifier, "code_verifier", for each
+ OAuth 2.0 [RFC6749] Authorization Request, in the following manner:
+
+ .. code-block:: text
+
+ code_verifier = high-entropy cryptographic random STRING using the
+ unreserved characters [A-Z] / [a-z] / [0-9] / "-" / "." / "_" / "~"
+ from Section 2.3 of [RFC3986], with a minimum length of 43 characters
+ and a maximum length of 128 characters.
+
+ .. _`RFC7636 Section 4.1`: https://tools.ietf.org/html/rfc7636#section-4.1
+ """
+ code_verifier = None
+
+ if not length >= 43:
+ raise ValueError("Length must be greater than or equal to 43")
+
+ if not length <= 128:
+ raise ValueError("Length must be less than or equal to 128")
+
+ allowed_characters = re.compile('^[A-Zaa-z0-9-._~]')
+ code_verifier = secrets.token_urlsafe(length)
+
+ if not re.search(allowed_characters, code_verifier):
+ raise ValueError("code_verifier contains invalid characters")
+
+ self.code_verifier = code_verifier
+
+ return code_verifier
+
+ def create_code_challenge(self, code_verifier, code_challenge_method=None):
+ """Create PKCE **code_challenge** derived from the **code_verifier**.
+ See `RFC7636 Section 4.2`_
+
+ :param code_verifier: REQUIRED. The **code_verifier** generated from `create_code_verifier()`.
+ :param code_challenge_method: OPTIONAL. The method used to derive the **code_challenge**. Acceptable values include `S256`. DEFAULT is `plain`.
+
+ The client then creates a code challenge derived from the code
+ verifier by using one of the following transformations on the code
+ verifier::
+
+ plain
+ code_challenge = code_verifier
+ S256
+ code_challenge = BASE64URL-ENCODE(SHA256(ASCII(code_verifier)))
+
+ If the client is capable of using `S256`, it MUST use `S256`, as
+ `S256` is Mandatory To Implement (MTI) on the server. Clients are
+ permitted to use `plain` only if they cannot support `S256` for some
+ technical reason and know via out-of-band configuration that the
+ server supports `plain`.
+
+ The plain transformation is for compatibility with existing
+ deployments and for constrained environments that can't use the S256 transformation.
+
+ .. _`RFC7636 Section 4.2`: https://tools.ietf.org/html/rfc7636#section-4.2
+ """
+ code_challenge = None
+
+ if code_verifier == None:
+ raise ValueError("Invalid code_verifier")
+
+ if code_challenge_method == None:
+ code_challenge_method = "plain"
+ self.code_challenge_method = code_challenge_method
+ code_challenge = code_verifier
+ self.code_challenge = code_challenge
+
+ if code_challenge_method == "S256":
+ h = hashlib.sha256()
+ h.update(code_verifier.encode(encoding='ascii'))
+ sha256_val = h.digest()
+ code_challenge = bytes.decode(base64.urlsafe_b64encode(sha256_val))
+ # replace '+' with '-', '/' with '_', and remove trailing '='
+ code_challenge = code_challenge.replace("+", "-").replace("/", "_").replace("=", "")
+ self.code_challenge = code_challenge
+
+ return code_challenge
+
+ def _add_mac_token(self, uri, http_method='GET', body=None,
+ headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
+ """Add a MAC token to the request authorization header.
+
+ Warning: MAC token support is experimental as the spec is not yet stable.
+
+ :param token_placement: Must be ``AUTH_HEADER``; MAC tokens are only
+ supported in the authorization header.
+ :param ext: Extension value forwarded to ``tokens.prepare_mac_header``.
+ :param kwargs: Extra arguments forwarded to ``tokens.prepare_mac_header``.
+ :returns: The (uri, headers, body) triple with the MAC header added.
+ :raises ValueError: If ``token_placement`` is not ``AUTH_HEADER``.
+ """
+ if token_placement != AUTH_HEADER:
+ raise ValueError("Invalid token placement.")
+
+ # The request is signed with the mac_key / mac_algorithm obtained from
+ # the token response (see populate_token_attributes).
+ headers = tokens.prepare_mac_header(self.access_token, uri,
+ self.mac_key, http_method, headers=headers, body=body, ext=ext,
+ hash_algorithm=self.mac_algorithm, **kwargs)
+ return uri, headers, body
+
+ def _populate_attributes(self, response):
+ """Deprecated alias for :meth:`populate_token_attributes`."""
+ warnings.warn("Please switch to the public method "
+ "populate_token_attributes.", DeprecationWarning)
+ return self.populate_token_attributes(response)
+
+ def populate_code_attributes(self, response):
+ """Add attributes from an auth code response to self.
+
+ :param response: Mapping of authorization response parameters; only a
+ present ``code`` entry is copied onto the client.
+ """
+ if 'code' in response:
+ self.code = response.get('code')
+
+ def populate_token_attributes(self, response):
+ """Add attributes from a token exchange response to self."""
+
+ if 'access_token' in response:
+ self.access_token = response.get('access_token')
+
+ if 'refresh_token' in response:
+ self.refresh_token = response.get('refresh_token')
+
+ if 'token_type' in response:
+ self.token_type = response.get('token_type')
+
+ if 'expires_in' in response:
+ self.expires_in = response.get('expires_in')
+ self._expires_at = time.time() + int(self.expires_in)
+
+ if 'expires_at' in response:
+ try:
+ self._expires_at = int(response.get('expires_at'))
+ except:
+ self._expires_at = None
+
+ if 'mac_key' in response:
+ self.mac_key = response.get('mac_key')
+
+ if 'mac_algorithm' in response:
+ self.mac_algorithm = response.get('mac_algorithm')
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/legacy_application.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/legacy_application.py
new file mode 100644
index 0000000000..9920981d2c
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/legacy_application.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+from ..parameters import prepare_token_request
+from .base import Client
+
+
+class LegacyApplicationClient(Client):
+
+ """A public client using the resource owner password and username directly.
+
+ The resource owner password credentials grant type is suitable in
+ cases where the resource owner has a trust relationship with the
+ client, such as the device operating system or a highly privileged
+ application. The authorization server should take special care when
+ enabling this grant type, and only allow it when other flows are not
+ viable.
+
+ The grant type is suitable for clients capable of obtaining the
+ resource owner's credentials (username and password, typically using
+ an interactive form). It is also used to migrate existing clients
+ using direct authentication schemes such as HTTP Basic or Digest
+ authentication to OAuth by converting the stored credentials to an
+ access token.
+
+ The method through which the client obtains the resource owner
+ credentials is beyond the scope of this specification. The client
+ MUST discard the credentials once an access token has been obtained.
+ """
+
+ # Resource Owner Password Credentials grant (RFC6749 Section 4.3).
+ grant_type = 'password'
+
+ def __init__(self, client_id, **kwargs):
+ """Initialize with the client identifier; see ``Client.__init__``."""
+ super().__init__(client_id, **kwargs)
+
+ def prepare_request_body(self, username, password, body='', scope=None,
+ include_client_id=False, **kwargs):
+ """Add the resource owner password and username to the request body.
+
+ The client makes a request to the token endpoint by adding the
+ following parameters using the "application/x-www-form-urlencoded"
+ format per `Appendix B`_ in the HTTP request entity-body:
+
+ :param username: The resource owner username.
+ :param password: The resource owner password.
+ :param body: Existing request body (URL encoded string) to embed parameters
+ into. This may contain extra parameters. Default ''.
+ :param scope: The scope of the access request as described by
+ `Section 3.3`_.
+ :param include_client_id: `True` to send the `client_id` in the
+ body of the upstream request. This is required
+ if the client is not authenticating with the
+ authorization server as described in
+ `Section 3.2.1`_. False otherwise (default).
+ :type include_client_id: Boolean
+ :param kwargs: Extra credentials to include in the token request.
+
+ If the client type is confidential or the client was issued client
+ credentials (or assigned other authentication requirements), the
+ client MUST authenticate with the authorization server as described
+ in `Section 3.2.1`_.
+
+ The prepared body will include all provided credentials as well as
+ the ``grant_type`` parameter set to ``password``::
+
+ >>> from oauthlib.oauth2 import LegacyApplicationClient
+ >>> client = LegacyApplicationClient('your_id')
+ >>> client.prepare_request_body(username='foo', password='bar', scope=['hello', 'world'])
+ 'grant_type=password&username=foo&scope=hello+world&password=bar'
+
+ .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
+ .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
+ .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
+ """
+ # NOTE(review): client_id / include_client_id are forwarded to
+ # prepare_token_request, which presumably decides whether client_id
+ # actually lands in the body — confirm in parameters.py.
+ kwargs['client_id'] = self.client_id
+ kwargs['include_client_id'] = include_client_id
+ scope = self.scope if scope is None else scope
+ return prepare_token_request(self.grant_type, body=body, username=username,
+ password=password, scope=scope, **kwargs)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/mobile_application.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/mobile_application.py
new file mode 100644
index 0000000000..b10b41ced3
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/mobile_application.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+from ..parameters import parse_implicit_response, prepare_grant_uri
+from .base import Client
+
+
+class MobileApplicationClient(Client):
+
+ """A public client utilizing the implicit code grant workflow.
+
+ A user-agent-based application is a public client in which the
+ client code is downloaded from a web server and executes within a
+ user-agent (e.g. web browser) on the device used by the resource
+ owner. Protocol data and credentials are easily accessible (and
+ often visible) to the resource owner. Since such applications
+ reside within the user-agent, they can make seamless use of the
+ user-agent capabilities when requesting authorization.
+
+ The implicit grant type is used to obtain access tokens (it does not
+ support the issuance of refresh tokens) and is optimized for public
+ clients known to operate a particular redirection URI. These clients
+ are typically implemented in a browser using a scripting language
+ such as JavaScript.
+
+ As a redirection-based flow, the client must be capable of
+ interacting with the resource owner's user-agent (typically a web
+ browser) and capable of receiving incoming requests (via redirection)
+ from the authorization server.
+
+ Unlike the authorization code grant type in which the client makes
+ separate requests for authorization and access token, the client
+ receives the access token as the result of the authorization request.
+
+ The implicit grant type does not include client authentication, and
+ relies on the presence of the resource owner and the registration of
+ the redirection URI. Because the access token is encoded into the
+ redirection URI, it may be exposed to the resource owner and other
+ applications residing on the same device.
+ """
+
+ # Implicit grant: the token is delivered directly in the redirect.
+ response_type = 'token'
+
+ def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
+ state=None, **kwargs):
+ """Prepare the implicit grant request URI.
+
+ The client constructs the request URI by adding the following
+ parameters to the query component of the authorization endpoint URI
+ using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
+
+ :param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
+ and it should have been registered with the OAuth
+ provider prior to use. As described in `Section 3.1.2`_.
+
+ :param scope: OPTIONAL. The scope of the access request as described by
+ `Section 3.3`_. These may be any string but are commonly
+ URIs or various categories such as ``videos`` or ``documents``.
+
+ :param state: RECOMMENDED. An opaque value used by the client to maintain
+ state between the request and callback. The authorization
+ server includes this value when redirecting the user-agent back
+ to the client. The parameter SHOULD be used for preventing
+ cross-site request forgery as described in `Section 10.12`_.
+
+ :param kwargs: Extra arguments to include in the request URI.
+
+ In addition to supplied parameters, OAuthLib will append the ``client_id``
+ that was provided in the constructor as well as the mandatory ``response_type``
+ argument, set to ``token``::
+
+ >>> from oauthlib.oauth2 import MobileApplicationClient
+ >>> client = MobileApplicationClient('your_id')
+ >>> client.prepare_request_uri('https://example.com')
+ 'https://example.com?client_id=your_id&response_type=token'
+ >>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
+ 'https://example.com?client_id=your_id&response_type=token&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
+ >>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
+ 'https://example.com?client_id=your_id&response_type=token&scope=profile+pictures'
+ >>> client.prepare_request_uri('https://example.com', foo='bar')
+ 'https://example.com?client_id=your_id&response_type=token&foo=bar'
+
+ .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
+ .. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
+ .. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
+ .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
+ .. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
+ """
+ scope = self.scope if scope is None else scope
+ return prepare_grant_uri(uri, self.client_id, self.response_type,
+ redirect_uri=redirect_uri, state=state, scope=scope, **kwargs)
+
+ def parse_request_uri_response(self, uri, state=None, scope=None):
+ """Parse the response URI fragment.
+
+ If the resource owner grants the access request, the authorization
+ server issues an access token and delivers it to the client by adding
+ the following parameters to the fragment component of the redirection
+ URI using the "application/x-www-form-urlencoded" format:
+
+ :param uri: The callback URI that resulted from the user being redirected
+ back from the provider to you, the client.
+ :param state: The state provided in the authorization request.
+ :param scope: The scopes provided in the authorization request.
+ :return: Dictionary of token parameters.
+ :raises: OAuth2Error if response is invalid.
+
+ A successful response should always contain
+
+ **access_token**
+ The access token issued by the authorization server. Often
+ a random string.
+
+ **token_type**
+ The type of the token issued as described in `Section 7.1`_.
+ Commonly ``Bearer``.
+
+ **state**
+ If you provided the state parameter in the authorization phase, then
+ the provider is required to include that exact state value in the
+ response.
+
+ While it is not mandated it is recommended that the provider include
+
+ **expires_in**
+ The lifetime in seconds of the access token. For
+ example, the value "3600" denotes that the access token will
+ expire in one hour from the time the response was generated.
+ If omitted, the authorization server SHOULD provide the
+ expiration time via other means or document the default value.
+
+ **scope**
+ Providers may supply this in all responses but are required to only
+ if it has changed since the authorization request.
+
+ A few example responses can be seen below::
+
+ >>> response_uri = 'https://example.com/callback#access_token=sdlfkj452&state=ss345asyht&token_type=Bearer&scope=hello+world'
+ >>> from oauthlib.oauth2 import MobileApplicationClient
+ >>> client = MobileApplicationClient('your_id')
+ >>> client.parse_request_uri_response(response_uri)
+ {
+ 'access_token': 'sdlfkj452',
+ 'token_type': 'Bearer',
+ 'state': 'ss345asyht',
+ 'scope': [u'hello', u'world']
+ }
+ >>> client.parse_request_uri_response(response_uri, state='other')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "oauthlib/oauth2/rfc6749/__init__.py", line 598, in parse_request_uri_response
+ **scope**
+ File "oauthlib/oauth2/rfc6749/parameters.py", line 197, in parse_implicit_response
+ raise ValueError("Mismatching or missing state in params.")
+ ValueError: Mismatching or missing state in params.
+ >>> def alert_scope_changed(message, old, new):
+ ... print(message, old, new)
+ ...
+ >>> oauthlib.signals.scope_changed.connect(alert_scope_changed)
+ >>> client.parse_request_body_response(response_body, scope=['other'])
+ ('Scope has changed from "other" to "hello world".', ['other'], ['hello', 'world'])
+
+ .. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
+ .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
+ """
+ scope = self.scope if scope is None else scope
+ self.token = parse_implicit_response(uri, state=state, scope=scope)
+ # Mirror token fields (access_token, expires_in, ...) onto this client.
+ self.populate_token_attributes(self.token)
+ return self.token
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/service_application.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/service_application.py
new file mode 100644
index 0000000000..8fb173776d
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/service_application.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import time
+
+from oauthlib.common import to_unicode
+
+from ..parameters import prepare_token_request
+from .base import Client
+
+
+class ServiceApplicationClient(Client):
+ """A public client utilizing the JWT bearer grant.
+
+ JWT bearer tokens can be used to request an access token when a client
+ wishes to utilize an existing trust relationship, expressed through the
+ semantics of (and digital signature or keyed message digest calculated
+ over) the JWT, without a direct user approval step at the authorization
+ server.
+
+ This grant type does not involve an authorization step. It may be
+ used by both public and confidential clients.
+ """
+
+ grant_type = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
+
+ def __init__(self, client_id, private_key=None, subject=None, issuer=None,
+ audience=None, **kwargs):
+ """Initialize a JWT client with defaults for implicit use later.
+
+ :param client_id: Client identifier given by the OAuth provider upon
+ registration.
+
+ :param private_key: Private key used for signing and encrypting.
+ Must be given as a string.
+
+ :param subject: The principal that is the subject of the JWT, i.e.
+ which user is the token requested on behalf of.
+ For example, ``foo@example.com``.
+
+ :param issuer: The JWT MUST contain an "iss" (issuer) claim that
+ contains a unique identifier for the entity that issued
+ the JWT. For example, ``your-client@provider.com``.
+
+ :param audience: A value identifying the authorization server as an
+ intended audience, e.g.
+ ``https://provider.com/oauth2/token``.
+
+ :param kwargs: Additional arguments to pass to base client, such as
+ state and token. See ``Client.__init__.__doc__`` for
+ details.
+ """
+ super().__init__(client_id, **kwargs)
+ # Stored as per-call defaults for prepare_request_body.
+ self.private_key = private_key
+ self.subject = subject
+ self.issuer = issuer
+ self.audience = audience
+
+ def prepare_request_body(self,
+ private_key=None,
+ subject=None,
+ issuer=None,
+ audience=None,
+ expires_at=None,
+ issued_at=None,
+ extra_claims=None,
+ body='',
+ scope=None,
+ include_client_id=False,
+ **kwargs):
+ """Create and add a JWT assertion to the request body.
+
+ :param private_key: Private key used for signing and encrypting.
+ Must be given as a string.
+
+ :param subject: (sub) The principal that is the subject of the JWT,
+ i.e. which user is the token requested on behalf of.
+ For example, ``foo@example.com``.
+
+ :param issuer: (iss) The JWT MUST contain an "iss" (issuer) claim that
+ contains a unique identifier for the entity that issued
+ the JWT. For example, ``your-client@provider.com``.
+
+ :param audience: (aud) A value identifying the authorization server as an
+ intended audience, e.g.
+ ``https://provider.com/oauth2/token``.
+
+ :param expires_at: A unix expiration timestamp for the JWT. Defaults
+ to an hour from now, i.e. ``time.time() + 3600``.
+
+ :param issued_at: A unix timestamp of when the JWT was created.
+ Defaults to now, i.e. ``time.time()``.
+
+ :param extra_claims: A dict of additional claims to include in the JWT.
+
+ :param body: Existing request body (URL encoded string) to embed parameters
+ into. This may contain extra parameters. Default ''.
+
+ :param scope: The scope of the access request.
+
+ :param include_client_id: `True` to send the `client_id` in the
+ body of the upstream request. This is required
+ if the client is not authenticating with the
+ authorization server as described in
+ `Section 3.2.1`_. False otherwise (default).
+ :type include_client_id: Boolean
+
+ :param not_before: A unix timestamp after which the JWT may be used.
+ Not included unless provided. *
+
+ :param jwt_id: A unique JWT token identifier. Not included unless
+ provided. *
+
+ :param kwargs: Extra credentials to include in the token request.
+
+ Parameters marked with a `*` above are not explicit arguments in the
+ function signature, but are specially documented arguments for items
+ appearing in the generic `**kwargs` keyworded input.
+
+ The "scope" parameter may be used, as defined in the Assertion
+ Framework for OAuth 2.0 Client Authentication and Authorization Grants
+ [I-D.ietf-oauth-assertions] specification, to indicate the requested
+ scope.
+
+ Authentication of the client is optional, as described in
+ `Section 3.2.1`_ of OAuth 2.0 [RFC6749] and consequently, the
+ "client_id" is only needed when a form of client authentication that
+ relies on the parameter is used.
+
+ The following non-normative example demonstrates an Access Token
+ Request with a JWT as an authorization grant (with extra line breaks
+ for display purposes only):
+
+ .. code-block: http
+
+ POST /token.oauth2 HTTP/1.1
+ Host: as.example.com
+ Content-Type: application/x-www-form-urlencoded
+
+ grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer
+ &assertion=eyJhbGciOiJFUzI1NiJ9.
+ eyJpc3Mi[...omitted for brevity...].
+ J9l-ZhwP[...omitted for brevity...]
+
+ .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
+ """
+ # Imported locally, presumably so PyJWT is only required when this
+ # grant type is actually used — confirm against packaging metadata.
+ import jwt
+
+ key = private_key or self.private_key
+ if not key:
+ raise ValueError('An encryption key must be supplied to make JWT'
+ ' token requests.')
+ claim = {
+ 'iss': issuer or self.issuer,
+ 'aud': audience or self.audience,
+ 'sub': subject or self.subject,
+ 'exp': int(expires_at or time.time() + 3600),
+ 'iat': int(issued_at or time.time()),
+ }
+
+ # iss/aud/sub are mandatory; either the call or the constructor must
+ # have supplied each of them.
+ for attr in ('iss', 'aud', 'sub'):
+ if claim[attr] is None:
+ raise ValueError(
+ 'Claim must include %s but none was given.' % attr)
+
+ if 'not_before' in kwargs:
+ claim['nbf'] = kwargs.pop('not_before')
+
+ if 'jwt_id' in kwargs:
+ claim['jti'] = kwargs.pop('jwt_id')
+
+ claim.update(extra_claims or {})
+
+ # NOTE(review): the signing algorithm is hard-coded to RS256; the
+ # supplied key must therefore be an RSA private key.
+ assertion = jwt.encode(claim, key, 'RS256')
+ assertion = to_unicode(assertion)
+
+ kwargs['client_id'] = self.client_id
+ kwargs['include_client_id'] = include_client_id
+ scope = self.scope if scope is None else scope
+ return prepare_token_request(self.grant_type,
+ body=body,
+ assertion=assertion,
+ scope=scope,
+ **kwargs)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/web_application.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/web_application.py
new file mode 100644
index 0000000000..50890fbf8a
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/clients/web_application.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import warnings
+
+from ..parameters import (
+ parse_authorization_code_response, prepare_grant_uri,
+ prepare_token_request,
+)
+from .base import Client
+
+
+class WebApplicationClient(Client):
+
+ """A client utilizing the authorization code grant workflow.
+
+ A web application is a confidential client running on a web
+ server. Resource owners access the client via an HTML user
+ interface rendered in a user-agent on the device used by the
+ resource owner. The client credentials as well as any access
+ token issued to the client are stored on the web server and are
+ not exposed to or accessible by the resource owner.
+
+ The authorization code grant type is used to obtain both access
+ tokens and refresh tokens and is optimized for confidential clients.
+ As a redirection-based flow, the client must be capable of
+ interacting with the resource owner's user-agent (typically a web
+ browser) and capable of receiving incoming requests (via redirection)
+ from the authorization server.
+ """
+
+ grant_type = 'authorization_code'
+
+ def __init__(self, client_id, code=None, **kwargs):
+ super().__init__(client_id, **kwargs)
+ self.code = code
+
+ def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
+ state=None, code_challenge=None, code_challenge_method='plain', **kwargs):
+ """Prepare the authorization code request URI
+
+ The client constructs the request URI by adding the following
+ parameters to the query component of the authorization endpoint URI
+ using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
+
+ :param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
+ and it should have been registered with the OAuth
+ provider prior to use. As described in `Section 3.1.2`_.
+
+ :param scope: OPTIONAL. The scope of the access request as described by
+                      `Section 3.3`_. These may be any string but are commonly
+ URIs or various categories such as ``videos`` or ``documents``.
+
+ :param state: RECOMMENDED. An opaque value used by the client to maintain
+ state between the request and callback. The authorization
+ server includes this value when redirecting the user-agent back
+ to the client. The parameter SHOULD be used for preventing
+ cross-site request forgery as described in `Section 10.12`_.
+
+ :param code_challenge: OPTIONAL. PKCE parameter. REQUIRED if PKCE is enforced.
+ A challenge derived from the code_verifier that is sent in the
+ authorization request, to be verified against later.
+
+ :param code_challenge_method: OPTIONAL. PKCE parameter. A method that was used to derive code challenge.
+ Defaults to "plain" if not present in the request.
+
+ :param kwargs: Extra arguments to include in the request URI.
+
+ In addition to supplied parameters, OAuthLib will append the ``client_id``
+ that was provided in the constructor as well as the mandatory ``response_type``
+ argument, set to ``code``::
+
+ >>> from oauthlib.oauth2 import WebApplicationClient
+ >>> client = WebApplicationClient('your_id')
+ >>> client.prepare_request_uri('https://example.com')
+ 'https://example.com?client_id=your_id&response_type=code'
+ >>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
+ 'https://example.com?client_id=your_id&response_type=code&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
+ >>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
+ 'https://example.com?client_id=your_id&response_type=code&scope=profile+pictures'
+ >>> client.prepare_request_uri('https://example.com', code_challenge='kjasBS523KdkAILD2k78NdcJSk2k3KHG6')
+ 'https://example.com?client_id=your_id&response_type=code&code_challenge=kjasBS523KdkAILD2k78NdcJSk2k3KHG6'
+ >>> client.prepare_request_uri('https://example.com', code_challenge_method='S256')
+ 'https://example.com?client_id=your_id&response_type=code&code_challenge_method=S256'
+ >>> client.prepare_request_uri('https://example.com', foo='bar')
+ 'https://example.com?client_id=your_id&response_type=code&foo=bar'
+
+ .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
+ .. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
+ .. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
+ .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
+ .. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
+ """
+ scope = self.scope if scope is None else scope
+ return prepare_grant_uri(uri, self.client_id, 'code',
+ redirect_uri=redirect_uri, scope=scope, state=state, code_challenge=code_challenge,
+ code_challenge_method=code_challenge_method, **kwargs)
+
+ def prepare_request_body(self, code=None, redirect_uri=None, body='',
+ include_client_id=True, code_verifier=None, **kwargs):
+ """Prepare the access token request body.
+
+ The client makes a request to the token endpoint by adding the
+ following parameters using the "application/x-www-form-urlencoded"
+ format in the HTTP request entity-body:
+
+ :param code: REQUIRED. The authorization code received from the
+ authorization server.
+
+ :param redirect_uri: REQUIRED, if the "redirect_uri" parameter was included in the
+ authorization request as described in `Section 4.1.1`_, and their
+ values MUST be identical.
+
+ :param body: Existing request body (URL encoded string) to embed parameters
+ into. This may contain extra parameters. Default ''.
+
+ :param include_client_id: `True` (default) to send the `client_id` in the
+ body of the upstream request. This is required
+ if the client is not authenticating with the
+ authorization server as described in `Section 3.2.1`_.
+ :type include_client_id: Boolean
+
+ :param code_verifier: OPTIONAL. A cryptographically random string that is used to correlate the
+ authorization request to the token request.
+
+ :param kwargs: Extra parameters to include in the token request.
+
+ In addition OAuthLib will add the ``grant_type`` parameter set to
+ ``authorization_code``.
+
+ If the client type is confidential or the client was issued client
+ credentials (or assigned other authentication requirements), the
+ client MUST authenticate with the authorization server as described
+ in `Section 3.2.1`_::
+
+ >>> from oauthlib.oauth2 import WebApplicationClient
+ >>> client = WebApplicationClient('your_id')
+ >>> client.prepare_request_body(code='sh35ksdf09sf')
+ 'grant_type=authorization_code&code=sh35ksdf09sf'
+ >>> client.prepare_request_body(code_verifier='KB46DCKJ873NCGXK5GD682NHDKK34GR')
+ 'grant_type=authorization_code&code_verifier=KB46DCKJ873NCGXK5GD682NHDKK34GR'
+ >>> client.prepare_request_body(code='sh35ksdf09sf', foo='bar')
+ 'grant_type=authorization_code&code=sh35ksdf09sf&foo=bar'
+
+        `Section 3.2.1`_ also states:
+ In the "authorization_code" "grant_type" request to the token
+ endpoint, an unauthenticated client MUST send its "client_id" to
+ prevent itself from inadvertently accepting a code intended for a
+ client with a different "client_id". This protects the client from
+ substitution of the authentication code. (It provides no additional
+ security for the protected resource.)
+
+ .. _`Section 4.1.1`: https://tools.ietf.org/html/rfc6749#section-4.1.1
+ .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
+ """
+ code = code or self.code
+ if 'client_id' in kwargs:
+ warnings.warn("`client_id` has been deprecated in favor of "
+ "`include_client_id`, a boolean value which will "
+ "include the already configured `self.client_id`.",
+ DeprecationWarning)
+ if kwargs['client_id'] != self.client_id:
+ raise ValueError("`client_id` was supplied as an argument, but "
+ "it does not match `self.client_id`")
+
+ kwargs['client_id'] = self.client_id
+ kwargs['include_client_id'] = include_client_id
+ return prepare_token_request(self.grant_type, code=code, body=body,
+ redirect_uri=redirect_uri, code_verifier=code_verifier, **kwargs)
+
+ def parse_request_uri_response(self, uri, state=None):
+ """Parse the URI query for code and state.
+
+ If the resource owner grants the access request, the authorization
+ server issues an authorization code and delivers it to the client by
+ adding the following parameters to the query component of the
+ redirection URI using the "application/x-www-form-urlencoded" format:
+
+ :param uri: The callback URI that resulted from the user being redirected
+ back from the provider to you, the client.
+ :param state: The state provided in the authorization request.
+
+ **code**
+ The authorization code generated by the authorization server.
+ The authorization code MUST expire shortly after it is issued
+ to mitigate the risk of leaks. A maximum authorization code
+ lifetime of 10 minutes is RECOMMENDED. The client MUST NOT
+ use the authorization code more than once. If an authorization
+ code is used more than once, the authorization server MUST deny
+ the request and SHOULD revoke (when possible) all tokens
+ previously issued based on that authorization code.
+ The authorization code is bound to the client identifier and
+ redirection URI.
+
+ **state**
+ If the "state" parameter was present in the authorization request.
+
+ This method is mainly intended to enforce strict state checking with
+ the added benefit of easily extracting parameters from the URI::
+
+ >>> from oauthlib.oauth2 import WebApplicationClient
+ >>> client = WebApplicationClient('your_id')
+ >>> uri = 'https://example.com/callback?code=sdfkjh345&state=sfetw45'
+ >>> client.parse_request_uri_response(uri, state='sfetw45')
+ {'state': 'sfetw45', 'code': 'sdfkjh345'}
+ >>> client.parse_request_uri_response(uri, state='other')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "oauthlib/oauth2/rfc6749/__init__.py", line 357, in parse_request_uri_response
+ back from the provider to you, the client.
+ File "oauthlib/oauth2/rfc6749/parameters.py", line 153, in parse_authorization_code_response
+ raise MismatchingStateError()
+ oauthlib.oauth2.rfc6749.errors.MismatchingStateError
+ """
+ response = parse_authorization_code_response(uri, state=state)
+ self.populate_code_attributes(response)
+ return response
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/__init__.py
new file mode 100644
index 0000000000..1695b41b66
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/__init__.py
@@ -0,0 +1,17 @@
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+from .authorization import AuthorizationEndpoint
+from .introspect import IntrospectEndpoint
+from .metadata import MetadataEndpoint
+from .pre_configured import (
+ BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
+ Server, WebApplicationServer,
+)
+from .resource import ResourceEndpoint
+from .revocation import RevocationEndpoint
+from .token import TokenEndpoint
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/authorization.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/authorization.py
new file mode 100644
index 0000000000..71967865dc
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/authorization.py
@@ -0,0 +1,114 @@
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import logging
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import utils
+
+from .base import BaseEndpoint, catch_errors_and_unavailability
+
+log = logging.getLogger(__name__)
+
+
+class AuthorizationEndpoint(BaseEndpoint):
+
+ """Authorization endpoint - used by the client to obtain authorization
+ from the resource owner via user-agent redirection.
+
+ The authorization endpoint is used to interact with the resource
+ owner and obtain an authorization grant. The authorization server
+ MUST first verify the identity of the resource owner. The way in
+ which the authorization server authenticates the resource owner (e.g.
+ username and password login, session cookies) is beyond the scope of
+ this specification.
+
+ The endpoint URI MAY include an "application/x-www-form-urlencoded"
+ formatted (per `Appendix B`_) query component,
+ which MUST be retained when adding additional query parameters. The
+ endpoint URI MUST NOT include a fragment component::
+
+ https://example.com/path?query=component # OK
+ https://example.com/path?query=component#fragment # Not OK
+
+ Since requests to the authorization endpoint result in user
+ authentication and the transmission of clear-text credentials (in the
+ HTTP response), the authorization server MUST require the use of TLS
+ as described in Section 1.6 when sending requests to the
+ authorization endpoint::
+
+ # We will deny any request which URI schema is not with https
+
+ The authorization server MUST support the use of the HTTP "GET"
+ method [RFC2616] for the authorization endpoint, and MAY support the
+ use of the "POST" method as well::
+
+ # HTTP method is currently not enforced
+
+ Parameters sent without a value MUST be treated as if they were
+ omitted from the request. The authorization server MUST ignore
+ unrecognized request parameters. Request and response parameters
+ MUST NOT be included more than once::
+
+ # Enforced through the design of oauthlib.common.Request
+
+ .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
+ """
+
+ def __init__(self, default_response_type, default_token_type,
+ response_types):
+ BaseEndpoint.__init__(self)
+ self._response_types = response_types
+ self._default_response_type = default_response_type
+ self._default_token_type = default_token_type
+
+ @property
+ def response_types(self):
+ return self._response_types
+
+ @property
+ def default_response_type(self):
+ return self._default_response_type
+
+ @property
+ def default_response_type_handler(self):
+ return self.response_types.get(self.default_response_type)
+
+ @property
+ def default_token_type(self):
+ return self._default_token_type
+
+ @catch_errors_and_unavailability
+ def create_authorization_response(self, uri, http_method='GET', body=None,
+ headers=None, scopes=None, credentials=None):
+ """Extract response_type and route to the designated handler."""
+ request = Request(
+ uri, http_method=http_method, body=body, headers=headers)
+ request.scopes = scopes
+ # TODO: decide whether this should be a required argument
+ request.user = None # TODO: explain this in docs
+ for k, v in (credentials or {}).items():
+ setattr(request, k, v)
+ response_type_handler = self.response_types.get(
+ request.response_type, self.default_response_type_handler)
+ log.debug('Dispatching response_type %s request to %r.',
+ request.response_type, response_type_handler)
+ return response_type_handler.create_authorization_response(
+ request, self.default_token_type)
+
+ @catch_errors_and_unavailability
+ def validate_authorization_request(self, uri, http_method='GET', body=None,
+ headers=None):
+ """Extract response_type and route to the designated handler."""
+ request = Request(
+ uri, http_method=http_method, body=body, headers=headers)
+
+ request.scopes = utils.scope_to_list(request.scope)
+
+ response_type_handler = self.response_types.get(
+ request.response_type, self.default_response_type_handler)
+ return response_type_handler.validate_authorization_request(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/base.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/base.py
new file mode 100644
index 0000000000..3f239917cb
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/base.py
@@ -0,0 +1,113 @@
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import functools
+import logging
+
+from ..errors import (
+ FatalClientError, InvalidClientError, InvalidRequestError, OAuth2Error,
+ ServerError, TemporarilyUnavailableError, UnsupportedTokenTypeError,
+)
+
+log = logging.getLogger(__name__)
+
+
+class BaseEndpoint:
+
+ def __init__(self):
+ self._available = True
+ self._catch_errors = False
+ self._valid_request_methods = None
+
+ @property
+ def valid_request_methods(self):
+ return self._valid_request_methods
+
+ @valid_request_methods.setter
+ def valid_request_methods(self, valid_request_methods):
+ if valid_request_methods is not None:
+ valid_request_methods = [x.upper() for x in valid_request_methods]
+ self._valid_request_methods = valid_request_methods
+
+
+ @property
+ def available(self):
+ return self._available
+
+ @available.setter
+ def available(self, available):
+ self._available = available
+
+ @property
+ def catch_errors(self):
+ return self._catch_errors
+
+ @catch_errors.setter
+ def catch_errors(self, catch_errors):
+ self._catch_errors = catch_errors
+
+ def _raise_on_missing_token(self, request):
+ """Raise error on missing token."""
+ if not request.token:
+ raise InvalidRequestError(request=request,
+ description='Missing token parameter.')
+ def _raise_on_invalid_client(self, request):
+ """Raise on failed client authentication."""
+ if self.request_validator.client_authentication_required(request):
+ if not self.request_validator.authenticate_client(request):
+ log.debug('Client authentication failed, %r.', request)
+ raise InvalidClientError(request=request)
+ elif not self.request_validator.authenticate_client_id(request.client_id, request):
+ log.debug('Client authentication failed, %r.', request)
+ raise InvalidClientError(request=request)
+
+ def _raise_on_unsupported_token(self, request):
+ """Raise on unsupported tokens."""
+ if (request.token_type_hint and
+ request.token_type_hint in self.valid_token_types and
+ request.token_type_hint not in self.supported_token_types):
+ raise UnsupportedTokenTypeError(request=request)
+
+ def _raise_on_bad_method(self, request):
+ if self.valid_request_methods is None:
+ raise ValueError('Configure "valid_request_methods" property first')
+ if request.http_method.upper() not in self.valid_request_methods:
+ raise InvalidRequestError(request=request,
+ description=('Unsupported request method %s' % request.http_method.upper()))
+
+ def _raise_on_bad_post_request(self, request):
+ """Raise if invalid POST request received
+ """
+ if request.http_method.upper() == 'POST':
+ query_params = request.uri_query or ""
+ if query_params:
+ raise InvalidRequestError(request=request,
+ description=('URL query parameters are not allowed'))
+
+def catch_errors_and_unavailability(f):
+ @functools.wraps(f)
+ def wrapper(endpoint, uri, *args, **kwargs):
+ if not endpoint.available:
+ e = TemporarilyUnavailableError()
+ log.info('Endpoint unavailable, ignoring request %s.' % uri)
+ return {}, e.json, 503
+
+ if endpoint.catch_errors:
+ try:
+ return f(endpoint, uri, *args, **kwargs)
+ except OAuth2Error:
+ raise
+ except FatalClientError:
+ raise
+ except Exception as e:
+ error = ServerError()
+ log.warning(
+ 'Exception caught while processing request, %s.' % e)
+ return {}, error.json, 500
+ else:
+ return f(endpoint, uri, *args, **kwargs)
+ return wrapper
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/introspect.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/introspect.py
new file mode 100644
index 0000000000..3cc61e6627
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/introspect.py
@@ -0,0 +1,120 @@
+"""
+oauthlib.oauth2.rfc6749.endpoint.introspect
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An implementation of the OAuth 2.0 `Token Introspection`.
+
+.. _`Token Introspection`: https://tools.ietf.org/html/rfc7662
+"""
+import json
+import logging
+
+from oauthlib.common import Request
+
+from ..errors import OAuth2Error
+from .base import BaseEndpoint, catch_errors_and_unavailability
+
+log = logging.getLogger(__name__)
+
+
+class IntrospectEndpoint(BaseEndpoint):
+
+ """Introspect token endpoint.
+
+ This endpoint defines a method to query an OAuth 2.0 authorization
+ server to determine the active state of an OAuth 2.0 token and to
+ determine meta-information about this token. OAuth 2.0 deployments
+ can use this method to convey information about the authorization
+ context of the token from the authorization server to the protected
+ resource.
+
+ To prevent the values of access tokens from leaking into
+ server-side logs via query parameters, an authorization server
+ offering token introspection MAY disallow the use of HTTP GET on
+ the introspection endpoint and instead require the HTTP POST method
+ to be used at the introspection endpoint.
+ """
+
+ valid_token_types = ('access_token', 'refresh_token')
+ valid_request_methods = ('POST',)
+
+ def __init__(self, request_validator, supported_token_types=None):
+ BaseEndpoint.__init__(self)
+ self.request_validator = request_validator
+ self.supported_token_types = (
+ supported_token_types or self.valid_token_types)
+
+ @catch_errors_and_unavailability
+ def create_introspect_response(self, uri, http_method='POST', body=None,
+ headers=None):
+ """Create introspect valid or invalid response
+
+ If the authorization server is unable to determine the state
+ of the token without additional information, it SHOULD return
+ an introspection response indicating the token is not active
+ as described in Section 2.2.
+ """
+ resp_headers = {
+ 'Content-Type': 'application/json',
+ 'Cache-Control': 'no-store',
+ 'Pragma': 'no-cache',
+ }
+ request = Request(uri, http_method, body, headers)
+ try:
+ self.validate_introspect_request(request)
+ log.debug('Token introspect valid for %r.', request)
+ except OAuth2Error as e:
+ log.debug('Client error during validation of %r. %r.', request, e)
+ resp_headers.update(e.headers)
+ return resp_headers, e.json, e.status_code
+
+ claims = self.request_validator.introspect_token(
+ request.token,
+ request.token_type_hint,
+ request
+ )
+ if claims is None:
+ return resp_headers, json.dumps(dict(active=False)), 200
+ if "active" in claims:
+ claims.pop("active")
+ return resp_headers, json.dumps(dict(active=True, **claims)), 200
+
+ def validate_introspect_request(self, request):
+ """Ensure the request is valid.
+
+ The protected resource calls the introspection endpoint using
+ an HTTP POST request with parameters sent as
+ "application/x-www-form-urlencoded".
+
+ * token REQUIRED. The string value of the token.
+ * token_type_hint OPTIONAL.
+
+ A hint about the type of the token submitted for
+ introspection. The protected resource MAY pass this parameter to
+ help the authorization server optimize the token lookup. If the
+ server is unable to locate the token using the given hint, it MUST
+ extend its search across all of its supported token types. An
+ authorization server MAY ignore this parameter, particularly if it
+ is able to detect the token type automatically.
+
+ * access_token: An Access Token as defined in [`RFC6749`], `section 1.4`_
+ * refresh_token: A Refresh Token as defined in [`RFC6749`], `section 1.5`_
+
+ The introspection endpoint MAY accept other OPTIONAL
+ parameters to provide further context to the query. For
+ instance, an authorization server may desire to know the IP
+ address of the client accessing the protected resource to
+ determine if the correct client is likely to be presenting the
+ token. The definition of this or any other parameters are
+ outside the scope of this specification, to be defined by
+ service documentation or extensions to this specification.
+
+ .. _`section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
+ .. _`section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
+ .. _`RFC6749`: http://tools.ietf.org/html/rfc6749
+ """
+ self._raise_on_bad_method(request)
+ self._raise_on_bad_post_request(request)
+ self._raise_on_missing_token(request)
+ self._raise_on_invalid_client(request)
+ self._raise_on_unsupported_token(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/metadata.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/metadata.py
new file mode 100644
index 0000000000..a2820f28a5
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/metadata.py
@@ -0,0 +1,238 @@
+"""
+oauthlib.oauth2.rfc6749.endpoint.metadata
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An implementation of the `OAuth 2.0 Authorization Server Metadata`.
+
+.. _`OAuth 2.0 Authorization Server Metadata`: https://tools.ietf.org/html/rfc8414
+"""
+import copy
+import json
+import logging
+
+from .. import grant_types, utils
+from .authorization import AuthorizationEndpoint
+from .base import BaseEndpoint, catch_errors_and_unavailability
+from .introspect import IntrospectEndpoint
+from .revocation import RevocationEndpoint
+from .token import TokenEndpoint
+
+log = logging.getLogger(__name__)
+
+
+class MetadataEndpoint(BaseEndpoint):
+
+ """OAuth2.0 Authorization Server Metadata endpoint.
+
+ This specification generalizes the metadata format defined by
+    `OpenID Connect Discovery 1.0`_ in a way that is compatible
+ with OpenID Connect Discovery while being applicable to a wider set
+ of OAuth 2.0 use cases. This is intentionally parallel to the way
+ that OAuth 2.0 Dynamic Client Registration Protocol [`RFC7591`_]
+ generalized the dynamic client registration mechanisms defined by
+ OpenID Connect Dynamic Client Registration 1.0
+ in a way that is compatible with it.
+
+ .. _`OpenID Connect Discovery 1.0`: https://openid.net/specs/openid-connect-discovery-1_0.html
+ .. _`RFC7591`: https://tools.ietf.org/html/rfc7591
+ """
+
+ def __init__(self, endpoints, claims={}, raise_errors=True):
+ assert isinstance(claims, dict)
+ for endpoint in endpoints:
+ assert isinstance(endpoint, BaseEndpoint)
+
+ BaseEndpoint.__init__(self)
+ self.raise_errors = raise_errors
+ self.endpoints = endpoints
+ self.initial_claims = claims
+ self.claims = self.validate_metadata_server()
+
+ @catch_errors_and_unavailability
+ def create_metadata_response(self, uri, http_method='GET', body=None,
+ headers=None):
+ """Create metadata response
+ """
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Access-Control-Allow-Origin': '*',
+ }
+ return headers, json.dumps(self.claims), 200
+
+ def validate_metadata(self, array, key, is_required=False, is_list=False, is_url=False, is_issuer=False):
+ if not self.raise_errors:
+ return
+
+ if key not in array:
+ if is_required:
+ raise ValueError("key {} is a mandatory metadata.".format(key))
+
+ elif is_issuer:
+ if not utils.is_secure_transport(array[key]):
+ raise ValueError("key {}: {} must be an HTTPS URL".format(key, array[key]))
+ if "?" in array[key] or "&" in array[key] or "#" in array[key]:
+ raise ValueError("key {}: {} must not contain query or fragment components".format(key, array[key]))
+
+ elif is_url:
+ if not array[key].startswith("http"):
+ raise ValueError("key {}: {} must be an URL".format(key, array[key]))
+
+ elif is_list:
+ if not isinstance(array[key], list):
+ raise ValueError("key {}: {} must be an Array".format(key, array[key]))
+ for elem in array[key]:
+ if not isinstance(elem, str):
+ raise ValueError("array {}: {} must contains only string (not {})".format(key, array[key], elem))
+
+ def validate_metadata_token(self, claims, endpoint):
+ """
+ If the token endpoint is used in the grant type, the value of this
+ parameter MUST be the same as the value of the "grant_type"
+ parameter passed to the token endpoint defined in the grant type
+ definition.
+ """
+ self._grant_types.extend(endpoint._grant_types.keys())
+ claims.setdefault("token_endpoint_auth_methods_supported", ["client_secret_post", "client_secret_basic"])
+
+ self.validate_metadata(claims, "token_endpoint_auth_methods_supported", is_list=True)
+ self.validate_metadata(claims, "token_endpoint_auth_signing_alg_values_supported", is_list=True)
+ self.validate_metadata(claims, "token_endpoint", is_required=True, is_url=True)
+
+ def validate_metadata_authorization(self, claims, endpoint):
+ claims.setdefault("response_types_supported",
+ list(filter(lambda x: x != "none", endpoint._response_types.keys())))
+ claims.setdefault("response_modes_supported", ["query", "fragment"])
+
+ # The OAuth2.0 Implicit flow is defined as a "grant type" but it is not
+ # using the "token" endpoint, as such, we have to add it explicitly to
+ # the list of "grant_types_supported" when enabled.
+ if "token" in claims["response_types_supported"]:
+ self._grant_types.append("implicit")
+
+ self.validate_metadata(claims, "response_types_supported", is_required=True, is_list=True)
+ self.validate_metadata(claims, "response_modes_supported", is_list=True)
+ if "code" in claims["response_types_supported"]:
+ code_grant = endpoint._response_types["code"]
+ if not isinstance(code_grant, grant_types.AuthorizationCodeGrant) and hasattr(code_grant, "default_grant"):
+ code_grant = code_grant.default_grant
+
+ claims.setdefault("code_challenge_methods_supported",
+ list(code_grant._code_challenge_methods.keys()))
+ self.validate_metadata(claims, "code_challenge_methods_supported", is_list=True)
+ self.validate_metadata(claims, "authorization_endpoint", is_required=True, is_url=True)
+
+ def validate_metadata_revocation(self, claims, endpoint):
+ claims.setdefault("revocation_endpoint_auth_methods_supported",
+ ["client_secret_post", "client_secret_basic"])
+
+ self.validate_metadata(claims, "revocation_endpoint_auth_methods_supported", is_list=True)
+ self.validate_metadata(claims, "revocation_endpoint_auth_signing_alg_values_supported", is_list=True)
+ self.validate_metadata(claims, "revocation_endpoint", is_required=True, is_url=True)
+
+ def validate_metadata_introspection(self, claims, endpoint):
+ claims.setdefault("introspection_endpoint_auth_methods_supported",
+ ["client_secret_post", "client_secret_basic"])
+
+ self.validate_metadata(claims, "introspection_endpoint_auth_methods_supported", is_list=True)
+ self.validate_metadata(claims, "introspection_endpoint_auth_signing_alg_values_supported", is_list=True)
+ self.validate_metadata(claims, "introspection_endpoint", is_required=True, is_url=True)
+
+ def validate_metadata_server(self):
+ """
+ Authorization servers can have metadata describing their
+ configuration. The following authorization server metadata values
+ are used by this specification. More details can be found in
+ `RFC8414 section 2`_ :
+
+ issuer
+ REQUIRED
+
+ authorization_endpoint
+ URL of the authorization server's authorization endpoint
+ [`RFC6749#Authorization`_]. This is REQUIRED unless no grant types are supported
+ that use the authorization endpoint.
+
+ token_endpoint
+ URL of the authorization server's token endpoint [`RFC6749#Token`_]. This
+ is REQUIRED unless only the implicit grant type is supported.
+
+ scopes_supported
+ RECOMMENDED.
+
+ response_types_supported
+ REQUIRED.
+
+ Other OPTIONAL fields:
+ jwks_uri,
+ registration_endpoint,
+ response_modes_supported
+
+ grant_types_supported
+ OPTIONAL. JSON array containing a list of the OAuth 2.0 grant
+ type values that this authorization server supports. The array
+ values used are the same as those used with the "grant_types"
+ parameter defined by "OAuth 2.0 Dynamic Client Registration
+ Protocol" [`RFC7591`_]. If omitted, the default value is
+ "["authorization_code", "implicit"]".
+
+ token_endpoint_auth_methods_supported
+
+ token_endpoint_auth_signing_alg_values_supported
+
+ service_documentation
+
+ ui_locales_supported
+
+ op_policy_uri
+
+ op_tos_uri
+
+ revocation_endpoint
+
+ revocation_endpoint_auth_methods_supported
+
+ revocation_endpoint_auth_signing_alg_values_supported
+
+ introspection_endpoint
+
+ introspection_endpoint_auth_methods_supported
+
+ introspection_endpoint_auth_signing_alg_values_supported
+
+ code_challenge_methods_supported
+
+ Additional authorization server metadata parameters MAY also be used.
+ Some are defined by other specifications, such as OpenID Connect
+ Discovery 1.0 [`OpenID.Discovery`_].
+
+ .. _`RFC8414 section 2`: https://tools.ietf.org/html/rfc8414#section-2
+ .. _`RFC6749#Authorization`: https://tools.ietf.org/html/rfc6749#section-3.1
+ .. _`RFC6749#Token`: https://tools.ietf.org/html/rfc6749#section-3.2
+ .. _`RFC7591`: https://tools.ietf.org/html/rfc7591
+ .. _`OpenID.Discovery`: https://openid.net/specs/openid-connect-discovery-1_0.html
+ """
+ claims = copy.deepcopy(self.initial_claims)
+ self.validate_metadata(claims, "issuer", is_required=True, is_issuer=True)
+ self.validate_metadata(claims, "jwks_uri", is_url=True)
+ self.validate_metadata(claims, "scopes_supported", is_list=True)
+ self.validate_metadata(claims, "service_documentation", is_url=True)
+ self.validate_metadata(claims, "ui_locales_supported", is_list=True)
+ self.validate_metadata(claims, "op_policy_uri", is_url=True)
+ self.validate_metadata(claims, "op_tos_uri", is_url=True)
+
+ self._grant_types = []
+ for endpoint in self.endpoints:
+ if isinstance(endpoint, TokenEndpoint):
+ self.validate_metadata_token(claims, endpoint)
+ if isinstance(endpoint, AuthorizationEndpoint):
+ self.validate_metadata_authorization(claims, endpoint)
+ if isinstance(endpoint, RevocationEndpoint):
+ self.validate_metadata_revocation(claims, endpoint)
+ if isinstance(endpoint, IntrospectEndpoint):
+ self.validate_metadata_introspection(claims, endpoint)
+
+ # "grant_types_supported" is a combination of all OAuth2 grant types
+ # allowed in the current provider implementation.
+ claims.setdefault("grant_types_supported", self._grant_types)
+ self.validate_metadata(claims, "grant_types_supported", is_list=True)
+ return claims
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/pre_configured.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/pre_configured.py
new file mode 100644
index 0000000000..d64a166391
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/pre_configured.py
@@ -0,0 +1,216 @@
+"""
+oauthlib.oauth2.rfc6749.endpoints.pre_configured
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various endpoints needed
+for providing OAuth 2.0 RFC6749 servers.
+"""
+from ..grant_types import (
+ AuthorizationCodeGrant, ClientCredentialsGrant, ImplicitGrant,
+ RefreshTokenGrant, ResourceOwnerPasswordCredentialsGrant,
+)
+from ..tokens import BearerToken
+from .authorization import AuthorizationEndpoint
+from .introspect import IntrospectEndpoint
+from .resource import ResourceEndpoint
+from .revocation import RevocationEndpoint
+from .token import TokenEndpoint
+
+
class Server(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint,
             ResourceEndpoint, RevocationEndpoint):

    """An all-in-one endpoint featuring all four major grant types."""

    def __init__(self, request_validator, token_expires_in=None,
                 token_generator=None, refresh_token_generator=None,
                 *args, **kwargs):
        """Construct a new all-grants-in-one server.

        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param token_expires_in: An int, or a callable taking an
                                 oauthlib.common.Request and returning the
                                 token lifetime offset in seconds.
        :param token_generator: A callable producing an access token string
                                from a request.
        :param refresh_token_generator: Like ``token_generator``, but for
                                        refresh tokens.
        :param args, kwargs: Accepted for backward compatibility; this
                             implementation does not forward them to the
                             endpoint constructors.
        """
        # One handler instance per grant type, all sharing the validator.
        self.auth_grant = AuthorizationCodeGrant(request_validator)
        self.implicit_grant = ImplicitGrant(request_validator)
        self.password_grant = ResourceOwnerPasswordCredentialsGrant(
            request_validator)
        self.credentials_grant = ClientCredentialsGrant(request_validator)
        self.refresh_grant = RefreshTokenGrant(request_validator)

        self.bearer = BearerToken(request_validator, token_generator,
                                  token_expires_in, refresh_token_generator)

        # NOTE: response_type 'none' is deliberately routed to the
        # authorization-code handler, mirroring upstream oauthlib behavior.
        response_types = {
            'code': self.auth_grant,
            'token': self.implicit_grant,
            'none': self.auth_grant,
        }
        grant_types = {
            'authorization_code': self.auth_grant,
            'password': self.password_grant,
            'client_credentials': self.credentials_grant,
            'refresh_token': self.refresh_grant,
        }

        AuthorizationEndpoint.__init__(
            self, default_response_type='code',
            response_types=response_types,
            default_token_type=self.bearer)
        TokenEndpoint.__init__(
            self, default_grant_type='authorization_code',
            grant_types=grant_types,
            default_token_type=self.bearer)
        ResourceEndpoint.__init__(
            self, default_token='Bearer',
            token_types={'Bearer': self.bearer})
        RevocationEndpoint.__init__(self, request_validator)
        IntrospectEndpoint.__init__(self, request_validator)
+
+
class WebApplicationServer(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint,
                           ResourceEndpoint, RevocationEndpoint):

    """An all-in-one endpoint featuring Authorization code grant and Bearer tokens."""

    def __init__(self, request_validator, token_generator=None,
                 token_expires_in=None, refresh_token_generator=None, **kwargs):
        """Construct a new web application server.

        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param token_generator: A callable producing an access token string
                                from a request.
        :param token_expires_in: An int, or a callable taking an
                                 oauthlib.common.Request and returning the
                                 token lifetime offset in seconds.
        :param refresh_token_generator: Like ``token_generator``, but for
                                        refresh tokens.
        :param kwargs: Accepted for backward compatibility; not forwarded
                       by this implementation.
        """
        self.auth_grant = AuthorizationCodeGrant(request_validator)
        self.refresh_grant = RefreshTokenGrant(request_validator)
        self.bearer = BearerToken(request_validator, token_generator,
                                  token_expires_in, refresh_token_generator)

        AuthorizationEndpoint.__init__(
            self, default_response_type='code',
            response_types={'code': self.auth_grant},
            default_token_type=self.bearer)
        TokenEndpoint.__init__(
            self, default_grant_type='authorization_code',
            grant_types={'authorization_code': self.auth_grant,
                         'refresh_token': self.refresh_grant},
            default_token_type=self.bearer)
        ResourceEndpoint.__init__(
            self, default_token='Bearer',
            token_types={'Bearer': self.bearer})
        RevocationEndpoint.__init__(self, request_validator)
        IntrospectEndpoint.__init__(self, request_validator)
+
+
class MobileApplicationServer(AuthorizationEndpoint, IntrospectEndpoint,
                              ResourceEndpoint, RevocationEndpoint):

    """An all-in-one endpoint featuring Implicit code grant and Bearer tokens."""

    def __init__(self, request_validator, token_generator=None,
                 token_expires_in=None, refresh_token_generator=None, **kwargs):
        """Construct a new implicit grant server.

        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param token_generator: A callable producing an access token string
                                from a request.
        :param token_expires_in: An int, or a callable taking an
                                 oauthlib.common.Request and returning the
                                 token lifetime offset in seconds.
        :param refresh_token_generator: Like ``token_generator``, but for
                                        refresh tokens.
        :param kwargs: Accepted for backward compatibility; not forwarded
                       by this implementation.
        """
        self.implicit_grant = ImplicitGrant(request_validator)
        self.bearer = BearerToken(request_validator, token_generator,
                                  token_expires_in, refresh_token_generator)

        AuthorizationEndpoint.__init__(
            self, default_response_type='token',
            response_types={'token': self.implicit_grant},
            default_token_type=self.bearer)
        ResourceEndpoint.__init__(
            self, default_token='Bearer',
            token_types={'Bearer': self.bearer})
        # The implicit flow issues no refresh tokens, so revocation and
        # introspection are limited to access tokens here.
        RevocationEndpoint.__init__(
            self, request_validator,
            supported_token_types=['access_token'])
        IntrospectEndpoint.__init__(
            self, request_validator,
            supported_token_types=['access_token'])
+
+
class LegacyApplicationServer(TokenEndpoint, IntrospectEndpoint,
                              ResourceEndpoint, RevocationEndpoint):

    """An all-in-one endpoint featuring Resource Owner Password Credentials grant and Bearer tokens."""

    def __init__(self, request_validator, token_generator=None,
                 token_expires_in=None, refresh_token_generator=None, **kwargs):
        """Construct a resource owner password credentials grant server.

        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param token_generator: A callable producing an access token string
                                from a request.
        :param token_expires_in: An int, or a callable taking an
                                 oauthlib.common.Request and returning the
                                 token lifetime offset in seconds.
        :param refresh_token_generator: Like ``token_generator``, but for
                                        refresh tokens.
        :param kwargs: Accepted for backward compatibility; not forwarded
                       by this implementation.
        """
        self.password_grant = ResourceOwnerPasswordCredentialsGrant(
            request_validator)
        self.refresh_grant = RefreshTokenGrant(request_validator)
        self.bearer = BearerToken(request_validator, token_generator,
                                  token_expires_in, refresh_token_generator)

        TokenEndpoint.__init__(
            self, default_grant_type='password',
            grant_types={'password': self.password_grant,
                         'refresh_token': self.refresh_grant},
            default_token_type=self.bearer)
        ResourceEndpoint.__init__(
            self, default_token='Bearer',
            token_types={'Bearer': self.bearer})
        RevocationEndpoint.__init__(self, request_validator)
        IntrospectEndpoint.__init__(self, request_validator)
+
+
class BackendApplicationServer(TokenEndpoint, IntrospectEndpoint,
                               ResourceEndpoint, RevocationEndpoint):

    """An all-in-one endpoint featuring Client Credentials grant and Bearer tokens."""

    def __init__(self, request_validator, token_generator=None,
                 token_expires_in=None, refresh_token_generator=None, **kwargs):
        """Construct a client credentials grant server.

        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param token_generator: A callable producing an access token string
                                from a request.
        :param token_expires_in: An int, or a callable taking an
                                 oauthlib.common.Request and returning the
                                 token lifetime offset in seconds.
        :param refresh_token_generator: Like ``token_generator``, but for
                                        refresh tokens.
        :param kwargs: Accepted for backward compatibility; not forwarded
                       by this implementation.
        """
        self.credentials_grant = ClientCredentialsGrant(request_validator)
        self.bearer = BearerToken(request_validator, token_generator,
                                  token_expires_in, refresh_token_generator)

        TokenEndpoint.__init__(
            self, default_grant_type='client_credentials',
            grant_types={'client_credentials': self.credentials_grant},
            default_token_type=self.bearer)
        ResourceEndpoint.__init__(
            self, default_token='Bearer',
            token_types={'Bearer': self.bearer})
        # Client-credentials flows issue no refresh tokens, so revocation
        # and introspection are limited to access tokens here.
        RevocationEndpoint.__init__(
            self, request_validator,
            supported_token_types=['access_token'])
        IntrospectEndpoint.__init__(
            self, request_validator,
            supported_token_types=['access_token'])
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/resource.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/resource.py
new file mode 100644
index 0000000000..f7562255df
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/resource.py
@@ -0,0 +1,84 @@
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import logging
+
+from oauthlib.common import Request
+
+from .base import BaseEndpoint, catch_errors_and_unavailability
+
+log = logging.getLogger(__name__)
+
+
class ResourceEndpoint(BaseEndpoint):

    """Authorizes access to protected resources.

    Clients present an access token when accessing a protected resource;
    the resource server must validate the token, check its expiry, and
    verify its scope covers the requested resource (RFC 6749 section 7).
    How the token is carried depends on the token type -- typically the
    HTTP "Authorization" header per RFC 6750, though tokens may also
    arrive in the query string or body::

        https://example.com/protected?access_token=kjfch2345sdf  # Query
        access_token=sdf23409df                                  # Body
    """

    def __init__(self, default_token, token_types):
        BaseEndpoint.__init__(self)
        # Fallback token type name used when a request cannot be classified.
        self._default_token = default_token
        # Mapping of token type name -> token handler.
        self._tokens = token_types

    @property
    def default_token(self):
        return self._default_token

    @property
    def default_token_type_handler(self):
        return self.tokens.get(self.default_token)

    @property
    def tokens(self):
        return self._tokens

    @catch_errors_and_unavailability
    def verify_request(self, uri, http_method='GET', body=None, headers=None,
                       scopes=None):
        """Validate client, code etc, return body + headers"""
        request = Request(uri, http_method, body, headers)
        request.token_type = self.find_token_type(request)
        request.scopes = scopes
        handler = self.tokens.get(request.token_type,
                                  self.default_token_type_handler)
        log.debug('Dispatching token_type %s request to %r.',
                  request.token_type, handler)
        return handler.validate_request(request), request

    def find_token_type(self, request):
        """Token type identification.

        RFC 6749 does not say how to tell token types apart during
        protected resource access, so each registered token handler is
        asked to estimate how likely the request carries its token type,
        and the best estimate wins (name breaks ties, descending).
        """
        candidates = ((handler.estimate_type(request), name)
                      for name, handler in self.tokens.items())
        best = max(candidates, default=None)
        return None if best is None else best[1]
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/revocation.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/revocation.py
new file mode 100644
index 0000000000..596d0860fa
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/revocation.py
@@ -0,0 +1,126 @@
+"""
+oauthlib.oauth2.rfc6749.endpoints.revocation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An implementation of the OAuth 2 `Token Revocation`_ spec (draft 11).
+
+.. _`Token Revocation`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11
+"""
+import logging
+
+from oauthlib.common import Request
+
+from ..errors import OAuth2Error
+from .base import BaseEndpoint, catch_errors_and_unavailability
+
+log = logging.getLogger(__name__)
+
+
class RevocationEndpoint(BaseEndpoint):

    """Token revocation endpoint.

    Endpoint used by authenticated clients to revoke access and refresh tokens.
    Commonly this will be part of the Authorization Endpoint.
    """

    # Token type hints this endpoint can act on by default.
    valid_token_types = ('access_token', 'refresh_token')
    # Revocation requests must use HTTP POST.
    valid_request_methods = ('POST',)

    def __init__(self, request_validator, supported_token_types=None,
                 enable_jsonp=False):
        """
        :param request_validator: An implementation of
                                  oauthlib.oauth2.RequestValidator.
        :param supported_token_types: Iterable of token type hints this
                                      server revokes; falls back to
                                      ``valid_token_types`` when falsy.
        :param enable_jsonp: When True, responses are wrapped in the
                             request's JSONP callback if one is present.
                             NOTE(review): JSONP is non-standard; leave
                             disabled unless a caller requires it.
        """
        BaseEndpoint.__init__(self)
        self.request_validator = request_validator
        self.supported_token_types = (
            supported_token_types or self.valid_token_types)
        self.enable_jsonp = enable_jsonp

    @catch_errors_and_unavailability
    def create_revocation_response(self, uri, http_method='POST', body=None,
                                   headers=None):
        """Revoke supplied access or refresh token.

        The authorization server responds with HTTP status code 200 if the
        token has been revoked successfully or if the client submitted an
        invalid token.

        Note: invalid tokens do not cause an error response since the client
        cannot handle such an error in a reasonable way. Moreover, the purpose
        of the revocation request, invalidating the particular token, is
        already achieved.

        The content of the response body is ignored by the client as all
        necessary information is conveyed in the response code.

        An invalid token type hint value is ignored by the authorization server
        and does not influence the revocation response.

        :returns: A ``(headers, body, status)`` tuple. On validation errors
                  the JSON headers below plus the error's own headers are
                  returned; on success the headers dict is deliberately
                  empty (the body carries no information).
        """
        # These headers are only used on the error path; the success path
        # below intentionally returns an empty headers dict.
        resp_headers = {
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache',
        }
        request = Request(
            uri, http_method=http_method, body=body, headers=headers)
        try:
            self.validate_revocation_request(request)
            log.debug('Token revocation valid for %r.', request)
        except OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            # Serialize the spec-defined error as a JSON body, optionally
            # wrapped in the JSONP callback.
            response_body = e.json
            if self.enable_jsonp and request.callback:
                response_body = '{}({});'.format(request.callback, response_body)
            resp_headers.update(e.headers)
            return resp_headers, response_body, e.status_code

        # Delegate the actual revocation to the provider's validator.
        self.request_validator.revoke_token(request.token,
                                            request.token_type_hint, request)

        # Success: empty body (or a bare JSONP callback invocation) and 200.
        response_body = ''
        if self.enable_jsonp and request.callback:
            response_body = request.callback + '();'
        return {}, response_body, 200

    def validate_revocation_request(self, request):
        """Ensure the request is valid.

        The client constructs the request by including the following parameters
        using the "application/x-www-form-urlencoded" format in the HTTP
        request entity-body:

        token (REQUIRED). The token that the client wants to get revoked.

        token_type_hint (OPTIONAL). A hint about the type of the token
        submitted for revocation. Clients MAY pass this parameter in order to
        help the authorization server to optimize the token lookup. If the
        server is unable to locate the token using the given hint, it MUST
        extend its search across all of its supported token types. An
        authorization server MAY ignore this parameter, particularly if it is
        able to detect the token type automatically. This specification
        defines two such values:

        * access_token: An Access Token as defined in [RFC6749],
          `section 1.4`_

        * refresh_token: A Refresh Token as defined in [RFC6749],
          `section 1.5`_

        Specific implementations, profiles, and extensions of this
        specification MAY define other values for this parameter using
        the registry defined in `Section 4.1.2`_.

        The client also includes its authentication credentials as described in
        `Section 2.3`_. of [`RFC6749`_].

        .. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
        .. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
        .. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3
        .. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
        .. _`RFC6749`: https://tools.ietf.org/html/rfc6749
        """
        # The _raise_on_* helpers live on BaseEndpoint (not shown here);
        # each raises an OAuth2Error subclass on failure.
        self._raise_on_bad_method(request)
        self._raise_on_bad_post_request(request)
        self._raise_on_missing_token(request)
        self._raise_on_invalid_client(request)
        self._raise_on_unsupported_token(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/token.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/token.py
new file mode 100644
index 0000000000..ab9e0918b3
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/endpoints/token.py
@@ -0,0 +1,119 @@
+"""
+oauthlib.oauth2.rfc6749
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 RFC6749.
+"""
+import logging
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import utils
+
+from .base import BaseEndpoint, catch_errors_and_unavailability
+
+log = logging.getLogger(__name__)
+
+
class TokenEndpoint(BaseEndpoint):

    """Token issuing endpoint.

    Clients POST their authorization grant (or refresh token) to this
    endpoint and receive an access token in return.  Every grant type
    except the implicit grant uses it (RFC 6749 section 3.2).

    Notes on the spec requirements as implemented here:

    * The endpoint URI may carry a query component but must not carry a
      fragment.
    * Requests over schemes other than https are denied.
    * Only the HTTP "POST" method is accepted.
    * Ignoring unrecognized parameters and rejecting duplicates is
      delegated to the individual grant type handlers.

    .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
    """

    valid_request_methods = ('POST',)

    def __init__(self, default_grant_type, default_token_type, grant_types):
        BaseEndpoint.__init__(self)
        # Fallback grant type name when a request carries an unknown one.
        self._default_grant_type = default_grant_type
        # Token handler used to mint the response token.
        self._default_token_type = default_token_type
        # Mapping of grant type name -> grant handler.
        self._grant_types = grant_types

    @property
    def grant_types(self):
        return self._grant_types

    @property
    def default_grant_type(self):
        return self._default_grant_type

    @property
    def default_grant_type_handler(self):
        return self.grant_types.get(self.default_grant_type)

    @property
    def default_token_type(self):
        return self._default_token_type

    @catch_errors_and_unavailability
    def create_token_response(self, uri, http_method='POST', body=None,
                              headers=None, credentials=None, grant_type_for_scope=None,
                              claims=None):
        """Extract grant_type and route to the designated handler."""
        request = Request(
            uri, http_method=http_method, body=body, headers=headers)
        self.validate_token_request(request)

        # 'scope' is an allowed token request parameter in both the
        # resource owner password credentials and client credentials flows
        # (https://tools.ietf.org/html/rfc6749#section-4.3.2 and
        # https://tools.ietf.org/html/rfc6749#section-4.4.2).
        request.scopes = utils.scope_to_list(request.scope)
        request.extra_credentials = credentials

        # Callers may pin the grant type used for scope handling.
        if grant_type_for_scope:
            request.grant_type = grant_type_for_scope

        # Optional OpenID Connect claims: a server implementing the
        # authorization request's claims parameter passes them here as a
        # dict and they are attached to the request verbatim.
        if claims:
            request.claims = claims

        handler = self.grant_types.get(request.grant_type,
                                       self.default_grant_type_handler)
        log.debug('Dispatching grant_type %s request to %r.',
                  request.grant_type, handler)
        return handler.create_token_response(request, self.default_token_type)

    def validate_token_request(self, request):
        # Reject non-POST methods and malformed POST bodies via the
        # BaseEndpoint helpers.
        self._raise_on_bad_method(request)
        self._raise_on_bad_post_request(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/errors.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/errors.py
new file mode 100644
index 0000000000..da24feab75
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/errors.py
@@ -0,0 +1,400 @@
+"""
+oauthlib.oauth2.rfc6749.errors
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Error used both by OAuth 2 clients and providers to represent the spec
+defined error responses for all four core grant types.
+"""
+import json
+
+from oauthlib.common import add_params_to_uri, urlencode
+
+
class OAuth2Error(Exception):
    # Subclasses override these spec-defined defaults.
    error = None
    status_code = 400
    description = ''

    def __init__(self, description=None, uri=None, state=None,
                 status_code=None, request=None):
        """
        :param description: A human-readable ASCII [USASCII] text providing
                            additional information, used to assist the client
                            developer in understanding the error that occurred.
                            Values for the "error_description" parameter
                            MUST NOT include characters outside the set
                            x20-21 / x23-5B / x5D-7E.

        :param uri: A URI identifying a human-readable web page with information
                    about the error, used to provide the client developer with
                    additional information about the error.

        :param state: A CSRF protection value received from the client.

        :param status_code: Optional HTTP status override for this instance.

        :param request: OAuthlib request; when given, its OAuth attributes
                        are copied onto the error for the provider's use.
        :type request: oauthlib.common.Request
        """
        if description is not None:
            self.description = description

        message = '({}) {}'.format(self.error, self.description)
        if request:
            message = '{} {!r}'.format(message, request)
        super().__init__(message)

        self.uri = uri
        self.state = state

        if status_code:
            self.status_code = status_code

        if not request:
            self.redirect_uri = None
            self.client_id = None
            self.scopes = None
            self.response_type = None
            self.response_mode = None
            self.grant_type = None
        else:
            self.redirect_uri = request.redirect_uri
            self.client_id = request.client_id
            self.scopes = request.scopes
            self.response_type = request.response_type
            self.response_mode = request.response_mode
            self.grant_type = request.grant_type
            if not state:
                self.state = request.state

    def in_uri(self, uri):
        """Return *uri* with the error parameters appended (fragment when
        the response mode is "fragment", query string otherwise)."""
        use_fragment = self.response_mode == "fragment"
        return add_params_to_uri(uri, self.twotuples, use_fragment)

    @property
    def twotuples(self):
        """Error parameters as a list of (name, value) pairs."""
        params = [('error', self.error)]
        if self.description:
            params.append(('error_description', self.description))
        if self.uri:
            params.append(('error_uri', self.uri))
        if self.state:
            params.append(('state', self.state))
        return params

    @property
    def urlencoded(self):
        return urlencode(self.twotuples)

    @property
    def json(self):
        return json.dumps(dict(self.twotuples))

    @property
    def headers(self):
        if self.status_code != 401:
            return {}
        # https://tools.ietf.org/html/rfc6750#section-3
        # All challenges defined by this specification MUST use the
        # auth-scheme value "Bearer", followed by one or more auth-params.
        parts = ['error="{}"'.format(self.error)]
        if self.description:
            parts.append('error_description="{}"'.format(self.description))
        if self.uri:
            parts.append('error_uri="{}"'.format(self.uri))
        return {"WWW-Authenticate": "Bearer " + ", ".join(parts)}
+
+
class TokenExpiredError(OAuth2Error):
    """A token in use is past its expiry.

    Not one of RFC 6749's registered error codes; presumably raised on the
    client side when a cached token is known to be expired -- confirm
    against callers.
    """
    error = 'token_expired'


class InsecureTransportError(OAuth2Error):
    """An OAuth 2 request was attempted over a non-https transport."""
    error = 'insecure_transport'
    description = 'OAuth 2 MUST utilize https.'


class MismatchingStateError(OAuth2Error):
    """The ``state`` in the response does not match the one sent (CSRF)."""
    error = 'mismatching_state'
    description = 'CSRF Warning! State not equal in request and response.'


class MissingCodeError(OAuth2Error):
    """An expected ``code`` parameter was absent (raised by callers)."""
    error = 'missing_code'


class MissingTokenError(OAuth2Error):
    """An expected access token was absent (raised by callers)."""
    error = 'missing_token'


class MissingTokenTypeError(OAuth2Error):
    """An expected ``token_type`` parameter was absent (raised by callers)."""
    error = 'missing_token_type'
+
+
class FatalClientError(OAuth2Error):
    """
    Errors during authorization where user should not be redirected back.

    If the request fails due to a missing, invalid, or mismatching
    redirection URI, or if the client identifier is missing or invalid,
    the authorization server SHOULD inform the resource owner of the
    error and MUST NOT automatically redirect the user-agent to the
    invalid redirection URI.

    Instead the user should be informed of the error by the provider itself.
    """
    pass


class InvalidRequestFatalError(FatalClientError):
    """
    For fatal errors, the request is missing a required parameter, includes
    an invalid parameter value, includes a parameter more than once, or is
    otherwise malformed.
    """
    error = 'invalid_request'


class InvalidRedirectURIError(InvalidRequestFatalError):
    """The supplied ``redirect_uri`` is invalid; no redirect is performed."""
    description = 'Invalid redirect URI.'


class MissingRedirectURIError(InvalidRequestFatalError):
    """A required ``redirect_uri`` is absent; no redirect is performed."""
    description = 'Missing redirect URI.'


class MismatchingRedirectURIError(InvalidRequestFatalError):
    """The ``redirect_uri`` does not match the registered one."""
    description = 'Mismatching redirect URI.'


class InvalidClientIdError(InvalidRequestFatalError):
    """The ``client_id`` parameter value is invalid."""
    description = 'Invalid client_id parameter value.'


class MissingClientIdError(InvalidRequestFatalError):
    """A required ``client_id`` parameter is absent."""
    description = 'Missing client_id parameter.'
+
+
class InvalidRequestError(OAuth2Error):
    """
    The request is missing a required parameter, includes an invalid
    parameter value, includes a parameter more than once, or is
    otherwise malformed.

    Unlike InvalidRequestFatalError, redirecting back to the client is
    permitted for this error.
    """
    error = 'invalid_request'


class MissingResponseTypeError(InvalidRequestError):
    """A required ``response_type`` parameter is absent."""
    description = 'Missing response_type parameter.'


class MissingCodeChallengeError(InvalidRequestError):
    """
    If the server requires Proof Key for Code Exchange (PKCE) by OAuth
    public clients and the client does not send the "code_challenge" in
    the request, the authorization endpoint MUST return the authorization
    error response with the "error" value set to "invalid_request". The
    "error_description" or the response of "error_uri" SHOULD explain the
    nature of error, e.g., code challenge required.
    """
    description = 'Code challenge required.'


class MissingCodeVerifierError(InvalidRequestError):
    """
    The request to the token endpoint, when PKCE is enabled, has
    the parameter `code_verifier` REQUIRED.
    """
    description = 'Code verifier required.'
+
+
class AccessDeniedError(OAuth2Error):
    """
    The resource owner or authorization server denied the request.

    https://tools.ietf.org/html/rfc6749#section-4.1.2.1
    """
    error = 'access_denied'


class UnsupportedResponseTypeError(OAuth2Error):
    """
    The authorization server does not support obtaining an authorization
    code using this method.
    """
    error = 'unsupported_response_type'


class UnsupportedCodeChallengeMethodError(InvalidRequestError):
    """
    If the server supporting PKCE does not support the requested
    transformation, the authorization endpoint MUST return the
    authorization error response with "error" value set to
    "invalid_request". The "error_description" or the response of
    "error_uri" SHOULD explain the nature of error, e.g., transform
    algorithm not supported.
    """
    description = 'Transform algorithm not supported.'


class InvalidScopeError(OAuth2Error):
    """
    The requested scope is invalid, unknown, or malformed, or
    exceeds the scope granted by the resource owner.

    https://tools.ietf.org/html/rfc6749#section-5.2
    """
    error = 'invalid_scope'


class ServerError(OAuth2Error):
    """
    The authorization server encountered an unexpected condition that
    prevented it from fulfilling the request. (This error code is needed
    because a 500 Internal Server Error HTTP status code cannot be returned
    to the client via a HTTP redirect.)
    """
    error = 'server_error'


class TemporarilyUnavailableError(OAuth2Error):
    """
    The authorization server is currently unable to handle the request
    due to a temporary overloading or maintenance of the server.
    (This error code is needed because a 503 Service Unavailable HTTP
    status code cannot be returned to the client via a HTTP redirect.)
    """
    error = 'temporarily_unavailable'
+
+
class InvalidClientError(FatalClientError):
    """
    Client authentication failed (e.g. unknown client, no client
    authentication included, or unsupported authentication method).
    The authorization server MAY return an HTTP 401 (Unauthorized) status
    code to indicate which HTTP authentication schemes are supported.
    If the client attempted to authenticate via the "Authorization" request
    header field, the authorization server MUST respond with an
    HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
    response header field matching the authentication scheme used by the
    client.
    """
    error = 'invalid_client'
    # 401 triggers the WWW-Authenticate challenge in OAuth2Error.headers.
    status_code = 401


class InvalidGrantError(OAuth2Error):
    """
    The provided authorization grant (e.g. authorization code, resource
    owner credentials) or refresh token is invalid, expired, revoked, does
    not match the redirection URI used in the authorization request, or was
    issued to another client.

    https://tools.ietf.org/html/rfc6749#section-5.2
    """
    error = 'invalid_grant'
    status_code = 400


class UnauthorizedClientError(OAuth2Error):
    """
    The authenticated client is not authorized to use this authorization
    grant type.
    """
    error = 'unauthorized_client'


class UnsupportedGrantTypeError(OAuth2Error):
    """
    The authorization grant type is not supported by the authorization
    server.
    """
    error = 'unsupported_grant_type'


class UnsupportedTokenTypeError(OAuth2Error):
    """
    The authorization server does not support the hint of the
    presented token type. I.e. the client tried to revoke an access token
    on a server not supporting this feature.
    """
    error = 'unsupported_token_type'


class InvalidTokenError(OAuth2Error):
    """
    The access token provided is expired, revoked, malformed, or
    invalid for other reasons. The resource SHOULD respond with
    the HTTP 401 (Unauthorized) status code. The client MAY
    request a new access token and retry the protected resource
    request.

    https://tools.ietf.org/html/rfc6750#section-3.1
    """
    error = 'invalid_token'
    # 401 triggers the WWW-Authenticate challenge in OAuth2Error.headers.
    status_code = 401
    description = ("The access token provided is expired, revoked, malformed, "
                   "or invalid for other reasons.")
+
+
class InsufficientScopeError(OAuth2Error):
    """
    The request requires higher privileges than provided by the
    access token. The resource server SHOULD respond with the HTTP
    403 (Forbidden) status code and MAY include the "scope"
    attribute with the scope necessary to access the protected
    resource.

    https://tools.ietf.org/html/rfc6750#section-3.1
    """
    error = 'insufficient_scope'
    status_code = 403
    description = ("The request requires higher privileges than provided by "
                   "the access token.")


class ConsentRequired(OAuth2Error):
    """
    The Authorization Server requires End-User consent.

    This error MAY be returned when the prompt parameter value in the
    Authentication Request is none, but the Authentication Request cannot be
    completed without displaying a user interface for End-User consent.
    """
    error = 'consent_required'


class LoginRequired(OAuth2Error):
    """
    The Authorization Server requires End-User authentication.

    This error MAY be returned when the prompt parameter value in the
    Authentication Request is none, but the Authentication Request cannot be
    completed without displaying a user interface for End-User authentication.
    """
    error = 'login_required'


class CustomOAuth2Error(OAuth2Error):
    """
    This error is a placeholder for all custom errors not described by the RFC.
    Some of the popular OAuth2 providers are using custom errors.
    """
    def __init__(self, error, *args, **kwargs):
        # The error code is only known at runtime, so it is set per
        # instance rather than as a class attribute like the other errors.
        self.error = error
        super().__init__(*args, **kwargs)
+
+
def raise_from_error(error, params=None):
    """Raise the OAuth2Error subclass whose ``error`` code matches *error*.

    Scans this module for a matching error class and raises it; unknown
    codes fall through to :class:`CustomOAuth2Error`.

    :param error: The "error" code string from an OAuth 2 error response.
    :param params: Optional dict carrying 'error_description', 'error_uri'
                   and 'state' from the response. May be omitted.
    :raises OAuth2Error: always (matching subclass or CustomOAuth2Error).
    """
    import inspect
    import sys

    # Bug fix: params defaults to None, so calling this without params
    # used to crash with AttributeError on params.get().
    params = params or {}
    kwargs = {
        'description': params.get('error_description'),
        'uri': params.get('error_uri'),
        'state': params.get('state')
    }
    for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
        if cls.error == error:
            raise cls(**kwargs)
    raise CustomOAuth2Error(error=error, **kwargs)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/__init__.py
new file mode 100644
index 0000000000..eb88cfc2e9
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/__init__.py
@@ -0,0 +1,11 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+from .authorization_code import AuthorizationCodeGrant
+from .client_credentials import ClientCredentialsGrant
+from .implicit import ImplicitGrant
+from .refresh_token import RefreshTokenGrant
+from .resource_owner_password_credentials import (
+ ResourceOwnerPasswordCredentialsGrant,
+)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py
new file mode 100644
index 0000000000..858855a174
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py
@@ -0,0 +1,548 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import base64
+import hashlib
+import json
+import logging
+
+from oauthlib import common
+
+from .. import errors
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
def code_challenge_method_s256(verifier, challenge):
    """
    Check a PKCE "S256" code_verifier against its code_challenge.

    Per `Section 4.3`_ of RFC 7636, the received "code_verifier" is hashed
    with SHA-256, base64url-encoded without padding, and compared to the
    "code_challenge"::

        BASE64URL-ENCODE(SHA256(ASCII(code_verifier))) == code_challenge

    Python's ``base64.urlsafe_b64encode`` already substitutes ``-`` and
    ``_`` for ``+`` and ``/``, but keeps the trailing ``=`` padding that
    the RFC's base64url variant omits — hence the ``rstrip('=')``.

    .. _`Section 4.3`: https://tools.ietf.org/html/rfc7636#section-4.3
    """
    digest = hashlib.sha256(verifier.encode()).digest()
    computed_challenge = base64.urlsafe_b64encode(digest).decode().rstrip('=')
    return computed_challenge == challenge
+
+
def code_challenge_method_plain(verifier, challenge):
    """
    Check a PKCE "plain" code_verifier against its code_challenge.

    With the "plain" method of `Section 4.3`_ the two values are simply
    compared for equality: ``code_verifier == code_challenge``.

    .. _`Section 4.3`: https://tools.ietf.org/html/rfc7636#section-4.3
    """
    return challenge == verifier
+
+
class AuthorizationCodeGrant(GrantTypeBase):

    """`Authorization Code Grant`_

    The authorization code grant type is used to obtain both access
    tokens and refresh tokens and is optimized for confidential clients.
    Since this is a redirection-based flow, the client must be capable of
    interacting with the resource owner's user-agent (typically a web
    browser) and capable of receiving incoming requests (via redirection)
    from the authorization server::

        +----------+
        | Resource |
        |   Owner  |
        |          |
        +----------+
             ^
             |
            (B)
        +----|-----+          Client Identifier      +---------------+
        |         -+----(A)-- & Redirection URI ---->|               |
        |  User-   |                                 | Authorization |
        |  Agent  -+----(B)-- User authenticates --->|     Server    |
        |          |                                 |               |
        |         -+----(C)-- Authorization Code ---<|               |
        +-|----|---+                                 +---------------+
          |    |                                         ^      v
         (A)  (C)                                        |      |
          |    |                                         |      |
          ^    v                                         |      |
        +---------+                                      |      |
        |         |>---(D)-- Authorization Code ---------'      |
        |  Client |          & Redirection URI                  |
        |         |                                             |
        |         |<---(E)----- Access Token -------------------'
        +---------+       (w/ Optional Refresh Token)

    Note: The lines illustrating steps (A), (B), and (C) are broken into
    two parts as they pass through the user-agent.

    Figure 3: Authorization Code Flow

    The flow illustrated in Figure 3 includes the following steps:

    (A) The client initiates the flow by directing the resource owner's
        user-agent to the authorization endpoint.  The client includes
        its client identifier, requested scope, local state, and a
        redirection URI to which the authorization server will send the
        user-agent back once access is granted (or denied).

    (B) The authorization server authenticates the resource owner (via
        the user-agent) and establishes whether the resource owner
        grants or denies the client's access request.

    (C) Assuming the resource owner grants access, the authorization
        server redirects the user-agent back to the client using the
        redirection URI provided earlier (in the request or during
        client registration).  The redirection URI includes an
        authorization code and any local state provided by the client
        earlier.

    (D) The client requests an access token from the authorization
        server's token endpoint by including the authorization code
        received in the previous step.  When making the request, the
        client authenticates with the authorization server.  The client
        includes the redirection URI used to obtain the authorization
        code for verification.

    (E) The authorization server authenticates the client, validates the
        authorization code, and ensures that the redirection URI
        received matches the URI used to redirect the client in
        step (C).  If valid, the authorization server responds back with
        an access token and, optionally, a refresh token.

    OAuth 2.0 public clients utilizing the Authorization Code Grant are
    susceptible to the authorization code interception attack.

    A technique to mitigate against the threat through the use of Proof Key for Code
    Exchange (PKCE, pronounced "pixy") is implemented in the current oauthlib
    implementation.

    .. _`Authorization Code Grant`: https://tools.ietf.org/html/rfc6749#section-4.1
    .. _`PKCE`: https://tools.ietf.org/html/rfc7636
    """

    default_response_mode = 'query'
    response_types = ['code']

    # This dict below is private because as RFC mention it:
    # "S256" is Mandatory To Implement (MTI) on the server.
    #
    _code_challenge_methods = {
        'plain': code_challenge_method_plain,
        'S256': code_challenge_method_s256
    }

    def create_authorization_code(self, request):
        """
        Generates an authorization grant represented as a dictionary.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :returns: dict with a fresh ``code`` and, when present on the
            request, the client-provided ``state``.
        """
        grant = {'code': common.generate_token()}
        if hasattr(request, 'state') and request.state:
            grant['state'] = request.state
        log.debug('Created authorization code grant %r for request %r.',
                  grant, request)
        return grant

    def create_authorization_response(self, request, token_handler):
        """
        The client constructs the request URI by adding the following
        parameters to the query component of the authorization endpoint URI
        using the "application/x-www-form-urlencoded" format, per `Appendix B`_:

        response_type
            REQUIRED. Value MUST be set to "code" for standard OAuth2
            authorization flow.  For OpenID Connect it must be one of
            "code token", "code id_token", or "code token id_token" - we
            essentially test that "code" appears in the response_type.
        client_id
            REQUIRED. The client identifier as described in `Section 2.2`_.
        redirect_uri
            OPTIONAL. As described in `Section 3.1.2`_.
        scope
            OPTIONAL. The scope of the access request as described by
            `Section 3.3`_.
        state
            RECOMMENDED. An opaque value used by the client to maintain
            state between the request and callback.  The authorization
            server includes this value when redirecting the user-agent back
            to the client.  The parameter SHOULD be used for preventing
            cross-site request forgery as described in `Section 10.12`_.

        The client directs the resource owner to the constructed URI using an
        HTTP redirection response, or by other means available to it via the
        user-agent.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        :returns: headers, body, status
        :raises: FatalClientError on invalid redirect URI or client id.

        A few examples::

            >>> from your_validator import your_validator
            >>> request = Request('https://example.com/authorize?client_id=valid'
            ...                   '&redirect_uri=http%3A%2F%2Fclient.com%2F')
            >>> from oauthlib.common import Request
            >>> from oauthlib.oauth2 import AuthorizationCodeGrant, BearerToken
            >>> token = BearerToken(your_validator)
            >>> grant = AuthorizationCodeGrant(your_validator)
            >>> request.scopes = ['authorized', 'in', 'some', 'form']
            >>> grant.create_authorization_response(request, token)
            (u'http://client.com/?error=invalid_request&error_description=Missing+response_type+parameter.', None, None, 400)
            >>> request = Request('https://example.com/authorize?client_id=valid'
            ...                   '&redirect_uri=http%3A%2F%2Fclient.com%2F'
            ...                   '&response_type=code')
            >>> request.scopes = ['authorized', 'in', 'some', 'form']
            >>> grant.create_authorization_response(request, token)
            (u'http://client.com/?code=u3F05aEObJuP2k7DordviIgW5wl52N', None, None, 200)
            >>> # If the client id or redirect uri fails validation
            >>> grant.create_authorization_response(request, token)
            Traceback (most recent call last):
                File "<stdin>", line 1, in <module>
                File "oauthlib/oauth2/rfc6749/grant_types.py", line 515, in create_authorization_response
                    >>> grant.create_authorization_response(request, token)
                File "oauthlib/oauth2/rfc6749/grant_types.py", line 591, in validate_authorization_request
            oauthlib.oauth2.rfc6749.errors.InvalidClientIdError

        .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
        .. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
        .. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
        .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
        """
        try:
            self.validate_authorization_request(request)
            log.debug('Pre resource owner authorization validation ok for %r.',
                      request)

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.
        except errors.FatalClientError as e:
            log.debug('Fatal client error during validation of %r. %r.',
                      request, e)
            raise

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the query component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B:
        # https://tools.ietf.org/html/rfc6749#appendix-B
        except errors.OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            request.redirect_uri = request.redirect_uri or self.error_uri
            redirect_uri = common.add_params_to_uri(
                request.redirect_uri, e.twotuples,
                fragment=request.response_mode == "fragment")
            return {'Location': redirect_uri}, None, 302

        grant = self.create_authorization_code(request)
        for modifier in self._code_modifiers:
            grant = modifier(grant, token_handler, request)
        # Hybrid (OIDC) code modifiers may have attached an access token;
        # persist it alongside the authorization code.
        if 'access_token' in grant:
            self.request_validator.save_token(grant, request)
        log.debug('Saving grant %r for %r.', grant, request)
        self.request_validator.save_authorization_code(
            request.client_id, grant, request)
        return self.prepare_authorization_response(
            request, grant, {}, None, 302)

    def create_token_response(self, request, token_handler):
        """Validate the authorization code.

        The client MUST NOT use the authorization code more than once. If an
        authorization code is used more than once, the authorization server
        MUST deny the request and SHOULD revoke (when possible) all tokens
        previously issued based on that authorization code. The authorization
        code is bound to the client identifier and redirection URI.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        :returns: (headers, body, status) triple; the body is a JSON token
            document on success or a JSON error document on failure.
        """
        headers = self._get_default_headers()
        try:
            self.validate_token_request(request)
            log.debug('Token request validation ok for %r.', request)
        except errors.OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            headers.update(e.headers)
            return headers, e.json, e.status_code

        token = token_handler.create_token(request, refresh_token=self.refresh_token)

        for modifier in self._token_modifiers:
            token = modifier(token, token_handler, request)

        self.request_validator.save_token(token, request)
        # Single-use codes: invalidate after a successful exchange.
        self.request_validator.invalidate_authorization_code(
            request.client_id, request.code, request)
        headers.update(self._create_cors_headers(request))
        return headers, json.dumps(token), 200

    def validate_authorization_request(self, request):
        """Check the authorization request for normal and fatal errors.

        A normal error could be a missing response_type parameter or the client
        attempting to access scope it is not allowed to ask authorization for.
        Normal errors can safely be included in the redirection URI and
        sent back to the client.

        Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled, how this
        is done is outside of the scope of OAuthLib but showing an error
        page describing the issue is a good idea.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :returns: (scopes, request_info) tuple.
        """

        # First check for fatal errors

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.

        # First check duplicate parameters
        for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
            try:
                duplicate_params = request.duplicate_params
            except ValueError:
                raise errors.InvalidRequestFatalError(description='Unable to parse query string', request=request)
            if param in duplicate_params:
                raise errors.InvalidRequestFatalError(description='Duplicate %s parameter.' % param, request=request)

        # REQUIRED. The client identifier as described in Section 2.2.
        # https://tools.ietf.org/html/rfc6749#section-2.2
        if not request.client_id:
            raise errors.MissingClientIdError(request=request)

        if not self.request_validator.validate_client_id(request.client_id, request):
            raise errors.InvalidClientIdError(request=request)

        # OPTIONAL. As described in Section 3.1.2.
        # https://tools.ietf.org/html/rfc6749#section-3.1.2
        log.debug('Validating redirection uri %s for client %s.',
                  request.redirect_uri, request.client_id)

        # OPTIONAL. As described in Section 3.1.2.
        # https://tools.ietf.org/html/rfc6749#section-3.1.2
        self._handle_redirects(request)

        # Then check for normal errors.

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the query component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B.
        # https://tools.ietf.org/html/rfc6749#appendix-B

        # Note that the correct parameters to be added are automatically
        # populated through the use of specific exceptions.

        request_info = {}
        for validator in self.custom_validators.pre_auth:
            request_info.update(validator(request))

        # REQUIRED.
        if request.response_type is None:
            raise errors.MissingResponseTypeError(request=request)
        # Value MUST be set to "code" or one of the OpenID authorization code including
        # response_types "code token", "code id_token", "code token id_token"
        elif 'code' not in request.response_type and request.response_type != 'none':
            raise errors.UnsupportedResponseTypeError(request=request)

        if not self.request_validator.validate_response_type(request.client_id,
                                                             request.response_type,
                                                             request.client, request):

            log.debug('Client %s is not authorized to use response_type %s.',
                      request.client_id, request.response_type)
            raise errors.UnauthorizedClientError(request=request)

        # OPTIONAL. Validate PKCE request or reply with "error"/"invalid_request"
        # https://tools.ietf.org/html/rfc6749#section-4.4.1
        if self.request_validator.is_pkce_required(request.client_id, request) is True:
            if request.code_challenge is None:
                raise errors.MissingCodeChallengeError(request=request)

        if request.code_challenge is not None:
            request_info["code_challenge"] = request.code_challenge

            # OPTIONAL, defaults to "plain" if not present in the request.
            if request.code_challenge_method is None:
                request.code_challenge_method = "plain"

            if request.code_challenge_method not in self._code_challenge_methods:
                raise errors.UnsupportedCodeChallengeMethodError(request=request)
            request_info["code_challenge_method"] = request.code_challenge_method

        # OPTIONAL. The scope of the access request as described by Section 3.3
        # https://tools.ietf.org/html/rfc6749#section-3.3
        self.validate_scopes(request)

        request_info.update({
            'client_id': request.client_id,
            'redirect_uri': request.redirect_uri,
            'response_type': request.response_type,
            'state': request.state,
            'request': request
        })

        for validator in self.custom_validators.post_auth:
            request_info.update(validator(request))

        return request.scopes, request_info

    def validate_token_request(self, request):
        """Validate an access-token request for the authorization_code grant.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :raises errors.OAuth2Error: on any validation failure.
        """
        # REQUIRED. Value MUST be set to "authorization_code".
        if request.grant_type not in ('authorization_code', 'openid'):
            raise errors.UnsupportedGrantTypeError(request=request)

        for validator in self.custom_validators.pre_token:
            validator(request)

        if request.code is None:
            raise errors.InvalidRequestError(
                description='Missing code parameter.', request=request)

        for param in ('client_id', 'grant_type', 'redirect_uri'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
                                                 request=request)

        if self.request_validator.client_authentication_required(request):
            # If the client type is confidential or the client was issued client
            # credentials (or assigned other authentication requirements), the
            # client MUST authenticate with the authorization server as described
            # in Section 3.2.1.
            # https://tools.ietf.org/html/rfc6749#section-3.2.1
            if not self.request_validator.authenticate_client(request):
                log.debug('Client authentication failed, %r.', request)
                raise errors.InvalidClientError(request=request)
        elif not self.request_validator.authenticate_client_id(request.client_id, request):
            # REQUIRED, if the client is not authenticating with the
            # authorization server as described in Section 3.2.1.
            # https://tools.ietf.org/html/rfc6749#section-3.2.1
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)

        if not hasattr(request.client, 'client_id'):
            raise NotImplementedError('Authenticate client must set the '
                                      'request.client.client_id attribute '
                                      'in authenticate_client.')

        request.client_id = request.client_id or request.client.client_id

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        # REQUIRED. The authorization code received from the
        # authorization server.
        if not self.request_validator.validate_code(request.client_id,
                                                    request.code, request.client, request):
            log.debug('Client, %r (%r), is not allowed access to scopes %r.',
                      request.client_id, request.client, request.scopes)
            raise errors.InvalidGrantError(request=request)

        # OPTIONAL. Validate PKCE code_verifier
        challenge = self.request_validator.get_code_challenge(request.code, request)

        if challenge is not None:
            if request.code_verifier is None:
                raise errors.MissingCodeVerifierError(request=request)

            challenge_method = self.request_validator.get_code_challenge_method(request.code, request)
            if challenge_method is None:
                raise errors.InvalidGrantError(request=request, description="Challenge method not found")

            if challenge_method not in self._code_challenge_methods:
                raise errors.ServerError(
                    description="code_challenge_method {} is not supported.".format(challenge_method),
                    request=request
                )

            if not self.validate_code_challenge(challenge,
                                                challenge_method,
                                                request.code_verifier):
                log.debug('request provided a invalid code_verifier.')
                raise errors.InvalidGrantError(request=request)
        elif self.request_validator.is_pkce_required(request.client_id, request) is True:
            if request.code_verifier is None:
                raise errors.MissingCodeVerifierError(request=request)
            raise errors.InvalidGrantError(request=request, description="Challenge not found")

        for attr in ('user', 'scopes'):
            if getattr(request, attr, None) is None:
                log.debug('request.%s was not set on code validation.', attr)

        # REQUIRED, if the "redirect_uri" parameter was included in the
        # authorization request as described in Section 4.1.1, and their
        # values MUST be identical.
        if request.redirect_uri is None:
            request.using_default_redirect_uri = True
            request.redirect_uri = self.request_validator.get_default_redirect_uri(
                request.client_id, request)
            log.debug('Using default redirect_uri %s.', request.redirect_uri)
            if not request.redirect_uri:
                raise errors.MissingRedirectURIError(request=request)
        else:
            request.using_default_redirect_uri = False
            log.debug('Using provided redirect_uri %s', request.redirect_uri)

        if not self.request_validator.confirm_redirect_uri(request.client_id, request.code,
                                                           request.redirect_uri, request.client,
                                                           request):
            log.debug('Redirect_uri (%r) invalid for client %r (%r).',
                      request.redirect_uri, request.client_id, request.client)
            raise errors.MismatchingRedirectURIError(request=request)

        for validator in self.custom_validators.post_token:
            validator(request)

    def validate_code_challenge(self, challenge, challenge_method, verifier):
        """Dispatch PKCE verification to the registered challenge method."""
        if challenge_method in self._code_challenge_methods:
            return self._code_challenge_methods[challenge_method](verifier, challenge)
        raise NotImplementedError('Unknown challenge_method %s' % challenge_method)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/base.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/base.py
new file mode 100644
index 0000000000..ca343a1193
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/base.py
@@ -0,0 +1,268 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+from itertools import chain
+
+from oauthlib.common import add_params_to_uri
+from oauthlib.oauth2.rfc6749 import errors, utils
+from oauthlib.uri_validate import is_absolute_uri
+
+from ..request_validator import RequestValidator
+from ..utils import is_secure_transport
+
+log = logging.getLogger(__name__)
+
+
class ValidatorsContainer:
    """
    Holder for the custom validator callables that grant types invoke
    around their standard request validation.

    Authorization validators (``pre_auth`` / ``post_auth``) take a request
    object and return a dict whose items are merged into the
    ``request_info`` produced by ``validate_authorization_request()``.
    Token validators (``pre_token`` / ``post_token``) take a request object
    and return None.  Either kind may raise OAuth2 exceptions to reject a
    request.

    ``pre_*`` hooks run BEFORE the standard validations (but after the
    critical checks that raise fatal errors); ``post_*`` hooks run AFTER
    the standard validations.

    Example::

        >>> def my_auth_validator(request):
        ...    return {'myval': True}
        >>> auth_code_grant = AuthorizationCodeGrant(request_validator)
        >>> auth_code_grant.custom_validators.pre_auth.append(my_auth_validator)
        >>> def my_token_validator(request):
        ...     if not request.everything_okay:
        ...         raise errors.OAuth2Error("uh-oh")
        >>> auth_code_grant.custom_validators.post_token.append(my_token_validator)
    """

    def __init__(self, post_auth, post_token, pre_auth, pre_token):
        # Plain attributes so callers can append hooks after construction.
        self.post_auth = post_auth
        self.pre_auth = pre_auth
        self.post_token = post_token
        self.pre_token = pre_token

    @property
    def all_pre(self):
        # Every validator that runs before the standard checks.
        return chain(self.pre_auth, self.pre_token)

    @property
    def all_post(self):
        # Every validator that runs after the standard checks.
        return chain(self.post_auth, self.post_token)
+
+
class GrantTypeBase:
    """Shared machinery for the RFC 6749 grant type implementations."""

    error_uri = None
    request_validator = None
    default_response_mode = 'fragment'
    refresh_token = True
    response_types = ['code']

    def __init__(self, request_validator=None, **kwargs):
        self.request_validator = request_validator or RequestValidator()

        # Transforms class variables into instance variables:
        self.response_types = self.response_types
        self.refresh_token = self.refresh_token
        self._setup_custom_validators(kwargs)
        self._code_modifiers = []
        self._token_modifiers = []

        for kw, val in kwargs.items():
            setattr(self, kw, val)

    def _setup_custom_validators(self, kwargs):
        """Collect pre/post auth and token hooks into a ValidatorsContainer."""
        post_auth = kwargs.get('post_auth', [])
        post_token = kwargs.get('post_token', [])
        pre_auth = kwargs.get('pre_auth', [])
        pre_token = kwargs.get('pre_token', [])
        if not hasattr(self, 'validate_authorization_request'):
            if post_auth or pre_auth:
                msg = ("{} does not support authorization validators. Use "
                       "token validators instead.").format(self.__class__.__name__)
                raise ValueError(msg)
            # Using tuples here because they can't be appended to:
            post_auth, pre_auth = (), ()
        self.custom_validators = ValidatorsContainer(post_auth, post_token,
                                                     pre_auth, pre_token)

    def register_response_type(self, response_type):
        """Allow an additional response_type for this grant instance."""
        self.response_types.append(response_type)

    def register_code_modifier(self, modifier):
        """Register a callable run over the grant dict before it is saved."""
        self._code_modifiers.append(modifier)

    def register_token_modifier(self, modifier):
        """Register a callable run over the token dict before it is saved."""
        self._token_modifiers.append(modifier)

    def create_authorization_response(self, request, token_handler):
        """
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def create_token_response(self, request, token_handler):
        """
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def add_token(self, token, token_handler, request):
        """
        :param token: dict the access token is merged into.
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        """
        # Only add a hybrid access token on auth step if asked for
        if request.response_type not in ["token", "code token", "id_token token", "code id_token token"]:
            return token

        token.update(token_handler.create_token(request, refresh_token=False))
        return token

    def validate_grant_type(self, request):
        """Ensure the client is authorized to use this grant type.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :raises errors.UnauthorizedClientError: if the validator rejects it.
        """
        client_id = getattr(request, 'client_id', None)
        if not self.request_validator.validate_grant_type(client_id,
                                                          request.grant_type, request.client, request):
            log.debug('Unauthorized from %r (%r) access to grant type %s.',
                      request.client_id, request.client, request.grant_type)
            raise errors.UnauthorizedClientError(request=request)

    def validate_scopes(self, request):
        """Populate default scopes if absent, then validate them.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :raises errors.InvalidScopeError: if the validator rejects them.
        """
        if not request.scopes:
            request.scopes = utils.scope_to_list(request.scope) or utils.scope_to_list(
                self.request_validator.get_default_scopes(request.client_id, request))
        log.debug('Validating access to scopes %r for client %r (%r).',
                  request.scopes, request.client_id, request.client)
        if not self.request_validator.validate_scopes(request.client_id,
                                                      request.scopes, request.client, request):
            raise errors.InvalidScopeError(request=request)

    def prepare_authorization_response(self, request, token, headers, body, status):
        """Place token according to response mode.

        Base classes can define a default response mode for their authorization
        response by overriding the static `default_response_mode` member.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token: dict of parameters placed in the redirect URI.
        :param headers: dict updated with the Location header.
        :param body: returned unchanged.
        :param status: returned unchanged.
        """
        request.response_mode = request.response_mode or self.default_response_mode

        if request.response_mode not in ('query', 'fragment'):
            log.debug('Overriding invalid response mode %s with %s',
                      request.response_mode, self.default_response_mode)
            request.response_mode = self.default_response_mode

        token_items = token.items()

        if request.response_type == 'none':
            state = token.get('state', None)
            if state:
                token_items = [('state', state)]
            else:
                token_items = []

        if request.response_mode == 'query':
            headers['Location'] = add_params_to_uri(
                request.redirect_uri, token_items, fragment=False)
            return headers, body, status

        if request.response_mode == 'fragment':
            headers['Location'] = add_params_to_uri(
                request.redirect_uri, token_items, fragment=True)
            return headers, body, status

        raise NotImplementedError(
            'Subclasses must set a valid default_response_mode')

    def _get_default_headers(self):
        """Create default headers for grant responses."""
        return {
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache',
        }

    def _handle_redirects(self, request):
        """Resolve and validate the request's redirect URI (fatal on error)."""
        if request.redirect_uri is not None:
            request.using_default_redirect_uri = False
            log.debug('Using provided redirect_uri %s', request.redirect_uri)
            if not is_absolute_uri(request.redirect_uri):
                raise errors.InvalidRedirectURIError(request=request)

            # The authorization server MUST verify that the redirection URI
            # to which it will redirect the access token matches a
            # redirection URI registered by the client as described in
            # Section 3.1.2.
            # https://tools.ietf.org/html/rfc6749#section-3.1.2
            if not self.request_validator.validate_redirect_uri(
                    request.client_id, request.redirect_uri, request):
                raise errors.MismatchingRedirectURIError(request=request)
        else:
            request.redirect_uri = self.request_validator.get_default_redirect_uri(
                request.client_id, request)
            request.using_default_redirect_uri = True
            log.debug('Using default redirect_uri %s.', request.redirect_uri)
            if not request.redirect_uri:
                raise errors.MissingRedirectURIError(request=request)
            if not is_absolute_uri(request.redirect_uri):
                raise errors.InvalidRedirectURIError(request=request)

    def _create_cors_headers(self, request):
        """If CORS is allowed, create the appropriate headers."""
        if 'origin' not in request.headers:
            return {}

        origin = request.headers['origin']
        if not is_secure_transport(origin):
            log.debug('Origin "%s" is not HTTPS, CORS not allowed.', origin)
            return {}
        elif not self.request_validator.is_origin_allowed(
                request.client_id, origin, request):
            log.debug('Invalid origin "%s", CORS not allowed.', origin)
            return {}
        else:
            log.debug('Valid origin "%s", injecting CORS headers.', origin)
            return {'Access-Control-Allow-Origin': origin}
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py
new file mode 100644
index 0000000000..e7b4618977
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py
@@ -0,0 +1,123 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import json
+import logging
+
+from .. import errors
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class ClientCredentialsGrant(GrantTypeBase):

    """`Client Credentials Grant`_

    The client can request an access token using only its client
    credentials (or other supported means of authentication) when the
    client is requesting access to the protected resources under its
    control, or those of another resource owner that have been previously
    arranged with the authorization server (the method of which is beyond
    the scope of this specification).

    The client credentials grant type MUST only be used by confidential
    clients::

        +---------+                                  +---------------+
        :         :                                  :               :
        :         :>-- A - Client Authentication --->: Authorization :
        : Client  :                                  :     Server    :
        :         :<-- B ---- Access Token ---------<:               :
        :         :                                  :               :
        +---------+                                  +---------------+

                        Figure 6: Client Credentials Flow

    The flow illustrated in Figure 6 includes the following steps:

    (A)  The client authenticates with the authorization server and
         requests an access token from the token endpoint.

    (B)  The authorization server authenticates the client, and if valid,
         issues an access token.

    .. _`Client Credentials Grant`: https://tools.ietf.org/html/rfc6749#section-4.4
    """

    def create_token_response(self, request, token_handler):
        """Return token or error in JSON format.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        :returns: A ``(headers, body, status)`` tuple; ``body`` is a JSON
                  string holding either the token or the error payload.

        If the access token request is valid and authorized, the
        authorization server issues an access token as described in
        `Section 5.1`_. A refresh token SHOULD NOT be included. If the request
        failed client authentication or is invalid, the authorization server
        returns an error response as described in `Section 5.2`_.

        .. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
        """
        headers = self._get_default_headers()
        try:
            log.debug('Validating access token request, %r.', request)
            self.validate_token_request(request)
        except errors.OAuth2Error as e:
            log.debug('Client error in token request. %s.', e)
            headers.update(e.headers)
            return headers, e.json, e.status_code

        # RFC 6749 section 4.4.3: no refresh token for this grant.
        token = token_handler.create_token(request, refresh_token=False)

        for modifier in self._token_modifiers:
            token = modifier(token)

        self.request_validator.save_token(token, request)

        log.debug('Issuing token to client id %r (%r), %r.',
                  request.client_id, request.client, token)
        return headers, json.dumps(token), 200

    def validate_token_request(self, request):
        """Validate a client-credentials token request.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :raises errors.InvalidRequestError: missing grant type or a
            duplicated ``grant_type``/``scope`` parameter.
        :raises errors.UnsupportedGrantTypeError: the grant type is not
            ``client_credentials``.
        :raises errors.InvalidClientError: client authentication failed.
        """
        for validator in self.custom_validators.pre_token:
            validator(request)

        if not getattr(request, 'grant_type', None):
            raise errors.InvalidRequestError('Request is missing grant type.',
                                             request=request)

        if request.grant_type != 'client_credentials':
            raise errors.UnsupportedGrantTypeError(request=request)

        # Reject requests that repeat a parameter.
        for param in ('grant_type', 'scope'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
                                                 request=request)

        log.debug('Authenticating client, %r.', request)
        if not self.request_validator.authenticate_client(request):
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)

        # The validator contract requires request.client.client_id to be
        # populated by a successful authenticate_client().
        if not hasattr(request.client, 'client_id'):
            raise NotImplementedError('Authenticate client must set the '
                                      'request.client.client_id attribute '
                                      'in authenticate_client.')
        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        request.client_id = request.client_id or request.client.client_id
        log.debug('Authorizing access to client %r.', request.client_id)
        self.validate_scopes(request)

        for validator in self.custom_validators.post_token:
            validator(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/implicit.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/implicit.py
new file mode 100644
index 0000000000..6110b6f337
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/implicit.py
@@ -0,0 +1,376 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+from oauthlib import common
+
+from .. import errors
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class ImplicitGrant(GrantTypeBase):

    """`Implicit Grant`_

    The implicit grant type is used to obtain access tokens (it does not
    support the issuance of refresh tokens) and is optimized for public
    clients known to operate a particular redirection URI. These clients
    are typically implemented in a browser using a scripting language
    such as JavaScript.

    Unlike the authorization code grant type, in which the client makes
    separate requests for authorization and for an access token, the
    client receives the access token as the result of the authorization
    request.

    The implicit grant type does not include client authentication, and
    relies on the presence of the resource owner and the registration of
    the redirection URI. Because the access token is encoded into the
    redirection URI, it may be exposed to the resource owner and other
    applications residing on the same device::

        +----------+
        | Resource |
        |  Owner   |
        |          |
        +----------+
             ^
             |
            (B)
        +----|-----+          Client Identifier     +---------------+
        |         -+----(A)-- & Redirection URI --->|               |
        |  User-   |                                | Authorization |
        |  Agent  -|----(B)-- User authenticates -->|     Server    |
        |          |                                |               |
        |          |<---(C)--- Redirection URI ----<|               |
        |          |          with Access Token     +---------------+
        |          |            in Fragment
        |          |                                +---------------+
        |          |----(D)--- Redirection URI ---->|   Web-Hosted  |
        |          |          without Fragment      |     Client    |
        |          |                                |    Resource   |
        |     (F)  |<---(E)------- Script ---------<|               |
        |          |                                +---------------+
        +-|--------+
          |    |
         (A)  (G) Access Token
          |    |
          ^    v
        +---------+
        |         |
        |  Client |
        |         |
        +---------+

    Note: The lines illustrating steps (A) and (B) are broken into two
    parts as they pass through the user-agent.

    Figure 4: Implicit Grant Flow

    The flow illustrated in Figure 4 includes the following steps:

    (A)  The client initiates the flow by directing the resource owner's
         user-agent to the authorization endpoint. The client includes
         its client identifier, requested scope, local state, and a
         redirection URI to which the authorization server will send the
         user-agent back once access is granted (or denied).

    (B)  The authorization server authenticates the resource owner (via
         the user-agent) and establishes whether the resource owner
         grants or denies the client's access request.

    (C)  Assuming the resource owner grants access, the authorization
         server redirects the user-agent back to the client using the
         redirection URI provided earlier. The redirection URI includes
         the access token in the URI fragment.

    (D)  The user-agent follows the redirection instructions by making a
         request to the web-hosted client resource (which does not
         include the fragment per [RFC2616]). The user-agent retains the
         fragment information locally.

    (E)  The web-hosted client resource returns a web page (typically an
         HTML document with an embedded script) capable of accessing the
         full redirection URI including the fragment retained by the
         user-agent, and extracting the access token (and other
         parameters) contained in the fragment.

    (F)  The user-agent executes the script provided by the web-hosted
         client resource locally, which extracts the access token.

    (G)  The user-agent passes the access token to the client.

    See `Section 10.3`_ and `Section 10.16`_ for important security considerations
    when using the implicit grant.

    .. _`Implicit Grant`: https://tools.ietf.org/html/rfc6749#section-4.2
    .. _`Section 10.3`: https://tools.ietf.org/html/rfc6749#section-10.3
    .. _`Section 10.16`: https://tools.ietf.org/html/rfc6749#section-10.16
    """

    # Registered response types for this grant; the OIDC layer checks for
    # "id_token" / "id_token token" variants as well (see validate_token_request).
    response_types = ['token']
    # RFC 6749 section 4.2: the implicit grant never issues refresh tokens.
    grant_allows_refresh_token = False

    def create_authorization_response(self, request, token_handler):
        """Create an authorization response.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.

        The client constructs the request URI by adding the following
        parameters to the query component of the authorization endpoint URI
        using the "application/x-www-form-urlencoded" format, per `Appendix B`_:

        response_type
                REQUIRED. Value MUST be set to "token" for standard OAuth2 implicit flow
                or "id_token token" or just "id_token" for OIDC implicit flow

        client_id
                REQUIRED. The client identifier as described in `Section 2.2`_.

        redirect_uri
                OPTIONAL. As described in `Section 3.1.2`_.

        scope
                OPTIONAL. The scope of the access request as described by
                `Section 3.3`_.

        state
                RECOMMENDED. An opaque value used by the client to maintain
                state between the request and callback. The authorization
                server includes this value when redirecting the user-agent back
                to the client. The parameter SHOULD be used for preventing
                cross-site request forgery as described in `Section 10.12`_.

        The authorization server validates the request to ensure that all
        required parameters are present and valid. The authorization server
        MUST verify that the redirection URI to which it will redirect the
        access token matches a redirection URI registered by the client as
        described in `Section 3.1.2`_.

        .. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
        .. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
        .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
        .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
        """
        # In the implicit flow the authorization response IS the token
        # response (the token is delivered in the redirect fragment).
        return self.create_token_response(request, token_handler)

    def create_token_response(self, request, token_handler):
        """Return token or error embedded in the URI fragment.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.

        If the resource owner grants the access request, the authorization
        server issues an access token and delivers it to the client by adding
        the following parameters to the fragment component of the redirection
        URI using the "application/x-www-form-urlencoded" format, per
        `Appendix B`_:

        access_token
                REQUIRED. The access token issued by the authorization server.

        token_type
                REQUIRED. The type of the token issued as described in
                `Section 7.1`_. Value is case insensitive.

        expires_in
                RECOMMENDED. The lifetime in seconds of the access token. For
                example, the value "3600" denotes that the access token will
                expire in one hour from the time the response was generated.
                If omitted, the authorization server SHOULD provide the
                expiration time via other means or document the default value.

        scope
                OPTIONAL, if identical to the scope requested by the client;
                otherwise, REQUIRED. The scope of the access token as
                described by `Section 3.3`_.

        state
                REQUIRED if the "state" parameter was present in the client
                authorization request. The exact value received from the
                client.

        The authorization server MUST NOT issue a refresh token.

        .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
        .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
        """
        try:
            self.validate_token_request(request)

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.
        except errors.FatalClientError as e:
            log.debug('Fatal client error during validation of %r. %r.',
                      request, e)
            raise

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the fragment component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B:
        # https://tools.ietf.org/html/rfc6749#appendix-B
        except errors.OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            return {'Location': common.add_params_to_uri(
                request.redirect_uri, e.twotuples, fragment=True)}, None, 302

        # In OIDC implicit flow it is possible to have a request_type that does not include the access_token!
        # "id_token token" - return the access token and the id token
        # "id_token" - don't return the access token
        if "token" in request.response_type.split():
            token = token_handler.create_token(request, refresh_token=False)
        else:
            token = {}

        # Echo the client-supplied state back verbatim (RFC 6749 section 4.2.2).
        if request.state is not None:
            token['state'] = request.state

        for modifier in self._token_modifiers:
            token = modifier(token, token_handler, request)

        # In OIDC implicit flow it is possible to have a request_type that does
        # not include the access_token! In this case there is no need to save a token.
        if "token" in request.response_type.split():
            self.request_validator.save_token(token, request)

        return self.prepare_authorization_response(
            request, token, {}, None, 302)

    def validate_authorization_request(self, request):
        """Validate an implicit-grant authorization request.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        Identical to validate_token_request, since in the implicit flow
        the authorization request is the token request.
        """
        return self.validate_token_request(request)

    def validate_token_request(self, request):
        """Check the token request for normal and fatal errors.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :returns: ``(scopes, request_info)`` tuple for the validated request.

        This method is very similar to validate_authorization_request in
        the AuthorizationCodeGrant but differ in a few subtle areas.

        A normal error could be a missing response_type parameter or the client
        attempting to access scope it is not allowed to ask authorization for.
        Normal errors can safely be included in the redirection URI and
        sent back to the client.

        Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled, how this
        is done is outside of the scope of OAuthLib but showing an error
        page describing the issue is a good idea.
        """

        # First check for fatal errors

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.

        # First check duplicate parameters
        # (request.duplicate_params raises ValueError when the query string
        # itself cannot be parsed — treated as a fatal error below).
        for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
            try:
                duplicate_params = request.duplicate_params
            except ValueError:
                raise errors.InvalidRequestFatalError(description='Unable to parse query string', request=request)
            if param in duplicate_params:
                raise errors.InvalidRequestFatalError(description='Duplicate %s parameter.' % param, request=request)

        # REQUIRED. The client identifier as described in Section 2.2.
        # https://tools.ietf.org/html/rfc6749#section-2.2
        if not request.client_id:
            raise errors.MissingClientIdError(request=request)

        if not self.request_validator.validate_client_id(request.client_id, request):
            raise errors.InvalidClientIdError(request=request)

        # OPTIONAL. As described in Section 3.1.2.
        # https://tools.ietf.org/html/rfc6749#section-3.1.2
        self._handle_redirects(request)

        # Then check for normal errors.

        request_info = self._run_custom_validators(request,
                                                   self.custom_validators.all_pre)

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the fragment component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B.
        # https://tools.ietf.org/html/rfc6749#appendix-B

        # Note that the correct parameters to be added are automatically
        # populated through the use of specific exceptions

        # REQUIRED.
        if request.response_type is None:
            raise errors.MissingResponseTypeError(request=request)
        # Value MUST be one of our registered types: "token" by default or if using OIDC "id_token" or "id_token token"
        elif not set(request.response_type.split()).issubset(self.response_types):
            raise errors.UnsupportedResponseTypeError(request=request)

        log.debug('Validating use of response_type token for client %r (%r).',
                  request.client_id, request.client)
        if not self.request_validator.validate_response_type(request.client_id,
                                                             request.response_type,
                                                             request.client, request):

            log.debug('Client %s is not authorized to use response_type %s.',
                      request.client_id, request.response_type)
            raise errors.UnauthorizedClientError(request=request)

        # OPTIONAL. The scope of the access request as described by Section 3.3
        # https://tools.ietf.org/html/rfc6749#section-3.3
        self.validate_scopes(request)

        request_info.update({
            'client_id': request.client_id,
            'redirect_uri': request.redirect_uri,
            'response_type': request.response_type,
            'state': request.state,
            'request': request,
        })

        request_info = self._run_custom_validators(
            request,
            self.custom_validators.all_post,
            request_info
        )

        return request.scopes, request_info

    def _run_custom_validators(self,
                               request,
                               validations,
                               request_info=None):
        """Run each validator in *validations*, merging any returned dicts
        into (a copy of) *request_info* and returning the result."""
        # Make a copy so we don't modify the existing request_info dict
        request_info = {} if request_info is None else request_info.copy()
        # For implicit grant, auth_validators and token_validators are
        # basically equivalent since the token is returned from the
        # authorization endpoint.
        for validator in validations:
            result = validator(request)
            if result is not None:
                request_info.update(result)
        return request_info
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
new file mode 100644
index 0000000000..ce33df0e7d
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
@@ -0,0 +1,136 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import json
+import logging
+
+from .. import errors, utils
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class RefreshTokenGrant(GrantTypeBase):

    """`Refresh token grant`_

    .. _`Refresh token grant`: https://tools.ietf.org/html/rfc6749#section-6
    """

    def __init__(self, request_validator=None,
                 issue_new_refresh_tokens=True, **kwargs):
        super().__init__(request_validator,
                         issue_new_refresh_tokens=issue_new_refresh_tokens,
                         **kwargs)

    def create_token_response(self, request, token_handler):
        """Create a new access token from a refresh_token.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.

        Issues a fresh access token per `Section 5.1`_ when the request
        passes validation; otherwise returns the error response described
        in `Section 5.2`_.

        A new refresh token MAY be issued (controlled here by
        ``issue_new_refresh_tokens``), in which case the client MUST
        discard the old refresh token, and the new refresh token's scope
        MUST be identical to that of the refresh token included by the
        client in the request. The server MAY revoke the old refresh
        token after issuing a new one.

        .. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
        """
        headers = self._get_default_headers()
        try:
            log.debug('Validating refresh token request, %r.', request)
            self.validate_token_request(request)
        except errors.OAuth2Error as error:
            log.debug('Client error in token request, %s.', error)
            headers.update(error.headers)
            return headers, error.json, error.status_code

        token = token_handler.create_token(
            request, refresh_token=self.issue_new_refresh_tokens)
        for apply_modifier in self._token_modifiers:
            token = apply_modifier(token, token_handler, request)
        self.request_validator.save_token(token, request)

        log.debug('Issuing new token to client id %r (%r), %r.',
                  request.client_id, request.client, token)
        headers.update(self._create_cors_headers(request))
        return headers, json.dumps(token), 200

    def validate_token_request(self, request):
        """Ensure the refresh token request is well-formed and authorized.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        """
        # REQUIRED. Value MUST be set to "refresh_token".
        if request.grant_type != 'refresh_token':
            raise errors.UnsupportedGrantTypeError(request=request)

        for pre_validator in self.custom_validators.pre_token:
            pre_validator(request)

        if request.refresh_token is None:
            raise errors.InvalidRequestError(
                description='Missing refresh token parameter.',
                request=request)

        # Refresh tokens are long-lasting credentials bound to the client
        # they were issued to, so confidential clients (or any client that
        # was issued credentials) MUST authenticate as described in
        # Section 3.2.1. https://tools.ietf.org/html/rfc6749#section-3.2.1
        if self.request_validator.client_authentication_required(request):
            log.debug('Authenticating client, %r.', request)
            if not self.request_validator.authenticate_client(request):
                log.debug('Invalid client (%r), denying access.', request)
                raise errors.InvalidClientError(request=request)
        elif not self.request_validator.authenticate_client_id(request.client_id, request):
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        # REQUIRED. The refresh token issued to the client.
        log.debug('Validating refresh token %s for client %r.',
                  request.refresh_token, request.client)
        if not self.request_validator.validate_refresh_token(
                request.refresh_token, request.client, request):
            log.debug('Invalid refresh token, %s, for client %r.',
                      request.refresh_token, request.client)
            raise errors.InvalidGrantError(request=request)

        original_scopes = utils.scope_to_list(
            self.request_validator.get_original_scopes(
                request.refresh_token, request))

        if not request.scope:
            # No narrowing requested: keep the scopes of the old token.
            request.scopes = original_scopes
        else:
            request.scopes = utils.scope_to_list(request.scope)
            # Requested scopes must be a subset of the original grant, or
            # be explicitly blessed by the validator.
            within_original = (
                all(scope in original_scopes for scope in request.scopes)
                or self.request_validator.is_within_original_scope(
                    request.scopes, request.refresh_token, request))
            if not within_original:
                log.debug('Refresh token %s lack requested scopes, %r.',
                          request.refresh_token, request.scopes)
                raise errors.InvalidScopeError(request=request)

        for post_validator in self.custom_validators.post_token:
            post_validator(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py
new file mode 100644
index 0000000000..4b0de5bf6f
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py
@@ -0,0 +1,199 @@
+"""
+oauthlib.oauth2.rfc6749.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import json
+import logging
+
+from .. import errors
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class ResourceOwnerPasswordCredentialsGrant(GrantTypeBase):

    """`Resource Owner Password Credentials Grant`_

    The resource owner password credentials grant type is suitable in
    cases where the resource owner has a trust relationship with the
    client, such as the device operating system or a highly privileged
    application. The authorization server should take special care when
    enabling this grant type and only allow it when other flows are not
    viable.

    This grant type is suitable for clients capable of obtaining the
    resource owner's credentials (username and password, typically using
    an interactive form). It is also used to migrate existing clients
    using direct authentication schemes such as HTTP Basic or Digest
    authentication to OAuth by converting the stored credentials to an
    access token::

            +----------+
            | Resource |
            |  Owner   |
            |          |
            +----------+
                 v
                 |    Resource Owner
                (A) Password Credentials
                 |
                 v
            +---------+                                  +---------------+
            |         |>--(B)---- Resource Owner ------->|               |
            |         |         Password Credentials     | Authorization |
            | Client  |                                  |     Server    |
            |         |<--(C)---- Access Token ---------<|               |
            |         |    (w/ Optional Refresh Token)   |               |
            +---------+                                  +---------------+

                Figure 5: Resource Owner Password Credentials Flow

    The flow illustrated in Figure 5 includes the following steps:

    (A)  The resource owner provides the client with its username and
         password.

    (B)  The client requests an access token from the authorization
         server's token endpoint by including the credentials received
         from the resource owner. When making the request, the client
         authenticates with the authorization server.

    (C)  The authorization server authenticates the client and validates
         the resource owner credentials, and if valid, issues an access
         token.

    .. _`Resource Owner Password Credentials Grant`: https://tools.ietf.org/html/rfc6749#section-4.3
    """

    def create_token_response(self, request, token_handler):
        """Return token or error in json format.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.

        If the access token request is valid and authorized, the
        authorization server issues an access token and optional refresh
        token as described in `Section 5.1`_. If the request failed client
        authentication or is invalid, the authorization server returns an
        error response as described in `Section 5.2`_.

        .. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
        """
        headers = self._get_default_headers()
        try:
            # Confidential clients (or any client that was issued
            # credentials) must authenticate before validation proceeds.
            if self.request_validator.client_authentication_required(request):
                log.debug('Authenticating client, %r.', request)
                if not self.request_validator.authenticate_client(request):
                    log.debug('Client authentication failed, %r.', request)
                    raise errors.InvalidClientError(request=request)
            elif not self.request_validator.authenticate_client_id(request.client_id, request):
                log.debug('Client authentication failed, %r.', request)
                raise errors.InvalidClientError(request=request)
            log.debug('Validating access token request, %r.', request)
            self.validate_token_request(request)
        except errors.OAuth2Error as error:
            log.debug('Client error in token request, %s.', error)
            headers.update(error.headers)
            return headers, error.json, error.status_code

        token = token_handler.create_token(request, self.refresh_token)
        for apply_modifier in self._token_modifiers:
            token = apply_modifier(token)
        self.request_validator.save_token(token, request)

        log.debug('Issuing token %r to client id %r (%r) and username %s.',
                  token, request.client_id, request.client, request.username)
        return headers, json.dumps(token), 200

    def validate_token_request(self, request):
        """Validate a resource-owner-password token request.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        The client makes a request to the token endpoint by adding the
        following parameters using the "application/x-www-form-urlencoded"
        format per Appendix B with a character encoding of UTF-8 in the HTTP
        request entity-body:

        grant_type
                REQUIRED. Value MUST be set to "password".

        username
                REQUIRED. The resource owner username.

        password
                REQUIRED. The resource owner password.

        scope
                OPTIONAL. The scope of the access request as described by
                `Section 3.3`_.

        If the client type is confidential or the client was issued client
        credentials (or assigned other authentication requirements), the
        client MUST authenticate with the authorization server as described
        in `Section 3.2.1`_.

        The authorization server MUST:

        o  require client authentication for confidential clients or for any
           client that was issued client credentials (or with other
           authentication requirements),

        o  authenticate the client if client authentication is included, and

        o  validate the resource owner password credentials using its
           existing password validation algorithm.

        Since this access token request utilizes the resource owner's
        password, the authorization server MUST protect the endpoint against
        brute force attacks (e.g., using rate-limitation or generating
        alerts).

        .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
        """
        for pre_validator in self.custom_validators.pre_token:
            pre_validator(request)

        # All three credentials parameters are mandatory.
        for required in ('grant_type', 'username', 'password'):
            if not getattr(request, required, None):
                raise errors.InvalidRequestError(
                    'Request is missing %s parameter.' % required, request=request)

        # No parameter may appear more than once.
        for unique in ('grant_type', 'username', 'password', 'scope'):
            if unique in request.duplicate_params:
                raise errors.InvalidRequestError(
                    description='Duplicate %s parameter.' % unique, request=request)

        # This error should rarely (if ever) occur if requests are routed to
        # grant type handlers based on the grant_type parameter.
        if request.grant_type != 'password':
            raise errors.UnsupportedGrantTypeError(request=request)

        log.debug('Validating username %s.', request.username)
        user_ok = self.request_validator.validate_user(
            request.username, request.password, request.client, request)
        if not user_ok:
            raise errors.InvalidGrantError(
                'Invalid credentials given.', request=request)
        if not hasattr(request.client, 'client_id'):
            raise NotImplementedError(
                'Validate user must set the request.client.client_id '
                'attribute in authenticate_client.')
        log.debug('Authorizing access to user %r.', request.user)

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        if request.client:
            request.client_id = request.client_id or request.client.client_id
        self.validate_scopes(request)

        for post_validator in self.custom_validators.post_token:
            post_validator(request)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/parameters.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/parameters.py
new file mode 100644
index 0000000000..8f6ce2c7fc
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/parameters.py
@@ -0,0 +1,471 @@
+"""
+oauthlib.oauth2.rfc6749.parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains methods related to `Section 4`_ of the OAuth 2 RFC.
+
+.. _`Section 4`: https://tools.ietf.org/html/rfc6749#section-4
+"""
+import json
+import os
+import time
+import urllib.parse as urlparse
+
+from oauthlib.common import add_params_to_qs, add_params_to_uri
+from oauthlib.signals import scope_changed
+
+from .errors import (
+ InsecureTransportError, MismatchingStateError, MissingCodeError,
+ MissingTokenError, MissingTokenTypeError, raise_from_error,
+)
+from .tokens import OAuth2Token
+from .utils import is_secure_transport, list_to_scope, scope_to_list
+
+
def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None,
                      scope=None, state=None, code_challenge=None, code_challenge_method='plain', **kwargs):
    """Prepare the authorization grant request URI.

    Builds the authorization request by appending the parameters below to
    the query component of the authorization endpoint URI, encoded as
    ``application/x-www-form-urlencoded`` per [`W3C.REC-html401-19991224`_].

    :param uri: Authorization endpoint URI.
    :param client_id: The client identifier as described in `Section 2.2`_.
    :param response_type: To indicate which OAuth 2 grant/flow is required,
                          "code" and "token".
    :param redirect_uri: The client provided URI to redirect back to after
                         authorization as described in `Section 3.1.2`_.
    :param scope: The scope of the access request as described by
                  `Section 3.3`_.
    :param state: An opaque value used by the client to maintain
                  state between the request and callback. The authorization
                  server includes this value when redirecting the user-agent
                  back to the client. The parameter SHOULD be used for
                  preventing cross-site request forgery as described in
                  `Section 10.12`_.
    :param code_challenge: PKCE parameter. A challenge derived from the
                           code_verifier that is sent in the authorization
                           request, to be verified against later.
    :param code_challenge_method: PKCE parameter. A method that was used to derive the
                                  code_challenge. Defaults to "plain" if not present in the request.
    :param kwargs: Extra arguments to embed in the grant/authorization URL.

    An example of an authorization code grant authorization URL:

    .. code-block:: http

        GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
            &code_challenge=kjasBS523KdkAILD2k78NdcJSk2k3KHG6&code_challenge_method=S256
            &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
        Host: server.example.com

    .. _`W3C.REC-html401-19991224`: https://tools.ietf.org/html/rfc6749#ref-W3C.REC-html401-19991224
    .. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
    .. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
    .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
    .. _`section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
    """
    # OAuth 2 requires TLS for the authorization endpoint.
    if not is_secure_transport(uri):
        raise InsecureTransportError()

    query = [('response_type', response_type),
             ('client_id', client_id)]

    if redirect_uri:
        query.append(('redirect_uri', redirect_uri))
    if scope:
        query.append(('scope', list_to_scope(scope)))
    if state:
        query.append(('state', state))
    if code_challenge is not None:
        # The method is only meaningful when a challenge is present.
        query.append(('code_challenge', code_challenge))
        query.append(('code_challenge_method', code_challenge_method))

    # Extra parameters: falsy values are deliberately dropped.
    query.extend((str(name), value) for name, value in kwargs.items() if value)

    return add_params_to_uri(uri, query)
+
+
def prepare_token_request(grant_type, body='', include_client_id=True, code_verifier=None, **kwargs):
    """Prepare the access token request.

    Builds the token request entity-body by adding the parameters below
    using the ``application/x-www-form-urlencoded`` format.

    :param grant_type: To indicate grant type being used, i.e. "password",
                       "authorization_code" or "client_credentials".

    :param body: Existing request body (URL encoded string) to embed parameters
                 into. This may contain extra parameters. Default ''.

    :param include_client_id: `True` (default) to send the `client_id` in the
                              body of the upstream request. This is required
                              if the client is not authenticating with the
                              authorization server as described in
                              `Section 3.2.1`_.
    :type include_client_id: Boolean

    :param client_id: Unicode client identifier. Will only appear if
                      `include_client_id` is True. *

    :param client_secret: Unicode client secret. Will only appear if set to a
                          value that is not `None`. Invoking this function with
                          an empty string will send an empty `client_secret`
                          value to the server. *

    :param code: If using authorization_code grant, pass the previously
                 obtained authorization code as the ``code`` argument. *

    :param redirect_uri: If the "redirect_uri" parameter was included in the
                         authorization request as described in
                         `Section 4.1.1`_, and their values MUST be identical. *

    :param code_verifier: PKCE parameter. A cryptographically random string that is used to correlate the
                          authorization request to the token request.

    :param kwargs: Extra arguments to embed in the request body.

    Parameters marked with a `*` above are not explicit arguments in the
    function signature, but are specially documented arguments for items
    appearing in the generic `**kwargs` keyworded input.

    An example of an authorization code token request body:

    .. code-block:: http

        grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA
        &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb

    .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
    .. _`Section 4.1.1`: https://tools.ietf.org/html/rfc6749#section-4.1.1
    """
    params = [('grant_type', grant_type)]

    if 'scope' in kwargs:
        kwargs['scope'] = list_to_scope(kwargs['scope'])

    # `client_id` is popped unconditionally so it never falls through to the
    # generic kwargs loop below.
    client_id = kwargs.pop('client_id', None)
    if include_client_id and client_id is not None:
        params.append(('client_id', client_id))

    # Use code_verifier if code_challenge was passed in the authorization request.
    if code_verifier is not None:
        params.append(('code_verifier', code_verifier))

    # The kwargs loop below drops falsy values, but some servers require an
    # *empty* `client_secret` string, so handle it explicitly.
    client_secret = kwargs.pop('client_secret', None)
    if client_secret is not None:
        params.append(('client_secret', client_secret))

    # Handles `code`, `redirect_uri`, and other undocumented parameters.
    params.extend((str(key), value) for key, value in kwargs.items() if value)

    return add_params_to_qs(body, params)
+
+
def prepare_token_revocation_request(url, token, token_type_hint="access_token",
                                     callback=None, body='', **kwargs):
    """Prepare a token revocation request.

    Builds the revocation request by encoding the parameters below using the
    ``application/x-www-form-urlencoded`` format into the HTTP request
    entity-body (or into the URL when a JSONP ``callback`` is supplied).

    :param url: Revocation endpoint URL.

    :param token: REQUIRED.  The token that the client wants to get revoked.

    :param token_type_hint: OPTIONAL.  A hint about the type of the token
                            submitted for revocation. Clients MAY pass this
                            parameter in order to help the authorization server
                            to optimize the token lookup.  If the server is
                            unable to locate the token using the given hint, it
                            MUST extend its search across all of its supported
                            token types.  An authorization server MAY ignore
                            this parameter, particularly if it is able to detect
                            the token type automatically.

    :param callback: OPTIONAL.  JSONP callback name; when given, parameters are
                     embedded in the URL instead of the body.

    :param body: Existing request body (URL encoded string) to embed parameters into.

    This specification defines two values for `token_type_hint`:

        * access_token: An access token as defined in [RFC6749],
            `Section 1.4`_

        * refresh_token: A refresh token as defined in [RFC6749],
            `Section 1.5`_

        Specific implementations, profiles, and extensions of this
        specification MAY define other values for this parameter using the
        registry defined in `Section 4.1.2`_.

    .. _`Section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
    .. _`Section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
    .. _`Section 4.1.2`: https://tools.ietf.org/html/rfc7009#section-4.1.2

    """
    # Revocation must not leak the token over plain HTTP.
    if not is_secure_transport(url):
        raise InsecureTransportError()

    params = [('token', token)]

    if token_type_hint:
        params.append(('token_type_hint', token_type_hint))

    # Extra parameters: falsy values are deliberately dropped.
    params.extend((str(key), value) for key, value in kwargs.items() if value)

    headers = {'Content-Type': 'application/x-www-form-urlencoded'}

    if not callback:
        # Standard RFC 7009 request: parameters travel in the body.
        return url, headers, add_params_to_qs(body, params)

    # JSONP variant: parameters (including the callback) travel in the URL.
    params.append(('callback', callback))
    return add_params_to_uri(url, params), headers, body
+
+
def parse_authorization_code_response(uri, state=None):
    """Parse authorization grant response URI into a dict.

    If the resource owner grants the access request, the authorization
    server issues an authorization code and delivers it to the client by
    adding the following parameters to the query component of the
    redirection URI using the ``application/x-www-form-urlencoded`` format:

    **code**
            REQUIRED.  The authorization code generated by the
            authorization server.  The authorization code MUST expire
            shortly after it is issued to mitigate the risk of leaks.  A
            maximum authorization code lifetime of 10 minutes is
            RECOMMENDED.  The client MUST NOT use the authorization code
            more than once.  If an authorization code is used more than
            once, the authorization server MUST deny the request and SHOULD
            revoke (when possible) all tokens previously issued based on
            that authorization code.  The authorization code is bound to
            the client identifier and redirection URI.

    **state**
            REQUIRED if the "state" parameter was present in the client
            authorization request.  The exact value received from the
            client.

    :param uri: The full redirect URL back to the client.
    :param state: The state parameter from the authorization request.
    :raises InsecureTransportError: if the URI is not HTTPS.
    :raises MismatchingStateError: if ``state`` is given and differs from
        the ``state`` query parameter.
    :raises MissingCodeError: if no ``code`` parameter is present.

    For example, the authorization server redirects the user-agent by
    sending the following HTTP response:

    .. code-block:: http

        HTTP/1.1 302 Found
        Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
                &state=xyz

    """
    if not is_secure_transport(uri):
        raise InsecureTransportError()

    query = urlparse.urlparse(uri).query
    params = dict(urlparse.parse_qsl(query))

    # A state mismatch may indicate CSRF; reject before inspecting the rest.
    if state and params.get('state', None) != state:
        raise MismatchingStateError()

    if 'error' in params:
        raise_from_error(params.get('error'), params)

    if 'code' not in params:
        raise MissingCodeError("Missing code parameter in response.")

    return params
+
+
def parse_implicit_response(uri, state=None, scope=None):
    """Parse the implicit token response URI into a dict.

    If the resource owner grants the access request, the authorization
    server issues an access token and delivers it to the client by adding
    the following parameters to the fragment component of the redirection
    URI using the ``application/x-www-form-urlencoded`` format:

    **access_token**
            REQUIRED.  The access token issued by the authorization server.

    **token_type**
            REQUIRED.  The type of the token issued as described in
            Section 7.1.  Value is case insensitive.

    **expires_in**
            RECOMMENDED.  The lifetime in seconds of the access token.  For
            example, the value "3600" denotes that the access token will
            expire in one hour from the time the response was generated.
            If omitted, the authorization server SHOULD provide the
            expiration time via other means or document the default value.

    **scope**
            OPTIONAL, if identical to the scope requested by the client,
            otherwise REQUIRED.  The scope of the access token as described
            by Section 3.3.

    **state**
            REQUIRED if the "state" parameter was present in the client
            authorization request.  The exact value received from the
            client.

    :param uri: The full redirect URL carrying the token in its fragment.
    :param state: The state parameter from the authorization request.
    :param scope: The scope requested during authorization.

    Similar to the authorization code response, but with a full token provided
    in the URL fragment:

    .. code-block:: http

        HTTP/1.1 302 Found
        Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
                &state=xyz&token_type=example&expires_in=3600
    """
    if not is_secure_transport(uri):
        raise InsecureTransportError()

    fragment = urlparse.urlparse(uri).fragment
    params = dict(urlparse.parse_qsl(fragment, keep_blank_values=True))

    if 'expires_in' in params:
        # Fragment values are strings; normalize to int, then derive an
        # absolute expiry timestamp from the relative lifetime.
        params['expires_in'] = int(params['expires_in'])
        params['expires_at'] = time.time() + params['expires_in']

    if 'scope' in params:
        params['scope'] = scope_to_list(params['scope'])

    if state and params.get('state', None) != state:
        raise ValueError("Mismatching or missing state in params.")

    token = OAuth2Token(params, old_scope=scope)
    validate_token_parameters(token)
    return token
+
+
def parse_token_response(body, scope=None):
    """Parse the JSON token response body into a dict.

    The authorization server issues an access token and optional refresh
    token, and constructs the response by adding the following parameters
    to the entity body of the HTTP response with a 200 (OK) status code:

    access_token
            REQUIRED.  The access token issued by the authorization server.
    token_type
            REQUIRED.  The type of the token issued as described in
            `Section 7.1`_.  Value is case insensitive.
    expires_in
            RECOMMENDED.  The lifetime in seconds of the access token.  For
            example, the value "3600" denotes that the access token will
            expire in one hour from the time the response was generated.
            If omitted, the authorization server SHOULD provide the
            expiration time via other means or document the default value.
    refresh_token
            OPTIONAL.  The refresh token which can be used to obtain new
            access tokens using the same authorization grant as described
            in `Section 6`_.
    scope
            OPTIONAL, if identical to the scope requested by the client,
            otherwise REQUIRED.  The scope of the access token as described
            by `Section 3.3`_.

    The parameters are serialized into a JSON structure per [`RFC4627`_],
    one parameter per top-level member; parameter order does not matter.

    :param body: The full json encoded response body.
    :param scope: The scope requested during authorization.

    For example:

    .. code-block:: http

        HTTP/1.1 200 OK
        Content-Type: application/json
        Cache-Control: no-store
        Pragma: no-cache

        {
            "access_token":"2YotnFZFEjr1zCsicMWpAA",
            "token_type":"example",
            "expires_in":3600,
            "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
            "example_parameter":"example_value"
        }

    .. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
    .. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
    .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
    .. _`RFC4627`: https://tools.ietf.org/html/rfc4627
    """
    try:
        token_params = json.loads(body)
    except ValueError:

        # Fall back to URL-encoded string, to support old implementations,
        # including (at time of writing) Facebook. See:
        # https://github.com/oauthlib/oauthlib/issues/267

        token_params = dict(urlparse.parse_qsl(body))
        if 'expires_in' in token_params:
            # Query-string values are text; cast the lifetime to int.
            token_params['expires_in'] = int(token_params['expires_in'])

    if 'scope' in token_params:
        token_params['scope'] = scope_to_list(token_params['scope'])

    if 'expires_in' in token_params:
        if token_params['expires_in'] is None:
            # Some providers send a JSON null; treat it as absent.
            token_params.pop('expires_in')
        else:
            token_params['expires_at'] = time.time() + int(token_params['expires_in'])

    token = OAuth2Token(token_params, old_scope=scope)
    validate_token_parameters(token)
    return token
+
+
def validate_token_parameters(params):
    """Ensures token presence, token type, expiration and scope in params.

    :param params: An :class:`OAuth2Token` of parsed response parameters.
    :raises: the error mapped by ``raise_from_error`` when the response
        carries an ``error`` member; :class:`MissingTokenError` when no
        access token is present; :class:`MissingTokenTypeError` when
        ``OAUTHLIB_STRICT_TOKEN_TYPE`` is set and ``token_type`` is absent;
        a ``Warning`` (carrying ``token``, ``old_scope`` and ``new_scope``
        attributes) when the granted scope differs from the requested one
        and ``OAUTHLIB_RELAX_TOKEN_SCOPE`` is not set.
    """
    if 'error' in params:
        raise_from_error(params.get('error'), params)

    if 'access_token' not in params:
        raise MissingTokenError(description="Missing access token parameter.")

    if 'token_type' not in params:
        # RFC 6749 requires token_type, but many providers omit it, so the
        # check is opt-in via an environment variable.
        if os.environ.get('OAUTHLIB_STRICT_TOKEN_TYPE'):
            raise MissingTokenTypeError()

    # If the issued access token scope is different from the one requested by
    # the client, the authorization server MUST include the "scope" response
    # parameter to inform the client of the actual scope granted.
    # https://tools.ietf.org/html/rfc6749#section-3.3
    if params.scope_changed:
        message = 'Scope has changed from "{old}" to "{new}".'.format(
            old=params.old_scope, new=params.scope,
        )
        scope_changed.send(message=message, old=params.old_scopes, new=params.scopes)
        if not os.environ.get('OAUTHLIB_RELAX_TOKEN_SCOPE', None):
            # Legacy behavior: a plain Warning instance is *raised* (not
            # warnings.warn) so callers can opt to catch and inspect it.
            w = Warning(message)
            w.token = params
            w.old_scope = params.old_scopes
            w.new_scope = params.scopes
            raise w
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/request_validator.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/request_validator.py
new file mode 100644
index 0000000000..3910c0b918
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/request_validator.py
@@ -0,0 +1,680 @@
+"""
+oauthlib.oauth2.rfc6749.request_validator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+log = logging.getLogger(__name__)
+
+
+class RequestValidator:
+
    def client_authentication_required(self, request, *args, **kwargs):
        """Determine if client authentication is required for current request.

        According to the rfc6749, client authentication is required in the following cases:
            - Resource Owner Password Credentials Grant, when Client type is Confidential or when
              Client was issued client credentials or whenever Client provided client
              authentication, see `Section 4.3.2`_.
            - Authorization Code Grant, when Client type is Confidential or when Client was issued
              client credentials or whenever Client provided client authentication,
              see `Section 4.1.3`_.
            - Refresh Token Grant, when Client type is Confidential or when Client was issued
              client credentials or whenever Client provided client authentication, see
              `Section 6`_

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant
            - Resource Owner Password Credentials Grant
            - Refresh Token Grant

        .. _`Section 4.3.2`: https://tools.ietf.org/html/rfc6749#section-4.3.2
        .. _`Section 4.1.3`: https://tools.ietf.org/html/rfc6749#section-4.1.3
        .. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
        """
        # Conservative default: always demand client authentication;
        # override to relax for public (non-confidential) clients.
        return True
+
    def authenticate_client(self, request, *args, **kwargs):
        """Authenticate client through means outside the OAuth 2 spec.

        Means of authentication is negotiated beforehand and may for example
        be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
        header.

        Headers may be accesses through request.headers and parameters found in
        both body and query can be obtained by direct attribute access, i.e.
        request.client_id for client_id in the URL query.

        The authentication process is required to contain the identification of
        the client (i.e. search the database based on the client_id). In case the
        client doesn't exist based on the received client_id, this method has to
        return False and the HTTP response created by the library will contain
        'invalid_client' message.

        After the client identification succeeds, this method needs to set the
        client on the request, i.e. request.client = client. A client object's
        class must contain the 'client_id' attribute and the 'client_id' must have
        a value.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant
            - Resource Owner Password Credentials Grant (may be disabled)
            - Client Credentials Grant
            - Refresh Token Grant

        .. _`HTTP Basic Authentication Scheme`: https://tools.ietf.org/html/rfc1945#section-11.1
        """
        # Abstract: the authentication scheme is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def authenticate_client_id(self, client_id, request, *args, **kwargs):
        """Ensure client_id belong to a non-confidential client.

        A non-confidential client is one that is not required to authenticate
        through other means, such as using HTTP Basic.

        Note, while not strictly necessary it can often be very convenient
        to set request.client to the client object associated with the
        given client_id.

        :param client_id: Unicode client identifier.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant
        """
        # Abstract: client lookup lives in the integrating application.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def confirm_redirect_uri(self, client_id, code, redirect_uri, client, request,
                             *args, **kwargs):
        """Ensure that the authorization process represented by this authorization
        code began with this 'redirect_uri'.

        If the client specifies a redirect_uri when obtaining code then that
        redirect URI must be bound to the code and verified equal in this
        method, according to RFC 6749 section 4.1.3.  Do not compare against
        the client's allowed redirect URIs, but against the URI used when the
        code was saved.

        :param client_id: Unicode client identifier.
        :param code: Unicode authorization_code.
        :param redirect_uri: Unicode absolute URI.
        :param client: Client object set by you, see ``.authenticate_client``.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Authorization Code Grant (during token request)
        """
        # Abstract: must compare against the URI stored with the code.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
        """Get the default redirect URI for the client.

        :param client_id: Unicode client identifier.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: The default redirect URI for the client

        Method is used by:
            - Authorization Code Grant
            - Implicit Grant
        """
        # Abstract: redirect URI registration is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def get_default_scopes(self, client_id, request, *args, **kwargs):
        """Get the default scopes for the client.

        :param client_id: Unicode client identifier.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: List of default scopes

        Method is used by all core grant types:
            - Authorization Code Grant
            - Implicit Grant
            - Resource Owner Password Credentials Grant
            - Client Credentials grant
        """
        # Abstract: default scope policy is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def get_original_scopes(self, refresh_token, request, *args, **kwargs):
        """Get the list of scopes associated with the refresh token.

        :param refresh_token: Unicode refresh token.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: List of scopes.

        Method is used by:
            - Refresh token grant
        """
        # Abstract: token storage is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
        """Check if requested scopes are within a scope of the refresh token.

        When access tokens are refreshed the scope of the new token
        needs to be within the scope of the original token. This is
        ensured by checking that all requested scopes strings are on
        the list returned by the get_original_scopes. If this check
        fails, is_within_original_scope is called. The method can be
        used in situations where returning all valid scopes from the
        get_original_scopes is not practical.

        :param request_scopes: A list of scopes that were requested by client.
        :param refresh_token: Unicode refresh_token.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Refresh token grant
        """
        # Conservative default: reject any scope not covered by
        # get_original_scopes; override for custom scope-widening logic.
        return False
+
    def introspect_token(self, token, token_type_hint, request, *args, **kwargs):
        """Introspect an access or refresh token.

        Called once the introspect request is validated. This method should
        verify the *token* and either return a dictionary with the list of
        claims associated, or `None` in case the token is unknown.

        Below the list of registered claims you should be interested in:

        - scope : space-separated list of scopes
        - client_id : client identifier
        - username : human-readable identifier for the resource owner
        - token_type : type of the token
        - exp : integer timestamp indicating when this token will expire
        - iat : integer timestamp indicating when this token was issued
        - nbf : integer timestamp indicating when it can be "not-before" used
        - sub : subject of the token - identifier of the resource owner
        - aud : list of string identifiers representing the intended audience
        - iss : string representing issuer of this token
        - jti : string identifier for the token

        Note that most of them are coming directly from JWT RFC. More details
        can be found in `Introspect Claims`_ or `JWT Claims`_.

        The implementation can use *token_type_hint* to improve lookup
        efficiency, but must fallback to other types to be compliant with RFC.

        The dict of claims is added to request.token after this method.

        :param token: The token string.
        :param token_type_hint: access_token or refresh_token.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        Method is used by:
            - Introspect Endpoint (all grants are compatible)

        .. _`Introspect Claims`: https://tools.ietf.org/html/rfc7662#section-2.2
        .. _`JWT Claims`: https://tools.ietf.org/html/rfc7519#section-4
        """
        # Abstract: token storage and claim mapping are deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
        """Invalidate an authorization code after use.

        :param client_id: Unicode client identifier.
        :param code: The authorization code grant (request.code).
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        Method is used by:
            - Authorization Code Grant
        """
        # Abstract: codes are single-use per RFC 6749; storage is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
        """Revoke an access or refresh token.

        :param token: The token string.
        :param token_type_hint: access_token or refresh_token.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        Method is used by:
            - Revocation Endpoint
        """
        # Abstract: token storage is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def rotate_refresh_token(self, request):
        """Determine whether to rotate the refresh token. Default, yes.

        When access tokens are refreshed the old refresh token can be kept
        or replaced with a new one (rotated). Return True to rotate and
        and False for keeping original.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - Refresh Token Grant
        """
        # Secure default: issue a fresh refresh token on every refresh.
        return True
+
    def save_authorization_code(self, client_id, code, request, *args, **kwargs):
        """Persist the authorization_code.

        The code should at minimum be stored with:
            - the client_id (``client_id``)
            - the redirect URI used (``request.redirect_uri``)
            - a resource owner / user (``request.user``)
            - the authorized scopes (``request.scopes``)

        To support PKCE, you MUST associate the code with:
            - Code Challenge (``request.code_challenge``) and
            - Code Challenge Method (``request.code_challenge_method``)

        To support OIDC, you MUST associate the code with:
            - nonce, if present (``code["nonce"]``)

        The ``code`` argument is actually a dictionary, containing at least a
        ``code`` key with the actual authorization code:

            ``{'code': 'sdf345jsdf0934f'}``

        It may also have a ``claims`` parameter which, when present, will be a dict
        deserialized from JSON as described at
        http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
        This value should be saved in this method and used again in ``.validate_code``.

        :param client_id: Unicode client identifier.
        :param code: A dict of the authorization code grant and, optionally, state.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        Method is used by:
            - Authorization Code Grant
        """
        # Abstract: code persistence is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def save_token(self, token, request, *args, **kwargs):
        """Persist the token with a token type specific method.

        Currently, only save_bearer_token is supported.

        :param token: A (Bearer) token dict.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        """
        # Dispatch point for future token types; today every token is Bearer.
        return self.save_bearer_token(token, request, *args, **kwargs)
+
    def save_bearer_token(self, token, request, *args, **kwargs):
        """Persist the Bearer token.

        The Bearer token should at minimum be associated with:
            - a client and it's client_id, if available
            - a resource owner / user (request.user)
            - authorized scopes (request.scopes)
            - an expiration time
            - a refresh token, if issued
            - a claims document, if present in request.claims

        The Bearer token dict may hold a number of items::

            {
                'token_type': 'Bearer',
                'access_token': 'askfjh234as9sd8',
                'expires_in': 3600,
                'scope': 'string of space separated authorized scopes',
                'refresh_token': '23sdf876234',  # if issued
                'state': 'given_by_client',  # if supplied by client (implicit ONLY)
            }

        Note that while "scope" is a string-separated list of authorized scopes,
        the original list is still available in request.scopes.

        The token dict is passed as a reference so any changes made to the dictionary
        will go back to the user.  If additional information must return to the client
        user, and it is only possible to get this information after writing the token
        to storage, it should be added to the token dictionary.  If the token
        dictionary must be modified but the changes should not go back to the user,
        a copy of the dictionary must be made before making the changes.

        Also note that if an Authorization Code grant request included a valid claims
        parameter (for OpenID Connect) then the request.claims property will contain
        the claims dict, which should be saved for later use when generating the
        id_token and/or UserInfo response content.

        :param token: A Bearer token dict.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request

        Method is used by all core grant types issuing Bearer tokens:
            - Authorization Code Grant
            - Implicit Grant
            - Resource Owner Password Credentials Grant (might not associate a client)
            - Client Credentials grant
        """
        # NOTE(review): the original docstring claimed
        # ":rtype: The default redirect URI for the client" -- an evident
        # copy-paste error, removed; the return value is not used by callers
        # visible here.
        # Abstract: token persistence is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
    def validate_bearer_token(self, token, scopes, request):
        """Ensure the Bearer token is valid and authorized access to scopes.

        :param token: A string of random characters.
        :param scopes: A list of scopes associated with the protected resource.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        A key to OAuth 2 security and restricting impact of leaked tokens is
        the short expiration time of tokens, *always ensure the token has not
        expired!*.

        Two different approaches to scope validation:

            1) all(scopes). The token must be authorized access to all scopes
                            associated with the resource. For example, the
                            token has access to ``read-only`` and ``images``,
                            thus the client can view images but not upload new.
                            Allows for fine grained access control through
                            combining various scopes.

            2) any(scopes). The token must be authorized access to one of the
                            scopes associated with the resource. For example,
                            token has access to ``read-only-images``.
                            Allows for fine grained, although arguably less
                            convenient, access control.

        A powerful way to use scopes would mimic UNIX ACLs and see a scope
        as a group with certain privileges. For a restful API these might
        map to HTTP verbs instead of read, write and execute.

        Note, the request.user attribute can be set to the resource owner
        associated with this token. Similarly the request.client and
        request.scopes attribute can be set to associated client object
        and authorized scopes. If you then use a decorator such as the
        one provided for django these attributes will be made available
        in all protected views as keyword arguments.

        Method is indirectly used by all core Bearer token issuing grant types:
            - Authorization Code Grant
            - Implicit Grant
            - Resource Owner Password Credentials Grant
            - Client Credentials Grant
        """
        # (The original docstring listed the parameters twice; the duplicate
        # block has been folded into the single list above.)
        # Abstract: token validation is deployment-specific.
        raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_client_id(self, client_id, request, *args, **kwargs):
+ """Ensure client_id belong to a valid and active client.
+
+ Note, while not strictly necessary it can often be very convenient
+ to set request.client to the client object associated with the
+ given client_id.
+
+ :param client_id: Unicode client identifier.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant
+ - Implicit Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_code(self, client_id, code, client, request, *args, **kwargs):
+ """Verify that the authorization_code is valid and assigned to the given
+ client.
+
+ Before returning true, set the following based on the information stored
+ with the code in 'save_authorization_code':
+
+ - request.user
+ - request.scopes
+ - request.claims (if given)
+
+ OBS! The request.user attribute should be set to the resource owner
+ associated with this authorization code. Similarly request.scopes
+ must also be set.
+
+ The request.claims property, if it was given, should assigned a dict.
+
+ If PKCE is enabled (see 'is_pkce_required' and 'save_authorization_code')
+ you MUST set the following based on the information stored:
+
+ - request.code_challenge
+ - request.code_challenge_method
+
+ :param client_id: Unicode client identifier.
+ :param code: Unicode authorization code.
+ :param client: Client object set by you, see ``.authenticate_client``.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
+ """Ensure client is authorized to use the grant_type requested.
+
+ :param client_id: Unicode client identifier.
+ :param grant_type: Unicode grant type, i.e. authorization_code, password.
+ :param client: Client object set by you, see ``.authenticate_client``.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant
+ - Resource Owner Password Credentials Grant
+ - Client Credentials Grant
+ - Refresh Token Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
+ """Ensure client is authorized to redirect to the redirect_uri requested.
+
+ All clients should register the absolute URIs of all URIs they intend
+ to redirect to. The registration is outside of the scope of oauthlib.
+
+ :param client_id: Unicode client identifier.
+ :param redirect_uri: Unicode absolute URI.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant
+ - Implicit Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
+ """Ensure the Bearer token is valid and authorized access to scopes.
+
+ OBS! The request.user attribute should be set to the resource owner
+ associated with this refresh token.
+
+ :param refresh_token: Unicode refresh token.
+ :param client: Client object set by you, see ``.authenticate_client``.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant (indirectly by issuing refresh tokens)
+ - Resource Owner Password Credentials Grant (also indirectly)
+ - Refresh Token Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
+ """Ensure client is authorized to use the response_type requested.
+
+ :param client_id: Unicode client identifier.
+ :param response_type: Unicode response type, i.e. code, token.
+ :param client: Client object set by you, see ``.authenticate_client``.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant
+ - Implicit Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
+ """Ensure the client is authorized access to requested scopes.
+
+ :param client_id: Unicode client identifier.
+ :param scopes: List of scopes (defined by you).
+ :param client: Client object set by you, see ``.authenticate_client``.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by all core grant types:
+ - Authorization Code Grant
+ - Implicit Grant
+ - Resource Owner Password Credentials Grant
+ - Client Credentials Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_user(self, username, password, client, request, *args, **kwargs):
+ """Ensure the username and password is valid.
+
+ OBS! The validation should also set the user attribute of the request
+ to a valid resource owner, i.e. request.user = username or similar. If
+ not set you will be unable to associate a token with a user in the
+ persistence method used (commonly, save_bearer_token).
+
+ :param username: Unicode username.
+ :param password: Unicode password.
+ :param client: Client object set by you, see ``.authenticate_client``.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Resource Owner Password Credentials Grant
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def is_pkce_required(self, client_id, request):
+ """Determine if current request requires PKCE. Default, False.
+ This is called for both "authorization" and "token" requests.
+
+ Override this method by ``return True`` to enable PKCE for everyone.
+ You might want to enable it only for public clients.
+ Note that PKCE can also be used in addition of a client authentication.
+
+ OAuth 2.0 public clients utilizing the Authorization Code Grant are
+ susceptible to the authorization code interception attack. This
+ specification describes the attack as well as a technique to mitigate
+ against the threat through the use of Proof Key for Code Exchange
+ (PKCE, pronounced "pixy"). See `RFC7636`_.
+
+ :param client_id: Client identifier.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: True or False
+
+ Method is used by:
+ - Authorization Code Grant
+
+ .. _`RFC7636`: https://tools.ietf.org/html/rfc7636
+ """
+ return False
+
+ def get_code_challenge(self, code, request):
+ """Is called for every "token" requests.
+
+ When the server issues the authorization code in the authorization
+ response, it MUST associate the ``code_challenge`` and
+ ``code_challenge_method`` values with the authorization code so it can
+ be verified later.
+
+ Typically, the ``code_challenge`` and ``code_challenge_method`` values
+ are stored in encrypted form in the ``code`` itself but could
+ alternatively be stored on the server associated with the code. The
+ server MUST NOT include the ``code_challenge`` value in client requests
+ in a form that other entities can extract.
+
+ Return the ``code_challenge`` associated to the code.
+ If ``None`` is returned, code is considered to not be associated to any
+ challenges.
+
+ :param code: Authorization code.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: code_challenge string
+
+ Method is used by:
+ - Authorization Code Grant - when PKCE is active
+
+ """
+ return None
+
+ def get_code_challenge_method(self, code, request):
+ """Is called during the "token" request processing, when a
+ ``code_verifier`` and a ``code_challenge`` has been provided.
+
+ See ``.get_code_challenge``.
+
+ Must return ``plain`` or ``S256``. You can return a custom value if you have
+ implemented your own ``AuthorizationCodeGrant`` class.
+
+ :param code: Authorization code.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: code_challenge_method string
+
+ Method is used by:
+ - Authorization Code Grant - when PKCE is active
+
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def is_origin_allowed(self, client_id, origin, request, *args, **kwargs):
+ """Indicate if the given origin is allowed to access the token endpoint
+ via Cross-Origin Resource Sharing (CORS). CORS is used by browser-based
+ clients, such as Single-Page Applications, to perform the Authorization
+ Code Grant.
+
+ (Note: If performing Authorization Code Grant via a public client such
+ as a browser, you should use PKCE as well.)
+
+ If this method returns true, the appropriate CORS headers will be added
+ to the response. By default this method always returns False, meaning
+ CORS is disabled.
+
+ :param client_id: Unicode client identifier.
+ :param redirect_uri: Unicode origin.
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :rtype: bool
+
+ Method is used by:
+ - Authorization Code Grant
+ - Refresh Token Grant
+
+ """
+ return False
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/tokens.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/tokens.py
new file mode 100644
index 0000000000..0757d07ea5
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/tokens.py
@@ -0,0 +1,356 @@
+"""
+oauthlib.oauth2.rfc6749.tokens
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains methods for adding two types of access tokens to requests.
+
+- Bearer https://tools.ietf.org/html/rfc6750
+- MAC https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
+"""
+import hashlib
+import hmac
+import warnings
+from binascii import b2a_base64
+from urllib.parse import urlparse
+
+from oauthlib import common
+from oauthlib.common import add_params_to_qs, add_params_to_uri
+
+from . import utils
+
+
+class OAuth2Token(dict):
+
+ def __init__(self, params, old_scope=None):
+ super().__init__(params)
+ self._new_scope = None
+ if 'scope' in params and params['scope']:
+ self._new_scope = set(utils.scope_to_list(params['scope']))
+ if old_scope is not None:
+ self._old_scope = set(utils.scope_to_list(old_scope))
+ if self._new_scope is None:
+ # the rfc says that if the scope hasn't changed, it's optional
+ # in params so set the new scope to the old scope
+ self._new_scope = self._old_scope
+ else:
+ self._old_scope = self._new_scope
+
+ @property
+ def scope_changed(self):
+ return self._new_scope != self._old_scope
+
+ @property
+ def old_scope(self):
+ return utils.list_to_scope(self._old_scope)
+
+ @property
+ def old_scopes(self):
+ return list(self._old_scope)
+
+ @property
+ def scope(self):
+ return utils.list_to_scope(self._new_scope)
+
+ @property
+ def scopes(self):
+ return list(self._new_scope)
+
+ @property
+ def missing_scopes(self):
+ return list(self._old_scope - self._new_scope)
+
+ @property
+ def additional_scopes(self):
+ return list(self._new_scope - self._old_scope)
+
+
+def prepare_mac_header(token, uri, key, http_method,
+ nonce=None,
+ headers=None,
+ body=None,
+ ext='',
+ hash_algorithm='hmac-sha-1',
+ issue_time=None,
+ draft=0):
+ """Add an `MAC Access Authentication`_ signature to headers.
+
+ Unlike OAuth 1, this HMAC signature does not require inclusion of the
+ request payload/body, neither does it use a combination of client_secret
+ and token_secret but rather a mac_key provided together with the access
+ token.
+
+ Currently two algorithms are supported, "hmac-sha-1" and "hmac-sha-256",
+ `extension algorithms`_ are not supported.
+
+ Example MAC Authorization header, linebreaks added for clarity
+
+ Authorization: MAC id="h480djs93hd8",
+ nonce="1336363200:dj83hs9s",
+ mac="bhCQXTVyfj5cmA9uKkPFx1zeOXM="
+
+ .. _`MAC Access Authentication`: https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
+ .. _`extension algorithms`: https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-7.1
+
+ :param token:
+ :param uri: Request URI.
+ :param key: MAC given provided by token endpoint.
+ :param http_method: HTTP Request method.
+ :param nonce:
+ :param headers: Request headers as a dictionary.
+ :param body:
+ :param ext:
+ :param hash_algorithm: HMAC algorithm provided by token endpoint.
+ :param issue_time: Time when the MAC credentials were issued (datetime).
+ :param draft: MAC authentication specification version.
+ :return: headers dictionary with the authorization field added.
+ """
+ http_method = http_method.upper()
+ host, port = utils.host_from_uri(uri)
+
+ if hash_algorithm.lower() == 'hmac-sha-1':
+ h = hashlib.sha1
+ elif hash_algorithm.lower() == 'hmac-sha-256':
+ h = hashlib.sha256
+ else:
+ raise ValueError('unknown hash algorithm')
+
+ if draft == 0:
+ nonce = nonce or '{}:{}'.format(utils.generate_age(issue_time),
+ common.generate_nonce())
+ else:
+ ts = common.generate_timestamp()
+ nonce = common.generate_nonce()
+
+ sch, net, path, par, query, fra = urlparse(uri)
+
+ if query:
+ request_uri = path + '?' + query
+ else:
+ request_uri = path
+
+ # Hash the body/payload
+ if body is not None and draft == 0:
+ body = body.encode('utf-8')
+ bodyhash = b2a_base64(h(body).digest())[:-1].decode('utf-8')
+ else:
+ bodyhash = ''
+
+ # Create the normalized base string
+ base = []
+ if draft == 0:
+ base.append(nonce)
+ else:
+ base.append(ts)
+ base.append(nonce)
+ base.append(http_method.upper())
+ base.append(request_uri)
+ base.append(host)
+ base.append(port)
+ if draft == 0:
+ base.append(bodyhash)
+ base.append(ext or '')
+ base_string = '\n'.join(base) + '\n'
+
+ # hmac struggles with unicode strings - http://bugs.python.org/issue5285
+ if isinstance(key, str):
+ key = key.encode('utf-8')
+ sign = hmac.new(key, base_string.encode('utf-8'), h)
+ sign = b2a_base64(sign.digest())[:-1].decode('utf-8')
+
+ header = []
+ header.append('MAC id="%s"' % token)
+ if draft != 0:
+ header.append('ts="%s"' % ts)
+ header.append('nonce="%s"' % nonce)
+ if bodyhash:
+ header.append('bodyhash="%s"' % bodyhash)
+ if ext:
+ header.append('ext="%s"' % ext)
+ header.append('mac="%s"' % sign)
+
+ headers = headers or {}
+ headers['Authorization'] = ', '.join(header)
+ return headers
+
+
+def prepare_bearer_uri(token, uri):
+ """Add a `Bearer Token`_ to the request URI.
+ Not recommended, use only if client can't use authorization header or body.
+
+ http://www.example.com/path?access_token=h480djs93hd8
+
+ .. _`Bearer Token`: https://tools.ietf.org/html/rfc6750
+
+ :param token:
+ :param uri:
+ """
+ return add_params_to_uri(uri, [(('access_token', token))])
+
+
+def prepare_bearer_headers(token, headers=None):
+ """Add a `Bearer Token`_ to the request URI.
+ Recommended method of passing bearer tokens.
+
+ Authorization: Bearer h480djs93hd8
+
+ .. _`Bearer Token`: https://tools.ietf.org/html/rfc6750
+
+ :param token:
+ :param headers:
+ """
+ headers = headers or {}
+ headers['Authorization'] = 'Bearer %s' % token
+ return headers
+
+
+def prepare_bearer_body(token, body=''):
+ """Add a `Bearer Token`_ to the request body.
+
+ access_token=h480djs93hd8
+
+ .. _`Bearer Token`: https://tools.ietf.org/html/rfc6750
+
+ :param token:
+ :param body:
+ """
+ return add_params_to_qs(body, [(('access_token', token))])
+
+
+def random_token_generator(request, refresh_token=False):
+ """
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :param refresh_token:
+ """
+ return common.generate_token()
+
+
+def signed_token_generator(private_pem, **kwargs):
+ """
+ :param private_pem:
+ """
+ def signed_token_generator(request):
+ request.claims = kwargs
+ return common.generate_signed_token(private_pem, request)
+
+ return signed_token_generator
+
+
+def get_token_from_header(request):
+ """
+ Helper function to extract a token from the request header.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :return: Return the token or None if the Authorization header is malformed.
+ """
+ token = None
+
+ if 'Authorization' in request.headers:
+ split_header = request.headers.get('Authorization').split()
+ if len(split_header) == 2 and split_header[0].lower() == 'bearer':
+ token = split_header[1]
+ else:
+ token = request.access_token
+
+ return token
+
+
+class TokenBase:
+ __slots__ = ()
+
+ def __call__(self, request, refresh_token=False):
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def validate_request(self, request):
+ """
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+ def estimate_type(self, request):
+ """
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ """
+ raise NotImplementedError('Subclasses must implement this method.')
+
+
+class BearerToken(TokenBase):
+ __slots__ = (
+ 'request_validator', 'token_generator',
+ 'refresh_token_generator', 'expires_in'
+ )
+
+ def __init__(self, request_validator=None, token_generator=None,
+ expires_in=None, refresh_token_generator=None):
+ self.request_validator = request_validator
+ self.token_generator = token_generator or random_token_generator
+ self.refresh_token_generator = (
+ refresh_token_generator or self.token_generator
+ )
+ self.expires_in = expires_in or 3600
+
+ def create_token(self, request, refresh_token=False, **kwargs):
+ """
+ Create a BearerToken, by default without refresh token.
+
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ :param refresh_token:
+ """
+ if "save_token" in kwargs:
+ warnings.warn("`save_token` has been deprecated, it was not called internally."
+ "If you do, call `request_validator.save_token()` instead.",
+ DeprecationWarning)
+
+ if callable(self.expires_in):
+ expires_in = self.expires_in(request)
+ else:
+ expires_in = self.expires_in
+
+ request.expires_in = expires_in
+
+ token = {
+ 'access_token': self.token_generator(request),
+ 'expires_in': expires_in,
+ 'token_type': 'Bearer',
+ }
+
+ # If provided, include - this is optional in some cases https://tools.ietf.org/html/rfc6749#section-3.3 but
+ # there is currently no mechanism to coordinate issuing a token for only a subset of the requested scopes so
+ # all tokens issued are for the entire set of requested scopes.
+ if request.scopes is not None:
+ token['scope'] = ' '.join(request.scopes)
+
+ if refresh_token:
+ if (request.refresh_token and
+ not self.request_validator.rotate_refresh_token(request)):
+ token['refresh_token'] = request.refresh_token
+ else:
+ token['refresh_token'] = self.refresh_token_generator(request)
+
+ token.update(request.extra_credentials or {})
+ return OAuth2Token(token)
+
+ def validate_request(self, request):
+ """
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ """
+ token = get_token_from_header(request)
+ return self.request_validator.validate_bearer_token(
+ token, request.scopes, request)
+
+ def estimate_type(self, request):
+ """
+ :param request: OAuthlib request.
+ :type request: oauthlib.common.Request
+ """
+ if request.headers.get('Authorization', '').split(' ')[0].lower() == 'bearer':
+ return 9
+ elif request.access_token is not None:
+ return 5
+ else:
+ return 0
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/utils.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/utils.py
new file mode 100644
index 0000000000..7dc27b3dff
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc6749/utils.py
@@ -0,0 +1,83 @@
+"""
+oauthlib.utils
+~~~~~~~~~~~~~~
+
+This module contains utility methods used by various parts of the OAuth 2 spec.
+"""
+import datetime
+import os
+from urllib.parse import quote, urlparse
+
+from oauthlib.common import urldecode
+
+
+def list_to_scope(scope):
+ """Convert a list of scopes to a space separated string."""
+ if isinstance(scope, str) or scope is None:
+ return scope
+ elif isinstance(scope, (set, tuple, list)):
+ return " ".join([str(s) for s in scope])
+ else:
+ raise ValueError("Invalid scope (%s), must be string, tuple, set, or list." % scope)
+
+
+def scope_to_list(scope):
+ """Convert a space separated string to a list of scopes."""
+ if isinstance(scope, (tuple, list, set)):
+ return [str(s) for s in scope]
+ elif scope is None:
+ return None
+ else:
+ return scope.strip().split(" ")
+
+
+def params_from_uri(uri):
+ params = dict(urldecode(urlparse(uri).query))
+ if 'scope' in params:
+ params['scope'] = scope_to_list(params['scope'])
+ return params
+
+
+def host_from_uri(uri):
+ """Extract hostname and port from URI.
+
+ Will use default port for HTTP and HTTPS if none is present in the URI.
+ """
+ default_ports = {
+ 'HTTP': '80',
+ 'HTTPS': '443',
+ }
+
+ sch, netloc, path, par, query, fra = urlparse(uri)
+ if ':' in netloc:
+ netloc, port = netloc.split(':', 1)
+ else:
+ port = default_ports.get(sch.upper())
+
+ return netloc, port
+
+
+def escape(u):
+ """Escape a string in an OAuth-compatible fashion.
+
+ TODO: verify whether this can in fact be used for OAuth 2
+
+ """
+ if not isinstance(u, str):
+ raise ValueError('Only unicode objects are escapable.')
+ return quote(u.encode('utf-8'), safe=b'~')
+
+
+def generate_age(issue_time):
+ """Generate a age parameter for MAC authentication draft 00."""
+ td = datetime.datetime.now() - issue_time
+ age = (td.microseconds + (td.seconds + td.days * 24 * 3600)
+ * 10 ** 6) / 10 ** 6
+ return str(age)
+
+
+def is_secure_transport(uri):
+ """Check if the uri is over ssl."""
+ if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
+ return True
+ return uri.lower().startswith('https://')
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/__init__.py
new file mode 100644
index 0000000000..531929dcc7
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/__init__.py
@@ -0,0 +1,10 @@
+"""
+oauthlib.oauth2.rfc8628
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 Device Authorization RFC8628.
+"""
+import logging
+
+log = logging.getLogger(__name__)
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/__init__.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/__init__.py
new file mode 100644
index 0000000000..130b52e381
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/__init__.py
@@ -0,0 +1,8 @@
+"""
+oauthlib.oauth2.rfc8628
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming OAuth 2.0 Device Authorization RFC8628.
+"""
+from .device import DeviceClient
diff --git a/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/device.py b/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/device.py
new file mode 100644
index 0000000000..b9ba2150a2
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/oauth2/rfc8628/clients/device.py
@@ -0,0 +1,95 @@
+"""
+oauthlib.oauth2.rfc8628
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OAuth 2.0 Device Authorization RFC8628.
+"""
+from oauthlib.common import add_params_to_uri
+from oauthlib.oauth2 import BackendApplicationClient, Client
+from oauthlib.oauth2.rfc6749.errors import InsecureTransportError
+from oauthlib.oauth2.rfc6749.parameters import prepare_token_request
+from oauthlib.oauth2.rfc6749.utils import is_secure_transport, list_to_scope
+
+
+class DeviceClient(Client):
+
+ """A public client utilizing the device authorization workflow.
+
+ The client can request an access token using a device code and
+ a public client id associated with the device code as defined
+ in RFC8628.
+
+ The device authorization grant type can be used to obtain both
+ access tokens and refresh tokens and is intended to be used in
+ a scenario where the device being authorized does not have a
+ user interface that is suitable for performing authentication.
+ """
+
+ grant_type = 'urn:ietf:params:oauth:grant-type:device_code'
+
+ def __init__(self, client_id, **kwargs):
+ super().__init__(client_id, **kwargs)
+ self.client_secret = kwargs.get('client_secret')
+
+ def prepare_request_uri(self, uri, scope=None, **kwargs):
+ if not is_secure_transport(uri):
+ raise InsecureTransportError()
+
+ scope = self.scope if scope is None else scope
+ params = [(('client_id', self.client_id)), (('grant_type', self.grant_type))]
+
+ if self.client_secret is not None:
+ params.append(('client_secret', self.client_secret))
+
+ if scope:
+ params.append(('scope', list_to_scope(scope)))
+
+ for k in kwargs:
+ if kwargs[k]:
+ params.append((str(k), kwargs[k]))
+
+ return add_params_to_uri(uri, params)
+
+ def prepare_request_body(self, device_code, body='', scope=None,
+ include_client_id=False, **kwargs):
+ """Add device_code to request body
+
+ The client makes a request to the token endpoint by adding the
+ device_code as a parameter using the
+ "application/x-www-form-urlencoded" format to the HTTP request
+ body.
+
+ :param body: Existing request body (URL encoded string) to embed parameters
+ into. This may contain extra parameters. Default ''.
+ :param scope: The scope of the access request as described by
+ `Section 3.3`_.
+
+ :param include_client_id: `True` to send the `client_id` in the
+ body of the upstream request. This is required
+ if the client is not authenticating with the
+ authorization server as described in
+ `Section 3.2.1`_. False otherwise (default).
+ :type include_client_id: Boolean
+
+ :param kwargs: Extra credentials to include in the token request.
+
+ The prepared body will include all provided device_code as well as
+ the ``grant_type`` parameter set to
+ ``urn:ietf:params:oauth:grant-type:device_code``::
+
+ >>> from oauthlib.oauth2 import DeviceClient
+ >>> client = DeviceClient('your_id', 'your_code')
+ >>> client.prepare_request_body(scope=['hello', 'world'])
+ 'grant_type=urn:ietf:params:oauth:grant-type:device_code&scope=hello+world'
+
+ .. _`Section 3.2.1`: https://datatracker.ietf.org/doc/html/rfc6749#section-3.2.1
+ .. _`Section 3.3`: https://datatracker.ietf.org/doc/html/rfc6749#section-3.3
+ .. _`Section 3.4`: https://datatracker.ietf.org/doc/html/rfc8628#section-3.4
+ """
+
+ kwargs['client_id'] = self.client_id
+ kwargs['include_client_id'] = include_client_id
+ scope = self.scope if scope is None else scope
+ return prepare_token_request(self.grant_type, body=body, device_code=device_code,
+ scope=scope, **kwargs)
diff --git a/contrib/python/oauthlib/oauthlib/openid/__init__.py b/contrib/python/oauthlib/oauthlib/openid/__init__.py
new file mode 100644
index 0000000000..e317437479
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/__init__.py
@@ -0,0 +1,7 @@
+"""
+oauthlib.openid
+~~~~~~~~~~~~~~
+
+"""
+from .connect.core.endpoints import Server, UserInfoEndpoint
+from .connect.core.request_validator import RequestValidator
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/__init__.py b/contrib/python/oauthlib/oauthlib/openid/connect/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/__init__.py
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/__init__.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/__init__.py
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/__init__.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/__init__.py
new file mode 100644
index 0000000000..7017ff4f32
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/__init__.py
@@ -0,0 +1,9 @@
+"""
+oauthlib.oopenid.core
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various logic needed
+for consuming and providing OpenID Connect
+"""
+from .pre_configured import Server
+from .userinfo import UserInfoEndpoint
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/pre_configured.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/pre_configured.py
new file mode 100644
index 0000000000..8ce8bee67b
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/pre_configured.py
@@ -0,0 +1,97 @@
+"""
+oauthlib.openid.connect.core.endpoints.pre_configured
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of various endpoints needed
+for providing OpenID Connect servers.
+"""
+from oauthlib.oauth2.rfc6749.endpoints import (
+ AuthorizationEndpoint, IntrospectEndpoint, ResourceEndpoint,
+ RevocationEndpoint, TokenEndpoint,
+)
+from oauthlib.oauth2.rfc6749.grant_types import (
+ AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant,
+ ClientCredentialsGrant, ImplicitGrant as OAuth2ImplicitGrant,
+ RefreshTokenGrant, ResourceOwnerPasswordCredentialsGrant,
+)
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+from ..grant_types import AuthorizationCodeGrant, HybridGrant, ImplicitGrant
+from ..grant_types.dispatchers import (
+ AuthorizationCodeGrantDispatcher, AuthorizationTokenGrantDispatcher,
+ ImplicitTokenGrantDispatcher,
+)
+from ..tokens import JWTToken
+from .userinfo import UserInfoEndpoint
+
+
+class Server(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint,
+ ResourceEndpoint, RevocationEndpoint, UserInfoEndpoint):
+
+ """An all-in-one endpoint featuring all four major grant types."""
+
+ def __init__(self, request_validator, token_expires_in=None,
+ token_generator=None, refresh_token_generator=None,
+ *args, **kwargs):
+ """Construct a new all-grants-in-one server.
+
+ :param request_validator: An implementation of
+ oauthlib.oauth2.RequestValidator.
+ :param token_expires_in: An int or a function to generate a token
+ expiration offset (in seconds) given a
+ oauthlib.common.Request object.
+ :param token_generator: A function to generate a token from a request.
+ :param refresh_token_generator: A function to generate a token from a
+ request for the refresh token.
+ :param kwargs: Extra parameters to pass to authorization-,
+ token-, resource-, and revocation-endpoint constructors.
+ """
+ self.auth_grant = OAuth2AuthorizationCodeGrant(request_validator)
+ self.implicit_grant = OAuth2ImplicitGrant(request_validator)
+ self.password_grant = ResourceOwnerPasswordCredentialsGrant(
+ request_validator)
+ self.credentials_grant = ClientCredentialsGrant(request_validator)
+ self.refresh_grant = RefreshTokenGrant(request_validator)
+ self.openid_connect_auth = AuthorizationCodeGrant(request_validator)
+ self.openid_connect_implicit = ImplicitGrant(request_validator)
+ self.openid_connect_hybrid = HybridGrant(request_validator)
+
+ self.bearer = BearerToken(request_validator, token_generator,
+ token_expires_in, refresh_token_generator)
+
+ self.jwt = JWTToken(request_validator, token_generator,
+ token_expires_in, refresh_token_generator)
+
+ self.auth_grant_choice = AuthorizationCodeGrantDispatcher(default_grant=self.auth_grant, oidc_grant=self.openid_connect_auth)
+ self.implicit_grant_choice = ImplicitTokenGrantDispatcher(default_grant=self.implicit_grant, oidc_grant=self.openid_connect_implicit)
+
+ # See http://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#Combinations for valid combinations
+ # internally our AuthorizationEndpoint will ensure they can appear in any order for any valid combination
+ AuthorizationEndpoint.__init__(self, default_response_type='code',
+ response_types={
+ 'code': self.auth_grant_choice,
+ 'token': self.implicit_grant_choice,
+ 'id_token': self.openid_connect_implicit,
+ 'id_token token': self.openid_connect_implicit,
+ 'code token': self.openid_connect_hybrid,
+ 'code id_token': self.openid_connect_hybrid,
+ 'code id_token token': self.openid_connect_hybrid,
+ 'none': self.auth_grant
+ },
+ default_token_type=self.bearer)
+
+ self.token_grant_choice = AuthorizationTokenGrantDispatcher(request_validator, default_grant=self.auth_grant, oidc_grant=self.openid_connect_auth)
+
+ TokenEndpoint.__init__(self, default_grant_type='authorization_code',
+ grant_types={
+ 'authorization_code': self.token_grant_choice,
+ 'password': self.password_grant,
+ 'client_credentials': self.credentials_grant,
+ 'refresh_token': self.refresh_grant,
+ },
+ default_token_type=self.bearer)
+ ResourceEndpoint.__init__(self, default_token='Bearer',
+ token_types={'Bearer': self.bearer, 'JWT': self.jwt})
+ RevocationEndpoint.__init__(self, request_validator)
+ IntrospectEndpoint.__init__(self, request_validator)
+ UserInfoEndpoint.__init__(self, request_validator)
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/userinfo.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/userinfo.py
new file mode 100644
index 0000000000..7aa2bbe97d
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/endpoints/userinfo.py
@@ -0,0 +1,106 @@
+"""
+oauthlib.openid.connect.core.endpoints.userinfo
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module is an implementation of userinfo endpoint.
+"""
+import json
+import logging
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.endpoints.base import (
+ BaseEndpoint, catch_errors_and_unavailability,
+)
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+log = logging.getLogger(__name__)
+
+
class UserInfoEndpoint(BaseEndpoint):
    """Authorizes access to userinfo resource.
    """

    def __init__(self, request_validator):
        self.bearer = BearerToken(request_validator, None, None, None)
        self.request_validator = request_validator
        super().__init__()

    @catch_errors_and_unavailability
    def create_userinfo_response(self, uri, http_method='GET', body=None, headers=None):
        """Validate BearerToken and return userinfo from RequestValidator

        The UserInfo Endpoint MUST return a
        content-type header to indicate which format is being returned. The
        content-type of the HTTP response MUST be application/json if the
        response body is a text JSON object; the response body SHOULD be encoded
        using UTF-8.
        """
        userinfo_request = Request(uri, http_method, body, headers)
        userinfo_request.scopes = ["openid"]
        self.validate_userinfo_request(userinfo_request)

        claims = self.request_validator.get_userinfo_claims(userinfo_request)
        if claims is None:
            log.error('Userinfo MUST have claims for %r.', userinfo_request)
            raise errors.ServerError(status_code=500)

        if isinstance(claims, dict):
            # JSON response: the "sub" claim is mandatory.
            if "sub" not in claims:
                log.error('Userinfo MUST have "sub" for %r.', userinfo_request)
                raise errors.ServerError(status_code=500)
            resp_headers = {
                'Content-Type': 'application/json'
            }
            response_body = json.dumps(claims)
        elif isinstance(claims, str):
            # A plain string is treated as an already-serialized JWT.
            resp_headers = {
                'Content-Type': 'application/jwt'
            }
            response_body = claims
        else:
            log.error('Userinfo return unknown response for %r.', userinfo_request)
            raise errors.ServerError(status_code=500)

        log.debug('Userinfo access valid for %r.', userinfo_request)
        return resp_headers, response_body, 200

    def validate_userinfo_request(self, request):
        """Ensure the request carries a valid Bearer token with "openid" scope.

        5.3.1. UserInfo Request
        The Client sends the UserInfo Request using either HTTP GET or HTTP
        POST. The Access Token obtained from an OpenID Connect Authentication
        Request MUST be sent as a Bearer Token, per `Section 2`_ of OAuth 2.0
        Bearer Token Usage [RFC6750].

        5.3.3. UserInfo Error Response
        When an error condition occurs, the UserInfo Endpoint returns an Error
        Response as defined in `Section 3`_ of OAuth 2.0 Bearer Token Usage
        [RFC6750]; here that is achieved by raising InvalidTokenError (401)
        or InsufficientScopeError (403), which the endpoint error handling
        converts into the proper HTTP response.

        .. _`Section 2`: https://datatracker.ietf.org/doc/html/rfc6750#section-2
        .. _`Section 3`: https://datatracker.ietf.org/doc/html/rfc6750#section-3
        """
        if not self.bearer.validate_request(request):
            raise errors.InvalidTokenError()
        if "openid" not in request.scopes:
            raise errors.InsufficientScopeError()
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/exceptions.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/exceptions.py
new file mode 100644
index 0000000000..099b84e2da
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/exceptions.py
@@ -0,0 +1,149 @@
+"""
+oauthlib.openid.connect.core.exceptions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Error used both by OAuth 2 clients and providers to represent the spec
+defined error responses for all four core grant types.
+"""
+from oauthlib.oauth2.rfc6749.errors import FatalClientError, OAuth2Error
+
+
class FatalOpenIDClientError(FatalClientError):
    """Base class for OpenID Connect errors that inherit FatalClientError
    semantics (see oauthlib.oauth2.rfc6749.errors.FatalClientError)."""
    pass
+
+
class OpenIDClientError(OAuth2Error):
    """Base class for the OpenID Connect error responses defined in this
    module."""
    pass
+
+
class InteractionRequired(OpenIDClientError):
    """
    The Authorization Server requires End-User interaction to proceed.

    This error MAY be returned when the prompt parameter value in the
    Authentication Request is none, but the Authentication Request cannot be
    completed without displaying a user interface for End-User interaction.
    """
    # OpenID Connect Core 1.0 authentication error response code.
    error = 'interaction_required'
    status_code = 401
+
+
class LoginRequired(OpenIDClientError):
    """
    The Authorization Server requires End-User authentication.

    This error MAY be returned when the prompt parameter value in the
    Authentication Request is none, but the Authentication Request cannot be
    completed without displaying a user interface for End-User authentication.
    """
    # OpenID Connect Core 1.0 authentication error response code.
    error = 'login_required'
    status_code = 401
+
+
class AccountSelectionRequired(OpenIDClientError):
    """
    The End-User is REQUIRED to select a session at the Authorization Server.

    The End-User MAY be authenticated at the Authorization Server with
    different associated accounts, but the End-User did not select a session.
    This error MAY be returned when the prompt parameter value in the
    Authentication Request is none, but the Authentication Request cannot be
    completed without displaying a user interface to prompt for a session to
    use.
    """
    # NOTE(review): unlike the sibling prompt-related errors, no status_code
    # is set here, so the OAuth2Error default applies — confirm intended.
    error = 'account_selection_required'
+
+
class ConsentRequired(OpenIDClientError):
    """
    The Authorization Server requires End-User consent.

    This error MAY be returned when the prompt parameter value in the
    Authentication Request is none, but the Authentication Request cannot be
    completed without displaying a user interface for End-User consent.
    """
    # OpenID Connect Core 1.0 authentication error response code.
    error = 'consent_required'
    status_code = 401
+
+
class InvalidRequestURI(OpenIDClientError):
    """
    The request_uri in the Authorization Request returns an error or
    contains invalid data.
    """
    error = 'invalid_request_uri'
    description = 'The request_uri in the Authorization Request returns an ' \
                  'error or contains invalid data.'
+
+
class InvalidRequestObject(OpenIDClientError):
    """
    The request parameter contains an invalid Request Object.
    """
    error = 'invalid_request_object'
    description = 'The request parameter contains an invalid Request Object.'
+
+
class RequestNotSupported(OpenIDClientError):
    """
    The OP does not support use of the request parameter.
    """
    error = 'request_not_supported'
    description = 'The request parameter is not supported.'
+
+
class RequestURINotSupported(OpenIDClientError):
    """
    The OP does not support use of the request_uri parameter.
    """
    error = 'request_uri_not_supported'
    description = 'The request_uri parameter is not supported.'
+
+
class RegistrationNotSupported(OpenIDClientError):
    """
    The OP does not support use of the registration parameter.
    """
    error = 'registration_not_supported'
    description = 'The registration parameter is not supported.'
+
+
class InvalidTokenError(OAuth2Error):
    """
    The access token provided is expired, revoked, malformed, or
    invalid for other reasons. The resource SHOULD respond with
    the HTTP 401 (Unauthorized) status code. The client MAY
    request a new access token and retry the protected resource
    request.
    """
    # Mirrors the RFC 6750 "invalid_token" Bearer Token error.
    error = 'invalid_token'
    status_code = 401
    description = ("The access token provided is expired, revoked, malformed, "
                   "or invalid for other reasons.")
+
+
class InsufficientScopeError(OAuth2Error):
    """
    The request requires higher privileges than provided by the
    access token. The resource server SHOULD respond with the HTTP
    403 (Forbidden) status code and MAY include the "scope"
    attribute with the scope necessary to access the protected
    resource.
    """
    # Mirrors the RFC 6750 "insufficient_scope" Bearer Token error.
    error = 'insufficient_scope'
    status_code = 403
    description = ("The request requires higher privileges than provided by "
                   "the access token.")
+
+
def raise_from_error(error, params=None):
    """Raise the exception class registered for an error code.

    Scans this module for the exception class whose ``error`` attribute
    matches *error* and raises it, populated with the description, uri
    and state found in *params*. Returns (None) silently when no class
    matches.

    :param error: Error code string from an error response.
    :param params: Optional dict of error response parameters
        (``error_description``, ``error_uri``, ``state``).
    """
    import inspect
    import sys

    # Guard against being called without response parameters; previously
    # ``params.get`` was dereferenced directly and crashed on None.
    params = params or {}
    kwargs = {
        'description': params.get('error_description'),
        'uri': params.get('error_uri'),
        'state': params.get('state')
    }
    for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
        # getattr guard: not every class visible in this module is
        # guaranteed to define an ``error`` attribute.
        if getattr(cls, 'error', None) == error:
            raise cls(**kwargs)
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/__init__.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/__init__.py
new file mode 100644
index 0000000000..8dad5f607b
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/__init__.py
@@ -0,0 +1,13 @@
+"""
+oauthlib.openid.connect.core.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+from .authorization_code import AuthorizationCodeGrant
+from .base import GrantTypeBase
+from .dispatchers import (
+ AuthorizationCodeGrantDispatcher, AuthorizationTokenGrantDispatcher,
+ ImplicitTokenGrantDispatcher,
+)
+from .hybrid import HybridGrant
+from .implicit import ImplicitGrant
+from .refresh_token import RefreshTokenGrant
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/authorization_code.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/authorization_code.py
new file mode 100644
index 0000000000..6b2dcc3bdd
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/authorization_code.py
@@ -0,0 +1,43 @@
+"""
+oauthlib.openid.connect.core.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+from oauthlib.oauth2.rfc6749.grant_types.authorization_code import (
+ AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant,
+)
+
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class AuthorizationCodeGrant(GrantTypeBase):
    """OpenID Connect flavour of the OAuth 2.0 Authorization Code grant.

    Delegates the core protocol work to the wrapped OAuth 2.0 grant and
    registers the OpenID-specific validator and id_token modifier.
    """

    def __init__(self, request_validator=None, **kwargs):
        # proxy_target must be assigned first: other attribute lookups on
        # this object are forwarded to it by GrantTypeBase.__getattr__.
        self.proxy_target = OAuth2AuthorizationCodeGrant(
            request_validator=request_validator, **kwargs)
        self.register_token_modifier(self.add_id_token)
        self.custom_validators.post_auth.append(
            self.openid_authorization_validator)

    def add_id_token(self, token, token_handler, request):
        """
        Construct an initial version of id_token, and let the
        request_validator sign or encrypt it.

        The authorization_code version of this method is used to
        retrieve the nonce accordingly to the code storage.
        """
        # Treat it as normal OAuth 2 auth code request if openid is not present
        requested_scopes = request.scopes or []
        if 'openid' not in requested_scopes:
            return token

        nonce = self.request_validator.get_authorization_code_nonce(
            request.client_id,
            request.code,
            request.redirect_uri,
            request,
        )
        return super().add_id_token(token, token_handler, request, nonce=nonce)
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/base.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/base.py
new file mode 100644
index 0000000000..33411dad75
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/base.py
@@ -0,0 +1,326 @@
+import base64
+import hashlib
+import logging
+import time
+from json import loads
+
+from oauthlib.oauth2.rfc6749.errors import (
+ ConsentRequired, InvalidRequestError, LoginRequired,
+)
+
+log = logging.getLogger(__name__)
+
+
class GrantTypeBase:
    """Base class for the OpenID Connect grant types.

    Wraps a plain OAuth 2.0 grant type handler (``proxy_target``) and
    layers OpenID Connect behaviour on top of it: claims inflation,
    id_token construction and the OIDC-specific authorization request
    validation.
    """

    # Just proxy the majority of method calls through to the
    # proxy_target grant type handler, which will usually be either
    # the standard OAuth2 AuthCode or Implicit grant types.
    def __getattr__(self, attr):
        return getattr(self.proxy_target, attr)

    def __setattr__(self, attr, value):
        # These attributes belong to the wrapped OAuth 2.0 grant, so writes
        # are forwarded to it; everything else is stored on this object.
        proxied_attrs = {'refresh_token', 'response_types'}
        if attr in proxied_attrs:
            setattr(self.proxy_target, attr, value)
        else:
            # Zero-argument super() instead of the previous
            # super(OpenIDConnectBase, self): the old form depended on the
            # module-level alias defined *after* this class body.
            super().__setattr__(attr, value)

    def validate_authorization_request(self, request):
        """Validates the OpenID Connect authorization request parameters.

        :returns: (list of scopes, dict of request info)
        """
        return self.proxy_target.validate_authorization_request(request)

    def _inflate_claims(self, request):
        # this may be called multiple times in a single request so make sure we only de-serialize the claims once
        if request.claims and not isinstance(request.claims, dict):
            # specific claims are requested during the Authorization Request and may be requested for inclusion
            # in either the id_token or the UserInfo endpoint response
            # see http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
            try:
                request.claims = loads(request.claims)
            except Exception as ex:
                # Chain the original decoding error for easier debugging.
                raise InvalidRequestError(
                    description="Malformed claims parameter",
                    uri="http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter") from ex

    def id_token_hash(self, value, hashfunc=hashlib.sha256):
        """
        Its value is the base64url encoding of the left-most half of the
        hash of the octets of the ASCII representation of the access_token
        value, where the hash algorithm used is the hash algorithm used in
        the alg Header Parameter of the ID Token's JOSE Header.

        For instance, if the alg is RS256, hash the access_token value
        with SHA-256, then take the left-most 128 bits and
        base64url-encode them.
        For instance, if the alg is HS512, hash the code value with
        SHA-512, then take the left-most 256 bits and base64url-encode
        them. The c_hash value is a case-sensitive string.

        Example of hash from OIDC specification (bound to a JWS using RS256):

        code:
        Qcb0Orv1zh30vL1MPRsbm-diHiMwcLyZvn1arpZv-Jxf_11jnpEX3Tgfvk

        c_hash:
        LDktKdoQak3Pk0cnXxCltA
        """
        digest = hashfunc(value.encode()).digest()
        left_most = len(digest) // 2
        # base64url without padding, as required for at_hash/c_hash values.
        return base64.urlsafe_b64encode(digest[:left_most]).decode().rstrip("=")

    def add_id_token(self, token, token_handler, request, nonce=None):
        """
        Construct an initial version of id_token, and let the
        request_validator sign or encrypt it.

        The initial version can contain the fields below, accordingly
        to the spec:
        - aud
        - iat
        - nonce
        - at_hash
        - c_hash
        """
        # Treat it as normal OAuth 2 auth code request if openid is not present
        if not request.scopes or 'openid' not in request.scopes:
            return token

        # Only add an id token on auth/token step if asked for.
        if request.response_type and 'id_token' not in request.response_type:
            return token

        # Implementation mint its own id_token without help.
        id_token = self.request_validator.get_id_token(token, token_handler, request)
        if id_token:
            token['id_token'] = id_token
            return token

        # Fallback for asking some help from oauthlib framework.
        # Start with technicals fields bound to the specification.
        id_token = {}
        id_token['aud'] = request.client_id
        id_token['iat'] = int(time.time())

        # nonce is REQUIRED when response_type value is:
        # - id_token token (Implicit)
        # - id_token (Implicit)
        # - code id_token (Hybrid)
        # - code id_token token (Hybrid)
        #
        # nonce is OPTIONAL when response_type value is:
        # - code (Authorization Code)
        # - code token (Hybrid)
        if nonce is not None:
            id_token["nonce"] = nonce

        # at_hash is REQUIRED when response_type value is:
        # - id_token token (Implicit)
        # - code id_token token (Hybrid)
        #
        # at_hash is OPTIONAL when:
        # - code (Authorization code)
        # - code id_token (Hybrid)
        # - code token (Hybrid)
        #
        # at_hash MAY NOT be used when:
        # - id_token (Implicit)
        if "access_token" in token:
            id_token["at_hash"] = self.id_token_hash(token["access_token"])

        # c_hash is REQUIRED when response_type value is:
        # - code id_token (Hybrid)
        # - code id_token token (Hybrid)
        #
        # c_hash is OPTIONAL for others.
        if "code" in token:
            id_token["c_hash"] = self.id_token_hash(token["code"])

        # Call request_validator to complete/sign/encrypt id_token
        token['id_token'] = self.request_validator.finalize_id_token(id_token, token, token_handler, request)

        return token

    def openid_authorization_validator(self, request):
        """Perform OpenID Connect specific authorization request validation.

        nonce
                OPTIONAL. String value used to associate a Client session with
                an ID Token, and to mitigate replay attacks. The value is
                passed through unmodified from the Authentication Request to
                the ID Token. Sufficient entropy MUST be present in the nonce
                values used to prevent attackers from guessing values

        display
                OPTIONAL. ASCII string value that specifies how the
                Authorization Server displays the authentication and consent
                user interface pages to the End-User. The defined values are:

                    page - The Authorization Server SHOULD display the
                    authentication and consent UI consistent with a full User
                    Agent page view. If the display parameter is not specified,
                    this is the default display mode.

                    popup - The Authorization Server SHOULD display the
                    authentication and consent UI consistent with a popup User
                    Agent window. The popup User Agent window should be of an
                    appropriate size for a login-focused dialog and should not
                    obscure the entire window that it is popping up over.

                    touch - The Authorization Server SHOULD display the
                    authentication and consent UI consistent with a device that
                    leverages a touch interface.

                    wap - The Authorization Server SHOULD display the
                    authentication and consent UI consistent with a "feature
                    phone" type display.

                The Authorization Server MAY also attempt to detect the
                capabilities of the User Agent and present an appropriate
                display.

        prompt
                OPTIONAL. Space delimited, case sensitive list of ASCII string
                values that specifies whether the Authorization Server prompts
                the End-User for reauthentication and consent. The defined
                values are:

                    none - The Authorization Server MUST NOT display any
                    authentication or consent user interface pages. An error is
                    returned if an End-User is not already authenticated or the
                    Client does not have pre-configured consent for the
                    requested Claims or does not fulfill other conditions for
                    processing the request. The error code will typically be
                    login_required, interaction_required, or another code
                    defined in Section 3.1.2.6. This can be used as a method to
                    check for existing authentication and/or consent.

                    login - The Authorization Server SHOULD prompt the End-User
                    for reauthentication. If it cannot reauthenticate the
                    End-User, it MUST return an error, typically
                    login_required.

                    consent - The Authorization Server SHOULD prompt the
                    End-User for consent before returning information to the
                    Client. If it cannot obtain consent, it MUST return an
                    error, typically consent_required.

                    select_account - The Authorization Server SHOULD prompt the
                    End-User to select a user account. This enables an End-User
                    who has multiple accounts at the Authorization Server to
                    select amongst the multiple accounts that they might have
                    current sessions for. If it cannot obtain an account
                    selection choice made by the End-User, it MUST return an
                    error, typically account_selection_required.

                The prompt parameter can be used by the Client to make sure
                that the End-User is still present for the current session or
                to bring attention to the request. If this parameter contains
                none with any other value, an error is returned.

        max_age
                OPTIONAL. Maximum Authentication Age. Specifies the allowable
                elapsed time in seconds since the last time the End-User was
                actively authenticated by the OP. If the elapsed time is
                greater than this value, the OP MUST attempt to actively
                re-authenticate the End-User. (The max_age request parameter
                corresponds to the OpenID 2.0 PAPE [OpenID.PAPE] max_auth_age
                request parameter.) When max_age is used, the ID Token returned
                MUST include an auth_time Claim Value.

        ui_locales
                OPTIONAL. End-User's preferred languages and scripts for the
                user interface, represented as a space-separated list of BCP47
                [RFC5646] language tag values, ordered by preference. For
                instance, the value "fr-CA fr en" represents a preference for
                French as spoken in Canada, then French (without a region
                designation), followed by English (without a region
                designation). An error SHOULD NOT result if some or all of the
                requested locales are not supported by the OpenID Provider.

        id_token_hint
                OPTIONAL. ID Token previously issued by the Authorization
                Server being passed as a hint about the End-User's current or
                past authenticated session with the Client. If the End-User
                identified by the ID Token is logged in or is logged in by the
                request, then the Authorization Server returns a positive
                response; otherwise, it SHOULD return an error, such as
                login_required. When possible, an id_token_hint SHOULD be
                present when prompt=none is used and an invalid_request error
                MAY be returned if it is not; however, the server SHOULD
                respond successfully when possible, even if it is not present.
                The Authorization Server need not be listed as an audience of
                the ID Token when it is used as an id_token_hint value. If the
                ID Token received by the RP from the OP is encrypted, to use it
                as an id_token_hint, the Client MUST decrypt the signed ID
                Token contained within the encrypted ID Token. The Client MAY
                re-encrypt the signed ID token to the Authentication Server
                using a key that enables the server to decrypt the ID Token,
                and use the re-encrypted ID token as the id_token_hint value.

        login_hint
                OPTIONAL. Hint to the Authorization Server about the login
                identifier the End-User might use to log in (if necessary).
                This hint can be used by an RP if it first asks the End-User
                for their e-mail address (or other identifier) and then wants
                to pass that value as a hint to the discovered authorization
                service. It is RECOMMENDED that the hint value match the value
                used for discovery. This value MAY also be a phone number in
                the format specified for the phone_number Claim. The use of
                this parameter is left to the OP's discretion.

        acr_values
                OPTIONAL. Requested Authentication Context Class Reference
                values. Space-separated string that specifies the acr values
                that the Authorization Server is being requested to use for
                processing this Authentication Request, with the values
                appearing in order of preference. The Authentication Context
                Class satisfied by the authentication performed is returned as
                the acr Claim Value, as specified in Section 2. The acr Claim
                is requested as a Voluntary Claim by this parameter.
        """

        # Treat it as normal OAuth 2 auth code request if openid is not present
        if not request.scopes or 'openid' not in request.scopes:
            return {}

        # prompt may arrive as a space-delimited string or a list; normalize
        # to a set for the mutual-exclusion check below.
        prompt = request.prompt if request.prompt else []
        if hasattr(prompt, 'split'):
            prompt = prompt.strip().split()
        prompt = set(prompt)

        if 'none' in prompt:

            if len(prompt) > 1:
                msg = "Prompt none is mutually exclusive with other values."
                raise InvalidRequestError(request=request, description=msg)

            if not self.request_validator.validate_silent_login(request):
                raise LoginRequired(request=request)

            if not self.request_validator.validate_silent_authorization(request):
                raise ConsentRequired(request=request)

        self._inflate_claims(request)

        if not self.request_validator.validate_user_match(
            request.id_token_hint, request.scopes, request.claims, request):
            msg = "Session user does not match client supplied user."
            raise LoginRequired(request=request, description=msg)

        request_info = {
            'display': request.display,
            'nonce': request.nonce,
            'prompt': prompt,
            'ui_locales': request.ui_locales.split() if request.ui_locales else [],
            'id_token_hint': request.id_token_hint,
            'login_hint': request.login_hint,
            'claims': request.claims
        }

        return request_info


# Backwards-compatible alias for the pre-rename class name.
OpenIDConnectBase = GrantTypeBase
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/dispatchers.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/dispatchers.py
new file mode 100644
index 0000000000..5aa7d4698b
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/dispatchers.py
@@ -0,0 +1,101 @@
+import logging
+
+log = logging.getLogger(__name__)
+
+
class Dispatcher:
    # Base marker class for grant dispatchers. Concrete subclasses route an
    # incoming request to either ``default_grant`` (plain OAuth 2.0
    # handling) or ``oidc_grant`` (OpenID Connect handling), keyed off the
    # requested scopes and/or response type.
    default_grant = None
    oidc_grant = None
+
+
class AuthorizationCodeGrantDispatcher(Dispatcher):
    """
    This is an adapter class that will route simple Authorization Code
    requests, those that have `response_type=code` and a scope including
    `openid` to either the `default_grant` or the `oidc_grant` based on
    the scopes requested.
    """

    def __init__(self, default_grant=None, oidc_grant=None):
        self.default_grant = default_grant
        self.oidc_grant = oidc_grant

    def _handler_for_request(self, request):
        """Return the OIDC grant when "openid" was requested, else the default."""
        wants_openid = bool(request.scopes) and "openid" in request.scopes
        handler = self.oidc_grant if wants_openid else self.default_grant
        log.debug('Selecting handler for request %r.', handler)
        return handler

    def create_authorization_response(self, request, token_handler):
        """Read scope and route to the designated handler."""
        handler = self._handler_for_request(request)
        return handler.create_authorization_response(request, token_handler)

    def validate_authorization_request(self, request):
        """Read scope and route to the designated handler."""
        handler = self._handler_for_request(request)
        return handler.validate_authorization_request(request)
+
+
class ImplicitTokenGrantDispatcher(Dispatcher):
    """
    This is an adapter class that will route simple Authorization
    requests, those that have `id_token` in `response_type` and a scope
    including `openid` to either the `default_grant` or the `oidc_grant`
    based on the scopes requested.
    """

    def __init__(self, default_grant=None, oidc_grant=None):
        self.default_grant = default_grant
        self.oidc_grant = oidc_grant

    def _handler_for_request(self, request):
        """Return the OIDC grant for openid + id_token requests, else the default."""
        if request.scopes and "openid" in request.scopes and 'id_token' in request.response_type:
            handler = self.oidc_grant
        else:
            handler = self.default_grant
        log.debug('Selecting handler for request %r.', handler)
        return handler

    def create_authorization_response(self, request, token_handler):
        """Read scope and route to the designated handler."""
        handler = self._handler_for_request(request)
        return handler.create_authorization_response(request, token_handler)

    def validate_authorization_request(self, request):
        """Read scope and route to the designated handler."""
        handler = self._handler_for_request(request)
        return handler.validate_authorization_request(request)
+
+
class AuthorizationTokenGrantDispatcher(Dispatcher):
    """
    This is an adapter class that will route simple Token requests, those that authorization_code have a scope
    including 'openid' to either the default_grant or the oidc_grant based on the scopes requested.
    """

    def __init__(self, request_validator, default_grant=None, oidc_grant=None):
        self.default_grant = default_grant
        self.oidc_grant = oidc_grant
        self.request_validator = request_validator

    def _handler_for_request(self, request):
        """Look up the original authorization code's scopes to pick a handler."""
        parameters = dict(request.decoded_body)
        client_id = parameters.get('client_id', None)
        code = parameters.get('code', None)
        redirect_uri = parameters.get('redirect_uri', None)

        # If code is not present fallback to `default_grant` which will
        # raise an error for the missing `code` in `create_token_response` step.
        scopes = ()
        if code:
            scopes = self.request_validator.get_authorization_code_scopes(
                client_id, code, redirect_uri, request)

        handler = self.oidc_grant if 'openid' in scopes else self.default_grant
        log.debug('Selecting handler for request %r.', handler)
        return handler

    def create_token_response(self, request, token_handler):
        """Read scope and route to the designated handler."""
        handler = self._handler_for_request(request)
        return handler.create_token_response(request, token_handler)
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/hybrid.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/hybrid.py
new file mode 100644
index 0000000000..7cb0758b81
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/hybrid.py
@@ -0,0 +1,63 @@
+"""
+oauthlib.openid.connect.core.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+from oauthlib.oauth2.rfc6749.errors import InvalidRequestError
+from oauthlib.oauth2.rfc6749.grant_types.authorization_code import (
+ AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant,
+)
+
+from ..request_validator import RequestValidator
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class HybridGrant(GrantTypeBase):
    """OpenID Connect Hybrid flow grant.

    Handles the response types that combine an authorization code with an
    id_token and/or access token issued directly from the authorization
    endpoint.
    """

    def __init__(self, request_validator=None, **kwargs):
        self.request_validator = request_validator or RequestValidator()

        self.proxy_target = OAuth2AuthorizationCodeGrant(
            request_validator=request_validator, **kwargs)
        # All hybrid response types should be fragment-encoded.
        self.proxy_target.default_response_mode = "fragment"
        for response_type in ('code id_token', 'code token', 'code id_token token'):
            self.register_response_type(response_type)
        self.custom_validators.post_auth.append(
            self.openid_authorization_validator)
        # Hybrid flows can return the id_token from the authorization
        # endpoint as part of the 'code' response
        self.register_code_modifier(self.add_token)
        self.register_code_modifier(self.add_id_token)
        self.register_token_modifier(self.add_id_token)

    def add_id_token(self, token, token_handler, request):
        """Attach an id_token bound to the nonce from the authorization request."""
        return super().add_id_token(token, token_handler, request, nonce=request.nonce)

    def openid_authorization_validator(self, request):
        """Additional validation when following the Authorization Code flow.
        """
        request_info = super().openid_authorization_validator(request)
        if not request_info:  # returns immediately if OAuth2.0
            return request_info

        # REQUIRED if the Response Type of the request is `code
        # id_token` or `code id_token token` and OPTIONAL when the
        # Response Type of the request is `code token`. It is a string
        # value used to associate a Client session with an ID Token,
        # and to mitigate replay attacks. The value is passed through
        # unmodified from the Authentication Request to the ID
        # Token. Sufficient entropy MUST be present in the `nonce`
        # values used to prevent attackers from guessing values. For
        # implementation notes, see Section 15.5.2.
        nonce_required = request.response_type in ["code id_token", "code id_token token"]
        if nonce_required and not request.nonce:
            raise InvalidRequestError(
                request=request,
                description='Request is missing mandatory nonce parameter.'
            )
        return request_info
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/implicit.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/implicit.py
new file mode 100644
index 0000000000..a4fe6049bc
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/implicit.py
@@ -0,0 +1,51 @@
+"""
+oauthlib.openid.connect.core.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+from oauthlib.oauth2.rfc6749.errors import InvalidRequestError
+from oauthlib.oauth2.rfc6749.grant_types.implicit import (
+ ImplicitGrant as OAuth2ImplicitGrant,
+)
+
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class ImplicitGrant(GrantTypeBase):
    """OpenID Connect Implicit flow grant (`id_token` / `id_token token`)."""

    def __init__(self, request_validator=None, **kwargs):
        self.proxy_target = OAuth2ImplicitGrant(
            request_validator=request_validator, **kwargs)
        for response_type in ('id_token', 'id_token token'):
            self.register_response_type(response_type)
        self.custom_validators.post_auth.append(
            self.openid_authorization_validator)
        self.register_token_modifier(self.add_id_token)

    def add_id_token(self, token, token_handler, request):
        """Attach an id_token, echoing back the request state if present."""
        if 'state' not in token and request.state:
            token['state'] = request.state
        return super().add_id_token(token, token_handler, request, nonce=request.nonce)

    def openid_authorization_validator(self, request):
        """Additional validation when following the implicit flow.
        """
        request_info = super().openid_authorization_validator(request)
        if not request_info:  # returns immediately if OAuth2.0
            return request_info

        # REQUIRED. String value used to associate a Client session with an ID
        # Token, and to mitigate replay attacks. The value is passed through
        # unmodified from the Authentication Request to the ID Token.
        # Sufficient entropy MUST be present in the nonce values used to
        # prevent attackers from guessing values. For implementation notes, see
        # Section 15.5.2.
        if not request.nonce:
            raise InvalidRequestError(
                request=request,
                description='Request is missing mandatory nonce parameter.'
            )
        return request_info
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/refresh_token.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/refresh_token.py
new file mode 100644
index 0000000000..43e4499c53
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/grant_types/refresh_token.py
@@ -0,0 +1,34 @@
+"""
+oauthlib.openid.connect.core.grant_types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+from oauthlib.oauth2.rfc6749.grant_types.refresh_token import (
+ RefreshTokenGrant as OAuth2RefreshTokenGrant,
+)
+
+from .base import GrantTypeBase
+
+log = logging.getLogger(__name__)
+
+
class RefreshTokenGrant(GrantTypeBase):
    """OpenID Connect layering over the OAuth 2.0 Refresh Token grant."""

    def __init__(self, request_validator=None, **kwargs):
        # Delegate the core refresh-token processing to the OAuth2 grant;
        # proxy_target must be set before register_token_modifier, which is
        # resolved through it.
        self.proxy_target = OAuth2RefreshTokenGrant(
            request_validator=request_validator, **kwargs)
        self.register_token_modifier(self.add_id_token)

    def add_id_token(self, token, token_handler, request):
        """
        Construct an initial version of id_token, and let the
        request_validator sign or encrypt it.

        The authorization_code version of this method is used to
        retrieve the nonce accordingly to the code storage.
        """
        # The validator decides whether a refreshed access token should come
        # with a fresh id_token at all; if not, pass the token through as-is.
        if self.request_validator.refresh_id_token(request):
            return super().add_id_token(token, token_handler, request)
        return token
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/request_validator.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/request_validator.py
new file mode 100644
index 0000000000..47c4cd9406
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/request_validator.py
@@ -0,0 +1,320 @@
+"""
+oauthlib.openid.connect.core.request_validator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""
+import logging
+
+from oauthlib.oauth2.rfc6749.request_validator import (
+ RequestValidator as OAuth2RequestValidator,
+)
+
+log = logging.getLogger(__name__)
+
+
class RequestValidator(OAuth2RequestValidator):
    """OAuth 2.0 request validator extended with the OpenID Connect hooks.

    Subclasses implement the storage- and policy-specific methods below; the
    bodies here either raise NotImplementedError or provide a safe default.
    """

    def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):
        """ Extracts scopes from saved authorization code.

        The scopes returned by this method are used to route token requests
        based on scopes passed to Authorization Code requests.

        With that the token endpoint knows when to include OpenIDConnect
        id_token in token response only based on authorization code scopes.

        Only code param should be sufficient to retrieve grant code from
        any storage you are using, `client_id` and `redirect_uri` can have a
        blank value `""` don't forget to check it before using those values
        in a select query if a database is used.

        :param client_id: Unicode client identifier
        :param code: Unicode authorization code grant
        :param redirect_uri: Unicode absolute URI
        :return: A list of scope

        Method is used by:
            - Authorization Token Grant Dispatcher
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):
        """ Extracts nonce from saved authorization code.

        If present in the Authentication Request, Authorization
        Servers MUST include a nonce Claim in the ID Token with the
        Claim Value being the nonce value sent in the Authentication
        Request. Authorization Servers SHOULD perform no other
        processing on nonce values used. The nonce value is a
        case-sensitive string.

        Only code param should be sufficient to retrieve grant code from
        any storage you are using. However, `client_id` and `redirect_uri`
        have been validated and can be used also.

        :param client_id: Unicode client identifier
        :param code: Unicode authorization code grant
        :param redirect_uri: Unicode absolute URI
        :return: Unicode nonce

        Method is used by:
            - Authorization Token Grant Dispatcher
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def get_jwt_bearer_token(self, token, token_handler, request):
        """Get JWT Bearer token or OpenID Connect ID token

        If using OpenID Connect this SHOULD call `oauthlib.oauth2.RequestValidator.get_id_token`

        :param token: A Bearer token dict
        :param token_handler: the token handler (BearerToken class)
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :return: The JWT Bearer token or OpenID Connect ID token (a JWS signed JWT)

        Method is used by JWT Bearer and OpenID Connect tokens:
            - JWTToken.create_token
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def get_id_token(self, token, token_handler, request):
        """Get OpenID Connect ID token

        This method is OPTIONAL and is NOT RECOMMENDED.
        `finalize_id_token` SHOULD be implemented instead. However, if you
        want full control over the minting of the `id_token`, you
        MAY want to override `get_id_token` instead of using
        `finalize_id_token`.

        In the OpenID Connect workflows when an ID Token is requested this method is called.
        Subclasses should implement the construction, signing and optional encryption of the
        ID Token as described in the OpenID Connect spec.

        In addition to the standard OAuth2 request properties, the request may also contain
        these OIDC specific properties which are useful to this method:

            - nonce, if workflow is implicit or hybrid and it was provided
            - claims, if provided to the original Authorization Code request

        The token parameter is a dict which may contain an ``access_token`` entry, in which
        case the resulting ID Token *should* include a calculated ``at_hash`` claim.

        Similarly, when the request parameter has a ``code`` property defined, the ID Token
        *should* include a calculated ``c_hash`` claim.

        http://openid.net/specs/openid-connect-core-1_0.html (sections `3.1.3.6`_, `3.2.2.10`_, `3.3.2.11`_)

        .. _`3.1.3.6`: http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
        .. _`3.2.2.10`: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken
        .. _`3.3.2.11`: http://openid.net/specs/openid-connect-core-1_0.html#HybridIDToken

        :param token: A Bearer token dict
        :param token_handler: the token handler (BearerToken class)
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :return: The ID Token (a JWS signed JWT)
        """
        # Deliberately returns None: the default minting path goes through
        # finalize_id_token instead, and None signals "not overridden".
        return None

    def finalize_id_token(self, id_token, token, token_handler, request):
        """Finalize OpenID Connect ID token & Sign or Encrypt.

        In the OpenID Connect workflows when an ID Token is requested
        this method is called. Subclasses should implement the
        construction, signing and optional encryption of the ID Token
        as described in the OpenID Connect spec.

        The `id_token` parameter is a dict containing a couple of OIDC
        technical fields related to the specification. Prepopulated
        attributes are:

            - `aud`, equals to `request.client_id`.
            - `iat`, equals to current time.
            - `nonce`, if present, equals the `nonce` from the
              authorization request.
            - `at_hash`, hash of `access_token`, if relevant.
            - `c_hash`, hash of `code`, if relevant.

        This method MUST provide required fields as below:

            - `iss`, REQUIRED. Issuer Identifier for the Issuer of the response.
            - `sub`, REQUIRED. Subject Identifier
            - `exp`, REQUIRED. Expiration time on or after which the ID
              Token MUST NOT be accepted by the RP when performing
              authentication with the OP.

        Additional claims must be added, note that `request.scope`
        should be used to determine the list of claims.

        More information can be found at `OpenID Connect Core#Claims`_

        .. _`OpenID Connect Core#Claims`: https://openid.net/specs/openid-connect-core-1_0.html#Claims

        :param id_token: A dict containing technical fields of id_token
        :param token: A Bearer token dict
        :param token_handler: the token handler (BearerToken class)
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :return: The ID Token (a JWS signed JWT or JWE encrypted JWT)
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_jwt_bearer_token(self, token, scopes, request):
        """Ensure the JWT Bearer token or OpenID Connect ID token are valid
        and authorized access to scopes.

        If using OpenID Connect this SHOULD call `oauthlib.oauth2.RequestValidator.get_id_token`

        If not using OpenID Connect this can `return None` to avoid a 5xx
        rather than a 401/403 response.

        OpenID Connect core 1.0 describes how to validate an id_token:
            - http://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
            - http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDTValidation
            - http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation
            - http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation2

        :param token: Unicode Bearer token
        :param scopes: List of scopes (defined by you)
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is indirectly used by all core OpenID connect JWT token issuing grant types:
            - Authorization Code Grant
            - Implicit Grant
            - Hybrid Grant
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_id_token(self, token, scopes, request):
        """Ensure the id token is valid and authorized access to scopes.

        OpenID Connect core 1.0 describes how to validate an id_token:
            - http://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
            - http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDTValidation
            - http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation
            - http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation2

        :param token: Unicode Bearer token
        :param scopes: List of scopes (defined by you)
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is indirectly used by all core OpenID connect JWT token issuing grant types:
            - Authorization Code Grant
            - Implicit Grant
            - Hybrid Grant
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_silent_authorization(self, request):
        """Ensure the logged in user has authorized silent OpenID authorization.

        Silent OpenID authorization allows access tokens and id tokens to be
        granted to clients without any user prompt or interaction.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - OpenIDConnectAuthCode
            - OpenIDConnectImplicit
            - OpenIDConnectHybrid
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_silent_login(self, request):
        """Ensure session user has authorized silent OpenID login.

        If no user is logged in or has not authorized silent login, this
        method should return False.

        If the user is logged in but associated with multiple accounts and
        not selected which one to link to the token then this method should
        raise an oauthlib.oauth2.AccountSelectionRequired error.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - OpenIDConnectAuthCode
            - OpenIDConnectImplicit
            - OpenIDConnectHybrid
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def validate_user_match(self, id_token_hint, scopes, claims, request):
        """Ensure client supplied user id hint matches session user.

        If the sub claim or id_token_hint is supplied then the session
        user must match the given ID.

        :param id_token_hint: User identifier string.
        :param scopes: List of OAuth 2 scopes and OpenID claims (strings).
        :param claims: OpenID Connect claims dict.
        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            - OpenIDConnectAuthCode
            - OpenIDConnectImplicit
            - OpenIDConnectHybrid
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def get_userinfo_claims(self, request):
        """Return the UserInfo claims in JSON or Signed or Encrypted.

        The UserInfo Claims MUST be returned as the members of a JSON object
        unless a signed or encrypted response was requested during Client
        Registration. The Claims defined in Section 5.1 can be returned, as can
        additional Claims not specified there.

        For privacy reasons, OpenID Providers MAY elect to not return values for
        some requested Claims.

        If a Claim is not returned, that Claim Name SHOULD be omitted from the
        JSON object representing the Claims; it SHOULD NOT be present with a
        null or empty string value.

        The sub (subject) Claim MUST always be returned in the UserInfo
        Response.

        Upon receipt of the UserInfo Request, the UserInfo Endpoint MUST return
        the JSON Serialization of the UserInfo Response as in Section 13.3 in
        the HTTP response body unless a different format was specified during
        Registration [OpenID.Registration].

        If the UserInfo Response is signed and/or encrypted, then the Claims are
        returned in a JWT and the content-type MUST be application/jwt. The
        response MAY be encrypted without also being signed. If both signing and
        encryption are requested, the response MUST be signed then encrypted,
        with the result being a Nested JWT, as defined in [JWT].

        If signed, the UserInfo Response SHOULD contain the Claims iss (issuer)
        and aud (audience) as members. The iss value SHOULD be the OP's Issuer
        Identifier URL. The aud value SHOULD be or include the RP's Client ID
        value.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: Claims as a dict OR JWT/JWS/JWE as a string

        Method is used by:
            UserInfoEndpoint
        """
        # FIX: this hook previously fell through and silently returned None,
        # unlike every other abstract method in this class. A None claims
        # object would only fail later inside the UserInfo endpoint; raise
        # here so a missing override is reported at the source.
        raise NotImplementedError('Subclasses must implement this method.')

    def refresh_id_token(self, request):
        """Whether the id token should be refreshed. Default, True

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :rtype: True or False

        Method is used by:
            RefreshTokenGrant
        """
        return True
diff --git a/contrib/python/oauthlib/oauthlib/openid/connect/core/tokens.py b/contrib/python/oauthlib/oauthlib/openid/connect/core/tokens.py
new file mode 100644
index 0000000000..936ab52e38
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/openid/connect/core/tokens.py
@@ -0,0 +1,48 @@
+"""
+oauthlib.openid.connect.core.tokens
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This module contains methods for adding JWT tokens to requests.
+"""
+from oauthlib.oauth2.rfc6749.tokens import (
+ TokenBase, get_token_from_header, random_token_generator,
+)
+
+
class JWTToken(TokenBase):
    """Bearer-style token whose payload is minted by the request validator."""

    __slots__ = (
        'request_validator', 'token_generator',
        'refresh_token_generator', 'expires_in'
    )

    def __init__(self, request_validator=None, token_generator=None,
                 expires_in=None, refresh_token_generator=None):
        # Fall back to random tokens and a one-hour lifetime when the
        # caller does not supply generators / expiry.
        self.request_validator = request_validator
        self.token_generator = token_generator or random_token_generator
        self.refresh_token_generator = (
            refresh_token_generator or self.token_generator
        )
        self.expires_in = expires_in or 3600

    def create_token(self, request, refresh_token=False):
        """Create a JWT token, delegating minting to the request validator."""
        # expires_in may be a static number or a callable taking the request.
        lifetime = self.expires_in
        if callable(lifetime):
            lifetime = lifetime(request)
        request.expires_in = lifetime
        return self.request_validator.get_jwt_bearer_token(None, None, request)

    def validate_request(self, request):
        """Validate the bearer JWT found in the request's Authorization header."""
        return self.request_validator.validate_jwt_bearer_token(
            get_token_from_header(request), request.scopes, request)

    def estimate_type(self, request):
        """Rate how likely the presented token is a JWT (JOSE header + dots)."""
        header_token = get_token_from_header(request) or ''
        looks_like_jwt = (header_token.startswith('ey') and
                          header_token.count('.') in (2, 4))
        return 10 if looks_like_jwt else 0
diff --git a/contrib/python/oauthlib/oauthlib/signals.py b/contrib/python/oauthlib/oauthlib/signals.py
new file mode 100644
index 0000000000..8fd347a5c8
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/signals.py
@@ -0,0 +1,40 @@
"""
    Implements signals based on blinker if available, otherwise
    falls silently back to a noop. Shamelessly stolen from flask.signals:
    https://github.com/mitsuhiko/flask/blob/master/flask/signals.py
"""
signals_available = False
try:
    from blinker import Namespace
    signals_available = True
except ImportError:  # noqa

    class _FakeSignal:
        """If blinker is unavailable, create a fake class with the same
        interface that allows sending of signals but will fail with an
        error on anything else. Instead of doing anything on send, it
        will just ignore the arguments and do nothing instead.
        """

        def __init__(self, name, doc=None):
            self.name = name
            self.__doc__ = doc

        def send(self, *args, **kwargs):
            # No receivers exist without blinker, so sending is a no-op.
            pass

        def _fail(self, *args, **kwargs):
            raise RuntimeError('signalling support is unavailable '
                               'because the blinker library is '
                               'not installed.')

        connect = disconnect = has_receivers_for = receivers_for = \
            temporarily_connected_to = connected_to = _fail
        del _fail

    class Namespace:
        """Minimal stand-in for blinker.Namespace."""

        def signal(self, name, doc=None):
            return _FakeSignal(name, doc)

# The namespace for code signals. If you are not oauthlib code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()


# Core signals.
scope_changed = _signals.signal('scope-changed')
diff --git a/contrib/python/oauthlib/oauthlib/uri_validate.py b/contrib/python/oauthlib/oauthlib/uri_validate.py
new file mode 100644
index 0000000000..a6fe0fb23e
--- /dev/null
+++ b/contrib/python/oauthlib/oauthlib/uri_validate.py
@@ -0,0 +1,190 @@
+"""
+Regex for URIs
+
+These regex are directly derived from the collected ABNF in RFC3986
+(except for DIGIT, ALPHA and HEXDIG, defined by RFC2234).
+
+They should be processed with re.VERBOSE.
+
+Thanks Mark Nottingham for this code - https://gist.github.com/138549
+"""
+import re
+
# basics

DIGIT = r"[\x30-\x39]"

ALPHA = r"[\x41-\x5A\x61-\x7A]"

HEXDIG = r"[\x30-\x39A-Fa-f]"

# NOTE: each fragment below is composed by %-interpolating earlier fragments
# via locals(); the doubled %% in pct_encoded is a literal "%" that survives
# that interpolation. All patterns are written for re.VERBOSE.

# pct-encoded = "%" HEXDIG HEXDIG
pct_encoded = r" %% %(HEXDIG)s %(HEXDIG)s" % locals()

# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
unreserved = r"(?: %(ALPHA)s | %(DIGIT)s | \- | \. | _ | ~ )" % locals()

# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
gen_delims = r"(?: : | / | \? | \# | \[ | \] | @ )"

# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
sub_delims = r"""(?: ! | \$ | & | ' | \( | \) |
                     \* | \+ | , | ; | = )"""

# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
pchar = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | : | @ )" % locals(
)

# reserved = gen-delims / sub-delims
reserved = r"(?: %(gen_delims)s | %(sub_delims)s )" % locals()


# scheme

# scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
scheme = r"%(ALPHA)s (?: %(ALPHA)s | %(DIGIT)s | \+ | \- | \. )*" % locals()


# authority

# dec-octet = DIGIT ; 0-9
# / %x31-39 DIGIT ; 10-99
# / "1" 2DIGIT ; 100-199
# / "2" %x30-34 DIGIT ; 200-249
# / "25" %x30-35 ; 250-255
dec_octet = r"""(?: %(DIGIT)s |
                    [\x31-\x39] %(DIGIT)s |
                    1 %(DIGIT)s{2} |
                    2 [\x30-\x34] %(DIGIT)s |
                    25 [\x30-\x35]
                )
""" % locals()

# IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
IPv4address = r"%(dec_octet)s \. %(dec_octet)s \. %(dec_octet)s \. %(dec_octet)s" % locals(
)

# IPv6address
# NOTE(review): this is a loose approximation, not the full RFC3986
# IPv6address ABNF — confirm before relying on strict IPv6 validation.
IPv6address = r"([A-Fa-f0-9:]+[:$])[A-Fa-f0-9]{1,4}"

# IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
IPvFuture = r"v %(HEXDIG)s+ \. (?: %(unreserved)s | %(sub_delims)s | : )+" % locals()

# IP-literal = "[" ( IPv6address / IPvFuture ) "]"
IP_literal = r"\[ (?: %(IPv6address)s | %(IPvFuture)s ) \]" % locals()

# reg-name = *( unreserved / pct-encoded / sub-delims )
reg_name = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s )*" % locals()

# userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
userinfo = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | : )" % locals(
)

# host = IP-literal / IPv4address / reg-name
host = r"(?: %(IP_literal)s | %(IPv4address)s | %(reg_name)s )" % locals()

# port = *DIGIT
port = r"(?: %(DIGIT)s )*" % locals()

# authority = [ userinfo "@" ] host [ ":" port ]
authority = r"(?: %(userinfo)s @)? %(host)s (?: : %(port)s)?" % locals()

# Path

# segment = *pchar
segment = r"%(pchar)s*" % locals()

# segment-nz = 1*pchar
segment_nz = r"%(pchar)s+" % locals()

# segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" )
# ; non-zero-length segment without any colon ":"
segment_nz_nc = r"(?: %(unreserved)s | %(pct_encoded)s | %(sub_delims)s | @ )+" % locals()

# path-abempty = *( "/" segment )
path_abempty = r"(?: / %(segment)s )*" % locals()

# path-absolute = "/" [ segment-nz *( "/" segment ) ]
path_absolute = r"/ (?: %(segment_nz)s (?: / %(segment)s )* )?" % locals()

# path-noscheme = segment-nz-nc *( "/" segment )
path_noscheme = r"%(segment_nz_nc)s (?: / %(segment)s )*" % locals()

# path-rootless = segment-nz *( "/" segment )
path_rootless = r"%(segment_nz)s (?: / %(segment)s )*" % locals()

# path-empty = 0<pchar>
path_empty = r""  # FIXME

# path = path-abempty ; begins with "/" or is empty
# / path-absolute ; begins with "/" but not "//"
# / path-noscheme ; begins with a non-colon segment
# / path-rootless ; begins with a segment
# / path-empty ; zero characters
path = r"""(?: %(path_abempty)s |
               %(path_absolute)s |
               %(path_noscheme)s |
               %(path_rootless)s |
               %(path_empty)s
           )
""" % locals()

### Query and Fragment

# query = *( pchar / "/" / "?" )
query = r"(?: %(pchar)s | / | \? )*" % locals()

# fragment = *( pchar / "/" / "?" )
fragment = r"(?: %(pchar)s | / | \? )*" % locals()

# URIs

# hier-part = "//" authority path-abempty
# / path-absolute
# / path-rootless
# / path-empty
hier_part = r"""(?: (?: // %(authority)s %(path_abempty)s ) |
                    %(path_absolute)s |
                    %(path_rootless)s |
                    %(path_empty)s
                )
""" % locals()

# relative-part = "//" authority path-abempty
# / path-absolute
# / path-noscheme
# / path-empty
relative_part = r"""(?: (?: // %(authority)s %(path_abempty)s ) |
                        %(path_absolute)s |
                        %(path_noscheme)s |
                        %(path_empty)s
                    )
""" % locals()

# relative-ref = relative-part [ "?" query ] [ "#" fragment ]
relative_ref = r"%(relative_part)s (?: \? %(query)s)? (?: \# %(fragment)s)?" % locals(
)

# URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
URI = r"^(?: %(scheme)s : %(hier_part)s (?: \? %(query)s )? (?: \# %(fragment)s )? )$" % locals(
)

# URI-reference = URI / relative-ref
URI_reference = r"^(?: %(URI)s | %(relative_ref)s )$" % locals()

# absolute-URI = scheme ":" hier-part [ "?" query ]
absolute_URI = r"^(?: %(scheme)s : %(hier_part)s (?: \? %(query)s )? )$" % locals(
)
+
+
def is_uri(uri):
    """Return a match object if *uri* is an RFC3986 URI, else None."""
    return re.compile(URI, re.VERBOSE).match(uri)
+
+
def is_uri_reference(uri):
    """Return a match object if *uri* is an RFC3986 URI-reference, else None."""
    return re.compile(URI_reference, re.VERBOSE).match(uri)
+
+
def is_absolute_uri(uri):
    """Return a match object if *uri* is an RFC3986 absolute-URI, else None."""
    return re.compile(absolute_URI, re.VERBOSE).match(uri)
diff --git a/contrib/python/oauthlib/tests/__init__.py b/contrib/python/oauthlib/tests/__init__.py
new file mode 100644
index 0000000000..f33236b5ee
--- /dev/null
+++ b/contrib/python/oauthlib/tests/__init__.py
@@ -0,0 +1,3 @@
import oauthlib

# Enable oauthlib's debug mode for every test in this suite.
oauthlib.set_debug(True)
diff --git a/contrib/python/oauthlib/tests/oauth1/__init__.py b/contrib/python/oauthlib/tests/oauth1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/__init__.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/__init__.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_access_token.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_access_token.py
new file mode 100644
index 0000000000..57d8117531
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_access_token.py
@@ -0,0 +1,91 @@
+from unittest.mock import ANY, MagicMock
+
+from oauthlib.oauth1 import RequestValidator
+from oauthlib.oauth1.rfc5849 import Client
+from oauthlib.oauth1.rfc5849.endpoints import AccessTokenEndpoint
+
+from tests.unittest import TestCase
+
+
class AccessTokenEndpointTest(TestCase):
    """Exercise AccessTokenEndpoint against a fully mocked validator."""

    # Validator checks forced onto the happy path in setUp.
    _TRUE_CHECKS = (
        'check_client_key', 'check_request_token', 'check_verifier',
        'validate_client_key', 'validate_request_token', 'validate_verifier',
        'validate_timestamp_and_nonce', 'invalidate_request_token',
    )

    def setUp(self):
        # Wrap a real RequestValidator so unstubbed attributes keep their
        # default behaviour, then stub everything the endpoint consults.
        self.validator = MagicMock(wraps=RequestValidator())
        for check in self._TRUE_CHECKS:
            getattr(self.validator, check).return_value = True
        self.validator.allowed_signature_methods = ['HMAC-SHA1']
        self.validator.get_client_secret.return_value = 'bar'
        self.validator.get_request_token_secret.return_value = 'secret'
        self.validator.get_realms.return_value = ['foo']
        self.validator.timestamp_lifetime = 600
        self.validator.dummy_client = 'dummy'
        self.validator.dummy_secret = 'dummy'
        self.validator.dummy_request_token = 'dummy'
        self.validator.save_access_token = MagicMock()
        self.endpoint = AccessTokenEndpoint(self.validator)
        self.client = Client('foo',
                             client_secret='bar',
                             resource_owner_key='token',
                             resource_owner_secret='secret',
                             verifier='verfier')
        self.uri, self.headers, self.body = self.client.sign(
            'https://i.b/access_token')

    def _respond(self, headers):
        # Helper: run the access-token request with the given headers.
        return self.endpoint.create_access_token_response(
            self.uri, headers=headers)

    def test_check_request_token(self):
        self.validator.check_request_token.return_value = False
        _, body, status = self._respond(self.headers)
        self.assertEqual(status, 400)
        self.assertIn('invalid_request', body)

    def test_check_verifier(self):
        self.validator.check_verifier.return_value = False
        _, body, status = self._respond(self.headers)
        self.assertEqual(status, 400)
        self.assertIn('invalid_request', body)

    def test_validate_client_key(self):
        self.validator.validate_client_key.return_value = False
        _, _, status = self._respond(self.headers)
        self.assertEqual(status, 401)

    def test_validate_request_token(self):
        self.validator.validate_request_token.return_value = False
        _, _, status = self._respond(self.headers)
        self.assertEqual(status, 401)

    def test_validate_verifier(self):
        self.validator.validate_verifier.return_value = False
        _, _, status = self._respond(self.headers)
        self.assertEqual(status, 401)

    def test_validate_signature(self):
        # Sign a different URI so the signature no longer matches self.uri.
        other_client = Client('foo',
                              resource_owner_key='token',
                              resource_owner_secret='secret',
                              verifier='verfier')
        _, headers, _ = other_client.sign(self.uri + '/extra')
        _, _, status = self._respond(headers)
        self.assertEqual(status, 401)

    def test_valid_request(self):
        _, body, status = self._respond(self.headers)
        self.assertEqual(status, 200)
        self.assertIn('oauth_token', body)
        self.validator.validate_timestamp_and_nonce.assert_called_once_with(
            self.client.client_key, ANY, ANY, ANY,
            request_token=self.client.resource_owner_key)
        self.validator.invalidate_request_token.assert_called_once_with(
            self.client.client_key, self.client.resource_owner_key, ANY)
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_authorization.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_authorization.py
new file mode 100644
index 0000000000..a9b2fc0c9f
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_authorization.py
@@ -0,0 +1,54 @@
+from unittest.mock import MagicMock
+
+from oauthlib.oauth1 import RequestValidator
+from oauthlib.oauth1.rfc5849 import errors
+from oauthlib.oauth1.rfc5849.endpoints import AuthorizationEndpoint
+
+from tests.unittest import TestCase
+
+
class AuthorizationEndpointTest(TestCase):
    """Exercise AuthorizationEndpoint request-token authorization flows."""

    def setUp(self):
        self.validator = MagicMock(wraps=RequestValidator())
        self.validator.verify_request_token.return_value = True
        self.validator.verify_realms.return_value = True
        self.validator.get_realms.return_value = ['test']
        self.validator.save_verifier = MagicMock()
        self.endpoint = AuthorizationEndpoint(self.validator)
        self.uri = 'https://i.b/authorize?oauth_token=foo'

    def test_get_realms_and_credentials(self):
        realms, _ = self.endpoint.get_realms_and_credentials(self.uri)
        self.assertEqual(realms, ['test'])

    def test_verify_token(self):
        # An unverifiable request token must fail both entry points.
        self.validator.verify_request_token.return_value = False
        for entry_point in (self.endpoint.get_realms_and_credentials,
                            self.endpoint.create_authorization_response):
            self.assertRaises(errors.InvalidClientError, entry_point, self.uri)

    def test_verify_realms(self):
        self.validator.verify_realms.return_value = False
        self.assertRaises(errors.InvalidRequestError,
                          self.endpoint.create_authorization_response,
                          self.uri,
                          realms=['bar'])

    def test_create_authorization_response(self):
        self.validator.get_redirect_uri.return_value = 'https://c.b/cb'
        headers, _, status = self.endpoint.create_authorization_response(
            self.uri)
        self.assertEqual(status, 302)
        self.assertIn('Location', headers)
        location = headers['Location']
        self.assertTrue(location.startswith('https://c.b/cb'))
        self.assertIn('oauth_verifier', location)

    def test_create_authorization_response_oob(self):
        # Out-of-band clients get the verifier in the body, not a redirect.
        self.validator.get_redirect_uri.return_value = 'oob'
        headers, body, status = self.endpoint.create_authorization_response(
            self.uri)
        self.assertEqual(status, 200)
        self.assertNotIn('Location', headers)
        self.assertIn('oauth_verifier', body)
        self.assertIn('oauth_token', body)
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_base.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_base.py
new file mode 100644
index 0000000000..e87f359baa
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_base.py
@@ -0,0 +1,406 @@
+from re import sub
+from unittest.mock import MagicMock
+
+from oauthlib.common import CaseInsensitiveDict, safe_string_equals
+from oauthlib.oauth1 import Client, RequestValidator
+from oauthlib.oauth1.rfc5849 import (
+ SIGNATURE_HMAC, SIGNATURE_PLAINTEXT, SIGNATURE_RSA, errors,
+)
+from oauthlib.oauth1.rfc5849.endpoints import (
+ BaseEndpoint, RequestTokenEndpoint,
+)
+
+from tests.unittest import TestCase
+
+URLENCODED = {"Content-Type": "application/x-www-form-urlencoded"}
+
+
class BaseEndpointTest(TestCase):
    """Tests for BaseEndpoint's request parsing and the shared transport,
    parameter and signature-method checks used by all OAuth1 endpoints.
    """

    def setUp(self):
        # spec= gives a strict mock: only real RequestValidator attributes exist.
        self.validator = MagicMock(spec=RequestValidator)
        self.validator.allowed_signature_methods = ['HMAC-SHA1']
        self.validator.timestamp_lifetime = 600
        self.endpoint = RequestTokenEndpoint(self.validator)
        self.client = Client('foo', callback_uri='https://c.b/cb')
        # Correctly signed baseline request; tests derive broken variants of it.
        self.uri, self.headers, self.body = self.client.sign(
            'https://i.b/request_token')

    def test_ssl_enforcement(self):
        """Plain-http requests are rejected with a 400."""
        uri, headers, _ = self.client.sign('http://i.b/request_token')
        h, b, s = self.endpoint.create_request_token_response(
            uri, headers=headers)
        self.assertEqual(s, 400)
        self.assertIn('insecure_transport_protocol', b)

    def test_missing_parameters(self):
        """A URI carrying no OAuth parameters is an invalid request."""
        h, b, s = self.endpoint.create_request_token_response(self.uri)
        self.assertEqual(s, 400)
        self.assertIn('invalid_request', b)

    def test_signature_methods(self):
        """A signature method outside the allowed list is rejected."""
        headers = {}
        # Claim RSA while the validator only allows HMAC-SHA1.
        headers['Authorization'] = self.headers['Authorization'].replace(
            'HMAC', 'RSA')
        h, b, s = self.endpoint.create_request_token_response(
            self.uri, headers=headers)
        self.assertEqual(s, 400)
        self.assertIn('invalid_signature_method', b)

    def test_invalid_version(self):
        """An oauth_version other than 1.0 is an invalid request."""
        headers = {}
        headers['Authorization'] = self.headers['Authorization'].replace(
            '1.0', '2.0')
        h, b, s = self.endpoint.create_request_token_response(
            self.uri, headers=headers)
        self.assertEqual(s, 400)
        self.assertIn('invalid_request', b)

    def test_expired_timestamp(self):
        """Timestamps that are too long, too old or non-numeric are rejected."""
        headers = {}
        # 11 digits (too long), old 10-digit value, and a non-numeric value.
        for pattern in ('12345678901', '4567890123', '123456789K'):
            headers['Authorization'] = sub(r'timestamp="\d*k?"',
                                           'timestamp="%s"' % pattern,
                                           self.headers['Authorization'])
            h, b, s = self.endpoint.create_request_token_response(
                self.uri, headers=headers)
            self.assertEqual(s, 400)
            self.assertIn('invalid_request', b)

    def test_client_key_check(self):
        """A client key that fails the format check yields a 400."""
        self.validator.check_client_key.return_value = False
        h, b, s = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(s, 400)
        self.assertIn('invalid_request', b)

    def test_noncecheck(self):
        """A nonce that fails the format check yields a 400."""
        self.validator.check_nonce.return_value = False
        h, b, s = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(s, 400)
        self.assertIn('invalid_request', b)

    def test_enforce_ssl(self):
        """Ensure SSL is enforced by default."""
        v = RequestValidator()
        e = BaseEndpoint(v)
        c = Client('foo')
        u, h, b = c.sign('http://example.com')
        r = e._create_request(u, 'GET', b, h)
        self.assertRaises(errors.InsecureTransportError,
                          e._check_transport_security, r)

    def test_multiple_source_params(self):
        """Check for duplicate params"""
        v = RequestValidator()
        e = BaseEndpoint(v)
        # Same oauth params split across query string, body and header
        # must be detected as duplicates across sources.
        self.assertRaises(errors.InvalidRequestError, e._create_request,
                          'https://a.b/?oauth_signature_method=HMAC-SHA1',
                          'GET', 'oauth_version=foo', URLENCODED)
        headers = {'Authorization': 'OAuth oauth_signature="foo"'}
        headers.update(URLENCODED)
        self.assertRaises(errors.InvalidRequestError, e._create_request,
                          'https://a.b/?oauth_signature_method=HMAC-SHA1',
                          'GET',
                          'oauth_version=foo',
                          headers)
        headers = {'Authorization': 'OAuth oauth_signature_method="foo"'}
        headers.update(URLENCODED)
        self.assertRaises(errors.InvalidRequestError, e._create_request,
                          'https://a.b/',
                          'GET',
                          'oauth_signature=foo',
                          headers)

    def test_duplicate_params(self):
        """Ensure params are only supplied once"""
        v = RequestValidator()
        e = BaseEndpoint(v)
        # Duplicated within the query string, then within the body.
        self.assertRaises(errors.InvalidRequestError, e._create_request,
                          'https://a.b/?oauth_version=a&oauth_version=b',
                          'GET', None, URLENCODED)
        self.assertRaises(errors.InvalidRequestError, e._create_request,
                          'https://a.b/', 'GET', 'oauth_version=a&oauth_version=b',
                          URLENCODED)

    def test_mandated_params(self):
        """Ensure all mandatory params are present."""
        v = RequestValidator()
        e = BaseEndpoint(v)
        # Missing oauth_signature_method and oauth_timestamp, among others.
        r = e._create_request('https://a.b/', 'GET',
                              'oauth_signature=a&oauth_consumer_key=b&oauth_nonce',
                              URLENCODED)
        self.assertRaises(errors.InvalidRequestError,
                          e._check_mandatory_parameters, r)

    def test_oauth_version(self):
        """OAuth version must be 1.0 if present."""
        v = RequestValidator()
        e = BaseEndpoint(v)
        r = e._create_request('https://a.b/', 'GET',
                              ('oauth_signature=a&oauth_consumer_key=b&oauth_nonce=c&'
                               'oauth_timestamp=a&oauth_signature_method=RSA-SHA1&'
                               'oauth_version=2.0'),
                              URLENCODED)
        self.assertRaises(errors.InvalidRequestError,
                          e._check_mandatory_parameters, r)

    def test_oauth_timestamp(self):
        """Check for a valid UNIX timestamp."""
        v = RequestValidator()
        e = BaseEndpoint(v)

        # Invalid timestamp length, must be 10
        r = e._create_request('https://a.b/', 'GET',
                              ('oauth_signature=a&oauth_consumer_key=b&oauth_nonce=c&'
                               'oauth_version=1.0&oauth_signature_method=RSA-SHA1&'
                               'oauth_timestamp=123456789'),
                              URLENCODED)
        self.assertRaises(errors.InvalidRequestError,
                          e._check_mandatory_parameters, r)

        # Invalid timestamp age, must be younger than 10 minutes
        r = e._create_request('https://a.b/', 'GET',
                              ('oauth_signature=a&oauth_consumer_key=b&oauth_nonce=c&'
                               'oauth_version=1.0&oauth_signature_method=RSA-SHA1&'
                               'oauth_timestamp=1234567890'),
                              URLENCODED)
        self.assertRaises(errors.InvalidRequestError,
                          e._check_mandatory_parameters, r)

        # Timestamp must be an integer
        r = e._create_request('https://a.b/', 'GET',
                              ('oauth_signature=a&oauth_consumer_key=b&oauth_nonce=c&'
                               'oauth_version=1.0&oauth_signature_method=RSA-SHA1&'
                               'oauth_timestamp=123456789a'),
                              URLENCODED)
        self.assertRaises(errors.InvalidRequestError,
                          e._check_mandatory_parameters, r)

    def test_case_insensitive_headers(self):
        """Ensure headers are case-insensitive"""
        v = RequestValidator()
        e = BaseEndpoint(v)
        r = e._create_request('https://a.b', 'POST',
                              ('oauth_signature=a&oauth_consumer_key=b&oauth_nonce=c&'
                               'oauth_version=1.0&oauth_signature_method=RSA-SHA1&'
                               'oauth_timestamp=123456789a'),
                              URLENCODED)
        self.assertIsInstance(r.headers, CaseInsensitiveDict)

    def test_signature_method_validation(self):
        """Ensure valid signature method is used."""

        # Template body; the %s slot takes the claimed signature method.
        body = ('oauth_signature=a&oauth_consumer_key=b&oauth_nonce=c&'
                'oauth_version=1.0&oauth_signature_method=%s&'
                'oauth_timestamp=1234567890')

        uri = 'https://example.com/'

        # Each validator below allows exactly one method; every other
        # claimed method must raise InvalidSignatureMethodError.
        class HMACValidator(RequestValidator):

            @property
            def allowed_signature_methods(self):
                return (SIGNATURE_HMAC,)

        v = HMACValidator()
        e = BaseEndpoint(v)
        r = e._create_request(uri, 'GET', body % 'RSA-SHA1', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
        r = e._create_request(uri, 'GET', body % 'PLAINTEXT', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
        r = e._create_request(uri, 'GET', body % 'shibboleth', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)

        class RSAValidator(RequestValidator):

            @property
            def allowed_signature_methods(self):
                return (SIGNATURE_RSA,)

        v = RSAValidator()
        e = BaseEndpoint(v)
        r = e._create_request(uri, 'GET', body % 'HMAC-SHA1', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
        r = e._create_request(uri, 'GET', body % 'PLAINTEXT', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
        r = e._create_request(uri, 'GET', body % 'shibboleth', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)

        class PlainValidator(RequestValidator):

            @property
            def allowed_signature_methods(self):
                return (SIGNATURE_PLAINTEXT,)

        v = PlainValidator()
        e = BaseEndpoint(v)
        r = e._create_request(uri, 'GET', body % 'HMAC-SHA1', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
        r = e._create_request(uri, 'GET', body % 'RSA-SHA1', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
        r = e._create_request(uri, 'GET', body % 'shibboleth', URLENCODED)
        self.assertRaises(errors.InvalidSignatureMethodError,
                          e._check_mandatory_parameters, r)
+
+
class ClientValidator(RequestValidator):
    """Concrete in-memory validator used by SignatureVerificationTest.

    The secrets, tokens and RSA public key below must stay byte-exact:
    the hardcoded signatures in SignatureVerificationTest were computed
    against these values.
    """
    # Known client keys.
    clients = ['foo']
    # Previously seen (client_key, nonce, timestamp, resource_owner_key).
    nonces = [('foo', 'once', '1234567891', 'fez')]
    # client_key -> tokens that client owns.
    owners = {'foo': ['abcdefghijklmnopqrstuvxyz', 'fez']}
    # (client_key, token) -> realm granted to that pairing.
    assigned_realms = {('foo', 'abcdefghijklmnopqrstuvxyz'): 'photos'}
    # (client_key, request_token) -> expected verifier.
    verifiers = {('foo', 'fez'): 'shibboleth'}

    @property
    def client_key_length(self):
        return 1, 30

    @property
    def request_token_length(self):
        return 1, 30

    @property
    def access_token_length(self):
        return 1, 30

    @property
    def nonce_length(self):
        return 2, 30

    @property
    def verifier_length(self):
        return 2, 30

    @property
    def realms(self):
        return ['photos']

    @property
    def timestamp_lifetime(self):
        # Disabled check to allow hardcoded verification signatures
        return 1000000000

    @property
    def dummy_client(self):
        return 'dummy'

    @property
    def dummy_request_token(self):
        return 'dumbo'

    @property
    def dummy_access_token(self):
        return 'dumbo'

    def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
                                     request, request_token=None, access_token=None):
        """A (client, nonce, timestamp, token) combination must be unseen."""
        resource_owner_key = request_token if request_token else access_token
        return not (client_key, nonce, timestamp, resource_owner_key) in self.nonces

    def validate_client_key(self, client_key):
        return client_key in self.clients

    def validate_access_token(self, client_key, access_token, request):
        # Truthiness of owners.get guards the `in` test against unknown clients.
        return (self.owners.get(client_key) and
                access_token in self.owners.get(client_key))

    def validate_request_token(self, client_key, request_token, request):
        return (self.owners.get(client_key) and
                request_token in self.owners.get(client_key))

    def validate_requested_realm(self, client_key, realm, request):
        # Any realm may be requested in these tests.
        return True

    def validate_realm(self, client_key, access_token, request, uri=None,
                       required_realm=None):
        return (client_key, access_token) in self.assigned_realms

    def validate_verifier(self, client_key, request_token, verifier,
                          request):
        # Constant-time comparison guards against timing attacks.
        return ((client_key, request_token) in self.verifiers and
                safe_string_equals(verifier, self.verifiers.get(
                    (client_key, request_token))))

    def validate_redirect_uri(self, client_key, redirect_uri, request):
        return redirect_uri.startswith('http://client.example.com/')

    def get_client_secret(self, client_key, request):
        return 'super secret'

    def get_access_token_secret(self, client_key, access_token, request):
        return 'even more secret'

    def get_request_token_secret(self, client_key, request_token, request):
        return 'even more secret'

    def get_rsa_key(self, client_key, request):
        # Public key matching the private key used to produce the
        # hardcoded RSA signature in SignatureVerificationTest.
        return ("-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNA"
                "DCBiQKBgQDVLQCATX8iK+aZuGVdkGb6uiar\nLi/jqFwL1dYj0JLIsdQc"
                "KaMWtPC06K0+vI+RRZcjKc6sNB9/7kJcKN9Ekc9BUxyT\n/D09Cz47cmC"
                "YsUoiW7G8NSqbE4wPiVpGkJRzFAxaCWwOSSQ+lpC9vwxnvVQfOoZ1\nnp"
                "mWbCdA0iTxsMahwQIDAQAB\n-----END PUBLIC KEY-----")
+
+
class SignatureVerificationTest(TestCase):
    """Signature verification against the hardcoded ClientValidator data.

    The percent-encoded signatures below were precomputed for the exact
    secrets, nonce and timestamp in the request template; do not reformat.
    """

    def setUp(self):
        v = ClientValidator()
        self.e = BaseEndpoint(v)

        self.uri = 'https://example.com/'
        # Request template; %s slots are (signature, signature method).
        self.sig = ('oauth_signature=%s&'
                    'oauth_timestamp=1234567890&'
                    'oauth_nonce=abcdefghijklmnopqrstuvwxyz&'
                    'oauth_version=1.0&'
                    'oauth_signature_method=%s&'
                    'oauth_token=abcdefghijklmnopqrstuvxyz&'
                    'oauth_consumer_key=foo')

    def test_signature_too_short(self):
        """Truncated or wrong-content signatures must not verify."""
        # HMAC signature missing its trailing base64 padding (%3D).
        short_sig = ('oauth_signature=fmrXnTF4lO4o%2BD0%2FlZaJHP%2FXqEY&'
                     'oauth_timestamp=1234567890&'
                     'oauth_nonce=abcdefghijklmnopqrstuvwxyz&'
                     'oauth_version=1.0&oauth_signature_method=HMAC-SHA1&'
                     'oauth_token=abcdefghijklmnopqrstuvxyz&'
                     'oauth_consumer_key=foo')
        r = self.e._create_request(self.uri, 'GET', short_sig, URLENCODED)
        self.assertFalse(self.e._check_signature(r))

        # Correct length but wrong content for a PLAINTEXT signature.
        plain = ('oauth_signature=correctlengthbutthewrongcontent1111&'
                 'oauth_timestamp=1234567890&'
                 'oauth_nonce=abcdefghijklmnopqrstuvwxyz&'
                 'oauth_version=1.0&oauth_signature_method=PLAINTEXT&'
                 'oauth_token=abcdefghijklmnopqrstuvxyz&'
                 'oauth_consumer_key=foo')
        r = self.e._create_request(self.uri, 'GET', plain, URLENCODED)
        self.assertFalse(self.e._check_signature(r))

    def test_hmac_signature(self):
        """A correct HMAC-SHA1 signature verifies."""
        hmac_sig = "fmrXnTF4lO4o%2BD0%2FlZaJHP%2FXqEY%3D"
        sig = self.sig % (hmac_sig, "HMAC-SHA1")
        r = self.e._create_request(self.uri, 'GET', sig, URLENCODED)
        self.assertTrue(self.e._check_signature(r))

    def test_rsa_signature(self):
        """A correct RSA-SHA1 signature verifies against the fixture key."""
        rsa_sig = ("fxFvCx33oKlR9wDquJ%2FPsndFzJphyBa3RFPPIKi3flqK%2BJ7yIrMVbH"
                   "YTM%2FLHPc7NChWz4F4%2FzRA%2BDN1k08xgYGSBoWJUOW6VvOQ6fbYhMA"
                   "FkOGYbuGDbje487XMzsAcv6ZjqZHCROSCk5vofgLk2SN7RZ3OrgrFzf4in"
                   "xetClqA%3D")
        sig = self.sig % (rsa_sig, "RSA-SHA1")
        r = self.e._create_request(self.uri, 'GET', sig, URLENCODED)
        self.assertTrue(self.e._check_signature(r))

    def test_plaintext_signature(self):
        """A correct PLAINTEXT signature (secret&secret) verifies."""
        plain_sig = "super%252520secret%26even%252520more%252520secret"
        sig = self.sig % (plain_sig, "PLAINTEXT")
        r = self.e._create_request(self.uri, 'GET', sig, URLENCODED)
        self.assertTrue(self.e._check_signature(r))
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_request_token.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_request_token.py
new file mode 100644
index 0000000000..879cad2f48
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_request_token.py
@@ -0,0 +1,90 @@
+from unittest.mock import ANY, MagicMock
+
+from oauthlib.oauth1 import RequestValidator
+from oauthlib.oauth1.rfc5849 import Client
+from oauthlib.oauth1.rfc5849.endpoints import RequestTokenEndpoint
+
+from tests.unittest import TestCase
+
+
class RequestTokenEndpointTest(TestCase):
    """Tests for the temporary-credentials (request token) endpoint."""

    def setUp(self):
        validator = MagicMock(wraps=RequestValidator())
        validator.allowed_signature_methods = ['HMAC-SHA1']
        validator.timestamp_lifetime = 600
        validator.dummy_client = 'dummy'
        validator.dummy_secret = 'dummy'
        # Happy-path defaults; individual tests flip single checks to False.
        validator.check_client_key.return_value = True
        validator.check_realms.return_value = True
        validator.validate_client_key.return_value = True
        validator.validate_requested_realms.return_value = True
        validator.validate_redirect_uri.return_value = True
        validator.validate_timestamp_and_nonce.return_value = True
        validator.get_client_secret.return_value = 'bar'
        validator.get_default_realms.return_value = ['foo']
        validator.save_request_token = MagicMock()
        self.validator = validator
        self.endpoint = RequestTokenEndpoint(self.validator)
        self.client = Client('foo', client_secret='bar', realm='foo',
                             callback_uri='https://c.b/cb')
        # Correctly signed baseline request.
        self.uri, self.headers, self.body = self.client.sign(
            'https://i.b/request_token')

    def test_check_redirect_uri(self):
        """A request signed without a callback URI is a 400 invalid_request."""
        bare_client = Client('foo')
        uri, headers, _ = bare_client.sign(self.uri)
        _, body, status = self.endpoint.create_request_token_response(
            uri, headers=headers)
        self.assertEqual(status, 400)
        self.assertIn('invalid_request', body)

    def test_check_realms(self):
        """A realm failing the format check is a 400 invalid_request."""
        self.validator.check_realms.return_value = False
        _, body, status = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(status, 400)
        self.assertIn('invalid_request', body)

    def test_validate_client_key(self):
        """An unrecognized client key yields 401."""
        self.validator.validate_client_key.return_value = False
        _, _, status = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(status, 401)

    def test_validate_realms(self):
        """A client requesting realms it may not use yields 401."""
        self.validator.validate_requested_realms.return_value = False
        _, _, status = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(status, 401)

    def test_validate_redirect_uri(self):
        """A disallowed callback URI yields 401."""
        self.validator.validate_redirect_uri.return_value = False
        _, _, status = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(status, 401)

    def test_validate_signature(self):
        """A signature computed over a different URI yields 401."""
        other_client = Client('foo', callback_uri='https://c.b/cb')
        _, headers, _ = other_client.sign(self.uri + '/extra')
        _, _, status = self.endpoint.create_request_token_response(
            self.uri, headers=headers)
        self.assertEqual(status, 401)

    def test_valid_request(self):
        """A well-formed signed request yields 200 and a token body."""
        _, body, status = self.endpoint.create_request_token_response(
            self.uri, headers=self.headers)
        self.assertEqual(status, 200)
        self.assertIn('oauth_token', body)
        self.validator.validate_timestamp_and_nonce.assert_called_once_with(
            self.client.client_key, ANY, ANY, ANY,
            request_token=self.client.resource_owner_key)

    def test_uri_provided_realm(self):
        """A realm supplied as a query parameter is accepted."""
        query_client = Client('foo', callback_uri='https://c.b/cb',
                              client_secret='bar')
        realm_uri = self.uri + '?realm=foo'
        _, headers, _ = query_client.sign(realm_uri)
        _, body, status = self.endpoint.create_request_token_response(
            realm_uri, headers=headers)
        self.assertEqual(status, 200)
        self.assertIn('oauth_token', body)
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_resource.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_resource.py
new file mode 100644
index 0000000000..416216f737
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_resource.py
@@ -0,0 +1,102 @@
+from unittest.mock import ANY, MagicMock
+
+from oauthlib.oauth1 import RequestValidator
+from oauthlib.oauth1.rfc5849 import Client
+from oauthlib.oauth1.rfc5849.endpoints import ResourceEndpoint
+
+from tests.unittest import TestCase
+
+
+class ResourceEndpointTest(TestCase):
+
+ def setUp(self):
+ self.validator = MagicMock(wraps=RequestValidator())
+ self.validator.check_client_key.return_value = True
+ self.validator.check_access_token.return_value = True
+ self.validator.allowed_signature_methods = ['HMAC-SHA1']
+ self.validator.get_client_secret.return_value = 'bar'
+ self.validator.get_access_token_secret.return_value = 'secret'
+ self.validator.timestamp_lifetime = 600
+ self.validator.validate_client_key.return_value = True
+ self.validator.validate_access_token.return_value = True
+ self.validator.validate_timestamp_and_nonce.return_value = True
+ self.validator.validate_realms.return_value = True
+ self.validator.dummy_client = 'dummy'
+ self.validator.dummy_secret = 'dummy'
+ self.validator.dummy_access_token = 'dummy'
+ self.endpoint = ResourceEndpoint(self.validator)
+ self.client = Client('foo',
+ client_secret='bar',
+ resource_owner_key='token',
+ resource_owner_secret='secret')
+ self.uri, self.headers, self.body = self.client.sign(
+ 'https://i.b/protected_resource')
+
+ def test_missing_parameters(self):
+ self.validator.check_access_token.return_value = False
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri)
+ self.assertFalse(v)
+
+ def test_check_access_token(self):
+ self.validator.check_access_token.return_value = False
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri, headers=self.headers)
+ self.assertFalse(v)
+
+ def test_validate_client_key(self):
+ self.validator.validate_client_key.return_value = False
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri, headers=self.headers)
+ self.assertFalse(v)
+ # the validator log should have `False` values
+ self.assertFalse(r.validator_log['client'])
+ self.assertTrue(r.validator_log['realm'])
+ self.assertTrue(r.validator_log['resource_owner'])
+ self.assertTrue(r.validator_log['signature'])
+
+ def test_validate_access_token(self):
+ self.validator.validate_access_token.return_value = False
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri, headers=self.headers)
+ self.assertFalse(v)
+ # the validator log should have `False` values
+ self.assertTrue(r.validator_log['client'])
+ self.assertTrue(r.validator_log['realm'])
+ self.assertFalse(r.validator_log['resource_owner'])
+ self.assertTrue(r.validator_log['signature'])
+
+ def test_validate_realms(self):
+ self.validator.validate_realms.return_value = False
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri, headers=self.headers)
+ self.assertFalse(v)
+ # the validator log should have `False` values
+ self.assertTrue(r.validator_log['client'])
+ self.assertFalse(r.validator_log['realm'])
+ self.assertTrue(r.validator_log['resource_owner'])
+ self.assertTrue(r.validator_log['signature'])
+
+ def test_validate_signature(self):
+ client = Client('foo',
+ resource_owner_key='token',
+ resource_owner_secret='secret')
+ _, headers, _ = client.sign(self.uri + '/extra')
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri, headers=headers)
+ self.assertFalse(v)
+ # the validator log should have `False` values
+ self.assertTrue(r.validator_log['client'])
+ self.assertTrue(r.validator_log['realm'])
+ self.assertTrue(r.validator_log['resource_owner'])
+ self.assertFalse(r.validator_log['signature'])
+
+ def test_valid_request(self):
+ v, r = self.endpoint.validate_protected_resource_request(
+ self.uri, headers=self.headers)
+ self.assertTrue(v)
+ self.validator.validate_timestamp_and_nonce.assert_called_once_with(
+ self.client.client_key, ANY, ANY, ANY,
+ access_token=self.client.resource_owner_key)
+ # everything in the validator_log should be `True`
+ self.assertTrue(all(r.validator_log.items()))
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_signature_only.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_signature_only.py
new file mode 100644
index 0000000000..16585bd580
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/endpoints/test_signature_only.py
@@ -0,0 +1,50 @@
+from unittest.mock import ANY, MagicMock
+
+from oauthlib.oauth1 import RequestValidator
+from oauthlib.oauth1.rfc5849 import Client
+from oauthlib.oauth1.rfc5849.endpoints import SignatureOnlyEndpoint
+
+from tests.unittest import TestCase
+
+
class SignatureOnlyEndpointTest(TestCase):
    """Tests for the endpoint that checks signatures and nothing else."""

    def setUp(self):
        validator = MagicMock(wraps=RequestValidator())
        validator.allowed_signature_methods = ['HMAC-SHA1']
        validator.timestamp_lifetime = 600
        validator.dummy_client = 'dummy'
        validator.dummy_secret = 'dummy'
        # Happy-path defaults; tests override single checks.
        validator.check_client_key.return_value = True
        validator.validate_client_key.return_value = True
        validator.validate_timestamp_and_nonce.return_value = True
        validator.get_client_secret.return_value = 'bar'
        self.validator = validator
        self.endpoint = SignatureOnlyEndpoint(self.validator)
        self.client = Client('foo', client_secret='bar')
        self.uri, self.headers, self.body = self.client.sign(
            'https://i.b/protected_resource')

    def test_missing_parameters(self):
        """A request without OAuth parameters does not validate."""
        valid, _request = self.endpoint.validate_request(
            self.uri)
        self.assertFalse(valid)

    def test_validate_client_key(self):
        """An unknown client key does not validate."""
        self.validator.validate_client_key.return_value = False
        valid, _request = self.endpoint.validate_request(
            self.uri, headers=self.headers)
        self.assertFalse(valid)

    def test_validate_signature(self):
        """A signature over a different URI does not validate."""
        rogue_client = Client('foo')
        _, headers, _ = rogue_client.sign(self.uri + '/extra')
        valid, _request = self.endpoint.validate_request(
            self.uri, headers=headers)
        self.assertFalse(valid)

    def test_valid_request(self):
        """A correctly signed request validates and the nonce is checked."""
        valid, _request = self.endpoint.validate_request(
            self.uri, headers=self.headers)
        self.assertTrue(valid)
        self.validator.validate_timestamp_and_nonce.assert_called_once_with(
            self.client.client_key, ANY, ANY, ANY)
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/test_client.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_client.py
new file mode 100644
index 0000000000..f7c997f509
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_client.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+from oauthlib.common import Request
+from oauthlib.oauth1 import (
+ SIGNATURE_HMAC_SHA1, SIGNATURE_HMAC_SHA256, SIGNATURE_PLAINTEXT,
+ SIGNATURE_RSA, SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_QUERY,
+)
+from oauthlib.oauth1.rfc5849 import Client
+
+from tests.unittest import TestCase
+
+
class ClientRealmTests(TestCase):
    """Tests for how Client handles the optional OAuth realm."""

    def test_client_no_realm(self):
        """Without a realm the header starts directly with oauth_nonce."""
        unrealmed = Client("client-key")
        _uri, headers, _body = unrealmed.sign("http://example-uri")
        auth = headers["Authorization"]
        self.assertTrue(auth.startswith('OAuth oauth_nonce='))

    def test_client_realm_sign_with_default_realm(self):
        """A realm given at construction is emitted in the header."""
        realmed = Client("client-key", realm="moo-realm")
        self.assertEqual(realmed.realm, "moo-realm")
        _uri, headers, _body = realmed.sign("http://example-uri")
        auth = headers["Authorization"]
        self.assertTrue(auth.startswith('OAuth realm="moo-realm",'))

    def test_client_realm_sign_with_additional_realm(self):
        """A realm passed to sign() wins for that request only."""
        realmed = Client("client-key", realm="moo-realm")
        _uri, headers, _body = realmed.sign("http://example-uri", realm="baa-realm")
        self.assertTrue(
            headers["Authorization"].startswith('OAuth realm="baa-realm",'))
        # sign() must not clobber the client's configured default realm.
        self.assertEqual(realmed.realm, "moo-realm")
+
+
class ClientConstructorTests(TestCase):
    """Tests for Client construction-time options."""

    def test_convert_to_unicode_resource_owner(self):
        """Byte-string credentials are decoded to text on construction."""
        client = Client('client-key',
                        resource_owner_key=b'owner key')
        self.assertNotIsInstance(client.resource_owner_key, bytes)
        self.assertEqual(client.resource_owner_key, 'owner key')

    def test_give_explicit_timestamp(self):
        """An explicit timestamp overrides the generated one."""
        client = Client('client-key', timestamp='1')
        oauth_params = dict(client.get_oauth_params(Request('http://example.com')))
        self.assertEqual(oauth_params['oauth_timestamp'], '1')

    def test_give_explicit_nonce(self):
        """An explicit nonce overrides the generated one."""
        client = Client('client-key', nonce='1')
        oauth_params = dict(client.get_oauth_params(Request('http://example.com')))
        self.assertEqual(oauth_params['oauth_nonce'], '1')

    def test_decoding(self):
        """decoding='utf-8' makes sign() return byte strings throughout."""
        client = Client('client_key', decoding='utf-8')
        uri, headers, body = client.sign(
            'http://a.b/path?query', http_method='POST', body='a=b',
            headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertIsInstance(uri, bytes)
        self.assertIsInstance(body, bytes)
        for header_name, header_value in headers.items():
            self.assertIsInstance(header_name, bytes)
            self.assertIsInstance(header_value, bytes)

    def test_hmac_sha1(self):
        """HMAC-SHA1 is the default and maps to the matching signer."""
        client = Client('client_key')
        self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_HMAC_SHA1],
                         client.SIGNATURE_METHODS[client.signature_method])

    def test_hmac_sha256(self):
        """Selecting HMAC-SHA256 wires up the matching signer."""
        client = Client('client_key', signature_method=SIGNATURE_HMAC_SHA256)
        self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_HMAC_SHA256],
                         client.SIGNATURE_METHODS[client.signature_method])

    def test_rsa(self):
        """Selecting RSA wires up the RSA signer; no key is needed yet."""
        client = Client('client_key', signature_method=SIGNATURE_RSA)
        self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_RSA],
                         client.SIGNATURE_METHODS[client.signature_method])
        # Instantiation succeeds without an RSA key; signing would require one.
        self.assertIsNone(client.rsa_key)
+
+
class SignatureMethodTest(TestCase):
    """Known-answer tests for each built-in signature method.

    Fixed timestamp/nonce make signing deterministic, so each expected
    Authorization header is a precomputed constant; do not reformat them.
    """

    def test_hmac_sha1_method(self):
        """HMAC-SHA1 signing produces the known-good header."""
        client = Client('client_key', timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="HMAC-SHA1", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="hH5BWYVqo7QI4EmPBUUe9owRUUQ%3D"')
        self.assertEqual(h['Authorization'], correct)

    def test_hmac_sha256_method(self):
        """HMAC-SHA256 signing produces the known-good header."""
        client = Client('client_key', signature_method=SIGNATURE_HMAC_SHA256,
                        timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="HMAC-SHA256", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="JzgJWBxX664OiMW3WE4MEjtYwOjI%2FpaUWHqtdHe68Es%3D"')
        self.assertEqual(h['Authorization'], correct)

    def test_rsa_method(self):
        """RSA-SHA1 signing with a fixed private key is deterministic."""
        # Test-only throwaway key; the signature below was computed with it.
        private_key = (
            "-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKBgQDk1/bxy"
            "S8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG\nAlwXWfzXw"
            "SMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVa"
            "h\n5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjT"
            "MO7IdrwIDAQAB\nAoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9"
            "GxocdM1m30WyWRFMEz2nKJ8fR\np3vTD4w8yplTOhcoXdQZl0kRoaD"
            "zrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC\nDY6xveQczE7qt7V"
            "k7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i\nrf6"
            "qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVaw"
            "Ft3UMhe\n542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+"
            "XO/2xKp/d/ty1OIeovx\nC60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZL"
            "eMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT\nSuy30sKjLzqoGw1kR+wv7"
            "C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA\nkmaMg2PNr"
            "jUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzV"
            "S\nJzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lT"
            "LVduVgh4v5yLT\nGa6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPv"
            "dMlxqXA==\n-----END RSA PRIVATE KEY-----"
        )
        client = Client('client_key', signature_method=SIGNATURE_RSA,
                        rsa_key=private_key, timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="RSA-SHA1", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="ktvzkUhtrIawBcq21DRJrAyysTc3E1Zq5GdGu8EzH'
                   'OtbeaCmOBDLGHAcqlm92mj7xp5E1Z6i2vbExPimYAJL7FzkLnkRE5YEJR4'
                   'rNtIgAf1OZbYsIUmmBO%2BCLuStuu5Lg3tAluwC7XkkgoXCBaRKT1mUXzP'
                   'HJILzZ8iFOvS6w5E%3D"')
        self.assertEqual(h['Authorization'], correct)

    def test_plaintext_method(self):
        """PLAINTEXT signatures are just client_secret&owner_secret."""
        client = Client('client_key',
                        signature_method=SIGNATURE_PLAINTEXT,
                        timestamp='1234567890',
                        nonce='abc',
                        client_secret='foo',
                        resource_owner_secret='bar')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="PLAINTEXT", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="foo%26bar"')
        self.assertEqual(h['Authorization'], correct)

    def test_invalid_method(self):
        """An unknown signature method raises ValueError at sign time."""
        client = Client('client_key', signature_method='invalid')
        self.assertRaises(ValueError, client.sign, 'http://example.com')

    def test_rsa_no_key(self):
        """RSA without a key fails at sign time, not construction time."""
        client = Client('client_key', signature_method=SIGNATURE_RSA)
        self.assertRaises(ValueError, client.sign, 'http://example.com')

    def test_register_method(self):
        """Custom signature methods can be registered on the Client class."""
        # NOTE(review): this registers 'PIZZA' on the class itself and is
        # never unregistered, so it leaks into later tests in the process.
        Client.register_signature_method('PIZZA',
                                         lambda base_string, client: 'PIZZA')

        self.assertIn('PIZZA', Client.SIGNATURE_METHODS)

        client = Client('client_key', signature_method='PIZZA',
                        timestamp='1234567890', nonce='abc')

        u, h, b = client.sign('http://example.com')

        self.assertEqual(h['Authorization'], (
            'OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
            'oauth_version="1.0", oauth_signature_method="PIZZA", '
            'oauth_consumer_key="client_key", '
            'oauth_signature="PIZZA"'
        ))
+
+
class SignatureTypeTest(TestCase):
    """Tests for placing OAuth parameters in the body or query string.

    Fixed timestamp/nonce make the expected signed output a constant.
    """

    def test_params_in_body(self):
        """SIGNATURE_TYPE_BODY appends the oauth params to the form body."""
        client = Client('client_key', signature_type=SIGNATURE_TYPE_BODY,
                        timestamp='1378988215', nonce='14205877133089081931378988215')
        _, h, b = client.sign('http://i.b/path', http_method='POST', body='a=b',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')
        correct = ('a=b&oauth_nonce=14205877133089081931378988215&'
                   'oauth_timestamp=1378988215&'
                   'oauth_version=1.0&'
                   'oauth_signature_method=HMAC-SHA1&'
                   'oauth_consumer_key=client_key&'
                   'oauth_signature=2JAQomgbShqoscqKWBiYQZwWq94%3D')
        self.assertEqual(b, correct)

    def test_params_in_query(self):
        """SIGNATURE_TYPE_QUERY appends the oauth params to the URI."""
        client = Client('client_key', signature_type=SIGNATURE_TYPE_QUERY,
                        timestamp='1378988215', nonce='14205877133089081931378988215')
        u, _, _ = client.sign('http://i.b/path', http_method='POST')
        correct = ('http://i.b/path?oauth_nonce=14205877133089081931378988215&'
                   'oauth_timestamp=1378988215&'
                   'oauth_version=1.0&'
                   'oauth_signature_method=HMAC-SHA1&'
                   'oauth_consumer_key=client_key&'
                   'oauth_signature=08G5Snvw%2BgDAzBF%2BCmT5KqlrPKo%3D')
        self.assertEqual(u, correct)

    def test_invalid_signature_type(self):
        """An unknown signature type raises ValueError at sign time."""
        client = Client('client_key', signature_type='invalid')
        self.assertRaises(ValueError, client.sign, 'http://i.b/path')
+
+
class SigningTest(TestCase):
    """Tests for Client.sign() input handling: headers, bodies, encoding."""

    def test_case_insensitive_headers(self):
        """The Content-Type header is recognized in any capitalization."""
        client = Client('client_key')
        # Uppercase
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')

        # Lowercase
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'content-type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['content-type'], 'application/x-www-form-urlencoded')

        # Capitalized
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'Content-type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-type'], 'application/x-www-form-urlencoded')

        # Random
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'conTent-tYpe': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['conTent-tYpe'], 'application/x-www-form-urlencoded')

    def test_sign_no_body(self):
        """A form-encoded POST with body=None is rejected."""
        client = Client('client_key', decoding='utf-8')
        self.assertRaises(ValueError, client.sign, 'http://i.b/path',
                          http_method='POST', body=None,
                          headers={'Content-Type': 'application/x-www-form-urlencoded'})

    def test_sign_body(self):
        """An empty (but present) form body signs successfully."""
        client = Client('client_key')
        _, h, b = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')

    def test_sign_get_with_body(self):
        """GET and HEAD requests must not carry a form body."""
        client = Client('client_key')
        for method in ('GET', 'HEAD'):
            self.assertRaises(ValueError, client.sign, 'http://a.b/path?query',
                              http_method=method, body='a=b',
                              headers={
                                  'Content-Type': 'application/x-www-form-urlencoded'
                              })

    def test_sign_unicode(self):
        """Percent-encoded non-ASCII bodies sign to the known signatures."""
        client = Client('client_key', nonce='abc', timestamp='abc')
        _, h, b = client.sign('http://i.b/path', http_method='POST',
                              body='status=%E5%95%A6%E5%95%A6',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, 'status=%E5%95%A6%E5%95%A6')
        self.assertIn('oauth_signature="yrtSqp88m%2Fc5UDaucI8BXK4oEtk%3D"', h['Authorization'])
        _, h, b = client.sign('http://i.b/path', http_method='POST',
                              body='status=%C3%A6%C3%A5%C3%B8',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, 'status=%C3%A6%C3%A5%C3%B8')
        self.assertIn('oauth_signature="oG5t3Eg%2FXO5FfQgUUlTtUeeZzvk%3D"', h['Authorization'])
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/test_parameters.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_parameters.py
new file mode 100644
index 0000000000..92b95c1167
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_parameters.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+from oauthlib.common import urlencode
+from oauthlib.oauth1.rfc5849.parameters import (
+ _append_params, prepare_form_encoded_body, prepare_headers,
+ prepare_request_uri_query,
+)
+
+from tests.unittest import TestCase
+
+
+class ParameterTests(TestCase):
+ auth_only_params = [
+ ('oauth_consumer_key', "9djdj82h48djs9d2"),
+ ('oauth_token', "kkk9d7dh3k39sjv7"),
+ ('oauth_signature_method', "HMAC-SHA1"),
+ ('oauth_timestamp', "137131201"),
+ ('oauth_nonce', "7d8f3e4a"),
+ ('oauth_signature', "bYT5CMsGcbgUdFHObYMEfcx6bsw=")
+ ]
+ auth_and_data = list(auth_only_params)
+ auth_and_data.append(('data_param_foo', 'foo'))
+ auth_and_data.append(('data_param_1', '1'))
+ realm = 'testrealm'
+ norealm_authorization_header = ' '.join((
+ 'OAuth',
+ 'oauth_consumer_key="9djdj82h48djs9d2",',
+ 'oauth_token="kkk9d7dh3k39sjv7",',
+ 'oauth_signature_method="HMAC-SHA1",',
+ 'oauth_timestamp="137131201",',
+ 'oauth_nonce="7d8f3e4a",',
+ 'oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"',
+ ))
+ withrealm_authorization_header = ' '.join((
+ 'OAuth',
+ 'realm="testrealm",',
+ 'oauth_consumer_key="9djdj82h48djs9d2",',
+ 'oauth_token="kkk9d7dh3k39sjv7",',
+ 'oauth_signature_method="HMAC-SHA1",',
+ 'oauth_timestamp="137131201",',
+ 'oauth_nonce="7d8f3e4a",',
+ 'oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"',
+ ))
+
+ def test_append_params(self):
+ unordered_1 = [
+ ('oauth_foo', 'foo'),
+ ('lala', 123),
+ ('oauth_baz', 'baz'),
+ ('oauth_bar', 'bar'), ]
+ unordered_2 = [
+ ('teehee', 456),
+ ('oauth_quux', 'quux'), ]
+ expected = [
+ ('teehee', 456),
+ ('lala', 123),
+ ('oauth_quux', 'quux'),
+ ('oauth_foo', 'foo'),
+ ('oauth_baz', 'baz'),
+ ('oauth_bar', 'bar'), ]
+ self.assertEqual(_append_params(unordered_1, unordered_2), expected)
+
+ def test_prepare_headers(self):
+ self.assertEqual(
+ prepare_headers(self.auth_only_params, {}),
+ {'Authorization': self.norealm_authorization_header})
+ self.assertEqual(
+ prepare_headers(self.auth_only_params, {}, realm=self.realm),
+ {'Authorization': self.withrealm_authorization_header})
+
+ def test_prepare_headers_ignore_data(self):
+ self.assertEqual(
+ prepare_headers(self.auth_and_data, {}),
+ {'Authorization': self.norealm_authorization_header})
+ self.assertEqual(
+ prepare_headers(self.auth_and_data, {}, realm=self.realm),
+ {'Authorization': self.withrealm_authorization_header})
+
+ def test_prepare_form_encoded_body(self):
+ existing_body = ''
+ form_encoded_body = 'data_param_foo=foo&data_param_1=1&oauth_consumer_key=9djdj82h48djs9d2&oauth_token=kkk9d7dh3k39sjv7&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131201&oauth_nonce=7d8f3e4a&oauth_signature=bYT5CMsGcbgUdFHObYMEfcx6bsw%3D'
+ self.assertEqual(
+ urlencode(prepare_form_encoded_body(self.auth_and_data, existing_body)),
+ form_encoded_body)
+
+ def test_prepare_request_uri_query(self):
+ url = 'http://notarealdomain.com/foo/bar/baz?some=args&go=here'
+ request_uri_query = 'http://notarealdomain.com/foo/bar/baz?some=args&go=here&data_param_foo=foo&data_param_1=1&oauth_consumer_key=9djdj82h48djs9d2&oauth_token=kkk9d7dh3k39sjv7&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131201&oauth_nonce=7d8f3e4a&oauth_signature=bYT5CMsGcbgUdFHObYMEfcx6bsw%3D'
+ self.assertEqual(
+ prepare_request_uri_query(self.auth_and_data, url),
+ request_uri_query)
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/test_request_validator.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_request_validator.py
new file mode 100644
index 0000000000..8d34415040
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_request_validator.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+from oauthlib.oauth1 import RequestValidator
+
+from tests.unittest import TestCase
+
+
+class RequestValidatorTests(TestCase):
+
+ def test_not_implemented(self):
+ v = RequestValidator()
+ self.assertRaises(NotImplementedError, v.get_client_secret, None, None)
+ self.assertRaises(NotImplementedError, v.get_request_token_secret,
+ None, None, None)
+ self.assertRaises(NotImplementedError, v.get_access_token_secret,
+ None, None, None)
+ self.assertRaises(NotImplementedError, lambda: v.dummy_client)
+ self.assertRaises(NotImplementedError, lambda: v.dummy_request_token)
+ self.assertRaises(NotImplementedError, lambda: v.dummy_access_token)
+ self.assertRaises(NotImplementedError, v.get_rsa_key, None, None)
+ self.assertRaises(NotImplementedError, v.get_default_realms, None, None)
+ self.assertRaises(NotImplementedError, v.get_realms, None, None)
+ self.assertRaises(NotImplementedError, v.get_redirect_uri, None, None)
+ self.assertRaises(NotImplementedError, v.validate_client_key, None, None)
+ self.assertRaises(NotImplementedError, v.validate_access_token,
+ None, None, None)
+ self.assertRaises(NotImplementedError, v.validate_request_token,
+ None, None, None)
+ self.assertRaises(NotImplementedError, v.verify_request_token,
+ None, None)
+ self.assertRaises(NotImplementedError, v.verify_realms,
+ None, None, None)
+ self.assertRaises(NotImplementedError, v.validate_timestamp_and_nonce,
+ None, None, None, None)
+ self.assertRaises(NotImplementedError, v.validate_redirect_uri,
+ None, None, None)
+ self.assertRaises(NotImplementedError, v.validate_realms,
+ None, None, None, None, None)
+ self.assertRaises(NotImplementedError, v.validate_requested_realms,
+ None, None, None)
+ self.assertRaises(NotImplementedError, v.validate_verifier,
+ None, None, None, None)
+ self.assertRaises(NotImplementedError, v.save_access_token, None, None)
+ self.assertRaises(NotImplementedError, v.save_request_token, None, None)
+ self.assertRaises(NotImplementedError, v.save_verifier,
+ None, None, None)
+
+ def test_check_length(self):
+ v = RequestValidator()
+
+ for method in (v.check_client_key, v.check_request_token,
+ v.check_access_token, v.check_nonce, v.check_verifier):
+ for not_valid in ('tooshort', 'invalid?characters!',
+ 'thisclientkeyisalittlebittoolong'):
+ self.assertFalse(method(not_valid))
+ for valid in ('itsjustaboutlongenough',):
+ self.assertTrue(method(valid))
+
+ def test_check_realms(self):
+ v = RequestValidator()
+ self.assertFalse(v.check_realms(['foo']))
+
+ class FooRealmValidator(RequestValidator):
+ @property
+ def realms(self):
+ return ['foo']
+
+ v = FooRealmValidator()
+ self.assertTrue(v.check_realms(['foo']))
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/test_signatures.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_signatures.py
new file mode 100644
index 0000000000..2d4735eafd
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_signatures.py
@@ -0,0 +1,896 @@
+# -*- coding: utf-8 -*-
+from oauthlib.oauth1.rfc5849.signature import (
+ base_string_uri, collect_parameters, normalize_parameters,
+ sign_hmac_sha1_with_client, sign_hmac_sha256_with_client,
+ sign_hmac_sha512_with_client, sign_plaintext_with_client,
+ sign_rsa_sha1_with_client, sign_rsa_sha256_with_client,
+ sign_rsa_sha512_with_client, signature_base_string, verify_hmac_sha1,
+ verify_hmac_sha256, verify_hmac_sha512, verify_plaintext, verify_rsa_sha1,
+ verify_rsa_sha256, verify_rsa_sha512,
+)
+
+from tests.unittest import TestCase
+
+# ################################################################
+
+class MockRequest:
+ """
+ Mock of a request used by the verify_* functions.
+ """
+
+ def __init__(self,
+ method: str,
+ uri_str: str,
+ params: list,
+ signature: str):
+ """
+ The params is a list of (name, value) tuples. It is not a dictionary,
+ because there can be multiple parameters with the same name.
+ """
+ self.uri = uri_str
+ self.http_method = method
+ self.params = params
+ self.signature = signature
+
+
+# ################################################################
+
+class MockClient:
+ """
+ Mock of client credentials used by the sign_*_with_client functions.
+
+ For HMAC, set the client_secret and resource_owner_secret.
+
+ For RSA, set the rsa_key to either a PEM formatted PKCS #1 public key or
+ PEM formatted PKCS #1 private key.
+ """
+ def __init__(self,
+ client_secret: str = None,
+ resource_owner_secret: str = None,
+ rsa_key: str = None):
+ self.client_secret = client_secret
+ self.resource_owner_secret = resource_owner_secret
+ self.rsa_key = rsa_key # used for private or public key: a poor design!
+
+
+# ################################################################
+
+class SignatureTests(TestCase):
+ """
+ Unit tests for the oauthlib/oauth1/rfc5849/signature.py module.
+
+ The tests in this class are organised into sections, to test the
+ functions relating to:
+
+ - Signature base string calculation
+ - HMAC-based signature methods
+ - RSA-based signature methods
+ - PLAINTEXT signature method
+
+ Each section is separated by a comment beginning with "====".
+
+ Those comments have been formatted to remain visible when the code is
+ collapsed using PyCharm's code folding feature. That is, those section
+ heading comments do not have any other comment lines around it, so they
+ don't get collapsed when the contents of the class is collapsed. While
+ there is a "Sequential comments" option in the code folding configuration,
+ by default they are folded.
+
+ They all use some/all of the example test vector, defined in the first
+ section below.
+ """
+
+ # ==== Example test vector =======================================
+
+ eg_signature_base_string =\
+ 'POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q' \
+ '%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_' \
+ 'key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m' \
+ 'ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk' \
+ '9d7dh3k39sjv7'
+
+ # The _signature base string_ above is copied from the end of
+ # RFC 5849 section 3.4.1.1.
+ #
+ # It corresponds to the three values below.
+ #
+ # The _normalized parameters_ below is copied from the end of
+ # RFC 5849 section 3.4.1.3.2.
+
+ eg_http_method = 'POST'
+
+ eg_base_string_uri = 'http://example.com/request'
+
+ eg_normalized_parameters =\
+ 'a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj' \
+ 'dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1' \
+ '&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7'
+
+ # The above _normalized parameters_ corresponds to the parameters below.
+ #
+ # The parameters below is copied from the table at the end of
+ # RFC 5849 section 3.4.1.3.1.
+
+ eg_params = [
+ ('b5', '=%3D'),
+ ('a3', 'a'),
+ ('c@', ''),
+ ('a2', 'r b'),
+ ('oauth_consumer_key', '9djdj82h48djs9d2'),
+ ('oauth_token', 'kkk9d7dh3k39sjv7'),
+ ('oauth_signature_method', 'HMAC-SHA1'),
+ ('oauth_timestamp', '137131201'),
+ ('oauth_nonce', '7d8f3e4a'),
+ ('c2', ''),
+ ('a3', '2 q'),
+ ]
+
+ # The above parameters correspond to parameters from the three values below.
+ #
+ # These come from RFC 5849 section 3.4.1.3.1.
+
+ eg_uri_query = 'b5=%3D%253D&a3=a&c%40=&a2=r%20b'
+
+ eg_body = 'c2&a3=2+q'
+
+ eg_authorization_header =\
+ 'OAuth realm="Example", oauth_consumer_key="9djdj82h48djs9d2",' \
+ ' oauth_token="kkk9d7dh3k39sjv7", oauth_signature_method="HMAC-SHA1",' \
+ ' oauth_timestamp="137131201", oauth_nonce="7d8f3e4a",' \
+ ' oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"'
+
+ # ==== Signature base string calculating function tests ==========
+
+ def test_signature_base_string(self):
+ """
+ Test the ``signature_base_string`` function.
+ """
+
+ # Example from RFC 5849
+
+ self.assertEqual(
+ self.eg_signature_base_string,
+ signature_base_string(
+ self.eg_http_method,
+ self.eg_base_string_uri,
+ self.eg_normalized_parameters))
+
+ # Test method is always uppercase in the signature base string
+
+ for test_method in ['POST', 'Post', 'pOST', 'poST', 'posT', 'post']:
+ self.assertEqual(
+ self.eg_signature_base_string,
+ signature_base_string(
+ test_method,
+ self.eg_base_string_uri,
+ self.eg_normalized_parameters))
+
+ def test_base_string_uri(self):
+ """
+ Test the ``base_string_uri`` function.
+ """
+
+ # ----------------
+ # Examples from the OAuth 1.0a specification: RFC 5849.
+
+ # First example from RFC 5849 section 3.4.1.2.
+ #
+ # GET /r%20v/X?id=123 HTTP/1.1
+ # Host: EXAMPLE.COM:80
+ #
+ # Note: there is a space between "r" and "v"
+
+ self.assertEqual(
+ 'http://example.com/r%20v/X',
+ base_string_uri('http://EXAMPLE.COM:80/r v/X?id=123'))
+
+ # Second example from RFC 5849 section 3.4.1.2.
+ #
+ # GET /?q=1 HTTP/1.1
+ # Host: www.example.net:8080
+
+ self.assertEqual(
+ 'https://www.example.net:8080/',
+ base_string_uri('https://www.example.net:8080/?q=1'))
+
+ # ----------------
+ # Scheme: will always be in lowercase
+
+ for uri in [
+ 'foobar://www.example.com',
+ 'FOOBAR://www.example.com',
+ 'Foobar://www.example.com',
+ 'FooBar://www.example.com',
+ 'fOObAR://www.example.com',
+ ]:
+ self.assertEqual('foobar://www.example.com/', base_string_uri(uri))
+
+ # ----------------
+ # Host: will always be in lowercase
+
+ for uri in [
+ 'http://www.example.com',
+ 'http://WWW.EXAMPLE.COM',
+ 'http://www.EXAMPLE.com',
+ 'http://wWW.eXAMPLE.cOM',
+ ]:
+ self.assertEqual('http://www.example.com/', base_string_uri(uri))
+
+ # base_string_uri has an optional host parameter that can be used to
+ # override the URI's netloc (or used as the host if there is no netloc)
+ # The "netloc" refers to the "hostname[:port]" part of the URI.
+
+ self.assertEqual(
+ 'http://actual.example.com/',
+ base_string_uri('http://IGNORE.example.com', 'ACTUAL.example.com'))
+
+ self.assertEqual(
+ 'http://override.example.com/path',
+ base_string_uri('http:///path', 'OVERRIDE.example.com'))
+
+ # ----------------
+ # Host: valid host allows for IPv4 and IPv6
+
+ self.assertEqual(
+ 'https://192.168.0.1/',
+ base_string_uri('https://192.168.0.1')
+ )
+ self.assertEqual(
+ 'https://192.168.0.1:13000/',
+ base_string_uri('https://192.168.0.1:13000')
+ )
+ self.assertEqual(
+ 'https://[123:db8:fd00:1000::5]:13000/',
+ base_string_uri('https://[123:db8:fd00:1000::5]:13000')
+ )
+ self.assertEqual(
+ 'https://[123:db8:fd00:1000::5]/',
+ base_string_uri('https://[123:db8:fd00:1000::5]')
+ )
+
+ # ----------------
+ # Port: default ports always excluded; non-default ports always included
+
+ self.assertEqual(
+ "http://www.example.com/",
+ base_string_uri("http://www.example.com:80/")) # default port
+
+ self.assertEqual(
+ "https://www.example.com/",
+ base_string_uri("https://www.example.com:443/")) # default port
+
+ self.assertEqual(
+ "https://www.example.com:999/",
+ base_string_uri("https://www.example.com:999/")) # non-default port
+
+ self.assertEqual(
+ "http://www.example.com:443/",
+ base_string_uri("HTTP://www.example.com:443/")) # non-default port
+
+ self.assertEqual(
+ "https://www.example.com:80/",
+ base_string_uri("HTTPS://www.example.com:80/")) # non-default port
+
+ self.assertEqual(
+ "http://www.example.com/",
+ base_string_uri("http://www.example.com:/")) # colon but no number
+
+ # ----------------
+ # Paths
+
+ self.assertEqual(
+ 'http://www.example.com/',
+ base_string_uri('http://www.example.com')) # no slash
+
+ self.assertEqual(
+ 'http://www.example.com/',
+ base_string_uri('http://www.example.com/')) # with slash
+
+ self.assertEqual(
+ 'http://www.example.com:8080/',
+ base_string_uri('http://www.example.com:8080')) # no slash
+
+ self.assertEqual(
+ 'http://www.example.com:8080/',
+ base_string_uri('http://www.example.com:8080/')) # with slash
+
+ self.assertEqual(
+ 'http://www.example.com/foo/bar',
+ base_string_uri('http://www.example.com/foo/bar')) # no slash
+ self.assertEqual(
+ 'http://www.example.com/foo/bar/',
+ base_string_uri('http://www.example.com/foo/bar/')) # with slash
+
+ # ----------------
+ # Query parameters & fragment IDs do not appear in the base string URI
+
+ self.assertEqual(
+ 'https://www.example.com/path',
+ base_string_uri('https://www.example.com/path?foo=bar'))
+
+ self.assertEqual(
+ 'https://www.example.com/path',
+ base_string_uri('https://www.example.com/path#fragment'))
+
+ # ----------------
+ # Percent encoding
+ #
+ # RFC 5849 does not specify what characters are percent encoded, but in
+ # one of its examples it shows spaces being percent encoded.
+ # So it is assumed that spaces must be encoded, but we don't know what
+ # other characters are encoded or not.
+
+ self.assertEqual(
+ 'https://www.example.com/hello%20world',
+ base_string_uri('https://www.example.com/hello world'))
+
+ self.assertEqual(
+ 'https://www.hello%20world.com/',
+ base_string_uri('https://www.hello world.com/'))
+
+ # ----------------
+ # Errors detected
+
+ # base_string_uri expects a string
+ self.assertRaises(ValueError, base_string_uri, None)
+ self.assertRaises(ValueError, base_string_uri, 42)
+ self.assertRaises(ValueError, base_string_uri, b'http://example.com')
+
+ # Missing scheme is an error
+ self.assertRaises(ValueError, base_string_uri, '')
+ self.assertRaises(ValueError, base_string_uri, ' ') # single space
+ self.assertRaises(ValueError, base_string_uri, 'http')
+ self.assertRaises(ValueError, base_string_uri, 'example.com')
+
+ # Missing host is an error
+ self.assertRaises(ValueError, base_string_uri, 'http:')
+ self.assertRaises(ValueError, base_string_uri, 'http://')
+ self.assertRaises(ValueError, base_string_uri, 'http://:8080')
+
+ # Port is not a valid TCP/IP port number
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:0')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:-1')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:65536')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:3.14')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:BAD')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:NaN')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com: ')
+ self.assertRaises(ValueError, base_string_uri, 'http://eg.com:42:42')
+
+ def test_collect_parameters(self):
+ """
+ Test the ``collect_parameters`` function.
+ """
+
+ # ----------------
+ # Examples from the OAuth 1.0a specification: RFC 5849.
+
+ params = collect_parameters(
+ self.eg_uri_query,
+ self.eg_body,
+ {'Authorization': self.eg_authorization_header})
+
+ # Check params contains the same pairs as control_params, ignoring order
+ self.assertEqual(sorted(self.eg_params), sorted(params))
+
+ # ----------------
+ # Examples with no parameters
+
+ self.assertEqual([], collect_parameters('', '', {}))
+
+ self.assertEqual([], collect_parameters(None, None, None))
+
+ self.assertEqual([], collect_parameters())
+
+ self.assertEqual([], collect_parameters(headers={'foo': 'bar'}))
+
+ # ----------------
+ # Test effect of exclude_oauth_signature"
+
+ no_sig = collect_parameters(
+ headers={'authorization': self.eg_authorization_header})
+ with_sig = collect_parameters(
+ headers={'authorization': self.eg_authorization_header},
+ exclude_oauth_signature=False)
+
+ self.assertEqual(sorted(no_sig + [('oauth_signature',
+ 'djosJKDKJSD8743243/jdk33klY=')]),
+ sorted(with_sig))
+
+ # ----------------
+ # Test effect of "with_realm" as well as header name case insensitivity
+
+ no_realm = collect_parameters(
+ headers={'authorization': self.eg_authorization_header},
+ with_realm=False)
+ with_realm = collect_parameters(
+ headers={'AUTHORIZATION': self.eg_authorization_header},
+ with_realm=True)
+
+ self.assertEqual(sorted(no_realm + [('realm', 'Example')]),
+ sorted(with_realm))
+
+ def test_normalize_parameters(self):
+ """
+ Test the ``normalize_parameters`` function.
+ """
+
+ # headers = {'Authorization': self.authorization_header}
+ # parameters = collect_parameters(
+ # uri_query=self.uri_query, body=self.body, headers=headers)
+ # normalized = normalize_parameters(parameters)
+ #
+ # # Unicode everywhere and always
+ # self.assertIsInstance(normalized, str)
+ #
+ # # Lets see if things are in order
+ # # check to see that querystring keys come in alphanumeric order:
+ # querystring_keys = ['a2', 'a3', 'b5', 'oauth_consumer_key',
+ # 'oauth_nonce', 'oauth_signature_method',
+ # 'oauth_timestamp', 'oauth_token']
+ # index = -1 # start at -1 because the 'a2' key starts at index 0
+ # for key in querystring_keys:
+ # self.assertGreater(normalized.index(key), index)
+ # index = normalized.index(key)
+
+ # ----------------
+ # Example from the OAuth 1.0a specification: RFC 5849.
+ # Params from end of section 3.4.1.3.1. and the expected
+ # normalized parameters from the end of section 3.4.1.3.2.
+
+ self.assertEqual(self.eg_normalized_parameters,
+ normalize_parameters(self.eg_params))
+
+ # ==== HMAC-based signature method tests =========================
+
+ hmac_client = MockClient(
+ client_secret='ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ',
+ resource_owner_secret='just-a-string asdasd')
+
+ # The following expected signatures were calculated by putting the value of
+ # the eg_signature_base_string in a file ("base-str.txt") and running:
+ #
+ # echo -n `cat base-str.txt` | openssl dgst -hmac KEY -sha1 -binary| base64
+ #
+ # Where the KEY is the concatenation of the client_secret, an ampersand and
+ # the resource_owner_secret. But those values need to be encoded properly,
+ # so the spaces in the resource_owner_secret must be represented as '%20'.
+ #
+ # Note: the "echo -n" is needed to remove the last newline character, which
+ # most text editors will add.
+
+ expected_signature_hmac_sha1 = \
+ 'wsdNmjGB7lvis0UJuPAmjvX/PXw='
+
+ expected_signature_hmac_sha256 = \
+ 'wdfdHUKXHbOnOGZP8WFAWMSAmWzN3EVBWWgXGlC/Eo4='
+
+ expected_signature_hmac_sha512 = \
+ 'u/vlyZFDxOWOZ9UUXwRBJHvq8/T4jCA74ocRmn2ECnjUBTAeJiZIRU8hDTjS88Tz' \
+ '1fGONffMpdZxUkUTW3k1kg=='
+
+ def test_sign_hmac_sha1_with_client(self):
+ """
+ Test sign and verify with HMAC-SHA1.
+ """
+ self.assertEqual(
+ self.expected_signature_hmac_sha1,
+ sign_hmac_sha1_with_client(self.eg_signature_base_string,
+ self.hmac_client))
+ self.assertTrue(verify_hmac_sha1(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_hmac_sha1),
+ self.hmac_client.client_secret,
+ self.hmac_client.resource_owner_secret))
+
+ def test_sign_hmac_sha256_with_client(self):
+ """
+ Test sign and verify with HMAC-SHA256.
+ """
+ self.assertEqual(
+ self.expected_signature_hmac_sha256,
+ sign_hmac_sha256_with_client(self.eg_signature_base_string,
+ self.hmac_client))
+ self.assertTrue(verify_hmac_sha256(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_hmac_sha256),
+ self.hmac_client.client_secret,
+ self.hmac_client.resource_owner_secret))
+
+ def test_sign_hmac_sha512_with_client(self):
+ """
+ Test sign and verify with HMAC-SHA512.
+ """
+ self.assertEqual(
+ self.expected_signature_hmac_sha512,
+ sign_hmac_sha512_with_client(self.eg_signature_base_string,
+ self.hmac_client))
+ self.assertTrue(verify_hmac_sha512(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_hmac_sha512),
+ self.hmac_client.client_secret,
+ self.hmac_client.resource_owner_secret))
+
+ def test_hmac_false_positives(self):
+ """
+ Test verify_hmac-* functions will correctly detect invalid signatures.
+ """
+
+ _ros = self.hmac_client.resource_owner_secret
+
+ for functions in [
+ (sign_hmac_sha1_with_client, verify_hmac_sha1),
+ (sign_hmac_sha256_with_client, verify_hmac_sha256),
+ (sign_hmac_sha512_with_client, verify_hmac_sha512),
+ ]:
+ signing_function = functions[0]
+ verify_function = functions[1]
+
+ good_signature = \
+ signing_function(
+ self.eg_signature_base_string,
+ self.hmac_client)
+
+ bad_signature_on_different_value = \
+ signing_function(
+ 'not the signature base string',
+ self.hmac_client)
+
+ bad_signature_produced_by_different_client_secret = \
+ signing_function(
+ self.eg_signature_base_string,
+ MockClient(client_secret='wrong-secret',
+ resource_owner_secret=_ros))
+ bad_signature_produced_by_different_resource_owner_secret = \
+ signing_function(
+ self.eg_signature_base_string,
+ MockClient(client_secret=self.hmac_client.client_secret,
+ resource_owner_secret='wrong-secret'))
+
+ bad_signature_produced_with_no_resource_owner_secret = \
+ signing_function(
+ self.eg_signature_base_string,
+ MockClient(client_secret=self.hmac_client.client_secret))
+ bad_signature_produced_with_no_client_secret = \
+ signing_function(
+ self.eg_signature_base_string,
+ MockClient(resource_owner_secret=_ros))
+
+ self.assertTrue(verify_function(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ good_signature),
+ self.hmac_client.client_secret,
+ self.hmac_client.resource_owner_secret))
+
+ for bad_signature in [
+ '',
+ 'ZG9uJ3QgdHJ1c3QgbWUK', # random base64 encoded value
+ 'altérer', # value with a non-ASCII character in it
+ bad_signature_on_different_value,
+ bad_signature_produced_by_different_client_secret,
+ bad_signature_produced_by_different_resource_owner_secret,
+ bad_signature_produced_with_no_resource_owner_secret,
+ bad_signature_produced_with_no_client_secret,
+ ]:
+ self.assertFalse(verify_function(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ bad_signature),
+ self.hmac_client.client_secret,
+ self.hmac_client.resource_owner_secret))
+
+ # ==== RSA-based signature methods tests =========================
+
+ rsa_private_client = MockClient(rsa_key='''
+-----BEGIN RSA PRIVATE KEY-----
+MIICXgIBAAKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG
+AlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVah
+5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjTMO7IdrwIDAQAB
+AoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9GxocdM1m30WyWRFMEz2nKJ8fR
+p3vTD4w8yplTOhcoXdQZl0kRoaDzrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC
+DY6xveQczE7qt7Vk7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i
+rf6qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVawFt3UMhe
+542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+XO/2xKp/d/ty1OIeovx
+C60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZLeMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT
+Suy30sKjLzqoGw1kR+wv7C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA
+kmaMg2PNrjUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzVS
+JzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lTLVduVgh4v5yLT
+Ga6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPvdMlxqXA==
+-----END RSA PRIVATE KEY-----
+''')
+
+ rsa_public_client = MockClient(rsa_key='''
+-----BEGIN RSA PUBLIC KEY-----
+MIGJAoGBAOTX9vHJLxDyOKF4d5hin/isQomil5BFEoqlkjizmL5Q/BWm6kYCXBdZ
+/NfBIxoo8Cgok127u2opGeKqy3m6gdfD3yCPTHvNqa7QKTUu1DhzukUxVqHkhgaE
+GLYT3Jw1Lfb1bbuck9Y0JsRJO7uydWUbxXyZ+8YaDfE2NMw7sh2vAgMBAAE=
+-----END RSA PUBLIC KEY-----
+''')
+
+ # The above private key was generated using:
+ # $ openssl genrsa -out example.pvt 1024
+ # $ chmod 600 example.pvt
+ # Public key was extract from it using:
+ # $ ssh-keygen -e -m pem -f example.pvt
+ # PEM encoding requires the key to be concatenated with linebreaks.
+
+ # The following expected signatures were calculated by putting the private
+ # key in a file (test.pvt) and the value of sig_base_str_rsa in another file
+ # ("base-str.txt") and running:
+ #
+ # echo -n `cat base-str.txt` | openssl dgst -sha1 -sign test.pvt| base64
+ #
+ # Note: the "echo -n" is needed to remove the last newline character, which
+ # most text editors will add.
+
+ expected_signature_rsa_sha1 = \
+ 'mFY2KOEnlYWsTvUA+5kxuBIcvBYXu+ljw9ttVJQxKduMueGSVPCB1tK1PlqVLK738' \
+ 'HK0t19ecBJfb6rMxUwrriw+MlBO+jpojkZIWccw1J4cAb4qu4M81DbpUAq4j/1w/Q' \
+ 'yTR4TWCODlEfN7Zfgy8+pf+TjiXfIwRC1jEWbuL1E='
+
+ expected_signature_rsa_sha256 = \
+ 'jqKl6m0WS69tiVJV8ZQ6aQEfJqISoZkiPBXRv6Al2+iFSaDpfeXjYm+Hbx6m1azR' \
+ 'drZ/35PM3cvuid3LwW/siAkzb0xQcGnTyAPH8YcGWzmnKGY7LsB7fkqThchNxvRK' \
+ '/N7s9M1WMnfZZ+1dQbbwtTs1TG1+iexUcV7r3M7Heec='
+
+ expected_signature_rsa_sha512 = \
+ 'jL1CnjlsNd25qoZVHZ2oJft47IRYTjpF5CvCUjL3LY0NTnbEeVhE4amWXUFBe9GL' \
+ 'DWdUh/79ZWNOrCirBFIP26cHLApjYdt4ZG7EVK0/GubS2v8wT1QPRsog8zyiMZkm' \
+ 'g4JXdWCGXG8YRvRJTg+QKhXuXwS6TcMNakrgzgFIVhA='
+
+ def test_sign_rsa_sha1_with_client(self):
+ """
+ Test sign and verify with RSA-SHA1.
+ """
+ self.assertEqual(
+ self.expected_signature_rsa_sha1,
+ sign_rsa_sha1_with_client(self.eg_signature_base_string,
+ self.rsa_private_client))
+ self.assertTrue(verify_rsa_sha1(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_rsa_sha1),
+ self.rsa_public_client.rsa_key))
+
+ def test_sign_rsa_sha256_with_client(self):
+ """
+ Test sign and verify with RSA-SHA256.
+ """
+ self.assertEqual(
+ self.expected_signature_rsa_sha256,
+ sign_rsa_sha256_with_client(self.eg_signature_base_string,
+ self.rsa_private_client))
+ self.assertTrue(verify_rsa_sha256(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_rsa_sha256),
+ self.rsa_public_client.rsa_key))
+
+ def test_sign_rsa_sha512_with_client(self):
+ """
+ Test sign and verify with RSA-SHA512.
+ """
+ self.assertEqual(
+ self.expected_signature_rsa_sha512,
+ sign_rsa_sha512_with_client(self.eg_signature_base_string,
+ self.rsa_private_client))
+ self.assertTrue(verify_rsa_sha512(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_rsa_sha512),
+ self.rsa_public_client.rsa_key))
+
+ def test_rsa_false_positives(self):
+ """
+ Test verify_rsa-* functions will correctly detect invalid signatures.
+ """
+
+ another_client = MockClient(rsa_key='''
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDZcD/1OZNJJ6Y3QZM16Z+O7fkD9kTIQuT2BfpAOUvDfxzYhVC9
+TNmSDHCQhr+ClutyolBk5jTE1/FXFUuHoPsTrkI7KQFXPP834D4gnSY9jrAiUJHe
+DVF6wXNuS7H4Ueh16YPjUxgLLRh/nn/JSEj98gsw+7DP01OWMfWS99S7eQIDAQAB
+AoGBALsQZRXVyK7BG7CiC8HwEcNnXDpaXmZjlpNKJTenk1THQMvONd4GBZAuf5D3
+PD9fE4R1u/ByVKecmBaxTV+L0TRQfD8K/nbQe0SKRQIkLI2ymLJKC/eyw5iTKT0E
++BS6wYpVd+mfcqgvpHOYpUmz9X8k/eOa7uslFmvt+sDb5ZcBAkEA+++SRqqUxFEG
+s/ZWAKw9p5YgkeVUOYVUwyAeZ97heySrjVzg1nZ6v6kv7iOPi9KOEpaIGPW7x1K/
+uQuSt4YEqQJBANzyNqZTTPpv7b/R8ABFy0YMwPVNt3b1GOU1Xxl6iuhH2WcHuueo
+UB13JHoZCMZ7hsEqieEz6uteUjdRzRPKclECQFNhVK4iop3emzNQYeJTHwyp+RmQ
+JrHq2MTDioyiDUouNsDQbnFMQQ/RtNVB265Q/0hTnbN1ELLFRkK9+87VghECQQC9
+hacLFPk6+TffCp3sHfI3rEj4Iin1iFhKhHWGzW7JwJfjoOXaQK44GDLZ6Q918g+t
+MmgDHR2tt8KeYTSgfU+BAkBcaVF91EQ7VXhvyABNYjeYP7lU7orOgdWMa/zbLXSU
+4vLsK1WOmwPY9zsXpPkilqszqcru4gzlG462cSbEdAW9
+-----END RSA PRIVATE KEY-----
+''')
+
+ for functions in [
+ (sign_rsa_sha1_with_client, verify_rsa_sha1),
+ (sign_rsa_sha256_with_client, verify_rsa_sha256),
+ (sign_rsa_sha512_with_client, verify_rsa_sha512),
+ ]:
+ signing_function = functions[0]
+ verify_function = functions[1]
+
+ good_signature = \
+ signing_function(self.eg_signature_base_string,
+ self.rsa_private_client)
+
+ bad_signature_on_different_value = \
+ signing_function('wrong value signed', self.rsa_private_client)
+
+ bad_signature_produced_by_different_private_key = \
+ signing_function(self.eg_signature_base_string, another_client)
+
+ self.assertTrue(verify_function(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ good_signature),
+ self.rsa_public_client.rsa_key))
+
+ for bad_signature in [
+ '',
+ 'ZG9uJ3QgdHJ1c3QgbWUK', # random base64 encoded value
+ 'altérer', # value with a non-ASCII character in it
+ bad_signature_on_different_value,
+ bad_signature_produced_by_different_private_key,
+ ]:
+ self.assertFalse(verify_function(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ bad_signature),
+ self.rsa_public_client.rsa_key))
+
+ def test_rsa_bad_keys(self):
+ """
+ Testing RSA sign and verify with bad key values produces errors.
+
+ This test is useful for coverage tests, since it runs the code branches
+ that deal with error situations.
+ """
+
+ # Signing needs a private key
+
+ for bad_value in [None, '', 'foobar']:
+ self.assertRaises(ValueError,
+ sign_rsa_sha1_with_client,
+ self.eg_signature_base_string,
+ MockClient(rsa_key=bad_value))
+
+ self.assertRaises(AttributeError,
+ sign_rsa_sha1_with_client,
+ self.eg_signature_base_string,
+ self.rsa_public_client) # public key doesn't sign
+
+ # Verify needs a public key
+
+ for bad_value in [None, '', 'foobar', self.rsa_private_client.rsa_key]:
+ self.assertRaises(TypeError,
+ verify_rsa_sha1,
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ self.expected_signature_rsa_sha1),
+ MockClient(rsa_key=bad_value))
+
+ # For completeness, this text could repeat the above for RSA-SHA256 and
+ # RSA-SHA512 signing and verification functions.
+
+ def test_rsa_jwt_algorithm_cache(self):
+ # Tests cache of RSAAlgorithm objects is implemented correctly.
+
+ # This is difficult to test, since the cache is internal.
+ #
+ # Running this test with coverage will show the cache-hit branch of code
+ # being executed by two signing operations with the same hash algorithm.
+
+ self.test_sign_rsa_sha1_with_client() # creates cache entry
+ self.test_sign_rsa_sha1_with_client() # reuses cache entry
+
+ # Some possible bugs will be detected if multiple signing operations
+ # with different hash algorithms produce the wrong results (e.g. if the
+ # cache incorrectly returned the previously used algorithm, instead
+ # of the one that is needed).
+
+ self.test_sign_rsa_sha256_with_client()
+ self.test_sign_rsa_sha256_with_client()
+ self.test_sign_rsa_sha1_with_client()
+ self.test_sign_rsa_sha256_with_client()
+ self.test_sign_rsa_sha512_with_client()
+
+ # ==== PLAINTEXT signature method tests ==========================
+
+ plaintext_client = hmac_client # for convenience, use the same HMAC secrets
+
+ expected_signature_plaintext = (
+ 'ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ'
+ '&'
+ 'just-a-string%20%20%20%20asdasd')
+
+ def test_sign_plaintext_with_client(self):
+ # With PLAINTEXT, the "signature" is always the same: regardless of the
+ # contents of the request. It is the concatenation of the encoded
+ # client_secret, an ampersand, and the encoded resource_owner_secret.
+ #
+ # That is why the spaces in the resource owner secret are "%20".
+
+ self.assertEqual(self.expected_signature_plaintext,
+ sign_plaintext_with_client(None, # request is ignored
+ self.plaintext_client))
+ self.assertTrue(verify_plaintext(
+ MockRequest('PUT',
+ 'http://example.com/some-other-path',
+ [('description', 'request is ignored in PLAINTEXT')],
+ self.expected_signature_plaintext),
+ self.plaintext_client.client_secret,
+ self.plaintext_client.resource_owner_secret))
+
+ def test_plaintext_false_positives(self):
+ """
+ Test verify_plaintext function will correctly detect invalid signatures.
+ """
+
+ _ros = self.plaintext_client.resource_owner_secret
+
+ good_signature = \
+ sign_plaintext_with_client(
+ self.eg_signature_base_string,
+ self.plaintext_client)
+
+ bad_signature_produced_by_different_client_secret = \
+ sign_plaintext_with_client(
+ self.eg_signature_base_string,
+ MockClient(client_secret='wrong-secret',
+ resource_owner_secret=_ros))
+ bad_signature_produced_by_different_resource_owner_secret = \
+ sign_plaintext_with_client(
+ self.eg_signature_base_string,
+ MockClient(client_secret=self.plaintext_client.client_secret,
+ resource_owner_secret='wrong-secret'))
+
+ bad_signature_produced_with_no_resource_owner_secret = \
+ sign_plaintext_with_client(
+ self.eg_signature_base_string,
+ MockClient(client_secret=self.plaintext_client.client_secret))
+ bad_signature_produced_with_no_client_secret = \
+ sign_plaintext_with_client(
+ self.eg_signature_base_string,
+ MockClient(resource_owner_secret=_ros))
+
+ self.assertTrue(verify_plaintext(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ good_signature),
+ self.plaintext_client.client_secret,
+ self.plaintext_client.resource_owner_secret))
+
+ for bad_signature in [
+ '',
+ 'ZG9uJ3QgdHJ1c3QgbWUK', # random base64 encoded value
+ 'altérer', # value with a non-ASCII character in it
+ bad_signature_produced_by_different_client_secret,
+ bad_signature_produced_by_different_resource_owner_secret,
+ bad_signature_produced_with_no_resource_owner_secret,
+ bad_signature_produced_with_no_client_secret,
+ ]:
+ self.assertFalse(verify_plaintext(
+ MockRequest('POST',
+ 'http://example.com/request',
+ self.eg_params,
+ bad_signature),
+ self.plaintext_client.client_secret,
+ self.plaintext_client.resource_owner_secret))
diff --git a/contrib/python/oauthlib/tests/oauth1/rfc5849/test_utils.py b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_utils.py
new file mode 100644
index 0000000000..013c71a910
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth1/rfc5849/test_utils.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+from oauthlib.oauth1.rfc5849.utils import *
+
+from tests.unittest import TestCase
+
+
+class UtilsTests(TestCase):
+
+ sample_params_list = [
+ ("notoauth", "shouldnotbehere"),
+ ("oauth_consumer_key", "9djdj82h48djs9d2"),
+ ("oauth_token", "kkk9d7dh3k39sjv7"),
+ ("notoautheither", "shouldnotbehere")
+ ]
+
+ sample_params_dict = {
+ "notoauth": "shouldnotbehere",
+ "oauth_consumer_key": "9djdj82h48djs9d2",
+ "oauth_token": "kkk9d7dh3k39sjv7",
+ "notoautheither": "shouldnotbehere"
+ }
+
+ sample_params_unicode_list = [
+ ("notoauth", "shouldnotbehere"),
+ ("oauth_consumer_key", "9djdj82h48djs9d2"),
+ ("oauth_token", "kkk9d7dh3k39sjv7"),
+ ("notoautheither", "shouldnotbehere")
+ ]
+
+ sample_params_unicode_dict = {
+ "notoauth": "shouldnotbehere",
+ "oauth_consumer_key": "9djdj82h48djs9d2",
+ "oauth_token": "kkk9d7dh3k39sjv7",
+ "notoautheither": "shouldnotbehere"
+ }
+
+ authorization_header = """OAuth realm="Example",
+ oauth_consumer_key="9djdj82h48djs9d2",
+ oauth_token="kkk9d7dh3k39sjv7",
+ oauth_signature_method="HMAC-SHA1",
+ oauth_timestamp="137131201",
+ oauth_nonce="7d8f3e4a",
+ oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D" """.strip()
+ bad_authorization_headers = (
+ "OAuth",
+ "OAuth oauth_nonce=",
+ "Negotiate b2F1dGhsaWI=",
+ "OA",
+ )
+
+ def test_filter_params(self):
+
+ # The following is an isolated test function used to test the filter_params decorator.
+ @filter_params
+ def special_test_function(params, realm=None):
+ """ I am a special test function """
+ return 'OAuth ' + ','.join(['='.join([k, v]) for k, v in params])
+
+ # check that the docstring got through
+ self.assertEqual(special_test_function.__doc__, " I am a special test function ")
+
+ # Check that the decorator filtering works as per design.
+ # Any param that does not start with 'oauth'
+ # should not be present in the filtered params
+ filtered_params = special_test_function(self.sample_params_list)
+ self.assertNotIn("notoauth", filtered_params)
+ self.assertIn("oauth_consumer_key", filtered_params)
+ self.assertIn("oauth_token", filtered_params)
+ self.assertNotIn("notoautheither", filtered_params)
+
+ def test_filter_oauth_params(self):
+
+ # try with list
+ # try with list
+ # try with list
+ self.assertEqual(len(self.sample_params_list), 4)
+
+ # Any param that does not start with 'oauth'
+ # should not be present in the filtered params
+ filtered_params = filter_oauth_params(self.sample_params_list)
+ self.assertEqual(len(filtered_params), 2)
+
+ self.assertTrue(filtered_params[0][0].startswith('oauth'))
+ self.assertTrue(filtered_params[1][0].startswith('oauth'))
+
+ # try with dict
+ # try with dict
+ # try with dict
+ self.assertEqual(len(self.sample_params_dict), 4)
+
+ # Any param that does not start with 'oauth'
+ # should not be present in the filtered params
+ filtered_params = filter_oauth_params(self.sample_params_dict)
+ self.assertEqual(len(filtered_params), 2)
+
+ self.assertTrue(filtered_params[0][0].startswith('oauth'))
+ self.assertTrue(filtered_params[1][0].startswith('oauth'))
+
+ def test_escape(self):
+ self.assertRaises(ValueError, escape, b"I am a string type. Not a unicode type.")
+ self.assertEqual(escape("I am a unicode type."), "I%20am%20a%20unicode%20type.")
+ self.assertIsInstance(escape("I am a unicode type."), str)
+
+ def test_unescape(self):
+ self.assertRaises(ValueError, unescape, b"I am a string type. Not a unicode type.")
+ self.assertEqual(unescape("I%20am%20a%20unicode%20type."), 'I am a unicode type.')
+ self.assertIsInstance(unescape("I%20am%20a%20unicode%20type."), str)
+
+ def test_parse_authorization_header(self):
+ # make us some headers
+ authorization_headers = parse_authorization_header(self.authorization_header)
+
+ # is it a list?
+ self.assertIsInstance(authorization_headers, list)
+
+ # are the internal items tuples?
+ for header in authorization_headers:
+ self.assertIsInstance(header, tuple)
+
+ # are the internal components of each tuple unicode?
+ for k, v in authorization_headers:
+ self.assertIsInstance(k, str)
+ self.assertIsInstance(v, str)
+
+ # let's check the parsed headers created
+ correct_headers = [
+ ("oauth_nonce", "7d8f3e4a"),
+ ("oauth_timestamp", "137131201"),
+ ("oauth_consumer_key", "9djdj82h48djs9d2"),
+ ('oauth_signature', 'djosJKDKJSD8743243%2Fjdk33klY%3D'),
+ ('oauth_signature_method', 'HMAC-SHA1'),
+ ('oauth_token', 'kkk9d7dh3k39sjv7'),
+ ('realm', 'Example')]
+ self.assertEqual(sorted(authorization_headers), sorted(correct_headers))
+
+ # Check against malformed headers.
+ for header in self.bad_authorization_headers:
+ self.assertRaises(ValueError, parse_authorization_header, header)
diff --git a/contrib/python/oauthlib/tests/oauth2/__init__.py b/contrib/python/oauthlib/tests/oauth2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/__init__.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/__init__.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_backend_application.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_backend_application.py
new file mode 100644
index 0000000000..c1489ac7c6
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_backend_application.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+import os
+from unittest.mock import patch
+
+from oauthlib import signals
+from oauthlib.oauth2 import BackendApplicationClient
+
+from tests.unittest import TestCase
+
+
+@patch('time.time', new=lambda: 1000)
+class BackendApplicationClientTest(TestCase):
+
+ client_id = "someclientid"
+ client_secret = 'someclientsecret'
+ scope = ["/profile"]
+ kwargs = {
+ "some": "providers",
+ "require": "extra arguments"
+ }
+
+ body = "not=empty"
+
+ body_up = "not=empty&grant_type=client_credentials"
+ body_kwargs = body_up + "&some=providers&require=extra+arguments"
+
+ token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type":"example",'
+ ' "expires_in":3600,'
+ ' "scope":"/profile",'
+ ' "example_parameter":"example_value"}')
+ token = {
+ "access_token": "2YotnFZFEjr1zCsicMWpAA",
+ "token_type": "example",
+ "expires_in": 3600,
+ "expires_at": 4600,
+ "scope": ["/profile"],
+ "example_parameter": "example_value"
+ }
+
+ def test_request_body(self):
+ client = BackendApplicationClient(self.client_id)
+
+ # Basic, no extra arguments
+ body = client.prepare_request_body(body=self.body)
+ self.assertFormBodyEqual(body, self.body_up)
+
+ rclient = BackendApplicationClient(self.client_id)
+ body = rclient.prepare_request_body(body=self.body)
+ self.assertFormBodyEqual(body, self.body_up)
+
+ # With extra parameters
+ body = client.prepare_request_body(body=self.body, **self.kwargs)
+ self.assertFormBodyEqual(body, self.body_kwargs)
+
+ def test_parse_token_response(self):
+ client = BackendApplicationClient(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_body_response(self.token_json, scope=self.scope)
+ self.assertEqual(response, self.token)
+ self.assertEqual(client.access_token, response.get("access_token"))
+ self.assertEqual(client.refresh_token, response.get("refresh_token"))
+ self.assertEqual(client.token_type, response.get("token_type"))
+
+ # Mismatching state
+ self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '3'
+ token = client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertTrue(token.scope_changed)
+
+ scope_changes_recorded = []
+ def record_scope_change(sender, message, old, new):
+ scope_changes_recorded.append((message, old, new))
+
+ signals.scope_changed.connect(record_scope_change)
+ try:
+ client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertEqual(len(scope_changes_recorded), 1)
+ message, old, new = scope_changes_recorded[0]
+ self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
+ self.assertEqual(old, ['invalid'])
+ self.assertEqual(new, ['/profile'])
+ finally:
+ signals.scope_changed.disconnect(record_scope_change)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_base.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_base.py
new file mode 100644
index 0000000000..70a22834c3
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_base.py
@@ -0,0 +1,355 @@
+# -*- coding: utf-8 -*-
+import datetime
+
+from oauthlib import common
+from oauthlib.oauth2 import Client, InsecureTransportError, TokenExpiredError
+from oauthlib.oauth2.rfc6749 import utils
+from oauthlib.oauth2.rfc6749.clients import AUTH_HEADER, BODY, URI_QUERY
+
+from tests.unittest import TestCase
+
+
+class ClientTest(TestCase):
+
+ client_id = "someclientid"
+ uri = "https://example.com/path?query=world"
+ body = "not=empty"
+ headers = {}
+ access_token = "token"
+ mac_key = "secret"
+
+ bearer_query = uri + "&access_token=" + access_token
+ bearer_header = {
+ "Authorization": "Bearer " + access_token
+ }
+ bearer_body = body + "&access_token=" + access_token
+
+ mac_00_header = {
+ "Authorization": 'MAC id="' + access_token + '", nonce="0:abc123",' +
+ ' bodyhash="Yqyso8r3hR5Nm1ZFv+6AvNHrxjE=",' +
+ ' mac="0X6aACoBY0G6xgGZVJ1IeE8dF9k="'
+ }
+ mac_01_header = {
+ "Authorization": 'MAC id="' + access_token + '", ts="123456789",' +
+ ' nonce="abc123", mac="Xuk+9oqaaKyhitkgh1CD0xrI6+s="'
+ }
+
+ def test_add_bearer_token(self):
+ """Test a number of bearer token placements"""
+
+ # Invalid token type
+ client = Client(self.client_id, token_type="invalid")
+ self.assertRaises(ValueError, client.add_token, self.uri)
+
+ # Case-insensitive token type
+ client = Client(self.client_id, access_token=self.access_token, token_type="bEAreR")
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers)
+ self.assertURLEqual(uri, self.uri)
+ self.assertFormBodyEqual(body, self.body)
+ self.assertEqual(headers, self.bearer_header)
+
+ # Non-HTTPS
+ insecure_uri = 'http://example.com/path?query=world'
+ client = Client(self.client_id, access_token=self.access_token, token_type="Bearer")
+ self.assertRaises(InsecureTransportError, client.add_token, insecure_uri,
+ body=self.body,
+ headers=self.headers)
+
+ # Missing access token
+ client = Client(self.client_id)
+ self.assertRaises(ValueError, client.add_token, self.uri)
+
+ # Expired token
+ expired = 523549800
+ expired_token = {
+ 'expires_at': expired,
+ }
+ client = Client(self.client_id, token=expired_token, access_token=self.access_token, token_type="Bearer")
+ self.assertRaises(TokenExpiredError, client.add_token, self.uri,
+ body=self.body, headers=self.headers)
+
+ # The default token placement, bearer in auth header
+ client = Client(self.client_id, access_token=self.access_token)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers)
+ self.assertURLEqual(uri, self.uri)
+ self.assertFormBodyEqual(body, self.body)
+ self.assertEqual(headers, self.bearer_header)
+
+ # Setting default placements of tokens
+ client = Client(self.client_id, access_token=self.access_token,
+ default_token_placement=AUTH_HEADER)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers)
+ self.assertURLEqual(uri, self.uri)
+ self.assertFormBodyEqual(body, self.body)
+ self.assertEqual(headers, self.bearer_header)
+
+ client = Client(self.client_id, access_token=self.access_token,
+ default_token_placement=URI_QUERY)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers)
+ self.assertURLEqual(uri, self.bearer_query)
+ self.assertFormBodyEqual(body, self.body)
+ self.assertEqual(headers, self.headers)
+
+ client = Client(self.client_id, access_token=self.access_token,
+ default_token_placement=BODY)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers)
+ self.assertURLEqual(uri, self.uri)
+ self.assertFormBodyEqual(body, self.bearer_body)
+ self.assertEqual(headers, self.headers)
+
+ # Asking for specific placement in the add_token method
+ client = Client(self.client_id, access_token=self.access_token)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers, token_placement=AUTH_HEADER)
+ self.assertURLEqual(uri, self.uri)
+ self.assertFormBodyEqual(body, self.body)
+ self.assertEqual(headers, self.bearer_header)
+
+ client = Client(self.client_id, access_token=self.access_token)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers, token_placement=URI_QUERY)
+ self.assertURLEqual(uri, self.bearer_query)
+ self.assertFormBodyEqual(body, self.body)
+ self.assertEqual(headers, self.headers)
+
+ client = Client(self.client_id, access_token=self.access_token)
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers, token_placement=BODY)
+ self.assertURLEqual(uri, self.uri)
+ self.assertFormBodyEqual(body, self.bearer_body)
+ self.assertEqual(headers, self.headers)
+
+ # Invalid token placement
+ client = Client(self.client_id, access_token=self.access_token)
+ self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
+ headers=self.headers, token_placement="invalid")
+
+ client = Client(self.client_id, access_token=self.access_token,
+ default_token_placement="invalid")
+ self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
+ headers=self.headers)
+
+ def test_add_mac_token(self):
+ # Missing access token
+ client = Client(self.client_id, token_type="MAC")
+ self.assertRaises(ValueError, client.add_token, self.uri)
+
+ # Invalid hash algorithm
+ client = Client(self.client_id, token_type="MAC",
+ access_token=self.access_token, mac_key=self.mac_key,
+ mac_algorithm="hmac-sha-2")
+ self.assertRaises(ValueError, client.add_token, self.uri)
+
+ orig_generate_timestamp = common.generate_timestamp
+ orig_generate_nonce = common.generate_nonce
+ orig_generate_age = utils.generate_age
+ self.addCleanup(setattr, common, 'generage_timestamp', orig_generate_timestamp)
+ self.addCleanup(setattr, common, 'generage_nonce', orig_generate_nonce)
+ self.addCleanup(setattr, utils, 'generate_age', orig_generate_age)
+ common.generate_timestamp = lambda: '123456789'
+ common.generate_nonce = lambda: 'abc123'
+ utils.generate_age = lambda *args: 0
+
+ # Add the Authorization header (draft 00)
+ client = Client(self.client_id, token_type="MAC",
+ access_token=self.access_token, mac_key=self.mac_key,
+ mac_algorithm="hmac-sha-1")
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers, issue_time=datetime.datetime.now())
+ self.assertEqual(uri, self.uri)
+ self.assertEqual(body, self.body)
+ self.assertEqual(headers, self.mac_00_header)
+ # Non-HTTPS
+ insecure_uri = 'http://example.com/path?query=world'
+ self.assertRaises(InsecureTransportError, client.add_token, insecure_uri,
+ body=self.body,
+ headers=self.headers,
+ issue_time=datetime.datetime.now())
+ # Expired Token
+ expired = 523549800
+ expired_token = {
+ 'expires_at': expired,
+ }
+ client = Client(self.client_id, token=expired_token, token_type="MAC",
+ access_token=self.access_token, mac_key=self.mac_key,
+ mac_algorithm="hmac-sha-1")
+ self.assertRaises(TokenExpiredError, client.add_token, self.uri,
+ body=self.body,
+ headers=self.headers,
+ issue_time=datetime.datetime.now())
+
+ # Add the Authorization header (draft 01)
+ client = Client(self.client_id, token_type="MAC",
+ access_token=self.access_token, mac_key=self.mac_key,
+ mac_algorithm="hmac-sha-1")
+ uri, headers, body = client.add_token(self.uri, body=self.body,
+ headers=self.headers, draft=1)
+ self.assertEqual(uri, self.uri)
+ self.assertEqual(body, self.body)
+ self.assertEqual(headers, self.mac_01_header)
+ # Non-HTTPS
+ insecure_uri = 'http://example.com/path?query=world'
+ self.assertRaises(InsecureTransportError, client.add_token, insecure_uri,
+ body=self.body,
+ headers=self.headers,
+ draft=1)
+ # Expired Token
+ expired = 523549800
+ expired_token = {
+ 'expires_at': expired,
+ }
+ client = Client(self.client_id, token=expired_token, token_type="MAC",
+ access_token=self.access_token, mac_key=self.mac_key,
+ mac_algorithm="hmac-sha-1")
+ self.assertRaises(TokenExpiredError, client.add_token, self.uri,
+ body=self.body,
+ headers=self.headers,
+ draft=1)
+
+ def test_revocation_request(self):
+ client = Client(self.client_id)
+
+ url = 'https://example.com/revoke'
+ token = 'foobar'
+
+ # Valid request
+ u, h, b = client.prepare_token_revocation_request(url, token)
+ self.assertEqual(u, url)
+ self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertEqual(b, 'token=%s&token_type_hint=access_token' % token)
+
+ # Non-HTTPS revocation endpoint
+ self.assertRaises(InsecureTransportError,
+ client.prepare_token_revocation_request,
+ 'http://example.com/revoke', token)
+
+
+ u, h, b = client.prepare_token_revocation_request(
+ url, token, token_type_hint='refresh_token')
+ self.assertEqual(u, url)
+ self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertEqual(b, 'token=%s&token_type_hint=refresh_token' % token)
+
+ # JSONP
+ u, h, b = client.prepare_token_revocation_request(
+ url, token, callback='hello.world')
+ self.assertURLEqual(u, url + '?callback=hello.world&token=%s&token_type_hint=access_token' % token)
+ self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertEqual(b, '')
+
+ def test_prepare_authorization_request(self):
+ redirect_url = 'https://example.com/callback/'
+ scopes = 'read'
+ auth_url = 'https://example.com/authorize/'
+ state = 'fake_state'
+
+ client = Client(self.client_id, redirect_url=redirect_url, scope=scopes, state=state)
+
+ # Non-HTTPS
+ self.assertRaises(InsecureTransportError,
+ client.prepare_authorization_request, 'http://example.com/authorize/')
+
+ # NotImplementedError
+ self.assertRaises(NotImplementedError, client.prepare_authorization_request, auth_url)
+
+ def test_prepare_token_request(self):
+ redirect_url = 'https://example.com/callback/'
+ scopes = 'read'
+ token_url = 'https://example.com/token/'
+ state = 'fake_state'
+
+ client = Client(self.client_id, scope=scopes, state=state)
+
+ # Non-HTTPS
+ self.assertRaises(InsecureTransportError,
+ client.prepare_token_request, 'http://example.com/token/')
+
+ # NotImplementedError
+ self.assertRaises(NotImplementedError, client.prepare_token_request, token_url)
+
+ def test_prepare_refresh_token_request(self):
+ client = Client(self.client_id)
+
+ url = 'https://example.com/revoke'
+ token = 'foobar'
+ scope = 'extra_scope'
+
+ u, h, b = client.prepare_refresh_token_request(url, token)
+ self.assertEqual(u, url)
+ self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertFormBodyEqual(b, 'grant_type=refresh_token&refresh_token=%s' % token)
+
+ # Non-HTTPS revocation endpoint
+ self.assertRaises(InsecureTransportError,
+ client.prepare_refresh_token_request,
+ 'http://example.com/revoke', token)
+
+ # provide extra scope
+ u, h, b = client.prepare_refresh_token_request(url, token, scope=scope)
+ self.assertEqual(u, url)
+ self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertFormBodyEqual(b, 'grant_type=refresh_token&scope={}&refresh_token={}'.format(scope, token))
+
+ # provide scope while init
+ client = Client(self.client_id, scope=scope)
+ u, h, b = client.prepare_refresh_token_request(url, token, scope=scope)
+ self.assertEqual(u, url)
+ self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertFormBodyEqual(b, 'grant_type=refresh_token&scope={}&refresh_token={}'.format(scope, token))
+
+ def test_parse_token_response_invalid_expires_at(self):
+ token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type":"example",'
+ ' "expires_at":"2006-01-02T15:04:05Z",'
+ ' "scope":"/profile",'
+ ' "example_parameter":"example_value"}')
+ token = {
+ "access_token": "2YotnFZFEjr1zCsicMWpAA",
+ "token_type": "example",
+ "expires_at": "2006-01-02T15:04:05Z",
+ "scope": ["/profile"],
+ "example_parameter": "example_value"
+ }
+
+ client = Client(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_body_response(token_json, scope=["/profile"])
+ self.assertEqual(response, token)
+ self.assertEqual(None, client._expires_at)
+ self.assertEqual(client.access_token, response.get("access_token"))
+ self.assertEqual(client.refresh_token, response.get("refresh_token"))
+ self.assertEqual(client.token_type, response.get("token_type"))
+
+
+ def test_create_code_verifier_min_length(self):
+ client = Client(self.client_id)
+ length = 43
+ code_verifier = client.create_code_verifier(length=length)
+ self.assertEqual(client.code_verifier, code_verifier)
+
+ def test_create_code_verifier_max_length(self):
+ client = Client(self.client_id)
+ length = 128
+ code_verifier = client.create_code_verifier(length=length)
+ self.assertEqual(client.code_verifier, code_verifier)
+
+ def test_create_code_challenge_plain(self):
+ client = Client(self.client_id)
+ code_verifier = client.create_code_verifier(length=128)
+ code_challenge_plain = client.create_code_challenge(code_verifier=code_verifier)
+
+ # if no code_challenge_method specified, code_challenge = code_verifier
+ self.assertEqual(code_challenge_plain, client.code_verifier)
+ self.assertEqual(client.code_challenge_method, "plain")
+
+ def test_create_code_challenge_s256(self):
+ client = Client(self.client_id)
+ code_verifier = client.create_code_verifier(length=128)
+ code_challenge_s256 = client.create_code_challenge(code_verifier=code_verifier, code_challenge_method='S256')
+ self.assertEqual(code_challenge_s256, client.code_challenge)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_legacy_application.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_legacy_application.py
new file mode 100644
index 0000000000..b5a18194b7
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_legacy_application.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+import os
+import urllib.parse as urlparse
+from unittest.mock import patch
+
+from oauthlib import signals
+from oauthlib.oauth2 import LegacyApplicationClient
+
+from tests.unittest import TestCase
+
+
+@patch('time.time', new=lambda: 1000)
+class LegacyApplicationClientTest(TestCase):
+
+ client_id = "someclientid"
+ client_secret = 'someclientsecret'
+ scope = ["/profile"]
+ kwargs = {
+ "some": "providers",
+ "require": "extra arguments"
+ }
+
+ username = "user_username"
+ password = "user_password"
+ body = "not=empty"
+
+ body_up = "not=empty&grant_type=password&username={}&password={}".format(username, password)
+ body_kwargs = body_up + "&some=providers&require=extra+arguments"
+
+ token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type":"example",'
+ ' "expires_in":3600,'
+ ' "scope":"/profile",'
+ ' "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter":"example_value"}')
+ token = {
+ "access_token": "2YotnFZFEjr1zCsicMWpAA",
+ "token_type": "example",
+ "expires_in": 3600,
+ "expires_at": 4600,
+ "scope": scope,
+ "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
+ "example_parameter": "example_value"
+ }
+
+ def test_request_body(self):
+ client = LegacyApplicationClient(self.client_id)
+
+ # Basic, no extra arguments
+ body = client.prepare_request_body(self.username, self.password,
+ body=self.body)
+ self.assertFormBodyEqual(body, self.body_up)
+
+ # With extra parameters
+ body = client.prepare_request_body(self.username, self.password,
+ body=self.body, **self.kwargs)
+ self.assertFormBodyEqual(body, self.body_kwargs)
+
+ def test_parse_token_response(self):
+ client = LegacyApplicationClient(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_body_response(self.token_json, scope=self.scope)
+ self.assertEqual(response, self.token)
+ self.assertEqual(client.access_token, response.get("access_token"))
+ self.assertEqual(client.refresh_token, response.get("refresh_token"))
+ self.assertEqual(client.token_type, response.get("token_type"))
+
+ # Mismatching state
+ self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '5'
+ token = client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertTrue(token.scope_changed)
+
+ scope_changes_recorded = []
+ def record_scope_change(sender, message, old, new):
+ scope_changes_recorded.append((message, old, new))
+
+ signals.scope_changed.connect(record_scope_change)
+ try:
+ client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertEqual(len(scope_changes_recorded), 1)
+ message, old, new = scope_changes_recorded[0]
+ self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
+ self.assertEqual(old, ['invalid'])
+ self.assertEqual(new, ['/profile'])
+ finally:
+ signals.scope_changed.disconnect(record_scope_change)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
+
+ def test_prepare_request_body(self):
+ """
+ see issue #585
+ https://github.com/oauthlib/oauthlib/issues/585
+ """
+ client = LegacyApplicationClient(self.client_id)
+
+ # scenario 1, default behavior to not include `client_id`
+ r1 = client.prepare_request_body(username=self.username, password=self.password)
+ self.assertIn(r1, ('grant_type=password&username={}&password={}'.format(self.username, self.password),
+ 'grant_type=password&password={}&username={}'.format(self.password, self.username),
+ ))
+
+ # scenario 2, include `client_id` in the body
+ r2 = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True)
+ r2_params = dict(urlparse.parse_qsl(r2, keep_blank_values=True))
+ self.assertEqual(len(r2_params.keys()), 4)
+ self.assertEqual(r2_params['grant_type'], 'password')
+ self.assertEqual(r2_params['username'], self.username)
+ self.assertEqual(r2_params['password'], self.password)
+ self.assertEqual(r2_params['client_id'], self.client_id)
+
+ # scenario 3, include `client_id` + `client_secret` in the body
+ r3 = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True, client_secret=self.client_secret)
+ r3_params = dict(urlparse.parse_qsl(r3, keep_blank_values=True))
+ self.assertEqual(len(r3_params.keys()), 5)
+ self.assertEqual(r3_params['grant_type'], 'password')
+ self.assertEqual(r3_params['username'], self.username)
+ self.assertEqual(r3_params['password'], self.password)
+ self.assertEqual(r3_params['client_id'], self.client_id)
+ self.assertEqual(r3_params['client_secret'], self.client_secret)
+
+ # scenario 4, `client_secret` is an empty string
+ r4 = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True, client_secret='')
+ r4_params = dict(urlparse.parse_qsl(r4, keep_blank_values=True))
+ self.assertEqual(len(r4_params.keys()), 5)
+ self.assertEqual(r4_params['grant_type'], 'password')
+ self.assertEqual(r4_params['username'], self.username)
+ self.assertEqual(r4_params['password'], self.password)
+ self.assertEqual(r4_params['client_id'], self.client_id)
+ self.assertEqual(r4_params['client_secret'], '')
+
+ # scenario 4b`,` client_secret is `None`
+ r4b = client.prepare_request_body(username=self.username, password=self.password, include_client_id=True, client_secret=None)
+ r4b_params = dict(urlparse.parse_qsl(r4b, keep_blank_values=True))
+ self.assertEqual(len(r4b_params.keys()), 4)
+ self.assertEqual(r4b_params['grant_type'], 'password')
+ self.assertEqual(r4b_params['username'], self.username)
+ self.assertEqual(r4b_params['password'], self.password)
+ self.assertEqual(r4b_params['client_id'], self.client_id)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_mobile_application.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_mobile_application.py
new file mode 100644
index 0000000000..c40950c978
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_mobile_application.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+import os
+from unittest.mock import patch
+
+from oauthlib import signals
+from oauthlib.oauth2 import MobileApplicationClient
+
+from tests.unittest import TestCase
+
+
+@patch('time.time', new=lambda: 1000)
+class MobileApplicationClientTest(TestCase):
+
+ client_id = "someclientid"
+ uri = "https://example.com/path?query=world"
+ uri_id = uri + "&response_type=token&client_id=" + client_id
+ uri_redirect = uri_id + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
+ redirect_uri = "http://my.page.com/callback"
+ scope = ["/profile"]
+ state = "xyz"
+ uri_scope = uri_id + "&scope=%2Fprofile"
+ uri_state = uri_id + "&state=" + state
+ kwargs = {
+ "some": "providers",
+ "require": "extra arguments"
+ }
+ uri_kwargs = uri_id + "&some=providers&require=extra+arguments"
+
+ code = "zzzzaaaa"
+
+ response_uri = ('https://client.example.com/cb?#'
+ 'access_token=2YotnFZFEjr1zCsicMWpAA&'
+ 'token_type=example&'
+ 'expires_in=3600&'
+ 'scope=%2Fprofile&'
+ 'example_parameter=example_value')
+ token = {
+ "access_token": "2YotnFZFEjr1zCsicMWpAA",
+ "token_type": "example",
+ "expires_in": 3600,
+ "expires_at": 4600,
+ "scope": scope,
+ "example_parameter": "example_value"
+ }
+
+ def test_implicit_token_uri(self):
+ client = MobileApplicationClient(self.client_id)
+
+ # Basic, no extra arguments
+ uri = client.prepare_request_uri(self.uri)
+ self.assertURLEqual(uri, self.uri_id)
+
+ # With redirection uri
+ uri = client.prepare_request_uri(self.uri, redirect_uri=self.redirect_uri)
+ self.assertURLEqual(uri, self.uri_redirect)
+
+ # With scope
+ uri = client.prepare_request_uri(self.uri, scope=self.scope)
+ self.assertURLEqual(uri, self.uri_scope)
+
+ # With state
+ uri = client.prepare_request_uri(self.uri, state=self.state)
+ self.assertURLEqual(uri, self.uri_state)
+
+ # With extra parameters through kwargs
+ uri = client.prepare_request_uri(self.uri, **self.kwargs)
+ self.assertURLEqual(uri, self.uri_kwargs)
+
+ def test_populate_attributes(self):
+
+ client = MobileApplicationClient(self.client_id)
+
+ response_uri = (self.response_uri + "&code=EVIL-CODE")
+
+ client.parse_request_uri_response(response_uri, scope=self.scope)
+
+ # We must not accidentally pick up any further security
+ # credentials at this point.
+ self.assertIsNone(client.code)
+
+ def test_parse_token_response(self):
+ client = MobileApplicationClient(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_uri_response(self.response_uri, scope=self.scope)
+ self.assertEqual(response, self.token)
+ self.assertEqual(client.access_token, response.get("access_token"))
+ self.assertEqual(client.refresh_token, response.get("refresh_token"))
+ self.assertEqual(client.token_type, response.get("token_type"))
+
+ # Mismatching scope
+ self.assertRaises(Warning, client.parse_request_uri_response, self.response_uri, scope="invalid")
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '4'
+ token = client.parse_request_uri_response(self.response_uri, scope='invalid')
+ self.assertTrue(token.scope_changed)
+
+ scope_changes_recorded = []
+ def record_scope_change(sender, message, old, new):
+ scope_changes_recorded.append((message, old, new))
+
+ signals.scope_changed.connect(record_scope_change)
+ try:
+ client.parse_request_uri_response(self.response_uri, scope="invalid")
+ self.assertEqual(len(scope_changes_recorded), 1)
+ message, old, new = scope_changes_recorded[0]
+ self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
+ self.assertEqual(old, ['invalid'])
+ self.assertEqual(new, ['/profile'])
+ finally:
+ signals.scope_changed.disconnect(record_scope_change)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_service_application.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_service_application.py
new file mode 100644
index 0000000000..b97d8554ed
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_service_application.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+import os
+from time import time
+from unittest.mock import patch
+
+import jwt
+
+from oauthlib.common import Request
+from oauthlib.oauth2 import ServiceApplicationClient
+
+from tests.unittest import TestCase
+
+
+class ServiceApplicationClientTest(TestCase):
+
+ gt = ServiceApplicationClient.grant_type
+
+ private_key = """
+-----BEGIN RSA PRIVATE KEY-----
+MIICXgIBAAKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG
+AlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVah
+5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjTMO7IdrwIDAQAB
+AoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9GxocdM1m30WyWRFMEz2nKJ8fR
+p3vTD4w8yplTOhcoXdQZl0kRoaDzrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC
+DY6xveQczE7qt7Vk7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i
+rf6qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVawFt3UMhe
+542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+XO/2xKp/d/ty1OIeovx
+C60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZLeMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT
+Suy30sKjLzqoGw1kR+wv7C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA
+kmaMg2PNrjUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzVS
+JzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lTLVduVgh4v5yLT
+Ga6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPvdMlxqXA==
+-----END RSA PRIVATE KEY-----
+"""
+
+ public_key = """
+-----BEGIN PUBLIC KEY-----
+MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJ
+opeQRRKKpZI4s5i+UPwVpupGAlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98g
+j0x7zamu0Ck1LtQ4c7pFMVah5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8
+mfvGGg3xNjTMO7IdrwIDAQAB
+-----END PUBLIC KEY-----
+"""
+
+ subject = 'resource-owner@provider.com'
+
+ issuer = 'the-client@provider.com'
+
+ audience = 'https://provider.com/token'
+
+ client_id = "someclientid"
+ scope = ["/profile"]
+ kwargs = {
+ "some": "providers",
+ "require": "extra arguments"
+ }
+
+ body = "isnot=empty"
+
+ body_up = "not=empty&grant_type=%s" % gt
+ body_kwargs = body_up + "&some=providers&require=extra+arguments"
+
+ token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type":"example",'
+ ' "expires_in":3600,'
+ ' "scope":"/profile",'
+ ' "example_parameter":"example_value"}')
+ token = {
+ "access_token": "2YotnFZFEjr1zCsicMWpAA",
+ "token_type": "example",
+ "expires_in": 3600,
+ "scope": ["/profile"],
+ "example_parameter": "example_value"
+ }
+
+ @patch('time.time')
+ def test_request_body(self, t):
+ t.return_value = time()
+ self.token['expires_at'] = self.token['expires_in'] + t.return_value
+
+ client = ServiceApplicationClient(
+ self.client_id, private_key=self.private_key)
+
+ # Basic with min required params
+ body = client.prepare_request_body(issuer=self.issuer,
+ subject=self.subject,
+ audience=self.audience,
+ body=self.body)
+ r = Request('https://a.b', body=body)
+ self.assertEqual(r.isnot, 'empty')
+ self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type)
+
+ claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256'])
+
+ self.assertEqual(claim['iss'], self.issuer)
+ # audience verification is handled during decode now
+ self.assertEqual(claim['sub'], self.subject)
+ self.assertEqual(claim['iat'], int(t.return_value))
+ self.assertNotIn('nbf', claim)
+ self.assertNotIn('jti', claim)
+
+ # Missing issuer parameter
+ self.assertRaises(ValueError, client.prepare_request_body,
+ issuer=None, subject=self.subject, audience=self.audience, body=self.body)
+
+ # Missing subject parameter
+ self.assertRaises(ValueError, client.prepare_request_body,
+ issuer=self.issuer, subject=None, audience=self.audience, body=self.body)
+
+ # Missing audience parameter
+ self.assertRaises(ValueError, client.prepare_request_body,
+ issuer=self.issuer, subject=self.subject, audience=None, body=self.body)
+
+ # Optional kwargs
+ not_before = time() - 3600
+ jwt_id = '8zd15df4s35f43sd'
+ body = client.prepare_request_body(issuer=self.issuer,
+ subject=self.subject,
+ audience=self.audience,
+ body=self.body,
+ not_before=not_before,
+ jwt_id=jwt_id)
+
+ r = Request('https://a.b', body=body)
+ self.assertEqual(r.isnot, 'empty')
+ self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type)
+
+ claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256'])
+
+ self.assertEqual(claim['iss'], self.issuer)
+ # audience verification is handled during decode now
+ self.assertEqual(claim['sub'], self.subject)
+ self.assertEqual(claim['iat'], int(t.return_value))
+ self.assertEqual(claim['nbf'], not_before)
+ self.assertEqual(claim['jti'], jwt_id)
+
+ @patch('time.time')
+ def test_request_body_no_initial_private_key(self, t):
+ t.return_value = time()
+ self.token['expires_at'] = self.token['expires_in'] + t.return_value
+
+ client = ServiceApplicationClient(
+ self.client_id, private_key=None)
+
+ # Basic with private key provided
+ body = client.prepare_request_body(issuer=self.issuer,
+ subject=self.subject,
+ audience=self.audience,
+ body=self.body,
+ private_key=self.private_key)
+ r = Request('https://a.b', body=body)
+ self.assertEqual(r.isnot, 'empty')
+ self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type)
+
+ claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256'])
+
+ self.assertEqual(claim['iss'], self.issuer)
+ # audience verification is handled during decode now
+ self.assertEqual(claim['sub'], self.subject)
+ self.assertEqual(claim['iat'], int(t.return_value))
+
+ # No private key provided
+ self.assertRaises(ValueError, client.prepare_request_body,
+ issuer=self.issuer, subject=self.subject, audience=self.audience, body=self.body)
+
+ @patch('time.time')
+ def test_parse_token_response(self, t):
+ t.return_value = time()
+ self.token['expires_at'] = self.token['expires_in'] + t.return_value
+
+ client = ServiceApplicationClient(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_body_response(self.token_json, scope=self.scope)
+ self.assertEqual(response, self.token)
+ self.assertEqual(client.access_token, response.get("access_token"))
+ self.assertEqual(client.refresh_token, response.get("refresh_token"))
+ self.assertEqual(client.token_type, response.get("token_type"))
+
+ # Mismatching state
+ self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '2'
+ token = client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertTrue(token.scope_changed)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_web_application.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_web_application.py
new file mode 100644
index 0000000000..7a71121512
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/clients/test_web_application.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+import os
+import urllib.parse as urlparse
+import warnings
+from unittest.mock import patch
+
+from oauthlib import common, signals
+from oauthlib.oauth2 import (
+ BackendApplicationClient, Client, LegacyApplicationClient,
+ MobileApplicationClient, WebApplicationClient,
+)
+from oauthlib.oauth2.rfc6749 import errors, utils
+from oauthlib.oauth2.rfc6749.clients import AUTH_HEADER, BODY, URI_QUERY
+
+from tests.unittest import TestCase
+
+
+@patch('time.time', new=lambda: 1000)
+class WebApplicationClientTest(TestCase):
+
+ client_id = "someclientid"
+ client_secret = 'someclientsecret'
+ uri = "https://example.com/path?query=world"
+ uri_id = uri + "&response_type=code&client_id=" + client_id
+ uri_redirect = uri_id + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
+ redirect_uri = "http://my.page.com/callback"
+ code_verifier = "code_verifier"
+ scope = ["/profile"]
+ state = "xyz"
+ code_challenge = "code_challenge"
+ code_challenge_method = "S256"
+ uri_scope = uri_id + "&scope=%2Fprofile"
+ uri_state = uri_id + "&state=" + state
+ uri_code_challenge = uri_id + "&code_challenge=" + code_challenge + "&code_challenge_method=" + code_challenge_method
+ uri_code_challenge_method = uri_id + "&code_challenge=" + code_challenge + "&code_challenge_method=plain"
+ kwargs = {
+ "some": "providers",
+ "require": "extra arguments"
+ }
+ uri_kwargs = uri_id + "&some=providers&require=extra+arguments"
+ uri_authorize_code = uri_redirect + "&scope=%2Fprofile&state=" + state
+
+ code = "zzzzaaaa"
+ body = "not=empty"
+
+ body_code = "not=empty&grant_type=authorization_code&code={}&client_id={}".format(code, client_id)
+ body_redirect = body_code + "&redirect_uri=http%3A%2F%2Fmy.page.com%2Fcallback"
+ body_code_verifier = body_code + "&code_verifier=code_verifier"
+ body_kwargs = body_code + "&some=providers&require=extra+arguments"
+
+ response_uri = "https://client.example.com/cb?code=zzzzaaaa&state=xyz"
+ response = {"code": "zzzzaaaa", "state": "xyz"}
+
+ token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type":"example",'
+ ' "expires_in":3600,'
+ ' "scope":"/profile",'
+ ' "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter":"example_value"}')
+ token = {
+ "access_token": "2YotnFZFEjr1zCsicMWpAA",
+ "token_type": "example",
+ "expires_in": 3600,
+ "expires_at": 4600,
+ "scope": scope,
+ "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
+ "example_parameter": "example_value"
+ }
+
+ def test_auth_grant_uri(self):
+ client = WebApplicationClient(self.client_id)
+
+ # Basic, no extra arguments
+ uri = client.prepare_request_uri(self.uri)
+ self.assertURLEqual(uri, self.uri_id)
+
+ # With redirection uri
+ uri = client.prepare_request_uri(self.uri, redirect_uri=self.redirect_uri)
+ self.assertURLEqual(uri, self.uri_redirect)
+
+ # With scope
+ uri = client.prepare_request_uri(self.uri, scope=self.scope)
+ self.assertURLEqual(uri, self.uri_scope)
+
+ # With state
+ uri = client.prepare_request_uri(self.uri, state=self.state)
+ self.assertURLEqual(uri, self.uri_state)
+
+ # with code_challenge and code_challenge_method
+ uri = client.prepare_request_uri(self.uri, code_challenge=self.code_challenge, code_challenge_method=self.code_challenge_method)
+ self.assertURLEqual(uri, self.uri_code_challenge)
+
+ # with no code_challenge_method
+ uri = client.prepare_request_uri(self.uri, code_challenge=self.code_challenge)
+ self.assertURLEqual(uri, self.uri_code_challenge_method)
+
+ # With extra parameters through kwargs
+ uri = client.prepare_request_uri(self.uri, **self.kwargs)
+ self.assertURLEqual(uri, self.uri_kwargs)
+
+ def test_request_body(self):
+ client = WebApplicationClient(self.client_id, code=self.code)
+
+ # Basic, no extra arguments
+ body = client.prepare_request_body(body=self.body)
+ self.assertFormBodyEqual(body, self.body_code)
+
+ rclient = WebApplicationClient(self.client_id)
+ body = rclient.prepare_request_body(code=self.code, body=self.body)
+ self.assertFormBodyEqual(body, self.body_code)
+
+ # With redirection uri
+ body = client.prepare_request_body(body=self.body, redirect_uri=self.redirect_uri)
+ self.assertFormBodyEqual(body, self.body_redirect)
+
+ # With code verifier
+ body = client.prepare_request_body(body=self.body, code_verifier=self.code_verifier)
+ self.assertFormBodyEqual(body, self.body_code_verifier)
+
+ # With extra parameters
+ body = client.prepare_request_body(body=self.body, **self.kwargs)
+ self.assertFormBodyEqual(body, self.body_kwargs)
+
+ def test_parse_grant_uri_response(self):
+ client = WebApplicationClient(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_uri_response(self.response_uri, state=self.state)
+ self.assertEqual(response, self.response)
+ self.assertEqual(client.code, self.code)
+
+ # Mismatching state
+ self.assertRaises(errors.MismatchingStateError,
+ client.parse_request_uri_response,
+ self.response_uri,
+ state="invalid")
+
+ def test_populate_attributes(self):
+
+ client = WebApplicationClient(self.client_id)
+
+ response_uri = (self.response_uri +
+ "&access_token=EVIL-TOKEN"
+ "&refresh_token=EVIL-TOKEN"
+ "&mac_key=EVIL-KEY")
+
+ client.parse_request_uri_response(response_uri, self.state)
+
+ self.assertEqual(client.code, self.code)
+
+ # We must not accidentally pick up any further security
+ # credentials at this point.
+ self.assertIsNone(client.access_token)
+ self.assertIsNone(client.refresh_token)
+ self.assertIsNone(client.mac_key)
+
+ def test_parse_token_response(self):
+ client = WebApplicationClient(self.client_id)
+
+ # Parse code and state
+ response = client.parse_request_body_response(self.token_json, scope=self.scope)
+ self.assertEqual(response, self.token)
+ self.assertEqual(client.access_token, response.get("access_token"))
+ self.assertEqual(client.refresh_token, response.get("refresh_token"))
+ self.assertEqual(client.token_type, response.get("token_type"))
+
+ # Mismatching state
+ self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
+ token = client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertTrue(token.scope_changed)
+
+ scope_changes_recorded = []
+ def record_scope_change(sender, message, old, new):
+ scope_changes_recorded.append((message, old, new))
+
+ signals.scope_changed.connect(record_scope_change)
+ try:
+ client.parse_request_body_response(self.token_json, scope="invalid")
+ self.assertEqual(len(scope_changes_recorded), 1)
+ message, old, new = scope_changes_recorded[0]
+ self.assertEqual(message, 'Scope has changed from "invalid" to "/profile".')
+ self.assertEqual(old, ['invalid'])
+ self.assertEqual(new, ['/profile'])
+ finally:
+ signals.scope_changed.disconnect(record_scope_change)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
+
+ def test_prepare_authorization_requeset(self):
+ client = WebApplicationClient(self.client_id)
+
+ url, header, body = client.prepare_authorization_request(
+ self.uri, redirect_url=self.redirect_uri, state=self.state, scope=self.scope)
+ self.assertURLEqual(url, self.uri_authorize_code)
+ # verify default header and body only
+ self.assertEqual(header, {'Content-Type': 'application/x-www-form-urlencoded'})
+ self.assertEqual(body, '')
+
+ def test_prepare_request_body(self):
+ """
+ see issue #585
+ https://github.com/oauthlib/oauthlib/issues/585
+
+ `prepare_request_body` should support the following scenarios:
+ 1. Include client_id alone in the body (default)
+ 2. Include client_id and client_secret in auth and not include them in the body (RFC preferred solution)
+ 3. Include client_id and client_secret in the body (RFC alternative solution)
+ 4. Include client_id in the body and an empty string for client_secret.
+ """
+ client = WebApplicationClient(self.client_id)
+
+ # scenario 1, default behavior to include `client_id`
+ r1 = client.prepare_request_body()
+ self.assertEqual(r1, 'grant_type=authorization_code&client_id=%s' % self.client_id)
+
+ r1b = client.prepare_request_body(include_client_id=True)
+ self.assertEqual(r1b, 'grant_type=authorization_code&client_id=%s' % self.client_id)
+
+ # scenario 2, do not include `client_id` in the body, so it can be sent in auth.
+ r2 = client.prepare_request_body(include_client_id=False)
+ self.assertEqual(r2, 'grant_type=authorization_code')
+
+ # scenario 3, Include client_id and client_secret in the body (RFC alternative solution)
+ # the order of kwargs being appended is not guaranteed. for brevity, check the 2 permutations instead of sorting
+ r3 = client.prepare_request_body(client_secret=self.client_secret)
+ r3_params = dict(urlparse.parse_qsl(r3, keep_blank_values=True))
+ self.assertEqual(len(r3_params.keys()), 3)
+ self.assertEqual(r3_params['grant_type'], 'authorization_code')
+ self.assertEqual(r3_params['client_id'], self.client_id)
+ self.assertEqual(r3_params['client_secret'], self.client_secret)
+
+ r3b = client.prepare_request_body(include_client_id=True, client_secret=self.client_secret)
+ r3b_params = dict(urlparse.parse_qsl(r3b, keep_blank_values=True))
+ self.assertEqual(len(r3b_params.keys()), 3)
+ self.assertEqual(r3b_params['grant_type'], 'authorization_code')
+ self.assertEqual(r3b_params['client_id'], self.client_id)
+ self.assertEqual(r3b_params['client_secret'], self.client_secret)
+
+ # scenario 4, `client_secret` is an empty string
+ r4 = client.prepare_request_body(include_client_id=True, client_secret='')
+ r4_params = dict(urlparse.parse_qsl(r4, keep_blank_values=True))
+ self.assertEqual(len(r4_params.keys()), 3)
+ self.assertEqual(r4_params['grant_type'], 'authorization_code')
+ self.assertEqual(r4_params['client_id'], self.client_id)
+ self.assertEqual(r4_params['client_secret'], '')
+
+ # scenario 4b, `client_secret` is `None`
+ r4b = client.prepare_request_body(include_client_id=True, client_secret=None)
+ r4b_params = dict(urlparse.parse_qsl(r4b, keep_blank_values=True))
+ self.assertEqual(len(r4b_params.keys()), 2)
+ self.assertEqual(r4b_params['grant_type'], 'authorization_code')
+ self.assertEqual(r4b_params['client_id'], self.client_id)
+
+ # scenario Warnings
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always") # catch all
+
+ # warning1 - raise a DeprecationWarning if a `client_id` is submitted
+ rWarnings1 = client.prepare_request_body(client_id=self.client_id)
+ self.assertEqual(len(w), 1)
+ self.assertIsInstance(w[0].message, DeprecationWarning)
+
+ # testing the exact warning message in Python2&Python3 is a pain
+
+ # scenario Exceptions
+ # exception1 - raise a ValueError if the a different `client_id` is submitted
+ with self.assertRaises(ValueError) as cm:
+ client.prepare_request_body(client_id='different_client_id')
+ # testing the exact exception message in Python2&Python3 is a pain
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/__init__.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_base_endpoint.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_base_endpoint.py
new file mode 100644
index 0000000000..b1af6c3306
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_base_endpoint.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+from oauthlib.oauth2 import (
+ FatalClientError, OAuth2Error, RequestValidator, Server,
+)
+from oauthlib.oauth2.rfc6749 import (
+ BaseEndpoint, catch_errors_and_unavailability,
+)
+
+from tests.unittest import TestCase
+
+
+class BaseEndpointTest(TestCase):
+
+ def test_default_config(self):
+ endpoint = BaseEndpoint()
+ self.assertFalse(endpoint.catch_errors)
+ self.assertTrue(endpoint.available)
+ endpoint.catch_errors = True
+ self.assertTrue(endpoint.catch_errors)
+ endpoint.available = False
+ self.assertFalse(endpoint.available)
+
+ def test_error_catching(self):
+ validator = RequestValidator()
+ server = Server(validator)
+ server.catch_errors = True
+ h, b, s = server.create_token_response(
+ 'https://example.com', body='grant_type=authorization_code&code=abc'
+ )
+ self.assertIn("server_error", b)
+ self.assertEqual(s, 500)
+
+ def test_unavailability(self):
+ validator = RequestValidator()
+ server = Server(validator)
+ server.available = False
+ h, b, s = server.create_authorization_response('https://example.com')
+ self.assertIn("temporarily_unavailable", b)
+ self.assertEqual(s, 503)
+
+ def test_wrapper(self):
+
+ class TestServer(Server):
+
+ @catch_errors_and_unavailability
+ def throw_error(self, uri):
+ raise ValueError()
+
+ @catch_errors_and_unavailability
+ def throw_oauth_error(self, uri):
+ raise OAuth2Error()
+
+ @catch_errors_and_unavailability
+ def throw_fatal_oauth_error(self, uri):
+ raise FatalClientError()
+
+ validator = RequestValidator()
+ server = TestServer(validator)
+
+ server.catch_errors = True
+ h, b, s = server.throw_error('a')
+ self.assertIn("server_error", b)
+ self.assertEqual(s, 500)
+
+ server.available = False
+ h, b, s = server.throw_error('a')
+ self.assertIn("temporarily_unavailable", b)
+ self.assertEqual(s, 503)
+
+ server.available = True
+ self.assertRaises(OAuth2Error, server.throw_oauth_error, 'a')
+ self.assertRaises(FatalClientError, server.throw_fatal_oauth_error, 'a')
+ server.catch_errors = False
+ self.assertRaises(OAuth2Error, server.throw_oauth_error, 'a')
+ self.assertRaises(FatalClientError, server.throw_fatal_oauth_error, 'a')
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_client_authentication.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_client_authentication.py
new file mode 100644
index 0000000000..0659ee0d25
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_client_authentication.py
@@ -0,0 +1,162 @@
+"""Client authentication tests across all endpoints.
+
+Client authentication in OAuth2 serve two purposes, to authenticate
+confidential clients and to ensure public clients are in fact public. The
+latter is achieved with authenticate_client_id and the former with
+authenticate_client.
+
+We make sure authentication is done by requiring a client object to be set
+on the request object with a client_id parameter. The client_id attribute
+prevents this check from being circumvented with a client form parameter.
+"""
+import json
+from unittest import mock
+
+from oauthlib.oauth2 import (
+ BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
+ RequestValidator, WebApplicationServer,
+)
+
+from tests.unittest import TestCase
+
+from .test_utils import get_fragment_credentials
+
+
+class ClientAuthenticationTest(TestCase):
+
+ def inspect_client(self, request, refresh_token=False):
+ if not request.client or not request.client.client_id:
+ raise ValueError()
+ return 'abc'
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.is_pkce_required.return_value = False
+ self.validator.get_code_challenge.return_value = None
+ self.validator.get_default_redirect_uri.return_value = 'http://i.b./path'
+ self.web = WebApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.mobile = MobileApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.legacy = LegacyApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.backend = BackendApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.token_uri = 'http://example.com/path'
+ self.auth_uri = 'http://example.com/path?client_id=abc&response_type=token'
+ # should be base64 but no added value in this unittest
+ self.basicauth_client_creds = {"Authorization": "john:doe"}
+ self.basicauth_client_id = {"Authorization": "john:"}
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def set_client_id(self, client_id, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def basicauth_authenticate_client(self, request):
+ assert "Authorization" in request.headers
+ assert "john:doe" in request.headers["Authorization"]
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def test_client_id_authentication(self):
+ token_uri = 'http://example.com/path'
+
+ # authorization code grant
+ self.validator.authenticate_client.return_value = False
+ self.validator.authenticate_client_id.return_value = False
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=mock')
+ self.assertEqual(json.loads(body)['error'], 'invalid_client')
+
+ self.validator.authenticate_client_id.return_value = True
+ self.validator.authenticate_client.side_effect = self.set_client
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=mock')
+ self.assertIn('access_token', json.loads(body))
+
+ # implicit grant
+ auth_uri = 'http://example.com/path?client_id=abc&response_type=token'
+ self.assertRaises(ValueError, self.mobile.create_authorization_response,
+ auth_uri, scopes=['random'])
+
+ self.validator.validate_client_id.side_effect = self.set_client_id
+ h, _, s = self.mobile.create_authorization_response(auth_uri, scopes=['random'])
+ self.assertEqual(302, s)
+ self.assertIn('Location', h)
+ self.assertIn('access_token', get_fragment_credentials(h['Location']))
+
+ def test_basicauth_web(self):
+ self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
+ _, body, _ = self.web.create_token_response(
+ self.token_uri,
+ body='grant_type=authorization_code&code=mock',
+ headers=self.basicauth_client_creds
+ )
+ self.assertIn('access_token', json.loads(body))
+
+ def test_basicauth_legacy(self):
+ self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
+ _, body, _ = self.legacy.create_token_response(
+ self.token_uri,
+ body='grant_type=password&username=abc&password=secret',
+ headers=self.basicauth_client_creds
+ )
+ self.assertIn('access_token', json.loads(body))
+
+ def test_basicauth_backend(self):
+ self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
+ _, body, _ = self.backend.create_token_response(
+ self.token_uri,
+ body='grant_type=client_credentials',
+ headers=self.basicauth_client_creds
+ )
+ self.assertIn('access_token', json.loads(body))
+
+ def test_basicauth_revoke(self):
+ self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
+
+ # legacy or any other uses the same RevocationEndpoint
+ _, body, status = self.legacy.create_revocation_response(
+ self.token_uri,
+ body='token=foobar',
+ headers=self.basicauth_client_creds
+ )
+ self.assertEqual(status, 200, body)
+
+ def test_basicauth_introspect(self):
+ self.validator.authenticate_client.side_effect = self.basicauth_authenticate_client
+
+ # legacy or any other uses the same IntrospectEndpoint
+ _, body, status = self.legacy.create_introspect_response(
+ self.token_uri,
+ body='token=foobar',
+ headers=self.basicauth_client_creds
+ )
+ self.assertEqual(status, 200, body)
+
+ def test_custom_authentication(self):
+ token_uri = 'http://example.com/path'
+
+ # authorization code grant
+ self.assertRaises(NotImplementedError,
+ self.web.create_token_response, token_uri,
+ body='grant_type=authorization_code&code=mock')
+
+ # password grant
+ self.validator.authenticate_client.return_value = True
+ self.assertRaises(NotImplementedError,
+ self.legacy.create_token_response, token_uri,
+ body='grant_type=password&username=abc&password=secret')
+
+ # client credentials grant
+ self.validator.authenticate_client.return_value = True
+ self.assertRaises(NotImplementedError,
+ self.backend.create_token_response, token_uri,
+ body='grant_type=client_credentials')
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py
new file mode 100644
index 0000000000..32c770ccb7
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py
@@ -0,0 +1,128 @@
+"""Ensure credentials are preserved through the authorization.
+
+The Authorization Code Grant will need to preserve state as well as redirect
+uri and the Implicit Grant will need to preserve state.
+"""
+import json
+from unittest import mock
+
+from oauthlib.oauth2 import (
+ MobileApplicationServer, RequestValidator, WebApplicationServer,
+)
+from oauthlib.oauth2.rfc6749 import errors
+
+from tests.unittest import TestCase
+
+from .test_utils import get_fragment_credentials, get_query_credentials
+
+
+class PreservationTest(TestCase):
+
+ DEFAULT_REDIRECT_URI = 'http://i.b./path'
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.get_default_redirect_uri.return_value = self.DEFAULT_REDIRECT_URI
+ self.validator.get_code_challenge.return_value = None
+ self.validator.authenticate_client.side_effect = self.set_client
+ self.web = WebApplicationServer(self.validator)
+ self.mobile = MobileApplicationServer(self.validator)
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def test_state_preservation(self):
+ auth_uri = 'http://example.com/path?state=xyz&client_id=abc&response_type='
+
+ # authorization grant
+ h, _, s = self.web.create_authorization_response(
+ auth_uri + 'code', scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertEqual(get_query_credentials(h['Location'])['state'][0], 'xyz')
+
+ # implicit grant
+ h, _, s = self.mobile.create_authorization_response(
+ auth_uri + 'token', scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertEqual(get_fragment_credentials(h['Location'])['state'][0], 'xyz')
+
+ def test_redirect_uri_preservation(self):
+ auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
+ redirect_uri = 'http://i.b/path'
+ token_uri = 'http://example.com/path'
+
+ # authorization grant
+ h, _, s = self.web.create_authorization_response(
+ auth_uri + '&response_type=code', scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertTrue(h['Location'].startswith(redirect_uri))
+
+ # confirm_redirect_uri should return false if the redirect uri
+ # was given in the authorization but not in the token request.
+ self.validator.confirm_redirect_uri.return_value = False
+ code = get_query_credentials(h['Location'])['code'][0]
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=%s' % code)
+ self.assertEqual(json.loads(body)['error'], 'invalid_request')
+
+ # implicit grant
+ h, _, s = self.mobile.create_authorization_response(
+ auth_uri + '&response_type=token', scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertTrue(h['Location'].startswith(redirect_uri))
+
+ def test_invalid_redirect_uri(self):
+ auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
+ self.validator.validate_redirect_uri.return_value = False
+
+ # authorization grant
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.web.create_authorization_response,
+ auth_uri + '&response_type=code', scopes=['random'])
+
+ # implicit grant
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.mobile.create_authorization_response,
+ auth_uri + '&response_type=token', scopes=['random'])
+
+ def test_default_uri(self):
+ auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
+
+ self.validator.get_default_redirect_uri.return_value = None
+
+ # authorization grant
+ self.assertRaises(errors.MissingRedirectURIError,
+ self.web.create_authorization_response,
+ auth_uri + '&response_type=code', scopes=['random'])
+
+ # implicit grant
+ self.assertRaises(errors.MissingRedirectURIError,
+ self.mobile.create_authorization_response,
+ auth_uri + '&response_type=token', scopes=['random'])
+
+ def test_default_uri_in_token(self):
+ auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
+ token_uri = 'http://example.com/path'
+
+ # authorization grant
+ h, _, s = self.web.create_authorization_response(
+ auth_uri + '&response_type=code', scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertTrue(h['Location'].startswith(self.DEFAULT_REDIRECT_URI))
+
+ # confirm_redirect_uri should return true if the redirect uri
+ # was not given in the authorization AND not in the token request.
+ self.validator.confirm_redirect_uri.return_value = True
+ code = get_query_credentials(h['Location'])['code'][0]
+ self.validator.validate_code.return_value = True
+ _, body, s = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=%s' % code)
+ self.assertEqual(s, 200)
+ self.assertEqual(self.validator.confirm_redirect_uri.call_args[0][2], self.DEFAULT_REDIRECT_URI)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_error_responses.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_error_responses.py
new file mode 100644
index 0000000000..f61595e213
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_error_responses.py
@@ -0,0 +1,491 @@
+"""Ensure the correct error responses are returned for all defined error types.
+"""
+import json
+from unittest import mock
+
+from oauthlib.common import urlencode
+from oauthlib.oauth2 import (
+ BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
+ RequestValidator, WebApplicationServer,
+)
+from oauthlib.oauth2.rfc6749 import errors
+
+from tests.unittest import TestCase
+
+
+class ErrorResponseTest(TestCase):
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.get_default_redirect_uri.return_value = None
+ self.validator.get_code_challenge.return_value = None
+ self.web = WebApplicationServer(self.validator)
+ self.mobile = MobileApplicationServer(self.validator)
+ self.legacy = LegacyApplicationServer(self.validator)
+ self.backend = BackendApplicationServer(self.validator)
+
+ def test_invalid_redirect_uri(self):
+ uri = 'https://example.com/authorize?response_type={0}&client_id=foo&redirect_uri=wrong'
+
+ # Authorization code grant
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.web.validate_authorization_request, uri.format('code'))
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.web.create_authorization_response, uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.mobile.validate_authorization_request, uri.format('token'))
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.mobile.create_authorization_response, uri.format('token'), scopes=['foo'])
+
+ def test_invalid_default_redirect_uri(self):
+ uri = 'https://example.com/authorize?response_type={0}&client_id=foo'
+ self.validator.get_default_redirect_uri.return_value = "wrong"
+
+ # Authorization code grant
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.web.validate_authorization_request, uri.format('code'))
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.web.create_authorization_response, uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.mobile.validate_authorization_request, uri.format('token'))
+ self.assertRaises(errors.InvalidRedirectURIError,
+ self.mobile.create_authorization_response, uri.format('token'), scopes=['foo'])
+
+ def test_missing_redirect_uri(self):
+ uri = 'https://example.com/authorize?response_type={0}&client_id=foo'
+
+ # Authorization code grant
+ self.assertRaises(errors.MissingRedirectURIError,
+ self.web.validate_authorization_request, uri.format('code'))
+ self.assertRaises(errors.MissingRedirectURIError,
+ self.web.create_authorization_response, uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaises(errors.MissingRedirectURIError,
+ self.mobile.validate_authorization_request, uri.format('token'))
+ self.assertRaises(errors.MissingRedirectURIError,
+ self.mobile.create_authorization_response, uri.format('token'), scopes=['foo'])
+
+ def test_mismatching_redirect_uri(self):
+ uri = 'https://example.com/authorize?response_type={0}&client_id=foo&redirect_uri=https%3A%2F%2Fi.b%2Fback'
+
+ # Authorization code grant
+ self.validator.validate_redirect_uri.return_value = False
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.web.validate_authorization_request, uri.format('code'))
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.web.create_authorization_response, uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.mobile.validate_authorization_request, uri.format('token'))
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.mobile.create_authorization_response, uri.format('token'), scopes=['foo'])
+
+ def test_missing_client_id(self):
+ uri = 'https://example.com/authorize?response_type={0}&redirect_uri=https%3A%2F%2Fi.b%2Fback'
+
+ # Authorization code grant
+ self.validator.validate_redirect_uri.return_value = False
+ self.assertRaises(errors.MissingClientIdError,
+ self.web.validate_authorization_request, uri.format('code'))
+ self.assertRaises(errors.MissingClientIdError,
+ self.web.create_authorization_response, uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaises(errors.MissingClientIdError,
+ self.mobile.validate_authorization_request, uri.format('token'))
+ self.assertRaises(errors.MissingClientIdError,
+ self.mobile.create_authorization_response, uri.format('token'), scopes=['foo'])
+
+ def test_invalid_client_id(self):
+ uri = 'https://example.com/authorize?response_type={0}&client_id=foo&redirect_uri=https%3A%2F%2Fi.b%2Fback'
+
+ # Authorization code grant
+ self.validator.validate_client_id.return_value = False
+ self.assertRaises(errors.InvalidClientIdError,
+ self.web.validate_authorization_request, uri.format('code'))
+ self.assertRaises(errors.InvalidClientIdError,
+ self.web.create_authorization_response, uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaises(errors.InvalidClientIdError,
+ self.mobile.validate_authorization_request, uri.format('token'))
+ self.assertRaises(errors.InvalidClientIdError,
+ self.mobile.create_authorization_response, uri.format('token'), scopes=['foo'])
+
+ def test_empty_parameter(self):
+ uri = 'https://example.com/authorize?client_id=foo&redirect_uri=https%3A%2F%2Fi.b%2Fback&response_type=code&'
+
+ # Authorization code grant
+ self.assertRaises(errors.InvalidRequestFatalError,
+ self.web.validate_authorization_request, uri)
+
+ # Implicit grant
+ self.assertRaises(errors.InvalidRequestFatalError,
+ self.mobile.validate_authorization_request, uri)
+
+ def test_invalid_request(self):
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+ token_uri = 'https://i.b/token'
+
+ invalid_bodies = [
+ # duplicate params
+ 'grant_type=authorization_code&client_id=nope&client_id=nope&code=foo'
+ ]
+ for body in invalid_bodies:
+ _, body, _ = self.web.create_token_response(token_uri,
+ body=body)
+ self.assertEqual('invalid_request', json.loads(body)['error'])
+
+ # Password credentials grant
+ invalid_bodies = [
+ # duplicate params
+ 'grant_type=password&username=foo&username=bar&password=baz',
+ # missing username
+ 'grant_type=password&password=baz',
+ # missing password
+ 'grant_type=password&username=foo'
+ ]
+ self.validator.authenticate_client.side_effect = self.set_client
+ for body in invalid_bodies:
+ _, body, _ = self.legacy.create_token_response(token_uri,
+ body=body)
+ self.assertEqual('invalid_request', json.loads(body)['error'])
+
+ # Client credentials grant
+ invalid_bodies = [
+ # duplicate params
+ 'grant_type=client_credentials&scope=foo&scope=bar'
+ ]
+ for body in invalid_bodies:
+ _, body, _ = self.backend.create_token_response(token_uri,
+ body=body)
+ self.assertEqual('invalid_request', json.loads(body)['error'])
+
+ def test_invalid_request_duplicate_params(self):
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+ uri = 'https://i.b/auth?client_id=foo&client_id=bar&response_type={0}'
+ description = 'Duplicate client_id parameter.'
+
+ # Authorization code
+ self.assertRaisesRegex(errors.InvalidRequestFatalError,
+ description,
+ self.web.validate_authorization_request,
+ uri.format('code'))
+ self.assertRaisesRegex(errors.InvalidRequestFatalError,
+ description,
+ self.web.create_authorization_response,
+ uri.format('code'), scopes=['foo'])
+
+ # Implicit grant
+ self.assertRaisesRegex(errors.InvalidRequestFatalError,
+ description,
+ self.mobile.validate_authorization_request,
+ uri.format('token'))
+ self.assertRaisesRegex(errors.InvalidRequestFatalError,
+ description,
+ self.mobile.create_authorization_response,
+ uri.format('token'), scopes=['foo'])
+
+ def test_invalid_request_missing_response_type(self):
+
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+
+ uri = 'https://i.b/auth?client_id=foo'
+
+ # Authorization code
+ self.assertRaises(errors.MissingResponseTypeError,
+ self.web.validate_authorization_request,
+ uri)
+ h, _, s = self.web.create_authorization_response(uri, scopes=['foo'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ # Implicit grant
+ self.assertRaises(errors.MissingResponseTypeError,
+ self.mobile.validate_authorization_request,
+ uri)
+ h, _, s = self.mobile.create_authorization_response(uri, scopes=['foo'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ def test_unauthorized_client(self):
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+ self.validator.validate_grant_type.return_value = False
+ self.validator.validate_response_type.return_value = False
+ self.validator.authenticate_client.side_effect = self.set_client
+ token_uri = 'https://i.b/token'
+
+ # Authorization code grant
+ self.assertRaises(errors.UnauthorizedClientError,
+ self.web.validate_authorization_request,
+ 'https://i.b/auth?response_type=code&client_id=foo')
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=foo')
+ self.assertEqual('unauthorized_client', json.loads(body)['error'])
+
+ # Implicit grant
+ self.assertRaises(errors.UnauthorizedClientError,
+ self.mobile.validate_authorization_request,
+ 'https://i.b/auth?response_type=token&client_id=foo')
+
+ # Password credentials grant
+ _, body, _ = self.legacy.create_token_response(token_uri,
+ body='grant_type=password&username=foo&password=bar')
+ self.assertEqual('unauthorized_client', json.loads(body)['error'])
+
+ # Client credentials grant
+ _, body, _ = self.backend.create_token_response(token_uri,
+ body='grant_type=client_credentials')
+ self.assertEqual('unauthorized_client', json.loads(body)['error'])
+
+ def test_access_denied(self):
+ self.validator.authenticate_client.side_effect = self.set_client
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+ self.validator.confirm_redirect_uri.return_value = False
+ token_uri = 'https://i.b/token'
+ # Authorization code grant
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=foo')
+ self.assertEqual('invalid_request', json.loads(body)['error'])
+
+ def test_access_denied_no_default_redirecturi(self):
+ self.validator.authenticate_client.side_effect = self.set_client
+ self.validator.get_default_redirect_uri.return_value = None
+ token_uri = 'https://i.b/token'
+ # Authorization code grant
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=foo')
+ self.assertEqual('invalid_request', json.loads(body)['error'])
+
+ def test_unsupported_response_type(self):
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+
+ # Authorization code grant
+ self.assertRaises(errors.UnsupportedResponseTypeError,
+ self.web.validate_authorization_request,
+ 'https://i.b/auth?response_type=foo&client_id=foo')
+
+ # Implicit grant
+ self.assertRaises(errors.UnsupportedResponseTypeError,
+ self.mobile.validate_authorization_request,
+ 'https://i.b/auth?response_type=foo&client_id=foo')
+
+ def test_invalid_scope(self):
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+ self.validator.validate_scopes.return_value = False
+ self.validator.authenticate_client.side_effect = self.set_client
+
+ # Authorization code grant
+ self.assertRaises(errors.InvalidScopeError,
+ self.web.validate_authorization_request,
+ 'https://i.b/auth?response_type=code&client_id=foo')
+
+ # Implicit grant
+ self.assertRaises(errors.InvalidScopeError,
+ self.mobile.validate_authorization_request,
+ 'https://i.b/auth?response_type=token&client_id=foo')
+
+ # Password credentials grant
+ _, body, _ = self.legacy.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=password&username=foo&password=bar')
+ self.assertEqual('invalid_scope', json.loads(body)['error'])
+
+ # Client credentials grant
+ _, body, _ = self.backend.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=client_credentials')
+ self.assertEqual('invalid_scope', json.loads(body)['error'])
+
+ def test_server_error(self):
+ def raise_error(*args, **kwargs):
+ raise ValueError()
+
+ self.validator.validate_client_id.side_effect = raise_error
+ self.validator.authenticate_client.side_effect = raise_error
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+
+ # Authorization code grant
+ self.web.catch_errors = True
+ _, _, s = self.web.create_authorization_response(
+ 'https://i.b/auth?client_id=foo&response_type=code',
+ scopes=['foo'])
+ self.assertEqual(s, 500)
+ _, _, s = self.web.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=authorization_code&code=foo',
+ scopes=['foo'])
+ self.assertEqual(s, 500)
+
+ # Implicit grant
+ self.mobile.catch_errors = True
+ _, _, s = self.mobile.create_authorization_response(
+ 'https://i.b/auth?client_id=foo&response_type=token',
+ scopes=['foo'])
+ self.assertEqual(s, 500)
+
+ # Password credentials grant
+ self.legacy.catch_errors = True
+ _, _, s = self.legacy.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=password&username=foo&password=foo')
+ self.assertEqual(s, 500)
+
+ # Client credentials grant
+ self.backend.catch_errors = True
+ _, _, s = self.backend.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=client_credentials')
+ self.assertEqual(s, 500)
+
+ def test_temporarily_unavailable(self):
+ # Authorization code grant
+ self.web.available = False
+ _, _, s = self.web.create_authorization_response(
+ 'https://i.b/auth?client_id=foo&response_type=code',
+ scopes=['foo'])
+ self.assertEqual(s, 503)
+ _, _, s = self.web.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=authorization_code&code=foo',
+ scopes=['foo'])
+ self.assertEqual(s, 503)
+
+ # Implicit grant
+ self.mobile.available = False
+ _, _, s = self.mobile.create_authorization_response(
+ 'https://i.b/auth?client_id=foo&response_type=token',
+ scopes=['foo'])
+ self.assertEqual(s, 503)
+
+ # Password credentials grant
+ self.legacy.available = False
+ _, _, s = self.legacy.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=password&username=foo&password=foo')
+ self.assertEqual(s, 503)
+
+ # Client credentials grant
+ self.backend.available = False
+ _, _, s = self.backend.create_token_response(
+ 'https://i.b/token',
+ body='grant_type=client_credentials')
+ self.assertEqual(s, 503)
+
+ def test_invalid_client(self):
+ self.validator.authenticate_client.return_value = False
+ self.validator.authenticate_client_id.return_value = False
+
+ # Authorization code grant
+ _, body, _ = self.web.create_token_response('https://i.b/token',
+ body='grant_type=authorization_code&code=foo')
+ self.assertEqual('invalid_client', json.loads(body)['error'])
+
+ # Password credentials grant
+ _, body, _ = self.legacy.create_token_response('https://i.b/token',
+ body='grant_type=password&username=foo&password=bar')
+ self.assertEqual('invalid_client', json.loads(body)['error'])
+
+ # Client credentials grant
+ _, body, _ = self.backend.create_token_response('https://i.b/token',
+ body='grant_type=client_credentials')
+ self.assertEqual('invalid_client', json.loads(body)['error'])
+
+ def test_invalid_grant(self):
+ self.validator.authenticate_client.side_effect = self.set_client
+
+ # Authorization code grant
+ self.validator.validate_code.return_value = False
+ _, body, _ = self.web.create_token_response('https://i.b/token',
+ body='grant_type=authorization_code&code=foo')
+ self.assertEqual('invalid_grant', json.loads(body)['error'])
+
+ # Password credentials grant
+ self.validator.validate_user.return_value = False
+ _, body, _ = self.legacy.create_token_response('https://i.b/token',
+ body='grant_type=password&username=foo&password=bar')
+ self.assertEqual('invalid_grant', json.loads(body)['error'])
+
+ def test_unsupported_grant_type(self):
+ self.validator.authenticate_client.side_effect = self.set_client
+
+ # Authorization code grant
+ _, body, _ = self.web.create_token_response('https://i.b/token',
+ body='grant_type=bar&code=foo')
+ self.assertEqual('unsupported_grant_type', json.loads(body)['error'])
+
+ # Password credentials grant
+ _, body, _ = self.legacy.create_token_response('https://i.b/token',
+ body='grant_type=bar&username=foo&password=bar')
+ self.assertEqual('unsupported_grant_type', json.loads(body)['error'])
+
+ # Client credentials grant
+ _, body, _ = self.backend.create_token_response('https://i.b/token',
+ body='grant_type=bar')
+ self.assertEqual('unsupported_grant_type', json.loads(body)['error'])
+
+ def test_invalid_request_method(self):
+ test_methods = ['GET', 'pUt', 'dEleTe', 'paTcH']
+ test_methods = test_methods + [x.lower() for x in test_methods] + [x.upper() for x in test_methods]
+ for method in test_methods:
+ self.validator.authenticate_client.side_effect = self.set_client
+
+ uri = "http://i/b/token/"
+ try:
+ _, body, s = self.web.create_token_response(uri,
+ body='grant_type=access_token&code=123', http_method=method)
+ self.fail('This should have failed with InvalidRequestError')
+ except errors.InvalidRequestError as ire:
+ self.assertIn('Unsupported request method', ire.description)
+
+ try:
+ _, body, s = self.legacy.create_token_response(uri,
+ body='grant_type=access_token&code=123', http_method=method)
+ self.fail('This should have failed with InvalidRequestError')
+ except errors.InvalidRequestError as ire:
+ self.assertIn('Unsupported request method', ire.description)
+
+ try:
+ _, body, s = self.backend.create_token_response(uri,
+ body='grant_type=access_token&code=123', http_method=method)
+ self.fail('This should have failed with InvalidRequestError')
+ except errors.InvalidRequestError as ire:
+ self.assertIn('Unsupported request method', ire.description)
+
+ def test_invalid_post_request(self):
+ self.validator.authenticate_client.side_effect = self.set_client
+ for param in ['token', 'secret', 'code', 'foo']:
+ uri = 'https://i/b/token?' + urlencode([(param, 'secret')])
+ try:
+ _, body, s = self.web.create_token_response(uri,
+ body='grant_type=access_token&code=123')
+ self.fail('This should have failed with InvalidRequestError')
+ except errors.InvalidRequestError as ire:
+ self.assertIn('URL query parameters are not allowed', ire.description)
+
+ try:
+ _, body, s = self.legacy.create_token_response(uri,
+ body='grant_type=access_token&code=123')
+ self.fail('This should have failed with InvalidRequestError')
+ except errors.InvalidRequestError as ire:
+ self.assertIn('URL query parameters are not allowed', ire.description)
+
+ try:
+ _, body, s = self.backend.create_token_response(uri,
+ body='grant_type=access_token&code=123')
+ self.fail('This should have failed with InvalidRequestError')
+ except errors.InvalidRequestError as ire:
+ self.assertIn('URL query parameters are not allowed', ire.description)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_extra_credentials.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_extra_credentials.py
new file mode 100644
index 0000000000..97aaf86dff
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_extra_credentials.py
@@ -0,0 +1,69 @@
+"""Ensure extra credentials can be supplied for inclusion in tokens.
+"""
+from unittest import mock
+
+from oauthlib.oauth2 import (
+ BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
+ RequestValidator, WebApplicationServer,
+)
+
+from tests.unittest import TestCase
+
+
+class ExtraCredentialsTest(TestCase):
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
+ self.web = WebApplicationServer(self.validator)
+ self.mobile = MobileApplicationServer(self.validator)
+ self.legacy = LegacyApplicationServer(self.validator)
+ self.backend = BackendApplicationServer(self.validator)
+
+ def test_post_authorization_request(self):
+ def save_code(client_id, token, request):
+ self.assertEqual('creds', request.extra)
+
+ def save_token(token, request):
+ self.assertEqual('creds', request.extra)
+
+ # Authorization code grant
+ self.validator.save_authorization_code.side_effect = save_code
+ self.web.create_authorization_response(
+ 'https://i.b/auth?client_id=foo&response_type=code',
+ scopes=['foo'],
+ credentials={'extra': 'creds'})
+
+ # Implicit grant
+ self.validator.save_bearer_token.side_effect = save_token
+ self.mobile.create_authorization_response(
+ 'https://i.b/auth?client_id=foo&response_type=token',
+ scopes=['foo'],
+ credentials={'extra': 'creds'})
+
+ def test_token_request(self):
+ def save_token(token, request):
+ self.assertIn('extra', token)
+
+ self.validator.save_bearer_token.side_effect = save_token
+ self.validator.authenticate_client.side_effect = self.set_client
+
+ # Authorization code grant
+ self.web.create_token_response('https://i.b/token',
+ body='grant_type=authorization_code&code=foo',
+ credentials={'extra': 'creds'})
+
+ # Password credentials grant
+ self.legacy.create_token_response('https://i.b/token',
+ body='grant_type=password&username=foo&password=bar',
+ credentials={'extra': 'creds'})
+
+ # Client credentials grant
+ self.backend.create_token_response('https://i.b/token',
+ body='grant_type=client_credentials',
+ credentials={'extra': 'creds'})
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_introspect_endpoint.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_introspect_endpoint.py
new file mode 100644
index 0000000000..6d3d119a3b
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_introspect_endpoint.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+from json import loads
+from unittest.mock import MagicMock
+
+from oauthlib.common import urlencode
+from oauthlib.oauth2 import IntrospectEndpoint, RequestValidator
+
+from tests.unittest import TestCase
+
+
+class IntrospectEndpointTest(TestCase):
+
+ def setUp(self):
+ self.validator = MagicMock(wraps=RequestValidator())
+ self.validator.client_authentication_required.return_value = True
+ self.validator.authenticate_client.return_value = True
+ self.validator.validate_bearer_token.return_value = True
+ self.validator.introspect_token.return_value = {}
+ self.endpoint = IntrospectEndpoint(self.validator)
+
+ self.uri = 'should_not_matter'
+ self.headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ self.resp_h = {
+ 'Cache-Control': 'no-store',
+ 'Content-Type': 'application/json',
+ 'Pragma': 'no-cache'
+ }
+ self.resp_b = {
+ "active": True
+ }
+
+ def test_introspect_token(self):
+ for token_type in ('access_token', 'refresh_token', 'invalid'):
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', token_type)])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b), self.resp_b)
+ self.assertEqual(s, 200)
+
+ def test_introspect_token_nohint(self):
+ # don't specify token_type_hint
+ body = urlencode([('token', 'foo')])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b), self.resp_b)
+ self.assertEqual(s, 200)
+
+ def test_introspect_token_false(self):
+ self.validator.introspect_token.return_value = None
+ body = urlencode([('token', 'foo')])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b), {"active": False})
+ self.assertEqual(s, 200)
+
+ def test_introspect_token_claims(self):
+ self.validator.introspect_token.return_value = {"foo": "bar"}
+ body = urlencode([('token', 'foo')])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b), {"active": True, "foo": "bar"})
+ self.assertEqual(s, 200)
+
+ def test_introspect_token_claims_spoof_active(self):
+ self.validator.introspect_token.return_value = {"foo": "bar", "active": False}
+ body = urlencode([('token', 'foo')])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b), {"active": True, "foo": "bar"})
+ self.assertEqual(s, 200)
+
+ def test_introspect_token_client_authentication_failed(self):
+ self.validator.authenticate_client.return_value = False
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'access_token')])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {
+ 'Content-Type': 'application/json',
+ 'Cache-Control': 'no-store',
+ 'Pragma': 'no-cache',
+ "WWW-Authenticate": 'Bearer error="invalid_client"'
+ })
+ self.assertEqual(loads(b)['error'], 'invalid_client')
+ self.assertEqual(s, 401)
+
+ def test_introspect_token_public_client_authentication(self):
+ self.validator.client_authentication_required.return_value = False
+ self.validator.authenticate_client_id.return_value = True
+ for token_type in ('access_token', 'refresh_token', 'invalid'):
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', token_type)])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b), self.resp_b)
+ self.assertEqual(s, 200)
+
+ def test_introspect_token_public_client_authentication_failed(self):
+ self.validator.client_authentication_required.return_value = False
+ self.validator.authenticate_client_id.return_value = False
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'access_token')])
+ h, b, s = self.endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {
+ 'Content-Type': 'application/json',
+ 'Cache-Control': 'no-store',
+ 'Pragma': 'no-cache',
+ "WWW-Authenticate": 'Bearer error="invalid_client"'
+ })
+ self.assertEqual(loads(b)['error'], 'invalid_client')
+ self.assertEqual(s, 401)
+
+ def test_introspect_unsupported_token(self):
+ endpoint = IntrospectEndpoint(self.validator,
+ supported_token_types=['access_token'])
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'refresh_token')])
+ h, b, s = endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'unsupported_token_type')
+ self.assertEqual(s, 400)
+
+ h, b, s = endpoint.create_introspect_response(self.uri,
+ headers=self.headers, body='')
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'invalid_request')
+ self.assertEqual(s, 400)
+
+ def test_introspect_invalid_request_method(self):
+ endpoint = IntrospectEndpoint(self.validator,
+ supported_token_types=['access_token'])
+ test_methods = ['GET', 'pUt', 'dEleTe', 'paTcH']
+ test_methods = test_methods + [x.lower() for x in test_methods] + [x.upper() for x in test_methods]
+ for method in test_methods:
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'refresh_token')])
+ h, b, s = endpoint.create_introspect_response(self.uri,
+ http_method=method, headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'invalid_request')
+ self.assertIn('Unsupported request method', loads(b)['error_description'])
+ self.assertEqual(s, 400)
+
+ def test_introspect_bad_post_request(self):
+ endpoint = IntrospectEndpoint(self.validator,
+ supported_token_types=['access_token'])
+ for param in ['token', 'secret', 'code', 'foo']:
+ uri = 'http://some.endpoint?' + urlencode([(param, 'secret')])
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'access_token')])
+ h, b, s = endpoint.create_introspect_response(
+ uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'invalid_request')
+ self.assertIn('query parameters are not allowed', loads(b)['error_description'])
+ self.assertEqual(s, 400)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_metadata.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_metadata.py
new file mode 100644
index 0000000000..1f5b912100
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_metadata.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+import json
+
+from oauthlib.oauth2 import MetadataEndpoint, Server, TokenEndpoint
+
+from tests.unittest import TestCase
+
+
+class MetadataEndpointTest(TestCase):
+ def setUp(self):
+ self.metadata = {
+ "issuer": 'https://foo.bar'
+ }
+
+ def test_openid_oauth2_preconfigured(self):
+ default_claims = {
+ "issuer": 'https://foo.bar',
+ "authorization_endpoint": "https://foo.bar/authorize",
+ "revocation_endpoint": "https://foo.bar/revoke",
+ "introspection_endpoint": "https://foo.bar/introspect",
+ "token_endpoint": "https://foo.bar/token"
+ }
+ from oauthlib.oauth2 import Server as OAuth2Server
+ from oauthlib.openid import Server as OpenIDServer
+
+ endpoint = OAuth2Server(None)
+ metadata = MetadataEndpoint([endpoint], default_claims)
+ oauth2_claims = metadata.claims
+
+ endpoint = OpenIDServer(None)
+ metadata = MetadataEndpoint([endpoint], default_claims)
+ openid_claims = metadata.claims
+
+ # Pure OAuth2 Authorization Metadata are similar with OpenID but
+ # response_type not! (OIDC contains "id_token" and hybrid flows)
+ del oauth2_claims['response_types_supported']
+ del openid_claims['response_types_supported']
+
+ self.maxDiff = None
+ self.assertEqual(openid_claims, oauth2_claims)
+
+ def test_create_metadata_response(self):
+ endpoint = TokenEndpoint(None, None, grant_types={"password": None})
+ metadata = MetadataEndpoint([endpoint], {
+ "issuer": 'https://foo.bar',
+ "token_endpoint": "https://foo.bar/token"
+ })
+ headers, body, status = metadata.create_metadata_response('/', 'GET')
+ assert headers == {
+ 'Content-Type': 'application/json',
+ 'Access-Control-Allow-Origin': '*',
+ }
+ claims = json.loads(body)
+ assert claims['issuer'] == 'https://foo.bar'
+
+ def test_token_endpoint(self):
+ endpoint = TokenEndpoint(None, None, grant_types={"password": None})
+ metadata = MetadataEndpoint([endpoint], {
+ "issuer": 'https://foo.bar',
+ "token_endpoint": "https://foo.bar/token"
+ })
+ self.assertIn("grant_types_supported", metadata.claims)
+ self.assertEqual(metadata.claims["grant_types_supported"], ["password"])
+
+ def test_token_endpoint_overridden(self):
+ endpoint = TokenEndpoint(None, None, grant_types={"password": None})
+ metadata = MetadataEndpoint([endpoint], {
+ "issuer": 'https://foo.bar',
+ "token_endpoint": "https://foo.bar/token",
+ "grant_types_supported": ["pass_word_special_provider"]
+ })
+ self.assertIn("grant_types_supported", metadata.claims)
+ self.assertEqual(metadata.claims["grant_types_supported"], ["pass_word_special_provider"])
+
+ def test_mandatory_fields(self):
+ metadata = MetadataEndpoint([], self.metadata)
+ self.assertIn("issuer", metadata.claims)
+ self.assertEqual(metadata.claims["issuer"], 'https://foo.bar')
+
+ def test_server_metadata(self):
+ endpoint = Server(None)
+ metadata = MetadataEndpoint([endpoint], {
+ "issuer": 'https://foo.bar',
+ "authorization_endpoint": "https://foo.bar/authorize",
+ "introspection_endpoint": "https://foo.bar/introspect",
+ "revocation_endpoint": "https://foo.bar/revoke",
+ "token_endpoint": "https://foo.bar/token",
+ "jwks_uri": "https://foo.bar/certs",
+ "scopes_supported": ["email", "profile"]
+ })
+ expected_claims = {
+ "issuer": "https://foo.bar",
+ "authorization_endpoint": "https://foo.bar/authorize",
+ "introspection_endpoint": "https://foo.bar/introspect",
+ "revocation_endpoint": "https://foo.bar/revoke",
+ "token_endpoint": "https://foo.bar/token",
+ "jwks_uri": "https://foo.bar/certs",
+ "scopes_supported": ["email", "profile"],
+ "grant_types_supported": [
+ "authorization_code",
+ "password",
+ "client_credentials",
+ "refresh_token",
+ "implicit"
+ ],
+ "token_endpoint_auth_methods_supported": [
+ "client_secret_post",
+ "client_secret_basic"
+ ],
+ "response_types_supported": [
+ "code",
+ "token"
+ ],
+ "response_modes_supported": [
+ "query",
+ "fragment"
+ ],
+ "code_challenge_methods_supported": [
+ "plain",
+ "S256"
+ ],
+ "revocation_endpoint_auth_methods_supported": [
+ "client_secret_post",
+ "client_secret_basic"
+ ],
+ "introspection_endpoint_auth_methods_supported": [
+ "client_secret_post",
+ "client_secret_basic"
+ ]
+ }
+
+ def sort_list(claims):
+ for k in claims.keys():
+ claims[k] = sorted(claims[k])
+
+ sort_list(metadata.claims)
+ sort_list(expected_claims)
+ self.assertEqual(sorted(metadata.claims.items()), sorted(expected_claims.items()))
+
+ def test_metadata_validate_issuer(self):
+ with self.assertRaises(ValueError):
+ endpoint = TokenEndpoint(
+ None, None, grant_types={"password": None},
+ )
+ metadata = MetadataEndpoint([endpoint], {
+ "issuer": 'http://foo.bar',
+ "token_endpoint": "https://foo.bar/token",
+ })
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_resource_owner_association.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_resource_owner_association.py
new file mode 100644
index 0000000000..04533888e9
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_resource_owner_association.py
@@ -0,0 +1,108 @@
+"""Ensure all tokens are associated with a resource owner.
+"""
+import json
+from unittest import mock
+
+from oauthlib.oauth2 import (
+ BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
+ RequestValidator, WebApplicationServer,
+)
+
+from tests.unittest import TestCase
+
+from .test_utils import get_fragment_credentials, get_query_credentials
+
+
+class ResourceOwnerAssociationTest(TestCase):
+
+ auth_uri = 'http://example.com/path?client_id=abc'
+ token_uri = 'http://example.com/path'
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def set_user(self, client_id, code, client, request):
+ request.user = 'test'
+ return True
+
+ def set_user_from_username(self, username, password, client, request):
+ request.user = 'test'
+ return True
+
+ def set_user_from_credentials(self, request):
+ request.user = 'test'
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def inspect_client(self, request, refresh_token=False):
+ if not request.user:
+ raise ValueError()
+ return 'abc'
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.get_default_redirect_uri.return_value = 'http://i.b./path'
+ self.validator.get_code_challenge.return_value = None
+ self.validator.authenticate_client.side_effect = self.set_client
+ self.web = WebApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.mobile = MobileApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.legacy = LegacyApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+ self.backend = BackendApplicationServer(self.validator,
+ token_generator=self.inspect_client)
+
+ def test_web_application(self):
+ # TODO: code generator + intercept test
+ h, _, s = self.web.create_authorization_response(
+ self.auth_uri + '&response_type=code',
+ credentials={'user': 'test'}, scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ code = get_query_credentials(h['Location'])['code'][0]
+ self.assertRaises(ValueError,
+ self.web.create_token_response, self.token_uri,
+ body='grant_type=authorization_code&code=%s' % code)
+
+ self.validator.validate_code.side_effect = self.set_user
+ _, body, _ = self.web.create_token_response(self.token_uri,
+ body='grant_type=authorization_code&code=%s' % code)
+ self.assertEqual(json.loads(body)['access_token'], 'abc')
+
+ def test_mobile_application(self):
+ self.assertRaises(ValueError,
+ self.mobile.create_authorization_response,
+ self.auth_uri + '&response_type=token')
+
+ h, _, s = self.mobile.create_authorization_response(
+ self.auth_uri + '&response_type=token',
+ credentials={'user': 'test'}, scopes=['random'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertEqual(get_fragment_credentials(h['Location'])['access_token'][0], 'abc')
+
+ def test_legacy_application(self):
+ body = 'grant_type=password&username=abc&password=secret'
+ self.assertRaises(ValueError,
+ self.legacy.create_token_response,
+ self.token_uri, body=body)
+
+ self.validator.validate_user.side_effect = self.set_user_from_username
+ _, body, _ = self.legacy.create_token_response(
+ self.token_uri, body=body)
+ self.assertEqual(json.loads(body)['access_token'], 'abc')
+
+ def test_backend_application(self):
+ body = 'grant_type=client_credentials'
+ self.assertRaises(ValueError,
+ self.backend.create_token_response,
+ self.token_uri, body=body)
+
+ self.validator.authenticate_client.side_effect = self.set_user_from_credentials
+ _, body, _ = self.backend.create_token_response(
+ self.token_uri, body=body)
+ self.assertEqual(json.loads(body)['access_token'], 'abc')
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py
new file mode 100644
index 0000000000..338dbd91fa
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+from json import loads
+from unittest.mock import MagicMock
+
+from oauthlib.common import urlencode
+from oauthlib.oauth2 import RequestValidator, RevocationEndpoint
+
+from tests.unittest import TestCase
+
+
+class RevocationEndpointTest(TestCase):
+
+ def setUp(self):
+ self.validator = MagicMock(wraps=RequestValidator())
+ self.validator.client_authentication_required.return_value = True
+ self.validator.authenticate_client.return_value = True
+ self.validator.revoke_token.return_value = True
+ self.endpoint = RevocationEndpoint(self.validator)
+
+ self.uri = 'https://example.com/revoke_token'
+ self.headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ self.resp_h = {
+ 'Cache-Control': 'no-store',
+ 'Content-Type': 'application/json',
+ 'Pragma': 'no-cache'
+ }
+
+ def test_revoke_token(self):
+ for token_type in ('access_token', 'refresh_token', 'invalid'):
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', token_type)])
+ h, b, s = self.endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {})
+ self.assertEqual(b, '')
+ self.assertEqual(s, 200)
+
+ # don't specify token_type_hint
+ body = urlencode([('token', 'foo')])
+ h, b, s = self.endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {})
+ self.assertEqual(b, '')
+ self.assertEqual(s, 200)
+
+ def test_revoke_token_client_authentication_failed(self):
+ self.validator.authenticate_client.return_value = False
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'access_token')])
+ h, b, s = self.endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {
+ 'Content-Type': 'application/json',
+ 'Cache-Control': 'no-store',
+ 'Pragma': 'no-cache',
+ "WWW-Authenticate": 'Bearer error="invalid_client"'
+ })
+ self.assertEqual(loads(b)['error'], 'invalid_client')
+ self.assertEqual(s, 401)
+
+ def test_revoke_token_public_client_authentication(self):
+ self.validator.client_authentication_required.return_value = False
+ self.validator.authenticate_client_id.return_value = True
+ for token_type in ('access_token', 'refresh_token', 'invalid'):
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', token_type)])
+ h, b, s = self.endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {})
+ self.assertEqual(b, '')
+ self.assertEqual(s, 200)
+
+ def test_revoke_token_public_client_authentication_failed(self):
+ self.validator.client_authentication_required.return_value = False
+ self.validator.authenticate_client_id.return_value = False
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'access_token')])
+ h, b, s = self.endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {
+ 'Content-Type': 'application/json',
+ 'Cache-Control': 'no-store',
+ 'Pragma': 'no-cache',
+ "WWW-Authenticate": 'Bearer error="invalid_client"'
+ })
+ self.assertEqual(loads(b)['error'], 'invalid_client')
+ self.assertEqual(s, 401)
+
+ def test_revoke_with_callback(self):
+ endpoint = RevocationEndpoint(self.validator, enable_jsonp=True)
+ callback = 'package.hello_world'
+ for token_type in ('access_token', 'refresh_token', 'invalid'):
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', token_type),
+ ('callback', callback)])
+ h, b, s = endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, {})
+ self.assertEqual(b, callback + '();')
+ self.assertEqual(s, 200)
+
+ def test_revoke_unsupported_token(self):
+ endpoint = RevocationEndpoint(self.validator,
+ supported_token_types=['access_token'])
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'refresh_token')])
+ h, b, s = endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'unsupported_token_type')
+ self.assertEqual(s, 400)
+
+ h, b, s = endpoint.create_revocation_response(self.uri,
+ headers=self.headers, body='')
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'invalid_request')
+ self.assertEqual(s, 400)
+
+ def test_revoke_invalid_request_method(self):
+ endpoint = RevocationEndpoint(self.validator,
+ supported_token_types=['access_token'])
+ test_methods = ['GET', 'pUt', 'dEleTe', 'paTcH']
+ test_methods = test_methods + [x.lower() for x in test_methods] + [x.upper() for x in test_methods]
+ for method in test_methods:
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'refresh_token')])
+ h, b, s = endpoint.create_revocation_response(self.uri,
+ http_method = method, headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'invalid_request')
+ self.assertIn('Unsupported request method', loads(b)['error_description'])
+ self.assertEqual(s, 400)
+
+ def test_revoke_bad_post_request(self):
+ endpoint = RevocationEndpoint(self.validator,
+ supported_token_types=['access_token'])
+ for param in ['token', 'secret', 'code', 'foo']:
+ uri = 'http://some.endpoint?' + urlencode([(param, 'secret')])
+ body = urlencode([('token', 'foo'),
+ ('token_type_hint', 'access_token')])
+ h, b, s = endpoint.create_revocation_response(uri,
+ headers=self.headers, body=body)
+ self.assertEqual(h, self.resp_h)
+ self.assertEqual(loads(b)['error'], 'invalid_request')
+ self.assertIn('query parameters are not allowed', loads(b)['error_description'])
+ self.assertEqual(s, 400)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_scope_handling.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_scope_handling.py
new file mode 100644
index 0000000000..4c87d9c7c8
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_scope_handling.py
@@ -0,0 +1,193 @@
+"""Ensure scope is preserved across authorization.
+
+Fairly trivial in all grants except the Authorization Code Grant where scope
+need to be persisted temporarily in an authorization code.
+"""
+import json
+from unittest import mock
+
+from oauthlib.oauth2 import (
+ BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
+ RequestValidator, Server, WebApplicationServer,
+)
+
+from tests.unittest import TestCase
+
+from .test_utils import get_fragment_credentials, get_query_credentials
+
+
+class TestScopeHandling(TestCase):
+
+ DEFAULT_REDIRECT_URI = 'http://i.b./path'
+
+ def set_scopes(self, scopes):
+ def set_request_scopes(client_id, code, client, request):
+ request.scopes = scopes
+ return True
+ return set_request_scopes
+
+ def set_user(self, request):
+ request.user = 'foo'
+ request.client_id = 'bar'
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.get_default_redirect_uri.return_value = TestScopeHandling.DEFAULT_REDIRECT_URI
+ self.validator.get_code_challenge.return_value = None
+ self.validator.authenticate_client.side_effect = self.set_client
+ self.server = Server(self.validator)
+ self.web = WebApplicationServer(self.validator)
+ self.mobile = MobileApplicationServer(self.validator)
+ self.legacy = LegacyApplicationServer(self.validator)
+ self.backend = BackendApplicationServer(self.validator)
+
+ def test_scope_extraction(self):
+ scopes = (
+ ('images', ['images']),
+ ('images+videos', ['images', 'videos']),
+ ('images+videos+openid', ['images', 'videos', 'openid']),
+ ('http%3A%2f%2fa.b%2fvideos', ['http://a.b/videos']),
+ ('http%3A%2f%2fa.b%2fvideos+pics', ['http://a.b/videos', 'pics']),
+ ('pics+http%3A%2f%2fa.b%2fvideos', ['pics', 'http://a.b/videos']),
+ ('http%3A%2f%2fa.b%2fvideos+https%3A%2f%2fc.d%2Fsecret', ['http://a.b/videos', 'https://c.d/secret']),
+ )
+
+ uri = 'http://example.com/path?client_id=abc&scope=%s&response_type=%s'
+ for scope, correct_scopes in scopes:
+ scopes, _ = self.web.validate_authorization_request(
+ uri % (scope, 'code'))
+ self.assertCountEqual(scopes, correct_scopes)
+ scopes, _ = self.mobile.validate_authorization_request(
+ uri % (scope, 'token'))
+ self.assertCountEqual(scopes, correct_scopes)
+ scopes, _ = self.server.validate_authorization_request(
+ uri % (scope, 'code'))
+ self.assertCountEqual(scopes, correct_scopes)
+
+ def test_scope_preservation(self):
+ scope = 'pics+http%3A%2f%2fa.b%2fvideos'
+ decoded_scope = 'pics http://a.b/videos'
+ auth_uri = 'http://example.com/path?client_id=abc&response_type='
+ token_uri = 'http://example.com/path'
+
+ # authorization grant
+ for backend_server_type in ['web', 'server']:
+ h, _, s = getattr(self, backend_server_type).create_authorization_response(
+ auth_uri + 'code', scopes=decoded_scope.split(' '))
+ self.validator.validate_code.side_effect = self.set_scopes(decoded_scope.split(' '))
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ code = get_query_credentials(h['Location'])['code'][0]
+ _, body, _ = getattr(self, backend_server_type).create_token_response(token_uri,
+ body='client_id=me&redirect_uri=http://back.to/me&grant_type=authorization_code&code=%s' % code)
+ self.assertEqual(json.loads(body)['scope'], decoded_scope)
+
+ # implicit grant
+ for backend_server_type in ['mobile', 'server']:
+ h, _, s = getattr(self, backend_server_type).create_authorization_response(
+ auth_uri + 'token', scopes=decoded_scope.split(' '))
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertEqual(get_fragment_credentials(h['Location'])['scope'][0], decoded_scope)
+
+ # resource owner password credentials grant
+ for backend_server_type in ['legacy', 'server']:
+ body = 'grant_type=password&username=abc&password=secret&scope=%s'
+
+ _, body, _ = getattr(self, backend_server_type).create_token_response(token_uri,
+ body=body % scope)
+ self.assertEqual(json.loads(body)['scope'], decoded_scope)
+
+ # client credentials grant
+ for backend_server_type in ['backend', 'server']:
+ body = 'grant_type=client_credentials&scope=%s'
+ self.validator.authenticate_client.side_effect = self.set_user
+ _, body, _ = getattr(self, backend_server_type).create_token_response(token_uri,
+ body=body % scope)
+ self.assertEqual(json.loads(body)['scope'], decoded_scope)
+
+ def test_scope_changed(self):
+ scope = 'pics+http%3A%2f%2fa.b%2fvideos'
+ scopes = ['images', 'http://a.b/videos']
+ decoded_scope = 'images http://a.b/videos'
+ auth_uri = 'http://example.com/path?client_id=abc&response_type='
+ token_uri = 'http://example.com/path'
+
+ # authorization grant
+ h, _, s = self.web.create_authorization_response(
+ auth_uri + 'code', scopes=scopes)
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ code = get_query_credentials(h['Location'])['code'][0]
+ self.validator.validate_code.side_effect = self.set_scopes(scopes)
+ _, body, _ = self.web.create_token_response(token_uri,
+ body='grant_type=authorization_code&code=%s' % code)
+ self.assertEqual(json.loads(body)['scope'], decoded_scope)
+
+ # implicit grant
+ self.validator.validate_scopes.side_effect = self.set_scopes(scopes)
+ h, _, s = self.mobile.create_authorization_response(
+ auth_uri + 'token', scopes=scopes)
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ self.assertEqual(get_fragment_credentials(h['Location'])['scope'][0], decoded_scope)
+
+ # resource owner password credentials grant
+ self.validator.validate_scopes.side_effect = self.set_scopes(scopes)
+ body = 'grant_type=password&username=abc&password=secret&scope=%s'
+ _, body, _ = self.legacy.create_token_response(token_uri,
+ body=body % scope)
+ self.assertEqual(json.loads(body)['scope'], decoded_scope)
+
+ # client credentials grant
+ self.validator.validate_scopes.side_effect = self.set_scopes(scopes)
+ self.validator.authenticate_client.side_effect = self.set_user
+ body = 'grant_type=client_credentials&scope=%s'
+ _, body, _ = self.backend.create_token_response(token_uri,
+ body=body % scope)
+
+ self.assertEqual(json.loads(body)['scope'], decoded_scope)
+
+ def test_invalid_scope(self):
+ scope = 'pics+http%3A%2f%2fa.b%2fvideos'
+ auth_uri = 'http://example.com/path?client_id=abc&response_type='
+ token_uri = 'http://example.com/path'
+
+ self.validator.validate_scopes.return_value = False
+
+ # authorization grant
+ h, _, s = self.web.create_authorization_response(
+ auth_uri + 'code', scopes=['invalid'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ error = get_query_credentials(h['Location'])['error'][0]
+ self.assertEqual(error, 'invalid_scope')
+
+ # implicit grant
+ h, _, s = self.mobile.create_authorization_response(
+ auth_uri + 'token', scopes=['invalid'])
+ self.assertEqual(s, 302)
+ self.assertIn('Location', h)
+ error = get_fragment_credentials(h['Location'])['error'][0]
+ self.assertEqual(error, 'invalid_scope')
+
+ # resource owner password credentials grant
+ body = 'grant_type=password&username=abc&password=secret&scope=%s'
+ _, body, _ = self.legacy.create_token_response(token_uri,
+ body=body % scope)
+ self.assertEqual(json.loads(body)['error'], 'invalid_scope')
+
+ # client credentials grant
+ self.validator.authenticate_client.side_effect = self.set_user
+ body = 'grant_type=client_credentials&scope=%s'
+ _, body, _ = self.backend.create_token_response(token_uri,
+ body=body % scope)
+ self.assertEqual(json.loads(body)['error'], 'invalid_scope')
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_utils.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_utils.py
new file mode 100644
index 0000000000..5eae1956f4
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/endpoints/test_utils.py
@@ -0,0 +1,11 @@
+import urllib.parse as urlparse
+
+
+def get_query_credentials(uri):
+ return urlparse.parse_qs(urlparse.urlparse(uri).query,
+ keep_blank_values=True)
+
+
+def get_fragment_credentials(uri):
+ return urlparse.parse_qs(urlparse.urlparse(uri).fragment,
+ keep_blank_values=True)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/__init__.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_authorization_code.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_authorization_code.py
new file mode 100644
index 0000000000..77e1a81b46
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_authorization_code.py
@@ -0,0 +1,382 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.grant_types import (
+ AuthorizationCodeGrant, authorization_code,
+)
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+from tests.unittest import TestCase
+
+
+class AuthorizationCodeGrantTest(TestCase):
+
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ self.request.scopes = ('hello', 'world')
+ self.request.expires_in = 1800
+ self.request.client = 'batman'
+ self.request.client_id = 'abcdef'
+ self.request.code = '1234'
+ self.request.response_type = 'code'
+ self.request.grant_type = 'authorization_code'
+ self.request.redirect_uri = 'https://a.b/cb'
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.is_pkce_required.return_value = False
+ self.mock_validator.get_code_challenge.return_value = None
+ self.mock_validator.is_origin_allowed.return_value = False
+ self.mock_validator.authenticate_client.side_effect = self.set_client
+ self.auth = AuthorizationCodeGrant(request_validator=self.mock_validator)
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def setup_validators(self):
+ self.authval1, self.authval2 = mock.Mock(), mock.Mock()
+ self.authval1.return_value = {}
+ self.authval2.return_value = {}
+ self.tknval1, self.tknval2 = mock.Mock(), mock.Mock()
+ self.tknval1.return_value = None
+ self.tknval2.return_value = None
+ self.auth.custom_validators.pre_token.append(self.tknval1)
+ self.auth.custom_validators.post_token.append(self.tknval2)
+ self.auth.custom_validators.pre_auth.append(self.authval1)
+ self.auth.custom_validators.post_auth.append(self.authval2)
+
+ def test_custom_auth_validators(self):
+ self.setup_validators()
+
+ bearer = BearerToken(self.mock_validator)
+ self.auth.create_authorization_response(self.request, bearer)
+ self.assertTrue(self.authval1.called)
+ self.assertTrue(self.authval2.called)
+ self.assertFalse(self.tknval1.called)
+ self.assertFalse(self.tknval2.called)
+
+ def test_custom_token_validators(self):
+ self.setup_validators()
+
+ bearer = BearerToken(self.mock_validator)
+ self.auth.create_token_response(self.request, bearer)
+ self.assertTrue(self.tknval1.called)
+ self.assertTrue(self.tknval2.called)
+ self.assertFalse(self.authval1.called)
+ self.assertFalse(self.authval2.called)
+
+ def test_create_authorization_grant(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.response_mode = 'query'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ grant = dict(Request(h['Location']).uri_query_params)
+ self.assertIn('code', grant)
+ self.assertTrue(self.mock_validator.validate_redirect_uri.called)
+ self.assertTrue(self.mock_validator.validate_response_type.called)
+ self.assertTrue(self.mock_validator.validate_scopes.called)
+
+ def test_create_authorization_grant_no_scopes(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.response_mode = 'query'
+ self.request.scopes = []
+ self.auth.create_authorization_response(self.request, bearer)
+
+ def test_create_authorization_grant_state(self):
+ self.request.state = 'abc'
+ self.request.redirect_uri = None
+ self.request.response_mode = 'query'
+ self.mock_validator.get_default_redirect_uri.return_value = 'https://a.b/cb'
+ bearer = BearerToken(self.mock_validator)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ grant = dict(Request(h['Location']).uri_query_params)
+ self.assertIn('code', grant)
+ self.assertIn('state', grant)
+ self.assertFalse(self.mock_validator.validate_redirect_uri.called)
+ self.assertTrue(self.mock_validator.get_default_redirect_uri.called)
+ self.assertTrue(self.mock_validator.validate_response_type.called)
+ self.assertTrue(self.mock_validator.validate_scopes.called)
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_create_authorization_response(self, generate_token):
+ generate_token.return_value = 'abc'
+ bearer = BearerToken(self.mock_validator)
+ self.request.response_mode = 'query'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], 'https://a.b/cb?code=abc')
+ self.request.response_mode = 'fragment'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], 'https://a.b/cb#code=abc')
+
+ def test_create_token_response(self):
+ bearer = BearerToken(self.mock_validator)
+
+ h, token, s = self.auth.create_token_response(self.request, bearer)
+ token = json.loads(token)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('refresh_token', token)
+ self.assertIn('expires_in', token)
+ self.assertIn('scope', token)
+ self.assertTrue(self.mock_validator.client_authentication_required.called)
+ self.assertTrue(self.mock_validator.authenticate_client.called)
+ self.assertTrue(self.mock_validator.validate_code.called)
+ self.assertTrue(self.mock_validator.confirm_redirect_uri.called)
+ self.assertTrue(self.mock_validator.validate_grant_type.called)
+ self.assertTrue(self.mock_validator.invalidate_authorization_code.called)
+
+ def test_create_token_response_without_refresh_token(self):
+ self.auth.refresh_token = False # Not to issue refresh token.
+
+ bearer = BearerToken(self.mock_validator)
+ h, token, s = self.auth.create_token_response(self.request, bearer)
+ token = json.loads(token)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertNotIn('refresh_token', token)
+ self.assertIn('expires_in', token)
+ self.assertIn('scope', token)
+ self.assertTrue(self.mock_validator.client_authentication_required.called)
+ self.assertTrue(self.mock_validator.authenticate_client.called)
+ self.assertTrue(self.mock_validator.validate_code.called)
+ self.assertTrue(self.mock_validator.confirm_redirect_uri.called)
+ self.assertTrue(self.mock_validator.validate_grant_type.called)
+ self.assertTrue(self.mock_validator.invalidate_authorization_code.called)
+
+ def test_invalid_request(self):
+ del self.request.code
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
+ self.request)
+
+ def test_invalid_request_duplicates(self):
+ request = mock.MagicMock(wraps=self.request)
+ request.grant_type = 'authorization_code'
+ request.duplicate_params = ['client_id']
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
+ request)
+
+ def test_authentication_required(self):
+ """
+ ensure client_authentication_required() is properly called
+ """
+ self.auth.validate_token_request(self.request)
+ self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
+
+ def test_authenticate_client(self):
+ self.mock_validator.authenticate_client.side_effect = None
+ self.mock_validator.authenticate_client.return_value = False
+ self.assertRaises(errors.InvalidClientError, self.auth.validate_token_request,
+ self.request)
+
+ def test_client_id_missing(self):
+ self.mock_validator.authenticate_client.side_effect = None
+ request = mock.MagicMock(wraps=self.request)
+ request.grant_type = 'authorization_code'
+ del request.client.client_id
+ self.assertRaises(NotImplementedError, self.auth.validate_token_request,
+ request)
+
+ def test_invalid_grant(self):
+ self.request.client = 'batman'
+ self.mock_validator.authenticate_client = self.set_client
+ self.mock_validator.validate_code.return_value = False
+ self.assertRaises(errors.InvalidGrantError,
+ self.auth.validate_token_request, self.request)
+
+ def test_invalid_grant_type(self):
+ self.request.grant_type = 'foo'
+ self.assertRaises(errors.UnsupportedGrantTypeError,
+ self.auth.validate_token_request, self.request)
+
+ def test_authenticate_client_id(self):
+ self.mock_validator.client_authentication_required.return_value = False
+ self.mock_validator.authenticate_client_id.return_value = False
+ self.request.state = 'abc'
+ self.assertRaises(errors.InvalidClientError,
+ self.auth.validate_token_request, self.request)
+
+ def test_invalid_redirect_uri(self):
+ self.mock_validator.confirm_redirect_uri.return_value = False
+ self.assertRaises(errors.MismatchingRedirectURIError,
+ self.auth.validate_token_request, self.request)
+
+ # PKCE validate_authorization_request
+ def test_pkce_challenge_missing(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.assertRaises(errors.MissingCodeChallengeError,
+ self.auth.validate_authorization_request, self.request)
+
+ def test_pkce_default_method(self):
+ for required in [True, False]:
+ self.mock_validator.is_pkce_required.return_value = required
+ self.request.code_challenge = "present"
+ _, ri = self.auth.validate_authorization_request(self.request)
+ self.assertIn("code_challenge", ri)
+ self.assertIn("code_challenge_method", ri)
+ self.assertEqual(ri["code_challenge"], "present")
+ self.assertEqual(ri["code_challenge_method"], "plain")
+
+ def test_pkce_wrong_method(self):
+ for required in [True, False]:
+ self.mock_validator.is_pkce_required.return_value = required
+ self.request.code_challenge = "present"
+ self.request.code_challenge_method = "foobar"
+ self.assertRaises(errors.UnsupportedCodeChallengeMethodError,
+ self.auth.validate_authorization_request, self.request)
+
+ # PKCE validate_token_request
+ def test_pkce_verifier_missing(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.assertRaises(errors.MissingCodeVerifierError,
+ self.auth.validate_token_request, self.request)
+
+ # PKCE validate_token_request
+ def test_pkce_required_verifier_missing_challenge_missing(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = None
+ self.mock_validator.get_code_challenge.return_value = None
+ self.assertRaises(errors.MissingCodeVerifierError,
+ self.auth.validate_token_request, self.request)
+
+ def test_pkce_required_verifier_missing_challenge_valid(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = None
+ self.mock_validator.get_code_challenge.return_value = "foo"
+ self.assertRaises(errors.MissingCodeVerifierError,
+ self.auth.validate_token_request, self.request)
+
+ def test_pkce_required_verifier_valid_challenge_missing(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = "foobar"
+ self.mock_validator.get_code_challenge.return_value = None
+ self.assertRaises(errors.InvalidGrantError,
+ self.auth.validate_token_request, self.request)
+
+ def test_pkce_required_verifier_valid_challenge_valid_method_valid(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = "foobar"
+ self.mock_validator.get_code_challenge.return_value = "foobar"
+ self.mock_validator.get_code_challenge_method.return_value = "plain"
+ self.auth.validate_token_request(self.request)
+
+ def test_pkce_required_verifier_invalid_challenge_valid_method_valid(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = "foobar"
+ self.mock_validator.get_code_challenge.return_value = "raboof"
+ self.mock_validator.get_code_challenge_method.return_value = "plain"
+ self.assertRaises(errors.InvalidGrantError,
+ self.auth.validate_token_request, self.request)
+
+ def test_pkce_required_verifier_valid_challenge_valid_method_wrong(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = "present"
+ self.mock_validator.get_code_challenge.return_value = "foobar"
+ self.mock_validator.get_code_challenge_method.return_value = "cryptic_method"
+ self.assertRaises(errors.ServerError,
+ self.auth.validate_token_request, self.request)
+
+ def test_pkce_verifier_valid_challenge_valid_method_missing(self):
+ self.mock_validator.is_pkce_required.return_value = True
+ self.request.code_verifier = "present"
+ self.mock_validator.get_code_challenge.return_value = "foobar"
+ self.mock_validator.get_code_challenge_method.return_value = None
+ self.assertRaises(errors.InvalidGrantError,
+ self.auth.validate_token_request, self.request)
+
+ def test_pkce_optional_verifier_valid_challenge_missing(self):
+ self.mock_validator.is_pkce_required.return_value = False
+ self.request.code_verifier = "present"
+ self.mock_validator.get_code_challenge.return_value = None
+ self.auth.validate_token_request(self.request)
+
+ def test_pkce_optional_verifier_missing_challenge_valid(self):
+ self.mock_validator.is_pkce_required.return_value = False
+ self.request.code_verifier = None
+ self.mock_validator.get_code_challenge.return_value = "foobar"
+ self.assertRaises(errors.MissingCodeVerifierError,
+ self.auth.validate_token_request, self.request)
+
+ # PKCE functions
+ def test_wrong_code_challenge_method_plain(self):
+ self.assertFalse(authorization_code.code_challenge_method_plain("foo", "bar"))
+
+ def test_correct_code_challenge_method_plain(self):
+ self.assertTrue(authorization_code.code_challenge_method_plain("foo", "foo"))
+
+ def test_wrong_code_challenge_method_s256(self):
+ self.assertFalse(authorization_code.code_challenge_method_s256("foo", "bar"))
+
+ def test_correct_code_challenge_method_s256(self):
+ # "abcd" as verifier gives a '+' to base64
+ self.assertTrue(
+ authorization_code.code_challenge_method_s256("abcd",
+ "iNQmb9TmM40TuEX88olXnSCciXgjuSF9o-Fhk28DFYk")
+ )
+ # "/" as verifier gives a '/' and '+' to base64
+ self.assertTrue(
+ authorization_code.code_challenge_method_s256("/",
+ "il7asoJjJEMhngUeSt4tHVu8Zxx4EFG_FDeJfL3-oPE")
+ )
+ # Example from PKCE RFCE
+ self.assertTrue(
+ authorization_code.code_challenge_method_s256("dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk",
+ "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM")
+ )
+
+ def test_code_modifier_called(self):
+ bearer = BearerToken(self.mock_validator)
+ code_modifier = mock.MagicMock(wraps=lambda grant, *a: grant)
+ self.auth.register_code_modifier(code_modifier)
+ self.auth.create_authorization_response(self.request, bearer)
+ code_modifier.assert_called_once()
+
+ def test_hybrid_token_save(self):
+ bearer = BearerToken(self.mock_validator)
+ self.auth.register_code_modifier(
+ lambda grant, *a: dict(list(grant.items()) + [('access_token', 1)])
+ )
+ self.auth.create_authorization_response(self.request, bearer)
+ self.mock_validator.save_token.assert_called_once()
+
+ # CORS
+
+ def test_create_cors_headers(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.headers['origin'] = 'https://foo.bar'
+ self.mock_validator.is_origin_allowed.return_value = True
+
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertEqual(
+ headers['Access-Control-Allow-Origin'], 'https://foo.bar'
+ )
+ self.mock_validator.is_origin_allowed.assert_called_once_with(
+ 'abcdef', 'https://foo.bar', self.request
+ )
+
+ def test_create_cors_headers_no_origin(self):
+ bearer = BearerToken(self.mock_validator)
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertNotIn('Access-Control-Allow-Origin', headers)
+ self.mock_validator.is_origin_allowed.assert_not_called()
+
+ def test_create_cors_headers_insecure_origin(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.headers['origin'] = 'http://foo.bar'
+
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertNotIn('Access-Control-Allow-Origin', headers)
+ self.mock_validator.is_origin_allowed.assert_not_called()
+
+ def test_create_cors_headers_invalid_origin(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.headers['origin'] = 'https://foo.bar'
+ self.mock_validator.is_origin_allowed.return_value = False
+
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertNotIn('Access-Control-Allow-Origin', headers)
+ self.mock_validator.is_origin_allowed.assert_called_once_with(
+ 'abcdef', 'https://foo.bar', self.request
+ )
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_client_credentials.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_client_credentials.py
new file mode 100644
index 0000000000..e9559c7931
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_client_credentials.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749.grant_types import ClientCredentialsGrant
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+from tests.unittest import TestCase
+
+
+class ClientCredentialsGrantTest(TestCase):
+
+ def setUp(self):
+ mock_client = mock.MagicMock()
+ mock_client.user.return_value = 'mocked user'
+ self.request = Request('http://a.b/path')
+ self.request.grant_type = 'client_credentials'
+ self.request.client = mock_client
+ self.request.scopes = ('mocked', 'scopes')
+ self.mock_validator = mock.MagicMock()
+ self.auth = ClientCredentialsGrant(
+ request_validator=self.mock_validator)
+
+ def test_custom_auth_validators_unsupported(self):
+ authval1, authval2 = mock.Mock(), mock.Mock()
+ expected = ('ClientCredentialsGrant does not support authorization '
+ 'validators. Use token validators instead.')
+ with self.assertRaises(ValueError) as caught:
+ ClientCredentialsGrant(self.mock_validator, pre_auth=[authval1])
+ self.assertEqual(caught.exception.args[0], expected)
+ with self.assertRaises(ValueError) as caught:
+ ClientCredentialsGrant(self.mock_validator, post_auth=[authval2])
+ self.assertEqual(caught.exception.args[0], expected)
+ with self.assertRaises(AttributeError):
+ self.auth.custom_validators.pre_auth.append(authval1)
+ with self.assertRaises(AttributeError):
+ self.auth.custom_validators.pre_auth.append(authval2)
+
+ def test_custom_token_validators(self):
+ tknval1, tknval2 = mock.Mock(), mock.Mock()
+ self.auth.custom_validators.pre_token.append(tknval1)
+ self.auth.custom_validators.post_token.append(tknval2)
+
+ bearer = BearerToken(self.mock_validator)
+ self.auth.create_token_response(self.request, bearer)
+ self.assertTrue(tknval1.called)
+ self.assertTrue(tknval2.called)
+
+ def test_create_token_response(self):
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertIn('Content-Type', headers)
+ self.assertEqual(headers['Content-Type'], 'application/json')
+
+ def test_error_response(self):
+ bearer = BearerToken(self.mock_validator)
+ self.mock_validator.authenticate_client.return_value = False
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+ error_msg = json.loads(body)
+ self.assertIn('error', error_msg)
+ self.assertEqual(error_msg['error'], 'invalid_client')
+ self.assertIn('Content-Type', headers)
+ self.assertEqual(headers['Content-Type'], 'application/json')
+
+ def test_validate_token_response(self):
+ # wrong grant type, scope
+ pass
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_implicit.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_implicit.py
new file mode 100644
index 0000000000..1fb71a1dc9
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_implicit.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749.grant_types import ImplicitGrant
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+from tests.unittest import TestCase
+
+
+class ImplicitGrantTest(TestCase):
+
+ def setUp(self):
+ mock_client = mock.MagicMock()
+ mock_client.user.return_value = 'mocked user'
+ self.request = Request('http://a.b/path')
+ self.request.scopes = ('hello', 'world')
+ self.request.client = mock_client
+ self.request.client_id = 'abcdef'
+ self.request.response_type = 'token'
+ self.request.state = 'xyz'
+ self.request.redirect_uri = 'https://b.c/p'
+
+ self.mock_validator = mock.MagicMock()
+ self.auth = ImplicitGrant(request_validator=self.mock_validator)
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_create_token_response(self, generate_token):
+ generate_token.return_value = '1234'
+ bearer = BearerToken(self.mock_validator, expires_in=1800)
+ h, b, s = self.auth.create_token_response(self.request, bearer)
+ correct_uri = 'https://b.c/p#access_token=1234&token_type=Bearer&expires_in=1800&state=xyz&scope=hello+world'
+ self.assertEqual(s, 302)
+ self.assertURLEqual(h['Location'], correct_uri, parse_fragment=True)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+
+ correct_uri = 'https://b.c/p?access_token=1234&token_type=Bearer&expires_in=1800&state=xyz&scope=hello+world'
+ self.request.response_mode = 'query'
+ h, b, s = self.auth.create_token_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], correct_uri)
+
+ def test_custom_validators(self):
+ self.authval1, self.authval2 = mock.Mock(), mock.Mock()
+ self.tknval1, self.tknval2 = mock.Mock(), mock.Mock()
+ for val in (self.authval1, self.authval2):
+ val.return_value = {}
+ for val in (self.tknval1, self.tknval2):
+ val.return_value = None
+ self.auth.custom_validators.pre_token.append(self.tknval1)
+ self.auth.custom_validators.post_token.append(self.tknval2)
+ self.auth.custom_validators.pre_auth.append(self.authval1)
+ self.auth.custom_validators.post_auth.append(self.authval2)
+
+ bearer = BearerToken(self.mock_validator)
+ self.auth.create_token_response(self.request, bearer)
+ self.assertTrue(self.tknval1.called)
+ self.assertTrue(self.tknval2.called)
+ self.assertTrue(self.authval1.called)
+ self.assertTrue(self.authval2.called)
+
+ def test_error_response(self):
+ pass
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_refresh_token.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_refresh_token.py
new file mode 100644
index 0000000000..581f2a4d6a
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_refresh_token.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.grant_types import RefreshTokenGrant
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+from tests.unittest import TestCase
+
+
+class RefreshTokenGrantTest(TestCase):
+
+ def setUp(self):
+ mock_client = mock.MagicMock()
+ mock_client.user.return_value = 'mocked user'
+ self.request = Request('http://a.b/path')
+ self.request.grant_type = 'refresh_token'
+ self.request.refresh_token = 'lsdkfhj230'
+ self.request.client_id = 'abcdef'
+ self.request.client = mock_client
+ self.request.scope = 'foo'
+ self.mock_validator = mock.MagicMock()
+ self.auth = RefreshTokenGrant(
+ request_validator=self.mock_validator)
+
+ def test_create_token_response(self):
+ self.mock_validator.get_original_scopes.return_value = ['foo', 'bar']
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertEqual(token['scope'], 'foo')
+
+ def test_custom_auth_validators_unsupported(self):
+ authval1, authval2 = mock.Mock(), mock.Mock()
+ expected = ('RefreshTokenGrant does not support authorization '
+ 'validators. Use token validators instead.')
+ with self.assertRaises(ValueError) as caught:
+ RefreshTokenGrant(self.mock_validator, pre_auth=[authval1])
+ self.assertEqual(caught.exception.args[0], expected)
+ with self.assertRaises(ValueError) as caught:
+ RefreshTokenGrant(self.mock_validator, post_auth=[authval2])
+ self.assertEqual(caught.exception.args[0], expected)
+ with self.assertRaises(AttributeError):
+ self.auth.custom_validators.pre_auth.append(authval1)
+ with self.assertRaises(AttributeError):
+ self.auth.custom_validators.pre_auth.append(authval2)
+
+ def test_custom_token_validators(self):
+ tknval1, tknval2 = mock.Mock(), mock.Mock()
+ self.auth.custom_validators.pre_token.append(tknval1)
+ self.auth.custom_validators.post_token.append(tknval2)
+
+ bearer = BearerToken(self.mock_validator)
+ self.auth.create_token_response(self.request, bearer)
+ self.assertTrue(tknval1.called)
+ self.assertTrue(tknval2.called)
+
+ def test_create_token_inherit_scope(self):
+ self.request.scope = None
+ self.mock_validator.get_original_scopes.return_value = ['foo', 'bar']
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertEqual(token['scope'], 'foo bar')
+
+ def test_create_token_within_original_scope(self):
+ self.mock_validator.get_original_scopes.return_value = ['baz']
+ self.mock_validator.is_within_original_scope.return_value = True
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertEqual(token['scope'], 'foo')
+
+ def test_invalid_scope(self):
+ self.mock_validator.get_original_scopes.return_value = ['baz']
+ self.mock_validator.is_within_original_scope.return_value = False
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+ self.assertEqual(token['error'], 'invalid_scope')
+ self.assertEqual(status_code, 400)
+
+ def test_invalid_token(self):
+ self.mock_validator.validate_refresh_token.return_value = False
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+ self.assertEqual(token['error'], 'invalid_grant')
+ self.assertEqual(status_code, 400)
+
+ def test_invalid_client(self):
+ self.mock_validator.authenticate_client.return_value = False
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+ self.assertEqual(token['error'], 'invalid_client')
+ self.assertEqual(status_code, 401)
+
+ def test_authentication_required(self):
+ """
+ ensure client_authentication_required() is properly called
+ """
+ self.mock_validator.authenticate_client.return_value = False
+ self.mock_validator.authenticate_client_id.return_value = False
+ self.request.code = 'waffles'
+ self.assertRaises(errors.InvalidClientError, self.auth.validate_token_request,
+ self.request)
+ self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
+
+ def test_invalid_grant_type(self):
+ self.request.grant_type = 'wrong_type'
+ self.assertRaises(errors.UnsupportedGrantTypeError,
+ self.auth.validate_token_request, self.request)
+
+ def test_authenticate_client_id(self):
+ self.mock_validator.client_authentication_required.return_value = False
+ self.request.refresh_token = mock.MagicMock()
+ self.mock_validator.authenticate_client_id.return_value = False
+ self.assertRaises(errors.InvalidClientError,
+ self.auth.validate_token_request, self.request)
+
+ def test_invalid_refresh_token(self):
+ # invalid refresh token
+ self.mock_validator.authenticate_client_id.return_value = True
+ self.mock_validator.validate_refresh_token.return_value = False
+ self.assertRaises(errors.InvalidGrantError,
+ self.auth.validate_token_request, self.request)
+ # no token provided
+ del self.request.refresh_token
+ self.assertRaises(errors.InvalidRequestError,
+ self.auth.validate_token_request, self.request)
+
+ def test_invalid_scope_original_scopes_empty(self):
+ self.mock_validator.validate_refresh_token.return_value = True
+ self.mock_validator.is_within_original_scope.return_value = False
+ self.assertRaises(errors.InvalidScopeError,
+ self.auth.validate_token_request, self.request)
+
+ def test_valid_token_request(self):
+ self.request.scope = 'foo bar'
+ self.mock_validator.get_original_scopes = mock.Mock()
+ self.mock_validator.get_original_scopes.return_value = 'foo bar baz'
+ self.auth.validate_token_request(self.request)
+ self.assertEqual(self.request.scopes, self.request.scope.split())
+ # all ok but without request.scope
+ del self.request.scope
+ self.auth.validate_token_request(self.request)
+ self.assertEqual(self.request.scopes, 'foo bar baz'.split())
+
+ # CORS
+
+ def test_create_cors_headers(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.headers['origin'] = 'https://foo.bar'
+ self.mock_validator.is_origin_allowed.return_value = True
+
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertEqual(
+ headers['Access-Control-Allow-Origin'], 'https://foo.bar'
+ )
+ self.mock_validator.is_origin_allowed.assert_called_once_with(
+ 'abcdef', 'https://foo.bar', self.request
+ )
+
+ def test_create_cors_headers_no_origin(self):
+ bearer = BearerToken(self.mock_validator)
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertNotIn('Access-Control-Allow-Origin', headers)
+ self.mock_validator.is_origin_allowed.assert_not_called()
+
+ def test_create_cors_headers_insecure_origin(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.headers['origin'] = 'http://foo.bar'
+
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertNotIn('Access-Control-Allow-Origin', headers)
+ self.mock_validator.is_origin_allowed.assert_not_called()
+
+ def test_create_cors_headers_invalid_origin(self):
+ bearer = BearerToken(self.mock_validator)
+ self.request.headers['origin'] = 'https://foo.bar'
+ self.mock_validator.is_origin_allowed.return_value = False
+
+ headers = self.auth.create_token_response(self.request, bearer)[0]
+ self.assertNotIn('Access-Control-Allow-Origin', headers)
+ self.mock_validator.is_origin_allowed.assert_called_once_with(
+ 'abcdef', 'https://foo.bar', self.request
+ )
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_resource_owner_password.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_resource_owner_password.py
new file mode 100644
index 0000000000..294e27be35
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/grant_types/test_resource_owner_password.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.grant_types import (
+ ResourceOwnerPasswordCredentialsGrant,
+)
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+
+from tests.unittest import TestCase
+
+
+class ResourceOwnerPasswordCredentialsGrantTest(TestCase):
+
+ def setUp(self):
+ mock_client = mock.MagicMock()
+ mock_client.user.return_value = 'mocked user'
+ self.request = Request('http://a.b/path')
+ self.request.grant_type = 'password'
+ self.request.username = 'john'
+ self.request.password = 'doe'
+ self.request.client = mock_client
+ self.request.scopes = ('mocked', 'scopes')
+ self.mock_validator = mock.MagicMock()
+ self.auth = ResourceOwnerPasswordCredentialsGrant(
+ request_validator=self.mock_validator)
+
+ def set_client(self, request, *args, **kwargs):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def test_create_token_response(self):
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertIn('refresh_token', token)
+ # ensure client_authentication_required() is properly called
+ self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
+ # fail client authentication
+ self.mock_validator.reset_mock()
+ self.mock_validator.validate_user.return_value = True
+ self.mock_validator.authenticate_client.return_value = False
+ status_code = self.auth.create_token_response(self.request, bearer)[2]
+ self.assertEqual(status_code, 401)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+
+ # mock client_authentication_required() returning False then fail
+ self.mock_validator.reset_mock()
+ self.mock_validator.client_authentication_required.return_value = False
+ self.mock_validator.authenticate_client_id.return_value = False
+ status_code = self.auth.create_token_response(self.request, bearer)[2]
+ self.assertEqual(status_code, 401)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+
+ def test_create_token_response_without_refresh_token(self):
+ # self.auth.refresh_token = False so we don't generate a refresh token
+ self.auth = ResourceOwnerPasswordCredentialsGrant(
+ request_validator=self.mock_validator, refresh_token=False)
+ bearer = BearerToken(self.mock_validator)
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer)
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ # ensure no refresh token is generated
+ self.assertNotIn('refresh_token', token)
+ # ensure client_authentication_required() is properly called
+ self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
+ # fail client authentication
+ self.mock_validator.reset_mock()
+ self.mock_validator.validate_user.return_value = True
+ self.mock_validator.authenticate_client.return_value = False
+ status_code = self.auth.create_token_response(self.request, bearer)[2]
+ self.assertEqual(status_code, 401)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+ # mock client_authentication_required() returning False then fail
+ self.mock_validator.reset_mock()
+ self.mock_validator.client_authentication_required.return_value = False
+ self.mock_validator.authenticate_client_id.return_value = False
+ status_code = self.auth.create_token_response(self.request, bearer)[2]
+ self.assertEqual(status_code, 401)
+ self.assertEqual(self.mock_validator.save_token.call_count, 0)
+
+ def test_custom_auth_validators_unsupported(self):
+ authval1, authval2 = mock.Mock(), mock.Mock()
+ expected = ('ResourceOwnerPasswordCredentialsGrant does not '
+ 'support authorization validators. Use token '
+ 'validators instead.')
+ with self.assertRaises(ValueError) as caught:
+ ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
+ pre_auth=[authval1])
+ self.assertEqual(caught.exception.args[0], expected)
+ with self.assertRaises(ValueError) as caught:
+ ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
+ post_auth=[authval2])
+ self.assertEqual(caught.exception.args[0], expected)
+ with self.assertRaises(AttributeError):
+ self.auth.custom_validators.pre_auth.append(authval1)
+ with self.assertRaises(AttributeError):
+ self.auth.custom_validators.pre_auth.append(authval2)
+
+ def test_custom_token_validators(self):
+ tknval1, tknval2 = mock.Mock(), mock.Mock()
+ self.auth.custom_validators.pre_token.append(tknval1)
+ self.auth.custom_validators.post_token.append(tknval2)
+
+ bearer = BearerToken(self.mock_validator)
+ self.auth.create_token_response(self.request, bearer)
+ self.assertTrue(tknval1.called)
+ self.assertTrue(tknval2.called)
+
+ def test_error_response(self):
+ pass
+
+ def test_scopes(self):
+ pass
+
+ def test_invalid_request_missing_params(self):
+ del self.request.grant_type
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
+ self.request)
+
+ def test_invalid_request_duplicates(self):
+ request = mock.MagicMock(wraps=self.request)
+ request.duplicate_params = ['scope']
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
+ request)
+
+ def test_invalid_grant_type(self):
+ self.request.grant_type = 'foo'
+ self.assertRaises(errors.UnsupportedGrantTypeError,
+ self.auth.validate_token_request, self.request)
+
+ def test_invalid_user(self):
+ self.mock_validator.validate_user.return_value = False
+ self.assertRaises(errors.InvalidGrantError, self.auth.validate_token_request,
+ self.request)
+
+ def test_client_id_missing(self):
+ del self.request.client.client_id
+ self.assertRaises(NotImplementedError, self.auth.validate_token_request,
+ self.request)
+
+ def test_valid_token_request(self):
+ self.mock_validator.validate_grant_type.return_value = True
+ self.auth.validate_token_request(self.request)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/test_parameters.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_parameters.py
new file mode 100644
index 0000000000..cd8c9e952b
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_parameters.py
@@ -0,0 +1,304 @@
+from unittest.mock import patch
+
+from oauthlib import signals
+from oauthlib.oauth2.rfc6749.errors import *
+from oauthlib.oauth2.rfc6749.parameters import *
+
+from tests.unittest import TestCase
+
+
+@patch('time.time', new=lambda: 1000)
+class ParameterTests(TestCase):
+
+ state = 'xyz'
+ auth_base = {
+ 'uri': 'https://server.example.com/authorize',
+ 'client_id': 's6BhdRkqt3',
+ 'redirect_uri': 'https://client.example.com/cb',
+ 'state': state,
+ 'scope': 'photos'
+ }
+ list_scope = ['list', 'of', 'scopes']
+
+ auth_grant = {'response_type': 'code'}
+ auth_grant_pkce = {'response_type': 'code', 'code_challenge': "code_challenge",
+ 'code_challenge_method': 'code_challenge_method'}
+ auth_grant_list_scope = {}
+ auth_implicit = {'response_type': 'token', 'extra': 'extra'}
+ auth_implicit_list_scope = {}
+
+ def setUp(self):
+ self.auth_grant.update(self.auth_base)
+ self.auth_grant_pkce.update(self.auth_base)
+ self.auth_implicit.update(self.auth_base)
+ self.auth_grant_list_scope.update(self.auth_grant)
+ self.auth_grant_list_scope['scope'] = self.list_scope
+ self.auth_implicit_list_scope.update(self.auth_implicit)
+ self.auth_implicit_list_scope['scope'] = self.list_scope
+
+ auth_base_uri = ('https://server.example.com/authorize?response_type={0}'
+ '&client_id=s6BhdRkqt3&redirect_uri=https%3A%2F%2F'
+ 'client.example.com%2Fcb&scope={1}&state={2}{3}')
+
+ auth_base_uri_pkce = ('https://server.example.com/authorize?response_type={0}'
+ '&client_id=s6BhdRkqt3&redirect_uri=https%3A%2F%2F'
+ 'client.example.com%2Fcb&scope={1}&state={2}{3}&code_challenge={4}'
+ '&code_challenge_method={5}')
+
+ auth_grant_uri = auth_base_uri.format('code', 'photos', state, '')
+ auth_grant_uri_pkce = auth_base_uri_pkce.format('code', 'photos', state, '', 'code_challenge',
+ 'code_challenge_method')
+ auth_grant_uri_list_scope = auth_base_uri.format('code', 'list+of+scopes', state, '')
+ auth_implicit_uri = auth_base_uri.format('token', 'photos', state, '&extra=extra')
+ auth_implicit_uri_list_scope = auth_base_uri.format('token', 'list+of+scopes', state, '&extra=extra')
+
+ grant_body = {
+ 'grant_type': 'authorization_code',
+ 'code': 'SplxlOBeZQQYbYS6WxSbIA',
+ 'redirect_uri': 'https://client.example.com/cb'
+ }
+ grant_body_pkce = {
+ 'grant_type': 'authorization_code',
+ 'code': 'SplxlOBeZQQYbYS6WxSbIA',
+ 'redirect_uri': 'https://client.example.com/cb',
+ 'code_verifier': 'code_verifier'
+ }
+ grant_body_scope = {'scope': 'photos'}
+ grant_body_list_scope = {'scope': list_scope}
+ auth_grant_body = ('grant_type=authorization_code&'
+ 'code=SplxlOBeZQQYbYS6WxSbIA&'
+ 'redirect_uri=https%3A%2F%2Fclient.example.com%2Fcb')
+ auth_grant_body_pkce = ('grant_type=authorization_code&'
+ 'code=SplxlOBeZQQYbYS6WxSbIA&'
+ 'redirect_uri=https%3A%2F%2Fclient.example.com%2Fcb'
+ '&code_verifier=code_verifier')
+ auth_grant_body_scope = auth_grant_body + '&scope=photos'
+ auth_grant_body_list_scope = auth_grant_body + '&scope=list+of+scopes'
+
+ pwd_body = {
+ 'grant_type': 'password',
+ 'username': 'johndoe',
+ 'password': 'A3ddj3w'
+ }
+ password_body = 'grant_type=password&username=johndoe&password=A3ddj3w'
+
+ cred_grant = {'grant_type': 'client_credentials'}
+ cred_body = 'grant_type=client_credentials'
+
+ grant_response = 'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz'
+ grant_dict = {'code': 'SplxlOBeZQQYbYS6WxSbIA', 'state': state}
+
+ error_nocode = 'https://client.example.com/cb?state=xyz'
+ error_nostate = 'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA'
+ error_wrongstate = 'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=abc'
+ error_denied = 'https://client.example.com/cb?error=access_denied&state=xyz'
+ error_invalid = 'https://client.example.com/cb?error=invalid_request&state=xyz'
+
+ implicit_base = 'https://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA&scope=abc&'
+ implicit_response = implicit_base + 'state={}&token_type=example&expires_in=3600'.format(state)
+ implicit_notype = implicit_base + 'state={}&expires_in=3600'.format(state)
+ implicit_wrongstate = implicit_base + 'state={}&token_type=exampleexpires_in=3600'.format('invalid')
+ implicit_nostate = implicit_base + 'token_type=example&expires_in=3600'
+ implicit_notoken = 'https://example.com/cb#state=xyz&token_type=example&expires_in=3600'
+
+ implicit_dict = {
+ 'access_token': '2YotnFZFEjr1zCsicMWpAA',
+ 'state': state,
+ 'token_type': 'example',
+ 'expires_in': 3600,
+ 'expires_at': 4600,
+ 'scope': ['abc']
+ }
+
+ json_response = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type": "example",'
+ ' "expires_in": 3600,'
+ ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter": "example_value",'
+ ' "scope":"abc def"}')
+ json_response_noscope = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type": "example",'
+ ' "expires_in": 3600,'
+ ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter": "example_value" }')
+ json_response_noexpire = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type": "example",'
+ ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter": "example_value"}')
+ json_response_expirenull = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
+ ' "token_type": "example",'
+ ' "expires_in": null,'
+ ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter": "example_value"}')
+
+ json_custom_error = '{ "error": "incorrect_client_credentials" }'
+ json_error = '{ "error": "access_denied" }'
+
+ json_notoken = ('{ "token_type": "example",'
+ ' "expires_in": 3600,'
+ ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter": "example_value" }')
+
+ json_notype = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
+ ' "expires_in": 3600,'
+ ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
+ ' "example_parameter": "example_value" }')
+
+ json_dict = {
+ 'access_token': '2YotnFZFEjr1zCsicMWpAA',
+ 'token_type': 'example',
+ 'expires_in': 3600,
+ 'expires_at': 4600,
+ 'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
+ 'example_parameter': 'example_value',
+ 'scope': ['abc', 'def']
+ }
+
+ json_noscope_dict = {
+ 'access_token': '2YotnFZFEjr1zCsicMWpAA',
+ 'token_type': 'example',
+ 'expires_in': 3600,
+ 'expires_at': 4600,
+ 'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
+ 'example_parameter': 'example_value'
+ }
+
+ json_noexpire_dict = {
+ 'access_token': '2YotnFZFEjr1zCsicMWpAA',
+ 'token_type': 'example',
+ 'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
+ 'example_parameter': 'example_value'
+ }
+
+ json_notype_dict = {
+ 'access_token': '2YotnFZFEjr1zCsicMWpAA',
+ 'expires_in': 3600,
+ 'expires_at': 4600,
+ 'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
+ 'example_parameter': 'example_value',
+ }
+
+ url_encoded_response = ('access_token=2YotnFZFEjr1zCsicMWpAA'
+ '&token_type=example'
+ '&expires_in=3600'
+ '&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA'
+ '&example_parameter=example_value'
+ '&scope=abc def')
+
+ url_encoded_error = 'error=access_denied'
+
+ url_encoded_notoken = ('token_type=example'
+ '&expires_in=3600'
+ '&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA'
+ '&example_parameter=example_value')
+
+
+ def test_prepare_grant_uri(self):
+ """Verify correct authorization URI construction."""
+ self.assertURLEqual(prepare_grant_uri(**self.auth_grant), self.auth_grant_uri)
+ self.assertURLEqual(prepare_grant_uri(**self.auth_grant_list_scope), self.auth_grant_uri_list_scope)
+ self.assertURLEqual(prepare_grant_uri(**self.auth_implicit), self.auth_implicit_uri)
+ self.assertURLEqual(prepare_grant_uri(**self.auth_implicit_list_scope), self.auth_implicit_uri_list_scope)
+ self.assertURLEqual(prepare_grant_uri(**self.auth_grant_pkce), self.auth_grant_uri_pkce)
+
+ def test_prepare_token_request(self):
+ """Verify correct access token request body construction."""
+ self.assertFormBodyEqual(prepare_token_request(**self.grant_body), self.auth_grant_body)
+ self.assertFormBodyEqual(prepare_token_request(**self.pwd_body), self.password_body)
+ self.assertFormBodyEqual(prepare_token_request(**self.cred_grant), self.cred_body)
+ self.assertFormBodyEqual(prepare_token_request(**self.grant_body_pkce), self.auth_grant_body_pkce)
+
+ def test_grant_response(self):
+ """Verify correct parameter parsing and validation for auth code responses."""
+ params = parse_authorization_code_response(self.grant_response)
+ self.assertEqual(params, self.grant_dict)
+ params = parse_authorization_code_response(self.grant_response, state=self.state)
+ self.assertEqual(params, self.grant_dict)
+
+ self.assertRaises(MissingCodeError, parse_authorization_code_response,
+ self.error_nocode)
+ self.assertRaises(AccessDeniedError, parse_authorization_code_response,
+ self.error_denied)
+ self.assertRaises(InvalidRequestFatalError, parse_authorization_code_response,
+ self.error_invalid)
+ self.assertRaises(MismatchingStateError, parse_authorization_code_response,
+ self.error_nostate, state=self.state)
+ self.assertRaises(MismatchingStateError, parse_authorization_code_response,
+ self.error_wrongstate, state=self.state)
+
+ def test_implicit_token_response(self):
+ """Verify correct parameter parsing and validation for implicit responses."""
+ self.assertEqual(parse_implicit_response(self.implicit_response),
+ self.implicit_dict)
+ self.assertRaises(MissingTokenError, parse_implicit_response,
+ self.implicit_notoken)
+ self.assertRaises(ValueError, parse_implicit_response,
+ self.implicit_nostate, state=self.state)
+ self.assertRaises(ValueError, parse_implicit_response,
+ self.implicit_wrongstate, state=self.state)
+
+ def test_custom_json_error(self):
+ self.assertRaises(CustomOAuth2Error, parse_token_response, self.json_custom_error)
+
+ def test_json_token_response(self):
+ """Verify correct parameter parsing and validation for token responses. """
+ self.assertEqual(parse_token_response(self.json_response), self.json_dict)
+ self.assertRaises(AccessDeniedError, parse_token_response, self.json_error)
+ self.assertRaises(MissingTokenError, parse_token_response, self.json_notoken)
+
+ self.assertEqual(parse_token_response(self.json_response_noscope,
+ scope=['all', 'the', 'scopes']), self.json_noscope_dict)
+ self.assertEqual(parse_token_response(self.json_response_noexpire), self.json_noexpire_dict)
+ self.assertEqual(parse_token_response(self.json_response_expirenull), self.json_noexpire_dict)
+
+ scope_changes_recorded = []
+ def record_scope_change(sender, message, old, new):
+ scope_changes_recorded.append((message, old, new))
+
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
+ signals.scope_changed.connect(record_scope_change)
+ try:
+ parse_token_response(self.json_response, scope='aaa')
+ self.assertEqual(len(scope_changes_recorded), 1)
+ message, old, new = scope_changes_recorded[0]
+ for scope in new + old:
+ self.assertIn(scope, message)
+ self.assertEqual(old, ['aaa'])
+ self.assertEqual(set(new), {'abc', 'def'})
+ finally:
+ signals.scope_changed.disconnect(record_scope_change)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
+
+
+ def test_json_token_notype(self):
+ """Verify strict token type parsing only when configured. """
+ self.assertEqual(parse_token_response(self.json_notype), self.json_notype_dict)
+ try:
+ os.environ['OAUTHLIB_STRICT_TOKEN_TYPE'] = '1'
+ self.assertRaises(MissingTokenTypeError, parse_token_response, self.json_notype)
+ finally:
+ del os.environ['OAUTHLIB_STRICT_TOKEN_TYPE']
+
+ def test_url_encoded_token_response(self):
+ """Verify fallback parameter parsing and validation for token responses. """
+ self.assertEqual(parse_token_response(self.url_encoded_response), self.json_dict)
+ self.assertRaises(AccessDeniedError, parse_token_response, self.url_encoded_error)
+ self.assertRaises(MissingTokenError, parse_token_response, self.url_encoded_notoken)
+
+ scope_changes_recorded = []
+ def record_scope_change(sender, message, old, new):
+ scope_changes_recorded.append((message, old, new))
+
+ os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
+ signals.scope_changed.connect(record_scope_change)
+ try:
+ token = parse_token_response(self.url_encoded_response, scope='aaa')
+ self.assertEqual(len(scope_changes_recorded), 1)
+ message, old, new = scope_changes_recorded[0]
+ for scope in new + old:
+ self.assertIn(scope, message)
+ self.assertEqual(old, ['aaa'])
+ self.assertEqual(set(new), {'abc', 'def'})
+ finally:
+ signals.scope_changed.disconnect(record_scope_change)
+ del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/test_request_validator.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_request_validator.py
new file mode 100644
index 0000000000..7a8d06b668
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_request_validator.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+from oauthlib.oauth2 import RequestValidator
+
+from tests.unittest import TestCase
+
+
+class RequestValidatorTest(TestCase):
+
+ def test_method_contracts(self):
+ v = RequestValidator()
+ self.assertRaises(NotImplementedError, v.authenticate_client, 'r')
+ self.assertRaises(NotImplementedError, v.authenticate_client_id,
+ 'client_id', 'r')
+ self.assertRaises(NotImplementedError, v.confirm_redirect_uri,
+ 'client_id', 'code', 'redirect_uri', 'client', 'request')
+ self.assertRaises(NotImplementedError, v.get_default_redirect_uri,
+ 'client_id', 'request')
+ self.assertRaises(NotImplementedError, v.get_default_scopes,
+ 'client_id', 'request')
+ self.assertRaises(NotImplementedError, v.get_original_scopes,
+ 'refresh_token', 'request')
+ self.assertFalse(v.is_within_original_scope(
+ ['scope'], 'refresh_token', 'request'))
+ self.assertRaises(NotImplementedError, v.invalidate_authorization_code,
+ 'client_id', 'code', 'request')
+ self.assertRaises(NotImplementedError, v.save_authorization_code,
+ 'client_id', 'code', 'request')
+ self.assertRaises(NotImplementedError, v.save_bearer_token,
+ 'token', 'request')
+ self.assertRaises(NotImplementedError, v.validate_bearer_token,
+ 'token', 'scopes', 'request')
+ self.assertRaises(NotImplementedError, v.validate_client_id,
+ 'client_id', 'request')
+ self.assertRaises(NotImplementedError, v.validate_code,
+ 'client_id', 'code', 'client', 'request')
+ self.assertRaises(NotImplementedError, v.validate_grant_type,
+ 'client_id', 'grant_type', 'client', 'request')
+ self.assertRaises(NotImplementedError, v.validate_redirect_uri,
+ 'client_id', 'redirect_uri', 'request')
+ self.assertRaises(NotImplementedError, v.validate_refresh_token,
+ 'refresh_token', 'client', 'request')
+ self.assertRaises(NotImplementedError, v.validate_response_type,
+ 'client_id', 'response_type', 'client', 'request')
+ self.assertRaises(NotImplementedError, v.validate_scopes,
+ 'client_id', 'scopes', 'client', 'request')
+ self.assertRaises(NotImplementedError, v.validate_user,
+ 'username', 'password', 'client', 'request')
+ self.assertTrue(v.client_authentication_required('r'))
+ self.assertFalse(
+ v.is_origin_allowed('client_id', 'https://foo.bar', 'r')
+ )
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/test_server.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_server.py
new file mode 100644
index 0000000000..94af37e56b
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_server.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib import common
+from oauthlib.oauth2.rfc6749 import errors, tokens
+from oauthlib.oauth2.rfc6749.endpoints import Server
+from oauthlib.oauth2.rfc6749.endpoints.authorization import (
+ AuthorizationEndpoint,
+)
+from oauthlib.oauth2.rfc6749.endpoints.resource import ResourceEndpoint
+from oauthlib.oauth2.rfc6749.endpoints.token import TokenEndpoint
+from oauthlib.oauth2.rfc6749.grant_types import (
+ AuthorizationCodeGrant, ClientCredentialsGrant, ImplicitGrant,
+ ResourceOwnerPasswordCredentialsGrant,
+)
+
+from tests.unittest import TestCase
+
+
+class AuthorizationEndpointTest(TestCase):
+
+ def setUp(self):
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.get_code_challenge.return_value = None
+ self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
+ auth_code = AuthorizationCodeGrant(
+ request_validator=self.mock_validator)
+ auth_code.save_authorization_code = mock.MagicMock()
+ implicit = ImplicitGrant(
+ request_validator=self.mock_validator)
+ implicit.save_token = mock.MagicMock()
+
+ response_types = {
+ 'code': auth_code,
+ 'token': implicit,
+ 'none': auth_code
+ }
+ self.expires_in = 1800
+ token = tokens.BearerToken(
+ self.mock_validator,
+ expires_in=self.expires_in
+ )
+ self.endpoint = AuthorizationEndpoint(
+ default_response_type='code',
+ default_token_type=token,
+ response_types=response_types
+ )
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_authorization_grant(self):
+ uri = 'http://i.b/l?response_type=code&client_id=me&scope=all+of+them&state=xyz'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?code=abc&state=xyz')
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_implicit_grant(self):
+ uri = 'http://i.b/l?response_type=token&client_id=me&scope=all+of+them&state=xyz'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me#access_token=abc&expires_in=' + str(self.expires_in) + '&token_type=Bearer&state=xyz&scope=all+of+them', parse_fragment=True)
+
+ def test_none_grant(self):
+ uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them&state=xyz'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?state=xyz', parse_fragment=True)
+ self.assertIsNone(body)
+ self.assertEqual(status_code, 302)
+
+ # and without the state parameter
+ uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me', parse_fragment=True)
+ self.assertIsNone(body)
+ self.assertEqual(status_code, 302)
+
+ def test_missing_type(self):
+ uri = 'http://i.b/l?client_id=me&scope=all+of+them'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ self.mock_validator.validate_request = mock.MagicMock(
+ side_effect=errors.InvalidRequestError())
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?error=invalid_request&error_description=Missing+response_type+parameter.')
+
+ def test_invalid_type(self):
+ uri = 'http://i.b/l?response_type=invalid&client_id=me&scope=all+of+them'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ self.mock_validator.validate_request = mock.MagicMock(
+ side_effect=errors.UnsupportedResponseTypeError())
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?error=unsupported_response_type')
+
+
+class TokenEndpointTest(TestCase):
+
+ def setUp(self):
+ def set_user(request):
+ request.user = mock.MagicMock()
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked_client_id'
+ return True
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.authenticate_client.side_effect = set_user
+ self.mock_validator.get_code_challenge.return_value = None
+ self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
+ auth_code = AuthorizationCodeGrant(
+ request_validator=self.mock_validator)
+ password = ResourceOwnerPasswordCredentialsGrant(
+ request_validator=self.mock_validator)
+ client = ClientCredentialsGrant(
+ request_validator=self.mock_validator)
+ supported_types = {
+ 'authorization_code': auth_code,
+ 'password': password,
+ 'client_credentials': client,
+ }
+ self.expires_in = 1800
+ token = tokens.BearerToken(
+ self.mock_validator,
+ expires_in=self.expires_in
+ )
+ self.endpoint = TokenEndpoint(
+ 'authorization_code',
+ default_token_type=token,
+ grant_types=supported_types
+ )
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_authorization_grant(self):
+ body = 'grant_type=authorization_code&code=abc&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': 'abc',
+ 'refresh_token': 'abc',
+ 'scope': 'all of them'
+ }
+ self.assertEqual(json.loads(body), token)
+
+ body = 'grant_type=authorization_code&code=abc'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': 'abc',
+ 'refresh_token': 'abc'
+ }
+ self.assertEqual(json.loads(body), token)
+
+ # try with additional custom variables
+ body = 'grant_type=authorization_code&code=abc&state=foobar'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ self.assertEqual(json.loads(body), token)
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_password_grant(self):
+ body = 'grant_type=password&username=a&password=hello&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': 'abc',
+ 'refresh_token': 'abc',
+ 'scope': 'all of them',
+ }
+ self.assertEqual(json.loads(body), token)
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_client_grant(self):
+ body = 'grant_type=client_credentials&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': 'abc',
+ 'scope': 'all of them',
+ }
+ self.assertEqual(json.loads(body), token)
+
+ def test_missing_type(self):
+ _, body, _ = self.endpoint.create_token_response('', body='')
+ token = {'error': 'unsupported_grant_type'}
+ self.assertEqual(json.loads(body), token)
+
+ def test_invalid_type(self):
+ body = 'grant_type=invalid'
+ _, body, _ = self.endpoint.create_token_response('', body=body)
+ token = {'error': 'unsupported_grant_type'}
+ self.assertEqual(json.loads(body), token)
+
+
+class SignedTokenEndpointTest(TestCase):
+
+ def setUp(self):
+ self.expires_in = 1800
+
+ def set_user(request):
+ request.user = mock.MagicMock()
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked_client_id'
+ return True
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.get_code_challenge.return_value = None
+ self.mock_validator.authenticate_client.side_effect = set_user
+ self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
+
+ self.private_pem = """
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA6TtDhWGwzEOWZP6m/zHoZnAPLABfetvoMPmxPGjFjtDuMRPv
+EvI1sbixZBjBtdnc5rTtHUUQ25Am3JzwPRGo5laMGbj1pPyCPxlVi9LK82HQNX0B
+YK7tZtVfDHElQA7F4v3j9d3rad4O9/n+lyGIQ0tT7yQcBm2A8FEaP0bZYCLMjwMN
+WfaVLE8eXHyv+MfpNNLI9wttLxygKYM48I3NwsFuJgOa/KuodXaAmf8pJnx8t1Wn
+nxvaYXFiUn/TxmhM/qhemPa6+0nqq+aWV5eT7xn4K/ghLgNs09v6Yge0pmPl9Oz+
++bjJ+aKRnAmwCOY8/5U5EilAiUOeBoO9+8OXtwIDAQABAoIBAGFTTbXXMkPK4HN8
+oItVdDlrAanG7hECuz3UtFUVE3upS/xG6TjqweVLwRqYCh2ssDXFwjy4mXRGDzF4
+e/e/6s9Txlrlh/w1MtTJ6ZzTdcViR9RKOczysjZ7S5KRlI3KnGFAuWPcG2SuOWjZ
+dZfzcj1Crd/ZHajBAVFHRsCo/ATVNKbTRprFfb27xKpQ2BwH/GG781sLE3ZVNIhs
+aRRaED4622kI1E/WXws2qQMqbFKzo0m1tPbLb3Z89WgZJ/tRQwuDype1Vfm7k6oX
+xfbp3948qSe/yWKRlMoPkleji/WxPkSIalzWSAi9ziN/0Uzhe65FURgrfHL3XR1A
+B8UR+aECgYEA7NPQZV4cAikk02Hv65JgISofqV49P8MbLXk8sdnI1n7Mj10TgzU3
+lyQGDEX4hqvT0bTXe4KAOxQZx9wumu05ejfzhdtSsEm6ptGHyCdmYDQeV0C/pxDX
+JNCK8XgMku2370XG0AnyBCT7NGlgtDcNCQufcesF2gEuoKiXg6Zjo7sCgYEA/Bzs
+9fWGZZnSsMSBSW2OYbFuhF3Fne0HcxXQHipl0Rujc/9g0nccwqKGizn4fGOE7a8F
+usQgJoeGcinL7E9OEP/uQ9VX1C9RNVjIxP1O5/Guw1zjxQQYetOvbPhN2QhD1Ye7
+0TRKrW1BapcjwLpFQlVg1ZeTPOi5lv24W/wX9jUCgYEAkrMSX/hPuTbrTNVZ3L6r
+NV/2hN+PaTPeXei/pBuXwOaCqDurnpcUfFcgN/IP5LwDVd+Dq0pHTFFDNv45EFbq
+R77o5n3ZVsIVEMiyJ1XgoK8oLDw7e61+15smtjT69Piz+09pu+ytMcwGn4y3Dmsb
+dALzHYnL8iLRU0ubrz0ec4kCgYAJiVKRTzNBPptQom49h85d9ac3jJCAE8o3WTjh
+Gzt0uHXrWlqgO280EY/DTnMOyXjqwLcXxHlu26uDP/99tdY/IF8z46sJ1KxetzgI
+84f7kBHLRAU9m5UNeFpnZdEUB5MBTbwWAsNcYgiabpMkpCcghjg+fBhOsoLqqjhC
+CnwhjQKBgQDkv0QTdyBU84TE8J0XY3eLQwXbrvG2yD5A2ntN3PyxGEneX5WTJGMZ
+xJxwaFYQiDS3b9E7b8Q5dg8qa5Y1+epdhx3cuQAWPm+AoHKshDfbRve4txBDQAqh
+c6MxSWgsa+2Ld5SWSNbGtpPcmEM3Fl5ttMCNCKtNc0UE16oHwaPAIw==
+-----END RSA PRIVATE KEY-----
+ """
+
+ self.public_pem = """
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6TtDhWGwzEOWZP6m/zHo
+ZnAPLABfetvoMPmxPGjFjtDuMRPvEvI1sbixZBjBtdnc5rTtHUUQ25Am3JzwPRGo
+5laMGbj1pPyCPxlVi9LK82HQNX0BYK7tZtVfDHElQA7F4v3j9d3rad4O9/n+lyGI
+Q0tT7yQcBm2A8FEaP0bZYCLMjwMNWfaVLE8eXHyv+MfpNNLI9wttLxygKYM48I3N
+wsFuJgOa/KuodXaAmf8pJnx8t1WnnxvaYXFiUn/TxmhM/qhemPa6+0nqq+aWV5eT
+7xn4K/ghLgNs09v6Yge0pmPl9Oz++bjJ+aKRnAmwCOY8/5U5EilAiUOeBoO9+8OX
+twIDAQAB
+-----END PUBLIC KEY-----
+ """
+
+ signed_token = tokens.signed_token_generator(self.private_pem,
+ user_id=123)
+ self.endpoint = Server(
+ self.mock_validator,
+ token_expires_in=self.expires_in,
+ token_generator=signed_token,
+ refresh_token_generator=tokens.random_token_generator
+ )
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_authorization_grant(self):
+ body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=authorization_code&code=abc&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ body = json.loads(body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': body['access_token'],
+ 'refresh_token': 'abc',
+ 'scope': 'all of them'
+ }
+ self.assertEqual(body, token)
+
+ body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=authorization_code&code=abc'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ body = json.loads(body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': body['access_token'],
+ 'refresh_token': 'abc'
+ }
+ self.assertEqual(body, token)
+
+ # try with additional custom variables
+ body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=authorization_code&code=abc&state=foobar'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ body = json.loads(body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': body['access_token'],
+ 'refresh_token': 'abc'
+ }
+ self.assertEqual(body, token)
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_password_grant(self):
+ body = 'grant_type=password&username=a&password=hello&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ body = json.loads(body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': body['access_token'],
+ 'refresh_token': 'abc',
+ 'scope': 'all of them',
+ }
+ self.assertEqual(body, token)
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_scopes_and_user_id_stored_in_access_token(self):
+ body = 'grant_type=password&username=a&password=hello&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+
+ access_token = json.loads(body)['access_token']
+
+ claims = common.verify_signed_token(self.public_pem, access_token)
+
+ self.assertEqual(claims['scope'], 'all of them')
+ self.assertEqual(claims['user_id'], 123)
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_client_grant(self):
+ body = 'grant_type=client_credentials&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ body = json.loads(body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': body['access_token'],
+ 'scope': 'all of them',
+ }
+ self.assertEqual(body, token)
+
+ def test_missing_type(self):
+ _, body, _ = self.endpoint.create_token_response('', body='client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&code=abc')
+ token = {'error': 'unsupported_grant_type'}
+ self.assertEqual(json.loads(body), token)
+
+ def test_invalid_type(self):
+ body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=invalid&code=abc'
+ _, body, _ = self.endpoint.create_token_response('', body=body)
+ token = {'error': 'unsupported_grant_type'}
+ self.assertEqual(json.loads(body), token)
+
+
+class ResourceEndpointTest(TestCase):
+
+ def setUp(self):
+ self.mock_validator = mock.MagicMock()
+ self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
+ token = tokens.BearerToken(request_validator=self.mock_validator)
+ self.endpoint = ResourceEndpoint(
+ default_token='Bearer',
+ token_types={'Bearer': token}
+ )
+
+ def test_defaults(self):
+ uri = 'http://a.b/path?some=query'
+ self.mock_validator.validate_bearer_token.return_value = False
+ valid, request = self.endpoint.verify_request(uri)
+ self.assertFalse(valid)
+ self.assertEqual(request.token_type, 'Bearer')
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/test_tokens.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_tokens.py
new file mode 100644
index 0000000000..fa6b1c092c
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_tokens.py
@@ -0,0 +1,170 @@
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749.tokens import (
+ BearerToken, prepare_bearer_body, prepare_bearer_headers,
+ prepare_bearer_uri, prepare_mac_header,
+)
+
+from tests.unittest import TestCase
+
+
+class TokenTest(TestCase):
+
+ # MAC without body/payload or extension
+ mac_plain = {
+ 'token': 'h480djs93hd8',
+ 'uri': 'http://example.com/resource/1?b=1&a=2',
+ 'key': '489dks293j39',
+ 'http_method': 'GET',
+ 'nonce': '264095:dj83hs9s',
+ 'hash_algorithm': 'hmac-sha-1'
+ }
+ auth_plain = {
+ 'Authorization': 'MAC id="h480djs93hd8", nonce="264095:dj83hs9s",'
+ ' mac="SLDJd4mg43cjQfElUs3Qub4L6xE="'
+ }
+
+ # MAC with body/payload, no extension
+ mac_body = {
+ 'token': 'jd93dh9dh39D',
+ 'uri': 'http://example.com/request',
+ 'key': '8yfrufh348h',
+ 'http_method': 'POST',
+ 'nonce': '273156:di3hvdf8',
+ 'hash_algorithm': 'hmac-sha-1',
+ 'body': 'hello=world%21'
+ }
+ auth_body = {
+ 'Authorization': 'MAC id="jd93dh9dh39D", nonce="273156:di3hvdf8",'
+ ' bodyhash="k9kbtCIy0CkI3/FEfpS/oIDjk6k=", mac="W7bdMZbv9UWOTadASIQHagZyirA="'
+ }
+
+ # MAC with body/payload and extension
+ mac_both = {
+ 'token': 'h480djs93hd8',
+ 'uri': 'http://example.com/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2&a3=2+q',
+ 'key': '489dks293j39',
+ 'http_method': 'GET',
+ 'nonce': '264095:7d8f3e4a',
+ 'hash_algorithm': 'hmac-sha-1',
+ 'body': 'Hello World!',
+ 'ext': 'a,b,c'
+ }
+ auth_both = {
+ 'Authorization': 'MAC id="h480djs93hd8", nonce="264095:7d8f3e4a",'
+ ' bodyhash="Lve95gjOVATpfV8EL5X4nxwjKHE=", ext="a,b,c",'
+ ' mac="Z3C2DojEopRDIC88/imW8Ez853g="'
+ }
+
+ # Bearer
+ token = 'vF9dft4qmT'
+ uri = 'http://server.example.com/resource'
+ bearer_headers = {
+ 'Authorization': 'Bearer vF9dft4qmT'
+ }
+ valid_bearer_header_lowercase = {"Authorization": "bearer vF9dft4qmT"}
+ fake_bearer_headers = [
+ {'Authorization': 'Beaver vF9dft4qmT'},
+ {'Authorization': 'BeavervF9dft4qmT'},
+ {'Authorization': 'Beaver vF9dft4qmT'},
+ {'Authorization': 'BearerF9dft4qmT'},
+ {'Authorization': 'Bearer vF9d ft4qmT'},
+ ]
+ valid_header_with_multiple_spaces = {'Authorization': 'Bearer vF9dft4qmT'}
+ bearer_body = 'access_token=vF9dft4qmT'
+ bearer_uri = 'http://server.example.com/resource?access_token=vF9dft4qmT'
+
+ def _mocked_validate_bearer_token(self, token, scopes, request):
+ if not token:
+ return False
+ return True
+
+ def test_prepare_mac_header(self):
+ """Verify mac signatures correctness
+
+ TODO: verify hmac-sha-256
+ """
+ self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)
+ self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)
+ self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)
+
+ def test_prepare_bearer_request(self):
+ """Verify proper addition of bearer tokens to requests.
+
+ They may be represented as query components in body or URI or
+ in a Bearer authorization header.
+ """
+ self.assertEqual(prepare_bearer_headers(self.token), self.bearer_headers)
+ self.assertEqual(prepare_bearer_body(self.token), self.bearer_body)
+ self.assertEqual(prepare_bearer_uri(self.token, uri=self.uri), self.bearer_uri)
+
+ def test_valid_bearer_is_validated(self):
+ request_validator = mock.MagicMock()
+ request_validator.validate_bearer_token = self._mocked_validate_bearer_token
+
+ request = Request("/", headers=self.bearer_headers)
+ result = BearerToken(request_validator=request_validator).validate_request(
+ request
+ )
+ self.assertTrue(result)
+
+ def test_lowercase_bearer_is_validated(self):
+ request_validator = mock.MagicMock()
+ request_validator.validate_bearer_token = self._mocked_validate_bearer_token
+
+ request = Request("/", headers=self.valid_bearer_header_lowercase)
+ result = BearerToken(request_validator=request_validator).validate_request(
+ request
+ )
+ self.assertTrue(result)
+
+ def test_fake_bearer_is_not_validated(self):
+ request_validator = mock.MagicMock()
+ request_validator.validate_bearer_token = self._mocked_validate_bearer_token
+
+ for fake_header in self.fake_bearer_headers:
+ request = Request("/", headers=fake_header)
+ result = BearerToken(request_validator=request_validator).validate_request(
+ request
+ )
+
+ self.assertFalse(result)
+
+ def test_header_with_multispaces_is_validated(self):
+ request_validator = mock.MagicMock()
+ request_validator.validate_bearer_token = self._mocked_validate_bearer_token
+
+ request = Request("/", headers=self.valid_header_with_multiple_spaces)
+ result = BearerToken(request_validator=request_validator).validate_request(
+ request
+ )
+
+ self.assertTrue(result)
+
+ def test_estimate_type(self):
+ request_validator = mock.MagicMock()
+ request_validator.validate_bearer_token = self._mocked_validate_bearer_token
+ request = Request("/", headers=self.bearer_headers)
+ result = BearerToken(request_validator=request_validator).estimate_type(request)
+ self.assertEqual(result, 9)
+
+ def test_estimate_type_with_fake_header_returns_type_0(self):
+ request_validator = mock.MagicMock()
+ request_validator.validate_bearer_token = self._mocked_validate_bearer_token
+
+ for fake_header in self.fake_bearer_headers:
+ request = Request("/", headers=fake_header)
+ result = BearerToken(request_validator=request_validator).estimate_type(
+ request
+ )
+
+ if (
+ fake_header["Authorization"].count(" ") == 2
+ and fake_header["Authorization"].split()[0] == "Bearer"
+ ):
+ # If we're dealing with the header containing 2 spaces, it will be recognized
+ # as a Bearer valid header, the token itself will be invalid by the way.
+ self.assertEqual(result, 9)
+ else:
+ self.assertEqual(result, 0)
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc6749/test_utils.py b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_utils.py
new file mode 100644
index 0000000000..3299591926
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc6749/test_utils.py
@@ -0,0 +1,100 @@
+import datetime
+import os
+
+from oauthlib.oauth2.rfc6749.utils import (
+ escape, generate_age, host_from_uri, is_secure_transport, list_to_scope,
+ params_from_uri, scope_to_list,
+)
+
+from tests.unittest import TestCase
+
+
+class ScopeObject:
+ """
+ Fixture for testing list_to_scope()/scope_to_list() with objects other
+ than regular strings.
+ """
+ def __init__(self, scope):
+ self.scope = scope
+
+ def __str__(self):
+ return self.scope
+
+
+class UtilsTests(TestCase):
+
+ def test_escape(self):
+ """Assert that we are only escaping unicode"""
+ self.assertRaises(ValueError, escape, b"I am a string type. Not a unicode type.")
+ self.assertEqual(escape("I am a unicode type."), "I%20am%20a%20unicode%20type.")
+
+ def test_host_from_uri(self):
+ """Test if hosts and ports are properly extracted from URIs.
+
+ This should be done according to the MAC Authentication spec.
+ Defaults ports should be provided when none is present in the URI.
+ """
+ self.assertEqual(host_from_uri('http://a.b-c.com:8080'), ('a.b-c.com', '8080'))
+ self.assertEqual(host_from_uri('https://a.b.com:8080'), ('a.b.com', '8080'))
+ self.assertEqual(host_from_uri('http://www.example.com'), ('www.example.com', '80'))
+ self.assertEqual(host_from_uri('https://www.example.com'), ('www.example.com', '443'))
+
+ def test_is_secure_transport(self):
+ """Test check secure uri."""
+ if 'OAUTHLIB_INSECURE_TRANSPORT' in os.environ:
+ del os.environ['OAUTHLIB_INSECURE_TRANSPORT']
+
+ self.assertTrue(is_secure_transport('https://example.com'))
+ self.assertFalse(is_secure_transport('http://example.com'))
+
+ os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
+ self.assertTrue(is_secure_transport('http://example.com'))
+ del os.environ['OAUTHLIB_INSECURE_TRANSPORT']
+
+ def test_params_from_uri(self):
+ self.assertEqual(params_from_uri('http://i.b/?foo=bar&g&scope=a+d'),
+ {'foo': 'bar', 'g': '', 'scope': ['a', 'd']})
+
+ def test_generate_age(self):
+ issue_time = datetime.datetime.now() - datetime.timedelta(
+ days=3, minutes=1, seconds=4)
+ self.assertGreater(float(generate_age(issue_time)), 259263.0)
+
+ def test_list_to_scope(self):
+ expected = 'foo bar baz'
+
+ string_list = ['foo', 'bar', 'baz']
+ self.assertEqual(list_to_scope(string_list), expected)
+
+ string_tuple = ('foo', 'bar', 'baz')
+ self.assertEqual(list_to_scope(string_tuple), expected)
+
+ obj_list = [ScopeObject('foo'), ScopeObject('bar'), ScopeObject('baz')]
+ self.assertEqual(list_to_scope(obj_list), expected)
+
+ set_list = set(string_list)
+ set_scope = list_to_scope(set_list)
+ assert len(set_scope.split(' ')) == 3
+ for x in string_list:
+ assert x in set_scope
+
+ self.assertRaises(ValueError, list_to_scope, object())
+
+ def test_scope_to_list(self):
+ expected = ['foo', 'bar', 'baz']
+
+ string_scopes = 'foo bar baz '
+ self.assertEqual(scope_to_list(string_scopes), expected)
+
+ string_list_scopes = ['foo', 'bar', 'baz']
+ self.assertEqual(scope_to_list(string_list_scopes), expected)
+
+ tuple_list_scopes = ('foo', 'bar', 'baz')
+ self.assertEqual(scope_to_list(tuple_list_scopes), expected)
+
+ obj_list_scopes = [ScopeObject('foo'), ScopeObject('bar'), ScopeObject('baz')]
+ self.assertEqual(scope_to_list(obj_list_scopes), expected)
+
+ set_list_scopes = set(string_list_scopes)
+ set_list = scope_to_list(set_list_scopes)
+ self.assertEqual(sorted(set_list), sorted(string_list_scopes))
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc8628/__init__.py b/contrib/python/oauthlib/tests/oauth2/rfc8628/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc8628/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc8628/clients/__init__.py b/contrib/python/oauthlib/tests/oauth2/rfc8628/clients/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc8628/clients/__init__.py
diff --git a/contrib/python/oauthlib/tests/oauth2/rfc8628/clients/test_device.py b/contrib/python/oauthlib/tests/oauth2/rfc8628/clients/test_device.py
new file mode 100644
index 0000000000..725dea2a92
--- /dev/null
+++ b/contrib/python/oauthlib/tests/oauth2/rfc8628/clients/test_device.py
@@ -0,0 +1,63 @@
+import os
+from unittest.mock import patch
+
+from oauthlib import signals
+from oauthlib.oauth2 import DeviceClient
+
+from tests.unittest import TestCase
+
+
+class DeviceClientTest(TestCase):
+
+ client_id = "someclientid"
+ kwargs = {
+ "some": "providers",
+ "require": "extra arguments"
+ }
+
+ client_secret = "asecret"
+
+ device_code = "somedevicecode"
+
+ scope = ["profile", "email"]
+
+ body = "not=empty"
+
+ body_up = "not=empty&grant_type=urn:ietf:params:oauth:grant-type:device_code"
+ body_code = body_up + "&device_code=somedevicecode"
+ body_kwargs = body_code + "&some=providers&require=extra+arguments"
+
+ uri = "https://example.com/path?query=world"
+ uri_id = uri + "&client_id=" + client_id
+ uri_grant = uri_id + "&grant_type=urn:ietf:params:oauth:grant-type:device_code"
+ uri_secret = uri_grant + "&client_secret=asecret"
+ uri_scope = uri_secret + "&scope=profile+email"
+
+ def test_request_body(self):
+ client = DeviceClient(self.client_id)
+
+ # Basic, no extra arguments
+ body = client.prepare_request_body(self.device_code, body=self.body)
+ self.assertFormBodyEqual(body, self.body_code)
+
+ rclient = DeviceClient(self.client_id)
+ body = rclient.prepare_request_body(self.device_code, body=self.body)
+ self.assertFormBodyEqual(body, self.body_code)
+
+ # With extra parameters
+ body = client.prepare_request_body(
+ self.device_code, body=self.body, **self.kwargs)
+ self.assertFormBodyEqual(body, self.body_kwargs)
+
+ def test_request_uri(self):
+ client = DeviceClient(self.client_id)
+
+ uri = client.prepare_request_uri(self.uri)
+ self.assertURLEqual(uri, self.uri_grant)
+
+ client = DeviceClient(self.client_id, client_secret=self.client_secret)
+ uri = client.prepare_request_uri(self.uri)
+ self.assertURLEqual(uri, self.uri_secret)
+
+ uri = client.prepare_request_uri(self.uri, scope=self.scope)
+ self.assertURLEqual(uri, self.uri_scope)
diff --git a/contrib/python/oauthlib/tests/openid/__init__.py b/contrib/python/oauthlib/tests/openid/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/__init__.py
diff --git a/contrib/python/oauthlib/tests/openid/connect/__init__.py b/contrib/python/oauthlib/tests/openid/connect/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/__init__.py
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/__init__.py b/contrib/python/oauthlib/tests/openid/connect/core/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/__init__.py
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/endpoints/__init__.py b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/__init__.py
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_claims_handling.py b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_claims_handling.py
new file mode 100644
index 0000000000..301ed1aa44
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_claims_handling.py
@@ -0,0 +1,107 @@
+"""Ensure OpenID Connect Authorization Request 'claims' are preserved across authorization.
+
+The claims parameter is an optional query param for the Authorization Request endpoint
+ but if it is provided and is valid it needs to be deserialized (from urlencoded JSON)
+ and persisted with the authorization code itself, then in the subsequent Access Token
+ request the claims should be transferred (via the oauthlib request) to be persisted
+ with the Access Token when it is created.
+"""
+from unittest import mock
+
+from oauthlib.openid import RequestValidator
+from oauthlib.openid.connect.core.endpoints.pre_configured import Server
+
+from __tests__.oauth2.rfc6749.endpoints.test_utils import get_query_credentials
+from tests.unittest import TestCase
+
+
+class TestClaimsHandling(TestCase):
+
+ DEFAULT_REDIRECT_URI = 'http://i.b./path'
+
+ def set_scopes(self, scopes):
+ def set_request_scopes(client_id, code, client, request):
+ request.scopes = scopes
+ return True
+ return set_request_scopes
+
+ def set_user(self, request):
+ request.user = 'foo'
+ request.client_id = 'bar'
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def save_claims_with_code(self, client_id, code, request, *args, **kwargs):
+ # a real validator would save the claims with the code during save_authorization_code()
+ self.claims_from_auth_code_request = request.claims
+ self.scopes = request.scopes.split()
+
+ def retrieve_claims_saved_with_code(self, client_id, code, client, request, *args, **kwargs):
+ request.claims = self.claims_from_auth_code_request
+ request.scopes = self.scopes
+
+ return True
+
+ def save_claims_with_bearer_token(self, token, request, *args, **kwargs):
+ # a real validator would save the claims with the access token during save_bearer_token()
+ self.claims_saved_with_bearer_token = request.claims
+
+ def setUp(self):
+ self.validator = mock.MagicMock(spec=RequestValidator)
+ self.validator.get_code_challenge.return_value = None
+ self.validator.get_default_redirect_uri.return_value = TestClaimsHandling.DEFAULT_REDIRECT_URI
+ self.validator.authenticate_client.side_effect = self.set_client
+
+ self.validator.save_authorization_code.side_effect = self.save_claims_with_code
+ self.validator.validate_code.side_effect = self.retrieve_claims_saved_with_code
+ self.validator.save_token.side_effect = self.save_claims_with_bearer_token
+
+ self.server = Server(self.validator)
+
+ def test_claims_stored_on_code_creation(self):
+
+ claims = {
+ "id_token": {
+ "claim_1": None,
+ "claim_2": {
+ "essential": True
+ }
+ },
+ "userinfo": {
+ "claim_3": {
+ "essential": True
+ },
+ "claim_4": None
+ }
+ }
+
+ claims_urlquoted = '%7B%22id_token%22%3A%20%7B%22claim_2%22%3A%20%7B%22essential%22%3A%20true%7D%2C%20%22claim_1%22%3A%20null%7D%2C%20%22userinfo%22%3A%20%7B%22claim_4%22%3A%20null%2C%20%22claim_3%22%3A%20%7B%22essential%22%3A%20true%7D%7D%7D'
+ uri = 'http://example.com/path?client_id=abc&scope=openid+test_scope&response_type=code&claims=%s'
+
+ h, b, s = self.server.create_authorization_response(uri % claims_urlquoted, scopes='openid test_scope')
+
+ self.assertDictEqual(self.claims_from_auth_code_request, claims)
+
+ code = get_query_credentials(h['Location'])['code'][0]
+ token_uri = 'http://example.com/path'
+ _, body, _ = self.server.create_token_response(
+ token_uri,
+ body='client_id=me&redirect_uri=http://back.to/me&grant_type=authorization_code&code=%s' % code
+ )
+
+ self.assertDictEqual(self.claims_saved_with_bearer_token, claims)
+
+ def test_invalid_claims(self):
+ uri = 'http://example.com/path?client_id=abc&scope=openid+test_scope&response_type=code&claims=this-is-not-json'
+
+ h, b, s = self.server.create_authorization_response(uri, scopes='openid test_scope')
+ error = get_query_credentials(h['Location'])['error'][0]
+ error_desc = get_query_credentials(h['Location'])['error_description'][0]
+ self.assertEqual(error, 'invalid_request')
+ self.assertEqual(error_desc, "Malformed claims parameter")
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_openid_connect_params_handling.py b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_openid_connect_params_handling.py
new file mode 100644
index 0000000000..c55136fbf1
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_openid_connect_params_handling.py
@@ -0,0 +1,78 @@
+from unittest import mock
+from urllib.parse import urlencode
+
+from oauthlib.oauth2 import InvalidRequestError
+from oauthlib.oauth2.rfc6749.endpoints.authorization import (
+ AuthorizationEndpoint,
+)
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+from oauthlib.openid.connect.core.grant_types import AuthorizationCodeGrant
+
+from tests.unittest import TestCase
+
+
+class OpenIDConnectEndpointTest(TestCase):
+
+ def setUp(self):
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.authenticate_client.side_effect = self.set_client
+ grant = AuthorizationCodeGrant(request_validator=self.mock_validator)
+ bearer = BearerToken(self.mock_validator)
+ self.endpoint = AuthorizationEndpoint(grant, bearer,
+ response_types={'code': grant})
+ params = {
+ 'prompt': 'consent',
+ 'display': 'touch',
+ 'nonce': 'abcd',
+ 'state': 'abc',
+ 'redirect_uri': 'https://a.b/cb',
+ 'response_type': 'code',
+ 'client_id': 'abcdef',
+ 'scope': 'hello openid'
+ }
+ self.url = 'http://a.b/path?' + urlencode(params)
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_authorization_endpoint_handles_prompt(self, generate_token):
+ generate_token.return_value = "MOCK_CODE"
+ # In the GET view:
+ scopes, creds = self.endpoint.validate_authorization_request(self.url)
+ # In the POST view:
+ creds['scopes'] = scopes
+ h, b, s = self.endpoint.create_authorization_response(self.url,
+ credentials=creds)
+ expected = 'https://a.b/cb?state=abc&code=MOCK_CODE'
+ self.assertURLEqual(h['Location'], expected)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ def test_prompt_none_exclusiveness(self):
+ """
+ Test that prompt=none can't be used with another prompt value.
+ """
+ params = {
+ 'prompt': 'none consent',
+ 'state': 'abc',
+ 'redirect_uri': 'https://a.b/cb',
+ 'response_type': 'code',
+ 'client_id': 'abcdef',
+ 'scope': 'hello openid'
+ }
+ url = 'http://a.b/path?' + urlencode(params)
+ with self.assertRaises(InvalidRequestError):
+ self.endpoint.validate_authorization_request(url)
+
+ def test_oidc_params_preservation(self):
+ """
+ Test that the nonce parameter is passed through.
+ """
+ scopes, creds = self.endpoint.validate_authorization_request(self.url)
+
+ self.assertEqual(creds['prompt'], {'consent'})
+ self.assertEqual(creds['nonce'], 'abcd')
+ self.assertEqual(creds['display'], 'touch')
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_userinfo_endpoint.py b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_userinfo_endpoint.py
new file mode 100644
index 0000000000..4833485195
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/endpoints/test_userinfo_endpoint.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.openid import RequestValidator, UserInfoEndpoint
+
+from tests.unittest import TestCase
+
+
+def set_scopes_valid(token, scopes, request):
+ request.scopes = ["openid", "bar"]
+ return True
+
+
+class UserInfoEndpointTest(TestCase):
+ def setUp(self):
+ self.claims = {
+ "sub": "john",
+ "fruit": "banana"
+ }
+ # Can't use MagicMock/wraps below.
+ # Triggers error when endpoint copies to self.bearer.request_validator
+ self.validator = RequestValidator()
+ self.validator.validate_bearer_token = mock.Mock()
+ self.validator.validate_bearer_token.side_effect = set_scopes_valid
+ self.validator.get_userinfo_claims = mock.Mock()
+ self.validator.get_userinfo_claims.return_value = self.claims
+ self.endpoint = UserInfoEndpoint(self.validator)
+
+ self.uri = 'should_not_matter'
+ self.headers = {
+ 'Authorization': 'Bearer eyJxx'
+ }
+
+ def test_userinfo_no_auth(self):
+ self.endpoint.create_userinfo_response(self.uri)
+
+ def test_userinfo_wrong_auth(self):
+ self.headers['Authorization'] = 'Basic foifoifoi'
+ self.endpoint.create_userinfo_response(self.uri, headers=self.headers)
+
+ def test_userinfo_token_expired(self):
+ self.validator.validate_bearer_token.return_value = False
+ self.endpoint.create_userinfo_response(self.uri, headers=self.headers)
+
+ def test_userinfo_token_no_openid_scope(self):
+ def set_scopes_invalid(token, scopes, request):
+ request.scopes = ["foo", "bar"]
+ return True
+ self.validator.validate_bearer_token.side_effect = set_scopes_invalid
+ with self.assertRaises(errors.InsufficientScopeError) as context:
+ self.endpoint.create_userinfo_response(self.uri)
+
+ def test_userinfo_json_response(self):
+ h, b, s = self.endpoint.create_userinfo_response(self.uri)
+ self.assertEqual(s, 200)
+ body_json = json.loads(b)
+ self.assertEqual(self.claims, body_json)
+ self.assertEqual("application/json", h['Content-Type'])
+
+ def test_userinfo_jwt_response(self):
+ self.validator.get_userinfo_claims.return_value = "eyJzzzzz"
+ h, b, s = self.endpoint.create_userinfo_response(self.uri)
+ self.assertEqual(s, 200)
+ self.assertEqual(b, "eyJzzzzz")
+ self.assertEqual("application/jwt", h['Content-Type'])
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/__init__.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/__init__.py
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_authorization_code.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_authorization_code.py
new file mode 100644
index 0000000000..49b03a7f7d
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_authorization_code.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749.errors import (
+ ConsentRequired, InvalidRequestError, LoginRequired,
+)
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+from oauthlib.openid.connect.core.grant_types.authorization_code import (
+ AuthorizationCodeGrant,
+)
+
+from __tests__.oauth2.rfc6749.grant_types.test_authorization_code import (
+ AuthorizationCodeGrantTest,
+)
+from tests.unittest import TestCase
+
+
+def get_id_token_mock(token, token_handler, request):
+ return "MOCKED_TOKEN"
+
+
+class OpenIDAuthCodeInterferenceTest(AuthorizationCodeGrantTest):
+ """Test that OpenID don't interfere with normal OAuth 2 flows."""
+
+ def setUp(self):
+ super().setUp()
+ self.auth = AuthorizationCodeGrant(request_validator=self.mock_validator)
+
+
+class OpenIDAuthCodeTest(TestCase):
+
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ self.request.scopes = ('hello', 'openid')
+ self.request.expires_in = 1800
+ self.request.client_id = 'abcdef'
+ self.request.code = '1234'
+ self.request.response_type = 'code'
+ self.request.grant_type = 'authorization_code'
+ self.request.redirect_uri = 'https://a.b/cb'
+ self.request.state = 'abc'
+ self.request.nonce = None
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.authenticate_client.side_effect = self.set_client
+ self.mock_validator.get_code_challenge.return_value = None
+ self.mock_validator.get_id_token.side_effect = get_id_token_mock
+ self.auth = AuthorizationCodeGrant(request_validator=self.mock_validator)
+
+ self.url_query = 'https://a.b/cb?code=abc&state=abc'
+ self.url_fragment = 'https://a.b/cb#code=abc&state=abc'
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_authorization(self, generate_token):
+
+ scope, info = self.auth.validate_authorization_request(self.request)
+
+ generate_token.return_value = 'abc'
+ bearer = BearerToken(self.mock_validator)
+ self.request.response_mode = 'query'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_query)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ self.request.response_mode = 'fragment'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_no_prompt_authorization(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.prompt = 'none'
+
+ bearer = BearerToken(self.mock_validator)
+
+ self.request.response_mode = 'query'
+ self.request.id_token_hint = 'me@email.com'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_query)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ # Test alternative response modes
+ self.request.response_mode = 'fragment'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
+
+ # Ensure silent authentication and authorization is done
+ self.mock_validator.validate_silent_login.return_value = False
+ self.mock_validator.validate_silent_authorization.return_value = True
+ self.assertRaises(LoginRequired,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=login_required', h['Location'])
+
+ self.mock_validator.validate_silent_login.return_value = True
+ self.mock_validator.validate_silent_authorization.return_value = False
+ self.assertRaises(ConsentRequired,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=consent_required', h['Location'])
+
+ # ID token hint must match logged in user
+ self.mock_validator.validate_silent_authorization.return_value = True
+ self.mock_validator.validate_user_match.return_value = False
+ self.assertRaises(LoginRequired,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=login_required', h['Location'])
+
+ def test_none_multi_prompt(self):
+ bearer = BearerToken(self.mock_validator)
+
+ self.request.prompt = 'none login'
+ self.assertRaises(InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ self.request.prompt = 'none consent'
+ self.assertRaises(InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ self.request.prompt = 'none select_account'
+ self.assertRaises(InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ self.request.prompt = 'consent none login'
+ self.assertRaises(InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ def set_scopes(self, client_id, code, client, request):
+ request.scopes = self.request.scopes
+ request.user = 'bob'
+ return True
+
+ def test_create_token_response(self):
+ self.request.response_type = None
+ self.mock_validator.validate_code.side_effect = self.set_scopes
+
+ bearer = BearerToken(self.mock_validator)
+
+ h, token, s = self.auth.create_token_response(self.request, bearer)
+ token = json.loads(token)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('refresh_token', token)
+ self.assertIn('expires_in', token)
+ self.assertIn('scope', token)
+ self.assertIn('id_token', token)
+ self.assertIn('openid', token['scope'])
+
+ self.mock_validator.reset_mock()
+
+ self.request.scopes = ('hello', 'world')
+ h, token, s = self.auth.create_token_response(self.request, bearer)
+ token = json.loads(token)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('refresh_token', token)
+ self.assertIn('expires_in', token)
+ self.assertIn('scope', token)
+ self.assertNotIn('id_token', token)
+ self.assertNotIn('openid', token['scope'])
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_optional_nonce(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.nonce = 'xyz'
+ scope, info = self.auth.validate_authorization_request(self.request)
+
+ bearer = BearerToken(self.mock_validator)
+ self.request.response_mode = 'query'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_query)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_base.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_base.py
new file mode 100644
index 0000000000..a88834b807
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_base.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+import time
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.openid.connect.core.grant_types.base import GrantTypeBase
+
+from tests.unittest import TestCase
+
+
+class GrantBase(GrantTypeBase):
+ """Class to test GrantTypeBase"""
+ def __init__(self, request_validator=None, **kwargs):
+ self.request_validator = request_validator
+
+
+class IDTokenTest(TestCase):
+
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ self.request.scopes = ('hello', 'openid')
+ self.request.expires_in = 1800
+ self.request.client_id = 'abcdef'
+ self.request.code = '1234'
+ self.request.response_type = 'id_token'
+ self.request.grant_type = 'authorization_code'
+ self.request.redirect_uri = 'https://a.b/cb'
+ self.request.state = 'abc'
+ self.request.nonce = None
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.get_id_token.return_value = None
+ self.mock_validator.finalize_id_token.return_value = "eyJ.body.signature"
+ self.token = {}
+
+ self.grant = GrantBase(request_validator=self.mock_validator)
+
+ self.url_query = 'https://a.b/cb?code=abc&state=abc'
+ self.url_fragment = 'https://a.b/cb#code=abc&state=abc'
+
+ def test_id_token_hash(self):
+ self.assertEqual(self.grant.id_token_hash(
+ "Qcb0Orv1zh30vL1MPRsbm-diHiMwcLyZvn1arpZv-Jxf_11jnpEX3Tgfvk",
+ ), "LDktKdoQak3Pk0cnXxCltA", "hash differs from RFC")
+
+ def test_get_id_token_no_openid(self):
+ self.request.scopes = ('hello')
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertNotIn("id_token", token)
+
+ self.request.scopes = None
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertNotIn("id_token", token)
+
+ self.request.scopes = ()
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertNotIn("id_token", token)
+
+ def test_get_id_token(self):
+ self.mock_validator.get_id_token.return_value = "toto"
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertIn("id_token", token)
+ self.assertEqual(token["id_token"], "toto")
+
+ def test_finalize_id_token(self):
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertIn("id_token", token)
+ self.assertEqual(token["id_token"], "eyJ.body.signature")
+ id_token = self.mock_validator.finalize_id_token.call_args[0][0]
+ self.assertEqual(id_token['aud'], 'abcdef')
+ self.assertGreaterEqual(int(time.time()), id_token['iat'])
+
+ def test_finalize_id_token_with_nonce(self):
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request, "my_nonce")
+ self.assertIn("id_token", token)
+ self.assertEqual(token["id_token"], "eyJ.body.signature")
+ id_token = self.mock_validator.finalize_id_token.call_args[0][0]
+ self.assertEqual(id_token['nonce'], 'my_nonce')
+
+ def test_finalize_id_token_with_at_hash(self):
+ self.token["access_token"] = "Qcb0Orv1zh30vL1MPRsbm-diHiMwcLyZvn1arpZv-Jxf_11jnpEX3Tgfvk"
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertIn("id_token", token)
+ self.assertEqual(token["id_token"], "eyJ.body.signature")
+ id_token = self.mock_validator.finalize_id_token.call_args[0][0]
+ self.assertEqual(id_token['at_hash'], 'LDktKdoQak3Pk0cnXxCltA')
+
+ def test_finalize_id_token_with_c_hash(self):
+ self.token["code"] = "Qcb0Orv1zh30vL1MPRsbm-diHiMwcLyZvn1arpZv-Jxf_11jnpEX3Tgfvk"
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertIn("id_token", token)
+ self.assertEqual(token["id_token"], "eyJ.body.signature")
+ id_token = self.mock_validator.finalize_id_token.call_args[0][0]
+ self.assertEqual(id_token['c_hash'], 'LDktKdoQak3Pk0cnXxCltA')
+
+ def test_finalize_id_token_with_c_and_at_hash(self):
+ self.token["code"] = "Qcb0Orv1zh30vL1MPRsbm-diHiMwcLyZvn1arpZv-Jxf_11jnpEX3Tgfvk"
+ self.token["access_token"] = "Qcb0Orv1zh30vL1MPRsbm-diHiMwcLyZvn1arpZv-Jxf_11jnpEX3Tgfvk"
+ token = self.grant.add_id_token(self.token, "token_handler_mock", self.request)
+ self.assertIn("id_token", token)
+ self.assertEqual(token["id_token"], "eyJ.body.signature")
+ id_token = self.mock_validator.finalize_id_token.call_args[0][0]
+ self.assertEqual(id_token['at_hash'], 'LDktKdoQak3Pk0cnXxCltA')
+ self.assertEqual(id_token['c_hash'], 'LDktKdoQak3Pk0cnXxCltA')
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_dispatchers.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_dispatchers.py
new file mode 100644
index 0000000000..ccbada490d
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_dispatchers.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749.grant_types import (
+ AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant,
+ ImplicitGrant as OAuth2ImplicitGrant,
+)
+from oauthlib.openid.connect.core.grant_types.authorization_code import (
+ AuthorizationCodeGrant,
+)
+from oauthlib.openid.connect.core.grant_types.dispatchers import (
+ AuthorizationTokenGrantDispatcher, ImplicitTokenGrantDispatcher,
+)
+from oauthlib.openid.connect.core.grant_types.implicit import ImplicitGrant
+
+from tests.unittest import TestCase
+
+
+class ImplicitTokenGrantDispatcherTest(TestCase):
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ request_validator = mock.MagicMock()
+ implicit_grant = OAuth2ImplicitGrant(request_validator)
+ openid_connect_implicit = ImplicitGrant(request_validator)
+
+ self.dispatcher = ImplicitTokenGrantDispatcher(
+ default_grant=implicit_grant,
+ oidc_grant=openid_connect_implicit
+ )
+
+ def test_create_authorization_response_openid(self):
+ self.request.scopes = ('hello', 'openid')
+ self.request.response_type = 'id_token'
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, ImplicitGrant)
+
+ def test_validate_authorization_request_openid(self):
+ self.request.scopes = ('hello', 'openid')
+ self.request.response_type = 'id_token'
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, ImplicitGrant)
+
+ def test_create_authorization_response_oauth(self):
+ self.request.scopes = ('hello', 'world')
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, OAuth2ImplicitGrant)
+
+ def test_validate_authorization_request_oauth(self):
+ self.request.scopes = ('hello', 'world')
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, OAuth2ImplicitGrant)
+
+
+class DispatcherTest(TestCase):
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ self.request.decoded_body = (
+ ("client_id", "me"),
+ ("code", "code"),
+ ("redirect_url", "https://a.b/cb"),
+ )
+
+ self.request_validator = mock.MagicMock()
+ self.auth_grant = OAuth2AuthorizationCodeGrant(self.request_validator)
+ self.openid_connect_auth = AuthorizationCodeGrant(self.request_validator)
+
+
+class AuthTokenGrantDispatcherOpenIdTest(DispatcherTest):
+
+ def setUp(self):
+ super().setUp()
+ self.request_validator.get_authorization_code_scopes.return_value = ('hello', 'openid')
+ self.dispatcher = AuthorizationTokenGrantDispatcher(
+ self.request_validator,
+ default_grant=self.auth_grant,
+ oidc_grant=self.openid_connect_auth
+ )
+
+ def test_create_token_response_openid(self):
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, AuthorizationCodeGrant)
+ self.assertTrue(self.dispatcher.request_validator.get_authorization_code_scopes.called)
+
+
+class AuthTokenGrantDispatcherOpenIdWithoutCodeTest(DispatcherTest):
+
+ def setUp(self):
+ super().setUp()
+ self.request.decoded_body = (
+ ("client_id", "me"),
+ ("code", ""),
+ ("redirect_url", "https://a.b/cb"),
+ )
+ self.request_validator.get_authorization_code_scopes.return_value = ('hello', 'openid')
+ self.dispatcher = AuthorizationTokenGrantDispatcher(
+ self.request_validator,
+ default_grant=self.auth_grant,
+ oidc_grant=self.openid_connect_auth
+ )
+
+ def test_create_token_response_openid_without_code(self):
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, OAuth2AuthorizationCodeGrant)
+ self.assertFalse(self.dispatcher.request_validator.get_authorization_code_scopes.called)
+
+
+class AuthTokenGrantDispatcherOAuthTest(DispatcherTest):
+
+ def setUp(self):
+ super().setUp()
+ self.request_validator.get_authorization_code_scopes.return_value = ('hello', 'world')
+ self.dispatcher = AuthorizationTokenGrantDispatcher(
+ self.request_validator,
+ default_grant=self.auth_grant,
+ oidc_grant=self.openid_connect_auth
+ )
+
+ def test_create_token_response_oauth(self):
+ handler = self.dispatcher._handler_for_request(self.request)
+ self.assertIsInstance(handler, OAuth2AuthorizationCodeGrant)
+ self.assertTrue(self.dispatcher.request_validator.get_authorization_code_scopes.called)
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_hybrid.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_hybrid.py
new file mode 100644
index 0000000000..111c8c5c4b
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_hybrid.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+from unittest import mock
+
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+from oauthlib.openid.connect.core.grant_types.hybrid import HybridGrant
+
+from __tests__.oauth2.rfc6749.grant_types.test_authorization_code import (
+ AuthorizationCodeGrantTest,
+)
+
+from .test_authorization_code import OpenIDAuthCodeTest
+
+
+class OpenIDHybridInterferenceTest(AuthorizationCodeGrantTest):
+ """Test that OpenID don't interfere with normal OAuth 2 flows."""
+
+ def setUp(self):
+ super().setUp()
+ self.auth = HybridGrant(request_validator=self.mock_validator)
+
+
+class OpenIDHybridCodeTokenTest(OpenIDAuthCodeTest):
+
+ def setUp(self):
+ super().setUp()
+ self.request.response_type = 'code token'
+ self.request.nonce = None
+ self.auth = HybridGrant(request_validator=self.mock_validator)
+ self.url_query = 'https://a.b/cb?code=abc&state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc'
+ self.url_fragment = 'https://a.b/cb#code=abc&state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc'
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_optional_nonce(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.nonce = 'xyz'
+ scope, info = self.auth.validate_authorization_request(self.request)
+
+ bearer = BearerToken(self.mock_validator)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+
+class OpenIDHybridCodeIdTokenTest(OpenIDAuthCodeTest):
+
+ def setUp(self):
+ super().setUp()
+ self.mock_validator.get_code_challenge.return_value = None
+ self.request.response_type = 'code id_token'
+ self.request.nonce = 'zxc'
+ self.auth = HybridGrant(request_validator=self.mock_validator)
+ token = 'MOCKED_TOKEN'
+ self.url_query = 'https://a.b/cb?code=abc&state=abc&id_token=%s' % token
+ self.url_fragment = 'https://a.b/cb#code=abc&state=abc&id_token=%s' % token
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_required_nonce(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.nonce = None
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
+
+ bearer = BearerToken(self.mock_validator)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ def test_id_token_contains_nonce(self):
+ token = {}
+ self.mock_validator.get_id_token.side_effect = None
+ self.mock_validator.get_id_token.return_value = None
+ token = self.auth.add_id_token(token, None, self.request)
+ assert self.mock_validator.finalize_id_token.call_count == 1
+ claims = self.mock_validator.finalize_id_token.call_args[0][0]
+ assert "nonce" in claims
+
+
+class OpenIDHybridCodeIdTokenTokenTest(OpenIDAuthCodeTest):
+
+ def setUp(self):
+ super().setUp()
+ self.mock_validator.get_code_challenge.return_value = None
+ self.request.response_type = 'code id_token token'
+ self.request.nonce = 'xyz'
+ self.auth = HybridGrant(request_validator=self.mock_validator)
+ token = 'MOCKED_TOKEN'
+ self.url_query = 'https://a.b/cb?code=abc&state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
+ self.url_fragment = 'https://a.b/cb#code=abc&state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_required_nonce(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.nonce = None
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
+
+ bearer = BearerToken(self.mock_validator)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_implicit.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_implicit.py
new file mode 100644
index 0000000000..825093138c
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_implicit.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+from oauthlib.openid.connect.core.grant_types.implicit import ImplicitGrant
+
+from __tests__.oauth2.rfc6749.grant_types.test_implicit import ImplicitGrantTest
+from tests.unittest import TestCase
+
+from .test_authorization_code import get_id_token_mock
+
+
+class OpenIDImplicitInterferenceTest(ImplicitGrantTest):
+ """Test that OpenID don't interfere with normal OAuth 2 flows."""
+
+ def setUp(self):
+ super().setUp()
+ self.auth = ImplicitGrant(request_validator=self.mock_validator)
+
+
+class OpenIDImplicitTest(TestCase):
+
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ self.request.scopes = ('hello', 'openid')
+ self.request.expires_in = 1800
+ self.request.client_id = 'abcdef'
+ self.request.response_type = 'id_token token'
+ self.request.redirect_uri = 'https://a.b/cb'
+ self.request.state = 'abc'
+ self.request.nonce = 'xyz'
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.get_id_token.side_effect = get_id_token_mock
+ self.auth = ImplicitGrant(request_validator=self.mock_validator)
+
+ token = 'MOCKED_TOKEN'
+ self.url_query = 'https://a.b/cb?state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
+ self.url_fragment = 'https://a.b/cb#state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_authorization(self, generate_token):
+ scope, info = self.auth.validate_authorization_request(self.request)
+
+ generate_token.return_value = 'abc'
+ bearer = BearerToken(self.mock_validator)
+
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ self.request.response_type = 'id_token'
+ token = 'MOCKED_TOKEN'
+ url = 'https://a.b/cb#state=abc&id_token=%s' % token
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], url, parse_fragment=True)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_no_prompt_authorization(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.prompt = 'none'
+
+ bearer = BearerToken(self.mock_validator)
+
+ self.request.response_mode = 'query'
+ self.request.id_token_hint = 'me@email.com'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_query)
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+ # Test alternative response modes
+ self.request.response_mode = 'fragment'
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
+
+ # Ensure silent authentication and authorization is done
+ self.mock_validator.validate_silent_login.return_value = False
+ self.mock_validator.validate_silent_authorization.return_value = True
+ self.assertRaises(errors.LoginRequired,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=login_required', h['Location'])
+
+ self.mock_validator.validate_silent_login.return_value = True
+ self.mock_validator.validate_silent_authorization.return_value = False
+ self.assertRaises(errors.ConsentRequired,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=consent_required', h['Location'])
+
+ # ID token hint must match logged in user
+ self.mock_validator.validate_silent_authorization.return_value = True
+ self.mock_validator.validate_user_match.return_value = False
+ self.assertRaises(errors.LoginRequired,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=login_required', h['Location'])
+
+ def test_none_multi_prompt(self):
+ bearer = BearerToken(self.mock_validator)
+
+ self.request.prompt = 'none login'
+ self.assertRaises(errors.InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ self.request.prompt = 'none consent'
+ self.assertRaises(errors.InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ self.request.prompt = 'none select_account'
+ self.assertRaises(errors.InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ self.request.prompt = 'consent none login'
+ self.assertRaises(errors.InvalidRequestError,
+ self.auth.validate_authorization_request,
+ self.request)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_required_nonce(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.nonce = None
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
+
+ bearer = BearerToken(self.mock_validator)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
+
+
+class OpenIDImplicitNoAccessTokenTest(OpenIDImplicitTest):
+ def setUp(self):
+ super().setUp()
+ self.request.response_type = 'id_token'
+ token = 'MOCKED_TOKEN'
+ self.url_query = 'https://a.b/cb?state=abc&id_token=%s' % token
+ self.url_fragment = 'https://a.b/cb#state=abc&id_token=%s' % token
+
+ @mock.patch('oauthlib.common.generate_token')
+ def test_required_nonce(self, generate_token):
+ generate_token.return_value = 'abc'
+ self.request.nonce = None
+ self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
+
+ bearer = BearerToken(self.mock_validator)
+ h, b, s = self.auth.create_authorization_response(self.request, bearer)
+ self.assertIn('error=invalid_request', h['Location'])
+ self.assertIsNone(b)
+ self.assertEqual(s, 302)
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_refresh_token.py b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_refresh_token.py
new file mode 100644
index 0000000000..2e363fef1a
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/grant_types/test_refresh_token.py
@@ -0,0 +1,105 @@
+import json
+from unittest import mock
+
+from oauthlib.common import Request
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+from oauthlib.openid.connect.core.grant_types import RefreshTokenGrant
+
+from __tests__.oauth2.rfc6749.grant_types.test_refresh_token import (
+ RefreshTokenGrantTest,
+)
+from tests.unittest import TestCase
+
+
+def get_id_token_mock(token, token_handler, request):
+ return "MOCKED_TOKEN"
+
+
+class OpenIDRefreshTokenInterferenceTest(RefreshTokenGrantTest):
+ """Test that OpenID don't interfere with normal OAuth 2 flows."""
+
+ def setUp(self):
+ super().setUp()
+ self.auth = RefreshTokenGrant(request_validator=self.mock_validator)
+
+
+class OpenIDRefreshTokenTest(TestCase):
+
+ def setUp(self):
+ self.request = Request('http://a.b/path')
+ self.request.grant_type = 'refresh_token'
+ self.request.refresh_token = 'lsdkfhj230'
+ self.request.scope = ('hello', 'openid')
+ self.mock_validator = mock.MagicMock()
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.authenticate_client.side_effect = self.set_client
+ self.mock_validator.get_id_token.side_effect = get_id_token_mock
+ self.auth = RefreshTokenGrant(request_validator=self.mock_validator)
+
+ def set_client(self, request):
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked'
+ return True
+
+ def test_refresh_id_token(self):
+ self.mock_validator.get_original_scopes.return_value = [
+ 'hello', 'openid'
+ ]
+ bearer = BearerToken(self.mock_validator)
+
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer
+ )
+
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('refresh_token', token)
+ self.assertIn('id_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertEqual(token['scope'], 'hello openid')
+ self.mock_validator.refresh_id_token.assert_called_once_with(
+ self.request
+ )
+
+ def test_refresh_id_token_false(self):
+ self.mock_validator.refresh_id_token.return_value = False
+ self.mock_validator.get_original_scopes.return_value = [
+ 'hello', 'openid'
+ ]
+ bearer = BearerToken(self.mock_validator)
+
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer
+ )
+
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('refresh_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertEqual(token['scope'], 'hello openid')
+ self.assertNotIn('id_token', token)
+ self.mock_validator.refresh_id_token.assert_called_once_with(
+ self.request
+ )
+
+ def test_refresh_token_without_openid_scope(self):
+ self.request.scope = "hello"
+ bearer = BearerToken(self.mock_validator)
+
+ headers, body, status_code = self.auth.create_token_response(
+ self.request, bearer
+ )
+
+ token = json.loads(body)
+ self.assertEqual(self.mock_validator.save_token.call_count, 1)
+ self.assertIn('access_token', token)
+ self.assertIn('refresh_token', token)
+ self.assertIn('token_type', token)
+ self.assertIn('expires_in', token)
+ self.assertNotIn('id_token', token)
+ self.assertEqual(token['scope'], 'hello')
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/test_request_validator.py b/contrib/python/oauthlib/tests/openid/connect/core/test_request_validator.py
new file mode 100644
index 0000000000..6a800d41ca
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/test_request_validator.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+from oauthlib.openid import RequestValidator
+
+from tests.unittest import TestCase
+
+
+class RequestValidatorTest(TestCase):
+
+ def test_method_contracts(self):
+ v = RequestValidator()
+ self.assertRaises(
+ NotImplementedError,
+ v.get_authorization_code_scopes,
+ 'client_id', 'code', 'redirect_uri', 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.get_jwt_bearer_token,
+ 'token', 'token_handler', 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.finalize_id_token,
+ 'id_token', 'token', 'token_handler', 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.validate_jwt_bearer_token,
+ 'token', 'scopes', 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.validate_id_token,
+ 'token', 'scopes', 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.validate_silent_authorization,
+ 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.validate_silent_login,
+ 'request'
+ )
+ self.assertRaises(
+ NotImplementedError,
+ v.validate_user_match,
+ 'id_token_hint', 'scopes', 'claims', 'request'
+ )
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/test_server.py b/contrib/python/oauthlib/tests/openid/connect/core/test_server.py
new file mode 100644
index 0000000000..47f0ecc842
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/test_server.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+import json
+from unittest import mock
+
+from oauthlib.oauth2.rfc6749 import errors
+from oauthlib.oauth2.rfc6749.endpoints.authorization import (
+ AuthorizationEndpoint,
+)
+from oauthlib.oauth2.rfc6749.endpoints.token import TokenEndpoint
+from oauthlib.oauth2.rfc6749.tokens import BearerToken
+from oauthlib.openid.connect.core.grant_types.authorization_code import (
+ AuthorizationCodeGrant,
+)
+from oauthlib.openid.connect.core.grant_types.hybrid import HybridGrant
+from oauthlib.openid.connect.core.grant_types.implicit import ImplicitGrant
+
+from tests.unittest import TestCase
+
+
+class AuthorizationEndpointTest(TestCase):
+
+ def setUp(self):
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.get_code_challenge.return_value = None
+ self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
+ auth_code = AuthorizationCodeGrant(request_validator=self.mock_validator)
+ auth_code.save_authorization_code = mock.MagicMock()
+ implicit = ImplicitGrant(
+ request_validator=self.mock_validator)
+ implicit.save_token = mock.MagicMock()
+ hybrid = HybridGrant(self.mock_validator)
+
+ response_types = {
+ 'code': auth_code,
+ 'token': implicit,
+ 'id_token': implicit,
+ 'id_token token': implicit,
+ 'code token': hybrid,
+ 'code id_token': hybrid,
+ 'code token id_token': hybrid,
+ 'none': auth_code
+ }
+ self.expires_in = 1800
+ token = BearerToken(
+ self.mock_validator,
+ expires_in=self.expires_in
+ )
+ self.endpoint = AuthorizationEndpoint(
+ default_response_type='code',
+ default_token_type=token,
+ response_types=response_types
+ )
+
+ # TODO: Add hybrid grant test
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_authorization_grant(self):
+ uri = 'http://i.b/l?response_type=code&client_id=me&scope=all+of+them&state=xyz'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?code=abc&state=xyz')
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_implicit_grant(self):
+ uri = 'http://i.b/l?response_type=token&client_id=me&scope=all+of+them&state=xyz'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me#access_token=abc&expires_in=' + str(self.expires_in) + '&token_type=Bearer&state=xyz&scope=all+of+them', parse_fragment=True)
+
+ def test_none_grant(self):
+ uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them&state=xyz'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?state=xyz', parse_fragment=True)
+ self.assertIsNone(body)
+ self.assertEqual(status_code, 302)
+
+ # and without the state parameter
+ uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me', parse_fragment=True)
+ self.assertIsNone(body)
+ self.assertEqual(status_code, 302)
+
+ def test_missing_type(self):
+ uri = 'http://i.b/l?client_id=me&scope=all+of+them'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ self.mock_validator.validate_request = mock.MagicMock(
+ side_effect=errors.InvalidRequestError())
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?error=invalid_request&error_description=Missing+response_type+parameter.')
+
+ def test_invalid_type(self):
+ uri = 'http://i.b/l?response_type=invalid&client_id=me&scope=all+of+them'
+ uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
+ self.mock_validator.validate_request = mock.MagicMock(
+ side_effect=errors.UnsupportedResponseTypeError())
+ headers, body, status_code = self.endpoint.create_authorization_response(
+ uri, scopes=['all', 'of', 'them'])
+ self.assertIn('Location', headers)
+ self.assertURLEqual(headers['Location'], 'http://back.to/me?error=unsupported_response_type')
+
+
+class TokenEndpointTest(TestCase):
+
+ def setUp(self):
+ def set_user(request):
+ request.user = mock.MagicMock()
+ request.client = mock.MagicMock()
+ request.client.client_id = 'mocked_client_id'
+ return True
+
+ self.mock_validator = mock.MagicMock()
+ self.mock_validator.authenticate_client.side_effect = set_user
+ self.mock_validator.get_code_challenge.return_value = None
+ self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
+ auth_code = AuthorizationCodeGrant(
+ request_validator=self.mock_validator)
+ supported_types = {
+ 'authorization_code': auth_code,
+ }
+ self.expires_in = 1800
+ token = BearerToken(
+ self.mock_validator,
+ expires_in=self.expires_in
+ )
+ self.endpoint = TokenEndpoint(
+ 'authorization_code',
+ default_token_type=token,
+ grant_types=supported_types
+ )
+
+ @mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
+ def test_authorization_grant(self):
+ body = 'grant_type=authorization_code&code=abc&scope=all+of+them'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': 'abc',
+ 'refresh_token': 'abc',
+ 'scope': 'all of them'
+ }
+ self.assertEqual(json.loads(body), token)
+
+ body = 'grant_type=authorization_code&code=abc'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ token = {
+ 'token_type': 'Bearer',
+ 'expires_in': self.expires_in,
+ 'access_token': 'abc',
+ 'refresh_token': 'abc'
+ }
+ self.assertEqual(json.loads(body), token)
+
+ # ignore useless fields
+ body = 'grant_type=authorization_code&code=abc&state=foobar'
+ headers, body, status_code = self.endpoint.create_token_response(
+ '', body=body)
+ self.assertEqual(json.loads(body), token)
+
+ def test_missing_type(self):
+ _, body, _ = self.endpoint.create_token_response('', body='')
+ token = {'error': 'unsupported_grant_type'}
+ self.assertEqual(json.loads(body), token)
+
+ def test_invalid_type(self):
+ body = 'grant_type=invalid'
+ _, body, _ = self.endpoint.create_token_response('', body=body)
+ token = {'error': 'unsupported_grant_type'}
+ self.assertEqual(json.loads(body), token)
diff --git a/contrib/python/oauthlib/tests/openid/connect/core/test_tokens.py b/contrib/python/oauthlib/tests/openid/connect/core/test_tokens.py
new file mode 100644
index 0000000000..fe90142bb8
--- /dev/null
+++ b/contrib/python/oauthlib/tests/openid/connect/core/test_tokens.py
@@ -0,0 +1,157 @@
+from unittest import mock
+
+from oauthlib.openid.connect.core.tokens import JWTToken
+
+from tests.unittest import TestCase
+
+
+class JWTTokenTestCase(TestCase):
+
+ def test_create_token_callable_expires_in(self):
+ """
+ Test retrieval of the expires in value by calling the callable expires_in property
+ """
+
+ expires_in_mock = mock.MagicMock()
+ request_mock = mock.MagicMock()
+
+ token = JWTToken(expires_in=expires_in_mock, request_validator=mock.MagicMock())
+ token.create_token(request=request_mock)
+
+ expires_in_mock.assert_called_once_with(request_mock)
+
+ def test_create_token_non_callable_expires_in(self):
+ """
+ When a non callable expires in is set this should just be set to the request
+ """
+
+ expires_in_mock = mock.NonCallableMagicMock()
+ request_mock = mock.MagicMock()
+
+ token = JWTToken(expires_in=expires_in_mock, request_validator=mock.MagicMock())
+ token.create_token(request=request_mock)
+
+ self.assertFalse(expires_in_mock.called)
+ self.assertEqual(request_mock.expires_in, expires_in_mock)
+
+ def test_create_token_calls_get_id_token(self):
+ """
+ When create_token is called the call should be forwarded to the get_id_token on the token validator
+ """
+ request_mock = mock.MagicMock()
+
+ with mock.patch('oauthlib.openid.RequestValidator',
+ autospec=True) as RequestValidatorMock:
+
+ request_validator = RequestValidatorMock()
+
+ token = JWTToken(expires_in=mock.MagicMock(), request_validator=request_validator)
+ token.create_token(request=request_mock)
+
+ request_validator.get_jwt_bearer_token.assert_called_once_with(None, None, request_mock)
+
+ def test_validate_request_token_from_headers(self):
+ """
+ Bearer token get retrieved from headers.
+ """
+
+ with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
+ mock.patch('oauthlib.openid.RequestValidator',
+ autospec=True) as RequestValidatorMock:
+ request_validator_mock = RequestValidatorMock()
+
+ token = JWTToken(request_validator=request_validator_mock)
+
+ request = RequestMock('/uri')
+ # Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
+ # with autospec=True
+ request.scopes = mock.MagicMock()
+ request.headers = {
+ 'Authorization': 'Bearer some-token-from-header'
+ }
+
+ token.validate_request(request=request)
+
+ request_validator_mock.validate_jwt_bearer_token.assert_called_once_with('some-token-from-header',
+ request.scopes,
+ request)
+
+ def test_validate_request_token_from_headers_basic(self):
+ """
+ Wrong kind of token (Basic) retrieved from headers. Confirm token is not parsed.
+ """
+
+ with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
+ mock.patch('oauthlib.openid.RequestValidator',
+ autospec=True) as RequestValidatorMock:
+ request_validator_mock = RequestValidatorMock()
+
+ token = JWTToken(request_validator=request_validator_mock)
+
+ request = RequestMock('/uri')
+ # Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
+ # with autospec=True
+ request.scopes = mock.MagicMock()
+ request.headers = {
+ 'Authorization': 'Basic some-token-from-header'
+ }
+
+ token.validate_request(request=request)
+
+ request_validator_mock.validate_jwt_bearer_token.assert_called_once_with(None,
+ request.scopes,
+ request)
+
+ def test_validate_token_from_request(self):
+ """
+ Token get retrieved from request object.
+ """
+
+ with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock, \
+ mock.patch('oauthlib.openid.RequestValidator',
+ autospec=True) as RequestValidatorMock:
+ request_validator_mock = RequestValidatorMock()
+
+ token = JWTToken(request_validator=request_validator_mock)
+
+ request = RequestMock('/uri')
+ # Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
+ # with autospec=True
+ request.scopes = mock.MagicMock()
+ request.access_token = 'some-token-from-request-object'
+ request.headers = {}
+
+ token.validate_request(request=request)
+
+ request_validator_mock.validate_jwt_bearer_token.assert_called_once_with('some-token-from-request-object',
+ request.scopes,
+ request)
+
+ def test_estimate_type(self):
+ """
+ Estimate type results for a jwt token
+ """
+
+ def test_token(token, expected_result):
+ with mock.patch('oauthlib.common.Request', autospec=True) as RequestMock:
+ jwt_token = JWTToken()
+
+ request = RequestMock('/uri')
+ # Scopes is retrieved using the __call__ method which is not picked up correctly by mock.patch
+ # with autospec=True
+ request.headers = {
+ 'Authorization': 'Bearer {}'.format(token)
+ }
+
+ result = jwt_token.estimate_type(request=request)
+
+ self.assertEqual(result, expected_result)
+
+ test_items = (
+ ('eyfoo.foo.foo', 10),
+ ('eyfoo.foo.foo.foo.foo', 10),
+ ('eyfoobar', 0)
+ )
+
+ for token, expected_result in test_items:
+ test_token(token, expected_result)
diff --git a/contrib/python/oauthlib/tests/test_common.py b/contrib/python/oauthlib/tests/test_common.py
new file mode 100644
index 0000000000..7f0e35bc9c
--- /dev/null
+++ b/contrib/python/oauthlib/tests/test_common.py
@@ -0,0 +1,243 @@
+# -*- coding: utf-8 -*-
+import oauthlib
+from oauthlib.common import (
+ CaseInsensitiveDict, Request, add_params_to_uri, extract_params,
+ generate_client_id, generate_nonce, generate_timestamp, generate_token,
+ urldecode,
+)
+
+from tests.unittest import TestCase
+
+PARAMS_DICT = {'foo': 'bar', 'baz': '123', }
+PARAMS_TWOTUPLE = [('foo', 'bar'), ('baz', '123')]
+PARAMS_FORMENCODED = 'foo=bar&baz=123'
+URI = 'http://www.someuri.com'
+
+
+class EncodingTest(TestCase):
+
+ def test_urldecode(self):
+ self.assertCountEqual(urldecode(''), [])
+ self.assertCountEqual(urldecode('='), [('', '')])
+ self.assertCountEqual(urldecode('%20'), [(' ', '')])
+ self.assertCountEqual(urldecode('+'), [(' ', '')])
+ self.assertCountEqual(urldecode('c2'), [('c2', '')])
+ self.assertCountEqual(urldecode('c2='), [('c2', '')])
+ self.assertCountEqual(urldecode('foo=bar'), [('foo', 'bar')])
+ self.assertCountEqual(urldecode('foo_%20~=.bar-'),
+ [('foo_ ~', '.bar-')])
+ self.assertCountEqual(urldecode('foo=1,2,3'), [('foo', '1,2,3')])
+ self.assertCountEqual(urldecode('foo=(1,2,3)'), [('foo', '(1,2,3)')])
+ self.assertCountEqual(urldecode('foo=bar.*'), [('foo', 'bar.*')])
+ self.assertCountEqual(urldecode('foo=bar@spam'), [('foo', 'bar@spam')])
+ self.assertCountEqual(urldecode('foo=bar/baz'), [('foo', 'bar/baz')])
+ self.assertCountEqual(urldecode('foo=bar?baz'), [('foo', 'bar?baz')])
+ self.assertCountEqual(urldecode('foo=bar\'s'), [('foo', 'bar\'s')])
+ self.assertCountEqual(urldecode('foo=$'), [('foo', '$')])
+ self.assertRaises(ValueError, urldecode, 'foo bar')
+ self.assertRaises(ValueError, urldecode, '%R')
+ self.assertRaises(ValueError, urldecode, '%RA')
+ self.assertRaises(ValueError, urldecode, '%AR')
+ self.assertRaises(ValueError, urldecode, '%RR')
+
+
+class ParameterTest(TestCase):
+
+ def test_extract_params_dict(self):
+ self.assertCountEqual(extract_params(PARAMS_DICT), PARAMS_TWOTUPLE)
+
+ def test_extract_params_twotuple(self):
+ self.assertCountEqual(extract_params(PARAMS_TWOTUPLE), PARAMS_TWOTUPLE)
+
+ def test_extract_params_formencoded(self):
+ self.assertCountEqual(extract_params(PARAMS_FORMENCODED),
+ PARAMS_TWOTUPLE)
+
+ def test_extract_params_blank_string(self):
+ self.assertCountEqual(extract_params(''), [])
+
+ def test_extract_params_empty_list(self):
+ self.assertCountEqual(extract_params([]), [])
+
+ def test_extract_non_formencoded_string(self):
+ self.assertIsNone(extract_params('not a formencoded string'))
+
+ def test_extract_invalid(self):
+ self.assertIsNone(extract_params(object()))
+ self.assertIsNone(extract_params([('')]))
+
+ def test_add_params_to_uri(self):
+ correct = '{}?{}'.format(URI, PARAMS_FORMENCODED)
+ self.assertURLEqual(add_params_to_uri(URI, PARAMS_DICT), correct)
+ self.assertURLEqual(add_params_to_uri(URI, PARAMS_TWOTUPLE), correct)
+
+
+class GeneratorTest(TestCase):
+
+ def test_generate_timestamp(self):
+ timestamp = generate_timestamp()
+ self.assertIsInstance(timestamp, str)
+ self.assertTrue(int(timestamp))
+ self.assertGreater(int(timestamp), 1331672335)
+
+ def test_generate_nonce(self):
+ """Ping me (ib-lundgren) when you discover how to test randomness."""
+ nonce = generate_nonce()
+ for i in range(50):
+ self.assertNotEqual(nonce, generate_nonce())
+
+ def test_generate_token(self):
+ token = generate_token()
+ self.assertEqual(len(token), 30)
+
+ token = generate_token(length=44)
+ self.assertEqual(len(token), 44)
+
+ token = generate_token(length=6, chars="python")
+ self.assertEqual(len(token), 6)
+ for c in token:
+ self.assertIn(c, "python")
+
+ def test_generate_client_id(self):
+ client_id = generate_client_id()
+ self.assertEqual(len(client_id), 30)
+
+ client_id = generate_client_id(length=44)
+ self.assertEqual(len(client_id), 44)
+
+ client_id = generate_client_id(length=6, chars="python")
+ self.assertEqual(len(client_id), 6)
+ for c in client_id:
+ self.assertIn(c, "python")
+
+
+class RequestTest(TestCase):
+
+ def test_non_unicode_params(self):
+ r = Request(
+ b'http://a.b/path?query',
+ http_method=b'GET',
+ body=b'you=shall+pass',
+ headers={
+ b'a': b'b',
+ }
+ )
+ self.assertEqual(r.uri, 'http://a.b/path?query')
+ self.assertEqual(r.http_method, 'GET')
+ self.assertEqual(r.body, 'you=shall+pass')
+ self.assertEqual(r.decoded_body, [('you', 'shall pass')])
+ self.assertEqual(r.headers, {'a': 'b'})
+
+ def test_none_body(self):
+ r = Request(URI)
+ self.assertIsNone(r.decoded_body)
+
+ def test_empty_list_body(self):
+ r = Request(URI, body=[])
+ self.assertEqual(r.decoded_body, [])
+
+ def test_empty_dict_body(self):
+ r = Request(URI, body={})
+ self.assertEqual(r.decoded_body, [])
+
+ def test_empty_string_body(self):
+ r = Request(URI, body='')
+ self.assertEqual(r.decoded_body, [])
+
+ def test_non_formencoded_string_body(self):
+ body = 'foo bar'
+ r = Request(URI, body=body)
+ self.assertIsNone(r.decoded_body)
+
+ def test_param_free_sequence_body(self):
+ body = [1, 1, 2, 3, 5, 8, 13]
+ r = Request(URI, body=body)
+ self.assertIsNone(r.decoded_body)
+
+ def test_list_body(self):
+ r = Request(URI, body=PARAMS_TWOTUPLE)
+ self.assertCountEqual(r.decoded_body, PARAMS_TWOTUPLE)
+
+ def test_dict_body(self):
+ r = Request(URI, body=PARAMS_DICT)
+ self.assertCountEqual(r.decoded_body, PARAMS_TWOTUPLE)
+
+ def test_getattr_existing_attribute(self):
+ r = Request(URI, body='foo bar')
+ self.assertEqual('foo bar', getattr(r, 'body'))
+
+ def test_getattr_return_default(self):
+ r = Request(URI, body='')
+ actual_value = getattr(r, 'does_not_exist', 'foo bar')
+ self.assertEqual('foo bar', actual_value)
+
+ def test_getattr_raise_attribute_error(self):
+ r = Request(URI, body='foo bar')
+ with self.assertRaises(AttributeError):
+ getattr(r, 'does_not_exist')
+
+ def test_sanitizing_authorization_header(self):
+ r = Request(URI, headers={'Accept': 'application/json',
+ 'Authorization': 'Basic Zm9vOmJhcg=='}
+ )
+ self.assertNotIn('Zm9vOmJhcg==', repr(r))
+ self.assertIn('<SANITIZED>', repr(r))
+ # Double-check we didn't modify the underlying object:
+ self.assertEqual(r.headers['Authorization'], 'Basic Zm9vOmJhcg==')
+
+ def test_token_body(self):
+ payload = 'client_id=foo&refresh_token=bar'
+ r = Request(URI, body=payload)
+ self.assertNotIn('bar', repr(r))
+ self.assertIn('<SANITIZED>', repr(r))
+
+ payload = 'refresh_token=bar&client_id=foo'
+ r = Request(URI, body=payload)
+ self.assertNotIn('bar', repr(r))
+ self.assertIn('<SANITIZED>', repr(r))
+
+ def test_password_body(self):
+ payload = 'username=foo&password=bar'
+ r = Request(URI, body=payload)
+ self.assertNotIn('bar', repr(r))
+ self.assertIn('<SANITIZED>', repr(r))
+
+ payload = 'password=bar&username=foo'
+ r = Request(URI, body=payload)
+ self.assertNotIn('bar', repr(r))
+ self.assertIn('<SANITIZED>', repr(r))
+
+ def test_headers_params(self):
+ r = Request(URI, headers={'token': 'foobar'}, body='token=banana')
+ self.assertEqual(r.headers['token'], 'foobar')
+ self.assertEqual(r.token, 'banana')
+
+ def test_sanitized_request_non_debug_mode(self):
+ """make sure requests are sanitized when in non debug mode.
+ For the debug mode, the other tests checking sanitization should prove
+ that debug mode is working.
+ """
+ try:
+ oauthlib.set_debug(False)
+ r = Request(URI, headers={'token': 'foobar'}, body='token=banana')
+ self.assertNotIn('token', repr(r))
+ self.assertIn('SANITIZED', repr(r))
+ finally:
+ # set flag back for other tests
+ oauthlib.set_debug(True)
+
+
+class CaseInsensitiveDictTest(TestCase):
+
+ def test_basic(self):
+ cid = CaseInsensitiveDict({})
+ cid['a'] = 'b'
+ cid['c'] = 'd'
+ del cid['c']
+ self.assertEqual(cid['A'], 'b')
+ self.assertEqual(cid['a'], 'b')
+
+ def test_update(self):
+ cid = CaseInsensitiveDict({})
+ cid.update({'KeY': 'value'})
+ self.assertEqual(cid['kEy'], 'value')
diff --git a/contrib/python/oauthlib/tests/test_uri_validate.py b/contrib/python/oauthlib/tests/test_uri_validate.py
new file mode 100644
index 0000000000..6a9f8ea60b
--- /dev/null
+++ b/contrib/python/oauthlib/tests/test_uri_validate.py
@@ -0,0 +1,84 @@
+import unittest
+from oauthlib.uri_validate import is_absolute_uri
+
+from tests.unittest import TestCase
+
+
+class UriValidateTest(TestCase):
+
+ def test_is_absolute_uri(self):
+ self.assertIsNotNone(is_absolute_uri('schema://example.com/path'))
+ self.assertIsNotNone(is_absolute_uri('https://example.com/path'))
+ self.assertIsNotNone(is_absolute_uri('https://example.com'))
+ self.assertIsNotNone(is_absolute_uri('https://example.com:443/path'))
+ self.assertIsNotNone(is_absolute_uri('https://example.com:443/'))
+ self.assertIsNotNone(is_absolute_uri('https://example.com:443'))
+ self.assertIsNotNone(is_absolute_uri('http://example.com'))
+ self.assertIsNotNone(is_absolute_uri('http://example.com/path'))
+ self.assertIsNotNone(is_absolute_uri('http://example.com:80/path'))
+
+ def test_query(self):
+ self.assertIsNotNone(is_absolute_uri('http://example.com:80/path?foo'))
+ self.assertIsNotNone(is_absolute_uri('http://example.com:80/path?foo=bar'))
+ self.assertIsNotNone(is_absolute_uri('http://example.com:80/path?foo=bar&fruit=banana'))
+
+ def test_fragment_forbidden(self):
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path#foo'))
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path#foo=bar'))
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path#foo=bar&fruit=banana'))
+
+ def test_combined_forbidden(self):
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path?foo#bar'))
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path?foo&bar#fruit'))
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path?foo=1&bar#fruit=banana'))
+ self.assertIsNone(is_absolute_uri('http://example.com:80/path?foo=1&bar=2#fruit=banana&bar=foo'))
+
+ def test_custom_scheme(self):
+ self.assertIsNotNone(is_absolute_uri('com.example.bundle.id://'))
+
+ def test_ipv6_bracket(self):
+ self.assertIsNotNone(is_absolute_uri('http://[::1]:38432/path'))
+ self.assertIsNotNone(is_absolute_uri('http://[::1]/path'))
+ self.assertIsNotNone(is_absolute_uri('http://[fd01:0001::1]/path'))
+ self.assertIsNotNone(is_absolute_uri('http://[fd01:1::1]/path'))
+ self.assertIsNotNone(is_absolute_uri('http://[0123:4567:89ab:cdef:0123:4567:89ab:cdef]/path'))
+ self.assertIsNotNone(is_absolute_uri('http://[0123:4567:89ab:cdef:0123:4567:89ab:cdef]:8080/path'))
+
+ @unittest.skip("ipv6 edge-cases not supported")
+ def test_ipv6_edge_cases(self):
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8::'))
+ self.assertIsNotNone(is_absolute_uri('http://::1234:5678'))
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8::1234:5678'))
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8:3333:4444:5555:6666:7777:8888'))
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF'))
+ self.assertIsNotNone(is_absolute_uri('http://0123:4567:89ab:cdef:0123:4567:89ab:cdef/path'))
+ self.assertIsNotNone(is_absolute_uri('http://::'))
+ self.assertIsNotNone(is_absolute_uri('http://2001:0db8:0001:0000:0000:0ab9:C0A8:0102'))
+
+ @unittest.skip("ipv6 dual ipv4 not supported")
+ def test_ipv6_dual(self):
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8:3333:4444:5555:6666:1.2.3.4'))
+ self.assertIsNotNone(is_absolute_uri('http://::11.22.33.44'))
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8::123.123.123.123'))
+ self.assertIsNotNone(is_absolute_uri('http://::1234:5678:91.123.4.56'))
+ self.assertIsNotNone(is_absolute_uri('http://::1234:5678:1.2.3.4'))
+ self.assertIsNotNone(is_absolute_uri('http://2001:db8::1234:5678:5.6.7.8'))
+
+ def test_ipv4(self):
+ self.assertIsNotNone(is_absolute_uri('http://127.0.0.1:38432/'))
+ self.assertIsNotNone(is_absolute_uri('http://127.0.0.1:38432/'))
+ self.assertIsNotNone(is_absolute_uri('http://127.1:38432/'))
+
+ def test_failures(self):
+ self.assertIsNone(is_absolute_uri('http://example.com:notaport/path'))
+ self.assertIsNone(is_absolute_uri('wrong'))
+ self.assertIsNone(is_absolute_uri('http://[:1]:38432/path'))
+ self.assertIsNone(is_absolute_uri('http://[abcd:efgh::1]/'))
+
+ def test_recursive_regex(self):
+ from datetime import datetime
+ t0 = datetime.now()
+ is_absolute_uri('http://[::::::::::::::::::::::::::]/path')
+ t1 = datetime.now()
+ spent = t1 - t0
+ self.assertGreater(0.1, spent.total_seconds(), "possible recursive loop detected")
diff --git a/contrib/python/oauthlib/tests/unittest/__init__.py b/contrib/python/oauthlib/tests/unittest/__init__.py
new file mode 100644
index 0000000000..f94f35c664
--- /dev/null
+++ b/contrib/python/oauthlib/tests/unittest/__init__.py
@@ -0,0 +1,32 @@
+import urllib.parse as urlparse
+from unittest import TestCase
+
+
+# URL comparison where query param order is insignificant
+def url_equals(self, a, b, parse_fragment=False):
+ parsed_a = urlparse.urlparse(a, allow_fragments=parse_fragment)
+ parsed_b = urlparse.urlparse(b, allow_fragments=parse_fragment)
+ query_a = urlparse.parse_qsl(parsed_a.query)
+ query_b = urlparse.parse_qsl(parsed_b.query)
+ if parse_fragment:
+ fragment_a = urlparse.parse_qsl(parsed_a.fragment)
+ fragment_b = urlparse.parse_qsl(parsed_b.fragment)
+ self.assertCountEqual(fragment_a, fragment_b)
+ else:
+ self.assertEqual(parsed_a.fragment, parsed_b.fragment)
+ self.assertEqual(parsed_a.scheme, parsed_b.scheme)
+ self.assertEqual(parsed_a.netloc, parsed_b.netloc)
+ self.assertEqual(parsed_a.path, parsed_b.path)
+ self.assertEqual(parsed_a.params, parsed_b.params)
+ self.assertEqual(parsed_a.username, parsed_b.username)
+ self.assertEqual(parsed_a.password, parsed_b.password)
+ self.assertEqual(parsed_a.hostname, parsed_b.hostname)
+ self.assertEqual(parsed_a.port, parsed_b.port)
+ self.assertCountEqual(query_a, query_b)
+
+
+TestCase.assertURLEqual = url_equals
+
+# Form body comparison where order is insignificant
+TestCase.assertFormBodyEqual = lambda self, a, b: self.assertCountEqual(
+ urlparse.parse_qsl(a), urlparse.parse_qsl(b))
diff --git a/contrib/python/oauthlib/tests/ya.make b/contrib/python/oauthlib/tests/ya.make
new file mode 100644
index 0000000000..b207e5ea63
--- /dev/null
+++ b/contrib/python/oauthlib/tests/ya.make
@@ -0,0 +1,88 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/oauthlib
+ contrib/python/mock
+ contrib/python/PyJWT
+ contrib/python/blinker
+)
+
+PY_SRCS(
+ NAMESPACE tests
+ unittest/__init__.py
+)
+
+TEST_SRCS(
+ __init__.py
+ oauth1/__init__.py
+ oauth1/rfc5849/__init__.py
+ oauth1/rfc5849/endpoints/__init__.py
+ oauth1/rfc5849/endpoints/test_access_token.py
+ oauth1/rfc5849/endpoints/test_authorization.py
+ oauth1/rfc5849/endpoints/test_base.py
+ oauth1/rfc5849/endpoints/test_request_token.py
+ oauth1/rfc5849/endpoints/test_resource.py
+ oauth1/rfc5849/endpoints/test_signature_only.py
+ oauth1/rfc5849/test_client.py
+ oauth1/rfc5849/test_parameters.py
+ oauth1/rfc5849/test_request_validator.py
+ oauth1/rfc5849/test_signatures.py
+ oauth1/rfc5849/test_utils.py
+ oauth2/__init__.py
+ oauth2/rfc6749/__init__.py
+ oauth2/rfc6749/clients/__init__.py
+ oauth2/rfc6749/clients/test_backend_application.py
+ oauth2/rfc6749/clients/test_base.py
+ oauth2/rfc6749/clients/test_legacy_application.py
+ oauth2/rfc6749/clients/test_mobile_application.py
+ oauth2/rfc6749/clients/test_service_application.py
+ oauth2/rfc6749/clients/test_web_application.py
+ oauth2/rfc6749/endpoints/__init__.py
+ oauth2/rfc6749/endpoints/test_base_endpoint.py
+ oauth2/rfc6749/endpoints/test_client_authentication.py
+ oauth2/rfc6749/endpoints/test_credentials_preservation.py
+ oauth2/rfc6749/endpoints/test_error_responses.py
+ oauth2/rfc6749/endpoints/test_extra_credentials.py
+ oauth2/rfc6749/endpoints/test_introspect_endpoint.py
+ oauth2/rfc6749/endpoints/test_metadata.py
+ oauth2/rfc6749/endpoints/test_resource_owner_association.py
+ oauth2/rfc6749/endpoints/test_revocation_endpoint.py
+ oauth2/rfc6749/endpoints/test_scope_handling.py
+ oauth2/rfc6749/endpoints/test_utils.py
+ oauth2/rfc6749/grant_types/__init__.py
+ oauth2/rfc6749/grant_types/test_authorization_code.py
+ oauth2/rfc6749/grant_types/test_client_credentials.py
+ oauth2/rfc6749/grant_types/test_implicit.py
+ oauth2/rfc6749/grant_types/test_refresh_token.py
+ oauth2/rfc6749/grant_types/test_resource_owner_password.py
+ oauth2/rfc6749/test_parameters.py
+ oauth2/rfc6749/test_request_validator.py
+ oauth2/rfc6749/test_server.py
+ oauth2/rfc6749/test_tokens.py
+ oauth2/rfc6749/test_utils.py
+ oauth2/rfc8628/__init__.py
+ oauth2/rfc8628/clients/__init__.py
+ oauth2/rfc8628/clients/test_device.py
+ openid/__init__.py
+ openid/connect/__init__.py
+ openid/connect/core/__init__.py
+ openid/connect/core/endpoints/__init__.py
+ openid/connect/core/endpoints/test_claims_handling.py
+ openid/connect/core/endpoints/test_openid_connect_params_handling.py
+ openid/connect/core/endpoints/test_userinfo_endpoint.py
+ openid/connect/core/grant_types/__init__.py
+ openid/connect/core/grant_types/test_authorization_code.py
+ openid/connect/core/grant_types/test_base.py
+ openid/connect/core/grant_types/test_dispatchers.py
+ openid/connect/core/grant_types/test_hybrid.py
+ openid/connect/core/grant_types/test_implicit.py
+ openid/connect/core/test_request_validator.py
+ openid/connect/core/test_server.py
+ openid/connect/core/test_tokens.py
+ test_common.py
+ test_uri_validate.py
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/oauthlib/ya.make b/contrib/python/oauthlib/ya.make
new file mode 100644
index 0000000000..31a9686ead
--- /dev/null
+++ b/contrib/python/oauthlib/ya.make
@@ -0,0 +1,93 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(3.2.2)
+
+LICENSE(BSD-3-Clause)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ oauthlib/__init__.py
+ oauthlib/common.py
+ oauthlib/oauth1/__init__.py
+ oauthlib/oauth1/rfc5849/__init__.py
+ oauthlib/oauth1/rfc5849/endpoints/__init__.py
+ oauthlib/oauth1/rfc5849/endpoints/access_token.py
+ oauthlib/oauth1/rfc5849/endpoints/authorization.py
+ oauthlib/oauth1/rfc5849/endpoints/base.py
+ oauthlib/oauth1/rfc5849/endpoints/pre_configured.py
+ oauthlib/oauth1/rfc5849/endpoints/request_token.py
+ oauthlib/oauth1/rfc5849/endpoints/resource.py
+ oauthlib/oauth1/rfc5849/endpoints/signature_only.py
+ oauthlib/oauth1/rfc5849/errors.py
+ oauthlib/oauth1/rfc5849/parameters.py
+ oauthlib/oauth1/rfc5849/request_validator.py
+ oauthlib/oauth1/rfc5849/signature.py
+ oauthlib/oauth1/rfc5849/utils.py
+ oauthlib/oauth2/__init__.py
+ oauthlib/oauth2/rfc6749/__init__.py
+ oauthlib/oauth2/rfc6749/clients/__init__.py
+ oauthlib/oauth2/rfc6749/clients/backend_application.py
+ oauthlib/oauth2/rfc6749/clients/base.py
+ oauthlib/oauth2/rfc6749/clients/legacy_application.py
+ oauthlib/oauth2/rfc6749/clients/mobile_application.py
+ oauthlib/oauth2/rfc6749/clients/service_application.py
+ oauthlib/oauth2/rfc6749/clients/web_application.py
+ oauthlib/oauth2/rfc6749/endpoints/__init__.py
+ oauthlib/oauth2/rfc6749/endpoints/authorization.py
+ oauthlib/oauth2/rfc6749/endpoints/base.py
+ oauthlib/oauth2/rfc6749/endpoints/introspect.py
+ oauthlib/oauth2/rfc6749/endpoints/metadata.py
+ oauthlib/oauth2/rfc6749/endpoints/pre_configured.py
+ oauthlib/oauth2/rfc6749/endpoints/resource.py
+ oauthlib/oauth2/rfc6749/endpoints/revocation.py
+ oauthlib/oauth2/rfc6749/endpoints/token.py
+ oauthlib/oauth2/rfc6749/errors.py
+ oauthlib/oauth2/rfc6749/grant_types/__init__.py
+ oauthlib/oauth2/rfc6749/grant_types/authorization_code.py
+ oauthlib/oauth2/rfc6749/grant_types/base.py
+ oauthlib/oauth2/rfc6749/grant_types/client_credentials.py
+ oauthlib/oauth2/rfc6749/grant_types/implicit.py
+ oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
+ oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py
+ oauthlib/oauth2/rfc6749/parameters.py
+ oauthlib/oauth2/rfc6749/request_validator.py
+ oauthlib/oauth2/rfc6749/tokens.py
+ oauthlib/oauth2/rfc6749/utils.py
+ oauthlib/oauth2/rfc8628/__init__.py
+ oauthlib/oauth2/rfc8628/clients/__init__.py
+ oauthlib/oauth2/rfc8628/clients/device.py
+ oauthlib/openid/__init__.py
+ oauthlib/openid/connect/__init__.py
+ oauthlib/openid/connect/core/__init__.py
+ oauthlib/openid/connect/core/endpoints/__init__.py
+ oauthlib/openid/connect/core/endpoints/pre_configured.py
+ oauthlib/openid/connect/core/endpoints/userinfo.py
+ oauthlib/openid/connect/core/exceptions.py
+ oauthlib/openid/connect/core/grant_types/__init__.py
+ oauthlib/openid/connect/core/grant_types/authorization_code.py
+ oauthlib/openid/connect/core/grant_types/base.py
+ oauthlib/openid/connect/core/grant_types/dispatchers.py
+ oauthlib/openid/connect/core/grant_types/hybrid.py
+ oauthlib/openid/connect/core/grant_types/implicit.py
+ oauthlib/openid/connect/core/grant_types/refresh_token.py
+ oauthlib/openid/connect/core/request_validator.py
+ oauthlib/openid/connect/core/tokens.py
+ oauthlib/signals.py
+ oauthlib/uri_validate.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/oauthlib/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/pyOpenSSL/py3/.dist-info/METADATA b/contrib/python/pyOpenSSL/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..43ea7f5813
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/.dist-info/METADATA
@@ -0,0 +1,198 @@
+Metadata-Version: 2.1
+Name: pyOpenSSL
+Version: 21.0.0
+Summary: Python wrapper module around the OpenSSL library
+Home-page: https://pyopenssl.org/
+Author: The pyOpenSSL developers
+Author-email: cryptography-dev@python.org
+License: Apache License, Version 2.0
+Platform: UNKNOWN
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Security :: Cryptography
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Networking
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*
+Requires-Dist: cryptography (>=3.3)
+Requires-Dist: six (>=1.5.2)
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: sphinx-rtd-theme ; extra == 'docs'
+Provides-Extra: test
+Requires-Dist: flaky ; extra == 'test'
+Requires-Dist: pretend ; extra == 'test'
+Requires-Dist: pytest (>=3.0.1) ; extra == 'test'
+
+========================================================
+pyOpenSSL -- A Python wrapper around the OpenSSL library
+========================================================
+
+.. image:: https://readthedocs.org/projects/pyopenssl/badge/?version=stable
+ :target: https://pyopenssl.org/en/stable/
+ :alt: Stable Docs
+
+.. image:: https://github.com/pyca/pyopenssl/workflows/CI/badge.svg?branch=main
+ :target: https://github.com/pyca/pyopenssl/actions?query=workflow%3ACI+branch%3Amain
+
+.. image:: https://codecov.io/github/pyca/pyopenssl/branch/main/graph/badge.svg
+ :target: https://codecov.io/github/pyca/pyopenssl
+ :alt: Test coverage
+
+**Note:** The Python Cryptographic Authority **strongly suggests** the use of `pyca/cryptography`_
+where possible. If you are using pyOpenSSL for anything other than making a TLS connection
+**you should move to cryptography and drop your pyOpenSSL dependency**.
+
+High-level wrapper around a subset of the OpenSSL library. Includes
+
+* ``SSL.Connection`` objects, wrapping the methods of Python's portable sockets
+* Callbacks written in Python
+* Extensive error-handling mechanism, mirroring OpenSSL's error codes
+
+... and much more.
+
+You can find more information in the documentation_.
+Development takes place on GitHub_.
+
+
+Discussion
+==========
+
+If you run into bugs, you can file them in our `issue tracker`_.
+
+We maintain a cryptography-dev_ mailing list for both user and development discussions.
+
+You can also join ``#cryptography-dev`` on Freenode to ask questions or get involved.
+
+
+.. _documentation: https://pyopenssl.org/
+.. _`issue tracker`: https://github.com/pyca/pyopenssl/issues
+.. _cryptography-dev: https://mail.python.org/mailman/listinfo/cryptography-dev
+.. _GitHub: https://github.com/pyca/pyopenssl
+.. _`pyca/cryptography`: https://github.com/pyca/cryptography
+
+
+Release Information
+===================
+
+21.0.0 (2020-09-28)
+-------------------
+
+Backward-incompatible changes:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The minimum ``cryptography`` version is now 3.3.
+- Drop support for Python 3.5
+
+Deprecations:
+^^^^^^^^^^^^^
+
+Changes:
+^^^^^^^^
+
+- Raise an error when an invalid ALPN value is set.
+ `#993 <https://github.com/pyca/pyopenssl/pull/993>`_
+- Added ``OpenSSL.SSL.Context.set_min_proto_version`` and ``OpenSSL.SSL.Context.set_max_proto_version``
+ to set the minimum and maximum supported TLS version `#985 <https://github.com/pyca/pyopenssl/pull/985>`_.
+- Updated ``to_cryptography`` and ``from_cryptography`` methods to support an upcoming release of ``cryptography`` without raising deprecation warnings.
+ `#1030 <https://github.com/pyca/pyopenssl/pull/1030>`_
+
+20.0.1 (2020-12-15)
+-------------------
+
+Backward-incompatible changes:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Deprecations:
+^^^^^^^^^^^^^
+
+Changes:
+^^^^^^^^
+
+- Fixed compatibility with OpenSSL 1.1.0.
+
+20.0.0 (2020-11-27)
+-------------------
+
+
+Backward-incompatible changes:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The minimum ``cryptography`` version is now 3.2.
+- Remove deprecated ``OpenSSL.tsafe`` module.
+- Removed deprecated ``OpenSSL.SSL.Context.set_npn_advertise_callback``, ``OpenSSL.SSL.Context.set_npn_select_callback``, and ``OpenSSL.SSL.Connection.get_next_proto_negotiated``.
+- Drop support for Python 3.4
+- Drop support for OpenSSL 1.0.1 and 1.0.2
+
+Deprecations:
+^^^^^^^^^^^^^
+
+- Deprecated ``OpenSSL.crypto.loads_pkcs7`` and ``OpenSSL.crypto.loads_pkcs12``.
+
+Changes:
+^^^^^^^^
+
+- Added a new optional ``chain`` parameter to ``OpenSSL.crypto.X509StoreContext()``
+ where additional untrusted certificates can be specified to help chain building.
+ `#948 <https://github.com/pyca/pyopenssl/pull/948>`_
+- Added ``OpenSSL.crypto.X509Store.load_locations`` to set trusted
+ certificate file bundles and/or directories for verification.
+ `#943 <https://github.com/pyca/pyopenssl/pull/943>`_
+- Added ``Context.set_keylog_callback`` to log key material.
+ `#910 <https://github.com/pyca/pyopenssl/pull/910>`_
+- Added ``OpenSSL.SSL.Connection.get_verified_chain`` to retrieve the
+ verified certificate chain of the peer.
+ `#894 <https://github.com/pyca/pyopenssl/pull/894>`_.
+- Make verification callback optional in ``Context.set_verify``.
+ If omitted, OpenSSL's default verification is used.
+ `#933 <https://github.com/pyca/pyopenssl/pull/933>`_
+- Fixed a bug that could truncate or cause a zero-length key error due to a
+ null byte in private key passphrase in ``OpenSSL.crypto.load_privatekey``
+ and ``OpenSSL.crypto.dump_privatekey``.
+ `#947 <https://github.com/pyca/pyopenssl/pull/947>`_
+
+19.1.0 (2019-11-18)
+-------------------
+
+
+Backward-incompatible changes:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Removed deprecated ``ContextType``, ``ConnectionType``, ``PKeyType``, ``X509NameType``, ``X509ReqType``, ``X509Type``, ``X509StoreType``, ``CRLType``, ``PKCS7Type``, ``PKCS12Type``, and ``NetscapeSPKIType`` aliases.
+ Use the classes without the ``Type`` suffix instead.
+ `#814 <https://github.com/pyca/pyopenssl/pull/814>`_
+- The minimum ``cryptography`` version is now 2.8 due to issues on macOS with a transitive dependency.
+ `#875 <https://github.com/pyca/pyopenssl/pull/875>`_
+
+Deprecations:
+^^^^^^^^^^^^^
+
+- Deprecated ``OpenSSL.SSL.Context.set_npn_advertise_callback``, ``OpenSSL.SSL.Context.set_npn_select_callback``, and ``OpenSSL.SSL.Connection.get_next_proto_negotiated``.
+ ALPN should be used instead.
+ `#820 <https://github.com/pyca/pyopenssl/pull/820>`_
+
+
+Changes:
+^^^^^^^^
+
+- Support ``bytearray`` in ``SSL.Connection.send()`` by using cffi's from_buffer.
+ `#852 <https://github.com/pyca/pyopenssl/pull/852>`_
+- The ``OpenSSL.SSL.Context.set_alpn_select_callback`` can return a new ``NO_OVERLAPPING_PROTOCOLS`` sentinel value
+ to allow a TLS handshake to complete without an application protocol.
+
+`Full changelog <https://pyopenssl.org/en/stable/changelog.html>`_.
+
+
+
diff --git a/contrib/python/pyOpenSSL/py3/.dist-info/top_level.txt b/contrib/python/pyOpenSSL/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..effce34b61
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+OpenSSL
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/SSL.py b/contrib/python/pyOpenSSL/py3/OpenSSL/SSL.py
new file mode 100644
index 0000000000..e71b044cc0
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/SSL.py
@@ -0,0 +1,2505 @@
+import os
+import socket
+from sys import platform
+from functools import wraps, partial
+from itertools import count, chain
+from weakref import WeakValueDictionary
+from errno import errorcode
+
+from six import integer_types, int2byte, indexbytes
+
+from OpenSSL._util import (
+ UNSPECIFIED as _UNSPECIFIED,
+ exception_from_error_queue as _exception_from_error_queue,
+ ffi as _ffi,
+ lib as _lib,
+ make_assert as _make_assert,
+ native as _native,
+ path_string as _path_string,
+ text_to_bytes_and_warn as _text_to_bytes_and_warn,
+ no_zero_allocator as _no_zero_allocator,
+)
+
+from OpenSSL.crypto import (
+ FILETYPE_PEM,
+ _PassphraseHelper,
+ PKey,
+ X509Name,
+ X509,
+ X509Store,
+)
+
# Public API of this module: everything intended for `from OpenSSL.SSL
# import *` and the documented pyOpenSSL surface.  Keep in sync with the
# definitions below.
__all__ = [
    "OPENSSL_VERSION_NUMBER",
    "SSLEAY_VERSION",
    "SSLEAY_CFLAGS",
    "SSLEAY_PLATFORM",
    "SSLEAY_DIR",
    "SSLEAY_BUILT_ON",
    "SENT_SHUTDOWN",
    "RECEIVED_SHUTDOWN",
    "SSLv2_METHOD",
    "SSLv3_METHOD",
    "SSLv23_METHOD",
    "TLSv1_METHOD",
    "TLSv1_1_METHOD",
    "TLSv1_2_METHOD",
    "TLS_METHOD",
    "TLS_SERVER_METHOD",
    "TLS_CLIENT_METHOD",
    "SSL3_VERSION",
    "TLS1_VERSION",
    "TLS1_1_VERSION",
    "TLS1_2_VERSION",
    "TLS1_3_VERSION",
    "OP_NO_SSLv2",
    "OP_NO_SSLv3",
    "OP_NO_TLSv1",
    "OP_NO_TLSv1_1",
    "OP_NO_TLSv1_2",
    "OP_NO_TLSv1_3",
    "MODE_RELEASE_BUFFERS",
    "OP_SINGLE_DH_USE",
    "OP_SINGLE_ECDH_USE",
    "OP_EPHEMERAL_RSA",
    "OP_MICROSOFT_SESS_ID_BUG",
    "OP_NETSCAPE_CHALLENGE_BUG",
    "OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG",
    "OP_SSLREF2_REUSE_CERT_TYPE_BUG",
    "OP_MICROSOFT_BIG_SSLV3_BUFFER",
    "OP_MSIE_SSLV2_RSA_PADDING",
    "OP_SSLEAY_080_CLIENT_DH_BUG",
    "OP_TLS_D5_BUG",
    "OP_TLS_BLOCK_PADDING_BUG",
    "OP_DONT_INSERT_EMPTY_FRAGMENTS",
    "OP_CIPHER_SERVER_PREFERENCE",
    "OP_TLS_ROLLBACK_BUG",
    "OP_PKCS1_CHECK_1",
    "OP_PKCS1_CHECK_2",
    "OP_NETSCAPE_CA_DN_BUG",
    "OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG",
    "OP_NO_COMPRESSION",
    "OP_NO_QUERY_MTU",
    "OP_COOKIE_EXCHANGE",
    "OP_NO_TICKET",
    "OP_ALL",
    "VERIFY_PEER",
    "VERIFY_FAIL_IF_NO_PEER_CERT",
    "VERIFY_CLIENT_ONCE",
    "VERIFY_NONE",
    "SESS_CACHE_OFF",
    "SESS_CACHE_CLIENT",
    "SESS_CACHE_SERVER",
    "SESS_CACHE_BOTH",
    "SESS_CACHE_NO_AUTO_CLEAR",
    "SESS_CACHE_NO_INTERNAL_LOOKUP",
    "SESS_CACHE_NO_INTERNAL_STORE",
    "SESS_CACHE_NO_INTERNAL",
    "SSL_ST_CONNECT",
    "SSL_ST_ACCEPT",
    "SSL_ST_MASK",
    "SSL_CB_LOOP",
    "SSL_CB_EXIT",
    "SSL_CB_READ",
    "SSL_CB_WRITE",
    "SSL_CB_ALERT",
    "SSL_CB_READ_ALERT",
    "SSL_CB_WRITE_ALERT",
    "SSL_CB_ACCEPT_LOOP",
    "SSL_CB_ACCEPT_EXIT",
    "SSL_CB_CONNECT_LOOP",
    "SSL_CB_CONNECT_EXIT",
    "SSL_CB_HANDSHAKE_START",
    "SSL_CB_HANDSHAKE_DONE",
    "Error",
    "WantReadError",
    "WantWriteError",
    "WantX509LookupError",
    "ZeroReturnError",
    "SysCallError",
    "NO_OVERLAPPING_PROTOCOLS",
    "SSLeay_version",
    "Session",
    "Context",
    "Connection",
]
+
# Python 2 exposes a built-in ``buffer`` type; Python 3 does not.  On
# Python 3, fall back to a private placeholder class so that isinstance()
# checks against ``_buffer`` remain valid (and simply never match there).
try:
    _buffer = buffer
except NameError:

    class _buffer(object):
        pass
+
+
# --- Version/build metadata mirrored from the linked OpenSSL ---
OPENSSL_VERSION_NUMBER = _lib.OPENSSL_VERSION_NUMBER
SSLEAY_VERSION = _lib.SSLEAY_VERSION
SSLEAY_CFLAGS = _lib.SSLEAY_CFLAGS
SSLEAY_PLATFORM = _lib.SSLEAY_PLATFORM
SSLEAY_DIR = _lib.SSLEAY_DIR
SSLEAY_BUILT_ON = _lib.SSLEAY_BUILT_ON

# --- Shutdown-state flags ---
SENT_SHUTDOWN = _lib.SSL_SENT_SHUTDOWN
RECEIVED_SHUTDOWN = _lib.SSL_RECEIVED_SHUTDOWN

# Protocol-method identifiers accepted by Context(); each is resolved to
# the matching OpenSSL *_method() function via Context._methods.
SSLv2_METHOD = 1
SSLv3_METHOD = 2
SSLv23_METHOD = 3
TLSv1_METHOD = 4
TLSv1_1_METHOD = 5
TLSv1_2_METHOD = 6
TLS_METHOD = 7
TLS_SERVER_METHOD = 8
TLS_CLIENT_METHOD = 9

# Protocol version numbers (usable with set_min/set_max_proto_version).
try:
    SSL3_VERSION = _lib.SSL3_VERSION
    TLS1_VERSION = _lib.TLS1_VERSION
    TLS1_1_VERSION = _lib.TLS1_1_VERSION
    TLS1_2_VERSION = _lib.TLS1_2_VERSION
    TLS1_3_VERSION = _lib.TLS1_3_VERSION
except AttributeError:
    # Hardcode constants for cryptography < 3.4, see
    # https://github.com/pyca/pyopenssl/pull/985#issuecomment-775186682
    SSL3_VERSION = 768
    TLS1_VERSION = 769
    TLS1_1_VERSION = 770
    TLS1_2_VERSION = 771
    TLS1_3_VERSION = 772

# --- SSL_OP_* option flags re-exported without the SSL_ prefix ---
OP_NO_SSLv2 = _lib.SSL_OP_NO_SSLv2
OP_NO_SSLv3 = _lib.SSL_OP_NO_SSLv3
OP_NO_TLSv1 = _lib.SSL_OP_NO_TLSv1
OP_NO_TLSv1_1 = _lib.SSL_OP_NO_TLSv1_1
OP_NO_TLSv1_2 = _lib.SSL_OP_NO_TLSv1_2
try:
    # Only defined when the underlying build knows about TLS 1.3.
    OP_NO_TLSv1_3 = _lib.SSL_OP_NO_TLSv1_3
except AttributeError:
    pass

MODE_RELEASE_BUFFERS = _lib.SSL_MODE_RELEASE_BUFFERS

OP_SINGLE_DH_USE = _lib.SSL_OP_SINGLE_DH_USE
OP_SINGLE_ECDH_USE = _lib.SSL_OP_SINGLE_ECDH_USE
OP_EPHEMERAL_RSA = _lib.SSL_OP_EPHEMERAL_RSA
OP_MICROSOFT_SESS_ID_BUG = _lib.SSL_OP_MICROSOFT_SESS_ID_BUG
OP_NETSCAPE_CHALLENGE_BUG = _lib.SSL_OP_NETSCAPE_CHALLENGE_BUG
OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG = (
    _lib.SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
)
OP_SSLREF2_REUSE_CERT_TYPE_BUG = _lib.SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG
OP_MICROSOFT_BIG_SSLV3_BUFFER = _lib.SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER
OP_MSIE_SSLV2_RSA_PADDING = _lib.SSL_OP_MSIE_SSLV2_RSA_PADDING
OP_SSLEAY_080_CLIENT_DH_BUG = _lib.SSL_OP_SSLEAY_080_CLIENT_DH_BUG
OP_TLS_D5_BUG = _lib.SSL_OP_TLS_D5_BUG
OP_TLS_BLOCK_PADDING_BUG = _lib.SSL_OP_TLS_BLOCK_PADDING_BUG
OP_DONT_INSERT_EMPTY_FRAGMENTS = _lib.SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
OP_CIPHER_SERVER_PREFERENCE = _lib.SSL_OP_CIPHER_SERVER_PREFERENCE
OP_TLS_ROLLBACK_BUG = _lib.SSL_OP_TLS_ROLLBACK_BUG
OP_PKCS1_CHECK_1 = _lib.SSL_OP_PKCS1_CHECK_1
OP_PKCS1_CHECK_2 = _lib.SSL_OP_PKCS1_CHECK_2
OP_NETSCAPE_CA_DN_BUG = _lib.SSL_OP_NETSCAPE_CA_DN_BUG
OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG = (
    _lib.SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG
)
OP_NO_COMPRESSION = _lib.SSL_OP_NO_COMPRESSION

OP_NO_QUERY_MTU = _lib.SSL_OP_NO_QUERY_MTU
OP_COOKIE_EXCHANGE = _lib.SSL_OP_COOKIE_EXCHANGE
OP_NO_TICKET = _lib.SSL_OP_NO_TICKET

OP_ALL = _lib.SSL_OP_ALL

# --- Verification flags for Context.set_verify() ---
VERIFY_PEER = _lib.SSL_VERIFY_PEER
VERIFY_FAIL_IF_NO_PEER_CERT = _lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT
VERIFY_CLIENT_ONCE = _lib.SSL_VERIFY_CLIENT_ONCE
VERIFY_NONE = _lib.SSL_VERIFY_NONE

# --- Session-cache modes for Context.set_session_cache_mode() ---
SESS_CACHE_OFF = _lib.SSL_SESS_CACHE_OFF
SESS_CACHE_CLIENT = _lib.SSL_SESS_CACHE_CLIENT
SESS_CACHE_SERVER = _lib.SSL_SESS_CACHE_SERVER
SESS_CACHE_BOTH = _lib.SSL_SESS_CACHE_BOTH
SESS_CACHE_NO_AUTO_CLEAR = _lib.SSL_SESS_CACHE_NO_AUTO_CLEAR
SESS_CACHE_NO_INTERNAL_LOOKUP = _lib.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP
SESS_CACHE_NO_INTERNAL_STORE = _lib.SSL_SESS_CACHE_NO_INTERNAL_STORE
SESS_CACHE_NO_INTERNAL = _lib.SSL_SESS_CACHE_NO_INTERNAL

SSL_ST_CONNECT = _lib.SSL_ST_CONNECT
SSL_ST_ACCEPT = _lib.SSL_ST_ACCEPT
SSL_ST_MASK = _lib.SSL_ST_MASK

# --- SSL_CB_* constants re-exported from OpenSSL verbatim ---
SSL_CB_LOOP = _lib.SSL_CB_LOOP
SSL_CB_EXIT = _lib.SSL_CB_EXIT
SSL_CB_READ = _lib.SSL_CB_READ
SSL_CB_WRITE = _lib.SSL_CB_WRITE
SSL_CB_ALERT = _lib.SSL_CB_ALERT
SSL_CB_READ_ALERT = _lib.SSL_CB_READ_ALERT
SSL_CB_WRITE_ALERT = _lib.SSL_CB_WRITE_ALERT
SSL_CB_ACCEPT_LOOP = _lib.SSL_CB_ACCEPT_LOOP
SSL_CB_ACCEPT_EXIT = _lib.SSL_CB_ACCEPT_EXIT
SSL_CB_CONNECT_LOOP = _lib.SSL_CB_CONNECT_LOOP
SSL_CB_CONNECT_EXIT = _lib.SSL_CB_CONNECT_EXIT
SSL_CB_HANDSHAKE_START = _lib.SSL_CB_HANDSHAKE_START
SSL_CB_HANDSHAKE_DONE = _lib.SSL_CB_HANDSHAKE_DONE

# Taken from https://golang.org/src/crypto/x509/root_linux.go
_CERTIFICATE_FILE_LOCATIONS = [
    "/etc/ssl/certs/ca-certificates.crt",  # Debian/Ubuntu/Gentoo etc.
    "/etc/pki/tls/certs/ca-bundle.crt",  # Fedora/RHEL 6
    "/etc/ssl/ca-bundle.pem",  # OpenSUSE
    "/etc/pki/tls/cacert.pem",  # OpenELEC
    "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",  # CentOS/RHEL 7
]

_CERTIFICATE_PATH_LOCATIONS = [
    "/etc/ssl/certs",  # SLES10/SLES11
]

# These values are compared to output from cffi's ffi.string so they must be
# byte strings.
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR = b"/opt/pyca/cryptography/openssl/certs"
_CRYPTOGRAPHY_MANYLINUX1_CA_FILE = b"/opt/pyca/cryptography/openssl/cert.pem"
+
class Error(Exception):
    """
    An error occurred in an `OpenSSL.SSL` API.

    Root of this module's exception hierarchy; the more specific
    ``Want*Error``, ``ZeroReturnError`` and ``SysCallError`` types below
    all derive from it.
    """
+
+
# Raise an ``Error`` built from whatever is on the OpenSSL error queue.
_raise_current_error = partial(_exception_from_error_queue, Error)
# Assertion helper bound to this module's ``Error`` type.
_openssl_assert = _make_assert(Error)
+
+
class WantReadError(Error):
    """
    Raised when a non-blocking SSL operation cannot complete until more
    data has been read from the underlying transport.
    """

    pass
+
+
class WantWriteError(Error):
    """
    Raised when a non-blocking SSL operation cannot complete until the
    underlying transport is ready to accept more data.
    """

    pass
+
+
class WantX509LookupError(Error):
    """
    Raised when an SSL operation is paused pending an X509 lookup
    (see the pyOpenSSL documentation for details).
    """

    pass
+
+
class ZeroReturnError(Error):
    """
    Raised when the remote end has cleanly closed the TLS connection
    (a zero-length read at the SSL layer).
    """

    pass
+
+
class SysCallError(Error):
    """
    Raised when a failure is reported at the system-call layer beneath
    the SSL connection.
    """

    pass
+
+
class _CallbackExceptionHelper(object):
    """
    A base class for wrapper classes that allow for intelligent exception
    handling in OpenSSL callbacks.

    :ivar list _problems: Any exceptions that occurred while executing in a
        context where they could not be raised in the normal way.  Typically
        this is because OpenSSL has called into some Python code and requires
        a return value.  The exceptions are saved to be raised later when it
        is possible to do so.
    """

    def __init__(self):
        self._problems = []

    def raise_if_problem(self):
        """
        Raise an exception from the OpenSSL error queue or that was previously
        captured when running a callback.
        """
        if self._problems:
            try:
                # Drain the OpenSSL error queue first; any Error it raises
                # is deliberately discarded in favour of the original
                # Python exception captured below.
                _raise_current_error()
            except Error:
                pass
            raise self._problems.pop(0)
+
+
class _VerifyHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as a certificate verification
    callback.
    """

    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ok, store_ctx):
            # Bump the certificate's reference count before wrapping it —
            # presumably X509._from_raw_x509_ptr takes ownership of the
            # pointer it receives (TODO: confirm against the crypto module).
            x509 = _lib.X509_STORE_CTX_get_current_cert(store_ctx)
            _lib.X509_up_ref(x509)
            cert = X509._from_raw_x509_ptr(x509)
            error_number = _lib.X509_STORE_CTX_get_error(store_ctx)
            error_depth = _lib.X509_STORE_CTX_get_error_depth(store_ctx)

            # Recover the high-level Connection owning this SSL* via the
            # module's reverse mapping so the Python callback gets it.
            index = _lib.SSL_get_ex_data_X509_STORE_CTX_idx()
            ssl = _lib.X509_STORE_CTX_get_ex_data(store_ctx, index)
            connection = Connection._reverse_mapping[ssl]

            try:
                result = callback(
                    connection, cert, error_number, error_depth, ok
                )
            except Exception as e:
                # Exceptions cannot cross the C boundary: stash them and
                # report verification failure (0) to OpenSSL.
                self._problems.append(e)
                return 0
            else:
                if result:
                    # Accepting: clear any pending verification error.
                    _lib.X509_STORE_CTX_set_error(store_ctx, _lib.X509_V_OK)
                    return 1
                else:
                    return 0

        self.callback = _ffi.callback(
            "int (*)(int, X509_STORE_CTX *)", wrapper
        )
+
+
+NO_OVERLAPPING_PROTOCOLS = object()
+
+
class _ALPNSelectHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as an ALPN selection callback.
    """

    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ssl, out, outlen, in_, inlen, arg):
            try:
                conn = Connection._reverse_mapping[ssl]

                # The string passed to us is made up of multiple
                # length-prefixed bytestrings. We need to split that into a
                # list.
                instr = _ffi.buffer(in_, inlen)[:]
                protolist = []
                while instr:
                    encoded_len = indexbytes(instr, 0)
                    proto = instr[1 : encoded_len + 1]
                    protolist.append(proto)
                    instr = instr[encoded_len + 1 :]

                # Call the callback
                outbytes = callback(conn, protolist)
                any_accepted = True
                if outbytes is NO_OVERLAPPING_PROTOCOLS:
                    outbytes = b""
                    any_accepted = False
                elif not isinstance(outbytes, bytes):
                    raise TypeError(
                        "ALPN callback must return a bytestring or the "
                        "special NO_OVERLAPPING_PROTOCOLS sentinel value."
                    )

                # Save our callback arguments on the connection object to make
                # sure that they don't get freed before OpenSSL can use them.
                # Then, return them in the appropriate output parameters.
                conn._alpn_select_callback_args = [
                    _ffi.new("unsigned char *", len(outbytes)),
                    _ffi.new("unsigned char[]", outbytes),
                ]
                outlen[0] = conn._alpn_select_callback_args[0][0]
                out[0] = conn._alpn_select_callback_args[1]
                if not any_accepted:
                    # No common protocol: continue the handshake without
                    # ALPN rather than aborting it.
                    return _lib.SSL_TLSEXT_ERR_NOACK
                return _lib.SSL_TLSEXT_ERR_OK
            except Exception as e:
                # Exceptions cannot propagate through C; record them and
                # abort the handshake with a fatal alert.
                self._problems.append(e)
                return _lib.SSL_TLSEXT_ERR_ALERT_FATAL

        self.callback = _ffi.callback(
            (
                "int (*)(SSL *, unsigned char **, unsigned char *, "
                "const unsigned char *, unsigned int, void *)"
            ),
            wrapper,
        )
+
+
class _OCSPServerCallbackHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as an OCSP callback for the server
    side.

    Annoyingly, OpenSSL defines one OCSP callback but uses it in two different
    ways. For servers, that callback is expected to retrieve some OCSP data and
    hand it to OpenSSL, and may return only SSL_TLSEXT_ERR_OK,
    SSL_TLSEXT_ERR_FATAL, and SSL_TLSEXT_ERR_NOACK. For clients, that callback
    is expected to check the OCSP data, and returns a negative value on error,
    0 if the response is not acceptable, or positive if it is. These are
    mutually exclusive return code behaviours, and they mean that we need two
    helpers so that we always return an appropriate error code if the user's
    code throws an exception.

    Given that we have to have two helpers anyway, these helpers are a bit more
    helpery than most: specifically, they hide a few more of the OpenSSL
    functions so that the user has an easier time writing these callbacks.

    This helper implements the server side.
    """

    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ssl, cdata):
            try:
                conn = Connection._reverse_mapping[ssl]

                # Extract the data if any was provided.  ``cdata`` is a
                # cffi handle wrapping an arbitrary Python object.
                if cdata != _ffi.NULL:
                    data = _ffi.from_handle(cdata)
                else:
                    data = None

                # Call the callback.
                ocsp_data = callback(conn, data)

                if not isinstance(ocsp_data, bytes):
                    raise TypeError("OCSP callback must return a bytestring.")

                # If the OCSP data was provided, we will pass it to OpenSSL.
                # However, we have an early exit here: if no OCSP data was
                # provided we will just exit out and tell OpenSSL that there
                # is nothing to do.
                if not ocsp_data:
                    return 3  # SSL_TLSEXT_ERR_NOACK

                # OpenSSL takes ownership of this data and expects it to have
                # been allocated by OPENSSL_malloc.
                ocsp_data_length = len(ocsp_data)
                data_ptr = _lib.OPENSSL_malloc(ocsp_data_length)
                _ffi.buffer(data_ptr, ocsp_data_length)[:] = ocsp_data

                _lib.SSL_set_tlsext_status_ocsp_resp(
                    ssl, data_ptr, ocsp_data_length
                )

                return 0
            except Exception as e:
                # Exceptions cannot cross the C boundary; save for later
                # and signal a fatal alert to OpenSSL.
                self._problems.append(e)
                return 2  # SSL_TLSEXT_ERR_ALERT_FATAL

        self.callback = _ffi.callback("int (*)(SSL *, void *)", wrapper)
+
+
class _OCSPClientCallbackHelper(_CallbackExceptionHelper):
    """
    Wrap a callback such that it can be used as an OCSP callback for the client
    side.

    Annoyingly, OpenSSL defines one OCSP callback but uses it in two different
    ways. For servers, that callback is expected to retrieve some OCSP data and
    hand it to OpenSSL, and may return only SSL_TLSEXT_ERR_OK,
    SSL_TLSEXT_ERR_FATAL, and SSL_TLSEXT_ERR_NOACK. For clients, that callback
    is expected to check the OCSP data, and returns a negative value on error,
    0 if the response is not acceptable, or positive if it is. These are
    mutually exclusive return code behaviours, and they mean that we need two
    helpers so that we always return an appropriate error code if the user's
    code throws an exception.

    Given that we have to have two helpers anyway, these helpers are a bit more
    helpery than most: specifically, they hide a few more of the OpenSSL
    functions so that the user has an easier time writing these callbacks.

    This helper implements the client side.
    """

    def __init__(self, callback):
        _CallbackExceptionHelper.__init__(self)

        @wraps(callback)
        def wrapper(ssl, cdata):
            try:
                conn = Connection._reverse_mapping[ssl]

                # Extract the data if any was provided.  ``cdata`` is a
                # cffi handle wrapping an arbitrary Python object.
                if cdata != _ffi.NULL:
                    data = _ffi.from_handle(cdata)
                else:
                    data = None

                # Get the OCSP data.
                ocsp_ptr = _ffi.new("unsigned char **")
                ocsp_len = _lib.SSL_get_tlsext_status_ocsp_resp(ssl, ocsp_ptr)
                if ocsp_len < 0:
                    # No OCSP data.
                    ocsp_data = b""
                else:
                    # Copy the OCSP data, then pass it to the callback.
                    ocsp_data = _ffi.buffer(ocsp_ptr[0], ocsp_len)[:]

                valid = callback(conn, ocsp_data, data)

                # Return 1 on success or 0 on error.
                return int(bool(valid))

            except Exception as e:
                # Exceptions cannot cross the C boundary; save for later.
                self._problems.append(e)
                # Return negative value if an exception is hit.
                return -1

        self.callback = _ffi.callback("int (*)(SSL *, void *)", wrapper)
+
+
def _asFileDescriptor(obj):
    """
    Coerce *obj* into an integer file descriptor.

    Accepts either an integer descriptor directly or any object whose
    ``fileno()`` method yields one.

    :raises TypeError: if no integer descriptor can be obtained.
    :raises ValueError: if the resulting descriptor is negative.
    :return: the non-negative integer file descriptor
    """
    candidate = obj
    if not isinstance(candidate, integer_types):
        fileno = getattr(candidate, "fileno", None)
        if fileno is not None:
            candidate = fileno()

    if not isinstance(candidate, integer_types):
        raise TypeError("argument must be an int, or have a fileno() method.")
    if candidate < 0:
        raise ValueError(
            "file descriptor cannot be a negative integer (%i)" % (candidate,)
        )

    return candidate
+
+
def SSLeay_version(type):
    """
    Return a string describing the version of OpenSSL in use.

    :param type: One of the :const:`SSLEAY_` constants defined in this module.
    """
    # NOTE: ``type`` shadows the builtin; the name is kept for backward
    # compatibility with the public API.
    return _ffi.string(_lib.SSLeay_version(type))
+
+
+def _make_requires(flag, error):
+ """
+ Builds a decorator that ensures that functions that rely on OpenSSL
+ functions that are not present in this build raise NotImplementedError,
+ rather than AttributeError coming out of cryptography.
+
+ :param flag: A cryptography flag that guards the functions, e.g.
+ ``Cryptography_HAS_NEXTPROTONEG``.
+ :param error: The string to be used in the exception if the flag is false.
+ """
+
+ def _requires_decorator(func):
+ if not flag:
+
+ @wraps(func)
+ def explode(*args, **kwargs):
+ raise NotImplementedError(error)
+
+ return explode
+ else:
+ return func
+
+ return _requires_decorator
+
+
# Decorator guarding ALPN-dependent methods: they raise NotImplementedError
# when the linked cryptography build lacks ALPN support.
_requires_alpn = _make_requires(
    _lib.Cryptography_HAS_ALPN, "ALPN not available"
)


# The key-logging flag only exists in newer cryptography builds, hence the
# getattr() with a None (falsy) fallback.
_requires_keylog = _make_requires(
    getattr(_lib, "Cryptography_HAS_KEYLOG", None), "Key logging not available"
)
+
+
class Session(object):
    """
    An SSL session.  A session captures negotiated connection parameters
    which may be re-used to speed up the setup of subsequent connections.

    .. versionadded:: 0.14
    """
+
+
+class Context(object):
+ """
+ :class:`OpenSSL.SSL.Context` instances define the parameters for setting
+ up new SSL connections.
+
+ :param method: One of TLS_METHOD, TLS_CLIENT_METHOD, or TLS_SERVER_METHOD.
+ SSLv23_METHOD, TLSv1_METHOD, etc. are deprecated and should
+ not be used.
+ """
+
    # Map of the module-level *_METHOD identifiers to the names of the
    # corresponding OpenSSL method functions.
    _methods = {
        SSLv2_METHOD: "SSLv2_method",
        SSLv3_METHOD: "SSLv3_method",
        SSLv23_METHOD: "SSLv23_method",
        TLSv1_METHOD: "TLSv1_method",
        TLSv1_1_METHOD: "TLSv1_1_method",
        TLSv1_2_METHOD: "TLSv1_2_method",
        TLS_METHOD: "TLS_method",
        TLS_SERVER_METHOD: "TLS_server_method",
        TLS_CLIENT_METHOD: "TLS_client_method",
    }
    # Rebuild the map keeping only the methods this OpenSSL build actually
    # provides, resolving each name to the callable itself.
    _methods = dict(
        (identifier, getattr(_lib, name))
        for (identifier, name) in _methods.items()
        if getattr(_lib, name, None) is not None
    )
+
    def __init__(self, method):
        """
        :param method: One of the *_METHOD constants (an integer key into
            :attr:`_methods`).
        :raises TypeError: if *method* is not an integer.
        :raises ValueError: if *method* names a protocol this OpenSSL build
            does not provide.
        """
        if not isinstance(method, integer_types):
            raise TypeError("method must be an integer")

        try:
            method_func = self._methods[method]
        except KeyError:
            raise ValueError("No such protocol")

        method_obj = method_func()
        _openssl_assert(method_obj != _ffi.NULL)

        context = _lib.SSL_CTX_new(method_obj)
        _openssl_assert(context != _ffi.NULL)
        # Tie the SSL_CTX lifetime to this Python object.
        context = _ffi.gc(context, _lib.SSL_CTX_free)

        # Set SSL_CTX_set_ecdh_auto so that the ECDH curve will be
        # auto-selected. This function was added in 1.0.2 and made a noop in
        # 1.1.0+ (where it is set automatically).
        res = _lib.SSL_CTX_set_ecdh_auto(context, 1)
        _openssl_assert(res == 1)

        self._context = context
        # Slots holding Python callbacks/helpers that must stay referenced
        # for as long as OpenSSL may invoke them.
        self._passphrase_helper = None
        self._passphrase_callback = None
        self._passphrase_userdata = None
        self._verify_helper = None
        self._verify_callback = None
        self._info_callback = None
        self._keylog_callback = None
        self._tlsext_servername_callback = None
        self._app_data = None
        self._alpn_select_helper = None
        self._alpn_select_callback = None
        self._ocsp_helper = None
        self._ocsp_callback = None
        self._ocsp_data = None

        self.set_mode(_lib.SSL_MODE_ENABLE_PARTIAL_WRITE)
+
+ def set_min_proto_version(self, version):
+ """
+ Set the minimum supported protocol version. Setting the minimum
+ version to 0 will enable protocol versions down to the lowest version
+ supported by the library.
+
+ If the underlying OpenSSL build is missing support for the selected
+ version, this method will raise an exception.
+ """
+ _openssl_assert(
+ _lib.SSL_CTX_set_min_proto_version(self._context, version) == 1
+ )
+
+ def set_max_proto_version(self, version):
+ """
+ Set the maximum supported protocol version. Setting the maximum
+ version to 0 will enable protocol versions up to the highest version
+ supported by the library.
+
+ If the underlying OpenSSL build is missing support for the selected
+ version, this method will raise an exception.
+ """
+ _openssl_assert(
+ _lib.SSL_CTX_set_max_proto_version(self._context, version) == 1
+ )
+
+ def load_verify_locations(self, cafile, capath=None):
+ """
+ Let SSL know where we can find trusted certificates for the certificate
+ chain. Note that the certificates have to be in PEM format.
+
+ If capath is passed, it must be a directory prepared using the
+ ``c_rehash`` tool included with OpenSSL. Either, but not both, of
+ *pemfile* or *capath* may be :data:`None`.
+
+ :param cafile: In which file we can find the certificates (``bytes`` or
+ ``unicode``).
+ :param capath: In which directory we can find the certificates
+ (``bytes`` or ``unicode``).
+
+ :return: None
+ """
+ if cafile is None:
+ cafile = _ffi.NULL
+ else:
+ cafile = _path_string(cafile)
+
+ if capath is None:
+ capath = _ffi.NULL
+ else:
+ capath = _path_string(capath)
+
+ load_result = _lib.SSL_CTX_load_verify_locations(
+ self._context, cafile, capath
+ )
+ if not load_result:
+ _raise_current_error()
+
    def _wrap_callback(self, callback):
        # Adapt a user passphrase callback to the _PassphraseHelper
        # interface, substituting the userdata stored on this Context for
        # the raw OpenSSL userdata pointer.
        @wraps(callback)
        def wrapper(size, verify, userdata):
            return callback(size, verify, self._passphrase_userdata)

        return _PassphraseHelper(
            FILETYPE_PEM, wrapper, more_args=True, truncate=True
        )
+
    def set_passwd_cb(self, callback, userdata=None):
        """
        Set the passphrase callback. This function will be called
        when a private key with a passphrase is loaded.

        :param callback: The Python callback to use. This must accept three
            positional arguments. First, an integer giving the maximum length
            of the passphrase it may return. If the returned passphrase is
            longer than this, it will be truncated. Second, a boolean value
            which will be true if the user should be prompted for the
            passphrase twice and the callback should verify that the two
            values supplied are equal. Third, the value given as the
            *userdata* parameter to :meth:`set_passwd_cb`. The *callback*
            must return a byte string. If an error occurs, *callback* should
            return a false value (e.g. an empty string).
        :param userdata: (optional) A Python object which will be given as
            argument to the callback
        :return: None
        """
        if not callable(callback):
            raise TypeError("callback must be callable")

        # Keep references on this Context so the cffi callback (and the
        # helper that owns it) outlive this call.
        self._passphrase_helper = self._wrap_callback(callback)
        self._passphrase_callback = self._passphrase_helper.callback
        _lib.SSL_CTX_set_default_passwd_cb(
            self._context, self._passphrase_callback
        )
        self._passphrase_userdata = userdata
+
    def set_default_verify_paths(self):
        """
        Specify that the platform provided CA certificates are to be used for
        verification purposes. This method has some caveats related to the
        binary wheels that cryptography (pyOpenSSL's primary dependency) ships:

        * macOS will only load certificates using this method if the user has
          the ``openssl@1.1`` `Homebrew <https://brew.sh>`_ formula installed
          in the default location.
        * Windows will not work.
        * manylinux1 cryptography wheels will work on most common Linux
          distributions in pyOpenSSL 17.1.0 and above. pyOpenSSL detects the
          manylinux1 wheel and attempts to load roots via a fallback path.

        :return: None
        """
        # SSL_CTX_set_default_verify_paths will attempt to load certs from
        # both a cafile and capath that are set at compile time. However,
        # it will first check environment variables and, if present, load
        # those paths instead
        set_result = _lib.SSL_CTX_set_default_verify_paths(self._context)
        _openssl_assert(set_result == 1)
        # After attempting to set default_verify_paths we need to know whether
        # to go down the fallback path.
        # First we'll check to see if any env vars have been set. If so,
        # we won't try to do anything else because the user has set the path
        # themselves.
        # The environment variable *names* (typically SSL_CERT_DIR /
        # SSL_CERT_FILE, but determined by the build) are queried from
        # OpenSSL rather than hardcoded.
        dir_env_var = _ffi.string(_lib.X509_get_default_cert_dir_env()).decode(
            "ascii"
        )
        file_env_var = _ffi.string(
            _lib.X509_get_default_cert_file_env()
        ).decode("ascii")
        if not self._check_env_vars_set(dir_env_var, file_env_var):
            default_dir = _ffi.string(_lib.X509_get_default_cert_dir())
            default_file = _ffi.string(_lib.X509_get_default_cert_file())
            # Now we check to see if the default_dir and default_file are set
            # to the exact values we use in our manylinux1 builds. If they are
            # then we know to load the fallbacks
            if (
                default_dir == _CRYPTOGRAPHY_MANYLINUX1_CA_DIR
                and default_file == _CRYPTOGRAPHY_MANYLINUX1_CA_FILE
            ):
                # This is manylinux1, let's load our fallback paths
                self._fallback_default_verify_paths(
                    _CERTIFICATE_FILE_LOCATIONS, _CERTIFICATE_PATH_LOCATIONS
                )
+
+ def _check_env_vars_set(self, dir_env_var, file_env_var):
+ """
+ Check to see if the default cert dir/file environment vars are present.
+
+ :return: bool
+ """
+ return (
+ os.environ.get(file_env_var) is not None
+ or os.environ.get(dir_env_var) is not None
+ )
+
+ def _fallback_default_verify_paths(self, file_path, dir_path):
+ """
+ Default verify paths are based on the compiled version of OpenSSL.
+ However, when pyca/cryptography is compiled as a manylinux1 wheel
+ that compiled location can potentially be wrong. So, like Go, we
+ will try a predefined set of paths and attempt to load roots
+ from there.
+
+ :return: None
+ """
+ for cafile in file_path:
+ if os.path.isfile(cafile):
+ self.load_verify_locations(cafile)
+ break
+
+ for capath in dir_path:
+ if os.path.isdir(capath):
+ self.load_verify_locations(None, capath)
+ break
+
+ def use_certificate_chain_file(self, certfile):
+ """
+ Load a certificate chain from a file.
+
+ :param certfile: The name of the certificate chain file (``bytes`` or
+ ``unicode``). Must be PEM encoded.
+
+ :return: None
+ """
+ certfile = _path_string(certfile)
+
+ result = _lib.SSL_CTX_use_certificate_chain_file(
+ self._context, certfile
+ )
+ if not result:
+ _raise_current_error()
+
+ def use_certificate_file(self, certfile, filetype=FILETYPE_PEM):
+ """
+ Load a certificate from a file
+
+ :param certfile: The name of the certificate file (``bytes`` or
+ ``unicode``).
+ :param filetype: (optional) The encoding of the file, which is either
+ :const:`FILETYPE_PEM` or :const:`FILETYPE_ASN1`. The default is
+ :const:`FILETYPE_PEM`.
+
+ :return: None
+ """
+ certfile = _path_string(certfile)
+ if not isinstance(filetype, integer_types):
+ raise TypeError("filetype must be an integer")
+
+ use_result = _lib.SSL_CTX_use_certificate_file(
+ self._context, certfile, filetype
+ )
+ if not use_result:
+ _raise_current_error()
+
+ def use_certificate(self, cert):
+ """
+ Load a certificate from a X509 object
+
+ :param cert: The X509 object
+ :return: None
+ """
+ if not isinstance(cert, X509):
+ raise TypeError("cert must be an X509 instance")
+
+ use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509)
+ if not use_result:
+ _raise_current_error()
+
+ def add_extra_chain_cert(self, certobj):
+ """
+ Add certificate to chain
+
+ :param certobj: The X509 certificate object to add to the chain
+ :return: None
+ """
+ if not isinstance(certobj, X509):
+ raise TypeError("certobj must be an X509 instance")
+
+ copy = _lib.X509_dup(certobj._x509)
+ add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)
+ if not add_result:
+ # TODO: This is untested.
+ _lib.X509_free(copy)
+ _raise_current_error()
+
    def _raise_passphrase_exception(self):
        # Prefer any exception captured by the passphrase helper (e.g. one
        # raised inside the user's callback); fall back to raising from the
        # OpenSSL error queue otherwise.
        if self._passphrase_helper is not None:
            self._passphrase_helper.raise_if_problem(Error)

        _raise_current_error()
+
+ def use_privatekey_file(self, keyfile, filetype=_UNSPECIFIED):
+ """
+ Load a private key from a file
+
+ :param keyfile: The name of the key file (``bytes`` or ``unicode``)
+ :param filetype: (optional) The encoding of the file, which is either
+ :const:`FILETYPE_PEM` or :const:`FILETYPE_ASN1`. The default is
+ :const:`FILETYPE_PEM`.
+
+ :return: None
+ """
+ keyfile = _path_string(keyfile)
+
+ if filetype is _UNSPECIFIED:
+ filetype = FILETYPE_PEM
+ elif not isinstance(filetype, integer_types):
+ raise TypeError("filetype must be an integer")
+
+ use_result = _lib.SSL_CTX_use_PrivateKey_file(
+ self._context, keyfile, filetype
+ )
+ if not use_result:
+ self._raise_passphrase_exception()
+
+ def use_privatekey(self, pkey):
+ """
+ Load a private key from a PKey object
+
+ :param pkey: The PKey object
+ :return: None
+ """
+ if not isinstance(pkey, PKey):
+ raise TypeError("pkey must be a PKey instance")
+
+ use_result = _lib.SSL_CTX_use_PrivateKey(self._context, pkey._pkey)
+ if not use_result:
+ self._raise_passphrase_exception()
+
    def check_privatekey(self):
        """
        Check if the private key (loaded with :meth:`use_privatekey`) matches
        the certificate (loaded with :meth:`use_certificate`)

        :return: :data:`None` (raises :exc:`Error` if something's wrong)
        """
        # Delegates entirely to OpenSSL; raises with the queued error detail.
        if not _lib.SSL_CTX_check_private_key(self._context):
            _raise_current_error()
+
    def load_client_ca(self, cafile):
        """
        Load the trusted certificates that will be sent to the client. Does
        not actually imply any of the certificates are trusted; that must be
        configured separately.

        :param bytes cafile: The path to a certificates file in PEM format.
        :return: None
        """
        # A text path is accepted with a deprecation warning and converted
        # to bytes by _text_to_bytes_and_warn.
        ca_list = _lib.SSL_load_client_CA_file(
            _text_to_bytes_and_warn("cafile", cafile)
        )
        _openssl_assert(ca_list != _ffi.NULL)
        _lib.SSL_CTX_set_client_CA_list(self._context, ca_list)
+
    def set_session_id(self, buf):
        """
        Set the session id to *buf* within which a session can be reused for
        this Context object. This is needed when doing session resumption,
        because there is no way for a stored session to know which Context
        object it is associated with.

        :param bytes buf: The session id.

        :returns: None
        """
        # A text id is accepted with a deprecation warning and converted to
        # bytes; OpenSSL reports failure (e.g. id too long) via the assert.
        buf = _text_to_bytes_and_warn("buf", buf)
        _openssl_assert(
            _lib.SSL_CTX_set_session_id_context(self._context, buf, len(buf))
            == 1
        )
+
    def set_session_cache_mode(self, mode):
        """
        Set the behavior of the session cache used by all connections using
        this Context. The previously set mode is returned. See
        :const:`SESS_CACHE_*` for details about particular modes.

        :param mode: One or more of the SESS_CACHE_* flags (combine using
            bitwise or)
        :returns: The previously set caching mode.

        .. versionadded:: 0.14
        """
        if not isinstance(mode, integer_types):
            raise TypeError("mode must be an integer")

        # OpenSSL returns the mode that was in effect before this call.
        return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)
+
    def get_session_cache_mode(self):
        """
        Get the current session cache mode.

        :returns: The currently used cache mode (an integer, as reported by
            OpenSSL).

        .. versionadded:: 0.14
        """
        return _lib.SSL_CTX_get_session_cache_mode(self._context)
+
+ def set_verify(self, mode, callback=None):
+ """
+ Set the verification flags for this Context object to *mode* and
+ specify that *callback* should be used for verification callbacks.
+
+ :param mode: The verify mode, this should be one of
+ :const:`VERIFY_NONE` and :const:`VERIFY_PEER`. If
+ :const:`VERIFY_PEER` is used, *mode* can be OR:ed with
+ :const:`VERIFY_FAIL_IF_NO_PEER_CERT` and
+ :const:`VERIFY_CLIENT_ONCE` to further control the behaviour.
+ :param callback: The optional Python verification callback to use.
+ This should take five arguments: A Connection object, an X509
+ object, and three integer variables, which are in turn potential
+ error number, error depth and return code. *callback* should
+ return True if verification passes and False otherwise.
+ If omitted, OpenSSL's default verification is used.
+ :return: None
+
+ See SSL_CTX_set_verify(3SSL) for further details.
+ """
+ if not isinstance(mode, integer_types):
+ raise TypeError("mode must be an integer")
+
+ if callback is None:
+ self._verify_helper = None
+ self._verify_callback = None
+ _lib.SSL_CTX_set_verify(self._context, mode, _ffi.NULL)
+ else:
+ if not callable(callback):
+ raise TypeError("callback must be callable")
+
+ self._verify_helper = _VerifyHelper(callback)
+ self._verify_callback = self._verify_helper.callback
+ _lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback)
+
+ def set_verify_depth(self, depth):
+ """
+ Set the maximum depth for the certificate chain verification that shall
+ be allowed for this Context object.
+
+ :param depth: An integer specifying the verify depth
+ :return: None
+ """
+ if not isinstance(depth, integer_types):
+ raise TypeError("depth must be an integer")
+
+ _lib.SSL_CTX_set_verify_depth(self._context, depth)
+
    def get_verify_mode(self):
        """
        Retrieve the Context object's verify mode, as set by
        :meth:`set_verify`.

        :return: The verify mode
        """
        # Returns the raw VERIFY_* bitmask straight from OpenSSL.
        return _lib.SSL_CTX_get_verify_mode(self._context)
+
    def get_verify_depth(self):
        """
        Retrieve the Context object's verify depth, as set by
        :meth:`set_verify_depth`.

        :return: The verify depth
        """
        # Thin wrapper over the OpenSSL accessor.
        return _lib.SSL_CTX_get_verify_depth(self._context)
+
+ def load_tmp_dh(self, dhfile):
+ """
+ Load parameters for Ephemeral Diffie-Hellman
+
+ :param dhfile: The file to load EDH parameters from (``bytes`` or
+ ``unicode``).
+
+ :return: None
+ """
+ dhfile = _path_string(dhfile)
+
+ bio = _lib.BIO_new_file(dhfile, b"r")
+ if bio == _ffi.NULL:
+ _raise_current_error()
+ bio = _ffi.gc(bio, _lib.BIO_free)
+
+ dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
+ dh = _ffi.gc(dh, _lib.DH_free)
+ res = _lib.SSL_CTX_set_tmp_dh(self._context, dh)
+ _openssl_assert(res == 1)
+
    def set_tmp_ecdh(self, curve):
        """
        Select a curve to use for ECDHE key exchange.

        :param curve: A curve object to use as returned by either
            :meth:`OpenSSL.crypto.get_elliptic_curve` or
            :meth:`OpenSSL.crypto.get_elliptic_curves`.

        :return: None
        """
        # The curve object converts itself to the EC_KEY that OpenSSL wants.
        _lib.SSL_CTX_set_tmp_ecdh(self._context, curve._to_EC_KEY())
+
    def set_cipher_list(self, cipher_list):
        """
        Set the list of ciphers to be used in this context.

        See the OpenSSL manual for more information (e.g.
        :manpage:`ciphers(1)`).

        :param bytes cipher_list: An OpenSSL cipher string.
        :return: None
        """
        # Accepts text for backward compatibility (with a warning); OpenSSL
        # itself requires bytes.
        cipher_list = _text_to_bytes_and_warn("cipher_list", cipher_list)

        if not isinstance(cipher_list, bytes):
            raise TypeError("cipher_list must be a byte string.")

        _openssl_assert(
            _lib.SSL_CTX_set_cipher_list(self._context, cipher_list) == 1
        )
        # In OpenSSL 1.1.1 setting the cipher list will always return TLS 1.3
        # ciphers even if you pass an invalid cipher. Applications (like
        # Twisted) have tests that depend on an error being raised if an
        # invalid cipher string is passed, but without the following check
        # for the TLS 1.3 specific cipher suites it would never error.
        # A throwaway memory-BIO Connection is created purely to inspect the
        # effective cipher list of this context.
        tmpconn = Connection(self, None)
        if tmpconn.get_cipher_list() == [
            "TLS_AES_256_GCM_SHA384",
            "TLS_CHACHA20_POLY1305_SHA256",
            "TLS_AES_128_GCM_SHA256",
        ]:
            # Only the TLS 1.3 defaults survived, i.e. nothing in the given
            # string matched: report the same error OpenSSL would.
            raise Error(
                [
                    (
                        "SSL routines",
                        "SSL_CTX_set_cipher_list",
                        "no cipher match",
                    ),
                ],
            )
+
    def set_client_ca_list(self, certificate_authorities):
        """
        Set the list of preferred client certificate signers for this server
        context.

        This list of certificate authorities will be sent to the client when
        the server requests a client certificate.

        :param certificate_authorities: a sequence of X509Names.
        :return: None

        :raises TypeError: if any element is not an :class:`X509Name`.

        .. versionadded:: 0.10
        """
        name_stack = _lib.sk_X509_NAME_new_null()
        _openssl_assert(name_stack != _ffi.NULL)

        try:
            for ca_name in certificate_authorities:
                if not isinstance(ca_name, X509Name):
                    raise TypeError(
                        "client CAs must be X509Name objects, not %s "
                        "objects" % (type(ca_name).__name__,)
                    )
                # Duplicate each name: the stack takes ownership of what is
                # pushed, and the caller's X509Name keeps owning its own.
                copy = _lib.X509_NAME_dup(ca_name._name)
                _openssl_assert(copy != _ffi.NULL)
                push_result = _lib.sk_X509_NAME_push(name_stack, copy)
                if not push_result:
                    # Push failed, so the stack did not take ownership;
                    # free the duplicate ourselves before raising.
                    _lib.X509_NAME_free(copy)
                    _raise_current_error()
        except Exception:
            # On any failure, free the partially-built stack (and its
            # contents) since it was never handed to OpenSSL.
            _lib.sk_X509_NAME_free(name_stack)
            raise

        # SSL_CTX_set_client_CA_list takes ownership of name_stack.
        _lib.SSL_CTX_set_client_CA_list(self._context, name_stack)
+
+ def add_client_ca(self, certificate_authority):
+ """
+ Add the CA certificate to the list of preferred signers for this
+ context.
+
+ The list of certificate authorities will be sent to the client when the
+ server requests a client certificate.
+
+ :param certificate_authority: certificate authority's X509 certificate.
+ :return: None
+
+ .. versionadded:: 0.10
+ """
+ if not isinstance(certificate_authority, X509):
+ raise TypeError("certificate_authority must be an X509 instance")
+
+ add_result = _lib.SSL_CTX_add_client_CA(
+ self._context, certificate_authority._x509
+ )
+ _openssl_assert(add_result == 1)
+
+ def set_timeout(self, timeout):
+ """
+ Set the timeout for newly created sessions for this Context object to
+ *timeout*. The default value is 300 seconds. See the OpenSSL manual
+ for more information (e.g. :manpage:`SSL_CTX_set_timeout(3)`).
+
+ :param timeout: The timeout in (whole) seconds
+ :return: The previous session timeout
+ """
+ if not isinstance(timeout, integer_types):
+ raise TypeError("timeout must be an integer")
+
+ return _lib.SSL_CTX_set_timeout(self._context, timeout)
+
    def get_timeout(self):
        """
        Retrieve session timeout, as set by :meth:`set_timeout`. The default
        is 300 seconds.

        :return: The session timeout
        """
        # Thin wrapper over the OpenSSL accessor.
        return _lib.SSL_CTX_get_timeout(self._context)
+
    def set_info_callback(self, callback):
        """
        Set the information callback to *callback*. This function will be
        called from time to time during SSL handshakes.

        :param callback: The Python callback to use. This should take three
            arguments: a Connection object and two integers. The first integer
            specifies where in the SSL handshake the function was called, and
            the other the return code from a (possibly failed) internal
            function call.
        :return: None
        """

        @wraps(callback)
        def wrapper(ssl, where, return_code):
            # Map the raw SSL* back to its Python Connection before
            # delivering the event to user code.
            callback(Connection._reverse_mapping[ssl], where, return_code)

        # Keep the cdata callback referenced on self so it is not garbage
        # collected while OpenSSL still holds a pointer to it.
        self._info_callback = _ffi.callback(
            "void (*)(const SSL *, int, int)", wrapper
        )
        _lib.SSL_CTX_set_info_callback(self._context, self._info_callback)
+
    @_requires_keylog
    def set_keylog_callback(self, callback):
        """
        Set the TLS key logging callback to *callback*. This function will be
        called whenever TLS key material is generated or received, in order
        to allow applications to store this keying material for debugging
        purposes.

        :param callback: The Python callback to use. This should take two
            arguments: a Connection object and a bytestring that contains
            the key material in the format used by NSS for its SSLKEYLOGFILE
            debugging output.
        :return: None
        """

        @wraps(callback)
        def wrapper(ssl, line):
            # Convert the C string to Python bytes and resolve the SSL*
            # back to its Connection before calling user code.
            line = _ffi.string(line)
            callback(Connection._reverse_mapping[ssl], line)

        # Keep the cdata callback alive on self for as long as OpenSSL may
        # invoke it.
        self._keylog_callback = _ffi.callback(
            "void (*)(const SSL *, const char *)", wrapper
        )
        _lib.SSL_CTX_set_keylog_callback(self._context, self._keylog_callback)
+
    def get_app_data(self):
        """
        Get the application data (supplied via :meth:`set_app_data()`)

        :return: The application data
        """
        # Stored purely on the Python side; OpenSSL is not involved.
        return self._app_data
+
    def set_app_data(self, data):
        """
        Set the application data (will be returned from get_app_data())

        :param data: Any Python object
        :return: None
        """
        # Stored purely on the Python side; OpenSSL is not involved.
        self._app_data = data
+
+ def get_cert_store(self):
+ """
+ Get the certificate store for the context. This can be used to add
+ "trusted" certificates without using the
+ :meth:`load_verify_locations` method.
+
+ :return: A X509Store object or None if it does not have one.
+ """
+ store = _lib.SSL_CTX_get_cert_store(self._context)
+ if store == _ffi.NULL:
+ # TODO: This is untested.
+ return None
+
+ pystore = X509Store.__new__(X509Store)
+ pystore._store = store
+ return pystore
+
+ def set_options(self, options):
+ """
+ Add options. Options set before are not cleared!
+ This method should be used with the :const:`OP_*` constants.
+
+ :param options: The options to add.
+ :return: The new option bitmask.
+ """
+ if not isinstance(options, integer_types):
+ raise TypeError("options must be an integer")
+
+ return _lib.SSL_CTX_set_options(self._context, options)
+
+ def set_mode(self, mode):
+ """
+ Add modes via bitmask. Modes set before are not cleared! This method
+ should be used with the :const:`MODE_*` constants.
+
+ :param mode: The mode to add.
+ :return: The new mode bitmask.
+ """
+ if not isinstance(mode, integer_types):
+ raise TypeError("mode must be an integer")
+
+ return _lib.SSL_CTX_set_mode(self._context, mode)
+
    def set_tlsext_servername_callback(self, callback):
        """
        Specify a callback function to be called when clients specify a server
        name.

        :param callback: The callback function. It will be invoked with one
            argument, the Connection instance.

        .. versionadded:: 0.13
        """

        @wraps(callback)
        def wrapper(ssl, alert, arg):
            # The alert and arg parameters from OpenSSL are not exposed to
            # the Python callback; only the Connection is passed through.
            callback(Connection._reverse_mapping[ssl])
            return 0

        # Keep the cdata callback alive on self for as long as OpenSSL may
        # invoke it.
        self._tlsext_servername_callback = _ffi.callback(
            "int (*)(SSL *, int *, void *)", wrapper
        )
        _lib.SSL_CTX_set_tlsext_servername_callback(
            self._context, self._tlsext_servername_callback
        )
+
+ def set_tlsext_use_srtp(self, profiles):
+ """
+ Enable support for negotiating SRTP keying material.
+
+ :param bytes profiles: A colon delimited list of protection profile
+ names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.
+ :return: None
+ """
+ if not isinstance(profiles, bytes):
+ raise TypeError("profiles must be a byte string.")
+
+ _openssl_assert(
+ _lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == 0
+ )
+
    @_requires_alpn
    def set_alpn_protos(self, protos):
        """
        Specify the protocols that the client is prepared to speak after the
        TLS connection has been negotiated using Application Layer Protocol
        Negotiation.

        :param protos: A list of the protocols to be offered to the server.
            This list should be a Python list of bytestrings representing the
            protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
        """
        # Take the list of protocols and join them together, prefixing them
        # with their lengths.
        # This produces the ALPN wire format: <len><proto><len><proto>...
        protostr = b"".join(
            chain.from_iterable((int2byte(len(p)), p) for p in protos)
        )

        # Build a C string from the list. We don't need to save this off
        # because OpenSSL immediately copies the data out.
        input_str = _ffi.new("unsigned char[]", protostr)

        # https://www.openssl.org/docs/man1.1.0/man3/SSL_CTX_set_alpn_protos.html:
        # SSL_CTX_set_alpn_protos() and SSL_set_alpn_protos()
        # return 0 on success, and non-0 on failure.
        # WARNING: these functions reverse the return value convention.
        _openssl_assert(
            _lib.SSL_CTX_set_alpn_protos(
                self._context, input_str, len(protostr)
            )
            == 0
        )
+
    @_requires_alpn
    def set_alpn_select_callback(self, callback):
        """
        Specify a callback function that will be called on the server when a
        client offers protocols using ALPN.

        :param callback: The callback function. It will be invoked with two
            arguments: the Connection, and a list of offered protocols as
            bytestrings, e.g ``[b'http/1.1', b'spdy/2']``. It can return
            one of those bytestrings to indicate the chosen protocol, the
            empty bytestring to terminate the TLS connection, or the
            :py:obj:`NO_OVERLAPPING_PROTOCOLS` to indicate that no offered
            protocol was selected, but that the connection should not be
            aborted.
        """
        # The helper wraps the user callback into a cdata callback; both
        # must be kept referenced on self so OpenSSL's pointer stays valid.
        self._alpn_select_helper = _ALPNSelectHelper(callback)
        self._alpn_select_callback = self._alpn_select_helper.callback
        _lib.SSL_CTX_set_alpn_select_cb(
            self._context, self._alpn_select_callback, _ffi.NULL
        )
+
    def _set_ocsp_callback(self, helper, data):
        """
        This internal helper does the common work for
        ``set_ocsp_server_callback`` and ``set_ocsp_client_callback``, which is
        almost all of it.
        """
        # Keep helper, its cdata callback, and the opaque data handle alive
        # on self: OpenSSL only stores raw pointers to them.
        self._ocsp_helper = helper
        self._ocsp_callback = helper.callback
        if data is None:
            self._ocsp_data = _ffi.NULL
        else:
            # new_handle gives OpenSSL an opaque pointer that can later be
            # turned back into the Python object.
            self._ocsp_data = _ffi.new_handle(data)

        rc = _lib.SSL_CTX_set_tlsext_status_cb(
            self._context, self._ocsp_callback
        )
        _openssl_assert(rc == 1)
        rc = _lib.SSL_CTX_set_tlsext_status_arg(self._context, self._ocsp_data)
        _openssl_assert(rc == 1)
+
+ def set_ocsp_server_callback(self, callback, data=None):
+ """
+ Set a callback to provide OCSP data to be stapled to the TLS handshake
+ on the server side.
+
+ :param callback: The callback function. It will be invoked with two
+ arguments: the Connection, and the optional arbitrary data you have
+ provided. The callback must return a bytestring that contains the
+ OCSP data to staple to the handshake. If no OCSP data is available
+ for this connection, return the empty bytestring.
+ :param data: Some opaque data that will be passed into the callback
+ function when called. This can be used to avoid needing to do
+ complex data lookups or to keep track of what context is being
+ used. This parameter is optional.
+ """
+ helper = _OCSPServerCallbackHelper(callback)
+ self._set_ocsp_callback(helper, data)
+
+ def set_ocsp_client_callback(self, callback, data=None):
+ """
+ Set a callback to validate OCSP data stapled to the TLS handshake on
+ the client side.
+
+ :param callback: The callback function. It will be invoked with three
+ arguments: the Connection, a bytestring containing the stapled OCSP
+ assertion, and the optional arbitrary data you have provided. The
+ callback must return a boolean that indicates the result of
+ validating the OCSP data: ``True`` if the OCSP data is valid and
+ the certificate can be trusted, or ``False`` if either the OCSP
+ data is invalid or the certificate has been revoked.
+ :param data: Some opaque data that will be passed into the callback
+ function when called. This can be used to avoid needing to do
+ complex data lookups or to keep track of what context is being
+ used. This parameter is optional.
+ """
+ helper = _OCSPClientCallbackHelper(callback)
+ self._set_ocsp_callback(helper, data)
+
+
+class Connection(object):
+ _reverse_mapping = WeakValueDictionary()
+
    def __init__(self, context, socket=None):
        """
        Create a new Connection object, using the given OpenSSL.SSL.Context
        instance and socket.

        :param context: An SSL Context to use for this connection
        :param socket: The socket to use for transport layer; if ``None``,
            a pair of memory BIOs is used instead (driven via
            :meth:`bio_read`/:meth:`bio_write`).
        :raises TypeError: if *context* is not a :class:`Context`.
        """
        if not isinstance(context, Context):
            raise TypeError("context must be a Context instance")

        ssl = _lib.SSL_new(context._context)
        self._ssl = _ffi.gc(ssl, _lib.SSL_free)
        # We set SSL_MODE_AUTO_RETRY to handle situations where OpenSSL returns
        # an SSL_ERROR_WANT_READ when processing a non-application data packet
        # even though there is still data on the underlying transport.
        # See https://github.com/openssl/openssl/issues/6234 for more details.
        _lib.SSL_set_mode(self._ssl, _lib.SSL_MODE_AUTO_RETRY)
        self._context = context
        self._app_data = None

        # References to strings used for Application Layer Protocol
        # Negotiation. These strings get copied at some point but it's well
        # after the callback returns, so we have to hang them somewhere to
        # avoid them getting freed.
        self._alpn_select_callback_args = None

        # Reference the verify_callback of the Context. This ensures that if
        # set_verify is called again after the SSL object has been created we
        # do not point to a dangling reference
        self._verify_helper = context._verify_helper
        self._verify_callback = context._verify_callback

        # Register in the weak mapping so C callbacks can translate the raw
        # SSL* back into this Python object.
        self._reverse_mapping[self._ssl] = self

        if socket is None:
            self._socket = None
            # Don't set up any gc for these, SSL_free will take care of them.
            self._into_ssl = _lib.BIO_new(_lib.BIO_s_mem())
            _openssl_assert(self._into_ssl != _ffi.NULL)

            self._from_ssl = _lib.BIO_new(_lib.BIO_s_mem())
            _openssl_assert(self._from_ssl != _ffi.NULL)

            _lib.SSL_set_bio(self._ssl, self._into_ssl, self._from_ssl)
        else:
            # Socket mode: OpenSSL talks directly to the file descriptor.
            self._into_ssl = None
            self._from_ssl = None
            self._socket = socket
            set_result = _lib.SSL_set_fd(
                self._ssl, _asFileDescriptor(self._socket)
            )
            _openssl_assert(set_result == 1)
+
+ def __getattr__(self, name):
+ """
+ Look up attributes on the wrapped socket object if they are not found
+ on the Connection object.
+ """
+ if self._socket is None:
+ raise AttributeError(
+ "'%s' object has no attribute '%s'"
+ % (self.__class__.__name__, name)
+ )
+ else:
+ return getattr(self._socket, name)
+
    def _raise_ssl_error(self, ssl, result):
        # Translate a non-success return from an SSL_* call into the
        # appropriate Python exception.
        #
        # First, surface any exception that was raised inside one of our
        # Python callbacks while OpenSSL was running (it cannot propagate
        # through the C stack by itself).
        if self._context._verify_helper is not None:
            self._context._verify_helper.raise_if_problem()
        if self._context._alpn_select_helper is not None:
            self._context._alpn_select_helper.raise_if_problem()
        if self._context._ocsp_helper is not None:
            self._context._ocsp_helper.raise_if_problem()

        error = _lib.SSL_get_error(ssl, result)
        if error == _lib.SSL_ERROR_WANT_READ:
            raise WantReadError()
        elif error == _lib.SSL_ERROR_WANT_WRITE:
            raise WantWriteError()
        elif error == _lib.SSL_ERROR_ZERO_RETURN:
            raise ZeroReturnError()
        elif error == _lib.SSL_ERROR_WANT_X509_LOOKUP:
            # TODO: This is untested.
            raise WantX509LookupError()
        elif error == _lib.SSL_ERROR_SYSCALL:
            if _lib.ERR_peek_error() == 0:
                # No queued OpenSSL error: the failure came from the OS.
                if result < 0:
                    if platform == "win32":
                        errno = _ffi.getwinerror()[0]
                    else:
                        errno = _ffi.errno

                    if errno != 0:
                        raise SysCallError(errno, errorcode.get(errno))
                # result == 0 (or errno == 0) means the peer closed the
                # transport without a clean TLS shutdown.
                raise SysCallError(-1, "Unexpected EOF")
            else:
                # TODO: This is untested.
                _raise_current_error()
        elif error == _lib.SSL_ERROR_NONE:
            pass
        else:
            _raise_current_error()
+
    def get_context(self):
        """
        Retrieve the :class:`Context` object associated with this
        :class:`Connection`.

        :return: The :class:`Context` instance.
        """
        return self._context
+
+ def set_context(self, context):
+ """
+ Switch this connection to a new session context.
+
+ :param context: A :class:`Context` instance giving the new session
+ context to use.
+ """
+ if not isinstance(context, Context):
+ raise TypeError("context must be a Context instance")
+
+ _lib.SSL_set_SSL_CTX(self._ssl, context._context)
+ self._context = context
+
+ def get_servername(self):
+ """
+ Retrieve the servername extension value if provided in the client hello
+ message, or None if there wasn't one.
+
+ :return: A byte string giving the server name or :data:`None`.
+
+ .. versionadded:: 0.13
+ """
+ name = _lib.SSL_get_servername(
+ self._ssl, _lib.TLSEXT_NAMETYPE_host_name
+ )
+ if name == _ffi.NULL:
+ return None
+
+ return _ffi.string(name)
+
+ def set_tlsext_host_name(self, name):
+ """
+ Set the value of the servername extension to send in the client hello.
+
+ :param name: A byte string giving the name.
+
+ .. versionadded:: 0.13
+ """
+ if not isinstance(name, bytes):
+ raise TypeError("name must be a byte string")
+ elif b"\0" in name:
+ raise TypeError("name must not contain NUL byte")
+
+ # XXX I guess this can fail sometimes?
+ _lib.SSL_set_tlsext_host_name(self._ssl, name)
+
    def pending(self):
        """
        Get the number of bytes that can be safely read from the SSL buffer
        (**not** the underlying transport buffer).

        :return: The number of bytes available in the receive buffer.
        """
        # Thin wrapper over SSL_pending.
        return _lib.SSL_pending(self._ssl)
+
    def send(self, buf, flags=0):
        """
        Send data on the connection. NOTE: If you get one of the WantRead,
        WantWrite or WantX509Lookup exceptions on this, you have to call the
        method again with the SAME buffer.

        :param buf: The string, buffer or memoryview to send
        :param flags: (optional) Included for compatibility with the socket
            API, the value is ignored
        :return: The number of bytes written
        """
        # Backward compatibility
        buf = _text_to_bytes_and_warn("buf", buf)

        with _ffi.from_buffer(buf) as data:
            # check len(buf) instead of len(data) for testability
            # SSL_write's length parameter is a C int, so cap at 2**31-1.
            if len(buf) > 2147483647:
                raise ValueError(
                    "Cannot send more than 2**31-1 bytes at once."
                )

            result = _lib.SSL_write(self._ssl, data, len(data))
            self._raise_ssl_error(self._ssl, result)

        return result

    # Alias matching the file-object style API.
    write = send
+
    def sendall(self, buf, flags=0):
        """
        Send "all" data on the connection. This calls send() repeatedly until
        all data is sent. If an error occurs, it's impossible to tell how much
        data has been sent.

        :param buf: The string, buffer or memoryview to send
        :param flags: (optional) Included for compatibility with the socket
            API, the value is ignored
        :return: The number of bytes written
        """
        buf = _text_to_bytes_and_warn("buf", buf)

        with _ffi.from_buffer(buf) as data:

            left_to_send = len(buf)
            total_sent = 0

            while left_to_send:
                # SSL_write's num arg is an int,
                # so we cannot send more than 2**31-1 bytes at once.
                # `data + total_sent` is C pointer arithmetic: resume from
                # the first unsent byte of the buffer.
                result = _lib.SSL_write(
                    self._ssl, data + total_sent, min(left_to_send, 2147483647)
                )
                self._raise_ssl_error(self._ssl, result)
                total_sent += result
                left_to_send -= result

        return total_sent
+
+ def recv(self, bufsiz, flags=None):
+ """
+ Receive data on the connection.
+
+ :param bufsiz: The maximum number of bytes to read
+ :param flags: (optional) The only supported flag is ``MSG_PEEK``,
+ all other flags are ignored.
+ :return: The string read from the Connection
+ """
+ buf = _no_zero_allocator("char[]", bufsiz)
+ if flags is not None and flags & socket.MSG_PEEK:
+ result = _lib.SSL_peek(self._ssl, buf, bufsiz)
+ else:
+ result = _lib.SSL_read(self._ssl, buf, bufsiz)
+ self._raise_ssl_error(self._ssl, result)
+ return _ffi.buffer(buf, result)[:]
+
+ read = recv
+
    def recv_into(self, buffer, nbytes=None, flags=None):
        """
        Receive data on the connection and copy it directly into the provided
        buffer, rather than creating a new string.

        :param buffer: The buffer to copy into.
        :param nbytes: (optional) The maximum number of bytes to read into the
            buffer. If not present, defaults to the size of the buffer. If
            larger than the size of the buffer, is reduced to the size of the
            buffer.
        :param flags: (optional) The only supported flag is ``MSG_PEEK``,
            all other flags are ignored.
        :return: The number of bytes read into the buffer.
        """
        # Clamp the read size to what the destination buffer can hold.
        if nbytes is None:
            nbytes = len(buffer)
        else:
            nbytes = min(nbytes, len(buffer))

        # We need to create a temporary buffer. This is annoying, it would be
        # better if we could pass memoryviews straight into the SSL_read call,
        # but right now we can't. Revisit this if CFFI gets that ability.
        buf = _no_zero_allocator("char[]", nbytes)
        if flags is not None and flags & socket.MSG_PEEK:
            result = _lib.SSL_peek(self._ssl, buf, nbytes)
        else:
            result = _lib.SSL_read(self._ssl, buf, nbytes)
        self._raise_ssl_error(self._ssl, result)

        # This strange line is all to avoid a memory copy. The buffer protocol
        # should allow us to assign a CFFI buffer to the LHS of this line, but
        # on CPython 3.3+ that segfaults. As a workaround, we can temporarily
        # wrap it in a memoryview.
        buffer[:result] = memoryview(_ffi.buffer(buf, result))

        return result
+
+ def _handle_bio_errors(self, bio, result):
+ if _lib.BIO_should_retry(bio):
+ if _lib.BIO_should_read(bio):
+ raise WantReadError()
+ elif _lib.BIO_should_write(bio):
+ # TODO: This is untested.
+ raise WantWriteError()
+ elif _lib.BIO_should_io_special(bio):
+ # TODO: This is untested. I think io_special means the socket
+ # BIO has a not-yet connected socket.
+ raise ValueError("BIO_should_io_special")
+ else:
+ # TODO: This is untested.
+ raise ValueError("unknown bio failure")
+ else:
+ # TODO: This is untested.
+ _raise_current_error()
+
+ def bio_read(self, bufsiz):
+ """
+ If the Connection was created with a memory BIO, this method can be
+ used to read bytes from the write end of that memory BIO. Many
+ Connection methods will add bytes which must be read in this manner or
+ the buffer will eventually fill up and the Connection will be able to
+ take no further actions.
+
+ :param bufsiz: The maximum number of bytes to read
+ :return: The string read.
+ """
+ if self._from_ssl is None:
+ raise TypeError("Connection sock was not None")
+
+ if not isinstance(bufsiz, integer_types):
+ raise TypeError("bufsiz must be an integer")
+
+ buf = _no_zero_allocator("char[]", bufsiz)
+ result = _lib.BIO_read(self._from_ssl, buf, bufsiz)
+ if result <= 0:
+ self._handle_bio_errors(self._from_ssl, result)
+
+ return _ffi.buffer(buf, result)[:]
+
    def bio_write(self, buf):
        """
        If the Connection was created with a memory BIO, this method can be
        used to add bytes to the read end of that memory BIO. The Connection
        can then read the bytes (for example, in response to a call to
        :meth:`recv`).

        :param buf: The string to put into the memory BIO.
        :return: The number of bytes written
        :raises TypeError: if this Connection wraps a real socket.
        """
        buf = _text_to_bytes_and_warn("buf", buf)

        # _into_ssl is only set when the Connection was built with socket=None.
        if self._into_ssl is None:
            raise TypeError("Connection sock was not None")

        with _ffi.from_buffer(buf) as data:
            result = _lib.BIO_write(self._into_ssl, data, len(data))
            if result <= 0:
                self._handle_bio_errors(self._into_ssl, result)
        return result
+
+ def renegotiate(self):
+ """
+ Renegotiate the session.
+
+ :return: True if the renegotiation can be started, False otherwise
+ :rtype: bool
+ """
+ if not self.renegotiate_pending():
+ _openssl_assert(_lib.SSL_renegotiate(self._ssl) == 1)
+ return True
+ return False
+
    def do_handshake(self):
        """
        Perform an SSL handshake (usually called after :meth:`renegotiate` or
        one of :meth:`set_accept_state` or :meth:`set_connect_state`). This can
        raise the same exceptions as :meth:`send` and :meth:`recv`.

        :return: None.
        """
        result = _lib.SSL_do_handshake(self._ssl)
        # Raises WantRead/WantWrite etc. on non-success; no-op on success.
        self._raise_ssl_error(self._ssl, result)
+
    def renegotiate_pending(self):
        """
        Check if there's a renegotiation in progress, it will return False once
        a renegotiation is finished.

        :return: Whether there's a renegotiation in progress
        :rtype: bool
        """
        # SSL_renegotiate_pending returns 1 while a renegotiation is pending.
        return _lib.SSL_renegotiate_pending(self._ssl) == 1
+
    def total_renegotiations(self):
        """
        Find out the total number of renegotiations.

        :return: The number of renegotiations.
        :rtype: int
        """
        # Thin wrapper over SSL_total_renegotiations.
        return _lib.SSL_total_renegotiations(self._ssl)
+
    def connect(self, addr):
        """
        Call the :meth:`connect` method of the underlying socket and set up SSL
        on the socket, using the :class:`Context` object supplied to this
        :class:`Connection` object at creation.

        :param addr: A remote address
        :return: What the socket's connect method returns
        """
        # Put the SSL object in client mode before the TCP connect, so the
        # first handshake-triggering I/O acts as a client hello.
        _lib.SSL_set_connect_state(self._ssl)
        return self._socket.connect(addr)
+
    def connect_ex(self, addr):
        """
        Call the :meth:`connect_ex` method of the underlying socket and set up
        SSL on the socket, using the Context object supplied to this Connection
        object at creation. Note that if the :meth:`connect_ex` method of the
        socket doesn't return 0, SSL won't be initialized.

        :param addr: A remove address
        :return: What the socket's connect_ex method returns
        """
        # Bind the socket method first; __getattr__ delegates to the socket.
        connect_ex = self._socket.connect_ex
        self.set_connect_state()
        return connect_ex(addr)
+
+ def accept(self):
+ """
+ Call the :meth:`accept` method of the underlying socket and set up SSL
+ on the returned socket, using the Context object supplied to this
+ :class:`Connection` object at creation.
+
+ :return: A *(conn, addr)* pair where *conn* is the new
+ :class:`Connection` object created, and *address* is as returned by
+ the socket's :meth:`accept`.
+ """
+ client, addr = self._socket.accept()
+ conn = Connection(self._context, client)
+ conn.set_accept_state()
+ return (conn, addr)
+
    def bio_shutdown(self):
        """
        If the Connection was created with a memory BIO, this method can be
        used to indicate that *end of file* has been reached on the read end of
        that memory BIO.

        :return: None
        :raises TypeError: if this Connection wraps a real socket.
        """
        if self._from_ssl is None:
            raise TypeError("Connection sock was not None")

        # EOF is signalled on the BIO that OpenSSL reads from (_into_ssl);
        # _from_ssl/_into_ssl are always None or non-None together.
        _lib.BIO_set_mem_eof_return(self._into_ssl, 0)
+
+ def shutdown(self):
+ """
+ Send the shutdown message to the Connection.
+
+ :return: True if the shutdown completed successfully (i.e. both sides
+ have sent closure alerts), False otherwise (in which case you
+ call :meth:`recv` or :meth:`send` when the connection becomes
+ readable/writeable).
+ """
+ result = _lib.SSL_shutdown(self._ssl)
+ if result < 0:
+ self._raise_ssl_error(self._ssl, result)
+ elif result > 0:
+ return True
+ else:
+ return False
+
+ def get_cipher_list(self):
+ """
+ Retrieve the list of ciphers used by the Connection object.
+
+ :return: A list of native cipher strings.
+ """
+ ciphers = []
+ for i in count():
+ result = _lib.SSL_get_cipher_list(self._ssl, i)
+ if result == _ffi.NULL:
+ break
+ ciphers.append(_native(_ffi.string(result)))
+ return ciphers
+
    def get_client_ca_list(self):
        """
        Get CAs whose certificates are suggested for client authentication.

        :return: If this is a server connection, the list of certificate
            authorities that will be sent or has been sent to the client, as
            controlled by this :class:`Connection`'s :class:`Context`.

            If this is a client connection, the list will be empty until the
            connection with the server is established.

        .. versionadded:: 0.10
        """
        ca_names = _lib.SSL_get_client_CA_list(self._ssl)
        if ca_names == _ffi.NULL:
            # TODO: This is untested.
            return []

        result = []
        for i in range(_lib.sk_X509_NAME_num(ca_names)):
            name = _lib.sk_X509_NAME_value(ca_names, i)
            # Duplicate each name so the returned X509Name objects own their
            # memory independently of OpenSSL's internal stack.
            copy = _lib.X509_NAME_dup(name)
            _openssl_assert(copy != _ffi.NULL)

            # __new__ skips X509Name.__init__; the duplicate is adopted
            # directly and freed by the gc hook when the wrapper dies.
            pyname = X509Name.__new__(X509Name)
            pyname._name = _ffi.gc(copy, _lib.X509_NAME_free)
            result.append(pyname)
        return result
+
+ def makefile(self, *args, **kwargs):
+ """
+ The makefile() method is not implemented, since there is no dup
+ semantics for SSL connections
+
+ :raise: NotImplementedError
+ """
+ raise NotImplementedError(
+ "Cannot make file object of OpenSSL.SSL.Connection"
+ )
+
    def get_app_data(self):
        """
        Retrieve application data as set by :meth:`set_app_data`.

        :return: The application data
        """
        # Stored purely on the Python side; OpenSSL is not involved.
        return self._app_data
+
    def set_app_data(self, data):
        """
        Set application data

        :param data: The application data
        :return: None
        """
        # Stored purely on the Python side; OpenSSL is not involved.
        self._app_data = data
+
    def get_shutdown(self):
        """
        Get the shutdown state of the Connection.

        :return: The shutdown state, a bitvector of SENT_SHUTDOWN,
            RECEIVED_SHUTDOWN.
        """
        # Thin wrapper over SSL_get_shutdown.
        return _lib.SSL_get_shutdown(self._ssl)
+
+ def set_shutdown(self, state):
+ """
+ Set the shutdown state of the Connection.
+
+ :param state: bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.
+ :return: None
+ """
+ if not isinstance(state, integer_types):
+ raise TypeError("state must be an integer")
+
+ _lib.SSL_set_shutdown(self._ssl, state)
+
    def get_state_string(self):
        """
        Retrieve a verbose string detailing the state of the Connection.

        :return: A string representing the state
        :rtype: bytes
        """
        # SSL_state_string_long returns a static C string; copy it to bytes.
        return _ffi.string(_lib.SSL_state_string_long(self._ssl))
+
    def server_random(self):
        """
        Retrieve the random value used with the server hello message.

        :return: A string representing the state, or ``None`` if there is
            no session yet.
        """
        session = _lib.SSL_get_session(self._ssl)
        if session == _ffi.NULL:
            return None
        # Calling with a NULL buffer and length 0 returns the total length
        # of the server random, which sizes the real output buffer.
        length = _lib.SSL_get_server_random(self._ssl, _ffi.NULL, 0)
        _openssl_assert(length > 0)
        outp = _no_zero_allocator("unsigned char[]", length)
        _lib.SSL_get_server_random(self._ssl, outp, length)
        return _ffi.buffer(outp, length)[:]
+
    def client_random(self):
        """
        Retrieve the random value used with the client hello message.

        :return: A string representing the state, or ``None`` if there is
            no session yet.
        """
        session = _lib.SSL_get_session(self._ssl)
        if session == _ffi.NULL:
            return None

        # First call with a NULL buffer returns the required length.
        length = _lib.SSL_get_client_random(self._ssl, _ffi.NULL, 0)
        _openssl_assert(length > 0)
        outp = _no_zero_allocator("unsigned char[]", length)
        _lib.SSL_get_client_random(self._ssl, outp, length)
        return _ffi.buffer(outp, length)[:]
+
    def master_key(self):
        """
        Retrieve the value of the master key for this session.

        :return: A string representing the state, or ``None`` if there is
            no session yet.
        """
        session = _lib.SSL_get_session(self._ssl)
        if session == _ffi.NULL:
            return None

        # First call with a NULL buffer returns the master key's length.
        length = _lib.SSL_SESSION_get_master_key(session, _ffi.NULL, 0)
        _openssl_assert(length > 0)
        outp = _no_zero_allocator("unsigned char[]", length)
        _lib.SSL_SESSION_get_master_key(session, outp, length)
        return _ffi.buffer(outp, length)[:]
+
def export_keying_material(self, label, olen, context=None):
    """
    Obtain keying material for application use.

    :param: label - a disambiguating label string as described in RFC 5705
    :param: olen - the length of the exported key material in bytes
    :param: context - a per-association context value
    :return: the exported key material bytes or None
    """
    buf = _no_zero_allocator("unsigned char[]", olen)
    if context is None:
        context_buf, context_len, use_context = _ffi.NULL, 0, 0
    else:
        context_buf, context_len, use_context = context, len(context), 1
    rc = _lib.SSL_export_keying_material(
        self._ssl,
        buf,
        olen,
        label,
        len(label),
        context_buf,
        context_len,
        use_context,
    )
    _openssl_assert(rc == 1)
    return _ffi.buffer(buf, olen)[:]
+
+ def sock_shutdown(self, *args, **kwargs):
+ """
+ Call the :meth:`shutdown` method of the underlying socket.
+ See :manpage:`shutdown(2)`.
+
+ :return: What the socket's shutdown() method returns
+ """
+ return self._socket.shutdown(*args, **kwargs)
+
def get_certificate(self):
    """
    Retrieve the local certificate (if any)

    :return: The local certificate
    """
    cert = _lib.SSL_get_certificate(self._ssl)
    if cert == _ffi.NULL:
        return None
    # SSL_get_certificate hands out a borrowed reference; take one of
    # our own before wrapping it in an X509 object.
    _lib.X509_up_ref(cert)
    return X509._from_raw_x509_ptr(cert)
+
+ def get_peer_certificate(self):
+ """
+ Retrieve the other side's certificate (if any)
+
+ :return: The peer's certificate
+ """
+ cert = _lib.SSL_get_peer_certificate(self._ssl)
+ if cert != _ffi.NULL:
+ return X509._from_raw_x509_ptr(cert)
+ return None
+
@staticmethod
def _cert_stack_to_list(cert_stack):
    """
    Internal helper to convert a STACK_OF(X509) to a list of X509
    instances.
    """
    certs = []
    for idx in range(_lib.sk_X509_num(cert_stack)):
        raw = _lib.sk_X509_value(cert_stack, idx)
        _openssl_assert(raw != _ffi.NULL)
        # The stack only borrows each certificate; bump the refcount so
        # the wrapper owns a reference.
        _openssl_assert(_lib.X509_up_ref(raw) >= 1)
        certs.append(X509._from_raw_x509_ptr(raw))
    return certs
+
+ def get_peer_cert_chain(self):
+ """
+ Retrieve the other side's certificate (if any)
+
+ :return: A list of X509 instances giving the peer's certificate chain,
+ or None if it does not have one.
+ """
+ cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl)
+ if cert_stack == _ffi.NULL:
+ return None
+
+ return self._cert_stack_to_list(cert_stack)
+
def get_verified_chain(self):
    """
    Retrieve the verified certificate chain of the peer including the
    peer's end entity certificate. It must be called after a session has
    been successfully established. If peer verification was not successful
    the chain may be incomplete, invalid, or None.

    :return: A list of X509 instances giving the peer's verified
        certificate chain, or None if it does not have one.

    .. versionadded:: 20.0
    """
    # SSL_get0_verified_chain requires OpenSSL 1.1+.
    stack = _lib.SSL_get0_verified_chain(self._ssl)
    if stack == _ffi.NULL:
        return None
    return self._cert_stack_to_list(stack)
+
+ def want_read(self):
+ """
+ Checks if more data has to be read from the transport layer to complete
+ an operation.
+
+ :return: True iff more data has to be read
+ """
+ return _lib.SSL_want_read(self._ssl)
+
+ def want_write(self):
+ """
+ Checks if there is data to write to the transport layer to complete an
+ operation.
+
+ :return: True iff there is data to write
+ """
+ return _lib.SSL_want_write(self._ssl)
+
+ def set_accept_state(self):
+ """
+ Set the connection to work in server mode. The handshake will be
+ handled automatically by read/write.
+
+ :return: None
+ """
+ _lib.SSL_set_accept_state(self._ssl)
+
+ def set_connect_state(self):
+ """
+ Set the connection to work in client mode. The handshake will be
+ handled automatically by read/write.
+
+ :return: None
+ """
+ _lib.SSL_set_connect_state(self._ssl)
+
def get_session(self):
    """
    Returns the Session currently used.

    :return: An instance of :class:`OpenSSL.SSL.Session` or
        :obj:`None` if no session exists.

    .. versionadded:: 0.14
    """
    raw = _lib.SSL_get1_session(self._ssl)
    if raw == _ffi.NULL:
        return None

    # Bypass Session.__init__ and wrap the SSL_SESSION we already own
    # (SSL_get1_session increments its refcount), freeing it with the
    # wrapper.
    wrapper = Session.__new__(Session)
    wrapper._session = _ffi.gc(raw, _lib.SSL_SESSION_free)
    return wrapper
+
def set_session(self, session):
    """
    Set the session to be used when the TLS/SSL connection is established.

    :param session: A Session instance representing the session to use.
    :returns: None

    .. versionadded:: 0.14
    """
    if not isinstance(session, Session):
        raise TypeError("session must be a Session instance")

    rc = _lib.SSL_set_session(self._ssl, session._session)
    _openssl_assert(rc == 1)
+
def _get_finished_message(self, function):
    """
    Helper to implement :meth:`get_finished` and
    :meth:`get_peer_finished`.

    :param function: Either :data:`SSL_get_finished`: or
        :data:`SSL_get_peer_finished`.

    :return: :data:`None` if the desired message has not yet been
        received, otherwise the contents of the message.
    :rtype: :class:`bytes` or :class:`NoneType`
    """
    # The OpenSSL implementation calls memcpy() unconditionally, and
    # memcpy(NULL, src, 0) is not guaranteed to be well-defined by the
    # C standard (section 7.1.4, paragraph 1).  So probe for the size
    # with a tiny but non-NULL buffer instead of passing NULL.
    probe = _ffi.new("char[]", 0)
    required = function(self._ssl, probe, 0)
    if required == 0:
        # No Finished message has been exchanged so far.
        return None

    out = _no_zero_allocator("char[]", required)
    function(self._ssl, out, required)
    return _ffi.buffer(out, required)[:]
+
+ def get_finished(self):
+ """
+ Obtain the latest TLS Finished message that we sent.
+
+ :return: The contents of the message or :obj:`None` if the TLS
+ handshake has not yet completed.
+ :rtype: :class:`bytes` or :class:`NoneType`
+
+ .. versionadded:: 0.15
+ """
+ return self._get_finished_message(_lib.SSL_get_finished)
+
+ def get_peer_finished(self):
+ """
+ Obtain the latest TLS Finished message that we received from the peer.
+
+ :return: The contents of the message or :obj:`None` if the TLS
+ handshake has not yet completed.
+ :rtype: :class:`bytes` or :class:`NoneType`
+
+ .. versionadded:: 0.15
+ """
+ return self._get_finished_message(_lib.SSL_get_peer_finished)
+
def get_cipher_name(self):
    """
    Obtain the name of the currently used cipher.

    :returns: The name of the currently used cipher or :obj:`None`
        if no connection has been established.
    :rtype: :class:`unicode` or :class:`NoneType`

    .. versionadded:: 0.15
    """
    cipher = _lib.SSL_get_current_cipher(self._ssl)
    if cipher == _ffi.NULL:
        return None
    raw_name = _ffi.string(_lib.SSL_CIPHER_get_name(cipher))
    return raw_name.decode("utf-8")
+
+ def get_cipher_bits(self):
+ """
+ Obtain the number of secret bits of the currently used cipher.
+
+ :returns: The number of secret bits of the currently used cipher
+ or :obj:`None` if no connection has been established.
+ :rtype: :class:`int` or :class:`NoneType`
+
+ .. versionadded:: 0.15
+ """
+ cipher = _lib.SSL_get_current_cipher(self._ssl)
+ if cipher == _ffi.NULL:
+ return None
+ else:
+ return _lib.SSL_CIPHER_get_bits(cipher, _ffi.NULL)
+
+ def get_cipher_version(self):
+ """
+ Obtain the protocol version of the currently used cipher.
+
+ :returns: The protocol name of the currently used cipher
+ or :obj:`None` if no connection has been established.
+ :rtype: :class:`unicode` or :class:`NoneType`
+
+ .. versionadded:: 0.15
+ """
+ cipher = _lib.SSL_get_current_cipher(self._ssl)
+ if cipher == _ffi.NULL:
+ return None
+ else:
+ version = _ffi.string(_lib.SSL_CIPHER_get_version(cipher))
+ return version.decode("utf-8")
+
def get_protocol_version_name(self):
    """
    Retrieve the protocol version of the current connection.

    :returns: The TLS version of the current connection, for example
        the value for TLS 1.2 would be ``TLSv1.2``, or ``Unknown``
        for connections that were not successfully established.
    :rtype: :class:`unicode`
    """
    version = _ffi.string(_lib.SSL_get_version(self._ssl))
    return version.decode("utf-8")
+
def get_protocol_version(self):
    """
    Retrieve the SSL or TLS protocol version of the current connection.

    :returns: The TLS version of the current connection as OpenSSL's raw
        version constant. For example, it will return ``0x301`` (769)
        for connections made over TLS version 1.
    :rtype: :class:`int`
    """
    version = _lib.SSL_version(self._ssl)
    return version
+
@_requires_alpn
def set_alpn_protos(self, protos):
    """
    Specify the client's ALPN protocol list.

    These protocols are offered to the server during protocol negotiation.

    :param protos: A list of the protocols to be offered to the server.
        This list should be a Python list of bytestrings representing the
        protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
    """
    # ALPN wire format: each protocol is preceded by its one-byte length.
    parts = []
    for proto in protos:
        parts.append(int2byte(len(proto)))
        parts.append(proto)
    protostr = b"".join(parts)

    # OpenSSL copies the data out immediately, so the C buffer does not
    # need to outlive this call.
    input_str = _ffi.new("unsigned char[]", protostr)

    # https://www.openssl.org/docs/man1.1.0/man3/SSL_CTX_set_alpn_protos.html:
    # SSL_set_alpn_protos() reverses the usual return-value convention:
    # it returns 0 on success and non-0 on failure.
    _openssl_assert(
        _lib.SSL_set_alpn_protos(self._ssl, input_str, len(protostr)) == 0
    )
+
@_requires_alpn
def get_alpn_proto_negotiated(self):
    """
    Get the protocol that was negotiated by ALPN.

    :returns: A bytestring of the protocol name. If no protocol has been
        negotiated yet, returns an empty string.
    """
    data = _ffi.new("unsigned char **")
    data_len = _ffi.new("unsigned int *")

    _lib.SSL_get0_alpn_selected(self._ssl, data, data_len)

    # Bug fix: the previous check (``if not data_len``) tested the CFFI
    # pointer object itself, which is always truthy for a fresh
    # ffi.new(), so the early return never fired.  Test the length the
    # pointer refers to instead: when no protocol was negotiated,
    # OpenSSL reports length 0 (and *data may be NULL, which must not
    # be passed to _ffi.buffer).
    if data_len[0] == 0:
        return b""

    return _ffi.buffer(data[0], data_len[0])[:]
+
def request_ocsp(self):
    """
    Called to request that the server sends stapled OCSP data, if
    available. If this is not called on the client side then the server
    will not send OCSP data. Should be used in conjunction with
    :meth:`Context.set_ocsp_client_callback`.
    """
    result = _lib.SSL_set_tlsext_status_type(
        self._ssl, _lib.TLSEXT_STATUSTYPE_ocsp
    )
    _openssl_assert(result == 1)
+
+
+# This is similar to the initialization calls at the end of OpenSSL/crypto.py
+# but is exercised mostly by the Context initializer.
+_lib.SSL_library_init()
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/__init__.py b/contrib/python/pyOpenSSL/py3/OpenSSL/__init__.py
new file mode 100644
index 0000000000..11e896a4ea
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/__init__.py
@@ -0,0 +1,32 @@
+# Copyright (C) AB Strakt
+# See LICENSE for details.
+
+"""
+pyOpenSSL - A simple wrapper around the OpenSSL library
+"""
+
+from OpenSSL import crypto, SSL
+from OpenSSL.version import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+
+__all__ = [
+ "SSL",
+ "crypto",
+ "__author__",
+ "__copyright__",
+ "__email__",
+ "__license__",
+ "__summary__",
+ "__title__",
+ "__uri__",
+ "__version__",
+]
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/_util.py b/contrib/python/pyOpenSSL/py3/OpenSSL/_util.py
new file mode 100644
index 0000000000..53c0b9e573
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/_util.py
@@ -0,0 +1,155 @@
+import sys
+import warnings
+
+from six import PY2, text_type
+
+from cryptography.hazmat.bindings.openssl.binding import Binding
+
+
+binding = Binding()
+binding.init_static_locks()
+ffi = binding.ffi
+lib = binding.lib
+
+
+# This is a special CFFI allocator that does not bother to zero its memory
+# after allocation. This has vastly better performance on large allocations and
+# so should be used whenever we don't need the memory zeroed out.
+no_zero_allocator = ffi.new_allocator(should_clear_after_alloc=False)
+
+
def text(charp):
    """
    Get a native string type representing of the given CFFI ``char*`` object.

    :param charp: A C-style string represented using CFFI.

    :return: :class:`str`
    """
    if charp:
        return native(ffi.string(charp))
    # A NULL char* maps to the empty native string.
    return ""
+
+
def exception_from_error_queue(exception_type):
    """
    Convert an OpenSSL library failure into a Python exception.

    When a call to the native OpenSSL library fails, this is usually signalled
    by the return value, and an error code is stored in an error queue
    associated with the current thread. The err library provides functions to
    obtain these error codes and textual error messages.
    """
    errors = []

    # Drain the thread-local OpenSSL error queue; ERR_get_error returns
    # 0 once the queue is empty.
    code = lib.ERR_get_error()
    while code != 0:
        errors.append(
            (
                text(lib.ERR_lib_error_string(code)),
                text(lib.ERR_func_error_string(code)),
                text(lib.ERR_reason_error_string(code)),
            )
        )
        code = lib.ERR_get_error()

    raise exception_type(errors)
+
+
+def make_assert(error):
+ """
+ Create an assert function that uses :func:`exception_from_error_queue` to
+ raise an exception wrapped by *error*.
+ """
+
+ def openssl_assert(ok):
+ """
+ If *ok* is not True, retrieve the error from OpenSSL and raise it.
+ """
+ if ok is not True:
+ exception_from_error_queue(error)
+
+ return openssl_assert
+
+
def native(s):
    """
    Convert :py:class:`bytes` or :py:class:`unicode` to the native
    :py:class:`str` type, using UTF-8 encoding if conversion is necessary.

    :raise UnicodeError: The input string is not UTF-8 decodeable.

    :raise TypeError: The input is neither :py:class:`bytes` nor
        :py:class:`unicode`.
    """
    if isinstance(s, bytes):
        # Python 2's native str is bytes; Python 3's requires decoding.
        return s if PY2 else s.decode("utf-8")
    if isinstance(s, text_type):
        return s.encode("utf-8") if PY2 else s
    raise TypeError("%r is neither bytes nor unicode" % s)
+
+
def path_string(s):
    """
    Convert a Python string to a :py:class:`bytes` string identifying the same
    path and which can be passed into an OpenSSL API accepting a filename.

    :param s: An instance of :py:class:`bytes` or :py:class:`unicode`.

    :return: An instance of :py:class:`bytes`.
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, text_type):
        # Encode text paths with the filesystem encoding so OpenSSL can
        # open the same file the caller named.
        return s.encode(sys.getfilesystemencoding())
    raise TypeError("Path must be represented as bytes or unicode string")
+
+
+if PY2:
+
+ def byte_string(s):
+ return s
+
+
+else:
+
+ def byte_string(s):
+ return s.encode("charmap")
+
+
+# A marker object to observe whether some optional arguments are passed any
+# value or not.
+UNSPECIFIED = object()
+
+_TEXT_WARNING = (
+ text_type.__name__ + " for {0} is no longer accepted, use bytes"
+)
+
+
def text_to_bytes_and_warn(label, obj):
    """
    If ``obj`` is text, emit a warning that it should be bytes instead and try
    to convert it to bytes automatically.

    :param str label: The name of the parameter from which ``obj`` was taken
        (so a developer can easily find the source of the problem and correct
        it).

    :return: If ``obj`` is the text string type, a ``bytes`` object giving the
        UTF-8 encoding of that text is returned. Otherwise, ``obj`` itself is
        returned.
    """
    if not isinstance(obj, text_type):
        return obj
    # stacklevel=3 points the warning at our caller's caller, i.e. the
    # user code that passed the text value in.
    warnings.warn(
        _TEXT_WARNING.format(label),
        category=DeprecationWarning,
        stacklevel=3,
    )
    return obj.encode("utf-8")
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/crypto.py b/contrib/python/pyOpenSSL/py3/OpenSSL/crypto.py
new file mode 100644
index 0000000000..eda4af6f9d
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/crypto.py
@@ -0,0 +1,3288 @@
+import calendar
+import datetime
+
+from base64 import b16encode
+from functools import partial
+from operator import __eq__, __ne__, __lt__, __le__, __gt__, __ge__
+
+from six import (
+ integer_types as _integer_types,
+ text_type as _text_type,
+ PY2 as _PY2,
+)
+
+from cryptography import utils, x509
+from cryptography.hazmat.primitives.asymmetric import dsa, rsa
+
+from OpenSSL._util import (
+ ffi as _ffi,
+ lib as _lib,
+ exception_from_error_queue as _exception_from_error_queue,
+ byte_string as _byte_string,
+ native as _native,
+ path_string as _path_string,
+ UNSPECIFIED as _UNSPECIFIED,
+ text_to_bytes_and_warn as _text_to_bytes_and_warn,
+ make_assert as _make_assert,
+)
+
+__all__ = [
+ "FILETYPE_PEM",
+ "FILETYPE_ASN1",
+ "FILETYPE_TEXT",
+ "TYPE_RSA",
+ "TYPE_DSA",
+ "Error",
+ "PKey",
+ "get_elliptic_curves",
+ "get_elliptic_curve",
+ "X509Name",
+ "X509Extension",
+ "X509Req",
+ "X509",
+ "X509StoreFlags",
+ "X509Store",
+ "X509StoreContextError",
+ "X509StoreContext",
+ "load_certificate",
+ "dump_certificate",
+ "dump_publickey",
+ "dump_privatekey",
+ "Revoked",
+ "CRL",
+ "PKCS7",
+ "PKCS12",
+ "NetscapeSPKI",
+ "load_publickey",
+ "load_privatekey",
+ "dump_certificate_request",
+ "load_certificate_request",
+ "sign",
+ "verify",
+ "dump_crl",
+ "load_crl",
+ "load_pkcs7_data",
+ "load_pkcs12",
+]
+
+FILETYPE_PEM = _lib.SSL_FILETYPE_PEM
+FILETYPE_ASN1 = _lib.SSL_FILETYPE_ASN1
+
+# TODO This was an API mistake. OpenSSL has no such constant.
+FILETYPE_TEXT = 2 ** 16 - 1
+
+TYPE_RSA = _lib.EVP_PKEY_RSA
+TYPE_DSA = _lib.EVP_PKEY_DSA
+TYPE_DH = _lib.EVP_PKEY_DH
+TYPE_EC = _lib.EVP_PKEY_EC
+
+
+class Error(Exception):
+ """
+ An error occurred in an `OpenSSL.crypto` API.
+ """
+
+
+_raise_current_error = partial(_exception_from_error_queue, Error)
+_openssl_assert = _make_assert(Error)
+
+
+def _get_backend():
+ """
+ Importing the backend from cryptography has the side effect of activating
+ the osrandom engine. This mutates the global state of OpenSSL in the
+ process and causes issues for various programs that use subinterpreters or
+ embed Python. By putting the import in this function we can avoid
+ triggering this side effect unless _get_backend is called.
+ """
+ from cryptography.hazmat.backends.openssl.backend import backend
+
+ return backend
+
+
+def _untested_error(where):
+ """
+ An OpenSSL API failed somehow. Additionally, the failure which was
+ encountered isn't one that's exercised by the test suite so future behavior
+ of pyOpenSSL is now somewhat less predictable.
+ """
+ raise RuntimeError("Unknown %s failure" % (where,))
+
+
def _new_mem_buf(buffer=None):
    """
    Allocate a new OpenSSL memory BIO.

    Arrange for the garbage collector to clean it up automatically.

    :param buffer: None or some bytes to use to put into the BIO so that they
        can be read out.
    """
    if buffer is None:
        # Writable BIO: a plain BIO_free destructor is sufficient.
        bio = _lib.BIO_new(_lib.BIO_s_mem())
        free = _lib.BIO_free
    else:
        # Read-only BIO backed by a C-level copy of *buffer*.
        data = _ffi.new("char[]", buffer)
        bio = _lib.BIO_new_mem_buf(data, len(buffer))

        # Keep the memory alive as long as the bio is alive!
        # Binding *data* as a default argument ties the lifetime of the
        # C buffer to this destructor, and hence to the BIO itself.
        def free(bio, ref=data):
            return _lib.BIO_free(bio)

    _openssl_assert(bio != _ffi.NULL)

    bio = _ffi.gc(bio, free)
    return bio
+
+
def _bio_to_string(bio):
    """
    Copy the contents of an OpenSSL BIO object into a Python byte string.
    """
    data_ptr = _ffi.new("char**")
    # BIO_get_mem_data stores a pointer to the BIO's internal buffer in
    # data_ptr and returns that buffer's length.
    num_bytes = _lib.BIO_get_mem_data(bio, data_ptr)
    return _ffi.buffer(data_ptr[0], num_bytes)[:]
+
+
def _set_asn1_time(boundary, when):
    """
    Set the time value of an ASN1 time object.

    @param boundary: An ASN1_TIME pointer (or an object safely
        castable to that type) which will have its value set.
    @param when: A string representation of the desired time value.

    @raise TypeError: If C{when} is not a L{bytes} string.
    @raise ValueError: If C{when} does not represent a time in the required
        format.
    @raise RuntimeError: If the time value cannot be set for some other
        (unspecified) reason.
    """
    if not isinstance(when, bytes):
        raise TypeError("when must be a byte string")

    # ASN1_TIME_set_string returns 0 when *when* is not a valid ASN.1
    # TIME representation.
    set_result = _lib.ASN1_TIME_set_string(boundary, when)
    if set_result == 0:
        raise ValueError("Invalid string")
+
+
+def _get_asn1_time(timestamp):
+ """
+ Retrieve the time value of an ASN1 time object.
+
+ @param timestamp: An ASN1_GENERALIZEDTIME* (or an object safely castable to
+ that type) from which the time value will be retrieved.
+
+ @return: The time value from C{timestamp} as a L{bytes} string in a certain
+ format. Or C{None} if the object contains no time value.
+ """
+ string_timestamp = _ffi.cast("ASN1_STRING*", timestamp)
+ if _lib.ASN1_STRING_length(string_timestamp) == 0:
+ return None
+ elif (
+ _lib.ASN1_STRING_type(string_timestamp) == _lib.V_ASN1_GENERALIZEDTIME
+ ):
+ return _ffi.string(_lib.ASN1_STRING_data(string_timestamp))
+ else:
+ generalized_timestamp = _ffi.new("ASN1_GENERALIZEDTIME**")
+ _lib.ASN1_TIME_to_generalizedtime(timestamp, generalized_timestamp)
+ if generalized_timestamp[0] == _ffi.NULL:
+ # This may happen:
+ # - if timestamp was not an ASN1_TIME
+ # - if allocating memory for the ASN1_GENERALIZEDTIME failed
+ # - if a copy of the time data from timestamp cannot be made for
+ # the newly allocated ASN1_GENERALIZEDTIME
+ #
+ # These are difficult to test. cffi enforces the ASN1_TIME type.
+ # Memory allocation failures are a pain to trigger
+ # deterministically.
+ _untested_error("ASN1_TIME_to_generalizedtime")
+ else:
+ string_timestamp = _ffi.cast(
+ "ASN1_STRING*", generalized_timestamp[0]
+ )
+ string_data = _lib.ASN1_STRING_data(string_timestamp)
+ string_result = _ffi.string(string_data)
+ _lib.ASN1_GENERALIZEDTIME_free(generalized_timestamp[0])
+ return string_result
+
+
+class _X509NameInvalidator(object):
+ def __init__(self):
+ self._names = []
+
+ def add(self, name):
+ self._names.append(name)
+
+ def clear(self):
+ for name in self._names:
+ # Breaks the object, but also prevents UAF!
+ del name._name
+
+
+class PKey(object):
+ """
+ A class representing an DSA or RSA public key or key pair.
+ """
+
+ _only_public = False
+ _initialized = True
+
+ def __init__(self):
+ pkey = _lib.EVP_PKEY_new()
+ self._pkey = _ffi.gc(pkey, _lib.EVP_PKEY_free)
+ self._initialized = False
+
+ def to_cryptography_key(self):
+ """
+ Export as a ``cryptography`` key.
+
+ :rtype: One of ``cryptography``'s `key interfaces`_.
+
+ .. _key interfaces: https://cryptography.io/en/latest/hazmat/\
+ primitives/asymmetric/rsa/#key-interfaces
+
+ .. versionadded:: 16.1.0
+ """
+ from cryptography.hazmat.primitives.serialization import (
+ load_der_private_key,
+ load_der_public_key,
+ )
+
+ backend = _get_backend()
+ if self._only_public:
+ der = dump_publickey(FILETYPE_ASN1, self)
+ return load_der_public_key(der, backend)
+ else:
+ der = dump_privatekey(FILETYPE_ASN1, self)
+ return load_der_private_key(der, None, backend)
+
+ @classmethod
+ def from_cryptography_key(cls, crypto_key):
+ """
+ Construct based on a ``cryptography`` *crypto_key*.
+
+ :param crypto_key: A ``cryptography`` key.
+ :type crypto_key: One of ``cryptography``'s `key interfaces`_.
+
+ :rtype: PKey
+
+ .. versionadded:: 16.1.0
+ """
+ if not isinstance(
+ crypto_key,
+ (
+ rsa.RSAPublicKey,
+ rsa.RSAPrivateKey,
+ dsa.DSAPublicKey,
+ dsa.DSAPrivateKey,
+ ),
+ ):
+ raise TypeError("Unsupported key type")
+
+ from cryptography.hazmat.primitives.serialization import (
+ Encoding,
+ NoEncryption,
+ PrivateFormat,
+ PublicFormat,
+ )
+
+ if isinstance(crypto_key, (rsa.RSAPublicKey, dsa.DSAPublicKey)):
+ return load_publickey(
+ FILETYPE_ASN1,
+ crypto_key.public_bytes(
+ Encoding.DER, PublicFormat.SubjectPublicKeyInfo
+ ),
+ )
+ else:
+ der = crypto_key.private_bytes(
+ Encoding.DER, PrivateFormat.PKCS8, NoEncryption()
+ )
+ return load_privatekey(FILETYPE_ASN1, der)
+
+ def generate_key(self, type, bits):
+ """
+ Generate a key pair of the given type, with the given number of bits.
+
+ This generates a key "into" the this object.
+
+ :param type: The key type.
+ :type type: :py:data:`TYPE_RSA` or :py:data:`TYPE_DSA`
+ :param bits: The number of bits.
+ :type bits: :py:data:`int` ``>= 0``
+ :raises TypeError: If :py:data:`type` or :py:data:`bits` isn't
+ of the appropriate type.
+ :raises ValueError: If the number of bits isn't an integer of
+ the appropriate size.
+ :return: ``None``
+ """
+ if not isinstance(type, int):
+ raise TypeError("type must be an integer")
+
+ if not isinstance(bits, int):
+ raise TypeError("bits must be an integer")
+
+ if type == TYPE_RSA:
+ if bits <= 0:
+ raise ValueError("Invalid number of bits")
+
+ # TODO Check error return
+ exponent = _lib.BN_new()
+ exponent = _ffi.gc(exponent, _lib.BN_free)
+ _lib.BN_set_word(exponent, _lib.RSA_F4)
+
+ rsa = _lib.RSA_new()
+
+ result = _lib.RSA_generate_key_ex(rsa, bits, exponent, _ffi.NULL)
+ _openssl_assert(result == 1)
+
+ result = _lib.EVP_PKEY_assign_RSA(self._pkey, rsa)
+ _openssl_assert(result == 1)
+
+ elif type == TYPE_DSA:
+ dsa = _lib.DSA_new()
+ _openssl_assert(dsa != _ffi.NULL)
+
+ dsa = _ffi.gc(dsa, _lib.DSA_free)
+ res = _lib.DSA_generate_parameters_ex(
+ dsa, bits, _ffi.NULL, 0, _ffi.NULL, _ffi.NULL, _ffi.NULL
+ )
+ _openssl_assert(res == 1)
+
+ _openssl_assert(_lib.DSA_generate_key(dsa) == 1)
+ _openssl_assert(_lib.EVP_PKEY_set1_DSA(self._pkey, dsa) == 1)
+ else:
+ raise Error("No such key type")
+
+ self._initialized = True
+
+ def check(self):
+ """
+ Check the consistency of an RSA private key.
+
+ This is the Python equivalent of OpenSSL's ``RSA_check_key``.
+
+ :return: ``True`` if key is consistent.
+
+ :raise OpenSSL.crypto.Error: if the key is inconsistent.
+
+ :raise TypeError: if the key is of a type which cannot be checked.
+ Only RSA keys can currently be checked.
+ """
+ if self._only_public:
+ raise TypeError("public key only")
+
+ if _lib.EVP_PKEY_type(self.type()) != _lib.EVP_PKEY_RSA:
+ raise TypeError("key type unsupported")
+
+ rsa = _lib.EVP_PKEY_get1_RSA(self._pkey)
+ rsa = _ffi.gc(rsa, _lib.RSA_free)
+ result = _lib.RSA_check_key(rsa)
+ if result == 1:
+ return True
+ _raise_current_error()
+
+ def type(self):
+ """
+ Returns the type of the key
+
+ :return: The type of the key.
+ """
+ return _lib.EVP_PKEY_id(self._pkey)
+
+ def bits(self):
+ """
+ Returns the number of bits of the key
+
+ :return: The number of bits of the key.
+ """
+ return _lib.EVP_PKEY_bits(self._pkey)
+
+
+class _EllipticCurve(object):
+ """
+ A representation of a supported elliptic curve.
+
+ @cvar _curves: :py:obj:`None` until an attempt is made to load the curves.
+ Thereafter, a :py:type:`set` containing :py:type:`_EllipticCurve`
+ instances each of which represents one curve supported by the system.
+ @type _curves: :py:type:`NoneType` or :py:type:`set`
+ """
+
+ _curves = None
+
+ if not _PY2:
+ # This only necessary on Python 3. Moreover, it is broken on Python 2.
+ def __ne__(self, other):
+ """
+ Implement cooperation with the right-hand side argument of ``!=``.
+
+ Python 3 seems to have dropped this cooperation in this very narrow
+ circumstance.
+ """
+ if isinstance(other, _EllipticCurve):
+ return super(_EllipticCurve, self).__ne__(other)
+ return NotImplemented
+
+ @classmethod
+ def _load_elliptic_curves(cls, lib):
+ """
+ Get the curves supported by OpenSSL.
+
+ :param lib: The OpenSSL library binding object.
+
+ :return: A :py:type:`set` of ``cls`` instances giving the names of the
+ elliptic curves the underlying library supports.
+ """
+ num_curves = lib.EC_get_builtin_curves(_ffi.NULL, 0)
+ builtin_curves = _ffi.new("EC_builtin_curve[]", num_curves)
+ # The return value on this call should be num_curves again. We
+ # could check it to make sure but if it *isn't* then.. what could
+ # we do? Abort the whole process, I suppose...? -exarkun
+ lib.EC_get_builtin_curves(builtin_curves, num_curves)
+ return set(cls.from_nid(lib, c.nid) for c in builtin_curves)
+
+ @classmethod
+ def _get_elliptic_curves(cls, lib):
+ """
+ Get, cache, and return the curves supported by OpenSSL.
+
+ :param lib: The OpenSSL library binding object.
+
+ :return: A :py:type:`set` of ``cls`` instances giving the names of the
+ elliptic curves the underlying library supports.
+ """
+ if cls._curves is None:
+ cls._curves = cls._load_elliptic_curves(lib)
+ return cls._curves
+
+ @classmethod
+ def from_nid(cls, lib, nid):
+ """
+ Instantiate a new :py:class:`_EllipticCurve` associated with the given
+ OpenSSL NID.
+
+ :param lib: The OpenSSL library binding object.
+
+ :param nid: The OpenSSL NID the resulting curve object will represent.
+ This must be a curve NID (and not, for example, a hash NID) or
+ subsequent operations will fail in unpredictable ways.
+ :type nid: :py:class:`int`
+
+ :return: The curve object.
+ """
+ return cls(lib, nid, _ffi.string(lib.OBJ_nid2sn(nid)).decode("ascii"))
+
+ def __init__(self, lib, nid, name):
+ """
+ :param _lib: The :py:mod:`cryptography` binding instance used to
+ interface with OpenSSL.
+
+ :param _nid: The OpenSSL NID identifying the curve this object
+ represents.
+ :type _nid: :py:class:`int`
+
+ :param name: The OpenSSL short name identifying the curve this object
+ represents.
+ :type name: :py:class:`unicode`
+ """
+ self._lib = lib
+ self._nid = nid
+ self.name = name
+
+ def __repr__(self):
+ return "<Curve %r>" % (self.name,)
+
+ def _to_EC_KEY(self):
+ """
+ Create a new OpenSSL EC_KEY structure initialized to use this curve.
+
+ The structure is automatically garbage collected when the Python object
+ is garbage collected.
+ """
+ key = self._lib.EC_KEY_new_by_curve_name(self._nid)
+ return _ffi.gc(key, _lib.EC_KEY_free)
+
+
+def get_elliptic_curves():
+ """
+ Return a set of objects representing the elliptic curves supported in the
+ OpenSSL build in use.
+
+ The curve objects have a :py:class:`unicode` ``name`` attribute by which
+ they identify themselves.
+
+ The curve objects are useful as values for the argument accepted by
+ :py:meth:`Context.set_tmp_ecdh` to specify which elliptical curve should be
+ used for ECDHE key exchange.
+ """
+ return _EllipticCurve._get_elliptic_curves(_lib)
+
+
def get_elliptic_curve(name):
    """
    Return a single curve object selected by name.

    See :py:func:`get_elliptic_curves` for information about curve objects.

    :param name: The OpenSSL short name identifying the curve object to
        retrieve.
    :type name: :py:class:`unicode`

    If the named curve is not supported then :py:class:`ValueError` is raised.
    """
    for candidate in get_elliptic_curves():
        if candidate.name == name:
            return candidate
    raise ValueError("unknown curve name", name)
+
+
class X509Name(object):
    """
    An X.509 Distinguished Name.

    Attribute access (``name.CN``, ``name.O = "Org"`` and so on) is mapped
    onto the underlying OpenSSL ``X509_NAME`` entries by NID lookup, so any
    short or long name OpenSSL knows (``OBJ_txt2nid``) works as an attribute.

    :ivar countryName: The country of the entity.
    :ivar C: Alias for :py:attr:`countryName`.

    :ivar stateOrProvinceName: The state or province of the entity.
    :ivar ST: Alias for :py:attr:`stateOrProvinceName`.

    :ivar localityName: The locality of the entity.
    :ivar L: Alias for :py:attr:`localityName`.

    :ivar organizationName: The organization name of the entity.
    :ivar O: Alias for :py:attr:`organizationName`.

    :ivar organizationalUnitName: The organizational unit of the entity.
    :ivar OU: Alias for :py:attr:`organizationalUnitName`

    :ivar commonName: The common name of the entity.
    :ivar CN: Alias for :py:attr:`commonName`.

    :ivar emailAddress: The e-mail address of the entity.
    """

    def __init__(self, name):
        """
        Create a new X509Name, copying the given X509Name instance.

        :param name: The name to copy.
        :type name: :py:class:`X509Name`
        """
        name = _lib.X509_NAME_dup(name._name)
        self._name = _ffi.gc(name, _lib.X509_NAME_free)

    def __setattr__(self, name, value):
        # Underscore-prefixed attributes (e.g. _name, _owner) are ordinary
        # Python attributes, not X509_NAME entries.
        if name.startswith("_"):
            return super(X509Name, self).__setattr__(name, value)

        # Note: we really do not want str subclasses here, so we do not use
        # isinstance.
        if type(name) is not str:
            # BUGFIX: the message previously reported type(value), but the
            # complaint is about the attribute *name*'s type.
            raise TypeError(
                "attribute name must be string, not '%.200s'"
                % (type(name).__name__,)
            )

        nid = _lib.OBJ_txt2nid(_byte_string(name))
        if nid == _lib.NID_undef:
            # OBJ_txt2nid pushes an entry onto OpenSSL's error queue on
            # failure; drain it so it does not confuse a later caller.
            try:
                _raise_current_error()
            except Error:
                pass
            raise AttributeError("No such attribute")

        # If there's an old entry for this NID, remove it
        for i in range(_lib.X509_NAME_entry_count(self._name)):
            ent = _lib.X509_NAME_get_entry(self._name, i)
            ent_obj = _lib.X509_NAME_ENTRY_get_object(ent)
            ent_nid = _lib.OBJ_obj2nid(ent_obj)
            if nid == ent_nid:
                # delete_entry detaches the entry; we own it and must free it.
                ent = _lib.X509_NAME_delete_entry(self._name, i)
                _lib.X509_NAME_ENTRY_free(ent)
                break

        if isinstance(value, _text_type):
            value = value.encode("utf-8")

        add_result = _lib.X509_NAME_add_entry_by_NID(
            self._name, nid, _lib.MBSTRING_UTF8, value, -1, -1, 0
        )
        if not add_result:
            _raise_current_error()

    def __getattr__(self, name):
        """
        Find attribute. An X509Name object has the following attributes:
        countryName (alias C), stateOrProvince (alias ST), locality (alias L),
        organization (alias O), organizationalUnit (alias OU), commonName
        (alias CN) and more...

        Returns ``None`` when the NID is known but the name has no entry for
        it; raises :py:exc:`AttributeError` for unknown attribute names.
        """
        nid = _lib.OBJ_txt2nid(_byte_string(name))
        if nid == _lib.NID_undef:
            # This is a bit weird. OBJ_txt2nid indicated failure, but it seems
            # a lower level function, a2d_ASN1_OBJECT, also feels the need to
            # push something onto the error queue. If we don't clean that up
            # now, someone else will bump into it later and be quite confused.
            # See lp#314814.
            try:
                _raise_current_error()
            except Error:
                pass
            # No base class defines __getattr__, so this lookup itself ends
            # in an AttributeError for the unknown name.
            return super(X509Name, self).__getattr__(name)

        entry_index = _lib.X509_NAME_get_index_by_NID(self._name, nid, -1)
        if entry_index == -1:
            return None

        entry = _lib.X509_NAME_get_entry(self._name, entry_index)
        data = _lib.X509_NAME_ENTRY_get_data(entry)

        result_buffer = _ffi.new("unsigned char**")
        data_length = _lib.ASN1_STRING_to_UTF8(result_buffer, data)
        _openssl_assert(data_length >= 0)

        try:
            result = _ffi.buffer(result_buffer[0], data_length)[:].decode(
                "utf-8"
            )
        finally:
            # ASN1_STRING_to_UTF8 allocated the buffer; we must release it.
            # XXX untested
            _lib.OPENSSL_free(result_buffer[0])
        return result

    def _cmp(op):
        # Build one rich-comparison method from a two-argument ordering
        # predicate.  X509_NAME_cmp returns a negative, zero or positive
        # value, which `op` compares against 0.
        def f(self, other):
            if not isinstance(other, X509Name):
                return NotImplemented
            result = _lib.X509_NAME_cmp(self._name, other._name)
            return op(result, 0)

        return f

    # NOTE(review): the bare __eq__/__ne__/... arguments below must resolve
    # at class-body execution time from module scope (comparison functions
    # imported near the top of this file) -- confirm against the imports.
    __eq__ = _cmp(__eq__)
    __ne__ = _cmp(__ne__)

    __lt__ = _cmp(__lt__)
    __le__ = _cmp(__le__)

    __gt__ = _cmp(__gt__)
    __ge__ = _cmp(__ge__)

    def __repr__(self):
        """
        String representation of an X509Name
        """
        # 512 bytes matches the fixed-size buffer convention used by
        # X509_NAME_oneline callers; the output is truncated to fit.
        result_buffer = _ffi.new("char[]", 512)
        format_result = _lib.X509_NAME_oneline(
            self._name, result_buffer, len(result_buffer)
        )
        _openssl_assert(format_result != _ffi.NULL)

        return "<X509Name object '%s'>" % (
            _native(_ffi.string(result_buffer)),
        )

    def hash(self):
        """
        Return an integer representation of the first four bytes of the
        MD5 digest of the DER representation of the name.

        This is the Python equivalent of OpenSSL's ``X509_NAME_hash``.

        :return: The (integer) hash of this name.
        :rtype: :py:class:`int`
        """
        return _lib.X509_NAME_hash(self._name)

    def der(self):
        """
        Return the DER encoding of this name.

        :return: The DER encoded form of this name.
        :rtype: :py:class:`bytes`
        """
        result_buffer = _ffi.new("unsigned char**")
        encode_result = _lib.i2d_X509_NAME(self._name, result_buffer)
        _openssl_assert(encode_result >= 0)

        # i2d_X509_NAME allocated the buffer; copy it out, then free it.
        string_result = _ffi.buffer(result_buffer[0], encode_result)[:]
        _lib.OPENSSL_free(result_buffer[0])
        return string_result

    def get_components(self):
        """
        Returns the components of this name, as a sequence of 2-tuples.

        :return: The components of this name.
        :rtype: :py:class:`list` of ``name, value`` tuples.
        """
        result = []
        for i in range(_lib.X509_NAME_entry_count(self._name)):
            ent = _lib.X509_NAME_get_entry(self._name, i)

            fname = _lib.X509_NAME_ENTRY_get_object(ent)
            fval = _lib.X509_NAME_ENTRY_get_data(ent)

            nid = _lib.OBJ_obj2nid(fname)
            name = _lib.OBJ_nid2sn(nid)

            # ffi.string does not handle strings containing NULL bytes
            # (which may have been generated by old, broken software)
            value = _ffi.buffer(
                _lib.ASN1_STRING_data(fval), _lib.ASN1_STRING_length(fval)
            )[:]
            result.append((_ffi.string(name), value))

        return result
+
+
class X509Extension(object):
    """
    An X.509 v3 certificate extension.

    Wraps an OpenSSL ``X509_EXTENSION`` created from a textual
    ``name = value`` configuration (``X509V3_EXT_nconf``).
    """

    def __init__(self, type_name, critical, value, subject=None, issuer=None):
        """
        Initializes an X509 extension.

        :param type_name: The name of the type of extension_ to create.
        :type type_name: :py:data:`bytes`

        :param bool critical: A flag indicating whether this is a critical
            extension.

        :param value: The value of the extension.
        :type value: :py:data:`bytes`

        :param subject: Optional X509 certificate to use as subject.
        :type subject: :py:class:`X509`

        :param issuer: Optional X509 certificate to use as issuer.
        :type issuer: :py:class:`X509`

        :raises OpenSSL.crypto.Error: If OpenSSL rejects the extension value.

        .. _extension: https://www.openssl.org/docs/manmaster/man5/
            x509v3_config.html#STANDARD-EXTENSIONS
        """
        ctx = _ffi.new("X509V3_CTX*")

        # A context is necessary for any extension which uses the r2i
        # conversion method. That is, X509V3_EXT_nconf may segfault if passed
        # a NULL ctx. Start off by initializing most of the fields to NULL.
        _lib.X509V3_set_ctx(ctx, _ffi.NULL, _ffi.NULL, _ffi.NULL, _ffi.NULL, 0)

        # We have no configuration database - but perhaps we should (some
        # extensions may require it).
        _lib.X509V3_set_ctx_nodb(ctx)

        # Initialize the subject and issuer, if appropriate. ctx is a local,
        # and as far as I can tell none of the X509V3_* APIs invoked here steal
        # any references, so no need to mess with reference counts or
        # duplicates.
        if issuer is not None:
            if not isinstance(issuer, X509):
                raise TypeError("issuer must be an X509 instance")
            ctx.issuer_cert = issuer._x509
        if subject is not None:
            if not isinstance(subject, X509):
                raise TypeError("subject must be an X509 instance")
            ctx.subject_cert = subject._x509

        if critical:
            # There are other OpenSSL APIs which would let us pass in critical
            # separately, but they're harder to use, and since value is already
            # a pile of crappy junk smuggling a ton of utterly important
            # structured data, what's the point of trying to avoid nasty stuff
            # with strings? (However, X509V3_EXT_i2d in particular seems like
            # it would be a better API to invoke. I do not know where to get
            # the ext_struc it desires for its last parameter, though.)
            value = b"critical," + value

        extension = _lib.X509V3_EXT_nconf(_ffi.NULL, ctx, type_name, value)
        if extension == _ffi.NULL:
            _raise_current_error()
        self._extension = _ffi.gc(extension, _lib.X509_EXTENSION_free)

    @property
    def _nid(self):
        # Numeric NID identifying this extension's type (e.g.
        # NID_subject_alt_name), used by __str__ to pick a formatter.
        return _lib.OBJ_obj2nid(
            _lib.X509_EXTENSION_get_object(self._extension)
        )

    # Display labels for the GENERAL_NAME types we format ourselves in
    # _subjectAltNameString; any other type falls back to OpenSSL's printer.
    _prefixes = {
        _lib.GEN_EMAIL: "email",
        _lib.GEN_DNS: "DNS",
        _lib.GEN_URI: "URI",
    }

    def _subjectAltNameString(self):
        # Decode the extension into a GENERAL_NAMES stack and render each
        # entry as "label:value", joined with ", ".
        names = _ffi.cast(
            "GENERAL_NAMES*", _lib.X509V3_EXT_d2i(self._extension)
        )

        names = _ffi.gc(names, _lib.GENERAL_NAMES_free)
        parts = []
        for i in range(_lib.sk_GENERAL_NAME_num(names)):
            name = _lib.sk_GENERAL_NAME_value(names, i)
            try:
                label = self._prefixes[name.type]
            except KeyError:
                # Unknown name type: let OpenSSL format it into a memory BIO.
                bio = _new_mem_buf()
                _lib.GENERAL_NAME_print(bio, name)
                parts.append(_native(_bio_to_string(bio)))
            else:
                # Known types are stored as IA5 strings; read them directly.
                value = _native(
                    _ffi.buffer(name.d.ia5.data, name.d.ia5.length)[:]
                )
                parts.append(label + ":" + value)
        return ", ".join(parts)

    def __str__(self):
        """
        :return: a nice text representation of the extension
        """
        # subjectAltName gets custom formatting; everything else goes through
        # OpenSSL's generic extension printer.
        if _lib.NID_subject_alt_name == self._nid:
            return self._subjectAltNameString()

        bio = _new_mem_buf()
        print_result = _lib.X509V3_EXT_print(bio, self._extension, 0, 0)
        _openssl_assert(print_result != 0)

        return _native(_bio_to_string(bio))

    def get_critical(self):
        """
        Returns the critical field of this X.509 extension.

        :return: The critical field.
        """
        return _lib.X509_EXTENSION_get_critical(self._extension)

    def get_short_name(self):
        """
        Returns the short type name of this X.509 extension.

        The result is a byte string such as :py:const:`b"basicConstraints"`.

        :return: The short type name.
        :rtype: :py:data:`bytes`

        .. versionadded:: 0.12
        """
        obj = _lib.X509_EXTENSION_get_object(self._extension)
        nid = _lib.OBJ_obj2nid(obj)
        return _ffi.string(_lib.OBJ_nid2sn(nid))

    def get_data(self):
        """
        Returns the data of the X509 extension, encoded as ASN.1.

        :return: The ASN.1 encoded data of this X509 extension.
        :rtype: :py:data:`bytes`

        .. versionadded:: 0.12
        """
        octet_result = _lib.X509_EXTENSION_get_data(self._extension)
        # The ASN1_OCTET_STRING result is layout-compatible with ASN1_STRING,
        # which gives us access to the raw data pointer and length.
        string_result = _ffi.cast("ASN1_STRING*", octet_result)
        char_result = _lib.ASN1_STRING_data(string_result)
        result_length = _lib.ASN1_STRING_length(string_result)
        return _ffi.buffer(char_result, result_length)[:]
+
+
class X509Req(object):
    """
    An X.509 certificate signing requests.
    """

    def __init__(self):
        req = _lib.X509_REQ_new()
        self._req = _ffi.gc(req, _lib.X509_REQ_free)
        # Default to version 0.
        self.set_version(0)

    def to_cryptography(self):
        """
        Export as a ``cryptography`` certificate signing request.

        :rtype: ``cryptography.x509.CertificateSigningRequest``

        .. versionadded:: 17.1.0
        """
        from cryptography.x509 import load_der_x509_csr

        # Round-trip through DER: serialize this request, then parse it with
        # the cryptography package's loader.
        der = dump_certificate_request(FILETYPE_ASN1, self)

        backend = _get_backend()
        return load_der_x509_csr(der, backend)

    @classmethod
    def from_cryptography(cls, crypto_req):
        """
        Construct based on a ``cryptography`` *crypto_req*.

        :param crypto_req: A ``cryptography`` X.509 certificate signing request
        :type crypto_req: ``cryptography.x509.CertificateSigningRequest``

        :rtype: X509Req

        .. versionadded:: 17.1.0
        """
        if not isinstance(crypto_req, x509.CertificateSigningRequest):
            raise TypeError("Must be a certificate signing request")

        from cryptography.hazmat.primitives.serialization import Encoding

        # Same DER round-trip as to_cryptography, in the other direction.
        der = crypto_req.public_bytes(Encoding.DER)
        return load_certificate_request(FILETYPE_ASN1, der)

    def set_pubkey(self, pkey):
        """
        Set the public key of the certificate signing request.

        :param pkey: The public key to use.
        :type pkey: :py:class:`PKey`

        :return: ``None``
        """
        set_result = _lib.X509_REQ_set_pubkey(self._req, pkey._pkey)
        _openssl_assert(set_result == 1)

    def get_pubkey(self):
        """
        Get the public key of the certificate signing request.

        :return: The public key.
        :rtype: :py:class:`PKey`
        """
        # Bypass PKey.__init__ (which would create a fresh EVP_PKEY) and
        # wrap the one owned by this request instead.
        pkey = PKey.__new__(PKey)
        pkey._pkey = _lib.X509_REQ_get_pubkey(self._req)
        _openssl_assert(pkey._pkey != _ffi.NULL)
        pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
        pkey._only_public = True
        return pkey

    def set_version(self, version):
        """
        Set the version subfield (RFC 2459, section 4.1.2.1) of the certificate
        request.

        :param int version: The version number.
        :return: ``None``
        """
        set_result = _lib.X509_REQ_set_version(self._req, version)
        _openssl_assert(set_result == 1)

    def get_version(self):
        """
        Get the version subfield (RFC 2459, section 4.1.2.1) of the certificate
        request.

        :return: The value of the version subfield.
        :rtype: :py:class:`int`
        """
        return _lib.X509_REQ_get_version(self._req)

    def get_subject(self):
        """
        Return the subject of this certificate signing request.

        This creates a new :class:`X509Name` that wraps the underlying subject
        name field on the certificate signing request. Modifying it will modify
        the underlying signing request, and will have the effect of modifying
        any other :class:`X509Name` that refers to this subject.

        :return: The subject of this certificate signing request.
        :rtype: :class:`X509Name`
        """
        name = X509Name.__new__(X509Name)
        name._name = _lib.X509_REQ_get_subject_name(self._req)
        _openssl_assert(name._name != _ffi.NULL)

        # The name is owned by the X509Req structure. As long as the X509Name
        # Python object is alive, keep the X509Req Python object alive.
        name._owner = self

        return name

    def add_extensions(self, extensions):
        """
        Add extensions to the certificate signing request.

        :param extensions: The X.509 extensions to add.
        :type extensions: iterable of :py:class:`X509Extension`
        :raises ValueError: If any element is not an :py:class:`X509Extension`.
        :return: ``None``
        """
        stack = _lib.sk_X509_EXTENSION_new_null()
        _openssl_assert(stack != _ffi.NULL)

        # Free only the stack itself here; the pushed extensions stay owned
        # by their X509Extension wrappers.
        stack = _ffi.gc(stack, _lib.sk_X509_EXTENSION_free)

        for ext in extensions:
            if not isinstance(ext, X509Extension):
                raise ValueError("One of the elements is not an X509Extension")

            # TODO push can fail (here and elsewhere)
            _lib.sk_X509_EXTENSION_push(stack, ext._extension)

        add_result = _lib.X509_REQ_add_extensions(self._req, stack)
        _openssl_assert(add_result == 1)

    def get_extensions(self):
        """
        Get X.509 extensions in the certificate signing request.

        :return: The X.509 extensions in this request.
        :rtype: :py:class:`list` of :py:class:`X509Extension` objects.

        .. versionadded:: 0.15
        """
        exts = []
        native_exts_obj = _lib.X509_REQ_get_extensions(self._req)
        # pop_free releases both the stack and the extensions it owns; it
        # needs a real C function pointer, hence the addressof() lookup on
        # the original (unwrapped) library object.
        native_exts_obj = _ffi.gc(
            native_exts_obj,
            lambda x: _lib.sk_X509_EXTENSION_pop_free(
                x,
                _ffi.addressof(_lib._original_lib, "X509_EXTENSION_free"),
            ),
        )

        for i in range(_lib.sk_X509_EXTENSION_num(native_exts_obj)):
            ext = X509Extension.__new__(X509Extension)
            # Duplicate each extension so the returned objects outlive the
            # stack freed above.
            extension = _lib.X509_EXTENSION_dup(
                _lib.sk_X509_EXTENSION_value(native_exts_obj, i)
            )
            ext._extension = _ffi.gc(extension, _lib.X509_EXTENSION_free)
            exts.append(ext)
        return exts

    def sign(self, pkey, digest):
        """
        Sign the certificate signing request with this key and digest type.

        :param pkey: The key pair to sign with.
        :type pkey: :py:class:`PKey`
        :param digest: The name of the message digest to use for the signature,
            e.g. :py:data:`b"sha256"`.
        :type digest: :py:class:`bytes`
        :raises ValueError: If the key has only a public part, is
            uninitialized, or the digest name is unknown.
        :return: ``None``
        """
        if pkey._only_public:
            raise ValueError("Key has only public part")

        if not pkey._initialized:
            raise ValueError("Key is uninitialized")

        digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
        if digest_obj == _ffi.NULL:
            raise ValueError("No such digest method")

        sign_result = _lib.X509_REQ_sign(self._req, pkey._pkey, digest_obj)
        _openssl_assert(sign_result > 0)

    def verify(self, pkey):
        """
        Verifies the signature on this certificate signing request.

        :param PKey key: A public key.

        :return: ``True`` if the signature is correct.
        :rtype: bool

        :raises OpenSSL.crypto.Error: If the signature is invalid or there is a
            problem verifying the signature.
        """
        if not isinstance(pkey, PKey):
            raise TypeError("pkey must be a PKey instance")

        result = _lib.X509_REQ_verify(self._req, pkey._pkey)
        if result <= 0:
            _raise_current_error()

        return result
+
+
class X509(object):
    """
    An X.509 certificate.
    """

    def __init__(self):
        x509 = _lib.X509_new()
        _openssl_assert(x509 != _ffi.NULL)
        self._x509 = _ffi.gc(x509, _lib.X509_free)

        # Invalidate cached X509Name wrappers when set_issuer/set_subject
        # replace the underlying OpenSSL name structures.
        self._issuer_invalidator = _X509NameInvalidator()
        self._subject_invalidator = _X509NameInvalidator()

    @classmethod
    def _from_raw_x509_ptr(cls, x509):
        # Alternate constructor: wrap an already-allocated X509* (ownership
        # is taken) without calling X509_new via __init__.
        cert = cls.__new__(cls)
        cert._x509 = _ffi.gc(x509, _lib.X509_free)
        cert._issuer_invalidator = _X509NameInvalidator()
        cert._subject_invalidator = _X509NameInvalidator()
        return cert

    def to_cryptography(self):
        """
        Export as a ``cryptography`` certificate.

        :rtype: ``cryptography.x509.Certificate``

        .. versionadded:: 17.1.0
        """
        from cryptography.x509 import load_der_x509_certificate

        # Round-trip through DER into the cryptography package's loader.
        der = dump_certificate(FILETYPE_ASN1, self)
        backend = _get_backend()
        return load_der_x509_certificate(der, backend)

    @classmethod
    def from_cryptography(cls, crypto_cert):
        """
        Construct based on a ``cryptography`` *crypto_cert*.

        :param crypto_key: A ``cryptography`` X.509 certificate.
        :type crypto_key: ``cryptography.x509.Certificate``

        :rtype: X509

        .. versionadded:: 17.1.0
        """
        if not isinstance(crypto_cert, x509.Certificate):
            raise TypeError("Must be a certificate")

        from cryptography.hazmat.primitives.serialization import Encoding

        der = crypto_cert.public_bytes(Encoding.DER)
        return load_certificate(FILETYPE_ASN1, der)

    def set_version(self, version):
        """
        Set the version number of the certificate. Note that the
        version value is zero-based, eg. a value of 0 is V1.

        :param version: The version number of the certificate.
        :type version: :py:class:`int`

        :return: ``None``
        """
        if not isinstance(version, int):
            raise TypeError("version must be an integer")

        _lib.X509_set_version(self._x509, version)

    def get_version(self):
        """
        Return the version number of the certificate.

        :return: The version number of the certificate.
        :rtype: :py:class:`int`
        """
        return _lib.X509_get_version(self._x509)

    def get_pubkey(self):
        """
        Get the public key of the certificate.

        :return: The public key.
        :rtype: :py:class:`PKey`
        """
        # Bypass PKey.__init__ and wrap the EVP_PKEY from the certificate.
        pkey = PKey.__new__(PKey)
        pkey._pkey = _lib.X509_get_pubkey(self._x509)
        if pkey._pkey == _ffi.NULL:
            _raise_current_error()
        pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
        pkey._only_public = True
        return pkey

    def set_pubkey(self, pkey):
        """
        Set the public key of the certificate.

        :param pkey: The public key.
        :type pkey: :py:class:`PKey`

        :return: :py:data:`None`
        """
        if not isinstance(pkey, PKey):
            raise TypeError("pkey must be a PKey instance")

        set_result = _lib.X509_set_pubkey(self._x509, pkey._pkey)
        _openssl_assert(set_result == 1)

    def sign(self, pkey, digest):
        """
        Sign the certificate with this key and digest type.

        :param pkey: The key to sign with.
        :type pkey: :py:class:`PKey`

        :param digest: The name of the message digest to use.
        :type digest: :py:class:`bytes`

        :raises ValueError: If the key has only a public part, is
            uninitialized, or the digest name is unknown.

        :return: :py:data:`None`
        """
        if not isinstance(pkey, PKey):
            raise TypeError("pkey must be a PKey instance")

        if pkey._only_public:
            raise ValueError("Key only has public part")

        if not pkey._initialized:
            raise ValueError("Key is uninitialized")

        evp_md = _lib.EVP_get_digestbyname(_byte_string(digest))
        if evp_md == _ffi.NULL:
            raise ValueError("No such digest method")

        sign_result = _lib.X509_sign(self._x509, pkey._pkey, evp_md)
        _openssl_assert(sign_result > 0)

    def get_signature_algorithm(self):
        """
        Return the signature algorithm used in the certificate.

        :return: The name of the algorithm.
        :rtype: :py:class:`bytes`

        :raises ValueError: If the signature algorithm is undefined.

        .. versionadded:: 0.13
        """
        algor = _lib.X509_get0_tbs_sigalg(self._x509)
        nid = _lib.OBJ_obj2nid(algor.algorithm)
        if nid == _lib.NID_undef:
            raise ValueError("Undefined signature algorithm")
        return _ffi.string(_lib.OBJ_nid2ln(nid))

    def digest(self, digest_name):
        """
        Return the digest of the X509 object.

        :param digest_name: The name of the digest algorithm to use.
        :type digest_name: :py:class:`bytes`

        :raises ValueError: If the digest name is unknown.

        :return: The digest of the object, formatted as
            :py:const:`b":"`-delimited hex pairs.
        :rtype: :py:class:`bytes`
        """
        digest = _lib.EVP_get_digestbyname(_byte_string(digest_name))
        if digest == _ffi.NULL:
            raise ValueError("No such digest method")

        # X509_digest writes at most EVP_MAX_MD_SIZE bytes and stores the
        # actual length back through result_length.
        result_buffer = _ffi.new("unsigned char[]", _lib.EVP_MAX_MD_SIZE)
        result_length = _ffi.new("unsigned int[]", 1)
        result_length[0] = len(result_buffer)

        digest_result = _lib.X509_digest(
            self._x509, digest, result_buffer, result_length
        )
        _openssl_assert(digest_result == 1)

        return b":".join(
            [
                b16encode(ch).upper()
                for ch in _ffi.buffer(result_buffer, result_length[0])
            ]
        )

    def subject_name_hash(self):
        """
        Return the hash of the X509 subject.

        :return: The hash of the subject.
        :rtype: :py:class:`bytes`
        """
        return _lib.X509_subject_name_hash(self._x509)

    def set_serial_number(self, serial):
        """
        Set the serial number of the certificate.

        :param serial: The new serial number.
        :type serial: :py:class:`int`

        :return: :py:data`None`
        """
        if not isinstance(serial, _integer_types):
            raise TypeError("serial must be an integer")

        # Serial numbers may exceed native integer range, so convert the
        # Python int to hex text and let OpenSSL's BIGNUM parse it.
        hex_serial = hex(serial)[2:]
        if not isinstance(hex_serial, bytes):
            hex_serial = hex_serial.encode("ascii")

        bignum_serial = _ffi.new("BIGNUM**")

        # BN_hex2bn stores the result in &bignum. Unless it doesn't feel like
        # it. If bignum is still NULL after this call, then the return value
        # is actually the result. I hope. -exarkun
        small_serial = _lib.BN_hex2bn(bignum_serial, hex_serial)

        if bignum_serial[0] == _ffi.NULL:
            set_result = _lib.ASN1_INTEGER_set(
                _lib.X509_get_serialNumber(self._x509), small_serial
            )
            # ASN1_INTEGER_set returns 1 on success and 0 on failure, so
            # only a falsy result is an error.  (BUGFIX: this previously
            # raised on success; the branch was marked "Not tested".)
            if not set_result:
                # TODO Not tested
                _raise_current_error()
        else:
            asn1_serial = _lib.BN_to_ASN1_INTEGER(bignum_serial[0], _ffi.NULL)
            _lib.BN_free(bignum_serial[0])
            if asn1_serial == _ffi.NULL:
                # TODO Not tested
                _raise_current_error()
            asn1_serial = _ffi.gc(asn1_serial, _lib.ASN1_INTEGER_free)
            set_result = _lib.X509_set_serialNumber(self._x509, asn1_serial)
            _openssl_assert(set_result == 1)

    def get_serial_number(self):
        """
        Return the serial number of this certificate.

        :return: The serial number.
        :rtype: int
        """
        # Convert ASN1_INTEGER -> BIGNUM -> hex text -> Python int, freeing
        # each intermediate OpenSSL allocation as we go.
        asn1_serial = _lib.X509_get_serialNumber(self._x509)
        bignum_serial = _lib.ASN1_INTEGER_to_BN(asn1_serial, _ffi.NULL)
        try:
            hex_serial = _lib.BN_bn2hex(bignum_serial)
            try:
                hexstring_serial = _ffi.string(hex_serial)
                serial = int(hexstring_serial, 16)
                return serial
            finally:
                _lib.OPENSSL_free(hex_serial)
        finally:
            _lib.BN_free(bignum_serial)

    def gmtime_adj_notAfter(self, amount):
        """
        Adjust the time stamp on which the certificate stops being valid.

        :param int amount: The number of seconds by which to adjust the
            timestamp.
        :return: ``None``
        """
        if not isinstance(amount, int):
            raise TypeError("amount must be an integer")

        notAfter = _lib.X509_getm_notAfter(self._x509)
        _lib.X509_gmtime_adj(notAfter, amount)

    def gmtime_adj_notBefore(self, amount):
        """
        Adjust the timestamp on which the certificate starts being valid.

        :param amount: The number of seconds by which to adjust the timestamp.
        :return: ``None``
        """
        if not isinstance(amount, int):
            raise TypeError("amount must be an integer")

        notBefore = _lib.X509_getm_notBefore(self._x509)
        _lib.X509_gmtime_adj(notBefore, amount)

    def has_expired(self):
        """
        Check whether the certificate has expired.

        :return: ``True`` if the certificate has expired, ``False`` otherwise.
        :rtype: bool
        """
        # NOTE(review): assumes notAfter is present and formatted as
        # YYYYMMDDhhmmssZ; get_notAfter() may return None for a certificate
        # without that field, which would fail here.
        time_string = _native(self.get_notAfter())
        not_after = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")

        return not_after < datetime.datetime.utcnow()

    def _get_boundary_time(self, which):
        # Shared implementation for get_notBefore/get_notAfter.
        return _get_asn1_time(which(self._x509))

    def get_notBefore(self):
        """
        Get the timestamp at which the certificate starts being valid.

        The timestamp is formatted as an ASN.1 TIME::

            YYYYMMDDhhmmssZ

        :return: A timestamp string, or ``None`` if there is none.
        :rtype: bytes or NoneType
        """
        return self._get_boundary_time(_lib.X509_getm_notBefore)

    def _set_boundary_time(self, which, when):
        # Shared implementation for set_notBefore/set_notAfter.
        return _set_asn1_time(which(self._x509), when)

    def set_notBefore(self, when):
        """
        Set the timestamp at which the certificate starts being valid.

        The timestamp is formatted as an ASN.1 TIME::

            YYYYMMDDhhmmssZ

        :param bytes when: A timestamp string.
        :return: ``None``
        """
        return self._set_boundary_time(_lib.X509_getm_notBefore, when)

    def get_notAfter(self):
        """
        Get the timestamp at which the certificate stops being valid.

        The timestamp is formatted as an ASN.1 TIME::

            YYYYMMDDhhmmssZ

        :return: A timestamp string, or ``None`` if there is none.
        :rtype: bytes or NoneType
        """
        return self._get_boundary_time(_lib.X509_getm_notAfter)

    def set_notAfter(self, when):
        """
        Set the timestamp at which the certificate stops being valid.

        The timestamp is formatted as an ASN.1 TIME::

            YYYYMMDDhhmmssZ

        :param bytes when: A timestamp string.
        :return: ``None``
        """
        return self._set_boundary_time(_lib.X509_getm_notAfter, when)

    def _get_name(self, which):
        # Shared implementation for get_issuer/get_subject: wrap the name
        # owned by this certificate without copying it.
        name = X509Name.__new__(X509Name)
        name._name = which(self._x509)
        _openssl_assert(name._name != _ffi.NULL)

        # The name is owned by the X509 structure. As long as the X509Name
        # Python object is alive, keep the X509 Python object alive.
        name._owner = self

        return name

    def _set_name(self, which, name):
        # Shared implementation for set_issuer/set_subject.
        if not isinstance(name, X509Name):
            raise TypeError("name must be an X509Name")
        set_result = which(self._x509, name._name)
        _openssl_assert(set_result == 1)

    def get_issuer(self):
        """
        Return the issuer of this certificate.

        This creates a new :class:`X509Name` that wraps the underlying issuer
        name field on the certificate. Modifying it will modify the underlying
        certificate, and will have the effect of modifying any other
        :class:`X509Name` that refers to this issuer.

        :return: The issuer of this certificate.
        :rtype: :class:`X509Name`
        """
        name = self._get_name(_lib.X509_get_issuer_name)
        self._issuer_invalidator.add(name)
        return name

    def set_issuer(self, issuer):
        """
        Set the issuer of this certificate.

        :param issuer: The issuer.
        :type issuer: :py:class:`X509Name`

        :return: ``None``
        """
        self._set_name(_lib.X509_set_issuer_name, issuer)
        # Previously handed-out issuer X509Name wrappers now point at a
        # replaced structure; invalidate them.
        self._issuer_invalidator.clear()

    def get_subject(self):
        """
        Return the subject of this certificate.

        This creates a new :class:`X509Name` that wraps the underlying subject
        name field on the certificate. Modifying it will modify the underlying
        certificate, and will have the effect of modifying any other
        :class:`X509Name` that refers to this subject.

        :return: The subject of this certificate.
        :rtype: :class:`X509Name`
        """
        name = self._get_name(_lib.X509_get_subject_name)
        self._subject_invalidator.add(name)
        return name

    def set_subject(self, subject):
        """
        Set the subject of this certificate.

        :param subject: The subject.
        :type subject: :py:class:`X509Name`

        :return: ``None``
        """
        self._set_name(_lib.X509_set_subject_name, subject)
        self._subject_invalidator.clear()

    def get_extension_count(self):
        """
        Get the number of extensions on this certificate.

        :return: The number of extensions.
        :rtype: :py:class:`int`

        .. versionadded:: 0.12
        """
        return _lib.X509_get_ext_count(self._x509)

    def add_extensions(self, extensions):
        """
        Add extensions to the certificate.

        :param extensions: The extensions to add.
        :type extensions: An iterable of :py:class:`X509Extension` objects.
        :raises ValueError: If any element is not an :py:class:`X509Extension`.
        :return: ``None``
        """
        for ext in extensions:
            if not isinstance(ext, X509Extension):
                raise ValueError("One of the elements is not an X509Extension")

            add_result = _lib.X509_add_ext(self._x509, ext._extension, -1)
            if not add_result:
                _raise_current_error()

    def get_extension(self, index):
        """
        Get a specific extension of the certificate by index.

        Extensions on a certificate are kept in order. The index
        parameter selects which extension will be returned.

        :param int index: The index of the extension to retrieve.
        :return: The extension at the specified index.
        :rtype: :py:class:`X509Extension`
        :raises IndexError: If the extension index was out of bounds.

        .. versionadded:: 0.12
        """
        ext = X509Extension.__new__(X509Extension)
        ext._extension = _lib.X509_get_ext(self._x509, index)
        if ext._extension == _ffi.NULL:
            raise IndexError("extension index out of bounds")

        # Duplicate so the returned extension is independent of this
        # certificate's lifetime.
        extension = _lib.X509_EXTENSION_dup(ext._extension)
        ext._extension = _ffi.gc(extension, _lib.X509_EXTENSION_free)
        return ext
+
+
class X509StoreFlags(object):
    """
    Flags for X509 verification, used to change the behavior of
    :class:`X509Store`.

    See `OpenSSL Verification Flags`_ for details.

    .. _OpenSSL Verification Flags:
        https://www.openssl.org/docs/manmaster/man3/X509_VERIFY_PARAM_set_flags.html
    """

    # Thin aliases for OpenSSL's X509_V_FLAG_* constants of the same names.
    # Combine them by oring together and pass to X509Store.set_flags().
    CRL_CHECK = _lib.X509_V_FLAG_CRL_CHECK
    CRL_CHECK_ALL = _lib.X509_V_FLAG_CRL_CHECK_ALL
    IGNORE_CRITICAL = _lib.X509_V_FLAG_IGNORE_CRITICAL
    X509_STRICT = _lib.X509_V_FLAG_X509_STRICT
    ALLOW_PROXY_CERTS = _lib.X509_V_FLAG_ALLOW_PROXY_CERTS
    POLICY_CHECK = _lib.X509_V_FLAG_POLICY_CHECK
    EXPLICIT_POLICY = _lib.X509_V_FLAG_EXPLICIT_POLICY
    INHIBIT_MAP = _lib.X509_V_FLAG_INHIBIT_MAP
    NOTIFY_POLICY = _lib.X509_V_FLAG_NOTIFY_POLICY
    CHECK_SS_SIGNATURE = _lib.X509_V_FLAG_CHECK_SS_SIGNATURE
+
+
class X509Store(object):
    """
    An X.509 store.

    An X.509 store is used to describe a context in which to verify a
    certificate. A description of a context may include a set of certificates
    to trust, a set of certificate revocation lists, verification flags and
    more.

    An X.509 store, being only a description, cannot be used by itself to
    verify a certificate. To carry out the actual verification process, see
    :class:`X509StoreContext`.
    """

    def __init__(self):
        store = _lib.X509_STORE_new()
        self._store = _ffi.gc(store, _lib.X509_STORE_free)

    def add_cert(self, cert):
        """
        Adds a trusted certificate to this store.

        Adding a certificate with this method adds this certificate as a
        *trusted* certificate.

        :param X509 cert: The certificate to add to this store.

        :raises TypeError: If the certificate is not an :class:`X509`.

        :raises OpenSSL.crypto.Error: If OpenSSL was unhappy with your
            certificate.

        :return: ``None`` if the certificate was added successfully.
        """
        if not isinstance(cert, X509):
            # NOTE: raised without a message, matching historical behavior.
            raise TypeError()

        res = _lib.X509_STORE_add_cert(self._store, cert._x509)
        _openssl_assert(res == 1)

    def add_crl(self, crl):
        """
        Add a certificate revocation list to this store.

        The certificate revocation lists added to a store will only be used if
        the associated flags are configured to check certificate revocation
        lists.

        .. versionadded:: 16.1.0

        :param CRL crl: The certificate revocation list to add to this store.
        :return: ``None`` if the certificate revocation list was added
            successfully.
        """
        _openssl_assert(_lib.X509_STORE_add_crl(self._store, crl._crl) != 0)

    def set_flags(self, flags):
        """
        Set verification flags to this store.

        Verification flags can be combined by oring them together.

        .. note::

          Setting a verification flag sometimes requires clients to add
          additional information to the store, otherwise a suitable error will
          be raised.

          For example, in setting flags to enable CRL checking a
          suitable CRL must be added to the store otherwise an error will be
          raised.

        .. versionadded:: 16.1.0

        :param int flags: The verification flags to set on this store.
            See :class:`X509StoreFlags` for available constants.
        :return: ``None`` if the verification flags were successfully set.
        """
        _openssl_assert(_lib.X509_STORE_set_flags(self._store, flags) != 0)

    def set_time(self, vfy_time):
        """
        Set the time against which the certificates are verified.

        Normally the current time is used.

        .. note::

          For example, you can determine if a certificate was valid at a given
          time.

        .. versionadded:: 17.0.0

        :param datetime vfy_time: The verification time to set on this store.
        :return: ``None`` if the verification time was successfully set.
        """
        # Wrap the datetime (interpreted via timegm, i.e. as UTC) in a
        # verification parameter object and copy it into the store.
        param = _lib.X509_VERIFY_PARAM_new()
        param = _ffi.gc(param, _lib.X509_VERIFY_PARAM_free)

        _lib.X509_VERIFY_PARAM_set_time(
            param, calendar.timegm(vfy_time.timetuple())
        )
        _openssl_assert(_lib.X509_STORE_set1_param(self._store, param) != 0)

    def load_locations(self, cafile, capath=None):
        """
        Let X509Store know where we can find trusted certificates for the
        certificate chain.  Note that the certificates have to be in PEM
        format.

        If *capath* is passed, it must be a directory prepared using the
        ``c_rehash`` tool included with OpenSSL.  Either, but not both, of
        *cafile* or *capath* may be ``None``.

        .. note::

          Both *cafile* and *capath* may be set simultaneously.

          Call this method multiple times to add more than one location.
          For example, CA certificates, and certificate revocation list bundles
          may be passed in *cafile* in subsequent calls to this method.

        .. versionadded:: 20.0

        :param cafile: In which file we can find the certificates (``bytes`` or
          ``unicode``).
        :param capath: In which directory we can find the certificates
          (``bytes`` or ``unicode``).

        :return: ``None`` if the locations were set successfully.

        :raises OpenSSL.crypto.Error: If both *cafile* and *capath* is ``None``
            or the locations could not be set for any reason.

        """
        # Translate the optional Python paths into the NULL-or-encoded-path
        # form X509_STORE_load_locations expects.
        if cafile is None:
            cafile = _ffi.NULL
        else:
            cafile = _path_string(cafile)

        if capath is None:
            capath = _ffi.NULL
        else:
            capath = _path_string(capath)

        load_result = _lib.X509_STORE_load_locations(
            self._store, cafile, capath
        )
        if not load_result:
            _raise_current_error()
+
+
class X509StoreContextError(Exception):
    """
    An exception raised when an error occurred while verifying a certificate
    using `OpenSSL.X509StoreContext.verify_certificate`.

    :ivar certificate: The certificate which caused the verification failure.
    :type certificate: :class:`X509`
    """

    def __init__(self, message, certificate):
        # ``message`` is the [error code, depth, error string] list built by
        # X509StoreContext._exception_from_context().
        super(X509StoreContextError, self).__init__(message)
        self.certificate = certificate
+
+
class X509StoreContext(object):
    """
    An X.509 store context.

    An X.509 store context is used to carry out the actual verification process
    of a certificate in a described context. For describing such a context, see
    :class:`X509Store`.

    :ivar _store_ctx: The underlying X509_STORE_CTX structure used by this
        instance.  It is dynamically allocated and automatically garbage
        collected.
    :ivar _store: See the ``store`` ``__init__`` parameter.
    :ivar _cert: See the ``certificate`` ``__init__`` parameter.
    :ivar _chain: See the ``chain`` ``__init__`` parameter.
    :param X509Store store: The certificates which will be trusted for the
        purposes of any verifications.
    :param X509 certificate: The certificate to be verified.
    :param chain: List of untrusted certificates that may be used for building
        the certificate chain. May be ``None``.
    :type chain: :class:`list` of :class:`X509`
    """

    def __init__(self, store, certificate, chain=None):
        store_ctx = _lib.X509_STORE_CTX_new()
        self._store_ctx = _ffi.gc(store_ctx, _lib.X509_STORE_CTX_free)
        self._store = store
        self._cert = certificate
        self._chain = self._build_certificate_stack(chain)
        # Make the store context available for use after instantiating this
        # class by initializing it now. Per testing, subsequent calls to
        # :meth:`_init` have no adverse affect.
        self._init()

    @staticmethod
    def _build_certificate_stack(certificates):
        # Convert a Python list of X509 objects into an OpenSSL
        # STACK_OF(X509) that owns a reference to each member, or return
        # _ffi.NULL when there is nothing to add.
        def cleanup(s):
            # Equivalent to sk_X509_pop_free, but we don't
            # currently have a CFFI binding for that available
            for i in range(_lib.sk_X509_num(s)):
                x = _lib.sk_X509_value(s, i)
                _lib.X509_free(x)
            _lib.sk_X509_free(s)

        if certificates is None or len(certificates) == 0:
            return _ffi.NULL

        stack = _lib.sk_X509_new_null()
        _openssl_assert(stack != _ffi.NULL)
        stack = _ffi.gc(stack, cleanup)

        for cert in certificates:
            if not isinstance(cert, X509):
                raise TypeError("One of the elements is not an X509 instance")

            # The stack keeps its own reference; release it again if the
            # push fails so the certificate is not leaked.
            _openssl_assert(_lib.X509_up_ref(cert._x509) > 0)
            if _lib.sk_X509_push(stack, cert._x509) <= 0:
                _lib.X509_free(cert._x509)
                _raise_current_error()

        return stack

    def _init(self):
        """
        Set up the store context for a subsequent verification operation.

        Calling this method more than once without first calling
        :meth:`_cleanup` will leak memory.
        """
        ret = _lib.X509_STORE_CTX_init(
            self._store_ctx, self._store._store, self._cert._x509, self._chain
        )
        if ret <= 0:
            _raise_current_error()

    def _cleanup(self):
        """
        Internally cleans up the store context.

        The store context can then be reused with a new call to :meth:`_init`.
        """
        _lib.X509_STORE_CTX_cleanup(self._store_ctx)

    def _exception_from_context(self):
        """
        Convert an OpenSSL native context error failure into a Python
        exception.

        When a call to native OpenSSL X509_verify_cert fails, additional
        information about the failure can be obtained from the store context.
        """
        # [error code, depth, human-readable error string] — this list is
        # what X509StoreContextError receives as its message.
        errors = [
            _lib.X509_STORE_CTX_get_error(self._store_ctx),
            _lib.X509_STORE_CTX_get_error_depth(self._store_ctx),
            _native(
                _ffi.string(
                    _lib.X509_verify_cert_error_string(
                        _lib.X509_STORE_CTX_get_error(self._store_ctx)
                    )
                )
            ),
        ]
        # A context error should always be associated with a certificate, so we
        # expect this call to never return :class:`None`.
        # NOTE(review): OpenSSL documents that X509_STORE_CTX_get_current_cert
        # may return NULL for some error conditions; X509_dup on NULL would
        # crash — TODO confirm this cannot happen for the errors reachable
        # here.
        _x509 = _lib.X509_STORE_CTX_get_current_cert(self._store_ctx)
        _cert = _lib.X509_dup(_x509)
        pycert = X509._from_raw_x509_ptr(_cert)
        return X509StoreContextError(errors, pycert)

    def set_store(self, store):
        """
        Set the context's X.509 store.

        .. versionadded:: 0.15

        :param X509Store store: The store description which will be used for
            the purposes of any *future* verifications.
        """
        # Takes effect on the next _init(), i.e. the next verification call.
        self._store = store

    def verify_certificate(self):
        """
        Verify a certificate in a context.

        .. versionadded:: 0.15

        :raises X509StoreContextError: If an error occurred when validating a
          certificate in the context. Sets ``certificate`` attribute to
          indicate which certificate caused the error.
        """
        # Always re-initialize the store context in case
        # :meth:`verify_certificate` is called multiple times.
        #
        # :meth:`_init` is called in :meth:`__init__` so _cleanup is called
        # before _init to ensure memory is not leaked.
        self._cleanup()
        self._init()
        ret = _lib.X509_verify_cert(self._store_ctx)
        self._cleanup()
        if ret <= 0:
            raise self._exception_from_context()

    def get_verified_chain(self):
        """
        Verify a certificate in a context and return the complete validated
        chain.

        :raises X509StoreContextError: If an error occurred when validating a
          certificate in the context. Sets ``certificate`` attribute to
          indicate which certificate caused the error.

        .. versionadded:: 20.0
        """
        # Always re-initialize the store context in case
        # :meth:`verify_certificate` is called multiple times.
        #
        # :meth:`_init` is called in :meth:`__init__` so _cleanup is called
        # before _init to ensure memory is not leaked.
        self._cleanup()
        self._init()
        ret = _lib.X509_verify_cert(self._store_ctx)
        if ret <= 0:
            self._cleanup()
            raise self._exception_from_context()

        # Note: X509_STORE_CTX_get1_chain returns a deep copy of the chain.
        cert_stack = _lib.X509_STORE_CTX_get1_chain(self._store_ctx)
        _openssl_assert(cert_stack != _ffi.NULL)

        result = []
        for i in range(_lib.sk_X509_num(cert_stack)):
            cert = _lib.sk_X509_value(cert_stack, i)
            _openssl_assert(cert != _ffi.NULL)
            pycert = X509._from_raw_x509_ptr(cert)
            result.append(pycert)

        # Free the stack but not the members which are freed by the X509 class.
        _lib.sk_X509_free(cert_stack)
        self._cleanup()
        return result
+
+
def load_certificate(type, buffer):
    """
    Load a certificate (X509) from the string *buffer* encoded with the
    type *type*.

    :param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1)

    :param bytes buffer: The buffer the certificate is stored in

    :return: The X509 object
    """
    # Text input is accepted for convenience; PEM/DER data is ASCII-safe.
    if isinstance(buffer, _text_type):
        buffer = buffer.encode("ascii")

    bio = _new_mem_buf(buffer)

    if type == FILETYPE_PEM:
        cert = _lib.PEM_read_bio_X509(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
    elif type == FILETYPE_ASN1:
        cert = _lib.d2i_X509_bio(bio, _ffi.NULL)
    else:
        raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")

    # A NULL result means the parser rejected the input.
    if cert == _ffi.NULL:
        _raise_current_error()

    return X509._from_raw_x509_ptr(cert)
+
+
def dump_certificate(type, cert):
    """
    Dump the certificate *cert* into a buffer string encoded with the type
    *type*.

    :param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1, or
        FILETYPE_TEXT)
    :param cert: The certificate to dump
    :return: The buffer with the dumped certificate in
    """
    bio = _new_mem_buf()

    if type == FILETYPE_TEXT:
        written = _lib.X509_print_ex(bio, cert._x509, 0, 0)
    elif type == FILETYPE_PEM:
        written = _lib.PEM_write_bio_X509(bio, cert._x509)
    elif type == FILETYPE_ASN1:
        written = _lib.i2d_X509_bio(bio, cert._x509)
    else:
        raise ValueError(
            "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or "
            "FILETYPE_TEXT"
        )

    # Every serializer above returns 1 on success.
    _openssl_assert(written == 1)
    return _bio_to_string(bio)
+
+
def dump_publickey(type, pkey):
    """
    Dump a public key to a buffer.

    :param type: The file type (one of :data:`FILETYPE_PEM` or
        :data:`FILETYPE_ASN1`).
    :param PKey pkey: The public key to dump
    :return: The buffer with the dumped key in it.
    :rtype: bytes
    """
    # Pick the serializer first; an unknown type never touches OpenSSL.
    if type == FILETYPE_PEM:
        writer = _lib.PEM_write_bio_PUBKEY
    elif type == FILETYPE_ASN1:
        writer = _lib.i2d_PUBKEY_bio
    else:
        raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")

    bio = _new_mem_buf()
    if writer(bio, pkey._pkey) != 1:  # pragma: no cover
        _raise_current_error()

    return _bio_to_string(bio)
+
+
def dump_privatekey(type, pkey, cipher=None, passphrase=None):
    """
    Dump the private key *pkey* into a buffer string encoded with the type
    *type*.  Optionally (if *type* is :const:`FILETYPE_PEM`) encrypting it
    using *cipher* and *passphrase*.

    :param type: The file type (one of :const:`FILETYPE_PEM`,
        :const:`FILETYPE_ASN1`, or :const:`FILETYPE_TEXT`)
    :param PKey pkey: The PKey to dump
    :param cipher: (optional) if encrypted PEM format, the cipher to use
    :param passphrase: (optional) if encrypted PEM format, this can be either
        the passphrase to use, or a callback for providing the passphrase.

    :return: The buffer with the dumped key in
    :rtype: bytes

    :raises TypeError: If *pkey* is not a :class:`PKey`, if *cipher* is given
        without *passphrase*, or if a FILETYPE_TEXT dump is requested for a
        non-RSA key.
    :raises ValueError: If *cipher* does not name a known cipher, or *type*
        is not one of the three supported constants.
    """
    bio = _new_mem_buf()

    if not isinstance(pkey, PKey):
        raise TypeError("pkey must be a PKey")

    if cipher is not None:
        # Encrypting without a passphrase makes no sense; reject early.
        if passphrase is None:
            raise TypeError(
                "if a value is given for cipher "
                "one must also be given for passphrase"
            )
        cipher_obj = _lib.EVP_get_cipherbyname(_byte_string(cipher))
        if cipher_obj == _ffi.NULL:
            raise ValueError("Invalid cipher name")
    else:
        cipher_obj = _ffi.NULL

    # _PassphraseHelper itself rejects a passphrase for non-PEM types, so
    # constructing it here also validates the type/passphrase combination.
    helper = _PassphraseHelper(type, passphrase)
    if type == FILETYPE_PEM:
        result_code = _lib.PEM_write_bio_PrivateKey(
            bio,
            pkey._pkey,
            cipher_obj,
            _ffi.NULL,
            0,
            helper.callback,
            helper.callback_args,
        )
        # Re-raise any exception the passphrase callback captured.
        helper.raise_if_problem()
    elif type == FILETYPE_ASN1:
        result_code = _lib.i2d_PrivateKey_bio(bio, pkey._pkey)
    elif type == FILETYPE_TEXT:
        if _lib.EVP_PKEY_id(pkey._pkey) != _lib.EVP_PKEY_RSA:
            raise TypeError("Only RSA keys are supported for FILETYPE_TEXT")

        rsa = _ffi.gc(_lib.EVP_PKEY_get1_RSA(pkey._pkey), _lib.RSA_free)
        result_code = _lib.RSA_print(bio, rsa, 0)
    else:
        raise ValueError(
            "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or "
            "FILETYPE_TEXT"
        )

    _openssl_assert(result_code != 0)

    return _bio_to_string(bio)
+
+
class Revoked(object):
    """
    A certificate revocation.
    """

    # https://www.openssl.org/docs/manmaster/man5/x509v3_config.html#CRL-distribution-points
    # which differs from crl_reasons of crypto/x509v3/v3_enum.c that matches
    # OCSP_crl_reason_str. We use the latter, just like the command line
    # program.
    _crl_reasons = [
        b"unspecified",
        b"keyCompromise",
        b"CACompromise",
        b"affiliationChanged",
        b"superseded",
        b"cessationOfOperation",
        b"certificateHold",
        # b"removeFromCRL",
    ]

    def __init__(self):
        revoked = _lib.X509_REVOKED_new()
        self._revoked = _ffi.gc(revoked, _lib.X509_REVOKED_free)

    def set_serial(self, hex_str):
        """
        Set the serial number.

        The serial number is formatted as a hexadecimal number encoded in
        ASCII.

        :param bytes hex_str: The new serial number.

        :return: ``None``

        :raises ValueError: If *hex_str* is not a valid hexadecimal string.
        """
        bignum_serial = _ffi.gc(_lib.BN_new(), _lib.BN_free)
        bignum_ptr = _ffi.new("BIGNUM**")
        bignum_ptr[0] = bignum_serial
        bn_result = _lib.BN_hex2bn(bignum_ptr, hex_str)
        if not bn_result:
            raise ValueError("bad hex string")

        asn1_serial = _ffi.gc(
            _lib.BN_to_ASN1_INTEGER(bignum_serial, _ffi.NULL),
            _lib.ASN1_INTEGER_free,
        )
        # X509_REVOKED_set_serialNumber copies the value, so asn1_serial may
        # be freed by the gc wrapper afterwards.
        _lib.X509_REVOKED_set_serialNumber(self._revoked, asn1_serial)

    def get_serial(self):
        """
        Get the serial number.

        The serial number is formatted as a hexadecimal number encoded in
        ASCII.

        :return: The serial number.
        :rtype: bytes
        """
        bio = _new_mem_buf()

        asn1_int = _lib.X509_REVOKED_get0_serialNumber(self._revoked)
        _openssl_assert(asn1_int != _ffi.NULL)
        result = _lib.i2a_ASN1_INTEGER(bio, asn1_int)
        _openssl_assert(result >= 0)
        return _bio_to_string(bio)

    def _delete_reason(self):
        # Remove the first CRL-reason extension, if any; there is at most one
        # because set_reason always deletes before adding.
        for i in range(_lib.X509_REVOKED_get_ext_count(self._revoked)):
            ext = _lib.X509_REVOKED_get_ext(self._revoked, i)
            obj = _lib.X509_EXTENSION_get_object(ext)
            if _lib.OBJ_obj2nid(obj) == _lib.NID_crl_reason:
                _lib.X509_EXTENSION_free(ext)
                _lib.X509_REVOKED_delete_ext(self._revoked, i)
                break

    def set_reason(self, reason):
        """
        Set the reason of this revocation.

        If :data:`reason` is ``None``, delete the reason instead.

        :param reason: The reason string.
        :type reason: :class:`bytes` or :class:`NoneType`

        :return: ``None``

        :raises TypeError: If *reason* is neither ``None`` nor a byte string.
        :raises ValueError: If *reason* is not one of the supported reason
            strings.

        .. seealso::

            :meth:`all_reasons`, which gives you a list of all supported
            reasons which you might pass to this method.
        """
        if reason is None:
            self._delete_reason()
        elif not isinstance(reason, bytes):
            raise TypeError("reason must be None or a byte string")
        else:
            # Reasons are matched case-insensitively and with spaces removed
            # ("CA Compromise" == b"cacompromise").
            reason = reason.lower().replace(b" ", b"")
            reason_code = [r.lower() for r in self._crl_reasons].index(reason)

            new_reason_ext = _lib.ASN1_ENUMERATED_new()
            _openssl_assert(new_reason_ext != _ffi.NULL)
            new_reason_ext = _ffi.gc(new_reason_ext, _lib.ASN1_ENUMERATED_free)

            set_result = _lib.ASN1_ENUMERATED_set(new_reason_ext, reason_code)
            # ASN1_ENUMERATED_set returns 1 on success and 0 on failure.  The
            # previous check compared this int against _ffi.NULL, which is
            # never equal, so the assertion could not fail.
            _openssl_assert(set_result == 1)

            self._delete_reason()
            add_result = _lib.X509_REVOKED_add1_ext_i2d(
                self._revoked, _lib.NID_crl_reason, new_reason_ext, 0, 0
            )
            _openssl_assert(add_result == 1)

    def get_reason(self):
        """
        Get the reason of this revocation.

        :return: The reason, or ``None`` if there is none.
        :rtype: bytes or NoneType

        .. seealso::

            :meth:`all_reasons`, which gives you a list of all supported
            reasons this method might return.
        """
        for i in range(_lib.X509_REVOKED_get_ext_count(self._revoked)):
            ext = _lib.X509_REVOKED_get_ext(self._revoked, i)
            obj = _lib.X509_EXTENSION_get_object(ext)
            if _lib.OBJ_obj2nid(obj) == _lib.NID_crl_reason:
                bio = _new_mem_buf()

                print_result = _lib.X509V3_EXT_print(bio, ext, 0, 0)
                if not print_result:
                    # Fall back to a raw octet-string dump if the extension
                    # cannot be pretty-printed.
                    print_result = _lib.M_ASN1_OCTET_STRING_print(
                        bio, _lib.X509_EXTENSION_get_data(ext)
                    )
                    _openssl_assert(print_result != 0)

                return _bio_to_string(bio)

    def all_reasons(self):
        """
        Return a list of all the supported reason strings.

        This list is a copy; modifying it does not change the supported reason
        strings.

        :return: A list of reason strings.
        :rtype: :class:`list` of :class:`bytes`
        """
        return self._crl_reasons[:]

    def set_rev_date(self, when):
        """
        Set the revocation timestamp.

        :param bytes when: The timestamp of the revocation,
            as ASN.1 TIME.
        :return: ``None``
        """
        dt = _lib.X509_REVOKED_get0_revocationDate(self._revoked)
        return _set_asn1_time(dt, when)

    def get_rev_date(self):
        """
        Get the revocation timestamp.

        :return: The timestamp of the revocation, as ASN.1 TIME.
        :rtype: bytes
        """
        dt = _lib.X509_REVOKED_get0_revocationDate(self._revoked)
        return _get_asn1_time(dt)
+
+
class CRL(object):
    """
    A certificate revocation list.
    """

    def __init__(self):
        crl = _lib.X509_CRL_new()
        self._crl = _ffi.gc(crl, _lib.X509_CRL_free)

    def to_cryptography(self):
        """
        Export as a ``cryptography`` CRL.

        :rtype: ``cryptography.x509.CertificateRevocationList``

        .. versionadded:: 17.1.0
        """
        from cryptography.x509 import load_der_x509_crl

        # Round-trip through DER to hand the CRL to ``cryptography``.
        der = dump_crl(FILETYPE_ASN1, self)

        backend = _get_backend()
        return load_der_x509_crl(der, backend)

    @classmethod
    def from_cryptography(cls, crypto_crl):
        """
        Construct based on a ``cryptography`` *crypto_crl*.

        :param crypto_crl: A ``cryptography`` certificate revocation list
        :type crypto_crl: ``cryptography.x509.CertificateRevocationList``

        :rtype: CRL

        .. versionadded:: 17.1.0
        """
        if not isinstance(crypto_crl, x509.CertificateRevocationList):
            raise TypeError("Must be a certificate revocation list")

        from cryptography.hazmat.primitives.serialization import Encoding

        # Round-trip through DER in the other direction.
        der = crypto_crl.public_bytes(Encoding.DER)
        return load_crl(FILETYPE_ASN1, der)

    def get_revoked(self):
        """
        Return the revocations in this certificate revocation list.

        These revocations will be provided by value, not by reference.
        That means it's okay to mutate them: it won't affect this CRL.

        :return: The revocations in this CRL, or ``None`` when there are
            none.
        :rtype: :class:`tuple` of :class:`Revocation`
        """
        results = []
        revoked_stack = _lib.X509_CRL_get_REVOKED(self._crl)
        for i in range(_lib.sk_X509_REVOKED_num(revoked_stack)):
            revoked = _lib.sk_X509_REVOKED_value(revoked_stack, i)
            # Deep-copy each entry so mutating the returned Revoked objects
            # cannot affect this CRL.
            revoked_copy = _lib.Cryptography_X509_REVOKED_dup(revoked)
            pyrev = Revoked.__new__(Revoked)
            pyrev._revoked = _ffi.gc(revoked_copy, _lib.X509_REVOKED_free)
            results.append(pyrev)
        # Falls through (returning None) when the CRL has no revocations.
        if results:
            return tuple(results)

    def add_revoked(self, revoked):
        """
        Add a revoked (by value not reference) to the CRL structure

        This revocation will be added by value, not by reference. That
        means it's okay to mutate it after adding: it won't affect
        this CRL.

        :param Revoked revoked: The new revocation.
        :return: ``None``
        """
        copy = _lib.Cryptography_X509_REVOKED_dup(revoked._revoked)
        _openssl_assert(copy != _ffi.NULL)

        # add0: the CRL takes ownership of ``copy``; no gc wrapper needed.
        add_result = _lib.X509_CRL_add0_revoked(self._crl, copy)
        _openssl_assert(add_result != 0)

    def get_issuer(self):
        """
        Get the CRL's issuer.

        .. versionadded:: 16.1.0

        :rtype: X509Name
        """
        # Duplicate the name so the returned X509Name owns its own memory.
        _issuer = _lib.X509_NAME_dup(_lib.X509_CRL_get_issuer(self._crl))
        _openssl_assert(_issuer != _ffi.NULL)
        _issuer = _ffi.gc(_issuer, _lib.X509_NAME_free)
        issuer = X509Name.__new__(X509Name)
        issuer._name = _issuer
        return issuer

    def set_version(self, version):
        """
        Set the CRL version.

        .. versionadded:: 16.1.0

        :param int version: The version of the CRL.
        :return: ``None``
        """
        _openssl_assert(_lib.X509_CRL_set_version(self._crl, version) != 0)

    def _set_boundary_time(self, which, when):
        # ``which`` is an accessor like X509_CRL_get_lastUpdate; write the
        # ASN.1 TIME ``when`` into the field it returns.
        return _set_asn1_time(which(self._crl), when)

    def set_lastUpdate(self, when):
        """
        Set when the CRL was last updated.

        The timestamp is formatted as an ASN.1 TIME::

            YYYYMMDDhhmmssZ

        .. versionadded:: 16.1.0

        :param bytes when: A timestamp string.
        :return: ``None``
        """
        return self._set_boundary_time(_lib.X509_CRL_get_lastUpdate, when)

    def set_nextUpdate(self, when):
        """
        Set when the CRL will next be updated.

        The timestamp is formatted as an ASN.1 TIME::

            YYYYMMDDhhmmssZ

        .. versionadded:: 16.1.0

        :param bytes when: A timestamp string.
        :return: ``None``
        """
        return self._set_boundary_time(_lib.X509_CRL_get_nextUpdate, when)

    def sign(self, issuer_cert, issuer_key, digest):
        """
        Sign the CRL.

        Signing a CRL enables clients to associate the CRL itself with an
        issuer. Before a CRL is meaningful to other OpenSSL functions, it must
        be signed by an issuer.

        This method implicitly sets the issuer's name based on the issuer
        certificate and private key used to sign the CRL.

        .. versionadded:: 16.1.0

        :param X509 issuer_cert: The issuer's certificate.
        :param PKey issuer_key: The issuer's private key.
        :param bytes digest: The digest method to sign the CRL with.
        """
        digest_obj = _lib.EVP_get_digestbyname(digest)
        _openssl_assert(digest_obj != _ffi.NULL)
        _lib.X509_CRL_set_issuer_name(
            self._crl, _lib.X509_get_subject_name(issuer_cert._x509)
        )
        # Sort revocations by serial number before signing, as OpenSSL
        # expects.
        _lib.X509_CRL_sort(self._crl)
        result = _lib.X509_CRL_sign(self._crl, issuer_key._pkey, digest_obj)
        _openssl_assert(result != 0)

    def export(
        self, cert, key, type=FILETYPE_PEM, days=100, digest=_UNSPECIFIED
    ):
        """
        Export the CRL as a string.

        Unlike :meth:`sign`, this also sets the lastUpdate/nextUpdate fields
        (now and now + *days*) before signing and serializing.

        :param X509 cert: The certificate used to sign the CRL.
        :param PKey key: The key used to sign the CRL.
        :param int type: The export format, either :data:`FILETYPE_PEM`,
            :data:`FILETYPE_ASN1`, or :data:`FILETYPE_TEXT`.
        :param int days: The number of days until the next update of this CRL.
        :param bytes digest: The name of the message digest to use (eg
            ``b"sha256"``).
        :rtype: bytes
        """

        if not isinstance(cert, X509):
            raise TypeError("cert must be an X509 instance")
        if not isinstance(key, PKey):
            raise TypeError("key must be a PKey instance")
        if not isinstance(type, int):
            raise TypeError("type must be an integer")

        # ``digest`` has no usable default; the sentinel forces callers to
        # pick one explicitly.
        if digest is _UNSPECIFIED:
            raise TypeError("digest must be provided")

        digest_obj = _lib.EVP_get_digestbyname(digest)
        if digest_obj == _ffi.NULL:
            raise ValueError("No such digest method")

        bio = _lib.BIO_new(_lib.BIO_s_mem())
        _openssl_assert(bio != _ffi.NULL)

        # A scratch time object to give different values to different CRL
        # fields
        sometime = _lib.ASN1_TIME_new()
        _openssl_assert(sometime != _ffi.NULL)

        _lib.X509_gmtime_adj(sometime, 0)
        _lib.X509_CRL_set_lastUpdate(self._crl, sometime)

        _lib.X509_gmtime_adj(sometime, days * 24 * 60 * 60)
        _lib.X509_CRL_set_nextUpdate(self._crl, sometime)

        _lib.X509_CRL_set_issuer_name(
            self._crl, _lib.X509_get_subject_name(cert._x509)
        )

        sign_result = _lib.X509_CRL_sign(self._crl, key._pkey, digest_obj)
        if not sign_result:
            _raise_current_error()

        return dump_crl(type, self)
+
+
class PKCS7(object):
    """
    A PKCS #7 structure; wraps an OpenSSL ``PKCS7`` pointer in ``_pkcs7``.
    """

    def type_is_signed(self):
        """
        Check if this NID_pkcs7_signed object

        :return: True if the PKCS7 is of type signed
        """
        is_signed = _lib.PKCS7_type_is_signed(self._pkcs7)
        return bool(is_signed)

    def type_is_enveloped(self):
        """
        Check if this NID_pkcs7_enveloped object

        :returns: True if the PKCS7 is of type enveloped
        """
        is_enveloped = _lib.PKCS7_type_is_enveloped(self._pkcs7)
        return bool(is_enveloped)

    def type_is_signedAndEnveloped(self):
        """
        Check if this NID_pkcs7_signedAndEnveloped object

        :returns: True if the PKCS7 is of type signedAndEnveloped
        """
        is_both = _lib.PKCS7_type_is_signedAndEnveloped(self._pkcs7)
        return bool(is_both)

    def type_is_data(self):
        """
        Check if this NID_pkcs7_data object

        :return: True if the PKCS7 is of type data
        """
        is_data = _lib.PKCS7_type_is_data(self._pkcs7)
        return bool(is_data)

    def get_type_name(self):
        """
        Returns the type name of the PKCS7 structure

        :return: A string with the typename
        """
        # Map the content-type OID to its short name (e.g. "pkcs7-signedData").
        type_nid = _lib.OBJ_obj2nid(self._pkcs7.type)
        return _ffi.string(_lib.OBJ_nid2sn(type_nid))
+
+
class PKCS12(object):
    """
    A PKCS #12 archive.
    """

    def __init__(self):
        # Every component is optional; ``None`` means "not present".
        self._pkey = None
        self._cert = None
        self._cacerts = None
        self._friendlyname = None

    def get_certificate(self):
        """
        Get the certificate in the PKCS #12 structure.

        :return: The certificate, or :py:const:`None` if there is none.
        :rtype: :py:class:`X509` or :py:const:`None`
        """
        return self._cert

    def set_certificate(self, cert):
        """
        Set the certificate in the PKCS #12 structure.

        :param cert: The new certificate, or :py:const:`None` to unset it.
        :type cert: :py:class:`X509` or :py:const:`None`

        :return: ``None``
        """
        # Accept ``None`` to unset, as the docstring has always promised;
        # previously passing None raised TypeError instead.
        if cert is None:
            self._cert = None
            return
        if not isinstance(cert, X509):
            raise TypeError("cert must be an X509 instance")
        self._cert = cert

    def get_privatekey(self):
        """
        Get the private key in the PKCS #12 structure.

        :return: The private key, or :py:const:`None` if there is none.
        :rtype: :py:class:`PKey`
        """
        return self._pkey

    def set_privatekey(self, pkey):
        """
        Set the certificate portion of the PKCS #12 structure.

        :param pkey: The new private key, or :py:const:`None` to unset it.
        :type pkey: :py:class:`PKey` or :py:const:`None`

        :return: ``None``
        """
        # Accept ``None`` to unset, matching the documented contract (see
        # set_certificate).
        if pkey is None:
            self._pkey = None
            return
        if not isinstance(pkey, PKey):
            raise TypeError("pkey must be a PKey instance")
        self._pkey = pkey

    def get_ca_certificates(self):
        """
        Get the CA certificates in the PKCS #12 structure.

        :return: A tuple with the CA certificates in the chain, or
            :py:const:`None` if there are none.
        :rtype: :py:class:`tuple` of :py:class:`X509` or :py:const:`None`
        """
        if self._cacerts is not None:
            return tuple(self._cacerts)

    def set_ca_certificates(self, cacerts):
        """
        Replace or set the CA certificates within the PKCS12 object.

        :param cacerts: The new CA certificates, or :py:const:`None` to unset
            them.
        :type cacerts: An iterable of :py:class:`X509` or :py:const:`None`

        :return: ``None``
        """
        if cacerts is None:
            self._cacerts = None
        else:
            # Materialize the iterable so it can be validated and iterated
            # again at export time.
            cacerts = list(cacerts)
            for cert in cacerts:
                if not isinstance(cert, X509):
                    raise TypeError(
                        "iterable must only contain X509 instances"
                    )
            self._cacerts = cacerts

    def set_friendlyname(self, name):
        """
        Set the friendly name in the PKCS #12 structure.

        :param name: The new friendly name, or :py:const:`None` to unset.
        :type name: :py:class:`bytes` or :py:const:`None`

        :return: ``None``
        """
        if name is None:
            self._friendlyname = None
            return
        if not isinstance(name, bytes):
            raise TypeError(
                "name must be a byte string or None (not %r)" % (name,)
            )
        self._friendlyname = name

    def get_friendlyname(self):
        """
        Get the friendly name in the PKCS# 12 structure.

        :returns: The friendly name, or :py:const:`None` if there is none.
        :rtype: :py:class:`bytes` or :py:const:`None`
        """
        return self._friendlyname

    def export(self, passphrase=None, iter=2048, maciter=1):
        """
        Dump a PKCS12 object as a string.

        For more information, see the :c:func:`PKCS12_create` man page.

        :param passphrase: The passphrase used to encrypt the structure. Unlike
            some other passphrase arguments, this *must* be a string, not a
            callback.
        :type passphrase: :py:data:`bytes`

        :param iter: Number of times to repeat the encryption step.
        :type iter: :py:data:`int`

        :param maciter: Number of times to repeat the MAC step.
        :type maciter: :py:data:`int`

        :return: The string representation of the PKCS #12 structure.
        :rtype: bytes
        """
        passphrase = _text_to_bytes_and_warn("passphrase", passphrase)

        # Translate each optional component into the pointer (or NULL)
        # PKCS12_create expects.
        if self._cacerts is None:
            cacerts = _ffi.NULL
        else:
            cacerts = _lib.sk_X509_new_null()
            cacerts = _ffi.gc(cacerts, _lib.sk_X509_free)
            for cert in self._cacerts:
                _lib.sk_X509_push(cacerts, cert._x509)

        if passphrase is None:
            passphrase = _ffi.NULL

        friendlyname = self._friendlyname
        if friendlyname is None:
            friendlyname = _ffi.NULL

        if self._pkey is None:
            pkey = _ffi.NULL
        else:
            pkey = self._pkey._pkey

        if self._cert is None:
            cert = _ffi.NULL
        else:
            cert = self._cert._x509

        pkcs12 = _lib.PKCS12_create(
            passphrase,
            friendlyname,
            pkey,
            cert,
            cacerts,
            _lib.NID_pbe_WithSHA1And3_Key_TripleDES_CBC,
            _lib.NID_pbe_WithSHA1And3_Key_TripleDES_CBC,
            iter,
            maciter,
            0,
        )
        if pkcs12 == _ffi.NULL:
            _raise_current_error()
        pkcs12 = _ffi.gc(pkcs12, _lib.PKCS12_free)

        bio = _new_mem_buf()
        _lib.i2d_PKCS12_bio(bio, pkcs12)
        return _bio_to_string(bio)
+
+
class NetscapeSPKI(object):
    """
    A Netscape SPKI object.
    """

    def __init__(self):
        spki = _lib.NETSCAPE_SPKI_new()
        self._spki = _ffi.gc(spki, _lib.NETSCAPE_SPKI_free)

    def sign(self, pkey, digest):
        """
        Sign the certificate request with this key and digest type.

        :param pkey: The private key to sign with.
        :type pkey: :py:class:`PKey`

        :param digest: The message digest to use.
        :type digest: :py:class:`bytes`

        :return: ``None``

        :raises ValueError: If *pkey* holds only a public key, is
            uninitialized, or *digest* does not name a known digest.
        """
        # Signing requires the private half of the key.
        if pkey._only_public:
            raise ValueError("Key has only public part")

        if not pkey._initialized:
            raise ValueError("Key is uninitialized")

        digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
        if digest_obj == _ffi.NULL:
            raise ValueError("No such digest method")

        sign_result = _lib.NETSCAPE_SPKI_sign(
            self._spki, pkey._pkey, digest_obj
        )
        _openssl_assert(sign_result > 0)

    def verify(self, key):
        """
        Verifies a signature on a certificate request.

        :param PKey key: The public key that signature is supposedly from.

        :return: ``True`` if the signature is correct.
        :rtype: bool

        :raises OpenSSL.crypto.Error: If the signature is invalid, or there was
            a problem verifying the signature.
        """
        answer = _lib.NETSCAPE_SPKI_verify(self._spki, key._pkey)
        if answer <= 0:
            _raise_current_error()
        return True

    def b64_encode(self):
        """
        Generate a base64 encoded representation of this SPKI object.

        :return: The base64 encoded string.
        :rtype: :py:class:`bytes`
        """
        # OpenSSL allocates the encoded buffer; copy it into Python memory
        # and free the original to avoid a leak.
        encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki)
        result = _ffi.string(encoded)
        _lib.OPENSSL_free(encoded)
        return result

    def get_pubkey(self):
        """
        Get the public key of this certificate.

        :return: The public key.
        :rtype: :py:class:`PKey`
        """
        pkey = PKey.__new__(PKey)
        pkey._pkey = _lib.NETSCAPE_SPKI_get_pubkey(self._spki)
        _openssl_assert(pkey._pkey != _ffi.NULL)
        pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
        # The returned key carries no private material.
        pkey._only_public = True
        return pkey

    def set_pubkey(self, pkey):
        """
        Set the public key of the certificate

        :param pkey: The public key
        :return: ``None``
        """
        set_result = _lib.NETSCAPE_SPKI_set_pubkey(self._spki, pkey._pkey)
        _openssl_assert(set_result == 1)
+
+
class _PassphraseHelper(object):
    """
    Bridge a Python passphrase (a byte string or a callable returning one)
    to OpenSSL's ``pem_password_cb`` interface.

    Exceptions raised by a passphrase callback cannot propagate through the
    OpenSSL call stack, so they are captured and re-raised afterwards via
    :meth:`raise_if_problem`.
    """

    def __init__(self, type, passphrase, more_args=False, truncate=False):
        # Passphrase-based encryption is only meaningful for PEM output.
        if type != FILETYPE_PEM and passphrase is not None:
            raise ValueError(
                "only FILETYPE_PEM key format supports encryption"
            )
        self._passphrase = passphrase
        # When True, the callback is invoked as f(size, rwflag, userdata)
        # instead of f(rwflag).
        self._more_args = more_args
        # When True, over-long passphrases are silently truncated to fit the
        # OpenSSL-supplied buffer instead of raising.
        self._truncate = truncate
        # Exceptions captured inside the C callback, re-raised later.
        self._problems = []

    @property
    def callback(self):
        # The cffi callback object must stay referenced (via this helper)
        # for as long as OpenSSL may invoke it.
        if self._passphrase is None:
            return _ffi.NULL
        elif isinstance(self._passphrase, bytes) or callable(self._passphrase):
            return _ffi.callback("pem_password_cb", self._read_passphrase)
        else:
            raise TypeError(
                "Last argument must be a byte string or a callable."
            )

    @property
    def callback_args(self):
        if self._passphrase is None:
            return _ffi.NULL
        elif isinstance(self._passphrase, bytes) or callable(self._passphrase):
            return _ffi.NULL
        else:
            raise TypeError(
                "Last argument must be a byte string or a callable."
            )

    def raise_if_problem(self, exceptionType=Error):
        # Re-raise the first exception captured during a callback, if any;
        # must be called after the OpenSSL call that used ``callback``.
        if self._problems:

            # Flush the OpenSSL error queue
            try:
                _exception_from_error_queue(exceptionType)
            except exceptionType:
                pass

            raise self._problems.pop(0)

    def _read_passphrase(self, buf, size, rwflag, userdata):
        # pem_password_cb body: copy the passphrase into ``buf`` (at most
        # ``size`` bytes) and return its length; 0 signals failure.
        try:
            if callable(self._passphrase):
                if self._more_args:
                    result = self._passphrase(size, rwflag, userdata)
                else:
                    result = self._passphrase(rwflag)
            else:
                result = self._passphrase
            if not isinstance(result, bytes):
                raise ValueError("Bytes expected")
            if len(result) > size:
                if self._truncate:
                    result = result[:size]
                else:
                    raise ValueError(
                        "passphrase returned by callback is too long"
                    )
            for i in range(len(result)):
                buf[i] = result[i : i + 1]
            return len(result)
        except Exception as e:
            # Stash the exception; it cannot cross the C boundary.
            self._problems.append(e)
            return 0
+
+
def load_publickey(type, buffer):
    """
    Load a public key from a buffer.

    :param type: The file type (one of :data:`FILETYPE_PEM`,
        :data:`FILETYPE_ASN1`).
    :param buffer: The buffer the key is stored in.
    :type buffer: A Python string object, either unicode or bytestring.
    :return: The PKey object.
    :rtype: :class:`PKey`
    """
    # Text input is accepted for convenience; key material is ASCII-safe.
    if isinstance(buffer, _text_type):
        buffer = buffer.encode("ascii")

    bio = _new_mem_buf(buffer)

    if type == FILETYPE_PEM:
        raw_key = _lib.PEM_read_bio_PUBKEY(
            bio, _ffi.NULL, _ffi.NULL, _ffi.NULL
        )
    elif type == FILETYPE_ASN1:
        raw_key = _lib.d2i_PUBKEY_bio(bio, _ffi.NULL)
    else:
        raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")

    if raw_key == _ffi.NULL:
        _raise_current_error()

    # Wrap the raw EVP_PKEY without running PKey.__init__ (which would
    # allocate a fresh key).
    pkey = PKey.__new__(PKey)
    pkey._pkey = _ffi.gc(raw_key, _lib.EVP_PKEY_free)
    pkey._only_public = True
    return pkey
+
+
+def load_privatekey(type, buffer, passphrase=None):
+ """
+ Load a private key (PKey) from the string *buffer* encoded with the type
+ *type*.
+
+ :param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1)
+ :param buffer: The buffer the key is stored in
+ :param passphrase: (optional) if encrypted PEM format, this can be
+ either the passphrase to use, or a callback for
+ providing the passphrase.
+
+ :return: The PKey object
+ """
+ if isinstance(buffer, _text_type):
+ buffer = buffer.encode("ascii")
+
+ bio = _new_mem_buf(buffer)
+
+ helper = _PassphraseHelper(type, passphrase)
+ if type == FILETYPE_PEM:
+ evp_pkey = _lib.PEM_read_bio_PrivateKey(
+ bio, _ffi.NULL, helper.callback, helper.callback_args
+ )
+ helper.raise_if_problem()
+ elif type == FILETYPE_ASN1:
+ evp_pkey = _lib.d2i_PrivateKey_bio(bio, _ffi.NULL)
+ else:
+ raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")
+
+ if evp_pkey == _ffi.NULL:
+ _raise_current_error()
+
+ pkey = PKey.__new__(PKey)
+ pkey._pkey = _ffi.gc(evp_pkey, _lib.EVP_PKEY_free)
+ return pkey
+
+
+def dump_certificate_request(type, req):
+ """
+ Dump the certificate request *req* into a buffer string encoded with the
+ type *type*.
+
+ :param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1)
+ :param req: The certificate request to dump
+ :return: The buffer with the dumped certificate request in
+ """
+ bio = _new_mem_buf()
+
+ if type == FILETYPE_PEM:
+ result_code = _lib.PEM_write_bio_X509_REQ(bio, req._req)
+ elif type == FILETYPE_ASN1:
+ result_code = _lib.i2d_X509_REQ_bio(bio, req._req)
+ elif type == FILETYPE_TEXT:
+ result_code = _lib.X509_REQ_print_ex(bio, req._req, 0, 0)
+ else:
+ raise ValueError(
+ "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or "
+ "FILETYPE_TEXT"
+ )
+
+ _openssl_assert(result_code != 0)
+
+ return _bio_to_string(bio)
+
+
+def load_certificate_request(type, buffer):
+ """
+ Load a certificate request (X509Req) from the string *buffer* encoded with
+ the type *type*.
+
+ :param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1)
+ :param buffer: The buffer the certificate request is stored in
+ :return: The X509Req object
+ """
+ if isinstance(buffer, _text_type):
+ buffer = buffer.encode("ascii")
+
+ bio = _new_mem_buf(buffer)
+
+ if type == FILETYPE_PEM:
+ req = _lib.PEM_read_bio_X509_REQ(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
+ elif type == FILETYPE_ASN1:
+ req = _lib.d2i_X509_REQ_bio(bio, _ffi.NULL)
+ else:
+ raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")
+
+ _openssl_assert(req != _ffi.NULL)
+
+ x509req = X509Req.__new__(X509Req)
+ x509req._req = _ffi.gc(req, _lib.X509_REQ_free)
+ return x509req
+
+
+def sign(pkey, data, digest):
+ """
+ Sign a data string using the given key and message digest.
+
+ :param pkey: PKey to sign with
+ :param data: data to be signed
+ :param digest: message digest to use
+ :return: signature
+
+ .. versionadded:: 0.11
+ """
+ data = _text_to_bytes_and_warn("data", data)
+
+ digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
+ if digest_obj == _ffi.NULL:
+ raise ValueError("No such digest method")
+
+ md_ctx = _lib.Cryptography_EVP_MD_CTX_new()
+ md_ctx = _ffi.gc(md_ctx, _lib.Cryptography_EVP_MD_CTX_free)
+
+ _lib.EVP_SignInit(md_ctx, digest_obj)
+ _lib.EVP_SignUpdate(md_ctx, data, len(data))
+
+ length = _lib.EVP_PKEY_size(pkey._pkey)
+ _openssl_assert(length > 0)
+ signature_buffer = _ffi.new("unsigned char[]", length)
+ signature_length = _ffi.new("unsigned int *")
+ final_result = _lib.EVP_SignFinal(
+ md_ctx, signature_buffer, signature_length, pkey._pkey
+ )
+ _openssl_assert(final_result == 1)
+
+ return _ffi.buffer(signature_buffer, signature_length[0])[:]
+
+
+def verify(cert, signature, data, digest):
+ """
+ Verify the signature for a data string.
+
+ :param cert: signing certificate (X509 object) corresponding to the
+ private key which generated the signature.
+ :param signature: signature returned by sign function
+ :param data: data to be verified
+ :param digest: message digest to use
+ :return: ``None`` if the signature is correct, raise exception otherwise.
+
+ .. versionadded:: 0.11
+ """
+ data = _text_to_bytes_and_warn("data", data)
+
+ digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
+ if digest_obj == _ffi.NULL:
+ raise ValueError("No such digest method")
+
+ pkey = _lib.X509_get_pubkey(cert._x509)
+ _openssl_assert(pkey != _ffi.NULL)
+ pkey = _ffi.gc(pkey, _lib.EVP_PKEY_free)
+
+ md_ctx = _lib.Cryptography_EVP_MD_CTX_new()
+ md_ctx = _ffi.gc(md_ctx, _lib.Cryptography_EVP_MD_CTX_free)
+
+ _lib.EVP_VerifyInit(md_ctx, digest_obj)
+ _lib.EVP_VerifyUpdate(md_ctx, data, len(data))
+ verify_result = _lib.EVP_VerifyFinal(
+ md_ctx, signature, len(signature), pkey
+ )
+
+ if verify_result != 1:
+ _raise_current_error()
+
+
+def dump_crl(type, crl):
+ """
+ Dump a certificate revocation list to a buffer.
+
+ :param type: The file type (one of ``FILETYPE_PEM``, ``FILETYPE_ASN1``, or
+ ``FILETYPE_TEXT``).
+ :param CRL crl: The CRL to dump.
+
+ :return: The buffer with the CRL.
+ :rtype: bytes
+ """
+ bio = _new_mem_buf()
+
+ if type == FILETYPE_PEM:
+ ret = _lib.PEM_write_bio_X509_CRL(bio, crl._crl)
+ elif type == FILETYPE_ASN1:
+ ret = _lib.i2d_X509_CRL_bio(bio, crl._crl)
+ elif type == FILETYPE_TEXT:
+ ret = _lib.X509_CRL_print(bio, crl._crl)
+ else:
+ raise ValueError(
+ "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or "
+ "FILETYPE_TEXT"
+ )
+
+ _openssl_assert(ret == 1)
+ return _bio_to_string(bio)
+
+
+def load_crl(type, buffer):
+ """
+ Load Certificate Revocation List (CRL) data from a string *buffer*.
+ *buffer* encoded with the type *type*.
+
+ :param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1)
+ :param buffer: The buffer the CRL is stored in
+
+ :return: The PKey object
+ """
+ if isinstance(buffer, _text_type):
+ buffer = buffer.encode("ascii")
+
+ bio = _new_mem_buf(buffer)
+
+ if type == FILETYPE_PEM:
+ crl = _lib.PEM_read_bio_X509_CRL(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
+ elif type == FILETYPE_ASN1:
+ crl = _lib.d2i_X509_CRL_bio(bio, _ffi.NULL)
+ else:
+ raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")
+
+ if crl == _ffi.NULL:
+ _raise_current_error()
+
+ result = CRL.__new__(CRL)
+ result._crl = _ffi.gc(crl, _lib.X509_CRL_free)
+ return result
+
+
+def load_pkcs7_data(type, buffer):
+ """
+ Load pkcs7 data from the string *buffer* encoded with the type
+ *type*.
+
+ :param type: The file type (one of FILETYPE_PEM or FILETYPE_ASN1)
+ :param buffer: The buffer with the pkcs7 data.
+ :return: The PKCS7 object
+ """
+ if isinstance(buffer, _text_type):
+ buffer = buffer.encode("ascii")
+
+ bio = _new_mem_buf(buffer)
+
+ if type == FILETYPE_PEM:
+ pkcs7 = _lib.PEM_read_bio_PKCS7(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
+ elif type == FILETYPE_ASN1:
+ pkcs7 = _lib.d2i_PKCS7_bio(bio, _ffi.NULL)
+ else:
+ raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")
+
+ if pkcs7 == _ffi.NULL:
+ _raise_current_error()
+
+ pypkcs7 = PKCS7.__new__(PKCS7)
+ pypkcs7._pkcs7 = _ffi.gc(pkcs7, _lib.PKCS7_free)
+ return pypkcs7
+
+
+load_pkcs7_data = utils.deprecated(
+ load_pkcs7_data,
+ __name__,
+ (
+ "PKCS#7 support in pyOpenSSL is deprecated. You should use the APIs "
+ "in cryptography."
+ ),
+ DeprecationWarning,
+)
+
+
+def load_pkcs12(buffer, passphrase=None):
+ """
+ Load pkcs12 data from the string *buffer*. If the pkcs12 structure is
+ encrypted, a *passphrase* must be included. The MAC is always
+ checked and thus required.
+
+ See also the man page for the C function :py:func:`PKCS12_parse`.
+
+ :param buffer: The buffer the certificate is stored in
+ :param passphrase: (Optional) The password to decrypt the PKCS12 lump
+ :returns: The PKCS12 object
+ """
+ passphrase = _text_to_bytes_and_warn("passphrase", passphrase)
+
+ if isinstance(buffer, _text_type):
+ buffer = buffer.encode("ascii")
+
+ bio = _new_mem_buf(buffer)
+
+ # Use null passphrase if passphrase is None or empty string. With PKCS#12
+ # password based encryption no password and a zero length password are two
+ # different things, but OpenSSL implementation will try both to figure out
+ # which one works.
+ if not passphrase:
+ passphrase = _ffi.NULL
+
+ p12 = _lib.d2i_PKCS12_bio(bio, _ffi.NULL)
+ if p12 == _ffi.NULL:
+ _raise_current_error()
+ p12 = _ffi.gc(p12, _lib.PKCS12_free)
+
+ pkey = _ffi.new("EVP_PKEY**")
+ cert = _ffi.new("X509**")
+ cacerts = _ffi.new("Cryptography_STACK_OF_X509**")
+
+ parse_result = _lib.PKCS12_parse(p12, passphrase, pkey, cert, cacerts)
+ if not parse_result:
+ _raise_current_error()
+
+ cacerts = _ffi.gc(cacerts[0], _lib.sk_X509_free)
+
+ # openssl 1.0.0 sometimes leaves an X509_check_private_key error in the
+ # queue for no particular reason. This error isn't interesting to anyone
+ # outside this function. It's not even interesting to us. Get rid of it.
+ try:
+ _raise_current_error()
+ except Error:
+ pass
+
+ if pkey[0] == _ffi.NULL:
+ pykey = None
+ else:
+ pykey = PKey.__new__(PKey)
+ pykey._pkey = _ffi.gc(pkey[0], _lib.EVP_PKEY_free)
+
+ if cert[0] == _ffi.NULL:
+ pycert = None
+ friendlyname = None
+ else:
+ pycert = X509._from_raw_x509_ptr(cert[0])
+
+ friendlyname_length = _ffi.new("int*")
+ friendlyname_buffer = _lib.X509_alias_get0(
+ cert[0], friendlyname_length
+ )
+ friendlyname = _ffi.buffer(
+ friendlyname_buffer, friendlyname_length[0]
+ )[:]
+ if friendlyname_buffer == _ffi.NULL:
+ friendlyname = None
+
+ pycacerts = []
+ for i in range(_lib.sk_X509_num(cacerts)):
+ x509 = _lib.sk_X509_value(cacerts, i)
+ pycacert = X509._from_raw_x509_ptr(x509)
+ pycacerts.append(pycacert)
+ if not pycacerts:
+ pycacerts = None
+
+ pkcs12 = PKCS12.__new__(PKCS12)
+ pkcs12._pkey = pykey
+ pkcs12._cert = pycert
+ pkcs12._cacerts = pycacerts
+ pkcs12._friendlyname = friendlyname
+ return pkcs12
+
+
+load_pkcs12 = utils.deprecated(
+ load_pkcs12,
+ __name__,
+ (
+ "PKCS#12 support in pyOpenSSL is deprecated. You should use the APIs "
+ "in cryptography."
+ ),
+ DeprecationWarning,
+)
+
+
+# There are no direct unit tests for this initialization. It is tested
+# indirectly since it is necessary for functions like dump_privatekey when
+# using encryption.
+#
+# Thus OpenSSL.test.test_crypto.FunctionTests.test_dump_privatekey_passphrase
+# and some other similar tests may fail without this (though they may not if
+# the Python runtime has already done some initialization of the underlying
+# OpenSSL library (and is linked against the same one that cryptography is
+# using)).
+_lib.OpenSSL_add_all_algorithms()
+
+# This is similar but exercised mainly by exception_from_error_queue. It calls
+# both ERR_load_crypto_strings() and ERR_load_SSL_strings().
+_lib.SSL_load_error_strings()
+
+
+# Set the default string mask to match OpenSSL upstream (since 2005) and
+# RFC5280 recommendations.
+_lib.ASN1_STRING_set_default_mask_asc(b"utf8only")
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/debug.py b/contrib/python/pyOpenSSL/py3/OpenSSL/debug.py
new file mode 100644
index 0000000000..04521d5922
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/debug.py
@@ -0,0 +1,42 @@
+from __future__ import print_function
+
+import ssl
+import sys
+
+import OpenSSL.SSL
+import cffi
+import cryptography
+
+from . import version
+
+
+_env_info = u"""\
+pyOpenSSL: {pyopenssl}
+cryptography: {cryptography}
+cffi: {cffi}
+cryptography's compiled against OpenSSL: {crypto_openssl_compile}
+cryptography's linked OpenSSL: {crypto_openssl_link}
+Python's OpenSSL: {python_openssl}
+Python executable: {python}
+Python version: {python_version}
+Platform: {platform}
+sys.path: {sys_path}""".format(
+ pyopenssl=version.__version__,
+ crypto_openssl_compile=OpenSSL._util.ffi.string(
+ OpenSSL._util.lib.OPENSSL_VERSION_TEXT,
+ ).decode("ascii"),
+ crypto_openssl_link=OpenSSL.SSL.SSLeay_version(
+ OpenSSL.SSL.SSLEAY_VERSION
+ ).decode("ascii"),
+ python_openssl=getattr(ssl, "OPENSSL_VERSION", "n/a"),
+ cryptography=cryptography.__version__,
+ cffi=cffi.__version__,
+ python=sys.executable,
+ python_version=sys.version,
+ platform=sys.platform,
+ sys_path=sys.path,
+)
+
+
+if __name__ == "__main__":
+ print(_env_info)
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/rand.py b/contrib/python/pyOpenSSL/py3/OpenSSL/rand.py
new file mode 100644
index 0000000000..d2c17673e5
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/rand.py
@@ -0,0 +1,40 @@
+"""
+PRNG management routines, thin wrappers.
+"""
+
+from OpenSSL._util import lib as _lib
+
+
+def add(buffer, entropy):
+ """
+ Mix bytes from *string* into the PRNG state.
+
+ The *entropy* argument is (the lower bound of) an estimate of how much
+ randomness is contained in *string*, measured in bytes.
+
+ For more information, see e.g. :rfc:`1750`.
+
+ This function is only relevant if you are forking Python processes and
+ need to reseed the CSPRNG after fork.
+
+ :param buffer: Buffer with random data.
+ :param entropy: The entropy (in bytes) measurement of the buffer.
+
+ :return: :obj:`None`
+ """
+ if not isinstance(buffer, bytes):
+ raise TypeError("buffer must be a byte string")
+
+ if not isinstance(entropy, int):
+ raise TypeError("entropy must be an integer")
+
+ _lib.RAND_add(buffer, len(buffer), entropy)
+
+
+def status():
+ """
+ Check whether the PRNG has been seeded with enough data.
+
+ :return: 1 if the PRNG is seeded enough, 0 otherwise.
+ """
+ return _lib.RAND_status()
diff --git a/contrib/python/pyOpenSSL/py3/OpenSSL/version.py b/contrib/python/pyOpenSSL/py3/OpenSSL/version.py
new file mode 100644
index 0000000000..c6fcecb077
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/OpenSSL/version.py
@@ -0,0 +1,28 @@
+# Copyright (C) AB Strakt
+# Copyright (C) Jean-Paul Calderone
+# See LICENSE for details.
+
+"""
+pyOpenSSL - A simple wrapper around the OpenSSL library
+"""
+
+__all__ = [
+ "__author__",
+ "__copyright__",
+ "__email__",
+ "__license__",
+ "__summary__",
+ "__title__",
+ "__uri__",
+ "__version__",
+]
+
+__version__ = "21.0.0"
+
+__title__ = "pyOpenSSL"
+__uri__ = "https://pyopenssl.org/"
+__summary__ = "Python wrapper module around the OpenSSL library"
+__author__ = "The pyOpenSSL developers"
+__email__ = "cryptography-dev@python.org"
+__license__ = "Apache License, Version 2.0"
+__copyright__ = "Copyright 2001-2020 {0}".format(__author__)
diff --git a/contrib/python/pyOpenSSL/py3/ya.make b/contrib/python/pyOpenSSL/py3/ya.make
new file mode 100644
index 0000000000..8f449cc38b
--- /dev/null
+++ b/contrib/python/pyOpenSSL/py3/ya.make
@@ -0,0 +1,37 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(21.0.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/cryptography
+ contrib/python/six
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ OpenSSL/SSL.py
+ OpenSSL/__init__.py
+ OpenSSL/_util.py
+ OpenSSL/crypto.py
+ OpenSSL/debug.py
+ OpenSSL/rand.py
+ OpenSSL/version.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/pyOpenSSL/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/pyasn1-modules/py2/.dist-info/METADATA b/contrib/python/pyasn1-modules/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..34a82a084a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/.dist-info/METADATA
@@ -0,0 +1,74 @@
+Metadata-Version: 2.1
+Name: pyasn1-modules
+Version: 0.3.0
+Summary: A collection of ASN.1-based protocols modules
+Home-page: https://github.com/pyasn1/pyasn1-modules
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: pyasn1 maintenance organization
+Maintainer-email: Christian Heimes <christian@python.org>
+License: BSD
+Project-URL: Source, https://github.com/pyasn1/pyasn1-modules
+Project-URL: Issues, https://github.com/pyasn1/pyasn1-modules/issues
+Project-URL: Changelog, https://github.com/pyasn1/pyasn1-modules/blob/master/CHANGES.txt
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Communications
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7
+Description-Content-Type: text/markdown
+License-File: LICENSE.txt
+Requires-Dist: pyasn1 (<0.6.0,>=0.4.6)
+
+
+ASN.1 modules for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1-modules.svg?maxAge=2592000)](https://pypi.org/project/pyasn1-modules)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1-modules.svg)](https://pypi.org/project/pyasn1-modules/)
+[![Build status](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1-modules.svg)](https://codecov.io/github/pyasn1/pyasn1-modules)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1-modules/master/LICENSE.txt)
+
+The `pyasn1-modules` package contains a collection of
+[ASN.1](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items)
+data structures expressed as Python classes based on [pyasn1](https://github.com/pyasn1/pyasn1)
+data model.
+
+If ASN.1 module you need is not present in this collection, try using
+[Asn1ate](https://github.com/kimgr/asn1ate) tool that compiles ASN.1 documents
+into pyasn1 code.
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1-modules.
+
+Feedback
+--------
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1-modules/issues) at GitHub
+or post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+
+New modules contributions are welcome via GitHub pull requests.
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1-modules/py2/.dist-info/top_level.txt b/contrib/python/pyasn1-modules/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..9dad8496ee
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1_modules
diff --git a/contrib/python/pyasn1-modules/py2/LICENSE.txt b/contrib/python/pyasn1-modules/py2/LICENSE.txt
new file mode 100644
index 0000000000..598b8430ef
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/LICENSE.txt
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/pyasn1-modules/py2/README.md b/contrib/python/pyasn1-modules/py2/README.md
new file mode 100644
index 0000000000..c70b1e8bc3
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/README.md
@@ -0,0 +1,32 @@
+
+ASN.1 modules for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1-modules.svg?maxAge=2592000)](https://pypi.org/project/pyasn1-modules)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1-modules.svg)](https://pypi.org/project/pyasn1-modules/)
+[![Build status](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1-modules.svg)](https://codecov.io/github/pyasn1/pyasn1-modules)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1-modules/master/LICENSE.txt)
+
+The `pyasn1-modules` package contains a collection of
+[ASN.1](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items)
+data structures expressed as Python classes based on [pyasn1](https://github.com/pyasn1/pyasn1)
+data model.
+
+If ASN.1 module you need is not present in this collection, try using
+[Asn1ate](https://github.com/kimgr/asn1ate) tool that compiles ASN.1 documents
+into pyasn1 code.
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1-modules.
+
+Feedback
+--------
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1-modules/issues) at GitHub
+or post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+
+New modules contributions are welcome via GitHub pull requests.
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/__init__.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/__init__.py
new file mode 100644
index 0000000000..95a220efd2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/__init__.py
@@ -0,0 +1,2 @@
+# http://www.python.org/dev/peps/pep-0396/
+__version__ = '0.3.0'
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/pem.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/pem.py
new file mode 100644
index 0000000000..f7c80a9b9d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/pem.py
@@ -0,0 +1,65 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import base64
+import sys
+
+stSpam, stHam, stDump = 0, 1, 2
+
+
+# The markers parameters is in form ('start1', 'stop1'), ('start2', 'stop2')...
+# Return is (marker-index, substrate)
+def readPemBlocksFromFile(fileObj, *markers):
+ startMarkers = dict(map(lambda x: (x[1], x[0]),
+ enumerate(map(lambda y: y[0], markers))))
+ stopMarkers = dict(map(lambda x: (x[1], x[0]),
+ enumerate(map(lambda y: y[1], markers))))
+ idx = -1
+ substrate = ''
+ certLines = []
+ state = stSpam
+ while True:
+ certLine = fileObj.readline()
+ if not certLine:
+ break
+ certLine = certLine.strip()
+ if state == stSpam:
+ if certLine in startMarkers:
+ certLines = []
+ idx = startMarkers[certLine]
+ state = stHam
+ continue
+ if state == stHam:
+ if certLine in stopMarkers and stopMarkers[certLine] == idx:
+ state = stDump
+ else:
+ certLines.append(certLine)
+ if state == stDump:
+ if sys.version_info[0] <= 2:
+ substrate = ''.join([base64.b64decode(x) for x in certLines])
+ else:
+ substrate = ''.encode().join([base64.b64decode(x.encode()) for x in certLines])
+ break
+ return idx, substrate
+
+
+# Backward compatibility routine
+def readPemFromFile(fileObj,
+ startMarker='-----BEGIN CERTIFICATE-----',
+ endMarker='-----END CERTIFICATE-----'):
+ idx, substrate = readPemBlocksFromFile(fileObj, (startMarker, endMarker))
+ return substrate
+
+
+def readBase64fromText(text):
+ if sys.version_info[0] <= 2:
+ return base64.b64decode(text)
+ else:
+ return base64.b64decode(text.encode())
+
+
+def readBase64FromFile(fileObj):
+ return readBase64fromText(fileObj.read())
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1155.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1155.py
new file mode 100644
index 0000000000..18702345d1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1155.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1155.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class ObjectName(univ.ObjectIdentifier):
+ pass
+
+
+class SimpleSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Integer()),
+ namedtype.NamedType('string', univ.OctetString()),
+ namedtype.NamedType('object', univ.ObjectIdentifier()),
+ namedtype.NamedType('empty', univ.Null())
+ )
+
+
+class IpAddress(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(
+ 4, 4
+ )
+
+
+class NetworkAddress(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('internet', IpAddress())
+ )
+
+
+class Counter(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 1)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Gauge(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class TimeTicks(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 3)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Opaque(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 4)
+ )
+
+
+class ApplicationSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('address', NetworkAddress()),
+ namedtype.NamedType('counter', Counter()),
+ namedtype.NamedType('gauge', Gauge()),
+ namedtype.NamedType('ticks', TimeTicks()),
+ namedtype.NamedType('arbitrary', Opaque())
+ )
+
+
+class ObjectSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', SimpleSyntax()),
+ namedtype.NamedType('application-wide', ApplicationSyntax())
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1157.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1157.py
new file mode 100644
index 0000000000..df49e482db
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1157.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1157.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1155
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('version-1', 0)
+ )
+ defaultValue = 0
+
+
+class Community(univ.OctetString):
+ pass
+
+
+class RequestID(univ.Integer):
+ pass
+
+
+class ErrorStatus(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('noError', 0),
+ ('tooBig', 1),
+ ('noSuchName', 2),
+ ('badValue', 3),
+ ('readOnly', 4),
+ ('genErr', 5)
+ )
+
+
+class ErrorIndex(univ.Integer):
+ pass
+
+
+class VarBind(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', rfc1155.ObjectName()),
+ namedtype.NamedType('value', rfc1155.ObjectSyntax())
+ )
+
+
+class VarBindList(univ.SequenceOf):
+ componentType = VarBind()
+
+
+class _RequestBase(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', RequestID()),
+ namedtype.NamedType('error-status', ErrorStatus()),
+ namedtype.NamedType('error-index', ErrorIndex()),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class GetRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+
+
+class GetNextRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+
+
+class GetResponsePDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+
+
+class SetRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+
+
+class TrapPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
+ namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
+ namedtype.NamedType('generic-trap', univ.Integer().clone(
+ namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
+ ('authenticationFailure', 4), ('egpNeighborLoss', 5),
+ ('enterpriseSpecific', 6)))),
+ namedtype.NamedType('specific-trap', univ.Integer()),
+ namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class Pdus(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('get-request', GetRequestPDU()),
+ namedtype.NamedType('get-next-request', GetNextRequestPDU()),
+ namedtype.NamedType('get-response', GetResponsePDU()),
+ namedtype.NamedType('set-request', SetRequestPDU()),
+ namedtype.NamedType('trap', TrapPDU())
+ )
+
+
+class Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('community', Community()),
+ namedtype.NamedType('data', Pdus())
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1901.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1901.py
new file mode 100644
index 0000000000..658dcb9381
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1901.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1901.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+
+class Message(univ.Sequence):
+    # SNMPv2c message wrapper (RFC 1901).  'data' is left opaque (ANY);
+    # the PDU is decoded separately against the RFC 1905 definitions.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('version-2c', 1)))),
+        namedtype.NamedType('community', univ.OctetString()),
+        namedtype.NamedType('data', univ.Any())
+    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1902.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1902.py
new file mode 100644
index 0000000000..063998a948
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1902.py
@@ -0,0 +1,129 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1902.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class Integer(univ.Integer):
+    # INTEGER (-2147483648..2147483647) per RFC 1902.
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        -2147483648, 2147483647
+    )
+
+
+class Integer32(univ.Integer):
+    # Integer32 — identical range to Integer, kept as a distinct type name.
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        -2147483648, 2147483647
+    )
+
+
+class OctetString(univ.OctetString):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(
+ 0, 65535
+ )
+
+
+class IpAddress(univ.OctetString):
+    # IpAddress ::= [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)).
+    tagSet = univ.OctetString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x00)
+    )
+    subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+        4, 4
+    )
+
+
+class Counter32(univ.Integer):
+    # Counter32 ::= [APPLICATION 1] IMPLICIT INTEGER (0..4294967295).
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x01)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Gauge32(univ.Integer):
+    # Gauge32 ::= [APPLICATION 2] IMPLICIT INTEGER (0..4294967295).
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Unsigned32(univ.Integer):
+    # Unsigned32 deliberately shares APPLICATION tag 2 with Gauge32
+    # (they are the same on the wire in SMIv2).
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class TimeTicks(univ.Integer):
+    # TimeTicks ::= [APPLICATION 3] IMPLICIT INTEGER (0..4294967295).
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x03)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Opaque(univ.OctetString):
+    # Opaque ::= [APPLICATION 4] IMPLICIT OCTET STRING.
+    tagSet = univ.OctetString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x04)
+    )
+
+
+class Counter64(univ.Integer):
+    # Counter64 ::= [APPLICATION 6] IMPLICIT INTEGER (0..2^64-1).
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x06)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 18446744073709551615
+    )
+
+
+class Bits(univ.OctetString):
+    # BITS pseudo-type; encoded as a plain OCTET STRING.
+    pass
+
+
+class ObjectName(univ.ObjectIdentifier):
+    # ObjectName ::= OBJECT IDENTIFIER (names a MIB object instance).
+    pass
+
+
+class SimpleSyntax(univ.Choice):
+    # Untagged base-type alternatives of ObjectSyntax.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('integer-value', Integer()),
+        namedtype.NamedType('string-value', OctetString()),
+        namedtype.NamedType('objectID-value', univ.ObjectIdentifier())
+    )
+
+
+class ApplicationSyntax(univ.Choice):
+    # Application-tagged alternatives of ObjectSyntax.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('ipAddress-value', IpAddress()),
+        namedtype.NamedType('counter-value', Counter32()),
+        namedtype.NamedType('timeticks-value', TimeTicks()),
+        namedtype.NamedType('arbitrary-value', Opaque()),
+        namedtype.NamedType('big-counter-value', Counter64()),
+        # This conflicts with Counter32
+        # namedtype.NamedType('unsigned-integer-value', Unsigned32()),
+        namedtype.NamedType('gauge32-value', Gauge32())
+    )  # BITS misplaced?
+
+
+class ObjectSyntax(univ.Choice):
+    # Union of every value syntax a MIB object may take.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('simple', SimpleSyntax()),
+        namedtype.NamedType('application-wide', ApplicationSyntax())
+    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1905.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1905.py
new file mode 100644
index 0000000000..435427b2bc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc1905.py
@@ -0,0 +1,135 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c PDU syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1905.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1902
+
+# Upper bound on the number of variable bindings in one PDU (RFC 1905).
+max_bindings = rfc1902.Integer(2147483647)
+
+
+class _BindValue(univ.Choice):
+    # Value slot of a VarBind: either an actual value, or one of the
+    # context-tagged exception markers returned by SNMPv2 agents.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('value', rfc1902.ObjectSyntax()),
+        namedtype.NamedType('unSpecified', univ.Null()),
+        namedtype.NamedType('noSuchObject',
+                            univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.NamedType('noSuchInstance',
+                            univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+        namedtype.NamedType('endOfMibView',
+                            univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+    )
+
+
+class VarBind(univ.Sequence):
+    # One name/value pair.  NOTE(review): the value component is
+    # deliberately unnamed (''), matching upstream pyasn1-modules.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('name', rfc1902.ObjectName()),
+        namedtype.NamedType('', _BindValue())
+    )
+
+
+class VarBindList(univ.SequenceOf):
+    # SEQUENCE (SIZE (0..max-bindings)) OF VarBind.
+    componentType = VarBind()
+    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(
+        0, max_bindings
+    )
+
+
+class PDU(univ.Sequence):
+    # Generic SNMPv2 PDU body (RFC 1905); concrete PDU classes below
+    # re-tag this structure with their implicit context tags.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('request-id', rfc1902.Integer32()),
+        namedtype.NamedType('error-status', univ.Integer(
+            namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3),
+                                             ('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7),
+                                             ('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10),
+                                             ('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13),
+                                             ('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16),
+                                             ('notWritable', 17), ('inconsistentName', 18)))),
+        namedtype.NamedType('error-index',
+                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+        namedtype.NamedType('variable-bindings', VarBindList())
+    )
+
+
+class BulkPDU(univ.Sequence):
+    # GetBulk variant: error-status/error-index are replaced by
+    # non-repeaters/max-repetitions.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('request-id', rfc1902.Integer32()),
+        namedtype.NamedType('non-repeaters',
+                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+        namedtype.NamedType('max-repetitions',
+                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+        namedtype.NamedType('variable-bindings', VarBindList())
+    )
+
+
+# Concrete SNMPv2 PDUs: same body as PDU/BulkPDU, distinguished only by
+# implicit context tags 0-8 (tag 4 is the obsolete SNMPv1 trap slot).
+class GetRequestPDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+    )
+
+
+class GetNextRequestPDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+    )
+
+
+class ResponsePDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+    )
+
+
+class SetRequestPDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+    )
+
+
+class GetBulkRequestPDU(BulkPDU):
+    # NOTE(review): re-tags via PDU.tagSet although the base is BulkPDU;
+    # both derive from the same untagged SEQUENCE, so the result is
+    # identical — matches upstream pyasn1-modules.
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
+    )
+
+
+class InformRequestPDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
+    )
+
+
+class SNMPv2TrapPDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
+    )
+
+
+class ReportPDU(PDU):
+    tagSet = PDU.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
+    )
+
+
+class PDUs(univ.Choice):
+    # CHOICE over all SNMPv2 PDU types, discriminated by implicit tag.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('get-request', GetRequestPDU()),
+        namedtype.NamedType('get-next-request', GetNextRequestPDU()),
+        namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()),
+        namedtype.NamedType('response', ResponsePDU()),
+        namedtype.NamedType('set-request', SetRequestPDU()),
+        namedtype.NamedType('inform-request', InformRequestPDU()),
+        namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()),
+        namedtype.NamedType('report', ReportPDU())
+    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2251.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2251.py
new file mode 100644
index 0000000000..094922cad0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2251.py
@@ -0,0 +1,563 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# LDAP message syntax
+#
+# ASN.1 source from:
+# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/ldap.asn
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+# Upper bound for LDAP integer fields (maxInt from the LDAP ASN.1 module).
+maxInt = univ.Integer(2147483647)
+
+
+# Base string/OID aliases of the LDAP grammar; all are OCTET STRING on
+# the wire and exist only to carry the ASN.1 type names.
+class LDAPString(univ.OctetString):
+    pass
+
+
+class LDAPOID(univ.OctetString):
+    pass
+
+
+class LDAPDN(LDAPString):
+    pass
+
+
+class RelativeLDAPDN(LDAPString):
+    pass
+
+
+class AttributeType(LDAPString):
+    pass
+
+
+class AttributeDescription(LDAPString):
+    pass
+
+
+class AttributeDescriptionList(univ.SequenceOf):
+    componentType = AttributeDescription()
+
+
+class AttributeValue(univ.OctetString):
+    pass
+
+
+class AssertionValue(univ.OctetString):
+    pass
+
+
+class AttributeValueAssertion(univ.Sequence):
+    # attribute description + asserted value, used by compare/filters.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('attributeDesc', AttributeDescription()),
+        namedtype.NamedType('assertionValue', AssertionValue())
+    )
+
+
+class Attribute(univ.Sequence):
+    # attribute description with its (unordered) set of values.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('type', AttributeDescription()),
+        namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+    )
+
+
+class MatchingRuleId(LDAPString):
+    pass
+
+
+class Control(univ.Sequence):
+    # NOTE(review): Boolean('False') passes the *name* of the named value
+    # (resolved to 0 by pyasn1's named-value lookup), not the Python
+    # literal False — matches upstream; confirm before changing.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('controlType', LDAPOID()),
+        namedtype.DefaultedNamedType('criticality', univ.Boolean('False')),
+        namedtype.OptionalNamedType('controlValue', univ.OctetString())
+    )
+
+
+class Controls(univ.SequenceOf):
+    componentType = Control()
+
+
+class LDAPURL(LDAPString):
+    pass
+
+
+class Referral(univ.SequenceOf):
+    # non-empty list of alternative server URLs (size not enforced here).
+    componentType = LDAPURL()
+
+
+class SaslCredentials(univ.Sequence):
+    # SASL mechanism name plus optional mechanism-specific credentials.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('mechanism', LDAPString()),
+        namedtype.OptionalNamedType('credentials', univ.OctetString())
+    )
+
+
+class AuthenticationChoice(univ.Choice):
+    # [0] simple password, [1]/[2] reserved, [3] SASL.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('simple', univ.OctetString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.NamedType('reserved-1', univ.OctetString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+        namedtype.NamedType('reserved-2', univ.OctetString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+        namedtype.NamedType('sasl',
+                            SaslCredentials().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+    )
+
+
+class BindRequest(univ.Sequence):
+    # BindRequest ::= [APPLICATION 0] SEQUENCE.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('version', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 127))),
+        namedtype.NamedType('name', LDAPDN()),
+        namedtype.NamedType('authentication', AuthenticationChoice())
+    )
+
+
+class PartialAttributeList(univ.SequenceOf):
+    # anonymous per-attribute SEQUENCE of type + value set.
+    componentType = univ.Sequence(
+        componentType=namedtype.NamedTypes(
+            namedtype.NamedType('type', AttributeDescription()),
+            namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+        )
+    )
+
+
+class SearchResultEntry(univ.Sequence):
+    # SearchResultEntry ::= [APPLICATION 4] SEQUENCE.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 4)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('objectName', LDAPDN()),
+        namedtype.NamedType('attributes', PartialAttributeList())
+    )
+
+
+class MatchingRuleAssertion(univ.Sequence):
+    # extensibleMatch operand: optional rule [1] and type [2], required
+    # matchValue [3], dnAttributes [4] defaulting to FALSE.
+    componentType = namedtype.NamedTypes(
+        namedtype.OptionalNamedType('matchingRule', MatchingRuleId().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+        namedtype.OptionalNamedType('type', AttributeDescription().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+        namedtype.NamedType('matchValue',
+                            AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+        namedtype.DefaultedNamedType('dnAttributes', univ.Boolean('False').subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+    )
+
+
+class SubstringFilter(univ.Sequence):
+    # substrings filter: attribute type plus an ordered sequence of
+    # initial [0] / any [1] / final [2] fragments.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('type', AttributeDescription()),
+        namedtype.NamedType('substrings',
+                            univ.SequenceOf(
+                                componentType=univ.Choice(
+                                    componentType=namedtype.NamedTypes(
+                                        namedtype.NamedType(
+                                            'initial', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+                                        ),
+                                        namedtype.NamedType(
+                                            'any', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
+                                        ),
+                                        namedtype.NamedType(
+                                            'final', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
+                                        )
+                                    )
+                                )
+                            )
+                            )
+    )
+
+# Ugly hack to handle recursive Filter reference (up to 3-levels deep).
+
+class Filter3(univ.Choice):
+    # Innermost level of the bounded Filter recursion: all alternatives
+    # of Filter *except* and/or/not, so nesting stops here.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+        namedtype.NamedType('substrings', SubstringFilter().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+        namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+        namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+        namedtype.NamedType('present', AttributeDescription().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+        namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+        namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+    )
+
+
+class Filter2(univ.Choice):
+    # Middle level: and [0] / or [1] / not [2] refer down to Filter3.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('and', univ.SetOf(componentType=Filter3()).subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+        namedtype.NamedType('or', univ.SetOf(componentType=Filter3()).subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+        namedtype.NamedType('not',
+                            Filter3().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+        namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+        namedtype.NamedType('substrings', SubstringFilter().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+        namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+        namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+        namedtype.NamedType('present', AttributeDescription().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+        namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+        namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+    )
+
+
+class Filter(univ.Choice):
+    # Top level of the 3-deep unrolled recursive Filter.
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('and', univ.SetOf(componentType=Filter2()).subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+        namedtype.NamedType('or', univ.SetOf(componentType=Filter2()).subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+        namedtype.NamedType('not',
+                            Filter2().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+        namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+        namedtype.NamedType('substrings', SubstringFilter().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+        namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+        namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+        namedtype.NamedType('present', AttributeDescription().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+        namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+        namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+    )
+
+
+# End of Filter hack
+
+class SearchRequest(univ.Sequence):
+    # SearchRequest ::= [APPLICATION 3] SEQUENCE.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('baseObject', LDAPDN()),
+        namedtype.NamedType('scope', univ.Enumerated(
+            namedValues=namedval.NamedValues(('baseObject', 0), ('singleLevel', 1), ('wholeSubtree', 2)))),
+        namedtype.NamedType('derefAliases', univ.Enumerated(
+            namedValues=namedval.NamedValues(('neverDerefAliases', 0), ('derefInSearching', 1),
+                                             ('derefFindingBaseObj', 2), ('derefAlways', 3)))),
+        namedtype.NamedType('sizeLimit',
+                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
+        namedtype.NamedType('timeLimit',
+                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
+        namedtype.NamedType('typesOnly', univ.Boolean()),
+        namedtype.NamedType('filter', Filter()),
+        namedtype.NamedType('attributes', AttributeDescriptionList())
+    )
+
+
+class UnbindRequest(univ.Null):
+    # UnbindRequest ::= [APPLICATION 2] NULL.
+    # NOTE(review): tags via univ.Sequence.tagSet despite the Null base —
+    # matches upstream pyasn1-modules; confirm before changing.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+    )
+
+
+class BindResponse(univ.Sequence):
+    # BindResponse ::= [APPLICATION 1] SEQUENCE: an LDAPResult body plus
+    # optional serverSaslCreds [7].
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('resultCode', univ.Enumerated(
+            namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+                                             ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+                                             ('compareTrue', 6), ('authMethodNotSupported', 7),
+                                             ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+                                             ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+                                             ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+                                             ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+                                             ('inappropriateMatching', 18), ('constraintViolation', 19),
+                                             ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+                                             ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+                                             ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+                                             ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+                                             ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+                                             ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+                                             ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+                                             ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+                                             ('objectClassModsProhibited', 69), ('reserved-70', 70),
+                                             ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+                                             ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+                                             ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+                                             ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+        namedtype.NamedType('matchedDN', LDAPDN()),
+        namedtype.NamedType('errorMessage', LDAPString()),
+        namedtype.OptionalNamedType('referral', Referral().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+        namedtype.OptionalNamedType('serverSaslCreds', univ.OctetString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)))
+    )
+
+
+class LDAPResult(univ.Sequence):
+    # Common result envelope shared (via subclassing) by most responses:
+    # resultCode, matchedDN, errorMessage, optional referral [3].
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('resultCode', univ.Enumerated(
+            namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+                                             ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+                                             ('compareTrue', 6), ('authMethodNotSupported', 7),
+                                             ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+                                             ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+                                             ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+                                             ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+                                             ('inappropriateMatching', 18), ('constraintViolation', 19),
+                                             ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+                                             ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+                                             ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+                                             ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+                                             ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+                                             ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+                                             ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+                                             ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+                                             ('objectClassModsProhibited', 69), ('reserved-70', 70),
+                                             ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+                                             ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+                                             ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+                                             ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+        namedtype.NamedType('matchedDN', LDAPDN()),
+        namedtype.NamedType('errorMessage', LDAPString()),
+        namedtype.OptionalNamedType('referral', Referral().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+    )
+
+
+class SearchResultReference(univ.SequenceOf):
+    # SearchResultReference ::= [APPLICATION 19] SEQUENCE OF LDAPURL.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 19)
+    )
+    componentType = LDAPURL()
+
+
+class SearchResultDone(LDAPResult):
+    # SearchResultDone ::= [APPLICATION 5] LDAPResult.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5)
+    )
+
+
+class AttributeTypeAndValues(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('type', AttributeDescription()),
+        namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+    )
+
+
+class ModifyRequest(univ.Sequence):
+    # ModifyRequest ::= [APPLICATION 6] SEQUENCE of per-attribute
+    # add/delete/replace operations.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 6)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('object', LDAPDN()),
+        namedtype.NamedType('modification',
+                            univ.SequenceOf(
+                                componentType=univ.Sequence(
+                                    componentType=namedtype.NamedTypes(
+                                        namedtype.NamedType(
+                                            'operation', univ.Enumerated(namedValues=namedval.NamedValues(('add', 0), ('delete', 1), ('replace', 2)))
+                                        ),
+                                        namedtype.NamedType('modification', AttributeTypeAndValues())))
+                            )
+                            )
+    )
+
+
+class ModifyResponse(LDAPResult):
+    # ModifyResponse ::= [APPLICATION 7] LDAPResult.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 7)
+    )
+
+
+class AttributeList(univ.SequenceOf):
+    componentType = univ.Sequence(
+        componentType=namedtype.NamedTypes(
+            namedtype.NamedType('type', AttributeDescription()),
+            namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+        )
+    )
+
+
+class AddRequest(univ.Sequence):
+    # AddRequest ::= [APPLICATION 8] SEQUENCE { entry, attributes }.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 8)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('entry', LDAPDN()),
+        namedtype.NamedType('attributes', AttributeList())
+    )
+
+
+class AddResponse(LDAPResult):
+    # AddResponse ::= [APPLICATION 9] LDAPResult.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 9)
+    )
+
+
+class DelRequest(LDAPResult):
+    # DelRequest ::= [APPLICATION 10].
+    # NOTE(review): derives from LDAPResult although the ASN.1 DelRequest
+    # is an LDAPDN — matches upstream pyasn1-modules; confirm before use.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 10)
+    )
+
+
+class DelResponse(LDAPResult):
+    # DelResponse ::= [APPLICATION 11] LDAPResult.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11)
+    )
+
+
+class ModifyDNRequest(univ.Sequence):
+    # ModifyDNRequest ::= [APPLICATION 12] SEQUENCE.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('entry', LDAPDN()),
+        namedtype.NamedType('newrdn', RelativeLDAPDN()),
+        namedtype.NamedType('deleteoldrdn', univ.Boolean()),
+        namedtype.OptionalNamedType('newSuperior',
+                                    LDAPDN().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+
+    )
+
+
+class ModifyDNResponse(LDAPResult):
+    # ModifyDNResponse ::= [APPLICATION 13] LDAPResult.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13)
+    )
+
+
+class CompareRequest(univ.Sequence):
+    # CompareRequest ::= [APPLICATION 14] SEQUENCE { entry, ava }.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('entry', LDAPDN()),
+        namedtype.NamedType('ava', AttributeValueAssertion())
+    )
+
+
+class CompareResponse(LDAPResult):
+    # CompareResponse ::= [APPLICATION 15] LDAPResult.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15)
+    )
+
+
+class AbandonRequest(LDAPResult):
+    # AbandonRequest ::= [APPLICATION 16].
+    # NOTE(review): LDAPResult base is an upstream quirk (ASN.1 says
+    # MessageID) — kept as-is to match upstream pyasn1-modules.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 16)
+    )
+
+
+class ExtendedRequest(univ.Sequence):
+    # ExtendedRequest ::= [APPLICATION 23] SEQUENCE { name [0], value [1] }.
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 23)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('requestName',
+                            LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.OptionalNamedType('requestValue', univ.OctetString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+    )
+
+
+class ExtendedResponse(univ.Sequence):
+    # ExtendedResponse ::= [APPLICATION 24]: LDAPResult body plus
+    # optional responseName [10] and response [11].
+    tagSet = univ.Sequence.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 24)
+    )
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('resultCode', univ.Enumerated(
+            namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+                                             ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+                                             ('compareTrue', 6), ('authMethodNotSupported', 7),
+                                             ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+                                             ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+                                             ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+                                             ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+                                             ('inappropriateMatching', 18), ('constraintViolation', 19),
+                                             ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+                                             ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+                                             ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+                                             ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+                                             ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+                                             ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+                                             ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+                                             ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+                                             ('objectClassModsProhibited', 69), ('reserved-70', 70),
+                                             ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+                                             ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+                                             ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+                                             ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+        namedtype.NamedType('matchedDN', LDAPDN()),
+        namedtype.NamedType('errorMessage', LDAPString()),
+        namedtype.OptionalNamedType('referral', Referral().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+
+        namedtype.OptionalNamedType('responseName', LDAPOID().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))),
+        namedtype.OptionalNamedType('response', univ.OctetString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11)))
+    )
+
+
+class MessageID(univ.Integer):
+    # MessageID ::= INTEGER (0..maxInt).
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, maxInt
+    )
+
+
+class LDAPMessage(univ.Sequence):
+    # Top-level LDAP envelope: message id, one protocol operation
+    # (CHOICE over every request/response above), optional controls [0].
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('messageID', MessageID()),
+        namedtype.NamedType(
+            'protocolOp', univ.Choice(
+                componentType=namedtype.NamedTypes(
+                    namedtype.NamedType('bindRequest', BindRequest()),
+                    namedtype.NamedType('bindResponse', BindResponse()),
+                    namedtype.NamedType('unbindRequest', UnbindRequest()),
+                    namedtype.NamedType('searchRequest', SearchRequest()),
+                    namedtype.NamedType('searchResEntry', SearchResultEntry()),
+                    namedtype.NamedType('searchResDone', SearchResultDone()),
+                    namedtype.NamedType('searchResRef', SearchResultReference()),
+                    namedtype.NamedType('modifyRequest', ModifyRequest()),
+                    namedtype.NamedType('modifyResponse', ModifyResponse()),
+                    namedtype.NamedType('addRequest', AddRequest()),
+                    namedtype.NamedType('addResponse', AddResponse()),
+                    namedtype.NamedType('delRequest', DelRequest()),
+                    namedtype.NamedType('delResponse', DelResponse()),
+                    namedtype.NamedType('modDNRequest', ModifyDNRequest()),
+                    namedtype.NamedType('modDNResponse', ModifyDNResponse()),
+                    namedtype.NamedType('compareRequest', CompareRequest()),
+                    namedtype.NamedType('compareResponse', CompareResponse()),
+                    namedtype.NamedType('abandonRequest', AbandonRequest()),
+                    namedtype.NamedType('extendedReq', ExtendedRequest()),
+                    namedtype.NamedType('extendedResp', ExtendedResponse())
+                )
+            )
+        ),
+        namedtype.OptionalNamedType('controls', Controls().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2314.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2314.py
new file mode 100644
index 0000000000..b0edfe0917
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2314.py
@@ -0,0 +1,48 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#10 syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc2314
+#
+# Sample captures could be obtained with "openssl req" command
+#
+from pyasn1_modules.rfc2459 import *
+
+
+class Attributes(univ.SetOf):
+ componentType = Attribute()
+
+
+class Version(univ.Integer):
+ pass
+
+
+class CertificationRequestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.NamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class CertificationRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2315.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2315.py
new file mode 100644
index 0000000000..1069fc27dd
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2315.py
@@ -0,0 +1,294 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#7 message syntax
+#
+# ASN.1 source from:
+# https://opensource.apple.com/source/Security/Security-55179.1/libsecurity_asn1/asn1/pkcs7.asn.auto.html
+#
+# Sample captures from:
+# openssl crl2pkcs7 -nocrl -certfile cert1.cer -out outfile.p7b
+#
+from pyasn1_modules.rfc2459 import *
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class AttributeValueAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeType', AttributeType()),
+ namedtype.NamedType('attributeValue', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+
+pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7')
+data = univ.ObjectIdentifier('1.2.840.113549.1.7.1')
+signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2')
+envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3')
+signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4')
+digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5')
+encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6')
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+contentTypeMap = {}
+
+
+class EncryptedContentInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType(
+ 'encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ ),
+ openType=opentype.OpenType('contentType', contentTypeMap)
+ )
+ )
+
+
+class Version(univ.Integer): # overrides x509.Version
+ pass
+
+
+class EncryptedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
+ )
+
+
+class DigestAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ componentType = DigestAlgorithmIdentifier()
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.OptionalNamedType(
+ 'content',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)),
+ openType=opentype.OpenType('contentType', contentTypeMap)
+ )
+ )
+
+
+class DigestedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.NamedType('digest', Digest())
+ )
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class RecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+ )
+
+
+class RecipientInfos(univ.SetOf):
+ componentType = RecipientInfo()
+
+
+class Attributes(univ.SetOf):
+ componentType = Attribute()
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('attributes', Attributes())
+ )
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+ )
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class ExtendedCertificatesAndCertificates(univ.SetOf):
+ componentType = ExtendedCertificateOrCertificate()
+
+
+class SerialNumber(univ.Integer):
+ pass
+
+
+class CRLEntry(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', SerialNumber()),
+ namedtype.NamedType('revocationDate', useful.UTCTime())
+ )
+
+
+class TBSCertificateRevocationList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('lastUpdate', useful.UTCTime()),
+ namedtype.NamedType('nextUpdate', useful.UTCTime()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
+ )
+
+
+class CertificateRevocationList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+
+class CertificateRevocationLists(univ.SetOf):
+ componentType = CertificateRevocationList()
+
+
+class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedDigest(univ.OctetString):
+ pass
+
+
+class SignerInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedDigest', EncryptedDigest()),
+ namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class SignerInfos(univ.SetOf):
+ componentType = SignerInfo()
+
+
+class SignedAndEnvelopedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+ )
+
+
+class EnvelopedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
+ )
+
+
+class DigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('digest', Digest())
+ )
+
+
+class SignedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.OptionalNamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('signerInfos', SignerInfos())
+ )
+
+
+class Data(univ.OctetString):
+ pass
+
+_contentTypeMapUpdate = {
+ data: Data(),
+ signedData: SignedData(),
+ envelopedData: EnvelopedData(),
+ signedAndEnvelopedData: SignedAndEnvelopedData(),
+ digestedData: DigestedData(),
+ encryptedData: EncryptedData()
+}
+
+contentTypeMap.update(_contentTypeMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2437.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2437.py
new file mode 100644
index 0000000000..88641cf07d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2437.py
@@ -0,0 +1,69 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2.asn
+#
+# Sample captures could be obtained with "openssl genrsa" command
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules.rfc2459 import AlgorithmIdentifier
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+md4WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.3')
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+rsaOAEPEncryptionSET = univ.ObjectIdentifier('1.2.840.113549.1.1.6')
+id_RSAES_OAEP = univ.ObjectIdentifier('1.2.840.113549.1.1.7')
+id_mgf1 = univ.ObjectIdentifier('1.2.840.113549.1.1.8')
+id_pSpecified = univ.ObjectIdentifier('1.2.840.113549.1.1.9')
+id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
+
+MAX = float('inf')
+
+
+class Version(univ.Integer):
+ pass
+
+
+class RSAPrivateKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ namedtype.NamedType('privateExponent', univ.Integer()),
+ namedtype.NamedType('prime1', univ.Integer()),
+ namedtype.NamedType('prime2', univ.Integer()),
+ namedtype.NamedType('exponent1', univ.Integer()),
+ namedtype.NamedType('exponent2', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer())
+ )
+
+
+class RSAPublicKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+ )
+
+
+# XXX defaults not set
+class RSAES_OAEP_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('maskGenFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('pSourceFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2459.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2459.py
new file mode 100644
index 0000000000..57f783e451
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2459.py
@@ -0,0 +1,1339 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Updated by Russ Housley to resolve the TODO regarding the Certificate
+# Policies Certificate Extension.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 message syntax
+#
+# ASN.1 source from:
+# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
+# http://www.ietf.org/rfc/rfc2459.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+#
+# PKIX1Explicit88
+#
+
+# Upper Bounds
+ub_name = univ.Integer(32768)
+ub_common_name = univ.Integer(64)
+ub_locality_name = univ.Integer(128)
+ub_state_name = univ.Integer(128)
+ub_organization_name = univ.Integer(64)
+ub_organizational_unit_name = univ.Integer(64)
+ub_title = univ.Integer(64)
+ub_match = univ.Integer(128)
+ub_emailaddress_length = univ.Integer(128)
+ub_common_name_length = univ.Integer(64)
+ub_country_name_alpha_length = univ.Integer(2)
+ub_country_name_numeric_length = univ.Integer(3)
+ub_domain_defined_attributes = univ.Integer(4)
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+ub_domain_name_length = univ.Integer(16)
+ub_extension_attributes = univ.Integer(256)
+ub_e163_4_number_length = univ.Integer(15)
+ub_e163_4_sub_address_length = univ.Integer(40)
+ub_generation_qualifier_length = univ.Integer(3)
+ub_given_name_length = univ.Integer(16)
+ub_initials_length = univ.Integer(5)
+ub_integer_options = univ.Integer(256)
+ub_numeric_user_id_length = univ.Integer(32)
+ub_organization_name_length = univ.Integer(64)
+ub_organizational_unit_name_length = univ.Integer(32)
+ub_organizational_units = univ.Integer(4)
+ub_pds_name_length = univ.Integer(16)
+ub_pds_parameter_length = univ.Integer(30)
+ub_pds_physical_address_lines = univ.Integer(6)
+ub_postal_code_length = univ.Integer(16)
+ub_surname_length = univ.Integer(40)
+ub_terminal_id_length = univ.Integer(24)
+ub_unformatted_address_length = univ.Integer(180)
+ub_x121_address_length = univ.Integer(16)
+
+
+class UniversalString(char.UniversalString):
+ pass
+
+
+class BMPString(char.BMPString):
+ pass
+
+
+class UTF8String(char.UTF8String):
+ pass
+
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
+
+id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
+id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
+
+id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
+id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
+
+
+
+
+id_at = univ.ObjectIdentifier('2.5.4')
+id_at_name = univ.ObjectIdentifier('2.5.4.41')
+# preserve misspelled variable for compatibility
+id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4')
+id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
+id_at_initials = univ.ObjectIdentifier('2.5.4.43')
+id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')
+
+
+class X520name(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+ )
+
+
+id_at_commonName = univ.ObjectIdentifier('2.5.4.3')
+
+
+class X520CommonName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+ )
+
+
+id_at_localityName = univ.ObjectIdentifier('2.5.4.7')
+
+
+class X520LocalityName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+ )
+
+
+id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')
+
+
+class X520StateOrProvinceName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+ )
+
+
+id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')
+
+
+class X520OrganizationName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+ )
+
+
+id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+ )
+
+
+id_at_title = univ.ObjectIdentifier('2.5.4.12')
+
+
+class X520Title(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+ )
+
+
+id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
+
+
+class X520countryName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
+
+
+pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
+
+emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
+
+
+class Pkcs9email(char.IA5String):
+ subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+
+# ----
+
+class DSAPrivateKey(univ.Sequence):
+ """PKIX compliant DSA private key structure"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('public', univ.Integer()),
+ namedtype.NamedType('private', univ.Integer())
+ )
+
+
+# ----
+
+
+class DirectoryString(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ # hm, this should not be here!? XXX
+ )
+
+
+# certificate and CRL specific structures begin here
+
+class AlgorithmIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any())
+ )
+
+
+
+# Algorithm OIDs and parameter structures
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
+
+
+class Dss_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
+
+
+class ValidationParms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+ )
+
+
+class DomainParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+ )
+
+
+id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
+
+
+class Dss_Parms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer())
+ )
+
+
+# x400 address syntax starts here
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString())
+ )
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ componentType = TeletexDomainDefinedAttribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+terminal_type = univ.Integer(23)
+
+
+class TerminalType(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options)
+ namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletelex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+ )
+
+
+class PresentationAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3),
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ )
+
+
+extended_network_address = univ.Integer(22)
+
+
+class E163_4_address(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('e163-4-address', E163_4_address()),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class PDSParameter(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+ )
+
+
+local_postal_attributes = univ.Integer(21)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+unique_postal_name = univ.Integer(20)
+
+poste_restante_address = univ.Integer(19)
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+post_office_box_address = univ.Integer(18)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+street_address = univ.Integer(17)
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+class UnformattedPostalAddress(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+ )
+
+
+physical_delivery_office_name = univ.Integer(10)
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+physical_delivery_office_number = univ.Integer(11)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+extension_OR_address_components = univ.Integer(12)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+physical_delivery_personal_name = univ.Integer(13)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+physical_delivery_organization_name = univ.Integer(14)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+unformatted_postal_address = univ.Integer(16)
+
+postal_code = univ.Integer(9)
+
+
+class PostalCode(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+ )
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
+ ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+ )
+
+
+class PDSName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+physical_delivery_country_name = univ.Integer(8)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+pds_name = univ.Integer(7)
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ componentType = TeletexOrganizationalUnitName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+teletex_personal_name = univ.Integer(4)
+
+
+class TeletexPersonalName(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+teletex_organization_name = univ.Integer(3)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+teletex_common_name = univ.Integer(2)
+
+
+class TeletexCommonName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class CommonName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+common_name = univ.Integer(1)
+
+
+class ExtensionAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtensionAttributes(univ.SetOf):
+ componentType = ExtensionAttribute()
+ sizeSpec = univ.SetOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+ )
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ componentType = BuiltInDomainDefinedAttribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ componentType = OrganizationalUnitName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class PersonalName(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class NumericUserIdentifier(char.NumericString):
+ subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class OrganizationName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+class PrivateDomainName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+ )
+
+
+class TerminalIdentifier(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+
+class X121Address(char.NumericString):
+ subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+class AdministrationDomainName(univ.Choice):
+ tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+ )
+
+
+class CountryName(univ.Choice):
+ tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
+ ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+ )
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+ )
+
+
+class ORAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+ )
+
+
+#
+# PKIX1Implicit88
+#
+
+id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
+id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
+id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
+
+holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
+
+id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
+
+
+class CRLReason(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8)
+ )
+
+
+id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
+
+
+class CRLNumber(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1')
+id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
+id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
+id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
+id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
+id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
+id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
+id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
+id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
+id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ componentType = KeyPurposeId()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class ReasonFlags(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6)
+ )
+
+
+class SkipCerts(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
+
+
+id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
+
+
+class PolicyConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
+
+
+class BasicConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean(False)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+ )
+
+
+id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
+
+
+class EDIPartyName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('partyName',
+ DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+
+id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
+
+
+
+class BaseDistance(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
+
+
+id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
+
+
+id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
+
+
+
+
+id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
+
+
+class DisplayText(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+ )
+
+
+class NoticeReference(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+ )
+
+
+class UserNotice(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+ )
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType('qualifier', univ.Any())
+ )
+
+
+id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
+
+
+class PolicyInformation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class CertificatePolicies(univ.SequenceOf):
+ componentType = PolicyInformation()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
+
+
+class PolicyMapping(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+ )
+
+
+class PolicyMappings(univ.SequenceOf):
+ componentType = PolicyMapping()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
+
+
+class KeyUsage(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+ )
+
+
+id_ce = univ.ObjectIdentifier('2.5.29')
+
+id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
+
+
+id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
+
+
+id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+certificateAttributesMap = {}
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ componentType = Attribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ componentType = AttributeTypeAndValue()
+
+
+class RDNSequence(univ.SequenceOf):
+ componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('', RDNSequence())
+ )
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class AnotherName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class GeneralName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+ )
+
+
+class GeneralNames(univ.SequenceOf):
+ componentType = GeneralName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class AccessDescription(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+ )
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ componentType = AccessDescription()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class DistributionPointName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class DistributionPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CRLDistPointsSyntax(univ.SequenceOf):
+ componentType = DistributionPoint()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class GeneralSubtree(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ componentType = GeneralSubtree()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+certificateExtensionsMap = {}
+
+
+class Extension(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
+ namedtype.NamedType('extnValue', univ.OctetString(),
+ openType=opentype.OpenType('extnID', certificateExtensionsMap))
+ )
+
+
+class Extensions(univ.SequenceOf):
+ componentType = Extension()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Time(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+ )
+
+
+class Validity(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('v1', 0), ('v2', 1), ('v3', 2)
+ )
+
+
+class TBSCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class Certificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+ )
+
+# CRL structures
+
+class RevokedCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ )
+
+
+class TBSCertList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
+ namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class CertificateList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+# map of AttributeType -> AttributeValue
+
+_certificateAttributesMapUpdate = {
+ id_at_name: X520name(),
+ id_at_surname: X520name(),
+ id_at_givenName: X520name(),
+ id_at_initials: X520name(),
+ id_at_generationQualifier: X520name(),
+ id_at_commonName: X520CommonName(),
+ id_at_localityName: X520LocalityName(),
+ id_at_stateOrProvinceName: X520StateOrProvinceName(),
+ id_at_organizationName: X520OrganizationName(),
+ id_at_organizationalUnitName: X520OrganizationalUnitName(),
+ id_at_title: X520Title(),
+ id_at_dnQualifier: X520dnQualifier(),
+ id_at_countryName: X520countryName(),
+ emailAddress: Pkcs9email(),
+}
+
+certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# map of Certificate Extension OIDs to Extensions
+
+_certificateExtensionsMapUpdate = {
+ id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
+ id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
+ id_ce_keyUsage: KeyUsage(),
+ id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
+ id_ce_certificatePolicies: CertificatePolicies(),
+ id_ce_policyMappings: PolicyMappings(),
+ id_ce_subjectAltName: SubjectAltName(),
+ id_ce_issuerAltName: IssuerAltName(),
+ id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
+ id_ce_basicConstraints: BasicConstraints(),
+ id_ce_nameConstraints: NameConstraints(),
+ id_ce_policyConstraints: PolicyConstraints(),
+ id_ce_extKeyUsage: ExtKeyUsageSyntax(),
+ id_ce_cRLDistributionPoints: CRLDistPointsSyntax(),
+ id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
+ id_ce_cRLNumber: univ.Integer(),
+ id_ce_deltaCRLIndicator: BaseCRLNumber(),
+ id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
+ id_ce_cRLReasons: CRLReason(),
+ id_ce_holdInstructionCode: univ.ObjectIdentifier(),
+ id_ce_invalidityDate: useful.GeneralizedTime(),
+ id_ce_certificateIssuer: GeneralNames(),
+}
+
+certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2511.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2511.py
new file mode 100644
index 0000000000..8935cdabe3
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2511.py
@@ -0,0 +1,258 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 certificate Request Message Format (CRMF) syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc2511
+#
+# Sample captures could be obtained with OpenSSL
+#
+from pyasn1_modules import rfc2315
+from pyasn1_modules.rfc2459 import *
+
+MAX = float('inf')
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+id_pkip = univ.ObjectIdentifier('1.3.6.1.5.5.7.5')
+id_regCtrl = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1')
+id_regCtrl_regToken = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.1')
+id_regCtrl_authenticator = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.2')
+id_regCtrl_pkiPublicationInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.3')
+id_regCtrl_pkiArchiveOptions = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.4')
+id_regCtrl_oldCertID = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.5')
+id_regCtrl_protocolEncrKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.6')
+id_regInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2')
+id_regInfo_utf8Pairs = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.1')
+id_regInfo_certReq = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.2')
+
+
+# This should be in PKIX Certificate Extensions module
+
+class GeneralName(univ.OctetString):
+ pass
+
+
+# end of PKIX Certificate Extensions module
+
+class UTF8Pairs(char.UTF8String):
+ pass
+
+
+class ProtocolEncrKey(SubjectPublicKeyInfo):
+ pass
+
+
+class CertId(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+ )
+
+
+class OldCertId(CertId):
+ pass
+
+
+class KeyGenParameters(univ.OctetString):
+ pass
+
+
+class EncryptedValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('intendedAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('symmAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('keyAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('encValue', univ.BitString())
+ )
+
+
+class EncryptedKey(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedValue', EncryptedValue()),
+ namedtype.NamedType('envelopedData', rfc2315.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class PKIArchiveOptions(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedPrivKey', EncryptedKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyGenParameters', KeyGenParameters().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('archiveRemGenPrivKey',
+ univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class SinglePubInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubMethod', univ.Integer(
+ namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
+ namedtype.OptionalNamedType('pubLocation', GeneralName())
+ )
+
+
+class PKIPublicationInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('action',
+ univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
+ namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class Authenticator(char.UTF8String):
+ pass
+
+
+class RegToken(char.UTF8String):
+ pass
+
+
+class SubsequentMessage(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('encrCert', 0),
+ ('challengeResp', 1)
+ )
+
+
+class POPOPrivKey(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('thisMessage',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subsequentMessage', SubsequentMessage().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dhMAC',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class PBMParameter(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('owf', AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', AlgorithmIdentifier())
+ )
+
+
+class PKMACValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algId', AlgorithmIdentifier()),
+ namedtype.NamedType('value', univ.BitString())
+ )
+
+
+class POPOSigningKeyInput(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'authInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'sender', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType('publicKeyMAC', PKMACValue())
+ )
+ )
+ ),
+ namedtype.NamedType('publicKey', SubjectPublicKeyInfo())
+ )
+
+
+class POPOSigningKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('algorithmIdentifier', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+
+class ProofOfPossession(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('raVerified',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signature', POPOSigningKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('keyEncipherment', POPOPrivKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('keyAgreement', POPOPrivKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class Controls(univ.SequenceOf):
+ componentType = AttributeTypeAndValue()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class OptionalValidity(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore',
+ Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter',
+ Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class CertTemplate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('signingAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('issuer', Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('subject', Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('publicKey', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('subjectUID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class CertRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('certTemplate', CertTemplate()),
+ namedtype.OptionalNamedType('controls', Controls())
+ )
+
+
+class CertReq(CertRequest):
+ pass
+
+
+class CertReqMsg(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReq', CertRequest()),
+ namedtype.OptionalNamedType('pop', ProofOfPossession()),
+ namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class CertReqMessages(univ.SequenceOf):
+ componentType = CertReqMsg()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2560.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2560.py
new file mode 100644
index 0000000000..017ac0b66e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2560.py
@@ -0,0 +1,225 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# OCSP request/response syntax
+#
+# Derived from a minimal OCSP library (RFC2560) code written by
+# Bud P. Bruegger <bud@ancitel.it>
+# Copyright: Ancitel, S.p.a, Rome, Italy
+# License: BSD
+#
+
+#
+# current limitations:
+# * request and response works only for a single certificate
+# * only some values are parsed out of the response
+# * the request does't set a nonce nor signature
+# * there is no signature validation of the response
+# * dates are left as strings in GeneralizedTime format -- datetime.datetime
+# would be nicer
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc2459
+
+
+# Start of OCSP module definitions
+
+# This should be in directory Authentication Framework (X.509) module
+
+class CRLReason(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+ )
+
+
+# end of directory Authentication Framework (X.509) module
+
+# This should be in PKIX Certificate Extensions module
+
+class GeneralName(univ.OctetString):
+ pass
+
+
+# end of PKIX Certificate Extensions module
+
+id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
+id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
+id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
+id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
+id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
+id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
+id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
+id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
+id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
+
+
+class AcceptableResponses(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class ArchiveCutoff(useful.GeneralizedTime):
+ pass
+
+
+class UnknownInfo(univ.Null):
+ pass
+
+
+class RevokedInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class CertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('issuerNameHash', univ.OctetString()),
+ namedtype.NamedType('issuerKeyHash', univ.OctetString()),
+ namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
+ )
+
+
+class CertStatus(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('good',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('revoked',
+ RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('unknown',
+ UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class SingleResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certID', CertID()),
+ namedtype.NamedType('certStatus', CertStatus()),
+ namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class KeyHash(univ.OctetString):
+ pass
+
+
+class ResponderID(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('byName',
+ rfc2459.Name().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('byKey',
+ KeyHash().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0))
+
+
+class ResponseData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('responderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())),
+ namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BasicOCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsResponseData', ResponseData()),
+ namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class ResponseBytes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('responseType', univ.ObjectIdentifier()),
+ namedtype.NamedType('response', univ.OctetString())
+ )
+
+
+class OCSPResponseStatus(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('successful', 0),
+ ('malformedRequest', 1),
+ ('internalError', 2),
+ ('tryLater', 3),
+ ('undefinedStatus', 4), # should never occur
+ ('sigRequired', 5),
+ ('unauthorized', 6)
+ )
+
+
+class OCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('responseStatus', OCSPResponseStatus()),
+ namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Request(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('reqCert', CertID()),
+ namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Signature(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class TBSRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())),
+ namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class OCSPRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsRequest', TBSRequest()),
+ namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2631.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2631.py
new file mode 100644
index 0000000000..44e537101c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2631.py
@@ -0,0 +1,37 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Diffie-Hellman Key Agreement
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2631.txt
+# https://www.rfc-editor.org/errata/eid5897
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class KeySpecificInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.NamedType('counter', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(4, 4)))
+ )
+
+
+class OtherInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyInfo', KeySpecificInfo()),
+ namedtype.OptionalNamedType('partyAInfo', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('suppPubInfo', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2634.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2634.py
new file mode 100644
index 0000000000..2099a4b206
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2634.py
@@ -0,0 +1,336 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enhanced Security Services for S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2634.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+ContentType = rfc5652.ContentType
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+PolicyInformation = rfc5280.PolicyInformation
+
+GeneralNames = rfc5280.GeneralNames
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+
+# Signing Certificate Attribute
+# Warning: It is better to use SigningCertificateV2 from RFC 5035
+
+id_aa_signingCertificate = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.12')
+
+class Hash(univ.OctetString):
+ pass # SHA-1 hash of entire certificate; RFC 5035 supports other hash algorithms
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+)
+
+
+class ESSCertID(univ.Sequence):
+ pass
+
+ESSCertID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', Hash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+)
+
+
+class SigningCertificate(univ.Sequence):
+ pass
+
+SigningCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs', univ.SequenceOf(
+ componentType=ESSCertID())),
+ namedtype.OptionalNamedType('policies', univ.SequenceOf(
+ componentType=PolicyInformation()))
+)
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.3')
+
+ub_ml_expansion_history = univ.Integer(64)
+
+
+class EntityIdentifier(univ.Choice):
+ pass
+
+EntityIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier())
+)
+
+
+class MLReceiptPolicy(univ.Choice):
+ pass
+
+MLReceiptPolicy.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('none', univ.Null().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('insteadOf', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('inAdditionTo', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class MLData(univ.Sequence):
+ pass
+
+MLData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mailListIdentifier', EntityIdentifier()),
+ namedtype.NamedType('expansionTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('mlReceiptPolicy', MLReceiptPolicy())
+)
+
+class MLExpansionHistory(univ.SequenceOf):
+ pass
+
+MLExpansionHistory.componentType = MLData()
+MLExpansionHistory.sizeSpec = constraint.ValueSizeConstraint(1, ub_ml_expansion_history)
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.2')
+
+ub_privacy_mark_length = univ.Integer(128)
+
+ub_security_categories = univ.Integer(64)
+
+ub_integer_options = univ.Integer(256)
+
+
+class ESSPrivacyMark(univ.Choice):
+ pass
+
+ESSPrivacyMark.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_privacy_mark_length))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class SecurityClassification(univ.Integer):
+ pass
+
+SecurityClassification.subtypeSpec=constraint.ValueRangeConstraint(0, ub_integer_options)
+
+SecurityClassification.namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('top-secret', 5)
+)
+
+
+class SecurityPolicyIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class SecurityCategory(univ.Sequence):
+ pass
+
+SecurityCategory.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SecurityCategories(univ.SetOf):
+ pass
+
+SecurityCategories.componentType = SecurityCategory()
+SecurityCategories.sizeSpec = constraint.ValueSizeConstraint(1, ub_security_categories)
+
+
+class ESSSecurityLabel(univ.Set):
+ pass
+
+ESSSecurityLabel.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('security-policy-identifier', SecurityPolicyIdentifier()),
+ namedtype.OptionalNamedType('security-classification', SecurityClassification()),
+ namedtype.OptionalNamedType('privacy-mark', ESSPrivacyMark()),
+ namedtype.OptionalNamedType('security-categories', SecurityCategories())
+)
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.9')
+
+class EquivalentLabels(univ.SequenceOf):
+ pass
+
+EquivalentLabels.componentType = ESSSecurityLabel()
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.7')
+
+class ContentIdentifier(univ.OctetString):
+ pass
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.10')
+
+class ContentReference(univ.Sequence):
+ pass
+
+ContentReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('originatorSignatureValue', univ.OctetString())
+)
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.5')
+
+class MsgSigDigest(univ.OctetString):
+ pass
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.4')
+
+class ContentHints(univ.Sequence):
+ pass
+
+ContentHints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('contentDescription', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('contentType', ContentType())
+)
+
+
+# Receipt Request Attribute
+
+class AllOrFirstTier(univ.Integer):
+ pass
+
+AllOrFirstTier.namedValues = namedval.NamedValues(
+ ('allReceipts', 0),
+ ('firstTierRecipients', 1)
+)
+
+
+class ReceiptsFrom(univ.Choice):
+ pass
+
+ReceiptsFrom.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('allOrFirstTier', AllOrFirstTier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receiptList', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+id_aa_receiptRequest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.1')
+
+ub_receiptsTo = univ.Integer(16)
+
+class ReceiptRequest(univ.Sequence):
+ pass
+
+ReceiptRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('receiptsFrom', ReceiptsFrom()),
+ namedtype.NamedType('receiptsTo', univ.SequenceOf(componentType=GeneralNames()).subtype(sizeSpec=constraint.ValueSizeConstraint(1, ub_receiptsTo)))
+)
+
+# Receipt Content Type
+
+class ESSVersion(univ.Integer):
+ pass
+
+ESSVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_receipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.1')
+
+class Receipt(univ.Sequence):
+ pass
+
+Receipt.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', ESSVersion()),
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('originatorSignatureValue', univ.OctetString())
+)
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_signingCertificate: SigningCertificate(),
+ id_aa_mlExpandHistory: MLExpansionHistory(),
+ id_aa_securityLabel: ESSSecurityLabel(),
+ id_aa_equivalentLabels: EquivalentLabels(),
+ id_aa_contentIdentifier: ContentIdentifier(),
+ id_aa_contentReference: ContentReference(),
+ id_aa_msgSigDigest: MsgSigDigest(),
+ id_aa_contentHint: ContentHints(),
+ id_aa_receiptRequest: ReceiptRequest(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2876.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2876.py
new file mode 100644
index 0000000000..04c402b7ea
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2876.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# KEA and SKIPJACK Algorithms in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2876.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+
+
+id_fortezzaConfidentialityAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.4')
+
+
+id_fortezzaWrap80 = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.23')
+
+
+id_kEAKeyEncryptionAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.24')
+
+
+id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22')
+
+
+class Skipjack_Parm(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('initialization-vector', univ.OctetString())
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_fortezzaConfidentialityAlgorithm: Skipjack_Parm(),
+ id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the SMIMECapabilities Attribute map in rfc5751.py
+
+_smimeCapabilityMapUpdate = {
+ id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2985.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2985.py
new file mode 100644
index 0000000000..75bccf097d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2985.py
@@ -0,0 +1,588 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#9: Selected Attribute Types (Version 2.0)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2985.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc7292
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+EmailAddress = rfc5280.EmailAddress
+
+Extensions = rfc5280.Extensions
+
+Time = rfc5280.Time
+
+X520countryName = rfc5280.X520countryName
+
+X520SerialNumber = rfc5280.X520SerialNumber
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+ContentType = rfc5652.ContentType
+
+Countersignature = rfc5652.Countersignature
+
+MessageDigest = rfc5652.MessageDigest
+
+SignerInfo = rfc5652.SignerInfo
+
+SigningTime = rfc5652.SigningTime
+
+
+# Imports from RFC 5958
+
+EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
+
+
+# Imports from RFC 7292
+
+PFX = rfc7292.PFX
+
+
+# TODO:
+# Need a place to import PKCS15Token; it does not yet appear in an RFC
+
+
+# SingleAttribute is the same as Attribute in RFC 5280, except that the
+# attrValues SET must have one and only one member
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+
+
+class SingleAttributeValues(univ.SetOf):
+ pass
+
+SingleAttributeValues.componentType = AttributeValue()
+
+
+class SingleAttribute(univ.Sequence):
+ pass
+
+SingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('type', rfc5280.certificateAttributesMap)
+ )
+)
+
+
+# CMSAttribute is the same as Attribute in RFC 5652, and CMSSingleAttribute
+# is the companion where the attrValues SET must have one and only one member
+
+CMSAttribute = rfc5652.Attribute
+
+
+class CMSSingleAttribute(univ.Sequence):
+ pass
+
+CMSSingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# PKCS9String is DirectoryString with an additional choice of IA5String,
+# and the SIZE is limited to 255
+
+class PKCS9String(univ.Choice):
+ pass
+
+PKCS9String.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('directoryString', DirectoryString())
+)
+
+
+# Upper Bounds
+
+pkcs_9_ub_pkcs9String = univ.Integer(255)
+
+pkcs_9_ub_challengePassword = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_emailAddress = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_match = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_signingDescription = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_unstructuredAddress = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_unstructuredName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+
+ub_name = univ.Integer(32768)
+
+pkcs_9_ub_placeOfBirth = univ.Integer(ub_name)
+
+pkcs_9_ub_pseudonym = univ.Integer(ub_name)
+
+
+# Object Identifier Arcs
+
+ietf_at = _OID(1, 3, 6, 1, 5, 5, 7, 9)
+
+id_at = _OID(2, 5, 4)
+
+pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
+
+pkcs_9_mo = _OID(pkcs_9, 0)
+
+smime = _OID(pkcs_9, 16)
+
+certTypes = _OID(pkcs_9, 22)
+
+crlTypes = _OID(pkcs_9, 23)
+
+pkcs_9_oc = _OID(pkcs_9, 24)
+
+pkcs_9_at = _OID(pkcs_9, 25)
+
+pkcs_9_sx = _OID(pkcs_9, 26)
+
+pkcs_9_mr = _OID(pkcs_9, 27)
+
+
+# Object Identifiers for Syntaxes for use with LDAP-accessible directories
+
+pkcs_9_sx_pkcs9String = _OID(pkcs_9_sx, 1)
+
+pkcs_9_sx_signingTime = _OID(pkcs_9_sx, 2)
+
+
+# Object Identifiers for object classes
+
+pkcs_9_oc_pkcsEntity = _OID(pkcs_9_oc, 1)
+
+pkcs_9_oc_naturalPerson = _OID(pkcs_9_oc, 2)
+
+
+# Object Identifiers for matching rules
+
+pkcs_9_mr_caseIgnoreMatch = _OID(pkcs_9_mr, 1)
+
+pkcs_9_mr_signingTimeMatch = _OID(pkcs_9_mr, 2)
+
+
+# PKCS #7 PDU
+
+pkcs_9_at_pkcs7PDU = _OID(pkcs_9_at, 5)
+
+pKCS7PDU = Attribute()
+pKCS7PDU['type'] = pkcs_9_at_pkcs7PDU
+pKCS7PDU['values'][0] = ContentInfo()
+
+
+# PKCS #12 token
+
+pkcs_9_at_userPKCS12 = _OID(2, 16, 840, 1, 113730, 3, 1, 216)
+
+userPKCS12 = Attribute()
+userPKCS12['type'] = pkcs_9_at_userPKCS12
+userPKCS12['values'][0] = PFX()
+
+
+# PKCS #15 token
+
+pkcs_9_at_pkcs15Token = _OID(pkcs_9_at, 1)
+
+# TODO: Once PKCS15Token can be imported, this can be included
+#
+# pKCS15Token = Attribute()
+# pKCS15Token['type'] = pkcs_9_at_pkcs15Token
+# pKCS15Token['values'][0] = PKCS15Token()
+
+
+# PKCS #8 encrypted private key information
+
+pkcs_9_at_encryptedPrivateKeyInfo = _OID(pkcs_9_at, 2)
+
+encryptedPrivateKeyInfo = Attribute()
+encryptedPrivateKeyInfo['type'] = pkcs_9_at_encryptedPrivateKeyInfo
+encryptedPrivateKeyInfo['values'][0] = EncryptedPrivateKeyInfo()
+
+
+# Electronic-mail address
+
+pkcs_9_at_emailAddress = rfc5280.id_emailAddress
+
+emailAddress = Attribute()
+emailAddress['type'] = pkcs_9_at_emailAddress
+emailAddress['values'][0] = EmailAddress()
+
+
+# Unstructured name
+
+pkcs_9_at_unstructuredName = _OID(pkcs_9, 2)
+
+unstructuredName = Attribute()
+unstructuredName['type'] = pkcs_9_at_unstructuredName
+unstructuredName['values'][0] = PKCS9String()
+
+
+# Unstructured address
+
+pkcs_9_at_unstructuredAddress = _OID(pkcs_9, 8)
+
+unstructuredAddress = Attribute()
+unstructuredAddress['type'] = pkcs_9_at_unstructuredAddress
+unstructuredAddress['values'][0] = DirectoryString()
+
+
+# Date of birth
+
+pkcs_9_at_dateOfBirth = _OID(ietf_at, 1)
+
+dateOfBirth = SingleAttribute()
+dateOfBirth['type'] = pkcs_9_at_dateOfBirth
+dateOfBirth['values'][0] = useful.GeneralizedTime()
+
+
+# Place of birth
+
+pkcs_9_at_placeOfBirth = _OID(ietf_at, 2)
+
+placeOfBirth = SingleAttribute()
+placeOfBirth['type'] = pkcs_9_at_placeOfBirth
+placeOfBirth['values'][0] = DirectoryString()
+
+
+# Gender
+
+class GenderString(char.PrintableString):
+ pass
+
+GenderString.subtypeSpec = constraint.ValueSizeConstraint(1, 1)
+GenderString.subtypeSpec = constraint.SingleValueConstraint("M", "F", "m", "f")
+
+
+pkcs_9_at_gender = _OID(ietf_at, 3)
+
+gender = SingleAttribute()
+gender['type'] = pkcs_9_at_gender
+gender['values'][0] = GenderString()
+
+
+# Country of citizenship
+
+pkcs_9_at_countryOfCitizenship = _OID(ietf_at, 4)
+
+countryOfCitizenship = Attribute()
+countryOfCitizenship['type'] = pkcs_9_at_countryOfCitizenship
+countryOfCitizenship['values'][0] = X520countryName()
+
+
+# Country of residence
+
+pkcs_9_at_countryOfResidence = _OID(ietf_at, 5)
+
+countryOfResidence = Attribute()
+countryOfResidence['type'] = pkcs_9_at_countryOfResidence
+countryOfResidence['values'][0] = X520countryName()
+
+
+# Pseudonym
+
+id_at_pseudonym = _OID(2, 5, 4, 65)
+
+pseudonym = Attribute()
+pseudonym['type'] = id_at_pseudonym
+pseudonym['values'][0] = DirectoryString()
+
+
+# Serial number
+
+id_at_serialNumber = rfc5280.id_at_serialNumber
+
+serialNumber = Attribute()
+serialNumber['type'] = id_at_serialNumber
+serialNumber['values'][0] = X520SerialNumber()
+
+
+# Content type
+
+pkcs_9_at_contentType = rfc5652.id_contentType
+
+contentType = CMSSingleAttribute()
+contentType['attrType'] = pkcs_9_at_contentType
+contentType['attrValues'][0] = ContentType()
+
+
+# Message digest
+
+pkcs_9_at_messageDigest = rfc5652.id_messageDigest
+
+messageDigest = CMSSingleAttribute()
+messageDigest['attrType'] = pkcs_9_at_messageDigest
+messageDigest['attrValues'][0] = MessageDigest()
+
+
+# Signing time
+
+pkcs_9_at_signingTime = rfc5652.id_signingTime
+
+signingTime = CMSSingleAttribute()
+signingTime['attrType'] = pkcs_9_at_signingTime
+signingTime['attrValues'][0] = SigningTime()
+
+
+# Random nonce
+
+class RandomNonce(univ.OctetString):
+ pass
+
+RandomNonce.subtypeSpec = constraint.ValueSizeConstraint(4, MAX)
+
+
+pkcs_9_at_randomNonce = _OID(pkcs_9_at, 3)
+
+randomNonce = CMSSingleAttribute()
+randomNonce['attrType'] = pkcs_9_at_randomNonce
+randomNonce['attrValues'][0] = RandomNonce()
+
+
+# Sequence number
+
+class SequenceNumber(univ.Integer):
+ pass
+
+SequenceNumber.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+pkcs_9_at_sequenceNumber = _OID(pkcs_9_at, 4)
+
+sequenceNumber = CMSSingleAttribute()
+sequenceNumber['attrType'] = pkcs_9_at_sequenceNumber
+sequenceNumber['attrValues'][0] = SequenceNumber()
+
+
+# Countersignature
+
+pkcs_9_at_counterSignature = rfc5652.id_countersignature
+
+counterSignature = CMSAttribute()
+counterSignature['attrType'] = pkcs_9_at_counterSignature
+counterSignature['attrValues'][0] = Countersignature()
+
+
+# Challenge password
+
+pkcs_9_at_challengePassword = _OID(pkcs_9, 7)
+
+challengePassword = SingleAttribute()
+challengePassword['type'] = pkcs_9_at_challengePassword
+challengePassword['values'][0] = DirectoryString()
+
+
+# Extension request
+
+class ExtensionRequest(Extensions):
+ pass
+
+
+pkcs_9_at_extensionRequest = _OID(pkcs_9, 14)
+
+extensionRequest = SingleAttribute()
+extensionRequest['type'] = pkcs_9_at_extensionRequest
+extensionRequest['values'][0] = ExtensionRequest()
+
+
+# Extended-certificate attributes (deprecated)
+
+class AttributeSet(univ.SetOf):
+ pass
+
+AttributeSet.componentType = Attribute()
+
+
+pkcs_9_at_extendedCertificateAttributes = _OID(pkcs_9, 9)
+
+extendedCertificateAttributes = SingleAttribute()
+extendedCertificateAttributes['type'] = pkcs_9_at_extendedCertificateAttributes
+extendedCertificateAttributes['values'][0] = AttributeSet()
+
+
+# Friendly name
+
+class FriendlyName(char.BMPString):
+ pass
+
+FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
+
+
+pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
+
+friendlyName = SingleAttribute()
+friendlyName['type'] = pkcs_9_at_friendlyName
+friendlyName['values'][0] = FriendlyName()
+
+
+# Local key identifier
+
+pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
+
+localKeyId = SingleAttribute()
+localKeyId['type'] = pkcs_9_at_localKeyId
+localKeyId['values'][0] = univ.OctetString()
+
+
+# Signing description
+
+pkcs_9_at_signingDescription = _OID(pkcs_9, 13)
+
+signingDescription = CMSSingleAttribute()
+signingDescription['attrType'] = pkcs_9_at_signingDescription
+signingDescription['attrValues'][0] = DirectoryString()
+
+
+# S/MIME capabilities
+
+class SMIMECapability(AlgorithmIdentifier):
+ pass
+
+
+class SMIMECapabilities(univ.SequenceOf):
+ pass
+
+SMIMECapabilities.componentType = SMIMECapability()
+
+
+pkcs_9_at_smimeCapabilities = _OID(pkcs_9, 15)
+
+smimeCapabilities = CMSSingleAttribute()
+smimeCapabilities['attrType'] = pkcs_9_at_smimeCapabilities
+smimeCapabilities['attrValues'][0] = SMIMECapabilities()
+
+
+# Certificate Attribute Map
+
+_certificateAttributesMapUpdate = {
+ # Attribute types for use with the "pkcsEntity" object class
+ pkcs_9_at_pkcs7PDU: ContentInfo(),
+ pkcs_9_at_userPKCS12: PFX(),
+ # TODO: Once PKCS15Token can be imported, this can be included
+ # pkcs_9_at_pkcs15Token: PKCS15Token(),
+ pkcs_9_at_encryptedPrivateKeyInfo: EncryptedPrivateKeyInfo(),
+ # Attribute types for use with the "naturalPerson" object class
+ pkcs_9_at_emailAddress: EmailAddress(),
+ pkcs_9_at_unstructuredName: PKCS9String(),
+ pkcs_9_at_unstructuredAddress: DirectoryString(),
+ pkcs_9_at_dateOfBirth: useful.GeneralizedTime(),
+ pkcs_9_at_placeOfBirth: DirectoryString(),
+ pkcs_9_at_gender: GenderString(),
+ pkcs_9_at_countryOfCitizenship: X520countryName(),
+ pkcs_9_at_countryOfResidence: X520countryName(),
+ id_at_pseudonym: DirectoryString(),
+ id_at_serialNumber: X520SerialNumber(),
+ # Attribute types for use with PKCS #10 certificate requests
+ pkcs_9_at_challengePassword: DirectoryString(),
+ pkcs_9_at_extensionRequest: ExtensionRequest(),
+ pkcs_9_at_extendedCertificateAttributes: AttributeSet(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# CMS Attribute Map
+
+# Note: pkcs_9_at_smimeCapabilities is not included in the map because
+# the definition in RFC 5751 is preferred, which produces the same
+# encoding, but it allows different parameters for SMIMECapability
+# and AlgorithmIdentifier.
+
+_cmsAttributesMapUpdate = {
+ # Attribute types for use in PKCS #7 data (a.k.a. CMS)
+ pkcs_9_at_contentType: ContentType(),
+ pkcs_9_at_messageDigest: MessageDigest(),
+ pkcs_9_at_signingTime: SigningTime(),
+ pkcs_9_at_randomNonce: RandomNonce(),
+ pkcs_9_at_sequenceNumber: SequenceNumber(),
+ pkcs_9_at_counterSignature: Countersignature(),
+ # Attributes for use in PKCS #12 "PFX" PDUs or PKCS #15 tokens
+ pkcs_9_at_friendlyName: FriendlyName(),
+ pkcs_9_at_localKeyId: univ.OctetString(),
+ pkcs_9_at_signingDescription: DirectoryString(),
+ # pkcs_9_at_smimeCapabilities: SMIMECapabilities(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2986.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2986.py
new file mode 100644
index 0000000000..309637d1fe
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc2986.py
@@ -0,0 +1,75 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Joel Johnson with asn1ate tool.
+# Modified by Russ Housley to add support for opentypes by importing
+# definitions from rfc5280 so that the same maps are used.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #10: Certification Request Syntax Specification
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2986.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+AttributeType = rfc5280.AttributeType
+
+AttributeValue = rfc5280.AttributeValue
+
+AttributeTypeAndValue = rfc5280.AttributeTypeAndValue
+
+Attribute = rfc5280.Attribute
+
+RelativeDistinguishedName = rfc5280.RelativeDistinguishedName
+
+RDNSequence = rfc5280.RDNSequence
+
+Name = rfc5280.Name
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+
+class Attributes(univ.SetOf):
+ pass
+
+
+Attributes.componentType = Attribute()
+
+
+class CertificationRequestInfo(univ.Sequence):
+ pass
+
+
+CertificationRequestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPKInfo', SubjectPublicKeyInfo()),
+ namedtype.NamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3058.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3058.py
new file mode 100644
index 0000000000..725de82ae7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3058.py
@@ -0,0 +1,42 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# IDEA Encryption Algorithm in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3058.txt
+# https://www.rfc-editor.org/errata/eid5913
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_IDEA_CBC = univ.ObjectIdentifier('1.3.6.1.4.1.188.7.1.1.2')
+
+
+id_alg_CMSIDEAwrap = univ.ObjectIdentifier('1.3.6.1.4.1.188.7.1.1.6')
+
+
+class IDEA_CBCPar(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('iv', univ.OctetString())
+ # exactly 8 octets, when present
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_IDEA_CBC: IDEA_CBCPar(),
+ id_alg_CMSIDEAwrap: univ.Null("")
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3114.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3114.py
new file mode 100644
index 0000000000..badcb1f214
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3114.py
@@ -0,0 +1,77 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# TEST Company Classification Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3114.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5755
+
+
+id_smime = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, ))
+
+id_tsp = id_smime + (7, )
+
+id_tsp_TEST_Amoco = id_tsp + (1, )
+
+class Amoco_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('amoco-general', 6),
+ ('amoco-confidential', 7),
+ ('amoco-highly-confidential', 8)
+ )
+
+
+id_tsp_TEST_Caterpillar = id_tsp + (2, )
+
+class Caterpillar_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('caterpillar-public', 6),
+ ('caterpillar-green', 7),
+ ('caterpillar-yellow', 8),
+ ('caterpillar-red', 9)
+ )
+
+
+id_tsp_TEST_Whirlpool = id_tsp + (3, )
+
+class Whirlpool_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('whirlpool-public', 6),
+ ('whirlpool-internal', 7),
+ ('whirlpool-confidential', 8)
+ )
+
+
+id_tsp_TEST_Whirlpool_Categories = id_tsp + (4, )
+
+class SecurityCategoryValues(univ.SequenceOf):
+ componentType = char.UTF8String()
+
+# Example SecurityCategoryValues: "LAW DEPARTMENT USE ONLY"
+# Example SecurityCategoryValues: "HUMAN RESOURCES USE ONLY"
+
+
+# Also, the privacy mark in the security label can contain a string,
+# such as: "ATTORNEY-CLIENT PRIVILEGED INFORMATION"
+
+
+# Map of security category type OIDs to security category added
+# to the ones that are in rfc5755.py
+
+_securityCategoryMapUpdate = {
+ id_tsp_TEST_Whirlpool_Categories: SecurityCategoryValues(),
+}
+
+rfc5755.securityCategoryMap.update(_securityCategoryMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3125.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3125.py
new file mode 100644
index 0000000000..00ff9bff48
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3125.py
@@ -0,0 +1,469 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Electronic Signature Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3125.txt
+# https://www.rfc-editor.org/errata/eid5901
+# https://www.rfc-editor.org/errata/eid5902
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+AttributeType = rfc5280.AttributeType
+
+AttributeTypeAndValue = rfc5280.AttributeTypeAndValue
+
+AttributeValue = rfc5280.AttributeValue
+
+Certificate = rfc5280.Certificate
+
+CertificateList = rfc5280.CertificateList
+
+DirectoryString = rfc5280.DirectoryString
+
+GeneralName = rfc5280.GeneralName
+
+GeneralNames = rfc5280.GeneralNames
+
+Name = rfc5280.Name
+
+PolicyInformation = rfc5280.PolicyInformation
+
+
+# Electronic Signature Policies
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class AcceptablePolicySet(univ.SequenceOf):
+ componentType = CertPolicyId()
+
+
+class SignPolExtn(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.NamedType('extnValue', univ.OctetString())
+ )
+
+
+class SignPolExtensions(univ.SequenceOf):
+ componentType = SignPolExtn()
+
+
+class AlgAndLength(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algID', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('minKeyLength', univ.Integer()),
+ namedtype.OptionalNamedType('other', SignPolExtensions())
+ )
+
+
+class AlgorithmConstraints(univ.SequenceOf):
+ componentType = AlgAndLength()
+
+
+class AlgorithmConstraintSet(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('signerAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('eeCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('caCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('aaCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('tsaCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class AttributeValueConstraints(univ.SequenceOf):
+ componentType = AttributeTypeAndValue()
+
+
+class AttributeTypeConstraints(univ.SequenceOf):
+ componentType = AttributeType()
+
+
+class AttributeConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('attributeTypeConstarints',
+ AttributeTypeConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('attributeValueConstarints',
+ AttributeValueConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class HowCertAttribute(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('claimedAttribute', 0),
+ ('certifiedAttribtes', 1),
+ ('either', 2)
+ )
+
+
+class SkipCerts(univ.Integer):
+ subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class PolicyConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BaseDistance(univ.Integer):
+ subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum',
+ BaseDistance().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(
+ value=0)),
+ namedtype.OptionalNamedType('maximum',
+ BaseDistance().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ componentType = GeneralSubtree()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees',
+ GeneralSubtrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees',
+ GeneralSubtrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class PathLenConstraint(univ.Integer):
+ subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class CertificateTrustPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('trustpoint', Certificate()),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ PathLenConstraint().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('acceptablePolicySet',
+ AcceptablePolicySet().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('nameConstraints',
+ NameConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('policyConstraints',
+ PolicyConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class CertificateTrustTrees(univ.SequenceOf):
+ componentType = CertificateTrustPoint()
+
+
+class EnuRevReq(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('clrCheck', 0),
+ ('ocspCheck', 1),
+ ('bothCheck', 2),
+ ('eitherCheck', 3),
+ ('noCheck', 4),
+ ('other', 5)
+ )
+
+
+class RevReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enuRevReq', EnuRevReq()),
+ namedtype.OptionalNamedType('exRevReq', SignPolExtensions())
+ )
+
+
+class CertRevReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('endCertRevReq', RevReq()),
+ namedtype.NamedType('caCerts',
+ RevReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class AttributeTrustCondition(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeMandated', univ.Boolean()),
+ namedtype.NamedType('howCertAttribute', HowCertAttribute()),
+ namedtype.OptionalNamedType('attrCertificateTrustTrees',
+ CertificateTrustTrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('attrRevReq',
+ CertRevReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('attributeConstraints',
+ AttributeConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CMSAttrs(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class CertInfoReq(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('none', 0),
+ ('signerOnly', 1),
+ ('fullPath', 2)
+ )
+
+
+class CertRefReq(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('signerOnly', 1),
+ ('fullPath', 2)
+ )
+
+
+class DeltaTime(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('deltaSeconds', univ.Integer()),
+ namedtype.NamedType('deltaMinutes', univ.Integer()),
+ namedtype.NamedType('deltaHours', univ.Integer()),
+ namedtype.NamedType('deltaDays', univ.Integer())
+ )
+
+
+class TimestampTrustCondition(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('ttsCertificateTrustTrees',
+ CertificateTrustTrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('ttsRevReq',
+ CertRevReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('ttsNameConstraints',
+ NameConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('cautionPeriod',
+ DeltaTime().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('signatureTimestampDelay',
+ DeltaTime().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 4)))
+ )
+
+
+class SignerRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('externalSignedData', univ.Boolean()),
+ namedtype.NamedType('mandatedSignedAttr', CMSAttrs()),
+ namedtype.NamedType('mandatedUnsignedAttr', CMSAttrs()),
+ namedtype.DefaultedNamedType('mandatedCertificateRef',
+ CertRefReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(
+ value='signerOnly')),
+ namedtype.DefaultedNamedType('mandatedCertificateInfo',
+ CertInfoReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(
+ value='none')),
+ namedtype.OptionalNamedType('signPolExtensions',
+ SignPolExtensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class MandatedUnsignedAttr(CMSAttrs):
+ pass
+
+
+class VerifierRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mandatedUnsignedAttr', MandatedUnsignedAttr()),
+ namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions())
+ )
+
+
+class SignerAndVerifierRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signerRules', SignerRules()),
+ namedtype.NamedType('verifierRules', VerifierRules())
+ )
+
+
+class SigningCertTrustCondition(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signerTrustTrees', CertificateTrustTrees()),
+ namedtype.NamedType('signerRevReq', CertRevReq())
+ )
+
+
+class CommitmentTypeIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class FieldOfApplication(DirectoryString):
+ pass
+
+
+class CommitmentType(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('identifier', CommitmentTypeIdentifier()),
+ namedtype.OptionalNamedType('fieldOfApplication',
+ FieldOfApplication().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('semantics',
+ DirectoryString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class SelectedCommitmentTypes(univ.SequenceOf):
+ componentType = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('empty', univ.Null()),
+ namedtype.NamedType('recognizedCommitmentType', CommitmentType())
+ ))
+
+
+class CommitmentRule(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('selCommitmentTypes', SelectedCommitmentTypes()),
+ namedtype.OptionalNamedType('signerAndVeriferRules',
+ SignerAndVerifierRules().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('signingCertTrustCondition',
+ SigningCertTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('timeStampTrustCondition',
+ TimestampTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('attributeTrustCondition',
+ AttributeTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('algorithmConstraintSet',
+ AlgorithmConstraintSet().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('signPolExtensions',
+ SignPolExtensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+ )
+
+
+class CommitmentRules(univ.SequenceOf):
+ componentType = CommitmentRule()
+
+
+class CommonRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('signerAndVeriferRules',
+ SignerAndVerifierRules().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('signingCertTrustCondition',
+ SigningCertTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('timeStampTrustCondition',
+ TimestampTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('attributeTrustCondition',
+ AttributeTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('algorithmConstraintSet',
+ AlgorithmConstraintSet().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('signPolExtensions',
+ SignPolExtensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+ )
+
+
+class PolicyIssuerName(GeneralNames):
+ pass
+
+
+class SignPolicyHash(univ.OctetString):
+ pass
+
+
+class SignPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class SigningPeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime())
+ )
+
+
+class SignatureValidationPolicy(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signingPeriod', SigningPeriod()),
+ namedtype.NamedType('commonRules', CommonRules()),
+ namedtype.NamedType('commitmentRules', CommitmentRules()),
+ namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions())
+ )
+
+
+class SignPolicyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signPolicyIdentifier', SignPolicyId()),
+ namedtype.NamedType('dateOfIssue', useful.GeneralizedTime()),
+ namedtype.NamedType('policyIssuerName', PolicyIssuerName()),
+ namedtype.NamedType('fieldOfApplication', FieldOfApplication()),
+ namedtype.NamedType('signatureValidationPolicy', SignatureValidationPolicy()),
+ namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions())
+ )
+
+
+class SignaturePolicy(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signPolicyHashAlg', AlgorithmIdentifier()),
+ namedtype.NamedType('signPolicyInfo', SignPolicyInfo()),
+ namedtype.OptionalNamedType('signPolicyHash', SignPolicyHash())
+ )
+
+
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3161.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3161.py
new file mode 100644
index 0000000000..0e1dcedb39
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3161.py
@@ -0,0 +1,142 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Time-Stamp Protocol (TSP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3161.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4210
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+Extensions = rfc5280.Extensions
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+GeneralName = rfc5280.GeneralName
+
+ContentInfo = rfc5652.ContentInfo
+
+PKIFreeText = rfc4210.PKIFreeText
+
+
+id_ct_TSTInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.4')
+
+
+class Accuracy(univ.Sequence):
+ pass
+
+Accuracy.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('seconds', univ.Integer()),
+ namedtype.OptionalNamedType('millis', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('micros', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class MessageImprint(univ.Sequence):
+ pass
+
+MessageImprint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('hashedMessage', univ.OctetString())
+)
+
+
+class PKIFailureInfo(univ.BitString):
+ pass
+
+PKIFailureInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badRequest', 2),
+ ('badDataFormat', 5),
+ ('timeNotAvailable', 14),
+ ('unacceptedPolicy', 15),
+ ('unacceptedExtension', 16),
+ ('addInfoNotAvailable', 17),
+ ('systemFailure', 25)
+)
+
+
+class PKIStatus(univ.Integer):
+ pass
+
+PKIStatus.namedValues = namedval.NamedValues(
+ ('granted', 0),
+ ('grantedWithMods', 1),
+ ('rejection', 2),
+ ('waiting', 3),
+ ('revocationWarning', 4),
+ ('revocationNotification', 5)
+)
+
+
+class PKIStatusInfo(univ.Sequence):
+ pass
+
+PKIStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.OptionalNamedType('statusString', PKIFreeText()),
+ namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
+)
+
+
+class TSAPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class TSTInfo(univ.Sequence):
+ pass
+
+TSTInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
+ namedtype.NamedType('policy', TSAPolicyId()),
+ namedtype.NamedType('messageImprint', MessageImprint()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('genTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('accuracy', Accuracy()),
+ namedtype.DefaultedNamedType('ordering', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('nonce', univ.Integer()),
+ namedtype.OptionalNamedType('tsa', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class TimeStampReq(univ.Sequence):
+ pass
+
+TimeStampReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
+ namedtype.NamedType('messageImprint', MessageImprint()),
+ namedtype.OptionalNamedType('reqPolicy', TSAPolicyId()),
+ namedtype.OptionalNamedType('nonce', univ.Integer()),
+ namedtype.DefaultedNamedType('certReq', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class TimeStampToken(ContentInfo):
+ pass
+
+
+class TimeStampResp(univ.Sequence):
+ pass
+
+TimeStampResp.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType('timeStampToken', TimeStampToken())
+)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3274.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3274.py
new file mode 100644
index 0000000000..425e006f3d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3274.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Compressed Data Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3274.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+class CompressionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+# The CMS Compressed Data Content Type
+
+id_ct_compressedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.9')
+
+class CompressedData(univ.Sequence):
+ pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', rfc5652.CMSVersion()), # Always set to 0
+ namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', rfc5652.EncapsulatedContentInfo())
+)
+
+
+# Algorithm identifier for the zLib Compression Algorithm
+# This includes cpa_zlibCompress as defined in RFC 6268,
+# from https://www.rfc-editor.org/rfc/rfc6268.txt
+
+id_alg_zlibCompress = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.8')
+
+cpa_zlibCompress = rfc5280.AlgorithmIdentifier()
+cpa_zlibCompress['algorithm'] = id_alg_zlibCompress
+# cpa_zlibCompress['parameters'] are absent
+
+
+# Map of Content Type OIDs to Content Types is added to thr
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_compressedData: CompressedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3279.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3279.py
new file mode 100644
index 0000000000..f6e24deafc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3279.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules.
+#
+# Copyright (c) 2017, Danielle Madeley <danielle@madeley.id.au>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Algorithms and Identifiers for Internet X.509 Certificates and CRLs
+#
+# Derived from RFC 3279:
+# https://www.rfc-editor.org/rfc/rfc3279.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+md2 = _OID(1, 2, 840, 113549, 2, 2)
+md5 = _OID(1, 2, 840, 113549, 2, 5)
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+id_dsa = _OID(1, 2, 840, 10040, 4, 1)
+
+
+class DSAPublicKey(univ.Integer):
+ pass
+
+
+class Dss_Parms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer())
+ )
+
+
+id_dsa_with_sha1 = _OID(1, 2, 840, 10040, 4, 3)
+
+
+class Dss_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
+rsaEncryption = _OID(pkcs_1, 1)
+md2WithRSAEncryption = _OID(pkcs_1, 2)
+md5WithRSAEncryption = _OID(pkcs_1, 4)
+sha1WithRSAEncryption = _OID(pkcs_1, 5)
+
+
+class RSAPublicKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+ )
+
+
+dhpublicnumber = _OID(1, 2, 840, 10046, 2, 1)
+
+
+class DHPublicKey(univ.Integer):
+ pass
+
+
+class ValidationParms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+ )
+
+
+class DomainParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.OptionalNamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+ )
+
+
+id_keyExchangeAlgorithm = _OID(2, 16, 840, 1, 101, 2, 1, 1, 22)
+
+
+class KEA_Parms_Id(univ.OctetString):
+ pass
+
+
+ansi_X9_62 = _OID(1, 2, 840, 10045)
+
+
+class FieldID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fieldType', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Any())
+ )
+
+
+id_ecSigType = _OID(ansi_X9_62, 4)
+ecdsa_with_SHA1 = _OID(id_ecSigType, 1)
+
+
+class ECDSA_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+id_fieldType = _OID(ansi_X9_62, 1)
+prime_field = _OID(id_fieldType, 1)
+
+
+class Prime_p(univ.Integer):
+ pass
+
+
+characteristic_two_field = _OID(id_fieldType, 2)
+
+
+class Characteristic_two(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('m', univ.Integer()),
+ namedtype.NamedType('basis', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Any())
+ )
+
+
+id_characteristic_two_basis = _OID(characteristic_two_field, 3)
+gnBasis = _OID(id_characteristic_two_basis, 1)
+tpBasis = _OID(id_characteristic_two_basis, 2)
+
+
+class Trinomial(univ.Integer):
+ pass
+
+
+ppBasis = _OID(id_characteristic_two_basis, 3)
+
+
+class Pentanomial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('k1', univ.Integer()),
+ namedtype.NamedType('k2', univ.Integer()),
+ namedtype.NamedType('k3', univ.Integer())
+ )
+
+
+class FieldElement(univ.OctetString):
+ pass
+
+
+class ECPoint(univ.OctetString):
+ pass
+
+
+class Curve(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('a', FieldElement()),
+ namedtype.NamedType('b', FieldElement()),
+ namedtype.OptionalNamedType('seed', univ.BitString())
+ )
+
+
+class ECPVer(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('ecpVer1', 1)
+ )
+
+
+class ECParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', ECPVer()),
+ namedtype.NamedType('fieldID', FieldID()),
+ namedtype.NamedType('curve', Curve()),
+ namedtype.NamedType('base', ECPoint()),
+ namedtype.NamedType('order', univ.Integer()),
+ namedtype.OptionalNamedType('cofactor', univ.Integer())
+ )
+
+
+class EcpkParameters(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ecParameters', ECParameters()),
+ namedtype.NamedType('namedCurve', univ.ObjectIdentifier()),
+ namedtype.NamedType('implicitlyCA', univ.Null())
+ )
+
+
+id_publicKeyType = _OID(ansi_X9_62, 2)
+id_ecPublicKey = _OID(id_publicKeyType, 1)
+
+ellipticCurve = _OID(ansi_X9_62, 3)
+
+c_TwoCurve = _OID(ellipticCurve, 0)
+c2pnb163v1 = _OID(c_TwoCurve, 1)
+c2pnb163v2 = _OID(c_TwoCurve, 2)
+c2pnb163v3 = _OID(c_TwoCurve, 3)
+c2pnb176w1 = _OID(c_TwoCurve, 4)
+c2tnb191v1 = _OID(c_TwoCurve, 5)
+c2tnb191v2 = _OID(c_TwoCurve, 6)
+c2tnb191v3 = _OID(c_TwoCurve, 7)
+c2onb191v4 = _OID(c_TwoCurve, 8)
+c2onb191v5 = _OID(c_TwoCurve, 9)
+c2pnb208w1 = _OID(c_TwoCurve, 10)
+c2tnb239v1 = _OID(c_TwoCurve, 11)
+c2tnb239v2 = _OID(c_TwoCurve, 12)
+c2tnb239v3 = _OID(c_TwoCurve, 13)
+c2onb239v4 = _OID(c_TwoCurve, 14)
+c2onb239v5 = _OID(c_TwoCurve, 15)
+c2pnb272w1 = _OID(c_TwoCurve, 16)
+c2pnb304w1 = _OID(c_TwoCurve, 17)
+c2tnb359v1 = _OID(c_TwoCurve, 18)
+c2pnb368w1 = _OID(c_TwoCurve, 19)
+c2tnb431r1 = _OID(c_TwoCurve, 20)
+
+primeCurve = _OID(ellipticCurve, 1)
+prime192v1 = _OID(primeCurve, 1)
+prime192v2 = _OID(primeCurve, 2)
+prime192v3 = _OID(primeCurve, 3)
+prime239v1 = _OID(primeCurve, 4)
+prime239v2 = _OID(primeCurve, 5)
+prime239v3 = _OID(primeCurve, 6)
+prime256v1 = _OID(primeCurve, 7)
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py. Do not add OIDs with absent paramaters.
+
+_algorithmIdentifierMapUpdate = {
+ md2: univ.Null(""),
+ md5: univ.Null(""),
+ id_sha1: univ.Null(""),
+ id_dsa: Dss_Parms(),
+ rsaEncryption: univ.Null(""),
+ md2WithRSAEncryption: univ.Null(""),
+ md5WithRSAEncryption: univ.Null(""),
+ sha1WithRSAEncryption: univ.Null(""),
+ dhpublicnumber: DomainParameters(),
+ id_keyExchangeAlgorithm: KEA_Parms_Id(),
+ id_ecPublicKey: EcpkParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3280.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3280.py
new file mode 100644
index 0000000000..4c6df13280
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3280.py
@@ -0,0 +1,1543 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate and Certificate
+# Revocation List (CRL) Profile
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3280.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+unformatted_postal_address = univ.Integer(16)
+
+ub_organizational_units = univ.Integer(4)
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+id_at = _OID(2, 5, 4)
+
+id_at_name = _OID(id_at, 41)
+
+ub_pds_parameter_length = univ.Integer(30)
+
+
+class PDSParameter(univ.Set):
+ pass
+
+
+PDSParameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+ub_organization_name_length = univ.Integer(64)
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+id_pkix = _OID(1, 3, 6, 1, 5, 5, 7)
+
+id_qt = _OID(id_pkix, 2)
+
+
+class PresentationAddress(univ.Sequence):
+ pass
+
+
+PresentationAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class AlgorithmIdentifier(univ.Sequence):
+ pass
+
+
+AlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any())
+)
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Extension(univ.Sequence):
+ pass
+
+
+Extension.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('extnValue', univ.OctetString())
+)
+
+
+class Extensions(univ.SequenceOf):
+ pass
+
+
+Extensions.componentType = Extension()
+Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ pass
+
+
+SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+)
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class Validity(univ.Sequence):
+ pass
+
+
+Validity.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+)
+
+
+class Version(univ.Integer):
+ pass
+
+
+Version.namedValues = namedval.NamedValues(
+ ('v1', 0),
+ ('v2', 1),
+ ('v3', 2)
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue())
+)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ pass
+
+
+RelativeDistinguishedName.componentType = AttributeTypeAndValue()
+RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class RDNSequence(univ.SequenceOf):
+ pass
+
+
+RDNSequence.componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ pass
+
+
+Name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rdnSequence', RDNSequence())
+)
+
+
+class TBSCertificate(univ.Sequence):
+ pass
+
+
+TBSCertificate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value="v1")),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class Certificate(univ.Sequence):
+ pass
+
+
+Certificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+ub_surname_length = univ.Integer(40)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_e163_4_sub_address_length = univ.Integer(40)
+
+teletex_common_name = univ.Integer(2)
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+
+class CountryName(univ.Choice):
+ pass
+
+
+CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+CountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+extension_OR_address_components = univ.Integer(12)
+
+id_at_dnQualifier = _OID(id_at, 46)
+
+ub_e163_4_number_length = univ.Integer(15)
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ pass
+
+
+ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('e163-4-address', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))
+ ),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+terminal_type = univ.Integer(23)
+
+id_domainComponent = _OID(0, 9, 2342, 19200300, 100, 1, 25)
+
+ub_state_name = univ.Integer(128)
+
+
+class X520StateOrProvinceName(univ.Choice):
+ pass
+
+
+X520StateOrProvinceName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+)
+
+ub_organization_name = univ.Integer(64)
+
+
+class X520OrganizationName(univ.Choice):
+ pass
+
+
+X520OrganizationName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+)
+
+ub_emailaddress_length = univ.Integer(128)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+id_at_surname = _OID(id_at, 4)
+
+ub_common_name_length = univ.Integer(64)
+
+id_ad = _OID(id_pkix, 48)
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+ub_given_name_length = univ.Integer(16)
+
+ub_initials_length = univ.Integer(5)
+
+
+class PersonalName(univ.Set):
+ pass
+
+
+PersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+ub_x121_address_length = univ.Integer(16)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ pass
+
+
+BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+)
+
+ub_domain_defined_attributes = univ.Integer(4)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+ub_extension_attributes = univ.Integer(256)
+
+
+class ExtensionAttribute(univ.Sequence):
+ pass
+
+
+ExtensionAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class ORAddress(univ.Sequence):
+ pass
+
+
+ORAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+)
+
+id_pe = _OID(id_pkix, 1)
+
+ub_title = univ.Integer(64)
+
+
+class X520Title(univ.Choice):
+ pass
+
+
+X520Title.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+)
+
+id_at_organizationalUnitName = _OID(id_at, 11)
+
+
+class EmailAddress(char.IA5String):
+ pass
+
+
+EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+physical_delivery_country_name = univ.Integer(8)
+
+id_at_givenName = _OID(id_at, 42)
+
+
+class TeletexCommonName(char.TeletexString):
+ pass
+
+
+TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+id_qt_cps = _OID(id_qt, 1)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+id_kp = _OID(id_pkix, 3)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class DomainComponent(char.IA5String):
+ pass
+
+
+id_at_initials = _OID(id_at, 43)
+
+id_qt_unotice = _OID(id_qt, 2)
+
+ub_pds_name_length = univ.Integer(16)
+
+
+class PDSName(char.PrintableString):
+ pass
+
+
+PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+class DistinguishedName(RDNSequence):
+ pass
+
+
+class CommonName(char.PrintableString):
+ pass
+
+
+CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+ub_serial_number = univ.Integer(64)
+
+
+class X520SerialNumber(char.PrintableString):
+ pass
+
+
+X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
+
+id_at_generationQualifier = _OID(id_at, 44)
+
+ub_organizational_unit_name = univ.Integer(64)
+
+id_ad_ocsp = _OID(id_ad, 1)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class TeletexPersonalName(univ.Set):
+ pass
+
+
+TeletexPersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
+TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+class TBSCertList(univ.Sequence):
+ pass
+
+
+TBSCertList.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates',
+ univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ ))
+ )),
+ namedtype.OptionalNamedType('crlExtensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+local_postal_attributes = univ.Integer(21)
+
+pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ pass
+
+
+PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+ub_name = univ.Integer(32768)
+
+
+class X520name(univ.Choice):
+ pass
+
+
+X520name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+)
+
+id_emailAddress = _OID(pkcs_9, 1)
+
+
+class TerminalType(univ.Integer):
+ pass
+
+
+TerminalType.namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+)
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ pass
+
+
+X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+)
+
+id_at_commonName = _OID(id_at, 3)
+
+pds_name = univ.Integer(7)
+
+post_office_box_address = univ.Integer(18)
+
+ub_locality_name = univ.Integer(128)
+
+
+class X520LocalityName(univ.Choice):
+ pass
+
+
+X520LocalityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+)
+
+id_ad_timeStamping = _OID(id_ad, 3)
+
+id_at_countryName = _OID(id_at, 6)
+
+physical_delivery_personal_name = univ.Integer(13)
+
+teletex_personal_name = univ.Integer(4)
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+ub_postal_code_length = univ.Integer(16)
+
+
+class PostalCode(univ.Choice):
+ pass
+
+
+PostalCode.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+)
+
+
+class X520countryName(char.PrintableString):
+ pass
+
+
+X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+
+postal_code = univ.Integer(9)
+
+id_ad_caRepository = _OID(id_ad, 5)
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+id_at_title = _OID(id_at, 12)
+
+id_at_serialNumber = _OID(id_at, 5)
+
+id_ad_caIssuers = _OID(id_ad, 2)
+
+ub_integer_options = univ.Integer(256)
+
+
+class CertificateList(univ.Sequence):
+ pass
+
+
+CertificateList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
+TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+physical_delivery_office_name = univ.Integer(10)
+
+ub_common_name = univ.Integer(64)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+ub_pseudonym = univ.Integer(128)
+
+poste_restante_address = univ.Integer(19)
+
+id_at_organizationName = _OID(id_at, 10)
+
+physical_delivery_office_number = univ.Integer(11)
+
+id_at_pseudonym = _OID(id_at, 65)
+
+
+class X520CommonName(univ.Choice):
+ pass
+
+
+X520CommonName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+)
+
+physical_delivery_organization_name = univ.Integer(14)
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+id_at_stateOrProvinceName = _OID(id_at, 8)
+
+common_name = univ.Integer(1)
+
+id_at_localityName = _OID(id_at, 7)
+
+ub_match = univ.Integer(128)
+
+ub_unformatted_address_length = univ.Integer(180)
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+)
+
+extended_network_address = univ.Integer(22)
+
+unique_postal_name = univ.Integer(20)
+
+ub_pds_physical_address_lines = univ.Integer(6)
+
+
+class UnformattedPostalAddress(univ.Set):
+ pass
+
+
+UnformattedPostalAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+)
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+class X520Pseudonym(univ.Choice):
+ pass
+
+
+X520Pseudonym.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
+)
+
+teletex_organization_name = univ.Integer(3)
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+street_address = univ.Integer(17)
+
+id_kp_OCSPSigning = _OID(id_kp, 9)
+
+id_ce = _OID(2, 5, 29)
+
+id_ce_certificatePolicies = _OID(id_ce, 32)
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('partyName',
+ DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AnotherName(univ.Sequence):
+ pass
+
+
+AnotherName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress',
+ univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+id_ce_cRLDistributionPoints = _OID(id_ce, 31)
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyMappings(univ.SequenceOf):
+ pass
+
+
+PolicyMappings.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+))
+
+PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+holdInstruction = _OID(2, 2, 840, 10040, 2)
+
+id_ce_subjectDirectoryAttributes = _OID(id_ce, 9)
+
+id_holdinstruction_callissuer = _OID(holdInstruction, 2)
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ pass
+
+
+SubjectDirectoryAttributes.componentType = Attribute()
+SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+anyPolicy = _OID(id_ce_certificatePolicies, 0)
+
+id_ce_subjectAltName = _OID(id_ce, 17)
+
+id_kp_emailProtection = _OID(id_kp, 4)
+
+
+class ReasonFlags(univ.BitString):
+ pass
+
+
+ReasonFlags.namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('privilegeWithdrawn', 7),
+ ('aACompromise', 8)
+)
+
+
+class DistributionPointName(univ.Choice):
+ pass
+
+
+DistributionPointName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName',
+ GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DistributionPoint(univ.Sequence):
+ pass
+
+
+DistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_ce_keyUsage = _OID(id_ce, 15)
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ pass
+
+
+PolicyQualifierInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType('qualifier', univ.Any())
+)
+
+
+class PolicyInformation(univ.Sequence):
+ pass
+
+
+PolicyInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
+)
+
+
+class CertificatePolicies(univ.SequenceOf):
+ pass
+
+
+CertificatePolicies.componentType = PolicyInformation()
+CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_ce_basicConstraints = _OID(id_ce, 19)
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ pass
+
+
+ExtKeyUsageSyntax.componentType = KeyPurposeId()
+ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+class BasicConstraints(univ.Sequence):
+ pass
+
+
+BasicConstraints.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+
+class SkipCerts(univ.Integer):
+ pass
+
+
+SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class InhibitAnyPolicy(SkipCerts):
+ pass
+
+
+class CRLNumber(univ.Integer):
+ pass
+
+
+CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ pass
+
+
+AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_ce_nameConstraints = _OID(id_ce, 30)
+
+id_kp_serverAuth = _OID(id_kp, 1)
+
+id_ce_freshestCRL = _OID(id_ce, 46)
+
+id_ce_cRLReasons = _OID(id_ce, 21)
+
+
+class CRLDistributionPoints(univ.SequenceOf):
+ pass
+
+
+CRLDistributionPoints.componentType = DistributionPoint()
+CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class FreshestCRL(CRLDistributionPoints):
+ pass
+
+
+id_ce_inhibitAnyPolicy = _OID(id_ce, 54)
+
+
+class CRLReason(univ.Enumerated):
+ pass
+
+
+CRLReason.namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+)
+
+
+class BaseDistance(univ.Integer):
+ pass
+
+
+BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ pass
+
+
+GeneralSubtree.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ pass
+
+
+GeneralSubtrees.componentType = GeneralSubtree()
+GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ pass
+
+
+NameConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_pe_authorityInfoAccess = _OID(id_pe, 1)
+
+id_pe_subjectInfoAccess = _OID(id_pe, 11)
+
+id_ce_certificateIssuer = _OID(id_ce, 29)
+
+id_ce_invalidityDate = _OID(id_ce, 24)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('any', univ.Any())
+)
+
+id_ce_authorityKeyIdentifier = _OID(id_ce, 35)
+
+
+class AccessDescription(univ.Sequence):
+ pass
+
+
+AccessDescription.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+)
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+AuthorityInfoAccessSyntax.componentType = AccessDescription()
+AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_ce_issuingDistributionPoint = _OID(id_ce, 28)
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+class DisplayText(univ.Choice):
+ pass
+
+
+DisplayText.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+)
+
+
+class NoticeReference(univ.Sequence):
+ pass
+
+
+NoticeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+)
+
+
+class UserNotice(univ.Sequence):
+ pass
+
+
+UserNotice.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+)
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ pass
+
+
+PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_ce_subjectKeyIdentifier = _OID(id_ce, 14)
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+class SubjectInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+SubjectInfoAccessSyntax.componentType = AccessDescription()
+SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class KeyUsage(univ.BitString):
+ pass
+
+
+KeyUsage.namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+)
+
+id_ce_extKeyUsage = _OID(id_ce, 37)
+
+anyExtendedKeyUsage = _OID(id_ce_extKeyUsage, 0)
+
+id_ce_privateKeyUsagePeriod = _OID(id_ce, 16)
+
+id_ce_policyMappings = _OID(id_ce, 33)
+
+id_ce_cRLNumber = _OID(id_ce, 20)
+
+id_ce_policyConstraints = _OID(id_ce, 36)
+
+id_holdinstruction_none = _OID(holdInstruction, 1)
+
+id_holdinstruction_reject = _OID(holdInstruction, 3)
+
+id_kp_timeStamping = _OID(id_kp, 8)
+
+
+class PolicyConstraints(univ.Sequence):
+ pass
+
+
+PolicyConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_kp_clientAuth = _OID(id_kp, 2)
+
+id_ce_deltaCRLIndicator = _OID(id_ce, 27)
+
+id_ce_issuerAltName = _OID(id_ce, 18)
+
+id_kp_codeSigning = _OID(id_kp, 3)
+
+id_ce_holdInstructionCode = _OID(id_ce, 23)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ pass
+
+
+IssuingDistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
+)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3281.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3281.py
new file mode 100644
index 0000000000..a78abf9fea
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3281.py
@@ -0,0 +1,331 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3281.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3280
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class ObjectDigestInfo(univ.Sequence):
+ pass
+
+
+ObjectDigestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType', univ.Enumerated(
+ namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest', univ.BitString())
+)
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.GeneralNames()),
+ namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier())
+)
+
+
+class TargetCert(univ.Sequence):
+ pass
+
+
+TargetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetCertificate', IssuerSerial()),
+ namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()),
+ namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
+)
+
+
+class Target(univ.Choice):
+ pass
+
+
+Target.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetName', rfc3280.GeneralName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('targetCert',
+ TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class Targets(univ.SequenceOf):
+ pass
+
+
+Targets.componentType = Target()
+
+
+class ProxyInfo(univ.SequenceOf):
+ pass
+
+
+ProxyInfo.componentType = Targets()
+
+id_at_role = _buildOid(rfc3280.id_at, 72)
+
+id_pe_aaControls = _buildOid(rfc3280.id_pe, 6)
+
+id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55)
+
+id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4)
+
+
+class ClassList(univ.BitString):
+ pass
+
+
+ClassList.namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('topSecret', 5)
+)
+
+
+class SecurityCategory(univ.Sequence):
+ pass
+
+
+SecurityCategory.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Clearance(univ.Sequence):
+ pass
+
+
+Clearance.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(
+ value="unclassified")),
+ namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class AttCertVersion(univ.Integer):
+ pass
+
+
+AttCertVersion.namedValues = namedval.NamedValues(
+ ('v2', 1)
+)
+
+id_aca = _buildOid(rfc3280.id_pkix, 10)
+
+id_at_clearance = _buildOid(2, 5, 1, 5, 55)
+
+
+class AttrSpec(univ.SequenceOf):
+ pass
+
+
+AttrSpec.componentType = univ.ObjectIdentifier()
+
+
+class AAControls(univ.Sequence):
+ pass
+
+
+AAControls.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.OptionalNamedType('permittedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1))
+)
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ pass
+
+
+AttCertValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+)
+
+
+id_aca_authenticationInfo = _buildOid(id_aca, 1)
+
+
class V2Form(univ.Sequence):
    """V2Form ::= SEQUENCE -- v2 attribute-certificate issuer form."""
    pass


V2Form.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()),
    namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)


class AttCertIssuer(univ.Choice):
    """AttCertIssuer ::= CHOICE { v1Form GeneralNames, v2Form [0] V2Form }"""
    pass


AttCertIssuer.componentType = namedtype.NamedTypes(
    namedtype.NamedType('v1Form', rfc3280.GeneralNames()),
    namedtype.NamedType('v2Form',
                        V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
+
+
class Holder(univ.Sequence):
    """Holder ::= SEQUENCE -- identifies the attribute-certificate holder.

    All three components are OPTIONAL; the ASN.1 comments in the RFC
    require at least one to be present (not enforced here).
    """
    pass


Holder.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
+
+
class AttributeCertificateInfo(univ.Sequence):
    """AttributeCertificateInfo ::= SEQUENCE -- the to-be-signed AC body."""
    pass


AttributeCertificateInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('version', AttCertVersion()),
    namedtype.NamedType('holder', Holder()),
    namedtype.NamedType('issuer', AttCertIssuer()),
    namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
    namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
    namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
    namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
    namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
    namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
)


class AttributeCertificate(univ.Sequence):
    """AttributeCertificate ::= SEQUENCE { acinfo, signatureAlgorithm, signatureValue }"""
    pass


AttributeCertificate.componentType = namedtype.NamedTypes(
    namedtype.NamedType('acinfo', AttributeCertificateInfo()),
    namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
    namedtype.NamedType('signatureValue', univ.BitString())
)
+
# ASN.1 module and attribute OID assignments.
id_mod = _buildOid(rfc3280.id_pkix, 0)

id_mod_attribute_cert = _buildOid(id_mod, 12)

id_aca_accessIdentity = _buildOid(id_aca, 2)


class RoleSyntax(univ.Sequence):
    """RoleSyntax ::= SEQUENCE { roleAuthority [0] OPTIONAL, roleName [1] }"""
    pass


RoleSyntax.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('roleName',
                        rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
+
id_aca_chargingIdentity = _buildOid(id_aca, 3)


class ACClearAttrs(univ.Sequence):
    """ACClearAttrs ::= SEQUENCE -- attributes cleared for a given AC."""
    pass


ACClearAttrs.componentType = namedtype.NamedTypes(
    namedtype.NamedType('acIssuer', rfc3280.GeneralName()),
    namedtype.NamedType('acSerial', univ.Integer()),
    namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute()))
)

id_aca_group = _buildOid(id_aca, 4)

id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10)
+
+
class SvceAuthInfo(univ.Sequence):
    """SvceAuthInfo ::= SEQUENCE { service, ident, authInfo OPTIONAL }"""
    pass


SvceAuthInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('service', rfc3280.GeneralName()),
    namedtype.NamedType('ident', rfc3280.GeneralName()),
    namedtype.OptionalNamedType('authInfo', univ.OctetString())
)


class IetfAttrSyntax(univ.Sequence):
    """IetfAttrSyntax ::= SEQUENCE -- each value is OCTET STRING, OID or UTF8String."""
    pass


IetfAttrSyntax.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType(
        'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
    ),
    # values SEQUENCE OF CHOICE { octets, oid, string }
    namedtype.NamedType(
        'values', univ.SequenceOf(
            componentType=univ.Choice(
                componentType=namedtype.NamedTypes(
                    namedtype.NamedType('octets', univ.OctetString()),
                    namedtype.NamedType('oid', univ.ObjectIdentifier()),
                    namedtype.NamedType('string', char.UTF8String())
                )
            )
        )
    )
)

id_aca_encAttrs = _buildOid(id_aca, 6)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3370.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3370.py
new file mode 100644
index 0000000000..51a9d5c5b1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3370.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS) Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3370.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5753
+from pyasn1_modules import rfc5990
+from pyasn1_modules import rfc8018
+
+
# Imports from RFC 5280

AlgorithmIdentifier = rfc5280.AlgorithmIdentifier


# Imports from RFC 3279.  The extra lower-case / underscore aliases keep
# both spellings used across the RFC text available from this module.

dhpublicnumber = rfc3279.dhpublicnumber

dh_public_number = dhpublicnumber

DHPublicKey = rfc3279.DHPublicKey

DomainParameters = rfc3279.DomainParameters

DHDomainParameters = DomainParameters

Dss_Parms = rfc3279.Dss_Parms

Dss_Sig_Value = rfc3279.Dss_Sig_Value

md5 = rfc3279.md5

md5WithRSAEncryption = rfc3279.md5WithRSAEncryption

RSAPublicKey = rfc3279.RSAPublicKey

rsaEncryption = rfc3279.rsaEncryption

ValidationParms = rfc3279.ValidationParms

id_dsa = rfc3279.id_dsa

id_dsa_with_sha1 = rfc3279.id_dsa_with_sha1

id_sha1 = rfc3279.id_sha1

sha_1 = id_sha1

sha1WithRSAEncryption = rfc3279.sha1WithRSAEncryption
+
+
# Imports from RFC 5753.
#
# Fix: RFC 3370 uses both names (CBCParameter ::= IV), but the second
# assignment here used to rebind CBCParameter to rfc5753.IV, silently
# clobbering the line above and leaving IV undefined in this module.

CBCParameter = rfc5753.CBCParameter

IV = rfc5753.IV

KeyWrapAlgorithm = rfc5753.KeyWrapAlgorithm
+
+
# Imports from RFC 5990

id_alg_CMS3DESwrap = rfc5990.id_alg_CMS3DESwrap


# Imports from RFC 8018

des_EDE3_CBC = rfc8018.des_EDE3_CBC

des_ede3_cbc = des_EDE3_CBC

rc2CBC = rfc8018.rc2CBC

rc2_cbc = rc2CBC

RC2_CBC_Parameter = rfc8018.RC2_CBC_Parameter

RC2CBCParameter = RC2_CBC_Parameter

PBKDF2_params = rfc8018.PBKDF2_params

id_PBKDF2 = rfc8018.id_PBKDF2


# The few things that are not already defined elsewhere

hMAC_SHA1 = univ.ObjectIdentifier('1.3.6.1.5.5.8.1.2')


id_alg_ESDH = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.5')


id_alg_SSDH = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.10')


id_alg_CMSRC2wrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.7')
+
+
class RC2ParameterVersion(univ.Integer):
    """RC2ParameterVersion ::= INTEGER"""
    pass


class RC2wrapParameter(RC2ParameterVersion):
    """RC2wrapParameter ::= RC2ParameterVersion"""
    pass


class Dss_Pub_Key(univ.Integer):
    """Dss-Pub-Key ::= INTEGER"""
    pass
+
+
# Update the Algorithm Identifier map in rfc5280.py.

_algorithmIdentifierMapUpdate = {
    # hMAC-SHA1 takes an explicit ASN.1 NULL parameter.
    hMAC_SHA1: univ.Null(""),
    id_alg_CMSRC2wrap: RC2wrapParameter(),
    id_alg_ESDH: KeyWrapAlgorithm(),
    id_alg_SSDH: KeyWrapAlgorithm(),
}

rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)


# Update the S/MIME Capabilities map in rfc5751.py.

_smimeCapabilityMapUpdate = {
    id_alg_CMSRC2wrap: RC2wrapParameter(),
    id_alg_ESDH: KeyWrapAlgorithm(),
    id_alg_SSDH: KeyWrapAlgorithm(),
}

rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3412.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3412.py
new file mode 100644
index 0000000000..2cf1e1020f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3412.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv3 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3412.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1905
+
+
class ScopedPDU(univ.Sequence):
    """ScopedPDU ::= SEQUENCE -- an SNMP PDU bound to a context (RFC 3412)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('contextEngineId', univ.OctetString()),
        namedtype.NamedType('contextName', univ.OctetString()),
        namedtype.NamedType('data', rfc1905.PDUs())
    )
+
+
class ScopedPduData(univ.Choice):
    """ScopedPduData ::= CHOICE -- plaintext ScopedPDU or its encrypted form."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('plaintext', ScopedPDU()),
        namedtype.NamedType('encryptedPDU', univ.OctetString()),
    )
+
+
class HeaderData(univ.Sequence):
    """HeaderData ::= SEQUENCE -- SNMPv3 administrative message header."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('msgID',
                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
        # msgMaxSize INTEGER (484..2147483647) -- RFC-specified lower bound.
        namedtype.NamedType('msgMaxSize',
                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(484, 2147483647))),
        # msgFlags OCTET STRING (SIZE(1))
        namedtype.NamedType('msgFlags', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 1))),
        namedtype.NamedType('msgSecurityModel',
                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 2147483647)))
    )
+
+
class SNMPv3Message(univ.Sequence):
    """SNMPv3Message ::= SEQUENCE -- top-level SNMPv3 message wrapper."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('msgVersion',
                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
        namedtype.NamedType('msgGlobalData', HeaderData()),
        # Opaque; its content is model-specific (e.g. UsmSecurityParameters).
        namedtype.NamedType('msgSecurityParameters', univ.OctetString()),
        namedtype.NamedType('msgData', ScopedPduData())
    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3414.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3414.py
new file mode 100644
index 0000000000..00420cb01c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3414.py
@@ -0,0 +1,28 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv3 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3414.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+
class UsmSecurityParameters(univ.Sequence):
    """UsmSecurityParameters ::= SEQUENCE -- USM security fields (RFC 3414)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()),
        namedtype.NamedType('msgAuthoritativeEngineBoots',
                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
        namedtype.NamedType('msgAuthoritativeEngineTime',
                            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
        # msgUserName OCTET STRING (SIZE(0..32))
        namedtype.NamedType('msgUserName',
                            univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
        namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
        namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3447.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3447.py
new file mode 100644
index 0000000000..3352b70c9e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3447.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.asn
+#
+# Sample captures could be obtained with "openssl genrsa" command
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+
+from pyasn1_modules.rfc2437 import *
+
+
class OtherPrimeInfo(univ.Sequence):
    """OtherPrimeInfo ::= SEQUENCE -- extra prime data for multi-prime RSA."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('prime', univ.Integer()),
        namedtype.NamedType('exponent', univ.Integer()),
        namedtype.NamedType('coefficient', univ.Integer())
    )
+
+
class OtherPrimeInfos(univ.SequenceOf):
    """OtherPrimeInfos ::= SEQUENCE SIZE(1..MAX) OF OtherPrimeInfo"""
    componentType = OtherPrimeInfo()
    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
class RSAPrivateKey(univ.Sequence):
    """RSAPrivateKey ::= SEQUENCE -- PKCS#1 private-key syntax.

    version is two-prime(0) or multi(1); otherPrimeInfos is present
    only for multi-prime keys.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('two-prime', 0), ('multi', 1)))),
        namedtype.NamedType('modulus', univ.Integer()),
        namedtype.NamedType('publicExponent', univ.Integer()),
        namedtype.NamedType('privateExponent', univ.Integer()),
        namedtype.NamedType('prime1', univ.Integer()),
        namedtype.NamedType('prime2', univ.Integer()),
        namedtype.NamedType('exponent1', univ.Integer()),
        namedtype.NamedType('exponent2', univ.Integer()),
        namedtype.NamedType('coefficient', univ.Integer()),
        namedtype.OptionalNamedType('otherPrimeInfos', OtherPrimeInfos())
    )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3537.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3537.py
new file mode 100644
index 0000000000..374dd8193c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3537.py
@@ -0,0 +1,34 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Wrapping an HMAC key with a Triple-DES Key or an AES Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3537.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
# HMAC-key-wrap algorithm identifiers (RFC 3537).
id_alg_HMACwith3DESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.11')


id_alg_HMACwithAESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.12')


# Update the Algorithm Identifier map in rfc5280.py.

_algorithmIdentifierMapUpdate = {
    # Both algorithms take an explicit ASN.1 NULL parameter.
    id_alg_HMACwith3DESwrap: univ.Null(""),
    id_alg_HMACwithAESwrap: univ.Null(""),
}

rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3560.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3560.py
new file mode 100644
index 0000000000..8365436df5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3560.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RSAES-OAEP Key Transport Algorithm in CMS
+#
+# Notice that all of the things needed in RFC 3560 are also defined
+# in RFC 4055. So, they are all pulled from the RFC 4055 module into
+# this one so that people looking a RFC 3560 can easily find them.
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3560.txt
+#
+
+from pyasn1_modules import rfc4055
+
# Everything RFC 3560 needs is already defined in the RFC 4055 module;
# re-export it here so readers of RFC 3560 can find it in this module.

id_sha1 = rfc4055.id_sha1

id_sha256 = rfc4055.id_sha256

id_sha384 = rfc4055.id_sha384

id_sha512 = rfc4055.id_sha512

id_mgf1 = rfc4055.id_mgf1

rsaEncryption = rfc4055.rsaEncryption

id_RSAES_OAEP = rfc4055.id_RSAES_OAEP

id_pSpecified = rfc4055.id_pSpecified

sha1Identifier = rfc4055.sha1Identifier

sha256Identifier = rfc4055.sha256Identifier

sha384Identifier = rfc4055.sha384Identifier

sha512Identifier = rfc4055.sha512Identifier

mgf1SHA1Identifier = rfc4055.mgf1SHA1Identifier

mgf1SHA256Identifier = rfc4055.mgf1SHA256Identifier

mgf1SHA384Identifier = rfc4055.mgf1SHA384Identifier

mgf1SHA512Identifier = rfc4055.mgf1SHA512Identifier

pSpecifiedEmptyIdentifier = rfc4055.pSpecifiedEmptyIdentifier
+
+
class RSAES_OAEP_params(rfc4055.RSAES_OAEP_params):
    """RSAES-OAEP-params, inherited unchanged from the RFC 4055 module."""
    pass


# An instance with all defaults represents the default parameter set.
rSAES_OAEP_Default_Params = RSAES_OAEP_params()

rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier

rSAES_OAEP_SHA256_Params = rfc4055.rSAES_OAEP_SHA256_Params

rSAES_OAEP_SHA256_Identifier = rfc4055.rSAES_OAEP_SHA256_Identifier

rSAES_OAEP_SHA384_Params = rfc4055.rSAES_OAEP_SHA384_Params

rSAES_OAEP_SHA384_Identifier = rfc4055.rSAES_OAEP_SHA384_Identifier

rSAES_OAEP_SHA512_Params = rfc4055.rSAES_OAEP_SHA512_Params

rSAES_OAEP_SHA512_Identifier = rfc4055.rSAES_OAEP_SHA512_Identifier
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3565.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3565.py
new file mode 100644
index 0000000000..ec75e23489
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3565.py
@@ -0,0 +1,57 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Use of the Advanced Encryption Standard (AES) Encryption
+# Algorithm in the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3565.txt
+
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
    """AlgorithmIdentifier, inherited unchanged from the RFC 5280 module."""
    pass
+
+
class AES_IV(univ.OctetString):
    """AES-IV ::= OCTET STRING (SIZE(16)) -- CBC initialization vector."""

    # Declared in the class body rather than assigned after the class
    # statement; either way AES_IV.subtypeSpec ends up as this constraint.
    subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
# AES CBC-mode algorithm identifiers (NIST arc 2.16.840.1.101.3.4.1).
id_aes128_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.2')

id_aes192_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.22')

id_aes256_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.42')


# AES key-wrap algorithm identifiers.
id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5')

id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25')

id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45')
+
+
# Update the Algorithm Identifier map

_algorithmIdentifierMapUpdate = {
    # CBC modes carry a 16-octet IV parameter; key-wrap modes take NULL.
    id_aes128_CBC: AES_IV(),
    id_aes192_CBC: AES_IV(),
    id_aes256_CBC: AES_IV(),
    id_aes128_wrap: univ.Null(),
    id_aes192_wrap: univ.Null(),
    id_aes256_wrap: univ.Null(),
}

rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3657.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3657.py
new file mode 100644
index 0000000000..ebf23dabcb
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3657.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Camellia Algorithm in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3657.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+
+
# Camellia CBC-mode algorithm identifiers.
id_camellia128_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.2')

id_camellia192_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.3')

id_camellia256_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.4')

# Camellia key-wrap algorithm identifiers.
id_camellia128_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.2')

id_camellia192_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.3')

id_camellia256_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.4')
+
+
+
class Camellia_IV(univ.OctetString):
    """Camellia-IV ::= OCTET STRING (SIZE(16))"""
    subtypeSpec = constraint.ValueSizeConstraint(16, 16)


class CamelliaSMimeCapability(univ.Null):
    """S/MIME capability parameter for Camellia algorithms (ASN.1 NULL)."""
    pass
+
+
# Update the Algorithm Identifier map in rfc5280.py.
# Only the CBC modes are registered here; the wrap algorithms are added
# to the S/MIME capability map below.

_algorithmIdentifierMapUpdate = {
    id_camellia128_cbc: Camellia_IV(),
    id_camellia192_cbc: Camellia_IV(),
    id_camellia256_cbc: Camellia_IV(),
}

rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)


# Update the SMIMECapabilities Attribute map in rfc5751.py

_smimeCapabilityMapUpdate = {
    id_camellia128_cbc: CamelliaSMimeCapability(),
    id_camellia192_cbc: CamelliaSMimeCapability(),
    id_camellia256_cbc: CamelliaSMimeCapability(),
    id_camellia128_wrap: CamelliaSMimeCapability(),
    id_camellia192_wrap: CamelliaSMimeCapability(),
    id_camellia256_wrap: CamelliaSMimeCapability(),
}

rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3709.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3709.py
new file mode 100644
index 0000000000..aa1d5b6abf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3709.py
@@ -0,0 +1,207 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Logotypes in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3709.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6170
+
+MAX = float('inf')
+
+
class HashAlgAndValue(univ.Sequence):
    """HashAlgAndValue ::= SEQUENCE { hashAlg, hashValue }"""
    pass

HashAlgAndValue.componentType = namedtype.NamedTypes(
    namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
    namedtype.NamedType('hashValue', univ.OctetString())
)
+
+
class LogotypeDetails(univ.Sequence):
    """LogotypeDetails ::= SEQUENCE -- media type, hashes and URIs of a logotype."""
    pass

LogotypeDetails.componentType = namedtype.NamedTypes(
    namedtype.NamedType('mediaType', char.IA5String()),
    # logotypeHash SEQUENCE SIZE(1..MAX) OF HashAlgAndValue
    namedtype.NamedType('logotypeHash', univ.SequenceOf(
        componentType=HashAlgAndValue()).subtype(
        sizeSpec=constraint.ValueSizeConstraint(1, MAX))),
    # logotypeURI SEQUENCE SIZE(1..MAX) OF IA5String
    namedtype.NamedType('logotypeURI', univ.SequenceOf(
        componentType=char.IA5String()).subtype(
        sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
+
+
class LogotypeAudioInfo(univ.Sequence):
    """LogotypeAudioInfo ::= SEQUENCE -- metadata for an audio logotype."""
    pass

LogotypeAudioInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('fileSize', univ.Integer()),
    namedtype.NamedType('playTime', univ.Integer()),
    namedtype.NamedType('channels', univ.Integer()),
    # sampleRate [3] INTEGER OPTIONAL
    namedtype.OptionalNamedType('sampleRate', univ.Integer().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
    # language [4] IA5String OPTIONAL
    namedtype.OptionalNamedType('language', char.IA5String().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
)


class LogotypeAudio(univ.Sequence):
    """LogotypeAudio ::= SEQUENCE { audioDetails, audioInfo OPTIONAL }"""
    pass

LogotypeAudio.componentType = namedtype.NamedTypes(
    namedtype.NamedType('audioDetails', LogotypeDetails()),
    namedtype.OptionalNamedType('audioInfo', LogotypeAudioInfo())
)
+
+
class LogotypeImageType(univ.Integer):
    """LogotypeImageType ::= INTEGER { grayScale(0), color(1) }"""
    pass

LogotypeImageType.namedValues = namedval.NamedValues(
    ('grayScale', 0),
    ('color', 1)
)


class LogotypeImageResolution(univ.Choice):
    """LogotypeImageResolution ::= CHOICE { numBits [1], tableSize [2] }"""
    pass

LogotypeImageResolution.componentType = namedtype.NamedTypes(
    namedtype.NamedType('numBits',
                        univ.Integer().subtype(implicitTag=tag.Tag(
                            tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.NamedType('tableSize',
                        univ.Integer().subtype(implicitTag=tag.Tag(
                            tag.tagClassContext, tag.tagFormatSimple, 2)))
)
+
+
class LogotypeImageInfo(univ.Sequence):
    """LogotypeImageInfo ::= SEQUENCE -- metadata for an image logotype."""
    pass

LogotypeImageInfo.componentType = namedtype.NamedTypes(
    # type [0] LogotypeImageType DEFAULT color
    namedtype.DefaultedNamedType('type', LogotypeImageType().subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
                            tag.tagFormatSimple, 0)).subtype(value='color')),
    namedtype.NamedType('fileSize', univ.Integer()),
    namedtype.NamedType('xSize', univ.Integer()),
    namedtype.NamedType('ySize', univ.Integer()),
    namedtype.OptionalNamedType('resolution', LogotypeImageResolution()),
    # language [4] IA5String OPTIONAL
    namedtype.OptionalNamedType('language', char.IA5String().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
)


class LogotypeImage(univ.Sequence):
    """LogotypeImage ::= SEQUENCE { imageDetails, imageInfo OPTIONAL }"""
    pass

LogotypeImage.componentType = namedtype.NamedTypes(
    namedtype.NamedType('imageDetails', LogotypeDetails()),
    namedtype.OptionalNamedType('imageInfo', LogotypeImageInfo())
)
+
+
class LogotypeData(univ.Sequence):
    """LogotypeData ::= SEQUENCE -- in-band logotype images and/or audio."""
    pass

LogotypeData.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('image', univ.SequenceOf(
        componentType=LogotypeImage())),
    # audio [1] SEQUENCE OF LogotypeAudio OPTIONAL
    namedtype.OptionalNamedType('audio', univ.SequenceOf(
        componentType=LogotypeAudio()).subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
                            tag.tagFormatSimple, 1)))
)


class LogotypeReference(univ.Sequence):
    """LogotypeReference ::= SEQUENCE -- indirect (by-hash/by-URI) logotype."""
    pass

LogotypeReference.componentType = namedtype.NamedTypes(
    namedtype.NamedType('refStructHash', univ.SequenceOf(
        componentType=HashAlgAndValue()).subtype(
        sizeSpec=constraint.ValueSizeConstraint(1, MAX))),
    namedtype.NamedType('refStructURI', univ.SequenceOf(
        componentType=char.IA5String()).subtype(
        sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
+
+
class LogotypeInfo(univ.Choice):
    """LogotypeInfo ::= CHOICE { direct [0] LogotypeData, indirect [1] LogotypeReference }"""
    pass

LogotypeInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('direct',
                        LogotypeData().subtype(implicitTag=tag.Tag(tag.tagClassContext,
                                                                   tag.tagFormatConstructed, 0))),
    namedtype.NamedType('indirect', LogotypeReference().subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
                            tag.tagFormatConstructed, 1)))
)

# Other logotype type and associated object identifiers

id_logo_background = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.2')

id_logo_loyalty = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.1')

id_logo_certImage = rfc6170.id_logo_certImage


class OtherLogotypeInfo(univ.Sequence):
    """OtherLogotypeInfo ::= SEQUENCE { logotypeType OID, info LogotypeInfo }"""
    pass

OtherLogotypeInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('logotypeType', univ.ObjectIdentifier()),
    namedtype.NamedType('info', LogotypeInfo())
)
+
+
# Logotype Certificate Extension

id_pe_logotype = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.12')


class LogotypeExtn(univ.Sequence):
    """LogotypeExtn ::= SEQUENCE -- the logotype extension value.

    Note every component is explicitly (not implicitly) tagged.
    """
    pass

LogotypeExtn.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('communityLogos', univ.SequenceOf(
        componentType=LogotypeInfo()).subtype(
        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('issuerLogo', LogotypeInfo().subtype(
        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
    namedtype.OptionalNamedType('subjectLogo', LogotypeInfo().subtype(
        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
    namedtype.OptionalNamedType('otherLogos', univ.SequenceOf(
        componentType=OtherLogotypeInfo()).subtype(explicitTag=tag.Tag(
        tag.tagClassContext, tag.tagFormatSimple, 3)))
)


# Map of Certificate Extension OIDs to Extensions added to the
# ones that are in rfc5280.py

_certificateExtensionsMapUpdate = {
    id_pe_logotype: LogotypeExtn(),
}

rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3739.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3739.py
new file mode 100644
index 0000000000..4aa5aaf0de
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3739.py
@@ -0,0 +1,203 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add WithComponentsConstraints to
+# enforce the requirements that are indicated in comments.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Qualified Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3739.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
# Initialize the qcStatement map; populated by QC-statement modules and
# consumed via the OpenType in QCStatement below.

qcStatementMap = { }


# Imports from RFC 5280

AlgorithmIdentifier = rfc5280.AlgorithmIdentifier

AttributeType = rfc5280.AttributeType

DirectoryString = rfc5280.DirectoryString

GeneralName = rfc5280.GeneralName

id_pkix = rfc5280.id_pkix

id_pe = rfc5280.id_pe
+
+
# Arc for QC personal data attributes

id_pda = id_pkix + (9, )


# Arc for QC statements

id_qcs = id_pkix + (11, )


# Personal data attributes

id_pda_dateOfBirth = id_pda + (1, )

class DateOfBirth(useful.GeneralizedTime):
    """DateOfBirth ::= GeneralizedTime"""
    pass


id_pda_placeOfBirth = id_pda + (2, )

class PlaceOfBirth(DirectoryString):
    """PlaceOfBirth ::= DirectoryString"""
    pass


id_pda_gender = id_pda + (3, )

class Gender(char.PrintableString):
    """Gender ::= PrintableString (SIZE(1)), one of M/F/m/f."""
    subtypeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1),
        constraint.SingleValueConstraint('M', 'F', 'm', 'f')
    )


id_pda_countryOfCitizenship = id_pda + (4, )

class CountryOfCitizenship(char.PrintableString):
    """CountryOfCitizenship ::= PrintableString (SIZE(2))"""
    subtypeSpec = constraint.ValueSizeConstraint(2, 2)
    # ISO 3166 Country Code


id_pda_countryOfResidence = id_pda + (5, )

class CountryOfResidence(char.PrintableString):
    """CountryOfResidence ::= PrintableString (SIZE(2))"""
    subtypeSpec = constraint.ValueSizeConstraint(2, 2)
    # ISO 3166 Country Code
+
+
# Biometric info certificate extension

id_pe_biometricInfo = id_pe + (2, )


class PredefinedBiometricType(univ.Integer):
    """PredefinedBiometricType ::= INTEGER { picture(0), handwritten-signature(1) }"""
    namedValues = namedval.NamedValues(
        ('picture', 0),
        ('handwritten-signature', 1)
    )
    subtypeSpec = constraint.SingleValueConstraint(0, 1)


class TypeOfBiometricData(univ.Choice):
    """TypeOfBiometricData ::= CHOICE -- predefined code or private OID."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('predefinedBiometricType', PredefinedBiometricType()),
        namedtype.NamedType('biometricDataOid', univ.ObjectIdentifier())
    )


class BiometricData(univ.Sequence):
    """BiometricData ::= SEQUENCE -- hash of biometric data plus its source."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('typeOfBiometricData', TypeOfBiometricData()),
        namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
        namedtype.NamedType('biometricDataHash', univ.OctetString()),
        namedtype.OptionalNamedType('sourceDataUri', char.IA5String())
    )


class BiometricSyntax(univ.SequenceOf):
    """BiometricSyntax ::= SEQUENCE OF BiometricData"""
    componentType = BiometricData()
+
+
# QC Statements certificate extension
# NOTE: This extension does not allow to mix critical and
# non-critical Qualified Certificate Statements. Either all
# statements must be critical or all statements must be
# non-critical.

id_pe_qcStatements = id_pe + (3, )


class NameRegistrationAuthorities(univ.SequenceOf):
    """NameRegistrationAuthorities ::= SEQUENCE SIZE(1..MAX) OF GeneralName"""
    componentType = GeneralName()
    subtypeSpec=constraint.ValueSizeConstraint(1, MAX)


class QCStatement(univ.Sequence):
    """QCStatement ::= SEQUENCE -- statementInfo decoded via qcStatementMap."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('statementId', univ.ObjectIdentifier()),
        namedtype.OptionalNamedType('statementInfo', univ.Any(),
                                    openType=opentype.OpenType('statementId', qcStatementMap))
    )


class QCStatements(univ.SequenceOf):
    """QCStatements ::= SEQUENCE OF QCStatement"""
    componentType = QCStatement()


class SemanticsInformation(univ.Sequence):
    """SemanticsInformation ::= SEQUENCE -- at least one component must be present.

    NOTE(review): the component name 'semanticsIndentifier' is misspelled
    ('Indentifier'); it is a wire/name-compatibility string, so it must
    NOT be corrected without coordinating with all users of this module.
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('semanticsIndentifier',
                                    univ.ObjectIdentifier()),
        namedtype.OptionalNamedType('nameRegistrationAuthorities',
                                    NameRegistrationAuthorities())
    )
    # Enforces the "at least one present" requirement from the comments above.
    subtypeSpec = constraint.ConstraintsUnion(
        constraint.WithComponentsConstraint(
            ('semanticsIndentifier', constraint.ComponentPresentConstraint())),
        constraint.WithComponentsConstraint(
            ('nameRegistrationAuthorities', constraint.ComponentPresentConstraint()))
    )
+
+
# id_qcs (id_pkix + (11,)) is already defined earlier in this module; the
# duplicate reassignment that previously sat here was redundant dead code
# and has been removed.

id_qcs_pkixQCSyntax_v1 = id_qcs + (1, )


id_qcs_pkixQCSyntax_v2 = id_qcs + (2, )
+
+
# Map of Certificate Extension OIDs to Extensions
# To be added to the ones that are in rfc5280.py

_certificateExtensionsMap = {
    id_pe_biometricInfo: BiometricSyntax(),
    id_pe_qcStatements: QCStatements(),
}

rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)


# Map of AttributeType OIDs to AttributeValue added to the
# ones that are in rfc5280.py

_certificateAttributesMapUpdate = {
    id_pda_dateOfBirth: DateOfBirth(),
    id_pda_placeOfBirth: PlaceOfBirth(),
    id_pda_gender: Gender(),
    id_pda_countryOfCitizenship: CountryOfCitizenship(),
    id_pda_countryOfResidence: CountryOfResidence(),
}

rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3770.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3770.py
new file mode 100644
index 0000000000..3fefe1d90e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3770.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3770.txt
+# https://www.rfc-editor.org/errata/eid234
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+MAX = float('inf')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverLAN = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.14')
+
+id_kp_eapOverPPP = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.13')
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.13')
+
+
+class SSID(univ.OctetString):
+ pass
+
+SSID.subtypeSpec = constraint.ValueSizeConstraint(1, 32)
+
+
+class SSIDList(univ.SequenceOf):
+ pass
+
+SSIDList.componentType = SSID()
+SSIDList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Wireless LAN SSID Attribute Certificate Attribute
+# Uses same syntax as the certificate extension: SSIDList
+# Correction for https://www.rfc-editor.org/errata/eid234
+
+id_aca_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.10.7')
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3779.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3779.py
new file mode 100644
index 0000000000..8e6eaa3e7b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3779.py
@@ -0,0 +1,137 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Extensions for IP Addresses and AS Identifiers
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3779.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# IP Address Delegation Extension
+
+id_pe_ipAddrBlocks = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.7')
+
+
+class IPAddress(univ.BitString):
+ pass
+
+
+class IPAddressRange(univ.Sequence):
+ pass
+
+IPAddressRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('min', IPAddress()),
+ namedtype.NamedType('max', IPAddress())
+)
+
+
+class IPAddressOrRange(univ.Choice):
+ pass
+
+IPAddressOrRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressPrefix', IPAddress()),
+ namedtype.NamedType('addressRange', IPAddressRange())
+)
+
+
+class IPAddressChoice(univ.Choice):
+ pass
+
+IPAddressChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('inherit', univ.Null()),
+ namedtype.NamedType('addressesOrRanges', univ.SequenceOf(
+ componentType=IPAddressOrRange())
+ )
+)
+
+
+class IPAddressFamily(univ.Sequence):
+ pass
+
+IPAddressFamily.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressFamily', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
+ namedtype.NamedType('ipAddressChoice', IPAddressChoice())
+)
+
+
+class IPAddrBlocks(univ.SequenceOf):
+ pass
+
+IPAddrBlocks.componentType = IPAddressFamily()
+
+
+# Autonomous System Identifier Delegation Extension
+
+id_pe_autonomousSysIds = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.8')
+
+
+class ASId(univ.Integer):
+ pass
+
+
+class ASRange(univ.Sequence):
+ pass
+
+ASRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('min', ASId()),
+ namedtype.NamedType('max', ASId())
+)
+
+
+class ASIdOrRange(univ.Choice):
+ pass
+
+ASIdOrRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', ASId()),
+ namedtype.NamedType('range', ASRange())
+)
+
+
+class ASIdentifierChoice(univ.Choice):
+ pass
+
+ASIdentifierChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('inherit', univ.Null()),
+ namedtype.NamedType('asIdsOrRanges', univ.SequenceOf(
+ componentType=ASIdOrRange())
+ )
+)
+
+
+class ASIdentifiers(univ.Sequence):
+ pass
+
+ASIdentifiers.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('asnum', ASIdentifierChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('rdi', ASIdentifierChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+# Map of Certificate Extension OIDs to Extensions is added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ipAddrBlocks: IPAddrBlocks(),
+ id_pe_autonomousSysIds: ASIdentifiers(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3820.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3820.py
new file mode 100644
index 0000000000..b4ba34c05c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3820.py
@@ -0,0 +1,65 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 PKI Proxy Certificate Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3820.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+
+class ProxyCertPathLengthConstraint(univ.Integer):
+ pass
+
+
+class ProxyPolicy(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyLanguage', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('policy', univ.OctetString())
+ )
+
+
+class ProxyCertInfoExtension(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pCPathLenConstraint',
+ ProxyCertPathLengthConstraint()),
+ namedtype.NamedType('proxyPolicy', ProxyPolicy())
+ )
+
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+
+id_pe = id_pkix + (1, )
+
+id_pe_proxyCertInfo = id_pe + (14, )
+
+
+id_ppl = id_pkix + (21, )
+
+id_ppl_anyLanguage = id_ppl + (0, )
+
+id_ppl_inheritAll = id_ppl + (1, )
+
+id_ppl_independent = id_ppl + (2, )
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_proxyCertInfo: ProxyCertInfoExtension(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3852.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3852.py
new file mode 100644
index 0000000000..cf1bb85ad8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc3852.py
@@ -0,0 +1,706 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3852.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3280
+from pyasn1_modules import rfc3281
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
+)
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any())
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', rfc3280.CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any())
+)
+
+id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any())
+)
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.Name()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class DigestAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
+
+id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
+
+id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class DigestedData(univ.Sequence):
+ pass
+
+
+DigestedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.NamedType('digest', Digest())
+)
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class SignatureAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any())
+)
+
+
+class AttributeCertificateV2(rfc3281.AttributeCertificate):
+ pass
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
+ namedtype.NamedType(
+ 'subject', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subjectName', rfc3280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('issuer', rfc3280.GeneralNames()),
+ namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+class MessageAuthenticationCode(univ.OctetString):
+ pass
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+
+class MessageAuthenticationCodeAlgorithm(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AuthAttributes(univ.SetOf):
+ pass
+
+
+AuthAttributes.componentType = Attribute()
+AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class AuthenticatedData(univ.Sequence):
+ pass
+
+
+AuthenticatedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
+ namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('mac', MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
+
+id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
+
+
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Countersignature(SignerInfo):
+ pass
+
+
+id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
+
+id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
+
+id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
+
+
+class SigningTime(Time):
+ pass
+
+
+id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4010.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4010.py
new file mode 100644
index 0000000000..4981f76bed
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4010.py
@@ -0,0 +1,58 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SEED Encryption Algorithm in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4010.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+
+
+id_seedCBC = univ.ObjectIdentifier('1.2.410.200004.1.4')
+
+
+id_npki_app_cmsSeed_wrap = univ.ObjectIdentifier('1.2.410.200004.7.1.1.1')
+
+
+class SeedIV(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+class SeedCBCParameter(SeedIV):
+ pass
+
+
+class SeedSMimeCapability(univ.Null):
+ pass
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_seedCBC: SeedCBCParameter(),
+ id_npki_app_cmsSeed_wrap: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the SMIMECapabilities Attribute map in rfc5751.py
+
+_smimeCapabilityMapUpdate = {
+ id_seedCBC: SeedSMimeCapability(),
+ id_npki_app_cmsSeed_wrap: SeedSMimeCapability(),
+
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4043.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4043.py
new file mode 100644
index 0000000000..cf0a801419
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4043.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Permanent Identifier
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4043.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+id_on = id_pkix + (8, )
+
+id_on_permanentIdentifier = id_on + (3, )
+
+
+class PermanentIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('identifierValue', char.UTF8String()),
+ namedtype.OptionalNamedType('assigner', univ.ObjectIdentifier())
+ )
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_permanentIdentifier: PermanentIdentifier(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4055.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4055.py
new file mode 100644
index 0000000000..bdc128632a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4055.py
@@ -0,0 +1,258 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with a very small amount of assistance from
+# asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional Algorithms and Identifiers for RSA Cryptography
+# for use in Certificates and CRLs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4055.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+rsaEncryption = _OID(1, 2, 840, 113549, 1, 1, 1)
+
+id_mgf1 = _OID(1, 2, 840, 113549, 1, 1, 8)
+
+id_RSAES_OAEP = _OID(1, 2, 840, 113549, 1, 1, 7)
+
+id_pSpecified = _OID(1, 2, 840, 113549, 1, 1, 9)
+
+id_RSASSA_PSS = _OID(1, 2, 840, 113549, 1, 1, 10)
+
+sha256WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 11)
+
+sha384WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 12)
+
+sha512WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 13)
+
+sha224WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 14)
+
+sha1Identifier = rfc5280.AlgorithmIdentifier()
+sha1Identifier['algorithm'] = id_sha1
+sha1Identifier['parameters'] = univ.Null("")
+
+sha224Identifier = rfc5280.AlgorithmIdentifier()
+sha224Identifier['algorithm'] = id_sha224
+sha224Identifier['parameters'] = univ.Null("")
+
+sha256Identifier = rfc5280.AlgorithmIdentifier()
+sha256Identifier['algorithm'] = id_sha256
+sha256Identifier['parameters'] = univ.Null("")
+
+sha384Identifier = rfc5280.AlgorithmIdentifier()
+sha384Identifier['algorithm'] = id_sha384
+sha384Identifier['parameters'] = univ.Null("")
+
+sha512Identifier = rfc5280.AlgorithmIdentifier()
+sha512Identifier['algorithm'] = id_sha512
+sha512Identifier['parameters'] = univ.Null("")
+
+mgf1SHA1Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA1Identifier['algorithm'] = id_mgf1
+mgf1SHA1Identifier['parameters'] = sha1Identifier
+
+mgf1SHA224Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA224Identifier['algorithm'] = id_mgf1
+mgf1SHA224Identifier['parameters'] = sha224Identifier
+
+mgf1SHA256Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA256Identifier['algorithm'] = id_mgf1
+mgf1SHA256Identifier['parameters'] = sha256Identifier
+
+mgf1SHA384Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA384Identifier['algorithm'] = id_mgf1
+mgf1SHA384Identifier['parameters'] = sha384Identifier
+
+mgf1SHA512Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA512Identifier['algorithm'] = id_mgf1
+mgf1SHA512Identifier['parameters'] = sha512Identifier
+
+pSpecifiedEmptyIdentifier = rfc5280.AlgorithmIdentifier()
+pSpecifiedEmptyIdentifier['algorithm'] = id_pSpecified
+pSpecifiedEmptyIdentifier['parameters'] = univ.OctetString(value='')
+
+
+class RSAPublicKey(univ.Sequence):
+ pass
+
+RSAPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+)
+
+
+class HashAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class MaskGenAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class RSAES_OAEP_params(univ.Sequence):
+ pass
+
+RSAES_OAEP_params.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('hashFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maskGenFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('pSourceFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+rSAES_OAEP_Default_Params = RSAES_OAEP_params()
+
+rSAES_OAEP_Default_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_Default_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_Default_Identifier['parameters'] = rSAES_OAEP_Default_Params
+
+rSAES_OAEP_SHA224_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA224_Params['hashFunc'] = sha224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA224_Params['maskGenFunc'] = mgf1SHA224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA224_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA224_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA224_Identifier['parameters'] = rSAES_OAEP_SHA224_Params
+
+rSAES_OAEP_SHA256_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA256_Params['hashFunc'] = sha256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA256_Params['maskGenFunc'] = mgf1SHA256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA256_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA256_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA256_Identifier['parameters'] = rSAES_OAEP_SHA256_Params
+
+rSAES_OAEP_SHA384_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA384_Params['hashFunc'] = sha384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA384_Params['maskGenFunc'] = mgf1SHA384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA384_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA384_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA384_Identifier['parameters'] = rSAES_OAEP_SHA384_Params
+
+rSAES_OAEP_SHA512_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA512_Params['hashFunc'] = sha512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA512_Params['maskGenFunc'] = mgf1SHA512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA512_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA512_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA512_Identifier['parameters'] = rSAES_OAEP_SHA512_Params
+
+
+class RSASSA_PSS_params(univ.Sequence):
+ pass
+
+RSASSA_PSS_params.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maskGenAlgorithm', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.DefaultedNamedType('saltLength', univ.Integer(value=20).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.DefaultedNamedType('trailerField', univ.Integer(value=1).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+rSASSA_PSS_Default_Params = RSASSA_PSS_params()
+
+rSASSA_PSS_Default_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_Default_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_Default_Identifier['parameters'] = rSASSA_PSS_Default_Params
+
+rSASSA_PSS_SHA224_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA224_Params['hashAlgorithm'] = sha224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA224_Params['maskGenAlgorithm'] = mgf1SHA224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA224_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA224_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA224_Identifier['parameters'] = rSASSA_PSS_SHA224_Params
+
+rSASSA_PSS_SHA256_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA256_Params['hashAlgorithm'] = sha256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA256_Params['maskGenAlgorithm'] = mgf1SHA256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA256_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA256_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA256_Identifier['parameters'] = rSASSA_PSS_SHA256_Params
+
+rSASSA_PSS_SHA384_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA384_Params['hashAlgorithm'] = sha384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA384_Params['maskGenAlgorithm'] = mgf1SHA384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA384_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA384_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA384_Identifier['parameters'] = rSASSA_PSS_SHA384_Params
+
+rSASSA_PSS_SHA512_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA512_Params['hashAlgorithm'] = sha512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA512_Params['maskGenAlgorithm'] = mgf1SHA512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA512_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA512_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA512_Identifier['parameters'] = rSASSA_PSS_SHA512_Params
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_mgf1: rfc5280.AlgorithmIdentifier(),
+ id_pSpecified: univ.OctetString(),
+ id_RSAES_OAEP: RSAES_OAEP_params(),
+ id_RSASSA_PSS: RSASSA_PSS_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4073.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4073.py
new file mode 100644
index 0000000000..3f425b28ed
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4073.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Protecting Multiple Contents with the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4073.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Content Collection Content Type and Object Identifier
+
+id_ct_contentCollection = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.19')
+
+class ContentCollection(univ.SequenceOf):
+ pass
+
+ContentCollection.componentType = rfc5652.ContentInfo()
+ContentCollection.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Content With Attributes Content Type and Object Identifier
+
+id_ct_contentWithAttrs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.20')
+
+class ContentWithAttributes(univ.Sequence):
+ pass
+
+ContentWithAttributes.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('content', rfc5652.ContentInfo()),
+ namedtype.NamedType('attrs', univ.SequenceOf(
+ componentType=rfc5652.Attribute()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_contentCollection: ContentCollection(),
+ id_ct_contentWithAttrs: ContentWithAttributes(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4108.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4108.py
new file mode 100644
index 0000000000..ecace9e3ee
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4108.py
@@ -0,0 +1,350 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add items from the verified errata.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Firmware Wrapper
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4108.txt
+# https://www.rfc-editor.org/errata_search.php?rfc=4108
+#
+
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+class HardwareSerialEntry(univ.Choice):
+ pass
+
+HardwareSerialEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('all', univ.Null()),
+ namedtype.NamedType('single', univ.OctetString()),
+ namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('low', univ.OctetString()),
+ namedtype.NamedType('high', univ.OctetString())
+ ))
+ )
+)
+
+
+class HardwareModules(univ.Sequence):
+ pass
+
+HardwareModules.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialEntries', univ.SequenceOf(componentType=HardwareSerialEntry()))
+)
+
+
+class CommunityIdentifier(univ.Choice):
+ pass
+
+CommunityIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('communityOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwModuleList', HardwareModules())
+)
+
+
+
+class PreferredPackageIdentifier(univ.Sequence):
+ pass
+
+PreferredPackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fwPkgID', univ.ObjectIdentifier()),
+ namedtype.NamedType('verNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+
+class PreferredOrLegacyPackageIdentifier(univ.Choice):
+ pass
+
+PreferredOrLegacyPackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('preferred', PreferredPackageIdentifier()),
+ namedtype.NamedType('legacy', univ.OctetString())
+)
+
+
+class CurrentFWConfig(univ.Sequence):
+ pass
+
+CurrentFWConfig.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('fwPkgType', univ.Integer()),
+ namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier())
+)
+
+
+class PreferredOrLegacyStalePackageIdentifier(univ.Choice):
+ pass
+
+PreferredOrLegacyStalePackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('preferredStaleVerNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('legacyStaleVersion', univ.OctetString())
+)
+
+
+class FirmwarePackageLoadErrorCode(univ.Enumerated):
+ pass
+
+FirmwarePackageLoadErrorCode.namedValues = namedval.NamedValues(
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('signatureFailure', 15),
+ ('contentTypeMismatch', 16),
+ ('badEncryptedData', 17),
+ ('unprotectedAttrsPresent', 18),
+ ('badEncryptContent', 19),
+ ('badEncryptAlgorithm', 20),
+ ('missingCiphertext', 21),
+ ('noDecryptKey', 22),
+ ('decryptFailure', 23),
+ ('badCompressAlgorithm', 24),
+ ('missingCompressedContent', 25),
+ ('decompressFailure', 26),
+ ('wrongHardware', 27),
+ ('stalePackage', 28),
+ ('notInCommunity', 29),
+ ('unsupportedPackageType', 30),
+ ('missingDependency', 31),
+ ('wrongDependencyVersion', 32),
+ ('insufficientMemory', 33),
+ ('badFirmware', 34),
+ ('unsupportedParameters', 35),
+ ('breaksDependency', 36),
+ ('otherError', 99)
+)
+
+
+class VendorLoadErrorCode(univ.Integer):
+ pass
+
+
+# Wrapped Firmware Key Unsigned Attribute and Object Identifier
+
+id_aa_wrappedFirmwareKey = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.39')
+
+class WrappedFirmwareKey(rfc5652.EnvelopedData):
+ pass
+
+
+# Firmware Package Information Signed Attribute and Object Identifier
+
+id_aa_firmwarePackageInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.42')
+
+class FirmwarePackageInfo(univ.Sequence):
+ pass
+
+FirmwarePackageInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('fwPkgType', univ.Integer()),
+ namedtype.OptionalNamedType('dependencies', univ.SequenceOf(componentType=PreferredOrLegacyPackageIdentifier()))
+)
+
+FirmwarePackageInfo.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2)
+
+
+# Community Identifiers Signed Attribute and Object Identifier
+
+id_aa_communityIdentifiers = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.40')
+
+class CommunityIdentifiers(univ.SequenceOf):
+ pass
+
+CommunityIdentifiers.componentType = CommunityIdentifier()
+
+
+# Implemented Compression Algorithms Signed Attribute and Object Identifier
+
+id_aa_implCompressAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.43')
+
+class ImplementedCompressAlgorithms(univ.SequenceOf):
+ pass
+
+ImplementedCompressAlgorithms.componentType = univ.ObjectIdentifier()
+
+
+# Implemented Cryptographic Algorithms Signed Attribute and Object Identifier
+
+id_aa_implCryptoAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.38')
+
+class ImplementedCryptoAlgorithms(univ.SequenceOf):
+ pass
+
+ImplementedCryptoAlgorithms.componentType = univ.ObjectIdentifier()
+
+
+# Decrypt Key Identifier Signed Attribute and Object Identifier
+
+id_aa_decryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.37')
+
+class DecryptKeyIdentifier(univ.OctetString):
+ pass
+
+
+# Target Hardware Identifier Signed Attribute and Object Identifier
+
+id_aa_targetHardwareIDs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.36')
+
+class TargetHardwareIdentifiers(univ.SequenceOf):
+ pass
+
+TargetHardwareIdentifiers.componentType = univ.ObjectIdentifier()
+
+
+# Firmware Package Identifier Signed Attribute and Object Identifier
+
+id_aa_firmwarePackageID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.35')
+
+class FirmwarePackageIdentifier(univ.Sequence):
+ pass
+
+FirmwarePackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('stale', PreferredOrLegacyStalePackageIdentifier())
+)
+
+
+# Firmware Package Message Digest Signed Attribute and Object Identifier
+
+id_aa_fwPkgMessageDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.41')
+
+class FirmwarePackageMessageDigest(univ.Sequence):
+ pass
+
+FirmwarePackageMessageDigest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('msgDigest', univ.OctetString())
+)
+
+
+# Firmware Package Load Error Report Content Type and Object Identifier
+
+class FWErrorVersion(univ.Integer):
+ pass
+
+FWErrorVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_firmwareLoadError = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.18')
+
+class FirmwarePackageLoadError(univ.Sequence):
+ pass
+
+FirmwarePackageLoadError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', FWErrorVersion().subtype(value='v1')),
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString()),
+ namedtype.NamedType('errorCode', FirmwarePackageLoadErrorCode()),
+ namedtype.OptionalNamedType('vendorErrorCode', VendorLoadErrorCode()),
+ namedtype.OptionalNamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('config', univ.SequenceOf(componentType=CurrentFWConfig()).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Firmware Package Load Receipt Content Type and Object Identifier
+
+class FWReceiptVersion(univ.Integer):
+ pass
+
+FWReceiptVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_firmwareLoadReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.17')
+
+class FirmwarePackageLoadReceipt(univ.Sequence):
+ pass
+
+FirmwarePackageLoadReceipt.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', FWReceiptVersion().subtype(value='v1')),
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString()),
+ namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('trustAnchorKeyID', univ.OctetString()),
+ namedtype.OptionalNamedType('decryptKeyID', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Firmware Package Content Type and Object Identifier
+
+id_ct_firmwarePackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.16')
+
+class FirmwarePkgData(univ.OctetString):
+ pass
+
+
+# Other Name syntax for Hardware Module Name
+
+id_on_hardwareModuleName = univ.ObjectIdentifier('1.3.6.1.5.5.7.8.4')
+
+class HardwareModuleName(univ.Sequence):
+ pass
+
+HardwareModuleName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString())
+)
+
+
+# Map of Attribute Type OIDs to Attributes is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_wrappedFirmwareKey: WrappedFirmwareKey(),
+ id_aa_firmwarePackageInfo: FirmwarePackageInfo(),
+ id_aa_communityIdentifiers: CommunityIdentifiers(),
+ id_aa_implCompressAlgs: ImplementedCompressAlgorithms(),
+ id_aa_implCryptoAlgs: ImplementedCryptoAlgorithms(),
+ id_aa_decryptKeyID: DecryptKeyIdentifier(),
+ id_aa_targetHardwareIDs: TargetHardwareIdentifiers(),
+ id_aa_firmwarePackageID: FirmwarePackageIdentifier(),
+ id_aa_fwPkgMessageDigest: FirmwarePackageMessageDigest(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_firmwareLoadError: FirmwarePackageLoadError(),
+ id_ct_firmwareLoadReceipt: FirmwarePackageLoadReceipt(),
+ id_ct_firmwarePackage: FirmwarePkgData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_hardwareModuleName: HardwareModuleName(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4210.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4210.py
new file mode 100644
index 0000000000..0935e3e9ac
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4210.py
@@ -0,0 +1,803 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management Protocol structures as per RFC4210
+#
+# Based on Alex Railean's work
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc2314
+from pyasn1_modules import rfc2459
+from pyasn1_modules import rfc2511
+
+MAX = float('inf')
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class CMPCertificate(rfc2459.Certificate):
+ pass
+
+
+class OOBCert(CMPCertificate):
+ pass
+
+
+class CertAnnContent(CMPCertificate):
+ pass
+
+
+class PKIFreeText(univ.SequenceOf):
+ """
+ PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
+ """
+ componentType = char.UTF8String()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class PollRepContent(univ.SequenceOf):
+ """
+ PollRepContent ::= SEQUENCE OF SEQUENCE {
+ certReqId INTEGER,
+ checkAfter INTEGER, -- time in seconds
+ reason PKIFreeText OPTIONAL
+ }
+ """
+
+ class CertReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('checkAfter', univ.Integer()),
+ namedtype.OptionalNamedType('reason', PKIFreeText())
+ )
+
+ componentType = CertReq()
+
+
+class PollReqContent(univ.SequenceOf):
+ """
+ PollReqContent ::= SEQUENCE OF SEQUENCE {
+ certReqId INTEGER
+ }
+
+ """
+
+ class CertReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer())
+ )
+
+ componentType = CertReq()
+
+
+class InfoTypeAndValue(univ.Sequence):
+ """
+ InfoTypeAndValue ::= SEQUENCE {
+ infoType OBJECT IDENTIFIER,
+ infoValue ANY DEFINED BY infoType OPTIONAL
+ }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('infoType', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('infoValue', univ.Any())
+ )
+
+
+class GenRepContent(univ.SequenceOf):
+ componentType = InfoTypeAndValue()
+
+
+class GenMsgContent(univ.SequenceOf):
+ componentType = InfoTypeAndValue()
+
+
+class PKIConfirmContent(univ.Null):
+ pass
+
+
+class CRLAnnContent(univ.SequenceOf):
+ componentType = rfc2459.CertificateList()
+
+
+class CAKeyUpdAnnContent(univ.Sequence):
+ """
+ CAKeyUpdAnnContent ::= SEQUENCE {
+ oldWithNew CMPCertificate,
+ newWithOld CMPCertificate,
+ newWithNew CMPCertificate
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oldWithNew', CMPCertificate()),
+ namedtype.NamedType('newWithOld', CMPCertificate()),
+ namedtype.NamedType('newWithNew', CMPCertificate())
+ )
+
+
+class RevDetails(univ.Sequence):
+ """
+ RevDetails ::= SEQUENCE {
+ certDetails CertTemplate,
+ crlEntryDetails Extensions OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
+ namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
+ )
+
+
+class RevReqContent(univ.SequenceOf):
+ componentType = RevDetails()
+
+
+class CertOrEncCert(univ.Choice):
+ """
+ CertOrEncCert ::= CHOICE {
+ certificate [0] CMPCertificate,
+ encryptedCert [1] EncryptedValue
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', CMPCertificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class CertifiedKeyPair(univ.Sequence):
+ """
+ CertifiedKeyPair ::= SEQUENCE {
+ certOrEncCert CertOrEncCert,
+ privateKey [0] EncryptedValue OPTIONAL,
+ publicationInfo [1] PKIPublicationInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certOrEncCert', CertOrEncCert()),
+ namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class POPODecKeyRespContent(univ.SequenceOf):
+ componentType = univ.Integer()
+
+
+class Challenge(univ.Sequence):
+ """
+ Challenge ::= SEQUENCE {
+ owf AlgorithmIdentifier OPTIONAL,
+ witness OCTET STRING,
+ challenge OCTET STRING
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString()),
+ namedtype.NamedType('challenge', univ.OctetString())
+ )
+
+
+class PKIStatus(univ.Integer):
+ """
+ PKIStatus ::= INTEGER {
+ accepted (0),
+ grantedWithMods (1),
+ rejection (2),
+ waiting (3),
+ revocationWarning (4),
+ revocationNotification (5),
+ keyUpdateWarning (6)
+ }
+ """
+ namedValues = namedval.NamedValues(
+ ('accepted', 0),
+ ('grantedWithMods', 1),
+ ('rejection', 2),
+ ('waiting', 3),
+ ('revocationWarning', 4),
+ ('revocationNotification', 5),
+ ('keyUpdateWarning', 6)
+ )
+
+
+class PKIFailureInfo(univ.BitString):
+ """
+ PKIFailureInfo ::= BIT STRING {
+ badAlg (0),
+ badMessageCheck (1),
+ badRequest (2),
+ badTime (3),
+ badCertId (4),
+ badDataFormat (5),
+ wrongAuthority (6),
+ incorrectData (7),
+ missingTimeStamp (8),
+ badPOP (9),
+ certRevoked (10),
+ certConfirmed (11),
+ wrongIntegrity (12),
+ badRecipientNonce (13),
+ timeNotAvailable (14),
+ unacceptedPolicy (15),
+ unacceptedExtension (16),
+ addInfoNotAvailable (17),
+ badSenderNonce (18),
+ badCertTemplate (19),
+ signerNotTrusted (20),
+ transactionIdInUse (21),
+ unsupportedVersion (22),
+ notAuthorized (23),
+ systemUnavail (24),
+ systemFailure (25),
+ duplicateCertReq (26)
+ """
+ namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('badDataFormat', 5),
+ ('wrongAuthority', 6),
+ ('incorrectData', 7),
+ ('missingTimeStamp', 8),
+ ('badPOP', 9),
+ ('certRevoked', 10),
+ ('certConfirmed', 11),
+ ('wrongIntegrity', 12),
+ ('badRecipientNonce', 13),
+ ('timeNotAvailable', 14),
+ ('unacceptedPolicy', 15),
+ ('unacceptedExtension', 16),
+ ('addInfoNotAvailable', 17),
+ ('badSenderNonce', 18),
+ ('badCertTemplate', 19),
+ ('signerNotTrusted', 20),
+ ('transactionIdInUse', 21),
+ ('unsupportedVersion', 22),
+ ('notAuthorized', 23),
+ ('systemUnavail', 24),
+ ('systemFailure', 25),
+ ('duplicateCertReq', 26)
+ )
+
+
+class PKIStatusInfo(univ.Sequence):
+ """
+ PKIStatusInfo ::= SEQUENCE {
+ status PKIStatus,
+ statusString PKIFreeText OPTIONAL,
+ failInfo PKIFailureInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.OptionalNamedType('statusString', PKIFreeText()),
+ namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
+ )
+
+
+class ErrorMsgContent(univ.Sequence):
+ """
+ ErrorMsgContent ::= SEQUENCE {
+ pKIStatusInfo PKIStatusInfo,
+ errorCode INTEGER OPTIONAL,
+ -- implementation-specific error codes
+ errorDetails PKIFreeText OPTIONAL
+ -- implementation-specific error details
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
+ namedtype.OptionalNamedType('errorCode', univ.Integer()),
+ namedtype.OptionalNamedType('errorDetails', PKIFreeText())
+ )
+
+
+class CertStatus(univ.Sequence):
+ """
+ CertStatus ::= SEQUENCE {
+ certHash OCTET STRING,
+ certReqId INTEGER,
+ statusInfo PKIStatusInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', univ.OctetString()),
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
+ )
+
+
+class CertConfirmContent(univ.SequenceOf):
+ componentType = CertStatus()
+
+
+class RevAnnContent(univ.Sequence):
+ """
+ RevAnnContent ::= SEQUENCE {
+ status PKIStatus,
+ certId CertId,
+ willBeRevokedAt GeneralizedTime,
+ badSinceDate GeneralizedTime,
+ crlDetails Extensions OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.NamedType('certId', rfc2511.CertId()),
+ namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
+ )
+
+
+class RevRepContent(univ.Sequence):
+ """
+ RevRepContent ::= SEQUENCE {
+ status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
+ revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
+ OPTIONAL,
+ crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList
+ OPTIONAL
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'status', univ.SequenceOf(
+ componentType=PKIStatusInfo(),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'revCerts', univ.SequenceOf(componentType=rfc2511.CertId()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'crls', univ.SequenceOf(componentType=rfc2459.CertificateList()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ )
+ )
+
+
+class KeyRecRepContent(univ.Sequence):
+ """
+ KeyRecRepContent ::= SEQUENCE {
+ status PKIStatusInfo,
+ newSigCert [0] CMPCertificate OPTIONAL,
+ caCerts [1] SEQUENCE SIZE (1..MAX) OF
+ CMPCertificate OPTIONAL,
+ keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
+ CertifiedKeyPair OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType(
+ 'newSigCert', CMPCertificate().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'caCerts', univ.SequenceOf(componentType=CMPCertificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ),
+ namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(componentType=CertifiedKeyPair()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))
+ )
+ )
+
+
+class CertResponse(univ.Sequence):
+ """
+ CertResponse ::= SEQUENCE {
+ certReqId INTEGER,
+ status PKIStatusInfo,
+ certifiedKeyPair CertifiedKeyPair OPTIONAL,
+ rspInfo OCTET STRING OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
+ namedtype.OptionalNamedType('rspInfo', univ.OctetString())
+ )
+
+
+class CertRepMessage(univ.Sequence):
+ """
+ CertRepMessage ::= SEQUENCE {
+ caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
+ OPTIONAL,
+ response SEQUENCE OF CertResponse
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'caPubs', univ.SequenceOf(
+ componentType=CMPCertificate()
+ ).subtype(sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
+ ),
+ namedtype.NamedType('response', univ.SequenceOf(componentType=CertResponse()))
+ )
+
+
+class POPODecKeyChallContent(univ.SequenceOf):
+ componentType = Challenge()
+
+
+class OOBCertHash(univ.Sequence):
+ """
+ OOBCertHash ::= SEQUENCE {
+ hashAlg [0] AlgorithmIdentifier OPTIONAL,
+ certId [1] CertId OPTIONAL,
+ hashVal BIT STRING
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'hashAlg', rfc2459.AlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
+ ),
+ namedtype.OptionalNamedType(
+ 'certId', rfc2511.CertId().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
+ ),
+ namedtype.NamedType('hashVal', univ.BitString())
+ )
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+class NestedMessageContent(univ.SequenceOf):
+ """
+ NestedMessageContent ::= PKIMessages
+ """
+ componentType = univ.Any()
+
+
+class DHBMParameter(univ.Sequence):
+ """
+ DHBMParameter ::= SEQUENCE {
+ owf AlgorithmIdentifier,
+ -- AlgId for a One-Way Function (SHA-1 recommended)
+ mac AlgorithmIdentifier
+ -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
+ } -- or HMAC [RFC2104, RFC2202])
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
+ )
+
+
+id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
+
+
+class PBMParameter(univ.Sequence):
+ """
+ PBMParameter ::= SEQUENCE {
+ salt OCTET STRING,
+ owf AlgorithmIdentifier,
+ iterationCount INTEGER,
+ mac AlgorithmIdentifier
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'salt', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 128))
+ ),
+ namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
+ )
+
+
+id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
+
+
+class PKIProtection(univ.BitString):
+ pass
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+nestedMessageContent = NestedMessageContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))
+
+
+class PKIBody(univ.Choice):
+ """
+ PKIBody ::= CHOICE { -- message-specific body elements
+ ir [0] CertReqMessages, --Initialization Request
+ ip [1] CertRepMessage, --Initialization Response
+ cr [2] CertReqMessages, --Certification Request
+ cp [3] CertRepMessage, --Certification Response
+ p10cr [4] CertificationRequest, --imported from [PKCS10]
+ popdecc [5] POPODecKeyChallContent, --pop Challenge
+ popdecr [6] POPODecKeyRespContent, --pop Response
+ kur [7] CertReqMessages, --Key Update Request
+ kup [8] CertRepMessage, --Key Update Response
+ krr [9] CertReqMessages, --Key Recovery Request
+ krp [10] KeyRecRepContent, --Key Recovery Response
+ rr [11] RevReqContent, --Revocation Request
+ rp [12] RevRepContent, --Revocation Response
+ ccr [13] CertReqMessages, --Cross-Cert. Request
+ ccp [14] CertRepMessage, --Cross-Cert. Response
+ ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
+ cann [16] CertAnnContent, --Certificate Ann.
+ rann [17] RevAnnContent, --Revocation Ann.
+ crlann [18] CRLAnnContent, --CRL Announcement
+ pkiconf [19] PKIConfirmContent, --Confirmation
+ nested [20] NestedMessageContent, --Nested Message
+ genm [21] GenMsgContent, --General Message
+ genp [22] GenRepContent, --General Response
+ error [23] ErrorMsgContent, --Error Message
+ certConf [24] CertConfirmContent, --Certificate confirm
+ pollReq [25] PollReqContent, --Polling request
+ pollRep [26] PollRepContent --Polling response
+
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'ir', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.NamedType(
+ 'ip', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ ),
+ namedtype.NamedType(
+ 'cr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+ ),
+ namedtype.NamedType(
+ 'cp', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+ ),
+ namedtype.NamedType(
+ 'p10cr', rfc2314.CertificationRequest().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ ),
+ namedtype.NamedType(
+ 'popdecc', POPODecKeyChallContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
+ )
+ ),
+ namedtype.NamedType(
+ 'popdecr', POPODecKeyRespContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
+ )
+ ),
+ namedtype.NamedType(
+ 'kur', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
+ )
+ ),
+ namedtype.NamedType(
+ 'kup', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
+ )
+ ),
+ namedtype.NamedType(
+ 'krr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
+ )
+ ),
+ namedtype.NamedType(
+ 'krp', KeyRecRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
+ )
+ ),
+ namedtype.NamedType(
+ 'rr', RevReqContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
+ )
+ ),
+ namedtype.NamedType(
+ 'rp', RevRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
+ )
+ ),
+ namedtype.NamedType(
+ 'ccr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
+ )
+ ),
+ namedtype.NamedType(
+ 'ccp', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
+ )
+ ),
+ namedtype.NamedType(
+ 'ckuann', CAKeyUpdAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
+ )
+ ),
+ namedtype.NamedType(
+ 'cann', CertAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
+ )
+ ),
+ namedtype.NamedType(
+ 'rann', RevAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
+ )
+ ),
+ namedtype.NamedType(
+ 'crlann', CRLAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
+ )
+ ),
+ namedtype.NamedType(
+ 'pkiconf', PKIConfirmContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
+ )
+ ),
+ namedtype.NamedType(
+ 'nested', nestedMessageContent
+ ),
+ # namedtype.NamedType('nested', NestedMessageContent().subtype(
+ # explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
+ # )
+ # ),
+ namedtype.NamedType(
+ 'genm', GenMsgContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
+ )
+ ),
+ namedtype.NamedType(
+ 'gen', GenRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
+ )
+ ),
+ namedtype.NamedType(
+ 'error', ErrorMsgContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
+ )
+ ),
+ namedtype.NamedType(
+ 'certConf', CertConfirmContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
+ )
+ ),
+ namedtype.NamedType(
+ 'pollReq', PollReqContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
+ )
+ ),
+ namedtype.NamedType(
+ 'pollRep', PollRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
+ )
+ )
+ )
+
+
+class PKIHeader(univ.Sequence):
+ """
+ PKIHeader ::= SEQUENCE {
+ pvno INTEGER { cmp1999(1), cmp2000(2) },
+ sender GeneralName,
+ recipient GeneralName,
+ messageTime [0] GeneralizedTime OPTIONAL,
+ protectionAlg [1] AlgorithmIdentifier OPTIONAL,
+ senderKID [2] KeyIdentifier OPTIONAL,
+ recipKID [3] KeyIdentifier OPTIONAL,
+ transactionID [4] OCTET STRING OPTIONAL,
+ senderNonce [5] OCTET STRING OPTIONAL,
+ recipNonce [6] OCTET STRING OPTIONAL,
+ freeText [7] PKIFreeText OPTIONAL,
+ generalInfo [8] SEQUENCE SIZE (1..MAX) OF
+ InfoTypeAndValue OPTIONAL
+ }
+
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'pvno', univ.Integer(
+ namedValues=namedval.NamedValues(('cmp1999', 1), ('cmp2000', 2))
+ )
+ ),
+ namedtype.NamedType('sender', rfc2459.GeneralName()),
+ namedtype.NamedType('recipient', rfc2459.GeneralName()),
+ namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
+ namedtype.OptionalNamedType('generalInfo',
+ univ.SequenceOf(
+ componentType=InfoTypeAndValue().subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))
+ )
+ )
+
+
+class ProtectedPart(univ.Sequence):
+ """
+ ProtectedPart ::= SEQUENCE {
+ header PKIHeader,
+ body PKIBody
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PKIHeader()),
+ namedtype.NamedType('infoValue', PKIBody())
+ )
+
+
+class PKIMessage(univ.Sequence):
+ """
+ PKIMessage ::= SEQUENCE {
+ header PKIHeader,
+ body PKIBody,
+ protection [0] PKIProtection OPTIONAL,
+ extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
+ OPTIONAL
+ }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PKIHeader()),
+ namedtype.NamedType('body', PKIBody()),
+ namedtype.OptionalNamedType('protection', PKIProtection().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('extraCerts',
+ univ.SequenceOf(
+ componentType=CMPCertificate()
+ ).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ )
+ )
+
+
+class PKIMessages(univ.SequenceOf):
+ """
+ PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
+ """
+ componentType = PKIMessage()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+NestedMessageContent._componentType = PKIMessages()
+nestedMessageContent._componentType = PKIMessages()
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4211.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4211.py
new file mode 100644
index 0000000000..c47b3c5dd2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4211.py
@@ -0,0 +1,396 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate Request
+# Message Format (CRMF)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc4211.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3280
+from pyasn1_modules import rfc3852
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_pkip = _buildOid(id_pkix, 5)
+
+id_regCtrl = _buildOid(id_pkip, 1)
+
+
+class SinglePubInfo(univ.Sequence):
+ pass
+
+
+SinglePubInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubMethod', univ.Integer(
+ namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
+ namedtype.OptionalNamedType('pubLocation', rfc3280.GeneralName())
+)
+
+
+class UTF8Pairs(char.UTF8String):
+ pass
+
+
+class PKMACValue(univ.Sequence):
+ pass
+
+
+PKMACValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algId', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('value', univ.BitString())
+)
+
+
+class POPOSigningKeyInput(univ.Sequence):
+ pass
+
+
+POPOSigningKeyInput.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'authInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'sender', rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
+ ),
+ namedtype.NamedType(
+ 'publicKeyMAC', PKMACValue()
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('publicKey', rfc3280.SubjectPublicKeyInfo())
+)
+
+
+class POPOSigningKey(univ.Sequence):
+ pass
+
+
+POPOSigningKey.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('algorithmIdentifier', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class Attributes(univ.SetOf):
+ pass
+
+
+Attributes.componentType = rfc3280.Attribute()
+
+
+class PrivateKeyInfo(univ.Sequence):
+ pass
+
+
+PrivateKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('privateKeyAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', univ.OctetString()),
+ namedtype.OptionalNamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedValue(univ.Sequence):
+ pass
+
+
+EncryptedValue.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('intendedAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('symmAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('keyAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('encValue', univ.BitString())
+)
+
+
+class EncryptedKey(univ.Choice):
+ pass
+
+
+EncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedValue', EncryptedValue()),
+ namedtype.NamedType('envelopedData', rfc3852.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyGenParameters(univ.OctetString):
+ pass
+
+
+class PKIArchiveOptions(univ.Choice):
+ pass
+
+
+PKIArchiveOptions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedPrivKey',
+ EncryptedKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyGenParameters',
+ KeyGenParameters().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('archiveRemGenPrivKey',
+ univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_regCtrl_authenticator = _buildOid(id_regCtrl, 2)
+
+id_regInfo = _buildOid(id_pkip, 2)
+
+id_regInfo_certReq = _buildOid(id_regInfo, 2)
+
+
+class ProtocolEncrKey(rfc3280.SubjectPublicKeyInfo):
+ pass
+
+
+class Authenticator(char.UTF8String):
+ pass
+
+
+class SubsequentMessage(univ.Integer):
+ pass
+
+
+SubsequentMessage.namedValues = namedval.NamedValues(
+ ('encrCert', 0),
+ ('challengeResp', 1)
+)
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any())
+)
+
+
+class POPOPrivKey(univ.Choice):
+ pass
+
+
+POPOPrivKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('thisMessage',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subsequentMessage',
+ SubsequentMessage().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dhMAC',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('agreeMAC',
+ PKMACValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('encryptedKey', rfc3852.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class ProofOfPossession(univ.Choice):
+ pass
+
+
+ProofOfPossession.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('raVerified',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signature', POPOSigningKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('keyEncipherment',
+ POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('keyAgreement',
+ POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class OptionalValidity(univ.Sequence):
+ pass
+
+
+OptionalValidity.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', rfc3280.Time().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('notAfter', rfc3280.Time().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class CertTemplate(univ.Sequence):
+ pass
+
+
+CertTemplate.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', rfc3280.Version().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('signingAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('issuer', rfc3280.Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('subject', rfc3280.Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('publicKey', rfc3280.SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('subjectUID', rfc3280.UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9)))
+)
+
+
+class Controls(univ.SequenceOf):
+ pass
+
+
+Controls.componentType = AttributeTypeAndValue()
+Controls.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertRequest(univ.Sequence):
+ pass
+
+
+CertRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('certTemplate', CertTemplate()),
+ namedtype.OptionalNamedType('controls', Controls())
+)
+
+
+class CertReqMsg(univ.Sequence):
+ pass
+
+
+CertReqMsg.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReq', CertRequest()),
+ namedtype.OptionalNamedType('popo', ProofOfPossession()),
+ namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()))
+)
+
+
+class CertReqMessages(univ.SequenceOf):
+ pass
+
+
+CertReqMessages.componentType = CertReqMsg()
+CertReqMessages.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertReq(CertRequest):
+ pass
+
+
+id_regCtrl_pkiPublicationInfo = _buildOid(id_regCtrl, 3)
+
+
+class CertId(univ.Sequence):
+ pass
+
+
+CertId.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+
+class OldCertId(CertId):
+ pass
+
+
+class PKIPublicationInfo(univ.Sequence):
+ pass
+
+
+PKIPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('action',
+ univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
+ namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()))
+)
+
+
+class EncKeyWithID(univ.Sequence):
+ pass
+
+
+EncKeyWithID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('privateKey', PrivateKeyInfo()),
+ namedtype.OptionalNamedType(
+ 'identifier', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('string', char.UTF8String()),
+ namedtype.NamedType('generalName', rfc3280.GeneralName())
+ )
+ )
+ )
+)
+
+id_regCtrl_protocolEncrKey = _buildOid(id_regCtrl, 6)
+
+id_regCtrl_oldCertID = _buildOid(id_regCtrl, 5)
+
+id_smime = _buildOid(1, 2, 840, 113549, 1, 9, 16)
+
+
+class PBMParameter(univ.Sequence):
+ pass
+
+
+PBMParameter.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('owf', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', rfc3280.AlgorithmIdentifier())
+)
+
+id_regCtrl_regToken = _buildOid(id_regCtrl, 1)
+
+id_regCtrl_pkiArchiveOptions = _buildOid(id_regCtrl, 4)
+
+id_regInfo_utf8Pairs = _buildOid(id_regInfo, 1)
+
+id_ct = _buildOid(id_smime, 1)
+
+id_ct_encKeyWithID = _buildOid(id_ct, 21)
+
+
+class RegToken(char.UTF8String):
+ pass
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4334.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4334.py
new file mode 100644
index 0000000000..44cd31b166
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4334.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4334.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# OID Arcs
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_aca = univ.ObjectIdentifier('1.3.6.1.5.5.7.10')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverPPP = id_kp + (13, )
+
+id_kp_eapOverLAN = id_kp + (14, )
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = id_pe + (13, )
+
+class SSID(univ.OctetString):
+ constraint.ValueSizeConstraint(1, 32)
+
+
+class SSIDList(univ.SequenceOf):
+ componentType = SSID()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Wireless LAN SSID Attribute Certificate Attribute
+
+id_aca_wlanSSID = id_aca + (7, )
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4357.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4357.py
new file mode 100644
index 0000000000..42b9e3ecb8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4357.py
@@ -0,0 +1,477 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional Cryptographic Algorithms for Use with GOST 28147-89,
+# GOST R 34.10-94, GOST R 34.10-2001, and GOST R 34.11-94 Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4357.txt
+# https://www.rfc-editor.org/errata/eid5927
+# https://www.rfc-editor.org/errata/eid5928
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Import from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Object Identifiers
+
+id_CryptoPro = univ.ObjectIdentifier((1, 2, 643, 2, 2,))
+
+
+id_CryptoPro_modules = id_CryptoPro + (1, 1,)
+
+id_CryptoPro_extensions = id_CryptoPro + (34,)
+
+id_CryptoPro_policyIds = id_CryptoPro + (38,)
+
+id_CryptoPro_policyQt = id_CryptoPro + (39,)
+
+
+cryptographic_Gost_Useful_Definitions = id_CryptoPro_modules + (0, 1,)
+
+gostR3411_94_DigestSyntax = id_CryptoPro_modules + (1, 1,)
+
+gostR3410_94_PKISyntax = id_CryptoPro_modules + (2, 1,)
+
+gostR3410_94_SignatureSyntax = id_CryptoPro_modules + (3, 1,)
+
+gost28147_89_EncryptionSyntax = id_CryptoPro_modules + (4, 1,)
+
+gostR3410_EncryptionSyntax = id_CryptoPro_modules + (5, 2,)
+
+gost28147_89_ParamSetSyntax = id_CryptoPro_modules + (6, 1,)
+
+gostR3411_94_ParamSetSyntax = id_CryptoPro_modules + (7, 1,)
+
+gostR3410_94_ParamSetSyntax = id_CryptoPro_modules + (8, 1, 1)
+
+gostR3410_2001_PKISyntax = id_CryptoPro_modules + (9, 1,)
+
+gostR3410_2001_SignatureSyntax = id_CryptoPro_modules + (10, 1,)
+
+gostR3410_2001_ParamSetSyntax = id_CryptoPro_modules + (12, 1,)
+
+gost_CryptoPro_ExtendedKeyUsage = id_CryptoPro_modules + (13, 1,)
+
+gost_CryptoPro_PrivateKey = id_CryptoPro_modules + (14, 1,)
+
+gost_CryptoPro_PKIXCMP = id_CryptoPro_modules + (15, 1,)
+
+gost_CryptoPro_TLS = id_CryptoPro_modules + (16, 1,)
+
+gost_CryptoPro_Policy = id_CryptoPro_modules + (17, 1,)
+
+gost_CryptoPro_Constants = id_CryptoPro_modules + (18, 1,)
+
+
+id_CryptoPro_algorithms = id_CryptoPro
+
+id_GostR3411_94_with_GostR3410_2001 = id_CryptoPro_algorithms + (3,)
+
+id_GostR3411_94_with_GostR3410_94 = id_CryptoPro_algorithms + (4,)
+
+id_GostR3411_94 = id_CryptoPro_algorithms + (9,)
+
+id_Gost28147_89_None_KeyMeshing = id_CryptoPro_algorithms + (14, 0,)
+
+id_Gost28147_89_CryptoPro_KeyMeshing = id_CryptoPro_algorithms + (14, 1,)
+
+id_GostR3410_2001 = id_CryptoPro_algorithms + (19,)
+
+id_GostR3410_94 = id_CryptoPro_algorithms + (20,)
+
+id_Gost28147_89 = id_CryptoPro_algorithms + (21,)
+
+id_Gost28147_89_MAC = id_CryptoPro_algorithms + (22,)
+
+id_CryptoPro_hashes = id_CryptoPro_algorithms + (30,)
+
+id_CryptoPro_encrypts = id_CryptoPro_algorithms + (31,)
+
+id_CryptoPro_signs = id_CryptoPro_algorithms + (32,)
+
+id_CryptoPro_exchanges = id_CryptoPro_algorithms + (33,)
+
+id_CryptoPro_ecc_signs = id_CryptoPro_algorithms + (35,)
+
+id_CryptoPro_ecc_exchanges = id_CryptoPro_algorithms + (36,)
+
+id_CryptoPro_private_keys = id_CryptoPro_algorithms + (37,)
+
+id_CryptoPro_pkixcmp_infos = id_CryptoPro_algorithms + (41,)
+
+id_CryptoPro_audit_service_types = id_CryptoPro_algorithms + (42,)
+
+id_CryptoPro_audit_record_types = id_CryptoPro_algorithms + (43,)
+
+id_CryptoPro_attributes = id_CryptoPro_algorithms + (44,)
+
+id_CryptoPro_name_service_types = id_CryptoPro_algorithms + (45,)
+
+id_GostR3410_2001DH = id_CryptoPro_algorithms + (98,)
+
+id_GostR3410_94DH = id_CryptoPro_algorithms + (99,)
+
+
+id_Gost28147_89_TestParamSet = id_CryptoPro_encrypts + (0,)
+
+id_Gost28147_89_CryptoPro_A_ParamSet = id_CryptoPro_encrypts + (1,)
+
+id_Gost28147_89_CryptoPro_B_ParamSet = id_CryptoPro_encrypts + (2,)
+
+id_Gost28147_89_CryptoPro_C_ParamSet = id_CryptoPro_encrypts + (3,)
+
+id_Gost28147_89_CryptoPro_D_ParamSet = id_CryptoPro_encrypts + (4,)
+
+id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet = id_CryptoPro_encrypts + (5,)
+
+id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet = id_CryptoPro_encrypts + (6,)
+
+id_Gost28147_89_CryptoPro_RIC_1_ParamSet = id_CryptoPro_encrypts + (7,)
+
+
+id_GostR3410_2001_TestParamSet = id_CryptoPro_ecc_signs + (0,)
+
+id_GostR3410_2001_CryptoPro_A_ParamSet = id_CryptoPro_ecc_signs + (1,)
+
+id_GostR3410_2001_CryptoPro_B_ParamSet = id_CryptoPro_ecc_signs + (2,)
+
+id_GostR3410_2001_CryptoPro_C_ParamSet = id_CryptoPro_ecc_signs + (3,)
+
+
+id_GostR3410_2001_CryptoPro_XchA_ParamSet = id_CryptoPro_ecc_exchanges + (0,)
+
+id_GostR3410_2001_CryptoPro_XchB_ParamSet = id_CryptoPro_ecc_exchanges + (1,)
+
+
+id_GostR3410_94_TestParamSet = id_CryptoPro_signs + (0,)
+
+id_GostR3410_94_CryptoPro_A_ParamSet = id_CryptoPro_signs + (2,)
+
+id_GostR3410_94_CryptoPro_B_ParamSet = id_CryptoPro_signs + (3,)
+
+id_GostR3410_94_CryptoPro_C_ParamSet = id_CryptoPro_signs + (4,)
+
+id_GostR3410_94_CryptoPro_D_ParamSet = id_CryptoPro_signs + (5,)
+
+
+id_GostR3410_94_CryptoPro_XchA_ParamSet = id_CryptoPro_exchanges + (1,)
+
+id_GostR3410_94_CryptoPro_XchB_ParamSet = id_CryptoPro_exchanges + (2,)
+
+id_GostR3410_94_CryptoPro_XchC_ParamSet = id_CryptoPro_exchanges + (3,)
+
+
+id_GostR3410_94_a = id_GostR3410_94 + (1,)
+
+id_GostR3410_94_aBis = id_GostR3410_94 + (2,)
+
+id_GostR3410_94_b = id_GostR3410_94 + (3,)
+
+id_GostR3410_94_bBis = id_GostR3410_94 + (4,)
+
+
+id_GostR3411_94_TestParamSet = id_CryptoPro_hashes + (0,)
+
+id_GostR3411_94_CryptoProParamSet = id_CryptoPro_hashes + (1,)
+
+
+
+
+class Gost28147_89_ParamSet(univ.ObjectIdentifier):
+ pass
+
+Gost28147_89_ParamSet.subtypeSpec = constraint.SingleValueConstraint(
+ id_Gost28147_89_TestParamSet,
+ id_Gost28147_89_CryptoPro_A_ParamSet,
+ id_Gost28147_89_CryptoPro_B_ParamSet,
+ id_Gost28147_89_CryptoPro_C_ParamSet,
+ id_Gost28147_89_CryptoPro_D_ParamSet,
+ id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet,
+ id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet,
+ id_Gost28147_89_CryptoPro_RIC_1_ParamSet
+)
+
+
+class Gost28147_89_BlobParameters(univ.Sequence):
+ pass
+
+Gost28147_89_BlobParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet())
+)
+
+
+class Gost28147_89_MAC(univ.OctetString):
+ pass
+
+Gost28147_89_MAC.subtypeSpec = constraint.ValueSizeConstraint(1, 4)
+
+
+class Gost28147_89_Key(univ.OctetString):
+ pass
+
+Gost28147_89_Key.subtypeSpec = constraint.ValueSizeConstraint(32, 32)
+
+
+class Gost28147_89_EncryptedKey(univ.Sequence):
+ pass
+
+Gost28147_89_EncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedKey', Gost28147_89_Key()),
+ namedtype.OptionalNamedType('maskKey', Gost28147_89_Key().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('macKey', Gost28147_89_MAC())
+)
+
+
+class Gost28147_89_IV(univ.OctetString):
+ pass
+
+Gost28147_89_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8)
+
+
+class Gost28147_89_UZ(univ.OctetString):
+ pass
+
+Gost28147_89_UZ.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+class Gost28147_89_ParamSetParameters(univ.Sequence):
+ pass
+
+Gost28147_89_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eUZ', Gost28147_89_UZ()),
+ namedtype.NamedType('mode',
+ univ.Integer(namedValues=namedval.NamedValues(
+ ('gost28147-89-CNT', 0),
+ ('gost28147-89-CFB', 1),
+ ('cryptoPro-CBC', 2)
+ ))),
+ namedtype.NamedType('shiftBits',
+ univ.Integer(namedValues=namedval.NamedValues(
+ ('gost28147-89-block', 64)
+ ))),
+ namedtype.NamedType('keyMeshing', AlgorithmIdentifier())
+)
+
+
+class Gost28147_89_Parameters(univ.Sequence):
+ pass
+
+Gost28147_89_Parameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('iv', Gost28147_89_IV()),
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet())
+)
+
+
+class GostR3410_2001_CertificateSignature(univ.BitString):
+ pass
+
+GostR3410_2001_CertificateSignature.subtypeSpec=constraint.ValueSizeConstraint(256, 512)
+
+
+class GostR3410_2001_ParamSetParameters(univ.Sequence):
+ pass
+
+GostR3410_2001_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('a', univ.Integer()),
+ namedtype.NamedType('b', univ.Integer()),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('x', univ.Integer()),
+ namedtype.NamedType('y', univ.Integer())
+)
+
+
+class GostR3410_2001_PublicKey(univ.OctetString):
+ pass
+
+GostR3410_2001_PublicKey.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+class GostR3410_2001_PublicKeyParameters(univ.Sequence):
+ pass
+
+GostR3410_2001_PublicKeyParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('publicKeyParamSet', univ.ObjectIdentifier().subtype(
+ subtypeSpec=constraint.SingleValueConstraint(
+ id_GostR3410_2001_TestParamSet,
+ id_GostR3410_2001_CryptoPro_A_ParamSet,
+ id_GostR3410_2001_CryptoPro_B_ParamSet,
+ id_GostR3410_2001_CryptoPro_C_ParamSet,
+ id_GostR3410_2001_CryptoPro_XchA_ParamSet,
+ id_GostR3410_2001_CryptoPro_XchB_ParamSet
+ ))),
+ namedtype.NamedType('digestParamSet', univ.ObjectIdentifier().subtype(
+ subtypeSpec=constraint.SingleValueConstraint(
+ id_GostR3411_94_TestParamSet,
+ id_GostR3411_94_CryptoProParamSet
+ ))),
+ namedtype.DefaultedNamedType('encryptionParamSet',
+ Gost28147_89_ParamSet().subtype(value=id_Gost28147_89_CryptoPro_A_ParamSet
+ ))
+)
+
+
+class GostR3410_94_CertificateSignature(univ.BitString):
+ pass
+
+GostR3410_94_CertificateSignature.subtypeSpec = constraint.ValueSizeConstraint(256, 512)
+
+
+class GostR3410_94_ParamSetParameters_t(univ.Integer):
+ pass
+
+GostR3410_94_ParamSetParameters_t.subtypeSpec = constraint.SingleValueConstraint(512, 1024)
+
+
+class GostR3410_94_ParamSetParameters(univ.Sequence):
+ pass
+
+GostR3410_94_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('t', GostR3410_94_ParamSetParameters_t()),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('a', univ.Integer()),
+ namedtype.OptionalNamedType('validationAlgorithm', AlgorithmIdentifier())
+)
+
+
+class GostR3410_94_PublicKey(univ.OctetString):
+ pass
+
+GostR3410_94_PublicKey.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.ValueSizeConstraint(64, 64),
+ constraint.ValueSizeConstraint(128, 128)
+)
+
+
+class GostR3410_94_PublicKeyParameters(univ.Sequence):
+ pass
+
+GostR3410_94_PublicKeyParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('publicKeyParamSet', univ.ObjectIdentifier().subtype(
+ subtypeSpec=constraint.SingleValueConstraint(
+ id_GostR3410_94_TestParamSet,
+ id_GostR3410_94_CryptoPro_A_ParamSet,
+ id_GostR3410_94_CryptoPro_B_ParamSet,
+ id_GostR3410_94_CryptoPro_C_ParamSet,
+ id_GostR3410_94_CryptoPro_D_ParamSet,
+ id_GostR3410_94_CryptoPro_XchA_ParamSet,
+ id_GostR3410_94_CryptoPro_XchB_ParamSet,
+ id_GostR3410_94_CryptoPro_XchC_ParamSet
+ ))),
+ namedtype.NamedType('digestParamSet', univ.ObjectIdentifier().subtype(
+ subtypeSpec=constraint.SingleValueConstraint(
+ id_GostR3411_94_TestParamSet,
+ id_GostR3411_94_CryptoProParamSet
+ ))),
+ namedtype.DefaultedNamedType('encryptionParamSet',
+ Gost28147_89_ParamSet().subtype(value=id_Gost28147_89_CryptoPro_A_ParamSet
+ ))
+)
+
+
+class GostR3410_94_ValidationBisParameters_c(univ.Integer):
+ pass
+
+GostR3410_94_ValidationBisParameters_c.subtypeSpec = constraint.ValueRangeConstraint(0, 4294967295)
+
+
+class GostR3410_94_ValidationBisParameters(univ.Sequence):
+ pass
+
+GostR3410_94_ValidationBisParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x0', GostR3410_94_ValidationBisParameters_c()),
+ namedtype.NamedType('c', GostR3410_94_ValidationBisParameters_c()),
+ namedtype.OptionalNamedType('d', univ.Integer())
+)
+
+
+class GostR3410_94_ValidationParameters_c(univ.Integer):
+ pass
+
+GostR3410_94_ValidationParameters_c.subtypeSpec = constraint.ValueRangeConstraint(0, 65535)
+
+
+class GostR3410_94_ValidationParameters(univ.Sequence):
+ pass
+
+GostR3410_94_ValidationParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x0', GostR3410_94_ValidationParameters_c()),
+ namedtype.NamedType('c', GostR3410_94_ValidationParameters_c()),
+ namedtype.OptionalNamedType('d', univ.Integer())
+)
+
+
+class GostR3411_94_Digest(univ.OctetString):
+ pass
+
+GostR3411_94_Digest.subtypeSpec = constraint.ValueSizeConstraint(32, 32)
+
+
+class GostR3411_94_DigestParameters(univ.ObjectIdentifier):
+ pass
+
+GostR3411_94_DigestParameters.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.SingleValueConstraint(id_GostR3411_94_TestParamSet),
+ constraint.SingleValueConstraint(id_GostR3411_94_CryptoProParamSet),
+)
+
+
+class GostR3411_94_ParamSetParameters(univ.Sequence):
+ pass
+
+GostR3411_94_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hUZ', Gost28147_89_UZ()),
+ namedtype.NamedType('h0', GostR3411_94_Digest())
+)
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_Gost28147_89: Gost28147_89_Parameters(),
+ id_Gost28147_89_TestParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_A_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_B_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_C_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_D_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_KeyMeshing: univ.Null(""),
+ id_Gost28147_89_None_KeyMeshing: univ.Null(""),
+ id_GostR3410_94: GostR3410_94_PublicKeyParameters(),
+ id_GostR3410_94_TestParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_A_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_B_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_C_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_D_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_XchA_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_XchB_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_XchC_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_a: GostR3410_94_ValidationParameters(),
+ id_GostR3410_94_aBis: GostR3410_94_ValidationBisParameters(),
+ id_GostR3410_94_b: GostR3410_94_ValidationParameters(),
+ id_GostR3410_94_bBis: GostR3410_94_ValidationBisParameters(),
+ id_GostR3410_2001: univ.Null(""),
+ id_GostR3411_94: univ.Null(""),
+ id_GostR3411_94_TestParamSet: GostR3411_94_ParamSetParameters(),
+ id_GostR3411_94_CryptoProParamSet: GostR3411_94_ParamSetParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4387.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4387.py
new file mode 100644
index 0000000000..c1f4e79acf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4387.py
@@ -0,0 +1,23 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Store Access via HTTP
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4387.txt
+#
+
+
+from pyasn1.type import univ
+
+
+id_ad = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, ))
+
+id_ad_http_certs = id_ad + (6, )
+
+id_ad_http_crls = id_ad + (7,)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4476.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4476.py
new file mode 100644
index 0000000000..25a0ccb7e8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4476.py
@@ -0,0 +1,93 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Attribute Certificate Policies Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4476.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+PolicyQualifierId = rfc5280.PolicyQualifierId
+
+PolicyQualifierInfo = rfc5280.PolicyQualifierInfo
+
+UserNotice = rfc5280.UserNotice
+
+id_pkix = rfc5280.id_pkix
+
+
+# Object Identifiers
+
+id_pe = id_pkix + (1,)
+
+id_pe_acPolicies = id_pe + (15,)
+
+id_qt = id_pkix + (2,)
+
+id_qt_acps = id_qt + (4,)
+
+id_qt_acunotice = id_qt + (5,)
+
+
+# Attribute Certificate Policies Extension
+
+class ACUserNotice(UserNotice):
+ pass
+
+
+class ACPSuri(char.IA5String):
+ pass
+
+
+class AcPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyInformation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', AcPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers',
+ univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class AcPoliciesSyntax(univ.SequenceOf):
+ componentType = PolicyInformation()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Update the policy qualifier map in rfc5280.py
+
+_policyQualifierInfoMapUpdate = {
+ id_qt_acps: ACPSuri(),
+ id_qt_acunotice: UserNotice(),
+}
+
+rfc5280.policyQualifierInfoMap.update(_policyQualifierInfoMapUpdate)
+
+
+# Update the certificate extension map in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_acPolicies: AcPoliciesSyntax(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4490.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4490.py
new file mode 100644
index 0000000000..b8fe32134e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4490.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using the GOST 28147-89, GOST R 34.11-94, GOST R 34.10-94, and
+# GOST R 34.10-2001 Algorithms with the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4490.txt
+#
+
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc4357
+from pyasn1_modules import rfc5280
+
+
+# Imports from RFC 4357
+
+id_CryptoPro_algorithms = rfc4357.id_CryptoPro_algorithms
+
+id_GostR3410_94 = rfc4357.id_GostR3410_94
+
+id_GostR3410_2001 = rfc4357.id_GostR3410_2001
+
+Gost28147_89_ParamSet = rfc4357.Gost28147_89_ParamSet
+
+Gost28147_89_EncryptedKey = rfc4357.Gost28147_89_EncryptedKey
+
+GostR3410_94_PublicKeyParameters = rfc4357.GostR3410_94_PublicKeyParameters
+
+GostR3410_2001_PublicKeyParameters = rfc4357.GostR3410_2001_PublicKeyParameters
+
+
+# Imports from RFC 5280
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+
+# CMS/PKCS#7 key agreement algorithms & parameters
+
+class Gost28147_89_KeyWrapParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()),
+ namedtype.OptionalNamedType('ukm', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+ )
+
+
+id_Gost28147_89_CryptoPro_KeyWrap = id_CryptoPro_algorithms + (13, 1, )
+
+
+id_Gost28147_89_None_KeyWrap = id_CryptoPro_algorithms + (13, 0, )
+
+
+id_GostR3410_2001_CryptoPro_ESDH = id_CryptoPro_algorithms + (96, )
+
+
+id_GostR3410_94_CryptoPro_ESDH = id_CryptoPro_algorithms + (97, )
+
+
+# CMS/PKCS#7 key transport algorithms & parameters
+
+id_GostR3410_2001_KeyTransportSMIMECapability = id_GostR3410_2001
+
+
+id_GostR3410_94_KeyTransportSMIMECapability = id_GostR3410_94
+
+
+class GostR3410_TransportParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()),
+ namedtype.OptionalNamedType('ephemeralPublicKey',
+ SubjectPublicKeyInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('ukm', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+ )
+
+class GostR3410_KeyTransport(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sessionEncryptedKey', Gost28147_89_EncryptedKey()),
+ namedtype.OptionalNamedType('transportParameters',
+ GostR3410_TransportParameters().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+# GOST R 34.10-94 signature algorithm & parameters
+
+class GostR3410_94_Signature(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+# GOST R 34.10-2001 signature algorithms and parameters
+
+class GostR3410_2001_Signature(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_Gost28147_89_CryptoPro_KeyWrap: Gost28147_89_KeyWrapParameters(),
+ id_Gost28147_89_None_KeyWrap: Gost28147_89_KeyWrapParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4491.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4491.py
new file mode 100644
index 0000000000..60b5560dcc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4491.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using the GOST R 34.10-94, GOST R 34.10-2001, and GOST R 34.11-94
+# Algorithms with Certificates and CRLs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4491.txt
+#
+
+from pyasn1_modules import rfc4357
+
+
+# Signature Algorithm GOST R 34.10-94
+
+id_GostR3411_94_with_GostR3410_94 = rfc4357.id_GostR3411_94_with_GostR3410_94
+
+
+# Signature Algorithm GOST R 34.10-2001
+
+id_GostR3411_94_with_GostR3410_2001 = rfc4357.id_GostR3411_94_with_GostR3410_2001
+
+
+# GOST R 34.10-94 Keys
+
+id_GostR3410_94 = rfc4357.id_GostR3410_94
+
+GostR3410_2001_PublicKey = rfc4357.GostR3410_2001_PublicKey
+
+GostR3410_2001_PublicKeyParameters = rfc4357.GostR3410_2001_PublicKeyParameters
+
+
+# GOST R 34.10-2001 Keys
+
+id_GostR3410_2001 = rfc4357.id_GostR3410_2001
+
+GostR3410_94_PublicKey = rfc4357.GostR3410_94_PublicKey
+
+GostR3410_94_PublicKeyParameters = rfc4357.GostR3410_94_PublicKeyParameters
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4683.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4683.py
new file mode 100644
index 0000000000..11ac65aa68
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4683.py
@@ -0,0 +1,72 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Subject Identification Method (SIM)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4683.txt
+# https://www.rfc-editor.org/errata/eid1047
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Used to compute the PEPSI value
+
+class HashContent(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userPassword', char.UTF8String()),
+ namedtype.NamedType('authorityRandom', univ.OctetString()),
+ namedtype.NamedType('identifierType', univ.ObjectIdentifier()),
+ namedtype.NamedType('identifier', char.UTF8String())
+ )
+
+
+# Used to encode the PEPSI value as the SIM Other Name
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8,)
+
+id_on_SIM = id_on + (6,)
+
+
+class SIM(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('authorityRandom', univ.OctetString()),
+ namedtype.NamedType('pEPSI', univ.OctetString())
+ )
+
+
+# Used to encrypt the PEPSI value during certificate request
+
+id_pkip = id_pkix + (5,)
+
+id_regEPEPSI = id_pkip + (3,)
+
+
+class EncryptedPEPSI(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('identifierType', univ.ObjectIdentifier()),
+ namedtype.NamedType('identifier', char.UTF8String()),
+ namedtype.NamedType('sIM', SIM())
+ )
+
+
+# Update the map of Other Name OIDs to Other Names in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_SIM: SIM(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4985.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4985.py
new file mode 100644
index 0000000000..318e412380
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc4985.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Expression of Service Names in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4985.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# As specified in Appendix A.2 of RFC 4985
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_dnsSRV = id_on + (7, )
+
+
+class SRVName(char.IA5String):
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+srvName = rfc5280.AnotherName()
+srvName['type-id'] = id_on_dnsSRV
+srvName['value'] = SRVName()
+
+
+# Map of Other Name OIDs to Other Names is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_dnsSRV: SRVName(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5035.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5035.py
new file mode 100644
index 0000000000..1cec98249c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5035.py
@@ -0,0 +1,199 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Update to Enhanced Security Services for S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5035.txt
+#
+
+from pyasn1.codec.der.encoder import encode as der_encode
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+ContentType = rfc5652.ContentType
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+PolicyInformation = rfc5280.PolicyInformation
+
+GeneralNames = rfc5280.GeneralNames
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+
+# Signing Certificate Attribute V1 and V2
+
+id_aa_signingCertificate = rfc2634.id_aa_signingCertificate
+
+id_aa_signingCertificateV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.47')
+
+Hash = rfc2634.Hash
+
+IssuerSerial = rfc2634.IssuerSerial
+
+ESSCertID = rfc2634.ESSCertID
+
+SigningCertificate = rfc2634.SigningCertificate
+
+
+sha256AlgId = AlgorithmIdentifier()
+sha256AlgId['algorithm'] = rfc4055.id_sha256
+# A non-schema object used to represent sha256AlgId['parameters'] as absent
+sha256AlgId['parameters'] = der_encode(univ.OctetString(''))
+
+
+class ESSCertIDv2(univ.Sequence):
+ pass
+
+ESSCertIDv2.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('hashAlgorithm', sha256AlgId),
+ namedtype.NamedType('certHash', Hash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+)
+
+
+class SigningCertificateV2(univ.Sequence):
+ pass
+
+SigningCertificateV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs', univ.SequenceOf(
+ componentType=ESSCertIDv2())),
+ namedtype.OptionalNamedType('policies', univ.SequenceOf(
+ componentType=PolicyInformation()))
+)
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = rfc2634.id_aa_mlExpandHistory
+
+ub_ml_expansion_history = rfc2634.ub_ml_expansion_history
+
+EntityIdentifier = rfc2634.EntityIdentifier
+
+MLReceiptPolicy = rfc2634.MLReceiptPolicy
+
+MLData = rfc2634.MLData
+
+MLExpansionHistory = rfc2634.MLExpansionHistory
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+ub_privacy_mark_length = rfc2634.ub_privacy_mark_length
+
+ub_security_categories = rfc2634.ub_security_categories
+
+ub_integer_options = rfc2634.ub_integer_options
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityClassification = rfc2634.SecurityClassification
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityCategory = rfc2634.SecurityCategory
+
+SecurityCategories = rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = rfc2634.id_aa_equivalentLabels
+
+EquivalentLabels = rfc2634.EquivalentLabels
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = rfc2634.id_aa_contentIdentifier
+
+ContentIdentifier = rfc2634.ContentIdentifier
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = rfc2634.id_aa_contentReference
+
+ContentReference = rfc2634.ContentReference
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = rfc2634.id_aa_msgSigDigest
+
+MsgSigDigest = rfc2634.MsgSigDigest
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+
+# Receipt Request Attribute
+
+AllOrFirstTier = rfc2634.AllOrFirstTier
+
+ReceiptsFrom = rfc2634.ReceiptsFrom
+
+id_aa_receiptRequest = rfc2634.id_aa_receiptRequest
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Receipt Content Type
+
+ESSVersion = rfc2634.ESSVersion
+
+id_ct_receipt = rfc2634.id_ct_receipt
+
+Receipt = rfc2634.Receipt
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_signingCertificateV2: SigningCertificateV2(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5083.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5083.py
new file mode 100644
index 0000000000..26ef550c47
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5083.py
@@ -0,0 +1,52 @@
+# This file is being contributed to the pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add a map for use with opentypes and
+# simplify the code for the object identifier assignment.
+#
+# Copyright (c) 2018, 2019 Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authenticated-Enveloped-Data for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5083.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# CMS Authenticated-Enveloped-Data Content Type
+
+id_ct_authEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.23')
+
+class AuthEnvelopedData(univ.Sequence):
+ pass
+
+AuthEnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', rfc5652.CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', rfc5652.OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', rfc5652.RecipientInfos()),
+ namedtype.NamedType('authEncryptedContentInfo', rfc5652.EncryptedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', rfc5652.AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('mac', rfc5652.MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', rfc5652.UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_authEnvelopedData: AuthEnvelopedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5084.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5084.py
new file mode 100644
index 0000000000..7686839561
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5084.py
@@ -0,0 +1,97 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to AES_CCM_ICVlen.subtypeSpec and added comments
+#
+# Copyright (c) 2018-2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES-CCM and AES-GCM Algorithms fo use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5084.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AES_CCM_ICVlen(univ.Integer):
+ pass
+
+
+class AES_GCM_ICVlen(univ.Integer):
+ pass
+
+
+AES_CCM_ICVlen.subtypeSpec = constraint.SingleValueConstraint(4, 6, 8, 10, 12, 14, 16)
+
+AES_GCM_ICVlen.subtypeSpec = constraint.ValueRangeConstraint(12, 16)
+
+
+class CCMParameters(univ.Sequence):
+ pass
+
+
+CCMParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('aes-nonce', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(7, 13))),
+ # The aes-nonce parameter contains 15-L octets, where L is the size of the length field. L=8 is RECOMMENDED.
+ # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+ namedtype.DefaultedNamedType('aes-ICVlen', AES_CCM_ICVlen().subtype(value=12))
+)
+
+
+class GCMParameters(univ.Sequence):
+ pass
+
+
+GCMParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('aes-nonce', univ.OctetString()),
+ # The aes-nonce may have any number of bits between 8 and 2^64, but it MUST be a multiple of 8 bits.
+ # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+ # A nonce value of 12 octets can be processed more efficiently, so that length is RECOMMENDED.
+ namedtype.DefaultedNamedType('aes-ICVlen', AES_GCM_ICVlen().subtype(value=12))
+)
+
+aes = _OID(2, 16, 840, 1, 101, 3, 4, 1)
+
+id_aes128_CCM = _OID(aes, 7)
+
+id_aes128_GCM = _OID(aes, 6)
+
+id_aes192_CCM = _OID(aes, 27)
+
+id_aes192_GCM = _OID(aes, 26)
+
+id_aes256_CCM = _OID(aes, 47)
+
+id_aes256_GCM = _OID(aes, 46)
+
+
+# Map of Algorithm Identifier OIDs to Parameters is added to the
+# ones in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_aes128_CCM: CCMParameters(),
+ id_aes128_GCM: GCMParameters(),
+ id_aes192_CCM: CCMParameters(),
+ id_aes192_GCM: GCMParameters(),
+ id_aes256_CCM: CCMParameters(),
+ id_aes256_GCM: GCMParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5126.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5126.py
new file mode 100644
index 0000000000..8e016c209f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5126.py
@@ -0,0 +1,577 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Advanced Electronic Signatures (CAdES)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5126.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc6960
+from pyasn1_modules import rfc3161
+
+MAX = float('inf')
+
+
+# Maps for OpenTypes
+
+commitmentQualifierMap = { }
+
+sigQualifiersMap = { }
+
+otherRevRefMap = { }
+
+otherRevValMap = { }
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+ContentType = rfc5652.ContentType
+
+SignedData = rfc5652.SignedData
+
+EncapsulatedContentInfo = rfc5652.EncapsulatedContentInfo
+
+SignerInfo = rfc5652.SignerInfo
+
+MessageDigest = rfc5652.MessageDigest
+
+SigningTime = rfc5652.SigningTime
+
+Countersignature = rfc5652.Countersignature
+
+id_data = rfc5652.id_data
+
+id_signedData = rfc5652.id_signedData
+
+id_contentType= rfc5652.id_contentType
+
+id_messageDigest = rfc5652.id_messageDigest
+
+id_signingTime = rfc5652.id_signingTime
+
+id_countersignature = rfc5652.id_countersignature
+
+
+# Imports from RFC 5035
+
+SigningCertificate = rfc5035.SigningCertificate
+
+IssuerSerial = rfc5035.IssuerSerial
+
+ContentReference = rfc5035.ContentReference
+
+ContentIdentifier = rfc5035.ContentIdentifier
+
+id_aa_contentReference = rfc5035.id_aa_contentReference
+
+id_aa_contentIdentifier = rfc5035.id_aa_contentIdentifier
+
+id_aa_signingCertificate = rfc5035.id_aa_signingCertificate
+
+id_aa_signingCertificateV2 = rfc5035.id_aa_signingCertificateV2
+
+
+# Imports from RFC 5280
+
+Certificate = rfc5280.Certificate
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+CertificateList = rfc5280.CertificateList
+
+Name = rfc5280.Name
+
+Attribute = rfc5280.Attribute
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+PolicyInformation = rfc5280.PolicyInformation
+
+DirectoryString = rfc5280.DirectoryString
+
+
+# Imports from RFC 5755
+
+AttributeCertificate = rfc5755.AttributeCertificate
+
+
+# Imports from RFC 6960
+
+BasicOCSPResponse = rfc6960.BasicOCSPResponse
+
+ResponderID = rfc6960.ResponderID
+
+
+# Imports from RFC 3161
+
+TimeStampToken = rfc3161.TimeStampToken
+
+
+# OID used referencing electronic signature mechanisms
+
+id_etsi_es_IDUP_Mechanism_v1 = univ.ObjectIdentifier('0.4.0.1733.1.4.1')
+
+
+# OtherSigningCertificate - deprecated
+
+id_aa_ets_otherSigCert = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.19')
+
+
+class OtherHashValue(univ.OctetString):
+ pass
+
+
+class OtherHashAlgAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', OtherHashValue())
+ )
+
+
+class OtherHash(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sha1Hash', OtherHashValue()),
+ namedtype.NamedType('otherHash', OtherHashAlgAndValue())
+ )
+
+
+class OtherCertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertHash', OtherHash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+ )
+
+
+class OtherSigningCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs',
+ univ.SequenceOf(componentType=OtherCertID())),
+ namedtype.OptionalNamedType('policies',
+ univ.SequenceOf(componentType=PolicyInformation()))
+ )
+
+
+# Signature Policy Identifier
+
+id_aa_ets_sigPolicyId = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.15')
+
+
+class SigPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class SigPolicyHash(OtherHashAlgAndValue):
+ pass
+
+
+class SigPolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+class SigPolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigPolicyQualifierId', SigPolicyQualifierId()),
+ namedtype.NamedType('sigQualifier', univ.Any(),
+ openType=opentype.OpenType('sigPolicyQualifierId', sigQualifiersMap))
+ )
+
+
+class SignaturePolicyId(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigPolicyId', SigPolicyId()),
+ namedtype.NamedType('sigPolicyHash', SigPolicyHash()),
+ namedtype.OptionalNamedType('sigPolicyQualifiers',
+ univ.SequenceOf(componentType=SigPolicyQualifierInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class SignaturePolicyImplied(univ.Null):
+ pass
+
+
+class SignaturePolicy(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signaturePolicyId', SignaturePolicyId()),
+ namedtype.NamedType('signaturePolicyImplied', SignaturePolicyImplied())
+ )
+
+
+id_spq_ets_unotice = univ.ObjectIdentifier('1.2.840.113549.1.9.16.5.2')
+
+
+class DisplayText(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('visibleString', char.VisibleString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+ )
+
+
+class NoticeReference(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers',
+ univ.SequenceOf(componentType=univ.Integer()))
+ )
+
+class SPUserNotice(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+ )
+
+
+noticeToUser = SigPolicyQualifierInfo()
+noticeToUser['sigPolicyQualifierId'] = id_spq_ets_unotice
+noticeToUser['sigQualifier'] = SPUserNotice()
+
+
+id_spq_ets_uri = univ.ObjectIdentifier('1.2.840.113549.1.9.16.5.1')
+
+
+class SPuri(char.IA5String):
+ pass
+
+
+pointerToSigPolSpec = SigPolicyQualifierInfo()
+pointerToSigPolSpec['sigPolicyQualifierId'] = id_spq_ets_uri
+pointerToSigPolSpec['sigQualifier'] = SPuri()
+
+
+# Commitment Type
+
+id_aa_ets_commitmentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.16')
+
+
+class CommitmentTypeIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class CommitmentTypeQualifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('commitmentTypeIdentifier',
+ CommitmentTypeIdentifier()),
+ namedtype.NamedType('qualifier', univ.Any(),
+ openType=opentype.OpenType('commitmentTypeIdentifier',
+ commitmentQualifierMap))
+ )
+
+
+class CommitmentTypeIndication(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('commitmentTypeId', CommitmentTypeIdentifier()),
+ namedtype.OptionalNamedType('commitmentTypeQualifier',
+ univ.SequenceOf(componentType=CommitmentTypeQualifier()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+id_cti_ets_proofOfOrigin = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.1')
+
+id_cti_ets_proofOfReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.2')
+
+id_cti_ets_proofOfDelivery = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.3')
+
+id_cti_ets_proofOfSender = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.4')
+
+id_cti_ets_proofOfApproval = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.5')
+
+id_cti_ets_proofOfCreation = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.6')
+
+
+# Signer Location
+
+id_aa_ets_signerLocation = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.17')
+
+
+class PostalAddress(univ.SequenceOf):
+ componentType = DirectoryString()
+ subtypeSpec = constraint.ValueSizeConstraint(1, 6)
+
+
+class SignerLocation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('countryName',
+ DirectoryString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('localityName',
+ DirectoryString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('postalAdddress',
+ PostalAddress().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+# Signature Timestamp
+
+id_aa_signatureTimeStampToken = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.14')
+
+
+class SignatureTimeStampToken(TimeStampToken):
+ pass
+
+
+# Content Timestamp
+
+id_aa_ets_contentTimestamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.20')
+
+
+class ContentTimestamp(TimeStampToken):
+ pass
+
+
+# Signer Attributes
+
+id_aa_ets_signerAttr = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.18')
+
+
+class ClaimedAttributes(univ.SequenceOf):
+ componentType = Attribute()
+
+
+class CertifiedAttributes(AttributeCertificate):
+ pass
+
+
+class SignerAttribute(univ.SequenceOf):
+ componentType = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('claimedAttributes',
+ ClaimedAttributes().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('certifiedAttributes',
+ CertifiedAttributes().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))
+
+
+# Complete Certificate Refs
+
+id_aa_ets_certificateRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.21')
+
+
+class CompleteCertificateRefs(univ.SequenceOf):
+ componentType = OtherCertID()
+
+
+# Complete Revocation Refs
+
+id_aa_ets_revocationRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.22')
+
+
+class CrlIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlissuer', Name()),
+ namedtype.NamedType('crlIssuedTime', useful.UTCTime()),
+ namedtype.OptionalNamedType('crlNumber', univ.Integer())
+ )
+
+
+class CrlValidatedID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlHash', OtherHash()),
+ namedtype.OptionalNamedType('crlIdentifier', CrlIdentifier())
+ )
+
+
+class CRLListID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crls',
+ univ.SequenceOf(componentType=CrlValidatedID()))
+ )
+
+
+class OcspIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ocspResponderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime())
+ )
+
+
+class OcspResponsesID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ocspIdentifier', OcspIdentifier()),
+ namedtype.OptionalNamedType('ocspRepHash', OtherHash())
+ )
+
+
+class OcspListID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ocspResponses',
+ univ.SequenceOf(componentType=OcspResponsesID()))
+ )
+
+
+class OtherRevRefType(univ.ObjectIdentifier):
+ pass
+
+
+class OtherRevRefs(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevRefType', OtherRevRefType()),
+ namedtype.NamedType('otherRevRefs', univ.Any(),
+ openType=opentype.OpenType('otherRevRefType', otherRevRefMap))
+ )
+
+
+class CrlOcspRef(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlids',
+ CRLListID().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ocspids',
+ OcspListID().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('otherRev',
+ OtherRevRefs().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CompleteRevocationRefs(univ.SequenceOf):
+ componentType = CrlOcspRef()
+
+
+# Certificate Values
+
+id_aa_ets_certValues = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.23')
+
+
+class CertificateValues(univ.SequenceOf):
+ componentType = Certificate()
+
+
+# Certificate Revocation Values
+
+id_aa_ets_revocationValues = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.24')
+
+
+class OtherRevValType(univ.ObjectIdentifier):
+ pass
+
+
+class OtherRevVals(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevValType', OtherRevValType()),
+ namedtype.NamedType('otherRevVals', univ.Any(),
+ openType=opentype.OpenType('otherRevValType', otherRevValMap))
+ )
+
+
+class RevocationValues(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlVals',
+ univ.SequenceOf(componentType=CertificateList()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('ocspVals',
+ univ.SequenceOf(componentType=BasicOCSPResponse()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('otherRevVals',
+ OtherRevVals().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+# CAdES-C Timestamp
+
+id_aa_ets_escTimeStamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.25')
+
+
+class ESCTimeStampToken(TimeStampToken):
+ pass
+
+
+# Time-Stamped Certificates and CRLs
+
+id_aa_ets_certCRLTimestamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.26')
+
+
+class TimestampedCertsCRLs(TimeStampToken):
+ pass
+
+
+# Archive Timestamp
+
+id_aa_ets_archiveTimestampV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.48')
+
+
+class ArchiveTimeStampToken(TimeStampToken):
+ pass
+
+
+# Attribute certificate references
+
+id_aa_ets_attrCertificateRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.44')
+
+
+class AttributeCertificateRefs(univ.SequenceOf):
+ componentType = OtherCertID()
+
+
+# Attribute revocation references
+
+id_aa_ets_attrRevocationRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.45')
+
+
+class AttributeRevocationRefs(univ.SequenceOf):
+ componentType = CrlOcspRef()
+
+
+# Update the sigQualifiersMap
+
+_sigQualifiersMapUpdate = {
+ id_spq_ets_unotice: SPUserNotice(),
+ id_spq_ets_uri: SPuri(),
+}
+
+sigQualifiersMap.update(_sigQualifiersMapUpdate)
+
+
+# Update the CMS Attribute Map in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_ets_otherSigCert: OtherSigningCertificate(),
+ id_aa_ets_sigPolicyId: SignaturePolicy(),
+ id_aa_ets_commitmentType: CommitmentTypeIndication(),
+ id_aa_ets_signerLocation: SignerLocation(),
+ id_aa_signatureTimeStampToken: SignatureTimeStampToken(),
+ id_aa_ets_contentTimestamp: ContentTimestamp(),
+ id_aa_ets_signerAttr: SignerAttribute(),
+ id_aa_ets_certificateRefs: CompleteCertificateRefs(),
+ id_aa_ets_revocationRefs: CompleteRevocationRefs(),
+ id_aa_ets_certValues: CertificateValues(),
+ id_aa_ets_revocationValues: RevocationValues(),
+ id_aa_ets_escTimeStamp: ESCTimeStampToken(),
+ id_aa_ets_certCRLTimestamp: TimestampedCertsCRLs(),
+ id_aa_ets_archiveTimestampV2: ArchiveTimeStampToken(),
+ id_aa_ets_attrCertificateRefs: AttributeCertificateRefs(),
+ id_aa_ets_attrRevocationRefs: AttributeRevocationRefs(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5208.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5208.py
new file mode 100644
index 0000000000..295fdbf388
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5208.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#8 syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc5208
+#
+# Sample captures could be obtained with "openssl pkcs8 -topk8" command
+#
+from pyasn1_modules import rfc2251
+from pyasn1_modules.rfc2459 import *
+
+
+class KeyEncryptionAlgorithms(AlgorithmIdentifier):
+ pass
+
+
+class PrivateKeyAlgorithms(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedData(univ.OctetString):
+ pass
+
+
+class EncryptedPrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('encryptedData', EncryptedData())
+ )
+
+
+class PrivateKey(univ.OctetString):
+ pass
+
+
+class Attributes(univ.SetOf):
+ componentType = rfc2251.Attribute()
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0), ('v2', 1))
+
+
+class PrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('privateKeyAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', PrivateKey()),
+ namedtype.OptionalNamedType('attributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5275.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5275.py
new file mode 100644
index 0000000000..1be9598142
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5275.py
@@ -0,0 +1,404 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Symmetric Key Management and Distribution
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5275.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5755
+
+MAX = float('inf')
+
+
+# Initialize the map for GLAQueryRequests and GLAQueryResponses
+
+glaQueryRRMap = { }
+
+
+# Imports from RFC 3565
+
+id_aes128_wrap = rfc3565.id_aes128_wrap
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Certificate = rfc5280.Certificate
+
+GeneralName = rfc5280.GeneralName
+
+
+# Imports from RFC 5652
+
+CertificateSet = rfc5652.CertificateSet
+
+KEKIdentifier = rfc5652.KEKIdentifier
+
+RecipientInfos = rfc5652.RecipientInfos
+
+
+# Imports from RFC 5751
+
+SMIMECapability = rfc5751.SMIMECapability
+
+
+# Imports from RFC 5755
+
+AttributeCertificate = rfc5755.AttributeCertificate
+
+
+# The GL symmetric key distribution object identifier arc
+
+id_skd = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 8,))
+
+
+# The GL Use KEK control attribute
+
+id_skd_glUseKEK = id_skd + (1,)
+
+
+class Certificates(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pKC',
+ Certificate().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('aC',
+ univ.SequenceOf(componentType=AttributeCertificate()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('certPath',
+ CertificateSet().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class GLInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glAddress', GeneralName())
+ )
+
+
+class GLOwnerInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glOwnerName', GeneralName()),
+ namedtype.NamedType('glOwnerAddress', GeneralName()),
+ namedtype.OptionalNamedType('certificates', Certificates())
+ )
+
+
+class GLAdministration(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('unmanaged', 0),
+ ('managed', 1),
+ ('closed', 2)
+ )
+
+
+requested_algorithm = SMIMECapability().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
+requested_algorithm['capabilityID'] = id_aes128_wrap
+
+
+class GLKeyAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('rekeyControlledByGLO',
+ univ.Boolean().subtype(value=0,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('recipientsNotMutuallyAware',
+ univ.Boolean().subtype(value=1,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('duration',
+ univ.Integer().subtype(value=0,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.DefaultedNamedType('generationCounter',
+ univ.Integer().subtype(value=2,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('requestedAlgorithm', requested_algorithm)
+ )
+
+
+class GLUseKEK(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glInfo', GLInfo()),
+ namedtype.NamedType('glOwnerInfo',
+ univ.SequenceOf(componentType=GLOwnerInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.DefaultedNamedType('glAdministration',
+ GLAdministration().subtype(value=1)),
+ namedtype.OptionalNamedType('glKeyAttributes', GLKeyAttributes())
+ )
+
+
+# The Delete GL control attribute
+
+id_skd_glDelete = id_skd + (2,)
+
+
+class DeleteGL(GeneralName):
+ pass
+
+
+# The Add GL Member control attribute
+
+id_skd_glAddMember = id_skd + (3,)
+
+
+class GLMember(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glMemberName', GeneralName()),
+ namedtype.OptionalNamedType('glMemberAddress', GeneralName()),
+ namedtype.OptionalNamedType('certificates', Certificates())
+ )
+
+
+class GLAddMember(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glMember', GLMember())
+ )
+
+
+# The Delete GL Member control attribute
+
+id_skd_glDeleteMember = id_skd + (4,)
+
+
+class GLDeleteMember(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glMemberToDelete', GeneralName())
+ )
+
+
+# The GL Rekey control attribute
+
+id_skd_glRekey = id_skd + (5,)
+
+
+class GLNewKeyAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('rekeyControlledByGLO',
+ univ.Boolean().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('recipientsNotMutuallyAware',
+ univ.Boolean().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('duration',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generationCounter',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('requestedAlgorithm',
+ AlgorithmIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class GLRekey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.OptionalNamedType('glAdministration', GLAdministration()),
+ namedtype.OptionalNamedType('glNewKeyAttributes', GLNewKeyAttributes()),
+ namedtype.OptionalNamedType('glRekeyAllGLKeys', univ.Boolean())
+ )
+
+
+# The Add and Delete GL Owner control attributes
+
+id_skd_glAddOwner = id_skd + (6,)
+
+id_skd_glRemoveOwner = id_skd + (7,)
+
+
+class GLOwnerAdministration(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glOwnerInfo', GLOwnerInfo())
+ )
+
+
+# The GL Key Compromise control attribute
+
+id_skd_glKeyCompromise = id_skd + (8,)
+
+
+class GLKCompromise(GeneralName):
+ pass
+
+
+# The GL Key Refresh control attribute
+
+id_skd_glkRefresh = id_skd + (9,)
+
+
+class Date(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('start', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('end', useful.GeneralizedTime())
+ )
+
+
+class GLKRefresh(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('dates',
+ univ.SequenceOf(componentType=Date()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+# The GLA Query Request control attribute
+
+id_skd_glaQueryRequest = id_skd + (11,)
+
+
+class GLAQueryRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glaRequestType', univ.ObjectIdentifier()),
+ namedtype.NamedType('glaRequestValue', univ.Any(),
+ openType=opentype.OpenType('glaRequestType', glaQueryRRMap))
+ )
+
+
+# The GLA Query Response control attribute
+
+id_skd_glaQueryResponse = id_skd + (12,)
+
+
+class GLAQueryResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glaResponseType', univ.ObjectIdentifier()),
+ namedtype.NamedType('glaResponseValue', univ.Any(),
+ openType=opentype.OpenType('glaResponseType', glaQueryRRMap))
+ )
+
+
+# The GLA Request/Response (glaRR) arc for glaRequestType/glaResponseType
+
+id_cmc_glaRR = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 7, 99,))
+
+
+# The Algorithm Request
+
+id_cmc_gla_skdAlgRequest = id_cmc_glaRR + (1,)
+
+
+class SKDAlgRequest(univ.Null):
+ pass
+
+
+# The Algorithm Response
+
+id_cmc_gla_skdAlgResponse = id_cmc_glaRR + (2,)
+
+SMIMECapabilities = rfc5751.SMIMECapabilities
+
+
+# The control attribute to request an updated certificate to the GLA and
+# the control attribute to return an updated certificate to the GLA
+
+id_skd_glProvideCert = id_skd + (13,)
+
+id_skd_glManageCert = id_skd + (14,)
+
+
+class GLManageCert(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glMember', GLMember())
+ )
+
+
+# The control attribute to distribute the GL shared KEK
+
+id_skd_glKey = id_skd + (15,)
+
+
+class GLKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glIdentifier', KEKIdentifier()),
+ namedtype.NamedType('glkWrapped', RecipientInfos()),
+ namedtype.NamedType('glkAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('glkNotBefore', useful.GeneralizedTime()),
+ namedtype.NamedType('glkNotAfter', useful.GeneralizedTime())
+ )
+
+
+# The CMC error types
+
+id_cet_skdFailInfo = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 15, 1,))
+
+
+class SKDFailInfo(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('closedGL', 1),
+ ('unsupportedDuration', 2),
+ ('noGLACertificate', 3),
+ ('invalidCert', 4),
+ ('unsupportedAlgorithm', 5),
+ ('noGLONameMatch', 6),
+ ('invalidGLName', 7),
+ ('nameAlreadyInUse', 8),
+ ('noSpam', 9),
+ ('alreadyAMember', 11),
+ ('notAMember', 12),
+ ('alreadyAnOwner', 13),
+ ('notAnOwner', 14)
+ )
+
+
+# Update the map for GLAQueryRequests and GLAQueryResponses
+
+_glaQueryRRMapUpdate = {
+ id_cmc_gla_skdAlgRequest: univ.Null(""),
+ id_cmc_gla_skdAlgResponse: SMIMECapabilities(),
+}
+
+glaQueryRRMap.update(_glaQueryRRMapUpdate)
+
+
+# Update the map for CMC control attributes; since CMS Attributes and
+# CMC Controls both use 'attrType', one map is used for both
+
+_cmcControlAttributesMapUpdate = {
+ id_skd_glUseKEK: GLUseKEK(),
+ id_skd_glDelete: DeleteGL(),
+ id_skd_glAddMember: GLAddMember(),
+ id_skd_glDeleteMember: GLDeleteMember(),
+ id_skd_glRekey: GLRekey(),
+ id_skd_glAddOwner: GLOwnerAdministration(),
+ id_skd_glRemoveOwner: GLOwnerAdministration(),
+ id_skd_glKeyCompromise: GLKCompromise(),
+ id_skd_glkRefresh: GLKRefresh(),
+ id_skd_glaQueryRequest: GLAQueryRequest(),
+ id_skd_glaQueryResponse: GLAQueryResponse(),
+ id_skd_glProvideCert: GLManageCert(),
+ id_skd_glManageCert: GLManageCert(),
+ id_skd_glKey: GLKey(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmcControlAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5280.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5280.py
new file mode 100644
index 0000000000..ed5d28f751
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5280.py
@@ -0,0 +1,1658 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Updated by Russ Housley for ORAddress Extension Attribute opentype support.
+# Updated by Russ Housley for AlgorithmIdentifier opentype support.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate and Certificate
+# Revocation List (CRL) Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5280.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+
def _buildOid(*components):
    """Concatenate OID components into a single ObjectIdentifier.

    Each component may be a univ.ObjectIdentifier (all of its arcs are
    spliced in) or any value acceptable to int() (appended as one arc).
    """
    output = []
    for x in components:
        if isinstance(x, univ.ObjectIdentifier):
            # An ObjectIdentifier is itself iterable over its arcs; no need
            # to wrap it in list() before extending.
            output.extend(x)
        else:
            output.append(int(x))

    return univ.ObjectIdentifier(output)
+
+
# Upper bounds and extension-attribute codes from the X.400 modules
ub_e163_4_sub_address_length = univ.Integer(40)

ub_e163_4_number_length = univ.Integer(15)

unformatted_postal_address = univ.Integer(16)  # extension-attribute type code
+
+
class TerminalType(univ.Integer):
    """Terminal type codes used in X.400 extended network addresses."""
    namedValues = namedval.NamedValues(
        ('telex', 3),
        ('teletex', 4),
        ('g3-facsimile', 5),
        ('g4-facsimile', 6),
        ('ia5-terminal', 7),
        ('videotex', 8)
    )
+
+
class Extension(univ.Sequence):
    """X.509 extension: OID, criticality flag (default FALSE), DER value."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('extnID', univ.ObjectIdentifier()),
        namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
        namedtype.NamedType('extnValue', univ.OctetString())
    )
+
+
class Extensions(univ.SequenceOf):
    """Non-empty SEQUENCE OF Extension."""
    componentType = Extension()
    sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
physical_delivery_personal_name = univ.Integer(13)  # extension-attribute type code

ub_unformatted_address_length = univ.Integer(180)

ub_pds_parameter_length = univ.Integer(30)

ub_pds_physical_address_lines = univ.Integer(6)
+
+
class UnformattedPostalAddress(univ.Set):
    """Free-form postal address: printable lines and/or one teletex string."""
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType(
            'printable-address',
            univ.SequenceOf(componentType=char.PrintableString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
        namedtype.OptionalNamedType(
            'teletex-string',
            char.TeletexString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
    )
+
ub_organization_name = univ.Integer(64)


class X520OrganizationName(univ.Choice):
    """Directory organizationName value in any of the permitted string types."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
    )
+
ub_x121_address_length = univ.Integer(16)

pds_name = univ.Integer(7)  # extension-attribute type code

# PKIX arcs (RFC 5280, Appendix A)
id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)

id_kp = _buildOid(id_pkix, 3)  # extended key purpose arc

ub_postal_code_length = univ.Integer(16)
+
+
class PostalCode(univ.Choice):
    """Postal code carried either as a numeric or a printable string."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('numeric-code', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
        namedtype.NamedType('printable-code', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
    )
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+unique_postal_name = univ.Integer(20)
+
+
+class DomainComponent(char.IA5String):
+ pass
+
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+ub_match = univ.Integer(128)
+
+id_at = _buildOid(2, 5, 4)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+id_at_organizationalUnitName = _buildOid(id_at, 11)
+
+terminal_type = univ.Integer(23)
+
+
class PDSParameter(univ.Set):
    """Physical-delivery parameter: printable and/or teletex rendition."""
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
        namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
    )
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+ub_surname_length = univ.Integer(40)
+
+id_ad = _buildOid(id_pkix, 48)
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+
class TeletexDomainDefinedAttribute(univ.Sequence):
    """Domain-defined attribute (type/value pair) in teletex form."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
        namedtype.NamedType('value', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
    )


ub_domain_defined_attributes = univ.Integer(4)


class TeletexDomainDefinedAttributes(univ.SequenceOf):
    """SEQUENCE (1..ub-domain-defined-attributes) OF TeletexDomainDefinedAttribute."""
    componentType = TeletexDomainDefinedAttribute()
    sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+extended_network_address = univ.Integer(22)
+
+ub_locality_name = univ.Integer(128)
+
+
class X520LocalityName(univ.Choice):
    """Directory localityName value in any of the permitted string types."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
    )
+
+teletex_organization_name = univ.Integer(3)
+
+ub_given_name_length = univ.Integer(16)
+
+ub_initials_length = univ.Integer(5)
+
+
class PersonalName(univ.Set):
    """X.400 personal name: surname plus optional given-name/initials/qualifier."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'surname',
            char.PrintableString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
                implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType(
            'given-name',
            char.PrintableString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
                implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.OptionalNamedType(
            'initials',
            char.PrintableString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
                implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType(
            'generation-qualifier',
            char.PrintableString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
                implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
    )
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
id_at_generationQualifier = _buildOid(id_at, 44)


class Version(univ.Integer):
    """Certificate/CRL version number: v1(0), v2(1), v3(2)."""
    namedValues = namedval.NamedValues(('v1', 0), ('v2', 1), ('v3', 2))
+
+
class CertificateSerialNumber(univ.Integer):
    pass


# Populated by algorithm-specific modules: algorithm OID -> ASN.1 type of
# the corresponding 'parameters' field, for open-type decoding.
algorithmIdentifierMap = {}


class AlgorithmIdentifier(univ.Sequence):
    """Algorithm OID plus optional algorithm-dependent parameters (open type)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
        namedtype.OptionalNamedType('parameters', univ.Any(),
            openType=opentype.OpenType('algorithm', algorithmIdentifierMap)
        )
    )
+
+
class Time(univ.Choice):
    """UTCTime or GeneralizedTime, as used in validity and CRL fields."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('utcTime', useful.UTCTime()),
        namedtype.NamedType('generalTime', useful.GeneralizedTime())
    )
+
+
class AttributeValue(univ.Any):
    pass


# Populated by other modules: attribute-type OID -> ASN.1 type of its value,
# for open-type decoding of AttributeTypeAndValue and Attribute.
certificateAttributesMap = {}


class AttributeTypeAndValue(univ.Sequence):
    """One attribute (type OID and value) inside a relative distinguished name."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeType()),
        namedtype.NamedType(
            'value', AttributeValue(),
            openType=opentype.OpenType('type', certificateAttributesMap)
        )
    )
+
+
class RelativeDistinguishedName(univ.SetOf):
    """Non-empty SET OF AttributeTypeAndValue."""
    componentType = AttributeTypeAndValue()
    sizeSpec = constraint.ValueSizeConstraint(1, MAX)


class RDNSequence(univ.SequenceOf):
    """SEQUENCE OF RelativeDistinguishedName."""
    componentType = RelativeDistinguishedName()


class Name(univ.Choice):
    """Distinguished name; rdnSequence is the only defined alternative."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('rdnSequence', RDNSequence())
    )
+
+
class TBSCertList(univ.Sequence):
    """To-be-signed portion of a CRL (RFC 5280, section 5.1)."""
    pass


TBSCertList.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('version', Version()),
    namedtype.NamedType('signature', AlgorithmIdentifier()),
    namedtype.NamedType('issuer', Name()),
    namedtype.NamedType('thisUpdate', Time()),
    namedtype.OptionalNamedType('nextUpdate', Time()),
    # Each entry: serial number, revocation time, optional per-entry extensions
    namedtype.OptionalNamedType(
        'revokedCertificates', univ.SequenceOf(
            componentType=univ.Sequence(
                componentType=namedtype.NamedTypes(
                    namedtype.NamedType('userCertificate', CertificateSerialNumber()),
                    namedtype.NamedType('revocationDate', Time()),
                    namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
                )
            )
        )
    ),
    # CRL-level extensions are wrapped in an EXPLICIT [0] tag
    namedtype.OptionalNamedType(
        'crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
+
+
class CertificateList(univ.Sequence):
    """Signed CRL: TBSCertList, signature algorithm, and signature bits."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tbsCertList', TBSCertList()),
        namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
        namedtype.NamedType('signature', univ.BitString())
    )
+
+
class PhysicalDeliveryOfficeName(PDSParameter):
    """Physical delivery office name (PDSParameter form)."""


ub_extension_attributes = univ.Integer(256)

# Populated by other modules for open-type decoding.
certificateExtensionsMap = {}

oraddressExtensionAttributeMap = {}
+
+
+class ExtensionAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'extension-attribute-type',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType(
+ 'extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)),
+ openType=opentype.OpenType('extension-attribute-type', oraddressExtensionAttributeMap))
+ )
+
+id_qt = _buildOid(id_pkix, 2)
+
+id_qt_cps = _buildOid(id_qt, 1)
+
+id_at_stateOrProvinceName = _buildOid(id_at, 8)
+
+id_at_title = _buildOid(id_at, 12)
+
+id_at_serialNumber = _buildOid(id_at, 5)
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+poste_restante_address = univ.Integer(19)
+
+
class UniqueIdentifier(univ.BitString):
    """Issuer/subject unique identifier (BIT STRING)."""


class Validity(univ.Sequence):
    """Certificate validity interval: notBefore and notAfter times."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('notBefore', Time()),
        namedtype.NamedType('notAfter', Time())
    )
+
+
class SubjectPublicKeyInfo(univ.Sequence):
    """Public-key algorithm identifier plus the encoded public key bits."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', AlgorithmIdentifier()),
        namedtype.NamedType('subjectPublicKey', univ.BitString())
    )
+
+
class TBSCertificate(univ.Sequence):
    """To-be-signed portion of an X.509 certificate (RFC 5280, section 4.1)."""
    pass


TBSCertificate.componentType = namedtype.NamedTypes(
    # version is EXPLICIT [0] and defaults to v1 when absent
    namedtype.DefaultedNamedType('version',
                                 Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
                                                                       tag.tagFormatSimple, 0)).subtype(value="v1")),
    namedtype.NamedType('serialNumber', CertificateSerialNumber()),
    namedtype.NamedType('signature', AlgorithmIdentifier()),
    namedtype.NamedType('issuer', Name()),
    namedtype.NamedType('validity', Validity()),
    namedtype.NamedType('subject', Name()),
    namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
    # unique identifiers use IMPLICIT [1]/[2] tags (v2/v3 certificates, per RFC 5280)
    namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    # extensions use an EXPLICIT [3] tag (v3 certificates, per RFC 5280)
    namedtype.OptionalNamedType('extensions',
                                Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
+
+physical_delivery_office_name = univ.Integer(10)
+
+ub_name = univ.Integer(32768)
+
+
+class X520name(univ.Choice):
+ pass
+
+
+X520name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+)
+
+id_at_dnQualifier = _buildOid(id_at, 46)
+
+ub_serial_number = univ.Integer(64)
+
+ub_pseudonym = univ.Integer(128)
+
+pkcs_9 = _buildOid(1, 2, 840, 113549, 1, 9)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+ub_integer_options = univ.Integer(256)
+
+id_at_commonName = _buildOid(id_at, 3)
+
+ub_organization_name_length = univ.Integer(64)
+
+id_ad_ocsp = _buildOid(id_ad, 1)
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ pass
+
+
+PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+id_emailAddress = _buildOid(pkcs_9, 1)
+
+common_name = univ.Integer(1)
+
+
+class X520Pseudonym(univ.Choice):
+ pass
+
+
+X520Pseudonym.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
+)
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class PresentationAddress(univ.Sequence):
+ pass
+
+
+PresentationAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ pass
+
+
+ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'e163-4-address', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+id_ad_caIssuers = _buildOid(id_ad, 2)
+
+id_at_countryName = _buildOid(id_at, 6)
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+postal_code = univ.Integer(9)
+
+id_at_givenName = _buildOid(id_at, 42)
+
+ub_title = univ.Integer(64)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+ub_emailaddress_length = univ.Integer(255)
+
+id_ad_caRepository = _buildOid(id_ad, 5)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+ub_organizational_unit_name = univ.Integer(64)
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ pass
+
+
+X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class X520Title(univ.Choice):
+ pass
+
+
+X520Title.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+)
+
+id_at_localityName = _buildOid(id_at, 7)
+
+id_at_initials = _buildOid(id_at, 43)
+
+ub_state_name = univ.Integer(128)
+
+
+class X520StateOrProvinceName(univ.Choice):
+ pass
+
+
+X520StateOrProvinceName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+)
+
+physical_delivery_organization_name = univ.Integer(14)
+
+id_at_surname = _buildOid(id_at, 4)
+
+
+class X520countryName(char.PrintableString):
+ pass
+
+
+X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+
+physical_delivery_office_number = univ.Integer(11)
+
+id_qt_unotice = _buildOid(id_qt, 2)
+
+
+class X520SerialNumber(char.PrintableString):
+ pass
+
+
+X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+ub_common_name = univ.Integer(64)
+
+id_pe = _buildOid(id_pkix, 1)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+class EmailAddress(char.IA5String):
+ pass
+
+
+EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+id_at_organizationName = _buildOid(id_at, 10)
+
+post_office_box_address = univ.Integer(18)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+id_at_pseudonym = _buildOid(id_at, 65)
+
+id_domainComponent = _buildOid(0, 9, 2342, 19200300, 100, 1, 25)
+
+
+class X520CommonName(univ.Choice):
+ pass
+
+
+X520CommonName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+)
+
+extension_OR_address_components = univ.Integer(12)
+
+ub_organizational_units = univ.Integer(4)
+
+teletex_personal_name = univ.Integer(4)
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+ub_common_name_length = univ.Integer(64)
+
+
+class TeletexCommonName(char.TeletexString):
+ pass
+
+
+TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
class CountryName(univ.Choice):
    """X.400 country name (X.121 DCC or ISO 3166 alpha-2), APPLICATION 1 tagged."""
    tagSet = univ.Choice.tagSet.tagExplicitly(
        tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
        namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
    )
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+
class BuiltInStandardAttributes(univ.Sequence):
    """Standard attributes of an X.400 O/R address; all components optional."""
    pass


BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('country-name', CountryName()),
    namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
    namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    # private-domain-name is EXPLICIT [2]; the other tagged members are IMPLICIT
    namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
    namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
    namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
    namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
    namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
)
+
+
class ORAddress(univ.Sequence):
    """Complete X.400 O/R address: standard, domain-defined and extension attributes."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
        namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
        namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
    )
+
+
+class DistinguishedName(RDNSequence):
+ pass
+
+
+id_ad_timeStamping = _buildOid(id_ad, 3)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+physical_delivery_country_name = univ.Integer(8)
+
+ub_pds_name_length = univ.Integer(16)
+
+
+class PDSName(char.PrintableString):
+ pass
+
+
+PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+class TeletexPersonalName(univ.Set):
+ pass
+
+
+TeletexPersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+street_address = univ.Integer(17)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+local_postal_attributes = univ.Integer(21)
+
+
class DirectoryString(univ.Choice):
    """Directory string (size 1..MAX) in any of the five permitted string types."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('teletexString', char.TeletexString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('printableString', char.PrintableString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('universalString', char.UniversalString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('utf8String', char.UTF8String().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('bmpString', char.BMPString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
    )
+
+teletex_common_name = univ.Integer(2)
+
+
+class CommonName(char.PrintableString):
+ pass
+
+
+CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
class Certificate(univ.Sequence):
    """Signed certificate: TBSCertificate, signature algorithm, signature bits."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tbsCertificate', TBSCertificate()),
        namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
        namedtype.NamedType('signature', univ.BitString())
    )
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+id_at_name = _buildOid(id_at, 41)
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
+TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+id_ce = _buildOid(2, 5, 29)
+
+id_ce_issuerAltName = _buildOid(id_ce, 18)
+
+
class SkipCerts(univ.Integer):
    """Non-negative certificate count used by policy-related extensions."""
    subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
class CRLReason(univ.Enumerated):
    """Revocation reason codes; value 7 is unused in RFC 5280."""
    namedValues = namedval.NamedValues(
        ('unspecified', 0),
        ('keyCompromise', 1),
        ('cACompromise', 2),
        ('affiliationChanged', 3),
        ('superseded', 4),
        ('cessationOfOperation', 5),
        ('certificateHold', 6),
        ('removeFromCRL', 8),
        ('privilegeWithdrawn', 9),
        ('aACompromise', 10)
    )
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ pass
+
+
+PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+anotherNameMap = {
+
+}
+
+
+class AnotherName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType(
+ 'value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('type-id', anotherNameMap)
+ )
+ )
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('partyName', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress',
+ univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+class BaseDistance(univ.Integer):
+ pass
+
+
+BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ pass
+
+
+GeneralSubtree.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class DistributionPointName(univ.Choice):
+ pass
+
+
+DistributionPointName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName',
+ GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ReasonFlags(univ.BitString):
+ pass
+
+
+ReasonFlags.namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('privilegeWithdrawn', 7),
+ ('aACompromise', 8)
+)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ pass
+
+
+IssuingDistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
+)
+
+id_ce_certificatePolicies = _buildOid(id_ce, 32)
+
+id_kp_emailProtection = _buildOid(id_kp, 4)
+
+
+class AccessDescription(univ.Sequence):
+ pass
+
+
+AccessDescription.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+)
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+id_ce_cRLDistributionPoints = _buildOid(id_ce, 31)
+
+holdInstruction = _buildOid(2, 2, 840, 10040, 2)
+
+id_holdinstruction_callissuer = _buildOid(holdInstruction, 2)
+
+id_ce_subjectDirectoryAttributes = _buildOid(id_ce, 9)
+
+id_ce_issuingDistributionPoint = _buildOid(id_ce, 28)
+
+
+class DistributionPoint(univ.Sequence):
+ pass
+
+
+DistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class CRLDistributionPoints(univ.SequenceOf):
+ pass
+
+
+CRLDistributionPoints.componentType = DistributionPoint()
+CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ pass
+
+
+GeneralSubtrees.componentType = GeneralSubtree()
+GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ pass
+
+
+NameConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ pass
+
+
+SubjectDirectoryAttributes.componentType = Attribute()
+SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_OCSPSigning = _buildOid(id_kp, 9)
+
+id_kp_timeStamping = _buildOid(id_kp, 8)
+
+
+class DisplayText(univ.Choice):
+ pass
+
+
+DisplayText.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+)
+
+
+class NoticeReference(univ.Sequence):
+ pass
+
+
+NoticeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+)
+
+
+class UserNotice(univ.Sequence):
+ pass
+
+
+UserNotice.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+)
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+policyQualifierInfoMap = {
+
+}
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType(
+ 'qualifier', univ.Any(),
+ openType=opentype.OpenType('policyQualifierId', policyQualifierInfoMap)
+ )
+ )
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyInformation(univ.Sequence):
+ pass
+
+
+PolicyInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
+)
+
+
+class CertificatePolicies(univ.SequenceOf):
+ pass
+
+
+CertificatePolicies.componentType = PolicyInformation()
+CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+id_ce_basicConstraints = _buildOid(id_ce, 19)
+
+id_ce_authorityKeyIdentifier = _buildOid(id_ce, 35)
+
+id_kp_codeSigning = _buildOid(id_kp, 3)
+
+
+class BasicConstraints(univ.Sequence):
+ pass
+
+
+BasicConstraints.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+id_ce_certificateIssuer = _buildOid(id_ce, 29)
+
+
+class PolicyMappings(univ.SequenceOf):
+ pass
+
+
+PolicyMappings.componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+ )
+)
+
+PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class InhibitAnyPolicy(SkipCerts):
+ pass
+
+
+anyPolicy = _buildOid(id_ce_certificatePolicies, 0)
+
+
+class CRLNumber(univ.Integer):
+ pass
+
+
+CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+id_ce_nameConstraints = _buildOid(id_ce, 30)
+
+id_kp_serverAuth = _buildOid(id_kp, 1)
+
+id_ce_freshestCRL = _buildOid(id_ce, 46)
+
+id_ce_cRLReasons = _buildOid(id_ce, 21)
+
+id_ce_extKeyUsage = _buildOid(id_ce, 37)
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ pass
+
+
+AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class FreshestCRL(CRLDistributionPoints):
+ pass
+
+
+id_ce_policyConstraints = _buildOid(id_ce, 36)
+
+id_pe_authorityInfoAccess = _buildOid(id_pe, 1)
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+AuthorityInfoAccessSyntax.componentType = AccessDescription()
+AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_holdinstruction_none = _buildOid(holdInstruction, 1)
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+id_pe_subjectInfoAccess = _buildOid(id_pe, 11)
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_ce_subjectAltName = _buildOid(id_ce, 17)
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ pass
+
+
+ExtKeyUsageSyntax.componentType = KeyPurposeId()
+ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+id_ce_deltaCRLIndicator = _buildOid(id_ce, 27)
+
+id_ce_keyUsage = _buildOid(id_ce, 15)
+
+id_ce_holdInstructionCode = _buildOid(id_ce, 23)
+
+
+class SubjectInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+SubjectInfoAccessSyntax.componentType = AccessDescription()
+SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+class KeyUsage(univ.BitString):
+ pass
+
+
+KeyUsage.namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+)
+
+id_ce_invalidityDate = _buildOid(id_ce, 24)
+
+id_ce_policyMappings = _buildOid(id_ce, 33)
+
+anyExtendedKeyUsage = _buildOid(id_ce_extKeyUsage, 0)
+
+id_ce_privateKeyUsagePeriod = _buildOid(id_ce, 16)
+
+id_ce_cRLNumber = _buildOid(id_ce, 20)
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+id_holdinstruction_reject = _buildOid(holdInstruction, 3)
+
+
+class PolicyConstraints(univ.Sequence):
+ pass
+
+
+PolicyConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_kp_clientAuth = _buildOid(id_kp, 2)
+
+id_ce_subjectKeyIdentifier = _buildOid(id_ce, 14)
+
+id_ce_inhibitAnyPolicy = _buildOid(id_ce, 54)
+
+# map of ORAddress ExtensionAttribute type to ExtensionAttribute value
+
+_oraddressExtensionAttributeMapUpdate = {
+ common_name: CommonName(),
+ teletex_common_name: TeletexCommonName(),
+ teletex_organization_name: TeletexOrganizationName(),
+ teletex_personal_name: TeletexPersonalName(),
+ teletex_organizational_unit_names: TeletexOrganizationalUnitNames(),
+ pds_name: PDSName(),
+ physical_delivery_country_name: PhysicalDeliveryCountryName(),
+ postal_code: PostalCode(),
+ physical_delivery_office_name: PhysicalDeliveryOfficeName(),
+ physical_delivery_office_number: PhysicalDeliveryOfficeNumber(),
+ extension_OR_address_components: ExtensionORAddressComponents(),
+ physical_delivery_personal_name: PhysicalDeliveryPersonalName(),
+ physical_delivery_organization_name: PhysicalDeliveryOrganizationName(),
+ extension_physical_delivery_address_components: ExtensionPhysicalDeliveryAddressComponents(),
+ unformatted_postal_address: UnformattedPostalAddress(),
+ street_address: StreetAddress(),
+ post_office_box_address: PostOfficeBoxAddress(),
+ poste_restante_address: PosteRestanteAddress(),
+ unique_postal_name: UniquePostalName(),
+ local_postal_attributes: LocalPostalAttributes(),
+ extended_network_address: ExtendedNetworkAddress(),
+ terminal_type: TerminalType(),
+ teletex_domain_defined_attributes: TeletexDomainDefinedAttributes(),
+}
+
+oraddressExtensionAttributeMap.update(_oraddressExtensionAttributeMapUpdate)
+
+
+# map of AttributeType -> AttributeValue
+
+_certificateAttributesMapUpdate = {
+ id_at_name: X520name(),
+ id_at_surname: X520name(),
+ id_at_givenName: X520name(),
+ id_at_initials: X520name(),
+ id_at_generationQualifier: X520name(),
+ id_at_commonName: X520CommonName(),
+ id_at_localityName: X520LocalityName(),
+ id_at_stateOrProvinceName: X520StateOrProvinceName(),
+ id_at_organizationName: X520OrganizationName(),
+ id_at_organizationalUnitName: X520OrganizationalUnitName(),
+ id_at_title: X520Title(),
+ id_at_dnQualifier: X520dnQualifier(),
+ id_at_countryName: X520countryName(),
+ id_at_serialNumber: X520SerialNumber(),
+ id_at_pseudonym: X520Pseudonym(),
+ id_domainComponent: DomainComponent(),
+ id_emailAddress: EmailAddress(),
+}
+
+certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# map of Certificate Extension OIDs to Extensions
+
+_certificateExtensionsMap = {
+ id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
+ id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
+ id_ce_keyUsage: KeyUsage(),
+ id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
+ id_ce_certificatePolicies: CertificatePolicies(),
+ id_ce_policyMappings: PolicyMappings(),
+ id_ce_subjectAltName: SubjectAltName(),
+ id_ce_issuerAltName: IssuerAltName(),
+ id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
+ id_ce_basicConstraints: BasicConstraints(),
+ id_ce_nameConstraints: NameConstraints(),
+ id_ce_policyConstraints: PolicyConstraints(),
+ id_ce_extKeyUsage: ExtKeyUsageSyntax(),
+ id_ce_cRLDistributionPoints: CRLDistributionPoints(),
+ id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
+ id_ce_cRLNumber: univ.Integer(),
+ id_ce_deltaCRLIndicator: BaseCRLNumber(),
+ id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
+ id_ce_cRLReasons: CRLReason(),
+ id_ce_holdInstructionCode: univ.ObjectIdentifier(),
+ id_ce_invalidityDate: useful.GeneralizedTime(),
+ id_ce_certificateIssuer: GeneralNames(),
+}
+
+certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5480.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5480.py
new file mode 100644
index 0000000000..84c0c11b88
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5480.py
@@ -0,0 +1,190 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography Subject Public Key Information
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5480.txt
+
+
+# What can be imported from rfc4055.py ?
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+
+
+# These structures are the same as RFC 3279.
+
+DHPublicKey = rfc3279.DHPublicKey
+
+DSAPublicKey = rfc3279.DSAPublicKey
+
+ValidationParms = rfc3279.ValidationParms
+
+DomainParameters = rfc3279.DomainParameters
+
+ECDSA_Sig_Value = rfc3279.ECDSA_Sig_Value
+
+ECPoint = rfc3279.ECPoint
+
+KEA_Parms_Id = rfc3279.KEA_Parms_Id
+
+RSAPublicKey = rfc3279.RSAPublicKey
+
+
+# RFC 5480 changed the names of these structures from RFC 3279.
+
+DSS_Parms = rfc3279.Dss_Parms
+
+DSA_Sig_Value = rfc3279.Dss_Sig_Value
+
+
+# RFC 3279 defines a more complex alternative for ECParameters.
+# RFC 5480 narrows the definition to a single CHOICE: namedCurve.
+
+class ECParameters(univ.Choice):
+ pass
+
+ECParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('namedCurve', univ.ObjectIdentifier())
+)
+
+
+# OIDs for Message Digest Algorithms
+
+id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2')
+
+id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5')
+
+id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
+
+id_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.4')
+
+id_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1')
+
+id_sha384 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2')
+
+id_sha512 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.3')
+
+
+# OID for RSA PK Algorithm and Key
+
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+
+
+# OID for DSA PK Algorithm, Key, and Parameters
+
+id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
+
+
+# OID for Diffie-Hellman PK Algorithm, Key, and Parameters
+
+dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
+
+# OID for KEA PK Algorithm and Parameters
+
+id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22')
+
+
+# OIDs for Elliptic Curve Algorithm ID, Key, and Parameters
+# Note that ECDSA keys always use this OID
+
+id_ecPublicKey = univ.ObjectIdentifier('1.2.840.10045.2.1')
+
+id_ecDH = univ.ObjectIdentifier('1.3.132.1.12')
+
+id_ecMQV = univ.ObjectIdentifier('1.3.132.1.13')
+
+
+# OIDs for RSA Signature Algorithms
+
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+
+
+# OIDs for DSA Signature Algorithms
+
+id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
+
+id_dsa_with_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.1')
+
+id_dsa_with_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.2')
+
+
+# OIDs for ECDSA Signature Algorithms
+
+ecdsa_with_SHA1 = univ.ObjectIdentifier('1.2.840.10045.4.1')
+
+ecdsa_with_SHA224 = univ.ObjectIdentifier('1.2.840.10045.4.3.1')
+
+ecdsa_with_SHA256 = univ.ObjectIdentifier('1.2.840.10045.4.3.2')
+
+ecdsa_with_SHA384 = univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+
+ecdsa_with_SHA512 = univ.ObjectIdentifier('1.2.840.10045.4.3.4')
+
+
+# OIDs for Named Elliptic Curves
+
+secp192r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.1')
+
+sect163k1 = univ.ObjectIdentifier('1.3.132.0.1')
+
+sect163r2 = univ.ObjectIdentifier('1.3.132.0.15')
+
+secp224r1 = univ.ObjectIdentifier('1.3.132.0.33')
+
+sect233k1 = univ.ObjectIdentifier('1.3.132.0.26')
+
+sect233r1 = univ.ObjectIdentifier('1.3.132.0.27')
+
+secp256r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.7')
+
+sect283k1 = univ.ObjectIdentifier('1.3.132.0.16')
+
+sect283r1 = univ.ObjectIdentifier('1.3.132.0.17')
+
+secp384r1 = univ.ObjectIdentifier('1.3.132.0.34')
+
+sect409k1 = univ.ObjectIdentifier('1.3.132.0.36')
+
+sect409r1 = univ.ObjectIdentifier('1.3.132.0.37')
+
+secp521r1 = univ.ObjectIdentifier('1.3.132.0.35')
+
+sect571k1 = univ.ObjectIdentifier('1.3.132.0.38')
+
+sect571r1 = univ.ObjectIdentifier('1.3.132.0.39')
+
+
+# Map of Algorithm Identifier OIDs to Parameters
+# The algorithm is not included if the parameters MUST be absent
+
+_algorithmIdentifierMapUpdate = {
+ rsaEncryption: univ.Null(),
+ md2WithRSAEncryption: univ.Null(),
+ md5WithRSAEncryption: univ.Null(),
+ sha1WithRSAEncryption: univ.Null(),
+ id_dsa: DSS_Parms(),
+ dhpublicnumber: DomainParameters(),
+ id_keyExchangeAlgorithm: KEA_Parms_Id(),
+ id_ecPublicKey: ECParameters(),
+ id_ecDH: ECParameters(),
+ id_ecMQV: ECParameters(),
+}
+
+
+# Add these Algorithm Identifier map entries to the ones in rfc5280.py
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5636.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5636.py
new file mode 100644
index 0000000000..f87bc4ec82
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5636.py
@@ -0,0 +1,113 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Traceable Anonymous Certificate
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5636.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+EncapsulatedContentInfo = rfc5652.EncapsulatedContentInfo
+
+id_data = rfc5652.id_data
+
+
+# Object Identifiers
+
+id_KISA = univ.ObjectIdentifier((1, 2, 410, 200004,))
+
+
+id_npki = id_KISA + (10,)
+
+
+id_attribute = id_npki + (1,)
+
+
+id_kisa_tac = id_attribute + (1,)
+
+
+id_kisa_tac_token = id_kisa_tac + (1,)
+
+
+id_kisa_tac_tokenandblindbash = id_kisa_tac + (2,)
+
+
+id_kisa_tac_tokenandpartially = id_kisa_tac + (3,)
+
+
+# Structures for Traceable Anonymous Certificate (TAC)
+
+class UserKey(univ.OctetString):
+ pass
+
+
+class Timeout(useful.GeneralizedTime):
+ pass
+
+
+class BlinedCertificateHash(univ.OctetString):
+ pass
+
+
+class PartiallySignedCertificateHash(univ.OctetString):
+ pass
+
+
+class Token(ContentInfo):
+ pass
+
+
+class TokenandBlindHash(ContentInfo):
+ pass
+
+
+class TokenandPartiallySignedCertificateHash(ContentInfo):
+ pass
+
+
+# Added to the module in RFC 5636 for the CMS Content Type Map
+
+class TACToken(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userKey', UserKey()),
+ namedtype.NamedType('timeout', Timeout())
+ )
+
+
+class TACTokenandBlindHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('token', Token()),
+ namedtype.NamedType('blinded', BlinedCertificateHash())
+ )
+
+
+class TACTokenandPartiallySignedCertificateHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('token', Token()),
+ namedtype.NamedType('partially', PartiallySignedCertificateHash())
+ )
+
+
+# Add to the CMS Content Type Map in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_kisa_tac_token: TACToken(),
+ id_kisa_tac_tokenandblindbash: TACTokenandBlindHash(),
+ id_kisa_tac_tokenandpartially: TACTokenandPartiallySignedCertificateHash(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5639.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5639.py
new file mode 100644
index 0000000000..d48d30044b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5639.py
@@ -0,0 +1,49 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography Brainpool Standard Curves
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5639.txt
+
+
+from pyasn1.type import univ
+
+
+ecStdCurvesAndGeneration = univ.ObjectIdentifier((1, 3, 36, 3, 3, 2, 8,))
+
+ellipticCurve = ecStdCurvesAndGeneration + (1,)
+
+versionOne = ellipticCurve + (1,)
+
+brainpoolP160r1 = versionOne + (1,)
+
+brainpoolP160t1 = versionOne + (2,)
+
+brainpoolP192r1 = versionOne + (3,)
+
+brainpoolP192t1 = versionOne + (4,)
+
+brainpoolP224r1 = versionOne + (5,)
+
+brainpoolP224t1 = versionOne + (6,)
+
+brainpoolP256r1 = versionOne + (7,)
+
+brainpoolP256t1 = versionOne + (8,)
+
+brainpoolP320r1 = versionOne + (9,)
+
+brainpoolP320t1 = versionOne + (10,)
+
+brainpoolP384r1 = versionOne + (11,)
+
+brainpoolP384t1 = versionOne + (12,)
+
+brainpoolP512r1 = versionOne + (13,)
+
+brainpoolP512t1 = versionOne + (14,)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5649.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5649.py
new file mode 100644
index 0000000000..84809eeb18
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5649.py
@@ -0,0 +1,33 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES Key Wrap with Padding
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5649.txt
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5')
+
+id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25')
+
+id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45')
+
+
+id_aes128_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.8')
+
+id_aes192_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.28')
+
+id_aes256_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.48')
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5652.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5652.py
new file mode 100644
index 0000000000..1e958293df
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5652.py
@@ -0,0 +1,761 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add support for opentypes.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc5652.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3281
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+cmsContentTypesMap = { }
+
+cmsAttributesMap = { }
+
+otherKeyAttributesMap = { }
+
+otherCertFormatMap = { }
+
+otherRevInfoFormatMap = { }
+
+otherRecipientInfoMap = { }
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
+ namedtype.NamedType(
+ 'subject', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subjectName', rfc5280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('issuer', rfc5280.GeneralNames()),
+ namedtype.NamedType('signature', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc5280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc5280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc5280.Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmsAttributesMap)
+ )
+)
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class AttributeCertificateV2(rfc3281.AttributeCertificate):
+ pass
+
+
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any(),
+ openType=opentype.OpenType('keyAttrId', otherKeyAttributesMap)
+ )
+)
+
+
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class MessageAuthenticationCode(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any(),
+ openType=opentype.OpenType('otherCertFormat', otherCertFormatMap)
+ )
+)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any(),
+ openType=opentype.OpenType('otherRevInfoFormat', otherRevInfoFormatMap)
+ )
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', rfc5280.CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any(),
+ openType=opentype.OpenType('oriType', otherRecipientInfoMap)
+ )
+)
+
+
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DigestAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
+
+id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
+
+id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
+
+
+class MessageAuthenticationCodeAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+class Countersignature(SignerInfo):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('contentType', cmsContentTypesMap)
+ )
+)
+
+
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
+
+id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+class AuthAttributes(univ.SetOf):
+ pass
+
+
+AuthAttributes.componentType = Attribute()
+AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class AuthenticatedData(univ.Sequence):
+ pass
+
+
+AuthenticatedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
+ namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('mac', MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class DigestedData(univ.Sequence):
+ pass
+
+
+DigestedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.NamedType('digest', Digest())
+)
+
+id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
+
+
+class SigningTime(Time):
+ pass
+
+
+id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
+
+
+# CMS Content Type Map
+
+_cmsContentTypesMapUpdate = {
+ id_ct_contentInfo: ContentInfo(),
+ id_data: univ.OctetString(),
+ id_signedData: SignedData(),
+ id_envelopedData: EnvelopedData(),
+ id_digestedData: DigestedData(),
+ id_encryptedData: EncryptedData(),
+ id_ct_authData: AuthenticatedData(),
+}
+
+cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ id_contentType: ContentType(),
+ id_messageDigest: MessageDigest(),
+ id_signingTime: SigningTime(),
+ id_countersignature: Countersignature(),
+}
+
+cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5697.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5697.py
new file mode 100644
index 0000000000..8c5a9d3ecf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5697.py
@@ -0,0 +1,70 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Other Certificates Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5697.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4055
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+GeneralNames = rfc5280.GeneralNames
+
+
+# Imports from RFC 4055
+
+id_sha1 = rfc4055.id_sha1
+
+
+# Imports from RFC 5055
+# These are defined here because a module for RFC 5055 does not exist yet
+
+class SCVPIssuerSerial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+sha1_alg_id = AlgorithmIdentifier()
+sha1_alg_id['algorithm'] = id_sha1
+
+
+class SCVPCertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', univ.OctetString()),
+ namedtype.NamedType('issuerSerial', SCVPIssuerSerial()),
+ namedtype.DefaultedNamedType('hashAlgorithm', sha1_alg_id)
+ )
+
+
+# Other Certificates Extension
+
+id_pe_otherCerts = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 1, 19,))
+
+class OtherCertificates(univ.SequenceOf):
+ componentType = SCVPCertID()
+
+
+# Update of certificate extension map in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_otherCerts: OtherCertificates(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5751.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5751.py
new file mode 100644
index 0000000000..7e200012c6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5751.py
@@ -0,0 +1,124 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# S/MIME Version 3.2 Message Specification
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5751.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8018
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 5652 and RFC 8018
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+RecipientKeyIdentifier = rfc5652.RecipientKeyIdentifier
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+rc2CBC = rfc8018.rc2CBC
+
+
+# S/MIME Capabilities Attribute
+
+smimeCapabilities = univ.ObjectIdentifier('1.2.840.113549.1.9.15')
+
+
+smimeCapabilityMap = { }
+
+
+class SMIMECapability(univ.Sequence):
+ pass
+
+SMIMECapability.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('capabilityID', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any(),
+ openType=opentype.OpenType('capabilityID', smimeCapabilityMap))
+)
+
+
+class SMIMECapabilities(univ.SequenceOf):
+ pass
+
+SMIMECapabilities.componentType = SMIMECapability()
+
+
+class SMIMECapabilitiesParametersForRC2CBC(univ.Integer):
+ # which carries the RC2 Key Length (number of bits)
+ pass
+
+
+# S/MIME Encryption Key Preference Attribute
+
+id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16')
+
+id_aa = _OID(id_smime, 2)
+
+id_aa_encrypKeyPref = _OID(id_aa, 11)
+
+
+class SMIMEEncryptionKeyPreference(univ.Choice):
+ pass
+
+SMIMEEncryptionKeyPreference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber',
+ IssuerAndSerialNumber().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receipentKeyId',
+ # Yes, 'receipentKeyId' is spelled incorrectly, but kept
+ # this way for alignment with the ASN.1 module in the RFC.
+ RecipientKeyIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('subjectAltKeyIdentifier',
+ SubjectKeyIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# The Prefer Binary Inside SMIMECapabilities attribute
+
+id_cap = _OID(id_smime, 11)
+
+id_cap_preferBinaryInside = _OID(id_cap, 1)
+
+
+# CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ smimeCapabilities: SMIMECapabilities(),
+ id_aa_encrypKeyPref: SMIMEEncryptionKeyPreference(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# SMIMECapabilities Attribute Map
+#
+# Do not include OIDs in the dictionary when the parameters are absent.
+
+_smimeCapabilityMapUpdate = {
+ rc2CBC: SMIMECapabilitiesParametersForRC2CBC(),
+}
+
+smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5752.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5752.py
new file mode 100644
index 0000000000..1d0df8f459
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5752.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Multiple Signatures in Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5752.txt
+# https://www.rfc-editor.org/errata/eid4444
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5652
+
+
+class SignAttrsHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algID', rfc5652.DigestAlgorithmIdentifier()),
+ namedtype.NamedType('hash', univ.OctetString())
+ )
+
+
+class MultipleSignatures(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyHashAlg', rfc5652.DigestAlgorithmIdentifier()),
+ namedtype.NamedType('signAlg', rfc5652.SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signAttrsHash', SignAttrsHash()),
+ namedtype.OptionalNamedType('cert', rfc5035.ESSCertIDv2())
+ )
+
+
+id_aa_multipleSignatures = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.51')
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_multipleSignatures: MultipleSignatures(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5753.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5753.py
new file mode 100644
index 0000000000..94c37c2ab1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5753.py
@@ -0,0 +1,157 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography (ECC) Algorithms in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5753.txt
+#
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc8018
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 5652
+
+OriginatorPublicKey = rfc5652.OriginatorPublicKey
+
+UserKeyingMaterial = rfc5652.UserKeyingMaterial
+
+
+# Imports from RFC 5480
+
+ECDSA_Sig_Value = rfc5480.ECDSA_Sig_Value
+
+ECParameters = rfc5480.ECParameters
+
+ECPoint = rfc5480.ECPoint
+
+id_ecPublicKey = rfc5480.id_ecPublicKey
+
+
+# Imports from RFC 8018
+
+id_hmacWithSHA224 = rfc8018.id_hmacWithSHA224
+
+id_hmacWithSHA256 = rfc8018.id_hmacWithSHA256
+
+id_hmacWithSHA384 = rfc8018.id_hmacWithSHA384
+
+id_hmacWithSHA512 = rfc8018.id_hmacWithSHA512
+
+
+# Object Identifier arcs
+
+x9_63_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0')
+
+secg_scheme = univ.ObjectIdentifier('1.3.132.1')
+
+
+# Object Identifiers for the algorithms
+
+dhSinglePass_cofactorDH_sha1kdf_scheme = x9_63_scheme + (3, )
+
+dhSinglePass_cofactorDH_sha224kdf_scheme = secg_scheme + (14, 0, )
+
+dhSinglePass_cofactorDH_sha256kdf_scheme = secg_scheme + (14, 1, )
+
+dhSinglePass_cofactorDH_sha384kdf_scheme = secg_scheme + (14, 2, )
+
+dhSinglePass_cofactorDH_sha512kdf_scheme = secg_scheme + (14, 3, )
+
+dhSinglePass_stdDH_sha1kdf_scheme = x9_63_scheme + (2, )
+
+dhSinglePass_stdDH_sha224kdf_scheme = secg_scheme + (11, 0, )
+
+dhSinglePass_stdDH_sha256kdf_scheme = secg_scheme + (11, 1, )
+
+dhSinglePass_stdDH_sha384kdf_scheme = secg_scheme + (11, 2, )
+
+dhSinglePass_stdDH_sha512kdf_scheme = secg_scheme + (11, 3, )
+
+mqvSinglePass_sha1kdf_scheme = x9_63_scheme + (16, )
+
+mqvSinglePass_sha224kdf_scheme = secg_scheme + (15, 0, )
+
+mqvSinglePass_sha256kdf_scheme = secg_scheme + (15, 1, )
+
+mqvSinglePass_sha384kdf_scheme = secg_scheme + (15, 2, )
+
+mqvSinglePass_sha512kdf_scheme = secg_scheme + (15, 3, )
+
+
+# Structures for parameters and key derivation
+
+class IV(univ.OctetString):
+ # Exactly 8 octets
+ pass
+
+
+class CBCParameter(IV):
+ pass
+
+
+class KeyWrapAlgorithm(AlgorithmIdentifier):
+ pass
+
+
+class ECC_CMS_SharedInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyInfo', KeyWrapAlgorithm()),
+ namedtype.OptionalNamedType('entityUInfo',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('suppPubInfo',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class MQVuserKeyingMaterial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ephemeralPublicKey', OriginatorPublicKey()),
+ namedtype.OptionalNamedType('addedukm',
+ UserKeyingMaterial().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py and
+# Update the SMIMECapabilities Attribute Map in rfc5751.py
+
+_algorithmIdentifierMapUpdate = {
+ dhSinglePass_stdDH_sha1kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha224kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha256kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha384kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha512kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha1kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha224kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha256kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha384kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha512kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha1kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha224kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha256kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha384kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha512kdf_scheme: KeyWrapAlgorithm(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+rfc5751.smimeCapabilityMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5755.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5755.py
new file mode 100644
index 0000000000..14f56fc600
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5755.py
@@ -0,0 +1,398 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5755.txt
+# https://www.rfc-editor.org/rfc/rfc5912.txt (see Section 13)
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+# Map for Security Category type to value
+
+securityCategoryMap = { }
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax
+
+AuthorityKeyIdentifier = rfc5280.AuthorityKeyIdentifier
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+CRLDistributionPoints = rfc5280.CRLDistributionPoints
+
+Extensions = rfc5280.Extensions
+
+Extension = rfc5280.Extension
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+UniqueIdentifier = rfc5280.UniqueIdentifier
+
+
+# Object Identifier arcs
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+id_pe = id_pkix + (1, )
+
+id_kp = id_pkix + (3, )
+
+id_aca = id_pkix + (10, )
+
+id_ad = id_pkix + (48, )
+
+id_at = univ.ObjectIdentifier((2, 5, 4, ))
+
+id_ce = univ.ObjectIdentifier((2, 5, 29, ))
+
+
+# Attribute Certificate
+
+class AttCertVersion(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('v2', 1)
+ )
+
+
+class IssuerSerial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serial', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier())
+ )
+
+
+class ObjectDigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType',
+ univ.Enumerated(namedValues=namedval.NamedValues(
+ ('publicKey', 0),
+ ('publicKeyCert', 1),
+ ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID',
+ univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm',
+ AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest',
+ univ.BitString())
+ )
+
+
+class Holder(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('baseCertificateID',
+ IssuerSerial().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('entityName',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('objectDigestInfo',
+ ObjectDigestInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class V2Form(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerName',
+ GeneralNames()),
+ namedtype.OptionalNamedType('baseCertificateID',
+ IssuerSerial().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('objectDigestInfo',
+ ObjectDigestInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class AttCertIssuer(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('v1Form', GeneralNames()),
+ namedtype.NamedType('v2Form', V2Form().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+ )
+
+
+class AttributeCertificateInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ AttCertVersion()),
+ namedtype.NamedType('holder',
+ Holder()),
+ namedtype.NamedType('issuer',
+ AttCertIssuer()),
+ namedtype.NamedType('signature',
+ AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber',
+ CertificateSerialNumber()),
+ namedtype.NamedType('attrCertValidityPeriod',
+ AttCertValidityPeriod()),
+ namedtype.NamedType('attributes',
+ univ.SequenceOf(componentType=Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID',
+ UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions',
+ Extensions())
+ )
+
+
+class AttributeCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acinfo', AttributeCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+ )
+
+
+# Attribute Certificate Extensions
+
+id_pe_ac_auditIdentity = id_pe + (4, )
+
+id_ce_noRevAvail = id_ce + (56, )
+
+id_ce_targetInformation = id_ce + (55, )
+
+
+class TargetCert(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetCertificate', IssuerSerial()),
+ namedtype.OptionalNamedType('targetName', GeneralName()),
+ namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
+ )
+
+
+class Target(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetName',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('targetGroup',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('targetCert',
+ TargetCert().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class Targets(univ.SequenceOf):
+ componentType = Target()
+
+
+id_pe_ac_proxying = id_pe + (10, )
+
+
+class ProxyInfo(univ.SequenceOf):
+ componentType = Targets()
+
+
+id_pe_aaControls = id_pe + (6, )
+
+
+class AttrSpec(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class AAControls(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.OptionalNamedType('permittedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('permitUnSpecified',
+ univ.Boolean().subtype(value=1))
+ )
+
+
+# Attribute Certificate Attributes
+
+id_aca_authenticationInfo = id_aca + (1, )
+
+
+id_aca_accessIdentity = id_aca + (2, )
+
+
+class SvceAuthInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('service', GeneralName()),
+ namedtype.NamedType('ident', GeneralName()),
+ namedtype.OptionalNamedType('authInfo', univ.OctetString())
+ )
+
+
+id_aca_chargingIdentity = id_aca + (3, )
+
+
+id_aca_group = id_aca + (4, )
+
+
+class IetfAttrSyntax(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('policyAuthority',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('values', univ.SequenceOf(
+ componentType=univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('octets', univ.OctetString()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('string', char.UTF8String())
+ ))
+ ))
+ )
+
+
+id_at_role = id_at + (72,)
+
+
+class RoleSyntax(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('roleAuthority',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('roleName',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ClassList(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('topSecret', 5)
+ )
+
+
+class SecurityCategory(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type',
+ univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value',
+ univ.Any().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)),
+ openType=opentype.OpenType('type', securityCategoryMap))
+ )
+
+
+id_at_clearance = univ.ObjectIdentifier((2, 5, 4, 55, ))
+
+
+class Clearance(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId',
+ univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(value='unclassified')),
+ namedtype.OptionalNamedType('securityCategories',
+ univ.SetOf(componentType=SecurityCategory()))
+ )
+
+
+id_at_clearance_rfc3281 = univ.ObjectIdentifier((2, 5, 1, 5, 55, ))
+
+
+class Clearance_rfc3281(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId',
+ univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(
+ value='unclassified')),
+ namedtype.OptionalNamedType('securityCategories',
+ univ.SetOf(componentType=SecurityCategory()).subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+id_aca_encAttrs = id_aca + (6, )
+
+
+class ACClearAttrs(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acIssuer', GeneralName()),
+ namedtype.NamedType('acSerial', univ.Integer()),
+ namedtype.NamedType('attrs', univ.SequenceOf(componentType=Attribute()))
+ )
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ac_auditIdentity: univ.OctetString(),
+ id_ce_noRevAvail: univ.Null(),
+ id_ce_targetInformation: Targets(),
+ id_pe_ac_proxying: ProxyInfo(),
+ id_pe_aaControls: AAControls(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_authenticationInfo: SvceAuthInfo(),
+ id_aca_accessIdentity: SvceAuthInfo(),
+ id_aca_chargingIdentity: IetfAttrSyntax(),
+ id_aca_group: IetfAttrSyntax(),
+ id_at_role: RoleSyntax(),
+ id_at_clearance: Clearance(),
+ id_at_clearance_rfc3281: Clearance_rfc3281(),
+ id_aca_encAttrs: ContentInfo(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5913.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5913.py
new file mode 100644
index 0000000000..0bd065330d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5913.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authority Clearance Constraints Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5913.txt
+# https://www.rfc-editor.org/errata/eid5890
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+
+MAX = float('inf')
+
+
+# Authority Clearance Constraints Certificate Extension
+
+id_pe_clearanceConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.21')
+
+id_pe_authorityClearanceConstraints = id_pe_clearanceConstraints
+
+
+class AuthorityClearanceConstraints(univ.SequenceOf):
+ componentType = rfc5755.Clearance()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_clearanceConstraints: AuthorityClearanceConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5914.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5914.py
new file mode 100644
index 0000000000..d125ea2a65
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5914.py
@@ -0,0 +1,119 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Trust Anchor Format
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5914.txt
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+MAX = float('inf')
+
+Certificate = rfc5280.Certificate
+
+Name = rfc5280.Name
+
+Extensions = rfc5280.Extensions
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+TBSCertificate = rfc5280.TBSCertificate
+
+CertificatePolicies = rfc5280.CertificatePolicies
+
+KeyIdentifier = rfc5280.KeyIdentifier
+
+NameConstraints = rfc5280.NameConstraints
+
+
+class CertPolicyFlags(univ.BitString):
+ pass
+
+CertPolicyFlags.namedValues = namedval.NamedValues(
+ ('inhibitPolicyMapping', 0),
+ ('requireExplicitPolicy', 1),
+ ('inhibitAnyPolicy', 2)
+)
+
+
+class CertPathControls(univ.Sequence):
+ pass
+
+CertPathControls.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taName', Name()),
+ namedtype.OptionalNamedType('certificate', Certificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('policySet', CertificatePolicies().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('policyFlags', CertPolicyFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('nameConstr', NameConstraints().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('pathLenConstraint', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class TrustAnchorTitle(char.UTF8String):
+ pass
+
+TrustAnchorTitle.subtypeSpec = constraint.ValueSizeConstraint(1, 64)
+
+
+class TrustAnchorInfoVersion(univ.Integer):
+ pass
+
+TrustAnchorInfoVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+class TrustAnchorInfo(univ.Sequence):
+ pass
+
+TrustAnchorInfo.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TrustAnchorInfoVersion().subtype(value='v1')),
+ namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
+ namedtype.NamedType('keyId', KeyIdentifier()),
+ namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
+ namedtype.OptionalNamedType('certPath', CertPathControls()),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('taTitleLangTag', char.UTF8String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class TrustAnchorChoice(univ.Choice):
+ pass
+
+TrustAnchorChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('tbsCert', TBSCertificate().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('taInfo', TrustAnchorInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+id_ct_trustAnchorList = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.34')
+
+class TrustAnchorList(univ.SequenceOf):
+ pass
+
+TrustAnchorList.componentType = TrustAnchorChoice()
+TrustAnchorList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5915.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5915.py
new file mode 100644
index 0000000000..82ff4a338b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5915.py
@@ -0,0 +1,32 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Private Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5915.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5480
+
+
+class ECPrivateKey(univ.Sequence):
+ pass
+
+ECPrivateKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(
+ namedValues=namedval.NamedValues(('ecPrivkeyVer1', 1)))),
+ namedtype.NamedType('privateKey', univ.OctetString()),
+ namedtype.OptionalNamedType('parameters', rfc5480.ECParameters().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('publicKey', univ.BitString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5916.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5916.py
new file mode 100644
index 0000000000..ac23c86b79
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5916.py
@@ -0,0 +1,35 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Device Owner Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5916.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Device Owner Attribute
+
+id_deviceOwner = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 69))
+
+at_deviceOwner = rfc5280.Attribute()
+at_deviceOwner['type'] = id_deviceOwner
+at_deviceOwner['values'][0] = univ.ObjectIdentifier()
+
+
+# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py.
+
+_certificateAttributesMapUpdate = {
+ id_deviceOwner: univ.ObjectIdentifier(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5917.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5917.py
new file mode 100644
index 0000000000..ed9af987db
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5917.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Clearance Sponsor Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5917.txt
+# https://www.rfc-editor.org/errata/eid4558
+# https://www.rfc-editor.org/errata/eid5883
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# DirectoryString is the same as RFC 5280, except for two things:
+# 1. the length is limited to 64;
+# 2. only the 'utf8String' choice remains because the ASN.1
+# specification says: ( WITH COMPONENTS { utf8String PRESENT } )
+
+class DirectoryString(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 64))),
+ )
+
+
+# Clearance Sponsor Attribute
+
+id_clearanceSponsor = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 68))
+
+ub_clearance_sponsor = univ.Integer(64)
+
+
+at_clearanceSponsor = rfc5280.Attribute()
+at_clearanceSponsor['type'] = id_clearanceSponsor
+at_clearanceSponsor['values'][0] = DirectoryString()
+
+
+# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py.
+
+_certificateAttributesMapUpdate = {
+ id_clearanceSponsor: DirectoryString(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5924.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5924.py
new file mode 100644
index 0000000000..4358e4f529
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5924.py
@@ -0,0 +1,19 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Extended Key Usage (EKU) for Session Initiation Protocol (SIP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5924.txt
+#
+
+from pyasn1.type import univ
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_sipDomain = id_kp + (20, )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5934.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5934.py
new file mode 100644
index 0000000000..e3ad247aa0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5934.py
@@ -0,0 +1,786 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Trust Anchor Format
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5934.txt
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5914
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 2985
+
+SingleAttribute = rfc2985.SingleAttribute
+
+
+# Imports from RFC5914
+
+CertPathControls = rfc5914.CertPathControls
+
+TrustAnchorChoice = rfc5914.TrustAnchorChoice
+
+TrustAnchorTitle = rfc5914.TrustAnchorTitle
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+AnotherName = rfc5280.AnotherName
+
+Attribute = rfc5280.Attribute
+
+Certificate = rfc5280.Certificate
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+Extension = rfc5280.Extension
+
+Extensions = rfc5280.Extensions
+
+KeyIdentifier = rfc5280.KeyIdentifier
+
+Name = rfc5280.Name
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+TBSCertificate = rfc5280.TBSCertificate
+
+Validity = rfc5280.Validity
+
+
+# Object Identifier Arc for TAMP Message Content Types
+
+id_tamp = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.77')
+
+
+# TAMP Status Query Message
+
+id_ct_TAMP_statusQuery = _OID(id_tamp, 1)
+
+
+class TAMPVersion(univ.Integer):
+ pass
+
+TAMPVersion.namedValues = namedval.NamedValues(
+ ('v1', 1),
+ ('v2', 2)
+)
+
+
+class TerseOrVerbose(univ.Enumerated):
+ pass
+
+TerseOrVerbose.namedValues = namedval.NamedValues(
+ ('terse', 1),
+ ('verbose', 2)
+)
+
+
+class HardwareSerialEntry(univ.Choice):
+ pass
+
+HardwareSerialEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('all', univ.Null()),
+ namedtype.NamedType('single', univ.OctetString()),
+ namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('low', univ.OctetString()),
+ namedtype.NamedType('high', univ.OctetString())
+ ))
+ )
+)
+
+
+class HardwareModules(univ.Sequence):
+ pass
+
+HardwareModules.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialEntries', univ.SequenceOf(
+ componentType=HardwareSerialEntry()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class HardwareModuleIdentifierList(univ.SequenceOf):
+ pass
+
+HardwareModuleIdentifierList.componentType = HardwareModules()
+HardwareModuleIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class Community(univ.ObjectIdentifier):
+ pass
+
+
+class CommunityIdentifierList(univ.SequenceOf):
+ pass
+
+CommunityIdentifierList.componentType = Community()
+CommunityIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(0, MAX)
+
+
+class TargetIdentifier(univ.Choice):
+ pass
+
+TargetIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwModules', HardwareModuleIdentifierList().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('communities', CommunityIdentifierList().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('allModules', univ.Null().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('uri', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('otherName', AnotherName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+
+class SeqNumber(univ.Integer):
+ pass
+
+SeqNumber.subtypeSpec = constraint.ValueRangeConstraint(0, 9223372036854775807)
+
+
+class TAMPMsgRef(univ.Sequence):
+ pass
+
+TAMPMsgRef.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('target', TargetIdentifier()),
+ namedtype.NamedType('seqNum', SeqNumber())
+)
+
+
+class TAMPStatusQuery(univ.Sequence):
+ pass
+
+TAMPStatusQuery.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse', TerseOrVerbose().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('query', TAMPMsgRef())
+)
+
+
+tamp_status_query = rfc5652.ContentInfo()
+tamp_status_query['contentType'] = id_ct_TAMP_statusQuery
+tamp_status_query['content'] = TAMPStatusQuery()
+
+
+# TAMP Status Response Message
+
+id_ct_TAMP_statusResponse = _OID(id_tamp, 2)
+
+
+class KeyIdentifiers(univ.SequenceOf):
+ pass
+
+KeyIdentifiers.componentType = KeyIdentifier()
+KeyIdentifiers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TrustAnchorChoiceList(univ.SequenceOf):
+ pass
+
+TrustAnchorChoiceList.componentType = TrustAnchorChoice()
+TrustAnchorChoiceList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TAMPSequenceNumber(univ.Sequence):
+ pass
+
+TAMPSequenceNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyId', KeyIdentifier()),
+ namedtype.NamedType('seqNumber', SeqNumber())
+)
+
+
+class TAMPSequenceNumbers(univ.SequenceOf):
+ pass
+
+TAMPSequenceNumbers.componentType = TAMPSequenceNumber()
+TAMPSequenceNumbers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TerseStatusResponse(univ.Sequence):
+ pass
+
+TerseStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taKeyIds', KeyIdentifiers()),
+ namedtype.OptionalNamedType('communities', CommunityIdentifierList())
+)
+
+
+class VerboseStatusResponse(univ.Sequence):
+ pass
+
+VerboseStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('continPubKeyDecryptAlg',
+ AlgorithmIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('communities',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class StatusResponse(univ.Choice):
+ pass
+
+StatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseResponse', TerseStatusResponse().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('verboseResponse', VerboseStatusResponse().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPStatusResponse(univ.Sequence):
+ pass
+
+TAMPStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('query', TAMPMsgRef()),
+ namedtype.NamedType('response', StatusResponse()),
+ namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
+)
+
+
+tamp_status_response = rfc5652.ContentInfo()
+tamp_status_response['contentType'] = id_ct_TAMP_statusResponse
+tamp_status_response['content'] = TAMPStatusResponse()
+
+
+# Trust Anchor Update Message
+
+id_ct_TAMP_update = _OID(id_tamp, 3)
+
+
+class TBSCertificateChangeInfo(univ.Sequence):
+ pass
+
+TBSCertificateChangeInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('signature', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('issuer', Name().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('validity', Validity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('subject', Name().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+
+class TrustAnchorChangeInfo(univ.Sequence):
+ pass
+
+TrustAnchorChangeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('keyId', KeyIdentifier()),
+ namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
+ namedtype.OptionalNamedType('certPath', CertPathControls()),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class TrustAnchorChangeInfoChoice(univ.Choice):
+ pass
+
+TrustAnchorChangeInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertChange', TBSCertificateChangeInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('taChange', TrustAnchorChangeInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TrustAnchorUpdate(univ.Choice):
+ pass
+
+TrustAnchorUpdate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('add', TrustAnchorChoice().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('remove', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('change', TrustAnchorChangeInfoChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class TAMPUpdate(univ.Sequence):
+ pass
+
+TAMPUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('updates',
+ univ.SequenceOf(componentType=TrustAnchorUpdate()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+tamp_update = rfc5652.ContentInfo()
+tamp_update['contentType'] = id_ct_TAMP_update
+tamp_update['content'] = TAMPUpdate()
+
+
+# Trust Anchor Update Confirm Message
+
+id_ct_TAMP_updateConfirm = _OID(id_tamp, 4)
+
+
+class StatusCode(univ.Enumerated):
+ pass
+
+StatusCode.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('unsupportedParameters', 15),
+ ('signatureFailure', 16),
+ ('insufficientMemory', 17),
+ ('unsupportedTAMPMsgType', 18),
+ ('apexTAMPAnchor', 19),
+ ('improperTAAddition', 20),
+ ('seqNumFailure', 21),
+ ('contingencyPublicKeyDecrypt', 22),
+ ('incorrectTarget', 23),
+ ('communityUpdateFailed', 24),
+ ('trustAnchorNotFound', 25),
+ ('unsupportedTAAlgorithm', 26),
+ ('unsupportedTAKeySize', 27),
+ ('unsupportedContinPubKeyDecryptAlg', 28),
+ ('missingSignature', 29),
+ ('resourcesBusy', 30),
+ ('versionNumberMismatch', 31),
+ ('missingPolicySet', 32),
+ ('revokedCertificate', 33),
+ ('unsupportedTrustAnchorFormat', 34),
+ ('improperTAChange', 35),
+ ('malformed', 36),
+ ('cmsError', 37),
+ ('unsupportedTargetIdentifier', 38),
+ ('other', 127)
+)
+
+
+class StatusCodeList(univ.SequenceOf):
+ pass
+
+StatusCodeList.componentType = StatusCode()
+StatusCodeList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TerseUpdateConfirm(StatusCodeList):
+ pass
+
+
+class VerboseUpdateConfirm(univ.Sequence):
+ pass
+
+VerboseUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCodeList()),
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('tampSeqNumbers', TAMPSequenceNumbers()),
+ namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
+)
+
+
+class UpdateConfirm(univ.Choice):
+ pass
+
+UpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseConfirm', TerseUpdateConfirm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseConfirm', VerboseUpdateConfirm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('update', TAMPMsgRef()),
+ namedtype.NamedType('confirm', UpdateConfirm())
+)
+
+
+tamp_update_confirm = rfc5652.ContentInfo()
+tamp_update_confirm['contentType'] = id_ct_TAMP_updateConfirm
+tamp_update_confirm['content'] = TAMPUpdateConfirm()
+
+
+# Apex Trust Anchor Update Message
+
+id_ct_TAMP_apexUpdate = _OID(id_tamp, 5)
+
+
+class TAMPApexUpdate(univ.Sequence):
+ pass
+
+TAMPApexUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('clearTrustAnchors', univ.Boolean()),
+ namedtype.NamedType('clearCommunities', univ.Boolean()),
+ namedtype.OptionalNamedType('seqNumber', SeqNumber()),
+ namedtype.NamedType('apexTA', TrustAnchorChoice())
+)
+
+
+tamp_apex_update = rfc5652.ContentInfo()
+tamp_apex_update['contentType'] = id_ct_TAMP_apexUpdate
+tamp_apex_update['content'] = TAMPApexUpdate()
+
+
+# Apex Trust Anchor Update Confirm Message
+
+id_ct_TAMP_apexUpdateConfirm = _OID(id_tamp, 6)
+
+
+class TerseApexUpdateConfirm(StatusCode):
+ pass
+
+
+class VerboseApexUpdateConfirm(univ.Sequence):
+ pass
+
+VerboseApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('communities',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+
+class ApexUpdateConfirm(univ.Choice):
+ pass
+
+ApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseApexConfirm',
+ TerseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseApexConfirm',
+ VerboseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPApexUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('apexReplace', TAMPMsgRef()),
+ namedtype.NamedType('apexConfirm', ApexUpdateConfirm())
+)
+
+
+tamp_apex_update_confirm = rfc5652.ContentInfo()
+tamp_apex_update_confirm['contentType'] = id_ct_TAMP_apexUpdateConfirm
+tamp_apex_update_confirm['content'] = TAMPApexUpdateConfirm()
+
+
+# Community Update Message
+
+id_ct_TAMP_communityUpdate = _OID(id_tamp, 7)
+
+
+class CommunityUpdates(univ.Sequence):
+ pass
+
+CommunityUpdates.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('remove',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('add',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 2)))
+)
+
+
+class TAMPCommunityUpdate(univ.Sequence):
+ pass
+
+TAMPCommunityUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('updates', CommunityUpdates())
+)
+
+
+tamp_community_update = rfc5652.ContentInfo()
+tamp_community_update['contentType'] = id_ct_TAMP_communityUpdate
+tamp_community_update['content'] = TAMPCommunityUpdate()
+
+
+# Community Update Confirm Message
+
+id_ct_TAMP_communityUpdateConfirm = _OID(id_tamp, 8)
+
+
+class TerseCommunityConfirm(StatusCode):
+ pass
+
+
+class VerboseCommunityConfirm(univ.Sequence):
+ pass
+
+VerboseCommunityConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.OptionalNamedType('communities', CommunityIdentifierList())
+)
+
+
+class CommunityConfirm(univ.Choice):
+ pass
+
+CommunityConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseCommConfirm',
+ TerseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseCommConfirm',
+ VerboseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPCommunityUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPCommunityUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('update', TAMPMsgRef()),
+ namedtype.NamedType('commConfirm', CommunityConfirm())
+)
+
+
+tamp_community_update_confirm = rfc5652.ContentInfo()
+tamp_community_update_confirm['contentType'] = id_ct_TAMP_communityUpdateConfirm
+tamp_community_update_confirm['content'] = TAMPCommunityUpdateConfirm()
+
+
+# Sequence Number Adjust Message
+
+id_ct_TAMP_seqNumAdjust = _OID(id_tamp, 10)
+
+
+
+class SequenceNumberAdjust(univ.Sequence):
+ pass
+
+SequenceNumberAdjust.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('msgRef', TAMPMsgRef())
+)
+
+
+tamp_sequence_number_adjust = rfc5652.ContentInfo()
+tamp_sequence_number_adjust['contentType'] = id_ct_TAMP_seqNumAdjust
+tamp_sequence_number_adjust['content'] = SequenceNumberAdjust()
+
+
+# Sequence Number Adjust Confirm Message
+
+id_ct_TAMP_seqNumAdjustConfirm = _OID(id_tamp, 11)
+
+
+class SequenceNumberAdjustConfirm(univ.Sequence):
+ pass
+
+SequenceNumberAdjustConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('adjust', TAMPMsgRef()),
+ namedtype.NamedType('status', StatusCode())
+)
+
+
+tamp_sequence_number_adjust_confirm = rfc5652.ContentInfo()
+tamp_sequence_number_adjust_confirm['contentType'] = id_ct_TAMP_seqNumAdjustConfirm
+tamp_sequence_number_adjust_confirm['content'] = SequenceNumberAdjustConfirm()
+
+
+# TAMP Error Message
+
+id_ct_TAMP_error = _OID(id_tamp, 9)
+
+
+class TAMPError(univ.Sequence):
+ pass
+
+TAMPError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('msgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.OptionalNamedType('msgRef', TAMPMsgRef())
+)
+
+
+tamp_error = rfc5652.ContentInfo()
+tamp_error['contentType'] = id_ct_TAMP_error
+tamp_error['content'] = TAMPError()
+
+
+# Object Identifier Arc for Attributes
+
+id_attributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.5')
+
+
+# contingency-public-key-decrypt-key unsigned attribute
+
+id_aa_TAMP_contingencyPublicKeyDecryptKey = _OID(id_attributes, 63)
+
+
+class PlaintextSymmetricKey(univ.OctetString):
+ pass
+
+
+contingency_public_key_decrypt_key = Attribute()
+contingency_public_key_decrypt_key['type'] = id_aa_TAMP_contingencyPublicKeyDecryptKey
+contingency_public_key_decrypt_key['values'][0] = PlaintextSymmetricKey()
+
+
+# id-pe-wrappedApexContinKey extension
+
+id_pe_wrappedApexContinKey =univ.ObjectIdentifier('1.3.6.1.5.5.7.1.20')
+
+
+class ApexContingencyKey(univ.Sequence):
+ pass
+
+ApexContingencyKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('wrapAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('wrappedContinPubKey', univ.OctetString())
+)
+
+
+wrappedApexContinKey = Extension()
+wrappedApexContinKey['extnID'] = id_pe_wrappedApexContinKey
+wrappedApexContinKey['critical'] = 0
+wrappedApexContinKey['extnValue'] = univ.OctetString()
+
+
+# Add to the map of CMS Content Type OIDs to Content Types in
+# rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_TAMP_statusQuery: TAMPStatusQuery(),
+ id_ct_TAMP_statusResponse: TAMPStatusResponse(),
+ id_ct_TAMP_update: TAMPUpdate(),
+ id_ct_TAMP_updateConfirm: TAMPUpdateConfirm(),
+ id_ct_TAMP_apexUpdate: TAMPApexUpdate(),
+ id_ct_TAMP_apexUpdateConfirm: TAMPApexUpdateConfirm(),
+ id_ct_TAMP_communityUpdate: TAMPCommunityUpdate(),
+ id_ct_TAMP_communityUpdateConfirm: TAMPCommunityUpdateConfirm(),
+ id_ct_TAMP_seqNumAdjust: SequenceNumberAdjust(),
+ id_ct_TAMP_seqNumAdjustConfirm: SequenceNumberAdjustConfirm(),
+ id_ct_TAMP_error: TAMPError(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# Add to the map of CMS Attribute OIDs to Attribute Values in
+# rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_TAMP_contingencyPublicKeyDecryptKey: PlaintextSymmetricKey(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Add to the map of Certificate Extension OIDs to Extensions in
+# rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wrappedApexContinKey: ApexContingencyKey(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5940.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5940.py
new file mode 100644
index 0000000000..e105923358
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5940.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional CMS Revocation Information Choices
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5940.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5652
+
+
+# RevocationInfoChoice for OCSP response:
+# The OID is included in otherRevInfoFormat, and
+# signed OCSPResponse is included in otherRevInfo
+
+id_ri_ocsp_response = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.2')
+
+OCSPResponse = rfc2560.OCSPResponse
+
+
+# RevocationInfoChoice for SCVP request/response:
+# The OID is included in otherRevInfoFormat, and
+# SCVPReqRes is included in otherRevInfo
+
+id_ri_scvp = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.4')
+
+ContentInfo = rfc5652.ContentInfo
+
+class SCVPReqRes(univ.Sequence):
+ pass
+
+SCVPReqRes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('request',
+ ContentInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('response', ContentInfo())
+)
+
+
+# Map of Revocation Info Format OIDs to Revocation Info Format
+# is added to the ones that are in rfc5652.py
+
+_otherRevInfoFormatMapUpdate = {
+ id_ri_ocsp_response: OCSPResponse(),
+ id_ri_scvp: SCVPReqRes(),
+}
+
+rfc5652.otherRevInfoFormatMap.update(_otherRevInfoFormatMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5958.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5958.py
new file mode 100644
index 0000000000..1aaa9286ad
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5958.py
@@ -0,0 +1,98 @@
+#
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Asymmetric Key Packages, which is essentially version 2 of
+# the PrivateKeyInfo structure in PKCS#8 in RFC 5208
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5958.txt
+
+from pyasn1.type import univ, constraint, namedtype, namedval, tag
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+MAX = float('inf')
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class PrivateKeyAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedData(univ.OctetString):
+ pass
+
+
+class EncryptedPrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedData', EncryptedData())
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0), ('v2', 1))
+
+
+class PrivateKey(univ.OctetString):
+ pass
+
+
+class Attributes(univ.SetOf):
+ componentType = rfc5652.Attribute()
+
+
+class PublicKey(univ.BitString):
+ pass
+
+
+# OneAsymmetricKey is essentially version 2 of PrivateKeyInfo.
+# If publicKey is present, then the version must be v2;
+# otherwise, the version should be v1.
+
+class OneAsymmetricKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('privateKeyAlgorithm', PrivateKeyAlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', PrivateKey()),
+ namedtype.OptionalNamedType('attributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('publicKey', PublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class PrivateKeyInfo(OneAsymmetricKey):
+ pass
+
+
+# The CMS AsymmetricKeyPackage Content Type
+
+id_ct_KP_aKeyPackage = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.5')
+
+class AsymmetricKeyPackage(univ.SequenceOf):
+ pass
+
+AsymmetricKeyPackage.componentType = OneAsymmetricKey()
+AsymmetricKeyPackage.sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_aKeyPackage: AsymmetricKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5990.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5990.py
new file mode 100644
index 0000000000..281316fb81
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc5990.py
@@ -0,0 +1,237 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Use of the RSA-KEM Key Transport Algorithm in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5990.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Useful types and definitions
+
+class NullParms(univ.Null):
+ pass
+
+
+# Object identifier arcs
+
+is18033_2 = _OID(1, 0, 18033, 2)
+
+nistAlgorithm = _OID(2, 16, 840, 1, 101, 3, 4)
+
+pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
+
+x9_44 = _OID(1, 3, 133, 16, 840, 9, 44)
+
+x9_44_components = _OID(x9_44, 1)
+
+
+# Types for algorithm identifiers
+
+class Camellia_KeyWrappingScheme(AlgorithmIdentifier):
+ pass
+
+class DataEncapsulationMechanism(AlgorithmIdentifier):
+ pass
+
+class KDF2_HashFunction(AlgorithmIdentifier):
+ pass
+
+class KDF3_HashFunction(AlgorithmIdentifier):
+ pass
+
+class KeyDerivationFunction(AlgorithmIdentifier):
+ pass
+
+class KeyEncapsulationMechanism(AlgorithmIdentifier):
+ pass
+
+class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier):
+ pass
+
+
+# RSA-KEM Key Transport Algorithm
+
+id_rsa_kem = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 14)
+
+
+class GenericHybridParameters(univ.Sequence):
+ pass
+
+GenericHybridParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('kem', KeyEncapsulationMechanism()),
+ namedtype.NamedType('dem', DataEncapsulationMechanism())
+)
+
+
+rsa_kem = AlgorithmIdentifier()
+rsa_kem['algorithm'] = id_rsa_kem
+rsa_kem['parameters'] = GenericHybridParameters()
+
+
+# KEM-RSA Key Encapsulation Mechanism
+
+id_kem_rsa = _OID(is18033_2, 2, 4)
+
+
+class KeyLength(univ.Integer):
+ pass
+
+KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+class RsaKemParameters(univ.Sequence):
+ pass
+
+RsaKemParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
+ namedtype.NamedType('keyLength', KeyLength())
+)
+
+
+kem_rsa = AlgorithmIdentifier()
+kem_rsa['algorithm'] = id_kem_rsa
+kem_rsa['parameters'] = RsaKemParameters()
+
+
+# Key Derivation Functions
+
+id_kdf_kdf2 = _OID(x9_44_components, 1)
+
+id_kdf_kdf3 = _OID(x9_44_components, 2)
+
+
+kdf2 = AlgorithmIdentifier()
+kdf2['algorithm'] = id_kdf_kdf2
+kdf2['parameters'] = KDF2_HashFunction()
+
+kdf3 = AlgorithmIdentifier()
+kdf3['algorithm'] = id_kdf_kdf3
+kdf3['parameters'] = KDF3_HashFunction()
+
+
+# Hash Functions
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+
+sha1 = AlgorithmIdentifier()
+sha1['algorithm'] = id_sha1
+sha1['parameters'] = univ.Null("")
+
+sha224 = AlgorithmIdentifier()
+sha224['algorithm'] = id_sha224
+sha224['parameters'] = univ.Null("")
+
+sha256 = AlgorithmIdentifier()
+sha256['algorithm'] = id_sha256
+sha256['parameters'] = univ.Null("")
+
+sha384 = AlgorithmIdentifier()
+sha384['algorithm'] = id_sha384
+sha384['parameters'] = univ.Null("")
+
+sha512 = AlgorithmIdentifier()
+sha512['algorithm'] = id_sha512
+sha512['parameters'] = univ.Null("")
+
+
+# Symmetric Key-Wrapping Schemes
+
+id_aes128_Wrap = _OID(nistAlgorithm, 1, 5)
+
+id_aes192_Wrap = _OID(nistAlgorithm, 1, 25)
+
+id_aes256_Wrap = _OID(nistAlgorithm, 1, 45)
+
+id_alg_CMS3DESwrap = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 6)
+
+id_camellia128_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 2)
+
+id_camellia192_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 3)
+
+id_camellia256_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 4)
+
+
+aes128_Wrap = AlgorithmIdentifier()
+aes128_Wrap['algorithm'] = id_aes128_Wrap
+# aes128_Wrap['parameters'] are absent
+
+aes192_Wrap = AlgorithmIdentifier()
+aes192_Wrap['algorithm'] = id_aes128_Wrap
+# aes192_Wrap['parameters'] are absent
+
+aes256_Wrap = AlgorithmIdentifier()
+aes256_Wrap['algorithm'] = id_sha256
+# aes256_Wrap['parameters'] are absent
+
+tdes_Wrap = AlgorithmIdentifier()
+tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
+tdes_Wrap['parameters'] = univ.Null("")
+
+camellia128_Wrap = AlgorithmIdentifier()
+camellia128_Wrap['algorithm'] = id_camellia128_Wrap
+# camellia128_Wrap['parameters'] are absent
+
+camellia192_Wrap = AlgorithmIdentifier()
+camellia192_Wrap['algorithm'] = id_camellia192_Wrap
+# camellia192_Wrap['parameters'] are absent
+
+camellia256_Wrap = AlgorithmIdentifier()
+camellia256_Wrap['algorithm'] = id_camellia256_Wrap
+# camellia256_Wrap['parameters'] are absent
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+# Note that the ones that must not have parameters are not added to the map.
+
+_algorithmIdentifierMapUpdate = {
+ id_rsa_kem: GenericHybridParameters(),
+ id_kem_rsa: RsaKemParameters(),
+ id_kdf_kdf2: KDF2_HashFunction(),
+ id_kdf_kdf3: KDF3_HashFunction(),
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_alg_CMS3DESwrap: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6010.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6010.py
new file mode 100644
index 0000000000..250e207ba4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6010.py
@@ -0,0 +1,88 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extension for CMS Content Constraints (CCC)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6010.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+AttributeType = rfc5280.AttributeType
+
+AttributeValue = rfc5280.AttributeValue
+
+
+id_ct_anyContentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.0')
+
+
+class AttrConstraint(univ.Sequence):
+ pass
+
+AttrConstraint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues', univ.SetOf(
+ componentType=AttributeValue()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class AttrConstraintList(univ.SequenceOf):
+ pass
+
+AttrConstraintList.componentType = AttrConstraint()
+AttrConstraintList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class ContentTypeGeneration(univ.Enumerated):
+ pass
+
+ContentTypeGeneration.namedValues = namedval.NamedValues(
+ ('canSource', 0),
+ ('cannotSource', 1)
+)
+
+
+class ContentTypeConstraint(univ.Sequence):
+ pass
+
+ContentTypeConstraint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('canSource', ContentTypeGeneration().subtype(value='canSource')),
+ namedtype.OptionalNamedType('attrConstraints', AttrConstraintList())
+)
+
+
+# CMS Content Constraints (CCC) Extension and Object Identifier
+
+id_pe_cmsContentConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.18')
+
+class CMSContentConstraints(univ.SequenceOf):
+ pass
+
+CMSContentConstraints.componentType = ContentTypeConstraint()
+CMSContentConstraints.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_cmsContentConstraints: CMSContentConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6019.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6019.py
new file mode 100644
index 0000000000..c6872c7669
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6019.py
@@ -0,0 +1,45 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BinaryTime: An Alternate Format for Representing Date and Time
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6019.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# BinaryTime: Represent date and time as an integer
+
+class BinaryTime(univ.Integer):
+ pass
+
+BinaryTime.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+# CMS Attribute for representing signing time in BinaryTime
+
+id_aa_binarySigningTime = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.46')
+
+class BinarySigningTime(BinaryTime):
+ pass
+
+
+# Map of Attribute Type OIDs to Attributes ia added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_binarySigningTime: BinarySigningTime(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6031.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6031.py
new file mode 100644
index 0000000000..6e1bb2261d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6031.py
@@ -0,0 +1,469 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Symmetric Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6031.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6019
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+id_pskc = univ.ObjectIdentifier('1.2.840.113549.1.9.16.12')
+
+
+# Symmetric Key Package Attributes
+
+id_pskc_manufacturer = _OID(id_pskc, 1)
+
+class at_pskc_manufacturer(char.UTF8String):
+ pass
+
+
+id_pskc_serialNo = _OID(id_pskc, 2)
+
+class at_pskc_serialNo(char.UTF8String):
+ pass
+
+
+id_pskc_model = _OID(id_pskc, 3)
+
+class at_pskc_model(char.UTF8String):
+ pass
+
+
+id_pskc_issueNo = _OID(id_pskc, 4)
+
+class at_pskc_issueNo(char.UTF8String):
+ pass
+
+
+id_pskc_deviceBinding = _OID(id_pskc, 5)
+
+class at_pskc_deviceBinding(char.UTF8String):
+ pass
+
+
+id_pskc_deviceStartDate = _OID(id_pskc, 6)
+
+class at_pskc_deviceStartDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_deviceExpiryDate = _OID(id_pskc, 7)
+
+class at_pskc_deviceExpiryDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_moduleId = _OID(id_pskc, 8)
+
+class at_pskc_moduleId(char.UTF8String):
+ pass
+
+
+id_pskc_deviceUserId = _OID(id_pskc, 26)
+
+class at_pskc_deviceUserId(char.UTF8String):
+ pass
+
+
+# Symmetric Key Attributes
+
+id_pskc_keyId = _OID(id_pskc, 9)
+
+class at_pskc_keyUserId(char.UTF8String):
+ pass
+
+
+id_pskc_algorithm = _OID(id_pskc, 10)
+
+class at_pskc_algorithm(char.UTF8String):
+ pass
+
+
+id_pskc_issuer = _OID(id_pskc, 11)
+
+class at_pskc_issuer(char.UTF8String):
+ pass
+
+
+id_pskc_keyProfileId = _OID(id_pskc, 12)
+
+class at_pskc_keyProfileId(char.UTF8String):
+ pass
+
+
+id_pskc_keyReference = _OID(id_pskc, 13)
+
+class at_pskc_keyReference(char.UTF8String):
+ pass
+
+
+id_pskc_friendlyName = _OID(id_pskc, 14)
+
+class FriendlyName(univ.Sequence):
+ pass
+
+FriendlyName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('friendlyName', char.UTF8String()),
+ namedtype.OptionalNamedType('friendlyNameLangTag', char.UTF8String())
+)
+
+class at_pskc_friendlyName(FriendlyName):
+ pass
+
+
+id_pskc_algorithmParameters = _OID(id_pskc, 15)
+
+class Encoding(char.UTF8String):
+ pass
+
+Encoding.namedValues = namedval.NamedValues(
+ ('dec', "DECIMAL"),
+ ('hex', "HEXADECIMAL"),
+ ('alpha', "ALPHANUMERIC"),
+ ('b64', "BASE64"),
+ ('bin', "BINARY")
+)
+
+Encoding.subtypeSpec = constraint.SingleValueConstraint(
+ "DECIMAL", "HEXADECIMAL", "ALPHANUMERIC", "BASE64", "BINARY" )
+
+class ChallengeFormat(univ.Sequence):
+ pass
+
+ChallengeFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encoding', Encoding()),
+ namedtype.DefaultedNamedType('checkDigit',
+ univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('min', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('max', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+class ResponseFormat(univ.Sequence):
+ pass
+
+ResponseFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encoding', Encoding()),
+ namedtype.NamedType('length', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.DefaultedNamedType('checkDigit',
+ univ.Boolean().subtype(value=0))
+)
+
+class PSKCAlgorithmParameters(univ.Choice):
+ pass
+
+PSKCAlgorithmParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('suite', char.UTF8String()),
+ namedtype.NamedType('challengeFormat', ChallengeFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('responseFormat', ResponseFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+class at_pskc_algorithmParameters(PSKCAlgorithmParameters):
+ pass
+
+
+id_pskc_counter = _OID(id_pskc, 16)
+
+class at_pskc_counter(univ.Integer):
+ pass
+
+at_pskc_counter.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_time = _OID(id_pskc, 17)
+
+class at_pskc_time(rfc6019.BinaryTime):
+ pass
+
+
+id_pskc_timeInterval = _OID(id_pskc, 18)
+
+class at_pskc_timeInterval(univ.Integer):
+ pass
+
+at_pskc_timeInterval.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_timeDrift = _OID(id_pskc, 19)
+
+class at_pskc_timeDrift(univ.Integer):
+ pass
+
+at_pskc_timeDrift.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_valueMAC = _OID(id_pskc, 20)
+
+class ValueMac(univ.Sequence):
+ pass
+
+ValueMac.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('macAlgorithm', char.UTF8String()),
+ namedtype.NamedType('mac', char.UTF8String())
+)
+
+class at_pskc_valueMAC(ValueMac):
+ pass
+
+
+id_pskc_keyUserId = _OID(id_pskc, 27)
+
+class at_pskc_keyId(char.UTF8String):
+ pass
+
+
+id_pskc_keyStartDate = _OID(id_pskc, 21)
+
+class at_pskc_keyStartDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_keyExpiryDate = _OID(id_pskc, 22)
+
+class at_pskc_keyExpiryDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_numberOfTransactions = _OID(id_pskc, 23)
+
+class at_pskc_numberOfTransactions(univ.Integer):
+ pass
+
+at_pskc_numberOfTransactions.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_keyUsages = _OID(id_pskc, 24)
+
+class PSKCKeyUsage(char.UTF8String):
+ pass
+
+PSKCKeyUsage.namedValues = namedval.NamedValues(
+ ('otp', "OTP"),
+ ('cr', "CR"),
+ ('encrypt', "Encrypt"),
+ ('integrity', "Integrity"),
+ ('verify', "Verify"),
+ ('unlock', "Unlock"),
+ ('decrypt', "Decrypt"),
+ ('keywrap', "KeyWrap"),
+ ('unwrap', "Unwrap"),
+ ('derive', "Derive"),
+ ('generate', "Generate")
+)
+
+PSKCKeyUsage.subtypeSpec = constraint.SingleValueConstraint(
+ "OTP", "CR", "Encrypt", "Integrity", "Verify", "Unlock",
+ "Decrypt", "KeyWrap", "Unwrap", "Derive", "Generate" )
+
+class PSKCKeyUsages(univ.SequenceOf):
+ pass
+
+PSKCKeyUsages.componentType = PSKCKeyUsage()
+
+class at_pskc_keyUsage(PSKCKeyUsages):
+ pass
+
+
+id_pskc_pinPolicy = _OID(id_pskc, 25)
+
+class PINUsageMode(char.UTF8String):
+ pass
+
+PINUsageMode.namedValues = namedval.NamedValues(
+ ("local", "Local"),
+ ("prepend", "Prepend"),
+ ("append", "Append"),
+ ("algorithmic", "Algorithmic")
+)
+
+PINUsageMode.subtypeSpec = constraint.SingleValueConstraint(
+ "Local", "Prepend", "Append", "Algorithmic" )
+
+class PINPolicy(univ.Sequence):
+ pass
+
+PINPolicy.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pinKeyId', char.UTF8String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('pinUsageMode', PINUsageMode().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('maxFailedAttempts', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('minLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('maxLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('pinEncoding', Encoding().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+class at_pskc_pinPolicy(PINPolicy):
+ pass
+
+
+# Map of Symmetric Key Package Attribute OIDs to Attributes
+
+sKeyPkgAttributesMap = {
+ id_pskc_manufacturer: at_pskc_manufacturer(),
+ id_pskc_serialNo: at_pskc_serialNo(),
+ id_pskc_model: at_pskc_model(),
+ id_pskc_issueNo: at_pskc_issueNo(),
+ id_pskc_deviceBinding: at_pskc_deviceBinding(),
+ id_pskc_deviceStartDate: at_pskc_deviceStartDate(),
+ id_pskc_deviceExpiryDate: at_pskc_deviceExpiryDate(),
+ id_pskc_moduleId: at_pskc_moduleId(),
+ id_pskc_deviceUserId: at_pskc_deviceUserId(),
+}
+
+
+# Map of Symmetric Key Attribute OIDs to Attributes
+
+sKeyAttributesMap = {
+ id_pskc_keyId: at_pskc_keyId(),
+ id_pskc_algorithm: at_pskc_algorithm(),
+ id_pskc_issuer: at_pskc_issuer(),
+ id_pskc_keyProfileId: at_pskc_keyProfileId(),
+ id_pskc_keyReference: at_pskc_keyReference(),
+ id_pskc_friendlyName: at_pskc_friendlyName(),
+ id_pskc_algorithmParameters: at_pskc_algorithmParameters(),
+ id_pskc_counter: at_pskc_counter(),
+ id_pskc_time: at_pskc_time(),
+ id_pskc_timeInterval: at_pskc_timeInterval(),
+ id_pskc_timeDrift: at_pskc_timeDrift(),
+ id_pskc_valueMAC: at_pskc_valueMAC(),
+ id_pskc_keyUserId: at_pskc_keyUserId(),
+ id_pskc_keyStartDate: at_pskc_keyStartDate(),
+ id_pskc_keyExpiryDate: at_pskc_keyExpiryDate(),
+ id_pskc_numberOfTransactions: at_pskc_numberOfTransactions(),
+ id_pskc_keyUsages: at_pskc_keyUsage(),
+ id_pskc_pinPolicy: at_pskc_pinPolicy(),
+}
+
+
+# This definition replaces Attribute() from rfc5652.py; it is the same except
+# that opentype is added with sKeyPkgAttributesMap and sKeyAttributesMap
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class SKeyAttribute(univ.Sequence):
+ pass
+
+SKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', sKeyAttributesMap)
+ )
+)
+
+
+class SKeyPkgAttribute(univ.Sequence):
+ pass
+
+SKeyPkgAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', sKeyPkgAttributesMap)
+ )
+)
+
+
+# Symmetric Key Package Content Type
+
+id_ct_KP_sKeyPackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.25')
+
+
+class KeyPkgVersion(univ.Integer):
+ pass
+
+KeyPkgVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+class OneSymmetricKey(univ.Sequence):
+ pass
+
+OneSymmetricKey.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('sKeyAttrs',
+ univ.SequenceOf(componentType=SKeyAttribute()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('sKey', univ.OctetString())
+)
+
+OneSymmetricKey.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2)
+
+
+class SymmetricKeys(univ.SequenceOf):
+ pass
+
+SymmetricKeys.componentType = OneSymmetricKey()
+SymmetricKeys.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class SymmetricKeyPackage(univ.Sequence):
+ pass
+
+SymmetricKeyPackage.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v1')),
+ namedtype.OptionalNamedType('sKeyPkgAttrs',
+ univ.SequenceOf(componentType=SKeyPkgAttribute()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('sKeys', SymmetricKeys())
+)
+
+
+# Map of Content Type OIDs to Content Types are
+# added to the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_sKeyPackage: SymmetricKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6032.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6032.py
new file mode 100644
index 0000000000..563639a8d6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6032.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Encrypted Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6032.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5083
+
+
+# Content Decryption Key Identifier attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+class ContentDecryptKeyID(univ.OctetString):
+ pass
+
+aa_content_decrypt_key_identifier = rfc5652.Attribute()
+aa_content_decrypt_key_identifier['attrType'] = id_aa_KP_contentDecryptKeyID
+aa_content_decrypt_key_identifier['attrValues'][0] = ContentDecryptKeyID()
+
+
+# Encrypted Key Package Content Type
+
+id_ct_KP_encryptedKeyPkg = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.2')
+
+class EncryptedKeyPackage(univ.Choice):
+ pass
+
+EncryptedKeyPackage.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encrypted', rfc5652.EncryptedData()),
+ namedtype.NamedType('enveloped', rfc5652.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('authEnveloped', rfc5083.AuthEnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Map of Attribute Type OIDs to Attributes are
+# added to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types are
+# added to the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_encryptedKeyPkg: EncryptedKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6120.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6120.py
new file mode 100644
index 0000000000..ab256203a0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6120.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Extensible Messaging and Presence Protocol (XMPP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6120.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# XmppAddr Identifier Type as specified in Section 13.7.1.4. of RFC 6120
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_xmppAddr = id_on + (5, )
+
+
+class XmppAddr(char.UTF8String):
+ pass
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_xmppAddr: XmppAddr(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6170.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6170.py
new file mode 100644
index 0000000000..e2876167b7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6170.py
@@ -0,0 +1,17 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Image in the Internet X.509 Public Key Infrastructure
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6170.txt
+#
+
+from pyasn1.type import univ
+
+id_logo_certImage = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.3')
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6187.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6187.py
new file mode 100644
index 0000000000..4be0054716
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6187.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509v3 Certificates for Secure Shell Authentication
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6187.txt
+#
+
+from pyasn1.type import univ
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_kp = id_pkix + (3, )
+
+id_kp_secureShellClient = id_kp + (21, )
+id_kp_secureShellServer = id_kp + (22, )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6210.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6210.py
new file mode 100644
index 0000000000..28587b9e70
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6210.py
@@ -0,0 +1,42 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Experiment for Hash Functions with Parameters in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6210.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_alg_MD5_XOR_EXPERIMENT = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.13')
+
+
+class MD5_XOR_EXPERIMENT(univ.OctetString):
+ pass
+
+MD5_XOR_EXPERIMENT.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+mda_xor_md5_EXPERIMENT = rfc5280.AlgorithmIdentifier()
+mda_xor_md5_EXPERIMENT['algorithm'] = id_alg_MD5_XOR_EXPERIMENT
+mda_xor_md5_EXPERIMENT['parameters'] = MD5_XOR_EXPERIMENT()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones that are in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_MD5_XOR_EXPERIMENT: MD5_XOR_EXPERIMENT(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6211.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6211.py
new file mode 100644
index 0000000000..abd7a8688d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6211.py
@@ -0,0 +1,72 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Algorithm Identifier Protection Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6211.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+DigestAlgorithmIdentifier = rfc5652.DigestAlgorithmIdentifier
+
+MessageAuthenticationCodeAlgorithm = rfc5652.MessageAuthenticationCodeAlgorithm
+
+SignatureAlgorithmIdentifier = rfc5652.SignatureAlgorithmIdentifier
+
+
+# CMS Algorithm Protection attribute
+
+id_aa_cmsAlgorithmProtect = univ.ObjectIdentifier('1.2.840.113549.1.9.52')
+
+
+class CMSAlgorithmProtection(univ.Sequence):
+ pass
+
+CMSAlgorithmProtection.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signatureAlgorithm',
+ SignatureAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('macAlgorithm',
+ MessageAuthenticationCodeAlgorithm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+CMSAlgorithmProtection.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('signatureAlgorithm', constraint.ComponentPresentConstraint()),
+ ('macAlgorithm', constraint.ComponentAbsentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('signatureAlgorithm', constraint.ComponentAbsentConstraint()),
+ ('macAlgorithm', constraint.ComponentPresentConstraint()))
+)
+
+
+aa_cmsAlgorithmProtection = rfc5652.Attribute()
+aa_cmsAlgorithmProtection['attrType'] = id_aa_cmsAlgorithmProtect
+aa_cmsAlgorithmProtection['attrValues'][0] = CMSAlgorithmProtection()
+
+
+# Map of Attribute Type OIDs to Attributes are
+# added to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_cmsAlgorithmProtect: CMSAlgorithmProtection(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) \ No newline at end of file
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6402.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6402.py
new file mode 100644
index 0000000000..5490b05fb9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6402.py
@@ -0,0 +1,628 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add a maps for CMC Control Attributes
+# and CMC Content Types for use with opentypes.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management over CMS (CMC) Updates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6402.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4211
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Since CMS Attributes and CMC Controls both use 'attrType', one map is used
+cmcControlAttributesMap = rfc5652.cmsAttributesMap
+
+
+class ChangeSubjectName(univ.Sequence):
+ pass
+
+
+ChangeSubjectName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('subject', rfc5280.Name()),
+ namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class CMCStatus(univ.Integer):
+ pass
+
+
+CMCStatus.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('failed', 2),
+ ('pending', 3),
+ ('noSupport', 4),
+ ('confirmRequired', 5),
+ ('popRequired', 6),
+ ('partial', 7)
+)
+
+
+class PendInfo(univ.Sequence):
+ pass
+
+
+PendInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pendToken', univ.OctetString()),
+ namedtype.NamedType('pendTime', useful.GeneralizedTime())
+)
+
+bodyIdMax = univ.Integer(4294967295)
+
+
+class BodyPartID(univ.Integer):
+ pass
+
+
+BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax)
+
+
+class BodyPartPath(univ.SequenceOf):
+ pass
+
+
+BodyPartPath.componentType = BodyPartID()
+BodyPartPath.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class BodyPartReference(univ.Choice):
+ pass
+
+
+BodyPartReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('bodyPartPath', BodyPartPath())
+)
+
+
+class CMCFailInfo(univ.Integer):
+ pass
+
+
+CMCFailInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('unsupportedExt', 5),
+ ('mustArchiveKeys', 6),
+ ('badIdentity', 7),
+ ('popRequired', 8),
+ ('popFailed', 9),
+ ('noKeyReuse', 10),
+ ('internalCAError', 11),
+ ('tryLater', 12),
+ ('authDataFail', 13)
+)
+
+
+class CMCStatusInfoV2(univ.Sequence):
+ pass
+
+
+CMCStatusInfoV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo()),
+ namedtype.NamedType(
+ 'extendedFailInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('failInfoValue', AttributeValue()))
+ )
+ )
+ )
+ )
+ )
+)
+
+
+class GetCRL(univ.Sequence):
+ pass
+
+
+GetCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()),
+ namedtype.OptionalNamedType('time', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags())
+)
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_cmc = _buildOid(id_pkix, 7)
+
+id_cmc_batchResponses = _buildOid(id_cmc, 29)
+
+id_cmc_popLinkWitness = _buildOid(id_cmc, 23)
+
+
+class PopLinkWitnessV2(univ.Sequence):
+ pass
+
+
+PopLinkWitnessV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33)
+
+id_cmc_identityProofV2 = _buildOid(id_cmc, 34)
+
+id_cmc_revokeRequest = _buildOid(id_cmc, 17)
+
+id_cmc_recipientNonce = _buildOid(id_cmc, 7)
+
+
+class ControlsProcessed(univ.Sequence):
+ pass
+
+
+ControlsProcessed.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference()))
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'certificationRequestInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', rfc5280.Name()),
+ namedtype.NamedType(
+ 'subjectPublicKeyInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+ )
+ ),
+ namedtype.NamedType(
+ 'attributes', univ.SetOf(
+ componentType=rfc5652.Attribute()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class TaggedCertificationRequest(univ.Sequence):
+ pass
+
+
+TaggedCertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('certificationRequest', CertificationRequest())
+)
+
+
+class TaggedRequest(univ.Choice):
+ pass
+
+
+TaggedRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tcr', TaggedCertificationRequest().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('crm',
+ rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()),
+ namedtype.NamedType('requestMessageValue', univ.Any())
+ ))
+ .subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+id_cmc_popLinkRandom = _buildOid(id_cmc, 22)
+
+id_cmc_statusInfo = _buildOid(id_cmc, 1)
+
+id_cmc_trustedAnchors = _buildOid(id_cmc, 26)
+
+id_cmc_transactionId = _buildOid(id_cmc, 5)
+
+id_cmc_encryptedPOP = _buildOid(id_cmc, 9)
+
+
+class PublishTrustAnchors(univ.Sequence):
+ pass
+
+
+PublishTrustAnchors.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seqNumber', univ.Integer()),
+ namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString()))
+)
+
+
+class RevokeRequest(univ.Sequence):
+ pass
+
+
+RevokeRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('reason', rfc5280.CRLReason()),
+ namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('passphrase', univ.OctetString()),
+ namedtype.OptionalNamedType('comment', char.UTF8String())
+)
+
+id_cmc_senderNonce = _buildOid(id_cmc, 6)
+
+id_cmc_authData = _buildOid(id_cmc, 27)
+
+
+class TaggedContentInfo(univ.Sequence):
+ pass
+
+
+TaggedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('contentInfo', rfc5652.ContentInfo())
+)
+
+
+class IdentifyProofV2(univ.Sequence):
+ pass
+
+
+IdentifyProofV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+
+class CMCPublicationInfo(univ.Sequence):
+ pass
+
+
+CMCPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())),
+ namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo())
+)
+
+id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27)
+
+id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24)
+
+id_cmc_raIdentityWitness = _buildOid(id_cmc, 35)
+
+id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14)
+
+id_cct = _buildOid(id_pkix, 12)
+
+id_cct_PKIData = _buildOid(id_cct, 2)
+
+id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28)
+
+
+class CMCStatusInfo(univ.Sequence):
+ pass
+
+
+CMCStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo())
+ )
+ )
+ )
+)
+
+
+class DecryptedPOP(univ.Sequence):
+ pass
+
+
+DecryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('thePOP', univ.OctetString())
+)
+
+id_cmc_addExtensions = _buildOid(id_cmc, 8)
+
+id_cmc_modCertTemplate = _buildOid(id_cmc, 31)
+
+
+class TaggedAttribute(univ.Sequence):
+ pass
+
+
+TaggedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmcControlAttributesMap)
+ )
+)
+
+
+class OtherMsg(univ.Sequence):
+ pass
+
+
+OtherMsg.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherMsgValue', univ.Any())
+)
+
+
+class PKIData(univ.Sequence):
+ pass
+
+
+PKIData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class BodyPartList(univ.SequenceOf):
+ pass
+
+
+BodyPartList.componentType = BodyPartID()
+BodyPartList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_cmc_responseBody = _buildOid(id_cmc, 37)
+
+
+class AuthPublish(BodyPartID):
+ pass
+
+
+class CMCUnsignedData(univ.Sequence):
+ pass
+
+
+CMCUnsignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartPath', BodyPartPath()),
+ namedtype.NamedType('identifier', univ.ObjectIdentifier()),
+ namedtype.NamedType('content', univ.Any())
+)
+
+
+class CMCCertId(rfc5652.IssuerAndSerialNumber):
+ pass
+
+
+class PKIResponse(univ.Sequence):
+ pass
+
+
+PKIResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class ResponseBody(PKIResponse):
+ pass
+
+
+id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
+
+id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
+
+
+class ModCertTemplate(univ.Sequence):
+ pass
+
+
+ModCertTemplate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartPath()),
+ namedtype.NamedType('certReferences', BodyPartList()),
+ namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
+ namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
+)
+
+id_cmc_regInfo = _buildOid(id_cmc, 18)
+
+id_cmc_identityProof = _buildOid(id_cmc, 3)
+
+
+class ExtensionReq(univ.SequenceOf):
+ pass
+
+
+ExtensionReq.componentType = rfc5280.Extension()
+ExtensionReq.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 28)
+
+id_cmc_publishCert = _buildOid(id_cmc, 30)
+
+id_cmc_dataReturn = _buildOid(id_cmc, 4)
+
+
+class LraPopWitness(univ.Sequence):
+ pass
+
+
+LraPopWitness.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataBodyid', BodyPartID()),
+ namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
+)
+
+id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
+
+id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
+
+id_cmc_getCert = _buildOid(id_cmc, 15)
+
+id_cmc_batchRequests = _buildOid(id_cmc, 28)
+
+id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
+
+id_cmc_responseInfo = _buildOid(id_cmc, 19)
+
+id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
+
+
+class GetCert(univ.Sequence):
+ pass
+
+
+GetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+id_cmc_identification = _buildOid(id_cmc, 2)
+
+id_cmc_queryPending = _buildOid(id_cmc, 21)
+
+
+class AddExtensions(univ.Sequence):
+ pass
+
+
+AddExtensions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartID()),
+ namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
+)
+
+
+class EncryptedPOP(univ.Sequence):
+ pass
+
+
+EncryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request', TaggedRequest()),
+ namedtype.NamedType('cms', rfc5652.ContentInfo()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witnessAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_getCRL = _buildOid(id_cmc, 16)
+
+id_cct_PKIResponse = _buildOid(id_cct, 3)
+
+id_cmc_controlProcessed = _buildOid(id_cmc, 32)
+
+
+class NoSignatureValue(univ.OctetString):
+ pass
+
+
+id_ad_cmc = _buildOid(rfc5280.id_ad, 12)
+
+id_alg_noSignature = _buildOid(id_pkix, 6, 2)
+
+
+# Map of CMC Control OIDs to CMC Control Attributes
+
+_cmcControlAttributesMapUpdate = {
+ id_cmc_statusInfo: CMCStatusInfo(),
+ id_cmc_statusInfoV2: CMCStatusInfoV2(),
+ id_cmc_identification: char.UTF8String(),
+ id_cmc_identityProof: univ.OctetString(),
+ id_cmc_identityProofV2: IdentifyProofV2(),
+ id_cmc_dataReturn: univ.OctetString(),
+ id_cmc_transactionId: univ.Integer(),
+ id_cmc_senderNonce: univ.OctetString(),
+ id_cmc_recipientNonce: univ.OctetString(),
+ id_cmc_addExtensions: AddExtensions(),
+ id_cmc_encryptedPOP: EncryptedPOP(),
+ id_cmc_decryptedPOP: DecryptedPOP(),
+ id_cmc_lraPOPWitness: LraPopWitness(),
+ id_cmc_getCert: GetCert(),
+ id_cmc_getCRL: GetCRL(),
+ id_cmc_revokeRequest: RevokeRequest(),
+ id_cmc_regInfo: univ.OctetString(),
+ id_cmc_responseInfo: univ.OctetString(),
+ id_cmc_queryPending: univ.OctetString(),
+ id_cmc_popLinkRandom: univ.OctetString(),
+ id_cmc_popLinkWitness: univ.OctetString(),
+ id_cmc_popLinkWitnessV2: PopLinkWitnessV2(),
+ id_cmc_confirmCertAcceptance: CMCCertId(),
+ id_cmc_trustedAnchors: PublishTrustAnchors(),
+ id_cmc_authData: AuthPublish(),
+ id_cmc_batchRequests: BodyPartList(),
+ id_cmc_batchResponses: BodyPartList(),
+ id_cmc_publishCert: CMCPublicationInfo(),
+ id_cmc_modCertTemplate: ModCertTemplate(),
+ id_cmc_controlProcessed: ControlsProcessed(),
+ id_ExtensionReq: ExtensionReq(),
+}
+
+cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
+
+
+# Map of CMC Content Type OIDs to CMC Content Types are added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_cct_PKIData: PKIData(),
+ id_cct_PKIResponse: PKIResponse(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6482.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6482.py
new file mode 100644
index 0000000000..d213a46f8d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6482.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RPKI Route Origin Authorizations (ROAs)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6482.txt
+# https://www.rfc-editor.org/errata/eid5881
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_ct_routeOriginAuthz = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.24')
+
+
+class ASID(univ.Integer):
+ pass
+
+
+class IPAddress(univ.BitString):
+ pass
+
+
+class ROAIPAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('address', IPAddress()),
+ namedtype.OptionalNamedType('maxLength', univ.Integer())
+ )
+
+
+class ROAIPAddressFamily(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressFamily',
+ univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
+ namedtype.NamedType('addresses',
+ univ.SequenceOf(componentType=ROAIPAddress()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class RouteOriginAttestation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.NamedType('asID', ASID()),
+ namedtype.NamedType('ipAddrBlocks',
+ univ.SequenceOf(componentType=ROAIPAddressFamily()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_routeOriginAuthz: RouteOriginAttestation(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6486.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6486.py
new file mode 100644
index 0000000000..31c936a4f2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6486.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RPKI Manifests
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6486.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16')
+
+id_ct = id_smime + (1, )
+
+id_ct_rpkiManifest = id_ct + (26, )
+
+
+class FileAndHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('file', char.IA5String()),
+ namedtype.NamedType('hash', univ.BitString())
+ )
+
+
+class Manifest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.NamedType('manifestNumber',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('thisUpdate',
+ useful.GeneralizedTime()),
+ namedtype.NamedType('nextUpdate',
+ useful.GeneralizedTime()),
+ namedtype.NamedType('fileHashAlg',
+ univ.ObjectIdentifier()),
+ namedtype.NamedType('fileList',
+ univ.SequenceOf(componentType=FileAndHash()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, MAX)))
+ )
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_rpkiManifest: Manifest(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6487.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6487.py
new file mode 100644
index 0000000000..d8c2f87423
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6487.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Profile for X.509 PKIX Resource Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6487.txt
+#
+
+from pyasn1.type import univ
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_ad = id_pkix + (48, )
+
+id_ad_rpkiManifest = id_ad + (10, )
+id_ad_signedObject = id_ad + (11, )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6664.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6664.py
new file mode 100644
index 0000000000..41629d8d7f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6664.py
@@ -0,0 +1,147 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# S/MIME Capabilities for Public Key Definitions
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6664.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc3279
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 3279
+
+dhpublicnumber = rfc3279.dhpublicnumber
+
+Dss_Parms = rfc3279.Dss_Parms
+
+id_dsa = rfc3279.id_dsa
+
+id_ecPublicKey = rfc3279.id_ecPublicKey
+
+rsaEncryption = rfc3279.rsaEncryption
+
+
+# Imports from RFC 4055
+
+id_mgf1 = rfc4055.id_mgf1
+
+id_RSAES_OAEP = rfc4055.id_RSAES_OAEP
+
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+
+
+# Imports from RFC 5480
+
+ECParameters = rfc5480.ECParameters
+
+id_ecDH = rfc5480.id_ecDH
+
+id_ecMQV = rfc5480.id_ecMQV
+
+
+# RSA
+
+class RSAKeySize(univ.Integer):
+ # suggested values are 1024, 2048, 3072, 4096, 7680, 8192, and 15360;
+ # however, the integer value is not limited to these suggestions
+ pass
+
+
+class RSAKeyCapabilities(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('minKeySize', RSAKeySize()),
+ namedtype.OptionalNamedType('maxKeySize', RSAKeySize())
+ )
+
+
+class RsaSsa_Pss_sig_caps(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', AlgorithmIdentifier()),
+ namedtype.OptionalNamedType('maskAlg', AlgorithmIdentifier()),
+ namedtype.DefaultedNamedType('trailerField', univ.Integer().subtype(value=1))
+ )
+
+
+# Diffie-Hellman and DSA
+
+class DSAKeySize(univ.Integer):
+ subtypeSpec = constraint.SingleValueConstraint(1024, 2048, 3072, 7680, 15360)
+
+
+class DSAKeyCapabilities(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keySizes', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('minKeySize',
+ DSAKeySize()),
+ namedtype.OptionalNamedType('maxKeySize',
+ DSAKeySize()),
+ namedtype.OptionalNamedType('maxSizeP',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('maxSizeQ',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('maxSizeG',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyParams',
+ Dss_Parms().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+# Elliptic Curve
+
+class EC_SMimeCaps(univ.SequenceOf):
+ componentType = ECParameters()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Update the SMIMECapabilities Attribute Map in rfc5751.py
+#
+# The map can either include an entry for scap-sa-rsaSSA-PSS or
+# scap-pk-rsaSSA-PSS, but not both. One is associated with the
+# public key and the other is associated with the signature
+# algorithm; however, they use the same OID. If you need the
+# other one in your application, copy the map into a local dict,
+# adjust as needed, and pass the local dict to the decoder with
+# openTypes=your_local_map.
+
+_smimeCapabilityMapUpdate = {
+ rsaEncryption: RSAKeyCapabilities(),
+ id_RSASSA_PSS: RSAKeyCapabilities(),
+ # id_RSASSA_PSS: RsaSsa_Pss_sig_caps(),
+ id_RSAES_OAEP: RSAKeyCapabilities(),
+ id_dsa: DSAKeyCapabilities(),
+ dhpublicnumber: DSAKeyCapabilities(),
+ id_ecPublicKey: EC_SMimeCaps(),
+ id_ecDH: EC_SMimeCaps(),
+ id_ecMQV: EC_SMimeCaps(),
+ id_mgf1: AlgorithmIdentifier(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6955.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6955.py
new file mode 100644
index 0000000000..09f2d6562e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6955.py
@@ -0,0 +1,108 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Diffie-Hellman Proof-of-Possession Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6955.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+MessageDigest = rfc5652.MessageDigest
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+
+# Imports from RFC 5280
+
+id_pkix = rfc5280.id_pkix
+
+
+# Imports from RFC 3279
+
+Dss_Sig_Value = rfc3279.Dss_Sig_Value
+
+DomainParameters = rfc3279.DomainParameters
+
+
+# Static DH Proof-of-Possession
+
+class DhSigStatic(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerAndSerial', IssuerAndSerialNumber()),
+ namedtype.NamedType('hashValue', MessageDigest())
+ )
+
+
+# Object Identifiers
+
+id_dh_sig_hmac_sha1 = id_pkix + (6, 3, )
+
+id_dhPop_static_sha1_hmac_sha1 = univ.ObjectIdentifier(id_dh_sig_hmac_sha1)
+
+
+id_alg_dh_pop = id_pkix + (6, 4, )
+
+id_alg_dhPop_sha1 = univ.ObjectIdentifier(id_alg_dh_pop)
+
+id_alg_dhPop_sha224 = id_pkix + (6, 5, )
+
+id_alg_dhPop_sha256 = id_pkix + (6, 6, )
+
+id_alg_dhPop_sha384 = id_pkix + (6, 7, )
+
+id_alg_dhPop_sha512 = id_pkix + (6, 8, )
+
+
+id_alg_dhPop_static_sha224_hmac_sha224 = id_pkix + (6, 15, )
+
+id_alg_dhPop_static_sha256_hmac_sha256 = id_pkix + (6, 16, )
+
+id_alg_dhPop_static_sha384_hmac_sha384 = id_pkix + (6, 17, )
+
+id_alg_dhPop_static_sha512_hmac_sha512 = id_pkix + (6, 18, )
+
+
+id_alg_ecdhPop_static_sha224_hmac_sha224 = id_pkix + (6, 25, )
+
+id_alg_ecdhPop_static_sha256_hmac_sha256 = id_pkix + (6, 26, )
+
+id_alg_ecdhPop_static_sha384_hmac_sha384 = id_pkix + (6, 27, )
+
+id_alg_ecdhPop_static_sha512_hmac_sha512 = id_pkix + (6, 28, )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_dh_pop: DomainParameters(),
+ id_alg_dhPop_sha224: DomainParameters(),
+ id_alg_dhPop_sha256: DomainParameters(),
+ id_alg_dhPop_sha384: DomainParameters(),
+ id_alg_dhPop_sha512: DomainParameters(),
+ id_dh_sig_hmac_sha1: univ.Null(""),
+ id_alg_dhPop_static_sha224_hmac_sha224: univ.Null(""),
+ id_alg_dhPop_static_sha256_hmac_sha256: univ.Null(""),
+ id_alg_dhPop_static_sha384_hmac_sha384: univ.Null(""),
+ id_alg_dhPop_static_sha512_hmac_sha512: univ.Null(""),
+ id_alg_ecdhPop_static_sha224_hmac_sha224: univ.Null(""),
+ id_alg_ecdhPop_static_sha256_hmac_sha256: univ.Null(""),
+ id_alg_ecdhPop_static_sha384_hmac_sha384: univ.Null(""),
+ id_alg_ecdhPop_static_sha512_hmac_sha512: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6960.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6960.py
new file mode 100644
index 0000000000..e5f1305649
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc6960.py
@@ -0,0 +1,223 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Online Certificate Status Protocol (OCSP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6960.txt
+#
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax
+Certificate = rfc5280.Certificate
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+CRLReason = rfc5280.CRLReason
+Extensions = rfc5280.Extensions
+GeneralName = rfc5280.GeneralName
+Name = rfc5280.Name
+
+id_kp = rfc5280.id_kp
+
+id_ad_ocsp = rfc5280.id_ad_ocsp
+
+
+# Imports from the original OCSP module in RFC 2560
+
+AcceptableResponses = rfc2560.AcceptableResponses
+ArchiveCutoff = rfc2560.ArchiveCutoff
+CertStatus = rfc2560.CertStatus
+KeyHash = rfc2560.KeyHash
+OCSPResponse = rfc2560.OCSPResponse
+OCSPResponseStatus = rfc2560.OCSPResponseStatus
+ResponseBytes = rfc2560.ResponseBytes
+RevokedInfo = rfc2560.RevokedInfo
+UnknownInfo = rfc2560.UnknownInfo
+Version = rfc2560.Version
+
+id_kp_OCSPSigning = rfc2560.id_kp_OCSPSigning
+
+id_pkix_ocsp = rfc2560.id_pkix_ocsp
+id_pkix_ocsp_archive_cutoff = rfc2560.id_pkix_ocsp_archive_cutoff
+id_pkix_ocsp_basic = rfc2560.id_pkix_ocsp_basic
+id_pkix_ocsp_crl = rfc2560.id_pkix_ocsp_crl
+id_pkix_ocsp_nocheck = rfc2560.id_pkix_ocsp_nocheck
+id_pkix_ocsp_nonce = rfc2560.id_pkix_ocsp_nonce
+id_pkix_ocsp_response = rfc2560.id_pkix_ocsp_response
+id_pkix_ocsp_service_locator = rfc2560.id_pkix_ocsp_service_locator
+
+
+# Additional object identifiers
+
+id_pkix_ocsp_pref_sig_algs = id_pkix_ocsp + (8, )
+id_pkix_ocsp_extended_revoke = id_pkix_ocsp + (9, )
+
+
+# Updated structures (mostly to improve openTypes support)
+
+class CertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('issuerNameHash', univ.OctetString()),
+ namedtype.NamedType('issuerKeyHash', univ.OctetString()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+class SingleResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certID', CertID()),
+ namedtype.NamedType('certStatus', CertStatus()),
+ namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('singleExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ResponderID(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('byName', Name().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('byKey', KeyHash().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class ResponseData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('responderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('responses', univ.SequenceOf(
+ componentType=SingleResponse())),
+ namedtype.OptionalNamedType('responseExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BasicOCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsResponseData', ResponseData()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(
+ componentType=Certificate()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Request(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('reqCert', CertID()),
+ namedtype.OptionalNamedType('singleRequestExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Signature(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(
+ componentType=Certificate()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class TBSRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('requestList', univ.SequenceOf(
+ componentType=Request())),
+ namedtype.OptionalNamedType('requestExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class OCSPRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsRequest', TBSRequest()),
+ namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+# Previously omitted structure
+
+class ServiceLocator(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('locator', AuthorityInfoAccessSyntax())
+ )
+
+
+# Additional structures
+
+class CrlID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlUrl', char.IA5String().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crlNum', univ.Integer().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('crlTime', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class PreferredSignatureAlgorithm(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigIdentifier', AlgorithmIdentifier()),
+ namedtype.OptionalNamedType('certIdentifier', AlgorithmIdentifier())
+ )
+
+
+class PreferredSignatureAlgorithms(univ.SequenceOf):
+ componentType = PreferredSignatureAlgorithm()
+
+
+
+# Response Type OID to Response Map
+
+ocspResponseMap = {
+ id_pkix_ocsp_basic: BasicOCSPResponse(),
+}
+
+
+# Map of Extension OIDs to Extensions added to the ones
+# that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ # Certificate Extension
+ id_pkix_ocsp_nocheck: univ.Null(""),
+ # OCSP Request Extensions
+ id_pkix_ocsp_nonce: univ.OctetString(),
+ id_pkix_ocsp_response: AcceptableResponses(),
+ id_pkix_ocsp_service_locator: ServiceLocator(),
+ id_pkix_ocsp_pref_sig_algs: PreferredSignatureAlgorithms(),
+ # OCSP Response Extensions
+ id_pkix_ocsp_crl: CrlID(),
+ id_pkix_ocsp_archive_cutoff: ArchiveCutoff(),
+ id_pkix_ocsp_extended_revoke: univ.Null(""),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7030.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7030.py
new file mode 100644
index 0000000000..84b6dc5f9a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7030.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enrollment over Secure Transport (EST)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7030.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Imports from RFC 5652
+
+Attribute = rfc5652.Attribute
+
+
+# Asymmetric Decrypt Key Identifier Attribute
+
+id_aa_asymmDecryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.54')
+
+class AsymmetricDecryptKeyIdentifier(univ.OctetString):
+ pass
+
+
+aa_asymmDecryptKeyID = Attribute()
+aa_asymmDecryptKeyID['attrType'] = id_aa_asymmDecryptKeyID
+aa_asymmDecryptKeyID['attrValues'][0] = AsymmetricDecryptKeyIdentifier()
+
+
+# CSR Attributes
+
+class AttrOrOID(univ.Choice):
+ pass
+
+AttrOrOID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('attribute', Attribute())
+)
+
+
+class CsrAttrs(univ.SequenceOf):
+ pass
+
+CsrAttrs.componentType = AttrOrOID()
+CsrAttrs.subtypeSpec=constraint.ValueSizeConstraint(0, MAX)
+
+
+# Update CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ id_aa_asymmDecryptKeyID: AsymmetricDecryptKeyIdentifier(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7191.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7191.py
new file mode 100644
index 0000000000..7c2be11562
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7191.py
@@ -0,0 +1,261 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add support for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Key Package Receipt and Error Content Types
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7191.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+DistinguishedName = rfc5280.DistinguishedName
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+AttributeValues.sizeSpec = univ.Set.sizeSpec + constraint.ValueSizeConstraint(1, 1)
+
+
+class SingleAttribute(univ.Sequence):
+ pass
+
+SingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', AttributeValues(),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# SIR Entity Name
+
+class SIREntityNameType(univ.ObjectIdentifier):
+ pass
+
+
+class SIREntityNameValue(univ.Any):
+ pass
+
+
+class SIREntityName(univ.Sequence):
+ pass
+
+SIREntityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sirenType', SIREntityNameType()),
+ namedtype.NamedType('sirenValue', univ.OctetString())
+ # CONTAINING the DER-encoded SIREntityNameValue
+)
+
+
+class SIREntityNames(univ.SequenceOf):
+ pass
+
+SIREntityNames.componentType = SIREntityName()
+SIREntityNames.sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+id_dn = univ.ObjectIdentifier('2.16.840.1.101.2.1.16.0')
+
+
+class siren_dn(SIREntityName):
+ def __init__(self):
+ SIREntityName.__init__(self)
+ self['sirenType'] = id_dn
+
+
+# Key Package Error CMS Content Type
+
+class EnumeratedErrorCode(univ.Enumerated):
+ pass
+
+# Error codes with values <= 33 are aligned with RFC 5934
+EnumeratedErrorCode.namedValues = namedval.NamedValues(
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('unsupportedParameters', 15),
+ ('signatureFailure', 16),
+ ('insufficientMemory', 17),
+ ('incorrectTarget', 23),
+ ('missingSignature', 29),
+ ('resourcesBusy', 30),
+ ('versionNumberMismatch', 31),
+ ('revokedCertificate', 33),
+ ('ambiguousDecrypt', 60),
+ ('noDecryptKey', 61),
+ ('badEncryptedData', 62),
+ ('badEnvelopedData', 63),
+ ('badAuthenticatedData', 64),
+ ('badAuthEnvelopedData', 65),
+ ('badKeyAgreeRecipientInfo', 66),
+ ('badKEKRecipientInfo', 67),
+ ('badEncryptContent', 68),
+ ('badEncryptAlgorithm', 69),
+ ('missingCiphertext', 70),
+ ('decryptFailure', 71),
+ ('badMACAlgorithm', 72),
+ ('badAuthAttrs', 73),
+ ('badUnauthAttrs', 74),
+ ('invalidMAC', 75),
+ ('mismatchedDigestAlg', 76),
+ ('missingCertificate', 77),
+ ('tooManySigners', 78),
+ ('missingSignedAttributes', 79),
+ ('derEncodingNotUsed', 80),
+ ('missingContentHints', 81),
+ ('invalidAttributeLocation', 82),
+ ('badMessageDigest', 83),
+ ('badKeyPackage', 84),
+ ('badAttributes', 85),
+ ('attributeComparisonFailure', 86),
+ ('unsupportedSymmetricKeyPackage', 87),
+ ('unsupportedAsymmetricKeyPackage', 88),
+ ('constraintViolation', 89),
+ ('ambiguousDefaultValue', 90),
+ ('noMatchingRecipientInfo', 91),
+ ('unsupportedKeyWrapAlgorithm', 92),
+ ('badKeyTransRecipientInfo', 93),
+ ('other', 127)
+)
+
+
+class ErrorCodeChoice(univ.Choice):
+ pass
+
+ErrorCodeChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enum', EnumeratedErrorCode()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier())
+)
+
+
+class KeyPkgID(univ.OctetString):
+ pass
+
+
+class KeyPkgIdentifier(univ.Choice):
+ pass
+
+KeyPkgIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkgID', KeyPkgID()),
+ namedtype.NamedType('attribute', SingleAttribute())
+)
+
+
+class KeyPkgVersion(univ.Integer):
+ pass
+
+
+KeyPkgVersion.namedValues = namedval.NamedValues(
+ ('v1', 1),
+ ('v2', 2)
+)
+
+KeyPkgVersion.subtypeSpec = constraint.ValueRangeConstraint(1, 65535)
+
+
+id_ct_KP_keyPackageError = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.6')
+
+class KeyPackageError(univ.Sequence):
+ pass
+
+KeyPackageError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')),
+ namedtype.OptionalNamedType('errorOf', KeyPkgIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('errorBy', SIREntityName()),
+ namedtype.NamedType('errorCode', ErrorCodeChoice())
+)
+
+
+# Key Package Receipt CMS Content Type
+
+id_ct_KP_keyPackageReceipt = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.3')
+
+class KeyPackageReceipt(univ.Sequence):
+ pass
+
+KeyPackageReceipt.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')),
+ namedtype.NamedType('receiptOf', KeyPkgIdentifier()),
+ namedtype.NamedType('receivedBy', SIREntityName())
+)
+
+
+# Key Package Receipt Request Attribute
+
+class KeyPkgReceiptReq(univ.Sequence):
+ pass
+
+KeyPkgReceiptReq.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('encryptReceipt', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('receiptsFrom', SIREntityNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receiptsTo', SIREntityNames())
+)
+
+
+id_aa_KP_keyPkgIdAndReceiptReq = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.65')
+
+class KeyPkgIdentifierAndReceiptReq(univ.Sequence):
+ pass
+
+KeyPkgIdentifierAndReceiptReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkgID', KeyPkgID()),
+ namedtype.OptionalNamedType('receiptReq', KeyPkgReceiptReq())
+)
+
+
+# Map of Attribute Type OIDs to Attributes that are added to
+# the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of CMC Content Type OIDs to CMC Content Types that are added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_keyPackageError: KeyPackageError(),
+ id_ct_KP_keyPackageReceipt: KeyPackageReceipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7229.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7229.py
new file mode 100644
index 0000000000..e9bce2d5b6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7229.py
@@ -0,0 +1,29 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Object Identifiers for Test Certificate Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7229.txt
+#
+
+from pyasn1.type import univ
+
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_TEST = id_pkix + (13, )
+
+id_TEST_certPolicyOne = id_TEST + (1, )
+id_TEST_certPolicyTwo = id_TEST + (2, )
+id_TEST_certPolicyThree = id_TEST + (3, )
+id_TEST_certPolicyFour = id_TEST + (4, )
+id_TEST_certPolicyFive = id_TEST + (5, )
+id_TEST_certPolicySix = id_TEST + (6, )
+id_TEST_certPolicySeven = id_TEST + (7, )
+id_TEST_certPolicyEight = id_TEST + (8, )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7292.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7292.py
new file mode 100644
index 0000000000..1c9f319a5d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7292.py
@@ -0,0 +1,357 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #12: Personal Information Exchange Syntax v1.1
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7292.txt
+# https://www.rfc-editor.org/errata_search.php?rfc=7292
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2315
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5958
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Initialize the maps used in PKCS#12
+
+pkcs12BagTypeMap = { }
+
+pkcs12CertBagMap = { }
+
+pkcs12CRLBagMap = { }
+
+pkcs12SecretBagMap = { }
+
+
+# Imports from RFC 2315, RFC 5652, and RFC 5958
+
+DigestInfo = rfc2315.DigestInfo
+
+
+ContentInfo = rfc5652.ContentInfo
+
+PKCS12Attribute = rfc5652.Attribute
+
+
+EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
+
+PrivateKeyInfo = rfc5958.PrivateKeyInfo
+
+
+# CMSSingleAttribute is the same as Attribute in RFC 5652 except the attrValues
+# SET must have one and only one member
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+
+
+class CMSSingleAttribute(univ.Sequence):
+ pass
+
+CMSSingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# Object identifier arcs
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+pkcs = _OID(rsadsi, 1)
+
+pkcs_9 = _OID(pkcs, 9)
+
+certTypes = _OID(pkcs_9, 22)
+
+crlTypes = _OID(pkcs_9, 23)
+
+pkcs_12 = _OID(pkcs, 12)
+
+
+# PBE Algorithm Identifiers and Parameters Structure
+
+pkcs_12PbeIds = _OID(pkcs_12, 1)
+
+pbeWithSHAAnd128BitRC4 = _OID(pkcs_12PbeIds, 1)
+
+pbeWithSHAAnd40BitRC4 = _OID(pkcs_12PbeIds, 2)
+
+pbeWithSHAAnd3_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 3)
+
+pbeWithSHAAnd2_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 4)
+
+pbeWithSHAAnd128BitRC2_CBC = _OID(pkcs_12PbeIds, 5)
+
+pbeWithSHAAnd40BitRC2_CBC = _OID(pkcs_12PbeIds, 6)
+
+
+class Pkcs_12PbeParams(univ.Sequence):
+ pass
+
+Pkcs_12PbeParams.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('iterations', univ.Integer())
+)
+
+
+# Bag types
+
+bagtypes = _OID(pkcs_12, 10, 1)
+
+class BAG_TYPE(univ.Sequence):
+ pass
+
+BAG_TYPE.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.ObjectIdentifier()),
+ namedtype.NamedType('unnamed1', univ.Any(),
+ openType=opentype.OpenType('attrType', pkcs12BagTypeMap)
+ )
+)
+
+
+id_keyBag = _OID(bagtypes, 1)
+
+class KeyBag(PrivateKeyInfo):
+ pass
+
+
+id_pkcs8ShroudedKeyBag = _OID(bagtypes, 2)
+
+class PKCS8ShroudedKeyBag(EncryptedPrivateKeyInfo):
+ pass
+
+
+id_certBag = _OID(bagtypes, 3)
+
+class CertBag(univ.Sequence):
+ pass
+
+CertBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certId', univ.ObjectIdentifier()),
+ namedtype.NamedType('certValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('certId', pkcs12CertBagMap)
+ )
+)
+
+
+x509Certificate = CertBag()
+x509Certificate['certId'] = _OID(certTypes, 1)
+x509Certificate['certValue'] = univ.OctetString()
+# DER-encoded X.509 certificate stored in OCTET STRING
+
+
+sdsiCertificate = CertBag()
+sdsiCertificate['certId'] = _OID(certTypes, 2)
+sdsiCertificate['certValue'] = char.IA5String()
+# Base64-encoded SDSI certificate stored in IA5String
+
+
+id_CRLBag = _OID(bagtypes, 4)
+
+class CRLBag(univ.Sequence):
+ pass
+
+CRLBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlId', univ.ObjectIdentifier()),
+ namedtype.NamedType('crlValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('crlId', pkcs12CRLBagMap)
+ )
+)
+
+
+x509CRL = CRLBag()
+x509CRL['crlId'] = _OID(crlTypes, 1)
+x509CRL['crlValue'] = univ.OctetString()
+# DER-encoded X.509 CRL stored in OCTET STRING
+
+
+id_secretBag = _OID(bagtypes, 5)
+
+class SecretBag(univ.Sequence):
+ pass
+
+SecretBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('secretTypeId', univ.ObjectIdentifier()),
+ namedtype.NamedType('secretValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('secretTypeId', pkcs12SecretBagMap)
+ )
+)
+
+
+id_safeContentsBag = _OID(bagtypes, 6)
+
+class SafeBag(univ.Sequence):
+ pass
+
+SafeBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bagId', univ.ObjectIdentifier()),
+ namedtype.NamedType('bagValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('bagId', pkcs12BagTypeMap)
+ ),
+ namedtype.OptionalNamedType('bagAttributes',
+ univ.SetOf(componentType=PKCS12Attribute())
+ )
+)
+
+
+class SafeContents(univ.SequenceOf):
+ pass
+
+SafeContents.componentType = SafeBag()
+
+
+# The PFX PDU
+
+class AuthenticatedSafe(univ.SequenceOf):
+ pass
+
+AuthenticatedSafe.componentType = ContentInfo()
+# Data if unencrypted
+# EncryptedData if password-encrypted
+# EnvelopedData if public key-encrypted
+
+
+class MacData(univ.Sequence):
+ pass
+
+MacData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mac', DigestInfo()),
+ namedtype.NamedType('macSalt', univ.OctetString()),
+ namedtype.DefaultedNamedType('iterations', univ.Integer().subtype(value=1))
+ # Note: The default is for historical reasons and its use is deprecated
+)
+
+
+class PFX(univ.Sequence):
+ pass
+
+PFX.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ univ.Integer(namedValues=namedval.NamedValues(('v3', 3)))
+ ),
+ namedtype.NamedType('authSafe', ContentInfo()),
+ namedtype.OptionalNamedType('macData', MacData())
+)
+
+
+# Local key identifier (also defined as certificateAttribute in rfc2985.py)
+
+pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
+
+localKeyId = CMSSingleAttribute()
+localKeyId['attrType'] = pkcs_9_at_localKeyId
+localKeyId['attrValues'][0] = univ.OctetString()
+
+
+# Friendly name (also defined as certificateAttribute in rfc2985.py)
+
+pkcs_9_ub_pkcs9String = univ.Integer(255)
+
+pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
+
+class FriendlyName(char.BMPString):
+ pass
+
+FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
+
+
+friendlyName = CMSSingleAttribute()
+friendlyName['attrType'] = pkcs_9_at_friendlyName
+friendlyName['attrValues'][0] = FriendlyName()
+
+
+# Update the PKCS#12 maps
+
+_pkcs12BagTypeMap = {
+ id_keyBag: KeyBag(),
+ id_pkcs8ShroudedKeyBag: PKCS8ShroudedKeyBag(),
+ id_certBag: CertBag(),
+ id_CRLBag: CRLBag(),
+ id_secretBag: SecretBag(),
+ id_safeContentsBag: SafeBag(),
+}
+
+pkcs12BagTypeMap.update(_pkcs12BagTypeMap)
+
+
+_pkcs12CertBagMap = {
+ _OID(certTypes, 1): univ.OctetString(),
+ _OID(certTypes, 2): char.IA5String(),
+}
+
+pkcs12CertBagMap.update(_pkcs12CertBagMap)
+
+
+_pkcs12CRLBagMap = {
+ _OID(crlTypes, 1): univ.OctetString(),
+}
+
+pkcs12CRLBagMap.update(_pkcs12CRLBagMap)
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ pbeWithSHAAnd128BitRC4: Pkcs_12PbeParams(),
+ pbeWithSHAAnd40BitRC4: Pkcs_12PbeParams(),
+ pbeWithSHAAnd3_KeyTripleDES_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd2_KeyTripleDES_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd128BitRC2_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd40BitRC2_CBC: Pkcs_12PbeParams(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the CMS Attribute map
+
+_cmsAttributesMapUpdate = {
+ pkcs_9_at_friendlyName: FriendlyName(),
+ pkcs_9_at_localKeyId: univ.OctetString(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7296.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7296.py
new file mode 100644
index 0000000000..95a191a14d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7296.py
@@ -0,0 +1,32 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# IKEv2 Certificate Bundle
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7296.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class CertificateOrCRL(univ.Choice):
+ pass
+
+CertificateOrCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cert', rfc5280.Certificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('crl', rfc5280.CertificateList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class CertificateBundle(univ.SequenceOf):
+ pass
+
+CertificateBundle.componentType = CertificateOrCRL()
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7508.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7508.py
new file mode 100644
index 0000000000..66460240f1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7508.py
@@ -0,0 +1,90 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Securing Header Fields with S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7508.txt
+# https://www.rfc-editor.org/errata/eid5875
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+import string
+
+MAX = float('inf')
+
+
+class Algorithm(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('canonAlgorithmSimple', 0),
+ ('canonAlgorithmRelaxed', 1)
+ )
+
+
+class HeaderFieldStatus(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('duplicated', 0),
+ ('deleted', 1),
+ ('modified', 2)
+ )
+
+
+class HeaderFieldName(char.VisibleString):
+ subtypeSpec = (
+ constraint.PermittedAlphabetConstraint(*string.printable) -
+ constraint.PermittedAlphabetConstraint(':')
+ )
+
+
+class HeaderFieldValue(char.UTF8String):
+ pass
+
+
+class HeaderField(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('field-Name', HeaderFieldName()),
+ namedtype.NamedType('field-Value', HeaderFieldValue()),
+ namedtype.DefaultedNamedType('field-Status',
+ HeaderFieldStatus().subtype(value='duplicated'))
+ )
+
+
+class HeaderFields(univ.SequenceOf):
+ componentType = HeaderField()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SecureHeaderFields(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('canonAlgorithm', Algorithm()),
+ namedtype.NamedType('secHeaderFields', HeaderFields())
+ )
+
+
+id_aa = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 2, ))
+
+id_aa_secureHeaderFieldsIdentifier = id_aa + (55, )
+
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_secureHeaderFieldsIdentifier: SecureHeaderFields(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7585.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7585.py
new file mode 100644
index 0000000000..b3fd4a5bac
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7585.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Network Access Identifier (NAI) Realm Name for Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7585.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# NAI Realm Name for Certificates
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_on = id_pkix + (8, )
+
+id_on_naiRealm = id_on + (8, )
+
+
+ub_naiRealm_length = univ.Integer(255)
+
+
+class NAIRealm(char.UTF8String):
+ subtypeSpec = constraint.ValueSizeConstraint(1, ub_naiRealm_length)
+
+
+naiRealm = rfc5280.AnotherName()
+naiRealm['type-id'] = id_on_naiRealm
+naiRealm['value'] = NAIRealm()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_naiRealm: NAIRealm(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7633.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7633.py
new file mode 100644
index 0000000000..f518440ff4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7633.py
@@ -0,0 +1,38 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Transport Layer Security (TLS) Feature Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7633.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# TLS Features Extension
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_pe_tlsfeature = id_pe + (24, )
+
+
+class Features(univ.SequenceOf):
+ componentType = univ.Integer()
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_tlsfeature: Features(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7773.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7773.py
new file mode 100644
index 0000000000..0fee2aa346
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7773.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authentication Context Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7773.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Authentication Context Extension
+
+e_legnamnden = univ.ObjectIdentifier('1.2.752.201')
+
+id_eleg_ce = e_legnamnden + (5, )
+
+id_ce_authContext = id_eleg_ce + (1, )
+
+
+class AuthenticationContext(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contextType', char.UTF8String()),
+ namedtype.OptionalNamedType('contextInfo', char.UTF8String())
+ )
+
+class AuthenticationContexts(univ.SequenceOf):
+ componentType = AuthenticationContext()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_ce_authContext: AuthenticationContexts(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7894.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7894.py
new file mode 100644
index 0000000000..41936433d1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7894.py
@@ -0,0 +1,92 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Alternative Challenge Password Attributes for EST
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7894.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7191
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+Attribute = rfc7191.SingleAttribute
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# OTP Challenge Attribute
+
+id_aa_otpChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.56')
+
+ub_aa_otpChallenge = univ.Integer(255)
+
+otpChallenge = Attribute()
+otpChallenge['attrType'] = id_aa_otpChallenge
+otpChallenge['attrValues'][0] = DirectoryString()
+
+
+# Revocation Challenge Attribute
+
+id_aa_revocationChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.57')
+
+ub_aa_revocationChallenge = univ.Integer(255)
+
+revocationChallenge = Attribute()
+revocationChallenge['attrType'] = id_aa_revocationChallenge
+revocationChallenge['attrValues'][0] = DirectoryString()
+
+
+# EST Identity Linking Attribute
+
+id_aa_estIdentityLinking = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.58')
+
+ub_aa_est_identity_linking = univ.Integer(255)
+
+estIdentityLinking = Attribute()
+estIdentityLinking['attrType'] = id_aa_estIdentityLinking
+estIdentityLinking['attrValues'][0] = DirectoryString()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc6402.py
+
+_cmcControlAttributesMapUpdate = {
+ id_aa_otpChallenge: DirectoryString(),
+ id_aa_revocationChallenge: DirectoryString(),
+ id_aa_estIdentityLinking: DirectoryString(),
+}
+
+rfc6402.cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7906.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7906.py
new file mode 100644
index 0000000000..fa5f6b0733
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7906.py
@@ -0,0 +1,736 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# NSA's CMS Key Management Attributes
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7906.txt
+# https://www.rfc-editor.org/errata/eid5850
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4108
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6010
+from pyasn1_modules import rfc6019
+from pyasn1_modules import rfc7191
+
+MAX = float('inf')
+
+
+# Imports From RFC 2634
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityClassification = rfc2634.SecurityClassification
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityCategories= rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Imports From RFC 4108
+
+id_aa_communityIdentifiers = rfc4108.id_aa_communityIdentifiers
+
+CommunityIdentifier = rfc4108.CommunityIdentifier
+
+CommunityIdentifiers = rfc4108.CommunityIdentifiers
+
+
+# Imports From RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Name = rfc5280.Name
+
+Certificate = rfc5280.Certificate
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+
+SubjectInfoAccessSyntax = rfc5280.SubjectInfoAccessSyntax
+
+id_pkix = rfc5280.id_pkix
+
+id_pe = rfc5280.id_pe
+
+id_pe_subjectInfoAccess = rfc5280.id_pe_subjectInfoAccess
+
+
+# Imports From RFC 6010
+
+CMSContentConstraints = rfc6010.CMSContentConstraints
+
+
+# Imports From RFC 6019
+
+BinaryTime = rfc6019.BinaryTime
+
+id_aa_binarySigningTime = rfc6019.id_aa_binarySigningTime
+
+BinarySigningTime = rfc6019.BinarySigningTime
+
+
+# Imports From RFC 5652
+
+Attribute = rfc5652.Attribute
+
+CertificateSet = rfc5652.CertificateSet
+
+CertificateChoices = rfc5652.CertificateChoices
+
+id_contentType = rfc5652.id_contentType
+
+ContentType = rfc5652.ContentType
+
+id_messageDigest = rfc5652.id_messageDigest
+
+MessageDigest = rfc5652.MessageDigest
+
+
+# Imports From RFC 7191
+
+SIREntityName = rfc7191.SIREntityName
+
+id_aa_KP_keyPkgIdAndReceiptReq = rfc7191.id_aa_KP_keyPkgIdAndReceiptReq
+
+KeyPkgIdentifierAndReceiptReq = rfc7191.KeyPkgIdentifierAndReceiptReq
+
+
+# Key Province Attribute
+
+id_aa_KP_keyProvinceV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.71')
+
+
+class KeyProvinceV2(univ.ObjectIdentifier):
+ pass
+
+
+aa_keyProvince_v2 = Attribute()
+aa_keyProvince_v2['attrType'] = id_aa_KP_keyProvinceV2
+aa_keyProvince_v2['attrValues'][0] = KeyProvinceV2()
+
+
+# Manifest Attribute
+
+id_aa_KP_manifest = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.72')
+
+
+class ShortTitle(char.PrintableString):
+ pass
+
+
+class Manifest(univ.SequenceOf):
+ pass
+
+Manifest.componentType = ShortTitle()
+Manifest.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_manifest = Attribute()
+aa_manifest['attrType'] = id_aa_KP_manifest
+aa_manifest['attrValues'][0] = Manifest()
+
+
+# Key Algorithm Attribute
+
+id_kma_keyAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.1')
+
+
+class KeyAlgorithm(univ.Sequence):
+ pass
+
+KeyAlgorithm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAlg', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('checkWordAlg', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('crcAlg', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+aa_keyAlgorithm = Attribute()
+aa_keyAlgorithm['attrType'] = id_kma_keyAlgorithm
+aa_keyAlgorithm['attrValues'][0] = KeyAlgorithm()
+
+
+# User Certificate Attribute
+
+id_at_userCertificate = univ.ObjectIdentifier('2.5.4.36')
+
+
+aa_userCertificate = Attribute()
+aa_userCertificate['attrType'] = id_at_userCertificate
+aa_userCertificate['attrValues'][0] = Certificate()
+
+
+# Key Package Receivers Attribute
+
+id_kma_keyPkgReceiversV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.16')
+
+
+class KeyPkgReceiver(univ.Choice):
+ pass
+
+KeyPkgReceiver.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sirEntity', SIREntityName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('community', CommunityIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class KeyPkgReceiversV2(univ.SequenceOf):
+ pass
+
+KeyPkgReceiversV2.componentType = KeyPkgReceiver()
+KeyPkgReceiversV2.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_keyPackageReceivers_v2 = Attribute()
+aa_keyPackageReceivers_v2['attrType'] = id_kma_keyPkgReceiversV2
+aa_keyPackageReceivers_v2['attrValues'][0] = KeyPkgReceiversV2()
+
+
+# TSEC Nomenclature Attribute
+
+id_kma_TSECNomenclature = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.3')
+
+
+class CharEdition(char.PrintableString):
+ pass
+
+
+class CharEditionRange(univ.Sequence):
+ pass
+
+CharEditionRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstCharEdition', CharEdition()),
+ namedtype.NamedType('lastCharEdition', CharEdition())
+)
+
+
+class NumEdition(univ.Integer):
+ pass
+
+NumEdition.subtypeSpec = constraint.ValueRangeConstraint(0, 308915776)
+
+
+class NumEditionRange(univ.Sequence):
+ pass
+
+NumEditionRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstNumEdition', NumEdition()),
+ namedtype.NamedType('lastNumEdition', NumEdition())
+)
+
+
+class EditionID(univ.Choice):
+ pass
+
+EditionID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('char', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('charEdition', CharEdition().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('charEditionRange', CharEditionRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ ))
+ ),
+ namedtype.NamedType('num', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('numEdition', NumEdition().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('numEditionRange', NumEditionRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+ ))
+ )
+)
+
+
+class Register(univ.Integer):
+ pass
+
+Register.subtypeSpec = constraint.ValueRangeConstraint(0, 2147483647)
+
+
+class RegisterRange(univ.Sequence):
+ pass
+
+RegisterRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstRegister', Register()),
+ namedtype.NamedType('lastRegister', Register())
+)
+
+
+class RegisterID(univ.Choice):
+ pass
+
+RegisterID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('register', Register().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('registerRange', RegisterRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)))
+)
+
+
+class SegmentNumber(univ.Integer):
+ pass
+
+SegmentNumber.subtypeSpec = constraint.ValueRangeConstraint(1, 127)
+
+
+class SegmentRange(univ.Sequence):
+ pass
+
+SegmentRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstSegment', SegmentNumber()),
+ namedtype.NamedType('lastSegment', SegmentNumber())
+)
+
+
+class SegmentID(univ.Choice):
+ pass
+
+SegmentID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('segmentNumber', SegmentNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('segmentRange', SegmentRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)))
+)
+
+
+class TSECNomenclature(univ.Sequence):
+ pass
+
+TSECNomenclature.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('shortTitle', ShortTitle()),
+ namedtype.OptionalNamedType('editionID', EditionID()),
+ namedtype.OptionalNamedType('registerID', RegisterID()),
+ namedtype.OptionalNamedType('segmentID', SegmentID())
+)
+
+
+aa_tsecNomenclature = Attribute()
+aa_tsecNomenclature['attrType'] = id_kma_TSECNomenclature
+aa_tsecNomenclature['attrValues'][0] = TSECNomenclature()
+
+
+# Key Purpose Attribute
+
+id_kma_keyPurpose = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.13')
+
+
+class KeyPurpose(univ.Enumerated):
+ pass
+
+KeyPurpose.namedValues = namedval.NamedValues(
+ ('n-a', 0),
+ ('a', 65),
+ ('b', 66),
+ ('l', 76),
+ ('m', 77),
+ ('r', 82),
+ ('s', 83),
+ ('t', 84),
+ ('v', 86),
+ ('x', 88),
+ ('z', 90)
+)
+
+
+aa_keyPurpose = Attribute()
+aa_keyPurpose['attrType'] = id_kma_keyPurpose
+aa_keyPurpose['attrValues'][0] = KeyPurpose()
+
+
+# Key Use Attribute
+
+id_kma_keyUse = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.14')
+
+
+class KeyUse(univ.Enumerated):
+ pass
+
+KeyUse.namedValues = namedval.NamedValues(
+ ('n-a', 0),
+ ('ffk', 1),
+ ('kek', 2),
+ ('kpk', 3),
+ ('msk', 4),
+ ('qkek', 5),
+ ('tek', 6),
+ ('tsk', 7),
+ ('trkek', 8),
+ ('nfk', 9),
+ ('effk', 10),
+ ('ebfk', 11),
+ ('aek', 12),
+ ('wod', 13),
+ ('kesk', 246),
+ ('eik', 247),
+ ('ask', 248),
+ ('kmk', 249),
+ ('rsk', 250),
+ ('csk', 251),
+ ('sak', 252),
+ ('rgk', 253),
+ ('cek', 254),
+ ('exk', 255)
+)
+
+
+aa_keyUse = Attribute()
+aa_keyPurpose['attrType'] = id_kma_keyUse
+aa_keyPurpose['attrValues'][0] = KeyUse()
+
+
+# Transport Key Attribute
+
+id_kma_transportKey = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.15')
+
+
+class TransOp(univ.Enumerated):
+ pass
+
+TransOp.namedValues = namedval.NamedValues(
+ ('transport', 1),
+ ('operational', 2)
+)
+
+
+aa_transportKey = Attribute()
+aa_transportKey['attrType'] = id_kma_transportKey
+aa_transportKey['attrValues'][0] = TransOp()
+
+
+# Key Distribution Period Attribute
+
+id_kma_keyDistPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.5')
+
+
+class KeyDistPeriod(univ.Sequence):
+ pass
+
+KeyDistPeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('doNotDistBefore', BinaryTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('doNotDistAfter', BinaryTime())
+)
+
+
+aa_keyDistributionPeriod = Attribute()
+aa_keyDistributionPeriod['attrType'] = id_kma_keyDistPeriod
+aa_keyDistributionPeriod['attrValues'][0] = KeyDistPeriod()
+
+
+# Key Validity Period Attribute
+
+id_kma_keyValidityPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.6')
+
+
+class KeyValidityPeriod(univ.Sequence):
+ pass
+
+KeyValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('doNotUseBefore', BinaryTime()),
+ namedtype.OptionalNamedType('doNotUseAfter', BinaryTime())
+)
+
+
+aa_keyValidityPeriod = Attribute()
+aa_keyValidityPeriod['attrType'] = id_kma_keyValidityPeriod
+aa_keyValidityPeriod['attrValues'][0] = KeyValidityPeriod()
+
+
+# Key Duration Attribute
+
+id_kma_keyDuration = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.7')
+
+
+ub_KeyDuration_months = univ.Integer(72)
+
+ub_KeyDuration_hours = univ.Integer(96)
+
+ub_KeyDuration_days = univ.Integer(732)
+
+ub_KeyDuration_weeks = univ.Integer(104)
+
+ub_KeyDuration_years = univ.Integer(100)
+
+
+class KeyDuration(univ.Choice):
+ pass
+
+KeyDuration.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hours', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_hours)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('days', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_days))),
+ namedtype.NamedType('weeks', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_weeks)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('months', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_months)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('years', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_years)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+aa_keyDurationPeriod = Attribute()
+aa_keyDurationPeriod['attrType'] = id_kma_keyDuration
+aa_keyDurationPeriod['attrValues'][0] = KeyDuration()
+
+
+# Classification Attribute
+
+id_aa_KP_classification = univ.ObjectIdentifier(id_aa_securityLabel)
+
+
+id_enumeratedPermissiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.1')
+
+id_enumeratedRestrictiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.4')
+
+id_informativeAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.3')
+
+
+class SecurityAttribute(univ.Integer):
+ pass
+
+SecurityAttribute.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class EnumeratedTag(univ.Sequence):
+ pass
+
+EnumeratedTag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+ namedtype.NamedType('attributeList', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class FreeFormField(univ.Choice):
+ pass
+
+FreeFormField.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bitSetAttributes', univ.BitString()), # Not permitted in RFC 7906
+ namedtype.NamedType('securityAttributes', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class InformativeTag(univ.Sequence):
+ pass
+
+InformativeTag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+ namedtype.NamedType('attributes', FreeFormField())
+)
+
+
+class Classification(ESSSecurityLabel):
+ pass
+
+
+aa_classification = Attribute()
+aa_classification['attrType'] = id_aa_KP_classification
+aa_classification['attrValues'][0] = Classification()
+
+
+# Split Identifier Attribute
+
+id_kma_splitID = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.11')
+
+
+class SplitID(univ.Sequence):
+ pass
+
+SplitID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('half', univ.Enumerated(
+ namedValues=namedval.NamedValues(('a', 0), ('b', 1)))),
+ namedtype.OptionalNamedType('combineAlg', AlgorithmIdentifier())
+)
+
+
+aa_splitIdentifier = Attribute()
+aa_splitIdentifier['attrType'] = id_kma_splitID
+aa_splitIdentifier['attrValues'][0] = SplitID()
+
+
+# Key Package Type Attribute
+
+id_kma_keyPkgType = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.12')
+
+
+class KeyPkgType(univ.ObjectIdentifier):
+ pass
+
+
+aa_keyPackageType = Attribute()
+aa_keyPackageType['attrType'] = id_kma_keyPkgType
+aa_keyPackageType['attrValues'][0] = KeyPkgType()
+
+
+# Signature Usage Attribute
+
+id_kma_sigUsageV3 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.22')
+
+
+class SignatureUsage(CMSContentConstraints):
+ pass
+
+
+aa_signatureUsage_v3 = Attribute()
+aa_signatureUsage_v3['attrType'] = id_kma_sigUsageV3
+aa_signatureUsage_v3['attrValues'][0] = SignatureUsage()
+
+
+# Other Certificate Format Attribute
+
+id_kma_otherCertFormats = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.19')
+
+
+aa_otherCertificateFormats = Attribute()
+aa_signatureUsage_v3['attrType'] = id_kma_otherCertFormats
+aa_signatureUsage_v3['attrValues'][0] = CertificateChoices()
+
+
+# PKI Path Attribute
+
+id_at_pkiPath = univ.ObjectIdentifier('2.5.4.70')
+
+
+class PkiPath(univ.SequenceOf):
+ pass
+
+PkiPath.componentType = Certificate()
+PkiPath.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_pkiPath = Attribute()
+aa_pkiPath['attrType'] = id_at_pkiPath
+aa_pkiPath['attrValues'][0] = PkiPath()
+
+
+# Useful Certificates Attribute
+
+id_kma_usefulCerts = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.20')
+
+
+aa_usefulCertificates = Attribute()
+aa_usefulCertificates['attrType'] = id_kma_usefulCerts
+aa_usefulCertificates['attrValues'][0] = CertificateSet()
+
+
+# Key Wrap Attribute
+
+id_kma_keyWrapAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.21')
+
+
+aa_keyWrapAlgorithm = Attribute()
+aa_keyWrapAlgorithm['attrType'] = id_kma_keyWrapAlgorithm
+aa_keyWrapAlgorithm['attrValues'][0] = AlgorithmIdentifier()
+
+
+# Content Decryption Key Identifier Attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+
+class ContentDecryptKeyID(univ.OctetString):
+ pass
+
+
+aa_contentDecryptKeyIdentifier = Attribute()
+aa_contentDecryptKeyIdentifier['attrType'] = id_aa_KP_contentDecryptKeyID
+aa_contentDecryptKeyIdentifier['attrValues'][0] = ContentDecryptKeyID()
+
+
+# Certificate Pointers Attribute
+
+aa_certificatePointers = Attribute()
+aa_certificatePointers['attrType'] = id_pe_subjectInfoAccess
+aa_certificatePointers['attrValues'][0] = SubjectInfoAccessSyntax()
+
+
+# CRL Pointers Attribute
+
+id_aa_KP_crlPointers = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.70')
+
+
+aa_cRLDistributionPoints = Attribute()
+aa_cRLDistributionPoints['attrType'] = id_aa_KP_crlPointers
+aa_cRLDistributionPoints['attrValues'][0] = GeneralNames()
+
+
+# Extended Error Codes
+
+id_errorCodes = univ.ObjectIdentifier('2.16.840.1.101.2.1.22')
+
+id_missingKeyType = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.1')
+
+id_privacyMarkTooLong = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.2')
+
+id_unrecognizedSecurityPolicy = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.3')
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_contentHint: ContentHints(),
+ id_aa_communityIdentifiers: CommunityIdentifiers(),
+ id_aa_binarySigningTime: BinarySigningTime(),
+ id_contentType: ContentType(),
+ id_messageDigest: MessageDigest(),
+ id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
+ id_aa_KP_keyProvinceV2: KeyProvinceV2(),
+ id_aa_KP_manifest: Manifest(),
+ id_kma_keyAlgorithm: KeyAlgorithm(),
+ id_at_userCertificate: Certificate(),
+ id_kma_keyPkgReceiversV2: KeyPkgReceiversV2(),
+ id_kma_TSECNomenclature: TSECNomenclature(),
+ id_kma_keyPurpose: KeyPurpose(),
+ id_kma_keyUse: KeyUse(),
+ id_kma_transportKey: TransOp(),
+ id_kma_keyDistPeriod: KeyDistPeriod(),
+ id_kma_keyValidityPeriod: KeyValidityPeriod(),
+ id_kma_keyDuration: KeyDuration(),
+ id_aa_KP_classification: Classification(),
+ id_kma_splitID: SplitID(),
+ id_kma_keyPkgType: KeyPkgType(),
+ id_kma_sigUsageV3: SignatureUsage(),
+ id_kma_otherCertFormats: CertificateChoices(),
+ id_at_pkiPath: PkiPath(),
+ id_kma_usefulCerts: CertificateSet(),
+ id_kma_keyWrapAlgorithm: AlgorithmIdentifier(),
+ id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
+ id_pe_subjectInfoAccess: SubjectInfoAccessSyntax(),
+ id_aa_KP_crlPointers: GeneralNames(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7914.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7914.py
new file mode 100644
index 0000000000..99e9551567
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc7914.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+#The scrypt Password-Based Key Derivation Function
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7914.txt
+# https://www.rfc-editor.org/errata/eid5871
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+id_scrypt = univ.ObjectIdentifier('1.3.6.1.4.1.11591.4.11')
+
+
+class Scrypt_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt',
+ univ.OctetString()),
+ namedtype.NamedType('costParameter',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('blockSize',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('parallelizationParameter',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('keyLength',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX)))
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_scrypt: Scrypt_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8017.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8017.py
new file mode 100644
index 0000000000..fefed1dcd6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8017.py
@@ -0,0 +1,153 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #1: RSA Cryptography Specifications Version 2.2
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8017.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2437
+from pyasn1_modules import rfc3447
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Import Algorithm Identifier from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+class DigestAlgorithm(AlgorithmIdentifier):
+ pass
+
+class HashAlgorithm(AlgorithmIdentifier):
+ pass
+
+class MaskGenAlgorithm(AlgorithmIdentifier):
+ pass
+
+class PSourceAlgorithm(AlgorithmIdentifier):
+ pass
+
+
+# Object identifiers from NIST SHA2
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+id_sha256 = rfc4055.id_sha256
+id_sha384 = rfc4055.id_sha384
+id_sha512 = rfc4055.id_sha512
+id_sha224 = rfc4055.id_sha224
+id_sha512_224 = hashAlgs + (5, )
+id_sha512_256 = hashAlgs + (6, )
+
+
+# Basic object identifiers
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = rfc2437.rsaEncryption
+id_RSAES_OAEP = rfc2437.id_RSAES_OAEP
+id_pSpecified = rfc2437.id_pSpecified
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+md2WithRSAEncryption = rfc2437.md2WithRSAEncryption
+md5WithRSAEncryption = rfc2437.md5WithRSAEncryption
+sha1WithRSAEncryption = rfc2437.sha1WithRSAEncryption
+sha224WithRSAEncryption = rfc4055.sha224WithRSAEncryption
+sha256WithRSAEncryption = rfc4055.sha256WithRSAEncryption
+sha384WithRSAEncryption = rfc4055.sha384WithRSAEncryption
+sha512WithRSAEncryption = rfc4055.sha512WithRSAEncryption
+sha512_224WithRSAEncryption = pkcs_1 + (15, )
+sha512_256WithRSAEncryption = pkcs_1 + (16, )
+id_sha1 = rfc2437.id_sha1
+id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2')
+id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5')
+id_mgf1 = rfc2437.id_mgf1
+
+
+# Default parameter values
+
+sha1 = rfc4055.sha1Identifier
+SHA1Parameters = univ.Null("")
+
+mgf1SHA1 = rfc4055.mgf1SHA1Identifier
+
+class EncodingParameters(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(0, MAX)
+
+pSpecifiedEmpty = rfc4055.pSpecifiedEmptyIdentifier
+
+emptyString = EncodingParameters(value='')
+
+
+# Main structures
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('two-prime', 0),
+ ('multi', 1)
+ )
+
+class TrailerField(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('trailerFieldBC', 1)
+ )
+
+RSAPublicKey = rfc2437.RSAPublicKey
+
+OtherPrimeInfo = rfc3447.OtherPrimeInfo
+OtherPrimeInfos = rfc3447.OtherPrimeInfos
+RSAPrivateKey = rfc3447.RSAPrivateKey
+
+RSAES_OAEP_params = rfc4055.RSAES_OAEP_params
+rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier
+
+RSASSA_PSS_params = rfc4055.RSASSA_PSS_params
+rSASSA_PSS_Default_Identifier = rfc4055.rSASSA_PSS_Default_Identifier
+
+
+# Syntax for the EMSA-PKCS1-v1_5 hash identifier
+
+class DigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithm()),
+ namedtype.NamedType('digest', univ.OctetString())
+ )
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_sha512_224: univ.Null(),
+ id_sha512_256: univ.Null(),
+ id_mgf1: AlgorithmIdentifier(),
+ id_pSpecified: univ.OctetString(),
+ id_RSAES_OAEP: RSAES_OAEP_params(),
+ id_RSASSA_PSS: RSASSA_PSS_params(),
+ md2WithRSAEncryption: univ.Null(),
+ md5WithRSAEncryption: univ.Null(),
+ sha1WithRSAEncryption: univ.Null(),
+ sha224WithRSAEncryption: univ.Null(),
+ sha256WithRSAEncryption: univ.Null(),
+ sha384WithRSAEncryption: univ.Null(),
+ sha512WithRSAEncryption: univ.Null(),
+ sha512_224WithRSAEncryption: univ.Null(),
+ sha512_256WithRSAEncryption: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8018.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8018.py
new file mode 100644
index 0000000000..7a44eea8d2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8018.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #5: Password-Based Cryptography Specification, Version 2.1
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8018.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Import from RFC 3565
+
+AES_IV = rfc3565.AES_IV
+
+
+# Import from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Basic object identifiers
+
+nistAlgorithms = _OID(2, 16, 840, 1, 101, 3, 4)
+
+aes = _OID(nistAlgorithms, 1)
+
+oiw = _OID(1, 3, 14)
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+pkcs = _OID(rsadsi, 1)
+
+digestAlgorithm = _OID(rsadsi, 2)
+
+encryptionAlgorithm = _OID(rsadsi, 3)
+
+pkcs_5 = _OID(pkcs, 5)
+
+
+
+# HMAC object identifiers
+
+id_hmacWithSHA1 = _OID(digestAlgorithm, 7)
+
+id_hmacWithSHA224 = _OID(digestAlgorithm, 8)
+
+id_hmacWithSHA256 = _OID(digestAlgorithm, 9)
+
+id_hmacWithSHA384 = _OID(digestAlgorithm, 10)
+
+id_hmacWithSHA512 = _OID(digestAlgorithm, 11)
+
+id_hmacWithSHA512_224 = _OID(digestAlgorithm, 12)
+
+id_hmacWithSHA512_256 = _OID(digestAlgorithm, 13)
+
+
+# PBES1 object identifiers
+
+pbeWithMD2AndDES_CBC = _OID(pkcs_5, 1)
+
+pbeWithMD2AndRC2_CBC = _OID(pkcs_5, 4)
+
+pbeWithMD5AndDES_CBC = _OID(pkcs_5, 3)
+
+pbeWithMD5AndRC2_CBC = _OID(pkcs_5, 6)
+
+pbeWithSHA1AndDES_CBC = _OID(pkcs_5, 10)
+
+pbeWithSHA1AndRC2_CBC = _OID(pkcs_5, 11)
+
+
+# Supporting techniques object identifiers
+
+desCBC = _OID(oiw, 3, 2, 7)
+
+des_EDE3_CBC = _OID(encryptionAlgorithm, 7)
+
+rc2CBC = _OID(encryptionAlgorithm, 2)
+
+rc5_CBC_PAD = _OID(encryptionAlgorithm, 9)
+
+aes128_CBC_PAD = _OID(aes, 2)
+
+aes192_CBC_PAD = _OID(aes, 22)
+
+aes256_CBC_PAD = _OID(aes, 42)
+
+
+# PBES1
+
+class PBEParameter(univ.Sequence):
+ pass
+
+PBEParameter.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8))),
+ namedtype.NamedType('iterationCount', univ.Integer())
+)
+
+
+# PBES2
+
+id_PBES2 = _OID(pkcs_5, 13)
+
+
+class PBES2_params(univ.Sequence):
+ pass
+
+PBES2_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()),
+ namedtype.NamedType('encryptionScheme', AlgorithmIdentifier())
+)
+
+
+# PBMAC1
+
+id_PBMAC1 = _OID(pkcs_5, 14)
+
+
+class PBMAC1_params(univ.Sequence):
+ pass
+
+PBMAC1_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()),
+ namedtype.NamedType('messageAuthScheme', AlgorithmIdentifier())
+)
+
+
+# PBKDF2
+
+id_PBKDF2 = _OID(pkcs_5, 12)
+
+
+algid_hmacWithSHA1 = AlgorithmIdentifier()
+algid_hmacWithSHA1['algorithm'] = id_hmacWithSHA1
+algid_hmacWithSHA1['parameters'] = univ.Null("")
+
+
+class PBKDF2_params(univ.Sequence):
+ pass
+
+PBKDF2_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('specified', univ.OctetString()),
+ namedtype.NamedType('otherSource', AlgorithmIdentifier())
+ ))),
+ namedtype.NamedType('iterationCount', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('keyLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.DefaultedNamedType('prf', algid_hmacWithSHA1)
+)
+
+
+# RC2 CBC algorithm parameter
+
+class RC2_CBC_Parameter(univ.Sequence):
+ pass
+
+RC2_CBC_Parameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('rc2ParameterVersion', univ.Integer()),
+ namedtype.NamedType('iv', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+)
+
+
+# RC5 CBC algorithm parameter
+
+class RC5_CBC_Parameters(univ.Sequence):
+ pass
+
+RC5_CBC_Parameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ univ.Integer(namedValues=namedval.NamedValues(('v1_0', 16))).subtype(
+ subtypeSpec=constraint.SingleValueConstraint(16))),
+ namedtype.NamedType('rounds',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(8, 127))),
+ namedtype.NamedType('blockSizeInBits',
+ univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(64, 128))),
+ namedtype.OptionalNamedType('iv', univ.OctetString())
+)
+
+
+# Initialization Vector for AES: OCTET STRING (SIZE(16))
+
+class AES_IV(univ.OctetString):  # NOTE(review): shadows the AES_IV imported from rfc3565 earlier in this module — confirm intended
+    pass
+
+AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+# Initialization Vector for DES: OCTET STRING (SIZE(8))
+
+class DES_IV(univ.OctetString):
+ pass
+
+DES_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8)
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ # PBKDF2-PRFs
+ id_hmacWithSHA1: univ.Null(),
+ id_hmacWithSHA224: univ.Null(),
+ id_hmacWithSHA256: univ.Null(),
+ id_hmacWithSHA384: univ.Null(),
+ id_hmacWithSHA512: univ.Null(),
+ id_hmacWithSHA512_224: univ.Null(),
+ id_hmacWithSHA512_256: univ.Null(),
+ # PBES1Algorithms
+ pbeWithMD2AndDES_CBC: PBEParameter(),
+ pbeWithMD2AndRC2_CBC: PBEParameter(),
+ pbeWithMD5AndDES_CBC: PBEParameter(),
+ pbeWithMD5AndRC2_CBC: PBEParameter(),
+ pbeWithSHA1AndDES_CBC: PBEParameter(),
+ pbeWithSHA1AndRC2_CBC: PBEParameter(),
+ # PBES2Algorithms
+ id_PBES2: PBES2_params(),
+ # PBES2-KDFs
+ id_PBKDF2: PBKDF2_params(),
+ # PBMAC1Algorithms
+ id_PBMAC1: PBMAC1_params(),
+ # SupportingAlgorithms
+ desCBC: DES_IV(),
+ des_EDE3_CBC: DES_IV(),
+ rc2CBC: RC2_CBC_Parameter(),
+ rc5_CBC_PAD: RC5_CBC_Parameters(),
+ aes128_CBC_PAD: AES_IV(),
+ aes192_CBC_PAD: AES_IV(),
+ aes256_CBC_PAD: AES_IV(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8103.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8103.py
new file mode 100644
index 0000000000..6429e8635f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8103.py
@@ -0,0 +1,36 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool.
+# Auto-generated by asn1ate v.0.6.0 from rfc8103.asn.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# ChaCha20Poly1305 algorithm for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8103.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AEADChaCha20Poly1305Nonce(univ.OctetString):
+ pass
+
+
+AEADChaCha20Poly1305Nonce.subtypeSpec = constraint.ValueSizeConstraint(12, 12)
+
+id_alg_AEADChaCha20Poly1305 = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 18)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8209.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8209.py
new file mode 100644
index 0000000000..7d70f51b0c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8209.py
@@ -0,0 +1,20 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BGPsec Router PKI Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8209.txt
+#
+
+from pyasn1.type import univ
+
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_bgpsec_router = id_kp + (30, )
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8226.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8226.py
new file mode 100644
index 0000000000..e7fe9460e9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8226.py
@@ -0,0 +1,149 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to implement appropriate constraints and added comments.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# JWT Claim Constraints and TN Authorization List for certificate extensions.
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8226.txt (with errata corrected)
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class JWTClaimName(char.IA5String):
+ pass
+
+
+class JWTClaimNames(univ.SequenceOf):
+ pass
+
+JWTClaimNames.componentType = JWTClaimName()
+JWTClaimNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class JWTClaimPermittedValues(univ.Sequence):
+ pass
+
+JWTClaimPermittedValues.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('claim', JWTClaimName()),
+ namedtype.NamedType('permitted', univ.SequenceOf(
+ componentType=char.UTF8String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class JWTClaimPermittedValuesList(univ.SequenceOf):
+ pass
+
+JWTClaimPermittedValuesList.componentType = JWTClaimPermittedValues()
+JWTClaimPermittedValuesList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class JWTClaimConstraints(univ.Sequence):
+ pass
+
+JWTClaimConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('mustInclude',
+ JWTClaimNames().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('permittedValues',
+ JWTClaimPermittedValuesList().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+JWTClaimConstraints.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('mustInclude', constraint.ComponentPresentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('permittedValues', constraint.ComponentPresentConstraint()))
+)
+
+
+id_pe_JWTClaimConstraints = _OID(1, 3, 6, 1, 5, 5, 7, 1, 27)
+
+
+class ServiceProviderCode(char.IA5String):
+ pass
+
+
+class TelephoneNumber(char.IA5String):
+ pass
+
+TelephoneNumber.subtypeSpec = constraint.ConstraintsIntersection(
+ constraint.ValueSizeConstraint(1, 15),
+ constraint.PermittedAlphabetConstraint(
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '#', '*')
+)
+
+
+class TelephoneNumberRange(univ.Sequence):
+ pass
+
+TelephoneNumberRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('start', TelephoneNumber()),
+ namedtype.NamedType('count',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(2, MAX)))
+)
+
+
+class TNEntry(univ.Choice):
+ pass
+
+TNEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('spc',
+ ServiceProviderCode().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('range',
+ TelephoneNumberRange().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('one',
+ TelephoneNumber().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 2)))
+)
+
+
+class TNAuthorizationList(univ.SequenceOf):
+ pass
+
+TNAuthorizationList.componentType = TNEntry()
+TNAuthorizationList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_pe_TNAuthList = _OID(1, 3, 6, 1, 5, 5, 7, 1, 26)
+
+
+id_ad_stirTNList = _OID(1, 3, 6, 1, 5, 5, 7, 48, 14)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_TNAuthList: TNAuthorizationList(),
+ id_pe_JWTClaimConstraints: JWTClaimConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8358.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8358.py
new file mode 100644
index 0000000000..647a366622
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8358.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Digital Signatures on Internet-Draft Documents
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8358.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+id_ct = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1')
+
+id_ct_asciiTextWithCRLF = id_ct + (27, )
+
+id_ct_epub = id_ct + (39, )
+
+id_ct_htmlWithCRLF = id_ct + (38, )
+
+id_ct_pdf = id_ct + (29, )
+
+id_ct_postscript = id_ct + (30, )
+
+id_ct_utf8TextWithCRLF = id_ct + (37, )
+
+id_ct_xml = id_ct + (28, )
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_asciiTextWithCRLF: univ.OctetString(),
+ id_ct_epub: univ.OctetString(),
+ id_ct_htmlWithCRLF: univ.OctetString(),
+ id_ct_pdf: univ.OctetString(),
+ id_ct_postscript: univ.OctetString(),
+ id_ct_utf8TextWithCRLF: univ.OctetString(),
+ id_ct_xml: univ.OctetString(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8360.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8360.py
new file mode 100644
index 0000000000..ca180c18d8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8360.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Resource Public Key Infrastructure (RPKI) Validation Reconsidered
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8360.txt
+# https://www.rfc-editor.org/errata/eid5870
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3779
+from pyasn1_modules import rfc5280
+
+
+# IP Address Delegation Extension V2
+
+id_pe_ipAddrBlocks_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.28')
+
+IPAddrBlocks = rfc3779.IPAddrBlocks
+
+
+# Autonomous System Identifier Delegation Extension V2
+
+id_pe_autonomousSysIds_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.29')
+
+ASIdentifiers = rfc3779.ASIdentifiers
+
+
+# Map of Certificate Extension OIDs to Extensions is added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ipAddrBlocks_v2: IPAddrBlocks(),
+ id_pe_autonomousSysIds_v2: ASIdentifiers(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8398.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8398.py
new file mode 100644
index 0000000000..151b632107
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8398.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internationalized Email Addresses in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8398.txt
+# https://www.rfc-editor.org/errata/eid5418
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# SmtpUTF8Mailbox contains Mailbox as specified in Section 3.3 of RFC 6531
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_SmtpUTF8Mailbox = id_on + (9, )
+
+
+class SmtpUTF8Mailbox(char.UTF8String):
+ pass
+
+SmtpUTF8Mailbox.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+on_SmtpUTF8Mailbox = rfc5280.AnotherName()
+on_SmtpUTF8Mailbox['type-id'] = id_on_SmtpUTF8Mailbox
+on_SmtpUTF8Mailbox['value'] = SmtpUTF8Mailbox()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_SmtpUTF8Mailbox: SmtpUTF8Mailbox(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8410.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8410.py
new file mode 100644
index 0000000000..98bc97bb14
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8410.py
@@ -0,0 +1,43 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for Ed25519, Ed448, X25519, and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8410.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+
+class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class CurvePrivateKey(univ.OctetString):
+ pass
+
+
+id_X25519 = univ.ObjectIdentifier('1.3.101.110')
+
+id_X448 = univ.ObjectIdentifier('1.3.101.111')
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+id_sha512 = rfc4055.id_sha512
+
+id_aes128_wrap = rfc3565.id_aes128_wrap
+
+id_aes256_wrap = rfc3565.id_aes256_wrap
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8418.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8418.py
new file mode 100644
index 0000000000..6e76487c88
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8418.py
@@ -0,0 +1,36 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Diffie-Hellman (ECDH) Key Agreement Algorithm
+# with X25519 and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8418.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc5280
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KeyWrapAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+dhSinglePass_stdDH_sha256kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.1')
+
+dhSinglePass_stdDH_sha384kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.2')
+
+dhSinglePass_stdDH_sha512kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.3')
+
+dhSinglePass_stdDH_hkdf_sha256_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.19')
+
+dhSinglePass_stdDH_hkdf_sha384_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.20')
+
+dhSinglePass_stdDH_hkdf_sha512_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.21')
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8419.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8419.py
new file mode 100644
index 0000000000..f10994be28
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8419.py
@@ -0,0 +1,68 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Edwards-Curve Digital Signature Algorithm (EdDSA) Signatures in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8419.txt
+# https://www.rfc-editor.org/errata/eid5869
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class ShakeOutputLen(univ.Integer):
+ pass
+
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+sigAlg_Ed25519 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed25519['algorithm'] = id_Ed25519
+# sigAlg_Ed25519['parameters'] is absent
+
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+sigAlg_Ed448 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed448['algorithm'] = id_Ed448
+# sigAlg_Ed448['parameters'] is absent
+
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+
+id_sha512 = hashAlgs + (3, )
+
+hashAlg_SHA_512 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHA_512['algorithm'] = id_sha512
+# hashAlg_SHA_512['parameters'] is absent
+
+
+id_shake256 = hashAlgs + (12, )
+
+hashAlg_SHAKE256 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256['algorithm'] = id_shake256
+# hashAlg_SHAKE256['parameters'] is absent
+
+
+id_shake256_len = hashAlgs + (18, )
+
+hashAlg_SHAKE256_LEN = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256_LEN['algorithm'] = id_shake256_len
+hashAlg_SHAKE256_LEN['parameters'] = ShakeOutputLen()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py. Do not add OIDs with absent parameters.
+
+_algorithmIdentifierMapUpdate = {
+ id_shake256_len: ShakeOutputLen(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8479.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8479.py
new file mode 100644
index 0000000000..57f78b62f2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8479.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Storing Validation Parameters in PKCS#8
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8479.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+id_attr_validation_parameters = univ.ObjectIdentifier('1.3.6.1.4.1.2312.18.8.1')
+
+
+class ValidationParams(univ.Sequence):
+ pass
+
+ValidationParams.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', univ.ObjectIdentifier()),
+ namedtype.NamedType('seed', univ.OctetString())
+)
+
+
+at_validation_parameters = rfc5652.Attribute()
+at_validation_parameters['attrType'] = id_attr_validation_parameters
+at_validation_parameters['attrValues'][0] = ValidationParams()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_attr_validation_parameters: ValidationParams(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8494.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8494.py
new file mode 100644
index 0000000000..fe349e14ca
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8494.py
@@ -0,0 +1,80 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Multicast Email (MULE) over Allied Communications Publication 142
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8494.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+id_mmhs_CDT = univ.ObjectIdentifier('1.3.26.0.4406.0.4.2')
+
+
+class AlgorithmID_ShortForm(univ.Integer):
+ pass
+
+AlgorithmID_ShortForm.namedValues = namedval.NamedValues(
+ ('zlibCompress', 0)
+)
+
+
+class ContentType_ShortForm(univ.Integer):
+ pass
+
+ContentType_ShortForm.namedValues = namedval.NamedValues(
+ ('unidentified', 0),
+ ('external', 1),
+ ('p1', 2),
+ ('p3', 3),
+ ('p7', 4),
+ ('mule', 25)
+)
+
+
+class CompressedContentInfo(univ.Sequence):
+ pass
+
+CompressedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('unnamed', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('contentType-ShortForm',
+ ContentType_ShortForm().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('contentType-OID',
+ univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))),
+ namedtype.NamedType('compressedContent',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class CompressionAlgorithmIdentifier(univ.Choice):
+ pass
+
+CompressionAlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithmID-ShortForm',
+ AlgorithmID_ShortForm().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('algorithmID-OID',
+ univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class CompressedData(univ.Sequence):
+ pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+ namedtype.NamedType('compressedContentInfo', CompressedContentInfo())
+)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8520.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8520.py
new file mode 100644
index 0000000000..b9eb6e9377
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8520.py
@@ -0,0 +1,63 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Extensions for MUD URL and MUD Signer;
+# Object Identifier for CMS Content Type for a MUD file
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8520.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+# X.509 Extension for MUD URL
+
+id_pe_mud_url = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.25')
+
+class MUDURLSyntax(char.IA5String):
+ pass
+
+
+# X.509 Extension for MUD Signer
+
+id_pe_mudsigner = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.30')
+
+class MUDsignerSyntax(rfc5280.Name):
+ pass
+
+
+# Object Identifier for CMS Content Type for a MUD file
+
+id_ct_mudtype = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.41')
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_mud_url: MUDURLSyntax(),
+ id_pe_mudsigner: MUDsignerSyntax(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_mudtype: univ.OctetString(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8619.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8619.py
new file mode 100644
index 0000000000..0aaa811bad
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8619.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for HKDF
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8619.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Object Identifiers
+
+id_alg_hkdf_with_sha256 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.28')
+
+
+id_alg_hkdf_with_sha384 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.29')
+
+
+id_alg_hkdf_with_sha512 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.30')
+
+
+# Key Derivation Algorithm Identifiers
+
+kda_hkdf_with_sha256 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha256['algorithm'] = id_alg_hkdf_with_sha256
+# kda_hkdf_with_sha256['parameters'] are absent
+
+
+kda_hkdf_with_sha384 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha384['algorithm'] = id_alg_hkdf_with_sha384
+# kda_hkdf_with_sha384['parameters'] are absent
+
+
+kda_hkdf_with_sha512 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha512['algorithm'] = id_alg_hkdf_with_sha512
+# kda_hkdf_with_sha512['parameters'] are absent
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8649.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8649.py
new file mode 100644
index 0000000000..c405f050e8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8649.py
@@ -0,0 +1,40 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Certificate Extension for Hash Of Root Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8649.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_ce_hashOfRootKey = univ.ObjectIdentifier('1.3.6.1.4.1.51483.2.1')
+
+
+class HashedRootKey(univ.Sequence):
+ pass
+
+HashedRootKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', univ.OctetString())
+)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_ce_hashOfRootKey: HashedRootKey(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8692.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8692.py
new file mode 100644
index 0000000000..7a6791ad20
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8692.py
@@ -0,0 +1,79 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for RSASSA-PSS and ECDSA using SHAKEs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8692.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+
+
+# SHAKE128 One-Way Hash Function
+
+id_shake128 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.11')
+
+mda_shake128 = rfc5280.AlgorithmIdentifier()
+mda_shake128['algorithm'] = id_shake128
+# mda_shake128['parameters'] is absent
+
+
+# SHAKE256 One-Way Hash Function
+
+id_shake256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.12')
+
+mda_shake256 = rfc5280.AlgorithmIdentifier()
+mda_shake256['algorithm'] = id_shake256
+# mda_shake256['parameters'] is absent
+
+
+# RSA PSS with SHAKE128
+
+id_RSASSA_PSS_SHAKE128 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.30')
+
+sa_rSASSA_PSS_SHAKE128 = rfc5280.AlgorithmIdentifier()
+sa_rSASSA_PSS_SHAKE128['algorithm'] = id_RSASSA_PSS_SHAKE128
+# sa_rSASSA_PSS_SHAKE128['parameters'] is absent
+
+pk_rsaSSA_PSS_SHAKE128 = rfc4055.RSAPublicKey()
+
+
+# RSA PSS with SHAKE256
+
+id_RSASSA_PSS_SHAKE256 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.31')
+
+sa_rSASSA_PSS_SHAKE256 = rfc5280.AlgorithmIdentifier()
+sa_rSASSA_PSS_SHAKE256['algorithm'] = id_RSASSA_PSS_SHAKE256
+# sa_rSASSA_PSS_SHAKE256['parameters'] is absent
+
+pk_rsaSSA_PSS_SHAKE256 = rfc4055.RSAPublicKey()
+
+
+# ECDSA with SHAKE128
+
+id_ecdsa_with_shake128 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.32')
+
+sa_ecdsa_with_shake128 = rfc5280.AlgorithmIdentifier()
+sa_ecdsa_with_shake128['algorithm'] = id_ecdsa_with_shake128
+# sa_ecdsa_with_shake128['parameters'] is absent
+
+pk_ec = rfc5480.ECPoint()
+
+
+# ECDSA with SHAKE256
+
+id_ecdsa_with_shake256 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.33')
+
+sa_ecdsa_with_shake256 = rfc5280.AlgorithmIdentifier()
+sa_ecdsa_with_shake256['algorithm'] = id_ecdsa_with_shake256
+# sa_ecdsa_with_shake256['parameters'] is absent
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8696.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8696.py
new file mode 100644
index 0000000000..4c6d38d441
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8696.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using Pre-Shared Key (PSK) in the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8696.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_ori = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13')
+
+id_ori_keyTransPSK = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13.1')
+
+id_ori_keyAgreePSK = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13.2')
+
+
+class PreSharedKeyIdentifier(univ.OctetString):
+ pass
+
+
+class KeyTransRecipientInfos(univ.SequenceOf):
+ componentType = rfc5652.KeyTransRecipientInfo()
+
+
+class KeyTransPSKRecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ rfc5652.CMSVersion()),
+ namedtype.NamedType('pskid',
+ PreSharedKeyIdentifier()),
+ namedtype.NamedType('kdfAlgorithm',
+ rfc5652.KeyDerivationAlgorithmIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm',
+ rfc5652.KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('ktris',
+ KeyTransRecipientInfos()),
+ namedtype.NamedType('encryptedKey',
+ rfc5652.EncryptedKey())
+ )
+
+
+class KeyAgreePSKRecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ rfc5652.CMSVersion()),
+ namedtype.NamedType('pskid',
+ PreSharedKeyIdentifier()),
+ namedtype.NamedType('originator',
+ rfc5652.OriginatorIdentifierOrKey().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm',
+ rfc5652.UserKeyingMaterial().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('kdfAlgorithm',
+ rfc5652.KeyDerivationAlgorithmIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm',
+ rfc5652.KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys',
+ rfc5652.RecipientEncryptedKeys())
+ )
+
+
+class CMSORIforPSKOtherInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('psk',
+ univ.OctetString()),
+ namedtype.NamedType('keyMgmtAlgType',
+ univ.Enumerated(namedValues=namedval.NamedValues(
+ ('keyTrans', 5), ('keyAgree', 10)))),
+ namedtype.NamedType('keyEncryptionAlgorithm',
+ rfc5652.KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('pskLength',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('kdkLength',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX)))
+ )
+
+
+# Update the CMS Other Recipient Info map in rfc5652.py
+
+_otherRecipientInfoMapUpdate = {
+ id_ori_keyTransPSK: KeyTransPSKRecipientInfo(),
+ id_ori_keyAgreePSK: KeyAgreePSKRecipientInfo(),
+}
+
+rfc5652.otherRecipientInfoMap.update(_otherRecipientInfoMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8702.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8702.py
new file mode 100644
index 0000000000..977c278760
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8702.py
@@ -0,0 +1,105 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SHAKE One-way Hash Functions for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8702.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8692
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 8692
+
+id_shake128 = rfc8692.id_shake128
+
+mda_shake128 = rfc8692.mda_shake128
+
+id_shake256 = rfc8692.id_shake256
+
+mda_shake256 = rfc8692.mda_shake256
+
+id_RSASSA_PSS_SHAKE128 = rfc8692.id_RSASSA_PSS_SHAKE128
+
+sa_rSASSA_PSS_SHAKE128 = rfc8692.sa_rSASSA_PSS_SHAKE128
+
+pk_rsaSSA_PSS_SHAKE128 = rfc8692.pk_rsaSSA_PSS_SHAKE128
+
+id_RSASSA_PSS_SHAKE256 = rfc8692.id_RSASSA_PSS_SHAKE256
+
+sa_rSASSA_PSS_SHAKE256 = rfc8692.sa_rSASSA_PSS_SHAKE256
+
+pk_rsaSSA_PSS_SHAKE256 = rfc8692.pk_rsaSSA_PSS_SHAKE256
+
+id_ecdsa_with_shake128 = rfc8692.id_ecdsa_with_shake128
+
+sa_ecdsa_with_shake128 = rfc8692.sa_ecdsa_with_shake128
+
+id_ecdsa_with_shake256 = rfc8692.id_ecdsa_with_shake256
+
+sa_ecdsa_with_shake256 = rfc8692.sa_ecdsa_with_shake256
+
+pk_ec = rfc8692.pk_ec
+
+
+# KMAC with SHAKE128
+
+id_KMACWithSHAKE128 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.19')
+
+
+class KMACwithSHAKE128_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('kMACOutputLength',
+ univ.Integer().subtype(value=256)),
+ namedtype.DefaultedNamedType('customizationString',
+ univ.OctetString().subtype(value=''))
+ )
+
+
+maca_KMACwithSHAKE128 = AlgorithmIdentifier()
+maca_KMACwithSHAKE128['algorithm'] = id_KMACWithSHAKE128
+maca_KMACwithSHAKE128['parameters'] = KMACwithSHAKE128_params()
+
+
+# KMAC with SHAKE256
+
+id_KMACWithSHAKE256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.20')
+
+
+class KMACwithSHAKE256_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('kMACOutputLength',
+ univ.Integer().subtype(value=512)),
+ namedtype.DefaultedNamedType('customizationString',
+ univ.OctetString().subtype(value=''))
+ )
+
+
+maca_KMACwithSHAKE256 = AlgorithmIdentifier()
+maca_KMACwithSHAKE256['algorithm'] = id_KMACWithSHAKE256
+maca_KMACwithSHAKE256['parameters'] = KMACwithSHAKE256_params()
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_KMACWithSHAKE128: KMACwithSHAKE128_params(),
+ id_KMACWithSHAKE256: KMACwithSHAKE256_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8708.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8708.py
new file mode 100644
index 0000000000..3e9909cf90
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8708.py
@@ -0,0 +1,41 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# HSS/LMS Hash-based Signature Algorithm for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8708.txt
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Object Identifiers
+
+id_alg_hss_lms_hashsig = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.17')
+
+id_alg_mts_hashsig = id_alg_hss_lms_hashsig
+
+
+# Signature Algorithm Identifier
+
+sa_HSS_LMS_HashSig = rfc5280.AlgorithmIdentifier()
+sa_HSS_LMS_HashSig['algorithm'] = id_alg_hss_lms_hashsig
+# sa_HSS_LMS_HashSig['parameters'] is always absent
+
+
+# Public Key
+
+class HSS_LMS_HashSig_PublicKey(univ.OctetString):
+ pass
+
+
+pk_HSS_LMS_HashSig = rfc5280.SubjectPublicKeyInfo()
+pk_HSS_LMS_HashSig['algorithm'] = sa_HSS_LMS_HashSig
+# pk_HSS_LMS_HashSig['subjectPublicKey'] CONTAINS a DER-encoded HSS_LMS_HashSig_PublicKey
diff --git a/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8769.py b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8769.py
new file mode 100644
index 0000000000..5d2b300674
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/pyasn1_modules/rfc8769.py
@@ -0,0 +1,21 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CBOR Content for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8769.txt
+#
+
+from pyasn1.type import univ
+
+
+id_ct_cbor = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.44')
+
+
+id_ct_cborSequence = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.45')
diff --git a/contrib/python/pyasn1-modules/py2/tests/__init__.py b/contrib/python/pyasn1-modules/py2/tests/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1-modules/py2/tests/__main__.py b/contrib/python/pyasn1-modules/py2/tests/__main__.py
new file mode 100644
index 0000000000..4e10bc8afb
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/__main__.py
@@ -0,0 +1,138 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.test_pem.suite',
+ 'tests.test_rfc2314.suite',
+ 'tests.test_rfc2315.suite',
+ 'tests.test_rfc2437.suite',
+ 'tests.test_rfc2459.suite',
+ 'tests.test_rfc2511.suite',
+ 'tests.test_rfc2560.suite',
+ 'tests.test_rfc2631.suite',
+ 'tests.test_rfc2634.suite',
+ 'tests.test_rfc2876.suite',
+ 'tests.test_rfc2985.suite',
+ 'tests.test_rfc2986.suite',
+ 'tests.test_rfc3058.suite',
+ 'tests.test_rfc3114.suite',
+ 'tests.test_rfc3125.suite',
+ 'tests.test_rfc3161.suite',
+ 'tests.test_rfc3274.suite',
+ 'tests.test_rfc3279.suite',
+ 'tests.test_rfc3280.suite',
+ 'tests.test_rfc3281.suite',
+ 'tests.test_rfc3370.suite',
+ 'tests.test_rfc3447.suite',
+ 'tests.test_rfc3537.suite',
+ 'tests.test_rfc3560.suite',
+ 'tests.test_rfc3565.suite',
+ 'tests.test_rfc3657.suite',
+ 'tests.test_rfc3709.suite',
+ 'tests.test_rfc3739.suite',
+ 'tests.test_rfc3770.suite',
+ 'tests.test_rfc3779.suite',
+ 'tests.test_rfc3820.suite',
+ 'tests.test_rfc3852.suite',
+ 'tests.test_rfc4010.suite',
+ 'tests.test_rfc4043.suite',
+ 'tests.test_rfc4055.suite',
+ 'tests.test_rfc4073.suite',
+ 'tests.test_rfc4108.suite',
+ 'tests.test_rfc4210.suite',
+ 'tests.test_rfc4211.suite',
+ 'tests.test_rfc4334.suite',
+ 'tests.test_rfc4357.suite',
+ 'tests.test_rfc4387.suite',
+ 'tests.test_rfc4476.suite',
+ 'tests.test_rfc4490.suite',
+ 'tests.test_rfc4491.suite',
+ 'tests.test_rfc4683.suite',
+ 'tests.test_rfc4985.suite',
+ 'tests.test_rfc5035.suite',
+ 'tests.test_rfc5083.suite',
+ 'tests.test_rfc5084.suite',
+ 'tests.test_rfc5126.suite',
+ 'tests.test_rfc5208.suite',
+ 'tests.test_rfc5275.suite',
+ 'tests.test_rfc5280.suite',
+ 'tests.test_rfc5480.suite',
+ 'tests.test_rfc5636.suite',
+ 'tests.test_rfc5639.suite',
+ 'tests.test_rfc5649.suite',
+ 'tests.test_rfc5652.suite',
+ 'tests.test_rfc5697.suite',
+ 'tests.test_rfc5751.suite',
+ 'tests.test_rfc5752.suite',
+ 'tests.test_rfc5753.suite',
+ 'tests.test_rfc5755.suite',
+ 'tests.test_rfc5913.suite',
+ 'tests.test_rfc5914.suite',
+ 'tests.test_rfc5915.suite',
+ 'tests.test_rfc5916.suite',
+ 'tests.test_rfc5917.suite',
+ 'tests.test_rfc5924.suite',
+ 'tests.test_rfc5934.suite',
+ 'tests.test_rfc5940.suite',
+ 'tests.test_rfc5958.suite',
+ 'tests.test_rfc5990.suite',
+ 'tests.test_rfc6010.suite',
+ 'tests.test_rfc6019.suite',
+ 'tests.test_rfc6031.suite',
+ 'tests.test_rfc6032.suite',
+ 'tests.test_rfc6120.suite',
+ 'tests.test_rfc6187.suite',
+ 'tests.test_rfc6210.suite',
+ 'tests.test_rfc6211.suite',
+ 'tests.test_rfc6482.suite',
+ 'tests.test_rfc6486.suite',
+ 'tests.test_rfc6487.suite',
+ 'tests.test_rfc6664.suite',
+ 'tests.test_rfc6955.suite',
+ 'tests.test_rfc6960.suite',
+ 'tests.test_rfc7030.suite',
+ 'tests.test_rfc7191.suite',
+ 'tests.test_rfc7229.suite',
+ 'tests.test_rfc7292.suite',
+ 'tests.test_rfc7296.suite',
+ 'tests.test_rfc7508.suite',
+ 'tests.test_rfc7585.suite',
+ 'tests.test_rfc7633.suite',
+ 'tests.test_rfc7773.suite',
+ 'tests.test_rfc7894.suite',
+ 'tests.test_rfc7906.suite',
+ 'tests.test_rfc7914.suite',
+ 'tests.test_rfc8017.suite',
+ 'tests.test_rfc8018.suite',
+ 'tests.test_rfc8103.suite',
+ 'tests.test_rfc8209.suite',
+ 'tests.test_rfc8226.suite',
+ 'tests.test_rfc8358.suite',
+ 'tests.test_rfc8360.suite',
+ 'tests.test_rfc8398.suite',
+ 'tests.test_rfc8410.suite',
+ 'tests.test_rfc8418.suite',
+ 'tests.test_rfc8419.suite',
+ 'tests.test_rfc8479.suite',
+ 'tests.test_rfc8494.suite',
+ 'tests.test_rfc8520.suite',
+ 'tests.test_rfc8619.suite',
+ 'tests.test_rfc8649.suite',
+ 'tests.test_rfc8692.suite',
+ 'tests.test_rfc8696.suite',
+ 'tests.test_rfc8702.suite',
+ 'tests.test_rfc8708.suite',
+ 'tests.test_rfc8769.suite']
+)
+
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_missing.py b/contrib/python/pyasn1-modules/py2/tests/test_missing.py
new file mode 100644
index 0000000000..0b3f58a7ce
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_missing.py
@@ -0,0 +1,18 @@
+#
+# This file is part of pyasn1-modules software.
+#
+import sys
+import unittest
+
+# modules without tests
+from pyasn1_modules import (
+ rfc1155, rfc1157, rfc1901, rfc3412, rfc3414
+)
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_pem.py b/contrib/python/pyasn1-modules/py2/tests/test_pem.py
new file mode 100644
index 0000000000..dbcca5a78c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_pem.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.compat.octets import ints2octs
+from pyasn1_modules import pem
+
+
+class PemTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDATCCAekCAQAwgZkxCzAJBgNVBAYTAlJVMRYwFAYDVQQIEw1Nb3Njb3cgUmVn
+aW9uMQ8wDQYDVQQHEwZNb3Njb3cxGjAYBgNVBAoTEVNOTVAgTGFib3JhdG9yaWVz
+MQwwCgYDVQQLFANSJkQxFTATBgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3
+DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQC9n2NfGS98JDBmAXQn+vNUyPB3QPYC1cwpX8UMYh9MdAmBZJCnvXrQ
+Pp14gNAv6AQKxefmGES1b+Yd+1we9HB8AKm1/8xvRDUjAvy4iO0sqFCPvIfSujUy
+pBcfnR7QE2itvyrMxCDSEVnMhKdCNb23L2TptUmpvLcb8wfAMLFsSu2yaOtJysep
+oH/mvGqlRv2ti2+E2YA0M7Pf83wyV1XmuEsc9tQ225rprDk2uyshUglkDD2235rf
+0QyONq3Aw3BMrO9ss1qj7vdDhVHVsxHnTVbEgrxEWkq2GkVKh9QReMZ2AKxe40j4
+og+OjKXguOCggCZHJyXKxccwqCaeCztbAgMBAAGgIjAgBgkqhkiG9w0BCQIxExMR
+U05NUCBMYWJvcmF0b3JpZXMwDQYJKoZIhvcNAQEFBQADggEBAAihbwmN9M2bsNNm
+9KfxqiGMqqcGCtzIlpDz/2NVwY93cEZsbz3Qscc0QpknRmyTSoDwIG+1nUH0vzkT
+Nv8sBmp9I1GdhGg52DIaWwL4t9O5WUHgfHSJpPxZ/zMP2qIsdPJ+8o19BbXRlufc
+73c03H1piGeb9VcePIaulSHI622xukI6f4Sis49vkDaoi+jadbEEb6TYkJQ3AMRD
+WdApGGm0BePdLqboW1Yv70WRRFFD8sxeT7Yw4qrJojdnq0xMHPGfKpf6dJsqWkHk
+b5DRbjil1Zt9pJuF680S9wtBzSi0hsMHXR9TzS7HpMjykL2nmCVY6A78MZapsCzn
+GGbx7DI=
+"""
+
+ def testReadBase64fromText(self):
+
+ binary = pem.readBase64fromText(self.pem_text)
+
+ self.assertTrue(binary)
+
+ expected = [
+ 48, 130, 3, 1, 48, 130, 1, 233, 2, 1, 0, 48, 129, 153, 49, 11, 48,
+ 9, 6, 3, 85, 4, 6, 19, 2, 82, 85, 49, 22, 48, 20, 6, 3, 85, 4, 8,
+ 19, 13, 77, 111, 115, 99, 111, 119, 32, 82, 101, 103, 105, 111,
+ 110, 49, 15, 48, 13, 6, 3, 85, 4, 7, 19, 6, 77, 111, 115, 99, 111,
+ 119, 49, 26, 48, 24, 6, 3, 85, 4, 10, 19, 17, 83, 78, 77, 80, 32,
+ 76, 97, 98, 111, 114, 97, 116, 111, 114, 105, 101, 115, 49, 12,
+ 48, 10, 6, 3, 85, 4, 11, 20, 3, 82, 38, 68, 49, 21, 48, 19, 6, 3,
+ 85, 4, 3, 19, 12, 115, 110, 109, 112, 108, 97, 98, 115, 46, 99,
+ 111, 109, 49, 32, 48, 30, 6, 9, 42, 134, 72, 134, 247, 13, 1, 9, 1,
+ 22, 17, 105, 110, 102, 111, 64, 115, 110, 109, 112, 108, 97, 98,
+ 115, 46, 99, 111, 109, 48, 130, 1, 34, 48, 13, 6, 9, 42, 134, 72,
+ 134, 247, 13, 1, 1, 1, 5, 0, 3, 130, 1, 15, 0, 48, 130, 1, 10, 2,
+ 130, 1, 1, 0, 189, 159, 99, 95, 25, 47, 124, 36, 48, 102, 1, 116,
+ 39, 250, 243, 84, 200, 240, 119, 64, 246, 2, 213, 204, 41, 95, 197,
+ 12, 98, 31, 76, 116, 9, 129, 100, 144, 167, 189, 122, 208, 62, 157,
+ 120, 128, 208, 47, 232, 4, 10, 197, 231, 230, 24, 68, 181, 111,
+ 230, 29, 251, 92, 30, 244, 112, 124, 0, 169, 181, 255, 204, 111,
+ 68, 53, 35, 2, 252, 184, 136, 237, 44, 168, 80, 143, 188, 135, 210,
+ 186, 53, 50, 164, 23, 31, 157, 30, 208, 19, 104, 173, 191, 42, 204,
+ 196, 32, 210, 17, 89, 204, 132, 167, 66, 53, 189, 183, 47, 100,
+ 233, 181, 73, 169, 188, 183, 27, 243, 7, 192, 48, 177, 108, 74,
+ 237, 178, 104, 235, 73, 202, 199, 169, 160, 127, 230, 188, 106,
+ 165, 70, 253, 173, 139, 111, 132, 217, 128, 52, 51, 179, 223, 243,
+ 124, 50, 87, 85, 230, 184, 75, 28, 246, 212, 54, 219, 154, 233,
+ 172, 57, 54, 187, 43, 33, 82, 9, 100, 12, 61, 182, 223, 154, 223,
+ 209, 12, 142, 54, 173, 192, 195, 112, 76, 172, 239, 108, 179, 90,
+ 163, 238, 247, 67, 133, 81, 213, 179, 17, 231, 77, 86, 196, 130,
+ 188, 68, 90, 74, 182, 26, 69, 74, 135, 212, 17, 120, 198, 118, 0,
+ 172, 94, 227, 72, 248, 162, 15, 142, 140, 165, 224, 184, 224, 160,
+ 128, 38, 71, 39, 37, 202, 197, 199, 48, 168, 38, 158, 11, 59, 91, 2,
+ 3, 1, 0, 1, 160, 34, 48, 32, 6, 9, 42, 134, 72, 134, 247, 13, 1, 9,
+ 2, 49, 19, 19, 17, 83, 78, 77, 80, 32, 76, 97, 98, 111, 114, 97,
+ 116, 111, 114, 105, 101, 115, 48, 13, 6, 9, 42, 134, 72, 134, 247,
+ 13, 1, 1, 5, 5, 0, 3, 130, 1, 1, 0, 8, 161, 111, 9, 141, 244, 205,
+ 155, 176, 211, 102, 244, 167, 241, 170, 33, 140, 170, 167, 6, 10,
+ 220, 200, 150, 144, 243, 255, 99, 85, 193, 143, 119, 112, 70, 108,
+ 111, 61, 208, 177, 199, 52, 66, 153, 39, 70, 108, 147, 74, 128, 240,
+ 32, 111, 181, 157, 65, 244, 191, 57, 19, 54, 255, 44, 6, 106, 125,
+ 35, 81, 157, 132, 104, 57, 216, 50, 26, 91, 2, 248, 183, 211, 185,
+ 89, 65, 224, 124, 116, 137, 164, 252, 89, 255, 51, 15, 218, 162,
+ 44, 116, 242, 126, 242, 141, 125, 5, 181, 209, 150, 231, 220, 239,
+ 119, 52, 220, 125, 105, 136, 103, 155, 245, 87, 30, 60, 134, 174,
+ 149, 33, 200, 235, 109, 177, 186, 66, 58, 127, 132, 162, 179, 143,
+ 111, 144, 54, 168, 139, 232, 218, 117, 177, 4, 111, 164, 216, 144,
+ 148, 55, 0, 196, 67, 89, 208, 41, 24, 105, 180, 5, 227, 221, 46,
+ 166, 232, 91, 86, 47, 239, 69, 145, 68, 81, 67, 242, 204, 94, 79,
+ 182, 48, 226, 170, 201, 162, 55, 103, 171, 76, 76, 28, 241, 159,
+ 42, 151, 250, 116, 155, 42, 90, 65, 228, 111, 144, 209, 110, 56,
+ 165, 213, 155, 125, 164, 155, 133, 235, 205, 18, 247, 11, 65, 205,
+ 40, 180, 134, 195, 7, 93, 31, 83, 205, 46, 199, 164, 200, 242, 144,
+ 189, 167, 152, 37, 88, 232, 14, 252, 49, 150, 169, 176, 44, 231,
+ 24, 102, 241, 236, 50
+ ]
+
+ self.assertEqual(ints2octs(expected), binary)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2314.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2314.py
new file mode 100644
index 0000000000..69927a6a5c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2314.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2314
+
+
+class CertificationRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDATCCAekCAQAwgZkxCzAJBgNVBAYTAlJVMRYwFAYDVQQIEw1Nb3Njb3cgUmVn
+aW9uMQ8wDQYDVQQHEwZNb3Njb3cxGjAYBgNVBAoTEVNOTVAgTGFib3JhdG9yaWVz
+MQwwCgYDVQQLFANSJkQxFTATBgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3
+DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQC9n2NfGS98JDBmAXQn+vNUyPB3QPYC1cwpX8UMYh9MdAmBZJCnvXrQ
+Pp14gNAv6AQKxefmGES1b+Yd+1we9HB8AKm1/8xvRDUjAvy4iO0sqFCPvIfSujUy
+pBcfnR7QE2itvyrMxCDSEVnMhKdCNb23L2TptUmpvLcb8wfAMLFsSu2yaOtJysep
+oH/mvGqlRv2ti2+E2YA0M7Pf83wyV1XmuEsc9tQ225rprDk2uyshUglkDD2235rf
+0QyONq3Aw3BMrO9ss1qj7vdDhVHVsxHnTVbEgrxEWkq2GkVKh9QReMZ2AKxe40j4
+og+OjKXguOCggCZHJyXKxccwqCaeCztbAgMBAAGgIjAgBgkqhkiG9w0BCQIxExMR
+U05NUCBMYWJvcmF0b3JpZXMwDQYJKoZIhvcNAQEFBQADggEBAAihbwmN9M2bsNNm
+9KfxqiGMqqcGCtzIlpDz/2NVwY93cEZsbz3Qscc0QpknRmyTSoDwIG+1nUH0vzkT
+Nv8sBmp9I1GdhGg52DIaWwL4t9O5WUHgfHSJpPxZ/zMP2qIsdPJ+8o19BbXRlufc
+73c03H1piGeb9VcePIaulSHI622xukI6f4Sis49vkDaoi+jadbEEb6TYkJQ3AMRD
+WdApGGm0BePdLqboW1Yv70WRRFFD8sxeT7Yw4qrJojdnq0xMHPGfKpf6dJsqWkHk
+b5DRbjil1Zt9pJuF680S9wtBzSi0hsMHXR9TzS7HpMjykL2nmCVY6A78MZapsCzn
+GGbx7DI=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2314.CertificationRequest()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2315.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2315.py
new file mode 100644
index 0000000000..40030c9972
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2315.py
@@ -0,0 +1,165 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2315
+
+
+class Pkcs7TestCase(unittest.TestCase):
+ pem_text_unordered = """\
+MIIKdQYJKoZIhvcNAQcCoIIKZjCCCmICAQExADALBgkqhkiG9w0BBwGgggpIMIIC
+XjCCAcegAwIBAgIBADANBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJSVTEPMA0G
+A1UEBxMGTW9zY293MRcwFQYDVQQKEw5Tb3ZhbSBUZWxlcG9ydDEMMAoGA1UECxMD
+TklTMQ8wDQYDVQQDEwZBQlMgQ0ExHTAbBgkqhkiG9w0BCQEWDmNlcnRAb25saW5l
+LnJ1MB4XDTk5MDgxNTE5MDI1OFoXDTAwMDExMjE5MDI1OFowdTELMAkGA1UEBhMC
+UlUxDzANBgNVBAcTBk1vc2NvdzEXMBUGA1UEChMOU292YW0gVGVsZXBvcnQxDDAK
+BgNVBAsTA05JUzEPMA0GA1UEAxMGQUJTIENBMR0wGwYJKoZIhvcNAQkBFg5jZXJ0
+QG9ubGluZS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAw0g1P0yQAZIi
+ml2XOCOxnCcuhHmAgj4Ei9M2ebrrGwUMONPzr1a8W7JcpnR3FeOjxEIxrzkHr6UA
+oj4l/oC7Rv28uIig+Okf+82ekhH6VgAQNr5LAzfN8J6dZLx2OXAmmLleAqHuisT7
+I40vEFRoRmC5hiMlILE2rIlIKJn6cUkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBZ
+7ELDfGUNb+fbpHl5W3d9JMXsdOgd96+HG+X1SPgeiRAMjkla8WFCSaQPIR4vCy0m
+tm5a2bWSji6+vP5FGbjOz5iMlHMrCtu0He7Eim2zpaGI06ZIY75Cn1h2r3+KS0/R
+h01TJUbmsfV1tZm6Wk3bayJ+/K8A4mBHv8P6rhYacDCCAowwggH1oAMCAQICAQAw
+DQYJKoZIhvcNAQEEBQAwgYsxCzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cx
+FzAVBgNVBAoTDkdvbGRlbiBUZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMT
+FUdvbGRlbiBUZWxlY29tIEFCUyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xk
+ZW50ZWxlY29tLnJ1MB4XDTAwMDEwNTE1MDY1MVoXDTEwMDExNTE1MDY1MVowgYsx
+CzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cxFzAVBgNVBAoTDkdvbGRlbiBU
+ZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMTFUdvbGRlbiBUZWxlY29tIEFC
+UyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xkZW50ZWxlY29tLnJ1MIGfMA0G
+CSqGSIb3DQEBAQUAA4GNADCBiQKBgQDPFel/Svli6ogoUEb6eLtEvNSjyalETSMP
+MIZXdmWIkWijvEUhDnNJVAE3knAt6dVYqxWq0vc6CbAGFZNqEyioGU48IECLzV0G
+toiYejF/c9PuyIKDejeV9/YZnNFaZAUOXhOjREdZURLISKhX4tAbQyvK0Qka9AAR
+MEy9DoqV8QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAHQzgqFkoSMQr077UCr5C0l1
+rxLA17TrocCmUC1/PLmN0LmUHD0d7TjjTQKJaJBHxcKIg6+FOY6LSSY4nAN79eXi
+nBz+jEUG7+NTU/jcEArI35yP7fi4Mwb96EYDmUkUGtcLNq3JBe/d1Zhmy9HnNBL1
+Dn9thM2Q8RPYAJIU3JnGMIICqTCCAhICAQAwDQYJKoZIhvcNAQEEBQAwgZwxCzAJ
+BgNVBAYTAlJVMQ8wDQYDVQQIEwZNb3Njb3cxDzANBgNVBAcTBk1vc2NvdzEXMBUG
+A1UEChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA1JPTDEeMBwGA1UEAxMVR29s
+ZGVuIFRlbGVjb20gQUJTIENBMSQwIgYJKoZIhvcNAQkBFhVjZXJ0QGdvbGRlbnRl
+bGVjb20ucnUwHhcNMTAwMTE1MTU0MDI2WhcNMjAwMjIyMTU0MDI2WjCBnDELMAkG
+A1UEBhMCUlUxDzANBgNVBAgTBk1vc2NvdzEPMA0GA1UEBxMGTW9zY293MRcwFQYD
+VQQKEw5Hb2xkZW4gVGVsZWNvbTEMMAoGA1UECxMDUk9MMR4wHAYDVQQDExVHb2xk
+ZW4gVGVsZWNvbSBBQlMgQ0ExJDAiBgkqhkiG9w0BCQEWFWNlcnRAZ29sZGVudGVs
+ZWNvbS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAzxXpf0r5YuqIKFBG
++ni7RLzUo8mpRE0jDzCGV3ZliJFoo7xFIQ5zSVQBN5JwLenVWKsVqtL3OgmwBhWT
+ahMoqBlOPCBAi81dBraImHoxf3PT7siCg3o3lff2GZzRWmQFDl4To0RHWVESyEio
+V+LQG0MrytEJGvQAETBMvQ6KlfECAwEAATANBgkqhkiG9w0BAQQFAAOBgQCMrS4T
+LIzxcpu8nwOq/xMcxW4Ctz/wjIoePWkmSLe+Tkb4zo7aTsvzn+ETaWb7qztUpyl0
+QvlXn4vC2iCJloPpofPqSzF1UV3g5Zb93ReZu7E6kEyW0ag8R5XZKv0xuR3b3Le+
+ZqolT8wQELd5Mmw5JPofZ+O2cGNvet8tYwOKFjCCAqUwggIOoAMCAQICAgboMA0G
+CSqGSIb3DQEBBAUAMIGcMQswCQYDVQQGEwJSVTEPMA0GA1UECBMGTW9zY293MQ8w
+DQYDVQQHEwZNb3Njb3cxFzAVBgNVBAoTDkdvbGRlbiBUZWxlY29tMQwwCgYDVQQL
+EwNST0wxHjAcBgNVBAMTFUdvbGRlbiBUZWxlY29tIEFCUyBDQTEkMCIGCSqGSIb3
+DQEJARYVY2VydEBnb2xkZW50ZWxlY29tLnJ1MB4XDTExMDEyODEyMTcwOVoXDTEy
+MDIwMTAwMDAwMFowdjELMAkGA1UEBhMCUlUxDDAKBgNVBAgTA04vQTEXMBUGA1UE
+ChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA0lTUDEWMBQGA1UEAxMNY3JheS5n
+bGFzLm5ldDEaMBgGCSqGSIb3DQEJARYLZWxpZUByb2wucnUwgZ8wDQYJKoZIhvcN
+AQEBBQADgY0AMIGJAoGBAPJAm8KG3ZCoJSvoGmLMPlGaMIpadu/EGSEYu+M/ybLp
+Cs8XmwB3876JVKKCbtGI6eqxOqvjedYXb+nKcyhz4Ztmm8RgAD7Z1WUItIpatejT
+79EYOUWrDN713SLZsImMyP4B4EySl4LZfHFRU2iOwLB6WozGCYuULLqYS9MDPrnT
+AgMBAAGjGzAZMBcGCWCGSAGG+EIBDQQKFghDPS07Uz0tOzANBgkqhkiG9w0BAQQF
+AAOBgQDEttS70qYCA+MGBA3hOR88XiBcTmuBarJDwn/rj31vRjYZUgp9bbFwscRI
+Ic4lDnlyvunwNitl+341bDg7u6Ebu9hCMbciyu4EtrsDh77DlLzbmNcXbnhlvbFL
+K9GiPz3dNyvQMfmaA0twd62zJDOVJ1SmO04lLmu/pAx8GhBZkqEAMQA=
+"""
+
+ # canonically ordered SET components
+ pem_text_reordered = """\
+MIIKcwYJKoZIhvcNAQcCoIIKZDCCCmACAQExADALBgkqhkiG9w0BBwGgggpIMIIC
+XjCCAcegAwIBAgIBADANBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJSVTEPMA0G
+A1UEBxMGTW9zY293MRcwFQYDVQQKEw5Tb3ZhbSBUZWxlcG9ydDEMMAoGA1UECxMD
+TklTMQ8wDQYDVQQDEwZBQlMgQ0ExHTAbBgkqhkiG9w0BCQEWDmNlcnRAb25saW5l
+LnJ1MB4XDTk5MDgxNTE5MDI1OFoXDTAwMDExMjE5MDI1OFowdTELMAkGA1UEBhMC
+UlUxDzANBgNVBAcTBk1vc2NvdzEXMBUGA1UEChMOU292YW0gVGVsZXBvcnQxDDAK
+BgNVBAsTA05JUzEPMA0GA1UEAxMGQUJTIENBMR0wGwYJKoZIhvcNAQkBFg5jZXJ0
+QG9ubGluZS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAw0g1P0yQAZIi
+ml2XOCOxnCcuhHmAgj4Ei9M2ebrrGwUMONPzr1a8W7JcpnR3FeOjxEIxrzkHr6UA
+oj4l/oC7Rv28uIig+Okf+82ekhH6VgAQNr5LAzfN8J6dZLx2OXAmmLleAqHuisT7
+I40vEFRoRmC5hiMlILE2rIlIKJn6cUkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBZ
+7ELDfGUNb+fbpHl5W3d9JMXsdOgd96+HG+X1SPgeiRAMjkla8WFCSaQPIR4vCy0m
+tm5a2bWSji6+vP5FGbjOz5iMlHMrCtu0He7Eim2zpaGI06ZIY75Cn1h2r3+KS0/R
+h01TJUbmsfV1tZm6Wk3bayJ+/K8A4mBHv8P6rhYacDCCAowwggH1oAMCAQICAQAw
+DQYJKoZIhvcNAQEEBQAwgYsxCzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cx
+FzAVBgNVBAoTDkdvbGRlbiBUZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMT
+FUdvbGRlbiBUZWxlY29tIEFCUyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xk
+ZW50ZWxlY29tLnJ1MB4XDTAwMDEwNTE1MDY1MVoXDTEwMDExNTE1MDY1MVowgYsx
+CzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cxFzAVBgNVBAoTDkdvbGRlbiBU
+ZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMTFUdvbGRlbiBUZWxlY29tIEFC
+UyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xkZW50ZWxlY29tLnJ1MIGfMA0G
+CSqGSIb3DQEBAQUAA4GNADCBiQKBgQDPFel/Svli6ogoUEb6eLtEvNSjyalETSMP
+MIZXdmWIkWijvEUhDnNJVAE3knAt6dVYqxWq0vc6CbAGFZNqEyioGU48IECLzV0G
+toiYejF/c9PuyIKDejeV9/YZnNFaZAUOXhOjREdZURLISKhX4tAbQyvK0Qka9AAR
+MEy9DoqV8QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAHQzgqFkoSMQr077UCr5C0l1
+rxLA17TrocCmUC1/PLmN0LmUHD0d7TjjTQKJaJBHxcKIg6+FOY6LSSY4nAN79eXi
+nBz+jEUG7+NTU/jcEArI35yP7fi4Mwb96EYDmUkUGtcLNq3JBe/d1Zhmy9HnNBL1
+Dn9thM2Q8RPYAJIU3JnGMIICpTCCAg6gAwIBAgICBugwDQYJKoZIhvcNAQEEBQAw
+gZwxCzAJBgNVBAYTAlJVMQ8wDQYDVQQIEwZNb3Njb3cxDzANBgNVBAcTBk1vc2Nv
+dzEXMBUGA1UEChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA1JPTDEeMBwGA1UE
+AxMVR29sZGVuIFRlbGVjb20gQUJTIENBMSQwIgYJKoZIhvcNAQkBFhVjZXJ0QGdv
+bGRlbnRlbGVjb20ucnUwHhcNMTEwMTI4MTIxNzA5WhcNMTIwMjAxMDAwMDAwWjB2
+MQswCQYDVQQGEwJSVTEMMAoGA1UECBMDTi9BMRcwFQYDVQQKEw5Hb2xkZW4gVGVs
+ZWNvbTEMMAoGA1UECxMDSVNQMRYwFAYDVQQDEw1jcmF5LmdsYXMubmV0MRowGAYJ
+KoZIhvcNAQkBFgtlbGllQHJvbC5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
+gYEA8kCbwobdkKglK+gaYsw+UZowilp278QZIRi74z/JsukKzxebAHfzvolUooJu
+0Yjp6rE6q+N51hdv6cpzKHPhm2abxGAAPtnVZQi0ilq16NPv0Rg5RasM3vXdItmw
+iYzI/gHgTJKXgtl8cVFTaI7AsHpajMYJi5QsuphL0wM+udMCAwEAAaMbMBkwFwYJ
+YIZIAYb4QgENBAoWCEM9LTtTPS07MA0GCSqGSIb3DQEBBAUAA4GBAMS21LvSpgID
+4wYEDeE5HzxeIFxOa4FqskPCf+uPfW9GNhlSCn1tsXCxxEghziUOeXK+6fA2K2X7
+fjVsODu7oRu72EIxtyLK7gS2uwOHvsOUvNuY1xdueGW9sUsr0aI/Pd03K9Ax+ZoD
+S3B3rbMkM5UnVKY7TiUua7+kDHwaEFmSMIICqTCCAhICAQAwDQYJKoZIhvcNAQEE
+BQAwgZwxCzAJBgNVBAYTAlJVMQ8wDQYDVQQIEwZNb3Njb3cxDzANBgNVBAcTBk1v
+c2NvdzEXMBUGA1UEChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA1JPTDEeMBwG
+A1UEAxMVR29sZGVuIFRlbGVjb20gQUJTIENBMSQwIgYJKoZIhvcNAQkBFhVjZXJ0
+QGdvbGRlbnRlbGVjb20ucnUwHhcNMTAwMTE1MTU0MDI2WhcNMjAwMjIyMTU0MDI2
+WjCBnDELMAkGA1UEBhMCUlUxDzANBgNVBAgTBk1vc2NvdzEPMA0GA1UEBxMGTW9z
+Y293MRcwFQYDVQQKEw5Hb2xkZW4gVGVsZWNvbTEMMAoGA1UECxMDUk9MMR4wHAYD
+VQQDExVHb2xkZW4gVGVsZWNvbSBBQlMgQ0ExJDAiBgkqhkiG9w0BCQEWFWNlcnRA
+Z29sZGVudGVsZWNvbS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAzxXp
+f0r5YuqIKFBG+ni7RLzUo8mpRE0jDzCGV3ZliJFoo7xFIQ5zSVQBN5JwLenVWKsV
+qtL3OgmwBhWTahMoqBlOPCBAi81dBraImHoxf3PT7siCg3o3lff2GZzRWmQFDl4T
+o0RHWVESyEioV+LQG0MrytEJGvQAETBMvQ6KlfECAwEAATANBgkqhkiG9w0BAQQF
+AAOBgQCMrS4TLIzxcpu8nwOq/xMcxW4Ctz/wjIoePWkmSLe+Tkb4zo7aTsvzn+ET
+aWb7qztUpyl0QvlXn4vC2iCJloPpofPqSzF1UV3g5Zb93ReZu7E6kEyW0ag8R5XZ
+Kv0xuR3b3Le+ZqolT8wQELd5Mmw5JPofZ+O2cGNvet8tYwOKFjEA
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2315.ContentInfo()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text_unordered)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text_reordered)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ substrate, der_encoder(asn1Object, omitEmptyOptionals=False))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2437.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2437.py
new file mode 100644
index 0000000000..b411756bbf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2437.py
@@ -0,0 +1,46 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2437
+
+
+class RSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBPAIBAAJBAMfAjvBNDDYBCl1w3yNcagZkPhqd0q5KqeOTgKSLuJWfe5+VSeR5
+Y1PcF3DyH8dvS3t8PIQjxJLoKS7HVRlsfhECAwEAAQJBAIr93/gxhIenXbD7MykF
+yvi7k8MtgkWoymICZwcX+c6RudFyuPPfQJ/sf6RmFZlRA9X9CQm5NwVG7+x1Yi6t
+KoECIQDmJUCWkPCiQYow6YxetpXFa0K6hTzOPmax7MNHVWNgmQIhAN4xOZ4JFT34
+xVhK+8EudBCYRomJUHmOJfoQAxiIXVw5AiEAyB7ecc5on/5zhqKef4Eu7LKfHIdc
+304diFuDVpTmTAkCIC2ZmKOQZaWkSowGR4isCfHl7oQHhFaOD8k0RA5i3hYxAiEA
+n8lDw3JT6NjvMnD6aM8KBsLyhazWSVVkaUSqmJzgCF0=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2437.RSAPrivateKey()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2459.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2459.py
new file mode 100644
index 0000000000..4132daa426
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2459.py
@@ -0,0 +1,142 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2459
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2459.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+
+class CertificateListTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2459.CertificateList()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+
+class DSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBugIBAAKBgQCN91+Cma8UPw09gjwP9WOJCdpv3mv3/qFqzgiODGZx0Q002iTl
+1dq36m5TsWYFEcMCEyC3tFuoQ0mGq5zUUOmJvHCIPufs0g8Av0fhY77uFqneHHUi
+VQMCPCHX9vTCWskmDE21LJppU27bR4H2q+ysE30d6u3+84qrItsn4bjpcQIVAPR5
+QrmooOXDn7fHJzshmxImGC4VAoGAXxKyEnlvzq93d4V6KLWX3H5Jk2JP771Ss1bT
+6D/mSbLlvjjo7qsj6diul1axu6Wny31oPertzA2FeGEzkqvjSNmSxyYYMDB3kEcx
+ahntt37I1FgSlgdZHuhdtl1h1DBKXqCCneOZuNj+kW5ib14u5HDfFIbec2HJbvVs
+lJ/k83kCgYB4TD8vgHetXHxqsiZDoy5wOnQ3mmFAfl8ZdQsIfov6kEgArwPYUOVB
+JsX84f+MFjIOKXUV8dHZ8VRrGCLAbXcxKqLNWKlKHUnEsvt63pkaTy/RKHyQS+pn
+wontdTt9EtbF+CqIWnm2wpn3O+SbdtawzPOL1CcGB0jYABwbeQ81RwIUFKdyRYaa
+INow2I3/ks+0MxDabTY=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2459.DSAPrivateKey()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2511.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2511.py
new file mode 100644
index 0000000000..057b7fe861
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2511.py
@@ -0,0 +1,48 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2511
+
+
+class CertificateReqTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
+DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
+nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
+0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
+rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
+BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
+BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
+AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
+xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2511.CertReqMessages()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2560.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2560.py
new file mode 100644
index 0000000000..eef5451f00
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2560.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2560
+
+
+class OCSPRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MGowaDBBMD8wPTAJBgUrDgMCGgUABBS3ZrMV9C5Dko03aH13cEZeppg3wgQUkqR1LKSevoFE63n8
+isWVpesQdXMCBDXe9M+iIzAhMB8GCSsGAQUFBzABAgQSBBBjdJOiIW9EKJGELNNf/rdA
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2560.OCSPRequest()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+
+class OCSPResponseTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEvQoBAKCCBLYwggSyBgkrBgEFBQcwAQEEggSjMIIEnzCCAQ+hgYAwfjELMAkGA1UEBhMCQVUx
+EzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEV
+MBMGA1UEAxMMc25tcGxhYnMuY29tMSAwHgYJKoZIhvcNAQkBFhFpbmZvQHNubXBsYWJzLmNvbRgP
+MjAxMjA0MTExNDA5MjJaMFQwUjA9MAkGBSsOAwIaBQAEFLdmsxX0LkOSjTdofXdwRl6mmDfCBBSS
+pHUspJ6+gUTrefyKxZWl6xB1cwIENd70z4IAGA8yMDEyMDQxMTE0MDkyMlqhIzAhMB8GCSsGAQUF
+BzABAgQSBBBjdJOiIW9EKJGELNNf/rdAMA0GCSqGSIb3DQEBBQUAA4GBADk7oRiCy4ew1u0N52QL
+RFpW+tdb0NfkV2Xyu+HChKiTThZPr9ZXalIgkJ1w3BAnzhbB0JX/zq7Pf8yEz/OrQ4GGH7HyD3Vg
+PkMu+J6I3A2An+bUQo99AmCbZ5/tSHtDYQMQt3iNbv1fk0yvDmh7UdKuXUNSyJdHeg27dMNy4k8A
+oIIC9TCCAvEwggLtMIICVqADAgECAgEBMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAkFVMRMw
+EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxFTAT
+BgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wHhcN
+MTIwNDExMTMyNTM1WhcNMTMwNDExMTMyNTM1WjB+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29t
+ZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRUwEwYDVQQDEwxzbm1w
+bGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25tcGxhYnMuY29tMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDDDU5HOnNV8I2CojxB8ilIWRHYQuaAjnjrETMOprouDHFXnwWqQo/I3m0b
+XYmocrh9kDefb+cgc7+eJKvAvBqrqXRnU38DmQU/zhypCftGGfP8xjuBZ1n23lR3hplN1yYA0J2X
+SgBaAg6e8OsKf1vcX8Es09rDo8mQpt4G2zR56wIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU8Ys2dpJFLMHl
+yY57D4BNmlqnEcYwHwYDVR0jBBgwFoAU8Ys2dpJFLMHlyY57D4BNmlqnEcYwDQYJKoZIhvcNAQEF
+BQADgYEAWR0uFJVlQId6hVpUbgXFTpywtNitNXFiYYkRRv77McSJqLCa/c1wnuLmqcFcuRUK0oN6
+8ZJDP2HDDKe8MCZ8+sx+CF54eM8VCgN9uQ9XyE7x9XrXDd3Uw9RJVaWSIezkNKNeBE0lDM2jUjC4
+HAESdf7nebz1wtqAOXE1jWF/y8g=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2560.OCSPResponse()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2631.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2631.py
new file mode 100644
index 0000000000..ca9e547694
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2631.py
@@ -0,0 +1,41 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2631
+
+
+class OtherInfoTestCase(unittest.TestCase):
+ pem_text = "MB0wEwYLKoZIhvcNAQkQAwYEBAAAAAGiBgQEAAAAwA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc2631.OtherInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ hex1 = univ.OctetString(hexValue='00000001')
+ self.assertEqual(hex1, asn1Object['keyInfo']['counter'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2634.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2634.py
new file mode 100644
index 0000000000..225b987ed2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2634.py
@@ -0,0 +1,191 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc2634
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIFLgYJKoZIhvcNAQcCoIIFHzCCBRsCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggIy
+MIICLgIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKgggFXMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8X
+DTE5MDUyOTE4MjMxOVowJQYLKoZIhvcNAQkQAgcxFgQUAbWZQYhLO5wtUgsOCGtT
+4V3aNhUwLwYLKoZIhvcNAQkQAgQxIDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZI
+hvcNAQcBMDUGCyqGSIb3DQEJEAICMSYxJAIBAQYKKwYBBAGBrGABARMTQm9hZ3Vz
+IFByaXZhY3kgTWFyazA/BgkqhkiG9w0BCQQxMgQwtuQipP2CZx7U96rGbUT06LC5
+jVFYccZW5/CaNvpcrOPiChDm2vI3m4k300z5mSZsME0GCyqGSIb3DQEJEAIBMT4w
+PAQgx08hD2QnVwj1DoeRELNtdZ0PffW4BQIvcwwVc/goU6OAAQEwFTATgRFhbGlj
+ZUBleGFtcGxlLmNvbTAKBggqhkjOPQQDAwRnMGUCMAFFVP2gYFLTbaxvV5J2ICNM
+Nk/K4pXbj5Zvj3dcCeC4+OUYyG3ZW5lOtKqaabEAXAIxALDg1WOouhkDfwuQdgBi
+mNTr0mjYeUWRe/15IsWNx+kuFcLDr71DFHvMFY5M3sdfMA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+
+class SignedReceiptTestCase(unittest.TestCase):
+ signed_receipt_pem_text = """\
+MIIE3gYJKoZIhvcNAQcCoIIEzzCCBMsCAQMxDTALBglghkgBZQMEAgEwga4GCyq
+GSIb3DQEJEAEBoIGeBIGbMIGYAgEBBgkqhkiG9w0BBwEEIMdPIQ9kJ1cI9Q6HkR
+CzbXWdD331uAUCL3MMFXP4KFOjBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzm
+o18WwkFrEYH3EMsvpXEIGqsFTFN6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZy
+b2xSX6Gr/IfCIm0angfOI39g7lAZDyivjh5H/oSgggJ3MIICczCCAfqgAwIBAgI
+JAKWzVCgbsG48MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDA
+JWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwN
+TI5MTkyMDEzWhcNMjAwNTI4MTkyMDEzWjBsMQswCQYDVQQGEwJVUzELMAkGA1UE
+CBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1wbGUxDDAKBgN
+VBAMTA0JvYjEeMBwGCSqGSIb3DQEJARYPYm9iQGV4YW1wbGUuY29tMHYwEAYHKo
+ZIzj0CAQYFK4EEACIDYgAEMaRiVS8WvN8Ycmpfq75jBbOMUukNfXAg6AL0JJBXt
+IFAuIJcZVlkLn/xbywkcMLHK/O+w9RWUQa2Cjw+h8b/1Cl+gIpqLtE558bD5PfM
+2aYpJ/YE6yZ9nBfTQs7z1TH5o4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvh
+CAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW
+55IHB1cnBvc2UuMB0GA1UdDgQWBBTKa2Zy3iybV3+YjuLDKtNmjsIapTAfBgNVH
+SMEGDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNnADBkAjAV
+boS6OfEYQomLDi2RUkd71hzwwiQZztbxNbosahIzjR8ZQaHhjdjJlrP/T6aXBws
+CMDfRweYz3Ce4E4wPfoqQnvqpM7ZlfhstjQQGOsWAtIIfqW/l+TgCO8ux3XLV6f
+j36zGCAYkwggGFAgEBMEwwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwD
+gYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQQIJAKWzVCgbsG48MAsG
+CWCGSAFlAwQCAaCBrjAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAQEwHAYJKoZ
+IhvcNAQkFMQ8XDTE5MDUyOTE5MzU1NVowLwYJKoZIhvcNAQkEMSIEIGb9Hm2kCn
+M0CYNpZU4Uj7dN0AzOieIn9sDqZMcIcZrEMEEGCyqGSIb3DQEJEAIFMTIEMBZze
+HVja7fQ62ywyh8rtKzBP1WJooMdZ+8c6pRqfIESYIU5bQnH99OPA51QCwdOdjAK
+BggqhkjOPQQDAgRoMGYCMQDZiT22xgab6RFMAPvN4fhWwzx017EzttD4VaYrpbo
+lropBdPJ6jIXiZQgCwxbGTCwCMQClaQ9K+L5LTeuW50ZKSIbmBZQ5dxjtnK3OlS
+7hYRi6U0JKZmWbbuS8vFIgX7eIkd8=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+ self.assertEqual(sd['encapContentInfo']['eContentType'],
+ rfc2634.id_ct_receipt)
+
+ receipt, rest = der_decoder(sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc2634.Receipt())
+
+ self.assertFalse(rest)
+ self.assertTrue(receipt.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(receipt))
+ self.assertEqual(receipt['version'], rfc2634.ESSVersion().subtype(value='v1'))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap.keys())
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd = asn1Object['content']
+
+ self.assertEqual(sd['version'], rfc5652.CMSVersion().subtype(value='v3'))
+ self.assertIn(sd['encapContentInfo']['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(sd['encapContentInfo']['eContentType'], rfc2634.id_ct_receipt)
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+
+ if sa['attrType'] == rfc2634.id_aa_msgSigDigest:
+ sa['attrValues'][0].prettyPrint()[:10] == '0x167378'
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ receipt, rest = der_decoder(sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd['encapContentInfo']['eContentType']])
+
+ self.assertEqual(receipt['version'], rfc2634.ESSVersion().subtype(value='v1'))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2876.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2876.py
new file mode 100644
index 0000000000..177e038b84
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2876.py
@@ -0,0 +1,185 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2876
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIIogYJKoZIhvcNAQcDoIIIkzCCCI8CAQKgggKRoIICjTCCAokwggIwoAMCAQIC
+FGPMbd5dAfZyD1kqY7NIQyVCWZgqMAkGByqGSM44BAMwPzELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMREwDwYDVQQKEwhCb2d1cyBD
+QTAeFw0xOTExMjAwODQzNDJaFw0yMDExMTkwODQzNDJaMGwxCzAJBgNVBAYTAlVT
+MQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBs
+ZTEMMAoGA1UEAxMDQm9iMR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20w
+gaEwFwYJYIZIAWUCAQEWBAp8tRylalhmjdM2A4GFAAKBgQD02ElSAgt9CWmKZ28J
+DMbpm/+aQ5PFPCTJRb1s2NuCHdakdYnkXXdtUgkIjgGYkVfGU6vhpGsdSRAFembb
+rjVdN/VkznUAxYFoyU/qmP5Az4R4dnNh08vdF49/XQA0JSasuN9WpmWtm2yPK3ZZ
+FXu2TRXIfD4ZlCDV1AcD+wnnVqOBlDCBkTALBgNVHQ8EBAMCAwgwQgYJYIZIAYb4
+QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFu
+eSBwdXJwb3NlLjAdBgNVHQ4EFgQUwtn/xRsTMH+uoIGDveicDyWKGlcwHwYDVR0j
+BBgwFoAUzUhlAYOypgdbBv4jgQzEc+TRtTgwCQYHKoZIzjgEAwNIADBFAiEAjK0V
+hpRdsxyQru4WTifhKnIioSUQlIkxymvsLD8VuSgCIGJ9vnSsDIthyWa5yove5wC7
+x3hFIBJXb31cTkdfMFYsMYHooYHlAgEEoBaAFMLZ/8UbEzB/rqCBg73onA8lihpX
+oYGDBIGAAVX+m3ogSJMhSVHNj/+juXxsWZ/UYNUmKXxH6YqRkHiRUl5Nd3cw6a1D
+vtNXb77ST3D6F/U/NS9VFfn2MBDhue2R7Mgfqgp8TnDOXgwxM/Po4qMH46UalPK3
+MeZ/e1xSI/yaIGJHlHFRZt0UI9ZTDsCTwMsK3XwAyEBmIeXRO0owGAYJYIZIAWUC
+AQEYMAsGCWCGSAFlAgEBFzAoMCagFgQUwtn/xRsTMH+uoIGDveicDyWKGlcEDGPg
+euAHFRJ4Hv6fXTCCBQgGCSqGSIb3DQEHATAXBglghkgBZQIBAQQwCgQIQk9HVVNf
+SVaAggTgc8exehjJD/gtEOIrg6tK5Emaa4PJ7l8f+EtyDD/ffQayXVAGz2MXUIQM
+EzmSLrnsr9NEyXvxGpvcsi7mV8tDxZU0YuyhA/C/HMh7EaBKG1hjC7xNw+IRIUxr
+bRJakMQbzMWWYJupC5zRu4/Ge9i+JVOGgES2E0L5LZSZ53wmnHA0ols1PHl3F3Z2
+QM3CkewqA3NP1waXQ0XXb0Oyl6Gq12B7ksm7euPWA3KctEjfYBD6nBT6wQd57rAM
+eFTk5aceWd2Sb/0xMpjfCg6GzX8pAWVEU8LqTvVmlSWdx3f3fAtUgiZ+gx7jNY8A
+6duln8zvMQn3mtPDCa50GzSrAx8JreHRWSDr3Dp8EfJzUgfy7dWlI9xs5bh1TMkE
+Mk+AHWQ5sBXTZkDgVAS5m1mIbXe7dzuxKsfGxjWu1eyy9J77mtOGo9aAOqYfxv/I
+8YQcgWHTeQcIO39Rmt2QsI7trRaEJ1jgj2E1To5gRCbIQWzQuyoS6affgu/9dwPX
+CAt0+0XrnO5vhaKX/RWm7ve8hYsiT0vI0hdBJ3rDRkdS9VL6NlnXOuohAqEq8b3s
+2koBigdri052hceAElTHD+4A4qRDiMLlFLlQqoJlpBwCtEPZsIQSy62K7J/Towxx
+ab5FoFjUTC5f79xPQPoKxYdgUB5AeAu5HgdWTn49Uqg4v/spTPSNRTmDMVVyZ9qh
+zJfkDpH3TKCAE5t59w4gSPe/7l+MeSml9O+L9HTd9Vng3LBbIds3uQ4cfLyyQmly
+81qpJjR1+Rvwo46hOm0kf2sIFi0WULmP/XzLw6b1SbiHf/jqFg7TFTyLMkPMPMmc
+7/kpLmYbKyTB4ineasTUL+bDrwu+uSzFAjTcI+1sz4Wo4p7RVywBDKSI5Ocbd3iM
+t4XWJWtz0KBX6nBzlV+BBTCwaGMAU4IpPBYOuvcl7TJWx/ODBjbO4zm4T/66w5IG
+3tKpsVMs4Jtrh8mtVXCLTBmKDzyjBVN2X8ALGXarItRgLa7k80lJjqTHwKCjiAMm
+T/eh67KzwmqBq5+8rJuXkax0NoXcDu6xkCMNHUQBYdnskaJqC2pu8hIsPTOrh7ie
+YSEuchFvu7lI0E+p7ypW65CMiy+Y/Rm5OWeHzjKkU5AbPtx/Me2vpQRCgaPwciZu
+nx2Ivi1+WYUBU1pGNDO7Xz7a8UHbDURkh7b+40uz2d7YQjKgrZBv6YwLAmw1LTE4
+bT9PM9n7LROnX8u6ksei8yiw8gZeVu+plWHbF+0O9siKAgxZlBna0XFgPpdzjMDT
+S/sfTIYXWlFj7camhsmTDRjo5G2B212evaKmKgh5ALLSFSk86ZN5KvQvcfsp81jv
+JCBmDStrsUgSMzy0Og2quHOd61hRTVlYzwvJvfMzHGKdIWwYUbHZOKo/KLEk3E36
+U9PkPoZGEL2ZeCH4F9Wh3mgg0knBfEmlPnGexmBby6NXGK7VW3l6xcJlpdMaXKNV
+Mfl2YK8k/34Hyft06KaYLEJsxAqk1pmLEmGhdZC1OAqovVB/1agSzpMMaB9OWWqN
+sTjDc7tkDt8BZ72NsAbCI9XmsX81W+NqPb6Ju1dtI09bn113LX/ZbOSdVicQcXSp
+l0FnTZaHgHJdQLcU28O7yFFOblqrvcMKpctdTA1TwG9LXEFttGrlpgjZF3edo0Ce
+z10epK+S
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kari_kea = ed['recipientInfos'][0]['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_kEAKeyEncryptionAlgorithm, kari_kea['algorithm'])
+ kwa, rest = der_decoder(
+ kari_kea['parameters'], asn1Spec=rfc5280.AlgorithmIdentifier())
+ self.assertFalse(rest)
+ self.assertTrue(kwa.prettyPrint())
+ self.assertEqual(kari_kea['parameters'], der_encoder(kwa))
+ self.assertEqual(rfc2876.id_fortezzaWrap80, kwa['algorithm'])
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_fortezzaConfidentialityAlgorithm, cea['algorithm'])
+ param, rest = der_decoder(cea['parameters'], rfc2876.Skipjack_Parm())
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, param['initialization-vector'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap.keys())
+ kari_kea = asn1Object['content']['recipientInfos'][0]['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_kEAKeyEncryptionAlgorithm, kari_kea['algorithm'])
+ self.assertEqual(rfc2876.id_fortezzaWrap80, kari_kea['parameters']['algorithm'])
+
+ cea = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_fortezzaConfidentialityAlgorithm, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, cea['parameters']['initialization-vector'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "\
+MCcwGAYJYIZIAWUCAQEYMAsGCWCGSAFlAgEBFzALBglghkgBZQIBAQQ="
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg = False
+ for cap in asn1Object:
+ if cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys():
+ if cap['parameters'].hasValue():
+ param, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(param))
+
+ if cap['capabilityID'] == rfc2876.id_kEAKeyEncryptionAlgorithm:
+ self.assertEqual(rfc2876.id_fortezzaWrap80, param['algorithm'])
+ found_wrap_alg = True
+
+ self.assertTrue(found_wrap_alg)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg = False
+ for cap in asn1Object:
+ if cap['capabilityID'] == rfc2876.id_kEAKeyEncryptionAlgorithm:
+ self.assertEqual(rfc2876.id_fortezzaWrap80, cap['parameters']['algorithm'])
+ found_wrap_alg = True
+
+ self.assertTrue(found_wrap_alg)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2985.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2985.py
new file mode 100644
index 0000000000..376475e60f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2985.py
@@ -0,0 +1,319 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7292
+
+
+class PKCS9AttrsTestCase(unittest.TestCase):
+ pem_text = """\
+MYIQjzAOBgNVBEExBwwFQWxpY2UwDwYIKwYBBQUHCQMxAxMBTTAQBgNVBAUxCRMH
+QjQ4LTAwNzAQBggrBgEFBQcJBDEEEwJVUzAQBggrBgEFBQcJBTEEEwJVUzARBgoq
+hkiG9w0BCRkEMQMCATAwFAYJKoZIhvcNAQkCMQcWBUFsaWNlMBgGCiqGSIb3DQEJ
+GQMxCgQIUTeqnHYky4AwHAYJKoZIhvcNAQkPMQ8wDTALBglghkgBZQMEAS0wHQYI
+KwYBBQUHCQExERgPMjAxOTA4MDMxMjAwMDBaMB0GCCsGAQUFBwkCMREMD0hlcm5k
+b24sIFZBLCBVUzApBgkqhkiG9w0BCRQxHB4aAEYAcgBpAGUAbgBkAGwAeQAgAE4A
+YQBtAGUwLwYJKoZIhvcNAQkIMSITIDEyMyBVbmtub3duIFdheSwgTm93aGVyZSwg
+VkEsIFVTMIGZBgoqhkiG9w0BCRkCMYGKMIGHMAsGCWCGSAFlAwQBLQR4VsJb7t4l
+IqjJCT54rqkbCJsBPE17YQJeEYvyA4M1aDIUU5GnCgEhctgMiDPWGMvaSziixdIg
+aU/0zvWvYCm8UwPvBBwMtm9X5NDvk9p4nXbGAT8E/OsV1SYWVvwRJwYak0yWWexM
+HSixw1Ljh2nb0fIbqwLOeMmIMIIEsQYKKoZIhvcNAQkZBTGCBKEwggSdBgkqhkiG
+9w0BBwKgggSOMIIEigIBATENMAsGCWCGSAFlAwQCAjBRBgkqhkiG9w0BBwGgRARC
+Q29udGVudC1UeXBlOiB0ZXh0L3BsYWluDQoNCldhdHNvbiwgY29tZSBoZXJlIC0g
+SSB3YW50IHRvIHNlZSB5b3UuoIICfDCCAngwggH+oAMCAQICCQCls1QoG7BuOzAK
+BggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcM
+B0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUyOTE0NDU0MVoXDTIw
+MDUyODE0NDU0MVowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQH
+EwdIZXJuZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGljZTEgMB4G
+CSqGSIb3DQEJARYRYWxpY2VAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6zXCYlmsEGD5vPu5hl9hDEjd1U
+HRgJIPoy3fJcWWeZ8FHCirICtuMgFisNscG/aTwKyDYOFDuqz/C2jyEwqgWCRyxy
+ohuJXtmjgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNl
+cnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYD
+VR0OBBYEFMS6Wg4+euM8gbD0Aqpouxbglg41MB8GA1UdIwQYMBaAFPI12zQE2qVV
+8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2gAMGUCMGO5H9E1uAveRGGaf48lN4po
+v2yH+hCAc5hOAuZKe/f40MKSF8q4w2ij+0euSaKFiAIxAL3gxp6sMitCmLQgOH6/
+RBIC/2syJ97y0KVp9da0PDAvwxLugCHTKZPjjpSLPHHc9TGCAaEwggGdAgEBMEww
+PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
+DwYDVQQKDAhCb2d1cyBDQQIJAKWzVCgbsG47MAsGCWCGSAFlAwQCAqCByDAYBgkq
+hkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xOTA1MjkxODIz
+MTlaMD8GCSqGSIb3DQEJBDEyBDC25CKk/YJnHtT3qsZtRPTosLmNUVhxxlbn8Jo2
++lys4+IKEOba8jebiTfTTPmZJmwwTQYLKoZIhvcNAQkQAgExPjA8BCDHTyEPZCdX
+CPUOh5EQs211nQ999bgFAi9zDBVz+ChTo4ABATAVMBOBEWFsaWNlQGV4YW1wbGUu
+Y29tMAoGCCqGSM49BAMDBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzmo18WwkF
+rEYH3EMsvpXEIGqsFTFN6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZyb2xSX6Gr
+/IfCIm0angfOI39g7lAZDyivjh5H/oQwggnoBgtghkgBhvhCAwGBWDGCCdcwggnT
+AgEDMIIJjwYJKoZIhvcNAQcBoIIJgASCCXwwggl4MIIGCAYJKoZIhvcNAQcBoIIF
++QSCBfUwggXxMIIF7QYLKoZIhvcNAQwKAQKgggT+MIIE+jAcBgoqhkiG9w0BDAED
+MA4ECO6rT/7SnK61AgIH0ASCBNhl7+ZgGmaQO8qy97gTAhXCjVM2/iV3LHWodlbY
+iHqpAJj42/Uye/3B7TNROXine1DMI9ZeetIDzYiA52i0sh7PhjBeuCIqFwiRJIv7
+bIKYCgz6qSOIAgqr6XdQnpeFp97YqDgST/RGQel7obCNO115+SlelmBxwwSik60p
+AwslawMzunvvH9qafrIiTa2myQqpRj/ifxjESJNZxG1O2FiplAi36r3icotim3Sj
+zzRJU5+90SqnkogjtxODrQYkv6fqg3qGY/RuwAy+eT3V/z+UUoyL22w1T8qdSFsN
+WmMnAFCSGBuoHHoZ22ipItKVg09UzTCWe3CbUmEfjJuJDmw3Oo7sWVYLltxjCS86
+XHWAauyFjmMr9aNsDiloGnFKSChslF6Ktj0F6ohOe+iReW5vi16EeEzbQiTjakpr
+eQZoeajC/N+XGoT6jKxbk5r1dtnEEJ+Q4wnvSjiGpr6frr4T+4pw301sptOjfO3f
+F23rKk7Advvi3k5xZobHcRmzDSfT9X5agtKlc4HCnHTz7XKHstXb1o1DSgTNVWQX
+phhFBm10gx6zfEHaLqyMtqXbWe2TuIHMwnBWiLnbhIBn+hbxK4MCfVz3cBZbApks
+Au/lXcVnakOJBcCtx/MMfZ3kcnI3Hs6W8rM2ASeDBLIQLVduOc6xlVSoYUQ24NNr
+9usfigQkcSTJZPIO52vPyIIQ7zR7U8TiqonkKWU3QJJVarPgLEYMUhBfNHqiGfx/
+d1Hf4MBoti8CMFUwsmOTv6d+cHYvQelqeFMXP0DE88gN/mkFBDAzXiXzAqMQcjJ+
+pyW6l4o2iQFSvXKSKg/IKved/hGp7RngQohjg4KlbqeGuRYea8Xs4pH5ue5KTeOc
+HGNI3Qi/Lmr2rd+e1iuGxwwYZHve6Z+Lxnb20zW9I/2MFm+KsCiB4Z/+x84jR7BG
+8l//lpuc2D/vxnKTxaaUAdUXM0Zwze7e+Gc2lMhVG5TJWR1KY51vN5J+apDYc8IR
+0L0c2bbkom3WkPq/po/dPDuoaX61nKmztUHaL5r5QZzBBwKVyhdw9J0btnWAFPNK
+vzgy5U9iV4+6jXH5TCmlIreszwRPoqqEaYRIfmUpp2+zy91PpzjTs98tx/HIAbOM
+fT3WmuTahEnEHehABhwq+S4xwzoVIskLbrcOP6l7UYYR7GTUCjKxh7ru0rSwHrqG
+9t33YdzJaFbz+8jb88xtf454Rvur66Cew/4GYX9u1Zef0DF9So1ay3IicpOf5emo
+VWIwg4bh7bELi78i/MbdWtNZQcXimykfeTsYH8Q4u+1uxHS5pwEWWwKiUnLQVpZP
+2ut255TdgSIhEILwsaLVelRrx/lp14EpY355FOusXiju6g14aWfBnt5udvuTXxDQ
+ZHPPNNk+gwzgvvTey98T941hYUctjg0NApJiB66bfrlYB9mkc5ftg5zqhEasYH5C
+4ajKKRNMM7zGlwSZvy8PPhnAeE3Q9LTnos0l4ygjQD/kMlvd7XSLW3GUzjyxtkG4
+gQh6LGvnafAbgu7GpcapKEppN86sXEePHiQjj92n103+TxMYWwtaO4iAwkjqdEdt
+avEHcXRcpdqC0st6nUwPAPAC4LKJbZgLQnNG+wlWIiCMMD56IdfQ7r/zGIr13MxC
+kjNNUdISoWWE5GnQMYHbMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFcGCSqGSIb3DQEJ
+FDFKHkgAMwBmADcAMQBhAGYANgA1AC0AMQA2ADgANwAtADQANAA0AGEALQA5AGYA
+NAA2AC0AYwA4AGIAZQAxADkANABjADMAZQA4AGUwawYJKwYBBAGCNxEBMV4eXABN
+AGkAYwByAG8AcwBvAGYAdAAgAEUAbgBoAGEAbgBjAGUAZAAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIAIAB2ADEALgAwMIIDaAYJ
+KoZIhvcNAQcBoIIDWQSCA1UwggNRMIIDTQYLKoZIhvcNAQwKAQOgggMlMIIDIQYK
+KoZIhvcNAQkWAaCCAxEEggMNMIIDCTCCAfGgAwIBAgIQNu32hzqhCKdHATXzboyI
+ETANBgkqhkiG9w0BAQUFADAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwIBcNMTYwNzE5
+MjIwMDAxWhgPMjExNjA2MjUyMjAwMDFaMBQxEjAQBgNVBAMTCWFub255bW91czCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALy2sEJMGNdcDg6BI7mdFM5T
+lPzo5sKBzvUnagK5SKBJ11xMPN5toPTBzICB/XTWEB3AwpD0O+srSca+bsUAyedS
+5V4BNp8qCyEu5RNRR8qPHheJ/guhLT96/gGI4jlrUyUhFntPkLKODxu+7KanMy6K
+dD+PVE8shXRUZTYe4PG64/c7z3wapnf4XoCXkJRzCY5f3MKz3Ul039kVnTlJcikd
+C7I9I9RflXLwXVl4nxUbeeRt6Z8WVWS4pCq+14v2aVPvP3mtVmAYHedRkvS04Hrx
+4xx98D3NSSw6Z5OLkzqOcFw15fYmH2NLdhh34gSWJmaaCBAbuQ+1rx/42p7MvvsC
+AwEAAaNVMFMwFQYDVR0lBA4wDAYKKwYBBAGCNwoDBDAvBgNVHREEKDAmoCQGCisG
+AQQBgjcUAgOgFgwUYW5vbnltb3VzQHdpbmRvd3MteAAwCQYDVR0TBAIwADANBgkq
+hkiG9w0BAQUFAAOCAQEAuH7iqY0/MLozwFb39ILYAJDHE+HToZBQbHQP4YtienrU
+Stk60rIp0WH65lam7m/JhgAcItc/tV1L8mEnLrvvKcA+NeIL8sDOtM28azvgcOi0
+P3roeLLLRCuiykUaKmUcZEDm9cDYKIpJf7QetWQ3uuGTk9iRzpH79x2ix35BnyWQ
+Rr3INZzmX/+9YRvPBXKYl/89F/w1ORYArpI9XtjfuPWaGQmM4f1WRHE2t3qRyKFF
+ri7QiZdpcSx5zvsRHSyjfUMoKs+b6upk+P01lIhg/ewwYngGab+fZhF15pTNN2hx
+8PdNGcrGzrkNKCmJKrWCa2xczuMA+z8SCuC1tYTKmDEVMBMGCSqGSIb3DQEJFTEG
+BAQBAAAAMDswHzAHBgUrDgMCGgQUpWCP/fZR0TK5BwGuqvTd0+duiKcEFJTubF2k
+HktMK+isIjxOTk4yJTOOAgIH0A==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.AttributeSet()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+ openTypesMap = {
+ rfc2985.pkcs_9_at_smimeCapabilities: rfc2985.SMIMECapabilities(),
+ }
+ openTypesMap.update(rfc5280.certificateAttributesMap)
+ openTypesMap.update(rfc5652.cmsAttributesMap)
+
+ for attr in asn1Object:
+ self.assertIn(attr['type'], openTypesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0], asn1Spec=openTypesMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ if attr['type'] == rfc2985.pkcs_9_at_userPKCS12:
+
+ self.assertEqual(univ.Integer(3), av['version'])
+ self.assertEqual(rfc5652.id_data, av['authSafe']['contentType'])
+
+ outdata, rest = der_decoder(
+ av['authSafe']['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ authsafe, rest = der_decoder(
+ outdata, asn1Spec=rfc7292.AuthenticatedSafe())
+
+ self.assertFalse(rest)
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+
+ indata, rest = der_decoder(
+ ci['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ sc, rest = der_decoder(
+ indata, asn1Spec=rfc7292.SafeContents())
+
+ self.assertFalse(rest)
+
+ for sb in sc:
+ if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
+ bv, rest = der_decoder(
+ sb['bagValue'], asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
+
+ self.assertFalse(rest)
+
+ for bagattr in sb['bagAttributes']:
+ if bagattr['attrType'] in openTypesMap:
+ inav, rest = der_decoder(
+ bagattr['attrValues'][0], asn1Spec=openTypesMap[bagattr['attrType']])
+
+ self.assertFalse(rest)
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_friendlyName:
+ self.assertEqual( "3f71af65-1687-444a-9f46-c8be194c3e8e", inav)
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_localKeyId:
+ self.assertEqual(univ.OctetString(hexValue='01000000'), inav)
+
+ if attr['type'] == rfc2985.pkcs_9_at_pkcs7PDU:
+ ci, rest = der_decoder(
+ attr['values'][0], asn1Spec=rfc5652.ContentInfo())
+
+ self.assertFalse(rest)
+ self.assertEqual(rfc5652.id_signedData, ci['contentType'])
+
+ sd, rest = der_decoder(
+ ci['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertEqual(1, sd['version'])
+
+ for si in sd['signerInfos']:
+ self.assertEqual(1, si['version'])
+
+ for siattr in si['signedAttrs']:
+ if siattr['attrType'] in openTypesMap:
+ siav, rest = der_decoder(
+ siattr['attrValues'][0], asn1Spec=openTypesMap[siattr['attrType']])
+
+ self.assertFalse(rest)
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_contentType:
+ self.assertEqual(rfc5652.id_data, siav)
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_messageDigest:
+ self.assertEqual('b6e422a4', siav.prettyPrint()[2:10])
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_signingTime:
+ self.assertEqual('190529182319Z', siav['utcTime'])
+
+ for choices in sd['certificates']:
+ for rdn in choices[0]['tbsCertificate']['subject']['rdnSequence']:
+ if rdn[0]['type'] in openTypesMap:
+ nv, rest = der_decoder(
+ rdn[0]['value'], asn1Spec=openTypesMap[rdn[0]['type']])
+ self.assertFalse(rest)
+
+ if rdn[0]['type'] == rfc2985.pkcs_9_at_emailAddress:
+ self.assertEqual('alice@example.com', nv)
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ rfc2985.pkcs_9_at_smimeCapabilities: rfc2985.SMIMECapabilities(),
+ }
+ openTypesMap.update(rfc5280.certificateAttributesMap)
+ openTypesMap.update(rfc5652.cmsAttributesMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypesMap, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object:
+ self.assertTrue(attr['type'], openTypesMap)
+
+ if attr['type'] == rfc2985.pkcs_9_at_userPKCS12:
+
+ self.assertEqual(univ.Integer(3), attr['values'][0]['version'])
+ self.assertEqual(rfc5652.id_data, attr['values'][0]['authSafe']['contentType'])
+
+ authsafe, rest = der_decoder(
+ attr['values'][0]['authSafe']['content'],
+ asn1Spec=rfc7292.AuthenticatedSafe())
+
+ self.assertFalse(rest)
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+
+ indata, rest = der_decoder(
+ ci['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ sc, rest = der_decoder(
+ indata, asn1Spec=rfc7292.SafeContents(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+
+ for sb in sc:
+ if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
+ for bagattr in sb['bagAttributes']:
+ if bagattr['attrType'] in openTypesMap:
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_friendlyName:
+ self.assertEqual(
+ "3f71af65-1687-444a-9f46-c8be194c3e8e",
+ bagattr['attrValues'][0])
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_localKeyId:
+ self.assertEqual(
+ univ.OctetString(hexValue='01000000'),
+ bagattr['attrValues'][0])
+
+ if attr['type'] == rfc2985.pkcs_9_at_pkcs7PDU:
+ self.assertEqual(rfc5652.id_signedData, attr['values'][0]['contentType'])
+ self.assertEqual(1, attr['values'][0]['content']['version'])
+
+ for si in attr['values'][0]['content']['signerInfos']:
+ self.assertEqual(1, si['version'])
+
+ for siattr in si['signedAttrs']:
+ if siattr['attrType'] in openTypesMap:
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_contentType:
+ self.assertEqual(rfc5652.id_data, siattr['attrValues'][0])
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_messageDigest:
+ self.assertEqual('b6e422a4', siattr['attrValues'][0].prettyPrint()[2:10])
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_signingTime:
+ self.assertEqual('190529182319Z', siattr['attrValues'][0]['utcTime'])
+
+ for choices in attr['values'][0]['content']['certificates']:
+ for rdn in choices[0]['tbsCertificate']['subject']['rdnSequence']:
+ if rdn[0]['type'] in openTypesMap:
+ if rdn[0]['type'] == rfc2985.pkcs_9_at_emailAddress:
+ self.assertEqual('alice@example.com', rdn[0]['value'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc2986.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc2986.py
new file mode 100644
index 0000000000..91e3d05645
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc2986.py
@@ -0,0 +1,90 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2986
+from pyasn1_modules import rfc5280
+
+
+class CertificationRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MIICxjCCAa4CAQAwgYAxCzAJBgNVBAYTAlVTMR0wGwYDVQQDDBRmY3UuZmFrZS5h
+ZGRyZXNzLm9yZzEXMBUGA1UEBwwOUGxlYXNhbnQgR3JvdmUxHDAaBgNVBAoME0Zh
+a2UgQ29tcGFueSBVbml0ZWQxDTALBgNVBAgMBFV0YWgxDDAKBgNVBAsMA0VuZzCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALvnYesymhLKSm9Llix53BUA
+h99xMDBUYk0OB1VIdNQyjmFabHinM+lYUzVzrfcm1xtYB5QYKbsYuwZ4r5WI7qho
+CRJy6JwXqKpOe72ScCogxlGDr2QtKjtvyWrRwXBHX1/OqVSZ3hdz3njhKpmq6HgK
+87vH26RCSmK8FqCgn+qePfpspA7GzBvYwXhXluQtG7r4yBMKNRTQlPst8Vcy+iK+
+pI8hmQVrzGi8Hgbpr2L9EjPUOlAQEb8hxeKc7s5VhjN/RHMLVMX8YczZYt7mcDKr
+3PMwOVmXL1DMCtnS50MA2AxcPWcbQBeGyMroP+DLhAt6y1/IT0H5sQruNQw4euMC
+AwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQBQXYQPfH5Wy4o0ZFbKQOO1e3dHV8rl
+e8m9Z6qLgJO8rtW+OI+4FavJ6zjUvNVzd9JJxgwQ/1xprwrXh36nPcSyNLpGs7JT
+6u7TGQ38QQAOmziLXzauMWGBeLuzWGmOKA1cs5HFGLSmbxF3+0IWpz4GlD86pU1+
+WYyWgWHHAMA+kFYwBUR6CvPkmhshnZ8vrQavoOlcidCJ8o6IGA7N/Z0/NrgIDcoz
+YaruhoMrmRKHKNpfamhT0gvqEPBec+UB3uLElESIqaeqYc6eMtUQP3lqyghF6I0M
+fi6h7i9VVAZpslaKFfkNg12gLbbsCB1q36l5VXjHY/qe0FIUa9ogRrOi
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2986.CertificationRequest()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=rfc2986.CertificationRequest(),
+ openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for rdn in asn1Object['certificationRequestInfo']['subject']['rdnSequence']:
+ for atv in rdn:
+ if atv['type'] == rfc5280.id_at_countryName:
+ self.assertEqual(char.PrintableString('US'), atv['value'])
+
+ else:
+ self.assertGreater(len(atv['value']['utf8String']), 2)
+
+ spki_alg = asn1Object['certificationRequestInfo']['subjectPKInfo']['algorithm']
+
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ sig_alg = asn1Object['signatureAlgorithm']
+
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3058.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3058.py
new file mode 100644
index 0000000000..0a0645ca2f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3058.py
@@ -0,0 +1,140 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3058
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFgwYJKoZIhvcNAQcDoIIFdDCCBXACAQIxXaJbAgEEMCMEEDiCUYXKXu8SzLos
+n2xeYP4YDzIwMTkwOTEyMTIwMDAwWjAPBgsrBgEEAYE8BwEBBgUABCB0G/YBGH3L
+3RhoG0mK33M8IvRYAOsnHB5MfUAOGF6kuDCCBQoGCSqGSIb3DQEHATAZBgsrBgEE
+AYE8BwEBAjAKBAhCT0dVU19JVoCCBOBzx7F6GMkP+C0Q4iuDq0rkSZprg8nuXx/4
+S3IMP999BrJdUAbPYxdQhAwTOZIuueyv00TJe/Eam9yyLuZXy0PFlTRi7KED8L8c
+yHsRoEobWGMLvE3D4hEhTGttElqQxBvMxZZgm6kLnNG7j8Z72L4lU4aARLYTQvkt
+lJnnfCaccDSiWzU8eXcXdnZAzcKR7CoDc0/XBpdDRddvQ7KXoarXYHuSybt649YD
+cpy0SN9gEPqcFPrBB3nusAx4VOTlpx5Z3ZJv/TEymN8KDobNfykBZURTwupO9WaV
+JZ3Hd/d8C1SCJn6DHuM1jwDp26WfzO8xCfea08MJrnQbNKsDHwmt4dFZIOvcOnwR
+8nNSB/Lt1aUj3GzluHVMyQQyT4AdZDmwFdNmQOBUBLmbWYhtd7t3O7Eqx8bGNa7V
+7LL0nvua04aj1oA6ph/G/8jxhByBYdN5Bwg7f1Ga3ZCwju2tFoQnWOCPYTVOjmBE
+JshBbNC7KhLpp9+C7/13A9cIC3T7Reuc7m+Fopf9Fabu97yFiyJPS8jSF0EnesNG
+R1L1Uvo2Wdc66iECoSrxvezaSgGKB2uLTnaFx4ASVMcP7gDipEOIwuUUuVCqgmWk
+HAK0Q9mwhBLLrYrsn9OjDHFpvkWgWNRMLl/v3E9A+grFh2BQHkB4C7keB1ZOfj1S
+qDi/+ylM9I1FOYMxVXJn2qHMl+QOkfdMoIATm3n3DiBI97/uX4x5KaX074v0dN31
+WeDcsFsh2ze5Dhx8vLJCaXLzWqkmNHX5G/CjjqE6bSR/awgWLRZQuY/9fMvDpvVJ
+uId/+OoWDtMVPIsyQ8w8yZzv+SkuZhsrJMHiKd5qxNQv5sOvC765LMUCNNwj7WzP
+hajintFXLAEMpIjk5xt3eIy3hdYla3PQoFfqcHOVX4EFMLBoYwBTgik8Fg669yXt
+MlbH84MGNs7jObhP/rrDkgbe0qmxUyzgm2uHya1VcItMGYoPPKMFU3ZfwAsZdqsi
+1GAtruTzSUmOpMfAoKOIAyZP96HrsrPCaoGrn7ysm5eRrHQ2hdwO7rGQIw0dRAFh
+2eyRomoLam7yEiw9M6uHuJ5hIS5yEW+7uUjQT6nvKlbrkIyLL5j9Gbk5Z4fOMqRT
+kBs+3H8x7a+lBEKBo/ByJm6fHYi+LX5ZhQFTWkY0M7tfPtrxQdsNRGSHtv7jS7PZ
+3thCMqCtkG/pjAsCbDUtMThtP08z2fstE6dfy7qSx6LzKLDyBl5W76mVYdsX7Q72
+yIoCDFmUGdrRcWA+l3OMwNNL+x9MhhdaUWPtxqaGyZMNGOjkbYHbXZ69oqYqCHkA
+stIVKTzpk3kq9C9x+ynzWO8kIGYNK2uxSBIzPLQ6Daq4c53rWFFNWVjPC8m98zMc
+Yp0hbBhRsdk4qj8osSTcTfpT0+Q+hkYQvZl4IfgX1aHeaCDSScF8SaU+cZ7GYFvL
+o1cYrtVbeXrFwmWl0xpco1Ux+XZgryT/fgfJ+3ToppgsQmzECqTWmYsSYaF1kLU4
+Cqi9UH/VqBLOkwxoH05Zao2xOMNzu2QO3wFnvY2wBsIj1eaxfzVb42o9vom7V20j
+T1ufXXctf9ls5J1WJxBxdKmXQWdNloeAcl1AtxTbw7vIUU5uWqu9wwqly11MDVPA
+b0tcQW20auWmCNkXd52jQJ7PXR6kr5I=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_alg_CMSIDEAwrap, kwa['algorithm'])
+ self.assertEqual(kwa['parameters'], der_encoder(univ.Null("")))
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_IDEA_CBC, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], asn1Spec=rfc3058.IDEA_CBCPar())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, param['iv'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ kekri = asn1Object['content']['recipientInfos'][0]['kekri']
+ kwa = kekri['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_alg_CMSIDEAwrap, kwa['algorithm'])
+ self.assertEqual(univ.Null(""), kwa['parameters'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_IDEA_CBC, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, cea['parameters']['iv'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "MB4wDQYLKwYBBAGBPAcBAQIwDQYLKwYBBAGBPAcBAQY="
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ alg_oid_list = [ ]
+ for cap in asn1Object:
+ self.assertFalse(cap['parameters'].hasValue())
+ alg_oid_list.append(cap['capabilityID'])
+
+ self.assertIn(rfc3058.id_IDEA_CBC, alg_oid_list)
+ self.assertIn(rfc3058.id_alg_CMSIDEAwrap, alg_oid_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3114.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3114.py
new file mode 100644
index 0000000000..d0492a66c5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3114.py
@@ -0,0 +1,244 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3114
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5755
+
+
+class SecurityLabelTestCase(unittest.TestCase):
+ pem_text = """\
+MIITHAYJKoZIhvcNAQcCoIITDTCCEwkCAQMxDTALBglghkgBZQMEAgIwggeUBgsq
+hkiG9w0BCRABF6CCB4MEggd/MIIHewIBADGCAk8wggJLAgEAMDMwJjEUMBIGA1UE
+CgwLZXhhbXBsZS5jb20xDjAMBgNVBAMMBUFsaWNlAgkAg/ULtwvVxA4wDQYJKoZI
+hvcNAQEBBQAEggIAdZphtN3x8a8kZoAFY15HYRD6JyPBueRUhLbTPoOH3pZ9xeDK
++zVXGlahl1y1UOe+McEx2oD7cxAkhFuruNZMrCYEBCTZMwVhyEOZlBXdZEs8rZUH
+L3FFE5PJnygsSIO9DMxd1UuTFGTgCm5V5ZLFGmjeEGJRbsfTyo52S7iseJqIN3dl
+743DbApu0+yuUoXKxqKdUFlEVxmhvc+Qbg/zfiwu8PTsYiUQDMBi4cdIlju8iLjj
+389xQHNyndXHWD51is89GG8vpBe+IsN8mnbGtCcpqtJ/c65ErJhHTR7rSJSMEqQD
+0LPOCKIY1q9FaSSJfMXJZk9t/rPxgUEVjfw7hAkKpgOAqoZRN+FpnFyBl0FnnXo8
+kLp55tfVyNibtUpmdCPkOwt9b3jAtKtnvDQ2YqY1/llfEUnFOVDKwuC6MYwifm92
+qNlAQA/T0+ocjs6gA9zOLx+wD1zqM13hMD/L+T2OHL/WgvGb62JLrNHXuPWA8RSh
+O4kIlPtARKXap2S3+MX/kpSUUrNa65Y5uK1jwFFclczG+CPCIBBn6iJiQT/vOX1I
+97YUP4Qq6OGkjK064Bq6o8+e5+NmIOBcygYRv6wA7vGkmPLSWbnw99qD728bBh84
+fC3EjItdusqGIwjzL0eSUWXJ5eu0Z3mYhJGN1pe0R/TEB5ibiJsMLpWAr3gwggUP
+BgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEGMBEEDE2HVyIurFKUEX8MEgIBEICCBOD+
+L7PeC/BpmMOb9KlS+r+LD+49fi6FGBrs8aieGi7ezZQEiFYS38aYQzTYYCt3SbJQ
+TkX1fDsGZiaw/HRiNh7sJnxWATm+XNKGoq+Wls9RhSJ45Sw4GMqwpoxZjeT84Uoz
+OITk3l3fV+3XiGcCejHkp8DAKZFExd5rrjlpnnAOBX6w8NrXO4s2n0LrMhtBU4eB
+2YKhGgs5Q6wQyXtU7rc7OOwTGvxWEONzSHJ01pyvqVQZAohsZPaWLULrM/kEGkrh
+G4jcaVjVPfULi7Uqo14imYhdCq5Ba4bwqI0Ot6mB27KD6LlOnVC/YmXCNIoYoWmq
+y1o3pSm9ovnLEO/dzxQjEJXYeWRje9M/sTxotM/5oZBpYMHqIwHTJbehXFgp8+oD
+jyTfayMYA3fTcTH3XbGPQfnYW2U9+ka/JhcSYybM8cuDNFd1I1LIQXoJRITXtkvP
+UbJqm+s6DtS5yvG9I8aQxlT365zphS4vbQaO74ujO8bE3dynrvTTV0c318TcHpN3
+DY9PIt6mHXMIPDLEA4wes90zg6iah5XiQcLtfLaAdYwEEGlImGD8n0kOhSNgclSL
+Mklpj5mVOs8exli3qoXlVMRJcBptSwOe0QPcRY30spywS4zt1UDIQ0jaecGGVtUY
+j586nkubhAxwZkuQKWxgt6yYTpGNSKCdvd+ygfyGJRDbWdn6nck/EPnG1773KTHR
+hMrXrBPBpSlfyJ/ju3644CCFqCjFoTh4bmB63k9ejUEVkJIJuoeKeTBaUxbCIink
+K4htBkgchHP51RJp4q9jQbziD3aOhg13hO1GFQ4E/1DNIJxbEnURNp/ga8SqmnLY
+8f5Pzwhm1mSzZf+obowbQ+epISrswWyjUKKO+uJfrAVN2TS/5+X6T3U6pBWWjH6+
+xDngrAJwtIdKBo0iSEwJ2eir4X8TcrSy9l8RSOiTPtqS5dF3RWSWOzkcO72fHCf/
+42+DLgUVX8Oe5mUvp7QYiXXsXGezLJ8hPIrGuOEypafDv3TwFkBc2MIB0QUhk+GG
+1ENY3jiNcyEbovF5Lzz+ubvechHSb1arBuEczJzN4riM2Dc3c+r8N/2Ft6eivK7H
+UuYX1uAcArhunZpA8yBGLF1m+DUXFtzWAUvfMKYPdfwGMckghF7YwLrTXd8ZhPIk
+HNO1KdwQKIRfgIlUPfTxRB7eNrG/Ma9a/IwrcI1QtkXU59uIZIw+7+FHZRWPsOjT
+u1Pdy+JtcSTG4dmS+DIwqpUzdu6MaBCVaOhXHwybvaSPTfMG/nR/NxF1FI8xgydn
+zXZs8HtFDL9iytKnvXHx+IIz8Rahp/PK8S80vPQNIeef/JgnIhtosID/A614LW1t
+B4cWdveYlD5U8T/XXInAtCY78Q9WJD+ecu87OJmlOdmjrFvitpQAo8+NGWxc7Wl7
+LtgDuYel7oXFCVtI2npbA7R+K5/kzUvDCY6GTgzn1Gfamc1/Op6Ue17qd/emvhbI
+x+ng3swf8TJVnCNDIXucKVA4boXSlCEhCGzfoZZYGVvm1/hrypiBtpUIKWTxLnz4
+AQJdZ5LGiCQJQU1wMyHsg6vWmNaJVhGHE6D/EnKsvJptFIkAx0wWkh35s48p7EbU
+8QBg//5eNru6yvLRutfdBX7T4w681pCD+dOiom75C3UdahrfoFkNsZ2hB88+qNsE
+EPb/xuGu8ZzSPZhakhl2NS2ggglpMIICAjCCAYigAwIBAgIJAOiR1gaRT87yMAoG
+CCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNTE0MDg1ODExWhcNMjEw
+NTEzMDg1ODExWjA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcM
+B0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMHYwEAYHKoZIzj0CAQYFK4EEACID
+YgAE8FF2VLHojmqlnawpQwjG6fWBQDPOy05hYq8oKcyg1PXH6kgoO8wQyKYVwsDH
+Evc1Vg6ErQm3LzdI8OQpYx3H386R2F/dT/PEmUSdcOIWsB4zrFsbzNwJGIGeZ33Z
+S+xGo1AwTjAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYDVR0jBBgw
+FoAU8jXbNATapVXyvWkDmbBi7OIVCMEwDAYDVR0TBAUwAwEB/zAKBggqhkjOPQQD
+AwNoADBlAjBaUY2Nv03KolLNRJ2wSoNK8xlvzIWTFgIhsBWpD1SpJxRRv22kkoaw
+9bBtmyctW+YCMQC3/KmjNtSFDDh1I+lbOufkFDSQpsMzcNAlwEAERQGgg6iXX+Nh
+A+bFqNC7FyF4WWQwggOHMIIDDqADAgECAgkApbNUKBuwbkYwCgYIKoZIzj0EAwMw
+PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
+DwYDVQQKDAhCb2d1cyBDQTAeFw0xOTExMDIxODQyMThaFw0yMDExMDExODQyMTha
+MGYxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQ
+MA4GA1UEChMHRXhhbXBsZTEMMAoGA1UECxMDUENBMRgwFgYDVQQDEw9wY2EuZXhh
+bXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQ9/m9uACpsTl2frBuILHiw
+IJyfUEpKseYJ+JYL1AtIZU0YeJ9DA+32h0ZeNGJDtDClnbBEPpn3W/5+TzldcsTe
+QlAJB08gcVRjkQym9LtPq7rGubCeVWlRRE9M7F9znk6jggGtMIIBqTAdBgNVHQ4E
+FgQUJuolDwsyICik11oKjf8t3L1/VGUwbwYDVR0jBGgwZoAU8jXbNATapVXyvWkD
+mbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UE
+BwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYGkU/O8jAPBgNVHRMB
+Af8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0
+aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1Ud
+IAQOMAwwCgYIKwYBBQUHDQIwCgYDVR02BAMCAQIwgZEGCCsGAQUFBwEVBIGEMIGB
+MFkGCyqGSIb3DQEJEAcDAwIF4DFGMESACyqGSIb3DQEJEAcEgTUwMwwXTEFXIERF
+UEFSVE1FTlQgVVNFIE9OTFkMGEhVTUFOIFJFU09VUkNFUyBVU0UgT05MWTARBgsq
+hkiG9w0BCRAHAgMCBPAwEQYLKoZIhvcNAQkQBwEDAgXgMAoGCCqGSM49BAMDA2cA
+MGQCMBlIP4FWrNzWXR8OgfcvCLGPG+110EdsmwznIF6ThT1vbJYvYoSbBXTZ9OCh
+/cCMMQIwJOySybHl/eLkNJh971DWF4mUQkt3WGBmZ+9Rg2cJTdat2ZjPKg101NuD
+tkUyjGxfMIID1DCCA1qgAwIBAgIUUc1IQGJpeYQ0XwOS2ZmVEb3aeZ0wCgYIKoZI
+zj0EAwMwZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJu
+ZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQwwCgYDVQQLEwNQQ0ExGDAWBgNVBAMTD3Bj
+YS5leGFtcGxlLmNvbTAeFw0xOTExMDUyMjIwNDZaFw0yMDExMDQyMjIwNDZaMIGS
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAO
+BgNVBAoTB0V4YW1wbGUxIjAgBgNVBAsTGUh1bWFuIFJlc291cmNlIERlcGFydG1l
+bnQxDTALBgNVBAMTBEZyZWQxHzAdBgkqhkiG9w0BCQEWEGZyZWRAZXhhbXBsZS5j
+b20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQObFslQ2EBP0xlDJ3sRnsNaqm/woQg
+KpBispSxXxK5bWUVpfnWsZnjLWhtDuPcu1BcBlM2g7gwL/aw8nUSIK3D8Ja9rTUQ
+QXc3zxnkcl8+8znNXHMGByRjPUH87C+TOrqjggGaMIIBljAdBgNVHQ4EFgQU5m71
+1OqFDNGRSWMOSzTXjpTLIFUwbwYDVR0jBGgwZoAUJuolDwsyICik11oKjf8t3L1/
+VGWhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVy
+bmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQCls1QoG7BuRjAPBgNVHRMBAf8EBTAD
+AQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0
+ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1UdIAQOMAww
+CgYIKwYBBQUHDQIwCgYDVR02BAMCAQIwfwYDVR0JBHgwdjBJBgNVBDcxQjBABgsq
+hkiG9w0BCRAHAwMCBeAxLTArgAsqhkiG9w0BCRAHBIEcMBoMGEhVTUFOIFJFU09V
+UkNFUyBVU0UgT05MWTApBglghkgBZQIBBUQxHAwaSHVtYW4gUmVzb3VyY2VzIERl
+cGFydG1lbnQwCgYIKoZIzj0EAwMDaAAwZQIwVh/RypULFgPpAN0I7OvuMomRWnm/
+Hea3Hk8PtTRz2Zai8iYat7oeAmGVgMhSXy2jAjEAuJW4l/CFatBy4W/lZ7gS3weB
+dBa5WEDIFFMC7GjGtCeLtXYqWfBnRdK26dOaHLB2MYIB7jCCAeoCAQEwfjBmMQsw
+CQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNV
+BAoTB0V4YW1wbGUxDDAKBgNVBAsTA1BDQTEYMBYGA1UEAxMPcGNhLmV4YW1wbGUu
+Y29tAhRRzUhAYml5hDRfA5LZmZURvdp5nTALBglghkgBZQMEAgKggeIwGgYJKoZI
+hvcNAQkDMQ0GCyqGSIb3DQEJEAEXMBwGCSqGSIb3DQEJBTEPFw0xOTExMDgyMDA4
+MzFaMD8GCSqGSIb3DQEJBDEyBDCd5WyvIB0VdXgPBWPtI152MIJLg5o68IRimCXx
+bVY0j3YyAKbi0egiZ/UunkyCfv0wZQYLKoZIhvcNAQkQAgIxVjFUAgEIBgsqhkiG
+9w0BCRAHAzEtMCuACyqGSIb3DQEJEAcEgRwwGgwYSFVNQU4gUkVTT1VSQ0VTIFVT
+RSBPTkxZExNCb2FndXMgUHJpdmFjeSBNYXJrMAoGCCqGSM49BAMDBGcwZQIwWkD7
+03QoNrKL5HJnuGJqvML1KlUXZDHnFpnJ+QMzXi8gocyfpRXWm6h0NjXieE0XAjEA
+uuDSOoaUIz+G9aemAE0ldpo1c0avNGa7BtynUTHmwosD6Sjfj0epAg9OnMedOjbr
+"""
+
+ def testDerCodec(self):
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5083.id_ct_authEnvelopedData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5083.id_ct_authEnvelopedData: lambda x: None
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ if next_layer == rfc5652.id_signedData:
+ attrs = asn1Object['signerInfos'][0]['signedAttrs']
+ certs = asn1Object['certificates']
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ spid = rfc3114.id_tsp_TEST_Whirlpool
+ catid = rfc3114.id_tsp_TEST_Whirlpool_Categories
+ conf = rfc3114.Whirlpool_SecurityClassification(value='whirlpool-confidential')
+
+ self.assertIn(catid, rfc5755.securityCategoryMap)
+ self.assertIn(rfc5755.id_at_clearance, rfc5280.certificateAttributesMap)
+ self.assertIn(rfc5280.id_ce_subjectDirectoryAttributes, rfc5280.certificateExtensionsMap)
+
+ security_label_okay = False
+
+ for attr in attrs:
+ if attr['attrType'] == rfc5035.id_aa_securityLabel:
+ esssl, rest = der_decoder(
+ attr['attrValues'][0], asn1Spec=rfc5035.ESSSecurityLabel())
+
+ self.assertFalse(rest)
+ self.assertTrue(esssl.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(esssl))
+
+ self.assertEqual(spid, esssl['security-policy-identifier'])
+ self.assertEqual(conf, esssl['security-classification'])
+
+ for cat in esssl['security-categories']:
+ if cat['type'] == catid:
+ scv, rest = der_decoder(
+ cat['value'], asn1Spec=rfc3114.SecurityCategoryValues())
+
+ self.assertFalse(rest)
+ self.assertTrue(scv.prettyPrint())
+ self.assertEqual(cat['value'], der_encoder(scv))
+
+ for scv_str in scv:
+ self.assertIn('USE ONLY', scv_str)
+ security_label_okay = True
+
+ self.assertTrue(security_label_okay)
+
+ clearance_okay = False
+ for cert_choice in certs:
+ for extn in cert_choice['certificate']['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+
+ if attr['type'] == rfc5755.id_at_clearance:
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5280.certificateAttributesMap[attr['type']])
+
+ self.assertEqual(spid, av['policyId'])
+
+ for cat in av['securityCategories']:
+
+ self.assertEqual(catid, cat['type'])
+
+ scv, rest = der_decoder(
+ cat['value'],
+ asn1Spec=rfc5755.securityCategoryMap[cat['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(scv.prettyPrint())
+ self.assertEqual(cat['value'], der_encoder(scv))
+
+ for scv_str in scv:
+ self.assertIn('USE ONLY', scv_str)
+ clearance_okay = True
+
+ self.assertTrue(clearance_okay)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3125.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3125.py
new file mode 100644
index 0000000000..d7072b91be
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3125.py
@@ -0,0 +1,109 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc3125
+
+
+class SignaturePolicyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIMYzALBglghkgBZQMEAgEwggwwBgorgR6RmYQFAQICGA8yMDE2MTAwMjAwMDAwMFowgaSk
+XjBcMQswCQYDVQQGEwJTSzETMBEGA1UEBwwKQnJhdGlzbGF2YTEiMCAGA1UECgwZTmFyb2Ru
+eSBiZXpwZWNub3N0bnkgdXJhZDEUMBIGA1UECwwLU2VrY2lhIElCRVCGQmh0dHA6Ly9lcC5u
+YnVzci5zay90cnVzdGVkX2RhdGEvMjAxNjEwMDIwMDAwMDB6c2lnbmF0dXJlcG9saWN5LmRl
+cgyBz0VOOiBFbC4gc2lnbmF0dXJlL3NlYWwsIG9wdGlvbmFsIGVsLiB0aW1lLXN0YW1wIG92
+ZXIgT0NTUCwgYWNjb3JkaW5nIHRvIFJlZ3VsYXRpb24gKEVVKSBObyA5MTAvMjAxNC4gU0s6
+IEVsLiBwb2RwaXMvcGXEjWHFpSwgdm9saXRlxL5uw6EgZWwuIMSNYXNvdsOhIHBlxI1pYXRr
+YSBuYWQgT0NTUCwgcG9kxL5hIG5hcmlhZGVuaWEgKEXDmikgxI0uIDkxMC8yMDE0LjCCCpYw
+IhgPMjAxNjEwMDIwMDAwMDBaGA8yMDIxMTAwMjAwMDAwMFowggpsoD8wPTA3MC4GCSqGSIb3
+DQEJAwYJKoZIhvcNAQkEBgkqhkiG9w0BCQUGCyqGSIb3DQEJEAIvMAChAwoBAjACMACiEjAQ
+ow4wDAIBAAIBAAIBAAIBAaSCChMwggoPoIIB/zCCAfswCwYJYIZIAWUDBAIBMAsGCWCGSAFl
+AwQCAjALBglghkgBZQMEAgMwCwYJYIZIAWUDBAIGMAsGCWCGSAFlAwQCCDALBglghkgBZQME
+AgkwCwYJYIZIAWUDBAIKMA8GCWCGSAFlAwQDAgICCAAwDwYJYIZIAWUDBAMDAgIIADAPBglg
+hkgBZQMEAwQCAggAMA8GCWCGSAFlAwQDBgICCAAwDwYJYIZIAWUDBAMHAgIIADAPBglghkgB
+ZQMEAwgCAggAMA4GCCqGSM49BAMCAgIBADAOBggqhkjOPQQDAwICAQAwDgYIKoZIzj0EAwQC
+AgEAMA8GCWCGSAFlAwQDCgICAQAwDwYJYIZIAWUDBAMLAgIBADAPBglghkgBZQMEAwwCAgEA
+MA8GCSqGSIb3DQEBCwICCAAwDwYJKoZIhvcNAQEMAgIIADAPBgkqhkiG9w0BAQ0CAggAMA8G
+CWCGSAFlAwQDDgICCAAwDwYJYIZIAWUDBAMPAgIIADAPBglghkgBZQMEAxACAggAMA8GCSqG
+SIb3DQEBCgICCAAwDwYJKoZIhvcNAQEBAgIIADANBgcqhkjOPQIBAgIBADAOBggrJAMDAgUC
+AQICAQAwDgYIKyQDAwIFBAQCAgEAMA4GCCskAwMCBQQFAgIBADAOBggrJAMDAgUEBgICAQCh
+ggH/MIIB+zALBglghkgBZQMEAgEwCwYJYIZIAWUDBAICMAsGCWCGSAFlAwQCAzALBglghkgB
+ZQMEAgYwCwYJYIZIAWUDBAIIMAsGCWCGSAFlAwQCCTALBglghkgBZQMEAgowDwYJYIZIAWUD
+BAMCAgIIADAPBglghkgBZQMEAwMCAggAMA8GCWCGSAFlAwQDBAICCAAwDwYJYIZIAWUDBAMG
+AgIIADAPBglghkgBZQMEAwcCAggAMA8GCWCGSAFlAwQDCAICCAAwDgYIKoZIzj0EAwICAgEA
+MA4GCCqGSM49BAMDAgIBADAOBggqhkjOPQQDBAICAQAwDwYJYIZIAWUDBAMKAgIBADAPBglg
+hkgBZQMEAwsCAgEAMA8GCWCGSAFlAwQDDAICAQAwDwYJKoZIhvcNAQELAgIIADAPBgkqhkiG
+9w0BAQwCAggAMA8GCSqGSIb3DQEBDQICCAAwDwYJYIZIAWUDBAMOAgIIADAPBglghkgBZQME
+Aw8CAggAMA8GCWCGSAFlAwQDEAICCAAwDwYJKoZIhvcNAQEKAgIIADAPBgkqhkiG9w0BAQEC
+AggAMA0GByqGSM49AgECAgEAMA4GCCskAwMCBQIBAgIBADAOBggrJAMDAgUEBAICAQAwDgYI
+KyQDAwIFBAUCAgEAMA4GCCskAwMCBQQGAgIBAKKCAf8wggH7MAsGCWCGSAFlAwQCATALBglg
+hkgBZQMEAgIwCwYJYIZIAWUDBAIDMAsGCWCGSAFlAwQCBjALBglghkgBZQMEAggwCwYJYIZI
+AWUDBAIJMAsGCWCGSAFlAwQCCjAPBglghkgBZQMEAwICAggAMA8GCWCGSAFlAwQDAwICCAAw
+DwYJYIZIAWUDBAMEAgIIADAPBglghkgBZQMEAwYCAggAMA8GCWCGSAFlAwQDBwICCAAwDwYJ
+YIZIAWUDBAMIAgIIADAOBggqhkjOPQQDAgICAQAwDgYIKoZIzj0EAwMCAgEAMA4GCCqGSM49
+BAMEAgIBADAPBglghkgBZQMEAwoCAgEAMA8GCWCGSAFlAwQDCwICAQAwDwYJYIZIAWUDBAMM
+AgIBADAPBgkqhkiG9w0BAQsCAggAMA8GCSqGSIb3DQEBDAICCAAwDwYJKoZIhvcNAQENAgII
+ADAPBglghkgBZQMEAw4CAggAMA8GCWCGSAFlAwQDDwICCAAwDwYJYIZIAWUDBAMQAgIIADAP
+BgkqhkiG9w0BAQoCAggAMA8GCSqGSIb3DQEBAQICCAAwDQYHKoZIzj0CAQICAQAwDgYIKyQD
+AwIFAgECAgEAMA4GCCskAwMCBQQEAgIBADAOBggrJAMDAgUEBQICAQAwDgYIKyQDAwIFBAYC
+AgEAo4IB/zCCAfswCwYJYIZIAWUDBAIBMAsGCWCGSAFlAwQCAjALBglghkgBZQMEAgMwCwYJ
+YIZIAWUDBAIGMAsGCWCGSAFlAwQCCDALBglghkgBZQMEAgkwCwYJYIZIAWUDBAIKMA8GCWCG
+SAFlAwQDAgICCAAwDwYJYIZIAWUDBAMDAgIIADAPBglghkgBZQMEAwQCAggAMA8GCWCGSAFl
+AwQDBgICCAAwDwYJYIZIAWUDBAMHAgIIADAPBglghkgBZQMEAwgCAggAMA4GCCqGSM49BAMC
+AgIBADAOBggqhkjOPQQDAwICAQAwDgYIKoZIzj0EAwQCAgEAMA8GCWCGSAFlAwQDCgICAQAw
+DwYJYIZIAWUDBAMLAgIBADAPBglghkgBZQMEAwwCAgEAMA8GCSqGSIb3DQEBCwICCAAwDwYJ
+KoZIhvcNAQEMAgIIADAPBgkqhkiG9w0BAQ0CAggAMA8GCWCGSAFlAwQDDgICCAAwDwYJYIZI
+AWUDBAMPAgIIADAPBglghkgBZQMEAxACAggAMA8GCSqGSIb3DQEBCgICCAAwDwYJKoZIhvcN
+AQEBAgIIADANBgcqhkjOPQIBAgIBADAOBggrJAMDAgUCAQICAQAwDgYIKyQDAwIFBAQCAgEA
+MA4GCCskAwMCBQQFAgIBADAOBggrJAMDAgUEBgICAQCkggH/MIIB+zALBglghkgBZQMEAgEw
+CwYJYIZIAWUDBAICMAsGCWCGSAFlAwQCAzALBglghkgBZQMEAgYwCwYJYIZIAWUDBAIIMAsG
+CWCGSAFlAwQCCTALBglghkgBZQMEAgowDwYJYIZIAWUDBAMCAgIIADAPBglghkgBZQMEAwMC
+AggAMA8GCWCGSAFlAwQDBAICCAAwDwYJYIZIAWUDBAMGAgIIADAPBglghkgBZQMEAwcCAggA
+MA8GCWCGSAFlAwQDCAICCAAwDgYIKoZIzj0EAwICAgEAMA4GCCqGSM49BAMDAgIBADAOBggq
+hkjOPQQDBAICAQAwDwYJYIZIAWUDBAMKAgIBADAPBglghkgBZQMEAwsCAgEAMA8GCWCGSAFl
+AwQDDAICAQAwDwYJKoZIhvcNAQELAgIIADAPBgkqhkiG9w0BAQwCAggAMA8GCSqGSIb3DQEB
+DQICCAAwDwYJYIZIAWUDBAMOAgIIADAPBglghkgBZQMEAw8CAggAMA8GCWCGSAFlAwQDEAIC
+CAAwDwYJKoZIhvcNAQEKAgIIADAPBgkqhkiG9w0BAQECAggAMA0GByqGSM49AgECAgEAMA4G
+CCskAwMCBQIBAgIBADAOBggrJAMDAgUEBAICAQAwDgYIKyQDAwIFBAUCAgEAMA4GCCskAwMC
+BQQGAgIBADAABCAaWobQZ1EuANtF/NjfuaBXR0nR0fKnGJ7Z8t/mregtvQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3125.SignaturePolicy()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ svp = asn1Object['signPolicyInfo']['signatureValidationPolicy']
+ sr = svp['commonRules']['signerAndVeriferRules']['signerRules']
+ msa = sr['mandatedSignedAttr']
+
+ self.assertIn(rfc2985.pkcs_9_at_contentType, msa)
+ self.assertIn(rfc2985.pkcs_9_at_messageDigest, msa)
+ self.assertIn(rfc2985.pkcs_9_at_signingTime, msa)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3161.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3161.py
new file mode 100644
index 0000000000..47db88ab1e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3161.py
@@ -0,0 +1,81 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3161
+
+
+class TSPQueryTestCase(unittest.TestCase):
+ tsp_query_pem_text = """\
+MFYCAQEwUTANBglghkgBZQMEAgMFAARAGu1DauxDZZv8F7l4EKIbS00U40mUKfBW5C0giEz0
+t1zOHCvK4A8i8zxwUXFHv4pAJZE+uFhZ+v53HTg9rLjO5Q==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3161.TimeStampReq()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tsp_query_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class TSPResponseTestCase(unittest.TestCase):
+ tsp_response_pem_text = """\
+MIIFMTADAgEAMIIFKAYJKoZIhvcNAQcCoIIFGTCCBRUCAQMxCzAJBgUrDgMCGgUAMIIBowYL
+KoZIhvcNAQkQAQSgggGSBIIBjjCCAYoCAQEGBCoDBAEwUTANBglghkgBZQMEAgMFAARAGu1D
+auxDZZv8F7l4EKIbS00U40mUKfBW5C0giEz0t1zOHCvK4A8i8zxwUXFHv4pAJZE+uFhZ+v53
+HTg9rLjO5QIDDwJEGA8yMDE5MDUxMDE4MzQxOFoBAf+gggERpIIBDTCCAQkxETAPBgNVBAoT
+CEZyZWUgVFNBMQwwCgYDVQQLEwNUU0ExdjB0BgNVBA0TbVRoaXMgY2VydGlmaWNhdGUgZGln
+aXRhbGx5IHNpZ25zIGRvY3VtZW50cyBhbmQgdGltZSBzdGFtcCByZXF1ZXN0cyBtYWRlIHVz
+aW5nIHRoZSBmcmVldHNhLm9yZyBvbmxpbmUgc2VydmljZXMxGDAWBgNVBAMTD3d3dy5mcmVl
+dHNhLm9yZzEiMCAGCSqGSIb3DQEJARYTYnVzaWxlemFzQGdtYWlsLmNvbTESMBAGA1UEBxMJ
+V3VlcnpidXJnMQswCQYDVQQGEwJERTEPMA0GA1UECBMGQmF5ZXJuMYIDWjCCA1YCAQEwgaMw
+gZUxETAPBgNVBAoTCEZyZWUgVFNBMRAwDgYDVQQLEwdSb290IENBMRgwFgYDVQQDEw93d3cu
+ZnJlZXRzYS5vcmcxIjAgBgkqhkiG9w0BCQEWE2J1c2lsZXphc0BnbWFpbC5jb20xEjAQBgNV
+BAcTCVd1ZXJ6YnVyZzEPMA0GA1UECBMGQmF5ZXJuMQswCQYDVQQGEwJERQIJAMHphhYNqOmC
+MAkGBSsOAwIaBQCggYwwGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEEMBwGCSqGSIb3DQEJ
+BTEPFw0xOTA1MTAxODM0MThaMCMGCSqGSIb3DQEJBDEWBBSuLICty7PQHx0Ynk0a3rGcCRrf
+EjArBgsqhkiG9w0BCRACDDEcMBowGDAWBBSRbaPYYOzKguNLxZ0Xk+fpaIdfFDANBgkqhkiG
+9w0BAQEFAASCAgBFDVbGQ3L5GcaUBMtBnMW7x3S57QowQhhrTewvncY+3Nc2i6tlM1UEdxIp
+3m2iMqaH/N2xIm2sU/L/lIwaT1XIS4bJ2Nn8UPjZu/prJrVUFTMjJ5LWkG55x6c5A4pa2xxS
+N/kOV2e+6RHYlGvcDOvu2fzuz08hE+NjaHIPg3idU1cBsl0gTWZCTrxdXTLuuvHahxUAdQKm
+gTdGPjIiOR4GYpaVxEAgulaBQLZU5MhfBTASI1LkljhiFeDBQMhTUeZoA59/OxgnQR1Zpca4
+ZuWuqnZImxziRQA1tX/6pjAo5eP1V+SLWYHeIO7ia/urGIK9AXd3jY3Ljq4h7R1E+RRKIseO
+74mmtbJtCaiGL9H+6k164qC7U5fHBzKl3UboZtOUmNj10IJPUNyKQ5JPwCe6HEhbeXLRdh/8
+bjdqy56hBHyG1NRBqiTXTvj9LOzsJGIF5GjwyCT0B2hpvzdTdzNtfQ27HUUYgnYg0fGEpNpi
+vyaW5qCh9S704IKB0m/fXlqiIfNVdqDr/aAHNww8CouZP2oFO61WXCspbFNPLubeqxd5P4o4
+dJzD4PKsurILdX7SL8pRI+O2UtJLwNB1t3LBLKfTZuOWoSBFvQwbqBsDEchrZIDZXSXMbXd6
+uuvuO3ZsRWuej+gso+nWi3CRnRc9Wb0++cq4s8YSLaYSj2pHMA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3161.TimeStampResp()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tsp_response_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3274.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3274.py
new file mode 100644
index 0000000000..cb24d3725f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3274.py
@@ -0,0 +1,81 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3274
+from pyasn1_modules import rfc5652
+
+
+class CompressedDataTestCase(unittest.TestCase):
+ compressed_data_pem_text = """\
+MIIB7wYLKoZIhvcNAQkQAQmgggHeMIIB2gIBADANBgsqhkiG9w0BCRADCDCCAcQG
+CSqGSIb3DQEHAaCCAbUEggGxeJxVksGO1DAQRO/+ir4xK4VlNSAhcUPRrgRiLgw/
+0Il7Egu7bdntMOHraSezMJyixOWq19XpIwuxvP2xJvoEQld5lzw6Nub7Sw/vjx8/
+dJDq4F2ZyYJj+FqZ4Pj0dOzA0sUxFUC4xBxQ2gNqcTzBGEPKVApZY1EQsKn6vCaJ
+U8Y0uxFOeowTwXllwSsc+tP5Qe9tOCCK8wjQ32zUcvcZSDMIJCOX4PQgMqQcF2c3
+Dq5hoAzxAmgXVN+JSqfUo6+2YclMhrwLjlHaVRVutplsZYs8rvBL2WblqN7CTD4B
+MqAIjj8pd1ASUXMyNbXccWeDYd0sxlsGYIhVp3i1l6jgr3qtUeUehbIpQqnAoVSN
+1IqKm7hZaI3EY2tLIR86RbD//ONCGb2HsPdnivvdqvrsZY51mlu+NjTjQhpKWz0p
+FvRlWw9ae7+fVgKKie0SeFpIZYemoyuG5HUS2QY6fTk9N6zz+dsuUyr9Xghs5Ddi
+1LbZbVoNHDyFNv19jL7qiv9uuLK/XTD3Kqct1JS822vS8vWXpMzYBtal/083rMap
+XQ7u2qbaKFtZ7V96NH8ApkUFkg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.compressed_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc3274.id_ct_compressedData, asn1Object['contentType'])
+
+ cd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc3274.CompressedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(cd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(cd))
+
+ self.assertEqual(rfc3274.id_alg_zlibCompress,
+ cd['compressionAlgorithm']['algorithm'])
+ self.assertEqual(rfc5652.id_data, cd['encapContentInfo']['eContentType'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.compressed_data_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc3274.id_ct_compressedData, asn1Object['contentType'])
+
+ cd = asn1Object['content']
+
+ self.assertEqual(rfc3274.id_alg_zlibCompress,
+ cd['compressionAlgorithm']['algorithm'])
+ self.assertEqual(rfc5652.id_data, cd['encapContentInfo']['eContentType'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3279.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3279.py
new file mode 100644
index 0000000000..210a2e9795
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3279.py
@@ -0,0 +1,385 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3279
+
+
+class RSACertificateTestCase(unittest.TestCase):
+ rsa_cert_pem_text = """\
+MIIE8TCCA9mgAwIBAgIQbyXcFa/fXqMIVgw7ek/H+DANBgkqhkiG9w0BAQUFADBv
+MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk
+ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF
+eHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFow
+gYExCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO
+BgNVBAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMScwJQYD
+VQQDEx5DT01PRE8gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDQQIuLcuORG/dRwRtUBJjTqb/B5opdO4f7u4jO
+DeMvPwaW8KIpUJmu2zuhV7B0UXHN7UKRTUH+qcjYaoZ3RLtZZpdQXrTULHBEz9o3
+lUJpPDDEcbNS8CFNodi6OXwcnqMknfKDFpiqFnxDmxVbt640kf7UYiYYRpo/68H5
+8ZBX66x6DYvbcjBqZtXgRqNw3GjZ/wRIiXfeten7Z21B6bw5vTLZYgLxsag9bjec
+4i/i06Imi8a4VUOI4SM+pdIkOWpHqwDUobOpJf4NP6cdutNRwQuk2qw471VQJAVl
+RpM0Ty2NrcbUIRnSjsoFYXEHc0flihkSvQRNzk6cpUisuyb3AgMBAAGjggF0MIIB
+cDAfBgNVHSMEGDAWgBStvZh6NLQm9/rEJlTvA73gJMtUGjAdBgNVHQ4EFgQUC1jl
+i8ZMFTekQKkwqSG+RzZaVv8wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
+Af8wEQYDVR0gBAowCDAGBgRVHSAAMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9j
+cmwudXNlcnRydXN0LmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDCBswYI
+KwYBBQUHAQEEgaYwgaMwPwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0
+LmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LnA3YzA5BggrBgEFBQcwAoYtaHR0
+cDovL2NydC51c2VydHJ1c3QuY29tL0FkZFRydXN0VVROU0dDQ0EuY3J0MCUGCCsG
+AQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1c3QuY29tMA0GCSqGSIb3DQEBBQUA
+A4IBAQAHYJOZqs7Q00fQNzPeP2S35S6jJQzVMx0Njav2fkZ7WQaS44LE5/X289kF
+z0k0LTdf9CXH8PtrI3fx8UDXTLtJRTHdAChntylMdagfeTHJNjcPyjVPjPF+3vxG
+q79om3AjMC63xVx7ivsYE3lLkkKM3CyrbCK3KFOzGkrOG/soDrc6pNoN90AyT99v
+uwFQ/IfTdtn8+7aEA8rJNhj33Wzbu7qBHKat/ij5z7micV0ZBepKRtxzQe+JlEKx
+Q4hvNRevHmCDrHqMEHufyfaDbZ76iO4+3e6esL/garnQnweyCROa9aTlyFt5p0c1
+M2jlVZ6qW8swC53HD79oRIGXi1FK
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.rsa_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.rsaEncryption, spki_a['algorithm'])
+
+ spki_pk = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['subjectPublicKey'].asOctets()
+ pk, rest = der_decoder(spki_pk, asn1Spec=rfc3279.RSAPublicKey())
+
+ self.assertFalse(rest)
+ self.assertTrue(pk.prettyPrint())
+ self.assertEqual(spki_pk, der_encoder(pk))
+ self.assertEqual(65537, pk['publicExponent'])
+ self.assertEqual(rfc3279.sha1WithRSAEncryption,
+ asn1Object['tbsCertificate']['signature']['algorithm'])
+ self.assertEqual(rfc3279.sha1WithRSAEncryption,
+ asn1Object['signatureAlgorithm']['algorithm'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.rsa_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.rsaEncryption, spki_a['algorithm'])
+ self.assertEqual(univ.Null(""), spki_a['parameters'])
+
+
+class ECCertificateTestCase(unittest.TestCase):
+ ec_cert_pem_text = """\
+MIIDrDCCApSgAwIBAgIQCssoukZe5TkIdnRw883GEjANBgkqhkiG9w0BAQwFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0xMzAzMDgxMjAwMDBaFw0yMzAzMDgxMjAwMDBaMEwxCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxJjAkBgNVBAMTHURpZ2lDZXJ0IEVDQyBT
+ZWN1cmUgU2VydmVyIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4ghC6nfYJN6g
+LGSkE85AnCNyqQIKDjc/ITa4jVMU9tWRlUvzlgKNcR7E2Munn17voOZ/WpIRllNv
+68DLP679Wz9HJOeaBy6Wvqgvu1cYr3GkvXg6HuhbPGtkESvMNCuMo4IBITCCAR0w
+EgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwNAYIKwYBBQUHAQEE
+KDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wQgYDVR0f
+BDswOTA3oDWgM4YxaHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0R2xv
+YmFsUm9vdENBLmNybDA9BgNVHSAENjA0MDIGBFUdIAAwKjAoBggrBgEFBQcCARYc
+aHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzAdBgNVHQ4EFgQUo53mH/naOU/A
+buiRy5Wl2jHiCp8wHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJ
+KoZIhvcNAQEMBQADggEBAMeKoENL7HTJxavVHzA1Nm6YVntIrAVjrnuaVyRXzG/6
+3qttnMe2uuzO58pzZNvfBDcKAEmzP58mrZGMIOgfiA4q+2Y3yDDo0sIkp0VILeoB
+UEoxlBPfjV/aKrtJPGHzecicZpIalir0ezZYoyxBEHQa0+1IttK7igZFcTMQMHp6
+mCHdJLnsnLWSB62DxsRq+HfmNb4TDydkskO/g+l3VtsIh5RHFPVfKK+jaEyDj2D3
+loB5hWp2Jp2VDCADjT7ueihlZGak2YPqmXTNbk19HOuNssWvFhtOyPNV6og4ETQd
+Ea8/B6hPatJ0ES8q/HO3X8IVQwVs1n3aAr0im0/T+Xc=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_ecPublicKey, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc3279.EcpkParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+ self.assertEqual(univ.ObjectIdentifier('1.3.132.0.34'), spki_a_p['namedCurve'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.ec_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_ecPublicKey, spki_a['algorithm'])
+ self.assertEqual(
+ univ.ObjectIdentifier('1.3.132.0.34'), spki_a['parameters']['namedCurve'])
+
+
+class DSACertificateTestCase(unittest.TestCase):
+ dsa_cert_pem_text = """\
+MIIDpjCCA0ygAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAxMDE5MjAxMjMw
+WjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQCLpR53
+xHfe+SiknAK/L9lm/ZO1109c9iYkriPIW/5MMlM+qc/tdRkKpG6ELIpfXTPtKCJm
+zqqVIyTmAJryyE8Xw0Ie2mzYPU5ULvKmllQkjTsWgPGgQBkciZ0AW9ggD9VwZilg
+4qh3iSO7T97hVQFnpCh6vm8pOH6UP/5kpr9ZJQIVANzdbztBJlJfqCB1t4h/NvSu
+wCFvAoGAITP+jhYk9Rngd98l+5ccgauQ+cLEUBgNG2Wq56zBXQbLou6eKkQi7ecL
+NiRmExq3IU3LOj426wSxL72Kw6FPyOEv3edIFkJJEHL4Z+ZJeVe//dzya0ddOJ7k
+k6qNF2ic+viD/5Vm8yRyKiig2uHH/MgIesLdZnvbzvX+f/P0z50DgYQAAoGALAUl
+jkOi1PxjjFVvhGfK95yIsrfbfcIEKUBaTs9NR2rbGWUeP+93paoXwP39X9wrJx2M
+SWeHWhWKszNgoiyqYT0k4R9mem3WClotxOvB5fHfwIp2kQYvE7H0/TPdGhfUpHQG
+YpyLQgT6L80meSKMFnu4VXGzOANhWDxu3JxiADCjgZQwgZEwCwYDVR0PBAQDAgeA
+MEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVz
+dGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFO37wHcauyc03rDc6cDRRsHz
+gcK+MB8GA1UdIwQYMBaAFM1IZQGDsqYHWwb+I4EMxHPk0bU4MAsGCWCGSAFlAwQD
+AgNHADBEAiBBRbfMzLi7+SVyO8SM3xxwUsMf/k1B+Nkvf1kBTfCfGwIgSAx/6mI+
+pNqdXqZZGESXy1MT1aBc4ynPGLFUr2r7cPY=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.dsa_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_dsa, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(spki_a['parameters'],
+ asn1Spec=rfc3279.Dss_Parms())
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+
+ q_value = 1260916123897116834511257683105158021801897369967
+
+ self.assertEqual(q_value, spki_a_p['q'])
+
+ sig_value, rest = der_decoder(
+ asn1Object['signature'].asOctets(), asn1Spec=rfc3279.Dss_Sig_Value())
+
+ self.assertFalse(rest)
+ self.assertTrue(sig_value.prettyPrint())
+ self.assertEqual(asn1Object['signature'].asOctets(), der_encoder(sig_value))
+ self.assertTrue(sig_value['r'].hasValue())
+ self.assertTrue(sig_value['s'].hasValue())
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.dsa_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_dsa, spki_a['algorithm'])
+
+ q_value = 1260916123897116834511257683105158021801897369967
+
+ self.assertEqual(q_value, spki_a['parameters']['q'])
+
+
+class KEACertificateTestCase(unittest.TestCase):
+ kea_cert_pem_text = """\
+MIICizCCAjOgAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCgwCQYHKoZIzjgEAzA/
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAP
+BgNVBAoTCEJvZ3VzIENBMB4XDTE5MTAyMDIwMDkyMVoXDTIwMTAxOTIwMDkyMVow
+cDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAw
+DgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGljZTEgMB4GCSqGSIb3DQEJARYR
+YWxpY2VAZXhhbXBsZS5jb20wgaAwFwYJYIZIAWUCAQEWBApc+PEn5ladbYizA4GE
+AAKBgB9Lc2QcoSW0E9/VnQ2xGBtpYh9MaDUBzIixbN8rhDwh0BBesD2TwHjzBpDM
+2PJ6DD1ZbBcz2M3vJaIKoZ8hA2EUtbbHX1BSnVfAdeqr5St5gfnuxSdloUjLQlWO
+rOYfpFVEp6hJoKAZiYfiXz0fohNXn8+fiU5k214byxlCPlU0o4GUMIGRMAsGA1Ud
+DwQEAwIDCDBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3Qg
+YmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1UdDgQWBBSE49bkPB9sQm27
+Rs2jgAPMyY6UCDAfBgNVHSMEGDAWgBTNSGUBg7KmB1sG/iOBDMRz5NG1ODAJBgcq
+hkjOOAQDA0cAMEQCIE9PWhUbnJVdNQcVYSc36BMZ+23uk2ITLsgSXtkScF6TAiAf
+TPnJ5Wym0hv2fOpnPPsWTgqvLFYfX27GGTquuOd/6A==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kea_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_keyExchangeAlgorithm, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(spki_a['parameters'],
+ asn1Spec=rfc3279.KEA_Parms_Id())
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+ self.assertEqual(univ.OctetString(hexValue='5cf8f127e6569d6d88b3'), spki_a_p)
+ self.assertEqual(
+ rfc3279.id_dsa_with_sha1, asn1Object['tbsCertificate']['signature']['algorithm'])
+ self.assertEqual(
+ rfc3279.id_dsa_with_sha1, asn1Object['signatureAlgorithm']['algorithm'])
+
+ sig_value, rest = der_decoder(asn1Object['signature'].asOctets(),
+ asn1Spec=rfc3279.Dss_Sig_Value())
+ self.assertFalse(rest)
+ self.assertTrue(sig_value.prettyPrint())
+ self.assertEqual(asn1Object['signature'].asOctets(), der_encoder(sig_value))
+ self.assertTrue(sig_value['r'].hasValue())
+ self.assertTrue(sig_value['s'].hasValue())
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.kea_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_keyExchangeAlgorithm, spki_a['algorithm'])
+ self.assertEqual(
+ univ.OctetString(hexValue='5cf8f127e6569d6d88b3'), spki_a['parameters'])
+
+ self.assertEqual(rfc3279.id_dsa_with_sha1,
+ asn1Object['tbsCertificate']['signature']['algorithm'])
+ self.assertEqual(
+ rfc3279.id_dsa_with_sha1, asn1Object['signatureAlgorithm']['algorithm'])
+
+
+class DHCertificateTestCase(unittest.TestCase):
+ dh_cert_pem_text = """\
+MIIEtDCCBFqgAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAxMDE5MjAxMjMw
+WjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTCCAsQwggI5BgcqhkjOPgIBMIICLAKCAQEAt9x/
+0iwGww3k19h+wbODVK1yqjFzEY2pyfXthHcn+nEw+DpURJ+iOhYPr68E3XO5sB48
+r5xTZhPN5+YejD3T8qhnDtiq4qrrSH7BOaEzqCDpHE2Bpoy3SodQ5Obaiu9Kx1ix
+BRk/oRZUH+F+ATZmF0rPKrZGZOnmsh0IZm3dlmRR9FRGn0aJlZKXveqp+hZ97/r0
+cbSo6wdT47APfocgweZMvgWu1IQBs6FiunRgaeX3RyLr4fnkvCzUM7TmxpRJYtL6
+myAp007QvtgQ0AdEwVfNl3jQ0IIW7TtpXVxDDQaKZZe9yYrY4GV3etlYk8a4cpjN
+rBxBCCTMASE4+iVtPQKCAQAg3m19vWc1TlHmkeqLwgvHN0Ufdyw5axWtc8qIJGZ1
+MezhyLyD4RU0VFCSocJCCe2k2kS2P2vQERZZYcn/nCYuiswCjOCbnwKozfaTZ3Fc
+1KOCtb4EEcuk/th5XNhWCYJJ7Hasym8zuPaqh5TLcsHXp0/lQUiOV2uVHnAt503A
+HY1v4PhlZ3G0CRZMenafU0Ky7a6zhrqFvWgtSdo+vN0S9xS/KJuTaWsYgOAt4r2I
+K1uwuWuvA5L1Qrdj8pDzMLkdlyHU1Jgjzk0rNQDTbUkZX9CAi/xKUGZysjWfOn1F
+HC1vJ1sbP9nTXpWRain1/6yatB2RxLTvWYyAq9IsL/8PAiEAkY8lGryvcZI/pxXt
+XwSaXEL2d77GSGICMGZa1wOJtdEDgYQAAoGALAUljkOi1PxjjFVvhGfK95yIsrfb
+fcIEKUBaTs9NR2rbGWUeP+93paoXwP39X9wrJx2MSWeHWhWKszNgoiyqYT0k4R9m
+em3WClotxOvB5fHfwIp2kQYvE7H0/TPdGhfUpHQGYpyLQgT6L80meSKMFnu4VXGz
+OANhWDxu3JxiADCjgZQwgZEwCwYDVR0PBAQDAgMIMEIGCWCGSAGG+EIBDQQ1FjNU
+aGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9z
+ZS4wHQYDVR0OBBYEFO37wHcauyc03rDc6cDRRsHzgcK+MB8GA1UdIwQYMBaAFM1I
+ZQGDsqYHWwb+I4EMxHPk0bU4MAsGCWCGSAFlAwQDAgNHADBEAiB1LU0esRdHDvSj
+kqAm+3viU2a+hl66sLrK5lYBOYqGYAIgWG7bDxqFVP6/stHfdbeMovLejquEl9tr
+iPEBA+EDHjk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.dh_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.dhpublicnumber, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc3279.DomainParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+
+ q_value = 65838278260281264030127352144753816831178774189428428256716126077244217603537
+
+ self.assertEqual(q_value, spki_a_p['q'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.dh_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.dhpublicnumber, spki_a['algorithm'])
+
+ q_value = 65838278260281264030127352144753816831178774189428428256716126077244217603537
+
+ self.assertEqual(q_value, spki_a['parameters']['q'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3280.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3280.py
new file mode 100644
index 0000000000..3031335467
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3280.py
@@ -0,0 +1,79 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3280
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class CertificateListTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3280.CertificateList()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3281.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3281.py
new file mode 100644
index 0000000000..f03316f1f0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3281.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3281
+
+
+class AttributeCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDBTCCAm4CAQEwgY+gUTBKpEgwRjEjMCEGA1UEAwwaQUNNRSBJbnRlcm1lZGlh
+dGUgRUNEU0EgQ0ExCzAJBgNVBAYTAkZJMRIwEAYDVQQKDAlBQ01FIEx0ZC4CAx7N
+WqE6pDgwNjETMBEGA1UEAwwKQUNNRSBFQ0RTQTELMAkGA1UEBhMCRkkxEjAQBgNV
+BAoMCUFDTUUgTHRkLqA9MDukOTA3MRQwEgYDVQQDDAtleGFtcGxlLmNvbTELMAkG
+A1UEBhMCRkkxEjAQBgNVBAoMCUFDTUUgTHRkLjANBgkqhkiG9w0BAQsFAAIEC63K
+/jAiGA8yMDE2MDEwMTEyMDAwMFoYDzIwMTYwMzAxMTIwMDAwWjCB8jA8BggrBgEF
+BQcKATEwMC6GC3VybjpzZXJ2aWNlpBUwEzERMA8GA1UEAwwIdXNlcm5hbWUECHBh
+c3N3b3JkMDIGCCsGAQUFBwoCMSYwJIYLdXJuOnNlcnZpY2WkFTATMREwDwYDVQQD
+DAh1c2VybmFtZTA1BggrBgEFBQcKAzEpMCegGKQWMBQxEjAQBgNVBAMMCUFDTUUg
+THRkLjALDAlBQ01FIEx0ZC4wIAYIKwYBBQUHCgQxFDASMBAMBmdyb3VwMQwGZ3Jv
+dXAyMCUGA1UESDEeMA2hC4YJdXJuOnJvbGUxMA2hC4YJdXJuOnJvbGUyMGowHwYD
+VR0jBBgwFoAUgJCMhskAsEBzvklAX8yJBOXO500wCQYDVR04BAIFADA8BgNVHTcB
+Af8EMjAwMB2gCoYIdXJuOnRlc3SgD4INKi5leGFtcGxlLmNvbTAPoA2GC3Vybjph
+bm90aGVyMA0GCSqGSIb3DQEBCwUAA4GBACygfTs6TkPurZQTLufcE3B1H2707OXK
+sJlwRpuodR2oJbunSHZ94jcJHs5dfbzFs6vNfVLlBiDBRieX4p+4JcQ2P44bkgyi
+UTJu7g1b6C1liB3vO6yH5hOZicOAaKd+c/myuGb9uJ4n6y2oLNxnk/fDzpuZUe2h
+Q4eikPk4LQey
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3281.AttributeCertificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ attributeMap = {
+ rfc3281.id_at_role: rfc3281.RoleSyntax(),
+ rfc3281.id_aca_authenticationInfo: rfc3281.SvceAuthInfo(),
+ rfc3281.id_aca_accessIdentity: rfc3281.SvceAuthInfo(),
+ rfc3281.id_aca_chargingIdentity: rfc3281.IetfAttrSyntax(),
+ rfc3281.id_aca_group: rfc3281.IetfAttrSyntax(),
+ }
+
+ count = 0
+
+ for attr in asn1Object['acinfo']['attributes']:
+ self.assertIn(attr['type'], attributeMap)
+
+ av, rest = der_decoder(
+ attr['values'][0], asn1Spec=attributeMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ count += 1
+
+ self.assertEqual(5, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3370.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3370.py
new file mode 100644
index 0000000000..70d9d4215f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3370.py
@@ -0,0 +1,234 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3370
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFjAYJKoZIhvcNAQcDoIIFfTCCBXkCAQIxZqJkAgEEMCMEEH744tkBAA6gplAQ
+nKYxCF8YDzIwMTkwOTEyMTIwMDAwWjAQBgsqhkiG9w0BCRADBwIBOgQocOaZ+1cB
+94MzMPtx6HyFpCC9yZrwXSKvWg5I018xOJhsuq+0so1PNTCCBQoGCSqGSIb3DQEH
+ATAZBggqhkiG9w0DAjANAgE6BAhCT0dVU19JVoCCBOBzx7F6GMkP+C0Q4iuDq0rk
+SZprg8nuXx/4S3IMP999BrJdUAbPYxdQhAwTOZIuueyv00TJe/Eam9yyLuZXy0PF
+lTRi7KED8L8cyHsRoEobWGMLvE3D4hEhTGttElqQxBvMxZZgm6kLnNG7j8Z72L4l
+U4aARLYTQvktlJnnfCaccDSiWzU8eXcXdnZAzcKR7CoDc0/XBpdDRddvQ7KXoarX
+YHuSybt649YDcpy0SN9gEPqcFPrBB3nusAx4VOTlpx5Z3ZJv/TEymN8KDobNfykB
+ZURTwupO9WaVJZ3Hd/d8C1SCJn6DHuM1jwDp26WfzO8xCfea08MJrnQbNKsDHwmt
+4dFZIOvcOnwR8nNSB/Lt1aUj3GzluHVMyQQyT4AdZDmwFdNmQOBUBLmbWYhtd7t3
+O7Eqx8bGNa7V7LL0nvua04aj1oA6ph/G/8jxhByBYdN5Bwg7f1Ga3ZCwju2tFoQn
+WOCPYTVOjmBEJshBbNC7KhLpp9+C7/13A9cIC3T7Reuc7m+Fopf9Fabu97yFiyJP
+S8jSF0EnesNGR1L1Uvo2Wdc66iECoSrxvezaSgGKB2uLTnaFx4ASVMcP7gDipEOI
+wuUUuVCqgmWkHAK0Q9mwhBLLrYrsn9OjDHFpvkWgWNRMLl/v3E9A+grFh2BQHkB4
+C7keB1ZOfj1SqDi/+ylM9I1FOYMxVXJn2qHMl+QOkfdMoIATm3n3DiBI97/uX4x5
+KaX074v0dN31WeDcsFsh2ze5Dhx8vLJCaXLzWqkmNHX5G/CjjqE6bSR/awgWLRZQ
+uY/9fMvDpvVJuId/+OoWDtMVPIsyQ8w8yZzv+SkuZhsrJMHiKd5qxNQv5sOvC765
+LMUCNNwj7WzPhajintFXLAEMpIjk5xt3eIy3hdYla3PQoFfqcHOVX4EFMLBoYwBT
+gik8Fg669yXtMlbH84MGNs7jObhP/rrDkgbe0qmxUyzgm2uHya1VcItMGYoPPKMF
+U3ZfwAsZdqsi1GAtruTzSUmOpMfAoKOIAyZP96HrsrPCaoGrn7ysm5eRrHQ2hdwO
+7rGQIw0dRAFh2eyRomoLam7yEiw9M6uHuJ5hIS5yEW+7uUjQT6nvKlbrkIyLL5j9
+Gbk5Z4fOMqRTkBs+3H8x7a+lBEKBo/ByJm6fHYi+LX5ZhQFTWkY0M7tfPtrxQdsN
+RGSHtv7jS7PZ3thCMqCtkG/pjAsCbDUtMThtP08z2fstE6dfy7qSx6LzKLDyBl5W
+76mVYdsX7Q72yIoCDFmUGdrRcWA+l3OMwNNL+x9MhhdaUWPtxqaGyZMNGOjkbYHb
+XZ69oqYqCHkAstIVKTzpk3kq9C9x+ynzWO8kIGYNK2uxSBIzPLQ6Daq4c53rWFFN
+WVjPC8m98zMcYp0hbBhRsdk4qj8osSTcTfpT0+Q+hkYQvZl4IfgX1aHeaCDSScF8
+SaU+cZ7GYFvLo1cYrtVbeXrFwmWl0xpco1Ux+XZgryT/fgfJ+3ToppgsQmzECqTW
+mYsSYaF1kLU4Cqi9UH/VqBLOkwxoH05Zao2xOMNzu2QO3wFnvY2wBsIj1eaxfzVb
+42o9vom7V20jT1ufXXctf9ls5J1WJxBxdKmXQWdNloeAcl1AtxTbw7vIUU5uWqu9
+wwqly11MDVPAb0tcQW20auWmCNkXd52jQJ7PXR6kr5I=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, kwa['algorithm'])
+ kwa_param, rest = der_decoder(
+ kwa['parameters'], rfc3370.RC2wrapParameter())
+ self.assertFalse(rest)
+ self.assertTrue(kwa_param.prettyPrint())
+ self.assertEqual(kwa['parameters'], der_encoder(kwa_param))
+ self.assertEqual(58, kwa_param)
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3370.rc2CBC, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], rfc3370.RC2CBCParameter())
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, param['iv'])
+ self.assertEqual(58, param['rc2ParameterVersion'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
+
+ ri0 = asn1Object['content']['recipientInfos'][0]
+ kwa = ri0['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, kwa['algorithm'])
+ self.assertEqual(58, kwa['parameters'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3370.rc2CBC, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, cea['parameters']['iv'])
+ self.assertEqual(58, cea['parameters']['rc2ParameterVersion'])
+
+class DSAPublicKeyTestCase(unittest.TestCase):
+ dsa_cert_pem_text = """\
+MIIDpjCCA0ygAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAxMDE5MjAxMjMw
+WjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQCLpR53
+xHfe+SiknAK/L9lm/ZO1109c9iYkriPIW/5MMlM+qc/tdRkKpG6ELIpfXTPtKCJm
+zqqVIyTmAJryyE8Xw0Ie2mzYPU5ULvKmllQkjTsWgPGgQBkciZ0AW9ggD9VwZilg
+4qh3iSO7T97hVQFnpCh6vm8pOH6UP/5kpr9ZJQIVANzdbztBJlJfqCB1t4h/NvSu
+wCFvAoGAITP+jhYk9Rngd98l+5ccgauQ+cLEUBgNG2Wq56zBXQbLou6eKkQi7ecL
+NiRmExq3IU3LOj426wSxL72Kw6FPyOEv3edIFkJJEHL4Z+ZJeVe//dzya0ddOJ7k
+k6qNF2ic+viD/5Vm8yRyKiig2uHH/MgIesLdZnvbzvX+f/P0z50DgYQAAoGALAUl
+jkOi1PxjjFVvhGfK95yIsrfbfcIEKUBaTs9NR2rbGWUeP+93paoXwP39X9wrJx2M
+SWeHWhWKszNgoiyqYT0k4R9mem3WClotxOvB5fHfwIp2kQYvE7H0/TPdGhfUpHQG
+YpyLQgT6L80meSKMFnu4VXGzOANhWDxu3JxiADCjgZQwgZEwCwYDVR0PBAQDAgeA
+MEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVz
+dGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFO37wHcauyc03rDc6cDRRsHz
+gcK+MB8GA1UdIwQYMBaAFM1IZQGDsqYHWwb+I4EMxHPk0bU4MAsGCWCGSAFlAwQD
+AgNHADBEAiBBRbfMzLi7+SVyO8SM3xxwUsMf/k1B+Nkvf1kBTfCfGwIgSAx/6mI+
+pNqdXqZZGESXy1MT1aBc4ynPGLFUr2r7cPY=
+"""
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.dsa_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki = asn1Object['tbsCertificate']['subjectPublicKeyInfo']
+ self.assertEqual(rfc3370.id_dsa, spki['algorithm']['algorithm'])
+ pk_substrate = spki['subjectPublicKey'].asOctets()
+
+ pk, rest = der_decoder(pk_substrate, asn1Spec=rfc3370.Dss_Pub_Key())
+ self.assertFalse(rest)
+ self.assertTrue(pk.prettyPrint())
+ self.assertEqual(pk_substrate, der_encoder(pk))
+
+ self.assertEqual(48, pk % 1024)
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MGIwDAYIKwYBBQUIAQIFADAfBgsqhkiG9w0BCRADBTAQBgsqhkiG9w0BCRADBwIB
+OjAfBgsqhkiG9w0BCRADCjAQBgsqhkiG9w0BCRADBwIBOjAQBgsqhkiG9w0BCRAD
+BwIBOg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg_param = False
+ for cap in asn1Object:
+ if cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys():
+ if cap['parameters'].hasValue():
+ param, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(param))
+
+ if cap['capabilityID'] == rfc3370.id_alg_ESDH:
+ kwa, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(kwa.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(kwa))
+
+ self.assertTrue(kwa['algorithm'] in rfc5280.algorithmIdentifierMap.keys())
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, kwa['algorithm'])
+ kwa_p, rest = der_decoder(
+ kwa['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[kwa['algorithm']])
+ self.assertFalse(rest)
+ self.assertTrue(kwa_p.prettyPrint())
+ self.assertEqual(kwa['parameters'], der_encoder(kwa_p))
+ self.assertEqual(58, kwa_p)
+ found_wrap_alg_param = True
+
+ self.assertTrue(found_wrap_alg_param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg_param = False
+ for cap in asn1Object:
+ if cap['capabilityID'] == rfc3370.id_alg_ESDH:
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, cap['parameters']['algorithm'])
+ self.assertEqual(58, cap['parameters']['parameters'])
+ found_wrap_alg_param = True
+
+ self.assertTrue(found_wrap_alg_param)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
+
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3447.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3447.py
new file mode 100644
index 0000000000..8788691208
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3447.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3447
+
+
+# openssl genrsa -primes 3 -f4 -out multiprime.key
+
+class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIE2QIBAQKCAQEAn82EqwXasE2TFNSmZucB8LNza2mOWLHF3nxpxKXalPMDvezc
+5Dq7Ytcv/k9jJL4j4jYfvR4yyZdU9iHLaD6hOINZ8E6hVpx/4c96ZUSOLzD2g+u+
+jIuoNfG+zygSBGYCS6BLCAIsZ+2wUyxYpLJknHJld9/jy+aLmmyrilhH9dH5AUiV
+3NeWht/68++dMXf4ZI/gV4bMSlWhggxkz2WJJpiQdCdJatGkwNDkHmLA9X0tC6OH
+SPE7qYdxG38cYS5F445SgnhDpiK7BodSqYLwgehaDjoOYdEgHVnOcpBCDI5zCJSL
+b1c/z8uhrB1xxlECR44wCLcKsIIYQxaEErRJ/wIDAQABAoIBAD+Ra5L0szeqxDVn
+GgKZJkZvvBwgU0WpAgMtDo3xQ/A4c2ab0IrhaiU5YJgTUGcPVArqaNm8J4vVrTBz
+5QxEzbFDXwWe4cMoYh6bgB7ElKLlIUr8/kGZUfgc7kI29luEjcAIEAC2/RQHesVn
+DHkL5OzqZL+4fIwckAMh0tXdflsPgZ/jgIaKca4OqKu4KGnczm3UvqtlvwisAjkx
+zMyfZXOLn0vEwP2bfbhQrCVrP7n6a+CV+Kqm8NBWnbiS6x2rWemVVssNTbfXQztq
+wC6ZJZCLK7plciDBWvHcS6vxdcsS9DUxuqSV6o/stCGTl1D+9tDx8Od0Eunna2B2
+wAoRHZECVgbNO1bqwfYpp5aFuySWoP+KZz8f/5ZkHjLwiNGpQcqVd4+7Ql2R4qgF
+NgSoQQOZFhKtiOeLVU0HYfp6doI4waSINZdF/fJDHD6fY3AMOc/IIMDHHIzbAlYG
+vKOocLXWj/2+gcyQ1XoAmrE70aIFUBLSvd7RCi8GI74zYWp5lCSvO850Z4GsWSZT
+41iF13sTDDJPm3+BbzMvEu2GuACi/8/IpbUr24/FP9Cp1Rf7kwJWAgMxfoshbrNu
+ebQB5laHNnT+DYhrOFVRNiNDaD2bUNSetrFidosWtD4ueHxMGENwa4BbFJ9+UrdP
+fyxC6k7exM7khGjaNZczwTep1VpYtKjzP/bp9KcCVgYoj9s9HZ1FCAsNEPodjGfd
+AcPTQS9mIa7wzy19B7uvFQJXPURi/p4KKBMVQ99Pp8/r9lJzxxiEf8FyPr8N7lZM
+EUKkFkDrZQDhKpsrHWSNj6yRFlltAlYC7dYR8KLEWoOUATLosxQhwgypv+23r+d4
+ZdPOdDv9n8Kmj+NFy/oISFfdXzlOU4RWQtMx3hEwAabwct7vjiJEej/kmiTqco02
+17tt13VvvQ5ZXF73dDCCAQwwggEIAlYDfMpM1WNfxcLLOgkRZ+0S9OvIrEOi0ALV
+SquTdi/thhCuCsK3lMD4miN9te8j16YtqEFVWXC3a6DWwIJ6m/xZ50bBwPqM8RsI
+6FWhZw4Dr5VqjYXUvwJWAvapRk9SydDYri/cAtGIkUJVlspkE1emALAaSw30vmfd
+hrgYLT6YGOmK3UmcNJ4NVeET275MXWF1ZOhkOGKTN6aj5wPhJaHBMnmUQrq7GwC6
+/LfUkSsCVgMCDTV9gbFW8u6TcTVW85dBIeUGxZh1T2pbU3dkGO3IOxOhzJUplH4/
+EeEs9dusHakg1ERXAg4Vo1YowPW8kuVbZ9faxeVrmuER5NcCuZzS5X/obGUw
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3447.RSAPrivateKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3537.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3537.py
new file mode 100644
index 0000000000..1b7490b002
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3537.py
@@ -0,0 +1,76 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3537
+from pyasn1_modules import rfc5751
+
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "MCIwDwYLKoZIhvcNAQkQAwwFADAPBgsqhkiG9w0BCRADCwUA"
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ alg_oid_list = [
+ rfc3537.id_alg_HMACwithAESwrap,
+ rfc3537.id_alg_HMACwith3DESwrap,
+ ]
+
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for cap in asn1Object:
+ self.assertEqual(der_encoder(univ.Null("")), cap['parameters'])
+ self.assertTrue(cap['capabilityID'] in alg_oid_list)
+ count += 1
+
+ self.assertEqual(count, 2)
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ rfc3537.id_alg_HMACwithAESwrap: univ.Null(""),
+ rfc3537.id_alg_HMACwith3DESwrap: univ.Null(""),
+ }
+
+ asn1Spec=rfc5751.SMIMECapabilities()
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypesMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for cap in asn1Object:
+ self.assertEqual(univ.Null(""), cap['parameters'])
+ self.assertTrue(cap['capabilityID'] in openTypesMap.keys())
+ count += 1
+
+ self.assertEqual(count, 2)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3560.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3560.py
new file mode 100644
index 0000000000..3419cdea7c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3560.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3560
+
+
+class OAEPDefautTestCase(unittest.TestCase):
+ oaep_default_pem_text = "MAsGCSqGSIb3DQEBBw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_default_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3560.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class OAEPSHA256TestCase(unittest.TestCase):
+ oaep_sha256_pem_text = "MDwGCSqGSIb3DQEBBzAvoA8wDQYJYIZIAWUDBAIBBQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAIBBQA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_sha256_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3560.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class OAEPFullTestCase(unittest.TestCase):
+ oaep_full_pem_text = "MFMGCSqGSIb3DQEBBzBGoA8wDQYJYIZIAWUDBAICBQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAICBQCiFTATBgkqhkiG9w0BAQkEBmZvb2Jhcg=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_full_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3560.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3565.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3565.py
new file mode 100644
index 0000000000..58574ec22c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3565.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3565
+
+
+class AESKeyWrapTestCase(unittest.TestCase):
+ kw_alg_id_pem_text = "MAsGCWCGSAFlAwQBLQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc3565.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kw_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3565.id_aes256_wrap, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class AESCBCTestCase(unittest.TestCase):
+ aes_alg_id_pem_text = "MB0GCWCGSAFlAwQBKgQQEImWuoUOPwM5mTu1h4oONw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc3565.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.aes_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3565.id_aes256_CBC, asn1Object[0])
+ self.assertTrue(asn1Object[1].isValue)
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.aes_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate,
+ asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3565.id_aes256_CBC, asn1Object[0])
+
+ aes_iv = univ.OctetString(hexValue='108996ba850e3f0339993bb5878a0e37')
+
+ self.assertEqual(aes_iv, asn1Object[1])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3657.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3657.py
new file mode 100644
index 0000000000..12b49dc884
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3657.py
@@ -0,0 +1,167 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3657
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFfwYJKoZIhvcNAQcDoIIFcDCCBWwCAQIxU6JRAgEEMCMEECBlcTFnxBsPlsug
+4KOCj78YDzIwMTkwOTEyMTIwMDAwWjANBgsqgwiMmks9AQEDAgQYS3mK9jQmvth1
+iuBV8PEa89ICvmoomJCvMIIFEAYJKoZIhvcNAQcBMB8GCyqDCIyaSz0BAQECBBBC
+T0dVU0lWX0JPR1VTSVYhgIIE4HPHsXoYyQ/4LRDiK4OrSuRJmmuDye5fH/hLcgw/
+330Gsl1QBs9jF1CEDBM5ki657K/TRMl78Rqb3LIu5lfLQ8WVNGLsoQPwvxzIexGg
+ShtYYwu8TcPiESFMa20SWpDEG8zFlmCbqQuc0buPxnvYviVThoBEthNC+S2Umed8
+JpxwNKJbNTx5dxd2dkDNwpHsKgNzT9cGl0NF129Dspehqtdge5LJu3rj1gNynLRI
+32AQ+pwU+sEHee6wDHhU5OWnHlndkm/9MTKY3woOhs1/KQFlRFPC6k71ZpUlncd3
+93wLVIImfoMe4zWPAOnbpZ/M7zEJ95rTwwmudBs0qwMfCa3h0Vkg69w6fBHyc1IH
+8u3VpSPcbOW4dUzJBDJPgB1kObAV02ZA4FQEuZtZiG13u3c7sSrHxsY1rtXssvSe
++5rThqPWgDqmH8b/yPGEHIFh03kHCDt/UZrdkLCO7a0WhCdY4I9hNU6OYEQmyEFs
+0LsqEumn34Lv/XcD1wgLdPtF65zub4Wil/0Vpu73vIWLIk9LyNIXQSd6w0ZHUvVS
++jZZ1zrqIQKhKvG97NpKAYoHa4tOdoXHgBJUxw/uAOKkQ4jC5RS5UKqCZaQcArRD
+2bCEEsutiuyf06MMcWm+RaBY1EwuX+/cT0D6CsWHYFAeQHgLuR4HVk5+PVKoOL/7
+KUz0jUU5gzFVcmfaocyX5A6R90yggBObefcOIEj3v+5fjHkppfTvi/R03fVZ4Nyw
+WyHbN7kOHHy8skJpcvNaqSY0dfkb8KOOoTptJH9rCBYtFlC5j/18y8Om9Um4h3/4
+6hYO0xU8izJDzDzJnO/5KS5mGyskweIp3mrE1C/mw68LvrksxQI03CPtbM+FqOKe
+0VcsAQykiOTnG3d4jLeF1iVrc9CgV+pwc5VfgQUwsGhjAFOCKTwWDrr3Je0yVsfz
+gwY2zuM5uE/+usOSBt7SqbFTLOCba4fJrVVwi0wZig88owVTdl/ACxl2qyLUYC2u
+5PNJSY6kx8Cgo4gDJk/3oeuys8JqgaufvKybl5GsdDaF3A7usZAjDR1EAWHZ7JGi
+agtqbvISLD0zq4e4nmEhLnIRb7u5SNBPqe8qVuuQjIsvmP0ZuTlnh84ypFOQGz7c
+fzHtr6UEQoGj8HImbp8diL4tflmFAVNaRjQzu18+2vFB2w1EZIe2/uNLs9ne2EIy
+oK2Qb+mMCwJsNS0xOG0/TzPZ+y0Tp1/LupLHovMosPIGXlbvqZVh2xftDvbIigIM
+WZQZ2tFxYD6Xc4zA00v7H0yGF1pRY+3GpobJkw0Y6ORtgdtdnr2ipioIeQCy0hUp
+POmTeSr0L3H7KfNY7yQgZg0ra7FIEjM8tDoNqrhznetYUU1ZWM8Lyb3zMxxinSFs
+GFGx2TiqPyixJNxN+lPT5D6GRhC9mXgh+BfVod5oINJJwXxJpT5xnsZgW8ujVxiu
+1Vt5esXCZaXTGlyjVTH5dmCvJP9+B8n7dOimmCxCbMQKpNaZixJhoXWQtTgKqL1Q
+f9WoEs6TDGgfTllqjbE4w3O7ZA7fAWe9jbAGwiPV5rF/NVvjaj2+ibtXbSNPW59d
+dy1/2WzknVYnEHF0qZdBZ02Wh4ByXUC3FNvDu8hRTm5aq73DCqXLXUwNU8BvS1xB
+bbRq5aYI2Rd3naNAns9dHqSvkg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_wrap, kwa['algorithm'])
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_cbc, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], asn1Spec=rfc3657.Camellia_IV())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = rfc3657.Camellia_IV(hexValue='424f47555349565f424f475553495621')
+ self.assertEqual(iv, param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
+
+ kekri = asn1Object['content']['recipientInfos'][0]['kekri']
+ kwa = kekri['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_wrap, kwa['algorithm'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_cbc, cea['algorithm'])
+
+ iv = rfc3657.Camellia_IV(hexValue='424f47555349565f424f475553495621')
+ self.assertEqual(iv, cea['parameters'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MGYwDwYLKoMIjJpLPQEBAQIFADAPBgsqgwiMmks9AQEBAwUAMA8GCyqDCIyaSz0B
+AQEEBQAwDwYLKoMIjJpLPQEBAwIFADAPBgsqgwiMmks9AQEDAwUAMA8GCyqDCIya
+Sz0BAQMEBQA=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ alg_oid_list = [
+ rfc3657.id_camellia128_cbc,
+ rfc3657.id_camellia192_cbc,
+ rfc3657.id_camellia256_cbc,
+ rfc3657.id_camellia128_wrap,
+ rfc3657.id_camellia192_wrap,
+ rfc3657.id_camellia256_wrap,
+ ]
+
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ param = der_encoder(rfc3657.CamelliaSMimeCapability(""))
+ count = 0
+ for cap in asn1Object:
+ self.assertEqual(cap['parameters'], param)
+ self.assertTrue(cap['capabilityID'] in alg_oid_list)
+ count += 1
+
+ self.assertEqual(count, 6)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ param = rfc3657.CamelliaSMimeCapability("")
+ count = 0
+ for cap in asn1Object:
+ self.assertTrue(cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys())
+ self.assertEqual(cap['parameters'], param)
+ count += 1
+
+ self.assertEqual(count, 6)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3709.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3709.py
new file mode 100644
index 0000000000..dcab4b6e8c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3709.py
@@ -0,0 +1,194 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3709
+
+
+class CertificateExtnWithUrlTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC9zCCAn2gAwIBAgIJAKWzVCgbsG46MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNTE0MTAwMjAwWhcNMjAwNTEzMTAwMjAwWjBlMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
+Z2lsIFNlY3VyaXR5IExMQzEaMBgGA1UEAxMRbWFpbC52aWdpbHNlYy5jb20wdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAATwUXZUseiOaqWdrClDCMbp9YFAM87LTmFirygp
+zKDU9cfqSCg7zBDIphXCwMcS9zVWDoStCbcvN0jw5CljHcffzpHYX91P88SZRJ1w
+4hawHjOsWxvM3AkYgZ5nfdlL7EajggEdMIIBGTALBgNVHQ8EBAMCB4AwQgYJYIZI
+AYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9y
+IGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYD
+VR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwgYUGCCsGAQUFBwEMBHkwd6J1
+oHMwcTBvMG0WCWltYWdlL3BuZzAzMDEwDQYJYIZIAWUDBAIBBQAEIJtBNrMSSNo+
+6Rwqwctmcy0qf68ilRuKEmlf3GLwGiIkMCsWKWh0dHA6Ly93d3cudmlnaWxzZWMu
+Y29tL3ZpZ2lsc2VjX2xvZ28ucG5nMAoGCCqGSM49BAMDA2gAMGUCMGhfLH4kZaCD
+H43A8m8mHCUpYt9unT0qYu4TCMaRuOTYEuqj3qtuwyLcfAGuXKp/oAIxAIrPY+3y
+Pj22pmfmQi5w21UljqoTj/+lQLkU3wfy5BdVKBwI0GfEA+YL3ctSzPNqAA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc3709.id_pe_logotype:
+ s = extn['extnValue']
+ logotype, rest = der_decoder(s, rfc3709.LogotypeExtn())
+
+ self.assertFalse(rest)
+ self.assertTrue(logotype.prettyPrint())
+ self.assertEqual(s, der_encoder(logotype))
+
+ ids = logotype['subjectLogo']['direct']['image'][0]['imageDetails']
+
+ self.assertEqual( "image/png", ids['mediaType'])
+
+ expected = "http://www.vigilsec.com/vigilsec_logo.png"
+ self.assertEqual(expected, ids['logotypeURI'][0])
+
+ self.assertIn(rfc3709.id_pe_logotype, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+
+class CertificateExtnWithDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIIJJDCCCAygAwIBAgIRAPIGo/5ScWbpAAAAAFwQBqkwDQYJKoZIhvcNAQELBQAw
+gbkxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
+Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
+MjAxOCBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxLTAr
+BgNVBAMTJEVudHJ1c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gVk1DMTAeFw0x
+OTA4MzAxNDMyMzlaFw0yMDAyMjUxNTAyMzZaMIIBjTEOMAwGA1UEERMFMTAwMTcx
+CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazERMA8GA1UEBxMITmV3IFlv
+cmsxGDAWBgNVBAkTDzI3MCBQYXJrIEF2ZW51ZTETMBEGCysGAQQBgjc8AgEDEwJV
+UzEZMBcGCysGAQQBgjc8AgECEwhEZWxhd2FyZTEfMB0GA1UEChMWSlBNb3JnYW4g
+Q2hhc2UgYW5kIENvLjEdMBsGA1UEDxMUUHJpdmF0ZSBPcmdhbml6YXRpb24xNzA1
+BgNVBAsTLkpQTUMgRmlyc3QgVmVyaWZpZWQgTWFyayBDZXJ0aWZpY2F0ZSBXb3Js
+ZHdpZGUxDzANBgNVBAUTBjY5MTAxMTEXMBUGCisGAQQBg55fAQQTBzIwMTUzODkx
+EjAQBgorBgEEAYOeXwEDEwJVUzEmMCQGCisGAQQBg55fAQITFmh0dHBzOi8vd3d3
+LnVzcHRvLmdvdi8xHzAdBgNVBAMTFkpQTW9yZ2FuIENoYXNlIGFuZCBDby4wggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCNLY+etlX06q1MxA1VT/P20h1i
+eFGTzX4fqSQNG+ypmjNfLa8YXraO1v1hahenkRUWrVPW0Hq3zKNJcCDmosox6+tB
+59u0b1xgN8y8D05AEC7qoVVdbaWKENMxCN4CDfST6d3YOqApjqEFAGZ71s39tRRG
+kmWGJb4jKXcUX8FWV8w/vjKrpipZ8JsX2tuOp2uxFLkmi+V7gvN8tpbHUipP5K7L
+190VOBytSWPudXefnYG3UWRfwah7Fq1bKYT/cCwStUm8XlfA8nUumeVsAiyC6phs
+adn26MYiSddsBU08TGthmunLAO0+shaBy6jHYZxMa37S67vVlDpxbeF+TPVXAgMB
+AAGjggROMIIESjATBgorBgEEAdZ5AgQDAQH/BAIFADCCArAGCCsGAQUFBwEMBIIC
+ojCCAp6iggKaoIICljCCApIwggKOMIICihYNaW1hZ2Uvc3ZnK3htbDAzMDEwDQYJ
+YIZIAWUDBAIBBQAEIBnwW6ChGgWWIRn3qn/xGAOlhDflA3z5jhZcZTNDlxF5MIIC
+QhaCAj5kYXRhOmltYWdlL3N2Zyt4bWw7YmFzZTY0LEg0c0lBQUFBQUFBQUFJV1Iz
+V3JqTUJCR3I1dW5tR3F2Rml4NUpQODBObkZLRTVhbTRFSmhJYmVMazZpT1dhOXRa
+TWQyOXVrN2NsTG9SV25CMHNENGNPYVR0TGdmLzVYUWE5TVdkWlV3S1pDQnJ2YjFv
+YWp5aEoyNlZ6NW45OHZaNHBaemVOU1ZObGxYbXhnZUR2Vk93MU5abnRwdWFvRlNB
+b1YwNFBmMkVYNk5UVzA2ZUNsUE9YK3FRRXpON1dWR0RLRkFoTldwS0ErQVB3RTRK
+MzNiNXg5REtBYTdyTlV2cG40dFNwMndycWpPRElwRHd0THNyTTBmeVlCaVYyM0Nq
+bDNYeEs0N0RJTVlQRkdiM0ZXSTZKTHZpc1JqV1ZSL1B3TmxGRVh1OUpmTmJtQk1H
+RFlqZy9PMTlvVWVWclh0QWtJWTBEY0o0N2JKOXBTb01iclZwdGVNd3VmTDJjMml5
+Ym9qVU5veVlUOFFnL1VxWWtCNW41VW5QQWZYU2pub0tPbEl1eW5oOVRJVTh1Z3JF
+YVMrVC9lRzZRWDh6OXl2YkdIZ0VLZjJ5S1h3dU9Sa2VsOGJQeFJoUHhtSnN0TDBT
+bi9qOUtXWU8yR3dsM2EremNhbmhOYTV0YzZORkdHcVVFUUVwVmY0R3lVNnhOMnRx
+WGgwWXQrM1BpcEhlK2l0cElRMGg0VHBoWnRrQ3plM0d6M2NjdllHbkp0cjZKVUNB
+QUE9MCIGA1UdEQQbMBmCF2V4Y2hhZGRldi5sYWJtb3JnYW4uY29tMBMGA1UdJQQM
+MAoGCCsGAQUFBwMfMA4GA1UdDwEB/wQEAwIHgDBmBggrBgEFBQcBAQRaMFgwIwYI
+KwYBBQUHMAGGF2h0dHA6Ly9vY3NwLmVudHJ1c3QubmV0MDEGCCsGAQUFBzAChiVo
+dHRwOi8vYWlhLmVudHJ1c3QubmV0L3ZtYzEtY2hhaW4uY2VyMDIGA1UdHwQrMCkw
+J6AloCOGIWh0dHA6Ly9jcmwuZW50cnVzdC5uZXQvdm1jMWNhLmNybDBPBgNVHSAE
+SDBGMDYGCmCGSAGG+mwKAQswKDAmBggrBgEFBQcCARYaaHR0cDovL3d3dy5lbnRy
+dXN0Lm5ldC9ycGEwDAYKKwYBBAGDnl8BATAfBgNVHSMEGDAWgBSLtjl20DSQpj9i
+4WTqPrz0fEahczAdBgNVHQ4EFgQUxAJ+yoDhzpPUzAPWKBYxg108dU0wCQYDVR0T
+BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAnqdB/vcwxFcxAlyCK0W5HOthXUdXRg9a
+GwPDupqmLq2rKfyysZXonJJfr8jqO0f3l6TWTTJlXHljAwwXMtg3T3ngLyEzip5p
+g0zH7s5eXjmWRhOeuHt21o611bXDbUNFTF0IpbYBTgOwAz/+k3XLVehf8dW7Y0Lr
+VkzxJ6U82NxmqjaAnkm+H127x5/jPAr4LLD4gZfqFaHzw/ZLoS+fXFGs+dpuYE4s
+n+xe0msYMu8qWABiMGA+MCKl45Dp5di+c2fyXtKyQ3rKI8XXZ0nN4bXK7DZd+3E3
+kbpmR6cDliloU808Bi/erMkrfUHRoZ2d586lkmwkLcoDkJ/yPD+Jhw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc3709.id_pe_logotype:
+ s = extn['extnValue']
+ logotype, rest = der_decoder(s, rfc3709.LogotypeExtn())
+ self.assertFalse(rest)
+
+ self.assertTrue(logotype.prettyPrint())
+ self.assertEqual(s, der_encoder(logotype))
+
+ ids = logotype['subjectLogo']['direct']['image'][0]['imageDetails']
+
+ self.assertEqual("image/svg+xml", ids['mediaType'])
+ self.assertEqual(
+ "data:image/svg+xml;base64", ids['logotypeURI'][0][0:25])
+
+ self.assertIn(rfc3709.id_pe_logotype, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3739.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3739.py
new file mode 100644
index 0000000000..3c4ce3a4df
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3739.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import error
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3739
+
+
+class QCCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIFLTCCBBWgAwIBAgIMVRaIE9MInBkG6aUaMA0GCSqGSIb3DQEBCwUAMHMxCzAJ
+BgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRowGAYDVQQLExFG
+b3IgRGVtbyBVc2UgT25seTEtMCsGA1UEAxMkR2xvYmFsU2lnbiBEZW1vIElzc3Vp
+bmcgQ0EgLSBTdGFnaW5nMB4XDTE4MDYxNTA1MTgxNFoXDTE5MDYxNjA1MTgxNFow
+WjELMAkGA1UEBhMCQkUxGTAXBgNVBAMTEFRlc3QgQ2VydGlmaWNhdGUxEjAQBgNV
+BAUTCTEyMzQ1Njc4OTENMAsGA1UEKhMEVGVzdDENMAsGA1UEBBMEVGVzdDCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL/tsE2EIVQhpkZU5XmFR6FAq9ou
+k8FWbyku5M7S2JT3c6OFMQiVgu6nfqdsl4rzojhUXQtMOnO7sUqcIedmwqRIR/jd
+X+ELqGGRHodZt94Tjf6Qgn2Wv/EgG0EIwsOAisGKr4qTNs6ZmVMqQ3I4+l9Ik5eM
+whr9JfrhSxrXDzoh8Prc9lNjQbk+YKXw0zLmVxW7GAu9zTr98GF+HapIhNQbvqOc
+fHoY5svla5MqoRXagfrw/w2fSaO/LT+AFsZYODVpvCg/X3xsknoG7TDIeZ8Hmlgq
+Mvg9l9VA2JbSv1C38SeOm0Hfv0l0fspZPSrtmbYlvBtQoO1X/GhQXvE7UvMCAwEA
+AaOCAdgwggHUMA4GA1UdDwEB/wQEAwIGQDCBkQYIKwYBBQUHAQEEgYQwgYEwQQYI
+KwYBBQUHMAKGNWh0dHA6Ly9zZWN1cmUuc3RhZ2luZy5nbG9iYWxzaWduLmNvbS9n
+c2RlbW9zaGEyZzMuY3J0MDwGCCsGAQUFBzABhjBodHRwOi8vb2NzcDIuc3RhZ2lu
+Zy5nbG9iYWxzaWduLmNvbS9nc2RlbW9zaGEyZzMwWQYDVR0gBFIwUDBDBgsrBgEE
+AaAyASgjAjA0MDIGCCsGAQUFBwIBFiZodHRwczovL3d3dy5nbG9iYWxzaWduLmNv
+bS9yZXBvc2l0b3J5LzAJBgcEAIvsQAECMAkGA1UdEwQCMAAwQwYDVR0fBDwwOjA4
+oDagNIYyaHR0cDovL2NybC5zdGFnaW5nLmdsb2JhbHNpZ24uY29tL2dzZGVtb3No
+YTJnMy5jcmwwLQYIKwYBBQUHAQMEITAfMAgGBgQAjkYBATATBgYEAI5GAQYwCQYH
+BACORgEGATAUBgNVHSUEDTALBgkqhkiG9y8BAQUwHQYDVR0OBBYEFNRFutzxY2Jg
+qilbYWe86em0QQC+MB8GA1UdIwQYMBaAFBcYifCc7R2iN5qLgGGRDT/RWZN6MA0G
+CSqGSIb3DQEBCwUAA4IBAQCMJeiaEAu45PetKSoPEnJ5t4MYr4dUl/HdnV13WEUW
+/34yHDGuubTFqJ6sM7P7dO25kdNOr75mR8yc0+gsGJv5K5C7LXfk36ofDlVQm0RJ
+3LTRhCvnJIzvuc5R52QW3MvB0EEPd1sfkpGgyTdK8zYZkwCXrWgMuPhBG/kgTiN0
+65qitL/WfkcX9SXmsYuV1a3Tsxz+6/rTtxdZfXSJgaVCOWHGyXCvpAQM/4eH5hSj
+UfTNwEMrE4sw4k9F90Sp8Wx24sMRDTIpnEXh3ceZSzBN2OYCIO84GaiZDpSvvkYN
+Iwtui+Wql/HveMqbAtXkiv9GDXYZms3HBoIaCVuDaUf6
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc3739.id_pe_qcStatements:
+ s = extn['extnValue']
+ qc_stmts, rest = der_decoder(s, rfc3739.QCStatements())
+ self.assertFalse(rest)
+ self.assertTrue(qc_stmts.prettyPrint())
+ self.assertEqual(s, der_encoder(qc_stmts))
+
+ for qcs in qc_stmts:
+ count += 1
+
+ self.assertEqual(2, count)
+
+ def testExtensionsMap(self):
+
+ class SequenceOfOID(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+ openTypesMap = {
+ univ.ObjectIdentifier('0.4.0.1862.1.6'): SequenceOfOID()
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ found_qc_stmt_oid = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc3739.id_pe_qcStatements:
+ qc_stmts, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ openTypes=openTypesMap,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(qc_stmts.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(qc_stmts))
+
+ for qcs in qc_stmts:
+ count += 1
+ if qcs['statementId'] in openTypesMap.keys():
+ for oid in qcs['statementInfo']:
+ if oid == univ.ObjectIdentifier('0.4.0.1862.1.6.1'):
+ found_qc_stmt_oid = True
+
+ self.assertEqual(2, count)
+ self.assertTrue(found_qc_stmt_oid)
+
+class WithComponentsTestCase(unittest.TestCase):
+
+ def testDerCodec(self):
+ si = rfc3739.SemanticsInformation()
+ self.assertRaises(error.PyAsn1Error, der_encoder, si)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3770.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3770.py
new file mode 100644
index 0000000000..667ab249fe
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3770.py
@@ -0,0 +1,95 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3770
+
+
+class CertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICqzCCAjCgAwIBAgIJAKWzVCgbsG4/MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNzE5MTk0MjQ3WhcNMjAwNzE4MTk0MjQ3WjBjMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
+Z2lsIFNlY3VyaXR5IExMQzEYMBYGA1UEAxMPZWFwLmV4YW1wbGUuY29tMHYwEAYH
+KoZIzj0CAQYFK4EEACIDYgAEMMbnIp2BUbuyMgH9HhNHrh7VBy7ql2lBjGRSsefR
+Wa7+vCWs4uviW6On4eem5YoP9/UdO7DaIL+/J9/3DJHERI17oFxn+YWiE4JwXofy
+QwfSu3cncVNMqpiDjEkUGGvBo4HTMIHQMAsGA1UdDwQEAwIHgDBCBglghkgBhvhC
+AQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55
+IHB1cnBvc2UuMB0GA1UdDgQWBBSDjPGr7M742rsE4oQGwBvGvllZ+zAfBgNVHSME
+GDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAeBggrBgEFBQcBDQQSMBAEB0V4YW1w
+bGUEBUJvZ3VzMB0GA1UdJQQWMBQGCCsGAQUFBwMOBggrBgEFBQcDDTAKBggqhkjO
+PQQDAwNpADBmAjEAmCPZnnlUQOKlcOIIOgFrRCkOqO0ESs+dobYwAc2rFCBtQyP7
+C3N00xkX8WZZpiAZAjEAi1Z5+nGbJg5eJTc8fwudutN/HNwJEIS6mHds9kfcy26x
+DAlVlhox680Jxy5J8Pkx
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sig_alg = asn1Object['tbsCertificate']['signature']
+
+ self.assertEqual(rfc5480.ecdsa_with_SHA384, sig_alg['algorithm'])
+ self.assertFalse(sig_alg['parameters'].hasValue())
+
+ spki_alg = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, spki_alg['algorithm'])
+ self.assertEqual(
+ rfc5480.secp384r1, spki_alg['parameters']['namedCurve'])
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ if extn['extnID'] == rfc3770.id_pe_wlanSSID:
+ self.assertIn(str2octs('Example'), extnValue)
+
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(rfc3770.id_kp_eapOverLAN, extnValue)
+ self.assertIn(rfc3770.id_kp_eapOverPPP, extnValue)
+
+ self.assertIn(rfc3770.id_pe_wlanSSID, extn_list)
+ self.assertIn(rfc5280.id_ce_extKeyUsage, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3779.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3779.py
new file mode 100644
index 0000000000..652826edde
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3779.py
@@ -0,0 +1,98 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3779
+
+
+class CertificateExtnTestCase(unittest.TestCase):
+ pem_text = """\
+MIIECjCCAvKgAwIBAgICAMkwDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAxMLcmlw
+ZS1uY2MtdGEwIBcNMTcxMTI4MTQzOTU1WhgPMjExNzExMjgxNDM5NTVaMBYxFDAS
+BgNVBAMTC3JpcGUtbmNjLXRhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA0URYSGqUz2myBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZ
+xIgPPV2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNcKrm
+it8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6Qc1ZHMFVFb38
+5IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXubASQ9KUWqJ0+Ot3QCXr4LX
+ECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk1uQKAyHUbn/tXvP8lrjAibGzVsXDT2
+L0x4Edx+QdixPgOji3gBMyL2VwIDAQABo4IBXjCCAVowHQYDVR0OBBYEFOhVKx/W
+0aT35ATG2OVoDR68Fj/DMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG
+MIGxBggrBgEFBQcBCwSBpDCBoTA8BggrBgEFBQcwCoYwcnN5bmM6Ly9ycGtpLnJp
+cGUubmV0L3JlcG9zaXRvcnkvcmlwZS1uY2MtdGEubWZ0MDIGCCsGAQUFBzANhiZo
+dHRwczovL3JyZHAucmlwZS5uZXQvbm90aWZpY2F0aW9uLnhtbDAtBggrBgEFBQcw
+BYYhcnN5bmM6Ly9ycGtpLnJpcGUubmV0L3JlcG9zaXRvcnkvMBgGA1UdIAEB/wQO
+MAwwCgYIKwYBBQUHDgIwJwYIKwYBBQUHAQcBAf8EGDAWMAkEAgABMAMDAQAwCQQC
+AAIwAwMBADAhBggrBgEFBQcBCAEB/wQSMBCgDjAMMAoCAQACBQD/////MA0GCSqG
+SIb3DQEBCwUAA4IBAQAVgJjrZ3wFppC8Yk8D2xgzwSeWVT2vtYq96CQQsjaKb8nb
+eVz3DwcS3a7RIsevrNVGo43k3AGymg1ki+AWJjvHvJ+tSzCbn5+X6Z7AfYTf2g37
+xINVDHru0PTQUargSMBAz/MBNpFG8KThtT7WbJrK4+f/lvx0m8QOlYm2a17iXS3A
+GQJ6RHcq9ADscqGdumxmMMDjwED26bGaYdmru1hNIpwF//jVM/eRjBFoPHKFlx0k
+Ld/yoCQNmx1kW+xANx4uyWxi/DYgSV7Oynq+C60OucW+d8tIhkblh8+YfrmukJds
+V+vo2L72yerdbsP9xjqvhZrLKfsLZjYK4SdYYthi
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc3779.id_pe_ipAddrBlocks:
+ s = extn['extnValue']
+ addr_blocks, rest = der_decoder.decode(s, rfc3779.IPAddrBlocks())
+ self.assertFalse(rest)
+ self.assertTrue(addr_blocks.prettyPrint())
+ self.assertEqual(s, der_encoder.encode(addr_blocks))
+
+ if extn['extnID'] == rfc3779.id_pe_autonomousSysIds:
+ s = extn['extnValue']
+ as_ids, rest = der_decoder.decode(s, rfc3779.ASIdentifiers())
+ self.assertFalse(rest)
+ self.assertTrue(as_ids.prettyPrint())
+ self.assertEqual(s, der_encoder.encode(as_ids))
+
+ self.assertIn(rfc3779.id_pe_ipAddrBlocks, extn_list)
+ self.assertIn(rfc3779.id_pe_autonomousSysIds, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if (extn['extnID'] == rfc3779.id_pe_ipAddrBlocks or
+ extn['extnID'] == rfc3779.id_pe_autonomousSysIds):
+ extnValue, rest = der_decoder.decode(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+ self.assertEqual(extn['extnValue'], der_encoder.encode(extnValue))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3820.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3820.py
new file mode 100644
index 0000000000..0895b286e4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3820.py
@@ -0,0 +1,78 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3820
+
+
class ProxyCertificateTestCase(unittest.TestCase):
    """RFC 3820 proxy certificate carrying the proxyCertInfo extension."""

    pem_text = """\
MIID9DCCAtygAwIBAgIEDODd4TANBgkqhkiG9w0BAQUFADCBjTESMBAGCgmSJomT
8ixkARkWAm5sMRcwFQYKCZImiZPyLGQBGRYHZS1pbmZyYTEaMBgGA1UEChMRVHJh
aW5pbmcgU2VydmljZXMxDjAMBgNVBAsTBXVzZXJzMRowGAYDVQQLExFTZWN1cml0
eSBUcmFpbmluZzEWMBQGA1UEAxMNUGlldGplIFB1ayA0MjAeFw0xOTExMjcwODMz
NDZaFw0xOTExMjcyMDM4NDZaMIGhMRIwEAYKCZImiZPyLGQBGRYCbmwxFzAVBgoJ
kiaJk/IsZAEZFgdlLWluZnJhMRowGAYDVQQKExFUcmFpbmluZyBTZXJ2aWNlczEO
MAwGA1UECxMFdXNlcnMxGjAYBgNVBAsTEVNlY3VyaXR5IFRyYWluaW5nMRYwFAYD
VQQDEw1QaWV0amUgUHVrIDQyMRIwEAYDVQQDEwkyMTYwNjM0NTcwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCu2b1j1XQXAgNazmTtdp6jjzvNQT8221/c
dSIv2ftxr3UochHbazTfoR7wDT5PGlp2v99M0kZQvAEJ96CJpBDte4pwio7xHK3w
s5h7lH3W2ydrxAMSnZp0NHxyo3DNenTV5HavGjraOZDLt/k1aPJ8C68CBbrGDQxH
wzTs21Z+7lAy4C1ZNyOhkNF4qD5qy9Q2SHOPD+uc2QZE8IadZyxbeW/lEWHjESI1
5y55oLZhe3leb2NswvppgdwM8KW4Pbtya6mDKGH4e1qQfNfxsqlxbIBr4UaM8iSM
5BhJhe7VCny2iesGCJWz3NNoTJKBehN5o2xs7+fHv+sOW2Yuc3MnAgMBAAGjRjBE
MBMGA1UdJQQMMAoGCCsGAQUFBwMCMA4GA1UdDwEB/wQEAwIEsDAdBggrBgEFBQcB
DgEB/wQOMAwwCgYIKwYBBQUHFQEwDQYJKoZIhvcNAQEFBQADggEBAJbeKv3yQ9Yc
GHT4r64gVkKd4do7+cRS9dfWg8pcLRn3aBzTCBIznkg+OpzjteOJCuw6AxDsDPmf
n0Ms7LaAqegW8vcYgcZTxeABE5kgg5HTMUSMo39kFNTYHlNgsVfnOhpePnWX+e0Y
gPpQU7w1npAhr23lXn9DNWgWMMT6T3z+NngcJ9NQdEee9D4rzY5Oo9W/2OAPuMne
w5dGF7wVCUBRi6vrMnWYN8E3sHiFDJJrOsPWZzjRCa/W3N9A/OdgjitKQc3X4dlS
tP2J7Yxv/B/6+VxVEa9WtVXsm/wJnhwvICBscB1/4WkI0PfJ7Nh4ZqQplPdlDEKe
FOuri/fKBe0=
"""

    def setUp(self):
        self.asn1Spec = rfc5280.Certificate()

    def testDerCodec(self):
        """Decode the certificate and verify the proxyCertInfo extension."""
        substrate = pem.readBase64fromText(self.pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        found_ppl = False
        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] == rfc3820.id_pe_proxyCertInfo:
                # assertIn gives a useful failure message; the original
                # wrapped the test in assertTrue(x in map.keys()).
                self.assertIn(
                    rfc3820.id_pe_proxyCertInfo,
                    rfc5280.certificateExtensionsMap)
                pci, rest = der_decoder(
                    extn['extnValue'],
                    asn1Spec=rfc5280.certificateExtensionsMap[rfc3820.id_pe_proxyCertInfo])
                self.assertFalse(rest)
                self.assertTrue(pci.prettyPrint())
                self.assertEqual(extn['extnValue'], der_encoder(pci))

                # This proxy certificate uses the inheritAll policy language.
                self.assertEqual(rfc3820.id_ppl_inheritAll, pci['proxyPolicy']['policyLanguage'])
                found_ppl = True

        self.assertTrue(found_ppl)
+
+
# Collect every TestCase class defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Exit non-zero when any test fails so CI can detect the failure.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc3852.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc3852.py
new file mode 100644
index 0000000000..56b25ccc56
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc3852.py
@@ -0,0 +1,128 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3852
+from pyasn1_modules import rfc6402
+
+
class ContentInfoTestCase(unittest.TestCase):
    # CMS ContentInfo containing SignedData that encapsulates an
    # RFC 6402 PKIData certificate-management request.
    pem_text = """\
MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
"""

    def setUp(self):
        self.asn1Spec = rfc3852.ContentInfo()

    def testDerCodec(self):
        """Peel the CMS layers one content type at a time.

        Walks ContentInfo -> SignedData -> PKIData, checking at each layer
        that decoding consumes the whole substrate, that re-encoding
        round-trips, and that every algorithm identifier uses one of the
        expected OIDs with NULL parameters.
        """
        substrate = pem.readBase64fromText(self.pem_text)

        # ASN.1 spec to decode each recognized content type with.
        layers = {
            rfc3852.id_ct_contentInfo: rfc3852.ContentInfo(),
            rfc3852.id_signedData: rfc3852.SignedData(),
            rfc6402.id_cct_PKIData: rfc6402.PKIData()
        }

        # How to find the content type of the next inner layer (None ends
        # the walk) ...
        getNextLayer = {
            rfc3852.id_ct_contentInfo: lambda x: x['contentType'],
            rfc3852.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
            rfc6402.id_cct_PKIData: lambda x: None
        }

        # ... and where the substrate for that next layer lives.
        getNextSubstrate = {
            rfc3852.id_ct_contentInfo: lambda x: x['content'],
            rfc3852.id_signedData: lambda x: x['encapContentInfo']['eContent'],
            rfc6402.id_cct_PKIData: lambda x: None
        }

        # SHA-1, rsaEncryption, sha1WithRSAEncryption, sha256WithRSAEncryption.
        alg_oids = (
            univ.ObjectIdentifier('1.3.14.3.2.26'),
            univ.ObjectIdentifier('1.2.840.113549.1.1.1'),
            univ.ObjectIdentifier('1.2.840.113549.1.1.5'),
            univ.ObjectIdentifier('1.2.840.113549.1.1.11'),
        )

        # All of the algorithm identifiers above carry DER-encoded NULL
        # parameters in this message.
        encoded_null = der_encoder(univ.Null(""))

        next_layer = rfc3852.id_ct_contentInfo

        # Counts every algorithm identifier inspected across all layers.
        count = 0

        while next_layer:
            asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])

            self.assertFalse(rest)
            self.assertTrue(asn1Object.prettyPrint())
            self.assertEqual(substrate, der_encoder(asn1Object))

            if next_layer == rfc3852.id_signedData:
                for d in asn1Object['digestAlgorithms']:
                    self.assertIn(d['algorithm'], alg_oids)
                    self.assertEqual(encoded_null, d['parameters'])
                    count += 1

                for si in asn1Object['signerInfos']:
                    self.assertIn(si['digestAlgorithm']['algorithm'], alg_oids)
                    self.assertEqual(
                        encoded_null, si['digestAlgorithm']['parameters'])
                    count += 1

                    self.assertIn(si['signatureAlgorithm']['algorithm'], alg_oids)
                    self.assertEqual(
                        encoded_null, si['signatureAlgorithm']['parameters'])
                    count += 1

            if next_layer == rfc6402.id_cct_PKIData:
                for req in asn1Object['reqSequence']:
                    cr = req['tcr']['certificationRequest']
                    self.assertIn(cr['signatureAlgorithm']['algorithm'], alg_oids)
                    self.assertEqual(
                        encoded_null, cr['signatureAlgorithm']['parameters'])
                    count += 1

                    cri_spki = cr['certificationRequestInfo']['subjectPublicKeyInfo']
                    self.assertIn(cri_spki['algorithm']['algorithm'], alg_oids)
                    self.assertEqual(
                        encoded_null, cri_spki['algorithm']['parameters'])
                    count += 1

            substrate = getNextSubstrate[next_layer](asn1Object)
            next_layer = getNextLayer[next_layer](asn1Object)

        # Five algorithm identifiers in total across the three layers.
        self.assertEqual(5, count)
+
+
# Collect every TestCase class defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Exit non-zero when any test fails so CI can detect the failure.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4010.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4010.py
new file mode 100644
index 0000000000..7474b9d849
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4010.py
@@ -0,0 +1,136 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4010
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
class EnvelopedDataTestCase(unittest.TestCase):
    """CMS EnvelopedData using the RFC 4010 SEED algorithms."""

    env_data_pem_text = """\
MIIFewYJKoZIhvcNAQcDoIIFbDCCBWgCAQIxUqJQAgEEMCMEEKBBI2KxDUPS5TCo
RCEDJo4YDzIwMTkwOTEyMTIwMDAwWjAMBgoqgxqMmkQHAQEBBBipFE2DxCLAx2Og
E53Jt21V8kAoscU7K3wwggUNBgkqhkiG9w0BBwEwHAYIKoMajJpEAQQEEEJPR1VT
SVZfQk9HVVNJViGAggTgc8exehjJD/gtEOIrg6tK5Emaa4PJ7l8f+EtyDD/ffQay
XVAGz2MXUIQMEzmSLrnsr9NEyXvxGpvcsi7mV8tDxZU0YuyhA/C/HMh7EaBKG1hj
C7xNw+IRIUxrbRJakMQbzMWWYJupC5zRu4/Ge9i+JVOGgES2E0L5LZSZ53wmnHA0
ols1PHl3F3Z2QM3CkewqA3NP1waXQ0XXb0Oyl6Gq12B7ksm7euPWA3KctEjfYBD6
nBT6wQd57rAMeFTk5aceWd2Sb/0xMpjfCg6GzX8pAWVEU8LqTvVmlSWdx3f3fAtU
giZ+gx7jNY8A6duln8zvMQn3mtPDCa50GzSrAx8JreHRWSDr3Dp8EfJzUgfy7dWl
I9xs5bh1TMkEMk+AHWQ5sBXTZkDgVAS5m1mIbXe7dzuxKsfGxjWu1eyy9J77mtOG
o9aAOqYfxv/I8YQcgWHTeQcIO39Rmt2QsI7trRaEJ1jgj2E1To5gRCbIQWzQuyoS
6affgu/9dwPXCAt0+0XrnO5vhaKX/RWm7ve8hYsiT0vI0hdBJ3rDRkdS9VL6NlnX
OuohAqEq8b3s2koBigdri052hceAElTHD+4A4qRDiMLlFLlQqoJlpBwCtEPZsIQS
y62K7J/Towxxab5FoFjUTC5f79xPQPoKxYdgUB5AeAu5HgdWTn49Uqg4v/spTPSN
RTmDMVVyZ9qhzJfkDpH3TKCAE5t59w4gSPe/7l+MeSml9O+L9HTd9Vng3LBbIds3
uQ4cfLyyQmly81qpJjR1+Rvwo46hOm0kf2sIFi0WULmP/XzLw6b1SbiHf/jqFg7T
FTyLMkPMPMmc7/kpLmYbKyTB4ineasTUL+bDrwu+uSzFAjTcI+1sz4Wo4p7RVywB
DKSI5Ocbd3iMt4XWJWtz0KBX6nBzlV+BBTCwaGMAU4IpPBYOuvcl7TJWx/ODBjbO
4zm4T/66w5IG3tKpsVMs4Jtrh8mtVXCLTBmKDzyjBVN2X8ALGXarItRgLa7k80lJ
jqTHwKCjiAMmT/eh67KzwmqBq5+8rJuXkax0NoXcDu6xkCMNHUQBYdnskaJqC2pu
8hIsPTOrh7ieYSEuchFvu7lI0E+p7ypW65CMiy+Y/Rm5OWeHzjKkU5AbPtx/Me2v
pQRCgaPwciZunx2Ivi1+WYUBU1pGNDO7Xz7a8UHbDURkh7b+40uz2d7YQjKgrZBv
6YwLAmw1LTE4bT9PM9n7LROnX8u6ksei8yiw8gZeVu+plWHbF+0O9siKAgxZlBna
0XFgPpdzjMDTS/sfTIYXWlFj7camhsmTDRjo5G2B212evaKmKgh5ALLSFSk86ZN5
KvQvcfsp81jvJCBmDStrsUgSMzy0Og2quHOd61hRTVlYzwvJvfMzHGKdIWwYUbHZ
OKo/KLEk3E36U9PkPoZGEL2ZeCH4F9Wh3mgg0knBfEmlPnGexmBby6NXGK7VW3l6
xcJlpdMaXKNVMfl2YK8k/34Hyft06KaYLEJsxAqk1pmLEmGhdZC1OAqovVB/1agS
zpMMaB9OWWqNsTjDc7tkDt8BZ72NsAbCI9XmsX81W+NqPb6Ju1dtI09bn113LX/Z
bOSdVicQcXSpl0FnTZaHgHJdQLcU28O7yFFOblqrvcMKpctdTA1TwG9LXEFttGrl
pgjZF3edo0Cez10epK+S
"""

    def setUp(self):
        self.asn1Spec = rfc5652.ContentInfo()

    def testDerCodec(self):
        """Check the SEED key-wrap and SEED-CBC OIDs and the CBC IV."""
        substrate = pem.readBase64fromText(self.env_data_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])

        ed, rest = der_decoder(asn1Object['content'], rfc5652.EnvelopedData())
        self.assertFalse(rest)
        self.assertTrue(ed.prettyPrint())
        self.assertEqual(asn1Object['content'], der_encoder(ed))

        kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
        self.assertEqual(rfc4010.id_npki_app_cmsSeed_wrap, kwa['algorithm'])

        cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
        self.assertEqual(rfc4010.id_seedCBC, cea['algorithm'])
        param, rest = der_decoder(
            cea['parameters'], asn1Spec=rfc4010.SeedCBCParameter())
        self.assertFalse(rest)
        self.assertTrue(param.prettyPrint())
        self.assertEqual(cea['parameters'], der_encoder(param))

        # The SeedCBCParameter is the 16-octet initialization vector.
        iv = univ.OctetString(hexValue='424f47555349565f424f475553495621')
        self.assertEqual(iv, param)

    def testOpenTypes(self):
        """Same checks, but relying on automatic open-type decoding."""
        substrate = pem.readBase64fromText(self.env_data_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        # assertIn on the map itself; the original wrapped the membership
        # test in assertTrue(x in map.keys()).
        self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)

        kekri = asn1Object['content']['recipientInfos'][0]['kekri']
        kwa = kekri['keyEncryptionAlgorithm']
        self.assertEqual(rfc4010.id_npki_app_cmsSeed_wrap, kwa['algorithm'])

        eci = asn1Object['content']['encryptedContentInfo']
        cea = eci['contentEncryptionAlgorithm']
        self.assertEqual(rfc4010.id_seedCBC, cea['algorithm'])

        # With decodeOpenTypes=True the parameters are already the IV.
        iv = univ.OctetString(hexValue='424f47555349565f424f475553495621')
        self.assertEqual(iv, cea['parameters'])
+
class SMIMECapabilitiesTestCase(unittest.TestCase):
    """Decode an SMIMECapabilities sequence advertising the SEED algorithms."""

    smime_capabilities_pem_text = "MB4wDAYIKoMajJpEAQQFADAOBgoqgxqMmkQHAQEBBQA="

    def setUp(self):
        self.asn1Spec = rfc5751.SMIMECapabilities()

    def testDerCodec(self):
        # Decode, then verify the whole substrate was consumed and the
        # structure re-encodes to identical DER.
        substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        # Each capability must carry the encoded empty SeedSMimeCapability
        # as its parameters.
        expected_params = der_encoder(rfc4010.SeedSMimeCapability(""))
        for capability in asn1Object:
            self.assertTrue(capability['parameters'].hasValue())
            self.assertEqual(expected_params, capability['parameters'])

        # Both SEED OIDs must be advertised.
        found_oids = [capability['capabilityID'] for capability in asn1Object]
        self.assertIn(rfc4010.id_seedCBC, found_oids)
        self.assertIn(rfc4010.id_npki_app_cmsSeed_wrap, found_oids)
+
+
# Collect every TestCase class defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Exit non-zero when any test fails so CI can detect the failure.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
+
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4043.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4043.py
new file mode 100644
index 0000000000..0ab72dd364
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4043.py
@@ -0,0 +1,118 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4043
+
+
class PermIdCertTestCase(unittest.TestCase):
    # Certificate whose subjectAltName contains an RFC 4043
    # PermanentIdentifier otherName.
    cert_pem_text = """\
MIIDDTCCApOgAwIBAgIJAKWzVCgbsG5HMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMTEwMDA0MDIyWhcNMjAxMTA5MDA0MDIyWjBNMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxDTALBgNVBAMTBEdhaWwwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQBoktg
/68xL+uEQaWBoHyOjw8EMLeMEng3R2H7yiEzTGoaMJgPOKvSfzB2P0paHYPL+B5y
Gc0CK5EHRujMl9ljH+Wydpk57rKBLo1ZzpWUS6anLGIkWs1sOakcgGGr7hGjggFL
MIIBRzAdBgNVHQ4EFgQU1pCNZuMzfEaJ9GGhH7RKy6Mvz+cwbwYDVR0jBGgwZoAU
8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
DAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYG
kU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0E
NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
cnBvc2UuMFMGA1UdEQRMMEqgNgYIKwYBBQUHCAOgKjAoDBs4MjYyMDgtNDE3MDI4
LTU0ODE5NS0yMTUyMzMGCSsGAQQBgaxgMIEQZ2FpbEBleGFtcGxlLmNvbTAKBggq
hkjOPQQDAwNoADBlAjBT+36Y/LPaGSu+61P7kR97M8jAjtH5DtUwrWR02ChshvYJ
x0bpZq3PJaO0WlBgFicCMQCf+67wSvjxxtjI/OAg4t8NQIJW1LcehSXizlPDc772
/FC5OiUAxO+iFaSVMeDFsCo=
"""

    def setUp(self):
        self.asn1Spec = rfc5280.Certificate()

    def testDerCodec(self):
        """Decode subjectAltName explicitly and find the PermanentIdentifier."""
        substrate = pem.readBase64fromText(self.cert_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        perm_id_oid = rfc4043.id_on_permanentIdentifier
        # Private-enterprise assigner OID used by this test certificate.
        assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
        permanent_identifier_found = False

        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] == rfc5280.id_ce_subjectAltName:
                extnValue, rest = der_decoder(
                    extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())

                self.assertFalse(rest)
                self.assertTrue(extnValue.prettyPrint())
                self.assertEqual(extn['extnValue'], der_encoder(extnValue))

                for gn in extnValue:
                    # Only the otherName GeneralName alternative is of
                    # interest; others in the SAN are skipped.
                    if gn['otherName'].hasValue():
                        self.assertEqual(perm_id_oid, gn['otherName']['type-id'])

                        # The otherName value is an open type; decode it
                        # with the PermanentIdentifier spec.
                        onValue, rest = der_decoder(
                            gn['otherName']['value'],
                            asn1Spec=rfc4043.PermanentIdentifier())

                        self.assertFalse(rest)
                        self.assertTrue(onValue.prettyPrint())
                        self.assertEqual(gn['otherName']['value'], der_encoder(onValue))
                        self.assertEqual(assigner_oid, onValue['assigner'])
                        permanent_identifier_found = True

        self.assertTrue(permanent_identifier_found)

    def testOpenTypes(self):
        """Same check with automatic open-type decoding of the otherName."""
        substrate = pem.readBase64fromText(self.cert_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        perm_id_oid = rfc4043.id_on_permanentIdentifier
        # Private-enterprise assigner OID used by this test certificate.
        assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
        permanent_identifier_found = False

        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] == rfc5280.id_ce_subjectAltName:
                extnValue, rest = der_decoder(
                    extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
                    decodeOpenTypes=True)

                self.assertFalse(rest)
                self.assertTrue(extnValue.prettyPrint())
                self.assertEqual(extn['extnValue'], der_encoder(extnValue))

                for gn in extnValue:
                    if gn['otherName'].hasValue():
                        # With decodeOpenTypes=True the value is already a
                        # decoded PermanentIdentifier.
                        on = gn['otherName']
                        self.assertEqual(perm_id_oid, on['type-id'])
                        self.assertEqual(assigner_oid, on['value']['assigner'])
                        permanent_identifier_found = True

        self.assertTrue(permanent_identifier_found)
+
+
# Collect every TestCase class defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Exit non-zero when any test fails so CI can detect the failure.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4055.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4055.py
new file mode 100644
index 0000000000..cf0b376daa
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4055.py
@@ -0,0 +1,181 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4055
+
+
class PSSDefautTestCase(unittest.TestCase):
    """RSASSA-PSS AlgorithmIdentifier with all parameters at defaults."""

    pss_default_pem_text = "MAsGCSqGSIb3DQEBCg=="

    def setUp(self):
        self.asn1Spec = rfc5280.AlgorithmIdentifier()

    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.pss_default_pem_text)
        asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        # The original used assertTrue(oid, value); the second argument of
        # assertTrue is the failure message, so that assertion could never
        # fail.  Compare the algorithm OID for real.
        self.assertEqual(rfc4055.id_RSASSA_PSS, asn1Object['algorithm'])
        self.assertEqual(substrate, der_encoder.encode(asn1Object))

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.pss_default_pem_text)
        asn1Object, rest = der_decoder.decode(substrate,
                                              asn1Spec=self.asn1Spec,
                                              decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder.encode(asn1Object))
        # Parameters are absent in the all-default encoding.
        self.assertFalse(asn1Object['parameters'].hasValue())
+
+
class PSSSHA512TestCase(unittest.TestCase):
    """RSASSA-PSS AlgorithmIdentifier using SHA-512 and MGF1 with SHA-512."""

    pss_sha512_pem_text = "MDwGCSqGSIb3DQEBCjAvoA8wDQYJYIZIAWUDBAIDBQChHDAaBg" \
                          "kqhkiG9w0BAQgwDQYJYIZIAWUDBAIDBQA="

    def setUp(self):
        self.asn1Spec = rfc5280.AlgorithmIdentifier()

    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.pss_sha512_pem_text)
        asn1Object, rest = der_decoder.decode(
            substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        # The original used assertTrue(oid, value); the second argument of
        # assertTrue is the failure message, so that assertion could never
        # fail.  Compare the algorithm OID for real.
        self.assertEqual(rfc4055.id_RSASSA_PSS, asn1Object['algorithm'])
        self.assertEqual(substrate, der_encoder.encode(asn1Object))

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.pss_sha512_pem_text)
        asn1Object, rest = der_decoder.decode(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder.encode(asn1Object))
        self.assertTrue(asn1Object['parameters'].hasValue())
        # saltLength is omitted from the encoding, so the decoder supplies
        # its DEFAULT value of 20; the original assertTrue(20, x) was a
        # no-op.
        self.assertEqual(20, asn1Object['parameters']['saltLength'])
+
+
class OAEPDefautTestCase(unittest.TestCase):
    """RSAES-OAEP AlgorithmIdentifier with all parameters at defaults."""

    oaep_default_pem_text = "MAsGCSqGSIb3DQEBBw=="

    def setUp(self):
        self.asn1Spec = rfc5280.AlgorithmIdentifier()

    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.oaep_default_pem_text)
        asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        # The original used assertTrue(oid, value); the second argument of
        # assertTrue is the failure message, so that assertion could never
        # fail.  Compare the algorithm OID for real.
        self.assertEqual(rfc4055.id_RSAES_OAEP, asn1Object['algorithm'])
        self.assertEqual(substrate, der_encoder.encode(asn1Object))

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.oaep_default_pem_text)
        asn1Object, rest = der_decoder.decode(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder.encode(asn1Object))
        # Parameters are absent in the all-default encoding.
        self.assertFalse(asn1Object['parameters'].hasValue())
+
+
class OAEPSHA256TestCase(unittest.TestCase):
    """RSAES-OAEP AlgorithmIdentifier using SHA-256 and MGF1 with SHA-256."""

    oaep_sha256_pem_text = "MDwGCSqGSIb3DQEBBzAvoA8wDQYJYIZIAWUDBAIBBQChHDAaB" \
                           "gkqhkiG9w0BAQgwDQYJYIZIAWUDBAIBBQA="

    def setUp(self):
        self.asn1Spec = rfc5280.AlgorithmIdentifier()

    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.oaep_sha256_pem_text)
        asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        # The original used assertTrue(oid, value); the second argument of
        # assertTrue is the failure message, so that assertion could never
        # fail.  Compare the algorithm OID for real.
        self.assertEqual(rfc4055.id_RSAES_OAEP, asn1Object['algorithm'])
        self.assertEqual(substrate, der_encoder.encode(asn1Object))

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.oaep_sha256_pem_text)
        asn1Object, rest = der_decoder.decode(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder.encode(asn1Object))
        self.assertTrue(asn1Object['parameters'].hasValue())

        oaep_p = asn1Object['parameters']

        # Both hash and MGF algorithm identifiers carry explicit NULL
        # parameters in this encoding.
        self.assertEqual(univ.Null(""), oaep_p['hashFunc']['parameters'])
        self.assertEqual(univ.Null(""), oaep_p['maskGenFunc']['parameters']['parameters'])
+
+
class OAEPFullTestCase(unittest.TestCase):
    """RSAES-OAEP AlgorithmIdentifier with hash, MGF, and pSource present."""

    oaep_full_pem_text = "MFMGCSqGSIb3DQEBBzBGoA8wDQYJYIZIAWUDBAICBQChHDAaBgk" \
                         "qhkiG9w0BAQgwDQYJYIZIAWUDBAICBQCiFTATBgkqhkiG9w0BAQ" \
                         "kEBmZvb2Jhcg=="

    def setUp(self):
        self.asn1Spec = rfc5280.AlgorithmIdentifier()

    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.oaep_full_pem_text)
        asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())

        # The original used assertTrue(oid, value); the second argument of
        # assertTrue is the failure message, so that assertion could never
        # fail.  Compare the algorithm OID for real.
        self.assertEqual(rfc4055.id_RSAES_OAEP, asn1Object['algorithm'])

        self.assertEqual(substrate, der_encoder.encode(asn1Object))

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.oaep_full_pem_text)
        asn1Object, rest = der_decoder.decode(substrate,
                                              asn1Spec=self.asn1Spec,
                                              decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder.encode(asn1Object))

        self.assertTrue(asn1Object['parameters'].hasValue())

        oaep_p = asn1Object['parameters']

        # Hash and MGF algorithm identifiers carry explicit NULL parameters;
        # the pSource function carries the label 'foobar'.
        self.assertEqual(univ.Null(""), oaep_p['hashFunc']['parameters'])
        self.assertEqual(
            univ.Null(""), oaep_p['maskGenFunc']['parameters']['parameters'])
        self.assertEqual(
            univ.OctetString(value='foobar'),
            oaep_p['pSourceFunc']['parameters'])
+
+
# Collect every TestCase class defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Exit non-zero when any test fails so CI can detect the failure.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4073.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4073.py
new file mode 100644
index 0000000000..4bd5e5f7fc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4073.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4073
+from pyasn1_modules import rfc5652
+
+
class ContentCollectionTestCase(unittest.TestCase):
    # RFC 4073 ContentCollection whose items are ContentWithAttributes
    # structures wrapping id-data payloads plus CMS attributes.
    pem_text = """\
MIIG/QYLKoZIhvcNAQkQAROgggbsMIIG6DCCAWcGCyqGSIb3DQEJEAEUoIIBVjCC
AVIwgfEGCSqGSIb3DQEHAaCB4wSB4ENvbnRlbnQtVHlwZTogdGV4dC9wbGFpbgoK
UkZDIDQwNzMsIHB1Ymxpc2hlZCBpbiBNYXkgMjAwNSwgZGVzY3JpYmVzIGEgY29u
dmVudGlvbiBmb3IgdXNpbmcgdGhlCkNyeXB0b2dyYXBoaWMgTWVzc2FnZSBTeW50
YXggKENNUykgdG8gcHJvdGVjdCBhIGNvbnRlbnQgY29sbGVjdGlvbi4gIElmCmRl
c2lyZWQsIGF0dHJpYnV0ZXMgY2FuIGJlIGFzc29jaWF0ZWQgd2l0aCB0aGUgY29u
dGVudC4KMFwwMwYLKoZIhvcNAQkQAgQxJDAiDBVBYnN0cmFjdCBmb3IgUkZDIDQw
NzMGCSqGSIb3DQEHATAlBgsqhkiG9w0BCRACBzEWBBSkLSXBiRWvbwnJKb4EGb1X
FwCa3zCCBXkGCyqGSIb3DQEJEAEUoIIFaDCCBWQwggT9BgkqhkiG9w0BBwGgggTu
BIIE6kNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbgoKVGhlIGZvbGxvd2luZyBBU04u
MSBtb2R1bGUgZGVmaW5lcyB0aGUgc3RydWN0dXJlcyB0aGF0IGFyZSBuZWVkZWQg
dG8KaW1wbGVtZW50IHRoZSBzcGVjaWZpY2F0aW9uIGluIFJGQyA0MDczLiAgSXQg
aXMgZXhwZWN0ZWQgdG8gYmUgdXNlZCBpbgpjb25qdW5jdGlvbiB3aXRoIHRoZSBB
U04uMSBtb2R1bGVzIGluIFJGQyA1NjUyIGFuZCBSRkMgMzI3NC4KCiAgIENvbnRl
bnRDb2xsZWN0aW9uTW9kdWxlCiAgICAgeyBpc28oMSkgbWVtYmVyLWJvZHkoMikg
dXMoODQwKSByc2Fkc2koMTEzNTQ5KSBwa2NzKDEpCiAgICAgICBwa2NzLTkoOSkg
c21pbWUoMTYpIG1vZHVsZXMoMCkgMjYgfQoKICAgREVGSU5JVElPTlMgSU1QTElD
SVQgVEFHUyA6Oj0KICAgQkVHSU4KCiAgIElNUE9SVFMKICAgICBBdHRyaWJ1dGUs
IENvbnRlbnRJbmZvCiAgICAgICBGUk9NIENyeXB0b2dyYXBoaWNNZXNzYWdlU3lu
dGF4MjAwNCAtLSBbQ01TXQogICAgICAgICB7IGlzbygxKSBtZW1iZXItYm9keSgy
KSB1cyg4NDApIHJzYWRzaSgxMTM1NDkpCiAgICAgICAgICAgcGtjcygxKSBwa2Nz
LTkoOSkgc21pbWUoMTYpIG1vZHVsZXMoMCkgY21zLTIwMDEoMTQpIH07CgoKICAg
LS0gQ29udGVudCBDb2xsZWN0aW9uIENvbnRlbnQgVHlwZSBhbmQgT2JqZWN0IElk
ZW50aWZpZXIKCiAgIGlkLWN0LWNvbnRlbnRDb2xsZWN0aW9uIE9CSkVDVCBJREVO
VElGSUVSIDo6PSB7CiAgICAgICAgICAgaXNvKDEpIG1lbWJlci1ib2R5KDIpIHVz
KDg0MCkgcnNhZHNpKDExMzU0OSkgcGtjcygxKQogICAgICAgICAgIHBrY3M5KDkp
IHNtaW1lKDE2KSBjdCgxKSAxOSB9CgogICBDb250ZW50Q29sbGVjdGlvbiA6Oj0g
U0VRVUVOQ0UgU0laRSAoMS4uTUFYKSBPRiBDb250ZW50SW5mbwoKICAgLS0gQ29u
dGVudCBXaXRoIEF0dHJpYnV0ZXMgQ29udGVudCBUeXBlIGFuZCBPYmplY3QgSWRl
bnRpZmllcgoKICAgaWQtY3QtY29udGVudFdpdGhBdHRycyBPQkpFQ1QgSURFTlRJ
RklFUiA6Oj0gewogICAgICAgICAgIGlzbygxKSBtZW1iZXItYm9keSgyKSB1cyg4
NDApIHJzYWRzaSgxMTM1NDkpIHBrY3MoMSkKICAgICAgICAgICBwa2NzOSg5KSBz
bWltZSgxNikgY3QoMSkgMjAgfQoKICAgQ29udGVudFdpdGhBdHRyaWJ1dGVzIDo6
PSBTRVFVRU5DRSB7CiAgICAgICBjb250ZW50ICAgICBDb250ZW50SW5mbywKICAg
ICAgIGF0dHJzICAgICAgIFNFUVVFTkNFIFNJWkUgKDEuLk1BWCkgT0YgQXR0cmli
dXRlIH0KCiAgIEVORAowYTA4BgsqhkiG9w0BCRACBDEpMCcMGkFTTi4xIE1vZHVs
ZSBmcm9tIFJGQyA0MDczBgkqhkiG9w0BBwEwJQYLKoZIhvcNAQkQAgcxFgQUMbeK
buWO3egPDL8Kf7tBhzjIKLw=
"""

    def setUp(self):
        self.asn1Spec = rfc5652.ContentInfo()

    def testDerCodec(self):
        """Walk the nested content layers, checking each decode round-trips.

        The outer ContentInfo holds a ContentCollection; each collection
        item is walked inner-layer by inner-layer until id-data is reached.
        """

        def test_layer(substrate, content_type):
            # Decode one layer with the spec registered for its content
            # type, then check full consumption and DER round-trip.
            asn1Object, rest = der_decoder(substrate, asn1Spec=layers[content_type])
            self.assertFalse(rest)
            self.assertTrue(asn1Object.prettyPrint())
            self.assertEqual(substrate, der_encoder(asn1Object))

            if content_type == rfc4073.id_ct_contentWithAttrs:
                # Every attribute on a ContentWithAttributes must be a
                # known CMS attribute.
                for attr in asn1Object['attrs']:
                    self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)

            return asn1Object

        # Content-type OID -> ASN.1 spec for decoding that layer.
        layers = rfc5652.cmsContentTypesMap

        # How to find the content type of the next inner layer (None for
        # the terminal id-data layer) ...
        getNextLayer = {
            rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
            rfc4073.id_ct_contentCollection: lambda x: x[0]['contentType'],
            rfc4073.id_ct_contentWithAttrs: lambda x: x['content']['contentType'],
            rfc5652.id_data: lambda x: None,
        }

        # ... and where the substrate of that next layer lives.
        getNextSubstrate = {
            rfc5652.id_ct_contentInfo: lambda x: x['content'],
            rfc4073.id_ct_contentCollection: lambda x: x[0]['content'],
            rfc4073.id_ct_contentWithAttrs: lambda x: x['content']['content'],
            rfc5652.id_data: lambda x: None,
        }

        substrate = pem.readBase64fromText(self.pem_text)

        this_layer = rfc5652.id_ct_contentInfo

        while this_layer != rfc5652.id_data:
            if this_layer == rfc4073.id_ct_contentCollection:
                # A collection fans out: walk each member down to id-data.
                asn1Object = test_layer(substrate, this_layer)
                for ci in asn1Object:
                    substrate = ci['content']
                    this_layer = ci['contentType']
                    while this_layer != rfc5652.id_data:
                        asn1Object = test_layer(substrate, this_layer)
                        substrate = getNextSubstrate[this_layer](asn1Object)
                        this_layer = getNextLayer[this_layer](asn1Object)
            else:
                asn1Object = test_layer(substrate, this_layer)
                substrate = getNextSubstrate[this_layer](asn1Object)
                this_layer = getNextLayer[this_layer](asn1Object)

    def testOpenTypes(self):
        """Decode the whole structure at once via automatic open types."""
        substrate = pem.readBase64fromText(self.pem_text)
        asn1Object, rest = der_decoder(substrate,
                                       asn1Spec=rfc5652.ContentInfo(),
                                       decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        self.assertEqual(rfc4073.id_ct_contentCollection, asn1Object['contentType'])

        for ci in asn1Object['content']:
            self.assertIn(ci['contentType'], rfc5652.cmsContentTypesMap)
            self.assertEqual(rfc4073.id_ct_contentWithAttrs, ci['contentType'])

            next_ci = ci['content']['content']

            # The inner ContentInfo carries plain id-data text.
            self.assertIn(next_ci['contentType'], rfc5652.cmsContentTypesMap)
            self.assertEqual(rfc5652.id_data, next_ci['contentType'])
            self.assertIn(str2octs('Content-Type: text'), next_ci['content'])

            for attr in ci['content']['attrs']:
                self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
                if attr['attrType'] == rfc2634.id_aa_contentHint:
                    self.assertIn('RFC 4073', attr['attrValues'][0]['contentDescription'])
+
+
# Collect every TestCase class defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Exit non-zero when any test fails so CI can detect the failure.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4108.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4108.py
new file mode 100644
index 0000000000..9d71601077
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4108.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc4108
+
+
+class CMSFirmwareWrapperTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEvAYJKoZIhvcNAQcCoIIErTCCBKkCAQExDTALBglghkgBZQMEAgEwggIVBgsq
+hkiG9w0BCRABEKCCAgQEggIA3ntqPr5kDpx+//pgWGfHCH/Ht4pbenGwXv80txyE
+Y0I2mT9BUGz8ILkbhD7Xz89pBS5KhEJpthxH8WREJtvS+wL4BqYLt23wjWoZy5Gt
+5dPzWgaNlV/aQ5AdfAY9ljmnNYnK8D8r8ur7bQM4cKUdxry+QA0nqXHMAOSpx4Um
+8impCc0BICXaFfL3zBrNxyPubbFO9ofbYOAWaNmmIAhzthXf12vDrLostIqmYrP4
+LMRCjTr4LeYaVrAWfKtbUbByN6IuBef3Qt5cJaChr74udz3JvbYFsUvCpl64kpRq
+g2CT6R+xE4trO/pViJlI15dvJVz04BBYQ2jQsutJwChi97/DDcjIv03VBmrwRE0k
+RJNFP9vpDM8CxJIqcobC5Kuv8b0GqGfGl6ouuQKEVMfBcrupgjk3oc3KL1iVdSr1
++74amb1vDtTMWNm6vWRqh+Kk17NGEi2mNvYkkZUTIHNGH7OgiDclFU8dSMZd1fun
+/D9dmiFiErDB3Fzr4+8Qz0aKedNE/1uvM+dhu9qjuRdkDzZ4S7txTfk6y9pG9iyk
+aEeTV2kElKXblgi+Cf0Ut4f5he8rt6jveHdMo9X36YiUQVvevj2cgN7lFivEnFYV
+QY0xugpP7lvEFDfsi2+0ozgP8EKOLYaCUKpuvttlYJ+vdtUFEijizEZ4cx02RsXm
+EesxggJ6MIICdgIBA4AUnutnybladNRNLxY5ZoDoAbXLpJwwCwYJYIZIAWUDBAIB
+oIG8MBoGCSqGSIb3DQEJAzENBgsqhkiG9w0BCRABEDArBgsqhkiG9w0BCRACJDEc
+MBoGCysGAQQBjb9BAQEqBgsrBgEEAY2/QQEBMDAvBgkqhkiG9w0BCQQxIgQgAJfv
+uasB4P6WDLOkOyvj33YPgZW4olHbidzyh1EKP9YwQAYLKoZIhvcNAQkQAikxMTAv
+MAsGCWCGSAFlAwQCAQQgAJfvuasB4P6WDLOkOyvj33YPgZW4olHbidzyh1EKP9Yw
+CwYJKoZIhvcNAQELBIIBgDivAlSLbMPPu+zV+pPcYpNp+A1mwVOytjMBzSo31kR/
+qEu+hVrDknAOk9IdCaDvcz612CcfNT85/KzrYvWWxOP2woU/vZj253SnndALpfNN
+n3/crJjF6hKgkjUwoXebI7kuj5WCh2q5lkd6xUa+jkCw+CINcN43thtS66UsVI4d
+mv02EvsS2cxPY/508uaQZ6AYAacm667bgX8xEjbzACMOeMCuvKQXWAuh3DkNk+gV
+xizHDw7xZxXgMGMAnJglAeBtd3Si5ztILw9U2gKUqFn/nOgy+eW63JuU/q31/Hgg
+ZATjyBznSzneTZrw8/ePoSCj7E9vBeCTUkeFbVB2tJK1iYDMblp6HUuwgYuGKXy/
+ZwKL3GvB11qg7ntdEyjdLq0xcVrht/K0d2dPo4iO4Ac7c1xbFMDAlWOt4FMPWh6O
+iTh55YvT7hAJjTbB5ebgMA9QJnAczQPFnaIePnlFrkETd3YyLK4yHwnoIGo1GiW/
+dsnhVtIdkPtfJIvcYteYJg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ inner, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertEqual(
+ rfc4108.id_ct_firmwarePackage, inner['encapContentInfo']['eContentType'])
+
+ self.assertTrue(inner['encapContentInfo']['eContent'])
+
+ attribute_list = []
+
+ for attr in inner['signerInfos'][0]['signedAttrs']:
+ attribute_list.append(attr['attrType'])
+ if attr['attrType'] == rfc4108.id_aa_targetHardwareIDs:
+ av, rest = der_decoder(attr['attrValues'][0],
+ asn1Spec=rfc4108.TargetHardwareIdentifiers())
+ self.assertEqual(2, len(av))
+
+ for oid in av:
+ self.assertIn('1.3.6.1.4.1.221121.1.1.', oid.prettyPrint())
+
+ self.assertIn( rfc5652.id_contentType, attribute_list)
+ self.assertIn( rfc5652.id_messageDigest, attribute_list)
+ self.assertIn(rfc4108.id_aa_targetHardwareIDs, attribute_list)
+ self.assertIn(rfc4108.id_aa_fwPkgMessageDigest, attribute_list)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd_eci = asn1Object['content']['encapContentInfo']
+
+ self.assertEqual(sd_eci['eContentType'], rfc4108.id_ct_firmwarePackage)
+ self.assertTrue(sd_eci['eContent'].hasValue())
+
+ for attr in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc4108.id_aa_targetHardwareIDs:
+ for oid in attr['attrValues'][0]:
+ self.assertIn('1.3.6.1.4.1.221121.1.1.', oid.prettyPrint())
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4210.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4210.py
new file mode 100644
index 0000000000..39d407f72f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4210.py
@@ -0,0 +1,128 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4210
+
+
+class PKIMessageTestCase(unittest.TestCase):
+ pem_text = """\
+MIITuTCCARECAQKkWTBXMQswCQYDVQQGEwJUUjEQMA4GA1UEChMHRS1HdXZlbjEUMBIGA1UECxML
+VHJ1c3RDZW50ZXIxIDAeBgNVBAMTF1JTQSBTZWN1cml0eSBDTVAgU2VydmVypC0wKzELMAkGA1UE
+BhMCVFIxHDAaBgNVBAMME1ZhbGltby1WZXR0b3ItMTdEZWOgERgPMjAxMjA1MDMxMTE2MTdaoQ8w
+DQYJKoZIhvcNAQEFBQCiIgQgZWVhMjg5MGU2ZGY5N2IyNzk5NWY2MWE0MzE2MzI1OWGkEgQQQ01Q
+VjJUMTIyMzM0NjI3MKUSBBCAAAABgAAAAYAAAAGAAAABphIEEDEzNjY0NDMwMjlSYW5kb22jghIZ
+MIISFaGCC84wggvKMIIFwDCCBKigAwIBAgIQfOVE05R616R6Nqgu3drXHzANBgkqhkiG9w0BAQUF
+ADBxMQswCQYDVQQGEwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5T
+LjE4MDYGA1UEAxMvZS1HdXZlbiBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2FnbGF5aWNp
+c2kwHhcNMDgxMTI0MTAwMzI0WhcNMTYxMjE0MTExNzI0WjBdMQswCQYDVQQGEwJUUjEoMCYGA1UE
+CgwfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjEkMCIGA1UEAwwbZS1HdXZlbiBNb2Jp
+bCBUZXN0VVRGLTgtU09OMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzqaymRo5chRK
+EKrhjWQky1HOm6b/Jy4tSUuo4vq3O9U3G2osOU/hHb6fyMmznLpc6CaZ3qKYiuDMFRW8g1kNjEjV
+sFSvH0Yd4qgwP1+qqzhBSe+nCAnEbRUrz+nXJ4fKhmGaQ+ZSic+MeyoqDsf/zENKqdV7ea9l3Ilu
+Rj93bmTxas9aWPWQ/U/fpwkwRXaqaONlM5e4GWdgA7T1aq106NvH1z6LDNXcMYw4lSZkj/UjmM/0
+NhVz+57Ib4a0bogTaBmm8a1E5NtzkcA7pgnZT8576T0UoiOpEo+NAELA1B0mRh1/82HK1/0xn1zt
+1ym4XZRtn2r2l/wTeEwU79ALVQIDAQABo4ICZjCCAmIwfAYIKwYBBQUHAQEEcDBuMDIGCCsGAQUF
+BzABhiZodHRwOi8vdGVzdG9jc3AyLmUtZ3V2ZW4uY29tL29jc3AueHVkYTA4BggrBgEFBQcwAoYs
+aHR0cDovL3d3dy5lLWd1dmVuLmNvbS9kb2N1bWVudHMvVGVzdEtvay5jcnQwDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wggElBgNVHSAEggEcMIIBGDCCARQGCWCGGAMAAQECATCCAQUw
+NgYIKwYBBQUHAgEWKmh0dHA6Ly93d3cuZS1ndXZlbi5jb20vZG9jdW1lbnRzL05FU1VFLnBkZjCB
+ygYIKwYBBQUHAgIwgb0egboAQgB1ACAAcwBlAHIAdABpAGYAaQBrAGEAIABpAGwAZQAgAGkAbABn
+AGkAbABpACAAcwBlAHIAdABpAGYAaQBrAGEAIAB1AHkAZwB1AGwAYQBtAGEAIABlAHMAYQBzAGwA
+YQByATEAbgExACAAbwBrAHUAbQBhAGsAIABpAOcAaQBuACAAYgBlAGwAaQByAHQAaQBsAGUAbgAg
+AGQAbwBrAPwAbQBhAG4BMQAgAGEA5wExAG4BMQB6AC4wWAYDVR0fBFEwTzBNoEugSYZHaHR0cDov
+L3Rlc3RzaWwuZS1ndXZlbi5jb20vRWxla3Ryb25pa0JpbGdpR3V2ZW5saWdpQVNSb290L0xhdGVz
+dENSTC5jcmwwHQYDVR0OBBYEFLMoTImEKeXbqNjbYZkKshQi2vwzMB8GA1UdIwQYMBaAFGCI4dY9
+qCIkag0hwBgz5haCSNl0MA0GCSqGSIb3DQEBBQUAA4IBAQAWOsmvpoFB9sX2aq1/LjPDJ+A5Fpxm
+0XkOGM9yD/FsLfWgyv2HqBY1cVM7mjJfJ1ezkS0ODdlU6TyN5ouvAi21V9CIk69I3eUYSDjPpGia
+qcCCvJoMF0QD7B70kj2zW7IJ7pF11cbvPLaatdzojsH9fVfKtxtn/ZLrXtKsyUW5vKHOeniU6BBB
+Gl/ZZkFNXNN4mrB+B+wDV9OmdMw+Mc8KPq463hJQRat5a9lrXMdNtMAJOkvsUUzOemAsITjXWlyg
+BULijBhi8ZmMp0W7p6oKENX3vH2HCPCGQU29WIrK4iUoscjz93fB6oa4FQpxY0k3JRnWvD5FqkRD
+FKJdq/q9MIIDzzCCAregAwIBAgIQa34pJYdDFNXx90OkMkKzIjANBgkqhkiG9w0BAQUFADBxMQsw
+CQYDVQQGEwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE4MDYG
+A1UEAxMvZS1HdXZlbiBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2FnbGF5aWNpc2kwHhcN
+MDYxMjE1MTUxMzU0WhcNMTYxMjE1MTExMzU0WjBxMQswCQYDVQQGEwJUUjEoMCYGA1UEChMfRWxl
+a3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE4MDYGA1UEAxMvZS1HdXZlbiBFbGVrdHJvbmlr
+IFNlcnRpZmlrYSBIaXptZXQgU2FnbGF5aWNpc2kwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCU/PTxSkcWPJMx4UO8L8ep9/JqRgAZ79EqYWgR4K2bNLgENpc5j0hO+QydgovFODzkEIBP
+RIBavMz9Cw2PONpSBmxd4K1A/5hGqoGEz8UCA2tIx4+Z2A9AQ2O3BYi9FWM+0D1brJDO+6yvX4m5
+Rf3mLlso52NIVV705fIkmOExHjdAj/xB0/LICZMfwKn8F19Jae/SQv9cFnptbNRCq8hU5zLRngpR
+eT1PYrZVV0XLbzbDPwgzLXCzDxG1atdGd5JRTnD58qM1foC3+hGafuyissMQVGnBQFlsx7V6OdlD
+bsxUXegCl2li0RpRJXLqyqMdtEplaznKp8NnbddylfrPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
+hjAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFGCI4dY9qCIkag0hwBgz5haCSNl0MB0GA1Ud
+DgQWBBRgiOHWPagiJGoNIcAYM+YWgkjZdDANBgkqhkiG9w0BAQUFAAOCAQEAKftTVjgltZJxXwDs
+MumguOSlljOQjotVVpES1QYwo3a5RQVpKuS4KYDEdWLD4ITtDNOA/iGKYWCNyKsE1BCL66irknZw
+iR6p6P+q2Wf7fGYSwUBcSBwWBTA+0EgpvPL3/vRuVVCVgC8XHBr72jKKTg9Nwcj+1FwXGZTDpjX8
+dzPhTXEWceQcDn2FRdNt6BQad9Hdq08lMHiyozsWniYZYuWpud91i8Pl698H9t0KqiJg6rPKc9kd
+z9QyC8E/cLIJgYhvfzXMxvmSjeSSFSqTHioqfpU3k8AWXuxqJUxbdQ8QrVaTXRByzEr1Ze0TYpDs
+oel1PjC9ouO8bC7cGrbCWzCCAi8wggGYAhBlEjJUo9asY2ISG4oHjcpzMA0GCSqGSIb3DQEBBQUA
+MFoxCzAJBgNVBAYTAlRSMRAwDgYDVQQKEwdFLUd1dmVuMRQwEgYDVQQLEwtUcnVzdENlbnRlcjEj
+MCEGA1UEAxMaRS1HdXZlblRFU1RDQUhTTSBTeXN0ZW0gQ0EwHhcNMDkxMTMwMjIxMzEzWhcNMTYx
+MTMwMTkxMTUxWjBXMQswCQYDVQQGEwJUUjEQMA4GA1UEChMHRS1HdXZlbjEUMBIGA1UECxMLVHJ1
+c3RDZW50ZXIxIDAeBgNVBAMTF1JTQSBTZWN1cml0eSBDTVAgU2VydmVyMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDCaZeJerGULW+1UPSu9T0voPNgzPcihXX6G5Q45nS4RNCe+pOc226EtD51
+wu6Eq2oARpZmCrKPn63EFmHEE04dRDr8MS2LHuZK8xslIx/AvPnV568795EPoAyhGIX9Na9ZHhnI
+zSPWmWfBd9bsQiLVF7C9dOvfW125mtywWXELewIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAAiIse/x
+aWwRWUM0CIzfnoXfrgyLdKVykK7dTPgoMJgAx229uN6VTPyk+E+lTKq9PhK+e/VJNNg9PjSFjKFd
+lfSDOi9ne1xOrb7cNTjw+sGf1mfNWyzizLXa7su7ISFN+GaClmAstH9vXsRxg1oh3pFMJv47I6iw
+gUQlwwg8WsY/MIIGPzCCBjsCAQAwAwIBADCCBi+gggYrMIIGJzCCBQ+gAwIBAgIRALGVtVAeoM1x
+gjgOX3alZ5MwDQYJKoZIhvcNAQEFBQAwXTELMAkGA1UEBhMCVFIxKDAmBgNVBAoMH0VsZWt0cm9u
+aWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xJDAiBgNVBAMMG2UtR3V2ZW4gTW9iaWwgVGVzdFVURi04
+LVNPTjAeFw0xMjA1MDMxMTE2MTdaFw0xMzA1MDMxMTE2MTdaMGoxCzAJBgNVBAYTAlRSMREwDwYD
+VQQKDAhGaXJlIExMVDEbMBkGA1UECwwScG9wQ29kZSAtIDEyMzQ1Njc4MRQwEgYDVQQFEws3NjU0
+MzQ1Njc2NTEVMBMGA1UEAwwMQnVyYWsgWW9uZGVtMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
+gQCpfSB7xcsHZR4E27yGHkzUJx1y2iknzX4gRM2acyPljRw/V5Lm7POrfWIX9UF2sxfYfRqxYmD0
++nw72nx8R/5AFQK0BfjHxIc5W1YekMHF8PSORo9rJqcX+qn+NBYwqcJl4EdObTcOtMWC6ws6n0uA
+oDvYYN0ujkua496sp+INiQIDAQABo4IDVzCCA1MwQgYIKwYBBQUHAQEENjA0MDIGCCsGAQUFBzAB
+hiZodHRwOi8vdGVzdG9jc3AyLmUtZ3V2ZW4uY29tL29jc3AueHVkYTAfBgNVHSMEGDAWgBSzKEyJ
+hCnl26jY22GZCrIUItr8MzCCAXIGA1UdIASCAWkwggFlMIGxBgZghhgDAAEwgaYwNgYIKwYBBQUH
+AgEWKmh0dHA6Ly93d3cuZS1ndXZlbi5jb20vZG9jdW1lbnRzL05FU1VFLnBkZjBsBggrBgEFBQcC
+AjBgGl5CdSBzZXJ0aWZpa2EsIDUwNzAgc2F5xLFsxLEgRWxla3Ryb25payDEsG16YSBLYW51bnVu
+YSBnw7ZyZSBuaXRlbGlrbGkgZWxla3Ryb25payBzZXJ0aWZpa2FkxLFyMIGuBglghhgDAAEBAQMw
+gaAwNwYIKwYBBQUHAgEWK2h0dHA6Ly93d3cuZS1ndXZlbi5jb20vZG9jdW1lbnRzL01LTkVTSS5w
+ZGYwZQYIKwYBBQUHAgIwWRpXQnUgc2VydGlmaWthLCBNS05FU0kga2Fwc2FtxLFuZGEgeWF5xLFu
+bGFubcSxxZ8gYmlyIG5pdGVsaWtsaSBlbGVrdHJvbmlrIHNlcnRpZmlrYWTEsXIuMA4GA1UdDwEB
+/wQEAwIGwDCBgwYIKwYBBQUHAQMEdzB1MAgGBgQAjkYBATBpBgtghhgBPQABp04BAQxaQnUgc2Vy
+dGlmaWthLCA1MDcwIHNheWlsaSBFbGVrdHJvbmlrIEltemEgS2FudW51bmEgZ8O2cmUgbml0ZWxp
+a2xpIGVsZWt0cm9uaWsgc2VydGlmaWthZGlyMEUGA1UdCQQ+MDwwFAYIKwYBBQUHCQIxCAQGQW5r
+YXJhMBIGCCsGAQUFBwkBMQYEBDE5NzkwEAYIKwYBBQUHCQQxBAQCVFIwGAYDVR0RBBEwD4ENZmly
+ZUBmaXJlLmNvbTBgBgNVHR8EWTBXMFWgU6BRhk9odHRwOi8vdGVzdHNpbC5lLWd1dmVuLmNvbS9F
+bGVrdHJvbmlrQmlsZ2lHdXZlbmxpZ2lBU01LTkVTSS1VVEYtOC9MYXRlc3RDUkwuY3JsMB0GA1Ud
+DgQWBBSLG9aIb1k2emFLCpM93kXJkWhzuTANBgkqhkiG9w0BAQUFAAOCAQEACoGCn4bzDWLzs799
+rndpB971UD2wbwt8Hkw1MGZkkJVQeVF4IS8FacAyYk5vY8ONuTA/Wsh4x23v9WTCtO89HMTz81eU
+BclqZ2Gc2UeMq7Y4FQWR8PNCMdCsxVVhpRRE6jQAyyR9YEBHQYVLfy34e3+9G/h/BR73VGHZJdZI
+DDJYd+VWXmUD9kGk/mI35qYdzN3O28KI8sokqX0z2hvkpDKuP4jNXSCHcVkK23tX2x5m6m0LdqVn
+vnCx2LfBn1wf1u7q30p/GgMVX+mR3QHs7feGewEjlkxuEyLVVD+uBwWCT6zcad17oaAyXV5RV28L
+vH0WNg6pFUpwOP0l+nIOqqCBhAOBgQBAtTB5Qd18sTxEKhSzRiN2OycFPrqoqlZZTHBohe8bE2D4
+Xc1ejkFWUEvQivkqJxCD6C7I37xgDaq8DZnaczIBxbPkY0QMdeL4MiEqlw/tlrJGrWoC5Twb0t/m
+JA5RSwQoMDYTj2WrwtM/nsP12T39or4JRZhlLSM43IaTwEBtQw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc4210.PKIMessage()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4211.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4211.py
new file mode 100644
index 0000000000..e9be4cc39d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4211.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4211
+
+
+class CertificateReqTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
+DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
+nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
+0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
+rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
+BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
+BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
+AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
+xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc4211.CertReqMessages()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for crm in asn1Object:
+ self.assertEqual(2, crm['certReq']['certTemplate']['version'])
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4334.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4334.py
new file mode 100644
index 0000000000..9ba5fdf339
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4334.py
@@ -0,0 +1,83 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4334
+
+
+class CertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICqzCCAjCgAwIBAgIJAKWzVCgbsG4/MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNzE5MTk0MjQ3WhcNMjAwNzE4MTk0MjQ3WjBjMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
+Z2lsIFNlY3VyaXR5IExMQzEYMBYGA1UEAxMPZWFwLmV4YW1wbGUuY29tMHYwEAYH
+KoZIzj0CAQYFK4EEACIDYgAEMMbnIp2BUbuyMgH9HhNHrh7VBy7ql2lBjGRSsefR
+Wa7+vCWs4uviW6On4eem5YoP9/UdO7DaIL+/J9/3DJHERI17oFxn+YWiE4JwXofy
+QwfSu3cncVNMqpiDjEkUGGvBo4HTMIHQMAsGA1UdDwQEAwIHgDBCBglghkgBhvhC
+AQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55
+IHB1cnBvc2UuMB0GA1UdDgQWBBSDjPGr7M742rsE4oQGwBvGvllZ+zAfBgNVHSME
+GDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAeBggrBgEFBQcBDQQSMBAEB0V4YW1w
+bGUEBUJvZ3VzMB0GA1UdJQQWMBQGCCsGAQUFBwMOBggrBgEFBQcDDTAKBggqhkjO
+PQQDAwNpADBmAjEAmCPZnnlUQOKlcOIIOgFrRCkOqO0ESs+dobYwAc2rFCBtQyP7
+C3N00xkX8WZZpiAZAjEAi1Z5+nGbJg5eJTc8fwudutN/HNwJEIS6mHds9kfcy26x
+DAlVlhox680Jxy5J8Pkx
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ if extn['extnID'] == rfc4334.id_pe_wlanSSID:
+ self.assertIn( str2octs('Example'), extnValue)
+
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(rfc4334.id_kp_eapOverLAN, extnValue)
+ self.assertIn(rfc4334.id_kp_eapOverPPP, extnValue)
+
+ self.assertIn(rfc4334.id_pe_wlanSSID, extn_list)
+ self.assertIn(rfc5280.id_ce_extKeyUsage, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4357.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4357.py
new file mode 100644
index 0000000000..cf10d59d12
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4357.py
@@ -0,0 +1,248 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4357
+
+
+class SignedTestCase(unittest.TestCase):
+ signed_pem_text = """\
+MIIBKAYJKoZIhvcNAQcCoIIBGTCCARUCAQExDDAKBgYqhQMCAgkFADAbBgkqhkiG
+9w0BBwGgDgQMc2FtcGxlIHRleHQKMYHkMIHhAgEBMIGBMG0xHzAdBgNVBAMMFkdv
+c3RSMzQxMC0yMDAxIGV4YW1wbGUxEjAQBgNVBAoMCUNyeXB0b1BybzELMAkGA1UE
+BhMCUlUxKTAnBgkqhkiG9w0BCQEWGkdvc3RSMzQxMC0yMDAxQGV4YW1wbGUuY29t
+AhAr9cYewhG9F8fc1GJmtC4hMAoGBiqFAwICCQUAMAoGBiqFAwICEwUABEDAw0LZ
+P4/+JRERiHe/icPbg0IE1iD5aCqZ9v4wO+T0yPjVtNr74caRZzQfvKZ6DRJ7/RAl
+xlHbjbL0jHF+7XKp
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ encoded_null = der_encoder(univ.Null(""))
+
+ si = sd['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['signatureAlgorithm']['parameters'])
+ self.assertEqual(64, len(si['signature']))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ si = asn1Object['content']['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['signatureAlgorithm']['parameters'])
+
+ self.assertEqual(64, len(si['signature']))
+
+class KeyAgreeTestCase(unittest.TestCase):
+ keyagree_pem_text = """\
+MIIBpAYJKoZIhvcNAQcDoIIBlTCCAZECAQIxggFQoYIBTAIBA6BloWMwHAYGKoUD
+AgITMBIGByqFAwICJAAGByqFAwICHgEDQwAEQLNVOfRngZcrpcTZhB8n+4HtCDLm
+mtTyAHi4/4Nk6tIdsHg8ff4DwfQG5DvMFrnF9vYZNxwXuKCqx9GhlLOlNiChCgQI
+L/D20YZLMoowHgYGKoUDAgJgMBQGByqFAwICDQAwCQYHKoUDAgIfATCBszCBsDCB
+gTBtMR8wHQYDVQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlD
+cnlwdG9Qcm8xCzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAt
+MjAwMUBleGFtcGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuIQQqMCgEIBajHOfOTukN
+8ex0aQRoHsefOu24Ox8dSn75pdnLGdXoBAST/YZ+MDgGCSqGSIb3DQEHATAdBgYq
+hQMCAhUwEwQItzXhegc1oh0GByqFAwICHwGADDmxivS/qeJlJbZVyQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'],
+ asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'], )
+ param2, rest = der_decoder(
+ alg2['parameters'],
+ asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+
+class KeyTransportTestCase(unittest.TestCase):
+ keytrans_pem_text = """\
+MIIBpwYJKoZIhvcNAQcDoIIBmDCCAZQCAQAxggFTMIIBTwIBADCBgTBtMR8wHQYD
+VQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlDcnlwdG9Qcm8x
+CzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAtMjAwMUBleGFt
+cGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuITAcBgYqhQMCAhMwEgYHKoUDAgIkAAYH
+KoUDAgIeAQSBpzCBpDAoBCBqL6ghBpVon5/kR6qey2EVK35BYLxdjfv1PSgbGJr5
+dQQENm2Yt6B4BgcqhQMCAh8BoGMwHAYGKoUDAgITMBIGByqFAwICJAAGByqFAwIC
+HgEDQwAEQE0rLzOQ5tyj3VUqzd/g7/sx93N+Tv+/eImKK8PNMZQESw5gSJYf28dd
+Em/askCKd7W96vLsNMsjn5uL3Z4SwPYECJeV4ywrrSsMMDgGCSqGSIb3DQEHATAd
+BgYqhQMCAhUwEwQIvBCLHwv/NCkGByqFAwICHwGADKqOch3uT7Mu4w+hNw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'], asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2, rest = der_decoder(
+ alg2['parameters'], asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4387.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4387.py
new file mode 100644
index 0000000000..5c122254c8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4387.py
@@ -0,0 +1,84 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4387
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDLzCCArWgAwIBAgIJAKWzVCgbsG5JMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMTIyMDI1MzAzWhcNMjAxMTIxMDI1MzAzWjBZMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxGTAXBgNVBAMTEHJlcG8uZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUr
+gQQAIgNiAAS/J1NNkqicN432Uwlw+Gu4pLvYpSr2W8zJvCOy61ncEzKNIs4cxqSc
+N0rl6K32tNCQGCsQFaBK4wZKXbHpUEPWrfYAWYebYDOhMlOE/agxH3nZRRnYv4O7
+pGrk/YZamGijggFhMIIBXTALBgNVHQ8EBAMCB4AwQgYJYIZIAYb4QgENBDUWM1Ro
+aXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBwdXJwb3Nl
+LjAdBgNVHQ4EFgQUWDRoN3XtN1n8ZH+bQuSAsr42gQwwHwYDVR0jBBgwFoAU8jXb
+NATapVXyvWkDmbBi7OIVCMEwgckGCCsGAQUFBwEBBIG8MIG5MCQGCCsGAQUFBzAB
+hhhodHRwOi8vb2NzcC5leGFtcGxlLmNvbS8wMgYIKwYBBQUHMAKGJmh0dHA6Ly9y
+ZXBvLmV4YW1wbGUuY29tL2NhaXNzdWVycy5odG1sMC4GCCsGAQUFBzAGhiJodHRw
+Oi8vcmVwby5leGFtcGxlLmNvbS9jZXJ0cy5odG1sMC0GCCsGAQUFBzAHhiFodHRw
+Oi8vcmVwby5leGFtcGxlLmNvbS9jcmxzLmh0bWwwCgYIKoZIzj0EAwMDaAAwZQIw
+C9Y1McQ+hSEZLtzLw1xzk3QSQX6NxalySoIIoNXpcDrGZJcjLRunBg8G9B0hqG69
+AjEAxtzj8BkMvhb5d9DTKDVg5pmjl9z7UtRK87/LJM+EW/9+PAzB2IT3T+BPHKb4
+kjBJ
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid_list = [
+ rfc4387.id_ad_http_certs,
+ rfc4387.id_ad_http_crls,
+ ]
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_pe_authorityInfoAccess:
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.AuthorityInfoAccessSyntax())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for ad in extnValue:
+ if ad['accessMethod'] in oid_list:
+ uri = ad['accessLocation']['uniformResourceIdentifier']
+ self.assertIn('http://repo.example.com/c', uri)
+ count += 1
+
+ self.assertEqual(len(oid_list), count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4476.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4476.py
new file mode 100644
index 0000000000..b0a8fd3f9b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4476.py
@@ -0,0 +1,144 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc4476
+
+
+class AttributeCertificatePolicyTestCase(unittest.TestCase):
+ pem_text = """\
+MIID7zCCA1gCAQEwgY+gUTBKpEgwRjEjMCEGA1UEAwwaQUNNRSBJbnRlcm1lZGlh
+dGUgRUNEU0EgQ0ExCzAJBgNVBAYTAkZJMRIwEAYDVQQKDAlBQ01FIEx0ZC4CAx7N
+WqE6pDgwNjETMBEGA1UEAwwKQUNNRSBFQ0RTQTELMAkGA1UEBhMCRkkxEjAQBgNV
+BAoMCUFDTUUgTHRkLqBWMFSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkEx
+EDAOBgNVBAcMB0hlcm5kb24xIjAgBgNVBAoMGUJvZ3VzIEF0dHJpYnV0ZSBBdXRo
+b3RpdHkwDQYJKoZIhvcNAQELBQACBAu1MO4wIhgPMjAxOTEyMTUxMjAwMDBaGA8y
+MDE5MTIzMTEyMDAwMFowgfIwPAYIKwYBBQUHCgExMDAuhgt1cm46c2VydmljZaQV
+MBMxETAPBgNVBAMMCHVzZXJuYW1lBAhwYXNzd29yZDAyBggrBgEFBQcKAjEmMCSG
+C3VybjpzZXJ2aWNlpBUwEzERMA8GA1UEAwwIdXNlcm5hbWUwNQYIKwYBBQUHCgMx
+KTAnoBikFjAUMRIwEAYDVQQDDAlBQ01FIEx0ZC4wCwwJQUNNRSBMdGQuMCAGCCsG
+AQUFBwoEMRQwEjAQDAZncm91cDEMBmdyb3VwMjAlBgNVBEgxHjANoQuGCXVybjpy
+b2xlMTANoQuGCXVybjpyb2xlMjCCATkwHwYDVR0jBBgwFoAUgJCMhskAsEBzvklA
+X8yJBOXO500wCQYDVR04BAIFADA8BgNVHTcENTAzoAqGCHVybjp0ZXN0oBaCFEFD
+TUUtTHRkLmV4YW1wbGUuY29toA2GC3Vybjphbm90aGVyMIHMBggrBgEFBQcBDwSB
+vzCBvDCBuQYKKwYBBAGBrGAwCjCBqjBFBggrBgEFBQcCBBY5aHR0cHM6Ly93d3cu
+ZXhhbXBsZS5jb20vYXR0cmlidXRlLWNlcnRpZmljYXRlLXBvbGljeS5odG1sMGEG
+CCsGAQUFBwIFMFUwIwwZQm9ndXMgQXR0cmlidXRlIEF1dGhvcml0eTAGAgEKAgEU
+Gi5URVNUIGF0dHJpYnV0ZSBjZXJ0aWZpY2F0ZSBwb2xpY3kgZGlzcGxheSB0ZXh0
+MA0GCSqGSIb3DQEBCwUAA4GBACygfTs6TkPurZQTLufcE3B1H2707OXKsJlwRpuo
+dR2oJbunSHZ94jcJHs5dfbzFs6vNfVLlBiDBRieX4p+4JcQ2P44bkgyiUTJu7g1b
+6C1liB3vO6yH5hOZicOAaKd+c/myuGb9uFRoaXNfc2lnbmF0dXJlX2lzX2ludmFs
+aWQh
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5755.AttributeCertificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ found_ac_policy_qualifier1 = False
+ found_ac_policy_qualifier2 = False
+ for extn in asn1Object['acinfo']['extensions']:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+ if extn['extnID'] == rfc4476.id_pe_acPolicies:
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ oid = univ.ObjectIdentifier((1, 3, 6, 1, 4, 1, 22112, 48, 10,))
+ self.assertEqual(oid, ev[0]['policyIdentifier'])
+
+ for pq in ev[0]['policyQualifiers']:
+ self.assertIn(
+ pq['policyQualifierId'], rfc5280.policyQualifierInfoMap)
+
+ pqv, rest = der_decoder(
+ pq['qualifier'],
+ asn1Spec=rfc5280.policyQualifierInfoMap[
+ pq['policyQualifierId']])
+
+ self.assertFalse(rest)
+ self.assertTrue(pqv.prettyPrint())
+ self.assertEqual(pq['qualifier'], der_encoder(pqv))
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acps:
+ self.assertIn('example.com', pqv)
+ found_ac_policy_qualifier1 = True
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acunotice:
+ self.assertIn(20, pqv[0]['noticeNumbers'])
+ found_ac_policy_qualifier2 = True
+
+ assert found_ac_policy_qualifier1
+ assert found_ac_policy_qualifier2
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ found_ac_policy_qualifier1 = False
+ found_ac_policy_qualifier2 = False
+ for extn in asn1Object['acinfo']['extensions']:
+ if extn['extnID'] == rfc4476.id_pe_acPolicies:
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ oid = univ.ObjectIdentifier((1, 3, 6, 1, 4, 1, 22112, 48, 10,))
+ self.assertEqual(oid, ev[0]['policyIdentifier'])
+
+ for pq in ev[0]['policyQualifiers']:
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acps:
+ self.assertIn('example.com', pq['qualifier'])
+ found_ac_policy_qualifier1 = True
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acunotice:
+ self.assertIn(20, pq['qualifier'][0]['noticeNumbers'])
+ found_ac_policy_qualifier2 = True
+
+ assert found_ac_policy_qualifier1
+ assert found_ac_policy_qualifier2
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4490.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4490.py
new file mode 100644
index 0000000000..5c3b8cf844
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4490.py
@@ -0,0 +1,274 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4357
+from pyasn1_modules import rfc4490
+
+
+class SignedTestCase(unittest.TestCase):
+ signed_pem_text = """\
+MIIBKAYJKoZIhvcNAQcCoIIBGTCCARUCAQExDDAKBgYqhQMCAgkFADAbBgkqhkiG
+9w0BBwGgDgQMc2FtcGxlIHRleHQKMYHkMIHhAgEBMIGBMG0xHzAdBgNVBAMMFkdv
+c3RSMzQxMC0yMDAxIGV4YW1wbGUxEjAQBgNVBAoMCUNyeXB0b1BybzELMAkGA1UE
+BhMCUlUxKTAnBgkqhkiG9w0BCQEWGkdvc3RSMzQxMC0yMDAxQGV4YW1wbGUuY29t
+AhAr9cYewhG9F8fc1GJmtC4hMAoGBiqFAwICCQUAMAoGBiqFAwICEwUABEDAw0LZ
+P4/+JRERiHe/icPbg0IE1iD5aCqZ9v4wO+T0yPjVtNr74caRZzQfvKZ6DRJ7/RAl
+xlHbjbL0jHF+7XKp
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ encoded_null = der_encoder(univ.Null(""))
+
+ si = sd['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['signatureAlgorithm']['parameters'])
+
+ sig = rfc4490.GostR3410_2001_Signature()
+ sig = si['signature']
+ self.assertEqual(64, len(sig))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ si = asn1Object['content']['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['signatureAlgorithm']['parameters'])
+
+ sig = rfc4490.GostR3410_2001_Signature()
+ sig = si['signature']
+ self.assertEqual(64, len(sig))
+
+class KeyAgreeTestCase(unittest.TestCase):
+ keyagree_pem_text = """\
+MIIBpAYJKoZIhvcNAQcDoIIBlTCCAZECAQIxggFQoYIBTAIBA6BloWMwHAYGKoUD
+AgITMBIGByqFAwICJAAGByqFAwICHgEDQwAEQLNVOfRngZcrpcTZhB8n+4HtCDLm
+mtTyAHi4/4Nk6tIdsHg8ff4DwfQG5DvMFrnF9vYZNxwXuKCqx9GhlLOlNiChCgQI
+L/D20YZLMoowHgYGKoUDAgJgMBQGByqFAwICDQAwCQYHKoUDAgIfATCBszCBsDCB
+gTBtMR8wHQYDVQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlD
+cnlwdG9Qcm8xCzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAt
+MjAwMUBleGFtcGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuIQQqMCgEIBajHOfOTukN
+8ex0aQRoHsefOu24Ox8dSn75pdnLGdXoBAST/YZ+MDgGCSqGSIb3DQEHATAdBgYq
+hQMCAhUwEwQItzXhegc1oh0GByqFAwICHwGADDmxivS/qeJlJbZVyQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'],
+ asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4490.id_GostR3410_2001_CryptoPro_ESDH, alg2['algorithm'])
+ param2, rest = der_decoder(
+ alg2['parameters'], asn1Spec=rfc4357.AlgorithmIdentifier())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+
+ self.assertEqual(rfc4490.id_Gost28147_89_None_KeyWrap, param2['algorithm'])
+ kwa_p, rest = der_decoder(
+ param2['parameters'], asn1Spec=rfc4490.Gost28147_89_KeyWrapParameters())
+ self.assertFalse(rest)
+ self.assertTrue(kwa_p.prettyPrint())
+ self.assertEqual(param2['parameters'], der_encoder(kwa_p))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, kwa_p['encryptionParamSet'])
+
+ alg3 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg3['algorithm'])
+ param3, rest = der_decoder(alg3['parameters'], asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param3.prettyPrint())
+ self.assertEqual(alg3['parameters'], der_encoder(param3))
+ self.assertEqual(8, len(param3['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param3['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ rfc4490.id_GostR3410_2001_CryptoPro_ESDH: rfc5280.AlgorithmIdentifier(),
+ }
+
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4490.id_GostR3410_2001_CryptoPro_ESDH, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(rfc4490.id_Gost28147_89_None_KeyWrap, param2['algorithm'])
+ kwa_p = param2['parameters']
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, kwa_p['encryptionParamSet'])
+
+ alg3 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg3['algorithm'])
+ param3 = alg3['parameters']
+ self.assertEqual(8, len(param3['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param3['encryptionParamSet'])
+
+class KeyTransportTestCase(unittest.TestCase):
+ keytrans_pem_text = """\
+MIIBpwYJKoZIhvcNAQcDoIIBmDCCAZQCAQAxggFTMIIBTwIBADCBgTBtMR8wHQYD
+VQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlDcnlwdG9Qcm8x
+CzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAtMjAwMUBleGFt
+cGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuITAcBgYqhQMCAhMwEgYHKoUDAgIkAAYH
+KoUDAgIeAQSBpzCBpDAoBCBqL6ghBpVon5/kR6qey2EVK35BYLxdjfv1PSgbGJr5
+dQQENm2Yt6B4BgcqhQMCAh8BoGMwHAYGKoUDAgITMBIGByqFAwICJAAGByqFAwIC
+HgEDQwAEQE0rLzOQ5tyj3VUqzd/g7/sx93N+Tv+/eImKK8PNMZQESw5gSJYf28dd
+Em/askCKd7W96vLsNMsjn5uL3Z4SwPYECJeV4ywrrSsMMDgGCSqGSIb3DQEHATAd
+BgYqhQMCAhUwEwQIvBCLHwv/NCkGByqFAwICHwGADKqOch3uT7Mu4w+hNw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'], asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2, rest = der_decoder(
+ alg2['parameters'], asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4491.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4491.py
new file mode 100644
index 0000000000..24b94a97ec
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4491.py
@@ -0,0 +1,156 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4491
+from pyasn1_modules import rfc4357
+
+
+class GostR341094CertificateTestCase(unittest.TestCase):
+ gostR3410_94_cert_pem_text = """\
+MIICCzCCAboCECMO42BGlSTOxwvklBgufuswCAYGKoUDAgIEMGkxHTAbBgNVBAMM
+FEdvc3RSMzQxMC05NCBleGFtcGxlMRIwEAYDVQQKDAlDcnlwdG9Qcm8xCzAJBgNV
+BAYTAlJVMScwJQYJKoZIhvcNAQkBFhhHb3N0UjM0MTAtOTRAZXhhbXBsZS5jb20w
+HhcNMDUwODE2MTIzMjUwWhcNMTUwODE2MTIzMjUwWjBpMR0wGwYDVQQDDBRHb3N0
+UjM0MTAtOTQgZXhhbXBsZTESMBAGA1UECgwJQ3J5cHRvUHJvMQswCQYDVQQGEwJS
+VTEnMCUGCSqGSIb3DQEJARYYR29zdFIzNDEwLTk0QGV4YW1wbGUuY29tMIGlMBwG
+BiqFAwICFDASBgcqhQMCAiACBgcqhQMCAh4BA4GEAASBgLuEZuF5nls02CyAfxOo
+GWZxV/6MVCUhR28wCyd3RpjG+0dVvrey85NsObVCNyaE4g0QiiQOHwxCTSs7ESuo
+v2Y5MlyUi8Go/htjEvYJJYfMdRv05YmKCYJo01x3pg+2kBATjeM+fJyR1qwNCCw+
+eMG1wra3Gqgqi0WBkzIydvp7MAgGBiqFAwICBANBABHHCH4S3ALxAiMpR3aPRyqB
+g1DjB8zy5DEjiULIc+HeIveF81W9lOxGkZxnrFjXBSqnjLeFKgF1hffXOAP7zUM=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.gostR3410_94_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_94, spki_a['algorithm'])
+
+ pk_p, rest = der_decoder(
+ spki_a['parameters'],
+ asn1Spec=rfc4491.GostR3410_94_PublicKeyParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(pk_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(pk_p))
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, pk_p['digestParamSet'])
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ rfc4491.id_GostR3410_94: rfc4491.GostR3410_94_PublicKeyParameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.gostR3410_94_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypesMap, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_94, spki_a['algorithm'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, spki_a['parameters']['digestParamSet'])
+
+class GostR34102001CertificateTestCase(unittest.TestCase):
+ gostR3410_2001_cert_pem_text = """\
+MIIB0DCCAX8CECv1xh7CEb0Xx9zUYma0LiEwCAYGKoUDAgIDMG0xHzAdBgNVBAMM
+Fkdvc3RSMzQxMC0yMDAxIGV4YW1wbGUxEjAQBgNVBAoMCUNyeXB0b1BybzELMAkG
+A1UEBhMCUlUxKTAnBgkqhkiG9w0BCQEWGkdvc3RSMzQxMC0yMDAxQGV4YW1wbGUu
+Y29tMB4XDTA1MDgxNjE0MTgyMFoXDTE1MDgxNjE0MTgyMFowbTEfMB0GA1UEAwwW
+R29zdFIzNDEwLTIwMDEgZXhhbXBsZTESMBAGA1UECgwJQ3J5cHRvUHJvMQswCQYD
+VQQGEwJSVTEpMCcGCSqGSIb3DQEJARYaR29zdFIzNDEwLTIwMDFAZXhhbXBsZS5j
+b20wYzAcBgYqhQMCAhMwEgYHKoUDAgIkAAYHKoUDAgIeAQNDAARAhJVodWACGkB1
+CM0TjDGJLP3lBQN6Q1z0bSsP508yfleP68wWuZWIA9CafIWuD+SN6qa7flbHy7Df
+D2a8yuoaYDAIBgYqhQMCAgMDQQA8L8kJRLcnqeyn1en7U23Sw6pkfEQu3u0xFkVP
+vFQ/3cHeF26NG+xxtZPz3TaTVXdoiYkXYiD02rEx1bUcM97i
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.gostR3410_2001_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_2001, spki_a['algorithm'])
+
+ pk_p, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc4491.GostR3410_2001_PublicKeyParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(pk_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(pk_p))
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, pk_p['digestParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4491.id_GostR3410_2001: rfc4491.GostR3410_2001_PublicKeyParameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.gostR3410_2001_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_2001, spki_a['algorithm'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, spki_a['parameters']['digestParamSet'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4683.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4683.py
new file mode 100644
index 0000000000..7935ad8f6d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4683.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4683
+
+
+class SIMCertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIDOzCCAsCgAwIBAgIJAKWzVCgbsG5KMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMjExMjIzODUwWhcNMjAxMjEwMjIzODUwWjBOMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxDjAMBgNVBAMTBUhlbnJ5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEZj80
+YyLeDb0arJY8ZxBUMMxPEMT9+5WFVBCC1dPpUn25MmEpb82Dz1inv3xmG6sFKIHj
+achlvkNGDXTUzZ1DdCF0O7gU5Z+YctwczGQVSt/2Ox0NWTiHLDpbpyoTyK0Bo4IB
+dzCCAXMwHQYDVR0OBBYEFOjxtcL2ucMoTjS5MNKKpdKzXtz/MG8GA1UdIwRoMGaA
+FPI12zQE2qVV8r1pA5mwYuziFQjBoUOkQTA/MQswCQYDVQQGEwJVUzELMAkGA1UE
+CAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBggkA6JHW
+BpFPzvIwDwYDVR0TAQH/BAUwAwEB/zALBgNVHQ8EBAMCAYYwQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjB/BgNVHREEeDB2oGEGCCsGAQUFBwgGoFUwUzANBglghkgBZQMEAgEF
+AAQgnrmI6yL2lM5kmfLVn28A8PVIVgE2S7HEFtfLExhg7HsEIOaAn/Pq8hb4qn/K
+imN3uyZrjAv3Uspg0VYEcetJdHSCgRFoZW5yeUBleGFtcGxlLmNvbTAKBggqhkjO
+PQQDAwNpADBmAjEAiWhD493OGnqfdit6SRdBjn3N6HVaMxyVO0Lfosjf9+9FDWad
+rYt3o64YQqGz9NTMAjEAmahE0EMiu/TyzRDidlG2SxmY2aHg9hQO0t38i1jInJyi
+9LjB81zHEL6noTgBZsan
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_PEPSI = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ gn_on = gn['otherName']
+ if gn_on['type-id'] == rfc4683.id_on_SIM:
+ self.assertIn(
+ gn_on['type-id'], rfc5280.anotherNameMap)
+
+ spec = rfc5280.anotherNameMap[gn_on['type-id']]
+
+ on, rest = der_decoder(
+ gn_on['value'], asn1Spec=spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(on.prettyPrint())
+ self.assertEqual(gn_on['value'], der_encoder(on))
+
+ self.assertEqual(
+ 'e6809ff3ea', on['pEPSI'].prettyPrint()[2:12])
+
+ found_PEPSI = True
+
+ self.assertTrue(found_PEPSI)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_PEPSI = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ pepsi = gn['otherName']['value']['pEPSI']
+ self.assertEqual(
+ 'e6809ff3ea', pepsi.prettyPrint()[2:12])
+
+ found_PEPSI = True
+
+ self.assertTrue(found_PEPSI)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc4985.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc4985.py
new file mode 100644
index 0000000000..b261ef92af
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc4985.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4985
+
+
+class XMPPCertificateTestCase(unittest.TestCase):
+ xmpp_server_cert_pem_text = """\
+MIIC6DCCAm+gAwIBAgIJAKWzVCgbsG5DMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDI0MjMxNjA0WhcNMjAxMDIzMjMxNjA0WjBNMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xHzAdBgNVBAoTFkV4
+YW1wbGUgUHJvZHVjdHMsIEluYy4wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQZzQlk
+03nJRPF6+w1NxFELmQ5vJTjTRz3eu03CRtahK4Wnwd4GwbDe8NVHAEG2qTzBXFDu
+p6RZugsBdf9GcEZHG42rThYYOzIYzVFnI7tQgA+nTWSWZN6eoU/EXcknhgijggEn
+MIIBIzAdBgNVHQ4EFgQUkQpUMYcbUesEn5buI03POFnktJgwHwYDVR0jBBgwFoAU
+8jXbNATapVXyvWkDmbBi7OIVCMEwCwYDVR0PBAQDAgeAMIGPBgNVHREEgYcwgYSg
+KQYIKwYBBQUHCAegHRYbX3htcHAtY2xpZW50LmltLmV4YW1wbGUuY29toCkGCCsG
+AQUFBwgHoB0WG194bXBwLXNlcnZlci5pbS5leGFtcGxlLmNvbaAcBggrBgEFBQcI
+BaAQDA5pbS5leGFtcGxlLmNvbYIOaW0uZXhhbXBsZS5jb20wQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjAKBggqhkjOPQQDAwNnADBkAjAEo4mhDGC6/R39HyNgzLseNAp36qBH
+yQJ/AWsBojN0av8akeVv9IuM45yqLKdiCzcCMDCjh1lFnCvurahwp5D1j9pAZMsg
+nOzhcMpnHs2U/eN0lHl/JNgnbftl6Dvnt59xdA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ gn_on = gn['otherName']
+ if gn_on['type-id'] == rfc4985.id_on_dnsSRV:
+ self.assertIn(gn_on['type-id'], rfc5280.anotherNameMap)
+
+ spec = rfc5280.anotherNameMap[gn['otherName']['type-id']]
+ on, rest = der_decoder(gn_on['value'], asn1Spec=spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(on.prettyPrint())
+ self.assertEqual(gn_on['value'], der_encoder(on))
+ self.assertIn('im.example.com', on)
+
+ count += 1
+
+ self.assertEqual(2, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ if gn['otherName']['type-id'] == rfc4985.id_on_dnsSRV:
+ self.assertIn('im.example.com', gn['otherName']['value'])
+ count += 1
+
+ self.assertEqual(2, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5035.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5035.py
new file mode 100644
index 0000000000..196a6e4618
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5035.py
@@ -0,0 +1,192 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5035
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIFzAYJKoZIhvcNAQcCoIIFvTCCBbkCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggLQ
+MIICzAIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKgggH1MBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8X
+DTE5MDUyOTE4MjMxOVowJQYLKoZIhvcNAQkQAgcxFgQUAbWZQYhLO5wtUgsOCGtT
+4V3aNhUwLwYLKoZIhvcNAQkQAgQxIDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZI
+hvcNAQcBMDUGCyqGSIb3DQEJEAICMSYxJAIBAQYKKwYBBAGBrGABARMTQm9hZ3Vz
+IFByaXZhY3kgTWFyazA/BgkqhkiG9w0BCQQxMgQwtuQipP2CZx7U96rGbUT06LC5
+jVFYccZW5/CaNvpcrOPiChDm2vI3m4k300z5mSZsME0GCyqGSIb3DQEJEAIBMT4w
+PAQgx08hD2QnVwj1DoeRELNtdZ0PffW4BQIvcwwVc/goU6OAAQEwFTATgRFhbGlj
+ZUBleGFtcGxlLmNvbTCBmwYLKoZIhvcNAQkQAi8xgYswgYgwdjB0BCACcp04gyM2
+dTDg+0ydCwlucr6Mg8Wd3J3c9V+iLHsnZzBQMEOkQTA/MQswCQYDVQQGEwJVUzEL
+MAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENB
+AgkApbNUKBuwbjswDjAMBgorBgEEAYGsYAEBMAoGCCqGSM49BAMDBGcwZQIxAO3K
+D9YjFTKE3p383VVw/ol79WTVoMea4H1+7xn+3E1XO4oyb7qwQz0KmsGfdqWptgIw
+T9yMtRLN5ZDU14y+Phzq9NKpSw/x5KyXoUKjCMc3Ru6dIW+CgcRQees+dhnvuD5U
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder (substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+
+class SignedReceiptTestCase(unittest.TestCase):
+ signed_receipt_pem_text = """\
+MIIE3gYJKoZIhvcNAQcCoIIEzzCCBMsCAQMxDTALBglghkgBZQMEAgEwga4GCyqGSIb3DQEJ
+EAEBoIGeBIGbMIGYAgEBBgkqhkiG9w0BBwEEIMdPIQ9kJ1cI9Q6HkRCzbXWdD331uAUCL3MM
+FXP4KFOjBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzmo18WwkFrEYH3EMsvpXEIGqsFTFN
+6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZyb2xSX6Gr/IfCIm0angfOI39g7lAZDyivjh5H
+/oSgggJ3MIICczCCAfqgAwIBAgIJAKWzVCgbsG48MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0Ew
+HhcNMTkwNTI5MTkyMDEzWhcNMjAwNTI4MTkyMDEzWjBsMQswCQYDVQQGEwJVUzELMAkGA1UE
+CBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1wbGUxDDAKBgNVBAMTA0Jv
+YjEeMBwGCSqGSIb3DQEJARYPYm9iQGV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACID
+YgAEMaRiVS8WvN8Ycmpfq75jBbOMUukNfXAg6AL0JJBXtIFAuIJcZVlkLn/xbywkcMLHK/O+
+w9RWUQa2Cjw+h8b/1Cl+gIpqLtE558bD5PfM2aYpJ/YE6yZ9nBfTQs7z1TH5o4GUMIGRMAsG
+A1UdDwQEAwIHgDBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUg
+dHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1UdDgQWBBTKa2Zy3iybV3+YjuLDKtNmjsIa
+pTAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNnADBkAjAV
+boS6OfEYQomLDi2RUkd71hzwwiQZztbxNbosahIzjR8ZQaHhjdjJlrP/T6aXBwsCMDfRweYz
+3Ce4E4wPfoqQnvqpM7ZlfhstjQQGOsWAtIIfqW/l+TgCO8ux3XLV6fj36zGCAYkwggGFAgEB
+MEwwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYD
+VQQKDAhCb2d1cyBDQQIJAKWzVCgbsG48MAsGCWCGSAFlAwQCAaCBrjAaBgkqhkiG9w0BCQMx
+DQYLKoZIhvcNAQkQAQEwHAYJKoZIhvcNAQkFMQ8XDTE5MDUyOTE5MzU1NVowLwYJKoZIhvcN
+AQkEMSIEIGb9Hm2kCnM0CYNpZU4Uj7dN0AzOieIn9sDqZMcIcZrEMEEGCyqGSIb3DQEJEAIF
+MTIEMBZzeHVja7fQ62ywyh8rtKzBP1WJooMdZ+8c6pRqfIESYIU5bQnH99OPA51QCwdOdjAK
+BggqhkjOPQQDAgRoMGYCMQDZiT22xgab6RFMAPvN4fhWwzx017EzttD4VaYrpbolropBdPJ6
+jIXiZQgCwxbGTCwCMQClaQ9K+L5LTeuW50ZKSIbmBZQ5dxjtnK3OlS7hYRi6U0JKZmWbbuS8
+vFIgX7eIkd8=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+ self.assertEqual(
+ rfc5035.id_ct_receipt, sd['encapContentInfo']['eContentType'])
+
+ receipt, rest = der_decoder(
+ sd['encapContentInfo']['eContent'], asn1Spec=rfc5035.Receipt())
+
+ self.assertFalse(rest)
+ self.assertTrue(receipt.prettyPrint())
+ self.assertEqual(
+ sd['encapContentInfo']['eContent'], der_encoder(receipt))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd = asn1Object['content']
+
+ self.assertEqual(
+ rfc5652.CMSVersion().subtype(value='v3'), sd['version'])
+ self.assertIn(
+ sd['encapContentInfo']['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(
+ rfc5035.id_ct_receipt, sd['encapContentInfo']['eContentType'])
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+ if sa['attrType'] == rfc5035.id_aa_msgSigDigest:
+ self.assertIn(
+ '0x167378', sa['attrValues'][0].prettyPrint()[:10])
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ receipt, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd['encapContentInfo']['eContentType']])
+
+ self.assertEqual(1, receipt['version'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5083.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5083.py
new file mode 100644
index 0000000000..e2eb17274a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5083.py
@@ -0,0 +1,95 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2018, 2019 Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5035
+
+
+class AuthEnvelopedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIICdQIBADGCAiekggIjBgsqhkiG9w0BCRANATCCAhICAQAEE3B0Zi1rbWM6MTM2MTQxMjIx
+MTIwDQYLKoZIhvcNAQkQAzAwCwYJYIZIAWUDBAEtMIIBsDCCAawCAQKAFJ7rZ8m5WnTUTS8W
+OWaA6AG1y6ScMA0GCSqGSIb3DQEBAQUABIIBgHfnHNqDbyyql2NqX6UQggelWMTjwzJJ1L2e
+rbsj1bIAGmpIsUijw+fX8VOS7v1C9ui2Md9NFgCfkmKLo8T/jELqrk7MpMu09G5zDgeXzJfQ
+DFc115wbrWAUU3XP7XIb6TNOc3xtq4UxA5V6jNUK2XyWKpjzOtM7gm0VWIJGVVlYu+u32LQc
+CjRFb87kvOY/WEnjxQpCW8g+4V747Ud97dYpMub7TLJiRNZkdHnq8xEGKlXjVHSgc10lhphe
+1kFGeCpfJEsqjtN7YsVzf65ri9Z+3FJ1IO4cnMDbzGhyRXkS7a0k58/miJbSj88PvzKNSURw
+pu4YHMQQX/mjT2ey1SY4ihPMuxxgTdCa04L0UxaRr7xAucz3n2UWShelm3IIjnWRlYdXypnX
+vKvwCLoeh5mJwUl1JNFPCQkQ487cKRyobUyNgXQKT4ZDHCgXciwsX5nTsom87Ixp5vqSDJ+D
+hXA0r/Caiu1vnY5X9GLHSkqgXkgqgUuu0LfcsQERD8psfQQogbiuZDqJmYt1Iau/pkuGfmee
+qeiM3aeQ4NZf9AFZUVWBGArPNHrvVDA3BgkqhkiG9w0BBwEwGwYJYIZIAWUDBAEuMA4EDMr+
+ur76ztut3sr4iIANmvLRbyFUf87+2bPvLQQMoOWSXMGE4BckY8RM
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5083.AuthEnvelopedData()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class AuthEnvelopedDataOpenTypesTestCase(unittest.TestCase):
+ pem_text = """\
+MIICvQYLKoZIhvcNAQkQARegggKsMIICqAIBADGCAiekggIjBgsqhkiG9w0BCRAN
+ATCCAhICAQAEE3B0Zi1rbWM6MTM2MTQxMjIxMTIwDQYLKoZIhvcNAQkQAzAwCwYJ
+YIZIAWUDBAEtMIIBsDCCAawCAQKAFJ7rZ8m5WnTUTS8WOWaA6AG1y6ScMA0GCSqG
+SIb3DQEBAQUABIIBgHfnHNqDbyyql2NqX6UQggelWMTjwzJJ1L2erbsj1bIAGmpI
+sUijw+fX8VOS7v1C9ui2Md9NFgCfkmKLo8T/jELqrk7MpMu09G5zDgeXzJfQDFc1
+15wbrWAUU3XP7XIb6TNOc3xtq4UxA5V6jNUK2XyWKpjzOtM7gm0VWIJGVVlYu+u3
+2LQcCjRFb87kvOY/WEnjxQpCW8g+4V747Ud97dYpMub7TLJiRNZkdHnq8xEGKlXj
+VHSgc10lhphe1kFGeCpfJEsqjtN7YsVzf65ri9Z+3FJ1IO4cnMDbzGhyRXkS7a0k
+58/miJbSj88PvzKNSURwpu4YHMQQX/mjT2ey1SY4ihPMuxxgTdCa04L0UxaRr7xA
+ucz3n2UWShelm3IIjnWRlYdXypnXvKvwCLoeh5mJwUl1JNFPCQkQ487cKRyobUyN
+gXQKT4ZDHCgXciwsX5nTsom87Ixp5vqSDJ+DhXA0r/Caiu1vnY5X9GLHSkqgXkgq
+gUuu0LfcsQERD8psfQQogbiuZDqJmYt1Iau/pkuGfmeeqeiM3aeQ4NZf9AFZUVWB
+GArPNHrvVDA3BgkqhkiG9w0BBwEwGwYJYIZIAWUDBAEuMA4EDMr+ur76ztut3sr4
+iIANmvLRbyFUf87+2bPvLQQMoOWSXMGE4BckY8RMojEwLwYLKoZIhvcNAQkQAgQx
+IDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZIhvcNAQcB
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ authenv = asn1Object['content']
+
+ self.assertEqual(0, authenv['version'])
+
+ for attr in authenv['unauthAttrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc5035.id_aa_contentHint:
+ self.assertIn(
+ 'Watson', attr['attrValues'][0]['contentDescription'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5084.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5084.py
new file mode 100644
index 0000000000..c8ad0c29cb
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5084.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2018, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5084
+from pyasn1_modules import rfc5652
+
+
+class CCMParametersTestCase(unittest.TestCase):
+ ccm_pem_text = "MBEEDE2HVyIurFKUEX8MEgIBBA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5084.CCMParameters()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ccm_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class GCMParametersTestCase(unittest.TestCase):
+ gcm_pem_text = "MBEEDE2HVyIurFKUEX8MEgIBEA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5084.GCMParameters()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.gcm_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class GCMOpenTypesTestCase(unittest.TestCase):
+ rfc8591_pem_pext = """\
+MIIHkAYLKoZIhvcNAQkQARegggd/MIIHewIBADGCAk8wggJLAgEAMDMwJjEUMBIGA1UECgwL
+ZXhhbXBsZS5jb20xDjAMBgNVBAMMBUFsaWNlAgkAg/ULtwvVxA4wDQYJKoZIhvcNAQEBBQAE
+ggIAdZphtN3x8a8kZoAFY15HYRD6JyPBueRUhLbTPoOH3pZ9xeDK+zVXGlahl1y1UOe+McEx
+2oD7cxAkhFuruNZMrCYEBCTZMwVhyEOZlBXdZEs8rZUHL3FFE5PJnygsSIO9DMxd1UuTFGTg
+Cm5V5ZLFGmjeEGJRbsfTyo52S7iseJqIN3dl743DbApu0+yuUoXKxqKdUFlEVxmhvc+Qbg/z
+fiwu8PTsYiUQDMBi4cdIlju8iLjj389xQHNyndXHWD51is89GG8vpBe+IsN8mnbGtCcpqtJ/
+c65ErJhHTR7rSJSMEqQD0LPOCKIY1q9FaSSJfMXJZk9t/rPxgUEVjfw7hAkKpgOAqoZRN+Fp
+nFyBl0FnnXo8kLp55tfVyNibtUpmdCPkOwt9b3jAtKtnvDQ2YqY1/llfEUnFOVDKwuC6MYwi
+fm92qNlAQA/T0+ocjs6gA9zOLx+wD1zqM13hMD/L+T2OHL/WgvGb62JLrNHXuPWA8RShO4kI
+lPtARKXap2S3+MX/kpSUUrNa65Y5uK1jwFFclczG+CPCIBBn6iJiQT/vOX1I97YUP4Qq6OGk
+jK064Bq6o8+e5+NmIOBcygYRv6wA7vGkmPLSWbnw99qD728bBh84fC3EjItdusqGIwjzL0eS
+UWXJ5eu0Z3mYhJGN1pe0R/TEB5ibiJsMLpWAr3gwggUPBgkqhkiG9w0BBwEwHgYJYIZIAWUD
+BAEGMBEEDE2HVyIurFKUEX8MEgIBEICCBOD+L7PeC/BpmMOb9KlS+r+LD+49fi6FGBrs8aie
+Gi7ezZQEiFYS38aYQzTYYCt3SbJQTkX1fDsGZiaw/HRiNh7sJnxWATm+XNKGoq+Wls9RhSJ4
+5Sw4GMqwpoxZjeT84UozOITk3l3fV+3XiGcCejHkp8DAKZFExd5rrjlpnnAOBX6w8NrXO4s2
+n0LrMhtBU4eB2YKhGgs5Q6wQyXtU7rc7OOwTGvxWEONzSHJ01pyvqVQZAohsZPaWLULrM/kE
+GkrhG4jcaVjVPfULi7Uqo14imYhdCq5Ba4bwqI0Ot6mB27KD6LlOnVC/YmXCNIoYoWmqy1o3
+pSm9ovnLEO/dzxQjEJXYeWRje9M/sTxotM/5oZBpYMHqIwHTJbehXFgp8+oDjyTfayMYA3fT
+cTH3XbGPQfnYW2U9+ka/JhcSYybM8cuDNFd1I1LIQXoJRITXtkvPUbJqm+s6DtS5yvG9I8aQ
+xlT365zphS4vbQaO74ujO8bE3dynrvTTV0c318TcHpN3DY9PIt6mHXMIPDLEA4wes90zg6ia
+h5XiQcLtfLaAdYwEEGlImGD8n0kOhSNgclSLMklpj5mVOs8exli3qoXlVMRJcBptSwOe0QPc
+RY30spywS4zt1UDIQ0jaecGGVtUYj586nkubhAxwZkuQKWxgt6yYTpGNSKCdvd+ygfyGJRDb
+Wdn6nck/EPnG1773KTHRhMrXrBPBpSlfyJ/ju3644CCFqCjFoTh4bmB63k9ejUEVkJIJuoeK
+eTBaUxbCIinkK4htBkgchHP51RJp4q9jQbziD3aOhg13hO1GFQ4E/1DNIJxbEnURNp/ga8Sq
+mnLY8f5Pzwhm1mSzZf+obowbQ+epISrswWyjUKKO+uJfrAVN2TS/5+X6T3U6pBWWjH6+xDng
+rAJwtIdKBo0iSEwJ2eir4X8TcrSy9l8RSOiTPtqS5dF3RWSWOzkcO72fHCf/42+DLgUVX8Oe
+5mUvp7QYiXXsXGezLJ8hPIrGuOEypafDv3TwFkBc2MIB0QUhk+GG1ENY3jiNcyEbovF5Lzz+
+ubvechHSb1arBuEczJzN4riM2Dc3c+r8N/2Ft6eivK7HUuYX1uAcArhunZpA8yBGLF1m+DUX
+FtzWAUvfMKYPdfwGMckghF7YwLrTXd8ZhPIkHNO1KdwQKIRfgIlUPfTxRB7eNrG/Ma9a/Iwr
+cI1QtkXU59uIZIw+7+FHZRWPsOjTu1Pdy+JtcSTG4dmS+DIwqpUzdu6MaBCVaOhXHwybvaSP
+TfMG/nR/NxF1FI8xgydnzXZs8HtFDL9iytKnvXHx+IIz8Rahp/PK8S80vPQNIeef/JgnIhto
+sID/A614LW1tB4cWdveYlD5U8T/XXInAtCY78Q9WJD+ecu87OJmlOdmjrFvitpQAo8+NGWxc
+7Wl7LtgDuYel7oXFCVtI2npbA7R+K5/kzUvDCY6GTgzn1Gfamc1/Op6Ue17qd/emvhbIx+ng
+3swf8TJVnCNDIXucKVA4boXSlCEhCGzfoZZYGVvm1/hrypiBtpUIKWTxLnz4AQJdZ5LGiCQJ
+QU1wMyHsg6vWmNaJVhGHE6D/EnKsvJptFIkAx0wWkh35s48p7EbU8QBg//5eNru6yvLRutfd
+BX7T4w681pCD+dOiom75C3UdahrfoFkNsZ2hB88+qNsEEPb/xuGu8ZzSPZhakhl2NS0=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.rfc8591_pem_pext)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ aed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5083.AuthEnvelopedData(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(aed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(aed))
+ self.assertEqual(0, aed['version'])
+
+ cea = aed['authEncryptedContentInfo']['contentEncryptionAlgorithm']
+
+ self.assertEqual(rfc5084.id_aes128_GCM, cea['algorithm'])
+ self.assertEqual(16, cea['parameters']['aes-ICVlen'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5126.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5126.py
new file mode 100644
index 0000000000..e43af9a33a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5126.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5126
+
+
+class SignedAttributesTestCase(unittest.TestCase):
+ pem_text = """\
+MYIBUzAYBgkqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMCsGCSqGSIb3DQEJNDEeMBww
+DQYJYIZIAWUDBAIBBQChCwYJKoZIhvcNAQELMC8GCSqGSIb3DQEJBDEiBCCyqtCC
+Gosj/GT4YPPAqKheze4A1QBU5O3tniTsVPGr7jBBBgsqhkiG9w0BCRACETEyMDCg
+BBMCVVOhBBMCVkGiIjAgExExMjMgU29tZXBsYWNlIFdheRMLSGVybmRvbiwgVkEw
+RgYLKoZIhvcNAQkQAi8xNzA1MDMwMTANBglghkgBZQMEAgEFAAQgJPmqUmGQnQ4q
+RkVtUHecJXIkozOzX8+pZQj/UD5JcnQwTgYLKoZIhvcNAQkQAg8xPzA9BgorBgEE
+AYGsYDAUMC8wCwYJYIZIAWUDBAIBBCDWjjVmAeXgZBkE/rG8Pf8pTCs4Ikowc8Vm
+l+AOeKdFgg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.SignedAttributes()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_spid_oid = False
+
+ for attr in asn1Object:
+ if attr['attrType'] in rfc5652.cmsAttributesMap.keys():
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc5126.id_aa_ets_sigPolicyId:
+ spid_oid = rfc5126.SigPolicyId('1.3.6.1.4.1.22112.48.20')
+
+ self.assertEqual(
+ spid_oid, av['signaturePolicyId']['sigPolicyId'])
+
+ found_spid_oid = True
+
+ self.assertTrue(found_spid_oid)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ attr_type_list = []
+ spid_oid = rfc5126.SigPolicyId('1.3.6.1.4.1.22112.48.20')
+
+ for attr in asn1Object:
+ if attr['attrType'] == rfc5126.id_aa_ets_sigPolicyId:
+ spid = attr['attrValues'][0]['signaturePolicyId']
+ self.assertEqual(spid_oid, spid['sigPolicyId'])
+ attr_type_list.append(rfc5126.id_aa_ets_sigPolicyId)
+
+ if attr['attrType'] == rfc5126.id_aa_ets_signerLocation:
+ cn = attr['attrValues'][0]['countryName']
+ self.assertEqual('US', cn['printableString'])
+ attr_type_list.append(rfc5126.id_aa_ets_signerLocation)
+
+ if attr['attrType'] == rfc5126.id_aa_signingCertificateV2:
+ ha = attr['attrValues'][0]['certs'][0]['hashAlgorithm']
+ self.assertEqual(rfc4055.id_sha256, ha['algorithm'])
+ attr_type_list.append(rfc5126.id_aa_signingCertificateV2)
+
+ self.assertIn(rfc5126.id_aa_ets_sigPolicyId, attr_type_list)
+ self.assertIn(rfc5126.id_aa_ets_signerLocation, attr_type_list)
+ self.assertIn(rfc5126.id_aa_signingCertificateV2, attr_type_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5208.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5208.py
new file mode 100644
index 0000000000..4bb684fd24
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5208.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5208
+
+
+class PrivateKeyInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVgIBADANBgkqhkiG9w0BAQEFAASCAUAwggE8AgEAAkEAx8CO8E0MNgEKXXDf
+I1xqBmQ+Gp3Srkqp45OApIu4lZ97n5VJ5HljU9wXcPIfx29Le3w8hCPEkugpLsdV
+GWx+EQIDAQABAkEAiv3f+DGEh6ddsPszKQXK+LuTwy2CRajKYgJnBxf5zpG50XK4
+899An+x/pGYVmVED1f0JCbk3BUbv7HViLq0qgQIhAOYlQJaQ8KJBijDpjF62lcVr
+QrqFPM4+ZrHsw0dVY2CZAiEA3jE5ngkVPfjFWEr7wS50EJhGiYlQeY4l+hADGIhd
+XDkCIQDIHt5xzmif/nOGop5/gS7ssp8ch1zfTh2IW4NWlOZMCQIgLZmYo5BlpaRK
+jAZHiKwJ8eXuhAeEVo4PyTREDmLeFjECIQCfyUPDclPo2O8ycPpozwoGwvKFrNZJ
+VWRpRKqYnOAIXQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5208.PrivateKeyInfo()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class EncryptedPrivateKeyInfoInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBgTAbBgkqhkiG9w0BBQMwDgQIdtFgDWnipT8CAggABIIBYN0hkm2xqkTCt8dJ
+iZS8+HNiyHxy8g+rmWSXv/i+bTHFUReZA2GINtTRUkWpXqWcSHxNslgf7QdfgbVJ
+xQiUM+lLhwOFh85iAHR3xmPU1wfN9NvY9DiLSpM0DMhF3OvAMZD75zIhA0GSKu7w
+dUu7ey7H4fv7bez6RhEyLdKw9/Lf2KNStNOs4ow9CAtCoxeoMSniTt6CNhbvCkve
+9vNHKiGavX1tS/YTog4wiiGzh2YxuW1RiQpTdhWiKyECgD8qQVg2tY5t3QRcXrzi
+OkStpkiAPAbiwS/gyHpsqiLo0al63SCxRefugbn1ucZyc5Ya59e3xNFQXCNhYl+Z
+Hl3hIl3cssdWZkJ455Z/bBE29ks1HtsL+bTfFi+kw/4yuMzoaB8C7rXScpGNI/8E
+pvTU2+wtuoOFcttJregtR94ZHu5wgdYqRydmFNG8PnvZT1mRMmQgUe/vp88FMmsZ
+dLsZjNQ=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5208.EncryptedPrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5275.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5275.py
new file mode 100644
index 0000000000..30bce8f314
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5275.py
@@ -0,0 +1,190 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5275
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+
+
+class GLUseKEKTestCase(unittest.TestCase):
+ pem_text = """\
+MIIMVAYJKoZIhvcNAQcCoIIMRTCCDEECAQMxDTALBglghkgBZQMEAgIwggY7Bggr
+BgEFBQcMAqCCBi0EggYpMIIGJTCCBhswggYXAgEBBgsqhkiG9w0BCRAIATGCBgMw
+ggX/MEaGLGh0dHBzOi8vd3d3LmV4YW1wbGUuY29tL2xpc3QtaW5mby9ncm91cC1s
+aXN0gRZncm91cC1saXN0QGV4YW1wbGUuY29tMIIFmzCCBZekQTA/MQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAPBgNVBAoTCEJv
+Z3VzIENBgRxncm91cC1saXN0LW93bmVyQGV4YW1wbGUuY29tMIIFMqCCBS4wggTU
+oAMCAQICFCVehe2QOuzvkY+pMECid/MyYVKJMAsGCWCGSAFlAwQDAjA/MQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAPBgNVBAoT
+CEJvZ3VzIENBMB4XDTE5MTAyMDE5MzE1MloXDTIxMTAxOTE5MzE1MlowPzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMREwDwYDVQQK
+EwhCb2d1cyBDQTCCA0cwggI5BgcqhkjOOAQBMIICLAKCAQEAt9x/0iwGww3k19h+
+wbODVK1yqjFzEY2pyfXthHcn+nEw+DpURJ+iOhYPr68E3XO5sB48r5xTZhPN5+Ye
+jD3T8qhnDtiq4qrrSH7BOaEzqCDpHE2Bpoy3SodQ5Obaiu9Kx1ixBRk/oRZUH+F+
+ATZmF0rPKrZGZOnmsh0IZm3dlmRR9FRGn0aJlZKXveqp+hZ97/r0cbSo6wdT47AP
+focgweZMvgWu1IQBs6FiunRgaeX3RyLr4fnkvCzUM7TmxpRJYtL6myAp007QvtgQ
+0AdEwVfNl3jQ0IIW7TtpXVxDDQaKZZe9yYrY4GV3etlYk8a4cpjNrBxBCCTMASE4
++iVtPQIhAJGPJRq8r3GSP6cV7V8EmlxC9ne+xkhiAjBmWtcDibXRAoIBACDebX29
+ZzVOUeaR6ovCC8c3RR93LDlrFa1zyogkZnUx7OHIvIPhFTRUUJKhwkIJ7aTaRLY/
+a9ARFllhyf+cJi6KzAKM4JufAqjN9pNncVzUo4K1vgQRy6T+2Hlc2FYJgknsdqzK
+bzO49qqHlMtywdenT+VBSI5Xa5UecC3nTcAdjW/g+GVncbQJFkx6dp9TQrLtrrOG
+uoW9aC1J2j683RL3FL8om5NpaxiA4C3ivYgrW7C5a68DkvVCt2PykPMwuR2XIdTU
+mCPOTSs1ANNtSRlf0ICL/EpQZnKyNZ86fUUcLW8nWxs/2dNelZFqKfX/rJq0HZHE
+tO9ZjICr0iwv/w8DggEGAAKCAQEAttFBDPuFMmcpY8ryoq+ES4JBYSHJNF+zBCFo
+NF/ZrCayL3HBn+BNGy5WVHFWUF/JfdNzCGdZ0/vcMT2KdS9xMsOGmK8luDyarj6z
+u4rDuQaeAmLcBsTgK+JjgNS+nxIz0pgoWyKsKwnB3ipYibgdOl6HpavVLSdC1i3U
+TV6/jpVOgWoxrYjOOOSi6Ov9y4kzsvI33H1cfUwzNd8pcV4MBcEq5rliEouo4W46
+k3Ry0RnoDejnVxzog3/6RLOyRmv/+uhLpx0n6Cl+hyPtJ+GbAv5ttle8P0ofUnYM
+gi+oVquYc7wBCjWpaL8wvIjDF4oEh264a0ZpcqrLL/mKNJeOaqOBvDCBuTAdBgNV
+HQ4EFgQUzUhlAYOypgdbBv4jgQzEc+TRtTgwegYDVR0jBHMwcYAUzUhlAYOypgdb
+Bv4jgQzEc+TRtTihQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4G
+A1UEBxMHSGVybmRvbjERMA8GA1UEChMIQm9ndXMgQ0GCFCVehe2QOuzvkY+pMECi
+d/MyYVKJMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgGGMAsGCWCGSAFlAwQD
+AgNHADBEAiBry0TcN3QY3vbI214hdSdpfP4CnLQNxRK5XEP+wQbcHQIgTGF1BXLj
+OW3eUkwUeymnG+paj+qrW+ems2ANjq3bbQkCAQIwE4AB/4IBH6QLBglghkgBZQME
+AS0wADAAMACgggSYMIICAjCCAYigAwIBAgIJAOiR1gaRT87yMAoGCCqGSM49BAMD
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjER
+MA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNTE0MDg1ODExWhcNMjEwNTEzMDg1ODEx
+WjA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24x
+ETAPBgNVBAoMCEJvZ3VzIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8FF2VLHo
+jmqlnawpQwjG6fWBQDPOy05hYq8oKcyg1PXH6kgoO8wQyKYVwsDHEvc1Vg6ErQm3
+LzdI8OQpYx3H386R2F/dT/PEmUSdcOIWsB4zrFsbzNwJGIGeZ33ZS+xGo1AwTjAd
+BgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYDVR0jBBgwFoAU8jXbNATa
+pVXyvWkDmbBi7OIVCMEwDAYDVR0TBAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjBa
+UY2Nv03KolLNRJ2wSoNK8xlvzIWTFgIhsBWpD1SpJxRRv22kkoaw9bBtmyctW+YC
+MQC3/KmjNtSFDDh1I+lbOufkFDSQpsMzcNAlwEAERQGgg6iXX+NhA+bFqNC7FyF4
+WWQwggKOMIICFaADAgECAgkApbNUKBuwbkswCgYIKoZIzj0EAwMwPzELMAkGA1UE
+BhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhC
+b2d1cyBDQTAeFw0xOTEyMjAyMDQ1MjZaFw0yMDEyMTkyMDQ1MjZaMIGGMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoT
+B0V4YW1wbGUxGTAXBgNVBAMTEEdyb3VwIExpc3QgT3duZXIxKzApBgkqhkiG9w0B
+CQEWHGdyb3VwLWxpc3Qtb3duZXJAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUr
+gQQAIgNiAASzrdo0dy4su1viboFbwU8NjgURE5GxAxYIHUPOWsdR1lnMR2v8vnjy
+zd80HkNlInHRAoZuXgzceCpbqhcBHtFLPWCqxL55duG9+CwlL9uIl4ovrFH6ZMtD
+oZFLtDJvMhOjgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlz
+IGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4w
+HQYDVR0OBBYEFK/WP1p7EM56lkxxIBAohNZWvwkjMB8GA1UdIwQYMBaAFPI12zQE
+2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMF2eLAXNa+8ve16CF31Y
++/DDErehb5V3G5DGWZ5CGPcNcuevDeOIXcTuKqXineR3EAIwIkR+5d9UvSsAfFPk
+OItcoI8so2BH4Da0wkUU+o7nQ9yRtZvE0syujxIzgEzv9JUZMYIBUDCCAUwCAQEw
+TDA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24x
+ETAPBgNVBAoMCEJvZ3VzIENBAgkApbNUKBuwbkswCwYJYIZIAWUDBAICoHgwFwYJ
+KoZIhvcNAQkDMQoGCCsGAQUFBwwCMBwGCSqGSIb3DQEJBTEPFw0xOTEyMjIxNjA5
+MTRaMD8GCSqGSIb3DQEJBDEyBDADTid4Yy+UzDasyRb9j2bsz/pPHjAtNZV3oa+E
+RQ/auLffZXl8h43ecu6ERv4t+AswCgYIKoZIzj0EAwMEZjBkAjAt5JqjM4WJ9Yd5
+RnziEbhlnVoo7ADPYl8hRnxrfYG+jiNsqbAMrjqqPFiG7yOPtNwCMEcQJZT1SBud
+KS1zJZvX/ury+ySGvKDLkfnqwZARR9W7TkTdx0L9W9oVjyEgOeGkvA==
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ found_gl_use_kek = False
+ for ctrl in asn1Object['controlSequence']:
+ if ctrl['attrType'] == rfc5275.id_skd_glUseKEK:
+ cv, rest = der_decoder(
+ ctrl['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[ctrl['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(cv.prettyPrint())
+ self.assertEqual(ctrl['attrValues'][0], der_encoder(cv))
+
+ self.assertIn(
+ 'example.com',
+ cv['glInfo']['glAddress']['rfc822Name'])
+
+ self.assertIn(
+ 'example.com',
+ cv['glOwnerInfo'][0]['glOwnerAddress']['rfc822Name'])
+
+ self.assertEqual(31, cv['glKeyAttributes']['duration'])
+ found_gl_use_kek = True
+
+ self.assertTrue(found_gl_use_kek)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sd = asn1Object['content']
+ self.assertEqual(
+ rfc6402.id_cct_PKIData, sd['encapContentInfo']['eContentType'])
+
+ pkid, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc6402.PKIData(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(pkid.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(pkid))
+
+ found_gl_use_kek = False
+ for ctrl in pkid['controlSequence']:
+ if ctrl['attrType'] == rfc5275.id_skd_glUseKEK:
+ cv = ctrl['attrValues'][0]
+
+ self.assertIn(
+ 'example.com',
+ cv['glInfo']['glAddress']['rfc822Name'])
+
+ self.assertIn(
+ 'example.com',
+ cv['glOwnerInfo'][0]['glOwnerAddress']['rfc822Name'])
+
+ self.assertEqual(31, cv['glKeyAttributes']['duration'])
+ found_gl_use_kek = True
+
+ self.assertTrue(found_gl_use_kek)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5280.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5280.py
new file mode 100644
index 0000000000..ea9e5337ce
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5280.py
@@ -0,0 +1,253 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class CertificateListTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.CertificateList()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class CertificateOpenTypeTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ openTypesMap = {
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sig_alg = asn1Object['tbsCertificate']['signature']
+
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ spki_alg = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ for rdn in asn1Object['tbsCertificate']['subject']['rdnSequence']:
+ for atv in rdn:
+ if atv['type'] == rfc5280.id_emailAddress:
+ self.assertIn("valicert.com", atv['value'])
+ else:
+ atv_ps = str(atv['value']['printableString'])
+ self.assertIn("valicert", atv_ps.lower())
+
+
+class CertificateListOpenTypeTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.CertificateList()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ openTypesMap = {
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sig_alg = asn1Object['tbsCertList']['signature']
+
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ for rdn in asn1Object['tbsCertList']['issuer']['rdnSequence']:
+ for atv in rdn:
+ if atv['type'] == rfc5280.id_emailAddress:
+ self.assertIn("snmplabs.com", atv['value'])
+
+ elif atv['type'] == rfc5280.id_at_countryName:
+ self.assertEqual('AU', atv['value'])
+
+ else:
+ self.assertLess(9, len(atv['value']['printableString']))
+
+ crl_extn_count = 0
+
+ for extn in asn1Object['tbsCertList']['crlExtensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ crl_extn_count += 1
+
+ self.assertEqual(1, crl_extn_count)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cert_extn_count = 0
+
+ for extn in asn1Object['tbsCertList']['crlExtensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ cert_extn_count += 1
+
+ self.assertEqual(1, cert_extn_count)
+
+
+class ORAddressOpenTypeTestCase(unittest.TestCase):
+ oraddress_pem_text = """\
+MEMwK2EEEwJHQmIKEwhHT0xEIDQwMKIHEwVVSy5BQ4MHU2FsZm9yZKYFEwNSLUQx
+FDASgAEBoQ0TC1N0ZXZlIEtpbGxl
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.ORAddress()
+
+ def testDecodeOpenTypes(self):
+ substrate = pem.readBase64fromText(self.oraddress_pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ea0 = asn1Object['extension-attributes'][0]
+
+ self.assertEqual(rfc5280.common_name, ea0['extension-attribute-type'])
+ self.assertEqual("Steve Kille", ea0['extension-attribute-value'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5480.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5480.py
new file mode 100644
index 0000000000..72ca51adfd
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5480.py
@@ -0,0 +1,81 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+
+
+class ECCertTestCase(unittest.TestCase):
+ digicert_ec_cert_pem_text = """\
+MIIDrDCCApSgAwIBAgIQCssoukZe5TkIdnRw883GEjANBgkqhkiG9w0BAQwFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0xMzAzMDgxMjAwMDBaFw0yMzAzMDgxMjAwMDBaMEwxCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxJjAkBgNVBAMTHURpZ2lDZXJ0IEVDQyBT
+ZWN1cmUgU2VydmVyIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4ghC6nfYJN6g
+LGSkE85AnCNyqQIKDjc/ITa4jVMU9tWRlUvzlgKNcR7E2Munn17voOZ/WpIRllNv
+68DLP679Wz9HJOeaBy6Wvqgvu1cYr3GkvXg6HuhbPGtkESvMNCuMo4IBITCCAR0w
+EgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwNAYIKwYBBQUHAQEE
+KDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wQgYDVR0f
+BDswOTA3oDWgM4YxaHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0R2xv
+YmFsUm9vdENBLmNybDA9BgNVHSAENjA0MDIGBFUdIAAwKjAoBggrBgEFBQcCARYc
+aHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzAdBgNVHQ4EFgQUo53mH/naOU/A
+buiRy5Wl2jHiCp8wHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJ
+KoZIhvcNAQEMBQADggEBAMeKoENL7HTJxavVHzA1Nm6YVntIrAVjrnuaVyRXzG/6
+3qttnMe2uuzO58pzZNvfBDcKAEmzP58mrZGMIOgfiA4q+2Y3yDDo0sIkp0VILeoB
+UEoxlBPfjV/aKrtJPGHzecicZpIalir0ezZYoyxBEHQa0+1IttK7igZFcTMQMHp6
+mCHdJLnsnLWSB62DxsRq+HfmNb4TDydkskO/g+l3VtsIh5RHFPVfKK+jaEyDj2D3
+loB5hWp2Jp2VDCADjT7ueihlZGak2YPqmXTNbk19HOuNssWvFhtOyPNV6og4ETQd
+Ea8/B6hPatJ0ES8q/HO3X8IVQwVs1n3aAr0im0/T+Xc=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.digicert_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ algid = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, algid['algorithm'])
+
+ param, rest = der_decoder(algid['parameters'], asn1Spec=rfc5480.ECParameters())
+
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(rfc5480.secp384r1, param['namedCurve'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.digicert_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_alg = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, spki_alg['algorithm'])
+ self.assertEqual(rfc5480.secp384r1, spki_alg['parameters']['namedCurve'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5636.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5636.py
new file mode 100644
index 0000000000..8f5d90ee7f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5636.py
@@ -0,0 +1,118 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5636
+
+
+class TraceableAnonymousCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIGOgYJKoZIhvcNAQcCoIIGKzCCBicCAQMxDTALBglghkgBZQMEAgEwRQYKKoMajJpECgEB
+AaA3BDUwMwQgTgtiLdByNcZGP/PPE1I2lvxDA/6bajEE4VAWF13N9E4YDzIwMTkxMjMxMTIw
+MDAwWqCCBB0wggQZMIIDAaADAgECAhQLxXbZnuC+8r+RhlN0rgUga/of6TANBgkqhkiG9w0B
+AQsFADA/MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAP
+BgNVBAoTCEJvZ3VzIENBMB4XDTE5MTIxNTE4MTA0OFoXDTIwMTIxNDE4MTA0OFowTjELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMRAwDgYDVQQKDAdFeGFt
+cGxlMQ4wDAYDVQQDDAVBbGljZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALt2
+dWnBBb4MnwcHij1I2h+oNy7zGhG7Wd4GhtonVjn5XhyLhZLTjGAbPHqFBOb9fwElS4TfpTtG
+d7K9INUIgM0a6wZI3j3qCqDphQBW6sPVksip9Elan1hR8Upd4iutaWKKNxCpNO5gQiMM0Nay
+PTIp1ZcLByLxbHPBx/ZuJ/eg2OuBbkyTph0syWTUsiCbqXnraXP9pZUq0XL8Gu1tlvMZJm1J
+7NjE0CyDPQR8G9SS7IdCjhCcesP6E6OD0ang46Chx1S78fGB/UhSyQcFP3pznz0XS7pVAObU
+iMshwMzmUlcoErU7cf4V1t8ukjAsjVbx2QPPB6y64TN4//AYDdkCAwEAAaOB/TCB+jAdBgNV
+HQ4EFgQUVDw+01Pdj1UbXOmY7KLo9P0gau0wegYDVR0jBHMwcYAUbyHWHCqlZ40B9ilNhfDx
+VWD6nKehQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRv
+bjERMA8GA1UEChMIQm9ndXMgQ0GCFGR4rdxyWiX71uMC1s8lhGG24Gu7MAwGA1UdEwEB/wQC
+MAAwCwYDVR0PBAQDAgXgMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5v
+dCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wDQYJKoZIhvcNAQELBQADggEBAHO8u2ne
+bxI2OhSj1SaSgQXe4SN+BEWbortXetALwbDXs2+XO5CF88Nmf/CyZxKLWGNOGwlLBoaUDI1/
+rAf+Na244Om8JdKhAj3OimXX5KvebQgS/SYRf8XVM0zLmkp4DKgrMw5aXpMke8QrrouOt7EG
+rpKcVXCqG2gOiUomKYDCgIC0H95TWbYnJ1BLJIOqSvtBe+5GpWMyJUs6sZOvWJoXQ9U5MHJQ
+BczpA85TlMUPMojOC1OGUJty13h3GFX66K3GwpeMFBLsYfIT4N90EPioZYTs8srYMVl0//pK
+9XeuT4/zs47k1js8vuzILD9g5dD5hkw2dI/2utucjXpM9aExggGpMIIBpQIBA4AUVDw+01Pd
+j1UbXOmY7KLo9P0gau0wCwYJYIZIAWUDBAIBoGowGQYJKoZIhvcNAQkDMQwGCiqDGoyaRAoB
+AQEwHAYJKoZIhvcNAQkFMQ8XDTE5MTIxNjE1NTEyMlowLwYJKoZIhvcNAQkEMSIEIJumtIa6
+3jeKcCTvxY+Pf3O8U6jko6J0atleMxdZWNAHMA0GCSqGSIb3DQEBAQUABIIBAJHxEz3qLxDz
+UaMxBt1wW/2tMx5AGKlxhBIE2Am/iIpdpkk0nMNt+R6GduAz9yE+lS7V+lZafZq7WKUPpAIR
+YYD1apaxWAigHYQCLQg08MSlhzkCjzKiVXtsfAYHYLWutvqPY8WRX7x85If333/v7kVBPZvS
+su/MkZ4V9USpocRq/BFYo7VbitBYFHqra+vzhRiYD1pS6EfhFwZoAv/Ud59FUACU8ixw2IuO
+Efe1LUIWVmbJ3HKtk8JTrWTg9iLVp+keqOWJfSEEUZXnyNIMt/SCONtZT+6SJQqwQV0C8AcR
+9sxMfZum5/eKypTZ9liGP4jz6nxtD3hEyfEXf7BOfds=
+"""
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5636.id_kisa_tac_token: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5636.id_kisa_tac_token: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual('2019', asn1Object['timeout'][:4])
+ self.assertEqual('5dcdf44e', asn1Object['userKey'].prettyPrint()[-8:])
+
+ def testOpenTypes(self):
+ asn1Spec=rfc5652.ContentInfo()
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = asn1Object['content']['encapContentInfo']['eContent']
+ oid = asn1Object['content']['encapContentInfo']['eContentType']
+ self.assertIn(oid, rfc5652.cmsContentTypesMap)
+
+ tac_token, rest = der_decoder(
+ substrate,
+ asn1Spec=rfc5652.cmsContentTypesMap[oid],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(tac_token.prettyPrint())
+ self.assertEqual(substrate, der_encoder(tac_token))
+
+ self.assertEqual('2019', tac_token['timeout'][:4])
+ self.assertEqual('5dcdf44e', tac_token['userKey'].prettyPrint()[-8:])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5639.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5639.py
new file mode 100644
index 0000000000..628b902c7d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5639.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5639
+
+
+class ECCertTestCase(unittest.TestCase):
+ brainpool_ec_cert_pem_text = """\
+MIIB0jCCAXmgAwIBAgITPUXQAyl3ZE5iAHYGZYSp1FkqzTAKBggqhkjOPQQDAjA/
+MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAP
+BgNVBAoMCEJvZ3VzIENBMB4XDTE5MTIwOTIxNDM0NFoXDTIxMTIwODIxNDM0NFow
+PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
+DwYDVQQKDAhCb2d1cyBDQTBaMBQGByqGSM49AgEGCSskAwMCCAEBBwNCAASBvvOk
+WNZlGAf5O3V94qgC3IUUR/6uxFxT6To0ULFmrVVndXiVP6DE5h5QHGXPwKfO+4Yt
+n0OVnGHp68dPS37Go1MwUTAdBgNVHQ4EFgQUiRFFVcdn6Fp9+sEP1GVRtwl9XgIw
+HwYDVR0jBBgwFoAUiRFFVcdn6Fp9+sEP1GVRtwl9XgIwDwYDVR0TAQH/BAUwAwEB
+/zAKBggqhkjOPQQDAgNHADBEAiB3d+P64Dh5YzwyM++uOL6zHUeLbNpW2sF1eJsm
+l3M5uQIgGxpbAXOt/o1xtyhEGLNUBE7ObgQpm7tHMMQGUHo4wV8=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.brainpool_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki = asn1Object['tbsCertificate']['subjectPublicKeyInfo']
+ algid = spki['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, algid['algorithm'])
+
+ param, rest = der_decoder(
+ algid['parameters'], asn1Spec=rfc5480.ECParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(algid['parameters'], der_encoder(param))
+
+ self.assertEqual(rfc5639.brainpoolP256r1, param['namedCurve'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.brainpool_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki = asn1Object['tbsCertificate']['subjectPublicKeyInfo']
+ algid = spki['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, algid['algorithm'])
+ self.assertEqual(
+ rfc5639.brainpoolP256r1, algid['parameters']['namedCurve'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5649.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5649.py
new file mode 100644
index 0000000000..c2fa9d1db5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5649.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5649
+
+
+class AESKeyWrapTestCase(unittest.TestCase):
+ kw_alg_id_pem_text = "MAsGCWCGSAFlAwQBLQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5649.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kw_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc5649.id_aes256_wrap, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class AESKeyWrapWithPadTestCase(unittest.TestCase):
+ kw_pad_alg_id_pem_text = "MAsGCWCGSAFlAwQBMA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5649.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kw_pad_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc5649.id_aes256_wrap_pad, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5652.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5652.py
new file mode 100644
index 0000000000..7055b5201f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5652.py
@@ -0,0 +1,169 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+
+
+class ContentInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
+BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
+SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
+cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
+AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
+PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
+Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
+OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
+A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
+bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
+AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
+mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
+KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
+qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
+oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
+xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
+5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
+xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = {
+ rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
+ rfc5652.id_signedData: rfc5652.SignedData(),
+ rfc6402.id_cct_PKIData: rfc6402.PKIData()
+ }
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=layers[next_layer]
+ )
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ def testOpenTypes(self):
+ class ClientInformation(univ.Sequence):
+ pass
+
+ ClientInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('clientId', univ.Integer()),
+ namedtype.NamedType('MachineName', char.UTF8String()),
+ namedtype.NamedType('UserName', char.UTF8String()),
+ namedtype.NamedType('ProcessName', char.UTF8String())
+ )
+
+ class EnrollmentCSP(univ.Sequence):
+ pass
+
+ EnrollmentCSP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('KeySpec', univ.Integer()),
+ namedtype.NamedType('Name', char.BMPString()),
+ namedtype.NamedType('Signature', univ.BitString())
+ )
+
+ openTypeMap = {
+ # attributes
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'): char.IA5String(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.2'): EnrollmentCSP(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.21.20'): ClientInformation(),
+ # algorithm identifier parameters
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ openTypeMap.update(rfc5652.cmsAttributesMap)
+ openTypeMap.update(rfc6402.cmcControlAttributesMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder.decode(substrate,
+ asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+
+ self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc6402.id_cct_PKIData, eci['eContentType'])
+
+ pkid, rest = der_decoder.decode(eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
+ openTypes=openTypeMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(pkid.prettyPrint())
+ self.assertEqual(eci['eContent'], der_encoder.encode(pkid))
+
+ for req in pkid['reqSequence']:
+ cr = req['tcr']['certificationRequest']
+
+ sig_alg = cr['signatureAlgorithm']
+
+ self.assertIn(sig_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ cri = cr['certificationRequestInfo']
+ spki_alg = cri['subjectPublicKeyInfo']['algorithm']
+
+ self.assertIn( spki_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ attrs = cr['certificationRequestInfo']['attributes']
+
+ for attr in attrs:
+ self.assertIn(attr['attrType'], openTypeMap)
+
+ if attr['attrType'] == univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'):
+ self.assertEqual("6.2.9200.2", attr['attrValues'][0])
+
+ else:
+ self.assertTrue(attr['attrValues'][0].hasValue())
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5697.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5697.py
new file mode 100644
index 0000000000..1aa0e2b4c5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5697.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5697
+
+
+class OtherCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIGUTCCBfegAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCswCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMjExMTczMzQ0WhcNMjAxMjEwMTczMzQ0
+WjBNMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDTALBgNVBAMTBEdhaWwwggNHMIICOgYHKoZIzjgE
+ATCCAi0CggEBAMj5CIXkPmfEDm3rrTqf/sIPh5XNWTT+U/+W74HbEXfi0NdafvNc
+WowncDznn4BZuotmuahJKBLFL0WCE28SAcJlhoOZ+gy6CMBV3LbupTEhPcWdc+qC
+wj1kL6WQwBfuzMlfKqXbGcO+CAP59iirw/LGcgmjLk/BpNAQ5oPtmD88DKAm4Ysz
+l3+n0F8ZhLhw33NEcEVNcVr+Q+ZZP/4ezAizvOK46QA5KnlXBQoC+MgTqxk+zhjw
+JRE5UnQDv8FbUF3GrehLDN0q+Pt76+jl+ikOnMzeXi+tz8d49LCogxh7oq6N2Ptt
+o9ksMkExNRJhW6JeVQ4PggOR4CI8BwYt7T0CIQD5VsG4AQIeMIDGmu8ek+FEKp8l
+utd6GBzrQwfDkgiGpQKCAQEAo2c3ze980XHSjTnsFAcDXb71KrQV5FadnRAzWxWO
+MrDDCVUq6JqaRKWAMRmk72Tl3V1c6IC3Y3mjorYH0HEi3EbYq5KxGXRaoK8NJAFh
+YKhHk5VAVyCvM1J9NNdlDyl0uYrxLLSwt+S7yrEL4qCijAzQ270h0cnBiYG06e5l
+XVola9Wec4KqFfqnDQGiDIYZSWvGqMGKbrMzkJMmYN/8ls54l3ATvSEt5ijeDJzk
+MkyMaTV77g/R9n43JqvyOdkizZCRKovvL+m+wRdilFcIMDXwSG1Pw9kmCa/NenjF
+5swCfyF3P2TsO3QsppM7KWfLglj9j7sPM4MTiOfc+wPKqwOCAQUAAoIBACcxpFMg
+T2EEPRojEYDwIY4t9u6eP2scBrkrc3JJ6osTXHfkeluR9OvME620Hm01+EivnETI
+W5o+hCAdoic2h93kjx137QLAAL9ECoYgzm32SB796Nn630XVnd44gP1G3KbPZ8eD
+uC1GsSuxkmDR9PH0Tbx6XdnbTKW4ycHpKrrDLLeryZsghQfv4O63oaXgaJHwdQD3
+BwTZcUexZGstI7hFEdZrc7HWF3kmZdHjxuXYL/DP2T7akHyLc6ktepastZ6cGTZr
+GUJ52sgM50Swb2CtrJuGDvtnEcZjtEb+rJgFIWHDs3lelLT72GWX+Xs7jeJaSjx5
++NK1qahR8hguww6jggHQMIIBzDAdBgNVHQ4EFgQU34Ol7JNqPoDCG/WE8toUQUiS
+tUQwegYDVR0jBHMwcYAUzUhlAYOypgdbBv4jgQzEc+TRtTihQ6RBMD8xCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjERMA8GA1UEChMI
+Qm9ndXMgQ0GCFCVehe2QOuzvkY+pMECid/MyYVKJMA8GA1UdEwEB/wQFMAMBAf8w
+CwYDVR0PBAQDAgGGMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNh
+bm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wUwYDVR0RBEwwSqA2Bggr
+BgEFBQcIA6AqMCgMGzgyNjIwOC00MTcwMjgtNTQ4MTk1LTIxNTIzMwYJKwYBBAGB
+rGAwgRBnYWlsQGV4YW1wbGUuY29tMHgGCCsGAQUFBwETBGwwajBoBBT9+d0Ci+/R
+j5toRA+A7p+ECmGaWDBQMEOkQTA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkEx
+EDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBAgkApbNUKBuwbkcw
+CwYJYIZIAWUDBAMCA0cAMEQCIAyAog0z/KyROhb8Fl3Hyjcia/POnMq4yhPZFwlI
+hn1cAiAIfnI1FVrosL/94ZKfGW+xydYaelsPL+WBgqGvKuTMEg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ other_cert_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5697.id_pe_otherCerts:
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5697.OtherCertificates())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ self.assertEqual(
+ 11939979568329289287,
+ extnValue[0]['issuerSerial']['serialNumber'])
+
+ other_cert_found = True
+
+ self.assertTrue(other_cert_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ other_cert_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5697.id_pe_otherCerts:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ self.assertEqual(
+ 11939979568329289287,
+ extnValue[0]['issuerSerial']['serialNumber'])
+
+ other_cert_found = True
+
+ self.assertTrue(other_cert_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5751.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5751.py
new file mode 100644
index 0000000000..7ce4373956
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5751.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ pem_text = """\
+MIIGigYJKoZIhvcNAQcCoIIGezCCBncCAQExCTAHBgUrDgMCGjArBgkqhkiG9w0B
+BwGgHgQcVGhpcyBpcyBzb21lIHNhbXBsZSBjb250ZW50LqCCAuAwggLcMIICm6AD
+AgECAgIAyDAJBgcqhkjOOAQDMBIxEDAOBgNVBAMTB0NhcmxEU1MwHhcNOTkwODE3
+MDExMDQ5WhcNMzkxMjMxMjM1OTU5WjATMREwDwYDVQQDEwhBbGljZURTUzCCAbYw
+ggErBgcqhkjOOAQBMIIBHgKBgQCBjc3tg+oKnjk+wkgoo+RHk90O16gO7FPFq4QI
+T/+U4XNIfgzW80RI0f6fr6ShiS/h2TDINt4/m7+3TNxfaYrkddA3DJEIlZvep175
+/PSfL91DqItU8T+wBwhHTV2Iw8O1s+NVCHXVOXYQxHi9/52whJc38uRRG7XkCZZc
+835b2wIVAOJHphpFZrgTxtqPuDchK2KL95PNAoGAJjjQFIkyqjn7Pm3ZS1lqTHYj
+OQQCNVzyyxowwx5QXd2bWeLNqgU9WMB7oja4bgevfYpCJaf0dc9KCF5LPpD4beqc
+ySGKO3YU6c4uXaMHzSOFuC8wAXxtSYkRiTZEvfjIlUpTVrXi+XPsGmE2HxF/wr3t
+0VD/mHTC0YFKYDm6NjkDgYQAAoGAXOO5WnUUlgupet3jP6nsrF7cvbcTETSmFoko
+ESPZNIZndXUTEj1DW2/lUb/6ifKiGz4kfT0HjVtjyLtFpaBK44XWzgaAP+gjfhry
+JKtTGrgnDR7vCL9mFIBcYqxl+hWL8bs01NKWN/ZhR7LEMoTwfkFA/UanY04z8qXi
+9PKD5bijgYEwfzAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIGwDAfBgNVHSME
+GDAWgBRwRD6CLm+H3krTdeM9ILxDK5PxHzAdBgNVHQ4EFgQUvmyhs+PB9+1DcKTO
+EwHi/eOX/s0wHwYDVR0RBBgwFoEUQWxpY2VEU1NAZXhhbXBsZS5jb20wCQYHKoZI
+zjgEAwMwADAtAhRVDKQZH0IriXEiM42DarU9Z2u/RQIVAJ9hU1JUC1yy3drndh3i
+EFJbQ169MYIDVDCCA1ACAQEwGDASMRAwDgYDVQQDEwdDYXJsRFNTAgIAyDAHBgUr
+DgMCGqCCAuowGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAjBgkqhkiG9w0BCQQx
+FgQUQGrsCFJ5um4WAi2eBinAIpaH3UgwOAYDKqszMTEEL1RoaXMgaXMgYSB0ZXN0
+IEdlbmVyYWwgQVNOIEF0dHJpYnV0ZSwgbnVtYmVyIDEuMD4GCyqGSIb3DQEJEAIE
+MS8wLQwgQ29udGVudCBIaW50cyBEZXNjcmlwdGlvbiBCdWZmZXIGCSqGSIb3DQEH
+ATBKBgkqhkiG9w0BCQ8xPTA7MAcGBSoDBAUGMDAGBioDBAUGTQQmU21pbWUgQ2Fw
+YWJpbGl0aWVzIHBhcmFtZXRlcnMgYnVmZmVyIDIwbwYLKoZIhvcNAQkQAgoxYDBe
+BgUqAwQFBgQrQ29udGVudCBSZWZlcmVuY2UgQ29udGVudCBJZGVudGlmaWVyIEJ1
+ZmZlcgQoQ29udGVudCBSZWZlcmVuY2UgU2lnbmF0dXJlIFZhbHVlIEJ1ZmZlcjBz
+BgsqhkiG9w0BCRACCzFkoGIwWjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDVVTIEdv
+dmVybm1lbnQxETAPBgNVBAsTCFZEQSBTaXRlMQwwCgYDVQQLEwNWREExEjAQBgNV
+BAMTCURhaXN5IFJTQQIEClVEMzCB/AYLKoZIhvcNAQkQAgMxgewwgekwgeYEBzU3
+MzgyOTkYDzE5OTkwMzExMTA0NDMzWqGByTCBxqRhMF8xCzAJBgNVBAYTAlVTMRYw
+FAYDVQQKEw1VUyBHb3Zlcm5tZW50MREwDwYDVQQLEwhWREEgU2l0ZTEMMAoGA1UE
+CxMDVkRBMRcwFQYDVQQDEw5CdWdzIEJ1bm55IERTQaRhMF8xCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1VUyBHb3Zlcm5tZW50MREwDwYDVQQLEwhWREEgU2l0ZTEMMAoG
+A1UECxMDVkRBMRcwFQYDVQQDEw5FbG1lciBGdWRkIERTQTAJBgcqhkjOOAQDBC8w
+LQIVALwzN2XE93BcF0kTqkyFyrtSkUhZAhRjlqIUi89X3rBIX2xk3YQESV8cyg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ smimeCapMap = {
+ univ.ObjectIdentifier('1.2.3.4.5.6.77'): univ.OctetString(),
+ }
+ smimeCapMap.update(rfc5751.smimeCapabilityMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder (substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+ self.assertEqual(1, asn1Object['content']['version'])
+
+ for si in asn1Object['content']['signerInfos']:
+ self.assertEqual(1, si['version'])
+
+ for attr in si['signedAttrs']:
+
+ if attr['attrType'] == rfc5751.smimeCapabilities:
+ for scap in attr['attrValues'][0]:
+ if scap['capabilityID'] in smimeCapMap.keys():
+ scap_p, rest = der_decoder(scap['parameters'],
+ asn1Spec=smimeCapMap[scap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertEqual(scap['parameters'], der_encoder(scap_p))
+ self.assertIn('parameters', scap_p.prettyPrint())
+
+ if attr['attrType'] == rfc5751.id_aa_encrypKeyPref:
+ ekp_issuer_serial = attr['attrValues'][0]['issuerAndSerialNumber']
+
+ self.assertEqual(173360179, ekp_issuer_serial['serialNumber'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5752.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5752.py
new file mode 100644
index 0000000000..76776323d3
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5752.py
@@ -0,0 +1,207 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5752
+
+
+class MultipleSignaturesTestCase(unittest.TestCase):
+ pem_text = """\
+MIIKawYJKoZIhvcNAQcCoIIKXDCCClgCAQExGjALBglghkgBZQMEAgEwCwYJYIZI
+AWUDBAICMFEGCSqGSIb3DQEHAaBEBEJDb250ZW50LVR5cGU6IHRleHQvcGxhaW4N
+Cg0KV2F0c29uLCBjb21lIGhlcmUgLSBJIHdhbnQgdG8gc2VlIHlvdS6gggYmMIIC
+eDCCAf6gAwIBAgIJAKWzVCgbsG47MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYTAlVT
+MQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMg
+Q0EwHhcNMTkwNTI5MTQ0NTQxWhcNMjAwNTI4MTQ0NTQxWjBwMQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1w
+bGUxDjAMBgNVBAMTBUFsaWNlMSAwHgYJKoZIhvcNAQkBFhFhbGljZUBleGFtcGxl
+LmNvbTB2MBAGByqGSM49AgEGBSuBBAAiA2IABPjNnwcv7EQOldaShannEUxPPi7g
+B7WcXrNcJiWawQYPm8+7mGX2EMSN3VQdGAkg+jLd8lxZZ5nwUcKKsgK24yAWKw2x
+wb9pPArINg4UO6rP8LaPITCqBYJHLHKiG4le2aOBlDCBkTALBgNVHQ8EBAMCB4Aw
+QgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0
+ZWQgZm9yIGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQUxLpaDj564zyBsPQCqmi7FuCW
+DjUwHwYDVR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwCgYIKoZIzj0EAwMD
+aAAwZQIwY7kf0TW4C95EYZp/jyU3imi/bIf6EIBzmE4C5kp79/jQwpIXyrjDaKP7
+R65JooWIAjEAveDGnqwyK0KYtCA4fr9EEgL/azIn3vLQpWn11rQ8MC/DEu6AIdMp
+k+OOlIs8cdz1MIIDpjCCA0ygAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJ
+YIZIAWUDBAMCMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMH
+SGVybmRvbjERMA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAx
+MDE5MjAxMjMwWjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcT
+B0hlcm5kb24xEDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJ
+KoZIhvcNAQkBFhFhbGljZUBleGFtcGxlLmNvbTCCAbYwggErBgcqhkjOOAQBMIIB
+HgKBgQCLpR53xHfe+SiknAK/L9lm/ZO1109c9iYkriPIW/5MMlM+qc/tdRkKpG6E
+LIpfXTPtKCJmzqqVIyTmAJryyE8Xw0Ie2mzYPU5ULvKmllQkjTsWgPGgQBkciZ0A
+W9ggD9VwZilg4qh3iSO7T97hVQFnpCh6vm8pOH6UP/5kpr9ZJQIVANzdbztBJlJf
+qCB1t4h/NvSuwCFvAoGAITP+jhYk9Rngd98l+5ccgauQ+cLEUBgNG2Wq56zBXQbL
+ou6eKkQi7ecLNiRmExq3IU3LOj426wSxL72Kw6FPyOEv3edIFkJJEHL4Z+ZJeVe/
+/dzya0ddOJ7kk6qNF2ic+viD/5Vm8yRyKiig2uHH/MgIesLdZnvbzvX+f/P0z50D
+gYQAAoGALAUljkOi1PxjjFVvhGfK95yIsrfbfcIEKUBaTs9NR2rbGWUeP+93paoX
+wP39X9wrJx2MSWeHWhWKszNgoiyqYT0k4R9mem3WClotxOvB5fHfwIp2kQYvE7H0
+/TPdGhfUpHQGYpyLQgT6L80meSKMFnu4VXGzOANhWDxu3JxiADCjgZQwgZEwCwYD
+VR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5v
+dCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFO37wHcauyc0
+3rDc6cDRRsHzgcK+MB8GA1UdIwQYMBaAFM1IZQGDsqYHWwb+I4EMxHPk0bU4MAsG
+CWCGSAFlAwQDAgNHADBEAiBBRbfMzLi7+SVyO8SM3xxwUsMf/k1B+Nkvf1kBTfCf
+GwIgSAx/6mI+pNqdXqZZGESXy1MT1aBc4ynPGLFUr2r7cPYxggO4MIIBvAIBATBX
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0ECFGPMbd5dAfZyD1kqY7NIQyVCWZgpMA0GCWCGSAFl
+AwQCAQUAoIIBDjAYBgkqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJ
+BTEPFw0xOTEyMTgxNjAwMDBaMC8GCSqGSIb3DQEJBDEiBCCT0Lk67cs7v1OtnRbv
+ZUBOns/RgPEsttXJOxLKFB79aTCBogYLKoZIhvcNAQkQAjMxgZIwgY8wCwYJYIZI
+AWUDBAICMAoGCCqGSM49BAMDMEEwDQYJYIZIAWUDBAIBBQAEMN+vbArIfin1JoRw
+/UHR1y/ylbyUEeMpbC+1HKRpa6xdPJBovlGTcTReUoked6KSAjAxMA0GCWCGSAFl
+AwQCAQUABCC+AWJGNa+7R7wLKTza/Ix8On6IS6V5aUhEcflZzdM/8TALBglghkgB
+ZQMEAwIEMDAuAhUAm9IjQ1413cJQ24I8W0RfWAPXM7oCFQCMUB4rXWPZbe22HPXZ
+j7q0TKR3sjCCAfQCAQEwTDA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAO
+BgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBAgkApbNUKBuwbjswCwYJ
+YIZIAWUDBAICoIIBHTAYBgkqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3
+DQEJBTEPFw0xOTEyMTgxNjAwMDBaMD8GCSqGSIb3DQEJBDEyBDC25CKk/YJnHtT3
+qsZtRPTosLmNUVhxxlbn8Jo2+lys4+IKEOba8jebiTfTTPmZJmwwgaEGCyqGSIb3
+DQEJEAIzMYGRMIGOMA0GCWCGSAFlAwQCAQUAMAsGCWCGSAFlAwQDAjAvMAsGCWCG
+SAFlAwQCAgQgcylSfbq7wnltzEF7G//28TirRvVDkabxEivR5UKosqUwPzALBglg
+hkgBZQMEAgIEMEAx5qC6BXrb7o0yUseNCSX6+3h5ZX+26e1dBKpApbX3t8rEcsRR
+82TZYCPTWtz4jzAKBggqhkjOPQQDAwRnMGUCMCq/bAd/e5oCu6YIWGZN/xyIX6g7
+QL9hfgKz9i/lPoE35xmRwL/9/H0viqg3HvnDWAIxAIADENLOLox7NiiMK+Ya70I0
+jdEOIlE+zO/fF9I+syiz898JzTosN/V8wvaDoALtnQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.SignedAttributes()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5652.id_data: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5652.id_data: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while not next_layer == rfc5652.id_data:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ if next_layer == rfc5652.id_signedData:
+ signerInfos = asn1Object['signerInfos']
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ found_mult_sig1 = False
+ for attr in signerInfos[0]['signedAttrs']:
+ if attr['attrType'] in rfc5652.cmsAttributesMap:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha384)
+
+ self.assertEqual(
+ 'dfaf6c0a',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig1 = True
+
+ found_mult_sig2 = False
+ for attr in signerInfos[1]['signedAttrs']:
+ if attr['attrType'] in rfc5652.cmsAttributesMap:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha256)
+
+ self.assertEqual(
+ '7329527d',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig2 = True
+
+ self.assertTrue(found_mult_sig1)
+ self.assertTrue(found_mult_sig2)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_mult_sig1 = False
+ for attr in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ av = attr['attrValues'][0]
+
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha384)
+
+ self.assertEqual(
+ 'dfaf6c0a',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig1 = True
+
+ found_mult_sig2 = False
+ for attr in asn1Object['content']['signerInfos'][1]['signedAttrs']:
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ av = attr['attrValues'][0]
+
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha256)
+
+ self.assertEqual(
+ '7329527d',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig2 = True
+
+ self.assertTrue(found_mult_sig1)
+ self.assertTrue(found_mult_sig2)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5753.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5753.py
new file mode 100644
index 0000000000..7bb44ef102
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5753.py
@@ -0,0 +1,129 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5753
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIIGAwYJKoZIhvcNAQcDoIIF9DCCBfACAQIxgdihgdUCAQOgeKF2MBAGByqGSM49
+AgEGBSuBBAAiA2IABGJ8n8NE7e0+gs36C3P+klXlvBXudwiw84lyW0U0pbo9U0Lz
+tr6cknb+lbsRk21dXwHrK9ZW/SjBG+ONTvD+8P6+62xh2OO9lil5uSHmzDYNiTKn
+w8PDuC6X25uFO6Nf2qEJBAdSRkM1NzUzMBUGBiuBBAELAjALBglghkgBZQMEAS0w
+NDAyoBYEFMS6Wg4+euM8gbD0Aqpouxbglg41BBiH5Gdz0Rla/mjLUzxq49Lbxfpv
+p56UaPAwggUOBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAECBBAsmDsiOo0ySncPc/RM
+K3FLgIIE4HPHsXoYyQ/4LRDiK4OrSuRJmmuDye5fH/hLcgw/330Gsl1QBs9jF1CE
+DBM5ki657K/TRMl78Rqb3LIu5lfLQ8WVNGLsoQPwvxzIexGgShtYYwu8TcPiESFM
+a20SWpDEG8zFlmCbqQuc0buPxnvYviVThoBEthNC+S2Umed8JpxwNKJbNTx5dxd2
+dkDNwpHsKgNzT9cGl0NF129Dspehqtdge5LJu3rj1gNynLRI32AQ+pwU+sEHee6w
+DHhU5OWnHlndkm/9MTKY3woOhs1/KQFlRFPC6k71ZpUlncd393wLVIImfoMe4zWP
+AOnbpZ/M7zEJ95rTwwmudBs0qwMfCa3h0Vkg69w6fBHyc1IH8u3VpSPcbOW4dUzJ
+BDJPgB1kObAV02ZA4FQEuZtZiG13u3c7sSrHxsY1rtXssvSe+5rThqPWgDqmH8b/
+yPGEHIFh03kHCDt/UZrdkLCO7a0WhCdY4I9hNU6OYEQmyEFs0LsqEumn34Lv/XcD
+1wgLdPtF65zub4Wil/0Vpu73vIWLIk9LyNIXQSd6w0ZHUvVS+jZZ1zrqIQKhKvG9
+7NpKAYoHa4tOdoXHgBJUxw/uAOKkQ4jC5RS5UKqCZaQcArRD2bCEEsutiuyf06MM
+cWm+RaBY1EwuX+/cT0D6CsWHYFAeQHgLuR4HVk5+PVKoOL/7KUz0jUU5gzFVcmfa
+ocyX5A6R90yggBObefcOIEj3v+5fjHkppfTvi/R03fVZ4NywWyHbN7kOHHy8skJp
+cvNaqSY0dfkb8KOOoTptJH9rCBYtFlC5j/18y8Om9Um4h3/46hYO0xU8izJDzDzJ
+nO/5KS5mGyskweIp3mrE1C/mw68LvrksxQI03CPtbM+FqOKe0VcsAQykiOTnG3d4
+jLeF1iVrc9CgV+pwc5VfgQUwsGhjAFOCKTwWDrr3Je0yVsfzgwY2zuM5uE/+usOS
+Bt7SqbFTLOCba4fJrVVwi0wZig88owVTdl/ACxl2qyLUYC2u5PNJSY6kx8Cgo4gD
+Jk/3oeuys8JqgaufvKybl5GsdDaF3A7usZAjDR1EAWHZ7JGiagtqbvISLD0zq4e4
+nmEhLnIRb7u5SNBPqe8qVuuQjIsvmP0ZuTlnh84ypFOQGz7cfzHtr6UEQoGj8HIm
+bp8diL4tflmFAVNaRjQzu18+2vFB2w1EZIe2/uNLs9ne2EIyoK2Qb+mMCwJsNS0x
+OG0/TzPZ+y0Tp1/LupLHovMosPIGXlbvqZVh2xftDvbIigIMWZQZ2tFxYD6Xc4zA
+00v7H0yGF1pRY+3GpobJkw0Y6ORtgdtdnr2ipioIeQCy0hUpPOmTeSr0L3H7KfNY
+7yQgZg0ra7FIEjM8tDoNqrhznetYUU1ZWM8Lyb3zMxxinSFsGFGx2TiqPyixJNxN
++lPT5D6GRhC9mXgh+BfVod5oINJJwXxJpT5xnsZgW8ujVxiu1Vt5esXCZaXTGlyj
+VTH5dmCvJP9+B8n7dOimmCxCbMQKpNaZixJhoXWQtTgKqL1Qf9WoEs6TDGgfTllq
+jbE4w3O7ZA7fAWe9jbAGwiPV5rF/NVvjaj2+ibtXbSNPW59ddy1/2WzknVYnEHF0
+qZdBZ02Wh4ByXUC3FNvDu8hRTm5aq73DCqXLXUwNU8BvS1xBbbRq5aYI2Rd3naNA
+ns9dHqSvkg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ opk_ai_p = rfc5480.ECParameters()
+ opk_ai_p['namedCurve'] = rfc5480.secp384r1
+
+ kwai = rfc5753.KeyWrapAlgorithm()
+ kwai['algorithm'] = rfc3565.id_aes256_wrap
+
+ ukm_found = False
+ self.assertEqual(ed['version'], rfc5652.CMSVersion(value=2))
+ for ri in ed['recipientInfos']:
+ self.assertEqual(ri['kari']['version'], rfc5652.CMSVersion(value=3))
+ opk_alg = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(opk_alg['algorithm'], rfc5753.id_ecPublicKey)
+ self.assertEqual(opk_alg['parameters'], der_encoder(opk_ai_p))
+ kek_alg = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(kek_alg['algorithm'], rfc5753.dhSinglePass_stdDH_sha384kdf_scheme)
+ self.assertEqual(kek_alg['parameters'], der_encoder(kwai))
+ ukm = ri['kari']['ukm']
+ self.assertEqual(ukm, rfc5652.UserKeyingMaterial(hexValue='52464335373533'))
+ ukm_found = True
+
+ self.assertTrue(ukm_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ assert asn1Object['contentType'] == rfc5652.id_envelopedData
+ ed = asn1Object['content']
+
+ ukm_found = False
+ self.assertEqual(ed['version'], rfc5652.CMSVersion(value=2))
+ for ri in ed['recipientInfos']:
+ self.assertEqual(ri['kari']['version'], rfc5652.CMSVersion(value=3))
+ opk_alg = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(opk_alg['algorithm'], rfc5753.id_ecPublicKey)
+ self.assertEqual(opk_alg['parameters']['namedCurve'], rfc5480.secp384r1)
+ kek_alg = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(kek_alg['algorithm'], rfc5753.dhSinglePass_stdDH_sha384kdf_scheme)
+ self.assertEqual(kek_alg['parameters']['algorithm'], rfc3565.id_aes256_wrap)
+ ukm = ri['kari']['ukm']
+ self.assertEqual(ukm, rfc5652.UserKeyingMaterial(hexValue='52464335373533'))
+ ukm_found = True
+
+ self.assertTrue(ukm_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5755.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5755.py
new file mode 100644
index 0000000000..cf4a05fa29
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5755.py
@@ -0,0 +1,212 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc3114
+
+
+class AttributeCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDBTCCAm4CAQEwgY+gUTBKpEgwRjEjMCEGA1UEAwwaQUNNRSBJbnRlcm1lZGlh
+dGUgRUNEU0EgQ0ExCzAJBgNVBAYTAkZJMRIwEAYDVQQKDAlBQ01FIEx0ZC4CAx7N
+WqE6pDgwNjETMBEGA1UEAwwKQUNNRSBFQ0RTQTELMAkGA1UEBhMCRkkxEjAQBgNV
+BAoMCUFDTUUgTHRkLqA9MDukOTA3MRQwEgYDVQQDDAtleGFtcGxlLmNvbTELMAkG
+A1UEBhMCRkkxEjAQBgNVBAoMCUFDTUUgTHRkLjANBgkqhkiG9w0BAQsFAAIEC63K
+/jAiGA8yMDE2MDEwMTEyMDAwMFoYDzIwMTYwMzAxMTIwMDAwWjCB8jA8BggrBgEF
+BQcKATEwMC6GC3VybjpzZXJ2aWNlpBUwEzERMA8GA1UEAwwIdXNlcm5hbWUECHBh
+c3N3b3JkMDIGCCsGAQUFBwoCMSYwJIYLdXJuOnNlcnZpY2WkFTATMREwDwYDVQQD
+DAh1c2VybmFtZTA1BggrBgEFBQcKAzEpMCegGKQWMBQxEjAQBgNVBAMMCUFDTUUg
+THRkLjALDAlBQ01FIEx0ZC4wIAYIKwYBBQUHCgQxFDASMBAMBmdyb3VwMQwGZ3Jv
+dXAyMCUGA1UESDEeMA2hC4YJdXJuOnJvbGUxMA2hC4YJdXJuOnJvbGUyMGowHwYD
+VR0jBBgwFoAUgJCMhskAsEBzvklAX8yJBOXO500wCQYDVR04BAIFADA8BgNVHTcB
+Af8EMjAwMB2gCoYIdXJuOnRlc3SgD4INKi5leGFtcGxlLmNvbTAPoA2GC3Vybjph
+bm90aGVyMA0GCSqGSIb3DQEBCwUAA4GBACygfTs6TkPurZQTLufcE3B1H2707OXK
+sJlwRpuodR2oJbunSHZ94jcJHs5dfbzFs6vNfVLlBiDBRieX4p+4JcQ2P44bkgyi
+UTJu7g1b6C1liB3vO6yH5hOZicOAaKd+c/myuGb9uJ4n6y2oLNxnk/fDzpuZUe2h
+Q4eikPk4LQey
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5755.AttributeCertificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ count = 0
+
+ for attr in asn1Object['acinfo']['attributes']:
+ self.assertIn(attr['type'], rfc5280.certificateAttributesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5280.certificateAttributesMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ count += 1
+
+ self.assertEqual(5, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ count = 0
+
+ for attr in asn1Object['acinfo']['attributes']:
+ self.assertIn(attr['type'], rfc5280.certificateAttributesMap)
+ count += 1
+ if attr['type'] == rfc5755.id_aca_authenticationInfo:
+ self.assertEqual(
+ str2octs('password'), attr['values'][0]['authInfo'])
+
+ self.assertEqual(5, count)
+
+
+class CertificateWithClearanceTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIID1DCCA1qgAwIBAgIUUc1IQGJpeYQ0XwOS2ZmVEb3aeZ0wCgYIKoZIzj0EAwMw
+ZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAw
+DgYDVQQKEwdFeGFtcGxlMQwwCgYDVQQLEwNQQ0ExGDAWBgNVBAMTD3BjYS5leGFt
+cGxlLmNvbTAeFw0xOTExMDUyMjIwNDZaFw0yMDExMDQyMjIwNDZaMIGSMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoT
+B0V4YW1wbGUxIjAgBgNVBAsTGUh1bWFuIFJlc291cmNlIERlcGFydG1lbnQxDTAL
+BgNVBAMTBEZyZWQxHzAdBgkqhkiG9w0BCQEWEGZyZWRAZXhhbXBsZS5jb20wdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQObFslQ2EBP0xlDJ3sRnsNaqm/woQgKpBispSx
+XxK5bWUVpfnWsZnjLWhtDuPcu1BcBlM2g7gwL/aw8nUSIK3D8Ja9rTUQQXc3zxnk
+cl8+8znNXHMGByRjPUH87C+TOrqjggGaMIIBljAdBgNVHQ4EFgQU5m711OqFDNGR
+SWMOSzTXjpTLIFUwbwYDVR0jBGgwZoAUJuolDwsyICik11oKjf8t3L1/VGWhQ6RB
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjER
+MA8GA1UECgwIQm9ndXMgQ0GCCQCls1QoG7BuRjAPBgNVHRMBAf8EBTADAQH/MAsG
+A1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5u
+b3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1UdIAQOMAwwCgYIKwYB
+BQUHDQIwCgYDVR02BAMCAQIwfwYDVR0JBHgwdjBJBgNVBDcxQjBABgsqhkiG9w0B
+CRAHAwMCBeAxLTArgAsqhkiG9w0BCRAHBIEcMBoMGEhVTUFOIFJFU09VUkNFUyBV
+U0UgT05MWTApBglghkgBZQIBBUQxHAwaSHVtYW4gUmVzb3VyY2VzIERlcGFydG1l
+bnQwCgYIKoZIzj0EAwMDaAAwZQIwVh/RypULFgPpAN0I7OvuMomRWnm/Hea3Hk8P
+tTRz2Zai8iYat7oeAmGVgMhSXy2jAjEAuJW4l/CFatBy4W/lZ7gS3weBdBa5WEDI
+FFMC7GjGtCeLtXYqWfBnRdK26dOaHLB2
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ clearance_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5755.id_at_clearance:
+ self.assertIn(attr['type'], rfc5280.certificateAttributesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5280.certificateAttributesMap[attr['type']])
+
+ self.assertEqual(rfc3114.id_tsp_TEST_Whirlpool, av['policyId'])
+
+ for cat in av['securityCategories']:
+ self.assertEqual(
+ rfc3114.id_tsp_TEST_Whirlpool_Categories, cat['type'])
+ self.assertIn(
+ cat['type'], rfc5755.securityCategoryMap)
+ catv, rest = der_decoder(
+ cat['value'],
+ asn1Spec=rfc5755.securityCategoryMap[cat['type']])
+
+ self.assertIn('USE ONLY', catv[0])
+
+ clearance_found = True
+
+ self.assertTrue(clearance_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ clearance_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5755.id_at_clearance:
+ spid = rfc3114.id_tsp_TEST_Whirlpool
+ catid = rfc3114.id_tsp_TEST_Whirlpool_Categories
+
+ self.assertEqual(spid, attr['values'][0]['policyId'])
+
+ for cat in attr['values'][0]['securityCategories']:
+ self.assertEqual(catid, cat['type'])
+ self.assertIn( u'USE ONLY', cat['value'][0])
+
+ clearance_found = True
+
+ self.assertTrue(clearance_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5913.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5913.py
new file mode 100644
index 0000000000..ef5908662f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5913.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5913
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc3114
+
+
+class ClearanceTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIDhzCCAw6gAwIBAgIJAKWzVCgbsG5GMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMTAyMTg0MjE4WhcNMjAxMTAxMTg0MjE4WjBmMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxDDAKBgNVBAsTA1BDQTEYMBYGA1UEAxMPcGNhLmV4YW1wbGUuY29tMHYw
+EAYHKoZIzj0CAQYFK4EEACIDYgAEPf5vbgAqbE5dn6wbiCx4sCCcn1BKSrHmCfiW
+C9QLSGVNGHifQwPt9odGXjRiQ7QwpZ2wRD6Z91v+fk85XXLE3kJQCQdPIHFUY5EM
+pvS7T6u6xrmwnlVpUURPTOxfc55Oo4IBrTCCAakwHQYDVR0OBBYEFCbqJQ8LMiAo
+pNdaCo3/Ldy9f1RlMG8GA1UdIwRoMGaAFPI12zQE2qVV8r1pA5mwYuziFQjBoUOk
+QTA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24x
+ETAPBgNVBAoMCEJvZ3VzIENBggkA6JHWBpFPzvIwDwYDVR0TAQH/BAUwAwEB/zAL
+BgNVHQ8EBAMCAYYwQgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fu
+bm90IGJlIHRydXN0ZWQgZm9yIGFueSBwdXJwb3NlLjAVBgNVHSAEDjAMMAoGCCsG
+AQUFBw0CMAoGA1UdNgQDAgECMIGRBggrBgEFBQcBFQSBhDCBgTBZBgsqhkiG9w0B
+CRAHAwMCBeAxRjBEgAsqhkiG9w0BCRAHBIE1MDMMF0xBVyBERVBBUlRNRU5UIFVT
+RSBPTkxZDBhIVU1BTiBSRVNPVVJDRVMgVVNFIE9OTFkwEQYLKoZIhvcNAQkQBwID
+AgTwMBEGCyqGSIb3DQEJEAcBAwIF4DAKBggqhkjOPQQDAwNnADBkAjAZSD+BVqzc
+1l0fDoH3LwixjxvtddBHbJsM5yBek4U9b2yWL2KEmwV02fTgof3AjDECMCTsksmx
+5f3i5DSYfe9Q1heJlEJLd1hgZmfvUYNnCU3WrdmYzyoNdNTbg7ZFMoxsXw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cat_value_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5913.id_pe_clearanceConstraints:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for c in ev:
+ if c['policyId'] == rfc3114.id_tsp_TEST_Whirlpool:
+ for sc in c['securityCategories']:
+ self.assertIn(sc['type'], rfc5755.securityCategoryMap)
+
+ scv, rest = der_decoder(
+ sc['value'],
+ asn1Spec=rfc5755.securityCategoryMap[sc['type']])
+
+ for cat in scv:
+ self.assertIn('USE ONLY', cat)
+ cat_value_found = True
+
+ self.assertTrue(cat_value_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cat_value_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5913.id_pe_clearanceConstraints:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for c in ev:
+ if c['policyId'] == rfc3114.id_tsp_TEST_Whirlpool:
+ for sc in c['securityCategories']:
+ self.assertIn(sc['type'], rfc5755.securityCategoryMap)
+ for cat in sc['value']:
+ self.assertIn('USE ONLY', cat)
+ cat_value_found = True
+
+ self.assertTrue(cat_value_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5914.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5914.py
new file mode 100644
index 0000000000..3a70ec8d83
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5914.py
@@ -0,0 +1,79 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5914
+from pyasn1_modules import rfc5652
+
+
+class TrustAnchorListTestCase(unittest.TestCase):
+ trust_anchor_list_pem_text = """\
+MIIGGQYLKoZIhvcNAQkQASKgggYIMIIGBKGCAvYwggLyoAMCAQICAgDJMA0GCSqG
+SIb3DQEBCwUAMBYxFDASBgNVBAMTC3JpcGUtbmNjLXRhMCAXDTE3MTEyODE0Mzk1
+NVoYDzIxMTcxMTI4MTQzOTU1WjAWMRQwEgYDVQQDEwtyaXBlLW5jYy10YTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANFEWEhqlM9psgbDs3ltY0OjbMTb
+5SzMoVpJ755fDYgQrP0/0tl7jSkDWfsAWcSIDz1dqRQRXkAL6B/1ivNx8ANuldrI
+sJvzGNpymfjpcPsJac5WdadyKY9njXCq5orfAcAQvMSJs7ghmldI5EQdBmdIaB+j
+JdN7pi6a0bJ+r9MTj9PpekHNWRzBVRW9/OSEOxUEE3FSMa3XjLKMiavXjJBOg6HJ
+R4RfzZUpZV7mwEkPSlFqidPjrd0Al6+C1xAjH5KZFUdk2U/r+b+ufGx1bOmcUQ9W
++lJNbkCgMh1G5/7V7z/Ja4wImxs1bFw09i9MeBHcfkHYsT4Do4t4ATMi9lcCAwEA
+AaOCAV4wggFaMB0GA1UdDgQWBBToVSsf1tGk9+QExtjlaA0evBY/wzAPBgNVHRMB
+Af8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjCBsQYIKwYBBQUHAQsEgaQwgaEwPAYI
+KwYBBQUHMAqGMHJzeW5jOi8vcnBraS5yaXBlLm5ldC9yZXBvc2l0b3J5L3JpcGUt
+bmNjLXRhLm1mdDAyBggrBgEFBQcwDYYmaHR0cHM6Ly9ycmRwLnJpcGUubmV0L25v
+dGlmaWNhdGlvbi54bWwwLQYIKwYBBQUHMAWGIXJzeW5jOi8vcnBraS5yaXBlLm5l
+dC9yZXBvc2l0b3J5LzAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMCcGCCsGAQUF
+BwEHAQH/BBgwFjAJBAIAATADAwEAMAkEAgACMAMDAQAwIQYIKwYBBQUHAQgBAf8E
+EjAQoA4wDDAKAgEAAgUA/////zCCAgIwggGIoAMCAQICCQDokdYGkU/O8jAKBggq
+hkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hl
+cm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUxNDA4NTgxMVoXDTIxMDUx
+MzA4NTgxMVowPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdI
+ZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTB2MBAGByqGSM49AgEGBSuBBAAiA2IA
+BPBRdlSx6I5qpZ2sKUMIxun1gUAzzstOYWKvKCnMoNT1x+pIKDvMEMimFcLAxxL3
+NVYOhK0Jty83SPDkKWMdx9/Okdhf3U/zxJlEnXDiFrAeM6xbG8zcCRiBnmd92Uvs
+RqNQME4wHQYDVR0OBBYEFPI12zQE2qVV8r1pA5mwYuziFQjBMB8GA1UdIwQYMBaA
+FPI12zQE2qVV8r1pA5mwYuziFQjBMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwMD
+aAAwZQIwWlGNjb9NyqJSzUSdsEqDSvMZb8yFkxYCIbAVqQ9UqScUUb9tpJKGsPWw
+bZsnLVvmAjEAt/ypozbUhQw4dSPpWzrn5BQ0kKbDM3DQJcBABEUBoIOol1/jYQPm
+xajQuxcheFlkooIBADCB/TB2MBAGByqGSM49AgEGBSuBBAAiA2IABOIIQup32CTe
+oCxkpBPOQJwjcqkCCg43PyE2uI1TFPbVkZVL85YCjXEexNjLp59e76Dmf1qSEZZT
+b+vAyz+u/Vs/RyTnmgculr6oL7tXGK9xpL14Oh7oWzxrZBErzDQrjAQUo53mH/na
+OU/AbuiRy5Wl2jHiCp8MFURpZ2lDZXJ0IFRydXN0IEFuY2hvcjBSMEwxCzAJBgNV
+BAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxJjAkBgNVBAMTHURpZ2lDZXJ0
+IEVDQyBTZWN1cmUgU2VydmVyIENBggIFIIICZW4=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.trust_anchor_list_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5914.id_ct_trustAnchorList, asn1Object['contentType'])
+
+ tal, rest = der_decoder(asn1Object['content'], rfc5914.TrustAnchorList())
+
+ self.assertFalse(rest)
+ self.assertTrue(tal.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(tal))
+ self.assertEqual(3, sum(1 for _ in tal))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5915.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5915.py
new file mode 100644
index 0000000000..6e54e5a4b1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5915.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5915
+from pyasn1_modules import rfc5480
+
+
+class MUDCertTestCase(unittest.TestCase):
+ private_key_pem_text = """\
+MIGkAgEBBDDLjzGbbLrR3T13lrrVum7WC/4Ua4Femc1RhhNVe1Q5XsArQ33kn9kx
+3lOUfOcG+qagBwYFK4EEACKhZANiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6z
+XCYlmsEGD5vPu5hl9hDEjd1UHRgJIPoy3fJcWWeZ8FHCirICtuMgFisNscG/aTwK
+yDYOFDuqz/C2jyEwqgWCRyxyohuJXtk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5915.ECPrivateKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.private_key_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc5480.secp384r1, asn1Object['parameters']['namedCurve'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5916.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5916.py
new file mode 100644
index 0000000000..a653b8c96d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5916.py
@@ -0,0 +1,107 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5916
+
+
+class DeviceCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICpzCCAiygAwIBAgIJAKWzVCgbsG5FMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDMxMTQwMDE1WhcNMjAxMDMwMTQwMDE1WjB4MQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxGjAYBgNVBAsTEURldmljZSBPcGVyYXRpb25zMRwwGgYDVQQDExNleDEy
+MzQ1LmV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE7Lje3glS2qYl
+5x6N9TOlD4CbnzfFeJQfbDaCa3vexEiwE0apuAP+4L5fqOsYeZC970iNW+z3PdUs
+GzkKDC2cCVy8nIxQ3mWhNQDvavT3iz5OGSwa1GjSXRFbGn2x9QjNo4G6MIG3MEIG
+CWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVk
+IGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFPTQN1kXEM5Rd4hNvQL5HyA+o2No
+MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAsGA1UdDwQEAwIHgDAk
+BgNVHQkEHTAbMBkGCWCGSAFlAgEFRTEMBgorBgEEAYGsYDAYMAoGCCqGSM49BAMD
+A2kAMGYCMQCt6AceOEIwXFKFHIV8+wTK/vgs7ZYSA6jhXUpzNtzZw1xh9NxVUhmx
+pogu5Q9Vp28CMQC5YVF8dShC1tk9YImRftiVl8C6pbj//1K/+MwmR6nRk/WU+hKl
++Qsc5Goi6At471s=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_dev_owner = False
+ der_dev_own_oid = der_encoder(univ.ObjectIdentifier('1.3.6.1.4.1.22112.48.24'))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5916.id_deviceOwner:
+ self.assertEqual(der_dev_own_oid, attr['values'][0])
+ found_dev_owner = True
+
+ self.assertTrue(found_dev_owner)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_dev_owner = False
+ dev_own_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48.24')
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5916.id_deviceOwner:
+ self.assertEqual(dev_own_oid, attr['values'][0])
+ found_dev_owner = True
+
+ self.assertTrue(found_dev_owner)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5917.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5917.py
new file mode 100644
index 0000000000..1023fb86a7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5917.py
@@ -0,0 +1,119 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5917
+
+
+class ClearanceSponsorTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIID1DCCA1qgAwIBAgIUUc1IQGJpeYQ0XwOS2ZmVEb3aeZ0wCgYIKoZIzj0EAwMw
+ZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAw
+DgYDVQQKEwdFeGFtcGxlMQwwCgYDVQQLEwNQQ0ExGDAWBgNVBAMTD3BjYS5leGFt
+cGxlLmNvbTAeFw0xOTExMDUyMjIwNDZaFw0yMDExMDQyMjIwNDZaMIGSMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoT
+B0V4YW1wbGUxIjAgBgNVBAsTGUh1bWFuIFJlc291cmNlIERlcGFydG1lbnQxDTAL
+BgNVBAMTBEZyZWQxHzAdBgkqhkiG9w0BCQEWEGZyZWRAZXhhbXBsZS5jb20wdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQObFslQ2EBP0xlDJ3sRnsNaqm/woQgKpBispSx
+XxK5bWUVpfnWsZnjLWhtDuPcu1BcBlM2g7gwL/aw8nUSIK3D8Ja9rTUQQXc3zxnk
+cl8+8znNXHMGByRjPUH87C+TOrqjggGaMIIBljAdBgNVHQ4EFgQU5m711OqFDNGR
+SWMOSzTXjpTLIFUwbwYDVR0jBGgwZoAUJuolDwsyICik11oKjf8t3L1/VGWhQ6RB
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjER
+MA8GA1UECgwIQm9ndXMgQ0GCCQCls1QoG7BuRjAPBgNVHRMBAf8EBTADAQH/MAsG
+A1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5u
+b3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1UdIAQOMAwwCgYIKwYB
+BQUHDQIwCgYDVR02BAMCAQIwfwYDVR0JBHgwdjBJBgNVBDcxQjBABgsqhkiG9w0B
+CRAHAwMCBeAxLTArgAsqhkiG9w0BCRAHBIEcMBoMGEhVTUFOIFJFU09VUkNFUyBV
+U0UgT05MWTApBglghkgBZQIBBUQxHAwaSHVtYW4gUmVzb3VyY2VzIERlcGFydG1l
+bnQwCgYIKoZIzj0EAwMDaAAwZQIwVh/RypULFgPpAN0I7OvuMomRWnm/Hea3Hk8P
+tTRz2Zai8iYat7oeAmGVgMhSXy2jAjEAuJW4l/CFatBy4W/lZ7gS3weBdBa5WEDI
+FFMC7GjGtCeLtXYqWfBnRdK26dOaHLB2
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cs = rfc5917.DirectoryString()
+ cs['utf8String'] = u'Human Resources Department'
+ encoded_cs = der_encoder(cs)
+
+ clearance_sponsor_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5917.id_clearanceSponsor:
+ self.assertEqual(encoded_cs, attr['values'][0])
+ clearance_sponsor_found = True
+
+ self.assertTrue(clearance_sponsor_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ clearance_sponsor_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5917.id_clearanceSponsor:
+ hrd = u'Human Resources Department'
+
+ self.assertEqual(hrd, attr['values'][0]['utf8String'])
+
+ clearance_sponsor_found = True
+
+ self.assertTrue(clearance_sponsor_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5924.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5924.py
new file mode 100644
index 0000000000..f1ae64ac17
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5924.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5924
+
+
+class SIPDomainCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICiTCCAg+gAwIBAgIJAKWzVCgbsG5EMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDMwMjEwMDM0WhcNMjAxMDI5MjEwMDM0WjBsMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxEjAQBgNVBAsTCVNJUCBQcm94eTEYMBYGA1UEAxMPc2lwLmV4YW1wbGUu
+Y29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEcY3ckttSa6z3CfOFwZvPmZY8C9Ml
+D1XOydz00+Vqifh1lydhDuulHrJaQ+QgVjG1TzlTAssD9GeABit/M98DPS/IC3wi
+TsTMSyQ9/Oz4hKAw7x7lYEvufvycsZ7pJGRso4GpMIGmMEIGCWCGSAGG+EIBDQQ1
+FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVy
+cG9zZS4wHQYDVR0OBBYEFEcJ8iFWmJOl3Hg/44UFgFWNbe7FMB8GA1UdIwQYMBaA
+FPI12zQE2qVV8r1pA5mwYuziFQjBMAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggr
+BgEFBQcDFDAKBggqhkjOPQQDAwNoADBlAjAXEPPNyXBUj40dzy+ZOqafuM3/6Fy6
+bkgiIObcQImra96X10fe6qacanrbu4uU6d8CMQCQ+BCjCnOP4dBbNC3vB0WypxLo
+UwZ6TjS0Rfr+dRvlyilVjP+hPVwbyb7ZOSZR6zk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_kp_sipDomain = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(
+ extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+ self.assertIn(rfc5924.id_kp_sipDomain, ev)
+
+ found_kp_sipDomain = True
+
+ self.assertTrue(found_kp_sipDomain)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5934.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5934.py
new file mode 100644
index 0000000000..ba18b560f7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5934.py
@@ -0,0 +1,299 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Acknowledgement to Carl Wallace for the test messages.
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5934
+
+
+class TAMPStatusResponseTestCase(unittest.TestCase):
+ tsr_pem_text = """\
+MIIU/QYJKoZIhvcNAQcCoIIU7jCCFOoCAQMxDTALBglghkgBZQMEAgEwgg/GBgpghkgBZQIB
+Ak0CoIIPtgSCD7Iwgg+uMAiDAAIEXXp3f6GCD50wgg+ZooIFFTCCBREwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDALMH2jTus/z881nG+uHQiB+xwQRX8q0DjB6rBw9if/tpM
+Or8/yNgoe0s2AcCsRSXD0g4Kj4UYZBA9GhNwKm+O19yNk7NBDzghza2rwj0qBdNXETcNzYxR
++ZPjzEZJIY4UtM3LFD44zXIx7qsS8mXqNC5WXf/uY3XLbbqRNPye8/QtHL5QxELfWYj/arP6
+qGw9y1ZxcQWWu5+A5YBFWWdBsOvDrWCkgHUGF5wO9EPgmQ4b+3/1s8yygYKx/TLBuL5BpGS1
+YDpaUTCMzt5BLBlHXEkQZLl0qYdBr31uusG4ob9lMToEZ/m1u46SigBjuLHmjDhfg/9Q1Tui
+XWuyEMxjAgMBAAEEFEl0uwxeunr+AlTve6DGlcYJgHCWMIID0TBbMQswCQYDVQQGEwJVUzEY
+MBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEW
+MBQGA1UEAxMNRG9EIFJvb3QgQ0EgMqCCA3AwggJYoAMCAQICAQUwDQYJKoZIhvcNAQEFBQAw
+WzELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDEMMAoGA1UECxMDRG9E
+MQwwCgYDVQQLEwNQS0kxFjAUBgNVBAMTDURvRCBSb290IENBIDIwHhcNMDQxMjEzMTUwMDEw
+WhcNMjkxMjA1MTUwMDEwWjBbMQswCQYDVQQGEwJVUzEYMBYGA1UEChMPVS5TLiBHb3Zlcm5t
+ZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEWMBQGA1UEAxMNRG9EIFJvb3QgQ0Eg
+MjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMAswfaNO6z/PzzWcb64dCIH7HBB
+FfyrQOMHqsHD2J/+2kw6vz/I2Ch7SzYBwKxFJcPSDgqPhRhkED0aE3Aqb47X3I2Ts0EPOCHN
+ravCPSoF01cRNw3NjFH5k+PMRkkhjhS0zcsUPjjNcjHuqxLyZeo0LlZd/+5jdcttupE0/J7z
+9C0cvlDEQt9ZiP9qs/qobD3LVnFxBZa7n4DlgEVZZ0Gw68OtYKSAdQYXnA70Q+CZDhv7f/Wz
+zLKBgrH9MsG4vkGkZLVgOlpRMIzO3kEsGUdcSRBkuXSph0GvfW66wbihv2UxOgRn+bW7jpKK
+AGO4seaMOF+D/1DVO6Jda7IQzGMCAwEAAaM/MD0wHQYDVR0OBBYEFEl0uwxeunr+AlTve6DG
+lcYJgHCWMAsGA1UdDwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IB
+AQCYkY0/ici79cBpcyk7Nay6swh2PXAJkumERCEBfRR2G+5RbB2NFTctezFp9JpEuK9GzDT6
+I8sDJxnSgyF1K+fgG5km3IRAleio0sz2WFxm7z9KlxCCHboKot1bBiudp2RO6y4BNaS0PxOt
+VeTVc6hpmxHxmPIxHm9A1Ph4n46RoG9wBJBmqgYrzuF6krV94eDRluehOi3MsZ0fBUTth5nT
+TRpwOcEEDOV+2fGv1yAO8SJ6JaRzmcw/pAcnlqiile2CuRbTnguHwsHyiPVi32jfx7xpUe2x
+XNxUVCkPCTmarAPB2wxNrm8KehZJ8b+R0jiU0/aVLLdsyUK2jcqQjYXZooIFGDCCBRQwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCp7BRyiuhLcKPaEAOEpvunNg0qOlIWvzAV
+UoYFRyDPqqbNdcRkbu/xYCPLCmZArrTIaCoAUWhJN+lZMk2VvEMn6UCNOhDOFLxDGKH53szn
+hXZzXhgaI1u9Px/y7Y0ZzAPRQKSPpyACTCdaeTb2ozchjgBaBhbK01WWbzEpu3IOy+JIUfLU
+N6Q11m/uF7OxBqsLGYboI20xGyh4ZcXeYlK8wX3r7qBdVAT7sssrsiNUkYJM8L+6dEA7DARF
+gGdcxeuiV8MafwotvX+53MGZsMgH5AyGNpQ6JS/yfeaXPBuUtJdZBsk65AvZ6un8O3M0b/3n
+mOTzocKQXxz1Py7XGdN/AgMBAAEEFGyKlKJ3sYByHYF6Fqry3M5m7kXAMIID1DBbMQswCQYD
+VQQGEwJVUzEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNV
+BAsTA1BLSTEWMBQGA1UEAxMNRG9EIFJvb3QgQ0EgM6CCA3MwggJboAMCAQICAQEwDQYJKoZI
+hvcNAQELBQAwWzELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDEMMAoG
+A1UECxMDRG9EMQwwCgYDVQQLEwNQS0kxFjAUBgNVBAMTDURvRCBSb290IENBIDMwHhcNMTIw
+MzIwMTg0NjQxWhcNMjkxMjMwMTg0NjQxWjBbMQswCQYDVQQGEwJVUzEYMBYGA1UEChMPVS5T
+LiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEWMBQGA1UEAxMNRG9E
+IFJvb3QgQ0EgMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKnsFHKK6Etwo9oQ
+A4Sm+6c2DSo6Uha/MBVShgVHIM+qps11xGRu7/FgI8sKZkCutMhoKgBRaEk36VkyTZW8Qyfp
+QI06EM4UvEMYofnezOeFdnNeGBojW70/H/LtjRnMA9FApI+nIAJMJ1p5NvajNyGOAFoGFsrT
+VZZvMSm7cg7L4khR8tQ3pDXWb+4Xs7EGqwsZhugjbTEbKHhlxd5iUrzBfevuoF1UBPuyyyuy
+I1SRgkzwv7p0QDsMBEWAZ1zF66JXwxp/Ci29f7ncwZmwyAfkDIY2lDolL/J95pc8G5S0l1kG
+yTrkC9nq6fw7czRv/eeY5POhwpBfHPU/LtcZ038CAwEAAaNCMEAwHQYDVR0OBBYEFGyKlKJ3
+sYByHYF6Fqry3M5m7kXAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+SIb3DQEBCwUAA4IBAQCfcaTAtpbSgEOgSOkfdgT5xTytZhhYY5vDtuhoioVaQmYStNLmi4h/
+h/SY9ajGCckf8Cwf7IK49KVHOMEzK99Mfpq+Cwuxyw98UCgQz4qNoum6rIbX1LGTXyKPlgW0
+Tgx1kX3T8ueUwpQUdk+PDKsQh1gyhQd1hhILXupTtArITISSH+voQYY8uvROQUrRbFhHQcOG
+WvLu6fKYJ4LqLjbW+AZegvGgUpNECbrSqRlaWKOoXSBtT2T4MIcbkBNIgc3KkMcNwdSYP47y
+DldoMxKOmQmx8OT2EPQ28km96qM4yFZBI4Oa36EbNXzrP0Gz9W9LOl6ub5N2mNLxmZ1FxI5y
+ooIFYDCCBVwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ3HcYEBAYYEH753gQ
+D/iEd3DvLW5VOxGmmVI/bfS9oZf6Nh5uREIRyFP+dYabXjcSiKJ92XEI1Ek1cc5Gz1vQWY5l
+H+tCPcoO3EyQ2FRpz144siBg3YNRLt/b1Vs4kVotz5oztG+WkOV2FGJDaYQQz1RB+TXqntRa
+l51eEFm94OTDWYnX3vJ5sIdrAsBZoSoAghVvaxERAFM0dD304cxWYqLkZegjsYMdWFMIsjMt
+lr7lfTOeEFonc1PdXZjiSxFTWJGP6nIR7LuU8g0PUK3yFrUaACQx5RW9FwaQqiSxrN0MUh7w
+i2qruPft32O0zpRov16W0ESW8fj0ejoKeRVTAgMBAAEEFKg8CZ1n9thHuqLQ/BhyVohAbZWV
+MIID0jBTMQswCQYDVQQGEwJVUzEfMB0GA1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEj
+MCEGA1UEAxMaVmFsaWQgRUUgQ2VydGlmaWNhdGUgVGVzdDGgggN5MIICYaADAgECAgEBMA0G
+CSqGSIb3DQEBCwUAMEAxCzAJBgNVBAYTAlVTMR8wHQYDVQQKExZUZXN0IENlcnRpZmljYXRl
+cyAyMDExMRAwDgYDVQQDEwdHb29kIENBMB4XDTEwMDEwMTA4MzAwMFoXDTMwMTIzMTA4MzAw
+MFowUzELMAkGA1UEBhMCVVMxHzAdBgNVBAoTFlRlc3QgQ2VydGlmaWNhdGVzIDIwMTExIzAh
+BgNVBAMTGlZhbGlkIEVFIENlcnRpZmljYXRlIFRlc3QxMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA2dx3GBAQGGBB++d4EA/4hHdw7y1uVTsRpplSP230vaGX+jYebkRCEchT
+/nWGm143EoiifdlxCNRJNXHORs9b0FmOZR/rQj3KDtxMkNhUac9eOLIgYN2DUS7f29VbOJFa
+Lc+aM7RvlpDldhRiQ2mEEM9UQfk16p7UWpedXhBZveDkw1mJ197yebCHawLAWaEqAIIVb2sR
+EQBTNHQ99OHMVmKi5GXoI7GDHVhTCLIzLZa+5X0znhBaJ3NT3V2Y4ksRU1iRj+pyEey7lPIN
+D1Ct8ha1GgAkMeUVvRcGkKoksazdDFIe8Itqq7j37d9jtM6UaL9eltBElvH49Ho6CnkVUwID
+AQABo2swaTAfBgNVHSMEGDAWgBRYAYQkG7wrUpRKPaUQchRR9a86yTAdBgNVHQ4EFgQUqDwJ
+nWf22Ee6otD8GHJWiEBtlZUwDgYDVR0PAQH/BAQDAgTwMBcGA1UdIAQQMA4wDAYKYIZIAWUD
+AgEwATANBgkqhkiG9w0BAQsFAAOCAQEAHlrZD69ipblSvLzsDGGIEwGqCg8NR6OeqbIXG/ij
+2SzSjTi+O7LP1DGIz85p9I7HuXAFUcAGh8aVtPZq+jGeLcQXs+3lehlhGG6M0eQO2pttbI0G
+kO4s0XlY2ITNm0HTGOL+kcZfACcUZXsS+i+9qL80ji3PF0xYWzAPLmlmRSYmIZjT85CuKYda
+Tsa96Ch+D6CU5v9ctVxP3YphWQ4F0v/FacDTiUrRwuXI9MgIw/0qI0+EAFwsRC2DisI9Isc8
+YPKKeOMbRmXamY/4Y8HUeqBwpnqnEJudrH++FPBEI4dYrBAV6POgvx4lyzarAmlarv/AbrBD
+ngieGTynMG6NwqFIMEYwRAYIKwYBBQUHARIBAf8ENTAzMA8GCmCGSAFlAgECTQMKAQEwDwYK
+YIZIAWUCAQJNAQoBATAPBgpghkgBZQIBAk0CCgEBAQEAoIIDfTCCA3kwggJhoAMCAQICAQEw
+DQYJKoZIhvcNAQELBQAwQDELMAkGA1UEBhMCVVMxHzAdBgNVBAoTFlRlc3QgQ2VydGlmaWNh
+dGVzIDIwMTExEDAOBgNVBAMTB0dvb2QgQ0EwHhcNMTAwMTAxMDgzMDAwWhcNMzAxMjMxMDgz
+MDAwWjBTMQswCQYDVQQGEwJVUzEfMB0GA1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEj
+MCEGA1UEAxMaVmFsaWQgRUUgQ2VydGlmaWNhdGUgVGVzdDEwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDZ3HcYEBAYYEH753gQD/iEd3DvLW5VOxGmmVI/bfS9oZf6Nh5uREIR
+yFP+dYabXjcSiKJ92XEI1Ek1cc5Gz1vQWY5lH+tCPcoO3EyQ2FRpz144siBg3YNRLt/b1Vs4
+kVotz5oztG+WkOV2FGJDaYQQz1RB+TXqntRal51eEFm94OTDWYnX3vJ5sIdrAsBZoSoAghVv
+axERAFM0dD304cxWYqLkZegjsYMdWFMIsjMtlr7lfTOeEFonc1PdXZjiSxFTWJGP6nIR7LuU
+8g0PUK3yFrUaACQx5RW9FwaQqiSxrN0MUh7wi2qruPft32O0zpRov16W0ESW8fj0ejoKeRVT
+AgMBAAGjazBpMB8GA1UdIwQYMBaAFFgBhCQbvCtSlEo9pRByFFH1rzrJMB0GA1UdDgQWBBSo
+PAmdZ/bYR7qi0PwYclaIQG2VlTAOBgNVHQ8BAf8EBAMCBPAwFwYDVR0gBBAwDjAMBgpghkgB
+ZQMCATABMA0GCSqGSIb3DQEBCwUAA4IBAQAeWtkPr2KluVK8vOwMYYgTAaoKDw1Ho56pshcb
++KPZLNKNOL47ss/UMYjPzmn0jse5cAVRwAaHxpW09mr6MZ4txBez7eV6GWEYbozR5A7am21s
+jQaQ7izReVjYhM2bQdMY4v6Rxl8AJxRlexL6L72ovzSOLc8XTFhbMA8uaWZFJiYhmNPzkK4p
+h1pOxr3oKH4PoJTm/1y1XE/dimFZDgXS/8VpwNOJStHC5cj0yAjD/SojT4QAXCxELYOKwj0i
+xzxg8op44xtGZdqZj/hjwdR6oHCmeqcQm52sf74U8EQjh1isEBXo86C/HiXLNqsCaVqu/8Bu
+sEOeCJ4ZPKcwbo3CMYIBiTCCAYUCAQOAFKg8CZ1n9thHuqLQ/BhyVohAbZWVMAsGCWCGSAFl
+AwQCAaBMMBkGCSqGSIb3DQEJAzEMBgpghkgBZQIBAk0CMC8GCSqGSIb3DQEJBDEiBCAiPyBP
+FFwHJbHgGmoz+54OEJ/ppMyfSoZmbS/nkWfxxjALBgkqhkiG9w0BAQsEggEAHllTg+TMT2ll
+zVvrvRDwOwrzr6YIJSt96sLANqOXiqqnvrHDDWTdVMcRX/LccVbm9JP4sGSfGDdwbm3FqB+l
+kgSBlejFgjWfF/YVK5OpaVcPGg4DB3oAOwxtn0GVQtKgGkiGQF0r5389mTHYlQzS6BVDG2Oi
+sKIe4SBazrBGjnKANf9LEunpWPt15y6QCxiEKnJfPlAqiMuiIhHmXPIHi+d3sYkC+iu+5I68
+2oeLdtBWCDcGh4+DdS6Qqzkpp14MpvzBMdfD3lKcI3NRmY+GmRYaGAiEalh83vggslF7N4SS
+iPxQyqz7LIQe9/5ynJV5/CPUDBL9QK2vSCOQaihWCg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tsr_pem_text)
+
+ layers = {
+ rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
+ rfc5652.id_signedData: rfc5652.SignedData(),
+ rfc5934.id_ct_TAMP_statusResponse: rfc5934.TAMPStatusResponse()
+ }
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5934.id_ct_TAMP_statusResponse: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5934.id_ct_TAMP_statusResponse: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.tsr_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+
+ self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5934.id_ct_TAMP_statusResponse, eci['eContentType'])
+
+ tsr, rest = der_decoder(
+ eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(tsr.prettyPrint())
+ self.assertEqual(eci['eContent'], der_encoder(tsr))
+ self.assertEqual(2, tsr['version'])
+ self.assertEqual(univ.Null(""), tsr['query']['target'])
+ self.assertEqual(1568307071, tsr['query']['seqNum'])
+ self.assertFalse(tsr['usesApex'])
+
+ count = 0
+
+ for tai in tsr['response']['verboseResponse']['taInfo']:
+ count += 1
+ self.assertEqual(1, tai['taInfo']['version'])
+
+ self.assertEqual(3, count)
+
+
+class TrustAnchorUpdateTestCase(unittest.TestCase):
+ tau_pem_text = """\
+MIIGgwYJKoZIhvcNAQcCoIIGdDCCBnACAQMxDTALBglghkgBZQMEAgEwggFMBgpghkgBZQIB
+Ak0DoIIBPASCATgwggE0MAiDAAIEXXp3kDCCASaiggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDALMH2jTus/z881nG+uHQiB+xwQRX8q0DjB6rBw9if/tpMOr8/yNgoe0s2AcCs
+RSXD0g4Kj4UYZBA9GhNwKm+O19yNk7NBDzghza2rwj0qBdNXETcNzYxR+ZPjzEZJIY4UtM3L
+FD44zXIx7qsS8mXqNC5WXf/uY3XLbbqRNPye8/QtHL5QxELfWYj/arP6qGw9y1ZxcQWWu5+A
+5YBFWWdBsOvDrWCkgHUGF5wO9EPgmQ4b+3/1s8yygYKx/TLBuL5BpGS1YDpaUTCMzt5BLBlH
+XEkQZLl0qYdBr31uusG4ob9lMToEZ/m1u46SigBjuLHmjDhfg/9Q1TuiXWuyEMxjAgMBAAGg
+ggN9MIIDeTCCAmGgAwIBAgIBATANBgkqhkiG9w0BAQsFADBAMQswCQYDVQQGEwJVUzEfMB0G
+A1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEQMA4GA1UEAxMHR29vZCBDQTAeFw0xMDAx
+MDEwODMwMDBaFw0zMDEyMzEwODMwMDBaMFMxCzAJBgNVBAYTAlVTMR8wHQYDVQQKExZUZXN0
+IENlcnRpZmljYXRlcyAyMDExMSMwIQYDVQQDExpWYWxpZCBFRSBDZXJ0aWZpY2F0ZSBUZXN0
+MTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANncdxgQEBhgQfvneBAP+IR3cO8t
+blU7EaaZUj9t9L2hl/o2Hm5EQhHIU/51hpteNxKIon3ZcQjUSTVxzkbPW9BZjmUf60I9yg7c
+TJDYVGnPXjiyIGDdg1Eu39vVWziRWi3PmjO0b5aQ5XYUYkNphBDPVEH5Neqe1FqXnV4QWb3g
+5MNZidfe8nmwh2sCwFmhKgCCFW9rEREAUzR0PfThzFZiouRl6COxgx1YUwiyMy2WvuV9M54Q
+WidzU91dmOJLEVNYkY/qchHsu5TyDQ9QrfIWtRoAJDHlFb0XBpCqJLGs3QxSHvCLaqu49+3f
+Y7TOlGi/XpbQRJbx+PR6Ogp5FVMCAwEAAaNrMGkwHwYDVR0jBBgwFoAUWAGEJBu8K1KUSj2l
+EHIUUfWvOskwHQYDVR0OBBYEFKg8CZ1n9thHuqLQ/BhyVohAbZWVMA4GA1UdDwEB/wQEAwIE
+8DAXBgNVHSAEEDAOMAwGCmCGSAFlAwIBMAEwDQYJKoZIhvcNAQELBQADggEBAB5a2Q+vYqW5
+Ury87AxhiBMBqgoPDUejnqmyFxv4o9ks0o04vjuyz9QxiM/OafSOx7lwBVHABofGlbT2avox
+ni3EF7Pt5XoZYRhujNHkDtqbbWyNBpDuLNF5WNiEzZtB0xji/pHGXwAnFGV7Evovvai/NI4t
+zxdMWFswDy5pZkUmJiGY0/OQrimHWk7Gvegofg+glOb/XLVcT92KYVkOBdL/xWnA04lK0cLl
+yPTICMP9KiNPhABcLEQtg4rCPSLHPGDyinjjG0Zl2pmP+GPB1HqgcKZ6pxCbnax/vhTwRCOH
+WKwQFejzoL8eJcs2qwJpWq7/wG6wQ54Inhk8pzBujcIxggGJMIIBhQIBA4AUqDwJnWf22Ee6
+otD8GHJWiEBtlZUwCwYJYIZIAWUDBAIBoEwwGQYJKoZIhvcNAQkDMQwGCmCGSAFlAgECTQMw
+LwYJKoZIhvcNAQkEMSIEINq+nldSoCoJuEe/lhrRhfx0ArygsPJ7mCMbOFrpr1dFMAsGCSqG
+SIb3DQEBCwSCAQBTeRE1DzwF2dnv2yJAOYOxNnAtTs72ZG8mv5Ad4M/9n1+MPiAykLcBslW8
+7D1KjBdwB3oxIT4sjwGh0kxKLe4G+VuvQuPwtT8MqMl3hounnFOM5nMSj1TSbfHVPs3dhEyk
+Wu1gQ5g9gxLF3MpwEJGJKvhRtK17LGElJWvGPniRMChAJZJWoLjFBMe5JMzpqu2za50S1K3t
+YtkTOx/2FQdVApkTY1qMQooljDiuvSvOuSDXcyAA15uIypQJvfrBNqe6Ush+j7yS5UQyTm0o
+ZidB8vj4jIZT3S2gqWhtBLMUc11j+kWlXEZEigSL8WgCbAu7lqhItMwz2dy4C5aAWq8r"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tau_pem_text)
+
+ layers = {
+ rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
+ rfc5652.id_signedData: rfc5652.SignedData(),
+ rfc5934.id_ct_TAMP_update: rfc5934.TAMPUpdate()
+ }
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5934.id_ct_TAMP_update: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5934.id_ct_TAMP_update: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.tau_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+ self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5934.id_ct_TAMP_update, eci['eContentType'])
+
+ tau, rest = der_decoder(
+ eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(tau.prettyPrint())
+ self.assertEqual(eci['eContent'], der_encoder(tau))
+ self.assertEqual(2, tau['version'])
+ self.assertEqual(univ.Null(""), tau['msgRef']['target'])
+ self.assertEqual(1568307088, tau['msgRef']['seqNum'])
+ self.assertEqual(1, len(tau['updates']))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5940.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5940.py
new file mode 100644
index 0000000000..d55ba6e813
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5940.py
@@ -0,0 +1,141 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5940
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+
+class CRLandOCSPResponseTestCase(unittest.TestCase):
+ pem_text = """\
+MIIHWQYJKoZIhvcNAQcCoIIHSjCCB0YCAQExDTALBglghkgBZQMEAgEwUwYJKoZI
+hvcNAQcBoEYERENvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91Lg0KoIIBaDCCAWQwggEKoAMCAQIC
+CQClWUKCJkwnGTAKBggqhkjOPQQDAjAkMRQwEgYDVQQKDAtleGFtcGxlLm9yZzEM
+MAoGA1UEAwwDQm9iMB4XDTE3MTIyMDIzMDc0OVoXDTE4MTIyMDIzMDc0OVowJDEU
+MBIGA1UECgwLZXhhbXBsZS5vcmcxDDAKBgNVBAMMA0JvYjBZMBMGByqGSM49AgEG
+CCqGSM49AwEHA0IABIZP//xT8ah2ymmxfidIegeccVKuGxN+OTuvGq69EnQ8fUFD
+ov2KNw8Cup0DtzAfHaZOMFWUu2+Vy3H6SLbQo4OjJTAjMCEGA1UdEQEB/wQXMBWG
+E3NpcDpib2JAZXhhbXBsZS5vcmcwCgYIKoZIzj0EAwIDSAAwRQIhALIkjJJAKCI4
+nsklf2TM/RBvuguWwRkHMDTVGxAvczlsAiAVjrFR8IW5vS4EzyePDVIua7b+Tzb3
+THcQsVpPR53kDaGCBGQwggIbMIIBAwIBATANBgkqhkiG9w0BAQsFADBsMQswCQYD
+VQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGln
+aWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBS
+b290IENBFw0xOTA1MDIyMjE1NTRaFw0xOTA1MjMyMjE1NTRaMDEwLwIQDPWCOBgZ
+nlb4K9ZS7Sft6RcNMTgxMDI1MTYxMTM4WjAMMAoGA1UdFQQDCgEAoDAwLjAfBgNV
+HSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzALBgNVHRQEBAICAcQwDQYJKoZI
+hvcNAQELBQADggEBABPO3OA0OkQZ+RLVxz/cNx5uNVEO416oOePkN0A4DxFztf33
+7caS4OyfS9Wyu1j5yUdWJVpAKXSQeN95MqHkpSpYDssuqbuYjv8ViJfseGBgtXTc
+zUzzNeNdY2uxMbCxuhmPkgacAo1lx9LkK2ScYHWVbfFRF1UQ/dcmavaZsEOBNuLW
+OxQYA9MqfVNAymHe7vPqwm/8IY2FbHe9HsiJZfGxNWMDP5lmJiXmpntTeDQ2Ujdi
+yXwGGKjyiSTFk2jVRutrGINufaoA/f7eCmIb4UDPbpMjVfD215dW8eBKouypCVoE
+vmCSSTacdiBI2yOluvMN0PzvPve0ECAE+D4em9ahggJBBggrBgEFBQcQAjCCAjMK
+AQCgggIsMIICKAYJKwYBBQUHMAEBBIICGTCCAhUwZqEgMB4xHDAJBgNVBAYTAlJV
+MA8GA1UEAx4IAFQAZQBzAHQYEzIwMTkwNTA5MTU1MDQ4LjI1OVowLTArMBIwBwYF
+Kw4DAhoEAQEEAQECAQGAABgTMjAxOTA1MDkxNTUwNDguMjYxWjAKBggqhkjOPQQD
+AgNJADBGAiEAujFVH+NvuTLYa8RW3pvWSUwZfjOW5H5171JI+/50BjcCIQDhwige
+wl+ts6TIvhU+CFoOipQBNKyKXKh7ngJkUtpZ86CCAVIwggFOMIIBSjCB8aADAgEC
+AgEBMAoGCCqGSM49BAMCMB4xHDAJBgNVBAYTAlJVMA8GA1UEAx4IAFQAZQBzAHQw
+HhcNMTkwMjAxMDUwMDAwWhcNMjIwMjAxMDUwMDAwWjAeMRwwCQYDVQQGEwJSVTAP
+BgNVBAMeCABUAGUAcwB0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEM0jxEYgg
+RxC/r87uV/h6iZ8BAdHT/6fxRuzG0PRMIlFBy38skFUXJJulKV9JW16YJqOkVsqv
+xwMM61z7p1vQ/qMgMB4wDwYDVR0TBAgwBgEB/wIBAzALBgNVHQ8EBAMCAAYwCgYI
+KoZIzj0EAwIDSAAwRQIhAIdpCt5g89ofSADXmBD3KXQGnTghwbAMeWrKXqTGww+x
+AiAl8NQgfUk4xMymZ3VtCLJ2MdczDps4Zh2KPOqAR5fZAjGCAQcwggEDAgEBMDEw
+JDEUMBIGA1UECgwLZXhhbXBsZS5vcmcxDDAKBgNVBAMMA0JvYgIJAKVZQoImTCcZ
+MAsGCWCGSAFlAwQCAaBpMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZI
+hvcNAQkFMQ8XDTE5MDEyNDIzNTI1NlowLwYJKoZIhvcNAQkEMSIEIO93j8lA1ebc
+JXb0elmbMSYZWp8aInra81+iLAUNjRlaMAoGCCqGSM49BAMCBEcwRQIhAPeI7URq
+tw//LB/6TAN0/Qh3/WHukXwxRbOJpnYVx0b6AiB3lK3FfwBhx4S5YSPMblS7goJl
+ttTMEpl2prH8bbwo1g==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertTrue(sd.prettyPrint())
+
+ self.assertEqual(
+ rfc5652.id_data, sd['encapContentInfo']['eContentType'])
+ self.assertTrue(sd['encapContentInfo']['eContent'])
+
+ v2 = rfc5280.Version(value='v2')
+
+ self.assertEqual(v2, sd['crls'][0]['crl']['tbsCertList']['version'])
+
+ ocspr_oid = rfc5940.id_ri_ocsp_response
+
+ self.assertEqual(ocspr_oid, sd['crls'][1]['other']['otherRevInfoFormat'])
+
+ ocspr, rest = der_decoder(
+ sd['crls'][1]['other']['otherRevInfo'],
+ asn1Spec=rfc5940.OCSPResponse())
+
+ self.assertTrue(ocspr.prettyPrint())
+
+ success = rfc2560.OCSPResponseStatus(value='successful')
+
+ self.assertEqual(success, ocspr['responseStatus'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd_eci = asn1Object['content']['encapContentInfo']
+
+ self.assertEqual(rfc5652.id_data, sd_eci['eContentType'])
+ self.assertTrue(sd_eci['eContent'].hasValue())
+
+ for ri in asn1Object['content']['crls']:
+ if ri.getName() == 'crl':
+ v2 = rfc5280.Version(value='v2')
+ self.assertEqual(v2, ri['crl']['tbsCertList']['version'])
+
+ if ri.getName() == 'other':
+ ori = ri['other']
+ ocspr_oid = rfc5940.id_ri_ocsp_response
+
+ self.assertEqual(ocspr_oid, ori['otherRevInfoFormat'])
+
+ ocspr_status = ori['otherRevInfo']['responseStatus']
+ success = rfc2560.OCSPResponseStatus(value='successful')
+
+ self.assertEqual(success, ocspr_status)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5958.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5958.py
new file mode 100644
index 0000000000..980a11ed5b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5958.py
@@ -0,0 +1,84 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc8410
+
+
+class PrivateKeyTestCase(unittest.TestCase):
+ priv_key_pem_text = """\
+MHICAQEwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC
+oB8wHQYKKoZIhvcNAQkJFDEPDA1DdXJkbGUgQ2hhaXJzgSEAGb9ECWmEzf6FQbrB
+Z9w7lshQhqowtrbLDFw4rXAxZuE=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5958.PrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.priv_key_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ rfc8410.id_Ed25519, asn1Object['privateKeyAlgorithm']['algorithm'])
+ self.assertTrue(asn1Object['privateKey'].isValue)
+ self.assertEqual(
+ "0x0420d4ee", asn1Object['privateKey'].prettyPrint()[0:10])
+ self.assertTrue(asn1Object['publicKey'].isValue)
+ self.assertEqual(
+ "1164575857", asn1Object['publicKey'].prettyPrint()[0:10])
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class PrivateKeyOpenTypesTestCase(unittest.TestCase):
+ asymmetric_key_pkg_pem_text = """\
+MIGEBgpghkgBZQIBAk4FoHYwdDByAgEBMAUGAytlcAQiBCDU7nLb+RNYStW22PH3
+afitOv58KMvx1Pvgl6iPRHVYQqAfMB0GCiqGSIb3DQEJCRQxDwwNQ3VyZGxlIENo
+YWlyc4EhABm/RAlphM3+hUG6wWfcO5bIUIaqMLa2ywxcOK1wMWbh
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.asymmetric_key_pkg_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(
+ rfc5958.id_ct_KP_aKeyPackage, rfc5652.cmsContentTypesMap)
+
+ oneKey = asn1Object['content'][0]
+
+ self.assertEqual(
+ rfc8410.id_Ed25519, oneKey['privateKeyAlgorithm']['algorithm'])
+
+ pkcs_9_at_friendlyName = univ.ObjectIdentifier('1.2.840.113549.1.9.9.20')
+
+ self.assertEqual(
+ pkcs_9_at_friendlyName, oneKey['attributes'][0]['attrType'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc5990.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc5990.py
new file mode 100644
index 0000000000..7d51d67bb4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc5990.py
@@ -0,0 +1,87 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5990
+
+
+class RSAKEMTestCase(unittest.TestCase):
+ pem_text = """\
+MEcGCyqGSIb3DQEJEAMOMDgwKQYHKIGMcQICBDAeMBkGCiuBBRCGSAksAQIwCwYJ
+YIZIAWUDBAIBAgEQMAsGCWCGSAFlAwQBBQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5990.id_rsa_kem, asn1Object['algorithm'])
+
+ rsa_kem_p, rest = der_decoder(
+ asn1Object['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[rfc5990.id_rsa_kem])
+
+ self.assertFalse(rest)
+ self.assertTrue(rsa_kem_p.prettyPrint())
+ self.assertEqual(asn1Object['parameters'], der_encoder(rsa_kem_p))
+ self.assertEqual(rfc5990.id_kem_rsa, rsa_kem_p['kem']['algorithm'])
+
+ kem_rsa_p, rest = der_decoder(
+ rsa_kem_p['kem']['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[rfc5990.id_kem_rsa])
+
+ self.assertFalse(rest)
+ self.assertTrue(kem_rsa_p.prettyPrint())
+ self.assertEqual(
+ rsa_kem_p['kem']['parameters'], der_encoder(kem_rsa_p))
+ self.assertEqual(16, kem_rsa_p['keyLength'])
+ self.assertEqual(
+ rfc5990.id_kdf_kdf3, kem_rsa_p['keyDerivationFunction']['algorithm'])
+
+ kdf_p, rest = der_decoder(
+ kem_rsa_p['keyDerivationFunction']['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[rfc5990.id_kdf_kdf3])
+
+ self.assertFalse(rest)
+ self.assertTrue(kdf_p.prettyPrint())
+ self.assertEqual(
+ kem_rsa_p['keyDerivationFunction']['parameters'],
+ der_encoder(kdf_p))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5990.id_rsa_kem, asn1Object['algorithm'])
+ self.assertEqual(
+ rfc5990.id_kem_rsa, asn1Object['parameters']['kem']['algorithm'])
+ self.assertEqual(
+ 16, asn1Object['parameters']['kem']['parameters']['keyLength'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6010.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6010.py
new file mode 100644
index 0000000000..1726a8d880
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6010.py
@@ -0,0 +1,101 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6010
+
+
+class UnconstrainedCCCExtensionTestCase(unittest.TestCase):
+ unconstrained_pem_text = "MB0GCCsGAQUFBwESBBEwDzANBgsqhkiG9w0BCRABAA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extension()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.unconstrained_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc6010.id_pe_cmsContentConstraints, asn1Object['extnID'])
+
+ evalue, rest = der_decoder(
+ asn1Object['extnValue'],
+ asn1Spec=rfc6010.CMSContentConstraints())
+
+ self.assertFalse(rest)
+ self.assertTrue(evalue.prettyPrint())
+ self.assertEqual(asn1Object['extnValue'], der_encoder(evalue))
+ self.assertEqual(
+ rfc6010.id_ct_anyContentType, evalue[0]['contentType'])
+
+
+class ConstrainedCCCExtensionTestCase(unittest.TestCase):
+ constrained_pem_text = """\
+MIG7BggrBgEFBQcBEgSBrjCBqzA0BgsqhkiG9w0BCRABEDAlMCMGCyqGSIb3DQEJ
+EAwBMRQMElZpZ2lsIFNlY3VyaXR5IExMQzAwBgpghkgBZQIBAk4CMCIwIAYLKoZI
+hvcNAQkQDAsxEQwPa3RhLmV4YW1wbGUuY29tMDEGCyqGSIb3DQEJEAEZMCIwIAYL
+KoZIhvcNAQkQDAsxEQwPa3RhLmV4YW1wbGUuY29tMA4GCSqGSIb3DQEHAQoBAQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extension()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.constrained_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc6010.id_pe_cmsContentConstraints, asn1Object['extnID'])
+
+ evalue, rest = der_decoder(
+ asn1Object['extnValue'],
+ asn1Spec=rfc6010.CMSContentConstraints())
+
+ self.assertFalse(rest)
+ self.assertTrue(evalue.prettyPrint())
+ self.assertEqual(asn1Object['extnValue'], der_encoder(evalue))
+
+ constraint_count = 0
+ attribute_count = 0
+ cannot_count = 0
+
+ for ccc in evalue:
+ constraint_count += 1
+ if ccc['canSource'] == 1:
+ cannot_count += 1
+ if ccc['attrConstraints'].hasValue():
+ for attr in ccc['attrConstraints']:
+ attribute_count += 1
+
+ self.assertEqual(4, constraint_count)
+ self.assertEqual(3, attribute_count)
+ self.assertEqual(1, cannot_count)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.constrained_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertIn(asn1Object['extnID'], rfc5280.certificateExtensionsMap)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6019.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6019.py
new file mode 100644
index 0000000000..2e08670e42
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6019.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6019
+
+
+class BinarySigningTimeTestCase(unittest.TestCase):
+ pem_text = "MBUGCyqGSIb3DQEJEAIuMQYCBFy/hlQ="
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.Attribute()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc6019.id_aa_binarySigningTime, asn1Object['attrType'])
+
+ bintime, rest = der_decoder(
+ asn1Object['attrValues'][0], asn1Spec=rfc6019.BinaryTime())
+
+ self.assertEqual(0x5cbf8654, bintime)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['attrType'], rfc5652.cmsAttributesMap)
+ self.assertEqual(0x5cbf8654, asn1Object['attrValues'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6031.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6031.py
new file mode 100644
index 0000000000..29a8d86c5c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6031.py
@@ -0,0 +1,91 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6031
+
+
+class SymmetricKeyPkgTestCase(unittest.TestCase):
+ key_pkg_pem_text = """\
+MIG7BgsqhkiG9w0BCRABGaCBqzCBqKBEMCMGCyqGSIb3DQEJEAwBMRQMElZpZ2ls
+IFNlY3VyaXR5IExMQzAdBgsqhkiG9w0BCRAMAzEODAxQcmV0ZW5kIDA0OEEwYDBe
+MFYwGwYLKoZIhvcNAQkQDBsxDAwKZXhhbXBsZUlEMTAVBgsqhkiG9w0BCRAMCjEG
+DARIT1RQMCAGCyqGSIb3DQEJEAwLMREMD2t0YS5leGFtcGxlLmNvbQQEMTIzNA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.key_pkg_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+
+ asn1Spec = rfc5652.cmsContentTypesMap[asn1Object['contentType']]
+ skp, rest = der_decoder(asn1Object['content'], asn1Spec=asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(skp.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(skp))
+
+ for attr in skp['sKeyPkgAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyPkgAttributesMap)
+
+ for osk in skp['sKeys']:
+ for attr in osk['sKeyAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyAttributesMap)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.key_pkg_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertTrue(asn1Object['content'].hasValue())
+
+ keypkg = asn1Object['content']
+
+ self.assertEqual(
+ rfc6031.KeyPkgVersion().subtype(value='v1'), keypkg['version'])
+
+ for attr in keypkg['sKeyPkgAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyPkgAttributesMap)
+ self.assertNotEqual('0x', attr['attrValues'][0].prettyPrint()[:2])
+
+ # decodeOpenTypes=True did not decode if the value is shown in hex ...
+ if attr['attrType'] == rfc6031.id_pskc_manufacturer:
+ attr['attrValues'][0] == 'Vigil Security LLC'
+
+ for osk in keypkg['sKeys']:
+ for attr in osk['sKeyAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyAttributesMap)
+ self.assertNotEqual(
+ '0x', attr['attrValues'][0].prettyPrint()[:2])
+
+ # decodeOpenTypes=True did not decode if the value is shown in hex ...
+ if attr['attrType'] == rfc6031.id_pskc_issuer:
+ attr['attrValues'][0] == 'kta.example.com'
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6032.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6032.py
new file mode 100644
index 0000000000..287bad89ae
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6032.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6032
+
+
+class EncryptedKeyPkgTestCase(unittest.TestCase):
+ encrypted_key_pkg_pem_text = """\
+MIIBBwYKYIZIAWUCAQJOAqCB+DCB9QIBAjCBzgYKYIZIAWUCAQJOAjAdBglghkgB
+ZQMEASoEEN6HFteHMZ3DyeO35xIwWQOAgaCKTs0D0HguNzMhsLgiwG/Kw8OwX+GF
+9/cZ1YVNesUTW/VsbXJcbTmFmWyfqZsM4DLBegIbrUEHQZnQRq6/NO4ricQdHApD
+B/ip6RRqeN1yxMJLv1YN0zUOOIDBS2iMEjTLXZLWw3w22GN2JK7G+Lr4OH1NhMgU
+ILJyh/RePmPseMwxvcJs7liEfkiSNMtDfEcpjtzA9bDe95GjhQRsiSByoR8wHQYJ
+YIZIAWUCAQVCMRAEDnB0Zi1rZGMtODEyMzc0
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.encrypted_key_pkg_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc6032.id_ct_KP_encryptedKeyPkg, asn1Object['contentType'])
+
+ content, rest = der_decoder(
+ asn1Object['content'], rfc6032.EncryptedKeyPackage())
+
+ self.assertFalse(rest)
+ self.assertTrue(content.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(content))
+ self.assertEqual('encrypted', content.getName())
+
+ eci = content['encrypted']['encryptedContentInfo']
+
+ self.assertEqual(
+ rfc6032.id_ct_KP_encryptedKeyPkg, eci['contentType'])
+
+ attrType = content['encrypted']['unprotectedAttrs'][0]['attrType']
+
+ self.assertEqual(rfc6032.id_aa_KP_contentDecryptKeyID, attrType)
+
+ attrVal0 = content['encrypted']['unprotectedAttrs'][0]['attrValues'][0]
+ keyid, rest = der_decoder(attrVal0, rfc6032.ContentDecryptKeyID())
+
+ self.assertFalse(rest)
+ self.assertTrue(keyid.prettyPrint())
+ self.assertEqual(attrVal0, der_encoder(keyid))
+ self.assertEqual(str2octs('ptf-kdc-812374'), keyid)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.encrypted_key_pkg_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+
+ eci = asn1Object['content']['encrypted']['encryptedContentInfo']
+
+ self.assertIn(eci['contentType'], rfc5652.cmsContentTypesMap)
+
+ for attr in asn1Object['content']['encrypted']['unprotectedAttrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ self.assertNotEqual('0x', attr['attrValues'][0].prettyPrint()[:2])
+
+ if attr['attrType'] == rfc6032.id_aa_KP_contentDecryptKeyID:
+ self.assertEqual(str2octs(
+ 'ptf-kdc-812374'), attr['attrValues'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6120.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6120.py
new file mode 100644
index 0000000000..bdedab8c50
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6120.py
@@ -0,0 +1,115 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6120
+
+
+class XMPPCertificateTestCase(unittest.TestCase):
+ xmpp_server_cert_pem_text = """\
+MIIC6DCCAm+gAwIBAgIJAKWzVCgbsG5DMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDI0MjMxNjA0WhcNMjAxMDIzMjMxNjA0WjBNMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xHzAdBgNVBAoTFkV4
+YW1wbGUgUHJvZHVjdHMsIEluYy4wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQZzQlk
+03nJRPF6+w1NxFELmQ5vJTjTRz3eu03CRtahK4Wnwd4GwbDe8NVHAEG2qTzBXFDu
+p6RZugsBdf9GcEZHG42rThYYOzIYzVFnI7tQgA+nTWSWZN6eoU/EXcknhgijggEn
+MIIBIzAdBgNVHQ4EFgQUkQpUMYcbUesEn5buI03POFnktJgwHwYDVR0jBBgwFoAU
+8jXbNATapVXyvWkDmbBi7OIVCMEwCwYDVR0PBAQDAgeAMIGPBgNVHREEgYcwgYSg
+KQYIKwYBBQUHCAegHRYbX3htcHAtY2xpZW50LmltLmV4YW1wbGUuY29toCkGCCsG
+AQUFBwgHoB0WG194bXBwLXNlcnZlci5pbS5leGFtcGxlLmNvbaAcBggrBgEFBQcI
+BaAQDA5pbS5leGFtcGxlLmNvbYIOaW0uZXhhbXBsZS5jb20wQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjAKBggqhkjOPQQDAwNnADBkAjAEo4mhDGC6/R39HyNgzLseNAp36qBH
+yQJ/AWsBojN0av8akeVv9IuM45yqLKdiCzcCMDCjh1lFnCvurahwp5D1j9pAZMsg
+nOzhcMpnHs2U/eN0lHl/JNgnbftl6Dvnt59xdA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ gn_on = gn['otherName']
+ if gn_on['type-id'] == rfc6120.id_on_xmppAddr:
+ self.assertIn(gn_on['type-id'], rfc5280.anotherNameMap)
+
+ spec = rfc5280.anotherNameMap[gn['otherName']['type-id']]
+ on, rest = der_decoder(gn_on['value'], asn1Spec=spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(on.prettyPrint())
+ self.assertEqual(gn_on['value'], der_encoder(on))
+ self.assertEqual('im.example.com', on)
+
+ count += 1
+
+ self.assertEqual(1, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ if gn['otherName']['type-id'] == rfc6120.id_on_xmppAddr:
+ self.assertEqual(
+ 'im.example.com', gn['otherName']['value'])
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6187.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6187.py
new file mode 100644
index 0000000000..75c1e91d86
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6187.py
@@ -0,0 +1,70 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6187
+
+
+class SSHClientCertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICkDCCAhegAwIBAgIJAKWzVCgbsG5BMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDI0MTgyNjA3WhcNMjAxMDIzMTgyNjA3WjB0MQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxEDAOBgNVBAMTB0NoYXJsaWUxIjAgBgkqhkiG9w0BCQEWE2NoYXJsaWVA
+ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARfr1XPl5S0A/BwTOm4
+/rO7mGVt2Tmfr3yvYnfN/ggMvyS3RiIXSsdzcAwzeqc907Jp7Dggab0PpaOKDOxD
+WoK0g6B8+kC/VMsU23mfShlb9et8qcR3A8gdU6g8uvSMahWjgakwgaYwCwYDVR0P
+BAQDAgeAMB0GA1UdDgQWBBQfwm5u0GoxiDcjhDt33UJYlvMPFTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTATBgNVHSUEDDAKBggrBgEFBQcDFTBCBglg
+hkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBm
+b3IgYW55IHB1cnBvc2UuMAoGCCqGSM49BAMDA2cAMGQCMGEme38A3k8q4RGSEs2D
+ThQQOQz3TBJrIW8zr92S8e8BNPkRcQDR+C72TEhL/qoPCQIwGpGaC4ERiUypETkC
+voNP0ODFhhlpFo6lwVHd8Gu+6hShC2PKdAfs4QFDS9ZKgQeZ
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ ssh_eku_oids = [
+ rfc6187.id_kp_secureShellClient,
+ rfc6187.id_kp_secureShellServer,
+ ]
+
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.ExtKeyUsageSyntax())
+
+ for oid in extnValue:
+ if oid in ssh_eku_oids:
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6210.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6210.py
new file mode 100644
index 0000000000..54d8b66e85
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6210.py
@@ -0,0 +1,73 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6210
+
+
+class AuthenticatedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIICRQYLKoZIhvcNAQkQAQKgggI0MIICMAIBADGBwDCBvQIBADAmMBIxEDAOBgNVBAMMB0
+NhcmxSU0ECEEY0a8eAAFa8EdNuLs1dcdAwDQYJKoZIhvcNAQEBBQAEgYCH70EpEikY7deb
+859YJRAWfFondQv1D4NFltw6C1ceheWnlAU0C2WEXr3LUBXZp1/PSte29FnJxu5bXCTn1g
+elMm6zNlZNWNd0KadVBcaxi1n8L52tVM5sWFGJPO5cStOyAka2ucuZM6iAnCSkn1Ju7fgU
+5j2g3bZ/IM8nHTcygjAKBggrBgEFBQgBAqFPBgsqhkiG9w0BCRADDQRAAQIDBAUGBwgJCg
+sMDQ4PEBESEwQVFhcYGRobHB0eHyAhIiMEJSYnKCkqKywtLi8wMTIzBDU2Nzg5Ojs8PT4/
+QDArBgkqhkiG9w0BBwGgHgQcVGhpcyBpcyBzb21lIHNhbXBsZSBjb250ZW50LqKBxzAYBg
+kqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0wOTEyMTAyMzI1MDBa
+MB8GCSqGSIb3DQEJBDESBBCWaa5hG1eeg+oQK2tJ3cD5MGwGCSqGSIb3DQEJNDFfMF0wTw
+YLKoZIhvcNAQkQAw0EQAECAwQFBgcICQoLDA0ODxAREhMEFRYXGBkaGxwdHh8gISIjBCUm
+JygpKissLS4vMDEyMwQ1Njc4OTo7PD0+P0CiCgYIKwYBBQUIAQIEFLjUxQ9PJFzFnWraxb
+EIbVbg2xql
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_ct_authData, asn1Object['contentType'])
+
+ ad, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.AuthenticatedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ad.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ad))
+ self.assertEqual(0, ad['version'])
+ self.assertEqual(
+ rfc6210.id_alg_MD5_XOR_EXPERIMENT, ad['digestAlgorithm']['algorithm'])
+
+ mac_alg_p, rest = der_decoder(
+ ad['digestAlgorithm']['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[ad['digestAlgorithm']['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(mac_alg_p.prettyPrint())
+ self.assertEqual(
+ ad['digestAlgorithm']['parameters'], der_encoder(mac_alg_p))
+ self.assertEqual("0x01020304", mac_alg_p.prettyPrint()[:10])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6211.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6211.py
new file mode 100644
index 0000000000..040b17ac7c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6211.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6211
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIEyAYJKoZIhvcNAQcCoIIEuTCCBLUCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggHM
+MIIByAIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKggfIwGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcN
+MTkwNTI5MTgyMzE5WjAoBgkqhkiG9w0BCTQxGzAZMAsGCWCGSAFlAwQCAqEKBggq
+hkjOPQQDAzA/BgkqhkiG9w0BCQQxMgQwtuQipP2CZx7U96rGbUT06LC5jVFYccZW
+5/CaNvpcrOPiChDm2vI3m4k300z5mSZsME0GCyqGSIb3DQEJEAIBMT4wPAQgx08h
+D2QnVwj1DoeRELNtdZ0PffW4BQIvcwwVc/goU6OAAQEwFTATgRFhbGljZUBleGFt
+cGxlLmNvbTAKBggqhkjOPQQDAwRnMGUCMQChIMyN1nTN+LLQcYJuhWT297vSKMDK
+fIUedSwWYrcSnSa1pq2s3Wue+pNBfecEjYECMGrUNu1UpWdafEJulP9Vz76qOPMa
+5V/AnTEV5zkmzRle8sffN+nQ+SGkoos5zpI1kA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder (substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc6211.id_aa_cmsAlgorithmProtect:
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc6211.CMSAlgorithmProtection())
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd = asn1Object['content']
+
+ self.assertEqual(
+ rfc5652.CMSVersion().subtype(value='v1'), sd['version'])
+
+ ect = sd['encapContentInfo']['eContentType']
+
+ self.assertIn(ect, rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_data, ect)
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc6211.id_aa_cmsAlgorithmProtect:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+
+ sav0 = sa['attrValues'][0]
+ digest_oid = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2')
+ sig_oid = univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+
+ self.assertEqual(
+ digest_oid, sav0['digestAlgorithm']['algorithm'])
+ self.assertEqual(
+ sig_oid, sav0['signatureAlgorithm']['algorithm'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6402.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6402.py
new file mode 100644
index 0000000000..e970dfa7dd
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6402.py
@@ -0,0 +1,157 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+
+
+class BackwardCompatibilityTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
+BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
+SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
+cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
+AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
+PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
+Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
+OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
+A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
+bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
+AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
+mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
+KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
+qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
+oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
+xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
+5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
+xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
+"""
+
+ def testDerCodec(self):
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+
+ def testOpenTypes(self):
+ class ClientInformation(univ.Sequence):
+ pass
+
+ ClientInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('clientId', univ.Integer()),
+ namedtype.NamedType('MachineName', char.UTF8String()),
+ namedtype.NamedType('UserName', char.UTF8String()),
+ namedtype.NamedType('ProcessName', char.UTF8String())
+ )
+
+ class EnrollmentCSP(univ.Sequence):
+ pass
+
+ EnrollmentCSP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('KeySpec', univ.Integer()),
+ namedtype.NamedType('Name', char.BMPString()),
+ namedtype.NamedType('Signature', univ.BitString())
+ )
+
+ openTypeMap = {
+ # attributes
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'): char.IA5String(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.2'): EnrollmentCSP(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.21.20'): ClientInformation(),
+ # algorithm identifier parameters
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ openTypeMap.update(rfc5652.cmsAttributesMap)
+ openTypeMap.update(rfc6402.cmcControlAttributesMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+
+ self.assertEqual(rfc6402.id_cct_PKIData, eci['eContentType'])
+
+ substrate = eci['eContent']
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc6402.PKIData(), openTypes=openTypeMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for req in asn1Object['reqSequence']:
+ cr = req['tcr']['certificationRequest']
+
+ sig_alg = cr['signatureAlgorithm']
+
+ self.assertIn(sig_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ cri = cr['certificationRequestInfo']
+ spki_alg = cri['subjectPublicKeyInfo']['algorithm']
+
+ self.assertIn(spki_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ attrs = cr['certificationRequestInfo']['attributes']
+ for attr in attrs:
+ self.assertIn( attr['attrType'], openTypeMap)
+
+ if attr['attrType'] == univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'):
+ self.assertEqual("6.2.9200.2", attr['attrValues'][0])
+
+ else:
+ self.assertTrue(attr['attrValues'][0].hasValue())
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6482.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6482.py
new file mode 100644
index 0000000000..c2f6a94831
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6482.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6482
+
+
+class RPKIROATestCase(unittest.TestCase):
+ roa_pem_text = """\
+MIIGvwYJKoZIhvcNAQcCoIIGsDCCBqwCAQMxDTALBglghkgBZQMEAgEwKgYLKoZIhvcNAQkQ
+ARigGwQZMBcCAwDj+zAQMA4EAgABMAgwBgMEAJMcLaCCBLwwggS4MIIDoKADAgECAgIGGDAN
+BgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyg2ZDZmYmZhOTc1M2RiOGQ4NDY0MzNkYjUzNTFk
+OWE5ZWMwN2M5NmJkMB4XDTE5MDgyMDAwNDkyOVoXDTIwMDcwMTAwMDAwMFowMzExMC8GA1UE
+AxMoNUI4M0REODdERTlBQzdDNkUzNEI4NzdERjUwMUEyQjEyMzBBODFCNDCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJcnDgSUtiQeelGQsTx2Ou5cgmfq6KPSEgMz/XyZrRzj
+wcqUQ/DyMYHyRJK8umKZjfMu+rItoPSkE26Wi9PcSnfuY+SyS9chTAtNOGMES6MbtHjNTmBF
+Xar5CFGM8teLIRHlCcScesgSR7q2eKgQ+cLiLTZnol0Mpmuf2NIs+V63Y4Hn/T7QOoudg9nU
+tmsh31hUN4jIENEXFvNDovkray25rl9aqFfW+dtkoNtdJjp367nNXCdp3GdE/3z0SIqT8wnh
+F67tgR22mwzex3umteQBwmM+iR28vuHL4E5jwRKBoiEgGPYqq7gbfkcoFtR3AV6QGKSK2aJU
+mUi+9VheS78CAwEAAaOCAdQwggHQMB0GA1UdDgQWBBRbg92H3prHxuNLh331AaKxIwqBtDAf
+BgNVHSMEGDAWgBRtb7+pdT242EZDPbU1HZqewHyWvTAYBgNVHSABAf8EDjAMMAoGCCsGAQUF
+Bw4CMFAGA1UdHwRJMEcwRaBDoEGGP3JzeW5jOi8vY2EucmcubmV0L3Jwa2kvUkduZXQtT1Uv
+YlctX3FYVTl1TmhHUXoyMU5SMmFuc0I4bHIwLmNybDBkBggrBgEFBQcBAQRYMFYwVAYIKwYB
+BQUHMAKGSHJzeW5jOi8vcnBraS5yaXBlLm5ldC9yZXBvc2l0b3J5L0RFRkFVTFQvYlctX3FY
+VTl1TmhHUXoyMU5SMmFuc0I4bHIwLmNlcjAOBgNVHQ8BAf8EBAMCB4AwgYoGCCsGAQUFBwEL
+BH4wfDBLBggrBgEFBQcwC4Y/cnN5bmM6Ly9jYS5yZy5uZXQvcnBraS9SR25ldC1PVS9XNFBk
+aDk2YXg4YmpTNGQ5OVFHaXNTTUtnYlEucm9hMC0GCCsGAQUFBzANhiFodHRwczovL2NhLnJn
+Lm5ldC9ycmRwL25vdGlmeS54bWwwHwYIKwYBBQUHAQcBAf8EEDAOMAwEAgABMAYDBACTHC0w
+DQYJKoZIhvcNAQELBQADggEBAKhhoJ3XtHejvG6XkFaCTxJci10gOgNvvPFWqz+CfOX2LmB0
+N3QhYjLiAZbfYSOxNReyL4bWDK/tpZgVA2VHuS8GB8fI8+nauQUiP38orVXKAbcUUxo7UkEM
+HxQ5T61FtXrEZx8hgKTlsfof0G2Q+baSJzNV2MIUgHmSszL4Mx/fHUXv8b7l/5mZQbdv3cZ9
+SbODHD0iOVAzK3fmHeuA4roSOk4mBQDWNRY1Ok+xH/HMDQdoOVtbfy57TZI2W7O2uxfElKvx
+fBeEc9TOaWqDz0xvmJ6bdZnmWRuvqW1475mhxi0s/I4eE2ZdaCinvrgrglBp/jpZi1jitY14
+dx+A1PMxggGqMIIBpgIBA4AUW4Pdh96ax8bjS4d99QGisSMKgbQwCwYJYIZIAWUDBAIBoGsw
+GgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEYMBwGCSqGSIb3DQEJBTEPFw0xOTA4MjAwMDQ5
+MjlaMC8GCSqGSIb3DQEJBDEiBCCfuHnOmhF2iBF3JXMOnoZCJzmE+Tcf8b+zObvDUpUddzAN
+BgkqhkiG9w0BAQEFAASCAQBDlJIMKCqWsFV/tQj/XvpSJUxJybG+zwjrUKm4yTKv8QEGOzOD
+aIL6irSOhhXeax6Lw0P2J7x+L3jGW1we1qWslumEDTr9kTE+kN/6rZuptUhwdrXcu3p9G6gJ
+mAUQtzqe2jRN1T3eSBfz1CNU3C7+jSHXOc+4Tea5mKiVddsjotYHXX0PbSCS/ZZ1yzdeES0o
+KWhXhW9ogS0bwtXWVTrciSekaRpp2n/pqcVEDxWg/5NpPiDlPNrRL/9eTEHFp940RAUfhbBh
+pbC2J02N0KgxUJxIJnGnpZ7rXKpG4jMiTVry7XB9bnFxCvZGBdjQW1Hagrfpl2TiVxQFvJWl
+IzU1
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.roa_pem_text)
+
+ layers = {}
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6482.id_ct_routeOriginAuthz: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6482.id_ct_routeOriginAuthz: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(0, asn1Object['version'])
+ self.assertEqual(58363, asn1Object['asID'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.roa_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid = asn1Object['content']['encapContentInfo']['eContentType']
+ substrate = asn1Object['content']['encapContentInfo']['eContent']
+
+ self.assertIn(oid, rfc5652.cmsContentTypesMap)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.cmsContentTypesMap[oid],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['version'])
+ self.assertEqual(58363, asn1Object['asID'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6486.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6486.py
new file mode 100644
index 0000000000..1e0075c877
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6486.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6486
+
+
+class SignedManifestTestCase(unittest.TestCase):
+ manifest_pem_text = """\
+MIIHVAYJKoZIhvcNAQcCoIIHRTCCB0ECAQMxDTALBglghkgBZQMEAgEwgYwGCyqGSIb3DQEJ
+EAEaoH0EezB5AgIK5xgPMjAxMjEwMjMyMjI2MDNaGA8yMDEyMTAyNTIyMjYwM1oGCWCGSAFl
+AwQCATBGMEQWH1pYU0dCREJrTDgyVEZHSHVFNFZPWXRKUC1FNC5jcmwDIQCzTdC3GsuONsRq
+RFnYf8+AJ2NnCIgmnc3O8PyfGvn18aCCBO4wggTqMIID0qADAgECAgIK5zANBgkqhkiG9w0B
+AQsFADATMREwDwYDVQQDEwhBOTE5OTg4NTAeFw0xMjEwMjMyMjI2MDNaFw0xMjEwMjUyMjI2
+MDNaMBgxFjAUBgNVBAMTDTUwODcxOTdjLTIwZjcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDEl4R4LiCs6zyR/IAeaRCfz0O0mXXAUKt8bmG6DXzaDYNG8dnBjbrsM1L05sb4
+2Ti4TyE1UXtwFFEwatsFQ2uRBn9gsKmDGOjW8TH1AYObmZW+hZlEN7OLSz2bmPLtxIMwiCq/
+vqmBJlMWPyCSym4iPnjzwWbJechqHSiTMOYGICF1QSW5xjJDAhRfeZG3nRY7TqfW8R2KJXeN
+cKSYSGNKzv79B8GCswmwU8J8kcuryIiqb7WtcK2B6VBsROIQHGXM0UV4Zbnvv9m9Fl0SjvZJ
+XyrzRjGzV2C00hM0f4jAplD9nJhAJ7nOTe8OnadrFABRga+Ge1HooeDQJGmTekLXAgMBAAGj
+ggJBMIICPTAdBgNVHQ4EFgQUbcbOyNBHkRXXDaMq51jC7vOSHFUwHwYDVR0jBBgwFoAUZXSG
+BDBkL82TFGHuE4VOYtJP+E4wDgYDVR0PAQH/BAQDAgeAMIGDBgNVHR8EfDB6MHigdqB0hnJy
+c3luYzovL3Jwa2kuYXBuaWMubmV0L21lbWJlcl9yZXBvc2l0b3J5L0E5MTk5ODg1LzY1RkQ0
+M0FBNUJFRjExREZBQjYxQjNFNzU1QUZFN0NGL1pYU0dCREJrTDgyVEZHSHVFNFZPWXRKUC1F
+NC5jcmwwfgYIKwYBBQUHAQEEcjBwMG4GCCsGAQUFBzAChmJyc3luYzovL3Jwa2kuYXBuaWMu
+bmV0L3JlcG9zaXRvcnkvQTNDMzhBMjRENjAzMTFEQ0FCMDhGMzE5NzlCREJFMzkvWlhTR0JE
+QmtMODJURkdIdUU0Vk9ZdEpQLUU0LmNlcjAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMIGQ
+BggrBgEFBQcBCwSBgzCBgDB+BggrBgEFBQcwC4ZycnN5bmM6Ly9ycGtpLmFwbmljLm5ldC9t
+ZW1iZXJfcmVwb3NpdG9yeS9BOTE5OTg4NS82NUZENDNBQTVCRUYxMURGQUI2MUIzRTc1NUFG
+RTdDRi9aWFNHQkRCa0w4MlRGR0h1RTRWT1l0SlAtRTQubWZ0MBUGCCsGAQUFBwEIAQH/BAYw
+BKACBQAwIQYIKwYBBQUHAQcBAf8EEjAQMAYEAgABBQAwBgQCAAIFADANBgkqhkiG9w0BAQsF
+AAOCAQEAyBl1J+ql1O3d6JiaQEG2UAjDSKHSMVau++QcB6/yd4RuWv2KpQxk1cp+awf4Ttoh
+GYakbUZQl7lJaXzbluG5siRSv6AowEWxf99iLhDx+pE1htklRfmmTE9oFpKnITAYZAUjarNC
+sYGCZ00vSwRu27OdpSQbZQ7WdyDAhyHS0Sun0pkImVSqPO11gqyKV9ZCwCJUa5U/zsWDMNrj
+MSZl1I3VoPs2rx997rLoiQiMqwGeoqfl7snpsL9OR/CazPmepuq3SyZNWcCrUGcGRhRdGScj
+Tm2EHne1GiRHapn46HWQ3am8jumEKv5u0gLT4Mi9CyZwkDyhotGTJZmdAmN7zzGCAaowggGm
+AgEDgBRtxs7I0EeRFdcNoyrnWMLu85IcVTALBglghkgBZQMEAgGgazAaBgkqhkiG9w0BCQMx
+DQYLKoZIhvcNAQkQARowHAYJKoZIhvcNAQkFMQ8XDTEyMTAyMzIyMjYwNFowLwYJKoZIhvcN
+AQkEMSIEIIu2XV8dT+rqQy5Cbpm3Tv5I1dwkLK8n2GesMGOr6/pEMA0GCSqGSIb3DQEBAQUA
+BIIBAFsd0zkl4dIHrqZts441T+w/5/ekymDLFwftk6W+Mi35Htjvm2IHOthnKHQsK5h6dnEh
+6DfNfc6tACmzLnM+UG7ve+uAhfpA+CUJIoVhpQvDH7Ntql0cD1X3d9ng484jpkVoHhbUIYNR
+TyxvV4DV5EBbLYpx2HYf6wWa8TCobxUXNtw53OVA24ceavS+KvuDa0JQPFpbYUCS0UPMt/Im
+mtKrWTmRUr8sYWdIQn+SStUh8iAR5rmSVr+Pe7aFbe2ju2FPf08gnIjH/SdCrJuFK8q7Z5MT
+C9ijmXiajracUe+7eCluqgXRE8yRtnscWoA/9fVFz1lPwgEeNHLoaK7Sqew=
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.manifest_pem_text)
+
+ layers = rfc5652.cmsContentTypesMap.copy()
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6486.id_ct_rpkiManifest: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6486.id_ct_rpkiManifest: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(0, asn1Object['version'])
+
+ for f in asn1Object['fileList']:
+ self.assertEqual('ZXSGBDBkL82TFGHuE4VOYtJP-E4.crl', f['file'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.manifest_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid = asn1Object['content']['encapContentInfo']['eContentType']
+ substrate = asn1Object['content']['encapContentInfo']['eContent']
+
+ self.assertIn(oid, rfc5652.cmsContentTypesMap)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.cmsContentTypesMap[oid],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['version'])
+
+ for f in asn1Object['fileList']:
+ self.assertEqual('ZXSGBDBkL82TFGHuE4VOYtJP-E4.crl', f['file'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6487.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6487.py
new file mode 100644
index 0000000000..9e42d0736e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6487.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6487
+
+
+class CertificateWithManifestTestCase(unittest.TestCase):
+ rpki_cert_pem_text = """\
+MIIGCTCCBPGgAwIBAgICKJgwDQYJKoZIhvcNAQELBQAwRjERMA8GA1UEAxMIQTkwREM1QkUx
+MTAvBgNVBAUTKDBDRkNFNzc4NTdGQ0YwMUYzOUQ5OUE2MkI0QUE2MkU2MTU5RTc2RjgwHhcN
+MTkwODA2MDQwMzIyWhcNMjAxMDMxMDAwMDAwWjBGMREwDwYDVQQDEwhBOTFEMTY5MTExMC8G
+A1UEBRMoREMwNEFGMTk4Qzk3RjI1ODJGMTVBRERFRUU3QzY4MjYxMUNBREE1MTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMksR6bPbZFpxlXID/2dhYFuS11agb6ACDUFJpII
+41uw65tFIPT+Y4laccnYRcWPWMTvHLyj0ggU+bc2zJCTYfmGD/GW/Q3WW0A3niBCdXDfkrp2
+DXvSTASJ5+wtVb+AE74C4Mr3UiMOXhJre1rRd5Lq7o6+TEKbVkmUrmTlbsz2Vs2F4//t5sCr
+WjAVP9D5jUBGH2MInbleBP1Bwf+kIxD16OKftRb/vGLzk1UhLsbq22GGE0vZ2hnJP3CbyXkN
+dLBraErzvyCnqYF7/yA0JL0KWRDwr7a9y37s8O3xOxhA/dL8hLZXllzJmoxvxHmq8D+5CjHv
+2/EmH8ODGm2aAzcCAwEAAaOCAv8wggL7MB0GA1UdDgQWBBTcBK8ZjJfyWC8Vrd7ufGgmEcra
+UTAfBgNVHSMEGDAWgBQM/Od4V/zwHznZmmK0qmLmFZ52+DAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zBzBgNVHR8EbDBqMGigZqBkhmJyc3luYzovL3Jwa2kuYXBuaWMubmV0
+L3JlcG9zaXRvcnkvQjMyMkE1RjQxRDY2MTFFMkEzRjI3RjdDNzJGRDFGRjIvRFB6bmVGZjg4
+Qjg1MlpwaXRLcGk1aFdlZHZnLmNybDB+BggrBgEFBQcBAQRyMHAwbgYIKwYBBQUHMAKGYnJz
+eW5jOi8vcnBraS5hcG5pYy5uZXQvcmVwb3NpdG9yeS85ODA2NTJFMEI3N0UxMUU3QTk2QTM5
+NTIxQTRGNEZCNC9EUHpuZUZmODhCODUyWnBpdEtwaTVoV2VkdmcuY2VyMEoGA1UdIAEB/wRA
+MD4wPAYIKwYBBQUHDgIwMDAuBggrBgEFBQcCARYiaHR0cHM6Ly93d3cuYXBuaWMubmV0L1JQ
+S0kvQ1BTLnBkZjCCASgGCCsGAQUFBwELBIIBGjCCARYwXwYIKwYBBQUHMAWGU3JzeW5jOi8v
+cnBraS5hcG5pYy5uZXQvbWVtYmVyX3JlcG9zaXRvcnkvQTkxRDE2OTEvNTBDNjkyOTI5RDI0
+MTFFNzg2MUEyMjZCQzRGOUFFMDIvMH4GCCsGAQUFBzAKhnJyc3luYzovL3Jwa2kuYXBuaWMu
+bmV0L21lbWJlcl9yZXBvc2l0b3J5L0E5MUQxNjkxLzUwQzY5MjkyOUQyNDExRTc4NjFBMjI2
+QkM0RjlBRTAyLzNBU3ZHWXlYOGxndkZhM2U3bnhvSmhISzJsRS5tZnQwMwYIKwYBBQUHMA2G
+J2h0dHBzOi8vcnJkcC5hcG5pYy5uZXQvbm90aWZpY2F0aW9uLnhtbDArBggrBgEFBQcBBwEB
+/wQcMBowGAQCAAEwEgMEAdQI5gMEAdQI/gMEAdRcZjANBgkqhkiG9w0BAQsFAAOCAQEAGvJ+
+s7VgIZk8LDSz6uvsyX80KzZgaqMF7sMsqln0eo5KiGGBHjwvZuiDf46xbNseWW2nwAHmjLda
+osCbcTGVu0JzFYBdkimgyHiq2l8yEchh5BUXr8x4CQIxwGEZEOlEp5mRa/AfHVEfDeMm7mob
+eiCfyTC8q8KH9Tb/rY192kBe+n9MuRyn7TkimV5eYMdwWMyT/VSBCQzzfJ0r+S9o0rBYWH9k
+HDFd3u1ztO8WGjH/LOehoO30xsm52kbxZjc4SJWubgBgxTMIWyjPHbKqCF44NwYev/6eFcOC
++KTEQ/hydcURm3YtX7EZLDtksWB2me576J8opeLsbNeNgzfJpg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ access_methods = [
+ rfc6487.id_ad_rpkiManifest,
+ rfc6487.id_ad_signedObject,
+ ]
+
+ substrate = pem.readBase64fromText(self.rpki_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_pe_subjectInfoAccess:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectInfoAccessSyntax())
+ for ad in extnValue:
+ if ad['accessMethod'] in access_methods:
+ uri = ad['accessLocation']['uniformResourceIdentifier']
+ self.assertIn('rpki.apnic.net', uri)
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+class CertificateWithSignedObjectTestCase(unittest.TestCase):
+ rpki_cert_pem_text = """\
+MIIEuDCCA6CgAwIBAgICBhgwDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoNmQ2
+ZmJmYTk3NTNkYjhkODQ2NDMzZGI1MzUxZDlhOWVjMDdjOTZiZDAeFw0xOTA4MjAw
+MDQ5MjlaFw0yMDA3MDEwMDAwMDBaMDMxMTAvBgNVBAMTKDVCODNERDg3REU5QUM3
+QzZFMzRCODc3REY1MDFBMkIxMjMwQTgxQjQwggEiMA0GCSqGSIb3DQEBAQUAA4IB
+DwAwggEKAoIBAQCXJw4ElLYkHnpRkLE8djruXIJn6uij0hIDM/18ma0c48HKlEPw
+8jGB8kSSvLpimY3zLvqyLaD0pBNulovT3Ep37mPkskvXIUwLTThjBEujG7R4zU5g
+RV2q+QhRjPLXiyER5QnEnHrIEke6tnioEPnC4i02Z6JdDKZrn9jSLPlet2OB5/0+
+0DqLnYPZ1LZrId9YVDeIyBDRFxbzQ6L5K2stua5fWqhX1vnbZKDbXSY6d+u5zVwn
+adxnRP989EiKk/MJ4Reu7YEdtpsM3sd7prXkAcJjPokdvL7hy+BOY8ESgaIhIBj2
+Kqu4G35HKBbUdwFekBikitmiVJlIvvVYXku/AgMBAAGjggHUMIIB0DAdBgNVHQ4E
+FgQUW4Pdh96ax8bjS4d99QGisSMKgbQwHwYDVR0jBBgwFoAUbW+/qXU9uNhGQz21
+NR2ansB8lr0wGAYDVR0gAQH/BA4wDDAKBggrBgEFBQcOAjBQBgNVHR8ESTBHMEWg
+Q6BBhj9yc3luYzovL2NhLnJnLm5ldC9ycGtpL1JHbmV0LU9VL2JXLV9xWFU5dU5o
+R1F6MjFOUjJhbnNCOGxyMC5jcmwwZAYIKwYBBQUHAQEEWDBWMFQGCCsGAQUFBzAC
+hkhyc3luYzovL3Jwa2kucmlwZS5uZXQvcmVwb3NpdG9yeS9ERUZBVUxUL2JXLV9x
+WFU5dU5oR1F6MjFOUjJhbnNCOGxyMC5jZXIwDgYDVR0PAQH/BAQDAgeAMIGKBggr
+BgEFBQcBCwR+MHwwSwYIKwYBBQUHMAuGP3JzeW5jOi8vY2EucmcubmV0L3Jwa2kv
+UkduZXQtT1UvVzRQZGg5NmF4OGJqUzRkOTlRR2lzU01LZ2JRLnJvYTAtBggrBgEF
+BQcwDYYhaHR0cHM6Ly9jYS5yZy5uZXQvcnJkcC9ub3RpZnkueG1sMB8GCCsGAQUF
+BwEHAQH/BBAwDjAMBAIAATAGAwQAkxwtMA0GCSqGSIb3DQEBCwUAA4IBAQCoYaCd
+17R3o7xul5BWgk8SXItdIDoDb7zxVqs/gnzl9i5gdDd0IWIy4gGW32EjsTUXsi+G
+1gyv7aWYFQNlR7kvBgfHyPPp2rkFIj9/KK1VygG3FFMaO1JBDB8UOU+tRbV6xGcf
+IYCk5bH6H9BtkPm2kiczVdjCFIB5krMy+DMf3x1F7/G+5f+ZmUG3b93GfUmzgxw9
+IjlQMyt35h3rgOK6EjpOJgUA1jUWNTpPsR/xzA0HaDlbW38ue02SNluztrsXxJSr
+8XwXhHPUzmlqg89Mb5iem3WZ5lkbr6lteO+ZocYtLPyOHhNmXWgop764K4JQaf46
+WYtY4rWNeHcfgNTz
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ access_methods = [
+ rfc6487.id_ad_rpkiManifest,
+ rfc6487.id_ad_signedObject,
+ ]
+
+ substrate = pem.readBase64fromText(self.rpki_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_pe_subjectInfoAccess:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectInfoAccessSyntax())
+ for ad in extnValue:
+ if ad['accessMethod'] in access_methods:
+ uri = ad['accessLocation']['uniformResourceIdentifier']
+ self.assertIn('ca.rg.net', uri)
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6664.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6664.py
new file mode 100644
index 0000000000..83278a7f4b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6664.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc6664
+
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MIICOjAJBgUrDgMCGgUAMA0GCWCGSAFlAwQCBAUAMA0GCWCGSAFlAwQCAQUAMA0G
+CWCGSAFlAwQCAgUAMA0GCWCGSAFlAwQCAwUAMBUGCSqGSIb3DQEBATAIAgIEAAIC
+EAAwFQYJKoZIhvcNAQEHMAgCAgQAAgIQADAVBgkqhkiG9w0BAQowCAICBAACAhAA
+MBUGByqGSM44BAGgCjAIAgIEAAICDAAwggEvBgcqhkjOPgIBoYIBIjCCAR4CgYEA
+i6Ued8R33vkopJwCvy/ZZv2TtddPXPYmJK4jyFv+TDJTPqnP7XUZCqRuhCyKX10z
+7SgiZs6qlSMk5gCa8shPF8NCHtps2D1OVC7yppZUJI07FoDxoEAZHImdAFvYIA/V
+cGYpYOKod4kju0/e4VUBZ6Qoer5vKTh+lD/+ZKa/WSUCFQDc3W87QSZSX6ggdbeI
+fzb0rsAhbwKBgCEz/o4WJPUZ4HffJfuXHIGrkPnCxFAYDRtlqueswV0Gy6LunipE
+Iu3nCzYkZhMatyFNyzo+NusEsS+9isOhT8jhL93nSBZCSRBy+GfmSXlXv/3c8mtH
+XTie5JOqjRdonPr4g/+VZvMkcioooNrhx/zICHrC3WZ72871/n/z9M+dMCMGByqG
+SM49AgEwGAYIKoZIzj0DAQcGBSuBBAAiBgUrgQQAIzAhBgUrgQQBDTAYBggqhkjO
+PQMBBwYFK4EEACIGBSuBBAAjMBoGCSqGSIb3DQEBCDANBglghkgBZQMEAgEFAA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for cap in asn1Object:
+ if cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys():
+ substrate = cap['parameters']
+ cap_p, rest = der_decoder(
+ substrate, asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(cap_p.prettyPrint())
+ self.assertEqual(substrate, der_encoder(cap_p))
+ count += 1
+
+ self.assertEqual(8, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ parameterValue = {
+ rfc6664.rsaEncryption: lambda x: x['maxKeySize'],
+ rfc6664.id_RSAES_OAEP: lambda x: x['maxKeySize'],
+ rfc6664.id_RSASSA_PSS: lambda x: x['minKeySize'],
+ rfc6664.id_dsa: lambda x: x['keySizes']['maxKeySize'],
+ rfc6664.dhpublicnumber: lambda x: x['keyParams']['q'] % 1023,
+ rfc6664.id_ecPublicKey: lambda x: x[0]['namedCurve'],
+ rfc6664.id_ecMQV: lambda x: x[1]['namedCurve'],
+ }
+
+ expectedValue = {
+ rfc6664.rsaEncryption: 4096,
+ rfc6664.id_RSAES_OAEP: 4096,
+ rfc6664.id_RSASSA_PSS: 1024,
+ rfc6664.id_dsa: 3072,
+ rfc6664.dhpublicnumber: 257,
+ rfc6664.id_ecPublicKey: rfc5480.secp256r1,
+ rfc6664.id_ecMQV: rfc5480.secp384r1,
+ }
+
+ count = 0
+ for cap in asn1Object:
+ if cap['capabilityID'] in parameterValue.keys():
+ pValue = parameterValue[cap['capabilityID']](cap['parameters'])
+ eValue = expectedValue[cap['capabilityID']]
+ self.assertEqual(eValue, pValue)
+ count += 1
+
+ self.assertEqual(7, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6955.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6955.py
new file mode 100644
index 0000000000..443d70daf0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6955.py
@@ -0,0 +1,101 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc6955
+
+
+class CertificationRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDPDCCArsCAQAwTjELMAkGA1UEBhMCVVMxETAPBgNVBAoTCFhFVEkgSW5jMRAw
+DgYDVQQLEwdUZXN0aW5nMRowGAYDVQQDExFQS0lYIEV4YW1wbGUgVXNlcjCCAkEw
+ggG2BgcqhkjOPgIBMIIBqQKBgQCUhOBFbH9pUWI+VoB8aOfFqZ6edHSU7ZCMHcTh
+ShSC9dKUDBnjuRC7EbnlpfuOIVFjAoaqBrghNrZ/Nt/R1mhbeXwdWhR1H2qTdZPO
+u5dyivAPI51H9tSzx/D05vYrwjLhiWe+fgau+NABa4sq9QLXtqhjlIOwGzF9Uhre
+5QOFJwKBgCamMixaK9QzK1zcBodTP5AGYVA4PtK5fYEcEhDFDFPUZNGOMAcIjN0/
+Ci8s1ht/V4bQ2rtuNioY6NO8cDF6SLZOGG7dHyIG6z/q1EFp2ZveR5V6cpHSCX9J
+XDsDM1HI8Tma/wTVbn6UPQO49jEVJkiVqFzeR4i0aToAp4ae2tHNAiEA6HL6lvAR
+QPXy3P07XXiUsYUB5Wk3IfclubpxSvxgMPsCYQCjkQHAqG6kTaBW/Gz+H6ewzQ+U
+hwwlvpd2jevlpAldq4PNgAs1Z38MjqcxmDKFOUCdEZjY3rh/hpuvjWc9tna0YS8h
+4UsOaP9TPofd2HFWaEfc9yBjSzxfeHGD5nCe4pIwGgMVABzVOg0Xgm0KgXWBRhCO
+PtsJ5Jg0AgE3A4GEAAKBgBNjoYUEjEaoiOv0XqiTdK79rp6WJxJlxEwHBj4Y/pS4
+qHlIvS40tkfKBDCh7DP9GgstnlDJeA+uauy1a2q+slzasp94LLl34nkrJb8uC1lK
+k0v4s+yBNK6XR1LgqCmY7NGwyitveovbTo2lFX5+rzNiCZ4PEUSMwY2iEZ5T77Lo
+oCEwHwYJKoZIhvcNAQkOMRIwEDAOBgNVHQ8BAf8EBAMCAwgwDAYIKwYBBQUHBgMF
+AANtADBqMFIwSDELMAkGA1UEBhMCVVMxETAPBgNVBAoTCFhFVEkgSW5jMRAwDgYD
+VQQLEwdUZXN0aW5nMRQwEgYDVQQDEwtSb290IERTQSBDQQIGANo5tuLLBBQtBXf+
+Xo9l9a+tyVybAsCoiClhYw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6402.CertificationRequest()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['certificationRequestInfo']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.dhpublicnumber, spki_a['algorithm'])
+ self.assertIn(spki_a['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ params, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc6955.DomainParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(params.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(params))
+ self.assertEqual(55, params['validationParms']['pgenCounter'])
+
+ sig_a = asn1Object['signatureAlgorithm']
+
+ self.assertEqual(
+ rfc6955.id_dhPop_static_sha1_hmac_sha1, sig_a['algorithm'])
+ self.assertIn(sig_a['algorithm'], rfc5280.algorithmIdentifierMap)
+ self.assertEqual(sig_a['parameters'], der_encoder(univ.Null("")))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['certificationRequestInfo']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.dhpublicnumber, spki_a['algorithm'])
+ self.assertEqual(
+ 55, spki_a['parameters']['validationParms']['pgenCounter'])
+
+ sig_a = asn1Object['signatureAlgorithm']
+
+ self.assertEqual(
+ rfc6955.id_dhPop_static_sha1_hmac_sha1, sig_a['algorithm'])
+ self.assertEqual(univ.Null(""), sig_a['parameters'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc6960.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc6960.py
new file mode 100644
index 0000000000..151c934ca9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc6960.py
@@ -0,0 +1,176 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc6960
+
+
+class OCSPRequestTestCase(unittest.TestCase):
+ ocsp_req_pem_text = """\
+MGowaDBBMD8wPTAJBgUrDgMCGgUABBS3ZrMV9C5Dko03aH13cEZeppg3wgQUkqR1LKSevoFE63n8
+isWVpesQdXMCBDXe9M+iIzAhMB8GCSsGAQUFBzABAgQSBBBjdJOiIW9EKJGELNNf/rdA
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6960.OCSPRequest()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ocsp_req_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['tbsRequest']['version'])
+
+ count = 0
+ for extn in asn1Object['tbsRequest']['requestExtensions']:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ count += 1
+
+ self.assertEqual(1, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.ocsp_req_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['tbsRequest']['version'])
+
+ for req in asn1Object['tbsRequest']['requestList']:
+ ha = req['reqCert']['hashAlgorithm']
+ self.assertEqual(rfc4055.id_sha1, ha['algorithm'])
+ self.assertEqual(univ.Null(""), ha['parameters'])
+
+
+class OCSPResponseTestCase(unittest.TestCase):
+ ocsp_resp_pem_text = """\
+MIIEvQoBAKCCBLYwggSyBgkrBgEFBQcwAQEEggSjMIIEnzCCAQ+hgYAwfjELMAkGA1UEBhMCQVUx
+EzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEV
+MBMGA1UEAxMMc25tcGxhYnMuY29tMSAwHgYJKoZIhvcNAQkBFhFpbmZvQHNubXBsYWJzLmNvbRgP
+MjAxMjA0MTExNDA5MjJaMFQwUjA9MAkGBSsOAwIaBQAEFLdmsxX0LkOSjTdofXdwRl6mmDfCBBSS
+pHUspJ6+gUTrefyKxZWl6xB1cwIENd70z4IAGA8yMDEyMDQxMTE0MDkyMlqhIzAhMB8GCSsGAQUF
+BzABAgQSBBBjdJOiIW9EKJGELNNf/rdAMA0GCSqGSIb3DQEBBQUAA4GBADk7oRiCy4ew1u0N52QL
+RFpW+tdb0NfkV2Xyu+HChKiTThZPr9ZXalIgkJ1w3BAnzhbB0JX/zq7Pf8yEz/OrQ4GGH7HyD3Vg
+PkMu+J6I3A2An+bUQo99AmCbZ5/tSHtDYQMQt3iNbv1fk0yvDmh7UdKuXUNSyJdHeg27dMNy4k8A
+oIIC9TCCAvEwggLtMIICVqADAgECAgEBMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAkFVMRMw
+EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxFTAT
+BgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wHhcN
+MTIwNDExMTMyNTM1WhcNMTMwNDExMTMyNTM1WjB+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29t
+ZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRUwEwYDVQQDEwxzbm1w
+bGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25tcGxhYnMuY29tMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDDDU5HOnNV8I2CojxB8ilIWRHYQuaAjnjrETMOprouDHFXnwWqQo/I3m0b
+XYmocrh9kDefb+cgc7+eJKvAvBqrqXRnU38DmQU/zhypCftGGfP8xjuBZ1n23lR3hplN1yYA0J2X
+SgBaAg6e8OsKf1vcX8Es09rDo8mQpt4G2zR56wIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU8Ys2dpJFLMHl
+yY57D4BNmlqnEcYwHwYDVR0jBBgwFoAU8Ys2dpJFLMHlyY57D4BNmlqnEcYwDQYJKoZIhvcNAQEF
+BQADgYEAWR0uFJVlQId6hVpUbgXFTpywtNitNXFiYYkRRv77McSJqLCa/c1wnuLmqcFcuRUK0oN6
+8ZJDP2HDDKe8MCZ8+sx+CF54eM8VCgN9uQ9XyE7x9XrXDd3Uw9RJVaWSIezkNKNeBE0lDM2jUjC4
+HAESdf7nebz1wtqAOXE1jWF/y8g=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6960.OCSPResponse()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ocsp_resp_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['responseStatus'])
+
+ rb = asn1Object['responseBytes']
+
+ self.assertIn(rb['responseType'], rfc6960.ocspResponseMap)
+
+ resp, rest = der_decoder(
+ rb['response'], asn1Spec=rfc6960.ocspResponseMap[rb['responseType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(resp.prettyPrint())
+ self.assertEqual(rb['response'], der_encoder(resp))
+ self.assertEqual(0, resp['tbsResponseData']['version'])
+
+ count = 0
+ for extn in resp['tbsResponseData']['responseExtensions']:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ count += 1
+
+ self.assertEqual(1, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.ocsp_resp_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['responseStatus'])
+
+ rb = asn1Object['responseBytes']
+
+ self.assertIn(rb['responseType'], rfc6960.ocspResponseMap)
+
+ resp, rest = der_decoder(
+ rb['response'],
+ asn1Spec=rfc6960.ocspResponseMap[rb['responseType']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(resp.prettyPrint())
+ self.assertEqual(rb['response'], der_encoder(resp))
+ self.assertEqual(0, resp['tbsResponseData']['version'])
+
+ for rdn in resp['tbsResponseData']['responderID']['byName']['rdnSequence']:
+ for attr in rdn:
+ if attr['type'] == rfc5280.id_emailAddress:
+ self.assertEqual('info@snmplabs.com', attr['value'])
+
+ for r in resp['tbsResponseData']['responses']:
+ ha = r['certID']['hashAlgorithm']
+ self.assertEqual(rfc4055.id_sha1, ha['algorithm'])
+ self.assertEqual(univ.Null(""), ha['parameters'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7030.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7030.py
new file mode 100644
index 0000000000..7d011f0bb0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7030.py
@@ -0,0 +1,89 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7030
+
+
+class CSRAttrsTestCase(unittest.TestCase):
+ pem_text = """\
+MEEGCSqGSIb3DQEJBzASBgcqhkjOPQIBMQcGBSuBBAAiMBYGCSqGSIb3DQEJDjEJ
+BgcrBgEBAQEWBggqhkjOPQQDAw==
+"""
+
+ the_oids = (
+ univ.ObjectIdentifier('1.2.840.113549.1.9.7'),
+ univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+ )
+
+ the_attrTypes = (
+ univ.ObjectIdentifier('1.2.840.10045.2.1'),
+ univ.ObjectIdentifier('1.2.840.113549.1.9.14'),
+ )
+
+ the_attrVals = (
+ '1.3.132.0.34',
+ '1.3.6.1.1.1.1.22',
+ )
+
+ def setUp(self):
+ self.asn1Spec = rfc7030.CsrAttrs()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr_or_oid in asn1Object:
+ if attr_or_oid.getName() == 'oid':
+ self.assertIn(attr_or_oid['oid'], self.the_oids)
+
+ if attr_or_oid.getName() == 'attribute':
+ self.assertIn(
+ attr_or_oid['attribute']['attrType'], self.the_attrTypes)
+
+ def testOpenTypes(self):
+ openTypesMap = rfc5652.cmsAttributesMap.copy()
+
+ for at in self.the_attrTypes:
+ openTypesMap.update({at: univ.ObjectIdentifier()})
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr_or_oid in asn1Object:
+ if attr_or_oid.getName() == 'attribute':
+ valString = attr_or_oid['attribute']['attrValues'][0].prettyPrint()
+
+ if attr_or_oid['attribute']['attrType'] == self.the_attrTypes[0]:
+ self.assertEqual(self.the_attrVals[0], valString)
+
+ if attr_or_oid['attribute']['attrType'] == self.the_attrTypes[1]:
+ self.assertEqual(self.the_attrVals[1], valString)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7191.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7191.py
new file mode 100644
index 0000000000..40afbd42ea
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7191.py
@@ -0,0 +1,313 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7191
+
+
+class ReceiptRequestTestCase(unittest.TestCase):
+ message1_pem_text = """\
+MIIGfAYJKoZIhvcNAQcCoIIGbTCCBmkCAQMxDTALBglghkgBZQMEAgIwgb4GCyqGSIb3DQEJ
+EAEZoIGuBIGrMIGooEQwIwYLKoZIhvcNAQkQDAExFAwSVmlnaWwgU2VjdXJpdHkgTExDMB0G
+CyqGSIb3DQEJEAwDMQ4MDFByZXRlbmQgMDQ4QTBgMF4wVjAbBgsqhkiG9w0BCRAMGzEMDApl
+eGFtcGxlSUQxMBUGCyqGSIb3DQEJEAwKMQYMBEhPVFAwIAYLKoZIhvcNAQkQDAsxEQwPa3Rh
+LmV4YW1wbGUuY29tBAQxMjM0oIIChzCCAoMwggIKoAMCAQICCQCls1QoG7BuPTAKBggqhkjO
+PQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAP
+BgNVBAoMCEJvZ3VzIENBMB4XDTE5MDYxMjE0MzEwNFoXDTIwMDYxMTE0MzEwNFowfDELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRswGQYDVQQKExJWaWdp
+bCBTZWN1cml0eSBMTEMxFzAVBgNVBAsTDktleSBNYW5hZ2VtZW50MRgwFgYDVQQDEw9rdGEu
+ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASX9l7E3VS3GAEiiRrVozgCBQfL
+F67IhOxtbQviD/ojhHSQmflLyfRJ8e7+nbWlOLstRc7lgmq+OQVaSlStkzVk/BO1wE5BgUyF
+xje+sieUtPRXVqfoVZCJJsgiSbo181ejgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIB
+DQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9z
+ZS4wHQYDVR0OBBYEFG2bXP0Dr7W51YvxZJ8aVuC1rU0PMB8GA1UdIwQYMBaAFPI12zQE2qVV
+8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMAZ4lqTtdbaDLFfHywaQYwOWBkL3d0wH
+EsNZTW1qQKy/oY3tXc0O6cbJZ5JJb9wk8QIwblXm8+JjdEJHsNjSv4rcJZou4vkMT7PzEme2
+BbMkwOWeIdhmy1vszd8TQgvdb36XMYIDBzCCAwMCAQOAFG2bXP0Dr7W51YvxZJ8aVuC1rU0P
+MAsGCWCGSAFlAwQCAqCCAmUwGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEZMBwGCSqGSIb3
+DQEJBTEPFw0xOTA2MTIxOTM1NTFaMCUGCyqGSIb3DQEJEAIHMRYEFCe4nFY7FiJRnReHHHm/
+rIht3/g9MD8GCSqGSIb3DQEJBDEyBDA3gzQlzfvylOn9Rf59kMSa1K2IyOBA5Eoeiyp83Bmj
+KasomGorn9htte1iFPbxPRUwggG/BglghkgBZQIBBUExggGwMIIBrAQUJ7icVjsWIlGdF4cc
+eb+siG3f+D0wggGSoIH+MH8GCWCGSAFlAgEQAARyMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQI
+EwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxp
+Y2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHsGCWCGSAFlAgEQAARuMGwx
+CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMH
+RXhhbXBsZTEMMAoGA1UEAxMDQm9iMR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20w
+gY4wgYsGCWCGSAFlAgEQAAR+MHwxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UE
+BxMHSGVybmRvbjEbMBkGA1UEChMSVmlnaWwgU2VjdXJpdHkgTExDMRcwFQYDVQQLEw5LZXkg
+TWFuYWdlbWVudDEYMBYGA1UEAxMPa3RhLmV4YW1wbGUuY29tMAoGCCqGSM49BAMDBGYwZAIw
+Z7DXliUb8FDKs+BadyCY+IJobPnQ6UoLldMj3pKEowONPifqrbWBJJ5cQQNgW6YuAjBbjSlY
+goRV+bq4fdgOOj25JFqa80xnXGtQqjm/7NSII5SbdJk+DT7KCkSbkElkbgQ=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.message1_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat == rfc7191.id_aa_KP_keyPkgIdAndReceiptReq:
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc7191.KeyPkgIdentifierAndReceiptReq())
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, sav['pkgID'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.message1_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ v3 = rfc5652.CMSVersion().subtype(value='v3')
+
+ self.assertEqual(v3, asn1Object['content']['version'])
+
+ for sa in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc7191.id_aa_KP_keyPkgIdAndReceiptReq:
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+ self.assertEqual(package_id, sa['attrValues'][0]['pkgID'])
+
+
+class ReceiptTestCase(unittest.TestCase):
+ message2_pem_text = """\
+MIIEdAYJKoZIhvcNAQcCoIIEZTCCBGECAQMxDTALBglghkgBZQMEAgIwgawGCmCGSAFlAgEC
+TgOggZ0EgZowgZcEFCe4nFY7FiJRnReHHHm/rIht3/g9MH8GCWCGSAFlAgEQAARyMHAxCzAJ
+BgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhh
+bXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29t
+oIICfDCCAngwggH+oAMCAQICCQCls1QoG7BuOzAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJV
+UzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4X
+DTE5MDUyOTE0NDU0MVoXDTIwMDUyODE0NDU0MVowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
+AlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGlj
+ZTEgMB4GCSqGSIb3DQEJARYRYWxpY2VAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6zXCYlmsEGD5vPu5hl9hDEjd1UHRgJIPoy
+3fJcWWeZ8FHCirICtuMgFisNscG/aTwKyDYOFDuqz/C2jyEwqgWCRyxyohuJXtmjgZQwgZEw
+CwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBi
+ZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFMS6Wg4+euM8gbD0Aqpouxbg
+lg41MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2gAMGUC
+MGO5H9E1uAveRGGaf48lN4pov2yH+hCAc5hOAuZKe/f40MKSF8q4w2ij+0euSaKFiAIxAL3g
+xp6sMitCmLQgOH6/RBIC/2syJ97y0KVp9da0PDAvwxLugCHTKZPjjpSLPHHc9TGCARwwggEY
+AgEDgBTEuloOPnrjPIGw9AKqaLsW4JYONTALBglghkgBZQMEAgKgejAZBgkqhkiG9w0BCQMx
+DAYKYIZIAWUCAQJOAzAcBgkqhkiG9w0BCQUxDxcNMTkwNjEzMTYxNjA4WjA/BgkqhkiG9w0B
+CQQxMgQwQSWYpq4jwhMkmS0as0JL3gjYxKLgDfzP2ndTNsAY0m9p8Igp8ZcK4+5n9fXJ43vU
+MAoGCCqGSM49BAMDBGgwZgIxAMfq2EJ5pSl9tGOEVJEgZitc266ljrOg5GDjkd2d089qw1A3
+bUcOYuCdivgxVuhlAgIxAPR9JavxziwCbVyBUWOAiKKYfglTgG3AwNmrKDj0NtXUQ9qDmGAc
+6L+EAY2P5OVB8Q==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.message2_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ oid = sd['encapContentInfo']['eContentType']
+
+ self.assertEqual(rfc7191.id_ct_KP_keyPackageReceipt, oid)
+
+ receipt, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc7191.KeyPackageReceipt())
+
+ self.assertFalse(rest)
+ self.assertTrue(receipt.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(receipt))
+
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, receipt['receiptOf']['pkgID'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.message2_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ v3 = rfc5652.CMSVersion().subtype(value='v3')
+
+ self.assertEqual(v3, asn1Object['content']['version'])
+
+ for sa in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ self.assertIn( sa['attrType'], rfc5652.cmsAttributesMap)
+ if sa['attrType'] == rfc5652.id_messageDigest:
+ self.assertIn(
+ '0x412598a6ae2', sa['attrValues'][0].prettyPrint())
+
+ ct_oid = asn1Object['content']['encapContentInfo']['eContentType']
+
+ self.assertIn(ct_oid, rfc5652.cmsContentTypesMap)
+ self.assertEqual(ct_oid, rfc7191.id_ct_KP_keyPackageReceipt)
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ sd_eci = asn1Object['content']['encapContentInfo']
+ receipt, rest = der_decoder(
+ sd_eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd_eci['eContentType']])
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, receipt['receiptOf']['pkgID'])
+
+
+class ErrorTestCase(unittest.TestCase):
+ message3_pem_text = """\
+MIIEbwYJKoZIhvcNAQcCoIIEYDCCBFwCAQMxDTALBglghkgBZQMEAgIwga0GCmCGSAFlAgEC
+TgaggZ4EgZswgZigFgQUJ7icVjsWIlGdF4cceb+siG3f+D0wewYJYIZIAWUCARAABG4wbDEL
+MAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAwDgYDVQQKEwdF
+eGFtcGxlMQwwCgYDVQQDEwNCb2IxHjAcBgkqhkiG9w0BCQEWD2JvYkBleGFtcGxlLmNvbQoB
+CqCCAncwggJzMIIB+qADAgECAgkApbNUKBuwbjwwCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMC
+VVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAe
+Fw0xOTA1MjkxOTIwMTNaFw0yMDA1MjgxOTIwMTNaMGwxCzAJBgNVBAYTAlVTMQswCQYDVQQI
+EwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEMMAoGA1UEAxMDQm9i
+MR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AAQxpGJVLxa83xhyal+rvmMFs4xS6Q19cCDoAvQkkFe0gUC4glxlWWQuf/FvLCRwwscr877D
+1FZRBrYKPD6Hxv/UKX6Aimou0TnnxsPk98zZpikn9gTrJn2cF9NCzvPVMfmjgZQwgZEwCwYD
+VR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0
+cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFMprZnLeLJtXf5iO4sMq02aOwhql
+MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMBVu
+hLo58RhCiYsOLZFSR3vWHPDCJBnO1vE1uixqEjONHxlBoeGN2MmWs/9PppcHCwIwN9HB5jPc
+J7gTjA9+ipCe+qkztmV+Gy2NBAY6xYC0gh+pb+X5OAI7y7HdctXp+PfrMYIBGzCCARcCAQOA
+FMprZnLeLJtXf5iO4sMq02aOwhqlMAsGCWCGSAFlAwQCAqB6MBkGCSqGSIb3DQEJAzEMBgpg
+hkgBZQIBAk4GMBwGCSqGSIb3DQEJBTEPFw0xOTA2MTMxNjE2MDhaMD8GCSqGSIb3DQEJBDEy
+BDCgXFTUc3ZInjt+MWYkYmXYERk4FgErEZNILlWgVl7Z9pImgLObIpdrGqGPt06/VkwwCgYI
+KoZIzj0EAwMEZzBlAjEAsjJ3iWRUteMKBVsjaYeN6TG9NITRTOpRVkSVq55DcnhwS9g9lu8D
+iNF8uKtW/lk0AjA7z2q40N0lamXkSU7ECasiWOYV1X4cWGiQwMZDKknBPDqXqB6Es6p4J+qe
+0V6+BtY=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.message3_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ oid = sd['encapContentInfo']['eContentType']
+
+ self.assertEqual(rfc7191.id_ct_KP_keyPackageError, oid)
+
+ kpe, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc7191.KeyPackageError())
+
+ self.assertFalse(rest)
+ self.assertTrue(kpe.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(kpe))
+
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, kpe['errorOf']['pkgID'])
+ self.assertEqual(
+ rfc7191.EnumeratedErrorCode(value=10), kpe['errorCode'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.message3_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ v3 = rfc5652.CMSVersion().subtype(value='v3')
+
+ self.assertEqual(v3, asn1Object['content']['version'])
+
+ for sa in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+ if sa['attrType'] == rfc5652.id_messageDigest:
+ self.assertIn(
+ '0xa05c54d4737', sa['attrValues'][0].prettyPrint())
+
+ ct_oid = asn1Object['content']['encapContentInfo']['eContentType']
+
+ self.assertIn(ct_oid, rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc7191.id_ct_KP_keyPackageError, ct_oid)
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ sd_eci = asn1Object['content']['encapContentInfo']
+ kpe, rest = der_decoder(
+ sd_eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd_eci['eContentType']])
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, kpe['errorOf']['pkgID'])
+ self.assertEqual(rfc7191.EnumeratedErrorCode(value=10), kpe['errorCode'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7229.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7229.py
new file mode 100644
index 0000000000..915b9be530
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7229.py
@@ -0,0 +1,93 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7229
+
+
+class CertificatePolicyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDJDCCAqqgAwIBAgIJAKWzVCgbsG5AMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDEzMTkwNTUzWhcNMjAxMDEyMTkwNTUzWjBTMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xJTAjBgNVBAoTHFRF
+U1QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AATwUXZUseiOaqWdrClDCMbp9YFAM87LTmFirygpzKDU9cfqSCg7zBDIphXCwMcS
+9zVWDoStCbcvN0jw5CljHcffzpHYX91P88SZRJ1w4hawHjOsWxvM3AkYgZ5nfdlL
+7EajggFcMIIBWDAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwbwYDVR0j
+BGgwZoAU8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GC
+CQDokdYGkU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgB
+hvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3Ig
+YW55IHB1cnBvc2UuMCEGA1UdIAQaMBgwCgYIKwYBBQUHDQEwCgYIKwYBBQUHDQIw
+CgYDVR02BAMCAQIwNQYDVR0hBC4wLDAUBggrBgEFBQcNAQYIKwYBBQUHDQcwFAYI
+KwYBBQUHDQIGCCsGAQUFBw0IMAoGCCqGSM49BAMDA2gAMGUCMHaWskjS7MKQCMcn
+zEKFOV3LWK8pL57vrECJd8ywKdwBJUNw9HhvSKkfUwL6rjlLpQIxAL2QO3CNoZRP
+PZs8K3IjUA5+U73pA8lpaTOPscLY22WL9pAGmyVUyEJ8lM7E+r4iDg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ test_oids = [
+ rfc7229.id_TEST_certPolicyOne,
+ rfc7229.id_TEST_certPolicyTwo,
+ rfc7229.id_TEST_certPolicyThree,
+ rfc7229.id_TEST_certPolicyFour,
+ rfc7229.id_TEST_certPolicyFive,
+ rfc7229.id_TEST_certPolicySix,
+ rfc7229.id_TEST_certPolicySeven,
+ rfc7229.id_TEST_certPolicyEight,
+ ]
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ s = extn['extnValue']
+ ev, rest = der_decoder(
+ s, rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(s, der_encoder(ev))
+
+ if extn['extnID'] == rfc5280.id_ce_certificatePolicies:
+ for pol in ev:
+ if pol['policyIdentifier'] in test_oids:
+ count += 1
+
+ if extn['extnID'] == rfc5280.id_ce_policyMappings:
+ for pmap in ev:
+ if pmap['issuerDomainPolicy'] in test_oids:
+ count += 1
+ if pmap['subjectDomainPolicy'] in test_oids:
+ count += 1
+
+ self.assertEqual(6, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7292.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7292.py
new file mode 100644
index 0000000000..583d396d67
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7292.py
@@ -0,0 +1,183 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7292
+
+
+class PKCS12TestCase(unittest.TestCase):
+ pfx_pem_text = """\
+MIIJ0wIBAzCCCY8GCSqGSIb3DQEHAaCCCYAEggl8MIIJeDCCBggGCSqGSIb3DQEHAaCCBfkE
+ggX1MIIF8TCCBe0GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAjuq0/+
+0pyutQICB9AEggTYZe/mYBpmkDvKsve4EwIVwo1TNv4ldyx1qHZW2Ih6qQCY+Nv1Mnv9we0z
+UTl4p3tQzCPWXnrSA82IgOdotLIez4YwXrgiKhcIkSSL+2yCmAoM+qkjiAIKq+l3UJ6Xhafe
+2Kg4Ek/0RkHpe6GwjTtdefkpXpZgccMEopOtKQMLJWsDM7p77x/amn6yIk2tpskKqUY/4n8Y
+xEiTWcRtTthYqZQIt+q94nKLYpt0o880SVOfvdEqp5KII7cTg60GJL+n6oN6hmP0bsAMvnk9
+1f8/lFKMi9tsNU/KnUhbDVpjJwBQkhgbqBx6GdtoqSLSlYNPVM0wlntwm1JhH4ybiQ5sNzqO
+7FlWC5bcYwkvOlx1gGrshY5jK/WjbA4paBpxSkgobJReirY9BeqITnvokXlub4tehHhM20Ik
+42pKa3kGaHmowvzflxqE+oysW5Oa9XbZxBCfkOMJ70o4hqa+n66+E/uKcN9NbKbTo3zt3xdt
+6ypOwHb74t5OcWaGx3EZsw0n0/V+WoLSpXOBwpx08+1yh7LV29aNQ0oEzVVkF6YYRQZtdIMe
+s3xB2i6sjLal21ntk7iBzMJwVoi524SAZ/oW8SuDAn1c93AWWwKZLALv5V3FZ2pDiQXArcfz
+DH2d5HJyNx7OlvKzNgEngwSyEC1XbjnOsZVUqGFENuDTa/brH4oEJHEkyWTyDudrz8iCEO80
+e1PE4qqJ5CllN0CSVWqz4CxGDFIQXzR6ohn8f3dR3+DAaLYvAjBVMLJjk7+nfnB2L0HpanhT
+Fz9AxPPIDf5pBQQwM14l8wKjEHIyfqclupeKNokBUr1ykioPyCr3nf4Rqe0Z4EKIY4OCpW6n
+hrkWHmvF7OKR+bnuSk3jnBxjSN0Ivy5q9q3fntYrhscMGGR73umfi8Z29tM1vSP9jBZvirAo
+geGf/sfOI0ewRvJf/5abnNg/78Zyk8WmlAHVFzNGcM3u3vhnNpTIVRuUyVkdSmOdbzeSfmqQ
+2HPCEdC9HNm25KJt1pD6v6aP3Tw7qGl+tZyps7VB2i+a+UGcwQcClcoXcPSdG7Z1gBTzSr84
+MuVPYlePuo1x+UwppSK3rM8ET6KqhGmESH5lKadvs8vdT6c407PfLcfxyAGzjH091prk2oRJ
+xB3oQAYcKvkuMcM6FSLJC263Dj+pe1GGEexk1AoysYe67tK0sB66hvbd92HcyWhW8/vI2/PM
+bX+OeEb7q+ugnsP+BmF/btWXn9AxfUqNWstyInKTn+XpqFViMIOG4e2xC4u/IvzG3VrTWUHF
+4pspH3k7GB/EOLvtbsR0uacBFlsColJy0FaWT9rrdueU3YEiIRCC8LGi1XpUa8f5adeBKWN+
+eRTrrF4o7uoNeGlnwZ7ebnb7k18Q0GRzzzTZPoMM4L703svfE/eNYWFHLY4NDQKSYgeum365
+WAfZpHOX7YOc6oRGrGB+QuGoyikTTDO8xpcEmb8vDz4ZwHhN0PS056LNJeMoI0A/5DJb3e10
+i1txlM48sbZBuIEIeixr52nwG4LuxqXGqShKaTfOrFxHjx4kI4/dp9dN/k8TGFsLWjuIgMJI
+6nRHbWrxB3F0XKXagtLLep1MDwDwAuCyiW2YC0JzRvsJViIgjDA+eiHX0O6/8xiK9dzMQpIz
+TVHSEqFlhORp0DGB2zATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IADMA
+ZgA3ADEAYQBmADYANQAtADEANgA4ADcALQA0ADQANABhAC0AOQBmADQANgAtAGMAOABiAGUA
+MQA5ADQAYwAzAGUAOABlMGsGCSsGAQQBgjcRATFeHlwATQBpAGMAcgBvAHMAbwBmAHQAIABF
+AG4AaABhAG4AYwBlAGQAIABDAHIAeQBwAHQAbwBnAHIAYQBwAGgAaQBjACAAUAByAG8AdgBp
+AGQAZQByACAAdgAxAC4AMDCCA2gGCSqGSIb3DQEHAaCCA1kEggNVMIIDUTCCA00GCyqGSIb3
+DQEMCgEDoIIDJTCCAyEGCiqGSIb3DQEJFgGgggMRBIIDDTCCAwkwggHxoAMCAQICEDbt9oc6
+oQinRwE1826MiBEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAxMJYW5vbnltb3VzMCAXDTE2
+MDcxOTIyMDAwMVoYDzIxMTYwNjI1MjIwMDAxWjAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8trBCTBjXXA4OgSO5nRTOU5T86ObCgc71
+J2oCuUigSddcTDzebaD0wcyAgf101hAdwMKQ9DvrK0nGvm7FAMnnUuVeATafKgshLuUTUUfK
+jx4Xif4LoS0/ev4BiOI5a1MlIRZ7T5Cyjg8bvuympzMuinQ/j1RPLIV0VGU2HuDxuuP3O898
+GqZ3+F6Al5CUcwmOX9zCs91JdN/ZFZ05SXIpHQuyPSPUX5Vy8F1ZeJ8VG3nkbemfFlVkuKQq
+vteL9mlT7z95rVZgGB3nUZL0tOB68eMcffA9zUksOmeTi5M6jnBcNeX2Jh9jS3YYd+IEliZm
+mggQG7kPta8f+NqezL77AgMBAAGjVTBTMBUGA1UdJQQOMAwGCisGAQQBgjcKAwQwLwYDVR0R
+BCgwJqAkBgorBgEEAYI3FAIDoBYMFGFub255bW91c0B3aW5kb3dzLXgAMAkGA1UdEwQCMAAw
+DQYJKoZIhvcNAQEFBQADggEBALh+4qmNPzC6M8BW9/SC2ACQxxPh06GQUGx0D+GLYnp61ErZ
+OtKyKdFh+uZWpu5vyYYAHCLXP7VdS/JhJy677ynAPjXiC/LAzrTNvGs74HDotD966Hiyy0Qr
+ospFGiplHGRA5vXA2CiKSX+0HrVkN7rhk5PYkc6R+/cdosd+QZ8lkEa9yDWc5l//vWEbzwVy
+mJf/PRf8NTkWAK6SPV7Y37j1mhkJjOH9VkRxNrd6kcihRa4u0ImXaXEsec77ER0so31DKCrP
+m+rqZPj9NZSIYP3sMGJ4Bmm/n2YRdeaUzTdocfD3TRnKxs65DSgpiSq1gmtsXM7jAPs/Egrg
+tbWEypgxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFKVgj/32UdEyuQcB
+rqr03dPnboinBBSU7mxdpB5LTCvorCI8Tk5OMiUzjgICB9A=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc7292.PFX()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pfx_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(3, asn1Object['version'])
+
+ oid = asn1Object['macData']['mac']['digestAlgorithm']['algorithm']
+
+ self.assertEqual(univ.ObjectIdentifier('1.3.14.3.2.26'), oid)
+
+ md_hex = asn1Object['macData']['mac']['digest'].prettyPrint()
+
+ self.assertEqual('0xa5608ffdf651d132b90701aeaaf4ddd3e76e88a7', md_hex)
+ self.assertEqual(
+ rfc5652.id_data, asn1Object['authSafe']['contentType'])
+
+ data, rest = der_decoder(
+ asn1Object['authSafe']['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ authsafe, rest = der_decoder(data, asn1Spec=rfc7292.AuthenticatedSafe())
+
+ self.assertFalse(rest)
+ self.assertTrue(authsafe.prettyPrint())
+ self.assertEqual(data, der_encoder(authsafe))
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+
+ data, rest = der_decoder(ci['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ sc, rest = der_decoder(data, asn1Spec=rfc7292.SafeContents())
+
+ self.assertFalse(rest)
+ self.assertTrue(sc.prettyPrint())
+ self.assertEqual(data, der_encoder(sc))
+
+ for sb in sc:
+ if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
+ bv, rest = der_decoder(
+ sb['bagValue'],
+ asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
+
+ self.assertFalse(rest)
+ self.assertTrue(bv.prettyPrint())
+ self.assertEqual(sb['bagValue'], der_encoder(bv))
+
+ for attr in sb['bagAttributes']:
+ if attr['attrType'] in rfc5652.cmsAttributesMap:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(
+ attr['attrValues'][0], der_encoder(av))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pfx_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ digest_alg = asn1Object['macData']['mac']['digestAlgorithm']
+
+ self.assertFalse(digest_alg['parameters'].hasValue())
+
+ authsafe, rest = der_decoder(
+ asn1Object['authSafe']['content'],
+ asn1Spec=rfc7292.AuthenticatedSafe(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(authsafe.prettyPrint())
+ self.assertEqual(
+ asn1Object['authSafe']['content'], der_encoder(authsafe))
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+ sc, rest = der_decoder(
+ ci['content'], asn1Spec=rfc7292.SafeContents(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(sc.prettyPrint())
+ self.assertEqual(ci['content'], der_encoder(sc))
+
+ for sb in sc:
+ if sb['bagId'] == rfc7292.id_pkcs8ShroudedKeyBag:
+ bv = sb['bagValue']
+ enc_alg = bv['encryptionAlgorithm']['algorithm']
+ self.assertEqual(
+ rfc7292.pbeWithSHAAnd3_KeyTripleDES_CBC, enc_alg)
+ enc_alg_param = bv['encryptionAlgorithm']['parameters']
+ self.assertEqual(2000, enc_alg_param['iterations'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7296.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7296.py
new file mode 100644
index 0000000000..4bc7577073
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7296.py
@@ -0,0 +1,160 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc7296
+
+
+class CertBundleTestCase(unittest.TestCase):
+ cert_bundle_pem_text = """\
+MIITfqCCA8kwggPFMIICraADAgECAhACrFwmagtAm48LefKuRiV3MA0GCSqGSIb3
+DQEBBQUAMGwxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAX
+BgNVBAsTEHd3dy5kaWdpY2VydC5jb20xKzApBgNVBAMTIkRpZ2lDZXJ0IEhpZ2gg
+QXNzdXJhbmNlIEVWIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAw
+MDAwWjBsMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYD
+VQQLExB3d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFz
+c3VyYW5jZSBFViBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAxszlc+b71LvlLS0ypt/lgT/JzSVJtnEqw9WUNGeiChywX2mmQLHEt7KP0Jik
+qUFZOtPclNY823Q4pErMTSWC90qlUxI47vNJbXGRfmO2q6Zfw6SE+E9iUb74xezb
+OJLjBuUIkQzEKEFV+8taiRV+ceg1v01yCT2+OjhQW3cxG42zxyRFmqesbQAUWgS3
+uhPrUQqYQUEiTmVhh4FBUKZ5XIneGUpX1S7mXRxTLH6YzRoGFqRoc9A0BBNcoXHT
+WnxV215k4TeHMFYE5RG0KYAS8Xk5iKICEXwnZreIt3jyygqoOKsKZMK/Zl2VhMGh
+JR6HXRpQCyASzEG7bgtROLhLywIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUsT7DaQP4v0cB1JgmGggC72NkK8MwHwYD
+VR0jBBgwFoAUsT7DaQP4v0cB1JgmGggC72NkK8MwDQYJKoZIhvcNAQEFBQADggEB
+ABwaBpfc15yfPIhmBghXIdshR/gqZ6q/GDJ2QBBXwYrzetkRZY41+p78RbWe2Uwx
+S7iR6EMsjrN4ztvjU3lx1uUhlAHaVYeaJGT2imbM3pw3zag0sWmbI8ieeCIrcEPj
+VUcxYRnvWMWFL04w9qAxFiPI5+JlFjPLvxoboD34yl6LMYtgCIktDAZcUrfE+QqY
+0RVfnxK+fDZjOL1EpH/kJisKxJdpDemM4sAQV7jIdhKRVfJIadi8KgJbD0TUIDHb
+9LpwJl2QYJ68SxcJL7TLHkNoyQcnwdJc9+ohuWgSnDycv578gFybY83sR6olJ2eg
+N/MAgn1U16n46S4To3foH0qgggS6MIIEtjCCA56gAwIBAgIQDHmpRLCMEZUgkmFf
+4msdgzANBgkqhkiG9w0BAQsFADBsMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGln
+aUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJE
+aWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTEzMTAyMjEyMDAw
+MFoXDTI4MTAyMjEyMDAwMFowdTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lD
+ZXJ0IEluYzEZMBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTE0MDIGA1UEAxMrRGln
+aUNlcnQgU0hBMiBFeHRlbmRlZCBWYWxpZGF0aW9uIFNlcnZlciBDQTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBANdTpARR+JmmFkhLZyeqk0nQOe0MsLAA
+h/FnKIaFjI5j2ryxQDji0/XspQUYuD0+xZkXMuwYjPrxDKZkIYXLBxA0sFKIKx9o
+m9KxjxKws9LniB8f7zh3VFNfgHk/LhqqqB5LKw2rt2O5Nbd9FLxZS99RStKh4gzi
+kIKHaq7q12TWmFXo/a8aUGxUvBHy/Urynbt/DvTVvo4WiRJV2MBxNO723C3sxIcl
+ho3YIeSwTQyJ3DkmF93215SF2AQhcJ1vb/9cuhnhRctWVyh+HA1BV6q3uCe7seT6
+Ku8hI3UarS2bhjWMnHe1c63YlC3k8wyd7sFOYn4XwHGeLN7x+RAoGTMCAwEAAaOC
+AUkwggFFMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud
+JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA0BggrBgEFBQcBAQQoMCYwJAYIKwYB
+BQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2ljZXJ0LmNvbTBLBgNVHR8ERDBCMECgPqA8
+hjpodHRwOi8vY3JsNC5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNl
+RVZSb290Q0EuY3JsMD0GA1UdIAQ2MDQwMgYEVR0gADAqMCgGCCsGAQUFBwIBFhxo
+dHRwczovL3d3dy5kaWdpY2VydC5jb20vQ1BTMB0GA1UdDgQWBBQ901Cl1qCt7vNK
+YApl0yHU+PjWDzAfBgNVHSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzANBgkq
+hkiG9w0BAQsFAAOCAQEAnbbQkIbhhgLtxaDwNBx0wY12zIYKqPBKikLWP8ipTa18
+CK3mtlC4ohpNiAexKSHc59rGPCHg4xFJcKx6HQGkyhE6V6t9VypAdP3THYUYUN9X
+R3WhfVUgLkc3UHKMf4Ib0mKPLQNa2sPIoc4sUqIAY+tzunHISScjl2SFnjgOrWNo
+PLpSgVh5oywM395t6zHyuqB8bPEs1OG9d4Q3A84ytciagRpKkk47RpqF/oOi+Z6M
+o8wNXrM9zwR4jxQUezKcxwCmXMS1oVWNWlZopCJwqjyBcdmdqEU79OX2olHdx3ti
+6G8MdOu42vi/hw15UJGQmxg7kVkn8TUoE6smftX3eqCCB9wwggfYMIIGwKADAgEC
+AhABW9pmX8RLdRe2iCweq9TcMA0GCSqGSIb3DQEBCwUAMHUxCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xNDAyBgNVBAMTK0RpZ2lDZXJ0IFNIQTIgRXh0ZW5kZWQgVmFsaWRhdGlvbiBT
+ZXJ2ZXIgQ0EwHhcNMTgwODE0MDAwMDAwWhcNMjAwODE4MTIwMDAwWjCB3DEdMBsG
+A1UEDwwUUHJpdmF0ZSBPcmdhbml6YXRpb24xEzARBgsrBgEEAYI3PAIBAxMCVVMx
+GTAXBgsrBgEEAYI3PAIBAhMIRGVsYXdhcmUxEDAOBgNVBAUTBzMwMTQyNjcxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMREwDwYDVQQHEwhTYW4gSm9z
+ZTEVMBMGA1UEChMMUGF5UGFsLCBJbmMuMRQwEgYDVQQLEwtDRE4gU3VwcG9ydDEX
+MBUGA1UEAxMOd3d3LnBheXBhbC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDOofrgGYvXjVHH1WKEgxO51/bNk8Vw0WlZAyu0iwAUULZ3mrI8+xOw
+gE5VGghgoQY9QNIA0mdFPrEmRRQAZXitszlL5s8oks4+tFzBHHtJp2D9BixRKxAR
+Afo6c54tufaJUrQyIMwr2mpfbPox3palkK7RmHdimcOqtUjjQyS/WcHxMkyX3wa9
+e1JoEB9ofJGupNnC90uGgxilWLvOtn/27w56p2AYkKoSGgXsNRGE5ySxns23sZOo
+tgSeTRe16K7X5JuzPcGtZGMRxlkVagZsrp8rNsf4aq0wKkBjkvVzSvJTaDJSDqEt
+hV+ZoGSFYpwaHArVir0sJ63E/aq2Tb97AgMBAAGjggP6MIID9jAfBgNVHSMEGDAW
+gBQ901Cl1qCt7vNKYApl0yHU+PjWDzAdBgNVHQ4EFgQUuzrmqCkAmIQyec538AFt
+Xwp5Y7kwgaUGA1UdEQSBnTCBmoIOd3d3LnBheXBhbC5jb22CEmhpc3RvcnkucGF5
+cGFsLmNvbYIMdC5wYXlwYWwuY29tggxjLnBheXBhbC5jb22CDWM2LnBheXBhbC5j
+b22CFGRldmVsb3Blci5wYXlwYWwuY29tggxwLnBheXBhbC5jb22CFXd3dy5wYXlw
+YWxvYmplY3RzLmNvbYIOY21zLnBheXBhbC5jb20wDgYDVR0PAQH/BAQDAgWgMB0G
+A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjB1BgNVHR8EbjBsMDSgMqAwhi5o
+dHRwOi8vY3JsMy5kaWdpY2VydC5jb20vc2hhMi1ldi1zZXJ2ZXItZzIuY3JsMDSg
+MqAwhi5odHRwOi8vY3JsNC5kaWdpY2VydC5jb20vc2hhMi1ldi1zZXJ2ZXItZzIu
+Y3JsMEsGA1UdIAREMEIwNwYJYIZIAYb9bAIBMCowKAYIKwYBBQUHAgEWHGh0dHBz
+Oi8vd3d3LmRpZ2ljZXJ0LmNvbS9DUFMwBwYFZ4EMAQEwgYgGCCsGAQUFBwEBBHww
+ejAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29tMFIGCCsGAQUF
+BzAChkZodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNlcnRTSEEyRXh0
+ZW5kZWRWYWxpZGF0aW9uU2VydmVyQ0EuY3J0MAwGA1UdEwEB/wQCMAAwggF+Bgor
+BgEEAdZ5AgQCBIIBbgSCAWoBaAB3AKS5CZC0GFgUh7sTosxncAo8NZgE+RvfuON3
+zQ7IDdwQAAABZTquQ3wAAAQDAEgwRgIhAMvZlCpgP2+v8gH82y3PQoMNVUVQNBjG
+4DZy7qRFBo0JAiEAkzEfNkc2/B+88VR3QjutnaF1Qpj0QkSodPGAtB377UUAdQBW
+FAaaL9fC7NP14b1Esj7HRna5vJkRXMDvlJhV1onQ3QAAAWU6rkPZAAAEAwBGMEQC
+IHAvzbsYhbMy5jUazj6X3mDMjjyryN5BMwbDIFv58T9nAiBxzUIRTfj+Kevp0mmO
+Oe9q6K/klOU2klRuVmcs7Gzw8AB2ALvZ37wfinG1k5Qjl6qSe0c4V5UKq1LoGpCW
+ZDaOHtGFAAABZTquRGgAAAQDAEcwRQIhAMvzcJw5loOfVnDNFEr4+c4y/usA2pU5
+M7vhHND680tHAiASqPd7KXNaNTJsBJ9IfBN6J2XwGJjxccRy9fJc9+UgYjANBgkq
+hkiG9w0BAQsFAAOCAQEAoeuef8cXLigvTQs4lbtbyp4UOIzspiMmHztqB95OS0ER
+/u7995SO0C0mQjvyPeiptQ5Yh+/OVCqV6p2ZpBmSc+mn5tzjP3LaVxoyjwghja03
+mNBXPmdkEIG+V78Ov5iIm6vxGH1xSjHssV8iXpWo3gJ+xH3krtY1Atkg243JgwNC
+I3xgp01VMLAmvIvvTqmIKeEd88Ukc6kHcZsEjxwtNivWx2nl1cyDu9B1wJK0D5Mu
+IBXgbFKmqUhWlEXRimphvONOJGd71qT94bT/+bhq28oGleH1leTvqft0fj+e/a7e
+Hx1u3fYAxNWjNAImIxpGUyUwSVo29w/CYYc2cS69y6GB7TCB6jCBqQIBATALBgcq
+hkjOOAQDBQAwLjELMAkGA1UEBhMCdXMxDDAKBgNVBAoTA3N1bjERMA8GA1UEAxMI
+aGFuZmVpeXUXDTA1MDEwNzIwMDkxMFoXDTA2MDEwNzIwMDkxMFowSTAjAgMBCTIX
+DTA1MDEwNzIwMDkxMFowDTALBgNVHRUEBAoCAQQwIgICMDkXDTA1MDEwNzIwMDkx
+MFowDTALBgNVHRUEBAoCAQEwCwYHKoZIzjgEAwUAAy8AMCwCFFbxw8qxTDJqc8H9
+O1QIkzwkkvJfAhRF5zFU8mFsrKmnE50ERySS8vA6AKGCAh8wggIbMIIBAwIBATAN
+BgkqhkiG9w0BAQsFADBsMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQg
+SW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2Vy
+dCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBFw0xOTA1MDIyMjE1NTRaFw0xOTA1
+MjMyMjE1NTRaMDEwLwIQDPWCOBgZnlb4K9ZS7Sft6RcNMTgxMDI1MTYxMTM4WjAM
+MAoGA1UdFQQDCgEAoDAwLjAfBgNVHSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2Qr
+wzALBgNVHRQEBAICAcQwDQYJKoZIhvcNAQELBQADggEBABPO3OA0OkQZ+RLVxz/c
+Nx5uNVEO416oOePkN0A4DxFztf337caS4OyfS9Wyu1j5yUdWJVpAKXSQeN95MqHk
+pSpYDssuqbuYjv8ViJfseGBgtXTczUzzNeNdY2uxMbCxuhmPkgacAo1lx9LkK2Sc
+YHWVbfFRF1UQ/dcmavaZsEOBNuLWOxQYA9MqfVNAymHe7vPqwm/8IY2FbHe9HsiJ
+ZfGxNWMDP5lmJiXmpntTeDQ2UjdiyXwGGKjyiSTFk2jVRutrGINufaoA/f7eCmIb
+4UDPbpMjVfD215dW8eBKouypCVoEvmCSSTacdiBI2yOluvMN0PzvPve0ECAE+D4e
+m9Y=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc7296.CertificateBundle()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_bundle_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cert_count = 0
+ crl_count = 0
+ unk_count = 0
+
+ for item in asn1Object:
+ if item.getName() == 'cert':
+ cert_count += 1
+
+ elif item.getName() == 'crl':
+ crl_count += 1
+
+ else:
+ unk_count += 1
+
+ self.assertEqual(3, cert_count)
+ self.assertEqual(2, crl_count)
+ self.assertEqual(0, unk_count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7508.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7508.py
new file mode 100644
index 0000000000..914e6d8b2a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7508.py
@@ -0,0 +1,134 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7508
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIE/AYJKoZIhvcNAQcCoIIE7TCCBOkCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggIA
+MIIB/AIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKgggElMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8X
+DTE5MDUyOTE4MjMxOVowKAYJKoZIhvcNAQk0MRswGTALBglghkgBZQMEAgKhCgYI
+KoZIzj0EAwMwMQYLKoZIhvcNAQkQAjcxIjEgCgEBMBswGRoERnJvbQwRYWxpY2VA
+ZXhhbXBsZS5jb20wPwYJKoZIhvcNAQkEMTIEMLbkIqT9gmce1Peqxm1E9OiwuY1R
+WHHGVufwmjb6XKzj4goQ5tryN5uJN9NM+ZkmbDBNBgsqhkiG9w0BCRACATE+MDwE
+IMdPIQ9kJ1cI9Q6HkRCzbXWdD331uAUCL3MMFXP4KFOjgAEBMBUwE4ERYWxpY2VA
+ZXhhbXBsZS5jb20wCgYIKoZIzj0EAwMEZzBlAjEAuZ8SebvwMRvLPn9+s3VHFUNU
+bEtkkWCao1uNm5TOzphK0NbxzOsD854aC5ReKPSDAjAm1U0siLQw5p4qzGwyxDw9
+5AI5J8Mvy+icNubmfsd4ofvxdaECdhr4rvsSMwbOsFk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ secure_header_field_attr_found = False
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat == rfc7508.id_aa_secureHeaderFieldsIdentifier:
+ self.assertIn(sat, rfc5652.cmsAttributesMap)
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ from_field = rfc7508.HeaderFieldName('From')
+ alice_email = rfc7508.HeaderFieldValue('alice@example.com')
+ for shf in sav['secHeaderFields']:
+ if shf['field-Name'] == from_field:
+ self.assertEqual(alice_email, shf['field-Value'])
+ secure_header_field_attr_found = True
+
+ self.assertTrue(secure_header_field_attr_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd = asn1Object['content']
+
+ self.assertEqual(
+ rfc5652.CMSVersion().subtype(value='v1'), sd['version'])
+
+ ect = sd['encapContentInfo']['eContentType']
+
+ self.assertIn(ect, rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_data, ect)
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc7508.id_aa_secureHeaderFieldsIdentifier:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+
+ secure_header_field_attr_found = False
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc7508.id_aa_secureHeaderFieldsIdentifier:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+ from_field = rfc7508.HeaderFieldName('From')
+ alice_email = rfc7508.HeaderFieldValue('alice@example.com')
+ for shf in sa['attrValues'][0]['secHeaderFields']:
+ if shf['field-Name'] == from_field:
+ self.assertEqual(alice_email, shf['field-Value'])
+ secure_header_field_attr_found = True
+
+ self.assertTrue(secure_header_field_attr_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7585.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7585.py
new file mode 100644
index 0000000000..5e538347c9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7585.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7585
+
+
+class NAIRealmCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIEZzCCA0+gAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBkjELMAkGA1UEBhMCRlIx
+DzANBgNVBAgMBlJhZGl1czESMBAGA1UEBwwJU29tZXdoZXJlMRQwEgYDVQQKDAtF
+eGFtcGxlIEluYzEgMB4GCSqGSIb3DQEJARYRYWRtaW5AZXhhbXBsZS5vcmcxJjAk
+BgNVBAMMHUV4YW1wbGUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE5MTExMTE4
+MDQyMVoXDTIwMDExMDE4MDQyMVowezELMAkGA1UEBhMCRlIxDzANBgNVBAgMBlJh
+ZGl1czEUMBIGA1UECgwLRXhhbXBsZSBJbmMxIzAhBgNVBAMMGkV4YW1wbGUgU2Vy
+dmVyIENlcnRpZmljYXRlMSAwHgYJKoZIhvcNAQkBFhFhZG1pbkBleGFtcGxlLm9y
+ZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM9HqbuyWpsTMKo739Dm
+DwmQo2HUkNdQYbvsB+e7ILsw8fWa2qnsF1CoRr/1bcZqXUR1To/QbHse7xSMZH9t
+F7rdlDMc7QtgdwVfn8TiL3hCg5LSE8iaBzfJUjrts/V5WOByP1DwJVM7W3Va/5dN
+oOiceVeC7ThghMlwIx/wN5cy78a8fPYV2FvPR6e+U2HG35zaIv2PizYcliF/QmZG
+gnw4Q9dYC1Lw/ogVBZBALlv+/MuGheb/xIuL8lu1PFZ0YbW65WLD9Cx4wvytAke7
+tKlhL/Kd4OBSeOY3OYmpxbc1gEUmFoLTlZesY2NP9Jyl5mGsIHtPdvVkh/tSBy8o
+VLUCAwEAAaOB3TCB2jAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DATBgNVHSUEDDAK
+BggrBgEFBQcDATA2BgNVHR8ELzAtMCugKaAnhiVodHRwOi8vd3d3LmV4YW1wbGUu
+Y29tL2V4YW1wbGVfY2EuY3JsMDcGCCsGAQUFBwEBBCswKTAnBggrBgEFBQcwAYYb
+aHR0cDovL3d3dy5leGFtcGxlLm9yZy9vY3NwMDoGA1UdEQQzMDGCEnJhZGl1cy5l
+eGFtcGxlLm9yZ6AbBggrBgEFBQcICKAPDA0qLmV4YW1wbGUuY29tMA0GCSqGSIb3
+DQEBCwUAA4IBAQBOhtH2Jpi0b0MZ8FBKTqDl44rIHL1rHG2mW/YYmRI4jZo8kFhA
+yWm/T8ZpdaotJgRqbQbeXvTXIg4/JNFheyLG4yLOzS1esdMAYDD5EN9/dXE++jND
+/wrfPU+QtTgzAjkgFDKuqO7gr1/vSizxLYTWLKBPRHhiQo7GGlEC6/CPb38x4mfQ
+5Y9DsKCp6BEZu+LByCho/HMDzcIPCdtXRX7Fs8rtX4/zRpVIdm6D+vebuo6CwRKp
+mIljfssCvZjb9YIxSVDmA/6Lapqsfsfo922kb+MTXvPrq2ynPx8LrPDrxKc8maYc
+Jiw8B0yjkokwojxyRGftMT8uxNjWQVsMDbxl
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ nai_realm_oid = rfc7585.id_on_naiRealm
+ nai_realm_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ self.assertEqual(
+ nai_realm_oid, gn['otherName']['type-id'])
+
+ onValue, rest = der_decoder(
+ gn['otherName']['value'], asn1Spec=rfc7585.NAIRealm())
+
+ self.assertFalse(rest)
+ self.assertTrue(onValue.prettyPrint())
+ self.assertEqual(
+ gn['otherName']['value'], der_encoder(onValue))
+ self.assertIn('example', onValue)
+
+ nai_realm_found = True
+
+ self.assertTrue(nai_realm_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ nai_realm_oid = rfc7585.id_on_naiRealm
+ nai_realm_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ self.assertEqual(
+ nai_realm_oid, gn['otherName']['type-id'])
+ self.assertIn('example', gn['otherName']['value'])
+
+ nai_realm_found = True
+
+ self.assertTrue(nai_realm_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7633.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7633.py
new file mode 100644
index 0000000000..64e874e7b1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7633.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7633
+
+
+class TLSFeaturesExtnTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEbTCCBBOgAwIBAgIRAO5f2N8q74GBATjTMXQCjlgwCgYIKoZIzj0EAwIwgZYx
+CzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNV
+BAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMTwwOgYDVQQD
+EzNDT01PRE8gRUNDIE9yZ2FuaXphdGlvbiBWYWxpZGF0aW9uIFNlY3VyZSBTZXJ2
+ZXIgQ0EwHhcNMTYwMTE1MDAwMDAwWhcNMTgwMTE0MjM1OTU5WjCBwjELMAkGA1UE
+BhMCUlUxDzANBgNVBBETBjExNzY0NzEUMBIGA1UECBMLTW9zY293IENpdHkxDzAN
+BgNVBAcTBk1vc2NvdzE4MDYGA1UECRMvQWthZGVtaWthIEthcGljeSBzdHJlZXQs
+IGhvdXNlIDQsIGFwYXJ0bWVudCAxNjYxGDAWBgNVBAoTD0FuZHJleSBDaHVyYW5v
+djETMBEGA1UECxMKSW5zdGFudFNTTDESMBAGA1UEAxMJYWRtc2VsLmVjMHYwEAYH
+KoZIzj0CAQYFK4EEACIDYgAEwrPPzgBO1vDNmV0UVvYSBnys9B7LVkGLiIBbKYf2
+nNFRuJKo1gzNurI8pv4CbvqjkCX4Je/aSeYFHSCR9y82+zTwYQuJFt5LIL5f+Syp
+xZ7aLH56bOiQ+QhCtIvWP4YWo4IB9TCCAfEwHwYDVR0jBBgwFoAUdr4iSO4/PvZG
+A9mHGNBlfiKcC+EwHQYDVR0OBBYEFHTFQqV+H5a7+RVL+70Z6zqCbqq9MA4GA1Ud
+DwEB/wQEAwIFgDAMBgNVHRMBAf8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
+BgEFBQcDAjBQBgNVHSAESTBHMDsGDCsGAQQBsjEBAgEDBDArMCkGCCsGAQUFBwIB
+Fh1odHRwczovL3NlY3VyZS5jb21vZG8uY29tL0NQUzAIBgZngQwBAgIwWgYDVR0f
+BFMwUTBPoE2gS4ZJaHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPRUNDT3Jn
+YW5pemF0aW9uVmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNybDCBiwYIKwYBBQUH
+AQEEfzB9MFUGCCsGAQUFBzAChklodHRwOi8vY3J0LmNvbW9kb2NhLmNvbS9DT01P
+RE9FQ0NPcmdhbml6YXRpb25WYWxpZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3J0MCQG
+CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wEQYIKwYBBQUHARgE
+BTADAgEFMCMGA1UdEQQcMBqCCWFkbXNlbC5lY4INd3d3LmFkbXNlbC5lYzAKBggq
+hkjOPQQDAgNIADBFAiAi6TXl76FTKPP1AhqtEjU5BjAj9Ju7CSKChHZSmzxeXQIh
+AOQSxhs011emVxyBIXT0ZGbmBY8LFRh6eGIOCAJbkM5T
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] == rfc7633.id_pe_tlsfeature:
+ s = extn['extnValue']
+ features, rest = der_decoder(
+ s, rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(features.prettyPrint())
+ self.assertEqual(s, der_encoder(features))
+ self.assertEqual(1, len(features))
+ self.assertEqual(5, features[0])
+
+ self.assertIn(rfc7633.id_pe_tlsfeature, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7773.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7773.py
new file mode 100644
index 0000000000..2b4e50b7cc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7773.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7773
+
+
+class AuthenticationContextExtnTestCase(unittest.TestCase):
+ pem_text = """\
+MIIMUjCCCzqgAwIBAgIQevDaX+wRYAlpUgjTYjCCRjANBgkqhkiG9w0BAQsFADCBuDELMAkGA1UE
+BhMCU0UxNTAzBgNVBAoTLERldiBURVNUIENBIG9yZyBBQiAoTk9UIEEgUkVBTCBPUkdBTklaQVRJ
+T04pMSAwHgYDVQQLExdDZW50cmFsIFNpZ25pbmcgU2VydmljZTEVMBMGA1UEBRMMQTEyMzQ1Ni03
+ODkwMTkwNwYDVQQDEzBDZW50cmFsIFNpZ25pbmcgQ0EwMDEgLSBFSUQgMi4wIERldiBURVNUIFNl
+cnZpY2UwHhcNMTkxMDA5MDc0ODI2WhcNMjAxMDA5MDc0ODI2WjBgMRUwEwYDVQQFEwwxODg4MDMw
+OTkzNjgxCzAJBgNVBAYTAlNFMQ0wCwYDVQQqEwRBZ2RhMRcwFQYDVQQDEw5BZ2RhIEFuZGVyc3Nv
+bjESMBAGA1UEBBMJQW5kZXJzc29uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjl1H
+7vveI/EUaF9z6EiL/AmTHDbpLAKoWh9JJjpRlb8lU0TseYOzZp6ySiAO8St2a/HxxhrNuAAELUwZ
+3oICkmxM/NeYgI7EEaLVPUwBAWfGZrRWb/+h8C6SrivWc73M/LI1A0B9tcEpUuh0CHTSVIBZsH+L
+IDyKW6n3T8YeI7+0CX391I/j3iyEBNFcfDaHaFChzkPxgPg6Xh1D1JWs+mUj1rOoTLxsyusWiIQk
+IkjDgFNUCpS1+NUvkTU1uFewvluxjOzRVqzYZWesOL+V/lGnyVPw4o1INEKYpOurYii2TXElTmXO
+iQdIG20S96uFH6vFFJ2cPwgYjWpory/K+QIDAQABo4IIrTCCCKkwCwYDVR0PBAQDAgZAMB0GA1Ud
+DgQWBBQo71oFnxX2kapLl3ZoYOylnJo01TATBgNVHSAEDDAKMAgGBgQAizABATBLBgNVHR8ERDBC
+MECgPqA8hjpodHRwczovL2VpZDJjc2lnLmtvbmtpLnNlL3B1Ymxpc2gvY3JsLzE4MTRiMGFiYzEx
+NGM3YmEuY3JsMIIH6wYHKoVwgUkFAQSCB94wggfaMIIH1gwraHR0cDovL2lkLmVsZWduYW1uZGVu
+LnNlL2F1dGgtY29udC8xLjAvc2FjaQyCB6U8c2FjaTpTQU1MQXV0aENvbnRleHQgeG1sbnM6c2Fj
+aT0iaHR0cDovL2lkLmVsZWduYW1uZGVuLnNlL2F1dGgtY29udC8xLjAvc2FjaSI+PHNhY2k6QXV0
+aENvbnRleHRJbmZvIElkZW50aXR5UHJvdmlkZXI9Imh0dHA6Ly9kZXYudGVzdC5zd2VkZW5jb25u
+ZWN0LnNlL2lkcCIgQXV0aGVudGljYXRpb25JbnN0YW50PSIyMDE5LTEwLTA5VDA3OjU4OjI2LjAw
+MFoiIFNlcnZpY2VJRD0iRmVkU2lnbmluZyIgQXV0aG5Db250ZXh0Q2xhc3NSZWY9Imh0dHA6Ly9p
+ZC5lbGVnbmFtbmRlbi5zZS9sb2EvMS4wL2xvYTMtc2lnbWVzc2FnZSIgQXNzZXJ0aW9uUmVmPSJf
+ZGM5MjM0Y2Y3Zjc5OWQwMDlmMjUwNWVhMzVlMWU0NmUiLz48c2FjaTpJZEF0dHJpYnV0ZXM+PHNh
+Y2k6QXR0cmlidXRlTWFwcGluZyBUeXBlPSJyZG4iIFJlZj0iMi41LjQuNSI+PHNhbWw6QXR0cmli
+dXRlIEZyaWVuZGx5TmFtZT0iU3dlZGlzaCBQZXJzb25udW1tZXIiIE5hbWU9InVybjpvaWQ6MS4y
+Ljc1Mi4yOS40LjEzIiB4bWxuczpzYW1sPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6YXNz
+ZXJ0aW9uIj48c2FtbDpBdHRyaWJ1dGVWYWx1ZSB4c2k6dHlwZT0ieHM6c3RyaW5nIiB4bWxuczp4
+cz0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3
+dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiPjE4ODgwMzA5OTM2ODwvc2FtbDpBdHRy
+aWJ1dGVWYWx1ZT48L3NhbWw6QXR0cmlidXRlPjwvc2FjaTpBdHRyaWJ1dGVNYXBwaW5nPjxzYWNp
+OkF0dHJpYnV0ZU1hcHBpbmcgVHlwZT0icmRuIiBSZWY9IjIuNS40LjQyIj48c2FtbDpBdHRyaWJ1
+dGUgRnJpZW5kbHlOYW1lPSJHaXZlbiBOYW1lIiBOYW1lPSJ1cm46b2lkOjIuNS40LjQyIiB4bWxu
+czpzYW1sPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6YXNzZXJ0aW9uIj48c2FtbDpBdHRy
+aWJ1dGVWYWx1ZSB4c2k6dHlwZT0ieHM6c3RyaW5nIiB4bWxuczp4cz0iaHR0cDovL3d3dy53My5v
+cmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxT
+Y2hlbWEtaW5zdGFuY2UiPkFnZGE8L3NhbWw6QXR0cmlidXRlVmFsdWU+PC9zYW1sOkF0dHJpYnV0
+ZT48L3NhY2k6QXR0cmlidXRlTWFwcGluZz48c2FjaTpBdHRyaWJ1dGVNYXBwaW5nIFR5cGU9InJk
+biIgUmVmPSIyLjUuNC4zIj48c2FtbDpBdHRyaWJ1dGUgRnJpZW5kbHlOYW1lPSJEaXNwbGF5IE5h
+bWUiIE5hbWU9InVybjpvaWQ6Mi4xNi44NDAuMS4xMTM3MzAuMy4xLjI0MSIgeG1sbnM6c2FtbD0i
+dXJuOm9hc2lzOm5hbWVzOnRjOlNBTUw6Mi4wOmFzc2VydGlvbiI+PHNhbWw6QXR0cmlidXRlVmFs
+dWUgeHNpOnR5cGU9InhzOnN0cmluZyIgeG1sbnM6eHM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEv
+WE1MU2NoZW1hIiB4bWxuczp4c2k9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWlu
+c3RhbmNlIj5BZ2RhIEFuZGVyc3Nvbjwvc2FtbDpBdHRyaWJ1dGVWYWx1ZT48L3NhbWw6QXR0cmli
+dXRlPjwvc2FjaTpBdHRyaWJ1dGVNYXBwaW5nPjxzYWNpOkF0dHJpYnV0ZU1hcHBpbmcgVHlwZT0i
+cmRuIiBSZWY9IjIuNS40LjQiPjxzYW1sOkF0dHJpYnV0ZSBGcmllbmRseU5hbWU9IlN1cm5hbWUi
+IE5hbWU9InVybjpvaWQ6Mi41LjQuNCIgeG1sbnM6c2FtbD0idXJuOm9hc2lzOm5hbWVzOnRjOlNB
+TUw6Mi4wOmFzc2VydGlvbiI+PHNhbWw6QXR0cmlidXRlVmFsdWUgeHNpOnR5cGU9InhzOnN0cmlu
+ZyIgeG1sbnM6eHM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hIiB4bWxuczp4c2k9
+Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlIj5BbmRlcnNzb248L3Nh
+bWw6QXR0cmlidXRlVmFsdWU+PC9zYW1sOkF0dHJpYnV0ZT48L3NhY2k6QXR0cmlidXRlTWFwcGlu
+Zz48L3NhY2k6SWRBdHRyaWJ1dGVzPjwvc2FjaTpTQU1MQXV0aENvbnRleHQ+MAkGA1UdEwQCMAAw
+HwYDVR0jBBgwFoAUqKv0QPwAYcLfcD/Vy1A2deHtiqcwDQYJKoZIhvcNAQELBQADggEBAETlZOIL
+NknxlMiYHCxoYypyzYuza2l3M4+YWakT0vFPgXpCk+l0dNst7h9nWvKKHCboSj+YP5dUCSsuUXhb
+7xTei/F2nj7q1oCPuVJGThZqhWgF/JkqOy34hHEM5VniJiQu2W9TjzRMSOSFzRlQsHcOuXzdTkhr
+CQpD1TWxYL9sCy4YoCdE4edfgBGBMujxoijl3/xJ5uI1FjhlSPVP88p8Wsi8i7GdMYuxqjZMwrt2
+PHIPgop3BNN9/BzW0cmdyNvFgcD9qR8Rv5aFBYuQbyg6fST8JdAOrbMrCST6v2U41OOXH5MC/kL6
+tAGXsYdcuQpglUngmo/FV4Z9qjIDkYQ=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc7773.id_ce_authContext:
+ s = extn['extnValue']
+ acs, rest = der_decoder(
+ s, asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+ self.assertFalse(rest)
+ self.assertTrue(acs.prettyPrint())
+ self.assertEqual(s, der_encoder(acs))
+ self.assertIn('id.elegnamnden.se', acs[0]['contextType'])
+ self.assertIn(
+ 'AuthContextInfo IdentityProvider', acs[0]['contextInfo'])
+
+ self.assertIn(rfc7773.id_ce_authContext, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7894.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7894.py
new file mode 100644
index 0000000000..3d38155c5a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7894.py
@@ -0,0 +1,84 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7894
+
+
+class AlternativeChallengePasswordTestCase(unittest.TestCase):
+ otp_pem_text = """\
+MIICsjCCAZwCAQAwJDELMAkGA1UEBhMCVVMxFTATBgNVBAMTDDRUUzJWMk5MWEE2
+WjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKmF0oUj5+1rBB+pUO8X
+7FPxer+1BhWOa54RTSucJmBaLx0H95qNaBCcctNDl1kcmIro/a0zMcEvj5Do29vQ
+lStJdTeJ/B3X4qzOGShupxJcAhCreRZjN6Yz3T9z0zJ8OPnRvJOzcSiIzlubc9lK
+Cpq4U0UsCLLfymOgL9NH4lZi96J+PFuJr0J+rTY38076U2jcPqNq5/L/d6NV9Sz2
+IVOvCK1kqP/nElJVibIQZvj9YESLUKyVAfTNxLj3+IpioOOv2dT3kB9wdi4plAVi
+UFEUvED1okRrI29+LdPV1UXglOCksyJIIw+DgDtutDE5Co6QkTNURFEdKIV9Sg13
+zEECAwEAAaBLMBkGCyqGSIb3DQEJEAI4MQoTCDkwNTAzODQ2MC4GCSqGSIb3DQEJ
+DjEhMB8wHQYDVR0OBBYEFBj12LVowM16Ed0D+AmoElKNYP/kMAsGCSqGSIb3DQEB
+CwOCAQEAZZdDWKejs3UVfgZI3R9cMWGijmscVeZrjwFVkn7MI9pEDZ2aS1QaRYjY
+1cu9j3i+LQp9LWPIW/ztYk11e/OcZp3fo8pZ+MT66n7YTWfDXNkqqA5xmI84DMEx
+/cqenyzOBZWqpZGx7eyM9BtnrdeJ0r2qSc7LYU25FbIQFJJf8IvgMAXWMs50fvs2
+Gzns447x952se2ReQ3vYhXdHvYYcgAZfSJZvK+nCmhzzqowv5p15Y5S+IHpBSXTO
+a1qhNW4cjdicQZUeQ2R5kiuwZ+8vHaq9jKxAEk0hBeqG6RQaxvNOBQhHtTLNGw/C
+NmaF8Y2Sl/MgvC5tjs0Ck0/r3lsoLQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6402.CertificationRequest()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.otp_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['certificationRequestInfo']['version'])
+
+ for attr in asn1Object['certificationRequestInfo']['attributes']:
+ self.assertIn(
+ attr['attrType'], rfc6402.cmcControlAttributesMap)
+
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ rfc6402.cmcControlAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc7894.id_aa_otpChallenge:
+ self.assertEqual('90503846', av['printableString'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.otp_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object['certificationRequestInfo']['attributes']:
+ self.assertIn(attr['attrType'], rfc6402.cmcControlAttributesMap)
+ if attr['attrType'] == rfc7894.id_aa_otpChallenge:
+ self.assertEqual(
+ '90503846', attr['attrValues'][0]['printableString'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7906.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7906.py
new file mode 100644
index 0000000000..3806987d4f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7906.py
@@ -0,0 +1,168 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7906
+
+
+class AttributeSetTestCase(unittest.TestCase):
+ attr_set_pem_text = """\
+MYIRmDAQBglghkgBZQIBDQcxA4IBATAQBglghkgBZQIBDQ0xAwoBUzAQBglghkgB
+ZQIBDQ4xAwoBAjAQBglghkgBZQIBDQ8xAwoBATARBglghkgBZQIBBUIxBAQCeQYw
+EgYJYIZIAWUCAQ0LMQUwAwoBATAVBglghkgBZQIBDQUxCDAGAgReAA//MBUGCyqG
+SIb3DQEJEAIuMQYCBF1qowYwGQYJYIZIAWUCAQVHMQwGCisGAQQBgaxgME0wGgYJ
+YIZIAWUCAQ0BMQ0wCwYJYIZIAWUDBAEtMBoGCWCGSAFlAgENDDENBgsqhkiG9w0B
+CRABGTAaBglghkgBZQIBDRUxDTALBglghkgBZQMEAS0wGwYJYIZIAWUCAQ0GMQ4w
+DAIEXQAAAAIEXwAP/zAdBgsqhkiG9w0BCRACKDEOMAwGCisGAQQBgaxgMDAwLQYJ
+YIZIAWUCAQVGMSAwHoYcaHR0cDovL3JlcG8uZXhhbXBsZS5jb20vcGtpLzAvBglg
+hkgBZQIBDQMxIjAgExFCb2d1cyBTaG9ydCBUaXRsZYEFQm9ndXOFATCHAU0wNAYJ
+YIZIAWUCAQVIMScwJRMRQm9ndXMgU2hvcnQgVGl0bGUTEEZha2UgU2hvcnQgVGl0
+bGUwOAYIKwYBBQUHAQsxLDAqMCgGCCsGAQUFBzAFhhxodHRwOi8vcmVwby5leGFt
+cGxlLmNvbS9wa2kvMEEGCyqGSIb3DQEJEAIEMTIwMAwjVGhlc2UgUkZDIDc5MDYg
+YXR0cmlidXRlcyBhcmUgYm9ndXMGCSqGSIb3DQEHATCBggYLKoZIhvcNAQkQAgIx
+czFxAgEBBgorBgEEAYGsYAEBMUwwJIAKYIZIAWUCAQgDA4EWMBQGCisGAQQBgaxg
+MEkxBgIBMAIBSTAkgApghkgBZQIBCAMEgRYwFAYKKwYBBAGBrGAwRTEGAgEwAgFF
+ExJCb2d1cyBQcml2YWN5IE1hcmswgYQGCWCGSAFlAgENFjF3MHUwMAYKYIZIAWUC
+AQJOAjAiMCAGCyqGSIb3DQEJEAwLMREMD2t0YS5leGFtcGxlLmNvbTAxBgsqhkiG
+9w0BCRABGTAiMCAGCyqGSIb3DQEJEAwLMREMD2t0YS5leGFtcGxlLmNvbTAOBgkq
+hkiG9w0BBwEKAQEwgaAGCWCGSAFlAgENEDGBkjCBj6EMBgorBgEEAYGsYDAwoH8G
+CWCGSAFlAgEQAARyMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UE
+BxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAe
+BgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMIIBvwYJYIZIAWUCAQVBMYIB
+sDCCAawEFO1lDTbJmd4voc2GDuaMzYO+XJSmMIIBkqCB/jB/BglghkgBZQIBEAAE
+cjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAMTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTB7BglghkgBZQIBEAAEbjBsMQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1w
+bGUxDDAKBgNVBAMTA0JvYjEeMBwGCSqGSIb3DQEJARYPYm9iQGV4YW1wbGUuY29t
+MIGOMIGLBglghkgBZQIBEAAEfjB8MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkEx
+EDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZpZ2lsIFNlY3VyaXR5IExMQzEX
+MBUGA1UECxMOS2V5IE1hbmFnZW1lbnQxGDAWBgNVBAMTD2t0YS5leGFtcGxlLmNv
+bTCCAoUGA1UEJDGCAnwwggJ4MIIB/qADAgECAgkApbNUKBuwbjswCgYIKoZIzj0E
+AwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9u
+MREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1MjkxNDQ1NDFaFw0yMDA1MjgxNDQ1
+NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRv
+bjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0B
+CQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE+M2f
+By/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+bz7uYZfYQxI3dVB0YCSD6Mt3y
+XFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/wto8hMKoFgkcscqIbiV7Zo4GU
+MIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0
+ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1UdDgQWBBTE
+uloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs
+4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL3kRhmn+PJTeKaL9sh/oQgHOY
+TgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94MaerDIrQpi0IDh+v0QSAv9rMife
+8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUwggSaBgNVBEYxggSRMIIEjTCCAgIw
+ggGIoAMCAQICCQDokdYGkU/O8jAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzEL
+MAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENB
+MB4XDTE5MDUxNDA4NTgxMVoXDTIxMDUxMzA4NTgxMVowPzELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBD
+QTB2MBAGByqGSM49AgEGBSuBBAAiA2IABPBRdlSx6I5qpZ2sKUMIxun1gUAzzstO
+YWKvKCnMoNT1x+pIKDvMEMimFcLAxxL3NVYOhK0Jty83SPDkKWMdx9/Okdhf3U/z
+xJlEnXDiFrAeM6xbG8zcCRiBnmd92UvsRqNQME4wHQYDVR0OBBYEFPI12zQE2qVV
+8r1pA5mwYuziFQjBMB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAwG
+A1UdEwQFMAMBAf8wCgYIKoZIzj0EAwMDaAAwZQIwWlGNjb9NyqJSzUSdsEqDSvMZ
+b8yFkxYCIbAVqQ9UqScUUb9tpJKGsPWwbZsnLVvmAjEAt/ypozbUhQw4dSPpWzrn
+5BQ0kKbDM3DQJcBABEUBoIOol1/jYQPmxajQuxcheFlkMIICgzCCAgqgAwIBAgIJ
+AKWzVCgbsG49MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJW
+QTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNjEy
+MTQzMTA0WhcNMjAwNjExMTQzMTA0WjB8MQswCQYDVQQGEwJVUzELMAkGA1UECBMC
+VkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZpZ2lsIFNlY3VyaXR5IExM
+QzEXMBUGA1UECxMOS2V5IE1hbmFnZW1lbnQxGDAWBgNVBAMTD2t0YS5leGFtcGxl
+LmNvbTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJf2XsTdVLcYASKJGtWjOAIFB8sX
+rsiE7G1tC+IP+iOEdJCZ+UvJ9Enx7v6dtaU4uy1FzuWCar45BVpKVK2TNWT8E7XA
+TkGBTIXGN76yJ5S09FdWp+hVkIkmyCJJujXzV6OBlDCBkTALBgNVHQ8EBAMCB4Aw
+QgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0
+ZWQgZm9yIGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQUbZtc/QOvtbnVi/FknxpW4LWt
+TQ8wHwYDVR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwCgYIKoZIzj0EAwMD
+ZwAwZAIwBniWpO11toMsV8fLBpBjA5YGQvd3TAcSw1lNbWpArL+hje1dzQ7pxsln
+kklv3CTxAjBuVebz4mN0Qkew2NK/itwlmi7i+QxPs/MSZ7YFsyTA5Z4h2GbLW+zN
+3xNCC91vfpcwggSgBglghkgBZQIBDRQxggSRMYIEjTCCAgIwggGIoAMCAQICCQDo
+kdYGkU/O8jAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkEx
+EDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUxNDA4
+NTgxMVoXDTIxMDUxMzA4NTgxMVowPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABPBRdlSx6I5qpZ2sKUMIxun1gUAzzstOYWKvKCnMoNT1x+pI
+KDvMEMimFcLAxxL3NVYOhK0Jty83SPDkKWMdx9/Okdhf3U/zxJlEnXDiFrAeM6xb
+G8zcCRiBnmd92UvsRqNQME4wHQYDVR0OBBYEFPI12zQE2qVV8r1pA5mwYuziFQjB
+MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAwGA1UdEwQFMAMBAf8w
+CgYIKoZIzj0EAwMDaAAwZQIwWlGNjb9NyqJSzUSdsEqDSvMZb8yFkxYCIbAVqQ9U
+qScUUb9tpJKGsPWwbZsnLVvmAjEAt/ypozbUhQw4dSPpWzrn5BQ0kKbDM3DQJcBA
+BEUBoIOol1/jYQPmxajQuxcheFlkMIICgzCCAgqgAwIBAgIJAKWzVCgbsG49MAoG
+CCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNjEyMTQzMTA0WhcNMjAw
+NjExMTQzMTA0WjB8MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcT
+B0hlcm5kb24xGzAZBgNVBAoTElZpZ2lsIFNlY3VyaXR5IExMQzEXMBUGA1UECxMO
+S2V5IE1hbmFnZW1lbnQxGDAWBgNVBAMTD2t0YS5leGFtcGxlLmNvbTB2MBAGByqG
+SM49AgEGBSuBBAAiA2IABJf2XsTdVLcYASKJGtWjOAIFB8sXrsiE7G1tC+IP+iOE
+dJCZ+UvJ9Enx7v6dtaU4uy1FzuWCar45BVpKVK2TNWT8E7XATkGBTIXGN76yJ5S0
+9FdWp+hVkIkmyCJJujXzV6OBlDCBkTALBgNVHQ8EBAMCB4AwQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjAdBgNVHQ4EFgQUbZtc/QOvtbnVi/FknxpW4LWtTQ8wHwYDVR0jBBgw
+FoAU8jXbNATapVXyvWkDmbBi7OIVCMEwCgYIKoZIzj0EAwMDZwAwZAIwBniWpO11
+toMsV8fLBpBjA5YGQvd3TAcSw1lNbWpArL+hje1dzQ7pxslnkklv3CTxAjBuVebz
+4mN0Qkew2NK/itwlmi7i+QxPs/MSZ7YFsyTA5Z4h2GbLW+zN3xNCC91vfpc=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.AttributeSet()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.attr_set_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object:
+ self.assertIn(attr['type'], rfc5652.cmsAttributesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ if attr['type'] == rfc7906.id_aa_KP_contentDecryptKeyID:
+ self.assertEqual(univ.OctetString(hexValue='7906'), av)
+
+ def testOpenTypes(self):
+ openTypesMap = rfc5280.certificateAttributesMap.copy()
+ openTypesMap.update(rfc5652.cmsAttributesMap)
+
+ substrate = pem.readBase64fromText(self.attr_set_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object:
+ if attr['type'] == rfc7906.id_aa_KP_contentDecryptKeyID:
+ self.assertEqual(
+ univ.OctetString(hexValue='7906'), attr['values'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc7914.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc7914.py
new file mode 100644
index 0000000000..e0b1cb3728
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc7914.py
@@ -0,0 +1,97 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc7914
+from pyasn1_modules import rfc8018
+
+
+# From RFC 7914, Section 13
+
+class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIHiME0GCSqGSIb3DQEFDTBAMB8GCSsGAQQB2kcECzASBAVNb3VzZQIDEAAAAgEI
+AgEBMB0GCWCGSAFlAwQBKgQQyYmguHMsOwzGMPoyObk/JgSBkJb47EWd5iAqJlyy
++ni5ftd6gZgOPaLQClL7mEZc2KQay0VhjZm/7MbBUNbqOAXNM6OGebXxVp6sHUAL
+iBGY/Dls7B1TsWeGObE0sS1MXEpuREuloZjcsNVcNXWPlLdZtkSH6uwWzR0PyG/Z
++ZXfNodZtd/voKlvLOw5B3opGIFaLkbtLZQwMiGtl42AS89lZg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5958.EncryptedPrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ea = asn1Object['encryptionAlgorithm']
+
+ self.assertEqual(rfc8018.id_PBES2, ea['algorithm'])
+ self.assertIn(ea['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ params, rest = der_decoder(
+ ea['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[ea['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(params.prettyPrint())
+ self.assertEqual(ea['parameters'], der_encoder(params))
+
+ kdf = params['keyDerivationFunc']
+
+ self.assertEqual(rfc7914.id_scrypt, kdf['algorithm'])
+ self.assertIn(kdf['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ kdfp, rest = der_decoder(
+ kdf['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[kdf['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(kdfp.prettyPrint())
+ self.assertTrue(kdf['parameters'], der_encoder(kdfp))
+ self.assertEqual(1048576, kdfp['costParameter'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ea = asn1Object['encryptionAlgorithm']
+
+ self.assertEqual(rfc8018.id_PBES2, ea['algorithm'])
+
+ params = asn1Object['encryptionAlgorithm']['parameters']
+
+ self.assertEqual(
+ rfc7914.id_scrypt, params['keyDerivationFunc']['algorithm'])
+
+ kdfp = params['keyDerivationFunc']['parameters']
+
+ self.assertEqual(1048576, kdfp['costParameter'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8017.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8017.py
new file mode 100644
index 0000000000..9601997f1d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8017.py
@@ -0,0 +1,125 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8017
+from pyasn1_modules import rfc2985
+
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MIIBAzA8BgkqhkiG9w0BAQcwL6APMA0GCWCGSAFlAwQCAgUAoRwwGgYJKoZIhvcN
+AQEIMA0GCWCGSAFlAwQCAgUAMDwGCSqGSIb3DQEBCjAvoA8wDQYJYIZIAWUDBAIC
+BQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAICBQAwDQYJKoZIhvcNAQECBQAw
+DQYJKoZIhvcNAQEEBQAwDQYJKoZIhvcNAQEFBQAwDQYJKoZIhvcNAQEOBQAwDQYJ
+KoZIhvcNAQELBQAwDQYJKoZIhvcNAQEMBQAwDQYJKoZIhvcNAQENBQAwDQYJKoZI
+hvcNAQEPBQAwDQYJKoZIhvcNAQEQBQA=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for cap in asn1Object:
+ self.assertIn(cap['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ if cap['parameters'].hasValue():
+ p, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[cap['algorithm']])
+
+ self.assertFalse(rest)
+ if not p == univ.Null(""):
+ self.assertTrue(p.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(p))
+
+ if cap['algorithm'] == rfc8017.id_RSAES_OAEP:
+ self.assertEqual(
+ rfc8017.id_sha384, p['hashFunc']['algorithm'])
+ self.assertEqual(
+ rfc8017.id_mgf1, p['maskGenFunc']['algorithm'])
+
+ def OpenTypesCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for cap in asn1Object:
+ if cap['algorithm'] == rfc8017.id_RSAES_OAEP:
+ p = cap['parameters']
+ self.assertEqual(
+ rfc8017.id_sha384, p['hashFunc']['algorithm'])
+ self.assertEqual(
+ rfc8017.id_mgf1, p['maskGenFunc']['algorithm'])
+
+
+class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIE2QIBAQKCAQEAn82EqwXasE2TFNSmZucB8LNza2mOWLHF3nxpxKXalPMDvezc
+5Dq7Ytcv/k9jJL4j4jYfvR4yyZdU9iHLaD6hOINZ8E6hVpx/4c96ZUSOLzD2g+u+
+jIuoNfG+zygSBGYCS6BLCAIsZ+2wUyxYpLJknHJld9/jy+aLmmyrilhH9dH5AUiV
+3NeWht/68++dMXf4ZI/gV4bMSlWhggxkz2WJJpiQdCdJatGkwNDkHmLA9X0tC6OH
+SPE7qYdxG38cYS5F445SgnhDpiK7BodSqYLwgehaDjoOYdEgHVnOcpBCDI5zCJSL
+b1c/z8uhrB1xxlECR44wCLcKsIIYQxaEErRJ/wIDAQABAoIBAD+Ra5L0szeqxDVn
+GgKZJkZvvBwgU0WpAgMtDo3xQ/A4c2ab0IrhaiU5YJgTUGcPVArqaNm8J4vVrTBz
+5QxEzbFDXwWe4cMoYh6bgB7ElKLlIUr8/kGZUfgc7kI29luEjcAIEAC2/RQHesVn
+DHkL5OzqZL+4fIwckAMh0tXdflsPgZ/jgIaKca4OqKu4KGnczm3UvqtlvwisAjkx
+zMyfZXOLn0vEwP2bfbhQrCVrP7n6a+CV+Kqm8NBWnbiS6x2rWemVVssNTbfXQztq
+wC6ZJZCLK7plciDBWvHcS6vxdcsS9DUxuqSV6o/stCGTl1D+9tDx8Od0Eunna2B2
+wAoRHZECVgbNO1bqwfYpp5aFuySWoP+KZz8f/5ZkHjLwiNGpQcqVd4+7Ql2R4qgF
+NgSoQQOZFhKtiOeLVU0HYfp6doI4waSINZdF/fJDHD6fY3AMOc/IIMDHHIzbAlYG
+vKOocLXWj/2+gcyQ1XoAmrE70aIFUBLSvd7RCi8GI74zYWp5lCSvO850Z4GsWSZT
+41iF13sTDDJPm3+BbzMvEu2GuACi/8/IpbUr24/FP9Cp1Rf7kwJWAgMxfoshbrNu
+ebQB5laHNnT+DYhrOFVRNiNDaD2bUNSetrFidosWtD4ueHxMGENwa4BbFJ9+UrdP
+fyxC6k7exM7khGjaNZczwTep1VpYtKjzP/bp9KcCVgYoj9s9HZ1FCAsNEPodjGfd
+AcPTQS9mIa7wzy19B7uvFQJXPURi/p4KKBMVQ99Pp8/r9lJzxxiEf8FyPr8N7lZM
+EUKkFkDrZQDhKpsrHWSNj6yRFlltAlYC7dYR8KLEWoOUATLosxQhwgypv+23r+d4
+ZdPOdDv9n8Kmj+NFy/oISFfdXzlOU4RWQtMx3hEwAabwct7vjiJEej/kmiTqco02
+17tt13VvvQ5ZXF73dDCCAQwwggEIAlYDfMpM1WNfxcLLOgkRZ+0S9OvIrEOi0ALV
+SquTdi/thhCuCsK3lMD4miN9te8j16YtqEFVWXC3a6DWwIJ6m/xZ50bBwPqM8RsI
+6FWhZw4Dr5VqjYXUvwJWAvapRk9SydDYri/cAtGIkUJVlspkE1emALAaSw30vmfd
+hrgYLT6YGOmK3UmcNJ4NVeET275MXWF1ZOhkOGKTN6aj5wPhJaHBMnmUQrq7GwC6
+/LfUkSsCVgMCDTV9gbFW8u6TcTVW85dBIeUGxZh1T2pbU3dkGO3IOxOhzJUplH4/
+EeEs9dusHakg1ERXAg4Vo1YowPW8kuVbZ9faxeVrmuER5NcCuZzS5X/obGUw
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc8017.RSAPrivateKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8018.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8018.py
new file mode 100644
index 0000000000..f354c63eb2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8018.py
@@ -0,0 +1,58 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8018
+
+
+class PWRITestCase(unittest.TestCase):
+ rfc3211_ex1_pem_text = """\
+o1MCAQCgGgYJKoZIhvcNAQUMMA0ECBI0Vnh4VjQSAgEFMCAGCyqGSIb3DQEJEAMJMBEGBSsO
+AwIHBAjv5ZjvIbM9bQQQuBslZe43PKbe3KJqF4sMEA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.RecipientInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.rfc3211_ex1_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ alg_oid = asn1Object['pwri']['keyDerivationAlgorithm']['algorithm']
+
+ self.assertEqual(rfc8018.id_PBKDF2, alg_oid)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.rfc3211_ex1_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ icount = (asn1Object['pwri']['keyDerivationAlgorithm']
+ ['parameters']['iterationCount'])
+
+ self.assertEqual(5, icount)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8103.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8103.py
new file mode 100644
index 0000000000..002f5c9067
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8103.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8103
+
+
+class CAEADChaCha20Poly1305TestCase(unittest.TestCase):
+ alg_id_pem_text = "MBsGCyqGSIb3DQEJEAMSBAzK/rq++s7brd7K+Ig="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8103.id_alg_AEADChaCha20Poly1305, asn1Object[0])
+
+ param, rest = der_decoder.decode(
+ asn1Object[1], rfc8103.AEADChaCha20Poly1305Nonce())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(
+ rfc8103.AEADChaCha20Poly1305Nonce(value='\xca\xfe\xba\xbe\xfa'
+ '\xce\xdb\xad\xde\xca'
+ '\xf8\x88'),
+ param)
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
+
+
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8209.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8209.py
new file mode 100644
index 0000000000..1afd77f24a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8209.py
@@ -0,0 +1,63 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8209
+
+
+class CertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIBiDCCAS+gAwIBAgIEAk3WfDAKBggqhkjOPQQDAjAaMRgwFgYDVQQDDA9ST1VU
+RVItMDAwMEZCRjAwHhcNMTcwMTAxMDUwMDAwWhcNMTgwNzAxMDUwMDAwWjAaMRgw
+FgYDVQQDDA9ST1VURVItMDAwMEZCRjAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
+AARzkbq7kqDLO+EOWbGev/shTgSpHgy6GxOafTjZD3flWqBbjmlWeOD6FpBLVdnU
+9cDfxYiV7lC8T3XSBaJb02/1o2MwYTALBgNVHQ8EBAMCB4AwHQYDVR0OBBYEFKtN
+kQ9VyucaIV7zyv46zEW17sFUMBMGA1UdJQQMMAoGCCsGAQUFBwMeMB4GCCsGAQUF
+BwEIAQH/BA8wDaAHMAUCAwD78KECBQAwCgYIKoZIzj0EAwIDRwAwRAIgB7e0al+k
+8cxoNjkDpIPsfIAC0vYInUay7Cp75pKzb7ECIACRBUqh9bAYnSck6LQi/dEc8D2x
+OCRdZCk1KI3uDDgp
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(rfc8209.id_kp_bgpsec_router, extnValue)
+
+ self.assertIn(rfc5280.id_ce_extKeyUsage, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8226.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8226.py
new file mode 100644
index 0000000000..aa5257c3d2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8226.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8226
+
+
class JWTClaimConstraintsTestCase(unittest.TestCase):
    """Round-trip a DER-encoded JWTClaimConstraints value (RFC 8226)."""

    jwtcc_pem_text = ("MD2gBzAFFgNmb2+hMjAwMBkWA2ZvbzASDARmb28xDARmb28yDARmb2"
                      "8zMBMWA2JhcjAMDARiYXIxDARiYXIy")

    def setUp(self):
        self.asn1Spec = rfc8226.JWTClaimConstraints()

    def testDerCodec(self):
        """Decode the sample, check for trailing bytes, then re-encode."""
        der_bytes = pem.readBase64fromText(self.jwtcc_pem_text)
        decoded, trailer = der_decoder.decode(
            der_bytes, asn1Spec=self.asn1Spec)

        self.assertFalse(trailer)
        self.assertTrue(decoded.prettyPrint())
        self.assertEqual(der_bytes, der_encoder.encode(decoded))
+
+
class TNAuthorizationListTestCase(unittest.TestCase):
    """Round-trip a DER-encoded TNAuthorizationList value (RFC 8226)."""

    tnal_pem_text = ("MCugBxYFYm9ndXOhEjAQFgo1NzE1NTUxMjEyAgIDFKIMFgo3MDM1NTU"
                     "xMjEy")

    def setUp(self):
        self.asn1Spec = rfc8226.TNAuthorizationList()

    def testDerCodec(self):
        """Decode the sample, check for trailing bytes, then re-encode."""
        der_bytes = pem.readBase64fromText(self.tnal_pem_text)
        decoded, trailer = der_decoder.decode(
            der_bytes, asn1Spec=self.asn1Spec)

        self.assertFalse(trailer)
        self.assertTrue(decoded.prettyPrint())
        self.assertEqual(der_bytes, der_encoder.encode(decoded))
+
+
class CertificateOpenTypesTestCase(unittest.TestCase):
    """Decode a certificate carrying a TNAuthorizationList extension."""

    cert_pem_text = """\
MIICkTCCAhegAwIBAgIJAKWzVCgbsG4+MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkwNzE4MTUwNzQ5WhcNMjAwNzE3MTUwNzQ5WjBxMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xKDAmBgNVBAoTH0Zh
a2UgVGVsZXBob25lIFNlcnZpY2UgUHJvdmlkZXIxGTAXBgNVBAMTEGZha2UuZXhh
bXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARLyLhnsvrS9WBY29tmN2LI
CF/wuX4ohhUy3sxO0ynCplHHojpDg+tghGzusf0aLtMDu1II915O8YK5XVL+KZJD
C82jybxWIKjjzX2qc5/O06joUttdEDzkTaD0kgbcXl6jgawwgakwCwYDVR0PBAQD
AgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0
cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFHOI3GpDt9dWsTAZxhcj
96uyL2aIMB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMBYGCCsGAQUF
BwEaBAowCKAGFgRmYWtlMAoGCCqGSM49BAMDA2gAMGUCMQCy+qFhT7X1i18jcyIa
Jkgz/tumrPsaBA2RihkooTEr4GbqC650Z4Cwt7+x2xZq37sCMFSM6fRueLyV5StG
yEFWA6G95b/HbtPMTjLpPKtrOjhofc4LyVCDYhFhKzpvHh1qeA==
"""

    def setUp(self):
        # Decode against the generic RFC 5280 Certificate schema.
        self.asn1Spec = rfc5280.Certificate()

    def testDerCodec(self):
        """Round-trip the certificate and check the TNAuthList SPC entry."""
        der_bytes = pem.readBase64fromText(self.cert_pem_text)
        cert, trailer = der_decoder.decode(
            der_bytes, asn1Spec=self.asn1Spec)

        self.assertFalse(trailer)
        self.assertTrue(cert.prettyPrint())
        self.assertEqual(der_bytes, der_encoder.encode(cert))

        seen_oids = []
        for extn in cert['tbsCertificate']['extensions']:
            oid = extn['extnID']
            seen_oids.append(oid)
            if oid not in rfc5280.certificateExtensionsMap:
                continue

            decoded, _ = der_decoder.decode(
                extn['extnValue'],
                asn1Spec=rfc5280.certificateExtensionsMap[oid])
            self.assertEqual(
                extn['extnValue'], der_encoder.encode(decoded))

            # The TNAuthorizationList should name the "fake" service provider.
            if oid == rfc8226.id_pe_TNAuthList:
                self.assertEqual('fake', decoded[0]['spc'])

        self.assertIn(rfc8226.id_pe_TNAuthList, seen_oids)
+
+
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Run the suite; exit non-zero when any test fails so callers see it.
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(0 if outcome.wasSuccessful() else 1)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8358.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8358.py
new file mode 100644
index 0000000000..48a01ce45b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8358.py
@@ -0,0 +1,195 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8358
+
+
class P7STestCase(unittest.TestCase):
    """Round-trip three CMS SignedData messages (".p7s" signatures) whose
    encapsulated content type is one of the RFC 8358 document-signing OIDs:
    ASCII text with CRLF, PDF, and XML."""

    # Three detached signatures over IETF documents; each carries a different
    # RFC 8358 eContentType OID inside its encapContentInfo.
    pem_text_list = (
"""\
MIIJWgYJKoZIhvcNAQcCoIIJSzCCCUcCAQMxDTALBglghkgBZQMEAgEwDQYLKoZIhvcNAQkQ
ARugggZ0MIIGcDCCBVigAwIBAgIRANa58hQvZ26svTWQaGtqo/YwDQYJKoZIhvcNAQELBQAw
gZcxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcT
B1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMT0wOwYDVQQDEzRDT01PRE8g
UlNBIENsaWVudCBBdXRoZW50aWNhdGlvbiBhbmQgU2VjdXJlIEVtYWlsIENBMB4XDTE1MDIx
MjAwMDAwMFoXDTIwMDIxMjIzNTk1OVowgZUxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhWaXJn
aW5pYTEPMA0GA1UEBxMGUmVzdG9uMRMwEQYDVQQKEwpJRVRGIFRydXN0MRkwFwYDVQQLExBT
ZWNyZXRhcmlhdCBXZXN0MQ0wCwYDVQQDEwRJRVRGMSMwIQYJKoZIhvcNAQkBFhRpZXRmLWFj
dGlvbkBpZXRmLm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUkKtMPP1RA
FU6sxMezYJKCt4rw30RDieB8/P67TMhA6j8Um4a2Xo+CP9Ce1oMri2bwaaQPYWB4ciEL32za
0NUE0B0iCjZZl36hon6wW6mJw1NGD/AFxnKWzhkSWG6BHMoeOAzu/ye8sHu4Jp5nazpGptK7
30SjTS3JJFU9pHwQY6JlcmwVv0j2rsT3gj92Cbj5S+U5wCSE6+mZbCC+VPFeeI1kFITwyaIm
uK9kSYHr15OXua/jrYNrHNRfqtexGKSgnUT96KkTh9TVvkMETB1WJS4WuEIP6GITvwVTp0lA
qS3oNO4SM4tgFVdYqppcvZBg52kHY9y7IdR156c99zzZDBfWBduqjs/AXa0uol0EJd7qFLUs
xEJ96XN3tPgR/Cwq18ec29pZQH6kO81Kato/RsQrj6A05TFx/J0MYE0R1MZqvIDUu55vlicb
wT2lpXMiz1szKuvjTZRR9H/IgbKPNpt/kDUSgXLYwuKBm+nBoJXgybEyJ+A4arb60d9Uiusu
UA8/h6s1rDMuTnIYMbIii4Y+KgevBWPawqk0xioilEMJ0RBaBVrDreuFlK8aYh+Jo2piruBA
QnB9ZaPmEM1HPNArJxqL6XcUJTkFxNSksOATDFV5sEoBWYIe6qv2nV2r/HWDAEaa4WH2h3o/
kASarXk3SxPXmfjOOr1XgpKjAgMBAAGjggG1MIIBsTAfBgNVHSMEGDAWgBSCr2yM+MX+lmF8
6B89K3FIXsSLwDAdBgNVHQ4EFgQU7Olc92Oy6nkCvpv6jCj6qN8YPtUwDgYDVR0PAQH/BAQD
AgeAMAwGA1UdEwEB/wQCMAAwRgYDVR0gBD8wPTA7BgwrBgEEAbIxAQIBAwUwKzApBggrBgEF
BQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwWgYDVR0fBFMwUTBPoE2gS4ZJ
aHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBQ2xpZW50QXV0aGVudGljYXRpb25h
bmRTZWN1cmVFbWFpbENBLmNybDCBiwYIKwYBBQUHAQEEfzB9MFUGCCsGAQUFBzAChklodHRw
Oi8vY3J0LmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDbGllbnRBdXRoZW50aWNhdGlvbmFuZFNl
Y3VyZUVtYWlsQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20w
HwYDVR0RBBgwFoEUaWV0Zi1hY3Rpb25AaWV0Zi5vcmcwDQYJKoZIhvcNAQELBQADggEBAGPm
QUKHxkEQ9vk69vIP68anHc8UsTv5powtLSXLqUw3rAoKAdoWkKjb7ZByHCuFiNk1BvTnhQPh
LAZm5dI8dYWKp3zgWVxsCXOQv2K4XbaQpIk8KKdLycHWsOq2OD4xBdhfTQqDj9EidhxaLf4B
bRUePOuWVvwNqHI6OQ9FbRllUsTsSH3XK7z9Ru/0Ub07uEzmWyrIpeFRLJUg9EqQj25pw8j7
N9ym8ItpfEQvK4Nrzt9KnGwFDaNOUjYAvejig9iUNdOXEQKVzbq8fC25HrXPQisq8u2jrP38
cRqzwgGHZ1bJrQa8+LPBfADZ4ZHeqlEe6IqZhS/wDSuDNCIZHtkxggKqMIICpgIBA4AU7Olc
92Oy6nkCvpv6jCj6qN8YPtUwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3
DQEJEAEbMBwGCSqGSIb3DQEJBTEPFw0xOTA2MDkxNjU3NTdaMC8GCSqGSIb3DQEJBDEiBCDx
ACvH9u26K1BdX+IPp6vguUAtA9k0lp9JMNunvXTuQzANBgkqhkiG9w0BAQEFAASCAgBY8kFl
SxQIvU4n6LaVoAV6ibHrlCqOp9KrUc9DmwXtDifsgoGfhDHb6i5k9BSHmerjTGF6mLlquPUV
Z2EHSUuVpk8rX//ki6vngq91+f+ufrzEpvO6BLc2aO/zOat0W3U2hiq3zJSLMYMNZhX484Nq
9+ImsU0S5f32ZpEXH0lFINUaZFo0eRAOZywqNuY57stjWBxTI6MA00S0+eMuWlmkMy0C2LL9
BQvWW01/ri2UDEprAiKo2sXLcScgHimEVYHuWsrnP+sJ3XVWRsWGRW2i5qIalu2ZGmmIU/vg
bdBzQnAjCoS2xC5Kwv+cqtUiiyLI0nnuu1aKKi4hivmt1n+hSIWWgGNwTFn3S4+mYDDNSH0u
ocOr0uDFVv/SH9QPQuGh9rpSz3cd3hlA4R63Rylm46Tt6DnXiovu0mDoos68UQjIAPXWj1ES
Peeubp+wSbuqN8Rh+koZU+HK7YpsR2bB4hL0GIwMA9lQjGSCxPCt1ViRL6zAWECzQC1YgLyc
+f1Fe8pkaWUbZz+18H/rJoKsXiNWH8yhfAyk+JGTxc4qxWJ/BuF0vzSyuVEffuxIHrOMZTpO
+xfAaJVDqFjxT5yKj3dCfy6XSDZq39AeX/w26/WfH+0ALRiViAAaMHSldbawVR/W3isecDWF
tlU4NSJMLi/tTohe0QN1fjOaFryAvw==
""",
"""\
MIIJWgYJKoZIhvcNAQcCoIIJSzCCCUcCAQMxDTALBglghkgBZQMEAgEwDQYLKoZIhvcNAQkQ
ARygggZ0MIIGcDCCBVigAwIBAgIRANa58hQvZ26svTWQaGtqo/YwDQYJKoZIhvcNAQELBQAw
gZcxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcT
B1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMT0wOwYDVQQDEzRDT01PRE8g
UlNBIENsaWVudCBBdXRoZW50aWNhdGlvbiBhbmQgU2VjdXJlIEVtYWlsIENBMB4XDTE1MDIx
MjAwMDAwMFoXDTIwMDIxMjIzNTk1OVowgZUxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhWaXJn
aW5pYTEPMA0GA1UEBxMGUmVzdG9uMRMwEQYDVQQKEwpJRVRGIFRydXN0MRkwFwYDVQQLExBT
ZWNyZXRhcmlhdCBXZXN0MQ0wCwYDVQQDEwRJRVRGMSMwIQYJKoZIhvcNAQkBFhRpZXRmLWFj
dGlvbkBpZXRmLm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUkKtMPP1RA
FU6sxMezYJKCt4rw30RDieB8/P67TMhA6j8Um4a2Xo+CP9Ce1oMri2bwaaQPYWB4ciEL32za
0NUE0B0iCjZZl36hon6wW6mJw1NGD/AFxnKWzhkSWG6BHMoeOAzu/ye8sHu4Jp5nazpGptK7
30SjTS3JJFU9pHwQY6JlcmwVv0j2rsT3gj92Cbj5S+U5wCSE6+mZbCC+VPFeeI1kFITwyaIm
uK9kSYHr15OXua/jrYNrHNRfqtexGKSgnUT96KkTh9TVvkMETB1WJS4WuEIP6GITvwVTp0lA
qS3oNO4SM4tgFVdYqppcvZBg52kHY9y7IdR156c99zzZDBfWBduqjs/AXa0uol0EJd7qFLUs
xEJ96XN3tPgR/Cwq18ec29pZQH6kO81Kato/RsQrj6A05TFx/J0MYE0R1MZqvIDUu55vlicb
wT2lpXMiz1szKuvjTZRR9H/IgbKPNpt/kDUSgXLYwuKBm+nBoJXgybEyJ+A4arb60d9Uiusu
UA8/h6s1rDMuTnIYMbIii4Y+KgevBWPawqk0xioilEMJ0RBaBVrDreuFlK8aYh+Jo2piruBA
QnB9ZaPmEM1HPNArJxqL6XcUJTkFxNSksOATDFV5sEoBWYIe6qv2nV2r/HWDAEaa4WH2h3o/
kASarXk3SxPXmfjOOr1XgpKjAgMBAAGjggG1MIIBsTAfBgNVHSMEGDAWgBSCr2yM+MX+lmF8
6B89K3FIXsSLwDAdBgNVHQ4EFgQU7Olc92Oy6nkCvpv6jCj6qN8YPtUwDgYDVR0PAQH/BAQD
AgeAMAwGA1UdEwEB/wQCMAAwRgYDVR0gBD8wPTA7BgwrBgEEAbIxAQIBAwUwKzApBggrBgEF
BQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwWgYDVR0fBFMwUTBPoE2gS4ZJ
aHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBQ2xpZW50QXV0aGVudGljYXRpb25h
bmRTZWN1cmVFbWFpbENBLmNybDCBiwYIKwYBBQUHAQEEfzB9MFUGCCsGAQUFBzAChklodHRw
Oi8vY3J0LmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDbGllbnRBdXRoZW50aWNhdGlvbmFuZFNl
Y3VyZUVtYWlsQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20w
HwYDVR0RBBgwFoEUaWV0Zi1hY3Rpb25AaWV0Zi5vcmcwDQYJKoZIhvcNAQELBQADggEBAGPm
QUKHxkEQ9vk69vIP68anHc8UsTv5powtLSXLqUw3rAoKAdoWkKjb7ZByHCuFiNk1BvTnhQPh
LAZm5dI8dYWKp3zgWVxsCXOQv2K4XbaQpIk8KKdLycHWsOq2OD4xBdhfTQqDj9EidhxaLf4B
bRUePOuWVvwNqHI6OQ9FbRllUsTsSH3XK7z9Ru/0Ub07uEzmWyrIpeFRLJUg9EqQj25pw8j7
N9ym8ItpfEQvK4Nrzt9KnGwFDaNOUjYAvejig9iUNdOXEQKVzbq8fC25HrXPQisq8u2jrP38
cRqzwgGHZ1bJrQa8+LPBfADZ4ZHeqlEe6IqZhS/wDSuDNCIZHtkxggKqMIICpgIBA4AU7Olc
92Oy6nkCvpv6jCj6qN8YPtUwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3
DQEJEAEcMBwGCSqGSIb3DQEJBTEPFw0xOTA2MDkxNjU3NTdaMC8GCSqGSIb3DQEJBDEiBCBg
ifxBsUb2E8RicFvqZB+NJEs1FOG4hFFU1bPqV2UwGzANBgkqhkiG9w0BAQEFAASCAgCApFAS
4+cYrnkMebrANXw7/TGn6Qx01p9fuOugQb6lcfE5CysIKGLJJogs0BXwHK4jTeJRdt/lutuz
bACg1bakABxuCiLWMu3pKCKS94qAgElYgWru+pAxPhuslz5MwAU0qFW3KnaNq3f5wXlVQ+h2
l9spSiLhAQ+vLTLfotn6tCmUfjaaYsoNIUGg6b/2vH75QGYaXDq9YGoCrrkDbaRS4eDenSL5
S2fBTZ5VMJE/1VQY1D5CWqt2CTfzRkNkU7mkarPy6SPvguDlqKJJnFaZJmeIYbGOpDt6KxWc
DLFD9+J6CH492QwlHxDtM94nK1oIaqdu9TTV94t0ToGezElOZZuVA2DVkov5DzrYQLI5GjMw
7iHXW1ewCaGF38DdOopqBYp7jcCCZpruKBWDq/uz40MzSBrffYTP/dg4//8Awvt/JomvTUoH
E18Pt/G2cqdw0NqOE7YEcFpsLGfikTWmGhnrcYUkt8odDDAv/vqZRt8DLkB56waQeQw0TLit
2M3gbTSHJ1KFsBM/kqHanVapGtnClkY7hYh8DVpgJymJpupkNFs8lDNbN4C42DhQ6Oz9P2qu
8a/ybEb5gMZ3fsVLvvp6LhbJfqIvYgZO2uKXeKg3eLASD5nVY/Tuhnn2plhx+weKULGys0Ov
zPKZ+N96KLerIBr3FmGByqhr3jNrBw==
""",
"""\
MIIJWgYJKoZIhvcNAQcCoIIJSzCCCUcCAQMxDTALBglghkgBZQMEAgEwDQYLKoZIhvcNAQkQ
AR2gggZ0MIIGcDCCBVigAwIBAgIRANa58hQvZ26svTWQaGtqo/YwDQYJKoZIhvcNAQELBQAw
gZcxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcT
B1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMT0wOwYDVQQDEzRDT01PRE8g
UlNBIENsaWVudCBBdXRoZW50aWNhdGlvbiBhbmQgU2VjdXJlIEVtYWlsIENBMB4XDTE1MDIx
MjAwMDAwMFoXDTIwMDIxMjIzNTk1OVowgZUxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhWaXJn
aW5pYTEPMA0GA1UEBxMGUmVzdG9uMRMwEQYDVQQKEwpJRVRGIFRydXN0MRkwFwYDVQQLExBT
ZWNyZXRhcmlhdCBXZXN0MQ0wCwYDVQQDEwRJRVRGMSMwIQYJKoZIhvcNAQkBFhRpZXRmLWFj
dGlvbkBpZXRmLm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUkKtMPP1RA
FU6sxMezYJKCt4rw30RDieB8/P67TMhA6j8Um4a2Xo+CP9Ce1oMri2bwaaQPYWB4ciEL32za
0NUE0B0iCjZZl36hon6wW6mJw1NGD/AFxnKWzhkSWG6BHMoeOAzu/ye8sHu4Jp5nazpGptK7
30SjTS3JJFU9pHwQY6JlcmwVv0j2rsT3gj92Cbj5S+U5wCSE6+mZbCC+VPFeeI1kFITwyaIm
uK9kSYHr15OXua/jrYNrHNRfqtexGKSgnUT96KkTh9TVvkMETB1WJS4WuEIP6GITvwVTp0lA
qS3oNO4SM4tgFVdYqppcvZBg52kHY9y7IdR156c99zzZDBfWBduqjs/AXa0uol0EJd7qFLUs
xEJ96XN3tPgR/Cwq18ec29pZQH6kO81Kato/RsQrj6A05TFx/J0MYE0R1MZqvIDUu55vlicb
wT2lpXMiz1szKuvjTZRR9H/IgbKPNpt/kDUSgXLYwuKBm+nBoJXgybEyJ+A4arb60d9Uiusu
UA8/h6s1rDMuTnIYMbIii4Y+KgevBWPawqk0xioilEMJ0RBaBVrDreuFlK8aYh+Jo2piruBA
QnB9ZaPmEM1HPNArJxqL6XcUJTkFxNSksOATDFV5sEoBWYIe6qv2nV2r/HWDAEaa4WH2h3o/
kASarXk3SxPXmfjOOr1XgpKjAgMBAAGjggG1MIIBsTAfBgNVHSMEGDAWgBSCr2yM+MX+lmF8
6B89K3FIXsSLwDAdBgNVHQ4EFgQU7Olc92Oy6nkCvpv6jCj6qN8YPtUwDgYDVR0PAQH/BAQD
AgeAMAwGA1UdEwEB/wQCMAAwRgYDVR0gBD8wPTA7BgwrBgEEAbIxAQIBAwUwKzApBggrBgEF
BQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwWgYDVR0fBFMwUTBPoE2gS4ZJ
aHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBQ2xpZW50QXV0aGVudGljYXRpb25h
bmRTZWN1cmVFbWFpbENBLmNybDCBiwYIKwYBBQUHAQEEfzB9MFUGCCsGAQUFBzAChklodHRw
Oi8vY3J0LmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDbGllbnRBdXRoZW50aWNhdGlvbmFuZFNl
Y3VyZUVtYWlsQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20w
HwYDVR0RBBgwFoEUaWV0Zi1hY3Rpb25AaWV0Zi5vcmcwDQYJKoZIhvcNAQELBQADggEBAGPm
QUKHxkEQ9vk69vIP68anHc8UsTv5powtLSXLqUw3rAoKAdoWkKjb7ZByHCuFiNk1BvTnhQPh
LAZm5dI8dYWKp3zgWVxsCXOQv2K4XbaQpIk8KKdLycHWsOq2OD4xBdhfTQqDj9EidhxaLf4B
bRUePOuWVvwNqHI6OQ9FbRllUsTsSH3XK7z9Ru/0Ub07uEzmWyrIpeFRLJUg9EqQj25pw8j7
N9ym8ItpfEQvK4Nrzt9KnGwFDaNOUjYAvejig9iUNdOXEQKVzbq8fC25HrXPQisq8u2jrP38
cRqzwgGHZ1bJrQa8+LPBfADZ4ZHeqlEe6IqZhS/wDSuDNCIZHtkxggKqMIICpgIBA4AU7Olc
92Oy6nkCvpv6jCj6qN8YPtUwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3
DQEJEAEdMBwGCSqGSIb3DQEJBTEPFw0xOTA3MTQwMTMyMTdaMC8GCSqGSIb3DQEJBDEiBCAJ
zK6u0RRfrSQ2ebn+GOxnbovlG3Raul/1zOOGmTaIPzANBgkqhkiG9w0BAQEFAASCAgBlKYNd
euVzPDqEa13k4nQthmyJUUqjWlAVolgohXioYok8Z5BkKmkp8ANLbvkJl0hV1Al1hutTRNeF
a5ZeWyS6nAWyPFKfRSNqwWLMIi1dX+rO7Vhf15Lz944ZYsqO+O2f7rjWUJmi8/uJKD7cFDiW
uKkPMgvqyIMnnC3ya/sC1vU+0Feqr5JcIMs2AHQeNVe8hzN4T9Pthyax7gqbxTkg3Gyt7Mwy
WLZeK84oJmkl9ANeVgzq+P/cmqUaqtfkBFDSxaTag/eoYM3QfHNisr/jHCazqCh88VMgwhvk
cl6NS9hdH+aOWqQ3FE1c7VJNoQRDT7ztyKCrRJFPc4wZL8tsGkKp1lP4WcaStcbUJ65AdWPb
3CZonLY4UOBotAUpG/PObMCmWBEpr8MN0Q+kuEO2oAe9kBoFsv7MtNfyHE4CuOANRqGLRgOL
72hN8Cy0sGWYUy+2chH3i50cT8XkDV5Rz2Z5xW3SfyAuW53j2WKLFsKkZjfkZBopSJM20V4E
8pPnQQ/ByFwYPyS/xJZc24vsRxgogbrf11JU8hKVkfSsq3JXxUxe5w+Sh1XGTmO5tXDKFfyi
S+VljWVifzXaR3pmTEQPhXH4nBa4K/HYytxofDP3EMli+imil2fFBbBedZkb5CIQ/Ly3soHZ
dZlmZDkyeXJLpkNjRAsG6V82raZd9g==
"""
)

    def setUp(self):
        # Each sample is a full CMS ContentInfo wrapping a SignedData.
        self.asn1Spec = rfc5652.ContentInfo()

    def testDerCodec(self):
        """Round-trip each sample and collect its eContentType OID."""
        oids = [ ]
        for pem_text in self.pem_text_list:
            substrate = pem.readBase64fromText(pem_text)
            asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)

            # Outer ContentInfo must decode fully and re-encode losslessly.
            self.assertFalse(rest)
            self.assertTrue(asn1Object.prettyPrint())
            self.assertEqual(substrate, der_encoder(asn1Object))
            self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])

            # Decode the inner SignedData payload and round-trip it as well.
            sd, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())

            self.assertFalse(rest)
            self.assertTrue(sd.prettyPrint())
            self.assertEqual(asn1Object['content'], der_encoder(sd))

            oids.append(sd['encapContentInfo']['eContentType'])

        # All three RFC 8358 content types must appear across the samples.
        self.assertIn(rfc8358.id_ct_asciiTextWithCRLF, oids)
        self.assertIn(rfc8358.id_ct_pdf, oids)
        self.assertIn(rfc8358.id_ct_xml, oids)
+
+
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # Run the suite; exit non-zero when any test fails so callers see it.
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(0 if outcome.wasSuccessful() else 1)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8360.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8360.py
new file mode 100644
index 0000000000..56a76cf303
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8360.py
@@ -0,0 +1,464 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8360
+
+
+class CertExtnsTestCase(unittest.TestCase):
+ extns_pem_text = """\
+MIJM7TCCRkUGCCsGAQUFBwEcBIJGNzCCRjMwgkXJBAIAATCCRcEDAgACMAoDAgAF
+AwQFBRwAMAoDBAMFHCgDAgEEMAoDAwINdAMDAg14AwMCDYwwCgMDAw2oAwMDDbAD
+AwIN8AMEAhcTMAMEAxcTOAMEAxdTQAMEBBdTcAMEBRdp4DAMAwQDF2poAwQDF2pw
+AwQEF2zQAwMAF20DBAMXbwAwDAMEBBdvEAMEBhdvADAMAwQEF29QAwQHF28AMAwD
+BAYXb8ADBAMXb/ADBAUX74ADBAMX+egwDAMEARf8QgMEABf8RDAMAwQAF/xHAwQA
+F/xIMAwDBAAX/EsDBAAX/EwDBAAX/E8DAwIYhAMEARjrFgMCABkwCwMDARtuAwQC
+G24QMAkDAgAfAwMAH84wDAMEAx/PCAMEAx/PMDAKAwQCH888AwIFADAJAwIAJQMD
+ACVuMAkDAwQlcAMCASQDBAIr4XADBAIr+6ADAwMtCAMEBS0rQAMEBS044AMEBi1B
+QAMDAS1CAwQHLUuAAwMELVADAwUtgAMEAi36GDAKAwIBLgMEBi6VgDAKAwQELpXQ
+AwIALgMDATEMAwMBMjwDAwAydzAJAwIAMwMDADNOAwMAM1AwCQMDATNSAwICMAME
+BTSQQAMCADUwCgMDAjYkAwMANiYDAgA5AwQCPQ7gMAoDAgE+AwQGPggAMAwDBAU+
+CGADBAU+DEAwDAMEBz4MgAMEBT4YQDAMAwQHPhiAAwQGPj2AMAsDAwE+PgMEBT5E
+ADAMAwQGPkRAAwQFPkTAMAoDAwA+RQMDAT5wMAsDAwA+cwMEBT51ADALAwQGPnVA
+AwMAPoYwCwMEBz6HgAMDAD6KAwQGPowAMAwDBAc+jIADBAU+rQAwDAMEBj6tQAME
+Bj7BAAMEBT7BgDALAwQGPsHAAwMBPugwCwMDAD7rAwQFPvAAAwQFPvBAMAwDBAc+
+8IADBAc+8QAwDAMEBT7xoAMEBz77AAMDAj78MAwDBAE/jSIDBAE/jSQDBAQ/jgAD
+BAVAEMADBAVAHAADBAdAKwADBAZAQUADBAZAicADBAJA7+wDBAVA/SADBAZBEoAD
+BARBEtADBAVBJ0ADBAZBY4ADBAVCT2ADBARCVEADBAZCdoADBAVCzUADBAVC1GAD
+BAJC+MwwDAMEBUMWIAMEAkMWOAMEAEOePgMEBUPRgAMEBERGwAMEBkUGAAMEBEVe
+QAMEBEVecAMEBUWsYAMEBkXCQDAJAwIATQMDAk3YMAwDBAVN3CADBAdQQwAwDAME
+BFBDkAMEBVBHADAMAwQEUEcwAwQFUEhAMAwDBARQSHADBAVQS4AwDAMEBlBLwAME
+BFBWADAMAwQFUFYgAwQGUFcAMAsDBAVQV2ADAwNQUDALAwQEUFgQAwMAUF4wDAME
+BFBfEAMEBlDwgDALAwQEUPDQAwMBUPQwCwMEBFD2EAMDA1DwMAwDBARQ+BADBAZQ
++AAwDAMEBFD4UAMEBlD5ADAMAwQEUPlQAwQFUPoAMAsDBARQ+jADAwJRADALAwQG
+UQRAAwMBUQgwDAMEB1EKgAMEBVEVQDAMAwQEURVwAwQGURYAMAwDBAVRFmADBARR
+F8AwDAMEBVEX4AMEBlEaADAMAwQEURpQAwQFUR1AMAsDBARRHXADAwJRQDALAwMD
+UUgDBAVRW8AwCwMEBFFb8AMDBlGAMAsDAwBRwQMEB1JlADALAwQGUmXAAwMHUgAw
+DAMEB1KAgAMEB1KBADALAwMBUoIDBAZSlwAwCwMEBVKXYAMDAlKYMAsDAwFSngME
+B1LJADALAwMBUsoDBANTjxAwCwMEBVOPIAMDAlQgMAsDAwBUJQMEBVTNQDALAwQH
+VM2AAwMAVOgwDAMEB1TpgAMEBVf3gDAMAwQDV/eoAwQFV/9AMAwDBAdX/4ADBARZ
+0OAwCwMDAFnRAwQEWe8gMAwDBAZZ70ADBAVb4QAwDAMEAVvhIgMEAlvrADALAwQB
+W+sGAwMBXMgwCwMDAlzMAwQFXbNAAwQDXbNoAwQCXbN4MAwDBAdds4ADBABeGhow
+DAMEAl4aHAMEAF4aHjAMAwQFXhogAwQCXhooAwQAXhotMAwDBAReGjADBABeGkIw
+DAMEAl4aRAMEAF4aSDAMAwQBXhpKAwQCXhpQMAwDBAFeGlYDBAJeZwAwDAMEA15n
+CAMEBF6+wAMEBF6+8DAMAwQHXr+AAwQGX6OAMAsDBARfo9ADAwBfqDAMAwQFX6kg
+AwQCX7W4MAoDBAZftcADAgVAAwQFYAmAAwQEYH2QAwMAYoADAwNlOAMEAmfhyAME
+AmfniAMEAmfxbAMEAmf7pAMEAmf8WAMEA2glIAMEAmiZVAMEAWjoJAMEBmjpQAME
+Amjz2AMEAmj0CDAMAwQBa5aiAwQCa5awAwQEa6GgAwQFa7WAAwQAa79EMAkDAgBt
+AwMCbegwCwMEBG3sEAMDAm3wMAkDAwBt9QMCAWwwCwMDAHHLAwQGccuAAwMBdMoD
+BAJ9PkgDAwCAAAMDAIAHAwMAgBAwCgMDAIAnAwMBgCgDAwCALTAKAwMAgEEDAwSA
+QAMDAYBWAwMAgF0DAwCAYgMEB4B0gAMDAIB8AwMAgH8DAwGAgjAKAwMAgIsDAwCA
+jgMDAICoAwMAgLADAwGAsgMDAIDHAwMAgMwDAwCA1gMDAIDoAwMAgOoDAwCA8AMD
+AIDzAwMAgPYwCgMDAIELAwMBgQwDAwCBEAMDAIEUAwMBgRoDAwCBHwMDAIFDMAoD
+AwCBRQMDAIFGAwMAgUkDAwCBWDAKAwMBgWYDAwCBaAMDAIF9AwMAgYEDAwCBhAMD
+AYGOAwMAgakDAwCBrzAKAwMAgbEDAwCBsjAKAwMAgbUDAwGBuAMDAIG7AwMBgcID
+AwCBxwMDAIHOAwMAgdADAwGB1gMDAIHZMAoDAwCB6QMDAIHqMAoDAwSB8AMDAIHy
+AwMAgfcDAwCCAAMDAYIYAwMAghwDAwCCIAMDAIIlAwMAgiswCgMDAII7AwMAgjwD
+AwGCQgMDAIJJAwMAgksDAwGCTjAKAwMBglIDAwCCVAMDAYJYAwMBglwDAwCCYgMD
+AIJkAwMAgmgDAwCCcAMDAIJzAwMAgngDAwCCfQMDAIKFAwMAgogwCgMDAYKKAwMB
+gpQDAwCCnwMDAIKhAwQHgqSAAwMAgrQDAwCCtzAKAwMAgrkDAwCCugMDAIK8AwMA
+gr4DAwGCwAMDAILGAwMAgswDAwCCzgMDAYLQAwMAgt8wCgMDAILhAwMAgvQDAwCC
+9gMEA4L4MDAMAwQBgvg6AwQGgvgAMAwDBAKC+EQDBAKC+GgDAwCC+wMDAIL/AwMA
+gwEDAwCDYQMDAINjAwMAg2YDAwCDbzAKAwMBg3IDAwGDdAMDAIOCAwMAg5gDAwGD
+mjAKAwMAg58DAwCDoDAKAwMCg6QDAwCDpgMDAIOpMAoDAwCDrQMDAYOwAwMAg7QD
+AwCDvAMDAIPNAwMAg88DAwCD0wMDAIPcAwMAg+AwCgMDAIPjAwMAg+QDAwCD5wMD
+AIPqAwMAg+0DAwCD9gMDAIP7AwMAg/4wCgMDBoRAAwMAhE4DAwCEkjAKAwMAhJUD
+AwCElgMDAISZAwMAhJswCgMDAISlAwMBhKgDAwCEqwMDAISwAwMAhLQwCgMDAIS5
+AwMChLgwCgMDAITDAwMAhMQDAwCExwMDAITjMAoDAwCE5QMDA4TgAwMAhPQDAwCE
+/AMDAoYAAwMAhhEDAwCGEwMDAIYVAwMAhhkwCgMDAIYbAwMAhhwDAwCGHjAKAwMB
+hiIDAwGGJAMDAIYvAwMAhjYwCgMDAYY6AwMBhjwDAwCGTAMDAIZRAwMAhlMwCgMD
+AYZaAwMAhm4DAwCGdwMDAIaCAwMAhooDAwGGjjAKAwMAhpEDAwKGkAMDAIaXAwMA
+hpswCgMDAIadAwMAhp4DAwCGqQMDAIarAwMAhrAwCgMDAIa3AwMAhrgDAwCGvAMD
+AIa/AwMAhssDAwCGzjAKAwMChtQDAwCG1jAKAwMAhtsDAwCG3jAKAwMAhuEDAwKG
+4AMDAIbvMAoDAwCG9QMDA4bwAwMAhvkDAwCG/wMEAodUNAMEAodUlAMDAIdaAwMA
+h8QDBAeIjwAwCwMEBIiQEAMDAIiQAwMAiJQwCgMDAIibAwMBiJwDBAeInoAwCgMD
+AIijAwMAiKQwCgMDAIipAwMAiKoDAwGIrAMDAIjHAwMAiMkDAwCIzgMDAIjhAwMB
+iOYDAwCI8wMDAIj/AwMAiREwCgMDAIkhAwMAiSIwCgMDAIkrAwMAiSwDAwCJLwMD
+AIkyMAoDAwCJNwMDAIk4AwMAiTowCgMDAok8AwMAiT4wCgMDAIlJAwMAiUoDAwCJ
+XQMDAIlgAwMAiWUDAwCJaQMDAIlsAwMBiXgDAwCJgQMDAImFAwMAiYoDAwCJnAMD
+AImjAwMAia4DAwCJvzAKAwMAicEDAwKJwAMDAYnMAwMAidADAwCJ1TAKAwMAidkD
+AwCJ2jAKAwMAid0DAwCJ4AMDAIniAwMAifgDAwGJ+gMDAIn9MAoDAwCKAwMDAIoE
+AwMAigYDAwCKDjAKAwMAihUDAwCKFjAKAwMAiiUDAwCKJgMDAYooAwMAijADAwGK
+PgMDAIpCAwMAikYDAwCKUQMDAIpgAwMAimQwCgMDAYpmAwMAimgDAwCKagMDAIp8
+AwQDioCIMAoDAwCKgwMDAIqEAwMAioYwCgMDAIq7AwMGioADAwCKwzAKAwMBisYD
+AwGKyAMDAIrLMAoDAwCKzQMDAIrOMAoDAwCK1wMDAIrYMAoDAwCK3QMDAIrkMAoD
+AwCK5wMDAYroMAoDAwKK9AMDAIr2AwMCivgDAwCK/TAKAwMAiwEDAwCLBDAKAwMB
+iwYDAwCLCDAKAwMBiwoDAwGLGDAKAwMAixsDAwCLHgMDAIstAwMAiy8DAwCLMgMD
+AIs2AwMAizowCwMDAIs/AwQHi0AAAwMAi0IDAwGLSgMDAItPAwMAi1MwCgMDAItZ
+AwMAi1wwCgMDBYtgAwMAi2IDAwCLZDAKAwMAi2kDAwCLeDAKAwMBi3oDAwCLfAMD
+AIuAAwMAi4UDBAWLisADAwCLjQMDAIuPAwMAi5EDAwCLlQMDAIuZMAoDAwKLnAMD
+AIueAwMAi6ADAwCLojAKAwMCi6QDAwCLpgMDAIuuAwMBi7IDAwCLuAMDAIu/AwMA
+i94wCgMDAIxNAwMAjE4wCgMDAoxUAwMAjFYwCgMDAIxdAwMAjF4DAwCMYQMDAIxp
+AwMBjJYDAwCMpAMDAIymAwMAjLUDAwCMywMDAIznMAkDAgCNAwMBjVQwCgMDAI1X
+AwMBjWQDAwCNaQMDAI1sAwMAjXEDAwCNcwMDAI19AwMAjYIwCgMDAY2GAwMAjYow
+CgMDAI2PAwMAjZQDAwCNowMEB42kgDAKAwMAjakDAwCNrDAKAwMAja8DAwCNsAMD
+AI3AAwQAjcEUAwQCjcFsAwQBjcHWAwMAjcIDAwCNxAMDAY3IAwMAjcsDAwCN0DAL
+AwMBjeIDBAON4oADBACN4oswCwMEBI3ikAMDAI3kAwMAjecDAwCN7QMDAI3xAwMB
+jfQwCgMDAI35AwMAjfoDAwGN/AMDAI3/AwQDjlsIAwQDjlt4AwQDjluYAwQHjpoA
+AwQDjuogAwMAjvcDAwCPKQMDAI8vMAoDAwGPMgMDAY80AwMAj0EwCwMEB49cgAMD
+AY9cAwMAj2EDAwCPYzAKAwMAj3UDAwCPdgMDAI95AwMAj34wCgMDAI+BAwMAj4ID
+AwCPoTAKAwMAj6MDAwCPpAMDAI+nAwMAj6kwCgMDBI+wAwMAj7QDAwCPxAMDAI/N
+AwMAj9IDAwCP2QMDAY/gMAoDAwCP6QMDAI/qAwMAj+0DAwCP7wMDAI/1AwMAj/wD
+AwCQAgMDAJAEAwMBkBQDAwCQGAMDAJAbAwMAkCADAwCQKTAKAwMAkCsDAwCQLAME
+ApAwPAMDAJA2AwMBkDgwCgMDAJA/AwMBkEADAwCQTAMDAJBSMAsDAwKQVAMEB5BW
+AAMDAJBXAwMAkF8DAwCQYgMDAJB3AwMAkHoDAwCQfAMDAJB/AwMAkJEDAwGQpAMD
+AJCtMAoDAwSQsAMDAZC0AwMAkMEDAwCQyAMDAJDMAwMAkM4DBAaQ0AAwCwMEB5DQ
+gAMDAZDQAwMAkPgwCQMCAJEDAwCSADAKAwMBkgIDAwCSBAMDAJITAwMAkhUDAwCS
+MAMDAJIyAwMAkjQwCgMDAJI7AwMAkjwDAwGSQgMDAJJGAwQBkkdeAwMAkkgDAwCS
+SwMDAJJNAwMBklADAwCSVwMEA5JY6AMDAJJaAwMAkmEwCgMDApJkAwMAkm4DAwCS
+cDAKAwMAkncDAwCSeAMDAJJ8AwMAkoUDAwCSiAMDAJKMAwMBkp4wCgMDAJKhAwMA
+kqIDAwCSqQMDAZKsMAoDAwCSrwMDAZKwAwMAkrMDAwCSuQMDAJK8MAoDAwCSvwMD
+AJLCMAoDAwGS0gMDAZLUAwMAktgwCgMDAJLbAwMAktwDAwCS4DAKAwMAkuMDAwCS
+5AMDAJLqAwMAkvEwCgMDAJL3AwMBkvgDAwCS+zAJAwMAkv0DAgCSAwMAkwcwCgMD
+ApMMAwMAkw4wCgMDAJMbAwMAkx4DAwGTIAMDAZMsAwMAkzQDAwCTNgMDAJM8MAoD
+AwCTQwMDAJNEAwMAk0swCgMDAJNNAwMAk04wCgMDAZNSAwMAk1QwCgMDAZNWAwMB
+k1gDAwCTWzAKAwMAk10DAwCTYDAKAwMBk2IDAwCTZAMDAJNmMAoDAwCTbwMDAJNw
+AwMAk3cDAwGTegMDAJN9AwMAk38DBAeTh4ADAwGTjjAKAwMAk5MDAwCTmAMDAJOc
+MAoDAwCToQMDApOgAwMAk6cwCgMDAJOrAwMBk6wDAwCTrwMDAZO0AwMAk7gDAwCT
+ugMDAZO8AwMAk8EDAwGTxAMDAJPJAwMAk8wDAwCT0jAKAwMAk9UDAwOT0AMDAJPc
+MAoDAwKT5AMDAZPsAwMAk/MwCgMDAZP6AwMAk/wDAwGUAgMDAJQGAwMAlDYDAwCU
+OAMEAJQ7cwMDAJQ8AwMAlD8DBAKUQDgDAwCURQMDAJRHAwQFlEzgAwMAlE8wCgMD
+AJRRAwMClFADAwCUWAMDAZRuMAoDAwGUdgMDApR4MAoDAwCUhwMDAJSIAwMAlIoD
+AwCUjAMDAJSPAwMAlJQDAwCUlwMDAJSgAwMAlKkDAwCUsAMDAJS1AwMAlLkDAwCU
+uzAKAwMClMQDAwCUxgMDAJTIMAoDAwCU+wMDAZT8AwMAlQADAwCVAwMDAJUbAwMA
+lTEDAwCVOwMDAJU+AwMAlVEDAwCVWgMDAJVtAwMAlX4wCgMDApWEAwMAlYYwCgMD
+AJWLAwMAlYwwCgMDAZWSAwMAlZQwCgMDAJWZAwMBlZwDAwCVqgMDAJWsMAoDAwCV
+sQMDAZXEMAoDAwOVyAMDAZXoMAwDBAOV6ggDBAeV6gAwCgMDAJXrAwMDlfAwCgMD
+AJX5AwMClfgDAwGV/gMDAJZqAwQClmvIAwMAlnADAwCWgAMEApaBCAMDAJaEAwMA
+lowwCgMDAJaRAwMAlpIDAwCWrwMDAJayAwMBlswwCgMDAJbVAwMAltYDAwCW2QMD
+AJbjAwMBluwDAwCW8QMDAJb0AwMAlvsDAwCW/jAJAwIAlwMDAJdkMAsDAwGXagME
+BZdqgDAMAwQGl2rAAwQEl2rgAwMAl3MDAwCXeAMDAJd/AwMAl4EDAwCXhTAKAwMA
+l4cDAwCXiAMDAZecAwMAl6oDAwCXrTAKAwMAl68DAwGXsAMDAJe0MAoDAwGXtgMD
+AJe4AwMAl7sDAwCXvTAKAwMAl9MDAwCX1DAJAwMDl9gDAgOQAwMAmEIDAwCYRwMD
+AJhJMAoDAwCYTQMDAJhOAwMAmFEwCgMDA5hYAwMAmFowCgMDAJhdAwMAmGADAwCY
+aQMDAZhyAwMAmIYDAwCYjwMDAJiWAwMAmJgDAwCZAQMDAJkFAwMAmQ8DAwCZEQMD
+AJkTAwMAmVgwCgMDAplcAwMAmWIDAwCZZDAKAwMCmWwDAwCZbgMDAJlwMAsDAwOa
+CAMEBJoIIAMEBpoIQAMDAZoOAwMAmiADAwCbBAMDAJstMAoDAwGbNgMDAJs4AwMA
+m0IDAwCbSQMDAJtpMAoDAwCbgwMDAZuEAwMBm4gDBAabikADAwCbjAMDAJuRAwMA
+m54DAwCbuQMDAJvAAwMAm8YDAwCbygMDAJvMAwMAm88wCgMDAJvRAwMAm9IDAwCb
+3zAKAwMAm+MDAwCb5AMDAJvnAwMAm/UwCgMDAJv5AwMAm/oDAwCb/QMEBZv+IAMD
+AJwKAwMAnA4wCgMDAJwRAwMAnBIDAwCcGQMDAJwcAwMAnCMDAwCcKwMDAZwwMAoD
+AwCcMwMDAJw0AwMAnDYDAwCcOgMDAJw9AwMAnEMDAwCcUwMDAJxqMAoDAwGccgMD
+AJx0AwMAnHYDAwCchQMDAJyHAwMAnJQDAwCcljAKAwMAnRcDAwGdHDAKAwMAnVMD
+AwCdVAMDAJ1YAwMAnV4DAwGdYAMDAJ1jAwQDnXjgMAsDBAKdeOwDAwCdeAMDAZ18
+AwMAnYEDAwCdiAMDAJ2KAwMAnYwDAwCdkDAKAwMAnZ0DAwWdgDAKAwMAnaEDAwCd
+pDAKAwMAnacDAwGdqDAKAwMAnasDAwGdrAMDAJ2xAwMBnbQDAwCdugMDAJ2+AwMA
+ncEDAwCdyAMDAJ3LAwMAneQDAwCd5wMDAZ3sAwMAnfMDAwCd9wMDAJ35AwMCniQw
+CgMDAJ4pAwMCnigDAwGeLjAKAwMAnjEDAwCeMgMDAJ46AwMAnjwDAwCeQAMDAZ5C
+AwMAnksDAwCeWgMDAJ5cAwMAnl4DAwCeYwMDAJ5mAwMAnmkwCgMDAJ5tAwMAnm4D
+AwCecAMDAJ53AwMCnnwDAwCegQMDAJ6DAwMAnoUDBASejBADBAaejEADAwCejwME
+BJ6SgDAKAwMCnpQDAwCelgMDAJ6YAwMAnpwDAwCeojAKAwMBnqYDAwGeqDAKAwMC
+nqwDAwGetDAKAwMBnr4DAwGexAMDAJ7YAwMAntoDAwCe3AMDAJ7fMAoDAwCe4QMD
+Ap7gMAoDAwGe5gMDAJ7qMAoDAwOe+AMDAJ76MAoDAwCe/wMDAJ8AMAoDAwCfBwMD
+AJ8IAwMAnwwDAwCfDwMDAJ8UAwMBnxYDAwCfGTAKAwMAnx0DAwWfAAMDAJ8iAwMA
+nyYDAwCfLgMDAZ8yAwMAnzoDAwCfPAMDAJ9FAwMAn0gDAwCfUQMDAJ9UAwMAn1YD
+AwGfXAMDAJ9fAwMBn2QwCgMDAJ9nAwMAn2gDAwCfawMDAJ9tAwMAn3IDBAOfdcAw
+DAMEAZ91ygMEBJ91wAMDAJ96AwMAn4IDAwCfhgMEBZ+HgAMDAJ+QMAoDAwGfkgMD
+AZ+UMAoDAwCflwMDAJ+YAwMAn5oDAwCfnAMDAJ+gAwMAn6IwCgMDAJ+nAwMAn6gD
+AwGfqgMDAJ+tMAoDAwCfswMDAJ+0AwMAn74wCgMDAJ/BAwMCn8ADAwCfxQMDAJ/I
+AwMAn80DAwCf0gMDAJ/VAwMBn9gDAwCf2wMDAJ/gAwMAn+gDAwGf7AMDAJ/vAwMA
+n/EDBAOf8ggDBAaf8kADBASf8uADAwGf9DAJAwMAn/0DAgWAMAoDAwCgBQMDAKAG
+AwMBoAgDBAKgE1wDBAKgE7QDBAOgFGADBAKgFGwDBASgFJADBAGgFNYDBACgFOUD
+BAGgFPgDAwCgJgMDAKAoMAoDAwKgLAMDBqAAMAoDAwGgQgMDAKBEAwMBoEYDAwCg
+SwMDAKBOAwMAoFADAwCgVQMDAKBcMAoDAwCgYQMDAKBkMAoDAwCgZwMDAKBoAwMA
+oHIDAwCgtAMEBKDKEAMDAaDSMAoDAwCg1QMDAKDWMAoDAwOg2AMDAaDcAwMAoOQD
+BAag50ADBACg7hUDBAKg7iQDBAKg7jQDBAGg7jwDBAKg7mADBASg7nAwCgMDAaEC
+AwMBoQQDAwGhCAMDAKEMAwMAoREDAwChFAMDAKEXAwMAoRsDAwChHgMDAKElMAoD
+AwChKQMDAKEqAwMAoTAwCgMDAqE0AwMAoTYDAwChOwMDAKE+AwMAoUMwCgMDAaFG
+AwMAoUoDAwChTAMDAKFOMAoDAwChUwMDAKFcMAoDAwOhaAMDAKFqMAoDAwGhbgMD
+AKFwAwMAoXQDAwChhgMDAKGTAwMAoZwDAwChngMDAKHKAwMAodoDAwCh5jAKAwMA
+ofsDAwCh/AMDAKILAwQDogzIAwMAog0DAwCiFQMDAKIXMAoDAwCiGQMDAKIaAwMA
+oiYDAwCiVgMDAKKFAwQHoo6AAwQBotiKAwQCotn4AwQDotpYAwQCotqwAwQDotzw
+MAwDBAOi3lgDBACi3loDBAKi9DQDBAKi9cwDBAKi+MQDBAKi+tgDBAOi+yADBAKi
+/8QDAwCjAQMDAKMDAwMAowUDAwCjCQMDAKMiMAoDAwGjPgMDAaN0AwMAo3cwCgMD
+AqOcAwMEo6ADAwCj8jAJAwICpAMDAKQoAwMApDAwCgMDAKQ7AwMBpDwDAwCkUQME
+B6RdgDAKAwMBpH4DAwSkgAMDAKSxAwMApNcDAwClTgMEBKVU0AMDAKVyAwMApcAD
+AwCl2gMDAKXeAwMDpggDBAemMYADAwCmVwMDAKdRMAsDBAGnVkIDAwOnUAMDAKdi
+AwQHp2SAAwMAp28DBAWnoAADAwCnqAMDAKesAwMAp8sDAwCn6QMDAKgBAwMAqIsD
+BAeolQADBAGolfgDAwCouwMEAaj1xAMDBakgAwQHqZQAAwQEqgpwAwQHqhGAMAwD
+BACqJcsDBACqJc4DAwCqPAMEB6plgAMEB6pmgAMEB6qFAAMDAarsAwMAqv8wCgMD
+BKsQAwMBqyADBAesUIADBAOsZ1gDBAOs8QAwDAMEA6zxKAMEA6zxMAMEA6zxQAME
+AqzxVAMEBKz/AAMEBKz/YAMEA6z/iAMEBKz/wAMEBKz/4AMEA6z/+AMEBq3UwAME
+Aq3WyAMEA63qgAMEBK30kAMEBq35AAMEA63/kAMEBq6MAAMEB69ugDAKAwIEsAME
+A7BvMDAMAwQAsG85AwQEsHOgMAwDBAOwc7gDBAewegAwCgMEBrB6wAMCALAwCgMC
+AbIDBASy7kAwCgMEBbLuYAMCALIDAwC06jAKAwIAuQMEArkIYDAMAwQDuQhoAwQC
+uRTYMAwDBAW5FOADBAO5HkAwDAMEArkeTAMEAbkm0DAMAwQCuSbUAwQCuVrwMAwD
+BAO5WvgDBAO5ZCAwDAMEArlkLAMEALlpBjAMAwQDuWkIAwQFuZIAMAwDBAK5kiQD
+BAK5qWgwDAMEBLmpcAMEBLmrwDAMAwQCuavUAwQCubAwMAwDBAO5sDgDBAS5soAw
+CgMEArmylAMCAbgwCgMCArwDBAe8gwAwCwMDAryEAwQEvNGAMAwDBAO80ZgDBAa8
+1gAwCgMEBLzWUAMCALwwDAMEAsAFHAMEAMAFHgMEAMAFJAMEAMAFMgMEAMAFOzAM
+AwQAwAU9AwQAwAU+AwQAwAVhAwQAwAWOAwQAwAWRAwQAwAWiAwQAwAXvAwQAwAX+
+AwQAwAwBAwQAwAwvAwQAwAw2AwQBwAxIAwQAwAxNAwQAwAxRAwQAwAxgAwQAwAxj
+MAwDBAbADMADBADADMIwDAMEAMAM2QMEAMAM2jAMAwQAwAznAwQAwAzoAwQAwAzr
+AwQAwAz3AwMAwA8wDAMEAMAQewMEAMAQpjAMAwQAwBC3AwQAwBDKAwQAwBLDMAwD
+BALAGhwDBATAGiAwDAMEAMAaaQMEAcAabDAMAwQAwBpvAwQAwBqAMAwDBADAGoUD
+BAPAGoAwDAMEAcAamgMEAcAawAMEAMAa5wMEAMAa6gMEAsAa7DAMAwQCwBx8AwQC
+wByAAwQAwB8OAwQAwB8XAwQBwB8aAwQAwB8fAwQAwB8oAwQAwB8+AwQAwB9mMAwD
+BAHAH6YDBALAH6gDBADAH9MDBADAH+cDBADAH/wwDAMEAMAhDwMEAMAhEAMEAMAh
+JDAMAwQAwCFXAwQEwCFgMAwDBADAIXEDBADAIXIwDAMEAcAhdgMEB8AhAAMEAcAh
+gjAMAwQAwCGPAwQBwCGQMAwDBADAIZMDBADAIaYwDAMEAMAhqQMEAsAhqDAMAwQE
+wCGwAwQAwCG2MAwDBAbAIcADBAPAIeAwDAMEAMAh6QMEBMAh4AMEAMAh/gMEAMAi
+EwMEAMAiMgMEAMAiawMEAMAidAMEAMAiszALAwMAwCMDBALAIxAwDAMEAMAjPwME
+AMAjSAMEAMAjWgMEAMAjXgMEAMAjbDAMAwQBwCOCAwQBwCOEAwQAwCOKAwQAwCOS
+MAwDBADAI5UDBAHAI5gDBADAI6wwDAMEAMAjtwMEAMAjwDAMAwQAwCPFAwQAwCPG
+MAwDBADAI80DBATAI8ADBADAI+UwDAMEBMAj8AMEAMAj9AMEAcAj9jAKAwMCwCQD
+AwDAJjAMAwQAwChFAwQAwChQAwQAwCjkMAwDBADAKWcDBADAKYgwDAMEAsApjAME
+AcApkAMEAMApkzAMAwQAwCmVAwQAwCmgAwQAwCnSAwQAwCnYAwQAwCnaAwQAwCnj
+AwQAwCoBMAwDBAHAKioDBATAKiADBADAKjUwDAMEAMAqPwMEAcAqQAMEAMAqVwME
+AMAqYQMEAMAqZAMEAMAqZjAMAwQAwCpxAwQAwCqEAwQAwCqPMAwDBALAKrQDBAHA
+KsgDBADAKv0wDAMEAcArogMEAsArqDAMAwQGwCvAAwQAwCvEAwQAwCvSAwQAwCvU
+AwQAwCvqMAsDAwLALAMEAsAsQDAMAwQAwCxHAwQAwCxaMAwDBATALPADBALALPgw
+DAMEAsAv9AMEAcAv+AMEAMAwHwMEAMAwawMEAMAwkQMEAMAw4AMEAMAw5wMDAMAx
+AwQEwDMAMAsDAwLANAMEAMA0MgMEAMA0mDAMAwQAwDSfAwQBwDSgMAwDBADANN0D
+BAXANMAwDAMEAMA0/QMEAMA0/jAMAwQAwDVnAwQAwDVoMAwDBADANh8DBADANjQw
+DAMEAcA2NgMEAMA2UAMEAMA2aAMEAMA2cTAMAwQAwDZzAwQAwDZ4AwQAwDZ6MAwD
+BADANn0DBADANoADBADANoQDBADANoswDAMEAMA2jQMEAcA23AMEAMA24QMEAcA2
+9AMEAMA2/gMEAMA3VAMEAMA3WQMEAMA3ZQMEAMA3aQMEAMA3bQMEAMA3cwMEAMA3
+gQMEAMA3hAMEAMA3vAMEAMA3wQMEAMA3xQMEAMA31AMEAMA39DAMAwQCwDocAwQC
+wDogMAwDBADAOikDBAHAOlgDBADAOsUDBADAOtowDAMEAcA64gMEAcA65AMEAMBA
+HAMEAcBALAMEAsBAZAMEAMBAfQMEAMBAyjAMAwQAwEEzAwQAwEFGMAwDBALAQVwD
+BADAQV4DBADAQWAwDAMEAMBBgwMEAMBBhAMEAMBBizAMAwQEwEGQAwQAwEGSAwQA
+wEGZMAwDBADAQbcDBAHAQcQwDAMEAMBB2wMEAMBB5AMDAMBCMAwDBADAQwMDBADA
+QwQDBADAQycDBADAQysDBADAQy8DBADAQzIDBADAQzQDBADAQzcDBADAQzoDBADA
+Q0wDBADAQ08DBADAQ1cDBADAQ14wDAMEAsBDZAMEAMBDaAMEAMBDhwMEAMBDpwME
+AMBDqjAMAwQAwEO9AwQAwEPQAwQAwEPaAwQBwEPcAwQAwEPfAwQAwEP5MAsDAwLA
+RAMEAsBEEAMEAMBEFzAMAwQAwEQfAwQCwEQwMAwDBALAREwDBALARGgwDAMEAMBE
+lwMEAcBEmDAMAwQAwESlAwQAwESqAwQAwESuAwQAwESwAwQAwES2AwQAwES6AwQA
+wETRMAwDBADARNMDBADARNgDBADARN0DBADAROADBADAROYwDAMEAcBE+gMEAMBE
+/AMEAMBE/jALAwMBwEYDBAPARnAwDAMEAMBGhQMEAMBGhgMEAMBGiDAMAwQCwEaM
+AwQFwEaAAwQDwEbAAwQAwEbyAwMAwEcwDAMEAMBJEwMEAMBJFDAMAwQBwEkiAwQA
+wEksAwQAwEniAwQAwEnlMAwDBAHATAYDBALATCAwDAMEAMBMewMEAMBMhDAMAwQB
+wEyGAwQAwEysAwQAwEywMAwDBADATPEDBADATPgDBADATQsDBAHATXIDBALATXgw
+DAMEAsBNhAMEAMBNigMEAcBNjAMEAcBQFDAMAwQAwFAfAwQAwFAqAwQAwFAuAwQA
+wFAzAwQAwFE7MAwDBADAUT0DBADAUT4DBADAUW0DBADAUXkDBADAUXsDBADAUaAw
+DAMEAcBRtgMEAMBRuAMEAMBRwgMEAMBR5gMEAMBR6gMEAMBSeQMEAMBSfAMEAMBS
+fwMEAMBSmTAMAwQAwFKdAwQAwFKeAwQAwFLWAwQBwFLcAwQAwFLxMAsDAwDAUwME
+AMBTZAMEAMBTZgMEAMBToAMEAMBTpQMEAMBTyAMEAMBTygMEAMBT2AMEAMBT3zAM
+AwQAwFPlAwQAwFPmAwQAwFQFAwQAwFQNAwQAwFQPAwQAwFQbAwQAwFQeAwQBwFQg
+AwQAwFQ+MAwDBADAVEsDBADAVFQDBADAVFcwDAMEAcBUWgMEAsBUYDAMAwQAwFRl
+AwQBwFRsMAwDBADAVH8DBADAVJwDBADAVKYDBADAVK0wDAMEBMBUsAMEBMBUwAME
+AMBU1AMEAcBU3DAMAwQBwFTiAwQBwFTkMAwDBADAVPUDBAPAVPADBADAVgsDBADA
+Vg4DBADAVhIDBADAVhkDBADAVhsDBADAVlkwDAMEAMBWfQMEB8BWAAMEAMBWhjAM
+AwQAwFaJAwQAwFaKAwQAwFajAwQBwFamAwQAwFapAwQAwFb+AwMAwFcDBADAWAED
+BADAWAQwDAMEAMBYCQMEAMBYCgMEAMBYETAMAwQAwFgXAwQAwFgYMAwDBADAWFMD
+BADAWFQDBADAWFYwDAMEAMBYYQMEAMBYYgMEAMBYbAMEAMBYdgMEAMBYewMEAMBY
+gAMEAMBYggMEAMBYhQMEAMBYxAMEAMBYzAMEAcBY7jAMAwQBwFj6AwQAwFj+AwMA
+wFkDBAHAW4wDBADAW7EDBADAW7oDBADAW70DBADAW78DBADAW8cDBADAW8kDBADA
+W9MwDAMEAcBb1gMEAcBb6DAMAwQCwFvsAwQDwFvwAwQAwFxWAwQAwFxeMAwDBAPA
+XGgDBADAXGoDBAHAXGwDBADAXHQwDAMEAMBcfQMEAsBciDAMAwQAwFyNAwQBwFyY
+MAwDBADAXJsDBADAXJwDBADAXNgDAwDAXQMEAMBeGAMEAMBeHDAMAwQAwF45AwQA
+wF46MAwDBADAXkMDBADAXkQDBADAXkwDBADAXk4wDAMEAMBebwMEAcBedDAMAwQC
+wF6cAwQCwF6gAwQAwF6sMAwDBADAXq8DBAPAXsADBADAXtQDBADAXt0DBADAXuID
+BADAXukDBADAXusDBADAXu8DAwDAYgMEAMBkEjAMAwQAwGQXAwQBwGQYAwQAwGQ0
+AwQAwGQ9AwQAwGQ/AwQAwGROAwQCwGRgMAwDBAHAZGYDBAHAZIQwDAMEAMBkhwME
+AMBkjAMEAMBkkAMEAMBkmgMEAMBlAQMEAMBlBAMEAMBlCAMEAMBlCwMEAMBlHAME
+AMBlIgMEAMBlSzAMAwQAwGVRAwQAwGVaMAwDBADAZW8DBADAZXIDBADAZXYDBADA
+ZYkwDAMEAMBloQMEAMBlqAMEAMBlqgMEAMBlsDAMAwQAwGWzAwQAwGW0AwQAwGXA
+MAwDBADAZcUDBADAZcYDBADAZfwDBADAZgEwDAMEAcBmBgMEAMBmCDAMAwQAwGYR
+AwQBwGZQAwQAwGZZAwQAwGZfMAwDBAHAZpIDBAHAZrADBADAZtYDBAHAZuAwDAME
+AMBm4wMEAcBm5AMEAMBnAgMEAMBnBwMEAMBnDgMEAMBnFAMEAMBnFzAMAwQAwGcb
+AwQAwGcoMAwDBADAZ1UDBADAZ3QwDAMEAMBniQMEAsBniAMEAMBnkwMEAMBoFwME
+AcBoHDAMAwQAwGgjAwQBwGgkAwQAwGgpAwQAwGgwAwQAwGg1MAwDBADAaDcDBADA
+aDoDBADAaEgDBADAaE0DBADAaFIDBADAaIwDBADAaI4DBADAaJMDBAHAaJowDAME
+AMBopwMEAMBoqAMEAMBo7gMEAMBo9QMEAMBo+AMEAMBo+wMEAMBpSwMDAMBqAwQA
+wGsCAwQBwGsEMAwDBADAawsDBAHAawwwDAMEAMBrMwMEAMBrZAMEAMBrbgMEAMBr
+cjAMAwQBwGt6AwQBwGuAAwQAwGuEAwQAwGuoAwQAwGuuMAwDBADAa7EDBADAa7ID
+BADAa7swDAMEA8BryAMEAcBr6DAMAwQAwGvrAwQAwGvsMAwDBADAbBcDBAHAbDAw
+DAMEAMBsMwMEAMBsXAMEAcBsZDAMAwQAwGxrAwQAwGxsMAwDBAHAbHIDBAHAbHgw
+DAMEAMBsfQMEAMBsfjAMAwQHwGyAAwQAwGyuMAwDBADAbMMDBADAbNYDBADAbOoD
+BADAbO4wCwMDAMBtAwQAwG0sMAwDBAHAbS4DBADAbUowDAMEAsBtTAMEAsBtWDAM
+AwQBwG1eAwQAwG1iMAwDBALAbWQDBADAbWYwDAMEAMBtaQMEA8BtcDAMAwQAwG15
+AwQBwG3wMAsDBADAbfMDAwHAbAMEAMBvIQMEAMBvJwMEAMBvLDAMAwQAwG8vAwQA
+wG8wAwQAwG9YAwQAwG9lMAwDBADAb2cDBADAb2gDBAHAb3wDBADAb38DBADAb/ww
+DAMEAcBwHgMEAMBwIAMEAMBwLQMEAMBwMQMEAMBwPTAMAwQBwHBGAwQEwHBAMAwD
+BAHAcGIDBADAcGQDBADAcMwDBADAcM4DBADAcNAwDAMEAMBw1QMEAMBw1gMEAMBw
+9wMEAMBw/jAKAwMAwHEDAwDAdgMDAMB5MAwDBADAegEDBADAeoIwDAMEAMB6jQME
+AMB6kjAMAwQAwHqXAwQAwHqqAwQAwHrWMAwDBAPAetgDBADAeuowDAMEAcB67gME
+AMB68gMEAMB6/jAMAwQAwHwZAwQAwHwcAwQAwHwgAwQAwHwnAwQAwHwuAwQAwHxw
+MAwDBADAfHMDBADAfHQDBADAfJswDAMEAcB8qgMEAsB82AMEAMB86zAMAwQAwHzt
+AwQBwHz0MAwDBADAfPcDBADAfPgwDAMEAcB8+gMEAMB8/gMDAMB9MAwDBADAfgED
+BADAfkAwDAMEAMCBAQMEAcCBPAMEAMCBUAMEAMCBVwMEAMCBYgMDAMCCAwQAwIMU
+MAwDBADAgxkDBADAgxoDBADAg08DBADAg1kDBADAg2ADBADAg2wDBADAg4QwDAME
+AMCECQMEAsCECAMEAMCEIgMEAMCENQMEAMCENwMEAMCEYwMEAMCE7wMEAcCE9AME
+AMCE/AMEAMCFDwMEAMCFHAMEAMCFIAMEAMCFJDAMAwQAwIU1AwQDwIUwAwQAwIU6
+AwQAwIVAAwQCwIVsAwQAwIV5AwQAwIWDAwQAwIX0AwMAwIYwDAMEAMCHBwMEAcCH
+JAMEAMCHLjAMAwQAwIczAwQBwIc0AwQAwIc/AwQAwIdCAwQAwIdEAwQAwIdSAwQA
+wIdkAwQAwIeBAwQAwIeFAwQAwIePMAwDBADAh5EDBADAh6gDBADAh68DBADAh7sD
+BADAh9sDBADAh+EwDAMEAMCH5wMEAMCH6jAMAwQAwIf9AwQAwIf+AwQAwIgHAwQA
+wIgJAwQBwIgSAwQAwIgXMAwDBADAiB0DBAXAiAADBAHAiCgDBADAiDEwDAMEAMCI
+MwMEAMCINAMEAMCIPTAMAwQAwIhHAwQAwIhmMAwDBAHAiJoDBADAiJwwDAMEAMCK
+AQMEAMCKCAMEAMCKVjAMAwQAwIppAwQBwIp0MAwDBADAipsDBADAip4DBADAircD
+BADAisADBADAiswwDAMEAsCK5AMEAMCK6AMEAMCK+AMEAMCLTgMEAcCMAgMEBsCQ
+ADAMAwQBwJBKAwQBwJBMAwQHwJEAAwQCwJHgAwQAwJJ1AwQAwJJ3MAwDBADAknsD
+BADAkn4DBADAkoQwDAMEAcCShgMEAcCSjAMEAMCSmDAMAwQAwJKjAwQAwJKqMAwD
+BALAkqwDBADAkrYDBADAkrkDBADAkrsDBADAksEDBADAkswwDAMEAMCS4wMEAMCS
+5DAMAwQAwJLpAwQAwJLqAwQBwJLuAwQAwJLyAwQAwJMXAwQAwJMiAwQAwJMkAwQA
+wJMqAwQCwJNMAwQAwJONAwQAwJOWAwQAwJObAwQAwJPUMAwDBADAk9cDBAHAk9gD
+BADAk9sDBADAk+QDBADAk/cDBADAk/swDAMEAMCUIQMEAMCUXAMEAMCUZwMEAcCU
+pjAMAwQAwJSxAwQAwJS6MAwDBAbAlMADBADAlMIwDAMEAcCUxgMEAMCU2AMEAMCU
+3AMEAMCVAwMEAMCVBQMEAMCVDwMEAMCVEwMEAMCVGzAMAwQAwJUdAwQBwJUgAwQA
+wJUjAwQAwJUpAwQAwJU5MAwDBADAlTsDBADAlTwwDAMEAMCVTQMEBMCVQAMEAMCV
+YgMEAMCVZAMEAMCVZgMEAcCVbjAMAwQAwJV1AwQBwJV4AwQAwJV+AwQAwJXjAwQA
+wJXoAwQAwJXuAwQAwJYUMAwDBAHAljoDBADAlkgwDAMEAMCWSwMEAMCWTDAMAwQB
+wJZOAwQAwJZUAwQAwJZZAwQAwJZcAwQAwJZeAwQAwJZoAwQAwJZqAwQAwJZ8AwQA
+wJaMAwQAwJaSMAwDBADAlrEDBADAlrgwDAMEAsCWvAMEAMCWvjAMAwQGwJbAAwQA
+wJbGMAwDBADAlssDBAHAlswDBAHAltADBADAlt8wDAMEAMCW4wMEAcCW6AMEAMCW
+7gMEAMCW+AMEAMCW/AMEAMCW/gMEAMCYBgMEAMCYDgMEAMCYEQMEAcCYGgMEAMCY
+KgMEAMCYLDAMAwQAwJgvAwQCwJgwAwQAwJg2MAwDBADAmD0DBAbAmAADBADAmEQD
+BADAmFIDBADAmGIwDAMEAMCYbwMEAMCYcAMEAMCYegMEAMCYfAMEAMCYjQMEAMCY
+lwMEAMCYnAMEAcCYpgMEAMCYrgMEAsCYuAMEAMCY8QMEAMCY9DAMAwQAwJj9AwQA
+wJj+AwQBwJkCAwQAwJkNAwQBwJkSAwQAwJlZAwQAwJl0AwQAwJl/AwQAwJmZAwQA
+wJmmAwQAwJmoAwQAwJmrMAwDBADAma0DBADAmbYDBAHAmbwDBADAmcIDBADAmdUw
+DAMEAMCbAQMEAMCbBgMEAMCchAMEAMCcogMEAMCcpwMEAMCc0gMEAMCc1QMEAMCc
+2QMEAMCc4wMEAMCc7wMEAMCc+DAMAwQAwJ0BAwQCwJ0AMAwDBAPAnQgDBAHAnRAD
+BADAnYEwDAMEAMCdpQMEAMCdrAMEAMCdrgMEAMCdsAMEAMCduQMEAMCduwMEAMCd
+vQMEAcCfJgMEAMCfRgMEAMCfSQMEAMCfTQMEAcCfVAMEAMCfWgMEAMCfXzAMAwQA
+wJ9jAwQDwJ9gAwQAwJ9pAwQBwJ9sAwQAwJ92MAwDBADAn3kDBADAn3oDBADAoAoD
+BADAoA8wDAMEAMCgFQMEA8CgEAMEAMCgGwMEAMCgIQMEAMCgJQMEAMCgQQMEAMCg
+QzAMAwQAwKBfAwQAwKBgAwQAwKBqMAwDBADAoG0DBADAoG4DBADAoHsDBADAoH4D
+BAHAoI4DBADAoJgDBADAoJwDBADAoKADBADAoKwwDAMEAMCgsQMEAsCgsAMEAcCg
+wgMEAcCg4DAMAwQAwKDnAwQBwKDoMAwDBADAoPUDBAHAoPgwDAMEAMCg+wMEAMCg
+/AMEAcChBgMEAcChQAMDAMCiMAwDBAXAoyADBAXAo4ADAwLApDAMAwQAwKsBAwQB
+wKsEMAwDBAfAq4ADBAPAq8ADBADArOgDBADArP0wDAMEAMCtAQMEAMCtBAMEA8Ct
+gDAMAwQGwK5AAwQAwK5EAwQBwK8OAwQEwK8gAwMAwLAwDAMEBMC7EAMEAcC7GAME
+AMC8CjAMAwQAwLw/AwQBwLxAAwQAwLxFAwQAwLxgAwQAwLxpAwQBwLx0MAwDBADA
+vHkDBADAvHoDBADAvH0DBADAvH8DBADAvIEDBADAvIQDBADAvIgDBADAvJEwDAME
+AMC8nQMEAMC8ngMEAMC8uwMEAMC8vTAMAwQAwLzpAwQBwLzsMAwDBAHAvPIDBADA
+vPgDBADAvQEDBALAvQgDBADAvQ4DBADAvRcDBADAvSkwDAMEAMC9MwMEAMC9NAME
+AMC9NwMEAMC9QjAMAwQAwL1FAwQAwL1GAwQAwL1JAwQAwL1MAwQAwL13AwQAwL2X
+AwQAwL2aAwQAwL2dAwQAwL2gMAwDBAHAvaYDBADAvaoDBAHAvcoDBADAvfsDBADA
+viwDBAHAvjoDBADAvkADBADAvkMDBADAvkUDBADAvl8wDAMEAMC+gQMEAMC+hDAM
+AwQAwL6tAwQAwL6uAwQBwL62MAwDBAHAvr4DBAHAvsAwDAMEAMC+yQMEAMC+yjAM
+AwQAwL7pAwQAwL7sAwQAwL7wAwQAwL7yMAwDBADAvvcDBAHAvvgDAwDAwgMEAMDD
+AQMEAMDDCAMEAcDDKgMEAMDDSAMEAMDDYjAMAwQAwMNpAwQAwMNqAwQAwMNuMAwD
+BALAw3QDBADAw3YwDAMEAcDDhgMEAMDDlDAMAwQAwMO3AwQAwMO4AwQAwMPDAwQA
+wMPsMAwDBADAxAEDBALAxJgDBATAxxADBADAy1ADBAHAy2wDBADAy+MwDAMEAMDO
+TQMEAcDOUAMEAMDOVjAMAwQBwM7eAwQAwM7iAwQAwM8OAwQAwM8fMAwDBADAz40D
+BADAz44DBADAz8QDBADA50MDBADA51IwDAMEAMDuAQMEAMDuCgMEAMD1mAMEAMD1
+qQMEAMD14TAMAwQAwPcBAwQAwPcKMAwDBADA+z0DBAHA+0ADBADA++IDBADA++Yw
+CgMCAMEDBADBEdYwDAMEA8ER2AMEA8ET4DAMAwQCwRPsAwQBwSmQMAwDBALBKZQD
+BAbBUgAwDAMEBcFSYAMEBcFSwDAKAwMAwVMDAwDBXjAMAwQHwV+AAwQAwWwWAwQC
+wWwYMAwDBADBbB0DBAHBbNQwDAMEAMFs1wMEAsFs+DALAwMAwW0DBAHBbUAwDAME
+AsFtRAMEA8FuYDALAwQBwW5qAwMEwWADAwDBcTAMAwQFwXIgAwQFwXJAMAwDBAXB
+cqADBAXBcwAwDAMEB8FzgAMEBcF0AAMEBsF0gAMEBsF1ADALAwQFwXVgAwMAwXYw
+CwMEB8F3gAMDAcF4MAsDAwLBfAMEAMG8BjALAwQDwbwIAwMAwbwwDAMEAcG9QgME
+B8G9ADAMAwQAwb2BAwQAwcIAMAwDBAHBwgYDBAXBwgADBAXBwmAwDAMEAMHCgQME
+BcHCgDAMAwQGwcLAAwQBwd3YMAsDBADB3dsDAwDB4gMEBsHjQDAMAwQAweOBAwQF
+wgbAMAwDBADCBuEDBAbCCQAwDAMEAcIJQgMEAcIJUDAMAwQCwglUAwQAwiO+MAwD
+BAbCI8ADBAXCT0AwCwMEB8JPgAMDAMJaMAsDAwLCXAMEBcLBAAMEBsLBQDAMAwQF
+wsHgAwQGwsyAMAoDAwDCzQMDAMLeMAwDBAXC32ADBAXC34AwDAMEBsLfwAMEBMMY
+QDAMAwQDwxhYAwQGwxiAMAwDBAXDGOADBAHDJ9gwCwMEAsMn3AMDAMMqMAwDBAXD
+KyADBALDgAAwDAMEA8OACAMEBcOmwDALAwMAw6cDBAXDp4AwDAMEBMOnsAMEBsPK
+ADAMAwQFw8pgAwQDw+pwMAwDBALD6nwDBAPD6qAwDAMEAMPqqQMEAMPquDAMAwQB
+w+q6AwQCw+r4MAsDAwDD6wMEBcP2ADAKAwQGw/ZAAwICwAMEAMQBAzAMAwQAxAEF
+AwQAxAEGAwQCxAFAMAwDBADEAUUDBADEAUYDBALEAgQwDAMEAcQDQgMEA8QDQAME
+AMQDWwMEBcQPIAMEAsYLAAMEAMYRTQMEAMYRdTAMAwQCxhG0AwQAxhG2AwQAxhYz
+MAwDBADGFl0DBAHGFmAwDAMEBcYkIAMEAMYkLgMEAsYtdAMEAMYzDDAMAwQAxjOP
+AwQAxjOSAwQCxjQsAwQCxjccAwQDxllYAwQAxmOUAwQAxmPeAwQFxmlgMAwDBATG
+hVADBAHGhVQDBADGhYwDBADGhc4DBADGheIwDAMEAMaHiQMEAMaHigMEAMaHpwME
+AMaToDAMAwQAxpSxAwQAxpSyMAwDBAHGtJYDBAHGtJgDBADGzsUwDAMEA8bOyAME
+AMbOygMEB8bwgAMEAccr9gMEAccw5gMDAMc1AwQDx1jQAwQEx1sQMAwDBAPHZwgD
+BADHZwwDBAXH9wADBAPH9zgDBAXH+oADBADKAE0DBATLn1ADBAPLvjgDBALMCwAD
+AwDMEgMEAcwwIAMEAMzh2gMEAM3JNwMEAM3TUwMEAc3c2AMEAM6nIQMEAs6+3AME
+Bc7DIAMEBc784AMEBs9ZQAMEBc+WoAMEAs+u2AMEBc+yQAMEBs+0wAMEBM+9wDAM
+AwQEz+VwAwQAz+V0MAwDBAHP5XYDBADP5XgwDAMEAc/legMEB8/lAAMEAtBSSAME
+BtEqwAMEBdGigAMEBdHOAAMEAdHOJgMEBNHVMAMEBdH64DAMAwQA0fvDAwQB0fvE
+AwQB0fv8AwQH1AAAMAwDBAXUAKADBAHUCOQwDAMEA9QI6AMEANQI8DAMAwQB1Ajy
+AwQB1Aj8MAsDAwDUCQMEBdQMwDALAwMA1A0DBAXUFoAwDAMEBtQWwAMEBtQxADAM
+AwQF1DFgAwQH1DQAMAwDBAXUNKADBAbUPAAwCwMEBdQ8YAMDBtQAMAwDBAfUQIAD
+BATURYAwDAMEBdRFoAMEBtRVgDAMAwQF1FXgAwQF1FhAMAwDBAfUWIADBAHUXGQw
+DAMEA9RcaAMEB9RfADALAwQF1F+gAwMF1EAwDAMEBdRgIAMEBtRkADAMAwQF1GRg
+AwQF1GeAMAwDBAbUZ8ADBATUdSAwDAMEBtR1QAMEBdR6wDALAwMA1HsDBAfUgQAw
+CgMDAdSCAwMA1NgwDAMEB9TZgAMEBtU3ADAMAwQH1TeAAwQG1YMAMAwDBAXVg2AD
+BAXViEAwDAMEB9WIgAMEBtWTADAMAwQF1ZNgAwQF1ZZAAwQF1ZaAMAwDBAXVluAD
+BAbVmAAwDAMEBdWYYAMEBdWaADAMAwQF1ZpgAwQF1Z6AMAwDBAbVnsADBAHVnwww
+DAMEBNWfEAMEAtWfgDAMAwQD1Z+IAwQH1awAMAwDBAXVrKADBAXVs4AwDAMEBtWz
+wAMEBdW1wDALAwMB1bYDBAXVwQAwDAMEBtXBQAMEBtXUgDAKAwMA1dUDAwDV9jAM
+AwQF1fcgAwQH1f8AMAoDBAXV/6ADAgHUAwQB2C58AwQA2GPeAwQE2J5gAwQE2KxA
+AwQG2NWAAwQF2PGAMAoDAgDZAwQE2Q5AMAwDBAXZDmADBAXZFMAwDAMEBNkU8AME
+BNkVYDAMAwQH2RWAAwQH2R0AMAwDBATZHZADBATZHcAwCwMEBdkd4AMDAtkwMAsD
+AwPZOAMEBdlAQDAMAwQE2UBwAwQG2U0AMAwDBATZTVADBAbZTgAwCwMEBNlOUAMD
+ANl0MAsDBATZdRADAwDZijALAwMC2YwDBAPZk7AwDAMEBtmTwAMEBNmqgDALAwQF
+2aqgAwMB2bAwCwMDANmzAwQE2ceAMAoDBAXZx6ADAgHYAwQC3J7EMGQEAgACMF4D
+BQAgAQAFMA0DBAEgAQYDBQEgAQf4MA0DBQAgAQf7AwQCIAEIAwQCIAEUMAwDBAEg
+ARoDBAEgAUADBAEgAUYwDAMEASABSgMEASABTAMEBCABUAMEBiADAAMDBCoAMIIG
+oAYIKwYBBQUHAR0EggaSMIIGjqCCBoowggaGAgEHAgEcAgIAiQICAOAwCAICAPgC
+AgD7AgIBBQICAR4CAgEgAgIBJgICAXcCAgF6AgICAQICAgUwCAICAhACAgIRAgIC
+IAICAikCAgIvAgICNQICAk4CAgJRAgICnTAIAgICpwICAqgwCAICArcCAgK5MAgC
+AgLFAgICxgICAsgCAgLPMAgCAgL4AgIC+QICAvwCAgL+MAgCAgMGAgIDDwICAxIw
+CAICAxUCAgMWMAgCAgRNAgIEsAICBLMCAgS1AgIEvTAIAgIE0gICBNMCAgTZAgIE
+4AICBOUCAgTpMAgCAgTzAgIE+wICBP8CAgUKAgIFETAIAgIFEwICBR0CAgUmAgIF
+PjAIAgIFSAICBUkCAgYLMAgCAgZ1AgIGdgICBn8CAgaQMAgCAgarAgIGvgICBsEC
+AgbEMAgCAgbKAgIGywICBs0CAgbUAgIG2DAIAgIG2gICBtwCAgbfAgIG5DAIAgIG
+6gICBusCAgbuAgIG8AICBvQCAgcpMAgCAgcrAgIHLQICBzEwCAICBzkCAgc6MAgC
+Agc9AgIHPjAIAgIHVQICB28wCAICB4ECAgeDAgIHhgICB4owCAICB48CAgejMAgC
+AgeoAgIHqgICB68CAgfUAgIH3DAIAgIH4AICB+EwCAICB+oCAgftAgIH9DAIAgIH
+9gICB/gCAgf7AgIH/QICB/8CAggBMAgCAggJAgIIWDAIAgIIYwICCGQwCAICCH4C
+AgjhMAgCAgjmAgIJSQICCUwwCAICCVMCAgm4AgIJvjAIAgIJ4QICCeICAgntMAgC
+AgnyAgIJ8wICChIwCAICChkCAgo2AgIKUwICClcCAgp7AgIKzjAIAgIK1QICCwYw
+CAICCw4CAgs/AgILTwICC2UCAgtpAgIL8jAIAgIMCwICDCUCAgxPMAgCAgxSAgIM
+hzAIAgIMiQICDRkwCAICDVQCAg1XAgIOKAICDwMwCAICD00CAg9OAgIQNDAIAgIR
+NQICEU4wCAICEWkCAhFqAgIRrDAIAgIR7AICEe0CAhNuAgIT4TAIAgIVAQICFZ8w
+CAICFaECAhX/AgIXswICF8UCAhgYAgIYsAICGQwwCAICGgACAho4MAgCAho6AgIa
+3jAIAgIa4AICGv8CAh+dMAgCAiAAAgIhSzAIAgIhTQICIkEwCAICIkMCAiOoMAgC
+AiOqAgIj/wICLE0CAi2MAgIvDjAIAgIwAAICMKYwCAICMKgCAjELMAgCAjENAgIz
+pzAIAgIzqQICM/8CAjY3MAgCAjwAAgI8JjAIAgI8KAICPHIwCAICPHQCAj1ZMAgC
+Aj1bAgI9uzAIAgI9vQICPdAwCAICPdICAj3ZMAgCAj3bAgI+WzAIAgI+XQICPrkw
+CAICPrsCAj9VMAgCAj9XAgI/mzAIAgI/nQICP/8CAkksAgJK6gICS7ACAkvHMAgC
+AlAAAgJQAzAIAgJQBQICUXkwCAICUXsCAlG/MAgCAlHBAgJSCjAIAgJSDAICUp8w
+CAICUqECAlL5MAgCAlL7AgJTFjAIAgJTGAICUx0CAlMfMAgCAlMhAgJTjjAIAgJT
+kAICU8swCAICU80CAlP/AgJWXAICWGMCAlibAgJayjAIAgJgAAICYJ8wCAICYKEC
+AmC0MAgCAmC2AgJg0zAIAgJg1QICYOAwCAICYOICAmECMAgCAmEEAgJhHjAIAgJh
+IAICYS0wCAICYS8CAmGaMAgCAmGcAgJiSjAIAgJiTAICYqEwCAICYqMCAmMRAgJj
+EzAIAgJjFQICY8YwCAICY8gCAmPfMAgCAmPhAgJj5zAIAgJj6QICY/8CAmUYMAgC
+AnAAAgJwCjAIAgJwDAICcBkwCAICcBsCAnDwMAgCAnDyAgJxojAIAgJxpAICcpkC
+AnKbMAgCAnKdAgJy8zAIAgJy9QICczYwCAICczgCAnNnMAgCAnNpAgJzgjAIAgJz
+hAICc60wCAICc68CAnPpMAgCAnPrAgJz/zAIAgJ4AAICeK8wCAICeLECAnkDAgJ5
+BTAIAgJ5GAICeVgwCAICeVoCAnoMMAgCAnoOAgJ7gjAIAgJ7hAICe/8wCgIDAIQA
+AgMAi/8wCgIDAJgAAgMAm/8wCgIDAKAAAgMApY0wCgIDAKWPAgMAr/8wCgIDALgA
+AgMAy/8wCgIDANwAAgMA4/8wCgIDAOgAAgMA7/8wCgIDAPIAAgMA8/8wCgIDAPuM
+AgMA++8wCgIDAwAAAgMDNZs=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extensions()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.extns_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oids = []
+ for extn in asn1Object:
+ oids.append(extn['extnID'])
+ extn_value, rest = der_decoder(
+ extn['extnValue'],
+ rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(extn_value.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extn_value))
+
+ self.assertIn(rfc8360.id_pe_ipAddrBlocks_v2, oids)
+ self.assertIn(rfc8360.id_pe_autonomousSysIds_v2, oids)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8398.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8398.py
new file mode 100644
index 0000000000..b5248318b4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8398.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8398
+
+
+class EAITestCase(unittest.TestCase):
+ pem_text = "oCAGCCsGAQUFBwgJoBQMEuiAgeW4q0BleGFtcGxlLmNvbQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.GeneralName()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['otherName']['type-id'],
+ rfc5280.anotherNameMap)
+ self.assertEqual(rfc8398.id_on_SmtpUTF8Mailbox,
+ asn1Object['otherName']['type-id'])
+
+ eai, rest = der_decoder(
+ asn1Object['otherName']['value'],
+ asn1Spec=rfc5280.anotherNameMap[asn1Object['otherName']['type-id']])
+
+ self.assertFalse(rest)
+ self.assertTrue(eai.prettyPrint())
+ self.assertEqual(asn1Object['otherName']['value'], der_encoder(eai))
+ self.assertEqual(u'\u8001', eai[0])
+ self.assertEqual(u'\u5E2B', eai[1])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc8398.id_on_SmtpUTF8Mailbox, asn1Object['otherName']['type-id'])
+ self.assertEqual(u'\u8001', asn1Object['otherName']['value'][0])
+
+ self.assertEqual(u'\u5E2B', asn1Object['otherName']['value'][1])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8410.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8410.py
new file mode 100644
index 0000000000..d6df485536
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8410.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5208
+from pyasn1_modules import rfc8410
+
+
+class PrivateKeyTestCase(unittest.TestCase):
+ no_pub_key_pem_text = ("MC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwo"
+ "y/HU++CXqI9EdVhC")
+
+ def setUp(self):
+ self.asn1Spec = rfc5208.PrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.no_pub_key_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ rfc8410.id_Ed25519, asn1Object['privateKeyAlgorithm']['algorithm'])
+ self.assertTrue(asn1Object['privateKey'].isValue)
+ self.assertEqual(
+ "0x0420d4ee", asn1Object['privateKey'].prettyPrint()[0:10])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8418.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8418.py
new file mode 100644
index 0000000000..b5e8d3e829
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8418.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8418
+
+
+class KeyAgreeAlgTestCase(unittest.TestCase):
+ key_agree_alg_id_pem_text = "MBoGCyqGSIb3DQEJEAMUMAsGCWCGSAFlAwQBLQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.key_agree_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ rfc8418.dhSinglePass_stdDH_hkdf_sha384_scheme,
+ asn1Object['algorithm'])
+ self.assertTrue(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8419.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8419.py
new file mode 100644
index 0000000000..3ad05cb611
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8419.py
@@ -0,0 +1,130 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8419
+
+
+class Ed25519TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MAUGAytlcA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_Ed25519, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class Ed448TestCase(unittest.TestCase):
+ alg_id_2_pem_text = "MAUGAytlcQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_2_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_Ed448, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class SHA512TestCase(unittest.TestCase):
+ alg_id_3_pem_text = "MAsGCWCGSAFlAwQCAw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_3_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_sha512, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class SHAKE256TestCase(unittest.TestCase):
+ alg_id_4_pem_text = "MAsGCWCGSAFlAwQCDA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_4_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_shake256, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class SHAKE256LENTestCase(unittest.TestCase):
+ alg_id_5_pem_text = "MA8GCWCGSAFlAwQCEgICAgA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_5_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_shake256_len, asn1Object['algorithm'])
+ self.assertTrue(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ param, rest = der_decoder(
+ asn1Object['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[asn1Object['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(asn1Object['parameters'], der_encoder(param))
+ self.assertEqual(512, param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.alg_id_5_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_shake256_len, asn1Object['algorithm'])
+ self.assertEqual(512, asn1Object['parameters'])
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8479.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8479.py
new file mode 100644
index 0000000000..e5b135f73d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8479.py
@@ -0,0 +1,108 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc8479
+
+
+class ValidationParmTestCase(unittest.TestCase):
+ pem_text = """\
+MIIE/gIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCpPwXwfhDsWA3q
+jN2BWg1xfDjvZDVNfgTV/b95g304Aty3z13xPXAhHZ3ROW3pgPxTj9fiq7ZMy4Ua
+gMpPK81v3pHX1uokC2KcGXbgbAq2Q8ClxSXgEJllRwDENufjEdV10gArt8NlIP0N
+lota1kQUuI1DMsqc5DTIa35Nq4j1GW+KmLtP0kCrGq9fMGwjDbPEpSp9DTquEMHJ
+o7kyJIjB+93ikLvBUTgbxr+jcnTLXuhA8rC8r+KXre4NPPNPRyefRcALLt/URvfA
+rTvFOQfi3vIjNhBZL5FdC+FVAr5QnF3r2+cuDPbnczr4/rr81kzFGWrwyAgF5FWu
+pFtB5IYDAgMBAAECggEAHZ88vGNsNdmRkfhWupGW4cKCuo+Y7re8Q/H2Jd/4Nin2
+FKvUPuloaztiSGDbVm+vejama/Nu5FEIumNJRYMeoVJcx2DDuUxO1ZB1aIEwfMct
+/DWd0/JDzuCXB0Cu5GTWLhlz0zMGHXihIdQ0DtGKt++3Ncg5gy1D+cIqqJB515/z
+jYdZmb0Wqmz7H3DisuxvnhiCAOuNrjcDau80hpMA9TQlb+XKNGHIBgKpJe6lnB0P
+MsS/AjDiDoEpP9GG9mv9+96rAga4Nos6avYlwWwbC6d+hHIWvWEWsmrDfcJlm2gN
+tjvG8omj00t5dAt7qGhfOoNDGr5tvJVo/g96O/0I8QKBgQDdzytVRulo9aKVdAYW
+/Nj04thtnRaqsTyFH+7ibEVwNIUuld/Bp6NnuGrY+K1siX8+zA9f8mKxuXXV9KK4
+O89Ypw9js2BxM7VYO9Gmp6e1RY3Rrd8w7pG7/KqoPWXkuixTay9eybrJMWu3TT36
+q7NheNmBHqcFmSQQuUwEmvp3MQKBgQDDVaisMJkc/sIyQh3XrlfzmMLK+GlPDucD
+w5e50fHl8Q5PmTcP20zVLhTevffCqeItSyeAno94Xdzc9vZ/rt69410kJEHyBO9L
+CmhtYz94wvSdRhbqf4VzAl2WU184sIYiIZDGsnGScgIYvo6v6mITjRhc8AMdYoPR
+rL6xp6frcwKBgFi1+avCj6mFzD+fxqu89nyCmXLFiAI+nmjTy7PM/7yPlNB76qDG
+Dil2bW1Xj+y/1R9ld6S1CVnxRbqLe+TZLuVS82m5nRHJT3b5fbD8jquGJOE+e+xT
+DgA0XoCpBa6D8yRt0uVDIyxCUsVd5DL0JusN7VehzcUEaZMyuL+CyDeRAoGBAImB
+qH6mq3Kc6Komnwlw4ttJ436sxr1vuTKOIyYdZBNB0Zg5PGi+MWU0zl5LDroLi3vl
+FwbVGBxcvxkSBU63FHhKMQw7Ne0gii+iQQcYQdtKKpb4ezNS1+exd55WTIcExTgL
+tvYZMhgsh8tRgfLWpXor7kWmdBrgeflFiOxZIL1/AoGAeBP7sdE+gzsh8jqFnVRj
+7nOg+YllJAlWsf7cTH4pLIy2Eo9D+cNjhL9LK6RaAd7PSZ1adm8HfaROA2cfCm84
+RI4c7Ue0G+N6LZiFvC0Bfi5SaPVAExXOty8UqjOCoZavSaXBPuNcTXZuzswcgbxI
+G5/kaJNHoEcdlVsPsYWKRNKgPzA9BgorBgEEAZIIEggBMS8wLQYJYIZIAWUDBAIC
+BCCK9DKMh7687DHjA7j1U37/y2qR2UcITZmjaYI7NvAUYg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5958.OneAsymmetricKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object['attributes']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+
+ if attr['attrType'] == rfc8479.id_attr_validation_parameters:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+ self.assertEqual(rfc4055.id_sha384, av['hashAlg'])
+
+ seed = univ.OctetString(hexValue='8af4328c87bebcec31e303b8f55'
+ '37effcb6a91d947084d99a36982'
+ '3b36f01462')
+
+ self.assertEqual(seed, av['seed'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object['attributes']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc8479.id_attr_validation_parameters:
+ av = attr['attrValues'][0]
+
+ self.assertEqual(av['hashAlg'], rfc4055.id_sha384)
+
+ seed = univ.OctetString(hexValue='8af4328c87bebcec31e303b8f553'
+ '7effcb6a91d947084d99a369823b'
+ '36f01462')
+
+ self.assertEqual(seed, av['seed'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8494.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8494.py
new file mode 100644
index 0000000000..2951e39200
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8494.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc8494
+
+
+class CompresssedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBNqADAgEAMIIBLaADAgEZoIIBJASCASB4nG2P0U7CQBBF3/cr5l2K3YpSF5YA
+bYmbWArtQsJjKVuogd1mO0T8e0ti1IjJZB4md07OHZbWnMbqkp/qo+oW5jSCWDqL
+VCSpkBveg2kSbrg/FTIWcQRpJPlLmGYQzdci5MvlA+3Rx2cyREO/KVrhCOaJFLMN
+n03E6yqNIEmDheS2LHzPG0zNdqw0dn89XAnev4RsFQRRlnW+SITMWmMGf72JNAyk
+oXCj0mnPHtzwSZijYuD1YVJb8FzaB/rE2n3nUtcl2Xn7pgpkkAOqBsm1vrNWtqmM
+ZkC7LgmMxraFgx91y0F1wfv6mFd6AMUht41CfsbS8X9yNtdNqayjdGF2ld4z8LcV
+EiIPVQPtvBuLBxjW5qx3TbXXo6vHJ1OhhLY=
+
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc8494.CompressedData()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ 0, asn1Object['compressionAlgorithm']['algorithmID-ShortForm'])
+
+ cci = asn1Object['compressedContentInfo']
+
+ self.assertEqual(
+ 25, cci['unnamed']['contentType-ShortForm'])
+ self.assertEqual(
+ '0x789c6d8fd1', cci['compressedContent'].prettyPrint()[:12])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8520.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8520.py
new file mode 100644
index 0000000000..da615dccfe
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8520.py
@@ -0,0 +1,115 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8520
+
+
+class MUDCertTestCase(unittest.TestCase):
+ mud_cert_pem_text = """\
+MIIFODCCAyCgAwIBAgICEEAwDQYJKoZIhvcNAQELBQAwZTELMAkGA1UEBhMCQ0gx
+DzANBgNVBAgMBlp1cmljaDERMA8GA1UEBwwIV2V0emlrb24xEDAOBgNVBAoMB0lt
+UmlnaHQxIDAeBgNVBAMMF0ltUmlnaHQgVGVzdCA4MDIuMUFSIENBMB4XDTE5MDUw
+MTE4MDMyMVoXDTE5MDUzMTE4MDMyMVowZzELMAkGA1UEBhMCQ0gxEzARBgNVBAgM
+ClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEg
+MB4GA1UEAwwXTGlnaHRidWxiMjAwMCwgU04jMjAyMDIwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQCzntv6tCdkZWPUx+CK9A9PCgKF8zGCJwdU4eIjo0oe
+A81i7iltOPnU416GJMEc2jGhlZPn2Rjjy8tPbyh1RVBfkgdq4UPWPnZPb+Gkq1c8
+X8zLRrMSWKqkSGOPENieDuQpzcrkMfj7dCPcxTcJ5Gluv1jEI7bxoZOZXjNxaFXi
+vsaZWFub7b+5zDLWpvmpKDaeCU+gad7rWpRE/Hjh3FX8paW8KE/hMF/au4xX2Qj/
+rDwHSxgs3n8FtuFUELotSgL3Acy3aISmJILBx6XrSs3nLruZzamulwWupSryHo3L
+U+GsOETiXwxiyrfOZo3aJNnWzlEvrYCQGyqd8Nd/XOENAgMBAAGjge8wgewwCQYD
+VR0TBAIwADBABggrBgEFBQcBGQQ0FjJodHRwczovL3d3dy5vZmNvdXJzZWltcmln
+aHQuY29tL0x1bWluYWlyZV8xNTAuanNvbjBdBggrBgEFBQcBHgRRME8xCzAJBgNV
+BAYTAkNIMSswKQYJKoZIhvcNAQkBFhxhc2NlcnRpYUBvZmNvdXJzZWltcmlnaHQu
+Y29tMRMwEQYDVQQDEwpFbGlvdCBMZWFyMB0GA1UdDgQWBBS00spi6cRFdqz95TQI
+9AuPn5/DRjAfBgNVHSMEGDAWgBREKvrASIa7JJ41mQWDkJ06rXTCtTANBgkqhkiG
+9w0BAQsFAAOCAgEAiS4OlazkDpgR4qhrq5Wpx6m3Bmkk5RkXnqey1yyhyfZlAGH7
+ewQiybkF3nN6at/TcNWMRfGBLhRrQn1h75KEXKlc18RDorj72/bvkbJLoBmA43Mv
+xMF0w4YX8pQwzb4hSt04p79P2RVVYM3ex/vdok0KkouhLTlxzY7vhv1T8WGTVQHJ
+k2EyswS2nFa/OtIkwruXqJj+lotdV2yPgFav5j9lkw5VbOztlfSKT7qQInVm+VBI
+/qddz/LOYrls1A7KHzWkTvOwmvQBqI4e9xLjc3r8K4pZyMd7EsmepYmLOU+pfINf
+/sEjliCluR65mKcKGiUa5J31pzbVpCr6FM/NGEjqpp6F+slyNC8YM/UlaJK1W9ZI
+W7JAhmfil5z1CtQILFSnUh4VneTVOaYg6+gXr169fXUDlMM4ECnuqWAE2PLhfhI8
++lY8u18rFiX0bNSiUySgxU3asCC92xNmvJHuL4QwiYaGtTne36NMN7dH/32nMKl+
+G3XA8cX8yZIrIkmWLBSji8UwOXwVhYovmbhHjaUMTQommxYv/Cuqi5nJUJfh5YJr
+APeEK6fTYpPMiZ6U1++qzZDp78MRAq7UQbluJHh8ujPuK6kQmSLXmvK5yGpnJ+Cw
+izaUuU1EEwgOMELjeFL62Ssvq8X+x6hZFCLygI7GNeitlblNhCXhFFurqMs=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.mud_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc8520.id_pe_mudsigner:
+ mudsigner, rest = der_decoder(
+ extn['extnValue'], rfc8520.MUDsignerSyntax())
+
+ self.assertEqual(extn['extnValue'], der_encoder(mudsigner))
+
+ c = rfc5280.X520countryName(value="CH")
+
+ self.assertEqual(mudsigner[0][0][0]['value'], der_encoder(c))
+
+ e = rfc5280.EmailAddress(value="ascertia@ofcourseimright.com")
+
+ self.assertEqual(mudsigner[0][1][0]['value'], der_encoder(e))
+
+ cn = rfc5280.X520CommonName()
+ cn['printableString'] = "Eliot Lear"
+
+ self.assertEqual(mudsigner[0][2][0]['value'], der_encoder(cn))
+
+ if extn['extnID'] == rfc8520.id_pe_mud_url:
+ mudurl, rest = der_decoder(
+ extn['extnValue'], rfc8520.MUDURLSyntax())
+
+ self.assertEqual(extn['extnValue'], der_encoder(mudurl))
+ self.assertEqual(".json", mudurl[-5:])
+
+ self.assertIn(rfc8520.id_pe_mudsigner, extn_list)
+ self.assertIn(rfc8520.id_pe_mud_url, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.mud_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8619.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8619.py
new file mode 100644
index 0000000000..cd54db669e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8619.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8619
+
+
+class HKDFSHA256TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MA0GCyqGSIb3DQEJEAMc"
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ self.assertEqual(
+ rfc8619.id_alg_hkdf_with_sha256, asn1Object['algorithm'])
+
+
+class HKDFSHA384TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MA0GCyqGSIb3DQEJEAMd"
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(
+ rfc8619.id_alg_hkdf_with_sha384, asn1Object['algorithm'])
+
+
+class HKDFSHA512TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MA0GCyqGSIb3DQEJEAMe"
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(
+ rfc8619.id_alg_hkdf_with_sha512, asn1Object['algorithm'])
+
+
+# Make the module runnable directly: collect every TestCase defined above
+# and report failure through the process exit status.
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+    result = unittest.TextTestRunner(verbosity=2).run(suite)
+    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8649.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8649.py
new file mode 100644
index 0000000000..67f8f9fd39
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8649.py
@@ -0,0 +1,60 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8649
+
+
+class RootCertificateExtnTestCase(unittest.TestCase):
+ extn_pem_text = """\
+MGEGCisGAQQBg5IbAgEEUzBRMA0GCWCGSAFlAwQCAwUABEBxId+rK+WVDLOda2Yk
+FFRbqQAztXhs91j/RxHjYJIv/3gleQg3Qix/yQy2rIg3xysjCvHWw8AuYOGVh/sL
+GANG
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extension()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.extn_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc8649.id_ce_hashOfRootKey, asn1Object['extnID'])
+
+ hashed_root_key, rest = der_decoder(
+ asn1Object['extnValue'], rfc8649.HashedRootKey())
+
+ self.assertFalse(rest)
+ self.assertTrue(hashed_root_key.prettyPrint())
+ self.assertEqual(asn1Object['extnValue'], der_encoder(hashed_root_key))
+ self.assertEqual(
+ rfc4055.id_sha512, hashed_root_key['hashAlg']['algorithm'])
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.extn_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertEqual(rfc8649.id_ce_hashOfRootKey, asn1Object['extnID'])
+ self.assertIn(asn1Object['extnID'], rfc5280.certificateExtensionsMap)
+
+
+# Make the module runnable directly: collect every TestCase defined above
+# and report failure through the process exit status.
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+    result = unittest.TextTestRunner(verbosity=2).run(suite)
+    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8692.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8692.py
new file mode 100644
index 0000000000..416b59ce07
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8692.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc8692
+
+
+class AlgorithmIdentifierTestCase(unittest.TestCase):
+ pem_text = """\
+MEowCwYJYIZIAWUDBAILMAsGCWCGSAFlAwQCDDAKBggrBgEFBQcGHjAKBggrBgEF
+BQcGHzAKBggrBgEFBQcGIDAKBggrBgEFBQcGIQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid_list = (
+ rfc8692.id_shake128,
+ rfc8692.id_shake256,
+ rfc8692.id_RSASSA_PSS_SHAKE128,
+ rfc8692.id_RSASSA_PSS_SHAKE256,
+ rfc8692.id_ecdsa_with_shake128,
+ rfc8692.id_ecdsa_with_shake256,
+ )
+
+ count = 0
+ for algid in asn1Object:
+ self.assertTrue(algid['capabilityID'] in oid_list)
+ count += 1
+
+ self.assertTrue(len(oid_list), count)
+
+
+# Make the module runnable directly: collect every TestCase defined above
+# and report failure through the process exit status.
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+    result = unittest.TextTestRunner(verbosity=2).run(suite)
+    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8696.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8696.py
new file mode 100644
index 0000000000..119f65826c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8696.py
@@ -0,0 +1,193 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8696
+
+
+class KeyTransPSKTestCase(unittest.TestCase):
+    """Decode a CMS AuthEnvelopedData sample whose recipient info uses the
+    RFC 8696 key-transport-with-PSK mechanism."""
+
+    key_trans_psk_pem_text = """\
+MIICigYLKoZIhvcNAQkQARegggJ5MIICdQIBADGCAiekggIjBgsqhkiG9w0BCRANATCCAhIC
+AQAEE3B0Zi1rbWM6MTM2MTQxMjIxMTIwDQYLKoZIhvcNAQkQAx0wCwYJYIZIAWUDBAEtMIIB
+sDCCAawCAQKAFJ7rZ8m5WnTUTS8WOWaA6AG1y6ScMA0GCSqGSIb3DQEBAQUABIIBgKo/Hkhu
+eoOdn1/cIEpt38NbEEdSC586IWcG+0l+ND9pcmQvvKvscpvFFVAjqLjvoXGatmSazr2Q4BVS
+yWKm0JqlyVWEAhRsU7wNlD7zRAKI8+obWpU57gjEKs13D8gb1PI2YPZWajN1Ye+yHSF6h+fb
+7YtaQepxTGHYF0LgHaAC8cqtgwIRW8N4Gnvl0Uuz+YEZXUX0I8fvJG6MKCEFzwHvfrfPb3rW
+B8k7BHfekRpY+793JNrjSP2lY+W0fhqBN8dALDKGqlbUCyojMQkQiD/iXSBRbZWiJ1CE92iT
+x7Ji9irq8rhYDNoDP2vghJUaepoZgIJwPWqhoTH+KRPqHTjLnnbi/TGzEdeO5h0C9Gc0DVzs
+9OHvHknQ7mSxPT9xKMXGztVT+P3a9ct6TaMotpMqL9cuZxTYGpHMYNkLSUXFSadAGFrgP7QV
+FGwC/Z/YomEzSLPgZi8HnVHsAGkJzXxmM/PJBu4dAXcKjEv/GgpmaS2B7gKHUpTyyAgdsBsy
+2AQo6glHJQ+mbNUlWV5Sppqq3ojvzxsPEIq+KRBgORsc31kH82tAZ+RTQjA3BgkqhkiG9w0B
+BwEwGwYJYIZIAWUDBAEuMA4EDMr+ur76ztut3sr4iIANmvLRbyFUf87+2bPvLQQMoOWSXMGE
+4BckY8RM
+"""
+
+    def setUp(self):
+        self.asn1Spec = rfc5652.ContentInfo()
+
+    def testDerCodec(self):
+        """Walk ContentInfo -> AuthEnvelopedData -> KeyTransPSKRecipientInfo,
+        checking each layer round-trips through the DER codec."""
+        substrate = pem.readBase64fromText(self.key_trans_psk_pem_text)
+        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+        self.assertFalse(rest)
+        self.assertTrue(asn1Object.prettyPrint())
+        self.assertEqual(substrate, der_encoder(asn1Object))
+
+        self.assertEqual(
+            rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+        aed, rest = der_decoder(
+            asn1Object['content'],
+            asn1Spec=rfc5083.AuthEnvelopedData())
+
+        self.assertFalse(rest)
+        self.assertTrue(aed.prettyPrint())
+        self.assertEqual(asn1Object['content'], der_encoder(aed))
+        self.assertEqual(0, aed['version'])
+
+        # The single recipient must use the key-transport-with-PSK ORI type.
+        ri = aed['recipientInfos'][0]
+        self.assertEqual(rfc8696.id_ori_keyTransPSK, ri['ori']['oriType'])
+
+        ktpsk, rest = der_decoder(
+            ri['ori']['oriValue'],
+            asn1Spec=rfc8696.KeyTransPSKRecipientInfo())
+
+        self.assertFalse(rest)
+        self.assertTrue(ktpsk.prettyPrint())
+        self.assertEqual(ri['ori']['oriValue'], der_encoder(ktpsk))
+        self.assertEqual(0, ktpsk['version'])
+
+        ktri = ktpsk['ktris'][0]
+        self.assertEqual(2, ktri['version'])
+
+    def testOtherRecipientInfoMap(self):
+        """The RFC 8696 ORI type must be resolvable through the shared
+        rfc5652.otherRecipientInfoMap registry."""
+        substrate = pem.readBase64fromText(self.key_trans_psk_pem_text)
+        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+        self.assertFalse(rest)
+        self.assertTrue(asn1Object.prettyPrint())
+        self.assertEqual(substrate, der_encoder(asn1Object))
+
+        self.assertEqual(
+            rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+        aed, rest = der_decoder(
+            asn1Object['content'],
+            asn1Spec=rfc5083.AuthEnvelopedData())
+
+        self.assertFalse(rest)
+        self.assertTrue(aed.prettyPrint())
+        self.assertEqual(asn1Object['content'], der_encoder(aed))
+        self.assertEqual(0, aed['version'])
+
+        ri = aed['recipientInfos'][0]
+        self.assertIn(ri['ori']['oriType'], rfc5652.otherRecipientInfoMap)
+
+        # Decode the oriValue using the spec looked up from the map.
+        ori, rest = der_decoder(
+            ri['ori']['oriValue'],
+            asn1Spec=rfc5652.otherRecipientInfoMap[ri['ori']['oriType']])
+
+        self.assertFalse(rest)
+        self.assertTrue(ori.prettyPrint())
+        self.assertEqual(ri['ori']['oriValue'], der_encoder(ori))
+
+class KeyAgreePSKTestCase(unittest.TestCase):
+    """Decode a CMS AuthEnvelopedData sample whose recipient info uses the
+    RFC 8696 key-agreement-with-PSK mechanism."""
+
+    key_agree_psk_pem_text = """\
+MIIBRwYLKoZIhvcNAQkQARegggE2MIIBMgIBADGB5aSB4gYLKoZIhvcNAQkQDQIwgdICAQAE
+FHB0Zi1rbWM6MjE2ODQwMTEwMTIxoFWhUzATBgYrgQQBCwEGCWCGSAFlAwQBLQM8AAQ5G0Em
+Jk/2ks8sXY1kzbuG3Uu3ttWwQRXALFDJICjvYfr+yTpOQVkchm88FAh9MEkw4NKctokKNgps
+MA0GCyqGSIb3DQEJEAMdMAsGCWCGSAFlAwQBLTBEMEKgFgQU6CGLmLi32Gtenr3IrrjE7NwF
+xSkEKCKf4LReQAA+fYJE7Bt+f/ssjcoWw29XNyIlU6cSY6kr3giGamAtY/QwNwYJKoZIhvcN
+AQcBMBsGCWCGSAFlAwQBLjAOBAzbrd7K+IjK/rq++s6ADfxtb4I+PtLSCdDG/88EDFUCYMQu
+WylxlCbB/w==
+"""
+
+    def setUp(self):
+        self.asn1Spec = rfc5652.ContentInfo()
+
+    def testDerCodec(self):
+        """Walk ContentInfo -> AuthEnvelopedData -> KeyAgreePSKRecipientInfo
+        and verify the recipient's subjectKeyIdentifier."""
+        substrate = pem.readBase64fromText(self.key_agree_psk_pem_text)
+        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+        self.assertFalse(rest)
+        self.assertTrue(asn1Object.prettyPrint())
+        self.assertEqual(substrate, der_encoder(asn1Object))
+
+        self.assertEqual(
+            rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+        aed, rest = der_decoder(
+            asn1Object['content'],
+            asn1Spec=rfc5083.AuthEnvelopedData())
+
+        self.assertFalse(rest)
+        self.assertTrue(aed.prettyPrint())
+        self.assertEqual(asn1Object['content'], der_encoder(aed))
+        self.assertEqual(0, aed['version'])
+
+        # The single recipient must use the key-agreement-with-PSK ORI type.
+        ri = aed['recipientInfos'][0]
+        self.assertEqual(rfc8696.id_ori_keyAgreePSK, ri['ori']['oriType'])
+
+        kapsk, rest = der_decoder(
+            ri['ori']['oriValue'],
+            asn1Spec=rfc8696.KeyAgreePSKRecipientInfo())
+
+        self.assertFalse(rest)
+        self.assertTrue(kapsk.prettyPrint())
+        self.assertEqual(ri['ori']['oriValue'], der_encoder(kapsk))
+        self.assertEqual(0, kapsk['version'])
+
+        # The recipient is identified by its subject key identifier.
+        rek = kapsk['recipientEncryptedKeys'][0]
+        ski = rek['rid']['rKeyId']['subjectKeyIdentifier']
+        expected_ski = univ.OctetString(
+            hexValue='e8218b98b8b7d86b5e9ebdc8aeb8c4ecdc05c529')
+
+        self.assertEqual(expected_ski, ski)
+
+    def testOtherRecipientInfoMap(self):
+        """The RFC 8696 ORI type must be resolvable through the shared
+        rfc5652.otherRecipientInfoMap registry."""
+        substrate = pem.readBase64fromText(self.key_agree_psk_pem_text)
+        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+        self.assertFalse(rest)
+        self.assertTrue(asn1Object.prettyPrint())
+        self.assertEqual(substrate, der_encoder(asn1Object))
+
+        self.assertEqual(
+            rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+        aed, rest = der_decoder(
+            asn1Object['content'],
+            asn1Spec=rfc5083.AuthEnvelopedData())
+
+        self.assertFalse(rest)
+        self.assertTrue(aed.prettyPrint())
+        self.assertEqual(asn1Object['content'], der_encoder(aed))
+        self.assertEqual(0, aed['version'])
+
+        ri = aed['recipientInfos'][0]
+        self.assertIn(ri['ori']['oriType'], rfc5652.otherRecipientInfoMap)
+
+        # Decode the oriValue using the spec looked up from the map.
+        ori, rest = der_decoder(
+            ri['ori']['oriValue'],
+            asn1Spec=rfc5652.otherRecipientInfoMap[ri['ori']['oriType']])
+
+        self.assertFalse(rest)
+        self.assertTrue(ori.prettyPrint())
+        self.assertEqual(ri['ori']['oriValue'], der_encoder(ori))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8702.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8702.py
new file mode 100644
index 0000000000..d6303cfca5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8702.py
@@ -0,0 +1,140 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6211
+from pyasn1_modules import rfc8702
+
+
+class AlgorithmIdentifierTestCase(unittest.TestCase):
+ pem_text = """\
+MEowCwYJYIZIAWUDBAILMAsGCWCGSAFlAwQCDDAKBggrBgEFBQcGHjAKBggrBgEF
+BQcGHzAKBggrBgEFBQcGIDAKBggrBgEFBQcGIQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid_list = (
+ rfc8702.id_shake128,
+ rfc8702.id_shake256,
+ rfc8702.id_RSASSA_PSS_SHAKE128,
+ rfc8702.id_RSASSA_PSS_SHAKE256,
+ rfc8702.id_ecdsa_with_shake128,
+ rfc8702.id_ecdsa_with_shake256,
+ )
+
+ for algid in asn1Object:
+ self.assertIn(algid['algorithm'], oid_list)
+
+
+class AuthenticatedDataTestCase(unittest.TestCase):
+    """Decode a CMS AuthenticatedData sample that uses the RFC 8702
+    SHAKE128 digest and KMAC-with-SHAKE128 MAC algorithms."""
+
+    auth_message_pem_text = """\
+MIIDqgYLKoZIhvcNAQkQAQKgggOZMIIDlQIBADGCAk8wggJLAgEAMDMwJjEUMBIG
+A1UECgwLZXhhbXBsZS5jb20xDjAMBgNVBAMMBUFsaWNlAgkAg/ULtwvVxA4wDQYJ
+KoZIhvcNAQEBBQAEggIAdZphtN3x8a8kZoAFY15HYRD6JyPBueRUhLbTPoOH3pZ9
+xeDK+zVXGlahl1y1UOe+McEx2oD7cxAkhFuruNZMrCYEBCTZMwVhyEOZlBXdZEs8
+rZUHL3FFE5PJnygsSIO9DMxd1UuTFGTgCm5V5ZLFGmjeEGJRbsfTyo52S7iseJqI
+N3dl743DbApu0+yuUoXKxqKdUFlEVxmhvc+Qbg/zfiwu8PTsYiUQDMBi4cdIlju8
+iLjj389xQHNyndXHWD51is89GG8vpBe+IsN8mnbGtCcpqtJ/c65ErJhHTR7rSJSM
+EqQD0LPOCKIY1q9FaSSJfMXJZk9t/rPxgUEVjfw7hAkKpgOAqoZRN+FpnFyBl0Fn
+nXo8kLp55tfVyNibtUpmdCPkOwt9b3jAtKtnvDQ2YqY1/llfEUnFOVDKwuC6MYwi
+fm92qNlAQA/T0+ocjs6gA9zOLx+wD1zqM13hMD/L+T2OHL/WgvGb62JLrNHXuPWA
+8RShO4kIlPtARKXap2S3+MX/kpSUUrNa65Y5uK1jwFFclczG+CPCIBBn6iJiQT/v
+OX1I97YUP4Qq6OGkjK064Bq6o8+e5+NmIOBcygYRv6wA7vGkmPLSWbnw99qD728b
+Bh84fC3EjItdusqGIwjzL0eSUWXJ5eu0Z3mYhJGN1pe0R/TEB5ibiJsMLpWAr3gw
+FQYJYIZIAWUDBAITMAgEBnB5YXNuMaELBglghkgBZQMEAgswNQYJKoZIhvcNAQcB
+oCgEJldhdHNvbiwgY29tZSBoZXJlIC0gSSB3YW50IHRvIHNlZSB5b3UuooG/MBgG
+CSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE5MDkxOTEz
+NDEwMFowHwYJKoZIhvcNAQkEMRIEENiFx45okcgTCVIBhhgF+ogwLwYLKoZIhvcN
+AQkQAgQxIDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZIhvcNAQcBMDMGCSqGSIb3
+DQEJNDEmMCQwCwYJYIZIAWUDBAILohUGCWCGSAFlAwQCEzAIBAZweWFzbjEEIBxm
+7hx+iivDlWYp8iUmYYbc2xkpBAcTACkWH+KBRZuF
+"""
+
+    def setUp(self):
+        self.asn1Spec = rfc5652.ContentInfo()
+
+    def testDerCodec(self):
+        """Check the KMAC algorithm identifiers at every layer of the
+        AuthenticatedData structure, including the RFC 6211 protection
+        attribute."""
+        substrate = pem.readBase64fromText(self.auth_message_pem_text)
+        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+        self.assertFalse(rest)
+        self.assertTrue(asn1Object.prettyPrint())
+        self.assertEqual(substrate, der_encoder(asn1Object))
+
+        self.assertEqual(rfc5652.id_ct_authData, asn1Object['contentType'])
+        ad, rest = der_decoder(
+            asn1Object['content'], asn1Spec=rfc5652.AuthenticatedData())
+
+        self.assertFalse(rest)
+        self.assertTrue(ad.prettyPrint())
+        self.assertEqual(asn1Object['content'], der_encoder(ad))
+
+        self.assertEqual(
+            rfc8702.id_shake128, ad['digestAlgorithm']['algorithm'])
+
+        ad_mac = ad['macAlgorithm']
+        self.assertEqual(
+            rfc8702.id_KMACWithSHAKE128, ad_mac['algorithm'])
+
+        # The KMAC parameters are decoded via the spec registered in the
+        # shared RFC 5280 algorithmIdentifierMap.
+        kmac128_p, rest = der_decoder(
+            ad_mac['parameters'],
+            asn1Spec=rfc5280.algorithmIdentifierMap[ad_mac['algorithm']])
+
+        self.assertFalse(rest)
+        self.assertTrue(kmac128_p.prettyPrint())
+        self.assertEqual(ad_mac['parameters'], der_encoder(kmac128_p))
+
+        self.assertEqual(
+            univ.OctetString("pyasn1"), kmac128_p['customizationString'])
+
+        # The RFC 6211 algorithm-protection attribute must repeat the same
+        # digest and MAC algorithm identifiers.
+        found_kmac128_params = False
+        for attr in ad['authAttrs']:
+            if attr['attrType'] == rfc6211.id_aa_cmsAlgorithmProtect:
+                av, rest = der_decoder(
+                    attr['attrValues'][0],
+                    asn1Spec=rfc6211.CMSAlgorithmProtection())
+
+                self.assertFalse(rest)
+                self.assertTrue(av.prettyPrint())
+                self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+                self.assertEqual(
+                    rfc8702.id_shake128, av['digestAlgorithm']['algorithm'])
+
+                self.assertEqual(
+                    rfc8702.id_KMACWithSHAKE128, av['macAlgorithm']['algorithm'])
+
+                found_kmac128_params = True
+
+        self.assertTrue(found_kmac128_params)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8708.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8708.py
new file mode 100644
index 0000000000..049aead8e8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8708.py
@@ -0,0 +1,127 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8708
+
+
+class HashSigPublicKeyTestCase(unittest.TestCase):
+ public_key_pem_text = """\
+MFAwDQYLKoZIhvcNAQkQAxEDPwAEPAAAAAIAAAAGAAAAA9CPq9SiCR/wqMtO2DTn
+RTQypYiFzZugQxI1Rmv/llHGySEkQE1F+lPPFhwo8a1ajg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.SubjectPublicKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.public_key_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ asn1Object['algorithm']['algorithm'],
+ rfc8708.id_alg_hss_lms_hashsig)
+
+
+class HashSigSignedDataTestCase(unittest.TestCase):
+ signed_data_pem_text = """\
+MIIKfQYJKoZIhvcNAQcCoIIKbjCCCmoCAQMxADAtBgkqhkiG9w0BBwGgIAQe
+VGhpcyBpcyBzb21lIHNhbXBsZSBjb250ZW50Lg0KMYIKMjCCCi4CAQOABkhp
+TW9tITALBglghkgBZQMEAgGgMTAvBgkqhkiG9w0BCQQxIgQgF6DPgklChkQZ
+NfFTIwED50Du7vSlr2SKRDkhJIYWL8gwDQYLKoZIhvcNAQkQAxEEggnQAAAA
+AAAAAAEAAAADkSkd52zm4k0eKptgJmUZvIzoifgU3rD8AO3TEp3jq5v2DGW6
+SNGcy3dtMFWLhS9DutlHr2Iphd6AGwaL7RGA004g3b+BnpErvDqnGIPy5CZl
+u0Q1UlL2U9DEp6LaPrhZSv7LVR5odGEu9h8TvI/HCXs8IeaDW3S+mtaGOr+V
+sRLCwWpCOYQXBfErxfRJqCgJEtJjQ/KPdROB3u4yTRcFh8qyfHBJCN5Cphx7
+g/NkceH36hXJP/L7f2oFFMP9bloY7Tqyt9etZUKdlDiyrxZGCmdcmCJoj9SL
+hrkz+nIisCMKy3MjjZ+pT1XUOuv6QOCJcTezCDucuspflxyqJADXIbXnMn6B
+7H/vYfxuXCBWyRXLulOe00xNY2XaIAdJRGdm1oLuLWsNuv+v9stWiZGQT3j6
+AQlC0CV1PFno/TpAeTFUcKo+fxHOmDOfV7wGExWhOoh1+1c0eQjJujefNJMB
+9lgSFMCYcLcsOXN+xMRqlhmbZsrSmQvL5bsav96ZEHx/g7OkEenXupLA0RsG
+UrggIMHshcISeZAH6sYKPSVNYFx8ub9UVNgUvAxSygUei9UnDvTCUGAhs/1U
+ZZZnzwRwWh7BgyAb35mzl79jCRXgsZ84GFcZi9WtiWsQWoRN8/YM0d13o4NS
+6gtsCqOKdo21mAyQ7D9UnTZBWhlhRX1M9M14hblDGtkI02pvioJiVtKqNPiq
+BzGjV8Bg246A/v1hsRDOID+toMvgnneS8tBc279QGYessKBUWFvKjkzJFrui
+yv1WgFyyc+YxujldI+hqz26uYxgaWv4fCjYcu9X+/rcxwapgvSUgcdaJydnM
+Ht/oqgI1xlT3WPyJNlFa40QcO/BTuC7bzrX6j9H0tlSlbxJfakZwGuNL19o1
+tYSAnBhTkcz4OTVCTJAL1pgqK4zljUO/R+iPgvWavMKIh1HxXJB4EER4FF/L
+hFZMCqPdDN0EN5NOr+9n14ko34m+u/izqAKyAakF3eEi0ZISU52yEpajSvmA
+s8HIEbml3khWvmfH2W+FJ5thiPKCwfI4r68nfEL4Cbd+tDNQQVNieSBggWMB
+uPzWhjho+5IvRtLdXHDCxQ5cLOQsC+bE7q+8d1UG4vAS2RzpEmhc0vKj/R0Y
+ItqA9AVE0DcKkEqQTpvbpkfoeEOdyTKUPCDQIZSOlO7+H3PvMbdmUKrJ9DMJ
+1LmdDJiwHXb9YHXSCEUESszqVNxIcql8LbzwqZaAAct8IvnZOBgf1dOR8SjA
+3RBUwus1ph4uLzVTkWFqj4kpNfGx/nfcAcJMWPwbTKKPQKUwzjfCNOyy4pPV
+0HEDRR5YFF5wWfvFbpNqEIUfxhDKg8F/r5dbjzgnSSnzawQilxJyFp+XlOYW
+pU5gMDuGoISu2yCyLO/yShAHqKcJOofy+NBt+AIk0uZAQlGXDkJTmDXp+VBg
+ZnVOdMGOFFZMWEVR6pxEKiBPH72B+Vd16NAEJwPBslisrgN7f8neuZvYApG0
+jX+Kt7DrG4V4kIvXSB82luObdGQMoHsS9B4775mXkhn/tKpQNfavHXgaDfwu
+OSvUcFRvX6JhpA+7RJjJVwA85zWpYGPUJVHC/1Roc1GIH+4l885dHfLPAVCL
+GkuYhAPiqnOKPgsOfxlFakDLK+8EePw9ixr/0O2fz4sNgNnz0cMjyY7FmTBL
+E7kiyph3jBu2PHPFm7V8xPq0OTzu+wt6Wol/KK8lHEYF4dXmxpk/Rp8mAhTM
+OrK+UxZX/rEhgNMqiV3b15xjXXSzzv2zQ1MlfM6zdX3OeWF0djjj4TOGtd50
+LQjudbyhyZ8yJSWBvPcnrd9kzGf4Vd42x1/8EVsxlh8pLesJGbTTcfNKhSxL
+4oRppxB1iiLLlsmbsWrqSXee9+4GXMmk099+85HPZZWm2MFLDYD5MCOxs9Q3
+EjnamLZ6G2o3k2Iec2K8ExQICfHUFiz3Xqm/opVMO1AF57khY0QX8RsmTW+7
+jL+pOxTzyGj7qo2RolhjpsALRd65GXsPidNnb5jBYZY/xM4KrdBzoI67CX9A
+8gzDf/v+Ob0DF0Y1HWAZ+hGG4JNTrIdwuhWALnhoZNFlaoVOO0k/OsZ3upwD
+bYtbLqv0NPzcN1N/yOQhCRmB1N3pTI6fVQQN7AcIzzUxMVpNgk25yomzgE8N
+6FlBHVIEnKy3fVME5aokhb0TNU7RpGPWDYdSgcEuKltkCVZRObvB/QKu6HLM
+ErOdFtE0xnTqeIADeT84cIupofpnJPsguY8T/KJkzSPJa/MrZM5Pb3aw/cnk
+WkhczBk79aver+0v/4NyF/+n9e8khNPl8jQ0kayxKtIiYfXP2tXBuxLsmx7U
+cdm9qae446tt5uIkbUx4g9a58yCVDpEmZ0DG2/rWs8/lbeCqZliw3Ik7tuSe
+YiMfRtqA86MTf6ugKP6b9hF+zuSYxf0GfbZsvvYGQSgeCU+meiUKF7ckoav1
+LpfVoloCXq18TZ5hrRqnVpx2O6eb6F6Q9A7OJ205FmwCuNz3acJRXkq0IFQf
+fxs6faAXHE7cLaZY16Sal61qovvjsEPURnSVsG2j3GU2ed/gwfTiHmQKwFAF
+4ns49Wpt6TkX0QZ6sBtOHEhhDEjSxtl/CC8MWm9idDElxYCg56yRfi6aTuVG
+Bl8bYn7zvIVwDj+bDfvdzu3UvZUi1IDOylUDH6siBJDa7eEetRgLpTX+QIhQ
+5yqAyA/TQiJKO1PBsYXoVT6RZBQQiJr7+OWtDqAr+K+Bv34Daax5OUEIMavi
+eWzsJz/xLRH0cph04eobCfGRMoaJtYkCy6xORMkxQWtHzV4gAm1bgbQHoOKc
+quyB8cNShGMTLwBYmp+AIadBCfjb+B/igsH1i/PypSxWDji/1osYxM58O6Yb
+NmK1irtuh2PIVb2SUrqEB/2MvSr89bU5gwAAAAbtHOjG5DeRjUP7p72ThWlM
+QRgnA/a39wTe7dk4S6b4vDYslIZGs8mEiAPm2boffTln9wnN3TXcd9YDVvDD
+aAiQC0kctOy7q+wSjnyBpG5ipntXZAoKeL4cv33Z1BmhDNhobRZiGoCBa/21
+vcViEdcspwuB8RF9EpUpp1cM95z1KnAopIU47N07ONPV1i0mJGWVxPtzpSWl
+7SwwUk67HYzILgwZvEl3xomP+V/T0xCwuucWls75PGpVJFa/lunQdeODu3VD
+xnWEK6+/x824hIOzJ2wp1PCjQcLUBuQNRlO35NBFhRrPagoOqccQuAXM7UY1
+7owQc2Lw/I2AwU0KxJxRZwPSbRR1LzTBwNLEJHWBwYws9N5I6c6Um+fIiOnK
+6+SkFeKR/RB9IdwfCEsRWCCCSfKPT3x+kxuns70NgkpFcA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ oid = sd['signerInfos'][0]['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc8708.id_alg_hss_lms_hashsig, oid)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py2/tests/test_rfc8769.py b/contrib/python/pyasn1-modules/py2/tests/test_rfc8769.py
new file mode 100644
index 0000000000..614f326720
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/test_rfc8769.py
@@ -0,0 +1,134 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8769
+
+
+class CBORContentTestCase(unittest.TestCase):
+    """Peel the CMS layers of a SignedData sample and verify the inner
+    eContentType is id-ct-cbor (RFC 8769)."""
+
+    pem_text = """\
+MIIEHwYJKoZIhvcNAQcCoIIEEDCCBAwCAQMxDTALBglghkgBZQMEAgIwIQYLKoZIhvcNAQkQ
+ASygEgQQgw9kUnVzc/tADzMzMzMzM6CCAnwwggJ4MIIB/qADAgECAgkApbNUKBuwbjswCgYI
+KoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9u
+MREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1MjkxNDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAx
+CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMH
+RXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUu
+Y29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wm
+JZrBBg+bz7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0ENRYzVGhp
+cyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1Ud
+DgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs
+4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3
++NDCkhfKuMNoo/tHrkmihYgCMQC94MaerDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS
+7oAh0ymT446Uizxx3PUxggFTMIIBTwIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJW
+QTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglg
+hkgBZQMEAgKgezAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQASwwHAYJKoZIhvcNAQkFMQ8X
+DTIwMDExNDIyMjIxNVowPwYJKoZIhvcNAQkEMTIEMADSWdHn4vsesm9XnjJq1WxkoV6EtD+f
+qDAs1JEpZMZ+n8AtUxvC5SFobYpGCl+fsDAKBggqhkjOPQQDAwRmMGQCMGclPwvZLwVJqgON
+mOfnxSF8Cqn3AC+ZFBg7VplspiuhKPNIyu3IofqZjCxw0TzSpAIwEK0JxNlY28KDb5te0iN6
+I2hw+am26W+PRyltVVGUAISHM2kA4tG39HcxEQi+6HJx
+"""
+
+    def testDerCodec(self):
+        """Iteratively decode ContentInfo/SignedData layers until the
+        content type is no longer a known CMS wrapper; the innermost type
+        must be id-ct-cbor."""
+        substrate = pem.readBase64fromText(self.pem_text)
+
+        # Known CMS content types that can wrap further content.
+        layers = { }
+        layers.update(rfc5652.cmsContentTypesMap)
+
+        # How to find the next content type at each wrapper layer.
+        getNextLayer = {
+            rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+            rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+        }
+
+        # How to find the next substrate at each wrapper layer.
+        getNextSubstrate = {
+            rfc5652.id_ct_contentInfo: lambda x: x['content'],
+            rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+        }
+
+        next_layer = rfc5652.id_ct_contentInfo
+        while next_layer in layers:
+            asn1Object, rest = der_decoder(
+                substrate, asn1Spec=layers[next_layer])
+
+            # Each layer must round-trip through the DER codec.
+            self.assertFalse(rest)
+            self.assertTrue(asn1Object.prettyPrint())
+            self.assertEqual(substrate, der_encoder(asn1Object))
+
+            substrate = getNextSubstrate[next_layer](asn1Object)
+            next_layer = getNextLayer[next_layer](asn1Object)
+
+        self.assertEqual(rfc8769.id_ct_cbor, next_layer)
+
+
+class CBORSequenceContentTestCase(unittest.TestCase):
+    """Peel the CMS layers of a SignedData sample and verify the inner
+    eContentType is id-ct-cborSequence (RFC 8769)."""
+
+    pem_text = """\
+MIIEKQYJKoZIhvcNAQcCoIIEGjCCBBYCAQMxDTALBglghkgBZQMEAgIwKgYLKoZIhvcNAQkQ
+AS2gGwQZgw9kUnVzc/tADzMzMzMzM6MDCSD1YWFhYqCCAnwwggJ4MIIB/qADAgECAgkApbNU
+KBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQH
+DAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1MjkxNDQ1NDFaFw0yMDA1Mjgx
+NDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQ
+MA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNl
+QGV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+
+LuAHtZxes1wmJZrBBg+bz7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8
+Csg2DhQ7qs/wto8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhC
+AQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBv
+c2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAWgBTyNds0BNql
+VfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL3kRhmn+PJTeKaL9sh/oQ
+gHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94MaerDIrQpi0IDh+v0QSAv9rMife8tCl
+afXWtDwwL8MS7oAh0ymT446Uizxx3PUxggFUMIIBUAIBATBMMD8xCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1Qo
+G7BuOzALBglghkgBZQMEAgKgezAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAS0wHAYJKoZI
+hvcNAQkFMQ8XDTIwMDExNDIyMjIxNVowPwYJKoZIhvcNAQkEMTIEMOsEu3dGU5j6fKZbsZPL
+LDA8QWxpP36CPDZWr3BVJ3R5mMCKCSmoWtVRnB7XASQcjTAKBggqhkjOPQQDAwRnMGUCMBLW
+PyYw4c11nrH97KHnEmx3BSDX/SfepFNM6PoPR5HCI+OR/v/wlIIByuhyrIl8xAIxAK8dEwOe
+I06um+ATKQzUcbgq0PCKA7T31pAq46fsWc5tA+mMARTrxZjSXsDneeAWpw==
+"""
+
+    def testDerCodec(self):
+        """Iteratively decode ContentInfo/SignedData layers until the
+        content type is no longer a known CMS wrapper; the innermost type
+        must be id-ct-cborSequence."""
+        substrate = pem.readBase64fromText(self.pem_text)
+
+        # Known CMS content types that can wrap further content.
+        layers = { }
+        layers.update(rfc5652.cmsContentTypesMap)
+
+        # How to find the next content type at each wrapper layer.
+        getNextLayer = {
+            rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+            rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+        }
+
+        # How to find the next substrate at each wrapper layer.
+        getNextSubstrate = {
+            rfc5652.id_ct_contentInfo: lambda x: x['content'],
+            rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+        }
+
+        next_layer = rfc5652.id_ct_contentInfo
+        while next_layer in layers:
+            asn1Object, rest = der_decoder(
+                substrate, asn1Spec=layers[next_layer])
+
+            # Each layer must round-trip through the DER codec.
+            self.assertFalse(rest)
+            self.assertTrue(asn1Object.prettyPrint())
+            self.assertEqual(substrate, der_encoder(asn1Object))
+
+            substrate = getNextSubstrate[next_layer](asn1Object)
+            next_layer = getNextLayer[next_layer](asn1Object)
+
+        self.assertEqual(rfc8769.id_ct_cborSequence, next_layer)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py2/tests/ya.make b/contrib/python/pyasn1-modules/py2/tests/ya.make
new file mode 100644
index 0000000000..92a81e2ce5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/tests/ya.make
@@ -0,0 +1,136 @@
+# Python 2 test package for contrib/python/pyasn1-modules.
+# Each entry in TEST_SRCS is a standalone unittest module, named after the
+# RFC whose ASN.1 definitions it exercises.
+PY2TEST()
+
+PEERDIR(
+    contrib/python/pyasn1-modules
+)
+
+TEST_SRCS(
+    __init__.py
+    test_missing.py
+    test_pem.py
+    test_rfc2314.py
+    test_rfc2315.py
+    test_rfc2437.py
+    test_rfc2459.py
+    test_rfc2511.py
+    test_rfc2560.py
+    test_rfc2631.py
+    test_rfc2634.py
+    test_rfc2876.py
+    test_rfc2985.py
+    test_rfc2986.py
+    test_rfc3058.py
+    test_rfc3114.py
+    test_rfc3125.py
+    test_rfc3161.py
+    test_rfc3274.py
+    test_rfc3279.py
+    test_rfc3280.py
+    test_rfc3281.py
+    test_rfc3370.py
+    test_rfc3447.py
+    test_rfc3537.py
+    test_rfc3560.py
+    test_rfc3565.py
+    test_rfc3657.py
+    test_rfc3709.py
+    test_rfc3739.py
+    test_rfc3770.py
+    test_rfc3779.py
+    test_rfc3820.py
+    test_rfc3852.py
+    test_rfc4010.py
+    test_rfc4043.py
+    test_rfc4055.py
+    test_rfc4073.py
+    test_rfc4108.py
+    test_rfc4210.py
+    test_rfc4211.py
+    test_rfc4334.py
+    test_rfc4357.py
+    test_rfc4387.py
+    test_rfc4476.py
+    test_rfc4490.py
+    test_rfc4491.py
+    test_rfc4683.py
+    test_rfc4985.py
+    test_rfc5035.py
+    test_rfc5083.py
+    test_rfc5084.py
+    test_rfc5126.py
+    test_rfc5208.py
+    test_rfc5275.py
+    test_rfc5280.py
+    test_rfc5480.py
+    test_rfc5636.py
+    test_rfc5639.py
+    test_rfc5649.py
+    test_rfc5652.py
+    test_rfc5697.py
+    test_rfc5751.py
+    test_rfc5752.py
+    test_rfc5753.py
+    test_rfc5755.py
+    test_rfc5913.py
+    test_rfc5914.py
+    test_rfc5915.py
+    test_rfc5916.py
+    test_rfc5917.py
+    test_rfc5924.py
+    test_rfc5934.py
+    test_rfc5940.py
+    test_rfc5958.py
+    test_rfc5990.py
+    test_rfc6010.py
+    test_rfc6019.py
+    test_rfc6031.py
+    test_rfc6032.py
+    test_rfc6120.py
+    test_rfc6187.py
+    test_rfc6210.py
+    test_rfc6211.py
+    test_rfc6402.py
+    test_rfc6482.py
+    test_rfc6486.py
+    test_rfc6487.py
+    test_rfc6664.py
+    test_rfc6955.py
+    test_rfc6960.py
+    test_rfc7030.py
+    test_rfc7191.py
+    test_rfc7229.py
+    test_rfc7292.py
+    test_rfc7296.py
+    test_rfc7508.py
+    test_rfc7585.py
+    test_rfc7633.py
+    test_rfc7773.py
+    test_rfc7894.py
+    test_rfc7906.py
+    test_rfc7914.py
+    test_rfc8017.py
+    test_rfc8018.py
+    test_rfc8103.py
+    test_rfc8209.py
+    test_rfc8226.py
+    test_rfc8358.py
+    test_rfc8360.py
+    test_rfc8398.py
+    test_rfc8410.py
+    test_rfc8418.py
+    test_rfc8419.py
+    test_rfc8479.py
+    test_rfc8494.py
+    test_rfc8520.py
+    test_rfc8619.py
+    test_rfc8649.py
+    test_rfc8692.py
+    test_rfc8696.py
+    test_rfc8702.py
+    test_rfc8708.py
+    test_rfc8769.py
+)
+
+# Vendored third-party tests: linting is intentionally disabled.
+NO_LINT()
+
+END()
diff --git a/contrib/python/pyasn1-modules/py2/ya.make b/contrib/python/pyasn1-modules/py2/ya.make
new file mode 100644
index 0000000000..a1bab079af
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py2/ya.make
@@ -0,0 +1,161 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(0.3.0)
+
+LICENSE(BSD-2-Clause)
+
+PEERDIR(
+ contrib/python/pyasn1
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ pyasn1_modules/__init__.py
+ pyasn1_modules/pem.py
+ pyasn1_modules/rfc1155.py
+ pyasn1_modules/rfc1157.py
+ pyasn1_modules/rfc1901.py
+ pyasn1_modules/rfc1902.py
+ pyasn1_modules/rfc1905.py
+ pyasn1_modules/rfc2251.py
+ pyasn1_modules/rfc2314.py
+ pyasn1_modules/rfc2315.py
+ pyasn1_modules/rfc2437.py
+ pyasn1_modules/rfc2459.py
+ pyasn1_modules/rfc2511.py
+ pyasn1_modules/rfc2560.py
+ pyasn1_modules/rfc2631.py
+ pyasn1_modules/rfc2634.py
+ pyasn1_modules/rfc2876.py
+ pyasn1_modules/rfc2985.py
+ pyasn1_modules/rfc2986.py
+ pyasn1_modules/rfc3058.py
+ pyasn1_modules/rfc3114.py
+ pyasn1_modules/rfc3125.py
+ pyasn1_modules/rfc3161.py
+ pyasn1_modules/rfc3274.py
+ pyasn1_modules/rfc3279.py
+ pyasn1_modules/rfc3280.py
+ pyasn1_modules/rfc3281.py
+ pyasn1_modules/rfc3370.py
+ pyasn1_modules/rfc3412.py
+ pyasn1_modules/rfc3414.py
+ pyasn1_modules/rfc3447.py
+ pyasn1_modules/rfc3537.py
+ pyasn1_modules/rfc3560.py
+ pyasn1_modules/rfc3565.py
+ pyasn1_modules/rfc3657.py
+ pyasn1_modules/rfc3709.py
+ pyasn1_modules/rfc3739.py
+ pyasn1_modules/rfc3770.py
+ pyasn1_modules/rfc3779.py
+ pyasn1_modules/rfc3820.py
+ pyasn1_modules/rfc3852.py
+ pyasn1_modules/rfc4010.py
+ pyasn1_modules/rfc4043.py
+ pyasn1_modules/rfc4055.py
+ pyasn1_modules/rfc4073.py
+ pyasn1_modules/rfc4108.py
+ pyasn1_modules/rfc4210.py
+ pyasn1_modules/rfc4211.py
+ pyasn1_modules/rfc4334.py
+ pyasn1_modules/rfc4357.py
+ pyasn1_modules/rfc4387.py
+ pyasn1_modules/rfc4476.py
+ pyasn1_modules/rfc4490.py
+ pyasn1_modules/rfc4491.py
+ pyasn1_modules/rfc4683.py
+ pyasn1_modules/rfc4985.py
+ pyasn1_modules/rfc5035.py
+ pyasn1_modules/rfc5083.py
+ pyasn1_modules/rfc5084.py
+ pyasn1_modules/rfc5126.py
+ pyasn1_modules/rfc5208.py
+ pyasn1_modules/rfc5275.py
+ pyasn1_modules/rfc5280.py
+ pyasn1_modules/rfc5480.py
+ pyasn1_modules/rfc5636.py
+ pyasn1_modules/rfc5639.py
+ pyasn1_modules/rfc5649.py
+ pyasn1_modules/rfc5652.py
+ pyasn1_modules/rfc5697.py
+ pyasn1_modules/rfc5751.py
+ pyasn1_modules/rfc5752.py
+ pyasn1_modules/rfc5753.py
+ pyasn1_modules/rfc5755.py
+ pyasn1_modules/rfc5913.py
+ pyasn1_modules/rfc5914.py
+ pyasn1_modules/rfc5915.py
+ pyasn1_modules/rfc5916.py
+ pyasn1_modules/rfc5917.py
+ pyasn1_modules/rfc5924.py
+ pyasn1_modules/rfc5934.py
+ pyasn1_modules/rfc5940.py
+ pyasn1_modules/rfc5958.py
+ pyasn1_modules/rfc5990.py
+ pyasn1_modules/rfc6010.py
+ pyasn1_modules/rfc6019.py
+ pyasn1_modules/rfc6031.py
+ pyasn1_modules/rfc6032.py
+ pyasn1_modules/rfc6120.py
+ pyasn1_modules/rfc6170.py
+ pyasn1_modules/rfc6187.py
+ pyasn1_modules/rfc6210.py
+ pyasn1_modules/rfc6211.py
+ pyasn1_modules/rfc6402.py
+ pyasn1_modules/rfc6482.py
+ pyasn1_modules/rfc6486.py
+ pyasn1_modules/rfc6487.py
+ pyasn1_modules/rfc6664.py
+ pyasn1_modules/rfc6955.py
+ pyasn1_modules/rfc6960.py
+ pyasn1_modules/rfc7030.py
+ pyasn1_modules/rfc7191.py
+ pyasn1_modules/rfc7229.py
+ pyasn1_modules/rfc7292.py
+ pyasn1_modules/rfc7296.py
+ pyasn1_modules/rfc7508.py
+ pyasn1_modules/rfc7585.py
+ pyasn1_modules/rfc7633.py
+ pyasn1_modules/rfc7773.py
+ pyasn1_modules/rfc7894.py
+ pyasn1_modules/rfc7906.py
+ pyasn1_modules/rfc7914.py
+ pyasn1_modules/rfc8017.py
+ pyasn1_modules/rfc8018.py
+ pyasn1_modules/rfc8103.py
+ pyasn1_modules/rfc8209.py
+ pyasn1_modules/rfc8226.py
+ pyasn1_modules/rfc8358.py
+ pyasn1_modules/rfc8360.py
+ pyasn1_modules/rfc8398.py
+ pyasn1_modules/rfc8410.py
+ pyasn1_modules/rfc8418.py
+ pyasn1_modules/rfc8419.py
+ pyasn1_modules/rfc8479.py
+ pyasn1_modules/rfc8494.py
+ pyasn1_modules/rfc8520.py
+ pyasn1_modules/rfc8619.py
+ pyasn1_modules/rfc8649.py
+ pyasn1_modules/rfc8692.py
+ pyasn1_modules/rfc8696.py
+ pyasn1_modules/rfc8702.py
+ pyasn1_modules/rfc8708.py
+ pyasn1_modules/rfc8769.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/pyasn1-modules/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/pyasn1-modules/py3/.dist-info/METADATA b/contrib/python/pyasn1-modules/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..34a82a084a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/.dist-info/METADATA
@@ -0,0 +1,74 @@
+Metadata-Version: 2.1
+Name: pyasn1-modules
+Version: 0.3.0
+Summary: A collection of ASN.1-based protocols modules
+Home-page: https://github.com/pyasn1/pyasn1-modules
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: pyasn1 maintenance organization
+Maintainer-email: Christian Heimes <christian@python.org>
+License: BSD
+Project-URL: Source, https://github.com/pyasn1/pyasn1-modules
+Project-URL: Issues, https://github.com/pyasn1/pyasn1-modules/issues
+Project-URL: Changelog, https://github.com/pyasn1/pyasn1-modules/blob/master/CHANGES.txt
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Communications
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7
+Description-Content-Type: text/markdown
+License-File: LICENSE.txt
+Requires-Dist: pyasn1 (<0.6.0,>=0.4.6)
+
+
+ASN.1 modules for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1-modules.svg?maxAge=2592000)](https://pypi.org/project/pyasn1-modules)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1-modules.svg)](https://pypi.org/project/pyasn1-modules/)
+[![Build status](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1-modules.svg)](https://codecov.io/github/pyasn1/pyasn1-modules)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1-modules/master/LICENSE.txt)
+
+The `pyasn1-modules` package contains a collection of
+[ASN.1](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items)
+data structures expressed as Python classes based on [pyasn1](https://github.com/pyasn1/pyasn1)
+data model.
+
+If ASN.1 module you need is not present in this collection, try using
+[Asn1ate](https://github.com/kimgr/asn1ate) tool that compiles ASN.1 documents
+into pyasn1 code.
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1-modules.
+
+Feedback
+--------
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1-modules/issues) at GitHub
+or post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+
+New modules contributions are welcome via GitHub pull requests.
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1-modules/py3/.dist-info/top_level.txt b/contrib/python/pyasn1-modules/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..9dad8496ee
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1_modules
diff --git a/contrib/python/pyasn1-modules/py3/LICENSE.txt b/contrib/python/pyasn1-modules/py3/LICENSE.txt
new file mode 100644
index 0000000000..598b8430ef
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/LICENSE.txt
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/pyasn1-modules/py3/README.md b/contrib/python/pyasn1-modules/py3/README.md
new file mode 100644
index 0000000000..c70b1e8bc3
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/README.md
@@ -0,0 +1,32 @@
+
+ASN.1 modules for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1-modules.svg?maxAge=2592000)](https://pypi.org/project/pyasn1-modules)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1-modules.svg)](https://pypi.org/project/pyasn1-modules/)
+[![Build status](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1-modules/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1-modules.svg)](https://codecov.io/github/pyasn1/pyasn1-modules)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1-modules/master/LICENSE.txt)
+
+The `pyasn1-modules` package contains a collection of
+[ASN.1](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items)
+data structures expressed as Python classes based on [pyasn1](https://github.com/pyasn1/pyasn1)
+data model.
+
+If ASN.1 module you need is not present in this collection, try using
+[Asn1ate](https://github.com/kimgr/asn1ate) tool that compiles ASN.1 documents
+into pyasn1 code.
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1-modules.
+
+Feedback
+--------
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1-modules/issues) at GitHub
+or post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+
+New modules contributions are welcome via GitHub pull requests.
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/__init__.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/__init__.py
new file mode 100644
index 0000000000..95a220efd2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/__init__.py
@@ -0,0 +1,2 @@
+# http://www.python.org/dev/peps/pep-0396/
+__version__ = '0.3.0'
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/pem.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/pem.py
new file mode 100644
index 0000000000..f7c80a9b9d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/pem.py
@@ -0,0 +1,65 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import base64
+import sys
+
+stSpam, stHam, stDump = 0, 1, 2
+
+
+# The markers parameters is in form ('start1', 'stop1'), ('start2', 'stop2')...
+# Return is (marker-index, substrate)
+def readPemBlocksFromFile(fileObj, *markers):
+ startMarkers = dict(map(lambda x: (x[1], x[0]),
+ enumerate(map(lambda y: y[0], markers))))
+ stopMarkers = dict(map(lambda x: (x[1], x[0]),
+ enumerate(map(lambda y: y[1], markers))))
+ idx = -1
+ substrate = ''
+ certLines = []
+ state = stSpam
+ while True:
+ certLine = fileObj.readline()
+ if not certLine:
+ break
+ certLine = certLine.strip()
+ if state == stSpam:
+ if certLine in startMarkers:
+ certLines = []
+ idx = startMarkers[certLine]
+ state = stHam
+ continue
+ if state == stHam:
+ if certLine in stopMarkers and stopMarkers[certLine] == idx:
+ state = stDump
+ else:
+ certLines.append(certLine)
+ if state == stDump:
+ if sys.version_info[0] <= 2:
+ substrate = ''.join([base64.b64decode(x) for x in certLines])
+ else:
+ substrate = ''.encode().join([base64.b64decode(x.encode()) for x in certLines])
+ break
+ return idx, substrate
+
+
+# Backward compatibility routine
+def readPemFromFile(fileObj,
+ startMarker='-----BEGIN CERTIFICATE-----',
+ endMarker='-----END CERTIFICATE-----'):
+ idx, substrate = readPemBlocksFromFile(fileObj, (startMarker, endMarker))
+ return substrate
+
+
+def readBase64fromText(text):
+ if sys.version_info[0] <= 2:
+ return base64.b64decode(text)
+ else:
+ return base64.b64decode(text.encode())
+
+
+def readBase64FromFile(fileObj):
+ return readBase64fromText(fileObj.read())
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1155.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1155.py
new file mode 100644
index 0000000000..18702345d1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1155.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1155.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class ObjectName(univ.ObjectIdentifier):
+ pass
+
+
+class SimpleSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Integer()),
+ namedtype.NamedType('string', univ.OctetString()),
+ namedtype.NamedType('object', univ.ObjectIdentifier()),
+ namedtype.NamedType('empty', univ.Null())
+ )
+
+
+class IpAddress(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(
+ 4, 4
+ )
+
+
+class NetworkAddress(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('internet', IpAddress())
+ )
+
+
+class Counter(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 1)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Gauge(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class TimeTicks(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 3)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Opaque(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 4)
+ )
+
+
+class ApplicationSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('address', NetworkAddress()),
+ namedtype.NamedType('counter', Counter()),
+ namedtype.NamedType('gauge', Gauge()),
+ namedtype.NamedType('ticks', TimeTicks()),
+ namedtype.NamedType('arbitrary', Opaque())
+ )
+
+
+class ObjectSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', SimpleSyntax()),
+ namedtype.NamedType('application-wide', ApplicationSyntax())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1157.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1157.py
new file mode 100644
index 0000000000..df49e482db
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1157.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1157.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1155
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('version-1', 0)
+ )
+ defaultValue = 0
+
+
+class Community(univ.OctetString):
+ pass
+
+
+class RequestID(univ.Integer):
+ pass
+
+
+class ErrorStatus(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('noError', 0),
+ ('tooBig', 1),
+ ('noSuchName', 2),
+ ('badValue', 3),
+ ('readOnly', 4),
+ ('genErr', 5)
+ )
+
+
+class ErrorIndex(univ.Integer):
+ pass
+
+
+class VarBind(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', rfc1155.ObjectName()),
+ namedtype.NamedType('value', rfc1155.ObjectSyntax())
+ )
+
+
+class VarBindList(univ.SequenceOf):
+ componentType = VarBind()
+
+
+class _RequestBase(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', RequestID()),
+ namedtype.NamedType('error-status', ErrorStatus()),
+ namedtype.NamedType('error-index', ErrorIndex()),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class GetRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+
+
+class GetNextRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+
+
+class GetResponsePDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+
+
+class SetRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+
+
+class TrapPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
+ namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
+ namedtype.NamedType('generic-trap', univ.Integer().clone(
+ namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
+ ('authenticationFailure', 4), ('egpNeighborLoss', 5),
+ ('enterpriseSpecific', 6)))),
+ namedtype.NamedType('specific-trap', univ.Integer()),
+ namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class Pdus(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('get-request', GetRequestPDU()),
+ namedtype.NamedType('get-next-request', GetNextRequestPDU()),
+ namedtype.NamedType('get-response', GetResponsePDU()),
+ namedtype.NamedType('set-request', SetRequestPDU()),
+ namedtype.NamedType('trap', TrapPDU())
+ )
+
+
+class Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('community', Community()),
+ namedtype.NamedType('data', Pdus())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1901.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1901.py
new file mode 100644
index 0000000000..658dcb9381
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1901.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1901.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+
+class Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('version-2c', 1)))),
+ namedtype.NamedType('community', univ.OctetString()),
+ namedtype.NamedType('data', univ.Any())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1902.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1902.py
new file mode 100644
index 0000000000..063998a948
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1902.py
@@ -0,0 +1,129 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1902.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class Integer(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ -2147483648, 2147483647
+ )
+
+
+class Integer32(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ -2147483648, 2147483647
+ )
+
+
+class OctetString(univ.OctetString):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(
+ 0, 65535
+ )
+
+
+class IpAddress(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x00)
+ )
+ subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+ 4, 4
+ )
+
+
+class Counter32(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x01)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Gauge32(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Unsigned32(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class TimeTicks(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x03)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Opaque(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x04)
+ )
+
+
+class Counter64(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x06)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 18446744073709551615
+ )
+
+
+class Bits(univ.OctetString):
+ pass
+
+
+class ObjectName(univ.ObjectIdentifier):
+ pass
+
+
+class SimpleSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('integer-value', Integer()),
+ namedtype.NamedType('string-value', OctetString()),
+ namedtype.NamedType('objectID-value', univ.ObjectIdentifier())
+ )
+
+
+class ApplicationSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ipAddress-value', IpAddress()),
+ namedtype.NamedType('counter-value', Counter32()),
+ namedtype.NamedType('timeticks-value', TimeTicks()),
+ namedtype.NamedType('arbitrary-value', Opaque()),
+ namedtype.NamedType('big-counter-value', Counter64()),
+ # This conflicts with Counter32
+ # namedtype.NamedType('unsigned-integer-value', Unsigned32()),
+ namedtype.NamedType('gauge32-value', Gauge32())
+ ) # BITS misplaced?
+
+
+class ObjectSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', SimpleSyntax()),
+ namedtype.NamedType('application-wide', ApplicationSyntax())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1905.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1905.py
new file mode 100644
index 0000000000..435427b2bc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc1905.py
@@ -0,0 +1,135 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c PDU syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1905.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1902
+
+max_bindings = rfc1902.Integer(2147483647)
+
+
+class _BindValue(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('value', rfc1902.ObjectSyntax()),
+ namedtype.NamedType('unSpecified', univ.Null()),
+ namedtype.NamedType('noSuchObject',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('noSuchInstance',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('endOfMibView',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class VarBind(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', rfc1902.ObjectName()),
+ namedtype.NamedType('', _BindValue())
+ )
+
+
+class VarBindList(univ.SequenceOf):
+ componentType = VarBind()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(
+ 0, max_bindings
+ )
+
+
+class PDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', rfc1902.Integer32()),
+ namedtype.NamedType('error-status', univ.Integer(
+ namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3),
+ ('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7),
+ ('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10),
+ ('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13),
+ ('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16),
+ ('notWritable', 17), ('inconsistentName', 18)))),
+ namedtype.NamedType('error-index',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class BulkPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', rfc1902.Integer32()),
+ namedtype.NamedType('non-repeaters',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+ namedtype.NamedType('max-repetitions',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class GetRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+
+
+class GetNextRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+
+
+class ResponsePDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+
+
+class SetRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+
+
+class GetBulkRequestPDU(BulkPDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
+ )
+
+
+class InformRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
+ )
+
+
+class SNMPv2TrapPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
+ )
+
+
+class ReportPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
+ )
+
+
+class PDUs(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('get-request', GetRequestPDU()),
+ namedtype.NamedType('get-next-request', GetNextRequestPDU()),
+ namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()),
+ namedtype.NamedType('response', ResponsePDU()),
+ namedtype.NamedType('set-request', SetRequestPDU()),
+ namedtype.NamedType('inform-request', InformRequestPDU()),
+ namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()),
+ namedtype.NamedType('report', ReportPDU())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2251.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2251.py
new file mode 100644
index 0000000000..094922cad0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2251.py
@@ -0,0 +1,563 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# LDAP message syntax
+#
+# ASN.1 source from:
+# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/ldap.asn
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+maxInt = univ.Integer(2147483647)
+
+
+class LDAPString(univ.OctetString):
+ pass
+
+
+class LDAPOID(univ.OctetString):
+ pass
+
+
+class LDAPDN(LDAPString):
+ pass
+
+
+class RelativeLDAPDN(LDAPString):
+ pass
+
+
+class AttributeType(LDAPString):
+ pass
+
+
+class AttributeDescription(LDAPString):
+ pass
+
+
+class AttributeDescriptionList(univ.SequenceOf):
+ componentType = AttributeDescription()
+
+
+class AttributeValue(univ.OctetString):
+ pass
+
+
+class AssertionValue(univ.OctetString):
+ pass
+
+
+class AttributeValueAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeDesc', AttributeDescription()),
+ namedtype.NamedType('assertionValue', AssertionValue())
+ )
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class MatchingRuleId(LDAPString):
+ pass
+
+
+class Control(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlType', LDAPOID()),
+ namedtype.DefaultedNamedType('criticality', univ.Boolean('False')),
+ namedtype.OptionalNamedType('controlValue', univ.OctetString())
+ )
+
+
+class Controls(univ.SequenceOf):
+ componentType = Control()
+
+
+class LDAPURL(LDAPString):
+ pass
+
+
+class Referral(univ.SequenceOf):
+ componentType = LDAPURL()
+
+
+class SaslCredentials(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mechanism', LDAPString()),
+ namedtype.OptionalNamedType('credentials', univ.OctetString())
+ )
+
+
+class AuthenticationChoice(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('reserved-1', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('reserved-2', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('sasl',
+ SaslCredentials().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class BindRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 127))),
+ namedtype.NamedType('name', LDAPDN()),
+ namedtype.NamedType('authentication', AuthenticationChoice())
+ )
+
+
+class PartialAttributeList(univ.SequenceOf):
+ componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+ )
+
+
+class SearchResultEntry(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 4)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('objectName', LDAPDN()),
+ namedtype.NamedType('attributes', PartialAttributeList())
+ )
+
+
+class MatchingRuleAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('matchingRule', MatchingRuleId().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('type', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('matchValue',
+ AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('dnAttributes', univ.Boolean('False').subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class SubstringFilter(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('substrings',
+ univ.SequenceOf(
+ componentType=univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'initial', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType(
+ 'any', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
+ ),
+ namedtype.NamedType(
+ 'final', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
+ )
+ )
+ )
+ )
+ )
+ )
+
+
+# Ugly hack to handle recursive Filter reference (up to 3-levels deep).
+
+class Filter3(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('substrings', SubstringFilter().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.NamedType('present', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class Filter2(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('and', univ.SetOf(componentType=Filter3()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('or', univ.SetOf(componentType=Filter3()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('not',
+ Filter3().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('substrings', SubstringFilter().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.NamedType('present', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class Filter(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('and', univ.SetOf(componentType=Filter2()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('or', univ.SetOf(componentType=Filter2()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('not',
+ Filter2().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('substrings', SubstringFilter().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.NamedType('present', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+# End of Filter hack
+
+class SearchRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('baseObject', LDAPDN()),
+ namedtype.NamedType('scope', univ.Enumerated(
+ namedValues=namedval.NamedValues(('baseObject', 0), ('singleLevel', 1), ('wholeSubtree', 2)))),
+ namedtype.NamedType('derefAliases', univ.Enumerated(
+ namedValues=namedval.NamedValues(('neverDerefAliases', 0), ('derefInSearching', 1),
+ ('derefFindingBaseObj', 2), ('derefAlways', 3)))),
+ namedtype.NamedType('sizeLimit',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
+ namedtype.NamedType('timeLimit',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
+ namedtype.NamedType('typesOnly', univ.Boolean()),
+ namedtype.NamedType('filter', Filter()),
+ namedtype.NamedType('attributes', AttributeDescriptionList())
+ )
+
+
+class UnbindRequest(univ.Null):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ )
+
+
+class BindResponse(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('resultCode', univ.Enumerated(
+ namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+ ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+ ('compareTrue', 6), ('authMethodNotSupported', 7),
+ ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+ ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+ ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+ ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+ ('inappropriateMatching', 18), ('constraintViolation', 19),
+ ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+ ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+ ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+ ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+ ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+ ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+ ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+ ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+ ('objectClassModsProhibited', 69), ('reserved-70', 70),
+ ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+ ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+ ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+ ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+ namedtype.NamedType('matchedDN', LDAPDN()),
+ namedtype.NamedType('errorMessage', LDAPString()),
+ namedtype.OptionalNamedType('referral', Referral().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('serverSaslCreds', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)))
+ )
+
+
+class LDAPResult(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('resultCode', univ.Enumerated(
+ namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+ ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+ ('compareTrue', 6), ('authMethodNotSupported', 7),
+ ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+ ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+ ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+ ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+ ('inappropriateMatching', 18), ('constraintViolation', 19),
+ ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+ ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+ ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+ ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+ ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+ ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+ ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+ ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+ ('objectClassModsProhibited', 69), ('reserved-70', 70),
+ ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+ ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+ ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+ ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+ namedtype.NamedType('matchedDN', LDAPDN()),
+ namedtype.NamedType('errorMessage', LDAPString()),
+ namedtype.OptionalNamedType('referral', Referral().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class SearchResultReference(univ.SequenceOf):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 19)
+ )
+ componentType = LDAPURL()
+
+
+class SearchResultDone(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5)
+ )
+
+
+class AttributeTypeAndValues(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class ModifyRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 6)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('object', LDAPDN()),
+ namedtype.NamedType('modification',
+ univ.SequenceOf(
+ componentType=univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'operation', univ.Enumerated(namedValues=namedval.NamedValues(('add', 0), ('delete', 1), ('replace', 2)))
+ ),
+ namedtype.NamedType('modification', AttributeTypeAndValues())))
+ )
+ )
+ )
+
+
+class ModifyResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 7)
+ )
+
+
+class AttributeList(univ.SequenceOf):
+ componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+ )
+
+
+class AddRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 8)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('entry', LDAPDN()),
+ namedtype.NamedType('attributes', AttributeList())
+ )
+
+
+class AddResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 9)
+ )
+
+
+class DelRequest(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 10)
+ )
+
+
+class DelResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11)
+ )
+
+
+class ModifyDNRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('entry', LDAPDN()),
+ namedtype.NamedType('newrdn', RelativeLDAPDN()),
+ namedtype.NamedType('deleteoldrdn', univ.Boolean()),
+ namedtype.OptionalNamedType('newSuperior',
+ LDAPDN().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+
+ )
+
+
+class ModifyDNResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13)
+ )
+
+
+class CompareRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('entry', LDAPDN()),
+ namedtype.NamedType('ava', AttributeValueAssertion())
+ )
+
+
+class CompareResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15)
+ )
+
+
+class AbandonRequest(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 16)
+ )
+
+
+class ExtendedRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 23)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('requestName',
+ LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestValue', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtendedResponse(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 24)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('resultCode', univ.Enumerated(
+ namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+ ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+ ('compareTrue', 6), ('authMethodNotSupported', 7),
+ ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+ ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+ ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+ ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+ ('inappropriateMatching', 18), ('constraintViolation', 19),
+ ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+ ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+ ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+ ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+ ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+ ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+ ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+ ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+ ('objectClassModsProhibited', 69), ('reserved-70', 70),
+ ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+ ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+ ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+ ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+ namedtype.NamedType('matchedDN', LDAPDN()),
+ namedtype.NamedType('errorMessage', LDAPString()),
+ namedtype.OptionalNamedType('referral', Referral().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+
+ namedtype.OptionalNamedType('responseName', LDAPOID().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))),
+ namedtype.OptionalNamedType('response', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11)))
+ )
+
+
+class MessageID(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, maxInt
+ )
+
+
+class LDAPMessage(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('messageID', MessageID()),
+ namedtype.NamedType(
+ 'protocolOp', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('bindRequest', BindRequest()),
+ namedtype.NamedType('bindResponse', BindResponse()),
+ namedtype.NamedType('unbindRequest', UnbindRequest()),
+ namedtype.NamedType('searchRequest', SearchRequest()),
+ namedtype.NamedType('searchResEntry', SearchResultEntry()),
+ namedtype.NamedType('searchResDone', SearchResultDone()),
+ namedtype.NamedType('searchResRef', SearchResultReference()),
+ namedtype.NamedType('modifyRequest', ModifyRequest()),
+ namedtype.NamedType('modifyResponse', ModifyResponse()),
+ namedtype.NamedType('addRequest', AddRequest()),
+ namedtype.NamedType('addResponse', AddResponse()),
+ namedtype.NamedType('delRequest', DelRequest()),
+ namedtype.NamedType('delResponse', DelResponse()),
+ namedtype.NamedType('modDNRequest', ModifyDNRequest()),
+ namedtype.NamedType('modDNResponse', ModifyDNResponse()),
+ namedtype.NamedType('compareRequest', CompareRequest()),
+ namedtype.NamedType('compareResponse', CompareResponse()),
+ namedtype.NamedType('abandonRequest', AbandonRequest()),
+ namedtype.NamedType('extendedReq', ExtendedRequest()),
+ namedtype.NamedType('extendedResp', ExtendedResponse())
+ )
+ )
+ ),
+ namedtype.OptionalNamedType('controls', Controls().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2314.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2314.py
new file mode 100644
index 0000000000..b0edfe0917
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2314.py
@@ -0,0 +1,48 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#10 syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc2314
+#
+# Sample captures could be obtained with "openssl req" command
+#
+from pyasn1_modules.rfc2459 import *
+
+
+class Attributes(univ.SetOf):
+ componentType = Attribute()
+
+
+class Version(univ.Integer):
+ pass
+
+
+class CertificationRequestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.NamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class CertificationRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2315.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2315.py
new file mode 100644
index 0000000000..1069fc27dd
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2315.py
@@ -0,0 +1,294 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#7 message syntax
+#
+# ASN.1 source from:
+# https://opensource.apple.com/source/Security/Security-55179.1/libsecurity_asn1/asn1/pkcs7.asn.auto.html
+#
+# Sample captures from:
+# openssl crl2pkcs7 -nocrl -certfile cert1.cer -out outfile.p7b
+#
+from pyasn1_modules.rfc2459 import *
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class AttributeValueAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeType', AttributeType()),
+ namedtype.NamedType('attributeValue', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+
+pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7')
+data = univ.ObjectIdentifier('1.2.840.113549.1.7.1')
+signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2')
+envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3')
+signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4')
+digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5')
+encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6')
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+contentTypeMap = {}
+
+
+class EncryptedContentInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType(
+ 'encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ ),
+ openType=opentype.OpenType('contentType', contentTypeMap)
+ )
+ )
+
+
+class Version(univ.Integer): # overrides x509.Version
+ pass
+
+
+class EncryptedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
+ )
+
+
+class DigestAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ componentType = DigestAlgorithmIdentifier()
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.OptionalNamedType(
+ 'content',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)),
+ openType=opentype.OpenType('contentType', contentTypeMap)
+ )
+ )
+
+
+class DigestedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.NamedType('digest', Digest())
+ )
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class RecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+ )
+
+
+class RecipientInfos(univ.SetOf):
+ componentType = RecipientInfo()
+
+
+class Attributes(univ.SetOf):
+ componentType = Attribute()
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('attributes', Attributes())
+ )
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+ )
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class ExtendedCertificatesAndCertificates(univ.SetOf):
+ componentType = ExtendedCertificateOrCertificate()
+
+
+class SerialNumber(univ.Integer):
+ pass
+
+
+class CRLEntry(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', SerialNumber()),
+ namedtype.NamedType('revocationDate', useful.UTCTime())
+ )
+
+
+class TBSCertificateRevocationList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('lastUpdate', useful.UTCTime()),
+ namedtype.NamedType('nextUpdate', useful.UTCTime()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
+ )
+
+
+class CertificateRevocationList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+
+class CertificateRevocationLists(univ.SetOf):
+ componentType = CertificateRevocationList()
+
+
+class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedDigest(univ.OctetString):
+ pass
+
+
+class SignerInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedDigest', EncryptedDigest()),
+ namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class SignerInfos(univ.SetOf):
+ componentType = SignerInfo()
+
+
+class SignedAndEnvelopedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+ )
+
+
+class EnvelopedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
+ )
+
+
+class DigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('digest', Digest())
+ )
+
+
+class SignedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.OptionalNamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('signerInfos', SignerInfos())
+ )
+
+
+class Data(univ.OctetString):
+ pass
+
+_contentTypeMapUpdate = {
+ data: Data(),
+ signedData: SignedData(),
+ envelopedData: EnvelopedData(),
+ signedAndEnvelopedData: SignedAndEnvelopedData(),
+ digestedData: DigestedData(),
+ encryptedData: EncryptedData()
+}
+
+contentTypeMap.update(_contentTypeMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2437.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2437.py
new file mode 100644
index 0000000000..88641cf07d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2437.py
@@ -0,0 +1,69 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2.asn
+#
+# Sample captures could be obtained with "openssl genrsa" command
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules.rfc2459 import AlgorithmIdentifier
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+md4WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.3')
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+rsaOAEPEncryptionSET = univ.ObjectIdentifier('1.2.840.113549.1.1.6')
+id_RSAES_OAEP = univ.ObjectIdentifier('1.2.840.113549.1.1.7')
+id_mgf1 = univ.ObjectIdentifier('1.2.840.113549.1.1.8')
+id_pSpecified = univ.ObjectIdentifier('1.2.840.113549.1.1.9')
+id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
+
+MAX = float('inf')
+
+
+class Version(univ.Integer):
+ pass
+
+
+class RSAPrivateKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ namedtype.NamedType('privateExponent', univ.Integer()),
+ namedtype.NamedType('prime1', univ.Integer()),
+ namedtype.NamedType('prime2', univ.Integer()),
+ namedtype.NamedType('exponent1', univ.Integer()),
+ namedtype.NamedType('exponent2', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer())
+ )
+
+
+class RSAPublicKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+ )
+
+
+# XXX defaults not set
+class RSAES_OAEP_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('maskGenFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('pSourceFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2459.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2459.py
new file mode 100644
index 0000000000..57f783e451
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2459.py
@@ -0,0 +1,1339 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Updated by Russ Housley to resolve the TODO regarding the Certificate
+# Policies Certificate Extension.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 message syntax
+#
+# ASN.1 source from:
+# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
+# http://www.ietf.org/rfc/rfc2459.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+#
+# PKIX1Explicit88
+#
+
+# Upper Bounds
+ub_name = univ.Integer(32768)
+ub_common_name = univ.Integer(64)
+ub_locality_name = univ.Integer(128)
+ub_state_name = univ.Integer(128)
+ub_organization_name = univ.Integer(64)
+ub_organizational_unit_name = univ.Integer(64)
+ub_title = univ.Integer(64)
+ub_match = univ.Integer(128)
+ub_emailaddress_length = univ.Integer(128)
+ub_common_name_length = univ.Integer(64)
+ub_country_name_alpha_length = univ.Integer(2)
+ub_country_name_numeric_length = univ.Integer(3)
+ub_domain_defined_attributes = univ.Integer(4)
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+ub_domain_name_length = univ.Integer(16)
+ub_extension_attributes = univ.Integer(256)
+ub_e163_4_number_length = univ.Integer(15)
+ub_e163_4_sub_address_length = univ.Integer(40)
+ub_generation_qualifier_length = univ.Integer(3)
+ub_given_name_length = univ.Integer(16)
+ub_initials_length = univ.Integer(5)
+ub_integer_options = univ.Integer(256)
+ub_numeric_user_id_length = univ.Integer(32)
+ub_organization_name_length = univ.Integer(64)
+ub_organizational_unit_name_length = univ.Integer(32)
+ub_organizational_units = univ.Integer(4)
+ub_pds_name_length = univ.Integer(16)
+ub_pds_parameter_length = univ.Integer(30)
+ub_pds_physical_address_lines = univ.Integer(6)
+ub_postal_code_length = univ.Integer(16)
+ub_surname_length = univ.Integer(40)
+ub_terminal_id_length = univ.Integer(24)
+ub_unformatted_address_length = univ.Integer(180)
+ub_x121_address_length = univ.Integer(16)
+
+
+class UniversalString(char.UniversalString):
+ pass
+
+
+class BMPString(char.BMPString):
+ pass
+
+
+class UTF8String(char.UTF8String):
+ pass
+
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
+
+id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
+id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
+
+id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
+id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
+
+
+
+
+id_at = univ.ObjectIdentifier('2.5.4')
+id_at_name = univ.ObjectIdentifier('2.5.4.41')
+# preserve misspelled variable for compatibility
+id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4')
+id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
+id_at_initials = univ.ObjectIdentifier('2.5.4.43')
+id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')
+
+
+class X520name(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+ )
+
+
+id_at_commonName = univ.ObjectIdentifier('2.5.4.3')
+
+
+class X520CommonName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+ )
+
+
+id_at_localityName = univ.ObjectIdentifier('2.5.4.7')
+
+
+class X520LocalityName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+ )
+
+
+id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')
+
+
+class X520StateOrProvinceName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+ )
+
+
+id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')
+
+
+class X520OrganizationName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+ )
+
+
+id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+ )
+
+
+id_at_title = univ.ObjectIdentifier('2.5.4.12')
+
+
+class X520Title(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+ )
+
+
+id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
+
+
+class X520countryName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
+
+
+pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
+
+emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
+
+
+class Pkcs9email(char.IA5String):
+ subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+
+# ----
+
+class DSAPrivateKey(univ.Sequence):
+ """PKIX compliant DSA private key structure"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('public', univ.Integer()),
+ namedtype.NamedType('private', univ.Integer())
+ )
+
+
+# ----
+
+
+class DirectoryString(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ # hm, this should not be here!? XXX
+ )
+
+
+# certificate and CRL specific structures begin here
+
+class AlgorithmIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any())
+ )
+
+
+
+# Algorithm OIDs and parameter structures
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
+
+
+class Dss_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
+
+
+class ValidationParms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+ )
+
+
+class DomainParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+ )
+
+
+id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
+
+
+class Dss_Parms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer())
+ )
+
+
+# x400 address syntax starts here
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString())
+ )
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ componentType = TeletexDomainDefinedAttribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+terminal_type = univ.Integer(23)
+
+
+class TerminalType(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options)
+ namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletelex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+ )
+
+
+class PresentationAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3),
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ )
+
+
+extended_network_address = univ.Integer(22)
+
+
+class E163_4_address(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('e163-4-address', E163_4_address()),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class PDSParameter(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+ )
+
+
+local_postal_attributes = univ.Integer(21)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+unique_postal_name = univ.Integer(20)
+
+poste_restante_address = univ.Integer(19)
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+post_office_box_address = univ.Integer(18)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+street_address = univ.Integer(17)
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+class UnformattedPostalAddress(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+ )
+
+
+physical_delivery_office_name = univ.Integer(10)
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+physical_delivery_office_number = univ.Integer(11)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+extension_OR_address_components = univ.Integer(12)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+physical_delivery_personal_name = univ.Integer(13)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+physical_delivery_organization_name = univ.Integer(14)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+unformatted_postal_address = univ.Integer(16)
+
+postal_code = univ.Integer(9)
+
+
+class PostalCode(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+ )
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
+ ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+ )
+
+
+class PDSName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+physical_delivery_country_name = univ.Integer(8)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+pds_name = univ.Integer(7)
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ componentType = TeletexOrganizationalUnitName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+teletex_personal_name = univ.Integer(4)
+
+
+class TeletexPersonalName(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+teletex_organization_name = univ.Integer(3)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+teletex_common_name = univ.Integer(2)
+
+
+class TeletexCommonName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class CommonName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+common_name = univ.Integer(1)
+
+
+class ExtensionAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtensionAttributes(univ.SetOf):
+ componentType = ExtensionAttribute()
+ sizeSpec = univ.SetOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+ )
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ componentType = BuiltInDomainDefinedAttribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ componentType = OrganizationalUnitName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class PersonalName(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class NumericUserIdentifier(char.NumericString):
+ subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class OrganizationName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+class PrivateDomainName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+ )
+
+
+class TerminalIdentifier(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+
+class X121Address(char.NumericString):
+ subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+class AdministrationDomainName(univ.Choice):
+ tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+ )
+
+
+class CountryName(univ.Choice):
+ tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
+ ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+ )
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+ )
+
+
+class ORAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+ )
+
+
+#
+# PKIX1Implicit88
+#
+
+id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
+id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
+id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
+
+holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
+
+id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
+
+
+class CRLReason(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8)
+ )
+
+
+id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
+
+
+class CRLNumber(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1')
+id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
+id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
+id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
+id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
+id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
+id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
+id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
+id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
+id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ componentType = KeyPurposeId()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class ReasonFlags(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6)
+ )
+
+
+class SkipCerts(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
+
+
+id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
+
+
+class PolicyConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
+
+
+class BasicConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean(False)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+ )
+
+
+id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
+
+
+class EDIPartyName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('partyName',
+ DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+
+id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
+
+
+
+class BaseDistance(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
+
+
+id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
+
+
+id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
+
+
+
+
+id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
+
+
+class DisplayText(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+ )
+
+
+class NoticeReference(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+ )
+
+
+class UserNotice(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+ )
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType('qualifier', univ.Any())
+ )
+
+
+id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
+
+
+class PolicyInformation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class CertificatePolicies(univ.SequenceOf):
+ componentType = PolicyInformation()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
+
+
+class PolicyMapping(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+ )
+
+
+class PolicyMappings(univ.SequenceOf):
+ componentType = PolicyMapping()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
+
+
+class KeyUsage(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+ )
+
+
+id_ce = univ.ObjectIdentifier('2.5.29')
+
+id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
+
+
+id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
+
+
+id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+certificateAttributesMap = {}
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ componentType = Attribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ componentType = AttributeTypeAndValue()
+
+
+class RDNSequence(univ.SequenceOf):
+ componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('', RDNSequence())
+ )
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class AnotherName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class GeneralName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+ )
+
+
+class GeneralNames(univ.SequenceOf):
+ componentType = GeneralName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class AccessDescription(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+ )
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ componentType = AccessDescription()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class DistributionPointName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class DistributionPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CRLDistPointsSyntax(univ.SequenceOf):
+ componentType = DistributionPoint()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class GeneralSubtree(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ componentType = GeneralSubtree()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+certificateExtensionsMap = {}
+
+
+class Extension(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
+ namedtype.NamedType('extnValue', univ.OctetString(),
+ openType=opentype.OpenType('extnID', certificateExtensionsMap))
+ )
+
+
+class Extensions(univ.SequenceOf):
+ componentType = Extension()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Time(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+ )
+
+
+class Validity(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('v1', 0), ('v2', 1), ('v3', 2)
+ )
+
+
+class TBSCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class Certificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+ )
+
+# CRL structures
+
+class RevokedCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ )
+
+
+class TBSCertList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
+ namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class CertificateList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+# map of AttributeType -> AttributeValue
+
+_certificateAttributesMapUpdate = {
+ id_at_name: X520name(),
+ id_at_surname: X520name(),
+ id_at_givenName: X520name(),
+ id_at_initials: X520name(),
+ id_at_generationQualifier: X520name(),
+ id_at_commonName: X520CommonName(),
+ id_at_localityName: X520LocalityName(),
+ id_at_stateOrProvinceName: X520StateOrProvinceName(),
+ id_at_organizationName: X520OrganizationName(),
+ id_at_organizationalUnitName: X520OrganizationalUnitName(),
+ id_at_title: X520Title(),
+ id_at_dnQualifier: X520dnQualifier(),
+ id_at_countryName: X520countryName(),
+ emailAddress: Pkcs9email(),
+}
+
+certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# map of Certificate Extension OIDs to Extensions
+
+_certificateExtensionsMapUpdate = {
+ id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
+ id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
+ id_ce_keyUsage: KeyUsage(),
+ id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
+ id_ce_certificatePolicies: CertificatePolicies(),
+ id_ce_policyMappings: PolicyMappings(),
+ id_ce_subjectAltName: SubjectAltName(),
+ id_ce_issuerAltName: IssuerAltName(),
+ id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
+ id_ce_basicConstraints: BasicConstraints(),
+ id_ce_nameConstraints: NameConstraints(),
+ id_ce_policyConstraints: PolicyConstraints(),
+ id_ce_extKeyUsage: ExtKeyUsageSyntax(),
+ id_ce_cRLDistributionPoints: CRLDistPointsSyntax(),
+ id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
+ id_ce_cRLNumber: univ.Integer(),
+ id_ce_deltaCRLIndicator: BaseCRLNumber(),
+ id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
+ id_ce_cRLReasons: CRLReason(),
+ id_ce_holdInstructionCode: univ.ObjectIdentifier(),
+ id_ce_invalidityDate: useful.GeneralizedTime(),
+ id_ce_certificateIssuer: GeneralNames(),
+}
+
+certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2511.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2511.py
new file mode 100644
index 0000000000..8935cdabe3
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2511.py
@@ -0,0 +1,258 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 certificate Request Message Format (CRMF) syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc2511
+#
+# Sample captures could be obtained with OpenSSL
+#
+from pyasn1_modules import rfc2315
+from pyasn1_modules.rfc2459 import *
+
+MAX = float('inf')
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+id_pkip = univ.ObjectIdentifier('1.3.6.1.5.5.7.5')
+id_regCtrl = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1')
+id_regCtrl_regToken = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.1')
+id_regCtrl_authenticator = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.2')
+id_regCtrl_pkiPublicationInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.3')
+id_regCtrl_pkiArchiveOptions = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.4')
+id_regCtrl_oldCertID = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.5')
+id_regCtrl_protocolEncrKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.6')
+id_regInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2')
+id_regInfo_utf8Pairs = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.1')
+id_regInfo_certReq = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.2')
+
+
+# This should be in PKIX Certificate Extensions module
+
+class GeneralName(univ.OctetString):
+ pass
+
+
+# end of PKIX Certificate Extensions module
+
+class UTF8Pairs(char.UTF8String):
+ pass
+
+
+class ProtocolEncrKey(SubjectPublicKeyInfo):
+ pass
+
+
+class CertId(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+ )
+
+
+class OldCertId(CertId):
+ pass
+
+
+class KeyGenParameters(univ.OctetString):
+ pass
+
+
+class EncryptedValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('intendedAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('symmAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('keyAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('encValue', univ.BitString())
+ )
+
+
+class EncryptedKey(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedValue', EncryptedValue()),
+ namedtype.NamedType('envelopedData', rfc2315.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class PKIArchiveOptions(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedPrivKey', EncryptedKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyGenParameters', KeyGenParameters().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('archiveRemGenPrivKey',
+ univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class SinglePubInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubMethod', univ.Integer(
+ namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
+ namedtype.OptionalNamedType('pubLocation', GeneralName())
+ )
+
+
+class PKIPublicationInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('action',
+ univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
+ namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class Authenticator(char.UTF8String):
+ pass
+
+
+class RegToken(char.UTF8String):
+ pass
+
+
+class SubsequentMessage(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('encrCert', 0),
+ ('challengeResp', 1)
+ )
+
+
+class POPOPrivKey(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('thisMessage',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subsequentMessage', SubsequentMessage().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dhMAC',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class PBMParameter(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('owf', AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', AlgorithmIdentifier())
+ )
+
+
+class PKMACValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algId', AlgorithmIdentifier()),
+ namedtype.NamedType('value', univ.BitString())
+ )
+
+
+class POPOSigningKeyInput(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'authInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'sender', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType('publicKeyMAC', PKMACValue())
+ )
+ )
+ ),
+ namedtype.NamedType('publicKey', SubjectPublicKeyInfo())
+ )
+
+
+class POPOSigningKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('algorithmIdentifier', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+
+class ProofOfPossession(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('raVerified',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signature', POPOSigningKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('keyEncipherment', POPOPrivKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('keyAgreement', POPOPrivKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class Controls(univ.SequenceOf):
+ componentType = AttributeTypeAndValue()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class OptionalValidity(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore',
+ Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter',
+ Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class CertTemplate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('signingAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('issuer', Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('subject', Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('publicKey', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('subjectUID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class CertRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('certTemplate', CertTemplate()),
+ namedtype.OptionalNamedType('controls', Controls())
+ )
+
+
+class CertReq(CertRequest):
+ pass
+
+
+class CertReqMsg(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReq', CertRequest()),
+ namedtype.OptionalNamedType('pop', ProofOfPossession()),
+ namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class CertReqMessages(univ.SequenceOf):
+ componentType = CertReqMsg()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2560.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2560.py
new file mode 100644
index 0000000000..017ac0b66e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2560.py
@@ -0,0 +1,225 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# OCSP request/response syntax
+#
+# Derived from a minimal OCSP library (RFC2560) code written by
+# Bud P. Bruegger <bud@ancitel.it>
+# Copyright: Ancitel, S.p.a, Rome, Italy
+# License: BSD
+#
+
+#
+# current limitations:
+# * request and response works only for a single certificate
+# * only some values are parsed out of the response
+# * the request doesn't set a nonce nor signature
+# * there is no signature validation of the response
+# * dates are left as strings in GeneralizedTime format -- datetime.datetime
+# would be nicer
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc2459
+
+
+# Start of OCSP module definitions
+
+# This should be in directory Authentication Framework (X.509) module
+
+class CRLReason(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+ )
+
+
+# end of directory Authentication Framework (X.509) module
+
+# This should be in PKIX Certificate Extensions module
+
+class GeneralName(univ.OctetString):
+ pass
+
+
+# end of PKIX Certificate Extensions module
+
+id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
+id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
+id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
+id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
+id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
+id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
+id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
+id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
+id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
+
+
+class AcceptableResponses(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class ArchiveCutoff(useful.GeneralizedTime):
+ pass
+
+
+class UnknownInfo(univ.Null):
+ pass
+
+
+class RevokedInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class CertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('issuerNameHash', univ.OctetString()),
+ namedtype.NamedType('issuerKeyHash', univ.OctetString()),
+ namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
+ )
+
+
+class CertStatus(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('good',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('revoked',
+ RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('unknown',
+ UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class SingleResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certID', CertID()),
+ namedtype.NamedType('certStatus', CertStatus()),
+ namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class KeyHash(univ.OctetString):
+ pass
+
+
+class ResponderID(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('byName',
+ rfc2459.Name().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('byKey',
+ KeyHash().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0))
+
+
+class ResponseData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('responderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())),
+ namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BasicOCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsResponseData', ResponseData()),
+ namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class ResponseBytes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('responseType', univ.ObjectIdentifier()),
+ namedtype.NamedType('response', univ.OctetString())
+ )
+
+
+class OCSPResponseStatus(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('successful', 0),
+ ('malformedRequest', 1),
+ ('internalError', 2),
+ ('tryLater', 3),
+ ('undefinedStatus', 4), # should never occur
+ ('sigRequired', 5),
+ ('unauthorized', 6)
+ )
+
+
+class OCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('responseStatus', OCSPResponseStatus()),
+ namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Request(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('reqCert', CertID()),
+ namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Signature(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class TBSRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())),
+ namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class OCSPRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsRequest', TBSRequest()),
+ namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2631.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2631.py
new file mode 100644
index 0000000000..44e537101c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2631.py
@@ -0,0 +1,37 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Diffie-Hellman Key Agreement
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2631.txt
+# https://www.rfc-editor.org/errata/eid5897
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class KeySpecificInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.NamedType('counter', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(4, 4)))
+ )
+
+
+class OtherInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyInfo', KeySpecificInfo()),
+ namedtype.OptionalNamedType('partyAInfo', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('suppPubInfo', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2634.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2634.py
new file mode 100644
index 0000000000..2099a4b206
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2634.py
@@ -0,0 +1,336 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enhanced Security Services for S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2634.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+ContentType = rfc5652.ContentType
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+PolicyInformation = rfc5280.PolicyInformation
+
+GeneralNames = rfc5280.GeneralNames
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+
+# Signing Certificate Attribute
+# Warning: It is better to use SigningCertificateV2 from RFC 5035
+
+id_aa_signingCertificate = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.12')
+
+class Hash(univ.OctetString):
+ pass # SHA-1 hash of entire certificate; RFC 5035 supports other hash algorithms
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+)
+
+
+class ESSCertID(univ.Sequence):
+ pass
+
+ESSCertID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', Hash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+)
+
+
+class SigningCertificate(univ.Sequence):
+ pass
+
+SigningCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs', univ.SequenceOf(
+ componentType=ESSCertID())),
+ namedtype.OptionalNamedType('policies', univ.SequenceOf(
+ componentType=PolicyInformation()))
+)
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.3')
+
+ub_ml_expansion_history = univ.Integer(64)
+
+
+class EntityIdentifier(univ.Choice):
+ pass
+
+EntityIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier())
+)
+
+
+class MLReceiptPolicy(univ.Choice):
+ pass
+
+MLReceiptPolicy.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('none', univ.Null().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('insteadOf', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('inAdditionTo', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class MLData(univ.Sequence):
+ pass
+
+MLData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mailListIdentifier', EntityIdentifier()),
+ namedtype.NamedType('expansionTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('mlReceiptPolicy', MLReceiptPolicy())
+)
+
+class MLExpansionHistory(univ.SequenceOf):
+ pass
+
+MLExpansionHistory.componentType = MLData()
+MLExpansionHistory.sizeSpec = constraint.ValueSizeConstraint(1, ub_ml_expansion_history)
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.2')
+
+ub_privacy_mark_length = univ.Integer(128)
+
+ub_security_categories = univ.Integer(64)
+
+ub_integer_options = univ.Integer(256)
+
+
+class ESSPrivacyMark(univ.Choice):
+ pass
+
+ESSPrivacyMark.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_privacy_mark_length))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class SecurityClassification(univ.Integer):
+ pass
+
+SecurityClassification.subtypeSpec=constraint.ValueRangeConstraint(0, ub_integer_options)
+
+SecurityClassification.namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('top-secret', 5)
+)
+
+
+class SecurityPolicyIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class SecurityCategory(univ.Sequence):
+ pass
+
+SecurityCategory.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SecurityCategories(univ.SetOf):
+ pass
+
+SecurityCategories.componentType = SecurityCategory()
+SecurityCategories.sizeSpec = constraint.ValueSizeConstraint(1, ub_security_categories)
+
+
+class ESSSecurityLabel(univ.Set):
+ pass
+
+ESSSecurityLabel.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('security-policy-identifier', SecurityPolicyIdentifier()),
+ namedtype.OptionalNamedType('security-classification', SecurityClassification()),
+ namedtype.OptionalNamedType('privacy-mark', ESSPrivacyMark()),
+ namedtype.OptionalNamedType('security-categories', SecurityCategories())
+)
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.9')
+
+class EquivalentLabels(univ.SequenceOf):
+ pass
+
+EquivalentLabels.componentType = ESSSecurityLabel()
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.7')
+
+class ContentIdentifier(univ.OctetString):
+ pass
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.10')
+
+class ContentReference(univ.Sequence):
+ pass
+
+ContentReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('originatorSignatureValue', univ.OctetString())
+)
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.5')
+
+class MsgSigDigest(univ.OctetString):
+ pass
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.4')
+
+class ContentHints(univ.Sequence):
+ pass
+
+ContentHints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('contentDescription', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('contentType', ContentType())
+)
+
+
+# Receipt Request Attribute
+
+class AllOrFirstTier(univ.Integer):
+ pass
+
+AllOrFirstTier.namedValues = namedval.NamedValues(
+ ('allReceipts', 0),
+ ('firstTierRecipients', 1)
+)
+
+
+class ReceiptsFrom(univ.Choice):
+ pass
+
+ReceiptsFrom.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('allOrFirstTier', AllOrFirstTier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receiptList', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+id_aa_receiptRequest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.1')
+
+ub_receiptsTo = univ.Integer(16)
+
+class ReceiptRequest(univ.Sequence):
+ pass
+
+ReceiptRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('receiptsFrom', ReceiptsFrom()),
+ namedtype.NamedType('receiptsTo', univ.SequenceOf(componentType=GeneralNames()).subtype(sizeSpec=constraint.ValueSizeConstraint(1, ub_receiptsTo)))
+)
+
+# Receipt Content Type
+
+class ESSVersion(univ.Integer):
+ pass
+
+ESSVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_receipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.1')
+
+class Receipt(univ.Sequence):
+ pass
+
+Receipt.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', ESSVersion()),
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('originatorSignatureValue', univ.OctetString())
+)
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_signingCertificate: SigningCertificate(),
+ id_aa_mlExpandHistory: MLExpansionHistory(),
+ id_aa_securityLabel: ESSSecurityLabel(),
+ id_aa_equivalentLabels: EquivalentLabels(),
+ id_aa_contentIdentifier: ContentIdentifier(),
+ id_aa_contentReference: ContentReference(),
+ id_aa_msgSigDigest: MsgSigDigest(),
+ id_aa_contentHint: ContentHints(),
+ id_aa_receiptRequest: ReceiptRequest(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2876.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2876.py
new file mode 100644
index 0000000000..04c402b7ea
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2876.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# KEA and SKIPJACK Algorithms in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2876.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+
+
+id_fortezzaConfidentialityAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.4')
+
+
+id_fortezzaWrap80 = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.23')
+
+
+id_kEAKeyEncryptionAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.24')
+
+
+id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22')
+
+
+class Skipjack_Parm(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('initialization-vector', univ.OctetString())
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_fortezzaConfidentialityAlgorithm: Skipjack_Parm(),
+ id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the SMIMECapabilities Attribute map in rfc5751.py
+
+_smimeCapabilityMapUpdate = {
+ id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2985.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2985.py
new file mode 100644
index 0000000000..75bccf097d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2985.py
@@ -0,0 +1,588 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#9: Selected Attribute Types (Version 2.0)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2985.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc7292
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+EmailAddress = rfc5280.EmailAddress
+
+Extensions = rfc5280.Extensions
+
+Time = rfc5280.Time
+
+X520countryName = rfc5280.X520countryName
+
+X520SerialNumber = rfc5280.X520SerialNumber
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+ContentType = rfc5652.ContentType
+
+Countersignature = rfc5652.Countersignature
+
+MessageDigest = rfc5652.MessageDigest
+
+SignerInfo = rfc5652.SignerInfo
+
+SigningTime = rfc5652.SigningTime
+
+
+# Imports from RFC 5958
+
+EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
+
+
+# Imports from RFC 7292
+
+PFX = rfc7292.PFX
+
+
+# TODO:
+# Need a place to import PKCS15Token; it does not yet appear in an RFC
+
+
+# SingleAttribute is the same as Attribute in RFC 5280, except that the
+# attrValues SET must have one and only one member
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+
+
+class SingleAttributeValues(univ.SetOf):
+ pass
+
+SingleAttributeValues.componentType = AttributeValue()
+
+
+class SingleAttribute(univ.Sequence):
+ pass
+
+SingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('type', rfc5280.certificateAttributesMap)
+ )
+)
+
+
+# CMSAttribute is the same as Attribute in RFC 5652, and CMSSingleAttribute
+# is the companion where the attrValues SET must have one and only one member
+
+CMSAttribute = rfc5652.Attribute
+
+
+class CMSSingleAttribute(univ.Sequence):
+ pass
+
+CMSSingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# PKCS9String is DirectoryString with an additional choice of IA5String,
+# and the SIZE is limited to 255
+
+class PKCS9String(univ.Choice):
+ pass
+
+PKCS9String.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('directoryString', DirectoryString())
+)
+
+
+# Upper Bounds
+
+pkcs_9_ub_pkcs9String = univ.Integer(255)
+
+pkcs_9_ub_challengePassword = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_emailAddress = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_match = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_signingDescription = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_unstructuredAddress = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_unstructuredName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+
+ub_name = univ.Integer(32768)
+
+pkcs_9_ub_placeOfBirth = univ.Integer(ub_name)
+
+pkcs_9_ub_pseudonym = univ.Integer(ub_name)
+
+
+# Object Identifier Arcs
+
+ietf_at = _OID(1, 3, 6, 1, 5, 5, 7, 9)
+
+id_at = _OID(2, 5, 4)
+
+pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
+
+pkcs_9_mo = _OID(pkcs_9, 0)
+
+smime = _OID(pkcs_9, 16)
+
+certTypes = _OID(pkcs_9, 22)
+
+crlTypes = _OID(pkcs_9, 23)
+
+pkcs_9_oc = _OID(pkcs_9, 24)
+
+pkcs_9_at = _OID(pkcs_9, 25)
+
+pkcs_9_sx = _OID(pkcs_9, 26)
+
+pkcs_9_mr = _OID(pkcs_9, 27)
+
+
+# Object Identifiers for Syntaxes for use with LDAP-accessible directories
+
+pkcs_9_sx_pkcs9String = _OID(pkcs_9_sx, 1)
+
+pkcs_9_sx_signingTime = _OID(pkcs_9_sx, 2)
+
+
+# Object Identifiers for object classes
+
+pkcs_9_oc_pkcsEntity = _OID(pkcs_9_oc, 1)
+
+pkcs_9_oc_naturalPerson = _OID(pkcs_9_oc, 2)
+
+
+# Object Identifiers for matching rules
+
+pkcs_9_mr_caseIgnoreMatch = _OID(pkcs_9_mr, 1)
+
+pkcs_9_mr_signingTimeMatch = _OID(pkcs_9_mr, 2)
+
+
+# PKCS #7 PDU
+
+pkcs_9_at_pkcs7PDU = _OID(pkcs_9_at, 5)
+
+pKCS7PDU = Attribute()
+pKCS7PDU['type'] = pkcs_9_at_pkcs7PDU
+pKCS7PDU['values'][0] = ContentInfo()
+
+
+# PKCS #12 token
+
+pkcs_9_at_userPKCS12 = _OID(2, 16, 840, 1, 113730, 3, 1, 216)
+
+userPKCS12 = Attribute()
+userPKCS12['type'] = pkcs_9_at_userPKCS12
+userPKCS12['values'][0] = PFX()
+
+
+# PKCS #15 token
+
+pkcs_9_at_pkcs15Token = _OID(pkcs_9_at, 1)
+
+# TODO: Once PKCS15Token can be imported, this can be included
+#
+# pKCS15Token = Attribute()
+# userPKCS12['type'] = pkcs_9_at_pkcs15Token
+# userPKCS12['values'][0] = PKCS15Token()
+
+
+# PKCS #8 encrypted private key information
+
+pkcs_9_at_encryptedPrivateKeyInfo = _OID(pkcs_9_at, 2)
+
+encryptedPrivateKeyInfo = Attribute()
+encryptedPrivateKeyInfo['type'] = pkcs_9_at_encryptedPrivateKeyInfo
+encryptedPrivateKeyInfo['values'][0] = EncryptedPrivateKeyInfo()
+
+
+# Electronic-mail address
+
+pkcs_9_at_emailAddress = rfc5280.id_emailAddress
+
+emailAddress = Attribute()
+emailAddress['type'] = pkcs_9_at_emailAddress
+emailAddress['values'][0] = EmailAddress()
+
+
+# Unstructured name
+
+pkcs_9_at_unstructuredName = _OID(pkcs_9, 2)
+
+unstructuredName = Attribute()
+unstructuredName['type'] = pkcs_9_at_unstructuredName
+unstructuredName['values'][0] = PKCS9String()
+
+
+# Unstructured address
+
+pkcs_9_at_unstructuredAddress = _OID(pkcs_9, 8)
+
+unstructuredAddress = Attribute()
+unstructuredAddress['type'] = pkcs_9_at_unstructuredAddress
+unstructuredAddress['values'][0] = DirectoryString()
+
+
+# Date of birth
+
+pkcs_9_at_dateOfBirth = _OID(ietf_at, 1)
+
+dateOfBirth = SingleAttribute()
+dateOfBirth['type'] = pkcs_9_at_dateOfBirth
+dateOfBirth['values'][0] = useful.GeneralizedTime()
+
+
+# Place of birth
+
+pkcs_9_at_placeOfBirth = _OID(ietf_at, 2)
+
+placeOfBirth = SingleAttribute()
+placeOfBirth['type'] = pkcs_9_at_placeOfBirth
+placeOfBirth['values'][0] = DirectoryString()
+
+
+# Gender
+
+class GenderString(char.PrintableString):
+ pass
+
+GenderString.subtypeSpec = constraint.ValueSizeConstraint(1, 1)
+GenderString.subtypeSpec = constraint.SingleValueConstraint("M", "F", "m", "f")
+
+
+pkcs_9_at_gender = _OID(ietf_at, 3)
+
+gender = SingleAttribute()
+gender['type'] = pkcs_9_at_gender
+gender['values'][0] = GenderString()
+
+
+# Country of citizenship
+
+pkcs_9_at_countryOfCitizenship = _OID(ietf_at, 4)
+
+countryOfCitizenship = Attribute()
+countryOfCitizenship['type'] = pkcs_9_at_countryOfCitizenship
+countryOfCitizenship['values'][0] = X520countryName()
+
+
+# Country of residence
+
+pkcs_9_at_countryOfResidence = _OID(ietf_at, 5)
+
+countryOfResidence = Attribute()
+countryOfResidence['type'] = pkcs_9_at_countryOfResidence
+countryOfResidence['values'][0] = X520countryName()
+
+
+# Pseudonym
+
+id_at_pseudonym = _OID(2, 5, 4, 65)
+
+pseudonym = Attribute()
+pseudonym['type'] = id_at_pseudonym
+pseudonym['values'][0] = DirectoryString()
+
+
+# Serial number
+
+id_at_serialNumber = rfc5280.id_at_serialNumber
+
+serialNumber = Attribute()
+serialNumber['type'] = id_at_serialNumber
+serialNumber['values'][0] = X520SerialNumber()
+
+
+# Content type
+
+pkcs_9_at_contentType = rfc5652.id_contentType
+
+contentType = CMSSingleAttribute()
+contentType['attrType'] = pkcs_9_at_contentType
+contentType['attrValues'][0] = ContentType()
+
+
+# Message digest
+
+pkcs_9_at_messageDigest = rfc5652.id_messageDigest
+
+messageDigest = CMSSingleAttribute()
+messageDigest['attrType'] = pkcs_9_at_messageDigest
+messageDigest['attrValues'][0] = MessageDigest()
+
+
+# Signing time
+
+pkcs_9_at_signingTime = rfc5652.id_signingTime
+
+signingTime = CMSSingleAttribute()
+signingTime['attrType'] = pkcs_9_at_signingTime
+signingTime['attrValues'][0] = SigningTime()
+
+
+# Random nonce
+
+class RandomNonce(univ.OctetString):
+ pass
+
+RandomNonce.subtypeSpec = constraint.ValueSizeConstraint(4, MAX)
+
+
+pkcs_9_at_randomNonce = _OID(pkcs_9_at, 3)
+
+randomNonce = CMSSingleAttribute()
+randomNonce['attrType'] = pkcs_9_at_randomNonce
+randomNonce['attrValues'][0] = RandomNonce()
+
+
+# Sequence number
+
+class SequenceNumber(univ.Integer):
+ pass
+
+SequenceNumber.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+pkcs_9_at_sequenceNumber = _OID(pkcs_9_at, 4)
+
+sequenceNumber = CMSSingleAttribute()
+sequenceNumber['attrType'] = pkcs_9_at_sequenceNumber
+sequenceNumber['attrValues'][0] = SequenceNumber()
+
+
+# Countersignature
+
+pkcs_9_at_counterSignature = rfc5652.id_countersignature
+
+counterSignature = CMSAttribute()
+counterSignature['attrType'] = pkcs_9_at_counterSignature
+counterSignature['attrValues'][0] = Countersignature()
+
+
+# Challenge password
+
+pkcs_9_at_challengePassword = _OID(pkcs_9, 7)
+
+challengePassword = SingleAttribute()
+challengePassword['type'] = pkcs_9_at_challengePassword
+challengePassword['values'][0] = DirectoryString()
+
+
+# Extension request
+
+class ExtensionRequest(Extensions):
+ pass
+
+
+pkcs_9_at_extensionRequest = _OID(pkcs_9, 14)
+
+extensionRequest = SingleAttribute()
+extensionRequest['type'] = pkcs_9_at_extensionRequest
+extensionRequest['values'][0] = ExtensionRequest()
+
+
+# Extended-certificate attributes (deprecated)
+
+class AttributeSet(univ.SetOf):
+ pass
+
+AttributeSet.componentType = Attribute()
+
+
+pkcs_9_at_extendedCertificateAttributes = _OID(pkcs_9, 9)
+
+extendedCertificateAttributes = SingleAttribute()
+extendedCertificateAttributes['type'] = pkcs_9_at_extendedCertificateAttributes
+extendedCertificateAttributes['values'][0] = AttributeSet()
+
+
+# Friendly name
+
+class FriendlyName(char.BMPString):
+ pass
+
+FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
+
+
+pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
+
+friendlyName = SingleAttribute()
+friendlyName['type'] = pkcs_9_at_friendlyName
+friendlyName['values'][0] = FriendlyName()
+
+
+# Local key identifier
+
+pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
+
+localKeyId = SingleAttribute()
+localKeyId['type'] = pkcs_9_at_localKeyId
+localKeyId['values'][0] = univ.OctetString()
+
+
+# Signing description
+
+pkcs_9_at_signingDescription = _OID(pkcs_9, 13)
+
+signingDescription = CMSSingleAttribute()
+signingDescription['attrType'] = pkcs_9_at_signingDescription
+signingDescription['attrValues'][0] = DirectoryString()
+
+
+# S/MIME capabilities
+
+class SMIMECapability(AlgorithmIdentifier):
+ pass
+
+
+class SMIMECapabilities(univ.SequenceOf):
+ pass
+
+SMIMECapabilities.componentType = SMIMECapability()
+
+
+pkcs_9_at_smimeCapabilities = _OID(pkcs_9, 15)
+
+smimeCapabilities = CMSSingleAttribute()
+smimeCapabilities['attrType'] = pkcs_9_at_smimeCapabilities
+smimeCapabilities['attrValues'][0] = SMIMECapabilities()
+
+
+# Certificate Attribute Map
+
+_certificateAttributesMapUpdate = {
+ # Attribute types for use with the "pkcsEntity" object class
+ pkcs_9_at_pkcs7PDU: ContentInfo(),
+ pkcs_9_at_userPKCS12: PFX(),
+ # TODO: Once PKCS15Token can be imported, this can be included
+ # pkcs_9_at_pkcs15Token: PKCS15Token(),
+ pkcs_9_at_encryptedPrivateKeyInfo: EncryptedPrivateKeyInfo(),
+ # Attribute types for use with the "naturalPerson" object class
+ pkcs_9_at_emailAddress: EmailAddress(),
+ pkcs_9_at_unstructuredName: PKCS9String(),
+ pkcs_9_at_unstructuredAddress: DirectoryString(),
+ pkcs_9_at_dateOfBirth: useful.GeneralizedTime(),
+ pkcs_9_at_placeOfBirth: DirectoryString(),
+ pkcs_9_at_gender: GenderString(),
+ pkcs_9_at_countryOfCitizenship: X520countryName(),
+ pkcs_9_at_countryOfResidence: X520countryName(),
+ id_at_pseudonym: DirectoryString(),
+ id_at_serialNumber: X520SerialNumber(),
+ # Attribute types for use with PKCS #10 certificate requests
+ pkcs_9_at_challengePassword: DirectoryString(),
+ pkcs_9_at_extensionRequest: ExtensionRequest(),
+ pkcs_9_at_extendedCertificateAttributes: AttributeSet(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# CMS Attribute Map
+
+# Note: pkcs_9_at_smimeCapabilities is not included in the map because
+# the definition in RFC 5751 is preferred, which produces the same
+# encoding, but it allows different parameters for SMIMECapability
+# and AlgorithmIdentifier.
+
+_cmsAttributesMapUpdate = {
+ # Attribute types for use in PKCS #7 data (a.k.a. CMS)
+ pkcs_9_at_contentType: ContentType(),
+ pkcs_9_at_messageDigest: MessageDigest(),
+ pkcs_9_at_signingTime: SigningTime(),
+ pkcs_9_at_randomNonce: RandomNonce(),
+ pkcs_9_at_sequenceNumber: SequenceNumber(),
+ pkcs_9_at_counterSignature: Countersignature(),
+ # Attributes for use in PKCS #12 "PFX" PDUs or PKCS #15 tokens
+ pkcs_9_at_friendlyName: FriendlyName(),
+ pkcs_9_at_localKeyId: univ.OctetString(),
+ pkcs_9_at_signingDescription: DirectoryString(),
+ # pkcs_9_at_smimeCapabilities: SMIMECapabilities(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2986.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2986.py
new file mode 100644
index 0000000000..309637d1fe
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc2986.py
@@ -0,0 +1,75 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Joel Johnson with asn1ate tool.
+# Modified by Russ Housley to add support for opentypes by importing
+# definitions from rfc5280 so that the same maps are used.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #10: Certification Request Syntax Specification
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2986.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+AttributeType = rfc5280.AttributeType
+
+AttributeValue = rfc5280.AttributeValue
+
+AttributeTypeAndValue = rfc5280.AttributeTypeAndValue
+
+Attribute = rfc5280.Attribute
+
+RelativeDistinguishedName = rfc5280.RelativeDistinguishedName
+
+RDNSequence = rfc5280.RDNSequence
+
+Name = rfc5280.Name
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+
+class Attributes(univ.SetOf):
+ pass
+
+
+Attributes.componentType = Attribute()
+
+
+class CertificationRequestInfo(univ.Sequence):
+ pass
+
+
+CertificationRequestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPKInfo', SubjectPublicKeyInfo()),
+ namedtype.NamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3058.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3058.py
new file mode 100644
index 0000000000..725de82ae7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3058.py
@@ -0,0 +1,42 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# IDEA Encryption Algorithm in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3058.txt
+# https://www.rfc-editor.org/errata/eid5913
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_IDEA_CBC = univ.ObjectIdentifier('1.3.6.1.4.1.188.7.1.1.2')
+
+
+id_alg_CMSIDEAwrap = univ.ObjectIdentifier('1.3.6.1.4.1.188.7.1.1.6')
+
+
+class IDEA_CBCPar(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('iv', univ.OctetString())
+ # exactly 8 octets, when present
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_IDEA_CBC: IDEA_CBCPar(),
+ id_alg_CMSIDEAwrap: univ.Null("")
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3114.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3114.py
new file mode 100644
index 0000000000..badcb1f214
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3114.py
@@ -0,0 +1,77 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# TEST Company Classification Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3114.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5755
+
+
+id_smime = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, ))
+
+id_tsp = id_smime + (7, )
+
+id_tsp_TEST_Amoco = id_tsp + (1, )
+
+class Amoco_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('amoco-general', 6),
+ ('amoco-confidential', 7),
+ ('amoco-highly-confidential', 8)
+ )
+
+
+id_tsp_TEST_Caterpillar = id_tsp + (2, )
+
+class Caterpillar_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('caterpillar-public', 6),
+ ('caterpillar-green', 7),
+ ('caterpillar-yellow', 8),
+ ('caterpillar-red', 9)
+ )
+
+
+id_tsp_TEST_Whirlpool = id_tsp + (3, )
+
+class Whirlpool_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('whirlpool-public', 6),
+ ('whirlpool-internal', 7),
+ ('whirlpool-confidential', 8)
+ )
+
+
+id_tsp_TEST_Whirlpool_Categories = id_tsp + (4, )
+
+class SecurityCategoryValues(univ.SequenceOf):
+ componentType = char.UTF8String()
+
+# Example SecurityCategoryValues: "LAW DEPARTMENT USE ONLY"
+# Example SecurityCategoryValues: "HUMAN RESOURCES USE ONLY"
+
+
+# Also, the privacy mark in the security label can contain a string,
+# such as: "ATTORNEY-CLIENT PRIVILEGED INFORMATION"
+
+
+# Map of security category type OIDs to security category added
+# to the ones that are in rfc5755.py
+
+_securityCategoryMapUpdate = {
+ id_tsp_TEST_Whirlpool_Categories: SecurityCategoryValues(),
+}
+
+rfc5755.securityCategoryMap.update(_securityCategoryMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3125.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3125.py
new file mode 100644
index 0000000000..00ff9bff48
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3125.py
@@ -0,0 +1,469 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Electronic Signature Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3125.txt
+# https://www.rfc-editor.org/errata/eid5901
+# https://www.rfc-editor.org/errata/eid5902
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+AttributeType = rfc5280.AttributeType
+
+AttributeTypeAndValue = rfc5280.AttributeTypeAndValue
+
+AttributeValue = rfc5280.AttributeValue
+
+Certificate = rfc5280.Certificate
+
+CertificateList = rfc5280.CertificateList
+
+DirectoryString = rfc5280.DirectoryString
+
+GeneralName = rfc5280.GeneralName
+
+GeneralNames = rfc5280.GeneralNames
+
+Name = rfc5280.Name
+
+PolicyInformation = rfc5280.PolicyInformation
+
+
+# Electronic Signature Policies
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class AcceptablePolicySet(univ.SequenceOf):
+ componentType = CertPolicyId()
+
+
+class SignPolExtn(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.NamedType('extnValue', univ.OctetString())
+ )
+
+
+class SignPolExtensions(univ.SequenceOf):
+ componentType = SignPolExtn()
+
+
+class AlgAndLength(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algID', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('minKeyLength', univ.Integer()),
+ namedtype.OptionalNamedType('other', SignPolExtensions())
+ )
+
+
+class AlgorithmConstraints(univ.SequenceOf):
+ componentType = AlgAndLength()
+
+
+class AlgorithmConstraintSet(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('signerAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('eeCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('caCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('aaCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('tsaCertAlgorithmConstraints',
+ AlgorithmConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class AttributeValueConstraints(univ.SequenceOf):
+ componentType = AttributeTypeAndValue()
+
+
+class AttributeTypeConstraints(univ.SequenceOf):
+ componentType = AttributeType()
+
+
+class AttributeConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('attributeTypeConstarints',
+ AttributeTypeConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('attributeValueConstarints',
+ AttributeValueConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class HowCertAttribute(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('claimedAttribute', 0),
+ ('certifiedAttribtes', 1),
+ ('either', 2)
+ )
+
+
+class SkipCerts(univ.Integer):
+ subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class PolicyConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BaseDistance(univ.Integer):
+ subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum',
+ BaseDistance().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(
+ value=0)),
+ namedtype.OptionalNamedType('maximum',
+ BaseDistance().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ componentType = GeneralSubtree()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees',
+ GeneralSubtrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees',
+ GeneralSubtrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class PathLenConstraint(univ.Integer):
+ subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class CertificateTrustPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('trustpoint', Certificate()),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ PathLenConstraint().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('acceptablePolicySet',
+ AcceptablePolicySet().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('nameConstraints',
+ NameConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('policyConstraints',
+ PolicyConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class CertificateTrustTrees(univ.SequenceOf):
+ componentType = CertificateTrustPoint()
+
+
+class EnuRevReq(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('clrCheck', 0),
+ ('ocspCheck', 1),
+ ('bothCheck', 2),
+ ('eitherCheck', 3),
+ ('noCheck', 4),
+ ('other', 5)
+ )
+
+
+class RevReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enuRevReq', EnuRevReq()),
+ namedtype.OptionalNamedType('exRevReq', SignPolExtensions())
+ )
+
+
+class CertRevReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('endCertRevReq', RevReq()),
+ namedtype.NamedType('caCerts',
+ RevReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class AttributeTrustCondition(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeMandated', univ.Boolean()),
+ namedtype.NamedType('howCertAttribute', HowCertAttribute()),
+ namedtype.OptionalNamedType('attrCertificateTrustTrees',
+ CertificateTrustTrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('attrRevReq',
+ CertRevReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('attributeConstraints',
+ AttributeConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CMSAttrs(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class CertInfoReq(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('none', 0),
+ ('signerOnly', 1),
+ ('fullPath', 2)
+ )
+
+
+class CertRefReq(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('signerOnly', 1),
+ ('fullPath', 2)
+ )
+
+
+class DeltaTime(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('deltaSeconds', univ.Integer()),
+ namedtype.NamedType('deltaMinutes', univ.Integer()),
+ namedtype.NamedType('deltaHours', univ.Integer()),
+ namedtype.NamedType('deltaDays', univ.Integer())
+ )
+
+
+class TimestampTrustCondition(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('ttsCertificateTrustTrees',
+ CertificateTrustTrees().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('ttsRevReq',
+ CertRevReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('ttsNameConstraints',
+ NameConstraints().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('cautionPeriod',
+ DeltaTime().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('signatureTimestampDelay',
+ DeltaTime().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 4)))
+ )
+
+
+class SignerRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('externalSignedData', univ.Boolean()),
+ namedtype.NamedType('mandatedSignedAttr', CMSAttrs()),
+ namedtype.NamedType('mandatedUnsignedAttr', CMSAttrs()),
+ namedtype.DefaultedNamedType('mandatedCertificateRef',
+ CertRefReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(
+ value='signerOnly')),
+ namedtype.DefaultedNamedType('mandatedCertificateInfo',
+ CertInfoReq().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(
+ value='none')),
+ namedtype.OptionalNamedType('signPolExtensions',
+ SignPolExtensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class MandatedUnsignedAttr(CMSAttrs):
+ pass
+
+
+class VerifierRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mandatedUnsignedAttr', MandatedUnsignedAttr()),
+ namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions())
+ )
+
+
+class SignerAndVerifierRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signerRules', SignerRules()),
+ namedtype.NamedType('verifierRules', VerifierRules())
+ )
+
+
+class SigningCertTrustCondition(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signerTrustTrees', CertificateTrustTrees()),
+ namedtype.NamedType('signerRevReq', CertRevReq())
+ )
+
+
+class CommitmentTypeIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class FieldOfApplication(DirectoryString):
+ pass
+
+
+class CommitmentType(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('identifier', CommitmentTypeIdentifier()),
+ namedtype.OptionalNamedType('fieldOfApplication',
+ FieldOfApplication().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('semantics',
+ DirectoryString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class SelectedCommitmentTypes(univ.SequenceOf):
+ componentType = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('empty', univ.Null()),
+ namedtype.NamedType('recognizedCommitmentType', CommitmentType())
+ ))
+
+
+class CommitmentRule(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('selCommitmentTypes', SelectedCommitmentTypes()),
+ namedtype.OptionalNamedType('signerAndVeriferRules',
+ SignerAndVerifierRules().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('signingCertTrustCondition',
+ SigningCertTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('timeStampTrustCondition',
+ TimestampTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('attributeTrustCondition',
+ AttributeTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('algorithmConstraintSet',
+ AlgorithmConstraintSet().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('signPolExtensions',
+ SignPolExtensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+ )
+
+
+class CommitmentRules(univ.SequenceOf):
+ componentType = CommitmentRule()
+
+
+class CommonRules(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('signerAndVeriferRules',
+ SignerAndVerifierRules().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('signingCertTrustCondition',
+ SigningCertTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('timeStampTrustCondition',
+ TimestampTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('attributeTrustCondition',
+ AttributeTrustCondition().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('algorithmConstraintSet',
+ AlgorithmConstraintSet().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('signPolExtensions',
+ SignPolExtensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+ )
+
+
+class PolicyIssuerName(GeneralNames):
+ pass
+
+
+class SignPolicyHash(univ.OctetString):
+ pass
+
+
+class SignPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class SigningPeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime())
+ )
+
+
+class SignatureValidationPolicy(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signingPeriod', SigningPeriod()),
+ namedtype.NamedType('commonRules', CommonRules()),
+ namedtype.NamedType('commitmentRules', CommitmentRules()),
+ namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions())
+ )
+
+
+class SignPolicyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signPolicyIdentifier', SignPolicyId()),
+ namedtype.NamedType('dateOfIssue', useful.GeneralizedTime()),
+ namedtype.NamedType('policyIssuerName', PolicyIssuerName()),
+ namedtype.NamedType('fieldOfApplication', FieldOfApplication()),
+ namedtype.NamedType('signatureValidationPolicy', SignatureValidationPolicy()),
+ namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions())
+ )
+
+
+class SignaturePolicy(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signPolicyHashAlg', AlgorithmIdentifier()),
+ namedtype.NamedType('signPolicyInfo', SignPolicyInfo()),
+ namedtype.OptionalNamedType('signPolicyHash', SignPolicyHash())
+ )
+
+
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3161.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3161.py
new file mode 100644
index 0000000000..0e1dcedb39
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3161.py
@@ -0,0 +1,142 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Time-Stamp Protocol (TSP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3161.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4210
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+Extensions = rfc5280.Extensions
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+GeneralName = rfc5280.GeneralName
+
+ContentInfo = rfc5652.ContentInfo
+
+PKIFreeText = rfc4210.PKIFreeText
+
+
+id_ct_TSTInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.4')
+
+
+class Accuracy(univ.Sequence):
+ pass
+
+Accuracy.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('seconds', univ.Integer()),
+ namedtype.OptionalNamedType('millis', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('micros', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class MessageImprint(univ.Sequence):
+ pass
+
+MessageImprint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('hashedMessage', univ.OctetString())
+)
+
+
+class PKIFailureInfo(univ.BitString):
+ pass
+
+PKIFailureInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badRequest', 2),
+ ('badDataFormat', 5),
+ ('timeNotAvailable', 14),
+ ('unacceptedPolicy', 15),
+ ('unacceptedExtension', 16),
+ ('addInfoNotAvailable', 17),
+ ('systemFailure', 25)
+)
+
+
+class PKIStatus(univ.Integer):
+ pass
+
+PKIStatus.namedValues = namedval.NamedValues(
+ ('granted', 0),
+ ('grantedWithMods', 1),
+ ('rejection', 2),
+ ('waiting', 3),
+ ('revocationWarning', 4),
+ ('revocationNotification', 5)
+)
+
+
+class PKIStatusInfo(univ.Sequence):
+ pass
+
+PKIStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.OptionalNamedType('statusString', PKIFreeText()),
+ namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
+)
+
+
+class TSAPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class TSTInfo(univ.Sequence):
+ pass
+
+TSTInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
+ namedtype.NamedType('policy', TSAPolicyId()),
+ namedtype.NamedType('messageImprint', MessageImprint()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('genTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('accuracy', Accuracy()),
+ namedtype.DefaultedNamedType('ordering', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('nonce', univ.Integer()),
+ namedtype.OptionalNamedType('tsa', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class TimeStampReq(univ.Sequence):
+ pass
+
+TimeStampReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
+ namedtype.NamedType('messageImprint', MessageImprint()),
+ namedtype.OptionalNamedType('reqPolicy', TSAPolicyId()),
+ namedtype.OptionalNamedType('nonce', univ.Integer()),
+ namedtype.DefaultedNamedType('certReq', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class TimeStampToken(ContentInfo):
+ pass
+
+
+class TimeStampResp(univ.Sequence):
+ pass
+
+TimeStampResp.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType('timeStampToken', TimeStampToken())
+)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3274.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3274.py
new file mode 100644
index 0000000000..425e006f3d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3274.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Compressed Data Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3274.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+class CompressionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+# The CMS Compressed Data Content Type
+
+id_ct_compressedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.9')
+
+class CompressedData(univ.Sequence):
+ pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', rfc5652.CMSVersion()), # Always set to 0
+ namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', rfc5652.EncapsulatedContentInfo())
+)
+
+
+# Algorithm identifier for the zLib Compression Algorithm
+# This includes cpa_zlibCompress as defined in RFC 6268,
+# from https://www.rfc-editor.org/rfc/rfc6268.txt
+
+id_alg_zlibCompress = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.8')
+
+cpa_zlibCompress = rfc5280.AlgorithmIdentifier()
+cpa_zlibCompress['algorithm'] = id_alg_zlibCompress
+# cpa_zlibCompress['parameters'] are absent
+
+
+# Map of Content Type OIDs to Content Types is added to thr
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_compressedData: CompressedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3279.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3279.py
new file mode 100644
index 0000000000..f6e24deafc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3279.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules.
+#
+# Copyright (c) 2017, Danielle Madeley <danielle@madeley.id.au>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Algorithms and Identifiers for Internet X.509 Certificates and CRLs
+#
+# Derived from RFC 3279:
+# https://www.rfc-editor.org/rfc/rfc3279.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+md2 = _OID(1, 2, 840, 113549, 2, 2)
+md5 = _OID(1, 2, 840, 113549, 2, 5)
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+id_dsa = _OID(1, 2, 840, 10040, 4, 1)
+
+
+class DSAPublicKey(univ.Integer):
+ pass
+
+
+class Dss_Parms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer())
+ )
+
+
+id_dsa_with_sha1 = _OID(1, 2, 840, 10040, 4, 3)
+
+
+class Dss_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
+rsaEncryption = _OID(pkcs_1, 1)
+md2WithRSAEncryption = _OID(pkcs_1, 2)
+md5WithRSAEncryption = _OID(pkcs_1, 4)
+sha1WithRSAEncryption = _OID(pkcs_1, 5)
+
+
+class RSAPublicKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+ )
+
+
+dhpublicnumber = _OID(1, 2, 840, 10046, 2, 1)
+
+
+class DHPublicKey(univ.Integer):
+ pass
+
+
+class ValidationParms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+ )
+
+
+class DomainParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.OptionalNamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+ )
+
+
+id_keyExchangeAlgorithm = _OID(2, 16, 840, 1, 101, 2, 1, 1, 22)
+
+
+class KEA_Parms_Id(univ.OctetString):
+ pass
+
+
+ansi_X9_62 = _OID(1, 2, 840, 10045)
+
+
+class FieldID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fieldType', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Any())
+ )
+
+
+id_ecSigType = _OID(ansi_X9_62, 4)
+ecdsa_with_SHA1 = _OID(id_ecSigType, 1)
+
+
+class ECDSA_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+id_fieldType = _OID(ansi_X9_62, 1)
+prime_field = _OID(id_fieldType, 1)
+
+
+class Prime_p(univ.Integer):
+ pass
+
+
+characteristic_two_field = _OID(id_fieldType, 2)
+
+
+class Characteristic_two(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('m', univ.Integer()),
+ namedtype.NamedType('basis', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Any())
+ )
+
+
+id_characteristic_two_basis = _OID(characteristic_two_field, 3)
+gnBasis = _OID(id_characteristic_two_basis, 1)
+tpBasis = _OID(id_characteristic_two_basis, 2)
+
+
+class Trinomial(univ.Integer):
+ pass
+
+
+ppBasis = _OID(id_characteristic_two_basis, 3)
+
+
+class Pentanomial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('k1', univ.Integer()),
+ namedtype.NamedType('k2', univ.Integer()),
+ namedtype.NamedType('k3', univ.Integer())
+ )
+
+
+class FieldElement(univ.OctetString):
+ pass
+
+
+class ECPoint(univ.OctetString):
+ pass
+
+
+class Curve(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('a', FieldElement()),
+ namedtype.NamedType('b', FieldElement()),
+ namedtype.OptionalNamedType('seed', univ.BitString())
+ )
+
+
+class ECPVer(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('ecpVer1', 1)
+ )
+
+
+class ECParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', ECPVer()),
+ namedtype.NamedType('fieldID', FieldID()),
+ namedtype.NamedType('curve', Curve()),
+ namedtype.NamedType('base', ECPoint()),
+ namedtype.NamedType('order', univ.Integer()),
+ namedtype.OptionalNamedType('cofactor', univ.Integer())
+ )
+
+
+class EcpkParameters(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ecParameters', ECParameters()),
+ namedtype.NamedType('namedCurve', univ.ObjectIdentifier()),
+ namedtype.NamedType('implicitlyCA', univ.Null())
+ )
+
+
+id_publicKeyType = _OID(ansi_X9_62, 2)
+id_ecPublicKey = _OID(id_publicKeyType, 1)
+
+ellipticCurve = _OID(ansi_X9_62, 3)
+
+c_TwoCurve = _OID(ellipticCurve, 0)
+c2pnb163v1 = _OID(c_TwoCurve, 1)
+c2pnb163v2 = _OID(c_TwoCurve, 2)
+c2pnb163v3 = _OID(c_TwoCurve, 3)
+c2pnb176w1 = _OID(c_TwoCurve, 4)
+c2tnb191v1 = _OID(c_TwoCurve, 5)
+c2tnb191v2 = _OID(c_TwoCurve, 6)
+c2tnb191v3 = _OID(c_TwoCurve, 7)
+c2onb191v4 = _OID(c_TwoCurve, 8)
+c2onb191v5 = _OID(c_TwoCurve, 9)
+c2pnb208w1 = _OID(c_TwoCurve, 10)
+c2tnb239v1 = _OID(c_TwoCurve, 11)
+c2tnb239v2 = _OID(c_TwoCurve, 12)
+c2tnb239v3 = _OID(c_TwoCurve, 13)
+c2onb239v4 = _OID(c_TwoCurve, 14)
+c2onb239v5 = _OID(c_TwoCurve, 15)
+c2pnb272w1 = _OID(c_TwoCurve, 16)
+c2pnb304w1 = _OID(c_TwoCurve, 17)
+c2tnb359v1 = _OID(c_TwoCurve, 18)
+c2pnb368w1 = _OID(c_TwoCurve, 19)
+c2tnb431r1 = _OID(c_TwoCurve, 20)
+
+primeCurve = _OID(ellipticCurve, 1)
+prime192v1 = _OID(primeCurve, 1)
+prime192v2 = _OID(primeCurve, 2)
+prime192v3 = _OID(primeCurve, 3)
+prime239v1 = _OID(primeCurve, 4)
+prime239v2 = _OID(primeCurve, 5)
+prime239v3 = _OID(primeCurve, 6)
+prime256v1 = _OID(primeCurve, 7)
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py. Do not add OIDs with absent paramaters.
+
+_algorithmIdentifierMapUpdate = {
+ md2: univ.Null(""),
+ md5: univ.Null(""),
+ id_sha1: univ.Null(""),
+ id_dsa: Dss_Parms(),
+ rsaEncryption: univ.Null(""),
+ md2WithRSAEncryption: univ.Null(""),
+ md5WithRSAEncryption: univ.Null(""),
+ sha1WithRSAEncryption: univ.Null(""),
+ dhpublicnumber: DomainParameters(),
+ id_keyExchangeAlgorithm: KEA_Parms_Id(),
+ id_ecPublicKey: EcpkParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3280.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3280.py
new file mode 100644
index 0000000000..4c6df13280
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3280.py
@@ -0,0 +1,1543 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate and Certificate
+# Revocation List (CRL) Profile
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3280.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+unformatted_postal_address = univ.Integer(16)
+
+ub_organizational_units = univ.Integer(4)
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+id_at = _OID(2, 5, 4)
+
+id_at_name = _OID(id_at, 41)
+
+ub_pds_parameter_length = univ.Integer(30)
+
+
+class PDSParameter(univ.Set):
+ pass
+
+
+PDSParameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+ub_organization_name_length = univ.Integer(64)
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+id_pkix = _OID(1, 3, 6, 1, 5, 5, 7)
+
+id_qt = _OID(id_pkix, 2)
+
+
+class PresentationAddress(univ.Sequence):
+ pass
+
+
+PresentationAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class AlgorithmIdentifier(univ.Sequence):
+ pass
+
+
+AlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any())
+)
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Extension(univ.Sequence):
+ pass
+
+
+Extension.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('extnValue', univ.OctetString())
+)
+
+
+class Extensions(univ.SequenceOf):
+ pass
+
+
+Extensions.componentType = Extension()
+Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ pass
+
+
+SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+)
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class Validity(univ.Sequence):
+ pass
+
+
+Validity.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+)
+
+
+class Version(univ.Integer):
+ pass
+
+
+Version.namedValues = namedval.NamedValues(
+ ('v1', 0),
+ ('v2', 1),
+ ('v3', 2)
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue())
+)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ pass
+
+
+RelativeDistinguishedName.componentType = AttributeTypeAndValue()
+RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class RDNSequence(univ.SequenceOf):
+ pass
+
+
+RDNSequence.componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ pass
+
+
+Name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rdnSequence', RDNSequence())
+)
+
+
+class TBSCertificate(univ.Sequence):
+ pass
+
+
+TBSCertificate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value="v1")),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class Certificate(univ.Sequence):
+ pass
+
+
+Certificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+ub_surname_length = univ.Integer(40)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_e163_4_sub_address_length = univ.Integer(40)
+
+teletex_common_name = univ.Integer(2)
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+
+class CountryName(univ.Choice):
+ pass
+
+
+CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+CountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+extension_OR_address_components = univ.Integer(12)
+
+id_at_dnQualifier = _OID(id_at, 46)
+
+ub_e163_4_number_length = univ.Integer(15)
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ pass
+
+
+ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('e163-4-address', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))
+ ),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+terminal_type = univ.Integer(23)
+
+id_domainComponent = _OID(0, 9, 2342, 19200300, 100, 1, 25)
+
+ub_state_name = univ.Integer(128)
+
+
+class X520StateOrProvinceName(univ.Choice):
+ pass
+
+
+X520StateOrProvinceName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+)
+
+ub_organization_name = univ.Integer(64)
+
+
+class X520OrganizationName(univ.Choice):
+ pass
+
+
+X520OrganizationName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+)
+
+ub_emailaddress_length = univ.Integer(128)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+id_at_surname = _OID(id_at, 4)
+
+ub_common_name_length = univ.Integer(64)
+
+id_ad = _OID(id_pkix, 48)
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+ub_given_name_length = univ.Integer(16)
+
+ub_initials_length = univ.Integer(5)
+
+
+class PersonalName(univ.Set):
+ pass
+
+
+PersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+ub_x121_address_length = univ.Integer(16)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ pass
+
+
+BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+)
+
+ub_domain_defined_attributes = univ.Integer(4)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+ub_extension_attributes = univ.Integer(256)
+
+
+class ExtensionAttribute(univ.Sequence):
+ pass
+
+
+ExtensionAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class ORAddress(univ.Sequence):
+ pass
+
+
+ORAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+)
+
+id_pe = _OID(id_pkix, 1)
+
+ub_title = univ.Integer(64)
+
+
+class X520Title(univ.Choice):
+ pass
+
+
+X520Title.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+)
+
+id_at_organizationalUnitName = _OID(id_at, 11)
+
+
+class EmailAddress(char.IA5String):
+ pass
+
+
+EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+physical_delivery_country_name = univ.Integer(8)
+
+id_at_givenName = _OID(id_at, 42)
+
+
+class TeletexCommonName(char.TeletexString):
+ pass
+
+
+TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+id_qt_cps = _OID(id_qt, 1)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+id_kp = _OID(id_pkix, 3)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class DomainComponent(char.IA5String):
+ pass
+
+
+id_at_initials = _OID(id_at, 43)
+
+id_qt_unotice = _OID(id_qt, 2)
+
+ub_pds_name_length = univ.Integer(16)
+
+
+class PDSName(char.PrintableString):
+ pass
+
+
+PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+class DistinguishedName(RDNSequence):
+ pass
+
+
+class CommonName(char.PrintableString):
+ pass
+
+
+CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+ub_serial_number = univ.Integer(64)
+
+
+class X520SerialNumber(char.PrintableString):
+ pass
+
+
+X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
+
+id_at_generationQualifier = _OID(id_at, 44)
+
+ub_organizational_unit_name = univ.Integer(64)
+
+id_ad_ocsp = _OID(id_ad, 1)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class TeletexPersonalName(univ.Set):
+ pass
+
+
+TeletexPersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
+TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+class TBSCertList(univ.Sequence):
+ pass
+
+
+TBSCertList.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates',
+ univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ ))
+ )),
+ namedtype.OptionalNamedType('crlExtensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+local_postal_attributes = univ.Integer(21)
+
+pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ pass
+
+
+PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+ub_name = univ.Integer(32768)
+
+
+class X520name(univ.Choice):
+ pass
+
+
+X520name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+)
+
+id_emailAddress = _OID(pkcs_9, 1)
+
+
+class TerminalType(univ.Integer):
+ pass
+
+
+TerminalType.namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+)
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ pass
+
+
+X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+)
+
+id_at_commonName = _OID(id_at, 3)
+
+pds_name = univ.Integer(7)
+
+post_office_box_address = univ.Integer(18)
+
+ub_locality_name = univ.Integer(128)
+
+
+class X520LocalityName(univ.Choice):
+ pass
+
+
+X520LocalityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+)
+
+id_ad_timeStamping = _OID(id_ad, 3)
+
+id_at_countryName = _OID(id_at, 6)
+
+physical_delivery_personal_name = univ.Integer(13)
+
+teletex_personal_name = univ.Integer(4)
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+ub_postal_code_length = univ.Integer(16)
+
+
+class PostalCode(univ.Choice):
+ pass
+
+
+PostalCode.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+)
+
+
+class X520countryName(char.PrintableString):
+ pass
+
+
+X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+
+postal_code = univ.Integer(9)
+
+id_ad_caRepository = _OID(id_ad, 5)
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+id_at_title = _OID(id_at, 12)
+
+id_at_serialNumber = _OID(id_at, 5)
+
+id_ad_caIssuers = _OID(id_ad, 2)
+
+ub_integer_options = univ.Integer(256)
+
+
+class CertificateList(univ.Sequence):
+ pass
+
+
+CertificateList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
+TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+physical_delivery_office_name = univ.Integer(10)
+
+ub_common_name = univ.Integer(64)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+ub_pseudonym = univ.Integer(128)
+
+poste_restante_address = univ.Integer(19)
+
+id_at_organizationName = _OID(id_at, 10)
+
+physical_delivery_office_number = univ.Integer(11)
+
+id_at_pseudonym = _OID(id_at, 65)
+
+
+class X520CommonName(univ.Choice):
+ pass
+
+
+X520CommonName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+)
+
+physical_delivery_organization_name = univ.Integer(14)
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+id_at_stateOrProvinceName = _OID(id_at, 8)
+
+common_name = univ.Integer(1)
+
+id_at_localityName = _OID(id_at, 7)
+
+ub_match = univ.Integer(128)
+
+ub_unformatted_address_length = univ.Integer(180)
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+)
+
+extended_network_address = univ.Integer(22)
+
+unique_postal_name = univ.Integer(20)
+
+ub_pds_physical_address_lines = univ.Integer(6)
+
+
+class UnformattedPostalAddress(univ.Set):
+ pass
+
+
+UnformattedPostalAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+)
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+class X520Pseudonym(univ.Choice):
+ pass
+
+
+X520Pseudonym.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
+)
+
+teletex_organization_name = univ.Integer(3)
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+street_address = univ.Integer(17)
+
+id_kp_OCSPSigning = _OID(id_kp, 9)
+
+id_ce = _OID(2, 5, 29)
+
+id_ce_certificatePolicies = _OID(id_ce, 32)
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('partyName',
+ DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AnotherName(univ.Sequence):
+ pass
+
+
+AnotherName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress',
+ univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+id_ce_cRLDistributionPoints = _OID(id_ce, 31)
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyMappings(univ.SequenceOf):
+ pass
+
+
+PolicyMappings.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+))
+
+PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+holdInstruction = _OID(2, 2, 840, 10040, 2)
+
+id_ce_subjectDirectoryAttributes = _OID(id_ce, 9)
+
+id_holdinstruction_callissuer = _OID(holdInstruction, 2)
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ pass
+
+
+SubjectDirectoryAttributes.componentType = Attribute()
+SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+anyPolicy = _OID(id_ce_certificatePolicies, 0)
+
+id_ce_subjectAltName = _OID(id_ce, 17)
+
+id_kp_emailProtection = _OID(id_kp, 4)
+
+
+class ReasonFlags(univ.BitString):
+ pass
+
+
+ReasonFlags.namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('privilegeWithdrawn', 7),
+ ('aACompromise', 8)
+)
+
+
+class DistributionPointName(univ.Choice):
+ pass
+
+
+DistributionPointName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName',
+ GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DistributionPoint(univ.Sequence):
+ pass
+
+
+DistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_ce_keyUsage = _OID(id_ce, 15)
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ pass
+
+
+PolicyQualifierInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType('qualifier', univ.Any())
+)
+
+
+class PolicyInformation(univ.Sequence):
+ pass
+
+
+PolicyInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
+)
+
+
+class CertificatePolicies(univ.SequenceOf):
+ pass
+
+
+CertificatePolicies.componentType = PolicyInformation()
+CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_ce_basicConstraints = _OID(id_ce, 19)
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ pass
+
+
+ExtKeyUsageSyntax.componentType = KeyPurposeId()
+ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+class BasicConstraints(univ.Sequence):
+ pass
+
+
+BasicConstraints.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+
+class SkipCerts(univ.Integer):
+ pass
+
+
+SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class InhibitAnyPolicy(SkipCerts):
+ pass
+
+
+class CRLNumber(univ.Integer):
+ pass
+
+
+CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ pass
+
+
+AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_ce_nameConstraints = _OID(id_ce, 30)
+
+id_kp_serverAuth = _OID(id_kp, 1)
+
+id_ce_freshestCRL = _OID(id_ce, 46)
+
+id_ce_cRLReasons = _OID(id_ce, 21)
+
+
+class CRLDistributionPoints(univ.SequenceOf):
+ pass
+
+
+CRLDistributionPoints.componentType = DistributionPoint()
+CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class FreshestCRL(CRLDistributionPoints):
+ pass
+
+
+id_ce_inhibitAnyPolicy = _OID(id_ce, 54)
+
+
+class CRLReason(univ.Enumerated):
+ pass
+
+
+CRLReason.namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+)
+
+
+class BaseDistance(univ.Integer):
+ pass
+
+
+BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ pass
+
+
+GeneralSubtree.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ pass
+
+
+GeneralSubtrees.componentType = GeneralSubtree()
+GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ pass
+
+
+NameConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_pe_authorityInfoAccess = _OID(id_pe, 1)
+
+id_pe_subjectInfoAccess = _OID(id_pe, 11)
+
+id_ce_certificateIssuer = _OID(id_ce, 29)
+
+id_ce_invalidityDate = _OID(id_ce, 24)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('any', univ.Any())
+)
+
+id_ce_authorityKeyIdentifier = _OID(id_ce, 35)
+
+
+class AccessDescription(univ.Sequence):
+ pass
+
+
+AccessDescription.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+)
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+AuthorityInfoAccessSyntax.componentType = AccessDescription()
+AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_ce_issuingDistributionPoint = _OID(id_ce, 28)
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+class DisplayText(univ.Choice):
+ pass
+
+
+DisplayText.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+)
+
+
+class NoticeReference(univ.Sequence):
+ pass
+
+
+NoticeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+)
+
+
+class UserNotice(univ.Sequence):
+ pass
+
+
+UserNotice.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+)
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ pass
+
+
+PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_ce_subjectKeyIdentifier = _OID(id_ce, 14)
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+class SubjectInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+SubjectInfoAccessSyntax.componentType = AccessDescription()
+SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class KeyUsage(univ.BitString):
+ pass
+
+
+KeyUsage.namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+)
+
+id_ce_extKeyUsage = _OID(id_ce, 37)
+
+anyExtendedKeyUsage = _OID(id_ce_extKeyUsage, 0)
+
+id_ce_privateKeyUsagePeriod = _OID(id_ce, 16)
+
+id_ce_policyMappings = _OID(id_ce, 33)
+
+id_ce_cRLNumber = _OID(id_ce, 20)
+
+id_ce_policyConstraints = _OID(id_ce, 36)
+
+id_holdinstruction_none = _OID(holdInstruction, 1)
+
+id_holdinstruction_reject = _OID(holdInstruction, 3)
+
+id_kp_timeStamping = _OID(id_kp, 8)
+
+
+class PolicyConstraints(univ.Sequence):
+ pass
+
+
+PolicyConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_kp_clientAuth = _OID(id_kp, 2)
+
+id_ce_deltaCRLIndicator = _OID(id_ce, 27)
+
+id_ce_issuerAltName = _OID(id_ce, 18)
+
+id_kp_codeSigning = _OID(id_kp, 3)
+
+id_ce_holdInstructionCode = _OID(id_ce, 23)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ pass
+
+
+IssuingDistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
+)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3281.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3281.py
new file mode 100644
index 0000000000..a78abf9fea
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3281.py
@@ -0,0 +1,331 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3281.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3280
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class ObjectDigestInfo(univ.Sequence):
+ pass
+
+
+ObjectDigestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType', univ.Enumerated(
+ namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest', univ.BitString())
+)
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.GeneralNames()),
+ namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier())
+)
+
+
+class TargetCert(univ.Sequence):
+ pass
+
+
+TargetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetCertificate', IssuerSerial()),
+ namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()),
+ namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
+)
+
+
+class Target(univ.Choice):
+ pass
+
+
+Target.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetName', rfc3280.GeneralName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('targetCert',
+ TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class Targets(univ.SequenceOf):
+ pass
+
+
+Targets.componentType = Target()
+
+
+class ProxyInfo(univ.SequenceOf):
+ pass
+
+
+ProxyInfo.componentType = Targets()
+
+id_at_role = _buildOid(rfc3280.id_at, 72)
+
+id_pe_aaControls = _buildOid(rfc3280.id_pe, 6)
+
+id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55)
+
+id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4)
+
+
+class ClassList(univ.BitString):
+ pass
+
+
+ClassList.namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('topSecret', 5)
+)
+
+
+class SecurityCategory(univ.Sequence):
+ pass
+
+
+SecurityCategory.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Clearance(univ.Sequence):
+ pass
+
+
+Clearance.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(
+ value="unclassified")),
+ namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class AttCertVersion(univ.Integer):
+ pass
+
+
+AttCertVersion.namedValues = namedval.NamedValues(
+ ('v2', 1)
+)
+
+id_aca = _buildOid(rfc3280.id_pkix, 10)
+
+id_at_clearance = _buildOid(2, 5, 1, 5, 55)
+
+
+class AttrSpec(univ.SequenceOf):
+ pass
+
+
+AttrSpec.componentType = univ.ObjectIdentifier()
+
+
+class AAControls(univ.Sequence):
+ pass
+
+
+AAControls.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.OptionalNamedType('permittedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1))
+)
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ pass
+
+
+AttCertValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+)
+
+
+id_aca_authenticationInfo = _buildOid(id_aca, 1)
+
+
+class V2Form(univ.Sequence):
+ pass
+
+
+V2Form.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()),
+ namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class AttCertIssuer(univ.Choice):
+ pass
+
+
+AttCertIssuer.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('v1Form', rfc3280.GeneralNames()),
+ namedtype.NamedType('v2Form',
+ V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class Holder(univ.Sequence):
+ pass
+
+
+Holder.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class AttributeCertificateInfo(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', AttCertVersion()),
+ namedtype.NamedType('holder', Holder()),
+ namedtype.NamedType('issuer', AttCertIssuer()),
+ namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
+ namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
+)
+
+
+class AttributeCertificate(univ.Sequence):
+ pass
+
+
+AttributeCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acinfo', AttributeCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+)
+
+id_mod = _buildOid(rfc3280.id_pkix, 0)
+
+id_mod_attribute_cert = _buildOid(id_mod, 12)
+
+id_aca_accessIdentity = _buildOid(id_aca, 2)
+
+
+class RoleSyntax(univ.Sequence):
+ pass
+
+
+RoleSyntax.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('roleName',
+ rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_aca_chargingIdentity = _buildOid(id_aca, 3)
+
+
+class ACClearAttrs(univ.Sequence):
+ pass
+
+
+ACClearAttrs.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acIssuer', rfc3280.GeneralName()),
+ namedtype.NamedType('acSerial', univ.Integer()),
+ namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute()))
+)
+
+id_aca_group = _buildOid(id_aca, 4)
+
+id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10)
+
+
+class SvceAuthInfo(univ.Sequence):
+ pass
+
+
+SvceAuthInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('service', rfc3280.GeneralName()),
+ namedtype.NamedType('ident', rfc3280.GeneralName()),
+ namedtype.OptionalNamedType('authInfo', univ.OctetString())
+)
+
+
+class IetfAttrSyntax(univ.Sequence):
+ pass
+
+
+IetfAttrSyntax.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType(
+ 'values', univ.SequenceOf(
+ componentType=univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('octets', univ.OctetString()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('string', char.UTF8String())
+ )
+ )
+ )
+ )
+)
+
+id_aca_encAttrs = _buildOid(id_aca, 6)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3370.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3370.py
new file mode 100644
index 0000000000..51a9d5c5b1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3370.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS) Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3370.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5753
+from pyasn1_modules import rfc5990
+from pyasn1_modules import rfc8018
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 3279
+
+dhpublicnumber = rfc3279.dhpublicnumber
+
+dh_public_number = dhpublicnumber
+
+DHPublicKey = rfc3279.DHPublicKey
+
+DomainParameters = rfc3279.DomainParameters
+
+DHDomainParameters = DomainParameters
+
+Dss_Parms = rfc3279.Dss_Parms
+
+Dss_Sig_Value = rfc3279.Dss_Sig_Value
+
+md5 = rfc3279.md5
+
+md5WithRSAEncryption = rfc3279.md5WithRSAEncryption
+
+RSAPublicKey = rfc3279.RSAPublicKey
+
+rsaEncryption = rfc3279.rsaEncryption
+
+ValidationParms = rfc3279.ValidationParms
+
+id_dsa = rfc3279.id_dsa
+
+id_dsa_with_sha1 = rfc3279.id_dsa_with_sha1
+
+id_sha1 = rfc3279.id_sha1
+
+sha_1 = id_sha1
+
+sha1WithRSAEncryption = rfc3279.sha1WithRSAEncryption
+
+
+# Imports from RFC 5753
+
+CBCParameter = rfc5753.CBCParameter
+
+CBCParameter = rfc5753.IV
+
+KeyWrapAlgorithm = rfc5753.KeyWrapAlgorithm
+
+
+# Imports from RFC 5990
+
+id_alg_CMS3DESwrap = rfc5990.id_alg_CMS3DESwrap
+
+
+# Imports from RFC 8018
+
+des_EDE3_CBC = rfc8018.des_EDE3_CBC
+
+des_ede3_cbc = des_EDE3_CBC
+
+rc2CBC = rfc8018.rc2CBC
+
+rc2_cbc = rc2CBC
+
+RC2_CBC_Parameter = rfc8018.RC2_CBC_Parameter
+
+RC2CBCParameter = RC2_CBC_Parameter
+
+PBKDF2_params = rfc8018.PBKDF2_params
+
+id_PBKDF2 = rfc8018.id_PBKDF2
+
+
+# The few things that are not already defined elsewhere
+
+hMAC_SHA1 = univ.ObjectIdentifier('1.3.6.1.5.5.8.1.2')
+
+
+id_alg_ESDH = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.5')
+
+
+id_alg_SSDH = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.10')
+
+
+id_alg_CMSRC2wrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.7')
+
+
+class RC2ParameterVersion(univ.Integer):
+ pass
+
+
+class RC2wrapParameter(RC2ParameterVersion):
+ pass
+
+
+class Dss_Pub_Key(univ.Integer):
+ pass
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ hMAC_SHA1: univ.Null(""),
+ id_alg_CMSRC2wrap: RC2wrapParameter(),
+ id_alg_ESDH: KeyWrapAlgorithm(),
+ id_alg_SSDH: KeyWrapAlgorithm(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the S/MIME Capabilities map in rfc5751.py.
+
+_smimeCapabilityMapUpdate = {
+ id_alg_CMSRC2wrap: RC2wrapParameter(),
+ id_alg_ESDH: KeyWrapAlgorithm(),
+ id_alg_SSDH: KeyWrapAlgorithm(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3412.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3412.py
new file mode 100644
index 0000000000..2cf1e1020f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3412.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv3 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3412.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1905
+
+
+class ScopedPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contextEngineId', univ.OctetString()),
+ namedtype.NamedType('contextName', univ.OctetString()),
+ namedtype.NamedType('data', rfc1905.PDUs())
+ )
+
+
+class ScopedPduData(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('plaintext', ScopedPDU()),
+ namedtype.NamedType('encryptedPDU', univ.OctetString()),
+ )
+
+
+class HeaderData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('msgID',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgMaxSize',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(484, 2147483647))),
+ namedtype.NamedType('msgFlags', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 1))),
+ namedtype.NamedType('msgSecurityModel',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 2147483647)))
+ )
+
+
+class SNMPv3Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('msgVersion',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgGlobalData', HeaderData()),
+ namedtype.NamedType('msgSecurityParameters', univ.OctetString()),
+ namedtype.NamedType('msgData', ScopedPduData())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3414.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3414.py
new file mode 100644
index 0000000000..00420cb01c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3414.py
@@ -0,0 +1,28 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv3 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3414.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+
+class UsmSecurityParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()),
+ namedtype.NamedType('msgAuthoritativeEngineBoots',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgAuthoritativeEngineTime',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgUserName',
+ univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
+ namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
+ namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3447.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3447.py
new file mode 100644
index 0000000000..3352b70c9e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3447.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.asn
+#
+# Sample captures could be obtained with "openssl genrsa" command
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+
+from pyasn1_modules.rfc2437 import *
+
+
+class OtherPrimeInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('prime', univ.Integer()),
+ namedtype.NamedType('exponent', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer())
+ )
+
+
+class OtherPrimeInfos(univ.SequenceOf):
+ componentType = OtherPrimeInfo()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class RSAPrivateKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('two-prime', 0), ('multi', 1)))),
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ namedtype.NamedType('privateExponent', univ.Integer()),
+ namedtype.NamedType('prime1', univ.Integer()),
+ namedtype.NamedType('prime2', univ.Integer()),
+ namedtype.NamedType('exponent1', univ.Integer()),
+ namedtype.NamedType('exponent2', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer()),
+ namedtype.OptionalNamedType('otherPrimeInfos', OtherPrimeInfos())
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3537.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3537.py
new file mode 100644
index 0000000000..374dd8193c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3537.py
@@ -0,0 +1,34 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Wrapping a HMAC key with a Triple-DES Key or an AES Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3537.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_alg_HMACwith3DESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.11')
+
+
+id_alg_HMACwithAESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.12')
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_HMACwith3DESwrap: univ.Null(""),
+ id_alg_HMACwithAESwrap: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3560.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3560.py
new file mode 100644
index 0000000000..8365436df5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3560.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RSAES-OAEP Key Transport Algorithm in CMS
+#
+# Notice that all of the things needed in RFC 3560 are also defined
+# in RFC 4055. So, they are all pulled from the RFC 4055 module into
+# this one so that people looking a RFC 3560 can easily find them.
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3560.txt
+#
+
+from pyasn1_modules import rfc4055
+
+id_sha1 = rfc4055.id_sha1
+
+id_sha256 = rfc4055.id_sha256
+
+id_sha384 = rfc4055.id_sha384
+
+id_sha512 = rfc4055.id_sha512
+
+id_mgf1 = rfc4055.id_mgf1
+
+rsaEncryption = rfc4055.rsaEncryption
+
+id_RSAES_OAEP = rfc4055.id_RSAES_OAEP
+
+id_pSpecified = rfc4055.id_pSpecified
+
+sha1Identifier = rfc4055.sha1Identifier
+
+sha256Identifier = rfc4055.sha256Identifier
+
+sha384Identifier = rfc4055.sha384Identifier
+
+sha512Identifier = rfc4055.sha512Identifier
+
+mgf1SHA1Identifier = rfc4055.mgf1SHA1Identifier
+
+mgf1SHA256Identifier = rfc4055.mgf1SHA256Identifier
+
+mgf1SHA384Identifier = rfc4055.mgf1SHA384Identifier
+
+mgf1SHA512Identifier = rfc4055.mgf1SHA512Identifier
+
+pSpecifiedEmptyIdentifier = rfc4055.pSpecifiedEmptyIdentifier
+
+
+class RSAES_OAEP_params(rfc4055.RSAES_OAEP_params):
+ pass
+
+
+rSAES_OAEP_Default_Params = RSAES_OAEP_params()
+
+rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier
+
+rSAES_OAEP_SHA256_Params = rfc4055.rSAES_OAEP_SHA256_Params
+
+rSAES_OAEP_SHA256_Identifier = rfc4055.rSAES_OAEP_SHA256_Identifier
+
+rSAES_OAEP_SHA384_Params = rfc4055.rSAES_OAEP_SHA384_Params
+
+rSAES_OAEP_SHA384_Identifier = rfc4055.rSAES_OAEP_SHA384_Identifier
+
+rSAES_OAEP_SHA512_Params = rfc4055.rSAES_OAEP_SHA512_Params
+
+rSAES_OAEP_SHA512_Identifier = rfc4055.rSAES_OAEP_SHA512_Identifier
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3565.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3565.py
new file mode 100644
index 0000000000..ec75e23489
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3565.py
@@ -0,0 +1,57 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Use of the Advanced Encryption Standard (AES) Encryption
+# Algorithm in the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3565.txt
+
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class AES_IV(univ.OctetString):
+ pass
+
+AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+id_aes128_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.2')
+
+id_aes192_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.22')
+
+id_aes256_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.42')
+
+
+id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5')
+
+id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25')
+
+id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45')
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_aes128_CBC: AES_IV(),
+ id_aes192_CBC: AES_IV(),
+ id_aes256_CBC: AES_IV(),
+ id_aes128_wrap: univ.Null(),
+ id_aes192_wrap: univ.Null(),
+ id_aes256_wrap: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3657.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3657.py
new file mode 100644
index 0000000000..ebf23dabcb
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3657.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Camellia Algorithm in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3657.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+
+
+id_camellia128_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.2')
+
+id_camellia192_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.3')
+
+id_camellia256_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.4')
+
+id_camellia128_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.2')
+
+id_camellia192_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.3')
+
+id_camellia256_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.4')
+
+
+
+class Camellia_IV(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+class CamelliaSMimeCapability(univ.Null):
+ pass
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_camellia128_cbc: Camellia_IV(),
+ id_camellia192_cbc: Camellia_IV(),
+ id_camellia256_cbc: Camellia_IV(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the SMIMECapabilities Attribute map in rfc5751.py
+
+_smimeCapabilityMapUpdate = {
+ id_camellia128_cbc: CamelliaSMimeCapability(),
+ id_camellia192_cbc: CamelliaSMimeCapability(),
+ id_camellia256_cbc: CamelliaSMimeCapability(),
+ id_camellia128_wrap: CamelliaSMimeCapability(),
+ id_camellia192_wrap: CamelliaSMimeCapability(),
+ id_camellia256_wrap: CamelliaSMimeCapability(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3709.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3709.py
new file mode 100644
index 0000000000..aa1d5b6abf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3709.py
@@ -0,0 +1,207 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Logotypes in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3709.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6170
+
+MAX = float('inf')
+
+
+class HashAlgAndValue(univ.Sequence):
+ pass
+
+HashAlgAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', univ.OctetString())
+)
+
+
+class LogotypeDetails(univ.Sequence):
+ pass
+
+LogotypeDetails.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mediaType', char.IA5String()),
+ namedtype.NamedType('logotypeHash', univ.SequenceOf(
+ componentType=HashAlgAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('logotypeURI', univ.SequenceOf(
+ componentType=char.IA5String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class LogotypeAudioInfo(univ.Sequence):
+ pass
+
+LogotypeAudioInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fileSize', univ.Integer()),
+ namedtype.NamedType('playTime', univ.Integer()),
+ namedtype.NamedType('channels', univ.Integer()),
+ namedtype.OptionalNamedType('sampleRate', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('language', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class LogotypeAudio(univ.Sequence):
+ pass
+
+LogotypeAudio.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('audioDetails', LogotypeDetails()),
+ namedtype.OptionalNamedType('audioInfo', LogotypeAudioInfo())
+)
+
+
+class LogotypeImageType(univ.Integer):
+ pass
+
+LogotypeImageType.namedValues = namedval.NamedValues(
+ ('grayScale', 0),
+ ('color', 1)
+)
+
+
+class LogotypeImageResolution(univ.Choice):
+ pass
+
+LogotypeImageResolution.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numBits',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('tableSize',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class LogotypeImageInfo(univ.Sequence):
+ pass
+
+LogotypeImageInfo.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('type', LogotypeImageType().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='color')),
+ namedtype.NamedType('fileSize', univ.Integer()),
+ namedtype.NamedType('xSize', univ.Integer()),
+ namedtype.NamedType('ySize', univ.Integer()),
+ namedtype.OptionalNamedType('resolution', LogotypeImageResolution()),
+ namedtype.OptionalNamedType('language', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class LogotypeImage(univ.Sequence):
+ pass
+
+LogotypeImage.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('imageDetails', LogotypeDetails()),
+ namedtype.OptionalNamedType('imageInfo', LogotypeImageInfo())
+)
+
+
+class LogotypeData(univ.Sequence):
+ pass
+
+LogotypeData.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('image', univ.SequenceOf(
+ componentType=LogotypeImage())),
+ namedtype.OptionalNamedType('audio', univ.SequenceOf(
+ componentType=LogotypeAudio()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+
+class LogotypeReference(univ.Sequence):
+ pass
+
+LogotypeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('refStructHash', univ.SequenceOf(
+ componentType=HashAlgAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('refStructURI', univ.SequenceOf(
+ componentType=char.IA5String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class LogotypeInfo(univ.Choice):
+ pass
+
+LogotypeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('direct',
+ LogotypeData().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('indirect', LogotypeReference().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+# Other logotype type and associated object identifiers
+
+id_logo_background = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.2')
+
+id_logo_loyalty = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.1')
+
+id_logo_certImage = rfc6170.id_logo_certImage
+
+
+class OtherLogotypeInfo(univ.Sequence):
+ pass
+
+OtherLogotypeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('logotypeType', univ.ObjectIdentifier()),
+ namedtype.NamedType('info', LogotypeInfo())
+)
+
+
+# Logotype Certificate Extension
+
+id_pe_logotype = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.12')
+
+
+class LogotypeExtn(univ.Sequence):
+ pass
+
+LogotypeExtn.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('communityLogos', univ.SequenceOf(
+ componentType=LogotypeInfo()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('issuerLogo', LogotypeInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('subjectLogo', LogotypeInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('otherLogos', univ.SequenceOf(
+ componentType=OtherLogotypeInfo()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_logotype: LogotypeExtn(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3739.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3739.py
new file mode 100644
index 0000000000..4aa5aaf0de
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3739.py
@@ -0,0 +1,203 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add WithComponentsConstraints to
+# enforce the requirements that are indicated in comments.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Qualified Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3739.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Initialize the qcStatement map
+
+qcStatementMap = { }
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+AttributeType = rfc5280.AttributeType
+
+DirectoryString = rfc5280.DirectoryString
+
+GeneralName = rfc5280.GeneralName
+
+id_pkix = rfc5280.id_pkix
+
+id_pe = rfc5280.id_pe
+
+
+# Arc for QC personal data attributes
+
+id_pda = id_pkix + (9, )
+
+
+# Arc for QC statements
+
+id_qcs = id_pkix + (11, )
+
+
+# Personal data attributes
+
+id_pda_dateOfBirth = id_pda + (1, )
+
+class DateOfBirth(useful.GeneralizedTime):
+ pass
+
+
+id_pda_placeOfBirth = id_pda + (2, )
+
+class PlaceOfBirth(DirectoryString):
+ pass
+
+
+id_pda_gender = id_pda + (3, )
+
+class Gender(char.PrintableString):
+ subtypeSpec = constraint.ConstraintsIntersection(
+ constraint.ValueSizeConstraint(1, 1),
+ constraint.SingleValueConstraint('M', 'F', 'm', 'f')
+ )
+
+
+id_pda_countryOfCitizenship = id_pda + (4, )
+
+class CountryOfCitizenship(char.PrintableString):
+ subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+ # ISO 3166 Country Code
+
+
+id_pda_countryOfResidence = id_pda + (5, )
+
+class CountryOfResidence(char.PrintableString):
+ subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+ # ISO 3166 Country Code
+
+
+# Biometric info certificate extension
+
+id_pe_biometricInfo = id_pe + (2, )
+
+
+class PredefinedBiometricType(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('picture', 0),
+ ('handwritten-signature', 1)
+ )
+ subtypeSpec = constraint.SingleValueConstraint(0, 1)
+
+
+class TypeOfBiometricData(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('predefinedBiometricType', PredefinedBiometricType()),
+ namedtype.NamedType('biometricDataOid', univ.ObjectIdentifier())
+ )
+
+
+class BiometricData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('typeOfBiometricData', TypeOfBiometricData()),
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('biometricDataHash', univ.OctetString()),
+ namedtype.OptionalNamedType('sourceDataUri', char.IA5String())
+ )
+
+
+class BiometricSyntax(univ.SequenceOf):
+ componentType = BiometricData()
+
+
+# QC Statements certificate extension
+# NOTE: This extension does not allow to mix critical and
+# non-critical Qualified Certificate Statements. Either all
+# statements must be critical or all statements must be
+# non-critical.
+
+id_pe_qcStatements = id_pe + (3, )
+
+
+class NameRegistrationAuthorities(univ.SequenceOf):
+ componentType = GeneralName()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class QCStatement(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('statementId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('statementInfo', univ.Any(),
+ openType=opentype.OpenType('statementId', qcStatementMap))
+ )
+
+
+class QCStatements(univ.SequenceOf):
+ componentType = QCStatement()
+
+
+class SemanticsInformation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('semanticsIndentifier',
+ univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('nameRegistrationAuthorities',
+ NameRegistrationAuthorities())
+ )
+ subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('semanticsIndentifier', constraint.ComponentPresentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('nameRegistrationAuthorities', constraint.ComponentPresentConstraint()))
+ )
+
+
+id_qcs = id_pkix + (11, )
+
+
+id_qcs_pkixQCSyntax_v1 = id_qcs + (1, )
+
+
+id_qcs_pkixQCSyntax_v2 = id_qcs + (2, )
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_biometricInfo: BiometricSyntax(),
+ id_pe_qcStatements: QCStatements(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_pda_dateOfBirth: DateOfBirth(),
+ id_pda_placeOfBirth: PlaceOfBirth(),
+ id_pda_gender: Gender(),
+ id_pda_countryOfCitizenship: CountryOfCitizenship(),
+ id_pda_countryOfResidence: CountryOfResidence(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3770.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3770.py
new file mode 100644
index 0000000000..3fefe1d90e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3770.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3770.txt
+# https://www.rfc-editor.org/errata/eid234
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+MAX = float('inf')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverLAN = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.14')
+
+id_kp_eapOverPPP = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.13')
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.13')
+
+
+class SSID(univ.OctetString):
+ pass
+
+SSID.subtypeSpec = constraint.ValueSizeConstraint(1, 32)
+
+
+class SSIDList(univ.SequenceOf):
+ pass
+
+SSIDList.componentType = SSID()
+SSIDList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Wireless LAN SSID Attribute Certificate Attribute
+# Uses same syntax as the certificate extension: SSIDList
+# Correction for https://www.rfc-editor.org/errata/eid234
+
+id_aca_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.10.7')
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3779.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3779.py
new file mode 100644
index 0000000000..8e6eaa3e7b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3779.py
@@ -0,0 +1,137 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Extensions for IP Addresses and AS Identifiers
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3779.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# IP Address Delegation Extension
+
+id_pe_ipAddrBlocks = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.7')
+
+
+class IPAddress(univ.BitString):
+ pass
+
+
+class IPAddressRange(univ.Sequence):
+ pass
+
+IPAddressRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('min', IPAddress()),
+ namedtype.NamedType('max', IPAddress())
+)
+
+
+class IPAddressOrRange(univ.Choice):
+ pass
+
+IPAddressOrRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressPrefix', IPAddress()),
+ namedtype.NamedType('addressRange', IPAddressRange())
+)
+
+
+class IPAddressChoice(univ.Choice):
+ pass
+
+IPAddressChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('inherit', univ.Null()),
+ namedtype.NamedType('addressesOrRanges', univ.SequenceOf(
+ componentType=IPAddressOrRange())
+ )
+)
+
+
+class IPAddressFamily(univ.Sequence):
+ pass
+
+IPAddressFamily.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressFamily', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
+ namedtype.NamedType('ipAddressChoice', IPAddressChoice())
+)
+
+
+class IPAddrBlocks(univ.SequenceOf):
+ pass
+
+IPAddrBlocks.componentType = IPAddressFamily()
+
+
+# Autonomous System Identifier Delegation Extension
+
+id_pe_autonomousSysIds = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.8')
+
+
+class ASId(univ.Integer):
+ pass
+
+
+class ASRange(univ.Sequence):
+ pass
+
+ASRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('min', ASId()),
+ namedtype.NamedType('max', ASId())
+)
+
+
+class ASIdOrRange(univ.Choice):
+ pass
+
+ASIdOrRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', ASId()),
+ namedtype.NamedType('range', ASRange())
+)
+
+
+class ASIdentifierChoice(univ.Choice):
+ pass
+
+ASIdentifierChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('inherit', univ.Null()),
+ namedtype.NamedType('asIdsOrRanges', univ.SequenceOf(
+ componentType=ASIdOrRange())
+ )
+)
+
+
+class ASIdentifiers(univ.Sequence):
+ pass
+
+ASIdentifiers.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('asnum', ASIdentifierChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('rdi', ASIdentifierChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+# Map of Certificate Extension OIDs to Extensions is added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ipAddrBlocks: IPAddrBlocks(),
+ id_pe_autonomousSysIds: ASIdentifiers(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3820.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3820.py
new file mode 100644
index 0000000000..b4ba34c05c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3820.py
@@ -0,0 +1,65 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure (PKI) Proxy Certificate Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3820.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+
+class ProxyCertPathLengthConstraint(univ.Integer):
+ pass
+
+
+class ProxyPolicy(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyLanguage', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('policy', univ.OctetString())
+ )
+
+
+class ProxyCertInfoExtension(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pCPathLenConstraint',
+ ProxyCertPathLengthConstraint()),
+ namedtype.NamedType('proxyPolicy', ProxyPolicy())
+ )
+
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+
+id_pe = id_pkix + (1, )
+
+id_pe_proxyCertInfo = id_pe + (14, )
+
+
+id_ppl = id_pkix + (21, )
+
+id_ppl_anyLanguage = id_ppl + (0, )
+
+id_ppl_inheritAll = id_ppl + (1, )
+
+id_ppl_independent = id_ppl + (2, )
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_proxyCertInfo: ProxyCertInfoExtension(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3852.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3852.py
new file mode 100644
index 0000000000..cf1bb85ad8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc3852.py
@@ -0,0 +1,706 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3852.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3280
+from pyasn1_modules import rfc3281
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
+)
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any())
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', rfc3280.CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any())
+)
+
+id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any())
+)
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.Name()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class DigestAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
+
+id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
+
+id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class DigestedData(univ.Sequence):
+ pass
+
+
+DigestedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.NamedType('digest', Digest())
+)
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class SignatureAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any())
+)
+
+
+class AttributeCertificateV2(rfc3281.AttributeCertificate):
+ pass
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
+ namedtype.NamedType(
+ 'subject', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subjectName', rfc3280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('issuer', rfc3280.GeneralNames()),
+ namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+class MessageAuthenticationCode(univ.OctetString):
+ pass
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+
+class MessageAuthenticationCodeAlgorithm(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AuthAttributes(univ.SetOf):
+ pass
+
+
+AuthAttributes.componentType = Attribute()
+AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class AuthenticatedData(univ.Sequence):
+ pass
+
+
+AuthenticatedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
+ namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('mac', MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
+
+id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
+
+
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Countersignature(SignerInfo):
+ pass
+
+
+id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
+
+id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
+
+id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
+
+
+class SigningTime(Time):
+ pass
+
+
+id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4010.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4010.py
new file mode 100644
index 0000000000..4981f76bed
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4010.py
@@ -0,0 +1,58 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SEED Encryption Algorithm in CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4010.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+
+
+id_seedCBC = univ.ObjectIdentifier('1.2.410.200004.1.4')
+
+
+id_npki_app_cmsSeed_wrap = univ.ObjectIdentifier('1.2.410.200004.7.1.1.1')
+
+
+class SeedIV(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+class SeedCBCParameter(SeedIV):
+ pass
+
+
+class SeedSMimeCapability(univ.Null):
+ pass
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_seedCBC: SeedCBCParameter(),
+ id_npki_app_cmsSeed_wrap: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the SMIMECapabilities Attribute map in rfc5751.py
+
+_smimeCapabilityMapUpdate = {
+ id_seedCBC: SeedSMimeCapability(),
+ id_npki_app_cmsSeed_wrap: SeedSMimeCapability(),
+
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4043.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4043.py
new file mode 100644
index 0000000000..cf0a801419
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4043.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Permanent Identifier
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4043.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+id_on = id_pkix + (8, )
+
+id_on_permanentIdentifier = id_on + (3, )
+
+
+class PermanentIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('identifierValue', char.UTF8String()),
+ namedtype.OptionalNamedType('assigner', univ.ObjectIdentifier())
+ )
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_permanentIdentifier: PermanentIdentifier(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4055.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4055.py
new file mode 100644
index 0000000000..bdc128632a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4055.py
@@ -0,0 +1,258 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with a very small amount of assistance from
+# asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional Algorithms and Identifiers for RSA Cryptography
+# for use in Certificates and CRLs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4055.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+rsaEncryption = _OID(1, 2, 840, 113549, 1, 1, 1)
+
+id_mgf1 = _OID(1, 2, 840, 113549, 1, 1, 8)
+
+id_RSAES_OAEP = _OID(1, 2, 840, 113549, 1, 1, 7)
+
+id_pSpecified = _OID(1, 2, 840, 113549, 1, 1, 9)
+
+id_RSASSA_PSS = _OID(1, 2, 840, 113549, 1, 1, 10)
+
+sha256WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 11)
+
+sha384WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 12)
+
+sha512WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 13)
+
+sha224WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 14)
+
+sha1Identifier = rfc5280.AlgorithmIdentifier()
+sha1Identifier['algorithm'] = id_sha1
+sha1Identifier['parameters'] = univ.Null("")
+
+sha224Identifier = rfc5280.AlgorithmIdentifier()
+sha224Identifier['algorithm'] = id_sha224
+sha224Identifier['parameters'] = univ.Null("")
+
+sha256Identifier = rfc5280.AlgorithmIdentifier()
+sha256Identifier['algorithm'] = id_sha256
+sha256Identifier['parameters'] = univ.Null("")
+
+sha384Identifier = rfc5280.AlgorithmIdentifier()
+sha384Identifier['algorithm'] = id_sha384
+sha384Identifier['parameters'] = univ.Null("")
+
+sha512Identifier = rfc5280.AlgorithmIdentifier()
+sha512Identifier['algorithm'] = id_sha512
+sha512Identifier['parameters'] = univ.Null("")
+
+mgf1SHA1Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA1Identifier['algorithm'] = id_mgf1
+mgf1SHA1Identifier['parameters'] = sha1Identifier
+
+mgf1SHA224Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA224Identifier['algorithm'] = id_mgf1
+mgf1SHA224Identifier['parameters'] = sha224Identifier
+
+mgf1SHA256Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA256Identifier['algorithm'] = id_mgf1
+mgf1SHA256Identifier['parameters'] = sha256Identifier
+
+mgf1SHA384Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA384Identifier['algorithm'] = id_mgf1
+mgf1SHA384Identifier['parameters'] = sha384Identifier
+
+mgf1SHA512Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA512Identifier['algorithm'] = id_mgf1
+mgf1SHA512Identifier['parameters'] = sha512Identifier
+
+pSpecifiedEmptyIdentifier = rfc5280.AlgorithmIdentifier()
+pSpecifiedEmptyIdentifier['algorithm'] = id_pSpecified
+pSpecifiedEmptyIdentifier['parameters'] = univ.OctetString(value='')
+
+
+class RSAPublicKey(univ.Sequence):
+ pass
+
+RSAPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+)
+
+
+class HashAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class MaskGenAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class RSAES_OAEP_params(univ.Sequence):
+ pass
+
+RSAES_OAEP_params.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('hashFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maskGenFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('pSourceFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+rSAES_OAEP_Default_Params = RSAES_OAEP_params()
+
+rSAES_OAEP_Default_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_Default_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_Default_Identifier['parameters'] = rSAES_OAEP_Default_Params
+
+rSAES_OAEP_SHA224_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA224_Params['hashFunc'] = sha224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA224_Params['maskGenFunc'] = mgf1SHA224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA224_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA224_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA224_Identifier['parameters'] = rSAES_OAEP_SHA224_Params
+
+rSAES_OAEP_SHA256_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA256_Params['hashFunc'] = sha256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA256_Params['maskGenFunc'] = mgf1SHA256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA256_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA256_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA256_Identifier['parameters'] = rSAES_OAEP_SHA256_Params
+
+rSAES_OAEP_SHA384_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA384_Params['hashFunc'] = sha384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA384_Params['maskGenFunc'] = mgf1SHA384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA384_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA384_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA384_Identifier['parameters'] = rSAES_OAEP_SHA384_Params
+
+rSAES_OAEP_SHA512_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA512_Params['hashFunc'] = sha512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA512_Params['maskGenFunc'] = mgf1SHA512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA512_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA512_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA512_Identifier['parameters'] = rSAES_OAEP_SHA512_Params
+
+
+class RSASSA_PSS_params(univ.Sequence):
+ pass
+
+RSASSA_PSS_params.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maskGenAlgorithm', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.DefaultedNamedType('saltLength', univ.Integer(value=20).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.DefaultedNamedType('trailerField', univ.Integer(value=1).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+rSASSA_PSS_Default_Params = RSASSA_PSS_params()
+
+rSASSA_PSS_Default_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_Default_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_Default_Identifier['parameters'] = rSASSA_PSS_Default_Params
+
+rSASSA_PSS_SHA224_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA224_Params['hashAlgorithm'] = sha224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA224_Params['maskGenAlgorithm'] = mgf1SHA224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA224_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA224_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA224_Identifier['parameters'] = rSASSA_PSS_SHA224_Params
+
+rSASSA_PSS_SHA256_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA256_Params['hashAlgorithm'] = sha256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA256_Params['maskGenAlgorithm'] = mgf1SHA256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA256_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA256_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA256_Identifier['parameters'] = rSASSA_PSS_SHA256_Params
+
+rSASSA_PSS_SHA384_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA384_Params['hashAlgorithm'] = sha384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA384_Params['maskGenAlgorithm'] = mgf1SHA384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA384_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA384_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA384_Identifier['parameters'] = rSASSA_PSS_SHA384_Params
+
+rSASSA_PSS_SHA512_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA512_Params['hashAlgorithm'] = sha512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA512_Params['maskGenAlgorithm'] = mgf1SHA512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA512_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA512_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA512_Identifier['parameters'] = rSASSA_PSS_SHA512_Params
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_mgf1: rfc5280.AlgorithmIdentifier(),
+ id_pSpecified: univ.OctetString(),
+ id_RSAES_OAEP: RSAES_OAEP_params(),
+ id_RSASSA_PSS: RSASSA_PSS_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4073.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4073.py
new file mode 100644
index 0000000000..3f425b28ed
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4073.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Protecting Multiple Contents with the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4073.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Content Collection Content Type and Object Identifier
+
+id_ct_contentCollection = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.19')
+
+class ContentCollection(univ.SequenceOf):
+ pass
+
+ContentCollection.componentType = rfc5652.ContentInfo()
+ContentCollection.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Content With Attributes Content Type and Object Identifier
+
+id_ct_contentWithAttrs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.20')
+
+class ContentWithAttributes(univ.Sequence):
+ pass
+
+ContentWithAttributes.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('content', rfc5652.ContentInfo()),
+ namedtype.NamedType('attrs', univ.SequenceOf(
+ componentType=rfc5652.Attribute()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_contentCollection: ContentCollection(),
+ id_ct_contentWithAttrs: ContentWithAttributes(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4108.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4108.py
new file mode 100644
index 0000000000..ecace9e3ee
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4108.py
@@ -0,0 +1,350 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add items from the verified errata.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Firmware Wrapper
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4108.txt
+# https://www.rfc-editor.org/errata_search.php?rfc=4108
+#
+
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+class HardwareSerialEntry(univ.Choice):
+ pass
+
+HardwareSerialEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('all', univ.Null()),
+ namedtype.NamedType('single', univ.OctetString()),
+ namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('low', univ.OctetString()),
+ namedtype.NamedType('high', univ.OctetString())
+ ))
+ )
+)
+
+
+class HardwareModules(univ.Sequence):
+ pass
+
+HardwareModules.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialEntries', univ.SequenceOf(componentType=HardwareSerialEntry()))
+)
+
+
+class CommunityIdentifier(univ.Choice):
+ pass
+
+CommunityIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('communityOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwModuleList', HardwareModules())
+)
+
+
+
+class PreferredPackageIdentifier(univ.Sequence):
+ pass
+
+PreferredPackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fwPkgID', univ.ObjectIdentifier()),
+ namedtype.NamedType('verNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+
+class PreferredOrLegacyPackageIdentifier(univ.Choice):
+ pass
+
+PreferredOrLegacyPackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('preferred', PreferredPackageIdentifier()),
+ namedtype.NamedType('legacy', univ.OctetString())
+)
+
+
+class CurrentFWConfig(univ.Sequence):
+ pass
+
+CurrentFWConfig.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('fwPkgType', univ.Integer()),
+ namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier())
+)
+
+
+class PreferredOrLegacyStalePackageIdentifier(univ.Choice):
+ pass
+
+PreferredOrLegacyStalePackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('preferredStaleVerNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('legacyStaleVersion', univ.OctetString())
+)
+
+
+class FirmwarePackageLoadErrorCode(univ.Enumerated):
+ pass
+
+FirmwarePackageLoadErrorCode.namedValues = namedval.NamedValues(
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('signatureFailure', 15),
+ ('contentTypeMismatch', 16),
+ ('badEncryptedData', 17),
+ ('unprotectedAttrsPresent', 18),
+ ('badEncryptContent', 19),
+ ('badEncryptAlgorithm', 20),
+ ('missingCiphertext', 21),
+ ('noDecryptKey', 22),
+ ('decryptFailure', 23),
+ ('badCompressAlgorithm', 24),
+ ('missingCompressedContent', 25),
+ ('decompressFailure', 26),
+ ('wrongHardware', 27),
+ ('stalePackage', 28),
+ ('notInCommunity', 29),
+ ('unsupportedPackageType', 30),
+ ('missingDependency', 31),
+ ('wrongDependencyVersion', 32),
+ ('insufficientMemory', 33),
+ ('badFirmware', 34),
+ ('unsupportedParameters', 35),
+ ('breaksDependency', 36),
+ ('otherError', 99)
+)
+
+
+class VendorLoadErrorCode(univ.Integer):
+ pass
+
+
+# Wrapped Firmware Key Unsigned Attribute and Object Identifier
+
+id_aa_wrappedFirmwareKey = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.39')
+
+class WrappedFirmwareKey(rfc5652.EnvelopedData):
+ pass
+
+
+# Firmware Package Information Signed Attribute and Object Identifier
+
+id_aa_firmwarePackageInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.42')
+
+class FirmwarePackageInfo(univ.Sequence):
+ pass
+
+FirmwarePackageInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('fwPkgType', univ.Integer()),
+ namedtype.OptionalNamedType('dependencies', univ.SequenceOf(componentType=PreferredOrLegacyPackageIdentifier()))
+)
+
+FirmwarePackageInfo.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2)
+
+
+# Community Identifiers Signed Attribute and Object Identifier
+
+id_aa_communityIdentifiers = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.40')
+
+class CommunityIdentifiers(univ.SequenceOf):
+ pass
+
+CommunityIdentifiers.componentType = CommunityIdentifier()
+
+
+# Implemented Compression Algorithms Signed Attribute and Object Identifier
+
+id_aa_implCompressAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.43')
+
+class ImplementedCompressAlgorithms(univ.SequenceOf):
+ pass
+
+ImplementedCompressAlgorithms.componentType = univ.ObjectIdentifier()
+
+
+# Implemented Cryptographic Algorithms Signed Attribute and Object Identifier
+
+id_aa_implCryptoAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.38')
+
+class ImplementedCryptoAlgorithms(univ.SequenceOf):
+ pass
+
+ImplementedCryptoAlgorithms.componentType = univ.ObjectIdentifier()
+
+
+# Decrypt Key Identifier Signed Attribute and Object Identifier
+
+id_aa_decryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.37')
+
+class DecryptKeyIdentifier(univ.OctetString):
+ pass
+
+
+# Target Hardware Identifier Signed Attribute and Object Identifier
+
+id_aa_targetHardwareIDs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.36')
+
+class TargetHardwareIdentifiers(univ.SequenceOf):
+ pass
+
+TargetHardwareIdentifiers.componentType = univ.ObjectIdentifier()
+
+
+# Firmware Package Identifier Signed Attribute and Object Identifier
+
+id_aa_firmwarePackageID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.35')
+
+class FirmwarePackageIdentifier(univ.Sequence):
+ pass
+
+FirmwarePackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('stale', PreferredOrLegacyStalePackageIdentifier())
+)
+
+
+# Firmware Package Message Digest Signed Attribute and Object Identifier
+
+id_aa_fwPkgMessageDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.41')
+
+class FirmwarePackageMessageDigest(univ.Sequence):
+ pass
+
+FirmwarePackageMessageDigest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('msgDigest', univ.OctetString())
+)
+
+
+# Firmware Package Load Error Report Content Type and Object Identifier
+
+class FWErrorVersion(univ.Integer):
+ pass
+
+FWErrorVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_firmwareLoadError = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.18')
+
+class FirmwarePackageLoadError(univ.Sequence):
+ pass
+
+FirmwarePackageLoadError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', FWErrorVersion().subtype(value='v1')),
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString()),
+ namedtype.NamedType('errorCode', FirmwarePackageLoadErrorCode()),
+ namedtype.OptionalNamedType('vendorErrorCode', VendorLoadErrorCode()),
+ namedtype.OptionalNamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('config', univ.SequenceOf(componentType=CurrentFWConfig()).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Firmware Package Load Receipt Content Type and Object Identifier
+
+class FWReceiptVersion(univ.Integer):
+ pass
+
+FWReceiptVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_firmwareLoadReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.17')
+
+class FirmwarePackageLoadReceipt(univ.Sequence):
+ pass
+
+FirmwarePackageLoadReceipt.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', FWReceiptVersion().subtype(value='v1')),
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString()),
+ namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('trustAnchorKeyID', univ.OctetString()),
+ namedtype.OptionalNamedType('decryptKeyID', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Firmware Package Content Type and Object Identifier
+
+id_ct_firmwarePackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.16')
+
+class FirmwarePkgData(univ.OctetString):
+ pass
+
+
+# Other Name syntax for Hardware Module Name
+
+id_on_hardwareModuleName = univ.ObjectIdentifier('1.3.6.1.5.5.7.8.4')
+
+class HardwareModuleName(univ.Sequence):
+ pass
+
+HardwareModuleName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString())
+)
+
+
+# Map of Attribute Type OIDs to Attributes is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_wrappedFirmwareKey: WrappedFirmwareKey(),
+ id_aa_firmwarePackageInfo: FirmwarePackageInfo(),
+ id_aa_communityIdentifiers: CommunityIdentifiers(),
+ id_aa_implCompressAlgs: ImplementedCompressAlgorithms(),
+ id_aa_implCryptoAlgs: ImplementedCryptoAlgorithms(),
+ id_aa_decryptKeyID: DecryptKeyIdentifier(),
+ id_aa_targetHardwareIDs: TargetHardwareIdentifiers(),
+ id_aa_firmwarePackageID: FirmwarePackageIdentifier(),
+ id_aa_fwPkgMessageDigest: FirmwarePackageMessageDigest(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_firmwareLoadError: FirmwarePackageLoadError(),
+ id_ct_firmwareLoadReceipt: FirmwarePackageLoadReceipt(),
+ id_ct_firmwarePackage: FirmwarePkgData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_hardwareModuleName: HardwareModuleName(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4210.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4210.py
new file mode 100644
index 0000000000..0935e3e9ac
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4210.py
@@ -0,0 +1,803 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management Protocol structures as per RFC4210
+#
+# Based on Alex Railean's work
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc2314
+from pyasn1_modules import rfc2459
+from pyasn1_modules import rfc2511
+
+MAX = float('inf')
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class CMPCertificate(rfc2459.Certificate):
+ pass
+
+
+class OOBCert(CMPCertificate):
+ pass
+
+
+class CertAnnContent(CMPCertificate):
+ pass
+
+
+class PKIFreeText(univ.SequenceOf):
+ """
+ PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
+ """
+ componentType = char.UTF8String()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class PollRepContent(univ.SequenceOf):
+ """
+ PollRepContent ::= SEQUENCE OF SEQUENCE {
+ certReqId INTEGER,
+ checkAfter INTEGER, -- time in seconds
+ reason PKIFreeText OPTIONAL
+ }
+ """
+
+ class CertReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('checkAfter', univ.Integer()),
+ namedtype.OptionalNamedType('reason', PKIFreeText())
+ )
+
+ componentType = CertReq()
+
+
+class PollReqContent(univ.SequenceOf):
+ """
+ PollReqContent ::= SEQUENCE OF SEQUENCE {
+ certReqId INTEGER
+ }
+
+ """
+
+ class CertReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer())
+ )
+
+ componentType = CertReq()
+
+
+class InfoTypeAndValue(univ.Sequence):
+ """
+ InfoTypeAndValue ::= SEQUENCE {
+ infoType OBJECT IDENTIFIER,
+ infoValue ANY DEFINED BY infoType OPTIONAL
+ }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('infoType', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('infoValue', univ.Any())
+ )
+
+
+class GenRepContent(univ.SequenceOf):
+ componentType = InfoTypeAndValue()
+
+
+class GenMsgContent(univ.SequenceOf):
+ componentType = InfoTypeAndValue()
+
+
+class PKIConfirmContent(univ.Null):
+ pass
+
+
+class CRLAnnContent(univ.SequenceOf):
+ componentType = rfc2459.CertificateList()
+
+
+class CAKeyUpdAnnContent(univ.Sequence):
+ """
+ CAKeyUpdAnnContent ::= SEQUENCE {
+ oldWithNew CMPCertificate,
+ newWithOld CMPCertificate,
+ newWithNew CMPCertificate
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oldWithNew', CMPCertificate()),
+ namedtype.NamedType('newWithOld', CMPCertificate()),
+ namedtype.NamedType('newWithNew', CMPCertificate())
+ )
+
+
+class RevDetails(univ.Sequence):
+ """
+ RevDetails ::= SEQUENCE {
+ certDetails CertTemplate,
+ crlEntryDetails Extensions OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
+ namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
+ )
+
+
+class RevReqContent(univ.SequenceOf):
+ componentType = RevDetails()
+
+
+class CertOrEncCert(univ.Choice):
+ """
+ CertOrEncCert ::= CHOICE {
+ certificate [0] CMPCertificate,
+ encryptedCert [1] EncryptedValue
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', CMPCertificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class CertifiedKeyPair(univ.Sequence):
+ """
+ CertifiedKeyPair ::= SEQUENCE {
+ certOrEncCert CertOrEncCert,
+ privateKey [0] EncryptedValue OPTIONAL,
+ publicationInfo [1] PKIPublicationInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certOrEncCert', CertOrEncCert()),
+ namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class POPODecKeyRespContent(univ.SequenceOf):
+ componentType = univ.Integer()
+
+
+class Challenge(univ.Sequence):
+ """
+ Challenge ::= SEQUENCE {
+ owf AlgorithmIdentifier OPTIONAL,
+ witness OCTET STRING,
+ challenge OCTET STRING
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString()),
+ namedtype.NamedType('challenge', univ.OctetString())
+ )
+
+
+class PKIStatus(univ.Integer):
+ """
+ PKIStatus ::= INTEGER {
+ accepted (0),
+ grantedWithMods (1),
+ rejection (2),
+ waiting (3),
+ revocationWarning (4),
+ revocationNotification (5),
+ keyUpdateWarning (6)
+ }
+ """
+ namedValues = namedval.NamedValues(
+ ('accepted', 0),
+ ('grantedWithMods', 1),
+ ('rejection', 2),
+ ('waiting', 3),
+ ('revocationWarning', 4),
+ ('revocationNotification', 5),
+ ('keyUpdateWarning', 6)
+ )
+
+
+class PKIFailureInfo(univ.BitString):
+ """
+ PKIFailureInfo ::= BIT STRING {
+ badAlg (0),
+ badMessageCheck (1),
+ badRequest (2),
+ badTime (3),
+ badCertId (4),
+ badDataFormat (5),
+ wrongAuthority (6),
+ incorrectData (7),
+ missingTimeStamp (8),
+ badPOP (9),
+ certRevoked (10),
+ certConfirmed (11),
+ wrongIntegrity (12),
+ badRecipientNonce (13),
+ timeNotAvailable (14),
+ unacceptedPolicy (15),
+ unacceptedExtension (16),
+ addInfoNotAvailable (17),
+ badSenderNonce (18),
+ badCertTemplate (19),
+ signerNotTrusted (20),
+ transactionIdInUse (21),
+ unsupportedVersion (22),
+ notAuthorized (23),
+ systemUnavail (24),
+ systemFailure (25),
+ duplicateCertReq (26)
+    }"""
+ namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('badDataFormat', 5),
+ ('wrongAuthority', 6),
+ ('incorrectData', 7),
+ ('missingTimeStamp', 8),
+ ('badPOP', 9),
+ ('certRevoked', 10),
+ ('certConfirmed', 11),
+ ('wrongIntegrity', 12),
+ ('badRecipientNonce', 13),
+ ('timeNotAvailable', 14),
+ ('unacceptedPolicy', 15),
+ ('unacceptedExtension', 16),
+ ('addInfoNotAvailable', 17),
+ ('badSenderNonce', 18),
+ ('badCertTemplate', 19),
+ ('signerNotTrusted', 20),
+ ('transactionIdInUse', 21),
+ ('unsupportedVersion', 22),
+ ('notAuthorized', 23),
+ ('systemUnavail', 24),
+ ('systemFailure', 25),
+ ('duplicateCertReq', 26)
+ )
+
+
+class PKIStatusInfo(univ.Sequence):
+ """
+ PKIStatusInfo ::= SEQUENCE {
+ status PKIStatus,
+ statusString PKIFreeText OPTIONAL,
+ failInfo PKIFailureInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.OptionalNamedType('statusString', PKIFreeText()),
+ namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
+ )
+
+
+class ErrorMsgContent(univ.Sequence):
+ """
+ ErrorMsgContent ::= SEQUENCE {
+ pKIStatusInfo PKIStatusInfo,
+ errorCode INTEGER OPTIONAL,
+ -- implementation-specific error codes
+ errorDetails PKIFreeText OPTIONAL
+ -- implementation-specific error details
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
+ namedtype.OptionalNamedType('errorCode', univ.Integer()),
+ namedtype.OptionalNamedType('errorDetails', PKIFreeText())
+ )
+
+
+class CertStatus(univ.Sequence):
+ """
+ CertStatus ::= SEQUENCE {
+ certHash OCTET STRING,
+ certReqId INTEGER,
+ statusInfo PKIStatusInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', univ.OctetString()),
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
+ )
+
+
+class CertConfirmContent(univ.SequenceOf):
+ componentType = CertStatus()
+
+
+class RevAnnContent(univ.Sequence):
+ """
+ RevAnnContent ::= SEQUENCE {
+ status PKIStatus,
+ certId CertId,
+ willBeRevokedAt GeneralizedTime,
+ badSinceDate GeneralizedTime,
+ crlDetails Extensions OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.NamedType('certId', rfc2511.CertId()),
+ namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
+ )
+
+
+class RevRepContent(univ.Sequence):
+ """
+ RevRepContent ::= SEQUENCE {
+ status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
+ revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
+ OPTIONAL,
+ crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList
+ OPTIONAL
+    }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'status', univ.SequenceOf(
+ componentType=PKIStatusInfo(),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'revCerts', univ.SequenceOf(componentType=rfc2511.CertId()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'crls', univ.SequenceOf(componentType=rfc2459.CertificateList()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ )
+ )
+
+
+class KeyRecRepContent(univ.Sequence):
+ """
+ KeyRecRepContent ::= SEQUENCE {
+ status PKIStatusInfo,
+ newSigCert [0] CMPCertificate OPTIONAL,
+ caCerts [1] SEQUENCE SIZE (1..MAX) OF
+ CMPCertificate OPTIONAL,
+ keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
+ CertifiedKeyPair OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType(
+ 'newSigCert', CMPCertificate().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'caCerts', univ.SequenceOf(componentType=CMPCertificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ),
+ namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(componentType=CertifiedKeyPair()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))
+ )
+ )
+
+
+class CertResponse(univ.Sequence):
+ """
+ CertResponse ::= SEQUENCE {
+ certReqId INTEGER,
+ status PKIStatusInfo,
+ certifiedKeyPair CertifiedKeyPair OPTIONAL,
+ rspInfo OCTET STRING OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
+ namedtype.OptionalNamedType('rspInfo', univ.OctetString())
+ )
+
+
+class CertRepMessage(univ.Sequence):
+ """
+ CertRepMessage ::= SEQUENCE {
+ caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
+ OPTIONAL,
+ response SEQUENCE OF CertResponse
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'caPubs', univ.SequenceOf(
+ componentType=CMPCertificate()
+ ).subtype(sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
+ ),
+ namedtype.NamedType('response', univ.SequenceOf(componentType=CertResponse()))
+ )
+
+
+class POPODecKeyChallContent(univ.SequenceOf):
+ componentType = Challenge()
+
+
+class OOBCertHash(univ.Sequence):
+ """
+ OOBCertHash ::= SEQUENCE {
+ hashAlg [0] AlgorithmIdentifier OPTIONAL,
+ certId [1] CertId OPTIONAL,
+ hashVal BIT STRING
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'hashAlg', rfc2459.AlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
+ ),
+ namedtype.OptionalNamedType(
+ 'certId', rfc2511.CertId().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
+ ),
+ namedtype.NamedType('hashVal', univ.BitString())
+ )
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+class NestedMessageContent(univ.SequenceOf):
+ """
+ NestedMessageContent ::= PKIMessages
+ """
+ componentType = univ.Any()
+
+
+class DHBMParameter(univ.Sequence):
+ """
+ DHBMParameter ::= SEQUENCE {
+ owf AlgorithmIdentifier,
+ -- AlgId for a One-Way Function (SHA-1 recommended)
+ mac AlgorithmIdentifier
+ -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
+ } -- or HMAC [RFC2104, RFC2202])
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
+ )
+
+
+id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
+
+
+class PBMParameter(univ.Sequence):
+ """
+ PBMParameter ::= SEQUENCE {
+ salt OCTET STRING,
+ owf AlgorithmIdentifier,
+ iterationCount INTEGER,
+ mac AlgorithmIdentifier
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'salt', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 128))
+ ),
+ namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
+ )
+
+
+id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
+
+
+class PKIProtection(univ.BitString):
+ pass
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+nestedMessageContent = NestedMessageContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))
+
+
+class PKIBody(univ.Choice):
+ """
+ PKIBody ::= CHOICE { -- message-specific body elements
+ ir [0] CertReqMessages, --Initialization Request
+ ip [1] CertRepMessage, --Initialization Response
+ cr [2] CertReqMessages, --Certification Request
+ cp [3] CertRepMessage, --Certification Response
+ p10cr [4] CertificationRequest, --imported from [PKCS10]
+ popdecc [5] POPODecKeyChallContent, --pop Challenge
+ popdecr [6] POPODecKeyRespContent, --pop Response
+ kur [7] CertReqMessages, --Key Update Request
+ kup [8] CertRepMessage, --Key Update Response
+ krr [9] CertReqMessages, --Key Recovery Request
+ krp [10] KeyRecRepContent, --Key Recovery Response
+ rr [11] RevReqContent, --Revocation Request
+ rp [12] RevRepContent, --Revocation Response
+ ccr [13] CertReqMessages, --Cross-Cert. Request
+ ccp [14] CertRepMessage, --Cross-Cert. Response
+ ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
+ cann [16] CertAnnContent, --Certificate Ann.
+ rann [17] RevAnnContent, --Revocation Ann.
+ crlann [18] CRLAnnContent, --CRL Announcement
+ pkiconf [19] PKIConfirmContent, --Confirmation
+ nested [20] NestedMessageContent, --Nested Message
+ genm [21] GenMsgContent, --General Message
+ genp [22] GenRepContent, --General Response
+ error [23] ErrorMsgContent, --Error Message
+ certConf [24] CertConfirmContent, --Certificate confirm
+ pollReq [25] PollReqContent, --Polling request
+       pollRep   [26] PollRepContent  --Polling response
+    }
+    """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'ir', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.NamedType(
+ 'ip', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ ),
+ namedtype.NamedType(
+ 'cr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+ ),
+ namedtype.NamedType(
+ 'cp', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+ ),
+ namedtype.NamedType(
+ 'p10cr', rfc2314.CertificationRequest().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ ),
+ namedtype.NamedType(
+ 'popdecc', POPODecKeyChallContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
+ )
+ ),
+ namedtype.NamedType(
+ 'popdecr', POPODecKeyRespContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
+ )
+ ),
+ namedtype.NamedType(
+ 'kur', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
+ )
+ ),
+ namedtype.NamedType(
+ 'kup', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
+ )
+ ),
+ namedtype.NamedType(
+ 'krr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
+ )
+ ),
+ namedtype.NamedType(
+ 'krp', KeyRecRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
+ )
+ ),
+ namedtype.NamedType(
+ 'rr', RevReqContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
+ )
+ ),
+ namedtype.NamedType(
+ 'rp', RevRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
+ )
+ ),
+ namedtype.NamedType(
+ 'ccr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
+ )
+ ),
+ namedtype.NamedType(
+ 'ccp', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
+ )
+ ),
+ namedtype.NamedType(
+ 'ckuann', CAKeyUpdAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
+ )
+ ),
+ namedtype.NamedType(
+ 'cann', CertAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
+ )
+ ),
+ namedtype.NamedType(
+ 'rann', RevAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
+ )
+ ),
+ namedtype.NamedType(
+ 'crlann', CRLAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
+ )
+ ),
+ namedtype.NamedType(
+ 'pkiconf', PKIConfirmContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
+ )
+ ),
+ namedtype.NamedType(
+ 'nested', nestedMessageContent
+ ),
+ # namedtype.NamedType('nested', NestedMessageContent().subtype(
+ # explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
+ # )
+ # ),
+ namedtype.NamedType(
+ 'genm', GenMsgContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
+ )
+ ),
+ namedtype.NamedType(
+ 'gen', GenRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
+ )
+ ),
+ namedtype.NamedType(
+ 'error', ErrorMsgContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
+ )
+ ),
+ namedtype.NamedType(
+ 'certConf', CertConfirmContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
+ )
+ ),
+ namedtype.NamedType(
+ 'pollReq', PollReqContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
+ )
+ ),
+ namedtype.NamedType(
+ 'pollRep', PollRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
+ )
+ )
+ )
+
+
+class PKIHeader(univ.Sequence):
+ """
+ PKIHeader ::= SEQUENCE {
+ pvno INTEGER { cmp1999(1), cmp2000(2) },
+ sender GeneralName,
+ recipient GeneralName,
+ messageTime [0] GeneralizedTime OPTIONAL,
+ protectionAlg [1] AlgorithmIdentifier OPTIONAL,
+ senderKID [2] KeyIdentifier OPTIONAL,
+ recipKID [3] KeyIdentifier OPTIONAL,
+ transactionID [4] OCTET STRING OPTIONAL,
+ senderNonce [5] OCTET STRING OPTIONAL,
+ recipNonce [6] OCTET STRING OPTIONAL,
+ freeText [7] PKIFreeText OPTIONAL,
+ generalInfo [8] SEQUENCE SIZE (1..MAX) OF
+ InfoTypeAndValue OPTIONAL
+ }
+
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'pvno', univ.Integer(
+ namedValues=namedval.NamedValues(('cmp1999', 1), ('cmp2000', 2))
+ )
+ ),
+ namedtype.NamedType('sender', rfc2459.GeneralName()),
+ namedtype.NamedType('recipient', rfc2459.GeneralName()),
+ namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
+ namedtype.OptionalNamedType('generalInfo',
+ univ.SequenceOf(
+ componentType=InfoTypeAndValue().subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))
+ )
+ )
+
+
+class ProtectedPart(univ.Sequence):
+ """
+ ProtectedPart ::= SEQUENCE {
+ header PKIHeader,
+ body PKIBody
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PKIHeader()),
+ namedtype.NamedType('infoValue', PKIBody())
+ )
+
+
+class PKIMessage(univ.Sequence):
+ """
+ PKIMessage ::= SEQUENCE {
+ header PKIHeader,
+ body PKIBody,
+ protection [0] PKIProtection OPTIONAL,
+ extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
+ OPTIONAL
+ }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PKIHeader()),
+ namedtype.NamedType('body', PKIBody()),
+ namedtype.OptionalNamedType('protection', PKIProtection().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('extraCerts',
+ univ.SequenceOf(
+ componentType=CMPCertificate()
+ ).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ )
+ )
+
+
+class PKIMessages(univ.SequenceOf):
+ """
+ PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
+ """
+ componentType = PKIMessage()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+NestedMessageContent._componentType = PKIMessages()
+nestedMessageContent._componentType = PKIMessages()
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4211.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4211.py
new file mode 100644
index 0000000000..c47b3c5dd2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4211.py
@@ -0,0 +1,396 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate Request
+# Message Format (CRMF)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc4211.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3280
+from pyasn1_modules import rfc3852
+
+MAX = float('inf')
+
+
def _buildOid(*components):
    """Flatten *components* into a single OBJECT IDENTIFIER.

    Each component may be an existing ``univ.ObjectIdentifier`` (its arcs
    are spliced in) or any value accepted by ``int()``.
    """
    arcs = []
    for component in components:
        if isinstance(component, univ.ObjectIdentifier):
            arcs.extend(component)
        else:
            arcs.append(int(component))
    return univ.ObjectIdentifier(arcs)
+
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_pkip = _buildOid(id_pkix, 5)
+
+id_regCtrl = _buildOid(id_pkip, 1)
+
+
class SinglePubInfo(univ.Sequence):
    """SinglePubInfo ::= SEQUENCE {
        pubMethod   INTEGER { dontCare(0), x500(1), web(2), ldap(3) },
        pubLocation GeneralName OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'pubMethod',
            univ.Integer(namedValues=namedval.NamedValues(
                ('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
        namedtype.OptionalNamedType('pubLocation', rfc3280.GeneralName())
    )
+
+
+class UTF8Pairs(char.UTF8String):
+ pass
+
+
+class PKMACValue(univ.Sequence):
+ pass
+
+
+PKMACValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algId', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('value', univ.BitString())
+)
+
+
+class POPOSigningKeyInput(univ.Sequence):
+ pass
+
+
+POPOSigningKeyInput.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'authInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'sender', rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
+ ),
+ namedtype.NamedType(
+ 'publicKeyMAC', PKMACValue()
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('publicKey', rfc3280.SubjectPublicKeyInfo())
+)
+
+
+class POPOSigningKey(univ.Sequence):
+ pass
+
+
+POPOSigningKey.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('algorithmIdentifier', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class Attributes(univ.SetOf):
+ pass
+
+
+Attributes.componentType = rfc3280.Attribute()
+
+
class PrivateKeyInfo(univ.Sequence):
    """PrivateKeyInfo ::= SEQUENCE {
        version             INTEGER,
        privateKeyAlgorithm AlgorithmIdentifier,
        privateKey          OCTET STRING,
        attributes          [0] IMPLICIT Attributes OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version', univ.Integer()),
        namedtype.NamedType('privateKeyAlgorithm',
                            rfc3280.AlgorithmIdentifier()),
        namedtype.NamedType('privateKey', univ.OctetString()),
        namedtype.OptionalNamedType(
            'attributes',
            Attributes().subtype(
                implicitTag=tag.Tag(tag.tagClassContext,
                                    tag.tagFormatSimple, 0)))
    )
+
+
+class EncryptedValue(univ.Sequence):
+ pass
+
+
+EncryptedValue.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('intendedAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('symmAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('keyAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('encValue', univ.BitString())
+)
+
+
+class EncryptedKey(univ.Choice):
+ pass
+
+
+EncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedValue', EncryptedValue()),
+ namedtype.NamedType('envelopedData', rfc3852.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyGenParameters(univ.OctetString):
+ pass
+
+
+class PKIArchiveOptions(univ.Choice):
+ pass
+
+
+PKIArchiveOptions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedPrivKey',
+ EncryptedKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyGenParameters',
+ KeyGenParameters().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('archiveRemGenPrivKey',
+ univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_regCtrl_authenticator = _buildOid(id_regCtrl, 2)
+
+id_regInfo = _buildOid(id_pkip, 2)
+
+id_regInfo_certReq = _buildOid(id_regInfo, 2)
+
+
+class ProtocolEncrKey(rfc3280.SubjectPublicKeyInfo):
+ pass
+
+
+class Authenticator(char.UTF8String):
+ pass
+
+
+class SubsequentMessage(univ.Integer):
+ pass
+
+
+SubsequentMessage.namedValues = namedval.NamedValues(
+ ('encrCert', 0),
+ ('challengeResp', 1)
+)
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any())
+)
+
+
+class POPOPrivKey(univ.Choice):
+ pass
+
+
+POPOPrivKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('thisMessage',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subsequentMessage',
+ SubsequentMessage().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dhMAC',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('agreeMAC',
+ PKMACValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('encryptedKey', rfc3852.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class ProofOfPossession(univ.Choice):
+ pass
+
+
+ProofOfPossession.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('raVerified',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signature', POPOSigningKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('keyEncipherment',
+ POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('keyAgreement',
+ POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class OptionalValidity(univ.Sequence):
+ pass
+
+
+OptionalValidity.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', rfc3280.Time().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('notAfter', rfc3280.Time().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
class CertTemplate(univ.Sequence):
    """CertTemplate (RFC 4211, Section 5).

    A to-be-issued certificate skeleton: every field is OPTIONAL so the
    requester may leave any of them for the CA to fill in.
    """
    pass


# Context tags [0]..[9] follow the ASN.1 module: primitive fields carry
# IMPLICIT simple tags, constructed ones (issuer, validity, subject) carry
# constructed tags.
CertTemplate.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('version', rfc3280.Version().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('signingAlg', rfc3280.AlgorithmIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    namedtype.OptionalNamedType('issuer', rfc3280.Name().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
    namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
    namedtype.OptionalNamedType('subject', rfc3280.Name().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
    namedtype.OptionalNamedType('publicKey', rfc3280.SubjectPublicKeyInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
    namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
    namedtype.OptionalNamedType('subjectUID', rfc3280.UniqueIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
    namedtype.OptionalNamedType('extensions', rfc3280.Extensions().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9)))
)
+
+
+class Controls(univ.SequenceOf):
+ pass
+
+
+Controls.componentType = AttributeTypeAndValue()
+Controls.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertRequest(univ.Sequence):
+ pass
+
+
+CertRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('certTemplate', CertTemplate()),
+ namedtype.OptionalNamedType('controls', Controls())
+)
+
+
class CertReqMsg(univ.Sequence):
    """CertReqMsg ::= SEQUENCE {
        certReq CertRequest,
        popo    ProofOfPossession OPTIONAL,
        regInfo SEQUENCE OF AttributeTypeAndValue OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certReq', CertRequest()),
        namedtype.OptionalNamedType('popo', ProofOfPossession()),
        namedtype.OptionalNamedType(
            'regInfo',
            univ.SequenceOf(componentType=AttributeTypeAndValue()))
    )
+
+
+class CertReqMessages(univ.SequenceOf):
+ pass
+
+
+CertReqMessages.componentType = CertReqMsg()
+CertReqMessages.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertReq(CertRequest):
+ pass
+
+
+id_regCtrl_pkiPublicationInfo = _buildOid(id_regCtrl, 3)
+
+
+class CertId(univ.Sequence):
+ pass
+
+
+CertId.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+
+class OldCertId(CertId):
+ pass
+
+
+class PKIPublicationInfo(univ.Sequence):
+ pass
+
+
+PKIPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('action',
+ univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
+ namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()))
+)
+
+
class EncKeyWithID(univ.Sequence):
    """EncKeyWithID ::= SEQUENCE {
        privateKey PrivateKeyInfo,
        identifier CHOICE { string UTF8String, generalName GeneralName } OPTIONAL
    }
    """
    # The anonymous CHOICE keeps its default (untagged) encoding, exactly as
    # in the ASN.1 module.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('privateKey', PrivateKeyInfo()),
        namedtype.OptionalNamedType(
            'identifier',
            univ.Choice(componentType=namedtype.NamedTypes(
                namedtype.NamedType('string', char.UTF8String()),
                namedtype.NamedType('generalName', rfc3280.GeneralName()))))
    )
+
+id_regCtrl_protocolEncrKey = _buildOid(id_regCtrl, 6)
+
+id_regCtrl_oldCertID = _buildOid(id_regCtrl, 5)
+
+id_smime = _buildOid(1, 2, 840, 113549, 1, 9, 16)
+
+
class PBMParameter(univ.Sequence):
    """PBMParameter ::= SEQUENCE {
        salt           OCTET STRING,
        owf            AlgorithmIdentifier,  -- one-way function
        iterationCount INTEGER,
        mac            AlgorithmIdentifier
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('salt', univ.OctetString()),
        namedtype.NamedType('owf', rfc3280.AlgorithmIdentifier()),
        namedtype.NamedType('iterationCount', univ.Integer()),
        namedtype.NamedType('mac', rfc3280.AlgorithmIdentifier())
    )
+
+id_regCtrl_regToken = _buildOid(id_regCtrl, 1)
+
+id_regCtrl_pkiArchiveOptions = _buildOid(id_regCtrl, 4)
+
+id_regInfo_utf8Pairs = _buildOid(id_regInfo, 1)
+
+id_ct = _buildOid(id_smime, 1)
+
+id_ct_encKeyWithID = _buildOid(id_ct, 21)
+
+
+class RegToken(char.UTF8String):
+ pass
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4334.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4334.py
new file mode 100644
index 0000000000..44cd31b166
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4334.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4334.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# OID Arcs
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_aca = univ.ObjectIdentifier('1.3.6.1.5.5.7.10')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverPPP = id_kp + (13, )
+
+id_kp_eapOverLAN = id_kp + (14, )
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = id_pe + (13, )
+
class SSID(univ.OctetString):
    """SSID ::= OCTET STRING (SIZE (1..32)) -- RFC 4334.

    Bug fix: the original class body evaluated
    ``constraint.ValueSizeConstraint(1, 32)`` as a bare expression, which
    discards the constraint entirely.  pyasn1 only enforces constraints
    attached via ``subtypeSpec``, so the size bound is now assigned to it.
    """
    subtypeSpec = constraint.ValueSizeConstraint(1, 32)
+
+
class SSIDList(univ.SequenceOf):
    """SSIDList ::= SEQUENCE SIZE (1..MAX) OF SSID"""


# Declared post-class, matching the style of the other pyasn1-modules files.
SSIDList.componentType = SSID()
SSIDList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
# Wireless LAN SSID Attribute Certificate Attribute

id_aca_wlanSSID = id_aca + (7, )


# Map of Certificate Extension OIDs to Extensions
# To be added to the ones that are in rfc5280.py
# Registering SSIDList under id-pe-wlanSSID lets rfc5280's generic decoders
# resolve the opaque extnValue of this extension.

_certificateExtensionsMap = {
    id_pe_wlanSSID: SSIDList(),
}

rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)


# Map of AttributeType OIDs to AttributeValue added to the
# ones that are in rfc5280.py

_certificateAttributesMapUpdate = {
    id_aca_wlanSSID: SSIDList(),
}

rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4357.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4357.py
new file mode 100644
index 0000000000..42b9e3ecb8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4357.py
@@ -0,0 +1,477 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional Cryptographic Algorithms for Use with GOST 28147-89,
+# GOST R 34.10-94, GOST R 34.10-2001, and GOST R 34.11-94 Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4357.txt
+# https://www.rfc-editor.org/errata/eid5927
+# https://www.rfc-editor.org/errata/eid5928
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Import from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Object Identifiers
+
+id_CryptoPro = univ.ObjectIdentifier((1, 2, 643, 2, 2,))
+
+
+id_CryptoPro_modules = id_CryptoPro + (1, 1,)
+
+id_CryptoPro_extensions = id_CryptoPro + (34,)
+
+id_CryptoPro_policyIds = id_CryptoPro + (38,)
+
+id_CryptoPro_policyQt = id_CryptoPro + (39,)
+
+
+cryptographic_Gost_Useful_Definitions = id_CryptoPro_modules + (0, 1,)
+
+gostR3411_94_DigestSyntax = id_CryptoPro_modules + (1, 1,)
+
+gostR3410_94_PKISyntax = id_CryptoPro_modules + (2, 1,)
+
+gostR3410_94_SignatureSyntax = id_CryptoPro_modules + (3, 1,)
+
+gost28147_89_EncryptionSyntax = id_CryptoPro_modules + (4, 1,)
+
+gostR3410_EncryptionSyntax = id_CryptoPro_modules + (5, 2,)
+
+gost28147_89_ParamSetSyntax = id_CryptoPro_modules + (6, 1,)
+
+gostR3411_94_ParamSetSyntax = id_CryptoPro_modules + (7, 1,)
+
+gostR3410_94_ParamSetSyntax = id_CryptoPro_modules + (8, 1, 1)
+
+gostR3410_2001_PKISyntax = id_CryptoPro_modules + (9, 1,)
+
+gostR3410_2001_SignatureSyntax = id_CryptoPro_modules + (10, 1,)
+
+gostR3410_2001_ParamSetSyntax = id_CryptoPro_modules + (12, 1,)
+
+gost_CryptoPro_ExtendedKeyUsage = id_CryptoPro_modules + (13, 1,)
+
+gost_CryptoPro_PrivateKey = id_CryptoPro_modules + (14, 1,)
+
+gost_CryptoPro_PKIXCMP = id_CryptoPro_modules + (15, 1,)
+
+gost_CryptoPro_TLS = id_CryptoPro_modules + (16, 1,)
+
+gost_CryptoPro_Policy = id_CryptoPro_modules + (17, 1,)
+
+gost_CryptoPro_Constants = id_CryptoPro_modules + (18, 1,)
+
+
+id_CryptoPro_algorithms = id_CryptoPro
+
+id_GostR3411_94_with_GostR3410_2001 = id_CryptoPro_algorithms + (3,)
+
+id_GostR3411_94_with_GostR3410_94 = id_CryptoPro_algorithms + (4,)
+
+id_GostR3411_94 = id_CryptoPro_algorithms + (9,)
+
+id_Gost28147_89_None_KeyMeshing = id_CryptoPro_algorithms + (14, 0,)
+
+id_Gost28147_89_CryptoPro_KeyMeshing = id_CryptoPro_algorithms + (14, 1,)
+
+id_GostR3410_2001 = id_CryptoPro_algorithms + (19,)
+
+id_GostR3410_94 = id_CryptoPro_algorithms + (20,)
+
+id_Gost28147_89 = id_CryptoPro_algorithms + (21,)
+
+id_Gost28147_89_MAC = id_CryptoPro_algorithms + (22,)
+
+id_CryptoPro_hashes = id_CryptoPro_algorithms + (30,)
+
+id_CryptoPro_encrypts = id_CryptoPro_algorithms + (31,)
+
+id_CryptoPro_signs = id_CryptoPro_algorithms + (32,)
+
+id_CryptoPro_exchanges = id_CryptoPro_algorithms + (33,)
+
+id_CryptoPro_ecc_signs = id_CryptoPro_algorithms + (35,)
+
+id_CryptoPro_ecc_exchanges = id_CryptoPro_algorithms + (36,)
+
+id_CryptoPro_private_keys = id_CryptoPro_algorithms + (37,)
+
+id_CryptoPro_pkixcmp_infos = id_CryptoPro_algorithms + (41,)
+
+id_CryptoPro_audit_service_types = id_CryptoPro_algorithms + (42,)
+
+id_CryptoPro_audit_record_types = id_CryptoPro_algorithms + (43,)
+
+id_CryptoPro_attributes = id_CryptoPro_algorithms + (44,)
+
+id_CryptoPro_name_service_types = id_CryptoPro_algorithms + (45,)
+
+id_GostR3410_2001DH = id_CryptoPro_algorithms + (98,)
+
+id_GostR3410_94DH = id_CryptoPro_algorithms + (99,)
+
+
+id_Gost28147_89_TestParamSet = id_CryptoPro_encrypts + (0,)
+
+id_Gost28147_89_CryptoPro_A_ParamSet = id_CryptoPro_encrypts + (1,)
+
+id_Gost28147_89_CryptoPro_B_ParamSet = id_CryptoPro_encrypts + (2,)
+
+id_Gost28147_89_CryptoPro_C_ParamSet = id_CryptoPro_encrypts + (3,)
+
+id_Gost28147_89_CryptoPro_D_ParamSet = id_CryptoPro_encrypts + (4,)
+
+id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet = id_CryptoPro_encrypts + (5,)
+
+id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet = id_CryptoPro_encrypts + (6,)
+
+id_Gost28147_89_CryptoPro_RIC_1_ParamSet = id_CryptoPro_encrypts + (7,)
+
+
+id_GostR3410_2001_TestParamSet = id_CryptoPro_ecc_signs + (0,)
+
+id_GostR3410_2001_CryptoPro_A_ParamSet = id_CryptoPro_ecc_signs + (1,)
+
+id_GostR3410_2001_CryptoPro_B_ParamSet = id_CryptoPro_ecc_signs + (2,)
+
+id_GostR3410_2001_CryptoPro_C_ParamSet = id_CryptoPro_ecc_signs + (3,)
+
+
+id_GostR3410_2001_CryptoPro_XchA_ParamSet = id_CryptoPro_ecc_exchanges + (0,)
+
+id_GostR3410_2001_CryptoPro_XchB_ParamSet = id_CryptoPro_ecc_exchanges + (1,)
+
+
+id_GostR3410_94_TestParamSet = id_CryptoPro_signs + (0,)
+
+id_GostR3410_94_CryptoPro_A_ParamSet = id_CryptoPro_signs + (2,)
+
+id_GostR3410_94_CryptoPro_B_ParamSet = id_CryptoPro_signs + (3,)
+
+id_GostR3410_94_CryptoPro_C_ParamSet = id_CryptoPro_signs + (4,)
+
+id_GostR3410_94_CryptoPro_D_ParamSet = id_CryptoPro_signs + (5,)
+
+
+id_GostR3410_94_CryptoPro_XchA_ParamSet = id_CryptoPro_exchanges + (1,)
+
+id_GostR3410_94_CryptoPro_XchB_ParamSet = id_CryptoPro_exchanges + (2,)
+
+id_GostR3410_94_CryptoPro_XchC_ParamSet = id_CryptoPro_exchanges + (3,)
+
+
+id_GostR3410_94_a = id_GostR3410_94 + (1,)
+
+id_GostR3410_94_aBis = id_GostR3410_94 + (2,)
+
+id_GostR3410_94_b = id_GostR3410_94 + (3,)
+
+id_GostR3410_94_bBis = id_GostR3410_94 + (4,)
+
+
+id_GostR3411_94_TestParamSet = id_CryptoPro_hashes + (0,)
+
+id_GostR3411_94_CryptoProParamSet = id_CryptoPro_hashes + (1,)
+
+
+
+
class Gost28147_89_ParamSet(univ.ObjectIdentifier):
    """OBJECT IDENTIFIER restricted to the known GOST 28147-89
    encryption parameter-set identifiers (RFC 4357).
    """
    subtypeSpec = constraint.SingleValueConstraint(
        id_Gost28147_89_TestParamSet,
        id_Gost28147_89_CryptoPro_A_ParamSet,
        id_Gost28147_89_CryptoPro_B_ParamSet,
        id_Gost28147_89_CryptoPro_C_ParamSet,
        id_Gost28147_89_CryptoPro_D_ParamSet,
        id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet,
        id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet,
        id_Gost28147_89_CryptoPro_RIC_1_ParamSet,
    )
+
+
+class Gost28147_89_BlobParameters(univ.Sequence):
+ pass
+
+Gost28147_89_BlobParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet())
+)
+
+
+class Gost28147_89_MAC(univ.OctetString):
+ pass
+
+Gost28147_89_MAC.subtypeSpec = constraint.ValueSizeConstraint(1, 4)
+
+
+class Gost28147_89_Key(univ.OctetString):
+ pass
+
+Gost28147_89_Key.subtypeSpec = constraint.ValueSizeConstraint(32, 32)
+
+
+class Gost28147_89_EncryptedKey(univ.Sequence):
+ pass
+
+Gost28147_89_EncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedKey', Gost28147_89_Key()),
+ namedtype.OptionalNamedType('maskKey', Gost28147_89_Key().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('macKey', Gost28147_89_MAC())
+)
+
+
+class Gost28147_89_IV(univ.OctetString):
+ pass
+
+Gost28147_89_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8)
+
+
+class Gost28147_89_UZ(univ.OctetString):
+ pass
+
+Gost28147_89_UZ.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
class Gost28147_89_ParamSetParameters(univ.Sequence):
    """Definition of one GOST 28147-89 parameter set: substitution table,
    cipher mode, block shift width and key-meshing algorithm.
    """
    pass

Gost28147_89_ParamSetParameters.componentType = namedtype.NamedTypes(
    # eUZ: the 64-byte substitution (S-box) table -- see Gost28147_89_UZ
    namedtype.NamedType('eUZ', Gost28147_89_UZ()),
    namedtype.NamedType('mode',
        univ.Integer(namedValues=namedval.NamedValues(
            ('gost28147-89-CNT', 0),
            ('gost28147-89-CFB', 1),
            ('cryptoPro-CBC', 2)
        ))),
    namedtype.NamedType('shiftBits',
        univ.Integer(namedValues=namedval.NamedValues(
            ('gost28147-89-block', 64)
        ))),
    # keyMeshing: AlgorithmIdentifier naming the key-meshing variant
    # (e.g. id-Gost28147-89-CryptoPro-KeyMeshing / None-KeyMeshing)
    namedtype.NamedType('keyMeshing', AlgorithmIdentifier())
)
+
+
+class Gost28147_89_Parameters(univ.Sequence):
+ pass
+
+Gost28147_89_Parameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('iv', Gost28147_89_IV()),
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet())
+)
+
+
class GostR3410_2001_CertificateSignature(univ.BitString):
    """GOST R 34.10-2001 signature value: BIT STRING (SIZE (256..512))."""
    subtypeSpec = constraint.ValueSizeConstraint(256, 512)
+
+
+class GostR3410_2001_ParamSetParameters(univ.Sequence):
+ pass
+
+GostR3410_2001_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('a', univ.Integer()),
+ namedtype.NamedType('b', univ.Integer()),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('x', univ.Integer()),
+ namedtype.NamedType('y', univ.Integer())
+)
+
+
+class GostR3410_2001_PublicKey(univ.OctetString):
+ pass
+
+GostR3410_2001_PublicKey.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
class GostR3410_2001_PublicKeyParameters(univ.Sequence):
    """AlgorithmIdentifier parameters for GOST R 34.10-2001 public keys."""
    pass

GostR3410_2001_PublicKeyParameters.componentType = namedtype.NamedTypes(
    # publicKeyParamSet is constrained to the known EC parameter-set OIDs
    namedtype.NamedType('publicKeyParamSet', univ.ObjectIdentifier().subtype(
        subtypeSpec=constraint.SingleValueConstraint(
            id_GostR3410_2001_TestParamSet,
            id_GostR3410_2001_CryptoPro_A_ParamSet,
            id_GostR3410_2001_CryptoPro_B_ParamSet,
            id_GostR3410_2001_CryptoPro_C_ParamSet,
            id_GostR3410_2001_CryptoPro_XchA_ParamSet,
            id_GostR3410_2001_CryptoPro_XchB_ParamSet
        ))),
    # digestParamSet selects the GOST R 34.11-94 hash parameter set
    namedtype.NamedType('digestParamSet', univ.ObjectIdentifier().subtype(
        subtypeSpec=constraint.SingleValueConstraint(
            id_GostR3411_94_TestParamSet,
            id_GostR3411_94_CryptoProParamSet
        ))),
    # DEFAULT id-Gost28147-89-CryptoPro-A-ParamSet when absent on the wire
    namedtype.DefaultedNamedType('encryptionParamSet',
        Gost28147_89_ParamSet().subtype(value=id_Gost28147_89_CryptoPro_A_ParamSet
        ))
)
+
+
+class GostR3410_94_CertificateSignature(univ.BitString):
+ pass
+
+GostR3410_94_CertificateSignature.subtypeSpec = constraint.ValueSizeConstraint(256, 512)
+
+
+class GostR3410_94_ParamSetParameters_t(univ.Integer):
+ pass
+
+GostR3410_94_ParamSetParameters_t.subtypeSpec = constraint.SingleValueConstraint(512, 1024)
+
+
+class GostR3410_94_ParamSetParameters(univ.Sequence):
+ pass
+
+GostR3410_94_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('t', GostR3410_94_ParamSetParameters_t()),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('a', univ.Integer()),
+ namedtype.OptionalNamedType('validationAlgorithm', AlgorithmIdentifier())
+)
+
+
+class GostR3410_94_PublicKey(univ.OctetString):
+ pass
+
+GostR3410_94_PublicKey.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.ValueSizeConstraint(64, 64),
+ constraint.ValueSizeConstraint(128, 128)
+)
+
+
+class GostR3410_94_PublicKeyParameters(univ.Sequence):
+ pass
+
+GostR3410_94_PublicKeyParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('publicKeyParamSet', univ.ObjectIdentifier().subtype(
+ subtypeSpec=constraint.SingleValueConstraint(
+ id_GostR3410_94_TestParamSet,
+ id_GostR3410_94_CryptoPro_A_ParamSet,
+ id_GostR3410_94_CryptoPro_B_ParamSet,
+ id_GostR3410_94_CryptoPro_C_ParamSet,
+ id_GostR3410_94_CryptoPro_D_ParamSet,
+ id_GostR3410_94_CryptoPro_XchA_ParamSet,
+ id_GostR3410_94_CryptoPro_XchB_ParamSet,
+ id_GostR3410_94_CryptoPro_XchC_ParamSet
+ ))),
+ namedtype.NamedType('digestParamSet', univ.ObjectIdentifier().subtype(
+ subtypeSpec=constraint.SingleValueConstraint(
+ id_GostR3411_94_TestParamSet,
+ id_GostR3411_94_CryptoProParamSet
+ ))),
+ namedtype.DefaultedNamedType('encryptionParamSet',
+ Gost28147_89_ParamSet().subtype(value=id_Gost28147_89_CryptoPro_A_ParamSet
+ ))
+)
+
+
+class GostR3410_94_ValidationBisParameters_c(univ.Integer):
+ pass
+
+GostR3410_94_ValidationBisParameters_c.subtypeSpec = constraint.ValueRangeConstraint(0, 4294967295)
+
+
+class GostR3410_94_ValidationBisParameters(univ.Sequence):
+ pass
+
+GostR3410_94_ValidationBisParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x0', GostR3410_94_ValidationBisParameters_c()),
+ namedtype.NamedType('c', GostR3410_94_ValidationBisParameters_c()),
+ namedtype.OptionalNamedType('d', univ.Integer())
+)
+
+
+class GostR3410_94_ValidationParameters_c(univ.Integer):
+ pass
+
+GostR3410_94_ValidationParameters_c.subtypeSpec = constraint.ValueRangeConstraint(0, 65535)
+
+
+class GostR3410_94_ValidationParameters(univ.Sequence):
+ pass
+
+GostR3410_94_ValidationParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x0', GostR3410_94_ValidationParameters_c()),
+ namedtype.NamedType('c', GostR3410_94_ValidationParameters_c()),
+ namedtype.OptionalNamedType('d', univ.Integer())
+)
+
+
+class GostR3411_94_Digest(univ.OctetString):
+ pass
+
+GostR3411_94_Digest.subtypeSpec = constraint.ValueSizeConstraint(32, 32)
+
+
+class GostR3411_94_DigestParameters(univ.ObjectIdentifier):
+ pass
+
+GostR3411_94_DigestParameters.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.SingleValueConstraint(id_GostR3411_94_TestParamSet),
+ constraint.SingleValueConstraint(id_GostR3411_94_CryptoProParamSet),
+)
+
+
+class GostR3411_94_ParamSetParameters(univ.Sequence):
+ pass
+
+GostR3411_94_ParamSetParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hUZ', Gost28147_89_UZ()),
+ namedtype.NamedType('h0', GostR3411_94_Digest())
+)
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_Gost28147_89: Gost28147_89_Parameters(),
+ id_Gost28147_89_TestParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_A_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_B_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_C_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_D_ParamSet: Gost28147_89_ParamSetParameters(),
+ id_Gost28147_89_CryptoPro_KeyMeshing: univ.Null(""),
+ id_Gost28147_89_None_KeyMeshing: univ.Null(""),
+ id_GostR3410_94: GostR3410_94_PublicKeyParameters(),
+ id_GostR3410_94_TestParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_A_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_B_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_C_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_D_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_XchA_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_XchB_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_CryptoPro_XchC_ParamSet: GostR3410_94_ParamSetParameters(),
+ id_GostR3410_94_a: GostR3410_94_ValidationParameters(),
+ id_GostR3410_94_aBis: GostR3410_94_ValidationBisParameters(),
+ id_GostR3410_94_b: GostR3410_94_ValidationParameters(),
+ id_GostR3410_94_bBis: GostR3410_94_ValidationBisParameters(),
+ id_GostR3410_2001: univ.Null(""),
+ id_GostR3411_94: univ.Null(""),
+ id_GostR3411_94_TestParamSet: GostR3411_94_ParamSetParameters(),
+ id_GostR3411_94_CryptoProParamSet: GostR3411_94_ParamSetParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4387.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4387.py
new file mode 100644
index 0000000000..c1f4e79acf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4387.py
@@ -0,0 +1,23 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Store Access via HTTP
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4387.txt
+#
+
+
+from pyasn1.type import univ
+
+
# id-ad: the PKIX accessDescription arc (1.3.6.1.5.5.7.48)
id_ad = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, ))

# accessMethod for retrieving certificates over HTTP (RFC 4387)
id_ad_http_certs = id_ad + (6, )

# accessMethod for retrieving CRLs over HTTP (RFC 4387)
id_ad_http_crls = id_ad + (7,)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4476.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4476.py
new file mode 100644
index 0000000000..25a0ccb7e8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4476.py
@@ -0,0 +1,93 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Attribute Certificate Policies Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4476.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+PolicyQualifierId = rfc5280.PolicyQualifierId
+
+PolicyQualifierInfo = rfc5280.PolicyQualifierInfo
+
+UserNotice = rfc5280.UserNotice
+
+id_pkix = rfc5280.id_pkix
+
+
+# Object Identifiers
+
+id_pe = id_pkix + (1,)
+
+id_pe_acPolicies = id_pe + (15,)
+
+id_qt = id_pkix + (2,)
+
+id_qt_acps = id_qt + (4,)
+
+id_qt_acunotice = id_qt + (5,)
+
+
+# Attribute Certificate Policies Extension
+
class ACUserNotice(UserNotice):
    """ACUserNotice ::= UserNotice -- qualifier body for id-qt-acunotice."""
    pass
+
+
class ACPSuri(char.IA5String):
    """ACPSuri ::= IA5String -- qualifier body for id-qt-acps."""
    pass
+
+
class AcPolicyId(univ.ObjectIdentifier):
    """AcPolicyId ::= OBJECT IDENTIFIER -- identifies one AC policy."""
    pass
+
+
class PolicyInformation(univ.Sequence):
    """PolicyInformation ::= SEQUENCE {
        policyIdentifier AcPolicyId,
        policyQualifiers SEQUENCE SIZE (1..MAX) OF PolicyQualifierInfo OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('policyIdentifier', AcPolicyId()),
        namedtype.OptionalNamedType(
            'policyQualifiers',
            univ.SequenceOf(
                componentType=PolicyQualifierInfo()
            ).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
    )
+
+
class AcPoliciesSyntax(univ.SequenceOf):
    """AcPoliciesSyntax ::= SEQUENCE SIZE (1..MAX) OF PolicyInformation"""


# Declared post-class, matching the style of the other pyasn1-modules files.
AcPoliciesSyntax.componentType = PolicyInformation()
AcPoliciesSyntax.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Update the policy qualifier map in rfc5280.py
+
+_policyQualifierInfoMapUpdate = {
+ id_qt_acps: ACPSuri(),
+ id_qt_acunotice: UserNotice(),
+}
+
+rfc5280.policyQualifierInfoMap.update(_policyQualifierInfoMapUpdate)
+
+
+# Update the certificate extension map in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_acPolicies: AcPoliciesSyntax(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4490.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4490.py
new file mode 100644
index 0000000000..b8fe32134e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4490.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using the GOST 28147-89, GOST R 34.11-94, GOST R 34.10-94, and
+# GOST R 34.10-2001 Algorithms with the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4490.txt
+#
+
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc4357
+from pyasn1_modules import rfc5280
+
+
+# Imports from RFC 4357
+
+id_CryptoPro_algorithms = rfc4357.id_CryptoPro_algorithms
+
+id_GostR3410_94 = rfc4357.id_GostR3410_94
+
+id_GostR3410_2001 = rfc4357.id_GostR3410_2001
+
+Gost28147_89_ParamSet = rfc4357.Gost28147_89_ParamSet
+
+Gost28147_89_EncryptedKey = rfc4357.Gost28147_89_EncryptedKey
+
+GostR3410_94_PublicKeyParameters = rfc4357.GostR3410_94_PublicKeyParameters
+
+GostR3410_2001_PublicKeyParameters = rfc4357.GostR3410_2001_PublicKeyParameters
+
+
+# Imports from RFC 5280
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+
+# CMS/PKCS#7 key agreement algorithms & parameters
+
+class Gost28147_89_KeyWrapParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()),
+ namedtype.OptionalNamedType('ukm', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+ )
+
+
+id_Gost28147_89_CryptoPro_KeyWrap = id_CryptoPro_algorithms + (13, 1, )
+
+
+id_Gost28147_89_None_KeyWrap = id_CryptoPro_algorithms + (13, 0, )
+
+
+id_GostR3410_2001_CryptoPro_ESDH = id_CryptoPro_algorithms + (96, )
+
+
+id_GostR3410_94_CryptoPro_ESDH = id_CryptoPro_algorithms + (97, )
+
+
+# CMS/PKCS#7 key transport algorithms & parameters
+
+id_GostR3410_2001_KeyTransportSMIMECapability = id_GostR3410_2001
+
+
+id_GostR3410_94_KeyTransportSMIMECapability = id_GostR3410_94
+
+
+class GostR3410_TransportParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()),
+ namedtype.OptionalNamedType('ephemeralPublicKey',
+ SubjectPublicKeyInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('ukm', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+ )
+
+class GostR3410_KeyTransport(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sessionEncryptedKey', Gost28147_89_EncryptedKey()),
+ namedtype.OptionalNamedType('transportParameters',
+ GostR3410_TransportParameters().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+# GOST R 34.10-94 signature algorithm & parameters
+
+class GostR3410_94_Signature(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+# GOST R 34.10-2001 signature algorithms and parameters
+
+class GostR3410_2001_Signature(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_Gost28147_89_CryptoPro_KeyWrap: Gost28147_89_KeyWrapParameters(),
+ id_Gost28147_89_None_KeyWrap: Gost28147_89_KeyWrapParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4491.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4491.py
new file mode 100644
index 0000000000..60b5560dcc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4491.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using the GOST R 34.10-94, GOST R 34.10-2001, and GOST R 34.11-94
+# Algorithms with Certificates and CRLs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4491.txt
+#
+
+from pyasn1_modules import rfc4357
+
+
+# Signature Algorithm GOST R 34.10-94
+
+id_GostR3411_94_with_GostR3410_94 = rfc4357.id_GostR3411_94_with_GostR3410_94
+
+
+# Signature Algorithm GOST R 34.10-2001
+
+id_GostR3411_94_with_GostR3410_2001 = rfc4357.id_GostR3411_94_with_GostR3410_2001
+
+
+# GOST R 34.10-94 Keys
+
+id_GostR3410_94 = rfc4357.id_GostR3410_94
+
+GostR3410_2001_PublicKey = rfc4357.GostR3410_2001_PublicKey
+
+GostR3410_2001_PublicKeyParameters = rfc4357.GostR3410_2001_PublicKeyParameters
+
+
+# GOST R 34.10-2001 Keys
+
+id_GostR3410_2001 = rfc4357.id_GostR3410_2001
+
+GostR3410_94_PublicKey = rfc4357.GostR3410_94_PublicKey
+
+GostR3410_94_PublicKeyParameters = rfc4357.GostR3410_94_PublicKeyParameters
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4683.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4683.py
new file mode 100644
index 0000000000..11ac65aa68
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4683.py
@@ -0,0 +1,72 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Subject Identification Method (SIM)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4683.txt
+# https://www.rfc-editor.org/errata/eid1047
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Used to compute the PEPSI value
+
+class HashContent(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userPassword', char.UTF8String()),
+ namedtype.NamedType('authorityRandom', univ.OctetString()),
+ namedtype.NamedType('identifierType', univ.ObjectIdentifier()),
+ namedtype.NamedType('identifier', char.UTF8String())
+ )
+
+
+# Used to encode the PEPSI value as the SIM Other Name
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8,)
+
+id_on_SIM = id_on + (6,)
+
+
+class SIM(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('authorityRandom', univ.OctetString()),
+ namedtype.NamedType('pEPSI', univ.OctetString())
+ )
+
+
+# Used to encrypt the PEPSI value during certificate request
+
+id_pkip = id_pkix + (5,)
+
+id_regEPEPSI = id_pkip + (3,)
+
+
+class EncryptedPEPSI(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('identifierType', univ.ObjectIdentifier()),
+ namedtype.NamedType('identifier', char.UTF8String()),
+ namedtype.NamedType('sIM', SIM())
+ )
+
+
+# Update the map of Other Name OIDs to Other Names in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_SIM: SIM(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4985.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4985.py
new file mode 100644
index 0000000000..318e412380
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc4985.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Expression of Service Names in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4985.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# As specified in Appendix A.2 of RFC 4985
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_dnsSRV = id_on + (7, )
+
+
+class SRVName(char.IA5String):
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+srvName = rfc5280.AnotherName()
+srvName['type-id'] = id_on_dnsSRV
+srvName['value'] = SRVName()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_dnsSRV: SRVName(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5035.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5035.py
new file mode 100644
index 0000000000..1cec98249c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5035.py
@@ -0,0 +1,199 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Update to Enhanced Security Services for S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5035.txt
+#
+
+from pyasn1.codec.der.encoder import encode as der_encode
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+ContentType = rfc5652.ContentType
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+PolicyInformation = rfc5280.PolicyInformation
+
+GeneralNames = rfc5280.GeneralNames
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+
+# Signing Certificate Attribute V1 and V2
+
+id_aa_signingCertificate = rfc2634.id_aa_signingCertificate
+
+id_aa_signingCertificateV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.47')
+
+Hash = rfc2634.Hash
+
+IssuerSerial = rfc2634.IssuerSerial
+
+ESSCertID = rfc2634.ESSCertID
+
+SigningCertificate = rfc2634.SigningCertificate
+
+
+sha256AlgId = AlgorithmIdentifier()
+sha256AlgId['algorithm'] = rfc4055.id_sha256
+# A non-schema object for sha256AlgId['parameters'] as absent
+sha256AlgId['parameters'] = der_encode(univ.OctetString(''))
+
+
+class ESSCertIDv2(univ.Sequence):
+ pass
+
+ESSCertIDv2.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('hashAlgorithm', sha256AlgId),
+ namedtype.NamedType('certHash', Hash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+)
+
+
+class SigningCertificateV2(univ.Sequence):
+ pass
+
+SigningCertificateV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs', univ.SequenceOf(
+ componentType=ESSCertIDv2())),
+ namedtype.OptionalNamedType('policies', univ.SequenceOf(
+ componentType=PolicyInformation()))
+)
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = rfc2634.id_aa_mlExpandHistory
+
+ub_ml_expansion_history = rfc2634.ub_ml_expansion_history
+
+EntityIdentifier = rfc2634.EntityIdentifier
+
+MLReceiptPolicy = rfc2634.MLReceiptPolicy
+
+MLData = rfc2634.MLData
+
+MLExpansionHistory = rfc2634.MLExpansionHistory
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+ub_privacy_mark_length = rfc2634.ub_privacy_mark_length
+
+ub_security_categories = rfc2634.ub_security_categories
+
+ub_integer_options = rfc2634.ub_integer_options
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityClassification = rfc2634.SecurityClassification
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityCategory = rfc2634.SecurityCategory
+
+SecurityCategories = rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = rfc2634.id_aa_equivalentLabels
+
+EquivalentLabels = rfc2634.EquivalentLabels
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = rfc2634.id_aa_contentIdentifier
+
+ContentIdentifier = rfc2634.ContentIdentifier
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = rfc2634.id_aa_contentReference
+
+ContentReference = rfc2634.ContentReference
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = rfc2634.id_aa_msgSigDigest
+
+MsgSigDigest = rfc2634.MsgSigDigest
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+
+# Receipt Request Attribute
+
+AllOrFirstTier = rfc2634.AllOrFirstTier
+
+ReceiptsFrom = rfc2634.ReceiptsFrom
+
+id_aa_receiptRequest = rfc2634.id_aa_receiptRequest
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Receipt Content Type
+
+ESSVersion = rfc2634.ESSVersion
+
+id_ct_receipt = rfc2634.id_ct_receipt
+
+Receipt = rfc2634.Receipt
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_signingCertificateV2: SigningCertificateV2(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5083.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5083.py
new file mode 100644
index 0000000000..26ef550c47
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5083.py
@@ -0,0 +1,52 @@
+# This file is being contributed to the pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add a map for use with opentypes and
+# simplify the code for the object identifier assignment.
+#
+# Copyright (c) 2018, 2019 Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authenticated-Enveloped-Data for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5083.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# CMS Authenticated-Enveloped-Data Content Type
+
+id_ct_authEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.23')
+
+class AuthEnvelopedData(univ.Sequence):
+ pass
+
+AuthEnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', rfc5652.CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', rfc5652.OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', rfc5652.RecipientInfos()),
+ namedtype.NamedType('authEncryptedContentInfo', rfc5652.EncryptedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', rfc5652.AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('mac', rfc5652.MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', rfc5652.UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_authEnvelopedData: AuthEnvelopedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5084.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5084.py
new file mode 100644
index 0000000000..7686839561
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5084.py
@@ -0,0 +1,97 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to AES_CCM_ICVlen.subtypeSpec and added comments
+#
+# Copyright (c) 2018-2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES-CCM and AES-GCM Algorithms for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5084.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AES_CCM_ICVlen(univ.Integer):
+ pass
+
+
+class AES_GCM_ICVlen(univ.Integer):
+ pass
+
+
+AES_CCM_ICVlen.subtypeSpec = constraint.SingleValueConstraint(4, 6, 8, 10, 12, 14, 16)
+
+AES_GCM_ICVlen.subtypeSpec = constraint.ValueRangeConstraint(12, 16)
+
+
+class CCMParameters(univ.Sequence):
+ pass
+
+
+CCMParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('aes-nonce', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(7, 13))),
+ # The aes-nonce parameter contains 15-L octets, where L is the size of the length field. L=8 is RECOMMENDED.
+ # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+ namedtype.DefaultedNamedType('aes-ICVlen', AES_CCM_ICVlen().subtype(value=12))
+)
+
+
+class GCMParameters(univ.Sequence):
+ pass
+
+
+GCMParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('aes-nonce', univ.OctetString()),
+ # The aes-nonce may have any number of bits between 8 and 2^64, but it MUST be a multiple of 8 bits.
+ # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+ # A nonce value of 12 octets can be processed more efficiently, so that length is RECOMMENDED.
+ namedtype.DefaultedNamedType('aes-ICVlen', AES_GCM_ICVlen().subtype(value=12))
+)
+
+aes = _OID(2, 16, 840, 1, 101, 3, 4, 1)
+
+id_aes128_CCM = _OID(aes, 7)
+
+id_aes128_GCM = _OID(aes, 6)
+
+id_aes192_CCM = _OID(aes, 27)
+
+id_aes192_GCM = _OID(aes, 26)
+
+id_aes256_CCM = _OID(aes, 47)
+
+id_aes256_GCM = _OID(aes, 46)
+
+
+# Map of Algorithm Identifier OIDs to Parameters is added to the
+# ones in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_aes128_CCM: CCMParameters(),
+ id_aes128_GCM: GCMParameters(),
+ id_aes192_CCM: CCMParameters(),
+ id_aes192_GCM: GCMParameters(),
+ id_aes256_CCM: CCMParameters(),
+ id_aes256_GCM: GCMParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5126.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5126.py
new file mode 100644
index 0000000000..8e016c209f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5126.py
@@ -0,0 +1,577 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Advanced Electronic Signatures (CAdES)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5126.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc6960
+from pyasn1_modules import rfc3161
+
+MAX = float('inf')
+
+
+# Maps for OpenTypes
+
+commitmentQualifierMap = { }
+
+sigQualifiersMap = { }
+
+otherRevRefMap = { }
+
+otherRevValMap = { }
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+ContentType = rfc5652.ContentType
+
+SignedData = rfc5652.SignedData
+
+EncapsulatedContentInfo = rfc5652.EncapsulatedContentInfo
+
+SignerInfo = rfc5652.SignerInfo
+
+MessageDigest = rfc5652.MessageDigest
+
+SigningTime = rfc5652.SigningTime
+
+Countersignature = rfc5652.Countersignature
+
+id_data = rfc5652.id_data
+
+id_signedData = rfc5652.id_signedData
+
+id_contentType= rfc5652.id_contentType
+
+id_messageDigest = rfc5652.id_messageDigest
+
+id_signingTime = rfc5652.id_signingTime
+
+id_countersignature = rfc5652.id_countersignature
+
+
+# Imports from RFC 5035
+
+SigningCertificate = rfc5035.SigningCertificate
+
+IssuerSerial = rfc5035.IssuerSerial
+
+ContentReference = rfc5035.ContentReference
+
+ContentIdentifier = rfc5035.ContentIdentifier
+
+id_aa_contentReference = rfc5035.id_aa_contentReference
+
+id_aa_contentIdentifier = rfc5035.id_aa_contentIdentifier
+
+id_aa_signingCertificate = rfc5035.id_aa_signingCertificate
+
+id_aa_signingCertificateV2 = rfc5035.id_aa_signingCertificateV2
+
+
+# Imports from RFC 5280
+
+Certificate = rfc5280.Certificate
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+CertificateList = rfc5280.CertificateList
+
+Name = rfc5280.Name
+
+Attribute = rfc5280.Attribute
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+PolicyInformation = rfc5280.PolicyInformation
+
+DirectoryString = rfc5280.DirectoryString
+
+
+# Imports from RFC 5755
+
+AttributeCertificate = rfc5755.AttributeCertificate
+
+
+# Imports from RFC 6960
+
+BasicOCSPResponse = rfc6960.BasicOCSPResponse
+
+ResponderID = rfc6960.ResponderID
+
+
+# Imports from RFC 3161
+
+TimeStampToken = rfc3161.TimeStampToken
+
+
+# OID used referencing electronic signature mechanisms
+
+id_etsi_es_IDUP_Mechanism_v1 = univ.ObjectIdentifier('0.4.0.1733.1.4.1')
+
+
+# OtherSigningCertificate - deprecated
+
+id_aa_ets_otherSigCert = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.19')
+
+
+class OtherHashValue(univ.OctetString):
+ pass
+
+
+class OtherHashAlgAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', OtherHashValue())
+ )
+
+
+class OtherHash(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sha1Hash', OtherHashValue()),
+ namedtype.NamedType('otherHash', OtherHashAlgAndValue())
+ )
+
+
+class OtherCertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertHash', OtherHash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+ )
+
+
+class OtherSigningCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs',
+ univ.SequenceOf(componentType=OtherCertID())),
+ namedtype.OptionalNamedType('policies',
+ univ.SequenceOf(componentType=PolicyInformation()))
+ )
+
+
+# Signature Policy Identifier
+
+id_aa_ets_sigPolicyId = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.15')
+
+
+class SigPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class SigPolicyHash(OtherHashAlgAndValue):
+ pass
+
+
+class SigPolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+class SigPolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigPolicyQualifierId', SigPolicyQualifierId()),
+ namedtype.NamedType('sigQualifier', univ.Any(),
+ openType=opentype.OpenType('sigPolicyQualifierId', sigQualifiersMap))
+ )
+
+
+class SignaturePolicyId(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigPolicyId', SigPolicyId()),
+ namedtype.NamedType('sigPolicyHash', SigPolicyHash()),
+ namedtype.OptionalNamedType('sigPolicyQualifiers',
+ univ.SequenceOf(componentType=SigPolicyQualifierInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class SignaturePolicyImplied(univ.Null):
+ pass
+
+
+class SignaturePolicy(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signaturePolicyId', SignaturePolicyId()),
+ namedtype.NamedType('signaturePolicyImplied', SignaturePolicyImplied())
+ )
+
+
+id_spq_ets_unotice = univ.ObjectIdentifier('1.2.840.113549.1.9.16.5.2')
+
+
+class DisplayText(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('visibleString', char.VisibleString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+ )
+
+
+class NoticeReference(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers',
+ univ.SequenceOf(componentType=univ.Integer()))
+ )
+
+class SPUserNotice(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+ )
+
+
+noticeToUser = SigPolicyQualifierInfo()
+noticeToUser['sigPolicyQualifierId'] = id_spq_ets_unotice
+noticeToUser['sigQualifier'] = SPUserNotice()
+
+
+id_spq_ets_uri = univ.ObjectIdentifier('1.2.840.113549.1.9.16.5.1')
+
+
+class SPuri(char.IA5String):
+ pass
+
+
+pointerToSigPolSpec = SigPolicyQualifierInfo()
+pointerToSigPolSpec['sigPolicyQualifierId'] = id_spq_ets_uri
+pointerToSigPolSpec['sigQualifier'] = SPuri()
+
+
+# Commitment Type
+
+id_aa_ets_commitmentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.16')
+
+
+class CommitmentTypeIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class CommitmentTypeQualifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('commitmentTypeIdentifier',
+ CommitmentTypeIdentifier()),
+ namedtype.NamedType('qualifier', univ.Any(),
+ openType=opentype.OpenType('commitmentTypeIdentifier',
+ commitmentQualifierMap))
+ )
+
+
+class CommitmentTypeIndication(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('commitmentTypeId', CommitmentTypeIdentifier()),
+ namedtype.OptionalNamedType('commitmentTypeQualifier',
+ univ.SequenceOf(componentType=CommitmentTypeQualifier()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+id_cti_ets_proofOfOrigin = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.1')
+
+id_cti_ets_proofOfReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.2')
+
+id_cti_ets_proofOfDelivery = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.3')
+
+id_cti_ets_proofOfSender = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.4')
+
+id_cti_ets_proofOfApproval = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.5')
+
+id_cti_ets_proofOfCreation = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.6')
+
+
+# Signer Location
+
+id_aa_ets_signerLocation = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.17')
+
+
+class PostalAddress(univ.SequenceOf):
+ componentType = DirectoryString()
+ subtypeSpec = constraint.ValueSizeConstraint(1, 6)
+
+
+class SignerLocation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('countryName',
+ DirectoryString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('localityName',
+ DirectoryString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('postalAdddress',
+ PostalAddress().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+# Signature Timestamp
+
+id_aa_signatureTimeStampToken = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.14')
+
+
+class SignatureTimeStampToken(TimeStampToken):
+ pass
+
+
+# Content Timestamp
+
+id_aa_ets_contentTimestamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.20')
+
+
+class ContentTimestamp(TimeStampToken):
+ pass
+
+
+# Signer Attributes
+
+id_aa_ets_signerAttr = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.18')
+
+
+class ClaimedAttributes(univ.SequenceOf):
+ componentType = Attribute()
+
+
+class CertifiedAttributes(AttributeCertificate):
+ pass
+
+
+class SignerAttribute(univ.SequenceOf):
+ componentType = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('claimedAttributes',
+ ClaimedAttributes().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('certifiedAttributes',
+ CertifiedAttributes().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))
+
+
+# Complete Certificate Refs
+
+id_aa_ets_certificateRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.21')
+
+
+class CompleteCertificateRefs(univ.SequenceOf):
+ componentType = OtherCertID()
+
+
+# Complete Revocation Refs
+
+id_aa_ets_revocationRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.22')
+
+
+class CrlIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlissuer', Name()),
+ namedtype.NamedType('crlIssuedTime', useful.UTCTime()),
+ namedtype.OptionalNamedType('crlNumber', univ.Integer())
+ )
+
+
+class CrlValidatedID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlHash', OtherHash()),
+ namedtype.OptionalNamedType('crlIdentifier', CrlIdentifier())
+ )
+
+
+class CRLListID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crls',
+ univ.SequenceOf(componentType=CrlValidatedID()))
+ )
+
+
+class OcspIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ocspResponderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime())
+ )
+
+
+class OcspResponsesID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ocspIdentifier', OcspIdentifier()),
+ namedtype.OptionalNamedType('ocspRepHash', OtherHash())
+ )
+
+
+class OcspListID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ocspResponses',
+ univ.SequenceOf(componentType=OcspResponsesID()))
+ )
+
+
+class OtherRevRefType(univ.ObjectIdentifier):
+ pass
+
+
+class OtherRevRefs(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevRefType', OtherRevRefType()),
+ namedtype.NamedType('otherRevRefs', univ.Any(),
+ openType=opentype.OpenType('otherRevRefType', otherRevRefMap))
+ )
+
+
+class CrlOcspRef(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlids',
+ CRLListID().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ocspids',
+ OcspListID().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('otherRev',
+ OtherRevRefs().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CompleteRevocationRefs(univ.SequenceOf):
+ componentType = CrlOcspRef()
+
+
+# Certificate Values
+
+id_aa_ets_certValues = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.23')
+
+
+class CertificateValues(univ.SequenceOf):
+ componentType = Certificate()
+
+
+# Certificate Revocation Values
+
+id_aa_ets_revocationValues = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.24')
+
+
+class OtherRevValType(univ.ObjectIdentifier):
+ pass
+
+
+class OtherRevVals(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevValType', OtherRevValType()),
+ namedtype.NamedType('otherRevVals', univ.Any(),
+ openType=opentype.OpenType('otherRevValType', otherRevValMap))
+ )
+
+
+class RevocationValues(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlVals',
+ univ.SequenceOf(componentType=CertificateList()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('ocspVals',
+ univ.SequenceOf(componentType=BasicOCSPResponse()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('otherRevVals',
+ OtherRevVals().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+# CAdES-C Timestamp
+
+id_aa_ets_escTimeStamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.25')
+
+
+class ESCTimeStampToken(TimeStampToken):
+ pass
+
+
+# Time-Stamped Certificates and CRLs
+
+id_aa_ets_certCRLTimestamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.26')
+
+
+class TimestampedCertsCRLs(TimeStampToken):
+ pass
+
+
+# Archive Timestamp
+
+id_aa_ets_archiveTimestampV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.48')
+
+
+class ArchiveTimeStampToken(TimeStampToken):
+ pass
+
+
+# Attribute certificate references
+
+id_aa_ets_attrCertificateRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.44')
+
+
+class AttributeCertificateRefs(univ.SequenceOf):
+ componentType = OtherCertID()
+
+
+# Attribute revocation references
+
+id_aa_ets_attrRevocationRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.45')
+
+
+class AttributeRevocationRefs(univ.SequenceOf):
+ componentType = CrlOcspRef()
+
+
+# Update the sigQualifiersMap
+
+_sigQualifiersMapUpdate = {
+ id_spq_ets_unotice: SPUserNotice(),
+ id_spq_ets_uri: SPuri(),
+}
+
+sigQualifiersMap.update(_sigQualifiersMapUpdate)
+
+
+# Update the CMS Attribute Map in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_ets_otherSigCert: OtherSigningCertificate(),
+ id_aa_ets_sigPolicyId: SignaturePolicy(),
+ id_aa_ets_commitmentType: CommitmentTypeIndication(),
+ id_aa_ets_signerLocation: SignerLocation(),
+ id_aa_signatureTimeStampToken: SignatureTimeStampToken(),
+ id_aa_ets_contentTimestamp: ContentTimestamp(),
+ id_aa_ets_signerAttr: SignerAttribute(),
+ id_aa_ets_certificateRefs: CompleteCertificateRefs(),
+ id_aa_ets_revocationRefs: CompleteRevocationRefs(),
+ id_aa_ets_certValues: CertificateValues(),
+ id_aa_ets_revocationValues: RevocationValues(),
+ id_aa_ets_escTimeStamp: ESCTimeStampToken(),
+ id_aa_ets_certCRLTimestamp: TimestampedCertsCRLs(),
+ id_aa_ets_archiveTimestampV2: ArchiveTimeStampToken(),
+ id_aa_ets_attrCertificateRefs: AttributeCertificateRefs(),
+ id_aa_ets_attrRevocationRefs: AttributeRevocationRefs(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5208.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5208.py
new file mode 100644
index 0000000000..295fdbf388
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5208.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#8 syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc5208
+#
+# Sample captures could be obtained with "openssl pkcs8 -topk8" command
+#
+from pyasn1_modules import rfc2251
+from pyasn1_modules.rfc2459 import *
+
+
+class KeyEncryptionAlgorithms(AlgorithmIdentifier):
+ pass
+
+
+class PrivateKeyAlgorithms(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedData(univ.OctetString):
+ pass
+
+
+class EncryptedPrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('encryptedData', EncryptedData())
+ )
+
+
+class PrivateKey(univ.OctetString):
+ pass
+
+
+class Attributes(univ.SetOf):
+ componentType = rfc2251.Attribute()
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0), ('v2', 1))
+
+
+class PrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('privateKeyAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', PrivateKey()),
+ namedtype.OptionalNamedType('attributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5275.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5275.py
new file mode 100644
index 0000000000..1be9598142
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5275.py
@@ -0,0 +1,404 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5275.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5755
+
+MAX = float('inf')
+
+
+# Initialize the map for GLAQueryRequests and GLAQueryResponses
+
+glaQueryRRMap = { }
+
+
+# Imports from RFC 3565
+
+id_aes128_wrap = rfc3565.id_aes128_wrap
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Certificate = rfc5280.Certificate
+
+GeneralName = rfc5280.GeneralName
+
+
+# Imports from RFC 5652
+
+CertificateSet = rfc5652.CertificateSet
+
+KEKIdentifier = rfc5652.KEKIdentifier
+
+RecipientInfos = rfc5652.RecipientInfos
+
+
+# Imports from RFC 5751
+
+SMIMECapability = rfc5751.SMIMECapability
+
+
+# Imports from RFC 5755
+
+AttributeCertificate = rfc5755.AttributeCertificate
+
+
+# The GL symmetric key distribution object identifier arc
+
+id_skd = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 8,))
+
+
+# The GL Use KEK control attribute
+
+id_skd_glUseKEK = id_skd + (1,)
+
+
+class Certificates(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pKC',
+ Certificate().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('aC',
+ univ.SequenceOf(componentType=AttributeCertificate()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('certPath',
+ CertificateSet().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class GLInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glAddress', GeneralName())
+ )
+
+
+class GLOwnerInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glOwnerName', GeneralName()),
+ namedtype.NamedType('glOwnerAddress', GeneralName()),
+ namedtype.OptionalNamedType('certificates', Certificates())
+ )
+
+
+class GLAdministration(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('unmanaged', 0),
+ ('managed', 1),
+ ('closed', 2)
+ )
+
+
+requested_algorithm = SMIMECapability().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
+requested_algorithm['capabilityID'] = id_aes128_wrap
+
+
+class GLKeyAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('rekeyControlledByGLO',
+ univ.Boolean().subtype(value=0,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('recipientsNotMutuallyAware',
+ univ.Boolean().subtype(value=1,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('duration',
+ univ.Integer().subtype(value=0,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.DefaultedNamedType('generationCounter',
+ univ.Integer().subtype(value=2,
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('requestedAlgorithm', requested_algorithm)
+ )
+
+
+class GLUseKEK(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glInfo', GLInfo()),
+ namedtype.NamedType('glOwnerInfo',
+ univ.SequenceOf(componentType=GLOwnerInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.DefaultedNamedType('glAdministration',
+ GLAdministration().subtype(value=1)),
+ namedtype.OptionalNamedType('glKeyAttributes', GLKeyAttributes())
+ )
+
+
+# The Delete GL control attribute
+
+id_skd_glDelete = id_skd + (2,)
+
+
+class DeleteGL(GeneralName):
+ pass
+
+
+# The Add GL Member control attribute
+
+id_skd_glAddMember = id_skd + (3,)
+
+
+class GLMember(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glMemberName', GeneralName()),
+ namedtype.OptionalNamedType('glMemberAddress', GeneralName()),
+ namedtype.OptionalNamedType('certificates', Certificates())
+ )
+
+
+class GLAddMember(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glMember', GLMember())
+ )
+
+
+# The Delete GL Member control attribute
+
+id_skd_glDeleteMember = id_skd + (4,)
+
+
+class GLDeleteMember(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glMemberToDelete', GeneralName())
+ )
+
+
+# The GL Rekey control attribute
+
+id_skd_glRekey = id_skd + (5,)
+
+
+class GLNewKeyAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('rekeyControlledByGLO',
+ univ.Boolean().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('recipientsNotMutuallyAware',
+ univ.Boolean().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('duration',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generationCounter',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('requestedAlgorithm',
+ AlgorithmIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class GLRekey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.OptionalNamedType('glAdministration', GLAdministration()),
+ namedtype.OptionalNamedType('glNewKeyAttributes', GLNewKeyAttributes()),
+ namedtype.OptionalNamedType('glRekeyAllGLKeys', univ.Boolean())
+ )
+
+
+# The Add and Delete GL Owner control attributes
+
+id_skd_glAddOwner = id_skd + (6,)
+
+id_skd_glRemoveOwner = id_skd + (7,)
+
+
+class GLOwnerAdministration(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glOwnerInfo', GLOwnerInfo())
+ )
+
+
+# The GL Key Compromise control attribute
+
+id_skd_glKeyCompromise = id_skd + (8,)
+
+
+class GLKCompromise(GeneralName):
+ pass
+
+
+# The GL Key Refresh control attribute
+
+id_skd_glkRefresh = id_skd + (9,)
+
+
+class Date(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('start', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('end', useful.GeneralizedTime())
+ )
+
+
+class GLKRefresh(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('dates',
+ univ.SequenceOf(componentType=Date()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+# The GLA Query Request control attribute
+
+id_skd_glaQueryRequest = id_skd + (11,)
+
+
+class GLAQueryRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glaRequestType', univ.ObjectIdentifier()),
+ namedtype.NamedType('glaRequestValue', univ.Any(),
+ openType=opentype.OpenType('glaRequestType', glaQueryRRMap))
+ )
+
+
+# The GLA Query Response control attribute
+
+id_skd_glaQueryResponse = id_skd + (12,)
+
+
+class GLAQueryResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glaResponseType', univ.ObjectIdentifier()),
+ namedtype.NamedType('glaResponseValue', univ.Any(),
+ openType=opentype.OpenType('glaResponseType', glaQueryRRMap))
+ )
+
+
+# The GLA Request/Response (glaRR) arc for glaRequestType/glaResponseType
+
+id_cmc_glaRR = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 7, 99,))
+
+
+# The Algorithm Request
+
+id_cmc_gla_skdAlgRequest = id_cmc_glaRR + (1,)
+
+
+class SKDAlgRequest(univ.Null):
+ pass
+
+
+# The Algorithm Response
+
+id_cmc_gla_skdAlgResponse = id_cmc_glaRR + (2,)
+
+SMIMECapabilities = rfc5751.SMIMECapabilities
+
+
+# The control attribute to request an updated certificate to the GLA and
+# the control attribute to return an updated certificate to the GLA
+
+id_skd_glProvideCert = id_skd + (13,)
+
+id_skd_glManageCert = id_skd + (14,)
+
+
+class GLManageCert(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glMember', GLMember())
+ )
+
+
+# The control attribute to distribute the GL shared KEK
+
+id_skd_glKey = id_skd + (15,)
+
+
+class GLKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('glName', GeneralName()),
+ namedtype.NamedType('glIdentifier', KEKIdentifier()),
+ namedtype.NamedType('glkWrapped', RecipientInfos()),
+ namedtype.NamedType('glkAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('glkNotBefore', useful.GeneralizedTime()),
+ namedtype.NamedType('glkNotAfter', useful.GeneralizedTime())
+ )
+
+
+# The CMC error types
+
+id_cet_skdFailInfo = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 15, 1,))
+
+
+class SKDFailInfo(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('closedGL', 1),
+ ('unsupportedDuration', 2),
+ ('noGLACertificate', 3),
+ ('invalidCert', 4),
+ ('unsupportedAlgorithm', 5),
+ ('noGLONameMatch', 6),
+ ('invalidGLName', 7),
+ ('nameAlreadyInUse', 8),
+ ('noSpam', 9),
+ ('alreadyAMember', 11),
+ ('notAMember', 12),
+ ('alreadyAnOwner', 13),
+ ('notAnOwner', 14)
+ )
+
+
+# Update the map for GLAQueryRequests and GLAQueryResponses
+
+_glaQueryRRMapUpdate = {
+ id_cmc_gla_skdAlgRequest: univ.Null(""),
+ id_cmc_gla_skdAlgResponse: SMIMECapabilities(),
+}
+
+glaQueryRRMap.update(_glaQueryRRMapUpdate)
+
+
+# Update the map for CMC control attributes; since CMS Attributes and
+# CMC Controls both use 'attrType', one map is used for both
+
+_cmcControlAttributesMapUpdate = {
+ id_skd_glUseKEK: GLUseKEK(),
+ id_skd_glDelete: DeleteGL(),
+ id_skd_glAddMember: GLAddMember(),
+ id_skd_glDeleteMember: GLDeleteMember(),
+ id_skd_glRekey: GLRekey(),
+ id_skd_glAddOwner: GLOwnerAdministration(),
+ id_skd_glRemoveOwner: GLOwnerAdministration(),
+ id_skd_glKeyCompromise: GLKCompromise(),
+ id_skd_glkRefresh: GLKRefresh(),
+ id_skd_glaQueryRequest: GLAQueryRequest(),
+ id_skd_glaQueryResponse: GLAQueryResponse(),
+ id_skd_glProvideCert: GLManageCert(),
+ id_skd_glManageCert: GLManageCert(),
+ id_skd_glKey: GLKey(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmcControlAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5280.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5280.py
new file mode 100644
index 0000000000..ed5d28f751
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5280.py
@@ -0,0 +1,1658 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Updated by Russ Housley for ORAddress Extension Attribute opentype support.
+# Updated by Russ Housley for AlgorithmIdentifier opentype support.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate and Certificate
+# Revocation List (CRL) Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5280.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+ub_e163_4_sub_address_length = univ.Integer(40)
+
+ub_e163_4_number_length = univ.Integer(15)
+
+unformatted_postal_address = univ.Integer(16)
+
+
+class TerminalType(univ.Integer):
+ pass
+
+
+TerminalType.namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+)
+
+
+class Extension(univ.Sequence):
+ pass
+
+
+Extension.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('extnValue', univ.OctetString())
+)
+
+
+class Extensions(univ.SequenceOf):
+ pass
+
+
+Extensions.componentType = Extension()
+Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+physical_delivery_personal_name = univ.Integer(13)
+
+ub_unformatted_address_length = univ.Integer(180)
+
+ub_pds_parameter_length = univ.Integer(30)
+
+ub_pds_physical_address_lines = univ.Integer(6)
+
+
+class UnformattedPostalAddress(univ.Set):
+ pass
+
+
+UnformattedPostalAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+)
+
+ub_organization_name = univ.Integer(64)
+
+
+class X520OrganizationName(univ.Choice):
+ pass
+
+
+X520OrganizationName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+)
+
+ub_x121_address_length = univ.Integer(16)
+
+pds_name = univ.Integer(7)
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_kp = _buildOid(id_pkix, 3)
+
+ub_postal_code_length = univ.Integer(16)
+
+
+class PostalCode(univ.Choice):
+ pass
+
+
+PostalCode.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+)
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+unique_postal_name = univ.Integer(20)
+
+
+class DomainComponent(char.IA5String):
+ pass
+
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+ub_match = univ.Integer(128)
+
+id_at = _buildOid(2, 5, 4)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+id_at_organizationalUnitName = _buildOid(id_at, 11)
+
+terminal_type = univ.Integer(23)
+
+
+class PDSParameter(univ.Set):
+ pass
+
+
+PDSParameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+ub_surname_length = univ.Integer(40)
+
+id_ad = _buildOid(id_pkix, 48)
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+ub_domain_defined_attributes = univ.Integer(4)
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
+TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+extended_network_address = univ.Integer(22)
+
+ub_locality_name = univ.Integer(128)
+
+
+class X520LocalityName(univ.Choice):
+ pass
+
+
+X520LocalityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+)
+
+teletex_organization_name = univ.Integer(3)
+
+ub_given_name_length = univ.Integer(16)
+
+ub_initials_length = univ.Integer(5)
+
+
+class PersonalName(univ.Set):
+ pass
+
+
+PersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+id_at_generationQualifier = _buildOid(id_at, 44)
+
+
+class Version(univ.Integer):
+ pass
+
+
+Version.namedValues = namedval.NamedValues(
+ ('v1', 0),
+ ('v2', 1),
+ ('v3', 2)
+)
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+algorithmIdentifierMap = {}
+
+
+class AlgorithmIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any(),
+ openType=opentype.OpenType('algorithm', algorithmIdentifierMap)
+ )
+ )
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+certificateAttributesMap = {}
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType(
+ 'value', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap)
+ )
+ )
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ pass
+
+
+RelativeDistinguishedName.componentType = AttributeTypeAndValue()
+RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class RDNSequence(univ.SequenceOf):
+ pass
+
+
+RDNSequence.componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ pass
+
+
+Name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rdnSequence', RDNSequence())
+)
+
+
+class TBSCertList(univ.Sequence):
+ pass
+
+
+TBSCertList.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType(
+ 'revokedCertificates', univ.SequenceOf(
+ componentType=univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ )
+ )
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class CertificateList(univ.Sequence):
+ pass
+
+
+CertificateList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+ub_extension_attributes = univ.Integer(256)
+
+certificateExtensionsMap = {
+}
+
+oraddressExtensionAttributeMap = {
+}
+
+
+class ExtensionAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'extension-attribute-type',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType(
+ 'extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)),
+ openType=opentype.OpenType('extension-attribute-type', oraddressExtensionAttributeMap))
+ )
+
+id_qt = _buildOid(id_pkix, 2)
+
+id_qt_cps = _buildOid(id_qt, 1)
+
+id_at_stateOrProvinceName = _buildOid(id_at, 8)
+
+id_at_title = _buildOid(id_at, 12)
+
+id_at_serialNumber = _buildOid(id_at, 5)
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+poste_restante_address = univ.Integer(19)
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Validity(univ.Sequence):
+ pass
+
+
+Validity.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+)
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ pass
+
+
+SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+)
+
+
+class TBSCertificate(univ.Sequence):
+ pass
+
+
+TBSCertificate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value="v1")),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+physical_delivery_office_name = univ.Integer(10)
+
+ub_name = univ.Integer(32768)
+
+
+class X520name(univ.Choice):
+ pass
+
+
+X520name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+)
+
+id_at_dnQualifier = _buildOid(id_at, 46)
+
+ub_serial_number = univ.Integer(64)
+
+ub_pseudonym = univ.Integer(128)
+
+pkcs_9 = _buildOid(1, 2, 840, 113549, 1, 9)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+ub_integer_options = univ.Integer(256)
+
+id_at_commonName = _buildOid(id_at, 3)
+
+ub_organization_name_length = univ.Integer(64)
+
+id_ad_ocsp = _buildOid(id_ad, 1)
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ pass
+
+
+PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+id_emailAddress = _buildOid(pkcs_9, 1)
+
+common_name = univ.Integer(1)
+
+
+class X520Pseudonym(univ.Choice):
+ pass
+
+
+X520Pseudonym.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
+)
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class PresentationAddress(univ.Sequence):
+ pass
+
+
+PresentationAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ pass
+
+
+ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'e163-4-address', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+id_ad_caIssuers = _buildOid(id_ad, 2)
+
+id_at_countryName = _buildOid(id_at, 6)
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+postal_code = univ.Integer(9)
+
+id_at_givenName = _buildOid(id_at, 42)
+
+ub_title = univ.Integer(64)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+ub_emailaddress_length = univ.Integer(255)
+
+id_ad_caRepository = _buildOid(id_ad, 5)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+ub_organizational_unit_name = univ.Integer(64)
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ pass
+
+
+X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class X520Title(univ.Choice):
+ pass
+
+
+X520Title.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+)
+
+id_at_localityName = _buildOid(id_at, 7)
+
+id_at_initials = _buildOid(id_at, 43)
+
+ub_state_name = univ.Integer(128)
+
+
+class X520StateOrProvinceName(univ.Choice):
+ pass
+
+
+X520StateOrProvinceName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+)
+
+physical_delivery_organization_name = univ.Integer(14)
+
+id_at_surname = _buildOid(id_at, 4)
+
+
+class X520countryName(char.PrintableString):
+ pass
+
+
+X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+
+physical_delivery_office_number = univ.Integer(11)
+
+id_qt_unotice = _buildOid(id_qt, 2)
+
+
+class X520SerialNumber(char.PrintableString):
+ pass
+
+
+X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+ub_common_name = univ.Integer(64)
+
+id_pe = _buildOid(id_pkix, 1)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+class EmailAddress(char.IA5String):
+ pass
+
+
+EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+id_at_organizationName = _buildOid(id_at, 10)
+
+post_office_box_address = univ.Integer(18)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+id_at_pseudonym = _buildOid(id_at, 65)
+
+id_domainComponent = _buildOid(0, 9, 2342, 19200300, 100, 1, 25)
+
+
+class X520CommonName(univ.Choice):
+ pass
+
+
+X520CommonName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+)
+
+extension_OR_address_components = univ.Integer(12)
+
+ub_organizational_units = univ.Integer(4)
+
+teletex_personal_name = univ.Integer(4)
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+ub_common_name_length = univ.Integer(64)
+
+
+class TeletexCommonName(char.TeletexString):
+ pass
+
+
+TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class CountryName(univ.Choice):
+ pass
+
+
+CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+CountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ pass
+
+
+BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+)
+
+
+class ORAddress(univ.Sequence):
+ pass
+
+
+ORAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+)
+
+
+class DistinguishedName(RDNSequence):
+ pass
+
+
+id_ad_timeStamping = _buildOid(id_ad, 3)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+physical_delivery_country_name = univ.Integer(8)
+
+ub_pds_name_length = univ.Integer(16)
+
+
+class PDSName(char.PrintableString):
+ pass
+
+
+PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+class TeletexPersonalName(univ.Set):
+ pass
+
+
+TeletexPersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+street_address = univ.Integer(17)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+local_postal_attributes = univ.Integer(21)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+teletex_common_name = univ.Integer(2)
+
+
+class CommonName(char.PrintableString):
+ pass
+
+
+CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class Certificate(univ.Sequence):
+ pass
+
+
+Certificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+id_at_name = _buildOid(id_at, 41)
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
+TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+id_ce = _buildOid(2, 5, 29)
+
+id_ce_issuerAltName = _buildOid(id_ce, 18)
+
+
+class SkipCerts(univ.Integer):
+ pass
+
+
+SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class CRLReason(univ.Enumerated):
+ pass
+
+
+CRLReason.namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+)
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ pass
+
+
+PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+anotherNameMap = {
+
+}
+
+
+class AnotherName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType(
+ 'value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('type-id', anotherNameMap)
+ )
+ )
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('partyName', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress',
+ univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+class BaseDistance(univ.Integer):
+ pass
+
+
+BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ pass
+
+
+GeneralSubtree.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class DistributionPointName(univ.Choice):
+ pass
+
+
+DistributionPointName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName',
+ GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ReasonFlags(univ.BitString):
+ pass
+
+
+ReasonFlags.namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('privilegeWithdrawn', 7),
+ ('aACompromise', 8)
+)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ pass
+
+
+IssuingDistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
+)
+
+id_ce_certificatePolicies = _buildOid(id_ce, 32)
+
+id_kp_emailProtection = _buildOid(id_kp, 4)
+
+
+class AccessDescription(univ.Sequence):
+ pass
+
+
+AccessDescription.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+)
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+id_ce_cRLDistributionPoints = _buildOid(id_ce, 31)
+
+holdInstruction = _buildOid(2, 2, 840, 10040, 2)
+
+id_holdinstruction_callissuer = _buildOid(holdInstruction, 2)
+
+id_ce_subjectDirectoryAttributes = _buildOid(id_ce, 9)
+
+id_ce_issuingDistributionPoint = _buildOid(id_ce, 28)
+
+
+class DistributionPoint(univ.Sequence):
+ pass
+
+
+DistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class CRLDistributionPoints(univ.SequenceOf):
+ pass
+
+
+CRLDistributionPoints.componentType = DistributionPoint()
+CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ pass
+
+
+GeneralSubtrees.componentType = GeneralSubtree()
+GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ pass
+
+
+NameConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ pass
+
+
+SubjectDirectoryAttributes.componentType = Attribute()
+SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_OCSPSigning = _buildOid(id_kp, 9)
+
+id_kp_timeStamping = _buildOid(id_kp, 8)
+
+
+class DisplayText(univ.Choice):
+ pass
+
+
+DisplayText.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+)
+
+
+class NoticeReference(univ.Sequence):
+ pass
+
+
+NoticeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+)
+
+
+class UserNotice(univ.Sequence):
+ pass
+
+
+UserNotice.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+)
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+policyQualifierInfoMap = {
+
+}
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType(
+ 'qualifier', univ.Any(),
+ openType=opentype.OpenType('policyQualifierId', policyQualifierInfoMap)
+ )
+ )
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyInformation(univ.Sequence):
+ pass
+
+
+PolicyInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
+)
+
+
+class CertificatePolicies(univ.SequenceOf):
+ pass
+
+
+CertificatePolicies.componentType = PolicyInformation()
+CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+id_ce_basicConstraints = _buildOid(id_ce, 19)
+
+id_ce_authorityKeyIdentifier = _buildOid(id_ce, 35)
+
+id_kp_codeSigning = _buildOid(id_kp, 3)
+
+
+class BasicConstraints(univ.Sequence):
+ pass
+
+
+BasicConstraints.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+id_ce_certificateIssuer = _buildOid(id_ce, 29)
+
+
+class PolicyMappings(univ.SequenceOf):
+ pass
+
+
+PolicyMappings.componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+ )
+)
+
+PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class InhibitAnyPolicy(SkipCerts):
+ pass
+
+
+anyPolicy = _buildOid(id_ce_certificatePolicies, 0)
+
+
+class CRLNumber(univ.Integer):
+ pass
+
+
+CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+id_ce_nameConstraints = _buildOid(id_ce, 30)
+
+id_kp_serverAuth = _buildOid(id_kp, 1)
+
+id_ce_freshestCRL = _buildOid(id_ce, 46)
+
+id_ce_cRLReasons = _buildOid(id_ce, 21)
+
+id_ce_extKeyUsage = _buildOid(id_ce, 37)
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ pass
+
+
+AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class FreshestCRL(CRLDistributionPoints):
+ pass
+
+
+id_ce_policyConstraints = _buildOid(id_ce, 36)
+
+id_pe_authorityInfoAccess = _buildOid(id_pe, 1)
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+AuthorityInfoAccessSyntax.componentType = AccessDescription()
+AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_holdinstruction_none = _buildOid(holdInstruction, 1)
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+id_pe_subjectInfoAccess = _buildOid(id_pe, 11)
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_ce_subjectAltName = _buildOid(id_ce, 17)
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ pass
+
+
+ExtKeyUsageSyntax.componentType = KeyPurposeId()
+ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+id_ce_deltaCRLIndicator = _buildOid(id_ce, 27)
+
+id_ce_keyUsage = _buildOid(id_ce, 15)
+
+id_ce_holdInstructionCode = _buildOid(id_ce, 23)
+
+
+class SubjectInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+SubjectInfoAccessSyntax.componentType = AccessDescription()
+SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+class KeyUsage(univ.BitString):
+ pass
+
+
+KeyUsage.namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+)
+
+id_ce_invalidityDate = _buildOid(id_ce, 24)
+
+id_ce_policyMappings = _buildOid(id_ce, 33)
+
+anyExtendedKeyUsage = _buildOid(id_ce_extKeyUsage, 0)
+
+id_ce_privateKeyUsagePeriod = _buildOid(id_ce, 16)
+
+id_ce_cRLNumber = _buildOid(id_ce, 20)
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+id_holdinstruction_reject = _buildOid(holdInstruction, 3)
+
+
+class PolicyConstraints(univ.Sequence):
+ pass
+
+
+PolicyConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_kp_clientAuth = _buildOid(id_kp, 2)
+
+id_ce_subjectKeyIdentifier = _buildOid(id_ce, 14)
+
+id_ce_inhibitAnyPolicy = _buildOid(id_ce, 54)
+
+# map of ORAddress ExtensionAttribute type to ExtensionAttribute value
+
+_oraddressExtensionAttributeMapUpdate = {
+ common_name: CommonName(),
+ teletex_common_name: TeletexCommonName(),
+ teletex_organization_name: TeletexOrganizationName(),
+ teletex_personal_name: TeletexPersonalName(),
+ teletex_organizational_unit_names: TeletexOrganizationalUnitNames(),
+ pds_name: PDSName(),
+ physical_delivery_country_name: PhysicalDeliveryCountryName(),
+ postal_code: PostalCode(),
+ physical_delivery_office_name: PhysicalDeliveryOfficeName(),
+ physical_delivery_office_number: PhysicalDeliveryOfficeNumber(),
+ extension_OR_address_components: ExtensionORAddressComponents(),
+ physical_delivery_personal_name: PhysicalDeliveryPersonalName(),
+ physical_delivery_organization_name: PhysicalDeliveryOrganizationName(),
+ extension_physical_delivery_address_components: ExtensionPhysicalDeliveryAddressComponents(),
+ unformatted_postal_address: UnformattedPostalAddress(),
+ street_address: StreetAddress(),
+ post_office_box_address: PostOfficeBoxAddress(),
+ poste_restante_address: PosteRestanteAddress(),
+ unique_postal_name: UniquePostalName(),
+ local_postal_attributes: LocalPostalAttributes(),
+ extended_network_address: ExtendedNetworkAddress(),
+ terminal_type: TerminalType(),
+ teletex_domain_defined_attributes: TeletexDomainDefinedAttributes(),
+}
+
+oraddressExtensionAttributeMap.update(_oraddressExtensionAttributeMapUpdate)
+
+
+# map of AttributeType -> AttributeValue
+
+_certificateAttributesMapUpdate = {
+ id_at_name: X520name(),
+ id_at_surname: X520name(),
+ id_at_givenName: X520name(),
+ id_at_initials: X520name(),
+ id_at_generationQualifier: X520name(),
+ id_at_commonName: X520CommonName(),
+ id_at_localityName: X520LocalityName(),
+ id_at_stateOrProvinceName: X520StateOrProvinceName(),
+ id_at_organizationName: X520OrganizationName(),
+ id_at_organizationalUnitName: X520OrganizationalUnitName(),
+ id_at_title: X520Title(),
+ id_at_dnQualifier: X520dnQualifier(),
+ id_at_countryName: X520countryName(),
+ id_at_serialNumber: X520SerialNumber(),
+ id_at_pseudonym: X520Pseudonym(),
+ id_domainComponent: DomainComponent(),
+ id_emailAddress: EmailAddress(),
+}
+
+certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# map of Certificate Extension OIDs to Extensions
+
+_certificateExtensionsMap = {
+ id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
+ id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
+ id_ce_keyUsage: KeyUsage(),
+ id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
+ id_ce_certificatePolicies: CertificatePolicies(),
+ id_ce_policyMappings: PolicyMappings(),
+ id_ce_subjectAltName: SubjectAltName(),
+ id_ce_issuerAltName: IssuerAltName(),
+ id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
+ id_ce_basicConstraints: BasicConstraints(),
+ id_ce_nameConstraints: NameConstraints(),
+ id_ce_policyConstraints: PolicyConstraints(),
+ id_ce_extKeyUsage: ExtKeyUsageSyntax(),
+ id_ce_cRLDistributionPoints: CRLDistributionPoints(),
+ id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
+ id_ce_cRLNumber: univ.Integer(),
+ id_ce_deltaCRLIndicator: BaseCRLNumber(),
+ id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
+ id_ce_cRLReasons: CRLReason(),
+ id_ce_holdInstructionCode: univ.ObjectIdentifier(),
+ id_ce_invalidityDate: useful.GeneralizedTime(),
+ id_ce_certificateIssuer: GeneralNames(),
+}
+
+certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5480.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5480.py
new file mode 100644
index 0000000000..84c0c11b88
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5480.py
@@ -0,0 +1,190 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography Subject Public Key Information
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5480.txt
+
+
+# What can be imported from rfc4055.py ?
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+
+
+# These structures are the same as RFC 3279.
+
+DHPublicKey = rfc3279.DHPublicKey
+
+DSAPublicKey = rfc3279.DSAPublicKey
+
+ValidationParms = rfc3279.ValidationParms
+
+DomainParameters = rfc3279.DomainParameters
+
+ECDSA_Sig_Value = rfc3279.ECDSA_Sig_Value
+
+ECPoint = rfc3279.ECPoint
+
+KEA_Parms_Id = rfc3279.KEA_Parms_Id
+
+RSAPublicKey = rfc3279.RSAPublicKey
+
+
+# RFC 5480 changed the names of these structures from RFC 3279.
+
+DSS_Parms = rfc3279.Dss_Parms
+
+DSA_Sig_Value = rfc3279.Dss_Sig_Value
+
+
+# RFC 3279 defines a more complex alternative for ECParameters.
+# RFC 5480 narrows the definition to a single CHOICE: namedCurve.
+
+class ECParameters(univ.Choice):
+ pass
+
+ECParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('namedCurve', univ.ObjectIdentifier())
+)
+
+
+# OIDs for Message Digest Algorithms
+
+id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2')
+
+id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5')
+
+id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
+
+id_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.4')
+
+id_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1')
+
+id_sha384 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2')
+
+id_sha512 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.3')
+
+
+# OID for RSA PK Algorithm and Key
+
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+
+
+# OID for DSA PK Algorithm, Key, and Parameters
+
+id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
+
+
+# OID for Diffie-Hellman PK Algorithm, Key, and Parameters
+
+dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
+
+# OID for KEA PK Algorithm and Parameters
+
+id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22')
+
+
+# OIDs for Elliptic Curve Algorithm ID, Key, and Parameters
+# Note that ECDSA keys always use this OID
+
+id_ecPublicKey = univ.ObjectIdentifier('1.2.840.10045.2.1')
+
+id_ecDH = univ.ObjectIdentifier('1.3.132.1.12')
+
+id_ecMQV = univ.ObjectIdentifier('1.3.132.1.13')
+
+
+# OIDs for RSA Signature Algorithms
+
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+
+
+# OIDs for DSA Signature Algorithms
+
+id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
+
+id_dsa_with_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.1')
+
+id_dsa_with_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.2')
+
+
+# OIDs for ECDSA Signature Algorithms
+
+ecdsa_with_SHA1 = univ.ObjectIdentifier('1.2.840.10045.4.1')
+
+ecdsa_with_SHA224 = univ.ObjectIdentifier('1.2.840.10045.4.3.1')
+
+ecdsa_with_SHA256 = univ.ObjectIdentifier('1.2.840.10045.4.3.2')
+
+ecdsa_with_SHA384 = univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+
+ecdsa_with_SHA512 = univ.ObjectIdentifier('1.2.840.10045.4.3.4')
+
+
+# OIDs for Named Elliptic Curves
+
+secp192r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.1')
+
+sect163k1 = univ.ObjectIdentifier('1.3.132.0.1')
+
+sect163r2 = univ.ObjectIdentifier('1.3.132.0.15')
+
+secp224r1 = univ.ObjectIdentifier('1.3.132.0.33')
+
+sect233k1 = univ.ObjectIdentifier('1.3.132.0.26')
+
+sect233r1 = univ.ObjectIdentifier('1.3.132.0.27')
+
+secp256r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.7')
+
+sect283k1 = univ.ObjectIdentifier('1.3.132.0.16')
+
+sect283r1 = univ.ObjectIdentifier('1.3.132.0.17')
+
+secp384r1 = univ.ObjectIdentifier('1.3.132.0.34')
+
+sect409k1 = univ.ObjectIdentifier('1.3.132.0.36')
+
+sect409r1 = univ.ObjectIdentifier('1.3.132.0.37')
+
+secp521r1 = univ.ObjectIdentifier('1.3.132.0.35')
+
+sect571k1 = univ.ObjectIdentifier('1.3.132.0.38')
+
+sect571r1 = univ.ObjectIdentifier('1.3.132.0.39')
+
+
+# Map of Algorithm Identifier OIDs to Parameters
+# The algorithm is not included if the parameters MUST be absent
+
+_algorithmIdentifierMapUpdate = {
+ rsaEncryption: univ.Null(),
+ md2WithRSAEncryption: univ.Null(),
+ md5WithRSAEncryption: univ.Null(),
+ sha1WithRSAEncryption: univ.Null(),
+ id_dsa: DSS_Parms(),
+ dhpublicnumber: DomainParameters(),
+ id_keyExchangeAlgorithm: KEA_Parms_Id(),
+ id_ecPublicKey: ECParameters(),
+ id_ecDH: ECParameters(),
+ id_ecMQV: ECParameters(),
+}
+
+
+# Add these Algorithm Identifier map entries to the ones in rfc5280.py
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5636.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5636.py
new file mode 100644
index 0000000000..f87bc4ec82
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5636.py
@@ -0,0 +1,113 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Traceable Anonymous Certificate
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5636.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+EncapsulatedContentInfo = rfc5652.EncapsulatedContentInfo
+
+id_data = rfc5652.id_data
+
+
+# Object Identifiers
+
+id_KISA = univ.ObjectIdentifier((1, 2, 410, 200004,))
+
+
+id_npki = id_KISA + (10,)
+
+
+id_attribute = id_npki + (1,)
+
+
+id_kisa_tac = id_attribute + (1,)
+
+
+id_kisa_tac_token = id_kisa_tac + (1,)
+
+
+id_kisa_tac_tokenandblindbash = id_kisa_tac + (2,)
+
+
+id_kisa_tac_tokenandpartially = id_kisa_tac + (3,)
+
+
+# Structures for Traceable Anonymous Certificate (TAC)
+
+class UserKey(univ.OctetString):
+ pass
+
+
+class Timeout(useful.GeneralizedTime):
+ pass
+
+
+class BlinedCertificateHash(univ.OctetString):
+ pass
+
+
+class PartiallySignedCertificateHash(univ.OctetString):
+ pass
+
+
+class Token(ContentInfo):
+ pass
+
+
+class TokenandBlindHash(ContentInfo):
+ pass
+
+
+class TokenandPartiallySignedCertificateHash(ContentInfo):
+ pass
+
+
+# Added to the module in RFC 5636 for the CMS Content Type Map
+
+class TACToken(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userKey', UserKey()),
+ namedtype.NamedType('timeout', Timeout())
+ )
+
+
+class TACTokenandBlindHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('token', Token()),
+ namedtype.NamedType('blinded', BlinedCertificateHash())
+ )
+
+
+class TACTokenandPartiallySignedCertificateHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('token', Token()),
+ namedtype.NamedType('partially', PartiallySignedCertificateHash())
+ )
+
+
+# Add to the CMS Content Type Map in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_kisa_tac_token: TACToken(),
+ id_kisa_tac_tokenandblindbash: TACTokenandBlindHash(),
+ id_kisa_tac_tokenandpartially: TACTokenandPartiallySignedCertificateHash(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5639.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5639.py
new file mode 100644
index 0000000000..d48d30044b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5639.py
@@ -0,0 +1,49 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography Brainpool Standard Curves
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5639.txt
+
+
+from pyasn1.type import univ
+
+
+ecStdCurvesAndGeneration = univ.ObjectIdentifier((1, 3, 36, 3, 3, 2, 8,))
+
+ellipticCurve = ecStdCurvesAndGeneration + (1,)
+
+versionOne = ellipticCurve + (1,)
+
+brainpoolP160r1 = versionOne + (1,)
+
+brainpoolP160t1 = versionOne + (2,)
+
+brainpoolP192r1 = versionOne + (3,)
+
+brainpoolP192t1 = versionOne + (4,)
+
+brainpoolP224r1 = versionOne + (5,)
+
+brainpoolP224t1 = versionOne + (6,)
+
+brainpoolP256r1 = versionOne + (7,)
+
+brainpoolP256t1 = versionOne + (8,)
+
+brainpoolP320r1 = versionOne + (9,)
+
+brainpoolP320t1 = versionOne + (10,)
+
+brainpoolP384r1 = versionOne + (11,)
+
+brainpoolP384t1 = versionOne + (12,)
+
+brainpoolP512r1 = versionOne + (13,)
+
+brainpoolP512t1 = versionOne + (14,)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5649.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5649.py
new file mode 100644
index 0000000000..84809eeb18
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5649.py
@@ -0,0 +1,33 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES Key Wrap with Padding
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5649.txt
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5')
+
+id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25')
+
+id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45')
+
+
+id_aes128_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.8')
+
+id_aes192_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.28')
+
+id_aes256_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.48')
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5652.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5652.py
new file mode 100644
index 0000000000..1e958293df
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5652.py
@@ -0,0 +1,761 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add support for opentypes.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc5652.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3281
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+cmsContentTypesMap = { }
+
+cmsAttributesMap = { }
+
+otherKeyAttributesMap = { }
+
+otherCertFormatMap = { }
+
+otherRevInfoFormatMap = { }
+
+otherRecipientInfoMap = { }
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
+ namedtype.NamedType(
+ 'subject', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subjectName', rfc5280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('issuer', rfc5280.GeneralNames()),
+ namedtype.NamedType('signature', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc5280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc5280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc5280.Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmsAttributesMap)
+ )
+)
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class AttributeCertificateV2(rfc3281.AttributeCertificate):
+ pass
+
+
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any(),
+ openType=opentype.OpenType('keyAttrId', otherKeyAttributesMap)
+ )
+)
+
+
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class MessageAuthenticationCode(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any(),
+ openType=opentype.OpenType('otherCertFormat', otherCertFormatMap)
+ )
+)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any(),
+ openType=opentype.OpenType('otherRevInfoFormat', otherRevInfoFormatMap)
+ )
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', rfc5280.CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any(),
+ openType=opentype.OpenType('oriType', otherRecipientInfoMap)
+ )
+)
+
+
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DigestAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
+
+id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
+
+id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
+
+
+class MessageAuthenticationCodeAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+class Countersignature(SignerInfo):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('contentType', cmsContentTypesMap)
+ )
+)
+
+
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
+
+id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+class AuthAttributes(univ.SetOf):
+ pass
+
+
+AuthAttributes.componentType = Attribute()
+AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class AuthenticatedData(univ.Sequence):
+ pass
+
+
+AuthenticatedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
+ namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('mac', MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class DigestedData(univ.Sequence):
+ pass
+
+
+DigestedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.NamedType('digest', Digest())
+)
+
+id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
+
+
+class SigningTime(Time):
+ pass
+
+
+id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
+
+
+# CMS Content Type Map
+
+_cmsContentTypesMapUpdate = {
+ id_ct_contentInfo: ContentInfo(),
+ id_data: univ.OctetString(),
+ id_signedData: SignedData(),
+ id_envelopedData: EnvelopedData(),
+ id_digestedData: DigestedData(),
+ id_encryptedData: EncryptedData(),
+ id_ct_authData: AuthenticatedData(),
+}
+
+cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ id_contentType: ContentType(),
+ id_messageDigest: MessageDigest(),
+ id_signingTime: SigningTime(),
+ id_countersignature: Countersignature(),
+}
+
+cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5697.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5697.py
new file mode 100644
index 0000000000..8c5a9d3ecf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5697.py
@@ -0,0 +1,70 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Other Certificates Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5697.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4055
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+GeneralNames = rfc5280.GeneralNames
+
+
+# Imports from RFC 4055
+
+id_sha1 = rfc4055.id_sha1
+
+
+# Imports from RFC 5055
+# These are defined here because a module for RFC 5055 does not exist yet
+
+class SCVPIssuerSerial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+sha1_alg_id = AlgorithmIdentifier()
+sha1_alg_id['algorithm'] = id_sha1
+
+
+class SCVPCertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', univ.OctetString()),
+ namedtype.NamedType('issuerSerial', SCVPIssuerSerial()),
+ namedtype.DefaultedNamedType('hashAlgorithm', sha1_alg_id)
+ )
+
+
+# Other Certificates Extension
+
+id_pe_otherCerts = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 1, 19,))
+
+class OtherCertificates(univ.SequenceOf):
+ componentType = SCVPCertID()
+
+
+# Update of certificate extension map in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_otherCerts: OtherCertificates(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5751.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5751.py
new file mode 100644
index 0000000000..7e200012c6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5751.py
@@ -0,0 +1,124 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# S/MIME Version 3.2 Message Specification
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5751.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8018
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 5652 and RFC 8018
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+RecipientKeyIdentifier = rfc5652.RecipientKeyIdentifier
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+rc2CBC = rfc8018.rc2CBC
+
+
+# S/MIME Capabilities Attribute
+
+smimeCapabilities = univ.ObjectIdentifier('1.2.840.113549.1.9.15')
+
+
+smimeCapabilityMap = { }
+
+
+class SMIMECapability(univ.Sequence):
+ pass
+
+SMIMECapability.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('capabilityID', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any(),
+ openType=opentype.OpenType('capabilityID', smimeCapabilityMap))
+)
+
+
+class SMIMECapabilities(univ.SequenceOf):
+ pass
+
+SMIMECapabilities.componentType = SMIMECapability()
+
+
+class SMIMECapabilitiesParametersForRC2CBC(univ.Integer):
+ # which carries the RC2 Key Length (number of bits)
+ pass
+
+
+# S/MIME Encryption Key Preference Attribute
+
+id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16')
+
+id_aa = _OID(id_smime, 2)
+
+id_aa_encrypKeyPref = _OID(id_aa, 11)
+
+
+class SMIMEEncryptionKeyPreference(univ.Choice):
+ pass
+
+SMIMEEncryptionKeyPreference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber',
+ IssuerAndSerialNumber().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receipentKeyId',
+ # Yes, 'receipentKeyId' is spelled incorrectly, but kept
+ # this way for alignment with the ASN.1 module in the RFC.
+ RecipientKeyIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('subjectAltKeyIdentifier',
+ SubjectKeyIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# The Prefer Binary Inside SMIMECapabilities attribute
+
+id_cap = _OID(id_smime, 11)
+
+id_cap_preferBinaryInside = _OID(id_cap, 1)
+
+
+# CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ smimeCapabilities: SMIMECapabilities(),
+ id_aa_encrypKeyPref: SMIMEEncryptionKeyPreference(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# SMIMECapabilities Attribute Map
+#
+# Do not include OIDs in the dictionary when the parameters are absent.
+
+_smimeCapabilityMapUpdate = {
+ rc2CBC: SMIMECapabilitiesParametersForRC2CBC(),
+}
+
+smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5752.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5752.py
new file mode 100644
index 0000000000..1d0df8f459
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5752.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Multiple Signatures in Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5752.txt
+# https://www.rfc-editor.org/errata/eid4444
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5652
+
+
+class SignAttrsHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algID', rfc5652.DigestAlgorithmIdentifier()),
+ namedtype.NamedType('hash', univ.OctetString())
+ )
+
+
+class MultipleSignatures(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyHashAlg', rfc5652.DigestAlgorithmIdentifier()),
+ namedtype.NamedType('signAlg', rfc5652.SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signAttrsHash', SignAttrsHash()),
+ namedtype.OptionalNamedType('cert', rfc5035.ESSCertIDv2())
+ )
+
+
+id_aa_multipleSignatures = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.51')
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_multipleSignatures: MultipleSignatures(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5753.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5753.py
new file mode 100644
index 0000000000..94c37c2ab1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5753.py
@@ -0,0 +1,157 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography (ECC) Algorithms in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5753.txt
+#
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc8018
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 5652
+
+OriginatorPublicKey = rfc5652.OriginatorPublicKey
+
+UserKeyingMaterial = rfc5652.UserKeyingMaterial
+
+
+# Imports from RFC 5480
+
+ECDSA_Sig_Value = rfc5480.ECDSA_Sig_Value
+
+ECParameters = rfc5480.ECParameters
+
+ECPoint = rfc5480.ECPoint
+
+id_ecPublicKey = rfc5480.id_ecPublicKey
+
+
+# Imports from RFC 8018
+
+id_hmacWithSHA224 = rfc8018.id_hmacWithSHA224
+
+id_hmacWithSHA256 = rfc8018.id_hmacWithSHA256
+
+id_hmacWithSHA384 = rfc8018.id_hmacWithSHA384
+
+id_hmacWithSHA512 = rfc8018.id_hmacWithSHA512
+
+
+# Object Identifier arcs
+
+x9_63_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0')
+
+secg_scheme = univ.ObjectIdentifier('1.3.132.1')
+
+
+# Object Identifiers for the algorithms
+
+dhSinglePass_cofactorDH_sha1kdf_scheme = x9_63_scheme + (3, )
+
+dhSinglePass_cofactorDH_sha224kdf_scheme = secg_scheme + (14, 0, )
+
+dhSinglePass_cofactorDH_sha256kdf_scheme = secg_scheme + (14, 1, )
+
+dhSinglePass_cofactorDH_sha384kdf_scheme = secg_scheme + (14, 2, )
+
+dhSinglePass_cofactorDH_sha512kdf_scheme = secg_scheme + (14, 3, )
+
+dhSinglePass_stdDH_sha1kdf_scheme = x9_63_scheme + (2, )
+
+dhSinglePass_stdDH_sha224kdf_scheme = secg_scheme + (11, 0, )
+
+dhSinglePass_stdDH_sha256kdf_scheme = secg_scheme + (11, 1, )
+
+dhSinglePass_stdDH_sha384kdf_scheme = secg_scheme + (11, 2, )
+
+dhSinglePass_stdDH_sha512kdf_scheme = secg_scheme + (11, 3, )
+
+mqvSinglePass_sha1kdf_scheme = x9_63_scheme + (16, )
+
+mqvSinglePass_sha224kdf_scheme = secg_scheme + (15, 0, )
+
+mqvSinglePass_sha256kdf_scheme = secg_scheme + (15, 1, )
+
+mqvSinglePass_sha384kdf_scheme = secg_scheme + (15, 2, )
+
+mqvSinglePass_sha512kdf_scheme = secg_scheme + (15, 3, )
+
+
+# Structures for parameters and key derivation
+
+class IV(univ.OctetString):
+ # Exactly 8 octets
+ pass
+
+
+class CBCParameter(IV):
+ pass
+
+
+class KeyWrapAlgorithm(AlgorithmIdentifier):
+ pass
+
+
+class ECC_CMS_SharedInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyInfo', KeyWrapAlgorithm()),
+ namedtype.OptionalNamedType('entityUInfo',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('suppPubInfo',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class MQVuserKeyingMaterial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ephemeralPublicKey', OriginatorPublicKey()),
+ namedtype.OptionalNamedType('addedukm',
+ UserKeyingMaterial().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py and
+# Update the SMIMECapabilities Attribute Map in rfc5751.py
+
+_algorithmIdentifierMapUpdate = {
+ dhSinglePass_stdDH_sha1kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha224kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha256kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha384kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_stdDH_sha512kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha1kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha224kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha256kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha384kdf_scheme: KeyWrapAlgorithm(),
+ dhSinglePass_cofactorDH_sha512kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha1kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha224kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha256kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha384kdf_scheme: KeyWrapAlgorithm(),
+ mqvSinglePass_sha512kdf_scheme: KeyWrapAlgorithm(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+rfc5751.smimeCapabilityMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5755.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5755.py
new file mode 100644
index 0000000000..14f56fc600
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5755.py
@@ -0,0 +1,398 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5755.txt
+# https://www.rfc-editor.org/rfc/rfc5912.txt (see Section 13)
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+# Map for Security Category type to value
+
+securityCategoryMap = { }
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax
+
+AuthorityKeyIdentifier = rfc5280.AuthorityKeyIdentifier
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+CRLDistributionPoints = rfc5280.CRLDistributionPoints
+
+Extensions = rfc5280.Extensions
+
+Extension = rfc5280.Extension
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+UniqueIdentifier = rfc5280.UniqueIdentifier
+
+
+# Object Identifier arcs
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+id_pe = id_pkix + (1, )
+
+id_kp = id_pkix + (3, )
+
+id_aca = id_pkix + (10, )
+
+id_ad = id_pkix + (48, )
+
+id_at = univ.ObjectIdentifier((2, 5, 4, ))
+
+id_ce = univ.ObjectIdentifier((2, 5, 29, ))
+
+
+# Attribute Certificate
+
+class AttCertVersion(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('v2', 1)
+ )
+
+
+class IssuerSerial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serial', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier())
+ )
+
+
+class ObjectDigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType',
+ univ.Enumerated(namedValues=namedval.NamedValues(
+ ('publicKey', 0),
+ ('publicKeyCert', 1),
+ ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID',
+ univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm',
+ AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest',
+ univ.BitString())
+ )
+
+
+class Holder(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('baseCertificateID',
+ IssuerSerial().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('entityName',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('objectDigestInfo',
+ ObjectDigestInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class V2Form(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerName',
+ GeneralNames()),
+ namedtype.OptionalNamedType('baseCertificateID',
+ IssuerSerial().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('objectDigestInfo',
+ ObjectDigestInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class AttCertIssuer(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('v1Form', GeneralNames()),
+ namedtype.NamedType('v2Form', V2Form().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+ )
+
+
+class AttributeCertificateInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ AttCertVersion()),
+ namedtype.NamedType('holder',
+ Holder()),
+ namedtype.NamedType('issuer',
+ AttCertIssuer()),
+ namedtype.NamedType('signature',
+ AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber',
+ CertificateSerialNumber()),
+ namedtype.NamedType('attrCertValidityPeriod',
+ AttCertValidityPeriod()),
+ namedtype.NamedType('attributes',
+ univ.SequenceOf(componentType=Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID',
+ UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions',
+ Extensions())
+ )
+
+
+class AttributeCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acinfo', AttributeCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+ )
+
+
+# Attribute Certificate Extensions
+
+id_pe_ac_auditIdentity = id_pe + (4, )
+
+id_ce_noRevAvail = id_ce + (56, )
+
+id_ce_targetInformation = id_ce + (55, )
+
+
+class TargetCert(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetCertificate', IssuerSerial()),
+ namedtype.OptionalNamedType('targetName', GeneralName()),
+ namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
+ )
+
+
+class Target(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetName',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('targetGroup',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('targetCert',
+ TargetCert().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class Targets(univ.SequenceOf):
+ componentType = Target()
+
+
+id_pe_ac_proxying = id_pe + (10, )
+
+
+class ProxyInfo(univ.SequenceOf):
+ componentType = Targets()
+
+
+id_pe_aaControls = id_pe + (6, )
+
+
+class AttrSpec(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class AAControls(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.OptionalNamedType('permittedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('permitUnSpecified',
+ univ.Boolean().subtype(value=1))
+ )
+
+
+# Attribute Certificate Attributes
+
+id_aca_authenticationInfo = id_aca + (1, )
+
+
+id_aca_accessIdentity = id_aca + (2, )
+
+
+class SvceAuthInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('service', GeneralName()),
+ namedtype.NamedType('ident', GeneralName()),
+ namedtype.OptionalNamedType('authInfo', univ.OctetString())
+ )
+
+
+id_aca_chargingIdentity = id_aca + (3, )
+
+
+id_aca_group = id_aca + (4, )
+
+
+class IetfAttrSyntax(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('policyAuthority',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('values', univ.SequenceOf(
+ componentType=univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('octets', univ.OctetString()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('string', char.UTF8String())
+ ))
+ ))
+ )
+
+
+id_at_role = id_at + (72,)
+
+
+class RoleSyntax(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('roleAuthority',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('roleName',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ClassList(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('topSecret', 5)
+ )
+
+
+class SecurityCategory(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type',
+ univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value',
+ univ.Any().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)),
+ openType=opentype.OpenType('type', securityCategoryMap))
+ )
+
+
+id_at_clearance = univ.ObjectIdentifier((2, 5, 4, 55, ))
+
+
+class Clearance(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId',
+ univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(value='unclassified')),
+ namedtype.OptionalNamedType('securityCategories',
+ univ.SetOf(componentType=SecurityCategory()))
+ )
+
+
+id_at_clearance_rfc3281 = univ.ObjectIdentifier((2, 5, 1, 5, 55, ))
+
+
+class Clearance_rfc3281(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId',
+ univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(
+ value='unclassified')),
+ namedtype.OptionalNamedType('securityCategories',
+ univ.SetOf(componentType=SecurityCategory()).subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+id_aca_encAttrs = id_aca + (6, )
+
+
+class ACClearAttrs(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acIssuer', GeneralName()),
+ namedtype.NamedType('acSerial', univ.Integer()),
+ namedtype.NamedType('attrs', univ.SequenceOf(componentType=Attribute()))
+ )
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ac_auditIdentity: univ.OctetString(),
+ id_ce_noRevAvail: univ.Null(),
+ id_ce_targetInformation: Targets(),
+ id_pe_ac_proxying: ProxyInfo(),
+ id_pe_aaControls: AAControls(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_authenticationInfo: SvceAuthInfo(),
+ id_aca_accessIdentity: SvceAuthInfo(),
+ id_aca_chargingIdentity: IetfAttrSyntax(),
+ id_aca_group: IetfAttrSyntax(),
+ id_at_role: RoleSyntax(),
+ id_at_clearance: Clearance(),
+ id_at_clearance_rfc3281: Clearance_rfc3281(),
+ id_aca_encAttrs: ContentInfo(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5913.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5913.py
new file mode 100644
index 0000000000..0bd065330d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5913.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authority Clearance Constraints Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5913.txt
+# https://www.rfc-editor.org/errata/eid5890
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+
+MAX = float('inf')
+
+
+# Authority Clearance Constraints Certificate Extension
+
+id_pe_clearanceConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.21')
+
+id_pe_authorityClearanceConstraints = id_pe_clearanceConstraints
+
+
+class AuthorityClearanceConstraints(univ.SequenceOf):
+ componentType = rfc5755.Clearance()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_clearanceConstraints: AuthorityClearanceConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5914.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5914.py
new file mode 100644
index 0000000000..d125ea2a65
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5914.py
@@ -0,0 +1,119 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Trust Anchor Format
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5914.txt
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+MAX = float('inf')
+
+Certificate = rfc5280.Certificate
+
+Name = rfc5280.Name
+
+Extensions = rfc5280.Extensions
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+TBSCertificate = rfc5280.TBSCertificate
+
+CertificatePolicies = rfc5280.CertificatePolicies
+
+KeyIdentifier = rfc5280.KeyIdentifier
+
+NameConstraints = rfc5280.NameConstraints
+
+
+class CertPolicyFlags(univ.BitString):
+ pass
+
+CertPolicyFlags.namedValues = namedval.NamedValues(
+ ('inhibitPolicyMapping', 0),
+ ('requireExplicitPolicy', 1),
+ ('inhibitAnyPolicy', 2)
+)
+
+
+class CertPathControls(univ.Sequence):
+ pass
+
+CertPathControls.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taName', Name()),
+ namedtype.OptionalNamedType('certificate', Certificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('policySet', CertificatePolicies().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('policyFlags', CertPolicyFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('nameConstr', NameConstraints().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('pathLenConstraint', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class TrustAnchorTitle(char.UTF8String):
+ pass
+
+TrustAnchorTitle.subtypeSpec = constraint.ValueSizeConstraint(1, 64)
+
+
+class TrustAnchorInfoVersion(univ.Integer):
+ pass
+
+TrustAnchorInfoVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+class TrustAnchorInfo(univ.Sequence):
+ pass
+
+TrustAnchorInfo.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TrustAnchorInfoVersion().subtype(value='v1')),
+ namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
+ namedtype.NamedType('keyId', KeyIdentifier()),
+ namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
+ namedtype.OptionalNamedType('certPath', CertPathControls()),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('taTitleLangTag', char.UTF8String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class TrustAnchorChoice(univ.Choice):
+ pass
+
+TrustAnchorChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('tbsCert', TBSCertificate().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('taInfo', TrustAnchorInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+id_ct_trustAnchorList = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.34')
+
+class TrustAnchorList(univ.SequenceOf):
+ pass
+
+TrustAnchorList.componentType = TrustAnchorChoice()
+TrustAnchorList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5915.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5915.py
new file mode 100644
index 0000000000..82ff4a338b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5915.py
@@ -0,0 +1,32 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Private Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5915.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5480
+
+
+class ECPrivateKey(univ.Sequence):
+ pass
+
+ECPrivateKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(
+ namedValues=namedval.NamedValues(('ecPrivkeyVer1', 1)))),
+ namedtype.NamedType('privateKey', univ.OctetString()),
+ namedtype.OptionalNamedType('parameters', rfc5480.ECParameters().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('publicKey', univ.BitString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5916.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5916.py
new file mode 100644
index 0000000000..ac23c86b79
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5916.py
@@ -0,0 +1,35 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Device Owner Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5916.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Device Owner Attribute
+
+id_deviceOwner = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 69))
+
+at_deviceOwner = rfc5280.Attribute()
+at_deviceOwner['type'] = id_deviceOwner
+at_deviceOwner['values'][0] = univ.ObjectIdentifier()
+
+
+# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py.
+
+_certificateAttributesMapUpdate = {
+ id_deviceOwner: univ.ObjectIdentifier(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5917.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5917.py
new file mode 100644
index 0000000000..ed9af987db
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5917.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Clearance Sponsor Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5917.txt
+# https://www.rfc-editor.org/errata/eid4558
+# https://www.rfc-editor.org/errata/eid5883
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# DirectoryString is the same as RFC 5280, except for two things:
+# 1. the length is limited to 64;
+# 2. only the 'utf8String' choice remains because the ASN.1
+# specification says: ( WITH COMPONENTS { utf8String PRESENT } )
+
+class DirectoryString(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 64))),
+ )
+
+
+# Clearance Sponsor Attribute
+
+id_clearanceSponsor = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 68))
+
+ub_clearance_sponsor = univ.Integer(64)
+
+
+at_clearanceSponsor = rfc5280.Attribute()
+at_clearanceSponsor['type'] = id_clearanceSponsor
+at_clearanceSponsor['values'][0] = DirectoryString()
+
+
+# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py.
+
+_certificateAttributesMapUpdate = {
+ id_clearanceSponsor: DirectoryString(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5924.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5924.py
new file mode 100644
index 0000000000..4358e4f529
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5924.py
@@ -0,0 +1,19 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Extended Key Usage (EKU) for Session Initiation Protocol (SIP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5924.txt
+#
+
+from pyasn1.type import univ
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_sipDomain = id_kp + (20, )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5934.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5934.py
new file mode 100644
index 0000000000..e3ad247aa0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5934.py
@@ -0,0 +1,786 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Trust Anchor Format
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5934.txt
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5914
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 2985
+
+SingleAttribute = rfc2985.SingleAttribute
+
+
+# Imports from RFC5914
+
+CertPathControls = rfc5914.CertPathControls
+
+TrustAnchorChoice = rfc5914.TrustAnchorChoice
+
+TrustAnchorTitle = rfc5914.TrustAnchorTitle
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+AnotherName = rfc5280.AnotherName
+
+Attribute = rfc5280.Attribute
+
+Certificate = rfc5280.Certificate
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+Extension = rfc5280.Extension
+
+Extensions = rfc5280.Extensions
+
+KeyIdentifier = rfc5280.KeyIdentifier
+
+Name = rfc5280.Name
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+TBSCertificate = rfc5280.TBSCertificate
+
+Validity = rfc5280.Validity
+
+
+# Object Identifier Arc for TAMP Message Content Types
+
+id_tamp = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.77')
+
+
+# TAMP Status Query Message
+
+id_ct_TAMP_statusQuery = _OID(id_tamp, 1)
+
+
+class TAMPVersion(univ.Integer):
+ pass
+
+TAMPVersion.namedValues = namedval.NamedValues(
+ ('v1', 1),
+ ('v2', 2)
+)
+
+
+class TerseOrVerbose(univ.Enumerated):
+ pass
+
+TerseOrVerbose.namedValues = namedval.NamedValues(
+ ('terse', 1),
+ ('verbose', 2)
+)
+
+
+class HardwareSerialEntry(univ.Choice):
+ pass
+
+HardwareSerialEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('all', univ.Null()),
+ namedtype.NamedType('single', univ.OctetString()),
+ namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('low', univ.OctetString()),
+ namedtype.NamedType('high', univ.OctetString())
+ ))
+ )
+)
+
+
+class HardwareModules(univ.Sequence):
+ pass
+
+HardwareModules.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialEntries', univ.SequenceOf(
+ componentType=HardwareSerialEntry()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class HardwareModuleIdentifierList(univ.SequenceOf):
+ pass
+
+HardwareModuleIdentifierList.componentType = HardwareModules()
+HardwareModuleIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class Community(univ.ObjectIdentifier):
+ pass
+
+
+class CommunityIdentifierList(univ.SequenceOf):
+ pass
+
+CommunityIdentifierList.componentType = Community()
+CommunityIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(0, MAX)
+
+
+class TargetIdentifier(univ.Choice):
+ pass
+
+TargetIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwModules', HardwareModuleIdentifierList().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('communities', CommunityIdentifierList().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('allModules', univ.Null().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('uri', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('otherName', AnotherName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+
+class SeqNumber(univ.Integer):
+ pass
+
+SeqNumber.subtypeSpec = constraint.ValueRangeConstraint(0, 9223372036854775807)
+
+
+class TAMPMsgRef(univ.Sequence):
+ pass
+
+TAMPMsgRef.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('target', TargetIdentifier()),
+ namedtype.NamedType('seqNum', SeqNumber())
+)
+
+
+class TAMPStatusQuery(univ.Sequence):
+ pass
+
+TAMPStatusQuery.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse', TerseOrVerbose().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('query', TAMPMsgRef())
+)
+
+
+tamp_status_query = rfc5652.ContentInfo()
+tamp_status_query['contentType'] = id_ct_TAMP_statusQuery
+tamp_status_query['content'] = TAMPStatusQuery()
+
+
+# TAMP Status Response Message
+
+id_ct_TAMP_statusResponse = _OID(id_tamp, 2)
+
+
+class KeyIdentifiers(univ.SequenceOf):
+ pass
+
+KeyIdentifiers.componentType = KeyIdentifier()
+KeyIdentifiers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TrustAnchorChoiceList(univ.SequenceOf):
+ pass
+
+TrustAnchorChoiceList.componentType = TrustAnchorChoice()
+TrustAnchorChoiceList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TAMPSequenceNumber(univ.Sequence):
+ pass
+
+TAMPSequenceNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyId', KeyIdentifier()),
+ namedtype.NamedType('seqNumber', SeqNumber())
+)
+
+
+class TAMPSequenceNumbers(univ.SequenceOf):
+ pass
+
+TAMPSequenceNumbers.componentType = TAMPSequenceNumber()
+TAMPSequenceNumbers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TerseStatusResponse(univ.Sequence):
+ pass
+
+TerseStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taKeyIds', KeyIdentifiers()),
+ namedtype.OptionalNamedType('communities', CommunityIdentifierList())
+)
+
+
+class VerboseStatusResponse(univ.Sequence):
+ pass
+
+VerboseStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('continPubKeyDecryptAlg',
+ AlgorithmIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('communities',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class StatusResponse(univ.Choice):
+ pass
+
+StatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseResponse', TerseStatusResponse().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('verboseResponse', VerboseStatusResponse().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPStatusResponse(univ.Sequence):
+ pass
+
+TAMPStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('query', TAMPMsgRef()),
+ namedtype.NamedType('response', StatusResponse()),
+ namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
+)
+
+
+tamp_status_response = rfc5652.ContentInfo()
+tamp_status_response['contentType'] = id_ct_TAMP_statusResponse
+tamp_status_response['content'] = TAMPStatusResponse()
+
+
+# Trust Anchor Update Message
+
+id_ct_TAMP_update = _OID(id_tamp, 3)
+
+
+class TBSCertificateChangeInfo(univ.Sequence):
+ pass
+
+TBSCertificateChangeInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('signature', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('issuer', Name().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('validity', Validity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('subject', Name().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+
+class TrustAnchorChangeInfo(univ.Sequence):
+ pass
+
+TrustAnchorChangeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('keyId', KeyIdentifier()),
+ namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
+ namedtype.OptionalNamedType('certPath', CertPathControls()),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class TrustAnchorChangeInfoChoice(univ.Choice):
+ pass
+
+TrustAnchorChangeInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertChange', TBSCertificateChangeInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('taChange', TrustAnchorChangeInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TrustAnchorUpdate(univ.Choice):
+ pass
+
+TrustAnchorUpdate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('add', TrustAnchorChoice().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('remove', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('change', TrustAnchorChangeInfoChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class TAMPUpdate(univ.Sequence):
+ pass
+
+TAMPUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('updates',
+ univ.SequenceOf(componentType=TrustAnchorUpdate()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+tamp_update = rfc5652.ContentInfo()
+tamp_update['contentType'] = id_ct_TAMP_update
+tamp_update['content'] = TAMPUpdate()
+
+
+# Trust Anchor Update Confirm Message
+
+id_ct_TAMP_updateConfirm = _OID(id_tamp, 4)
+
+
+class StatusCode(univ.Enumerated):
+ pass
+
+StatusCode.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('unsupportedParameters', 15),
+ ('signatureFailure', 16),
+ ('insufficientMemory', 17),
+ ('unsupportedTAMPMsgType', 18),
+ ('apexTAMPAnchor', 19),
+ ('improperTAAddition', 20),
+ ('seqNumFailure', 21),
+ ('contingencyPublicKeyDecrypt', 22),
+ ('incorrectTarget', 23),
+ ('communityUpdateFailed', 24),
+ ('trustAnchorNotFound', 25),
+ ('unsupportedTAAlgorithm', 26),
+ ('unsupportedTAKeySize', 27),
+ ('unsupportedContinPubKeyDecryptAlg', 28),
+ ('missingSignature', 29),
+ ('resourcesBusy', 30),
+ ('versionNumberMismatch', 31),
+ ('missingPolicySet', 32),
+ ('revokedCertificate', 33),
+ ('unsupportedTrustAnchorFormat', 34),
+ ('improperTAChange', 35),
+ ('malformed', 36),
+ ('cmsError', 37),
+ ('unsupportedTargetIdentifier', 38),
+ ('other', 127)
+)
+
+
+class StatusCodeList(univ.SequenceOf):
+ pass
+
+StatusCodeList.componentType = StatusCode()
+StatusCodeList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class TerseUpdateConfirm(StatusCodeList):
+ pass
+
+
+class VerboseUpdateConfirm(univ.Sequence):
+ pass
+
+VerboseUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCodeList()),
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('tampSeqNumbers', TAMPSequenceNumbers()),
+ namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
+)
+
+
+class UpdateConfirm(univ.Choice):
+ pass
+
+UpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseConfirm', TerseUpdateConfirm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseConfirm', VerboseUpdateConfirm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('update', TAMPMsgRef()),
+ namedtype.NamedType('confirm', UpdateConfirm())
+)
+
+
+tamp_update_confirm = rfc5652.ContentInfo()
+tamp_update_confirm['contentType'] = id_ct_TAMP_updateConfirm
+tamp_update_confirm['content'] = TAMPUpdateConfirm()
+
+
+# Apex Trust Anchor Update Message
+
+id_ct_TAMP_apexUpdate = _OID(id_tamp, 5)
+
+
+class TAMPApexUpdate(univ.Sequence):
+ pass
+
+TAMPApexUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('clearTrustAnchors', univ.Boolean()),
+ namedtype.NamedType('clearCommunities', univ.Boolean()),
+ namedtype.OptionalNamedType('seqNumber', SeqNumber()),
+ namedtype.NamedType('apexTA', TrustAnchorChoice())
+)
+
+
+tamp_apex_update = rfc5652.ContentInfo()
+tamp_apex_update['contentType'] = id_ct_TAMP_apexUpdate
+tamp_apex_update['content'] = TAMPApexUpdate()
+
+
+# Apex Trust Anchor Update Confirm Message
+
+id_ct_TAMP_apexUpdateConfirm = _OID(id_tamp, 6)
+
+
+class TerseApexUpdateConfirm(StatusCode):
+ pass
+
+
+class VerboseApexUpdateConfirm(univ.Sequence):
+ pass
+
+VerboseApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('communities',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+
+class ApexUpdateConfirm(univ.Choice):
+ pass
+
+ApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseApexConfirm',
+ TerseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseApexConfirm',
+ VerboseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPApexUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('apexReplace', TAMPMsgRef()),
+ namedtype.NamedType('apexConfirm', ApexUpdateConfirm())
+)
+
+
+tamp_apex_update_confirm = rfc5652.ContentInfo()
+tamp_apex_update_confirm['contentType'] = id_ct_TAMP_apexUpdateConfirm
+tamp_apex_update_confirm['content'] = TAMPApexUpdateConfirm()
+
+
+# Community Update Message
+
+id_ct_TAMP_communityUpdate = _OID(id_tamp, 7)
+
+
+class CommunityUpdates(univ.Sequence):
+ pass
+
+CommunityUpdates.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('remove',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('add',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 2)))
+)
+
+
+class TAMPCommunityUpdate(univ.Sequence):
+ pass
+
+TAMPCommunityUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('updates', CommunityUpdates())
+)
+
+
+tamp_community_update = rfc5652.ContentInfo()
+tamp_community_update['contentType'] = id_ct_TAMP_communityUpdate
+tamp_community_update['content'] = TAMPCommunityUpdate()
+
+
+# Community Update Confirm Message
+
+id_ct_TAMP_communityUpdateConfirm = _OID(id_tamp, 8)
+
+
+class TerseCommunityConfirm(StatusCode):
+ pass
+
+
+class VerboseCommunityConfirm(univ.Sequence):
+ pass
+
+VerboseCommunityConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.OptionalNamedType('communities', CommunityIdentifierList())
+)
+
+
+class CommunityConfirm(univ.Choice):
+ pass
+
+CommunityConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseCommConfirm',
+ TerseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseCommConfirm',
+ VerboseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPCommunityUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPCommunityUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('update', TAMPMsgRef()),
+ namedtype.NamedType('commConfirm', CommunityConfirm())
+)
+
+
+tamp_community_update_confirm = rfc5652.ContentInfo()
+tamp_community_update_confirm['contentType'] = id_ct_TAMP_communityUpdateConfirm
+tamp_community_update_confirm['content'] = TAMPCommunityUpdateConfirm()
+
+
+# Sequence Number Adjust Message
+
+id_ct_TAMP_seqNumAdjust = _OID(id_tamp, 10)
+
+
+
+class SequenceNumberAdjust(univ.Sequence):
+ pass
+
+SequenceNumberAdjust.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('msgRef', TAMPMsgRef())
+)
+
+
+tamp_sequence_number_adjust = rfc5652.ContentInfo()
+tamp_sequence_number_adjust['contentType'] = id_ct_TAMP_seqNumAdjust
+tamp_sequence_number_adjust['content'] = SequenceNumberAdjust()
+
+
+# Sequence Number Adjust Confirm Message
+
+id_ct_TAMP_seqNumAdjustConfirm = _OID(id_tamp, 11)
+
+
+class SequenceNumberAdjustConfirm(univ.Sequence):
+ pass
+
+SequenceNumberAdjustConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('adjust', TAMPMsgRef()),
+ namedtype.NamedType('status', StatusCode())
+)
+
+
+tamp_sequence_number_adjust_confirm = rfc5652.ContentInfo()
+tamp_sequence_number_adjust_confirm['contentType'] = id_ct_TAMP_seqNumAdjustConfirm
+tamp_sequence_number_adjust_confirm['content'] = SequenceNumberAdjustConfirm()
+
+
+# TAMP Error Message
+
+id_ct_TAMP_error = _OID(id_tamp, 9)
+
+
+class TAMPError(univ.Sequence):
+ pass
+
+TAMPError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('msgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.OptionalNamedType('msgRef', TAMPMsgRef())
+)
+
+
+tamp_error = rfc5652.ContentInfo()
+tamp_error['contentType'] = id_ct_TAMP_error
+tamp_error['content'] = TAMPError()
+
+
+# Object Identifier Arc for Attributes
+
+id_attributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.5')
+
+
+# contingency-public-key-decrypt-key unsigned attribute
+
+id_aa_TAMP_contingencyPublicKeyDecryptKey = _OID(id_attributes, 63)
+
+
+class PlaintextSymmetricKey(univ.OctetString):
+ pass
+
+
+contingency_public_key_decrypt_key = Attribute()
+contingency_public_key_decrypt_key['type'] = id_aa_TAMP_contingencyPublicKeyDecryptKey
+contingency_public_key_decrypt_key['values'][0] = PlaintextSymmetricKey()
+
+
+# id-pe-wrappedApexContinKey extension
+
+id_pe_wrappedApexContinKey =univ.ObjectIdentifier('1.3.6.1.5.5.7.1.20')
+
+
+class ApexContingencyKey(univ.Sequence):
+ pass
+
+ApexContingencyKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('wrapAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('wrappedContinPubKey', univ.OctetString())
+)
+
+
+wrappedApexContinKey = Extension()
+wrappedApexContinKey['extnID'] = id_pe_wrappedApexContinKey
+wrappedApexContinKey['critical'] = 0
+wrappedApexContinKey['extnValue'] = univ.OctetString()
+
+
+# Add to the map of CMS Content Type OIDs to Content Types in
+# rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_TAMP_statusQuery: TAMPStatusQuery(),
+ id_ct_TAMP_statusResponse: TAMPStatusResponse(),
+ id_ct_TAMP_update: TAMPUpdate(),
+ id_ct_TAMP_updateConfirm: TAMPUpdateConfirm(),
+ id_ct_TAMP_apexUpdate: TAMPApexUpdate(),
+ id_ct_TAMP_apexUpdateConfirm: TAMPApexUpdateConfirm(),
+ id_ct_TAMP_communityUpdate: TAMPCommunityUpdate(),
+ id_ct_TAMP_communityUpdateConfirm: TAMPCommunityUpdateConfirm(),
+ id_ct_TAMP_seqNumAdjust: SequenceNumberAdjust(),
+ id_ct_TAMP_seqNumAdjustConfirm: SequenceNumberAdjustConfirm(),
+ id_ct_TAMP_error: TAMPError(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# Add to the map of CMS Attribute OIDs to Attribute Values in
+# rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_TAMP_contingencyPublicKeyDecryptKey: PlaintextSymmetricKey(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Add to the map of Certificate Extension OIDs to Extensions in
+# rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wrappedApexContinKey: ApexContingencyKey(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5940.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5940.py
new file mode 100644
index 0000000000..e105923358
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5940.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional CMS Revocation Information Choices
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5940.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5652
+
+
+# RevocationInfoChoice for OCSP response:
+# The OID is included in otherRevInfoFormat, and
+# signed OCSPResponse is included in otherRevInfo
+
+id_ri_ocsp_response = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.2')
+
+OCSPResponse = rfc2560.OCSPResponse
+
+
+# RevocationInfoChoice for SCVP request/response:
+# The OID is included in otherRevInfoFormat, and
+# SCVPReqRes is included in otherRevInfo
+
+id_ri_scvp = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.4')
+
+ContentInfo = rfc5652.ContentInfo
+
+class SCVPReqRes(univ.Sequence):
+ pass
+
+SCVPReqRes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('request',
+ ContentInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('response', ContentInfo())
+)
+
+
+# Map of Revocation Info Format OIDs to Revocation Info Format
+# is added to the ones that are in rfc5652.py
+
+_otherRevInfoFormatMapUpdate = {
+ id_ri_ocsp_response: OCSPResponse(),
+ id_ri_scvp: SCVPReqRes(),
+}
+
+rfc5652.otherRevInfoFormatMap.update(_otherRevInfoFormatMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5958.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5958.py
new file mode 100644
index 0000000000..1aaa9286ad
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5958.py
@@ -0,0 +1,98 @@
+#
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Asymmetric Key Packages, which is essentially version 2 of
+# the PrivateKeyInfo structure in PKCS#8 in RFC 5208
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5958.txt
+
+from pyasn1.type import univ, constraint, namedtype, namedval, tag
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+MAX = float('inf')
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class PrivateKeyAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedData(univ.OctetString):
+ pass
+
+
+class EncryptedPrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedData', EncryptedData())
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0), ('v2', 1))
+
+
+class PrivateKey(univ.OctetString):
+ pass
+
+
+class Attributes(univ.SetOf):
+ componentType = rfc5652.Attribute()
+
+
+class PublicKey(univ.BitString):
+ pass
+
+
+# OneAsymmetricKey is essentially version 2 of PrivateKeyInfo.
+# If publicKey is present, then the version must be v2;
+# otherwise, the version should be v1.
+
+class OneAsymmetricKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('privateKeyAlgorithm', PrivateKeyAlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', PrivateKey()),
+ namedtype.OptionalNamedType('attributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('publicKey', PublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class PrivateKeyInfo(OneAsymmetricKey):
+ pass
+
+
+# The CMS AsymmetricKeyPackage Content Type
+
+id_ct_KP_aKeyPackage = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.5')
+
+class AsymmetricKeyPackage(univ.SequenceOf):
+ pass
+
+AsymmetricKeyPackage.componentType = OneAsymmetricKey()
+AsymmetricKeyPackage.sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_aKeyPackage: AsymmetricKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5990.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5990.py
new file mode 100644
index 0000000000..281316fb81
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc5990.py
@@ -0,0 +1,237 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Use of the RSA-KEM Key Transport Algorithm in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5990.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Useful types and definitions
+
+class NullParms(univ.Null):
+ pass
+
+
+# Object identifier arcs
+
+is18033_2 = _OID(1, 0, 18033, 2)
+
+nistAlgorithm = _OID(2, 16, 840, 1, 101, 3, 4)
+
+pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
+
+x9_44 = _OID(1, 3, 133, 16, 840, 9, 44)
+
+x9_44_components = _OID(x9_44, 1)
+
+
+# Types for algorithm identifiers
+
+class Camellia_KeyWrappingScheme(AlgorithmIdentifier):
+ pass
+
+class DataEncapsulationMechanism(AlgorithmIdentifier):
+ pass
+
+class KDF2_HashFunction(AlgorithmIdentifier):
+ pass
+
+class KDF3_HashFunction(AlgorithmIdentifier):
+ pass
+
+class KeyDerivationFunction(AlgorithmIdentifier):
+ pass
+
+class KeyEncapsulationMechanism(AlgorithmIdentifier):
+ pass
+
+class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier):
+ pass
+
+
+# RSA-KEM Key Transport Algorithm
+
+id_rsa_kem = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 14)
+
+
+class GenericHybridParameters(univ.Sequence):
+ pass
+
+GenericHybridParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('kem', KeyEncapsulationMechanism()),
+ namedtype.NamedType('dem', DataEncapsulationMechanism())
+)
+
+
+rsa_kem = AlgorithmIdentifier()
+rsa_kem['algorithm'] = id_rsa_kem
+rsa_kem['parameters'] = GenericHybridParameters()
+
+
+# KEM-RSA Key Encapsulation Mechanism
+
+id_kem_rsa = _OID(is18033_2, 2, 4)
+
+
+class KeyLength(univ.Integer):
+ pass
+
+KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+class RsaKemParameters(univ.Sequence):
+ pass
+
+RsaKemParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
+ namedtype.NamedType('keyLength', KeyLength())
+)
+
+
+kem_rsa = AlgorithmIdentifier()
+kem_rsa['algorithm'] = id_kem_rsa
+kem_rsa['parameters'] = RsaKemParameters()
+
+
+# Key Derivation Functions
+
+id_kdf_kdf2 = _OID(x9_44_components, 1)
+
+id_kdf_kdf3 = _OID(x9_44_components, 2)
+
+
+kdf2 = AlgorithmIdentifier()
+kdf2['algorithm'] = id_kdf_kdf2
+kdf2['parameters'] = KDF2_HashFunction()
+
+kdf3 = AlgorithmIdentifier()
+kdf3['algorithm'] = id_kdf_kdf3
+kdf3['parameters'] = KDF3_HashFunction()
+
+
+# Hash Functions
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+
+sha1 = AlgorithmIdentifier()
+sha1['algorithm'] = id_sha1
+sha1['parameters'] = univ.Null("")
+
+sha224 = AlgorithmIdentifier()
+sha224['algorithm'] = id_sha224
+sha224['parameters'] = univ.Null("")
+
+sha256 = AlgorithmIdentifier()
+sha256['algorithm'] = id_sha256
+sha256['parameters'] = univ.Null("")
+
+sha384 = AlgorithmIdentifier()
+sha384['algorithm'] = id_sha384
+sha384['parameters'] = univ.Null("")
+
+sha512 = AlgorithmIdentifier()
+sha512['algorithm'] = id_sha512
+sha512['parameters'] = univ.Null("")
+
+
+# Symmetric Key-Wrapping Schemes
+
+id_aes128_Wrap = _OID(nistAlgorithm, 1, 5)
+
+id_aes192_Wrap = _OID(nistAlgorithm, 1, 25)
+
+id_aes256_Wrap = _OID(nistAlgorithm, 1, 45)
+
+id_alg_CMS3DESwrap = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 6)
+
+id_camellia128_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 2)
+
+id_camellia192_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 3)
+
+id_camellia256_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 4)
+
+
+aes128_Wrap = AlgorithmIdentifier()
+aes128_Wrap['algorithm'] = id_aes128_Wrap
+# aes128_Wrap['parameters'] are absent
+
+aes192_Wrap = AlgorithmIdentifier()
+aes192_Wrap['algorithm'] = id_aes128_Wrap
+# aes192_Wrap['parameters'] are absent
+
+aes256_Wrap = AlgorithmIdentifier()
+aes256_Wrap['algorithm'] = id_sha256
+# aes256_Wrap['parameters'] are absent
+
+tdes_Wrap = AlgorithmIdentifier()
+tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
+tdes_Wrap['parameters'] = univ.Null("")
+
+camellia128_Wrap = AlgorithmIdentifier()
+camellia128_Wrap['algorithm'] = id_camellia128_Wrap
+# camellia128_Wrap['parameters'] are absent
+
+camellia192_Wrap = AlgorithmIdentifier()
+camellia192_Wrap['algorithm'] = id_camellia192_Wrap
+# camellia192_Wrap['parameters'] are absent
+
+camellia256_Wrap = AlgorithmIdentifier()
+camellia256_Wrap['algorithm'] = id_camellia256_Wrap
+# camellia256_Wrap['parameters'] are absent
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+# Note that the ones that must not have parameters are not added to the map.
+
+_algorithmIdentifierMapUpdate = {
+ id_rsa_kem: GenericHybridParameters(),
+ id_kem_rsa: RsaKemParameters(),
+ id_kdf_kdf2: KDF2_HashFunction(),
+ id_kdf_kdf3: KDF3_HashFunction(),
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_alg_CMS3DESwrap: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6010.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6010.py
new file mode 100644
index 0000000000..250e207ba4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6010.py
@@ -0,0 +1,88 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extension for CMS Content Constraints (CCC)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6010.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+AttributeType = rfc5280.AttributeType
+
+AttributeValue = rfc5280.AttributeValue
+
+
+id_ct_anyContentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.0')
+
+
+class AttrConstraint(univ.Sequence):
+ pass
+
+AttrConstraint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues', univ.SetOf(
+ componentType=AttributeValue()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class AttrConstraintList(univ.SequenceOf):
+ pass
+
+AttrConstraintList.componentType = AttrConstraint()
+AttrConstraintList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class ContentTypeGeneration(univ.Enumerated):
+ pass
+
+ContentTypeGeneration.namedValues = namedval.NamedValues(
+ ('canSource', 0),
+ ('cannotSource', 1)
+)
+
+
+class ContentTypeConstraint(univ.Sequence):
+ pass
+
+ContentTypeConstraint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('canSource', ContentTypeGeneration().subtype(value='canSource')),
+ namedtype.OptionalNamedType('attrConstraints', AttrConstraintList())
+)
+
+
+# CMS Content Constraints (CCC) Extension and Object Identifier
+
+id_pe_cmsContentConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.18')
+
+class CMSContentConstraints(univ.SequenceOf):
+ pass
+
+CMSContentConstraints.componentType = ContentTypeConstraint()
+CMSContentConstraints.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_cmsContentConstraints: CMSContentConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6019.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6019.py
new file mode 100644
index 0000000000..c6872c7669
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6019.py
@@ -0,0 +1,45 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BinaryTime: An Alternate Format for Representing Date and Time
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6019.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# BinaryTime: Represent date and time as an integer
+
+class BinaryTime(univ.Integer):
+ pass
+
+BinaryTime.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+# CMS Attribute for representing signing time in BinaryTime
+
+id_aa_binarySigningTime = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.46')
+
+class BinarySigningTime(BinaryTime):
+ pass
+
+
+# Map of Attribute Type OIDs to Attributes ia added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_binarySigningTime: BinarySigningTime(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6031.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6031.py
new file mode 100644
index 0000000000..6e1bb2261d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6031.py
@@ -0,0 +1,469 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Symmetric Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6031.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6019
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+id_pskc = univ.ObjectIdentifier('1.2.840.113549.1.9.16.12')
+
+
+# Symmetric Key Package Attributes
+
+id_pskc_manufacturer = _OID(id_pskc, 1)
+
+class at_pskc_manufacturer(char.UTF8String):
+ pass
+
+
+id_pskc_serialNo = _OID(id_pskc, 2)
+
+class at_pskc_serialNo(char.UTF8String):
+ pass
+
+
+id_pskc_model = _OID(id_pskc, 3)
+
+class at_pskc_model(char.UTF8String):
+ pass
+
+
+id_pskc_issueNo = _OID(id_pskc, 4)
+
+class at_pskc_issueNo(char.UTF8String):
+ pass
+
+
+id_pskc_deviceBinding = _OID(id_pskc, 5)
+
+class at_pskc_deviceBinding(char.UTF8String):
+ pass
+
+
+id_pskc_deviceStartDate = _OID(id_pskc, 6)
+
+class at_pskc_deviceStartDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_deviceExpiryDate = _OID(id_pskc, 7)
+
+class at_pskc_deviceExpiryDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_moduleId = _OID(id_pskc, 8)
+
+class at_pskc_moduleId(char.UTF8String):
+ pass
+
+
+id_pskc_deviceUserId = _OID(id_pskc, 26)
+
+class at_pskc_deviceUserId(char.UTF8String):
+ pass
+
+
+# Symmetric Key Attributes
+
+id_pskc_keyId = _OID(id_pskc, 9)
+
+class at_pskc_keyUserId(char.UTF8String):
+ pass
+
+
+id_pskc_algorithm = _OID(id_pskc, 10)
+
+class at_pskc_algorithm(char.UTF8String):
+ pass
+
+
+id_pskc_issuer = _OID(id_pskc, 11)
+
+class at_pskc_issuer(char.UTF8String):
+ pass
+
+
+id_pskc_keyProfileId = _OID(id_pskc, 12)
+
+class at_pskc_keyProfileId(char.UTF8String):
+ pass
+
+
+id_pskc_keyReference = _OID(id_pskc, 13)
+
+class at_pskc_keyReference(char.UTF8String):
+ pass
+
+
+id_pskc_friendlyName = _OID(id_pskc, 14)
+
+class FriendlyName(univ.Sequence):
+ pass
+
+FriendlyName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('friendlyName', char.UTF8String()),
+ namedtype.OptionalNamedType('friendlyNameLangTag', char.UTF8String())
+)
+
+class at_pskc_friendlyName(FriendlyName):
+ pass
+
+
+id_pskc_algorithmParameters = _OID(id_pskc, 15)
+
+class Encoding(char.UTF8String):
+ pass
+
+Encoding.namedValues = namedval.NamedValues(
+ ('dec', "DECIMAL"),
+ ('hex', "HEXADECIMAL"),
+ ('alpha', "ALPHANUMERIC"),
+ ('b64', "BASE64"),
+ ('bin', "BINARY")
+)
+
+Encoding.subtypeSpec = constraint.SingleValueConstraint(
+ "DECIMAL", "HEXADECIMAL", "ALPHANUMERIC", "BASE64", "BINARY" )
+
+class ChallengeFormat(univ.Sequence):
+ pass
+
+ChallengeFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encoding', Encoding()),
+ namedtype.DefaultedNamedType('checkDigit',
+ univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('min', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('max', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+class ResponseFormat(univ.Sequence):
+ pass
+
+ResponseFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encoding', Encoding()),
+ namedtype.NamedType('length', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.DefaultedNamedType('checkDigit',
+ univ.Boolean().subtype(value=0))
+)
+
+class PSKCAlgorithmParameters(univ.Choice):
+ pass
+
+PSKCAlgorithmParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('suite', char.UTF8String()),
+ namedtype.NamedType('challengeFormat', ChallengeFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('responseFormat', ResponseFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+class at_pskc_algorithmParameters(PSKCAlgorithmParameters):
+ pass
+
+
+id_pskc_counter = _OID(id_pskc, 16)
+
+class at_pskc_counter(univ.Integer):
+ pass
+
+at_pskc_counter.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_time = _OID(id_pskc, 17)
+
+class at_pskc_time(rfc6019.BinaryTime):
+ pass
+
+
+id_pskc_timeInterval = _OID(id_pskc, 18)
+
+class at_pskc_timeInterval(univ.Integer):
+ pass
+
+at_pskc_timeInterval.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_timeDrift = _OID(id_pskc, 19)
+
+class at_pskc_timeDrift(univ.Integer):
+ pass
+
+at_pskc_timeDrift.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_valueMAC = _OID(id_pskc, 20)
+
+class ValueMac(univ.Sequence):
+ pass
+
+ValueMac.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('macAlgorithm', char.UTF8String()),
+ namedtype.NamedType('mac', char.UTF8String())
+)
+
+class at_pskc_valueMAC(ValueMac):
+ pass
+
+
+id_pskc_keyUserId = _OID(id_pskc, 27)
+
+class at_pskc_keyId(char.UTF8String):
+ pass
+
+
+id_pskc_keyStartDate = _OID(id_pskc, 21)
+
+class at_pskc_keyStartDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_keyExpiryDate = _OID(id_pskc, 22)
+
+class at_pskc_keyExpiryDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_numberOfTransactions = _OID(id_pskc, 23)
+
+class at_pskc_numberOfTransactions(univ.Integer):
+ pass
+
+at_pskc_numberOfTransactions.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_keyUsages = _OID(id_pskc, 24)
+
+class PSKCKeyUsage(char.UTF8String):
+ pass
+
+PSKCKeyUsage.namedValues = namedval.NamedValues(
+ ('otp', "OTP"),
+ ('cr', "CR"),
+ ('encrypt', "Encrypt"),
+ ('integrity', "Integrity"),
+ ('verify', "Verify"),
+ ('unlock', "Unlock"),
+ ('decrypt', "Decrypt"),
+ ('keywrap', "KeyWrap"),
+ ('unwrap', "Unwrap"),
+ ('derive', "Derive"),
+ ('generate', "Generate")
+)
+
+PSKCKeyUsage.subtypeSpec = constraint.SingleValueConstraint(
+ "OTP", "CR", "Encrypt", "Integrity", "Verify", "Unlock",
+ "Decrypt", "KeyWrap", "Unwrap", "Derive", "Generate" )
+
+class PSKCKeyUsages(univ.SequenceOf):
+ pass
+
+PSKCKeyUsages.componentType = PSKCKeyUsage()
+
+class at_pskc_keyUsage(PSKCKeyUsages):
+ pass
+
+
+id_pskc_pinPolicy = _OID(id_pskc, 25)
+
+class PINUsageMode(char.UTF8String):
+ pass
+
+PINUsageMode.namedValues = namedval.NamedValues(
+ ("local", "Local"),
+ ("prepend", "Prepend"),
+ ("append", "Append"),
+ ("algorithmic", "Algorithmic")
+)
+
+PINUsageMode.subtypeSpec = constraint.SingleValueConstraint(
+ "Local", "Prepend", "Append", "Algorithmic" )
+
+class PINPolicy(univ.Sequence):
+ pass
+
+PINPolicy.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pinKeyId', char.UTF8String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('pinUsageMode', PINUsageMode().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('maxFailedAttempts', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('minLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('maxLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('pinEncoding', Encoding().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+class at_pskc_pinPolicy(PINPolicy):
+ pass
+
+
+# Map of Symmetric Key Package Attribute OIDs to Attributes
+
+sKeyPkgAttributesMap = {
+ id_pskc_manufacturer: at_pskc_manufacturer(),
+ id_pskc_serialNo: at_pskc_serialNo(),
+ id_pskc_model: at_pskc_model(),
+ id_pskc_issueNo: at_pskc_issueNo(),
+ id_pskc_deviceBinding: at_pskc_deviceBinding(),
+ id_pskc_deviceStartDate: at_pskc_deviceStartDate(),
+ id_pskc_deviceExpiryDate: at_pskc_deviceExpiryDate(),
+ id_pskc_moduleId: at_pskc_moduleId(),
+ id_pskc_deviceUserId: at_pskc_deviceUserId(),
+}
+
+
+# Map of Symmetric Key Attribute OIDs to Attributes
+
+sKeyAttributesMap = {
+ id_pskc_keyId: at_pskc_keyId(),
+ id_pskc_algorithm: at_pskc_algorithm(),
+ id_pskc_issuer: at_pskc_issuer(),
+ id_pskc_keyProfileId: at_pskc_keyProfileId(),
+ id_pskc_keyReference: at_pskc_keyReference(),
+ id_pskc_friendlyName: at_pskc_friendlyName(),
+ id_pskc_algorithmParameters: at_pskc_algorithmParameters(),
+ id_pskc_counter: at_pskc_counter(),
+ id_pskc_time: at_pskc_time(),
+ id_pskc_timeInterval: at_pskc_timeInterval(),
+ id_pskc_timeDrift: at_pskc_timeDrift(),
+ id_pskc_valueMAC: at_pskc_valueMAC(),
+ id_pskc_keyUserId: at_pskc_keyUserId(),
+ id_pskc_keyStartDate: at_pskc_keyStartDate(),
+ id_pskc_keyExpiryDate: at_pskc_keyExpiryDate(),
+ id_pskc_numberOfTransactions: at_pskc_numberOfTransactions(),
+ id_pskc_keyUsages: at_pskc_keyUsage(),
+ id_pskc_pinPolicy: at_pskc_pinPolicy(),
+}
+
+
+# This definition replaces Attribute() from rfc5652.py; it is the same except
+# that opentype is added with sKeyPkgAttributesMap and sKeyAttributesMap
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class SKeyAttribute(univ.Sequence):
+ pass
+
+SKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', sKeyAttributesMap)
+ )
+)
+
+
+class SKeyPkgAttribute(univ.Sequence):
+ pass
+
+SKeyPkgAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', sKeyPkgAttributesMap)
+ )
+)
+
+
+# Symmetric Key Package Content Type
+
+id_ct_KP_sKeyPackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.25')
+
+
+class KeyPkgVersion(univ.Integer):
+ pass
+
+KeyPkgVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+class OneSymmetricKey(univ.Sequence):
+ pass
+
+OneSymmetricKey.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('sKeyAttrs',
+ univ.SequenceOf(componentType=SKeyAttribute()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('sKey', univ.OctetString())
+)
+
+OneSymmetricKey.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2)
+
+
+class SymmetricKeys(univ.SequenceOf):
+ pass
+
+SymmetricKeys.componentType = OneSymmetricKey()
+SymmetricKeys.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+class SymmetricKeyPackage(univ.Sequence):
+ pass
+
+SymmetricKeyPackage.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v1')),
+ namedtype.OptionalNamedType('sKeyPkgAttrs',
+ univ.SequenceOf(componentType=SKeyPkgAttribute()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('sKeys', SymmetricKeys())
+)
+
+
+# Map of Content Type OIDs to Content Types are
+# added to the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_sKeyPackage: SymmetricKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6032.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6032.py
new file mode 100644
index 0000000000..563639a8d6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6032.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Encrypted Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6032.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5083
+
+
+# Content Decryption Key Identifier attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+class ContentDecryptKeyID(univ.OctetString):
+ pass
+
+aa_content_decrypt_key_identifier = rfc5652.Attribute()
+aa_content_decrypt_key_identifier['attrType'] = id_aa_KP_contentDecryptKeyID
+aa_content_decrypt_key_identifier['attrValues'][0] = ContentDecryptKeyID()
+
+
+# Encrypted Key Package Content Type
+
+id_ct_KP_encryptedKeyPkg = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.2')
+
+class EncryptedKeyPackage(univ.Choice):
+ pass
+
+EncryptedKeyPackage.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encrypted', rfc5652.EncryptedData()),
+ namedtype.NamedType('enveloped', rfc5652.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('authEnveloped', rfc5083.AuthEnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Map of Attribute Type OIDs to Attributes are
+# added to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types are
+# added to the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_encryptedKeyPkg: EncryptedKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6120.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6120.py
new file mode 100644
index 0000000000..ab256203a0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6120.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Extensible Messaging and Presence Protocol (XMPP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6120.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# XmppAddr Identifier Type as specified in Section 13.7.1.4. of RFC 6120
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_xmppAddr = id_on + (5, )
+
+
+class XmppAddr(char.UTF8String):
+ pass
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_xmppAddr: XmppAddr(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6170.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6170.py
new file mode 100644
index 0000000000..e2876167b7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6170.py
@@ -0,0 +1,17 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Image in the Internet X.509 Public Key Infrastructure
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6170.txt
+#
+
+from pyasn1.type import univ
+
+id_logo_certImage = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.3')
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6187.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6187.py
new file mode 100644
index 0000000000..4be0054716
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6187.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509v3 Certificates for Secure Shell Authentication
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6187.txt
+#
+
+from pyasn1.type import univ
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_kp = id_pkix + (3, )
+
+id_kp_secureShellClient = id_kp + (21, )
+id_kp_secureShellServer = id_kp + (22, )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6210.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6210.py
new file mode 100644
index 0000000000..28587b9e70
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6210.py
@@ -0,0 +1,42 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Experiment for Hash Functions with Parameters in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6210.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_alg_MD5_XOR_EXPERIMENT = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.13')
+
+
+class MD5_XOR_EXPERIMENT(univ.OctetString):
+ pass
+
+MD5_XOR_EXPERIMENT.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+mda_xor_md5_EXPERIMENT = rfc5280.AlgorithmIdentifier()
+mda_xor_md5_EXPERIMENT['algorithm'] = id_alg_MD5_XOR_EXPERIMENT
+mda_xor_md5_EXPERIMENT['parameters'] = MD5_XOR_EXPERIMENT()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones that are in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_MD5_XOR_EXPERIMENT: MD5_XOR_EXPERIMENT(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6211.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6211.py
new file mode 100644
index 0000000000..abd7a8688d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6211.py
@@ -0,0 +1,72 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Algorithm Identifier Protection Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6211.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+DigestAlgorithmIdentifier = rfc5652.DigestAlgorithmIdentifier
+
+MessageAuthenticationCodeAlgorithm = rfc5652.MessageAuthenticationCodeAlgorithm
+
+SignatureAlgorithmIdentifier = rfc5652.SignatureAlgorithmIdentifier
+
+
+# CMS Algorithm Protection attribute
+
+id_aa_cmsAlgorithmProtect = univ.ObjectIdentifier('1.2.840.113549.1.9.52')
+
+
+class CMSAlgorithmProtection(univ.Sequence):
+ pass
+
+CMSAlgorithmProtection.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signatureAlgorithm',
+ SignatureAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('macAlgorithm',
+ MessageAuthenticationCodeAlgorithm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+CMSAlgorithmProtection.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('signatureAlgorithm', constraint.ComponentPresentConstraint()),
+ ('macAlgorithm', constraint.ComponentAbsentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('signatureAlgorithm', constraint.ComponentAbsentConstraint()),
+ ('macAlgorithm', constraint.ComponentPresentConstraint()))
+)
+
+
+aa_cmsAlgorithmProtection = rfc5652.Attribute()
+aa_cmsAlgorithmProtection['attrType'] = id_aa_cmsAlgorithmProtect
+aa_cmsAlgorithmProtection['attrValues'][0] = CMSAlgorithmProtection()
+
+
+# Map of Attribute Type OIDs to Attributes are
+# added to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_cmsAlgorithmProtect: CMSAlgorithmProtection(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) \ No newline at end of file
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6402.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6402.py
new file mode 100644
index 0000000000..5490b05fb9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6402.py
@@ -0,0 +1,628 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add a maps for CMC Control Attributes
+# and CMC Content Types for use with opentypes.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management over CMS (CMC) Updates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6402.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4211
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Since CMS Attributes and CMC Controls both use 'attrType', one map is used
+cmcControlAttributesMap = rfc5652.cmsAttributesMap
+
+
+class ChangeSubjectName(univ.Sequence):
+ pass
+
+
+ChangeSubjectName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('subject', rfc5280.Name()),
+ namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class CMCStatus(univ.Integer):
+ pass
+
+
+CMCStatus.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('failed', 2),
+ ('pending', 3),
+ ('noSupport', 4),
+ ('confirmRequired', 5),
+ ('popRequired', 6),
+ ('partial', 7)
+)
+
+
+class PendInfo(univ.Sequence):
+ pass
+
+
+PendInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pendToken', univ.OctetString()),
+ namedtype.NamedType('pendTime', useful.GeneralizedTime())
+)
+
+bodyIdMax = univ.Integer(4294967295)
+
+
+class BodyPartID(univ.Integer):
+ pass
+
+
+BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax)
+
+
+class BodyPartPath(univ.SequenceOf):
+ pass
+
+
+BodyPartPath.componentType = BodyPartID()
+BodyPartPath.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class BodyPartReference(univ.Choice):
+ pass
+
+
+BodyPartReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('bodyPartPath', BodyPartPath())
+)
+
+
+class CMCFailInfo(univ.Integer):
+ pass
+
+
+CMCFailInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('unsupportedExt', 5),
+ ('mustArchiveKeys', 6),
+ ('badIdentity', 7),
+ ('popRequired', 8),
+ ('popFailed', 9),
+ ('noKeyReuse', 10),
+ ('internalCAError', 11),
+ ('tryLater', 12),
+ ('authDataFail', 13)
+)
+
+
+class CMCStatusInfoV2(univ.Sequence):
+ pass
+
+
+CMCStatusInfoV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo()),
+ namedtype.NamedType(
+ 'extendedFailInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('failInfoValue', AttributeValue()))
+ )
+ )
+ )
+ )
+ )
+)
+
+
+class GetCRL(univ.Sequence):
+ pass
+
+
+GetCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()),
+ namedtype.OptionalNamedType('time', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags())
+)
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_cmc = _buildOid(id_pkix, 7)
+
+id_cmc_batchResponses = _buildOid(id_cmc, 29)
+
+id_cmc_popLinkWitness = _buildOid(id_cmc, 23)
+
+
+class PopLinkWitnessV2(univ.Sequence):
+ pass
+
+
+PopLinkWitnessV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33)
+
+id_cmc_identityProofV2 = _buildOid(id_cmc, 34)
+
+id_cmc_revokeRequest = _buildOid(id_cmc, 17)
+
+id_cmc_recipientNonce = _buildOid(id_cmc, 7)
+
+
+class ControlsProcessed(univ.Sequence):
+ pass
+
+
+ControlsProcessed.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference()))
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'certificationRequestInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', rfc5280.Name()),
+ namedtype.NamedType(
+ 'subjectPublicKeyInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+ )
+ ),
+ namedtype.NamedType(
+ 'attributes', univ.SetOf(
+ componentType=rfc5652.Attribute()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class TaggedCertificationRequest(univ.Sequence):
+ pass
+
+
+TaggedCertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('certificationRequest', CertificationRequest())
+)
+
+
+class TaggedRequest(univ.Choice):
+ pass
+
+
+TaggedRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tcr', TaggedCertificationRequest().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('crm',
+ rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()),
+ namedtype.NamedType('requestMessageValue', univ.Any())
+ ))
+ .subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+id_cmc_popLinkRandom = _buildOid(id_cmc, 22)
+
+id_cmc_statusInfo = _buildOid(id_cmc, 1)
+
+id_cmc_trustedAnchors = _buildOid(id_cmc, 26)
+
+id_cmc_transactionId = _buildOid(id_cmc, 5)
+
+id_cmc_encryptedPOP = _buildOid(id_cmc, 9)
+
+
+class PublishTrustAnchors(univ.Sequence):
+ pass
+
+
+PublishTrustAnchors.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seqNumber', univ.Integer()),
+ namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString()))
+)
+
+
+class RevokeRequest(univ.Sequence):
+ pass
+
+
+RevokeRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('reason', rfc5280.CRLReason()),
+ namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('passphrase', univ.OctetString()),
+ namedtype.OptionalNamedType('comment', char.UTF8String())
+)
+
+id_cmc_senderNonce = _buildOid(id_cmc, 6)
+
+id_cmc_authData = _buildOid(id_cmc, 27)
+
+
+class TaggedContentInfo(univ.Sequence):
+ pass
+
+
+TaggedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('contentInfo', rfc5652.ContentInfo())
+)
+
+
+class IdentifyProofV2(univ.Sequence):
+ pass
+
+
+IdentifyProofV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+
+class CMCPublicationInfo(univ.Sequence):
+ pass
+
+
+CMCPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())),
+ namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo())
+)
+
+id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27)
+
+id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24)
+
+id_cmc_raIdentityWitness = _buildOid(id_cmc, 35)
+
+id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14)
+
+id_cct = _buildOid(id_pkix, 12)
+
+id_cct_PKIData = _buildOid(id_cct, 2)
+
+id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28)
+
+
+class CMCStatusInfo(univ.Sequence):
+ pass
+
+
+CMCStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo())
+ )
+ )
+ )
+)
+
+
+class DecryptedPOP(univ.Sequence):
+ pass
+
+
+DecryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('thePOP', univ.OctetString())
+)
+
+id_cmc_addExtensions = _buildOid(id_cmc, 8)
+
+id_cmc_modCertTemplate = _buildOid(id_cmc, 31)
+
+
+class TaggedAttribute(univ.Sequence):
+ pass
+
+
+TaggedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmcControlAttributesMap)
+ )
+)
+
+
+class OtherMsg(univ.Sequence):
+ pass
+
+
+OtherMsg.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherMsgValue', univ.Any())
+)
+
+
+class PKIData(univ.Sequence):
+ pass
+
+
+PKIData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class BodyPartList(univ.SequenceOf):
+ pass
+
+
+BodyPartList.componentType = BodyPartID()
+BodyPartList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_cmc_responseBody = _buildOid(id_cmc, 37)
+
+
+class AuthPublish(BodyPartID):
+ pass
+
+
+class CMCUnsignedData(univ.Sequence):
+ pass
+
+
+CMCUnsignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartPath', BodyPartPath()),
+ namedtype.NamedType('identifier', univ.ObjectIdentifier()),
+ namedtype.NamedType('content', univ.Any())
+)
+
+
+class CMCCertId(rfc5652.IssuerAndSerialNumber):
+ pass
+
+
+class PKIResponse(univ.Sequence):
+ pass
+
+
+PKIResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class ResponseBody(PKIResponse):
+ pass
+
+
+id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
+
+id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
+
+
+class ModCertTemplate(univ.Sequence):
+ pass
+
+
+ModCertTemplate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartPath()),
+ namedtype.NamedType('certReferences', BodyPartList()),
+ namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
+ namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
+)
+
+id_cmc_regInfo = _buildOid(id_cmc, 18)
+
+id_cmc_identityProof = _buildOid(id_cmc, 3)
+
+
+class ExtensionReq(univ.SequenceOf):
+ pass
+
+
+ExtensionReq.componentType = rfc5280.Extension()
+ExtensionReq.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 28)
+
+id_cmc_publishCert = _buildOid(id_cmc, 30)
+
+id_cmc_dataReturn = _buildOid(id_cmc, 4)
+
+
+class LraPopWitness(univ.Sequence):
+ pass
+
+
+LraPopWitness.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataBodyid', BodyPartID()),
+ namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
+)
+
+id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
+
+id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
+
+id_cmc_getCert = _buildOid(id_cmc, 15)
+
+id_cmc_batchRequests = _buildOid(id_cmc, 28)
+
+id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
+
+id_cmc_responseInfo = _buildOid(id_cmc, 19)
+
+id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
+
+
+class GetCert(univ.Sequence):
+ pass
+
+
+GetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+id_cmc_identification = _buildOid(id_cmc, 2)
+
+id_cmc_queryPending = _buildOid(id_cmc, 21)
+
+
+class AddExtensions(univ.Sequence):
+ pass
+
+
+AddExtensions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartID()),
+ namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
+)
+
+
+class EncryptedPOP(univ.Sequence):
+ pass
+
+
+EncryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request', TaggedRequest()),
+ namedtype.NamedType('cms', rfc5652.ContentInfo()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witnessAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_getCRL = _buildOid(id_cmc, 16)
+
+id_cct_PKIResponse = _buildOid(id_cct, 3)
+
+id_cmc_controlProcessed = _buildOid(id_cmc, 32)
+
+
+class NoSignatureValue(univ.OctetString):
+ pass
+
+
+id_ad_cmc = _buildOid(rfc5280.id_ad, 12)
+
+id_alg_noSignature = _buildOid(id_pkix, 6, 2)
+
+
+# Map of CMC Control OIDs to CMC Control Attributes
+
+_cmcControlAttributesMapUpdate = {
+ id_cmc_statusInfo: CMCStatusInfo(),
+ id_cmc_statusInfoV2: CMCStatusInfoV2(),
+ id_cmc_identification: char.UTF8String(),
+ id_cmc_identityProof: univ.OctetString(),
+ id_cmc_identityProofV2: IdentifyProofV2(),
+ id_cmc_dataReturn: univ.OctetString(),
+ id_cmc_transactionId: univ.Integer(),
+ id_cmc_senderNonce: univ.OctetString(),
+ id_cmc_recipientNonce: univ.OctetString(),
+ id_cmc_addExtensions: AddExtensions(),
+ id_cmc_encryptedPOP: EncryptedPOP(),
+ id_cmc_decryptedPOP: DecryptedPOP(),
+ id_cmc_lraPOPWitness: LraPopWitness(),
+ id_cmc_getCert: GetCert(),
+ id_cmc_getCRL: GetCRL(),
+ id_cmc_revokeRequest: RevokeRequest(),
+ id_cmc_regInfo: univ.OctetString(),
+ id_cmc_responseInfo: univ.OctetString(),
+ id_cmc_queryPending: univ.OctetString(),
+ id_cmc_popLinkRandom: univ.OctetString(),
+ id_cmc_popLinkWitness: univ.OctetString(),
+ id_cmc_popLinkWitnessV2: PopLinkWitnessV2(),
+ id_cmc_confirmCertAcceptance: CMCCertId(),
+ id_cmc_trustedAnchors: PublishTrustAnchors(),
+ id_cmc_authData: AuthPublish(),
+ id_cmc_batchRequests: BodyPartList(),
+ id_cmc_batchResponses: BodyPartList(),
+ id_cmc_publishCert: CMCPublicationInfo(),
+ id_cmc_modCertTemplate: ModCertTemplate(),
+ id_cmc_controlProcessed: ControlsProcessed(),
+ id_ExtensionReq: ExtensionReq(),
+}
+
+cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
+
+
+# Map of CMC Content Type OIDs to CMC Content Types are added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_cct_PKIData: PKIData(),
+ id_cct_PKIResponse: PKIResponse(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6482.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6482.py
new file mode 100644
index 0000000000..d213a46f8d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6482.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RPKI Route Origin Authorizations (ROAs)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6482.txt
+# https://www.rfc-editor.org/errata/eid5881
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_ct_routeOriginAuthz = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.24')
+
+
+class ASID(univ.Integer):
+ pass
+
+
+class IPAddress(univ.BitString):
+ pass
+
+
+class ROAIPAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('address', IPAddress()),
+ namedtype.OptionalNamedType('maxLength', univ.Integer())
+ )
+
+
+class ROAIPAddressFamily(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressFamily',
+ univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
+ namedtype.NamedType('addresses',
+ univ.SequenceOf(componentType=ROAIPAddress()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class RouteOriginAttestation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.NamedType('asID', ASID()),
+ namedtype.NamedType('ipAddrBlocks',
+ univ.SequenceOf(componentType=ROAIPAddressFamily()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_routeOriginAuthz: RouteOriginAttestation(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6486.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6486.py
new file mode 100644
index 0000000000..31c936a4f2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6486.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RPKI Manifests
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6486.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16')
+
+id_ct = id_smime + (1, )
+
+id_ct_rpkiManifest = id_ct + (26, )
+
+
+class FileAndHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('file', char.IA5String()),
+ namedtype.NamedType('hash', univ.BitString())
+ )
+
+
+class Manifest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.NamedType('manifestNumber',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('thisUpdate',
+ useful.GeneralizedTime()),
+ namedtype.NamedType('nextUpdate',
+ useful.GeneralizedTime()),
+ namedtype.NamedType('fileHashAlg',
+ univ.ObjectIdentifier()),
+ namedtype.NamedType('fileList',
+ univ.SequenceOf(componentType=FileAndHash()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, MAX)))
+ )
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_rpkiManifest: Manifest(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6487.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6487.py
new file mode 100644
index 0000000000..d8c2f87423
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6487.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Profile for X.509 PKIX Resource Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6487.txt
+#
+
+from pyasn1.type import univ
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_ad = id_pkix + (48, )
+
+id_ad_rpkiManifest = id_ad + (10, )
+id_ad_signedObject = id_ad + (11, )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6664.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6664.py
new file mode 100644
index 0000000000..41629d8d7f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6664.py
@@ -0,0 +1,147 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# S/MIME Capabilities for Public Key Definitions
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6664.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc3279
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 3279
+
+dhpublicnumber = rfc3279.dhpublicnumber
+
+Dss_Parms = rfc3279.Dss_Parms
+
+id_dsa = rfc3279.id_dsa
+
+id_ecPublicKey = rfc3279.id_ecPublicKey
+
+rsaEncryption = rfc3279.rsaEncryption
+
+
+# Imports from RFC 4055
+
+id_mgf1 = rfc4055.id_mgf1
+
+id_RSAES_OAEP = rfc4055.id_RSAES_OAEP
+
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+
+
+# Imports from RFC 5480
+
+ECParameters = rfc5480.ECParameters
+
+id_ecDH = rfc5480.id_ecDH
+
+id_ecMQV = rfc5480.id_ecMQV
+
+
+# RSA
+
+class RSAKeySize(univ.Integer):
+ # suggested values are 1024, 2048, 3072, 4096, 7680, 8192, and 15360;
+ # however, the integer value is not limited to these suggestions
+ pass
+
+
+class RSAKeyCapabilities(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('minKeySize', RSAKeySize()),
+ namedtype.OptionalNamedType('maxKeySize', RSAKeySize())
+ )
+
+
+class RsaSsa_Pss_sig_caps(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', AlgorithmIdentifier()),
+ namedtype.OptionalNamedType('maskAlg', AlgorithmIdentifier()),
+ namedtype.DefaultedNamedType('trailerField', univ.Integer().subtype(value=1))
+ )
+
+
+# Diffie-Hellman and DSA
+
+class DSAKeySize(univ.Integer):
+ subtypeSpec = constraint.SingleValueConstraint(1024, 2048, 3072, 7680, 15360)
+
+
+class DSAKeyCapabilities(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keySizes', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('minKeySize',
+ DSAKeySize()),
+ namedtype.OptionalNamedType('maxKeySize',
+ DSAKeySize()),
+ namedtype.OptionalNamedType('maxSizeP',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('maxSizeQ',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('maxSizeG',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyParams',
+ Dss_Parms().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+# Elliptic Curve
+
+class EC_SMimeCaps(univ.SequenceOf):
+ componentType = ECParameters()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Update the SMIMECapabilities Attribute Map in rfc5751.py
+#
+# The map can either include an entry for scap-sa-rsaSSA-PSS or
+# scap-pk-rsaSSA-PSS, but not both. One is associated with the
+# public key and the other is associated with the signature
+# algorithm; however, they use the same OID. If you need the
+# other one in your application, copy the map into a local dict,
+# adjust as needed, and pass the local dict to the decoder with
+# openTypes=your_local_map.
+
+_smimeCapabilityMapUpdate = {
+ rsaEncryption: RSAKeyCapabilities(),
+ id_RSASSA_PSS: RSAKeyCapabilities(),
+ # id_RSASSA_PSS: RsaSsa_Pss_sig_caps(),
+ id_RSAES_OAEP: RSAKeyCapabilities(),
+ id_dsa: DSAKeyCapabilities(),
+ dhpublicnumber: DSAKeyCapabilities(),
+ id_ecPublicKey: EC_SMimeCaps(),
+ id_ecDH: EC_SMimeCaps(),
+ id_ecMQV: EC_SMimeCaps(),
+ id_mgf1: AlgorithmIdentifier(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6955.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6955.py
new file mode 100644
index 0000000000..09f2d6562e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6955.py
@@ -0,0 +1,108 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Diffie-Hellman Proof-of-Possession Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6955.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+MessageDigest = rfc5652.MessageDigest
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+
+# Imports from RFC 5280
+
+id_pkix = rfc5280.id_pkix
+
+
+# Imports from RFC 3279
+
+Dss_Sig_Value = rfc3279.Dss_Sig_Value
+
+DomainParameters = rfc3279.DomainParameters
+
+
+# Static DH Proof-of-Possession
+
+class DhSigStatic(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerAndSerial', IssuerAndSerialNumber()),
+ namedtype.NamedType('hashValue', MessageDigest())
+ )
+
+
+# Object Identifiers
+
+id_dh_sig_hmac_sha1 = id_pkix + (6, 3, )
+
+id_dhPop_static_sha1_hmac_sha1 = univ.ObjectIdentifier(id_dh_sig_hmac_sha1)
+
+
+id_alg_dh_pop = id_pkix + (6, 4, )
+
+id_alg_dhPop_sha1 = univ.ObjectIdentifier(id_alg_dh_pop)
+
+id_alg_dhPop_sha224 = id_pkix + (6, 5, )
+
+id_alg_dhPop_sha256 = id_pkix + (6, 6, )
+
+id_alg_dhPop_sha384 = id_pkix + (6, 7, )
+
+id_alg_dhPop_sha512 = id_pkix + (6, 8, )
+
+
+id_alg_dhPop_static_sha224_hmac_sha224 = id_pkix + (6, 15, )
+
+id_alg_dhPop_static_sha256_hmac_sha256 = id_pkix + (6, 16, )
+
+id_alg_dhPop_static_sha384_hmac_sha384 = id_pkix + (6, 17, )
+
+id_alg_dhPop_static_sha512_hmac_sha512 = id_pkix + (6, 18, )
+
+
+id_alg_ecdhPop_static_sha224_hmac_sha224 = id_pkix + (6, 25, )
+
+id_alg_ecdhPop_static_sha256_hmac_sha256 = id_pkix + (6, 26, )
+
+id_alg_ecdhPop_static_sha384_hmac_sha384 = id_pkix + (6, 27, )
+
+id_alg_ecdhPop_static_sha512_hmac_sha512 = id_pkix + (6, 28, )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_dh_pop: DomainParameters(),
+ id_alg_dhPop_sha224: DomainParameters(),
+ id_alg_dhPop_sha256: DomainParameters(),
+ id_alg_dhPop_sha384: DomainParameters(),
+ id_alg_dhPop_sha512: DomainParameters(),
+ id_dh_sig_hmac_sha1: univ.Null(""),
+ id_alg_dhPop_static_sha224_hmac_sha224: univ.Null(""),
+ id_alg_dhPop_static_sha256_hmac_sha256: univ.Null(""),
+ id_alg_dhPop_static_sha384_hmac_sha384: univ.Null(""),
+ id_alg_dhPop_static_sha512_hmac_sha512: univ.Null(""),
+ id_alg_ecdhPop_static_sha224_hmac_sha224: univ.Null(""),
+ id_alg_ecdhPop_static_sha256_hmac_sha256: univ.Null(""),
+ id_alg_ecdhPop_static_sha384_hmac_sha384: univ.Null(""),
+ id_alg_ecdhPop_static_sha512_hmac_sha512: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6960.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6960.py
new file mode 100644
index 0000000000..e5f1305649
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc6960.py
@@ -0,0 +1,223 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Online Certificate Status Protocol (OCSP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6960.txt
+#
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax
+Certificate = rfc5280.Certificate
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+CRLReason = rfc5280.CRLReason
+Extensions = rfc5280.Extensions
+GeneralName = rfc5280.GeneralName
+Name = rfc5280.Name
+
+id_kp = rfc5280.id_kp
+
+id_ad_ocsp = rfc5280.id_ad_ocsp
+
+
+# Imports from the original OCSP module in RFC 2560
+
+AcceptableResponses = rfc2560.AcceptableResponses
+ArchiveCutoff = rfc2560.ArchiveCutoff
+CertStatus = rfc2560.CertStatus
+KeyHash = rfc2560.KeyHash
+OCSPResponse = rfc2560.OCSPResponse
+OCSPResponseStatus = rfc2560.OCSPResponseStatus
+ResponseBytes = rfc2560.ResponseBytes
+RevokedInfo = rfc2560.RevokedInfo
+UnknownInfo = rfc2560.UnknownInfo
+Version = rfc2560.Version
+
+id_kp_OCSPSigning = rfc2560.id_kp_OCSPSigning
+
+id_pkix_ocsp = rfc2560.id_pkix_ocsp
+id_pkix_ocsp_archive_cutoff = rfc2560.id_pkix_ocsp_archive_cutoff
+id_pkix_ocsp_basic = rfc2560.id_pkix_ocsp_basic
+id_pkix_ocsp_crl = rfc2560.id_pkix_ocsp_crl
+id_pkix_ocsp_nocheck = rfc2560.id_pkix_ocsp_nocheck
+id_pkix_ocsp_nonce = rfc2560.id_pkix_ocsp_nonce
+id_pkix_ocsp_response = rfc2560.id_pkix_ocsp_response
+id_pkix_ocsp_service_locator = rfc2560.id_pkix_ocsp_service_locator
+
+
+# Additional object identifiers
+
+id_pkix_ocsp_pref_sig_algs = id_pkix_ocsp + (8, )
+id_pkix_ocsp_extended_revoke = id_pkix_ocsp + (9, )
+
+
+# Updated structures (mostly to improve openTypes support)
+
+class CertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('issuerNameHash', univ.OctetString()),
+ namedtype.NamedType('issuerKeyHash', univ.OctetString()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+class SingleResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certID', CertID()),
+ namedtype.NamedType('certStatus', CertStatus()),
+ namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('singleExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ResponderID(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('byName', Name().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('byKey', KeyHash().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class ResponseData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('responderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('responses', univ.SequenceOf(
+ componentType=SingleResponse())),
+ namedtype.OptionalNamedType('responseExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BasicOCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsResponseData', ResponseData()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(
+ componentType=Certificate()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Request(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('reqCert', CertID()),
+ namedtype.OptionalNamedType('singleRequestExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Signature(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(
+ componentType=Certificate()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class TBSRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('requestList', univ.SequenceOf(
+ componentType=Request())),
+ namedtype.OptionalNamedType('requestExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class OCSPRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsRequest', TBSRequest()),
+ namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+# Previously omitted structure
+
+class ServiceLocator(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('locator', AuthorityInfoAccessSyntax())
+ )
+
+
+# Additional structures
+
+class CrlID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlUrl', char.IA5String().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crlNum', univ.Integer().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('crlTime', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class PreferredSignatureAlgorithm(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigIdentifier', AlgorithmIdentifier()),
+ namedtype.OptionalNamedType('certIdentifier', AlgorithmIdentifier())
+ )
+
+
+class PreferredSignatureAlgorithms(univ.SequenceOf):
+ componentType = PreferredSignatureAlgorithm()
+
+
+
+# Response Type OID to Response Map
+
+ocspResponseMap = {
+ id_pkix_ocsp_basic: BasicOCSPResponse(),
+}
+
+
+# Map of Extension OIDs to Extensions added to the ones
+# that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ # Certificate Extension
+ id_pkix_ocsp_nocheck: univ.Null(""),
+ # OCSP Request Extensions
+ id_pkix_ocsp_nonce: univ.OctetString(),
+ id_pkix_ocsp_response: AcceptableResponses(),
+ id_pkix_ocsp_service_locator: ServiceLocator(),
+ id_pkix_ocsp_pref_sig_algs: PreferredSignatureAlgorithms(),
+ # OCSP Response Extensions
+ id_pkix_ocsp_crl: CrlID(),
+ id_pkix_ocsp_archive_cutoff: ArchiveCutoff(),
+ id_pkix_ocsp_extended_revoke: univ.Null(""),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7030.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7030.py
new file mode 100644
index 0000000000..84b6dc5f9a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7030.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enrollment over Secure Transport (EST)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7030.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Imports from RFC 5652
+
+Attribute = rfc5652.Attribute
+
+
+# Asymmetric Decrypt Key Identifier Attribute
+
+id_aa_asymmDecryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.54')
+
+class AsymmetricDecryptKeyIdentifier(univ.OctetString):
+ pass
+
+
+aa_asymmDecryptKeyID = Attribute()
+aa_asymmDecryptKeyID['attrType'] = id_aa_asymmDecryptKeyID
+aa_asymmDecryptKeyID['attrValues'][0] = AsymmetricDecryptKeyIdentifier()
+
+
+# CSR Attributes
+
+class AttrOrOID(univ.Choice):
+ pass
+
+AttrOrOID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('attribute', Attribute())
+)
+
+
+class CsrAttrs(univ.SequenceOf):
+ pass
+
+CsrAttrs.componentType = AttrOrOID()
+CsrAttrs.subtypeSpec=constraint.ValueSizeConstraint(0, MAX)
+
+
+# Update CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ id_aa_asymmDecryptKeyID: AsymmetricDecryptKeyIdentifier(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7191.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7191.py
new file mode 100644
index 0000000000..7c2be11562
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7191.py
@@ -0,0 +1,261 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add support for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Key Package Receipt and Error Content Types
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7191.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+DistinguishedName = rfc5280.DistinguishedName
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+AttributeValues.sizeSpec = univ.Set.sizeSpec + constraint.ValueSizeConstraint(1, 1)
+
+
+class SingleAttribute(univ.Sequence):
+ pass
+
+SingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', AttributeValues(),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# SIR Entity Name
+
+class SIREntityNameType(univ.ObjectIdentifier):
+ pass
+
+
+class SIREntityNameValue(univ.Any):
+ pass
+
+
+class SIREntityName(univ.Sequence):
+ pass
+
+SIREntityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sirenType', SIREntityNameType()),
+ namedtype.NamedType('sirenValue', univ.OctetString())
+ # CONTAINING the DER-encoded SIREntityNameValue
+)
+
+
+class SIREntityNames(univ.SequenceOf):
+ pass
+
+SIREntityNames.componentType = SIREntityName()
+SIREntityNames.sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+id_dn = univ.ObjectIdentifier('2.16.840.1.101.2.1.16.0')
+
+
+class siren_dn(SIREntityName):
+ def __init__(self):
+ SIREntityName.__init__(self)
+ self['sirenType'] = id_dn
+
+
+# Key Package Error CMS Content Type
+
+class EnumeratedErrorCode(univ.Enumerated):
+ pass
+
+# Error codes with values <= 33 are aligned with RFC 5934
+EnumeratedErrorCode.namedValues = namedval.NamedValues(
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('unsupportedParameters', 15),
+ ('signatureFailure', 16),
+ ('insufficientMemory', 17),
+ ('incorrectTarget', 23),
+ ('missingSignature', 29),
+ ('resourcesBusy', 30),
+ ('versionNumberMismatch', 31),
+ ('revokedCertificate', 33),
+ ('ambiguousDecrypt', 60),
+ ('noDecryptKey', 61),
+ ('badEncryptedData', 62),
+ ('badEnvelopedData', 63),
+ ('badAuthenticatedData', 64),
+ ('badAuthEnvelopedData', 65),
+ ('badKeyAgreeRecipientInfo', 66),
+ ('badKEKRecipientInfo', 67),
+ ('badEncryptContent', 68),
+ ('badEncryptAlgorithm', 69),
+ ('missingCiphertext', 70),
+ ('decryptFailure', 71),
+ ('badMACAlgorithm', 72),
+ ('badAuthAttrs', 73),
+ ('badUnauthAttrs', 74),
+ ('invalidMAC', 75),
+ ('mismatchedDigestAlg', 76),
+ ('missingCertificate', 77),
+ ('tooManySigners', 78),
+ ('missingSignedAttributes', 79),
+ ('derEncodingNotUsed', 80),
+ ('missingContentHints', 81),
+ ('invalidAttributeLocation', 82),
+ ('badMessageDigest', 83),
+ ('badKeyPackage', 84),
+ ('badAttributes', 85),
+ ('attributeComparisonFailure', 86),
+ ('unsupportedSymmetricKeyPackage', 87),
+ ('unsupportedAsymmetricKeyPackage', 88),
+ ('constraintViolation', 89),
+ ('ambiguousDefaultValue', 90),
+ ('noMatchingRecipientInfo', 91),
+ ('unsupportedKeyWrapAlgorithm', 92),
+ ('badKeyTransRecipientInfo', 93),
+ ('other', 127)
+)
+
+
+class ErrorCodeChoice(univ.Choice):
+ pass
+
+ErrorCodeChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enum', EnumeratedErrorCode()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier())
+)
+
+
+class KeyPkgID(univ.OctetString):
+ pass
+
+
+class KeyPkgIdentifier(univ.Choice):
+ pass
+
+KeyPkgIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkgID', KeyPkgID()),
+ namedtype.NamedType('attribute', SingleAttribute())
+)
+
+
+class KeyPkgVersion(univ.Integer):
+ pass
+
+
+KeyPkgVersion.namedValues = namedval.NamedValues(
+ ('v1', 1),
+ ('v2', 2)
+)
+
+KeyPkgVersion.subtypeSpec = constraint.ValueRangeConstraint(1, 65535)
+
+
+id_ct_KP_keyPackageError = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.6')
+
+class KeyPackageError(univ.Sequence):
+ pass
+
+KeyPackageError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')),
+ namedtype.OptionalNamedType('errorOf', KeyPkgIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('errorBy', SIREntityName()),
+ namedtype.NamedType('errorCode', ErrorCodeChoice())
+)
+
+
+# Key Package Receipt CMS Content Type
+
+id_ct_KP_keyPackageReceipt = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.3')
+
+class KeyPackageReceipt(univ.Sequence):
+ pass
+
+KeyPackageReceipt.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')),
+ namedtype.NamedType('receiptOf', KeyPkgIdentifier()),
+ namedtype.NamedType('receivedBy', SIREntityName())
+)
+
+
+# Key Package Receipt Request Attribute
+
+class KeyPkgReceiptReq(univ.Sequence):
+ pass
+
+KeyPkgReceiptReq.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('encryptReceipt', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('receiptsFrom', SIREntityNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receiptsTo', SIREntityNames())
+)
+
+
+id_aa_KP_keyPkgIdAndReceiptReq = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.65')
+
+class KeyPkgIdentifierAndReceiptReq(univ.Sequence):
+ pass
+
+KeyPkgIdentifierAndReceiptReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkgID', KeyPkgID()),
+ namedtype.OptionalNamedType('receiptReq', KeyPkgReceiptReq())
+)
+
+
+# Map of Attribute Type OIDs to Attributes are added to
+# the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of CMC Content Type OIDs to CMC Content Types are added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_keyPackageError: KeyPackageError(),
+ id_ct_KP_keyPackageReceipt: KeyPackageReceipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7229.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7229.py
new file mode 100644
index 0000000000..e9bce2d5b6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7229.py
@@ -0,0 +1,29 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Object Identifiers for Test Certificate Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7229.txt
+#
+
+from pyasn1.type import univ
+
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_TEST = id_pkix + (13, )
+
+id_TEST_certPolicyOne = id_TEST + (1, )
+id_TEST_certPolicyTwo = id_TEST + (2, )
+id_TEST_certPolicyThree = id_TEST + (3, )
+id_TEST_certPolicyFour = id_TEST + (4, )
+id_TEST_certPolicyFive = id_TEST + (5, )
+id_TEST_certPolicySix = id_TEST + (6, )
+id_TEST_certPolicySeven = id_TEST + (7, )
+id_TEST_certPolicyEight = id_TEST + (8, )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7292.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7292.py
new file mode 100644
index 0000000000..1c9f319a5d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7292.py
@@ -0,0 +1,357 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #12: Personal Information Exchange Syntax v1.1
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7292.txt
+# https://www.rfc-editor.org/errata_search.php?rfc=7292
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2315
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5958
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Initialize the maps used in PKCS#12
+
+pkcs12BagTypeMap = { }
+
+pkcs12CertBagMap = { }
+
+pkcs12CRLBagMap = { }
+
+pkcs12SecretBagMap = { }
+
+
+# Imports from RFC 2315, RFC 5652, and RFC 5958
+
+DigestInfo = rfc2315.DigestInfo
+
+
+ContentInfo = rfc5652.ContentInfo
+
+PKCS12Attribute = rfc5652.Attribute
+
+
+EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
+
+PrivateKeyInfo = rfc5958.PrivateKeyInfo
+
+
+# CMSSingleAttribute is the same as Attribute in RFC 5652 except the attrValues
+# SET must have one and only one member
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+
+
+class CMSSingleAttribute(univ.Sequence):
+ pass
+
+CMSSingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# Object identifier arcs
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+pkcs = _OID(rsadsi, 1)
+
+pkcs_9 = _OID(pkcs, 9)
+
+certTypes = _OID(pkcs_9, 22)
+
+crlTypes = _OID(pkcs_9, 23)
+
+pkcs_12 = _OID(pkcs, 12)
+
+
+# PBE Algorithm Identifiers and Parameters Structure
+
+pkcs_12PbeIds = _OID(pkcs_12, 1)
+
+pbeWithSHAAnd128BitRC4 = _OID(pkcs_12PbeIds, 1)
+
+pbeWithSHAAnd40BitRC4 = _OID(pkcs_12PbeIds, 2)
+
+pbeWithSHAAnd3_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 3)
+
+pbeWithSHAAnd2_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 4)
+
+pbeWithSHAAnd128BitRC2_CBC = _OID(pkcs_12PbeIds, 5)
+
+pbeWithSHAAnd40BitRC2_CBC = _OID(pkcs_12PbeIds, 6)
+
+
+class Pkcs_12PbeParams(univ.Sequence):
+ pass
+
+Pkcs_12PbeParams.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('iterations', univ.Integer())
+)
+
+
+# Bag types
+
+bagtypes = _OID(pkcs_12, 10, 1)
+
+class BAG_TYPE(univ.Sequence):
+ pass
+
+BAG_TYPE.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.ObjectIdentifier()),
+ namedtype.NamedType('unnamed1', univ.Any(),
+ openType=opentype.OpenType('attrType', pkcs12BagTypeMap)
+ )
+)
+
+
+id_keyBag = _OID(bagtypes, 1)
+
+class KeyBag(PrivateKeyInfo):
+ pass
+
+
+id_pkcs8ShroudedKeyBag = _OID(bagtypes, 2)
+
+class PKCS8ShroudedKeyBag(EncryptedPrivateKeyInfo):
+ pass
+
+
+id_certBag = _OID(bagtypes, 3)
+
+class CertBag(univ.Sequence):
+ pass
+
+CertBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certId', univ.ObjectIdentifier()),
+ namedtype.NamedType('certValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('certId', pkcs12CertBagMap)
+ )
+)
+
+
+x509Certificate = CertBag()
+x509Certificate['certId'] = _OID(certTypes, 1)
+x509Certificate['certValue'] = univ.OctetString()
+# DER-encoded X.509 certificate stored in OCTET STRING
+
+
+sdsiCertificate = CertBag()
+sdsiCertificate['certId'] = _OID(certTypes, 2)
+sdsiCertificate['certValue'] = char.IA5String()
+# Base64-encoded SDSI certificate stored in IA5String
+
+
+id_CRLBag = _OID(bagtypes, 4)
+
+class CRLBag(univ.Sequence):
+ pass
+
+CRLBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlId', univ.ObjectIdentifier()),
+ namedtype.NamedType('crlValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('crlId', pkcs12CRLBagMap)
+ )
+)
+
+
+x509CRL = CRLBag()
+x509CRL['crlId'] = _OID(crlTypes, 1)
+x509CRL['crlValue'] = univ.OctetString()
+# DER-encoded X.509 CRL stored in OCTET STRING
+
+
+id_secretBag = _OID(bagtypes, 5)
+
+class SecretBag(univ.Sequence):
+ pass
+
+SecretBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('secretTypeId', univ.ObjectIdentifier()),
+ namedtype.NamedType('secretValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('secretTypeId', pkcs12SecretBagMap)
+ )
+)
+
+
+id_safeContentsBag = _OID(bagtypes, 6)
+
+class SafeBag(univ.Sequence):
+ pass
+
+SafeBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bagId', univ.ObjectIdentifier()),
+ namedtype.NamedType('bagValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('bagId', pkcs12BagTypeMap)
+ ),
+ namedtype.OptionalNamedType('bagAttributes',
+ univ.SetOf(componentType=PKCS12Attribute())
+ )
+)
+
+
+class SafeContents(univ.SequenceOf):
+ pass
+
+SafeContents.componentType = SafeBag()
+
+
+# The PFX PDU
+
+class AuthenticatedSafe(univ.SequenceOf):
+ pass
+
+AuthenticatedSafe.componentType = ContentInfo()
+# Data if unencrypted
+# EncryptedData if password-encrypted
+# EnvelopedData if public key-encrypted
+
+
+class MacData(univ.Sequence):
+ pass
+
+MacData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mac', DigestInfo()),
+ namedtype.NamedType('macSalt', univ.OctetString()),
+ namedtype.DefaultedNamedType('iterations', univ.Integer().subtype(value=1))
+ # Note: The default is for historical reasons and its use is deprecated
+)
+
+
+class PFX(univ.Sequence):
+ pass
+
+PFX.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ univ.Integer(namedValues=namedval.NamedValues(('v3', 3)))
+ ),
+ namedtype.NamedType('authSafe', ContentInfo()),
+ namedtype.OptionalNamedType('macData', MacData())
+)
+
+
+# Local key identifier (also defined as certificateAttribute in rfc2985.py)
+
+pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
+
+localKeyId = CMSSingleAttribute()
+localKeyId['attrType'] = pkcs_9_at_localKeyId
+localKeyId['attrValues'][0] = univ.OctetString()
+
+
+# Friendly name (also defined as certificateAttribute in rfc2985.py)
+
+pkcs_9_ub_pkcs9String = univ.Integer(255)
+
+pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
+
+class FriendlyName(char.BMPString):
+ pass
+
+FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
+
+
+friendlyName = CMSSingleAttribute()
+friendlyName['attrType'] = pkcs_9_at_friendlyName
+friendlyName['attrValues'][0] = FriendlyName()
+
+
+# Update the PKCS#12 maps
+
+_pkcs12BagTypeMap = {
+ id_keyBag: KeyBag(),
+ id_pkcs8ShroudedKeyBag: PKCS8ShroudedKeyBag(),
+ id_certBag: CertBag(),
+ id_CRLBag: CRLBag(),
+ id_secretBag: SecretBag(),
+ id_safeContentsBag: SafeBag(),
+}
+
+pkcs12BagTypeMap.update(_pkcs12BagTypeMap)
+
+
+_pkcs12CertBagMap = {
+ _OID(certTypes, 1): univ.OctetString(),
+ _OID(certTypes, 2): char.IA5String(),
+}
+
+pkcs12CertBagMap.update(_pkcs12CertBagMap)
+
+
+_pkcs12CRLBagMap = {
+ _OID(crlTypes, 1): univ.OctetString(),
+}
+
+pkcs12CRLBagMap.update(_pkcs12CRLBagMap)
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ pbeWithSHAAnd128BitRC4: Pkcs_12PbeParams(),
+ pbeWithSHAAnd40BitRC4: Pkcs_12PbeParams(),
+ pbeWithSHAAnd3_KeyTripleDES_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd2_KeyTripleDES_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd128BitRC2_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd40BitRC2_CBC: Pkcs_12PbeParams(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the CMS Attribute map
+
+_cmsAttributesMapUpdate = {
+ pkcs_9_at_friendlyName: FriendlyName(),
+ pkcs_9_at_localKeyId: univ.OctetString(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7296.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7296.py
new file mode 100644
index 0000000000..95a191a14d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7296.py
@@ -0,0 +1,32 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# IKEv2 Certificate Bundle
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7296.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class CertificateOrCRL(univ.Choice):
+ pass
+
+CertificateOrCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cert', rfc5280.Certificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('crl', rfc5280.CertificateList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class CertificateBundle(univ.SequenceOf):
+ pass
+
+CertificateBundle.componentType = CertificateOrCRL()
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7508.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7508.py
new file mode 100644
index 0000000000..66460240f1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7508.py
@@ -0,0 +1,90 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Securing Header Fields with S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7508.txt
+# https://www.rfc-editor.org/errata/eid5875
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+import string
+
+MAX = float('inf')
+
+
+class Algorithm(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('canonAlgorithmSimple', 0),
+ ('canonAlgorithmRelaxed', 1)
+ )
+
+
+class HeaderFieldStatus(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('duplicated', 0),
+ ('deleted', 1),
+ ('modified', 2)
+ )
+
+
+class HeaderFieldName(char.VisibleString):
+ subtypeSpec = (
+ constraint.PermittedAlphabetConstraint(*string.printable) -
+ constraint.PermittedAlphabetConstraint(':')
+ )
+
+
+class HeaderFieldValue(char.UTF8String):
+ pass
+
+
+class HeaderField(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('field-Name', HeaderFieldName()),
+ namedtype.NamedType('field-Value', HeaderFieldValue()),
+ namedtype.DefaultedNamedType('field-Status',
+ HeaderFieldStatus().subtype(value='duplicated'))
+ )
+
+
+class HeaderFields(univ.SequenceOf):
+ componentType = HeaderField()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SecureHeaderFields(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('canonAlgorithm', Algorithm()),
+ namedtype.NamedType('secHeaderFields', HeaderFields())
+ )
+
+
+id_aa = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 2, ))
+
+id_aa_secureHeaderFieldsIdentifier = id_aa + (55, )
+
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_secureHeaderFieldsIdentifier: SecureHeaderFields(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7585.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7585.py
new file mode 100644
index 0000000000..b3fd4a5bac
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7585.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Network Access Identifier (NAI) Realm Name for Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7585.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# NAI Realm Name for Certificates
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_on = id_pkix + (8, )
+
+id_on_naiRealm = id_on + (8, )
+
+
+ub_naiRealm_length = univ.Integer(255)
+
+
+class NAIRealm(char.UTF8String):
+ subtypeSpec = constraint.ValueSizeConstraint(1, ub_naiRealm_length)
+
+
+naiRealm = rfc5280.AnotherName()
+naiRealm['type-id'] = id_on_naiRealm
+naiRealm['value'] = NAIRealm()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_naiRealm: NAIRealm(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7633.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7633.py
new file mode 100644
index 0000000000..f518440ff4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7633.py
@@ -0,0 +1,38 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Transport Layer Security (TLS) Feature Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7633.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# TLS Features Extension
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_pe_tlsfeature = id_pe + (24, )
+
+
+class Features(univ.SequenceOf):
+ componentType = univ.Integer()
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_tlsfeature: Features(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7773.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7773.py
new file mode 100644
index 0000000000..0fee2aa346
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7773.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authentication Context Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7773.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Authentication Context Extension
+
+e_legnamnden = univ.ObjectIdentifier('1.2.752.201')
+
+id_eleg_ce = e_legnamnden + (5, )
+
+id_ce_authContext = id_eleg_ce + (1, )
+
+
+class AuthenticationContext(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contextType', char.UTF8String()),
+ namedtype.OptionalNamedType('contextInfo', char.UTF8String())
+ )
+
+class AuthenticationContexts(univ.SequenceOf):
+ componentType = AuthenticationContext()
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_ce_authContext: AuthenticationContexts(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7894.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7894.py
new file mode 100644
index 0000000000..41936433d1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7894.py
@@ -0,0 +1,92 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Alternative Challenge Password Attributes for EST
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7894.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7191
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+Attribute = rfc7191.SingleAttribute
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# OTP Challenge Attribute
+
+id_aa_otpChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.56')
+
+ub_aa_otpChallenge = univ.Integer(255)
+
+otpChallenge = Attribute()
+otpChallenge['attrType'] = id_aa_otpChallenge
+otpChallenge['attrValues'][0] = DirectoryString()
+
+
+# Revocation Challenge Attribute
+
+id_aa_revocationChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.57')
+
+ub_aa_revocationChallenge = univ.Integer(255)
+
+revocationChallenge = Attribute()
+revocationChallenge['attrType'] = id_aa_revocationChallenge
+revocationChallenge['attrValues'][0] = DirectoryString()
+
+
+# EST Identity Linking Attribute
+
+id_aa_estIdentityLinking = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.58')
+
+ub_aa_est_identity_linking = univ.Integer(255)
+
+estIdentityLinking = Attribute()
+estIdentityLinking['attrType'] = id_aa_estIdentityLinking
+estIdentityLinking['attrValues'][0] = DirectoryString()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc6402.py
+
+_cmcControlAttributesMapUpdate = {
+ id_aa_otpChallenge: DirectoryString(),
+ id_aa_revocationChallenge: DirectoryString(),
+ id_aa_estIdentityLinking: DirectoryString(),
+}
+
+rfc6402.cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7906.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7906.py
new file mode 100644
index 0000000000..fa5f6b0733
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7906.py
@@ -0,0 +1,736 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# NSA's CMS Key Management Attributes
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7906.txt
+# https://www.rfc-editor.org/errata/eid5850
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4108
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6010
+from pyasn1_modules import rfc6019
+from pyasn1_modules import rfc7191
+
+MAX = float('inf')
+
+
+# Imports From RFC 2634
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityClassification = rfc2634.SecurityClassification
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityCategories= rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Imports From RFC 4108
+
+id_aa_communityIdentifiers = rfc4108.id_aa_communityIdentifiers
+
+CommunityIdentifier = rfc4108.CommunityIdentifier
+
+CommunityIdentifiers = rfc4108.CommunityIdentifiers
+
+
+# Imports From RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Name = rfc5280.Name
+
+Certificate = rfc5280.Certificate
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+
+SubjectInfoAccessSyntax = rfc5280.SubjectInfoAccessSyntax
+
+id_pkix = rfc5280.id_pkix
+
+id_pe = rfc5280.id_pe
+
+id_pe_subjectInfoAccess = rfc5280.id_pe_subjectInfoAccess
+
+
+# Imports From RFC 6010
+
+CMSContentConstraints = rfc6010.CMSContentConstraints
+
+
+# Imports From RFC 6019
+
+BinaryTime = rfc6019.BinaryTime
+
+id_aa_binarySigningTime = rfc6019.id_aa_binarySigningTime
+
+BinarySigningTime = rfc6019.BinarySigningTime
+
+
+# Imports From RFC 5652
+
+Attribute = rfc5652.Attribute
+
+CertificateSet = rfc5652.CertificateSet
+
+CertificateChoices = rfc5652.CertificateChoices
+
+id_contentType = rfc5652.id_contentType
+
+ContentType = rfc5652.ContentType
+
+id_messageDigest = rfc5652.id_messageDigest
+
+MessageDigest = rfc5652.MessageDigest
+
+
+# Imports From RFC 7191
+
+SIREntityName = rfc7191.SIREntityName
+
+id_aa_KP_keyPkgIdAndReceiptReq = rfc7191.id_aa_KP_keyPkgIdAndReceiptReq
+
+KeyPkgIdentifierAndReceiptReq = rfc7191.KeyPkgIdentifierAndReceiptReq
+
+
+# Key Province Attribute
+
+id_aa_KP_keyProvinceV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.71')
+
+
+class KeyProvinceV2(univ.ObjectIdentifier):
+ pass
+
+
+aa_keyProvince_v2 = Attribute()
+aa_keyProvince_v2['attrType'] = id_aa_KP_keyProvinceV2
+aa_keyProvince_v2['attrValues'][0] = KeyProvinceV2()
+
+
+# Manifest Attribute
+
+id_aa_KP_manifest = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.72')
+
+
+class ShortTitle(char.PrintableString):
+ pass
+
+
+class Manifest(univ.SequenceOf):
+ pass
+
+Manifest.componentType = ShortTitle()
+Manifest.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_manifest = Attribute()
+aa_manifest['attrType'] = id_aa_KP_manifest
+aa_manifest['attrValues'][0] = Manifest()
+
+
+# Key Algorithm Attribute
+
+id_kma_keyAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.1')
+
+
+class KeyAlgorithm(univ.Sequence):
+ pass
+
+KeyAlgorithm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAlg', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('checkWordAlg', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('crcAlg', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+aa_keyAlgorithm = Attribute()
+aa_keyAlgorithm['attrType'] = id_kma_keyAlgorithm
+aa_keyAlgorithm['attrValues'][0] = KeyAlgorithm()
+
+
+# User Certificate Attribute
+
+id_at_userCertificate = univ.ObjectIdentifier('2.5.4.36')
+
+
+aa_userCertificate = Attribute()
+aa_userCertificate['attrType'] = id_at_userCertificate
+aa_userCertificate['attrValues'][0] = Certificate()
+
+
+# Key Package Receivers Attribute
+
+id_kma_keyPkgReceiversV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.16')
+
+
+class KeyPkgReceiver(univ.Choice):
+ pass
+
+KeyPkgReceiver.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sirEntity', SIREntityName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('community', CommunityIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class KeyPkgReceiversV2(univ.SequenceOf):
+ pass
+
+KeyPkgReceiversV2.componentType = KeyPkgReceiver()
+KeyPkgReceiversV2.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_keyPackageReceivers_v2 = Attribute()
+aa_keyPackageReceivers_v2['attrType'] = id_kma_keyPkgReceiversV2
+aa_keyPackageReceivers_v2['attrValues'][0] = KeyPkgReceiversV2()
+
+
+# TSEC Nomenclature Attribute
+
+id_kma_TSECNomenclature = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.3')
+
+
+class CharEdition(char.PrintableString):
+ pass
+
+
+class CharEditionRange(univ.Sequence):
+ pass
+
+CharEditionRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstCharEdition', CharEdition()),
+ namedtype.NamedType('lastCharEdition', CharEdition())
+)
+
+
+class NumEdition(univ.Integer):
+ pass
+
+NumEdition.subtypeSpec = constraint.ValueRangeConstraint(0, 308915776)
+
+
+class NumEditionRange(univ.Sequence):
+ pass
+
+NumEditionRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstNumEdition', NumEdition()),
+ namedtype.NamedType('lastNumEdition', NumEdition())
+)
+
+
+class EditionID(univ.Choice):
+ pass
+
+EditionID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('char', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('charEdition', CharEdition().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('charEditionRange', CharEditionRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ ))
+ ),
+ namedtype.NamedType('num', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('numEdition', NumEdition().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('numEditionRange', NumEditionRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+ ))
+ )
+)
+
+
+class Register(univ.Integer):
+ pass
+
+Register.subtypeSpec = constraint.ValueRangeConstraint(0, 2147483647)
+
+
+class RegisterRange(univ.Sequence):
+ pass
+
+RegisterRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstRegister', Register()),
+ namedtype.NamedType('lastRegister', Register())
+)
+
+
+class RegisterID(univ.Choice):
+ pass
+
+RegisterID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('register', Register().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('registerRange', RegisterRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)))
+)
+
+
+class SegmentNumber(univ.Integer):
+ pass
+
+SegmentNumber.subtypeSpec = constraint.ValueRangeConstraint(1, 127)
+
+
+class SegmentRange(univ.Sequence):
+ pass
+
+SegmentRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstSegment', SegmentNumber()),
+ namedtype.NamedType('lastSegment', SegmentNumber())
+)
+
+
+class SegmentID(univ.Choice):
+ pass
+
+SegmentID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('segmentNumber', SegmentNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('segmentRange', SegmentRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)))
+)
+
+
+class TSECNomenclature(univ.Sequence):
+ pass
+
+TSECNomenclature.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('shortTitle', ShortTitle()),
+ namedtype.OptionalNamedType('editionID', EditionID()),
+ namedtype.OptionalNamedType('registerID', RegisterID()),
+ namedtype.OptionalNamedType('segmentID', SegmentID())
+)
+
+
+aa_tsecNomenclature = Attribute()
+aa_tsecNomenclature['attrType'] = id_kma_TSECNomenclature
+aa_tsecNomenclature['attrValues'][0] = TSECNomenclature()
+
+
+# Key Purpose Attribute
+
+id_kma_keyPurpose = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.13')
+
+
+class KeyPurpose(univ.Enumerated):
+ pass
+
+KeyPurpose.namedValues = namedval.NamedValues(
+ ('n-a', 0),
+ ('a', 65),
+ ('b', 66),
+ ('l', 76),
+ ('m', 77),
+ ('r', 82),
+ ('s', 83),
+ ('t', 84),
+ ('v', 86),
+ ('x', 88),
+ ('z', 90)
+)
+
+
+aa_keyPurpose = Attribute()
+aa_keyPurpose['attrType'] = id_kma_keyPurpose
+aa_keyPurpose['attrValues'][0] = KeyPurpose()
+
+
+# Key Use Attribute
+
+id_kma_keyUse = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.14')
+
+
+class KeyUse(univ.Enumerated):
+ pass
+
+KeyUse.namedValues = namedval.NamedValues(
+ ('n-a', 0),
+ ('ffk', 1),
+ ('kek', 2),
+ ('kpk', 3),
+ ('msk', 4),
+ ('qkek', 5),
+ ('tek', 6),
+ ('tsk', 7),
+ ('trkek', 8),
+ ('nfk', 9),
+ ('effk', 10),
+ ('ebfk', 11),
+ ('aek', 12),
+ ('wod', 13),
+ ('kesk', 246),
+ ('eik', 247),
+ ('ask', 248),
+ ('kmk', 249),
+ ('rsk', 250),
+ ('csk', 251),
+ ('sak', 252),
+ ('rgk', 253),
+ ('cek', 254),
+ ('exk', 255)
+)
+
+
+aa_keyUse = Attribute()
+aa_keyUse['attrType'] = id_kma_keyUse  # fixed: was aa_keyPurpose (copy-paste bug that clobbered the Key Purpose attribute)
+aa_keyUse['attrValues'][0] = KeyUse()  # fixed: was aa_keyPurpose; aa_keyUse was left unpopulated
+
+
+# Transport Key Attribute
+
+id_kma_transportKey = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.15')
+
+
+class TransOp(univ.Enumerated):
+ pass
+
+TransOp.namedValues = namedval.NamedValues(
+ ('transport', 1),
+ ('operational', 2)
+)
+
+
+aa_transportKey = Attribute()
+aa_transportKey['attrType'] = id_kma_transportKey
+aa_transportKey['attrValues'][0] = TransOp()
+
+
+# Key Distribution Period Attribute
+
+id_kma_keyDistPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.5')
+
+
+class KeyDistPeriod(univ.Sequence):
+ pass
+
+KeyDistPeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('doNotDistBefore', BinaryTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('doNotDistAfter', BinaryTime())
+)
+
+
+aa_keyDistributionPeriod = Attribute()
+aa_keyDistributionPeriod['attrType'] = id_kma_keyDistPeriod
+aa_keyDistributionPeriod['attrValues'][0] = KeyDistPeriod()
+
+
+# Key Validity Period Attribute
+
+id_kma_keyValidityPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.6')
+
+
+class KeyValidityPeriod(univ.Sequence):
+ pass
+
+KeyValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('doNotUseBefore', BinaryTime()),
+ namedtype.OptionalNamedType('doNotUseAfter', BinaryTime())
+)
+
+
+aa_keyValidityPeriod = Attribute()
+aa_keyValidityPeriod['attrType'] = id_kma_keyValidityPeriod
+aa_keyValidityPeriod['attrValues'][0] = KeyValidityPeriod()
+
+
+# Key Duration Attribute
+
+id_kma_keyDuration = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.7')
+
+
+ub_KeyDuration_months = univ.Integer(72)
+
+ub_KeyDuration_hours = univ.Integer(96)
+
+ub_KeyDuration_days = univ.Integer(732)
+
+ub_KeyDuration_weeks = univ.Integer(104)
+
+ub_KeyDuration_years = univ.Integer(100)
+
+
+class KeyDuration(univ.Choice):
+ pass
+
+KeyDuration.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hours', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_hours)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('days', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_days))),
+ namedtype.NamedType('weeks', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_weeks)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('months', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_months)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('years', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_years)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+aa_keyDurationPeriod = Attribute()
+aa_keyDurationPeriod['attrType'] = id_kma_keyDuration
+aa_keyDurationPeriod['attrValues'][0] = KeyDuration()
+
+
+# Classification Attribute
+
+id_aa_KP_classification = univ.ObjectIdentifier(id_aa_securityLabel)
+
+
+id_enumeratedPermissiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.1')
+
+id_enumeratedRestrictiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.4')
+
+id_informativeAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.3')
+
+
+class SecurityAttribute(univ.Integer):
+ pass
+
+SecurityAttribute.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class EnumeratedTag(univ.Sequence):
+ pass
+
+EnumeratedTag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+ namedtype.NamedType('attributeList', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class FreeFormField(univ.Choice):
+ pass
+
+FreeFormField.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bitSetAttributes', univ.BitString()), # Not permitted in RFC 7906
+ namedtype.NamedType('securityAttributes', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class InformativeTag(univ.Sequence):
+ pass
+
+InformativeTag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+ namedtype.NamedType('attributes', FreeFormField())
+)
+
+
+class Classification(ESSSecurityLabel):
+ pass
+
+
+aa_classification = Attribute()
+aa_classification['attrType'] = id_aa_KP_classification
+aa_classification['attrValues'][0] = Classification()
+
+
+# Split Identifier Attribute
+
+id_kma_splitID = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.11')
+
+
+class SplitID(univ.Sequence):
+ pass
+
+SplitID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('half', univ.Enumerated(
+ namedValues=namedval.NamedValues(('a', 0), ('b', 1)))),
+ namedtype.OptionalNamedType('combineAlg', AlgorithmIdentifier())
+)
+
+
+aa_splitIdentifier = Attribute()
+aa_splitIdentifier['attrType'] = id_kma_splitID
+aa_splitIdentifier['attrValues'][0] = SplitID()
+
+
+# Key Package Type Attribute
+
+id_kma_keyPkgType = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.12')
+
+
+class KeyPkgType(univ.ObjectIdentifier):
+ pass
+
+
+aa_keyPackageType = Attribute()
+aa_keyPackageType['attrType'] = id_kma_keyPkgType
+aa_keyPackageType['attrValues'][0] = KeyPkgType()
+
+
+# Signature Usage Attribute
+
+id_kma_sigUsageV3 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.22')
+
+
+class SignatureUsage(CMSContentConstraints):
+ pass
+
+
+aa_signatureUsage_v3 = Attribute()
+aa_signatureUsage_v3['attrType'] = id_kma_sigUsageV3
+aa_signatureUsage_v3['attrValues'][0] = SignatureUsage()
+
+
+# Other Certificate Format Attribute
+
+id_kma_otherCertFormats = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.19')
+
+
+aa_otherCertificateFormats = Attribute()
+aa_otherCertificateFormats['attrType'] = id_kma_otherCertFormats  # fixed: was aa_signatureUsage_v3 (copy-paste bug that clobbered the Signature Usage attribute)
+aa_otherCertificateFormats['attrValues'][0] = CertificateChoices()  # fixed: was aa_signatureUsage_v3; aa_otherCertificateFormats was left unpopulated
+
+
+# PKI Path Attribute
+
+id_at_pkiPath = univ.ObjectIdentifier('2.5.4.70')
+
+
+class PkiPath(univ.SequenceOf):
+ pass
+
+PkiPath.componentType = Certificate()
+PkiPath.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_pkiPath = Attribute()
+aa_pkiPath['attrType'] = id_at_pkiPath
+aa_pkiPath['attrValues'][0] = PkiPath()
+
+
+# Useful Certificates Attribute
+
+id_kma_usefulCerts = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.20')
+
+
+aa_usefulCertificates = Attribute()
+aa_usefulCertificates['attrType'] = id_kma_usefulCerts
+aa_usefulCertificates['attrValues'][0] = CertificateSet()
+
+
+# Key Wrap Attribute
+
+id_kma_keyWrapAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.21')
+
+
+aa_keyWrapAlgorithm = Attribute()
+aa_keyWrapAlgorithm['attrType'] = id_kma_keyWrapAlgorithm
+aa_keyWrapAlgorithm['attrValues'][0] = AlgorithmIdentifier()
+
+
+# Content Decryption Key Identifier Attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+
+class ContentDecryptKeyID(univ.OctetString):
+ pass
+
+
+aa_contentDecryptKeyIdentifier = Attribute()
+aa_contentDecryptKeyIdentifier['attrType'] = id_aa_KP_contentDecryptKeyID
+aa_contentDecryptKeyIdentifier['attrValues'][0] = ContentDecryptKeyID()
+
+
+# Certificate Pointers Attribute
+
+aa_certificatePointers = Attribute()
+aa_certificatePointers['attrType'] = id_pe_subjectInfoAccess
+aa_certificatePointers['attrValues'][0] = SubjectInfoAccessSyntax()
+
+
+# CRL Pointers Attribute
+
+id_aa_KP_crlPointers = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.70')
+
+
+aa_cRLDistributionPoints = Attribute()
+aa_cRLDistributionPoints['attrType'] = id_aa_KP_crlPointers
+aa_cRLDistributionPoints['attrValues'][0] = GeneralNames()
+
+
+# Extended Error Codes
+
+id_errorCodes = univ.ObjectIdentifier('2.16.840.1.101.2.1.22')
+
+id_missingKeyType = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.1')
+
+id_privacyMarkTooLong = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.2')
+
+id_unrecognizedSecurityPolicy = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.3')
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_contentHint: ContentHints(),
+ id_aa_communityIdentifiers: CommunityIdentifiers(),
+ id_aa_binarySigningTime: BinarySigningTime(),
+ id_contentType: ContentType(),
+ id_messageDigest: MessageDigest(),
+ id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
+ id_aa_KP_keyProvinceV2: KeyProvinceV2(),
+ id_aa_KP_manifest: Manifest(),
+ id_kma_keyAlgorithm: KeyAlgorithm(),
+ id_at_userCertificate: Certificate(),
+ id_kma_keyPkgReceiversV2: KeyPkgReceiversV2(),
+ id_kma_TSECNomenclature: TSECNomenclature(),
+ id_kma_keyPurpose: KeyPurpose(),
+ id_kma_keyUse: KeyUse(),
+ id_kma_transportKey: TransOp(),
+ id_kma_keyDistPeriod: KeyDistPeriod(),
+ id_kma_keyValidityPeriod: KeyValidityPeriod(),
+ id_kma_keyDuration: KeyDuration(),
+ id_aa_KP_classification: Classification(),
+ id_kma_splitID: SplitID(),
+ id_kma_keyPkgType: KeyPkgType(),
+ id_kma_sigUsageV3: SignatureUsage(),
+ id_kma_otherCertFormats: CertificateChoices(),
+ id_at_pkiPath: PkiPath(),
+ id_kma_usefulCerts: CertificateSet(),
+ id_kma_keyWrapAlgorithm: AlgorithmIdentifier(),
+ id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
+ id_pe_subjectInfoAccess: SubjectInfoAccessSyntax(),
+ id_aa_KP_crlPointers: GeneralNames(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7914.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7914.py
new file mode 100644
index 0000000000..99e9551567
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc7914.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# The scrypt Password-Based Key Derivation Function
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7914.txt
+# https://www.rfc-editor.org/errata/eid5871
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+id_scrypt = univ.ObjectIdentifier('1.3.6.1.4.1.11591.4.11')
+
+
+class Scrypt_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt',
+ univ.OctetString()),
+ namedtype.NamedType('costParameter',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('blockSize',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('parallelizationParameter',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('keyLength',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX)))
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_scrypt: Scrypt_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8017.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8017.py
new file mode 100644
index 0000000000..fefed1dcd6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8017.py
@@ -0,0 +1,153 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #1: RSA Cryptography Specifications Version 2.2
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8017.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2437
+from pyasn1_modules import rfc3447
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Import Algorithm Identifier from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+class DigestAlgorithm(AlgorithmIdentifier):
+ pass
+
+class HashAlgorithm(AlgorithmIdentifier):
+ pass
+
+class MaskGenAlgorithm(AlgorithmIdentifier):
+ pass
+
+class PSourceAlgorithm(AlgorithmIdentifier):
+ pass
+
+
+# Object identifiers from NIST SHA2
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+id_sha256 = rfc4055.id_sha256
+id_sha384 = rfc4055.id_sha384
+id_sha512 = rfc4055.id_sha512
+id_sha224 = rfc4055.id_sha224
+id_sha512_224 = hashAlgs + (5, )
+id_sha512_256 = hashAlgs + (6, )
+
+
+# Basic object identifiers
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = rfc2437.rsaEncryption
+id_RSAES_OAEP = rfc2437.id_RSAES_OAEP
+id_pSpecified = rfc2437.id_pSpecified
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+md2WithRSAEncryption = rfc2437.md2WithRSAEncryption
+md5WithRSAEncryption = rfc2437.md5WithRSAEncryption
+sha1WithRSAEncryption = rfc2437.sha1WithRSAEncryption
+sha224WithRSAEncryption = rfc4055.sha224WithRSAEncryption
+sha256WithRSAEncryption = rfc4055.sha256WithRSAEncryption
+sha384WithRSAEncryption = rfc4055.sha384WithRSAEncryption
+sha512WithRSAEncryption = rfc4055.sha512WithRSAEncryption
+sha512_224WithRSAEncryption = pkcs_1 + (15, )
+sha512_256WithRSAEncryption = pkcs_1 + (16, )
+id_sha1 = rfc2437.id_sha1
+id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2')
+id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5')
+id_mgf1 = rfc2437.id_mgf1
+
+
+# Default parameter values
+
+sha1 = rfc4055.sha1Identifier
+SHA1Parameters = univ.Null("")
+
+mgf1SHA1 = rfc4055.mgf1SHA1Identifier
+
+class EncodingParameters(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(0, MAX)
+
+pSpecifiedEmpty = rfc4055.pSpecifiedEmptyIdentifier
+
+emptyString = EncodingParameters(value='')
+
+
+# Main structures
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('two-prime', 0),
+ ('multi', 1)
+ )
+
+class TrailerField(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('trailerFieldBC', 1)
+ )
+
+RSAPublicKey = rfc2437.RSAPublicKey
+
+OtherPrimeInfo = rfc3447.OtherPrimeInfo
+OtherPrimeInfos = rfc3447.OtherPrimeInfos
+RSAPrivateKey = rfc3447.RSAPrivateKey
+
+RSAES_OAEP_params = rfc4055.RSAES_OAEP_params
+rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier
+
+RSASSA_PSS_params = rfc4055.RSASSA_PSS_params
+rSASSA_PSS_Default_Identifier = rfc4055.rSASSA_PSS_Default_Identifier
+
+
+# Syntax for the EMSA-PKCS1-v1_5 hash identifier
+
+class DigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithm()),
+ namedtype.NamedType('digest', univ.OctetString())
+ )
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_sha512_224: univ.Null(),
+ id_sha512_256: univ.Null(),
+ id_mgf1: AlgorithmIdentifier(),
+ id_pSpecified: univ.OctetString(),
+ id_RSAES_OAEP: RSAES_OAEP_params(),
+ id_RSASSA_PSS: RSASSA_PSS_params(),
+ md2WithRSAEncryption: univ.Null(),
+ md5WithRSAEncryption: univ.Null(),
+ sha1WithRSAEncryption: univ.Null(),
+ sha224WithRSAEncryption: univ.Null(),
+ sha256WithRSAEncryption: univ.Null(),
+ sha384WithRSAEncryption: univ.Null(),
+ sha512WithRSAEncryption: univ.Null(),
+ sha512_224WithRSAEncryption: univ.Null(),
+ sha512_256WithRSAEncryption: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8018.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8018.py
new file mode 100644
index 0000000000..7a44eea8d2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8018.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #5: Password-Based Cryptography Specification, Version 2.1
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8018.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Import from RFC 3565
+
+AES_IV = rfc3565.AES_IV
+
+
+# Import from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Basic object identifiers
+
+nistAlgorithms = _OID(2, 16, 840, 1, 101, 3, 4)
+
+aes = _OID(nistAlgorithms, 1)
+
+oiw = _OID(1, 3, 14)
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+pkcs = _OID(rsadsi, 1)
+
+digestAlgorithm = _OID(rsadsi, 2)
+
+encryptionAlgorithm = _OID(rsadsi, 3)
+
+pkcs_5 = _OID(pkcs, 5)
+
+
+
+# HMAC object identifiers
+
+id_hmacWithSHA1 = _OID(digestAlgorithm, 7)
+
+id_hmacWithSHA224 = _OID(digestAlgorithm, 8)
+
+id_hmacWithSHA256 = _OID(digestAlgorithm, 9)
+
+id_hmacWithSHA384 = _OID(digestAlgorithm, 10)
+
+id_hmacWithSHA512 = _OID(digestAlgorithm, 11)
+
+id_hmacWithSHA512_224 = _OID(digestAlgorithm, 12)
+
+id_hmacWithSHA512_256 = _OID(digestAlgorithm, 13)
+
+
+# PBES1 object identifiers
+
+pbeWithMD2AndDES_CBC = _OID(pkcs_5, 1)
+
+pbeWithMD2AndRC2_CBC = _OID(pkcs_5, 4)
+
+pbeWithMD5AndDES_CBC = _OID(pkcs_5, 3)
+
+pbeWithMD5AndRC2_CBC = _OID(pkcs_5, 6)
+
+pbeWithSHA1AndDES_CBC = _OID(pkcs_5, 10)
+
+pbeWithSHA1AndRC2_CBC = _OID(pkcs_5, 11)
+
+
+# Supporting techniques object identifiers
+
+desCBC = _OID(oiw, 3, 2, 7)
+
+des_EDE3_CBC = _OID(encryptionAlgorithm, 7)
+
+rc2CBC = _OID(encryptionAlgorithm, 2)
+
+rc5_CBC_PAD = _OID(encryptionAlgorithm, 9)
+
+aes128_CBC_PAD = _OID(aes, 2)
+
+aes192_CBC_PAD = _OID(aes, 22)
+
+aes256_CBC_PAD = _OID(aes, 42)
+
+
+# PBES1
+
+class PBEParameter(univ.Sequence):
+ pass
+
+PBEParameter.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8))),
+ namedtype.NamedType('iterationCount', univ.Integer())
+)
+
+
+# PBES2
+
+id_PBES2 = _OID(pkcs_5, 13)
+
+
+class PBES2_params(univ.Sequence):
+ pass
+
+PBES2_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()),
+ namedtype.NamedType('encryptionScheme', AlgorithmIdentifier())
+)
+
+
+# PBMAC1
+
+id_PBMAC1 = _OID(pkcs_5, 14)
+
+
+class PBMAC1_params(univ.Sequence):
+ pass
+
+PBMAC1_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()),
+ namedtype.NamedType('messageAuthScheme', AlgorithmIdentifier())
+)
+
+
+# PBKDF2
+
+id_PBKDF2 = _OID(pkcs_5, 12)
+
+
+algid_hmacWithSHA1 = AlgorithmIdentifier()
+algid_hmacWithSHA1['algorithm'] = id_hmacWithSHA1
+algid_hmacWithSHA1['parameters'] = univ.Null("")
+
+
+class PBKDF2_params(univ.Sequence):
+ pass
+
+PBKDF2_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('specified', univ.OctetString()),
+ namedtype.NamedType('otherSource', AlgorithmIdentifier())
+ ))),
+ namedtype.NamedType('iterationCount', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('keyLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.DefaultedNamedType('prf', algid_hmacWithSHA1)
+)
+
+
+# RC2 CBC algorithm parameter
+
+class RC2_CBC_Parameter(univ.Sequence):
+ pass
+
+RC2_CBC_Parameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('rc2ParameterVersion', univ.Integer()),
+ namedtype.NamedType('iv', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+)
+
+
+# RC5 CBC algorithm parameter
+
+class RC5_CBC_Parameters(univ.Sequence):
+ pass
+
+RC5_CBC_Parameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ univ.Integer(namedValues=namedval.NamedValues(('v1_0', 16))).subtype(
+ subtypeSpec=constraint.SingleValueConstraint(16))),
+ namedtype.NamedType('rounds',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(8, 127))),
+ namedtype.NamedType('blockSizeInBits',
+ univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(64, 128))),
+ namedtype.OptionalNamedType('iv', univ.OctetString())
+)
+
+
+# Initialization Vector for AES: OCTET STRING (SIZE(16))
+
+class AES_IV(univ.OctetString):
+ pass
+
+AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+# Initialization Vector for DES: OCTET STRING (SIZE(8))
+
+class DES_IV(univ.OctetString):
+ pass
+
+DES_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8)
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ # PBKDF2-PRFs
+ id_hmacWithSHA1: univ.Null(),
+ id_hmacWithSHA224: univ.Null(),
+ id_hmacWithSHA256: univ.Null(),
+ id_hmacWithSHA384: univ.Null(),
+ id_hmacWithSHA512: univ.Null(),
+ id_hmacWithSHA512_224: univ.Null(),
+ id_hmacWithSHA512_256: univ.Null(),
+ # PBES1Algorithms
+ pbeWithMD2AndDES_CBC: PBEParameter(),
+ pbeWithMD2AndRC2_CBC: PBEParameter(),
+ pbeWithMD5AndDES_CBC: PBEParameter(),
+ pbeWithMD5AndRC2_CBC: PBEParameter(),
+ pbeWithSHA1AndDES_CBC: PBEParameter(),
+ pbeWithSHA1AndRC2_CBC: PBEParameter(),
+ # PBES2Algorithms
+ id_PBES2: PBES2_params(),
+ # PBES2-KDFs
+ id_PBKDF2: PBKDF2_params(),
+ # PBMAC1Algorithms
+ id_PBMAC1: PBMAC1_params(),
+ # SupportingAlgorithms
+ desCBC: DES_IV(),
+ des_EDE3_CBC: DES_IV(),
+ rc2CBC: RC2_CBC_Parameter(),
+ rc5_CBC_PAD: RC5_CBC_Parameters(),
+ aes128_CBC_PAD: AES_IV(),
+ aes192_CBC_PAD: AES_IV(),
+ aes256_CBC_PAD: AES_IV(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8103.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8103.py
new file mode 100644
index 0000000000..6429e8635f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8103.py
@@ -0,0 +1,36 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool.
+# Auto-generated by asn1ate v.0.6.0 from rfc8103.asn.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# ChaCha20Poly1305 algorithm for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8103.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AEADChaCha20Poly1305Nonce(univ.OctetString):
+ pass
+
+
+AEADChaCha20Poly1305Nonce.subtypeSpec = constraint.ValueSizeConstraint(12, 12)
+
+id_alg_AEADChaCha20Poly1305 = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 18)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8209.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8209.py
new file mode 100644
index 0000000000..7d70f51b0c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8209.py
@@ -0,0 +1,20 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BGPsec Router PKI Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8209.txt
+#
+
+from pyasn1.type import univ
+
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_bgpsec_router = id_kp + (30, )
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8226.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8226.py
new file mode 100644
index 0000000000..e7fe9460e9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8226.py
@@ -0,0 +1,149 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to implement appropriate constraints and added comments.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# JWT Claim Constraints and TN Authorization List for certificate extensions.
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8226.txt (with errata corrected)
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class JWTClaimName(char.IA5String):
+ pass
+
+
+class JWTClaimNames(univ.SequenceOf):
+ pass
+
+JWTClaimNames.componentType = JWTClaimName()
+JWTClaimNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class JWTClaimPermittedValues(univ.Sequence):
+ pass
+
+JWTClaimPermittedValues.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('claim', JWTClaimName()),
+ namedtype.NamedType('permitted', univ.SequenceOf(
+ componentType=char.UTF8String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class JWTClaimPermittedValuesList(univ.SequenceOf):
+ pass
+
+JWTClaimPermittedValuesList.componentType = JWTClaimPermittedValues()
+JWTClaimPermittedValuesList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class JWTClaimConstraints(univ.Sequence):
+ pass
+
+JWTClaimConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('mustInclude',
+ JWTClaimNames().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('permittedValues',
+ JWTClaimPermittedValuesList().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+JWTClaimConstraints.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('mustInclude', constraint.ComponentPresentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('permittedValues', constraint.ComponentPresentConstraint()))
+)
+
+
+id_pe_JWTClaimConstraints = _OID(1, 3, 6, 1, 5, 5, 7, 1, 27)
+
+
+class ServiceProviderCode(char.IA5String):
+ pass
+
+
+class TelephoneNumber(char.IA5String):
+ pass
+
+TelephoneNumber.subtypeSpec = constraint.ConstraintsIntersection(
+ constraint.ValueSizeConstraint(1, 15),
+ constraint.PermittedAlphabetConstraint(
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '#', '*')
+)
+
+
+class TelephoneNumberRange(univ.Sequence):
+ pass
+
+TelephoneNumberRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('start', TelephoneNumber()),
+ namedtype.NamedType('count',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(2, MAX)))
+)
+
+
+class TNEntry(univ.Choice):
+ pass
+
+TNEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('spc',
+ ServiceProviderCode().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('range',
+ TelephoneNumberRange().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('one',
+ TelephoneNumber().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 2)))
+)
+
+
+class TNAuthorizationList(univ.SequenceOf):
+ pass
+
+TNAuthorizationList.componentType = TNEntry()
+TNAuthorizationList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_pe_TNAuthList = _OID(1, 3, 6, 1, 5, 5, 7, 1, 26)
+
+
+id_ad_stirTNList = _OID(1, 3, 6, 1, 5, 5, 7, 48, 14)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_TNAuthList: TNAuthorizationList(),
+ id_pe_JWTClaimConstraints: JWTClaimConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8358.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8358.py
new file mode 100644
index 0000000000..647a366622
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8358.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Digital Signatures on Internet-Draft Documents
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8358.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+id_ct = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1')
+
+id_ct_asciiTextWithCRLF = id_ct + (27, )
+
+id_ct_epub = id_ct + (39, )
+
+id_ct_htmlWithCRLF = id_ct + (38, )
+
+id_ct_pdf = id_ct + (29, )
+
+id_ct_postscript = id_ct + (30, )
+
+id_ct_utf8TextWithCRLF = id_ct + (37, )
+
+id_ct_xml = id_ct + (28, )
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_asciiTextWithCRLF: univ.OctetString(),
+ id_ct_epub: univ.OctetString(),
+ id_ct_htmlWithCRLF: univ.OctetString(),
+ id_ct_pdf: univ.OctetString(),
+ id_ct_postscript: univ.OctetString(),
+ id_ct_utf8TextWithCRLF: univ.OctetString(),
+ id_ct_xml: univ.OctetString(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8360.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8360.py
new file mode 100644
index 0000000000..ca180c18d8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8360.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Resource Public Key Infrastructure (RPKI) Validation Reconsidered
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8360.txt
+# https://www.rfc-editor.org/errata/eid5870
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3779
+from pyasn1_modules import rfc5280
+
+
+# IP Address Delegation Extension V2
+
+id_pe_ipAddrBlocks_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.28')
+
+IPAddrBlocks = rfc3779.IPAddrBlocks
+
+
+# Autonomous System Identifier Delegation Extension V2
+
+id_pe_autonomousSysIds_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.29')
+
+ASIdentifiers = rfc3779.ASIdentifiers
+
+
+# Map of Certificate Extension OIDs to Extensions is added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ipAddrBlocks_v2: IPAddrBlocks(),
+ id_pe_autonomousSysIds_v2: ASIdentifiers(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8398.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8398.py
new file mode 100644
index 0000000000..151b632107
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8398.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internationalized Email Addresses in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8398.txt
+# https://www.rfc-editor.org/errata/eid5418
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# SmtpUTF8Mailbox contains Mailbox as specified in Section 3.3 of RFC 6531
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_SmtpUTF8Mailbox = id_on + (9, )
+
+
+class SmtpUTF8Mailbox(char.UTF8String):
+ pass
+
+SmtpUTF8Mailbox.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+on_SmtpUTF8Mailbox = rfc5280.AnotherName()
+on_SmtpUTF8Mailbox['type-id'] = id_on_SmtpUTF8Mailbox
+on_SmtpUTF8Mailbox['value'] = SmtpUTF8Mailbox()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_SmtpUTF8Mailbox: SmtpUTF8Mailbox(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8410.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8410.py
new file mode 100644
index 0000000000..98bc97bb14
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8410.py
@@ -0,0 +1,43 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for Ed25519, Ed448, X25519, and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8410.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+
+class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class CurvePrivateKey(univ.OctetString):
+ pass
+
+
+id_X25519 = univ.ObjectIdentifier('1.3.101.110')
+
+id_X448 = univ.ObjectIdentifier('1.3.101.111')
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+id_sha512 = rfc4055.id_sha512
+
+id_aes128_wrap = rfc3565.id_aes128_wrap
+
+id_aes256_wrap = rfc3565.id_aes256_wrap
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8418.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8418.py
new file mode 100644
index 0000000000..6e76487c88
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8418.py
@@ -0,0 +1,36 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Diffie-Hellman (ECDH) Key Agreement Algorithm
+# with X25519 and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8418.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc5280
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KeyWrapAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+dhSinglePass_stdDH_sha256kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.1')
+
+dhSinglePass_stdDH_sha384kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.2')
+
+dhSinglePass_stdDH_sha512kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.3')
+
+dhSinglePass_stdDH_hkdf_sha256_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.19')
+
+dhSinglePass_stdDH_hkdf_sha384_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.20')
+
+dhSinglePass_stdDH_hkdf_sha512_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.21')
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8419.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8419.py
new file mode 100644
index 0000000000..f10994be28
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8419.py
@@ -0,0 +1,68 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Edwards-Curve Digital Signature Algorithm (EdDSA) Signatures in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8419.txt
+# https://www.rfc-editor.org/errata/eid5869
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class ShakeOutputLen(univ.Integer):
+ pass
+
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+sigAlg_Ed25519 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed25519['algorithm'] = id_Ed25519
+# sigAlg_Ed25519['parameters'] is absent
+
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+sigAlg_Ed448 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed448['algorithm'] = id_Ed448
+# sigAlg_Ed448['parameters'] is absent
+
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+
+id_sha512 = hashAlgs + (3, )
+
+hashAlg_SHA_512 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHA_512['algorithm'] = id_sha512
+# hashAlg_SHA_512['parameters'] is absent
+
+
+id_shake256 = hashAlgs + (12, )
+
+hashAlg_SHAKE256 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256['algorithm'] = id_shake256
+# hashAlg_SHAKE256['parameters'] is absent
+
+
+id_shake256_len = hashAlgs + (18, )
+
+hashAlg_SHAKE256_LEN = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256_LEN['algorithm'] = id_shake256_len
+hashAlg_SHAKE256_LEN['parameters'] = ShakeOutputLen()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py. Do not add OIDs with absent parameters.
+
+_algorithmIdentifierMapUpdate = {
+ id_shake256_len: ShakeOutputLen(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8479.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8479.py
new file mode 100644
index 0000000000..57f78b62f2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8479.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Storing Validation Parameters in PKCS#8
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8479.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+id_attr_validation_parameters = univ.ObjectIdentifier('1.3.6.1.4.1.2312.18.8.1')
+
+
+class ValidationParams(univ.Sequence):
+ pass
+
+ValidationParams.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', univ.ObjectIdentifier()),
+ namedtype.NamedType('seed', univ.OctetString())
+)
+
+
+at_validation_parameters = rfc5652.Attribute()
+at_validation_parameters['attrType'] = id_attr_validation_parameters
+at_validation_parameters['attrValues'][0] = ValidationParams()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_attr_validation_parameters: ValidationParams(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8494.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8494.py
new file mode 100644
index 0000000000..fe349e14ca
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8494.py
@@ -0,0 +1,80 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Multicast Email (MULE) over Allied Communications Publication 142
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8494.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+id_mmhs_CDT = univ.ObjectIdentifier('1.3.26.0.4406.0.4.2')
+
+
+class AlgorithmID_ShortForm(univ.Integer):
+ pass
+
+AlgorithmID_ShortForm.namedValues = namedval.NamedValues(
+ ('zlibCompress', 0)
+)
+
+
+class ContentType_ShortForm(univ.Integer):
+ pass
+
+ContentType_ShortForm.namedValues = namedval.NamedValues(
+ ('unidentified', 0),
+ ('external', 1),
+ ('p1', 2),
+ ('p3', 3),
+ ('p7', 4),
+ ('mule', 25)
+)
+
+
+class CompressedContentInfo(univ.Sequence):
+ pass
+
+CompressedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('unnamed', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('contentType-ShortForm',
+ ContentType_ShortForm().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('contentType-OID',
+ univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))),
+ namedtype.NamedType('compressedContent',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class CompressionAlgorithmIdentifier(univ.Choice):
+ pass
+
+CompressionAlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithmID-ShortForm',
+ AlgorithmID_ShortForm().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('algorithmID-OID',
+ univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class CompressedData(univ.Sequence):
+ pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+ namedtype.NamedType('compressedContentInfo', CompressedContentInfo())
+)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8520.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8520.py
new file mode 100644
index 0000000000..b9eb6e9377
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8520.py
@@ -0,0 +1,63 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Extensions for MUD URL and MUD Signer;
+# Object Identifier for CMS Content Type for a MUD file
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8520.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+# X.509 Extension for MUD URL
+
+id_pe_mud_url = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.25')
+
+class MUDURLSyntax(char.IA5String):
+ pass
+
+
+# X.509 Extension for MUD Signer
+
+id_pe_mudsigner = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.30')
+
+class MUDsignerSyntax(rfc5280.Name):
+ pass
+
+
+# Object Identifier for CMS Content Type for a MUD file
+
+id_ct_mudtype = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.41')
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_mud_url: MUDURLSyntax(),
+ id_pe_mudsigner: MUDsignerSyntax(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_mudtype: univ.OctetString(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8619.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8619.py
new file mode 100644
index 0000000000..0aaa811bad
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8619.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for HKDF
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8619.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Object Identifiers
+
+id_alg_hkdf_with_sha256 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.28')
+
+
+id_alg_hkdf_with_sha384 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.29')
+
+
+id_alg_hkdf_with_sha512 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.30')
+
+
+# Key Derivation Algorithm Identifiers
+
+kda_hkdf_with_sha256 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha256['algorithm'] = id_alg_hkdf_with_sha256
+# kda_hkdf_with_sha256['parameters'] are absent
+
+
+kda_hkdf_with_sha384 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha384['algorithm'] = id_alg_hkdf_with_sha384
+# kda_hkdf_with_sha384['parameters'] are absent
+
+
+kda_hkdf_with_sha512 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha512['algorithm'] = id_alg_hkdf_with_sha512
+# kda_hkdf_with_sha512['parameters'] are absent
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8649.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8649.py
new file mode 100644
index 0000000000..c405f050e8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8649.py
@@ -0,0 +1,40 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Certificate Extension for Hash Of Root Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8649.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_ce_hashOfRootKey = univ.ObjectIdentifier('1.3.6.1.4.1.51483.2.1')
+
+
+class HashedRootKey(univ.Sequence):
+ pass
+
+HashedRootKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', univ.OctetString())
+)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_ce_hashOfRootKey: HashedRootKey(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8692.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8692.py
new file mode 100644
index 0000000000..7a6791ad20
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8692.py
@@ -0,0 +1,79 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for RSASSA-PSS and ECDSA using SHAKEs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8692.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+
+
+# SHAKE128 One-Way Hash Function
+
+id_shake128 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.11')
+
+mda_shake128 = rfc5280.AlgorithmIdentifier()
+mda_shake128['algorithm'] = id_shake128
+# mda_shake128['parameters'] is absent
+
+
+# SHAKE256 One-Way Hash Function
+
+id_shake256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.12')
+
+mda_shake256 = rfc5280.AlgorithmIdentifier()
+mda_shake256['algorithm'] = id_shake256
+# mda_shake256['parameters'] is absent
+
+
+# RSA PSS with SHAKE128
+
+id_RSASSA_PSS_SHAKE128 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.30')
+
+sa_rSASSA_PSS_SHAKE128 = rfc5280.AlgorithmIdentifier()
+sa_rSASSA_PSS_SHAKE128['algorithm'] = id_RSASSA_PSS_SHAKE128
+# sa_rSASSA_PSS_SHAKE128['parameters'] is absent
+
+pk_rsaSSA_PSS_SHAKE128 = rfc4055.RSAPublicKey()
+
+
+# RSA PSS with SHAKE256
+
+id_RSASSA_PSS_SHAKE256 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.31')
+
+sa_rSASSA_PSS_SHAKE256 = rfc5280.AlgorithmIdentifier()
+sa_rSASSA_PSS_SHAKE256['algorithm'] = id_RSASSA_PSS_SHAKE256
+# sa_rSASSA_PSS_SHAKE256['parameters'] is absent
+
+pk_rsaSSA_PSS_SHAKE256 = rfc4055.RSAPublicKey()
+
+
+# ECDSA with SHAKE128
+
+id_ecdsa_with_shake128 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.32')
+
+sa_ecdsa_with_shake128 = rfc5280.AlgorithmIdentifier()
+sa_ecdsa_with_shake128['algorithm'] = id_ecdsa_with_shake128
+# sa_ecdsa_with_shake128['parameters'] is absent
+
+pk_ec = rfc5480.ECPoint()
+
+
+# ECDSA with SHAKE256
+
+id_ecdsa_with_shake256 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.33')
+
+sa_ecdsa_with_shake256 = rfc5280.AlgorithmIdentifier()
+sa_ecdsa_with_shake256['algorithm'] = id_ecdsa_with_shake256
+# sa_ecdsa_with_shake256['parameters'] is absent
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8696.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8696.py
new file mode 100644
index 0000000000..4c6d38d441
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8696.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using Pre-Shared Key (PSK) in the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8696.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_ori = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13')
+
+id_ori_keyTransPSK = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13.1')
+
+id_ori_keyAgreePSK = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13.2')
+
+
+class PreSharedKeyIdentifier(univ.OctetString):
+ pass
+
+
+class KeyTransRecipientInfos(univ.SequenceOf):
+ componentType = rfc5652.KeyTransRecipientInfo()
+
+
+class KeyTransPSKRecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ rfc5652.CMSVersion()),
+ namedtype.NamedType('pskid',
+ PreSharedKeyIdentifier()),
+ namedtype.NamedType('kdfAlgorithm',
+ rfc5652.KeyDerivationAlgorithmIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm',
+ rfc5652.KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('ktris',
+ KeyTransRecipientInfos()),
+ namedtype.NamedType('encryptedKey',
+ rfc5652.EncryptedKey())
+ )
+
+
+class KeyAgreePSKRecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ rfc5652.CMSVersion()),
+ namedtype.NamedType('pskid',
+ PreSharedKeyIdentifier()),
+ namedtype.NamedType('originator',
+ rfc5652.OriginatorIdentifierOrKey().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm',
+ rfc5652.UserKeyingMaterial().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('kdfAlgorithm',
+ rfc5652.KeyDerivationAlgorithmIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm',
+ rfc5652.KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys',
+ rfc5652.RecipientEncryptedKeys())
+ )
+
+
+class CMSORIforPSKOtherInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('psk',
+ univ.OctetString()),
+ namedtype.NamedType('keyMgmtAlgType',
+ univ.Enumerated(namedValues=namedval.NamedValues(
+ ('keyTrans', 5), ('keyAgree', 10)))),
+ namedtype.NamedType('keyEncryptionAlgorithm',
+ rfc5652.KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('pskLength',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('kdkLength',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX)))
+ )
+
+
+# Update the CMS Other Recipient Info map in rfc5652.py
+
+_otherRecipientInfoMapUpdate = {
+ id_ori_keyTransPSK: KeyTransPSKRecipientInfo(),
+ id_ori_keyAgreePSK: KeyAgreePSKRecipientInfo(),
+}
+
+rfc5652.otherRecipientInfoMap.update(_otherRecipientInfoMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8702.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8702.py
new file mode 100644
index 0000000000..977c278760
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8702.py
@@ -0,0 +1,105 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SHAKE One-way Hash Functions for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8702.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8692
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 8692
+
+id_shake128 = rfc8692.id_shake128
+
+mda_shake128 = rfc8692.mda_shake128
+
+id_shake256 = rfc8692.id_shake256
+
+mda_shake256 = rfc8692.mda_shake256
+
+id_RSASSA_PSS_SHAKE128 = rfc8692.id_RSASSA_PSS_SHAKE128
+
+sa_rSASSA_PSS_SHAKE128 = rfc8692.sa_rSASSA_PSS_SHAKE128
+
+pk_rsaSSA_PSS_SHAKE128 = rfc8692.pk_rsaSSA_PSS_SHAKE128
+
+id_RSASSA_PSS_SHAKE256 = rfc8692.id_RSASSA_PSS_SHAKE256
+
+sa_rSASSA_PSS_SHAKE256 = rfc8692.sa_rSASSA_PSS_SHAKE256
+
+pk_rsaSSA_PSS_SHAKE256 = rfc8692.pk_rsaSSA_PSS_SHAKE256
+
+id_ecdsa_with_shake128 = rfc8692.id_ecdsa_with_shake128
+
+sa_ecdsa_with_shake128 = rfc8692.sa_ecdsa_with_shake128
+
+id_ecdsa_with_shake256 = rfc8692.id_ecdsa_with_shake256
+
+sa_ecdsa_with_shake256 = rfc8692.sa_ecdsa_with_shake256
+
+pk_ec = rfc8692.pk_ec
+
+
+# KMAC with SHAKE128
+
+id_KMACWithSHAKE128 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.19')
+
+
+class KMACwithSHAKE128_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('kMACOutputLength',
+ univ.Integer().subtype(value=256)),
+ namedtype.DefaultedNamedType('customizationString',
+ univ.OctetString().subtype(value=''))
+ )
+
+
+maca_KMACwithSHAKE128 = AlgorithmIdentifier()
+maca_KMACwithSHAKE128['algorithm'] = id_KMACWithSHAKE128
+maca_KMACwithSHAKE128['parameters'] = KMACwithSHAKE128_params()
+
+
+# KMAC with SHAKE256
+
+id_KMACWithSHAKE256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.20')
+
+
+class KMACwithSHAKE256_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('kMACOutputLength',
+ univ.Integer().subtype(value=512)),
+ namedtype.DefaultedNamedType('customizationString',
+ univ.OctetString().subtype(value=''))
+ )
+
+
+maca_KMACwithSHAKE256 = AlgorithmIdentifier()
+maca_KMACwithSHAKE256['algorithm'] = id_KMACWithSHAKE256
+maca_KMACwithSHAKE256['parameters'] = KMACwithSHAKE256_params()
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_KMACWithSHAKE128: KMACwithSHAKE128_params(),
+ id_KMACWithSHAKE256: KMACwithSHAKE256_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8708.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8708.py
new file mode 100644
index 0000000000..3e9909cf90
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8708.py
@@ -0,0 +1,41 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# HSS/LMS Hash-based Signature Algorithm for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8708.txt
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Object Identifiers
+
+id_alg_hss_lms_hashsig = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.17')
+
+id_alg_mts_hashsig = id_alg_hss_lms_hashsig
+
+
+# Signature Algorithm Identifier
+
+sa_HSS_LMS_HashSig = rfc5280.AlgorithmIdentifier()
+sa_HSS_LMS_HashSig['algorithm'] = id_alg_hss_lms_hashsig
+# sa_HSS_LMS_HashSig['parameters'] is always absent
+
+
+# Public Key
+
+class HSS_LMS_HashSig_PublicKey(univ.OctetString):
+ pass
+
+
+pk_HSS_LMS_HashSig = rfc5280.SubjectPublicKeyInfo()
+pk_HSS_LMS_HashSig['algorithm'] = sa_HSS_LMS_HashSig
+# pk_HSS_LMS_HashSig['subjectPublicKey'] CONTAINS a DER-encoded HSS_LMS_HashSig_PublicKey
diff --git a/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8769.py b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8769.py
new file mode 100644
index 0000000000..5d2b300674
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/pyasn1_modules/rfc8769.py
@@ -0,0 +1,21 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CBOR Content for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8769.txt
+#
+
+from pyasn1.type import univ
+
+
+id_ct_cbor = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.44')
+
+
+id_ct_cborSequence = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.45')
diff --git a/contrib/python/pyasn1-modules/py3/tests/__init__.py b/contrib/python/pyasn1-modules/py3/tests/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1-modules/py3/tests/__main__.py b/contrib/python/pyasn1-modules/py3/tests/__main__.py
new file mode 100644
index 0000000000..4e10bc8afb
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/__main__.py
@@ -0,0 +1,138 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.test_pem.suite',
+ 'tests.test_rfc2314.suite',
+ 'tests.test_rfc2315.suite',
+ 'tests.test_rfc2437.suite',
+ 'tests.test_rfc2459.suite',
+ 'tests.test_rfc2511.suite',
+ 'tests.test_rfc2560.suite',
+ 'tests.test_rfc2631.suite',
+ 'tests.test_rfc2634.suite',
+ 'tests.test_rfc2876.suite',
+ 'tests.test_rfc2985.suite',
+ 'tests.test_rfc2986.suite',
+ 'tests.test_rfc3058.suite',
+ 'tests.test_rfc3114.suite',
+ 'tests.test_rfc3125.suite',
+ 'tests.test_rfc3161.suite',
+ 'tests.test_rfc3274.suite',
+ 'tests.test_rfc3279.suite',
+ 'tests.test_rfc3280.suite',
+ 'tests.test_rfc3281.suite',
+ 'tests.test_rfc3370.suite',
+ 'tests.test_rfc3447.suite',
+ 'tests.test_rfc3537.suite',
+ 'tests.test_rfc3560.suite',
+ 'tests.test_rfc3565.suite',
+ 'tests.test_rfc3657.suite',
+ 'tests.test_rfc3709.suite',
+ 'tests.test_rfc3739.suite',
+ 'tests.test_rfc3770.suite',
+ 'tests.test_rfc3779.suite',
+ 'tests.test_rfc3820.suite',
+ 'tests.test_rfc3852.suite',
+ 'tests.test_rfc4010.suite',
+ 'tests.test_rfc4043.suite',
+ 'tests.test_rfc4055.suite',
+ 'tests.test_rfc4073.suite',
+ 'tests.test_rfc4108.suite',
+ 'tests.test_rfc4210.suite',
+ 'tests.test_rfc4211.suite',
+ 'tests.test_rfc4334.suite',
+ 'tests.test_rfc4357.suite',
+ 'tests.test_rfc4387.suite',
+ 'tests.test_rfc4476.suite',
+ 'tests.test_rfc4490.suite',
+ 'tests.test_rfc4491.suite',
+ 'tests.test_rfc4683.suite',
+ 'tests.test_rfc4985.suite',
+ 'tests.test_rfc5035.suite',
+ 'tests.test_rfc5083.suite',
+ 'tests.test_rfc5084.suite',
+ 'tests.test_rfc5126.suite',
+ 'tests.test_rfc5208.suite',
+ 'tests.test_rfc5275.suite',
+ 'tests.test_rfc5280.suite',
+ 'tests.test_rfc5480.suite',
+ 'tests.test_rfc5636.suite',
+ 'tests.test_rfc5639.suite',
+ 'tests.test_rfc5649.suite',
+ 'tests.test_rfc5652.suite',
+ 'tests.test_rfc5697.suite',
+ 'tests.test_rfc5751.suite',
+ 'tests.test_rfc5752.suite',
+ 'tests.test_rfc5753.suite',
+ 'tests.test_rfc5755.suite',
+ 'tests.test_rfc5913.suite',
+ 'tests.test_rfc5914.suite',
+ 'tests.test_rfc5915.suite',
+ 'tests.test_rfc5916.suite',
+ 'tests.test_rfc5917.suite',
+ 'tests.test_rfc5924.suite',
+ 'tests.test_rfc5934.suite',
+ 'tests.test_rfc5940.suite',
+ 'tests.test_rfc5958.suite',
+ 'tests.test_rfc5990.suite',
+ 'tests.test_rfc6010.suite',
+ 'tests.test_rfc6019.suite',
+ 'tests.test_rfc6031.suite',
+ 'tests.test_rfc6032.suite',
+ 'tests.test_rfc6120.suite',
+ 'tests.test_rfc6187.suite',
+ 'tests.test_rfc6210.suite',
+ 'tests.test_rfc6211.suite',
+ 'tests.test_rfc6482.suite',
+ 'tests.test_rfc6486.suite',
+ 'tests.test_rfc6487.suite',
+ 'tests.test_rfc6664.suite',
+ 'tests.test_rfc6955.suite',
+ 'tests.test_rfc6960.suite',
+ 'tests.test_rfc7030.suite',
+ 'tests.test_rfc7191.suite',
+ 'tests.test_rfc7229.suite',
+ 'tests.test_rfc7292.suite',
+ 'tests.test_rfc7296.suite',
+ 'tests.test_rfc7508.suite',
+ 'tests.test_rfc7585.suite',
+ 'tests.test_rfc7633.suite',
+ 'tests.test_rfc7773.suite',
+ 'tests.test_rfc7894.suite',
+ 'tests.test_rfc7906.suite',
+ 'tests.test_rfc7914.suite',
+ 'tests.test_rfc8017.suite',
+ 'tests.test_rfc8018.suite',
+ 'tests.test_rfc8103.suite',
+ 'tests.test_rfc8209.suite',
+ 'tests.test_rfc8226.suite',
+ 'tests.test_rfc8358.suite',
+ 'tests.test_rfc8360.suite',
+ 'tests.test_rfc8398.suite',
+ 'tests.test_rfc8410.suite',
+ 'tests.test_rfc8418.suite',
+ 'tests.test_rfc8419.suite',
+ 'tests.test_rfc8479.suite',
+ 'tests.test_rfc8494.suite',
+ 'tests.test_rfc8520.suite',
+ 'tests.test_rfc8619.suite',
+ 'tests.test_rfc8649.suite',
+ 'tests.test_rfc8692.suite',
+ 'tests.test_rfc8696.suite',
+ 'tests.test_rfc8702.suite',
+ 'tests.test_rfc8708.suite',
+ 'tests.test_rfc8769.suite']
+)
+
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_missing.py b/contrib/python/pyasn1-modules/py3/tests/test_missing.py
new file mode 100644
index 0000000000..0b3f58a7ce
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_missing.py
@@ -0,0 +1,18 @@
+#
+# This file is part of pyasn1-modules software.
+#
+import sys
+import unittest
+
+# modules without tests
+from pyasn1_modules import (
+ rfc1155, rfc1157, rfc1901, rfc3412, rfc3414
+)
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_pem.py b/contrib/python/pyasn1-modules/py3/tests/test_pem.py
new file mode 100644
index 0000000000..dbcca5a78c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_pem.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.compat.octets import ints2octs
+from pyasn1_modules import pem
+
+
+class PemTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDATCCAekCAQAwgZkxCzAJBgNVBAYTAlJVMRYwFAYDVQQIEw1Nb3Njb3cgUmVn
+aW9uMQ8wDQYDVQQHEwZNb3Njb3cxGjAYBgNVBAoTEVNOTVAgTGFib3JhdG9yaWVz
+MQwwCgYDVQQLFANSJkQxFTATBgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3
+DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQC9n2NfGS98JDBmAXQn+vNUyPB3QPYC1cwpX8UMYh9MdAmBZJCnvXrQ
+Pp14gNAv6AQKxefmGES1b+Yd+1we9HB8AKm1/8xvRDUjAvy4iO0sqFCPvIfSujUy
+pBcfnR7QE2itvyrMxCDSEVnMhKdCNb23L2TptUmpvLcb8wfAMLFsSu2yaOtJysep
+oH/mvGqlRv2ti2+E2YA0M7Pf83wyV1XmuEsc9tQ225rprDk2uyshUglkDD2235rf
+0QyONq3Aw3BMrO9ss1qj7vdDhVHVsxHnTVbEgrxEWkq2GkVKh9QReMZ2AKxe40j4
+og+OjKXguOCggCZHJyXKxccwqCaeCztbAgMBAAGgIjAgBgkqhkiG9w0BCQIxExMR
+U05NUCBMYWJvcmF0b3JpZXMwDQYJKoZIhvcNAQEFBQADggEBAAihbwmN9M2bsNNm
+9KfxqiGMqqcGCtzIlpDz/2NVwY93cEZsbz3Qscc0QpknRmyTSoDwIG+1nUH0vzkT
+Nv8sBmp9I1GdhGg52DIaWwL4t9O5WUHgfHSJpPxZ/zMP2qIsdPJ+8o19BbXRlufc
+73c03H1piGeb9VcePIaulSHI622xukI6f4Sis49vkDaoi+jadbEEb6TYkJQ3AMRD
+WdApGGm0BePdLqboW1Yv70WRRFFD8sxeT7Yw4qrJojdnq0xMHPGfKpf6dJsqWkHk
+b5DRbjil1Zt9pJuF680S9wtBzSi0hsMHXR9TzS7HpMjykL2nmCVY6A78MZapsCzn
+GGbx7DI=
+"""
+
+ def testReadBase64fromText(self):
+
+ binary = pem.readBase64fromText(self.pem_text)
+
+ self.assertTrue(binary)
+
+ expected = [
+ 48, 130, 3, 1, 48, 130, 1, 233, 2, 1, 0, 48, 129, 153, 49, 11, 48,
+ 9, 6, 3, 85, 4, 6, 19, 2, 82, 85, 49, 22, 48, 20, 6, 3, 85, 4, 8,
+ 19, 13, 77, 111, 115, 99, 111, 119, 32, 82, 101, 103, 105, 111,
+ 110, 49, 15, 48, 13, 6, 3, 85, 4, 7, 19, 6, 77, 111, 115, 99, 111,
+ 119, 49, 26, 48, 24, 6, 3, 85, 4, 10, 19, 17, 83, 78, 77, 80, 32,
+ 76, 97, 98, 111, 114, 97, 116, 111, 114, 105, 101, 115, 49, 12,
+ 48, 10, 6, 3, 85, 4, 11, 20, 3, 82, 38, 68, 49, 21, 48, 19, 6, 3,
+ 85, 4, 3, 19, 12, 115, 110, 109, 112, 108, 97, 98, 115, 46, 99,
+ 111, 109, 49, 32, 48, 30, 6, 9, 42, 134, 72, 134, 247, 13, 1, 9, 1,
+ 22, 17, 105, 110, 102, 111, 64, 115, 110, 109, 112, 108, 97, 98,
+ 115, 46, 99, 111, 109, 48, 130, 1, 34, 48, 13, 6, 9, 42, 134, 72,
+ 134, 247, 13, 1, 1, 1, 5, 0, 3, 130, 1, 15, 0, 48, 130, 1, 10, 2,
+ 130, 1, 1, 0, 189, 159, 99, 95, 25, 47, 124, 36, 48, 102, 1, 116,
+ 39, 250, 243, 84, 200, 240, 119, 64, 246, 2, 213, 204, 41, 95, 197,
+ 12, 98, 31, 76, 116, 9, 129, 100, 144, 167, 189, 122, 208, 62, 157,
+ 120, 128, 208, 47, 232, 4, 10, 197, 231, 230, 24, 68, 181, 111,
+ 230, 29, 251, 92, 30, 244, 112, 124, 0, 169, 181, 255, 204, 111,
+ 68, 53, 35, 2, 252, 184, 136, 237, 44, 168, 80, 143, 188, 135, 210,
+ 186, 53, 50, 164, 23, 31, 157, 30, 208, 19, 104, 173, 191, 42, 204,
+ 196, 32, 210, 17, 89, 204, 132, 167, 66, 53, 189, 183, 47, 100,
+ 233, 181, 73, 169, 188, 183, 27, 243, 7, 192, 48, 177, 108, 74,
+ 237, 178, 104, 235, 73, 202, 199, 169, 160, 127, 230, 188, 106,
+ 165, 70, 253, 173, 139, 111, 132, 217, 128, 52, 51, 179, 223, 243,
+ 124, 50, 87, 85, 230, 184, 75, 28, 246, 212, 54, 219, 154, 233,
+ 172, 57, 54, 187, 43, 33, 82, 9, 100, 12, 61, 182, 223, 154, 223,
+ 209, 12, 142, 54, 173, 192, 195, 112, 76, 172, 239, 108, 179, 90,
+ 163, 238, 247, 67, 133, 81, 213, 179, 17, 231, 77, 86, 196, 130,
+ 188, 68, 90, 74, 182, 26, 69, 74, 135, 212, 17, 120, 198, 118, 0,
+ 172, 94, 227, 72, 248, 162, 15, 142, 140, 165, 224, 184, 224, 160,
+ 128, 38, 71, 39, 37, 202, 197, 199, 48, 168, 38, 158, 11, 59, 91, 2,
+ 3, 1, 0, 1, 160, 34, 48, 32, 6, 9, 42, 134, 72, 134, 247, 13, 1, 9,
+ 2, 49, 19, 19, 17, 83, 78, 77, 80, 32, 76, 97, 98, 111, 114, 97,
+ 116, 111, 114, 105, 101, 115, 48, 13, 6, 9, 42, 134, 72, 134, 247,
+ 13, 1, 1, 5, 5, 0, 3, 130, 1, 1, 0, 8, 161, 111, 9, 141, 244, 205,
+ 155, 176, 211, 102, 244, 167, 241, 170, 33, 140, 170, 167, 6, 10,
+ 220, 200, 150, 144, 243, 255, 99, 85, 193, 143, 119, 112, 70, 108,
+ 111, 61, 208, 177, 199, 52, 66, 153, 39, 70, 108, 147, 74, 128, 240,
+ 32, 111, 181, 157, 65, 244, 191, 57, 19, 54, 255, 44, 6, 106, 125,
+ 35, 81, 157, 132, 104, 57, 216, 50, 26, 91, 2, 248, 183, 211, 185,
+ 89, 65, 224, 124, 116, 137, 164, 252, 89, 255, 51, 15, 218, 162,
+ 44, 116, 242, 126, 242, 141, 125, 5, 181, 209, 150, 231, 220, 239,
+ 119, 52, 220, 125, 105, 136, 103, 155, 245, 87, 30, 60, 134, 174,
+ 149, 33, 200, 235, 109, 177, 186, 66, 58, 127, 132, 162, 179, 143,
+ 111, 144, 54, 168, 139, 232, 218, 117, 177, 4, 111, 164, 216, 144,
+ 148, 55, 0, 196, 67, 89, 208, 41, 24, 105, 180, 5, 227, 221, 46,
+ 166, 232, 91, 86, 47, 239, 69, 145, 68, 81, 67, 242, 204, 94, 79,
+ 182, 48, 226, 170, 201, 162, 55, 103, 171, 76, 76, 28, 241, 159,
+ 42, 151, 250, 116, 155, 42, 90, 65, 228, 111, 144, 209, 110, 56,
+ 165, 213, 155, 125, 164, 155, 133, 235, 205, 18, 247, 11, 65, 205,
+ 40, 180, 134, 195, 7, 93, 31, 83, 205, 46, 199, 164, 200, 242, 144,
+ 189, 167, 152, 37, 88, 232, 14, 252, 49, 150, 169, 176, 44, 231,
+ 24, 102, 241, 236, 50
+ ]
+
+ self.assertEqual(ints2octs(expected), binary)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2314.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2314.py
new file mode 100644
index 0000000000..69927a6a5c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2314.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2314
+
+
+class CertificationRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDATCCAekCAQAwgZkxCzAJBgNVBAYTAlJVMRYwFAYDVQQIEw1Nb3Njb3cgUmVn
+aW9uMQ8wDQYDVQQHEwZNb3Njb3cxGjAYBgNVBAoTEVNOTVAgTGFib3JhdG9yaWVz
+MQwwCgYDVQQLFANSJkQxFTATBgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3
+DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQC9n2NfGS98JDBmAXQn+vNUyPB3QPYC1cwpX8UMYh9MdAmBZJCnvXrQ
+Pp14gNAv6AQKxefmGES1b+Yd+1we9HB8AKm1/8xvRDUjAvy4iO0sqFCPvIfSujUy
+pBcfnR7QE2itvyrMxCDSEVnMhKdCNb23L2TptUmpvLcb8wfAMLFsSu2yaOtJysep
+oH/mvGqlRv2ti2+E2YA0M7Pf83wyV1XmuEsc9tQ225rprDk2uyshUglkDD2235rf
+0QyONq3Aw3BMrO9ss1qj7vdDhVHVsxHnTVbEgrxEWkq2GkVKh9QReMZ2AKxe40j4
+og+OjKXguOCggCZHJyXKxccwqCaeCztbAgMBAAGgIjAgBgkqhkiG9w0BCQIxExMR
+U05NUCBMYWJvcmF0b3JpZXMwDQYJKoZIhvcNAQEFBQADggEBAAihbwmN9M2bsNNm
+9KfxqiGMqqcGCtzIlpDz/2NVwY93cEZsbz3Qscc0QpknRmyTSoDwIG+1nUH0vzkT
+Nv8sBmp9I1GdhGg52DIaWwL4t9O5WUHgfHSJpPxZ/zMP2qIsdPJ+8o19BbXRlufc
+73c03H1piGeb9VcePIaulSHI622xukI6f4Sis49vkDaoi+jadbEEb6TYkJQ3AMRD
+WdApGGm0BePdLqboW1Yv70WRRFFD8sxeT7Yw4qrJojdnq0xMHPGfKpf6dJsqWkHk
+b5DRbjil1Zt9pJuF680S9wtBzSi0hsMHXR9TzS7HpMjykL2nmCVY6A78MZapsCzn
+GGbx7DI=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2314.CertificationRequest()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2315.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2315.py
new file mode 100644
index 0000000000..40030c9972
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2315.py
@@ -0,0 +1,165 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2315
+
+
+class Pkcs7TestCase(unittest.TestCase):
+ pem_text_unordered = """\
+MIIKdQYJKoZIhvcNAQcCoIIKZjCCCmICAQExADALBgkqhkiG9w0BBwGgggpIMIIC
+XjCCAcegAwIBAgIBADANBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJSVTEPMA0G
+A1UEBxMGTW9zY293MRcwFQYDVQQKEw5Tb3ZhbSBUZWxlcG9ydDEMMAoGA1UECxMD
+TklTMQ8wDQYDVQQDEwZBQlMgQ0ExHTAbBgkqhkiG9w0BCQEWDmNlcnRAb25saW5l
+LnJ1MB4XDTk5MDgxNTE5MDI1OFoXDTAwMDExMjE5MDI1OFowdTELMAkGA1UEBhMC
+UlUxDzANBgNVBAcTBk1vc2NvdzEXMBUGA1UEChMOU292YW0gVGVsZXBvcnQxDDAK
+BgNVBAsTA05JUzEPMA0GA1UEAxMGQUJTIENBMR0wGwYJKoZIhvcNAQkBFg5jZXJ0
+QG9ubGluZS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAw0g1P0yQAZIi
+ml2XOCOxnCcuhHmAgj4Ei9M2ebrrGwUMONPzr1a8W7JcpnR3FeOjxEIxrzkHr6UA
+oj4l/oC7Rv28uIig+Okf+82ekhH6VgAQNr5LAzfN8J6dZLx2OXAmmLleAqHuisT7
+I40vEFRoRmC5hiMlILE2rIlIKJn6cUkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBZ
+7ELDfGUNb+fbpHl5W3d9JMXsdOgd96+HG+X1SPgeiRAMjkla8WFCSaQPIR4vCy0m
+tm5a2bWSji6+vP5FGbjOz5iMlHMrCtu0He7Eim2zpaGI06ZIY75Cn1h2r3+KS0/R
+h01TJUbmsfV1tZm6Wk3bayJ+/K8A4mBHv8P6rhYacDCCAowwggH1oAMCAQICAQAw
+DQYJKoZIhvcNAQEEBQAwgYsxCzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cx
+FzAVBgNVBAoTDkdvbGRlbiBUZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMT
+FUdvbGRlbiBUZWxlY29tIEFCUyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xk
+ZW50ZWxlY29tLnJ1MB4XDTAwMDEwNTE1MDY1MVoXDTEwMDExNTE1MDY1MVowgYsx
+CzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cxFzAVBgNVBAoTDkdvbGRlbiBU
+ZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMTFUdvbGRlbiBUZWxlY29tIEFC
+UyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xkZW50ZWxlY29tLnJ1MIGfMA0G
+CSqGSIb3DQEBAQUAA4GNADCBiQKBgQDPFel/Svli6ogoUEb6eLtEvNSjyalETSMP
+MIZXdmWIkWijvEUhDnNJVAE3knAt6dVYqxWq0vc6CbAGFZNqEyioGU48IECLzV0G
+toiYejF/c9PuyIKDejeV9/YZnNFaZAUOXhOjREdZURLISKhX4tAbQyvK0Qka9AAR
+MEy9DoqV8QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAHQzgqFkoSMQr077UCr5C0l1
+rxLA17TrocCmUC1/PLmN0LmUHD0d7TjjTQKJaJBHxcKIg6+FOY6LSSY4nAN79eXi
+nBz+jEUG7+NTU/jcEArI35yP7fi4Mwb96EYDmUkUGtcLNq3JBe/d1Zhmy9HnNBL1
+Dn9thM2Q8RPYAJIU3JnGMIICqTCCAhICAQAwDQYJKoZIhvcNAQEEBQAwgZwxCzAJ
+BgNVBAYTAlJVMQ8wDQYDVQQIEwZNb3Njb3cxDzANBgNVBAcTBk1vc2NvdzEXMBUG
+A1UEChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA1JPTDEeMBwGA1UEAxMVR29s
+ZGVuIFRlbGVjb20gQUJTIENBMSQwIgYJKoZIhvcNAQkBFhVjZXJ0QGdvbGRlbnRl
+bGVjb20ucnUwHhcNMTAwMTE1MTU0MDI2WhcNMjAwMjIyMTU0MDI2WjCBnDELMAkG
+A1UEBhMCUlUxDzANBgNVBAgTBk1vc2NvdzEPMA0GA1UEBxMGTW9zY293MRcwFQYD
+VQQKEw5Hb2xkZW4gVGVsZWNvbTEMMAoGA1UECxMDUk9MMR4wHAYDVQQDExVHb2xk
+ZW4gVGVsZWNvbSBBQlMgQ0ExJDAiBgkqhkiG9w0BCQEWFWNlcnRAZ29sZGVudGVs
+ZWNvbS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAzxXpf0r5YuqIKFBG
++ni7RLzUo8mpRE0jDzCGV3ZliJFoo7xFIQ5zSVQBN5JwLenVWKsVqtL3OgmwBhWT
+ahMoqBlOPCBAi81dBraImHoxf3PT7siCg3o3lff2GZzRWmQFDl4To0RHWVESyEio
+V+LQG0MrytEJGvQAETBMvQ6KlfECAwEAATANBgkqhkiG9w0BAQQFAAOBgQCMrS4T
+LIzxcpu8nwOq/xMcxW4Ctz/wjIoePWkmSLe+Tkb4zo7aTsvzn+ETaWb7qztUpyl0
+QvlXn4vC2iCJloPpofPqSzF1UV3g5Zb93ReZu7E6kEyW0ag8R5XZKv0xuR3b3Le+
+ZqolT8wQELd5Mmw5JPofZ+O2cGNvet8tYwOKFjCCAqUwggIOoAMCAQICAgboMA0G
+CSqGSIb3DQEBBAUAMIGcMQswCQYDVQQGEwJSVTEPMA0GA1UECBMGTW9zY293MQ8w
+DQYDVQQHEwZNb3Njb3cxFzAVBgNVBAoTDkdvbGRlbiBUZWxlY29tMQwwCgYDVQQL
+EwNST0wxHjAcBgNVBAMTFUdvbGRlbiBUZWxlY29tIEFCUyBDQTEkMCIGCSqGSIb3
+DQEJARYVY2VydEBnb2xkZW50ZWxlY29tLnJ1MB4XDTExMDEyODEyMTcwOVoXDTEy
+MDIwMTAwMDAwMFowdjELMAkGA1UEBhMCUlUxDDAKBgNVBAgTA04vQTEXMBUGA1UE
+ChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA0lTUDEWMBQGA1UEAxMNY3JheS5n
+bGFzLm5ldDEaMBgGCSqGSIb3DQEJARYLZWxpZUByb2wucnUwgZ8wDQYJKoZIhvcN
+AQEBBQADgY0AMIGJAoGBAPJAm8KG3ZCoJSvoGmLMPlGaMIpadu/EGSEYu+M/ybLp
+Cs8XmwB3876JVKKCbtGI6eqxOqvjedYXb+nKcyhz4Ztmm8RgAD7Z1WUItIpatejT
+79EYOUWrDN713SLZsImMyP4B4EySl4LZfHFRU2iOwLB6WozGCYuULLqYS9MDPrnT
+AgMBAAGjGzAZMBcGCWCGSAGG+EIBDQQKFghDPS07Uz0tOzANBgkqhkiG9w0BAQQF
+AAOBgQDEttS70qYCA+MGBA3hOR88XiBcTmuBarJDwn/rj31vRjYZUgp9bbFwscRI
+Ic4lDnlyvunwNitl+341bDg7u6Ebu9hCMbciyu4EtrsDh77DlLzbmNcXbnhlvbFL
+K9GiPz3dNyvQMfmaA0twd62zJDOVJ1SmO04lLmu/pAx8GhBZkqEAMQA=
+"""
+
+ # canonically ordered SET components
+ pem_text_reordered = """\
+MIIKcwYJKoZIhvcNAQcCoIIKZDCCCmACAQExADALBgkqhkiG9w0BBwGgggpIMIIC
+XjCCAcegAwIBAgIBADANBgkqhkiG9w0BAQQFADB1MQswCQYDVQQGEwJSVTEPMA0G
+A1UEBxMGTW9zY293MRcwFQYDVQQKEw5Tb3ZhbSBUZWxlcG9ydDEMMAoGA1UECxMD
+TklTMQ8wDQYDVQQDEwZBQlMgQ0ExHTAbBgkqhkiG9w0BCQEWDmNlcnRAb25saW5l
+LnJ1MB4XDTk5MDgxNTE5MDI1OFoXDTAwMDExMjE5MDI1OFowdTELMAkGA1UEBhMC
+UlUxDzANBgNVBAcTBk1vc2NvdzEXMBUGA1UEChMOU292YW0gVGVsZXBvcnQxDDAK
+BgNVBAsTA05JUzEPMA0GA1UEAxMGQUJTIENBMR0wGwYJKoZIhvcNAQkBFg5jZXJ0
+QG9ubGluZS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAw0g1P0yQAZIi
+ml2XOCOxnCcuhHmAgj4Ei9M2ebrrGwUMONPzr1a8W7JcpnR3FeOjxEIxrzkHr6UA
+oj4l/oC7Rv28uIig+Okf+82ekhH6VgAQNr5LAzfN8J6dZLx2OXAmmLleAqHuisT7
+I40vEFRoRmC5hiMlILE2rIlIKJn6cUkCAwEAATANBgkqhkiG9w0BAQQFAAOBgQBZ
+7ELDfGUNb+fbpHl5W3d9JMXsdOgd96+HG+X1SPgeiRAMjkla8WFCSaQPIR4vCy0m
+tm5a2bWSji6+vP5FGbjOz5iMlHMrCtu0He7Eim2zpaGI06ZIY75Cn1h2r3+KS0/R
+h01TJUbmsfV1tZm6Wk3bayJ+/K8A4mBHv8P6rhYacDCCAowwggH1oAMCAQICAQAw
+DQYJKoZIhvcNAQEEBQAwgYsxCzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cx
+FzAVBgNVBAoTDkdvbGRlbiBUZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMT
+FUdvbGRlbiBUZWxlY29tIEFCUyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xk
+ZW50ZWxlY29tLnJ1MB4XDTAwMDEwNTE1MDY1MVoXDTEwMDExNTE1MDY1MVowgYsx
+CzAJBgNVBAYTAlJVMQ8wDQYDVQQHEwZNb3Njb3cxFzAVBgNVBAoTDkdvbGRlbiBU
+ZWxlY29tMQwwCgYDVQQLEwNST0wxHjAcBgNVBAMTFUdvbGRlbiBUZWxlY29tIEFC
+UyBDQTEkMCIGCSqGSIb3DQEJARYVY2VydEBnb2xkZW50ZWxlY29tLnJ1MIGfMA0G
+CSqGSIb3DQEBAQUAA4GNADCBiQKBgQDPFel/Svli6ogoUEb6eLtEvNSjyalETSMP
+MIZXdmWIkWijvEUhDnNJVAE3knAt6dVYqxWq0vc6CbAGFZNqEyioGU48IECLzV0G
+toiYejF/c9PuyIKDejeV9/YZnNFaZAUOXhOjREdZURLISKhX4tAbQyvK0Qka9AAR
+MEy9DoqV8QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAHQzgqFkoSMQr077UCr5C0l1
+rxLA17TrocCmUC1/PLmN0LmUHD0d7TjjTQKJaJBHxcKIg6+FOY6LSSY4nAN79eXi
+nBz+jEUG7+NTU/jcEArI35yP7fi4Mwb96EYDmUkUGtcLNq3JBe/d1Zhmy9HnNBL1
+Dn9thM2Q8RPYAJIU3JnGMIICpTCCAg6gAwIBAgICBugwDQYJKoZIhvcNAQEEBQAw
+gZwxCzAJBgNVBAYTAlJVMQ8wDQYDVQQIEwZNb3Njb3cxDzANBgNVBAcTBk1vc2Nv
+dzEXMBUGA1UEChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA1JPTDEeMBwGA1UE
+AxMVR29sZGVuIFRlbGVjb20gQUJTIENBMSQwIgYJKoZIhvcNAQkBFhVjZXJ0QGdv
+bGRlbnRlbGVjb20ucnUwHhcNMTEwMTI4MTIxNzA5WhcNMTIwMjAxMDAwMDAwWjB2
+MQswCQYDVQQGEwJSVTEMMAoGA1UECBMDTi9BMRcwFQYDVQQKEw5Hb2xkZW4gVGVs
+ZWNvbTEMMAoGA1UECxMDSVNQMRYwFAYDVQQDEw1jcmF5LmdsYXMubmV0MRowGAYJ
+KoZIhvcNAQkBFgtlbGllQHJvbC5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
+gYEA8kCbwobdkKglK+gaYsw+UZowilp278QZIRi74z/JsukKzxebAHfzvolUooJu
+0Yjp6rE6q+N51hdv6cpzKHPhm2abxGAAPtnVZQi0ilq16NPv0Rg5RasM3vXdItmw
+iYzI/gHgTJKXgtl8cVFTaI7AsHpajMYJi5QsuphL0wM+udMCAwEAAaMbMBkwFwYJ
+YIZIAYb4QgENBAoWCEM9LTtTPS07MA0GCSqGSIb3DQEBBAUAA4GBAMS21LvSpgID
+4wYEDeE5HzxeIFxOa4FqskPCf+uPfW9GNhlSCn1tsXCxxEghziUOeXK+6fA2K2X7
+fjVsODu7oRu72EIxtyLK7gS2uwOHvsOUvNuY1xdueGW9sUsr0aI/Pd03K9Ax+ZoD
+S3B3rbMkM5UnVKY7TiUua7+kDHwaEFmSMIICqTCCAhICAQAwDQYJKoZIhvcNAQEE
+BQAwgZwxCzAJBgNVBAYTAlJVMQ8wDQYDVQQIEwZNb3Njb3cxDzANBgNVBAcTBk1v
+c2NvdzEXMBUGA1UEChMOR29sZGVuIFRlbGVjb20xDDAKBgNVBAsTA1JPTDEeMBwG
+A1UEAxMVR29sZGVuIFRlbGVjb20gQUJTIENBMSQwIgYJKoZIhvcNAQkBFhVjZXJ0
+QGdvbGRlbnRlbGVjb20ucnUwHhcNMTAwMTE1MTU0MDI2WhcNMjAwMjIyMTU0MDI2
+WjCBnDELMAkGA1UEBhMCUlUxDzANBgNVBAgTBk1vc2NvdzEPMA0GA1UEBxMGTW9z
+Y293MRcwFQYDVQQKEw5Hb2xkZW4gVGVsZWNvbTEMMAoGA1UECxMDUk9MMR4wHAYD
+VQQDExVHb2xkZW4gVGVsZWNvbSBBQlMgQ0ExJDAiBgkqhkiG9w0BCQEWFWNlcnRA
+Z29sZGVudGVsZWNvbS5ydTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAzxXp
+f0r5YuqIKFBG+ni7RLzUo8mpRE0jDzCGV3ZliJFoo7xFIQ5zSVQBN5JwLenVWKsV
+qtL3OgmwBhWTahMoqBlOPCBAi81dBraImHoxf3PT7siCg3o3lff2GZzRWmQFDl4T
+o0RHWVESyEioV+LQG0MrytEJGvQAETBMvQ6KlfECAwEAATANBgkqhkiG9w0BAQQF
+AAOBgQCMrS4TLIzxcpu8nwOq/xMcxW4Ctz/wjIoePWkmSLe+Tkb4zo7aTsvzn+ET
+aWb7qztUpyl0QvlXn4vC2iCJloPpofPqSzF1UV3g5Zb93ReZu7E6kEyW0ag8R5XZ
+Kv0xuR3b3Le+ZqolT8wQELd5Mmw5JPofZ+O2cGNvet8tYwOKFjEA
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2315.ContentInfo()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text_unordered)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text_reordered)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ substrate, der_encoder(asn1Object, omitEmptyOptionals=False))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2437.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2437.py
new file mode 100644
index 0000000000..b411756bbf
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2437.py
@@ -0,0 +1,46 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2437
+
+
+class RSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBPAIBAAJBAMfAjvBNDDYBCl1w3yNcagZkPhqd0q5KqeOTgKSLuJWfe5+VSeR5
+Y1PcF3DyH8dvS3t8PIQjxJLoKS7HVRlsfhECAwEAAQJBAIr93/gxhIenXbD7MykF
+yvi7k8MtgkWoymICZwcX+c6RudFyuPPfQJ/sf6RmFZlRA9X9CQm5NwVG7+x1Yi6t
+KoECIQDmJUCWkPCiQYow6YxetpXFa0K6hTzOPmax7MNHVWNgmQIhAN4xOZ4JFT34
+xVhK+8EudBCYRomJUHmOJfoQAxiIXVw5AiEAyB7ecc5on/5zhqKef4Eu7LKfHIdc
+304diFuDVpTmTAkCIC2ZmKOQZaWkSowGR4isCfHl7oQHhFaOD8k0RA5i3hYxAiEA
+n8lDw3JT6NjvMnD6aM8KBsLyhazWSVVkaUSqmJzgCF0=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2437.RSAPrivateKey()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2459.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2459.py
new file mode 100644
index 0000000000..4132daa426
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2459.py
@@ -0,0 +1,142 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2459
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2459.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+
+class CertificateListTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2459.CertificateList()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+
+class DSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBugIBAAKBgQCN91+Cma8UPw09gjwP9WOJCdpv3mv3/qFqzgiODGZx0Q002iTl
+1dq36m5TsWYFEcMCEyC3tFuoQ0mGq5zUUOmJvHCIPufs0g8Av0fhY77uFqneHHUi
+VQMCPCHX9vTCWskmDE21LJppU27bR4H2q+ysE30d6u3+84qrItsn4bjpcQIVAPR5
+QrmooOXDn7fHJzshmxImGC4VAoGAXxKyEnlvzq93d4V6KLWX3H5Jk2JP771Ss1bT
+6D/mSbLlvjjo7qsj6diul1axu6Wny31oPertzA2FeGEzkqvjSNmSxyYYMDB3kEcx
+ahntt37I1FgSlgdZHuhdtl1h1DBKXqCCneOZuNj+kW5ib14u5HDfFIbec2HJbvVs
+lJ/k83kCgYB4TD8vgHetXHxqsiZDoy5wOnQ3mmFAfl8ZdQsIfov6kEgArwPYUOVB
+JsX84f+MFjIOKXUV8dHZ8VRrGCLAbXcxKqLNWKlKHUnEsvt63pkaTy/RKHyQS+pn
+wontdTt9EtbF+CqIWnm2wpn3O+SbdtawzPOL1CcGB0jYABwbeQ81RwIUFKdyRYaa
+INow2I3/ks+0MxDabTY=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2459.DSAPrivateKey()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testDerCodecDecodeOpenTypes(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2511.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2511.py
new file mode 100644
index 0000000000..057b7fe861
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2511.py
@@ -0,0 +1,48 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2511
+
+
+class CertificateReqTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
+DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
+nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
+0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
+rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
+BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
+BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
+AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
+xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2511.CertReqMessages()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2560.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2560.py
new file mode 100644
index 0000000000..eef5451f00
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2560.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2560
+
+
+class OCSPRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MGowaDBBMD8wPTAJBgUrDgMCGgUABBS3ZrMV9C5Dko03aH13cEZeppg3wgQUkqR1LKSevoFE63n8
+isWVpesQdXMCBDXe9M+iIzAhMB8GCSsGAQUFBzABAgQSBBBjdJOiIW9EKJGELNNf/rdA
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2560.OCSPRequest()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+
+class OCSPResponseTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEvQoBAKCCBLYwggSyBgkrBgEFBQcwAQEEggSjMIIEnzCCAQ+hgYAwfjELMAkGA1UEBhMCQVUx
+EzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEV
+MBMGA1UEAxMMc25tcGxhYnMuY29tMSAwHgYJKoZIhvcNAQkBFhFpbmZvQHNubXBsYWJzLmNvbRgP
+MjAxMjA0MTExNDA5MjJaMFQwUjA9MAkGBSsOAwIaBQAEFLdmsxX0LkOSjTdofXdwRl6mmDfCBBSS
+pHUspJ6+gUTrefyKxZWl6xB1cwIENd70z4IAGA8yMDEyMDQxMTE0MDkyMlqhIzAhMB8GCSsGAQUF
+BzABAgQSBBBjdJOiIW9EKJGELNNf/rdAMA0GCSqGSIb3DQEBBQUAA4GBADk7oRiCy4ew1u0N52QL
+RFpW+tdb0NfkV2Xyu+HChKiTThZPr9ZXalIgkJ1w3BAnzhbB0JX/zq7Pf8yEz/OrQ4GGH7HyD3Vg
+PkMu+J6I3A2An+bUQo99AmCbZ5/tSHtDYQMQt3iNbv1fk0yvDmh7UdKuXUNSyJdHeg27dMNy4k8A
+oIIC9TCCAvEwggLtMIICVqADAgECAgEBMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAkFVMRMw
+EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxFTAT
+BgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wHhcN
+MTIwNDExMTMyNTM1WhcNMTMwNDExMTMyNTM1WjB+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29t
+ZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRUwEwYDVQQDEwxzbm1w
+bGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25tcGxhYnMuY29tMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDDDU5HOnNV8I2CojxB8ilIWRHYQuaAjnjrETMOprouDHFXnwWqQo/I3m0b
+XYmocrh9kDefb+cgc7+eJKvAvBqrqXRnU38DmQU/zhypCftGGfP8xjuBZ1n23lR3hplN1yYA0J2X
+SgBaAg6e8OsKf1vcX8Es09rDo8mQpt4G2zR56wIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU8Ys2dpJFLMHl
+yY57D4BNmlqnEcYwHwYDVR0jBBgwFoAU8Ys2dpJFLMHlyY57D4BNmlqnEcYwDQYJKoZIhvcNAQEF
+BQADgYEAWR0uFJVlQId6hVpUbgXFTpywtNitNXFiYYkRRv77McSJqLCa/c1wnuLmqcFcuRUK0oN6
+8ZJDP2HDDKe8MCZ8+sx+CF54eM8VCgN9uQ9XyE7x9XrXDd3Uw9RJVaWSIezkNKNeBE0lDM2jUjC4
+HAESdf7nebz1wtqAOXE1jWF/y8g=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2560.OCSPResponse()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2631.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2631.py
new file mode 100644
index 0000000000..ca9e547694
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2631.py
@@ -0,0 +1,41 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2631
+
+
+class OtherInfoTestCase(unittest.TestCase):
+ pem_text = "MB0wEwYLKoZIhvcNAQkQAwYEBAAAAAGiBgQEAAAAwA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc2631.OtherInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ hex1 = univ.OctetString(hexValue='00000001')
+ self.assertEqual(hex1, asn1Object['keyInfo']['counter'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2634.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2634.py
new file mode 100644
index 0000000000..225b987ed2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2634.py
@@ -0,0 +1,191 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc2634
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIFLgYJKoZIhvcNAQcCoIIFHzCCBRsCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggIy
+MIICLgIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKgggFXMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8X
+DTE5MDUyOTE4MjMxOVowJQYLKoZIhvcNAQkQAgcxFgQUAbWZQYhLO5wtUgsOCGtT
+4V3aNhUwLwYLKoZIhvcNAQkQAgQxIDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZI
+hvcNAQcBMDUGCyqGSIb3DQEJEAICMSYxJAIBAQYKKwYBBAGBrGABARMTQm9hZ3Vz
+IFByaXZhY3kgTWFyazA/BgkqhkiG9w0BCQQxMgQwtuQipP2CZx7U96rGbUT06LC5
+jVFYccZW5/CaNvpcrOPiChDm2vI3m4k300z5mSZsME0GCyqGSIb3DQEJEAIBMT4w
+PAQgx08hD2QnVwj1DoeRELNtdZ0PffW4BQIvcwwVc/goU6OAAQEwFTATgRFhbGlj
+ZUBleGFtcGxlLmNvbTAKBggqhkjOPQQDAwRnMGUCMAFFVP2gYFLTbaxvV5J2ICNM
+Nk/K4pXbj5Zvj3dcCeC4+OUYyG3ZW5lOtKqaabEAXAIxALDg1WOouhkDfwuQdgBi
+mNTr0mjYeUWRe/15IsWNx+kuFcLDr71DFHvMFY5M3sdfMA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+
+class SignedReceiptTestCase(unittest.TestCase):
+ signed_receipt_pem_text = """\
+MIIE3gYJKoZIhvcNAQcCoIIEzzCCBMsCAQMxDTALBglghkgBZQMEAgEwga4GCyq
+GSIb3DQEJEAEBoIGeBIGbMIGYAgEBBgkqhkiG9w0BBwEEIMdPIQ9kJ1cI9Q6HkR
+CzbXWdD331uAUCL3MMFXP4KFOjBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzm
+o18WwkFrEYH3EMsvpXEIGqsFTFN6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZy
+b2xSX6Gr/IfCIm0angfOI39g7lAZDyivjh5H/oSgggJ3MIICczCCAfqgAwIBAgI
+JAKWzVCgbsG48MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDA
+JWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwN
+TI5MTkyMDEzWhcNMjAwNTI4MTkyMDEzWjBsMQswCQYDVQQGEwJVUzELMAkGA1UE
+CBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1wbGUxDDAKBgN
+VBAMTA0JvYjEeMBwGCSqGSIb3DQEJARYPYm9iQGV4YW1wbGUuY29tMHYwEAYHKo
+ZIzj0CAQYFK4EEACIDYgAEMaRiVS8WvN8Ycmpfq75jBbOMUukNfXAg6AL0JJBXt
+IFAuIJcZVlkLn/xbywkcMLHK/O+w9RWUQa2Cjw+h8b/1Cl+gIpqLtE558bD5PfM
+2aYpJ/YE6yZ9nBfTQs7z1TH5o4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvh
+CAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW
+55IHB1cnBvc2UuMB0GA1UdDgQWBBTKa2Zy3iybV3+YjuLDKtNmjsIapTAfBgNVH
+SMEGDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNnADBkAjAV
+boS6OfEYQomLDi2RUkd71hzwwiQZztbxNbosahIzjR8ZQaHhjdjJlrP/T6aXBws
+CMDfRweYz3Ce4E4wPfoqQnvqpM7ZlfhstjQQGOsWAtIIfqW/l+TgCO8ux3XLV6f
+j36zGCAYkwggGFAgEBMEwwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwD
+gYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQQIJAKWzVCgbsG48MAsG
+CWCGSAFlAwQCAaCBrjAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAQEwHAYJKoZ
+IhvcNAQkFMQ8XDTE5MDUyOTE5MzU1NVowLwYJKoZIhvcNAQkEMSIEIGb9Hm2kCn
+M0CYNpZU4Uj7dN0AzOieIn9sDqZMcIcZrEMEEGCyqGSIb3DQEJEAIFMTIEMBZze
+HVja7fQ62ywyh8rtKzBP1WJooMdZ+8c6pRqfIESYIU5bQnH99OPA51QCwdOdjAK
+BggqhkjOPQQDAgRoMGYCMQDZiT22xgab6RFMAPvN4fhWwzx017EzttD4VaYrpbo
+lropBdPJ6jIXiZQgCwxbGTCwCMQClaQ9K+L5LTeuW50ZKSIbmBZQ5dxjtnK3OlS
+7hYRi6U0JKZmWbbuS8vFIgX7eIkd8=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+ self.assertEqual(sd['encapContentInfo']['eContentType'],
+ rfc2634.id_ct_receipt)
+
+ receipt, rest = der_decoder(sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc2634.Receipt())
+
+ self.assertFalse(rest)
+ self.assertTrue(receipt.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(receipt))
+ self.assertEqual(receipt['version'], rfc2634.ESSVersion().subtype(value='v1'))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap.keys())
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd = asn1Object['content']
+
+ self.assertEqual(sd['version'], rfc5652.CMSVersion().subtype(value='v3'))
+ self.assertIn(sd['encapContentInfo']['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(sd['encapContentInfo']['eContentType'], rfc2634.id_ct_receipt)
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+
+ if sa['attrType'] == rfc2634.id_aa_msgSigDigest:
+ sa['attrValues'][0].prettyPrint()[:10] == '0x167378'
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ receipt, rest = der_decoder(sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd['encapContentInfo']['eContentType']])
+
+ self.assertEqual(receipt['version'], rfc2634.ESSVersion().subtype(value='v1'))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2876.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2876.py
new file mode 100644
index 0000000000..177e038b84
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2876.py
@@ -0,0 +1,185 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2876
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIIogYJKoZIhvcNAQcDoIIIkzCCCI8CAQKgggKRoIICjTCCAokwggIwoAMCAQIC
+FGPMbd5dAfZyD1kqY7NIQyVCWZgqMAkGByqGSM44BAMwPzELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMREwDwYDVQQKEwhCb2d1cyBD
+QTAeFw0xOTExMjAwODQzNDJaFw0yMDExMTkwODQzNDJaMGwxCzAJBgNVBAYTAlVT
+MQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBs
+ZTEMMAoGA1UEAxMDQm9iMR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20w
+gaEwFwYJYIZIAWUCAQEWBAp8tRylalhmjdM2A4GFAAKBgQD02ElSAgt9CWmKZ28J
+DMbpm/+aQ5PFPCTJRb1s2NuCHdakdYnkXXdtUgkIjgGYkVfGU6vhpGsdSRAFembb
+rjVdN/VkznUAxYFoyU/qmP5Az4R4dnNh08vdF49/XQA0JSasuN9WpmWtm2yPK3ZZ
+FXu2TRXIfD4ZlCDV1AcD+wnnVqOBlDCBkTALBgNVHQ8EBAMCAwgwQgYJYIZIAYb4
+QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFu
+eSBwdXJwb3NlLjAdBgNVHQ4EFgQUwtn/xRsTMH+uoIGDveicDyWKGlcwHwYDVR0j
+BBgwFoAUzUhlAYOypgdbBv4jgQzEc+TRtTgwCQYHKoZIzjgEAwNIADBFAiEAjK0V
+hpRdsxyQru4WTifhKnIioSUQlIkxymvsLD8VuSgCIGJ9vnSsDIthyWa5yove5wC7
+x3hFIBJXb31cTkdfMFYsMYHooYHlAgEEoBaAFMLZ/8UbEzB/rqCBg73onA8lihpX
+oYGDBIGAAVX+m3ogSJMhSVHNj/+juXxsWZ/UYNUmKXxH6YqRkHiRUl5Nd3cw6a1D
+vtNXb77ST3D6F/U/NS9VFfn2MBDhue2R7Mgfqgp8TnDOXgwxM/Po4qMH46UalPK3
+MeZ/e1xSI/yaIGJHlHFRZt0UI9ZTDsCTwMsK3XwAyEBmIeXRO0owGAYJYIZIAWUC
+AQEYMAsGCWCGSAFlAgEBFzAoMCagFgQUwtn/xRsTMH+uoIGDveicDyWKGlcEDGPg
+euAHFRJ4Hv6fXTCCBQgGCSqGSIb3DQEHATAXBglghkgBZQIBAQQwCgQIQk9HVVNf
+SVaAggTgc8exehjJD/gtEOIrg6tK5Emaa4PJ7l8f+EtyDD/ffQayXVAGz2MXUIQM
+EzmSLrnsr9NEyXvxGpvcsi7mV8tDxZU0YuyhA/C/HMh7EaBKG1hjC7xNw+IRIUxr
+bRJakMQbzMWWYJupC5zRu4/Ge9i+JVOGgES2E0L5LZSZ53wmnHA0ols1PHl3F3Z2
+QM3CkewqA3NP1waXQ0XXb0Oyl6Gq12B7ksm7euPWA3KctEjfYBD6nBT6wQd57rAM
+eFTk5aceWd2Sb/0xMpjfCg6GzX8pAWVEU8LqTvVmlSWdx3f3fAtUgiZ+gx7jNY8A
+6duln8zvMQn3mtPDCa50GzSrAx8JreHRWSDr3Dp8EfJzUgfy7dWlI9xs5bh1TMkE
+Mk+AHWQ5sBXTZkDgVAS5m1mIbXe7dzuxKsfGxjWu1eyy9J77mtOGo9aAOqYfxv/I
+8YQcgWHTeQcIO39Rmt2QsI7trRaEJ1jgj2E1To5gRCbIQWzQuyoS6affgu/9dwPX
+CAt0+0XrnO5vhaKX/RWm7ve8hYsiT0vI0hdBJ3rDRkdS9VL6NlnXOuohAqEq8b3s
+2koBigdri052hceAElTHD+4A4qRDiMLlFLlQqoJlpBwCtEPZsIQSy62K7J/Towxx
+ab5FoFjUTC5f79xPQPoKxYdgUB5AeAu5HgdWTn49Uqg4v/spTPSNRTmDMVVyZ9qh
+zJfkDpH3TKCAE5t59w4gSPe/7l+MeSml9O+L9HTd9Vng3LBbIds3uQ4cfLyyQmly
+81qpJjR1+Rvwo46hOm0kf2sIFi0WULmP/XzLw6b1SbiHf/jqFg7TFTyLMkPMPMmc
+7/kpLmYbKyTB4ineasTUL+bDrwu+uSzFAjTcI+1sz4Wo4p7RVywBDKSI5Ocbd3iM
+t4XWJWtz0KBX6nBzlV+BBTCwaGMAU4IpPBYOuvcl7TJWx/ODBjbO4zm4T/66w5IG
+3tKpsVMs4Jtrh8mtVXCLTBmKDzyjBVN2X8ALGXarItRgLa7k80lJjqTHwKCjiAMm
+T/eh67KzwmqBq5+8rJuXkax0NoXcDu6xkCMNHUQBYdnskaJqC2pu8hIsPTOrh7ie
+YSEuchFvu7lI0E+p7ypW65CMiy+Y/Rm5OWeHzjKkU5AbPtx/Me2vpQRCgaPwciZu
+nx2Ivi1+WYUBU1pGNDO7Xz7a8UHbDURkh7b+40uz2d7YQjKgrZBv6YwLAmw1LTE4
+bT9PM9n7LROnX8u6ksei8yiw8gZeVu+plWHbF+0O9siKAgxZlBna0XFgPpdzjMDT
+S/sfTIYXWlFj7camhsmTDRjo5G2B212evaKmKgh5ALLSFSk86ZN5KvQvcfsp81jv
+JCBmDStrsUgSMzy0Og2quHOd61hRTVlYzwvJvfMzHGKdIWwYUbHZOKo/KLEk3E36
+U9PkPoZGEL2ZeCH4F9Wh3mgg0knBfEmlPnGexmBby6NXGK7VW3l6xcJlpdMaXKNV
+Mfl2YK8k/34Hyft06KaYLEJsxAqk1pmLEmGhdZC1OAqovVB/1agSzpMMaB9OWWqN
+sTjDc7tkDt8BZ72NsAbCI9XmsX81W+NqPb6Ju1dtI09bn113LX/ZbOSdVicQcXSp
+l0FnTZaHgHJdQLcU28O7yFFOblqrvcMKpctdTA1TwG9LXEFttGrlpgjZF3edo0Ce
+z10epK+S
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kari_kea = ed['recipientInfos'][0]['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_kEAKeyEncryptionAlgorithm, kari_kea['algorithm'])
+ kwa, rest = der_decoder(
+ kari_kea['parameters'], asn1Spec=rfc5280.AlgorithmIdentifier())
+ self.assertFalse(rest)
+ self.assertTrue(kwa.prettyPrint())
+ self.assertEqual(kari_kea['parameters'], der_encoder(kwa))
+ self.assertEqual(rfc2876.id_fortezzaWrap80, kwa['algorithm'])
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_fortezzaConfidentialityAlgorithm, cea['algorithm'])
+ param, rest = der_decoder(cea['parameters'], rfc2876.Skipjack_Parm())
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, param['initialization-vector'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap.keys())
+ kari_kea = asn1Object['content']['recipientInfos'][0]['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_kEAKeyEncryptionAlgorithm, kari_kea['algorithm'])
+ self.assertEqual(rfc2876.id_fortezzaWrap80, kari_kea['parameters']['algorithm'])
+
+ cea = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc2876.id_fortezzaConfidentialityAlgorithm, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, cea['parameters']['initialization-vector'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "\
+MCcwGAYJYIZIAWUCAQEYMAsGCWCGSAFlAgEBFzALBglghkgBZQIBAQQ="
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg = False
+ for cap in asn1Object:
+ if cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys():
+ if cap['parameters'].hasValue():
+ param, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(param))
+
+ if cap['capabilityID'] == rfc2876.id_kEAKeyEncryptionAlgorithm:
+ self.assertEqual(rfc2876.id_fortezzaWrap80, param['algorithm'])
+ found_wrap_alg = True
+
+ self.assertTrue(found_wrap_alg)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg = False
+ for cap in asn1Object:
+ if cap['capabilityID'] == rfc2876.id_kEAKeyEncryptionAlgorithm:
+ self.assertEqual(rfc2876.id_fortezzaWrap80, cap['parameters']['algorithm'])
+ found_wrap_alg = True
+
+ self.assertTrue(found_wrap_alg)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2985.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2985.py
new file mode 100644
index 0000000000..376475e60f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2985.py
@@ -0,0 +1,319 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7292
+
+
+class PKCS9AttrsTestCase(unittest.TestCase):
+ pem_text = """\
+MYIQjzAOBgNVBEExBwwFQWxpY2UwDwYIKwYBBQUHCQMxAxMBTTAQBgNVBAUxCRMH
+QjQ4LTAwNzAQBggrBgEFBQcJBDEEEwJVUzAQBggrBgEFBQcJBTEEEwJVUzARBgoq
+hkiG9w0BCRkEMQMCATAwFAYJKoZIhvcNAQkCMQcWBUFsaWNlMBgGCiqGSIb3DQEJ
+GQMxCgQIUTeqnHYky4AwHAYJKoZIhvcNAQkPMQ8wDTALBglghkgBZQMEAS0wHQYI
+KwYBBQUHCQExERgPMjAxOTA4MDMxMjAwMDBaMB0GCCsGAQUFBwkCMREMD0hlcm5k
+b24sIFZBLCBVUzApBgkqhkiG9w0BCRQxHB4aAEYAcgBpAGUAbgBkAGwAeQAgAE4A
+YQBtAGUwLwYJKoZIhvcNAQkIMSITIDEyMyBVbmtub3duIFdheSwgTm93aGVyZSwg
+VkEsIFVTMIGZBgoqhkiG9w0BCRkCMYGKMIGHMAsGCWCGSAFlAwQBLQR4VsJb7t4l
+IqjJCT54rqkbCJsBPE17YQJeEYvyA4M1aDIUU5GnCgEhctgMiDPWGMvaSziixdIg
+aU/0zvWvYCm8UwPvBBwMtm9X5NDvk9p4nXbGAT8E/OsV1SYWVvwRJwYak0yWWexM
+HSixw1Ljh2nb0fIbqwLOeMmIMIIEsQYKKoZIhvcNAQkZBTGCBKEwggSdBgkqhkiG
+9w0BBwKgggSOMIIEigIBATENMAsGCWCGSAFlAwQCAjBRBgkqhkiG9w0BBwGgRARC
+Q29udGVudC1UeXBlOiB0ZXh0L3BsYWluDQoNCldhdHNvbiwgY29tZSBoZXJlIC0g
+SSB3YW50IHRvIHNlZSB5b3UuoIICfDCCAngwggH+oAMCAQICCQCls1QoG7BuOzAK
+BggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcM
+B0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUyOTE0NDU0MVoXDTIw
+MDUyODE0NDU0MVowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQH
+EwdIZXJuZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGljZTEgMB4G
+CSqGSIb3DQEJARYRYWxpY2VAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6zXCYlmsEGD5vPu5hl9hDEjd1U
+HRgJIPoy3fJcWWeZ8FHCirICtuMgFisNscG/aTwKyDYOFDuqz/C2jyEwqgWCRyxy
+ohuJXtmjgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNl
+cnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYD
+VR0OBBYEFMS6Wg4+euM8gbD0Aqpouxbglg41MB8GA1UdIwQYMBaAFPI12zQE2qVV
+8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2gAMGUCMGO5H9E1uAveRGGaf48lN4po
+v2yH+hCAc5hOAuZKe/f40MKSF8q4w2ij+0euSaKFiAIxAL3gxp6sMitCmLQgOH6/
+RBIC/2syJ97y0KVp9da0PDAvwxLugCHTKZPjjpSLPHHc9TGCAaEwggGdAgEBMEww
+PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
+DwYDVQQKDAhCb2d1cyBDQQIJAKWzVCgbsG47MAsGCWCGSAFlAwQCAqCByDAYBgkq
+hkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xOTA1MjkxODIz
+MTlaMD8GCSqGSIb3DQEJBDEyBDC25CKk/YJnHtT3qsZtRPTosLmNUVhxxlbn8Jo2
++lys4+IKEOba8jebiTfTTPmZJmwwTQYLKoZIhvcNAQkQAgExPjA8BCDHTyEPZCdX
+CPUOh5EQs211nQ999bgFAi9zDBVz+ChTo4ABATAVMBOBEWFsaWNlQGV4YW1wbGUu
+Y29tMAoGCCqGSM49BAMDBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzmo18WwkF
+rEYH3EMsvpXEIGqsFTFN6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZyb2xSX6Gr
+/IfCIm0angfOI39g7lAZDyivjh5H/oQwggnoBgtghkgBhvhCAwGBWDGCCdcwggnT
+AgEDMIIJjwYJKoZIhvcNAQcBoIIJgASCCXwwggl4MIIGCAYJKoZIhvcNAQcBoIIF
++QSCBfUwggXxMIIF7QYLKoZIhvcNAQwKAQKgggT+MIIE+jAcBgoqhkiG9w0BDAED
+MA4ECO6rT/7SnK61AgIH0ASCBNhl7+ZgGmaQO8qy97gTAhXCjVM2/iV3LHWodlbY
+iHqpAJj42/Uye/3B7TNROXine1DMI9ZeetIDzYiA52i0sh7PhjBeuCIqFwiRJIv7
+bIKYCgz6qSOIAgqr6XdQnpeFp97YqDgST/RGQel7obCNO115+SlelmBxwwSik60p
+AwslawMzunvvH9qafrIiTa2myQqpRj/ifxjESJNZxG1O2FiplAi36r3icotim3Sj
+zzRJU5+90SqnkogjtxODrQYkv6fqg3qGY/RuwAy+eT3V/z+UUoyL22w1T8qdSFsN
+WmMnAFCSGBuoHHoZ22ipItKVg09UzTCWe3CbUmEfjJuJDmw3Oo7sWVYLltxjCS86
+XHWAauyFjmMr9aNsDiloGnFKSChslF6Ktj0F6ohOe+iReW5vi16EeEzbQiTjakpr
+eQZoeajC/N+XGoT6jKxbk5r1dtnEEJ+Q4wnvSjiGpr6frr4T+4pw301sptOjfO3f
+F23rKk7Advvi3k5xZobHcRmzDSfT9X5agtKlc4HCnHTz7XKHstXb1o1DSgTNVWQX
+phhFBm10gx6zfEHaLqyMtqXbWe2TuIHMwnBWiLnbhIBn+hbxK4MCfVz3cBZbApks
+Au/lXcVnakOJBcCtx/MMfZ3kcnI3Hs6W8rM2ASeDBLIQLVduOc6xlVSoYUQ24NNr
+9usfigQkcSTJZPIO52vPyIIQ7zR7U8TiqonkKWU3QJJVarPgLEYMUhBfNHqiGfx/
+d1Hf4MBoti8CMFUwsmOTv6d+cHYvQelqeFMXP0DE88gN/mkFBDAzXiXzAqMQcjJ+
+pyW6l4o2iQFSvXKSKg/IKved/hGp7RngQohjg4KlbqeGuRYea8Xs4pH5ue5KTeOc
+HGNI3Qi/Lmr2rd+e1iuGxwwYZHve6Z+Lxnb20zW9I/2MFm+KsCiB4Z/+x84jR7BG
+8l//lpuc2D/vxnKTxaaUAdUXM0Zwze7e+Gc2lMhVG5TJWR1KY51vN5J+apDYc8IR
+0L0c2bbkom3WkPq/po/dPDuoaX61nKmztUHaL5r5QZzBBwKVyhdw9J0btnWAFPNK
+vzgy5U9iV4+6jXH5TCmlIreszwRPoqqEaYRIfmUpp2+zy91PpzjTs98tx/HIAbOM
+fT3WmuTahEnEHehABhwq+S4xwzoVIskLbrcOP6l7UYYR7GTUCjKxh7ru0rSwHrqG
+9t33YdzJaFbz+8jb88xtf454Rvur66Cew/4GYX9u1Zef0DF9So1ay3IicpOf5emo
+VWIwg4bh7bELi78i/MbdWtNZQcXimykfeTsYH8Q4u+1uxHS5pwEWWwKiUnLQVpZP
+2ut255TdgSIhEILwsaLVelRrx/lp14EpY355FOusXiju6g14aWfBnt5udvuTXxDQ
+ZHPPNNk+gwzgvvTey98T941hYUctjg0NApJiB66bfrlYB9mkc5ftg5zqhEasYH5C
+4ajKKRNMM7zGlwSZvy8PPhnAeE3Q9LTnos0l4ygjQD/kMlvd7XSLW3GUzjyxtkG4
+gQh6LGvnafAbgu7GpcapKEppN86sXEePHiQjj92n103+TxMYWwtaO4iAwkjqdEdt
+avEHcXRcpdqC0st6nUwPAPAC4LKJbZgLQnNG+wlWIiCMMD56IdfQ7r/zGIr13MxC
+kjNNUdISoWWE5GnQMYHbMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFcGCSqGSIb3DQEJ
+FDFKHkgAMwBmADcAMQBhAGYANgA1AC0AMQA2ADgANwAtADQANAA0AGEALQA5AGYA
+NAA2AC0AYwA4AGIAZQAxADkANABjADMAZQA4AGUwawYJKwYBBAGCNxEBMV4eXABN
+AGkAYwByAG8AcwBvAGYAdAAgAEUAbgBoAGEAbgBjAGUAZAAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIAIAB2ADEALgAwMIIDaAYJ
+KoZIhvcNAQcBoIIDWQSCA1UwggNRMIIDTQYLKoZIhvcNAQwKAQOgggMlMIIDIQYK
+KoZIhvcNAQkWAaCCAxEEggMNMIIDCTCCAfGgAwIBAgIQNu32hzqhCKdHATXzboyI
+ETANBgkqhkiG9w0BAQUFADAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwIBcNMTYwNzE5
+MjIwMDAxWhgPMjExNjA2MjUyMjAwMDFaMBQxEjAQBgNVBAMTCWFub255bW91czCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALy2sEJMGNdcDg6BI7mdFM5T
+lPzo5sKBzvUnagK5SKBJ11xMPN5toPTBzICB/XTWEB3AwpD0O+srSca+bsUAyedS
+5V4BNp8qCyEu5RNRR8qPHheJ/guhLT96/gGI4jlrUyUhFntPkLKODxu+7KanMy6K
+dD+PVE8shXRUZTYe4PG64/c7z3wapnf4XoCXkJRzCY5f3MKz3Ul039kVnTlJcikd
+C7I9I9RflXLwXVl4nxUbeeRt6Z8WVWS4pCq+14v2aVPvP3mtVmAYHedRkvS04Hrx
+4xx98D3NSSw6Z5OLkzqOcFw15fYmH2NLdhh34gSWJmaaCBAbuQ+1rx/42p7MvvsC
+AwEAAaNVMFMwFQYDVR0lBA4wDAYKKwYBBAGCNwoDBDAvBgNVHREEKDAmoCQGCisG
+AQQBgjcUAgOgFgwUYW5vbnltb3VzQHdpbmRvd3MteAAwCQYDVR0TBAIwADANBgkq
+hkiG9w0BAQUFAAOCAQEAuH7iqY0/MLozwFb39ILYAJDHE+HToZBQbHQP4YtienrU
+Stk60rIp0WH65lam7m/JhgAcItc/tV1L8mEnLrvvKcA+NeIL8sDOtM28azvgcOi0
+P3roeLLLRCuiykUaKmUcZEDm9cDYKIpJf7QetWQ3uuGTk9iRzpH79x2ix35BnyWQ
+Rr3INZzmX/+9YRvPBXKYl/89F/w1ORYArpI9XtjfuPWaGQmM4f1WRHE2t3qRyKFF
+ri7QiZdpcSx5zvsRHSyjfUMoKs+b6upk+P01lIhg/ewwYngGab+fZhF15pTNN2hx
+8PdNGcrGzrkNKCmJKrWCa2xczuMA+z8SCuC1tYTKmDEVMBMGCSqGSIb3DQEJFTEG
+BAQBAAAAMDswHzAHBgUrDgMCGgQUpWCP/fZR0TK5BwGuqvTd0+duiKcEFJTubF2k
+HktMK+isIjxOTk4yJTOOAgIH0A==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.AttributeSet()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(der_encoder(asn1Object), substrate)
+
+ openTypesMap = {
+ rfc2985.pkcs_9_at_smimeCapabilities: rfc2985.SMIMECapabilities(),
+ }
+ openTypesMap.update(rfc5280.certificateAttributesMap)
+ openTypesMap.update(rfc5652.cmsAttributesMap)
+
+ for attr in asn1Object:
+ self.assertIn(attr['type'], openTypesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0], asn1Spec=openTypesMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ if attr['type'] == rfc2985.pkcs_9_at_userPKCS12:
+
+ self.assertEqual(univ.Integer(3), av['version'])
+ self.assertEqual(rfc5652.id_data, av['authSafe']['contentType'])
+
+ outdata, rest = der_decoder(
+ av['authSafe']['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ authsafe, rest = der_decoder(
+ outdata, asn1Spec=rfc7292.AuthenticatedSafe())
+
+ self.assertFalse(rest)
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+
+ indata, rest = der_decoder(
+ ci['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ sc, rest = der_decoder(
+ indata, asn1Spec=rfc7292.SafeContents())
+
+ self.assertFalse(rest)
+
+ for sb in sc:
+ if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
+ bv, rest = der_decoder(
+ sb['bagValue'], asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
+
+ self.assertFalse(rest)
+
+ for bagattr in sb['bagAttributes']:
+ if bagattr['attrType'] in openTypesMap:
+ inav, rest = der_decoder(
+ bagattr['attrValues'][0], asn1Spec=openTypesMap[bagattr['attrType']])
+
+ self.assertFalse(rest)
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_friendlyName:
+ self.assertEqual( "3f71af65-1687-444a-9f46-c8be194c3e8e", inav)
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_localKeyId:
+ self.assertEqual(univ.OctetString(hexValue='01000000'), inav)
+
+ if attr['type'] == rfc2985.pkcs_9_at_pkcs7PDU:
+ ci, rest = der_decoder(
+ attr['values'][0], asn1Spec=rfc5652.ContentInfo())
+
+ self.assertFalse(rest)
+ self.assertEqual(rfc5652.id_signedData, ci['contentType'])
+
+ sd, rest = der_decoder(
+ ci['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertEqual(1, sd['version'])
+
+ for si in sd['signerInfos']:
+ self.assertEqual(1, si['version'])
+
+ for siattr in si['signedAttrs']:
+ if siattr['attrType'] in openTypesMap:
+ siav, rest = der_decoder(
+ siattr['attrValues'][0], asn1Spec=openTypesMap[siattr['attrType']])
+
+ self.assertFalse(rest)
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_contentType:
+ self.assertEqual(rfc5652.id_data, siav)
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_messageDigest:
+ self.assertEqual('b6e422a4', siav.prettyPrint()[2:10])
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_signingTime:
+ self.assertEqual('190529182319Z', siav['utcTime'])
+
+ for choices in sd['certificates']:
+ for rdn in choices[0]['tbsCertificate']['subject']['rdnSequence']:
+ if rdn[0]['type'] in openTypesMap:
+ nv, rest = der_decoder(
+ rdn[0]['value'], asn1Spec=openTypesMap[rdn[0]['type']])
+ self.assertFalse(rest)
+
+ if rdn[0]['type'] == rfc2985.pkcs_9_at_emailAddress:
+ self.assertEqual('alice@example.com', nv)
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ rfc2985.pkcs_9_at_smimeCapabilities: rfc2985.SMIMECapabilities(),
+ }
+ openTypesMap.update(rfc5280.certificateAttributesMap)
+ openTypesMap.update(rfc5652.cmsAttributesMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypesMap, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object:
+ self.assertTrue(attr['type'], openTypesMap)
+
+ if attr['type'] == rfc2985.pkcs_9_at_userPKCS12:
+
+ self.assertEqual(univ.Integer(3), attr['values'][0]['version'])
+ self.assertEqual(rfc5652.id_data, attr['values'][0]['authSafe']['contentType'])
+
+ authsafe, rest = der_decoder(
+ attr['values'][0]['authSafe']['content'],
+ asn1Spec=rfc7292.AuthenticatedSafe())
+
+ self.assertFalse(rest)
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+
+ indata, rest = der_decoder(
+ ci['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ sc, rest = der_decoder(
+ indata, asn1Spec=rfc7292.SafeContents(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+
+ for sb in sc:
+ if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
+ for bagattr in sb['bagAttributes']:
+ if bagattr['attrType'] in openTypesMap:
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_friendlyName:
+ self.assertEqual(
+ "3f71af65-1687-444a-9f46-c8be194c3e8e",
+ bagattr['attrValues'][0])
+
+ if bagattr['attrType'] == rfc2985.pkcs_9_at_localKeyId:
+ self.assertEqual(
+ univ.OctetString(hexValue='01000000'),
+ bagattr['attrValues'][0])
+
+ if attr['type'] == rfc2985.pkcs_9_at_pkcs7PDU:
+ self.assertEqual(rfc5652.id_signedData, attr['values'][0]['contentType'])
+ self.assertEqual(1, attr['values'][0]['content']['version'])
+
+ for si in attr['values'][0]['content']['signerInfos']:
+ self.assertEqual(1, si['version'])
+
+ for siattr in si['signedAttrs']:
+ if siattr['attrType'] in openTypesMap:
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_contentType:
+ self.assertEqual(rfc5652.id_data, siattr['attrValues'][0])
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_messageDigest:
+ self.assertEqual('b6e422a4', siattr['attrValues'][0].prettyPrint()[2:10])
+
+ if siattr['attrType'] == rfc2985.pkcs_9_at_signingTime:
+ self.assertEqual('190529182319Z', siattr['attrValues'][0]['utcTime'])
+
+ for choices in attr['values'][0]['content']['certificates']:
+ for rdn in choices[0]['tbsCertificate']['subject']['rdnSequence']:
+ if rdn[0]['type'] in openTypesMap:
+ if rdn[0]['type'] == rfc2985.pkcs_9_at_emailAddress:
+ self.assertEqual('alice@example.com', rdn[0]['value'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc2986.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc2986.py
new file mode 100644
index 0000000000..91e3d05645
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc2986.py
@@ -0,0 +1,90 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2986
+from pyasn1_modules import rfc5280
+
+
+class CertificationRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MIICxjCCAa4CAQAwgYAxCzAJBgNVBAYTAlVTMR0wGwYDVQQDDBRmY3UuZmFrZS5h
+ZGRyZXNzLm9yZzEXMBUGA1UEBwwOUGxlYXNhbnQgR3JvdmUxHDAaBgNVBAoME0Zh
+a2UgQ29tcGFueSBVbml0ZWQxDTALBgNVBAgMBFV0YWgxDDAKBgNVBAsMA0VuZzCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALvnYesymhLKSm9Llix53BUA
+h99xMDBUYk0OB1VIdNQyjmFabHinM+lYUzVzrfcm1xtYB5QYKbsYuwZ4r5WI7qho
+CRJy6JwXqKpOe72ScCogxlGDr2QtKjtvyWrRwXBHX1/OqVSZ3hdz3njhKpmq6HgK
+87vH26RCSmK8FqCgn+qePfpspA7GzBvYwXhXluQtG7r4yBMKNRTQlPst8Vcy+iK+
+pI8hmQVrzGi8Hgbpr2L9EjPUOlAQEb8hxeKc7s5VhjN/RHMLVMX8YczZYt7mcDKr
+3PMwOVmXL1DMCtnS50MA2AxcPWcbQBeGyMroP+DLhAt6y1/IT0H5sQruNQw4euMC
+AwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQBQXYQPfH5Wy4o0ZFbKQOO1e3dHV8rl
+e8m9Z6qLgJO8rtW+OI+4FavJ6zjUvNVzd9JJxgwQ/1xprwrXh36nPcSyNLpGs7JT
+6u7TGQ38QQAOmziLXzauMWGBeLuzWGmOKA1cs5HFGLSmbxF3+0IWpz4GlD86pU1+
+WYyWgWHHAMA+kFYwBUR6CvPkmhshnZ8vrQavoOlcidCJ8o6IGA7N/Z0/NrgIDcoz
+YaruhoMrmRKHKNpfamhT0gvqEPBec+UB3uLElESIqaeqYc6eMtUQP3lqyghF6I0M
+fi6h7i9VVAZpslaKFfkNg12gLbbsCB1q36l5VXjHY/qe0FIUa9ogRrOi
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2986.CertificationRequest()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=rfc2986.CertificationRequest(),
+ openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for rdn in asn1Object['certificationRequestInfo']['subject']['rdnSequence']:
+ for atv in rdn:
+ if atv['type'] == rfc5280.id_at_countryName:
+ self.assertEqual(char.PrintableString('US'), atv['value'])
+
+ else:
+ self.assertGreater(len(atv['value']['utf8String']), 2)
+
+ spki_alg = asn1Object['certificationRequestInfo']['subjectPKInfo']['algorithm']
+
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ sig_alg = asn1Object['signatureAlgorithm']
+
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+
# Collect every TestCase defined above into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    sys.exit(not runner.run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3058.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3058.py
new file mode 100644
index 0000000000..0a0645ca2f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3058.py
@@ -0,0 +1,140 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3058
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFgwYJKoZIhvcNAQcDoIIFdDCCBXACAQIxXaJbAgEEMCMEEDiCUYXKXu8SzLos
+n2xeYP4YDzIwMTkwOTEyMTIwMDAwWjAPBgsrBgEEAYE8BwEBBgUABCB0G/YBGH3L
+3RhoG0mK33M8IvRYAOsnHB5MfUAOGF6kuDCCBQoGCSqGSIb3DQEHATAZBgsrBgEE
+AYE8BwEBAjAKBAhCT0dVU19JVoCCBOBzx7F6GMkP+C0Q4iuDq0rkSZprg8nuXx/4
+S3IMP999BrJdUAbPYxdQhAwTOZIuueyv00TJe/Eam9yyLuZXy0PFlTRi7KED8L8c
+yHsRoEobWGMLvE3D4hEhTGttElqQxBvMxZZgm6kLnNG7j8Z72L4lU4aARLYTQvkt
+lJnnfCaccDSiWzU8eXcXdnZAzcKR7CoDc0/XBpdDRddvQ7KXoarXYHuSybt649YD
+cpy0SN9gEPqcFPrBB3nusAx4VOTlpx5Z3ZJv/TEymN8KDobNfykBZURTwupO9WaV
+JZ3Hd/d8C1SCJn6DHuM1jwDp26WfzO8xCfea08MJrnQbNKsDHwmt4dFZIOvcOnwR
+8nNSB/Lt1aUj3GzluHVMyQQyT4AdZDmwFdNmQOBUBLmbWYhtd7t3O7Eqx8bGNa7V
+7LL0nvua04aj1oA6ph/G/8jxhByBYdN5Bwg7f1Ga3ZCwju2tFoQnWOCPYTVOjmBE
+JshBbNC7KhLpp9+C7/13A9cIC3T7Reuc7m+Fopf9Fabu97yFiyJPS8jSF0EnesNG
+R1L1Uvo2Wdc66iECoSrxvezaSgGKB2uLTnaFx4ASVMcP7gDipEOIwuUUuVCqgmWk
+HAK0Q9mwhBLLrYrsn9OjDHFpvkWgWNRMLl/v3E9A+grFh2BQHkB4C7keB1ZOfj1S
+qDi/+ylM9I1FOYMxVXJn2qHMl+QOkfdMoIATm3n3DiBI97/uX4x5KaX074v0dN31
+WeDcsFsh2ze5Dhx8vLJCaXLzWqkmNHX5G/CjjqE6bSR/awgWLRZQuY/9fMvDpvVJ
+uId/+OoWDtMVPIsyQ8w8yZzv+SkuZhsrJMHiKd5qxNQv5sOvC765LMUCNNwj7WzP
+hajintFXLAEMpIjk5xt3eIy3hdYla3PQoFfqcHOVX4EFMLBoYwBTgik8Fg669yXt
+MlbH84MGNs7jObhP/rrDkgbe0qmxUyzgm2uHya1VcItMGYoPPKMFU3ZfwAsZdqsi
+1GAtruTzSUmOpMfAoKOIAyZP96HrsrPCaoGrn7ysm5eRrHQ2hdwO7rGQIw0dRAFh
+2eyRomoLam7yEiw9M6uHuJ5hIS5yEW+7uUjQT6nvKlbrkIyLL5j9Gbk5Z4fOMqRT
+kBs+3H8x7a+lBEKBo/ByJm6fHYi+LX5ZhQFTWkY0M7tfPtrxQdsNRGSHtv7jS7PZ
+3thCMqCtkG/pjAsCbDUtMThtP08z2fstE6dfy7qSx6LzKLDyBl5W76mVYdsX7Q72
+yIoCDFmUGdrRcWA+l3OMwNNL+x9MhhdaUWPtxqaGyZMNGOjkbYHbXZ69oqYqCHkA
+stIVKTzpk3kq9C9x+ynzWO8kIGYNK2uxSBIzPLQ6Daq4c53rWFFNWVjPC8m98zMc
+Yp0hbBhRsdk4qj8osSTcTfpT0+Q+hkYQvZl4IfgX1aHeaCDSScF8SaU+cZ7GYFvL
+o1cYrtVbeXrFwmWl0xpco1Ux+XZgryT/fgfJ+3ToppgsQmzECqTWmYsSYaF1kLU4
+Cqi9UH/VqBLOkwxoH05Zao2xOMNzu2QO3wFnvY2wBsIj1eaxfzVb42o9vom7V20j
+T1ufXXctf9ls5J1WJxBxdKmXQWdNloeAcl1AtxTbw7vIUU5uWqu9wwqly11MDVPA
+b0tcQW20auWmCNkXd52jQJ7PXR6kr5I=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_alg_CMSIDEAwrap, kwa['algorithm'])
+ self.assertEqual(kwa['parameters'], der_encoder(univ.Null("")))
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_IDEA_CBC, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], asn1Spec=rfc3058.IDEA_CBCPar())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, param['iv'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ kekri = asn1Object['content']['recipientInfos'][0]['kekri']
+ kwa = kekri['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_alg_CMSIDEAwrap, kwa['algorithm'])
+ self.assertEqual(univ.Null(""), kwa['parameters'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3058.id_IDEA_CBC, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, cea['parameters']['iv'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "MB4wDQYLKwYBBAGBPAcBAQIwDQYLKwYBBAGBPAcBAQY="
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ alg_oid_list = [ ]
+ for cap in asn1Object:
+ self.assertFalse(cap['parameters'].hasValue())
+ alg_oid_list.append(cap['capabilityID'])
+
+ self.assertIn(rfc3058.id_IDEA_CBC, alg_oid_list)
+ self.assertIn(rfc3058.id_alg_CMSIDEAwrap, alg_oid_list)
+
+
# Collect every TestCase defined above into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    sys.exit(not runner.run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3114.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3114.py
new file mode 100644
index 0000000000..d0492a66c5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3114.py
@@ -0,0 +1,244 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3114
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5755
+
+
+class SecurityLabelTestCase(unittest.TestCase):
+ pem_text = """\
+MIITHAYJKoZIhvcNAQcCoIITDTCCEwkCAQMxDTALBglghkgBZQMEAgIwggeUBgsq
+hkiG9w0BCRABF6CCB4MEggd/MIIHewIBADGCAk8wggJLAgEAMDMwJjEUMBIGA1UE
+CgwLZXhhbXBsZS5jb20xDjAMBgNVBAMMBUFsaWNlAgkAg/ULtwvVxA4wDQYJKoZI
+hvcNAQEBBQAEggIAdZphtN3x8a8kZoAFY15HYRD6JyPBueRUhLbTPoOH3pZ9xeDK
++zVXGlahl1y1UOe+McEx2oD7cxAkhFuruNZMrCYEBCTZMwVhyEOZlBXdZEs8rZUH
+L3FFE5PJnygsSIO9DMxd1UuTFGTgCm5V5ZLFGmjeEGJRbsfTyo52S7iseJqIN3dl
+743DbApu0+yuUoXKxqKdUFlEVxmhvc+Qbg/zfiwu8PTsYiUQDMBi4cdIlju8iLjj
+389xQHNyndXHWD51is89GG8vpBe+IsN8mnbGtCcpqtJ/c65ErJhHTR7rSJSMEqQD
+0LPOCKIY1q9FaSSJfMXJZk9t/rPxgUEVjfw7hAkKpgOAqoZRN+FpnFyBl0FnnXo8
+kLp55tfVyNibtUpmdCPkOwt9b3jAtKtnvDQ2YqY1/llfEUnFOVDKwuC6MYwifm92
+qNlAQA/T0+ocjs6gA9zOLx+wD1zqM13hMD/L+T2OHL/WgvGb62JLrNHXuPWA8RSh
+O4kIlPtARKXap2S3+MX/kpSUUrNa65Y5uK1jwFFclczG+CPCIBBn6iJiQT/vOX1I
+97YUP4Qq6OGkjK064Bq6o8+e5+NmIOBcygYRv6wA7vGkmPLSWbnw99qD728bBh84
+fC3EjItdusqGIwjzL0eSUWXJ5eu0Z3mYhJGN1pe0R/TEB5ibiJsMLpWAr3gwggUP
+BgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEGMBEEDE2HVyIurFKUEX8MEgIBEICCBOD+
+L7PeC/BpmMOb9KlS+r+LD+49fi6FGBrs8aieGi7ezZQEiFYS38aYQzTYYCt3SbJQ
+TkX1fDsGZiaw/HRiNh7sJnxWATm+XNKGoq+Wls9RhSJ45Sw4GMqwpoxZjeT84Uoz
+OITk3l3fV+3XiGcCejHkp8DAKZFExd5rrjlpnnAOBX6w8NrXO4s2n0LrMhtBU4eB
+2YKhGgs5Q6wQyXtU7rc7OOwTGvxWEONzSHJ01pyvqVQZAohsZPaWLULrM/kEGkrh
+G4jcaVjVPfULi7Uqo14imYhdCq5Ba4bwqI0Ot6mB27KD6LlOnVC/YmXCNIoYoWmq
+y1o3pSm9ovnLEO/dzxQjEJXYeWRje9M/sTxotM/5oZBpYMHqIwHTJbehXFgp8+oD
+jyTfayMYA3fTcTH3XbGPQfnYW2U9+ka/JhcSYybM8cuDNFd1I1LIQXoJRITXtkvP
+UbJqm+s6DtS5yvG9I8aQxlT365zphS4vbQaO74ujO8bE3dynrvTTV0c318TcHpN3
+DY9PIt6mHXMIPDLEA4wes90zg6iah5XiQcLtfLaAdYwEEGlImGD8n0kOhSNgclSL
+Mklpj5mVOs8exli3qoXlVMRJcBptSwOe0QPcRY30spywS4zt1UDIQ0jaecGGVtUY
+j586nkubhAxwZkuQKWxgt6yYTpGNSKCdvd+ygfyGJRDbWdn6nck/EPnG1773KTHR
+hMrXrBPBpSlfyJ/ju3644CCFqCjFoTh4bmB63k9ejUEVkJIJuoeKeTBaUxbCIink
+K4htBkgchHP51RJp4q9jQbziD3aOhg13hO1GFQ4E/1DNIJxbEnURNp/ga8SqmnLY
+8f5Pzwhm1mSzZf+obowbQ+epISrswWyjUKKO+uJfrAVN2TS/5+X6T3U6pBWWjH6+
+xDngrAJwtIdKBo0iSEwJ2eir4X8TcrSy9l8RSOiTPtqS5dF3RWSWOzkcO72fHCf/
+42+DLgUVX8Oe5mUvp7QYiXXsXGezLJ8hPIrGuOEypafDv3TwFkBc2MIB0QUhk+GG
+1ENY3jiNcyEbovF5Lzz+ubvechHSb1arBuEczJzN4riM2Dc3c+r8N/2Ft6eivK7H
+UuYX1uAcArhunZpA8yBGLF1m+DUXFtzWAUvfMKYPdfwGMckghF7YwLrTXd8ZhPIk
+HNO1KdwQKIRfgIlUPfTxRB7eNrG/Ma9a/IwrcI1QtkXU59uIZIw+7+FHZRWPsOjT
+u1Pdy+JtcSTG4dmS+DIwqpUzdu6MaBCVaOhXHwybvaSPTfMG/nR/NxF1FI8xgydn
+zXZs8HtFDL9iytKnvXHx+IIz8Rahp/PK8S80vPQNIeef/JgnIhtosID/A614LW1t
+B4cWdveYlD5U8T/XXInAtCY78Q9WJD+ecu87OJmlOdmjrFvitpQAo8+NGWxc7Wl7
+LtgDuYel7oXFCVtI2npbA7R+K5/kzUvDCY6GTgzn1Gfamc1/Op6Ue17qd/emvhbI
+x+ng3swf8TJVnCNDIXucKVA4boXSlCEhCGzfoZZYGVvm1/hrypiBtpUIKWTxLnz4
+AQJdZ5LGiCQJQU1wMyHsg6vWmNaJVhGHE6D/EnKsvJptFIkAx0wWkh35s48p7EbU
+8QBg//5eNru6yvLRutfdBX7T4w681pCD+dOiom75C3UdahrfoFkNsZ2hB88+qNsE
+EPb/xuGu8ZzSPZhakhl2NS2ggglpMIICAjCCAYigAwIBAgIJAOiR1gaRT87yMAoG
+CCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNTE0MDg1ODExWhcNMjEw
+NTEzMDg1ODExWjA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcM
+B0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMHYwEAYHKoZIzj0CAQYFK4EEACID
+YgAE8FF2VLHojmqlnawpQwjG6fWBQDPOy05hYq8oKcyg1PXH6kgoO8wQyKYVwsDH
+Evc1Vg6ErQm3LzdI8OQpYx3H386R2F/dT/PEmUSdcOIWsB4zrFsbzNwJGIGeZ33Z
+S+xGo1AwTjAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYDVR0jBBgw
+FoAU8jXbNATapVXyvWkDmbBi7OIVCMEwDAYDVR0TBAUwAwEB/zAKBggqhkjOPQQD
+AwNoADBlAjBaUY2Nv03KolLNRJ2wSoNK8xlvzIWTFgIhsBWpD1SpJxRRv22kkoaw
+9bBtmyctW+YCMQC3/KmjNtSFDDh1I+lbOufkFDSQpsMzcNAlwEAERQGgg6iXX+Nh
+A+bFqNC7FyF4WWQwggOHMIIDDqADAgECAgkApbNUKBuwbkYwCgYIKoZIzj0EAwMw
+PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
+DwYDVQQKDAhCb2d1cyBDQTAeFw0xOTExMDIxODQyMThaFw0yMDExMDExODQyMTha
+MGYxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQ
+MA4GA1UEChMHRXhhbXBsZTEMMAoGA1UECxMDUENBMRgwFgYDVQQDEw9wY2EuZXhh
+bXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQ9/m9uACpsTl2frBuILHiw
+IJyfUEpKseYJ+JYL1AtIZU0YeJ9DA+32h0ZeNGJDtDClnbBEPpn3W/5+TzldcsTe
+QlAJB08gcVRjkQym9LtPq7rGubCeVWlRRE9M7F9znk6jggGtMIIBqTAdBgNVHQ4E
+FgQUJuolDwsyICik11oKjf8t3L1/VGUwbwYDVR0jBGgwZoAU8jXbNATapVXyvWkD
+mbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UE
+BwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYGkU/O8jAPBgNVHRMB
+Af8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0
+aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1Ud
+IAQOMAwwCgYIKwYBBQUHDQIwCgYDVR02BAMCAQIwgZEGCCsGAQUFBwEVBIGEMIGB
+MFkGCyqGSIb3DQEJEAcDAwIF4DFGMESACyqGSIb3DQEJEAcEgTUwMwwXTEFXIERF
+UEFSVE1FTlQgVVNFIE9OTFkMGEhVTUFOIFJFU09VUkNFUyBVU0UgT05MWTARBgsq
+hkiG9w0BCRAHAgMCBPAwEQYLKoZIhvcNAQkQBwEDAgXgMAoGCCqGSM49BAMDA2cA
+MGQCMBlIP4FWrNzWXR8OgfcvCLGPG+110EdsmwznIF6ThT1vbJYvYoSbBXTZ9OCh
+/cCMMQIwJOySybHl/eLkNJh971DWF4mUQkt3WGBmZ+9Rg2cJTdat2ZjPKg101NuD
+tkUyjGxfMIID1DCCA1qgAwIBAgIUUc1IQGJpeYQ0XwOS2ZmVEb3aeZ0wCgYIKoZI
+zj0EAwMwZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJu
+ZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQwwCgYDVQQLEwNQQ0ExGDAWBgNVBAMTD3Bj
+YS5leGFtcGxlLmNvbTAeFw0xOTExMDUyMjIwNDZaFw0yMDExMDQyMjIwNDZaMIGS
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAO
+BgNVBAoTB0V4YW1wbGUxIjAgBgNVBAsTGUh1bWFuIFJlc291cmNlIERlcGFydG1l
+bnQxDTALBgNVBAMTBEZyZWQxHzAdBgkqhkiG9w0BCQEWEGZyZWRAZXhhbXBsZS5j
+b20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQObFslQ2EBP0xlDJ3sRnsNaqm/woQg
+KpBispSxXxK5bWUVpfnWsZnjLWhtDuPcu1BcBlM2g7gwL/aw8nUSIK3D8Ja9rTUQ
+QXc3zxnkcl8+8znNXHMGByRjPUH87C+TOrqjggGaMIIBljAdBgNVHQ4EFgQU5m71
+1OqFDNGRSWMOSzTXjpTLIFUwbwYDVR0jBGgwZoAUJuolDwsyICik11oKjf8t3L1/
+VGWhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVy
+bmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQCls1QoG7BuRjAPBgNVHRMBAf8EBTAD
+AQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0
+ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1UdIAQOMAww
+CgYIKwYBBQUHDQIwCgYDVR02BAMCAQIwfwYDVR0JBHgwdjBJBgNVBDcxQjBABgsq
+hkiG9w0BCRAHAwMCBeAxLTArgAsqhkiG9w0BCRAHBIEcMBoMGEhVTUFOIFJFU09V
+UkNFUyBVU0UgT05MWTApBglghkgBZQIBBUQxHAwaSHVtYW4gUmVzb3VyY2VzIERl
+cGFydG1lbnQwCgYIKoZIzj0EAwMDaAAwZQIwVh/RypULFgPpAN0I7OvuMomRWnm/
+Hea3Hk8PtTRz2Zai8iYat7oeAmGVgMhSXy2jAjEAuJW4l/CFatBy4W/lZ7gS3weB
+dBa5WEDIFFMC7GjGtCeLtXYqWfBnRdK26dOaHLB2MYIB7jCCAeoCAQEwfjBmMQsw
+CQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNV
+BAoTB0V4YW1wbGUxDDAKBgNVBAsTA1BDQTEYMBYGA1UEAxMPcGNhLmV4YW1wbGUu
+Y29tAhRRzUhAYml5hDRfA5LZmZURvdp5nTALBglghkgBZQMEAgKggeIwGgYJKoZI
+hvcNAQkDMQ0GCyqGSIb3DQEJEAEXMBwGCSqGSIb3DQEJBTEPFw0xOTExMDgyMDA4
+MzFaMD8GCSqGSIb3DQEJBDEyBDCd5WyvIB0VdXgPBWPtI152MIJLg5o68IRimCXx
+bVY0j3YyAKbi0egiZ/UunkyCfv0wZQYLKoZIhvcNAQkQAgIxVjFUAgEIBgsqhkiG
+9w0BCRAHAzEtMCuACyqGSIb3DQEJEAcEgRwwGgwYSFVNQU4gUkVTT1VSQ0VTIFVT
+RSBPTkxZExNCb2FndXMgUHJpdmFjeSBNYXJrMAoGCCqGSM49BAMDBGcwZQIwWkD7
+03QoNrKL5HJnuGJqvML1KlUXZDHnFpnJ+QMzXi8gocyfpRXWm6h0NjXieE0XAjEA
+uuDSOoaUIz+G9aemAE0ldpo1c0avNGa7BtynUTHmwosD6Sjfj0epAg9OnMedOjbr
+"""
+
    def testDerCodec(self):
        """Walk the CMS layers of the sample and verify the security labels.

        The sample nests ContentInfo -> SignedData -> AuthEnvelopedData.
        While unwrapping, the signed attributes and signer certificates of
        the SignedData layer are captured so the ESS security label and the
        certificate clearance attribute can be checked afterwards.
        """
        # Start from the registered CMS content-type map so each layer's
        # content type resolves to a decoding spec.
        layers = { }
        layers.update(rfc5652.cmsContentTypesMap)

        # Per content type: how to find the content type of the next
        # inner layer (None terminates the loop).
        getNextLayer = {
            rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
            rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
            rfc5083.id_ct_authEnvelopedData: lambda x: None
        }

        # Per content type: how to find the encoded bytes of the next
        # inner layer.
        getNextSubstrate = {
            rfc5652.id_ct_contentInfo: lambda x: x['content'],
            rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
            rfc5083.id_ct_authEnvelopedData: lambda x: None
        }

        substrate = pem.readBase64fromText(self.pem_text)

        next_layer = rfc5652.id_ct_contentInfo
        while next_layer:
            # Every layer must round-trip cleanly through the DER codec.
            asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])

            self.assertFalse(rest)
            self.assertTrue(asn1Object.prettyPrint())
            self.assertEqual(substrate, der_encoder(asn1Object))

            if next_layer == rfc5652.id_signedData:
                # NOTE(review): 'attrs' and 'certs' are bound only when a
                # SignedData layer is seen; this sample is known to have one.
                attrs = asn1Object['signerInfos'][0]['signedAttrs']
                certs = asn1Object['certificates']

            substrate = getNextSubstrate[next_layer](asn1Object)
            next_layer = getNextLayer[next_layer](asn1Object)

        # Expected policy id, category id and classification for the labels.
        spid = rfc3114.id_tsp_TEST_Whirlpool
        catid = rfc3114.id_tsp_TEST_Whirlpool_Categories
        conf = rfc3114.Whirlpool_SecurityClassification(value='whirlpool-confidential')

        # Importing rfc3114 must have registered these in the shared maps.
        self.assertIn(catid, rfc5755.securityCategoryMap)
        self.assertIn(rfc5755.id_at_clearance, rfc5280.certificateAttributesMap)
        self.assertIn(rfc5280.id_ce_subjectDirectoryAttributes, rfc5280.certificateExtensionsMap)

        security_label_okay = False

        # Check the ESS security label carried in the signed attributes.
        for attr in attrs:
            if attr['attrType'] == rfc5035.id_aa_securityLabel:
                esssl, rest = der_decoder(
                    attr['attrValues'][0], asn1Spec=rfc5035.ESSSecurityLabel())

                self.assertFalse(rest)
                self.assertTrue(esssl.prettyPrint())
                self.assertEqual(attr['attrValues'][0], der_encoder(esssl))

                self.assertEqual(spid, esssl['security-policy-identifier'])
                self.assertEqual(conf, esssl['security-classification'])

                for cat in esssl['security-categories']:
                    if cat['type'] == catid:
                        scv, rest = der_decoder(
                            cat['value'], asn1Spec=rfc3114.SecurityCategoryValues())

                        self.assertFalse(rest)
                        self.assertTrue(scv.prettyPrint())
                        self.assertEqual(cat['value'], der_encoder(scv))

                        # Each category value is a "... USE ONLY" string.
                        for scv_str in scv:
                            self.assertIn('USE ONLY', scv_str)
                            security_label_okay = True

        self.assertTrue(security_label_okay)

        # Check the clearance attribute in each signer certificate's
        # subjectDirectoryAttributes extension.
        clearance_okay = False
        for cert_choice in certs:
            for extn in cert_choice['certificate']['tbsCertificate']['extensions']:
                if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
                    ev, rest = der_decoder(
                        extn['extnValue'],
                        asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])

                    self.assertFalse(rest)
                    self.assertTrue(ev.prettyPrint())
                    self.assertEqual(extn['extnValue'], der_encoder(ev))

                    for attr in ev:

                        if attr['type'] == rfc5755.id_at_clearance:
                            av, rest = der_decoder(
                                attr['values'][0],
                                asn1Spec=rfc5280.certificateAttributesMap[attr['type']])

                            self.assertEqual(spid, av['policyId'])

                            for cat in av['securityCategories']:

                                self.assertEqual(catid, cat['type'])

                                scv, rest = der_decoder(
                                    cat['value'],
                                    asn1Spec=rfc5755.securityCategoryMap[cat['type']])

                                self.assertFalse(rest)
                                self.assertTrue(scv.prettyPrint())
                                self.assertEqual(cat['value'], der_encoder(scv))

                                for scv_str in scv:
                                    self.assertIn('USE ONLY', scv_str)
                                    clearance_okay = True

        self.assertTrue(clearance_okay)
+
+
# Collect every TestCase defined above into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    sys.exit(not runner.run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3125.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3125.py
new file mode 100644
index 0000000000..d7072b91be
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3125.py
@@ -0,0 +1,109 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc3125
+
+
+class SignaturePolicyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIMYzALBglghkgBZQMEAgEwggwwBgorgR6RmYQFAQICGA8yMDE2MTAwMjAwMDAwMFowgaSk
+XjBcMQswCQYDVQQGEwJTSzETMBEGA1UEBwwKQnJhdGlzbGF2YTEiMCAGA1UECgwZTmFyb2Ru
+eSBiZXpwZWNub3N0bnkgdXJhZDEUMBIGA1UECwwLU2VrY2lhIElCRVCGQmh0dHA6Ly9lcC5u
+YnVzci5zay90cnVzdGVkX2RhdGEvMjAxNjEwMDIwMDAwMDB6c2lnbmF0dXJlcG9saWN5LmRl
+cgyBz0VOOiBFbC4gc2lnbmF0dXJlL3NlYWwsIG9wdGlvbmFsIGVsLiB0aW1lLXN0YW1wIG92
+ZXIgT0NTUCwgYWNjb3JkaW5nIHRvIFJlZ3VsYXRpb24gKEVVKSBObyA5MTAvMjAxNC4gU0s6
+IEVsLiBwb2RwaXMvcGXEjWHFpSwgdm9saXRlxL5uw6EgZWwuIMSNYXNvdsOhIHBlxI1pYXRr
+YSBuYWQgT0NTUCwgcG9kxL5hIG5hcmlhZGVuaWEgKEXDmikgxI0uIDkxMC8yMDE0LjCCCpYw
+IhgPMjAxNjEwMDIwMDAwMDBaGA8yMDIxMTAwMjAwMDAwMFowggpsoD8wPTA3MC4GCSqGSIb3
+DQEJAwYJKoZIhvcNAQkEBgkqhkiG9w0BCQUGCyqGSIb3DQEJEAIvMAChAwoBAjACMACiEjAQ
+ow4wDAIBAAIBAAIBAAIBAaSCChMwggoPoIIB/zCCAfswCwYJYIZIAWUDBAIBMAsGCWCGSAFl
+AwQCAjALBglghkgBZQMEAgMwCwYJYIZIAWUDBAIGMAsGCWCGSAFlAwQCCDALBglghkgBZQME
+AgkwCwYJYIZIAWUDBAIKMA8GCWCGSAFlAwQDAgICCAAwDwYJYIZIAWUDBAMDAgIIADAPBglg
+hkgBZQMEAwQCAggAMA8GCWCGSAFlAwQDBgICCAAwDwYJYIZIAWUDBAMHAgIIADAPBglghkgB
+ZQMEAwgCAggAMA4GCCqGSM49BAMCAgIBADAOBggqhkjOPQQDAwICAQAwDgYIKoZIzj0EAwQC
+AgEAMA8GCWCGSAFlAwQDCgICAQAwDwYJYIZIAWUDBAMLAgIBADAPBglghkgBZQMEAwwCAgEA
+MA8GCSqGSIb3DQEBCwICCAAwDwYJKoZIhvcNAQEMAgIIADAPBgkqhkiG9w0BAQ0CAggAMA8G
+CWCGSAFlAwQDDgICCAAwDwYJYIZIAWUDBAMPAgIIADAPBglghkgBZQMEAxACAggAMA8GCSqG
+SIb3DQEBCgICCAAwDwYJKoZIhvcNAQEBAgIIADANBgcqhkjOPQIBAgIBADAOBggrJAMDAgUC
+AQICAQAwDgYIKyQDAwIFBAQCAgEAMA4GCCskAwMCBQQFAgIBADAOBggrJAMDAgUEBgICAQCh
+ggH/MIIB+zALBglghkgBZQMEAgEwCwYJYIZIAWUDBAICMAsGCWCGSAFlAwQCAzALBglghkgB
+ZQMEAgYwCwYJYIZIAWUDBAIIMAsGCWCGSAFlAwQCCTALBglghkgBZQMEAgowDwYJYIZIAWUD
+BAMCAgIIADAPBglghkgBZQMEAwMCAggAMA8GCWCGSAFlAwQDBAICCAAwDwYJYIZIAWUDBAMG
+AgIIADAPBglghkgBZQMEAwcCAggAMA8GCWCGSAFlAwQDCAICCAAwDgYIKoZIzj0EAwICAgEA
+MA4GCCqGSM49BAMDAgIBADAOBggqhkjOPQQDBAICAQAwDwYJYIZIAWUDBAMKAgIBADAPBglg
+hkgBZQMEAwsCAgEAMA8GCWCGSAFlAwQDDAICAQAwDwYJKoZIhvcNAQELAgIIADAPBgkqhkiG
+9w0BAQwCAggAMA8GCSqGSIb3DQEBDQICCAAwDwYJYIZIAWUDBAMOAgIIADAPBglghkgBZQME
+Aw8CAggAMA8GCWCGSAFlAwQDEAICCAAwDwYJKoZIhvcNAQEKAgIIADAPBgkqhkiG9w0BAQEC
+AggAMA0GByqGSM49AgECAgEAMA4GCCskAwMCBQIBAgIBADAOBggrJAMDAgUEBAICAQAwDgYI
+KyQDAwIFBAUCAgEAMA4GCCskAwMCBQQGAgIBAKKCAf8wggH7MAsGCWCGSAFlAwQCATALBglg
+hkgBZQMEAgIwCwYJYIZIAWUDBAIDMAsGCWCGSAFlAwQCBjALBglghkgBZQMEAggwCwYJYIZI
+AWUDBAIJMAsGCWCGSAFlAwQCCjAPBglghkgBZQMEAwICAggAMA8GCWCGSAFlAwQDAwICCAAw
+DwYJYIZIAWUDBAMEAgIIADAPBglghkgBZQMEAwYCAggAMA8GCWCGSAFlAwQDBwICCAAwDwYJ
+YIZIAWUDBAMIAgIIADAOBggqhkjOPQQDAgICAQAwDgYIKoZIzj0EAwMCAgEAMA4GCCqGSM49
+BAMEAgIBADAPBglghkgBZQMEAwoCAgEAMA8GCWCGSAFlAwQDCwICAQAwDwYJYIZIAWUDBAMM
+AgIBADAPBgkqhkiG9w0BAQsCAggAMA8GCSqGSIb3DQEBDAICCAAwDwYJKoZIhvcNAQENAgII
+ADAPBglghkgBZQMEAw4CAggAMA8GCWCGSAFlAwQDDwICCAAwDwYJYIZIAWUDBAMQAgIIADAP
+BgkqhkiG9w0BAQoCAggAMA8GCSqGSIb3DQEBAQICCAAwDQYHKoZIzj0CAQICAQAwDgYIKyQD
+AwIFAgECAgEAMA4GCCskAwMCBQQEAgIBADAOBggrJAMDAgUEBQICAQAwDgYIKyQDAwIFBAYC
+AgEAo4IB/zCCAfswCwYJYIZIAWUDBAIBMAsGCWCGSAFlAwQCAjALBglghkgBZQMEAgMwCwYJ
+YIZIAWUDBAIGMAsGCWCGSAFlAwQCCDALBglghkgBZQMEAgkwCwYJYIZIAWUDBAIKMA8GCWCG
+SAFlAwQDAgICCAAwDwYJYIZIAWUDBAMDAgIIADAPBglghkgBZQMEAwQCAggAMA8GCWCGSAFl
+AwQDBgICCAAwDwYJYIZIAWUDBAMHAgIIADAPBglghkgBZQMEAwgCAggAMA4GCCqGSM49BAMC
+AgIBADAOBggqhkjOPQQDAwICAQAwDgYIKoZIzj0EAwQCAgEAMA8GCWCGSAFlAwQDCgICAQAw
+DwYJYIZIAWUDBAMLAgIBADAPBglghkgBZQMEAwwCAgEAMA8GCSqGSIb3DQEBCwICCAAwDwYJ
+KoZIhvcNAQEMAgIIADAPBgkqhkiG9w0BAQ0CAggAMA8GCWCGSAFlAwQDDgICCAAwDwYJYIZI
+AWUDBAMPAgIIADAPBglghkgBZQMEAxACAggAMA8GCSqGSIb3DQEBCgICCAAwDwYJKoZIhvcN
+AQEBAgIIADANBgcqhkjOPQIBAgIBADAOBggrJAMDAgUCAQICAQAwDgYIKyQDAwIFBAQCAgEA
+MA4GCCskAwMCBQQFAgIBADAOBggrJAMDAgUEBgICAQCkggH/MIIB+zALBglghkgBZQMEAgEw
+CwYJYIZIAWUDBAICMAsGCWCGSAFlAwQCAzALBglghkgBZQMEAgYwCwYJYIZIAWUDBAIIMAsG
+CWCGSAFlAwQCCTALBglghkgBZQMEAgowDwYJYIZIAWUDBAMCAgIIADAPBglghkgBZQMEAwMC
+AggAMA8GCWCGSAFlAwQDBAICCAAwDwYJYIZIAWUDBAMGAgIIADAPBglghkgBZQMEAwcCAggA
+MA8GCWCGSAFlAwQDCAICCAAwDgYIKoZIzj0EAwICAgEAMA4GCCqGSM49BAMDAgIBADAOBggq
+hkjOPQQDBAICAQAwDwYJYIZIAWUDBAMKAgIBADAPBglghkgBZQMEAwsCAgEAMA8GCWCGSAFl
+AwQDDAICAQAwDwYJKoZIhvcNAQELAgIIADAPBgkqhkiG9w0BAQwCAggAMA8GCSqGSIb3DQEB
+DQICCAAwDwYJYIZIAWUDBAMOAgIIADAPBglghkgBZQMEAw8CAggAMA8GCWCGSAFlAwQDEAIC
+CAAwDwYJKoZIhvcNAQEKAgIIADAPBgkqhkiG9w0BAQECAggAMA0GByqGSM49AgECAgEAMA4G
+CCskAwMCBQIBAgIBADAOBggrJAMDAgUEBAICAQAwDgYIKyQDAwIFBAUCAgEAMA4GCCskAwMC
+BQQGAgIBADAABCAaWobQZ1EuANtF/NjfuaBXR0nR0fKnGJ7Z8t/mregtvQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3125.SignaturePolicy()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ svp = asn1Object['signPolicyInfo']['signatureValidationPolicy']
+ sr = svp['commonRules']['signerAndVeriferRules']['signerRules']
+ msa = sr['mandatedSignedAttr']
+
+ self.assertIn(rfc2985.pkcs_9_at_contentType, msa)
+ self.assertIn(rfc2985.pkcs_9_at_messageDigest, msa)
+ self.assertIn(rfc2985.pkcs_9_at_signingTime, msa)
+
+
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    # 'sys' is already imported at module scope; the redundant local
    # re-import present in the original has been dropped.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3161.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3161.py
new file mode 100644
index 0000000000..47db88ab1e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3161.py
@@ -0,0 +1,81 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3161
+
+
class TSPQueryTestCase(unittest.TestCase):
    """DER decode/encode round-trip for an RFC 3161 TimeStampReq sample."""

    tsp_query_pem_text = """\
MFYCAQEwUTANBglghkgBZQMEAgMFAARAGu1DauxDZZv8F7l4EKIbS00U40mUKfBW5C0giEz0
t1zOHCvK4A8i8zxwUXFHv4pAJZE+uFhZ+v53HTg9rLjO5Q==
"""

    def setUp(self):
        self.asn1Spec = rfc3161.TimeStampReq()

    def testDerCodec(self):
        der_bytes = pem.readBase64fromText(self.tsp_query_pem_text)
        request, trailer = der_decoder(der_bytes, asn1Spec=self.asn1Spec)

        self.assertFalse(trailer)
        self.assertTrue(request.prettyPrint())
        self.assertEqual(der_bytes, der_encoder(request))
+
+
+class TSPResponseTestCase(unittest.TestCase):
+ tsp_response_pem_text = """\
+MIIFMTADAgEAMIIFKAYJKoZIhvcNAQcCoIIFGTCCBRUCAQMxCzAJBgUrDgMCGgUAMIIBowYL
+KoZIhvcNAQkQAQSgggGSBIIBjjCCAYoCAQEGBCoDBAEwUTANBglghkgBZQMEAgMFAARAGu1D
+auxDZZv8F7l4EKIbS00U40mUKfBW5C0giEz0t1zOHCvK4A8i8zxwUXFHv4pAJZE+uFhZ+v53
+HTg9rLjO5QIDDwJEGA8yMDE5MDUxMDE4MzQxOFoBAf+gggERpIIBDTCCAQkxETAPBgNVBAoT
+CEZyZWUgVFNBMQwwCgYDVQQLEwNUU0ExdjB0BgNVBA0TbVRoaXMgY2VydGlmaWNhdGUgZGln
+aXRhbGx5IHNpZ25zIGRvY3VtZW50cyBhbmQgdGltZSBzdGFtcCByZXF1ZXN0cyBtYWRlIHVz
+aW5nIHRoZSBmcmVldHNhLm9yZyBvbmxpbmUgc2VydmljZXMxGDAWBgNVBAMTD3d3dy5mcmVl
+dHNhLm9yZzEiMCAGCSqGSIb3DQEJARYTYnVzaWxlemFzQGdtYWlsLmNvbTESMBAGA1UEBxMJ
+V3VlcnpidXJnMQswCQYDVQQGEwJERTEPMA0GA1UECBMGQmF5ZXJuMYIDWjCCA1YCAQEwgaMw
+gZUxETAPBgNVBAoTCEZyZWUgVFNBMRAwDgYDVQQLEwdSb290IENBMRgwFgYDVQQDEw93d3cu
+ZnJlZXRzYS5vcmcxIjAgBgkqhkiG9w0BCQEWE2J1c2lsZXphc0BnbWFpbC5jb20xEjAQBgNV
+BAcTCVd1ZXJ6YnVyZzEPMA0GA1UECBMGQmF5ZXJuMQswCQYDVQQGEwJERQIJAMHphhYNqOmC
+MAkGBSsOAwIaBQCggYwwGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEEMBwGCSqGSIb3DQEJ
+BTEPFw0xOTA1MTAxODM0MThaMCMGCSqGSIb3DQEJBDEWBBSuLICty7PQHx0Ynk0a3rGcCRrf
+EjArBgsqhkiG9w0BCRACDDEcMBowGDAWBBSRbaPYYOzKguNLxZ0Xk+fpaIdfFDANBgkqhkiG
+9w0BAQEFAASCAgBFDVbGQ3L5GcaUBMtBnMW7x3S57QowQhhrTewvncY+3Nc2i6tlM1UEdxIp
+3m2iMqaH/N2xIm2sU/L/lIwaT1XIS4bJ2Nn8UPjZu/prJrVUFTMjJ5LWkG55x6c5A4pa2xxS
+N/kOV2e+6RHYlGvcDOvu2fzuz08hE+NjaHIPg3idU1cBsl0gTWZCTrxdXTLuuvHahxUAdQKm
+gTdGPjIiOR4GYpaVxEAgulaBQLZU5MhfBTASI1LkljhiFeDBQMhTUeZoA59/OxgnQR1Zpca4
+ZuWuqnZImxziRQA1tX/6pjAo5eP1V+SLWYHeIO7ia/urGIK9AXd3jY3Ljq4h7R1E+RRKIseO
+74mmtbJtCaiGL9H+6k164qC7U5fHBzKl3UboZtOUmNj10IJPUNyKQ5JPwCe6HEhbeXLRdh/8
+bjdqy56hBHyG1NRBqiTXTvj9LOzsJGIF5GjwyCT0B2hpvzdTdzNtfQ27HUUYgnYg0fGEpNpi
+vyaW5qCh9S704IKB0m/fXlqiIfNVdqDr/aAHNww8CouZP2oFO61WXCspbFNPLubeqxd5P4o4
+dJzD4PKsurILdX7SL8pRI+O2UtJLwNB1t3LBLKfTZuOWoSBFvQwbqBsDEchrZIDZXSXMbXd6
+uuvuO3ZsRWuej+gso+nWi3CRnRc9Wb0++cq4s8YSLaYSj2pHMA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3161.TimeStampResp()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tsp_response_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
# Collect every TestCase defined above into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    sys.exit(not runner.run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3274.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3274.py
new file mode 100644
index 0000000000..cb24d3725f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3274.py
@@ -0,0 +1,81 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3274
+from pyasn1_modules import rfc5652
+
+
+class CompressedDataTestCase(unittest.TestCase):
+ compressed_data_pem_text = """\
+MIIB7wYLKoZIhvcNAQkQAQmgggHeMIIB2gIBADANBgsqhkiG9w0BCRADCDCCAcQG
+CSqGSIb3DQEHAaCCAbUEggGxeJxVksGO1DAQRO/+ir4xK4VlNSAhcUPRrgRiLgw/
+0Il7Egu7bdntMOHraSezMJyixOWq19XpIwuxvP2xJvoEQld5lzw6Nub7Sw/vjx8/
+dJDq4F2ZyYJj+FqZ4Pj0dOzA0sUxFUC4xBxQ2gNqcTzBGEPKVApZY1EQsKn6vCaJ
+U8Y0uxFOeowTwXllwSsc+tP5Qe9tOCCK8wjQ32zUcvcZSDMIJCOX4PQgMqQcF2c3
+Dq5hoAzxAmgXVN+JSqfUo6+2YclMhrwLjlHaVRVutplsZYs8rvBL2WblqN7CTD4B
+MqAIjj8pd1ASUXMyNbXccWeDYd0sxlsGYIhVp3i1l6jgr3qtUeUehbIpQqnAoVSN
+1IqKm7hZaI3EY2tLIR86RbD//ONCGb2HsPdnivvdqvrsZY51mlu+NjTjQhpKWz0p
+FvRlWw9ae7+fVgKKie0SeFpIZYemoyuG5HUS2QY6fTk9N6zz+dsuUyr9Xghs5Ddi
+1LbZbVoNHDyFNv19jL7qiv9uuLK/XTD3Kqct1JS822vS8vWXpMzYBtal/083rMap
+XQ7u2qbaKFtZ7V96NH8ApkUFkg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.compressed_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc3274.id_ct_compressedData, asn1Object['contentType'])
+
+ cd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc3274.CompressedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(cd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(cd))
+
+ self.assertEqual(rfc3274.id_alg_zlibCompress,
+ cd['compressionAlgorithm']['algorithm'])
+ self.assertEqual(rfc5652.id_data, cd['encapContentInfo']['eContentType'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.compressed_data_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc3274.id_ct_compressedData, asn1Object['contentType'])
+
+ cd = asn1Object['content']
+
+ self.assertEqual(rfc3274.id_alg_zlibCompress,
+ cd['compressionAlgorithm']['algorithm'])
+ self.assertEqual(rfc5652.id_data, cd['encapContentInfo']['eContentType'])
+
+
# Collect every TestCase defined above into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    sys.exit(not runner.run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3279.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3279.py
new file mode 100644
index 0000000000..210a2e9795
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3279.py
@@ -0,0 +1,385 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3279
+
+
+class RSACertificateTestCase(unittest.TestCase):
+ rsa_cert_pem_text = """\
+MIIE8TCCA9mgAwIBAgIQbyXcFa/fXqMIVgw7ek/H+DANBgkqhkiG9w0BAQUFADBv
+MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk
+ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF
+eHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFow
+gYExCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO
+BgNVBAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMScwJQYD
+VQQDEx5DT01PRE8gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDQQIuLcuORG/dRwRtUBJjTqb/B5opdO4f7u4jO
+DeMvPwaW8KIpUJmu2zuhV7B0UXHN7UKRTUH+qcjYaoZ3RLtZZpdQXrTULHBEz9o3
+lUJpPDDEcbNS8CFNodi6OXwcnqMknfKDFpiqFnxDmxVbt640kf7UYiYYRpo/68H5
+8ZBX66x6DYvbcjBqZtXgRqNw3GjZ/wRIiXfeten7Z21B6bw5vTLZYgLxsag9bjec
+4i/i06Imi8a4VUOI4SM+pdIkOWpHqwDUobOpJf4NP6cdutNRwQuk2qw471VQJAVl
+RpM0Ty2NrcbUIRnSjsoFYXEHc0flihkSvQRNzk6cpUisuyb3AgMBAAGjggF0MIIB
+cDAfBgNVHSMEGDAWgBStvZh6NLQm9/rEJlTvA73gJMtUGjAdBgNVHQ4EFgQUC1jl
+i8ZMFTekQKkwqSG+RzZaVv8wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
+Af8wEQYDVR0gBAowCDAGBgRVHSAAMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9j
+cmwudXNlcnRydXN0LmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDCBswYI
+KwYBBQUHAQEEgaYwgaMwPwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0
+LmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LnA3YzA5BggrBgEFBQcwAoYtaHR0
+cDovL2NydC51c2VydHJ1c3QuY29tL0FkZFRydXN0VVROU0dDQ0EuY3J0MCUGCCsG
+AQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1c3QuY29tMA0GCSqGSIb3DQEBBQUA
+A4IBAQAHYJOZqs7Q00fQNzPeP2S35S6jJQzVMx0Njav2fkZ7WQaS44LE5/X289kF
+z0k0LTdf9CXH8PtrI3fx8UDXTLtJRTHdAChntylMdagfeTHJNjcPyjVPjPF+3vxG
+q79om3AjMC63xVx7ivsYE3lLkkKM3CyrbCK3KFOzGkrOG/soDrc6pNoN90AyT99v
+uwFQ/IfTdtn8+7aEA8rJNhj33Wzbu7qBHKat/ij5z7micV0ZBepKRtxzQe+JlEKx
+Q4hvNRevHmCDrHqMEHufyfaDbZ76iO4+3e6esL/garnQnweyCROa9aTlyFt5p0c1
+M2jlVZ6qW8swC53HD79oRIGXi1FK
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.rsa_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.rsaEncryption, spki_a['algorithm'])
+
+ spki_pk = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['subjectPublicKey'].asOctets()
+ pk, rest = der_decoder(spki_pk, asn1Spec=rfc3279.RSAPublicKey())
+
+ self.assertFalse(rest)
+ self.assertTrue(pk.prettyPrint())
+ self.assertEqual(spki_pk, der_encoder(pk))
+ self.assertEqual(65537, pk['publicExponent'])
+ self.assertEqual(rfc3279.sha1WithRSAEncryption,
+ asn1Object['tbsCertificate']['signature']['algorithm'])
+ self.assertEqual(rfc3279.sha1WithRSAEncryption,
+ asn1Object['signatureAlgorithm']['algorithm'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.rsa_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.rsaEncryption, spki_a['algorithm'])
+ self.assertEqual(univ.Null(""), spki_a['parameters'])
+
+
+class ECCertificateTestCase(unittest.TestCase):
+ ec_cert_pem_text = """\
+MIIDrDCCApSgAwIBAgIQCssoukZe5TkIdnRw883GEjANBgkqhkiG9w0BAQwFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0xMzAzMDgxMjAwMDBaFw0yMzAzMDgxMjAwMDBaMEwxCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxJjAkBgNVBAMTHURpZ2lDZXJ0IEVDQyBT
+ZWN1cmUgU2VydmVyIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4ghC6nfYJN6g
+LGSkE85AnCNyqQIKDjc/ITa4jVMU9tWRlUvzlgKNcR7E2Munn17voOZ/WpIRllNv
+68DLP679Wz9HJOeaBy6Wvqgvu1cYr3GkvXg6HuhbPGtkESvMNCuMo4IBITCCAR0w
+EgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwNAYIKwYBBQUHAQEE
+KDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wQgYDVR0f
+BDswOTA3oDWgM4YxaHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0R2xv
+YmFsUm9vdENBLmNybDA9BgNVHSAENjA0MDIGBFUdIAAwKjAoBggrBgEFBQcCARYc
+aHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzAdBgNVHQ4EFgQUo53mH/naOU/A
+buiRy5Wl2jHiCp8wHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJ
+KoZIhvcNAQEMBQADggEBAMeKoENL7HTJxavVHzA1Nm6YVntIrAVjrnuaVyRXzG/6
+3qttnMe2uuzO58pzZNvfBDcKAEmzP58mrZGMIOgfiA4q+2Y3yDDo0sIkp0VILeoB
+UEoxlBPfjV/aKrtJPGHzecicZpIalir0ezZYoyxBEHQa0+1IttK7igZFcTMQMHp6
+mCHdJLnsnLWSB62DxsRq+HfmNb4TDydkskO/g+l3VtsIh5RHFPVfKK+jaEyDj2D3
+loB5hWp2Jp2VDCADjT7ueihlZGak2YPqmXTNbk19HOuNssWvFhtOyPNV6og4ETQd
+Ea8/B6hPatJ0ES8q/HO3X8IVQwVs1n3aAr0im0/T+Xc=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_ecPublicKey, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc3279.EcpkParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+ self.assertEqual(univ.ObjectIdentifier('1.3.132.0.34'), spki_a_p['namedCurve'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.ec_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_ecPublicKey, spki_a['algorithm'])
+ self.assertEqual(
+ univ.ObjectIdentifier('1.3.132.0.34'), spki_a['parameters']['namedCurve'])
+
+
+class DSACertificateTestCase(unittest.TestCase):
+ dsa_cert_pem_text = """\
+MIIDpjCCA0ygAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAxMDE5MjAxMjMw
+WjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQCLpR53
+xHfe+SiknAK/L9lm/ZO1109c9iYkriPIW/5MMlM+qc/tdRkKpG6ELIpfXTPtKCJm
+zqqVIyTmAJryyE8Xw0Ie2mzYPU5ULvKmllQkjTsWgPGgQBkciZ0AW9ggD9VwZilg
+4qh3iSO7T97hVQFnpCh6vm8pOH6UP/5kpr9ZJQIVANzdbztBJlJfqCB1t4h/NvSu
+wCFvAoGAITP+jhYk9Rngd98l+5ccgauQ+cLEUBgNG2Wq56zBXQbLou6eKkQi7ecL
+NiRmExq3IU3LOj426wSxL72Kw6FPyOEv3edIFkJJEHL4Z+ZJeVe//dzya0ddOJ7k
+k6qNF2ic+viD/5Vm8yRyKiig2uHH/MgIesLdZnvbzvX+f/P0z50DgYQAAoGALAUl
+jkOi1PxjjFVvhGfK95yIsrfbfcIEKUBaTs9NR2rbGWUeP+93paoXwP39X9wrJx2M
+SWeHWhWKszNgoiyqYT0k4R9mem3WClotxOvB5fHfwIp2kQYvE7H0/TPdGhfUpHQG
+YpyLQgT6L80meSKMFnu4VXGzOANhWDxu3JxiADCjgZQwgZEwCwYDVR0PBAQDAgeA
+MEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVz
+dGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFO37wHcauyc03rDc6cDRRsHz
+gcK+MB8GA1UdIwQYMBaAFM1IZQGDsqYHWwb+I4EMxHPk0bU4MAsGCWCGSAFlAwQD
+AgNHADBEAiBBRbfMzLi7+SVyO8SM3xxwUsMf/k1B+Nkvf1kBTfCfGwIgSAx/6mI+
+pNqdXqZZGESXy1MT1aBc4ynPGLFUr2r7cPY=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.dsa_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_dsa, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(spki_a['parameters'],
+ asn1Spec=rfc3279.Dss_Parms())
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+
+ q_value = 1260916123897116834511257683105158021801897369967
+
+ self.assertEqual(q_value, spki_a_p['q'])
+
+ sig_value, rest = der_decoder(
+ asn1Object['signature'].asOctets(), asn1Spec=rfc3279.Dss_Sig_Value())
+
+ self.assertFalse(rest)
+ self.assertTrue(sig_value.prettyPrint())
+ self.assertEqual(asn1Object['signature'].asOctets(), der_encoder(sig_value))
+ self.assertTrue(sig_value['r'].hasValue())
+ self.assertTrue(sig_value['s'].hasValue())
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.dsa_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_dsa, spki_a['algorithm'])
+
+ q_value = 1260916123897116834511257683105158021801897369967
+
+ self.assertEqual(q_value, spki_a['parameters']['q'])
+
+
+class KEACertificateTestCase(unittest.TestCase):
+ kea_cert_pem_text = """\
+MIICizCCAjOgAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCgwCQYHKoZIzjgEAzA/
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAP
+BgNVBAoTCEJvZ3VzIENBMB4XDTE5MTAyMDIwMDkyMVoXDTIwMTAxOTIwMDkyMVow
+cDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAw
+DgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGljZTEgMB4GCSqGSIb3DQEJARYR
+YWxpY2VAZXhhbXBsZS5jb20wgaAwFwYJYIZIAWUCAQEWBApc+PEn5ladbYizA4GE
+AAKBgB9Lc2QcoSW0E9/VnQ2xGBtpYh9MaDUBzIixbN8rhDwh0BBesD2TwHjzBpDM
+2PJ6DD1ZbBcz2M3vJaIKoZ8hA2EUtbbHX1BSnVfAdeqr5St5gfnuxSdloUjLQlWO
+rOYfpFVEp6hJoKAZiYfiXz0fohNXn8+fiU5k214byxlCPlU0o4GUMIGRMAsGA1Ud
+DwQEAwIDCDBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3Qg
+YmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1UdDgQWBBSE49bkPB9sQm27
+Rs2jgAPMyY6UCDAfBgNVHSMEGDAWgBTNSGUBg7KmB1sG/iOBDMRz5NG1ODAJBgcq
+hkjOOAQDA0cAMEQCIE9PWhUbnJVdNQcVYSc36BMZ+23uk2ITLsgSXtkScF6TAiAf
+TPnJ5Wym0hv2fOpnPPsWTgqvLFYfX27GGTquuOd/6A==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kea_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_keyExchangeAlgorithm, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(spki_a['parameters'],
+ asn1Spec=rfc3279.KEA_Parms_Id())
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+ self.assertEqual(univ.OctetString(hexValue='5cf8f127e6569d6d88b3'), spki_a_p)
+ self.assertEqual(
+ rfc3279.id_dsa_with_sha1, asn1Object['tbsCertificate']['signature']['algorithm'])
+ self.assertEqual(
+ rfc3279.id_dsa_with_sha1, asn1Object['signatureAlgorithm']['algorithm'])
+
+ sig_value, rest = der_decoder(asn1Object['signature'].asOctets(),
+ asn1Spec=rfc3279.Dss_Sig_Value())
+ self.assertFalse(rest)
+ self.assertTrue(sig_value.prettyPrint())
+ self.assertEqual(asn1Object['signature'].asOctets(), der_encoder(sig_value))
+ self.assertTrue(sig_value['r'].hasValue())
+ self.assertTrue(sig_value['s'].hasValue())
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.kea_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.id_keyExchangeAlgorithm, spki_a['algorithm'])
+ self.assertEqual(
+ univ.OctetString(hexValue='5cf8f127e6569d6d88b3'), spki_a['parameters'])
+
+ self.assertEqual(rfc3279.id_dsa_with_sha1,
+ asn1Object['tbsCertificate']['signature']['algorithm'])
+ self.assertEqual(
+ rfc3279.id_dsa_with_sha1, asn1Object['signatureAlgorithm']['algorithm'])
+
+
+class DHCertificateTestCase(unittest.TestCase):
+ dh_cert_pem_text = """\
+MIIEtDCCBFqgAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAxMDE5MjAxMjMw
+WjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTCCAsQwggI5BgcqhkjOPgIBMIICLAKCAQEAt9x/
+0iwGww3k19h+wbODVK1yqjFzEY2pyfXthHcn+nEw+DpURJ+iOhYPr68E3XO5sB48
+r5xTZhPN5+YejD3T8qhnDtiq4qrrSH7BOaEzqCDpHE2Bpoy3SodQ5Obaiu9Kx1ix
+BRk/oRZUH+F+ATZmF0rPKrZGZOnmsh0IZm3dlmRR9FRGn0aJlZKXveqp+hZ97/r0
+cbSo6wdT47APfocgweZMvgWu1IQBs6FiunRgaeX3RyLr4fnkvCzUM7TmxpRJYtL6
+myAp007QvtgQ0AdEwVfNl3jQ0IIW7TtpXVxDDQaKZZe9yYrY4GV3etlYk8a4cpjN
+rBxBCCTMASE4+iVtPQKCAQAg3m19vWc1TlHmkeqLwgvHN0Ufdyw5axWtc8qIJGZ1
+MezhyLyD4RU0VFCSocJCCe2k2kS2P2vQERZZYcn/nCYuiswCjOCbnwKozfaTZ3Fc
+1KOCtb4EEcuk/th5XNhWCYJJ7Hasym8zuPaqh5TLcsHXp0/lQUiOV2uVHnAt503A
+HY1v4PhlZ3G0CRZMenafU0Ky7a6zhrqFvWgtSdo+vN0S9xS/KJuTaWsYgOAt4r2I
+K1uwuWuvA5L1Qrdj8pDzMLkdlyHU1Jgjzk0rNQDTbUkZX9CAi/xKUGZysjWfOn1F
+HC1vJ1sbP9nTXpWRain1/6yatB2RxLTvWYyAq9IsL/8PAiEAkY8lGryvcZI/pxXt
+XwSaXEL2d77GSGICMGZa1wOJtdEDgYQAAoGALAUljkOi1PxjjFVvhGfK95yIsrfb
+fcIEKUBaTs9NR2rbGWUeP+93paoXwP39X9wrJx2MSWeHWhWKszNgoiyqYT0k4R9m
+em3WClotxOvB5fHfwIp2kQYvE7H0/TPdGhfUpHQGYpyLQgT6L80meSKMFnu4VXGz
+OANhWDxu3JxiADCjgZQwgZEwCwYDVR0PBAQDAgMIMEIGCWCGSAGG+EIBDQQ1FjNU
+aGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9z
+ZS4wHQYDVR0OBBYEFO37wHcauyc03rDc6cDRRsHzgcK+MB8GA1UdIwQYMBaAFM1I
+ZQGDsqYHWwb+I4EMxHPk0bU4MAsGCWCGSAFlAwQDAgNHADBEAiB1LU0esRdHDvSj
+kqAm+3viU2a+hl66sLrK5lYBOYqGYAIgWG7bDxqFVP6/stHfdbeMovLejquEl9tr
+iPEBA+EDHjk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.dh_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.dhpublicnumber, spki_a['algorithm'])
+
+ spki_a_p, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc3279.DomainParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(spki_a_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(spki_a_p))
+
+ q_value = 65838278260281264030127352144753816831178774189428428256716126077244217603537
+
+ self.assertEqual(q_value, spki_a_p['q'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.dh_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc3279.dhpublicnumber, spki_a['algorithm'])
+
+ q_value = 65838278260281264030127352144753816831178774189428428256716126077244217603537
+
+ self.assertEqual(q_value, spki_a['parameters']['q'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3280.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3280.py
new file mode 100644
index 0000000000..3031335467
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3280.py
@@ -0,0 +1,79 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3280
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class CertificateListTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3280.CertificateList()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3281.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3281.py
new file mode 100644
index 0000000000..f03316f1f0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3281.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3281
+
+
+class AttributeCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDBTCCAm4CAQEwgY+gUTBKpEgwRjEjMCEGA1UEAwwaQUNNRSBJbnRlcm1lZGlh
+dGUgRUNEU0EgQ0ExCzAJBgNVBAYTAkZJMRIwEAYDVQQKDAlBQ01FIEx0ZC4CAx7N
+WqE6pDgwNjETMBEGA1UEAwwKQUNNRSBFQ0RTQTELMAkGA1UEBhMCRkkxEjAQBgNV
+BAoMCUFDTUUgTHRkLqA9MDukOTA3MRQwEgYDVQQDDAtleGFtcGxlLmNvbTELMAkG
+A1UEBhMCRkkxEjAQBgNVBAoMCUFDTUUgTHRkLjANBgkqhkiG9w0BAQsFAAIEC63K
+/jAiGA8yMDE2MDEwMTEyMDAwMFoYDzIwMTYwMzAxMTIwMDAwWjCB8jA8BggrBgEF
+BQcKATEwMC6GC3VybjpzZXJ2aWNlpBUwEzERMA8GA1UEAwwIdXNlcm5hbWUECHBh
+c3N3b3JkMDIGCCsGAQUFBwoCMSYwJIYLdXJuOnNlcnZpY2WkFTATMREwDwYDVQQD
+DAh1c2VybmFtZTA1BggrBgEFBQcKAzEpMCegGKQWMBQxEjAQBgNVBAMMCUFDTUUg
+THRkLjALDAlBQ01FIEx0ZC4wIAYIKwYBBQUHCgQxFDASMBAMBmdyb3VwMQwGZ3Jv
+dXAyMCUGA1UESDEeMA2hC4YJdXJuOnJvbGUxMA2hC4YJdXJuOnJvbGUyMGowHwYD
+VR0jBBgwFoAUgJCMhskAsEBzvklAX8yJBOXO500wCQYDVR04BAIFADA8BgNVHTcB
+Af8EMjAwMB2gCoYIdXJuOnRlc3SgD4INKi5leGFtcGxlLmNvbTAPoA2GC3Vybjph
+bm90aGVyMA0GCSqGSIb3DQEBCwUAA4GBACygfTs6TkPurZQTLufcE3B1H2707OXK
+sJlwRpuodR2oJbunSHZ94jcJHs5dfbzFs6vNfVLlBiDBRieX4p+4JcQ2P44bkgyi
+UTJu7g1b6C1liB3vO6yH5hOZicOAaKd+c/myuGb9uJ4n6y2oLNxnk/fDzpuZUe2h
+Q4eikPk4LQey
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3281.AttributeCertificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ attributeMap = {
+ rfc3281.id_at_role: rfc3281.RoleSyntax(),
+ rfc3281.id_aca_authenticationInfo: rfc3281.SvceAuthInfo(),
+ rfc3281.id_aca_accessIdentity: rfc3281.SvceAuthInfo(),
+ rfc3281.id_aca_chargingIdentity: rfc3281.IetfAttrSyntax(),
+ rfc3281.id_aca_group: rfc3281.IetfAttrSyntax(),
+ }
+
+ count = 0
+
+ for attr in asn1Object['acinfo']['attributes']:
+ self.assertIn(attr['type'], attributeMap)
+
+ av, rest = der_decoder(
+ attr['values'][0], asn1Spec=attributeMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ count += 1
+
+ self.assertEqual(5, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3370.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3370.py
new file mode 100644
index 0000000000..70d9d4215f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3370.py
@@ -0,0 +1,234 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3370
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFjAYJKoZIhvcNAQcDoIIFfTCCBXkCAQIxZqJkAgEEMCMEEH744tkBAA6gplAQ
+nKYxCF8YDzIwMTkwOTEyMTIwMDAwWjAQBgsqhkiG9w0BCRADBwIBOgQocOaZ+1cB
+94MzMPtx6HyFpCC9yZrwXSKvWg5I018xOJhsuq+0so1PNTCCBQoGCSqGSIb3DQEH
+ATAZBggqhkiG9w0DAjANAgE6BAhCT0dVU19JVoCCBOBzx7F6GMkP+C0Q4iuDq0rk
+SZprg8nuXx/4S3IMP999BrJdUAbPYxdQhAwTOZIuueyv00TJe/Eam9yyLuZXy0PF
+lTRi7KED8L8cyHsRoEobWGMLvE3D4hEhTGttElqQxBvMxZZgm6kLnNG7j8Z72L4l
+U4aARLYTQvktlJnnfCaccDSiWzU8eXcXdnZAzcKR7CoDc0/XBpdDRddvQ7KXoarX
+YHuSybt649YDcpy0SN9gEPqcFPrBB3nusAx4VOTlpx5Z3ZJv/TEymN8KDobNfykB
+ZURTwupO9WaVJZ3Hd/d8C1SCJn6DHuM1jwDp26WfzO8xCfea08MJrnQbNKsDHwmt
+4dFZIOvcOnwR8nNSB/Lt1aUj3GzluHVMyQQyT4AdZDmwFdNmQOBUBLmbWYhtd7t3
+O7Eqx8bGNa7V7LL0nvua04aj1oA6ph/G/8jxhByBYdN5Bwg7f1Ga3ZCwju2tFoQn
+WOCPYTVOjmBEJshBbNC7KhLpp9+C7/13A9cIC3T7Reuc7m+Fopf9Fabu97yFiyJP
+S8jSF0EnesNGR1L1Uvo2Wdc66iECoSrxvezaSgGKB2uLTnaFx4ASVMcP7gDipEOI
+wuUUuVCqgmWkHAK0Q9mwhBLLrYrsn9OjDHFpvkWgWNRMLl/v3E9A+grFh2BQHkB4
+C7keB1ZOfj1SqDi/+ylM9I1FOYMxVXJn2qHMl+QOkfdMoIATm3n3DiBI97/uX4x5
+KaX074v0dN31WeDcsFsh2ze5Dhx8vLJCaXLzWqkmNHX5G/CjjqE6bSR/awgWLRZQ
+uY/9fMvDpvVJuId/+OoWDtMVPIsyQ8w8yZzv+SkuZhsrJMHiKd5qxNQv5sOvC765
+LMUCNNwj7WzPhajintFXLAEMpIjk5xt3eIy3hdYla3PQoFfqcHOVX4EFMLBoYwBT
+gik8Fg669yXtMlbH84MGNs7jObhP/rrDkgbe0qmxUyzgm2uHya1VcItMGYoPPKMF
+U3ZfwAsZdqsi1GAtruTzSUmOpMfAoKOIAyZP96HrsrPCaoGrn7ysm5eRrHQ2hdwO
+7rGQIw0dRAFh2eyRomoLam7yEiw9M6uHuJ5hIS5yEW+7uUjQT6nvKlbrkIyLL5j9
+Gbk5Z4fOMqRTkBs+3H8x7a+lBEKBo/ByJm6fHYi+LX5ZhQFTWkY0M7tfPtrxQdsN
+RGSHtv7jS7PZ3thCMqCtkG/pjAsCbDUtMThtP08z2fstE6dfy7qSx6LzKLDyBl5W
+76mVYdsX7Q72yIoCDFmUGdrRcWA+l3OMwNNL+x9MhhdaUWPtxqaGyZMNGOjkbYHb
+XZ69oqYqCHkAstIVKTzpk3kq9C9x+ynzWO8kIGYNK2uxSBIzPLQ6Daq4c53rWFFN
+WVjPC8m98zMcYp0hbBhRsdk4qj8osSTcTfpT0+Q+hkYQvZl4IfgX1aHeaCDSScF8
+SaU+cZ7GYFvLo1cYrtVbeXrFwmWl0xpco1Ux+XZgryT/fgfJ+3ToppgsQmzECqTW
+mYsSYaF1kLU4Cqi9UH/VqBLOkwxoH05Zao2xOMNzu2QO3wFnvY2wBsIj1eaxfzVb
+42o9vom7V20jT1ufXXctf9ls5J1WJxBxdKmXQWdNloeAcl1AtxTbw7vIUU5uWqu9
+wwqly11MDVPAb0tcQW20auWmCNkXd52jQJ7PXR6kr5I=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, kwa['algorithm'])
+ kwa_param, rest = der_decoder(
+ kwa['parameters'], rfc3370.RC2wrapParameter())
+ self.assertFalse(rest)
+ self.assertTrue(kwa_param.prettyPrint())
+ self.assertEqual(kwa['parameters'], der_encoder(kwa_param))
+ self.assertEqual(58, kwa_param)
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3370.rc2CBC, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], rfc3370.RC2CBCParameter())
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, param['iv'])
+ self.assertEqual(58, param['rc2ParameterVersion'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
+
+ ri0 = asn1Object['content']['recipientInfos'][0]
+ kwa = ri0['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, kwa['algorithm'])
+ self.assertEqual(58, kwa['parameters'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3370.rc2CBC, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f4755535f4956')
+ self.assertEqual(iv, cea['parameters']['iv'])
+ self.assertEqual(58, cea['parameters']['rc2ParameterVersion'])
+
+class DSAPublicKeyTestCase(unittest.TestCase):
+ dsa_cert_pem_text = """\
+MIIDpjCCA0ygAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAxMDE5MjAxMjMw
+WjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQCLpR53
+xHfe+SiknAK/L9lm/ZO1109c9iYkriPIW/5MMlM+qc/tdRkKpG6ELIpfXTPtKCJm
+zqqVIyTmAJryyE8Xw0Ie2mzYPU5ULvKmllQkjTsWgPGgQBkciZ0AW9ggD9VwZilg
+4qh3iSO7T97hVQFnpCh6vm8pOH6UP/5kpr9ZJQIVANzdbztBJlJfqCB1t4h/NvSu
+wCFvAoGAITP+jhYk9Rngd98l+5ccgauQ+cLEUBgNG2Wq56zBXQbLou6eKkQi7ecL
+NiRmExq3IU3LOj426wSxL72Kw6FPyOEv3edIFkJJEHL4Z+ZJeVe//dzya0ddOJ7k
+k6qNF2ic+viD/5Vm8yRyKiig2uHH/MgIesLdZnvbzvX+f/P0z50DgYQAAoGALAUl
+jkOi1PxjjFVvhGfK95yIsrfbfcIEKUBaTs9NR2rbGWUeP+93paoXwP39X9wrJx2M
+SWeHWhWKszNgoiyqYT0k4R9mem3WClotxOvB5fHfwIp2kQYvE7H0/TPdGhfUpHQG
+YpyLQgT6L80meSKMFnu4VXGzOANhWDxu3JxiADCjgZQwgZEwCwYDVR0PBAQDAgeA
+MEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVz
+dGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFO37wHcauyc03rDc6cDRRsHz
+gcK+MB8GA1UdIwQYMBaAFM1IZQGDsqYHWwb+I4EMxHPk0bU4MAsGCWCGSAFlAwQD
+AgNHADBEAiBBRbfMzLi7+SVyO8SM3xxwUsMf/k1B+Nkvf1kBTfCfGwIgSAx/6mI+
+pNqdXqZZGESXy1MT1aBc4ynPGLFUr2r7cPY=
+"""
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.dsa_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki = asn1Object['tbsCertificate']['subjectPublicKeyInfo']
+ self.assertEqual(rfc3370.id_dsa, spki['algorithm']['algorithm'])
+ pk_substrate = spki['subjectPublicKey'].asOctets()
+
+ pk, rest = der_decoder(pk_substrate, asn1Spec=rfc3370.Dss_Pub_Key())
+ self.assertFalse(rest)
+ self.assertTrue(pk.prettyPrint())
+ self.assertEqual(pk_substrate, der_encoder(pk))
+
+ self.assertEqual(48, pk % 1024)
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MGIwDAYIKwYBBQUIAQIFADAfBgsqhkiG9w0BCRADBTAQBgsqhkiG9w0BCRADBwIB
+OjAfBgsqhkiG9w0BCRADCjAQBgsqhkiG9w0BCRADBwIBOjAQBgsqhkiG9w0BCRAD
+BwIBOg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg_param = False
+ for cap in asn1Object:
+ if cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys():
+ if cap['parameters'].hasValue():
+ param, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(param))
+
+ if cap['capabilityID'] == rfc3370.id_alg_ESDH:
+ kwa, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(kwa.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(kwa))
+
+ self.assertTrue(kwa['algorithm'] in rfc5280.algorithmIdentifierMap.keys())
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, kwa['algorithm'])
+ kwa_p, rest = der_decoder(
+ kwa['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[kwa['algorithm']])
+ self.assertFalse(rest)
+ self.assertTrue(kwa_p.prettyPrint())
+ self.assertEqual(kwa['parameters'], der_encoder(kwa_p))
+ self.assertEqual(58, kwa_p)
+ found_wrap_alg_param = True
+
+ self.assertTrue(found_wrap_alg_param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_wrap_alg_param = False
+ for cap in asn1Object:
+ if cap['capabilityID'] == rfc3370.id_alg_ESDH:
+ self.assertEqual(rfc3370.id_alg_CMSRC2wrap, cap['parameters']['algorithm'])
+ self.assertEqual(58, cap['parameters']['parameters'])
+ found_wrap_alg_param = True
+
+ self.assertTrue(found_wrap_alg_param)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
+
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3447.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3447.py
new file mode 100644
index 0000000000..8788691208
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3447.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3447
+
+
+# openssl genrsa -primes 3 -f4 -out multiprime.key
+
+class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIE2QIBAQKCAQEAn82EqwXasE2TFNSmZucB8LNza2mOWLHF3nxpxKXalPMDvezc
+5Dq7Ytcv/k9jJL4j4jYfvR4yyZdU9iHLaD6hOINZ8E6hVpx/4c96ZUSOLzD2g+u+
+jIuoNfG+zygSBGYCS6BLCAIsZ+2wUyxYpLJknHJld9/jy+aLmmyrilhH9dH5AUiV
+3NeWht/68++dMXf4ZI/gV4bMSlWhggxkz2WJJpiQdCdJatGkwNDkHmLA9X0tC6OH
+SPE7qYdxG38cYS5F445SgnhDpiK7BodSqYLwgehaDjoOYdEgHVnOcpBCDI5zCJSL
+b1c/z8uhrB1xxlECR44wCLcKsIIYQxaEErRJ/wIDAQABAoIBAD+Ra5L0szeqxDVn
+GgKZJkZvvBwgU0WpAgMtDo3xQ/A4c2ab0IrhaiU5YJgTUGcPVArqaNm8J4vVrTBz
+5QxEzbFDXwWe4cMoYh6bgB7ElKLlIUr8/kGZUfgc7kI29luEjcAIEAC2/RQHesVn
+DHkL5OzqZL+4fIwckAMh0tXdflsPgZ/jgIaKca4OqKu4KGnczm3UvqtlvwisAjkx
+zMyfZXOLn0vEwP2bfbhQrCVrP7n6a+CV+Kqm8NBWnbiS6x2rWemVVssNTbfXQztq
+wC6ZJZCLK7plciDBWvHcS6vxdcsS9DUxuqSV6o/stCGTl1D+9tDx8Od0Eunna2B2
+wAoRHZECVgbNO1bqwfYpp5aFuySWoP+KZz8f/5ZkHjLwiNGpQcqVd4+7Ql2R4qgF
+NgSoQQOZFhKtiOeLVU0HYfp6doI4waSINZdF/fJDHD6fY3AMOc/IIMDHHIzbAlYG
+vKOocLXWj/2+gcyQ1XoAmrE70aIFUBLSvd7RCi8GI74zYWp5lCSvO850Z4GsWSZT
+41iF13sTDDJPm3+BbzMvEu2GuACi/8/IpbUr24/FP9Cp1Rf7kwJWAgMxfoshbrNu
+ebQB5laHNnT+DYhrOFVRNiNDaD2bUNSetrFidosWtD4ueHxMGENwa4BbFJ9+UrdP
+fyxC6k7exM7khGjaNZczwTep1VpYtKjzP/bp9KcCVgYoj9s9HZ1FCAsNEPodjGfd
+AcPTQS9mIa7wzy19B7uvFQJXPURi/p4KKBMVQ99Pp8/r9lJzxxiEf8FyPr8N7lZM
+EUKkFkDrZQDhKpsrHWSNj6yRFlltAlYC7dYR8KLEWoOUATLosxQhwgypv+23r+d4
+ZdPOdDv9n8Kmj+NFy/oISFfdXzlOU4RWQtMx3hEwAabwct7vjiJEej/kmiTqco02
+17tt13VvvQ5ZXF73dDCCAQwwggEIAlYDfMpM1WNfxcLLOgkRZ+0S9OvIrEOi0ALV
+SquTdi/thhCuCsK3lMD4miN9te8j16YtqEFVWXC3a6DWwIJ6m/xZ50bBwPqM8RsI
+6FWhZw4Dr5VqjYXUvwJWAvapRk9SydDYri/cAtGIkUJVlspkE1emALAaSw30vmfd
+hrgYLT6YGOmK3UmcNJ4NVeET275MXWF1ZOhkOGKTN6aj5wPhJaHBMnmUQrq7GwC6
+/LfUkSsCVgMCDTV9gbFW8u6TcTVW85dBIeUGxZh1T2pbU3dkGO3IOxOhzJUplH4/
+EeEs9dusHakg1ERXAg4Vo1YowPW8kuVbZ9faxeVrmuER5NcCuZzS5X/obGUw
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3447.RSAPrivateKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3537.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3537.py
new file mode 100644
index 0000000000..1b7490b002
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3537.py
@@ -0,0 +1,76 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3537
+from pyasn1_modules import rfc5751
+
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "MCIwDwYLKoZIhvcNAQkQAwwFADAPBgsqhkiG9w0BCRADCwUA"
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ alg_oid_list = [
+ rfc3537.id_alg_HMACwithAESwrap,
+ rfc3537.id_alg_HMACwith3DESwrap,
+ ]
+
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for cap in asn1Object:
+ self.assertEqual(der_encoder(univ.Null("")), cap['parameters'])
+ self.assertTrue(cap['capabilityID'] in alg_oid_list)
+ count += 1
+
+ self.assertEqual(count, 2)
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ rfc3537.id_alg_HMACwithAESwrap: univ.Null(""),
+ rfc3537.id_alg_HMACwith3DESwrap: univ.Null(""),
+ }
+
+ asn1Spec=rfc5751.SMIMECapabilities()
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypesMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for cap in asn1Object:
+ self.assertEqual(univ.Null(""), cap['parameters'])
+ self.assertTrue(cap['capabilityID'] in openTypesMap.keys())
+ count += 1
+
+ self.assertEqual(count, 2)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3560.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3560.py
new file mode 100644
index 0000000000..3419cdea7c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3560.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3560
+
+
+class OAEPDefautTestCase(unittest.TestCase):
+ oaep_default_pem_text = "MAsGCSqGSIb3DQEBBw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_default_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3560.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class OAEPSHA256TestCase(unittest.TestCase):
+ oaep_sha256_pem_text = "MDwGCSqGSIb3DQEBBzAvoA8wDQYJYIZIAWUDBAIBBQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAIBBQA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_sha256_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3560.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class OAEPFullTestCase(unittest.TestCase):
+ oaep_full_pem_text = "MFMGCSqGSIb3DQEBBzBGoA8wDQYJYIZIAWUDBAICBQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAICBQCiFTATBgkqhkiG9w0BAQkEBmZvb2Jhcg=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_full_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3560.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3565.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3565.py
new file mode 100644
index 0000000000..58574ec22c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3565.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3565
+
+
+class AESKeyWrapTestCase(unittest.TestCase):
+ kw_alg_id_pem_text = "MAsGCWCGSAFlAwQBLQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc3565.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kw_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3565.id_aes256_wrap, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class AESCBCTestCase(unittest.TestCase):
+ aes_alg_id_pem_text = "MB0GCWCGSAFlAwQBKgQQEImWuoUOPwM5mTu1h4oONw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc3565.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.aes_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3565.id_aes256_CBC, asn1Object[0])
+ self.assertTrue(asn1Object[1].isValue)
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.aes_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate,
+ asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc3565.id_aes256_CBC, asn1Object[0])
+
+ aes_iv = univ.OctetString(hexValue='108996ba850e3f0339993bb5878a0e37')
+
+ self.assertEqual(aes_iv, asn1Object[1])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3657.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3657.py
new file mode 100644
index 0000000000..12b49dc884
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3657.py
@@ -0,0 +1,167 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3657
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFfwYJKoZIhvcNAQcDoIIFcDCCBWwCAQIxU6JRAgEEMCMEECBlcTFnxBsPlsug
+4KOCj78YDzIwMTkwOTEyMTIwMDAwWjANBgsqgwiMmks9AQEDAgQYS3mK9jQmvth1
+iuBV8PEa89ICvmoomJCvMIIFEAYJKoZIhvcNAQcBMB8GCyqDCIyaSz0BAQECBBBC
+T0dVU0lWX0JPR1VTSVYhgIIE4HPHsXoYyQ/4LRDiK4OrSuRJmmuDye5fH/hLcgw/
+330Gsl1QBs9jF1CEDBM5ki657K/TRMl78Rqb3LIu5lfLQ8WVNGLsoQPwvxzIexGg
+ShtYYwu8TcPiESFMa20SWpDEG8zFlmCbqQuc0buPxnvYviVThoBEthNC+S2Umed8
+JpxwNKJbNTx5dxd2dkDNwpHsKgNzT9cGl0NF129Dspehqtdge5LJu3rj1gNynLRI
+32AQ+pwU+sEHee6wDHhU5OWnHlndkm/9MTKY3woOhs1/KQFlRFPC6k71ZpUlncd3
+93wLVIImfoMe4zWPAOnbpZ/M7zEJ95rTwwmudBs0qwMfCa3h0Vkg69w6fBHyc1IH
+8u3VpSPcbOW4dUzJBDJPgB1kObAV02ZA4FQEuZtZiG13u3c7sSrHxsY1rtXssvSe
++5rThqPWgDqmH8b/yPGEHIFh03kHCDt/UZrdkLCO7a0WhCdY4I9hNU6OYEQmyEFs
+0LsqEumn34Lv/XcD1wgLdPtF65zub4Wil/0Vpu73vIWLIk9LyNIXQSd6w0ZHUvVS
++jZZ1zrqIQKhKvG97NpKAYoHa4tOdoXHgBJUxw/uAOKkQ4jC5RS5UKqCZaQcArRD
+2bCEEsutiuyf06MMcWm+RaBY1EwuX+/cT0D6CsWHYFAeQHgLuR4HVk5+PVKoOL/7
+KUz0jUU5gzFVcmfaocyX5A6R90yggBObefcOIEj3v+5fjHkppfTvi/R03fVZ4Nyw
+WyHbN7kOHHy8skJpcvNaqSY0dfkb8KOOoTptJH9rCBYtFlC5j/18y8Om9Um4h3/4
+6hYO0xU8izJDzDzJnO/5KS5mGyskweIp3mrE1C/mw68LvrksxQI03CPtbM+FqOKe
+0VcsAQykiOTnG3d4jLeF1iVrc9CgV+pwc5VfgQUwsGhjAFOCKTwWDrr3Je0yVsfz
+gwY2zuM5uE/+usOSBt7SqbFTLOCba4fJrVVwi0wZig88owVTdl/ACxl2qyLUYC2u
+5PNJSY6kx8Cgo4gDJk/3oeuys8JqgaufvKybl5GsdDaF3A7usZAjDR1EAWHZ7JGi
+agtqbvISLD0zq4e4nmEhLnIRb7u5SNBPqe8qVuuQjIsvmP0ZuTlnh84ypFOQGz7c
+fzHtr6UEQoGj8HImbp8diL4tflmFAVNaRjQzu18+2vFB2w1EZIe2/uNLs9ne2EIy
+oK2Qb+mMCwJsNS0xOG0/TzPZ+y0Tp1/LupLHovMosPIGXlbvqZVh2xftDvbIigIM
+WZQZ2tFxYD6Xc4zA00v7H0yGF1pRY+3GpobJkw0Y6ORtgdtdnr2ipioIeQCy0hUp
+POmTeSr0L3H7KfNY7yQgZg0ra7FIEjM8tDoNqrhznetYUU1ZWM8Lyb3zMxxinSFs
+GFGx2TiqPyixJNxN+lPT5D6GRhC9mXgh+BfVod5oINJJwXxJpT5xnsZgW8ujVxiu
+1Vt5esXCZaXTGlyjVTH5dmCvJP9+B8n7dOimmCxCbMQKpNaZixJhoXWQtTgKqL1Q
+f9WoEs6TDGgfTllqjbE4w3O7ZA7fAWe9jbAGwiPV5rF/NVvjaj2+ibtXbSNPW59d
+dy1/2WzknVYnEHF0qZdBZ02Wh4ByXUC3FNvDu8hRTm5aq73DCqXLXUwNU8BvS1xB
+bbRq5aYI2Rd3naNAns9dHqSvkg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_wrap, kwa['algorithm'])
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_cbc, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], asn1Spec=rfc3657.Camellia_IV())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = rfc3657.Camellia_IV(hexValue='424f47555349565f424f475553495621')
+ self.assertEqual(iv, param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
+
+ kekri = asn1Object['content']['recipientInfos'][0]['kekri']
+ kwa = kekri['keyEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_wrap, kwa['algorithm'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc3657.id_camellia128_cbc, cea['algorithm'])
+
+ iv = rfc3657.Camellia_IV(hexValue='424f47555349565f424f475553495621')
+ self.assertEqual(iv, cea['parameters'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MGYwDwYLKoMIjJpLPQEBAQIFADAPBgsqgwiMmks9AQEBAwUAMA8GCyqDCIyaSz0B
+AQEEBQAwDwYLKoMIjJpLPQEBAwIFADAPBgsqgwiMmks9AQEDAwUAMA8GCyqDCIya
+Sz0BAQMEBQA=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ alg_oid_list = [
+ rfc3657.id_camellia128_cbc,
+ rfc3657.id_camellia192_cbc,
+ rfc3657.id_camellia256_cbc,
+ rfc3657.id_camellia128_wrap,
+ rfc3657.id_camellia192_wrap,
+ rfc3657.id_camellia256_wrap,
+ ]
+
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ param = der_encoder(rfc3657.CamelliaSMimeCapability(""))
+ count = 0
+ for cap in asn1Object:
+ self.assertEqual(cap['parameters'], param)
+ self.assertTrue(cap['capabilityID'] in alg_oid_list)
+ count += 1
+
+ self.assertEqual(count, 6)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ param = rfc3657.CamelliaSMimeCapability("")
+ count = 0
+ for cap in asn1Object:
+ self.assertTrue(cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys())
+ self.assertEqual(cap['parameters'], param)
+ count += 1
+
+ self.assertEqual(count, 6)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3709.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3709.py
new file mode 100644
index 0000000000..dcab4b6e8c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3709.py
@@ -0,0 +1,194 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3709
+
+
+class CertificateExtnWithUrlTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC9zCCAn2gAwIBAgIJAKWzVCgbsG46MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNTE0MTAwMjAwWhcNMjAwNTEzMTAwMjAwWjBlMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
+Z2lsIFNlY3VyaXR5IExMQzEaMBgGA1UEAxMRbWFpbC52aWdpbHNlYy5jb20wdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAATwUXZUseiOaqWdrClDCMbp9YFAM87LTmFirygp
+zKDU9cfqSCg7zBDIphXCwMcS9zVWDoStCbcvN0jw5CljHcffzpHYX91P88SZRJ1w
+4hawHjOsWxvM3AkYgZ5nfdlL7EajggEdMIIBGTALBgNVHQ8EBAMCB4AwQgYJYIZI
+AYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9y
+IGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYD
+VR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwgYUGCCsGAQUFBwEMBHkwd6J1
+oHMwcTBvMG0WCWltYWdlL3BuZzAzMDEwDQYJYIZIAWUDBAIBBQAEIJtBNrMSSNo+
+6Rwqwctmcy0qf68ilRuKEmlf3GLwGiIkMCsWKWh0dHA6Ly93d3cudmlnaWxzZWMu
+Y29tL3ZpZ2lsc2VjX2xvZ28ucG5nMAoGCCqGSM49BAMDA2gAMGUCMGhfLH4kZaCD
+H43A8m8mHCUpYt9unT0qYu4TCMaRuOTYEuqj3qtuwyLcfAGuXKp/oAIxAIrPY+3y
+Pj22pmfmQi5w21UljqoTj/+lQLkU3wfy5BdVKBwI0GfEA+YL3ctSzPNqAA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc3709.id_pe_logotype:
+ s = extn['extnValue']
+ logotype, rest = der_decoder(s, rfc3709.LogotypeExtn())
+
+ self.assertFalse(rest)
+ self.assertTrue(logotype.prettyPrint())
+ self.assertEqual(s, der_encoder(logotype))
+
+ ids = logotype['subjectLogo']['direct']['image'][0]['imageDetails']
+
+ self.assertEqual( "image/png", ids['mediaType'])
+
+ expected = "http://www.vigilsec.com/vigilsec_logo.png"
+ self.assertEqual(expected, ids['logotypeURI'][0])
+
+ self.assertIn(rfc3709.id_pe_logotype, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+
+class CertificateExtnWithDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIIJJDCCCAygAwIBAgIRAPIGo/5ScWbpAAAAAFwQBqkwDQYJKoZIhvcNAQELBQAw
+gbkxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
+Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
+MjAxOCBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxLTAr
+BgNVBAMTJEVudHJ1c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gVk1DMTAeFw0x
+OTA4MzAxNDMyMzlaFw0yMDAyMjUxNTAyMzZaMIIBjTEOMAwGA1UEERMFMTAwMTcx
+CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazERMA8GA1UEBxMITmV3IFlv
+cmsxGDAWBgNVBAkTDzI3MCBQYXJrIEF2ZW51ZTETMBEGCysGAQQBgjc8AgEDEwJV
+UzEZMBcGCysGAQQBgjc8AgECEwhEZWxhd2FyZTEfMB0GA1UEChMWSlBNb3JnYW4g
+Q2hhc2UgYW5kIENvLjEdMBsGA1UEDxMUUHJpdmF0ZSBPcmdhbml6YXRpb24xNzA1
+BgNVBAsTLkpQTUMgRmlyc3QgVmVyaWZpZWQgTWFyayBDZXJ0aWZpY2F0ZSBXb3Js
+ZHdpZGUxDzANBgNVBAUTBjY5MTAxMTEXMBUGCisGAQQBg55fAQQTBzIwMTUzODkx
+EjAQBgorBgEEAYOeXwEDEwJVUzEmMCQGCisGAQQBg55fAQITFmh0dHBzOi8vd3d3
+LnVzcHRvLmdvdi8xHzAdBgNVBAMTFkpQTW9yZ2FuIENoYXNlIGFuZCBDby4wggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCNLY+etlX06q1MxA1VT/P20h1i
+eFGTzX4fqSQNG+ypmjNfLa8YXraO1v1hahenkRUWrVPW0Hq3zKNJcCDmosox6+tB
+59u0b1xgN8y8D05AEC7qoVVdbaWKENMxCN4CDfST6d3YOqApjqEFAGZ71s39tRRG
+kmWGJb4jKXcUX8FWV8w/vjKrpipZ8JsX2tuOp2uxFLkmi+V7gvN8tpbHUipP5K7L
+190VOBytSWPudXefnYG3UWRfwah7Fq1bKYT/cCwStUm8XlfA8nUumeVsAiyC6phs
+adn26MYiSddsBU08TGthmunLAO0+shaBy6jHYZxMa37S67vVlDpxbeF+TPVXAgMB
+AAGjggROMIIESjATBgorBgEEAdZ5AgQDAQH/BAIFADCCArAGCCsGAQUFBwEMBIIC
+ojCCAp6iggKaoIICljCCApIwggKOMIICihYNaW1hZ2Uvc3ZnK3htbDAzMDEwDQYJ
+YIZIAWUDBAIBBQAEIBnwW6ChGgWWIRn3qn/xGAOlhDflA3z5jhZcZTNDlxF5MIIC
+QhaCAj5kYXRhOmltYWdlL3N2Zyt4bWw7YmFzZTY0LEg0c0lBQUFBQUFBQUFJV1Iz
+V3JqTUJCR3I1dW5tR3F2Rml4NUpQODBObkZLRTVhbTRFSmhJYmVMazZpT1dhOXRa
+TWQyOXVrN2NsTG9SV25CMHNENGNPYVR0TGdmLzVYUWE5TVdkWlV3S1pDQnJ2YjFv
+YWp5aEoyNlZ6NW45OHZaNHBaemVOU1ZObGxYbXhnZUR2Vk93MU5abnRwdWFvRlNB
+b1YwNFBmMkVYNk5UVzA2ZUNsUE9YK3FRRXpON1dWR0RLRkFoTldwS0ErQVB3RTRK
+MzNiNXg5REtBYTdyTlV2cG40dFNwMndycWpPRElwRHd0THNyTTBmeVlCaVYyM0Nq
+bDNYeEs0N0RJTVlQRkdiM0ZXSTZKTHZpc1JqV1ZSL1B3TmxGRVh1OUpmTmJtQk1H
+RFlqZy9PMTlvVWVWclh0QWtJWTBEY0o0N2JKOXBTb01iclZwdGVNd3VmTDJjMml5
+Ym9qVU5veVlUOFFnL1VxWWtCNW41VW5QQWZYU2pub0tPbEl1eW5oOVRJVTh1Z3JF
+YVMrVC9lRzZRWDh6OXl2YkdIZ0VLZjJ5S1h3dU9Sa2VsOGJQeFJoUHhtSnN0TDBT
+bi9qOUtXWU8yR3dsM2EremNhbmhOYTV0YzZORkdHcVVFUUVwVmY0R3lVNnhOMnRx
+WGgwWXQrM1BpcEhlK2l0cElRMGg0VHBoWnRrQ3plM0d6M2NjdllHbkp0cjZKVUNB
+QUE9MCIGA1UdEQQbMBmCF2V4Y2hhZGRldi5sYWJtb3JnYW4uY29tMBMGA1UdJQQM
+MAoGCCsGAQUFBwMfMA4GA1UdDwEB/wQEAwIHgDBmBggrBgEFBQcBAQRaMFgwIwYI
+KwYBBQUHMAGGF2h0dHA6Ly9vY3NwLmVudHJ1c3QubmV0MDEGCCsGAQUFBzAChiVo
+dHRwOi8vYWlhLmVudHJ1c3QubmV0L3ZtYzEtY2hhaW4uY2VyMDIGA1UdHwQrMCkw
+J6AloCOGIWh0dHA6Ly9jcmwuZW50cnVzdC5uZXQvdm1jMWNhLmNybDBPBgNVHSAE
+SDBGMDYGCmCGSAGG+mwKAQswKDAmBggrBgEFBQcCARYaaHR0cDovL3d3dy5lbnRy
+dXN0Lm5ldC9ycGEwDAYKKwYBBAGDnl8BATAfBgNVHSMEGDAWgBSLtjl20DSQpj9i
+4WTqPrz0fEahczAdBgNVHQ4EFgQUxAJ+yoDhzpPUzAPWKBYxg108dU0wCQYDVR0T
+BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAnqdB/vcwxFcxAlyCK0W5HOthXUdXRg9a
+GwPDupqmLq2rKfyysZXonJJfr8jqO0f3l6TWTTJlXHljAwwXMtg3T3ngLyEzip5p
+g0zH7s5eXjmWRhOeuHt21o611bXDbUNFTF0IpbYBTgOwAz/+k3XLVehf8dW7Y0Lr
+VkzxJ6U82NxmqjaAnkm+H127x5/jPAr4LLD4gZfqFaHzw/ZLoS+fXFGs+dpuYE4s
+n+xe0msYMu8qWABiMGA+MCKl45Dp5di+c2fyXtKyQ3rKI8XXZ0nN4bXK7DZd+3E3
+kbpmR6cDliloU808Bi/erMkrfUHRoZ2d586lkmwkLcoDkJ/yPD+Jhw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc3709.id_pe_logotype:
+ s = extn['extnValue']
+ logotype, rest = der_decoder(s, rfc3709.LogotypeExtn())
+ self.assertFalse(rest)
+
+ self.assertTrue(logotype.prettyPrint())
+ self.assertEqual(s, der_encoder(logotype))
+
+ ids = logotype['subjectLogo']['direct']['image'][0]['imageDetails']
+
+ self.assertEqual("image/svg+xml", ids['mediaType'])
+ self.assertEqual(
+ "data:image/svg+xml;base64", ids['logotypeURI'][0][0:25])
+
+ self.assertIn(rfc3709.id_pe_logotype, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3739.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3739.py
new file mode 100644
index 0000000000..3c4ce3a4df
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3739.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import error
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3739
+
+
+class QCCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIFLTCCBBWgAwIBAgIMVRaIE9MInBkG6aUaMA0GCSqGSIb3DQEBCwUAMHMxCzAJ
+BgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRowGAYDVQQLExFG
+b3IgRGVtbyBVc2UgT25seTEtMCsGA1UEAxMkR2xvYmFsU2lnbiBEZW1vIElzc3Vp
+bmcgQ0EgLSBTdGFnaW5nMB4XDTE4MDYxNTA1MTgxNFoXDTE5MDYxNjA1MTgxNFow
+WjELMAkGA1UEBhMCQkUxGTAXBgNVBAMTEFRlc3QgQ2VydGlmaWNhdGUxEjAQBgNV
+BAUTCTEyMzQ1Njc4OTENMAsGA1UEKhMEVGVzdDENMAsGA1UEBBMEVGVzdDCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL/tsE2EIVQhpkZU5XmFR6FAq9ou
+k8FWbyku5M7S2JT3c6OFMQiVgu6nfqdsl4rzojhUXQtMOnO7sUqcIedmwqRIR/jd
+X+ELqGGRHodZt94Tjf6Qgn2Wv/EgG0EIwsOAisGKr4qTNs6ZmVMqQ3I4+l9Ik5eM
+whr9JfrhSxrXDzoh8Prc9lNjQbk+YKXw0zLmVxW7GAu9zTr98GF+HapIhNQbvqOc
+fHoY5svla5MqoRXagfrw/w2fSaO/LT+AFsZYODVpvCg/X3xsknoG7TDIeZ8Hmlgq
+Mvg9l9VA2JbSv1C38SeOm0Hfv0l0fspZPSrtmbYlvBtQoO1X/GhQXvE7UvMCAwEA
+AaOCAdgwggHUMA4GA1UdDwEB/wQEAwIGQDCBkQYIKwYBBQUHAQEEgYQwgYEwQQYI
+KwYBBQUHMAKGNWh0dHA6Ly9zZWN1cmUuc3RhZ2luZy5nbG9iYWxzaWduLmNvbS9n
+c2RlbW9zaGEyZzMuY3J0MDwGCCsGAQUFBzABhjBodHRwOi8vb2NzcDIuc3RhZ2lu
+Zy5nbG9iYWxzaWduLmNvbS9nc2RlbW9zaGEyZzMwWQYDVR0gBFIwUDBDBgsrBgEE
+AaAyASgjAjA0MDIGCCsGAQUFBwIBFiZodHRwczovL3d3dy5nbG9iYWxzaWduLmNv
+bS9yZXBvc2l0b3J5LzAJBgcEAIvsQAECMAkGA1UdEwQCMAAwQwYDVR0fBDwwOjA4
+oDagNIYyaHR0cDovL2NybC5zdGFnaW5nLmdsb2JhbHNpZ24uY29tL2dzZGVtb3No
+YTJnMy5jcmwwLQYIKwYBBQUHAQMEITAfMAgGBgQAjkYBATATBgYEAI5GAQYwCQYH
+BACORgEGATAUBgNVHSUEDTALBgkqhkiG9y8BAQUwHQYDVR0OBBYEFNRFutzxY2Jg
+qilbYWe86em0QQC+MB8GA1UdIwQYMBaAFBcYifCc7R2iN5qLgGGRDT/RWZN6MA0G
+CSqGSIb3DQEBCwUAA4IBAQCMJeiaEAu45PetKSoPEnJ5t4MYr4dUl/HdnV13WEUW
+/34yHDGuubTFqJ6sM7P7dO25kdNOr75mR8yc0+gsGJv5K5C7LXfk36ofDlVQm0RJ
+3LTRhCvnJIzvuc5R52QW3MvB0EEPd1sfkpGgyTdK8zYZkwCXrWgMuPhBG/kgTiN0
+65qitL/WfkcX9SXmsYuV1a3Tsxz+6/rTtxdZfXSJgaVCOWHGyXCvpAQM/4eH5hSj
+UfTNwEMrE4sw4k9F90Sp8Wx24sMRDTIpnEXh3ceZSzBN2OYCIO84GaiZDpSvvkYN
+Iwtui+Wql/HveMqbAtXkiv9GDXYZms3HBoIaCVuDaUf6
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc3739.id_pe_qcStatements:
+ s = extn['extnValue']
+ qc_stmts, rest = der_decoder(s, rfc3739.QCStatements())
+ self.assertFalse(rest)
+ self.assertTrue(qc_stmts.prettyPrint())
+ self.assertEqual(s, der_encoder(qc_stmts))
+
+ for qcs in qc_stmts:
+ count += 1
+
+ self.assertEqual(2, count)
+
+ def testExtensionsMap(self):
+
+ class SequenceOfOID(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+ openTypesMap = {
+ univ.ObjectIdentifier('0.4.0.1862.1.6'): SequenceOfOID()
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ found_qc_stmt_oid = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc3739.id_pe_qcStatements:
+ qc_stmts, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ openTypes=openTypesMap,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(qc_stmts.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(qc_stmts))
+
+ for qcs in qc_stmts:
+ count += 1
+ if qcs['statementId'] in openTypesMap.keys():
+ for oid in qcs['statementInfo']:
+ if oid == univ.ObjectIdentifier('0.4.0.1862.1.6.1'):
+ found_qc_stmt_oid = True
+
+ self.assertEqual(2, count)
+ self.assertTrue(found_qc_stmt_oid)
+
+class WithComponentsTestCase(unittest.TestCase):
+
+ def testDerCodec(self):
+ si = rfc3739.SemanticsInformation()
+ self.assertRaises(error.PyAsn1Error, der_encoder, si)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3770.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3770.py
new file mode 100644
index 0000000000..667ab249fe
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3770.py
@@ -0,0 +1,95 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3770
+
+
+class CertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICqzCCAjCgAwIBAgIJAKWzVCgbsG4/MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNzE5MTk0MjQ3WhcNMjAwNzE4MTk0MjQ3WjBjMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
+Z2lsIFNlY3VyaXR5IExMQzEYMBYGA1UEAxMPZWFwLmV4YW1wbGUuY29tMHYwEAYH
+KoZIzj0CAQYFK4EEACIDYgAEMMbnIp2BUbuyMgH9HhNHrh7VBy7ql2lBjGRSsefR
+Wa7+vCWs4uviW6On4eem5YoP9/UdO7DaIL+/J9/3DJHERI17oFxn+YWiE4JwXofy
+QwfSu3cncVNMqpiDjEkUGGvBo4HTMIHQMAsGA1UdDwQEAwIHgDBCBglghkgBhvhC
+AQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55
+IHB1cnBvc2UuMB0GA1UdDgQWBBSDjPGr7M742rsE4oQGwBvGvllZ+zAfBgNVHSME
+GDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAeBggrBgEFBQcBDQQSMBAEB0V4YW1w
+bGUEBUJvZ3VzMB0GA1UdJQQWMBQGCCsGAQUFBwMOBggrBgEFBQcDDTAKBggqhkjO
+PQQDAwNpADBmAjEAmCPZnnlUQOKlcOIIOgFrRCkOqO0ESs+dobYwAc2rFCBtQyP7
+C3N00xkX8WZZpiAZAjEAi1Z5+nGbJg5eJTc8fwudutN/HNwJEIS6mHds9kfcy26x
+DAlVlhox680Jxy5J8Pkx
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sig_alg = asn1Object['tbsCertificate']['signature']
+
+ self.assertEqual(rfc5480.ecdsa_with_SHA384, sig_alg['algorithm'])
+ self.assertFalse(sig_alg['parameters'].hasValue())
+
+ spki_alg = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, spki_alg['algorithm'])
+ self.assertEqual(
+ rfc5480.secp384r1, spki_alg['parameters']['namedCurve'])
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ if extn['extnID'] == rfc3770.id_pe_wlanSSID:
+ self.assertIn(str2octs('Example'), extnValue)
+
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(rfc3770.id_kp_eapOverLAN, extnValue)
+ self.assertIn(rfc3770.id_kp_eapOverPPP, extnValue)
+
+ self.assertIn(rfc3770.id_pe_wlanSSID, extn_list)
+ self.assertIn(rfc5280.id_ce_extKeyUsage, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3779.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3779.py
new file mode 100644
index 0000000000..652826edde
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3779.py
@@ -0,0 +1,98 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3779
+
+
+class CertificateExtnTestCase(unittest.TestCase):
+ pem_text = """\
+MIIECjCCAvKgAwIBAgICAMkwDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAxMLcmlw
+ZS1uY2MtdGEwIBcNMTcxMTI4MTQzOTU1WhgPMjExNzExMjgxNDM5NTVaMBYxFDAS
+BgNVBAMTC3JpcGUtbmNjLXRhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA0URYSGqUz2myBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZ
+xIgPPV2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNcKrm
+it8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6Qc1ZHMFVFb38
+5IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXubASQ9KUWqJ0+Ot3QCXr4LX
+ECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk1uQKAyHUbn/tXvP8lrjAibGzVsXDT2
+L0x4Edx+QdixPgOji3gBMyL2VwIDAQABo4IBXjCCAVowHQYDVR0OBBYEFOhVKx/W
+0aT35ATG2OVoDR68Fj/DMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG
+MIGxBggrBgEFBQcBCwSBpDCBoTA8BggrBgEFBQcwCoYwcnN5bmM6Ly9ycGtpLnJp
+cGUubmV0L3JlcG9zaXRvcnkvcmlwZS1uY2MtdGEubWZ0MDIGCCsGAQUFBzANhiZo
+dHRwczovL3JyZHAucmlwZS5uZXQvbm90aWZpY2F0aW9uLnhtbDAtBggrBgEFBQcw
+BYYhcnN5bmM6Ly9ycGtpLnJpcGUubmV0L3JlcG9zaXRvcnkvMBgGA1UdIAEB/wQO
+MAwwCgYIKwYBBQUHDgIwJwYIKwYBBQUHAQcBAf8EGDAWMAkEAgABMAMDAQAwCQQC
+AAIwAwMBADAhBggrBgEFBQcBCAEB/wQSMBCgDjAMMAoCAQACBQD/////MA0GCSqG
+SIb3DQEBCwUAA4IBAQAVgJjrZ3wFppC8Yk8D2xgzwSeWVT2vtYq96CQQsjaKb8nb
+eVz3DwcS3a7RIsevrNVGo43k3AGymg1ki+AWJjvHvJ+tSzCbn5+X6Z7AfYTf2g37
+xINVDHru0PTQUargSMBAz/MBNpFG8KThtT7WbJrK4+f/lvx0m8QOlYm2a17iXS3A
+GQJ6RHcq9ADscqGdumxmMMDjwED26bGaYdmru1hNIpwF//jVM/eRjBFoPHKFlx0k
+Ld/yoCQNmx1kW+xANx4uyWxi/DYgSV7Oynq+C60OucW+d8tIhkblh8+YfrmukJds
+V+vo2L72yerdbsP9xjqvhZrLKfsLZjYK4SdYYthi
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc3779.id_pe_ipAddrBlocks:
+ s = extn['extnValue']
+ addr_blocks, rest = der_decoder.decode(s, rfc3779.IPAddrBlocks())
+ self.assertFalse(rest)
+ self.assertTrue(addr_blocks.prettyPrint())
+ self.assertEqual(s, der_encoder.encode(addr_blocks))
+
+ if extn['extnID'] == rfc3779.id_pe_autonomousSysIds:
+ s = extn['extnValue']
+ as_ids, rest = der_decoder.decode(s, rfc3779.ASIdentifiers())
+ self.assertFalse(rest)
+ self.assertTrue(as_ids.prettyPrint())
+ self.assertEqual(s, der_encoder.encode(as_ids))
+
+ self.assertIn(rfc3779.id_pe_ipAddrBlocks, extn_list)
+ self.assertIn(rfc3779.id_pe_autonomousSysIds, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if (extn['extnID'] == rfc3779.id_pe_ipAddrBlocks or
+ extn['extnID'] == rfc3779.id_pe_autonomousSysIds):
+ extnValue, rest = der_decoder.decode(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+ self.assertEqual(extn['extnValue'], der_encoder.encode(extnValue))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3820.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3820.py
new file mode 100644
index 0000000000..0895b286e4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3820.py
@@ -0,0 +1,78 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc3820
+
+
+class ProxyCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIID9DCCAtygAwIBAgIEDODd4TANBgkqhkiG9w0BAQUFADCBjTESMBAGCgmSJomT
+8ixkARkWAm5sMRcwFQYKCZImiZPyLGQBGRYHZS1pbmZyYTEaMBgGA1UEChMRVHJh
+aW5pbmcgU2VydmljZXMxDjAMBgNVBAsTBXVzZXJzMRowGAYDVQQLExFTZWN1cml0
+eSBUcmFpbmluZzEWMBQGA1UEAxMNUGlldGplIFB1ayA0MjAeFw0xOTExMjcwODMz
+NDZaFw0xOTExMjcyMDM4NDZaMIGhMRIwEAYKCZImiZPyLGQBGRYCbmwxFzAVBgoJ
+kiaJk/IsZAEZFgdlLWluZnJhMRowGAYDVQQKExFUcmFpbmluZyBTZXJ2aWNlczEO
+MAwGA1UECxMFdXNlcnMxGjAYBgNVBAsTEVNlY3VyaXR5IFRyYWluaW5nMRYwFAYD
+VQQDEw1QaWV0amUgUHVrIDQyMRIwEAYDVQQDEwkyMTYwNjM0NTcwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCu2b1j1XQXAgNazmTtdp6jjzvNQT8221/c
+dSIv2ftxr3UochHbazTfoR7wDT5PGlp2v99M0kZQvAEJ96CJpBDte4pwio7xHK3w
+s5h7lH3W2ydrxAMSnZp0NHxyo3DNenTV5HavGjraOZDLt/k1aPJ8C68CBbrGDQxH
+wzTs21Z+7lAy4C1ZNyOhkNF4qD5qy9Q2SHOPD+uc2QZE8IadZyxbeW/lEWHjESI1
+5y55oLZhe3leb2NswvppgdwM8KW4Pbtya6mDKGH4e1qQfNfxsqlxbIBr4UaM8iSM
+5BhJhe7VCny2iesGCJWz3NNoTJKBehN5o2xs7+fHv+sOW2Yuc3MnAgMBAAGjRjBE
+MBMGA1UdJQQMMAoGCCsGAQUFBwMCMA4GA1UdDwEB/wQEAwIEsDAdBggrBgEFBQcB
+DgEB/wQOMAwwCgYIKwYBBQUHFQEwDQYJKoZIhvcNAQEFBQADggEBAJbeKv3yQ9Yc
+GHT4r64gVkKd4do7+cRS9dfWg8pcLRn3aBzTCBIznkg+OpzjteOJCuw6AxDsDPmf
+n0Ms7LaAqegW8vcYgcZTxeABE5kgg5HTMUSMo39kFNTYHlNgsVfnOhpePnWX+e0Y
+gPpQU7w1npAhr23lXn9DNWgWMMT6T3z+NngcJ9NQdEee9D4rzY5Oo9W/2OAPuMne
+w5dGF7wVCUBRi6vrMnWYN8E3sHiFDJJrOsPWZzjRCa/W3N9A/OdgjitKQc3X4dlS
+tP2J7Yxv/B/6+VxVEa9WtVXsm/wJnhwvICBscB1/4WkI0PfJ7Nh4ZqQplPdlDEKe
+FOuri/fKBe0=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_ppl = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc3820.id_pe_proxyCertInfo:
+ self.assertTrue(rfc3820.id_pe_proxyCertInfo in rfc5280.certificateExtensionsMap.keys())
+ pci, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[rfc3820.id_pe_proxyCertInfo])
+ self.assertFalse(rest)
+ self.assertTrue(pci.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(pci))
+
+ self.assertEqual(rfc3820.id_ppl_inheritAll, pci['proxyPolicy']['policyLanguage'])
+ found_ppl = True
+
+ self.assertTrue(found_ppl)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc3852.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc3852.py
new file mode 100644
index 0000000000..56b25ccc56
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc3852.py
@@ -0,0 +1,128 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3852
+from pyasn1_modules import rfc6402
+
+
+class ContentInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
+BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
+SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
+cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
+AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
+PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
+Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
+OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
+A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
+bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
+AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
+mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
+KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
+qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
+oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
+xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
+5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
+xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc3852.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = {
+ rfc3852.id_ct_contentInfo: rfc3852.ContentInfo(),
+ rfc3852.id_signedData: rfc3852.SignedData(),
+ rfc6402.id_cct_PKIData: rfc6402.PKIData()
+ }
+
+ getNextLayer = {
+ rfc3852.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc3852.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc3852.id_ct_contentInfo: lambda x: x['content'],
+ rfc3852.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ alg_oids = (
+ univ.ObjectIdentifier('1.3.14.3.2.26'),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'),
+ )
+
+ encoded_null = der_encoder(univ.Null(""))
+
+ next_layer = rfc3852.id_ct_contentInfo
+
+ count = 0
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ if next_layer == rfc3852.id_signedData:
+ for d in asn1Object['digestAlgorithms']:
+ self.assertIn(d['algorithm'], alg_oids)
+ self.assertEqual(encoded_null, d['parameters'])
+ count += 1
+
+ for si in asn1Object['signerInfos']:
+ self.assertIn(si['digestAlgorithm']['algorithm'], alg_oids)
+ self.assertEqual(
+ encoded_null, si['digestAlgorithm']['parameters'])
+ count += 1
+
+ self.assertIn(si['signatureAlgorithm']['algorithm'], alg_oids)
+ self.assertEqual(
+ encoded_null, si['signatureAlgorithm']['parameters'])
+ count += 1
+
+ if next_layer == rfc6402.id_cct_PKIData:
+ for req in asn1Object['reqSequence']:
+ cr = req['tcr']['certificationRequest']
+ self.assertIn(cr['signatureAlgorithm']['algorithm'], alg_oids)
+ self.assertEqual(
+ encoded_null, cr['signatureAlgorithm']['parameters'])
+ count += 1
+
+ cri_spki = cr['certificationRequestInfo']['subjectPublicKeyInfo']
+ self.assertIn(cri_spki['algorithm']['algorithm'], alg_oids)
+ self.assertEqual(
+ encoded_null, cri_spki['algorithm']['parameters'])
+ count += 1
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(5, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4010.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4010.py
new file mode 100644
index 0000000000..7474b9d849
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4010.py
@@ -0,0 +1,136 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4010
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ env_data_pem_text = """\
+MIIFewYJKoZIhvcNAQcDoIIFbDCCBWgCAQIxUqJQAgEEMCMEEKBBI2KxDUPS5TCo
+RCEDJo4YDzIwMTkwOTEyMTIwMDAwWjAMBgoqgxqMmkQHAQEBBBipFE2DxCLAx2Og
+E53Jt21V8kAoscU7K3wwggUNBgkqhkiG9w0BBwEwHAYIKoMajJpEAQQEEEJPR1VT
+SVZfQk9HVVNJViGAggTgc8exehjJD/gtEOIrg6tK5Emaa4PJ7l8f+EtyDD/ffQay
+XVAGz2MXUIQMEzmSLrnsr9NEyXvxGpvcsi7mV8tDxZU0YuyhA/C/HMh7EaBKG1hj
+C7xNw+IRIUxrbRJakMQbzMWWYJupC5zRu4/Ge9i+JVOGgES2E0L5LZSZ53wmnHA0
+ols1PHl3F3Z2QM3CkewqA3NP1waXQ0XXb0Oyl6Gq12B7ksm7euPWA3KctEjfYBD6
+nBT6wQd57rAMeFTk5aceWd2Sb/0xMpjfCg6GzX8pAWVEU8LqTvVmlSWdx3f3fAtU
+giZ+gx7jNY8A6duln8zvMQn3mtPDCa50GzSrAx8JreHRWSDr3Dp8EfJzUgfy7dWl
+I9xs5bh1TMkEMk+AHWQ5sBXTZkDgVAS5m1mIbXe7dzuxKsfGxjWu1eyy9J77mtOG
+o9aAOqYfxv/I8YQcgWHTeQcIO39Rmt2QsI7trRaEJ1jgj2E1To5gRCbIQWzQuyoS
+6affgu/9dwPXCAt0+0XrnO5vhaKX/RWm7ve8hYsiT0vI0hdBJ3rDRkdS9VL6NlnX
+OuohAqEq8b3s2koBigdri052hceAElTHD+4A4qRDiMLlFLlQqoJlpBwCtEPZsIQS
+y62K7J/Towxxab5FoFjUTC5f79xPQPoKxYdgUB5AeAu5HgdWTn49Uqg4v/spTPSN
+RTmDMVVyZ9qhzJfkDpH3TKCAE5t59w4gSPe/7l+MeSml9O+L9HTd9Vng3LBbIds3
+uQ4cfLyyQmly81qpJjR1+Rvwo46hOm0kf2sIFi0WULmP/XzLw6b1SbiHf/jqFg7T
+FTyLMkPMPMmc7/kpLmYbKyTB4ineasTUL+bDrwu+uSzFAjTcI+1sz4Wo4p7RVywB
+DKSI5Ocbd3iMt4XWJWtz0KBX6nBzlV+BBTCwaGMAU4IpPBYOuvcl7TJWx/ODBjbO
+4zm4T/66w5IG3tKpsVMs4Jtrh8mtVXCLTBmKDzyjBVN2X8ALGXarItRgLa7k80lJ
+jqTHwKCjiAMmT/eh67KzwmqBq5+8rJuXkax0NoXcDu6xkCMNHUQBYdnskaJqC2pu
+8hIsPTOrh7ieYSEuchFvu7lI0E+p7ypW65CMiy+Y/Rm5OWeHzjKkU5AbPtx/Me2v
+pQRCgaPwciZunx2Ivi1+WYUBU1pGNDO7Xz7a8UHbDURkh7b+40uz2d7YQjKgrZBv
+6YwLAmw1LTE4bT9PM9n7LROnX8u6ksei8yiw8gZeVu+plWHbF+0O9siKAgxZlBna
+0XFgPpdzjMDTS/sfTIYXWlFj7camhsmTDRjo5G2B212evaKmKgh5ALLSFSk86ZN5
+KvQvcfsp81jvJCBmDStrsUgSMzy0Og2quHOd61hRTVlYzwvJvfMzHGKdIWwYUbHZ
+OKo/KLEk3E36U9PkPoZGEL2ZeCH4F9Wh3mgg0knBfEmlPnGexmBby6NXGK7VW3l6
+xcJlpdMaXKNVMfl2YK8k/34Hyft06KaYLEJsxAqk1pmLEmGhdZC1OAqovVB/1agS
+zpMMaB9OWWqNsTjDc7tkDt8BZ72NsAbCI9XmsX81W+NqPb6Ju1dtI09bn113LX/Z
+bOSdVicQcXSpl0FnTZaHgHJdQLcU28O7yFFOblqrvcMKpctdTA1TwG9LXEFttGrl
+pgjZF3edo0Cez10epK+S
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(asn1Object['content'], rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4010.id_npki_app_cmsSeed_wrap, kwa['algorithm'])
+
+ cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4010.id_seedCBC, cea['algorithm'])
+ param, rest = der_decoder(
+ cea['parameters'], asn1Spec=rfc4010.SeedCBCParameter())
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(cea['parameters'], der_encoder(param))
+
+ iv = univ.OctetString(hexValue='424f47555349565f424f475553495621')
+ self.assertEqual(iv, param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.env_data_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
+
+ kekri = asn1Object['content']['recipientInfos'][0]['kekri']
+ kwa = kekri['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4010.id_npki_app_cmsSeed_wrap, kwa['algorithm'])
+
+ eci = asn1Object['content']['encryptedContentInfo']
+ cea = eci['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4010.id_seedCBC, cea['algorithm'])
+
+ iv = univ.OctetString(hexValue='424f47555349565f424f475553495621')
+ self.assertEqual(iv, cea['parameters'])
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = "MB4wDAYIKoMajJpEAQQFADAOBgoqgxqMmkQHAQEBBQA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ alg_oid_list = [ ]
+ for cap in asn1Object:
+ self.assertTrue(cap['parameters'].hasValue())
+ self.assertEqual(cap['parameters'], der_encoder(rfc4010.SeedSMimeCapability("")))
+ alg_oid_list.append(cap['capabilityID'])
+
+ self.assertIn(rfc4010.id_seedCBC, alg_oid_list)
+ self.assertIn(rfc4010.id_npki_app_cmsSeed_wrap, alg_oid_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
+
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4043.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4043.py
new file mode 100644
index 0000000000..0ab72dd364
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4043.py
@@ -0,0 +1,118 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4043
+
+
+class PermIdCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIDDTCCApOgAwIBAgIJAKWzVCgbsG5HMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMTEwMDA0MDIyWhcNMjAxMTA5MDA0MDIyWjBNMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxDTALBgNVBAMTBEdhaWwwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQBoktg
+/68xL+uEQaWBoHyOjw8EMLeMEng3R2H7yiEzTGoaMJgPOKvSfzB2P0paHYPL+B5y
+Gc0CK5EHRujMl9ljH+Wydpk57rKBLo1ZzpWUS6anLGIkWs1sOakcgGGr7hGjggFL
+MIIBRzAdBgNVHQ4EFgQU1pCNZuMzfEaJ9GGhH7RKy6Mvz+cwbwYDVR0jBGgwZoAU
+8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
+DAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYG
+kU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMFMGA1UdEQRMMEqgNgYIKwYBBQUHCAOgKjAoDBs4MjYyMDgtNDE3MDI4
+LTU0ODE5NS0yMTUyMzMGCSsGAQQBgaxgMIEQZ2FpbEBleGFtcGxlLmNvbTAKBggq
+hkjOPQQDAwNoADBlAjBT+36Y/LPaGSu+61P7kR97M8jAjtH5DtUwrWR02ChshvYJ
+x0bpZq3PJaO0WlBgFicCMQCf+67wSvjxxtjI/OAg4t8NQIJW1LcehSXizlPDc772
+/FC5OiUAxO+iFaSVMeDFsCo=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ perm_id_oid = rfc4043.id_on_permanentIdentifier
+ assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
+ permanent_identifier_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ self.assertEqual(perm_id_oid, gn['otherName']['type-id'])
+
+ onValue, rest = der_decoder(
+ gn['otherName']['value'],
+ asn1Spec=rfc4043.PermanentIdentifier())
+
+ self.assertFalse(rest)
+ self.assertTrue(onValue.prettyPrint())
+ self.assertEqual(gn['otherName']['value'], der_encoder(onValue))
+ self.assertEqual(assigner_oid, onValue['assigner'])
+ permanent_identifier_found = True
+
+ self.assertTrue(permanent_identifier_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ perm_id_oid = rfc4043.id_on_permanentIdentifier
+ assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
+ permanent_identifier_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ on = gn['otherName']
+ self.assertEqual(perm_id_oid, on['type-id'])
+ self.assertEqual(assigner_oid, on['value']['assigner'])
+ permanent_identifier_found = True
+
+ self.assertTrue(permanent_identifier_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4055.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4055.py
new file mode 100644
index 0000000000..cf0b376daa
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4055.py
@@ -0,0 +1,181 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4055
+
+
+class PSSDefautTestCase(unittest.TestCase):
+ pss_default_pem_text = "MAsGCSqGSIb3DQEBCg=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pss_default_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertTrue(rfc4055.id_RSASSA_PSS, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pss_default_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertFalse(asn1Object['parameters'].hasValue())
+
+
+class PSSSHA512TestCase(unittest.TestCase):
+ pss_sha512_pem_text = "MDwGCSqGSIb3DQEBCjAvoA8wDQYJYIZIAWUDBAIDBQChHDAaBg" \
+ "kqhkiG9w0BAQgwDQYJYIZIAWUDBAIDBQA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pss_sha512_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertTrue(rfc4055.id_RSASSA_PSS, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pss_sha512_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertTrue(asn1Object['parameters'].hasValue())
+ self.assertTrue(20, asn1Object['parameters']['saltLength'])
+
+
+class OAEPDefautTestCase(unittest.TestCase):
+ oaep_default_pem_text = "MAsGCSqGSIb3DQEBBw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_default_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertTrue(rfc4055.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.oaep_default_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertFalse(asn1Object['parameters'].hasValue())
+
+
+class OAEPSHA256TestCase(unittest.TestCase):
+ oaep_sha256_pem_text = "MDwGCSqGSIb3DQEBBzAvoA8wDQYJYIZIAWUDBAIBBQChHDAaB" \
+ "gkqhkiG9w0BAQgwDQYJYIZIAWUDBAIBBQA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_sha256_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertTrue(rfc4055.id_RSAES_OAEP, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.oaep_sha256_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertTrue(asn1Object['parameters'].hasValue())
+
+ oaep_p = asn1Object['parameters']
+
+ self.assertEqual(univ.Null(""), oaep_p['hashFunc']['parameters'])
+ self.assertEqual(univ.Null(""), oaep_p['maskGenFunc']['parameters']['parameters'])
+
+
+class OAEPFullTestCase(unittest.TestCase):
+ oaep_full_pem_text = "MFMGCSqGSIb3DQEBBzBGoA8wDQYJYIZIAWUDBAICBQChHDAaBgk" \
+ "qhkiG9w0BAQgwDQYJYIZIAWUDBAICBQCiFTATBgkqhkiG9w0BAQ" \
+ "kEBmZvb2Jhcg=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.oaep_full_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+
+ self.assertTrue(rfc4055.id_RSAES_OAEP, asn1Object[0])
+
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.oaep_full_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ self.assertTrue(asn1Object['parameters'].hasValue())
+
+ oaep_p = asn1Object['parameters']
+
+ self.assertEqual(univ.Null(""), oaep_p['hashFunc']['parameters'])
+ self.assertEqual(
+ univ.Null(""), oaep_p['maskGenFunc']['parameters']['parameters'])
+ self.assertEqual(
+ univ.OctetString(value='foobar'),
+ oaep_p['pSourceFunc']['parameters'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4073.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4073.py
new file mode 100644
index 0000000000..4bd5e5f7fc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4073.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4073
+from pyasn1_modules import rfc5652
+
+
+class ContentCollectionTestCase(unittest.TestCase):
+ pem_text = """\
+MIIG/QYLKoZIhvcNAQkQAROgggbsMIIG6DCCAWcGCyqGSIb3DQEJEAEUoIIBVjCC
+AVIwgfEGCSqGSIb3DQEHAaCB4wSB4ENvbnRlbnQtVHlwZTogdGV4dC9wbGFpbgoK
+UkZDIDQwNzMsIHB1Ymxpc2hlZCBpbiBNYXkgMjAwNSwgZGVzY3JpYmVzIGEgY29u
+dmVudGlvbiBmb3IgdXNpbmcgdGhlCkNyeXB0b2dyYXBoaWMgTWVzc2FnZSBTeW50
+YXggKENNUykgdG8gcHJvdGVjdCBhIGNvbnRlbnQgY29sbGVjdGlvbi4gIElmCmRl
+c2lyZWQsIGF0dHJpYnV0ZXMgY2FuIGJlIGFzc29jaWF0ZWQgd2l0aCB0aGUgY29u
+dGVudC4KMFwwMwYLKoZIhvcNAQkQAgQxJDAiDBVBYnN0cmFjdCBmb3IgUkZDIDQw
+NzMGCSqGSIb3DQEHATAlBgsqhkiG9w0BCRACBzEWBBSkLSXBiRWvbwnJKb4EGb1X
+FwCa3zCCBXkGCyqGSIb3DQEJEAEUoIIFaDCCBWQwggT9BgkqhkiG9w0BBwGgggTu
+BIIE6kNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbgoKVGhlIGZvbGxvd2luZyBBU04u
+MSBtb2R1bGUgZGVmaW5lcyB0aGUgc3RydWN0dXJlcyB0aGF0IGFyZSBuZWVkZWQg
+dG8KaW1wbGVtZW50IHRoZSBzcGVjaWZpY2F0aW9uIGluIFJGQyA0MDczLiAgSXQg
+aXMgZXhwZWN0ZWQgdG8gYmUgdXNlZCBpbgpjb25qdW5jdGlvbiB3aXRoIHRoZSBB
+U04uMSBtb2R1bGVzIGluIFJGQyA1NjUyIGFuZCBSRkMgMzI3NC4KCiAgIENvbnRl
+bnRDb2xsZWN0aW9uTW9kdWxlCiAgICAgeyBpc28oMSkgbWVtYmVyLWJvZHkoMikg
+dXMoODQwKSByc2Fkc2koMTEzNTQ5KSBwa2NzKDEpCiAgICAgICBwa2NzLTkoOSkg
+c21pbWUoMTYpIG1vZHVsZXMoMCkgMjYgfQoKICAgREVGSU5JVElPTlMgSU1QTElD
+SVQgVEFHUyA6Oj0KICAgQkVHSU4KCiAgIElNUE9SVFMKICAgICBBdHRyaWJ1dGUs
+IENvbnRlbnRJbmZvCiAgICAgICBGUk9NIENyeXB0b2dyYXBoaWNNZXNzYWdlU3lu
+dGF4MjAwNCAtLSBbQ01TXQogICAgICAgICB7IGlzbygxKSBtZW1iZXItYm9keSgy
+KSB1cyg4NDApIHJzYWRzaSgxMTM1NDkpCiAgICAgICAgICAgcGtjcygxKSBwa2Nz
+LTkoOSkgc21pbWUoMTYpIG1vZHVsZXMoMCkgY21zLTIwMDEoMTQpIH07CgoKICAg
+LS0gQ29udGVudCBDb2xsZWN0aW9uIENvbnRlbnQgVHlwZSBhbmQgT2JqZWN0IElk
+ZW50aWZpZXIKCiAgIGlkLWN0LWNvbnRlbnRDb2xsZWN0aW9uIE9CSkVDVCBJREVO
+VElGSUVSIDo6PSB7CiAgICAgICAgICAgaXNvKDEpIG1lbWJlci1ib2R5KDIpIHVz
+KDg0MCkgcnNhZHNpKDExMzU0OSkgcGtjcygxKQogICAgICAgICAgIHBrY3M5KDkp
+IHNtaW1lKDE2KSBjdCgxKSAxOSB9CgogICBDb250ZW50Q29sbGVjdGlvbiA6Oj0g
+U0VRVUVOQ0UgU0laRSAoMS4uTUFYKSBPRiBDb250ZW50SW5mbwoKICAgLS0gQ29u
+dGVudCBXaXRoIEF0dHJpYnV0ZXMgQ29udGVudCBUeXBlIGFuZCBPYmplY3QgSWRl
+bnRpZmllcgoKICAgaWQtY3QtY29udGVudFdpdGhBdHRycyBPQkpFQ1QgSURFTlRJ
+RklFUiA6Oj0gewogICAgICAgICAgIGlzbygxKSBtZW1iZXItYm9keSgyKSB1cyg4
+NDApIHJzYWRzaSgxMTM1NDkpIHBrY3MoMSkKICAgICAgICAgICBwa2NzOSg5KSBz
+bWltZSgxNikgY3QoMSkgMjAgfQoKICAgQ29udGVudFdpdGhBdHRyaWJ1dGVzIDo6
+PSBTRVFVRU5DRSB7CiAgICAgICBjb250ZW50ICAgICBDb250ZW50SW5mbywKICAg
+ICAgIGF0dHJzICAgICAgIFNFUVVFTkNFIFNJWkUgKDEuLk1BWCkgT0YgQXR0cmli
+dXRlIH0KCiAgIEVORAowYTA4BgsqhkiG9w0BCRACBDEpMCcMGkFTTi4xIE1vZHVs
+ZSBmcm9tIFJGQyA0MDczBgkqhkiG9w0BBwEwJQYLKoZIhvcNAQkQAgcxFgQUMbeK
+buWO3egPDL8Kf7tBhzjIKLw=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+
+ def test_layer(substrate, content_type):
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[content_type])
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ if content_type == rfc4073.id_ct_contentWithAttrs:
+ for attr in asn1Object['attrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+
+ return asn1Object
+
+ layers = rfc5652.cmsContentTypesMap
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc4073.id_ct_contentCollection: lambda x: x[0]['contentType'],
+ rfc4073.id_ct_contentWithAttrs: lambda x: x['content']['contentType'],
+ rfc5652.id_data: lambda x: None,
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc4073.id_ct_contentCollection: lambda x: x[0]['content'],
+ rfc4073.id_ct_contentWithAttrs: lambda x: x['content']['content'],
+ rfc5652.id_data: lambda x: None,
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ this_layer = rfc5652.id_ct_contentInfo
+
+ while this_layer != rfc5652.id_data:
+ if this_layer == rfc4073.id_ct_contentCollection:
+ asn1Object = test_layer(substrate, this_layer)
+ for ci in asn1Object:
+ substrate = ci['content']
+ this_layer = ci['contentType']
+ while this_layer != rfc5652.id_data:
+ asn1Object = test_layer(substrate, this_layer)
+ substrate = getNextSubstrate[this_layer](asn1Object)
+ this_layer = getNextLayer[this_layer](asn1Object)
+ else:
+ asn1Object = test_layer(substrate, this_layer)
+ substrate = getNextSubstrate[this_layer](asn1Object)
+ this_layer = getNextLayer[this_layer](asn1Object)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=rfc5652.ContentInfo(),
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc4073.id_ct_contentCollection, asn1Object['contentType'])
+
+ for ci in asn1Object['content']:
+ self.assertIn(ci['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc4073.id_ct_contentWithAttrs, ci['contentType'])
+
+ next_ci = ci['content']['content']
+
+ self.assertIn(next_ci['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_data, next_ci['contentType'])
+ self.assertIn(str2octs('Content-Type: text'), next_ci['content'])
+
+ for attr in ci['content']['attrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc2634.id_aa_contentHint:
+ self.assertIn('RFC 4073', attr['attrValues'][0]['contentDescription'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4108.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4108.py
new file mode 100644
index 0000000000..9d71601077
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4108.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc4108
+
+
+class CMSFirmwareWrapperTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEvAYJKoZIhvcNAQcCoIIErTCCBKkCAQExDTALBglghkgBZQMEAgEwggIVBgsq
+hkiG9w0BCRABEKCCAgQEggIA3ntqPr5kDpx+//pgWGfHCH/Ht4pbenGwXv80txyE
+Y0I2mT9BUGz8ILkbhD7Xz89pBS5KhEJpthxH8WREJtvS+wL4BqYLt23wjWoZy5Gt
+5dPzWgaNlV/aQ5AdfAY9ljmnNYnK8D8r8ur7bQM4cKUdxry+QA0nqXHMAOSpx4Um
+8impCc0BICXaFfL3zBrNxyPubbFO9ofbYOAWaNmmIAhzthXf12vDrLostIqmYrP4
+LMRCjTr4LeYaVrAWfKtbUbByN6IuBef3Qt5cJaChr74udz3JvbYFsUvCpl64kpRq
+g2CT6R+xE4trO/pViJlI15dvJVz04BBYQ2jQsutJwChi97/DDcjIv03VBmrwRE0k
+RJNFP9vpDM8CxJIqcobC5Kuv8b0GqGfGl6ouuQKEVMfBcrupgjk3oc3KL1iVdSr1
++74amb1vDtTMWNm6vWRqh+Kk17NGEi2mNvYkkZUTIHNGH7OgiDclFU8dSMZd1fun
+/D9dmiFiErDB3Fzr4+8Qz0aKedNE/1uvM+dhu9qjuRdkDzZ4S7txTfk6y9pG9iyk
+aEeTV2kElKXblgi+Cf0Ut4f5he8rt6jveHdMo9X36YiUQVvevj2cgN7lFivEnFYV
+QY0xugpP7lvEFDfsi2+0ozgP8EKOLYaCUKpuvttlYJ+vdtUFEijizEZ4cx02RsXm
+EesxggJ6MIICdgIBA4AUnutnybladNRNLxY5ZoDoAbXLpJwwCwYJYIZIAWUDBAIB
+oIG8MBoGCSqGSIb3DQEJAzENBgsqhkiG9w0BCRABEDArBgsqhkiG9w0BCRACJDEc
+MBoGCysGAQQBjb9BAQEqBgsrBgEEAY2/QQEBMDAvBgkqhkiG9w0BCQQxIgQgAJfv
+uasB4P6WDLOkOyvj33YPgZW4olHbidzyh1EKP9YwQAYLKoZIhvcNAQkQAikxMTAv
+MAsGCWCGSAFlAwQCAQQgAJfvuasB4P6WDLOkOyvj33YPgZW4olHbidzyh1EKP9Yw
+CwYJKoZIhvcNAQELBIIBgDivAlSLbMPPu+zV+pPcYpNp+A1mwVOytjMBzSo31kR/
+qEu+hVrDknAOk9IdCaDvcz612CcfNT85/KzrYvWWxOP2woU/vZj253SnndALpfNN
+n3/crJjF6hKgkjUwoXebI7kuj5WCh2q5lkd6xUa+jkCw+CINcN43thtS66UsVI4d
+mv02EvsS2cxPY/508uaQZ6AYAacm667bgX8xEjbzACMOeMCuvKQXWAuh3DkNk+gV
+xizHDw7xZxXgMGMAnJglAeBtd3Si5ztILw9U2gKUqFn/nOgy+eW63JuU/q31/Hgg
+ZATjyBznSzneTZrw8/ePoSCj7E9vBeCTUkeFbVB2tJK1iYDMblp6HUuwgYuGKXy/
+ZwKL3GvB11qg7ntdEyjdLq0xcVrht/K0d2dPo4iO4Ac7c1xbFMDAlWOt4FMPWh6O
+iTh55YvT7hAJjTbB5ebgMA9QJnAczQPFnaIePnlFrkETd3YyLK4yHwnoIGo1GiW/
+dsnhVtIdkPtfJIvcYteYJg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ inner, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertEqual(
+ rfc4108.id_ct_firmwarePackage, inner['encapContentInfo']['eContentType'])
+
+ self.assertTrue(inner['encapContentInfo']['eContent'])
+
+ attribute_list = []
+
+ for attr in inner['signerInfos'][0]['signedAttrs']:
+ attribute_list.append(attr['attrType'])
+ if attr['attrType'] == rfc4108.id_aa_targetHardwareIDs:
+ av, rest = der_decoder(attr['attrValues'][0],
+ asn1Spec=rfc4108.TargetHardwareIdentifiers())
+ self.assertEqual(2, len(av))
+
+ for oid in av:
+ self.assertIn('1.3.6.1.4.1.221121.1.1.', oid.prettyPrint())
+
+ self.assertIn( rfc5652.id_contentType, attribute_list)
+ self.assertIn( rfc5652.id_messageDigest, attribute_list)
+ self.assertIn(rfc4108.id_aa_targetHardwareIDs, attribute_list)
+ self.assertIn(rfc4108.id_aa_fwPkgMessageDigest, attribute_list)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd_eci = asn1Object['content']['encapContentInfo']
+
+ self.assertEqual(sd_eci['eContentType'], rfc4108.id_ct_firmwarePackage)
+ self.assertTrue(sd_eci['eContent'].hasValue())
+
+ for attr in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc4108.id_aa_targetHardwareIDs:
+ for oid in attr['attrValues'][0]:
+ self.assertIn('1.3.6.1.4.1.221121.1.1.', oid.prettyPrint())
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4210.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4210.py
new file mode 100644
index 0000000000..39d407f72f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4210.py
@@ -0,0 +1,128 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4210
+
+
+class PKIMessageTestCase(unittest.TestCase):
+ pem_text = """\
+MIITuTCCARECAQKkWTBXMQswCQYDVQQGEwJUUjEQMA4GA1UEChMHRS1HdXZlbjEUMBIGA1UECxML
+VHJ1c3RDZW50ZXIxIDAeBgNVBAMTF1JTQSBTZWN1cml0eSBDTVAgU2VydmVypC0wKzELMAkGA1UE
+BhMCVFIxHDAaBgNVBAMME1ZhbGltby1WZXR0b3ItMTdEZWOgERgPMjAxMjA1MDMxMTE2MTdaoQ8w
+DQYJKoZIhvcNAQEFBQCiIgQgZWVhMjg5MGU2ZGY5N2IyNzk5NWY2MWE0MzE2MzI1OWGkEgQQQ01Q
+VjJUMTIyMzM0NjI3MKUSBBCAAAABgAAAAYAAAAGAAAABphIEEDEzNjY0NDMwMjlSYW5kb22jghIZ
+MIISFaGCC84wggvKMIIFwDCCBKigAwIBAgIQfOVE05R616R6Nqgu3drXHzANBgkqhkiG9w0BAQUF
+ADBxMQswCQYDVQQGEwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5T
+LjE4MDYGA1UEAxMvZS1HdXZlbiBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2FnbGF5aWNp
+c2kwHhcNMDgxMTI0MTAwMzI0WhcNMTYxMjE0MTExNzI0WjBdMQswCQYDVQQGEwJUUjEoMCYGA1UE
+CgwfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjEkMCIGA1UEAwwbZS1HdXZlbiBNb2Jp
+bCBUZXN0VVRGLTgtU09OMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzqaymRo5chRK
+EKrhjWQky1HOm6b/Jy4tSUuo4vq3O9U3G2osOU/hHb6fyMmznLpc6CaZ3qKYiuDMFRW8g1kNjEjV
+sFSvH0Yd4qgwP1+qqzhBSe+nCAnEbRUrz+nXJ4fKhmGaQ+ZSic+MeyoqDsf/zENKqdV7ea9l3Ilu
+Rj93bmTxas9aWPWQ/U/fpwkwRXaqaONlM5e4GWdgA7T1aq106NvH1z6LDNXcMYw4lSZkj/UjmM/0
+NhVz+57Ib4a0bogTaBmm8a1E5NtzkcA7pgnZT8576T0UoiOpEo+NAELA1B0mRh1/82HK1/0xn1zt
+1ym4XZRtn2r2l/wTeEwU79ALVQIDAQABo4ICZjCCAmIwfAYIKwYBBQUHAQEEcDBuMDIGCCsGAQUF
+BzABhiZodHRwOi8vdGVzdG9jc3AyLmUtZ3V2ZW4uY29tL29jc3AueHVkYTA4BggrBgEFBQcwAoYs
+aHR0cDovL3d3dy5lLWd1dmVuLmNvbS9kb2N1bWVudHMvVGVzdEtvay5jcnQwDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wggElBgNVHSAEggEcMIIBGDCCARQGCWCGGAMAAQECATCCAQUw
+NgYIKwYBBQUHAgEWKmh0dHA6Ly93d3cuZS1ndXZlbi5jb20vZG9jdW1lbnRzL05FU1VFLnBkZjCB
+ygYIKwYBBQUHAgIwgb0egboAQgB1ACAAcwBlAHIAdABpAGYAaQBrAGEAIABpAGwAZQAgAGkAbABn
+AGkAbABpACAAcwBlAHIAdABpAGYAaQBrAGEAIAB1AHkAZwB1AGwAYQBtAGEAIABlAHMAYQBzAGwA
+YQByATEAbgExACAAbwBrAHUAbQBhAGsAIABpAOcAaQBuACAAYgBlAGwAaQByAHQAaQBsAGUAbgAg
+AGQAbwBrAPwAbQBhAG4BMQAgAGEA5wExAG4BMQB6AC4wWAYDVR0fBFEwTzBNoEugSYZHaHR0cDov
+L3Rlc3RzaWwuZS1ndXZlbi5jb20vRWxla3Ryb25pa0JpbGdpR3V2ZW5saWdpQVNSb290L0xhdGVz
+dENSTC5jcmwwHQYDVR0OBBYEFLMoTImEKeXbqNjbYZkKshQi2vwzMB8GA1UdIwQYMBaAFGCI4dY9
+qCIkag0hwBgz5haCSNl0MA0GCSqGSIb3DQEBBQUAA4IBAQAWOsmvpoFB9sX2aq1/LjPDJ+A5Fpxm
+0XkOGM9yD/FsLfWgyv2HqBY1cVM7mjJfJ1ezkS0ODdlU6TyN5ouvAi21V9CIk69I3eUYSDjPpGia
+qcCCvJoMF0QD7B70kj2zW7IJ7pF11cbvPLaatdzojsH9fVfKtxtn/ZLrXtKsyUW5vKHOeniU6BBB
+Gl/ZZkFNXNN4mrB+B+wDV9OmdMw+Mc8KPq463hJQRat5a9lrXMdNtMAJOkvsUUzOemAsITjXWlyg
+BULijBhi8ZmMp0W7p6oKENX3vH2HCPCGQU29WIrK4iUoscjz93fB6oa4FQpxY0k3JRnWvD5FqkRD
+FKJdq/q9MIIDzzCCAregAwIBAgIQa34pJYdDFNXx90OkMkKzIjANBgkqhkiG9w0BAQUFADBxMQsw
+CQYDVQQGEwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE4MDYG
+A1UEAxMvZS1HdXZlbiBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2FnbGF5aWNpc2kwHhcN
+MDYxMjE1MTUxMzU0WhcNMTYxMjE1MTExMzU0WjBxMQswCQYDVQQGEwJUUjEoMCYGA1UEChMfRWxl
+a3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE4MDYGA1UEAxMvZS1HdXZlbiBFbGVrdHJvbmlr
+IFNlcnRpZmlrYSBIaXptZXQgU2FnbGF5aWNpc2kwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCU/PTxSkcWPJMx4UO8L8ep9/JqRgAZ79EqYWgR4K2bNLgENpc5j0hO+QydgovFODzkEIBP
+RIBavMz9Cw2PONpSBmxd4K1A/5hGqoGEz8UCA2tIx4+Z2A9AQ2O3BYi9FWM+0D1brJDO+6yvX4m5
+Rf3mLlso52NIVV705fIkmOExHjdAj/xB0/LICZMfwKn8F19Jae/SQv9cFnptbNRCq8hU5zLRngpR
+eT1PYrZVV0XLbzbDPwgzLXCzDxG1atdGd5JRTnD58qM1foC3+hGafuyissMQVGnBQFlsx7V6OdlD
+bsxUXegCl2li0RpRJXLqyqMdtEplaznKp8NnbddylfrPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
+hjAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFGCI4dY9qCIkag0hwBgz5haCSNl0MB0GA1Ud
+DgQWBBRgiOHWPagiJGoNIcAYM+YWgkjZdDANBgkqhkiG9w0BAQUFAAOCAQEAKftTVjgltZJxXwDs
+MumguOSlljOQjotVVpES1QYwo3a5RQVpKuS4KYDEdWLD4ITtDNOA/iGKYWCNyKsE1BCL66irknZw
+iR6p6P+q2Wf7fGYSwUBcSBwWBTA+0EgpvPL3/vRuVVCVgC8XHBr72jKKTg9Nwcj+1FwXGZTDpjX8
+dzPhTXEWceQcDn2FRdNt6BQad9Hdq08lMHiyozsWniYZYuWpud91i8Pl698H9t0KqiJg6rPKc9kd
+z9QyC8E/cLIJgYhvfzXMxvmSjeSSFSqTHioqfpU3k8AWXuxqJUxbdQ8QrVaTXRByzEr1Ze0TYpDs
+oel1PjC9ouO8bC7cGrbCWzCCAi8wggGYAhBlEjJUo9asY2ISG4oHjcpzMA0GCSqGSIb3DQEBBQUA
+MFoxCzAJBgNVBAYTAlRSMRAwDgYDVQQKEwdFLUd1dmVuMRQwEgYDVQQLEwtUcnVzdENlbnRlcjEj
+MCEGA1UEAxMaRS1HdXZlblRFU1RDQUhTTSBTeXN0ZW0gQ0EwHhcNMDkxMTMwMjIxMzEzWhcNMTYx
+MTMwMTkxMTUxWjBXMQswCQYDVQQGEwJUUjEQMA4GA1UEChMHRS1HdXZlbjEUMBIGA1UECxMLVHJ1
+c3RDZW50ZXIxIDAeBgNVBAMTF1JTQSBTZWN1cml0eSBDTVAgU2VydmVyMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDCaZeJerGULW+1UPSu9T0voPNgzPcihXX6G5Q45nS4RNCe+pOc226EtD51
+wu6Eq2oARpZmCrKPn63EFmHEE04dRDr8MS2LHuZK8xslIx/AvPnV568795EPoAyhGIX9Na9ZHhnI
+zSPWmWfBd9bsQiLVF7C9dOvfW125mtywWXELewIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAAiIse/x
+aWwRWUM0CIzfnoXfrgyLdKVykK7dTPgoMJgAx229uN6VTPyk+E+lTKq9PhK+e/VJNNg9PjSFjKFd
+lfSDOi9ne1xOrb7cNTjw+sGf1mfNWyzizLXa7su7ISFN+GaClmAstH9vXsRxg1oh3pFMJv47I6iw
+gUQlwwg8WsY/MIIGPzCCBjsCAQAwAwIBADCCBi+gggYrMIIGJzCCBQ+gAwIBAgIRALGVtVAeoM1x
+gjgOX3alZ5MwDQYJKoZIhvcNAQEFBQAwXTELMAkGA1UEBhMCVFIxKDAmBgNVBAoMH0VsZWt0cm9u
+aWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xJDAiBgNVBAMMG2UtR3V2ZW4gTW9iaWwgVGVzdFVURi04
+LVNPTjAeFw0xMjA1MDMxMTE2MTdaFw0xMzA1MDMxMTE2MTdaMGoxCzAJBgNVBAYTAlRSMREwDwYD
+VQQKDAhGaXJlIExMVDEbMBkGA1UECwwScG9wQ29kZSAtIDEyMzQ1Njc4MRQwEgYDVQQFEws3NjU0
+MzQ1Njc2NTEVMBMGA1UEAwwMQnVyYWsgWW9uZGVtMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
+gQCpfSB7xcsHZR4E27yGHkzUJx1y2iknzX4gRM2acyPljRw/V5Lm7POrfWIX9UF2sxfYfRqxYmD0
++nw72nx8R/5AFQK0BfjHxIc5W1YekMHF8PSORo9rJqcX+qn+NBYwqcJl4EdObTcOtMWC6ws6n0uA
+oDvYYN0ujkua496sp+INiQIDAQABo4IDVzCCA1MwQgYIKwYBBQUHAQEENjA0MDIGCCsGAQUFBzAB
+hiZodHRwOi8vdGVzdG9jc3AyLmUtZ3V2ZW4uY29tL29jc3AueHVkYTAfBgNVHSMEGDAWgBSzKEyJ
+hCnl26jY22GZCrIUItr8MzCCAXIGA1UdIASCAWkwggFlMIGxBgZghhgDAAEwgaYwNgYIKwYBBQUH
+AgEWKmh0dHA6Ly93d3cuZS1ndXZlbi5jb20vZG9jdW1lbnRzL05FU1VFLnBkZjBsBggrBgEFBQcC
+AjBgGl5CdSBzZXJ0aWZpa2EsIDUwNzAgc2F5xLFsxLEgRWxla3Ryb25payDEsG16YSBLYW51bnVu
+YSBnw7ZyZSBuaXRlbGlrbGkgZWxla3Ryb25payBzZXJ0aWZpa2FkxLFyMIGuBglghhgDAAEBAQMw
+gaAwNwYIKwYBBQUHAgEWK2h0dHA6Ly93d3cuZS1ndXZlbi5jb20vZG9jdW1lbnRzL01LTkVTSS5w
+ZGYwZQYIKwYBBQUHAgIwWRpXQnUgc2VydGlmaWthLCBNS05FU0kga2Fwc2FtxLFuZGEgeWF5xLFu
+bGFubcSxxZ8gYmlyIG5pdGVsaWtsaSBlbGVrdHJvbmlrIHNlcnRpZmlrYWTEsXIuMA4GA1UdDwEB
+/wQEAwIGwDCBgwYIKwYBBQUHAQMEdzB1MAgGBgQAjkYBATBpBgtghhgBPQABp04BAQxaQnUgc2Vy
+dGlmaWthLCA1MDcwIHNheWlsaSBFbGVrdHJvbmlrIEltemEgS2FudW51bmEgZ8O2cmUgbml0ZWxp
+a2xpIGVsZWt0cm9uaWsgc2VydGlmaWthZGlyMEUGA1UdCQQ+MDwwFAYIKwYBBQUHCQIxCAQGQW5r
+YXJhMBIGCCsGAQUFBwkBMQYEBDE5NzkwEAYIKwYBBQUHCQQxBAQCVFIwGAYDVR0RBBEwD4ENZmly
+ZUBmaXJlLmNvbTBgBgNVHR8EWTBXMFWgU6BRhk9odHRwOi8vdGVzdHNpbC5lLWd1dmVuLmNvbS9F
+bGVrdHJvbmlrQmlsZ2lHdXZlbmxpZ2lBU01LTkVTSS1VVEYtOC9MYXRlc3RDUkwuY3JsMB0GA1Ud
+DgQWBBSLG9aIb1k2emFLCpM93kXJkWhzuTANBgkqhkiG9w0BAQUFAAOCAQEACoGCn4bzDWLzs799
+rndpB971UD2wbwt8Hkw1MGZkkJVQeVF4IS8FacAyYk5vY8ONuTA/Wsh4x23v9WTCtO89HMTz81eU
+BclqZ2Gc2UeMq7Y4FQWR8PNCMdCsxVVhpRRE6jQAyyR9YEBHQYVLfy34e3+9G/h/BR73VGHZJdZI
+DDJYd+VWXmUD9kGk/mI35qYdzN3O28KI8sokqX0z2hvkpDKuP4jNXSCHcVkK23tX2x5m6m0LdqVn
+vnCx2LfBn1wf1u7q30p/GgMVX+mR3QHs7feGewEjlkxuEyLVVD+uBwWCT6zcad17oaAyXV5RV28L
+vH0WNg6pFUpwOP0l+nIOqqCBhAOBgQBAtTB5Qd18sTxEKhSzRiN2OycFPrqoqlZZTHBohe8bE2D4
+Xc1ejkFWUEvQivkqJxCD6C7I37xgDaq8DZnaczIBxbPkY0QMdeL4MiEqlw/tlrJGrWoC5Twb0t/m
+JA5RSwQoMDYTj2WrwtM/nsP12T39or4JRZhlLSM43IaTwEBtQw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc4210.PKIMessage()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4211.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4211.py
new file mode 100644
index 0000000000..e9be4cc39d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4211.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4211
+
+
+class CertificateReqTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
+DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
+nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
+0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
+rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
+BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
+BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
+AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
+xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc4211.CertReqMessages()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for crm in asn1Object:
+ self.assertEqual(2, crm['certReq']['certTemplate']['version'])
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4334.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4334.py
new file mode 100644
index 0000000000..9ba5fdf339
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4334.py
@@ -0,0 +1,83 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4334
+
+
+class CertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICqzCCAjCgAwIBAgIJAKWzVCgbsG4/MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNzE5MTk0MjQ3WhcNMjAwNzE4MTk0MjQ3WjBjMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
+Z2lsIFNlY3VyaXR5IExMQzEYMBYGA1UEAxMPZWFwLmV4YW1wbGUuY29tMHYwEAYH
+KoZIzj0CAQYFK4EEACIDYgAEMMbnIp2BUbuyMgH9HhNHrh7VBy7ql2lBjGRSsefR
+Wa7+vCWs4uviW6On4eem5YoP9/UdO7DaIL+/J9/3DJHERI17oFxn+YWiE4JwXofy
+QwfSu3cncVNMqpiDjEkUGGvBo4HTMIHQMAsGA1UdDwQEAwIHgDBCBglghkgBhvhC
+AQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55
+IHB1cnBvc2UuMB0GA1UdDgQWBBSDjPGr7M742rsE4oQGwBvGvllZ+zAfBgNVHSME
+GDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAeBggrBgEFBQcBDQQSMBAEB0V4YW1w
+bGUEBUJvZ3VzMB0GA1UdJQQWMBQGCCsGAQUFBwMOBggrBgEFBQcDDTAKBggqhkjO
+PQQDAwNpADBmAjEAmCPZnnlUQOKlcOIIOgFrRCkOqO0ESs+dobYwAc2rFCBtQyP7
+C3N00xkX8WZZpiAZAjEAi1Z5+nGbJg5eJTc8fwudutN/HNwJEIS6mHds9kfcy26x
+DAlVlhox680Jxy5J8Pkx
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ if extn['extnID'] == rfc4334.id_pe_wlanSSID:
+ self.assertIn( str2octs('Example'), extnValue)
+
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(rfc4334.id_kp_eapOverLAN, extnValue)
+ self.assertIn(rfc4334.id_kp_eapOverPPP, extnValue)
+
+ self.assertIn(rfc4334.id_pe_wlanSSID, extn_list)
+ self.assertIn(rfc5280.id_ce_extKeyUsage, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ sys.exit(not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4357.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4357.py
new file mode 100644
index 0000000000..cf10d59d12
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4357.py
@@ -0,0 +1,248 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4357
+
+
+class SignedTestCase(unittest.TestCase):
+ signed_pem_text = """\
+MIIBKAYJKoZIhvcNAQcCoIIBGTCCARUCAQExDDAKBgYqhQMCAgkFADAbBgkqhkiG
+9w0BBwGgDgQMc2FtcGxlIHRleHQKMYHkMIHhAgEBMIGBMG0xHzAdBgNVBAMMFkdv
+c3RSMzQxMC0yMDAxIGV4YW1wbGUxEjAQBgNVBAoMCUNyeXB0b1BybzELMAkGA1UE
+BhMCUlUxKTAnBgkqhkiG9w0BCQEWGkdvc3RSMzQxMC0yMDAxQGV4YW1wbGUuY29t
+AhAr9cYewhG9F8fc1GJmtC4hMAoGBiqFAwICCQUAMAoGBiqFAwICEwUABEDAw0LZ
+P4/+JRERiHe/icPbg0IE1iD5aCqZ9v4wO+T0yPjVtNr74caRZzQfvKZ6DRJ7/RAl
+xlHbjbL0jHF+7XKp
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ encoded_null = der_encoder(univ.Null(""))
+
+ si = sd['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['signatureAlgorithm']['parameters'])
+ self.assertEqual(64, len(si['signature']))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ si = asn1Object['content']['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['signatureAlgorithm']['parameters'])
+
+ self.assertEqual(64, len(si['signature']))
+
+class KeyAgreeTestCase(unittest.TestCase):
+ keyagree_pem_text = """\
+MIIBpAYJKoZIhvcNAQcDoIIBlTCCAZECAQIxggFQoYIBTAIBA6BloWMwHAYGKoUD
+AgITMBIGByqFAwICJAAGByqFAwICHgEDQwAEQLNVOfRngZcrpcTZhB8n+4HtCDLm
+mtTyAHi4/4Nk6tIdsHg8ff4DwfQG5DvMFrnF9vYZNxwXuKCqx9GhlLOlNiChCgQI
+L/D20YZLMoowHgYGKoUDAgJgMBQGByqFAwICDQAwCQYHKoUDAgIfATCBszCBsDCB
+gTBtMR8wHQYDVQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlD
+cnlwdG9Qcm8xCzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAt
+MjAwMUBleGFtcGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuIQQqMCgEIBajHOfOTukN
+8ex0aQRoHsefOu24Ox8dSn75pdnLGdXoBAST/YZ+MDgGCSqGSIb3DQEHATAdBgYq
+hQMCAhUwEwQItzXhegc1oh0GByqFAwICHwGADDmxivS/qeJlJbZVyQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'],
+ asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'], )
+ param2, rest = der_decoder(
+ alg2['parameters'],
+ asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+
+class KeyTransportTestCase(unittest.TestCase):
+ keytrans_pem_text = """\
+MIIBpwYJKoZIhvcNAQcDoIIBmDCCAZQCAQAxggFTMIIBTwIBADCBgTBtMR8wHQYD
+VQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlDcnlwdG9Qcm8x
+CzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAtMjAwMUBleGFt
+cGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuITAcBgYqhQMCAhMwEgYHKoUDAgIkAAYH
+KoUDAgIeAQSBpzCBpDAoBCBqL6ghBpVon5/kR6qey2EVK35BYLxdjfv1PSgbGJr5
+dQQENm2Yt6B4BgcqhQMCAh8BoGMwHAYGKoUDAgITMBIGByqFAwICJAAGByqFAwIC
+HgEDQwAEQE0rLzOQ5tyj3VUqzd/g7/sx93N+Tv+/eImKK8PNMZQESw5gSJYf28dd
+Em/askCKd7W96vLsNMsjn5uL3Z4SwPYECJeV4ywrrSsMMDgGCSqGSIb3DQEHATAd
+BgYqhQMCAhUwEwQIvBCLHwv/NCkGByqFAwICHwGADKqOch3uT7Mu4w+hNw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'], asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2, rest = der_decoder(
+ alg2['parameters'], asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4387.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4387.py
new file mode 100644
index 0000000000..5c122254c8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4387.py
@@ -0,0 +1,84 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4387
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDLzCCArWgAwIBAgIJAKWzVCgbsG5JMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMTIyMDI1MzAzWhcNMjAxMTIxMDI1MzAzWjBZMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxGTAXBgNVBAMTEHJlcG8uZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUr
+gQQAIgNiAAS/J1NNkqicN432Uwlw+Gu4pLvYpSr2W8zJvCOy61ncEzKNIs4cxqSc
+N0rl6K32tNCQGCsQFaBK4wZKXbHpUEPWrfYAWYebYDOhMlOE/agxH3nZRRnYv4O7
+pGrk/YZamGijggFhMIIBXTALBgNVHQ8EBAMCB4AwQgYJYIZIAYb4QgENBDUWM1Ro
+aXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBwdXJwb3Nl
+LjAdBgNVHQ4EFgQUWDRoN3XtN1n8ZH+bQuSAsr42gQwwHwYDVR0jBBgwFoAU8jXb
+NATapVXyvWkDmbBi7OIVCMEwgckGCCsGAQUFBwEBBIG8MIG5MCQGCCsGAQUFBzAB
+hhhodHRwOi8vb2NzcC5leGFtcGxlLmNvbS8wMgYIKwYBBQUHMAKGJmh0dHA6Ly9y
+ZXBvLmV4YW1wbGUuY29tL2NhaXNzdWVycy5odG1sMC4GCCsGAQUFBzAGhiJodHRw
+Oi8vcmVwby5leGFtcGxlLmNvbS9jZXJ0cy5odG1sMC0GCCsGAQUFBzAHhiFodHRw
+Oi8vcmVwby5leGFtcGxlLmNvbS9jcmxzLmh0bWwwCgYIKoZIzj0EAwMDaAAwZQIw
+C9Y1McQ+hSEZLtzLw1xzk3QSQX6NxalySoIIoNXpcDrGZJcjLRunBg8G9B0hqG69
+AjEAxtzj8BkMvhb5d9DTKDVg5pmjl9z7UtRK87/LJM+EW/9+PAzB2IT3T+BPHKb4
+kjBJ
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid_list = [
+ rfc4387.id_ad_http_certs,
+ rfc4387.id_ad_http_crls,
+ ]
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_pe_authorityInfoAccess:
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.AuthorityInfoAccessSyntax())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for ad in extnValue:
+ if ad['accessMethod'] in oid_list:
+ uri = ad['accessLocation']['uniformResourceIdentifier']
+ self.assertIn('http://repo.example.com/c', uri)
+ count += 1
+
+ self.assertEqual(len(oid_list), count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4476.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4476.py
new file mode 100644
index 0000000000..b0a8fd3f9b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4476.py
@@ -0,0 +1,144 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc4476
+
+
+class AttributeCertificatePolicyTestCase(unittest.TestCase):
+ pem_text = """\
+MIID7zCCA1gCAQEwgY+gUTBKpEgwRjEjMCEGA1UEAwwaQUNNRSBJbnRlcm1lZGlh
+dGUgRUNEU0EgQ0ExCzAJBgNVBAYTAkZJMRIwEAYDVQQKDAlBQ01FIEx0ZC4CAx7N
+WqE6pDgwNjETMBEGA1UEAwwKQUNNRSBFQ0RTQTELMAkGA1UEBhMCRkkxEjAQBgNV
+BAoMCUFDTUUgTHRkLqBWMFSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkEx
+EDAOBgNVBAcMB0hlcm5kb24xIjAgBgNVBAoMGUJvZ3VzIEF0dHJpYnV0ZSBBdXRo
+b3RpdHkwDQYJKoZIhvcNAQELBQACBAu1MO4wIhgPMjAxOTEyMTUxMjAwMDBaGA8y
+MDE5MTIzMTEyMDAwMFowgfIwPAYIKwYBBQUHCgExMDAuhgt1cm46c2VydmljZaQV
+MBMxETAPBgNVBAMMCHVzZXJuYW1lBAhwYXNzd29yZDAyBggrBgEFBQcKAjEmMCSG
+C3VybjpzZXJ2aWNlpBUwEzERMA8GA1UEAwwIdXNlcm5hbWUwNQYIKwYBBQUHCgMx
+KTAnoBikFjAUMRIwEAYDVQQDDAlBQ01FIEx0ZC4wCwwJQUNNRSBMdGQuMCAGCCsG
+AQUFBwoEMRQwEjAQDAZncm91cDEMBmdyb3VwMjAlBgNVBEgxHjANoQuGCXVybjpy
+b2xlMTANoQuGCXVybjpyb2xlMjCCATkwHwYDVR0jBBgwFoAUgJCMhskAsEBzvklA
+X8yJBOXO500wCQYDVR04BAIFADA8BgNVHTcENTAzoAqGCHVybjp0ZXN0oBaCFEFD
+TUUtTHRkLmV4YW1wbGUuY29toA2GC3Vybjphbm90aGVyMIHMBggrBgEFBQcBDwSB
+vzCBvDCBuQYKKwYBBAGBrGAwCjCBqjBFBggrBgEFBQcCBBY5aHR0cHM6Ly93d3cu
+ZXhhbXBsZS5jb20vYXR0cmlidXRlLWNlcnRpZmljYXRlLXBvbGljeS5odG1sMGEG
+CCsGAQUFBwIFMFUwIwwZQm9ndXMgQXR0cmlidXRlIEF1dGhvcml0eTAGAgEKAgEU
+Gi5URVNUIGF0dHJpYnV0ZSBjZXJ0aWZpY2F0ZSBwb2xpY3kgZGlzcGxheSB0ZXh0
+MA0GCSqGSIb3DQEBCwUAA4GBACygfTs6TkPurZQTLufcE3B1H2707OXKsJlwRpuo
+dR2oJbunSHZ94jcJHs5dfbzFs6vNfVLlBiDBRieX4p+4JcQ2P44bkgyiUTJu7g1b
+6C1liB3vO6yH5hOZicOAaKd+c/myuGb9uFRoaXNfc2lnbmF0dXJlX2lzX2ludmFs
+aWQh
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5755.AttributeCertificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ found_ac_policy_qualifier1 = False
+ found_ac_policy_qualifier2 = False
+ for extn in asn1Object['acinfo']['extensions']:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+ if extn['extnID'] == rfc4476.id_pe_acPolicies:
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ oid = univ.ObjectIdentifier((1, 3, 6, 1, 4, 1, 22112, 48, 10,))
+ self.assertEqual(oid, ev[0]['policyIdentifier'])
+
+ for pq in ev[0]['policyQualifiers']:
+ self.assertIn(
+ pq['policyQualifierId'], rfc5280.policyQualifierInfoMap)
+
+ pqv, rest = der_decoder(
+ pq['qualifier'],
+ asn1Spec=rfc5280.policyQualifierInfoMap[
+ pq['policyQualifierId']])
+
+ self.assertFalse(rest)
+ self.assertTrue(pqv.prettyPrint())
+ self.assertEqual(pq['qualifier'], der_encoder(pqv))
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acps:
+ self.assertIn('example.com', pqv)
+ found_ac_policy_qualifier1 = True
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acunotice:
+ self.assertIn(20, pqv[0]['noticeNumbers'])
+ found_ac_policy_qualifier2 = True
+
+ assert found_ac_policy_qualifier1
+ assert found_ac_policy_qualifier2
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ found_ac_policy_qualifier1 = False
+ found_ac_policy_qualifier2 = False
+ for extn in asn1Object['acinfo']['extensions']:
+ if extn['extnID'] == rfc4476.id_pe_acPolicies:
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ oid = univ.ObjectIdentifier((1, 3, 6, 1, 4, 1, 22112, 48, 10,))
+ self.assertEqual(oid, ev[0]['policyIdentifier'])
+
+ for pq in ev[0]['policyQualifiers']:
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acps:
+ self.assertIn('example.com', pq['qualifier'])
+ found_ac_policy_qualifier1 = True
+
+ if pq['policyQualifierId'] == rfc4476.id_qt_acunotice:
+ self.assertIn(20, pq['qualifier'][0]['noticeNumbers'])
+ found_ac_policy_qualifier2 = True
+
+ assert found_ac_policy_qualifier1
+ assert found_ac_policy_qualifier2
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4490.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4490.py
new file mode 100644
index 0000000000..5c3b8cf844
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4490.py
@@ -0,0 +1,274 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4357
+from pyasn1_modules import rfc4490
+
+
+class SignedTestCase(unittest.TestCase):
+ signed_pem_text = """\
+MIIBKAYJKoZIhvcNAQcCoIIBGTCCARUCAQExDDAKBgYqhQMCAgkFADAbBgkqhkiG
+9w0BBwGgDgQMc2FtcGxlIHRleHQKMYHkMIHhAgEBMIGBMG0xHzAdBgNVBAMMFkdv
+c3RSMzQxMC0yMDAxIGV4YW1wbGUxEjAQBgNVBAoMCUNyeXB0b1BybzELMAkGA1UE
+BhMCUlUxKTAnBgkqhkiG9w0BCQEWGkdvc3RSMzQxMC0yMDAxQGV4YW1wbGUuY29t
+AhAr9cYewhG9F8fc1GJmtC4hMAoGBiqFAwICCQUAMAoGBiqFAwICEwUABEDAw0LZ
+P4/+JRERiHe/icPbg0IE1iD5aCqZ9v4wO+T0yPjVtNr74caRZzQfvKZ6DRJ7/RAl
+xlHbjbL0jHF+7XKp
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ encoded_null = der_encoder(univ.Null(""))
+
+ si = sd['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(encoded_null, si['signatureAlgorithm']['parameters'])
+
+ sig = rfc4490.GostR3410_2001_Signature()
+ sig = si['signature']
+ self.assertEqual(64, len(sig))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ si = asn1Object['content']['signerInfos'][0]
+ self.assertEqual(rfc4357.id_GostR3411_94, si['digestAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['digestAlgorithm']['parameters'])
+
+ self.assertEqual(rfc4357.id_GostR3410_2001, si['signatureAlgorithm']['algorithm'])
+ self.assertEqual(univ.Null(""), si['signatureAlgorithm']['parameters'])
+
+ sig = rfc4490.GostR3410_2001_Signature()
+ sig = si['signature']
+ self.assertEqual(64, len(sig))
+
+class KeyAgreeTestCase(unittest.TestCase):
+ keyagree_pem_text = """\
+MIIBpAYJKoZIhvcNAQcDoIIBlTCCAZECAQIxggFQoYIBTAIBA6BloWMwHAYGKoUD
+AgITMBIGByqFAwICJAAGByqFAwICHgEDQwAEQLNVOfRngZcrpcTZhB8n+4HtCDLm
+mtTyAHi4/4Nk6tIdsHg8ff4DwfQG5DvMFrnF9vYZNxwXuKCqx9GhlLOlNiChCgQI
+L/D20YZLMoowHgYGKoUDAgJgMBQGByqFAwICDQAwCQYHKoUDAgIfATCBszCBsDCB
+gTBtMR8wHQYDVQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlD
+cnlwdG9Qcm8xCzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAt
+MjAwMUBleGFtcGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuIQQqMCgEIBajHOfOTukN
+8ex0aQRoHsefOu24Ox8dSn75pdnLGdXoBAST/YZ+MDgGCSqGSIb3DQEHATAdBgYq
+hQMCAhUwEwQItzXhegc1oh0GByqFAwICHwGADDmxivS/qeJlJbZVyQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'],
+ asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4490.id_GostR3410_2001_CryptoPro_ESDH, alg2['algorithm'])
+ param2, rest = der_decoder(
+ alg2['parameters'], asn1Spec=rfc4357.AlgorithmIdentifier())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+
+ self.assertEqual(rfc4490.id_Gost28147_89_None_KeyWrap, param2['algorithm'])
+ kwa_p, rest = der_decoder(
+ param2['parameters'], asn1Spec=rfc4490.Gost28147_89_KeyWrapParameters())
+ self.assertFalse(rest)
+ self.assertTrue(kwa_p.prettyPrint())
+ self.assertEqual(param2['parameters'], der_encoder(kwa_p))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, kwa_p['encryptionParamSet'])
+
+ alg3 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg3['algorithm'])
+ param3, rest = der_decoder(alg3['parameters'], asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param3.prettyPrint())
+ self.assertEqual(alg3['parameters'], der_encoder(param3))
+ self.assertEqual(8, len(param3['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param3['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ rfc4490.id_GostR3410_2001_CryptoPro_ESDH: rfc5280.AlgorithmIdentifier(),
+ }
+
+ substrate = pem.readBase64fromText(self.keyagree_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ self.assertEqual(8, len(ri['kari']['ukm']))
+
+ alg2 = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4490.id_GostR3410_2001_CryptoPro_ESDH, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(rfc4490.id_Gost28147_89_None_KeyWrap, param2['algorithm'])
+ kwa_p = param2['parameters']
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, kwa_p['encryptionParamSet'])
+
+ alg3 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg3['algorithm'])
+ param3 = alg3['parameters']
+ self.assertEqual(8, len(param3['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param3['encryptionParamSet'])
+
+class KeyTransportTestCase(unittest.TestCase):
+ keytrans_pem_text = """\
+MIIBpwYJKoZIhvcNAQcDoIIBmDCCAZQCAQAxggFTMIIBTwIBADCBgTBtMR8wHQYD
+VQQDDBZHb3N0UjM0MTAtMjAwMSBleGFtcGxlMRIwEAYDVQQKDAlDcnlwdG9Qcm8x
+CzAJBgNVBAYTAlJVMSkwJwYJKoZIhvcNAQkBFhpHb3N0UjM0MTAtMjAwMUBleGFt
+cGxlLmNvbQIQK/XGHsIRvRfH3NRiZrQuITAcBgYqhQMCAhMwEgYHKoUDAgIkAAYH
+KoUDAgIeAQSBpzCBpDAoBCBqL6ghBpVon5/kR6qey2EVK35BYLxdjfv1PSgbGJr5
+dQQENm2Yt6B4BgcqhQMCAh8BoGMwHAYGKoUDAgITMBIGByqFAwICJAAGByqFAwIC
+HgEDQwAEQE0rLzOQ5tyj3VUqzd/g7/sx93N+Tv+/eImKK8PNMZQESw5gSJYf28dd
+Em/askCKd7W96vLsNMsjn5uL3Z4SwPYECJeV4ywrrSsMMDgGCSqGSIb3DQEHATAd
+BgYqhQMCAhUwEwQIvBCLHwv/NCkGByqFAwICHwGADKqOch3uT7Mu4w+hNw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ ri = ed['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1, rest = der_decoder(
+ alg1['parameters'], asn1Spec=rfc4357.GostR3410_2001_PublicKeyParameters())
+ self.assertFalse(rest)
+ self.assertTrue(param1.prettyPrint())
+ self.assertEqual(alg1['parameters'], der_encoder(param1))
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2, rest = der_decoder(
+ alg2['parameters'], asn1Spec=rfc4357.Gost28147_89_Parameters())
+ self.assertFalse(rest)
+ self.assertTrue(param2.prettyPrint())
+ self.assertEqual(alg2['parameters'], der_encoder(param2))
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4357.id_GostR3410_2001: rfc4357.GostR3410_2001_PublicKeyParameters(),
+ rfc4357.id_Gost28147_89: rfc4357.Gost28147_89_Parameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.keytrans_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ri = asn1Object['content']['recipientInfos'][0]
+ alg1 = ri['ktri']['keyEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_GostR3410_2001, alg1['algorithm'])
+ param1 = alg1['parameters']
+ self.assertEqual(rfc4357.id_GostR3410_2001_CryptoPro_XchA_ParamSet, param1['publicKeyParamSet'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, param1['digestParamSet'])
+
+ alg2 = asn1Object['content']['encryptedContentInfo']['contentEncryptionAlgorithm']
+ self.assertEqual(rfc4357.id_Gost28147_89, alg2['algorithm'])
+ param2 = alg2['parameters']
+ self.assertEqual(8, len(param2['iv']))
+ self.assertEqual(rfc4357.id_Gost28147_89_CryptoPro_A_ParamSet, param2['encryptionParamSet'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4491.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4491.py
new file mode 100644
index 0000000000..24b94a97ec
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4491.py
@@ -0,0 +1,156 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4491
+from pyasn1_modules import rfc4357
+
+
+class GostR341094CertificateTestCase(unittest.TestCase):
+ gostR3410_94_cert_pem_text = """\
+MIICCzCCAboCECMO42BGlSTOxwvklBgufuswCAYGKoUDAgIEMGkxHTAbBgNVBAMM
+FEdvc3RSMzQxMC05NCBleGFtcGxlMRIwEAYDVQQKDAlDcnlwdG9Qcm8xCzAJBgNV
+BAYTAlJVMScwJQYJKoZIhvcNAQkBFhhHb3N0UjM0MTAtOTRAZXhhbXBsZS5jb20w
+HhcNMDUwODE2MTIzMjUwWhcNMTUwODE2MTIzMjUwWjBpMR0wGwYDVQQDDBRHb3N0
+UjM0MTAtOTQgZXhhbXBsZTESMBAGA1UECgwJQ3J5cHRvUHJvMQswCQYDVQQGEwJS
+VTEnMCUGCSqGSIb3DQEJARYYR29zdFIzNDEwLTk0QGV4YW1wbGUuY29tMIGlMBwG
+BiqFAwICFDASBgcqhQMCAiACBgcqhQMCAh4BA4GEAASBgLuEZuF5nls02CyAfxOo
+GWZxV/6MVCUhR28wCyd3RpjG+0dVvrey85NsObVCNyaE4g0QiiQOHwxCTSs7ESuo
+v2Y5MlyUi8Go/htjEvYJJYfMdRv05YmKCYJo01x3pg+2kBATjeM+fJyR1qwNCCw+
+eMG1wra3Gqgqi0WBkzIydvp7MAgGBiqFAwICBANBABHHCH4S3ALxAiMpR3aPRyqB
+g1DjB8zy5DEjiULIc+HeIveF81W9lOxGkZxnrFjXBSqnjLeFKgF1hffXOAP7zUM=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.gostR3410_94_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_94, spki_a['algorithm'])
+
+ pk_p, rest = der_decoder(
+ spki_a['parameters'],
+ asn1Spec=rfc4491.GostR3410_94_PublicKeyParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(pk_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(pk_p))
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, pk_p['digestParamSet'])
+
+ def testOpenTypes(self):
+ openTypesMap = {
+ rfc4491.id_GostR3410_94: rfc4491.GostR3410_94_PublicKeyParameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.gostR3410_94_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypesMap, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_94, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_94, spki_a['algorithm'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, spki_a['parameters']['digestParamSet'])
+
+class GostR34102001CertificateTestCase(unittest.TestCase):
+ gostR3410_2001_cert_pem_text = """\
+MIIB0DCCAX8CECv1xh7CEb0Xx9zUYma0LiEwCAYGKoUDAgIDMG0xHzAdBgNVBAMM
+Fkdvc3RSMzQxMC0yMDAxIGV4YW1wbGUxEjAQBgNVBAoMCUNyeXB0b1BybzELMAkG
+A1UEBhMCUlUxKTAnBgkqhkiG9w0BCQEWGkdvc3RSMzQxMC0yMDAxQGV4YW1wbGUu
+Y29tMB4XDTA1MDgxNjE0MTgyMFoXDTE1MDgxNjE0MTgyMFowbTEfMB0GA1UEAwwW
+R29zdFIzNDEwLTIwMDEgZXhhbXBsZTESMBAGA1UECgwJQ3J5cHRvUHJvMQswCQYD
+VQQGEwJSVTEpMCcGCSqGSIb3DQEJARYaR29zdFIzNDEwLTIwMDFAZXhhbXBsZS5j
+b20wYzAcBgYqhQMCAhMwEgYHKoUDAgIkAAYHKoUDAgIeAQNDAARAhJVodWACGkB1
+CM0TjDGJLP3lBQN6Q1z0bSsP508yfleP68wWuZWIA9CafIWuD+SN6qa7flbHy7Df
+D2a8yuoaYDAIBgYqhQMCAgMDQQA8L8kJRLcnqeyn1en7U23Sw6pkfEQu3u0xFkVP
+vFQ/3cHeF26NG+xxtZPz3TaTVXdoiYkXYiD02rEx1bUcM97i
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.gostR3410_2001_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_2001, spki_a['algorithm'])
+
+ pk_p, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc4491.GostR3410_2001_PublicKeyParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(pk_p.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(pk_p))
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, pk_p['digestParamSet'])
+
+ def testOpenTypes(self):
+ openTypeMap = {
+ rfc4491.id_GostR3410_2001: rfc4491.GostR3410_2001_PublicKeyParameters(),
+ }
+
+ substrate = pem.readBase64fromText(self.gostR3410_2001_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ openTypes=openTypeMap, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sa1 = asn1Object['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa1)
+
+ sa2 = asn1Object['tbsCertificate']['signature']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3411_94_with_GostR3410_2001, sa2)
+
+ spki_a = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+ self.assertEqual(rfc4491.id_GostR3410_2001, spki_a['algorithm'])
+ self.assertEqual(rfc4357.id_GostR3411_94_CryptoProParamSet, spki_a['parameters']['digestParamSet'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4683.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4683.py
new file mode 100644
index 0000000000..7935ad8f6d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4683.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4683
+
+
+class SIMCertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIDOzCCAsCgAwIBAgIJAKWzVCgbsG5KMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMjExMjIzODUwWhcNMjAxMjEwMjIzODUwWjBOMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxDjAMBgNVBAMTBUhlbnJ5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEZj80
+YyLeDb0arJY8ZxBUMMxPEMT9+5WFVBCC1dPpUn25MmEpb82Dz1inv3xmG6sFKIHj
+achlvkNGDXTUzZ1DdCF0O7gU5Z+YctwczGQVSt/2Ox0NWTiHLDpbpyoTyK0Bo4IB
+dzCCAXMwHQYDVR0OBBYEFOjxtcL2ucMoTjS5MNKKpdKzXtz/MG8GA1UdIwRoMGaA
+FPI12zQE2qVV8r1pA5mwYuziFQjBoUOkQTA/MQswCQYDVQQGEwJVUzELMAkGA1UE
+CAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBggkA6JHW
+BpFPzvIwDwYDVR0TAQH/BAUwAwEB/zALBgNVHQ8EBAMCAYYwQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjB/BgNVHREEeDB2oGEGCCsGAQUFBwgGoFUwUzANBglghkgBZQMEAgEF
+AAQgnrmI6yL2lM5kmfLVn28A8PVIVgE2S7HEFtfLExhg7HsEIOaAn/Pq8hb4qn/K
+imN3uyZrjAv3Uspg0VYEcetJdHSCgRFoZW5yeUBleGFtcGxlLmNvbTAKBggqhkjO
+PQQDAwNpADBmAjEAiWhD493OGnqfdit6SRdBjn3N6HVaMxyVO0Lfosjf9+9FDWad
+rYt3o64YQqGz9NTMAjEAmahE0EMiu/TyzRDidlG2SxmY2aHg9hQO0t38i1jInJyi
+9LjB81zHEL6noTgBZsan
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_PEPSI = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ gn_on = gn['otherName']
+ if gn_on['type-id'] == rfc4683.id_on_SIM:
+ self.assertIn(
+ gn_on['type-id'], rfc5280.anotherNameMap)
+
+ spec = rfc5280.anotherNameMap[gn_on['type-id']]
+
+ on, rest = der_decoder(
+ gn_on['value'], asn1Spec=spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(on.prettyPrint())
+ self.assertEqual(gn_on['value'], der_encoder(on))
+
+ self.assertEqual(
+ 'e6809ff3ea', on['pEPSI'].prettyPrint()[2:12])
+
+ found_PEPSI = True
+
+ self.assertTrue(found_PEPSI)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_PEPSI = False
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ pepsi = gn['otherName']['value']['pEPSI']
+ self.assertEqual(
+ 'e6809ff3ea', pepsi.prettyPrint()[2:12])
+
+ found_PEPSI = True
+
+ self.assertTrue(found_PEPSI)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc4985.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc4985.py
new file mode 100644
index 0000000000..b261ef92af
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc4985.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4985
+
+
+class XMPPCertificateTestCase(unittest.TestCase):
+ xmpp_server_cert_pem_text = """\
+MIIC6DCCAm+gAwIBAgIJAKWzVCgbsG5DMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDI0MjMxNjA0WhcNMjAxMDIzMjMxNjA0WjBNMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xHzAdBgNVBAoTFkV4
+YW1wbGUgUHJvZHVjdHMsIEluYy4wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQZzQlk
+03nJRPF6+w1NxFELmQ5vJTjTRz3eu03CRtahK4Wnwd4GwbDe8NVHAEG2qTzBXFDu
+p6RZugsBdf9GcEZHG42rThYYOzIYzVFnI7tQgA+nTWSWZN6eoU/EXcknhgijggEn
+MIIBIzAdBgNVHQ4EFgQUkQpUMYcbUesEn5buI03POFnktJgwHwYDVR0jBBgwFoAU
+8jXbNATapVXyvWkDmbBi7OIVCMEwCwYDVR0PBAQDAgeAMIGPBgNVHREEgYcwgYSg
+KQYIKwYBBQUHCAegHRYbX3htcHAtY2xpZW50LmltLmV4YW1wbGUuY29toCkGCCsG
+AQUFBwgHoB0WG194bXBwLXNlcnZlci5pbS5leGFtcGxlLmNvbaAcBggrBgEFBQcI
+BaAQDA5pbS5leGFtcGxlLmNvbYIOaW0uZXhhbXBsZS5jb20wQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjAKBggqhkjOPQQDAwNnADBkAjAEo4mhDGC6/R39HyNgzLseNAp36qBH
+yQJ/AWsBojN0av8akeVv9IuM45yqLKdiCzcCMDCjh1lFnCvurahwp5D1j9pAZMsg
+nOzhcMpnHs2U/eN0lHl/JNgnbftl6Dvnt59xdA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ gn_on = gn['otherName']
+ if gn_on['type-id'] == rfc4985.id_on_dnsSRV:
+ self.assertIn(gn_on['type-id'], rfc5280.anotherNameMap)
+
+ spec = rfc5280.anotherNameMap[gn['otherName']['type-id']]
+ on, rest = der_decoder(gn_on['value'], asn1Spec=spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(on.prettyPrint())
+ self.assertEqual(gn_on['value'], der_encoder(on))
+ self.assertIn('im.example.com', on)
+
+ count += 1
+
+ self.assertEqual(2, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ if gn['otherName']['type-id'] == rfc4985.id_on_dnsSRV:
+ self.assertIn('im.example.com', gn['otherName']['value'])
+ count += 1
+
+ self.assertEqual(2, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5035.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5035.py
new file mode 100644
index 0000000000..196a6e4618
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5035.py
@@ -0,0 +1,192 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5035
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIFzAYJKoZIhvcNAQcCoIIFvTCCBbkCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggLQ
+MIICzAIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKgggH1MBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8X
+DTE5MDUyOTE4MjMxOVowJQYLKoZIhvcNAQkQAgcxFgQUAbWZQYhLO5wtUgsOCGtT
+4V3aNhUwLwYLKoZIhvcNAQkQAgQxIDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZI
+hvcNAQcBMDUGCyqGSIb3DQEJEAICMSYxJAIBAQYKKwYBBAGBrGABARMTQm9hZ3Vz
+IFByaXZhY3kgTWFyazA/BgkqhkiG9w0BCQQxMgQwtuQipP2CZx7U96rGbUT06LC5
+jVFYccZW5/CaNvpcrOPiChDm2vI3m4k300z5mSZsME0GCyqGSIb3DQEJEAIBMT4w
+PAQgx08hD2QnVwj1DoeRELNtdZ0PffW4BQIvcwwVc/goU6OAAQEwFTATgRFhbGlj
+ZUBleGFtcGxlLmNvbTCBmwYLKoZIhvcNAQkQAi8xgYswgYgwdjB0BCACcp04gyM2
+dTDg+0ydCwlucr6Mg8Wd3J3c9V+iLHsnZzBQMEOkQTA/MQswCQYDVQQGEwJVUzEL
+MAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENB
+AgkApbNUKBuwbjswDjAMBgorBgEEAYGsYAEBMAoGCCqGSM49BAMDBGcwZQIxAO3K
+D9YjFTKE3p383VVw/ol79WTVoMea4H1+7xn+3E1XO4oyb7qwQz0KmsGfdqWptgIw
+T9yMtRLN5ZDU14y+Phzq9NKpSw/x5KyXoUKjCMc3Ru6dIW+CgcRQees+dhnvuD5U
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder (substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+
+class SignedReceiptTestCase(unittest.TestCase):
+ signed_receipt_pem_text = """\
+MIIE3gYJKoZIhvcNAQcCoIIEzzCCBMsCAQMxDTALBglghkgBZQMEAgEwga4GCyqGSIb3DQEJ
+EAEBoIGeBIGbMIGYAgEBBgkqhkiG9w0BBwEEIMdPIQ9kJ1cI9Q6HkRCzbXWdD331uAUCL3MM
+FXP4KFOjBGYwZAIwOLV5WCbYjy5HLHE69IqXQQHVDJQzmo18WwkFrEYH3EMsvpXEIGqsFTFN
+6NV4VBe9AjA5fGOCP5IhI32YqmGfs+zDlqZyb2xSX6Gr/IfCIm0angfOI39g7lAZDyivjh5H
+/oSgggJ3MIICczCCAfqgAwIBAgIJAKWzVCgbsG48MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0Ew
+HhcNMTkwNTI5MTkyMDEzWhcNMjAwNTI4MTkyMDEzWjBsMQswCQYDVQQGEwJVUzELMAkGA1UE
+CBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1wbGUxDDAKBgNVBAMTA0Jv
+YjEeMBwGCSqGSIb3DQEJARYPYm9iQGV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACID
+YgAEMaRiVS8WvN8Ycmpfq75jBbOMUukNfXAg6AL0JJBXtIFAuIJcZVlkLn/xbywkcMLHK/O+
+w9RWUQa2Cjw+h8b/1Cl+gIpqLtE558bD5PfM2aYpJ/YE6yZ9nBfTQs7z1TH5o4GUMIGRMAsG
+A1UdDwQEAwIHgDBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUg
+dHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1UdDgQWBBTKa2Zy3iybV3+YjuLDKtNmjsIa
+pTAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNnADBkAjAV
+boS6OfEYQomLDi2RUkd71hzwwiQZztbxNbosahIzjR8ZQaHhjdjJlrP/T6aXBwsCMDfRweYz
+3Ce4E4wPfoqQnvqpM7ZlfhstjQQGOsWAtIIfqW/l+TgCO8ux3XLV6fj36zGCAYkwggGFAgEB
+MEwwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYD
+VQQKDAhCb2d1cyBDQQIJAKWzVCgbsG48MAsGCWCGSAFlAwQCAaCBrjAaBgkqhkiG9w0BCQMx
+DQYLKoZIhvcNAQkQAQEwHAYJKoZIhvcNAQkFMQ8XDTE5MDUyOTE5MzU1NVowLwYJKoZIhvcN
+AQkEMSIEIGb9Hm2kCnM0CYNpZU4Uj7dN0AzOieIn9sDqZMcIcZrEMEEGCyqGSIb3DQEJEAIF
+MTIEMBZzeHVja7fQ62ywyh8rtKzBP1WJooMdZ+8c6pRqfIESYIU5bQnH99OPA51QCwdOdjAK
+BggqhkjOPQQDAgRoMGYCMQDZiT22xgab6RFMAPvN4fhWwzx017EzttD4VaYrpbolropBdPJ6
+jIXiZQgCwxbGTCwCMQClaQ9K+L5LTeuW50ZKSIbmBZQ5dxjtnK3OlS7hYRi6U0JKZmWbbuS8
+vFIgX7eIkd8=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+ self.assertEqual(
+ rfc5035.id_ct_receipt, sd['encapContentInfo']['eContentType'])
+
+ receipt, rest = der_decoder(
+ sd['encapContentInfo']['eContent'], asn1Spec=rfc5035.Receipt())
+
+ self.assertFalse(rest)
+ self.assertTrue(receipt.prettyPrint())
+ self.assertEqual(
+ sd['encapContentInfo']['eContent'], der_encoder(receipt))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat in rfc5652.cmsAttributesMap.keys():
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_receipt_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd = asn1Object['content']
+
+ self.assertEqual(
+ rfc5652.CMSVersion().subtype(value='v3'), sd['version'])
+ self.assertIn(
+ sd['encapContentInfo']['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(
+ rfc5035.id_ct_receipt, sd['encapContentInfo']['eContentType'])
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+ if sa['attrType'] == rfc5035.id_aa_msgSigDigest:
+ self.assertIn(
+ '0x167378', sa['attrValues'][0].prettyPrint()[:10])
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ receipt, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd['encapContentInfo']['eContentType']])
+
+ self.assertEqual(1, receipt['version'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5083.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5083.py
new file mode 100644
index 0000000000..e2eb17274a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5083.py
@@ -0,0 +1,95 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2018, 2019 Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5035
+
+
+class AuthEnvelopedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIICdQIBADGCAiekggIjBgsqhkiG9w0BCRANATCCAhICAQAEE3B0Zi1rbWM6MTM2MTQxMjIx
+MTIwDQYLKoZIhvcNAQkQAzAwCwYJYIZIAWUDBAEtMIIBsDCCAawCAQKAFJ7rZ8m5WnTUTS8W
+OWaA6AG1y6ScMA0GCSqGSIb3DQEBAQUABIIBgHfnHNqDbyyql2NqX6UQggelWMTjwzJJ1L2e
+rbsj1bIAGmpIsUijw+fX8VOS7v1C9ui2Md9NFgCfkmKLo8T/jELqrk7MpMu09G5zDgeXzJfQ
+DFc115wbrWAUU3XP7XIb6TNOc3xtq4UxA5V6jNUK2XyWKpjzOtM7gm0VWIJGVVlYu+u32LQc
+CjRFb87kvOY/WEnjxQpCW8g+4V747Ud97dYpMub7TLJiRNZkdHnq8xEGKlXjVHSgc10lhphe
+1kFGeCpfJEsqjtN7YsVzf65ri9Z+3FJ1IO4cnMDbzGhyRXkS7a0k58/miJbSj88PvzKNSURw
+pu4YHMQQX/mjT2ey1SY4ihPMuxxgTdCa04L0UxaRr7xAucz3n2UWShelm3IIjnWRlYdXypnX
+vKvwCLoeh5mJwUl1JNFPCQkQ487cKRyobUyNgXQKT4ZDHCgXciwsX5nTsom87Ixp5vqSDJ+D
+hXA0r/Caiu1vnY5X9GLHSkqgXkgqgUuu0LfcsQERD8psfQQogbiuZDqJmYt1Iau/pkuGfmee
+qeiM3aeQ4NZf9AFZUVWBGArPNHrvVDA3BgkqhkiG9w0BBwEwGwYJYIZIAWUDBAEuMA4EDMr+
+ur76ztut3sr4iIANmvLRbyFUf87+2bPvLQQMoOWSXMGE4BckY8RM
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5083.AuthEnvelopedData()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class AuthEnvelopedDataOpenTypesTestCase(unittest.TestCase):
+ pem_text = """\
+MIICvQYLKoZIhvcNAQkQARegggKsMIICqAIBADGCAiekggIjBgsqhkiG9w0BCRAN
+ATCCAhICAQAEE3B0Zi1rbWM6MTM2MTQxMjIxMTIwDQYLKoZIhvcNAQkQAzAwCwYJ
+YIZIAWUDBAEtMIIBsDCCAawCAQKAFJ7rZ8m5WnTUTS8WOWaA6AG1y6ScMA0GCSqG
+SIb3DQEBAQUABIIBgHfnHNqDbyyql2NqX6UQggelWMTjwzJJ1L2erbsj1bIAGmpI
+sUijw+fX8VOS7v1C9ui2Md9NFgCfkmKLo8T/jELqrk7MpMu09G5zDgeXzJfQDFc1
+15wbrWAUU3XP7XIb6TNOc3xtq4UxA5V6jNUK2XyWKpjzOtM7gm0VWIJGVVlYu+u3
+2LQcCjRFb87kvOY/WEnjxQpCW8g+4V747Ud97dYpMub7TLJiRNZkdHnq8xEGKlXj
+VHSgc10lhphe1kFGeCpfJEsqjtN7YsVzf65ri9Z+3FJ1IO4cnMDbzGhyRXkS7a0k
+58/miJbSj88PvzKNSURwpu4YHMQQX/mjT2ey1SY4ihPMuxxgTdCa04L0UxaRr7xA
+ucz3n2UWShelm3IIjnWRlYdXypnXvKvwCLoeh5mJwUl1JNFPCQkQ487cKRyobUyN
+gXQKT4ZDHCgXciwsX5nTsom87Ixp5vqSDJ+DhXA0r/Caiu1vnY5X9GLHSkqgXkgq
+gUuu0LfcsQERD8psfQQogbiuZDqJmYt1Iau/pkuGfmeeqeiM3aeQ4NZf9AFZUVWB
+GArPNHrvVDA3BgkqhkiG9w0BBwEwGwYJYIZIAWUDBAEuMA4EDMr+ur76ztut3sr4
+iIANmvLRbyFUf87+2bPvLQQMoOWSXMGE4BckY8RMojEwLwYLKoZIhvcNAQkQAgQx
+IDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZIhvcNAQcB
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ authenv = asn1Object['content']
+
+ self.assertEqual(0, authenv['version'])
+
+ for attr in authenv['unauthAttrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc5035.id_aa_contentHint:
+ self.assertIn(
+ 'Watson', attr['attrValues'][0]['contentDescription'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5084.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5084.py
new file mode 100644
index 0000000000..c8ad0c29cb
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5084.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2018, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5084
+from pyasn1_modules import rfc5652
+
+
+class CCMParametersTestCase(unittest.TestCase):
+ ccm_pem_text = "MBEEDE2HVyIurFKUEX8MEgIBBA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5084.CCMParameters()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ccm_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class GCMParametersTestCase(unittest.TestCase):
+ gcm_pem_text = "MBEEDE2HVyIurFKUEX8MEgIBEA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5084.GCMParameters()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.gcm_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class GCMOpenTypesTestCase(unittest.TestCase):
+ rfc8591_pem_pext = """\
+MIIHkAYLKoZIhvcNAQkQARegggd/MIIHewIBADGCAk8wggJLAgEAMDMwJjEUMBIGA1UECgwL
+ZXhhbXBsZS5jb20xDjAMBgNVBAMMBUFsaWNlAgkAg/ULtwvVxA4wDQYJKoZIhvcNAQEBBQAE
+ggIAdZphtN3x8a8kZoAFY15HYRD6JyPBueRUhLbTPoOH3pZ9xeDK+zVXGlahl1y1UOe+McEx
+2oD7cxAkhFuruNZMrCYEBCTZMwVhyEOZlBXdZEs8rZUHL3FFE5PJnygsSIO9DMxd1UuTFGTg
+Cm5V5ZLFGmjeEGJRbsfTyo52S7iseJqIN3dl743DbApu0+yuUoXKxqKdUFlEVxmhvc+Qbg/z
+fiwu8PTsYiUQDMBi4cdIlju8iLjj389xQHNyndXHWD51is89GG8vpBe+IsN8mnbGtCcpqtJ/
+c65ErJhHTR7rSJSMEqQD0LPOCKIY1q9FaSSJfMXJZk9t/rPxgUEVjfw7hAkKpgOAqoZRN+Fp
+nFyBl0FnnXo8kLp55tfVyNibtUpmdCPkOwt9b3jAtKtnvDQ2YqY1/llfEUnFOVDKwuC6MYwi
+fm92qNlAQA/T0+ocjs6gA9zOLx+wD1zqM13hMD/L+T2OHL/WgvGb62JLrNHXuPWA8RShO4kI
+lPtARKXap2S3+MX/kpSUUrNa65Y5uK1jwFFclczG+CPCIBBn6iJiQT/vOX1I97YUP4Qq6OGk
+jK064Bq6o8+e5+NmIOBcygYRv6wA7vGkmPLSWbnw99qD728bBh84fC3EjItdusqGIwjzL0eS
+UWXJ5eu0Z3mYhJGN1pe0R/TEB5ibiJsMLpWAr3gwggUPBgkqhkiG9w0BBwEwHgYJYIZIAWUD
+BAEGMBEEDE2HVyIurFKUEX8MEgIBEICCBOD+L7PeC/BpmMOb9KlS+r+LD+49fi6FGBrs8aie
+Gi7ezZQEiFYS38aYQzTYYCt3SbJQTkX1fDsGZiaw/HRiNh7sJnxWATm+XNKGoq+Wls9RhSJ4
+5Sw4GMqwpoxZjeT84UozOITk3l3fV+3XiGcCejHkp8DAKZFExd5rrjlpnnAOBX6w8NrXO4s2
+n0LrMhtBU4eB2YKhGgs5Q6wQyXtU7rc7OOwTGvxWEONzSHJ01pyvqVQZAohsZPaWLULrM/kE
+GkrhG4jcaVjVPfULi7Uqo14imYhdCq5Ba4bwqI0Ot6mB27KD6LlOnVC/YmXCNIoYoWmqy1o3
+pSm9ovnLEO/dzxQjEJXYeWRje9M/sTxotM/5oZBpYMHqIwHTJbehXFgp8+oDjyTfayMYA3fT
+cTH3XbGPQfnYW2U9+ka/JhcSYybM8cuDNFd1I1LIQXoJRITXtkvPUbJqm+s6DtS5yvG9I8aQ
+xlT365zphS4vbQaO74ujO8bE3dynrvTTV0c318TcHpN3DY9PIt6mHXMIPDLEA4wes90zg6ia
+h5XiQcLtfLaAdYwEEGlImGD8n0kOhSNgclSLMklpj5mVOs8exli3qoXlVMRJcBptSwOe0QPc
+RY30spywS4zt1UDIQ0jaecGGVtUYj586nkubhAxwZkuQKWxgt6yYTpGNSKCdvd+ygfyGJRDb
+Wdn6nck/EPnG1773KTHRhMrXrBPBpSlfyJ/ju3644CCFqCjFoTh4bmB63k9ejUEVkJIJuoeK
+eTBaUxbCIinkK4htBkgchHP51RJp4q9jQbziD3aOhg13hO1GFQ4E/1DNIJxbEnURNp/ga8Sq
+mnLY8f5Pzwhm1mSzZf+obowbQ+epISrswWyjUKKO+uJfrAVN2TS/5+X6T3U6pBWWjH6+xDng
+rAJwtIdKBo0iSEwJ2eir4X8TcrSy9l8RSOiTPtqS5dF3RWSWOzkcO72fHCf/42+DLgUVX8Oe
+5mUvp7QYiXXsXGezLJ8hPIrGuOEypafDv3TwFkBc2MIB0QUhk+GG1ENY3jiNcyEbovF5Lzz+
+ubvechHSb1arBuEczJzN4riM2Dc3c+r8N/2Ft6eivK7HUuYX1uAcArhunZpA8yBGLF1m+DUX
+FtzWAUvfMKYPdfwGMckghF7YwLrTXd8ZhPIkHNO1KdwQKIRfgIlUPfTxRB7eNrG/Ma9a/Iwr
+cI1QtkXU59uIZIw+7+FHZRWPsOjTu1Pdy+JtcSTG4dmS+DIwqpUzdu6MaBCVaOhXHwybvaSP
+TfMG/nR/NxF1FI8xgydnzXZs8HtFDL9iytKnvXHx+IIz8Rahp/PK8S80vPQNIeef/JgnIhto
+sID/A614LW1tB4cWdveYlD5U8T/XXInAtCY78Q9WJD+ecu87OJmlOdmjrFvitpQAo8+NGWxc
+7Wl7LtgDuYel7oXFCVtI2npbA7R+K5/kzUvDCY6GTgzn1Gfamc1/Op6Ue17qd/emvhbIx+ng
+3swf8TJVnCNDIXucKVA4boXSlCEhCGzfoZZYGVvm1/hrypiBtpUIKWTxLnz4AQJdZ5LGiCQJ
+QU1wMyHsg6vWmNaJVhGHE6D/EnKsvJptFIkAx0wWkh35s48p7EbU8QBg//5eNru6yvLRutfd
+BX7T4w681pCD+dOiom75C3UdahrfoFkNsZ2hB88+qNsEEPb/xuGu8ZzSPZhakhl2NS0=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.rfc8591_pem_pext)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ aed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5083.AuthEnvelopedData(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(aed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(aed))
+ self.assertEqual(0, aed['version'])
+
+ cea = aed['authEncryptedContentInfo']['contentEncryptionAlgorithm']
+
+ self.assertEqual(rfc5084.id_aes128_GCM, cea['algorithm'])
+ self.assertEqual(16, cea['parameters']['aes-ICVlen'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5126.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5126.py
new file mode 100644
index 0000000000..e43af9a33a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5126.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5126
+
+
+class SignedAttributesTestCase(unittest.TestCase):
+ pem_text = """\
+MYIBUzAYBgkqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMCsGCSqGSIb3DQEJNDEeMBww
+DQYJYIZIAWUDBAIBBQChCwYJKoZIhvcNAQELMC8GCSqGSIb3DQEJBDEiBCCyqtCC
+Gosj/GT4YPPAqKheze4A1QBU5O3tniTsVPGr7jBBBgsqhkiG9w0BCRACETEyMDCg
+BBMCVVOhBBMCVkGiIjAgExExMjMgU29tZXBsYWNlIFdheRMLSGVybmRvbiwgVkEw
+RgYLKoZIhvcNAQkQAi8xNzA1MDMwMTANBglghkgBZQMEAgEFAAQgJPmqUmGQnQ4q
+RkVtUHecJXIkozOzX8+pZQj/UD5JcnQwTgYLKoZIhvcNAQkQAg8xPzA9BgorBgEE
+AYGsYDAUMC8wCwYJYIZIAWUDBAIBBCDWjjVmAeXgZBkE/rG8Pf8pTCs4Ikowc8Vm
+l+AOeKdFgg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.SignedAttributes()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_spid_oid = False
+
+ for attr in asn1Object:
+ if attr['attrType'] in rfc5652.cmsAttributesMap.keys():
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc5126.id_aa_ets_sigPolicyId:
+ spid_oid = rfc5126.SigPolicyId('1.3.6.1.4.1.22112.48.20')
+
+ self.assertEqual(
+ spid_oid, av['signaturePolicyId']['sigPolicyId'])
+
+ found_spid_oid = True
+
+ self.assertTrue(found_spid_oid)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ attr_type_list = []
+ spid_oid = rfc5126.SigPolicyId('1.3.6.1.4.1.22112.48.20')
+
+ for attr in asn1Object:
+ if attr['attrType'] == rfc5126.id_aa_ets_sigPolicyId:
+ spid = attr['attrValues'][0]['signaturePolicyId']
+ self.assertEqual(spid_oid, spid['sigPolicyId'])
+ attr_type_list.append(rfc5126.id_aa_ets_sigPolicyId)
+
+ if attr['attrType'] == rfc5126.id_aa_ets_signerLocation:
+ cn = attr['attrValues'][0]['countryName']
+ self.assertEqual('US', cn['printableString'])
+ attr_type_list.append(rfc5126.id_aa_ets_signerLocation)
+
+ if attr['attrType'] == rfc5126.id_aa_signingCertificateV2:
+ ha = attr['attrValues'][0]['certs'][0]['hashAlgorithm']
+ self.assertEqual(rfc4055.id_sha256, ha['algorithm'])
+ attr_type_list.append(rfc5126.id_aa_signingCertificateV2)
+
+ self.assertIn(rfc5126.id_aa_ets_sigPolicyId, attr_type_list)
+ self.assertIn(rfc5126.id_aa_ets_signerLocation, attr_type_list)
+ self.assertIn(rfc5126.id_aa_signingCertificateV2, attr_type_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5208.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5208.py
new file mode 100644
index 0000000000..4bb684fd24
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5208.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5208
+
+
+class PrivateKeyInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVgIBADANBgkqhkiG9w0BAQEFAASCAUAwggE8AgEAAkEAx8CO8E0MNgEKXXDf
+I1xqBmQ+Gp3Srkqp45OApIu4lZ97n5VJ5HljU9wXcPIfx29Le3w8hCPEkugpLsdV
+GWx+EQIDAQABAkEAiv3f+DGEh6ddsPszKQXK+LuTwy2CRajKYgJnBxf5zpG50XK4
+899An+x/pGYVmVED1f0JCbk3BUbv7HViLq0qgQIhAOYlQJaQ8KJBijDpjF62lcVr
+QrqFPM4+ZrHsw0dVY2CZAiEA3jE5ngkVPfjFWEr7wS50EJhGiYlQeY4l+hADGIhd
+XDkCIQDIHt5xzmif/nOGop5/gS7ssp8ch1zfTh2IW4NWlOZMCQIgLZmYo5BlpaRK
+jAZHiKwJ8eXuhAeEVo4PyTREDmLeFjECIQCfyUPDclPo2O8ycPpozwoGwvKFrNZJ
+VWRpRKqYnOAIXQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5208.PrivateKeyInfo()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class EncryptedPrivateKeyInfoInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBgTAbBgkqhkiG9w0BBQMwDgQIdtFgDWnipT8CAggABIIBYN0hkm2xqkTCt8dJ
+iZS8+HNiyHxy8g+rmWSXv/i+bTHFUReZA2GINtTRUkWpXqWcSHxNslgf7QdfgbVJ
+xQiUM+lLhwOFh85iAHR3xmPU1wfN9NvY9DiLSpM0DMhF3OvAMZD75zIhA0GSKu7w
+dUu7ey7H4fv7bez6RhEyLdKw9/Lf2KNStNOs4ow9CAtCoxeoMSniTt6CNhbvCkve
+9vNHKiGavX1tS/YTog4wiiGzh2YxuW1RiQpTdhWiKyECgD8qQVg2tY5t3QRcXrzi
+OkStpkiAPAbiwS/gyHpsqiLo0al63SCxRefugbn1ucZyc5Ya59e3xNFQXCNhYl+Z
+Hl3hIl3cssdWZkJ455Z/bBE29ks1HtsL+bTfFi+kw/4yuMzoaB8C7rXScpGNI/8E
+pvTU2+wtuoOFcttJregtR94ZHu5wgdYqRydmFNG8PnvZT1mRMmQgUe/vp88FMmsZ
+dLsZjNQ=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5208.EncryptedPrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5275.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5275.py
new file mode 100644
index 0000000000..30bce8f314
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5275.py
@@ -0,0 +1,190 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5275
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+
+
+class GLUseKEKTestCase(unittest.TestCase):
+ pem_text = """\
+MIIMVAYJKoZIhvcNAQcCoIIMRTCCDEECAQMxDTALBglghkgBZQMEAgIwggY7Bggr
+BgEFBQcMAqCCBi0EggYpMIIGJTCCBhswggYXAgEBBgsqhkiG9w0BCRAIATGCBgMw
+ggX/MEaGLGh0dHBzOi8vd3d3LmV4YW1wbGUuY29tL2xpc3QtaW5mby9ncm91cC1s
+aXN0gRZncm91cC1saXN0QGV4YW1wbGUuY29tMIIFmzCCBZekQTA/MQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAPBgNVBAoTCEJv
+Z3VzIENBgRxncm91cC1saXN0LW93bmVyQGV4YW1wbGUuY29tMIIFMqCCBS4wggTU
+oAMCAQICFCVehe2QOuzvkY+pMECid/MyYVKJMAsGCWCGSAFlAwQDAjA/MQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAPBgNVBAoT
+CEJvZ3VzIENBMB4XDTE5MTAyMDE5MzE1MloXDTIxMTAxOTE5MzE1MlowPzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMREwDwYDVQQK
+EwhCb2d1cyBDQTCCA0cwggI5BgcqhkjOOAQBMIICLAKCAQEAt9x/0iwGww3k19h+
+wbODVK1yqjFzEY2pyfXthHcn+nEw+DpURJ+iOhYPr68E3XO5sB48r5xTZhPN5+Ye
+jD3T8qhnDtiq4qrrSH7BOaEzqCDpHE2Bpoy3SodQ5Obaiu9Kx1ixBRk/oRZUH+F+
+ATZmF0rPKrZGZOnmsh0IZm3dlmRR9FRGn0aJlZKXveqp+hZ97/r0cbSo6wdT47AP
+focgweZMvgWu1IQBs6FiunRgaeX3RyLr4fnkvCzUM7TmxpRJYtL6myAp007QvtgQ
+0AdEwVfNl3jQ0IIW7TtpXVxDDQaKZZe9yYrY4GV3etlYk8a4cpjNrBxBCCTMASE4
++iVtPQIhAJGPJRq8r3GSP6cV7V8EmlxC9ne+xkhiAjBmWtcDibXRAoIBACDebX29
+ZzVOUeaR6ovCC8c3RR93LDlrFa1zyogkZnUx7OHIvIPhFTRUUJKhwkIJ7aTaRLY/
+a9ARFllhyf+cJi6KzAKM4JufAqjN9pNncVzUo4K1vgQRy6T+2Hlc2FYJgknsdqzK
+bzO49qqHlMtywdenT+VBSI5Xa5UecC3nTcAdjW/g+GVncbQJFkx6dp9TQrLtrrOG
+uoW9aC1J2j683RL3FL8om5NpaxiA4C3ivYgrW7C5a68DkvVCt2PykPMwuR2XIdTU
+mCPOTSs1ANNtSRlf0ICL/EpQZnKyNZ86fUUcLW8nWxs/2dNelZFqKfX/rJq0HZHE
+tO9ZjICr0iwv/w8DggEGAAKCAQEAttFBDPuFMmcpY8ryoq+ES4JBYSHJNF+zBCFo
+NF/ZrCayL3HBn+BNGy5WVHFWUF/JfdNzCGdZ0/vcMT2KdS9xMsOGmK8luDyarj6z
+u4rDuQaeAmLcBsTgK+JjgNS+nxIz0pgoWyKsKwnB3ipYibgdOl6HpavVLSdC1i3U
+TV6/jpVOgWoxrYjOOOSi6Ov9y4kzsvI33H1cfUwzNd8pcV4MBcEq5rliEouo4W46
+k3Ry0RnoDejnVxzog3/6RLOyRmv/+uhLpx0n6Cl+hyPtJ+GbAv5ttle8P0ofUnYM
+gi+oVquYc7wBCjWpaL8wvIjDF4oEh264a0ZpcqrLL/mKNJeOaqOBvDCBuTAdBgNV
+HQ4EFgQUzUhlAYOypgdbBv4jgQzEc+TRtTgwegYDVR0jBHMwcYAUzUhlAYOypgdb
+Bv4jgQzEc+TRtTihQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4G
+A1UEBxMHSGVybmRvbjERMA8GA1UEChMIQm9ndXMgQ0GCFCVehe2QOuzvkY+pMECi
+d/MyYVKJMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgGGMAsGCWCGSAFlAwQD
+AgNHADBEAiBry0TcN3QY3vbI214hdSdpfP4CnLQNxRK5XEP+wQbcHQIgTGF1BXLj
+OW3eUkwUeymnG+paj+qrW+ems2ANjq3bbQkCAQIwE4AB/4IBH6QLBglghkgBZQME
+AS0wADAAMACgggSYMIICAjCCAYigAwIBAgIJAOiR1gaRT87yMAoGCCqGSM49BAMD
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjER
+MA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNTE0MDg1ODExWhcNMjEwNTEzMDg1ODEx
+WjA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24x
+ETAPBgNVBAoMCEJvZ3VzIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8FF2VLHo
+jmqlnawpQwjG6fWBQDPOy05hYq8oKcyg1PXH6kgoO8wQyKYVwsDHEvc1Vg6ErQm3
+LzdI8OQpYx3H386R2F/dT/PEmUSdcOIWsB4zrFsbzNwJGIGeZ33ZS+xGo1AwTjAd
+BgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYDVR0jBBgwFoAU8jXbNATa
+pVXyvWkDmbBi7OIVCMEwDAYDVR0TBAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjBa
+UY2Nv03KolLNRJ2wSoNK8xlvzIWTFgIhsBWpD1SpJxRRv22kkoaw9bBtmyctW+YC
+MQC3/KmjNtSFDDh1I+lbOufkFDSQpsMzcNAlwEAERQGgg6iXX+NhA+bFqNC7FyF4
+WWQwggKOMIICFaADAgECAgkApbNUKBuwbkswCgYIKoZIzj0EAwMwPzELMAkGA1UE
+BhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhC
+b2d1cyBDQTAeFw0xOTEyMjAyMDQ1MjZaFw0yMDEyMTkyMDQ1MjZaMIGGMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoT
+B0V4YW1wbGUxGTAXBgNVBAMTEEdyb3VwIExpc3QgT3duZXIxKzApBgkqhkiG9w0B
+CQEWHGdyb3VwLWxpc3Qtb3duZXJAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUr
+gQQAIgNiAASzrdo0dy4su1viboFbwU8NjgURE5GxAxYIHUPOWsdR1lnMR2v8vnjy
+zd80HkNlInHRAoZuXgzceCpbqhcBHtFLPWCqxL55duG9+CwlL9uIl4ovrFH6ZMtD
+oZFLtDJvMhOjgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlz
+IGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4w
+HQYDVR0OBBYEFK/WP1p7EM56lkxxIBAohNZWvwkjMB8GA1UdIwQYMBaAFPI12zQE
+2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMF2eLAXNa+8ve16CF31Y
++/DDErehb5V3G5DGWZ5CGPcNcuevDeOIXcTuKqXineR3EAIwIkR+5d9UvSsAfFPk
+OItcoI8so2BH4Da0wkUU+o7nQ9yRtZvE0syujxIzgEzv9JUZMYIBUDCCAUwCAQEw
+TDA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24x
+ETAPBgNVBAoMCEJvZ3VzIENBAgkApbNUKBuwbkswCwYJYIZIAWUDBAICoHgwFwYJ
+KoZIhvcNAQkDMQoGCCsGAQUFBwwCMBwGCSqGSIb3DQEJBTEPFw0xOTEyMjIxNjA5
+MTRaMD8GCSqGSIb3DQEJBDEyBDADTid4Yy+UzDasyRb9j2bsz/pPHjAtNZV3oa+E
+RQ/auLffZXl8h43ecu6ERv4t+AswCgYIKoZIzj0EAwMEZjBkAjAt5JqjM4WJ9Yd5
+RnziEbhlnVoo7ADPYl8hRnxrfYG+jiNsqbAMrjqqPFiG7yOPtNwCMEcQJZT1SBud
+KS1zJZvX/ury+ySGvKDLkfnqwZARR9W7TkTdx0L9W9oVjyEgOeGkvA==
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ found_gl_use_kek = False
+ for ctrl in asn1Object['controlSequence']:
+ if ctrl['attrType'] == rfc5275.id_skd_glUseKEK:
+ cv, rest = der_decoder(
+ ctrl['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[ctrl['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(cv.prettyPrint())
+ self.assertEqual(ctrl['attrValues'][0], der_encoder(cv))
+
+ self.assertIn(
+ 'example.com',
+ cv['glInfo']['glAddress']['rfc822Name'])
+
+ self.assertIn(
+ 'example.com',
+ cv['glOwnerInfo'][0]['glOwnerAddress']['rfc822Name'])
+
+ self.assertEqual(31, cv['glKeyAttributes']['duration'])
+ found_gl_use_kek = True
+
+ self.assertTrue(found_gl_use_kek)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sd = asn1Object['content']
+ self.assertEqual(
+ rfc6402.id_cct_PKIData, sd['encapContentInfo']['eContentType'])
+
+ pkid, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc6402.PKIData(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(pkid.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(pkid))
+
+ found_gl_use_kek = False
+ for ctrl in pkid['controlSequence']:
+ if ctrl['attrType'] == rfc5275.id_skd_glUseKEK:
+ cv = ctrl['attrValues'][0]
+
+ self.assertIn(
+ 'example.com',
+ cv['glInfo']['glAddress']['rfc822Name'])
+
+ self.assertIn(
+ 'example.com',
+ cv['glOwnerInfo'][0]['glOwnerAddress']['rfc822Name'])
+
+ self.assertEqual(31, cv['glKeyAttributes']['duration'])
+ found_gl_use_kek = True
+
+ self.assertTrue(found_gl_use_kek)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5280.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5280.py
new file mode 100644
index 0000000000..ea9e5337ce
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5280.py
@@ -0,0 +1,253 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+
+
+class CertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class CertificateListTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.CertificateList()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class CertificateOpenTypeTestCase(unittest.TestCase):
+ pem_text = """\
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ openTypesMap = {
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sig_alg = asn1Object['tbsCertificate']['signature']
+
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ spki_alg = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ for rdn in asn1Object['tbsCertificate']['subject']['rdnSequence']:
+ for atv in rdn:
+ if atv['type'] == rfc5280.id_emailAddress:
+ self.assertIn("valicert.com", atv['value'])
+ else:
+ atv_ps = str(atv['value']['printableString'])
+ self.assertIn("valicert", atv_ps.lower())
+
+
+class CertificateListOpenTypeTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBVjCBwAIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJBVTETMBEGA1UE
+CBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
+MRUwEwYDVQQDEwxzbm1wbGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25t
+cGxhYnMuY29tFw0xMjA0MTExMzQwNTlaFw0xMjA1MTExMzQwNTlaoA4wDDAKBgNV
+HRQEAwIBATANBgkqhkiG9w0BAQUFAAOBgQC1D/wwnrcY/uFBHGc6SyoYss2kn+nY
+RTwzXmmldbNTCQ03x5vkWGGIaRJdN8QeCzbEi7gpgxgpxAx6Y5WkxkMQ1UPjNM5n
+DGVDOtR0dskFrrbHuNpWqWrDaBN0/ryZiWKjr9JRbrpkHgVY29I1gLooQ6IHuKHY
+vjnIhxTFoCb5vA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.CertificateList()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ openTypesMap = {
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ sig_alg = asn1Object['tbsCertList']['signature']
+
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ for rdn in asn1Object['tbsCertList']['issuer']['rdnSequence']:
+ for atv in rdn:
+ if atv['type'] == rfc5280.id_emailAddress:
+ self.assertIn("snmplabs.com", atv['value'])
+
+ elif atv['type'] == rfc5280.id_at_countryName:
+ self.assertEqual('AU', atv['value'])
+
+ else:
+ self.assertLess(9, len(atv['value']['printableString']))
+
+ crl_extn_count = 0
+
+ for extn in asn1Object['tbsCertList']['crlExtensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ crl_extn_count += 1
+
+ self.assertEqual(1, crl_extn_count)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cert_extn_count = 0
+
+ for extn in asn1Object['tbsCertList']['crlExtensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ cert_extn_count += 1
+
+ self.assertEqual(1, cert_extn_count)
+
+
+class ORAddressOpenTypeTestCase(unittest.TestCase):
+ oraddress_pem_text = """\
+MEMwK2EEEwJHQmIKEwhHT0xEIDQwMKIHEwVVSy5BQ4MHU2FsZm9yZKYFEwNSLUQx
+FDASgAEBoQ0TC1N0ZXZlIEtpbGxl
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.ORAddress()
+
+ def testDecodeOpenTypes(self):
+ substrate = pem.readBase64fromText(self.oraddress_pem_text)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ea0 = asn1Object['extension-attributes'][0]
+
+ self.assertEqual(rfc5280.common_name, ea0['extension-attribute-type'])
+ self.assertEqual("Steve Kille", ea0['extension-attribute-value'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5480.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5480.py
new file mode 100644
index 0000000000..72ca51adfd
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5480.py
@@ -0,0 +1,81 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+
+
+class ECCertTestCase(unittest.TestCase):
+ digicert_ec_cert_pem_text = """\
+MIIDrDCCApSgAwIBAgIQCssoukZe5TkIdnRw883GEjANBgkqhkiG9w0BAQwFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0xMzAzMDgxMjAwMDBaFw0yMzAzMDgxMjAwMDBaMEwxCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxJjAkBgNVBAMTHURpZ2lDZXJ0IEVDQyBT
+ZWN1cmUgU2VydmVyIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4ghC6nfYJN6g
+LGSkE85AnCNyqQIKDjc/ITa4jVMU9tWRlUvzlgKNcR7E2Munn17voOZ/WpIRllNv
+68DLP679Wz9HJOeaBy6Wvqgvu1cYr3GkvXg6HuhbPGtkESvMNCuMo4IBITCCAR0w
+EgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwNAYIKwYBBQUHAQEE
+KDAmMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wQgYDVR0f
+BDswOTA3oDWgM4YxaHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0R2xv
+YmFsUm9vdENBLmNybDA9BgNVHSAENjA0MDIGBFUdIAAwKjAoBggrBgEFBQcCARYc
+aHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzAdBgNVHQ4EFgQUo53mH/naOU/A
+buiRy5Wl2jHiCp8wHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJ
+KoZIhvcNAQEMBQADggEBAMeKoENL7HTJxavVHzA1Nm6YVntIrAVjrnuaVyRXzG/6
+3qttnMe2uuzO58pzZNvfBDcKAEmzP58mrZGMIOgfiA4q+2Y3yDDo0sIkp0VILeoB
+UEoxlBPfjV/aKrtJPGHzecicZpIalir0ezZYoyxBEHQa0+1IttK7igZFcTMQMHp6
+mCHdJLnsnLWSB62DxsRq+HfmNb4TDydkskO/g+l3VtsIh5RHFPVfKK+jaEyDj2D3
+loB5hWp2Jp2VDCADjT7ueihlZGak2YPqmXTNbk19HOuNssWvFhtOyPNV6og4ETQd
+Ea8/B6hPatJ0ES8q/HO3X8IVQwVs1n3aAr0im0/T+Xc=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.digicert_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ algid = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, algid['algorithm'])
+
+ param, rest = der_decoder(algid['parameters'], asn1Spec=rfc5480.ECParameters())
+
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(rfc5480.secp384r1, param['namedCurve'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.digicert_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_alg = asn1Object['tbsCertificate']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, spki_alg['algorithm'])
+ self.assertEqual(rfc5480.secp384r1, spki_alg['parameters']['namedCurve'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5636.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5636.py
new file mode 100644
index 0000000000..8f5d90ee7f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5636.py
@@ -0,0 +1,118 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5636
+
+
+class TraceableAnonymousCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIGOgYJKoZIhvcNAQcCoIIGKzCCBicCAQMxDTALBglghkgBZQMEAgEwRQYKKoMajJpECgEB
+AaA3BDUwMwQgTgtiLdByNcZGP/PPE1I2lvxDA/6bajEE4VAWF13N9E4YDzIwMTkxMjMxMTIw
+MDAwWqCCBB0wggQZMIIDAaADAgECAhQLxXbZnuC+8r+RhlN0rgUga/of6TANBgkqhkiG9w0B
+AQsFADA/MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xETAP
+BgNVBAoTCEJvZ3VzIENBMB4XDTE5MTIxNTE4MTA0OFoXDTIwMTIxNDE4MTA0OFowTjELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMRAwDgYDVQQKDAdFeGFt
+cGxlMQ4wDAYDVQQDDAVBbGljZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALt2
+dWnBBb4MnwcHij1I2h+oNy7zGhG7Wd4GhtonVjn5XhyLhZLTjGAbPHqFBOb9fwElS4TfpTtG
+d7K9INUIgM0a6wZI3j3qCqDphQBW6sPVksip9Elan1hR8Upd4iutaWKKNxCpNO5gQiMM0Nay
+PTIp1ZcLByLxbHPBx/ZuJ/eg2OuBbkyTph0syWTUsiCbqXnraXP9pZUq0XL8Gu1tlvMZJm1J
+7NjE0CyDPQR8G9SS7IdCjhCcesP6E6OD0ang46Chx1S78fGB/UhSyQcFP3pznz0XS7pVAObU
+iMshwMzmUlcoErU7cf4V1t8ukjAsjVbx2QPPB6y64TN4//AYDdkCAwEAAaOB/TCB+jAdBgNV
+HQ4EFgQUVDw+01Pdj1UbXOmY7KLo9P0gau0wegYDVR0jBHMwcYAUbyHWHCqlZ40B9ilNhfDx
+VWD6nKehQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRv
+bjERMA8GA1UEChMIQm9ndXMgQ0GCFGR4rdxyWiX71uMC1s8lhGG24Gu7MAwGA1UdEwEB/wQC
+MAAwCwYDVR0PBAQDAgXgMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5v
+dCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wDQYJKoZIhvcNAQELBQADggEBAHO8u2ne
+bxI2OhSj1SaSgQXe4SN+BEWbortXetALwbDXs2+XO5CF88Nmf/CyZxKLWGNOGwlLBoaUDI1/
+rAf+Na244Om8JdKhAj3OimXX5KvebQgS/SYRf8XVM0zLmkp4DKgrMw5aXpMke8QrrouOt7EG
+rpKcVXCqG2gOiUomKYDCgIC0H95TWbYnJ1BLJIOqSvtBe+5GpWMyJUs6sZOvWJoXQ9U5MHJQ
+BczpA85TlMUPMojOC1OGUJty13h3GFX66K3GwpeMFBLsYfIT4N90EPioZYTs8srYMVl0//pK
+9XeuT4/zs47k1js8vuzILD9g5dD5hkw2dI/2utucjXpM9aExggGpMIIBpQIBA4AUVDw+01Pd
+j1UbXOmY7KLo9P0gau0wCwYJYIZIAWUDBAIBoGowGQYJKoZIhvcNAQkDMQwGCiqDGoyaRAoB
+AQEwHAYJKoZIhvcNAQkFMQ8XDTE5MTIxNjE1NTEyMlowLwYJKoZIhvcNAQkEMSIEIJumtIa6
+3jeKcCTvxY+Pf3O8U6jko6J0atleMxdZWNAHMA0GCSqGSIb3DQEBAQUABIIBAJHxEz3qLxDz
+UaMxBt1wW/2tMx5AGKlxhBIE2Am/iIpdpkk0nMNt+R6GduAz9yE+lS7V+lZafZq7WKUPpAIR
+YYD1apaxWAigHYQCLQg08MSlhzkCjzKiVXtsfAYHYLWutvqPY8WRX7x85If333/v7kVBPZvS
+su/MkZ4V9USpocRq/BFYo7VbitBYFHqra+vzhRiYD1pS6EfhFwZoAv/Ud59FUACU8ixw2IuO
+Efe1LUIWVmbJ3HKtk8JTrWTg9iLVp+keqOWJfSEEUZXnyNIMt/SCONtZT+6SJQqwQV0C8AcR
+9sxMfZum5/eKypTZ9liGP4jz6nxtD3hEyfEXf7BOfds=
+"""
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5636.id_kisa_tac_token: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5636.id_kisa_tac_token: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual('2019', asn1Object['timeout'][:4])
+ self.assertEqual('5dcdf44e', asn1Object['userKey'].prettyPrint()[-8:])
+
+ def testOpenTypes(self):
+ asn1Spec=rfc5652.ContentInfo()
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = asn1Object['content']['encapContentInfo']['eContent']
+ oid = asn1Object['content']['encapContentInfo']['eContentType']
+ self.assertIn(oid, rfc5652.cmsContentTypesMap)
+
+ tac_token, rest = der_decoder(
+ substrate,
+ asn1Spec=rfc5652.cmsContentTypesMap[oid],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(tac_token.prettyPrint())
+ self.assertEqual(substrate, der_encoder(tac_token))
+
+ self.assertEqual('2019', tac_token['timeout'][:4])
+ self.assertEqual('5dcdf44e', tac_token['userKey'].prettyPrint()[-8:])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5639.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5639.py
new file mode 100644
index 0000000000..628b902c7d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5639.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5639
+
+
+class ECCertTestCase(unittest.TestCase):
+ brainpool_ec_cert_pem_text = """\
+MIIB0jCCAXmgAwIBAgITPUXQAyl3ZE5iAHYGZYSp1FkqzTAKBggqhkjOPQQDAjA/
+MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAP
+BgNVBAoMCEJvZ3VzIENBMB4XDTE5MTIwOTIxNDM0NFoXDTIxMTIwODIxNDM0NFow
+PzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREw
+DwYDVQQKDAhCb2d1cyBDQTBaMBQGByqGSM49AgEGCSskAwMCCAEBBwNCAASBvvOk
+WNZlGAf5O3V94qgC3IUUR/6uxFxT6To0ULFmrVVndXiVP6DE5h5QHGXPwKfO+4Yt
+n0OVnGHp68dPS37Go1MwUTAdBgNVHQ4EFgQUiRFFVcdn6Fp9+sEP1GVRtwl9XgIw
+HwYDVR0jBBgwFoAUiRFFVcdn6Fp9+sEP1GVRtwl9XgIwDwYDVR0TAQH/BAUwAwEB
+/zAKBggqhkjOPQQDAgNHADBEAiB3d+P64Dh5YzwyM++uOL6zHUeLbNpW2sF1eJsm
+l3M5uQIgGxpbAXOt/o1xtyhEGLNUBE7ObgQpm7tHMMQGUHo4wV8=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.brainpool_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki = asn1Object['tbsCertificate']['subjectPublicKeyInfo']
+ algid = spki['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, algid['algorithm'])
+
+ param, rest = der_decoder(
+ algid['parameters'], asn1Spec=rfc5480.ECParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(algid['parameters'], der_encoder(param))
+
+ self.assertEqual(rfc5639.brainpoolP256r1, param['namedCurve'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.brainpool_ec_cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki = asn1Object['tbsCertificate']['subjectPublicKeyInfo']
+ algid = spki['algorithm']
+
+ self.assertEqual(rfc5480.id_ecPublicKey, algid['algorithm'])
+ self.assertEqual(
+ rfc5639.brainpoolP256r1, algid['parameters']['namedCurve'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5649.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5649.py
new file mode 100644
index 0000000000..c2fa9d1db5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5649.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5649
+
+
+class AESKeyWrapTestCase(unittest.TestCase):
+ kw_alg_id_pem_text = "MAsGCWCGSAFlAwQBLQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5649.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kw_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc5649.id_aes256_wrap, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class AESKeyWrapWithPadTestCase(unittest.TestCase):
+ kw_pad_alg_id_pem_text = "MAsGCWCGSAFlAwQBMA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5649.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.kw_pad_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc5649.id_aes256_wrap_pad, asn1Object[0])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5652.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5652.py
new file mode 100644
index 0000000000..7055b5201f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5652.py
@@ -0,0 +1,169 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+
+
+class ContentInfoTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
+BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
+SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
+cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
+AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
+PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
+Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
+OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
+A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
+bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
+AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
+mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
+KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
+qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
+oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
+xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
+5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
+xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = {
+ rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
+ rfc5652.id_signedData: rfc5652.SignedData(),
+ rfc6402.id_cct_PKIData: rfc6402.PKIData()
+ }
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=layers[next_layer]
+ )
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ def testOpenTypes(self):
+ class ClientInformation(univ.Sequence):
+ pass
+
+ ClientInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('clientId', univ.Integer()),
+ namedtype.NamedType('MachineName', char.UTF8String()),
+ namedtype.NamedType('UserName', char.UTF8String()),
+ namedtype.NamedType('ProcessName', char.UTF8String())
+ )
+
+ class EnrollmentCSP(univ.Sequence):
+ pass
+
+ EnrollmentCSP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('KeySpec', univ.Integer()),
+ namedtype.NamedType('Name', char.BMPString()),
+ namedtype.NamedType('Signature', univ.BitString())
+ )
+
+ openTypeMap = {
+ # attributes
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'): char.IA5String(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.2'): EnrollmentCSP(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.21.20'): ClientInformation(),
+ # algorithm identifier parameters
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ openTypeMap.update(rfc5652.cmsAttributesMap)
+ openTypeMap.update(rfc6402.cmcControlAttributesMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder.decode(substrate,
+ asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+
+ self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc6402.id_cct_PKIData, eci['eContentType'])
+
+ pkid, rest = der_decoder.decode(eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
+ openTypes=openTypeMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(pkid.prettyPrint())
+ self.assertEqual(eci['eContent'], der_encoder.encode(pkid))
+
+ for req in pkid['reqSequence']:
+ cr = req['tcr']['certificationRequest']
+
+ sig_alg = cr['signatureAlgorithm']
+
+ self.assertIn(sig_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ cri = cr['certificationRequestInfo']
+ spki_alg = cri['subjectPublicKeyInfo']['algorithm']
+
+ self.assertIn( spki_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ attrs = cr['certificationRequestInfo']['attributes']
+
+ for attr in attrs:
+ self.assertIn(attr['attrType'], openTypeMap)
+
+ if attr['attrType'] == univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'):
+ self.assertEqual("6.2.9200.2", attr['attrValues'][0])
+
+ else:
+ self.assertTrue(attr['attrValues'][0].hasValue())
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5697.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5697.py
new file mode 100644
index 0000000000..1aa0e2b4c5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5697.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5697
+
+
+class OtherCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIGUTCCBfegAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCswCwYJYIZIAWUDBAMC
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMjExMTczMzQ0WhcNMjAxMjEwMTczMzQ0
+WjBNMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDTALBgNVBAMTBEdhaWwwggNHMIICOgYHKoZIzjgE
+ATCCAi0CggEBAMj5CIXkPmfEDm3rrTqf/sIPh5XNWTT+U/+W74HbEXfi0NdafvNc
+WowncDznn4BZuotmuahJKBLFL0WCE28SAcJlhoOZ+gy6CMBV3LbupTEhPcWdc+qC
+wj1kL6WQwBfuzMlfKqXbGcO+CAP59iirw/LGcgmjLk/BpNAQ5oPtmD88DKAm4Ysz
+l3+n0F8ZhLhw33NEcEVNcVr+Q+ZZP/4ezAizvOK46QA5KnlXBQoC+MgTqxk+zhjw
+JRE5UnQDv8FbUF3GrehLDN0q+Pt76+jl+ikOnMzeXi+tz8d49LCogxh7oq6N2Ptt
+o9ksMkExNRJhW6JeVQ4PggOR4CI8BwYt7T0CIQD5VsG4AQIeMIDGmu8ek+FEKp8l
+utd6GBzrQwfDkgiGpQKCAQEAo2c3ze980XHSjTnsFAcDXb71KrQV5FadnRAzWxWO
+MrDDCVUq6JqaRKWAMRmk72Tl3V1c6IC3Y3mjorYH0HEi3EbYq5KxGXRaoK8NJAFh
+YKhHk5VAVyCvM1J9NNdlDyl0uYrxLLSwt+S7yrEL4qCijAzQ270h0cnBiYG06e5l
+XVola9Wec4KqFfqnDQGiDIYZSWvGqMGKbrMzkJMmYN/8ls54l3ATvSEt5ijeDJzk
+MkyMaTV77g/R9n43JqvyOdkizZCRKovvL+m+wRdilFcIMDXwSG1Pw9kmCa/NenjF
+5swCfyF3P2TsO3QsppM7KWfLglj9j7sPM4MTiOfc+wPKqwOCAQUAAoIBACcxpFMg
+T2EEPRojEYDwIY4t9u6eP2scBrkrc3JJ6osTXHfkeluR9OvME620Hm01+EivnETI
+W5o+hCAdoic2h93kjx137QLAAL9ECoYgzm32SB796Nn630XVnd44gP1G3KbPZ8eD
+uC1GsSuxkmDR9PH0Tbx6XdnbTKW4ycHpKrrDLLeryZsghQfv4O63oaXgaJHwdQD3
+BwTZcUexZGstI7hFEdZrc7HWF3kmZdHjxuXYL/DP2T7akHyLc6ktepastZ6cGTZr
+GUJ52sgM50Swb2CtrJuGDvtnEcZjtEb+rJgFIWHDs3lelLT72GWX+Xs7jeJaSjx5
++NK1qahR8hguww6jggHQMIIBzDAdBgNVHQ4EFgQU34Ol7JNqPoDCG/WE8toUQUiS
+tUQwegYDVR0jBHMwcYAUzUhlAYOypgdbBv4jgQzEc+TRtTihQ6RBMD8xCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjERMA8GA1UEChMI
+Qm9ndXMgQ0GCFCVehe2QOuzvkY+pMECid/MyYVKJMA8GA1UdEwEB/wQFMAMBAf8w
+CwYDVR0PBAQDAgGGMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNh
+bm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wUwYDVR0RBEwwSqA2Bggr
+BgEFBQcIA6AqMCgMGzgyNjIwOC00MTcwMjgtNTQ4MTk1LTIxNTIzMwYJKwYBBAGB
+rGAwgRBnYWlsQGV4YW1wbGUuY29tMHgGCCsGAQUFBwETBGwwajBoBBT9+d0Ci+/R
+j5toRA+A7p+ECmGaWDBQMEOkQTA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkEx
+EDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBAgkApbNUKBuwbkcw
+CwYJYIZIAWUDBAMCA0cAMEQCIAyAog0z/KyROhb8Fl3Hyjcia/POnMq4yhPZFwlI
+hn1cAiAIfnI1FVrosL/94ZKfGW+xydYaelsPL+WBgqGvKuTMEg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ other_cert_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5697.id_pe_otherCerts:
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5697.OtherCertificates())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ self.assertEqual(
+ 11939979568329289287,
+ extnValue[0]['issuerSerial']['serialNumber'])
+
+ other_cert_found = True
+
+ self.assertTrue(other_cert_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ other_cert_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5697.id_pe_otherCerts:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ self.assertEqual(
+ 11939979568329289287,
+ extnValue[0]['issuerSerial']['serialNumber'])
+
+ other_cert_found = True
+
+ self.assertTrue(other_cert_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5751.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5751.py
new file mode 100644
index 0000000000..7ce4373956
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5751.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5751
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ pem_text = """\
+MIIGigYJKoZIhvcNAQcCoIIGezCCBncCAQExCTAHBgUrDgMCGjArBgkqhkiG9w0B
+BwGgHgQcVGhpcyBpcyBzb21lIHNhbXBsZSBjb250ZW50LqCCAuAwggLcMIICm6AD
+AgECAgIAyDAJBgcqhkjOOAQDMBIxEDAOBgNVBAMTB0NhcmxEU1MwHhcNOTkwODE3
+MDExMDQ5WhcNMzkxMjMxMjM1OTU5WjATMREwDwYDVQQDEwhBbGljZURTUzCCAbYw
+ggErBgcqhkjOOAQBMIIBHgKBgQCBjc3tg+oKnjk+wkgoo+RHk90O16gO7FPFq4QI
+T/+U4XNIfgzW80RI0f6fr6ShiS/h2TDINt4/m7+3TNxfaYrkddA3DJEIlZvep175
+/PSfL91DqItU8T+wBwhHTV2Iw8O1s+NVCHXVOXYQxHi9/52whJc38uRRG7XkCZZc
+835b2wIVAOJHphpFZrgTxtqPuDchK2KL95PNAoGAJjjQFIkyqjn7Pm3ZS1lqTHYj
+OQQCNVzyyxowwx5QXd2bWeLNqgU9WMB7oja4bgevfYpCJaf0dc9KCF5LPpD4beqc
+ySGKO3YU6c4uXaMHzSOFuC8wAXxtSYkRiTZEvfjIlUpTVrXi+XPsGmE2HxF/wr3t
+0VD/mHTC0YFKYDm6NjkDgYQAAoGAXOO5WnUUlgupet3jP6nsrF7cvbcTETSmFoko
+ESPZNIZndXUTEj1DW2/lUb/6ifKiGz4kfT0HjVtjyLtFpaBK44XWzgaAP+gjfhry
+JKtTGrgnDR7vCL9mFIBcYqxl+hWL8bs01NKWN/ZhR7LEMoTwfkFA/UanY04z8qXi
+9PKD5bijgYEwfzAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIGwDAfBgNVHSME
+GDAWgBRwRD6CLm+H3krTdeM9ILxDK5PxHzAdBgNVHQ4EFgQUvmyhs+PB9+1DcKTO
+EwHi/eOX/s0wHwYDVR0RBBgwFoEUQWxpY2VEU1NAZXhhbXBsZS5jb20wCQYHKoZI
+zjgEAwMwADAtAhRVDKQZH0IriXEiM42DarU9Z2u/RQIVAJ9hU1JUC1yy3drndh3i
+EFJbQ169MYIDVDCCA1ACAQEwGDASMRAwDgYDVQQDEwdDYXJsRFNTAgIAyDAHBgUr
+DgMCGqCCAuowGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAjBgkqhkiG9w0BCQQx
+FgQUQGrsCFJ5um4WAi2eBinAIpaH3UgwOAYDKqszMTEEL1RoaXMgaXMgYSB0ZXN0
+IEdlbmVyYWwgQVNOIEF0dHJpYnV0ZSwgbnVtYmVyIDEuMD4GCyqGSIb3DQEJEAIE
+MS8wLQwgQ29udGVudCBIaW50cyBEZXNjcmlwdGlvbiBCdWZmZXIGCSqGSIb3DQEH
+ATBKBgkqhkiG9w0BCQ8xPTA7MAcGBSoDBAUGMDAGBioDBAUGTQQmU21pbWUgQ2Fw
+YWJpbGl0aWVzIHBhcmFtZXRlcnMgYnVmZmVyIDIwbwYLKoZIhvcNAQkQAgoxYDBe
+BgUqAwQFBgQrQ29udGVudCBSZWZlcmVuY2UgQ29udGVudCBJZGVudGlmaWVyIEJ1
+ZmZlcgQoQ29udGVudCBSZWZlcmVuY2UgU2lnbmF0dXJlIFZhbHVlIEJ1ZmZlcjBz
+BgsqhkiG9w0BCRACCzFkoGIwWjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDVVTIEdv
+dmVybm1lbnQxETAPBgNVBAsTCFZEQSBTaXRlMQwwCgYDVQQLEwNWREExEjAQBgNV
+BAMTCURhaXN5IFJTQQIEClVEMzCB/AYLKoZIhvcNAQkQAgMxgewwgekwgeYEBzU3
+MzgyOTkYDzE5OTkwMzExMTA0NDMzWqGByTCBxqRhMF8xCzAJBgNVBAYTAlVTMRYw
+FAYDVQQKEw1VUyBHb3Zlcm5tZW50MREwDwYDVQQLEwhWREEgU2l0ZTEMMAoGA1UE
+CxMDVkRBMRcwFQYDVQQDEw5CdWdzIEJ1bm55IERTQaRhMF8xCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1VUyBHb3Zlcm5tZW50MREwDwYDVQQLEwhWREEgU2l0ZTEMMAoG
+A1UECxMDVkRBMRcwFQYDVQQDEw5FbG1lciBGdWRkIERTQTAJBgcqhkjOOAQDBC8w
+LQIVALwzN2XE93BcF0kTqkyFyrtSkUhZAhRjlqIUi89X3rBIX2xk3YQESV8cyg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ smimeCapMap = {
+ univ.ObjectIdentifier('1.2.3.4.5.6.77'): univ.OctetString(),
+ }
+ smimeCapMap.update(rfc5751.smimeCapabilityMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder (substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+ self.assertEqual(1, asn1Object['content']['version'])
+
+ for si in asn1Object['content']['signerInfos']:
+ self.assertEqual(1, si['version'])
+
+ for attr in si['signedAttrs']:
+
+ if attr['attrType'] == rfc5751.smimeCapabilities:
+ for scap in attr['attrValues'][0]:
+ if scap['capabilityID'] in smimeCapMap.keys():
+ scap_p, rest = der_decoder(scap['parameters'],
+ asn1Spec=smimeCapMap[scap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertEqual(scap['parameters'], der_encoder(scap_p))
+ self.assertIn('parameters', scap_p.prettyPrint())
+
+ if attr['attrType'] == rfc5751.id_aa_encrypKeyPref:
+ ekp_issuer_serial = attr['attrValues'][0]['issuerAndSerialNumber']
+
+ self.assertEqual(173360179, ekp_issuer_serial['serialNumber'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5752.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5752.py
new file mode 100644
index 0000000000..76776323d3
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5752.py
@@ -0,0 +1,207 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5752
+
+
+class MultipleSignaturesTestCase(unittest.TestCase):
+ pem_text = """\
+MIIKawYJKoZIhvcNAQcCoIIKXDCCClgCAQExGjALBglghkgBZQMEAgEwCwYJYIZI
+AWUDBAICMFEGCSqGSIb3DQEHAaBEBEJDb250ZW50LVR5cGU6IHRleHQvcGxhaW4N
+Cg0KV2F0c29uLCBjb21lIGhlcmUgLSBJIHdhbnQgdG8gc2VlIHlvdS6gggYmMIIC
+eDCCAf6gAwIBAgIJAKWzVCgbsG47MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYTAlVT
+MQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMg
+Q0EwHhcNMTkwNTI5MTQ0NTQxWhcNMjAwNTI4MTQ0NTQxWjBwMQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1w
+bGUxDjAMBgNVBAMTBUFsaWNlMSAwHgYJKoZIhvcNAQkBFhFhbGljZUBleGFtcGxl
+LmNvbTB2MBAGByqGSM49AgEGBSuBBAAiA2IABPjNnwcv7EQOldaShannEUxPPi7g
+B7WcXrNcJiWawQYPm8+7mGX2EMSN3VQdGAkg+jLd8lxZZ5nwUcKKsgK24yAWKw2x
+wb9pPArINg4UO6rP8LaPITCqBYJHLHKiG4le2aOBlDCBkTALBgNVHQ8EBAMCB4Aw
+QgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0
+ZWQgZm9yIGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQUxLpaDj564zyBsPQCqmi7FuCW
+DjUwHwYDVR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwCgYIKoZIzj0EAwMD
+aAAwZQIwY7kf0TW4C95EYZp/jyU3imi/bIf6EIBzmE4C5kp79/jQwpIXyrjDaKP7
+R65JooWIAjEAveDGnqwyK0KYtCA4fr9EEgL/azIn3vLQpWn11rQ8MC/DEu6AIdMp
+k+OOlIs8cdz1MIIDpjCCA0ygAwIBAgIUY8xt3l0B9nIPWSpjs0hDJUJZmCkwCwYJ
+YIZIAWUDBAMCMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMH
+SGVybmRvbjERMA8GA1UEChMIQm9ndXMgQ0EwHhcNMTkxMDIwMjAxMjMwWhcNMjAx
+MDE5MjAxMjMwWjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcT
+B0hlcm5kb24xEDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAsTBUFsaWNlMSAwHgYJ
+KoZIhvcNAQkBFhFhbGljZUBleGFtcGxlLmNvbTCCAbYwggErBgcqhkjOOAQBMIIB
+HgKBgQCLpR53xHfe+SiknAK/L9lm/ZO1109c9iYkriPIW/5MMlM+qc/tdRkKpG6E
+LIpfXTPtKCJmzqqVIyTmAJryyE8Xw0Ie2mzYPU5ULvKmllQkjTsWgPGgQBkciZ0A
+W9ggD9VwZilg4qh3iSO7T97hVQFnpCh6vm8pOH6UP/5kpr9ZJQIVANzdbztBJlJf
+qCB1t4h/NvSuwCFvAoGAITP+jhYk9Rngd98l+5ccgauQ+cLEUBgNG2Wq56zBXQbL
+ou6eKkQi7ecLNiRmExq3IU3LOj426wSxL72Kw6FPyOEv3edIFkJJEHL4Z+ZJeVe/
+/dzya0ddOJ7kk6qNF2ic+viD/5Vm8yRyKiig2uHH/MgIesLdZnvbzvX+f/P0z50D
+gYQAAoGALAUljkOi1PxjjFVvhGfK95yIsrfbfcIEKUBaTs9NR2rbGWUeP+93paoX
+wP39X9wrJx2MSWeHWhWKszNgoiyqYT0k4R9mem3WClotxOvB5fHfwIp2kQYvE7H0
+/TPdGhfUpHQGYpyLQgT6L80meSKMFnu4VXGzOANhWDxu3JxiADCjgZQwgZEwCwYD
+VR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5v
+dCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFO37wHcauyc0
+3rDc6cDRRsHzgcK+MB8GA1UdIwQYMBaAFM1IZQGDsqYHWwb+I4EMxHPk0bU4MAsG
+CWCGSAFlAwQDAgNHADBEAiBBRbfMzLi7+SVyO8SM3xxwUsMf/k1B+Nkvf1kBTfCf
+GwIgSAx/6mI+pNqdXqZZGESXy1MT1aBc4ynPGLFUr2r7cPYxggO4MIIBvAIBATBX
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjER
+MA8GA1UEChMIQm9ndXMgQ0ECFGPMbd5dAfZyD1kqY7NIQyVCWZgpMA0GCWCGSAFl
+AwQCAQUAoIIBDjAYBgkqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJ
+BTEPFw0xOTEyMTgxNjAwMDBaMC8GCSqGSIb3DQEJBDEiBCCT0Lk67cs7v1OtnRbv
+ZUBOns/RgPEsttXJOxLKFB79aTCBogYLKoZIhvcNAQkQAjMxgZIwgY8wCwYJYIZI
+AWUDBAICMAoGCCqGSM49BAMDMEEwDQYJYIZIAWUDBAIBBQAEMN+vbArIfin1JoRw
+/UHR1y/ylbyUEeMpbC+1HKRpa6xdPJBovlGTcTReUoked6KSAjAxMA0GCWCGSAFl
+AwQCAQUABCC+AWJGNa+7R7wLKTza/Ix8On6IS6V5aUhEcflZzdM/8TALBglghkgB
+ZQMEAwIEMDAuAhUAm9IjQ1413cJQ24I8W0RfWAPXM7oCFQCMUB4rXWPZbe22HPXZ
+j7q0TKR3sjCCAfQCAQEwTDA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAO
+BgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBAgkApbNUKBuwbjswCwYJ
+YIZIAWUDBAICoIIBHTAYBgkqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3
+DQEJBTEPFw0xOTEyMTgxNjAwMDBaMD8GCSqGSIb3DQEJBDEyBDC25CKk/YJnHtT3
+qsZtRPTosLmNUVhxxlbn8Jo2+lys4+IKEOba8jebiTfTTPmZJmwwgaEGCyqGSIb3
+DQEJEAIzMYGRMIGOMA0GCWCGSAFlAwQCAQUAMAsGCWCGSAFlAwQDAjAvMAsGCWCG
+SAFlAwQCAgQgcylSfbq7wnltzEF7G//28TirRvVDkabxEivR5UKosqUwPzALBglg
+hkgBZQMEAgIEMEAx5qC6BXrb7o0yUseNCSX6+3h5ZX+26e1dBKpApbX3t8rEcsRR
+82TZYCPTWtz4jzAKBggqhkjOPQQDAwRnMGUCMCq/bAd/e5oCu6YIWGZN/xyIX6g7
+QL9hfgKz9i/lPoE35xmRwL/9/H0viqg3HvnDWAIxAIADENLOLox7NiiMK+Ya70I0
+jdEOIlE+zO/fF9I+syiz898JzTosN/V8wvaDoALtnQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.SignedAttributes()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5652.id_data: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5652.id_data: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while not next_layer == rfc5652.id_data:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ if next_layer == rfc5652.id_signedData:
+ signerInfos = asn1Object['signerInfos']
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ found_mult_sig1 = False
+ for attr in signerInfos[0]['signedAttrs']:
+ if attr['attrType'] in rfc5652.cmsAttributesMap:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha384)
+
+ self.assertEqual(
+ 'dfaf6c0a',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig1 = True
+
+ found_mult_sig2 = False
+ for attr in signerInfos[1]['signedAttrs']:
+ if attr['attrType'] in rfc5652.cmsAttributesMap:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha256)
+
+ self.assertEqual(
+ '7329527d',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig2 = True
+
+ self.assertTrue(found_mult_sig1)
+ self.assertTrue(found_mult_sig2)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_mult_sig1 = False
+ for attr in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ av = attr['attrValues'][0]
+
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha384)
+
+ self.assertEqual(
+ 'dfaf6c0a',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig1 = True
+
+ found_mult_sig2 = False
+ for attr in asn1Object['content']['signerInfos'][1]['signedAttrs']:
+ if attr['attrType'] == rfc5752.id_aa_multipleSignatures:
+ av = attr['attrValues'][0]
+
+ self.assertEqual(
+ av['bodyHashAlg']['algorithm'], rfc4055.id_sha256)
+
+ self.assertEqual(
+ '7329527d',
+ av['signAttrsHash']['hash'].prettyPrint()[2:10])
+
+ found_mult_sig2 = True
+
+ self.assertTrue(found_mult_sig1)
+ self.assertTrue(found_mult_sig2)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5753.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5753.py
new file mode 100644
index 0000000000..7bb44ef102
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5753.py
@@ -0,0 +1,129 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5753
+
+
+class EnvelopedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIIGAwYJKoZIhvcNAQcDoIIF9DCCBfACAQIxgdihgdUCAQOgeKF2MBAGByqGSM49
+AgEGBSuBBAAiA2IABGJ8n8NE7e0+gs36C3P+klXlvBXudwiw84lyW0U0pbo9U0Lz
+tr6cknb+lbsRk21dXwHrK9ZW/SjBG+ONTvD+8P6+62xh2OO9lil5uSHmzDYNiTKn
+w8PDuC6X25uFO6Nf2qEJBAdSRkM1NzUzMBUGBiuBBAELAjALBglghkgBZQMEAS0w
+NDAyoBYEFMS6Wg4+euM8gbD0Aqpouxbglg41BBiH5Gdz0Rla/mjLUzxq49Lbxfpv
+p56UaPAwggUOBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAECBBAsmDsiOo0ySncPc/RM
+K3FLgIIE4HPHsXoYyQ/4LRDiK4OrSuRJmmuDye5fH/hLcgw/330Gsl1QBs9jF1CE
+DBM5ki657K/TRMl78Rqb3LIu5lfLQ8WVNGLsoQPwvxzIexGgShtYYwu8TcPiESFM
+a20SWpDEG8zFlmCbqQuc0buPxnvYviVThoBEthNC+S2Umed8JpxwNKJbNTx5dxd2
+dkDNwpHsKgNzT9cGl0NF129Dspehqtdge5LJu3rj1gNynLRI32AQ+pwU+sEHee6w
+DHhU5OWnHlndkm/9MTKY3woOhs1/KQFlRFPC6k71ZpUlncd393wLVIImfoMe4zWP
+AOnbpZ/M7zEJ95rTwwmudBs0qwMfCa3h0Vkg69w6fBHyc1IH8u3VpSPcbOW4dUzJ
+BDJPgB1kObAV02ZA4FQEuZtZiG13u3c7sSrHxsY1rtXssvSe+5rThqPWgDqmH8b/
+yPGEHIFh03kHCDt/UZrdkLCO7a0WhCdY4I9hNU6OYEQmyEFs0LsqEumn34Lv/XcD
+1wgLdPtF65zub4Wil/0Vpu73vIWLIk9LyNIXQSd6w0ZHUvVS+jZZ1zrqIQKhKvG9
+7NpKAYoHa4tOdoXHgBJUxw/uAOKkQ4jC5RS5UKqCZaQcArRD2bCEEsutiuyf06MM
+cWm+RaBY1EwuX+/cT0D6CsWHYFAeQHgLuR4HVk5+PVKoOL/7KUz0jUU5gzFVcmfa
+ocyX5A6R90yggBObefcOIEj3v+5fjHkppfTvi/R03fVZ4NywWyHbN7kOHHy8skJp
+cvNaqSY0dfkb8KOOoTptJH9rCBYtFlC5j/18y8Om9Um4h3/46hYO0xU8izJDzDzJ
+nO/5KS5mGyskweIp3mrE1C/mw68LvrksxQI03CPtbM+FqOKe0VcsAQykiOTnG3d4
+jLeF1iVrc9CgV+pwc5VfgQUwsGhjAFOCKTwWDrr3Je0yVsfzgwY2zuM5uE/+usOS
+Bt7SqbFTLOCba4fJrVVwi0wZig88owVTdl/ACxl2qyLUYC2u5PNJSY6kx8Cgo4gD
+Jk/3oeuys8JqgaufvKybl5GsdDaF3A7usZAjDR1EAWHZ7JGiagtqbvISLD0zq4e4
+nmEhLnIRb7u5SNBPqe8qVuuQjIsvmP0ZuTlnh84ypFOQGz7cfzHtr6UEQoGj8HIm
+bp8diL4tflmFAVNaRjQzu18+2vFB2w1EZIe2/uNLs9ne2EIyoK2Qb+mMCwJsNS0x
+OG0/TzPZ+y0Tp1/LupLHovMosPIGXlbvqZVh2xftDvbIigIMWZQZ2tFxYD6Xc4zA
+00v7H0yGF1pRY+3GpobJkw0Y6ORtgdtdnr2ipioIeQCy0hUpPOmTeSr0L3H7KfNY
+7yQgZg0ra7FIEjM8tDoNqrhznetYUU1ZWM8Lyb3zMxxinSFsGFGx2TiqPyixJNxN
++lPT5D6GRhC9mXgh+BfVod5oINJJwXxJpT5xnsZgW8ujVxiu1Vt5esXCZaXTGlyj
+VTH5dmCvJP9+B8n7dOimmCxCbMQKpNaZixJhoXWQtTgKqL1Qf9WoEs6TDGgfTllq
+jbE4w3O7ZA7fAWe9jbAGwiPV5rF/NVvjaj2+ibtXbSNPW59ddy1/2WzknVYnEHF0
+qZdBZ02Wh4ByXUC3FNvDu8hRTm5aq73DCqXLXUwNU8BvS1xBbbRq5aYI2Rd3naNA
+ns9dHqSvkg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
+
+ ed, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
+ self.assertFalse(rest)
+ self.assertTrue(ed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ed))
+
+ opk_ai_p = rfc5480.ECParameters()
+ opk_ai_p['namedCurve'] = rfc5480.secp384r1
+
+ kwai = rfc5753.KeyWrapAlgorithm()
+ kwai['algorithm'] = rfc3565.id_aes256_wrap
+
+ ukm_found = False
+ self.assertEqual(ed['version'], rfc5652.CMSVersion(value=2))
+ for ri in ed['recipientInfos']:
+ self.assertEqual(ri['kari']['version'], rfc5652.CMSVersion(value=3))
+ opk_alg = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(opk_alg['algorithm'], rfc5753.id_ecPublicKey)
+ self.assertEqual(opk_alg['parameters'], der_encoder(opk_ai_p))
+ kek_alg = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(kek_alg['algorithm'], rfc5753.dhSinglePass_stdDH_sha384kdf_scheme)
+ self.assertEqual(kek_alg['parameters'], der_encoder(kwai))
+ ukm = ri['kari']['ukm']
+ self.assertEqual(ukm, rfc5652.UserKeyingMaterial(hexValue='52464335373533'))
+ ukm_found = True
+
+ self.assertTrue(ukm_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ assert asn1Object['contentType'] == rfc5652.id_envelopedData
+ ed = asn1Object['content']
+
+ ukm_found = False
+ self.assertEqual(ed['version'], rfc5652.CMSVersion(value=2))
+ for ri in ed['recipientInfos']:
+ self.assertEqual(ri['kari']['version'], rfc5652.CMSVersion(value=3))
+ opk_alg = ri['kari']['originator']['originatorKey']['algorithm']
+ self.assertEqual(opk_alg['algorithm'], rfc5753.id_ecPublicKey)
+ self.assertEqual(opk_alg['parameters']['namedCurve'], rfc5480.secp384r1)
+ kek_alg = ri['kari']['keyEncryptionAlgorithm']
+ self.assertEqual(kek_alg['algorithm'], rfc5753.dhSinglePass_stdDH_sha384kdf_scheme)
+ self.assertEqual(kek_alg['parameters']['algorithm'], rfc3565.id_aes256_wrap)
+ ukm = ri['kari']['ukm']
+ self.assertEqual(ukm, rfc5652.UserKeyingMaterial(hexValue='52464335373533'))
+ ukm_found = True
+
+ self.assertTrue(ukm_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5755.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5755.py
new file mode 100644
index 0000000000..cf4a05fa29
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5755.py
@@ -0,0 +1,212 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc3114
+
+
+class AttributeCertificateTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDBTCCAm4CAQEwgY+gUTBKpEgwRjEjMCEGA1UEAwwaQUNNRSBJbnRlcm1lZGlh
+dGUgRUNEU0EgQ0ExCzAJBgNVBAYTAkZJMRIwEAYDVQQKDAlBQ01FIEx0ZC4CAx7N
+WqE6pDgwNjETMBEGA1UEAwwKQUNNRSBFQ0RTQTELMAkGA1UEBhMCRkkxEjAQBgNV
+BAoMCUFDTUUgTHRkLqA9MDukOTA3MRQwEgYDVQQDDAtleGFtcGxlLmNvbTELMAkG
+A1UEBhMCRkkxEjAQBgNVBAoMCUFDTUUgTHRkLjANBgkqhkiG9w0BAQsFAAIEC63K
+/jAiGA8yMDE2MDEwMTEyMDAwMFoYDzIwMTYwMzAxMTIwMDAwWjCB8jA8BggrBgEF
+BQcKATEwMC6GC3VybjpzZXJ2aWNlpBUwEzERMA8GA1UEAwwIdXNlcm5hbWUECHBh
+c3N3b3JkMDIGCCsGAQUFBwoCMSYwJIYLdXJuOnNlcnZpY2WkFTATMREwDwYDVQQD
+DAh1c2VybmFtZTA1BggrBgEFBQcKAzEpMCegGKQWMBQxEjAQBgNVBAMMCUFDTUUg
+THRkLjALDAlBQ01FIEx0ZC4wIAYIKwYBBQUHCgQxFDASMBAMBmdyb3VwMQwGZ3Jv
+dXAyMCUGA1UESDEeMA2hC4YJdXJuOnJvbGUxMA2hC4YJdXJuOnJvbGUyMGowHwYD
+VR0jBBgwFoAUgJCMhskAsEBzvklAX8yJBOXO500wCQYDVR04BAIFADA8BgNVHTcB
+Af8EMjAwMB2gCoYIdXJuOnRlc3SgD4INKi5leGFtcGxlLmNvbTAPoA2GC3Vybjph
+bm90aGVyMA0GCSqGSIb3DQEBCwUAA4GBACygfTs6TkPurZQTLufcE3B1H2707OXK
+sJlwRpuodR2oJbunSHZ94jcJHs5dfbzFs6vNfVLlBiDBRieX4p+4JcQ2P44bkgyi
+UTJu7g1b6C1liB3vO6yH5hOZicOAaKd+c/myuGb9uJ4n6y2oLNxnk/fDzpuZUe2h
+Q4eikPk4LQey
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5755.AttributeCertificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ count = 0
+
+ for attr in asn1Object['acinfo']['attributes']:
+ self.assertIn(attr['type'], rfc5280.certificateAttributesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5280.certificateAttributesMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ count += 1
+
+ self.assertEqual(5, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(1, asn1Object['acinfo']['version'])
+
+ count = 0
+
+ for attr in asn1Object['acinfo']['attributes']:
+ self.assertIn(attr['type'], rfc5280.certificateAttributesMap)
+ count += 1
+ if attr['type'] == rfc5755.id_aca_authenticationInfo:
+ self.assertEqual(
+ str2octs('password'), attr['values'][0]['authInfo'])
+
+ self.assertEqual(5, count)
+
+
+class CertificateWithClearanceTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIID1DCCA1qgAwIBAgIUUc1IQGJpeYQ0XwOS2ZmVEb3aeZ0wCgYIKoZIzj0EAwMw
+ZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAw
+DgYDVQQKEwdFeGFtcGxlMQwwCgYDVQQLEwNQQ0ExGDAWBgNVBAMTD3BjYS5leGFt
+cGxlLmNvbTAeFw0xOTExMDUyMjIwNDZaFw0yMDExMDQyMjIwNDZaMIGSMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoT
+B0V4YW1wbGUxIjAgBgNVBAsTGUh1bWFuIFJlc291cmNlIERlcGFydG1lbnQxDTAL
+BgNVBAMTBEZyZWQxHzAdBgkqhkiG9w0BCQEWEGZyZWRAZXhhbXBsZS5jb20wdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQObFslQ2EBP0xlDJ3sRnsNaqm/woQgKpBispSx
+XxK5bWUVpfnWsZnjLWhtDuPcu1BcBlM2g7gwL/aw8nUSIK3D8Ja9rTUQQXc3zxnk
+cl8+8znNXHMGByRjPUH87C+TOrqjggGaMIIBljAdBgNVHQ4EFgQU5m711OqFDNGR
+SWMOSzTXjpTLIFUwbwYDVR0jBGgwZoAUJuolDwsyICik11oKjf8t3L1/VGWhQ6RB
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjER
+MA8GA1UECgwIQm9ndXMgQ0GCCQCls1QoG7BuRjAPBgNVHRMBAf8EBTADAQH/MAsG
+A1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5u
+b3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1UdIAQOMAwwCgYIKwYB
+BQUHDQIwCgYDVR02BAMCAQIwfwYDVR0JBHgwdjBJBgNVBDcxQjBABgsqhkiG9w0B
+CRAHAwMCBeAxLTArgAsqhkiG9w0BCRAHBIEcMBoMGEhVTUFOIFJFU09VUkNFUyBV
+U0UgT05MWTApBglghkgBZQIBBUQxHAwaSHVtYW4gUmVzb3VyY2VzIERlcGFydG1l
+bnQwCgYIKoZIzj0EAwMDaAAwZQIwVh/RypULFgPpAN0I7OvuMomRWnm/Hea3Hk8P
+tTRz2Zai8iYat7oeAmGVgMhSXy2jAjEAuJW4l/CFatBy4W/lZ7gS3weBdBa5WEDI
+FFMC7GjGtCeLtXYqWfBnRdK26dOaHLB2
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ clearance_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5755.id_at_clearance:
+ self.assertIn(attr['type'], rfc5280.certificateAttributesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5280.certificateAttributesMap[attr['type']])
+
+ self.assertEqual(rfc3114.id_tsp_TEST_Whirlpool, av['policyId'])
+
+ for cat in av['securityCategories']:
+ self.assertEqual(
+ rfc3114.id_tsp_TEST_Whirlpool_Categories, cat['type'])
+ self.assertIn(
+ cat['type'], rfc5755.securityCategoryMap)
+ catv, rest = der_decoder(
+ cat['value'],
+ asn1Spec=rfc5755.securityCategoryMap[cat['type']])
+
+ self.assertIn('USE ONLY', catv[0])
+
+ clearance_found = True
+
+ self.assertTrue(clearance_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ clearance_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5755.id_at_clearance:
+ spid = rfc3114.id_tsp_TEST_Whirlpool
+ catid = rfc3114.id_tsp_TEST_Whirlpool_Categories
+
+ self.assertEqual(spid, attr['values'][0]['policyId'])
+
+ for cat in attr['values'][0]['securityCategories']:
+ self.assertEqual(catid, cat['type'])
+ self.assertIn( u'USE ONLY', cat['value'][0])
+
+ clearance_found = True
+
+ self.assertTrue(clearance_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5913.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5913.py
new file mode 100644
index 0000000000..ef5908662f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5913.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5913
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc3114
+
+
+class ClearanceTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIDhzCCAw6gAwIBAgIJAKWzVCgbsG5GMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMTAyMTg0MjE4WhcNMjAxMTAxMTg0MjE4WjBmMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxDDAKBgNVBAsTA1BDQTEYMBYGA1UEAxMPcGNhLmV4YW1wbGUuY29tMHYw
+EAYHKoZIzj0CAQYFK4EEACIDYgAEPf5vbgAqbE5dn6wbiCx4sCCcn1BKSrHmCfiW
+C9QLSGVNGHifQwPt9odGXjRiQ7QwpZ2wRD6Z91v+fk85XXLE3kJQCQdPIHFUY5EM
+pvS7T6u6xrmwnlVpUURPTOxfc55Oo4IBrTCCAakwHQYDVR0OBBYEFCbqJQ8LMiAo
+pNdaCo3/Ldy9f1RlMG8GA1UdIwRoMGaAFPI12zQE2qVV8r1pA5mwYuziFQjBoUOk
+QTA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24x
+ETAPBgNVBAoMCEJvZ3VzIENBggkA6JHWBpFPzvIwDwYDVR0TAQH/BAUwAwEB/zAL
+BgNVHQ8EBAMCAYYwQgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fu
+bm90IGJlIHRydXN0ZWQgZm9yIGFueSBwdXJwb3NlLjAVBgNVHSAEDjAMMAoGCCsG
+AQUFBw0CMAoGA1UdNgQDAgECMIGRBggrBgEFBQcBFQSBhDCBgTBZBgsqhkiG9w0B
+CRAHAwMCBeAxRjBEgAsqhkiG9w0BCRAHBIE1MDMMF0xBVyBERVBBUlRNRU5UIFVT
+RSBPTkxZDBhIVU1BTiBSRVNPVVJDRVMgVVNFIE9OTFkwEQYLKoZIhvcNAQkQBwID
+AgTwMBEGCyqGSIb3DQEJEAcBAwIF4DAKBggqhkjOPQQDAwNnADBkAjAZSD+BVqzc
+1l0fDoH3LwixjxvtddBHbJsM5yBek4U9b2yWL2KEmwV02fTgof3AjDECMCTsksmx
+5f3i5DSYfe9Q1heJlEJLd1hgZmfvUYNnCU3WrdmYzyoNdNTbg7ZFMoxsXw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cat_value_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5913.id_pe_clearanceConstraints:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for c in ev:
+ if c['policyId'] == rfc3114.id_tsp_TEST_Whirlpool:
+ for sc in c['securityCategories']:
+ self.assertIn(sc['type'], rfc5755.securityCategoryMap)
+
+ scv, rest = der_decoder(
+ sc['value'],
+ asn1Spec=rfc5755.securityCategoryMap[sc['type']])
+
+ for cat in scv:
+ self.assertIn('USE ONLY', cat)
+ cat_value_found = True
+
+ self.assertTrue(cat_value_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cat_value_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5913.id_pe_clearanceConstraints:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for c in ev:
+ if c['policyId'] == rfc3114.id_tsp_TEST_Whirlpool:
+ for sc in c['securityCategories']:
+ self.assertIn(sc['type'], rfc5755.securityCategoryMap)
+ for cat in sc['value']:
+ self.assertIn('USE ONLY', cat)
+ cat_value_found = True
+
+ self.assertTrue(cat_value_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5914.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5914.py
new file mode 100644
index 0000000000..3a70ec8d83
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5914.py
@@ -0,0 +1,79 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5914
+from pyasn1_modules import rfc5652
+
+
+class TrustAnchorListTestCase(unittest.TestCase):
+ trust_anchor_list_pem_text = """\
+MIIGGQYLKoZIhvcNAQkQASKgggYIMIIGBKGCAvYwggLyoAMCAQICAgDJMA0GCSqG
+SIb3DQEBCwUAMBYxFDASBgNVBAMTC3JpcGUtbmNjLXRhMCAXDTE3MTEyODE0Mzk1
+NVoYDzIxMTcxMTI4MTQzOTU1WjAWMRQwEgYDVQQDEwtyaXBlLW5jYy10YTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANFEWEhqlM9psgbDs3ltY0OjbMTb
+5SzMoVpJ755fDYgQrP0/0tl7jSkDWfsAWcSIDz1dqRQRXkAL6B/1ivNx8ANuldrI
+sJvzGNpymfjpcPsJac5WdadyKY9njXCq5orfAcAQvMSJs7ghmldI5EQdBmdIaB+j
+JdN7pi6a0bJ+r9MTj9PpekHNWRzBVRW9/OSEOxUEE3FSMa3XjLKMiavXjJBOg6HJ
+R4RfzZUpZV7mwEkPSlFqidPjrd0Al6+C1xAjH5KZFUdk2U/r+b+ufGx1bOmcUQ9W
++lJNbkCgMh1G5/7V7z/Ja4wImxs1bFw09i9MeBHcfkHYsT4Do4t4ATMi9lcCAwEA
+AaOCAV4wggFaMB0GA1UdDgQWBBToVSsf1tGk9+QExtjlaA0evBY/wzAPBgNVHRMB
+Af8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjCBsQYIKwYBBQUHAQsEgaQwgaEwPAYI
+KwYBBQUHMAqGMHJzeW5jOi8vcnBraS5yaXBlLm5ldC9yZXBvc2l0b3J5L3JpcGUt
+bmNjLXRhLm1mdDAyBggrBgEFBQcwDYYmaHR0cHM6Ly9ycmRwLnJpcGUubmV0L25v
+dGlmaWNhdGlvbi54bWwwLQYIKwYBBQUHMAWGIXJzeW5jOi8vcnBraS5yaXBlLm5l
+dC9yZXBvc2l0b3J5LzAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMCcGCCsGAQUF
+BwEHAQH/BBgwFjAJBAIAATADAwEAMAkEAgACMAMDAQAwIQYIKwYBBQUHAQgBAf8E
+EjAQoA4wDDAKAgEAAgUA/////zCCAgIwggGIoAMCAQICCQDokdYGkU/O8jAKBggq
+hkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hl
+cm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUxNDA4NTgxMVoXDTIxMDUx
+MzA4NTgxMVowPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdI
+ZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTB2MBAGByqGSM49AgEGBSuBBAAiA2IA
+BPBRdlSx6I5qpZ2sKUMIxun1gUAzzstOYWKvKCnMoNT1x+pIKDvMEMimFcLAxxL3
+NVYOhK0Jty83SPDkKWMdx9/Okdhf3U/zxJlEnXDiFrAeM6xbG8zcCRiBnmd92Uvs
+RqNQME4wHQYDVR0OBBYEFPI12zQE2qVV8r1pA5mwYuziFQjBMB8GA1UdIwQYMBaA
+FPI12zQE2qVV8r1pA5mwYuziFQjBMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwMD
+aAAwZQIwWlGNjb9NyqJSzUSdsEqDSvMZb8yFkxYCIbAVqQ9UqScUUb9tpJKGsPWw
+bZsnLVvmAjEAt/ypozbUhQw4dSPpWzrn5BQ0kKbDM3DQJcBABEUBoIOol1/jYQPm
+xajQuxcheFlkooIBADCB/TB2MBAGByqGSM49AgEGBSuBBAAiA2IABOIIQup32CTe
+oCxkpBPOQJwjcqkCCg43PyE2uI1TFPbVkZVL85YCjXEexNjLp59e76Dmf1qSEZZT
+b+vAyz+u/Vs/RyTnmgculr6oL7tXGK9xpL14Oh7oWzxrZBErzDQrjAQUo53mH/na
+OU/AbuiRy5Wl2jHiCp8MFURpZ2lDZXJ0IFRydXN0IEFuY2hvcjBSMEwxCzAJBgNV
+BAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxJjAkBgNVBAMTHURpZ2lDZXJ0
+IEVDQyBTZWN1cmUgU2VydmVyIENBggIFIIICZW4=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.trust_anchor_list_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5914.id_ct_trustAnchorList, asn1Object['contentType'])
+
+ tal, rest = der_decoder(asn1Object['content'], rfc5914.TrustAnchorList())
+
+ self.assertFalse(rest)
+ self.assertTrue(tal.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(tal))
+ self.assertEqual(3, sum(1 for _ in tal))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5915.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5915.py
new file mode 100644
index 0000000000..6e54e5a4b1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5915.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5915
+from pyasn1_modules import rfc5480
+
+
+class MUDCertTestCase(unittest.TestCase):
+ private_key_pem_text = """\
+MIGkAgEBBDDLjzGbbLrR3T13lrrVum7WC/4Ua4Femc1RhhNVe1Q5XsArQ33kn9kx
+3lOUfOcG+qagBwYFK4EEACKhZANiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6z
+XCYlmsEGD5vPu5hl9hDEjd1UHRgJIPoy3fJcWWeZ8FHCirICtuMgFisNscG/aTwK
+yDYOFDuqz/C2jyEwqgWCRyxyohuJXtk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5915.ECPrivateKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.private_key_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc5480.secp384r1, asn1Object['parameters']['namedCurve'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5916.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5916.py
new file mode 100644
index 0000000000..a653b8c96d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5916.py
@@ -0,0 +1,107 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5916
+
+
+class DeviceCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICpzCCAiygAwIBAgIJAKWzVCgbsG5FMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDMxMTQwMDE1WhcNMjAxMDMwMTQwMDE1WjB4MQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxGjAYBgNVBAsTEURldmljZSBPcGVyYXRpb25zMRwwGgYDVQQDExNleDEy
+MzQ1LmV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE7Lje3glS2qYl
+5x6N9TOlD4CbnzfFeJQfbDaCa3vexEiwE0apuAP+4L5fqOsYeZC970iNW+z3PdUs
+GzkKDC2cCVy8nIxQ3mWhNQDvavT3iz5OGSwa1GjSXRFbGn2x9QjNo4G6MIG3MEIG
+CWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVk
+IGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFPTQN1kXEM5Rd4hNvQL5HyA+o2No
+MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAsGA1UdDwQEAwIHgDAk
+BgNVHQkEHTAbMBkGCWCGSAFlAgEFRTEMBgorBgEEAYGsYDAYMAoGCCqGSM49BAMD
+A2kAMGYCMQCt6AceOEIwXFKFHIV8+wTK/vgs7ZYSA6jhXUpzNtzZw1xh9NxVUhmx
+pogu5Q9Vp28CMQC5YVF8dShC1tk9YImRftiVl8C6pbj//1K/+MwmR6nRk/WU+hKl
++Qsc5Goi6At471s=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_dev_owner = False
+ der_dev_own_oid = der_encoder(univ.ObjectIdentifier('1.3.6.1.4.1.22112.48.24'))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5916.id_deviceOwner:
+ self.assertEqual(der_dev_own_oid, attr['values'][0])
+ found_dev_owner = True
+
+ self.assertTrue(found_dev_owner)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_dev_owner = False
+ dev_own_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48.24')
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5916.id_deviceOwner:
+ self.assertEqual(dev_own_oid, attr['values'][0])
+ found_dev_owner = True
+
+ self.assertTrue(found_dev_owner)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5917.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5917.py
new file mode 100644
index 0000000000..1023fb86a7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5917.py
@@ -0,0 +1,119 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5917
+
+
+class ClearanceSponsorTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIID1DCCA1qgAwIBAgIUUc1IQGJpeYQ0XwOS2ZmVEb3aeZ0wCgYIKoZIzj0EAwMw
+ZjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAw
+DgYDVQQKEwdFeGFtcGxlMQwwCgYDVQQLEwNQQ0ExGDAWBgNVBAMTD3BjYS5leGFt
+cGxlLmNvbTAeFw0xOTExMDUyMjIwNDZaFw0yMDExMDQyMjIwNDZaMIGSMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoT
+B0V4YW1wbGUxIjAgBgNVBAsTGUh1bWFuIFJlc291cmNlIERlcGFydG1lbnQxDTAL
+BgNVBAMTBEZyZWQxHzAdBgkqhkiG9w0BCQEWEGZyZWRAZXhhbXBsZS5jb20wdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQObFslQ2EBP0xlDJ3sRnsNaqm/woQgKpBispSx
+XxK5bWUVpfnWsZnjLWhtDuPcu1BcBlM2g7gwL/aw8nUSIK3D8Ja9rTUQQXc3zxnk
+cl8+8znNXHMGByRjPUH87C+TOrqjggGaMIIBljAdBgNVHQ4EFgQU5m711OqFDNGR
+SWMOSzTXjpTLIFUwbwYDVR0jBGgwZoAUJuolDwsyICik11oKjf8t3L1/VGWhQ6RB
+MD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjER
+MA8GA1UECgwIQm9ndXMgQ0GCCQCls1QoG7BuRjAPBgNVHRMBAf8EBTADAQH/MAsG
+A1UdDwQEAwIBhjBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5u
+b3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMBUGA1UdIAQOMAwwCgYIKwYB
+BQUHDQIwCgYDVR02BAMCAQIwfwYDVR0JBHgwdjBJBgNVBDcxQjBABgsqhkiG9w0B
+CRAHAwMCBeAxLTArgAsqhkiG9w0BCRAHBIEcMBoMGEhVTUFOIFJFU09VUkNFUyBV
+U0UgT05MWTApBglghkgBZQIBBUQxHAwaSHVtYW4gUmVzb3VyY2VzIERlcGFydG1l
+bnQwCgYIKoZIzj0EAwMDaAAwZQIwVh/RypULFgPpAN0I7OvuMomRWnm/Hea3Hk8P
+tTRz2Zai8iYat7oeAmGVgMhSXy2jAjEAuJW4l/CFatBy4W/lZ7gS3weBdBa5WEDI
+FFMC7GjGtCeLtXYqWfBnRdK26dOaHLB2
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cs = rfc5917.DirectoryString()
+ cs['utf8String'] = u'Human Resources Department'
+ encoded_cs = der_encoder(cs)
+
+ clearance_sponsor_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5917.id_clearanceSponsor:
+ self.assertEqual(encoded_cs, attr['values'][0])
+ clearance_sponsor_found = True
+
+ self.assertTrue(clearance_sponsor_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ clearance_sponsor_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectDirectoryAttributes:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ for attr in ev:
+ if attr['type'] == rfc5917.id_clearanceSponsor:
+ hrd = u'Human Resources Department'
+
+ self.assertEqual(hrd, attr['values'][0]['utf8String'])
+
+ clearance_sponsor_found = True
+
+ self.assertTrue(clearance_sponsor_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5924.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5924.py
new file mode 100644
index 0000000000..f1ae64ac17
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5924.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5924
+
+
+class SIPDomainCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICiTCCAg+gAwIBAgIJAKWzVCgbsG5EMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDMwMjEwMDM0WhcNMjAxMDI5MjEwMDM0WjBsMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxEjAQBgNVBAsTCVNJUCBQcm94eTEYMBYGA1UEAxMPc2lwLmV4YW1wbGUu
+Y29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEcY3ckttSa6z3CfOFwZvPmZY8C9Ml
+D1XOydz00+Vqifh1lydhDuulHrJaQ+QgVjG1TzlTAssD9GeABit/M98DPS/IC3wi
+TsTMSyQ9/Oz4hKAw7x7lYEvufvycsZ7pJGRso4GpMIGmMEIGCWCGSAGG+EIBDQQ1
+FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVy
+cG9zZS4wHQYDVR0OBBYEFEcJ8iFWmJOl3Hg/44UFgFWNbe7FMB8GA1UdIwQYMBaA
+FPI12zQE2qVV8r1pA5mwYuziFQjBMAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggr
+BgEFBQcDFDAKBggqhkjOPQQDAwNoADBlAjAXEPPNyXBUj40dzy+ZOqafuM3/6Fy6
+bkgiIObcQImra96X10fe6qacanrbu4uU6d8CMQCQ+BCjCnOP4dBbNC3vB0WypxLo
+UwZ6TjS0Rfr+dRvlyilVjP+hPVwbyb7ZOSZR6zk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ found_kp_sipDomain = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(
+ extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+ self.assertIn(rfc5924.id_kp_sipDomain, ev)
+
+ found_kp_sipDomain = True
+
+ self.assertTrue(found_kp_sipDomain)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5934.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5934.py
new file mode 100644
index 0000000000..ba18b560f7
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5934.py
@@ -0,0 +1,299 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Acknowledgement to Carl Wallace for the test messages.
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5934
+
+
+class TAMPStatusResponseTestCase(unittest.TestCase):
+ tsr_pem_text = """\
+MIIU/QYJKoZIhvcNAQcCoIIU7jCCFOoCAQMxDTALBglghkgBZQMEAgEwgg/GBgpghkgBZQIB
+Ak0CoIIPtgSCD7Iwgg+uMAiDAAIEXXp3f6GCD50wgg+ZooIFFTCCBREwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDALMH2jTus/z881nG+uHQiB+xwQRX8q0DjB6rBw9if/tpM
+Or8/yNgoe0s2AcCsRSXD0g4Kj4UYZBA9GhNwKm+O19yNk7NBDzghza2rwj0qBdNXETcNzYxR
++ZPjzEZJIY4UtM3LFD44zXIx7qsS8mXqNC5WXf/uY3XLbbqRNPye8/QtHL5QxELfWYj/arP6
+qGw9y1ZxcQWWu5+A5YBFWWdBsOvDrWCkgHUGF5wO9EPgmQ4b+3/1s8yygYKx/TLBuL5BpGS1
+YDpaUTCMzt5BLBlHXEkQZLl0qYdBr31uusG4ob9lMToEZ/m1u46SigBjuLHmjDhfg/9Q1Tui
+XWuyEMxjAgMBAAEEFEl0uwxeunr+AlTve6DGlcYJgHCWMIID0TBbMQswCQYDVQQGEwJVUzEY
+MBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEW
+MBQGA1UEAxMNRG9EIFJvb3QgQ0EgMqCCA3AwggJYoAMCAQICAQUwDQYJKoZIhvcNAQEFBQAw
+WzELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDEMMAoGA1UECxMDRG9E
+MQwwCgYDVQQLEwNQS0kxFjAUBgNVBAMTDURvRCBSb290IENBIDIwHhcNMDQxMjEzMTUwMDEw
+WhcNMjkxMjA1MTUwMDEwWjBbMQswCQYDVQQGEwJVUzEYMBYGA1UEChMPVS5TLiBHb3Zlcm5t
+ZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEWMBQGA1UEAxMNRG9EIFJvb3QgQ0Eg
+MjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMAswfaNO6z/PzzWcb64dCIH7HBB
+FfyrQOMHqsHD2J/+2kw6vz/I2Ch7SzYBwKxFJcPSDgqPhRhkED0aE3Aqb47X3I2Ts0EPOCHN
+ravCPSoF01cRNw3NjFH5k+PMRkkhjhS0zcsUPjjNcjHuqxLyZeo0LlZd/+5jdcttupE0/J7z
+9C0cvlDEQt9ZiP9qs/qobD3LVnFxBZa7n4DlgEVZZ0Gw68OtYKSAdQYXnA70Q+CZDhv7f/Wz
+zLKBgrH9MsG4vkGkZLVgOlpRMIzO3kEsGUdcSRBkuXSph0GvfW66wbihv2UxOgRn+bW7jpKK
+AGO4seaMOF+D/1DVO6Jda7IQzGMCAwEAAaM/MD0wHQYDVR0OBBYEFEl0uwxeunr+AlTve6DG
+lcYJgHCWMAsGA1UdDwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IB
+AQCYkY0/ici79cBpcyk7Nay6swh2PXAJkumERCEBfRR2G+5RbB2NFTctezFp9JpEuK9GzDT6
+I8sDJxnSgyF1K+fgG5km3IRAleio0sz2WFxm7z9KlxCCHboKot1bBiudp2RO6y4BNaS0PxOt
+VeTVc6hpmxHxmPIxHm9A1Ph4n46RoG9wBJBmqgYrzuF6krV94eDRluehOi3MsZ0fBUTth5nT
+TRpwOcEEDOV+2fGv1yAO8SJ6JaRzmcw/pAcnlqiile2CuRbTnguHwsHyiPVi32jfx7xpUe2x
+XNxUVCkPCTmarAPB2wxNrm8KehZJ8b+R0jiU0/aVLLdsyUK2jcqQjYXZooIFGDCCBRQwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCp7BRyiuhLcKPaEAOEpvunNg0qOlIWvzAV
+UoYFRyDPqqbNdcRkbu/xYCPLCmZArrTIaCoAUWhJN+lZMk2VvEMn6UCNOhDOFLxDGKH53szn
+hXZzXhgaI1u9Px/y7Y0ZzAPRQKSPpyACTCdaeTb2ozchjgBaBhbK01WWbzEpu3IOy+JIUfLU
+N6Q11m/uF7OxBqsLGYboI20xGyh4ZcXeYlK8wX3r7qBdVAT7sssrsiNUkYJM8L+6dEA7DARF
+gGdcxeuiV8MafwotvX+53MGZsMgH5AyGNpQ6JS/yfeaXPBuUtJdZBsk65AvZ6un8O3M0b/3n
+mOTzocKQXxz1Py7XGdN/AgMBAAEEFGyKlKJ3sYByHYF6Fqry3M5m7kXAMIID1DBbMQswCQYD
+VQQGEwJVUzEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNV
+BAsTA1BLSTEWMBQGA1UEAxMNRG9EIFJvb3QgQ0EgM6CCA3MwggJboAMCAQICAQEwDQYJKoZI
+hvcNAQELBQAwWzELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDEMMAoG
+A1UECxMDRG9EMQwwCgYDVQQLEwNQS0kxFjAUBgNVBAMTDURvRCBSb290IENBIDMwHhcNMTIw
+MzIwMTg0NjQxWhcNMjkxMjMwMTg0NjQxWjBbMQswCQYDVQQGEwJVUzEYMBYGA1UEChMPVS5T
+LiBHb3Zlcm5tZW50MQwwCgYDVQQLEwNEb0QxDDAKBgNVBAsTA1BLSTEWMBQGA1UEAxMNRG9E
+IFJvb3QgQ0EgMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKnsFHKK6Etwo9oQ
+A4Sm+6c2DSo6Uha/MBVShgVHIM+qps11xGRu7/FgI8sKZkCutMhoKgBRaEk36VkyTZW8Qyfp
+QI06EM4UvEMYofnezOeFdnNeGBojW70/H/LtjRnMA9FApI+nIAJMJ1p5NvajNyGOAFoGFsrT
+VZZvMSm7cg7L4khR8tQ3pDXWb+4Xs7EGqwsZhugjbTEbKHhlxd5iUrzBfevuoF1UBPuyyyuy
+I1SRgkzwv7p0QDsMBEWAZ1zF66JXwxp/Ci29f7ncwZmwyAfkDIY2lDolL/J95pc8G5S0l1kG
+yTrkC9nq6fw7czRv/eeY5POhwpBfHPU/LtcZ038CAwEAAaNCMEAwHQYDVR0OBBYEFGyKlKJ3
+sYByHYF6Fqry3M5m7kXAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+SIb3DQEBCwUAA4IBAQCfcaTAtpbSgEOgSOkfdgT5xTytZhhYY5vDtuhoioVaQmYStNLmi4h/
+h/SY9ajGCckf8Cwf7IK49KVHOMEzK99Mfpq+Cwuxyw98UCgQz4qNoum6rIbX1LGTXyKPlgW0
+Tgx1kX3T8ueUwpQUdk+PDKsQh1gyhQd1hhILXupTtArITISSH+voQYY8uvROQUrRbFhHQcOG
+WvLu6fKYJ4LqLjbW+AZegvGgUpNECbrSqRlaWKOoXSBtT2T4MIcbkBNIgc3KkMcNwdSYP47y
+DldoMxKOmQmx8OT2EPQ28km96qM4yFZBI4Oa36EbNXzrP0Gz9W9LOl6ub5N2mNLxmZ1FxI5y
+ooIFYDCCBVwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ3HcYEBAYYEH753gQ
+D/iEd3DvLW5VOxGmmVI/bfS9oZf6Nh5uREIRyFP+dYabXjcSiKJ92XEI1Ek1cc5Gz1vQWY5l
+H+tCPcoO3EyQ2FRpz144siBg3YNRLt/b1Vs4kVotz5oztG+WkOV2FGJDaYQQz1RB+TXqntRa
+l51eEFm94OTDWYnX3vJ5sIdrAsBZoSoAghVvaxERAFM0dD304cxWYqLkZegjsYMdWFMIsjMt
+lr7lfTOeEFonc1PdXZjiSxFTWJGP6nIR7LuU8g0PUK3yFrUaACQx5RW9FwaQqiSxrN0MUh7w
+i2qruPft32O0zpRov16W0ESW8fj0ejoKeRVTAgMBAAEEFKg8CZ1n9thHuqLQ/BhyVohAbZWV
+MIID0jBTMQswCQYDVQQGEwJVUzEfMB0GA1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEj
+MCEGA1UEAxMaVmFsaWQgRUUgQ2VydGlmaWNhdGUgVGVzdDGgggN5MIICYaADAgECAgEBMA0G
+CSqGSIb3DQEBCwUAMEAxCzAJBgNVBAYTAlVTMR8wHQYDVQQKExZUZXN0IENlcnRpZmljYXRl
+cyAyMDExMRAwDgYDVQQDEwdHb29kIENBMB4XDTEwMDEwMTA4MzAwMFoXDTMwMTIzMTA4MzAw
+MFowUzELMAkGA1UEBhMCVVMxHzAdBgNVBAoTFlRlc3QgQ2VydGlmaWNhdGVzIDIwMTExIzAh
+BgNVBAMTGlZhbGlkIEVFIENlcnRpZmljYXRlIFRlc3QxMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA2dx3GBAQGGBB++d4EA/4hHdw7y1uVTsRpplSP230vaGX+jYebkRCEchT
+/nWGm143EoiifdlxCNRJNXHORs9b0FmOZR/rQj3KDtxMkNhUac9eOLIgYN2DUS7f29VbOJFa
+Lc+aM7RvlpDldhRiQ2mEEM9UQfk16p7UWpedXhBZveDkw1mJ197yebCHawLAWaEqAIIVb2sR
+EQBTNHQ99OHMVmKi5GXoI7GDHVhTCLIzLZa+5X0znhBaJ3NT3V2Y4ksRU1iRj+pyEey7lPIN
+D1Ct8ha1GgAkMeUVvRcGkKoksazdDFIe8Itqq7j37d9jtM6UaL9eltBElvH49Ho6CnkVUwID
+AQABo2swaTAfBgNVHSMEGDAWgBRYAYQkG7wrUpRKPaUQchRR9a86yTAdBgNVHQ4EFgQUqDwJ
+nWf22Ee6otD8GHJWiEBtlZUwDgYDVR0PAQH/BAQDAgTwMBcGA1UdIAQQMA4wDAYKYIZIAWUD
+AgEwATANBgkqhkiG9w0BAQsFAAOCAQEAHlrZD69ipblSvLzsDGGIEwGqCg8NR6OeqbIXG/ij
+2SzSjTi+O7LP1DGIz85p9I7HuXAFUcAGh8aVtPZq+jGeLcQXs+3lehlhGG6M0eQO2pttbI0G
+kO4s0XlY2ITNm0HTGOL+kcZfACcUZXsS+i+9qL80ji3PF0xYWzAPLmlmRSYmIZjT85CuKYda
+Tsa96Ch+D6CU5v9ctVxP3YphWQ4F0v/FacDTiUrRwuXI9MgIw/0qI0+EAFwsRC2DisI9Isc8
+YPKKeOMbRmXamY/4Y8HUeqBwpnqnEJudrH++FPBEI4dYrBAV6POgvx4lyzarAmlarv/AbrBD
+ngieGTynMG6NwqFIMEYwRAYIKwYBBQUHARIBAf8ENTAzMA8GCmCGSAFlAgECTQMKAQEwDwYK
+YIZIAWUCAQJNAQoBATAPBgpghkgBZQIBAk0CCgEBAQEAoIIDfTCCA3kwggJhoAMCAQICAQEw
+DQYJKoZIhvcNAQELBQAwQDELMAkGA1UEBhMCVVMxHzAdBgNVBAoTFlRlc3QgQ2VydGlmaWNh
+dGVzIDIwMTExEDAOBgNVBAMTB0dvb2QgQ0EwHhcNMTAwMTAxMDgzMDAwWhcNMzAxMjMxMDgz
+MDAwWjBTMQswCQYDVQQGEwJVUzEfMB0GA1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEj
+MCEGA1UEAxMaVmFsaWQgRUUgQ2VydGlmaWNhdGUgVGVzdDEwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDZ3HcYEBAYYEH753gQD/iEd3DvLW5VOxGmmVI/bfS9oZf6Nh5uREIR
+yFP+dYabXjcSiKJ92XEI1Ek1cc5Gz1vQWY5lH+tCPcoO3EyQ2FRpz144siBg3YNRLt/b1Vs4
+kVotz5oztG+WkOV2FGJDaYQQz1RB+TXqntRal51eEFm94OTDWYnX3vJ5sIdrAsBZoSoAghVv
+axERAFM0dD304cxWYqLkZegjsYMdWFMIsjMtlr7lfTOeEFonc1PdXZjiSxFTWJGP6nIR7LuU
+8g0PUK3yFrUaACQx5RW9FwaQqiSxrN0MUh7wi2qruPft32O0zpRov16W0ESW8fj0ejoKeRVT
+AgMBAAGjazBpMB8GA1UdIwQYMBaAFFgBhCQbvCtSlEo9pRByFFH1rzrJMB0GA1UdDgQWBBSo
+PAmdZ/bYR7qi0PwYclaIQG2VlTAOBgNVHQ8BAf8EBAMCBPAwFwYDVR0gBBAwDjAMBgpghkgB
+ZQMCATABMA0GCSqGSIb3DQEBCwUAA4IBAQAeWtkPr2KluVK8vOwMYYgTAaoKDw1Ho56pshcb
++KPZLNKNOL47ss/UMYjPzmn0jse5cAVRwAaHxpW09mr6MZ4txBez7eV6GWEYbozR5A7am21s
+jQaQ7izReVjYhM2bQdMY4v6Rxl8AJxRlexL6L72ovzSOLc8XTFhbMA8uaWZFJiYhmNPzkK4p
+h1pOxr3oKH4PoJTm/1y1XE/dimFZDgXS/8VpwNOJStHC5cj0yAjD/SojT4QAXCxELYOKwj0i
+xzxg8op44xtGZdqZj/hjwdR6oHCmeqcQm52sf74U8EQjh1isEBXo86C/HiXLNqsCaVqu/8Bu
+sEOeCJ4ZPKcwbo3CMYIBiTCCAYUCAQOAFKg8CZ1n9thHuqLQ/BhyVohAbZWVMAsGCWCGSAFl
+AwQCAaBMMBkGCSqGSIb3DQEJAzEMBgpghkgBZQIBAk0CMC8GCSqGSIb3DQEJBDEiBCAiPyBP
+FFwHJbHgGmoz+54OEJ/ppMyfSoZmbS/nkWfxxjALBgkqhkiG9w0BAQsEggEAHllTg+TMT2ll
+zVvrvRDwOwrzr6YIJSt96sLANqOXiqqnvrHDDWTdVMcRX/LccVbm9JP4sGSfGDdwbm3FqB+l
+kgSBlejFgjWfF/YVK5OpaVcPGg4DB3oAOwxtn0GVQtKgGkiGQF0r5389mTHYlQzS6BVDG2Oi
+sKIe4SBazrBGjnKANf9LEunpWPt15y6QCxiEKnJfPlAqiMuiIhHmXPIHi+d3sYkC+iu+5I68
+2oeLdtBWCDcGh4+DdS6Qqzkpp14MpvzBMdfD3lKcI3NRmY+GmRYaGAiEalh83vggslF7N4SS
+iPxQyqz7LIQe9/5ynJV5/CPUDBL9QK2vSCOQaihWCg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tsr_pem_text)
+
+ layers = {
+ rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
+ rfc5652.id_signedData: rfc5652.SignedData(),
+ rfc5934.id_ct_TAMP_statusResponse: rfc5934.TAMPStatusResponse()
+ }
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5934.id_ct_TAMP_statusResponse: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5934.id_ct_TAMP_statusResponse: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.tsr_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+
+ self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5934.id_ct_TAMP_statusResponse, eci['eContentType'])
+
+ tsr, rest = der_decoder(
+ eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(tsr.prettyPrint())
+ self.assertEqual(eci['eContent'], der_encoder(tsr))
+ self.assertEqual(2, tsr['version'])
+ self.assertEqual(univ.Null(""), tsr['query']['target'])
+ self.assertEqual(1568307071, tsr['query']['seqNum'])
+ self.assertFalse(tsr['usesApex'])
+
+ count = 0
+
+ for tai in tsr['response']['verboseResponse']['taInfo']:
+ count += 1
+ self.assertEqual(1, tai['taInfo']['version'])
+
+ self.assertEqual(3, count)
+
+
+class TrustAnchorUpdateTestCase(unittest.TestCase):
+ tau_pem_text = """\
+MIIGgwYJKoZIhvcNAQcCoIIGdDCCBnACAQMxDTALBglghkgBZQMEAgEwggFMBgpghkgBZQIB
+Ak0DoIIBPASCATgwggE0MAiDAAIEXXp3kDCCASaiggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDALMH2jTus/z881nG+uHQiB+xwQRX8q0DjB6rBw9if/tpMOr8/yNgoe0s2AcCs
+RSXD0g4Kj4UYZBA9GhNwKm+O19yNk7NBDzghza2rwj0qBdNXETcNzYxR+ZPjzEZJIY4UtM3L
+FD44zXIx7qsS8mXqNC5WXf/uY3XLbbqRNPye8/QtHL5QxELfWYj/arP6qGw9y1ZxcQWWu5+A
+5YBFWWdBsOvDrWCkgHUGF5wO9EPgmQ4b+3/1s8yygYKx/TLBuL5BpGS1YDpaUTCMzt5BLBlH
+XEkQZLl0qYdBr31uusG4ob9lMToEZ/m1u46SigBjuLHmjDhfg/9Q1TuiXWuyEMxjAgMBAAGg
+ggN9MIIDeTCCAmGgAwIBAgIBATANBgkqhkiG9w0BAQsFADBAMQswCQYDVQQGEwJVUzEfMB0G
+A1UEChMWVGVzdCBDZXJ0aWZpY2F0ZXMgMjAxMTEQMA4GA1UEAxMHR29vZCBDQTAeFw0xMDAx
+MDEwODMwMDBaFw0zMDEyMzEwODMwMDBaMFMxCzAJBgNVBAYTAlVTMR8wHQYDVQQKExZUZXN0
+IENlcnRpZmljYXRlcyAyMDExMSMwIQYDVQQDExpWYWxpZCBFRSBDZXJ0aWZpY2F0ZSBUZXN0
+MTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANncdxgQEBhgQfvneBAP+IR3cO8t
+blU7EaaZUj9t9L2hl/o2Hm5EQhHIU/51hpteNxKIon3ZcQjUSTVxzkbPW9BZjmUf60I9yg7c
+TJDYVGnPXjiyIGDdg1Eu39vVWziRWi3PmjO0b5aQ5XYUYkNphBDPVEH5Neqe1FqXnV4QWb3g
+5MNZidfe8nmwh2sCwFmhKgCCFW9rEREAUzR0PfThzFZiouRl6COxgx1YUwiyMy2WvuV9M54Q
+WidzU91dmOJLEVNYkY/qchHsu5TyDQ9QrfIWtRoAJDHlFb0XBpCqJLGs3QxSHvCLaqu49+3f
+Y7TOlGi/XpbQRJbx+PR6Ogp5FVMCAwEAAaNrMGkwHwYDVR0jBBgwFoAUWAGEJBu8K1KUSj2l
+EHIUUfWvOskwHQYDVR0OBBYEFKg8CZ1n9thHuqLQ/BhyVohAbZWVMA4GA1UdDwEB/wQEAwIE
+8DAXBgNVHSAEEDAOMAwGCmCGSAFlAwIBMAEwDQYJKoZIhvcNAQELBQADggEBAB5a2Q+vYqW5
+Ury87AxhiBMBqgoPDUejnqmyFxv4o9ks0o04vjuyz9QxiM/OafSOx7lwBVHABofGlbT2avox
+ni3EF7Pt5XoZYRhujNHkDtqbbWyNBpDuLNF5WNiEzZtB0xji/pHGXwAnFGV7Evovvai/NI4t
+zxdMWFswDy5pZkUmJiGY0/OQrimHWk7Gvegofg+glOb/XLVcT92KYVkOBdL/xWnA04lK0cLl
+yPTICMP9KiNPhABcLEQtg4rCPSLHPGDyinjjG0Zl2pmP+GPB1HqgcKZ6pxCbnax/vhTwRCOH
+WKwQFejzoL8eJcs2qwJpWq7/wG6wQ54Inhk8pzBujcIxggGJMIIBhQIBA4AUqDwJnWf22Ee6
+otD8GHJWiEBtlZUwCwYJYIZIAWUDBAIBoEwwGQYJKoZIhvcNAQkDMQwGCmCGSAFlAgECTQMw
+LwYJKoZIhvcNAQkEMSIEINq+nldSoCoJuEe/lhrRhfx0ArygsPJ7mCMbOFrpr1dFMAsGCSqG
+SIb3DQEBCwSCAQBTeRE1DzwF2dnv2yJAOYOxNnAtTs72ZG8mv5Ad4M/9n1+MPiAykLcBslW8
+7D1KjBdwB3oxIT4sjwGh0kxKLe4G+VuvQuPwtT8MqMl3hounnFOM5nMSj1TSbfHVPs3dhEyk
+Wu1gQ5g9gxLF3MpwEJGJKvhRtK17LGElJWvGPniRMChAJZJWoLjFBMe5JMzpqu2za50S1K3t
+YtkTOx/2FQdVApkTY1qMQooljDiuvSvOuSDXcyAA15uIypQJvfrBNqe6Ush+j7yS5UQyTm0o
+ZidB8vj4jIZT3S2gqWhtBLMUc11j+kWlXEZEigSL8WgCbAu7lqhItMwz2dy4C5aAWq8r"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tau_pem_text)
+
+ layers = {
+ rfc5652.id_ct_contentInfo: rfc5652.ContentInfo(),
+ rfc5652.id_signedData: rfc5652.SignedData(),
+ rfc5934.id_ct_TAMP_update: rfc5934.TAMPUpdate()
+ }
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc5934.id_ct_TAMP_update: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc5934.id_ct_TAMP_update: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.tau_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+ self.assertIn(eci['eContentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5934.id_ct_TAMP_update, eci['eContentType'])
+
+ tau, rest = der_decoder(
+ eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[eci['eContentType']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(tau.prettyPrint())
+ self.assertEqual(eci['eContent'], der_encoder(tau))
+ self.assertEqual(2, tau['version'])
+ self.assertEqual(univ.Null(""), tau['msgRef']['target'])
+ self.assertEqual(1568307088, tau['msgRef']['seqNum'])
+ self.assertEqual(1, len(tau['updates']))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5940.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5940.py
new file mode 100644
index 0000000000..d55ba6e813
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5940.py
@@ -0,0 +1,141 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5940
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+
+class CRLandOCSPResponseTestCase(unittest.TestCase):
+ pem_text = """\
+MIIHWQYJKoZIhvcNAQcCoIIHSjCCB0YCAQExDTALBglghkgBZQMEAgEwUwYJKoZI
+hvcNAQcBoEYERENvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91Lg0KoIIBaDCCAWQwggEKoAMCAQIC
+CQClWUKCJkwnGTAKBggqhkjOPQQDAjAkMRQwEgYDVQQKDAtleGFtcGxlLm9yZzEM
+MAoGA1UEAwwDQm9iMB4XDTE3MTIyMDIzMDc0OVoXDTE4MTIyMDIzMDc0OVowJDEU
+MBIGA1UECgwLZXhhbXBsZS5vcmcxDDAKBgNVBAMMA0JvYjBZMBMGByqGSM49AgEG
+CCqGSM49AwEHA0IABIZP//xT8ah2ymmxfidIegeccVKuGxN+OTuvGq69EnQ8fUFD
+ov2KNw8Cup0DtzAfHaZOMFWUu2+Vy3H6SLbQo4OjJTAjMCEGA1UdEQEB/wQXMBWG
+E3NpcDpib2JAZXhhbXBsZS5vcmcwCgYIKoZIzj0EAwIDSAAwRQIhALIkjJJAKCI4
+nsklf2TM/RBvuguWwRkHMDTVGxAvczlsAiAVjrFR8IW5vS4EzyePDVIua7b+Tzb3
+THcQsVpPR53kDaGCBGQwggIbMIIBAwIBATANBgkqhkiG9w0BAQsFADBsMQswCQYD
+VQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGln
+aWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBS
+b290IENBFw0xOTA1MDIyMjE1NTRaFw0xOTA1MjMyMjE1NTRaMDEwLwIQDPWCOBgZ
+nlb4K9ZS7Sft6RcNMTgxMDI1MTYxMTM4WjAMMAoGA1UdFQQDCgEAoDAwLjAfBgNV
+HSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzALBgNVHRQEBAICAcQwDQYJKoZI
+hvcNAQELBQADggEBABPO3OA0OkQZ+RLVxz/cNx5uNVEO416oOePkN0A4DxFztf33
+7caS4OyfS9Wyu1j5yUdWJVpAKXSQeN95MqHkpSpYDssuqbuYjv8ViJfseGBgtXTc
+zUzzNeNdY2uxMbCxuhmPkgacAo1lx9LkK2ScYHWVbfFRF1UQ/dcmavaZsEOBNuLW
+OxQYA9MqfVNAymHe7vPqwm/8IY2FbHe9HsiJZfGxNWMDP5lmJiXmpntTeDQ2Ujdi
+yXwGGKjyiSTFk2jVRutrGINufaoA/f7eCmIb4UDPbpMjVfD215dW8eBKouypCVoE
+vmCSSTacdiBI2yOluvMN0PzvPve0ECAE+D4em9ahggJBBggrBgEFBQcQAjCCAjMK
+AQCgggIsMIICKAYJKwYBBQUHMAEBBIICGTCCAhUwZqEgMB4xHDAJBgNVBAYTAlJV
+MA8GA1UEAx4IAFQAZQBzAHQYEzIwMTkwNTA5MTU1MDQ4LjI1OVowLTArMBIwBwYF
+Kw4DAhoEAQEEAQECAQGAABgTMjAxOTA1MDkxNTUwNDguMjYxWjAKBggqhkjOPQQD
+AgNJADBGAiEAujFVH+NvuTLYa8RW3pvWSUwZfjOW5H5171JI+/50BjcCIQDhwige
+wl+ts6TIvhU+CFoOipQBNKyKXKh7ngJkUtpZ86CCAVIwggFOMIIBSjCB8aADAgEC
+AgEBMAoGCCqGSM49BAMCMB4xHDAJBgNVBAYTAlJVMA8GA1UEAx4IAFQAZQBzAHQw
+HhcNMTkwMjAxMDUwMDAwWhcNMjIwMjAxMDUwMDAwWjAeMRwwCQYDVQQGEwJSVTAP
+BgNVBAMeCABUAGUAcwB0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEM0jxEYgg
+RxC/r87uV/h6iZ8BAdHT/6fxRuzG0PRMIlFBy38skFUXJJulKV9JW16YJqOkVsqv
+xwMM61z7p1vQ/qMgMB4wDwYDVR0TBAgwBgEB/wIBAzALBgNVHQ8EBAMCAAYwCgYI
+KoZIzj0EAwIDSAAwRQIhAIdpCt5g89ofSADXmBD3KXQGnTghwbAMeWrKXqTGww+x
+AiAl8NQgfUk4xMymZ3VtCLJ2MdczDps4Zh2KPOqAR5fZAjGCAQcwggEDAgEBMDEw
+JDEUMBIGA1UECgwLZXhhbXBsZS5vcmcxDDAKBgNVBAMMA0JvYgIJAKVZQoImTCcZ
+MAsGCWCGSAFlAwQCAaBpMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZI
+hvcNAQkFMQ8XDTE5MDEyNDIzNTI1NlowLwYJKoZIhvcNAQkEMSIEIO93j8lA1ebc
+JXb0elmbMSYZWp8aInra81+iLAUNjRlaMAoGCCqGSM49BAMCBEcwRQIhAPeI7URq
+tw//LB/6TAN0/Qh3/WHukXwxRbOJpnYVx0b6AiB3lK3FfwBhx4S5YSPMblS7goJl
+ttTMEpl2prH8bbwo1g==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertTrue(sd.prettyPrint())
+
+ self.assertEqual(
+ rfc5652.id_data, sd['encapContentInfo']['eContentType'])
+ self.assertTrue(sd['encapContentInfo']['eContent'])
+
+ v2 = rfc5280.Version(value='v2')
+
+ self.assertEqual(v2, sd['crls'][0]['crl']['tbsCertList']['version'])
+
+ ocspr_oid = rfc5940.id_ri_ocsp_response
+
+ self.assertEqual(ocspr_oid, sd['crls'][1]['other']['otherRevInfoFormat'])
+
+ ocspr, rest = der_decoder(
+ sd['crls'][1]['other']['otherRevInfo'],
+ asn1Spec=rfc5940.OCSPResponse())
+
+ self.assertTrue(ocspr.prettyPrint())
+
+ success = rfc2560.OCSPResponseStatus(value='successful')
+
+ self.assertEqual(success, ocspr['responseStatus'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd_eci = asn1Object['content']['encapContentInfo']
+
+ self.assertEqual(rfc5652.id_data, sd_eci['eContentType'])
+ self.assertTrue(sd_eci['eContent'].hasValue())
+
+ for ri in asn1Object['content']['crls']:
+ if ri.getName() == 'crl':
+ v2 = rfc5280.Version(value='v2')
+ self.assertEqual(v2, ri['crl']['tbsCertList']['version'])
+
+ if ri.getName() == 'other':
+ ori = ri['other']
+ ocspr_oid = rfc5940.id_ri_ocsp_response
+
+ self.assertEqual(ocspr_oid, ori['otherRevInfoFormat'])
+
+ ocspr_status = ori['otherRevInfo']['responseStatus']
+ success = rfc2560.OCSPResponseStatus(value='successful')
+
+ self.assertEqual(success, ocspr_status)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5958.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5958.py
new file mode 100644
index 0000000000..980a11ed5b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5958.py
@@ -0,0 +1,84 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc8410
+
+
+class PrivateKeyTestCase(unittest.TestCase):
+ priv_key_pem_text = """\
+MHICAQEwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC
+oB8wHQYKKoZIhvcNAQkJFDEPDA1DdXJkbGUgQ2hhaXJzgSEAGb9ECWmEzf6FQbrB
+Z9w7lshQhqowtrbLDFw4rXAxZuE=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5958.PrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.priv_key_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ rfc8410.id_Ed25519, asn1Object['privateKeyAlgorithm']['algorithm'])
+ self.assertTrue(asn1Object['privateKey'].isValue)
+ self.assertEqual(
+ "0x0420d4ee", asn1Object['privateKey'].prettyPrint()[0:10])
+ self.assertTrue(asn1Object['publicKey'].isValue)
+ self.assertEqual(
+ "1164575857", asn1Object['publicKey'].prettyPrint()[0:10])
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class PrivateKeyOpenTypesTestCase(unittest.TestCase):
+ asymmetric_key_pkg_pem_text = """\
+MIGEBgpghkgBZQIBAk4FoHYwdDByAgEBMAUGAytlcAQiBCDU7nLb+RNYStW22PH3
+afitOv58KMvx1Pvgl6iPRHVYQqAfMB0GCiqGSIb3DQEJCRQxDwwNQ3VyZGxlIENo
+YWlyc4EhABm/RAlphM3+hUG6wWfcO5bIUIaqMLa2ywxcOK1wMWbh
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.asymmetric_key_pkg_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(
+ rfc5958.id_ct_KP_aKeyPackage, rfc5652.cmsContentTypesMap)
+
+ oneKey = asn1Object['content'][0]
+
+ self.assertEqual(
+ rfc8410.id_Ed25519, oneKey['privateKeyAlgorithm']['algorithm'])
+
+ pkcs_9_at_friendlyName = univ.ObjectIdentifier('1.2.840.113549.1.9.9.20')
+
+ self.assertEqual(
+ pkcs_9_at_friendlyName, oneKey['attributes'][0]['attrType'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc5990.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc5990.py
new file mode 100644
index 0000000000..7d51d67bb4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc5990.py
@@ -0,0 +1,87 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5990
+
+
+class RSAKEMTestCase(unittest.TestCase):
+ pem_text = """\
+MEcGCyqGSIb3DQEJEAMOMDgwKQYHKIGMcQICBDAeMBkGCiuBBRCGSAksAQIwCwYJ
+YIZIAWUDBAIBAgEQMAsGCWCGSAFlAwQBBQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5990.id_rsa_kem, asn1Object['algorithm'])
+
+ rsa_kem_p, rest = der_decoder(
+ asn1Object['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[rfc5990.id_rsa_kem])
+
+ self.assertFalse(rest)
+ self.assertTrue(rsa_kem_p.prettyPrint())
+ self.assertEqual(asn1Object['parameters'], der_encoder(rsa_kem_p))
+ self.assertEqual(rfc5990.id_kem_rsa, rsa_kem_p['kem']['algorithm'])
+
+ kem_rsa_p, rest = der_decoder(
+ rsa_kem_p['kem']['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[rfc5990.id_kem_rsa])
+
+ self.assertFalse(rest)
+ self.assertTrue(kem_rsa_p.prettyPrint())
+ self.assertEqual(
+ rsa_kem_p['kem']['parameters'], der_encoder(kem_rsa_p))
+ self.assertEqual(16, kem_rsa_p['keyLength'])
+ self.assertEqual(
+ rfc5990.id_kdf_kdf3, kem_rsa_p['keyDerivationFunction']['algorithm'])
+
+ kdf_p, rest = der_decoder(
+ kem_rsa_p['keyDerivationFunction']['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[rfc5990.id_kdf_kdf3])
+
+ self.assertFalse(rest)
+ self.assertTrue(kdf_p.prettyPrint())
+ self.assertEqual(
+ kem_rsa_p['keyDerivationFunction']['parameters'],
+ der_encoder(kdf_p))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5990.id_rsa_kem, asn1Object['algorithm'])
+ self.assertEqual(
+ rfc5990.id_kem_rsa, asn1Object['parameters']['kem']['algorithm'])
+ self.assertEqual(
+ 16, asn1Object['parameters']['kem']['parameters']['keyLength'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6010.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6010.py
new file mode 100644
index 0000000000..1726a8d880
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6010.py
@@ -0,0 +1,101 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6010
+
+
+class UnconstrainedCCCExtensionTestCase(unittest.TestCase):
+ unconstrained_pem_text = "MB0GCCsGAQUFBwESBBEwDzANBgsqhkiG9w0BCRABAA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extension()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.unconstrained_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc6010.id_pe_cmsContentConstraints, asn1Object['extnID'])
+
+ evalue, rest = der_decoder(
+ asn1Object['extnValue'],
+ asn1Spec=rfc6010.CMSContentConstraints())
+
+ self.assertFalse(rest)
+ self.assertTrue(evalue.prettyPrint())
+ self.assertEqual(asn1Object['extnValue'], der_encoder(evalue))
+ self.assertEqual(
+ rfc6010.id_ct_anyContentType, evalue[0]['contentType'])
+
+
+class ConstrainedCCCExtensionTestCase(unittest.TestCase):
+ constrained_pem_text = """\
+MIG7BggrBgEFBQcBEgSBrjCBqzA0BgsqhkiG9w0BCRABEDAlMCMGCyqGSIb3DQEJ
+EAwBMRQMElZpZ2lsIFNlY3VyaXR5IExMQzAwBgpghkgBZQIBAk4CMCIwIAYLKoZI
+hvcNAQkQDAsxEQwPa3RhLmV4YW1wbGUuY29tMDEGCyqGSIb3DQEJEAEZMCIwIAYL
+KoZIhvcNAQkQDAsxEQwPa3RhLmV4YW1wbGUuY29tMA4GCSqGSIb3DQEHAQoBAQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extension()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.constrained_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(
+ rfc6010.id_pe_cmsContentConstraints, asn1Object['extnID'])
+
+ evalue, rest = der_decoder(
+ asn1Object['extnValue'],
+ asn1Spec=rfc6010.CMSContentConstraints())
+
+ self.assertFalse(rest)
+ self.assertTrue(evalue.prettyPrint())
+ self.assertEqual(asn1Object['extnValue'], der_encoder(evalue))
+
+ constraint_count = 0
+ attribute_count = 0
+ cannot_count = 0
+
+ for ccc in evalue:
+ constraint_count += 1
+ if ccc['canSource'] == 1:
+ cannot_count += 1
+ if ccc['attrConstraints'].hasValue():
+ for attr in ccc['attrConstraints']:
+ attribute_count += 1
+
+ self.assertEqual(4, constraint_count)
+ self.assertEqual(3, attribute_count)
+ self.assertEqual(1, cannot_count)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.constrained_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertIn(asn1Object['extnID'], rfc5280.certificateExtensionsMap)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6019.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6019.py
new file mode 100644
index 0000000000..2e08670e42
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6019.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6019
+
+
+class BinarySigningTimeTestCase(unittest.TestCase):
+ pem_text = "MBUGCyqGSIb3DQEJEAIuMQYCBFy/hlQ="
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.Attribute()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc6019.id_aa_binarySigningTime, asn1Object['attrType'])
+
+ bintime, rest = der_decoder(
+ asn1Object['attrValues'][0], asn1Spec=rfc6019.BinaryTime())
+
+ self.assertEqual(0x5cbf8654, bintime)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['attrType'], rfc5652.cmsAttributesMap)
+ self.assertEqual(0x5cbf8654, asn1Object['attrValues'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6031.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6031.py
new file mode 100644
index 0000000000..29a8d86c5c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6031.py
@@ -0,0 +1,91 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6031
+
+
+class SymmetricKeyPkgTestCase(unittest.TestCase):
+ key_pkg_pem_text = """\
+MIG7BgsqhkiG9w0BCRABGaCBqzCBqKBEMCMGCyqGSIb3DQEJEAwBMRQMElZpZ2ls
+IFNlY3VyaXR5IExMQzAdBgsqhkiG9w0BCRAMAzEODAxQcmV0ZW5kIDA0OEEwYDBe
+MFYwGwYLKoZIhvcNAQkQDBsxDAwKZXhhbXBsZUlEMTAVBgsqhkiG9w0BCRAMCjEG
+DARIT1RQMCAGCyqGSIb3DQEJEAwLMREMD2t0YS5leGFtcGxlLmNvbQQEMTIzNA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.key_pkg_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+
+ asn1Spec = rfc5652.cmsContentTypesMap[asn1Object['contentType']]
+ skp, rest = der_decoder(asn1Object['content'], asn1Spec=asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(skp.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(skp))
+
+ for attr in skp['sKeyPkgAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyPkgAttributesMap)
+
+ for osk in skp['sKeys']:
+ for attr in osk['sKeyAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyAttributesMap)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.key_pkg_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertTrue(asn1Object['content'].hasValue())
+
+ keypkg = asn1Object['content']
+
+ self.assertEqual(
+ rfc6031.KeyPkgVersion().subtype(value='v1'), keypkg['version'])
+
+ for attr in keypkg['sKeyPkgAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyPkgAttributesMap)
+ self.assertNotEqual('0x', attr['attrValues'][0].prettyPrint()[:2])
+
+ # decodeOpenTypes=True did not decode if the value is shown in hex ...
+ if attr['attrType'] == rfc6031.id_pskc_manufacturer:
+                self.assertEqual('Vigil Security LLC', attr['attrValues'][0])
+
+ for osk in keypkg['sKeys']:
+ for attr in osk['sKeyAttrs']:
+ self.assertIn(attr['attrType'], rfc6031.sKeyAttributesMap)
+ self.assertNotEqual(
+ '0x', attr['attrValues'][0].prettyPrint()[:2])
+
+ # decodeOpenTypes=True did not decode if the value is shown in hex ...
+ if attr['attrType'] == rfc6031.id_pskc_issuer:
+                    self.assertEqual('kta.example.com', attr['attrValues'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6032.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6032.py
new file mode 100644
index 0000000000..287bad89ae
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6032.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6032
+
+
+class EncryptedKeyPkgTestCase(unittest.TestCase):
+ encrypted_key_pkg_pem_text = """\
+MIIBBwYKYIZIAWUCAQJOAqCB+DCB9QIBAjCBzgYKYIZIAWUCAQJOAjAdBglghkgB
+ZQMEASoEEN6HFteHMZ3DyeO35xIwWQOAgaCKTs0D0HguNzMhsLgiwG/Kw8OwX+GF
+9/cZ1YVNesUTW/VsbXJcbTmFmWyfqZsM4DLBegIbrUEHQZnQRq6/NO4ricQdHApD
+B/ip6RRqeN1yxMJLv1YN0zUOOIDBS2iMEjTLXZLWw3w22GN2JK7G+Lr4OH1NhMgU
+ILJyh/RePmPseMwxvcJs7liEfkiSNMtDfEcpjtzA9bDe95GjhQRsiSByoR8wHQYJ
+YIZIAWUCAQVCMRAEDnB0Zi1rZGMtODEyMzc0
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.encrypted_key_pkg_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc6032.id_ct_KP_encryptedKeyPkg, asn1Object['contentType'])
+
+ content, rest = der_decoder(
+ asn1Object['content'], rfc6032.EncryptedKeyPackage())
+
+ self.assertFalse(rest)
+ self.assertTrue(content.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(content))
+ self.assertEqual('encrypted', content.getName())
+
+ eci = content['encrypted']['encryptedContentInfo']
+
+ self.assertEqual(
+ rfc6032.id_ct_KP_encryptedKeyPkg, eci['contentType'])
+
+ attrType = content['encrypted']['unprotectedAttrs'][0]['attrType']
+
+ self.assertEqual(rfc6032.id_aa_KP_contentDecryptKeyID, attrType)
+
+ attrVal0 = content['encrypted']['unprotectedAttrs'][0]['attrValues'][0]
+ keyid, rest = der_decoder(attrVal0, rfc6032.ContentDecryptKeyID())
+
+ self.assertFalse(rest)
+ self.assertTrue(keyid.prettyPrint())
+ self.assertEqual(attrVal0, der_encoder(keyid))
+ self.assertEqual(str2octs('ptf-kdc-812374'), keyid)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.encrypted_key_pkg_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+
+ eci = asn1Object['content']['encrypted']['encryptedContentInfo']
+
+ self.assertIn(eci['contentType'], rfc5652.cmsContentTypesMap)
+
+ for attr in asn1Object['content']['encrypted']['unprotectedAttrs']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ self.assertNotEqual('0x', attr['attrValues'][0].prettyPrint()[:2])
+
+ if attr['attrType'] == rfc6032.id_aa_KP_contentDecryptKeyID:
+ self.assertEqual(str2octs(
+ 'ptf-kdc-812374'), attr['attrValues'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6120.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6120.py
new file mode 100644
index 0000000000..bdedab8c50
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6120.py
@@ -0,0 +1,115 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.compat.octets import str2octs
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6120
+
+
+class XMPPCertificateTestCase(unittest.TestCase):
+ xmpp_server_cert_pem_text = """\
+MIIC6DCCAm+gAwIBAgIJAKWzVCgbsG5DMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDI0MjMxNjA0WhcNMjAxMDIzMjMxNjA0WjBNMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xHzAdBgNVBAoTFkV4
+YW1wbGUgUHJvZHVjdHMsIEluYy4wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQZzQlk
+03nJRPF6+w1NxFELmQ5vJTjTRz3eu03CRtahK4Wnwd4GwbDe8NVHAEG2qTzBXFDu
+p6RZugsBdf9GcEZHG42rThYYOzIYzVFnI7tQgA+nTWSWZN6eoU/EXcknhgijggEn
+MIIBIzAdBgNVHQ4EFgQUkQpUMYcbUesEn5buI03POFnktJgwHwYDVR0jBBgwFoAU
+8jXbNATapVXyvWkDmbBi7OIVCMEwCwYDVR0PBAQDAgeAMIGPBgNVHREEgYcwgYSg
+KQYIKwYBBQUHCAegHRYbX3htcHAtY2xpZW50LmltLmV4YW1wbGUuY29toCkGCCsG
+AQUFBwgHoB0WG194bXBwLXNlcnZlci5pbS5leGFtcGxlLmNvbaAcBggrBgEFBQcI
+BaAQDA5pbS5leGFtcGxlLmNvbYIOaW0uZXhhbXBsZS5jb20wQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjAKBggqhkjOPQQDAwNnADBkAjAEo4mhDGC6/R39HyNgzLseNAp36qBH
+yQJ/AWsBojN0av8akeVv9IuM45yqLKdiCzcCMDCjh1lFnCvurahwp5D1j9pAZMsg
+nOzhcMpnHs2U/eN0lHl/JNgnbftl6Dvnt59xdA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ gn_on = gn['otherName']
+ if gn_on['type-id'] == rfc6120.id_on_xmppAddr:
+ self.assertIn(gn_on['type-id'], rfc5280.anotherNameMap)
+
+ spec = rfc5280.anotherNameMap[gn['otherName']['type-id']]
+ on, rest = der_decoder(gn_on['value'], asn1Spec=spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(on.prettyPrint())
+ self.assertEqual(gn_on['value'], der_encoder(on))
+ self.assertEqual('im.example.com', on)
+
+ count += 1
+
+ self.assertEqual(1, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.xmpp_server_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate,
+ asn1Spec=self.asn1Spec,
+ decodeOpenTypes=True)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ if gn['otherName']['type-id'] == rfc6120.id_on_xmppAddr:
+ self.assertEqual(
+ 'im.example.com', gn['otherName']['value'])
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6187.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6187.py
new file mode 100644
index 0000000000..75c1e91d86
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6187.py
@@ -0,0 +1,70 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6187
+
+
+class SSHClientCertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICkDCCAhegAwIBAgIJAKWzVCgbsG5BMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDI0MTgyNjA3WhcNMjAxMDIzMTgyNjA3WjB0MQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
+YW1wbGUxEDAOBgNVBAMTB0NoYXJsaWUxIjAgBgkqhkiG9w0BCQEWE2NoYXJsaWVA
+ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARfr1XPl5S0A/BwTOm4
+/rO7mGVt2Tmfr3yvYnfN/ggMvyS3RiIXSsdzcAwzeqc907Jp7Dggab0PpaOKDOxD
+WoK0g6B8+kC/VMsU23mfShlb9et8qcR3A8gdU6g8uvSMahWjgakwgaYwCwYDVR0P
+BAQDAgeAMB0GA1UdDgQWBBQfwm5u0GoxiDcjhDt33UJYlvMPFTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTATBgNVHSUEDDAKBggrBgEFBQcDFTBCBglg
+hkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBm
+b3IgYW55IHB1cnBvc2UuMAoGCCqGSM49BAMDA2cAMGQCMGEme38A3k8q4RGSEs2D
+ThQQOQz3TBJrIW8zr92S8e8BNPkRcQDR+C72TEhL/qoPCQIwGpGaC4ERiUypETkC
+voNP0ODFhhlpFo6lwVHd8Gu+6hShC2PKdAfs4QFDS9ZKgQeZ
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ ssh_eku_oids = [
+ rfc6187.id_kp_secureShellClient,
+ rfc6187.id_kp_secureShellServer,
+ ]
+
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.ExtKeyUsageSyntax())
+
+ for oid in extnValue:
+ if oid in ssh_eku_oids:
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6210.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6210.py
new file mode 100644
index 0000000000..54d8b66e85
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6210.py
@@ -0,0 +1,73 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6210
+
+
+class AuthenticatedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIICRQYLKoZIhvcNAQkQAQKgggI0MIICMAIBADGBwDCBvQIBADAmMBIxEDAOBgNVBAMMB0
+NhcmxSU0ECEEY0a8eAAFa8EdNuLs1dcdAwDQYJKoZIhvcNAQEBBQAEgYCH70EpEikY7deb
+859YJRAWfFondQv1D4NFltw6C1ceheWnlAU0C2WEXr3LUBXZp1/PSte29FnJxu5bXCTn1g
+elMm6zNlZNWNd0KadVBcaxi1n8L52tVM5sWFGJPO5cStOyAka2ucuZM6iAnCSkn1Ju7fgU
+5j2g3bZ/IM8nHTcygjAKBggrBgEFBQgBAqFPBgsqhkiG9w0BCRADDQRAAQIDBAUGBwgJCg
+sMDQ4PEBESEwQVFhcYGRobHB0eHyAhIiMEJSYnKCkqKywtLi8wMTIzBDU2Nzg5Ojs8PT4/
+QDArBgkqhkiG9w0BBwGgHgQcVGhpcyBpcyBzb21lIHNhbXBsZSBjb250ZW50LqKBxzAYBg
+kqhkiG9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0wOTEyMTAyMzI1MDBa
+MB8GCSqGSIb3DQEJBDESBBCWaa5hG1eeg+oQK2tJ3cD5MGwGCSqGSIb3DQEJNDFfMF0wTw
+YLKoZIhvcNAQkQAw0EQAECAwQFBgcICQoLDA0ODxAREhMEFRYXGBkaGxwdHh8gISIjBCUm
+JygpKissLS4vMDEyMwQ1Njc4OTo7PD0+P0CiCgYIKwYBBQUIAQIEFLjUxQ9PJFzFnWraxb
+EIbVbg2xql
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_ct_authData, asn1Object['contentType'])
+
+ ad, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.AuthenticatedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ad.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ad))
+ self.assertEqual(0, ad['version'])
+ self.assertEqual(
+ rfc6210.id_alg_MD5_XOR_EXPERIMENT, ad['digestAlgorithm']['algorithm'])
+
+ mac_alg_p, rest = der_decoder(
+ ad['digestAlgorithm']['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[ad['digestAlgorithm']['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(mac_alg_p.prettyPrint())
+ self.assertEqual(
+ ad['digestAlgorithm']['parameters'], der_encoder(mac_alg_p))
+ self.assertEqual("0x01020304", mac_alg_p.prettyPrint()[:10])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6211.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6211.py
new file mode 100644
index 0000000000..040b17ac7c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6211.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.type import univ
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6211
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIEyAYJKoZIhvcNAQcCoIIEuTCCBLUCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggHM
+MIIByAIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKggfIwGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcN
+MTkwNTI5MTgyMzE5WjAoBgkqhkiG9w0BCTQxGzAZMAsGCWCGSAFlAwQCAqEKBggq
+hkjOPQQDAzA/BgkqhkiG9w0BCQQxMgQwtuQipP2CZx7U96rGbUT06LC5jVFYccZW
+5/CaNvpcrOPiChDm2vI3m4k300z5mSZsME0GCyqGSIb3DQEJEAIBMT4wPAQgx08h
+D2QnVwj1DoeRELNtdZ0PffW4BQIvcwwVc/goU6OAAQEwFTATgRFhbGljZUBleGFt
+cGxlLmNvbTAKBggqhkjOPQQDAwRnMGUCMQChIMyN1nTN+LLQcYJuhWT297vSKMDK
+fIUedSwWYrcSnSa1pq2s3Wue+pNBfecEjYECMGrUNu1UpWdafEJulP9Vz76qOPMa
+5V/AnTEV5zkmzRle8sffN+nQ+SGkoos5zpI1kA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+            if sat == rfc6211.id_aa_cmsAlgorithmProtect:
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc6211.CMSAlgorithmProtection())
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd = asn1Object['content']
+
+ self.assertEqual(
+ rfc5652.CMSVersion().subtype(value='v1'), sd['version'])
+
+ ect = sd['encapContentInfo']['eContentType']
+
+ self.assertIn(ect, rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_data, ect)
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc6211.id_aa_cmsAlgorithmProtect:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+
+ sav0 = sa['attrValues'][0]
+ digest_oid = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2')
+ sig_oid = univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+
+ self.assertEqual(
+ digest_oid, sav0['digestAlgorithm']['algorithm'])
+ self.assertEqual(
+ sig_oid, sav0['signatureAlgorithm']['algorithm'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6402.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6402.py
new file mode 100644
index 0000000000..e970dfa7dd
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6402.py
@@ -0,0 +1,157 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+
+
+class BackwardCompatibilityTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
+BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
+SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
+cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
+AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
+PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
+Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
+OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
+A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
+bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
+AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
+AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
+mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
+KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
+qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
+oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
+xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
+5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
+xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
+"""
+
+ def testDerCodec(self):
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6402.id_cct_PKIData: lambda x: None
+ }
+
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+
+ def testOpenTypes(self):
+ class ClientInformation(univ.Sequence):
+ pass
+
+ ClientInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('clientId', univ.Integer()),
+ namedtype.NamedType('MachineName', char.UTF8String()),
+ namedtype.NamedType('UserName', char.UTF8String()),
+ namedtype.NamedType('ProcessName', char.UTF8String())
+ )
+
+ class EnrollmentCSP(univ.Sequence):
+ pass
+
+ EnrollmentCSP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('KeySpec', univ.Integer()),
+ namedtype.NamedType('Name', char.BMPString()),
+ namedtype.NamedType('Signature', univ.BitString())
+ )
+
+ openTypeMap = {
+ # attributes
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'): char.IA5String(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.2'): EnrollmentCSP(),
+ univ.ObjectIdentifier('1.3.6.1.4.1.311.21.20'): ClientInformation(),
+ # algorithm identifier parameters
+ univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
+ univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
+ }
+
+ openTypeMap.update(rfc5652.cmsAttributesMap)
+ openTypeMap.update(rfc6402.cmcControlAttributesMap)
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ eci = asn1Object['content']['encapContentInfo']
+
+ self.assertEqual(rfc6402.id_cct_PKIData, eci['eContentType'])
+
+ substrate = eci['eContent']
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc6402.PKIData(), openTypes=openTypeMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for req in asn1Object['reqSequence']:
+ cr = req['tcr']['certificationRequest']
+
+ sig_alg = cr['signatureAlgorithm']
+
+ self.assertIn(sig_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), sig_alg['parameters'])
+
+ cri = cr['certificationRequestInfo']
+ spki_alg = cri['subjectPublicKeyInfo']['algorithm']
+
+ self.assertIn(spki_alg['algorithm'], openTypeMap)
+ self.assertEqual(univ.Null(""), spki_alg['parameters'])
+
+ attrs = cr['certificationRequestInfo']['attributes']
+ for attr in attrs:
+                self.assertIn(attr['attrType'], openTypeMap)
+
+ if attr['attrType'] == univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'):
+ self.assertEqual("6.2.9200.2", attr['attrValues'][0])
+
+ else:
+ self.assertTrue(attr['attrValues'][0].hasValue())
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6482.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6482.py
new file mode 100644
index 0000000000..c2f6a94831
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6482.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6482
+
+
+class RPKIROATestCase(unittest.TestCase):
+ roa_pem_text = """\
+MIIGvwYJKoZIhvcNAQcCoIIGsDCCBqwCAQMxDTALBglghkgBZQMEAgEwKgYLKoZIhvcNAQkQ
+ARigGwQZMBcCAwDj+zAQMA4EAgABMAgwBgMEAJMcLaCCBLwwggS4MIIDoKADAgECAgIGGDAN
+BgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyg2ZDZmYmZhOTc1M2RiOGQ4NDY0MzNkYjUzNTFk
+OWE5ZWMwN2M5NmJkMB4XDTE5MDgyMDAwNDkyOVoXDTIwMDcwMTAwMDAwMFowMzExMC8GA1UE
+AxMoNUI4M0REODdERTlBQzdDNkUzNEI4NzdERjUwMUEyQjEyMzBBODFCNDCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJcnDgSUtiQeelGQsTx2Ou5cgmfq6KPSEgMz/XyZrRzj
+wcqUQ/DyMYHyRJK8umKZjfMu+rItoPSkE26Wi9PcSnfuY+SyS9chTAtNOGMES6MbtHjNTmBF
+Xar5CFGM8teLIRHlCcScesgSR7q2eKgQ+cLiLTZnol0Mpmuf2NIs+V63Y4Hn/T7QOoudg9nU
+tmsh31hUN4jIENEXFvNDovkray25rl9aqFfW+dtkoNtdJjp367nNXCdp3GdE/3z0SIqT8wnh
+F67tgR22mwzex3umteQBwmM+iR28vuHL4E5jwRKBoiEgGPYqq7gbfkcoFtR3AV6QGKSK2aJU
+mUi+9VheS78CAwEAAaOCAdQwggHQMB0GA1UdDgQWBBRbg92H3prHxuNLh331AaKxIwqBtDAf
+BgNVHSMEGDAWgBRtb7+pdT242EZDPbU1HZqewHyWvTAYBgNVHSABAf8EDjAMMAoGCCsGAQUF
+Bw4CMFAGA1UdHwRJMEcwRaBDoEGGP3JzeW5jOi8vY2EucmcubmV0L3Jwa2kvUkduZXQtT1Uv
+YlctX3FYVTl1TmhHUXoyMU5SMmFuc0I4bHIwLmNybDBkBggrBgEFBQcBAQRYMFYwVAYIKwYB
+BQUHMAKGSHJzeW5jOi8vcnBraS5yaXBlLm5ldC9yZXBvc2l0b3J5L0RFRkFVTFQvYlctX3FY
+VTl1TmhHUXoyMU5SMmFuc0I4bHIwLmNlcjAOBgNVHQ8BAf8EBAMCB4AwgYoGCCsGAQUFBwEL
+BH4wfDBLBggrBgEFBQcwC4Y/cnN5bmM6Ly9jYS5yZy5uZXQvcnBraS9SR25ldC1PVS9XNFBk
+aDk2YXg4YmpTNGQ5OVFHaXNTTUtnYlEucm9hMC0GCCsGAQUFBzANhiFodHRwczovL2NhLnJn
+Lm5ldC9ycmRwL25vdGlmeS54bWwwHwYIKwYBBQUHAQcBAf8EEDAOMAwEAgABMAYDBACTHC0w
+DQYJKoZIhvcNAQELBQADggEBAKhhoJ3XtHejvG6XkFaCTxJci10gOgNvvPFWqz+CfOX2LmB0
+N3QhYjLiAZbfYSOxNReyL4bWDK/tpZgVA2VHuS8GB8fI8+nauQUiP38orVXKAbcUUxo7UkEM
+HxQ5T61FtXrEZx8hgKTlsfof0G2Q+baSJzNV2MIUgHmSszL4Mx/fHUXv8b7l/5mZQbdv3cZ9
+SbODHD0iOVAzK3fmHeuA4roSOk4mBQDWNRY1Ok+xH/HMDQdoOVtbfy57TZI2W7O2uxfElKvx
+fBeEc9TOaWqDz0xvmJ6bdZnmWRuvqW1475mhxi0s/I4eE2ZdaCinvrgrglBp/jpZi1jitY14
+dx+A1PMxggGqMIIBpgIBA4AUW4Pdh96ax8bjS4d99QGisSMKgbQwCwYJYIZIAWUDBAIBoGsw
+GgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEYMBwGCSqGSIb3DQEJBTEPFw0xOTA4MjAwMDQ5
+MjlaMC8GCSqGSIb3DQEJBDEiBCCfuHnOmhF2iBF3JXMOnoZCJzmE+Tcf8b+zObvDUpUddzAN
+BgkqhkiG9w0BAQEFAASCAQBDlJIMKCqWsFV/tQj/XvpSJUxJybG+zwjrUKm4yTKv8QEGOzOD
+aIL6irSOhhXeax6Lw0P2J7x+L3jGW1we1qWslumEDTr9kTE+kN/6rZuptUhwdrXcu3p9G6gJ
+mAUQtzqe2jRN1T3eSBfz1CNU3C7+jSHXOc+4Tea5mKiVddsjotYHXX0PbSCS/ZZ1yzdeES0o
+KWhXhW9ogS0bwtXWVTrciSekaRpp2n/pqcVEDxWg/5NpPiDlPNrRL/9eTEHFp940RAUfhbBh
+pbC2J02N0KgxUJxIJnGnpZ7rXKpG4jMiTVry7XB9bnFxCvZGBdjQW1Hagrfpl2TiVxQFvJWl
+IzU1
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.roa_pem_text)
+
+ layers = {}
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6482.id_ct_routeOriginAuthz: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6482.id_ct_routeOriginAuthz: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(0, asn1Object['version'])
+ self.assertEqual(58363, asn1Object['asID'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.roa_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid = asn1Object['content']['encapContentInfo']['eContentType']
+ substrate = asn1Object['content']['encapContentInfo']['eContent']
+
+ self.assertIn(oid, rfc5652.cmsContentTypesMap)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.cmsContentTypesMap[oid],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['version'])
+ self.assertEqual(58363, asn1Object['asID'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6486.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6486.py
new file mode 100644
index 0000000000..1e0075c877
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6486.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6486
+
+
+class SignedManifestTestCase(unittest.TestCase):
+ manifest_pem_text = """\
+MIIHVAYJKoZIhvcNAQcCoIIHRTCCB0ECAQMxDTALBglghkgBZQMEAgEwgYwGCyqGSIb3DQEJ
+EAEaoH0EezB5AgIK5xgPMjAxMjEwMjMyMjI2MDNaGA8yMDEyMTAyNTIyMjYwM1oGCWCGSAFl
+AwQCATBGMEQWH1pYU0dCREJrTDgyVEZHSHVFNFZPWXRKUC1FNC5jcmwDIQCzTdC3GsuONsRq
+RFnYf8+AJ2NnCIgmnc3O8PyfGvn18aCCBO4wggTqMIID0qADAgECAgIK5zANBgkqhkiG9w0B
+AQsFADATMREwDwYDVQQDEwhBOTE5OTg4NTAeFw0xMjEwMjMyMjI2MDNaFw0xMjEwMjUyMjI2
+MDNaMBgxFjAUBgNVBAMTDTUwODcxOTdjLTIwZjcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDEl4R4LiCs6zyR/IAeaRCfz0O0mXXAUKt8bmG6DXzaDYNG8dnBjbrsM1L05sb4
+2Ti4TyE1UXtwFFEwatsFQ2uRBn9gsKmDGOjW8TH1AYObmZW+hZlEN7OLSz2bmPLtxIMwiCq/
+vqmBJlMWPyCSym4iPnjzwWbJechqHSiTMOYGICF1QSW5xjJDAhRfeZG3nRY7TqfW8R2KJXeN
+cKSYSGNKzv79B8GCswmwU8J8kcuryIiqb7WtcK2B6VBsROIQHGXM0UV4Zbnvv9m9Fl0SjvZJ
+XyrzRjGzV2C00hM0f4jAplD9nJhAJ7nOTe8OnadrFABRga+Ge1HooeDQJGmTekLXAgMBAAGj
+ggJBMIICPTAdBgNVHQ4EFgQUbcbOyNBHkRXXDaMq51jC7vOSHFUwHwYDVR0jBBgwFoAUZXSG
+BDBkL82TFGHuE4VOYtJP+E4wDgYDVR0PAQH/BAQDAgeAMIGDBgNVHR8EfDB6MHigdqB0hnJy
+c3luYzovL3Jwa2kuYXBuaWMubmV0L21lbWJlcl9yZXBvc2l0b3J5L0E5MTk5ODg1LzY1RkQ0
+M0FBNUJFRjExREZBQjYxQjNFNzU1QUZFN0NGL1pYU0dCREJrTDgyVEZHSHVFNFZPWXRKUC1F
+NC5jcmwwfgYIKwYBBQUHAQEEcjBwMG4GCCsGAQUFBzAChmJyc3luYzovL3Jwa2kuYXBuaWMu
+bmV0L3JlcG9zaXRvcnkvQTNDMzhBMjRENjAzMTFEQ0FCMDhGMzE5NzlCREJFMzkvWlhTR0JE
+QmtMODJURkdIdUU0Vk9ZdEpQLUU0LmNlcjAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMIGQ
+BggrBgEFBQcBCwSBgzCBgDB+BggrBgEFBQcwC4ZycnN5bmM6Ly9ycGtpLmFwbmljLm5ldC9t
+ZW1iZXJfcmVwb3NpdG9yeS9BOTE5OTg4NS82NUZENDNBQTVCRUYxMURGQUI2MUIzRTc1NUFG
+RTdDRi9aWFNHQkRCa0w4MlRGR0h1RTRWT1l0SlAtRTQubWZ0MBUGCCsGAQUFBwEIAQH/BAYw
+BKACBQAwIQYIKwYBBQUHAQcBAf8EEjAQMAYEAgABBQAwBgQCAAIFADANBgkqhkiG9w0BAQsF
+AAOCAQEAyBl1J+ql1O3d6JiaQEG2UAjDSKHSMVau++QcB6/yd4RuWv2KpQxk1cp+awf4Ttoh
+GYakbUZQl7lJaXzbluG5siRSv6AowEWxf99iLhDx+pE1htklRfmmTE9oFpKnITAYZAUjarNC
+sYGCZ00vSwRu27OdpSQbZQ7WdyDAhyHS0Sun0pkImVSqPO11gqyKV9ZCwCJUa5U/zsWDMNrj
+MSZl1I3VoPs2rx997rLoiQiMqwGeoqfl7snpsL9OR/CazPmepuq3SyZNWcCrUGcGRhRdGScj
+Tm2EHne1GiRHapn46HWQ3am8jumEKv5u0gLT4Mi9CyZwkDyhotGTJZmdAmN7zzGCAaowggGm
+AgEDgBRtxs7I0EeRFdcNoyrnWMLu85IcVTALBglghkgBZQMEAgGgazAaBgkqhkiG9w0BCQMx
+DQYLKoZIhvcNAQkQARowHAYJKoZIhvcNAQkFMQ8XDTEyMTAyMzIyMjYwNFowLwYJKoZIhvcN
+AQkEMSIEIIu2XV8dT+rqQy5Cbpm3Tv5I1dwkLK8n2GesMGOr6/pEMA0GCSqGSIb3DQEBAQUA
+BIIBAFsd0zkl4dIHrqZts441T+w/5/ekymDLFwftk6W+Mi35Htjvm2IHOthnKHQsK5h6dnEh
+6DfNfc6tACmzLnM+UG7ve+uAhfpA+CUJIoVhpQvDH7Ntql0cD1X3d9ng484jpkVoHhbUIYNR
+TyxvV4DV5EBbLYpx2HYf6wWa8TCobxUXNtw53OVA24ceavS+KvuDa0JQPFpbYUCS0UPMt/Im
+mtKrWTmRUr8sYWdIQn+SStUh8iAR5rmSVr+Pe7aFbe2ju2FPf08gnIjH/SdCrJuFK8q7Z5MT
+C9ijmXiajracUe+7eCluqgXRE8yRtnscWoA/9fVFz1lPwgEeNHLoaK7Sqew=
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.manifest_pem_text)
+
+ layers = rfc5652.cmsContentTypesMap.copy()
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ rfc6486.id_ct_rpkiManifest: lambda x: None
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ rfc6486.id_ct_rpkiManifest: lambda x: None
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+
+ while next_layer:
+ asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(0, asn1Object['version'])
+
+ for f in asn1Object['fileList']:
+ self.assertEqual('ZXSGBDBkL82TFGHuE4VOYtJP-E4.crl', f['file'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.manifest_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid = asn1Object['content']['encapContentInfo']['eContentType']
+ substrate = asn1Object['content']['encapContentInfo']['eContent']
+
+ self.assertIn(oid, rfc5652.cmsContentTypesMap)
+
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=rfc5652.cmsContentTypesMap[oid],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['version'])
+
+ for f in asn1Object['fileList']:
+ self.assertEqual('ZXSGBDBkL82TFGHuE4VOYtJP-E4.crl', f['file'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6487.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6487.py
new file mode 100644
index 0000000000..9e42d0736e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6487.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6487
+
+
+class CertificateWithManifestTestCase(unittest.TestCase):
+ rpki_cert_pem_text = """\
+MIIGCTCCBPGgAwIBAgICKJgwDQYJKoZIhvcNAQELBQAwRjERMA8GA1UEAxMIQTkwREM1QkUx
+MTAvBgNVBAUTKDBDRkNFNzc4NTdGQ0YwMUYzOUQ5OUE2MkI0QUE2MkU2MTU5RTc2RjgwHhcN
+MTkwODA2MDQwMzIyWhcNMjAxMDMxMDAwMDAwWjBGMREwDwYDVQQDEwhBOTFEMTY5MTExMC8G
+A1UEBRMoREMwNEFGMTk4Qzk3RjI1ODJGMTVBRERFRUU3QzY4MjYxMUNBREE1MTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMksR6bPbZFpxlXID/2dhYFuS11agb6ACDUFJpII
+41uw65tFIPT+Y4laccnYRcWPWMTvHLyj0ggU+bc2zJCTYfmGD/GW/Q3WW0A3niBCdXDfkrp2
+DXvSTASJ5+wtVb+AE74C4Mr3UiMOXhJre1rRd5Lq7o6+TEKbVkmUrmTlbsz2Vs2F4//t5sCr
+WjAVP9D5jUBGH2MInbleBP1Bwf+kIxD16OKftRb/vGLzk1UhLsbq22GGE0vZ2hnJP3CbyXkN
+dLBraErzvyCnqYF7/yA0JL0KWRDwr7a9y37s8O3xOxhA/dL8hLZXllzJmoxvxHmq8D+5CjHv
+2/EmH8ODGm2aAzcCAwEAAaOCAv8wggL7MB0GA1UdDgQWBBTcBK8ZjJfyWC8Vrd7ufGgmEcra
+UTAfBgNVHSMEGDAWgBQM/Od4V/zwHznZmmK0qmLmFZ52+DAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zBzBgNVHR8EbDBqMGigZqBkhmJyc3luYzovL3Jwa2kuYXBuaWMubmV0
+L3JlcG9zaXRvcnkvQjMyMkE1RjQxRDY2MTFFMkEzRjI3RjdDNzJGRDFGRjIvRFB6bmVGZjg4
+Qjg1MlpwaXRLcGk1aFdlZHZnLmNybDB+BggrBgEFBQcBAQRyMHAwbgYIKwYBBQUHMAKGYnJz
+eW5jOi8vcnBraS5hcG5pYy5uZXQvcmVwb3NpdG9yeS85ODA2NTJFMEI3N0UxMUU3QTk2QTM5
+NTIxQTRGNEZCNC9EUHpuZUZmODhCODUyWnBpdEtwaTVoV2VkdmcuY2VyMEoGA1UdIAEB/wRA
+MD4wPAYIKwYBBQUHDgIwMDAuBggrBgEFBQcCARYiaHR0cHM6Ly93d3cuYXBuaWMubmV0L1JQ
+S0kvQ1BTLnBkZjCCASgGCCsGAQUFBwELBIIBGjCCARYwXwYIKwYBBQUHMAWGU3JzeW5jOi8v
+cnBraS5hcG5pYy5uZXQvbWVtYmVyX3JlcG9zaXRvcnkvQTkxRDE2OTEvNTBDNjkyOTI5RDI0
+MTFFNzg2MUEyMjZCQzRGOUFFMDIvMH4GCCsGAQUFBzAKhnJyc3luYzovL3Jwa2kuYXBuaWMu
+bmV0L21lbWJlcl9yZXBvc2l0b3J5L0E5MUQxNjkxLzUwQzY5MjkyOUQyNDExRTc4NjFBMjI2
+QkM0RjlBRTAyLzNBU3ZHWXlYOGxndkZhM2U3bnhvSmhISzJsRS5tZnQwMwYIKwYBBQUHMA2G
+J2h0dHBzOi8vcnJkcC5hcG5pYy5uZXQvbm90aWZpY2F0aW9uLnhtbDArBggrBgEFBQcBBwEB
+/wQcMBowGAQCAAEwEgMEAdQI5gMEAdQI/gMEAdRcZjANBgkqhkiG9w0BAQsFAAOCAQEAGvJ+
+s7VgIZk8LDSz6uvsyX80KzZgaqMF7sMsqln0eo5KiGGBHjwvZuiDf46xbNseWW2nwAHmjLda
+osCbcTGVu0JzFYBdkimgyHiq2l8yEchh5BUXr8x4CQIxwGEZEOlEp5mRa/AfHVEfDeMm7mob
+eiCfyTC8q8KH9Tb/rY192kBe+n9MuRyn7TkimV5eYMdwWMyT/VSBCQzzfJ0r+S9o0rBYWH9k
+HDFd3u1ztO8WGjH/LOehoO30xsm52kbxZjc4SJWubgBgxTMIWyjPHbKqCF44NwYev/6eFcOC
++KTEQ/hydcURm3YtX7EZLDtksWB2me576J8opeLsbNeNgzfJpg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ access_methods = [
+ rfc6487.id_ad_rpkiManifest,
+ rfc6487.id_ad_signedObject,
+ ]
+
+ substrate = pem.readBase64fromText(self.rpki_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_pe_subjectInfoAccess:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectInfoAccessSyntax())
+ for ad in extnValue:
+ if ad['accessMethod'] in access_methods:
+ uri = ad['accessLocation']['uniformResourceIdentifier']
+ self.assertIn('rpki.apnic.net', uri)
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+class CertificateWithSignedObjectTestCase(unittest.TestCase):
+ rpki_cert_pem_text = """\
+MIIEuDCCA6CgAwIBAgICBhgwDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoNmQ2
+ZmJmYTk3NTNkYjhkODQ2NDMzZGI1MzUxZDlhOWVjMDdjOTZiZDAeFw0xOTA4MjAw
+MDQ5MjlaFw0yMDA3MDEwMDAwMDBaMDMxMTAvBgNVBAMTKDVCODNERDg3REU5QUM3
+QzZFMzRCODc3REY1MDFBMkIxMjMwQTgxQjQwggEiMA0GCSqGSIb3DQEBAQUAA4IB
+DwAwggEKAoIBAQCXJw4ElLYkHnpRkLE8djruXIJn6uij0hIDM/18ma0c48HKlEPw
+8jGB8kSSvLpimY3zLvqyLaD0pBNulovT3Ep37mPkskvXIUwLTThjBEujG7R4zU5g
+RV2q+QhRjPLXiyER5QnEnHrIEke6tnioEPnC4i02Z6JdDKZrn9jSLPlet2OB5/0+
+0DqLnYPZ1LZrId9YVDeIyBDRFxbzQ6L5K2stua5fWqhX1vnbZKDbXSY6d+u5zVwn
+adxnRP989EiKk/MJ4Reu7YEdtpsM3sd7prXkAcJjPokdvL7hy+BOY8ESgaIhIBj2
+Kqu4G35HKBbUdwFekBikitmiVJlIvvVYXku/AgMBAAGjggHUMIIB0DAdBgNVHQ4E
+FgQUW4Pdh96ax8bjS4d99QGisSMKgbQwHwYDVR0jBBgwFoAUbW+/qXU9uNhGQz21
+NR2ansB8lr0wGAYDVR0gAQH/BA4wDDAKBggrBgEFBQcOAjBQBgNVHR8ESTBHMEWg
+Q6BBhj9yc3luYzovL2NhLnJnLm5ldC9ycGtpL1JHbmV0LU9VL2JXLV9xWFU5dU5o
+R1F6MjFOUjJhbnNCOGxyMC5jcmwwZAYIKwYBBQUHAQEEWDBWMFQGCCsGAQUFBzAC
+hkhyc3luYzovL3Jwa2kucmlwZS5uZXQvcmVwb3NpdG9yeS9ERUZBVUxUL2JXLV9x
+WFU5dU5oR1F6MjFOUjJhbnNCOGxyMC5jZXIwDgYDVR0PAQH/BAQDAgeAMIGKBggr
+BgEFBQcBCwR+MHwwSwYIKwYBBQUHMAuGP3JzeW5jOi8vY2EucmcubmV0L3Jwa2kv
+UkduZXQtT1UvVzRQZGg5NmF4OGJqUzRkOTlRR2lzU01LZ2JRLnJvYTAtBggrBgEF
+BQcwDYYhaHR0cHM6Ly9jYS5yZy5uZXQvcnJkcC9ub3RpZnkueG1sMB8GCCsGAQUF
+BwEHAQH/BBAwDjAMBAIAATAGAwQAkxwtMA0GCSqGSIb3DQEBCwUAA4IBAQCoYaCd
+17R3o7xul5BWgk8SXItdIDoDb7zxVqs/gnzl9i5gdDd0IWIy4gGW32EjsTUXsi+G
+1gyv7aWYFQNlR7kvBgfHyPPp2rkFIj9/KK1VygG3FFMaO1JBDB8UOU+tRbV6xGcf
+IYCk5bH6H9BtkPm2kiczVdjCFIB5krMy+DMf3x1F7/G+5f+ZmUG3b93GfUmzgxw9
+IjlQMyt35h3rgOK6EjpOJgUA1jUWNTpPsR/xzA0HaDlbW38ue02SNluztrsXxJSr
+8XwXhHPUzmlqg89Mb5iem3WZ5lkbr6lteO+ZocYtLPyOHhNmXWgop764K4JQaf46
+WYtY4rWNeHcfgNTz
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ access_methods = [
+ rfc6487.id_ad_rpkiManifest,
+ rfc6487.id_ad_signedObject,
+ ]
+
+ substrate = pem.readBase64fromText(self.rpki_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_pe_subjectInfoAccess:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectInfoAccessSyntax())
+ for ad in extnValue:
+ if ad['accessMethod'] in access_methods:
+ uri = ad['accessLocation']['uniformResourceIdentifier']
+ self.assertIn('ca.rg.net', uri)
+ count += 1
+
+ self.assertEqual(1, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6664.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6664.py
new file mode 100644
index 0000000000..83278a7f4b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6664.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc6664
+
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MIICOjAJBgUrDgMCGgUAMA0GCWCGSAFlAwQCBAUAMA0GCWCGSAFlAwQCAQUAMA0G
+CWCGSAFlAwQCAgUAMA0GCWCGSAFlAwQCAwUAMBUGCSqGSIb3DQEBATAIAgIEAAIC
+EAAwFQYJKoZIhvcNAQEHMAgCAgQAAgIQADAVBgkqhkiG9w0BAQowCAICBAACAhAA
+MBUGByqGSM44BAGgCjAIAgIEAAICDAAwggEvBgcqhkjOPgIBoYIBIjCCAR4CgYEA
+i6Ued8R33vkopJwCvy/ZZv2TtddPXPYmJK4jyFv+TDJTPqnP7XUZCqRuhCyKX10z
+7SgiZs6qlSMk5gCa8shPF8NCHtps2D1OVC7yppZUJI07FoDxoEAZHImdAFvYIA/V
+cGYpYOKod4kju0/e4VUBZ6Qoer5vKTh+lD/+ZKa/WSUCFQDc3W87QSZSX6ggdbeI
+fzb0rsAhbwKBgCEz/o4WJPUZ4HffJfuXHIGrkPnCxFAYDRtlqueswV0Gy6LunipE
+Iu3nCzYkZhMatyFNyzo+NusEsS+9isOhT8jhL93nSBZCSRBy+GfmSXlXv/3c8mtH
+XTie5JOqjRdonPr4g/+VZvMkcioooNrhx/zICHrC3WZ72871/n/z9M+dMCMGByqG
+SM49AgEwGAYIKoZIzj0DAQcGBSuBBAAiBgUrgQQAIzAhBgUrgQQBDTAYBggqhkjO
+PQMBBwYFK4EEACIGBSuBBAAjMBoGCSqGSIb3DQEBCDANBglghkgBZQMEAgEFAA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for cap in asn1Object:
+ if cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys():
+ substrate = cap['parameters']
+ cap_p, rest = der_decoder(
+ substrate, asn1Spec=rfc5751.smimeCapabilityMap[cap['capabilityID']])
+ self.assertFalse(rest)
+ self.assertTrue(cap_p.prettyPrint())
+ self.assertEqual(substrate, der_encoder(cap_p))
+ count += 1
+
+ self.assertEqual(8, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ parameterValue = {
+ rfc6664.rsaEncryption: lambda x: x['maxKeySize'],
+ rfc6664.id_RSAES_OAEP: lambda x: x['maxKeySize'],
+ rfc6664.id_RSASSA_PSS: lambda x: x['minKeySize'],
+ rfc6664.id_dsa: lambda x: x['keySizes']['maxKeySize'],
+ rfc6664.dhpublicnumber: lambda x: x['keyParams']['q'] % 1023,
+ rfc6664.id_ecPublicKey: lambda x: x[0]['namedCurve'],
+ rfc6664.id_ecMQV: lambda x: x[1]['namedCurve'],
+ }
+
+ expectedValue = {
+ rfc6664.rsaEncryption: 4096,
+ rfc6664.id_RSAES_OAEP: 4096,
+ rfc6664.id_RSASSA_PSS: 1024,
+ rfc6664.id_dsa: 3072,
+ rfc6664.dhpublicnumber: 257,
+ rfc6664.id_ecPublicKey: rfc5480.secp256r1,
+ rfc6664.id_ecMQV: rfc5480.secp384r1,
+ }
+
+ count = 0
+ for cap in asn1Object:
+ if cap['capabilityID'] in parameterValue.keys():
+ pValue = parameterValue[cap['capabilityID']](cap['parameters'])
+ eValue = expectedValue[cap['capabilityID']]
+ self.assertEqual(eValue, pValue)
+ count += 1
+
+ self.assertEqual(7, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6955.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6955.py
new file mode 100644
index 0000000000..443d70daf0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6955.py
@@ -0,0 +1,101 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc6955
+
+
+class CertificationRequestTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDPDCCArsCAQAwTjELMAkGA1UEBhMCVVMxETAPBgNVBAoTCFhFVEkgSW5jMRAw
+DgYDVQQLEwdUZXN0aW5nMRowGAYDVQQDExFQS0lYIEV4YW1wbGUgVXNlcjCCAkEw
+ggG2BgcqhkjOPgIBMIIBqQKBgQCUhOBFbH9pUWI+VoB8aOfFqZ6edHSU7ZCMHcTh
+ShSC9dKUDBnjuRC7EbnlpfuOIVFjAoaqBrghNrZ/Nt/R1mhbeXwdWhR1H2qTdZPO
+u5dyivAPI51H9tSzx/D05vYrwjLhiWe+fgau+NABa4sq9QLXtqhjlIOwGzF9Uhre
+5QOFJwKBgCamMixaK9QzK1zcBodTP5AGYVA4PtK5fYEcEhDFDFPUZNGOMAcIjN0/
+Ci8s1ht/V4bQ2rtuNioY6NO8cDF6SLZOGG7dHyIG6z/q1EFp2ZveR5V6cpHSCX9J
+XDsDM1HI8Tma/wTVbn6UPQO49jEVJkiVqFzeR4i0aToAp4ae2tHNAiEA6HL6lvAR
+QPXy3P07XXiUsYUB5Wk3IfclubpxSvxgMPsCYQCjkQHAqG6kTaBW/Gz+H6ewzQ+U
+hwwlvpd2jevlpAldq4PNgAs1Z38MjqcxmDKFOUCdEZjY3rh/hpuvjWc9tna0YS8h
+4UsOaP9TPofd2HFWaEfc9yBjSzxfeHGD5nCe4pIwGgMVABzVOg0Xgm0KgXWBRhCO
+PtsJ5Jg0AgE3A4GEAAKBgBNjoYUEjEaoiOv0XqiTdK79rp6WJxJlxEwHBj4Y/pS4
+qHlIvS40tkfKBDCh7DP9GgstnlDJeA+uauy1a2q+slzasp94LLl34nkrJb8uC1lK
+k0v4s+yBNK6XR1LgqCmY7NGwyitveovbTo2lFX5+rzNiCZ4PEUSMwY2iEZ5T77Lo
+oCEwHwYJKoZIhvcNAQkOMRIwEDAOBgNVHQ8BAf8EBAMCAwgwDAYIKwYBBQUHBgMF
+AANtADBqMFIwSDELMAkGA1UEBhMCVVMxETAPBgNVBAoTCFhFVEkgSW5jMRAwDgYD
+VQQLEwdUZXN0aW5nMRQwEgYDVQQDEwtSb290IERTQSBDQQIGANo5tuLLBBQtBXf+
+Xo9l9a+tyVybAsCoiClhYw==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6402.CertificationRequest()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['certificationRequestInfo']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.dhpublicnumber, spki_a['algorithm'])
+ self.assertIn(spki_a['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ params, rest = der_decoder(
+ spki_a['parameters'], asn1Spec=rfc6955.DomainParameters())
+
+ self.assertFalse(rest)
+ self.assertTrue(params.prettyPrint())
+ self.assertEqual(spki_a['parameters'], der_encoder(params))
+ self.assertEqual(55, params['validationParms']['pgenCounter'])
+
+ sig_a = asn1Object['signatureAlgorithm']
+
+ self.assertEqual(
+ rfc6955.id_dhPop_static_sha1_hmac_sha1, sig_a['algorithm'])
+ self.assertIn(sig_a['algorithm'], rfc5280.algorithmIdentifierMap)
+ self.assertEqual(sig_a['parameters'], der_encoder(univ.Null("")))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ spki_a = asn1Object['certificationRequestInfo']['subjectPublicKeyInfo']['algorithm']
+
+ self.assertEqual(rfc5480.dhpublicnumber, spki_a['algorithm'])
+ self.assertEqual(
+ 55, spki_a['parameters']['validationParms']['pgenCounter'])
+
+ sig_a = asn1Object['signatureAlgorithm']
+
+ self.assertEqual(
+ rfc6955.id_dhPop_static_sha1_hmac_sha1, sig_a['algorithm'])
+ self.assertEqual(univ.Null(""), sig_a['parameters'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc6960.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc6960.py
new file mode 100644
index 0000000000..151c934ca9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc6960.py
@@ -0,0 +1,176 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc6960
+
+
+class OCSPRequestTestCase(unittest.TestCase):
+ ocsp_req_pem_text = """\
+MGowaDBBMD8wPTAJBgUrDgMCGgUABBS3ZrMV9C5Dko03aH13cEZeppg3wgQUkqR1LKSevoFE63n8
+isWVpesQdXMCBDXe9M+iIzAhMB8GCSsGAQUFBzABAgQSBBBjdJOiIW9EKJGELNNf/rdA
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6960.OCSPRequest()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ocsp_req_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['tbsRequest']['version'])
+
+ count = 0
+ for extn in asn1Object['tbsRequest']['requestExtensions']:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ count += 1
+
+ self.assertEqual(1, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.ocsp_req_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['tbsRequest']['version'])
+
+ for req in asn1Object['tbsRequest']['requestList']:
+ ha = req['reqCert']['hashAlgorithm']
+ self.assertEqual(rfc4055.id_sha1, ha['algorithm'])
+ self.assertEqual(univ.Null(""), ha['parameters'])
+
+
+class OCSPResponseTestCase(unittest.TestCase):
+ ocsp_resp_pem_text = """\
+MIIEvQoBAKCCBLYwggSyBgkrBgEFBQcwAQEEggSjMIIEnzCCAQ+hgYAwfjELMAkGA1UEBhMCQVUx
+EzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEV
+MBMGA1UEAxMMc25tcGxhYnMuY29tMSAwHgYJKoZIhvcNAQkBFhFpbmZvQHNubXBsYWJzLmNvbRgP
+MjAxMjA0MTExNDA5MjJaMFQwUjA9MAkGBSsOAwIaBQAEFLdmsxX0LkOSjTdofXdwRl6mmDfCBBSS
+pHUspJ6+gUTrefyKxZWl6xB1cwIENd70z4IAGA8yMDEyMDQxMTE0MDkyMlqhIzAhMB8GCSsGAQUF
+BzABAgQSBBBjdJOiIW9EKJGELNNf/rdAMA0GCSqGSIb3DQEBBQUAA4GBADk7oRiCy4ew1u0N52QL
+RFpW+tdb0NfkV2Xyu+HChKiTThZPr9ZXalIgkJ1w3BAnzhbB0JX/zq7Pf8yEz/OrQ4GGH7HyD3Vg
+PkMu+J6I3A2An+bUQo99AmCbZ5/tSHtDYQMQt3iNbv1fk0yvDmh7UdKuXUNSyJdHeg27dMNy4k8A
+oIIC9TCCAvEwggLtMIICVqADAgECAgEBMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAkFVMRMw
+EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxFTAT
+BgNVBAMTDHNubXBsYWJzLmNvbTEgMB4GCSqGSIb3DQEJARYRaW5mb0Bzbm1wbGFicy5jb20wHhcN
+MTIwNDExMTMyNTM1WhcNMTMwNDExMTMyNTM1WjB+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29t
+ZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRUwEwYDVQQDEwxzbm1w
+bGFicy5jb20xIDAeBgkqhkiG9w0BCQEWEWluZm9Ac25tcGxhYnMuY29tMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDDDU5HOnNV8I2CojxB8ilIWRHYQuaAjnjrETMOprouDHFXnwWqQo/I3m0b
+XYmocrh9kDefb+cgc7+eJKvAvBqrqXRnU38DmQU/zhypCftGGfP8xjuBZ1n23lR3hplN1yYA0J2X
+SgBaAg6e8OsKf1vcX8Es09rDo8mQpt4G2zR56wIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU8Ys2dpJFLMHl
+yY57D4BNmlqnEcYwHwYDVR0jBBgwFoAU8Ys2dpJFLMHlyY57D4BNmlqnEcYwDQYJKoZIhvcNAQEF
+BQADgYEAWR0uFJVlQId6hVpUbgXFTpywtNitNXFiYYkRRv77McSJqLCa/c1wnuLmqcFcuRUK0oN6
+8ZJDP2HDDKe8MCZ8+sx+CF54eM8VCgN9uQ9XyE7x9XrXDd3Uw9RJVaWSIezkNKNeBE0lDM2jUjC4
+HAESdf7nebz1wtqAOXE1jWF/y8g=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6960.OCSPResponse()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.ocsp_resp_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['responseStatus'])
+
+ rb = asn1Object['responseBytes']
+
+ self.assertIn(rb['responseType'], rfc6960.ocspResponseMap)
+
+ resp, rest = der_decoder(
+ rb['response'], asn1Spec=rfc6960.ocspResponseMap[rb['responseType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(resp.prettyPrint())
+ self.assertEqual(rb['response'], der_encoder(resp))
+ self.assertEqual(0, resp['tbsResponseData']['version'])
+
+ count = 0
+ for extn in resp['tbsResponseData']['responseExtensions']:
+ self.assertIn(extn['extnID'], rfc5280.certificateExtensionsMap)
+
+ ev, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(ev))
+
+ count += 1
+
+ self.assertEqual(1, count)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.ocsp_resp_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['responseStatus'])
+
+ rb = asn1Object['responseBytes']
+
+ self.assertIn(rb['responseType'], rfc6960.ocspResponseMap)
+
+ resp, rest = der_decoder(
+ rb['response'],
+ asn1Spec=rfc6960.ocspResponseMap[rb['responseType']],
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(resp.prettyPrint())
+ self.assertEqual(rb['response'], der_encoder(resp))
+ self.assertEqual(0, resp['tbsResponseData']['version'])
+
+ for rdn in resp['tbsResponseData']['responderID']['byName']['rdnSequence']:
+ for attr in rdn:
+ if attr['type'] == rfc5280.id_emailAddress:
+ self.assertEqual('info@snmplabs.com', attr['value'])
+
+ for r in resp['tbsResponseData']['responses']:
+ ha = r['certID']['hashAlgorithm']
+ self.assertEqual(rfc4055.id_sha1, ha['algorithm'])
+ self.assertEqual(univ.Null(""), ha['parameters'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7030.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7030.py
new file mode 100644
index 0000000000..7d011f0bb0
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7030.py
@@ -0,0 +1,89 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7030
+
+
+class CSRAttrsTestCase(unittest.TestCase):
+ pem_text = """\
+MEEGCSqGSIb3DQEJBzASBgcqhkjOPQIBMQcGBSuBBAAiMBYGCSqGSIb3DQEJDjEJ
+BgcrBgEBAQEWBggqhkjOPQQDAw==
+"""
+
+ the_oids = (
+ univ.ObjectIdentifier('1.2.840.113549.1.9.7'),
+ univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+ )
+
+ the_attrTypes = (
+ univ.ObjectIdentifier('1.2.840.10045.2.1'),
+ univ.ObjectIdentifier('1.2.840.113549.1.9.14'),
+ )
+
+ the_attrVals = (
+ '1.3.132.0.34',
+ '1.3.6.1.1.1.1.22',
+ )
+
+ def setUp(self):
+ self.asn1Spec = rfc7030.CsrAttrs()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr_or_oid in asn1Object:
+ if attr_or_oid.getName() == 'oid':
+ self.assertIn(attr_or_oid['oid'], self.the_oids)
+
+ if attr_or_oid.getName() == 'attribute':
+ self.assertIn(
+ attr_or_oid['attribute']['attrType'], self.the_attrTypes)
+
+ def testOpenTypes(self):
+ openTypesMap = rfc5652.cmsAttributesMap.copy()
+
+ for at in self.the_attrTypes:
+ openTypesMap.update({at: univ.ObjectIdentifier()})
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr_or_oid in asn1Object:
+ if attr_or_oid.getName() == 'attribute':
+ valString = attr_or_oid['attribute']['attrValues'][0].prettyPrint()
+
+ if attr_or_oid['attribute']['attrType'] == self.the_attrTypes[0]:
+ self.assertEqual(self.the_attrVals[0], valString)
+
+ if attr_or_oid['attribute']['attrType'] == self.the_attrTypes[1]:
+ self.assertEqual(self.the_attrVals[1], valString)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7191.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7191.py
new file mode 100644
index 0000000000..40afbd42ea
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7191.py
@@ -0,0 +1,313 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7191
+
+
+class ReceiptRequestTestCase(unittest.TestCase):
+ message1_pem_text = """\
+MIIGfAYJKoZIhvcNAQcCoIIGbTCCBmkCAQMxDTALBglghkgBZQMEAgIwgb4GCyqGSIb3DQEJ
+EAEZoIGuBIGrMIGooEQwIwYLKoZIhvcNAQkQDAExFAwSVmlnaWwgU2VjdXJpdHkgTExDMB0G
+CyqGSIb3DQEJEAwDMQ4MDFByZXRlbmQgMDQ4QTBgMF4wVjAbBgsqhkiG9w0BCRAMGzEMDApl
+eGFtcGxlSUQxMBUGCyqGSIb3DQEJEAwKMQYMBEhPVFAwIAYLKoZIhvcNAQkQDAsxEQwPa3Rh
+LmV4YW1wbGUuY29tBAQxMjM0oIIChzCCAoMwggIKoAMCAQICCQCls1QoG7BuPTAKBggqhkjO
+PQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAP
+BgNVBAoMCEJvZ3VzIENBMB4XDTE5MDYxMjE0MzEwNFoXDTIwMDYxMTE0MzEwNFowfDELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRswGQYDVQQKExJWaWdp
+bCBTZWN1cml0eSBMTEMxFzAVBgNVBAsTDktleSBNYW5hZ2VtZW50MRgwFgYDVQQDEw9rdGEu
+ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASX9l7E3VS3GAEiiRrVozgCBQfL
+F67IhOxtbQviD/ojhHSQmflLyfRJ8e7+nbWlOLstRc7lgmq+OQVaSlStkzVk/BO1wE5BgUyF
+xje+sieUtPRXVqfoVZCJJsgiSbo181ejgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIB
+DQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9z
+ZS4wHQYDVR0OBBYEFG2bXP0Dr7W51YvxZJ8aVuC1rU0PMB8GA1UdIwQYMBaAFPI12zQE2qVV
+8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMAZ4lqTtdbaDLFfHywaQYwOWBkL3d0wH
+EsNZTW1qQKy/oY3tXc0O6cbJZ5JJb9wk8QIwblXm8+JjdEJHsNjSv4rcJZou4vkMT7PzEme2
+BbMkwOWeIdhmy1vszd8TQgvdb36XMYIDBzCCAwMCAQOAFG2bXP0Dr7W51YvxZJ8aVuC1rU0P
+MAsGCWCGSAFlAwQCAqCCAmUwGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEZMBwGCSqGSIb3
+DQEJBTEPFw0xOTA2MTIxOTM1NTFaMCUGCyqGSIb3DQEJEAIHMRYEFCe4nFY7FiJRnReHHHm/
+rIht3/g9MD8GCSqGSIb3DQEJBDEyBDA3gzQlzfvylOn9Rf59kMSa1K2IyOBA5Eoeiyp83Bmj
+KasomGorn9htte1iFPbxPRUwggG/BglghkgBZQIBBUExggGwMIIBrAQUJ7icVjsWIlGdF4cc
+eb+siG3f+D0wggGSoIH+MH8GCWCGSAFlAgEQAARyMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQI
+EwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxp
+Y2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHsGCWCGSAFlAgEQAARuMGwx
+CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMH
+RXhhbXBsZTEMMAoGA1UEAxMDQm9iMR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20w
+gY4wgYsGCWCGSAFlAgEQAAR+MHwxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UE
+BxMHSGVybmRvbjEbMBkGA1UEChMSVmlnaWwgU2VjdXJpdHkgTExDMRcwFQYDVQQLEw5LZXkg
+TWFuYWdlbWVudDEYMBYGA1UEAxMPa3RhLmV4YW1wbGUuY29tMAoGCCqGSM49BAMDBGYwZAIw
+Z7DXliUb8FDKs+BadyCY+IJobPnQ6UoLldMj3pKEowONPifqrbWBJJ5cQQNgW6YuAjBbjSlY
+goRV+bq4fdgOOj25JFqa80xnXGtQqjm/7NSII5SbdJk+DT7KCkSbkElkbgQ=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.message1_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat == rfc7191.id_aa_KP_keyPkgIdAndReceiptReq:
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc7191.KeyPkgIdentifierAndReceiptReq())
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, sav['pkgID'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.message1_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ v3 = rfc5652.CMSVersion().subtype(value='v3')
+
+ self.assertEqual(v3, asn1Object['content']['version'])
+
+ for sa in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc7191.id_aa_KP_keyPkgIdAndReceiptReq:
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+ self.assertEqual(package_id, sa['attrValues'][0]['pkgID'])
+
+
+class ReceiptTestCase(unittest.TestCase):
+ message2_pem_text = """\
+MIIEdAYJKoZIhvcNAQcCoIIEZTCCBGECAQMxDTALBglghkgBZQMEAgIwgawGCmCGSAFlAgEC
+TgOggZ0EgZowgZcEFCe4nFY7FiJRnReHHHm/rIht3/g9MH8GCWCGSAFlAgEQAARyMHAxCzAJ
+BgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhh
+bXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29t
+oIICfDCCAngwggH+oAMCAQICCQCls1QoG7BuOzAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJV
+UzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4X
+DTE5MDUyOTE0NDU0MVoXDTIwMDUyODE0NDU0MVowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
+AlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGlj
+ZTEgMB4GCSqGSIb3DQEJARYRYWxpY2VAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6zXCYlmsEGD5vPu5hl9hDEjd1UHRgJIPoy
+3fJcWWeZ8FHCirICtuMgFisNscG/aTwKyDYOFDuqz/C2jyEwqgWCRyxyohuJXtmjgZQwgZEw
+CwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBi
+ZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFMS6Wg4+euM8gbD0Aqpouxbg
+lg41MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2gAMGUC
+MGO5H9E1uAveRGGaf48lN4pov2yH+hCAc5hOAuZKe/f40MKSF8q4w2ij+0euSaKFiAIxAL3g
+xp6sMitCmLQgOH6/RBIC/2syJ97y0KVp9da0PDAvwxLugCHTKZPjjpSLPHHc9TGCARwwggEY
+AgEDgBTEuloOPnrjPIGw9AKqaLsW4JYONTALBglghkgBZQMEAgKgejAZBgkqhkiG9w0BCQMx
+DAYKYIZIAWUCAQJOAzAcBgkqhkiG9w0BCQUxDxcNMTkwNjEzMTYxNjA4WjA/BgkqhkiG9w0B
+CQQxMgQwQSWYpq4jwhMkmS0as0JL3gjYxKLgDfzP2ndTNsAY0m9p8Igp8ZcK4+5n9fXJ43vU
+MAoGCCqGSM49BAMDBGgwZgIxAMfq2EJ5pSl9tGOEVJEgZitc266ljrOg5GDjkd2d089qw1A3
+bUcOYuCdivgxVuhlAgIxAPR9JavxziwCbVyBUWOAiKKYfglTgG3AwNmrKDj0NtXUQ9qDmGAc
+6L+EAY2P5OVB8Q==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.message2_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ oid = sd['encapContentInfo']['eContentType']
+
+ self.assertEqual(rfc7191.id_ct_KP_keyPackageReceipt, oid)
+
+ receipt, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc7191.KeyPackageReceipt())
+
+ self.assertFalse(rest)
+ self.assertTrue(receipt.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(receipt))
+
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, receipt['receiptOf']['pkgID'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.message2_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ v3 = rfc5652.CMSVersion().subtype(value='v3')
+
+ self.assertEqual(v3, asn1Object['content']['version'])
+
+ for sa in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ self.assertIn( sa['attrType'], rfc5652.cmsAttributesMap)
+ if sa['attrType'] == rfc5652.id_messageDigest:
+ self.assertIn(
+ '0x412598a6ae2', sa['attrValues'][0].prettyPrint())
+
+ ct_oid = asn1Object['content']['encapContentInfo']['eContentType']
+
+ self.assertIn(ct_oid, rfc5652.cmsContentTypesMap)
+ self.assertEqual(ct_oid, rfc7191.id_ct_KP_keyPackageReceipt)
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ sd_eci = asn1Object['content']['encapContentInfo']
+ receipt, rest = der_decoder(
+ sd_eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd_eci['eContentType']])
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, receipt['receiptOf']['pkgID'])
+
+
+class ErrorTestCase(unittest.TestCase):
+ message3_pem_text = """\
+MIIEbwYJKoZIhvcNAQcCoIIEYDCCBFwCAQMxDTALBglghkgBZQMEAgIwga0GCmCGSAFlAgEC
+TgaggZ4EgZswgZigFgQUJ7icVjsWIlGdF4cceb+siG3f+D0wewYJYIZIAWUCARAABG4wbDEL
+MAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAwDgYDVQQKEwdF
+eGFtcGxlMQwwCgYDVQQDEwNCb2IxHjAcBgkqhkiG9w0BCQEWD2JvYkBleGFtcGxlLmNvbQoB
+CqCCAncwggJzMIIB+qADAgECAgkApbNUKBuwbjwwCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMC
+VVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAe
+Fw0xOTA1MjkxOTIwMTNaFw0yMDA1MjgxOTIwMTNaMGwxCzAJBgNVBAYTAlVTMQswCQYDVQQI
+EwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEMMAoGA1UEAxMDQm9i
+MR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AAQxpGJVLxa83xhyal+rvmMFs4xS6Q19cCDoAvQkkFe0gUC4glxlWWQuf/FvLCRwwscr877D
+1FZRBrYKPD6Hxv/UKX6Aimou0TnnxsPk98zZpikn9gTrJn2cF9NCzvPVMfmjgZQwgZEwCwYD
+VR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0
+cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFMprZnLeLJtXf5iO4sMq02aOwhql
+MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMBVu
+hLo58RhCiYsOLZFSR3vWHPDCJBnO1vE1uixqEjONHxlBoeGN2MmWs/9PppcHCwIwN9HB5jPc
+J7gTjA9+ipCe+qkztmV+Gy2NBAY6xYC0gh+pb+X5OAI7y7HdctXp+PfrMYIBGzCCARcCAQOA
+FMprZnLeLJtXf5iO4sMq02aOwhqlMAsGCWCGSAFlAwQCAqB6MBkGCSqGSIb3DQEJAzEMBgpg
+hkgBZQIBAk4GMBwGCSqGSIb3DQEJBTEPFw0xOTA2MTMxNjE2MDhaMD8GCSqGSIb3DQEJBDEy
+BDCgXFTUc3ZInjt+MWYkYmXYERk4FgErEZNILlWgVl7Z9pImgLObIpdrGqGPt06/VkwwCgYI
+KoZIzj0EAwMEZzBlAjEAsjJ3iWRUteMKBVsjaYeN6TG9NITRTOpRVkSVq55DcnhwS9g9lu8D
+iNF8uKtW/lk0AjA7z2q40N0lamXkSU7ECasiWOYV1X4cWGiQwMZDKknBPDqXqB6Es6p4J+qe
+0V6+BtY=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.message3_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ oid = sd['encapContentInfo']['eContentType']
+
+ self.assertEqual(rfc7191.id_ct_KP_keyPackageError, oid)
+
+ kpe, rest = der_decoder(
+ sd['encapContentInfo']['eContent'],
+ asn1Spec=rfc7191.KeyPackageError())
+
+ self.assertFalse(rest)
+ self.assertTrue(kpe.prettyPrint())
+ self.assertEqual(sd['encapContentInfo']['eContent'], der_encoder(kpe))
+
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, kpe['errorOf']['pkgID'])
+ self.assertEqual(
+ rfc7191.EnumeratedErrorCode(value=10), kpe['errorCode'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.message3_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ v3 = rfc5652.CMSVersion().subtype(value='v3')
+
+ self.assertEqual(v3, asn1Object['content']['version'])
+
+ for sa in asn1Object['content']['signerInfos'][0]['signedAttrs']:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+ if sa['attrType'] == rfc5652.id_messageDigest:
+ self.assertIn(
+ '0xa05c54d4737', sa['attrValues'][0].prettyPrint())
+
+ ct_oid = asn1Object['content']['encapContentInfo']['eContentType']
+
+ self.assertIn(ct_oid, rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc7191.id_ct_KP_keyPackageError, ct_oid)
+
+ # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
+ # automatically decode it
+ sd_eci = asn1Object['content']['encapContentInfo']
+ kpe, rest = der_decoder(
+ sd_eci['eContent'],
+ asn1Spec=rfc5652.cmsContentTypesMap[sd_eci['eContentType']])
+ package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
+ package_id = pem.readBase64fromText(package_id_pem_text)
+
+ self.assertEqual(package_id, kpe['errorOf']['pkgID'])
+ self.assertEqual(rfc7191.EnumeratedErrorCode(value=10), kpe['errorCode'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7229.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7229.py
new file mode 100644
index 0000000000..915b9be530
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7229.py
@@ -0,0 +1,93 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7229
+
+
+class CertificatePolicyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIDJDCCAqqgAwIBAgIJAKWzVCgbsG5AMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkxMDEzMTkwNTUzWhcNMjAxMDEyMTkwNTUzWjBTMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xJTAjBgNVBAoTHFRF
+U1QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AATwUXZUseiOaqWdrClDCMbp9YFAM87LTmFirygpzKDU9cfqSCg7zBDIphXCwMcS
+9zVWDoStCbcvN0jw5CljHcffzpHYX91P88SZRJ1w4hawHjOsWxvM3AkYgZ5nfdlL
+7EajggFcMIIBWDAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwbwYDVR0j
+BGgwZoAU8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GC
+CQDokdYGkU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgB
+hvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3Ig
+YW55IHB1cnBvc2UuMCEGA1UdIAQaMBgwCgYIKwYBBQUHDQEwCgYIKwYBBQUHDQIw
+CgYDVR02BAMCAQIwNQYDVR0hBC4wLDAUBggrBgEFBQcNAQYIKwYBBQUHDQcwFAYI
+KwYBBQUHDQIGCCsGAQUFBw0IMAoGCCqGSM49BAMDA2gAMGUCMHaWskjS7MKQCMcn
+zEKFOV3LWK8pL57vrECJd8ywKdwBJUNw9HhvSKkfUwL6rjlLpQIxAL2QO3CNoZRP
+PZs8K3IjUA5+U73pA8lpaTOPscLY22WL9pAGmyVUyEJ8lM7E+r4iDg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ test_oids = [
+ rfc7229.id_TEST_certPolicyOne,
+ rfc7229.id_TEST_certPolicyTwo,
+ rfc7229.id_TEST_certPolicyThree,
+ rfc7229.id_TEST_certPolicyFour,
+ rfc7229.id_TEST_certPolicyFive,
+ rfc7229.id_TEST_certPolicySix,
+ rfc7229.id_TEST_certPolicySeven,
+ rfc7229.id_TEST_certPolicyEight,
+ ]
+
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ count = 0
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ s = extn['extnValue']
+ ev, rest = der_decoder(
+ s, rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ev.prettyPrint())
+ self.assertEqual(s, der_encoder(ev))
+
+ if extn['extnID'] == rfc5280.id_ce_certificatePolicies:
+ for pol in ev:
+ if pol['policyIdentifier'] in test_oids:
+ count += 1
+
+ if extn['extnID'] == rfc5280.id_ce_policyMappings:
+ for pmap in ev:
+ if pmap['issuerDomainPolicy'] in test_oids:
+ count += 1
+ if pmap['subjectDomainPolicy'] in test_oids:
+ count += 1
+
+ self.assertEqual(6, count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7292.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7292.py
new file mode 100644
index 0000000000..583d396d67
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7292.py
@@ -0,0 +1,183 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7292
+
+
+class PKCS12TestCase(unittest.TestCase):
+ pfx_pem_text = """\
+MIIJ0wIBAzCCCY8GCSqGSIb3DQEHAaCCCYAEggl8MIIJeDCCBggGCSqGSIb3DQEHAaCCBfkE
+ggX1MIIF8TCCBe0GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAjuq0/+
+0pyutQICB9AEggTYZe/mYBpmkDvKsve4EwIVwo1TNv4ldyx1qHZW2Ih6qQCY+Nv1Mnv9we0z
+UTl4p3tQzCPWXnrSA82IgOdotLIez4YwXrgiKhcIkSSL+2yCmAoM+qkjiAIKq+l3UJ6Xhafe
+2Kg4Ek/0RkHpe6GwjTtdefkpXpZgccMEopOtKQMLJWsDM7p77x/amn6yIk2tpskKqUY/4n8Y
+xEiTWcRtTthYqZQIt+q94nKLYpt0o880SVOfvdEqp5KII7cTg60GJL+n6oN6hmP0bsAMvnk9
+1f8/lFKMi9tsNU/KnUhbDVpjJwBQkhgbqBx6GdtoqSLSlYNPVM0wlntwm1JhH4ybiQ5sNzqO
+7FlWC5bcYwkvOlx1gGrshY5jK/WjbA4paBpxSkgobJReirY9BeqITnvokXlub4tehHhM20Ik
+42pKa3kGaHmowvzflxqE+oysW5Oa9XbZxBCfkOMJ70o4hqa+n66+E/uKcN9NbKbTo3zt3xdt
+6ypOwHb74t5OcWaGx3EZsw0n0/V+WoLSpXOBwpx08+1yh7LV29aNQ0oEzVVkF6YYRQZtdIMe
+s3xB2i6sjLal21ntk7iBzMJwVoi524SAZ/oW8SuDAn1c93AWWwKZLALv5V3FZ2pDiQXArcfz
+DH2d5HJyNx7OlvKzNgEngwSyEC1XbjnOsZVUqGFENuDTa/brH4oEJHEkyWTyDudrz8iCEO80
+e1PE4qqJ5CllN0CSVWqz4CxGDFIQXzR6ohn8f3dR3+DAaLYvAjBVMLJjk7+nfnB2L0HpanhT
+Fz9AxPPIDf5pBQQwM14l8wKjEHIyfqclupeKNokBUr1ykioPyCr3nf4Rqe0Z4EKIY4OCpW6n
+hrkWHmvF7OKR+bnuSk3jnBxjSN0Ivy5q9q3fntYrhscMGGR73umfi8Z29tM1vSP9jBZvirAo
+geGf/sfOI0ewRvJf/5abnNg/78Zyk8WmlAHVFzNGcM3u3vhnNpTIVRuUyVkdSmOdbzeSfmqQ
+2HPCEdC9HNm25KJt1pD6v6aP3Tw7qGl+tZyps7VB2i+a+UGcwQcClcoXcPSdG7Z1gBTzSr84
+MuVPYlePuo1x+UwppSK3rM8ET6KqhGmESH5lKadvs8vdT6c407PfLcfxyAGzjH091prk2oRJ
+xB3oQAYcKvkuMcM6FSLJC263Dj+pe1GGEexk1AoysYe67tK0sB66hvbd92HcyWhW8/vI2/PM
+bX+OeEb7q+ugnsP+BmF/btWXn9AxfUqNWstyInKTn+XpqFViMIOG4e2xC4u/IvzG3VrTWUHF
+4pspH3k7GB/EOLvtbsR0uacBFlsColJy0FaWT9rrdueU3YEiIRCC8LGi1XpUa8f5adeBKWN+
+eRTrrF4o7uoNeGlnwZ7ebnb7k18Q0GRzzzTZPoMM4L703svfE/eNYWFHLY4NDQKSYgeum365
+WAfZpHOX7YOc6oRGrGB+QuGoyikTTDO8xpcEmb8vDz4ZwHhN0PS056LNJeMoI0A/5DJb3e10
+i1txlM48sbZBuIEIeixr52nwG4LuxqXGqShKaTfOrFxHjx4kI4/dp9dN/k8TGFsLWjuIgMJI
+6nRHbWrxB3F0XKXagtLLep1MDwDwAuCyiW2YC0JzRvsJViIgjDA+eiHX0O6/8xiK9dzMQpIz
+TVHSEqFlhORp0DGB2zATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IADMA
+ZgA3ADEAYQBmADYANQAtADEANgA4ADcALQA0ADQANABhAC0AOQBmADQANgAtAGMAOABiAGUA
+MQA5ADQAYwAzAGUAOABlMGsGCSsGAQQBgjcRATFeHlwATQBpAGMAcgBvAHMAbwBmAHQAIABF
+AG4AaABhAG4AYwBlAGQAIABDAHIAeQBwAHQAbwBnAHIAYQBwAGgAaQBjACAAUAByAG8AdgBp
+AGQAZQByACAAdgAxAC4AMDCCA2gGCSqGSIb3DQEHAaCCA1kEggNVMIIDUTCCA00GCyqGSIb3
+DQEMCgEDoIIDJTCCAyEGCiqGSIb3DQEJFgGgggMRBIIDDTCCAwkwggHxoAMCAQICEDbt9oc6
+oQinRwE1826MiBEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAxMJYW5vbnltb3VzMCAXDTE2
+MDcxOTIyMDAwMVoYDzIxMTYwNjI1MjIwMDAxWjAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8trBCTBjXXA4OgSO5nRTOU5T86ObCgc71
+J2oCuUigSddcTDzebaD0wcyAgf101hAdwMKQ9DvrK0nGvm7FAMnnUuVeATafKgshLuUTUUfK
+jx4Xif4LoS0/ev4BiOI5a1MlIRZ7T5Cyjg8bvuympzMuinQ/j1RPLIV0VGU2HuDxuuP3O898
+GqZ3+F6Al5CUcwmOX9zCs91JdN/ZFZ05SXIpHQuyPSPUX5Vy8F1ZeJ8VG3nkbemfFlVkuKQq
+vteL9mlT7z95rVZgGB3nUZL0tOB68eMcffA9zUksOmeTi5M6jnBcNeX2Jh9jS3YYd+IEliZm
+mggQG7kPta8f+NqezL77AgMBAAGjVTBTMBUGA1UdJQQOMAwGCisGAQQBgjcKAwQwLwYDVR0R
+BCgwJqAkBgorBgEEAYI3FAIDoBYMFGFub255bW91c0B3aW5kb3dzLXgAMAkGA1UdEwQCMAAw
+DQYJKoZIhvcNAQEFBQADggEBALh+4qmNPzC6M8BW9/SC2ACQxxPh06GQUGx0D+GLYnp61ErZ
+OtKyKdFh+uZWpu5vyYYAHCLXP7VdS/JhJy677ynAPjXiC/LAzrTNvGs74HDotD966Hiyy0Qr
+ospFGiplHGRA5vXA2CiKSX+0HrVkN7rhk5PYkc6R+/cdosd+QZ8lkEa9yDWc5l//vWEbzwVy
+mJf/PRf8NTkWAK6SPV7Y37j1mhkJjOH9VkRxNrd6kcihRa4u0ImXaXEsec77ER0so31DKCrP
+m+rqZPj9NZSIYP3sMGJ4Bmm/n2YRdeaUzTdocfD3TRnKxs65DSgpiSq1gmtsXM7jAPs/Egrg
+tbWEypgxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFKVgj/32UdEyuQcB
+rqr03dPnboinBBSU7mxdpB5LTCvorCI8Tk5OMiUzjgICB9A=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc7292.PFX()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pfx_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(3, asn1Object['version'])
+
+ oid = asn1Object['macData']['mac']['digestAlgorithm']['algorithm']
+
+ self.assertEqual(univ.ObjectIdentifier('1.3.14.3.2.26'), oid)
+
+ md_hex = asn1Object['macData']['mac']['digest'].prettyPrint()
+
+ self.assertEqual('0xa5608ffdf651d132b90701aeaaf4ddd3e76e88a7', md_hex)
+ self.assertEqual(
+ rfc5652.id_data, asn1Object['authSafe']['contentType'])
+
+ data, rest = der_decoder(
+ asn1Object['authSafe']['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ authsafe, rest = der_decoder(data, asn1Spec=rfc7292.AuthenticatedSafe())
+
+ self.assertFalse(rest)
+ self.assertTrue(authsafe.prettyPrint())
+ self.assertEqual(data, der_encoder(authsafe))
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+
+ data, rest = der_decoder(ci['content'], asn1Spec=univ.OctetString())
+
+ self.assertFalse(rest)
+
+ sc, rest = der_decoder(data, asn1Spec=rfc7292.SafeContents())
+
+ self.assertFalse(rest)
+ self.assertTrue(sc.prettyPrint())
+ self.assertEqual(data, der_encoder(sc))
+
+ for sb in sc:
+ if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
+ bv, rest = der_decoder(
+ sb['bagValue'],
+ asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
+
+ self.assertFalse(rest)
+ self.assertTrue(bv.prettyPrint())
+ self.assertEqual(sb['bagValue'], der_encoder(bv))
+
+ for attr in sb['bagAttributes']:
+ if attr['attrType'] in rfc5652.cmsAttributesMap:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(
+ attr['attrValues'][0], der_encoder(av))
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pfx_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ digest_alg = asn1Object['macData']['mac']['digestAlgorithm']
+
+ self.assertFalse(digest_alg['parameters'].hasValue())
+
+ authsafe, rest = der_decoder(
+ asn1Object['authSafe']['content'],
+ asn1Spec=rfc7292.AuthenticatedSafe(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(authsafe.prettyPrint())
+ self.assertEqual(
+ asn1Object['authSafe']['content'], der_encoder(authsafe))
+
+ for ci in authsafe:
+ self.assertEqual(rfc5652.id_data, ci['contentType'])
+ sc, rest = der_decoder(
+ ci['content'], asn1Spec=rfc7292.SafeContents(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(sc.prettyPrint())
+ self.assertEqual(ci['content'], der_encoder(sc))
+
+ for sb in sc:
+ if sb['bagId'] == rfc7292.id_pkcs8ShroudedKeyBag:
+ bv = sb['bagValue']
+ enc_alg = bv['encryptionAlgorithm']['algorithm']
+ self.assertEqual(
+ rfc7292.pbeWithSHAAnd3_KeyTripleDES_CBC, enc_alg)
+ enc_alg_param = bv['encryptionAlgorithm']['parameters']
+ self.assertEqual(2000, enc_alg_param['iterations'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7296.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7296.py
new file mode 100644
index 0000000000..4bc7577073
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7296.py
@@ -0,0 +1,160 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc7296
+
+
+class CertBundleTestCase(unittest.TestCase):
+ cert_bundle_pem_text = """\
+MIITfqCCA8kwggPFMIICraADAgECAhACrFwmagtAm48LefKuRiV3MA0GCSqGSIb3
+DQEBBQUAMGwxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAX
+BgNVBAsTEHd3dy5kaWdpY2VydC5jb20xKzApBgNVBAMTIkRpZ2lDZXJ0IEhpZ2gg
+QXNzdXJhbmNlIEVWIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAw
+MDAwWjBsMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYD
+VQQLExB3d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFz
+c3VyYW5jZSBFViBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAxszlc+b71LvlLS0ypt/lgT/JzSVJtnEqw9WUNGeiChywX2mmQLHEt7KP0Jik
+qUFZOtPclNY823Q4pErMTSWC90qlUxI47vNJbXGRfmO2q6Zfw6SE+E9iUb74xezb
+OJLjBuUIkQzEKEFV+8taiRV+ceg1v01yCT2+OjhQW3cxG42zxyRFmqesbQAUWgS3
+uhPrUQqYQUEiTmVhh4FBUKZ5XIneGUpX1S7mXRxTLH6YzRoGFqRoc9A0BBNcoXHT
+WnxV215k4TeHMFYE5RG0KYAS8Xk5iKICEXwnZreIt3jyygqoOKsKZMK/Zl2VhMGh
+JR6HXRpQCyASzEG7bgtROLhLywIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUsT7DaQP4v0cB1JgmGggC72NkK8MwHwYD
+VR0jBBgwFoAUsT7DaQP4v0cB1JgmGggC72NkK8MwDQYJKoZIhvcNAQEFBQADggEB
+ABwaBpfc15yfPIhmBghXIdshR/gqZ6q/GDJ2QBBXwYrzetkRZY41+p78RbWe2Uwx
+S7iR6EMsjrN4ztvjU3lx1uUhlAHaVYeaJGT2imbM3pw3zag0sWmbI8ieeCIrcEPj
+VUcxYRnvWMWFL04w9qAxFiPI5+JlFjPLvxoboD34yl6LMYtgCIktDAZcUrfE+QqY
+0RVfnxK+fDZjOL1EpH/kJisKxJdpDemM4sAQV7jIdhKRVfJIadi8KgJbD0TUIDHb
+9LpwJl2QYJ68SxcJL7TLHkNoyQcnwdJc9+ohuWgSnDycv578gFybY83sR6olJ2eg
+N/MAgn1U16n46S4To3foH0qgggS6MIIEtjCCA56gAwIBAgIQDHmpRLCMEZUgkmFf
+4msdgzANBgkqhkiG9w0BAQsFADBsMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGln
+aUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJE
+aWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTEzMTAyMjEyMDAw
+MFoXDTI4MTAyMjEyMDAwMFowdTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lD
+ZXJ0IEluYzEZMBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTE0MDIGA1UEAxMrRGln
+aUNlcnQgU0hBMiBFeHRlbmRlZCBWYWxpZGF0aW9uIFNlcnZlciBDQTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBANdTpARR+JmmFkhLZyeqk0nQOe0MsLAA
+h/FnKIaFjI5j2ryxQDji0/XspQUYuD0+xZkXMuwYjPrxDKZkIYXLBxA0sFKIKx9o
+m9KxjxKws9LniB8f7zh3VFNfgHk/LhqqqB5LKw2rt2O5Nbd9FLxZS99RStKh4gzi
+kIKHaq7q12TWmFXo/a8aUGxUvBHy/Urynbt/DvTVvo4WiRJV2MBxNO723C3sxIcl
+ho3YIeSwTQyJ3DkmF93215SF2AQhcJ1vb/9cuhnhRctWVyh+HA1BV6q3uCe7seT6
+Ku8hI3UarS2bhjWMnHe1c63YlC3k8wyd7sFOYn4XwHGeLN7x+RAoGTMCAwEAAaOC
+AUkwggFFMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud
+JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA0BggrBgEFBQcBAQQoMCYwJAYIKwYB
+BQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2ljZXJ0LmNvbTBLBgNVHR8ERDBCMECgPqA8
+hjpodHRwOi8vY3JsNC5kaWdpY2VydC5jb20vRGlnaUNlcnRIaWdoQXNzdXJhbmNl
+RVZSb290Q0EuY3JsMD0GA1UdIAQ2MDQwMgYEVR0gADAqMCgGCCsGAQUFBwIBFhxo
+dHRwczovL3d3dy5kaWdpY2VydC5jb20vQ1BTMB0GA1UdDgQWBBQ901Cl1qCt7vNK
+YApl0yHU+PjWDzAfBgNVHSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2QrwzANBgkq
+hkiG9w0BAQsFAAOCAQEAnbbQkIbhhgLtxaDwNBx0wY12zIYKqPBKikLWP8ipTa18
+CK3mtlC4ohpNiAexKSHc59rGPCHg4xFJcKx6HQGkyhE6V6t9VypAdP3THYUYUN9X
+R3WhfVUgLkc3UHKMf4Ib0mKPLQNa2sPIoc4sUqIAY+tzunHISScjl2SFnjgOrWNo
+PLpSgVh5oywM395t6zHyuqB8bPEs1OG9d4Q3A84ytciagRpKkk47RpqF/oOi+Z6M
+o8wNXrM9zwR4jxQUezKcxwCmXMS1oVWNWlZopCJwqjyBcdmdqEU79OX2olHdx3ti
+6G8MdOu42vi/hw15UJGQmxg7kVkn8TUoE6smftX3eqCCB9wwggfYMIIGwKADAgEC
+AhABW9pmX8RLdRe2iCweq9TcMA0GCSqGSIb3DQEBCwUAMHUxCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xNDAyBgNVBAMTK0RpZ2lDZXJ0IFNIQTIgRXh0ZW5kZWQgVmFsaWRhdGlvbiBT
+ZXJ2ZXIgQ0EwHhcNMTgwODE0MDAwMDAwWhcNMjAwODE4MTIwMDAwWjCB3DEdMBsG
+A1UEDwwUUHJpdmF0ZSBPcmdhbml6YXRpb24xEzARBgsrBgEEAYI3PAIBAxMCVVMx
+GTAXBgsrBgEEAYI3PAIBAhMIRGVsYXdhcmUxEDAOBgNVBAUTBzMwMTQyNjcxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMREwDwYDVQQHEwhTYW4gSm9z
+ZTEVMBMGA1UEChMMUGF5UGFsLCBJbmMuMRQwEgYDVQQLEwtDRE4gU3VwcG9ydDEX
+MBUGA1UEAxMOd3d3LnBheXBhbC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQDOofrgGYvXjVHH1WKEgxO51/bNk8Vw0WlZAyu0iwAUULZ3mrI8+xOw
+gE5VGghgoQY9QNIA0mdFPrEmRRQAZXitszlL5s8oks4+tFzBHHtJp2D9BixRKxAR
+Afo6c54tufaJUrQyIMwr2mpfbPox3palkK7RmHdimcOqtUjjQyS/WcHxMkyX3wa9
+e1JoEB9ofJGupNnC90uGgxilWLvOtn/27w56p2AYkKoSGgXsNRGE5ySxns23sZOo
+tgSeTRe16K7X5JuzPcGtZGMRxlkVagZsrp8rNsf4aq0wKkBjkvVzSvJTaDJSDqEt
+hV+ZoGSFYpwaHArVir0sJ63E/aq2Tb97AgMBAAGjggP6MIID9jAfBgNVHSMEGDAW
+gBQ901Cl1qCt7vNKYApl0yHU+PjWDzAdBgNVHQ4EFgQUuzrmqCkAmIQyec538AFt
+Xwp5Y7kwgaUGA1UdEQSBnTCBmoIOd3d3LnBheXBhbC5jb22CEmhpc3RvcnkucGF5
+cGFsLmNvbYIMdC5wYXlwYWwuY29tggxjLnBheXBhbC5jb22CDWM2LnBheXBhbC5j
+b22CFGRldmVsb3Blci5wYXlwYWwuY29tggxwLnBheXBhbC5jb22CFXd3dy5wYXlw
+YWxvYmplY3RzLmNvbYIOY21zLnBheXBhbC5jb20wDgYDVR0PAQH/BAQDAgWgMB0G
+A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjB1BgNVHR8EbjBsMDSgMqAwhi5o
+dHRwOi8vY3JsMy5kaWdpY2VydC5jb20vc2hhMi1ldi1zZXJ2ZXItZzIuY3JsMDSg
+MqAwhi5odHRwOi8vY3JsNC5kaWdpY2VydC5jb20vc2hhMi1ldi1zZXJ2ZXItZzIu
+Y3JsMEsGA1UdIAREMEIwNwYJYIZIAYb9bAIBMCowKAYIKwYBBQUHAgEWHGh0dHBz
+Oi8vd3d3LmRpZ2ljZXJ0LmNvbS9DUFMwBwYFZ4EMAQEwgYgGCCsGAQUFBwEBBHww
+ejAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29tMFIGCCsGAQUF
+BzAChkZodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNlcnRTSEEyRXh0
+ZW5kZWRWYWxpZGF0aW9uU2VydmVyQ0EuY3J0MAwGA1UdEwEB/wQCMAAwggF+Bgor
+BgEEAdZ5AgQCBIIBbgSCAWoBaAB3AKS5CZC0GFgUh7sTosxncAo8NZgE+RvfuON3
+zQ7IDdwQAAABZTquQ3wAAAQDAEgwRgIhAMvZlCpgP2+v8gH82y3PQoMNVUVQNBjG
+4DZy7qRFBo0JAiEAkzEfNkc2/B+88VR3QjutnaF1Qpj0QkSodPGAtB377UUAdQBW
+FAaaL9fC7NP14b1Esj7HRna5vJkRXMDvlJhV1onQ3QAAAWU6rkPZAAAEAwBGMEQC
+IHAvzbsYhbMy5jUazj6X3mDMjjyryN5BMwbDIFv58T9nAiBxzUIRTfj+Kevp0mmO
+Oe9q6K/klOU2klRuVmcs7Gzw8AB2ALvZ37wfinG1k5Qjl6qSe0c4V5UKq1LoGpCW
+ZDaOHtGFAAABZTquRGgAAAQDAEcwRQIhAMvzcJw5loOfVnDNFEr4+c4y/usA2pU5
+M7vhHND680tHAiASqPd7KXNaNTJsBJ9IfBN6J2XwGJjxccRy9fJc9+UgYjANBgkq
+hkiG9w0BAQsFAAOCAQEAoeuef8cXLigvTQs4lbtbyp4UOIzspiMmHztqB95OS0ER
+/u7995SO0C0mQjvyPeiptQ5Yh+/OVCqV6p2ZpBmSc+mn5tzjP3LaVxoyjwghja03
+mNBXPmdkEIG+V78Ov5iIm6vxGH1xSjHssV8iXpWo3gJ+xH3krtY1Atkg243JgwNC
+I3xgp01VMLAmvIvvTqmIKeEd88Ukc6kHcZsEjxwtNivWx2nl1cyDu9B1wJK0D5Mu
+IBXgbFKmqUhWlEXRimphvONOJGd71qT94bT/+bhq28oGleH1leTvqft0fj+e/a7e
+Hx1u3fYAxNWjNAImIxpGUyUwSVo29w/CYYc2cS69y6GB7TCB6jCBqQIBATALBgcq
+hkjOOAQDBQAwLjELMAkGA1UEBhMCdXMxDDAKBgNVBAoTA3N1bjERMA8GA1UEAxMI
+aGFuZmVpeXUXDTA1MDEwNzIwMDkxMFoXDTA2MDEwNzIwMDkxMFowSTAjAgMBCTIX
+DTA1MDEwNzIwMDkxMFowDTALBgNVHRUEBAoCAQQwIgICMDkXDTA1MDEwNzIwMDkx
+MFowDTALBgNVHRUEBAoCAQEwCwYHKoZIzjgEAwUAAy8AMCwCFFbxw8qxTDJqc8H9
+O1QIkzwkkvJfAhRF5zFU8mFsrKmnE50ERySS8vA6AKGCAh8wggIbMIIBAwIBATAN
+BgkqhkiG9w0BAQsFADBsMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQg
+SW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2Vy
+dCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBFw0xOTA1MDIyMjE1NTRaFw0xOTA1
+MjMyMjE1NTRaMDEwLwIQDPWCOBgZnlb4K9ZS7Sft6RcNMTgxMDI1MTYxMTM4WjAM
+MAoGA1UdFQQDCgEAoDAwLjAfBgNVHSMEGDAWgBSxPsNpA/i/RwHUmCYaCALvY2Qr
+wzALBgNVHRQEBAICAcQwDQYJKoZIhvcNAQELBQADggEBABPO3OA0OkQZ+RLVxz/c
+Nx5uNVEO416oOePkN0A4DxFztf337caS4OyfS9Wyu1j5yUdWJVpAKXSQeN95MqHk
+pSpYDssuqbuYjv8ViJfseGBgtXTczUzzNeNdY2uxMbCxuhmPkgacAo1lx9LkK2Sc
+YHWVbfFRF1UQ/dcmavaZsEOBNuLWOxQYA9MqfVNAymHe7vPqwm/8IY2FbHe9HsiJ
+ZfGxNWMDP5lmJiXmpntTeDQ2UjdiyXwGGKjyiSTFk2jVRutrGINufaoA/f7eCmIb
+4UDPbpMjVfD215dW8eBKouypCVoEvmCSSTacdiBI2yOluvMN0PzvPve0ECAE+D4e
+m9Y=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc7296.CertificateBundle()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_bundle_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ cert_count = 0
+ crl_count = 0
+ unk_count = 0
+
+ for item in asn1Object:
+ if item.getName() == 'cert':
+ cert_count += 1
+
+ elif item.getName() == 'crl':
+ crl_count += 1
+
+ else:
+ unk_count += 1
+
+ self.assertEqual(3, cert_count)
+ self.assertEqual(2, crl_count)
+ self.assertEqual(0, unk_count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7508.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7508.py
new file mode 100644
index 0000000000..914e6d8b2a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7508.py
@@ -0,0 +1,134 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc7508
+
+
+class SignedMessageTestCase(unittest.TestCase):
+ signed_message_pem_text = """\
+MIIE/AYJKoZIhvcNAQcCoIIE7TCCBOkCAQExDTALBglghkgBZQMEAgIwUQYJKoZI
+hvcNAQcBoEQEQkNvbnRlbnQtVHlwZTogdGV4dC9wbGFpbg0KDQpXYXRzb24sIGNv
+bWUgaGVyZSAtIEkgd2FudCB0byBzZWUgeW91LqCCAnwwggJ4MIIB/qADAgECAgkA
+pbNUKBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1Mjkx
+NDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJW
+QTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMF
+QWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+b
+z7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0E
+NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
+cnBvc2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAW
+gBTyNds0BNqlVfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL
+3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94Mae
+rDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUxggIA
+MIIB/AIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglghkgBZQME
+AgKgggElMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8X
+DTE5MDUyOTE4MjMxOVowKAYJKoZIhvcNAQk0MRswGTALBglghkgBZQMEAgKhCgYI
+KoZIzj0EAwMwMQYLKoZIhvcNAQkQAjcxIjEgCgEBMBswGRoERnJvbQwRYWxpY2VA
+ZXhhbXBsZS5jb20wPwYJKoZIhvcNAQkEMTIEMLbkIqT9gmce1Peqxm1E9OiwuY1R
+WHHGVufwmjb6XKzj4goQ5tryN5uJN9NM+ZkmbDBNBgsqhkiG9w0BCRACATE+MDwE
+IMdPIQ9kJ1cI9Q6HkRCzbXWdD331uAUCL3MMFXP4KFOjgAEBMBUwE4ERYWxpY2VA
+ZXhhbXBsZS5jb20wCgYIKoZIzj0EAwMEZzBlAjEAuZ8SebvwMRvLPn9+s3VHFUNU
+bEtkkWCao1uNm5TOzphK0NbxzOsD854aC5ReKPSDAjAm1U0siLQw5p4qzGwyxDw9
+5AI5J8Mvy+icNubmfsd4ofvxdaECdhr4rvsSMwbOsFk=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ secure_header_field_attr_found = False
+
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ sat = sa['attrType']
+ sav0 = sa['attrValues'][0]
+
+ if sat == rfc7508.id_aa_secureHeaderFieldsIdentifier:
+ self.assertIn(sat, rfc5652.cmsAttributesMap)
+ sav, rest = der_decoder(
+ sav0, asn1Spec=rfc5652.cmsAttributesMap[sat])
+
+ self.assertFalse(rest)
+ self.assertTrue(sav.prettyPrint())
+ self.assertEqual(sav0, der_encoder(sav))
+
+ from_field = rfc7508.HeaderFieldName('From')
+ alice_email = rfc7508.HeaderFieldValue('alice@example.com')
+ for shf in sav['secHeaderFields']:
+ if shf['field-Name'] == from_field:
+ self.assertEqual(alice_email, shf['field-Value'])
+ secure_header_field_attr_found = True
+
+ self.assertTrue(secure_header_field_attr_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.signed_message_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['contentType'], rfc5652.cmsContentTypesMap)
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+
+ sd = asn1Object['content']
+
+ self.assertEqual(
+ rfc5652.CMSVersion().subtype(value='v1'), sd['version'])
+
+ ect = sd['encapContentInfo']['eContentType']
+
+ self.assertIn(ect, rfc5652.cmsContentTypesMap)
+ self.assertEqual(rfc5652.id_data, ect)
+
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc7508.id_aa_secureHeaderFieldsIdentifier:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+
+ secure_header_field_attr_found = False
+ for sa in sd['signerInfos'][0]['signedAttrs']:
+ if sa['attrType'] == rfc7508.id_aa_secureHeaderFieldsIdentifier:
+ self.assertIn(sa['attrType'], rfc5652.cmsAttributesMap)
+ from_field = rfc7508.HeaderFieldName('From')
+ alice_email = rfc7508.HeaderFieldValue('alice@example.com')
+ for shf in sa['attrValues'][0]['secHeaderFields']:
+ if shf['field-Name'] == from_field:
+ self.assertEqual(alice_email, shf['field-Value'])
+ secure_header_field_attr_found = True
+
+ self.assertTrue(secure_header_field_attr_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7585.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7585.py
new file mode 100644
index 0000000000..5e538347c9
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7585.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7585
+
+
+class NAIRealmCertTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIEZzCCA0+gAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBkjELMAkGA1UEBhMCRlIx
+DzANBgNVBAgMBlJhZGl1czESMBAGA1UEBwwJU29tZXdoZXJlMRQwEgYDVQQKDAtF
+eGFtcGxlIEluYzEgMB4GCSqGSIb3DQEJARYRYWRtaW5AZXhhbXBsZS5vcmcxJjAk
+BgNVBAMMHUV4YW1wbGUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE5MTExMTE4
+MDQyMVoXDTIwMDExMDE4MDQyMVowezELMAkGA1UEBhMCRlIxDzANBgNVBAgMBlJh
+ZGl1czEUMBIGA1UECgwLRXhhbXBsZSBJbmMxIzAhBgNVBAMMGkV4YW1wbGUgU2Vy
+dmVyIENlcnRpZmljYXRlMSAwHgYJKoZIhvcNAQkBFhFhZG1pbkBleGFtcGxlLm9y
+ZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM9HqbuyWpsTMKo739Dm
+DwmQo2HUkNdQYbvsB+e7ILsw8fWa2qnsF1CoRr/1bcZqXUR1To/QbHse7xSMZH9t
+F7rdlDMc7QtgdwVfn8TiL3hCg5LSE8iaBzfJUjrts/V5WOByP1DwJVM7W3Va/5dN
+oOiceVeC7ThghMlwIx/wN5cy78a8fPYV2FvPR6e+U2HG35zaIv2PizYcliF/QmZG
+gnw4Q9dYC1Lw/ogVBZBALlv+/MuGheb/xIuL8lu1PFZ0YbW65WLD9Cx4wvytAke7
+tKlhL/Kd4OBSeOY3OYmpxbc1gEUmFoLTlZesY2NP9Jyl5mGsIHtPdvVkh/tSBy8o
+VLUCAwEAAaOB3TCB2jAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DATBgNVHSUEDDAK
+BggrBgEFBQcDATA2BgNVHR8ELzAtMCugKaAnhiVodHRwOi8vd3d3LmV4YW1wbGUu
+Y29tL2V4YW1wbGVfY2EuY3JsMDcGCCsGAQUFBwEBBCswKTAnBggrBgEFBQcwAYYb
+aHR0cDovL3d3dy5leGFtcGxlLm9yZy9vY3NwMDoGA1UdEQQzMDGCEnJhZGl1cy5l
+eGFtcGxlLm9yZ6AbBggrBgEFBQcICKAPDA0qLmV4YW1wbGUuY29tMA0GCSqGSIb3
+DQEBCwUAA4IBAQBOhtH2Jpi0b0MZ8FBKTqDl44rIHL1rHG2mW/YYmRI4jZo8kFhA
+yWm/T8ZpdaotJgRqbQbeXvTXIg4/JNFheyLG4yLOzS1esdMAYDD5EN9/dXE++jND
+/wrfPU+QtTgzAjkgFDKuqO7gr1/vSizxLYTWLKBPRHhiQo7GGlEC6/CPb38x4mfQ
+5Y9DsKCp6BEZu+LByCho/HMDzcIPCdtXRX7Fs8rtX4/zRpVIdm6D+vebuo6CwRKp
+mIljfssCvZjb9YIxSVDmA/6Lapqsfsfo922kb+MTXvPrq2ynPx8LrPDrxKc8maYc
+Jiw8B0yjkokwojxyRGftMT8uxNjWQVsMDbxl
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ nai_realm_oid = rfc7585.id_on_naiRealm
+ nai_realm_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ self.assertEqual(
+ nai_realm_oid, gn['otherName']['type-id'])
+
+ onValue, rest = der_decoder(
+ gn['otherName']['value'], asn1Spec=rfc7585.NAIRealm())
+
+ self.assertFalse(rest)
+ self.assertTrue(onValue.prettyPrint())
+ self.assertEqual(
+ gn['otherName']['value'], der_encoder(onValue))
+ self.assertIn('example', onValue)
+
+ nai_realm_found = True
+
+ self.assertTrue(nai_realm_found)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ nai_realm_oid = rfc7585.id_on_naiRealm
+ nai_realm_found = False
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] == rfc5280.id_ce_subjectAltName:
+ extnValue, rest = der_decoder(
+ extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(extnValue.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ for gn in extnValue:
+ if gn['otherName'].hasValue():
+ self.assertEqual(
+ nai_realm_oid, gn['otherName']['type-id'])
+ self.assertIn('example', gn['otherName']['value'])
+
+ nai_realm_found = True
+
+ self.assertTrue(nai_realm_found)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7633.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7633.py
new file mode 100644
index 0000000000..64e874e7b1
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7633.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7633
+
+
+class TLSFeaturesExtnTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEbTCCBBOgAwIBAgIRAO5f2N8q74GBATjTMXQCjlgwCgYIKoZIzj0EAwIwgZYx
+CzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNV
+BAcTB1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMTwwOgYDVQQD
+EzNDT01PRE8gRUNDIE9yZ2FuaXphdGlvbiBWYWxpZGF0aW9uIFNlY3VyZSBTZXJ2
+ZXIgQ0EwHhcNMTYwMTE1MDAwMDAwWhcNMTgwMTE0MjM1OTU5WjCBwjELMAkGA1UE
+BhMCUlUxDzANBgNVBBETBjExNzY0NzEUMBIGA1UECBMLTW9zY293IENpdHkxDzAN
+BgNVBAcTBk1vc2NvdzE4MDYGA1UECRMvQWthZGVtaWthIEthcGljeSBzdHJlZXQs
+IGhvdXNlIDQsIGFwYXJ0bWVudCAxNjYxGDAWBgNVBAoTD0FuZHJleSBDaHVyYW5v
+djETMBEGA1UECxMKSW5zdGFudFNTTDESMBAGA1UEAxMJYWRtc2VsLmVjMHYwEAYH
+KoZIzj0CAQYFK4EEACIDYgAEwrPPzgBO1vDNmV0UVvYSBnys9B7LVkGLiIBbKYf2
+nNFRuJKo1gzNurI8pv4CbvqjkCX4Je/aSeYFHSCR9y82+zTwYQuJFt5LIL5f+Syp
+xZ7aLH56bOiQ+QhCtIvWP4YWo4IB9TCCAfEwHwYDVR0jBBgwFoAUdr4iSO4/PvZG
+A9mHGNBlfiKcC+EwHQYDVR0OBBYEFHTFQqV+H5a7+RVL+70Z6zqCbqq9MA4GA1Ud
+DwEB/wQEAwIFgDAMBgNVHRMBAf8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
+BgEFBQcDAjBQBgNVHSAESTBHMDsGDCsGAQQBsjEBAgEDBDArMCkGCCsGAQUFBwIB
+Fh1odHRwczovL3NlY3VyZS5jb21vZG8uY29tL0NQUzAIBgZngQwBAgIwWgYDVR0f
+BFMwUTBPoE2gS4ZJaHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPRUNDT3Jn
+YW5pemF0aW9uVmFsaWRhdGlvblNlY3VyZVNlcnZlckNBLmNybDCBiwYIKwYBBQUH
+AQEEfzB9MFUGCCsGAQUFBzAChklodHRwOi8vY3J0LmNvbW9kb2NhLmNvbS9DT01P
+RE9FQ0NPcmdhbml6YXRpb25WYWxpZGF0aW9uU2VjdXJlU2VydmVyQ0EuY3J0MCQG
+CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wEQYIKwYBBQUHARgE
+BTADAgEFMCMGA1UdEQQcMBqCCWFkbXNlbC5lY4INd3d3LmFkbXNlbC5lYzAKBggq
+hkjOPQQDAgNIADBFAiAi6TXl76FTKPP1AhqtEjU5BjAj9Ju7CSKChHZSmzxeXQIh
+AOQSxhs011emVxyBIXT0ZGbmBY8LFRh6eGIOCAJbkM5T
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] == rfc7633.id_pe_tlsfeature:
+ s = extn['extnValue']
+ features, rest = der_decoder(
+ s, rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(features.prettyPrint())
+ self.assertEqual(s, der_encoder(features))
+ self.assertEqual(1, len(features))
+ self.assertEqual(5, features[0])
+
+ self.assertIn(rfc7633.id_pe_tlsfeature, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7773.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7773.py
new file mode 100644
index 0000000000..2b4e50b7cc
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7773.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7773
+
+
+class AuthenticationContextExtnTestCase(unittest.TestCase):
+ pem_text = """\
+MIIMUjCCCzqgAwIBAgIQevDaX+wRYAlpUgjTYjCCRjANBgkqhkiG9w0BAQsFADCBuDELMAkGA1UE
+BhMCU0UxNTAzBgNVBAoTLERldiBURVNUIENBIG9yZyBBQiAoTk9UIEEgUkVBTCBPUkdBTklaQVRJ
+T04pMSAwHgYDVQQLExdDZW50cmFsIFNpZ25pbmcgU2VydmljZTEVMBMGA1UEBRMMQTEyMzQ1Ni03
+ODkwMTkwNwYDVQQDEzBDZW50cmFsIFNpZ25pbmcgQ0EwMDEgLSBFSUQgMi4wIERldiBURVNUIFNl
+cnZpY2UwHhcNMTkxMDA5MDc0ODI2WhcNMjAxMDA5MDc0ODI2WjBgMRUwEwYDVQQFEwwxODg4MDMw
+OTkzNjgxCzAJBgNVBAYTAlNFMQ0wCwYDVQQqEwRBZ2RhMRcwFQYDVQQDEw5BZ2RhIEFuZGVyc3Nv
+bjESMBAGA1UEBBMJQW5kZXJzc29uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjl1H
+7vveI/EUaF9z6EiL/AmTHDbpLAKoWh9JJjpRlb8lU0TseYOzZp6ySiAO8St2a/HxxhrNuAAELUwZ
+3oICkmxM/NeYgI7EEaLVPUwBAWfGZrRWb/+h8C6SrivWc73M/LI1A0B9tcEpUuh0CHTSVIBZsH+L
+IDyKW6n3T8YeI7+0CX391I/j3iyEBNFcfDaHaFChzkPxgPg6Xh1D1JWs+mUj1rOoTLxsyusWiIQk
+IkjDgFNUCpS1+NUvkTU1uFewvluxjOzRVqzYZWesOL+V/lGnyVPw4o1INEKYpOurYii2TXElTmXO
+iQdIG20S96uFH6vFFJ2cPwgYjWpory/K+QIDAQABo4IIrTCCCKkwCwYDVR0PBAQDAgZAMB0GA1Ud
+DgQWBBQo71oFnxX2kapLl3ZoYOylnJo01TATBgNVHSAEDDAKMAgGBgQAizABATBLBgNVHR8ERDBC
+MECgPqA8hjpodHRwczovL2VpZDJjc2lnLmtvbmtpLnNlL3B1Ymxpc2gvY3JsLzE4MTRiMGFiYzEx
+NGM3YmEuY3JsMIIH6wYHKoVwgUkFAQSCB94wggfaMIIH1gwraHR0cDovL2lkLmVsZWduYW1uZGVu
+LnNlL2F1dGgtY29udC8xLjAvc2FjaQyCB6U8c2FjaTpTQU1MQXV0aENvbnRleHQgeG1sbnM6c2Fj
+aT0iaHR0cDovL2lkLmVsZWduYW1uZGVuLnNlL2F1dGgtY29udC8xLjAvc2FjaSI+PHNhY2k6QXV0
+aENvbnRleHRJbmZvIElkZW50aXR5UHJvdmlkZXI9Imh0dHA6Ly9kZXYudGVzdC5zd2VkZW5jb25u
+ZWN0LnNlL2lkcCIgQXV0aGVudGljYXRpb25JbnN0YW50PSIyMDE5LTEwLTA5VDA3OjU4OjI2LjAw
+MFoiIFNlcnZpY2VJRD0iRmVkU2lnbmluZyIgQXV0aG5Db250ZXh0Q2xhc3NSZWY9Imh0dHA6Ly9p
+ZC5lbGVnbmFtbmRlbi5zZS9sb2EvMS4wL2xvYTMtc2lnbWVzc2FnZSIgQXNzZXJ0aW9uUmVmPSJf
+ZGM5MjM0Y2Y3Zjc5OWQwMDlmMjUwNWVhMzVlMWU0NmUiLz48c2FjaTpJZEF0dHJpYnV0ZXM+PHNh
+Y2k6QXR0cmlidXRlTWFwcGluZyBUeXBlPSJyZG4iIFJlZj0iMi41LjQuNSI+PHNhbWw6QXR0cmli
+dXRlIEZyaWVuZGx5TmFtZT0iU3dlZGlzaCBQZXJzb25udW1tZXIiIE5hbWU9InVybjpvaWQ6MS4y
+Ljc1Mi4yOS40LjEzIiB4bWxuczpzYW1sPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6YXNz
+ZXJ0aW9uIj48c2FtbDpBdHRyaWJ1dGVWYWx1ZSB4c2k6dHlwZT0ieHM6c3RyaW5nIiB4bWxuczp4
+cz0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3
+dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiPjE4ODgwMzA5OTM2ODwvc2FtbDpBdHRy
+aWJ1dGVWYWx1ZT48L3NhbWw6QXR0cmlidXRlPjwvc2FjaTpBdHRyaWJ1dGVNYXBwaW5nPjxzYWNp
+OkF0dHJpYnV0ZU1hcHBpbmcgVHlwZT0icmRuIiBSZWY9IjIuNS40LjQyIj48c2FtbDpBdHRyaWJ1
+dGUgRnJpZW5kbHlOYW1lPSJHaXZlbiBOYW1lIiBOYW1lPSJ1cm46b2lkOjIuNS40LjQyIiB4bWxu
+czpzYW1sPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6YXNzZXJ0aW9uIj48c2FtbDpBdHRy
+aWJ1dGVWYWx1ZSB4c2k6dHlwZT0ieHM6c3RyaW5nIiB4bWxuczp4cz0iaHR0cDovL3d3dy53My5v
+cmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxT
+Y2hlbWEtaW5zdGFuY2UiPkFnZGE8L3NhbWw6QXR0cmlidXRlVmFsdWU+PC9zYW1sOkF0dHJpYnV0
+ZT48L3NhY2k6QXR0cmlidXRlTWFwcGluZz48c2FjaTpBdHRyaWJ1dGVNYXBwaW5nIFR5cGU9InJk
+biIgUmVmPSIyLjUuNC4zIj48c2FtbDpBdHRyaWJ1dGUgRnJpZW5kbHlOYW1lPSJEaXNwbGF5IE5h
+bWUiIE5hbWU9InVybjpvaWQ6Mi4xNi44NDAuMS4xMTM3MzAuMy4xLjI0MSIgeG1sbnM6c2FtbD0i
+dXJuOm9hc2lzOm5hbWVzOnRjOlNBTUw6Mi4wOmFzc2VydGlvbiI+PHNhbWw6QXR0cmlidXRlVmFs
+dWUgeHNpOnR5cGU9InhzOnN0cmluZyIgeG1sbnM6eHM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEv
+WE1MU2NoZW1hIiB4bWxuczp4c2k9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWlu
+c3RhbmNlIj5BZ2RhIEFuZGVyc3Nvbjwvc2FtbDpBdHRyaWJ1dGVWYWx1ZT48L3NhbWw6QXR0cmli
+dXRlPjwvc2FjaTpBdHRyaWJ1dGVNYXBwaW5nPjxzYWNpOkF0dHJpYnV0ZU1hcHBpbmcgVHlwZT0i
+cmRuIiBSZWY9IjIuNS40LjQiPjxzYW1sOkF0dHJpYnV0ZSBGcmllbmRseU5hbWU9IlN1cm5hbWUi
+IE5hbWU9InVybjpvaWQ6Mi41LjQuNCIgeG1sbnM6c2FtbD0idXJuOm9hc2lzOm5hbWVzOnRjOlNB
+TUw6Mi4wOmFzc2VydGlvbiI+PHNhbWw6QXR0cmlidXRlVmFsdWUgeHNpOnR5cGU9InhzOnN0cmlu
+ZyIgeG1sbnM6eHM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hIiB4bWxuczp4c2k9
+Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlIj5BbmRlcnNzb248L3Nh
+bWw6QXR0cmlidXRlVmFsdWU+PC9zYW1sOkF0dHJpYnV0ZT48L3NhY2k6QXR0cmlidXRlTWFwcGlu
+Zz48L3NhY2k6SWRBdHRyaWJ1dGVzPjwvc2FjaTpTQU1MQXV0aENvbnRleHQ+MAkGA1UdEwQCMAAw
+HwYDVR0jBBgwFoAUqKv0QPwAYcLfcD/Vy1A2deHtiqcwDQYJKoZIhvcNAQELBQADggEBAETlZOIL
+NknxlMiYHCxoYypyzYuza2l3M4+YWakT0vFPgXpCk+l0dNst7h9nWvKKHCboSj+YP5dUCSsuUXhb
+7xTei/F2nj7q1oCPuVJGThZqhWgF/JkqOy34hHEM5VniJiQu2W9TjzRMSOSFzRlQsHcOuXzdTkhr
+CQpD1TWxYL9sCy4YoCdE4edfgBGBMujxoijl3/xJ5uI1FjhlSPVP88p8Wsi8i7GdMYuxqjZMwrt2
+PHIPgop3BNN9/BzW0cmdyNvFgcD9qR8Rv5aFBYuQbyg6fST8JdAOrbMrCST6v2U41OOXH5MC/kL6
+tAGXsYdcuQpglUngmo/FV4Z9qjIDkYQ=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc7773.id_ce_authContext:
+ s = extn['extnValue']
+ acs, rest = der_decoder(
+ s, asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+ self.assertFalse(rest)
+ self.assertTrue(acs.prettyPrint())
+ self.assertEqual(s, der_encoder(acs))
+ self.assertIn('id.elegnamnden.se', acs[0]['contextType'])
+ self.assertIn(
+ 'AuthContextInfo IdentityProvider', acs[0]['contextInfo'])
+
+ self.assertIn(rfc7773.id_ce_authContext, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7894.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7894.py
new file mode 100644
index 0000000000..3d38155c5a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7894.py
@@ -0,0 +1,84 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7894
+
+
+class AlternativeChallengePasswordTestCase(unittest.TestCase):
+ otp_pem_text = """\
+MIICsjCCAZwCAQAwJDELMAkGA1UEBhMCVVMxFTATBgNVBAMTDDRUUzJWMk5MWEE2
+WjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKmF0oUj5+1rBB+pUO8X
+7FPxer+1BhWOa54RTSucJmBaLx0H95qNaBCcctNDl1kcmIro/a0zMcEvj5Do29vQ
+lStJdTeJ/B3X4qzOGShupxJcAhCreRZjN6Yz3T9z0zJ8OPnRvJOzcSiIzlubc9lK
+Cpq4U0UsCLLfymOgL9NH4lZi96J+PFuJr0J+rTY38076U2jcPqNq5/L/d6NV9Sz2
+IVOvCK1kqP/nElJVibIQZvj9YESLUKyVAfTNxLj3+IpioOOv2dT3kB9wdi4plAVi
+UFEUvED1okRrI29+LdPV1UXglOCksyJIIw+DgDtutDE5Co6QkTNURFEdKIV9Sg13
+zEECAwEAAaBLMBkGCyqGSIb3DQEJEAI4MQoTCDkwNTAzODQ2MC4GCSqGSIb3DQEJ
+DjEhMB8wHQYDVR0OBBYEFBj12LVowM16Ed0D+AmoElKNYP/kMAsGCSqGSIb3DQEB
+CwOCAQEAZZdDWKejs3UVfgZI3R9cMWGijmscVeZrjwFVkn7MI9pEDZ2aS1QaRYjY
+1cu9j3i+LQp9LWPIW/ztYk11e/OcZp3fo8pZ+MT66n7YTWfDXNkqqA5xmI84DMEx
+/cqenyzOBZWqpZGx7eyM9BtnrdeJ0r2qSc7LYU25FbIQFJJf8IvgMAXWMs50fvs2
+Gzns447x952se2ReQ3vYhXdHvYYcgAZfSJZvK+nCmhzzqowv5p15Y5S+IHpBSXTO
+a1qhNW4cjdicQZUeQ2R5kiuwZ+8vHaq9jKxAEk0hBeqG6RQaxvNOBQhHtTLNGw/C
+NmaF8Y2Sl/MgvC5tjs0Ck0/r3lsoLQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc6402.CertificationRequest()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.otp_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(0, asn1Object['certificationRequestInfo']['version'])
+
+ for attr in asn1Object['certificationRequestInfo']['attributes']:
+ self.assertIn(
+ attr['attrType'], rfc6402.cmcControlAttributesMap)
+
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ rfc6402.cmcControlAttributesMap[attr['attrType']])
+
+ self.assertFalse(rest)
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ if attr['attrType'] == rfc7894.id_aa_otpChallenge:
+ self.assertEqual('90503846', av['printableString'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.otp_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object['certificationRequestInfo']['attributes']:
+ self.assertIn(attr['attrType'], rfc6402.cmcControlAttributesMap)
+ if attr['attrType'] == rfc7894.id_aa_otpChallenge:
+ self.assertEqual(
+ '90503846', attr['attrValues'][0]['printableString'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7906.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7906.py
new file mode 100644
index 0000000000..3806987d4f
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7906.py
@@ -0,0 +1,168 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc7906
+
+
+class AttributeSetTestCase(unittest.TestCase):
+ attr_set_pem_text = """\
+MYIRmDAQBglghkgBZQIBDQcxA4IBATAQBglghkgBZQIBDQ0xAwoBUzAQBglghkgB
+ZQIBDQ4xAwoBAjAQBglghkgBZQIBDQ8xAwoBATARBglghkgBZQIBBUIxBAQCeQYw
+EgYJYIZIAWUCAQ0LMQUwAwoBATAVBglghkgBZQIBDQUxCDAGAgReAA//MBUGCyqG
+SIb3DQEJEAIuMQYCBF1qowYwGQYJYIZIAWUCAQVHMQwGCisGAQQBgaxgME0wGgYJ
+YIZIAWUCAQ0BMQ0wCwYJYIZIAWUDBAEtMBoGCWCGSAFlAgENDDENBgsqhkiG9w0B
+CRABGTAaBglghkgBZQIBDRUxDTALBglghkgBZQMEAS0wGwYJYIZIAWUCAQ0GMQ4w
+DAIEXQAAAAIEXwAP/zAdBgsqhkiG9w0BCRACKDEOMAwGCisGAQQBgaxgMDAwLQYJ
+YIZIAWUCAQVGMSAwHoYcaHR0cDovL3JlcG8uZXhhbXBsZS5jb20vcGtpLzAvBglg
+hkgBZQIBDQMxIjAgExFCb2d1cyBTaG9ydCBUaXRsZYEFQm9ndXOFATCHAU0wNAYJ
+YIZIAWUCAQVIMScwJRMRQm9ndXMgU2hvcnQgVGl0bGUTEEZha2UgU2hvcnQgVGl0
+bGUwOAYIKwYBBQUHAQsxLDAqMCgGCCsGAQUFBzAFhhxodHRwOi8vcmVwby5leGFt
+cGxlLmNvbS9wa2kvMEEGCyqGSIb3DQEJEAIEMTIwMAwjVGhlc2UgUkZDIDc5MDYg
+YXR0cmlidXRlcyBhcmUgYm9ndXMGCSqGSIb3DQEHATCBggYLKoZIhvcNAQkQAgIx
+czFxAgEBBgorBgEEAYGsYAEBMUwwJIAKYIZIAWUCAQgDA4EWMBQGCisGAQQBgaxg
+MEkxBgIBMAIBSTAkgApghkgBZQIBCAMEgRYwFAYKKwYBBAGBrGAwRTEGAgEwAgFF
+ExJCb2d1cyBQcml2YWN5IE1hcmswgYQGCWCGSAFlAgENFjF3MHUwMAYKYIZIAWUC
+AQJOAjAiMCAGCyqGSIb3DQEJEAwLMREMD2t0YS5leGFtcGxlLmNvbTAxBgsqhkiG
+9w0BCRABGTAiMCAGCyqGSIb3DQEJEAwLMREMD2t0YS5leGFtcGxlLmNvbTAOBgkq
+hkiG9w0BBwEKAQEwgaAGCWCGSAFlAgENEDGBkjCBj6EMBgorBgEEAYGsYDAwoH8G
+CWCGSAFlAgEQAARyMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UE
+BxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAe
+BgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMIIBvwYJYIZIAWUCAQVBMYIB
+sDCCAawEFO1lDTbJmd4voc2GDuaMzYO+XJSmMIIBkqCB/jB/BglghkgBZQIBEAAE
+cjBwMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24x
+EDAOBgNVBAoTB0V4YW1wbGUxDjAMBgNVBAMTBUFsaWNlMSAwHgYJKoZIhvcNAQkB
+FhFhbGljZUBleGFtcGxlLmNvbTB7BglghkgBZQIBEAAEbjBsMQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4YW1w
+bGUxDDAKBgNVBAMTA0JvYjEeMBwGCSqGSIb3DQEJARYPYm9iQGV4YW1wbGUuY29t
+MIGOMIGLBglghkgBZQIBEAAEfjB8MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkEx
+EDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZpZ2lsIFNlY3VyaXR5IExMQzEX
+MBUGA1UECxMOS2V5IE1hbmFnZW1lbnQxGDAWBgNVBAMTD2t0YS5leGFtcGxlLmNv
+bTCCAoUGA1UEJDGCAnwwggJ4MIIB/qADAgECAgkApbNUKBuwbjswCgYIKoZIzj0E
+AwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9u
+MREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1MjkxNDQ1NDFaFw0yMDA1MjgxNDQ1
+NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRv
+bjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0B
+CQEWEWFsaWNlQGV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE+M2f
+By/sRA6V1pKFqecRTE8+LuAHtZxes1wmJZrBBg+bz7uYZfYQxI3dVB0YCSD6Mt3y
+XFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/wto8hMKoFgkcscqIbiV7Zo4GU
+MIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0
+ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1UdDgQWBBTE
+uloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs
+4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL3kRhmn+PJTeKaL9sh/oQgHOY
+TgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94MaerDIrQpi0IDh+v0QSAv9rMife
+8tClafXWtDwwL8MS7oAh0ymT446Uizxx3PUwggSaBgNVBEYxggSRMIIEjTCCAgIw
+ggGIoAMCAQICCQDokdYGkU/O8jAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzEL
+MAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENB
+MB4XDTE5MDUxNDA4NTgxMVoXDTIxMDUxMzA4NTgxMVowPzELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBD
+QTB2MBAGByqGSM49AgEGBSuBBAAiA2IABPBRdlSx6I5qpZ2sKUMIxun1gUAzzstO
+YWKvKCnMoNT1x+pIKDvMEMimFcLAxxL3NVYOhK0Jty83SPDkKWMdx9/Okdhf3U/z
+xJlEnXDiFrAeM6xbG8zcCRiBnmd92UvsRqNQME4wHQYDVR0OBBYEFPI12zQE2qVV
+8r1pA5mwYuziFQjBMB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAwG
+A1UdEwQFMAMBAf8wCgYIKoZIzj0EAwMDaAAwZQIwWlGNjb9NyqJSzUSdsEqDSvMZ
+b8yFkxYCIbAVqQ9UqScUUb9tpJKGsPWwbZsnLVvmAjEAt/ypozbUhQw4dSPpWzrn
+5BQ0kKbDM3DQJcBABEUBoIOol1/jYQPmxajQuxcheFlkMIICgzCCAgqgAwIBAgIJ
+AKWzVCgbsG49MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJW
+QTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNjEy
+MTQzMTA0WhcNMjAwNjExMTQzMTA0WjB8MQswCQYDVQQGEwJVUzELMAkGA1UECBMC
+VkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZpZ2lsIFNlY3VyaXR5IExM
+QzEXMBUGA1UECxMOS2V5IE1hbmFnZW1lbnQxGDAWBgNVBAMTD2t0YS5leGFtcGxl
+LmNvbTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJf2XsTdVLcYASKJGtWjOAIFB8sX
+rsiE7G1tC+IP+iOEdJCZ+UvJ9Enx7v6dtaU4uy1FzuWCar45BVpKVK2TNWT8E7XA
+TkGBTIXGN76yJ5S09FdWp+hVkIkmyCJJujXzV6OBlDCBkTALBgNVHQ8EBAMCB4Aw
+QgYJYIZIAYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0
+ZWQgZm9yIGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQUbZtc/QOvtbnVi/FknxpW4LWt
+TQ8wHwYDVR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwCgYIKoZIzj0EAwMD
+ZwAwZAIwBniWpO11toMsV8fLBpBjA5YGQvd3TAcSw1lNbWpArL+hje1dzQ7pxsln
+kklv3CTxAjBuVebz4mN0Qkew2NK/itwlmi7i+QxPs/MSZ7YFsyTA5Z4h2GbLW+zN
+3xNCC91vfpcwggSgBglghkgBZQIBDRQxggSRMYIEjTCCAgIwggGIoAMCAQICCQDo
+kdYGkU/O8jAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkEx
+EDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4XDTE5MDUxNDA4
+NTgxMVoXDTIxMDUxMzA4NTgxMVowPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZB
+MRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABPBRdlSx6I5qpZ2sKUMIxun1gUAzzstOYWKvKCnMoNT1x+pI
+KDvMEMimFcLAxxL3NVYOhK0Jty83SPDkKWMdx9/Okdhf3U/zxJlEnXDiFrAeM6xb
+G8zcCRiBnmd92UvsRqNQME4wHQYDVR0OBBYEFPI12zQE2qVV8r1pA5mwYuziFQjB
+MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAwGA1UdEwQFMAMBAf8w
+CgYIKoZIzj0EAwMDaAAwZQIwWlGNjb9NyqJSzUSdsEqDSvMZb8yFkxYCIbAVqQ9U
+qScUUb9tpJKGsPWwbZsnLVvmAjEAt/ypozbUhQw4dSPpWzrn5BQ0kKbDM3DQJcBA
+BEUBoIOol1/jYQPmxajQuxcheFlkMIICgzCCAgqgAwIBAgIJAKWzVCgbsG49MAoG
+CCqGSM49BAMDMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwH
+SGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0EwHhcNMTkwNjEyMTQzMTA0WhcNMjAw
+NjExMTQzMTA0WjB8MQswCQYDVQQGEwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcT
+B0hlcm5kb24xGzAZBgNVBAoTElZpZ2lsIFNlY3VyaXR5IExMQzEXMBUGA1UECxMO
+S2V5IE1hbmFnZW1lbnQxGDAWBgNVBAMTD2t0YS5leGFtcGxlLmNvbTB2MBAGByqG
+SM49AgEGBSuBBAAiA2IABJf2XsTdVLcYASKJGtWjOAIFB8sXrsiE7G1tC+IP+iOE
+dJCZ+UvJ9Enx7v6dtaU4uy1FzuWCar45BVpKVK2TNWT8E7XATkGBTIXGN76yJ5S0
+9FdWp+hVkIkmyCJJujXzV6OBlDCBkTALBgNVHQ8EBAMCB4AwQgYJYIZIAYb4QgEN
+BDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9yIGFueSBw
+dXJwb3NlLjAdBgNVHQ4EFgQUbZtc/QOvtbnVi/FknxpW4LWtTQ8wHwYDVR0jBBgw
+FoAU8jXbNATapVXyvWkDmbBi7OIVCMEwCgYIKoZIzj0EAwMDZwAwZAIwBniWpO11
+toMsV8fLBpBjA5YGQvd3TAcSw1lNbWpArL+hje1dzQ7pxslnkklv3CTxAjBuVebz
+4mN0Qkew2NK/itwlmi7i+QxPs/MSZ7YFsyTA5Z4h2GbLW+zN3xNCC91vfpc=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.AttributeSet()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.attr_set_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object:
+ self.assertIn(attr['type'], rfc5652.cmsAttributesMap)
+
+ av, rest = der_decoder(
+ attr['values'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['type']])
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['values'][0], der_encoder(av))
+
+ if attr['type'] == rfc7906.id_aa_KP_contentDecryptKeyID:
+ self.assertEqual(univ.OctetString(hexValue='7906'), av)
+
+ def testOpenTypes(self):
+ openTypesMap = rfc5280.certificateAttributesMap.copy()
+ openTypesMap.update(rfc5652.cmsAttributesMap)
+
+ substrate = pem.readBase64fromText(self.attr_set_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, openTypes=openTypesMap,
+ decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object:
+ if attr['type'] == rfc7906.id_aa_KP_contentDecryptKeyID:
+ self.assertEqual(
+ univ.OctetString(hexValue='7906'), attr['values'][0])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc7914.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc7914.py
new file mode 100644
index 0000000000..e0b1cb3728
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc7914.py
@@ -0,0 +1,97 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc7914
+from pyasn1_modules import rfc8018
+
+
+# From RFC 7914, Section 13
+
+class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIHiME0GCSqGSIb3DQEFDTBAMB8GCSsGAQQB2kcECzASBAVNb3VzZQIDEAAAAgEI
+AgEBMB0GCWCGSAFlAwQBKgQQyYmguHMsOwzGMPoyObk/JgSBkJb47EWd5iAqJlyy
++ni5ftd6gZgOPaLQClL7mEZc2KQay0VhjZm/7MbBUNbqOAXNM6OGebXxVp6sHUAL
+iBGY/Dls7B1TsWeGObE0sS1MXEpuREuloZjcsNVcNXWPlLdZtkSH6uwWzR0PyG/Z
++ZXfNodZtd/voKlvLOw5B3opGIFaLkbtLZQwMiGtl42AS89lZg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5958.EncryptedPrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ea = asn1Object['encryptionAlgorithm']
+
+ self.assertEqual(rfc8018.id_PBES2, ea['algorithm'])
+ self.assertIn(ea['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ params, rest = der_decoder(
+ ea['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[ea['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(params.prettyPrint())
+ self.assertEqual(ea['parameters'], der_encoder(params))
+
+ kdf = params['keyDerivationFunc']
+
+ self.assertEqual(rfc7914.id_scrypt, kdf['algorithm'])
+ self.assertIn(kdf['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ kdfp, rest = der_decoder(
+ kdf['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[kdf['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(kdfp.prettyPrint())
+ self.assertTrue(kdf['parameters'], der_encoder(kdfp))
+ self.assertEqual(1048576, kdfp['costParameter'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ ea = asn1Object['encryptionAlgorithm']
+
+ self.assertEqual(rfc8018.id_PBES2, ea['algorithm'])
+
+ params = asn1Object['encryptionAlgorithm']['parameters']
+
+ self.assertEqual(
+ rfc7914.id_scrypt, params['keyDerivationFunc']['algorithm'])
+
+ kdfp = params['keyDerivationFunc']['parameters']
+
+ self.assertEqual(1048576, kdfp['costParameter'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8017.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8017.py
new file mode 100644
index 0000000000..9601997f1d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8017.py
@@ -0,0 +1,125 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8017
+from pyasn1_modules import rfc2985
+
+
+class SMIMECapabilitiesTestCase(unittest.TestCase):
+ smime_capabilities_pem_text = """\
+MIIBAzA8BgkqhkiG9w0BAQcwL6APMA0GCWCGSAFlAwQCAgUAoRwwGgYJKoZIhvcN
+AQEIMA0GCWCGSAFlAwQCAgUAMDwGCSqGSIb3DQEBCjAvoA8wDQYJYIZIAWUDBAIC
+BQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAICBQAwDQYJKoZIhvcNAQECBQAw
+DQYJKoZIhvcNAQEEBQAwDQYJKoZIhvcNAQEFBQAwDQYJKoZIhvcNAQEOBQAwDQYJ
+KoZIhvcNAQELBQAwDQYJKoZIhvcNAQEMBQAwDQYJKoZIhvcNAQENBQAwDQYJKoZI
+hvcNAQEPBQAwDQYJKoZIhvcNAQEQBQA=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for cap in asn1Object:
+ self.assertIn(cap['algorithm'], rfc5280.algorithmIdentifierMap)
+
+ if cap['parameters'].hasValue():
+ p, rest = der_decoder(
+ cap['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[cap['algorithm']])
+
+ self.assertFalse(rest)
+ if not p == univ.Null(""):
+ self.assertTrue(p.prettyPrint())
+ self.assertEqual(cap['parameters'], der_encoder(p))
+
+ if cap['algorithm'] == rfc8017.id_RSAES_OAEP:
+ self.assertEqual(
+ rfc8017.id_sha384, p['hashFunc']['algorithm'])
+ self.assertEqual(
+ rfc8017.id_mgf1, p['maskGenFunc']['algorithm'])
+
+ def OpenTypesCodec(self):
+ substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for cap in asn1Object:
+ if cap['algorithm'] == rfc8017.id_RSAES_OAEP:
+ p = cap['parameters']
+ self.assertEqual(
+ rfc8017.id_sha384, p['hashFunc']['algorithm'])
+ self.assertEqual(
+ rfc8017.id_mgf1, p['maskGenFunc']['algorithm'])
+
+
+class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
+ pem_text = """\
+MIIE2QIBAQKCAQEAn82EqwXasE2TFNSmZucB8LNza2mOWLHF3nxpxKXalPMDvezc
+5Dq7Ytcv/k9jJL4j4jYfvR4yyZdU9iHLaD6hOINZ8E6hVpx/4c96ZUSOLzD2g+u+
+jIuoNfG+zygSBGYCS6BLCAIsZ+2wUyxYpLJknHJld9/jy+aLmmyrilhH9dH5AUiV
+3NeWht/68++dMXf4ZI/gV4bMSlWhggxkz2WJJpiQdCdJatGkwNDkHmLA9X0tC6OH
+SPE7qYdxG38cYS5F445SgnhDpiK7BodSqYLwgehaDjoOYdEgHVnOcpBCDI5zCJSL
+b1c/z8uhrB1xxlECR44wCLcKsIIYQxaEErRJ/wIDAQABAoIBAD+Ra5L0szeqxDVn
+GgKZJkZvvBwgU0WpAgMtDo3xQ/A4c2ab0IrhaiU5YJgTUGcPVArqaNm8J4vVrTBz
+5QxEzbFDXwWe4cMoYh6bgB7ElKLlIUr8/kGZUfgc7kI29luEjcAIEAC2/RQHesVn
+DHkL5OzqZL+4fIwckAMh0tXdflsPgZ/jgIaKca4OqKu4KGnczm3UvqtlvwisAjkx
+zMyfZXOLn0vEwP2bfbhQrCVrP7n6a+CV+Kqm8NBWnbiS6x2rWemVVssNTbfXQztq
+wC6ZJZCLK7plciDBWvHcS6vxdcsS9DUxuqSV6o/stCGTl1D+9tDx8Od0Eunna2B2
+wAoRHZECVgbNO1bqwfYpp5aFuySWoP+KZz8f/5ZkHjLwiNGpQcqVd4+7Ql2R4qgF
+NgSoQQOZFhKtiOeLVU0HYfp6doI4waSINZdF/fJDHD6fY3AMOc/IIMDHHIzbAlYG
+vKOocLXWj/2+gcyQ1XoAmrE70aIFUBLSvd7RCi8GI74zYWp5lCSvO850Z4GsWSZT
+41iF13sTDDJPm3+BbzMvEu2GuACi/8/IpbUr24/FP9Cp1Rf7kwJWAgMxfoshbrNu
+ebQB5laHNnT+DYhrOFVRNiNDaD2bUNSetrFidosWtD4ueHxMGENwa4BbFJ9+UrdP
+fyxC6k7exM7khGjaNZczwTep1VpYtKjzP/bp9KcCVgYoj9s9HZ1FCAsNEPodjGfd
+AcPTQS9mIa7wzy19B7uvFQJXPURi/p4KKBMVQ99Pp8/r9lJzxxiEf8FyPr8N7lZM
+EUKkFkDrZQDhKpsrHWSNj6yRFlltAlYC7dYR8KLEWoOUATLosxQhwgypv+23r+d4
+ZdPOdDv9n8Kmj+NFy/oISFfdXzlOU4RWQtMx3hEwAabwct7vjiJEej/kmiTqco02
+17tt13VvvQ5ZXF73dDCCAQwwggEIAlYDfMpM1WNfxcLLOgkRZ+0S9OvIrEOi0ALV
+SquTdi/thhCuCsK3lMD4miN9te8j16YtqEFVWXC3a6DWwIJ6m/xZ50bBwPqM8RsI
+6FWhZw4Dr5VqjYXUvwJWAvapRk9SydDYri/cAtGIkUJVlspkE1emALAaSw30vmfd
+hrgYLT6YGOmK3UmcNJ4NVeET275MXWF1ZOhkOGKTN6aj5wPhJaHBMnmUQrq7GwC6
+/LfUkSsCVgMCDTV9gbFW8u6TcTVW85dBIeUGxZh1T2pbU3dkGO3IOxOhzJUplH4/
+EeEs9dusHakg1ERXAg4Vo1YowPW8kuVbZ9faxeVrmuER5NcCuZzS5X/obGUw
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc8017.RSAPrivateKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8018.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8018.py
new file mode 100644
index 0000000000..f354c63eb2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8018.py
@@ -0,0 +1,58 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8018
+
+
+class PWRITestCase(unittest.TestCase):
+ rfc3211_ex1_pem_text = """\
+o1MCAQCgGgYJKoZIhvcNAQUMMA0ECBI0Vnh4VjQSAgEFMCAGCyqGSIb3DQEJEAMJMBEGBSsO
+AwIHBAjv5ZjvIbM9bQQQuBslZe43PKbe3KJqF4sMEA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.RecipientInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.rfc3211_ex1_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ alg_oid = asn1Object['pwri']['keyDerivationAlgorithm']['algorithm']
+
+ self.assertEqual(rfc8018.id_PBKDF2, alg_oid)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.rfc3211_ex1_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ icount = (asn1Object['pwri']['keyDerivationAlgorithm']
+ ['parameters']['iterationCount'])
+
+ self.assertEqual(5, icount)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8103.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8103.py
new file mode 100644
index 0000000000..002f5c9067
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8103.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8103
+
+
+class CAEADChaCha20Poly1305TestCase(unittest.TestCase):
+ alg_id_pem_text = "MBsGCyqGSIb3DQEJEAMSBAzK/rq++s7brd7K+Ig="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8103.id_alg_AEADChaCha20Poly1305, asn1Object[0])
+
+ param, rest = der_decoder.decode(
+ asn1Object[1], rfc8103.AEADChaCha20Poly1305Nonce())
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(
+ rfc8103.AEADChaCha20Poly1305Nonce(value='\xca\xfe\xba\xbe\xfa'
+ '\xce\xdb\xad\xde\xca'
+ '\xf8\x88'),
+ param)
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
+
+
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8209.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8209.py
new file mode 100644
index 0000000000..1afd77f24a
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8209.py
@@ -0,0 +1,63 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8209
+
+
+class CertificateTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIIBiDCCAS+gAwIBAgIEAk3WfDAKBggqhkjOPQQDAjAaMRgwFgYDVQQDDA9ST1VU
+RVItMDAwMEZCRjAwHhcNMTcwMTAxMDUwMDAwWhcNMTgwNzAxMDUwMDAwWjAaMRgw
+FgYDVQQDDA9ST1VURVItMDAwMEZCRjAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
+AARzkbq7kqDLO+EOWbGev/shTgSpHgy6GxOafTjZD3flWqBbjmlWeOD6FpBLVdnU
+9cDfxYiV7lC8T3XSBaJb02/1o2MwYTALBgNVHQ8EBAMCB4AwHQYDVR0OBBYEFKtN
+kQ9VyucaIV7zyv46zEW17sFUMBMGA1UdJQQMMAoGCCsGAQUFBwMeMB4GCCsGAQUF
+BwEIAQH/BA8wDaAHMAUCAwD78KECBQAwCgYIKoZIzj0EAwIDRwAwRAIgB7e0al+k
+8cxoNjkDpIPsfIAC0vYInUay7Cp75pKzb7ECIACRBUqh9bAYnSck6LQi/dEc8D2x
+OCRdZCk1KI3uDDgp
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+ if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
+ self.assertIn(rfc8209.id_kp_bgpsec_router, extnValue)
+
+ self.assertIn(rfc5280.id_ce_extKeyUsage, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8226.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8226.py
new file mode 100644
index 0000000000..aa5257c3d2
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8226.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8226
+
+
+class JWTClaimConstraintsTestCase(unittest.TestCase):
+ jwtcc_pem_text = ("MD2gBzAFFgNmb2+hMjAwMBkWA2ZvbzASDARmb28xDARmb28yDARmb2"
+ "8zMBMWA2JhcjAMDARiYXIxDARiYXIy")
+
+ def setUp(self):
+ self.asn1Spec = rfc8226.JWTClaimConstraints()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.jwtcc_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class TNAuthorizationListTestCase(unittest.TestCase):
+ tnal_pem_text = ("MCugBxYFYm9ndXOhEjAQFgo1NzE1NTUxMjEyAgIDFKIMFgo3MDM1NTU"
+ "xMjEy")
+
+ def setUp(self):
+ self.asn1Spec = rfc8226.TNAuthorizationList()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.tnal_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+class CertificateOpenTypesTestCase(unittest.TestCase):
+ cert_pem_text = """\
+MIICkTCCAhegAwIBAgIJAKWzVCgbsG4+MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
+AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
+dXMgQ0EwHhcNMTkwNzE4MTUwNzQ5WhcNMjAwNzE3MTUwNzQ5WjBxMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xKDAmBgNVBAoTH0Zh
+a2UgVGVsZXBob25lIFNlcnZpY2UgUHJvdmlkZXIxGTAXBgNVBAMTEGZha2UuZXhh
+bXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARLyLhnsvrS9WBY29tmN2LI
+CF/wuX4ohhUy3sxO0ynCplHHojpDg+tghGzusf0aLtMDu1II915O8YK5XVL+KZJD
+C82jybxWIKjjzX2qc5/O06joUttdEDzkTaD0kgbcXl6jgawwgakwCwYDVR0PBAQD
+AgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0
+cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFHOI3GpDt9dWsTAZxhcj
+96uyL2aIMB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMBYGCCsGAQUF
+BwEaBAowCKAGFgRmYWtlMAoGCCqGSM49BAMDA2gAMGUCMQCy+qFhT7X1i18jcyIa
+Jkgz/tumrPsaBA2RihkooTEr4GbqC650Z4Cwt7+x2xZq37sCMFSM6fRueLyV5StG
+yEFWA6G95b/HbtPMTjLpPKtrOjhofc4LyVCDYhFhKzpvHh1qeA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.cert_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder.decode(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertEqual(
+ extn['extnValue'], der_encoder.encode(extnValue))
+
+ if extn['extnID'] == rfc8226.id_pe_TNAuthList:
+ self.assertEqual('fake', extnValue[0]['spc'])
+
+ self.assertIn(rfc8226.id_pe_TNAuthList, extn_list)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8358.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8358.py
new file mode 100644
index 0000000000..48a01ce45b
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8358.py
@@ -0,0 +1,195 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8358
+
+
+class P7STestCase(unittest.TestCase):
+ pem_text_list = (
+"""\
+MIIJWgYJKoZIhvcNAQcCoIIJSzCCCUcCAQMxDTALBglghkgBZQMEAgEwDQYLKoZIhvcNAQkQ
+ARugggZ0MIIGcDCCBVigAwIBAgIRANa58hQvZ26svTWQaGtqo/YwDQYJKoZIhvcNAQELBQAw
+gZcxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcT
+B1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMT0wOwYDVQQDEzRDT01PRE8g
+UlNBIENsaWVudCBBdXRoZW50aWNhdGlvbiBhbmQgU2VjdXJlIEVtYWlsIENBMB4XDTE1MDIx
+MjAwMDAwMFoXDTIwMDIxMjIzNTk1OVowgZUxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhWaXJn
+aW5pYTEPMA0GA1UEBxMGUmVzdG9uMRMwEQYDVQQKEwpJRVRGIFRydXN0MRkwFwYDVQQLExBT
+ZWNyZXRhcmlhdCBXZXN0MQ0wCwYDVQQDEwRJRVRGMSMwIQYJKoZIhvcNAQkBFhRpZXRmLWFj
+dGlvbkBpZXRmLm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUkKtMPP1RA
+FU6sxMezYJKCt4rw30RDieB8/P67TMhA6j8Um4a2Xo+CP9Ce1oMri2bwaaQPYWB4ciEL32za
+0NUE0B0iCjZZl36hon6wW6mJw1NGD/AFxnKWzhkSWG6BHMoeOAzu/ye8sHu4Jp5nazpGptK7
+30SjTS3JJFU9pHwQY6JlcmwVv0j2rsT3gj92Cbj5S+U5wCSE6+mZbCC+VPFeeI1kFITwyaIm
+uK9kSYHr15OXua/jrYNrHNRfqtexGKSgnUT96KkTh9TVvkMETB1WJS4WuEIP6GITvwVTp0lA
+qS3oNO4SM4tgFVdYqppcvZBg52kHY9y7IdR156c99zzZDBfWBduqjs/AXa0uol0EJd7qFLUs
+xEJ96XN3tPgR/Cwq18ec29pZQH6kO81Kato/RsQrj6A05TFx/J0MYE0R1MZqvIDUu55vlicb
+wT2lpXMiz1szKuvjTZRR9H/IgbKPNpt/kDUSgXLYwuKBm+nBoJXgybEyJ+A4arb60d9Uiusu
+UA8/h6s1rDMuTnIYMbIii4Y+KgevBWPawqk0xioilEMJ0RBaBVrDreuFlK8aYh+Jo2piruBA
+QnB9ZaPmEM1HPNArJxqL6XcUJTkFxNSksOATDFV5sEoBWYIe6qv2nV2r/HWDAEaa4WH2h3o/
+kASarXk3SxPXmfjOOr1XgpKjAgMBAAGjggG1MIIBsTAfBgNVHSMEGDAWgBSCr2yM+MX+lmF8
+6B89K3FIXsSLwDAdBgNVHQ4EFgQU7Olc92Oy6nkCvpv6jCj6qN8YPtUwDgYDVR0PAQH/BAQD
+AgeAMAwGA1UdEwEB/wQCMAAwRgYDVR0gBD8wPTA7BgwrBgEEAbIxAQIBAwUwKzApBggrBgEF
+BQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwWgYDVR0fBFMwUTBPoE2gS4ZJ
+aHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBQ2xpZW50QXV0aGVudGljYXRpb25h
+bmRTZWN1cmVFbWFpbENBLmNybDCBiwYIKwYBBQUHAQEEfzB9MFUGCCsGAQUFBzAChklodHRw
+Oi8vY3J0LmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDbGllbnRBdXRoZW50aWNhdGlvbmFuZFNl
+Y3VyZUVtYWlsQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20w
+HwYDVR0RBBgwFoEUaWV0Zi1hY3Rpb25AaWV0Zi5vcmcwDQYJKoZIhvcNAQELBQADggEBAGPm
+QUKHxkEQ9vk69vIP68anHc8UsTv5powtLSXLqUw3rAoKAdoWkKjb7ZByHCuFiNk1BvTnhQPh
+LAZm5dI8dYWKp3zgWVxsCXOQv2K4XbaQpIk8KKdLycHWsOq2OD4xBdhfTQqDj9EidhxaLf4B
+bRUePOuWVvwNqHI6OQ9FbRllUsTsSH3XK7z9Ru/0Ub07uEzmWyrIpeFRLJUg9EqQj25pw8j7
+N9ym8ItpfEQvK4Nrzt9KnGwFDaNOUjYAvejig9iUNdOXEQKVzbq8fC25HrXPQisq8u2jrP38
+cRqzwgGHZ1bJrQa8+LPBfADZ4ZHeqlEe6IqZhS/wDSuDNCIZHtkxggKqMIICpgIBA4AU7Olc
+92Oy6nkCvpv6jCj6qN8YPtUwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3
+DQEJEAEbMBwGCSqGSIb3DQEJBTEPFw0xOTA2MDkxNjU3NTdaMC8GCSqGSIb3DQEJBDEiBCDx
+ACvH9u26K1BdX+IPp6vguUAtA9k0lp9JMNunvXTuQzANBgkqhkiG9w0BAQEFAASCAgBY8kFl
+SxQIvU4n6LaVoAV6ibHrlCqOp9KrUc9DmwXtDifsgoGfhDHb6i5k9BSHmerjTGF6mLlquPUV
+Z2EHSUuVpk8rX//ki6vngq91+f+ufrzEpvO6BLc2aO/zOat0W3U2hiq3zJSLMYMNZhX484Nq
+9+ImsU0S5f32ZpEXH0lFINUaZFo0eRAOZywqNuY57stjWBxTI6MA00S0+eMuWlmkMy0C2LL9
+BQvWW01/ri2UDEprAiKo2sXLcScgHimEVYHuWsrnP+sJ3XVWRsWGRW2i5qIalu2ZGmmIU/vg
+bdBzQnAjCoS2xC5Kwv+cqtUiiyLI0nnuu1aKKi4hivmt1n+hSIWWgGNwTFn3S4+mYDDNSH0u
+ocOr0uDFVv/SH9QPQuGh9rpSz3cd3hlA4R63Rylm46Tt6DnXiovu0mDoos68UQjIAPXWj1ES
+Peeubp+wSbuqN8Rh+koZU+HK7YpsR2bB4hL0GIwMA9lQjGSCxPCt1ViRL6zAWECzQC1YgLyc
++f1Fe8pkaWUbZz+18H/rJoKsXiNWH8yhfAyk+JGTxc4qxWJ/BuF0vzSyuVEffuxIHrOMZTpO
++xfAaJVDqFjxT5yKj3dCfy6XSDZq39AeX/w26/WfH+0ALRiViAAaMHSldbawVR/W3isecDWF
+tlU4NSJMLi/tTohe0QN1fjOaFryAvw==
+""",
+"""\
+MIIJWgYJKoZIhvcNAQcCoIIJSzCCCUcCAQMxDTALBglghkgBZQMEAgEwDQYLKoZIhvcNAQkQ
+ARygggZ0MIIGcDCCBVigAwIBAgIRANa58hQvZ26svTWQaGtqo/YwDQYJKoZIhvcNAQELBQAw
+gZcxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcT
+B1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMT0wOwYDVQQDEzRDT01PRE8g
+UlNBIENsaWVudCBBdXRoZW50aWNhdGlvbiBhbmQgU2VjdXJlIEVtYWlsIENBMB4XDTE1MDIx
+MjAwMDAwMFoXDTIwMDIxMjIzNTk1OVowgZUxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhWaXJn
+aW5pYTEPMA0GA1UEBxMGUmVzdG9uMRMwEQYDVQQKEwpJRVRGIFRydXN0MRkwFwYDVQQLExBT
+ZWNyZXRhcmlhdCBXZXN0MQ0wCwYDVQQDEwRJRVRGMSMwIQYJKoZIhvcNAQkBFhRpZXRmLWFj
+dGlvbkBpZXRmLm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUkKtMPP1RA
+FU6sxMezYJKCt4rw30RDieB8/P67TMhA6j8Um4a2Xo+CP9Ce1oMri2bwaaQPYWB4ciEL32za
+0NUE0B0iCjZZl36hon6wW6mJw1NGD/AFxnKWzhkSWG6BHMoeOAzu/ye8sHu4Jp5nazpGptK7
+30SjTS3JJFU9pHwQY6JlcmwVv0j2rsT3gj92Cbj5S+U5wCSE6+mZbCC+VPFeeI1kFITwyaIm
+uK9kSYHr15OXua/jrYNrHNRfqtexGKSgnUT96KkTh9TVvkMETB1WJS4WuEIP6GITvwVTp0lA
+qS3oNO4SM4tgFVdYqppcvZBg52kHY9y7IdR156c99zzZDBfWBduqjs/AXa0uol0EJd7qFLUs
+xEJ96XN3tPgR/Cwq18ec29pZQH6kO81Kato/RsQrj6A05TFx/J0MYE0R1MZqvIDUu55vlicb
+wT2lpXMiz1szKuvjTZRR9H/IgbKPNpt/kDUSgXLYwuKBm+nBoJXgybEyJ+A4arb60d9Uiusu
+UA8/h6s1rDMuTnIYMbIii4Y+KgevBWPawqk0xioilEMJ0RBaBVrDreuFlK8aYh+Jo2piruBA
+QnB9ZaPmEM1HPNArJxqL6XcUJTkFxNSksOATDFV5sEoBWYIe6qv2nV2r/HWDAEaa4WH2h3o/
+kASarXk3SxPXmfjOOr1XgpKjAgMBAAGjggG1MIIBsTAfBgNVHSMEGDAWgBSCr2yM+MX+lmF8
+6B89K3FIXsSLwDAdBgNVHQ4EFgQU7Olc92Oy6nkCvpv6jCj6qN8YPtUwDgYDVR0PAQH/BAQD
+AgeAMAwGA1UdEwEB/wQCMAAwRgYDVR0gBD8wPTA7BgwrBgEEAbIxAQIBAwUwKzApBggrBgEF
+BQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwWgYDVR0fBFMwUTBPoE2gS4ZJ
+aHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBQ2xpZW50QXV0aGVudGljYXRpb25h
+bmRTZWN1cmVFbWFpbENBLmNybDCBiwYIKwYBBQUHAQEEfzB9MFUGCCsGAQUFBzAChklodHRw
+Oi8vY3J0LmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDbGllbnRBdXRoZW50aWNhdGlvbmFuZFNl
+Y3VyZUVtYWlsQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20w
+HwYDVR0RBBgwFoEUaWV0Zi1hY3Rpb25AaWV0Zi5vcmcwDQYJKoZIhvcNAQELBQADggEBAGPm
+QUKHxkEQ9vk69vIP68anHc8UsTv5powtLSXLqUw3rAoKAdoWkKjb7ZByHCuFiNk1BvTnhQPh
+LAZm5dI8dYWKp3zgWVxsCXOQv2K4XbaQpIk8KKdLycHWsOq2OD4xBdhfTQqDj9EidhxaLf4B
+bRUePOuWVvwNqHI6OQ9FbRllUsTsSH3XK7z9Ru/0Ub07uEzmWyrIpeFRLJUg9EqQj25pw8j7
+N9ym8ItpfEQvK4Nrzt9KnGwFDaNOUjYAvejig9iUNdOXEQKVzbq8fC25HrXPQisq8u2jrP38
+cRqzwgGHZ1bJrQa8+LPBfADZ4ZHeqlEe6IqZhS/wDSuDNCIZHtkxggKqMIICpgIBA4AU7Olc
+92Oy6nkCvpv6jCj6qN8YPtUwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3
+DQEJEAEcMBwGCSqGSIb3DQEJBTEPFw0xOTA2MDkxNjU3NTdaMC8GCSqGSIb3DQEJBDEiBCBg
+ifxBsUb2E8RicFvqZB+NJEs1FOG4hFFU1bPqV2UwGzANBgkqhkiG9w0BAQEFAASCAgCApFAS
+4+cYrnkMebrANXw7/TGn6Qx01p9fuOugQb6lcfE5CysIKGLJJogs0BXwHK4jTeJRdt/lutuz
+bACg1bakABxuCiLWMu3pKCKS94qAgElYgWru+pAxPhuslz5MwAU0qFW3KnaNq3f5wXlVQ+h2
+l9spSiLhAQ+vLTLfotn6tCmUfjaaYsoNIUGg6b/2vH75QGYaXDq9YGoCrrkDbaRS4eDenSL5
+S2fBTZ5VMJE/1VQY1D5CWqt2CTfzRkNkU7mkarPy6SPvguDlqKJJnFaZJmeIYbGOpDt6KxWc
+DLFD9+J6CH492QwlHxDtM94nK1oIaqdu9TTV94t0ToGezElOZZuVA2DVkov5DzrYQLI5GjMw
+7iHXW1ewCaGF38DdOopqBYp7jcCCZpruKBWDq/uz40MzSBrffYTP/dg4//8Awvt/JomvTUoH
+E18Pt/G2cqdw0NqOE7YEcFpsLGfikTWmGhnrcYUkt8odDDAv/vqZRt8DLkB56waQeQw0TLit
+2M3gbTSHJ1KFsBM/kqHanVapGtnClkY7hYh8DVpgJymJpupkNFs8lDNbN4C42DhQ6Oz9P2qu
+8a/ybEb5gMZ3fsVLvvp6LhbJfqIvYgZO2uKXeKg3eLASD5nVY/Tuhnn2plhx+weKULGys0Ov
+zPKZ+N96KLerIBr3FmGByqhr3jNrBw==
+""",
+"""\
+MIIJWgYJKoZIhvcNAQcCoIIJSzCCCUcCAQMxDTALBglghkgBZQMEAgEwDQYLKoZIhvcNAQkQ
+AR2gggZ0MIIGcDCCBVigAwIBAgIRANa58hQvZ26svTWQaGtqo/YwDQYJKoZIhvcNAQELBQAw
+gZcxCzAJBgNVBAYTAkdCMRswGQYDVQQIExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcT
+B1NhbGZvcmQxGjAYBgNVBAoTEUNPTU9ETyBDQSBMaW1pdGVkMT0wOwYDVQQDEzRDT01PRE8g
+UlNBIENsaWVudCBBdXRoZW50aWNhdGlvbiBhbmQgU2VjdXJlIEVtYWlsIENBMB4XDTE1MDIx
+MjAwMDAwMFoXDTIwMDIxMjIzNTk1OVowgZUxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhWaXJn
+aW5pYTEPMA0GA1UEBxMGUmVzdG9uMRMwEQYDVQQKEwpJRVRGIFRydXN0MRkwFwYDVQQLExBT
+ZWNyZXRhcmlhdCBXZXN0MQ0wCwYDVQQDEwRJRVRGMSMwIQYJKoZIhvcNAQkBFhRpZXRmLWFj
+dGlvbkBpZXRmLm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUkKtMPP1RA
+FU6sxMezYJKCt4rw30RDieB8/P67TMhA6j8Um4a2Xo+CP9Ce1oMri2bwaaQPYWB4ciEL32za
+0NUE0B0iCjZZl36hon6wW6mJw1NGD/AFxnKWzhkSWG6BHMoeOAzu/ye8sHu4Jp5nazpGptK7
+30SjTS3JJFU9pHwQY6JlcmwVv0j2rsT3gj92Cbj5S+U5wCSE6+mZbCC+VPFeeI1kFITwyaIm
+uK9kSYHr15OXua/jrYNrHNRfqtexGKSgnUT96KkTh9TVvkMETB1WJS4WuEIP6GITvwVTp0lA
+qS3oNO4SM4tgFVdYqppcvZBg52kHY9y7IdR156c99zzZDBfWBduqjs/AXa0uol0EJd7qFLUs
+xEJ96XN3tPgR/Cwq18ec29pZQH6kO81Kato/RsQrj6A05TFx/J0MYE0R1MZqvIDUu55vlicb
+wT2lpXMiz1szKuvjTZRR9H/IgbKPNpt/kDUSgXLYwuKBm+nBoJXgybEyJ+A4arb60d9Uiusu
+UA8/h6s1rDMuTnIYMbIii4Y+KgevBWPawqk0xioilEMJ0RBaBVrDreuFlK8aYh+Jo2piruBA
+QnB9ZaPmEM1HPNArJxqL6XcUJTkFxNSksOATDFV5sEoBWYIe6qv2nV2r/HWDAEaa4WH2h3o/
+kASarXk3SxPXmfjOOr1XgpKjAgMBAAGjggG1MIIBsTAfBgNVHSMEGDAWgBSCr2yM+MX+lmF8
+6B89K3FIXsSLwDAdBgNVHQ4EFgQU7Olc92Oy6nkCvpv6jCj6qN8YPtUwDgYDVR0PAQH/BAQD
+AgeAMAwGA1UdEwEB/wQCMAAwRgYDVR0gBD8wPTA7BgwrBgEEAbIxAQIBAwUwKzApBggrBgEF
+BQcCARYdaHR0cHM6Ly9zZWN1cmUuY29tb2RvLmNvbS9DUFMwWgYDVR0fBFMwUTBPoE2gS4ZJ
+aHR0cDovL2NybC5jb21vZG9jYS5jb20vQ09NT0RPUlNBQ2xpZW50QXV0aGVudGljYXRpb25h
+bmRTZWN1cmVFbWFpbENBLmNybDCBiwYIKwYBBQUHAQEEfzB9MFUGCCsGAQUFBzAChklodHRw
+Oi8vY3J0LmNvbW9kb2NhLmNvbS9DT01PRE9SU0FDbGllbnRBdXRoZW50aWNhdGlvbmFuZFNl
+Y3VyZUVtYWlsQ0EuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20w
+HwYDVR0RBBgwFoEUaWV0Zi1hY3Rpb25AaWV0Zi5vcmcwDQYJKoZIhvcNAQELBQADggEBAGPm
+QUKHxkEQ9vk69vIP68anHc8UsTv5powtLSXLqUw3rAoKAdoWkKjb7ZByHCuFiNk1BvTnhQPh
+LAZm5dI8dYWKp3zgWVxsCXOQv2K4XbaQpIk8KKdLycHWsOq2OD4xBdhfTQqDj9EidhxaLf4B
+bRUePOuWVvwNqHI6OQ9FbRllUsTsSH3XK7z9Ru/0Ub07uEzmWyrIpeFRLJUg9EqQj25pw8j7
+N9ym8ItpfEQvK4Nrzt9KnGwFDaNOUjYAvejig9iUNdOXEQKVzbq8fC25HrXPQisq8u2jrP38
+cRqzwgGHZ1bJrQa8+LPBfADZ4ZHeqlEe6IqZhS/wDSuDNCIZHtkxggKqMIICpgIBA4AU7Olc
+92Oy6nkCvpv6jCj6qN8YPtUwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3
+DQEJEAEdMBwGCSqGSIb3DQEJBTEPFw0xOTA3MTQwMTMyMTdaMC8GCSqGSIb3DQEJBDEiBCAJ
+zK6u0RRfrSQ2ebn+GOxnbovlG3Raul/1zOOGmTaIPzANBgkqhkiG9w0BAQEFAASCAgBlKYNd
+euVzPDqEa13k4nQthmyJUUqjWlAVolgohXioYok8Z5BkKmkp8ANLbvkJl0hV1Al1hutTRNeF
+a5ZeWyS6nAWyPFKfRSNqwWLMIi1dX+rO7Vhf15Lz944ZYsqO+O2f7rjWUJmi8/uJKD7cFDiW
+uKkPMgvqyIMnnC3ya/sC1vU+0Feqr5JcIMs2AHQeNVe8hzN4T9Pthyax7gqbxTkg3Gyt7Mwy
+WLZeK84oJmkl9ANeVgzq+P/cmqUaqtfkBFDSxaTag/eoYM3QfHNisr/jHCazqCh88VMgwhvk
+cl6NS9hdH+aOWqQ3FE1c7VJNoQRDT7ztyKCrRJFPc4wZL8tsGkKp1lP4WcaStcbUJ65AdWPb
+3CZonLY4UOBotAUpG/PObMCmWBEpr8MN0Q+kuEO2oAe9kBoFsv7MtNfyHE4CuOANRqGLRgOL
+72hN8Cy0sGWYUy+2chH3i50cT8XkDV5Rz2Z5xW3SfyAuW53j2WKLFsKkZjfkZBopSJM20V4E
+8pPnQQ/ByFwYPyS/xJZc24vsRxgogbrf11JU8hKVkfSsq3JXxUxe5w+Sh1XGTmO5tXDKFfyi
+S+VljWVifzXaR3pmTEQPhXH4nBa4K/HYytxofDP3EMli+imil2fFBbBedZkb5CIQ/Ly3soHZ
+dZlmZDkyeXJLpkNjRAsG6V82raZd9g==
+""",
+)
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ oids = [ ]
+ for pem_text in self.pem_text_list:
+ substrate = pem.readBase64fromText(pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
+
+ sd, rest = der_decoder(asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(sd.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(sd))
+
+ oids.append(sd['encapContentInfo']['eContentType'])
+
+ self.assertIn(rfc8358.id_ct_asciiTextWithCRLF, oids)
+ self.assertIn(rfc8358.id_ct_pdf, oids)
+ self.assertIn(rfc8358.id_ct_xml, oids)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8360.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8360.py
new file mode 100644
index 0000000000..56a76cf303
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8360.py
@@ -0,0 +1,464 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8360
+
+
+class CertExtnsTestCase(unittest.TestCase):
+ extns_pem_text = """\
+MIJM7TCCRkUGCCsGAQUFBwEcBIJGNzCCRjMwgkXJBAIAATCCRcEDAgACMAoDAgAF
+AwQFBRwAMAoDBAMFHCgDAgEEMAoDAwINdAMDAg14AwMCDYwwCgMDAw2oAwMDDbAD
+AwIN8AMEAhcTMAMEAxcTOAMEAxdTQAMEBBdTcAMEBRdp4DAMAwQDF2poAwQDF2pw
+AwQEF2zQAwMAF20DBAMXbwAwDAMEBBdvEAMEBhdvADAMAwQEF29QAwQHF28AMAwD
+BAYXb8ADBAMXb/ADBAUX74ADBAMX+egwDAMEARf8QgMEABf8RDAMAwQAF/xHAwQA
+F/xIMAwDBAAX/EsDBAAX/EwDBAAX/E8DAwIYhAMEARjrFgMCABkwCwMDARtuAwQC
+G24QMAkDAgAfAwMAH84wDAMEAx/PCAMEAx/PMDAKAwQCH888AwIFADAJAwIAJQMD
+ACVuMAkDAwQlcAMCASQDBAIr4XADBAIr+6ADAwMtCAMEBS0rQAMEBS044AMEBi1B
+QAMDAS1CAwQHLUuAAwMELVADAwUtgAMEAi36GDAKAwIBLgMEBi6VgDAKAwQELpXQ
+AwIALgMDATEMAwMBMjwDAwAydzAJAwIAMwMDADNOAwMAM1AwCQMDATNSAwICMAME
+BTSQQAMCADUwCgMDAjYkAwMANiYDAgA5AwQCPQ7gMAoDAgE+AwQGPggAMAwDBAU+
+CGADBAU+DEAwDAMEBz4MgAMEBT4YQDAMAwQHPhiAAwQGPj2AMAsDAwE+PgMEBT5E
+ADAMAwQGPkRAAwQFPkTAMAoDAwA+RQMDAT5wMAsDAwA+cwMEBT51ADALAwQGPnVA
+AwMAPoYwCwMEBz6HgAMDAD6KAwQGPowAMAwDBAc+jIADBAU+rQAwDAMEBj6tQAME
+Bj7BAAMEBT7BgDALAwQGPsHAAwMBPugwCwMDAD7rAwQFPvAAAwQFPvBAMAwDBAc+
+8IADBAc+8QAwDAMEBT7xoAMEBz77AAMDAj78MAwDBAE/jSIDBAE/jSQDBAQ/jgAD
+BAVAEMADBAVAHAADBAdAKwADBAZAQUADBAZAicADBAJA7+wDBAVA/SADBAZBEoAD
+BARBEtADBAVBJ0ADBAZBY4ADBAVCT2ADBARCVEADBAZCdoADBAVCzUADBAVC1GAD
+BAJC+MwwDAMEBUMWIAMEAkMWOAMEAEOePgMEBUPRgAMEBERGwAMEBkUGAAMEBEVe
+QAMEBEVecAMEBUWsYAMEBkXCQDAJAwIATQMDAk3YMAwDBAVN3CADBAdQQwAwDAME
+BFBDkAMEBVBHADAMAwQEUEcwAwQFUEhAMAwDBARQSHADBAVQS4AwDAMEBlBLwAME
+BFBWADAMAwQFUFYgAwQGUFcAMAsDBAVQV2ADAwNQUDALAwQEUFgQAwMAUF4wDAME
+BFBfEAMEBlDwgDALAwQEUPDQAwMBUPQwCwMEBFD2EAMDA1DwMAwDBARQ+BADBAZQ
++AAwDAMEBFD4UAMEBlD5ADAMAwQEUPlQAwQFUPoAMAsDBARQ+jADAwJRADALAwQG
+UQRAAwMBUQgwDAMEB1EKgAMEBVEVQDAMAwQEURVwAwQGURYAMAwDBAVRFmADBARR
+F8AwDAMEBVEX4AMEBlEaADAMAwQEURpQAwQFUR1AMAsDBARRHXADAwJRQDALAwMD
+UUgDBAVRW8AwCwMEBFFb8AMDBlGAMAsDAwBRwQMEB1JlADALAwQGUmXAAwMHUgAw
+DAMEB1KAgAMEB1KBADALAwMBUoIDBAZSlwAwCwMEBVKXYAMDAlKYMAsDAwFSngME
+B1LJADALAwMBUsoDBANTjxAwCwMEBVOPIAMDAlQgMAsDAwBUJQMEBVTNQDALAwQH
+VM2AAwMAVOgwDAMEB1TpgAMEBVf3gDAMAwQDV/eoAwQFV/9AMAwDBAdX/4ADBARZ
+0OAwCwMDAFnRAwQEWe8gMAwDBAZZ70ADBAVb4QAwDAMEAVvhIgMEAlvrADALAwQB
+W+sGAwMBXMgwCwMDAlzMAwQFXbNAAwQDXbNoAwQCXbN4MAwDBAdds4ADBABeGhow
+DAMEAl4aHAMEAF4aHjAMAwQFXhogAwQCXhooAwQAXhotMAwDBAReGjADBABeGkIw
+DAMEAl4aRAMEAF4aSDAMAwQBXhpKAwQCXhpQMAwDBAFeGlYDBAJeZwAwDAMEA15n
+CAMEBF6+wAMEBF6+8DAMAwQHXr+AAwQGX6OAMAsDBARfo9ADAwBfqDAMAwQFX6kg
+AwQCX7W4MAoDBAZftcADAgVAAwQFYAmAAwQEYH2QAwMAYoADAwNlOAMEAmfhyAME
+AmfniAMEAmfxbAMEAmf7pAMEAmf8WAMEA2glIAMEAmiZVAMEAWjoJAMEBmjpQAME
+Amjz2AMEAmj0CDAMAwQBa5aiAwQCa5awAwQEa6GgAwQFa7WAAwQAa79EMAkDAgBt
+AwMCbegwCwMEBG3sEAMDAm3wMAkDAwBt9QMCAWwwCwMDAHHLAwQGccuAAwMBdMoD
+BAJ9PkgDAwCAAAMDAIAHAwMAgBAwCgMDAIAnAwMBgCgDAwCALTAKAwMAgEEDAwSA
+QAMDAYBWAwMAgF0DAwCAYgMEB4B0gAMDAIB8AwMAgH8DAwGAgjAKAwMAgIsDAwCA
+jgMDAICoAwMAgLADAwGAsgMDAIDHAwMAgMwDAwCA1gMDAIDoAwMAgOoDAwCA8AMD
+AIDzAwMAgPYwCgMDAIELAwMBgQwDAwCBEAMDAIEUAwMBgRoDAwCBHwMDAIFDMAoD
+AwCBRQMDAIFGAwMAgUkDAwCBWDAKAwMBgWYDAwCBaAMDAIF9AwMAgYEDAwCBhAMD
+AYGOAwMAgakDAwCBrzAKAwMAgbEDAwCBsjAKAwMAgbUDAwGBuAMDAIG7AwMBgcID
+AwCBxwMDAIHOAwMAgdADAwGB1gMDAIHZMAoDAwCB6QMDAIHqMAoDAwSB8AMDAIHy
+AwMAgfcDAwCCAAMDAYIYAwMAghwDAwCCIAMDAIIlAwMAgiswCgMDAII7AwMAgjwD
+AwGCQgMDAIJJAwMAgksDAwGCTjAKAwMBglIDAwCCVAMDAYJYAwMBglwDAwCCYgMD
+AIJkAwMAgmgDAwCCcAMDAIJzAwMAgngDAwCCfQMDAIKFAwMAgogwCgMDAYKKAwMB
+gpQDAwCCnwMDAIKhAwQHgqSAAwMAgrQDAwCCtzAKAwMAgrkDAwCCugMDAIK8AwMA
+gr4DAwGCwAMDAILGAwMAgswDAwCCzgMDAYLQAwMAgt8wCgMDAILhAwMAgvQDAwCC
+9gMEA4L4MDAMAwQBgvg6AwQGgvgAMAwDBAKC+EQDBAKC+GgDAwCC+wMDAIL/AwMA
+gwEDAwCDYQMDAINjAwMAg2YDAwCDbzAKAwMBg3IDAwGDdAMDAIOCAwMAg5gDAwGD
+mjAKAwMAg58DAwCDoDAKAwMCg6QDAwCDpgMDAIOpMAoDAwCDrQMDAYOwAwMAg7QD
+AwCDvAMDAIPNAwMAg88DAwCD0wMDAIPcAwMAg+AwCgMDAIPjAwMAg+QDAwCD5wMD
+AIPqAwMAg+0DAwCD9gMDAIP7AwMAg/4wCgMDBoRAAwMAhE4DAwCEkjAKAwMAhJUD
+AwCElgMDAISZAwMAhJswCgMDAISlAwMBhKgDAwCEqwMDAISwAwMAhLQwCgMDAIS5
+AwMChLgwCgMDAITDAwMAhMQDAwCExwMDAITjMAoDAwCE5QMDA4TgAwMAhPQDAwCE
+/AMDAoYAAwMAhhEDAwCGEwMDAIYVAwMAhhkwCgMDAIYbAwMAhhwDAwCGHjAKAwMB
+hiIDAwGGJAMDAIYvAwMAhjYwCgMDAYY6AwMBhjwDAwCGTAMDAIZRAwMAhlMwCgMD
+AYZaAwMAhm4DAwCGdwMDAIaCAwMAhooDAwGGjjAKAwMAhpEDAwKGkAMDAIaXAwMA
+hpswCgMDAIadAwMAhp4DAwCGqQMDAIarAwMAhrAwCgMDAIa3AwMAhrgDAwCGvAMD
+AIa/AwMAhssDAwCGzjAKAwMChtQDAwCG1jAKAwMAhtsDAwCG3jAKAwMAhuEDAwKG
+4AMDAIbvMAoDAwCG9QMDA4bwAwMAhvkDAwCG/wMEAodUNAMEAodUlAMDAIdaAwMA
+h8QDBAeIjwAwCwMEBIiQEAMDAIiQAwMAiJQwCgMDAIibAwMBiJwDBAeInoAwCgMD
+AIijAwMAiKQwCgMDAIipAwMAiKoDAwGIrAMDAIjHAwMAiMkDAwCIzgMDAIjhAwMB
+iOYDAwCI8wMDAIj/AwMAiREwCgMDAIkhAwMAiSIwCgMDAIkrAwMAiSwDAwCJLwMD
+AIkyMAoDAwCJNwMDAIk4AwMAiTowCgMDAok8AwMAiT4wCgMDAIlJAwMAiUoDAwCJ
+XQMDAIlgAwMAiWUDAwCJaQMDAIlsAwMBiXgDAwCJgQMDAImFAwMAiYoDAwCJnAMD
+AImjAwMAia4DAwCJvzAKAwMAicEDAwKJwAMDAYnMAwMAidADAwCJ1TAKAwMAidkD
+AwCJ2jAKAwMAid0DAwCJ4AMDAIniAwMAifgDAwGJ+gMDAIn9MAoDAwCKAwMDAIoE
+AwMAigYDAwCKDjAKAwMAihUDAwCKFjAKAwMAiiUDAwCKJgMDAYooAwMAijADAwGK
+PgMDAIpCAwMAikYDAwCKUQMDAIpgAwMAimQwCgMDAYpmAwMAimgDAwCKagMDAIp8
+AwQDioCIMAoDAwCKgwMDAIqEAwMAioYwCgMDAIq7AwMGioADAwCKwzAKAwMBisYD
+AwGKyAMDAIrLMAoDAwCKzQMDAIrOMAoDAwCK1wMDAIrYMAoDAwCK3QMDAIrkMAoD
+AwCK5wMDAYroMAoDAwKK9AMDAIr2AwMCivgDAwCK/TAKAwMAiwEDAwCLBDAKAwMB
+iwYDAwCLCDAKAwMBiwoDAwGLGDAKAwMAixsDAwCLHgMDAIstAwMAiy8DAwCLMgMD
+AIs2AwMAizowCwMDAIs/AwQHi0AAAwMAi0IDAwGLSgMDAItPAwMAi1MwCgMDAItZ
+AwMAi1wwCgMDBYtgAwMAi2IDAwCLZDAKAwMAi2kDAwCLeDAKAwMBi3oDAwCLfAMD
+AIuAAwMAi4UDBAWLisADAwCLjQMDAIuPAwMAi5EDAwCLlQMDAIuZMAoDAwKLnAMD
+AIueAwMAi6ADAwCLojAKAwMCi6QDAwCLpgMDAIuuAwMBi7IDAwCLuAMDAIu/AwMA
+i94wCgMDAIxNAwMAjE4wCgMDAoxUAwMAjFYwCgMDAIxdAwMAjF4DAwCMYQMDAIxp
+AwMBjJYDAwCMpAMDAIymAwMAjLUDAwCMywMDAIznMAkDAgCNAwMBjVQwCgMDAI1X
+AwMBjWQDAwCNaQMDAI1sAwMAjXEDAwCNcwMDAI19AwMAjYIwCgMDAY2GAwMAjYow
+CgMDAI2PAwMAjZQDAwCNowMEB42kgDAKAwMAjakDAwCNrDAKAwMAja8DAwCNsAMD
+AI3AAwQAjcEUAwQCjcFsAwQBjcHWAwMAjcIDAwCNxAMDAY3IAwMAjcsDAwCN0DAL
+AwMBjeIDBAON4oADBACN4oswCwMEBI3ikAMDAI3kAwMAjecDAwCN7QMDAI3xAwMB
+jfQwCgMDAI35AwMAjfoDAwGN/AMDAI3/AwQDjlsIAwQDjlt4AwQDjluYAwQHjpoA
+AwQDjuogAwMAjvcDAwCPKQMDAI8vMAoDAwGPMgMDAY80AwMAj0EwCwMEB49cgAMD
+AY9cAwMAj2EDAwCPYzAKAwMAj3UDAwCPdgMDAI95AwMAj34wCgMDAI+BAwMAj4ID
+AwCPoTAKAwMAj6MDAwCPpAMDAI+nAwMAj6kwCgMDBI+wAwMAj7QDAwCPxAMDAI/N
+AwMAj9IDAwCP2QMDAY/gMAoDAwCP6QMDAI/qAwMAj+0DAwCP7wMDAI/1AwMAj/wD
+AwCQAgMDAJAEAwMBkBQDAwCQGAMDAJAbAwMAkCADAwCQKTAKAwMAkCsDAwCQLAME
+ApAwPAMDAJA2AwMBkDgwCgMDAJA/AwMBkEADAwCQTAMDAJBSMAsDAwKQVAMEB5BW
+AAMDAJBXAwMAkF8DAwCQYgMDAJB3AwMAkHoDAwCQfAMDAJB/AwMAkJEDAwGQpAMD
+AJCtMAoDAwSQsAMDAZC0AwMAkMEDAwCQyAMDAJDMAwMAkM4DBAaQ0AAwCwMEB5DQ
+gAMDAZDQAwMAkPgwCQMCAJEDAwCSADAKAwMBkgIDAwCSBAMDAJITAwMAkhUDAwCS
+MAMDAJIyAwMAkjQwCgMDAJI7AwMAkjwDAwGSQgMDAJJGAwQBkkdeAwMAkkgDAwCS
+SwMDAJJNAwMBklADAwCSVwMEA5JY6AMDAJJaAwMAkmEwCgMDApJkAwMAkm4DAwCS
+cDAKAwMAkncDAwCSeAMDAJJ8AwMAkoUDAwCSiAMDAJKMAwMBkp4wCgMDAJKhAwMA
+kqIDAwCSqQMDAZKsMAoDAwCSrwMDAZKwAwMAkrMDAwCSuQMDAJK8MAoDAwCSvwMD
+AJLCMAoDAwGS0gMDAZLUAwMAktgwCgMDAJLbAwMAktwDAwCS4DAKAwMAkuMDAwCS
+5AMDAJLqAwMAkvEwCgMDAJL3AwMBkvgDAwCS+zAJAwMAkv0DAgCSAwMAkwcwCgMD
+ApMMAwMAkw4wCgMDAJMbAwMAkx4DAwGTIAMDAZMsAwMAkzQDAwCTNgMDAJM8MAoD
+AwCTQwMDAJNEAwMAk0swCgMDAJNNAwMAk04wCgMDAZNSAwMAk1QwCgMDAZNWAwMB
+k1gDAwCTWzAKAwMAk10DAwCTYDAKAwMBk2IDAwCTZAMDAJNmMAoDAwCTbwMDAJNw
+AwMAk3cDAwGTegMDAJN9AwMAk38DBAeTh4ADAwGTjjAKAwMAk5MDAwCTmAMDAJOc
+MAoDAwCToQMDApOgAwMAk6cwCgMDAJOrAwMBk6wDAwCTrwMDAZO0AwMAk7gDAwCT
+ugMDAZO8AwMAk8EDAwGTxAMDAJPJAwMAk8wDAwCT0jAKAwMAk9UDAwOT0AMDAJPc
+MAoDAwKT5AMDAZPsAwMAk/MwCgMDAZP6AwMAk/wDAwGUAgMDAJQGAwMAlDYDAwCU
+OAMEAJQ7cwMDAJQ8AwMAlD8DBAKUQDgDAwCURQMDAJRHAwQFlEzgAwMAlE8wCgMD
+AJRRAwMClFADAwCUWAMDAZRuMAoDAwGUdgMDApR4MAoDAwCUhwMDAJSIAwMAlIoD
+AwCUjAMDAJSPAwMAlJQDAwCUlwMDAJSgAwMAlKkDAwCUsAMDAJS1AwMAlLkDAwCU
+uzAKAwMClMQDAwCUxgMDAJTIMAoDAwCU+wMDAZT8AwMAlQADAwCVAwMDAJUbAwMA
+lTEDAwCVOwMDAJU+AwMAlVEDAwCVWgMDAJVtAwMAlX4wCgMDApWEAwMAlYYwCgMD
+AJWLAwMAlYwwCgMDAZWSAwMAlZQwCgMDAJWZAwMBlZwDAwCVqgMDAJWsMAoDAwCV
+sQMDAZXEMAoDAwOVyAMDAZXoMAwDBAOV6ggDBAeV6gAwCgMDAJXrAwMDlfAwCgMD
+AJX5AwMClfgDAwGV/gMDAJZqAwQClmvIAwMAlnADAwCWgAMEApaBCAMDAJaEAwMA
+lowwCgMDAJaRAwMAlpIDAwCWrwMDAJayAwMBlswwCgMDAJbVAwMAltYDAwCW2QMD
+AJbjAwMBluwDAwCW8QMDAJb0AwMAlvsDAwCW/jAJAwIAlwMDAJdkMAsDAwGXagME
+BZdqgDAMAwQGl2rAAwQEl2rgAwMAl3MDAwCXeAMDAJd/AwMAl4EDAwCXhTAKAwMA
+l4cDAwCXiAMDAZecAwMAl6oDAwCXrTAKAwMAl68DAwGXsAMDAJe0MAoDAwGXtgMD
+AJe4AwMAl7sDAwCXvTAKAwMAl9MDAwCX1DAJAwMDl9gDAgOQAwMAmEIDAwCYRwMD
+AJhJMAoDAwCYTQMDAJhOAwMAmFEwCgMDA5hYAwMAmFowCgMDAJhdAwMAmGADAwCY
+aQMDAZhyAwMAmIYDAwCYjwMDAJiWAwMAmJgDAwCZAQMDAJkFAwMAmQ8DAwCZEQMD
+AJkTAwMAmVgwCgMDAplcAwMAmWIDAwCZZDAKAwMCmWwDAwCZbgMDAJlwMAsDAwOa
+CAMEBJoIIAMEBpoIQAMDAZoOAwMAmiADAwCbBAMDAJstMAoDAwGbNgMDAJs4AwMA
+m0IDAwCbSQMDAJtpMAoDAwCbgwMDAZuEAwMBm4gDBAabikADAwCbjAMDAJuRAwMA
+m54DAwCbuQMDAJvAAwMAm8YDAwCbygMDAJvMAwMAm88wCgMDAJvRAwMAm9IDAwCb
+3zAKAwMAm+MDAwCb5AMDAJvnAwMAm/UwCgMDAJv5AwMAm/oDAwCb/QMEBZv+IAMD
+AJwKAwMAnA4wCgMDAJwRAwMAnBIDAwCcGQMDAJwcAwMAnCMDAwCcKwMDAZwwMAoD
+AwCcMwMDAJw0AwMAnDYDAwCcOgMDAJw9AwMAnEMDAwCcUwMDAJxqMAoDAwGccgMD
+AJx0AwMAnHYDAwCchQMDAJyHAwMAnJQDAwCcljAKAwMAnRcDAwGdHDAKAwMAnVMD
+AwCdVAMDAJ1YAwMAnV4DAwGdYAMDAJ1jAwQDnXjgMAsDBAKdeOwDAwCdeAMDAZ18
+AwMAnYEDAwCdiAMDAJ2KAwMAnYwDAwCdkDAKAwMAnZ0DAwWdgDAKAwMAnaEDAwCd
+pDAKAwMAnacDAwGdqDAKAwMAnasDAwGdrAMDAJ2xAwMBnbQDAwCdugMDAJ2+AwMA
+ncEDAwCdyAMDAJ3LAwMAneQDAwCd5wMDAZ3sAwMAnfMDAwCd9wMDAJ35AwMCniQw
+CgMDAJ4pAwMCnigDAwGeLjAKAwMAnjEDAwCeMgMDAJ46AwMAnjwDAwCeQAMDAZ5C
+AwMAnksDAwCeWgMDAJ5cAwMAnl4DAwCeYwMDAJ5mAwMAnmkwCgMDAJ5tAwMAnm4D
+AwCecAMDAJ53AwMCnnwDAwCegQMDAJ6DAwMAnoUDBASejBADBAaejEADAwCejwME
+BJ6SgDAKAwMCnpQDAwCelgMDAJ6YAwMAnpwDAwCeojAKAwMBnqYDAwGeqDAKAwMC
+nqwDAwGetDAKAwMBnr4DAwGexAMDAJ7YAwMAntoDAwCe3AMDAJ7fMAoDAwCe4QMD
+Ap7gMAoDAwGe5gMDAJ7qMAoDAwOe+AMDAJ76MAoDAwCe/wMDAJ8AMAoDAwCfBwMD
+AJ8IAwMAnwwDAwCfDwMDAJ8UAwMBnxYDAwCfGTAKAwMAnx0DAwWfAAMDAJ8iAwMA
+nyYDAwCfLgMDAZ8yAwMAnzoDAwCfPAMDAJ9FAwMAn0gDAwCfUQMDAJ9UAwMAn1YD
+AwGfXAMDAJ9fAwMBn2QwCgMDAJ9nAwMAn2gDAwCfawMDAJ9tAwMAn3IDBAOfdcAw
+DAMEAZ91ygMEBJ91wAMDAJ96AwMAn4IDAwCfhgMEBZ+HgAMDAJ+QMAoDAwGfkgMD
+AZ+UMAoDAwCflwMDAJ+YAwMAn5oDAwCfnAMDAJ+gAwMAn6IwCgMDAJ+nAwMAn6gD
+AwGfqgMDAJ+tMAoDAwCfswMDAJ+0AwMAn74wCgMDAJ/BAwMCn8ADAwCfxQMDAJ/I
+AwMAn80DAwCf0gMDAJ/VAwMBn9gDAwCf2wMDAJ/gAwMAn+gDAwGf7AMDAJ/vAwMA
+n/EDBAOf8ggDBAaf8kADBASf8uADAwGf9DAJAwMAn/0DAgWAMAoDAwCgBQMDAKAG
+AwMBoAgDBAKgE1wDBAKgE7QDBAOgFGADBAKgFGwDBASgFJADBAGgFNYDBACgFOUD
+BAGgFPgDAwCgJgMDAKAoMAoDAwKgLAMDBqAAMAoDAwGgQgMDAKBEAwMBoEYDAwCg
+SwMDAKBOAwMAoFADAwCgVQMDAKBcMAoDAwCgYQMDAKBkMAoDAwCgZwMDAKBoAwMA
+oHIDAwCgtAMEBKDKEAMDAaDSMAoDAwCg1QMDAKDWMAoDAwOg2AMDAaDcAwMAoOQD
+BAag50ADBACg7hUDBAKg7iQDBAKg7jQDBAGg7jwDBAKg7mADBASg7nAwCgMDAaEC
+AwMBoQQDAwGhCAMDAKEMAwMAoREDAwChFAMDAKEXAwMAoRsDAwChHgMDAKElMAoD
+AwChKQMDAKEqAwMAoTAwCgMDAqE0AwMAoTYDAwChOwMDAKE+AwMAoUMwCgMDAaFG
+AwMAoUoDAwChTAMDAKFOMAoDAwChUwMDAKFcMAoDAwOhaAMDAKFqMAoDAwGhbgMD
+AKFwAwMAoXQDAwChhgMDAKGTAwMAoZwDAwChngMDAKHKAwMAodoDAwCh5jAKAwMA
+ofsDAwCh/AMDAKILAwQDogzIAwMAog0DAwCiFQMDAKIXMAoDAwCiGQMDAKIaAwMA
+oiYDAwCiVgMDAKKFAwQHoo6AAwQBotiKAwQCotn4AwQDotpYAwQCotqwAwQDotzw
+MAwDBAOi3lgDBACi3loDBAKi9DQDBAKi9cwDBAKi+MQDBAKi+tgDBAOi+yADBAKi
+/8QDAwCjAQMDAKMDAwMAowUDAwCjCQMDAKMiMAoDAwGjPgMDAaN0AwMAo3cwCgMD
+AqOcAwMEo6ADAwCj8jAJAwICpAMDAKQoAwMApDAwCgMDAKQ7AwMBpDwDAwCkUQME
+B6RdgDAKAwMBpH4DAwSkgAMDAKSxAwMApNcDAwClTgMEBKVU0AMDAKVyAwMApcAD
+AwCl2gMDAKXeAwMDpggDBAemMYADAwCmVwMDAKdRMAsDBAGnVkIDAwOnUAMDAKdi
+AwQHp2SAAwMAp28DBAWnoAADAwCnqAMDAKesAwMAp8sDAwCn6QMDAKgBAwMAqIsD
+BAeolQADBAGolfgDAwCouwMEAaj1xAMDBakgAwQHqZQAAwQEqgpwAwQHqhGAMAwD
+BACqJcsDBACqJc4DAwCqPAMEB6plgAMEB6pmgAMEB6qFAAMDAarsAwMAqv8wCgMD
+BKsQAwMBqyADBAesUIADBAOsZ1gDBAOs8QAwDAMEA6zxKAMEA6zxMAMEA6zxQAME
+AqzxVAMEBKz/AAMEBKz/YAMEA6z/iAMEBKz/wAMEBKz/4AMEA6z/+AMEBq3UwAME
+Aq3WyAMEA63qgAMEBK30kAMEBq35AAMEA63/kAMEBq6MAAMEB69ugDAKAwIEsAME
+A7BvMDAMAwQAsG85AwQEsHOgMAwDBAOwc7gDBAewegAwCgMEBrB6wAMCALAwCgMC
+AbIDBASy7kAwCgMEBbLuYAMCALIDAwC06jAKAwIAuQMEArkIYDAMAwQDuQhoAwQC
+uRTYMAwDBAW5FOADBAO5HkAwDAMEArkeTAMEAbkm0DAMAwQCuSbUAwQCuVrwMAwD
+BAO5WvgDBAO5ZCAwDAMEArlkLAMEALlpBjAMAwQDuWkIAwQFuZIAMAwDBAK5kiQD
+BAK5qWgwDAMEBLmpcAMEBLmrwDAMAwQCuavUAwQCubAwMAwDBAO5sDgDBAS5soAw
+CgMEArmylAMCAbgwCgMCArwDBAe8gwAwCwMDAryEAwQEvNGAMAwDBAO80ZgDBAa8
+1gAwCgMEBLzWUAMCALwwDAMEAsAFHAMEAMAFHgMEAMAFJAMEAMAFMgMEAMAFOzAM
+AwQAwAU9AwQAwAU+AwQAwAVhAwQAwAWOAwQAwAWRAwQAwAWiAwQAwAXvAwQAwAX+
+AwQAwAwBAwQAwAwvAwQAwAw2AwQBwAxIAwQAwAxNAwQAwAxRAwQAwAxgAwQAwAxj
+MAwDBAbADMADBADADMIwDAMEAMAM2QMEAMAM2jAMAwQAwAznAwQAwAzoAwQAwAzr
+AwQAwAz3AwMAwA8wDAMEAMAQewMEAMAQpjAMAwQAwBC3AwQAwBDKAwQAwBLDMAwD
+BALAGhwDBATAGiAwDAMEAMAaaQMEAcAabDAMAwQAwBpvAwQAwBqAMAwDBADAGoUD
+BAPAGoAwDAMEAcAamgMEAcAawAMEAMAa5wMEAMAa6gMEAsAa7DAMAwQCwBx8AwQC
+wByAAwQAwB8OAwQAwB8XAwQBwB8aAwQAwB8fAwQAwB8oAwQAwB8+AwQAwB9mMAwD
+BAHAH6YDBALAH6gDBADAH9MDBADAH+cDBADAH/wwDAMEAMAhDwMEAMAhEAMEAMAh
+JDAMAwQAwCFXAwQEwCFgMAwDBADAIXEDBADAIXIwDAMEAcAhdgMEB8AhAAMEAcAh
+gjAMAwQAwCGPAwQBwCGQMAwDBADAIZMDBADAIaYwDAMEAMAhqQMEAsAhqDAMAwQE
+wCGwAwQAwCG2MAwDBAbAIcADBAPAIeAwDAMEAMAh6QMEBMAh4AMEAMAh/gMEAMAi
+EwMEAMAiMgMEAMAiawMEAMAidAMEAMAiszALAwMAwCMDBALAIxAwDAMEAMAjPwME
+AMAjSAMEAMAjWgMEAMAjXgMEAMAjbDAMAwQBwCOCAwQBwCOEAwQAwCOKAwQAwCOS
+MAwDBADAI5UDBAHAI5gDBADAI6wwDAMEAMAjtwMEAMAjwDAMAwQAwCPFAwQAwCPG
+MAwDBADAI80DBATAI8ADBADAI+UwDAMEBMAj8AMEAMAj9AMEAcAj9jAKAwMCwCQD
+AwDAJjAMAwQAwChFAwQAwChQAwQAwCjkMAwDBADAKWcDBADAKYgwDAMEAsApjAME
+AcApkAMEAMApkzAMAwQAwCmVAwQAwCmgAwQAwCnSAwQAwCnYAwQAwCnaAwQAwCnj
+AwQAwCoBMAwDBAHAKioDBATAKiADBADAKjUwDAMEAMAqPwMEAcAqQAMEAMAqVwME
+AMAqYQMEAMAqZAMEAMAqZjAMAwQAwCpxAwQAwCqEAwQAwCqPMAwDBALAKrQDBAHA
+KsgDBADAKv0wDAMEAcArogMEAsArqDAMAwQGwCvAAwQAwCvEAwQAwCvSAwQAwCvU
+AwQAwCvqMAsDAwLALAMEAsAsQDAMAwQAwCxHAwQAwCxaMAwDBATALPADBALALPgw
+DAMEAsAv9AMEAcAv+AMEAMAwHwMEAMAwawMEAMAwkQMEAMAw4AMEAMAw5wMDAMAx
+AwQEwDMAMAsDAwLANAMEAMA0MgMEAMA0mDAMAwQAwDSfAwQBwDSgMAwDBADANN0D
+BAXANMAwDAMEAMA0/QMEAMA0/jAMAwQAwDVnAwQAwDVoMAwDBADANh8DBADANjQw
+DAMEAcA2NgMEAMA2UAMEAMA2aAMEAMA2cTAMAwQAwDZzAwQAwDZ4AwQAwDZ6MAwD
+BADANn0DBADANoADBADANoQDBADANoswDAMEAMA2jQMEAcA23AMEAMA24QMEAcA2
+9AMEAMA2/gMEAMA3VAMEAMA3WQMEAMA3ZQMEAMA3aQMEAMA3bQMEAMA3cwMEAMA3
+gQMEAMA3hAMEAMA3vAMEAMA3wQMEAMA3xQMEAMA31AMEAMA39DAMAwQCwDocAwQC
+wDogMAwDBADAOikDBAHAOlgDBADAOsUDBADAOtowDAMEAcA64gMEAcA65AMEAMBA
+HAMEAcBALAMEAsBAZAMEAMBAfQMEAMBAyjAMAwQAwEEzAwQAwEFGMAwDBALAQVwD
+BADAQV4DBADAQWAwDAMEAMBBgwMEAMBBhAMEAMBBizAMAwQEwEGQAwQAwEGSAwQA
+wEGZMAwDBADAQbcDBAHAQcQwDAMEAMBB2wMEAMBB5AMDAMBCMAwDBADAQwMDBADA
+QwQDBADAQycDBADAQysDBADAQy8DBADAQzIDBADAQzQDBADAQzcDBADAQzoDBADA
+Q0wDBADAQ08DBADAQ1cDBADAQ14wDAMEAsBDZAMEAMBDaAMEAMBDhwMEAMBDpwME
+AMBDqjAMAwQAwEO9AwQAwEPQAwQAwEPaAwQBwEPcAwQAwEPfAwQAwEP5MAsDAwLA
+RAMEAsBEEAMEAMBEFzAMAwQAwEQfAwQCwEQwMAwDBALAREwDBALARGgwDAMEAMBE
+lwMEAcBEmDAMAwQAwESlAwQAwESqAwQAwESuAwQAwESwAwQAwES2AwQAwES6AwQA
+wETRMAwDBADARNMDBADARNgDBADARN0DBADAROADBADAROYwDAMEAcBE+gMEAMBE
+/AMEAMBE/jALAwMBwEYDBAPARnAwDAMEAMBGhQMEAMBGhgMEAMBGiDAMAwQCwEaM
+AwQFwEaAAwQDwEbAAwQAwEbyAwMAwEcwDAMEAMBJEwMEAMBJFDAMAwQBwEkiAwQA
+wEksAwQAwEniAwQAwEnlMAwDBAHATAYDBALATCAwDAMEAMBMewMEAMBMhDAMAwQB
+wEyGAwQAwEysAwQAwEywMAwDBADATPEDBADATPgDBADATQsDBAHATXIDBALATXgw
+DAMEAsBNhAMEAMBNigMEAcBNjAMEAcBQFDAMAwQAwFAfAwQAwFAqAwQAwFAuAwQA
+wFAzAwQAwFE7MAwDBADAUT0DBADAUT4DBADAUW0DBADAUXkDBADAUXsDBADAUaAw
+DAMEAcBRtgMEAMBRuAMEAMBRwgMEAMBR5gMEAMBR6gMEAMBSeQMEAMBSfAMEAMBS
+fwMEAMBSmTAMAwQAwFKdAwQAwFKeAwQAwFLWAwQBwFLcAwQAwFLxMAsDAwDAUwME
+AMBTZAMEAMBTZgMEAMBToAMEAMBTpQMEAMBTyAMEAMBTygMEAMBT2AMEAMBT3zAM
+AwQAwFPlAwQAwFPmAwQAwFQFAwQAwFQNAwQAwFQPAwQAwFQbAwQAwFQeAwQBwFQg
+AwQAwFQ+MAwDBADAVEsDBADAVFQDBADAVFcwDAMEAcBUWgMEAsBUYDAMAwQAwFRl
+AwQBwFRsMAwDBADAVH8DBADAVJwDBADAVKYDBADAVK0wDAMEBMBUsAMEBMBUwAME
+AMBU1AMEAcBU3DAMAwQBwFTiAwQBwFTkMAwDBADAVPUDBAPAVPADBADAVgsDBADA
+Vg4DBADAVhIDBADAVhkDBADAVhsDBADAVlkwDAMEAMBWfQMEB8BWAAMEAMBWhjAM
+AwQAwFaJAwQAwFaKAwQAwFajAwQBwFamAwQAwFapAwQAwFb+AwMAwFcDBADAWAED
+BADAWAQwDAMEAMBYCQMEAMBYCgMEAMBYETAMAwQAwFgXAwQAwFgYMAwDBADAWFMD
+BADAWFQDBADAWFYwDAMEAMBYYQMEAMBYYgMEAMBYbAMEAMBYdgMEAMBYewMEAMBY
+gAMEAMBYggMEAMBYhQMEAMBYxAMEAMBYzAMEAcBY7jAMAwQBwFj6AwQAwFj+AwMA
+wFkDBAHAW4wDBADAW7EDBADAW7oDBADAW70DBADAW78DBADAW8cDBADAW8kDBADA
+W9MwDAMEAcBb1gMEAcBb6DAMAwQCwFvsAwQDwFvwAwQAwFxWAwQAwFxeMAwDBAPA
+XGgDBADAXGoDBAHAXGwDBADAXHQwDAMEAMBcfQMEAsBciDAMAwQAwFyNAwQBwFyY
+MAwDBADAXJsDBADAXJwDBADAXNgDAwDAXQMEAMBeGAMEAMBeHDAMAwQAwF45AwQA
+wF46MAwDBADAXkMDBADAXkQDBADAXkwDBADAXk4wDAMEAMBebwMEAcBedDAMAwQC
+wF6cAwQCwF6gAwQAwF6sMAwDBADAXq8DBAPAXsADBADAXtQDBADAXt0DBADAXuID
+BADAXukDBADAXusDBADAXu8DAwDAYgMEAMBkEjAMAwQAwGQXAwQBwGQYAwQAwGQ0
+AwQAwGQ9AwQAwGQ/AwQAwGROAwQCwGRgMAwDBAHAZGYDBAHAZIQwDAMEAMBkhwME
+AMBkjAMEAMBkkAMEAMBkmgMEAMBlAQMEAMBlBAMEAMBlCAMEAMBlCwMEAMBlHAME
+AMBlIgMEAMBlSzAMAwQAwGVRAwQAwGVaMAwDBADAZW8DBADAZXIDBADAZXYDBADA
+ZYkwDAMEAMBloQMEAMBlqAMEAMBlqgMEAMBlsDAMAwQAwGWzAwQAwGW0AwQAwGXA
+MAwDBADAZcUDBADAZcYDBADAZfwDBADAZgEwDAMEAcBmBgMEAMBmCDAMAwQAwGYR
+AwQBwGZQAwQAwGZZAwQAwGZfMAwDBAHAZpIDBAHAZrADBADAZtYDBAHAZuAwDAME
+AMBm4wMEAcBm5AMEAMBnAgMEAMBnBwMEAMBnDgMEAMBnFAMEAMBnFzAMAwQAwGcb
+AwQAwGcoMAwDBADAZ1UDBADAZ3QwDAMEAMBniQMEAsBniAMEAMBnkwMEAMBoFwME
+AcBoHDAMAwQAwGgjAwQBwGgkAwQAwGgpAwQAwGgwAwQAwGg1MAwDBADAaDcDBADA
+aDoDBADAaEgDBADAaE0DBADAaFIDBADAaIwDBADAaI4DBADAaJMDBAHAaJowDAME
+AMBopwMEAMBoqAMEAMBo7gMEAMBo9QMEAMBo+AMEAMBo+wMEAMBpSwMDAMBqAwQA
+wGsCAwQBwGsEMAwDBADAawsDBAHAawwwDAMEAMBrMwMEAMBrZAMEAMBrbgMEAMBr
+cjAMAwQBwGt6AwQBwGuAAwQAwGuEAwQAwGuoAwQAwGuuMAwDBADAa7EDBADAa7ID
+BADAa7swDAMEA8BryAMEAcBr6DAMAwQAwGvrAwQAwGvsMAwDBADAbBcDBAHAbDAw
+DAMEAMBsMwMEAMBsXAMEAcBsZDAMAwQAwGxrAwQAwGxsMAwDBAHAbHIDBAHAbHgw
+DAMEAMBsfQMEAMBsfjAMAwQHwGyAAwQAwGyuMAwDBADAbMMDBADAbNYDBADAbOoD
+BADAbO4wCwMDAMBtAwQAwG0sMAwDBAHAbS4DBADAbUowDAMEAsBtTAMEAsBtWDAM
+AwQBwG1eAwQAwG1iMAwDBALAbWQDBADAbWYwDAMEAMBtaQMEA8BtcDAMAwQAwG15
+AwQBwG3wMAsDBADAbfMDAwHAbAMEAMBvIQMEAMBvJwMEAMBvLDAMAwQAwG8vAwQA
+wG8wAwQAwG9YAwQAwG9lMAwDBADAb2cDBADAb2gDBAHAb3wDBADAb38DBADAb/ww
+DAMEAcBwHgMEAMBwIAMEAMBwLQMEAMBwMQMEAMBwPTAMAwQBwHBGAwQEwHBAMAwD
+BAHAcGIDBADAcGQDBADAcMwDBADAcM4DBADAcNAwDAMEAMBw1QMEAMBw1gMEAMBw
+9wMEAMBw/jAKAwMAwHEDAwDAdgMDAMB5MAwDBADAegEDBADAeoIwDAMEAMB6jQME
+AMB6kjAMAwQAwHqXAwQAwHqqAwQAwHrWMAwDBAPAetgDBADAeuowDAMEAcB67gME
+AMB68gMEAMB6/jAMAwQAwHwZAwQAwHwcAwQAwHwgAwQAwHwnAwQAwHwuAwQAwHxw
+MAwDBADAfHMDBADAfHQDBADAfJswDAMEAcB8qgMEAsB82AMEAMB86zAMAwQAwHzt
+AwQBwHz0MAwDBADAfPcDBADAfPgwDAMEAcB8+gMEAMB8/gMDAMB9MAwDBADAfgED
+BADAfkAwDAMEAMCBAQMEAcCBPAMEAMCBUAMEAMCBVwMEAMCBYgMDAMCCAwQAwIMU
+MAwDBADAgxkDBADAgxoDBADAg08DBADAg1kDBADAg2ADBADAg2wDBADAg4QwDAME
+AMCECQMEAsCECAMEAMCEIgMEAMCENQMEAMCENwMEAMCEYwMEAMCE7wMEAcCE9AME
+AMCE/AMEAMCFDwMEAMCFHAMEAMCFIAMEAMCFJDAMAwQAwIU1AwQDwIUwAwQAwIU6
+AwQAwIVAAwQCwIVsAwQAwIV5AwQAwIWDAwQAwIX0AwMAwIYwDAMEAMCHBwMEAcCH
+JAMEAMCHLjAMAwQAwIczAwQBwIc0AwQAwIc/AwQAwIdCAwQAwIdEAwQAwIdSAwQA
+wIdkAwQAwIeBAwQAwIeFAwQAwIePMAwDBADAh5EDBADAh6gDBADAh68DBADAh7sD
+BADAh9sDBADAh+EwDAMEAMCH5wMEAMCH6jAMAwQAwIf9AwQAwIf+AwQAwIgHAwQA
+wIgJAwQBwIgSAwQAwIgXMAwDBADAiB0DBAXAiAADBAHAiCgDBADAiDEwDAMEAMCI
+MwMEAMCINAMEAMCIPTAMAwQAwIhHAwQAwIhmMAwDBAHAiJoDBADAiJwwDAMEAMCK
+AQMEAMCKCAMEAMCKVjAMAwQAwIppAwQBwIp0MAwDBADAipsDBADAip4DBADAircD
+BADAisADBADAiswwDAMEAsCK5AMEAMCK6AMEAMCK+AMEAMCLTgMEAcCMAgMEBsCQ
+ADAMAwQBwJBKAwQBwJBMAwQHwJEAAwQCwJHgAwQAwJJ1AwQAwJJ3MAwDBADAknsD
+BADAkn4DBADAkoQwDAMEAcCShgMEAcCSjAMEAMCSmDAMAwQAwJKjAwQAwJKqMAwD
+BALAkqwDBADAkrYDBADAkrkDBADAkrsDBADAksEDBADAkswwDAMEAMCS4wMEAMCS
+5DAMAwQAwJLpAwQAwJLqAwQBwJLuAwQAwJLyAwQAwJMXAwQAwJMiAwQAwJMkAwQA
+wJMqAwQCwJNMAwQAwJONAwQAwJOWAwQAwJObAwQAwJPUMAwDBADAk9cDBAHAk9gD
+BADAk9sDBADAk+QDBADAk/cDBADAk/swDAMEAMCUIQMEAMCUXAMEAMCUZwMEAcCU
+pjAMAwQAwJSxAwQAwJS6MAwDBAbAlMADBADAlMIwDAMEAcCUxgMEAMCU2AMEAMCU
+3AMEAMCVAwMEAMCVBQMEAMCVDwMEAMCVEwMEAMCVGzAMAwQAwJUdAwQBwJUgAwQA
+wJUjAwQAwJUpAwQAwJU5MAwDBADAlTsDBADAlTwwDAMEAMCVTQMEBMCVQAMEAMCV
+YgMEAMCVZAMEAMCVZgMEAcCVbjAMAwQAwJV1AwQBwJV4AwQAwJV+AwQAwJXjAwQA
+wJXoAwQAwJXuAwQAwJYUMAwDBAHAljoDBADAlkgwDAMEAMCWSwMEAMCWTDAMAwQB
+wJZOAwQAwJZUAwQAwJZZAwQAwJZcAwQAwJZeAwQAwJZoAwQAwJZqAwQAwJZ8AwQA
+wJaMAwQAwJaSMAwDBADAlrEDBADAlrgwDAMEAsCWvAMEAMCWvjAMAwQGwJbAAwQA
+wJbGMAwDBADAlssDBAHAlswDBAHAltADBADAlt8wDAMEAMCW4wMEAcCW6AMEAMCW
+7gMEAMCW+AMEAMCW/AMEAMCW/gMEAMCYBgMEAMCYDgMEAMCYEQMEAcCYGgMEAMCY
+KgMEAMCYLDAMAwQAwJgvAwQCwJgwAwQAwJg2MAwDBADAmD0DBAbAmAADBADAmEQD
+BADAmFIDBADAmGIwDAMEAMCYbwMEAMCYcAMEAMCYegMEAMCYfAMEAMCYjQMEAMCY
+lwMEAMCYnAMEAcCYpgMEAMCYrgMEAsCYuAMEAMCY8QMEAMCY9DAMAwQAwJj9AwQA
+wJj+AwQBwJkCAwQAwJkNAwQBwJkSAwQAwJlZAwQAwJl0AwQAwJl/AwQAwJmZAwQA
+wJmmAwQAwJmoAwQAwJmrMAwDBADAma0DBADAmbYDBAHAmbwDBADAmcIDBADAmdUw
+DAMEAMCbAQMEAMCbBgMEAMCchAMEAMCcogMEAMCcpwMEAMCc0gMEAMCc1QMEAMCc
+2QMEAMCc4wMEAMCc7wMEAMCc+DAMAwQAwJ0BAwQCwJ0AMAwDBAPAnQgDBAHAnRAD
+BADAnYEwDAMEAMCdpQMEAMCdrAMEAMCdrgMEAMCdsAMEAMCduQMEAMCduwMEAMCd
+vQMEAcCfJgMEAMCfRgMEAMCfSQMEAMCfTQMEAcCfVAMEAMCfWgMEAMCfXzAMAwQA
+wJ9jAwQDwJ9gAwQAwJ9pAwQBwJ9sAwQAwJ92MAwDBADAn3kDBADAn3oDBADAoAoD
+BADAoA8wDAMEAMCgFQMEA8CgEAMEAMCgGwMEAMCgIQMEAMCgJQMEAMCgQQMEAMCg
+QzAMAwQAwKBfAwQAwKBgAwQAwKBqMAwDBADAoG0DBADAoG4DBADAoHsDBADAoH4D
+BAHAoI4DBADAoJgDBADAoJwDBADAoKADBADAoKwwDAMEAMCgsQMEAsCgsAMEAcCg
+wgMEAcCg4DAMAwQAwKDnAwQBwKDoMAwDBADAoPUDBAHAoPgwDAMEAMCg+wMEAMCg
+/AMEAcChBgMEAcChQAMDAMCiMAwDBAXAoyADBAXAo4ADAwLApDAMAwQAwKsBAwQB
+wKsEMAwDBAfAq4ADBAPAq8ADBADArOgDBADArP0wDAMEAMCtAQMEAMCtBAMEA8Ct
+gDAMAwQGwK5AAwQAwK5EAwQBwK8OAwQEwK8gAwMAwLAwDAMEBMC7EAMEAcC7GAME
+AMC8CjAMAwQAwLw/AwQBwLxAAwQAwLxFAwQAwLxgAwQAwLxpAwQBwLx0MAwDBADA
+vHkDBADAvHoDBADAvH0DBADAvH8DBADAvIEDBADAvIQDBADAvIgDBADAvJEwDAME
+AMC8nQMEAMC8ngMEAMC8uwMEAMC8vTAMAwQAwLzpAwQBwLzsMAwDBAHAvPIDBADA
+vPgDBADAvQEDBALAvQgDBADAvQ4DBADAvRcDBADAvSkwDAMEAMC9MwMEAMC9NAME
+AMC9NwMEAMC9QjAMAwQAwL1FAwQAwL1GAwQAwL1JAwQAwL1MAwQAwL13AwQAwL2X
+AwQAwL2aAwQAwL2dAwQAwL2gMAwDBAHAvaYDBADAvaoDBAHAvcoDBADAvfsDBADA
+viwDBAHAvjoDBADAvkADBADAvkMDBADAvkUDBADAvl8wDAMEAMC+gQMEAMC+hDAM
+AwQAwL6tAwQAwL6uAwQBwL62MAwDBAHAvr4DBAHAvsAwDAMEAMC+yQMEAMC+yjAM
+AwQAwL7pAwQAwL7sAwQAwL7wAwQAwL7yMAwDBADAvvcDBAHAvvgDAwDAwgMEAMDD
+AQMEAMDDCAMEAcDDKgMEAMDDSAMEAMDDYjAMAwQAwMNpAwQAwMNqAwQAwMNuMAwD
+BALAw3QDBADAw3YwDAMEAcDDhgMEAMDDlDAMAwQAwMO3AwQAwMO4AwQAwMPDAwQA
+wMPsMAwDBADAxAEDBALAxJgDBATAxxADBADAy1ADBAHAy2wDBADAy+MwDAMEAMDO
+TQMEAcDOUAMEAMDOVjAMAwQBwM7eAwQAwM7iAwQAwM8OAwQAwM8fMAwDBADAz40D
+BADAz44DBADAz8QDBADA50MDBADA51IwDAMEAMDuAQMEAMDuCgMEAMD1mAMEAMD1
+qQMEAMD14TAMAwQAwPcBAwQAwPcKMAwDBADA+z0DBAHA+0ADBADA++IDBADA++Yw
+CgMCAMEDBADBEdYwDAMEA8ER2AMEA8ET4DAMAwQCwRPsAwQBwSmQMAwDBALBKZQD
+BAbBUgAwDAMEBcFSYAMEBcFSwDAKAwMAwVMDAwDBXjAMAwQHwV+AAwQAwWwWAwQC
+wWwYMAwDBADBbB0DBAHBbNQwDAMEAMFs1wMEAsFs+DALAwMAwW0DBAHBbUAwDAME
+AsFtRAMEA8FuYDALAwQBwW5qAwMEwWADAwDBcTAMAwQFwXIgAwQFwXJAMAwDBAXB
+cqADBAXBcwAwDAMEB8FzgAMEBcF0AAMEBsF0gAMEBsF1ADALAwQFwXVgAwMAwXYw
+CwMEB8F3gAMDAcF4MAsDAwLBfAMEAMG8BjALAwQDwbwIAwMAwbwwDAMEAcG9QgME
+B8G9ADAMAwQAwb2BAwQAwcIAMAwDBAHBwgYDBAXBwgADBAXBwmAwDAMEAMHCgQME
+BcHCgDAMAwQGwcLAAwQBwd3YMAsDBADB3dsDAwDB4gMEBsHjQDAMAwQAweOBAwQF
+wgbAMAwDBADCBuEDBAbCCQAwDAMEAcIJQgMEAcIJUDAMAwQCwglUAwQAwiO+MAwD
+BAbCI8ADBAXCT0AwCwMEB8JPgAMDAMJaMAsDAwLCXAMEBcLBAAMEBsLBQDAMAwQF
+wsHgAwQGwsyAMAoDAwDCzQMDAMLeMAwDBAXC32ADBAXC34AwDAMEBsLfwAMEBMMY
+QDAMAwQDwxhYAwQGwxiAMAwDBAXDGOADBAHDJ9gwCwMEAsMn3AMDAMMqMAwDBAXD
+KyADBALDgAAwDAMEA8OACAMEBcOmwDALAwMAw6cDBAXDp4AwDAMEBMOnsAMEBsPK
+ADAMAwQFw8pgAwQDw+pwMAwDBALD6nwDBAPD6qAwDAMEAMPqqQMEAMPquDAMAwQB
+w+q6AwQCw+r4MAsDAwDD6wMEBcP2ADAKAwQGw/ZAAwICwAMEAMQBAzAMAwQAxAEF
+AwQAxAEGAwQCxAFAMAwDBADEAUUDBADEAUYDBALEAgQwDAMEAcQDQgMEA8QDQAME
+AMQDWwMEBcQPIAMEAsYLAAMEAMYRTQMEAMYRdTAMAwQCxhG0AwQAxhG2AwQAxhYz
+MAwDBADGFl0DBAHGFmAwDAMEBcYkIAMEAMYkLgMEAsYtdAMEAMYzDDAMAwQAxjOP
+AwQAxjOSAwQCxjQsAwQCxjccAwQDxllYAwQAxmOUAwQAxmPeAwQFxmlgMAwDBATG
+hVADBAHGhVQDBADGhYwDBADGhc4DBADGheIwDAMEAMaHiQMEAMaHigMEAMaHpwME
+AMaToDAMAwQAxpSxAwQAxpSyMAwDBAHGtJYDBAHGtJgDBADGzsUwDAMEA8bOyAME
+AMbOygMEB8bwgAMEAccr9gMEAccw5gMDAMc1AwQDx1jQAwQEx1sQMAwDBAPHZwgD
+BADHZwwDBAXH9wADBAPH9zgDBAXH+oADBADKAE0DBATLn1ADBAPLvjgDBALMCwAD
+AwDMEgMEAcwwIAMEAMzh2gMEAM3JNwMEAM3TUwMEAc3c2AMEAM6nIQMEAs6+3AME
+Bc7DIAMEBc784AMEBs9ZQAMEBc+WoAMEAs+u2AMEBc+yQAMEBs+0wAMEBM+9wDAM
+AwQEz+VwAwQAz+V0MAwDBAHP5XYDBADP5XgwDAMEAc/legMEB8/lAAMEAtBSSAME
+BtEqwAMEBdGigAMEBdHOAAMEAdHOJgMEBNHVMAMEBdH64DAMAwQA0fvDAwQB0fvE
+AwQB0fv8AwQH1AAAMAwDBAXUAKADBAHUCOQwDAMEA9QI6AMEANQI8DAMAwQB1Ajy
+AwQB1Aj8MAsDAwDUCQMEBdQMwDALAwMA1A0DBAXUFoAwDAMEBtQWwAMEBtQxADAM
+AwQF1DFgAwQH1DQAMAwDBAXUNKADBAbUPAAwCwMEBdQ8YAMDBtQAMAwDBAfUQIAD
+BATURYAwDAMEBdRFoAMEBtRVgDAMAwQF1FXgAwQF1FhAMAwDBAfUWIADBAHUXGQw
+DAMEA9RcaAMEB9RfADALAwQF1F+gAwMF1EAwDAMEBdRgIAMEBtRkADAMAwQF1GRg
+AwQF1GeAMAwDBAbUZ8ADBATUdSAwDAMEBtR1QAMEBdR6wDALAwMA1HsDBAfUgQAw
+CgMDAdSCAwMA1NgwDAMEB9TZgAMEBtU3ADAMAwQH1TeAAwQG1YMAMAwDBAXVg2AD
+BAXViEAwDAMEB9WIgAMEBtWTADAMAwQF1ZNgAwQF1ZZAAwQF1ZaAMAwDBAXVluAD
+BAbVmAAwDAMEBdWYYAMEBdWaADAMAwQF1ZpgAwQF1Z6AMAwDBAbVnsADBAHVnwww
+DAMEBNWfEAMEAtWfgDAMAwQD1Z+IAwQH1awAMAwDBAXVrKADBAXVs4AwDAMEBtWz
+wAMEBdW1wDALAwMB1bYDBAXVwQAwDAMEBtXBQAMEBtXUgDAKAwMA1dUDAwDV9jAM
+AwQF1fcgAwQH1f8AMAoDBAXV/6ADAgHUAwQB2C58AwQA2GPeAwQE2J5gAwQE2KxA
+AwQG2NWAAwQF2PGAMAoDAgDZAwQE2Q5AMAwDBAXZDmADBAXZFMAwDAMEBNkU8AME
+BNkVYDAMAwQH2RWAAwQH2R0AMAwDBATZHZADBATZHcAwCwMEBdkd4AMDAtkwMAsD
+AwPZOAMEBdlAQDAMAwQE2UBwAwQG2U0AMAwDBATZTVADBAbZTgAwCwMEBNlOUAMD
+ANl0MAsDBATZdRADAwDZijALAwMC2YwDBAPZk7AwDAMEBtmTwAMEBNmqgDALAwQF
+2aqgAwMB2bAwCwMDANmzAwQE2ceAMAoDBAXZx6ADAgHYAwQC3J7EMGQEAgACMF4D
+BQAgAQAFMA0DBAEgAQYDBQEgAQf4MA0DBQAgAQf7AwQCIAEIAwQCIAEUMAwDBAEg
+ARoDBAEgAUADBAEgAUYwDAMEASABSgMEASABTAMEBCABUAMEBiADAAMDBCoAMIIG
+oAYIKwYBBQUHAR0EggaSMIIGjqCCBoowggaGAgEHAgEcAgIAiQICAOAwCAICAPgC
+AgD7AgIBBQICAR4CAgEgAgIBJgICAXcCAgF6AgICAQICAgUwCAICAhACAgIRAgIC
+IAICAikCAgIvAgICNQICAk4CAgJRAgICnTAIAgICpwICAqgwCAICArcCAgK5MAgC
+AgLFAgICxgICAsgCAgLPMAgCAgL4AgIC+QICAvwCAgL+MAgCAgMGAgIDDwICAxIw
+CAICAxUCAgMWMAgCAgRNAgIEsAICBLMCAgS1AgIEvTAIAgIE0gICBNMCAgTZAgIE
+4AICBOUCAgTpMAgCAgTzAgIE+wICBP8CAgUKAgIFETAIAgIFEwICBR0CAgUmAgIF
+PjAIAgIFSAICBUkCAgYLMAgCAgZ1AgIGdgICBn8CAgaQMAgCAgarAgIGvgICBsEC
+AgbEMAgCAgbKAgIGywICBs0CAgbUAgIG2DAIAgIG2gICBtwCAgbfAgIG5DAIAgIG
+6gICBusCAgbuAgIG8AICBvQCAgcpMAgCAgcrAgIHLQICBzEwCAICBzkCAgc6MAgC
+Agc9AgIHPjAIAgIHVQICB28wCAICB4ECAgeDAgIHhgICB4owCAICB48CAgejMAgC
+AgeoAgIHqgICB68CAgfUAgIH3DAIAgIH4AICB+EwCAICB+oCAgftAgIH9DAIAgIH
+9gICB/gCAgf7AgIH/QICB/8CAggBMAgCAggJAgIIWDAIAgIIYwICCGQwCAICCH4C
+AgjhMAgCAgjmAgIJSQICCUwwCAICCVMCAgm4AgIJvjAIAgIJ4QICCeICAgntMAgC
+AgnyAgIJ8wICChIwCAICChkCAgo2AgIKUwICClcCAgp7AgIKzjAIAgIK1QICCwYw
+CAICCw4CAgs/AgILTwICC2UCAgtpAgIL8jAIAgIMCwICDCUCAgxPMAgCAgxSAgIM
+hzAIAgIMiQICDRkwCAICDVQCAg1XAgIOKAICDwMwCAICD00CAg9OAgIQNDAIAgIR
+NQICEU4wCAICEWkCAhFqAgIRrDAIAgIR7AICEe0CAhNuAgIT4TAIAgIVAQICFZ8w
+CAICFaECAhX/AgIXswICF8UCAhgYAgIYsAICGQwwCAICGgACAho4MAgCAho6AgIa
+3jAIAgIa4AICGv8CAh+dMAgCAiAAAgIhSzAIAgIhTQICIkEwCAICIkMCAiOoMAgC
+AiOqAgIj/wICLE0CAi2MAgIvDjAIAgIwAAICMKYwCAICMKgCAjELMAgCAjENAgIz
+pzAIAgIzqQICM/8CAjY3MAgCAjwAAgI8JjAIAgI8KAICPHIwCAICPHQCAj1ZMAgC
+Aj1bAgI9uzAIAgI9vQICPdAwCAICPdICAj3ZMAgCAj3bAgI+WzAIAgI+XQICPrkw
+CAICPrsCAj9VMAgCAj9XAgI/mzAIAgI/nQICP/8CAkksAgJK6gICS7ACAkvHMAgC
+AlAAAgJQAzAIAgJQBQICUXkwCAICUXsCAlG/MAgCAlHBAgJSCjAIAgJSDAICUp8w
+CAICUqECAlL5MAgCAlL7AgJTFjAIAgJTGAICUx0CAlMfMAgCAlMhAgJTjjAIAgJT
+kAICU8swCAICU80CAlP/AgJWXAICWGMCAlibAgJayjAIAgJgAAICYJ8wCAICYKEC
+AmC0MAgCAmC2AgJg0zAIAgJg1QICYOAwCAICYOICAmECMAgCAmEEAgJhHjAIAgJh
+IAICYS0wCAICYS8CAmGaMAgCAmGcAgJiSjAIAgJiTAICYqEwCAICYqMCAmMRAgJj
+EzAIAgJjFQICY8YwCAICY8gCAmPfMAgCAmPhAgJj5zAIAgJj6QICY/8CAmUYMAgC
+AnAAAgJwCjAIAgJwDAICcBkwCAICcBsCAnDwMAgCAnDyAgJxojAIAgJxpAICcpkC
+AnKbMAgCAnKdAgJy8zAIAgJy9QICczYwCAICczgCAnNnMAgCAnNpAgJzgjAIAgJz
+hAICc60wCAICc68CAnPpMAgCAnPrAgJz/zAIAgJ4AAICeK8wCAICeLECAnkDAgJ5
+BTAIAgJ5GAICeVgwCAICeVoCAnoMMAgCAnoOAgJ7gjAIAgJ7hAICe/8wCgIDAIQA
+AgMAi/8wCgIDAJgAAgMAm/8wCgIDAKAAAgMApY0wCgIDAKWPAgMAr/8wCgIDALgA
+AgMAy/8wCgIDANwAAgMA4/8wCgIDAOgAAgMA7/8wCgIDAPIAAgMA8/8wCgIDAPuM
+AgMA++8wCgIDAwAAAgMDNZs=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extensions()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.extns_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oids = []
+ for extn in asn1Object:
+ oids.append(extn['extnID'])
+ extn_value, rest = der_decoder(
+ extn['extnValue'],
+ rfc5280.certificateExtensionsMap[extn['extnID']])
+
+ self.assertFalse(rest)
+ self.assertTrue(extn_value.prettyPrint())
+ self.assertEqual(extn['extnValue'], der_encoder(extn_value))
+
+ self.assertIn(rfc8360.id_pe_ipAddrBlocks_v2, oids)
+ self.assertIn(rfc8360.id_pe_autonomousSysIds_v2, oids)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8398.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8398.py
new file mode 100644
index 0000000000..b5248318b4
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8398.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8398
+
+
+class EAITestCase(unittest.TestCase):
+ pem_text = "oCAGCCsGAQUFBwgJoBQMEuiAgeW4q0BleGFtcGxlLmNvbQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.GeneralName()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertIn(asn1Object['otherName']['type-id'],
+ rfc5280.anotherNameMap)
+ self.assertEqual(rfc8398.id_on_SmtpUTF8Mailbox,
+ asn1Object['otherName']['type-id'])
+
+ eai, rest = der_decoder(
+ asn1Object['otherName']['value'],
+ asn1Spec=rfc5280.anotherNameMap[asn1Object['otherName']['type-id']])
+
+ self.assertFalse(rest)
+ self.assertTrue(eai.prettyPrint())
+ self.assertEqual(asn1Object['otherName']['value'], der_encoder(eai))
+ self.assertEqual(u'\u8001', eai[0])
+ self.assertEqual(u'\u5E2B', eai[1])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc8398.id_on_SmtpUTF8Mailbox, asn1Object['otherName']['type-id'])
+ self.assertEqual(u'\u8001', asn1Object['otherName']['value'][0])
+
+ self.assertEqual(u'\u5E2B', asn1Object['otherName']['value'][1])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8410.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8410.py
new file mode 100644
index 0000000000..d6df485536
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8410.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5208
+from pyasn1_modules import rfc8410
+
+
+class PrivateKeyTestCase(unittest.TestCase):
+ no_pub_key_pem_text = ("MC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwo"
+ "y/HU++CXqI9EdVhC")
+
+ def setUp(self):
+ self.asn1Spec = rfc5208.PrivateKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.no_pub_key_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ rfc8410.id_Ed25519, asn1Object['privateKeyAlgorithm']['algorithm'])
+ self.assertTrue(asn1Object['privateKey'].isValue)
+ self.assertEqual(
+ "0x0420d4ee", asn1Object['privateKey'].prettyPrint()[0:10])
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8418.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8418.py
new file mode 100644
index 0000000000..b5e8d3e829
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8418.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8418
+
+
+class KeyAgreeAlgTestCase(unittest.TestCase):
+ key_agree_alg_id_pem_text = "MBoGCyqGSIb3DQEJEAMUMAsGCWCGSAFlAwQBLQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.key_agree_alg_id_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(
+ rfc8418.dhSinglePass_stdDH_hkdf_sha384_scheme,
+ asn1Object['algorithm'])
+ self.assertTrue(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8419.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8419.py
new file mode 100644
index 0000000000..3ad05cb611
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8419.py
@@ -0,0 +1,130 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8419
+
+
+class Ed25519TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MAUGAytlcA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_Ed25519, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class Ed448TestCase(unittest.TestCase):
+ alg_id_2_pem_text = "MAUGAytlcQ=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_2_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_Ed448, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class SHA512TestCase(unittest.TestCase):
+ alg_id_3_pem_text = "MAsGCWCGSAFlAwQCAw=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_3_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_sha512, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class SHAKE256TestCase(unittest.TestCase):
+ alg_id_4_pem_text = "MAsGCWCGSAFlAwQCDA=="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_4_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_shake256, asn1Object['algorithm'])
+ self.assertFalse(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+class SHAKE256LENTestCase(unittest.TestCase):
+ alg_id_5_pem_text = "MA8GCWCGSAFlAwQCEgICAgA="
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.alg_id_5_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_shake256_len, asn1Object['algorithm'])
+ self.assertTrue(asn1Object['parameters'].isValue)
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ param, rest = der_decoder(
+ asn1Object['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[asn1Object['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(param.prettyPrint())
+ self.assertEqual(asn1Object['parameters'], der_encoder(param))
+ self.assertEqual(512, param)
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.alg_id_5_pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(rfc8419.id_shake256_len, asn1Object['algorithm'])
+ self.assertEqual(512, asn1Object['parameters'])
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8479.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8479.py
new file mode 100644
index 0000000000..e5b135f73d
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8479.py
@@ -0,0 +1,108 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc8479
+
+
+class ValidationParmTestCase(unittest.TestCase):
+ pem_text = """\
+MIIE/gIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCpPwXwfhDsWA3q
+jN2BWg1xfDjvZDVNfgTV/b95g304Aty3z13xPXAhHZ3ROW3pgPxTj9fiq7ZMy4Ua
+gMpPK81v3pHX1uokC2KcGXbgbAq2Q8ClxSXgEJllRwDENufjEdV10gArt8NlIP0N
+lota1kQUuI1DMsqc5DTIa35Nq4j1GW+KmLtP0kCrGq9fMGwjDbPEpSp9DTquEMHJ
+o7kyJIjB+93ikLvBUTgbxr+jcnTLXuhA8rC8r+KXre4NPPNPRyefRcALLt/URvfA
+rTvFOQfi3vIjNhBZL5FdC+FVAr5QnF3r2+cuDPbnczr4/rr81kzFGWrwyAgF5FWu
+pFtB5IYDAgMBAAECggEAHZ88vGNsNdmRkfhWupGW4cKCuo+Y7re8Q/H2Jd/4Nin2
+FKvUPuloaztiSGDbVm+vejama/Nu5FEIumNJRYMeoVJcx2DDuUxO1ZB1aIEwfMct
+/DWd0/JDzuCXB0Cu5GTWLhlz0zMGHXihIdQ0DtGKt++3Ncg5gy1D+cIqqJB515/z
+jYdZmb0Wqmz7H3DisuxvnhiCAOuNrjcDau80hpMA9TQlb+XKNGHIBgKpJe6lnB0P
+MsS/AjDiDoEpP9GG9mv9+96rAga4Nos6avYlwWwbC6d+hHIWvWEWsmrDfcJlm2gN
+tjvG8omj00t5dAt7qGhfOoNDGr5tvJVo/g96O/0I8QKBgQDdzytVRulo9aKVdAYW
+/Nj04thtnRaqsTyFH+7ibEVwNIUuld/Bp6NnuGrY+K1siX8+zA9f8mKxuXXV9KK4
+O89Ypw9js2BxM7VYO9Gmp6e1RY3Rrd8w7pG7/KqoPWXkuixTay9eybrJMWu3TT36
+q7NheNmBHqcFmSQQuUwEmvp3MQKBgQDDVaisMJkc/sIyQh3XrlfzmMLK+GlPDucD
+w5e50fHl8Q5PmTcP20zVLhTevffCqeItSyeAno94Xdzc9vZ/rt69410kJEHyBO9L
+CmhtYz94wvSdRhbqf4VzAl2WU184sIYiIZDGsnGScgIYvo6v6mITjRhc8AMdYoPR
+rL6xp6frcwKBgFi1+avCj6mFzD+fxqu89nyCmXLFiAI+nmjTy7PM/7yPlNB76qDG
+Dil2bW1Xj+y/1R9ld6S1CVnxRbqLe+TZLuVS82m5nRHJT3b5fbD8jquGJOE+e+xT
+DgA0XoCpBa6D8yRt0uVDIyxCUsVd5DL0JusN7VehzcUEaZMyuL+CyDeRAoGBAImB
+qH6mq3Kc6Komnwlw4ttJ436sxr1vuTKOIyYdZBNB0Zg5PGi+MWU0zl5LDroLi3vl
+FwbVGBxcvxkSBU63FHhKMQw7Ne0gii+iQQcYQdtKKpb4ezNS1+exd55WTIcExTgL
+tvYZMhgsh8tRgfLWpXor7kWmdBrgeflFiOxZIL1/AoGAeBP7sdE+gzsh8jqFnVRj
+7nOg+YllJAlWsf7cTH4pLIy2Eo9D+cNjhL9LK6RaAd7PSZ1adm8HfaROA2cfCm84
+RI4c7Ue0G+N6LZiFvC0Bfi5SaPVAExXOty8UqjOCoZavSaXBPuNcTXZuzswcgbxI
+G5/kaJNHoEcdlVsPsYWKRNKgPzA9BgorBgEEAZIIEggBMS8wLQYJYIZIAWUDBAIC
+BCCK9DKMh7687DHjA7j1U37/y2qR2UcITZmjaYI7NvAUYg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5958.OneAsymmetricKey()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object['attributes']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+
+ if attr['attrType'] == rfc8479.id_attr_validation_parameters:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+ self.assertEqual(rfc4055.id_sha384, av['hashAlg'])
+
+ seed = univ.OctetString(hexValue='8af4328c87bebcec31e303b8f55'
+ '37effcb6a91d947084d99a36982'
+ '3b36f01462')
+
+ self.assertEqual(seed, av['seed'])
+
+ def testOpenTypes(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for attr in asn1Object['attributes']:
+ self.assertIn(attr['attrType'], rfc5652.cmsAttributesMap)
+ if attr['attrType'] == rfc8479.id_attr_validation_parameters:
+ av = attr['attrValues'][0]
+
+ self.assertEqual(av['hashAlg'], rfc4055.id_sha384)
+
+ seed = univ.OctetString(hexValue='8af4328c87bebcec31e303b8f553'
+ '7effcb6a91d947084d99a369823b'
+ '36f01462')
+
+ self.assertEqual(seed, av['seed'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8494.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8494.py
new file mode 100644
index 0000000000..2951e39200
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8494.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc8494
+
+
+class CompressedDataTestCase(unittest.TestCase):
+ pem_text = """\
+MIIBNqADAgEAMIIBLaADAgEZoIIBJASCASB4nG2P0U7CQBBF3/cr5l2K3YpSF5YA
+bYmbWArtQsJjKVuogd1mO0T8e0ti1IjJZB4md07OHZbWnMbqkp/qo+oW5jSCWDqL
+VCSpkBveg2kSbrg/FTIWcQRpJPlLmGYQzdci5MvlA+3Rx2cyREO/KVrhCOaJFLMN
+n03E6yqNIEmDheS2LHzPG0zNdqw0dn89XAnev4RsFQRRlnW+SITMWmMGf72JNAyk
+oXCj0mnPHtzwSZijYuD1YVJb8FzaB/rE2n3nUtcl2Xn7pgpkkAOqBsm1vrNWtqmM
+ZkC7LgmMxraFgx91y0F1wfv6mFd6AMUht41CfsbS8X9yNtdNqayjdGF2ld4z8LcV
+EiIPVQPtvBuLBxjW5qx3TbXXo6vHJ1OhhLY=
+
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc8494.CompressedData()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ 0, asn1Object['compressionAlgorithm']['algorithmID-ShortForm'])
+
+ cci = asn1Object['compressedContentInfo']
+
+ self.assertEqual(
+ 25, cci['unnamed']['contentType-ShortForm'])
+ self.assertEqual(
+ '0x789c6d8fd1', cci['compressedContent'].prettyPrint()[:12])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8520.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8520.py
new file mode 100644
index 0000000000..da615dccfe
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8520.py
@@ -0,0 +1,115 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8520
+
+
+class MUDCertTestCase(unittest.TestCase):
+ mud_cert_pem_text = """\
+MIIFODCCAyCgAwIBAgICEEAwDQYJKoZIhvcNAQELBQAwZTELMAkGA1UEBhMCQ0gx
+DzANBgNVBAgMBlp1cmljaDERMA8GA1UEBwwIV2V0emlrb24xEDAOBgNVBAoMB0lt
+UmlnaHQxIDAeBgNVBAMMF0ltUmlnaHQgVGVzdCA4MDIuMUFSIENBMB4XDTE5MDUw
+MTE4MDMyMVoXDTE5MDUzMTE4MDMyMVowZzELMAkGA1UEBhMCQ0gxEzARBgNVBAgM
+ClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEg
+MB4GA1UEAwwXTGlnaHRidWxiMjAwMCwgU04jMjAyMDIwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQCzntv6tCdkZWPUx+CK9A9PCgKF8zGCJwdU4eIjo0oe
+A81i7iltOPnU416GJMEc2jGhlZPn2Rjjy8tPbyh1RVBfkgdq4UPWPnZPb+Gkq1c8
+X8zLRrMSWKqkSGOPENieDuQpzcrkMfj7dCPcxTcJ5Gluv1jEI7bxoZOZXjNxaFXi
+vsaZWFub7b+5zDLWpvmpKDaeCU+gad7rWpRE/Hjh3FX8paW8KE/hMF/au4xX2Qj/
+rDwHSxgs3n8FtuFUELotSgL3Acy3aISmJILBx6XrSs3nLruZzamulwWupSryHo3L
+U+GsOETiXwxiyrfOZo3aJNnWzlEvrYCQGyqd8Nd/XOENAgMBAAGjge8wgewwCQYD
+VR0TBAIwADBABggrBgEFBQcBGQQ0FjJodHRwczovL3d3dy5vZmNvdXJzZWltcmln
+aHQuY29tL0x1bWluYWlyZV8xNTAuanNvbjBdBggrBgEFBQcBHgRRME8xCzAJBgNV
+BAYTAkNIMSswKQYJKoZIhvcNAQkBFhxhc2NlcnRpYUBvZmNvdXJzZWltcmlnaHQu
+Y29tMRMwEQYDVQQDEwpFbGlvdCBMZWFyMB0GA1UdDgQWBBS00spi6cRFdqz95TQI
+9AuPn5/DRjAfBgNVHSMEGDAWgBREKvrASIa7JJ41mQWDkJ06rXTCtTANBgkqhkiG
+9w0BAQsFAAOCAgEAiS4OlazkDpgR4qhrq5Wpx6m3Bmkk5RkXnqey1yyhyfZlAGH7
+ewQiybkF3nN6at/TcNWMRfGBLhRrQn1h75KEXKlc18RDorj72/bvkbJLoBmA43Mv
+xMF0w4YX8pQwzb4hSt04p79P2RVVYM3ex/vdok0KkouhLTlxzY7vhv1T8WGTVQHJ
+k2EyswS2nFa/OtIkwruXqJj+lotdV2yPgFav5j9lkw5VbOztlfSKT7qQInVm+VBI
+/qddz/LOYrls1A7KHzWkTvOwmvQBqI4e9xLjc3r8K4pZyMd7EsmepYmLOU+pfINf
+/sEjliCluR65mKcKGiUa5J31pzbVpCr6FM/NGEjqpp6F+slyNC8YM/UlaJK1W9ZI
+W7JAhmfil5z1CtQILFSnUh4VneTVOaYg6+gXr169fXUDlMM4ECnuqWAE2PLhfhI8
++lY8u18rFiX0bNSiUySgxU3asCC92xNmvJHuL4QwiYaGtTne36NMN7dH/32nMKl+
+G3XA8cX8yZIrIkmWLBSji8UwOXwVhYovmbhHjaUMTQommxYv/Cuqi5nJUJfh5YJr
+APeEK6fTYpPMiZ6U1++qzZDp78MRAq7UQbluJHh8ujPuK6kQmSLXmvK5yGpnJ+Cw
+izaUuU1EEwgOMELjeFL62Ssvq8X+x6hZFCLygI7GNeitlblNhCXhFFurqMs=
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Certificate()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.mud_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ extn_list = []
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ extn_list.append(extn['extnID'])
+
+ if extn['extnID'] == rfc8520.id_pe_mudsigner:
+ mudsigner, rest = der_decoder(
+ extn['extnValue'], rfc8520.MUDsignerSyntax())
+
+ self.assertEqual(extn['extnValue'], der_encoder(mudsigner))
+
+ c = rfc5280.X520countryName(value="CH")
+
+ self.assertEqual(mudsigner[0][0][0]['value'], der_encoder(c))
+
+ e = rfc5280.EmailAddress(value="ascertia@ofcourseimright.com")
+
+ self.assertEqual(mudsigner[0][1][0]['value'], der_encoder(e))
+
+ cn = rfc5280.X520CommonName()
+ cn['printableString'] = "Eliot Lear"
+
+ self.assertEqual(mudsigner[0][2][0]['value'], der_encoder(cn))
+
+ if extn['extnID'] == rfc8520.id_pe_mud_url:
+ mudurl, rest = der_decoder(
+ extn['extnValue'], rfc8520.MUDURLSyntax())
+
+ self.assertEqual(extn['extnValue'], der_encoder(mudurl))
+ self.assertEqual(".json", mudurl[-5:])
+
+ self.assertIn(rfc8520.id_pe_mudsigner, extn_list)
+ self.assertIn(rfc8520.id_pe_mud_url, extn_list)
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.mud_cert_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ for extn in asn1Object['tbsCertificate']['extensions']:
+ if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
+ extnValue, rest = der_decoder(
+ extn['extnValue'],
+ asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+ self.assertEqual(extn['extnValue'], der_encoder(extnValue))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8619.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8619.py
new file mode 100644
index 0000000000..cd54db669e
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8619.py
@@ -0,0 +1,80 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der import decoder as der_decoder
+from pyasn1.codec.der import encoder as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8619
+
+
+class HKDFSHA256TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MA0GCyqGSIb3DQEJEAMc"
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+
+ self.assertEqual(
+ rfc8619.id_alg_hkdf_with_sha256, asn1Object['algorithm'])
+
+
+class HKDFSHA384TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MA0GCyqGSIb3DQEJEAMd"
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+ asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(
+ rfc8619.id_alg_hkdf_with_sha384, asn1Object['algorithm'])
+
+
+class HKDFSHA512TestCase(unittest.TestCase):
+ alg_id_1_pem_text = "MA0GCyqGSIb3DQEJEAMe"
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.AlgorithmIdentifier()
+
+ def testDerCodec(self):
+
+ substrate = pem.readBase64fromText(self.alg_id_1_pem_text)
+
+ asn1Object, rest = der_decoder.decode(
+ substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder.encode(asn1Object))
+ self.assertEqual(
+ rfc8619.id_alg_hkdf_with_sha512, asn1Object['algorithm'])
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8649.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8649.py
new file mode 100644
index 0000000000..67f8f9fd39
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8649.py
@@ -0,0 +1,60 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8649
+
+
+class RootCertificateExtnTestCase(unittest.TestCase):
+ extn_pem_text = """\
+MGEGCisGAQQBg5IbAgEEUzBRMA0GCWCGSAFlAwQCAwUABEBxId+rK+WVDLOda2Yk
+FFRbqQAztXhs91j/RxHjYJIv/3gleQg3Qix/yQy2rIg3xysjCvHWw8AuYOGVh/sL
+GANG
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.Extension()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.extn_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+ self.assertEqual(rfc8649.id_ce_hashOfRootKey, asn1Object['extnID'])
+
+ hashed_root_key, rest = der_decoder(
+ asn1Object['extnValue'], rfc8649.HashedRootKey())
+
+ self.assertFalse(rest)
+ self.assertTrue(hashed_root_key.prettyPrint())
+ self.assertEqual(asn1Object['extnValue'], der_encoder(hashed_root_key))
+ self.assertEqual(
+ rfc4055.id_sha512, hashed_root_key['hashAlg']['algorithm'])
+
+ def testExtensionsMap(self):
+ substrate = pem.readBase64fromText(self.extn_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+
+ self.assertFalse(rest)
+ self.assertEqual(rfc8649.id_ce_hashOfRootKey, asn1Object['extnID'])
+ self.assertIn(asn1Object['extnID'], rfc5280.certificateExtensionsMap)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8692.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8692.py
new file mode 100644
index 0000000000..416b59ce07
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8692.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc8692
+
+
+class AlgorithmIdentifierTestCase(unittest.TestCase):
+ pem_text = """\
+MEowCwYJYIZIAWUDBAILMAsGCWCGSAFlAwQCDDAKBggrBgEFBQcGHjAKBggrBgEF
+BQcGHzAKBggrBgEFBQcGIDAKBggrBgEFBQcGIQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5751.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid_list = (
+ rfc8692.id_shake128,
+ rfc8692.id_shake256,
+ rfc8692.id_RSASSA_PSS_SHAKE128,
+ rfc8692.id_RSASSA_PSS_SHAKE256,
+ rfc8692.id_ecdsa_with_shake128,
+ rfc8692.id_ecdsa_with_shake256,
+ )
+
+ count = 0
+ for algid in asn1Object:
+ self.assertTrue(algid['capabilityID'] in oid_list)
+ count += 1
+
+ self.assertEqual(len(oid_list), count)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8696.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8696.py
new file mode 100644
index 0000000000..119f65826c
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8696.py
@@ -0,0 +1,193 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5083
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8696
+
+
+class KeyTransPSKTestCase(unittest.TestCase):
+ key_trans_psk_pem_text = """\
+MIICigYLKoZIhvcNAQkQARegggJ5MIICdQIBADGCAiekggIjBgsqhkiG9w0BCRANATCCAhIC
+AQAEE3B0Zi1rbWM6MTM2MTQxMjIxMTIwDQYLKoZIhvcNAQkQAx0wCwYJYIZIAWUDBAEtMIIB
+sDCCAawCAQKAFJ7rZ8m5WnTUTS8WOWaA6AG1y6ScMA0GCSqGSIb3DQEBAQUABIIBgKo/Hkhu
+eoOdn1/cIEpt38NbEEdSC586IWcG+0l+ND9pcmQvvKvscpvFFVAjqLjvoXGatmSazr2Q4BVS
+yWKm0JqlyVWEAhRsU7wNlD7zRAKI8+obWpU57gjEKs13D8gb1PI2YPZWajN1Ye+yHSF6h+fb
+7YtaQepxTGHYF0LgHaAC8cqtgwIRW8N4Gnvl0Uuz+YEZXUX0I8fvJG6MKCEFzwHvfrfPb3rW
+B8k7BHfekRpY+793JNrjSP2lY+W0fhqBN8dALDKGqlbUCyojMQkQiD/iXSBRbZWiJ1CE92iT
+x7Ji9irq8rhYDNoDP2vghJUaepoZgIJwPWqhoTH+KRPqHTjLnnbi/TGzEdeO5h0C9Gc0DVzs
+9OHvHknQ7mSxPT9xKMXGztVT+P3a9ct6TaMotpMqL9cuZxTYGpHMYNkLSUXFSadAGFrgP7QV
+FGwC/Z/YomEzSLPgZi8HnVHsAGkJzXxmM/PJBu4dAXcKjEv/GgpmaS2B7gKHUpTyyAgdsBsy
+2AQo6glHJQ+mbNUlWV5Sppqq3ojvzxsPEIq+KRBgORsc31kH82tAZ+RTQjA3BgkqhkiG9w0B
+BwEwGwYJYIZIAWUDBAEuMA4EDMr+ur76ztut3sr4iIANmvLRbyFUf87+2bPvLQQMoOWSXMGE
+4BckY8RM
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.key_trans_psk_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ aed, rest = der_decoder(
+ asn1Object['content'],
+ asn1Spec=rfc5083.AuthEnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(aed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(aed))
+ self.assertEqual(0, aed['version'])
+
+ ri = aed['recipientInfos'][0]
+ self.assertEqual(rfc8696.id_ori_keyTransPSK, ri['ori']['oriType'])
+
+ ktpsk, rest = der_decoder(
+ ri['ori']['oriValue'],
+ asn1Spec=rfc8696.KeyTransPSKRecipientInfo())
+
+ self.assertFalse(rest)
+ self.assertTrue(ktpsk.prettyPrint())
+ self.assertEqual(ri['ori']['oriValue'], der_encoder(ktpsk))
+ self.assertEqual(0, ktpsk['version'])
+
+ ktri = ktpsk['ktris'][0]
+ self.assertEqual(2, ktri['version'])
+
+ def testOtherRecipientInfoMap(self):
+ substrate = pem.readBase64fromText(self.key_trans_psk_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ aed, rest = der_decoder(
+ asn1Object['content'],
+ asn1Spec=rfc5083.AuthEnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(aed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(aed))
+ self.assertEqual(0, aed['version'])
+
+ ri = aed['recipientInfos'][0]
+ self.assertIn(ri['ori']['oriType'], rfc5652.otherRecipientInfoMap)
+
+ ori, rest = der_decoder(
+ ri['ori']['oriValue'],
+ asn1Spec=rfc5652.otherRecipientInfoMap[ri['ori']['oriType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ori.prettyPrint())
+ self.assertEqual(ri['ori']['oriValue'], der_encoder(ori))
+
+class KeyAgreePSKTestCase(unittest.TestCase):
+ key_agree_psk_pem_text = """\
+MIIBRwYLKoZIhvcNAQkQARegggE2MIIBMgIBADGB5aSB4gYLKoZIhvcNAQkQDQIwgdICAQAE
+FHB0Zi1rbWM6MjE2ODQwMTEwMTIxoFWhUzATBgYrgQQBCwEGCWCGSAFlAwQBLQM8AAQ5G0Em
+Jk/2ks8sXY1kzbuG3Uu3ttWwQRXALFDJICjvYfr+yTpOQVkchm88FAh9MEkw4NKctokKNgps
+MA0GCyqGSIb3DQEJEAMdMAsGCWCGSAFlAwQBLTBEMEKgFgQU6CGLmLi32Gtenr3IrrjE7NwF
+xSkEKCKf4LReQAA+fYJE7Bt+f/ssjcoWw29XNyIlU6cSY6kr3giGamAtY/QwNwYJKoZIhvcN
+AQcBMBsGCWCGSAFlAwQBLjAOBAzbrd7K+IjK/rq++s6ADfxtb4I+PtLSCdDG/88EDFUCYMQu
+WylxlCbB/w==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.key_agree_psk_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ aed, rest = der_decoder(
+ asn1Object['content'],
+ asn1Spec=rfc5083.AuthEnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(aed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(aed))
+ self.assertEqual(0, aed['version'])
+
+ ri = aed['recipientInfos'][0]
+ self.assertEqual(rfc8696.id_ori_keyAgreePSK, ri['ori']['oriType'])
+
+ kapsk, rest = der_decoder(
+ ri['ori']['oriValue'],
+ asn1Spec=rfc8696.KeyAgreePSKRecipientInfo())
+
+ self.assertFalse(rest)
+ self.assertTrue(kapsk.prettyPrint())
+ self.assertEqual(ri['ori']['oriValue'], der_encoder(kapsk))
+ self.assertEqual(0, kapsk['version'])
+
+ rek = kapsk['recipientEncryptedKeys'][0]
+ ski = rek['rid']['rKeyId']['subjectKeyIdentifier']
+ expected_ski = univ.OctetString(
+ hexValue='e8218b98b8b7d86b5e9ebdc8aeb8c4ecdc05c529')
+
+ self.assertEqual(expected_ski, ski)
+
+ def testOtherRecipientInfoMap(self):
+ substrate = pem.readBase64fromText(self.key_agree_psk_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ rfc5083.id_ct_authEnvelopedData, asn1Object['contentType'])
+
+ aed, rest = der_decoder(
+ asn1Object['content'],
+ asn1Spec=rfc5083.AuthEnvelopedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(aed.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(aed))
+ self.assertEqual(0, aed['version'])
+
+ ri = aed['recipientInfos'][0]
+ self.assertIn(ri['ori']['oriType'], rfc5652.otherRecipientInfoMap)
+
+ ori, rest = der_decoder(
+ ri['ori']['oriValue'],
+ asn1Spec=rfc5652.otherRecipientInfoMap[ri['ori']['oriType']])
+
+ self.assertFalse(rest)
+ self.assertTrue(ori.prettyPrint())
+ self.assertEqual(ri['ori']['oriValue'], der_encoder(ori))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ sys.exit(not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8702.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8702.py
new file mode 100644
index 0000000000..d6303cfca5
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8702.py
@@ -0,0 +1,140 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1.type import univ
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6211
+from pyasn1_modules import rfc8702
+
+
+class AlgorithmIdentifierTestCase(unittest.TestCase):
+ pem_text = """\
+MEowCwYJYIZIAWUDBAILMAsGCWCGSAFlAwQCDDAKBggrBgEFBQcGHjAKBggrBgEF
+BQcGHzAKBggrBgEFBQcGIDAKBggrBgEFBQcGIQ==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc2985.SMIMECapabilities()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ oid_list = (
+ rfc8702.id_shake128,
+ rfc8702.id_shake256,
+ rfc8702.id_RSASSA_PSS_SHAKE128,
+ rfc8702.id_RSASSA_PSS_SHAKE256,
+ rfc8702.id_ecdsa_with_shake128,
+ rfc8702.id_ecdsa_with_shake256,
+ )
+
+ for algid in asn1Object:
+ self.assertIn(algid['algorithm'], oid_list)
+
+
+class AuthenticatedDataTestCase(unittest.TestCase):
+ auth_message_pem_text = """\
+MIIDqgYLKoZIhvcNAQkQAQKgggOZMIIDlQIBADGCAk8wggJLAgEAMDMwJjEUMBIG
+A1UECgwLZXhhbXBsZS5jb20xDjAMBgNVBAMMBUFsaWNlAgkAg/ULtwvVxA4wDQYJ
+KoZIhvcNAQEBBQAEggIAdZphtN3x8a8kZoAFY15HYRD6JyPBueRUhLbTPoOH3pZ9
+xeDK+zVXGlahl1y1UOe+McEx2oD7cxAkhFuruNZMrCYEBCTZMwVhyEOZlBXdZEs8
+rZUHL3FFE5PJnygsSIO9DMxd1UuTFGTgCm5V5ZLFGmjeEGJRbsfTyo52S7iseJqI
+N3dl743DbApu0+yuUoXKxqKdUFlEVxmhvc+Qbg/zfiwu8PTsYiUQDMBi4cdIlju8
+iLjj389xQHNyndXHWD51is89GG8vpBe+IsN8mnbGtCcpqtJ/c65ErJhHTR7rSJSM
+EqQD0LPOCKIY1q9FaSSJfMXJZk9t/rPxgUEVjfw7hAkKpgOAqoZRN+FpnFyBl0Fn
+nXo8kLp55tfVyNibtUpmdCPkOwt9b3jAtKtnvDQ2YqY1/llfEUnFOVDKwuC6MYwi
+fm92qNlAQA/T0+ocjs6gA9zOLx+wD1zqM13hMD/L+T2OHL/WgvGb62JLrNHXuPWA
+8RShO4kIlPtARKXap2S3+MX/kpSUUrNa65Y5uK1jwFFclczG+CPCIBBn6iJiQT/v
+OX1I97YUP4Qq6OGkjK064Bq6o8+e5+NmIOBcygYRv6wA7vGkmPLSWbnw99qD728b
+Bh84fC3EjItdusqGIwjzL0eSUWXJ5eu0Z3mYhJGN1pe0R/TEB5ibiJsMLpWAr3gw
+FQYJYIZIAWUDBAITMAgEBnB5YXNuMaELBglghkgBZQMEAgswNQYJKoZIhvcNAQcB
+oCgEJldhdHNvbiwgY29tZSBoZXJlIC0gSSB3YW50IHRvIHNlZSB5b3UuooG/MBgG
+CSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE5MDkxOTEz
+NDEwMFowHwYJKoZIhvcNAQkEMRIEENiFx45okcgTCVIBhhgF+ogwLwYLKoZIhvcN
+AQkQAgQxIDAeDBFXYXRzb24sIGNvbWUgaGVyZQYJKoZIhvcNAQcBMDMGCSqGSIb3
+DQEJNDEmMCQwCwYJYIZIAWUDBAILohUGCWCGSAFlAwQCEzAIBAZweWFzbjEEIBxm
+7hx+iivDlWYp8iUmYYbc2xkpBAcTACkWH+KBRZuF
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.auth_message_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(rfc5652.id_ct_authData, asn1Object['contentType'])
+ ad, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.AuthenticatedData())
+
+ self.assertFalse(rest)
+ self.assertTrue(ad.prettyPrint())
+ self.assertEqual(asn1Object['content'], der_encoder(ad))
+
+ self.assertEqual(
+ rfc8702.id_shake128, ad['digestAlgorithm']['algorithm'])
+
+ ad_mac = ad['macAlgorithm']
+ self.assertEqual(
+ rfc8702.id_KMACWithSHAKE128, ad_mac['algorithm'])
+
+ kmac128_p, rest = der_decoder(
+ ad_mac['parameters'],
+ asn1Spec=rfc5280.algorithmIdentifierMap[ad_mac['algorithm']])
+
+ self.assertFalse(rest)
+ self.assertTrue(kmac128_p.prettyPrint())
+ self.assertEqual(ad_mac['parameters'], der_encoder(kmac128_p))
+
+ self.assertEqual(
+ univ.OctetString("pyasn1"), kmac128_p['customizationString'])
+
+ found_kmac128_params = False
+ for attr in ad['authAttrs']:
+ if attr['attrType'] == rfc6211.id_aa_cmsAlgorithmProtect:
+ av, rest = der_decoder(
+ attr['attrValues'][0],
+ asn1Spec=rfc6211.CMSAlgorithmProtection())
+
+ self.assertFalse(rest)
+ self.assertTrue(av.prettyPrint())
+ self.assertEqual(attr['attrValues'][0], der_encoder(av))
+
+ self.assertEqual(
+ rfc8702.id_shake128, av['digestAlgorithm']['algorithm'])
+
+ self.assertEqual(
+ rfc8702.id_KMACWithSHAKE128, av['macAlgorithm']['algorithm'])
+
+ found_kmac128_params = True
+
+ self.assertTrue(found_kmac128_params)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8708.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8708.py
new file mode 100644
index 0000000000..049aead8e8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8708.py
@@ -0,0 +1,127 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8708
+
+
+class HashSigPublicKeyTestCase(unittest.TestCase):
+ public_key_pem_text = """\
+MFAwDQYLKoZIhvcNAQkQAxEDPwAEPAAAAAIAAAAGAAAAA9CPq9SiCR/wqMtO2DTn
+RTQypYiFzZugQxI1Rmv/llHGySEkQE1F+lPPFhwo8a1ajg==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5280.SubjectPublicKeyInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.public_key_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(
+ asn1Object['algorithm']['algorithm'],
+ rfc8708.id_alg_hss_lms_hashsig)
+
+
+class HashSigSignedDataTestCase(unittest.TestCase):
+ signed_data_pem_text = """\
+MIIKfQYJKoZIhvcNAQcCoIIKbjCCCmoCAQMxADAtBgkqhkiG9w0BBwGgIAQe
+VGhpcyBpcyBzb21lIHNhbXBsZSBjb250ZW50Lg0KMYIKMjCCCi4CAQOABkhp
+TW9tITALBglghkgBZQMEAgGgMTAvBgkqhkiG9w0BCQQxIgQgF6DPgklChkQZ
+NfFTIwED50Du7vSlr2SKRDkhJIYWL8gwDQYLKoZIhvcNAQkQAxEEggnQAAAA
+AAAAAAEAAAADkSkd52zm4k0eKptgJmUZvIzoifgU3rD8AO3TEp3jq5v2DGW6
+SNGcy3dtMFWLhS9DutlHr2Iphd6AGwaL7RGA004g3b+BnpErvDqnGIPy5CZl
+u0Q1UlL2U9DEp6LaPrhZSv7LVR5odGEu9h8TvI/HCXs8IeaDW3S+mtaGOr+V
+sRLCwWpCOYQXBfErxfRJqCgJEtJjQ/KPdROB3u4yTRcFh8qyfHBJCN5Cphx7
+g/NkceH36hXJP/L7f2oFFMP9bloY7Tqyt9etZUKdlDiyrxZGCmdcmCJoj9SL
+hrkz+nIisCMKy3MjjZ+pT1XUOuv6QOCJcTezCDucuspflxyqJADXIbXnMn6B
+7H/vYfxuXCBWyRXLulOe00xNY2XaIAdJRGdm1oLuLWsNuv+v9stWiZGQT3j6
+AQlC0CV1PFno/TpAeTFUcKo+fxHOmDOfV7wGExWhOoh1+1c0eQjJujefNJMB
+9lgSFMCYcLcsOXN+xMRqlhmbZsrSmQvL5bsav96ZEHx/g7OkEenXupLA0RsG
+UrggIMHshcISeZAH6sYKPSVNYFx8ub9UVNgUvAxSygUei9UnDvTCUGAhs/1U
+ZZZnzwRwWh7BgyAb35mzl79jCRXgsZ84GFcZi9WtiWsQWoRN8/YM0d13o4NS
+6gtsCqOKdo21mAyQ7D9UnTZBWhlhRX1M9M14hblDGtkI02pvioJiVtKqNPiq
+BzGjV8Bg246A/v1hsRDOID+toMvgnneS8tBc279QGYessKBUWFvKjkzJFrui
+yv1WgFyyc+YxujldI+hqz26uYxgaWv4fCjYcu9X+/rcxwapgvSUgcdaJydnM
+Ht/oqgI1xlT3WPyJNlFa40QcO/BTuC7bzrX6j9H0tlSlbxJfakZwGuNL19o1
+tYSAnBhTkcz4OTVCTJAL1pgqK4zljUO/R+iPgvWavMKIh1HxXJB4EER4FF/L
+hFZMCqPdDN0EN5NOr+9n14ko34m+u/izqAKyAakF3eEi0ZISU52yEpajSvmA
+s8HIEbml3khWvmfH2W+FJ5thiPKCwfI4r68nfEL4Cbd+tDNQQVNieSBggWMB
+uPzWhjho+5IvRtLdXHDCxQ5cLOQsC+bE7q+8d1UG4vAS2RzpEmhc0vKj/R0Y
+ItqA9AVE0DcKkEqQTpvbpkfoeEOdyTKUPCDQIZSOlO7+H3PvMbdmUKrJ9DMJ
+1LmdDJiwHXb9YHXSCEUESszqVNxIcql8LbzwqZaAAct8IvnZOBgf1dOR8SjA
+3RBUwus1ph4uLzVTkWFqj4kpNfGx/nfcAcJMWPwbTKKPQKUwzjfCNOyy4pPV
+0HEDRR5YFF5wWfvFbpNqEIUfxhDKg8F/r5dbjzgnSSnzawQilxJyFp+XlOYW
+pU5gMDuGoISu2yCyLO/yShAHqKcJOofy+NBt+AIk0uZAQlGXDkJTmDXp+VBg
+ZnVOdMGOFFZMWEVR6pxEKiBPH72B+Vd16NAEJwPBslisrgN7f8neuZvYApG0
+jX+Kt7DrG4V4kIvXSB82luObdGQMoHsS9B4775mXkhn/tKpQNfavHXgaDfwu
+OSvUcFRvX6JhpA+7RJjJVwA85zWpYGPUJVHC/1Roc1GIH+4l885dHfLPAVCL
+GkuYhAPiqnOKPgsOfxlFakDLK+8EePw9ixr/0O2fz4sNgNnz0cMjyY7FmTBL
+E7kiyph3jBu2PHPFm7V8xPq0OTzu+wt6Wol/KK8lHEYF4dXmxpk/Rp8mAhTM
+OrK+UxZX/rEhgNMqiV3b15xjXXSzzv2zQ1MlfM6zdX3OeWF0djjj4TOGtd50
+LQjudbyhyZ8yJSWBvPcnrd9kzGf4Vd42x1/8EVsxlh8pLesJGbTTcfNKhSxL
+4oRppxB1iiLLlsmbsWrqSXee9+4GXMmk099+85HPZZWm2MFLDYD5MCOxs9Q3
+EjnamLZ6G2o3k2Iec2K8ExQICfHUFiz3Xqm/opVMO1AF57khY0QX8RsmTW+7
+jL+pOxTzyGj7qo2RolhjpsALRd65GXsPidNnb5jBYZY/xM4KrdBzoI67CX9A
+8gzDf/v+Ob0DF0Y1HWAZ+hGG4JNTrIdwuhWALnhoZNFlaoVOO0k/OsZ3upwD
+bYtbLqv0NPzcN1N/yOQhCRmB1N3pTI6fVQQN7AcIzzUxMVpNgk25yomzgE8N
+6FlBHVIEnKy3fVME5aokhb0TNU7RpGPWDYdSgcEuKltkCVZRObvB/QKu6HLM
+ErOdFtE0xnTqeIADeT84cIupofpnJPsguY8T/KJkzSPJa/MrZM5Pb3aw/cnk
+WkhczBk79aver+0v/4NyF/+n9e8khNPl8jQ0kayxKtIiYfXP2tXBuxLsmx7U
+cdm9qae446tt5uIkbUx4g9a58yCVDpEmZ0DG2/rWs8/lbeCqZliw3Ik7tuSe
+YiMfRtqA86MTf6ugKP6b9hF+zuSYxf0GfbZsvvYGQSgeCU+meiUKF7ckoav1
+LpfVoloCXq18TZ5hrRqnVpx2O6eb6F6Q9A7OJ205FmwCuNz3acJRXkq0IFQf
+fxs6faAXHE7cLaZY16Sal61qovvjsEPURnSVsG2j3GU2ed/gwfTiHmQKwFAF
+4ns49Wpt6TkX0QZ6sBtOHEhhDEjSxtl/CC8MWm9idDElxYCg56yRfi6aTuVG
+Bl8bYn7zvIVwDj+bDfvdzu3UvZUi1IDOylUDH6siBJDa7eEetRgLpTX+QIhQ
+5yqAyA/TQiJKO1PBsYXoVT6RZBQQiJr7+OWtDqAr+K+Bv34Daax5OUEIMavi
+eWzsJz/xLRH0cph04eobCfGRMoaJtYkCy6xORMkxQWtHzV4gAm1bgbQHoOKc
+quyB8cNShGMTLwBYmp+AIadBCfjb+B/igsH1i/PypSxWDji/1osYxM58O6Yb
+NmK1irtuh2PIVb2SUrqEB/2MvSr89bU5gwAAAAbtHOjG5DeRjUP7p72ThWlM
+QRgnA/a39wTe7dk4S6b4vDYslIZGs8mEiAPm2boffTln9wnN3TXcd9YDVvDD
+aAiQC0kctOy7q+wSjnyBpG5ipntXZAoKeL4cv33Z1BmhDNhobRZiGoCBa/21
+vcViEdcspwuB8RF9EpUpp1cM95z1KnAopIU47N07ONPV1i0mJGWVxPtzpSWl
+7SwwUk67HYzILgwZvEl3xomP+V/T0xCwuucWls75PGpVJFa/lunQdeODu3VD
+xnWEK6+/x824hIOzJ2wp1PCjQcLUBuQNRlO35NBFhRrPagoOqccQuAXM7UY1
+7owQc2Lw/I2AwU0KxJxRZwPSbRR1LzTBwNLEJHWBwYws9N5I6c6Um+fIiOnK
+6+SkFeKR/RB9IdwfCEsRWCCCSfKPT3x+kxuns70NgkpFcA==
+"""
+
+ def setUp(self):
+ self.asn1Spec = rfc5652.ContentInfo()
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.signed_data_pem_text)
+ asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ self.assertEqual(asn1Object['contentType'], rfc5652.id_signedData)
+ sd, rest = der_decoder(
+ asn1Object['content'], asn1Spec=rfc5652.SignedData())
+
+ oid = sd['signerInfos'][0]['signatureAlgorithm']['algorithm']
+ self.assertEqual(rfc8708.id_alg_hss_lms_hashsig, oid)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1-modules/py3/tests/test_rfc8769.py b/contrib/python/pyasn1-modules/py3/tests/test_rfc8769.py
new file mode 100644
index 0000000000..614f326720
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/test_rfc8769.py
@@ -0,0 +1,134 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+import unittest
+
+from pyasn1.codec.der.decoder import decode as der_decoder
+from pyasn1.codec.der.encoder import encode as der_encoder
+
+from pyasn1_modules import pem
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8769
+
+
+class CBORContentTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEHwYJKoZIhvcNAQcCoIIEEDCCBAwCAQMxDTALBglghkgBZQMEAgIwIQYLKoZIhvcNAQkQ
+ASygEgQQgw9kUnVzc/tADzMzMzMzM6CCAnwwggJ4MIIB/qADAgECAgkApbNUKBuwbjswCgYI
+KoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9u
+MREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1MjkxNDQ1NDFaFw0yMDA1MjgxNDQ1NDFaMHAx
+CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMH
+RXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUu
+Y29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+LuAHtZxes1wm
+JZrBBg+bz7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8Csg2DhQ7qs/w
+to8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhCAQ0ENRYzVGhp
+cyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBvc2UuMB0GA1Ud
+DgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAWgBTyNds0BNqlVfK9aQOZsGLs
+4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL3kRhmn+PJTeKaL9sh/oQgHOYTgLmSnv3
++NDCkhfKuMNoo/tHrkmihYgCMQC94MaerDIrQpi0IDh+v0QSAv9rMife8tClafXWtDwwL8MS
+7oAh0ymT446Uizxx3PUxggFTMIIBTwIBATBMMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJW
+QTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1QoG7BuOzALBglg
+hkgBZQMEAgKgezAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQASwwHAYJKoZIhvcNAQkFMQ8X
+DTIwMDExNDIyMjIxNVowPwYJKoZIhvcNAQkEMTIEMADSWdHn4vsesm9XnjJq1WxkoV6EtD+f
+qDAs1JEpZMZ+n8AtUxvC5SFobYpGCl+fsDAKBggqhkjOPQQDAwRmMGQCMGclPwvZLwVJqgON
+mOfnxSF8Cqn3AC+ZFBg7VplspiuhKPNIyu3IofqZjCxw0TzSpAIwEK0JxNlY28KDb5te0iN6
+I2hw+am26W+PRyltVVGUAISHM2kA4tG39HcxEQi+6HJx
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer in layers:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(rfc8769.id_ct_cbor, next_layer)
+
+
+class CBORSequenceContentTestCase(unittest.TestCase):
+ pem_text = """\
+MIIEKQYJKoZIhvcNAQcCoIIEGjCCBBYCAQMxDTALBglghkgBZQMEAgIwKgYLKoZIhvcNAQkQ
+AS2gGwQZgw9kUnVzc/tADzMzMzMzM6MDCSD1YWFhYqCCAnwwggJ4MIIB/qADAgECAgkApbNU
+KBuwbjswCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQH
+DAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAeFw0xOTA1MjkxNDQ1NDFaFw0yMDA1Mjgx
+NDQ1NDFaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQ
+MA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNl
+QGV4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE+M2fBy/sRA6V1pKFqecRTE8+
+LuAHtZxes1wmJZrBBg+bz7uYZfYQxI3dVB0YCSD6Mt3yXFlnmfBRwoqyArbjIBYrDbHBv2k8
+Csg2DhQ7qs/wto8hMKoFgkcscqIbiV7Zo4GUMIGRMAsGA1UdDwQEAwIHgDBCBglghkgBhvhC
+AQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1cnBv
+c2UuMB0GA1UdDgQWBBTEuloOPnrjPIGw9AKqaLsW4JYONTAfBgNVHSMEGDAWgBTyNds0BNql
+VfK9aQOZsGLs4hUIwTAKBggqhkjOPQQDAwNoADBlAjBjuR/RNbgL3kRhmn+PJTeKaL9sh/oQ
+gHOYTgLmSnv3+NDCkhfKuMNoo/tHrkmihYgCMQC94MaerDIrQpi0IDh+v0QSAv9rMife8tCl
+afXWtDwwL8MS7oAh0ymT446Uizxx3PUxggFUMIIBUAIBATBMMD8xCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0ECCQCls1Qo
+G7BuOzALBglghkgBZQMEAgKgezAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAS0wHAYJKoZI
+hvcNAQkFMQ8XDTIwMDExNDIyMjIxNVowPwYJKoZIhvcNAQkEMTIEMOsEu3dGU5j6fKZbsZPL
+LDA8QWxpP36CPDZWr3BVJ3R5mMCKCSmoWtVRnB7XASQcjTAKBggqhkjOPQQDAwRnMGUCMBLW
+PyYw4c11nrH97KHnEmx3BSDX/SfepFNM6PoPR5HCI+OR/v/wlIIByuhyrIl8xAIxAK8dEwOe
+I06um+ATKQzUcbgq0PCKA7T31pAq46fsWc5tA+mMARTrxZjSXsDneeAWpw==
+"""
+
+ def testDerCodec(self):
+ substrate = pem.readBase64fromText(self.pem_text)
+
+ layers = { }
+ layers.update(rfc5652.cmsContentTypesMap)
+
+ getNextLayer = {
+ rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
+ }
+
+ getNextSubstrate = {
+ rfc5652.id_ct_contentInfo: lambda x: x['content'],
+ rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
+ }
+
+ next_layer = rfc5652.id_ct_contentInfo
+ while next_layer in layers:
+ asn1Object, rest = der_decoder(
+ substrate, asn1Spec=layers[next_layer])
+
+ self.assertFalse(rest)
+ self.assertTrue(asn1Object.prettyPrint())
+ self.assertEqual(substrate, der_encoder(asn1Object))
+
+ substrate = getNextSubstrate[next_layer](asn1Object)
+ next_layer = getNextLayer[next_layer](asn1Object)
+
+ self.assertEqual(rfc8769.id_ct_cborSequence, next_layer)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ import sys
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+ sys.exit(not result.wasSuccessful())
diff --git a/contrib/python/pyasn1-modules/py3/tests/ya.make b/contrib/python/pyasn1-modules/py3/tests/ya.make
new file mode 100644
index 0000000000..048b8309ce
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/tests/ya.make
@@ -0,0 +1,136 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/pyasn1-modules
+)
+
+TEST_SRCS(
+ __init__.py
+ test_missing.py
+ test_pem.py
+ test_rfc2314.py
+ test_rfc2315.py
+ test_rfc2437.py
+ test_rfc2459.py
+ test_rfc2511.py
+ test_rfc2560.py
+ test_rfc2631.py
+ test_rfc2634.py
+ test_rfc2876.py
+ test_rfc2985.py
+ test_rfc2986.py
+ test_rfc3058.py
+ test_rfc3114.py
+ test_rfc3125.py
+ test_rfc3161.py
+ test_rfc3274.py
+ test_rfc3279.py
+ test_rfc3280.py
+ test_rfc3281.py
+ test_rfc3370.py
+ test_rfc3447.py
+ test_rfc3537.py
+ test_rfc3560.py
+ test_rfc3565.py
+ test_rfc3657.py
+ test_rfc3709.py
+ test_rfc3739.py
+ test_rfc3770.py
+ test_rfc3779.py
+ test_rfc3820.py
+ test_rfc3852.py
+ test_rfc4010.py
+ test_rfc4043.py
+ test_rfc4055.py
+ test_rfc4073.py
+ test_rfc4108.py
+ test_rfc4210.py
+ test_rfc4211.py
+ test_rfc4334.py
+ test_rfc4357.py
+ test_rfc4387.py
+ test_rfc4476.py
+ test_rfc4490.py
+ test_rfc4491.py
+ test_rfc4683.py
+ test_rfc4985.py
+ test_rfc5035.py
+ test_rfc5083.py
+ test_rfc5084.py
+ test_rfc5126.py
+ test_rfc5208.py
+ test_rfc5275.py
+ test_rfc5280.py
+ test_rfc5480.py
+ test_rfc5636.py
+ test_rfc5639.py
+ test_rfc5649.py
+ test_rfc5652.py
+ test_rfc5697.py
+ test_rfc5751.py
+ test_rfc5752.py
+ test_rfc5753.py
+ test_rfc5755.py
+ test_rfc5913.py
+ test_rfc5914.py
+ test_rfc5915.py
+ test_rfc5916.py
+ test_rfc5917.py
+ test_rfc5924.py
+ test_rfc5934.py
+ test_rfc5940.py
+ test_rfc5958.py
+ test_rfc5990.py
+ test_rfc6010.py
+ test_rfc6019.py
+ test_rfc6031.py
+ test_rfc6032.py
+ test_rfc6120.py
+ test_rfc6187.py
+ test_rfc6210.py
+ test_rfc6211.py
+ test_rfc6402.py
+ test_rfc6482.py
+ test_rfc6486.py
+ test_rfc6487.py
+ test_rfc6664.py
+ test_rfc6955.py
+ test_rfc6960.py
+ test_rfc7030.py
+ test_rfc7191.py
+ test_rfc7229.py
+ test_rfc7292.py
+ test_rfc7296.py
+ test_rfc7508.py
+ test_rfc7585.py
+ test_rfc7633.py
+ test_rfc7773.py
+ test_rfc7894.py
+ test_rfc7906.py
+ test_rfc7914.py
+ test_rfc8017.py
+ test_rfc8018.py
+ test_rfc8103.py
+ test_rfc8209.py
+ test_rfc8226.py
+ test_rfc8358.py
+ test_rfc8360.py
+ test_rfc8398.py
+ test_rfc8410.py
+ test_rfc8418.py
+ test_rfc8419.py
+ test_rfc8479.py
+ test_rfc8494.py
+ test_rfc8520.py
+ test_rfc8619.py
+ test_rfc8649.py
+ test_rfc8692.py
+ test_rfc8696.py
+ test_rfc8702.py
+ test_rfc8708.py
+ test_rfc8769.py
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/pyasn1-modules/py3/ya.make b/contrib/python/pyasn1-modules/py3/ya.make
new file mode 100644
index 0000000000..6f5441d5d8
--- /dev/null
+++ b/contrib/python/pyasn1-modules/py3/ya.make
@@ -0,0 +1,161 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(0.3.0)
+
+LICENSE(BSD-2-Clause)
+
+PEERDIR(
+ contrib/python/pyasn1
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ pyasn1_modules/__init__.py
+ pyasn1_modules/pem.py
+ pyasn1_modules/rfc1155.py
+ pyasn1_modules/rfc1157.py
+ pyasn1_modules/rfc1901.py
+ pyasn1_modules/rfc1902.py
+ pyasn1_modules/rfc1905.py
+ pyasn1_modules/rfc2251.py
+ pyasn1_modules/rfc2314.py
+ pyasn1_modules/rfc2315.py
+ pyasn1_modules/rfc2437.py
+ pyasn1_modules/rfc2459.py
+ pyasn1_modules/rfc2511.py
+ pyasn1_modules/rfc2560.py
+ pyasn1_modules/rfc2631.py
+ pyasn1_modules/rfc2634.py
+ pyasn1_modules/rfc2876.py
+ pyasn1_modules/rfc2985.py
+ pyasn1_modules/rfc2986.py
+ pyasn1_modules/rfc3058.py
+ pyasn1_modules/rfc3114.py
+ pyasn1_modules/rfc3125.py
+ pyasn1_modules/rfc3161.py
+ pyasn1_modules/rfc3274.py
+ pyasn1_modules/rfc3279.py
+ pyasn1_modules/rfc3280.py
+ pyasn1_modules/rfc3281.py
+ pyasn1_modules/rfc3370.py
+ pyasn1_modules/rfc3412.py
+ pyasn1_modules/rfc3414.py
+ pyasn1_modules/rfc3447.py
+ pyasn1_modules/rfc3537.py
+ pyasn1_modules/rfc3560.py
+ pyasn1_modules/rfc3565.py
+ pyasn1_modules/rfc3657.py
+ pyasn1_modules/rfc3709.py
+ pyasn1_modules/rfc3739.py
+ pyasn1_modules/rfc3770.py
+ pyasn1_modules/rfc3779.py
+ pyasn1_modules/rfc3820.py
+ pyasn1_modules/rfc3852.py
+ pyasn1_modules/rfc4010.py
+ pyasn1_modules/rfc4043.py
+ pyasn1_modules/rfc4055.py
+ pyasn1_modules/rfc4073.py
+ pyasn1_modules/rfc4108.py
+ pyasn1_modules/rfc4210.py
+ pyasn1_modules/rfc4211.py
+ pyasn1_modules/rfc4334.py
+ pyasn1_modules/rfc4357.py
+ pyasn1_modules/rfc4387.py
+ pyasn1_modules/rfc4476.py
+ pyasn1_modules/rfc4490.py
+ pyasn1_modules/rfc4491.py
+ pyasn1_modules/rfc4683.py
+ pyasn1_modules/rfc4985.py
+ pyasn1_modules/rfc5035.py
+ pyasn1_modules/rfc5083.py
+ pyasn1_modules/rfc5084.py
+ pyasn1_modules/rfc5126.py
+ pyasn1_modules/rfc5208.py
+ pyasn1_modules/rfc5275.py
+ pyasn1_modules/rfc5280.py
+ pyasn1_modules/rfc5480.py
+ pyasn1_modules/rfc5636.py
+ pyasn1_modules/rfc5639.py
+ pyasn1_modules/rfc5649.py
+ pyasn1_modules/rfc5652.py
+ pyasn1_modules/rfc5697.py
+ pyasn1_modules/rfc5751.py
+ pyasn1_modules/rfc5752.py
+ pyasn1_modules/rfc5753.py
+ pyasn1_modules/rfc5755.py
+ pyasn1_modules/rfc5913.py
+ pyasn1_modules/rfc5914.py
+ pyasn1_modules/rfc5915.py
+ pyasn1_modules/rfc5916.py
+ pyasn1_modules/rfc5917.py
+ pyasn1_modules/rfc5924.py
+ pyasn1_modules/rfc5934.py
+ pyasn1_modules/rfc5940.py
+ pyasn1_modules/rfc5958.py
+ pyasn1_modules/rfc5990.py
+ pyasn1_modules/rfc6010.py
+ pyasn1_modules/rfc6019.py
+ pyasn1_modules/rfc6031.py
+ pyasn1_modules/rfc6032.py
+ pyasn1_modules/rfc6120.py
+ pyasn1_modules/rfc6170.py
+ pyasn1_modules/rfc6187.py
+ pyasn1_modules/rfc6210.py
+ pyasn1_modules/rfc6211.py
+ pyasn1_modules/rfc6402.py
+ pyasn1_modules/rfc6482.py
+ pyasn1_modules/rfc6486.py
+ pyasn1_modules/rfc6487.py
+ pyasn1_modules/rfc6664.py
+ pyasn1_modules/rfc6955.py
+ pyasn1_modules/rfc6960.py
+ pyasn1_modules/rfc7030.py
+ pyasn1_modules/rfc7191.py
+ pyasn1_modules/rfc7229.py
+ pyasn1_modules/rfc7292.py
+ pyasn1_modules/rfc7296.py
+ pyasn1_modules/rfc7508.py
+ pyasn1_modules/rfc7585.py
+ pyasn1_modules/rfc7633.py
+ pyasn1_modules/rfc7773.py
+ pyasn1_modules/rfc7894.py
+ pyasn1_modules/rfc7906.py
+ pyasn1_modules/rfc7914.py
+ pyasn1_modules/rfc8017.py
+ pyasn1_modules/rfc8018.py
+ pyasn1_modules/rfc8103.py
+ pyasn1_modules/rfc8209.py
+ pyasn1_modules/rfc8226.py
+ pyasn1_modules/rfc8358.py
+ pyasn1_modules/rfc8360.py
+ pyasn1_modules/rfc8398.py
+ pyasn1_modules/rfc8410.py
+ pyasn1_modules/rfc8418.py
+ pyasn1_modules/rfc8419.py
+ pyasn1_modules/rfc8479.py
+ pyasn1_modules/rfc8494.py
+ pyasn1_modules/rfc8520.py
+ pyasn1_modules/rfc8619.py
+ pyasn1_modules/rfc8649.py
+ pyasn1_modules/rfc8692.py
+ pyasn1_modules/rfc8696.py
+ pyasn1_modules/rfc8702.py
+ pyasn1_modules/rfc8708.py
+ pyasn1_modules/rfc8769.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/pyasn1-modules/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/pyasn1-modules/ya.make b/contrib/python/pyasn1-modules/ya.make
new file mode 100644
index 0000000000..7d30eadc76
--- /dev/null
+++ b/contrib/python/pyasn1-modules/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/pyasn1-modules/py2)
+ELSE()
+ PEERDIR(contrib/python/pyasn1-modules/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/pyasn1/py2/.dist-info/METADATA b/contrib/python/pyasn1/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..530fe5bf7b
--- /dev/null
+++ b/contrib/python/pyasn1/py2/.dist-info/METADATA
@@ -0,0 +1,230 @@
+Metadata-Version: 2.1
+Name: pyasn1
+Version: 0.5.0
+Summary: Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)
+Home-page: https://github.com/pyasn1/pyasn1
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: pyasn1 maintenance organization
+Maintainer-email: Christian Heimes <christian@python.org>
+License: BSD-2-Clause
+Project-URL: Documentation, https://pyasn1.readthedocs.io
+Project-URL: Source, https://github.com/pyasn1/pyasn1
+Project-URL: Issues, https://github.com/pyasn1/pyasn1/issues
+Project-URL: Changelog, https://pyasn1.readthedocs.io/en/latest/changelog.html
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Communications
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7
+Description-Content-Type: text/markdown
+License-File: LICENSE.rst
+
+
+ASN.1 library for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1.svg?maxAge=2592000)](https://pypi.org/project/pyasn1)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1.svg)](https://pypi.org/project/pyasn1/)
+[![Build status](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1.svg)](https://codecov.io/github/pyasn1/pyasn1)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1/master/LICENSE.txt)
+
+This is a free and open source implementation of ASN.1 types and codecs
+as a Python package. It has been first written to support particular
+protocol (SNMP) but then generalized to be suitable for a wide range
+of protocols based on
+[ASN.1 specification](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items).
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1.
+
+Features
+--------
+
+* Generic implementation of ASN.1 types (X.208)
+* Standards compliant BER/CER/DER codecs
+* Can operate on streams of serialized data
+* Dumps/loads ASN.1 structures from Python types
+* 100% Python, works with Python 2.7 and 3.6+
+* MT-safe
+* Contributed ASN.1 compiler [Asn1ate](https://github.com/kimgr/asn1ate)
+
+Why using pyasn1
+----------------
+
+ASN.1 solves the data serialisation problem. This solution was
+designed long ago by the wise Ancients. Back then, they did not
+have the luxury of wasting bits. That is why ASN.1 is designed
+to serialise data structures of unbounded complexity into
+something compact and efficient when it comes to processing
+the data.
+
+That probably explains why many network protocols and file formats
+still rely on the 30+ years old technology. Including a number of
+high-profile Internet protocols and file formats.
+
+Quite a number of books cover the topic of ASN.1.
+[Communication between heterogeneous systems](http://www.oss.com/asn1/dubuisson.html)
+by Olivier Dubuisson is one of those high quality books freely
+available on the Internet.
+
+The pyasn1 package is designed to help Python programmers tackling
+network protocols and file formats at the comfort of their Python
+prompt. The tool struggles to capture all aspects of a rather
+complicated ASN.1 system and to represent it on the Python terms.
+
+How to use pyasn1
+-----------------
+
+With pyasn1 you can build Python objects from ASN.1 data structures.
+For example, the following ASN.1 data structure:
+
+```bash
+Record ::= SEQUENCE {
+ id INTEGER,
+ room [0] INTEGER OPTIONAL,
+ house [1] INTEGER DEFAULT 0
+}
+```
+
+Could be expressed in pyasn1 like this:
+
+```python
+class Record(Sequence):
+ componentType = NamedTypes(
+ NamedType('id', Integer()),
+ OptionalNamedType(
+ 'room', Integer().subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
+ )
+ ),
+ DefaultedNamedType(
+ 'house', Integer(0).subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
+ )
+ )
+ )
+```
+
+It is in the spirit of ASN.1 to take abstract data description
+and turn it into a programming language specific form.
+Once you have your ASN.1 data structure expressed in Python, you
+can use it along the lines of similar Python type (e.g. ASN.1
+`SET` is similar to Python `dict`, `SET OF` to `list`):
+
+```python
+>>> record = Record()
+>>> record['id'] = 123
+>>> record['room'] = 321
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+Part of the power of ASN.1 comes from its serialisation features. You
+can serialise your data structure and send it over the network.
+
+```python
+>>> from pyasn1.codec.der.encoder import encode
+>>> substrate = encode(record)
+>>> hexdump(substrate)
+00000: 30 07 02 01 7B 80 02 01 41
+```
+
+Conversely, you can turn serialised ASN.1 content, as received from
+network or read from a file, into a Python object which you can
+introspect, modify, encode and send back.
+
+```python
+>>> from pyasn1.codec.der.decoder import decode
+>>> received_record, rest_of_substrate = decode(substrate, asn1Spec=Record())
+>>>
+>>> for field in received_record:
+>>> print('{} is {}'.format(field, received_record[field]))
+id is 123
+room is 321
+house is 0
+>>>
+>>> record == received_record
+True
+>>> received_record.update(room=123)
+>>> substrate = encode(received_record)
+>>> hexdump(substrate)
+00000: 30 06 02 01 7B 80 01 7B
+```
+
+The pyasn1 classes struggle to emulate their Python prototypes (e.g. int,
+list, dict etc.). But ASN.1 types exhibit more complicated behaviour.
+To make life easier for a Pythonista, they can turn their pyasn1
+classes into Python built-ins:
+
+```python
+>>> from pyasn1.codec.native.encoder import encode
+>>> encode(record)
+{'id': 123, 'room': 321, 'house': 0}
+```
+
+Or vice-versa -- you can initialize an ASN.1 structure from a tree of
+Python objects:
+
+```python
+>>> from pyasn1.codec.native.decoder import decode
+>>> record = decode({'id': 123, 'room': 321, 'house': 0}, asn1Spec=Record())
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+With ASN.1 design, serialisation codecs are decoupled from data objects,
+so you could turn every single ASN.1 object into many different
+serialised forms. As of this moment, pyasn1 supports BER, DER, CER and
+Python built-ins codecs. The extremely compact PER encoding is expected
+to be introduced in the upcoming pyasn1 release.
+
+More information on pyasn1 APIs can be found in the
+[documentation](https://pyasn1.readthedocs.io/en/latest/pyasn1/contents.html),
+compiled ASN.1 modules for different protocols and file formats
+could be found in the pyasn1-modules
+[repo](https://github.com/pyasn1/pyasn1-modules).
+
+How to get pyasn1
+-----------------
+
+The pyasn1 package is distributed under terms and conditions of 2-clause
+BSD [license](https://pyasn1.readthedocs.io/en/latest/license.html). Source code is freely
+available as a GitHub [repo](https://github.com/pyasn1/pyasn1).
+
+You could `pip install pyasn1` or download it from [PyPI](https://pypi.org/project/pyasn1).
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1/issues) at GitHub or
+post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+or try browsing pyasn1
+[mailing list archives](https://sourceforge.net/p/pyasn1/mailman/pyasn1-users/).
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1/py2/.dist-info/top_level.txt b/contrib/python/pyasn1/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..38fe414575
--- /dev/null
+++ b/contrib/python/pyasn1/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1
diff --git a/contrib/python/pyasn1/py2/LICENSE.rst b/contrib/python/pyasn1/py2/LICENSE.rst
new file mode 100644
index 0000000000..598b8430ef
--- /dev/null
+++ b/contrib/python/pyasn1/py2/LICENSE.rst
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/pyasn1/py2/README.md b/contrib/python/pyasn1/py2/README.md
new file mode 100644
index 0000000000..e1f9501b49
--- /dev/null
+++ b/contrib/python/pyasn1/py2/README.md
@@ -0,0 +1,188 @@
+
+ASN.1 library for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1.svg?maxAge=2592000)](https://pypi.org/project/pyasn1)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1.svg)](https://pypi.org/project/pyasn1/)
+[![Build status](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1.svg)](https://codecov.io/github/pyasn1/pyasn1)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1/master/LICENSE.txt)
+
+This is a free and open source implementation of ASN.1 types and codecs
+as a Python package. It has been first written to support particular
+protocol (SNMP) but then generalized to be suitable for a wide range
+of protocols based on
+[ASN.1 specification](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items).
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1.
+
+Features
+--------
+
+* Generic implementation of ASN.1 types (X.208)
+* Standards compliant BER/CER/DER codecs
+* Can operate on streams of serialized data
+* Dumps/loads ASN.1 structures from Python types
+* 100% Python, works with Python 2.7 and 3.6+
+* MT-safe
+* Contributed ASN.1 compiler [Asn1ate](https://github.com/kimgr/asn1ate)
+
+Why using pyasn1
+----------------
+
+ASN.1 solves the data serialisation problem. This solution was
+designed long ago by the wise Ancients. Back then, they did not
+have the luxury of wasting bits. That is why ASN.1 is designed
+to serialise data structures of unbounded complexity into
+something compact and efficient when it comes to processing
+the data.
+
+That probably explains why many network protocols and file formats
+still rely on the 30+ years old technology. Including a number of
+high-profile Internet protocols and file formats.
+
+Quite a number of books cover the topic of ASN.1.
+[Communication between heterogeneous systems](http://www.oss.com/asn1/dubuisson.html)
+by Olivier Dubuisson is one of those high quality books freely
+available on the Internet.
+
+The pyasn1 package is designed to help Python programmers tackling
+network protocols and file formats at the comfort of their Python
+prompt. The tool struggles to capture all aspects of a rather
+complicated ASN.1 system and to represent it on the Python terms.
+
+How to use pyasn1
+-----------------
+
+With pyasn1 you can build Python objects from ASN.1 data structures.
+For example, the following ASN.1 data structure:
+
+```bash
+Record ::= SEQUENCE {
+ id INTEGER,
+ room [0] INTEGER OPTIONAL,
+ house [1] INTEGER DEFAULT 0
+}
+```
+
+Could be expressed in pyasn1 like this:
+
+```python
+class Record(Sequence):
+ componentType = NamedTypes(
+ NamedType('id', Integer()),
+ OptionalNamedType(
+ 'room', Integer().subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
+ )
+ ),
+ DefaultedNamedType(
+ 'house', Integer(0).subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
+ )
+ )
+ )
+```
+
+It is in the spirit of ASN.1 to take abstract data description
+and turn it into a programming language specific form.
+Once you have your ASN.1 data structure expressed in Python, you
+can use it along the lines of similar Python type (e.g. ASN.1
+`SET` is similar to Python `dict`, `SET OF` to `list`):
+
+```python
+>>> record = Record()
+>>> record['id'] = 123
+>>> record['room'] = 321
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+Part of the power of ASN.1 comes from its serialisation features. You
+can serialise your data structure and send it over the network.
+
+```python
+>>> from pyasn1.codec.der.encoder import encode
+>>> substrate = encode(record)
+>>> hexdump(substrate)
+00000: 30 07 02 01 7B 80 02 01 41
+```
+
+Conversely, you can turn serialised ASN.1 content, as received from
+network or read from a file, into a Python object which you can
+introspect, modify, encode and send back.
+
+```python
+>>> from pyasn1.codec.der.decoder import decode
+>>> received_record, rest_of_substrate = decode(substrate, asn1Spec=Record())
+>>>
+>>> for field in received_record:
+>>> print('{} is {}'.format(field, received_record[field]))
+id is 123
+room is 321
+house is 0
+>>>
+>>> record == received_record
+True
+>>> received_record.update(room=123)
+>>> substrate = encode(received_record)
+>>> hexdump(substrate)
+00000: 30 06 02 01 7B 80 01 7B
+```
+
+The pyasn1 classes struggle to emulate their Python prototypes (e.g. int,
+list, dict etc.). But ASN.1 types exhibit more complicated behaviour.
+To make life easier for a Pythonista, they can turn their pyasn1
+classes into Python built-ins:
+
+```python
+>>> from pyasn1.codec.native.encoder import encode
+>>> encode(record)
+{'id': 123, 'room': 321, 'house': 0}
+```
+
+Or vice-versa -- you can initialize an ASN.1 structure from a tree of
+Python objects:
+
+```python
+>>> from pyasn1.codec.native.decoder import decode
+>>> record = decode({'id': 123, 'room': 321, 'house': 0}, asn1Spec=Record())
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+With ASN.1 design, serialisation codecs are decoupled from data objects,
+so you could turn every single ASN.1 object into many different
+serialised forms. As of this moment, pyasn1 supports BER, DER, CER and
+Python built-ins codecs. The extremely compact PER encoding is expected
+to be introduced in the upcoming pyasn1 release.
+
+More information on pyasn1 APIs can be found in the
+[documentation](https://pyasn1.readthedocs.io/en/latest/pyasn1/contents.html),
+compiled ASN.1 modules for different protocols and file formats
+could be found in the pyasn1-modules
+[repo](https://github.com/pyasn1/pyasn1-modules).
+
+How to get pyasn1
+-----------------
+
+The pyasn1 package is distributed under terms and conditions of 2-clause
+BSD [license](https://pyasn1.readthedocs.io/en/latest/license.html). Source code is freely
+available as a GitHub [repo](https://github.com/pyasn1/pyasn1).
+
+You could `pip install pyasn1` or download it from [PyPI](https://pypi.org/project/pyasn1).
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1/issues) at GitHub or
+post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+or try browsing pyasn1
+[mailing list archives](https://sourceforge.net/p/pyasn1/mailman/pyasn1-users/).
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1/py2/pyasn1/__init__.py b/contrib/python/pyasn1/py2/pyasn1/__init__.py
new file mode 100644
index 0000000000..a979d291f2
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/__init__.py
@@ -0,0 +1,2 @@
+# https://www.python.org/dev/peps/pep-0396/
+__version__ = '0.5.0'
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/__init__.py b/contrib/python/pyasn1/py2/pyasn1/codec/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/ber/__init__.py b/contrib/python/pyasn1/py2/pyasn1/codec/ber/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/ber/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/ber/decoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/ber/decoder.py
new file mode 100644
index 0000000000..070733fd28
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/ber/decoder.py
@@ -0,0 +1,2071 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import os
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.codec.streaming import asSeekableStream
+from pyasn1.codec.streaming import isEndOfStream
+from pyasn1.codec.streaming import peekIntoStream
+from pyasn1.codec.streaming import readFromStream
+from pyasn1.compat import _MISSING
+from pyasn1.compat.integer import from_bytes
+from pyasn1.compat.octets import oct2int, octs2ints, ints2octs, null
+from pyasn1.error import PyAsn1Error
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['StreamingDecoder', 'Decoder', 'decode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
+
+noValue = base.noValue
+
+SubstrateUnderrunError = error.SubstrateUnderrunError
+
+
+class AbstractPayloadDecoder(object):
+ protoComponent = None
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ """Decode value with fixed byte length.
+
+ The decoder is allowed to consume as many bytes as necessary.
+ """
+ raise error.PyAsn1Error('SingleItemDecoder not implemented for %s' % (tagSet,)) # TODO: Seems more like an NotImplementedError?
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ """Decode value with undefined length.
+
+ The decoder is allowed to consume as many bytes as necessary.
+ """
+ raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,)) # TODO: Seems more like an NotImplementedError?
+
+ @staticmethod
+ def _passAsn1Object(asn1Object, options):
+ if 'asn1Object' not in options:
+ options['asn1Object'] = asn1Object
+
+ return options
+
+
+class AbstractSimplePayloadDecoder(AbstractPayloadDecoder):
+ @staticmethod
+ def substrateCollector(asn1Object, substrate, length, options):
+ for chunk in readFromStream(substrate, length, options):
+ yield chunk
+
+ def _createComponent(self, asn1Spec, tagSet, value, **options):
+ if options.get('native'):
+ return value
+ elif asn1Spec is None:
+ return self.protoComponent.clone(value, tagSet=tagSet)
+ elif value is noValue:
+ return asn1Spec
+ else:
+ return asn1Spec.clone(value)
+
+
+class RawPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.Any('')
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, '', **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ for value in decodeFun(substrate, asn1Spec, tagSet, length, **options):
+ yield value
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, '', **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ while True:
+ for value in decodeFun(
+ substrate, asn1Spec, tagSet, length,
+ allowEoo=True, **options):
+
+ if value is eoo.endOfOctets:
+ return
+
+ yield value
+
+
+rawPayloadDecoder = RawPayloadDecoder()
+
+
+class IntegerPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.Integer(0)
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ if chunk:
+ value = from_bytes(chunk, signed=True)
+
+ else:
+ value = 0
+
+ yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+
+class BooleanPayloadDecoder(IntegerPayloadDecoder):
+ protoComponent = univ.Boolean(0)
+
+ def _createComponent(self, asn1Spec, tagSet, value, **options):
+ return IntegerPayloadDecoder._createComponent(
+ self, asn1Spec, tagSet, value and 1 or 0, **options)
+
+
+class BitStringPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.BitString(())
+ supportConstructedForm = True
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ if not length:
+ raise error.PyAsn1Error('Empty BIT STRING substrate')
+
+ for chunk in isEndOfStream(substrate):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ if chunk:
+ raise error.PyAsn1Error('Empty BIT STRING substrate')
+
+ if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
+
+ for trailingBits in readFromStream(substrate, 1, options):
+ if isinstance(trailingBits, SubstrateUnderrunError):
+ yield trailingBits
+
+ trailingBits = ord(trailingBits)
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ for chunk in readFromStream(substrate, length - 1, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ value = self.protoComponent.fromOctetString(
+ chunk, internalFormat=True, padding=trailingBits)
+
+ yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+ return
+
+ if not self.supportConstructedForm:
+ raise error.PyAsn1Error('Constructed encoding form prohibited '
+ 'at %s' % self.__class__.__name__)
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
+
+ current_position = substrate.tell()
+
+ while substrate.tell() - current_position < length:
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ trailingBits = oct2int(component[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ bitString = self.protoComponent.fromOctetString(
+ component[1:], internalFormat=True,
+ prepend=bitString, padding=trailingBits
+ )
+
+ yield self._createComponent(asn1Spec, tagSet, bitString, **options)
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
+
+ while True: # loop over fragments
+
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ allowEoo=True, **options):
+
+ if component is eoo.endOfOctets:
+ break
+
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ if component is eoo.endOfOctets:
+ break
+
+ trailingBits = oct2int(component[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ bitString = self.protoComponent.fromOctetString(
+ component[1:], internalFormat=True,
+ prepend=bitString, padding=trailingBits
+ )
+
+ yield self._createComponent(asn1Spec, tagSet, bitString, **options)
+
+
+class OctetStringPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.OctetString('')
+ supportConstructedForm = True
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ yield self._createComponent(asn1Spec, tagSet, chunk, **options)
+
+ return
+
+ if not self.supportConstructedForm:
+ raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__)
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ header = null
+
+ original_position = substrate.tell()
+ # head = popSubstream(substrate, length)
+ while substrate.tell() - original_position < length:
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ header += component
+
+ yield self._createComponent(asn1Spec, tagSet, header, **options)
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun and substrateFun is not self.substrateCollector:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ header = null
+
+ while True: # loop over fragments
+
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ allowEoo=True, **options):
+
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ if component is eoo.endOfOctets:
+ break
+
+ if component is eoo.endOfOctets:
+ break
+
+ header += component
+
+ yield self._createComponent(asn1Spec, tagSet, header, **options)
+
+
+class NullPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.Null('')
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ component = self._createComponent(asn1Spec, tagSet, '', **options)
+
+ if chunk:
+ raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
+
+ yield component
+
+
+class ObjectIdentifierPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.ObjectIdentifier(())
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ if not chunk:
+ raise error.PyAsn1Error('Empty substrate')
+
+ chunk = octs2ints(chunk)
+
+ oid = ()
+ index = 0
+ substrateLen = len(chunk)
+ while index < substrateLen:
+ subId = chunk[index]
+ index += 1
+ if subId < 128:
+ oid += (subId,)
+ elif subId > 128:
+ # Construct subid from a number of octets
+ nextSubId = subId
+ subId = 0
+ while nextSubId >= 128:
+ subId = (subId << 7) + (nextSubId & 0x7F)
+ if index >= substrateLen:
+ raise error.SubstrateUnderrunError(
+ 'Short substrate for sub-OID past %s' % (oid,)
+ )
+ nextSubId = chunk[index]
+ index += 1
+ oid += ((subId << 7) + nextSubId,)
+ elif subId == 128:
+ # ASN.1 spec forbids leading zeros (0x80) in OID
+ # encoding, tolerating it opens a vulnerability. See
+ # https://www.esat.kuleuven.be/cosic/publications/article-1432.pdf
+ # page 7
+ raise error.PyAsn1Error('Invalid octet 0x80 in OID encoding')
+
+ # Decode two leading arcs
+ if 0 <= oid[0] <= 39:
+ oid = (0,) + oid
+ elif 40 <= oid[0] <= 79:
+ oid = (1, oid[0] - 40) + oid[1:]
+ elif oid[0] >= 80:
+ oid = (2, oid[0] - 80) + oid[1:]
+ else:
+ raise error.PyAsn1Error('Malformed first OID octet: %s' % chunk[0])
+
+ yield self._createComponent(asn1Spec, tagSet, oid, **options)
+
+
+class RealPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.Real()
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ if not chunk:
+ yield self._createComponent(asn1Spec, tagSet, 0.0, **options)
+ return
+
+ fo = oct2int(chunk[0])
+ chunk = chunk[1:]
+ if fo & 0x80: # binary encoding
+ if not chunk:
+ raise error.PyAsn1Error("Incomplete floating-point value")
+
+ if LOG:
+ LOG('decoding binary encoded REAL')
+
+ n = (fo & 0x03) + 1
+
+ if n == 4:
+ n = oct2int(chunk[0])
+ chunk = chunk[1:]
+
+ eo, chunk = chunk[:n], chunk[n:]
+
+ if not eo or not chunk:
+ raise error.PyAsn1Error('Real exponent screwed')
+
+ e = oct2int(eo[0]) & 0x80 and -1 or 0
+
+ while eo: # exponent
+ e <<= 8
+ e |= oct2int(eo[0])
+ eo = eo[1:]
+
+ b = fo >> 4 & 0x03 # base bits
+
+ if b > 2:
+ raise error.PyAsn1Error('Illegal Real base')
+
+ if b == 1: # encbase = 8
+ e *= 3
+
+ elif b == 2: # encbase = 16
+ e *= 4
+ p = 0
+
+ while chunk: # value
+ p <<= 8
+ p |= oct2int(chunk[0])
+ chunk = chunk[1:]
+
+ if fo & 0x40: # sign bit
+ p = -p
+
+ sf = fo >> 2 & 0x03 # scale bits
+ p *= 2 ** sf
+ value = (p, 2, e)
+
+ elif fo & 0x40: # infinite value
+ if LOG:
+ LOG('decoding infinite REAL')
+
+ value = fo & 0x01 and '-inf' or 'inf'
+
+ elif fo & 0xc0 == 0: # character encoding
+ if not chunk:
+ raise error.PyAsn1Error("Incomplete floating-point value")
+
+ if LOG:
+ LOG('decoding character encoded REAL')
+
+ try:
+ if fo & 0x3 == 0x1: # NR1
+ value = (int(chunk), 10, 0)
+
+ elif fo & 0x3 == 0x2: # NR2
+ value = float(chunk)
+
+ elif fo & 0x3 == 0x3: # NR3
+ value = float(chunk)
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'Unknown NR (tag %s)' % fo
+ )
+
+ except ValueError:
+ raise error.SubstrateUnderrunError(
+ 'Bad character Real syntax'
+ )
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'Unknown encoding (tag %s)' % fo
+ )
+
+ yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+
+class AbstractConstructedPayloadDecoder(AbstractPayloadDecoder):
+ protoComponent = None
+
+
+class ConstructedPayloadDecoderBase(AbstractConstructedPayloadDecoder):
+ protoRecordComponent = None
+ protoSequenceComponent = None
+
+ def _getComponentTagMap(self, asn1Object, idx):
+ raise NotImplementedError()
+
+ def _getComponentPositionByType(self, asn1Object, tagSet, idx):
+ raise NotImplementedError()
+
+ def _decodeComponentsSchemaless(
+ self, substrate, tagSet=None, decodeFun=None,
+ length=None, **options):
+
+ asn1Object = None
+
+ components = []
+ componentTypes = set()
+
+ original_position = substrate.tell()
+
+ while length == -1 or substrate.tell() < original_position + length:
+ for component in decodeFun(substrate, **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ if length == -1 and component is eoo.endOfOctets:
+ break
+
+ components.append(component)
+ componentTypes.add(component.tagSet)
+
+ # Now we have to guess is it SEQUENCE/SET or SEQUENCE OF/SET OF
+ # The heuristics is:
+ # * 1+ components of different types -> likely SEQUENCE/SET
+ # * otherwise -> likely SEQUENCE OF/SET OF
+ if len(componentTypes) > 1:
+ protoComponent = self.protoRecordComponent
+
+ else:
+ protoComponent = self.protoSequenceComponent
+
+ asn1Object = protoComponent.clone(
+ # construct tagSet from base tag from prototype ASN.1 object
+ # and additional tags recovered from the substrate
+ tagSet=tag.TagSet(protoComponent.tagSet.baseTag, *tagSet.superTags)
+ )
+
+ if LOG:
+ LOG('guessed %r container type (pass `asn1Spec` to guide the '
+ 'decoder)' % asn1Object)
+
+ for idx, component in enumerate(components):
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ yield asn1Object
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatConstructed:
+ raise error.PyAsn1Error('Constructed tag format expected')
+
+ original_position = substrate.tell()
+
+ if substrateFun:
+ if asn1Spec is not None:
+ asn1Object = asn1Spec.clone()
+
+ elif self.protoComponent is not None:
+ asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+ else:
+ asn1Object = self.protoRecordComponent, self.protoSequenceComponent
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ if asn1Spec is None:
+ for asn1Object in self._decodeComponentsSchemaless(
+ substrate, tagSet=tagSet, decodeFun=decodeFun,
+ length=length, **options):
+ if isinstance(asn1Object, SubstrateUnderrunError):
+ yield asn1Object
+
+ if substrate.tell() < original_position + length:
+ if LOG:
+ for trailing in readFromStream(substrate, context=options):
+ if isinstance(trailing, SubstrateUnderrunError):
+ yield trailing
+
+ LOG('Unused trailing %d octets encountered: %s' % (
+ len(trailing), debug.hexdump(trailing)))
+
+ yield asn1Object
+
+ return
+
+ asn1Object = asn1Spec.clone()
+ asn1Object.clear()
+
+ options = self._passAsn1Object(asn1Object, options)
+
+ if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):
+
+ namedTypes = asn1Spec.componentType
+
+ isSetType = asn1Spec.typeId == univ.Set.typeId
+ isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault
+
+ if LOG:
+ LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
+ not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
+ asn1Spec))
+
+ seenIndices = set()
+ idx = 0
+ while substrate.tell() - original_position < length:
+ if not namedTypes:
+ componentType = None
+
+ elif isSetType:
+ componentType = namedTypes.tagMapUnique
+
+ else:
+ try:
+ if isDeterministic:
+ componentType = namedTypes[idx].asn1Object
+
+ elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+ componentType = namedTypes.getTagMapNearPosition(idx)
+
+ else:
+ componentType = namedTypes[idx].asn1Object
+
+ except IndexError:
+ raise error.PyAsn1Error(
+ 'Excessive components decoded at %r' % (asn1Spec,)
+ )
+
+ for component in decodeFun(substrate, componentType, **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ if not isDeterministic and namedTypes:
+ if isSetType:
+ idx = namedTypes.getPositionByType(component.effectiveTagSet)
+
+ elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+ idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)
+
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ seenIndices.add(idx)
+ idx += 1
+
+ if LOG:
+ LOG('seen component indices %s' % seenIndices)
+
+ if namedTypes:
+ if not namedTypes.requiredComponents.issubset(seenIndices):
+ raise error.PyAsn1Error(
+ 'ASN.1 object %s has uninitialized '
+ 'components' % asn1Object.__class__.__name__)
+
+ if namedTypes.hasOpenTypes:
+
+ openTypes = options.get('openTypes', {})
+
+ if LOG:
+ LOG('user-specified open types map:')
+
+ for k, v in openTypes.items():
+ LOG('%s -> %r' % (k, v))
+
+ if openTypes or options.get('decodeOpenTypes', False):
+
+ for idx, namedType in enumerate(namedTypes.namedTypes):
+ if not namedType.openType:
+ continue
+
+ if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
+ continue
+
+ governingValue = asn1Object.getComponentByName(
+ namedType.openType.name
+ )
+
+ try:
+ openType = openTypes[governingValue]
+
+ except KeyError:
+
+ if LOG:
+ LOG('default open types map of component '
+ '"%s.%s" governed by component "%s.%s"'
+ ':' % (asn1Object.__class__.__name__,
+ namedType.name,
+ asn1Object.__class__.__name__,
+ namedType.openType.name))
+
+ for k, v in namedType.openType.items():
+ LOG('%s -> %r' % (k, v))
+
+ try:
+ openType = namedType.openType[governingValue]
+
+ except KeyError:
+ if LOG:
+ LOG('failed to resolve open type by governing '
+ 'value %r' % (governingValue,))
+ continue
+
+ if LOG:
+ LOG('resolved open type %r by governing '
+ 'value %r' % (openType, governingValue))
+
+ containerValue = asn1Object.getComponentByPosition(idx)
+
+ if containerValue.typeId in (
+ univ.SetOf.typeId, univ.SequenceOf.typeId):
+
+ for pos, containerElement in enumerate(
+ containerValue):
+
+ stream = asSeekableStream(containerValue[pos].asOctets())
+
+ for component in decodeFun(stream, asn1Spec=openType, **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ containerValue[pos] = component
+
+ else:
+ stream = asSeekableStream(asn1Object.getComponentByPosition(idx).asOctets())
+
+ for component in decodeFun(stream, asn1Spec=openType, **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ asn1Object.setComponentByPosition(idx, component)
+
+ else:
+ inconsistency = asn1Object.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ else:
+ componentType = asn1Spec.componentType
+
+ if LOG:
+ LOG('decoding type %r chosen by given `asn1Spec`' % componentType)
+
+ idx = 0
+
+ while substrate.tell() - original_position < length:
+ for component in decodeFun(substrate, componentType, **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ idx += 1
+
+ yield asn1Object
+
+    def indefLenValueDecoder(self, substrate, asn1Spec,
+                             tagSet=None, length=None, state=None,
+                             decodeFun=None, substrateFun=None,
+                             **options):
+        # Decode a constructed SET/SEQUENCE (OF) value serialized in
+        # *indefinite-length* form: components are consumed one by one
+        # until the end-of-octets sentinel is seen.  This is a generator;
+        # it yields SubstrateUnderrunError objects on short input and,
+        # last, the reassembled ASN.1 object.
+        if tagSet[0].tagFormat != tag.tagFormatConstructed:
+            raise error.PyAsn1Error('Constructed tag format expected')
+
+        # A caller-supplied substrate handler takes over raw substrate
+        # consumption entirely.
+        if substrateFun is not None:
+            if asn1Spec is not None:
+                asn1Object = asn1Spec.clone()
+
+            elif self.protoComponent is not None:
+                asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+            else:
+                asn1Object = self.protoRecordComponent, self.protoSequenceComponent
+
+            for chunk in substrateFun(asn1Object, substrate, length, options):
+                yield chunk
+
+            return
+
+        # No schema given: recover structure from tags alone.
+        if asn1Spec is None:
+            for asn1Object in self._decodeComponentsSchemaless(
+                    substrate, tagSet=tagSet, decodeFun=decodeFun,
+                    length=length, **dict(options, allowEoo=True)):
+                if isinstance(asn1Object, SubstrateUnderrunError):
+                    yield asn1Object
+
+            yield asn1Object
+
+            return
+
+        asn1Object = asn1Spec.clone()
+        asn1Object.clear()
+
+        options = self._passAsn1Object(asn1Object, options)
+
+        if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):
+
+            namedTypes = asn1Object.componentType
+
+            isSetType = asn1Object.typeId == univ.Set.typeId
+            isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault
+
+            if LOG:
+                LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
+                    not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
+                    asn1Spec))
+
+            seenIndices = set()
+
+            idx = 0
+
+            while True: # loop over components
+                if len(namedTypes) <= idx:
+                    asn1Spec = None
+
+                elif isSetType:
+                    asn1Spec = namedTypes.tagMapUnique
+
+                else:
+                    try:
+                        if isDeterministic:
+                            asn1Spec = namedTypes[idx].asn1Object
+
+                        elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+                            asn1Spec = namedTypes.getTagMapNearPosition(idx)
+
+                        else:
+                            asn1Spec = namedTypes[idx].asn1Object
+
+                    except IndexError:
+                        raise error.PyAsn1Error(
+                            'Excessive components decoded at %r' % (asn1Object,)
+                        )
+
+                for component in decodeFun(substrate, asn1Spec, allowEoo=True, **options):
+
+                    if isinstance(component, SubstrateUnderrunError):
+                        yield component
+
+                    if component is eoo.endOfOctets:
+                        break
+
+                # End-of-octets terminates the component loop.
+                if component is eoo.endOfOctets:
+                    break
+
+                if not isDeterministic and namedTypes:
+                    if isSetType:
+                        idx = namedTypes.getPositionByType(component.effectiveTagSet)
+
+                    elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+                        idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)
+
+                asn1Object.setComponentByPosition(
+                    idx, component,
+                    verifyConstraints=False,
+                    matchTags=False, matchConstraints=False
+                )
+
+                seenIndices.add(idx)
+                idx += 1
+
+            if LOG:
+                LOG('seen component indices %s' % seenIndices)
+
+            if namedTypes:
+                if not namedTypes.requiredComponents.issubset(seenIndices):
+                    raise error.PyAsn1Error(
+                        'ASN.1 object %s has uninitialized '
+                        'components' % asn1Object.__class__.__name__)
+
+                # Second pass: re-decode ANY-typed fields through the open
+                # type chosen by their governing component, if requested.
+                if namedTypes.hasOpenTypes:
+
+                    openTypes = options.get('openTypes', {})
+
+                    if LOG:
+                        LOG('user-specified open types map:')
+
+                        for k, v in openTypes.items():
+                            LOG('%s -> %r' % (k, v))
+
+                    if openTypes or options.get('decodeOpenTypes', False):
+
+                        for idx, namedType in enumerate(namedTypes.namedTypes):
+                            if not namedType.openType:
+                                continue
+
+                            if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
+                                continue
+
+                            governingValue = asn1Object.getComponentByName(
+                                namedType.openType.name
+                            )
+
+                            # User map first, then the schema's own map.
+                            try:
+                                openType = openTypes[governingValue]
+
+                            except KeyError:
+
+                                if LOG:
+                                    LOG('default open types map of component '
+                                        '"%s.%s" governed by component "%s.%s"'
+                                        ':' % (asn1Object.__class__.__name__,
+                                               namedType.name,
+                                               asn1Object.__class__.__name__,
+                                               namedType.openType.name))
+
+                                    for k, v in namedType.openType.items():
+                                        LOG('%s -> %r' % (k, v))
+
+                                try:
+                                    openType = namedType.openType[governingValue]
+
+                                except KeyError:
+                                    if LOG:
+                                        LOG('failed to resolve open type by governing '
+                                            'value %r' % (governingValue,))
+                                    continue
+
+                            if LOG:
+                                LOG('resolved open type %r by governing '
+                                    'value %r' % (openType, governingValue))
+
+                            containerValue = asn1Object.getComponentByPosition(idx)
+
+                            if containerValue.typeId in (
+                                    univ.SetOf.typeId, univ.SequenceOf.typeId):
+
+                                for pos, containerElement in enumerate(
+                                        containerValue):
+
+                                    stream = asSeekableStream(containerValue[pos].asOctets())
+
+                                    for component in decodeFun(stream, asn1Spec=openType,
+                                                               **dict(options, allowEoo=True)):
+                                        if isinstance(component, SubstrateUnderrunError):
+                                            yield component
+
+                                        if component is eoo.endOfOctets:
+                                            break
+
+                                    containerValue[pos] = component
+
+                            else:
+                                stream = asSeekableStream(asn1Object.getComponentByPosition(idx).asOctets())
+                                for component in decodeFun(stream, asn1Spec=openType,
+                                                           **dict(options, allowEoo=True)):
+                                    if isinstance(component, SubstrateUnderrunError):
+                                        yield component
+
+                                    if component is eoo.endOfOctets:
+                                        break
+
+                                asn1Object.setComponentByPosition(idx, component)
+
+                else:
+                    inconsistency = asn1Object.isInconsistent
+                    if inconsistency:
+                        raise inconsistency
+
+        else:
+            # SEQUENCE OF / SET OF: uniform component type, loop to EOO.
+            componentType = asn1Spec.componentType
+
+            if LOG:
+                LOG('decoding type %r chosen by given `asn1Spec`' % componentType)
+
+            idx = 0
+
+            while True:
+
+                for component in decodeFun(
+                        substrate, componentType, allowEoo=True, **options):
+
+                    if isinstance(component, SubstrateUnderrunError):
+                        yield component
+
+                    if component is eoo.endOfOctets:
+                        break
+
+                if component is eoo.endOfOctets:
+                    break
+
+                asn1Object.setComponentByPosition(
+                    idx, component,
+                    verifyConstraints=False,
+                    matchTags=False, matchConstraints=False
+                )
+
+                idx += 1
+
+        yield asn1Object
+
+
+class SequenceOrSequenceOfPayloadDecoder(ConstructedPayloadDecoderBase):
+    # Prototypes used for schemaless decoding: a SEQUENCE for
+    # record-like content, a SEQUENCE OF for uniform repeated content.
+    protoRecordComponent = univ.Sequence()
+    protoSequenceComponent = univ.SequenceOf()
+
+
+class SequencePayloadDecoder(SequenceOrSequenceOfPayloadDecoder):
+    protoComponent = univ.Sequence()
+
+
+class SequenceOfPayloadDecoder(SequenceOrSequenceOfPayloadDecoder):
+    protoComponent = univ.SequenceOf()
+
+
+class SetOrSetOfPayloadDecoder(ConstructedPayloadDecoderBase):
+    # Same record/uniform split as above, for SET / SET OF.
+    protoRecordComponent = univ.Set()
+    protoSequenceComponent = univ.SetOf()
+
+
+class SetPayloadDecoder(SetOrSetOfPayloadDecoder):
+    protoComponent = univ.Set()
+
+
+class SetOfPayloadDecoder(SetOrSetOfPayloadDecoder):
+    protoComponent = univ.SetOf()
+
+
+class ChoicePayloadDecoder(ConstructedPayloadDecoderBase):
+    # Decoder for ASN.1 CHOICE: exactly one alternative is present on
+    # the wire; the alternative is selected by its effective tag set.
+    protoComponent = univ.Choice()
+
+    def valueDecoder(self, substrate, asn1Spec,
+                     tagSet=None, length=None, state=None,
+                     decodeFun=None, substrateFun=None,
+                     **options):
+        # Definite-length form.  Generator: yields underrun errors and,
+        # last, the CHOICE object with its chosen component set.
+        if asn1Spec is None:
+            asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+        else:
+            asn1Object = asn1Spec.clone()
+
+        if substrateFun:
+            for chunk in substrateFun(asn1Object, substrate, length, options):
+                yield chunk
+
+            return
+
+        options = self._passAsn1Object(asn1Object, options)
+
+        # Explicitly tagged CHOICE: the outer tag was already consumed,
+        # decode the inner component afresh.  Otherwise re-use the
+        # already-decoded tagSet/length/state for the component.
+        if asn1Object.tagSet == tagSet:
+            if LOG:
+                LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,))
+
+            for component in decodeFun(
+                    substrate, asn1Object.componentTagMap, **options):
+                if isinstance(component, SubstrateUnderrunError):
+                    yield component
+
+        else:
+            if LOG:
+                LOG('decoding %s as untagged CHOICE' % (tagSet,))
+
+            for component in decodeFun(
+                    substrate, asn1Object.componentTagMap, tagSet, length,
+                    state, **options):
+                if isinstance(component, SubstrateUnderrunError):
+                    yield component
+
+        effectiveTagSet = component.effectiveTagSet
+
+        if LOG:
+            LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet))
+
+        asn1Object.setComponentByType(
+            effectiveTagSet, component,
+            verifyConstraints=False,
+            matchTags=False, matchConstraints=False,
+            innerFlag=False
+        )
+
+        yield asn1Object
+
+    def indefLenValueDecoder(self, substrate, asn1Spec,
+                             tagSet=None, length=None, state=None,
+                             decodeFun=None, substrateFun=None,
+                             **options):
+        # Indefinite-length form: same selection logic, but reads until
+        # the end-of-octets sentinel when explicitly tagged.
+        if asn1Spec is None:
+            asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+        else:
+            asn1Object = asn1Spec.clone()
+
+        if substrateFun:
+            for chunk in substrateFun(asn1Object, substrate, length, options):
+                yield chunk
+
+            return
+
+        options = self._passAsn1Object(asn1Object, options)
+
+        isTagged = asn1Object.tagSet == tagSet
+
+        if LOG:
+            LOG('decoding %s as %stagged CHOICE' % (
+                tagSet, isTagged and 'explicitly ' or 'un'))
+
+        while True:
+
+            if isTagged:
+                iterator = decodeFun(
+                    substrate, asn1Object.componentType.tagMapUnique,
+                    **dict(options, allowEoo=True))
+
+            else:
+                iterator = decodeFun(
+                    substrate, asn1Object.componentType.tagMapUnique,
+                    tagSet, length, state, **dict(options, allowEoo=True))
+
+            for component in iterator:
+
+                if isinstance(component, SubstrateUnderrunError):
+                    yield component
+
+                if component is eoo.endOfOctets:
+                    break
+
+                effectiveTagSet = component.effectiveTagSet
+
+                if LOG:
+                    LOG('decoded component %s, effective tag set '
+                        '%s' % (component, effectiveTagSet))
+
+                asn1Object.setComponentByType(
+                    effectiveTagSet, component,
+                    verifyConstraints=False,
+                    matchTags=False, matchConstraints=False,
+                    innerFlag=False
+                )
+
+                # Untagged CHOICE carries a single component: stop here.
+                if not isTagged:
+                    break
+
+            if not isTagged or component is eoo.endOfOctets:
+                break
+
+        yield asn1Object
+
+
+class AnyPayloadDecoder(AbstractSimplePayloadDecoder):
+    # Decoder for ASN.1 ANY: the value is captured as raw octets,
+    # including the header octets when the ANY itself is untagged.
+    protoComponent = univ.Any()
+
+    def valueDecoder(self, substrate, asn1Spec,
+                     tagSet=None, length=None, state=None,
+                     decodeFun=None, substrateFun=None,
+                     **options):
+        # Definite-length form.
+        if asn1Spec is None:
+            isUntagged = True
+
+        elif asn1Spec.__class__ is tagmap.TagMap:
+            isUntagged = tagSet not in asn1Spec.tagMap
+
+        else:
+            isUntagged = tagSet != asn1Spec.tagSet
+
+        if isUntagged:
+            # Rewind to the marked start of this TLV so the captured
+            # octets include the tag and length header.
+            fullPosition = substrate.markedPosition
+            currentPosition = substrate.tell()
+
+            substrate.seek(fullPosition, os.SEEK_SET)
+            length += currentPosition - fullPosition
+
+            if LOG:
+                for chunk in peekIntoStream(substrate, length):
+                    if isinstance(chunk, SubstrateUnderrunError):
+                        yield chunk
+                LOG('decoding as untagged ANY, substrate '
+                    '%s' % debug.hexdump(chunk))
+
+        if substrateFun:
+            for chunk in substrateFun(
+                    self._createComponent(asn1Spec, tagSet, noValue, **options),
+                    substrate, length, options):
+                yield chunk
+
+            return
+
+        for chunk in readFromStream(substrate, length, options):
+            if isinstance(chunk, SubstrateUnderrunError):
+                yield chunk
+
+        yield self._createComponent(asn1Spec, tagSet, chunk, **options)
+
+    def indefLenValueDecoder(self, substrate, asn1Spec,
+                             tagSet=None, length=None, state=None,
+                             decodeFun=None, substrateFun=None,
+                             **options):
+        # Indefinite-length form: collect constructed fragments into one
+        # octet blob until the end-of-octets sentinel.
+        if asn1Spec is None:
+            isTagged = False
+
+        elif asn1Spec.__class__ is tagmap.TagMap:
+            isTagged = tagSet in asn1Spec.tagMap
+
+        else:
+            isTagged = tagSet == asn1Spec.tagSet
+
+        if isTagged:
+            # tagged Any type -- consume header substrate
+            chunk = null
+
+            if LOG:
+                LOG('decoding as tagged ANY')
+
+        else:
+            # TODO: Seems not to be tested
+            fullPosition = substrate.markedPosition
+            currentPosition = substrate.tell()
+
+            substrate.seek(fullPosition, os.SEEK_SET)
+            for chunk in readFromStream(substrate, currentPosition - fullPosition, options):
+                if isinstance(chunk, SubstrateUnderrunError):
+                    yield chunk
+
+            if LOG:
+                LOG('decoding as untagged ANY, header substrate %s' % debug.hexdump(chunk))
+
+        # Any components do not inherit initial tag
+        asn1Spec = self.protoComponent
+
+        if substrateFun and substrateFun is not self.substrateCollector:
+            asn1Object = self._createComponent(
+                asn1Spec, tagSet, noValue, **options)
+
+            for chunk in substrateFun(
+                    asn1Object, chunk + substrate, length + len(chunk), options):
+                yield chunk
+
+            return
+
+        if LOG:
+            LOG('assembling constructed serialization')
+
+        # All inner fragments are of the same type, treat them as octet string
+        substrateFun = self.substrateCollector
+
+        while True: # loop over fragments
+
+            for component in decodeFun(
+                    substrate, asn1Spec, substrateFun=substrateFun,
+                    allowEoo=True, **options):
+
+                if isinstance(component, SubstrateUnderrunError):
+                    yield component
+
+                if component is eoo.endOfOctets:
+                    break
+
+            if component is eoo.endOfOctets:
+                break
+
+            chunk += component
+
+        if substrateFun:
+            yield chunk # TODO: Weird
+
+        else:
+            yield self._createComponent(asn1Spec, tagSet, chunk, **options)
+
+
+# character string types
+# All of these reuse OCTET STRING decoding verbatim; only the prototype
+# component (and hence the Python type of the decoded value) differs.
+class UTF8StringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.UTF8String()
+
+
+class NumericStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.NumericString()
+
+
+class PrintableStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.PrintableString()
+
+
+class TeletexStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.TeletexString()
+
+
+class VideotexStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.VideotexString()
+
+
+class IA5StringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.IA5String()
+
+
+class GraphicStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.GraphicString()
+
+
+class VisibleStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.VisibleString()
+
+
+class GeneralStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.GeneralString()
+
+
+class UniversalStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.UniversalString()
+
+
+class BMPStringPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = char.BMPString()
+
+
+# "useful" types
+class ObjectDescriptorPayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = useful.ObjectDescriptor()
+
+
+class GeneralizedTimePayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = useful.GeneralizedTime()
+
+
+class UTCTimePayloadDecoder(OctetStringPayloadDecoder):
+    protoComponent = useful.UTCTime()
+
+
+# Tag-to-codec map: routes a decoded TagSet to a payload decoder when
+# decoding without (or before consulting) an ASN.1 schema.
+TAG_MAP = {
+    univ.Integer.tagSet: IntegerPayloadDecoder(),
+    univ.Boolean.tagSet: BooleanPayloadDecoder(),
+    univ.BitString.tagSet: BitStringPayloadDecoder(),
+    univ.OctetString.tagSet: OctetStringPayloadDecoder(),
+    univ.Null.tagSet: NullPayloadDecoder(),
+    univ.ObjectIdentifier.tagSet: ObjectIdentifierPayloadDecoder(),
+    univ.Enumerated.tagSet: IntegerPayloadDecoder(),
+    univ.Real.tagSet: RealPayloadDecoder(),
+    univ.Sequence.tagSet: SequenceOrSequenceOfPayloadDecoder(),  # conflicts with SequenceOf
+    univ.Set.tagSet: SetOrSetOfPayloadDecoder(),  # conflicts with SetOf
+    univ.Choice.tagSet: ChoicePayloadDecoder(),  # conflicts with Any
+    # character string types
+    char.UTF8String.tagSet: UTF8StringPayloadDecoder(),
+    char.NumericString.tagSet: NumericStringPayloadDecoder(),
+    char.PrintableString.tagSet: PrintableStringPayloadDecoder(),
+    char.TeletexString.tagSet: TeletexStringPayloadDecoder(),
+    char.VideotexString.tagSet: VideotexStringPayloadDecoder(),
+    char.IA5String.tagSet: IA5StringPayloadDecoder(),
+    char.GraphicString.tagSet: GraphicStringPayloadDecoder(),
+    char.VisibleString.tagSet: VisibleStringPayloadDecoder(),
+    char.GeneralString.tagSet: GeneralStringPayloadDecoder(),
+    char.UniversalString.tagSet: UniversalStringPayloadDecoder(),
+    char.BMPString.tagSet: BMPStringPayloadDecoder(),
+    # useful types
+    useful.ObjectDescriptor.tagSet: ObjectDescriptorPayloadDecoder(),
+    useful.GeneralizedTime.tagSet: GeneralizedTimePayloadDecoder(),
+    useful.UTCTime.tagSet: UTCTimePayloadDecoder()
+}
+
+# Type-to-codec map for ambiguous ASN.1 types
+TYPE_MAP = {
+    univ.Set.typeId: SetPayloadDecoder(),
+    univ.SetOf.typeId: SetOfPayloadDecoder(),
+    univ.Sequence.typeId: SequencePayloadDecoder(),
+    univ.SequenceOf.typeId: SequenceOfPayloadDecoder(),
+    univ.Choice.typeId: ChoicePayloadDecoder(),
+    univ.Any.typeId: AnyPayloadDecoder()
+}
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in TAG_MAP.values():
+    if typeDecoder.protoComponent is not None:
+        typeId = typeDecoder.protoComponent.__class__.typeId
+        if typeId is not None and typeId not in TYPE_MAP:
+            TYPE_MAP[typeId] = typeDecoder
+
+
+# States of the SingleItemDecoder TLV state machine: decode tag, decode
+# length, pick a value decoder (by tag or by schema), then decode the
+# value -- with fallbacks for explicit tagging, raw dumping and errors.
+(stDecodeTag,
+ stDecodeLength,
+ stGetValueDecoder,
+ stGetValueDecoderByAsn1Spec,
+ stGetValueDecoderByTag,
+ stTryAsExplicitTag,
+ stDecodeValue,
+ stDumpRawValue,
+ stErrorCondition,
+ stStop) = [x for x in range(10)]
+
+
+# Two zero octets terminating an indefinite-length encoding (X.690 8.1.5).
+EOO_SENTINEL = ints2octs((0, 0))
+
+
+class SingleItemDecoder(object):
+    # Decodes exactly one, possibly nested, TLV from a seekable stream.
+    # Implemented as a generator-driven state machine (see state
+    # constants above); underrun errors are yielded so the caller can
+    # supply more data and resume.
+    defaultErrorState = stErrorCondition
+    # Alternative policy: dump unrecognized TLVs as raw ANY instead of
+    # raising (toggle the line below).
+    #defaultErrorState = stDumpRawValue
+    defaultRawDecoder = AnyPayloadDecoder()
+
+    supportIndefLength = True
+
+    TAG_MAP = TAG_MAP
+    TYPE_MAP = TYPE_MAP
+
+    def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
+        # Per-instance codec maps; fall back to class-level defaults.
+        self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
+        self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP
+
+        # Tag & TagSet objects caches
+        self._tagCache = {}
+        self._tagSetCache = {}
+
+    def __call__(self, substrate, asn1Spec=None,
+                 tagSet=None, length=None, state=stDecodeTag,
+                 decodeFun=None, substrateFun=None,
+                 **options):
+        # Generator: yields SubstrateUnderrunError objects on short
+        # input and, last, the decoded ASN.1 value.
+
+        allowEoo = options.pop('allowEoo', False)
+
+        if LOG:
+            LOG('decoder called at scope %s with state %d, working with up '
+                'to %s octets of substrate: '
+                '%s' % (debug.scope, state, length, substrate))
+
+        # Look for end-of-octets sentinel
+        if allowEoo and self.supportIndefLength:
+
+            for eoo_candidate in readFromStream(substrate, 2, options):
+                if isinstance(eoo_candidate, SubstrateUnderrunError):
+                    yield eoo_candidate
+
+            if eoo_candidate == EOO_SENTINEL:
+                if LOG:
+                    LOG('end-of-octets sentinel found')
+                yield eoo.endOfOctets
+                return
+
+            else:
+                substrate.seek(-2, os.SEEK_CUR)
+
+        tagMap = self._tagMap
+        typeMap = self._typeMap
+        tagCache = self._tagCache
+        tagSetCache = self._tagSetCache
+
+        value = noValue
+
+        # Remember where this TLV starts (used e.g. by ANY decoding).
+        substrate.markedPosition = substrate.tell()
+
+        while state is not stStop:
+
+            if state is stDecodeTag:
+                # Decode tag
+                isShortTag = True
+
+                for firstByte in readFromStream(substrate, 1, options):
+                    if isinstance(firstByte, SubstrateUnderrunError):
+                        yield firstByte
+
+                firstOctet = ord(firstByte)
+
+                try:
+                    lastTag = tagCache[firstOctet]
+
+                except KeyError:
+                    integerTag = firstOctet
+                    tagClass = integerTag & 0xC0
+                    tagFormat = integerTag & 0x20
+                    tagId = integerTag & 0x1F
+
+                    # 0x1F in the identifier octet signals the
+                    # high-tag-number (multi-octet, base-128) form.
+                    if tagId == 0x1F:
+                        isShortTag = False
+                        lengthOctetIdx = 0
+                        tagId = 0
+
+                        while True:
+                            for integerByte in readFromStream(substrate, 1, options):
+                                if isinstance(integerByte, SubstrateUnderrunError):
+                                    yield integerByte
+
+                            if not integerByte:
+                                raise error.SubstrateUnderrunError(
+                                    'Short octet stream on long tag decoding'
+                                )
+
+                            integerTag = ord(integerByte)
+                            lengthOctetIdx += 1
+                            tagId <<= 7
+                            tagId |= (integerTag & 0x7F)
+
+                            if not integerTag & 0x80:
+                                break
+
+                    lastTag = tag.Tag(
+                        tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
+                    )
+
+                    if isShortTag:
+                        # cache short tags
+                        tagCache[firstOctet] = lastTag
+
+                if tagSet is None:
+                    if isShortTag:
+                        try:
+                            tagSet = tagSetCache[firstOctet]
+
+                        except KeyError:
+                            # base tag not recovered
+                            tagSet = tag.TagSet((), lastTag)
+                            tagSetCache[firstOctet] = tagSet
+                    else:
+                        tagSet = tag.TagSet((), lastTag)
+
+                else:
+                    tagSet = lastTag + tagSet
+
+                state = stDecodeLength
+
+                if LOG:
+                    LOG('tag decoded into %s, decoding length' % tagSet)
+
+            if state is stDecodeLength:
+                # Decode length
+                for firstOctet in readFromStream(substrate, 1, options):
+                    if isinstance(firstOctet, SubstrateUnderrunError):
+                        yield firstOctet
+
+                firstOctet = ord(firstOctet)
+
+                if firstOctet < 128:
+                    # Short definite form: length fits the single octet.
+                    length = firstOctet
+
+                elif firstOctet > 128:
+                    # Long definite form: low 7 bits give the count of
+                    # subsequent length octets (big-endian).
+                    size = firstOctet & 0x7F
+                    # encoded in size bytes
+                    for encodedLength in readFromStream(substrate, size, options):
+                        if isinstance(encodedLength, SubstrateUnderrunError):
+                            yield encodedLength
+                    encodedLength = list(encodedLength)
+                    # missing check on maximum size, which shouldn't be a
+                    # problem, we can handle more than is possible
+                    if len(encodedLength) != size:
+                        raise error.SubstrateUnderrunError(
+                            '%s<%s at %s' % (size, len(encodedLength), tagSet)
+                        )
+
+                    length = 0
+                    for lengthOctet in encodedLength:
+                        length <<= 8
+                        length |= oct2int(lengthOctet)
+                    size += 1
+
+                else: # 128 means indefinite
+                    length = -1
+
+                if length == -1 and not self.supportIndefLength:
+                    raise error.PyAsn1Error('Indefinite length encoding not supported by this codec')
+
+                state = stGetValueDecoder
+
+                if LOG:
+                    LOG('value length decoded into %d' % length)
+
+            if state is stGetValueDecoder:
+                if asn1Spec is None:
+                    state = stGetValueDecoderByTag
+
+                else:
+                    state = stGetValueDecoderByAsn1Spec
+            #
+            # There're two ways of creating subtypes in ASN.1 what influences
+            # decoder operation. These methods are:
+            # 1) Either base types used in or no IMPLICIT tagging has been
+            #    applied on subtyping.
+            # 2) Subtype syntax drops base type information (by means of
+            #    IMPLICIT tagging.
+            # The first case allows for complete tag recovery from substrate
+            # while the second one requires original ASN.1 type spec for
+            # decoding.
+            #
+            # In either case a set of tags (tagSet) is coming from substrate
+            # in an incremental, tag-by-tag fashion (this is the case of
+            # EXPLICIT tag which is most basic). Outermost tag comes first
+            # from the wire.
+            #
+            if state is stGetValueDecoderByTag:
+                try:
+                    concreteDecoder = tagMap[tagSet]
+
+                except KeyError:
+                    concreteDecoder = None
+
+                if concreteDecoder:
+                    state = stDecodeValue
+
+                else:
+                    # Retry with the base (innermost) tag alone.
+                    try:
+                        concreteDecoder = tagMap[tagSet[:1]]
+
+                    except KeyError:
+                        concreteDecoder = None
+
+                    if concreteDecoder:
+                        state = stDecodeValue
+                    else:
+                        state = stTryAsExplicitTag
+
+                if LOG:
+                    LOG('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
+                    debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
+
+            if state is stGetValueDecoderByAsn1Spec:
+
+                if asn1Spec.__class__ is tagmap.TagMap:
+                    try:
+                        chosenSpec = asn1Spec[tagSet]
+
+                    except KeyError:
+                        chosenSpec = None
+
+                    if LOG:
+                        LOG('candidate ASN.1 spec is a map of:')
+
+                        for firstOctet, v in asn1Spec.presentTypes.items():
+                            LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
+
+                        if asn1Spec.skipTypes:
+                            LOG('but neither of: ')
+                            for firstOctet, v in asn1Spec.skipTypes.items():
+                                LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
+                        LOG('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '<none>' or chosenSpec.prettyPrintType(), tagSet))
+
+                elif tagSet == asn1Spec.tagSet or tagSet in asn1Spec.tagMap:
+                    chosenSpec = asn1Spec
+                    if LOG:
+                        LOG('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
+
+                else:
+                    chosenSpec = None
+
+                if chosenSpec is not None:
+                    try:
+                        # ambiguous type or just faster codec lookup
+                        concreteDecoder = typeMap[chosenSpec.typeId]
+
+                        if LOG:
+                            LOG('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,))
+
+                    except KeyError:
+                        # use base type for codec lookup to recover untagged types
+                        baseTagSet = tag.TagSet(chosenSpec.tagSet.baseTag, chosenSpec.tagSet.baseTag)
+                        try:
+                            # base type or tagged subtype
+                            concreteDecoder = tagMap[baseTagSet]
+
+                            if LOG:
+                                LOG('value decoder chosen by base %s' % (baseTagSet,))
+
+                        except KeyError:
+                            concreteDecoder = None
+
+                    if concreteDecoder:
+                        asn1Spec = chosenSpec
+                        state = stDecodeValue
+
+                    else:
+                        state = stTryAsExplicitTag
+
+                else:
+                    concreteDecoder = None
+                    state = stTryAsExplicitTag
+
+                if LOG:
+                    LOG('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
+                    debug.scope.push(chosenSpec is None and '?' or chosenSpec.__class__.__name__)
+
+            if state is stDecodeValue:
+                if not options.get('recursiveFlag', True) and not substrateFun: # deprecate this
+                    substrateFun = lambda a, b, c: (a, b[:c])
+
+                original_position = substrate.tell()
+
+                if length == -1: # indef length
+                    for value in concreteDecoder.indefLenValueDecoder(
+                            substrate, asn1Spec,
+                            tagSet, length, stGetValueDecoder,
+                            self, substrateFun, **options):
+                        if isinstance(value, SubstrateUnderrunError):
+                            yield value
+
+                else:
+                    for value in concreteDecoder.valueDecoder(
+                            substrate, asn1Spec,
+                            tagSet, length, stGetValueDecoder,
+                            self, substrateFun, **options):
+                        if isinstance(value, SubstrateUnderrunError):
+                            yield value
+
+                    # Definite-length values must consume exactly `length`.
+                    bytesRead = substrate.tell() - original_position
+                    if bytesRead != length:
+                        raise PyAsn1Error(
+                            "Read %s bytes instead of expected %s." % (bytesRead, length))
+
+                if LOG:
+                    LOG('codec %s yields type %s, value:\n%s\n...' % (
+                        concreteDecoder.__class__.__name__, value.__class__.__name__,
+                        isinstance(value, base.Asn1Item) and value.prettyPrint() or value))
+
+                state = stStop
+                break
+
+            if state is stTryAsExplicitTag:
+                if (tagSet and
+                        tagSet[0].tagFormat == tag.tagFormatConstructed and
+                        tagSet[0].tagClass != tag.tagClassUniversal):
+                    # Assume explicit tagging
+                    concreteDecoder = rawPayloadDecoder
+                    state = stDecodeValue
+
+                else:
+                    concreteDecoder = None
+                    state = self.defaultErrorState
+
+                if LOG:
+                    LOG('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as failure'))
+
+            if state is stDumpRawValue:
+                concreteDecoder = self.defaultRawDecoder
+
+                if LOG:
+                    LOG('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
+
+                state = stDecodeValue
+
+            if state is stErrorCondition:
+                raise error.PyAsn1Error(
+                    '%s not in asn1Spec: %r' % (tagSet, asn1Spec)
+                )
+
+        if LOG:
+            debug.scope.pop()
+            LOG('decoder left scope %s, call completed' % debug.scope)
+
+        yield value
+
+
+class StreamingDecoder(object):
+    """Create an iterator that turns BER/CER/DER byte stream into ASN.1 objects.
+
+    On each iteration, consume whatever BER/CER/DER serialization is
+    available in the `substrate` stream-like object and turns it into
+    one or more, possibly nested, ASN.1 objects.
+
+    Parameters
+    ----------
+    substrate: :py:class:`file`, :py:class:`io.BytesIO`
+        BER/CER/DER serialization in form of a byte stream
+
+    Keyword Args
+    ------------
+    asn1Spec: :py:class:`~pyasn1.type.base.PyAsn1Item`
+        A pyasn1 type object to act as a template guiding the decoder.
+        Depending on the ASN.1 structure being decoded, `asn1Spec` may
+        or may not be required. One of the reasons why `asn1Spec` may
+        be required is that ASN.1 structure is encoded in the *IMPLICIT*
+        tagging mode.
+
+    Yields
+    ------
+    : :py:class:`~pyasn1.type.base.PyAsn1Item`, :py:class:`~pyasn1.error.SubstrateUnderrunError`
+        Decoded ASN.1 object (possibly, nested) or
+        :py:class:`~pyasn1.error.SubstrateUnderrunError` object indicating
+        insufficient BER/CER/DER serialization on input to fully recover ASN.1
+        objects from it.
+
+        In the latter case the caller is advised to ensure some more data in
+        the input stream, then call the iterator again. The decoder will resume
+        the decoding process using the newly arrived data.
+
+        The `context` property of :py:class:`~pyasn1.error.SubstrateUnderrunError`
+        object might hold a reference to the partially populated ASN.1 object
+        being reconstructed.
+
+    Raises
+    ------
+    ~pyasn1.error.PyAsn1Error, ~pyasn1.error.EndOfStreamError
+        `PyAsn1Error` on deserialization error, `EndOfStreamError` on
+         premature stream closure.
+
+    Examples
+    --------
+    Decode BER serialisation without ASN.1 schema
+
+    .. code-block:: pycon
+
+        >>> stream = io.BytesIO(
+        ...    b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+        >>>
+        >>> for asn1Object in StreamingDecoder(stream):
+        ...     print(asn1Object)
+        >>>
+        SequenceOf:
+         1 2 3
+
+    Decode BER serialisation with ASN.1 schema
+
+    .. code-block:: pycon
+
+        >>> stream = io.BytesIO(
+        ...    b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+        >>>
+        >>> schema = SequenceOf(componentType=Integer())
+        >>>
+        >>> decoder = StreamingDecoder(stream, asn1Spec=schema)
+        >>> for asn1Object in decoder:
+        ...     print(asn1Object)
+        >>>
+        SequenceOf:
+         1 2 3
+    """
+
+    SINGLE_ITEM_DECODER = SingleItemDecoder
+
+    def __init__(self, substrate, asn1Spec=None, **options):
+        # Wrap the input into a seekable stream and keep the options to
+        # replay on every single-item decode call.
+        self._singleItemDecoder = self.SINGLE_ITEM_DECODER(**options)
+        self._substrate = asSeekableStream(substrate)
+        self._asn1Spec = asn1Spec
+        self._options = options
+
+    def __iter__(self):
+        # Decode items back-to-back until the underlying stream ends.
+        while True:
+            for asn1Object in self._singleItemDecoder(
+                    self._substrate, self._asn1Spec, **self._options):
+                yield asn1Object
+
+            for chunk in isEndOfStream(self._substrate):
+                if isinstance(chunk, SubstrateUnderrunError):
+                    # NOTE(review): bare `yield` emits None on underrun
+                    # here, unlike SingleItemDecoder which yields the
+                    # error object itself -- verify against upstream
+                    # before relying on the yielded value.
+                    yield
+
+                break
+
+            if chunk:
+                break
+
+class Decoder(object):
+    """Create a BER decoder object.
+
+    Parse BER/CER/DER octet-stream into one, possibly nested, ASN.1 object.
+    """
+    STREAMING_DECODER = StreamingDecoder
+
+    # Declared as a classmethod so both the class and its instances
+    # (e.g. the module-level `decode` singleton) are callable.
+    @classmethod
+    def __call__(cls, substrate, asn1Spec=None, **options):
+        """Turns BER/CER/DER octet stream into an ASN.1 object.
+
+        Takes BER/CER/DER octet-stream in form of :py:class:`bytes` (Python 3)
+        or :py:class:`str` (Python 2) and decode it into an ASN.1 object
+        (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+        may be a scalar or an arbitrary nested structure.
+
+        Parameters
+        ----------
+        substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+            BER/CER/DER octet-stream to parse
+
+        Keyword Args
+        ------------
+        asn1Spec: :py:class:`~pyasn1.type.base.PyAsn1Item`
+            A pyasn1 type object (:py:class:`~pyasn1.type.base.PyAsn1Item`
+            derivative) to act as a template guiding the decoder.
+            Depending on the ASN.1 structure being decoded, `asn1Spec` may or
+            may not be required. Most common reason for it to be required is
+            that ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+
+        Returns
+        -------
+        : :py:class:`tuple`
+            A tuple of :py:class:`~pyasn1.type.base.PyAsn1Item` object
+            recovered from BER/CER/DER substrate and the unprocessed trailing
+            portion of the `substrate` (may be empty)
+
+        Raises
+        ------
+        : :py:class:`~pyasn1.error.PyAsn1Error`
+            :py:class:`~pyasn1.error.SubstrateUnderrunError` on insufficient
+            input or :py:class:`~pyasn1.error.PyAsn1Error` on decoding error.
+
+        Examples
+        --------
+        Decode BER/CER/DER serialisation without ASN.1 schema
+
+        .. code-block:: pycon
+
+           >>> s, unprocessed = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+           >>> str(s)
+           SequenceOf:
+            1 2 3
+
+        Decode BER/CER/DER serialisation with ASN.1 schema
+
+        .. code-block:: pycon
+
+           >>> seq = SequenceOf(componentType=Integer())
+           >>> s, unprocessed = decode(
+               b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+           >>> str(s)
+           SequenceOf:
+            1 2 3
+
+        """
+        substrate = asSeekableStream(substrate)
+
+        streamingDecoder = cls.STREAMING_DECODER(
+            substrate, asn1Spec, **options)
+
+        # Whole input is in memory: any underrun means truncated data.
+        for asn1Object in streamingDecoder:
+            if isinstance(asn1Object, SubstrateUnderrunError):
+                raise error.SubstrateUnderrunError('Short substrate on input')
+
+            try:
+                tail = next(readFromStream(substrate))
+
+            except error.EndOfStreamError:
+                tail = null
+
+            return asn1Object, tail
+
+
+#: Turns BER octet stream into an ASN.1 object.
+#:
+#: Takes BER octet-stream and decode it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: BER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. Most common reason for
+#: it to require is that ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from BER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Notes
+#: -----
+#: This function is deprecated. Please use :py:class:`Decoder` or
+#: :py:class:`StreamingDecoder` class instance.
+#:
+#: Examples
+#: --------
+#: Decode BER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode BER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()  # deprecated module-level singleton, documented above
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/ber/encoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/ber/encoder.py
new file mode 100644
index 0000000000..c59b43e455
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/ber/encoder.py
@@ -0,0 +1,917 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat import _MISSING
+from pyasn1.compat.integer import to_bytes
+from pyasn1.compat.octets import (int2oct, oct2int, ints2octs, null,
+ str2octs, isOctetsType)
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['Encoder', 'encode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER)
+
+
class AbstractItemEncoder(object):
    """Base class for single ASN.1 value encoders.

    Subclasses implement :meth:`encodeValue`; this class supplies the
    TLV (tag-length-value) framing logic shared by all BER item encoders.
    """

    # Whether this type may use indefinite-length encoding (BER permits
    # that only for constructed encodings).
    supportIndefLenMode = True

    # An outcome of otherwise legit call `encodeFun(eoo.endOfOctets)`
    eooIntegerSubstrate = (0, 0)
    eooOctetsSubstrate = ints2octs(eooIntegerSubstrate)

    # noinspection PyMethodMayBeStatic
    def encodeTag(self, singleTag, isConstructed):
        """Return identifier octets for *singleTag* as a tuple of ints.

        Tag numbers below 31 fit into a single octet; larger numbers use
        the high-tag-number form: a leading 0x1F octet followed by
        base-128 digits with continuation bit 0x80.
        """
        tagClass, tagFormat, tagId = singleTag
        encodedTag = tagClass | tagFormat
        if isConstructed:
            encodedTag |= tag.tagFormatConstructed

        if tagId < 31:
            return encodedTag | tagId,

        else:
            substrate = tagId & 0x7f,

            tagId >>= 7

            while tagId:
                substrate = (0x80 | (tagId & 0x7f),) + substrate
                tagId >>= 7

            return (encodedTag | 0x1F,) + substrate

    def encodeLength(self, length, defMode):
        """Return length octets (tuple of ints), definite or indefinite form."""
        if not defMode and self.supportIndefLenMode:
            # indefinite form: payload will be terminated by end-of-octets
            return (0x80,)

        if length < 0x80:
            # short definite form: a single octet
            return length,

        else:
            # long definite form: 0x80|count octet, then big-endian length
            substrate = ()
            while length:
                substrate = (length & 0xff,) + substrate
                length >>= 8

            substrateLen = len(substrate)

            if substrateLen > 126:
                raise error.PyAsn1Error('Length octets overflow (%d)' % substrateLen)

            return (0x80 | substrateLen,) + substrate

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        """Encode the payload; subclasses return (substrate, isConstructed, isOctets)."""
        raise error.PyAsn1Error('Not implemented')

    def encode(self, value, asn1Spec=None, encodeFun=None, **options):
        """Encode *value* into a complete TLV octet stream.

        The payload is produced once for the innermost (base) tag, then
        wrapped again for every outer (explicit) tag in the tag set.
        """

        if asn1Spec is None:
            tagSet = value.tagSet
        else:
            tagSet = asn1Spec.tagSet

        # untagged item?
        if not tagSet:
            substrate, isConstructed, isOctets = self.encodeValue(
                value, asn1Spec, encodeFun, **options
            )
            return substrate

        defMode = options.get('defMode', True)

        substrate = null

        for idx, singleTag in enumerate(tagSet.superTags):

            defModeOverride = defMode

            # base tag?
            if not idx:
                try:
                    substrate, isConstructed, isOctets = self.encodeValue(
                        value, asn1Spec, encodeFun, **options
                    )

                except error.PyAsn1Error:
                    exc = sys.exc_info()
                    raise error.PyAsn1Error(
                        'Error encoding %r: %s' % (value, exc[1]))

                if LOG:
                    LOG('encoded %svalue %s into %s' % (
                        isConstructed and 'constructed ' or '', value, substrate
                    ))

                if not substrate and isConstructed and options.get('ifNotEmpty', False):
                    return substrate

                if not isConstructed:
                    defModeOverride = True

                    if LOG:
                        LOG('overridden encoding mode into definitive for primitive type')

            header = self.encodeTag(singleTag, isConstructed)

            if LOG:
                LOG('encoded %stag %s into %s' % (
                    isConstructed and 'constructed ' or '',
                    singleTag, debug.hexdump(ints2octs(header))))

            header += self.encodeLength(len(substrate), defModeOverride)

            if LOG:
                LOG('encoded %s octets (tag + payload) into %s' % (
                    len(substrate), debug.hexdump(ints2octs(header))))

            if isOctets:
                substrate = ints2octs(header) + substrate

                if not defModeOverride:
                    substrate += self.eooOctetsSubstrate

            else:
                substrate = header + substrate

                if not defModeOverride:
                    substrate += self.eooIntegerSubstrate

        if not isOctets:
            substrate = ints2octs(substrate)

        return substrate
+
+
class EndOfOctetsEncoder(AbstractItemEncoder):
    """Encodes the end-of-octets sentinel, whose payload is empty."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        # Empty primitive payload, already in octet form.
        return null, False, True
+
+
class BooleanEncoder(AbstractItemEncoder):
    """Encodes BOOLEAN as a single octet (in BER, any non-zero means TRUE)."""

    # BOOLEAN is primitive, so indefinite length never applies.
    supportIndefLenMode = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        payload = (1,) if value else (0,)
        return payload, False, False
+
+
class IntegerEncoder(AbstractItemEncoder):
    """Encodes INTEGER in two's complement form."""

    supportIndefLenMode = False
    # Some profiles allow zero to be conveyed with an empty payload.
    supportCompactZero = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        # Non-zero values go straight to two's complement octets.
        if value != 0:
            return to_bytes(int(value), signed=True), False, True

        if LOG:
            prefix = 'no ' if self.supportCompactZero else ''
            LOG('encoding %spayload for zero INTEGER' % prefix)

        # de-facto way to encode zero
        zeroPayload = () if self.supportCompactZero else (0,)
        return zeroPayload, False, False
+
+
class BitStringEncoder(AbstractItemEncoder):
    """Encodes BIT STRING, switching to constructed chunked form on demand."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is not None:
            # TODO: try to avoid ASN.1 schema instantiation
            value = asn1Spec.clone(value)

        bitCount = len(value)
        trailingBits = bitCount % 8
        if trailingBits:
            # pad to a whole number of octets
            alignedValue = value << (8 - trailingBits)
        else:
            alignedValue = value

        maxChunkSize = options.get('maxChunkSize', 0)
        if not maxChunkSize or len(alignedValue) <= maxChunkSize * 8:
            substrate = alignedValue.asOctets()
            # leading octet carries the count of unused (pad) bits
            return int2oct(len(substrate) * 8 - bitCount) + substrate, False, True

        if LOG:
            LOG('encoding into up to %s-octet chunks' % maxChunkSize)

        baseTag = value.tagSet.baseTag

        # drop explicit tags so each chunk carries only the base tag
        tagSet = tag.TagSet(baseTag, baseTag) if baseTag else tag.TagSet()

        alignedValue = alignedValue.clone(tagSet=tagSet)

        substrate = null
        stop = 0
        while stop < bitCount:
            start = stop
            stop = min(start + maxChunkSize * 8, bitCount)
            substrate += encodeFun(alignedValue[start:stop], asn1Spec, **options)

        return substrate, True, True
+
+
class OctetStringEncoder(AbstractItemEncoder):
    """Encodes OCTET STRING, optionally splitting into constructed chunks.

    In chunked mode every chunk is re-encoded through *encodeFun* with the
    explicit tags stripped, so only the outermost TLV carries them.
    """

    @staticmethod
    def _stripExplicitTags(tagSet):
        """Return a TagSet reduced to the base tag of *tagSet* (if any)."""
        baseTag = tagSet.baseTag
        if baseTag:
            return tag.TagSet(baseTag, baseTag)
        return tag.TagSet()

    def encodeValue(self, value, asn1Spec, encodeFun, **options):

        # Normalise input into raw octets
        if asn1Spec is None:
            substrate = value.asOctets()

        elif not isOctetsType(value):
            substrate = asn1Spec.clone(value).asOctets()

        else:
            substrate = value

        maxChunkSize = options.get('maxChunkSize', 0)

        if not maxChunkSize or len(substrate) <= maxChunkSize:
            return substrate, False, True

        if LOG:
            LOG('encoding into up to %s-octet chunks' % maxChunkSize)

        # Strip off explicit tags for inner chunks (the duplicated inline
        # logic of the two branches is shared via _stripExplicitTags).
        if asn1Spec is None:
            asn1Spec = value.clone(
                tagSet=self._stripExplicitTags(value.tagSet))

        elif not isOctetsType(value):
            asn1Spec = asn1Spec.clone(
                tagSet=self._stripExplicitTags(asn1Spec.tagSet))

        pos = 0
        substrate = null

        while True:
            chunk = value[pos:pos + maxChunkSize]
            if not chunk:
                break

            substrate += encodeFun(chunk, asn1Spec, **options)
            pos += maxChunkSize

        return substrate, True, True
+
+
class NullEncoder(AbstractItemEncoder):
    """Encodes NULL: a primitive type carrying no content octets."""

    supportIndefLenMode = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        # The payload is always empty.
        return null, False, True
+
+
class ObjectIdentifierEncoder(AbstractItemEncoder):
    """Encodes OBJECT IDENTIFIER as base-128 sub-identifier octets."""

    supportIndefLenMode = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is not None:
            value = asn1Spec.clone(value)

        oid = value.asTuple()

        # The first two arcs are folded into a single sub-identifier
        try:
            first, second = oid[0], oid[1]

        except IndexError:
            raise error.PyAsn1Error('Short OID %s' % (value,))

        if 0 <= second <= 39:
            if first == 1:
                oid = (second + 40,) + oid[2:]
            elif first == 0:
                oid = (second,) + oid[2:]
            elif first == 2:
                oid = (second + 80,) + oid[2:]
            else:
                raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))

        elif first == 2:
            oid = (second + 80,) + oid[2:]

        else:
            raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))

        octets = []

        for subOid in oid:
            if 0 <= subOid <= 127:
                # single-octet sub-identifier: common fast path
                octets.append(subOid)

            elif subOid > 127:
                # base-128 digits, continuation bit on all but the last
                packed = [subOid & 0x7f]
                subOid >>= 7

                while subOid:
                    packed.insert(0, 0x80 | (subOid & 0x7f))
                    subOid >>= 7

                octets.extend(packed)

            else:
                raise error.PyAsn1Error('Negative OID arc %s at %s' % (subOid, value))

        return tuple(octets), False, False
+
+
class RealEncoder(AbstractItemEncoder):
    """Encodes REAL: special values, character (base 10) or binary form."""

    supportIndefLenMode = False
    binEncBase = 2  # set to None to choose encoding base automatically

    @staticmethod
    def _dropFloatingPoint(m, encbase, e):
        """Scale mantissa *m* until integral under base *encbase*.

        Returns (mantissa sign, integral mantissa, encoding base, exponent).
        """
        ms, es = 1, 1
        if m < 0:
            ms = -1  # mantissa sign

        if e < 0:
            es = -1  # exponent sign

        m *= ms

        if encbase == 8:
            # re-express a base-2 exponent in base 8 (factor of 3)
            m *= 2 ** (abs(e) % 3 * es)
            e = abs(e) // 3 * es

        elif encbase == 16:
            # re-express a base-2 exponent in base 16 (factor of 4)
            m *= 2 ** (abs(e) % 4 * es)
            e = abs(e) // 4 * es

        while True:
            if int(m) != m:
                # still fractional: shift one base digit into the mantissa
                m *= encbase
                e -= 1
                continue
            break

        return ms, int(m), encbase, e

    def _chooseEncBase(self, value):
        """Pick the binary encoding base, honouring explicit preferences first."""
        m, b, e = value
        encBase = [2, 8, 16]
        if value.binEncBase in encBase:
            return self._dropFloatingPoint(m, value.binEncBase, e)

        elif self.binEncBase in encBase:
            return self._dropFloatingPoint(m, self.binEncBase, e)

        # auto choosing base 2/8/16
        mantissa = [m, m, m]
        exponent = [e, e, e]
        sign = 1
        encbase = 2
        e = float('inf')

        # keep the base yielding the smallest exponent (mantissa breaks ties)
        for i in range(3):
            (sign,
             mantissa[i],
             encBase[i],
             exponent[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponent[i])

            if abs(exponent[i]) < abs(e) or (abs(exponent[i]) == abs(e) and mantissa[i] < m):
                e = exponent[i]
                m = int(mantissa[i])
                encbase = encBase[i]

        if LOG:
            LOG('automatically chosen REAL encoding base %s, sign %s, mantissa %s, '
                'exponent %s' % (encbase, sign, m, e))

        return sign, m, encbase, e

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        """Encode the REAL payload; returns (substrate, isConstructed, isOctets)."""
        if asn1Spec is not None:
            value = asn1Spec.clone(value)

        if value.isPlusInf:
            return (0x40,), False, False

        if value.isMinusInf:
            return (0x41,), False, False

        m, b, e = value

        if not m:
            # zero is conveyed by an empty payload
            return null, False, True

        if b == 10:
            if LOG:
                LOG('encoding REAL into character form')

            return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), False, True

        elif b == 2:
            fo = 0x80  # binary encoding
            ms, m, encbase, e = self._chooseEncBase(value)

            if ms < 0:  # mantissa sign
                fo |= 0x40  # sign bit

            # exponent & mantissa normalization
            if encbase == 2:
                while m & 0x1 == 0:
                    m >>= 1
                    e += 1

            elif encbase == 8:
                while m & 0x7 == 0:
                    m >>= 3
                    e += 1
                fo |= 0x10

            else:  # encbase = 16
                while m & 0xf == 0:
                    m >>= 4
                    e += 1
                fo |= 0x20

            sf = 0  # scale factor

            while m & 0x1 == 0:
                m >>= 1
                sf += 1

            if sf > 3:
                raise error.PyAsn1Error('Scale factor overflow')  # bug if raised

            fo |= sf << 2
            eo = null
            if e == 0 or e == -1:
                eo = int2oct(e & 0xff)

            else:
                while e not in (0, -1):
                    eo = int2oct(e & 0xff) + eo
                    e >>= 8

                # sign-extend when the leading exponent bit disagrees
                if e == 0 and eo and oct2int(eo[0]) & 0x80:
                    eo = int2oct(0) + eo

                if e == -1 and eo and not (oct2int(eo[0]) & 0x80):
                    eo = int2oct(0xff) + eo

            n = len(eo)
            if n > 0xff:
                raise error.PyAsn1Error('Real exponent overflow')

            if n == 1:
                pass

            elif n == 2:
                fo |= 1

            elif n == 3:
                fo |= 2

            else:
                # exponent length itself needs an extra carrier octet
                fo |= 3
                eo = int2oct(n & 0xff) + eo

            po = null

            while m:
                po = int2oct(m & 0xff) + po
                m >>= 8

            substrate = int2oct(fo) + eo + po

            return substrate, False, True

        else:
            raise error.PyAsn1Error('Prohibited Real base %s' % b)
+
+
class SequenceEncoder(AbstractItemEncoder):
    """Encodes SEQUENCE / SET from a pyasn1 object or a bare Python mapping.

    With *asn1Spec* absent, *value* is a pyasn1 object carrying its own
    schema; otherwise *value* is a plain mapping interpreted against
    *asn1Spec*. Open-type components may be wrapped into their declared
    container type on the way out.
    """

    # When true, OPTIONAL components that encode to nothing are dropped.
    omitEmptyOptionals = False

    # TODO: handling three flavors of input is too much -- split over codecs

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        """Concatenate component encodings; always a constructed encoding."""

        substrate = null

        omitEmptyOptionals = options.get(
            'omitEmptyOptionals', self.omitEmptyOptionals)

        if LOG:
            LOG('%sencoding empty OPTIONAL components' % (
                omitEmptyOptionals and 'not ' or ''))

        if asn1Spec is None:
            # instance of ASN.1 schema
            inconsistency = value.isInconsistent
            if inconsistency:
                raise inconsistency

            namedTypes = value.componentType

            for idx, component in enumerate(value.values()):
                if namedTypes:
                    namedType = namedTypes[idx]

                    # skip unset OPTIONAL and unchanged DEFAULT components
                    if namedType.isOptional and not component.isValue:
                        if LOG:
                            LOG('not encoding OPTIONAL component %r' % (namedType,))
                        continue

                    if namedType.isDefaulted and component == namedType.asn1Object:
                        if LOG:
                            LOG('not encoding DEFAULT component %r' % (namedType,))
                        continue

                    if omitEmptyOptionals:
                        options.update(ifNotEmpty=namedType.isOptional)

                # wrap open type blob if needed
                if namedTypes and namedType.openType:

                    wrapType = namedType.asn1Object

                    if wrapType.typeId in (
                            univ.SetOf.typeId, univ.SequenceOf.typeId):

                        substrate += encodeFun(
                            component, asn1Spec,
                            **dict(options, wrapType=wrapType.componentType))

                    else:
                        chunk = encodeFun(component, asn1Spec, **options)

                        if wrapType.isSameTypeWith(component):
                            substrate += chunk

                        else:
                            substrate += encodeFun(chunk, wrapType, **options)

                            if LOG:
                                LOG('wrapped with wrap type %r' % (wrapType,))

                else:
                    substrate += encodeFun(component, asn1Spec, **options)

        else:
            # bare Python value + ASN.1 schema
            for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):

                try:
                    component = value[namedType.name]

                except KeyError:
                    raise error.PyAsn1Error('Component name "%s" not found in %r' % (
                        namedType.name, value))

                # skip absent OPTIONAL and unchanged DEFAULT components
                if namedType.isOptional and namedType.name not in value:
                    if LOG:
                        LOG('not encoding OPTIONAL component %r' % (namedType,))
                    continue

                if namedType.isDefaulted and component == namedType.asn1Object:
                    if LOG:
                        LOG('not encoding DEFAULT component %r' % (namedType,))
                    continue

                if omitEmptyOptionals:
                    options.update(ifNotEmpty=namedType.isOptional)

                componentSpec = namedType.asn1Object

                # wrap open type blob if needed
                if namedType.openType:

                    if componentSpec.typeId in (
                            univ.SetOf.typeId, univ.SequenceOf.typeId):

                        substrate += encodeFun(
                            component, componentSpec,
                            **dict(options, wrapType=componentSpec.componentType))

                    else:
                        chunk = encodeFun(component, componentSpec, **options)

                        if componentSpec.isSameTypeWith(component):
                            substrate += chunk

                        else:
                            substrate += encodeFun(chunk, componentSpec, **options)

                            if LOG:
                                LOG('wrapped with wrap type %r' % (componentSpec,))

                else:
                    substrate += encodeFun(component, componentSpec, **options)

        return substrate, True, True
+
+
class SequenceOfEncoder(AbstractItemEncoder):
    """Encodes SEQUENCE OF / SET OF by concatenating component encodings."""

    def _encodeComponents(self, value, asn1Spec, encodeFun, **options):
        """Encode each component, wrapping into ``wrapType`` when requested."""

        if asn1Spec is None:
            inconsistency = value.isInconsistent
            if inconsistency:
                raise inconsistency

        else:
            asn1Spec = asn1Spec.componentType

        # `wrapType` must not propagate into nested encode calls
        wrapType = options.pop('wrapType', None)

        chunks = []

        for component in value:
            chunk = encodeFun(component, asn1Spec, **options)

            needsWrapping = (wrapType is not None and
                             not wrapType.isSameTypeWith(component))

            if needsWrapping:
                # wrap encoded value with wrapper container (e.g. ANY)
                chunk = encodeFun(chunk, wrapType, **options)

                if LOG:
                    LOG('wrapped with wrap type %r' % (wrapType,))

            chunks.append(chunk)

        return chunks

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        componentChunks = self._encodeComponents(
            value, asn1Spec, encodeFun, **options)

        return null.join(componentChunks), True, True
+
+
class ChoiceEncoder(AbstractItemEncoder):
    """Encodes CHOICE by delegating to its single selected alternative."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is None:
            component = value.getComponent()

        else:
            # exactly one named alternative must be present in the value
            candidates = []
            for namedType in asn1Spec.componentType.namedTypes:
                if namedType.name in value:
                    candidates.append(namedType.name)

            if len(candidates) != 1:
                raise error.PyAsn1Error('%s components for Choice at %r' % (len(candidates) and 'Multiple ' or 'None ', value))

            chosenName = candidates[0]

            component = value[chosenName]
            asn1Spec = asn1Spec[chosenName]

        return encodeFun(component, asn1Spec, **options), True, True
+
+
class AnyEncoder(OctetStringEncoder):
    """Encodes ANY: the value is passed through as raw octets."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is None:
            value = value.asOctets()
        elif not isOctetsType(value):
            value = asn1Spec.clone(value).asOctets()

        # reported as constructed only in indefinite-length mode
        isIndefinite = not options.get('defMode', True)
        return value, isIndefinite, True
+
+
# Tag-set-keyed registry of value encoders; consulted as a fallback when
# a value's type ID is not found in TYPE_MAP.
TAG_MAP = {
    eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
    univ.Boolean.tagSet: BooleanEncoder(),
    univ.Integer.tagSet: IntegerEncoder(),
    univ.BitString.tagSet: BitStringEncoder(),
    univ.OctetString.tagSet: OctetStringEncoder(),
    univ.Null.tagSet: NullEncoder(),
    univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
    univ.Enumerated.tagSet: IntegerEncoder(),
    univ.Real.tagSet: RealEncoder(),
    # Sequence & Set have same tags as SequenceOf & SetOf
    univ.SequenceOf.tagSet: SequenceOfEncoder(),
    univ.SetOf.tagSet: SequenceOfEncoder(),
    univ.Choice.tagSet: ChoiceEncoder(),
    # character string types
    char.UTF8String.tagSet: OctetStringEncoder(),
    char.NumericString.tagSet: OctetStringEncoder(),
    char.PrintableString.tagSet: OctetStringEncoder(),
    char.TeletexString.tagSet: OctetStringEncoder(),
    char.VideotexString.tagSet: OctetStringEncoder(),
    char.IA5String.tagSet: OctetStringEncoder(),
    char.GraphicString.tagSet: OctetStringEncoder(),
    char.VisibleString.tagSet: OctetStringEncoder(),
    char.GeneralString.tagSet: OctetStringEncoder(),
    char.UniversalString.tagSet: OctetStringEncoder(),
    char.BMPString.tagSet: OctetStringEncoder(),
    # useful types
    useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
    useful.GeneralizedTime.tagSet: OctetStringEncoder(),
    useful.UTCTime.tagSet: OctetStringEncoder()
}

# Put in ambiguous & non-ambiguous types for faster codec lookup
TYPE_MAP = {
    univ.Boolean.typeId: BooleanEncoder(),
    univ.Integer.typeId: IntegerEncoder(),
    univ.BitString.typeId: BitStringEncoder(),
    univ.OctetString.typeId: OctetStringEncoder(),
    univ.Null.typeId: NullEncoder(),
    univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
    univ.Enumerated.typeId: IntegerEncoder(),
    univ.Real.typeId: RealEncoder(),
    # Sequence & Set have same tags as SequenceOf & SetOf
    univ.Set.typeId: SequenceEncoder(),
    univ.SetOf.typeId: SequenceOfEncoder(),
    univ.Sequence.typeId: SequenceEncoder(),
    univ.SequenceOf.typeId: SequenceOfEncoder(),
    univ.Choice.typeId: ChoiceEncoder(),
    univ.Any.typeId: AnyEncoder(),
    # character string types
    char.UTF8String.typeId: OctetStringEncoder(),
    char.NumericString.typeId: OctetStringEncoder(),
    char.PrintableString.typeId: OctetStringEncoder(),
    char.TeletexString.typeId: OctetStringEncoder(),
    char.VideotexString.typeId: OctetStringEncoder(),
    char.IA5String.typeId: OctetStringEncoder(),
    char.GraphicString.typeId: OctetStringEncoder(),
    char.VisibleString.typeId: OctetStringEncoder(),
    char.GeneralString.typeId: OctetStringEncoder(),
    char.UniversalString.typeId: OctetStringEncoder(),
    char.BMPString.typeId: OctetStringEncoder(),
    # useful types
    useful.ObjectDescriptor.typeId: OctetStringEncoder(),
    useful.GeneralizedTime.typeId: OctetStringEncoder(),
    useful.UTCTime.typeId: OctetStringEncoder()
}

# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
tagMap = TAG_MAP
typeMap = TYPE_MAP
+
+
class SingleItemEncoder(object):
    """Encodes one ASN.1 value by dispatching to a concrete item encoder.

    Lookup goes by type ID first (fast, unambiguous), then falls back to
    the value's base tag set.
    """

    # Subclasses (e.g. canonical codecs) pin these to force a particular
    # length mode / chunking; None means honour per-call options.
    fixedDefLengthMode = None
    fixedChunkSize = None

    TAG_MAP = TAG_MAP
    TYPE_MAP = TYPE_MAP

    def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
        # Caller-supplied codec maps override the class-level defaults.
        self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
        self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP

    def __call__(self, value, asn1Spec=None, **options):
        """Encode *value* (guided by *asn1Spec* if given) into substrate."""
        try:
            if asn1Spec is None:
                typeId = value.typeId
            else:
                typeId = asn1Spec.typeId

        except AttributeError:
            raise error.PyAsn1Error('Value %r is not ASN.1 type instance '
                                    'and "asn1Spec" not given' % (value,))

        if LOG:
            LOG('encoder called in %sdef mode, chunk size %s for type %s, '
                'value:\n%s' % (not options.get('defMode', True) and 'in' or '',
                                options.get('maxChunkSize', 0),
                                asn1Spec is None and value.prettyPrintType() or
                                asn1Spec.prettyPrintType(), value))

        if self.fixedDefLengthMode is not None:
            options.update(defMode=self.fixedDefLengthMode)

        if self.fixedChunkSize is not None:
            options.update(maxChunkSize=self.fixedChunkSize)

        try:
            concreteEncoder = self._typeMap[typeId]

            if LOG:
                LOG('using value codec %s chosen by type ID '
                    '%s' % (concreteEncoder.__class__.__name__, typeId))

        except KeyError:
            if asn1Spec is None:
                tagSet = value.tagSet
            else:
                tagSet = asn1Spec.tagSet

            # use base type for codec lookup to recover untagged types
            baseTagSet = tag.TagSet(tagSet.baseTag, tagSet.baseTag)

            try:
                concreteEncoder = self._tagMap[baseTagSet]

            except KeyError:
                raise error.PyAsn1Error('No encoder for %r (%s)' % (value, tagSet))

            if LOG:
                LOG('using value codec %s chosen by tagSet '
                    '%s' % (concreteEncoder.__class__.__name__, tagSet))

        substrate = concreteEncoder.encode(value, asn1Spec, self, **options)

        if LOG:
            LOG('codec %s built %s octets of substrate: %s\nencoder '
                'completed' % (concreteEncoder, len(substrate),
                               debug.hexdump(substrate)))

        return substrate
+
+
class Encoder(object):
    """User-facing BER encoder facade delegating to a SingleItemEncoder."""

    SINGLE_ITEM_ENCODER = SingleItemEncoder

    def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **options):
        # The single-item encoder does all the actual work.
        self._singleItemEncoder = self.SINGLE_ITEM_ENCODER(
            tagMap=tagMap, typeMap=typeMap, **options)

    def __call__(self, pyObject, asn1Spec=None, **options):
        return self._singleItemEncoder(pyObject, asn1Spec=asn1Spec, **options)
+
+
+#: Turns ASN.1 object into BER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative),
+#: walks all its components recursively and produces a BER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: A Python or pyasn1 object to encode. If a Python object is given, the `asn1Spec`
+#: parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: defMode: :py:class:`bool`
+#: If :obj:`False`, produces indefinite length encoding
+#:
+#: maxChunkSize: :py:class:`int`
+#: Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size)
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: Given ASN.1 object encoded into BER octetstream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into BER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+#: Encode ASN.1 value object into BER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+encode = Encoder()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/ber/eoo.py b/contrib/python/pyasn1/py2/pyasn1/codec/ber/eoo.py
new file mode 100644
index 0000000000..8c91a3d285
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/ber/eoo.py
@@ -0,0 +1,28 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1.type import base
+from pyasn1.type import tag
+
+__all__ = ['endOfOctets']
+
+
class EndOfOctets(base.SimpleAsn1Type):
    """Singleton sentinel terminating an indefinite-length encoding."""

    defaultValue = 0
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00)
    )

    # Lazily created single shared instance
    _instance = None

    def __new__(cls, *args, **kwargs):
        existing = cls._instance
        if existing is not None:
            return existing

        # First instantiation: create and memoise the singleton.
        cls._instance = object.__new__(cls, *args, **kwargs)
        return cls._instance
+
+
+endOfOctets = EndOfOctets()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/cer/__init__.py b/contrib/python/pyasn1/py2/pyasn1/codec/cer/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/cer/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/cer/decoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/cer/decoder.py
new file mode 100644
index 0000000000..ed6391ff35
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/cer/decoder.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.streaming import readFromStream
+from pyasn1.codec.ber import decoder
+from pyasn1.compat.octets import oct2int
+from pyasn1.type import univ
+
+__all__ = ['decode', 'StreamingDecoder']
+
+SubstrateUnderrunError = error.SubstrateUnderrunError
+
+
class BooleanPayloadDecoder(decoder.AbstractSimplePayloadDecoder):
    """CER-strict BOOLEAN payload decoder.

    Accepts only the canonical single-octet payloads 0xFF (TRUE) and
    0x00 (FALSE); any other octet is rejected.
    """

    protoComponent = univ.Boolean(0)

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):

        if length != 1:
            raise error.PyAsn1Error('Not single-octet Boolean payload')

        # readFromStream yields SubstrateUnderrunError markers until enough
        # data is buffered; pass them through to the driving generator.
        for chunk in readFromStream(substrate, length, options):
            if isinstance(chunk, SubstrateUnderrunError):
                yield chunk

        byte = oct2int(chunk[0])

        # CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
        # BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
        # in https://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
        if byte == 0xff:
            value = 1

        elif byte == 0x00:
            value = 0

        else:
            raise error.PyAsn1Error('Unexpected Boolean payload: %s' % byte)

        yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+
# TODO: prohibit non-canonical encoding
# These payload decoders are reused as-is from the BER codec.
BitStringPayloadDecoder = decoder.BitStringPayloadDecoder
OctetStringPayloadDecoder = decoder.OctetStringPayloadDecoder
RealPayloadDecoder = decoder.RealPayloadDecoder

# Start from the BER decoder maps and override the entries whose CER
# encodings are stricter than plain BER.
TAG_MAP = decoder.TAG_MAP.copy()
TAG_MAP.update(
    {univ.Boolean.tagSet: BooleanPayloadDecoder(),
     univ.BitString.tagSet: BitStringPayloadDecoder(),
     univ.OctetString.tagSet: OctetStringPayloadDecoder(),
     univ.Real.tagSet: RealPayloadDecoder()}
)

TYPE_MAP = decoder.TYPE_MAP.copy()

# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
tagMap = TAG_MAP
typeMap = TYPE_MAP

# Put in non-ambiguous types for faster codec lookup
for typeDecoder in TAG_MAP.values():
    if typeDecoder.protoComponent is not None:
        typeId = typeDecoder.protoComponent.__class__.typeId
        if typeId is not None and typeId not in TYPE_MAP:
            TYPE_MAP[typeId] = typeDecoder
+
+
class SingleItemDecoder(decoder.SingleItemDecoder):
    # Reuse the BER decoding machinery with CER-specific codec maps.
    __doc__ = decoder.SingleItemDecoder.__doc__

    TAG_MAP = TAG_MAP
    TYPE_MAP = TYPE_MAP
+
+
class StreamingDecoder(decoder.StreamingDecoder):
    # BER streaming decoder parameterised with the CER single-item decoder.
    __doc__ = decoder.StreamingDecoder.__doc__

    SINGLE_ITEM_DECODER = SingleItemDecoder
+
+
class Decoder(decoder.Decoder):
    # One-shot decoder facade built on the CER streaming decoder.
    __doc__ = decoder.Decoder.__doc__

    STREAMING_DECODER = StreamingDecoder
+
+
+#: Turns CER octet stream into an ASN.1 object.
+#:
+#: Takes a CER octet stream and decodes it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: CER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. The most common reason
+#: for it to be required is that the ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from CER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode CER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode CER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/cer/encoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/cer/encoder.py
new file mode 100644
index 0000000000..0a198e3fdf
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/cer/encoder.py
@@ -0,0 +1,327 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.ber import encoder
+from pyasn1.compat.octets import str2octs, null
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['Encoder', 'encode']
+
+
+class BooleanEncoder(encoder.IntegerEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if value == 0:
+ substrate = (0,)
+ else:
+ substrate = (255,)
+ return substrate, False, False
+
+
+class RealEncoder(encoder.RealEncoder):
+ def _chooseEncBase(self, value):
+ m, b, e = value
+ return self._dropFloatingPoint(m, b, e)
+
+
+# specialized GeneralStringEncoder here
+
+class TimeEncoderMixIn(object):
+ Z_CHAR = ord('Z')
+ PLUS_CHAR = ord('+')
+ MINUS_CHAR = ord('-')
+ COMMA_CHAR = ord(',')
+ DOT_CHAR = ord('.')
+ ZERO_CHAR = ord('0')
+
+ MIN_LENGTH = 12
+ MAX_LENGTH = 19
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ # CER encoding constraints:
+ # - minutes are mandatory, seconds are optional
+ # - sub-seconds must NOT be zero / no meaningless zeros
+ # - no hanging fraction dot
+ # - time in UTC (Z)
+ # - only dot is allowed for fractions
+
+ if asn1Spec is not None:
+ value = asn1Spec.clone(value)
+
+ numbers = value.asNumbers()
+
+ if self.PLUS_CHAR in numbers or self.MINUS_CHAR in numbers:
+ raise error.PyAsn1Error('Must be UTC time: %r' % value)
+
+ if numbers[-1] != self.Z_CHAR:
+ raise error.PyAsn1Error('Missing "Z" time zone specifier: %r' % value)
+
+ if self.COMMA_CHAR in numbers:
+ raise error.PyAsn1Error('Comma in fractions disallowed: %r' % value)
+
+ if self.DOT_CHAR in numbers:
+
+ isModified = False
+
+ numbers = list(numbers)
+
+ searchIndex = min(numbers.index(self.DOT_CHAR) + 4, len(numbers) - 1)
+
+ while numbers[searchIndex] != self.DOT_CHAR:
+ if numbers[searchIndex] == self.ZERO_CHAR:
+ del numbers[searchIndex]
+ isModified = True
+
+ searchIndex -= 1
+
+ searchIndex += 1
+
+ if searchIndex < len(numbers):
+ if numbers[searchIndex] == self.Z_CHAR:
+ # drop hanging fraction dot
+ del numbers[searchIndex - 1]
+ isModified = True
+
+ if isModified:
+ value = value.clone(numbers)
+
+ if not self.MIN_LENGTH < len(numbers) < self.MAX_LENGTH:
+ raise error.PyAsn1Error('Length constraint violated: %r' % value)
+
+ options.update(maxChunkSize=1000)
+
+ return encoder.OctetStringEncoder.encodeValue(
+ self, value, asn1Spec, encodeFun, **options
+ )
+
+
+class GeneralizedTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
+ MIN_LENGTH = 12
+ MAX_LENGTH = 20
+
+
+class UTCTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
+ MIN_LENGTH = 10
+ MAX_LENGTH = 14
+
+
+class SetOfEncoder(encoder.SequenceOfEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ # sort by serialised and padded components
+ if len(chunks) > 1:
+ zero = str2octs('\x00')
+ maxLen = max(map(len, chunks))
+ paddedChunks = [
+ (x.ljust(maxLen, zero), x) for x in chunks
+ ]
+ paddedChunks.sort(key=lambda x: x[0])
+
+ chunks = [x[1] for x in paddedChunks]
+
+ return null.join(chunks), True, True
+
+
+class SequenceOfEncoder(encoder.SequenceOfEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ if options.get('ifNotEmpty', False) and not len(value):
+ return null, True, True
+
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ return null.join(chunks), True, True
+
+
+class SetEncoder(encoder.SequenceEncoder):
+ @staticmethod
+ def _componentSortKey(componentAndType):
+ """Sort SET components by tag
+
+ Sort regardless of the Choice value (static sort)
+ """
+ component, asn1Spec = componentAndType
+
+ if asn1Spec is None:
+ asn1Spec = component
+
+ if asn1Spec.typeId == univ.Choice.typeId and not asn1Spec.tagSet:
+ if asn1Spec.tagSet:
+ return asn1Spec.tagSet
+ else:
+ return asn1Spec.componentType.minTagSet
+ else:
+ return asn1Spec.tagSet
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ substrate = null
+
+ comps = []
+ compsMap = {}
+
+ if asn1Spec is None:
+ # instance of ASN.1 schema
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+
+ for idx, component in enumerate(value.values()):
+ if namedTypes:
+ namedType = namedTypes[idx]
+
+ if namedType.isOptional and not component.isValue:
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ continue
+
+ compsMap[id(component)] = namedType
+
+ else:
+ compsMap[id(component)] = None
+
+ comps.append((component, asn1Spec))
+
+ else:
+ # bare Python value + ASN.1 schema
+ for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):
+
+ try:
+ component = value[namedType.name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Component name "%s" not found in %r' % (namedType.name, value))
+
+ if namedType.isOptional and namedType.name not in value:
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ continue
+
+ compsMap[id(component)] = namedType
+ comps.append((component, asn1Spec[idx]))
+
+ for comp, compType in sorted(comps, key=self._componentSortKey):
+ namedType = compsMap[id(comp)]
+
+ if namedType:
+ options.update(ifNotEmpty=namedType.isOptional)
+
+ chunk = encodeFun(comp, compType, **options)
+
+ # wrap open type blob if needed
+ if namedType and namedType.openType:
+ wrapType = namedType.asn1Object
+ if wrapType.tagSet and not wrapType.isSameTypeWith(comp):
+ chunk = encodeFun(chunk, wrapType, **options)
+
+ substrate += chunk
+
+ return substrate, True, True
+
+
+class SequenceEncoder(encoder.SequenceEncoder):
+ omitEmptyOptionals = True
+
+
+TAG_MAP = encoder.TAG_MAP.copy()
+
+TAG_MAP.update({
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
+ useful.UTCTime.tagSet: UTCTimeEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SetOf.tagSet: SetOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder()
+})
+
+TYPE_MAP = encoder.TYPE_MAP.copy()
+
+TYPE_MAP.update({
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ useful.GeneralizedTime.typeId: GeneralizedTimeEncoder(),
+ useful.UTCTime.typeId: UTCTimeEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SetEncoder(),
+ univ.SetOf.typeId: SetOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder()
+})
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemEncoder(encoder.SingleItemEncoder):
+ fixedDefLengthMode = False
+ fixedChunkSize = 1000
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+
+class Encoder(encoder.Encoder):
+ SINGLE_ITEM_ENCODER = SingleItemEncoder
+
+
+#: Turns ASN.1 object into CER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a CER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: A Python or pyasn1 object to encode. If a Python object is given, `asn1Spec`
+#: parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: Given ASN.1 object encoded into CER octet-stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into CER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
+#:
+#: Encode ASN.1 value object into CER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
+#:
+encode = Encoder()
+
+# EncoderFactory queries class instance and builds a map of tags -> encoders
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/der/__init__.py b/contrib/python/pyasn1/py2/pyasn1/codec/der/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/der/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/der/decoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/der/decoder.py
new file mode 100644
index 0000000000..215b72d9fd
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/der/decoder.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1.codec.cer import decoder
+from pyasn1.type import univ
+
+__all__ = ['decode', 'StreamingDecoder']
+
+
+class BitStringPayloadDecoder(decoder.BitStringPayloadDecoder):
+ supportConstructedForm = False
+
+
+class OctetStringPayloadDecoder(decoder.OctetStringPayloadDecoder):
+ supportConstructedForm = False
+
+
+# TODO: prohibit non-canonical encoding
+RealPayloadDecoder = decoder.RealPayloadDecoder
+
+TAG_MAP = decoder.TAG_MAP.copy()
+TAG_MAP.update(
+ {univ.BitString.tagSet: BitStringPayloadDecoder(),
+ univ.OctetString.tagSet: OctetStringPayloadDecoder(),
+ univ.Real.tagSet: RealPayloadDecoder()}
+)
+
+TYPE_MAP = decoder.TYPE_MAP.copy()
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in TAG_MAP.values():
+ if typeDecoder.protoComponent is not None:
+ typeId = typeDecoder.protoComponent.__class__.typeId
+ if typeId is not None and typeId not in TYPE_MAP:
+ TYPE_MAP[typeId] = typeDecoder
+
+
+class SingleItemDecoder(decoder.SingleItemDecoder):
+ __doc__ = decoder.SingleItemDecoder.__doc__
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+ supportIndefLength = False
+
+
+class StreamingDecoder(decoder.StreamingDecoder):
+ __doc__ = decoder.StreamingDecoder.__doc__
+
+ SINGLE_ITEM_DECODER = SingleItemDecoder
+
+
+class Decoder(decoder.Decoder):
+ __doc__ = decoder.Decoder.__doc__
+
+ STREAMING_DECODER = StreamingDecoder
+
+
+#: Turns DER octet stream into an ASN.1 object.
+#:
+#: Takes DER octet-stream and decodes it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: DER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. The most common reason
+#: it is required is that the ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from DER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode DER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode DER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/der/encoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/der/encoder.py
new file mode 100644
index 0000000000..c231edc164
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/der/encoder.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.cer import encoder
+from pyasn1.type import univ
+
+__all__ = ['Encoder', 'encode']
+
+
+class SetEncoder(encoder.SetEncoder):
+ @staticmethod
+ def _componentSortKey(componentAndType):
+ """Sort SET components by tag
+
+ Sort depending on the actual Choice value (dynamic sort)
+ """
+ component, asn1Spec = componentAndType
+
+ if asn1Spec is None:
+ compType = component
+ else:
+ compType = asn1Spec
+
+ if compType.typeId == univ.Choice.typeId and not compType.tagSet:
+ if asn1Spec is None:
+ return component.getComponent().tagSet
+ else:
+ # TODO: move out of sorting key function
+ names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
+ if namedType.name in component]
+ if len(names) != 1:
+ raise error.PyAsn1Error(
+ '%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', component))
+
+ # TODO: support nested CHOICE ordering
+ return asn1Spec[names[0]].tagSet
+
+ else:
+ return compType.tagSet
+
+
+TAG_MAP = encoder.TAG_MAP.copy()
+
+TAG_MAP.update({
+ # Set & SetOf have same tags
+ univ.Set.tagSet: SetEncoder()
+})
+
+TYPE_MAP = encoder.TYPE_MAP.copy()
+
+TYPE_MAP.update({
+ # Set & SetOf have same tags
+ univ.Set.typeId: SetEncoder()
+})
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemEncoder(encoder.SingleItemEncoder):
+ fixedDefLengthMode = True
+ fixedChunkSize = 0
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+
+class Encoder(encoder.Encoder):
+ SINGLE_ITEM_ENCODER = SingleItemEncoder
+
+
+#: Turns ASN.1 object into DER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a DER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: A Python or pyasn1 object to encode. If a Python object is given, `asn1Spec`
+#: parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: Given ASN.1 object encoded into DER octet-stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into DER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+#: Encode ASN.1 value object into DER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+encode = Encoder()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/native/__init__.py b/contrib/python/pyasn1/py2/pyasn1/codec/native/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/native/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/native/decoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/native/decoder.py
new file mode 100644
index 0000000000..e23f40ca4b
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/native/decoder.py
@@ -0,0 +1,238 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.compat import _MISSING
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['decode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
+
+
+class AbstractScalarPayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ return asn1Spec.clone(pyObject)
+
+
+class BitStringPayloadDecoder(AbstractScalarPayloadDecoder):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ return asn1Spec.clone(univ.BitString.fromBinaryString(pyObject))
+
+
+class SequenceOrSetPayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ componentsTypes = asn1Spec.componentType
+
+ for field in asn1Value:
+ if field in pyObject:
+ asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
+
+ return asn1Value
+
+
+class SequenceOfOrSetOfPayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ for pyValue in pyObject:
+ asn1Value.append(decodeFun(pyValue, asn1Spec.componentType), **options)
+
+ return asn1Value
+
+
+class ChoicePayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ componentsTypes = asn1Spec.componentType
+
+ for field in pyObject:
+ if field in componentsTypes:
+ asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
+ break
+
+ return asn1Value
+
+
+TAG_MAP = {
+ univ.Integer.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Boolean.tagSet: AbstractScalarPayloadDecoder(),
+ univ.BitString.tagSet: BitStringPayloadDecoder(),
+ univ.OctetString.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Null.tagSet: AbstractScalarPayloadDecoder(),
+ univ.ObjectIdentifier.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Enumerated.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Real.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Sequence.tagSet: SequenceOrSetPayloadDecoder(), # conflicts with SequenceOf
+ univ.Set.tagSet: SequenceOrSetPayloadDecoder(), # conflicts with SetOf
+ univ.Choice.tagSet: ChoicePayloadDecoder(), # conflicts with Any
+ # character string types
+ char.UTF8String.tagSet: AbstractScalarPayloadDecoder(),
+ char.NumericString.tagSet: AbstractScalarPayloadDecoder(),
+ char.PrintableString.tagSet: AbstractScalarPayloadDecoder(),
+ char.TeletexString.tagSet: AbstractScalarPayloadDecoder(),
+ char.VideotexString.tagSet: AbstractScalarPayloadDecoder(),
+ char.IA5String.tagSet: AbstractScalarPayloadDecoder(),
+ char.GraphicString.tagSet: AbstractScalarPayloadDecoder(),
+ char.VisibleString.tagSet: AbstractScalarPayloadDecoder(),
+ char.GeneralString.tagSet: AbstractScalarPayloadDecoder(),
+ char.UniversalString.tagSet: AbstractScalarPayloadDecoder(),
+ char.BMPString.tagSet: AbstractScalarPayloadDecoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: AbstractScalarPayloadDecoder(),
+ useful.GeneralizedTime.tagSet: AbstractScalarPayloadDecoder(),
+ useful.UTCTime.tagSet: AbstractScalarPayloadDecoder()
+}
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+TYPE_MAP = {
+ univ.Integer.typeId: AbstractScalarPayloadDecoder(),
+ univ.Boolean.typeId: AbstractScalarPayloadDecoder(),
+ univ.BitString.typeId: BitStringPayloadDecoder(),
+ univ.OctetString.typeId: AbstractScalarPayloadDecoder(),
+ univ.Null.typeId: AbstractScalarPayloadDecoder(),
+ univ.ObjectIdentifier.typeId: AbstractScalarPayloadDecoder(),
+ univ.Enumerated.typeId: AbstractScalarPayloadDecoder(),
+ univ.Real.typeId: AbstractScalarPayloadDecoder(),
+ # ambiguous base types
+ univ.Set.typeId: SequenceOrSetPayloadDecoder(),
+ univ.SetOf.typeId: SequenceOfOrSetOfPayloadDecoder(),
+ univ.Sequence.typeId: SequenceOrSetPayloadDecoder(),
+ univ.SequenceOf.typeId: SequenceOfOrSetOfPayloadDecoder(),
+ univ.Choice.typeId: ChoicePayloadDecoder(),
+ univ.Any.typeId: AbstractScalarPayloadDecoder(),
+ # character string types
+ char.UTF8String.typeId: AbstractScalarPayloadDecoder(),
+ char.NumericString.typeId: AbstractScalarPayloadDecoder(),
+ char.PrintableString.typeId: AbstractScalarPayloadDecoder(),
+ char.TeletexString.typeId: AbstractScalarPayloadDecoder(),
+ char.VideotexString.typeId: AbstractScalarPayloadDecoder(),
+ char.IA5String.typeId: AbstractScalarPayloadDecoder(),
+ char.GraphicString.typeId: AbstractScalarPayloadDecoder(),
+ char.VisibleString.typeId: AbstractScalarPayloadDecoder(),
+ char.GeneralString.typeId: AbstractScalarPayloadDecoder(),
+ char.UniversalString.typeId: AbstractScalarPayloadDecoder(),
+ char.BMPString.typeId: AbstractScalarPayloadDecoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: AbstractScalarPayloadDecoder(),
+ useful.GeneralizedTime.typeId: AbstractScalarPayloadDecoder(),
+ useful.UTCTime.typeId: AbstractScalarPayloadDecoder()
+}
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemDecoder(object):
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+ def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
+ self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
+ self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP
+
+ def __call__(self, pyObject, asn1Spec, **options):
+
+ if LOG:
+ debug.scope.push(type(pyObject).__name__)
+ LOG('decoder called at scope %s, working with '
+ 'type %s' % (debug.scope, type(pyObject).__name__))
+
+ if asn1Spec is None or not isinstance(asn1Spec, base.Asn1Item):
+ raise error.PyAsn1Error(
+ 'asn1Spec is not valid (should be an instance of an ASN.1 '
+ 'Item, not %s)' % asn1Spec.__class__.__name__)
+
+ try:
+ valueDecoder = self._typeMap[asn1Spec.typeId]
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(asn1Spec.tagSet.baseTag, asn1Spec.tagSet.baseTag)
+
+ try:
+ valueDecoder = self._tagMap[baseTagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('Unknown ASN.1 tag %s' % asn1Spec.tagSet)
+
+ if LOG:
+ LOG('calling decoder %s on Python type %s '
+ '<%s>' % (type(valueDecoder).__name__,
+ type(pyObject).__name__, repr(pyObject)))
+
+ value = valueDecoder(pyObject, asn1Spec, self, **options)
+
+ if LOG:
+ LOG('decoder %s produced ASN.1 type %s '
+ '<%s>' % (type(valueDecoder).__name__,
+ type(value).__name__, repr(value)))
+ debug.scope.pop()
+
+ return value
+
+
+class Decoder(object):
+ SINGLE_ITEM_DECODER = SingleItemDecoder
+
+ def __init__(self, **options):
+ self._singleItemDecoder = self.SINGLE_ITEM_DECODER(**options)
+
+ def __call__(self, pyObject, asn1Spec=None, **kwargs):
+ return self._singleItemDecoder(pyObject, asn1Spec=asn1Spec, **kwargs)
+
+
+#: Turns Python objects of built-in types into ASN.1 objects.
+#:
+#: Takes Python objects of built-in types and turns them into a tree of
+#: ASN.1 objects (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: pyObject: :py:class:`object`
+#: A scalar or nested Python objects
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. It is required
+#: for successful interpretation of Python objects mapping into their ASN.1
+#: representations.
+#:
+#: Returns
+#: -------
+#: : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A scalar or constructed pyasn1 object
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode native Python object into ASN.1 objects with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode([1, 2, 3], asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/native/encoder.py b/contrib/python/pyasn1/py2/pyasn1/codec/native/encoder.py
new file mode 100644
index 0000000000..a0d9f1c444
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/native/encoder.py
@@ -0,0 +1,274 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from collections import OrderedDict
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.compat import _MISSING
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['encode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER)
+
+
+class AbstractItemEncoder(object):
+ def encode(self, value, encodeFun, **options):
+ raise error.PyAsn1Error('Not implemented')
+
+
+class BooleanEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return bool(value)
+
+
+class IntegerEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return int(value)
+
+
+class BitStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class OctetStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return value.asOctets()
+
+
+class TextStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class NullEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return None
+
+
+class ObjectIdentifierEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class RealEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return float(value)
+
+
+class SetEncoder(AbstractItemEncoder):
+ protoDict = dict
+
+ def encode(self, value, encodeFun, **options):
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+ substrate = self.protoDict()
+
+ for idx, (key, subValue) in enumerate(value.items()):
+ if namedTypes and namedTypes[idx].isOptional and not value[idx].isValue:
+ continue
+ substrate[key] = encodeFun(subValue, **options)
+ return substrate
+
+
+class SequenceEncoder(SetEncoder):
+ protoDict = OrderedDict
+
+
+class SequenceOfEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+ return [encodeFun(x, **options) for x in value]
+
+
+class ChoiceEncoder(SequenceEncoder):
+ pass
+
+
+class AnyEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return value.asOctets()
+
+
+TAG_MAP = {
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Integer.tagSet: IntegerEncoder(),
+ univ.BitString.tagSet: BitStringEncoder(),
+ univ.OctetString.tagSet: OctetStringEncoder(),
+ univ.Null.tagSet: NullEncoder(),
+ univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
+ univ.Enumerated.tagSet: IntegerEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SequenceOf.tagSet: SequenceOfEncoder(),
+ univ.SetOf.tagSet: SequenceOfEncoder(),
+ univ.Choice.tagSet: ChoiceEncoder(),
+ # character string types
+ char.UTF8String.tagSet: TextStringEncoder(),
+ char.NumericString.tagSet: TextStringEncoder(),
+ char.PrintableString.tagSet: TextStringEncoder(),
+ char.TeletexString.tagSet: TextStringEncoder(),
+ char.VideotexString.tagSet: TextStringEncoder(),
+ char.IA5String.tagSet: TextStringEncoder(),
+ char.GraphicString.tagSet: TextStringEncoder(),
+ char.VisibleString.tagSet: TextStringEncoder(),
+ char.GeneralString.tagSet: TextStringEncoder(),
+ char.UniversalString.tagSet: TextStringEncoder(),
+ char.BMPString.tagSet: TextStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
+ useful.GeneralizedTime.tagSet: OctetStringEncoder(),
+ useful.UTCTime.tagSet: OctetStringEncoder()
+}
+
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+TYPE_MAP = {
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Integer.typeId: IntegerEncoder(),
+ univ.BitString.typeId: BitStringEncoder(),
+ univ.OctetString.typeId: OctetStringEncoder(),
+ univ.Null.typeId: NullEncoder(),
+ univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
+ univ.Enumerated.typeId: IntegerEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SetEncoder(),
+ univ.SetOf.typeId: SequenceOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder(),
+ univ.Choice.typeId: ChoiceEncoder(),
+ univ.Any.typeId: AnyEncoder(),
+ # character string types
+ char.UTF8String.typeId: OctetStringEncoder(),
+ char.NumericString.typeId: OctetStringEncoder(),
+ char.PrintableString.typeId: OctetStringEncoder(),
+ char.TeletexString.typeId: OctetStringEncoder(),
+ char.VideotexString.typeId: OctetStringEncoder(),
+ char.IA5String.typeId: OctetStringEncoder(),
+ char.GraphicString.typeId: OctetStringEncoder(),
+ char.VisibleString.typeId: OctetStringEncoder(),
+ char.GeneralString.typeId: OctetStringEncoder(),
+ char.UniversalString.typeId: OctetStringEncoder(),
+ char.BMPString.typeId: OctetStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: OctetStringEncoder(),
+ useful.GeneralizedTime.typeId: OctetStringEncoder(),
+ useful.UTCTime.typeId: OctetStringEncoder()
+}
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemEncoder(object):
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+ def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
+ self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
+ self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP
+
+ def __call__(self, value, **options):
+ if not isinstance(value, base.Asn1Item):
+ raise error.PyAsn1Error(
+ 'value is not valid (should be an instance of an ASN.1 Item)')
+
+ if LOG:
+ debug.scope.push(type(value).__name__)
+ LOG('encoder called for type %s '
+ '<%s>' % (type(value).__name__, value.prettyPrint()))
+
+ tagSet = value.tagSet
+
+ try:
+ concreteEncoder = self._typeMap[value.typeId]
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(
+ value.tagSet.baseTag, value.tagSet.baseTag)
+
+ try:
+ concreteEncoder = self._tagMap[baseTagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('No encoder for %s' % (value,))
+
+ if LOG:
+ LOG('using value codec %s chosen by '
+ '%s' % (concreteEncoder.__class__.__name__, tagSet))
+
+ pyObject = concreteEncoder.encode(value, self, **options)
+
+ if LOG:
+ LOG('encoder %s produced: '
+ '%s' % (type(concreteEncoder).__name__, repr(pyObject)))
+ debug.scope.pop()
+
+ return pyObject
+
+
+class Encoder(object):
+ SINGLE_ITEM_ENCODER = SingleItemEncoder
+
+ def __init__(self, **options):
+ self._singleItemEncoder = self.SINGLE_ITEM_ENCODER(**options)
+
+ def __call__(self, pyObject, asn1Spec=None, **options):
+ return self._singleItemEncoder(
+ pyObject, asn1Spec=asn1Spec, **options)
+
+
+#: Turns ASN.1 object into a Python built-in type object(s).
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a Python built-in type or a tree
+#: of those.
+#:
+#: One exception is that instead of :py:class:`dict`, the :py:class:`OrderedDict`
+#: is used to preserve ordering of the components in ASN.1 SEQUENCE.
+#:
+#: Parameters
+#: ----------
+#: asn1Value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: pyasn1 object to encode (or a tree of them)
+#:
+#: Returns
+#: -------
+#: : :py:class:`object`
+#: Python built-in type instance (or a tree of them)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode ASN.1 value object into native Python types
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: [1, 2, 3]
+#:
+encode = SingleItemEncoder()
diff --git a/contrib/python/pyasn1/py2/pyasn1/codec/streaming.py b/contrib/python/pyasn1/py2/pyasn1/codec/streaming.py
new file mode 100644
index 0000000000..231681c177
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/codec/streaming.py
@@ -0,0 +1,244 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import io
+import os
+import sys
+
+from pyasn1 import error
+from pyasn1.type import univ
+
+_PY2 = sys.version_info < (3,)
+
+
+class CachingStreamWrapper(io.IOBase):
+ """Wrapper around non-seekable streams.
+
+ Note that the implementation is tied to the decoder,
+ not checking for dangerous arguments for the sake
+ of performance.
+
+ The read bytes are kept in an internal cache until
+ setting _markedPosition which may reset the cache.
+ """
+ def __init__(self, raw):
+ self._raw = raw
+ self._cache = io.BytesIO()
+ self._markedPosition = 0
+
+ def peek(self, n):
+ result = self.read(n)
+ self._cache.seek(-len(result), os.SEEK_CUR)
+ return result
+
+ def seekable(self):
+ return True
+
+ def seek(self, n=-1, whence=os.SEEK_SET):
+        # Note that this is not safe for seeking forward.
+ return self._cache.seek(n, whence)
+
+ def read(self, n=-1):
+ read_from_cache = self._cache.read(n)
+ if n != -1:
+ n -= len(read_from_cache)
+ if not n: # 0 bytes left to read
+ return read_from_cache
+
+ read_from_raw = self._raw.read(n)
+
+ self._cache.write(read_from_raw)
+
+ return read_from_cache + read_from_raw
+
+ @property
+ def markedPosition(self):
+ """Position where the currently processed element starts.
+
+ This is used for back-tracking in SingleItemDecoder.__call__
+ and (indefLen)ValueDecoder and should not be used for other purposes.
+ The client is not supposed to ever seek before this position.
+ """
+ return self._markedPosition
+
+ @markedPosition.setter
+ def markedPosition(self, value):
+ # By setting the value, we ensure we won't seek back before it.
+ # `value` should be the same as the current position
+ # We don't check for this for performance reasons.
+ self._markedPosition = value
+
+        # Whenever we set _markedPosition, we know for sure
+ # that we will not return back, and thus it is
+ # safe to drop all cached data.
+ if self._cache.tell() > io.DEFAULT_BUFFER_SIZE:
+ self._cache = io.BytesIO(self._cache.read())
+ self._markedPosition = 0
+
+ def tell(self):
+ return self._cache.tell()
+
+
+def asSeekableStream(substrate):
+ """Convert object to seekable byte-stream.
+
+ Parameters
+ ----------
+ substrate: :py:class:`bytes` or :py:class:`io.IOBase` or :py:class:`univ.OctetString`
+
+ Returns
+ -------
+ : :py:class:`io.IOBase`
+
+ Raises
+ ------
+ : :py:class:`~pyasn1.error.PyAsn1Error`
+ If the supplied substrate cannot be converted to a seekable stream.
+ """
+ if isinstance(substrate, io.BytesIO):
+ return substrate
+
+ elif isinstance(substrate, bytes):
+ return io.BytesIO(substrate)
+
+ elif isinstance(substrate, univ.OctetString):
+ return io.BytesIO(substrate.asOctets())
+
+ try:
+ # Special case: impossible to set attributes on `file` built-in
+ # XXX: broken, BufferedReader expects a "readable" attribute.
+ if _PY2 and isinstance(substrate, file):
+ return io.BufferedReader(substrate)
+
+ elif substrate.seekable(): # Will fail for most invalid types
+ return substrate
+
+ else:
+ return CachingStreamWrapper(substrate)
+
+ except AttributeError:
+ raise error.UnsupportedSubstrateError(
+ "Cannot convert " + substrate.__class__.__name__ +
+ " to a seekable bit stream.")
+
+
+def isEndOfStream(substrate):
+ """Check whether we have reached the end of a stream.
+
+    Although it is more effective to read and catch exceptions, this
+    function checks for the end of the stream without consuming any data.
+
+ Parameters
+ ----------
+ substrate: :py:class:`IOBase`
+ Stream to check
+
+ Returns
+ -------
+ : :py:class:`bool`
+ """
+ if isinstance(substrate, io.BytesIO):
+ cp = substrate.tell()
+ substrate.seek(0, os.SEEK_END)
+ result = substrate.tell() == cp
+ substrate.seek(cp, os.SEEK_SET)
+ yield result
+
+ else:
+ received = substrate.read(1)
+ if received is None:
+ yield
+
+ if received:
+ substrate.seek(-1, os.SEEK_CUR)
+
+ yield not received
+
+
+def peekIntoStream(substrate, size=-1):
+ """Peek into stream.
+
+ Parameters
+ ----------
+ substrate: :py:class:`IOBase`
+ Stream to read from.
+
+ size: :py:class:`int`
+ How many bytes to peek (-1 = all available)
+
+ Returns
+ -------
+ : :py:class:`bytes` or :py:class:`str`
+ The return type depends on Python major version
+ """
+ if hasattr(substrate, "peek"):
+ received = substrate.peek(size)
+ if received is None:
+ yield
+
+ while len(received) < size:
+ yield
+
+ yield received
+
+ else:
+ current_position = substrate.tell()
+ try:
+ for chunk in readFromStream(substrate, size):
+ yield chunk
+
+ finally:
+ substrate.seek(current_position)
+
+
+def readFromStream(substrate, size=-1, context=None):
+ """Read from the stream.
+
+ Parameters
+ ----------
+ substrate: :py:class:`IOBase`
+ Stream to read from.
+
+ Keyword parameters
+ ------------------
+ size: :py:class:`int`
+ How many bytes to read (-1 = all available)
+
+ context: :py:class:`dict`
+ Opaque caller context will be attached to exception objects created
+ by this function.
+
+ Yields
+ ------
+ : :py:class:`bytes` or :py:class:`str` or :py:class:`SubstrateUnderrunError`
+ Read data or :py:class:`~pyasn1.error.SubstrateUnderrunError`
+ object if no `size` bytes is readily available in the stream. The
+ data type depends on Python major version
+
+ Raises
+ ------
+ : :py:class:`~pyasn1.error.EndOfStreamError`
+ Input stream is exhausted
+ """
+ while True:
+ # this will block unless stream is non-blocking
+ received = substrate.read(size)
+ if received is None: # non-blocking stream can do this
+ yield error.SubstrateUnderrunError(context=context)
+
+ elif not received and size != 0: # end-of-stream
+ raise error.EndOfStreamError(context=context)
+
+ elif len(received) < size:
+ substrate.seek(-len(received), os.SEEK_CUR)
+
+ # behave like a non-blocking stream
+ yield error.SubstrateUnderrunError(context=context)
+
+ else:
+ break
+
+ yield received
diff --git a/contrib/python/pyasn1/py2/pyasn1/compat/__init__.py b/contrib/python/pyasn1/py2/pyasn1/compat/__init__.py
new file mode 100644
index 0000000000..d3e676ac6a
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/compat/__init__.py
@@ -0,0 +1,4 @@
+# This file is necessary to make this directory a package.
+
+# sentinel for missing argument
+_MISSING = object()
diff --git a/contrib/python/pyasn1/py2/pyasn1/compat/integer.py b/contrib/python/pyasn1/py2/pyasn1/compat/integer.py
new file mode 100644
index 0000000000..b41d849fcd
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/compat/integer.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import platform
+
+from pyasn1.compat.octets import oct2int, null, ensureString
+
+
+implementation = platform.python_implementation()
+
+if sys.version_info[0] < 3:
+ from binascii import a2b_hex, b2a_hex
+
+ def from_bytes(octets, signed=False):
+ if not octets:
+ return 0
+
+ value = long(b2a_hex(ensureString(octets)), 16)
+
+ if signed and oct2int(octets[0]) & 0x80:
+ return value - (1 << len(octets) * 8)
+
+ return value
+
+ def to_bytes(value, signed=False, length=0):
+ if value < 0:
+ if signed:
+ bits = bitLength(value)
+
+ # two's complement form
+ maxValue = 1 << bits
+ valueToEncode = (value + maxValue) % maxValue
+
+ else:
+ raise OverflowError('can\'t convert negative int to unsigned')
+ elif value == 0 and length == 0:
+ return null
+ else:
+ bits = 0
+ valueToEncode = value
+
+ hexValue = hex(valueToEncode)[2:]
+ if hexValue.endswith('L'):
+ hexValue = hexValue[:-1]
+
+ if len(hexValue) & 1:
+ hexValue = '0' + hexValue
+
+ # padding may be needed for two's complement encoding
+ if value != valueToEncode or length:
+ hexLength = len(hexValue) * 4
+
+ padLength = max(length, bits)
+
+ if padLength > hexLength:
+ hexValue = '00' * ((padLength - hexLength - 1) // 8 + 1) + hexValue
+ elif length and hexLength - length > 7:
+ raise OverflowError('int too big to convert')
+
+ firstOctet = int(hexValue[:2], 16)
+
+ if signed:
+ if firstOctet & 0x80:
+ if value >= 0:
+ hexValue = '00' + hexValue
+ elif value < 0:
+ hexValue = 'ff' + hexValue
+
+ octets_value = a2b_hex(hexValue)
+
+ return octets_value
+
+ def bitLength(number):
+ # bits in unsigned number
+ hexValue = hex(abs(number))
+ bits = len(hexValue) - 2
+ if hexValue.endswith('L'):
+ bits -= 1
+ if bits & 1:
+ bits += 1
+ bits *= 4
+ # TODO: strip lhs zeros
+ return bits
+
+else:
+
+ def from_bytes(octets, signed=False):
+ return int.from_bytes(bytes(octets), 'big', signed=signed)
+
+ def to_bytes(value, signed=False, length=0):
+ length = max(value.bit_length(), length)
+
+ if signed and length % 8 == 0:
+ length += 1
+
+ return value.to_bytes(length // 8 + (length % 8 and 1 or 0), 'big', signed=signed)
+
+ def bitLength(number):
+ return int(number).bit_length()
diff --git a/contrib/python/pyasn1/py2/pyasn1/compat/octets.py b/contrib/python/pyasn1/py2/pyasn1/compat/octets.py
new file mode 100644
index 0000000000..d871f46c8a
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/compat/octets.py
@@ -0,0 +1,46 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from sys import version_info
+
+if version_info[0] <= 2:
+ int2oct = chr
+ # noinspection PyPep8
+ ints2octs = lambda s: ''.join([int2oct(x) for x in s])
+ null = ''
+ oct2int = ord
+ # TODO: refactor to return a sequence of ints
+ # noinspection PyPep8
+ octs2ints = lambda s: [oct2int(x) for x in s]
+ # noinspection PyPep8
+ str2octs = lambda x: x
+ # noinspection PyPep8
+ octs2str = lambda x: x
+ # noinspection PyPep8
+ isOctetsType = lambda s: isinstance(s, str)
+ # noinspection PyPep8
+ isStringType = lambda s: isinstance(s, (str, unicode))
+ # noinspection PyPep8
+ ensureString = str
+else:
+ ints2octs = bytes
+ # noinspection PyPep8
+ int2oct = lambda x: ints2octs((x,))
+ null = ints2octs()
+ # noinspection PyPep8
+ oct2int = lambda x: x
+ # noinspection PyPep8
+ octs2ints = lambda x: x
+ # noinspection PyPep8
+ str2octs = lambda x: x.encode('iso-8859-1')
+ # noinspection PyPep8
+ octs2str = lambda x: x.decode('iso-8859-1')
+ # noinspection PyPep8
+ isOctetsType = lambda s: isinstance(s, bytes)
+ # noinspection PyPep8
+ isStringType = lambda s: isinstance(s, str)
+ # noinspection PyPep8
+ ensureString = bytes
diff --git a/contrib/python/pyasn1/py2/pyasn1/debug.py b/contrib/python/pyasn1/py2/pyasn1/debug.py
new file mode 100644
index 0000000000..6be80c3a70
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/debug.py
@@ -0,0 +1,147 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import logging
+import sys
+
+from pyasn1 import __version__
+from pyasn1 import error
+from pyasn1.compat.octets import octs2ints
+
+__all__ = ['Debug', 'setLogger', 'hexdump']
+
+DEBUG_NONE = 0x0000
+DEBUG_ENCODER = 0x0001
+DEBUG_DECODER = 0x0002
+DEBUG_ALL = 0xffff
+
+FLAG_MAP = {
+ 'none': DEBUG_NONE,
+ 'encoder': DEBUG_ENCODER,
+ 'decoder': DEBUG_DECODER,
+ 'all': DEBUG_ALL
+}
+
+LOGGEE_MAP = {}
+
+
+class Printer(object):
+ # noinspection PyShadowingNames
+ def __init__(self, logger=None, handler=None, formatter=None):
+ if logger is None:
+ logger = logging.getLogger('pyasn1')
+
+ logger.setLevel(logging.DEBUG)
+
+ if handler is None:
+ handler = logging.StreamHandler()
+
+ if formatter is None:
+ formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
+
+ handler.setFormatter(formatter)
+ handler.setLevel(logging.DEBUG)
+ logger.addHandler(handler)
+
+ self.__logger = logger
+
+ def __call__(self, msg):
+ self.__logger.debug(msg)
+
+ def __str__(self):
+ return '<python logging>'
+
+
+class Debug(object):
+ defaultPrinter = Printer()
+
+ def __init__(self, *flags, **options):
+ self._flags = DEBUG_NONE
+
+ if 'loggerName' in options:
+ # route our logs to parent logger
+ self._printer = Printer(
+ logger=logging.getLogger(options['loggerName']),
+ handler=logging.NullHandler()
+ )
+
+ elif 'printer' in options:
+ self._printer = options.get('printer')
+
+ else:
+ self._printer = self.defaultPrinter
+
+ self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))
+
+ for flag in flags:
+ inverse = flag and flag[0] in ('!', '~')
+ if inverse:
+ flag = flag[1:]
+ try:
+ if inverse:
+ self._flags &= ~FLAG_MAP[flag]
+ else:
+ self._flags |= FLAG_MAP[flag]
+ except KeyError:
+ raise error.PyAsn1Error('bad debug flag %s' % flag)
+
+ self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))
+
+ def __str__(self):
+ return 'logger %s, flags %x' % (self._printer, self._flags)
+
+ def __call__(self, msg):
+ self._printer(msg)
+
+ def __and__(self, flag):
+ return self._flags & flag
+
+ def __rand__(self, flag):
+ return flag & self._flags
+
+_LOG = DEBUG_NONE
+
+
+def setLogger(userLogger):
+ global _LOG
+
+ if userLogger:
+ _LOG = userLogger
+ else:
+ _LOG = DEBUG_NONE
+
+ # Update registered logging clients
+ for module, (name, flags) in LOGGEE_MAP.items():
+ setattr(module, name, _LOG & flags and _LOG or DEBUG_NONE)
+
+
+def registerLoggee(module, name='LOG', flags=DEBUG_NONE):
+ LOGGEE_MAP[sys.modules[module]] = name, flags
+ setLogger(_LOG)
+ return _LOG
+
+
+def hexdump(octets):
+ return ' '.join(
+ ['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
+ for n, x in zip(range(len(octets)), octs2ints(octets))]
+ )
+
+
+class Scope(object):
+ def __init__(self):
+ self._list = []
+
+ def __str__(self): return '.'.join(self._list)
+
+ def push(self, token):
+ self._list.append(token)
+
+ def pop(self):
+ return self._list.pop()
+
+
+scope = Scope()
diff --git a/contrib/python/pyasn1/py2/pyasn1/error.py b/contrib/python/pyasn1/py2/pyasn1/error.py
new file mode 100644
index 0000000000..75c9a3f4cd
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/error.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+
+
+class PyAsn1Error(Exception):
+ """Base pyasn1 exception
+
+ `PyAsn1Error` is the base exception class (based on
+ :class:`Exception`) that represents all possible ASN.1 related
+ errors.
+
+ Parameters
+ ----------
+ args:
+ Opaque positional parameters
+
+ Keyword Args
+ ------------
+ kwargs:
+ Opaque keyword parameters
+
+ """
+ def __init__(self, *args, **kwargs):
+ self._args = args
+ self._kwargs = kwargs
+
+ @property
+ def context(self):
+ """Return exception context
+
+ When exception object is created, the caller can supply some opaque
+ context for the upper layers to better understand the cause of the
+ exception.
+
+ Returns
+ -------
+ : :py:class:`dict`
+ Dict holding context specific data
+ """
+ return self._kwargs.get('context', {})
+
+
+class ValueConstraintError(PyAsn1Error):
+ """ASN.1 type constraints violation exception
+
+ The `ValueConstraintError` exception indicates an ASN.1 value
+ constraint violation.
+
+ It might happen on value object instantiation (for scalar types) or on
+ serialization (for constructed types).
+ """
+
+
+class SubstrateUnderrunError(PyAsn1Error):
+ """ASN.1 data structure deserialization error
+
+ The `SubstrateUnderrunError` exception indicates insufficient serialised
+ data on input of a de-serialization codec.
+ """
+
+
+class EndOfStreamError(SubstrateUnderrunError):
+ """ASN.1 data structure deserialization error
+
+    The `EndOfStreamError` exception indicates that the input
+    stream has been closed.
+ """
+
+
+class UnsupportedSubstrateError(PyAsn1Error):
+ """Unsupported substrate type to parse as ASN.1 data."""
+
+
+class PyAsn1UnicodeError(PyAsn1Error, UnicodeError):
+ """Unicode text processing error
+
+ The `PyAsn1UnicodeError` exception is a base class for errors relating to
+ unicode text de/serialization.
+
+ Apart from inheriting from :class:`PyAsn1Error`, it also inherits from
+ :class:`UnicodeError` to help the caller catching unicode-related errors.
+ """
+ def __init__(self, message, unicode_error=None):
+ if isinstance(unicode_error, UnicodeError):
+ UnicodeError.__init__(self, *unicode_error.args)
+ PyAsn1Error.__init__(self, message)
+
+
+class PyAsn1UnicodeDecodeError(PyAsn1UnicodeError, UnicodeDecodeError):
+ """Unicode text decoding error
+
+ The `PyAsn1UnicodeDecodeError` exception represents a failure to
+ deserialize unicode text.
+
+ Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+ from :class:`UnicodeDecodeError` to help the caller catching unicode-related
+ errors.
+ """
+
+
+class PyAsn1UnicodeEncodeError(PyAsn1UnicodeError, UnicodeEncodeError):
+ """Unicode text encoding error
+
+ The `PyAsn1UnicodeEncodeError` exception represents a failure to
+ serialize unicode text.
+
+ Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+ from :class:`UnicodeEncodeError` to help the caller catching
+ unicode-related errors.
+ """
+
+
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/__init__.py b/contrib/python/pyasn1/py2/pyasn1/type/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/base.py b/contrib/python/pyasn1/py2/pyasn1/type/base.py
new file mode 100644
index 0000000000..ac92c51afb
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/base.py
@@ -0,0 +1,706 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import constraint
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+__all__ = ['Asn1Item', 'Asn1Type', 'SimpleAsn1Type',
+ 'ConstructedAsn1Type']
+
+
+class Asn1Item(object):
+ @classmethod
+ def getTypeId(cls, increment=1):
+ try:
+ Asn1Item._typeCounter += increment
+ except AttributeError:
+ Asn1Item._typeCounter = increment
+ return Asn1Item._typeCounter
+
+
+class Asn1Type(Asn1Item):
+ """Base class for all classes representing ASN.1 types.
+
+ In the user code, |ASN.1| class is normally used only for telling
+ ASN.1 objects from others.
+
+ Note
+ ----
+ For as long as ASN.1 is concerned, a way to compare ASN.1 types
+ is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+ """
+ #: Set or return a :py:class:`~pyasn1.type.tag.TagSet` object representing
+ #: ASN.1 tag(s) associated with |ASN.1| type.
+ tagSet = tag.TagSet()
+
+ #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ #: object imposing constraints on initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = None
+
+ def __init__(self, **kwargs):
+ readOnly = {
+ 'tagSet': self.tagSet,
+ 'subtypeSpec': self.subtypeSpec
+ }
+
+ readOnly.update(kwargs)
+
+ self.__dict__.update(readOnly)
+
+ self._readOnly = readOnly
+
+ def __setattr__(self, name, value):
+ if name[0] != '_' and name in self._readOnly:
+ raise error.PyAsn1Error('read-only instance attribute "%s"' % name)
+
+ self.__dict__[name] = value
+
+ def __str__(self):
+ return self.prettyPrint()
+
+ @property
+ def readOnly(self):
+ return self._readOnly
+
+ @property
+ def effectiveTagSet(self):
+ """For |ASN.1| type is equivalent to *tagSet*
+ """
+ return self.tagSet # used by untagged types
+
+ @property
+ def tagMap(self):
+ """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
+ """
+ return tagmap.TagMap({self.tagSet: self})
+
+ def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
+ """Examine |ASN.1| type for equality with other ASN.1 type.
+
+ ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
+ (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
+ out ASN.1 types comparison.
+
+ Python class inheritance relationship is NOT considered.
+
+ Parameters
+ ----------
+ other: a pyasn1 type object
+ Class instance representing ASN.1 type.
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if *other* is |ASN.1| type,
+ :obj:`False` otherwise.
+ """
+ return (self is other or
+ (not matchTags or self.tagSet == other.tagSet) and
+ (not matchConstraints or self.subtypeSpec == other.subtypeSpec))
+
+ def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
+ """Examine |ASN.1| type for subtype relationship with other ASN.1 type.
+
+ ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
+ (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
+ out ASN.1 types comparison.
+
+ Python class inheritance relationship is NOT considered.
+
+ Parameters
+ ----------
+ other: a pyasn1 type object
+ Class instance representing ASN.1 type.
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if *other* is a subtype of |ASN.1| type,
+ :obj:`False` otherwise.
+ """
+ return (not matchTags or
+ (self.tagSet.isSuperTagSetOf(other.tagSet)) and
+ (not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
+
+ @staticmethod
+ def isNoValue(*values):
+ for value in values:
+ if value is not noValue:
+ return False
+ return True
+
+ def prettyPrint(self, scope=0):
+ raise NotImplementedError()
+
+ # backward compatibility
+
+ def getTagSet(self):
+ return self.tagSet
+
+ def getEffectiveTagSet(self):
+ return self.effectiveTagSet
+
+ def getTagMap(self):
+ return self.tagMap
+
+ def getSubtypeSpec(self):
+ return self.subtypeSpec
+
+ # backward compatibility
+ def hasValue(self):
+ return self.isValue
+
+# Backward compatibility
+Asn1ItemBase = Asn1Type
+
+
+class NoValue(object):
+ """Create a singleton instance of NoValue class.
+
+ The *NoValue* sentinel object represents an instance of ASN.1 schema
+ object as opposed to ASN.1 value object.
+
+ Only ASN.1 schema-related operations can be performed on ASN.1
+ schema objects.
+
+ Warning
+ -------
+ Any operation attempted on the *noValue* object will raise the
+ *PyAsn1Error* exception.
+ """
+ skipMethods = {
+ '__slots__',
+ # attributes
+ '__getattribute__',
+ '__getattr__',
+ '__setattr__',
+ '__delattr__',
+ # class instance
+ '__class__',
+ '__init__',
+ '__del__',
+ '__new__',
+ '__repr__',
+ '__qualname__',
+ '__objclass__',
+ 'im_class',
+ '__sizeof__',
+ # pickle protocol
+ '__reduce__',
+ '__reduce_ex__',
+ '__getnewargs__',
+ '__getinitargs__',
+ '__getstate__',
+ '__setstate__',
+ }
+
+ _instance = None
+
+ def __new__(cls):
+ if cls._instance is None:
+ def getPlug(name):
+ def plug(self, *args, **kw):
+ raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % name)
+ return plug
+
+ op_names = [name
+ for typ in (str, int, list, dict)
+ for name in dir(typ)
+ if (name not in cls.skipMethods and
+ name.startswith('__') and
+ name.endswith('__') and
+ callable(getattr(typ, name)))]
+
+ for name in set(op_names):
+ setattr(cls, name, getPlug(name))
+
+ cls._instance = object.__new__(cls)
+
+ return cls._instance
+
+ def __getattr__(self, attr):
+ if attr in self.skipMethods:
+ raise AttributeError('Attribute %s not present' % attr)
+
+ raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % attr)
+
+ def __repr__(self):
+ return '<%s object>' % self.__class__.__name__
+
+
+noValue = NoValue()
+
+
+class SimpleAsn1Type(Asn1Type):
+ """Base class for all simple classes representing ASN.1 types.
+
+ ASN.1 distinguishes types by their ability to hold other objects.
+ Scalar types are known as *simple* in ASN.1.
+
+ In the user code, |ASN.1| class is normally used only for telling
+ ASN.1 objects from others.
+
+ Note
+ ----
+ For as long as ASN.1 is concerned, a way to compare ASN.1 types
+ is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+ """
+ #: Default payload value
+ defaultValue = noValue
+
+ def __init__(self, value=noValue, **kwargs):
+ Asn1Type.__init__(self, **kwargs)
+ if value is noValue:
+ value = self.defaultValue
+ else:
+ value = self.prettyIn(value)
+ try:
+ self.subtypeSpec(value)
+
+ except error.PyAsn1Error:
+ exType, exValue, exTb = sys.exc_info()
+ raise exType('%s at %s' % (exValue, self.__class__.__name__))
+
+ self._value = value
+
+ def __repr__(self):
+ representation = '%s %s object' % (
+ self.__class__.__name__, self.isValue and 'value' or 'schema')
+
+ for attr, value in self.readOnly.items():
+ if value:
+ representation += ', %s %s' % (attr, value)
+
+ if self.isValue:
+ value = self.prettyPrint()
+ if len(value) > 32:
+ value = value[:16] + '...' + value[-16:]
+ representation += ', payload [%s]' % value
+
+ return '<%s>' % representation
+
+ def __eq__(self, other):
+ return self is other and True or self._value == other
+
+ def __ne__(self, other):
+ return self._value != other
+
+ def __lt__(self, other):
+ return self._value < other
+
+ def __le__(self, other):
+ return self._value <= other
+
+ def __gt__(self, other):
+ return self._value > other
+
+ def __ge__(self, other):
+ return self._value >= other
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self._value and True or False
+ else:
+ def __bool__(self):
+ return self._value and True or False
+
+ def __hash__(self):
+ return hash(self._value)
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just
+ ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema
+ features, this object can also be used like a Python built-in object
+ (e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+ operations (e.g. defining or testing the structure of the data). Most
+ obvious uses of ASN.1 schema is to guide serialisation codecs whilst
+ encoding/decoding serialised ASN.1 contents.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ return self._value is not noValue
+
+ def clone(self, value=noValue, **kwargs):
+ """Create a modified version of |ASN.1| schema or value object.
+
+ The `clone()` method accepts the same set arguments as |ASN.1|
+ class takes on instantiation except that all arguments
+ of the `clone()` method are optional.
+
+ Whatever arguments are supplied, they are used to create a copy
+ of `self` taking precedence over the ones used to instantiate `self`.
+
+ Note
+ ----
+ Due to the immutable nature of the |ASN.1| object, if no arguments
+ are supplied, no new |ASN.1| object will be created and `self` will
+ be returned instead.
+ """
+ if value is noValue:
+ if not kwargs:
+ return self
+
+ value = self._value
+
+ initializers = self.readOnly.copy()
+ initializers.update(kwargs)
+
+ return self.__class__(value, **initializers)
+
+ def subtype(self, value=noValue, **kwargs):
+ """Create a specialization of |ASN.1| schema or value object.
+
+ The subtype relationship between ASN.1 types has no correlation with
+ subtype relationship between Python types. ASN.1 type is mainly identified
+ by its tag(s) (:py:class:`~pyasn1.type.tag.TagSet`) and value range
+ constraints (:py:class:`~pyasn1.type.constraint.ConstraintsIntersection`).
+ These ASN.1 type properties are implemented as |ASN.1| attributes.
+
+ The `subtype()` method accepts the same set arguments as |ASN.1|
+ class takes on instantiation except that all parameters
+ of the `subtype()` method are optional.
+
+ With the exception of the arguments described below, the rest of
+        supplied arguments are used to create a copy of `self` taking
+ precedence over the ones used to instantiate `self`.
+
+        The following arguments to `subtype()` create an ASN.1 subtype out of
+ |ASN.1| type:
+
+ Other Parameters
+ ----------------
+ implicitTag: :py:class:`~pyasn1.type.tag.Tag`
+ Implicitly apply given ASN.1 tag object to `self`'s
+ :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+ new object's ASN.1 tag(s).
+
+ explicitTag: :py:class:`~pyasn1.type.tag.Tag`
+ Explicitly apply given ASN.1 tag object to `self`'s
+ :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+ new object's ASN.1 tag(s).
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Add ASN.1 constraints object to one of the `self`'s, then
+ use the result as new object's ASN.1 constraints.
+
+ Returns
+ -------
+ :
+ new instance of |ASN.1| schema or value object
+
+ Note
+ ----
+ Due to the immutable nature of the |ASN.1| object, if no arguments
+ are supplied, no new |ASN.1| object will be created and `self` will
+ be returned instead.
+ """
+ if value is noValue:
+ if not kwargs:
+ return self
+
+ value = self._value
+
+ initializers = self.readOnly.copy()
+
+ implicitTag = kwargs.pop('implicitTag', None)
+ if implicitTag is not None:
+ initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
+
+ explicitTag = kwargs.pop('explicitTag', None)
+ if explicitTag is not None:
+ initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
+
+ for arg, option in kwargs.items():
+ initializers[arg] += option
+
+ return self.__class__(value, **initializers)
+
+ def prettyIn(self, value):
+ return value
+
+ def prettyOut(self, value):
+ return str(value)
+
+ def prettyPrint(self, scope=0):
+ return self.prettyOut(self._value)
+
+ def prettyPrintType(self, scope=0):
+ return '%s -> %s' % (self.tagSet, self.__class__.__name__)
+
+# Backward compatibility
+AbstractSimpleAsn1Item = SimpleAsn1Type
+
+#
+# Constructed types:
+# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
+# * ASN1 types and values are represented by Python class instances
+# * Value initialization is made for defaulted components only
+# * Primary method of component addressing is by-position. Data model for base
+# type is Python sequence. Additional type-specific addressing methods
+# may be implemented for particular types.
+# * SequenceOf and SetOf types do not implement any additional methods
+# * Sequence, Set and Choice types also implement by-identifier addressing
+# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
+# * Sequence and Set types may include optional and defaulted
+# components
+# * Constructed types hold a reference to component types used for value
+# verification and ordering.
+# * Component type is a scalar type for SequenceOf/SetOf types and a list
+# of types for Sequence/Set/Choice.
+#
+
+
+class ConstructedAsn1Type(Asn1Type):
+    """Base class for all constructed classes representing ASN.1 types.
+
+    ASN.1 distinguishes types by their ability to hold other objects.
+    Those "nesting" types are known as *constructed* in ASN.1.
+
+    In the user code, |ASN.1| class is normally used only for telling
+    ASN.1 objects from others.
+
+    Note
+    ----
+    For as long as ASN.1 is concerned, a way to compare ASN.1 types
+    is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+    """
+
+    #: If :obj:`True`, requires exact component type matching,
+    #: otherwise subtype relation is only enforced
+    strictConstraints = False
+
+    # Component type(s) used to verify/order values; a scalar type for
+    # SequenceOf/SetOf, a list of named types for Sequence/Set/Choice.
+    componentType = None
+
+    # backward compatibility, unused
+    sizeSpec = constraint.ConstraintsIntersection()
+
+    def __init__(self, **kwargs):
+        # Collect class-level defaults, then let explicit keyword arguments
+        # override them before handing everything to the base initializer.
+        readOnly = {
+            'componentType': self.componentType,
+            # backward compatibility, unused
+            'sizeSpec': self.sizeSpec
+        }
+
+        # backward compatibility: preserve legacy sizeSpec support
+        kwargs = self._moveSizeSpec(**kwargs)
+
+        readOnly.update(kwargs)
+
+        Asn1Type.__init__(self, **readOnly)
+
+    def _moveSizeSpec(self, **kwargs):
+        # backward compatibility, unused
+        # Folds a legacy `sizeSpec` keyword into `subtypeSpec`.
+        sizeSpec = kwargs.pop('sizeSpec', self.sizeSpec)
+        if sizeSpec:
+            subtypeSpec = kwargs.pop('subtypeSpec', self.subtypeSpec)
+            if subtypeSpec:
+                # NOTE(review): the branches look inverted -- a non-empty
+                # subtypeSpec is *replaced* by sizeSpec while an empty one is
+                # extended. This mirrors upstream pyasn1; confirm upstream
+                # intent before changing.
+                subtypeSpec = sizeSpec
+
+            else:
+                subtypeSpec += sizeSpec
+
+            kwargs['subtypeSpec'] = subtypeSpec
+
+        return kwargs
+
+    def __repr__(self):
+        representation = '%s %s object' % (
+            self.__class__.__name__, self.isValue and 'value' or 'schema'
+        )
+
+        for attr, value in self.readOnly.items():
+            if value is not noValue:
+                representation += ', %s=%r' % (attr, value)
+
+        if self.isValue and self.components:
+            representation += ', payload [%s]' % ', '.join(
+                [repr(x) for x in self.components])
+
+        return '<%s>' % representation
+
+    # Rich comparisons delegate to the payload: `other` is compared against
+    # this object's components (not against another object's components).
+    def __eq__(self, other):
+        return self is other or self.components == other
+
+    def __ne__(self, other):
+        return self.components != other
+
+    def __lt__(self, other):
+        return self.components < other
+
+    def __le__(self, other):
+        return self.components <= other
+
+    def __gt__(self, other):
+        return self.components > other
+
+    def __ge__(self, other):
+        return self.components >= other
+
+    # Truthiness reflects whether any components are present.
+    if sys.version_info[0] <= 2:
+        def __nonzero__(self):
+            return bool(self.components)
+    else:
+        def __bool__(self):
+            return bool(self.components)
+
+    @property
+    def components(self):
+        # Abstract: concrete subclasses expose their component payload here.
+        raise error.PyAsn1Error('Method not implemented')
+
+    def _cloneComponentValues(self, myClone, cloneValueFlag):
+        # Hook for subclasses: copy component values into `myClone`.
+        pass
+
+    def clone(self, **kwargs):
+        """Create a modified version of |ASN.1| schema object.
+
+        The `clone()` method accepts the same set of arguments as |ASN.1|
+        class takes on instantiation except that all arguments
+        of the `clone()` method are optional.
+
+        Whatever arguments are supplied, they are used to create a copy
+        of `self` taking precedence over the ones used to instantiate `self`.
+
+        Possible values of `self` are never copied over thus `clone()` can
+        only create a new schema object.
+
+        Returns
+        -------
+        :
+            new instance of |ASN.1| type/value
+
+        Note
+        ----
+        Due to the mutable nature of the |ASN.1| object, even if no arguments
+        are supplied, a new |ASN.1| object will be created and returned.
+        """
+        cloneValueFlag = kwargs.pop('cloneValueFlag', False)
+
+        initializers = self.readOnly.copy()
+        initializers.update(kwargs)
+
+        clone = self.__class__(**initializers)
+
+        if cloneValueFlag:
+            self._cloneComponentValues(clone, cloneValueFlag)
+
+        return clone
+
+    def subtype(self, **kwargs):
+        """Create a specialization of |ASN.1| schema object.
+
+        The `subtype()` method accepts the same set of arguments as |ASN.1|
+        class takes on instantiation except that all parameters
+        of the `subtype()` method are optional.
+
+        With the exception of the arguments described below, the rest of
+        supplied arguments are used to create a copy of `self` taking
+        precedence over the ones used to instantiate `self`.
+
+        The following arguments to `subtype()` create a ASN.1 subtype out of
+        |ASN.1| type.
+
+        Other Parameters
+        ----------------
+        implicitTag: :py:class:`~pyasn1.type.tag.Tag`
+            Implicitly apply given ASN.1 tag object to `self`'s
+            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+            new object's ASN.1 tag(s).
+
+        explicitTag: :py:class:`~pyasn1.type.tag.Tag`
+            Explicitly apply given ASN.1 tag object to `self`'s
+            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+            new object's ASN.1 tag(s).
+
+        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+            Add ASN.1 constraints object to one of the `self`'s, then
+            use the result as new object's ASN.1 constraints.
+
+
+        Returns
+        -------
+        :
+            new instance of |ASN.1| type/value
+
+        Note
+        ----
+        Due to the mutable nature of the |ASN.1| object, even if no arguments
+        are supplied, a new |ASN.1| object will be created and returned.
+        """
+
+        initializers = self.readOnly.copy()
+
+        cloneValueFlag = kwargs.pop('cloneValueFlag', False)
+
+        implicitTag = kwargs.pop('implicitTag', None)
+        if implicitTag is not None:
+            initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
+
+        explicitTag = kwargs.pop('explicitTag', None)
+        if explicitTag is not None:
+            initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
+
+        # Remaining keyword arguments are merged additively into the
+        # existing initializers (e.g. subtypeSpec composition via `+`).
+        for arg, option in kwargs.items():
+            initializers[arg] += option
+
+        clone = self.__class__(**initializers)
+
+        if cloneValueFlag:
+            self._cloneComponentValues(clone, cloneValueFlag)
+
+        return clone
+
+    def getComponentByPosition(self, idx):
+        # Abstract: by-position component access, implemented by subclasses.
+        raise error.PyAsn1Error('Method not implemented')
+
+    def setComponentByPosition(self, idx, value, verifyConstraints=True):
+        # Abstract: by-position component assignment, implemented by subclasses.
+        raise error.PyAsn1Error('Method not implemented')
+
+    def setComponents(self, *args, **kwargs):
+        # Bulk-assign components: positional args by index, keyword args by
+        # name; returns self for call chaining.
+        for idx, value in enumerate(args):
+            self[idx] = value
+        for k in kwargs:
+            self[k] = kwargs[k]
+        return self
+
+    # backward compatibility
+
+    def setDefaultComponents(self):
+        pass
+
+    def getComponentType(self):
+        return self.componentType
+
+    # backward compatibility, unused
+    def verifySizeSpec(self):
+        # NOTE(review): applies subtypeSpec to self; retained only for
+        # legacy API compatibility -- confirm before removing.
+        self.subtypeSpec(self)
+
+
+# Backward compatibility: legacy alias retained so external code importing
+# the pre-rename class name keeps working.
+AbstractConstructedAsn1Item = ConstructedAsn1Type
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/char.py b/contrib/python/pyasn1/py2/pyasn1/type/char.py
new file mode 100644
index 0000000000..13fbc7fa27
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/char.py
@@ -0,0 +1,335 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
+ 'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
+ 'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
+
+NoValue = univ.NoValue
+noValue = univ.noValue
+
+
+class AbstractCharacterString(univ.OctetString):
+    """Creates |ASN.1| schema or value object.
+
+    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
+    its objects are immutable and duck-type Python 2 :class:`str` or Python 3
+    :class:`bytes`. When used in octet-stream context, |ASN.1| type assumes
+    "|encoding|" encoding.
+
+    Keyword Args
+    ------------
+    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
+        :class:`unicode` object (Python 2) or :class:`str` (Python 3),
+        alternatively :class:`str` (Python 2) or :class:`bytes` (Python 3)
+        representing octet-stream of serialised unicode string
+        (note `encoding` parameter) or |ASN.1| class instance.
+        If `value` is not given, schema object will be created.
+
+    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+        Object representing non-default ASN.1 tag(s)
+
+    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+        Object representing non-default ASN.1 subtype constraint(s). Constraints
+        verification for |ASN.1| type occurs automatically on object
+        instantiation.
+
+    encoding: :py:class:`str`
+        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
+        :class:`str` (Python 3) the payload when |ASN.1| object is used
+        in octet-stream context.
+
+    Raises
+    ------
+    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+        On constraint violation or bad initializer.
+    """
+
+    # The text/bytes API differs between Python 2 and 3, so the method
+    # implementations are selected once, at class-definition time.
+    if sys.version_info[0] <= 2:
+        def __str__(self):
+            try:
+                # `str` is Py2 text representation
+                return self._value.encode(self.encoding)
+
+            except UnicodeEncodeError:
+                exc = sys.exc_info()[1]
+                raise error.PyAsn1UnicodeEncodeError(
+                    "Can't encode string '%s' with codec "
+                    "%s" % (self._value, self.encoding), exc
+                )
+
+        def __unicode__(self):
+            return unicode(self._value)
+
+        def prettyIn(self, value):
+            # Normalize any accepted initializer into a unicode string.
+            try:
+                if isinstance(value, unicode):
+                    return value
+                elif isinstance(value, str):
+                    return value.decode(self.encoding)
+                elif isinstance(value, (tuple, list)):
+                    return self.prettyIn(''.join([chr(x) for x in value]))
+                elif isinstance(value, univ.OctetString):
+                    return value.asOctets().decode(self.encoding)
+                else:
+                    return unicode(value)
+
+            except (UnicodeDecodeError, LookupError):
+                exc = sys.exc_info()[1]
+                raise error.PyAsn1UnicodeDecodeError(
+                    "Can't decode string '%s' with codec "
+                    "%s" % (value, self.encoding), exc
+                )
+
+        def asOctets(self, padding=True):
+            # Py2: str(self) already yields the encoded byte string.
+            return str(self)
+
+        def asNumbers(self, padding=True):
+            return tuple([ord(x) for x in str(self)])
+
+    else:
+        def __str__(self):
+            # `unicode` is Py3 text representation
+            return str(self._value)
+
+        def __bytes__(self):
+            try:
+                return self._value.encode(self.encoding)
+            except UnicodeEncodeError:
+                exc = sys.exc_info()[1]
+                raise error.PyAsn1UnicodeEncodeError(
+                    "Can't encode string '%s' with codec "
+                    "%s" % (self._value, self.encoding), exc
+                )
+
+        def prettyIn(self, value):
+            # Normalize any accepted initializer into a text string.
+            try:
+                if isinstance(value, str):
+                    return value
+                elif isinstance(value, bytes):
+                    return value.decode(self.encoding)
+                elif isinstance(value, (tuple, list)):
+                    return self.prettyIn(bytes(value))
+                elif isinstance(value, univ.OctetString):
+                    return value.asOctets().decode(self.encoding)
+                else:
+                    return str(value)
+
+            except (UnicodeDecodeError, LookupError):
+                exc = sys.exc_info()[1]
+                raise error.PyAsn1UnicodeDecodeError(
+                    "Can't decode string '%s' with codec "
+                    "%s" % (value, self.encoding), exc
+                )
+
+        def asOctets(self, padding=True):
+            return bytes(self)
+
+        def asNumbers(self, padding=True):
+            return tuple(bytes(self))
+
+    #
+    # See OctetString.prettyPrint() for the explanation
+    #
+
+    def prettyOut(self, value):
+        return value
+
+    def prettyPrint(self, scope=0):
+        # first see if subclass has its own .prettyOut()
+        value = self.prettyOut(self._value)
+
+        if value is not self._value:
+            return value
+
+        return AbstractCharacterString.__str__(self)
+
+    def __reversed__(self):
+        return reversed(self._value)
+
+
+class NumericString(AbstractCharacterString):
+    # ASN.1 NumericString: universal-class tag 18, serialised as us-ascii.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
+    )
+    encoding = 'us-ascii'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class PrintableString(AbstractCharacterString):
+    # ASN.1 PrintableString: universal-class tag 19, serialised as us-ascii.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
+    )
+    encoding = 'us-ascii'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class TeletexString(AbstractCharacterString):
+    # ASN.1 TeletexString: universal-class tag 20, serialised as iso-8859-1.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
+    )
+    encoding = 'iso-8859-1'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class T61String(TeletexString):
+    # Alias of TeletexString (same tag and encoding) with its own typeId.
+    __doc__ = TeletexString.__doc__
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class VideotexString(AbstractCharacterString):
+    # ASN.1 VideotexString: universal-class tag 21, serialised as iso-8859-1.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
+    )
+    encoding = 'iso-8859-1'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class IA5String(AbstractCharacterString):
+    # ASN.1 IA5String: universal-class tag 22, serialised as us-ascii.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
+    )
+    encoding = 'us-ascii'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class GraphicString(AbstractCharacterString):
+    # ASN.1 GraphicString: universal-class tag 25, serialised as iso-8859-1.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
+    )
+    encoding = 'iso-8859-1'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class VisibleString(AbstractCharacterString):
+    # ASN.1 VisibleString: universal-class tag 26, serialised as us-ascii.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
+    )
+    encoding = 'us-ascii'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class ISO646String(VisibleString):
+    # Alias of VisibleString (same tag and encoding) with its own typeId.
+    __doc__ = VisibleString.__doc__
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+class GeneralString(AbstractCharacterString):
+    # ASN.1 GeneralString: universal-class tag 27, serialised as iso-8859-1.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
+    )
+    encoding = 'iso-8859-1'
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class UniversalString(AbstractCharacterString):
+    # ASN.1 UniversalString: universal-class tag 28, serialised as UTF-32BE.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
+    )
+    encoding = "utf-32-be"
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class BMPString(AbstractCharacterString):
+    # ASN.1 BMPString: universal-class tag 30, serialised as UTF-16BE.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
+    )
+    encoding = "utf-16-be"
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
+
+
+class UTF8String(AbstractCharacterString):
+    # ASN.1 UTF8String: universal-class tag 12, serialised as UTF-8.
+    __doc__ = AbstractCharacterString.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+    )
+    encoding = "utf-8"
+
+    # Optimization for faster codec lookup
+    typeId = AbstractCharacterString.getTypeId()
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/constraint.py b/contrib/python/pyasn1/py2/pyasn1/type/constraint.py
new file mode 100644
index 0000000000..34b0060d9f
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/constraint.py
@@ -0,0 +1,756 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+# Original concept and code by Mike C. Fletcher.
+#
+import sys
+
+from pyasn1.type import error
+
+__all__ = ['SingleValueConstraint', 'ContainedSubtypeConstraint',
+ 'ValueRangeConstraint', 'ValueSizeConstraint',
+ 'PermittedAlphabetConstraint', 'InnerTypeConstraint',
+ 'ConstraintsExclusion', 'ConstraintsIntersection',
+ 'ConstraintsUnion']
+
+
+class AbstractConstraint(object):
+    """Abstract base for all ASN.1 constraint objects.
+
+    Concrete constraints override :meth:`_setValues` and
+    :meth:`_testValue`; applying a constraint to a value
+    (``constraint(value)``) raises
+    :class:`~pyasn1.type.error.ValueConstraintError` on violation.
+    """
+
+    def __init__(self, *values):
+        # _valueMap records the constraints this one was derived from;
+        # it backs the isSuperTypeOf()/isSubTypeOf() checks.
+        self._valueMap = set()
+        self._setValues(values)
+        self.__hash = hash((self.__class__.__name__, self._values))
+
+    def __call__(self, value, idx=None):
+        # An empty constraint accepts any value.
+        if not self._values:
+            return
+
+        try:
+            self._testValue(value, idx)
+
+        except error.ValueConstraintError:
+            # Re-raise with this constraint identified in the message.
+            raise error.ValueConstraintError(
+                '%s failed at: %r' % (self, sys.exc_info()[1])
+            )
+
+    def __repr__(self):
+        representation = '%s object' % (self.__class__.__name__)
+
+        if self._values:
+            representation += ', consts %s' % ', '.join(
+                [repr(x) for x in self._values])
+
+        return '<%s>' % representation
+
+    # Equality: identity short-circuits; otherwise the raw value tuple is
+    # compared directly against `other` (not against other._values).
+    def __eq__(self, other):
+        return self is other and True or self._values == other
+
+    def __ne__(self, other):
+        return self._values != other
+
+    def __lt__(self, other):
+        return self._values < other
+
+    def __le__(self, other):
+        return self._values <= other
+
+    def __gt__(self, other):
+        return self._values > other
+
+    def __ge__(self, other):
+        return self._values >= other
+
+    # Truthiness reflects whether any constraining values are present.
+    if sys.version_info[0] <= 2:
+        def __nonzero__(self):
+            return self._values and True or False
+    else:
+        def __bool__(self):
+            return self._values and True or False
+
+    def __hash__(self):
+        return self.__hash
+
+    def _setValues(self, values):
+        # Subclasses override to normalize/index the constraint values.
+        self._values = values
+
+    def _testValue(self, value, idx):
+        # Subclasses override with the actual check; default rejects all.
+        raise error.ValueConstraintError(value)
+
+    # Constraints derivation logic
+    def getValueMap(self):
+        return self._valueMap
+
+    def isSuperTypeOf(self, otherConstraint):
+        # TODO: fix possible comparison of set vs scalars here
+        return (otherConstraint is self or
+                not self._values or
+                otherConstraint == self or
+                self in otherConstraint.getValueMap())
+
+    def isSubTypeOf(self, otherConstraint):
+        return (otherConstraint is self or
+                not self or
+                otherConstraint == self or
+                otherConstraint in self._valueMap)
+
+
+class SingleValueConstraint(AbstractConstraint):
+ """Create a SingleValueConstraint object.
+
+ The SingleValueConstraint satisfies any value that
+ is present in the set of permitted values.
+
+ Objects of this type are iterable (emitting constraint values) and
+ can act as operands for some arithmetic operations e.g. addition
+ and subtraction. The latter can be used for combining multiple
+ SingleValueConstraint objects into one.
+
+ The SingleValueConstraint object can be applied to
+ any ASN.1 type.
+
+ Parameters
+ ----------
+ *values: :class:`int`
+ Full set of values permitted by this constraint object.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class DivisorOfSix(Integer):
+ '''
+ ASN.1 specification:
+
+ Divisor-Of-6 ::= INTEGER (1 | 2 | 3 | 6)
+ '''
+ subtypeSpec = SingleValueConstraint(1, 2, 3, 6)
+
+ # this will succeed
+ divisor_of_six = DivisorOfSix(1)
+
+ # this will raise ValueConstraintError
+ divisor_of_six = DivisorOfSix(7)
+ """
+ def _setValues(self, values):
+ self._values = values
+ self._set = set(values)
+
+ def _testValue(self, value, idx):
+ if value not in self._set:
+ raise error.ValueConstraintError(value)
+
+ # Constrains can be merged or reduced
+
+ def __contains__(self, item):
+ return item in self._set
+
+ def __iter__(self):
+ return iter(self._set)
+
+ def __sub__(self, constraint):
+ return self.__class__(*(self._set.difference(constraint)))
+
+ def __add__(self, constraint):
+ return self.__class__(*(self._set.union(constraint)))
+
+ def __sub__(self, constraint):
+ return self.__class__(*(self._set.difference(constraint)))
+
+
+class ContainedSubtypeConstraint(AbstractConstraint):
+    """Create a ContainedSubtypeConstraint object.
+
+    The ContainedSubtypeConstraint satisfies any value that
+    is present in the set of permitted values and also
+    satisfies included constraints.
+
+    The ContainedSubtypeConstraint object can be applied to
+    any ASN.1 type.
+
+    Parameters
+    ----------
+    *values:
+        Full set of values and constraint objects permitted
+        by this constraint object.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        class DivisorOfEighteen(Integer):
+            '''
+            ASN.1 specification:
+
+            Divisors-of-18 ::= INTEGER (INCLUDES Divisors-of-6 | 9 | 18)
+            '''
+            subtypeSpec = ContainedSubtypeConstraint(
+                SingleValueConstraint(1, 2, 3, 6), 9, 18
+            )
+
+        # this will succeed
+        divisor_of_eighteen = DivisorOfEighteen(9)
+
+        # this will raise ValueConstraintError
+        divisor_of_eighteen = DivisorOfEighteen(10)
+    """
+    def _testValue(self, value, idx):
+        # Nested constraint objects are applied recursively; scalar members
+        # are tested by set membership.
+        for constraint in self._values:
+            if isinstance(constraint, AbstractConstraint):
+                constraint(value, idx)
+            # NOTE(review): `self._set` is never assigned by this class or by
+            # AbstractConstraint, so a scalar member makes this branch raise
+            # AttributeError instead of ValueConstraintError. The docstring
+            # example also contradicts this loop's semantics. Mirrors
+            # upstream pyasn1 -- confirm upstream before changing.
+            elif value not in self._set:
+                raise error.ValueConstraintError(value)
+
+
+class ValueRangeConstraint(AbstractConstraint):
+    """Create a ValueRangeConstraint object.
+
+    The ValueRangeConstraint satisfies any value that
+    falls in the range of permitted values.
+
+    The ValueRangeConstraint object can only be applied
+    to :class:`~pyasn1.type.univ.Integer` and
+    :class:`~pyasn1.type.univ.Real` types.
+
+    Parameters
+    ----------
+    start: :class:`int`
+        Minimum permitted value in the range (inclusive)
+
+    end: :class:`int`
+        Maximum permitted value in the range (inclusive)
+
+    Examples
+    --------
+    .. code-block:: python
+
+        class TeenAgeYears(Integer):
+            '''
+            ASN.1 specification:
+
+            TeenAgeYears ::= INTEGER (13 .. 19)
+            '''
+            subtypeSpec = ValueRangeConstraint(13, 19)
+
+        # this will succeed
+        teen_year = TeenAgeYears(18)
+
+        # this will raise ValueConstraintError
+        teen_year = TeenAgeYears(20)
+    """
+    def _testValue(self, value, idx):
+        # Both bounds are inclusive.
+        if value < self.start or value > self.stop:
+            raise error.ValueConstraintError(value)
+
+    def _setValues(self, values):
+        # Exactly two values are expected: (start, stop), start <= stop.
+        if len(values) != 2:
+            raise error.PyAsn1Error(
+                '%s: bad constraint values' % (self.__class__.__name__,)
+            )
+        self.start, self.stop = values
+        if self.start > self.stop:
+            raise error.PyAsn1Error(
+                '%s: screwed constraint values (start > stop): %s > %s' % (
+                    self.__class__.__name__,
+                    self.start, self.stop
+                )
+            )
+        AbstractConstraint._setValues(self, values)
+
+
+class ValueSizeConstraint(ValueRangeConstraint):
+    """Create a ValueSizeConstraint object.
+
+    The ValueSizeConstraint satisfies any value for
+    as long as its size falls within the range of
+    permitted sizes.
+
+    The ValueSizeConstraint object can be applied
+    to :class:`~pyasn1.type.univ.BitString`,
+    :class:`~pyasn1.type.univ.OctetString` (including
+    all :ref:`character ASN.1 types <type.char>`),
+    :class:`~pyasn1.type.univ.SequenceOf`
+    and :class:`~pyasn1.type.univ.SetOf` types.
+
+    Parameters
+    ----------
+    minimum: :class:`int`
+        Minimum permitted size of the value (inclusive)
+
+    maximum: :class:`int`
+        Maximum permitted size of the value (inclusive)
+
+    Examples
+    --------
+    .. code-block:: python
+
+        class BaseballTeamRoster(SetOf):
+            '''
+            ASN.1 specification:
+
+            BaseballTeamRoster ::= SET SIZE (1..25) OF PlayerNames
+            '''
+            componentType = PlayerNames()
+            subtypeSpec = ValueSizeConstraint(1, 25)
+
+        # this will succeed
+        team = BaseballTeamRoster()
+        team.extend(['Jan', 'Matej'])
+        encode(team)
+
+        # this will raise ValueConstraintError
+        team = BaseballTeamRoster()
+        team.extend(['Jan'] * 26)
+        encode(team)
+
+    Note
+    ----
+    Whenever ValueSizeConstraint is applied to mutable types
+    (e.g. :class:`~pyasn1.type.univ.SequenceOf`,
+    :class:`~pyasn1.type.univ.SetOf`), constraint
+    validation only happens at the serialisation phase rather
+    than schema instantiation phase (as it is with immutable
+    types).
+    """
+    def _testValue(self, value, idx):
+        # Constrains len(value), not the value itself; bounds are inclusive.
+        valueSize = len(value)
+        if valueSize < self.start or valueSize > self.stop:
+            raise error.ValueConstraintError(value)
+
+
+class PermittedAlphabetConstraint(SingleValueConstraint):
+ """Create a PermittedAlphabetConstraint object.
+
+ The PermittedAlphabetConstraint satisfies any character
+ string for as long as all its characters are present in
+ the set of permitted characters.
+
+ Objects of this type are iterable (emitting constraint values) and
+ can act as operands for some arithmetic operations e.g. addition
+ and subtraction.
+
+ The PermittedAlphabetConstraint object can only be applied
+ to the :ref:`character ASN.1 types <type.char>` such as
+ :class:`~pyasn1.type.char.IA5String`.
+
+ Parameters
+ ----------
+ *alphabet: :class:`str`
+ Full set of characters permitted by this constraint object.
+
+ Example
+ -------
+ .. code-block:: python
+
+ class BooleanValue(IA5String):
+ '''
+ ASN.1 specification:
+
+ BooleanValue ::= IA5String (FROM ('T' | 'F'))
+ '''
+ subtypeSpec = PermittedAlphabetConstraint('T', 'F')
+
+ # this will succeed
+ truth = BooleanValue('T')
+ truth = BooleanValue('TF')
+
+ # this will raise ValueConstraintError
+ garbage = BooleanValue('TAF')
+
+ ASN.1 `FROM ... EXCEPT ...` clause can be modelled by combining multiple
+ PermittedAlphabetConstraint objects into one:
+
+ Example
+ -------
+ .. code-block:: python
+
+ class Lipogramme(IA5String):
+ '''
+ ASN.1 specification:
+
+ Lipogramme ::=
+ IA5String (FROM (ALL EXCEPT ("e"|"E")))
+ '''
+ subtypeSpec = (
+ PermittedAlphabetConstraint(*string.printable) -
+ PermittedAlphabetConstraint('e', 'E')
+ )
+
+ # this will succeed
+ lipogramme = Lipogramme('A work of fiction?')
+
+ # this will raise ValueConstraintError
+ lipogramme = Lipogramme('Eel')
+
+ Note
+ ----
+ Although `ConstraintsExclusion` object could seemingly be used for this
+ purpose, practically, for it to work, it needs to represent its operand
+ constraints as sets and intersect one with the other. That would require
+ the insight into the constraint values (and their types) that are otherwise
+ hidden inside the constraint object.
+
+ Therefore it's more practical to model `EXCEPT` clause at
+ `PermittedAlphabetConstraint` level instead.
+ """
+ def _setValues(self, values):
+ self._values = values
+ self._set = set(values)
+
+ def _testValue(self, value, idx):
+ if not self._set.issuperset(value):
+ raise error.ValueConstraintError(value)
+
+
+class ComponentPresentConstraint(AbstractConstraint):
+ """Create a ComponentPresentConstraint object.
+
+ The ComponentPresentConstraint is only satisfied when the value
+ is not `None`.
+
+ The ComponentPresentConstraint object is typically used with
+ `WithComponentsConstraint`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ present = ComponentPresentConstraint()
+
+ # this will succeed
+ present('whatever')
+
+ # this will raise ValueConstraintError
+ present(None)
+ """
+ def _setValues(self, values):
+ self._values = ('<must be present>',)
+
+ if values:
+ raise error.PyAsn1Error('No arguments expected')
+
+ def _testValue(self, value, idx):
+ if value is None:
+ raise error.ValueConstraintError(
+ 'Component is not present:')
+
+
+class ComponentAbsentConstraint(AbstractConstraint):
+ """Create a ComponentAbsentConstraint object.
+
+ The ComponentAbsentConstraint is only satisfied when the value
+ is `None`.
+
+ The ComponentAbsentConstraint object is typically used with
+ `WithComponentsConstraint`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ absent = ComponentAbsentConstraint()
+
+ # this will succeed
+ absent(None)
+
+ # this will raise ValueConstraintError
+ absent('whatever')
+ """
+ def _setValues(self, values):
+ self._values = ('<must be absent>',)
+
+ if values:
+ raise error.PyAsn1Error('No arguments expected')
+
+ def _testValue(self, value, idx):
+ if value is not None:
+ raise error.ValueConstraintError(
+ 'Component is not absent: %r' % value)
+
+
+class WithComponentsConstraint(AbstractConstraint):
+ """Create a WithComponentsConstraint object.
+
+ The `WithComponentsConstraint` satisfies any mapping object that has
+ constrained fields present or absent, what is indicated by
+ `ComponentPresentConstraint` and `ComponentAbsentConstraint`
+ objects respectively.
+
+ The `WithComponentsConstraint` object is typically applied
+ to :class:`~pyasn1.type.univ.Set` or
+ :class:`~pyasn1.type.univ.Sequence` types.
+
+ Parameters
+ ----------
+ *fields: :class:`tuple`
+ Zero or more tuples of (`field`, `constraint`) indicating constrained
+ fields.
+
+ Notes
+ -----
+ On top of the primary use of `WithComponentsConstraint` (ensuring presence
+ or absence of particular components of a :class:`~pyasn1.type.univ.Set` or
+ :class:`~pyasn1.type.univ.Sequence`), it is also possible to pass any other
+ constraint objects or their combinations. In case of scalar fields, these
+ constraints will be verified in addition to the constraints belonging to
+ scalar components themselves. However, formally, these additional
+ constraints do not change the type of these ASN.1 objects.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Item(Sequence): # Set is similar
+ '''
+ ASN.1 specification:
+
+ Item ::= SEQUENCE {
+ id INTEGER OPTIONAL,
+ name OCTET STRING OPTIONAL
+ } WITH COMPONENTS id PRESENT, name ABSENT | id ABSENT, name PRESENT
+ '''
+ componentType = NamedTypes(
+ OptionalNamedType('id', Integer()),
+ OptionalNamedType('name', OctetString())
+ )
+ withComponents = ConstraintsUnion(
+ WithComponentsConstraint(
+ ('id', ComponentPresentConstraint()),
+ ('name', ComponentAbsentConstraint())
+ ),
+ WithComponentsConstraint(
+ ('id', ComponentAbsentConstraint()),
+ ('name', ComponentPresentConstraint())
+ )
+ )
+
+ item = Item()
+
+ # This will succeed
+ item['id'] = 1
+
+ # This will succeed
+ item.reset()
+ item['name'] = 'John'
+
+ # This will fail (on encoding)
+ item.reset()
+ descr['id'] = 1
+ descr['name'] = 'John'
+ """
+ def _testValue(self, value, idx):
+ for field, constraint in self._values:
+ constraint(value.get(field))
+
+ def _setValues(self, values):
+ AbstractConstraint._setValues(self, values)
+
+
+# This is a bit kludgy, meaning two op modes within a single constraint
+class InnerTypeConstraint(AbstractConstraint):
+    """Value must satisfy the type and presence constraints"""
+
+    # Two modes: a single inner-type constraint applied to every component,
+    # or a per-position mapping of (constraint, presence-status) pairs.
+
+    def _testValue(self, value, idx):
+        if self.__singleTypeConstraint:
+            self.__singleTypeConstraint(value)
+        elif self.__multipleTypeConstraint:
+            if idx not in self.__multipleTypeConstraint:
+                raise error.ValueConstraintError(value)
+            constraint, status = self.__multipleTypeConstraint[idx]
+            if status == 'ABSENT':  # XXX presence is not checked!
+                raise error.ValueConstraintError(value)
+            constraint(value)
+
+    def _setValues(self, values):
+        self.__multipleTypeConstraint = {}
+        self.__singleTypeConstraint = None
+        for v in values:
+            if isinstance(v, tuple):
+                # (position, constraint, presence-status) triple
+                self.__multipleTypeConstraint[v[0]] = v[1], v[2]
+            else:
+                self.__singleTypeConstraint = v
+        AbstractConstraint._setValues(self, values)
+
+
+# Logic operations on constraints
+
+class ConstraintsExclusion(AbstractConstraint):
+ """Create a ConstraintsExclusion logic operator object.
+
+ The ConstraintsExclusion logic operator succeeds when the
+ value does *not* satisfy the operand constraint.
+
+ The ConstraintsExclusion object can be applied to
+ any constraint and logic operator object.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class LuckyNumber(Integer):
+ subtypeSpec = ConstraintsExclusion(
+ SingleValueConstraint(13)
+ )
+
+ # this will succeed
+ luckyNumber = LuckyNumber(12)
+
+ # this will raise ValueConstraintError
+ luckyNumber = LuckyNumber(13)
+
+ Note
+ ----
+ The `FROM ... EXCEPT ...` ASN.1 clause should be modeled by combining
+ constraint objects into one. See `PermittedAlphabetConstraint` for more
+ information.
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ try:
+ constraint(value, idx)
+
+ except error.ValueConstraintError:
+ continue
+
+ raise error.ValueConstraintError(value)
+
+ def _setValues(self, values):
+ AbstractConstraint._setValues(self, values)
+
+
+class AbstractConstraintSet(AbstractConstraint):
+
+ def __getitem__(self, idx):
+ return self._values[idx]
+
+ def __iter__(self):
+ return iter(self._values)
+
+ def __add__(self, value):
+ return self.__class__(*(self._values + (value,)))
+
+ def __radd__(self, value):
+ return self.__class__(*((value,) + self._values))
+
+ def __len__(self):
+ return len(self._values)
+
+ # Constraints inclusion in sets
+
+ def _setValues(self, values):
+ self._values = values
+ for constraint in values:
+ if constraint:
+ self._valueMap.add(constraint)
+ self._valueMap.update(constraint.getValueMap())
+
+
+class ConstraintsIntersection(AbstractConstraintSet):
+    """Create a ConstraintsIntersection logic operator object.
+
+    The ConstraintsIntersection logic operator only succeeds
+    if *all* its operands succeed.
+
+    The ConstraintsIntersection object can be applied to
+    any constraint and logic operator objects.
+
+    The ConstraintsIntersection object duck-types the immutable
+    container object like Python :py:class:`tuple`.
+
+    Parameters
+    ----------
+    *constraints:
+        Constraint or logic operator objects.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        class CapitalAndSmall(IA5String):
+            '''
+            ASN.1 specification:
+
+            CapitalAndSmall ::=
+                IA5String (FROM ("A".."Z"|"a".."z"))
+            '''
+            subtypeSpec = ConstraintsIntersection(
+                PermittedAlphabetConstraint('A', 'Z'),
+                PermittedAlphabetConstraint('a', 'z')
+            )
+
+        # this will succeed
+        capital_and_small = CapitalAndSmall('Hello')
+
+        # this will raise ValueConstraintError
+        capital_and_small = CapitalAndSmall('hello')
+    """
+    def _testValue(self, value, idx):
+        # Every member constraint must accept the value; the first failing
+        # member lets its ValueConstraintError propagate.
+        for constraint in self._values:
+            constraint(value, idx)
+
+
+class ConstraintsUnion(AbstractConstraintSet):
+    """Create a ConstraintsUnion logic operator object.
+
+    The ConstraintsUnion logic operator succeeds if
+    *at least* a single operand succeeds.
+
+    The ConstraintsUnion object can be applied to
+    any constraint and logic operator objects.
+
+    The ConstraintsUnion object duck-types the immutable
+    container object like Python :py:class:`tuple`.
+
+    Parameters
+    ----------
+    *constraints:
+        Constraint or logic operator objects.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        class CapitalOrSmall(IA5String):
+            '''
+            ASN.1 specification:
+
+            CapitalOrSmall ::=
+                IA5String (FROM ("A".."Z") | FROM ("a".."z"))
+            '''
+            subtypeSpec = ConstraintsUnion(
+                PermittedAlphabetConstraint('A', 'Z'),
+                PermittedAlphabetConstraint('a', 'z')
+            )
+
+        # this will succeed
+        capital_or_small = CapitalOrSmall('Hello')
+
+        # this will raise ValueConstraintError
+        capital_or_small = CapitalOrSmall('hello!')
+    """
+    def _testValue(self, value, idx):
+        # Accept as soon as any one member constraint accepts the value.
+        for constraint in self._values:
+            try:
+                constraint(value, idx)
+            except error.ValueConstraintError:
+                pass
+            else:
+                return
+
+        raise error.ValueConstraintError(
+            'all of %s failed for "%s"' % (self._values, value)
+        )
+
+# TODO:
+# refactor InnerTypeConstraint
+# add tests for type check
+# implement other constraint types
+# make constraint validation easy to skip
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/error.py b/contrib/python/pyasn1/py2/pyasn1/type/error.py
new file mode 100644
index 0000000000..0ff082abc2
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/error.py
@@ -0,0 +1,11 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1.error import PyAsn1Error
+
+
+class ValueConstraintError(PyAsn1Error):
+ pass
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/namedtype.py b/contrib/python/pyasn1/py2/pyasn1/type/namedtype.py
new file mode 100644
index 0000000000..8dbc81f3c7
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/namedtype.py
@@ -0,0 +1,561 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+__all__ = ['NamedType', 'OptionalNamedType', 'DefaultedNamedType',
+ 'NamedTypes']
+
+try:
+ any
+
+except NameError:
+ any = lambda x: bool(filter(bool, x))
+
+
+class NamedType(object):
+ """Create named field object for a constructed ASN.1 type.
+
+ The |NamedType| object represents a single name and ASN.1 type of a constructed ASN.1 type.
+
+ |NamedType| objects are immutable and duck-type Python :class:`tuple` objects
+ holding *name* and *asn1Object* components.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ asn1Object:
+ ASN.1 type object
+ """
+ isOptional = False
+ isDefaulted = False
+
+ def __init__(self, name, asn1Object, openType=None):
+ self.__name = name
+ self.__type = asn1Object
+ self.__nameAndType = name, asn1Object
+ self.__openType = openType
+
+ def __repr__(self):
+ representation = '%s=%r' % (self.name, self.asn1Object)
+
+ if self.openType:
+ representation += ', open type %r' % self.openType
+
+ return '<%s object, type %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__nameAndType == other
+
+ def __ne__(self, other):
+ return self.__nameAndType != other
+
+ def __lt__(self, other):
+ return self.__nameAndType < other
+
+ def __le__(self, other):
+ return self.__nameAndType <= other
+
+ def __gt__(self, other):
+ return self.__nameAndType > other
+
+ def __ge__(self, other):
+ return self.__nameAndType >= other
+
+ def __hash__(self):
+ return hash(self.__nameAndType)
+
+ def __getitem__(self, idx):
+ return self.__nameAndType[idx]
+
+ def __iter__(self):
+ return iter(self.__nameAndType)
+
+ @property
+ def name(self):
+ return self.__name
+
+ @property
+ def asn1Object(self):
+ return self.__type
+
+ @property
+ def openType(self):
+ return self.__openType
+
+ # Backward compatibility
+
+ def getName(self):
+ return self.name
+
+ def getType(self):
+ return self.asn1Object
+
+
+class OptionalNamedType(NamedType):
+ __doc__ = NamedType.__doc__
+
+ isOptional = True
+
+
+class DefaultedNamedType(NamedType):
+ __doc__ = NamedType.__doc__
+
+ isDefaulted = True
+
+
+class NamedTypes(object):
+ """Create a collection of named fields for a constructed ASN.1 type.
+
+ The NamedTypes object represents a collection of named fields of a constructed ASN.1 type.
+
+ *NamedTypes* objects are immutable and duck-type Python :class:`dict` objects
+ holding *name* as keys and ASN.1 type object as values.
+
+ Parameters
+ ----------
+ *namedTypes: :class:`~pyasn1.type.namedtype.NamedType`
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Description(Sequence):
+ '''
+ ASN.1 specification:
+
+ Description ::= SEQUENCE {
+ surname IA5String,
+ first-name IA5String OPTIONAL,
+ age INTEGER DEFAULT 40
+ }
+ '''
+ componentType = NamedTypes(
+ NamedType('surname', IA5String()),
+ OptionalNamedType('first-name', IA5String()),
+ DefaultedNamedType('age', Integer(40))
+ )
+
+ descr = Description()
+ descr['surname'] = 'Smith'
+ descr['first-name'] = 'John'
+ """
+ def __init__(self, *namedTypes, **kwargs):
+ self.__namedTypes = namedTypes
+ self.__namedTypesLen = len(self.__namedTypes)
+ self.__minTagSet = self.__computeMinTagSet()
+ self.__nameToPosMap = self.__computeNameToPosMap()
+ self.__tagToPosMap = self.__computeTagToPosMap()
+ self.__ambiguousTypes = 'terminal' not in kwargs and self.__computeAmbiguousTypes() or {}
+ self.__uniqueTagMap = self.__computeTagMaps(unique=True)
+ self.__nonUniqueTagMap = self.__computeTagMaps(unique=False)
+ self.__hasOptionalOrDefault = any([True for namedType in self.__namedTypes
+ if namedType.isDefaulted or namedType.isOptional])
+ self.__hasOpenTypes = any([True for namedType in self.__namedTypes
+ if namedType.openType])
+
+ self.__requiredComponents = frozenset(
+ [idx for idx, nt in enumerate(self.__namedTypes) if not nt.isOptional and not nt.isDefaulted]
+ )
+ self.__keys = frozenset([namedType.name for namedType in self.__namedTypes])
+ self.__values = tuple([namedType.asn1Object for namedType in self.__namedTypes])
+ self.__items = tuple([(namedType.name, namedType.asn1Object) for namedType in self.__namedTypes])
+
+ def __repr__(self):
+ representation = ', '.join(['%r' % x for x in self.__namedTypes])
+ return '<%s object, types %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__namedTypes == other
+
+ def __ne__(self, other):
+ return self.__namedTypes != other
+
+ def __lt__(self, other):
+ return self.__namedTypes < other
+
+ def __le__(self, other):
+ return self.__namedTypes <= other
+
+ def __gt__(self, other):
+ return self.__namedTypes > other
+
+ def __ge__(self, other):
+ return self.__namedTypes >= other
+
+ def __hash__(self):
+ return hash(self.__namedTypes)
+
+ def __getitem__(self, idx):
+ try:
+ return self.__namedTypes[idx]
+
+ except TypeError:
+ return self.__namedTypes[self.__nameToPosMap[idx]]
+
+ def __contains__(self, key):
+ return key in self.__nameToPosMap
+
+ def __iter__(self):
+ return (x[0] for x in self.__namedTypes)
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self.__namedTypesLen > 0
+ else:
+ def __bool__(self):
+ return self.__namedTypesLen > 0
+
+ def __len__(self):
+ return self.__namedTypesLen
+
+ # Python dict protocol
+
+ def values(self):
+ return self.__values
+
+ def keys(self):
+ return self.__keys
+
+ def items(self):
+ return self.__items
+
+ def clone(self):
+ return self.__class__(*self.__namedTypes)
+
+ class PostponedError(object):
+ def __init__(self, errorMsg):
+ self.__errorMsg = errorMsg
+
+ def __getitem__(self, item):
+ raise error.PyAsn1Error(self.__errorMsg)
+
+ def __computeTagToPosMap(self):
+ tagToPosMap = {}
+ for idx, namedType in enumerate(self.__namedTypes):
+ tagMap = namedType.asn1Object.tagMap
+ if isinstance(tagMap, NamedTypes.PostponedError):
+ return tagMap
+ if not tagMap:
+ continue
+ for _tagSet in tagMap.presentTypes:
+ if _tagSet in tagToPosMap:
+ return NamedTypes.PostponedError('Duplicate component tag %s at %s' % (_tagSet, namedType))
+ tagToPosMap[_tagSet] = idx
+
+ return tagToPosMap
+
+ def __computeNameToPosMap(self):
+ nameToPosMap = {}
+ for idx, namedType in enumerate(self.__namedTypes):
+ if namedType.name in nameToPosMap:
+ return NamedTypes.PostponedError('Duplicate component name %s at %s' % (namedType.name, namedType))
+ nameToPosMap[namedType.name] = idx
+
+ return nameToPosMap
+
+ def __computeAmbiguousTypes(self):
+ ambiguousTypes = {}
+ partialAmbiguousTypes = ()
+ for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))):
+ if namedType.isOptional or namedType.isDefaulted:
+ partialAmbiguousTypes = (namedType,) + partialAmbiguousTypes
+ else:
+ partialAmbiguousTypes = (namedType,)
+ if len(partialAmbiguousTypes) == len(self.__namedTypes):
+ ambiguousTypes[idx] = self
+ else:
+ ambiguousTypes[idx] = NamedTypes(*partialAmbiguousTypes, **dict(terminal=True))
+ return ambiguousTypes
+
+ def getTypeByPosition(self, idx):
+ """Return ASN.1 type object by its position in fields set.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ :
+ ASN.1 type
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__namedTypes[idx].asn1Object
+
+ except IndexError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByType(self, tagSet):
+ """Return field position by its ASN.1 type.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ ASN.1 tag set distinguishing one ASN.1 type from others.
+
+ Returns
+ -------
+ : :py:class:`int`
+ ASN.1 type position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes*
+ """
+ try:
+ return self.__tagToPosMap[tagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('Type %s not found' % (tagSet,))
+
+ def getNameByPosition(self, idx):
+ """Return field name by its position in fields set.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ : :py:class:`str`
+ Field name
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__namedTypes[idx].name
+
+ except IndexError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByName(self, name):
+ """Return field position by filed name.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ Returns
+ -------
+ : :py:class:`int`
+ Field position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *name* is not present or not unique within callee *NamedTypes*
+ """
+ try:
+ return self.__nameToPosMap[name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ def getTagMapNearPosition(self, idx):
+ """Return ASN.1 types that are allowed at or past given field position.
+
+ Some ASN.1 serialisation allow for skipping optional and defaulted fields.
+ Some constructed ASN.1 types allow reordering of the fields. When recovering
+ such objects it may be important to know which types can possibly be
+ present at any given position in the field sets.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tagmap.TagMap`
+ Map of ASN.1 types allowed at given field position
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__ambiguousTypes[idx].tagMap
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionNearType(self, tagSet, idx):
+ """Return the closest field position where given ASN.1 type is allowed.
+
+ Some ASN.1 serialisation allow for skipping optional and defaulted fields.
+ Some constructed ASN.1 types allow reordering of the fields. When recovering
+ such objects it may be important to know at which field position, in field set,
+ given *tagSet* is allowed at or past *idx* position.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ ASN.1 type which field position to look up
+
+ idx: :py:class:`int`
+ Field position at or past which to perform ASN.1 type look up
+
+ Returns
+ -------
+ : :py:class:`int`
+ Field position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *tagSet* is not present or not unique within callee *NamedTypes*
+ or *idx* is out of fields range
+ """
+ try:
+ return idx + self.__ambiguousTypes[idx].getPositionByType(tagSet)
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def __computeMinTagSet(self):
+ minTagSet = None
+ for namedType in self.__namedTypes:
+ asn1Object = namedType.asn1Object
+
+ try:
+ tagSet = asn1Object.minTagSet
+
+ except AttributeError:
+ tagSet = asn1Object.tagSet
+
+ if minTagSet is None or tagSet < minTagSet:
+ minTagSet = tagSet
+
+ return minTagSet or tag.TagSet()
+
+ @property
+ def minTagSet(self):
+ """Return the minimal TagSet among ASN.1 type in callee *NamedTypes*.
+
+ Some ASN.1 types/serialisation protocols require ASN.1 types to be
+ arranged based on their numerical tag value. The *minTagSet* property
+ returns that.
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ Minimal TagSet among ASN.1 types in callee *NamedTypes*
+ """
+ return self.__minTagSet
+
+ def __computeTagMaps(self, unique):
+ presentTypes = {}
+ skipTypes = {}
+ defaultType = None
+ for namedType in self.__namedTypes:
+ tagMap = namedType.asn1Object.tagMap
+ if isinstance(tagMap, NamedTypes.PostponedError):
+ return tagMap
+ for tagSet in tagMap:
+ if unique and tagSet in presentTypes:
+ return NamedTypes.PostponedError('Non-unique tagSet %s of %s at %s' % (tagSet, namedType, self))
+ presentTypes[tagSet] = namedType.asn1Object
+ skipTypes.update(tagMap.skipTypes)
+
+ if defaultType is None:
+ defaultType = tagMap.defaultType
+ elif tagMap.defaultType is not None:
+ return NamedTypes.PostponedError('Duplicate default ASN.1 type at %s' % (self,))
+
+ return tagmap.TagMap(presentTypes, skipTypes, defaultType)
+
+ @property
+ def tagMap(self):
+ """Return a *TagMap* object from tags and types recursively.
+
+ Return a :class:`~pyasn1.type.tagmap.TagMap` object by
+ combining tags from *TagMap* objects of children types and
+ associating them with their immediate child type.
+
+ Example
+ -------
+ .. code-block:: python
+
+ OuterType ::= CHOICE {
+ innerType INTEGER
+ }
+
+ Calling *.tagMap* on *OuterType* will yield a map like this:
+
+ .. code-block:: python
+
+ Integer.tagSet -> Choice
+ """
+ return self.__nonUniqueTagMap
+
+ @property
+ def tagMapUnique(self):
+ """Return a *TagMap* object from unique tags and types recursively.
+
+ Return a :class:`~pyasn1.type.tagmap.TagMap` object by
+ combining tags from *TagMap* objects of children types and
+ associating them with their immediate child type.
+
+ Example
+ -------
+ .. code-block:: python
+
+ OuterType ::= CHOICE {
+ innerType INTEGER
+ }
+
+ Calling *.tagMapUnique* on *OuterType* will yield a map like this:
+
+ .. code-block:: python
+
+ Integer.tagSet -> Choice
+
+ Note
+ ----
+
+ Duplicate *TagSet* objects found in the tree of children
+ types would cause error.
+ """
+ return self.__uniqueTagMap
+
+ @property
+ def hasOptionalOrDefault(self):
+ return self.__hasOptionalOrDefault
+
+ @property
+ def hasOpenTypes(self):
+ return self.__hasOpenTypes
+
+ @property
+ def namedTypes(self):
+ return tuple(self.__namedTypes)
+
+ @property
+ def requiredComponents(self):
+ return self.__requiredComponents
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/namedval.py b/contrib/python/pyasn1/py2/pyasn1/type/namedval.py
new file mode 100644
index 0000000000..46a6496d03
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/namedval.py
@@ -0,0 +1,192 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+# ASN.1 named integers
+#
+from pyasn1 import error
+
+__all__ = ['NamedValues']
+
+
+class NamedValues(object):
+ """Create named values object.
+
+ The |NamedValues| object represents a collection of string names
+ associated with numeric IDs. These objects are used for giving
+ names to otherwise numerical values.
+
+ |NamedValues| objects are immutable and duck-type Python
+ :class:`dict` object mapping ID to name and vice-versa.
+
+ Parameters
+ ----------
+ *args: variable number of two-element :py:class:`tuple`
+
+ name: :py:class:`str`
+ Value label
+
+ value: :py:class:`int`
+ Numeric value
+
+ Keyword Args
+ ------------
+ name: :py:class:`str`
+ Value label
+
+ value: :py:class:`int`
+ Numeric value
+
+ Examples
+ --------
+
+ .. code-block:: pycon
+
+ >>> nv = NamedValues('a', 'b', ('c', 0), d=1)
+ >>> nv
+ {'c': 0, 'd': 1, 'a': 2, 'b': 3}
+ >>> nv[0]
+ 'c'
+ >>> nv['a']
+ 2
+ """
+ def __init__(self, *args, **kwargs):
+ self.__names = {}
+ self.__numbers = {}
+
+ anonymousNames = []
+
+ for namedValue in args:
+ if isinstance(namedValue, (tuple, list)):
+ try:
+ name, number = namedValue
+
+ except ValueError:
+ raise error.PyAsn1Error('Not a proper attribute-value pair %r' % (namedValue,))
+
+ else:
+ anonymousNames.append(namedValue)
+ continue
+
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ if number in self.__numbers:
+ raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ for name, number in kwargs.items():
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ if number in self.__numbers:
+ raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ if anonymousNames:
+
+ number = self.__numbers and max(self.__numbers) + 1 or 0
+
+ for name in anonymousNames:
+
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ number += 1
+
+ def __repr__(self):
+ representation = ', '.join(['%s=%d' % x for x in self.items()])
+
+ if len(representation) > 64:
+ representation = representation[:32] + '...' + representation[-32:]
+
+ return '<%s object, enums %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return dict(self) == other
+
+ def __ne__(self, other):
+ return dict(self) != other
+
+ def __lt__(self, other):
+ return dict(self) < other
+
+ def __le__(self, other):
+ return dict(self) <= other
+
+ def __gt__(self, other):
+ return dict(self) > other
+
+ def __ge__(self, other):
+ return dict(self) >= other
+
+ def __hash__(self):
+ return hash(self.items())
+
+ # Python dict protocol (read-only)
+
+ def __getitem__(self, key):
+ try:
+ return self.__numbers[key]
+
+ except KeyError:
+ return self.__names[key]
+
+ def __len__(self):
+ return len(self.__names)
+
+ def __contains__(self, key):
+ return key in self.__names or key in self.__numbers
+
+ def __iter__(self):
+ return iter(self.__names)
+
+ def values(self):
+ return iter(self.__numbers)
+
+ def keys(self):
+ return iter(self.__names)
+
+ def items(self):
+ for name in self.__names:
+ yield name, self.__names[name]
+
+ # support merging
+
+ def __add__(self, namedValues):
+ return self.__class__(*tuple(self.items()) + tuple(namedValues.items()))
+
+ # XXX clone/subtype?
+
+ def clone(self, *args, **kwargs):
+ new = self.__class__(*args, **kwargs)
+ return self + new
+
+ # legacy protocol
+
+ def getName(self, value):
+ if value in self.__numbers:
+ return self.__numbers[value]
+
+ def getValue(self, name):
+ if name in self.__names:
+ return self.__names[name]
+
+ def getValues(self, *names):
+ try:
+ return [self.__names[name] for name in names]
+
+ except KeyError:
+ raise error.PyAsn1Error(
+ 'Unknown bit identifier(s): %s' % (set(names).difference(self.__names),)
+ )
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/opentype.py b/contrib/python/pyasn1/py2/pyasn1/type/opentype.py
new file mode 100644
index 0000000000..5a15f896da
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/opentype.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+
+__all__ = ['OpenType']
+
+
+class OpenType(object):
+ """Create ASN.1 type map indexed by a value
+
+ The *OpenType* object models an untyped field of a constructed ASN.1
+ type. In ASN.1 syntax it is usually represented by the
+ `ANY DEFINED BY` for scalars or `SET OF ANY DEFINED BY`,
+ `SEQUENCE OF ANY DEFINED BY` for container types clauses. Typically
+ used together with :class:`~pyasn1.type.univ.Any` object.
+
+ OpenType objects duck-type a read-only Python :class:`dict` objects,
+ however the passed `typeMap` is not copied, but stored by reference.
+ That means the user can manipulate `typeMap` at run time having this
+ reflected on *OpenType* object behavior.
+
+ The |OpenType| class models an untyped field of a constructed ASN.1
+ type. In ASN.1 syntax it is usually represented by the
+ `ANY DEFINED BY` for scalars or `SET OF ANY DEFINED BY`,
+ `SEQUENCE OF ANY DEFINED BY` for container types clauses. Typically
+ used with :class:`~pyasn1.type.univ.Any` type.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ typeMap: :py:class:`dict`
+ A map of value->ASN.1 type. It's stored by reference and can be
+ mutated later to register new mappings.
+
+ Examples
+ --------
+
+ For untyped scalars:
+
+ .. code-block:: python
+
+ openType = OpenType(
+ 'id', {1: Integer(),
+ 2: OctetString()}
+ )
+ Sequence(
+ componentType=NamedTypes(
+ NamedType('id', Integer()),
+ NamedType('blob', Any(), openType=openType)
+ )
+ )
+
+ For untyped `SET OF` or `SEQUENCE OF` vectors:
+
+ .. code-block:: python
+
+ openType = OpenType(
+ 'id', {1: Integer(),
+ 2: OctetString()}
+ )
+ Sequence(
+ componentType=NamedTypes(
+ NamedType('id', Integer()),
+ NamedType('blob', SetOf(componentType=Any()),
+ openType=openType)
+ )
+ )
+ """
+
+ def __init__(self, name, typeMap=None):
+ self.__name = name
+ if typeMap is None:
+ self.__typeMap = {}
+ else:
+ self.__typeMap = typeMap
+
+ @property
+ def name(self):
+ return self.__name
+
+ # Python dict protocol
+
+ def values(self):
+ return self.__typeMap.values()
+
+ def keys(self):
+ return self.__typeMap.keys()
+
+ def items(self):
+ return self.__typeMap.items()
+
+ def __contains__(self, key):
+ return key in self.__typeMap
+
+ def __getitem__(self, key):
+ return self.__typeMap[key]
+
+ def __iter__(self):
+ return iter(self.__typeMap)
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/tag.py b/contrib/python/pyasn1/py2/pyasn1/type/tag.py
new file mode 100644
index 0000000000..a21a405eb1
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/tag.py
@@ -0,0 +1,335 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+
+__all__ = ['tagClassUniversal', 'tagClassApplication', 'tagClassContext',
+ 'tagClassPrivate', 'tagFormatSimple', 'tagFormatConstructed',
+ 'tagCategoryImplicit', 'tagCategoryExplicit',
+ 'tagCategoryUntagged', 'Tag', 'TagSet']
+
+#: Identifier for ASN.1 class UNIVERSAL
+tagClassUniversal = 0x00
+
+#: Identifier for ASN.1 class APPLICATION
+tagClassApplication = 0x40
+
+#: Identifier for ASN.1 class context-specific
+tagClassContext = 0x80
+
+#: Identifier for ASN.1 class private
+tagClassPrivate = 0xC0
+
+#: Identifier for "simple" ASN.1 structure (e.g. scalar)
+tagFormatSimple = 0x00
+
+#: Identifier for "constructed" ASN.1 structure (e.g. may have inner components)
+tagFormatConstructed = 0x20
+
+tagCategoryImplicit = 0x01
+tagCategoryExplicit = 0x02
+tagCategoryUntagged = 0x04
+
+
+class Tag(object):
+ """Create ASN.1 tag
+
+ Represents ASN.1 tag that can be attached to a ASN.1 type to make
+ types distinguishable from each other.
+
+ *Tag* objects are immutable and duck-type Python :class:`tuple` objects
+ holding three integer components of a tag.
+
+ Parameters
+ ----------
+ tagClass: :py:class:`int`
+ Tag *class* value
+
+ tagFormat: :py:class:`int`
+ Tag *format* value
+
+ tagId: :py:class:`int`
+ Tag ID value
+ """
+ def __init__(self, tagClass, tagFormat, tagId):
+ if tagId < 0:
+ raise error.PyAsn1Error('Negative tag ID (%s) not allowed' % tagId)
+ self.__tagClass = tagClass
+ self.__tagFormat = tagFormat
+ self.__tagId = tagId
+ self.__tagClassId = tagClass, tagId
+ self.__hash = hash(self.__tagClassId)
+
+ def __repr__(self):
+ representation = '[%s:%s:%s]' % (
+ self.__tagClass, self.__tagFormat, self.__tagId)
+ return '<%s object, tag %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__tagClassId == other
+
+ def __ne__(self, other):
+ return self.__tagClassId != other
+
+ def __lt__(self, other):
+ return self.__tagClassId < other
+
+ def __le__(self, other):
+ return self.__tagClassId <= other
+
+ def __gt__(self, other):
+ return self.__tagClassId > other
+
+ def __ge__(self, other):
+ return self.__tagClassId >= other
+
+ def __hash__(self):
+ return self.__hash
+
+ def __getitem__(self, idx):
+ if idx == 0:
+ return self.__tagClass
+ elif idx == 1:
+ return self.__tagFormat
+ elif idx == 2:
+ return self.__tagId
+ else:
+ raise IndexError()
+
+ def __iter__(self):
+ yield self.__tagClass
+ yield self.__tagFormat
+ yield self.__tagId
+
+ def __and__(self, otherTag):
+ return self.__class__(self.__tagClass & otherTag.tagClass,
+ self.__tagFormat & otherTag.tagFormat,
+ self.__tagId & otherTag.tagId)
+
+ def __or__(self, otherTag):
+ return self.__class__(self.__tagClass | otherTag.tagClass,
+ self.__tagFormat | otherTag.tagFormat,
+ self.__tagId | otherTag.tagId)
+
+ @property
+ def tagClass(self):
+ """ASN.1 tag class
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag class
+ """
+ return self.__tagClass
+
+ @property
+ def tagFormat(self):
+ """ASN.1 tag format
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag format
+ """
+ return self.__tagFormat
+
+ @property
+ def tagId(self):
+ """ASN.1 tag ID
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag ID
+ """
+ return self.__tagId
+
+
+class TagSet(object):
+ """Create a collection of ASN.1 tags
+
+ Represents a combination of :class:`~pyasn1.type.tag.Tag` objects
+ that can be attached to a ASN.1 type to make types distinguishable
+ from each other.
+
+ *TagSet* objects are immutable and duck-type Python :class:`tuple` objects
+ holding arbitrary number of :class:`~pyasn1.type.tag.Tag` objects.
+
+ Parameters
+ ----------
+ baseTag: :class:`~pyasn1.type.tag.Tag`
+ Base *Tag* object. This tag survives IMPLICIT tagging.
+
+ *superTags: :class:`~pyasn1.type.tag.Tag`
+ Additional *Tag* objects taking part in subtyping.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class OrderNumber(NumericString):
+ '''
+ ASN.1 specification
+
+ Order-number ::=
+ [APPLICATION 5] IMPLICIT NumericString
+ '''
+ tagSet = NumericString.tagSet.tagImplicitly(
+ Tag(tagClassApplication, tagFormatSimple, 5)
+ )
+
+ orderNumber = OrderNumber('1234')
+ """
+ def __init__(self, baseTag=(), *superTags):
+ self.__baseTag = baseTag
+ self.__superTags = superTags
+ self.__superTagsClassId = tuple(
+ [(superTag.tagClass, superTag.tagId) for superTag in superTags]
+ )
+ self.__lenOfSuperTags = len(superTags)
+ self.__hash = hash(self.__superTagsClassId)
+
+ def __repr__(self):
+ representation = '-'.join(['%s:%s:%s' % (x.tagClass, x.tagFormat, x.tagId)
+ for x in self.__superTags])
+ if representation:
+ representation = 'tags ' + representation
+ else:
+ representation = 'untagged'
+
+ return '<%s object, %s>' % (self.__class__.__name__, representation)
+
+ def __add__(self, superTag):
+ return self.__class__(self.__baseTag, *self.__superTags + (superTag,))
+
+ def __radd__(self, superTag):
+ return self.__class__(self.__baseTag, *(superTag,) + self.__superTags)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.__class__(self.__baseTag, *self.__superTags[i])
+ else:
+ return self.__superTags[i]
+
+ def __eq__(self, other):
+ return self.__superTagsClassId == other
+
+ def __ne__(self, other):
+ return self.__superTagsClassId != other
+
+ def __lt__(self, other):
+ return self.__superTagsClassId < other
+
+ def __le__(self, other):
+ return self.__superTagsClassId <= other
+
+ def __gt__(self, other):
+ return self.__superTagsClassId > other
+
+ def __ge__(self, other):
+ return self.__superTagsClassId >= other
+
+ def __hash__(self):
+ return self.__hash
+
+ def __len__(self):
+ return self.__lenOfSuperTags
+
+ @property
+ def baseTag(self):
+ """Return base ASN.1 tag
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.Tag`
+ Base tag of this *TagSet*
+ """
+ return self.__baseTag
+
+ @property
+ def superTags(self):
+ """Return ASN.1 tags
+
+ Returns
+ -------
+ : :py:class:`tuple`
+ Tuple of :class:`~pyasn1.type.tag.Tag` objects that this *TagSet* contains
+ """
+ return self.__superTags
+
+ def tagExplicitly(self, superTag):
+ """Return explicitly tagged *TagSet*
+
+ Create a new *TagSet* representing callee *TagSet* explicitly tagged
+ with passed tag(s). With explicit tagging mode, new tags are appended
+ to existing tag(s).
+
+ Parameters
+ ----------
+ superTag: :class:`~pyasn1.type.tag.Tag`
+ *Tag* object to tag this *TagSet*
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ New *TagSet* object
+ """
+ if superTag.tagClass == tagClassUniversal:
+ raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")
+ if superTag.tagFormat != tagFormatConstructed:
+ superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId)
+ return self + superTag
+
+ def tagImplicitly(self, superTag):
+ """Return implicitly tagged *TagSet*
+
+ Create a new *TagSet* representing callee *TagSet* implicitly tagged
+ with passed tag(s). With implicit tagging mode, new tag(s) replace the
+ last existing tag.
+
+ Parameters
+ ----------
+ superTag: :class:`~pyasn1.type.tag.Tag`
+ *Tag* object to tag this *TagSet*
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ New *TagSet* object
+ """
+ if self.__superTags:
+ superTag = Tag(superTag.tagClass, self.__superTags[-1].tagFormat, superTag.tagId)
+ return self[:-1] + superTag
+
+ def isSuperTagSetOf(self, tagSet):
+ """Test type relationship against given *TagSet*
+
+ The callee is considered to be a supertype of given *TagSet*
+ tag-wise if all tags in *TagSet* are present in the callee and
+ they are in the same order.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ *TagSet* object to evaluate against the callee
+
+ Returns
+ -------
+ : :py:class:`bool`
+ :obj:`True` if callee is a supertype of *tagSet*
+ """
+ if len(tagSet) < self.__lenOfSuperTags:
+ return False
+ return self.__superTags == tagSet[:self.__lenOfSuperTags]
+
+ # Backward compatibility
+
+ def getBaseTag(self):
+ return self.__baseTag
+
+def initTagSet(tag):
+ return TagSet(tag, tag)
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/tagmap.py b/contrib/python/pyasn1/py2/pyasn1/type/tagmap.py
new file mode 100644
index 0000000000..2f0e660264
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/tagmap.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+
+__all__ = ['TagMap']
+
+
+class TagMap(object):
+ """Map *TagSet* objects to ASN.1 types
+
+ Create an object mapping *TagSet* object to ASN.1 type.
+
+ *TagMap* objects are immutable and duck-type read-only Python
+ :class:`dict` objects holding *TagSet* objects as keys and ASN.1
+ type objects as values.
+
+ Parameters
+ ----------
+ presentTypes: :py:class:`dict`
+ Map of :class:`~pyasn1.type.tag.TagSet` to ASN.1 objects considered
+ as being unconditionally present in the *TagMap*.
+
+ skipTypes: :py:class:`dict`
+ A collection of :class:`~pyasn1.type.tag.TagSet` objects considered
+ as absent in the *TagMap* even when *defaultType* is present.
+
+ defaultType: ASN.1 type object
+ An ASN.1 type object callee *TagMap* returns for any *TagSet* key not present
+ in *presentTypes* (unless given key is present in *skipTypes*).
+ """
+ def __init__(self, presentTypes=None, skipTypes=None, defaultType=None):
+ self.__presentTypes = presentTypes or {}
+ self.__skipTypes = skipTypes or {}
+ self.__defaultType = defaultType
+
+ def __contains__(self, tagSet):
+ return (tagSet in self.__presentTypes or
+ self.__defaultType is not None and tagSet not in self.__skipTypes)
+
+ def __getitem__(self, tagSet):
+ try:
+ return self.__presentTypes[tagSet]
+ except KeyError:
+ if self.__defaultType is None:
+ raise KeyError()
+ elif tagSet in self.__skipTypes:
+ raise error.PyAsn1Error('Key in negative map')
+ else:
+ return self.__defaultType
+
+ def __iter__(self):
+ return iter(self.__presentTypes)
+
+ def __repr__(self):
+ representation = '%s object' % self.__class__.__name__
+
+ if self.__presentTypes:
+ representation += ', present %s' % repr(self.__presentTypes)
+
+ if self.__skipTypes:
+ representation += ', skip %s' % repr(self.__skipTypes)
+
+ if self.__defaultType is not None:
+ representation += ', default %s' % repr(self.__defaultType)
+
+ return '<%s>' % representation
+
+ @property
+ def presentTypes(self):
+ """Return *TagSet* to ASN.1 type map present in callee *TagMap*"""
+ return self.__presentTypes
+
+ @property
+ def skipTypes(self):
+ """Return *TagSet* collection unconditionally absent in callee *TagMap*"""
+ return self.__skipTypes
+
+ @property
+ def defaultType(self):
+ """Return default ASN.1 type being returned for any missing *TagSet*"""
+ return self.__defaultType
+
+ # Backward compatibility
+
+ def getPosMap(self):
+ return self.presentTypes
+
+ def getNegMap(self):
+ return self.skipTypes
+
+ def getDef(self):
+ return self.defaultType
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/univ.py b/contrib/python/pyasn1/py2/pyasn1/type/univ.py
new file mode 100644
index 0000000000..c5d0778096
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/univ.py
@@ -0,0 +1,3305 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import math
+import sys
+
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat import integer
+from pyasn1.compat import octets
+from pyasn1.type import base
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+NoValue = base.NoValue
+noValue = NoValue()
+
+__all__ = ['Integer', 'Boolean', 'BitString', 'OctetString', 'Null',
+ 'ObjectIdentifier', 'Real', 'Enumerated',
+ 'SequenceOfAndSetOfBase', 'SequenceOf', 'SetOf',
+ 'SequenceAndSetBase', 'Sequence', 'Set', 'Choice', 'Any',
+ 'NoValue', 'noValue']
+
+# "Simple" ASN.1 types (yet incomplete)
+
+
+class Integer(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| class
+ instance. If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class ErrorCode(Integer):
+ '''
+ ASN.1 specification:
+
+ ErrorCode ::=
+ INTEGER { disk-full(1), no-disk(-1),
+ disk-not-formatted(2) }
+
+ error ErrorCode ::= disk-full
+ '''
+ namedValues = NamedValues(
+ ('disk-full', 1), ('no-disk', -1),
+ ('disk-not-formatted', 2)
+ )
+
+ error = ErrorCode('disk-full')
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ def __init__(self, value=noValue, **kwargs):
+ if 'namedValues' not in kwargs:
+ kwargs['namedValues'] = self.namedValues
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ def __and__(self, value):
+ return self.clone(self._value & value)
+
+ def __rand__(self, value):
+ return self.clone(value & self._value)
+
+ def __or__(self, value):
+ return self.clone(self._value | value)
+
+ def __ror__(self, value):
+ return self.clone(value | self._value)
+
+ def __xor__(self, value):
+ return self.clone(self._value ^ value)
+
+ def __rxor__(self, value):
+ return self.clone(value ^ self._value)
+
+ def __lshift__(self, value):
+ return self.clone(self._value << value)
+
+ def __rshift__(self, value):
+ return self.clone(self._value >> value)
+
+ def __add__(self, value):
+ return self.clone(self._value + value)
+
+ def __radd__(self, value):
+ return self.clone(value + self._value)
+
+ def __sub__(self, value):
+ return self.clone(self._value - value)
+
+ def __rsub__(self, value):
+ return self.clone(value - self._value)
+
+ def __mul__(self, value):
+ return self.clone(self._value * value)
+
+ def __rmul__(self, value):
+ return self.clone(value * self._value)
+
+ def __mod__(self, value):
+ return self.clone(self._value % value)
+
+ def __rmod__(self, value):
+ return self.clone(value % self._value)
+
+ def __pow__(self, value, modulo=None):
+ return self.clone(pow(self._value, value, modulo))
+
+ def __rpow__(self, value):
+ return self.clone(pow(value, self._value))
+
+ def __floordiv__(self, value):
+ return self.clone(self._value // value)
+
+ def __rfloordiv__(self, value):
+ return self.clone(value // self._value)
+
+ if sys.version_info[0] <= 2:
+ def __div__(self, value):
+ if isinstance(value, float):
+ return Real(self._value / value)
+ else:
+ return self.clone(self._value / value)
+
+ def __rdiv__(self, value):
+ if isinstance(value, float):
+ return Real(value / self._value)
+ else:
+ return self.clone(value / self._value)
+ else:
+ def __truediv__(self, value):
+ return Real(self._value / value)
+
+ def __rtruediv__(self, value):
+ return Real(value / self._value)
+
+ def __divmod__(self, value):
+ return self.clone(divmod(self._value, value))
+
+ def __rdivmod__(self, value):
+ return self.clone(divmod(value, self._value))
+
+ __hash__ = base.SimpleAsn1Type.__hash__
+
+ def __int__(self):
+ return int(self._value)
+
+ if sys.version_info[0] <= 2:
+ def __long__(self):
+ return long(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ def __abs__(self):
+ return self.clone(abs(self._value))
+
+ def __index__(self):
+ return int(self._value)
+
+ def __pos__(self):
+ return self.clone(+self._value)
+
+ def __neg__(self):
+ return self.clone(-self._value)
+
+ def __invert__(self):
+ return self.clone(~self._value)
+
+ def __round__(self, n=0):
+ r = round(self._value, n)
+ if n:
+ return self.clone(r)
+ else:
+ return r
+
+ def __floor__(self):
+ return math.floor(self._value)
+
+ def __ceil__(self):
+ return math.ceil(self._value)
+
+ def __trunc__(self):
+ return self.clone(math.trunc(self._value))
+
+ def __lt__(self, value):
+ return self._value < value
+
+ def __le__(self, value):
+ return self._value <= value
+
+ def __eq__(self, value):
+ return self._value == value
+
+ def __ne__(self, value):
+ return self._value != value
+
+ def __gt__(self, value):
+ return self._value > value
+
+ def __ge__(self, value):
+ return self._value >= value
+
+ def prettyIn(self, value):
+ try:
+ return int(value)
+
+ except ValueError:
+ try:
+ return self.namedValues[value]
+
+ except KeyError:
+ raise error.PyAsn1Error(
+ 'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
+ )
+
+ def prettyOut(self, value):
+ try:
+ return str(self.namedValues[value])
+
+ except KeyError:
+ return str(value)
+
+ # backward compatibility
+
+ def getNamedValues(self):
+ return self.namedValues
+
+
+class Boolean(Integer):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| class
+ instance. If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+        Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class RoundResult(Boolean):
+ '''
+ ASN.1 specification:
+
+ RoundResult ::= BOOLEAN
+
+ ok RoundResult ::= TRUE
+ ko RoundResult ::= FALSE
+ '''
+ ok = RoundResult(True)
+ ko = RoundResult(False)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = Integer.subtypeSpec + constraint.SingleValueConstraint(0, 1)
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues(('False', 0), ('True', 1))
+
+ # Optimization for faster codec lookup
+ typeId = Integer.getTypeId()
+
+if sys.version_info[0] < 3:
+ SizedIntegerBase = long
+else:
+ SizedIntegerBase = int
+
+
+class SizedInteger(SizedIntegerBase):
+ bitLength = leadingZeroBits = None
+
+ def setBitLength(self, bitLength):
+ self.bitLength = bitLength
+ self.leadingZeroBits = max(bitLength - integer.bitLength(self), 0)
+ return self
+
+ def __len__(self):
+ if self.bitLength is None:
+ self.setBitLength(integer.bitLength(self))
+
+ return self.bitLength
+
+
+class BitString(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type both Python :class:`tuple` (as a tuple
+ of bits) and :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal representing binary
+ or hexadecimal number or sequence of integer bits or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Rights(BitString):
+ '''
+ ASN.1 specification:
+
+ Rights ::= BIT STRING { user-read(0), user-write(1),
+ group-read(2), group-write(3),
+ other-read(4), other-write(5) }
+
+ group1 Rights ::= { group-read, group-write }
+ group2 Rights ::= '0011'B
+ group3 Rights ::= '3'H
+ '''
+ namedValues = NamedValues(
+ ('user-read', 0), ('user-write', 1),
+ ('group-read', 2), ('group-write', 3),
+ ('other-read', 4), ('other-write', 5)
+ )
+
+ group1 = Rights(('group-read', 'group-write'))
+ group2 = Rights('0011')
+ group3 = Rights(0x3)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ defaultBinValue = defaultHexValue = noValue
+
+ def __init__(self, value=noValue, **kwargs):
+ if value is noValue:
+ if kwargs:
+ try:
+ value = self.fromBinaryString(kwargs.pop('binValue'), internalFormat=True)
+
+ except KeyError:
+ pass
+
+ try:
+ value = self.fromHexString(kwargs.pop('hexValue'), internalFormat=True)
+
+ except KeyError:
+ pass
+
+ if value is noValue:
+ if self.defaultBinValue is not noValue:
+ value = self.fromBinaryString(self.defaultBinValue, internalFormat=True)
+
+ elif self.defaultHexValue is not noValue:
+ value = self.fromHexString(self.defaultHexValue, internalFormat=True)
+
+ if 'namedValues' not in kwargs:
+ kwargs['namedValues'] = self.namedValues
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ def __str__(self):
+ return self.asBinary()
+
+ def __eq__(self, other):
+ other = self.prettyIn(other)
+ return self is other or self._value == other and len(self._value) == len(other)
+
+ def __ne__(self, other):
+ other = self.prettyIn(other)
+ return self._value != other or len(self._value) != len(other)
+
+ def __lt__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) < len(other) or len(self._value) == len(other) and self._value < other
+
+ def __le__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) <= len(other) or len(self._value) == len(other) and self._value <= other
+
+ def __gt__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) > len(other) or len(self._value) == len(other) and self._value > other
+
+ def __ge__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) >= len(other) or len(self._value) == len(other) and self._value >= other
+
+ # Immutable sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone([self[x] for x in range(*i.indices(len(self)))])
+ else:
+ length = len(self._value) - 1
+ if i > length or i < 0:
+ raise IndexError('bit index out of range')
+ return (self._value >> (length - i)) & 1
+
+ def __iter__(self):
+ length = len(self._value)
+ while length:
+ length -= 1
+ yield (self._value >> length) & 1
+
+ def __reversed__(self):
+ return reversed(tuple(self))
+
+ # arithmetic operators
+
+ def __add__(self, value):
+ value = self.prettyIn(value)
+ return self.clone(SizedInteger(self._value << len(value) | value).setBitLength(len(self._value) + len(value)))
+
+ def __radd__(self, value):
+ value = self.prettyIn(value)
+ return self.clone(SizedInteger(value << len(self._value) | self._value).setBitLength(len(self._value) + len(value)))
+
+ def __mul__(self, value):
+ bitString = self._value
+ while value > 1:
+ bitString <<= len(self._value)
+ bitString |= self._value
+ value -= 1
+ return self.clone(bitString)
+
+ def __rmul__(self, value):
+ return self * value
+
+ def __lshift__(self, count):
+ return self.clone(SizedInteger(self._value << count).setBitLength(len(self._value) + count))
+
+ def __rshift__(self, count):
+ return self.clone(SizedInteger(self._value >> count).setBitLength(max(0, len(self._value) - count)))
+
+ def __int__(self):
+ return int(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ if sys.version_info[0] < 3:
+ def __long__(self):
+ return self._value
+
+ def asNumbers(self):
+ """Get |ASN.1| value as a sequence of 8-bit integers.
+
+ If |ASN.1| object length is not a multiple of 8, result
+ will be left-padded with zeros.
+ """
+ return tuple(octets.octs2ints(self.asOctets()))
+
+ def asOctets(self):
+ """Get |ASN.1| value as a sequence of octets.
+
+ If |ASN.1| object length is not a multiple of 8, result
+ will be left-padded with zeros.
+ """
+ return integer.to_bytes(self._value, length=len(self))
+
+ def asInteger(self):
+ """Get |ASN.1| value as a single integer value.
+ """
+ return self._value
+
+ def asBinary(self):
+ """Get |ASN.1| value as a text string of bits.
+ """
+ binString = bin(self._value)[2:]
+ return '0' * (len(self._value) - len(binString)) + binString
+
+ @classmethod
+ def fromHexString(cls, value, internalFormat=False, prepend=None):
+ """Create a |ASN.1| object initialized from the hex string.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like 'DEADBEEF'
+ """
+ try:
+ value = SizedInteger(value, 16).setBitLength(len(value) * 4)
+
+ except ValueError:
+ raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1]))
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ @classmethod
+ def fromBinaryString(cls, value, internalFormat=False, prepend=None):
+ """Create a |ASN.1| object initialized from a string of '0' and '1'.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like '1010111'
+ """
+ try:
+ value = SizedInteger(value or '0', 2).setBitLength(len(value))
+
+ except ValueError:
+ raise error.PyAsn1Error('%s.fromBinaryString() error: %s' % (cls.__name__, sys.exc_info()[1]))
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ @classmethod
+ def fromOctetString(cls, value, internalFormat=False, prepend=None, padding=0):
+ """Create a |ASN.1| object initialized from a string.
+
+ Parameters
+ ----------
+ value: :class:`str` (Py2) or :class:`bytes` (Py3)
+ Text string like '\\\\x01\\\\xff' (Py2) or b'\\\\x01\\\\xff' (Py3)
+ """
+ value = SizedInteger(integer.from_bytes(value) >> padding).setBitLength(len(value) * 8 - padding)
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ def prettyIn(self, value):
+ if isinstance(value, SizedInteger):
+ return value
+ elif octets.isStringType(value):
+ if not value:
+ return SizedInteger(0).setBitLength(0)
+
+ elif value[0] == '\'': # "'1011'B" -- ASN.1 schema representation (deprecated)
+ if value[-2:] == '\'B':
+ return self.fromBinaryString(value[1:-2], internalFormat=True)
+ elif value[-2:] == '\'H':
+ return self.fromHexString(value[1:-2], internalFormat=True)
+ else:
+ raise error.PyAsn1Error(
+ 'Bad BIT STRING value notation %s' % (value,)
+ )
+
+ elif self.namedValues and not value.isdigit(): # named bits like 'Urgent, Active'
+ names = [x.strip() for x in value.split(',')]
+
+ try:
+
+ bitPositions = [self.namedValues[name] for name in names]
+
+ except KeyError:
+ raise error.PyAsn1Error('unknown bit name(s) in %r' % (names,))
+
+ rightmostPosition = max(bitPositions)
+
+ number = 0
+ for bitPosition in bitPositions:
+ number |= 1 << (rightmostPosition - bitPosition)
+
+ return SizedInteger(number).setBitLength(rightmostPosition + 1)
+
+ elif value.startswith('0x'):
+ return self.fromHexString(value[2:], internalFormat=True)
+
+ elif value.startswith('0b'):
+ return self.fromBinaryString(value[2:], internalFormat=True)
+
+ else: # assume plain binary string like '1011'
+ return self.fromBinaryString(value, internalFormat=True)
+
+ elif isinstance(value, (tuple, list)):
+ return self.fromBinaryString(''.join([b and '1' or '0' for b in value]), internalFormat=True)
+
+ elif isinstance(value, BitString):
+ return SizedInteger(value).setBitLength(len(value))
+
+ elif isinstance(value, intTypes):
+ return SizedInteger(value)
+
+ else:
+ raise error.PyAsn1Error(
+ 'Bad BitString initializer type \'%s\'' % (value,)
+ )
+
+
+class OctetString(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python 2 :class:`str` or
+ Python 3 :class:`bytes`. When used in Unicode context, |ASN.1| type
+ assumes "|encoding|" serialisation.
+
+ Keyword Args
+ ------------
+ value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
+        :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively
+        :class:`unicode` object (Python 2) or :class:`str` (Python 3)
+ representing character string to be serialised into octets
+ (note `encoding` parameter) or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ encoding: :py:class:`str`
+ Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
+ :class:`str` (Python 3) the payload when |ASN.1| object is used
+ in text string context.
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Icon(OctetString):
+ '''
+ ASN.1 specification:
+
+ Icon ::= OCTET STRING
+
+ icon1 Icon ::= '001100010011001000110011'B
+ icon2 Icon ::= '313233'H
+ '''
+ icon1 = Icon.fromBinaryString('001100010011001000110011')
+ icon2 = Icon.fromHexString('313233')
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ defaultBinValue = defaultHexValue = noValue
+ encoding = 'iso-8859-1'
+
+ def __init__(self, value=noValue, **kwargs):
+ if kwargs:
+ if value is noValue:
+ try:
+ value = self.fromBinaryString(kwargs.pop('binValue'))
+
+ except KeyError:
+ pass
+
+ try:
+ value = self.fromHexString(kwargs.pop('hexValue'))
+
+ except KeyError:
+ pass
+
+ if value is noValue:
+ if self.defaultBinValue is not noValue:
+ value = self.fromBinaryString(self.defaultBinValue)
+
+ elif self.defaultHexValue is not noValue:
+ value = self.fromHexString(self.defaultHexValue)
+
+ if 'encoding' not in kwargs:
+ kwargs['encoding'] = self.encoding
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ if sys.version_info[0] <= 2:
+ def prettyIn(self, value):
+ if isinstance(value, str):
+ return value
+
+ elif isinstance(value, unicode):
+ try:
+ return value.encode(self.encoding)
+
+ except (LookupError, UnicodeEncodeError):
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeEncodeError(
+ "Can't encode string '%s' with codec "
+ "%s" % (value, self.encoding), exc
+ )
+
+ elif isinstance(value, (tuple, list)):
+ try:
+ return ''.join([chr(x) for x in value])
+
+ except ValueError:
+ raise error.PyAsn1Error(
+ "Bad %s initializer '%s'" % (self.__class__.__name__, value)
+ )
+
+ else:
+ return str(value)
+
+ def __str__(self):
+ return str(self._value)
+
+ def __unicode__(self):
+ try:
+ return self._value.decode(self.encoding)
+
+ except UnicodeDecodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeDecodeError(
+ "Can't decode string '%s' with codec "
+ "%s" % (self._value, self.encoding), exc
+ )
+
+ def asOctets(self):
+ return str(self._value)
+
+ def asNumbers(self):
+ return tuple([ord(x) for x in self._value])
+
+ else:
+ def prettyIn(self, value):
+ if isinstance(value, bytes):
+ return value
+
+ elif isinstance(value, str):
+ try:
+ return value.encode(self.encoding)
+
+ except UnicodeEncodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeEncodeError(
+ "Can't encode string '%s' with '%s' "
+ "codec" % (value, self.encoding), exc
+ )
+ elif isinstance(value, OctetString): # a shortcut, bytes() would work the same way
+ return value.asOctets()
+
+ elif isinstance(value, base.SimpleAsn1Type): # this mostly targets Integer objects
+ return self.prettyIn(str(value))
+
+ elif isinstance(value, (tuple, list)):
+ return self.prettyIn(bytes(value))
+
+ else:
+ return bytes(value)
+
+ def __str__(self):
+ try:
+ return self._value.decode(self.encoding)
+
+ except UnicodeDecodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeDecodeError(
+ "Can't decode string '%s' with '%s' codec at "
+ "'%s'" % (self._value, self.encoding,
+ self.__class__.__name__), exc
+ )
+
+ def __bytes__(self):
+ return bytes(self._value)
+
+ def asOctets(self):
+ return bytes(self._value)
+
+ def asNumbers(self):
+ return tuple(self._value)
+
+ #
+ # Normally, `.prettyPrint()` is called from `__str__()`. Historically,
+ # OctetString.prettyPrint() used to return hexified payload
+ # representation in cases when non-printable content is present. At the
+ # same time `str()` used to produce either octet-stream (Py2) or
+ # text (Py3) representations.
+ #
+ # Therefore `OctetString.__str__()` -> `.prettyPrint()` call chain is
+ # reversed to preserve the original behaviour.
+ #
+ # Eventually we should deprecate `.prettyPrint()` / `.prettyOut()` harness
+ # and end up with just `__str__()` producing hexified representation while
+ # both text and octet-stream representation should only be requested via
+ # the `.asOctets()` method.
+ #
+    # Note: ASN.1 OCTET STRING is never meant to contain text!
+ #
+
+ def prettyOut(self, value):
+ return value
+
+ def prettyPrint(self, scope=0):
+ # first see if subclass has its own .prettyOut()
+ value = self.prettyOut(self._value)
+
+ if value is not self._value:
+ return value
+
+ numbers = self.asNumbers()
+
+ for x in numbers:
+ # hexify if needed
+ if x < 32 or x > 126:
+ return '0x' + ''.join(('%.2x' % x for x in numbers))
+ else:
+ # this prevents infinite recursion
+ return OctetString.__str__(self)
+
+ @staticmethod
+ def fromBinaryString(value):
+ """Create a |ASN.1| object initialized from a string of '0' and '1'.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like '1010111'
+ """
+ bitNo = 8
+ byte = 0
+ r = []
+ for v in value:
+ if bitNo:
+ bitNo -= 1
+ else:
+ bitNo = 7
+ r.append(byte)
+ byte = 0
+ if v in ('0', '1'):
+ v = int(v)
+ else:
+ raise error.PyAsn1Error(
+ 'Non-binary OCTET STRING initializer %s' % (v,)
+ )
+ byte |= v << bitNo
+
+ r.append(byte)
+
+ return octets.ints2octs(r)
+
+ @staticmethod
+ def fromHexString(value):
+ """Create a |ASN.1| object initialized from the hex string.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like 'DEADBEEF'
+ """
+ r = []
+ p = []
+ for v in value:
+ if p:
+ r.append(int(p + v, 16))
+ p = None
+ else:
+ p = v
+ if p:
+ r.append(int(p + '0', 16))
+
+ return octets.ints2octs(r)
+
+ # Immutable sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone(self._value[i])
+ else:
+ return self._value[i]
+
+ def __iter__(self):
+ return iter(self._value)
+
+ def __contains__(self, value):
+ return value in self._value
+
+ def __add__(self, value):
+ return self.clone(self._value + self.prettyIn(value))
+
+ def __radd__(self, value):
+ return self.clone(self.prettyIn(value) + self._value)
+
+ def __mul__(self, value):
+ return self.clone(self._value * value)
+
+ def __rmul__(self, value):
+ return self * value
+
+ def __int__(self):
+ return int(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ def __reversed__(self):
+ return reversed(self._value)
+
+
+class Null(OctetString):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`str` objects
+ (always empty).
+
+ Keyword Args
+ ------------
+ value: :class:`str` or |ASN.1| object
+        Python empty :class:`str` literal or any object that evaluates to :obj:`False`.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Ack(Null):
+ '''
+ ASN.1 specification:
+
+ Ack ::= NULL
+ '''
+ ack = Ack('')
+ """
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
+ )
+ subtypeSpec = OctetString.subtypeSpec + constraint.SingleValueConstraint(octets.str2octs(''))
+
+ # Optimization for faster codec lookup
+ typeId = OctetString.getTypeId()
+
+ def prettyIn(self, value):
+ if value:
+ return value
+
+ return octets.str2octs('')
+
+if sys.version_info[0] <= 2:
+ intTypes = (int, long)
+else:
+ intTypes = (int,)
+
+numericTypes = intTypes + (float,)
+
+
+class ObjectIdentifier(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`tuple` objects
+ (tuple of non-negative integers).
+
+ Keyword Args
+ ------------
+ value: :class:`tuple`, :class:`str` or |ASN.1| object
+ Python sequence of :class:`int` or :class:`str` literal or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class ID(ObjectIdentifier):
+ '''
+ ASN.1 specification:
+
+ ID ::= OBJECT IDENTIFIER
+
+ id-edims ID ::= { joint-iso-itu-t mhs-motif(6) edims(7) }
+ id-bp ID ::= { id-edims 11 }
+ '''
+ id_edims = ID('2.6.7')
+ id_bp = id_edims + (11,)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ def __add__(self, other):
+ return self.clone(self._value + other)
+
+ def __radd__(self, other):
+ return self.clone(other + self._value)
+
+ def asTuple(self):
+ return self._value
+
+ # Sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone(self._value[i])
+ else:
+ return self._value[i]
+
+ def __iter__(self):
+ return iter(self._value)
+
+ def __contains__(self, value):
+ return value in self._value
+
+ def index(self, suboid):
+ return self._value.index(suboid)
+
+ def isPrefixOf(self, other):
+ """Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.
+
+ Parameters
+ ----------
+ other: |ASN.1| object
+ |ASN.1| object
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object
+ or :obj:`False` otherwise.
+ """
+ l = len(self)
+ if l <= len(other):
+ if self._value[:l] == other[:l]:
+ return True
+ return False
+
+ def prettyIn(self, value):
+ if isinstance(value, ObjectIdentifier):
+ return tuple(value)
+ elif octets.isStringType(value):
+ if '-' in value:
+ raise error.PyAsn1Error(
+ 'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
+ )
+ try:
+ return tuple([int(subOid) for subOid in value.split('.') if subOid])
+ except ValueError:
+ raise error.PyAsn1Error(
+ 'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
+ )
+
+ try:
+ tupleOfInts = tuple([int(subOid) for subOid in value if subOid >= 0])
+
+ except (ValueError, TypeError):
+ raise error.PyAsn1Error(
+ 'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
+ )
+
+ if len(tupleOfInts) == len(value):
+ return tupleOfInts
+
+ raise error.PyAsn1Error('Malformed Object ID %s at %s' % (value, self.__class__.__name__))
+
+ def prettyOut(self, value):
+ return '.'.join([str(x) for x in value])
+
+
+class Real(base.SimpleAsn1Type):
+    """Create |ASN.1| schema or value object.
+
+    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+    objects are immutable and duck-type Python :class:`float` objects.
+    Additionally, |ASN.1| objects behave like a :class:`tuple` in which case its
+    elements are mantissa, base and exponent.
+
+    Keyword Args
+    ------------
+    value: :class:`tuple`, :class:`float` or |ASN.1| object
+        Python sequence of :class:`int` (representing mantissa, base and
+        exponent) or :class:`float` instance or |ASN.1| object.
+        If `value` is not given, schema object will be created.
+
+    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+        Object representing non-default ASN.1 tag(s)
+
+    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+        Object representing non-default ASN.1 subtype constraint(s). Constraints
+        verification for |ASN.1| type occurs automatically on object
+        instantiation.
+
+    Raises
+    ------
+    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+        On constraint violation or bad initializer.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        class Pi(Real):
+            '''
+            ASN.1 specification:
+
+            Pi ::= REAL
+
+            pi Pi ::= { mantissa 314159, base 10, exponent -5 }
+
+            '''
+        pi = Pi((314159, 10, -5))
+    """
+    binEncBase = None  # binEncBase = 16 is recommended for large numbers
+
+    try:
+        _plusInf = float('inf')
+        _minusInf = float('-inf')
+        _inf = _plusInf, _minusInf
+
+    except ValueError:
+        # Infinity support is platform and Python dependent
+        _plusInf = _minusInf = None
+        _inf = ()
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = tag.initTagSet(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
+    )
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+    #: imposing constraints on |ASN.1| type initialization values.
+    subtypeSpec = constraint.ConstraintsIntersection()
+
+    # Optimization for faster codec lookup
+    typeId = base.SimpleAsn1Type.getTypeId()
+
+    @staticmethod
+    def __normalizeBase10(value):
+        # Strip trailing decimal zeros off the mantissa, bumping the exponent.
+        # NOTE(review): `m /= 10` is true division, so on Python 3 the
+        # mantissa becomes a float once any zero is stripped; `//=` would
+        # keep it integral -- confirm whether encoders rely on this.
+        m, b, e = value
+        while m and m % 10 == 0:
+            m /= 10
+            e += 1
+        return m, b, e
+
+    def prettyIn(self, value):
+        # Normalize initializer into (mantissa, base, exponent) or an
+        # infinity float.
+        if isinstance(value, tuple) and len(value) == 3:
+            if (not isinstance(value[0], numericTypes) or
+                    not isinstance(value[1], intTypes) or
+                    not isinstance(value[2], intTypes)):
+                raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
+            if (isinstance(value[0], float) and
+                    self._inf and value[0] in self._inf):
+                return value[0]
+            if value[1] not in (2, 10):
+                raise error.PyAsn1Error(
+                    'Prohibited base for Real value: %s' % (value[1],)
+                )
+            if value[1] == 10:
+                value = self.__normalizeBase10(value)
+            return value
+        elif isinstance(value, intTypes):
+            return self.__normalizeBase10((value, 10, 0))
+        elif isinstance(value, float) or octets.isStringType(value):
+            if octets.isStringType(value):
+                try:
+                    value = float(value)
+                except ValueError:
+                    raise error.PyAsn1Error(
+                        'Bad real value syntax: %s' % (value,)
+                    )
+            if self._inf and value in self._inf:
+                return value
+            else:
+                # Scale the float into an exact decimal mantissa/exponent pair.
+                e = 0
+                while int(value) != value:
+                    value *= 10
+                    e -= 1
+                return self.__normalizeBase10((int(value), 10, e))
+        elif isinstance(value, Real):
+            return tuple(value)
+        raise error.PyAsn1Error(
+            'Bad real value syntax: %s' % (value,)
+        )
+
+    def prettyPrint(self, scope=0):
+        try:
+            return self.prettyOut(float(self))
+
+        except OverflowError:
+            return '<overflow>'
+
+    @property
+    def isPlusInf(self):
+        """Indicate PLUS-INFINITY object value
+
+        Returns
+        -------
+        : :class:`bool`
+            :obj:`True` if calling object represents plus infinity
+            or :obj:`False` otherwise.
+
+        """
+        return self._value == self._plusInf
+
+    @property
+    def isMinusInf(self):
+        """Indicate MINUS-INFINITY object value
+
+        Returns
+        -------
+        : :class:`bool`
+            :obj:`True` if calling object represents minus infinity
+            or :obj:`False` otherwise.
+        """
+        return self._value == self._minusInf
+
+    @property
+    def isInf(self):
+        # True for either signed infinity.
+        return self._value in self._inf
+
+    def __add__(self, value):
+        return self.clone(float(self) + value)
+
+    def __radd__(self, value):
+        return self + value
+
+    def __mul__(self, value):
+        return self.clone(float(self) * value)
+
+    def __rmul__(self, value):
+        return self * value
+
+    def __sub__(self, value):
+        return self.clone(float(self) - value)
+
+    def __rsub__(self, value):
+        return self.clone(value - float(self))
+
+    def __mod__(self, value):
+        return self.clone(float(self) % value)
+
+    def __rmod__(self, value):
+        return self.clone(value % float(self))
+
+    def __pow__(self, value, modulo=None):
+        return self.clone(pow(float(self), value, modulo))
+
+    def __rpow__(self, value):
+        return self.clone(pow(value, float(self)))
+
+    if sys.version_info[0] <= 2:
+        def __div__(self, value):
+            return self.clone(float(self) / value)
+
+        def __rdiv__(self, value):
+            return self.clone(value / float(self))
+    else:
+        def __truediv__(self, value):
+            return self.clone(float(self) / value)
+
+        def __rtruediv__(self, value):
+            return self.clone(value / float(self))
+
+    # NOTE(review): __divmod__/__rdivmod__ return a single floor-division
+    # Real, not the (quotient, remainder) pair the divmod() protocol
+    # prescribes -- confirm before relying on divmod() with Real operands.
+    def __divmod__(self, value):
+        return self.clone(float(self) // value)
+
+    def __rdivmod__(self, value):
+        return self.clone(value // float(self))
+
+    def __int__(self):
+        return int(float(self))
+
+    if sys.version_info[0] <= 2:
+        def __long__(self):
+            return long(float(self))
+
+    def __float__(self):
+        # Infinities are stored directly as floats; finite values are kept
+        # as (mantissa, base, exponent) and evaluated on demand.
+        if self._value in self._inf:
+            return self._value
+        else:
+            return float(
+                self._value[0] * pow(self._value[1], self._value[2])
+            )
+
+    def __abs__(self):
+        return self.clone(abs(float(self)))
+
+    def __pos__(self):
+        return self.clone(+float(self))
+
+    def __neg__(self):
+        return self.clone(-float(self))
+
+    def __round__(self, n=0):
+        # With explicit ndigits a Real clone is returned; round-to-integer
+        # returns a plain Python number, mirroring float.__round__.
+        r = round(float(self), n)
+        if n:
+            return self.clone(r)
+        else:
+            return r
+
+    def __floor__(self):
+        return self.clone(math.floor(float(self)))
+
+    def __ceil__(self):
+        return self.clone(math.ceil(float(self)))
+
+    def __trunc__(self):
+        return self.clone(math.trunc(float(self)))
+
+    def __lt__(self, value):
+        return float(self) < value
+
+    def __le__(self, value):
+        return float(self) <= value
+
+    def __eq__(self, value):
+        return float(self) == value
+
+    def __ne__(self, value):
+        return float(self) != value
+
+    def __gt__(self, value):
+        return float(self) > value
+
+    def __ge__(self, value):
+        return float(self) >= value
+
+    if sys.version_info[0] <= 2:
+        def __nonzero__(self):
+            return bool(float(self))
+    else:
+        def __bool__(self):
+            return bool(float(self))
+
+        __hash__ = base.SimpleAsn1Type.__hash__
+
+    def __getitem__(self, idx):
+        # Tuple-style access to (mantissa, base, exponent); meaningless for
+        # infinities.
+        if self._value in self._inf:
+            raise error.PyAsn1Error('Invalid infinite value operation')
+        else:
+            return self._value[idx]
+
+    # compatibility stubs
+
+    def isPlusInfinity(self):
+        return self.isPlusInf
+
+    def isMinusInfinity(self):
+        return self.isMinusInf
+
+    def isInfinity(self):
+        return self.isInf
+
+
+class Enumerated(Integer):
+    """Create |ASN.1| schema or value object.
+
+    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+    objects are immutable and duck-type Python :class:`int` objects.
+
+    Keyword Args
+    ------------
+    value: :class:`int`, :class:`str` or |ASN.1| object
+        Python :class:`int` or :class:`str` literal or |ASN.1| object.
+        If `value` is not given, schema object will be created.
+
+    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+        Object representing non-default ASN.1 tag(s)
+
+    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+        Object representing non-default ASN.1 subtype constraint(s). Constraints
+        verification for |ASN.1| type occurs automatically on object
+        instantiation.
+
+    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+        Object representing non-default symbolic aliases for numbers
+
+    Raises
+    ------
+    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+        On constraint violation or bad initializer.
+
+    Examples
+    --------
+
+    .. code-block:: python
+
+        class RadioButton(Enumerated):
+            '''
+            ASN.1 specification:
+
+            RadioButton ::= ENUMERATED { button1(0), button2(1),
+                                         button3(2) }
+
+            selected-by-default RadioButton ::= button1
+            '''
+            namedValues = NamedValues(
+                ('button1', 0), ('button2', 1),
+                ('button3', 2)
+            )
+
+        selected_by_default = RadioButton('button1')
+    """
+    # Inherits all numeric behavior from Integer; only the tag and the
+    # (empty by default) named-values map differ.
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    tagSet = tag.initTagSet(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
+    )
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+    #: imposing constraints on |ASN.1| type initialization values.
+    subtypeSpec = constraint.ConstraintsIntersection()
+
+    # Optimization for faster codec lookup
+    typeId = Integer.getTypeId()
+
+    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+    #: representing symbolic aliases for numbers
+    namedValues = namedval.NamedValues()
+
+
+# "Structured" ASN.1 types
+
+class SequenceOfAndSetOfBase(base.ConstructedAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
+ its objects are mutable and duck-type Python :class:`list` objects.
+
+ Keyword Args
+ ------------
+ componentType : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A pyasn1 object representing ASN.1 type allowed within |ASN.1| type
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type can only occur on explicit
+ `.isInconsistent` call.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class LotteryDraw(SequenceOf): # SetOf is similar
+ '''
+ ASN.1 specification:
+
+ LotteryDraw ::= SEQUENCE OF INTEGER
+ '''
+ componentType = Integer()
+
+ lotteryDraw = LotteryDraw()
+ lotteryDraw.extend([123, 456, 789])
+ """
+ def __init__(self, *args, **kwargs):
+ # support positional params for backward compatibility
+ if args:
+ for key, value in zip(('componentType', 'tagSet',
+ 'subtypeSpec'), args):
+ if key in kwargs:
+ raise error.PyAsn1Error('Conflicting positional and keyword params!')
+ kwargs['componentType'] = value
+
+ self._componentValues = noValue
+
+ base.ConstructedAsn1Type.__init__(self, **kwargs)
+
+ # Python list protocol
+
+ def __getitem__(self, idx):
+ try:
+ return self.getComponentByPosition(idx)
+
+ except error.PyAsn1Error:
+ raise IndexError(sys.exc_info()[1])
+
+ def __setitem__(self, idx, value):
+ try:
+ self.setComponentByPosition(idx, value)
+
+ except error.PyAsn1Error:
+ raise IndexError(sys.exc_info()[1])
+
+ def append(self, value):
+ if self._componentValues is noValue:
+ pos = 0
+
+ else:
+ pos = len(self._componentValues)
+
+ self[pos] = value
+
+ def count(self, value):
+ return list(self._componentValues.values()).count(value)
+
+ def extend(self, values):
+ for value in values:
+ self.append(value)
+
+ if self._componentValues is noValue:
+ self._componentValues = {}
+
+ def index(self, value, start=0, stop=None):
+ if stop is None:
+ stop = len(self)
+
+ indices, values = zip(*self._componentValues.items())
+
+ # TODO: remove when Py2.5 support is gone
+ values = list(values)
+
+ try:
+ return indices[values.index(value, start, stop)]
+
+ except error.PyAsn1Error:
+ raise ValueError(sys.exc_info()[1])
+
+ def reverse(self):
+ self._componentValues.reverse()
+
+ def sort(self, key=None, reverse=False):
+ self._componentValues = dict(
+ enumerate(sorted(self._componentValues.values(),
+ key=key, reverse=reverse)))
+
+ def __len__(self):
+ if self._componentValues is noValue or not self._componentValues:
+ return 0
+
+ return max(self._componentValues) + 1
+
+ def __iter__(self):
+ for idx in range(0, len(self)):
+ yield self.getComponentByPosition(idx)
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ for idx, componentValue in self._componentValues.items():
+ if componentValue is not noValue:
+ if isinstance(componentValue, base.ConstructedAsn1Type):
+ myClone.setComponentByPosition(
+ idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByPosition(idx, componentValue.clone())
+
+ def getComponentByPosition(self, idx, default=noValue, instantiate=True):
+ """Return |ASN.1| type component value by position.
+
+ Equivalent to Python sequence subscription operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ idx : :class:`int`
+ Component index (zero-based). Must either refer to an existing
+ component or to N+1 component (if *componentType* is set). In the latter
+ case a new component type gets instantiated and appended to the |ASN.1|
+ sequence.
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically instantiated.
+ If :obj:`False` either existing component or the :class:`NoValue` object will be
+ returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ Instantiate |ASN.1| component type or return existing component value
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ # can also be SetOf
+ class MySequenceOf(SequenceOf):
+ componentType = OctetString()
+
+ s = MySequenceOf()
+
+ # returns component #0 with `.isValue` property False
+ s.getComponentByPosition(0)
+
+ # returns None
+ s.getComponentByPosition(0, default=None)
+
+ s.clear()
+
+ # returns noValue
+ s.getComponentByPosition(0, instantiate=False)
+
+ # sets component #0 to OctetString() ASN.1 schema
+ # object and returns it
+ s.getComponentByPosition(0, instantiate=True)
+
+ # sets component #0 to ASN.1 value object
+ s.setComponentByPosition(0, 'ABCD')
+
+ # returns OctetString('ABCD') value object
+ s.getComponentByPosition(0, instantiate=False)
+
+ s.clear()
+
+ # returns noValue
+ s.getComponentByPosition(0, instantiate=False)
+ """
+ if isinstance(idx, slice):
+ indices = tuple(range(len(self)))
+ return [self.getComponentByPosition(subidx, default, instantiate)
+ for subidx in indices[idx]]
+
+ if idx < 0:
+ idx = len(self) + idx
+ if idx < 0:
+ raise error.PyAsn1Error(
+ 'SequenceOf/SetOf index is out of range')
+
+ try:
+ componentValue = self._componentValues[idx]
+
+ except (KeyError, error.PyAsn1Error):
+ if not instantiate:
+ return default
+
+ self.setComponentByPosition(idx)
+
+ componentValue = self._componentValues[idx]
+
+ if default is noValue or componentValue.isValue:
+ return componentValue
+ else:
+ return default
+
+ def setComponentByPosition(self, idx, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by position.
+
+ Equivalent to Python sequence item assignment operation (e.g. `[]`)
+ or list.append() (when idx == len(self)).
+
+ Parameters
+ ----------
+ idx: :class:`int`
+ Component index (zero-based). Must either refer to existing
+ component or to N+1 component. In the latter case a new component
+ type gets instantiated (if *componentType* is set, or given ASN.1
+ object is taken otherwise) and appended to the |ASN.1| sequence.
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints: :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer
+ IndexError
+ When idx > len(self)
+ """
+ if isinstance(idx, slice):
+ indices = tuple(range(len(self)))
+ startIdx = indices and indices[idx][0] or 0
+ for subIdx, subValue in enumerate(value):
+ self.setComponentByPosition(
+ startIdx + subIdx, subValue, verifyConstraints,
+ matchTags, matchConstraints)
+ return self
+
+ if idx < 0:
+ idx = len(self) + idx
+ if idx < 0:
+ raise error.PyAsn1Error(
+ 'SequenceOf/SetOf index is out of range')
+
+ componentType = self.componentType
+
+ if self._componentValues is noValue:
+ componentValues = {}
+
+ else:
+ componentValues = self._componentValues
+
+ currentValue = componentValues.get(idx, noValue)
+
+ if value is noValue:
+ if componentType is not None:
+ value = componentType.clone()
+
+ elif currentValue is noValue:
+ raise error.PyAsn1Error('Component type not defined')
+
+ elif not isinstance(value, base.Asn1Item):
+ if (componentType is not None and
+ isinstance(componentType, base.SimpleAsn1Type)):
+ value = componentType.clone(value=value)
+
+ elif (currentValue is not noValue and
+ isinstance(currentValue, base.SimpleAsn1Type)):
+ value = currentValue.clone(value=value)
+
+ else:
+ raise error.PyAsn1Error(
+ 'Non-ASN.1 value %r and undefined component'
+ ' type at %r' % (value, self))
+
+ elif componentType is not None and (matchTags or matchConstraints):
+ subtypeChecker = (
+ self.strictConstraints and
+ componentType.isSameTypeWith or
+ componentType.isSuperTypeOf)
+
+ if not subtypeChecker(value, verifyConstraints and matchTags,
+ verifyConstraints and matchConstraints):
+ # TODO: we should wrap componentType with UnnamedType to carry
+ # additional properties associated with componentType
+ if componentType.typeId != Any.typeId:
+ raise error.PyAsn1Error(
+ 'Component value is tag-incompatible: %r vs '
+ '%r' % (value, componentType))
+
+ componentValues[idx] = value
+
+ self._componentValues = componentValues
+
+ return self
+
+ @property
+ def componentTagMap(self):
+ if self.componentType is not None:
+ return self.componentType.tagMap
+
+ @property
+ def components(self):
+ return [self._componentValues[idx]
+ for idx in sorted(self._componentValues)]
+
+ def clear(self):
+ """Remove all components and become an empty |ASN.1| value object.
+
+ Has the same effect on |ASN.1| object as it does on :class:`list`
+ built-in.
+ """
+ self._componentValues = {}
+ return self
+
+ def reset(self):
+ """Remove all components and become a |ASN.1| schema object.
+
+ See :meth:`isValue` property for more information on the
+ distinction between value and schema objects.
+ """
+ self._componentValues = noValue
+ return self
+
+ def prettyPrint(self, scope=0):
+ scope += 1
+ representation = self.__class__.__name__ + ':\n'
+
+ if not self.isValue:
+ return representation
+
+ for idx, componentValue in enumerate(self):
+ representation += ' ' * scope
+ if (componentValue is noValue and
+ self.componentType is not None):
+ representation += '<empty>'
+ else:
+ representation += componentValue.prettyPrint(scope)
+
+ return representation
+
+ def prettyPrintType(self, scope=0):
+ scope += 1
+ representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
+ if self.componentType is not None:
+ representation += ' ' * scope
+ representation += self.componentType.prettyPrintType(scope)
+ return representation + '\n' + ' ' * (scope - 1) + '}'
+
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
+ this object can also be used like a Python built-in object
+ (e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+ operations (e.g. defining or testing the structure of the data). Most
+ obvious uses of ASN.1 schema is to guide serialisation codecs whilst
+ encoding/decoding serialised ASN.1 contents.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ if self._componentValues is noValue:
+ return False
+
+ if len(self._componentValues) != len(self):
+ return False
+
+ for componentValue in self._componentValues.values():
+ if componentValue is noValue or not componentValue.isValue:
+ return False
+
+ return True
+
+ @property
+ def isInconsistent(self):
+ """Run necessary checks to ensure |ASN.1| object consistency.
+
+ Default action is to verify |ASN.1| object against constraints imposed
+ by `subtypeSpec`.
+
+ Raises
+ ------
+ :py:class:`~pyasn1.error.PyAsn1tError` on any inconsistencies found
+ """
+ if self.componentType is noValue or not self.subtypeSpec:
+ return False
+
+ if self._componentValues is noValue:
+ return True
+
+ mapping = {}
+
+ for idx, value in self._componentValues.items():
+ # Absent fields are not in the mapping
+ if value is noValue:
+ continue
+
+ mapping[idx] = value
+
+ try:
+ # Represent SequenceOf/SetOf as a bare dict to constraints chain
+ self.subtypeSpec(mapping)
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()[1]
+ return exc
+
+ return False
+
+class SequenceOf(SequenceOfAndSetOfBase):
+    __doc__ = SequenceOfAndSetOfBase.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    # 0x10 is universal tag number 16 -- SEQUENCE / SEQUENCE OF.
+    tagSet = tag.initTagSet(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
+    )
+
+    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+    #: object representing ASN.1 type allowed within |ASN.1| type
+    componentType = None
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+    #: imposing constraints on |ASN.1| type initialization values.
+    subtypeSpec = constraint.ConstraintsIntersection()
+
+    # Disambiguation ASN.1 types identification
+    typeId = SequenceOfAndSetOfBase.getTypeId()
+
+
+class SetOf(SequenceOfAndSetOfBase):
+    __doc__ = SequenceOfAndSetOfBase.__doc__
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+    #: associated with |ASN.1| type.
+    # 0x11 is universal tag number 17 -- SET / SET OF.
+    tagSet = tag.initTagSet(
+        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
+    )
+
+    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+    #: object representing ASN.1 type allowed within |ASN.1| type
+    componentType = None
+
+    #: Set (on class, not on instance) or return a
+    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+    #: imposing constraints on |ASN.1| type initialization values.
+    subtypeSpec = constraint.ConstraintsIntersection()
+
+    # Disambiguation ASN.1 types identification
+    typeId = SequenceOfAndSetOfBase.getTypeId()
+
+
+class SequenceAndSetBase(base.ConstructedAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
+ its objects are mutable and duck-type Python :class:`dict` objects.
+
+ Keyword Args
+ ------------
+ componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
+ Object holding named ASN.1 types allowed within this collection
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type can only occur on explicit
+ `.isInconsistent` call.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Description(Sequence): # Set is similar
+ '''
+ ASN.1 specification:
+
+ Description ::= SEQUENCE {
+ surname IA5String,
+ first-name IA5String OPTIONAL,
+ age INTEGER DEFAULT 40
+ }
+ '''
+ componentType = NamedTypes(
+ NamedType('surname', IA5String()),
+ OptionalNamedType('first-name', IA5String()),
+ DefaultedNamedType('age', Integer(40))
+ )
+
+ descr = Description()
+ descr['surname'] = 'Smith'
+ descr['first-name'] = 'John'
+ """
+ #: Default :py:class:`~pyasn1.type.namedtype.NamedTypes`
+ #: object representing named ASN.1 types allowed within |ASN.1| type
+ componentType = namedtype.NamedTypes()
+
+
+ class DynamicNames(object):
+ """Fields names/positions mapping for component-less objects"""
+ def __init__(self):
+ self._keyToIdxMap = {}
+ self._idxToKeyMap = {}
+
+ def __len__(self):
+ return len(self._keyToIdxMap)
+
+ def __contains__(self, item):
+ return item in self._keyToIdxMap or item in self._idxToKeyMap
+
+ def __iter__(self):
+ return (self._idxToKeyMap[idx] for idx in range(len(self._idxToKeyMap)))
+
+ def __getitem__(self, item):
+ try:
+ return self._keyToIdxMap[item]
+
+ except KeyError:
+ return self._idxToKeyMap[item]
+
+ def getNameByPosition(self, idx):
+ try:
+ return self._idxToKeyMap[idx]
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByName(self, name):
+ try:
+ return self._keyToIdxMap[name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ def addField(self, idx):
+ self._keyToIdxMap['field-%d' % idx] = idx
+ self._idxToKeyMap[idx] = 'field-%d' % idx
+
+
+    def __init__(self, **kwargs):
+        # Delegate common constructed-type setup to the base class.
+        base.ConstructedAsn1Type.__init__(self, **kwargs)
+        self._componentTypeLen = len(self.componentType)
+        if self._componentTypeLen:
+            self._componentValues = []
+        else:
+            self._componentValues = noValue
+        # With no static componentType, field names are tracked dynamically.
+        self._dynamicNames = self._componentTypeLen or self.DynamicNames()
+
+ def __getitem__(self, idx):
+ if octets.isStringType(idx):
+ try:
+ return self.getComponentByName(idx)
+
+ except error.PyAsn1Error:
+ # duck-typing dict
+ raise KeyError(sys.exc_info()[1])
+
+ else:
+ try:
+ return self.getComponentByPosition(idx)
+
+ except error.PyAsn1Error:
+ # duck-typing list
+ raise IndexError(sys.exc_info()[1])
+
+ def __setitem__(self, idx, value):
+ if octets.isStringType(idx):
+ try:
+ self.setComponentByName(idx, value)
+
+ except error.PyAsn1Error:
+ # duck-typing dict
+ raise KeyError(sys.exc_info()[1])
+
+ else:
+ try:
+ self.setComponentByPosition(idx, value)
+
+ except error.PyAsn1Error:
+ # duck-typing list
+ raise IndexError(sys.exc_info()[1])
+
+ def __contains__(self, key):
+ if self._componentTypeLen:
+ return key in self.componentType
+ else:
+ return key in self._dynamicNames
+
+    def __len__(self):
+        # Number of components currently allocated.
+        return len(self._componentValues)
+
+    def __iter__(self):
+        # Iterate field names; falls back to dynamic names when the static
+        # componentType is empty (empty NamedTypes is falsy).
+        return iter(self.componentType or self._dynamicNames)
+
+ # Python dict protocol
+
+ def values(self):
+ for idx in range(self._componentTypeLen or len(self._dynamicNames)):
+ yield self[idx]
+
+    def keys(self):
+        # Field names in positional order (dict.keys duck-typing).
+        return iter(self)
+
+ def items(self):
+ for idx in range(self._componentTypeLen or len(self._dynamicNames)):
+ if self._componentTypeLen:
+ yield self.componentType[idx].name, self[idx]
+ else:
+ yield self._dynamicNames[idx], self[idx]
+
+ def update(self, *iterValue, **mappingValue):
+ for k, v in iterValue:
+ self[k] = v
+ for k in mappingValue:
+ self[k] = mappingValue[k]
+
+    def clear(self):
+        """Remove all components and become an empty |ASN.1| value object.
+
+        Has the same effect on |ASN.1| object as it does on :class:`dict`
+        built-in.
+        """
+        self._componentValues = []
+        # Forget any dynamically registered field names as well.
+        self._dynamicNames = self.DynamicNames()
+        return self
+
+    def reset(self):
+        """Remove all components and become a |ASN.1| schema object.
+
+        See :meth:`isValue` property for more information on the
+        distinction between value and schema objects.
+        """
+        # noValue (as opposed to an empty list) marks this as a schema object.
+        self._componentValues = noValue
+        self._dynamicNames = self.DynamicNames()
+        return self
+
+    @property
+    def components(self):
+        # Raw component storage (a list, or noValue for schema objects).
+        return self._componentValues
+
+    def _cloneComponentValues(self, myClone, cloneValueFlag):
+        # Deep-copy components into *myClone*; constructed components honour
+        # cloneValueFlag so inner values optionally survive the clone.
+        if self._componentValues is noValue:
+            return
+
+        for idx, componentValue in enumerate(self._componentValues):
+            if componentValue is not noValue:
+                if isinstance(componentValue, base.ConstructedAsn1Type):
+                    myClone.setComponentByPosition(
+                        idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
+                    )
+                else:
+                    myClone.setComponentByPosition(idx, componentValue.clone())
+
+ def getComponentByName(self, name, default=noValue, instantiate=True):
+ """Returns |ASN.1| type component by name.
+
+ Equivalent to Python :class:`dict` subscription operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ name: :class:`str`
+ |ASN.1| type component name
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically
+ instantiated.
+ If :obj:`False` either existing component or the :class:`NoValue`
+ object will be returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ Instantiate |ASN.1| component type or return existing
+ component value
+ """
+ if self._componentTypeLen:
+ idx = self.componentType.getPositionByName(name)
+ else:
+ try:
+ idx = self._dynamicNames.getPositionByName(name)
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ return self.getComponentByPosition(idx, default=default, instantiate=instantiate)
+
+ def setComponentByName(self, name, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by name.
+
+ Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ name: :class:`str`
+ |ASN.1| type component name
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints: :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+ """
+ if self._componentTypeLen:
+ idx = self.componentType.getPositionByName(name)
+ else:
+ try:
+ idx = self._dynamicNames.getPositionByName(name)
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ return self.setComponentByPosition(
+ idx, value, verifyConstraints, matchTags, matchConstraints
+ )
+
    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Returns |ASN.1| type component by index.

        Equivalent to Python sequence subscription operation (e.g. `[]`).

        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to an existing
            component or (if *componentType* is set) new ASN.1 schema object gets
            instantiated.

        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.

        instantiate: :class:`bool`
            If :obj:`True` (default), inner component will be automatically
            instantiated.
            If :obj:`False` either existing component or the :class:`NoValue`
            object will be returned.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a PyASN1 object

        Examples
        --------

        .. code-block:: python

            # can also be Set
            class MySequence(Sequence):
                componentType = NamedTypes(
                    NamedType('id', OctetString())
                )

            s = MySequence()

            # returns component #0 with `.isValue` property False
            s.getComponentByPosition(0)

            # returns None
            s.getComponentByPosition(0, default=None)

            s.clear()

            # returns noValue
            s.getComponentByPosition(0, instantiate=False)

            # sets component #0 to OctetString() ASN.1 schema
            # object and returns it
            s.getComponentByPosition(0, instantiate=True)

            # sets component #0 to ASN.1 value object
            s.setComponentByPosition(0, 'ABCD')

            # returns OctetString('ABCD') value object
            s.getComponentByPosition(0, instantiate=False)

            s.clear()

            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
        """
        # Look up the stored component; a missing backing store (schema-only
        # object) and a not-yet-assigned position both read as noValue.
        try:
            if self._componentValues is noValue:
                componentValue = noValue

            else:
                componentValue = self._componentValues[idx]

        except IndexError:
            componentValue = noValue

        if not instantiate:
            # Read-only mode: never create components on access.
            if componentValue is noValue or not componentValue.isValue:
                return default
            else:
                return componentValue

        if componentValue is noValue:
            # Instantiate the schema-declared component in place, then
            # re-read it from the (possibly re-created) backing store.
            self.setComponentByPosition(idx)

        componentValue = self._componentValues[idx]

        if default is noValue or componentValue.isValue:
            return componentValue
        else:
            return default
+
    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.

        Equivalent to Python sequence item assignment operation (e.g. `[]`).

        Parameters
        ----------
        idx : :class:`int`
            Component index (zero-based). Must either refer to existing
            component (if *componentType* is set) or to N+1 component
            otherwise. In the latter case a new component of given ASN.1
            type gets instantiated and appended to |ASN.1| sequence.

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
            If `value` is not given, schema object will be set as a component.

        verifyConstraints : :class:`bool`
            If :obj:`False`, skip constraints validation

        matchTags: :class:`bool`
            If :obj:`False`, skip component tags matching

        matchConstraints: :class:`bool`
            If :obj:`False`, skip component constraints matching

        Returns
        -------
        self
        """
        componentType = self.componentType
        componentTypeLen = self._componentTypeLen

        # Lazily materialize the backing store: schema-only objects keep
        # noValue until the first component is assigned.
        if self._componentValues is noValue:
            componentValues = []

        else:
            componentValues = self._componentValues

        try:
            currentValue = componentValues[idx]

        except IndexError:
            currentValue = noValue
            if componentTypeLen:
                if componentTypeLen < idx:
                    raise error.PyAsn1Error('component index out of range')

                # Pre-size the store to the full schema width so later
                # positional writes land in place.
                componentValues = [noValue] * componentTypeLen

        if value is noValue:
            # No explicit value: (re)install the schema-declared object.
            if componentTypeLen:
                value = componentType.getTypeByPosition(idx)
                if isinstance(value, base.ConstructedAsn1Type):
                    # Clone inner values only for DEFAULT-ed fields.
                    value = value.clone(cloneValueFlag=componentType[idx].isDefaulted)

            elif currentValue is noValue:
                raise error.PyAsn1Error('Component type not defined')

        elif not isinstance(value, base.Asn1Item):
            # Plain Python value: cast it through the scalar ASN.1 type of
            # this field (from the schema, or from the value already held).
            if componentTypeLen:
                subComponentType = componentType.getTypeByPosition(idx)
                if isinstance(subComponentType, base.SimpleAsn1Type):
                    value = subComponentType.clone(value=value)

                else:
                    raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)

            elif currentValue is not noValue and isinstance(currentValue, base.SimpleAsn1Type):
                value = currentValue.clone(value=value)

            else:
                raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)

        elif ((verifyConstraints or matchTags or matchConstraints) and
              componentTypeLen):
            # ASN.1 value object supplied: verify it is type-compatible
            # with the schema-declared field (open types are exempt).
            subComponentType = componentType.getTypeByPosition(idx)
            if subComponentType is not noValue:
                subtypeChecker = (self.strictConstraints and
                                  subComponentType.isSameTypeWith or
                                  subComponentType.isSuperTypeOf)

                if not subtypeChecker(value, verifyConstraints and matchTags,
                                      verifyConstraints and matchConstraints):
                    if not componentType[idx].openType:
                        raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))

        if componentTypeLen or idx in self._dynamicNames:
            componentValues[idx] = value

        elif len(componentValues) == idx:
            # Schema-less object: only appends at the tail are allowed.
            componentValues.append(value)
            self._dynamicNames.addField(idx)

        else:
            raise error.PyAsn1Error('Component index out of range')

        self._componentValues = componentValues

        return self
+
    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.

        If *isValue* is :obj:`False` then this object represents just ASN.1 schema.

        If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
        this object can also be used like a Python built-in object (e.g.
        :class:`int`, :class:`str`, :class:`dict` etc.).

        Returns
        -------
        : :class:`bool`
            :obj:`False` if object represents just ASN.1 schema.
            :obj:`True` if object represents ASN.1 schema and can be used as a
            normal value.

        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.

        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).

        It is sufficient for |ASN.1| objects to have all non-optional and non-defaulted
        components being value objects to be considered as a value objects as a whole.
        In other words, even having one or more optional components not turned into
        value objects, |ASN.1| object is still considered as a value object. Defaulted
        components are normally value objects by default.
        """
        if self._componentValues is noValue:
            # Pure schema object: no backing component store at all.
            return False

        componentType = self.componentType

        if componentType:
            for idx, subComponentType in enumerate(componentType.namedTypes):
                if subComponentType.isDefaulted or subComponentType.isOptional:
                    # OPTIONAL/DEFAULT fields need not be set for the whole
                    # object to count as a value.
                    continue

                if not self._componentValues:
                    return False

                componentValue = self._componentValues[idx]
                if componentValue is noValue or not componentValue.isValue:
                    return False

        else:
            # No schema: every dynamically added component must be a value.
            for componentValue in self._componentValues:
                if componentValue is noValue or not componentValue.isValue:
                    return False

        return True
+
    @property
    def isInconsistent(self):
        """Run necessary checks to ensure |ASN.1| object consistency.

        Default action is to verify |ASN.1| object against constraints imposed
        by `subtypeSpec`.

        Returns
        -------
        : :class:`bool` or :py:class:`~pyasn1.error.PyAsn1Error`
            :obj:`False` (falsy) when the object is consistent or there are
            no constraints to check; :obj:`True` when constraints are set
            but no components have been assigned yet; otherwise the caught
            :py:class:`~pyasn1.error.PyAsn1Error` instance (truthy)
            describing the constraint violation.
        """
        if self.componentType is noValue or not self.subtypeSpec:
            return False

        if self._componentValues is noValue:
            return True

        mapping = {}

        for idx, value in enumerate(self._componentValues):
            # Absent fields are not in the mapping
            if value is noValue:
                continue

            name = self.componentType.getNameByPosition(idx)

            mapping[name] = value

        try:
            # Represent Sequence/Set as a bare dict to constraints chain
            self.subtypeSpec(mapping)

        except error.PyAsn1Error:
            # Surface the failure as a truthy return value rather than
            # letting the exception propagate to the caller.
            exc = sys.exc_info()[1]
            return exc

        return False
+
    def prettyPrint(self, scope=0):
        """Return an object representation string.

        Keyword Args
        ------------
        scope: :class:`int`
            Current nesting depth; controls indentation of nested components.

        Returns
        -------
        : :class:`str`
            Human-friendly object representation.
        """
        scope += 1
        representation = self.__class__.__name__ + ':\n'
        # Only fully instantiated components are rendered.
        for idx, componentValue in enumerate(self._componentValues):
            if componentValue is not noValue and componentValue.isValue:
                representation += ' ' * scope
                if self.componentType:
                    # Field name comes from the static schema...
                    representation += self.componentType.getNameByPosition(idx)
                else:
                    # ...or from the dynamically registered names.
                    representation += self._dynamicNames.getNameByPosition(idx)
                representation = '%s=%s\n' % (
                    representation, componentValue.prettyPrint(scope)
                )
        return representation
+
    def prettyPrintType(self, scope=0):
        """Return a human-friendly string describing this object's type layout
        (tags and per-field types), recursing into component types."""
        scope += 1
        representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
        # Prefer the static schema; fall back to stored values for
        # schema-less objects.
        for idx, componentType in enumerate(self.componentType.values() or self._componentValues):
            representation += ' ' * scope
            if self.componentType:
                representation += '"%s"' % self.componentType.getNameByPosition(idx)
            else:
                representation += '"%s"' % self._dynamicNames.getNameByPosition(idx)
            representation = '%s = %s\n' % (
                representation, componentType.prettyPrintType(scope)
            )
        return representation + '\n' + ' ' * (scope - 1) + '}'
+
+ # backward compatibility
+
def setDefaultComponents(self):
    """Backward-compatibility no-op; defaulted components are handled
    elsewhere. Returns *self* unchanged."""
    return self
+
def getComponentType(self):
    """Backward-compatibility helper: return *componentType* when a
    non-empty static schema is set, otherwise None."""
    return self.componentType if self._componentTypeLen else None
+
def getNameByPosition(self, idx):
    """Backward-compatibility helper: return the schema-declared field
    name at *idx*, or None when no static schema is set."""
    return self.componentType[idx].name if self._componentTypeLen else None
+
class Sequence(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object imposing size constraint on |ASN.1| objects
    componentType = namedtype.NamedTypes()

    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()

    # backward compatibility

    def getComponentTagMapNearPosition(self, idx):
        # Backward-compatibility helper: tag map of types acceptable at or
        # after position *idx*; None when no static schema is set.
        if self.componentType:
            return self.componentType.getTagMapNearPosition(idx)

    def getComponentPositionNearType(self, tagSet, idx):
        # Backward-compatibility helper: resolve *tagSet* to a component
        # position at or after *idx*; without a schema, *idx* is echoed back.
        if self.componentType:
            return self.componentType.getPositionNearType(tagSet, idx)
        else:
            return idx
+
+
class Set(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()

    def getComponent(self, innerFlag=False):
        # The SET itself is the outermost component; Choice overrides this
        # to return the chosen alternative instead.
        return self

    def getComponentByType(self, tagSet, default=noValue,
                           instantiate=True, innerFlag=False):
        """Returns |ASN.1| type component by ASN.1 tag.

        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component

        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.

        instantiate: :class:`bool`
            If :obj:`True` (default), inner component will be automatically
            instantiated.
            If :obj:`False` either existing component or the :class:`noValue`
            object will be returned.

        innerFlag: :class:`bool`
            If :obj:`True` and the found component is itself a Set/Choice,
            descend into it and return its inner component.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a pyasn1 object
        """
        componentValue = self.getComponentByPosition(
            self.componentType.getPositionByType(tagSet),
            default=default, instantiate=instantiate
        )
        if innerFlag and isinstance(componentValue, Set):
            # get inner component by inner tagSet
            return componentValue.getComponent(innerFlag=True)
        else:
            # get outer component by inner tagSet
            return componentValue

    def setComponentByType(self, tagSet, value=noValue,
                           verifyConstraints=True,
                           matchTags=True,
                           matchConstraints=True,
                           innerFlag=False):
        """Assign |ASN.1| type component by ASN.1 tag.

        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
            If `value` is not given, schema object will be set as a component.

        verifyConstraints : :class:`bool`
            If :obj:`False`, skip constraints validation

        matchTags: :class:`bool`
            If :obj:`False`, skip component tags matching

        matchConstraints: :class:`bool`
            If :obj:`False`, skip component constraints matching

        innerFlag: :class:`bool`
            If :obj:`True`, search for matching *tagSet* recursively.

        Returns
        -------
        self
        """
        idx = self.componentType.getPositionByType(tagSet)

        if innerFlag:  # set inner component by inner tagSet
            componentType = self.componentType.getTypeByPosition(idx)

            if componentType.tagSet:
                return self.setComponentByPosition(
                    idx, value, verifyConstraints, matchTags, matchConstraints
                )
            else:
                # Untagged inner field: descend into the nested component
                # and set by tag inside it.
                componentType = self.getComponentByPosition(idx)
                return componentType.setComponentByType(
                    tagSet, value, verifyConstraints, matchTags, matchConstraints, innerFlag=innerFlag
                )
        else:  # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints, matchTags, matchConstraints
            )

    @property
    def componentTagMap(self):
        # Unique tag map over the schema-declared components (SET members
        # are distinguished by tag on the wire).
        if self.componentType:
            return self.componentType.tagMapUnique
+
+
class Choice(Set):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
    its objects are mutable and duck-type Python :class:`list` objects.

    Keyword Args
    ------------
    componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
        Object holding named ASN.1 types allowed within this collection

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type can only occur on explicit
        `.isInconsistent` call.

    Examples
    --------

    .. code-block:: python

        class Afters(Choice):
            '''
            ASN.1 specification:

            Afters ::= CHOICE {
                cheese  [0] IA5String,
                dessert [1] IA5String
            }
            '''
            componentType = NamedTypes(
                NamedType('cheese', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
                )),
                NamedType('dessert', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
                ))
            )

        afters = Afters()
        afters['cheese'] = 'Mascarpone'
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet()  # untagged

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1)
    )

    # Disambiguation ASN.1 types identification
    typeId = Set.getTypeId()

    # Index of the currently chosen alternative; None when not chosen
    _currentIdx = None

    def __eq__(self, other):
        # Comparisons delegate to the single chosen component.
        if self._componentValues:
            return self._componentValues[self._currentIdx] == other
        return NotImplemented

    def __ne__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] != other
        return NotImplemented

    def __lt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] < other
        return NotImplemented

    def __le__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] <= other
        return NotImplemented

    def __gt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] > other
        return NotImplemented

    def __ge__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] >= other
        return NotImplemented

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            # Truthy once a component has been assigned (py2 spelling).
            return self._componentValues and True or False
    else:
        def __bool__(self):
            # Truthy once a component has been assigned (py3 spelling).
            return self._componentValues and True or False

    def __len__(self):
        # A Choice holds at most one chosen component.
        return self._currentIdx is not None and 1 or 0

    def __contains__(self, key):
        if self._currentIdx is None:
            return False
        return key == self.componentType[self._currentIdx].getName()

    def __iter__(self):
        # NOTE(review): raising StopIteration inside a generator is a py2
        # idiom; under PEP 479 (py3.7+) it surfaces as RuntimeError. This
        # is the py2 copy of pyasn1, so it is left as-is -- confirm if this
        # file is ever shared with py3.
        if self._currentIdx is None:
            raise StopIteration
        yield self.componentType[self._currentIdx].getName()

    # Python dict protocol

    def values(self):
        # Yield the chosen component value, if any.
        if self._currentIdx is not None:
            yield self._componentValues[self._currentIdx]

    def keys(self):
        # Yield the name of the chosen alternative, if any.
        if self._currentIdx is not None:
            yield self.componentType[self._currentIdx].getName()

    def items(self):
        # Yield the (name, value) pair of the chosen alternative, if any.
        if self._currentIdx is not None:
            yield self.componentType[self._currentIdx].getName(), self[self._currentIdx]

    def checkConsistency(self):
        # A CHOICE must have exactly one alternative chosen.
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')

    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Copy the chosen component (if any) into *myClone*, preserving
        # the selection by re-setting it via its effective tag.
        try:
            component = self.getComponent()
        except error.PyAsn1Error:
            # No component chosen -- nothing to clone.
            pass
        else:
            if isinstance(component, Choice):
                tagSet = component.effectiveTagSet
            else:
                tagSet = component.tagSet
            if isinstance(component, base.ConstructedAsn1Type):
                myClone.setComponentByType(
                    tagSet, component.clone(cloneValueFlag=cloneValueFlag)
                )
            else:
                myClone.setComponentByType(tagSet, component.clone())

    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        __doc__ = Set.__doc__  # no-op local assignment, kept as-is

        # Fast path: asking for the currently chosen component.
        if self._currentIdx is None or self._currentIdx != idx:
            return Set.getComponentByPosition(self, idx, default=default,
                                              instantiate=instantiate)

        return self._componentValues[idx]

    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.

        Equivalent to Python sequence item assignment operation (e.g. `[]`).

        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to existing
            component or to N+1 component. In the latter case a new component
            type gets instantiated (if *componentType* is set, or given ASN.1
            object is taken otherwise) and appended to the |ASN.1| sequence.

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component. Once a new value is
            set to *idx* component, previous value is dropped.
            If `value` is not given, schema object will be set as a component.

        verifyConstraints : :class:`bool`
            If :obj:`False`, skip constraints validation

        matchTags: :class:`bool`
            If :obj:`False`, skip component tags matching

        matchConstraints: :class:`bool`
            If :obj:`False`, skip component constraints matching

        Returns
        -------
        self
        """
        oldIdx = self._currentIdx
        Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
        self._currentIdx = idx
        if oldIdx is not None and oldIdx != idx:
            # Only one alternative may be set at a time -- drop the old one.
            self._componentValues[oldIdx] = noValue
        return self

    @property
    def effectiveTagSet(self):
        """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently initialized component or self (if |ASN.1| is tagged)."""
        if self.tagSet:
            return self.tagSet
        else:
            component = self.getComponent()
            return component.effectiveTagSet

    @property
    def tagMap(self):
        """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
        ASN.1 tags to ASN.1 objects contained within callee.
        """
        if self.tagSet:
            return Set.tagMap.fget(self)
        else:
            # Untagged CHOICE: expose the unique tag map of alternatives.
            return self.componentType.tagMapUnique

    def getComponent(self, innerFlag=False):
        """Return currently assigned component of the |ASN.1| object.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a PyASN1 object

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            If no component has been chosen yet.
        """
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            c = self._componentValues[self._currentIdx]
            if innerFlag and isinstance(c, Choice):
                # Descend into nested Choice objects for the innermost value.
                return c.getComponent(innerFlag)
            else:
                return c

    def getName(self, innerFlag=False):
        """Return the name of currently assigned component of the |ASN.1| object.

        Returns
        -------
        : :py:class:`str`
            |ASN.1| component name

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            If no component has been chosen yet.
        """
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            if innerFlag:
                c = self._componentValues[self._currentIdx]
                if isinstance(c, Choice):
                    # Recurse into nested Choice for the innermost name.
                    return c.getName(innerFlag)
            return self.componentType.getNameByPosition(self._currentIdx)

    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.

        If *isValue* is :obj:`False` then this object represents just ASN.1 schema.

        If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
        this object can also be used like a Python built-in object (e.g.
        :class:`int`, :class:`str`, :class:`dict` etc.).

        Returns
        -------
        : :class:`bool`
            :obj:`False` if object represents just ASN.1 schema.
            :obj:`True` if object represents ASN.1 schema and can be used as a normal
            value.

        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.

        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).
        """
        if self._currentIdx is None:
            return False

        componentValue = self._componentValues[self._currentIdx]

        return componentValue is not noValue and componentValue.isValue

    def clear(self):
        # Forget the selection in addition to clearing stored values.
        self._currentIdx = None
        return Set.clear(self)

    # compatibility stubs

    def getMinTagSet(self):
        return self.minTagSet
+
+
class Any(OctetString):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
    its objects are immutable and duck-type Python 2 :class:`str` or Python 3
    :class:`bytes`. When used in Unicode context, |ASN.1| type assumes
    "|encoding|" serialisation.

    Keyword Args
    ------------
    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
        :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively
        :class:`unicode` object (Python 2) or :class:`str` (Python 3)
        representing character string to be serialised into octets (note
        `encoding` parameter) or |ASN.1| object.
        If `value` is not given, schema object will be created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type occurs automatically on object
        instantiation.

    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in text string context.

    binValue: :py:class:`str`
        Binary string initializer to use instead of the *value*.
        Example: '10110011'.

    hexValue: :py:class:`str`
        Hexadecimal string initializer to use instead of the *value*.
        Example: 'DEADBEEF'.

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class Error(Sequence):
            '''
            ASN.1 specification:

            Error ::= SEQUENCE {
                code      INTEGER,
                parameter ANY DEFINED BY code  -- Either INTEGER or REAL
            }
            '''
            componentType=NamedTypes(
                NamedType('code', Integer()),
                NamedType('parameter', Any(),
                          openType=OpenType('code', {1: Integer(),
                                                     2: Real()}))
            )

        error = Error()
        error['code'] = 1
        error['parameter'] = Integer(1234)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet()  # untagged

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = OctetString.getTypeId()

    @property
    def tagMap(self):
        """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
        ASN.1 tags to ASN.1 objects contained within callee.
        """
        try:
            return self._tagMap

        except AttributeError:
            # Lazily build and cache the tag map; this object is passed as
            # the default (catch-all) entry, with end-of-octets explicitly
            # excluded.
            self._tagMap = tagmap.TagMap(
                {self.tagSet: self},
                {eoo.endOfOctets.tagSet: eoo.endOfOctets},
                self
            )

            return self._tagMap
+
+# XXX
+# coercion rules?
diff --git a/contrib/python/pyasn1/py2/pyasn1/type/useful.py b/contrib/python/pyasn1/py2/pyasn1/type/useful.py
new file mode 100644
index 0000000000..a8ae874057
--- /dev/null
+++ b/contrib/python/pyasn1/py2/pyasn1/type/useful.py
@@ -0,0 +1,189 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import datetime
+
+from pyasn1 import error
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
+
+NoValue = univ.NoValue
+noValue = univ.noValue
+
+
class ObjectDescriptor(char.GraphicString):
    # ObjectDescriptor is a GraphicString carrying the implicit
    # UNIVERSAL 7 tag; everything else is inherited unchanged.
    __doc__ = char.GraphicString.__doc__

    #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
    tagSet = char.GraphicString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
    )

    # Optimization for faster codec lookup
    typeId = char.GraphicString.getTypeId()
+
+
class TimeMixIn(object):
    """Shared date/time parsing and rendering for GeneralizedTime/UTCTime.

    Subclasses tune the accepted/produced syntax through the class
    attributes below.
    """

    # Number of digits in the year field (4 -> '%Y', 2 -> '%y')
    _yearsDigits = 4
    # Whether fromDateTime() renders fractional seconds
    _hasSubsecond = False
    # Whether minutes+seconds may be omitted in the serialised form
    _optionalMinutes = False
    # Whether a 2-digit (hours-only) timezone offset is accepted
    _shortTZ = False

    class FixedOffset(datetime.tzinfo):
        """Fixed offset in minutes east from UTC."""

        # defaulted arguments required
        # https://docs.python.org/2/library/datetime.html#tzinfo-objects
        def __init__(self, offset=0, name='UTC'):
            self.__offset = datetime.timedelta(minutes=offset)
            self.__name = name

        def utcoffset(self, dt):
            return self.__offset

        def tzname(self, dt):
            return self.__name

        def dst(self, dt):
            # Fixed offsets carry no daylight-saving component.
            return datetime.timedelta(0)

    UTC = FixedOffset()

    @property
    def asDateTime(self):
        """Create :py:class:`datetime.datetime` object from a |ASN.1| object.

        Returns
        -------
        :
            new instance of :py:class:`datetime.datetime` object

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            On malformed time zone offset, fraction or datetime body.
        """
        text = str(self)
        # Trailing 'Z' means UTC; otherwise an explicit +/-HHMM offset
        # (or +/-HH when _shortTZ permits); otherwise local/naive time.
        if text.endswith('Z'):
            tzinfo = TimeMixIn.UTC
            text = text[:-1]

        elif '-' in text or '+' in text:
            if '+' in text:
                text, plusminus, tz = text.partition('+')
            else:
                text, plusminus, tz = text.partition('-')

            if self._shortTZ and len(tz) == 2:
                # Hours-only offset: assume zero minutes.
                tz += '00'

            if len(tz) != 4:
                raise error.PyAsn1Error('malformed time zone offset %s' % tz)

            try:
                minutes = int(tz[:2]) * 60 + int(tz[2:])
                if plusminus == '-':
                    minutes *= -1

            except ValueError:
                raise error.PyAsn1Error('unknown time specification %s' % self)

            tzinfo = TimeMixIn.FixedOffset(minutes, '?')

        else:
            tzinfo = None

        if '.' in text or ',' in text:
            if '.' in text:
                text, _, ms = text.partition('.')
            else:
                text, _, ms = text.partition(',')

            try:
                # The fraction may carry any number of digits; pad/truncate
                # to exactly six to obtain microseconds (the previous code
                # multiplied by 1000, which was only correct for a 3-digit
                # fraction: '.5' became 5000us instead of 500000us).
                ms = int(ms[:6].ljust(6, '0'))

            except ValueError:
                raise error.PyAsn1Error('bad sub-second time specification %s' % self)

        else:
            ms = 0

        # Re-expand forms with omitted minutes/seconds so a single
        # strptime format can parse the body.
        if self._optionalMinutes and len(text) - self._yearsDigits == 6:
            text += '0000'
        elif len(text) - self._yearsDigits == 8:
            text += '00'

        try:
            dt = datetime.datetime.strptime(text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')

        except ValueError:
            raise error.PyAsn1Error('malformed datetime format %s' % self)

        return dt.replace(microsecond=ms, tzinfo=tzinfo)

    @classmethod
    def fromDateTime(cls, dt):
        """Create |ASN.1| object from a :py:class:`datetime.datetime` object.

        Parameters
        ----------
        dt: :py:class:`datetime.datetime` object
            The `datetime.datetime` object to initialize the |ASN.1| object
            from

        Returns
        -------
        :
            new instance of |ASN.1| value
        """
        text = dt.strftime(cls._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
        if cls._hasSubsecond:
            text += '.%d' % (dt.microsecond // 1000)

        offset = dt.utcoffset()
        if offset:
            # timedelta.seconds is always non-negative (the sign lives in
            # .days), so use total_seconds() to get a signed offset, and
            # convert the remainder to *minutes* -- the previous code
            # emitted leftover seconds, rendering UTC+05:30 as '+051800'
            # and mangling all negative offsets.
            seconds = int(offset.total_seconds())
            if seconds < 0:
                text += '-'
                seconds = -seconds
            else:
                text += '+'
            text += '%.2d%.2d' % (seconds // 3600, (seconds % 3600) // 60)
        else:
            # Naive datetimes and exact-UTC offsets both render as 'Z'.
            text += 'Z'

        return cls(text)
+
+
class GeneralizedTime(char.VisibleString, TimeMixIn):
    __doc__ = char.VisibleString.__doc__

    #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
    tagSet = char.VisibleString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
    )

    # Optimization for faster codec lookup.
    # NOTE(review): getTypeId() hands out a fresh unique ID on every call,
    # so invoking it on VideotexString (rather than VisibleString) still
    # yields a distinct ID -- presumably a copy-paste slip inherited from
    # upstream pyasn1; confirm before "fixing".
    typeId = char.VideotexString.getTypeId()

    # GeneralizedTime syntax: 4-digit year, fractional seconds allowed,
    # minutes/seconds optional, 2- or 4-digit timezone offset accepted.
    _yearsDigits = 4
    _hasSubsecond = True
    _optionalMinutes = True
    _shortTZ = True
+
+
class UTCTime(char.VisibleString, TimeMixIn):
    __doc__ = char.VisibleString.__doc__

    #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
    tagSet = char.VisibleString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
    )

    # Optimization for faster codec lookup.
    # NOTE(review): see GeneralizedTime -- getTypeId() is unique per call,
    # so using VideotexString here is harmless though likely unintended.
    typeId = char.VideotexString.getTypeId()

    # UTCTime syntax: 2-digit year, no fractional seconds, minutes and
    # seconds mandatory, full 4-digit timezone offset required.
    _yearsDigits = 2
    _hasSubsecond = False
    _optionalMinutes = False
    _shortTZ = False
diff --git a/contrib/python/pyasn1/py2/tests/__init__.py b/contrib/python/pyasn1/py2/tests/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/__main__.py b/contrib/python/pyasn1/py2/tests/__main__.py
new file mode 100644
index 0000000000..d32d511557
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/__main__.py
@@ -0,0 +1,18 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.test_debug.suite',
+ 'tests.type.__main__.suite',
+ 'tests.codec.__main__.suite',
+ 'tests.compat.__main__.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/base.py b/contrib/python/pyasn1/py2/tests/base.py
new file mode 100644
index 0000000000..f7513d8d9e
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/base.py
@@ -0,0 +1,18 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+from pyasn1 import debug
+
+
+class BaseTestCase(unittest.TestCase):
+
+ def setUp(self):
+ debug.setLogger(debug.Debug('all', printer=lambda *x: None))
+
+ def tearDown(self):
+ debug.setLogger(None)
diff --git a/contrib/python/pyasn1/py2/tests/codec/__init__.py b/contrib/python/pyasn1/py2/tests/codec/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/codec/__main__.py b/contrib/python/pyasn1/py2/tests/codec/__main__.py
new file mode 100644
index 0000000000..b02f0723ca
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/__main__.py
@@ -0,0 +1,19 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.test_streaming.suite',
+ 'tests.codec.ber.__main__.suite',
+ 'tests.codec.cer.__main__.suite',
+ 'tests.codec.der.__main__.suite',
+ 'tests.codec.native.__main__.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/ber/__init__.py b/contrib/python/pyasn1/py2/tests/codec/ber/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/ber/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/codec/ber/__main__.py b/contrib/python/pyasn1/py2/tests/codec/ber/__main__.py
new file mode 100644
index 0000000000..ff38c97011
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/ber/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.ber.test_encoder.suite',
+ 'tests.codec.ber.test_decoder.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/ber/test_decoder.py b/contrib/python/pyasn1/py2/tests/codec/ber/test_decoder.py
new file mode 100644
index 0000000000..9e238cd458
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/ber/test_decoder.py
@@ -0,0 +1,1847 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import gzip
+import io
+import os
+import sys
+import tempfile
+import unittest
+import zipfile
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import char
+from pyasn1.codec import streaming
+from pyasn1.codec.ber import decoder
+from pyasn1.codec.ber import eoo
+from pyasn1.compat.octets import ints2octs, str2octs, null
+from pyasn1 import error
+
+
+class LargeTagDecoderTestCase(BaseTestCase):
+ def testLargeTag(self):
+ assert decoder.decode(ints2octs((127, 141, 245, 182, 253, 47, 3, 2, 1, 1))) == (1, null)
+
+ def testLongTag(self):
+ assert decoder.decode(ints2octs((0x1f, 2, 1, 0)))[0].tagSet == univ.Integer.tagSet
+
+ def testTagsEquivalence(self):
+ integer = univ.Integer(2).subtype(implicitTag=tag.Tag(tag.tagClassContext, 0, 0))
+ assert decoder.decode(ints2octs((0x9f, 0x80, 0x00, 0x02, 0x01, 0x02)), asn1Spec=integer) == decoder.decode(
+ ints2octs((0x9f, 0x00, 0x02, 0x01, 0x02)), asn1Spec=integer)
+
+
+class DecoderCacheTestCase(BaseTestCase):
+ def testCache(self):
+ assert decoder.decode(ints2octs((0x1f, 2, 1, 0))) == decoder.decode(ints2octs((0x1f, 2, 1, 0)))
+
+
+class IntegerDecoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert decoder.decode(ints2octs((2, 1, 12))) == (12, null)
+
+ def testNegInt(self):
+ assert decoder.decode(ints2octs((2, 1, 244))) == (-12, null)
+
+ def testZero(self):
+ assert decoder.decode(ints2octs((2, 0))) == (0, null)
+
+ def testZeroLong(self):
+ assert decoder.decode(ints2octs((2, 1, 0))) == (0, null)
+
+ def testMinusOne(self):
+ assert decoder.decode(ints2octs((2, 1, 255))) == (-1, null)
+
+ def testPosLong(self):
+ assert decoder.decode(
+ ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))
+ ) == (0xffffffffffffffff, null)
+
+ def testNegLong(self):
+ assert decoder.decode(
+ ints2octs((2, 9, 255, 0, 0, 0, 0, 0, 0, 0, 1))
+ ) == (-0xffffffffffffffff, null)
+
+ def testSpec(self):
+ try:
+ decoder.decode(
+ ints2octs((2, 1, 12)), asn1Spec=univ.Null()
+ ) == (12, null)
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong asn1Spec worked out'
+ assert decoder.decode(
+ ints2octs((2, 1, 12)), asn1Spec=univ.Integer()
+ ) == (12, null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((34, 1, 12)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class BooleanDecoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert decoder.decode(ints2octs((1, 1, 1))) == (1, null)
+
+ def testTrueNeg(self):
+ assert decoder.decode(ints2octs((1, 1, 255))) == (1, null)
+
+ def testExtraTrue(self):
+ assert decoder.decode(ints2octs((1, 1, 1, 0, 120, 50, 50))) == (1, ints2octs((0, 120, 50, 50)))
+
+ def testFalse(self):
+ assert decoder.decode(ints2octs((1, 1, 0))) == (0, null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((33, 1, 1)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((3, 3, 1, 169, 138))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((3, 3, 1, 169, 138))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testDefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((3, 2, 0, 169, 3, 2, 1, 138)), str2octs(''))
+
+ def testIndefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((3, 2, 0, 169, 3, 2, 1, 138, 0, 0)), str2octs(''))
+
+ def testTypeChecking(self):
+ try:
+ decoder.decode(ints2octs((35, 4, 2, 2, 42, 42)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'accepted mis-encoded bit-string constructed out of an integer'
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs(
+ (36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testDefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs(
+ (36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120)), str2octs(''))
+
+ def testIndefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111,
+ 120, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0)), str2octs(''))
+
+
+class ExpTaggedOctetStringDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.o = univ.OctetString(
+ 'Quick brown fox',
+ tagSet=univ.OctetString.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 5)
+ ))
+
+ def testDefMode(self):
+ o, r = decoder.decode(
+ ints2octs((101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testIndefMode(self):
+ o, r = decoder.decode(
+ ints2octs((101, 128, 36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0, 0, 0))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testDefModeChunked(self):
+ o, r = decoder.decode(
+ ints2octs((101, 25, 36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testIndefModeChunked(self):
+ o, r = decoder.decode(
+ ints2octs((101, 128, 36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0, 0, 0))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testDefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)), str2octs(''))
+
+ def testIndefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((
+ 101, 128, 36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0,
+ 0, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0, 0, 0)), str2octs(''))
+
+
+class NullDecoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert decoder.decode(ints2octs((5, 0))) == (null, null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((37, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+# Useful analysis of OID encoding issues could be found here:
+# https://misc.daniel-marschall.de/asn.1/oid_facts.html
+class ObjectIdentifierDecoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert decoder.decode(
+ ints2octs((6, 6, 43, 6, 0, 191, 255, 126))
+ ) == ((1, 3, 6, 0, 0xffffe), null)
+
+ def testEdge1(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 39))
+ ) == ((0, 39), null)
+
+ def testEdge2(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 79))
+ ) == ((1, 39), null)
+
+ def testEdge3(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 120))
+ ) == ((2, 40), null)
+
+ def testEdge4(self):
+ assert decoder.decode(
+ ints2octs((6, 5, 0x90, 0x80, 0x80, 0x80, 0x4F))
+ ) == ((2, 0xffffffff), null)
+
+ def testEdge5(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 0x7F))
+ ) == ((2, 47), null)
+
+ def testEdge6(self):
+ assert decoder.decode(
+ ints2octs((6, 2, 0x81, 0x00))
+ ) == ((2, 48), null)
+
+ def testEdge7(self):
+ assert decoder.decode(
+ ints2octs((6, 3, 0x81, 0x34, 0x03))
+ ) == ((2, 100, 3), null)
+
+ def testEdge8(self):
+ assert decoder.decode(
+ ints2octs((6, 2, 133, 0))
+ ) == ((2, 560), null)
+
+ def testEdge9(self):
+ assert decoder.decode(
+ ints2octs((6, 4, 0x88, 0x84, 0x87, 0x02))
+ ) == ((2, 16843570), null)
+
+ def testNonLeading0x80(self):
+ assert decoder.decode(
+ ints2octs((6, 5, 85, 4, 129, 128, 0)),
+ ) == ((2, 5, 4, 16384), null)
+
+ def testLeading0x80Case1(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 5, 85, 4, 128, 129, 0))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testLeading0x80Case2(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 7, 1, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7F))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testLeading0x80Case3(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 2, 0x80, 1))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testLeading0x80Case4(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 2, 0x80, 0x7F))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((38, 1, 239)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+ def testZeroLength(self):
+ try:
+ decoder.decode(ints2octs((6, 0, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'zero length tolerated'
+
+ def testIndefiniteLength(self):
+ try:
+ decoder.decode(ints2octs((6, 128, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'indefinite length tolerated'
+
+ def testReservedLength(self):
+ try:
+ decoder.decode(ints2octs((6, 255, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'reserved length tolerated'
+
+ def testLarge1(self):
+ assert decoder.decode(
+ ints2octs((0x06, 0x11, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6, 0xB8, 0xCB, 0xE2, 0xB7, 0x17))
+ ) == ((2, 18446744073709551535184467440737095), null)
+
+ def testLarge2(self):
+ assert decoder.decode(
+ ints2octs((0x06, 0x13, 0x88, 0x37, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6, 0xB8, 0xCB, 0xE2, 0xB6, 0x47))
+ ) == ((2, 999, 18446744073709551535184467440737095), null)
+
+
+class RealDecoderTestCase(BaseTestCase):
+ def testChar(self):
+ assert decoder.decode(
+ ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))
+ ) == (univ.Real((123, 10, 11)), null)
+
+ def testBin1(self): # check base = 2
+ assert decoder.decode( # (0.5, 2, 0) encoded with base = 2
+ ints2octs((9, 3, 128, 255, 1))
+ ) == (univ.Real((1, 2, -1)), null)
+
+ def testBin2(self): # check base = 2 and scale factor
+ assert decoder.decode( # (3.25, 2, 0) encoded with base = 8
+ ints2octs((9, 3, 148, 255, 13))
+ ) == (univ.Real((26, 2, -3)), null)
+
+ def testBin3(self): # check base = 16
+ assert decoder.decode( # (0.00390625, 2, 0) encoded with base = 16
+ ints2octs((9, 3, 160, 254, 1))
+ ) == (univ.Real((1, 2, -8)), null)
+
+ def testBin4(self): # check exponent = 0
+ assert decoder.decode( # (1, 2, 0) encoded with base = 2
+ ints2octs((9, 3, 128, 0, 1))
+ ) == (univ.Real((1, 2, 0)), null)
+
+ def testBin5(self): # case of 2 octs for exponent and negative exponent
+ assert decoder.decode( # (3, 2, -1020) encoded with base = 16
+ ints2octs((9, 4, 161, 255, 1, 3))
+ ) == (univ.Real((3, 2, -1020)), null)
+
+# TODO: this requires Real type comparison fix
+
+# def testBin6(self):
+# assert decoder.decode(
+# ints2octs((9, 5, 162, 0, 255, 255, 1))
+# ) == (univ.Real((1, 2, 262140)), null)
+
+# def testBin7(self):
+# assert decoder.decode(
+# ints2octs((9, 7, 227, 4, 1, 35, 69, 103, 1))
+# ) == (univ.Real((-1, 2, 76354972)), null)
+
+ def testPlusInf(self):
+ assert decoder.decode(
+ ints2octs((9, 1, 64))
+ ) == (univ.Real('inf'), null)
+
+ def testMinusInf(self):
+ assert decoder.decode(
+ ints2octs((9, 1, 65))
+ ) == (univ.Real('-inf'), null)
+
+ def testEmpty(self):
+ assert decoder.decode(
+ ints2octs((9, 0))
+ ) == (univ.Real(0.0), null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((41, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+ def testShortEncoding(self):
+ try:
+ decoder.decode(ints2octs((9, 1, 131)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'accepted too-short real'
+
+
+class UniversalStringDecoderTestCase(BaseTestCase):
+ def testDecoder(self):
+ assert decoder.decode(ints2octs((28, 12, 0, 0, 0, 97, 0, 0, 0, 98, 0, 0, 0, 99))) == (char.UniversalString(sys.version_info[0] >= 3 and 'abc' or unicode('abc')), null)
+
+
+class BMPStringDecoderTestCase(BaseTestCase):
+ def testDecoder(self):
+ assert decoder.decode(ints2octs((30, 6, 0, 97, 0, 98, 0, 99))) == (char.BMPString(sys.version_info[0] >= 3 and 'abc' or unicode('abc')), null)
+
+
+class UTF8StringDecoderTestCase(BaseTestCase):
+ def testDecoder(self):
+ assert decoder.decode(ints2octs((12, 3, 97, 98, 99))) == (char.UTF8String(sys.version_info[0] >= 3 and 'abc' or unicode('abc')), null)
+
+
+class SequenceOfDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+ ) == (self.s, null)
+
+ def testSchemalessDecoder(self):
+ assert decoder.decode(
+ ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=univ.SequenceOf()
+ ) == (self.s, null)
+
+
+class ExpTaggedSequenceOfDecoderTestCase(BaseTestCase):
+
+ def testWithSchema(self):
+ s = univ.SequenceOf().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))
+ s2, r = decoder.decode(
+ ints2octs((163, 15, 48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=s
+ )
+ assert not r
+ assert s2 == [str2octs('quick brown')]
+ assert s.tagSet == s2.tagSet
+
+ def testWithoutSchema(self):
+ s = univ.SequenceOf().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))
+ s2, r = decoder.decode(
+ ints2octs((163, 15, 48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ )
+ assert not r
+ assert s2 == [str2octs('quick brown')]
+ assert s.tagSet == s2.tagSet
+
+
+class SequenceOfDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SetOfDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+ ) == (self.s, null)
+
+ def testSchemalessDecoder(self):
+ assert decoder.decode(
+ ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=univ.SetOf()
+ ) == (self.s, null)
+
+
+class SetOfDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SequenceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.NamedType('first-name', univ.OctetString(null)),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs(
+ (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), str2octs(''))
+
+ def testWithOptionalAndDefaultedIndefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), str2octs(''))
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(
+ ints2octs((16, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class SequenceDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 2, 5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 2, 5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalDefMode(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionaIndefMode(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalDefModeChunked(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalIndefModeChunked(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
+ 0, 0, 0)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedDefMode(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedDefModeChunked(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedIndefModeChunked(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1,
+ 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs(
+ (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
+ 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SequenceDecoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except error.PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 3, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='060127')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+
class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
    """Open-type resolution when the ANY component carries an implicit context tag [3]."""

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType(
                    'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
                )
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        # 131 == 0x83: context-class implicit tag [3] wrapping the INTEGER payload.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 1, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1] == 12

    def testDecodeOpenTypesUnknownId(self):
        # Unmapped id=3: payload survives as raw serialized bytes.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 3, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1] == univ.OctetString(hexValue='02010C')
+
+
class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
    """Open-type resolution when the ANY component carries an explicit context tag [3]."""

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType(
                    'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
                )
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        # 163 == 0xA3: constructed explicit tag [3] around the INTEGER payload.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 1, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1] == 12

    def testDecodeOpenTypesUnknownId(self):
        # Unmapped id=3: payload survives as raw serialized bytes.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 3, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1] == univ.OctetString(hexValue='02010C')
+
+
class SequenceDecoderWithUnaggedSetOfOpenTypesTestCase(BaseTestCase):
    """Open-type resolution applied to every element of a SET OF ANY component.

    NOTE(review): "Unagged" in the class name is a typo for "Untagged";
    kept as-is to preserve the test identity of this vendored copy.
    """

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.SetOf(componentType=univ.Any()),
                                    openType=openType)
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        # 49 == 0x31: the SET OF wrapper; its single ANY element decodes as INTEGER.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s,
            decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1][0] == 12

    def testDecodeOpenTypesChoiceTwo(self):
        s, r = decoder.decode(
            ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
                       107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
            decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 2
        assert s[1][0] == univ.OctetString('quick brown')

    def testDecodeOpenTypesUnknownType(self):
        # Payload disagrees with the mapped type — decoding must fail.
        try:
            s, r = decoder.decode(
                ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
                decodeOpenTypes=True
            )

        except error.PyAsn1Error:
            pass

        else:
            assert False, 'unknown open type tolerated'

    def testDecodeOpenTypesUnknownId(self):
        # Unmapped id=3: the SET OF element is kept as raw serialized bytes.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 3, 49, 3, 2, 1, 12)), asn1Spec=self.s,
            decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1][0] == univ.OctetString(hexValue='02010c')

    def testDontDecodeOpenTypesChoiceOne(self):
        # Without decodeOpenTypes each element stays an opaque serialization.
        s, r = decoder.decode(
            ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s
        )
        assert not r
        assert s[0] == 1
        assert s[1][0] == ints2octs((2, 1, 12))

    def testDontDecodeOpenTypesChoiceTwo(self):
        s, r = decoder.decode(
            ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
                       107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
        )
        assert not r
        assert s[0] == 2
        assert s[1][0] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114,
                                     111, 119, 110))
+
+
class SequenceDecoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
    """SET OF ANY open-type resolution where each element carries an implicit tag [3]."""

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType(
                    'blob', univ.SetOf(
                        componentType=univ.Any().subtype(
                            implicitTag=tag.Tag(
                                tag.tagClassContext, tag.tagFormatSimple, 3))),
                    openType=openType
                )
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        # 131 == 0x83: implicit context tag [3] on the SET OF element.
        s, r = decoder.decode(
            ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
            asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1][0] == 12

    def testDecodeOpenTypesUnknownId(self):
        # Unmapped id=3: element kept as raw serialized bytes.
        s, r = decoder.decode(
            ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
            asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
class SequenceDecoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
    """SET OF ANY open-type resolution where each element carries an explicit tag [3].

    Fix: removed the stray space in ``ints2octs( (...`` that diverged from the
    call style used everywhere else in this file.
    """

    def setUp(self):
        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType(
                    'blob', univ.SetOf(
                        componentType=univ.Any().subtype(
                            explicitTag=tag.Tag(
                                tag.tagClassContext, tag.tagFormatSimple, 3))),
                    openType=openType
                )
            )
        )

    def testDecodeOpenTypesChoiceOne(self):
        # id=1 selects INTEGER for the tagged SET OF element.
        s, r = decoder.decode(
            ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
            asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 1
        assert s[1][0] == 12

    def testDecodeOpenTypesUnknownId(self):
        # id=3 is absent from the map: element kept as raw serialized bytes.
        s, r = decoder.decode(
            ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
            asn1Spec=self.s, decodeOpenTypes=True
        )
        assert not r
        assert s[0] == 3
        assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
class SetDecoderTestCase(BaseTestCase):
    """Schemaless decoding of a SET (tag 0x31) in definite/indefinite/chunked forms."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Set(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null(null)),
                namedtype.NamedType('first-name', univ.OctetString(null)),
                namedtype.NamedType('age', univ.Integer(33))
            )
        )
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def testWithOptionalAndDefaultedDefMode(self):
        assert decoder.decode(
            ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefMode(self):
        # 49, 128: SET with indefinite length, terminated by end-of-contents 0, 0.
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefModeChunked(self):
        # 36, 17: constructed OCTET STRING carrying the value in three chunks.
        assert decoder.decode(
            ints2octs(
                (49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefModeChunked(self):
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefModeSubst(self):
        # substrateFun bypasses decoding and hands back the raw inner substrate.
        assert decoder.decode(
            ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
            substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
        ) == (ints2octs((5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), str2octs(''))

    def testWithOptionalAndDefaultedIndefModeSubst(self):
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)),
            substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
        ) == (ints2octs(
            (5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), str2octs(''))

    def testTagFormat(self):
        # 16 lacks the constructed bit (0x20) required for SET — must be rejected.
        try:
            decoder.decode(
                ints2octs((16, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
            )
        except error.PyAsn1Error:
            pass
        else:
            assert 0, 'wrong tagFormat worked out'
+
+
class SetDecoderWithSchemaTestCase(BaseTestCase):
    """Schema-guided SET decoding with optional and defaulted components.

    The private __init* helpers reset self.s to the expected decoded value
    for each combination of present/absent optional and defaulted fields.
    """

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Set(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null(null)),
                namedtype.OptionalNamedType('first-name', univ.OctetString()),
                namedtype.DefaultedNamedType('age', univ.Integer(33)),
            )
        )

    def __init(self):
        # Only the mandatory NULL component is present.
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))

    def __initWithOptional(self):
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))

    def __initWithDefaulted(self):
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def __initWithOptionalAndDefaulted(self):
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def testDefMode(self):
        # NOTE(review): despite the name, this substrate is indefinite-length
        # (49, 128 ... 0, 0) — identical to testIndefMode; confirm upstream.
        self.__init()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testIndefMode(self):
        self.__init()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testDefModeChunked(self):
        # NOTE(review): this one is plain definite-length, not chunked.
        self.__init()
        assert decoder.decode(
            ints2octs((49, 2, 5, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testIndefModeChunked(self):
        self.__init()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalDefMode(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((49, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalIndefMode(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalDefModeChunked(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((49, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalIndefModeChunked(self):
        self.__initWithOptional()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedDefMode(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((49, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedIndefMode(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedDefModeChunked(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((49, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithDefaultedIndefModeChunked(self):
        self.__initWithDefaulted()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefMode(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefModeReordered(self):
        # SET components may appear in any order on the wire.
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((49, 18, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefMode(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefModeReordered(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((49, 128, 2, 1, 1, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedDefModeChunked(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithOptionalAndDefaultedIndefModeChunked(self):
        self.__initWithOptionalAndDefaulted()
        assert decoder.decode(
            ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
        ) == (self.s, null)
+
+
class SequenceOfWithExpTaggedOctetStringDecoder(BaseTestCase):
    """Decode a SEQUENCE OF explicitly tagged OCTET STRINGs, with and without a schema."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.SequenceOf(
            componentType=univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
        )
        self.s.setComponentByPosition(0, 'q')
        self.s2 = univ.SequenceOf()

    def _verify(self, decoded, trailer):
        # Common assertions shared by every test below: full consumption,
        # equal value and equal tagging.
        assert not trailer
        assert decoded == self.s
        assert decoded.tagSet == self.s.tagSet

    def testDefModeSchema(self):
        self._verify(*decoder.decode(
            ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s))

    def testIndefModeSchema(self):
        self._verify(*decoder.decode(
            ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s))

    def testDefModeNoComponent(self):
        self._verify(*decoder.decode(
            ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s2))

    def testIndefModeNoComponent(self):
        self._verify(*decoder.decode(
            ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s2))

    def testDefModeSchemaless(self):
        self._verify(*decoder.decode(
            ints2octs((48, 5, 163, 3, 4, 1, 113))))

    def testIndefModeSchemaless(self):
        self._verify(*decoder.decode(
            ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0))))
+
+
class SequenceWithExpTaggedOctetStringDecoder(BaseTestCase):
    """Decode a one-field SEQUENCE holding an explicitly tagged OCTET STRING."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType(
                    'x', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
                )
            )
        )
        self.s.setComponentByPosition(0, 'q')
        self.s2 = univ.Sequence()

    def _verify(self, decoded, trailer):
        # Common assertions shared by every test below.
        assert not trailer
        assert decoded == self.s
        assert decoded.tagSet == self.s.tagSet

    def testDefModeSchema(self):
        self._verify(*decoder.decode(
            ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s))

    def testIndefModeSchema(self):
        self._verify(*decoder.decode(
            ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s))

    def testDefModeNoComponent(self):
        self._verify(*decoder.decode(
            ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s2))

    def testIndefModeNoComponent(self):
        self._verify(*decoder.decode(
            ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s2))

    def testDefModeSchemaless(self):
        self._verify(*decoder.decode(
            ints2octs((48, 5, 163, 3, 4, 1, 113))))

    def testIndefModeSchemaless(self):
        self._verify(*decoder.decode(
            ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0))))
+
+
class ChoiceDecoderTestCase(BaseTestCase):
    """CHOICE decoding: alternative selection, tagging and indefinite length."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Choice(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null(null)),
                namedtype.NamedType('number', univ.Integer(0)),
                namedtype.NamedType('string', univ.OctetString())
            )
        )

    def testBySpec(self):
        self.s.setComponentByPosition(0, univ.Null(null))
        assert decoder.decode(
            ints2octs((5, 0)), asn1Spec=self.s
        ) == (self.s, null)

    def testWithoutSpec(self):
        # Without a spec the decoder yields the inner component; it compares
        # equal both to the CHOICE holding it and to the plain NULL value.
        self.s.setComponentByPosition(0, univ.Null(null))
        assert decoder.decode(ints2octs((5, 0))) == (self.s, null)
        assert decoder.decode(ints2octs((5, 0))) == (univ.Null(null), null)

    def testUndefLength(self):
        # 36, 128: constructed OCTET STRING with indefinite length, three chunks.
        self.s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
        assert decoder.decode(ints2octs((36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0)),
                              asn1Spec=self.s) == (self.s, null)

    def testExplicitTag(self):
        s = self.s.subtype(explicitTag=tag.Tag(tag.tagClassContext,
                                               tag.tagFormatConstructed, 4))
        s.setComponentByPosition(0, univ.Null(null))
        assert decoder.decode(ints2octs((164, 2, 5, 0)), asn1Spec=s) == (s, null)

    def testExplicitTagUndefLength(self):
        s = self.s.subtype(explicitTag=tag.Tag(tag.tagClassContext,
                                               tag.tagFormatConstructed, 4))
        s.setComponentByPosition(0, univ.Null(null))
        assert decoder.decode(ints2octs((164, 128, 5, 0, 0, 0)), asn1Spec=s) == (s, null)
+
+
class AnyDecoderTestCase(BaseTestCase):
    """ANY decoding: untagged, implicitly/explicitly tagged, and substrateFun pass-through."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Any()

    def testByUntagged(self):
        # Untagged ANY swallows the whole TLV ('\004\003fox' == the raw encoding).
        assert decoder.decode(
            ints2octs((4, 3, 102, 111, 120)), asn1Spec=self.s
        ) == (univ.Any('\004\003fox'), null)

    def testTaggedEx(self):
        s = univ.Any('\004\003fox').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
        assert decoder.decode(ints2octs((164, 5, 4, 3, 102, 111, 120)), asn1Spec=s) == (s, null)

    def testTaggedIm(self):
        # 132 == 0x84: primitive implicit context tag [4].
        s = univ.Any('\004\003fox').subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
        assert decoder.decode(ints2octs((132, 5, 4, 3, 102, 111, 120)), asn1Spec=s) == (s, null)

    def testByUntaggedIndefMode(self):
        # NOTE(review): substrate is identical to testByUntagged (definite
        # length) — presumably a leftover name; confirm upstream.
        assert decoder.decode(
            ints2octs((4, 3, 102, 111, 120)), asn1Spec=self.s
        ) == (univ.Any('\004\003fox'), null)

    def testTaggedExIndefMode(self):
        s = univ.Any('\004\003fox').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
        assert decoder.decode(ints2octs((164, 128, 4, 3, 102, 111, 120, 0, 0)), asn1Spec=s) == (s, null)

    def testTaggedImIndefMode(self):
        # NOTE(review): uses 164 (constructed form) even for the implicit-tag
        # case — indefinite length requires the constructed bit; confirm that
        # this is intentional and not a copy of the explicit-tag substrate.
        s = univ.Any('\004\003fox').subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
        assert decoder.decode(ints2octs((164, 128, 4, 3, 102, 111, 120, 0, 0)), asn1Spec=s) == (s, null)

    def testByUntaggedSubst(self):
        # substrateFun hands back the raw substrate instead of decoding it.
        assert decoder.decode(
            ints2octs((4, 3, 102, 111, 120)),
            asn1Spec=self.s,
            substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
        ) == (ints2octs((4, 3, 102, 111, 120)), str2octs(''))

    def testTaggedExSubst(self):
        assert decoder.decode(
            ints2octs((164, 5, 4, 3, 102, 111, 120)),
            asn1Spec=self.s,
            substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
        ) == (ints2octs((164, 5, 4, 3, 102, 111, 120)), str2octs(''))
+
+
class EndOfOctetsTestCase(BaseTestCase):
    """End-of-contents (EOO) octets must only be honoured where BER permits them."""

    def _assertRejected(self, octets, complaint):
        # Decoding the given substrate must raise PyAsn1Error.
        try:
            decoder.decode(ints2octs(octets))
        except error.PyAsn1Error:
            pass
        else:
            assert 0, complaint

    def testUnexpectedEoo(self):
        self._assertRejected(
            (0, 0), 'end-of-contents octets accepted at top level')

    def testExpectedEoo(self):
        # With allowEoo=True the EOO marker decodes to the endOfOctets singleton.
        result, remainder = decoder.decode(ints2octs((0, 0)), allowEoo=True)
        assert eoo.endOfOctets.isSameTypeWith(result) and result == eoo.endOfOctets and result is eoo.endOfOctets
        assert remainder == null

    def testDefiniteNoEoo(self):
        self._assertRejected(
            (0x23, 0x02, 0x00, 0x00),
            'end-of-contents octets accepted inside definite-length encoding')

    def testIndefiniteEoo(self):
        result, remainder = decoder.decode(ints2octs((0x23, 0x80, 0x00, 0x00)))
        assert result == () and remainder == null, 'incorrect decoding of indefinite length end-of-octets'

    def testNoLongFormEoo(self):
        self._assertRejected(
            (0x23, 0x80, 0x00, 0x81, 0x00),
            'end-of-contents octets accepted with invalid long-form length')

    def testNoConstructedEoo(self):
        self._assertRejected(
            (0x23, 0x80, 0x20, 0x00),
            'end-of-contents octets accepted with invalid constructed encoding')

    def testNoEooData(self):
        self._assertRejected(
            (0x23, 0x80, 0x00, 0x01, 0x00),
            'end-of-contents octets accepted with unexpected data')
+
+
class NonStringDecoderTestCase(BaseTestCase):
    """StreamingDecoder accepts pyasn1 octet-bearing objects (OctetString, Any) as substrate."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null(null)),
                namedtype.NamedType('first-name', univ.OctetString(null)),
                namedtype.NamedType('age', univ.Integer(33))
            )
        )
        self.s.setComponentByPosition(0, univ.Null(null))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))

        # Serialized form of self.s, used as the decoder input below.
        self.substrate = ints2octs([48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1])

    def testOctetString(self):
        s = list(decoder.StreamingDecoder(
            univ.OctetString(self.substrate), asn1Spec=self.s))
        assert [self.s] == s

    def testAny(self):
        s = list(decoder.StreamingDecoder(
            univ.Any(self.substrate), asn1Spec=self.s))
        assert [self.s] == s
+
+
class ErrorOnDecodingTestCase(BaseTestCase):
    """Error-handling behaviour of the single-item streaming decoder.

    Fixes: the local decoder subclass was misnamed ``SingleItemEncoder``
    although it subclasses a decoder; the dated ``sys.exc_info()[1]`` idiom
    is replaced with ``except ... as exc`` (same exception, same assertions).
    """

    def testErrorCondition(self):
        """A malformed substrate must raise PyAsn1Error rather than yield a value."""
        decode = decoder.SingleItemDecoder(
            tagMap=decoder.TAG_MAP, typeMap=decoder.TYPE_MAP)
        substrate = ints2octs((00, 1, 2))
        stream = streaming.asSeekableStream(substrate)

        try:
            asn1Object = next(decode(stream))

        except error.PyAsn1Error as exc:
            # `except ... as` already narrows the type; keep the explicit
            # check so a mismatch yields a readable failure message.
            assert isinstance(exc, error.PyAsn1Error), (
                'Unexpected exception raised %r' % (exc,))

        else:
            assert False, 'Unexpected decoder result %r' % (asn1Object,)

    def testRawDump(self):
        """With defaultErrorState = stDumpRawValue an undecodable item is dumped raw as Any."""
        substrate = ints2octs((31, 8, 2, 1, 1, 131, 3, 2, 1, 12))
        stream = streaming.asSeekableStream(substrate)

        class RawDumpSingleItemDecoder(decoder.SingleItemDecoder):
            defaultErrorState = decoder.stDumpRawValue

        class StreamingDecoder(decoder.StreamingDecoder):
            SINGLE_ITEM_DECODER = RawDumpSingleItemDecoder

        class OneShotDecoder(decoder.Decoder):
            STREAMING_DECODER = StreamingDecoder

        d = OneShotDecoder()

        asn1Object, rest = d(stream)

        assert isinstance(asn1Object, univ.Any), (
            'Unexpected raw dump type %r' % (asn1Object,))
        assert asn1Object.asNumbers() == (31, 8, 2, 1, 1), (
            'Unexpected raw dump value %r' % (asn1Object,))
        assert rest == ints2octs((131, 3, 2, 1, 12)), (
            'Unexpected rest of substrate after raw dump %r' % rest)
+
+
@unittest.skipIf(sys.version_info < (3,), "Unsupported on Python 2")
class BinaryFileTestCase(BaseTestCase):
    """Assure that decode works on open binary files."""

    def testOneObject(self):
        # A single INTEGER 12 round-trips through a temp file.
        _, path = tempfile.mkstemp()
        try:
            with open(path, "wb") as out:
                out.write(ints2octs((2, 1, 12)))

            with open(path, "rb") as source:
                values = list(decoder.StreamingDecoder(source))

            assert values == [12]
        finally:
            os.remove(path)

    def testMoreObjects(self):
        # INTEGER followed by an indefinite-length constructed BIT STRING.
        _, path = tempfile.mkstemp()
        try:
            with open(path, "wb") as out:
                out.write(ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))

            with open(path, "rb") as source:
                values = list(decoder.StreamingDecoder(source))

            assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]

        finally:
            os.remove(path)

    def testInvalidFileContent(self):
        # NOTE(review): there is no `else: assert False` — if the trailing
        # truncated byte does NOT raise EndOfStreamError the test silently
        # passes; confirm whether that is intentional.
        _, path = tempfile.mkstemp()
        try:
            with open(path, "wb") as out:
                out.write(ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0, 7)))

            with open(path, "rb") as source:
                list(decoder.StreamingDecoder(source))

        except error.EndOfStreamError:
            pass

        finally:
            os.remove(path)
+
+
class BytesIOTestCase(BaseTestCase):
    """StreamingDecoder accepts an in-memory binary stream (io.BytesIO)."""

    def testRead(self):
        substrate = io.BytesIO(
            ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))
        decoded = [obj for obj in decoder.StreamingDecoder(substrate)]
        assert decoded == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]
+
+
class UnicodeTestCase(BaseTestCase):
    """Text substrates must be rejected — only binary input is decodable."""

    def testFail(self):
        # This ensures that unicode objects in Python 2 & str objects in Python 3.7 cannot be parsed.
        # latin-1 maps bytes 1:1 onto code points, so the text object carries
        # the same "contents" as the valid binary substrate.
        text = ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)).decode("latin-1")

        try:
            next(decoder.StreamingDecoder(text))

        except error.UnsupportedSubstrateError:
            pass

        else:
            assert False, 'Tolerated parsing broken unicode strings'
+
+
class RestartableDecoderTestCase(BaseTestCase):
    """StreamingDecoder must suspend (yield SubstrateUnderrunError) on a non-blocking stream.

    Fix: the eight copy-pasted "next + assert" stanzas are collapsed into one
    data-driven loop; the asserted sequence of suspensions is unchanged.
    """

    class NonBlockingStream(io.BytesIO):
        # Alternates between returning None (simulating EWOULDBLOCK) and
        # real data on successive read() calls.
        block = False

        def read(self, size=-1):
            self.block = not self.block
            if self.block:
                return  # this is what non-blocking streams sometimes do

            return io.BytesIO.read(self, size)

    def setUp(self):
        BaseTestCase.setUp(self)

        self.s = univ.SequenceOf(componentType=univ.OctetString())
        self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
        source = ints2octs(
            (48, 26,
             4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110,
             4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
        self.stream = self.NonBlockingStream(source)

    def testPartialReadingFromNonBlockingStream(self):
        iterator = iter(decoder.StreamingDecoder(self.stream, asn1Spec=self.s))

        # Number of components already reconstructed at each suspension point;
        # None means decoding has not progressed far enough to expose the
        # partially built object via the error context yet.
        for expectedLength in (None, None, 0, 0, 0, 1, 1, 1):
            res = next(iterator)

            assert isinstance(res, error.SubstrateUnderrunError)

            if expectedLength is None:
                assert 'asn1Object' not in res.context

            else:
                assert 'asn1Object' in res.context
                assert isinstance(res.context['asn1Object'], univ.SequenceOf)
                assert res.context['asn1Object'].isValue
                assert len(res.context['asn1Object']) == expectedLength

        # Finally the fully decoded two-element SEQUENCE OF is produced.
        res = next(iterator)

        assert isinstance(res, univ.SequenceOf)
        assert res.isValue
        assert len(res) == 2

        try:
            next(iterator)

        except StopIteration:
            pass

        else:
            assert False, 'End of stream not raised'
+
+
class CompressedFilesTestCase(BaseTestCase):
    """StreamingDecoder over gzip and zip file objects (seekable vs non-seekable streams)."""

    def testGzip(self):
        _, path = tempfile.mkstemp(suffix=".gz")
        try:
            with gzip.open(path, "wb") as out:
                out.write(ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))

            with gzip.open(path, "rb") as source:
                values = list(decoder.StreamingDecoder(source))

            assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]

        finally:
            os.remove(path)

    def testZipfile(self):
        # File from ZIP archive is a good example of non-seekable stream in Python 2.7
        # In Python 3.7, it is a seekable stream.
        _, path = tempfile.mkstemp(suffix=".zip")
        try:
            with zipfile.ZipFile(path, "w") as myzip:
                myzip.writestr("data", ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))

            with zipfile.ZipFile(path, "r") as myzip:
                with myzip.open("data", "r") as source:
                    values = list(decoder.StreamingDecoder(source))
                    assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]
        finally:
            os.remove(path)

    def testZipfileMany(self):
        # Same substrate repeated 1000 times to exercise sustained streaming.
        _, path = tempfile.mkstemp(suffix=".zip")
        try:
            with zipfile.ZipFile(path, "w") as myzip:
                myzip.writestr("data", ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)) * 1000)

            with zipfile.ZipFile(path, "r") as myzip:
                with myzip.open("data", "r") as source:
                    values = list(decoder.StreamingDecoder(source))
                    assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)] * 1000
        finally:
            os.remove(path)
+
+
# Collect every TestCase defined in this module so the file can be run
# directly (`python test_decoder.py`) or driven by an external runner.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/ber/test_encoder.py b/contrib/python/pyasn1/py2/tests/codec/ber/test_encoder.py
new file mode 100644
index 0000000000..7701348d06
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/ber/test_encoder.py
@@ -0,0 +1,1497 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import char
+from pyasn1.codec.ber import encoder
+from pyasn1.compat.octets import ints2octs
+from pyasn1.error import PyAsn1Error
+
+
class LargeTagEncoderTestCase(BaseTestCase):
    """Encode a value whose explicit tag ID requires the multi-octet tag form."""

    def setUp(self):
        BaseTestCase.setUp(self)

        hugeTag = tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0xdeadbeaf)
        self.o = univ.Integer().subtype(value=1, explicitTag=hugeTag)

    def testEncoder(self):
        expected = ints2octs((127, 141, 245, 182, 253, 47, 3, 2, 1, 1))
        assert encoder.encode(self.o) == expected
+
+
class IntegerEncoderTestCase(BaseTestCase):
    """BER encoding of INTEGER values across signs, magnitudes and zero."""

    def testPosInt(self):
        assert encoder.encode(univ.Integer(12)) == ints2octs((2, 1, 12))

    def testNegInt(self):
        assert encoder.encode(univ.Integer(-12)) == ints2octs((2, 1, 244))

    def testZero(self):
        assert encoder.encode(univ.Integer(0)) == ints2octs((2, 1, 0))

    def testCompactZero(self):
        # supportCompactZero is a class-level switch; flip it back immediately
        # so other tests see the default (non-compact) behavior.
        encoder.IntegerEncoder.supportCompactZero = True
        substrate = encoder.encode(univ.Integer(0))
        encoder.IntegerEncoder.supportCompactZero = False
        assert substrate == ints2octs((2, 0))

    def testMinusOne(self):
        assert encoder.encode(univ.Integer(-1)) == ints2octs((2, 1, 255))

    def testPosLong(self):
        # Leading zero octet keeps the top bit from being read as a sign bit.
        assert encoder.encode(
            univ.Integer(0xffffffffffffffff)
        ) == ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))

    def testNegLong(self):
        assert encoder.encode(
            univ.Integer(-0xffffffffffffffff)
        ) == ints2octs((2, 9, 255, 0, 0, 0, 0, 0, 0, 0, 1))
+
+
class IntegerEncoderWithSchemaTestCase(BaseTestCase):
    """INTEGER encoding of plain Python ints driven by an asn1Spec schema."""

    def testPosInt(self):
        assert encoder.encode(12, asn1Spec=univ.Integer()) == ints2octs((2, 1, 12))

    def testNegInt(self):
        assert encoder.encode(-12, asn1Spec=univ.Integer()) == ints2octs((2, 1, 244))

    def testZero(self):
        assert encoder.encode(0, asn1Spec=univ.Integer()) == ints2octs((2, 1, 0))

    def testPosLong(self):
        assert encoder.encode(
            0xffffffffffffffff, asn1Spec=univ.Integer()
        ) == ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))
+
+
class BooleanEncoderTestCase(BaseTestCase):
    """BER encoding of BOOLEAN objects."""

    def testTrue(self):
        substrate = encoder.encode(univ.Boolean(1))
        assert substrate == ints2octs((1, 1, 1))

    def testFalse(self):
        substrate = encoder.encode(univ.Boolean(0))
        assert substrate == ints2octs((1, 1, 0))
+
+
class BooleanEncoderWithSchemaTestCase(BaseTestCase):
    """BOOLEAN encoding of native bools driven by an asn1Spec schema."""

    def testTrue(self):
        substrate = encoder.encode(True, asn1Spec=univ.Boolean())
        assert substrate == ints2octs((1, 1, 1))

    def testFalse(self):
        substrate = encoder.encode(False, asn1Spec=univ.Boolean())
        assert substrate == ints2octs((1, 1, 0))
+
+
class BitStringEncoderTestCase(BaseTestCase):
    """BIT STRING encoding in definite/indefinite and chunked modes."""

    def setUp(self):
        BaseTestCase.setUp(self)
        # 15 bits -> two content octets plus a leading unused-bits count of 1.
        self.b = univ.BitString((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1))

    def testDefMode(self):
        assert encoder.encode(self.b) == ints2octs((3, 3, 1, 169, 138))

    def testIndefMode(self):
        # Short primitive encodings stay definite-length even with defMode=False.
        assert encoder.encode(
            self.b, defMode=False
        ) == ints2octs((3, 3, 1, 169, 138))

    def testDefModeChunked(self):
        # maxChunkSize forces the constructed form (tag 35) with fragments.
        assert encoder.encode(
            self.b, maxChunkSize=1
        ) == ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))

    def testIndefModeChunked(self):
        # Indefinite length: 128 length octet, terminated by end-of-contents 0, 0.
        assert encoder.encode(
            self.b, defMode=False, maxChunkSize=1
        ) == ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))

    def testEmptyValue(self):
        assert encoder.encode(univ.BitString([])) == ints2octs((3, 1, 0))
+
+
class BitStringEncoderWithSchemaTestCase(BaseTestCase):
    """BIT STRING encoding of a raw bit tuple driven by an asn1Spec schema."""

    def setUp(self):
        BaseTestCase.setUp(self)
        # Plain tuple of bits; the schema object supplies the ASN.1 type.
        self.b = (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)
        self.s = univ.BitString()

    def testDefMode(self):
        assert encoder.encode(self.b, asn1Spec=self.s) == ints2octs((3, 3, 1, 169, 138))

    def testIndefMode(self):
        assert encoder.encode(
            self.b, asn1Spec=self.s, defMode=False
        ) == ints2octs((3, 3, 1, 169, 138))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.b, asn1Spec=self.s, maxChunkSize=1
        ) == ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.b, asn1Spec=self.s, defMode=False, maxChunkSize=1
        ) == ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))

    def testEmptyValue(self):
        assert encoder.encode([], asn1Spec=self.s) == ints2octs((3, 1, 0))
+
+
class OctetStringEncoderTestCase(BaseTestCase):
    """OCTET STRING encoding in plain and chunked (constructed) forms."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.o = univ.OctetString('Quick brown fox')

    def testDefMode(self):
        assert encoder.encode(self.o) == ints2octs(
            (4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))

    def testIndefMode(self):
        # Primitive form is kept even with defMode=False.
        assert encoder.encode(
            self.o, defMode=False
        ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))

    def testDefModeChunked(self):
        # Constructed form (tag 36): three 4-octet fragments plus a 3-octet tail.
        assert encoder.encode(
            self.o, maxChunkSize=4
        ) == ints2octs((36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119,
                        110, 32, 4, 3, 102, 111, 120))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.o, defMode=False, maxChunkSize=4
        ) == ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110,
                        32, 4, 3, 102, 111, 120, 0, 0))
+
+
class OctetStringEncoderWithSchemaTestCase(BaseTestCase):
    """OCTET STRING encoding of a native str driven by an asn1Spec schema."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.OctetString()
        self.o = 'Quick brown fox'

    def testDefMode(self):
        assert encoder.encode(self.o, asn1Spec=self.s) == ints2octs(
            (4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))

    def testIndefMode(self):
        assert encoder.encode(
            self.o, asn1Spec=self.s, defMode=False
        ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.o, asn1Spec=self.s, maxChunkSize=4
        ) == ints2octs((36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119,
                        110, 32, 4, 3, 102, 111, 120))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.o, asn1Spec=self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110,
                        32, 4, 3, 102, 111, 120, 0, 0))
+
+
class ExpTaggedOctetStringEncoderTestCase(BaseTestCase):
    """OCTET STRING under an explicit APPLICATION 5 tag, in all four modes."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.o = univ.OctetString().subtype(
            value='Quick brown fox',
            explicitTag=tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 5)
        )

    def testDefMode(self):
        # Outer explicit tag (101) wraps the inner primitive OCTET STRING (4).
        assert encoder.encode(self.o) == ints2octs(
            (101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))

    def testIndefMode(self):
        assert encoder.encode(
            self.o, defMode=False
        ) == ints2octs((101, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.o, defMode=True, maxChunkSize=4
        ) == ints2octs((101, 25, 36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3,
                        102, 111, 120))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.o, defMode=False, maxChunkSize=4
        ) == ints2octs((101, 128, 36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0, 0, 0))
+
+
class NullEncoderTestCase(BaseTestCase):
    """BER encoding of the NULL value."""

    def testNull(self):
        substrate = encoder.encode(univ.Null(''))
        assert substrate == ints2octs((5, 0))
+
+
class NullEncoderWithSchemaTestCase(BaseTestCase):
    """NULL encoding of Python None driven by an asn1Spec schema."""

    def testNull(self):
        substrate = encoder.encode(None, univ.Null())
        assert substrate == ints2octs((5, 0))
+
+
class ObjectIdentifierEncoderTestCase(BaseTestCase):
    """OBJECT IDENTIFIER encoding: arc-packing edge cases and invalid OIDs."""

    def testOne(self):
        assert encoder.encode(
            univ.ObjectIdentifier((1, 3, 6, 0, 0xffffe))
        ) == ints2octs((6, 6, 43, 6, 0, 191, 255, 126))

    def testEdge1(self):
        assert encoder.encode(
            univ.ObjectIdentifier((0, 39))
        ) == ints2octs((6, 1, 39))

    def testEdge2(self):
        assert encoder.encode(
            univ.ObjectIdentifier((1, 39))
        ) == ints2octs((6, 1, 79))

    def testEdge3(self):
        # 01111000  (2*40 + 40 = 120)
        assert encoder.encode(
            univ.ObjectIdentifier((2, 40))
        ) == ints2octs((6, 1, 120))

    def testEdge4(self):
        # 10010000|10000000|10000000|10000000|01001111
        assert encoder.encode(
            univ.ObjectIdentifier((2, 0xffffffff))
        ) == ints2octs((6, 5, 0x90, 0x80, 0x80, 0x80, 0x4F))

    def testEdge5(self):
        # 01111111
        assert encoder.encode(
            univ.ObjectIdentifier((2, 47))
        ) == ints2octs((6, 1, 0x7F))

    def testEdge6(self):
        # 10000001|00000000
        assert encoder.encode(
            univ.ObjectIdentifier((2, 48))
        ) == ints2octs((6, 2, 0x81, 0x00))

    def testEdge7(self):
        # 10000001|00110100|00000011
        assert encoder.encode(
            univ.ObjectIdentifier((2, 100, 3))
        ) == ints2octs((6, 3, 0x81, 0x34, 0x03))

    def testEdge8(self):
        # 10000101|00000000
        assert encoder.encode(
            univ.ObjectIdentifier((2, 560))
        ) == ints2octs((6, 2, 133, 0))

    def testEdge9(self):
        # 10001000|10000100|10000111|00000010
        assert encoder.encode(
            univ.ObjectIdentifier((2, 16843570))
        ) == ints2octs((6, 4, 0x88, 0x84, 0x87, 0x02))

    def testEdgeA(self):
        assert encoder.encode(
            univ.ObjectIdentifier((2, 5))
        ) == ints2octs((6, 1, 85))

    def testImpossible1(self):
        # The first arc may only be 0, 1 or 2.
        try:
            encoder.encode(univ.ObjectIdentifier((3, 1, 2)))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'impossible leading arc tolerated'

    def testImpossible2(self):
        # An OID must have at least two arcs.
        try:
            encoder.encode(univ.ObjectIdentifier((0,)))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'single arc OID tolerated'

    def testImpossible3(self):
        # Second arc must be below 40 when the first arc is 0 or 1.
        try:
            encoder.encode(univ.ObjectIdentifier((0, 40)))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'second arc overflow tolerated'

    def testImpossible4(self):
        try:
            encoder.encode(univ.ObjectIdentifier((1, 40)))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'second arc overflow tolerated'

    def testLarge1(self):
        # Arbitrary-precision arc, base-128 packed across many octets.
        assert encoder.encode(
            univ.ObjectIdentifier((2, 18446744073709551535184467440737095))
        ) == ints2octs((0x06, 0x11, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6, 0xB8, 0xCB,
                        0xE2, 0xB7, 0x17))

    def testLarge2(self):
        assert encoder.encode(
            univ.ObjectIdentifier((2, 999, 18446744073709551535184467440737095))
        ) == ints2octs((0x06, 0x13, 0x88, 0x37, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6,
                        0xB8, 0xCB, 0xE2, 0xB6, 0x47))
+
+
class ObjectIdentifierWithSchemaEncoderTestCase(BaseTestCase):
    """Encode a plain tuple of arcs against an ObjectIdentifier schema."""

    def testOne(self):
        arcs = (1, 3, 6, 0, 0xffffe)
        substrate = encoder.encode(arcs, asn1Spec=univ.ObjectIdentifier())
        assert substrate == ints2octs((6, 6, 43, 6, 0, 191, 255, 126))
+
+
class RealEncoderTestCase(BaseTestCase):
    """REAL encoding in character and binary notations, plus specials.

    Some tests temporarily mutate module-level encoder state
    (encoder.TYPE_MAP[univ.Real.typeId].binEncBase) and restore it before
    returning, so these tests are order-sensitive if one fails midway.
    """

    def testChar(self):
        # Character (decimal) form: mantissa/base-10/exponent -> "123E11".
        assert encoder.encode(
            univ.Real((123, 10, 11))
        ) == ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))

    def testBin1(self):
        assert encoder.encode(  # default binEncBase = 2
            univ.Real((0.5, 2, 0))  # check encbase = 2 and exponent = -1
        ) == ints2octs((9, 3, 128, 255, 1))

    def testBin2(self):
        r = univ.Real((3.25, 2, 0))
        r.binEncBase = 8  # change binEncBase only for this instance of Real
        assert encoder.encode(
            r  # check encbase = 8
        ) == ints2octs((9, 3, 148, 255, 13))

    def testBin3(self):
        # change binEncBase in the RealEncoder instance => for all further Real
        binEncBase, encoder.TYPE_MAP[univ.Real.typeId].binEncBase = encoder.TYPE_MAP[univ.Real.typeId].binEncBase, 16
        assert encoder.encode(
            univ.Real((0.00390625, 2, 0))  # check encbase = 16
        ) == ints2octs((9, 3, 160, 254, 1))
        encoder.TYPE_MAP[univ.Real.typeId].binEncBase = binEncBase

    def testBin4(self):
        # choose binEncBase automatically for all further Real (testBin[4-7])
        binEncBase, encoder.TYPE_MAP[univ.Real.typeId].binEncBase = encoder.TYPE_MAP[univ.Real.typeId].binEncBase, None
        assert encoder.encode(
            univ.Real((1, 2, 0))  # check exponent = 0
        ) == ints2octs((9, 3, 128, 0, 1))
        encoder.TYPE_MAP[univ.Real.typeId].binEncBase = binEncBase

    def testBin5(self):
        assert encoder.encode(
            univ.Real((3, 2, -1020))  # case of 2 octs for exponent and
                                      # negative exponent and abs(exponent) is
                                      # all 1's and fills the whole octet(s)
        ) == ints2octs((9, 4, 129, 252, 4, 3))

    def testBin6(self):
        assert encoder.encode(
            univ.Real((1, 2, 262140))  # case of 3 octs for exponent and
                                       # check that first 9 bits for exponent
                                       # are not all 1's
        ) == ints2octs((9, 5, 130, 3, 255, 252, 1))

    def testBin7(self):
        assert encoder.encode(
            univ.Real((-1, 2, 76354972))  # case of >3 octs for exponent and
                                          # mantissa < 0
        ) == ints2octs((9, 7, 195, 4, 4, 141, 21, 156, 1))

    def testPlusInf(self):
        assert encoder.encode(univ.Real('inf')) == ints2octs((9, 1, 64))

    def testMinusInf(self):
        assert encoder.encode(univ.Real('-inf')) == ints2octs((9, 1, 65))

    def testZero(self):
        # Zero encodes as an empty content field.
        assert encoder.encode(univ.Real(0)) == ints2octs((9, 0))
+
+
class RealEncoderWithSchemaTestCase(BaseTestCase):
    """REAL encoding of a (mantissa, base, exponent) tuple via an asn1Spec."""

    def testChar(self):
        value = (123, 10, 11)
        substrate = encoder.encode(value, asn1Spec=univ.Real())
        assert substrate == ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))
+
+
class UniversalStringEncoderTestCase(BaseTestCase):
    """UniversalString encoding (UCS-4: four octets per character)."""

    def testEncoding(self):
        text = 'abc' if sys.version_info[0] >= 3 else unicode('abc')
        substrate = encoder.encode(char.UniversalString(text))
        assert substrate == ints2octs(
            (28, 12, 0, 0, 0, 97, 0, 0, 0, 98, 0, 0, 0, 99)), 'Incorrect encoding'
+
+
class UniversalStringEncoderWithSchemaTestCase(BaseTestCase):
    """UniversalString encoding of a native string via an asn1Spec."""

    def testEncoding(self):
        text = 'abc' if sys.version_info[0] >= 3 else unicode('abc')
        substrate = encoder.encode(text, asn1Spec=char.UniversalString())
        assert substrate == ints2octs(
            (28, 12, 0, 0, 0, 97, 0, 0, 0, 98, 0, 0, 0, 99)), 'Incorrect encoding'
+
+
class BMPStringEncoderTestCase(BaseTestCase):
    """BMPString encoding (UCS-2: two octets per character)."""

    def testEncoding(self):
        text = 'abc' if sys.version_info[0] >= 3 else unicode('abc')
        substrate = encoder.encode(char.BMPString(text))
        assert substrate == ints2octs(
            (30, 6, 0, 97, 0, 98, 0, 99)), 'Incorrect encoding'
+
+
class BMPStringEncoderWithSchemaTestCase(BaseTestCase):
    """BMPString encoding of a native string via an asn1Spec."""

    def testEncoding(self):
        text = 'abc' if sys.version_info[0] >= 3 else unicode('abc')
        substrate = encoder.encode(text, asn1Spec=char.BMPString())
        assert substrate == ints2octs(
            (30, 6, 0, 97, 0, 98, 0, 99)), 'Incorrect encoding'
+
+
class UTF8StringEncoderTestCase(BaseTestCase):
    """UTF8String encoding (one octet per ASCII character here)."""

    def testEncoding(self):
        text = 'abc' if sys.version_info[0] >= 3 else unicode('abc')
        substrate = encoder.encode(char.UTF8String(text))
        assert substrate == ints2octs(
            (12, 3, 97, 98, 99)), 'Incorrect encoding'
+
+
class UTF8StringEncoderWithSchemaTestCase(BaseTestCase):
    """UTF8String encoding of a native string via an asn1Spec."""

    def testEncoding(self):
        text = 'abc' if sys.version_info[0] >= 3 else unicode('abc')
        substrate = encoder.encode(text, asn1Spec=char.UTF8String())
        assert substrate == ints2octs(
            (12, 3, 97, 98, 99)), 'Incorrect encoding'
+
+
class SequenceOfEncoderTestCase(BaseTestCase):
    """SEQUENCE OF encoding of fully valued objects in all four modes."""

    def testEmpty(self):
        s = univ.SequenceOf()
        s.clear()
        assert encoder.encode(s) == ints2octs((48, 0))

    def testDefMode(self):
        s = univ.SequenceOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(s) == ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testIndefMode(self):
        s = univ.SequenceOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(
            s, defMode=False
        ) == ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testDefModeChunked(self):
        s = univ.SequenceOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(
            s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testIndefModeChunked(self):
        s = univ.SequenceOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(
            s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
class SequenceOfEncoderWithSchemaTestCase(BaseTestCase):
    """SEQUENCE OF encoding of a plain Python list driven by an asn1Spec."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.SequenceOf(componentType=univ.OctetString())
        self.v = ['quick brown']

    def testEmpty(self):
        assert encoder.encode([], asn1Spec=self.s) == ints2octs((48, 0))

    def testDefMode(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s
        ) == ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testIndefMode(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=False
        ) == ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
class SequenceOfEncoderWithComponentsSchemaTestCase(BaseTestCase):
    """SEQUENCE OF encoding of a schema object populated in each test."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.SequenceOf(componentType=univ.OctetString())

    # Name-mangled private helper (called as self.__init from within the
    # class); repopulates the shared schema object before each assertion.
    def __init(self):
        self.s.clear()
        self.s.setComponentByPosition(0, 'quick brown')

    def testDefMode(self):
        self.__init()
        assert encoder.encode(self.s) == ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testIndefMode(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testDefModeChunked(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testIndefModeChunked(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
class SetOfEncoderTestCase(BaseTestCase):
    """SET OF encoding (tag 49) of fully valued objects in all four modes."""

    def testEmpty(self):
        s = univ.SetOf()
        s.clear()
        assert encoder.encode(s) == ints2octs((49, 0))

    def testDefMode(self):
        s = univ.SetOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(s) == ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testIndefMode(self):
        s = univ.SetOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(
            s, defMode=False
        ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testDefModeChunked(self):
        s = univ.SetOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(
            s, defMode=True, maxChunkSize=4
        ) == ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testIndefModeChunked(self):
        s = univ.SetOf()
        s.setComponentByPosition(0, univ.OctetString('quick brown'))
        assert encoder.encode(
            s, defMode=False, maxChunkSize=4
        ) == ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
class SetOfEncoderWithSchemaTestCase(BaseTestCase):
    """SET OF encoding of a plain Python list driven by an asn1Spec schema."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.SetOf(componentType=univ.OctetString())
        self.v = ['quick brown']

    def testEmpty(self):
        # Dropped an unused local `s = univ.SetOf()` left over from a
        # copy-paste of the schema-less SetOfEncoderTestCase.
        assert encoder.encode([], asn1Spec=self.s) == ints2octs((49, 0))

    def testDefMode(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s
        ) == ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testIndefMode(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=False
        ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
        ) == ints2octs(
            (49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
class SetOfEncoderWithComponentsSchemaTestCase(BaseTestCase):
    """SET OF encoding of a schema object populated in each test."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.SetOf(componentType=univ.OctetString())

    # Name-mangled private helper: repopulate the shared schema object.
    def __init(self):
        self.s.clear()
        self.s.setComponentByPosition(0, 'quick brown')

    def testDefMode(self):
        self.__init()
        assert encoder.encode(self.s) == ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testIndefMode(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testDefModeChunked(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testIndefModeChunked(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
class SequenceEncoderTestCase(BaseTestCase):
    """SEQUENCE encoding of a bare (schema-less) object in all four modes."""

    def setUp(self):
        BaseTestCase.setUp(self)
        # Components: NULL, OCTET STRING, INTEGER.
        self.s = univ.Sequence()
        self.s.setComponentByPosition(0, univ.Null(''))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def testDefMode(self):
        assert encoder.encode(self.s) == ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))

    def testIndefMode(self):
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
class SequenceEncoderWithSchemaTestCase(BaseTestCase):
    """SEQUENCE encoding of a plain dict driven by a NamedTypes schema."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null()),
                namedtype.OptionalNamedType('first-name', univ.OctetString()),
                namedtype.DefaultedNamedType('age', univ.Integer(33)),
            )
        )
        self.v = {
            'place-holder': None,
            'first-name': 'quick brown',
            'age': 1
        }

    def testEmpty(self):
        # Mandatory 'place-holder' component missing -> must raise.
        try:
            assert encoder.encode({}, asn1Spec=self.s)

        except PyAsn1Error:
            pass

        else:
            assert False, 'empty bare sequence tolerated'

    def testDefMode(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s
        ) == ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))

    def testIndefMode(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=False
        ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))

    def testDefModeChunked(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))

    def testIndefModeChunked(self):
        assert encoder.encode(
            self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
class SequenceEncoderWithUntaggedOpenTypesTestCase(BaseTestCase):
    """Encoding of an untagged ANY open type selected by an integer id field."""

    def setUp(self):
        BaseTestCase.setUp(self)

        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.Any(), openType=openType)
            )
        )

    def testEncodeOpenTypeChoiceOne(self):
        self.s.clear()

        self.s[0] = 1
        self.s[1] = univ.Integer(12)

        # 49, 50 are the ASCII octets "1", "2" carried verbatim by the ANY.
        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 5, 2, 1, 1, 49, 50)
        )

    def testEncodeOpenTypeChoiceTwo(self):
        self.s.clear()

        self.s[0] = 2
        self.s[1] = univ.OctetString('quick brown')

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 14, 2, 1, 2, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)
        )

    def testEncodeOpenTypeUnknownId(self):
        # NOTE(review): the except branch fails the test, i.e. this actually
        # asserts that encoding SUCCEEDS, while the message text reads
        # inverted. It is also byte-identical to
        # testEncodeOpenTypeIncompatibleType below — confirm intent upstream.
        self.s.clear()

        self.s[0] = 2
        self.s[1] = univ.ObjectIdentifier('1.3.6')

        try:
            encoder.encode(self.s, asn1Spec=self.s)

        except PyAsn1Error:
            assert False, 'incompatible open type tolerated'

    def testEncodeOpenTypeIncompatibleType(self):
        # NOTE(review): duplicate of testEncodeOpenTypeUnknownId — see above.
        self.s.clear()

        self.s[0] = 2
        self.s[1] = univ.ObjectIdentifier('1.3.6')

        try:
            encoder.encode(self.s, asn1Spec=self.s)

        except PyAsn1Error:
            assert False, 'incompatible open type tolerated'
+
+
class SequenceEncoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
    """Open type under an implicit [3] tag on the ANY component."""

    def setUp(self):
        BaseTestCase.setUp(self)

        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
            )
        )

    def testEncodeOpenTypeChoiceOne(self):
        self.s.clear()

        self.s[0] = 1
        self.s[1] = univ.Integer(12)

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 9, 2, 1, 1, 131, 4, 131, 2, 49, 50)
        )
+
+
class SequenceEncoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
    """Open type under an explicit [3] tag on the ANY component."""

    def setUp(self):
        BaseTestCase.setUp(self)

        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
            )
        )

    def testEncodeOpenTypeChoiceOne(self):
        self.s.clear()

        self.s[0] = 1
        self.s[1] = univ.Integer(12)

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 9, 2, 1, 1, 163, 4, 163, 2, 49, 50)
        )
+
+
class SequenceEncoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
    """Open types carried inside a SET OF ANY component."""

    def setUp(self):
        BaseTestCase.setUp(self)

        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.SetOf(
                    componentType=univ.Any()), openType=openType)
            )
        )

    def testEncodeOpenTypeChoiceOne(self):
        self.s.clear()

        self.s[0] = 1
        self.s[1].append(univ.Integer(12))

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 7, 2, 1, 1, 49, 2, 49, 50)
        )

    def testEncodeOpenTypeChoiceTwo(self):
        self.s.clear()

        self.s[0] = 2
        self.s[1].append(univ.OctetString('quick brown'))

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 16, 2, 1, 2, 49, 11, 113, 117, 105, 99, 107, 32, 98, 114,
             111, 119, 110)
        )

    def testEncodeOpenTypeUnknownId(self):
        # NOTE(review): as in the scalar open-type case above, the except
        # branch fails the test (asserts encoding succeeds) and this test is
        # identical to testEncodeOpenTypeIncompatibleType — confirm upstream.
        self.s.clear()

        self.s[0] = 2
        self.s[1].append(univ.ObjectIdentifier('1.3.6'))

        try:
            encoder.encode(self.s, asn1Spec=self.s)

        except PyAsn1Error:
            assert False, 'incompatible open type tolerated'

    def testEncodeOpenTypeIncompatibleType(self):
        # NOTE(review): duplicate of testEncodeOpenTypeUnknownId — see above.
        self.s.clear()

        self.s[0] = 2
        self.s[1].append(univ.ObjectIdentifier('1.3.6'))

        try:
            encoder.encode(self.s, asn1Spec=self.s)

        except PyAsn1Error:
            assert False, 'incompatible open type tolerated'
+
+
class SequenceEncoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
    """SET OF ANY open types where each ANY carries an implicit [3] tag."""

    def setUp(self):
        BaseTestCase.setUp(self)

        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.SetOf(
                    componentType=univ.Any().subtype(
                        implicitTag=tag.Tag(
                            tag.tagClassContext, tag.tagFormatSimple, 3))),
                    openType=openType)
            )
        )

    def testEncodeOpenTypeChoiceOne(self):
        self.s.clear()

        self.s[0] = 1
        self.s[1].append(univ.Integer(12))

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 11, 2, 1, 1, 49, 6, 131, 4, 131, 2, 49, 50)
        )
+
+
class SequenceEncoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
    """SET OF ANY open types where each ANY carries an explicit [3] tag."""

    def setUp(self):
        BaseTestCase.setUp(self)

        openType = opentype.OpenType(
            'id',
            {1: univ.Integer(),
             2: univ.OctetString()}
        )
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer()),
                namedtype.NamedType('blob', univ.SetOf(
                    componentType=univ.Any().subtype(
                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
                    openType=openType)
            )
        )

    def testEncodeOpenTypeChoiceOne(self):
        self.s.clear()

        self.s[0] = 1
        self.s[1].append(univ.Integer(12))

        assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
            (48, 11, 2, 1, 1, 49, 6, 163, 4, 163, 2, 49, 50)
        )
+
+
class SequenceEncoderWithComponentsSchemaTestCase(BaseTestCase):
    """SEQUENCE with optional and defaulted components in all four modes.

    Each test repopulates the shared schema object through one of the
    name-mangled __init* helpers: mandatory only, plus optional, plus
    defaulted, or both.
    """

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('place-holder', univ.Null()),
                namedtype.OptionalNamedType('first-name', univ.OctetString()),
                namedtype.DefaultedNamedType('age', univ.Integer(33)),
            )
        )

    # Mandatory component only.
    def __init(self):
        self.s.clear()
        self.s.setComponentByPosition(0, '')

    # Mandatory plus the optional OCTET STRING.
    def __initWithOptional(self):
        self.s.clear()
        self.s.setComponentByPosition(0, '')
        self.s.setComponentByPosition(1, 'quick brown')

    # Mandatory plus the defaulted INTEGER set to a non-default value.
    def __initWithDefaulted(self):
        self.s.clear()
        self.s.setComponentByPosition(0, '')
        self.s.setComponentByPosition(2, 1)

    # All three components populated.
    def __initWithOptionalAndDefaulted(self):
        self.s.clear()
        self.s.setComponentByPosition(0, univ.Null(''))
        self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
        self.s.setComponentByPosition(2, univ.Integer(1))

    def testDefMode(self):
        self.__init()
        assert encoder.encode(self.s) == ints2octs((48, 2, 5, 0))

    def testIndefMode(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((48, 128, 5, 0, 0, 0))

    def testDefModeChunked(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 2, 5, 0))

    def testIndefModeChunked(self):
        self.__init()
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 5, 0, 0, 0))

    def testWithOptionalDefMode(self):
        self.__initWithOptional()
        assert encoder.encode(self.s) == ints2octs(
            (48, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))

    def testWithOptionalIndefMode(self):
        self.__initWithOptional()
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))

    def testWithOptionalDefModeChunked(self):
        self.__initWithOptional()
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))

    def testWithOptionalIndefModeChunked(self):
        self.__initWithOptional()
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs(
            (48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))

    def testWithDefaultedDefMode(self):
        self.__initWithDefaulted()
        assert encoder.encode(self.s) == ints2octs((48, 5, 5, 0, 2, 1, 1))

    def testWithDefaultedIndefMode(self):
        self.__initWithDefaulted()
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))

    def testWithDefaultedDefModeChunked(self):
        self.__initWithDefaulted()
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs((48, 5, 5, 0, 2, 1, 1))

    def testWithDefaultedIndefModeChunked(self):
        self.__initWithDefaulted()
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))

    def testWithOptionalAndDefaultedDefMode(self):
        self.__initWithOptionalAndDefaulted()
        assert encoder.encode(self.s) == ints2octs(
            (48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))

    def testWithOptionalAndDefaultedIndefMode(self):
        self.__initWithOptionalAndDefaulted()
        assert encoder.encode(
            self.s, defMode=False
        ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))

    def testWithOptionalAndDefaultedDefModeChunked(self):
        self.__initWithOptionalAndDefaulted()
        assert encoder.encode(
            self.s, defMode=True, maxChunkSize=4
        ) == ints2octs(
            (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))

    def testWithOptionalAndDefaultedIndefModeChunked(self):
        self.__initWithOptionalAndDefaulted()
        assert encoder.encode(
            self.s, defMode=False, maxChunkSize=4
        ) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
                        0, 2, 1, 1, 0, 0))
+
+
+class ExpTaggedSequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Integer()),
+ )
+ )
+
+ s = s.subtype(
+ explicitTag=tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5)
+ )
+
+ s[0] = 12
+
+ self.s = s
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((101, 5, 48, 3, 2, 1, 12))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((101, 128, 48, 128, 2, 1, 12, 0, 0, 0, 0))
+
+
+class ExpTaggedSequenceComponentEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Boolean().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ )
+ )
+
+ self.s[0] = True
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((48, 5, 160, 3, 1, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 160, 3, 1, 1, 1, 0, 0, 0, 0))
+
+
+class SetEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class SetEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+ self.v = {
+ 'place-holder': None,
+ 'first-name': 'quick brown',
+ 'age': 1
+ }
+
+ def testEmpty(self):
+ try:
+ assert encoder.encode({}, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'empty bare SET tolerated'
+
+ def testDefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class SetEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((49, 2, 5, 0))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 0, 0))
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 2, 5, 0))
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 0, 0))
+
+ def testWithOptionalDefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(self.s) == ints2octs(
+ (49, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testWithOptionalDefModeChunked(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testWithOptionalIndefModeChunked(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs(
+ (49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+ def testWithDefaultedDefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(self.s) == ints2octs((49, 5, 5, 0, 2, 1, 1))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithDefaultedDefModeChunked(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 5, 5, 0, 2, 1, 1))
+
+ def testWithDefaultedIndefModeChunked(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(self.s) == ints2octs(
+ (49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs(
+ (49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class ChoiceEncoderTestCase(BaseTestCase):
+
+ def testEmpty(self):
+ s = univ.Choice()
+ try:
+ encoder.encode(s)
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'encoded unset choice'
+
+ def testDefModeOptionOne(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(s) == ints2octs((5, 0))
+
+ def testDefModeOptionTwo(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(s) == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False
+ ) == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testDefModeChunked(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=True, maxChunkSize=4
+ ) == ints2octs((36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False, maxChunkSize=4
+ ) == ints2octs((36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0))
+
+
+class ChoiceEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+ self.v = {
+ 'place-holder': None
+ }
+
+ def testFilled(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((5, 0))
+
+
+class ChoiceEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+
+ def testEmpty(self):
+ try:
+ encoder.encode(self.s)
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'encoded unset choice'
+
+ def testFilled(self):
+ self.s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(self.s) == ints2octs((5, 0))
+
+ def testTagged(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(s) == ints2octs((164, 2, 5, 0))
+
+ def testUndefLength(self):
+ self.s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
+ assert encoder.encode(self.s, defMode=False, maxChunkSize=3) == ints2octs(
+ (36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0))
+
+ def testTaggedUndefLength(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
+ assert encoder.encode(s, defMode=False, maxChunkSize=3) == ints2octs(
+ (164, 128, 36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0, 0, 0))
+
+
+class AnyEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any(encoder.encode(univ.OctetString('fox')))
+
+ def testUntagged(self):
+ assert encoder.encode(self.s) == ints2octs((4, 3, 102, 111, 120))
+
+ def testTaggedEx(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(s) == ints2octs((164, 5, 4, 3, 102, 111, 120))
+
+ def testTaggedIm(self):
+ s = self.s.subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(s) == ints2octs((132, 5, 4, 3, 102, 111, 120))
+
+
+class AnyEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any()
+ self.v = encoder.encode(univ.OctetString('fox'))
+
+ def testUntagged(self):
+ assert encoder.encode(self.v, asn1Spec=self.s) == ints2octs((4, 3, 102, 111, 120))
+
+ def testTaggedEx(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(self.v, asn1Spec=s) == ints2octs((164, 5, 4, 3, 102, 111, 120))
+
+ def testTaggedIm(self):
+ s = self.s.subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(self.v, asn1Spec=s) == ints2octs((132, 5, 4, 3, 102, 111, 120))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/cer/__init__.py b/contrib/python/pyasn1/py2/tests/codec/cer/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/cer/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/codec/cer/__main__.py b/contrib/python/pyasn1/py2/tests/codec/cer/__main__.py
new file mode 100644
index 0000000000..122d7275b3
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/cer/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.cer.test_encoder.suite',
+ 'tests.codec.cer.test_decoder.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/cer/test_decoder.py b/contrib/python/pyasn1/py2/tests/codec/cer/test_decoder.py
new file mode 100644
index 0000000000..fddd36bb57
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/cer/test_decoder.py
@@ -0,0 +1,370 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.codec.cer import decoder
+from pyasn1.compat.octets import ints2octs, str2octs, null
+from pyasn1.error import PyAsn1Error
+
+
+class BooleanDecoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert decoder.decode(ints2octs((1, 1, 255))) == (1, null)
+
+ def testFalse(self):
+ assert decoder.decode(ints2octs((1, 1, 0))) == (0, null)
+
+ def testEmpty(self):
+ try:
+ decoder.decode(ints2octs((1, 0)))
+ except PyAsn1Error:
+ pass
+
+ def testOverflow(self):
+ try:
+ decoder.decode(ints2octs((1, 2, 0, 0)))
+ except PyAsn1Error:
+ pass
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ ints2octs((3, 3, 6, 170, 128))
+ ) == (((1, 0) * 5), null)
+
+ def testLongMode(self):
+ assert decoder.decode(
+ ints2octs((3, 127, 6) + (170,) * 125 + (128,))
+ ) == (((1, 0) * 501), null)
+
+ # TODO: test failures on short chunked and long unchunked substrate samples
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)),
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testLongMode(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 130, 3, 232) + (81,) * 1000 + (4, 1, 81, 0, 0))
+ ) == (str2octs('Q' * 1001), null)
+
+ # TODO: test failures on short chunked and long unchunked substrate samples
+
+
+class SequenceDecoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 2, 1, 12, 0, 0)),
+ asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98,
+ 114, 111, 119, 110, 0, 0)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 128, 6, 1, 1, 2, 1, 12, 0, 0)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 6, 1, 12, 0, 0)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='06010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 2, 1, 12, 0, 0)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98,
+ 114, 111, 119, 110, 0, 0)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(componentType=univ.Any()),
+ openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 49, 128, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 128, 6, 1, 1, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 49, 128, 4, 11, 113, 117, 105, 99, 107, 32,
+ 98, 114, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs( (48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/cer/test_encoder.py b/contrib/python/pyasn1/py2/tests/codec/cer/test_encoder.py
new file mode 100644
index 0000000000..680f720c3f
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/cer/test_encoder.py
@@ -0,0 +1,956 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import useful
+from pyasn1.codec.cer import encoder
+from pyasn1.compat.octets import ints2octs
+from pyasn1.error import PyAsn1Error
+
+
+class BooleanEncoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert encoder.encode(univ.Boolean(1)) == ints2octs((1, 1, 255))
+
+ def testFalse(self):
+ assert encoder.encode(univ.Boolean(0)) == ints2octs((1, 1, 0))
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert encoder.encode(
+ univ.BitString((1, 0) * 5)
+ ) == ints2octs((3, 3, 6, 170, 128))
+
+ def testLongMode(self):
+ assert encoder.encode(univ.BitString((1, 0) * 501)) == ints2octs((3, 127, 6) + (170,) * 125 + (128,))
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert encoder.encode(
+ univ.OctetString('Quick brown fox')
+ ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testLongMode(self):
+ assert encoder.encode(
+ univ.OctetString('Q' * 1001)
+ ) == ints2octs((36, 128, 4, 130, 3, 232) + (81,) * 1000 + (4, 1, 81, 0, 0))
+
+
+class GeneralizedTimeEncoderTestCase(BaseTestCase):
+ # def testExtraZeroInSeconds(self):
+ # try:
+ # assert encoder.encode(
+ # useful.GeneralizedTime('20150501120112.10Z')
+ # )
+ # except PyAsn1Error:
+ # pass
+ # else:
+ # assert 0, 'Meaningless trailing zero in fraction part tolerated'
+
+ def testLocalTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.GeneralizedTime('20150501120112.1+0200')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Local timezone tolerated'
+
+ def testMissingTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.GeneralizedTime('20150501120112.1')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Missing timezone tolerated'
+
+ def testDecimalCommaPoint(self):
+ try:
+ assert encoder.encode(
+ useful.GeneralizedTime('20150501120112,1Z')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Decimal comma tolerated'
+
+ def testWithSubseconds(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.59Z')
+ ) == ints2octs((24, 18, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 46, 53, 57, 90))
+
+ def testWithSubsecondsWithZeros(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.099Z')
+ ) == ints2octs((24, 18, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 46, 57, 57, 90))
+
+ def testWithSubsecondsMax(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.999Z')
+ ) == ints2octs((24, 19, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 46, 57, 57, 57, 90))
+
+ def testWithSubsecondsMin(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.000Z')
+ ) == ints2octs((24, 15, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithSubsecondsDanglingDot(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.Z')
+ ) == ints2octs((24, 15, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithSeconds(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112Z')
+ ) == ints2octs((24, 15, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithMinutes(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('201708011201Z')
+ ) == ints2octs((24, 13, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 90))
+
+
+class UTCTimeEncoderTestCase(BaseTestCase):
+ def testFractionOfSecond(self):
+ try:
+ assert encoder.encode(
+ useful.UTCTime('150501120112.10Z')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Decimal point tolerated'
+
+ def testMissingTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.UTCTime('150501120112')
+ ) == ints2octs((23, 13, 49, 53, 48, 53, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Missing timezone tolerated'
+
+ def testLocalTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.UTCTime('150501120112+0200')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Local timezone tolerated'
+
+ def testWithSeconds(self):
+ assert encoder.encode(
+ useful.UTCTime('990801120112Z')
+ ) == ints2octs((23, 13, 57, 57, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithMinutes(self):
+ assert encoder.encode(
+ useful.UTCTime('9908011201Z')
+ ) == ints2octs((23, 11, 57, 57, 48, 56, 48, 49, 49, 50, 48, 49, 90))
+
+
+class SequenceOfEncoderTestCase(BaseTestCase):
+ def testEmpty(self):
+ s = univ.SequenceOf()
+ s.clear()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+ def testDefMode1(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('ab'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testDefMode2(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('ab'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 2, 97, 98, 4, 1, 97, 0, 0))
+
+ def testDefMode3(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('b'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 1, 98, 4, 1, 97, 0, 0))
+
+ def testDefMode4(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('b'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SequenceOfEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+
+ def testEmpty(self):
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((48, 128, 0, 0))
+
+ def testIndefMode1(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('ab')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testIndefMode2(self):
+ self.s.clear()
+ self.s.append('ab')
+ self.s.append('a')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 2, 97, 98, 4, 1, 97, 0, 0))
+
+ def testIndefMode3(self):
+ self.s.clear()
+ self.s.append('b')
+ self.s.append('a')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 1, 98, 4, 1, 97, 0, 0))
+
+ def testIndefMode4(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('b')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SetOfEncoderTestCase(BaseTestCase):
+ def testEmpty(self):
+ s = univ.SetOf()
+ s.clear()
+ assert encoder.encode(s) == ints2octs((49, 128, 0, 0))
+
+ def testDefMode1(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('ab'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testDefMode2(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('ab'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testDefMode3(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('b'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+ def testDefMode4(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('b'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SetOfEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+
+ def testEmpty(self):
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((49, 128, 0, 0))
+
+ def testIndefMode1(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('ab')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testIndefMode2(self):
+ self.s.clear()
+ self.s.append('ab')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testIndefMode3(self):
+ self.s.clear()
+ self.s.append('b')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+ def testIndefMode4(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('b')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SetEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ assert encoder.encode(self.s) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+
+class SetEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33))
+ ))
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((49, 128, 5, 0, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 5, 0, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+
+class SetEncoderWithChoiceWithSchemaEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ c = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('actual', univ.Boolean(0))
+ ))
+ self.s = univ.Set(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('status', c)
+ ))
+
+ def testIndefMode(self):
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByName('status')
+ self.s.getComponentByName('status').setComponentByPosition(0, 1)
+ assert encoder.encode(self.s) == ints2octs((49, 128, 1, 1, 255, 5, 0, 0, 0))
+
+
+class SetEncoderWithTaggedChoiceEncoderTestCase(BaseTestCase):
+
+ def testWithUntaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ )
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 128, 1, 1, 255, 4, 1, 65, 0, 0))
+
+ def testWithTaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ ).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 65, 167, 128, 1, 1, 255, 0, 0, 0, 0))
+
+
+class SequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ assert encoder.encode(self.s) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+
+class SequenceEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33))
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((48, 128, 5, 0, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+
+class SequenceEncoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 50, 0, 0)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.OctetString('quick brown')
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 2, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110, 0, 0)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 163, 128, 163, 128, 49, 50, 0, 0, 0, 0, 0, 0)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 163, 128, 163, 128, 49, 50, 0, 0, 0, 0, 0, 0)
+ )
+
+
+class SequenceEncoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any()), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 128, 49, 50, 0, 0, 0, 0)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.OctetString('quick brown'))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 2, 49, 128, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110, 0, 0, 0, 0)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 128, 163, 128, 163, 128, 49, 50, 0, 0,
+ 0, 0, 0, 0, 0, 0)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 128, 163, 128, 163, 128, 49, 50, 0, 0,
+ 0, 0, 0, 0, 0, 0)
+ )
+
+
+class NestedOptionalSequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ inner = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ outerWithOptional = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', inner),
+ )
+ )
+
+ outerWithDefault = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('inner', inner),
+ )
+ )
+
+ self.s1 = outerWithOptional
+ self.s2 = outerWithDefault
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithDefault(self):
+ self.s1.clear()
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ return self.s1
+
+ def __initOptional(self):
+ self.s1.clear()
+ return self.s1
+
+ def __initDefaultWithDefaultAndOptional(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ self.s2[0][1] = 123
+ return self.s2
+
+ def __initDefaultWithDefault(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ return self.s2
+
+ def __initDefaultWithOptional(self):
+ self.s2.clear()
+ self.s2[0][1] = 123
+ return self.s2
+
+ def testOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+ def testDefaultWithDefaultAndOptional(self):
+ s = self.__initDefaultWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 2, 1, 123, 0, 0, 0, 0))
+
+ def testDefaultWithDefault(self):
+ s = self.__initDefaultWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testDefaultWithOptional(self):
+ s = self.__initDefaultWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 2, 1, 123, 0, 0, 0, 0))
+
+
+class NestedOptionalChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ layer3 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ layer2 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('inner', layer3),
+ namedtype.NamedType('first-name', univ.OctetString())
+ )
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithDefault(self):
+ self.s.clear()
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+
+class NestedOptionalSequenceOfEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ layer2 = univ.SequenceOf(
+ componentType=univ.OctetString()
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithValue(self):
+ self.s.clear()
+ self.s[0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testOptionalWithValue(self):
+ s = self.__initOptionalWithValue()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/der/__init__.py b/contrib/python/pyasn1/py2/tests/codec/der/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/der/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/codec/der/__main__.py b/contrib/python/pyasn1/py2/tests/codec/der/__main__.py
new file mode 100644
index 0000000000..23560098fd
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/der/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.der.test_encoder.suite',
+ 'tests.codec.der.test_decoder.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/der/test_decoder.py b/contrib/python/pyasn1/py2/tests/codec/der/test_decoder.py
new file mode 100644
index 0000000000..5f61408317
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/der/test_decoder.py
@@ -0,0 +1,368 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.codec.der import decoder
+from pyasn1.compat.octets import ints2octs, null
+from pyasn1.error import PyAsn1Error
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ ints2octs((3, 127, 6) + (170,) * 125 + (128,))
+ ) == (((1, 0) * 501), null)
+
+ def testIndefMode(self):
+ try:
+ decoder.decode(
+ ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'indefinite length encoding tolerated'
+
+ def testDefModeChunked(self):
+ try:
+ assert decoder.decode(
+ ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'chunked encoding tolerated'
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ '\004\017Quick brown fox'.encode()
+ ) == ('Quick brown fox'.encode(), ''.encode())
+
+ def testIndefMode(self):
+ try:
+ decoder.decode(
+ ints2octs((36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'indefinite length encoding tolerated'
+
+ def testChunkedMode(self):
+ try:
+ decoder.decode(
+ ints2octs((36, 23, 4, 2, 81, 117, 4, 2, 105, 99, 4, 2, 107, 32, 4, 2, 98, 114, 4, 2, 111, 119, 4, 1, 110))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'chunked encoding tolerated'
+
+
+class SequenceDecoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 3, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='060127')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithUnaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(componentType=univ.Any()),
+ openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 49, 3, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs( (48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/der/test_encoder.py b/contrib/python/pyasn1/py2/tests/codec/der/test_encoder.py
new file mode 100644
index 0000000000..6500396115
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/der/test_encoder.py
@@ -0,0 +1,665 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.codec.der import encoder
+from pyasn1.compat.octets import ints2octs
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def testDefModeShort(self):
+ assert encoder.encode(
+ univ.OctetString('Quick brown fox')
+ ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testDefModeLong(self):
+ assert encoder.encode(
+ univ.OctetString('Q' * 10000)
+ ) == ints2octs((4, 130, 39, 16) + (81,) * 10000)
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def testDefModeShort(self):
+ assert encoder.encode(
+ univ.BitString((1,))
+ ) == ints2octs((3, 2, 7, 128))
+
+ def testDefModeLong(self):
+ assert encoder.encode(
+ univ.BitString((1,) * 80000)
+ ) == ints2octs((3, 130, 39, 17, 0) + (255,) * 10000)
+
+
+class SetOfEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.SetOf(componentType=univ.OctetString())
+
+ def testDefMode1(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('ab')
+
+ assert encoder.encode(self.s) == ints2octs((49, 7, 4, 1, 97, 4, 2, 97, 98))
+
+ def testDefMode2(self):
+ self.s.clear()
+ self.s.append('ab')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 7, 4, 1, 97, 4, 2, 97, 98))
+
+ def testDefMode3(self):
+ self.s.clear()
+ self.s.append('b')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 6, 4, 1, 97, 4, 1, 98))
+
+ def testDefMode4(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('b')
+
+ assert encoder.encode(self.s) == ints2octs((49, 6, 4, 1, 97, 4, 1, 98))
+
+
+class SetWithAlternatingChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ c = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('amount', univ.Boolean()))
+ )
+
+ self.s = univ.Set(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('value', univ.Integer(5)),
+ namedtype.NamedType('status', c))
+ )
+
+ def testComponentsOrdering1(self):
+ self.s.setComponentByName('status')
+ self.s.getComponentByName('status').setComponentByPosition(0, 'A')
+ assert encoder.encode(self.s) == ints2octs((49, 6, 2, 1, 5, 4, 1, 65))
+
+ def testComponentsOrdering2(self):
+ self.s.setComponentByName('status')
+ self.s.getComponentByName('status').setComponentByPosition(1, True)
+ assert encoder.encode(self.s) == ints2octs((49, 6, 1, 1, 255, 2, 1, 5))
+
+
+class SetWithTaggedChoiceEncoderTestCase(BaseTestCase):
+
+ def testWithUntaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ )
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 6, 1, 1, 255, 4, 1, 65))
+
+ def testWithTaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ ).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 8, 4, 1, 65, 167, 3, 1, 1, 255))
+
+
+class SequenceEncoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 5, 2, 1, 1, 49, 50)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.OctetString('quick brown')
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 14, 2, 1, 2, 113, 117, 105, 99, 107, 32,
+ 98, 114, 111, 119, 110)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 9, 2, 1, 1, 131, 4, 131, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 9, 2, 1, 1, 163, 4, 163, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any()), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 7, 2, 1, 1, 49, 2, 49, 50)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.OctetString('quick brown'))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 16, 2, 1, 2, 49, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 11, 2, 1, 1, 49, 6, 131, 4, 131, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 11, 2, 1, 1, 49, 6, 163, 4, 163, 2, 49, 50)
+ )
+
+
+class NestedOptionalSequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ inner = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ outerWithOptional = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', inner),
+ )
+ )
+
+ outerWithDefault = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('inner', inner),
+ )
+ )
+
+ self.s1 = outerWithOptional
+ self.s2 = outerWithDefault
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithDefault(self):
+ self.s1.clear()
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ return self.s1
+
+ def __initOptional(self):
+ self.s1.clear()
+ return self.s1
+
+ def __initDefaultWithDefaultAndOptional(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ self.s2[0][1] = 123
+ return self.s2
+
+ def __initDefaultWithDefault(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ return self.s2
+
+ def __initDefaultWithOptional(self):
+ self.s2.clear()
+ self.s2[0][1] = 123
+ return self.s2
+
+ def testDefModeOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 11, 48, 9, 4, 4, 116, 101, 115, 116, 2, 1, 123))
+
+ def testDefModeOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 5, 48, 3, 2, 1, 123))
+
+ def testDefModeOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+ def testDefModeDefaultWithDefaultAndOptional(self):
+ s = self.__initDefaultWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 11, 48, 9, 4, 4, 116, 101, 115, 116, 2, 1, 123))
+
+ def testDefModeDefaultWithDefault(self):
+ s = self.__initDefaultWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeDefaultWithOptional(self):
+ s = self.__initDefaultWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 5, 48, 3, 2, 1, 123))
+
+
+class NestedOptionalChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ layer3 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ layer2 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('inner', layer3),
+ namedtype.NamedType('first-name', univ.OctetString())
+ )
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithDefault(self):
+ self.s.clear()
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testDefModeOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 11, 48, 9, 4, 4, 116, 101, 115, 116, 2, 1, 123))
+
+ def testDefModeOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 5, 48, 3, 2, 1, 123))
+
+ def testDefModeOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+
+class NestedOptionalSequenceOfEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ layer2 = univ.SequenceOf(
+ componentType=univ.OctetString()
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithValue(self):
+ self.s.clear()
+ self.s[0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testDefModeOptionalWithValue(self):
+ s = self.__initOptionalWithValue()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+
+class EmptyInnerFieldOfSequenceEncoderTestCase(BaseTestCase):
+
+ def testInitializedOptionalNullIsEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('null', univ.Null())
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 2, 5, 0))
+
+ def testUninitializedOptionalNullIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('null', univ.Null())
+ )
+ )
+
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+ def testInitializedDefaultNullIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('null', univ.Null(''))
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+ def testInitializedOptionalOctetStringIsEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('str', univ.OctetString())
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 2, 4, 0))
+
+ def testUninitializedOptionalOctetStringIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('str', univ.OctetString())
+ )
+ )
+
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+ def testInitializedDefaultOctetStringIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('str', univ.OctetString(''))
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+
+class ClassConstructorTestCase(BaseTestCase):
+ def testKeywords(self):
+ tagmap = {"tagmap": True}
+ typemap = {"typemap": True}
+
+ sie = encoder.Encoder()._singleItemEncoder
+ self.assertIs(sie._tagMap, encoder.TAG_MAP)
+ self.assertIs(sie._typeMap, encoder.TYPE_MAP)
+
+ sie = encoder.Encoder(
+ tagMap=tagmap, typeMap=typemap
+ )._singleItemEncoder
+ self.assertIs(sie._tagMap, tagmap)
+ self.assertIs(sie._typeMap, typemap)
+
+ sie = encoder.Encoder(tagmap, typemap)._singleItemEncoder
+ self.assertIs(sie._tagMap, tagmap)
+ self.assertIs(sie._typeMap, typemap)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/native/__init__.py b/contrib/python/pyasn1/py2/tests/codec/native/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/native/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/codec/native/__main__.py b/contrib/python/pyasn1/py2/tests/codec/native/__main__.py
new file mode 100644
index 0000000000..ab7faea877
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/native/__main__.py
@@ -0,0 +1,15 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.native.test_encoder.suite',
+ 'tests.codec.native.test_decoder.suite']
+)
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/native/test_decoder.py b/contrib/python/pyasn1/py2/tests/codec/native/test_decoder.py
new file mode 100644
index 0000000000..be7fd7ec0a
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/native/test_decoder.py
@@ -0,0 +1,120 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.codec.native import decoder
+from pyasn1.error import PyAsn1Error
+
+
+class BadAsn1SpecTestCase(BaseTestCase):
+ def testBadSpec(self):
+ try:
+ decoder.decode('', asn1Spec='not an Asn1Item')
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Invalid asn1Spec accepted'
+
+
+class IntegerDecoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert decoder.decode(12, asn1Spec=univ.Integer()) == univ.Integer(12)
+
+ def testNegInt(self):
+ assert decoder.decode(-12, asn1Spec=univ.Integer()) == univ.Integer(-12)
+
+
+class BooleanDecoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert decoder.decode(True, asn1Spec=univ.Boolean()) == univ.Boolean(True)
+
+ def testTrueNeg(self):
+ assert decoder.decode(False, asn1Spec=univ.Boolean()) == univ.Boolean(False)
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testSimple(self):
+ assert decoder.decode('11111111', asn1Spec=univ.BitString()) == univ.BitString(hexValue='ff')
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testSimple(self):
+ assert decoder.decode('Quick brown fox', asn1Spec=univ.OctetString()) == univ.OctetString('Quick brown fox')
+
+
+class NullDecoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert decoder.decode(None, asn1Spec=univ.Null()) == univ.Null('')
+
+
+class ObjectIdentifierDecoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert decoder.decode('1.3.6.11', asn1Spec=univ.ObjectIdentifier()) == univ.ObjectIdentifier('1.3.6.11')
+
+
+class RealDecoderTestCase(BaseTestCase):
+ def testSimple(self):
+ assert decoder.decode(1.33, asn1Spec=univ.Real()) == univ.Real(1.33)
+
+
+class SequenceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.NamedType('first-name', univ.OctetString()),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+
+ def testSimple(self):
+ s = self.s.clone()
+ s[0] = univ.Null('')
+ s[1] = univ.OctetString('xx')
+ s[2] = univ.Integer(33)
+ assert decoder.decode({'place-holder': None, 'first-name': 'xx', 'age': 33}, asn1Spec=self.s) == s
+
+
+class ChoiceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.NamedType('first-name', univ.OctetString()),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+
+ def testSimple(self):
+ s = self.s.clone()
+ s[1] = univ.OctetString('xx')
+ assert decoder.decode({'first-name': 'xx'}, asn1Spec=self.s) == s
+
+
+class AnyDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Any()
+
+ def testSimple(self):
+ assert decoder.decode('fox', asn1Spec=univ.Any()) == univ.Any('fox')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/native/test_encoder.py b/contrib/python/pyasn1/py2/tests/codec/native/test_encoder.py
new file mode 100644
index 0000000000..662c284b3c
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/native/test_encoder.py
@@ -0,0 +1,141 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.codec.native import encoder
+from pyasn1.compat.octets import str2octs
+from pyasn1.error import PyAsn1Error
+
+
+class BadAsn1SpecTestCase(BaseTestCase):
+ def testBadValueType(self):
+ try:
+ encoder.encode('not an Asn1Item')
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert 0, 'Invalid value type accepted'
+
+
+class IntegerEncoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert encoder.encode(univ.Integer(12)) == 12
+
+ def testNegInt(self):
+ assert encoder.encode(univ.Integer(-12)) == -12
+
+
+class BooleanEncoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert encoder.encode(univ.Boolean(1)) is True
+
+ def testFalse(self):
+ assert encoder.encode(univ.Boolean(0)) is False
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.b = univ.BitString((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1))
+
+ def testValue(self):
+ assert encoder.encode(self.b) == '101010011000101'
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.o = univ.OctetString('Quick brown fox')
+
+ def testValue(self):
+ assert encoder.encode(self.o) == str2octs('Quick brown fox')
+
+
+class NullEncoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert encoder.encode(univ.Null('')) is None
+
+
+class ObjectIdentifierEncoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert encoder.encode(univ.ObjectIdentifier((1, 3, 6, 0, 12345))) == '1.3.6.0.12345'
+
+
+class RealEncoderTestCase(BaseTestCase):
+ def testChar(self):
+ assert encoder.encode(univ.Real((123, 10, 11))) == 1.23e+13
+
+ def testPlusInf(self):
+ assert encoder.encode(univ.Real('inf')) == float('inf')
+
+ def testMinusInf(self):
+ assert encoder.encode(univ.Real('-inf')) == float('-inf')
+
+
+class SequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.OptionalNamedType('first-name', univ.OctetString('')),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ ))
+
+ def testSimple(self):
+ s = self.s.clone()
+ s[0] = univ.Null('')
+ s[1] = 'abc'
+ s[2] = 123
+ assert encoder.encode(s) == {'place-holder': None, 'first-name': str2octs('abc'), 'age': 123}
+
+
+class ChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+
+ def testEmpty(self):
+ try:
+ encoder.encode(self.s)
+ except PyAsn1Error:
+ pass
+ else:
+ assert False, 'encoded unset choice'
+
+ def testFilled(self):
+ self.s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(self.s) == {'place-holder': None}
+
+
+class AnyEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any(encoder.encode(univ.OctetString('fox')))
+
+ def testSimple(self):
+ assert encoder.encode(self.s) == str2octs('fox')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/codec/test_streaming.py b/contrib/python/pyasn1/py2/tests/codec/test_streaming.py
new file mode 100644
index 0000000000..7dc87257f2
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/codec/test_streaming.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import io
+import sys
+
+try:
+ import unittest2 as unittest
+
+except ImportError:
+ import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.codec import streaming
+
+
+class CachingStreamWrapperTestCase(BaseTestCase):
+ def setUp(self):
+ self.shortText = b"abcdefghij"
+ self.longText = self.shortText * (io.DEFAULT_BUFFER_SIZE * 5)
+ self.shortStream = io.BytesIO(self.shortText)
+ self.longStream = io.BytesIO(self.longText)
+
+ def testReadJustFromCache(self):
+ wrapper = streaming.CachingStreamWrapper(self.shortStream)
+ wrapper.read(6)
+ wrapper.seek(3)
+ assert wrapper.read(1) == b"d"
+ assert wrapper.read(1) == b"e"
+ assert wrapper.tell() == 5
+
+ def testReadFromCacheAndStream(self):
+ wrapper = streaming.CachingStreamWrapper(self.shortStream)
+ wrapper.read(6)
+ wrapper.seek(3)
+ assert wrapper.read(4) == b"defg"
+ assert wrapper.tell() == 7
+
+ def testReadJustFromStream(self):
+ wrapper = streaming.CachingStreamWrapper(self.shortStream)
+ assert wrapper.read(6) == b"abcdef"
+ assert wrapper.tell() == 6
+
+ def testPeek(self):
+ wrapper = streaming.CachingStreamWrapper(self.longStream)
+ read_bytes = wrapper.peek(io.DEFAULT_BUFFER_SIZE + 73)
+ assert len(read_bytes) == io.DEFAULT_BUFFER_SIZE + 73
+ assert read_bytes.startswith(b"abcdefg")
+ assert wrapper.tell() == 0
+ assert wrapper.read(4) == b"abcd"
+
+ def testMarkedPositionResets(self):
+ wrapper = streaming.CachingStreamWrapper(self.longStream)
+ wrapper.read(10)
+ wrapper.markedPosition = wrapper.tell()
+ assert wrapper.markedPosition == 10
+
+ # Reach the maximum capacity of cache
+ wrapper.read(io.DEFAULT_BUFFER_SIZE)
+ assert wrapper.tell() == 10 + io.DEFAULT_BUFFER_SIZE
+
+ # The following should clear the cache
+ wrapper.markedPosition = wrapper.tell()
+ assert wrapper.markedPosition == 0
+ assert len(wrapper._cache.getvalue()) == 0
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/compat/__init__.py b/contrib/python/pyasn1/py2/tests/compat/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/compat/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/compat/__main__.py b/contrib/python/pyasn1/py2/tests/compat/__main__.py
new file mode 100644
index 0000000000..94436847ba
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/compat/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.compat.test_integer.suite',
+ 'tests.compat.test_octets.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/compat/test_integer.py b/contrib/python/pyasn1/py2/tests/compat/test_integer.py
new file mode 100644
index 0000000000..4026b75402
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/compat/test_integer.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.compat import integer
+
+
+class IntegerTestCase(BaseTestCase):
+
+ if sys.version_info[0] > 2:
+
+ def test_from_bytes_zero(self):
+ assert 0 == integer.from_bytes(bytes([0]), signed=False)
+
+ def test_from_bytes_unsigned(self):
+ assert -66051 == integer.from_bytes(bytes([254, 253, 253]), signed=True)
+
+ def test_from_bytes_signed(self):
+ assert 66051 == integer.from_bytes(bytes([0, 1, 2, 3]), signed=False)
+
+ def test_from_bytes_empty(self):
+ assert 0 == integer.from_bytes(bytes([]))
+
+ else:
+
+ def test_from_bytes_zero(self):
+ assert 0 == integer.from_bytes('\x00', signed=False)
+
+ def test_from_bytes_unsigned(self):
+ assert -66051 == integer.from_bytes('\xfe\xfd\xfd', signed=True)
+
+ def test_from_bytes_signed(self):
+ assert 66051 == integer.from_bytes('\x01\x02\x03', signed=False)
+
+ def test_from_bytes_empty(self):
+ assert 0 == integer.from_bytes('')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/compat/test_octets.py b/contrib/python/pyasn1/py2/tests/compat/test_octets.py
new file mode 100644
index 0000000000..4133950704
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/compat/test_octets.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.compat import octets
+
+
+class OctetsTestCase(BaseTestCase):
+
+ if sys.version_info[0] > 2:
+
+ def test_ints2octs(self):
+ assert [1, 2, 3] == list(octets.ints2octs([1, 2, 3]))
+
+ def test_ints2octs_empty(self):
+ assert not octets.ints2octs([])
+
+ def test_int2oct(self):
+ assert [12] == list(octets.int2oct(12))
+
+ def test_octs2ints(self):
+ assert [1, 2, 3] == list(octets.octs2ints(bytes([1, 2, 3])))
+
+ def test_octs2ints_empty(self):
+ assert not octets.octs2ints(bytes([]))
+
+ def test_oct2int(self):
+ assert 12 == octets.oct2int(bytes([12]))[0]
+
+ def test_str2octs(self):
+ assert bytes([1, 2, 3]) == octets.str2octs('\x01\x02\x03')
+
+ def test_str2octs_empty(self):
+ assert not octets.str2octs('')
+
+ def test_octs2str(self):
+ assert '\x01\x02\x03' == octets.octs2str(bytes([1, 2, 3]))
+
+ def test_octs2str_empty(self):
+ assert not octets.octs2str(bytes([]))
+
+ def test_isOctetsType(self):
+ assert octets.isOctetsType('abc') == False
+ assert octets.isOctetsType(123) == False
+ assert octets.isOctetsType(bytes()) == True
+
+ def test_isStringType(self):
+ assert octets.isStringType('abc') == True
+ assert octets.isStringType(123) == False
+ assert octets.isStringType(bytes()) == False
+
+ def test_ensureString(self):
+ assert 'abc'.encode() == octets.ensureString('abc'.encode())
+ assert bytes([1, 2, 3]) == octets.ensureString([1, 2, 3])
+
+ else:
+
+ def test_ints2octs(self):
+ assert '\x01\x02\x03' == octets.ints2octs([1, 2, 3])
+
+ def test_ints2octs_empty(self):
+ assert not octets.ints2octs([])
+
+ def test_int2oct(self):
+ assert '\x0c' == octets.int2oct(12)
+
+ def test_octs2ints(self):
+ assert [1, 2, 3] == octets.octs2ints('\x01\x02\x03')
+
+ def test_octs2ints_empty(self):
+ assert not octets.octs2ints('')
+
+ def test_oct2int(self):
+ assert 12 == octets.oct2int('\x0c')
+
+ def test_str2octs(self):
+ assert '\x01\x02\x03' == octets.str2octs('\x01\x02\x03')
+
+ def test_str2octs_empty(self):
+ assert not octets.str2octs('')
+
+ def test_octs2str(self):
+ assert '\x01\x02\x03' == octets.octs2str('\x01\x02\x03')
+
+ def test_octs2str_empty(self):
+ assert not octets.octs2str('')
+
+ def test_isOctetsType(self):
+ assert octets.isOctetsType('abc') == True
+ assert octets.isOctetsType(123) == False
+ assert octets.isOctetsType(unicode('abc')) == False
+
+ def test_isStringType(self):
+ assert octets.isStringType('abc') == True
+ assert octets.isStringType(123) == False
+ assert octets.isStringType(unicode('abc')) == True
+
+ def test_ensureString(self):
+ assert 'abc' == octets.ensureString('abc')
+ assert '123' == octets.ensureString(123)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/test_debug.py b/contrib/python/pyasn1/py2/tests/test_debug.py
new file mode 100644
index 0000000000..84ba4f44c4
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/test_debug.py
@@ -0,0 +1,37 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1 import debug
+from pyasn1 import error
+
+class DebugCaseBase(BaseTestCase):
+ def testKnownFlags(self):
+ debug.setLogger(0)
+ debug.setLogger(debug.Debug('all', 'encoder', 'decoder'))
+ debug.setLogger(0)
+
+ def testUnknownFlags(self):
+ try:
+ debug.setLogger(debug.Debug('all', 'unknown', loggerName='xxx'))
+
+ except error.PyAsn1Error:
+ debug.setLogger(0)
+ return
+
+ else:
+ debug.setLogger(0)
+ assert 0, 'unknown debug flag tolerated'
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/__init__.py b/contrib/python/pyasn1/py2/tests/type/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py2/tests/type/__main__.py b/contrib/python/pyasn1/py2/tests/type/__main__.py
new file mode 100644
index 0000000000..67ff23e3c0
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/__main__.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.type.test_constraint.suite',
+ 'tests.type.test_opentype.suite',
+ 'tests.type.test_namedtype.suite',
+ 'tests.type.test_namedval.suite',
+ 'tests.type.test_tag.suite',
+ 'tests.type.test_univ.suite',
+ 'tests.type.test_char.suite',
+ 'tests.type.test_useful.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_char.py b/contrib/python/pyasn1/py2/tests/type/test_char.py
new file mode 100644
index 0000000000..efa179eb0e
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_char.py
@@ -0,0 +1,169 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import pickle
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import char
+from pyasn1.type import univ
+from pyasn1.type import constraint
+from pyasn1.compat.octets import ints2octs
+from pyasn1.error import PyAsn1Error
+
+
+class AbstractStringTestCase(object):
+
+ initializer = ()
+ encoding = 'us-ascii'
+ asn1Type = None
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.asn1String = self.asn1Type(ints2octs(self.initializer), encoding=self.encoding)
+ self.pythonString = ints2octs(self.initializer).decode(self.encoding)
+
+ def testUnicode(self):
+ assert self.asn1String == self.pythonString, 'unicode init fails'
+
+ def testLength(self):
+ assert len(self.asn1String) == len(self.pythonString), 'unicode len() fails'
+
+ def testSizeConstraint(self):
+ asn1Spec = self.asn1Type(subtypeSpec=constraint.ValueSizeConstraint(1, 1))
+
+ try:
+ asn1Spec.clone(self.pythonString)
+ except PyAsn1Error:
+ pass
+ else:
+ assert False, 'Size constraint tolerated'
+
+ try:
+ asn1Spec.clone(self.pythonString[0])
+ except PyAsn1Error:
+ assert False, 'Size constraint failed'
+
+ def testSerialised(self):
+ if sys.version_info[0] < 3:
+ assert str(self.asn1String) == self.pythonString.encode(self.encoding), '__str__() fails'
+ else:
+ assert bytes(self.asn1String) == self.pythonString.encode(self.encoding), '__str__() fails'
+
+ def testPrintable(self):
+ if sys.version_info[0] < 3:
+ assert unicode(self.asn1String) == self.pythonString, '__str__() fails'
+ else:
+ assert str(self.asn1String) == self.pythonString, '__str__() fails'
+
+ def testInit(self):
+ assert self.asn1Type(self.pythonString) == self.pythonString
+ assert self.asn1Type(self.pythonString.encode(self.encoding)) == self.pythonString
+ assert self.asn1Type(univ.OctetString(self.pythonString.encode(self.encoding))) == self.pythonString
+ assert self.asn1Type(self.asn1Type(self.pythonString)) == self.pythonString
+ assert self.asn1Type(self.initializer, encoding=self.encoding) == self.pythonString
+
+ def testInitFromAsn1(self):
+ assert self.asn1Type(self.asn1Type(self.pythonString)) == self.pythonString
+ assert self.asn1Type(univ.OctetString(self.pythonString.encode(self.encoding), encoding=self.encoding)) == self.pythonString
+
+ def testAsOctets(self):
+ assert self.asn1String.asOctets() == self.pythonString.encode(self.encoding), 'testAsOctets() fails'
+
+ def testAsNumbers(self):
+ assert self.asn1String.asNumbers() == self.initializer, 'testAsNumbers() fails'
+
+ def testSeq(self):
+ assert self.asn1String[0] == self.pythonString[0], '__getitem__() fails'
+
+ def testEmpty(self):
+ try:
+ str(self.asn1Type())
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Value operation on ASN1 type tolerated'
+
+ def testAdd(self):
+ assert self.asn1String + self.pythonString.encode(self.encoding) == self.pythonString + self.pythonString, '__add__() fails'
+
+ def testRadd(self):
+ assert self.pythonString.encode(self.encoding) + self.asn1String == self.pythonString + self.pythonString, '__radd__() fails'
+
+ def testMul(self):
+ assert self.asn1String * 2 == self.pythonString * 2, '__mul__() fails'
+
+ def testRmul(self):
+ assert 2 * self.asn1String == 2 * self.pythonString, '__rmul__() fails'
+
+ def testContains(self):
+ assert self.pythonString in self.asn1String
+ assert self.pythonString + self.pythonString not in self.asn1String
+
+ def testReverse(self):
+ assert list(reversed(self.asn1String)) == list(reversed(self.pythonString))
+
+ def testSchemaPickling(self):
+ old_asn1 = self.asn1Type()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == self.asn1Type
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = self.asn1String
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == self.asn1String
+
+
+class VisibleStringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (97, 102)
+ encoding = 'us-ascii'
+ asn1Type = char.VisibleString
+
+
+class GeneralStringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (169, 174)
+ encoding = 'iso-8859-1'
+ asn1Type = char.GeneralString
+
+
+class UTF8StringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (209, 132, 208, 176)
+ encoding = 'utf-8'
+ asn1Type = char.UTF8String
+
+
+class BMPStringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (4, 48, 4, 68)
+ encoding = 'utf-16-be'
+ asn1Type = char.BMPString
+
+
+if sys.version_info[0] > 2:
+
+ # Somehow comparison of UTF-32 encoded strings does not work in Py2
+
+ class UniversalStringTestCase(AbstractStringTestCase, BaseTestCase):
+ initializer = (0, 0, 4, 48, 0, 0, 4, 68)
+ encoding = 'utf-32-be'
+ asn1Type = char.UniversalString
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_constraint.py b/contrib/python/pyasn1/py2/tests/type/test_constraint.py
new file mode 100644
index 0000000000..1ae95ef61a
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_constraint.py
@@ -0,0 +1,420 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import constraint
+from pyasn1.type import error
+
+
+class SingleValueConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.v1 = 1, 2
+ self.v2 = 3, 4
+ self.c1 = constraint.SingleValueConstraint(*self.v1)
+ self.c2 = constraint.SingleValueConstraint(*self.v2)
+
+ def testCmp(self):
+ assert self.c1 == self.c1, 'comparison fails'
+
+ def testHash(self):
+ assert hash(self.c1) != hash(self.c2), 'hash() fails'
+
+ def testGoodVal(self):
+ try:
+ self.c1(1)
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(4)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+ def testContains(self):
+ for v in self.v1:
+ assert v in self.c1
+ assert v not in self.c2
+
+ for v in self.v2:
+ assert v in self.c2
+ assert v not in self.c1
+
+ def testIter(self):
+ assert set(self.v1) == set(self.c1)
+ assert set(self.v2) == set(self.c2)
+
+ def testSub(self):
+ subconst = self.c1 - constraint.SingleValueConstraint(self.v1[0])
+ assert list(subconst) == [self.v1[1]]
+
+ def testAdd(self):
+ superconst = self.c1 + self.c2
+ assert set(superconst) == set(self.v1 + self.v2)
+
+
+class ContainedSubtypeConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ContainedSubtypeConstraint(
+ constraint.SingleValueConstraint(12)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(12)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(4)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ValueRangeConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ValueRangeConstraint(1, 4)
+
+ def testGoodVal(self):
+ try:
+ self.c1(1)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(-5)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ValueSizeConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ValueSizeConstraint(1, 2)
+
+ def testGoodVal(self):
+ try:
+ self.c1('a')
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1('abc')
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
+ def setUp(self):
+ self.v1 = 'A', 'B'
+ self.v2 = 'C', 'D'
+ self.c1 = constraint.PermittedAlphabetConstraint(*self.v1)
+ self.c2 = constraint.PermittedAlphabetConstraint(*self.v2)
+
+ def testGoodVal(self):
+ try:
+ self.c1('A')
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1('E')
+
+ except error.ValueConstraintError:
+ pass
+
+ else:
+ assert 0, 'constraint check fails'
+
+
+class WithComponentsConstraintTestCase(BaseTestCase):
+
+ def testGoodVal(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint()),
+ ('B', constraint.ComponentAbsentConstraint()))
+
+ try:
+ c({'A': 1})
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testGoodValWithExtraFields(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint()),
+ ('B', constraint.ComponentAbsentConstraint())
+ )
+
+ try:
+ c({'A': 1, 'C': 2})
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testEmptyConstraint(self):
+ c = constraint.WithComponentsConstraint()
+
+ try:
+ c({'A': 1})
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint())
+ )
+
+ try:
+ c({'B': 2})
+
+ except error.ValueConstraintError:
+ pass
+
+ else:
+ assert 0, 'constraint check fails'
+
+ def testBadValExtraFields(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint())
+ )
+
+ try:
+ c({'B': 2, 'C': 3})
+
+ except error.ValueConstraintError:
+ pass
+
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ConstraintsIntersectionTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsIntersection(
+ constraint.SingleValueConstraint(4),
+ constraint.ValueRangeConstraint(2, 4)
+ )
+
+ def testCmp1(self):
+ assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
+
+ def testCmp2(self):
+ assert constraint.SingleValueConstraint(5) not in self.c1, \
+ '__cmp__() fails'
+
+ def testCmp3(self):
+ c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
+ constraint.SingleValueConstraint(4),
+ constraint.ValueRangeConstraint(2, 4))
+ )
+ assert self.c1 in c, '__cmp__() fails'
+
+ def testCmp4(self):
+ c = constraint.ConstraintsUnion(
+ constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
+ )
+ assert self.c1 not in c, '__cmp__() fails'
+
+ def testGoodVal(self):
+ try:
+ self.c1(4)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(-5)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class InnerTypeConstraintTestCase(BaseTestCase):
+ def testConst1(self):
+ c = constraint.InnerTypeConstraint(
+ constraint.SingleValueConstraint(4)
+ )
+ try:
+ c(4, 32)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+ try:
+ c(5, 32)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+ def testConst2(self):
+ c = constraint.InnerTypeConstraint(
+ (0, constraint.SingleValueConstraint(4), 'PRESENT'),
+ (1, constraint.SingleValueConstraint(4), 'ABSENT')
+ )
+ try:
+ c(4, 0)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+ try:
+ c(4, 1)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+ try:
+ c(3, 0)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+ # Constraints compositions
+
+
+class ConstraintsIntersectionRangeTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsIntersection(
+ constraint.ValueRangeConstraint(1, 9),
+ constraint.ValueRangeConstraint(2, 5)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(3)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(0)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ConstraintsUnionTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsUnion(
+ constraint.SingleValueConstraint(5),
+ constraint.ValueRangeConstraint(1, 3)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(2)
+ self.c1(5)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(-5)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ConstraintsExclusionTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsExclusion(
+ constraint.ValueRangeConstraint(2, 4)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(6)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(2)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+# Constraints derivations
+
+class DirectDerivationTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.c1 = constraint.SingleValueConstraint(5)
+
+ self.c2 = constraint.ConstraintsUnion(
+ self.c1, constraint.ValueRangeConstraint(1, 3)
+ )
+
+ def testGoodVal(self):
+ assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
+ assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
+
+ def testBadVal(self):
+ assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
+ assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
+
+
+class IndirectDerivationTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.c1 = constraint.ConstraintsIntersection(
+ constraint.ValueRangeConstraint(1, 30)
+ )
+
+ self.c2 = constraint.ConstraintsIntersection(
+ self.c1, constraint.ValueRangeConstraint(1, 20)
+ )
+
+ self.c2 = constraint.ConstraintsIntersection(
+ self.c2, constraint.ValueRangeConstraint(1, 10)
+ )
+
+ def testGoodVal(self):
+ assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
+ assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
+
+ def testBadVal(self):
+ assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
+ assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
+
+# TODO: how to apply size constraints to constructed types?
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_namedtype.py b/contrib/python/pyasn1/py2/tests/type/test_namedtype.py
new file mode 100644
index 0000000000..4585984e6a
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_namedtype.py
@@ -0,0 +1,135 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.error import PyAsn1Error
+
+
+class NamedTypeCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.e = namedtype.NamedType('age', univ.Integer(0))
+
+ def testIter(self):
+ n, t = self.e
+ assert n == 'age' or t == univ.Integer(), 'unpack fails'
+
+ def testRepr(self):
+ assert 'age' in repr(self.e)
+
+
+class NamedTypesCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.e = namedtype.NamedTypes(
+ namedtype.NamedType('first-name', univ.OctetString('')),
+ namedtype.OptionalNamedType('age', univ.Integer(0)),
+ namedtype.NamedType('family-name', univ.OctetString(''))
+ )
+
+ def testRepr(self):
+ assert 'first-name' in repr(self.e)
+
+ def testContains(self):
+ assert 'first-name' in self.e
+ assert '<missing>' not in self.e
+
+ # noinspection PyUnusedLocal
+ def testGetItem(self):
+ assert self.e[0] == namedtype.NamedType('first-name', univ.OctetString(''))
+
+ def testIter(self):
+ assert list(self.e) == ['first-name', 'age', 'family-name']
+
+ def testGetTypeByPosition(self):
+ assert self.e.getTypeByPosition(0) == univ.OctetString(''), \
+ 'getTypeByPosition() fails'
+
+ def testGetNameByPosition(self):
+ assert self.e.getNameByPosition(0) == 'first-name', \
+ 'getNameByPosition() fails'
+
+ def testGetPositionByName(self):
+ assert self.e.getPositionByName('first-name') == 0, \
+ 'getPositionByName() fails'
+
+ def testGetTypesNearPosition(self):
+ assert self.e.getTagMapNearPosition(0).presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString('')
+ }
+ assert self.e.getTagMapNearPosition(1).presentTypes == {
+ univ.Integer.tagSet: univ.Integer(0),
+ univ.OctetString.tagSet: univ.OctetString('')
+ }
+ assert self.e.getTagMapNearPosition(2).presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString('')
+ }
+
+ def testGetTagMap(self):
+ assert self.e.tagMap.presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString(''),
+ univ.Integer.tagSet: univ.Integer(0)
+ }
+
+ def testStrTagMap(self):
+ assert 'TagMap' in str(self.e.tagMap)
+ assert 'OctetString' in str(self.e.tagMap)
+ assert 'Integer' in str(self.e.tagMap)
+
+ def testReprTagMap(self):
+ assert 'TagMap' in repr(self.e.tagMap)
+ assert 'OctetString' in repr(self.e.tagMap)
+ assert 'Integer' in repr(self.e.tagMap)
+
+ def testGetTagMapWithDups(self):
+ try:
+ self.e.tagMapUnique[0]
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Duped types not noticed'
+
+ def testGetPositionNearType(self):
+ assert self.e.getPositionNearType(univ.OctetString.tagSet, 0) == 0
+ assert self.e.getPositionNearType(univ.Integer.tagSet, 1) == 1
+ assert self.e.getPositionNearType(univ.OctetString.tagSet, 2) == 2
+
+
+class OrderedNamedTypesCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.e = namedtype.NamedTypes(
+ namedtype.NamedType('first-name', univ.OctetString('')),
+ namedtype.NamedType('age', univ.Integer(0))
+ )
+
+ def testGetTypeByPosition(self):
+ assert self.e.getTypeByPosition(0) == univ.OctetString(''), \
+ 'getTypeByPosition() fails'
+
+
+class DuplicateNamedTypesCaseBase(BaseTestCase):
+ def testDuplicateDefaultTags(self):
+ nt = namedtype.NamedTypes(
+ namedtype.NamedType('first-name', univ.Any()),
+ namedtype.NamedType('age', univ.Any())
+ )
+
+ assert isinstance(nt.tagMap, namedtype.NamedTypes.PostponedError)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_namedval.py b/contrib/python/pyasn1/py2/tests/type/test_namedval.py
new file mode 100644
index 0000000000..fda2da2a95
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_namedval.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedval
+
+
+class NamedValuesCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.e = namedval.NamedValues(('off', 0), ('on', 1))
+
+ def testDict(self):
+ assert set(self.e.items()) == {('off', 0), ('on', 1)}
+ assert set(self.e.keys()) == {'off', 'on'}
+ assert set(self.e) == {'off', 'on'}
+ assert set(self.e.values()) == {0, 1}
+ assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
+ assert 0 in self.e and 1 in self.e and 2 not in self.e
+
+ def testInit(self):
+ assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
+ assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
+ assert namedval.NamedValues(('c', 0)) == {'c': 0}
+ assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}
+
+ def testLen(self):
+ assert len(self.e) == 2
+ assert len(namedval.NamedValues()) == 0
+
+ def testAdd(self):
+ assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}
+
+ def testClone(self):
+ assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
+ assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}
+
+ def testStrRepr(self):
+ assert str(self.e)
+ assert repr(self.e)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_opentype.py b/contrib/python/pyasn1/py2/tests/type/test_opentype.py
new file mode 100644
index 0000000000..5ae9715f40
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_opentype.py
@@ -0,0 +1,101 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import univ
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.compat.octets import str2octs
+from pyasn1.error import PyAsn1Error
+
+
+class UntaggedAnyTestCase(BaseTestCase):
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any())
+ )
+
+ self.s = Sequence()
+
+ def testTypeCheckOnAssignment(self):
+
+ self.s.clear()
+
+ self.s['blob'] = univ.Any(str2octs('xxx'))
+
+ # this should succeed because Any is untagged and unconstrained
+ self.s['blob'] = univ.Integer(123)
+
+
+class TaggedAnyTestCase(BaseTestCase):
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.taggedAny = univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 20))
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', self.taggedAny)
+ )
+
+ self.s = Sequence()
+
+ def testTypeCheckOnAssignment(self):
+
+ self.s.clear()
+
+ self.s['blob'] = self.taggedAny.clone('xxx')
+
+ try:
+ self.s.setComponentByName('blob', univ.Integer(123))
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'non-open type assignment tolerated'
+
+
+class TaggedAnyOpenTypeTestCase(BaseTestCase):
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.taggedAny = univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 20))
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', self.taggedAny, openType=opentype.OpenType(name='id'))
+ )
+
+ self.s = Sequence()
+
+ def testTypeCheckOnAssignment(self):
+
+ self.s.clear()
+
+ self.s['blob'] = univ.Any(str2octs('xxx'))
+ self.s['blob'] = univ.Integer(123)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_tag.py b/contrib/python/pyasn1/py2/tests/type/test_tag.py
new file mode 100644
index 0000000000..5d27b72b8b
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_tag.py
@@ -0,0 +1,133 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+
+
+class TagTestCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.t1 = tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 3)
+ self.t2 = tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 3)
+
+
+class TagReprTestCase(TagTestCaseBase):
+ def testRepr(self):
+ assert 'Tag' in repr(self.t1)
+
+
+class TagCmpTestCase(TagTestCaseBase):
+ def testCmp(self):
+ assert self.t1 == self.t2, 'tag comparison fails'
+
+ def testHash(self):
+ assert hash(self.t1) == hash(self.t2), 'tag hash comparison fails'
+
+ def testSequence(self):
+ assert self.t1[0] == self.t2[0] and \
+ self.t1[1] == self.t2[1] and \
+ self.t1[2] == self.t2[2], 'tag sequence protocol fails'
+
+
+class TagSetTestCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.ts1 = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )
+
+ self.ts2 = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )
+
+
+class TagSetReprTestCase(TagSetTestCaseBase):
+ def testRepr(self):
+ assert 'TagSet' in repr(self.ts1)
+
+
+class TagSetCmpTestCase(TagSetTestCaseBase):
+ def testCmp(self):
+ assert self.ts1 == self.ts2, 'tag set comparison fails'
+
+ def testHash(self):
+ assert hash(self.ts1) == hash(self.ts2), 'tag set hash comp. fails'
+
+ def testLen(self):
+ assert len(self.ts1) == len(self.ts2), 'tag length comparison fails'
+
+
+class TaggingTestSuite(TagSetTestCaseBase):
+ def testImplicitTag(self):
+ t = self.ts1.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 14)
+ )
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 14)
+ ), 'implicit tagging went wrong'
+
+ def testExplicitTag(self):
+ t = self.ts1.tagExplicitly(
+ tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 32)
+ )
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassPrivate, tag.tagFormatConstructed, 32)
+ ), 'explicit tagging went wrong'
+
+
+class TagSetAddTestSuite(TagSetTestCaseBase):
+ def testAdd(self):
+ t = self.ts1 + tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ ), 'TagSet.__add__() fails'
+
+ def testRadd(self):
+ t = tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2) + self.ts1
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ ), 'TagSet.__radd__() fails'
+
+
+class SuperTagSetTestCase(TagSetTestCaseBase):
+ def testSuperTagCheck1(self):
+ assert self.ts1.isSuperTagSetOf(
+ tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )), 'isSuperTagSetOf() fails'
+
+ def testSuperTagCheck2(self):
+ assert not self.ts1.isSuperTagSetOf(
+ tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 13)
+ )), 'isSuperTagSetOf() fails'
+
+ def testSuperTagCheck3(self):
+ assert self.ts1.isSuperTagSetOf(
+ tag.TagSet((), tag.Tag(tag.tagClassUniversal,
+ tag.tagFormatSimple, 12))
+ ), 'isSuperTagSetOf() fails'
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_univ.py b/contrib/python/pyasn1/py2/tests/type/test_univ.py
new file mode 100644
index 0000000000..001f978d48
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_univ.py
@@ -0,0 +1,2184 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import math
+import pickle
+import platform
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import univ
+from pyasn1.type import tag
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import error
+from pyasn1.compat.octets import str2octs, ints2octs, octs2ints, octs2str
+from pyasn1.error import PyAsn1Error
+from pyasn1.error import PyAsn1UnicodeEncodeError, PyAsn1UnicodeDecodeError
+
+
class NoValueTestCase(BaseTestCase):
    """Verify that univ.NoValue behaves as an inert singleton.

    Every value-like operation (str, len, comparison, subscription,
    arithmetic, bit shifts, boolean evaluation) must raise PyAsn1Error;
    only introspection such as repr(), isinstance() and sys.getsizeof()
    may succeed.
    """

    def testSingleton(self):
        assert univ.NoValue() is univ.NoValue(), 'NoValue is not a singleton'

    def testRepr(self):
        # repr() must stay usable for debugging, hence must not raise
        try:
            repr(univ.noValue)

        except PyAsn1Error:
            assert False, 'repr() on NoValue object fails'

    def testIsInstance(self):
        try:
            assert isinstance(univ.noValue, univ.NoValue), 'isinstance() on NoValue() object fails'

        except PyAsn1Error:
            assert False, 'isinstance() on NoValue object fails'

    def testStr(self):
        # str() would silently turn a missing value into text - must raise
        try:
            str(univ.noValue)

        except PyAsn1Error:
            pass

        else:
            assert False, 'str() works for NoValue object'

    def testLen(self):
        try:
            len(univ.noValue)

        except PyAsn1Error:
            pass

        else:
            assert False, 'len() works for NoValue object'

    def testCmp(self):
        try:
            univ.noValue == 1

        except PyAsn1Error:
            pass

        else:
            assert False, 'comparison works for NoValue object'

    def testSubs(self):
        try:
            univ.noValue[0]

        except PyAsn1Error:
            pass

        else:
            assert False, '__getitem__() works for NoValue object'

    def testKey(self):
        try:
            univ.noValue['key']

        except PyAsn1Error:
            pass

        else:
            assert False, '__getitem__() works for NoValue object'

    def testKeyAssignment(self):
        try:
            univ.noValue['key'] = 123

        except PyAsn1Error:
            pass

        else:
            assert False, '__setitem__() works for NoValue object'

    def testInt(self):
        try:
            int(univ.noValue)

        except PyAsn1Error:
            pass

        else:
            assert False, 'integer conversion works for NoValue object'

    def testAdd(self):
        try:
            univ.noValue + univ.noValue

        except PyAsn1Error:
            pass

        else:
            assert False, 'addition works for NoValue object'

    def testBitShift(self):
        try:
            univ.noValue << 1

        except PyAsn1Error:
            pass

        else:
            assert False, 'bitshift works for NoValue object'

    def testBooleanEvaluation(self):
        try:
            if univ.noValue:
                pass

        except PyAsn1Error:
            pass

        else:
            assert False, 'boolean evaluation works for NoValue object'

    @unittest.skipIf(
        platform.python_implementation() == "PyPy",
        "getsizeof() raises TypeError on PyPy"
    )
    def testSizeOf(self):
        # getsizeof() is introspection, it must not trip NoValue's guards
        try:
            sys.getsizeof(univ.noValue)

        except PyAsn1Error:
            assert False, 'sizeof failed for NoValue object'
+
+
class IntegerTestCase(BaseTestCase):
    """Exercise univ.Integer: arithmetic operator overloads, conversions,
    named values, tagging and subtyping."""

    def testStr(self):
        # Py2 may render long integers with an 'L' suffix
        assert str(univ.Integer(1)) in ('1', '1L'), 'str() fails'

    def testRepr(self):
        assert '123' in repr(univ.Integer(123))

    def testAnd(self):
        assert univ.Integer(1) & 0 == 0, '__and__() fails'

    def testOr(self):
        assert univ.Integer(1) | 0 == 1, '__or__() fails'

    def testXor(self):
        assert univ.Integer(1) ^ 0 == 1, '__xor__() fails'

    def testRand(self):
        assert 0 & univ.Integer(1) == 0, '__rand__() fails'

    def testRor(self):
        assert 0 | univ.Integer(1) == 1, '__ror__() fails'

    def testRxor(self):
        assert 0 ^ univ.Integer(1) == 1, '__rxor__() fails'

    def testAdd(self):
        assert univ.Integer(-4) + 6 == 2, '__add__() fails'

    def testRadd(self):
        assert 4 + univ.Integer(5) == 9, '__radd__() fails'

    def testSub(self):
        assert univ.Integer(3) - 6 == -3, '__sub__() fails'

    def testRsub(self):
        assert 6 - univ.Integer(3) == 3, '__rsub__() fails'

    def testMul(self):
        assert univ.Integer(3) * -3 == -9, '__mul__() fails'

    def testRmul(self):
        assert 2 * univ.Integer(3) == 6, '__rmul__() fails'

    def testDivInt(self):
        assert univ.Integer(4) / 2 == 2, '__div__() fails'

    # Division semantics differ between Py2 (floor) and Py3 (true division),
    # so the appropriate test bodies are selected at class-build time.
    if sys.version_info[0] > 2:
        def testDivFloat(self):
            assert univ.Integer(3) / 2 == 1.5, '__div__() fails'

        def testRdivFloat(self):
            assert 3 / univ.Integer(2) == 1.5, '__rdiv__() fails'
    else:
        def testDivFloat(self):
            assert univ.Integer(3) / 2 == 1, '__div__() fails'

        def testRdivFloat(self):
            assert 3 / univ.Integer(2) == 1, '__rdiv__() fails'

    def testRdivInt(self):
        assert 6 / univ.Integer(3) == 2, '__rdiv__() fails'

    if sys.version_info[0] > 2:
        def testTrueDiv(self):
            assert univ.Integer(3) / univ.Integer(2) == 1.5, '__truediv__() fails'

    def testFloorDiv(self):
        assert univ.Integer(3) // univ.Integer(2) == 1, '__floordiv__() fails'

    def testMod(self):
        assert univ.Integer(3) % 2 == 1, '__mod__() fails'

    def testRmod(self):
        assert 4 % univ.Integer(3) == 1, '__rmod__() fails'

    def testPow(self):
        assert univ.Integer(3) ** 2 == 9, '__pow__() fails'

    def testRpow(self):
        assert 2 ** univ.Integer(2) == 4, '__rpow__() fails'

    def testLshift(self):
        assert univ.Integer(1) << 1 == 2, '<< fails'

    def testRshift(self):
        assert univ.Integer(2) >> 1 == 1, '>> fails'

    def testInt(self):
        assert int(univ.Integer(3)) == 3, '__int__() fails'

    def testLong(self):
        assert int(univ.Integer(8)) == 8, '__long__() fails'

    def testFloat(self):
        assert float(univ.Integer(4)) == 4.0, '__float__() fails'

    def testPos(self):
        assert +univ.Integer(1) == 1, '__pos__() fails'

    def testNeg(self):
        assert -univ.Integer(1) == -1, '__neg__() fails'

    def testInvert(self):
        assert ~univ.Integer(1) == -2, '__invert__() fails'

    def testRound(self):
        assert round(univ.Integer(1), 3) == 1.0, '__round__() fails'

    def testFloor(self):
        assert math.floor(univ.Integer(1)) == 1, '__floor__() fails'

    def testCeil(self):
        assert math.ceil(univ.Integer(1)) == 1, '__ceil__() fails'

    def testTrunc(self):
        assert math.trunc(univ.Integer(1)) == 1, '__trunc__() fails'

    def testPrettyIn(self):
        # string initializers are parsed into integers
        assert univ.Integer('3') == 3, 'prettyIn() fails'

    def testTag(self):
        # INTEGER carries universal tag number 0x02
        assert univ.Integer().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
        )

    def testNamedVals(self):

        class Integer(univ.Integer):
            namedValues = univ.Integer.namedValues.clone(('asn1', 1))

        assert Integer('asn1') == 1, 'named val fails'
        assert int(Integer('asn1')) == 1, 'named val fails'
        assert str(Integer('asn1')) == 'asn1', 'named val __str__() fails'

    def testSubtype(self):
        # subtype() with implicitTag must equal direct construction with
        # the equivalent tagSet and constraint intersection
        assert univ.Integer().subtype(
            value=1,
            implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 2),
            subtypeSpec=constraint.SingleValueConstraint(1, 3)
        ) == univ.Integer(
            value=1,
            tagSet=tag.TagSet(tag.Tag(tag.tagClassPrivate,
                                      tag.tagFormatSimple, 2)),
            subtypeSpec=constraint.ConstraintsIntersection(constraint.SingleValueConstraint(1, 3))
        )
+
+
class IntegerPicklingTestCase(unittest.TestCase):
    """Ensure univ.Integer survives a pickle round-trip as schema and as value."""

    def testSchemaPickling(self):
        schema = univ.Integer()
        blob = pickle.dumps(schema)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.Integer
        assert schema.isSameTypeWith(restored)

    def testValuePickling(self):
        blob = pickle.dumps(univ.Integer(-123))
        assert blob
        assert pickle.loads(blob) == -123
+
+
class BooleanTestCase(BaseTestCase):
    """Exercise univ.Boolean: truth values, conversions, tag and the
    boolean-only value constraint."""

    def testTruth(self):
        assert univ.Boolean(True) and univ.Boolean(1), 'Truth initializer fails'

    def testFalse(self):
        assert not univ.Boolean(False) and not univ.Boolean(0), 'False initializer fails'

    def testStr(self):
        assert str(univ.Boolean(1)) == 'True', 'str() fails'

    def testInt(self):
        assert int(univ.Boolean(1)) == 1, 'int() fails'

    def testRepr(self):
        assert 'Boolean' in repr(univ.Boolean(1))

    def testTag(self):
        # BOOLEAN carries universal tag number 0x01
        assert univ.Boolean().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01)
        )

    def testConstraints(self):

        class Boolean(univ.Boolean):
            pass

        # 2 is outside the BOOLEAN value range and must be rejected
        try:
            Boolean(2)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint fail'
+
+
class BooleanPicklingTestCase(unittest.TestCase):
    """Ensure univ.Boolean survives a pickle round-trip as schema and as value."""

    def testSchemaPickling(self):
        schema = univ.Boolean()
        blob = pickle.dumps(schema)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.Boolean
        assert schema.isSameTypeWith(restored)

    def testValuePickling(self):
        blob = pickle.dumps(univ.Boolean(True))
        assert blob
        assert pickle.loads(blob) == True
+
+
class BitStringTestCase(BaseTestCase):
    """Exercise univ.BitString: named bits, binary/hex initializers,
    sequence protocol and integer/octet conversions."""

    def setUp(self):
        BaseTestCase.setUp(self)

        # two named bits: bit 0 -> 'Active', bit 1 -> 'Urgent'
        self.b = univ.BitString(
            namedValues=namedval.NamedValues(('Active', 0), ('Urgent', 1))
        )

    def testBinDefault(self):

        class BinDefault(univ.BitString):
            defaultBinValue = '1010100110001010'

        assert BinDefault() == univ.BitString(binValue='1010100110001010')

    def testHexDefault(self):

        class HexDefault(univ.BitString):
            defaultHexValue = 'A98A'

        assert HexDefault() == univ.BitString(hexValue='A98A')

    def testSet(self):
        # all accepted initializer spellings must yield the same bit tuple
        assert self.b.clone('Active') == (1,)
        assert self.b.clone('Urgent') == (0, 1)
        assert self.b.clone('Urgent, Active') == (1, 1)
        assert self.b.clone("'1010100110001010'B") == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
        assert self.b.clone("'A98A'H") == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
        assert self.b.clone(binValue='1010100110001010') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
        assert self.b.clone(hexValue='A98A') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
        assert self.b.clone('1010100110001010') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
        assert self.b.clone((1, 0, 1)) == (1, 0, 1)

    def testStr(self):
        assert str(self.b.clone('Urgent')) == '01'

    def testRepr(self):
        assert 'BitString' in repr(self.b.clone('Urgent,Active'))

    def testTag(self):
        # BIT STRING carries universal tag number 0x03
        assert univ.BitString().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
        )

    def testLen(self):
        assert len(self.b.clone("'A98A'H")) == 16

    def testGetItem(self):
        assert self.b.clone("'A98A'H")[0] == 1
        assert self.b.clone("'A98A'H")[1] == 0
        assert self.b.clone("'A98A'H")[2] == 1

    def testReverse(self):
        assert list(reversed(univ.BitString([0, 0, 1]))) == list(univ.BitString([1, 0, 0]))

    def testAsOctets(self):
        assert self.b.clone(hexValue='A98A').asOctets() == ints2octs((0xa9, 0x8a)), 'testAsOctets() fails'

    def testAsInts(self):
        assert self.b.clone(hexValue='A98A').asNumbers() == (0xa9, 0x8a), 'testAsNumbers() fails'

    def testMultipleOfEightPadding(self):
        # (1, 0, 1) is padded up to one whole octet, value 0b101 == 5
        assert self.b.clone((1, 0, 1)).asNumbers() == (5,)

    def testAsInteger(self):
        assert self.b.clone('11000000011001').asInteger() == 12313
        assert self.b.clone('1100110011011111').asInteger() == 52447

    def testStaticDef(self):

        class BitString(univ.BitString):
            pass

        assert BitString('11000000011001').asInteger() == 12313
+
+
class BitStringPicklingTestCase(unittest.TestCase):
    """Ensure univ.BitString survives a pickle round-trip as schema and as value."""

    def testSchemaPickling(self):
        schema = univ.BitString()
        blob = pickle.dumps(schema)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.BitString
        assert schema.isSameTypeWith(restored)

    def testValuePickling(self):
        blob = pickle.dumps(univ.BitString((1, 0, 1, 0)))
        assert blob
        assert pickle.loads(blob) == (1, 0, 1, 0)
+
+
class OctetStringWithUnicodeMixIn(object):
    """Mixin shared by the per-codec OctetString test cases below.

    Subclasses set `initializer` (a tuple of octet values) and `encoding`;
    setUp() derives the decoded text plus its encoded and numeric forms
    once, so every test can compare all three representations.
    """

    initializer = ()
    encoding = 'us-ascii'

    def setUp(self):
        self.pythonString = ints2octs(self.initializer).decode(self.encoding)
        self.encodedPythonString = self.pythonString.encode(self.encoding)
        self.numbersString = tuple(octs2ints(self.encodedPythonString))

    def testInit(self):
        assert univ.OctetString(self.encodedPythonString) == self.encodedPythonString, '__init__() fails'

    def testInitFromAsn1(self):
        assert univ.OctetString(univ.OctetString(self.encodedPythonString)) == self.encodedPythonString
        assert univ.OctetString(univ.Integer(123)) == univ.OctetString('123')

    def testSerialised(self):
        # Py2 str is bytes; Py3 needs an explicit bytes() conversion
        if sys.version_info[0] < 3:
            assert str(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
        else:
            assert bytes(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'

    def testPrintable(self):
        if sys.version_info[0] < 3:
            assert str(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
            assert unicode(univ.OctetString(self.pythonString, encoding=self.encoding)) == self.pythonString, 'unicode init fails'
        else:
            assert str(univ.OctetString(self.pythonString, encoding=self.encoding)) == self.pythonString, 'unicode init fails'

    def testSeq(self):
        assert univ.OctetString(self.encodedPythonString)[0] == self.encodedPythonString[0], '__getitem__() fails'

    def testRepr(self):
        assert 'abc' in repr(univ.OctetString('abc'))

    def testAsOctets(self):
        assert univ.OctetString(self.encodedPythonString).asOctets() == self.encodedPythonString, 'testAsOctets() fails'

    def testAsInts(self):
        assert univ.OctetString(self.encodedPythonString).asNumbers() == self.numbersString, 'testAsNumbers() fails'

    def testAdd(self):
        assert univ.OctetString(self.encodedPythonString) + self.encodedPythonString == self.encodedPythonString + self.encodedPythonString, '__add__() fails'

    def testRadd(self):
        assert self.encodedPythonString + univ.OctetString(self.encodedPythonString) == self.encodedPythonString + self.encodedPythonString, '__radd__() fails'

    def testMul(self):
        assert univ.OctetString(self.encodedPythonString) * 2 == self.encodedPythonString * 2, '__mul__() fails'

    def testRmul(self):
        assert 2 * univ.OctetString(self.encodedPythonString) == 2 * self.encodedPythonString, '__rmul__() fails'

    def testContains(self):
        s = univ.OctetString(self.encodedPythonString)
        assert self.encodedPythonString in s
        assert self.encodedPythonString * 2 not in s

    def testReverse(self):
        assert list(reversed(univ.OctetString(self.encodedPythonString))) == list(reversed(self.encodedPythonString))
+
+
class OctetStringWithAsciiTestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
    # plain ASCII octets 'a', 'f'
    initializer = (97, 102)
    encoding = 'us-ascii'
+
+
class OctetStringUnicodeErrorTestCase(BaseTestCase):
    """Check that codec failures surface as pyasn1's own Unicode errors."""

    def testEncodeError(self):
        # 0xff 0xfe is not representable in us-ascii
        serialized = ints2octs((0xff, 0xfe))

        if sys.version_info < (3, 0):
            text = serialized.decode('iso-8859-1')

        else:
            text = octs2str(serialized)

        # NOTE(review): no `else: assert False` clause - this test also
        # passes if no exception is raised at all; confirm that is intended.
        try:
            univ.OctetString(text, encoding='us-ascii')

        except PyAsn1UnicodeEncodeError:
            pass

    def testDecodeError(self):
        serialized = ints2octs((0xff, 0xfe))

        octetString = univ.OctetString(serialized, encoding='us-ascii')

        # NOTE(review): same caveat as above - a missing exception is not
        # reported as a failure.
        try:
            if sys.version_info < (3, 0):
                unicode(octetString)

            else:
                str(octetString)

        except PyAsn1UnicodeDecodeError:
            pass
+
+
class OctetStringWithUtf8TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
    # Cyrillic letters, two octets per character in UTF-8
    initializer = (208, 176, 208, 177, 208, 178)
    encoding = 'utf-8'
+
+
class OctetStringWithUtf16TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
    # same text as the UTF-8 case, encoded as UTF-16 big-endian
    initializer = (4, 48, 4, 49, 4, 50)
    encoding = 'utf-16-be'
+
+
if sys.version_info[0] > 2:

    # Somehow comparison of UTF-32 encoded strings does not work in Py2

    class OctetStringWithUtf32TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
        # same text again, encoded as UTF-32 big-endian (four octets each)
        initializer = (0, 0, 4, 48, 0, 0, 4, 49, 0, 0, 4, 50)
        encoding = 'utf-32-be'
+
+
class OctetStringTestCase(BaseTestCase):
    """Exercise univ.OctetString: bin/hex defaults and initializers, tag,
    and rendering of a valueless (schema) object."""

    def testBinDefault(self):

        class BinDefault(univ.OctetString):
            defaultBinValue = '1000010111101110101111000000111011'

        assert BinDefault() == univ.OctetString(binValue='1000010111101110101111000000111011')

    def testHexDefault(self):

        class HexDefault(univ.OctetString):
            defaultHexValue = 'FA9823C43E43510DE3422'

        assert HexDefault() == univ.OctetString(hexValue='FA9823C43E43510DE3422')

    def testBinStr(self):
        assert univ.OctetString(binValue="1000010111101110101111000000111011") == ints2octs((133, 238, 188, 14, 192)), 'bin init fails'

    def testHexStr(self):
        assert univ.OctetString(hexValue="FA9823C43E43510DE3422") == ints2octs((250, 152, 35, 196, 62, 67, 81, 13, 227, 66, 32)), 'hex init fails'

    def testTuple(self):
        assert univ.OctetString((1, 2, 3, 4, 5)) == ints2octs((1, 2, 3, 4, 5)), 'tuple init failed'

    def testRepr(self):
        assert 'abc' in repr(univ.OctetString('abc'))

    def testEmpty(self):
        # str() of a schema (valueless) object must raise
        try:
            str(univ.OctetString())
        except PyAsn1Error:
            pass
        else:
            assert 0, 'empty OctetString() not reported'

    def testTag(self):
        # OCTET STRING carries universal tag number 0x04
        assert univ.OctetString().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
        )

    def testStaticDef(self):

        class OctetString(univ.OctetString):
            pass

        assert OctetString(hexValue="FA9823C43E43510DE3422") == ints2octs((250, 152, 35, 196, 62, 67, 81, 13, 227, 66, 32))
+
+
class OctetStringPicklingTestCase(unittest.TestCase):
    """Ensure univ.OctetString survives a pickle round-trip.

    Bug fix: this case previously pickled univ.BitString, duplicating
    BitStringPicklingTestCase and leaving OctetString pickling untested.
    """

    def testSchemaPickling(self):
        # a valueless (schema) object must round-trip preserving its type
        old_asn1 = univ.OctetString()
        serialised = pickle.dumps(old_asn1)
        assert serialised
        new_asn1 = pickle.loads(serialised)
        assert type(new_asn1) == univ.OctetString
        assert old_asn1.isSameTypeWith(new_asn1)

    def testValuePickling(self):
        # a concrete value must round-trip preserving its payload
        old_asn1 = univ.OctetString('abc')
        serialised = pickle.dumps(old_asn1)
        assert serialised
        new_asn1 = pickle.loads(serialised)
        assert new_asn1 == str2octs('abc')
+
+
class Null(BaseTestCase):
    """Exercise univ.Null: accepted/rejected initializers, rendering,
    tag and value constraint."""

    def testInit(self):
        assert not univ.Null().isValue
        # every "empty-ish" initializer yields the canonical empty payload
        assert univ.Null(0) == str2octs('')
        assert univ.Null(False) == str2octs('')
        assert univ.Null('') == str2octs('')
        assert univ.Null(None) == str2octs('')

        # NOTE(review): these two checks pass when the bogus initializer
        # raises PyAsn1Error or yields a truthy object; only a
        # silently-accepted falsy value fails - confirm that is intended.
        try:
            assert univ.Null(True)

        except PyAsn1Error:
            pass

        try:
            assert univ.Null('xxx')

        except PyAsn1Error:
            pass

    def testStr(self):
        assert str(univ.Null('')) == '', 'str() fails'

    def testRepr(self):
        assert 'Null' in repr(univ.Null(''))

    def testTag(self):
        # NULL carries universal tag number 0x05
        assert univ.Null().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
        )

    def testConstraints(self):
        try:
            univ.Null(2)
        except error.ValueConstraintError:
            pass
        else:
            assert 0, 'constraint fail'

    def testStaticDef(self):

        class Null(univ.Null):
            pass

        assert not Null('')
+
+
class NullPicklingTestCase(unittest.TestCase):
    """Ensure univ.Null survives a pickle round-trip as schema and as value."""

    def testSchemaPickling(self):
        schema = univ.Null()
        blob = pickle.dumps(schema)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.Null
        assert schema.isSameTypeWith(restored)

    def testValuePickling(self):
        blob = pickle.dumps(univ.Null(''))
        assert blob
        assert not pickle.loads(blob)
+
+
class RealTestCase(BaseTestCase):
    """Exercise univ.Real: arithmetic operator overloads, conversions,
    infinity handling, tag and subclassing.

    Fixes over the previous revision: removed a duplicated assertion in
    testRepr() and a stray copy-pasted trailing assertion (already covered
    by testLong()) at the end of testLongInf().
    """

    def testFloat4BinEnc(self):
        # (mantissa, base, exponent) tuple: 0.25 * 2**3 == 2.0
        assert univ.Real((0.25, 2, 3)) == 2.0, 'float initializer for binary encoding fails'

    def testStr(self):
        assert str(univ.Real(1.0)) == '1.0', 'str() fails'

    def testRepr(self):
        assert 'Real' in repr(univ.Real(-4.1))
        assert 'inf' in repr(univ.Real('inf'))
        assert '-inf' in repr(univ.Real('-inf'))

    def testAdd(self):
        assert univ.Real(-4.1) + 1.4 == -2.7, '__add__() fails'

    def testRadd(self):
        assert 4 + univ.Real(0.5) == 4.5, '__radd__() fails'

    def testSub(self):
        assert univ.Real(3.9) - 1.7 == 2.2, '__sub__() fails'

    def testRsub(self):
        assert 6.1 - univ.Real(0.1) == 6, '__rsub__() fails'

    def testMul(self):
        assert univ.Real(3.0) * -3 == -9, '__mul__() fails'

    def testRmul(self):
        assert 2 * univ.Real(3.0) == 6, '__rmul__() fails'

    def testDiv(self):
        assert univ.Real(3.0) / 2 == 1.5, '__div__() fails'

    def testRdiv(self):
        assert 6 / univ.Real(3.0) == 2, '__rdiv__() fails'

    def testMod(self):
        assert univ.Real(3.0) % 2 == 1, '__mod__() fails'

    def testRmod(self):
        assert 4 % univ.Real(3.0) == 1, '__rmod__() fails'

    def testPow(self):
        assert univ.Real(3.0) ** 2 == 9, '__pow__() fails'

    def testRpow(self):
        assert 2 ** univ.Real(2.0) == 4, '__rpow__() fails'

    def testInt(self):
        assert int(univ.Real(3.0)) == 3, '__int__() fails'

    def testLong(self):
        assert int(univ.Real(8.0)) == 8, '__long__() fails'

    def testFloat(self):
        assert float(univ.Real(4.0)) == 4.0, '__float__() fails'

    def testPrettyIn(self):
        # (mantissa, base, exponent) tuple: 3 * 10**0 == 3
        assert univ.Real((3, 10, 0)) == 3, 'prettyIn() fails'

    # infinite float values
    def testStrInf(self):
        assert str(univ.Real('inf')) == 'inf', 'str() fails'

    def testAddInf(self):
        assert univ.Real('inf') + 1 == float('inf'), '__add__() fails'

    def testRaddInf(self):
        assert 1 + univ.Real('inf') == float('inf'), '__radd__() fails'

    def testIntInf(self):
        # converting infinity to an integer must raise OverflowError
        try:
            assert int(univ.Real('inf'))
        except OverflowError:
            pass
        else:
            assert 0, '__int__() fails'

    def testLongInf(self):
        # kept separate from testIntInf to mirror the Py2 int/long split
        try:
            assert int(univ.Real('inf'))
        except OverflowError:
            pass
        else:
            assert 0, '__long__() fails'

    def testFloatInf(self):
        assert float(univ.Real('-inf')) == float('-inf'), '__float__() fails'

    def testPrettyInInf(self):
        assert univ.Real(float('inf')) == float('inf'), 'prettyIn() fails'

    def testPlusInf(self):
        assert univ.Real('inf').isPlusInf, 'isPlusInfinity failed'

    def testMinusInf(self):
        assert univ.Real('-inf').isMinusInf, 'isMinusInfinity failed'

    def testPos(self):
        assert +univ.Real(1.0) == 1.0, '__pos__() fails'

    def testNeg(self):
        assert -univ.Real(1.0) == -1.0, '__neg__() fails'

    def testRound(self):
        assert round(univ.Real(1.123), 2) == 1.12, '__round__() fails'

    def testFloor(self):
        assert math.floor(univ.Real(1.6)) == 1.0, '__floor__() fails'

    def testCeil(self):
        assert math.ceil(univ.Real(1.2)) == 2.0, '__ceil__() fails'

    def testTrunc(self):
        assert math.trunc(univ.Real(1.1)) == 1.0, '__trunc__() fails'

    def testTag(self):
        # REAL carries universal tag number 0x09
        assert univ.Real().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
        )

    def testStaticDef(self):

        class Real(univ.Real):
            pass

        assert Real(1.0) == 1.0
+
+
class RealPicklingTestCase(unittest.TestCase):
    """Ensure univ.Real survives a pickle round-trip as schema and as value."""

    def testSchemaPickling(self):
        schema = univ.Real()
        blob = pickle.dumps(schema)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.Real
        assert schema.isSameTypeWith(restored)

    def testValuePickling(self):
        # (1, 10, 3) means 1 * 10**3
        blob = pickle.dumps(univ.Real((1, 10, 3)))
        assert blob
        assert pickle.loads(blob) == 1000
+
+
class ObjectIdentifier(BaseTestCase):
    """Exercise univ.ObjectIdentifier: parsing, sequence protocol,
    prefix testing and tagging."""

    def testStr(self):
        assert str(univ.ObjectIdentifier((1, 3, 6))) == '1.3.6', 'str() fails'

    def testRepr(self):
        assert '1.3.6' in repr(univ.ObjectIdentifier('1.3.6'))

    def testEq(self):
        assert univ.ObjectIdentifier((1, 3, 6)) == (1, 3, 6), '__cmp__() fails'

    def testAdd(self):
        assert univ.ObjectIdentifier((1, 3)) + (6,) == (1, 3, 6), '__add__() fails'

    def testRadd(self):
        assert (1,) + univ.ObjectIdentifier((3, 6)) == (1, 3, 6), '__radd__() fails'

    def testLen(self):
        assert len(univ.ObjectIdentifier((1, 3))) == 2, '__len__() fails'

    def testPrefix(self):
        o = univ.ObjectIdentifier('1.3.6')
        assert o.isPrefixOf((1, 3, 6)), 'isPrefixOf() fails'
        assert o.isPrefixOf((1, 3, 6, 1)), 'isPrefixOf() fails'
        assert not o.isPrefixOf((1, 3)), 'isPrefixOf() fails'

    def testInput1(self):
        assert univ.ObjectIdentifier('1.3.6') == (1, 3, 6), 'prettyIn() fails'

    def testInput2(self):
        assert univ.ObjectIdentifier((1, 3, 6)) == (1, 3, 6), 'prettyIn() fails'

    def testInput3(self):
        assert univ.ObjectIdentifier(univ.ObjectIdentifier('1.3') + (6,)) == (1, 3, 6), 'prettyIn() fails'

    def testUnicode(self):
        s = '1.3.6'
        if sys.version_info[0] < 3:
            # exercise the unicode code path on Py2
            s = s.decode()
        assert univ.ObjectIdentifier(s) == (1, 3, 6), 'unicode init fails'

    def testTag(self):
        # OBJECT IDENTIFIER carries universal tag number 0x06
        assert univ.ObjectIdentifier().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
        )

    def testContains(self):
        # membership tests individual arcs of the OID
        s = univ.ObjectIdentifier('1.3.6.1234.99999')
        assert 1234 in s
        assert 4321 not in s

    def testStaticDef(self):

        class ObjectIdentifier(univ.ObjectIdentifier):
            pass

        assert str(ObjectIdentifier((1, 3, 6))) == '1.3.6'
+
+
class ObjectIdentifierPicklingTestCase(unittest.TestCase):
    """Ensure univ.ObjectIdentifier survives a pickle round-trip."""

    def testSchemaPickling(self):
        schema = univ.ObjectIdentifier()
        blob = pickle.dumps(schema)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.ObjectIdentifier
        assert schema.isSameTypeWith(restored)

    def testValuePickling(self):
        blob = pickle.dumps(univ.ObjectIdentifier('2.3.1.1.2'))
        assert blob
        assert pickle.loads(blob) == (2, 3, 1, 1, 2)
+
+
class SequenceOf(BaseTestCase):
    """Exercise univ.SequenceOf: component access, the list protocol,
    cloning/subtyping, constraint enforcement and the value/schema
    state transitions (clear vs reset)."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s1 = univ.SequenceOf(
            componentType=univ.OctetString('')
        )
        self.s2 = self.s1.clone()

    def testRepr(self):
        assert 'a' in repr(self.s1.clone().setComponents('a', 'b'))

    def testTag(self):
        # SEQUENCE OF carries universal constructed tag number 0x10
        assert self.s1.tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        ), 'wrong tagSet'

    def testSeq(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert self.s1[0] == str2octs('abc'), 'set by idx fails'
        self.s1[0] = 'cba'
        assert self.s1[0] == str2octs('cba'), 'set by idx fails'

    def testCmp(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, 'abc')
        self.s2.clear()
        self.s2.setComponentByPosition(0, univ.OctetString('abc'))
        assert self.s1 == self.s2, '__cmp__() fails'

    def testSubtypeSpec(self):
        s = self.s1.clone(
            componentType=univ.OctetString().subtype(
                subtypeSpec=constraint.SingleValueConstraint(str2octs('abc'))))
        try:
            s.setComponentByPosition(
                0, univ.OctetString().subtype(
                    'abc', subtypeSpec=constraint.SingleValueConstraint(str2octs('abc'))))
        except PyAsn1Error:
            assert 0, 'constraint fails'
        try:
            s.setComponentByPosition(1, univ.OctetString('Abc'))
        except PyAsn1Error:
            # rejected as expected; the check must also be bypassable
            try:
                s.setComponentByPosition(1, univ.OctetString('Abc'),
                                         verifyConstraints=False)
            except PyAsn1Error:
                assert 0, 'constraint fails with verifyConstraints=False'
        else:
            assert 0, 'constraint fails'

    def testComponentTagsMatching(self):
        s = self.s1.clone()
        s.strictConstraints = True  # This requires types equality
        o = univ.OctetString('abc').subtype(explicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 12))
        try:
            s.setComponentByPosition(0, o)
        except PyAsn1Error:
            pass
        else:
            assert 0, 'inner supertype tag allowed'

    def testComponentConstraintsMatching(self):
        s = self.s1.clone()
        o = univ.OctetString().subtype(
            subtypeSpec=constraint.ConstraintsUnion(constraint.SingleValueConstraint(str2octs('cba'))))
        s.strictConstraints = True  # This requires types equality
        try:
            s.setComponentByPosition(0, o.clone('cba'))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'inner supertype constraint allowed'
        s.strictConstraints = False  # This requires subtype relationships
        try:
            s.setComponentByPosition(0, o.clone('cba'))
        except PyAsn1Error:
            assert 0, 'inner supertype constraint disallowed'
        else:
            pass

    def testConsistency(self):
        s = self.s1.clone(subtypeSpec=constraint.ConstraintsUnion(
            constraint.ValueSizeConstraint(1, 1)
        ))
        s.setComponentByPosition(0, univ.OctetString('abc'))
        assert not s.isInconsistent, 'size spec fails'
        s.setComponentByPosition(1, univ.OctetString('abc'))
        assert s.isInconsistent, 'size spec fails'

    def testGetComponentTagMap(self):
        assert self.s1.componentType.tagMap.presentTypes == {
            univ.OctetString.tagSet: univ.OctetString('')
        }

    def testSubtype(self):
        # subtype() with implicitTag must equal clone() with the
        # equivalent explicit tagSet
        subtype = self.s1.subtype(
            implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 2),
            subtypeSpec=constraint.ValueSizeConstraint(0, 1)
        )
        subtype.clear()
        clone = self.s1.clone(
            tagSet=tag.TagSet(tag.Tag(tag.tagClassPrivate,
                                      tag.tagFormatSimple, 2)),
            subtypeSpec=constraint.ValueSizeConstraint(0, 1)
        )
        clone.clear()
        assert clone == subtype

    def testClone(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        # plain clone() copies the schema only...
        s = self.s1.clone()
        s.clear()
        assert len(s) == 0
        # ...cloneValueFlag copies the components too
        s = self.s1.clone(cloneValueFlag=1)
        assert len(s) == 1
        assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)

    def testSetComponents(self):
        assert self.s1.clone().setComponents('abc', 'def') == \
               self.s1.setComponentByPosition(0, 'abc').setComponentByPosition(1, 'def')

    def testGetItem(self):
        s = self.s1.clone()
        s.append('xxx')
        assert s[0]

        # this is a deviation from standard sequence protocol
        assert not s[2]

    def testGetItemSlice(self):
        s = self.s1.clone()
        s.extend(['xxx', 'yyy', 'zzz'])
        assert s[:1] == [str2octs('xxx')]
        assert s[-2:] == [str2octs('yyy'), str2octs('zzz')]
        assert s[1:2] == [str2octs('yyy')]

    def testSetItem(self):
        s = self.s1.clone()
        s.append('xxx')
        # assigning past the end grows the sequence, filling the gap
        # with default components
        s[2] = 'yyy'
        assert len(s) == 3
        assert s[1] == str2octs('')

    def testSetItemSlice(self):
        s = self.s1.clone()
        s[:1] = ['xxx']
        assert s == [str2octs('xxx')]
        s[-2:] = ['yyy', 'zzz']
        assert s == [str2octs('yyy'), str2octs('zzz')]
        s[1:2] = ['yyy']
        assert s == [str2octs('yyy'), str2octs('yyy')]
        assert len(s) == 2

    def testAppend(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert len(self.s1) == 1
        self.s1.append('def')
        assert len(self.s1) == 2
        assert list(self.s1) == [str2octs(x) for x in ['abc', 'def']]

    def testExtend(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert len(self.s1) == 1
        self.s1.extend(['def', 'ghi'])
        assert len(self.s1) == 3
        assert list(self.s1) == [str2octs(x) for x in ['abc', 'def', 'ghi']]

    def testCount(self):
        self.s1.clear()
        for x in ['abc', 'def', 'abc']:
            self.s1.append(x)
        assert self.s1.count(str2octs('abc')) == 2
        assert self.s1.count(str2octs('def')) == 1
        assert self.s1.count(str2octs('ghi')) == 0

    def testIndex(self):
        self.s1.clear()
        for x in ['abc', 'def', 'abc']:
            self.s1.append(x)
        assert self.s1.index(str2octs('abc')) == 0
        assert self.s1.index(str2octs('def')) == 1
        assert self.s1.index(str2octs('abc'), 1) == 2

    def testSort(self):
        self.s1.clear()
        self.s1[0] = 'b'
        self.s1[1] = 'a'
        assert list(self.s1) == [str2octs('b'), str2octs('a')]
        self.s1.sort()
        assert list(self.s1) == [str2octs('a'), str2octs('b')]

    def testStaticDef(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString('')

        s = SequenceOf()
        s[0] = 'abc'
        assert len(s) == 1
        assert s == [str2octs('abc')]

    def testUntyped(self):
        n = univ.SequenceOf()

        assert not n.isValue

        n[0] = univ.OctetString('fox')

        assert n.isValue

    def testLegacyInitializer(self):
        n = univ.SequenceOf(
            componentType=univ.OctetString()
        )
        o = univ.SequenceOf(
            univ.OctetString()  # this is the old way
        )

        assert n.isSameTypeWith(o) and o.isSameTypeWith(n)

        n[0] = 'fox'
        o[0] = 'fox'

        assert n == o

    def testGetComponentWithDefault(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        assert s.getComponentByPosition(0, default=None, instantiate=False) is None
        assert s.getComponentByPosition(0, default=None) is None
        s[0] = 'test'
        assert s.getComponentByPosition(0, default=None) is not None
        assert s.getComponentByPosition(0, default=None) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(0, default=None) is None

    def testGetComponentNoInstantiation(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
        s[0] = 'test'
        assert s.getComponentByPosition(0, instantiate=False) is not univ.noValue
        assert s.getComponentByPosition(0, instantiate=False) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(0, instantiate=False) is univ.noValue

    def testClear(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        s.setComponentByPosition(0, 'test')

        assert s.getComponentByPosition(0) == str2octs('test')
        assert len(s) == 1
        assert s.isValue

        # clear() empties the components but keeps the object a value
        s.clear()

        assert len(s) == 0
        assert s == []
        assert s.isValue

    def testReset(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        s.setComponentByPosition(0, 'test')

        assert s.getComponentByPosition(0) == str2octs('test')
        assert s.isValue

        # reset() reverts the object all the way back to schema state
        s.reset()

        assert not s.isValue

    def testIsInconsistentSizeConstraint(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()
            subtypeSpec = constraint.ValueSizeConstraint(0, 1)

        s = SequenceOf()

        assert s.isInconsistent

        s[0] = 'test'

        assert not s.isInconsistent

        s[0] = 'test'
        s[1] = 'test'

        assert s.isInconsistent

        s.clear()

        assert not s.isInconsistent

        s.reset()

        assert s.isInconsistent

        s[1] = 'test'

        assert not s.isInconsistent
+
+
+class SequenceOfPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.SequenceOf(componentType=univ.OctetString())
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.SequenceOf
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.SequenceOf(componentType=univ.OctetString())
+ old_asn1[0] = 'test'
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1
+ assert new_asn1 == [str2octs('test')]
+
+
+class Sequence(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString('')),
+ namedtype.OptionalNamedType('nick', univ.OctetString('')),
+ namedtype.DefaultedNamedType('age', univ.Integer(34))
+ )
+ )
+
+ def testRepr(self):
+ assert 'name' in repr(self.s1.clone().setComponents('a', 'b'))
+
+ def testTag(self):
+ assert self.s1.tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
+ ), 'wrong tagSet'
+
+ def testById(self):
+ self.s1.setComponentByName('name', univ.OctetString('abc'))
+ assert self.s1.getComponentByName('name') == str2octs('abc'), 'set by name fails'
+
+ def testByKey(self):
+ self.s1['name'] = 'abc'
+ assert self.s1['name'] == str2octs('abc'), 'set by key fails'
+
+ def testContains(self):
+ assert 'name' in self.s1
+ assert '<missing>' not in self.s1
+
+ def testGetNearPosition(self):
+ assert self.s1.componentType.getTagMapNearPosition(1).presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString(''),
+ univ.Integer.tagSet: univ.Integer(34)
+ }
+ assert self.s1.componentType.getPositionNearType(
+ univ.OctetString.tagSet, 1
+ ) == 1
+
+ def testSetDefaultComponents(self):
+ self.s1.clear()
+ self.s1.setComponentByPosition(0, univ.OctetString('Ping'))
+ self.s1.setComponentByPosition(1, univ.OctetString('Pong'))
+ assert self.s1.getComponentByPosition(2) == 34
+
+ def testClone(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ self.s1.setComponentByPosition(1, univ.OctetString('def'))
+ self.s1.setComponentByPosition(2, univ.Integer(123))
+ s = self.s1.clone()
+ assert s.getComponentByPosition(0) != self.s1.getComponentByPosition(0)
+ assert s.getComponentByPosition(1) != self.s1.getComponentByPosition(1)
+ assert s.getComponentByPosition(2) != self.s1.getComponentByPosition(2)
+ s = self.s1.clone(cloneValueFlag=1)
+ assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
+ assert s.getComponentByPosition(1) == self.s1.getComponentByPosition(1)
+ assert s.getComponentByPosition(2) == self.s1.getComponentByPosition(2)
+
+ def testComponentTagsMatching(self):
+ s = self.s1.clone()
+ s.strictConstraints = True # This requires types equality
+ o = univ.OctetString('abc').subtype(explicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 12))
+ try:
+ s.setComponentByName('name', o)
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'inner supertype tag allowed'
+
+ def testComponentConstraintsMatching(self):
+ s = self.s1.clone()
+ o = univ.OctetString().subtype(
+ subtypeSpec=constraint.ConstraintsUnion(constraint.SingleValueConstraint(str2octs('cba'))))
+ s.strictConstraints = True # This requires types equality
+ try:
+ s.setComponentByName('name', o.clone('cba'))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'inner supertype constraint allowed'
+ s.strictConstraints = False # This requires subtype relationships
+ try:
+ s.setComponentByName('name', o.clone('cba'))
+ except PyAsn1Error:
+ assert 0, 'inner supertype constraint disallowed'
+ else:
+ pass
+
+ def testSetComponents(self):
+ assert self.s1.clone().setComponents(name='a', nick='b', age=1) == \
+ self.s1.setComponentByPosition(0, 'a').setComponentByPosition(1, 'b').setComponentByPosition(2, 1)
+
+ def testSetToDefault(self):
+ s = self.s1.clone()
+ s.setComponentByPosition(0, univ.noValue)
+ s[2] = univ.noValue
+ assert s[0] == univ.OctetString('')
+ assert s[2] == univ.Integer(34)
+
+ def testGetItem(self):
+ s = self.s1.clone()
+ s['name'] = 'xxx'
+ assert s['name']
+ assert s[0]
+
+ try:
+ s['xxx']
+
+ except KeyError:
+ pass
+
+ else:
+ assert False, 'KeyError not raised'
+
+ try:
+ s[100]
+
+ except IndexError:
+ pass
+
+ else:
+ assert False, 'IndexError not raised'
+
+ def testSetItem(self):
+ s = self.s1.clone()
+ s['name'] = 'xxx'
+
+ try:
+
+ s['xxx'] = 'xxx'
+
+ except KeyError:
+ pass
+
+ else:
+ assert False, 'KeyError not raised'
+
+ try:
+
+ s[100] = 'xxx'
+
+ except IndexError:
+ pass
+
+ else:
+ assert False, 'IndexError not raised'
+
+ def testIter(self):
+ assert list(self.s1) == ['name', 'nick', 'age']
+
+ def testKeys(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ self.s1.setComponentByPosition(1, univ.OctetString('def'))
+ self.s1.setComponentByPosition(2, univ.Integer(123))
+ assert list(self.s1.keys()) == ['name', 'nick', 'age']
+
+ def testValues(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ self.s1.setComponentByPosition(1, univ.OctetString('def'))
+ self.s1.setComponentByPosition(2, univ.Integer(123))
+ assert list(self.s1.values()) == [str2octs('abc'), str2octs('def'), 123]
+
+ def testItems(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ self.s1.setComponentByPosition(1, univ.OctetString('def'))
+ self.s1.setComponentByPosition(2, univ.Integer(123))
+ assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'abc'), ('nick', 'def')]] + [('age', 123)]
+
+ def testUpdate(self):
+ self.s1.clear()
+ assert list(self.s1.values()) == [str2octs(''), str2octs(''), 34]
+ self.s1.update(**{'name': 'abc', 'nick': 'def', 'age': 123})
+ assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'abc'), ('nick', 'def')]] + [('age', 123)]
+ self.s1.update(('name', 'ABC'))
+ assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'ABC'), ('nick', 'def')]] + [('age', 123)]
+ self.s1.update(name='CBA')
+ assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'CBA'), ('nick', 'def')]] + [('age', 123)]
+
+ def testStaticDef(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString('')),
+ namedtype.OptionalNamedType('nick', univ.OctetString('')),
+ namedtype.DefaultedNamedType('age', univ.Integer(34))
+ )
+
+ s = Sequence()
+ s['name'] = 'abc'
+ assert s['name'] == str2octs('abc')
+
+ def testGetComponentWithDefault(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString('')),
+ namedtype.OptionalNamedType('nick', univ.OctetString()),
+ )
+
+ s = Sequence()
+
+ assert s[0] == str2octs('')
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+ assert s.getComponentByName('nick', default=None) is None
+ s[1] = 'test'
+ assert s.getComponentByPosition(1, default=None) is not None
+ assert s.getComponentByPosition(1, default=None) == str2octs('test')
+ s.clear()
+ assert s.getComponentByPosition(1, default=None) is None
+
+ def testGetComponentWithConstructedDefault(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.DefaultedNamedType('nick', univ.SequenceOf(
+ componentType=univ.Integer()
+ ).setComponentByPosition(0, 1)),
+ )
+
+ s = Sequence()
+
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+ assert s.getComponentByPosition(1) == [1]
+
+ def testGetComponentNoInstantiation(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString('')),
+ namedtype.OptionalNamedType('nick', univ.OctetString()),
+ )
+
+ s = Sequence()
+ assert s[0] == str2octs('')
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+ assert s.getComponentByName('nick', instantiate=False) is univ.noValue
+ s[1] = 'test'
+ assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
+ assert s.getComponentByPosition(1, instantiate=False) == str2octs('test')
+ s.clear()
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+
+ def testSchemaWithComponents(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString())
+ )
+
+ s = Sequence()
+
+ assert not s.isValue
+
+ s[0] = 'test'
+
+ assert s.isValue
+
+ s.clear()
+
+ assert not s.isValue
+
+ s.reset()
+
+ assert not s.isValue
+
+ def testSchemaWithOptionalComponents(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('name', univ.OctetString())
+ )
+
+ s = Sequence()
+
+ assert s.isValue
+
+ s[0] = 'test'
+
+ assert s.isValue
+
+ s.clear()
+
+ assert s.isValue
+
+ s.reset()
+
+ assert not s.isValue
+
+ def testSchemaWithOptionalComponents(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('name', univ.OctetString(''))
+ )
+
+ s = Sequence()
+
+ assert s.isValue
+
+ s[0] = 'test'
+
+ assert s.isValue
+
+ s.clear()
+
+ assert s.isValue
+
+ s.reset()
+
+ assert not s.isValue
+
+ def testIsInconsistentWithComponentsConstraint(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(65))
+ )
+ subtypeSpec = constraint.WithComponentsConstraint(
+ ('name', constraint.ComponentPresentConstraint()),
+ ('age', constraint.ComponentAbsentConstraint())
+ )
+
+ s = Sequence()
+
+ assert s.isInconsistent
+
+ s[0] = 'test'
+
+ assert not s.isInconsistent
+
+ s[0] = 'test'
+ s[1] = 23
+
+ assert s.isInconsistent
+
+ s.clear()
+
+ assert s.isInconsistent
+
+ s.reset()
+
+ assert s.isInconsistent
+
+ s[1] = 23
+
+ assert s.isInconsistent
+
+ def testIsInconsistentSizeConstraint(self):
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(65))
+ )
+ subtypeSpec = constraint.ValueSizeConstraint(0, 1)
+
+ s = Sequence()
+
+ assert not s.isInconsistent
+
+ s[0] = 'test'
+
+ assert not s.isInconsistent
+
+ s[0] = 'test'
+ s[1] = 23
+
+ assert s.isInconsistent
+
+ s.clear()
+
+ assert not s.isInconsistent
+
+ s.reset()
+
+ assert s.isInconsistent
+
+ s[1] = 23
+
+ assert not s.isInconsistent
+
+
+class SequenceWithoutSchema(BaseTestCase):
+
+ def testGetItem(self):
+ s = univ.Sequence()
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s[0] = 'abc'
+ assert s['field-0']
+ assert s[0]
+
+ try:
+ s['field-1']
+
+ except KeyError:
+ pass
+
+ else:
+ assert False, 'KeyError not raised'
+
+ def testSetItem(self):
+ s = univ.Sequence()
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s['field-0'] = 'xxx'
+
+ try:
+
+ s['field-1'] = 'xxx'
+
+ except KeyError:
+ pass
+
+ else:
+ assert False, 'KeyError not raised'
+
+ def testIter(self):
+ s = univ.Sequence()
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s.setComponentByPosition(1, univ.Integer(123))
+ assert list(s) == ['field-0', 'field-1']
+
+ def testKeys(self):
+ s = univ.Sequence()
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s.setComponentByPosition(1, univ.Integer(123))
+ assert list(s.keys()) == ['field-0', 'field-1']
+
+ def testValues(self):
+ s = univ.Sequence()
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s.setComponentByPosition(1, univ.Integer(123))
+ assert list(s.values()) == [str2octs('abc'), 123]
+
+ def testItems(self):
+ s = univ.Sequence()
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s.setComponentByPosition(1, univ.Integer(123))
+ assert list(s.items()) == [('field-0', str2octs('abc')), ('field-1', 123)]
+
+ def testUpdate(self):
+ s = univ.Sequence().clear()
+ assert not s
+ s.setComponentByPosition(0, univ.OctetString('abc'))
+ s.setComponentByPosition(1, univ.Integer(123))
+ assert s
+ assert list(s.keys()) == ['field-0', 'field-1']
+ assert list(s.values()) == [str2octs('abc'), 123]
+ assert list(s.items()) == [('field-0', str2octs('abc')), ('field-1', 123)]
+ s['field-0'] = univ.OctetString('def')
+ assert list(s.values()) == [str2octs('def'), 123]
+ s['field-1'] = univ.OctetString('ghi')
+ assert list(s.values()) == [str2octs('def'), str2octs('ghi')]
+ try:
+ s['field-2'] = univ.OctetString('xxx')
+ except KeyError:
+ pass
+ else:
+ assert False, 'unknown field at schema-less object tolerated'
+ assert 'field-0' in s
+ s.clear()
+ assert 'field-0' not in s
+
+ def testSchema(self):
+
+ class Sequence(univ.Sequence):
+ pass
+
+ s = Sequence()
+
+ assert not s.isValue
+
+ s[0] = univ.OctetString('test')
+
+ assert s.isValue
+
+ s.clear()
+
+ assert s.isValue
+
+ s.reset()
+
+ assert not s.isValue
+
+
+class SequencePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString())
+ )
+ )
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Sequence
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString())
+ )
+ )
+ old_asn1['name'] = 'test'
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1
+ assert new_asn1['name'] == str2octs('test')
+
+
+class SetOf(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s1 = univ.SetOf(componentType=univ.OctetString(''))
+
+ def testTag(self):
+ assert self.s1.tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
+ ), 'wrong tagSet'
+
+ def testSeq(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ assert self.s1[0] == str2octs('abc'), 'set by idx fails'
+ self.s1.setComponentByPosition(0, self.s1[0].clone('cba'))
+ assert self.s1[0] == str2octs('cba'), 'set by idx fails'
+
+ def testStaticDef(self):
+
+ class SetOf(univ.SequenceOf):
+ componentType = univ.OctetString('')
+
+ s = SetOf()
+ s[0] = 'abc'
+ assert len(s) == 1
+ assert s == [str2octs('abc')]
+
+
+
+class SetOfPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.SetOf(componentType=univ.OctetString())
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.SetOf
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.SetOf(componentType=univ.OctetString())
+ old_asn1[0] = 'test'
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1
+ assert new_asn1 == [str2octs('test')]
+
+
+class Set(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s1 = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString('')),
+ namedtype.OptionalNamedType('null', univ.Null('')),
+ namedtype.DefaultedNamedType('age', univ.Integer(34))
+ )
+ )
+ self.s2 = self.s1.clone()
+
+ def testTag(self):
+ assert self.s1.tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
+ ), 'wrong tagSet'
+
+ def testByTypeWithPythonValue(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getComponentByType(
+ univ.OctetString.tagSet
+ ) == str2octs('abc'), 'set by name fails'
+
+ def testByTypeWithInstance(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, univ.OctetString('abc'))
+ assert self.s1.getComponentByType(
+ univ.OctetString.tagSet
+ ) == str2octs('abc'), 'set by name fails'
+
+ def testGetTagMap(self):
+ assert self.s1.tagMap.presentTypes == {
+ univ.Set.tagSet: univ.Set().clear()
+ }
+
+ def testGetComponentTagMap(self):
+ assert self.s1.componentType.tagMapUnique.presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString(''),
+ univ.Null.tagSet: univ.Null(''),
+ univ.Integer.tagSet: univ.Integer(34)
+ }
+
+ def testGetPositionByType(self):
+ assert self.s1.componentType.getPositionByType(univ.Null().tagSet) == 1
+
+ def testSetToDefault(self):
+ self.s1.setComponentByName('name', univ.noValue)
+ assert self.s1['name'] == univ.OctetString('')
+
+ def testIter(self):
+ assert list(self.s1) == ['name', 'null', 'age']
+
+ def testStaticDef(self):
+
+ class Set(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString('')),
+ namedtype.OptionalNamedType('nick', univ.OctetString('')),
+ namedtype.DefaultedNamedType('age', univ.Integer(34))
+ )
+
+ s = Set()
+ s['name'] = 'abc'
+ assert s['name'] == str2octs('abc')
+
+ def testGetComponentWithDefault(self):
+
+ class Set(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer(123)),
+ namedtype.OptionalNamedType('nick', univ.OctetString()),
+ )
+
+ s = Set()
+ assert s[0] == 123
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+ assert s.getComponentByName('nick', default=None) is None
+ s[1] = 'test'
+ assert s.getComponentByPosition(1, default=None) is not None
+ assert s.getComponentByPosition(1, default=None) == str2octs('test')
+ s.clear()
+ assert s.getComponentByPosition(1, default=None) is None
+
+ def testGetComponentNoInstantiation(self):
+
+ class Set(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer(123)),
+ namedtype.OptionalNamedType('nick', univ.OctetString()),
+ )
+
+ s = Set()
+ assert s[0] == 123
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+ assert s.getComponentByName('nick', instantiate=False) is univ.noValue
+ assert s.getComponentByType(univ.OctetString.tagSet, instantiate=False) is univ.noValue
+ s[1] = 'test'
+ assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
+ assert s.getComponentByPosition(1, instantiate=False) == str2octs('test')
+ s.clear()
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+
+
+class SetPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString())
+ )
+ )
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Set
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString())
+ )
+ )
+ old_asn1['name'] = 'test'
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1
+ assert new_asn1['name'] == str2octs('test')
+
+
+class Choice(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ innerComp = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('count', univ.Integer()),
+ namedtype.NamedType('flag', univ.Boolean())
+ )
+ )
+ self.s1 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('sex', innerComp)
+ )
+ )
+
+ def testTag(self):
+ assert self.s1.tagSet == tag.TagSet(), 'wrong tagSet'
+
+ def testRepr(self):
+ assert 'Choice' in repr(self.s1.clone().setComponents('a'))
+ s = self.s1.clone().setComponents(
+ sex=self.s1.setComponentByPosition(1).getComponentByPosition(1).clone().setComponents(count=univ.Integer(123))
+ )
+ assert 'Choice' in repr(s)
+
+ def testContains(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert 'name' in self.s1
+ assert 'sex' not in self.s1
+
+ self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
+ assert 'name' not in self.s1
+ assert 'sex' in self.s1
+
+ def testIter(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert list(self.s1) == ['name']
+ self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
+ assert list(self.s1) == ['sex']
+
+ def testOuterByTypeWithPythonValue(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getComponentByType(
+ univ.OctetString.tagSet
+ ) == str2octs('abc')
+
+ def testOuterByTypeWithInstanceValue(self):
+ self.s1.setComponentByType(
+ univ.OctetString.tagSet, univ.OctetString('abc')
+ )
+ assert self.s1.getComponentByType(
+ univ.OctetString.tagSet
+ ) == str2octs('abc')
+
+ def testInnerByTypeWithPythonValue(self):
+ self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
+ assert self.s1.getComponentByType(
+ univ.Integer.tagSet, 1
+ ) == 123
+
+ def testInnerByTypeWithInstanceValue(self):
+ self.s1.setComponentByType(
+ univ.Integer.tagSet, univ.Integer(123), innerFlag=True
+ )
+ assert self.s1.getComponentByType(
+ univ.Integer.tagSet, 1
+ ) == 123
+
+ def testCmp(self):
+ self.s1.setComponentByName('name', univ.OctetString('abc'))
+ assert self.s1 == str2octs('abc'), '__cmp__() fails'
+
+ def testGetComponent(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getComponent() == str2octs('abc'), 'getComponent() fails'
+
+ def testGetName(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getName() == 'name', 'getName() fails'
+
+ def testSetComponentByPosition(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('Jim'))
+ assert self.s1 == str2octs('Jim')
+
+ def testClone(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ s = self.s1.clone()
+ assert len(s) == 0
+ s = self.s1.clone(cloneValueFlag=1)
+ assert len(s) == 1
+ assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
+
+ def testSetToDefault(self):
+ s = self.s1.clone()
+ s.setComponentByName('sex', univ.noValue)
+ assert s['sex'] is not univ.noValue
+
+ def testStaticDef(self):
+
+ class InnerChoice(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('count', univ.Integer()),
+ namedtype.NamedType('flag', univ.Boolean())
+ )
+
+ class OuterChoice(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('sex', InnerChoice())
+ )
+
+ c = OuterChoice()
+
+ c.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert c.getName() == 'name'
+
+ def testGetComponentWithDefault(self):
+
+ s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+
+ assert s.getComponentByPosition(0, default=None, instantiate=False) is None
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+ assert s.getComponentByName('name', default=None, instantiate=False) is None
+ assert s.getComponentByName('id', default=None, instantiate=False) is None
+ assert s.getComponentByType(univ.OctetString.tagSet, default=None) is None
+ assert s.getComponentByType(univ.Integer.tagSet, default=None) is None
+ s[1] = 123
+ assert s.getComponentByPosition(1, default=None) is not None
+ assert s.getComponentByPosition(1, univ.noValue) == 123
+ s.clear()
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+
+ def testGetComponentNoInstantiation(self):
+
+ s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+
+ assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+ assert s.getComponentByName('name', instantiate=False) is univ.noValue
+ assert s.getComponentByName('id', instantiate=False) is univ.noValue
+ assert s.getComponentByType(univ.OctetString.tagSet, instantiate=False) is univ.noValue
+ assert s.getComponentByType(univ.Integer.tagSet, instantiate=False) is univ.noValue
+ s[1] = 123
+ assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
+ assert s.getComponentByPosition(1, instantiate=False) == 123
+ s.clear()
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+
+
+class ChoicePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Choice
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+ old_asn1['name'] = 'test'
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1
+ assert new_asn1['name'] == str2octs('test')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/type/test_useful.py b/contrib/python/pyasn1/py2/tests/type/test_useful.py
new file mode 100644
index 0000000000..cd5ba566f9
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/type/test_useful.py
@@ -0,0 +1,138 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import datetime
+import pickle
+import sys
+from copy import deepcopy
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import useful
+
+
+class FixedOffset(datetime.tzinfo):
+ def __init__(self, offset, name):
+ self.__offset = datetime.timedelta(minutes=offset)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return datetime.timedelta(0)
+
+
+UTC = FixedOffset(0, 'UTC')
+UTC2 = FixedOffset(120, 'UTC')
+
+
+class ObjectDescriptorTestCase(BaseTestCase):
+ pass
+
+
+class GeneralizedTimeTestCase(BaseTestCase):
+
+ def testFromDateTime(self):
+ assert useful.GeneralizedTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC)) == '20170711000102.3Z'
+
+ def testToDateTime0(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2) == useful.GeneralizedTime('20170711000102').asDateTime
+
+ def testToDateTime1(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.GeneralizedTime('20170711000102Z').asDateTime
+
+ def testToDateTime2(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102.3Z').asDateTime
+
+ def testToDateTime3(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102,3Z').asDateTime
+
+ def testToDateTime4(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102.3+0000').asDateTime
+
+ def testToDateTime5(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC2) == useful.GeneralizedTime('20170711000102.3+0200').asDateTime
+
+ def testToDateTime6(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC2) == useful.GeneralizedTime('20170711000102.3+02').asDateTime
+
+ def testToDateTime7(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1) == useful.GeneralizedTime('201707110001').asDateTime
+
+ def testToDateTime8(self):
+ assert datetime.datetime(2017, 7, 11, 0) == useful.GeneralizedTime('2017071100').asDateTime
+
+ def testCopy(self):
+ dt = useful.GeneralizedTime("20170916234254+0130").asDateTime
+ assert dt == deepcopy(dt)
+
+
+class GeneralizedTimePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = useful.GeneralizedTime()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == useful.GeneralizedTime
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = useful.GeneralizedTime("20170916234254+0130")
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == old_asn1
+
+
+class UTCTimeTestCase(BaseTestCase):
+
+ def testFromDateTime(self):
+ assert useful.UTCTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC)) == '170711000102Z'
+
+ def testToDateTime0(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2) == useful.UTCTime('170711000102').asDateTime
+
+ def testToDateTime1(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.UTCTime('170711000102Z').asDateTime
+
+ def testToDateTime2(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.UTCTime('170711000102+0000').asDateTime
+
+ def testToDateTime3(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC2) == useful.UTCTime('170711000102+0200').asDateTime
+
+ def testToDateTime4(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1) == useful.UTCTime('1707110001').asDateTime
+
+
+class UTCTimePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = useful.UTCTime()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == useful.UTCTime
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = useful.UTCTime("170711000102")
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == old_asn1
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py2/tests/ya.make b/contrib/python/pyasn1/py2/tests/ya.make
new file mode 100644
index 0000000000..7b6f392cd2
--- /dev/null
+++ b/contrib/python/pyasn1/py2/tests/ya.make
@@ -0,0 +1,41 @@
+PY2TEST()
+
+PEERDIR(
+ contrib/python/pyasn1
+)
+
+TEST_SRCS(
+ __init__.py
+ base.py
+ codec/__init__.py
+ codec/ber/__init__.py
+ codec/ber/test_decoder.py
+ codec/ber/test_encoder.py
+ codec/cer/__init__.py
+ codec/cer/test_decoder.py
+ codec/cer/test_encoder.py
+ codec/der/__init__.py
+ codec/der/test_decoder.py
+ codec/der/test_encoder.py
+ codec/native/__init__.py
+ codec/native/test_decoder.py
+ codec/native/test_encoder.py
+ codec/test_streaming.py
+ compat/__init__.py
+ compat/test_integer.py
+ compat/test_octets.py
+ test_debug.py
+ type/__init__.py
+ type/test_char.py
+ type/test_constraint.py
+ type/test_namedtype.py
+ type/test_namedval.py
+ type/test_opentype.py
+ type/test_tag.py
+ type/test_univ.py
+ type/test_useful.py
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/pyasn1/py2/ya.make b/contrib/python/pyasn1/py2/ya.make
new file mode 100644
index 0000000000..cd11432c2f
--- /dev/null
+++ b/contrib/python/pyasn1/py2/ya.make
@@ -0,0 +1,58 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(0.5.0)
+
+LICENSE(BSD-3-Clause)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ pyasn1/__init__.py
+ pyasn1/codec/__init__.py
+ pyasn1/codec/ber/__init__.py
+ pyasn1/codec/ber/decoder.py
+ pyasn1/codec/ber/encoder.py
+ pyasn1/codec/ber/eoo.py
+ pyasn1/codec/cer/__init__.py
+ pyasn1/codec/cer/decoder.py
+ pyasn1/codec/cer/encoder.py
+ pyasn1/codec/der/__init__.py
+ pyasn1/codec/der/decoder.py
+ pyasn1/codec/der/encoder.py
+ pyasn1/codec/native/__init__.py
+ pyasn1/codec/native/decoder.py
+ pyasn1/codec/native/encoder.py
+ pyasn1/codec/streaming.py
+ pyasn1/compat/__init__.py
+ pyasn1/compat/integer.py
+ pyasn1/compat/octets.py
+ pyasn1/debug.py
+ pyasn1/error.py
+ pyasn1/type/__init__.py
+ pyasn1/type/base.py
+ pyasn1/type/char.py
+ pyasn1/type/constraint.py
+ pyasn1/type/error.py
+ pyasn1/type/namedtype.py
+ pyasn1/type/namedval.py
+ pyasn1/type/opentype.py
+ pyasn1/type/tag.py
+ pyasn1/type/tagmap.py
+ pyasn1/type/univ.py
+ pyasn1/type/useful.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/pyasn1/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/pyasn1/py3/.dist-info/METADATA b/contrib/python/pyasn1/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..530fe5bf7b
--- /dev/null
+++ b/contrib/python/pyasn1/py3/.dist-info/METADATA
@@ -0,0 +1,230 @@
+Metadata-Version: 2.1
+Name: pyasn1
+Version: 0.5.0
+Summary: Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)
+Home-page: https://github.com/pyasn1/pyasn1
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: pyasn1 maintenance organization
+Maintainer-email: Christian Heimes <christian@python.org>
+License: BSD-2-Clause
+Project-URL: Documentation, https://pyasn1.readthedocs.io
+Project-URL: Source, https://github.com/pyasn1/pyasn1
+Project-URL: Issues, https://github.com/pyasn1/pyasn1/issues
+Project-URL: Changelog, https://pyasn1.readthedocs.io/en/latest/changelog.html
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Communications
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7
+Description-Content-Type: text/markdown
+License-File: LICENSE.rst
+
+
+ASN.1 library for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1.svg?maxAge=2592000)](https://pypi.org/project/pyasn1)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1.svg)](https://pypi.org/project/pyasn1/)
+[![Build status](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1.svg)](https://codecov.io/github/pyasn1/pyasn1)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1/master/LICENSE.txt)
+
+This is a free and open source implementation of ASN.1 types and codecs
+as a Python package. It has been first written to support particular
+protocol (SNMP) but then generalized to be suitable for a wide range
+of protocols based on
+[ASN.1 specification](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items).
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1.
+
+Features
+--------
+
+* Generic implementation of ASN.1 types (X.208)
+* Standards compliant BER/CER/DER codecs
+* Can operate on streams of serialized data
+* Dumps/loads ASN.1 structures from Python types
+* 100% Python, works with Python 2.7 and 3.6+
+* MT-safe
+* Contributed ASN.1 compiler [Asn1ate](https://github.com/kimgr/asn1ate)
+
+Why using pyasn1
+----------------
+
+ASN.1 solves the data serialisation problem. This solution was
+designed long ago by the wise Ancients. Back then, they did not
+have the luxury of wasting bits. That is why ASN.1 is designed
+to serialise data structures of unbounded complexity into
+something compact and efficient when it comes to processing
+the data.
+
+That probably explains why many network protocols and file formats
+still rely on the 30+ years old technology. Including a number of
+high-profile Internet protocols and file formats.
+
+Quite a number of books cover the topic of ASN.1.
+[Communication between heterogeneous systems](http://www.oss.com/asn1/dubuisson.html)
+by Olivier Dubuisson is one of those high quality books freely
+available on the Internet.
+
+The pyasn1 package is designed to help Python programmers tackling
+network protocols and file formats at the comfort of their Python
+prompt. The tool struggles to capture all aspects of a rather
+complicated ASN.1 system and to represent it on the Python terms.
+
+How to use pyasn1
+-----------------
+
+With pyasn1 you can build Python objects from ASN.1 data structures.
+For example, the following ASN.1 data structure:
+
+```bash
+Record ::= SEQUENCE {
+ id INTEGER,
+ room [0] INTEGER OPTIONAL,
+ house [1] INTEGER DEFAULT 0
+}
+```
+
+Could be expressed in pyasn1 like this:
+
+```python
+class Record(Sequence):
+ componentType = NamedTypes(
+ NamedType('id', Integer()),
+ OptionalNamedType(
+ 'room', Integer().subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
+ )
+ ),
+ DefaultedNamedType(
+ 'house', Integer(0).subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
+ )
+ )
+ )
+```
+
+It is in the spirit of ASN.1 to take abstract data description
+and turn it into a programming language specific form.
+Once you have your ASN.1 data structure expressed in Python, you
+can use it along the lines of similar Python type (e.g. ASN.1
+`SET` is similar to Python `dict`, `SET OF` to `list`):
+
+```python
+>>> record = Record()
+>>> record['id'] = 123
+>>> record['room'] = 321
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+Part of the power of ASN.1 comes from its serialisation features. You
+can serialise your data structure and send it over the network.
+
+```python
+>>> from pyasn1.codec.der.encoder import encode
+>>> substrate = encode(record)
+>>> hexdump(substrate)
+00000: 30 07 02 01 7B 80 02 01 41
+```
+
+Conversely, you can turn serialised ASN.1 content, as received from
+network or read from a file, into a Python object which you can
+introspect, modify, encode and send back.
+
+```python
+>>> from pyasn1.codec.der.decoder import decode
+>>> received_record, rest_of_substrate = decode(substrate, asn1Spec=Record())
+>>>
+>>> for field in received_record:
+>>> print('{} is {}'.format(field, received_record[field]))
+id is 123
+room is 321
+house is 0
+>>>
+>>> record == received_record
+True
+>>> received_record.update(room=123)
+>>> substrate = encode(received_record)
+>>> hexdump(substrate)
+00000: 30 06 02 01 7B 80 01 7B
+```
+
+The pyasn1 classes struggle to emulate their Python prototypes (e.g. int,
+list, dict etc.). But ASN.1 types exhibit more complicated behaviour.
+To make life easier for a Pythonista, they can turn their pyasn1
+classes into Python built-ins:
+
+```python
+>>> from pyasn1.codec.native.encoder import encode
+>>> encode(record)
+{'id': 123, 'room': 321, 'house': 0}
+```
+
+Or vice-versa -- you can initialize an ASN.1 structure from a tree of
+Python objects:
+
+```python
+>>> from pyasn1.codec.native.decoder import decode
+>>> record = decode({'id': 123, 'room': 321, 'house': 0}, asn1Spec=Record())
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+With ASN.1 design, serialisation codecs are decoupled from data objects,
+so you could turn every single ASN.1 object into many different
+serialised forms. As of this moment, pyasn1 supports BER, DER, CER and
+Python built-ins codecs. The extremely compact PER encoding is expected
+to be introduced in the upcoming pyasn1 release.
+
+More information on pyasn1 APIs can be found in the
+[documentation](https://pyasn1.readthedocs.io/en/latest/pyasn1/contents.html),
+compiled ASN.1 modules for different protocols and file formats
+could be found in the pyasn1-modules
+[repo](https://github.com/pyasn1/pyasn1-modules).
+
+How to get pyasn1
+-----------------
+
+The pyasn1 package is distributed under terms and conditions of 2-clause
+BSD [license](https://pyasn1.readthedocs.io/en/latest/license.html). Source code is freely
+available as a GitHub [repo](https://github.com/pyasn1/pyasn1).
+
+You could `pip install pyasn1` or download it from [PyPI](https://pypi.org/project/pyasn1).
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1/issues) at GitHub or
+post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+or try browsing pyasn1
+[mailing list archives](https://sourceforge.net/p/pyasn1/mailman/pyasn1-users/).
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1/py3/.dist-info/top_level.txt b/contrib/python/pyasn1/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..38fe414575
--- /dev/null
+++ b/contrib/python/pyasn1/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1
diff --git a/contrib/python/pyasn1/py3/LICENSE.rst b/contrib/python/pyasn1/py3/LICENSE.rst
new file mode 100644
index 0000000000..598b8430ef
--- /dev/null
+++ b/contrib/python/pyasn1/py3/LICENSE.rst
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/pyasn1/py3/README.md b/contrib/python/pyasn1/py3/README.md
new file mode 100644
index 0000000000..e1f9501b49
--- /dev/null
+++ b/contrib/python/pyasn1/py3/README.md
@@ -0,0 +1,188 @@
+
+ASN.1 library for Python
+------------------------
+[![PyPI](https://img.shields.io/pypi/v/pyasn1.svg?maxAge=2592000)](https://pypi.org/project/pyasn1)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pyasn1.svg)](https://pypi.org/project/pyasn1/)
+[![Build status](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml/badge.svg)](https://github.com/pyasn1/pyasn1/actions/workflows/main.yml)
+[![Coverage Status](https://img.shields.io/codecov/c/github/pyasn1/pyasn1.svg)](https://codecov.io/github/pyasn1/pyasn1)
+[![GitHub license](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/pyasn1/pyasn1/master/LICENSE.txt)
+
+This is a free and open source implementation of ASN.1 types and codecs
+as a Python package. It has been first written to support particular
+protocol (SNMP) but then generalized to be suitable for a wide range
+of protocols based on
+[ASN.1 specification](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.208-198811-W!!PDF-E&type=items).
+
+**NOTE:** The package is now maintained by *Christian Heimes* and
+*Simon Pichugin* in project https://github.com/pyasn1/pyasn1.
+
+Features
+--------
+
+* Generic implementation of ASN.1 types (X.208)
+* Standards compliant BER/CER/DER codecs
+* Can operate on streams of serialized data
+* Dumps/loads ASN.1 structures from Python types
+* 100% Python, works with Python 2.7 and 3.6+
+* MT-safe
+* Contributed ASN.1 compiler [Asn1ate](https://github.com/kimgr/asn1ate)
+
+Why using pyasn1
+----------------
+
+ASN.1 solves the data serialisation problem. This solution was
+designed long ago by the wise Ancients. Back then, they did not
+have the luxury of wasting bits. That is why ASN.1 is designed
+to serialise data structures of unbounded complexity into
+something compact and efficient when it comes to processing
+the data.
+
+That probably explains why many network protocols and file formats
+still rely on the 30+ years old technology. Including a number of
+high-profile Internet protocols and file formats.
+
+Quite a number of books cover the topic of ASN.1.
+[Communication between heterogeneous systems](http://www.oss.com/asn1/dubuisson.html)
+by Olivier Dubuisson is one of those high quality books freely
+available on the Internet.
+
+The pyasn1 package is designed to help Python programmers tackling
+network protocols and file formats at the comfort of their Python
+prompt. The tool struggles to capture all aspects of a rather
+complicated ASN.1 system and to represent it on the Python terms.
+
+How to use pyasn1
+-----------------
+
+With pyasn1 you can build Python objects from ASN.1 data structures.
+For example, the following ASN.1 data structure:
+
+```bash
+Record ::= SEQUENCE {
+ id INTEGER,
+ room [0] INTEGER OPTIONAL,
+ house [1] INTEGER DEFAULT 0
+}
+```
+
+Could be expressed in pyasn1 like this:
+
+```python
+class Record(Sequence):
+ componentType = NamedTypes(
+ NamedType('id', Integer()),
+ OptionalNamedType(
+ 'room', Integer().subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
+ )
+ ),
+ DefaultedNamedType(
+ 'house', Integer(0).subtype(
+ implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
+ )
+ )
+ )
+```
+
+It is in the spirit of ASN.1 to take abstract data description
+and turn it into a programming language specific form.
+Once you have your ASN.1 data structure expressed in Python, you
+can use it along the lines of similar Python type (e.g. ASN.1
+`SET` is similar to Python `dict`, `SET OF` to `list`):
+
+```python
+>>> record = Record()
+>>> record['id'] = 123
+>>> record['room'] = 321
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+Part of the power of ASN.1 comes from its serialisation features. You
+can serialise your data structure and send it over the network.
+
+```python
+>>> from pyasn1.codec.der.encoder import encode
+>>> substrate = encode(record)
+>>> hexdump(substrate)
+00000: 30 07 02 01 7B 80 02 01 41
+```
+
+Conversely, you can turn serialised ASN.1 content, as received from
+network or read from a file, into a Python object which you can
+introspect, modify, encode and send back.
+
+```python
+>>> from pyasn1.codec.der.decoder import decode
+>>> received_record, rest_of_substrate = decode(substrate, asn1Spec=Record())
+>>>
+>>> for field in received_record:
+>>> print('{} is {}'.format(field, received_record[field]))
+id is 123
+room is 321
+house is 0
+>>>
+>>> record == received_record
+True
+>>> received_record.update(room=123)
+>>> substrate = encode(received_record)
+>>> hexdump(substrate)
+00000: 30 06 02 01 7B 80 01 7B
+```
+
+The pyasn1 classes struggle to emulate their Python prototypes (e.g. int,
+list, dict etc.). But ASN.1 types exhibit more complicated behaviour.
+To make life easier for a Pythonista, they can turn their pyasn1
+classes into Python built-ins:
+
+```python
+>>> from pyasn1.codec.native.encoder import encode
+>>> encode(record)
+{'id': 123, 'room': 321, 'house': 0}
+```
+
+Or vice-versa -- you can initialize an ASN.1 structure from a tree of
+Python objects:
+
+```python
+>>> from pyasn1.codec.native.decoder import decode
+>>> record = decode({'id': 123, 'room': 321, 'house': 0}, asn1Spec=Record())
+>>> str(record)
+Record:
+ id=123
+ room=321
+>>>
+```
+
+With ASN.1 design, serialisation codecs are decoupled from data objects,
+so you could turn every single ASN.1 object into many different
+serialised forms. As of this moment, pyasn1 supports BER, DER, CER and
+Python built-ins codecs. The extremely compact PER encoding is expected
+to be introduced in the upcoming pyasn1 release.
+
+More information on pyasn1 APIs can be found in the
+[documentation](https://pyasn1.readthedocs.io/en/latest/pyasn1/contents.html),
+compiled ASN.1 modules for different protocols and file formats
+could be found in the pyasn1-modules
+[repo](https://github.com/pyasn1/pyasn1-modules).
+
+How to get pyasn1
+-----------------
+
+The pyasn1 package is distributed under terms and conditions of 2-clause
+BSD [license](https://pyasn1.readthedocs.io/en/latest/license.html). Source code is freely
+available as a GitHub [repo](https://github.com/pyasn1/pyasn1).
+
+You could `pip install pyasn1` or download it from [PyPI](https://pypi.org/project/pyasn1).
+
+If something does not work as expected,
+[open an issue](https://github.com/pyasn1/pyasn1/issues) at GitHub or
+post your question [on Stack Overflow](https://stackoverflow.com/questions/ask)
+or try browsing pyasn1
+[mailing list archives](https://sourceforge.net/p/pyasn1/mailman/pyasn1-users/).
+
+Copyright (c) 2005-2020, [Ilya Etingof](mailto:etingof@gmail.com).
+All rights reserved.
diff --git a/contrib/python/pyasn1/py3/pyasn1/__init__.py b/contrib/python/pyasn1/py3/pyasn1/__init__.py
new file mode 100644
index 0000000000..a979d291f2
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/__init__.py
@@ -0,0 +1,2 @@
+# https://www.python.org/dev/peps/pep-0396/
+__version__ = '0.5.0'
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/__init__.py b/contrib/python/pyasn1/py3/pyasn1/codec/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/ber/__init__.py b/contrib/python/pyasn1/py3/pyasn1/codec/ber/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/ber/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/ber/decoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/ber/decoder.py
new file mode 100644
index 0000000000..070733fd28
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/ber/decoder.py
@@ -0,0 +1,2071 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import os
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.codec.streaming import asSeekableStream
+from pyasn1.codec.streaming import isEndOfStream
+from pyasn1.codec.streaming import peekIntoStream
+from pyasn1.codec.streaming import readFromStream
+from pyasn1.compat import _MISSING
+from pyasn1.compat.integer import from_bytes
+from pyasn1.compat.octets import oct2int, octs2ints, ints2octs, null
+from pyasn1.error import PyAsn1Error
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['StreamingDecoder', 'Decoder', 'decode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
+
+noValue = base.noValue
+
+SubstrateUnderrunError = error.SubstrateUnderrunError
+
+
+class AbstractPayloadDecoder(object):
+ protoComponent = None
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ """Decode value with fixed byte length.
+
+ The decoder is allowed to consume as many bytes as necessary.
+ """
+ raise error.PyAsn1Error('SingleItemDecoder not implemented for %s' % (tagSet,)) # TODO: Seems more like an NotImplementedError?
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ """Decode value with undefined length.
+
+ The decoder is allowed to consume as many bytes as necessary.
+ """
+ raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,)) # TODO: Seems more like an NotImplementedError?
+
+ @staticmethod
+ def _passAsn1Object(asn1Object, options):
+ if 'asn1Object' not in options:
+ options['asn1Object'] = asn1Object
+
+ return options
+
+
+class AbstractSimplePayloadDecoder(AbstractPayloadDecoder):
+ @staticmethod
+ def substrateCollector(asn1Object, substrate, length, options):
+ for chunk in readFromStream(substrate, length, options):
+ yield chunk
+
+ def _createComponent(self, asn1Spec, tagSet, value, **options):
+ if options.get('native'):
+ return value
+ elif asn1Spec is None:
+ return self.protoComponent.clone(value, tagSet=tagSet)
+ elif value is noValue:
+ return asn1Spec
+ else:
+ return asn1Spec.clone(value)
+
+
+class RawPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.Any('')
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, '', **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ for value in decodeFun(substrate, asn1Spec, tagSet, length, **options):
+ yield value
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, '', **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ while True:
+ for value in decodeFun(
+ substrate, asn1Spec, tagSet, length,
+ allowEoo=True, **options):
+
+ if value is eoo.endOfOctets:
+ return
+
+ yield value
+
+
+rawPayloadDecoder = RawPayloadDecoder()
+
+
+class IntegerPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.Integer(0)
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ if chunk:
+ value = from_bytes(chunk, signed=True)
+
+ else:
+ value = 0
+
+ yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+
+class BooleanPayloadDecoder(IntegerPayloadDecoder):
+ protoComponent = univ.Boolean(0)
+
+ def _createComponent(self, asn1Spec, tagSet, value, **options):
+ return IntegerPayloadDecoder._createComponent(
+ self, asn1Spec, tagSet, value and 1 or 0, **options)
+
+
+class BitStringPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.BitString(())
+ supportConstructedForm = True
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ if not length:
+ raise error.PyAsn1Error('Empty BIT STRING substrate')
+
+ for chunk in isEndOfStream(substrate):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ if chunk:
+ raise error.PyAsn1Error('Empty BIT STRING substrate')
+
+ if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
+
+ for trailingBits in readFromStream(substrate, 1, options):
+ if isinstance(trailingBits, SubstrateUnderrunError):
+ yield trailingBits
+
+ trailingBits = ord(trailingBits)
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ for chunk in readFromStream(substrate, length - 1, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ value = self.protoComponent.fromOctetString(
+ chunk, internalFormat=True, padding=trailingBits)
+
+ yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+ return
+
+ if not self.supportConstructedForm:
+ raise error.PyAsn1Error('Constructed encoding form prohibited '
+ 'at %s' % self.__class__.__name__)
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
+
+ current_position = substrate.tell()
+
+ while substrate.tell() - current_position < length:
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ trailingBits = oct2int(component[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ bitString = self.protoComponent.fromOctetString(
+ component[1:], internalFormat=True,
+ prepend=bitString, padding=trailingBits
+ )
+
+ yield self._createComponent(asn1Spec, tagSet, bitString, **options)
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
+
+ while True: # loop over fragments
+
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ allowEoo=True, **options):
+
+ if component is eoo.endOfOctets:
+ break
+
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ if component is eoo.endOfOctets:
+ break
+
+ trailingBits = oct2int(component[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ bitString = self.protoComponent.fromOctetString(
+ component[1:], internalFormat=True,
+ prepend=bitString, padding=trailingBits
+ )
+
+ yield self._createComponent(asn1Spec, tagSet, bitString, **options)
+
+
+class OctetStringPayloadDecoder(AbstractSimplePayloadDecoder):
+ protoComponent = univ.OctetString('')
+ supportConstructedForm = True
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ yield self._createComponent(asn1Spec, tagSet, chunk, **options)
+
+ return
+
+ if not self.supportConstructedForm:
+ raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__)
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ header = null
+
+ original_position = substrate.tell()
+ # head = popSubstream(substrate, length)
+ while substrate.tell() - original_position < length:
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ **options):
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ header += component
+
+ yield self._createComponent(asn1Spec, tagSet, header, **options)
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun and substrateFun is not self.substrateCollector:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+
+ for chunk in substrateFun(asn1Object, substrate, length, options):
+ yield chunk
+
+ return
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ header = null
+
+ while True: # loop over fragments
+
+ for component in decodeFun(
+ substrate, self.protoComponent, substrateFun=substrateFun,
+ allowEoo=True, **options):
+
+ if isinstance(component, SubstrateUnderrunError):
+ yield component
+
+ if component is eoo.endOfOctets:
+ break
+
+ if component is eoo.endOfOctets:
+ break
+
+ header += component
+
+ yield self._createComponent(asn1Spec, tagSet, header, **options)
+
+
class NullPayloadDecoder(AbstractSimplePayloadDecoder):
    """Decoder for ASN.1 NULL values (payload must be empty)."""

    protoComponent = univ.Null('')

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        """Decode a definite-length NULL payload; raises on non-empty payload."""
        # NULL is a primitive type -- constructed encoding is not allowed
        if tagSet[0].tagFormat != tag.tagFormatSimple:
            raise error.PyAsn1Error('Simple tag format expected')

        for chunk in readFromStream(substrate, length, options):
            if isinstance(chunk, SubstrateUnderrunError):
                yield chunk

        component = self._createComponent(asn1Spec, tagSet, '', **options)

        # any payload octets make the NULL encoding invalid
        if chunk:
            raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)

        yield component
+
+
class ObjectIdentifierPayloadDecoder(AbstractSimplePayloadDecoder):
    """Decoder for ASN.1 OBJECT IDENTIFIER payloads."""

    protoComponent = univ.ObjectIdentifier(())

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        """Decode OID octets into a tuple of integer arcs.

        Each sub-identifier is encoded base-128, high bit marking
        continuation; the first decoded arc packs the two leading arcs
        per X.690 and is split back apart at the end.
        """
        if tagSet[0].tagFormat != tag.tagFormatSimple:
            raise error.PyAsn1Error('Simple tag format expected')

        for chunk in readFromStream(substrate, length, options):
            if isinstance(chunk, SubstrateUnderrunError):
                yield chunk

        if not chunk:
            raise error.PyAsn1Error('Empty substrate')

        chunk = octs2ints(chunk)

        oid = ()
        index = 0
        substrateLen = len(chunk)
        while index < substrateLen:
            subId = chunk[index]
            index += 1
            if subId < 128:
                # single-octet sub-identifier
                oid += (subId,)
            elif subId > 128:
                # Construct subid from a number of octets
                nextSubId = subId
                subId = 0
                while nextSubId >= 128:
                    subId = (subId << 7) + (nextSubId & 0x7F)
                    if index >= substrateLen:
                        raise error.SubstrateUnderrunError(
                            'Short substrate for sub-OID past %s' % (oid,)
                        )
                    nextSubId = chunk[index]
                    index += 1
                oid += ((subId << 7) + nextSubId,)
            elif subId == 128:
                # ASN.1 spec forbids leading zeros (0x80) in OID
                # encoding, tolerating it opens a vulnerability. See
                # https://www.esat.kuleuven.be/cosic/publications/article-1432.pdf
                # page 7
                raise error.PyAsn1Error('Invalid octet 0x80 in OID encoding')

        # Decode two leading arcs
        if 0 <= oid[0] <= 39:
            oid = (0,) + oid
        elif 40 <= oid[0] <= 79:
            oid = (1, oid[0] - 40) + oid[1:]
        elif oid[0] >= 80:
            oid = (2, oid[0] - 80) + oid[1:]
        else:
            raise error.PyAsn1Error('Malformed first OID octet: %s' % chunk[0])

        yield self._createComponent(asn1Spec, tagSet, oid, **options)
+
+
class RealPayloadDecoder(AbstractSimplePayloadDecoder):
    """Decoder for ASN.1 REAL payloads (binary, character and special forms)."""

    protoComponent = univ.Real()

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        """Decode a REAL payload.

        The first octet (`fo`) selects the encoding form: high bit set ->
        binary mantissa/exponent form; 0x40 alone -> +/- infinity; top two
        bits clear -> ISO 6093 character form (NR1/NR2/NR3).
        """
        if tagSet[0].tagFormat != tag.tagFormatSimple:
            raise error.PyAsn1Error('Simple tag format expected')

        for chunk in readFromStream(substrate, length, options):
            if isinstance(chunk, SubstrateUnderrunError):
                yield chunk

        if not chunk:
            # zero-length payload encodes the value 0
            yield self._createComponent(asn1Spec, tagSet, 0.0, **options)
            return

        fo = oct2int(chunk[0])
        chunk = chunk[1:]
        if fo & 0x80:  # binary encoding
            if not chunk:
                raise error.PyAsn1Error("Incomplete floating-point value")

            if LOG:
                LOG('decoding binary encoded REAL')

            # low two bits give the exponent length (1..3); 4 means the
            # next octet carries the exponent length explicitly
            n = (fo & 0x03) + 1

            if n == 4:
                n = oct2int(chunk[0])
                chunk = chunk[1:]

            eo, chunk = chunk[:n], chunk[n:]

            if not eo or not chunk:
                raise error.PyAsn1Error('Real exponent screwed')

            # sign-extend: seed with -1 when the exponent's top bit is set
            e = oct2int(eo[0]) & 0x80 and -1 or 0

            while eo:  # exponent
                e <<= 8
                e |= oct2int(eo[0])
                eo = eo[1:]

            b = fo >> 4 & 0x03  # base bits

            if b > 2:
                raise error.PyAsn1Error('Illegal Real base')

            if b == 1:  # encbase = 8
                e *= 3

            elif b == 2:  # encbase = 16
                e *= 4
            p = 0

            while chunk:  # value
                p <<= 8
                p |= oct2int(chunk[0])
                chunk = chunk[1:]

            if fo & 0x40:  # sign bit
                p = -p

            sf = fo >> 2 & 0x03  # scale bits
            p *= 2 ** sf
            value = (p, 2, e)

        elif fo & 0x40:  # infinite value
            if LOG:
                LOG('decoding infinite REAL')

            value = fo & 0x01 and '-inf' or 'inf'

        elif fo & 0xc0 == 0:  # character encoding
            if not chunk:
                raise error.PyAsn1Error("Incomplete floating-point value")

            if LOG:
                LOG('decoding character encoded REAL')

            try:
                if fo & 0x3 == 0x1:  # NR1
                    value = (int(chunk), 10, 0)

                elif fo & 0x3 == 0x2:  # NR2
                    value = float(chunk)

                elif fo & 0x3 == 0x3:  # NR3
                    value = float(chunk)

                else:
                    raise error.SubstrateUnderrunError(
                        'Unknown NR (tag %s)' % fo
                    )

            except ValueError:
                raise error.SubstrateUnderrunError(
                    'Bad character Real syntax'
                )

        else:
            raise error.SubstrateUnderrunError(
                'Unknown encoding (tag %s)' % fo
            )

        yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+
class AbstractConstructedPayloadDecoder(AbstractPayloadDecoder):
    # Base for decoders of constructed types; concrete subclasses supply
    # the prototype component object.
    protoComponent = None
+
+
class ConstructedPayloadDecoderBase(AbstractConstructedPayloadDecoder):
    """Shared machinery for SEQUENCE/SET (and *-OF) payload decoding.

    Subclasses provide the prototype "record" (SEQUENCE/SET) and
    "sequence" (SEQUENCE OF/SET OF) components used when no `asn1Spec`
    guides the decoding.
    """
    protoRecordComponent = None
    protoSequenceComponent = None

    def _getComponentTagMap(self, asn1Object, idx):
        raise NotImplementedError()

    def _getComponentPositionByType(self, asn1Object, tagSet, idx):
        raise NotImplementedError()

    def _decodeComponentsSchemaless(
            self, substrate, tagSet=None, decodeFun=None,
            length=None, **options):
        """Decode components without a schema, then guess the container type.

        `length == -1` means indefinite length: decode until end-of-octets.
        """
        asn1Object = None

        components = []
        componentTypes = set()

        original_position = substrate.tell()

        while length == -1 or substrate.tell() < original_position + length:
            for component in decodeFun(substrate, **options):
                if isinstance(component, SubstrateUnderrunError):
                    yield component

            if length == -1 and component is eoo.endOfOctets:
                break

            components.append(component)
            componentTypes.add(component.tagSet)

        # Now we have to guess is it SEQUENCE/SET or SEQUENCE OF/SET OF
        # The heuristics is:
        # * 1+ components of different types -> likely SEQUENCE/SET
        # * otherwise -> likely SEQUENCE OF/SET OF
        if len(componentTypes) > 1:
            protoComponent = self.protoRecordComponent

        else:
            protoComponent = self.protoSequenceComponent

        asn1Object = protoComponent.clone(
            # construct tagSet from base tag from prototype ASN.1 object
            # and additional tags recovered from the substrate
            tagSet=tag.TagSet(protoComponent.tagSet.baseTag, *tagSet.superTags)
        )

        if LOG:
            LOG('guessed %r container type (pass `asn1Spec` to guide the '
                'decoder)' % asn1Object)

        for idx, component in enumerate(components):
            asn1Object.setComponentByPosition(
                idx, component,
                verifyConstraints=False,
                matchTags=False, matchConstraints=False
            )

        yield asn1Object

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        """Decode a definite-length constructed value.

        Dispatches between: caller-supplied substrate handler, schemaless
        decoding (no `asn1Spec`), schema-driven SEQUENCE/SET decoding
        (including open-type resolution), and SEQUENCE OF/SET OF decoding.
        """
        if tagSet[0].tagFormat != tag.tagFormatConstructed:
            raise error.PyAsn1Error('Constructed tag format expected')

        original_position = substrate.tell()

        if substrateFun:
            if asn1Spec is not None:
                asn1Object = asn1Spec.clone()

            elif self.protoComponent is not None:
                asn1Object = self.protoComponent.clone(tagSet=tagSet)

            else:
                # no single prototype applies -- pass both candidates
                asn1Object = self.protoRecordComponent, self.protoSequenceComponent

            for chunk in substrateFun(asn1Object, substrate, length, options):
                yield chunk

            return

        if asn1Spec is None:
            for asn1Object in self._decodeComponentsSchemaless(
                    substrate, tagSet=tagSet, decodeFun=decodeFun,
                    length=length, **options):
                if isinstance(asn1Object, SubstrateUnderrunError):
                    yield asn1Object

            if substrate.tell() < original_position + length:
                if LOG:
                    for trailing in readFromStream(substrate, context=options):
                        if isinstance(trailing, SubstrateUnderrunError):
                            yield trailing

                    LOG('Unused trailing %d octets encountered: %s' % (
                        len(trailing), debug.hexdump(trailing)))

            yield asn1Object

            return

        asn1Object = asn1Spec.clone()
        asn1Object.clear()

        options = self._passAsn1Object(asn1Object, options)

        if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):

            namedTypes = asn1Spec.componentType

            isSetType = asn1Spec.typeId == univ.Set.typeId
            # deterministic means component order alone identifies each field
            isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault

            if LOG:
                LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
                    not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
                    asn1Spec))

            seenIndices = set()
            idx = 0
            while substrate.tell() - original_position < length:
                if not namedTypes:
                    componentType = None

                elif isSetType:
                    componentType = namedTypes.tagMapUnique

                else:
                    try:
                        if isDeterministic:
                            componentType = namedTypes[idx].asn1Object

                        elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
                            componentType = namedTypes.getTagMapNearPosition(idx)

                        else:
                            componentType = namedTypes[idx].asn1Object

                    except IndexError:
                        raise error.PyAsn1Error(
                            'Excessive components decoded at %r' % (asn1Spec,)
                        )

                for component in decodeFun(substrate, componentType, **options):
                    if isinstance(component, SubstrateUnderrunError):
                        yield component

                if not isDeterministic and namedTypes:
                    # recover the true position of the decoded component
                    if isSetType:
                        idx = namedTypes.getPositionByType(component.effectiveTagSet)

                    elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
                        idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)

                asn1Object.setComponentByPosition(
                    idx, component,
                    verifyConstraints=False,
                    matchTags=False, matchConstraints=False
                )

                seenIndices.add(idx)
                idx += 1

            if LOG:
                LOG('seen component indices %s' % seenIndices)

            if namedTypes:
                if not namedTypes.requiredComponents.issubset(seenIndices):
                    raise error.PyAsn1Error(
                        'ASN.1 object %s has uninitialized '
                        'components' % asn1Object.__class__.__name__)

                if namedTypes.hasOpenTypes:

                    openTypes = options.get('openTypes', {})

                    if LOG:
                        LOG('user-specified open types map:')

                        for k, v in openTypes.items():
                            LOG('%s -> %r' % (k, v))

                    if openTypes or options.get('decodeOpenTypes', False):

                        for idx, namedType in enumerate(namedTypes.namedTypes):
                            if not namedType.openType:
                                continue

                            if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
                                continue

                            governingValue = asn1Object.getComponentByName(
                                namedType.openType.name
                            )

                            # user-supplied map first, then the schema's own map
                            try:
                                openType = openTypes[governingValue]

                            except KeyError:

                                if LOG:
                                    LOG('default open types map of component '
                                        '"%s.%s" governed by component "%s.%s"'
                                        ':' % (asn1Object.__class__.__name__,
                                               namedType.name,
                                               asn1Object.__class__.__name__,
                                               namedType.openType.name))

                                    for k, v in namedType.openType.items():
                                        LOG('%s -> %r' % (k, v))

                                try:
                                    openType = namedType.openType[governingValue]

                                except KeyError:
                                    if LOG:
                                        LOG('failed to resolve open type by governing '
                                            'value %r' % (governingValue,))
                                    continue

                            if LOG:
                                LOG('resolved open type %r by governing '
                                    'value %r' % (openType, governingValue))

                            containerValue = asn1Object.getComponentByPosition(idx)

                            if containerValue.typeId in (
                                    univ.SetOf.typeId, univ.SequenceOf.typeId):

                                for pos, containerElement in enumerate(
                                        containerValue):

                                    stream = asSeekableStream(containerValue[pos].asOctets())

                                    for component in decodeFun(stream, asn1Spec=openType, **options):
                                        if isinstance(component, SubstrateUnderrunError):
                                            yield component

                                    containerValue[pos] = component

                            else:
                                stream = asSeekableStream(asn1Object.getComponentByPosition(idx).asOctets())

                                for component in decodeFun(stream, asn1Spec=openType, **options):
                                    if isinstance(component, SubstrateUnderrunError):
                                        yield component

                                asn1Object.setComponentByPosition(idx, component)

                else:
                    inconsistency = asn1Object.isInconsistent
                    if inconsistency:
                        raise inconsistency

        else:
            # SEQUENCE OF / SET OF: all components share one type
            componentType = asn1Spec.componentType

            if LOG:
                LOG('decoding type %r chosen by given `asn1Spec`' % componentType)

            idx = 0

            while substrate.tell() - original_position < length:
                for component in decodeFun(substrate, componentType, **options):
                    if isinstance(component, SubstrateUnderrunError):
                        yield component

                asn1Object.setComponentByPosition(
                    idx, component,
                    verifyConstraints=False,
                    matchTags=False, matchConstraints=False
                )

                idx += 1

        yield asn1Object

    def indefLenValueDecoder(self, substrate, asn1Spec,
                             tagSet=None, length=None, state=None,
                             decodeFun=None, substrateFun=None,
                             **options):
        """Decode an indefinite-length constructed value.

        Mirrors `valueDecoder` but loops until the end-of-octets sentinel
        instead of tracking consumed substrate length.
        """
        if tagSet[0].tagFormat != tag.tagFormatConstructed:
            raise error.PyAsn1Error('Constructed tag format expected')

        if substrateFun is not None:
            if asn1Spec is not None:
                asn1Object = asn1Spec.clone()

            elif self.protoComponent is not None:
                asn1Object = self.protoComponent.clone(tagSet=tagSet)

            else:
                asn1Object = self.protoRecordComponent, self.protoSequenceComponent

            for chunk in substrateFun(asn1Object, substrate, length, options):
                yield chunk

            return

        if asn1Spec is None:
            for asn1Object in self._decodeComponentsSchemaless(
                    substrate, tagSet=tagSet, decodeFun=decodeFun,
                    length=length, **dict(options, allowEoo=True)):
                if isinstance(asn1Object, SubstrateUnderrunError):
                    yield asn1Object

            yield asn1Object

            return

        asn1Object = asn1Spec.clone()
        asn1Object.clear()

        options = self._passAsn1Object(asn1Object, options)

        if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):

            namedTypes = asn1Object.componentType

            isSetType = asn1Object.typeId == univ.Set.typeId
            isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault

            if LOG:
                LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
                    not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
                    asn1Spec))

            seenIndices = set()

            idx = 0

            while True:  # loop over components
                if len(namedTypes) <= idx:
                    asn1Spec = None

                elif isSetType:
                    asn1Spec = namedTypes.tagMapUnique

                else:
                    try:
                        if isDeterministic:
                            asn1Spec = namedTypes[idx].asn1Object

                        elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
                            asn1Spec = namedTypes.getTagMapNearPosition(idx)

                        else:
                            asn1Spec = namedTypes[idx].asn1Object

                    except IndexError:
                        raise error.PyAsn1Error(
                            'Excessive components decoded at %r' % (asn1Object,)
                        )

                for component in decodeFun(substrate, asn1Spec, allowEoo=True, **options):

                    if isinstance(component, SubstrateUnderrunError):
                        yield component

                    if component is eoo.endOfOctets:
                        break

                # re-test after the inner loop: EOO ends the component loop
                if component is eoo.endOfOctets:
                    break

                if not isDeterministic and namedTypes:
                    if isSetType:
                        idx = namedTypes.getPositionByType(component.effectiveTagSet)

                    elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
                        idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)

                asn1Object.setComponentByPosition(
                    idx, component,
                    verifyConstraints=False,
                    matchTags=False, matchConstraints=False
                )

                seenIndices.add(idx)
                idx += 1

            if LOG:
                LOG('seen component indices %s' % seenIndices)

            if namedTypes:
                if not namedTypes.requiredComponents.issubset(seenIndices):
                    raise error.PyAsn1Error(
                        'ASN.1 object %s has uninitialized '
                        'components' % asn1Object.__class__.__name__)

                if namedTypes.hasOpenTypes:

                    openTypes = options.get('openTypes', {})

                    if LOG:
                        LOG('user-specified open types map:')

                        for k, v in openTypes.items():
                            LOG('%s -> %r' % (k, v))

                    if openTypes or options.get('decodeOpenTypes', False):

                        for idx, namedType in enumerate(namedTypes.namedTypes):
                            if not namedType.openType:
                                continue

                            if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
                                continue

                            governingValue = asn1Object.getComponentByName(
                                namedType.openType.name
                            )

                            try:
                                openType = openTypes[governingValue]

                            except KeyError:

                                if LOG:
                                    LOG('default open types map of component '
                                        '"%s.%s" governed by component "%s.%s"'
                                        ':' % (asn1Object.__class__.__name__,
                                               namedType.name,
                                               asn1Object.__class__.__name__,
                                               namedType.openType.name))

                                    for k, v in namedType.openType.items():
                                        LOG('%s -> %r' % (k, v))

                                try:
                                    openType = namedType.openType[governingValue]

                                except KeyError:
                                    if LOG:
                                        LOG('failed to resolve open type by governing '
                                            'value %r' % (governingValue,))
                                    continue

                            if LOG:
                                LOG('resolved open type %r by governing '
                                    'value %r' % (openType, governingValue))

                            containerValue = asn1Object.getComponentByPosition(idx)

                            if containerValue.typeId in (
                                    univ.SetOf.typeId, univ.SequenceOf.typeId):

                                for pos, containerElement in enumerate(
                                        containerValue):

                                    stream = asSeekableStream(containerValue[pos].asOctets())

                                    for component in decodeFun(stream, asn1Spec=openType,
                                                               **dict(options, allowEoo=True)):
                                        if isinstance(component, SubstrateUnderrunError):
                                            yield component

                                        if component is eoo.endOfOctets:
                                            break

                                    containerValue[pos] = component

                            else:
                                stream = asSeekableStream(asn1Object.getComponentByPosition(idx).asOctets())
                                for component in decodeFun(stream, asn1Spec=openType,
                                                           **dict(options, allowEoo=True)):
                                    if isinstance(component, SubstrateUnderrunError):
                                        yield component

                                    if component is eoo.endOfOctets:
                                        break

                                asn1Object.setComponentByPosition(idx, component)

                else:
                    inconsistency = asn1Object.isInconsistent
                    if inconsistency:
                        raise inconsistency

        else:
            # SEQUENCE OF / SET OF: all components share one type
            componentType = asn1Spec.componentType

            if LOG:
                LOG('decoding type %r chosen by given `asn1Spec`' % componentType)

            idx = 0

            while True:

                for component in decodeFun(
                        substrate, componentType, allowEoo=True, **options):

                    if isinstance(component, SubstrateUnderrunError):
                        yield component

                    if component is eoo.endOfOctets:
                        break

                if component is eoo.endOfOctets:
                    break

                asn1Object.setComponentByPosition(
                    idx, component,
                    verifyConstraints=False,
                    matchTags=False, matchConstraints=False
                )

                idx += 1

        yield asn1Object
+
+
class SequenceOrSequenceOfPayloadDecoder(ConstructedPayloadDecoderBase):
    # Handles both SEQUENCE and SEQUENCE OF when only the tag is known
    protoRecordComponent = univ.Sequence()
    protoSequenceComponent = univ.SequenceOf()
+
+
class SequencePayloadDecoder(SequenceOrSequenceOfPayloadDecoder):
    # Unambiguous SEQUENCE decoder (selected via TYPE_MAP by typeId)
    protoComponent = univ.Sequence()
+
+
class SequenceOfPayloadDecoder(SequenceOrSequenceOfPayloadDecoder):
    # Unambiguous SEQUENCE OF decoder (selected via TYPE_MAP by typeId)
    protoComponent = univ.SequenceOf()
+
+
class SetOrSetOfPayloadDecoder(ConstructedPayloadDecoderBase):
    # Handles both SET and SET OF when only the tag is known
    protoRecordComponent = univ.Set()
    protoSequenceComponent = univ.SetOf()
+
+
class SetPayloadDecoder(SetOrSetOfPayloadDecoder):
    # Unambiguous SET decoder (selected via TYPE_MAP by typeId)
    protoComponent = univ.Set()
+
+
class SetOfPayloadDecoder(SetOrSetOfPayloadDecoder):
    # Unambiguous SET OF decoder (selected via TYPE_MAP by typeId)
    protoComponent = univ.SetOf()
+
+
class ChoicePayloadDecoder(ConstructedPayloadDecoderBase):
    """Decoder for ASN.1 CHOICE payloads."""

    protoComponent = univ.Choice()

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        """Decode a definite-length CHOICE value.

        An explicitly tagged CHOICE has its own tag consumed already, so the
        inner component is decoded fresh; an untagged CHOICE re-uses the
        current tag/length/state for the alternative itself.
        """
        if asn1Spec is None:
            asn1Object = self.protoComponent.clone(tagSet=tagSet)

        else:
            asn1Object = asn1Spec.clone()

        if substrateFun:
            for chunk in substrateFun(asn1Object, substrate, length, options):
                yield chunk

            return

        options = self._passAsn1Object(asn1Object, options)

        if asn1Object.tagSet == tagSet:
            if LOG:
                LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,))

            for component in decodeFun(
                    substrate, asn1Object.componentTagMap, **options):
                if isinstance(component, SubstrateUnderrunError):
                    yield component

        else:
            if LOG:
                LOG('decoding %s as untagged CHOICE' % (tagSet,))

            for component in decodeFun(
                    substrate, asn1Object.componentTagMap, tagSet, length,
                    state, **options):
                if isinstance(component, SubstrateUnderrunError):
                    yield component

        effectiveTagSet = component.effectiveTagSet

        if LOG:
            LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet))

        asn1Object.setComponentByType(
            effectiveTagSet, component,
            verifyConstraints=False,
            matchTags=False, matchConstraints=False,
            innerFlag=False
        )

        yield asn1Object

    def indefLenValueDecoder(self, substrate, asn1Spec,
                             tagSet=None, length=None, state=None,
                             decodeFun=None, substrateFun=None,
                             **options):
        """Decode an indefinite-length CHOICE value.

        For a tagged CHOICE, components are read until end-of-octets; an
        untagged CHOICE yields exactly one alternative.
        """
        if asn1Spec is None:
            asn1Object = self.protoComponent.clone(tagSet=tagSet)

        else:
            asn1Object = asn1Spec.clone()

        if substrateFun:
            for chunk in substrateFun(asn1Object, substrate, length, options):
                yield chunk

            return

        options = self._passAsn1Object(asn1Object, options)

        isTagged = asn1Object.tagSet == tagSet

        if LOG:
            LOG('decoding %s as %stagged CHOICE' % (
                tagSet, isTagged and 'explicitly ' or 'un'))

        while True:

            if isTagged:
                iterator = decodeFun(
                    substrate, asn1Object.componentType.tagMapUnique,
                    **dict(options, allowEoo=True))

            else:
                iterator = decodeFun(
                    substrate, asn1Object.componentType.tagMapUnique,
                    tagSet, length, state, **dict(options, allowEoo=True))

            for component in iterator:

                if isinstance(component, SubstrateUnderrunError):
                    yield component

                if component is eoo.endOfOctets:
                    break

                effectiveTagSet = component.effectiveTagSet

                if LOG:
                    LOG('decoded component %s, effective tag set '
                        '%s' % (component, effectiveTagSet))

                asn1Object.setComponentByType(
                    effectiveTagSet, component,
                    verifyConstraints=False,
                    matchTags=False, matchConstraints=False,
                    innerFlag=False
                )

                if not isTagged:
                    break

            if not isTagged or component is eoo.endOfOctets:
                break

        yield asn1Object
+
+
class AnyPayloadDecoder(AbstractSimplePayloadDecoder):
    """Decoder for ASN.1 ANY: captures raw serialization as an `Any` value."""

    protoComponent = univ.Any()

    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        """Decode a definite-length ANY value.

        When untagged, rewinds the substrate to the marked position so the
        already-consumed tag/length header octets are included in the value.
        """
        if asn1Spec is None:
            isUntagged = True

        elif asn1Spec.__class__ is tagmap.TagMap:
            isUntagged = tagSet not in asn1Spec.tagMap

        else:
            isUntagged = tagSet != asn1Spec.tagSet

        if isUntagged:
            fullPosition = substrate.markedPosition
            currentPosition = substrate.tell()

            # rewind to include the header octets in the captured value
            substrate.seek(fullPosition, os.SEEK_SET)
            length += currentPosition - fullPosition

            if LOG:
                for chunk in peekIntoStream(substrate, length):
                    if isinstance(chunk, SubstrateUnderrunError):
                        yield chunk
                LOG('decoding as untagged ANY, substrate '
                    '%s' % debug.hexdump(chunk))

        if substrateFun:
            for chunk in substrateFun(
                    self._createComponent(asn1Spec, tagSet, noValue, **options),
                    substrate, length, options):
                yield chunk

            return

        for chunk in readFromStream(substrate, length, options):
            if isinstance(chunk, SubstrateUnderrunError):
                yield chunk

        yield self._createComponent(asn1Spec, tagSet, chunk, **options)

    def indefLenValueDecoder(self, substrate, asn1Spec,
                             tagSet=None, length=None, state=None,
                             decodeFun=None, substrateFun=None,
                             **options):
        """Decode an indefinite-length ANY value by reassembling fragments."""
        if asn1Spec is None:
            isTagged = False

        elif asn1Spec.__class__ is tagmap.TagMap:
            isTagged = tagSet in asn1Spec.tagMap

        else:
            isTagged = tagSet == asn1Spec.tagSet

        if isTagged:
            # tagged Any type -- consume header substrate
            chunk = null

            if LOG:
                LOG('decoding as tagged ANY')

        else:
            # TODO: Seems not to be tested
            fullPosition = substrate.markedPosition
            currentPosition = substrate.tell()

            substrate.seek(fullPosition, os.SEEK_SET)
            for chunk in readFromStream(substrate, currentPosition - fullPosition, options):
                if isinstance(chunk, SubstrateUnderrunError):
                    yield chunk

            if LOG:
                LOG('decoding as untagged ANY, header substrate %s' % debug.hexdump(chunk))

        # Any components do not inherit initial tag
        asn1Spec = self.protoComponent

        if substrateFun and substrateFun is not self.substrateCollector:
            asn1Object = self._createComponent(
                asn1Spec, tagSet, noValue, **options)

            for chunk in substrateFun(
                    asn1Object, chunk + substrate, length + len(chunk), options):
                yield chunk

            return

        if LOG:
            LOG('assembling constructed serialization')

        # All inner fragments are of the same type, treat them as octet string
        substrateFun = self.substrateCollector

        while True:  # loop over fragments

            for component in decodeFun(
                    substrate, asn1Spec, substrateFun=substrateFun,
                    allowEoo=True, **options):

                if isinstance(component, SubstrateUnderrunError):
                    yield component

                if component is eoo.endOfOctets:
                    break

            if component is eoo.endOfOctets:
                break

            chunk += component

        if substrateFun:
            yield chunk  # TODO: Weird
        else:
            yield self._createComponent(asn1Spec, tagSet, chunk, **options)
+
+
+# character string types
class UTF8StringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.UTF8String()
+
+
class NumericStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.NumericString()
+
+
class PrintableStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.PrintableString()
+
+
class TeletexStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.TeletexString()
+
+
class VideotexStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.VideotexString()
+
+
class IA5StringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.IA5String()
+
+
class GraphicStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.GraphicString()
+
+
class VisibleStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.VisibleString()
+
+
class GeneralStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.GeneralString()
+
+
class UniversalStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.UniversalString()
+
+
class BMPStringPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = char.BMPString()
+
+
+# "useful" types
class ObjectDescriptorPayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = useful.ObjectDescriptor()
+
+
class GeneralizedTimePayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = useful.GeneralizedTime()
+
+
class UTCTimePayloadDecoder(OctetStringPayloadDecoder):
    # Same payload handling as OCTET STRING; only the prototype differs
    protoComponent = useful.UTCTime()
+
+
# Tag-to-decoder map for built-in ASN.1 types.  Entries marked "conflicts"
# share a tag with another type; those are disambiguated via TYPE_MAP when
# an `asn1Spec` is available.
TAG_MAP = {
    univ.Integer.tagSet: IntegerPayloadDecoder(),
    univ.Boolean.tagSet: BooleanPayloadDecoder(),
    univ.BitString.tagSet: BitStringPayloadDecoder(),
    univ.OctetString.tagSet: OctetStringPayloadDecoder(),
    univ.Null.tagSet: NullPayloadDecoder(),
    univ.ObjectIdentifier.tagSet: ObjectIdentifierPayloadDecoder(),
    univ.Enumerated.tagSet: IntegerPayloadDecoder(),
    univ.Real.tagSet: RealPayloadDecoder(),
    univ.Sequence.tagSet: SequenceOrSequenceOfPayloadDecoder(),  # conflicts with SequenceOf
    univ.Set.tagSet: SetOrSetOfPayloadDecoder(),  # conflicts with SetOf
    univ.Choice.tagSet: ChoicePayloadDecoder(),  # conflicts with Any
    # character string types
    char.UTF8String.tagSet: UTF8StringPayloadDecoder(),
    char.NumericString.tagSet: NumericStringPayloadDecoder(),
    char.PrintableString.tagSet: PrintableStringPayloadDecoder(),
    char.TeletexString.tagSet: TeletexStringPayloadDecoder(),
    char.VideotexString.tagSet: VideotexStringPayloadDecoder(),
    char.IA5String.tagSet: IA5StringPayloadDecoder(),
    char.GraphicString.tagSet: GraphicStringPayloadDecoder(),
    char.VisibleString.tagSet: VisibleStringPayloadDecoder(),
    char.GeneralString.tagSet: GeneralStringPayloadDecoder(),
    char.UniversalString.tagSet: UniversalStringPayloadDecoder(),
    char.BMPString.tagSet: BMPStringPayloadDecoder(),
    # useful types
    useful.ObjectDescriptor.tagSet: ObjectDescriptorPayloadDecoder(),
    useful.GeneralizedTime.tagSet: GeneralizedTimePayloadDecoder(),
    useful.UTCTime.tagSet: UTCTimePayloadDecoder()
}
+
# Type-to-codec map for ambiguous ASN.1 types
TYPE_MAP = {
    univ.Set.typeId: SetPayloadDecoder(),
    univ.SetOf.typeId: SetOfPayloadDecoder(),
    univ.Sequence.typeId: SequencePayloadDecoder(),
    univ.SequenceOf.typeId: SequenceOfPayloadDecoder(),
    univ.Choice.typeId: ChoicePayloadDecoder(),
    univ.Any.typeId: AnyPayloadDecoder()
}

# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
# NOTE: these are plain references, so later mutation of TAG_MAP/TYPE_MAP
# is visible through the aliases as well.
tagMap = TAG_MAP
typeMap = TYPE_MAP
+
# Register unambiguous decoders by type ID as well, so spec-driven lookup
# can bypass tag matching.  Existing (ambiguous) entries are preserved.
for typeDecoder in TAG_MAP.values():
    if typeDecoder.protoComponent is None:
        continue
    typeId = typeDecoder.protoComponent.__class__.typeId
    if typeId is not None:
        TYPE_MAP.setdefault(typeId, typeDecoder)
+
+
# Decoder finite-state-machine states, numbered 0..9.
# Fix: `[x for x in range(10)]` was a redundant identity comprehension;
# `range(10)` unpacks directly to the same ten integers.
(stDecodeTag,
 stDecodeLength,
 stGetValueDecoder,
 stGetValueDecoderByAsn1Spec,
 stGetValueDecoderByTag,
 stTryAsExplicitTag,
 stDecodeValue,
 stDumpRawValue,
 stErrorCondition,
 stStop) = range(10)
+
+
# Serialized end-of-octets marker (two zero octets) terminating
# indefinite-length encodings.
EOO_SENTINEL = ints2octs((0, 0))
+
+
class SingleItemDecoder(object):
    """Decodes one ASN.1 item from a seekable BER substrate (streaming)."""

    defaultErrorState = stErrorCondition
    #defaultErrorState = stDumpRawValue
    defaultRawDecoder = AnyPayloadDecoder()

    supportIndefLength = True

    # class-level defaults; instances may override via __init__ arguments
    TAG_MAP = TAG_MAP
    TYPE_MAP = TYPE_MAP

    def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
        # `_MISSING` sentinel distinguishes "argument not given" from an
        # explicitly passed falsy map
        self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
        self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP

        # Tag & TagSet objects caches
        self._tagCache = {}
        self._tagSetCache = {}
+
+ def __call__(self, substrate, asn1Spec=None,
+ tagSet=None, length=None, state=stDecodeTag,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ allowEoo = options.pop('allowEoo', False)
+
+ if LOG:
+ LOG('decoder called at scope %s with state %d, working with up '
+ 'to %s octets of substrate: '
+ '%s' % (debug.scope, state, length, substrate))
+
+ # Look for end-of-octets sentinel
+ if allowEoo and self.supportIndefLength:
+
+ for eoo_candidate in readFromStream(substrate, 2, options):
+ if isinstance(eoo_candidate, SubstrateUnderrunError):
+ yield eoo_candidate
+
+ if eoo_candidate == EOO_SENTINEL:
+ if LOG:
+ LOG('end-of-octets sentinel found')
+ yield eoo.endOfOctets
+ return
+
+ else:
+ substrate.seek(-2, os.SEEK_CUR)
+
+ tagMap = self._tagMap
+ typeMap = self._typeMap
+ tagCache = self._tagCache
+ tagSetCache = self._tagSetCache
+
+ value = noValue
+
+ substrate.markedPosition = substrate.tell()
+
+ while state is not stStop:
+
+ if state is stDecodeTag:
+ # Decode tag
+ isShortTag = True
+
+ for firstByte in readFromStream(substrate, 1, options):
+ if isinstance(firstByte, SubstrateUnderrunError):
+ yield firstByte
+
+ firstOctet = ord(firstByte)
+
+ try:
+ lastTag = tagCache[firstOctet]
+
+ except KeyError:
+ integerTag = firstOctet
+ tagClass = integerTag & 0xC0
+ tagFormat = integerTag & 0x20
+ tagId = integerTag & 0x1F
+
+ if tagId == 0x1F:
+ isShortTag = False
+ lengthOctetIdx = 0
+ tagId = 0
+
+ while True:
+ for integerByte in readFromStream(substrate, 1, options):
+ if isinstance(integerByte, SubstrateUnderrunError):
+ yield integerByte
+
+ if not integerByte:
+ raise error.SubstrateUnderrunError(
+ 'Short octet stream on long tag decoding'
+ )
+
+ integerTag = ord(integerByte)
+ lengthOctetIdx += 1
+ tagId <<= 7
+ tagId |= (integerTag & 0x7F)
+
+ if not integerTag & 0x80:
+ break
+
+ lastTag = tag.Tag(
+ tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
+ )
+
+ if isShortTag:
+ # cache short tags
+ tagCache[firstOctet] = lastTag
+
+ if tagSet is None:
+ if isShortTag:
+ try:
+ tagSet = tagSetCache[firstOctet]
+
+ except KeyError:
+ # base tag not recovered
+ tagSet = tag.TagSet((), lastTag)
+ tagSetCache[firstOctet] = tagSet
+ else:
+ tagSet = tag.TagSet((), lastTag)
+
+ else:
+ tagSet = lastTag + tagSet
+
+ state = stDecodeLength
+
+ if LOG:
+ LOG('tag decoded into %s, decoding length' % tagSet)
+
+ if state is stDecodeLength:
+ # Decode length
+ for firstOctet in readFromStream(substrate, 1, options):
+ if isinstance(firstOctet, SubstrateUnderrunError):
+ yield firstOctet
+
+ firstOctet = ord(firstOctet)
+
+ if firstOctet < 128:
+ length = firstOctet
+
+ elif firstOctet > 128:
+ size = firstOctet & 0x7F
+ # encoded in size bytes
+ for encodedLength in readFromStream(substrate, size, options):
+ if isinstance(encodedLength, SubstrateUnderrunError):
+ yield encodedLength
+ encodedLength = list(encodedLength)
+ # missing check on maximum size, which shouldn't be a
+ # problem, we can handle more than is possible
+ if len(encodedLength) != size:
+ raise error.SubstrateUnderrunError(
+ '%s<%s at %s' % (size, len(encodedLength), tagSet)
+ )
+
+ length = 0
+ for lengthOctet in encodedLength:
+ length <<= 8
+ length |= oct2int(lengthOctet)
+ size += 1
+
+ else: # 128 means indefinite
+ length = -1
+
+ if length == -1 and not self.supportIndefLength:
+ raise error.PyAsn1Error('Indefinite length encoding not supported by this codec')
+
+ state = stGetValueDecoder
+
+ if LOG:
+ LOG('value length decoded into %d' % length)
+
+ if state is stGetValueDecoder:
+ if asn1Spec is None:
+ state = stGetValueDecoderByTag
+
+ else:
+ state = stGetValueDecoderByAsn1Spec
+ #
+ # There're two ways of creating subtypes in ASN.1 what influences
+ # decoder operation. These methods are:
+ # 1) Either base types used in or no IMPLICIT tagging has been
+ # applied on subtyping.
+ # 2) Subtype syntax drops base type information (by means of
+ # IMPLICIT tagging.
+ # The first case allows for complete tag recovery from substrate
+ # while the second one requires original ASN.1 type spec for
+ # decoding.
+ #
+ # In either case a set of tags (tagSet) is coming from substrate
+ # in an incremental, tag-by-tag fashion (this is the case of
+ # EXPLICIT tag which is most basic). Outermost tag comes first
+ # from the wire.
+ #
+ if state is stGetValueDecoderByTag:
+ try:
+ concreteDecoder = tagMap[tagSet]
+
+ except KeyError:
+ concreteDecoder = None
+
+ if concreteDecoder:
+ state = stDecodeValue
+
+ else:
+ try:
+ concreteDecoder = tagMap[tagSet[:1]]
+
+ except KeyError:
+ concreteDecoder = None
+
+ if concreteDecoder:
+ state = stDecodeValue
+ else:
+ state = stTryAsExplicitTag
+
+ if LOG:
+ LOG('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
+ debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
+
+ if state is stGetValueDecoderByAsn1Spec:
+
+ if asn1Spec.__class__ is tagmap.TagMap:
+ try:
+ chosenSpec = asn1Spec[tagSet]
+
+ except KeyError:
+ chosenSpec = None
+
+ if LOG:
+ LOG('candidate ASN.1 spec is a map of:')
+
+ for firstOctet, v in asn1Spec.presentTypes.items():
+ LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
+
+ if asn1Spec.skipTypes:
+ LOG('but neither of: ')
+ for firstOctet, v in asn1Spec.skipTypes.items():
+ LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
+ LOG('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '<none>' or chosenSpec.prettyPrintType(), tagSet))
+
+ elif tagSet == asn1Spec.tagSet or tagSet in asn1Spec.tagMap:
+ chosenSpec = asn1Spec
+ if LOG:
+ LOG('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
+
+ else:
+ chosenSpec = None
+
+ if chosenSpec is not None:
+ try:
+ # ambiguous type or just faster codec lookup
+ concreteDecoder = typeMap[chosenSpec.typeId]
+
+ if LOG:
+ LOG('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,))
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(chosenSpec.tagSet.baseTag, chosenSpec.tagSet.baseTag)
+ try:
+ # base type or tagged subtype
+ concreteDecoder = tagMap[baseTagSet]
+
+ if LOG:
+ LOG('value decoder chosen by base %s' % (baseTagSet,))
+
+ except KeyError:
+ concreteDecoder = None
+
+ if concreteDecoder:
+ asn1Spec = chosenSpec
+ state = stDecodeValue
+
+ else:
+ state = stTryAsExplicitTag
+
+ else:
+ concreteDecoder = None
+ state = stTryAsExplicitTag
+
+ if LOG:
+ LOG('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
+ debug.scope.push(chosenSpec is None and '?' or chosenSpec.__class__.__name__)
+
+ if state is stDecodeValue:
+ if not options.get('recursiveFlag', True) and not substrateFun: # deprecate this
+ substrateFun = lambda a, b, c: (a, b[:c])
+
+ original_position = substrate.tell()
+
+ if length == -1: # indef length
+ for value in concreteDecoder.indefLenValueDecoder(
+ substrate, asn1Spec,
+ tagSet, length, stGetValueDecoder,
+ self, substrateFun, **options):
+ if isinstance(value, SubstrateUnderrunError):
+ yield value
+
+ else:
+ for value in concreteDecoder.valueDecoder(
+ substrate, asn1Spec,
+ tagSet, length, stGetValueDecoder,
+ self, substrateFun, **options):
+ if isinstance(value, SubstrateUnderrunError):
+ yield value
+
+ bytesRead = substrate.tell() - original_position
+ if bytesRead != length:
+ raise PyAsn1Error(
+ "Read %s bytes instead of expected %s." % (bytesRead, length))
+
+ if LOG:
+ LOG('codec %s yields type %s, value:\n%s\n...' % (
+ concreteDecoder.__class__.__name__, value.__class__.__name__,
+ isinstance(value, base.Asn1Item) and value.prettyPrint() or value))
+
+ state = stStop
+ break
+
+ if state is stTryAsExplicitTag:
+ if (tagSet and
+ tagSet[0].tagFormat == tag.tagFormatConstructed and
+ tagSet[0].tagClass != tag.tagClassUniversal):
+ # Assume explicit tagging
+ concreteDecoder = rawPayloadDecoder
+ state = stDecodeValue
+
+ else:
+ concreteDecoder = None
+ state = self.defaultErrorState
+
+ if LOG:
+ LOG('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as failure'))
+
+ if state is stDumpRawValue:
+ concreteDecoder = self.defaultRawDecoder
+
+ if LOG:
+ LOG('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
+
+ state = stDecodeValue
+
+ if state is stErrorCondition:
+ raise error.PyAsn1Error(
+ '%s not in asn1Spec: %r' % (tagSet, asn1Spec)
+ )
+
+ if LOG:
+ debug.scope.pop()
+ LOG('decoder left scope %s, call completed' % debug.scope)
+
+ yield value
+
+
class StreamingDecoder(object):
    """Create an iterator that turns BER/CER/DER byte stream into ASN.1 objects.

    On each iteration, consume whatever BER/CER/DER serialization is
    available in the `substrate` stream-like object and turns it into
    one or more, possibly nested, ASN.1 objects.

    Parameters
    ----------
    substrate: :py:class:`file`, :py:class:`io.BytesIO`
        BER/CER/DER serialization in form of a byte stream

    Keyword Args
    ------------
    asn1Spec: :py:class:`~pyasn1.type.base.PyAsn1Item`
        A pyasn1 type object to act as a template guiding the decoder.
        Depending on the ASN.1 structure being decoded, `asn1Spec` may
        or may not be required. One of the reasons why `asn1Spec` may
        be required is that ASN.1 structure is encoded in the *IMPLICIT*
        tagging mode.

    Yields
    ------
    : :py:class:`~pyasn1.type.base.PyAsn1Item`, :py:class:`~pyasn1.error.SubstrateUnderrunError`
        Decoded ASN.1 object (possibly, nested) or
        :py:class:`~pyasn1.error.SubstrateUnderrunError` object indicating
        insufficient BER/CER/DER serialization on input to fully recover ASN.1
        objects from it.

        In the latter case the caller is advised to ensure some more data in
        the input stream, then call the iterator again. The decoder will resume
        the decoding process using the newly arrived data.

        The `context` property of :py:class:`~pyasn1.error.SubstrateUnderrunError`
        object might hold a reference to the partially populated ASN.1 object
        being reconstructed.

    Raises
    ------
    ~pyasn1.error.PyAsn1Error, ~pyasn1.error.EndOfStreamError
        `PyAsn1Error` on deserialization error, `EndOfStreamError` on
        premature stream closure.

    Examples
    --------
    Decode BER serialisation without ASN.1 schema

    .. code-block:: pycon

        >>> stream = io.BytesIO(
        ...    b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
        >>>
        >>> for asn1Object in StreamingDecoder(stream):
        ...     print(asn1Object)
        >>>
        SequenceOf:
         1 2 3

    Decode BER serialisation with ASN.1 schema

    .. code-block:: pycon

        >>> stream = io.BytesIO(
        ...    b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
        >>>
        >>> schema = SequenceOf(componentType=Integer())
        >>>
        >>> decoder = StreamingDecoder(stream, asn1Spec=schema)
        >>> for asn1Object in decoder:
        ...     print(asn1Object)
        >>>
        SequenceOf:
         1 2 3
    """

    # Pluggable per-object decoder; CER/DER subclasses substitute stricter ones
    SINGLE_ITEM_DECODER = SingleItemDecoder

    def __init__(self, substrate, asn1Spec=None, **options):
        self._singleItemDecoder = self.SINGLE_ITEM_DECODER(**options)
        # normalize bytes-like or file-like input into a seekable stream
        self._substrate = asSeekableStream(substrate)
        self._asn1Spec = asn1Spec
        self._options = options

    def __iter__(self):
        # Decode objects one at a time until the stream is exhausted.
        while True:
            for asn1Object in self._singleItemDecoder(
                    self._substrate, self._asn1Spec, **self._options):
                yield asn1Object

            for chunk in isEndOfStream(self._substrate):
                if isinstance(chunk, SubstrateUnderrunError):
                    # not enough data even to test for EOF -- yield a bare
                    # None so the caller can feed more data and resume
                    yield

                break

            if chunk:
                break
+
+
class Decoder(object):
    """Create a BER decoder object.

    Parse BER/CER/DER octet-stream into one, possibly nested, ASN.1 object.
    """
    # Pluggable streaming decoder; CER/DER subclasses substitute their own
    STREAMING_DECODER = StreamingDecoder

    @classmethod
    def __call__(cls, substrate, asn1Spec=None, **options):
        """Turns BER/CER/DER octet stream into an ASN.1 object.

        Takes BER/CER/DER octet-stream in form of :py:class:`bytes` (Python 3)
        or :py:class:`str` (Python 2) and decode it into an ASN.1 object
        (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
        may be a scalar or an arbitrary nested structure.

        Parameters
        ----------
        substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
            BER/CER/DER octet-stream to parse

        Keyword Args
        ------------
        asn1Spec: :py:class:`~pyasn1.type.base.PyAsn1Item`
            A pyasn1 type object (:py:class:`~pyasn1.type.base.PyAsn1Item`
            derivative) to act as a template guiding the decoder.
            Depending on the ASN.1 structure being decoded, `asn1Spec` may or
            may not be required. Most common reason for it to be required is
            that ASN.1 structure is encoded in *IMPLICIT* tagging mode.

        Returns
        -------
        : :py:class:`tuple`
            A tuple of :py:class:`~pyasn1.type.base.PyAsn1Item` object
            recovered from BER/CER/DER substrate and the unprocessed trailing
            portion of the `substrate` (may be empty)

        Raises
        ------
        : :py:class:`~pyasn1.error.PyAsn1Error`
            :py:class:`~pyasn1.error.SubstrateUnderrunError` on insufficient
            input or :py:class:`~pyasn1.error.PyAsn1Error` on decoding error.

        Examples
        --------
        Decode BER/CER/DER serialisation without ASN.1 schema

        .. code-block:: pycon

           >>> s, unprocessed = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
           >>> str(s)
           SequenceOf:
            1 2 3

        Decode BER/CER/DER serialisation with ASN.1 schema

        .. code-block:: pycon

           >>> seq = SequenceOf(componentType=Integer())
           >>> s, unprocessed = decode(
                b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
           >>> str(s)
           SequenceOf:
            1 2 3

        """
        substrate = asSeekableStream(substrate)

        streamingDecoder = cls.STREAMING_DECODER(
            substrate, asn1Spec, **options)

        # Only the first complete ASN.1 object is consumed; whatever is
        # left unread in the stream is handed back verbatim as `tail`.
        for asn1Object in streamingDecoder:
            if isinstance(asn1Object, SubstrateUnderrunError):
                raise error.SubstrateUnderrunError('Short substrate on input')

            try:
                tail = next(readFromStream(substrate))

            except error.EndOfStreamError:
                tail = null

            return asn1Object, tail
+
+
+#: Turns BER octet stream into an ASN.1 object.
+#:
+#: Takes BER octet-stream and decode it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: BER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. Most common reason for
+#: it to require is that ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from BER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Notes
+#: -----
+#: This function is deprecated. Please use :py:class:`Decoder` or
+#: :py:class:`StreamingDecoder` class instance.
+#:
+#: Examples
+#: --------
+#: Decode BER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode BER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/ber/encoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/ber/encoder.py
new file mode 100644
index 0000000000..c59b43e455
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/ber/encoder.py
@@ -0,0 +1,917 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat import _MISSING
+from pyasn1.compat.integer import to_bytes
+from pyasn1.compat.octets import (int2oct, oct2int, ints2octs, null,
+ str2octs, isOctetsType)
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['Encoder', 'encode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER)
+
+
class AbstractItemEncoder(object):
    """Base class for per-type value encoders.

    Subclasses implement :py:meth:`encodeValue` to produce the payload;
    this class wraps it with BER identifier (tag) and length octets.
    """
    # whether this type may be emitted in indefinite-length (streaming) form
    supportIndefLenMode = True

    # An outcome of otherwise legit call `encodeFun(eoo.endOfOctets)`
    eooIntegerSubstrate = (0, 0)
    eooOctetsSubstrate = ints2octs(eooIntegerSubstrate)

    # noinspection PyMethodMayBeStatic
    def encodeTag(self, singleTag, isConstructed):
        """Return BER identifier octets for *singleTag* as a tuple of ints."""
        tagClass, tagFormat, tagId = singleTag
        encodedTag = tagClass | tagFormat
        if isConstructed:
            encodedTag |= tag.tagFormatConstructed

        if tagId < 31:
            # short form: tag number fits into the identifier octet
            return encodedTag | tagId,

        else:
            # long form: leading 0x1F octet, then base-128 tag number with
            # the continuation bit (0x80) set on all but the last octet
            substrate = tagId & 0x7f,

            tagId >>= 7

            while tagId:
                substrate = (0x80 | (tagId & 0x7f),) + substrate
                tagId >>= 7

            return (encodedTag | 0x1F,) + substrate

    def encodeLength(self, length, defMode):
        """Return BER length octets (tuple of ints), definite or indefinite."""
        if not defMode and self.supportIndefLenMode:
            # indefinite form -- terminated later by end-of-octets marker
            return (0x80,)

        if length < 0x80:
            # short definite form
            return length,

        else:
            # long definite form: 0x80|N followed by N big-endian octets
            substrate = ()
            while length:
                substrate = (length & 0xff,) + substrate
                length >>= 8

            substrateLen = len(substrate)

            if substrateLen > 126:
                raise error.PyAsn1Error('Length octets overflow (%d)' % substrateLen)

            return (0x80 | substrateLen,) + substrate

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        """Produce payload; must return (substrate, isConstructed, isOctets)."""
        raise error.PyAsn1Error('Not implemented')

    def encode(self, value, asn1Spec=None, encodeFun=None, **options):
        """Encode *value* into a full TLV, applying every tag in its tagSet."""
        if asn1Spec is None:
            tagSet = value.tagSet
        else:
            tagSet = asn1Spec.tagSet

        # untagged item?
        if not tagSet:
            substrate, isConstructed, isOctets = self.encodeValue(
                value, asn1Spec, encodeFun, **options
            )
            return substrate

        defMode = options.get('defMode', True)

        substrate = null

        # innermost (base) tag first; each outer tag wraps what came before
        for idx, singleTag in enumerate(tagSet.superTags):

            defModeOverride = defMode

            # base tag?
            if not idx:
                try:
                    substrate, isConstructed, isOctets = self.encodeValue(
                        value, asn1Spec, encodeFun, **options
                    )

                except error.PyAsn1Error:
                    exc = sys.exc_info()
                    raise error.PyAsn1Error(
                        'Error encoding %r: %s' % (value, exc[1]))

                if LOG:
                    LOG('encoded %svalue %s into %s' % (
                        isConstructed and 'constructed ' or '', value, substrate
                    ))

                if not substrate and isConstructed and options.get('ifNotEmpty', False):
                    return substrate

            if not isConstructed:
                # primitive encodings must use the definite-length form
                defModeOverride = True

                if LOG:
                    LOG('overridden encoding mode into definitive for primitive type')

            header = self.encodeTag(singleTag, isConstructed)

            if LOG:
                LOG('encoded %stag %s into %s' % (
                    isConstructed and 'constructed ' or '',
                    singleTag, debug.hexdump(ints2octs(header))))

            header += self.encodeLength(len(substrate), defModeOverride)

            if LOG:
                LOG('encoded %s octets (tag + payload) into %s' % (
                    len(substrate), debug.hexdump(ints2octs(header))))

            # payload may be octets or a tuple of ints, depending on the codec
            if isOctets:
                substrate = ints2octs(header) + substrate

                if not defModeOverride:
                    substrate += self.eooOctetsSubstrate

            else:
                substrate = header + substrate

                if not defModeOverride:
                    substrate += self.eooIntegerSubstrate

        if not isOctets:
            substrate = ints2octs(substrate)

        return substrate
+
+
class EndOfOctetsEncoder(AbstractItemEncoder):
    """Encode the end-of-octets sentinel -- an empty, primitive payload."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        return null, False, True
+
+
class BooleanEncoder(AbstractItemEncoder):
    """Encode ASN.1 BOOLEAN as a single payload octet (0x00 or 0x01)."""

    # BOOLEAN is primitive -- indefinite-length form is not applicable
    supportIndefLenMode = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        payload = (1,) if value else (0,)
        return payload, False, False
+
+
class IntegerEncoder(AbstractItemEncoder):
    """Encode ASN.1 INTEGER/ENUMERATED as big-endian two's complement."""

    supportIndefLenMode = False
    # when True, zero is encoded with an empty payload instead of one 0x00 octet
    supportCompactZero = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if value == 0:
            if LOG:
                LOG('encoding %spayload for zero INTEGER' % (
                    self.supportCompactZero and 'no ' or ''
                ))

            # de-facto way to encode zero
            if self.supportCompactZero:
                return (), False, False
            else:
                return (0,), False, False

        return to_bytes(int(value), signed=True), False, True
+
+
class BitStringEncoder(AbstractItemEncoder):
    """Encode ASN.1 BIT STRING, chunking into constructed form on demand."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is not None:
            # TODO: try to avoid ASN.1 schema instantiation
            value = asn1Spec.clone(value)

        valueLength = len(value)
        if valueLength % 8:
            # left-align the bit string within whole octets
            alignedValue = value << (8 - valueLength % 8)
        else:
            alignedValue = value

        maxChunkSize = options.get('maxChunkSize', 0)
        if not maxChunkSize or len(alignedValue) <= maxChunkSize * 8:
            substrate = alignedValue.asOctets()
            # leading payload octet carries the count of unused trailing bits
            return int2oct(len(substrate) * 8 - valueLength) + substrate, False, True

        if LOG:
            LOG('encoding into up to %s-octet chunks' % maxChunkSize)

        baseTag = value.tagSet.baseTag

        # strip off explicit tags
        if baseTag:
            tagSet = tag.TagSet(baseTag, baseTag)

        else:
            tagSet = tag.TagSet()

        alignedValue = alignedValue.clone(tagSet=tagSet)

        # emit the value as a sequence of inner BIT STRING chunks
        stop = 0
        substrate = null
        while stop < valueLength:
            start = stop
            stop = min(start + maxChunkSize * 8, valueLength)
            substrate += encodeFun(alignedValue[start:stop], asn1Spec, **options)

        return substrate, True, True
+
+
class OctetStringEncoder(AbstractItemEncoder):
    """Encode OCTET STRING (and character string types), optionally chunked."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):

        if asn1Spec is None:
            substrate = value.asOctets()

        elif not isOctetsType(value):
            substrate = asn1Spec.clone(value).asOctets()

        else:
            substrate = value

        maxChunkSize = options.get('maxChunkSize', 0)

        if not maxChunkSize or len(substrate) <= maxChunkSize:
            # fits into a single primitive encoding
            return substrate, False, True

        if LOG:
            LOG('encoding into up to %s-octet chunks' % maxChunkSize)

        # strip off explicit tags for inner chunks

        if asn1Spec is None:
            baseTag = value.tagSet.baseTag

            # strip off explicit tags
            if baseTag:
                tagSet = tag.TagSet(baseTag, baseTag)

            else:
                tagSet = tag.TagSet()

            asn1Spec = value.clone(tagSet=tagSet)

        elif not isOctetsType(value):
            baseTag = asn1Spec.tagSet.baseTag

            # strip off explicit tags
            if baseTag:
                tagSet = tag.TagSet(baseTag, baseTag)

            else:
                tagSet = tag.TagSet()

            asn1Spec = asn1Spec.clone(tagSet=tagSet)

        # constructed form: concatenation of inner OCTET STRING chunks
        pos = 0
        substrate = null

        while True:
            chunk = value[pos:pos + maxChunkSize]
            if not chunk:
                break

            substrate += encodeFun(chunk, asn1Spec, **options)
            pos += maxChunkSize

        return substrate, True, True
+
+
class NullEncoder(AbstractItemEncoder):
    """Encode ASN.1 NULL -- always an empty, primitive payload."""

    supportIndefLenMode = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        return null, False, True
+
+
class ObjectIdentifierEncoder(AbstractItemEncoder):
    """Encode OBJECT IDENTIFIER: fold the first two arcs, base-128 the rest."""

    supportIndefLenMode = False

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is not None:
            value = asn1Spec.clone(value)

        oid = value.asTuple()

        # Build the first pair
        try:
            first = oid[0]
            second = oid[1]

        except IndexError:
            raise error.PyAsn1Error('Short OID %s' % (value,))

        # first two arcs are folded into one sub-identifier: first*40 + second
        if 0 <= second <= 39:
            if first == 1:
                oid = (second + 40,) + oid[2:]
            elif first == 0:
                oid = (second,) + oid[2:]
            elif first == 2:
                oid = (second + 80,) + oid[2:]
            else:
                raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))

        elif first == 2:
            # arc 2 alone permits second arc values above 39
            oid = (second + 80,) + oid[2:]

        else:
            raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))

        octets = ()

        # Cycle through subIds
        for subOid in oid:
            if 0 <= subOid <= 127:
                # Optimize for the common case
                octets += (subOid,)

            elif subOid > 127:
                # Pack large Sub-Object IDs
                res = (subOid & 0x7f,)
                subOid >>= 7

                while subOid:
                    res = (0x80 | (subOid & 0x7f),) + res
                    subOid >>= 7

                # Add packed Sub-Object ID to resulted Object ID
                octets += res

            else:
                raise error.PyAsn1Error('Negative OID arc %s at %s' % (subOid, value))

        return octets, False, False
+
+
class RealEncoder(AbstractItemEncoder):
    """Encode ASN.1 REAL in binary (base 2/8/16) or character (base 10) form."""

    supportIndefLenMode = False
    binEncBase = 2  # set to None to choose encoding base automatically

    @staticmethod
    def _dropFloatingPoint(m, encbase, e):
        """Rescale (m, e) until the mantissa is an integer in *encbase*."""
        ms, es = 1, 1
        if m < 0:
            ms = -1  # mantissa sign

        if e < 0:
            es = -1  # exponent sign

        m *= ms

        # exponent is stored in units of the chosen base; fold the remainder
        # of the base-2 exponent into the mantissa
        if encbase == 8:
            m *= 2 ** (abs(e) % 3 * es)
            e = abs(e) // 3 * es

        elif encbase == 16:
            m *= 2 ** (abs(e) % 4 * es)
            e = abs(e) // 4 * es

        while True:
            if int(m) != m:
                m *= encbase
                e -= 1
                continue
            break

        return ms, int(m), encbase, e

    def _chooseEncBase(self, value):
        """Pick the binary base (2/8/16) that yields the shortest encoding."""
        m, b, e = value
        encBase = [2, 8, 16]
        if value.binEncBase in encBase:
            return self._dropFloatingPoint(m, value.binEncBase, e)

        elif self.binEncBase in encBase:
            return self._dropFloatingPoint(m, self.binEncBase, e)

        # auto choosing base 2/8/16
        mantissa = [m, m, m]
        exponent = [e, e, e]
        sign = 1
        encbase = 2
        e = float('inf')

        for i in range(3):
            (sign,
             mantissa[i],
             encBase[i],
             exponent[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponent[i])

            if abs(exponent[i]) < abs(e) or (abs(exponent[i]) == abs(e) and mantissa[i] < m):
                e = exponent[i]
                m = int(mantissa[i])
                encbase = encBase[i]

        if LOG:
            LOG('automatically chosen REAL encoding base %s, sign %s, mantissa %s, '
                'exponent %s' % (encbase, sign, m, e))

        return sign, m, encbase, e

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is not None:
            value = asn1Spec.clone(value)

        # special values get dedicated single-octet encodings
        if value.isPlusInf:
            return (0x40,), False, False

        if value.isMinusInf:
            return (0x41,), False, False

        m, b, e = value

        if not m:
            return null, False, True

        if b == 10:
            if LOG:
                LOG('encoding REAL into character form')

            return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), False, True

        elif b == 2:
            fo = 0x80  # binary encoding
            ms, m, encbase, e = self._chooseEncBase(value)

            if ms < 0:  # mantissa sign
                fo |= 0x40  # sign bit

            # exponent & mantissa normalization
            if encbase == 2:
                while m & 0x1 == 0:
                    m >>= 1
                    e += 1

            elif encbase == 8:
                while m & 0x7 == 0:
                    m >>= 3
                    e += 1
                fo |= 0x10

            else:  # encbase = 16
                while m & 0xf == 0:
                    m >>= 4
                    e += 1
                fo |= 0x20

            sf = 0  # scale factor

            while m & 0x1 == 0:
                m >>= 1
                sf += 1

            if sf > 3:
                raise error.PyAsn1Error('Scale factor overflow')  # bug if raised

            fo |= sf << 2
            eo = null
            if e == 0 or e == -1:
                eo = int2oct(e & 0xff)

            else:
                while e not in (0, -1):
                    eo = int2oct(e & 0xff) + eo
                    e >>= 8

                # pad so the two's-complement sign of the exponent survives
                if e == 0 and eo and oct2int(eo[0]) & 0x80:
                    eo = int2oct(0) + eo

                if e == -1 and eo and not (oct2int(eo[0]) & 0x80):
                    eo = int2oct(0xff) + eo

            n = len(eo)
            if n > 0xff:
                raise error.PyAsn1Error('Real exponent overflow')

            # low two bits of the first octet encode the exponent length
            if n == 1:
                pass

            elif n == 2:
                fo |= 1

            elif n == 3:
                fo |= 2

            else:
                fo |= 3
                eo = int2oct(n & 0xff) + eo

            po = null

            while m:
                po = int2oct(m & 0xff) + po
                m >>= 8

            substrate = int2oct(fo) + eo + po

            return substrate, False, True

        else:
            raise error.PyAsn1Error('Prohibited Real base %s' % b)
+
+
class SequenceEncoder(AbstractItemEncoder):
    """Encode SEQUENCE/SET from a pyasn1 object or a bare Python mapping."""

    omitEmptyOptionals = False

    # TODO: handling three flavors of input is too much -- split over codecs

    def encodeValue(self, value, asn1Spec, encodeFun, **options):

        substrate = null

        omitEmptyOptionals = options.get(
            'omitEmptyOptionals', self.omitEmptyOptionals)

        if LOG:
            LOG('%sencoding empty OPTIONAL components' % (
                omitEmptyOptionals and 'not ' or ''))

        if asn1Spec is None:
            # instance of ASN.1 schema
            inconsistency = value.isInconsistent
            if inconsistency:
                raise inconsistency

            namedTypes = value.componentType

            for idx, component in enumerate(value.values()):
                if namedTypes:
                    namedType = namedTypes[idx]

                    # skip absent OPTIONALs and components equal to DEFAULT
                    if namedType.isOptional and not component.isValue:
                        if LOG:
                            LOG('not encoding OPTIONAL component %r' % (namedType,))
                        continue

                    if namedType.isDefaulted and component == namedType.asn1Object:
                        if LOG:
                            LOG('not encoding DEFAULT component %r' % (namedType,))
                        continue

                    if omitEmptyOptionals:
                        options.update(ifNotEmpty=namedType.isOptional)

                # wrap open type blob if needed
                if namedTypes and namedType.openType:

                    wrapType = namedType.asn1Object

                    if wrapType.typeId in (
                            univ.SetOf.typeId, univ.SequenceOf.typeId):

                        substrate += encodeFun(
                            component, asn1Spec,
                            **dict(options, wrapType=wrapType.componentType))

                    else:
                        chunk = encodeFun(component, asn1Spec, **options)

                        if wrapType.isSameTypeWith(component):
                            substrate += chunk

                        else:
                            substrate += encodeFun(chunk, wrapType, **options)

                            if LOG:
                                LOG('wrapped with wrap type %r' % (wrapType,))

                else:
                    substrate += encodeFun(component, asn1Spec, **options)

        else:
            # bare Python value + ASN.1 schema
            for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):

                try:
                    component = value[namedType.name]

                except KeyError:
                    raise error.PyAsn1Error('Component name "%s" not found in %r' % (
                        namedType.name, value))

                # skip absent OPTIONALs and components equal to DEFAULT
                if namedType.isOptional and namedType.name not in value:
                    if LOG:
                        LOG('not encoding OPTIONAL component %r' % (namedType,))
                    continue

                if namedType.isDefaulted and component == namedType.asn1Object:
                    if LOG:
                        LOG('not encoding DEFAULT component %r' % (namedType,))
                    continue

                if omitEmptyOptionals:
                    options.update(ifNotEmpty=namedType.isOptional)

                componentSpec = namedType.asn1Object

                # wrap open type blob if needed
                if namedType.openType:

                    if componentSpec.typeId in (
                            univ.SetOf.typeId, univ.SequenceOf.typeId):

                        substrate += encodeFun(
                            component, componentSpec,
                            **dict(options, wrapType=componentSpec.componentType))

                    else:
                        chunk = encodeFun(component, componentSpec, **options)

                        if componentSpec.isSameTypeWith(component):
                            substrate += chunk

                        else:
                            substrate += encodeFun(chunk, componentSpec, **options)

                            if LOG:
                                LOG('wrapped with wrap type %r' % (componentSpec,))

                else:
                    substrate += encodeFun(component, componentSpec, **options)

        return substrate, True, True
+
+
class SequenceOfEncoder(AbstractItemEncoder):
    """Encode SEQUENCE OF / SET OF by concatenating encoded components."""

    def _encodeComponents(self, value, asn1Spec, encodeFun, **options):
        # Return a list of per-component substrates; kept as a list so that
        # canonical codecs (e.g. DER SET OF) can sort before joining.

        if asn1Spec is None:
            inconsistency = value.isInconsistent
            if inconsistency:
                raise inconsistency

        else:
            asn1Spec = asn1Spec.componentType

        chunks = []

        wrapType = options.pop('wrapType', None)

        for idx, component in enumerate(value):
            chunk = encodeFun(component, asn1Spec, **options)

            if (wrapType is not None and
                    not wrapType.isSameTypeWith(component)):
                # wrap encoded value with wrapper container (e.g. ANY)
                chunk = encodeFun(chunk, wrapType, **options)

                if LOG:
                    LOG('wrapped with wrap type %r' % (wrapType,))

            chunks.append(chunk)

        return chunks

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        chunks = self._encodeComponents(
            value, asn1Spec, encodeFun, **options)

        return null.join(chunks), True, True
+
+
class ChoiceEncoder(AbstractItemEncoder):
    """Encode CHOICE by delegating to its single selected alternative."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is None:
            component = value.getComponent()
        else:
            # bare Python mapping: exactly one alternative must be present
            names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
                     if namedType.name in value]
            if len(names) != 1:
                raise error.PyAsn1Error('%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', value))

            name = names[0]

            component = value[name]
            asn1Spec = asn1Spec[name]

        return encodeFun(component, asn1Spec, **options), True, True
+
+
class AnyEncoder(OctetStringEncoder):
    """Encode ANY -- the value is passed through as pre-encoded raw octets."""

    def encodeValue(self, value, asn1Spec, encodeFun, **options):
        if asn1Spec is None:
            value = value.asOctets()
        elif not isOctetsType(value):
            value = asn1Spec.clone(value).asOctets()

        # report "constructed" in indefinite mode so end-of-octets is appended
        return value, not options.get('defMode', True), True
+
+
+TAG_MAP = {
+ eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Integer.tagSet: IntegerEncoder(),
+ univ.BitString.tagSet: BitStringEncoder(),
+ univ.OctetString.tagSet: OctetStringEncoder(),
+ univ.Null.tagSet: NullEncoder(),
+ univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
+ univ.Enumerated.tagSet: IntegerEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SequenceOf.tagSet: SequenceOfEncoder(),
+ univ.SetOf.tagSet: SequenceOfEncoder(),
+ univ.Choice.tagSet: ChoiceEncoder(),
+ # character string types
+ char.UTF8String.tagSet: OctetStringEncoder(),
+ char.NumericString.tagSet: OctetStringEncoder(),
+ char.PrintableString.tagSet: OctetStringEncoder(),
+ char.TeletexString.tagSet: OctetStringEncoder(),
+ char.VideotexString.tagSet: OctetStringEncoder(),
+ char.IA5String.tagSet: OctetStringEncoder(),
+ char.GraphicString.tagSet: OctetStringEncoder(),
+ char.VisibleString.tagSet: OctetStringEncoder(),
+ char.GeneralString.tagSet: OctetStringEncoder(),
+ char.UniversalString.tagSet: OctetStringEncoder(),
+ char.BMPString.tagSet: OctetStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
+ useful.GeneralizedTime.tagSet: OctetStringEncoder(),
+ useful.UTCTime.tagSet: OctetStringEncoder()
+}
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+TYPE_MAP = {
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Integer.typeId: IntegerEncoder(),
+ univ.BitString.typeId: BitStringEncoder(),
+ univ.OctetString.typeId: OctetStringEncoder(),
+ univ.Null.typeId: NullEncoder(),
+ univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
+ univ.Enumerated.typeId: IntegerEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SequenceEncoder(),
+ univ.SetOf.typeId: SequenceOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder(),
+ univ.Choice.typeId: ChoiceEncoder(),
+ univ.Any.typeId: AnyEncoder(),
+ # character string types
+ char.UTF8String.typeId: OctetStringEncoder(),
+ char.NumericString.typeId: OctetStringEncoder(),
+ char.PrintableString.typeId: OctetStringEncoder(),
+ char.TeletexString.typeId: OctetStringEncoder(),
+ char.VideotexString.typeId: OctetStringEncoder(),
+ char.IA5String.typeId: OctetStringEncoder(),
+ char.GraphicString.typeId: OctetStringEncoder(),
+ char.VisibleString.typeId: OctetStringEncoder(),
+ char.GeneralString.typeId: OctetStringEncoder(),
+ char.UniversalString.typeId: OctetStringEncoder(),
+ char.BMPString.typeId: OctetStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: OctetStringEncoder(),
+ useful.GeneralizedTime.typeId: OctetStringEncoder(),
+ useful.UTCTime.typeId: OctetStringEncoder()
+}
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
class SingleItemEncoder(object):
    """Encode a single pyasn1 object by dispatching to an item encoder."""

    # subclass hooks: CER/DER pin these to enforce canonical forms
    fixedDefLengthMode = None
    fixedChunkSize = None

    TAG_MAP = TAG_MAP
    TYPE_MAP = TYPE_MAP

    def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
        self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
        self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP

    def __call__(self, value, asn1Spec=None, **options):
        try:
            if asn1Spec is None:
                typeId = value.typeId
            else:
                typeId = asn1Spec.typeId

        except AttributeError:
            raise error.PyAsn1Error('Value %r is not ASN.1 type instance '
                                    'and "asn1Spec" not given' % (value,))

        if LOG:
            LOG('encoder called in %sdef mode, chunk size %s for type %s, '
                'value:\n%s' % (not options.get('defMode', True) and 'in' or '',
                                options.get('maxChunkSize', 0),
                                asn1Spec is None and value.prettyPrintType() or
                                asn1Spec.prettyPrintType(), value))

        if self.fixedDefLengthMode is not None:
            options.update(defMode=self.fixedDefLengthMode)

        if self.fixedChunkSize is not None:
            options.update(maxChunkSize=self.fixedChunkSize)

        try:
            # fast path: ambiguous & non-ambiguous types keyed by type ID
            concreteEncoder = self._typeMap[typeId]

            if LOG:
                LOG('using value codec %s chosen by type ID '
                    '%s' % (concreteEncoder.__class__.__name__, typeId))

        except KeyError:
            if asn1Spec is None:
                tagSet = value.tagSet
            else:
                tagSet = asn1Spec.tagSet

            # use base type for codec lookup to recover untagged types
            baseTagSet = tag.TagSet(tagSet.baseTag, tagSet.baseTag)

            try:
                concreteEncoder = self._tagMap[baseTagSet]

            except KeyError:
                raise error.PyAsn1Error('No encoder for %r (%s)' % (value, tagSet))

            if LOG:
                LOG('using value codec %s chosen by tagSet '
                    '%s' % (concreteEncoder.__class__.__name__, tagSet))

        substrate = concreteEncoder.encode(value, asn1Spec, self, **options)

        if LOG:
            LOG('codec %s built %s octets of substrate: %s\nencoder '
                'completed' % (concreteEncoder, len(substrate),
                               debug.hexdump(substrate)))

        return substrate
+
+
class Encoder(object):
    """Encoder entry point: turns a pyasn1 object into a BER octet stream.

    CER/DER codecs subclass this, substituting SINGLE_ITEM_ENCODER.
    """
    SINGLE_ITEM_ENCODER = SingleItemEncoder

    def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **options):
        self._singleItemEncoder = self.SINGLE_ITEM_ENCODER(
            tagMap=tagMap, typeMap=typeMap, **options
        )

    def __call__(self, pyObject, asn1Spec=None, **options):
        return self._singleItemEncoder(
            pyObject, asn1Spec=asn1Spec, **options)
+
+
+#: Turns ASN.1 object into BER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a BER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: A Python or pyasn1 object to encode. If Python object is given, `asnSpec`
+#: parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: defMode: :py:class:`bool`
+#: If :obj:`False`, produces indefinite length encoding
+#:
+#: maxChunkSize: :py:class:`int`
+#: Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size)
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: Given ASN.1 object encoded into BER octetstream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into BER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+#: Encode ASN.1 value object into BER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+encode = Encoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/ber/eoo.py b/contrib/python/pyasn1/py3/pyasn1/codec/ber/eoo.py
new file mode 100644
index 0000000000..8c91a3d285
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/ber/eoo.py
@@ -0,0 +1,28 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1.type import base
+from pyasn1.type import tag
+
+__all__ = ['endOfOctets']
+
+
+class EndOfOctets(base.SimpleAsn1Type):
+ defaultValue = 0
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00)
+ )
+
+ _instance = None
+
+ def __new__(cls, *args, **kwargs):
+ if cls._instance is None:
+ cls._instance = object.__new__(cls, *args, **kwargs)
+
+ return cls._instance
+
+
+endOfOctets = EndOfOctets()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/cer/__init__.py b/contrib/python/pyasn1/py3/pyasn1/codec/cer/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/cer/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/cer/decoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/cer/decoder.py
new file mode 100644
index 0000000000..ed6391ff35
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/cer/decoder.py
@@ -0,0 +1,146 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.streaming import readFromStream
+from pyasn1.codec.ber import decoder
+from pyasn1.compat.octets import oct2int
+from pyasn1.type import univ
+
+__all__ = ['decode', 'StreamingDecoder']
+
+SubstrateUnderrunError = error.SubstrateUnderrunError
+
+
+class BooleanPayloadDecoder(decoder.AbstractSimplePayloadDecoder):
+ protoComponent = univ.Boolean(0)
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if length != 1:
+ raise error.PyAsn1Error('Not single-octet Boolean payload')
+
+ for chunk in readFromStream(substrate, length, options):
+ if isinstance(chunk, SubstrateUnderrunError):
+ yield chunk
+
+ byte = oct2int(chunk[0])
+
+ # CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
+ # BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
+ # in https://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
+ if byte == 0xff:
+ value = 1
+
+ elif byte == 0x00:
+ value = 0
+
+ else:
+ raise error.PyAsn1Error('Unexpected Boolean payload: %s' % byte)
+
+ yield self._createComponent(asn1Spec, tagSet, value, **options)
+
+
+# TODO: prohibit non-canonical encoding
+BitStringPayloadDecoder = decoder.BitStringPayloadDecoder
+OctetStringPayloadDecoder = decoder.OctetStringPayloadDecoder
+RealPayloadDecoder = decoder.RealPayloadDecoder
+
+TAG_MAP = decoder.TAG_MAP.copy()
+TAG_MAP.update(
+ {univ.Boolean.tagSet: BooleanPayloadDecoder(),
+ univ.BitString.tagSet: BitStringPayloadDecoder(),
+ univ.OctetString.tagSet: OctetStringPayloadDecoder(),
+ univ.Real.tagSet: RealPayloadDecoder()}
+)
+
+TYPE_MAP = decoder.TYPE_MAP.copy()
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in TAG_MAP.values():
+ if typeDecoder.protoComponent is not None:
+ typeId = typeDecoder.protoComponent.__class__.typeId
+ if typeId is not None and typeId not in TYPE_MAP:
+ TYPE_MAP[typeId] = typeDecoder
+
+
+class SingleItemDecoder(decoder.SingleItemDecoder):
+ __doc__ = decoder.SingleItemDecoder.__doc__
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+
+class StreamingDecoder(decoder.StreamingDecoder):
+ __doc__ = decoder.StreamingDecoder.__doc__
+
+ SINGLE_ITEM_DECODER = SingleItemDecoder
+
+
+class Decoder(decoder.Decoder):
+ __doc__ = decoder.Decoder.__doc__
+
+ STREAMING_DECODER = StreamingDecoder
+
+
+#: Turns CER octet stream into an ASN.1 object.
+#:
+#: Takes CER octet-stream and decode it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: CER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. Most common reason for
+#: it to require is that ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from CER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode CER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode CER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/cer/encoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/cer/encoder.py
new file mode 100644
index 0000000000..0a198e3fdf
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/cer/encoder.py
@@ -0,0 +1,327 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.ber import encoder
+from pyasn1.compat.octets import str2octs, null
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['Encoder', 'encode']
+
+
+class BooleanEncoder(encoder.IntegerEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if value == 0:
+ substrate = (0,)
+ else:
+ substrate = (255,)
+ return substrate, False, False
+
+
+class RealEncoder(encoder.RealEncoder):
+ def _chooseEncBase(self, value):
+ m, b, e = value
+ return self._dropFloatingPoint(m, b, e)
+
+
+# NOTE: a specialized GeneralStringEncoder could be added here (not implemented)
+
+class TimeEncoderMixIn(object):
+ Z_CHAR = ord('Z')
+ PLUS_CHAR = ord('+')
+ MINUS_CHAR = ord('-')
+ COMMA_CHAR = ord(',')
+ DOT_CHAR = ord('.')
+ ZERO_CHAR = ord('0')
+
+ MIN_LENGTH = 12
+ MAX_LENGTH = 19
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ # CER encoding constraints:
+ # - minutes are mandatory, seconds are optional
+ # - sub-seconds must NOT be zero / no meaningless zeros
+ # - no hanging fraction dot
+ # - time in UTC (Z)
+ # - only dot is allowed for fractions
+
+ if asn1Spec is not None:
+ value = asn1Spec.clone(value)
+
+ numbers = value.asNumbers()
+
+ if self.PLUS_CHAR in numbers or self.MINUS_CHAR in numbers:
+ raise error.PyAsn1Error('Must be UTC time: %r' % value)
+
+ if numbers[-1] != self.Z_CHAR:
+ raise error.PyAsn1Error('Missing "Z" time zone specifier: %r' % value)
+
+ if self.COMMA_CHAR in numbers:
+ raise error.PyAsn1Error('Comma in fractions disallowed: %r' % value)
+
+ if self.DOT_CHAR in numbers:
+
+ isModified = False
+
+ numbers = list(numbers)
+
+ searchIndex = min(numbers.index(self.DOT_CHAR) + 4, len(numbers) - 1)
+
+ while numbers[searchIndex] != self.DOT_CHAR:
+ if numbers[searchIndex] == self.ZERO_CHAR:
+ del numbers[searchIndex]
+ isModified = True
+
+ searchIndex -= 1
+
+ searchIndex += 1
+
+ if searchIndex < len(numbers):
+ if numbers[searchIndex] == self.Z_CHAR:
+                    # drop hanging dot (fraction separator left with no digits)
+ del numbers[searchIndex - 1]
+ isModified = True
+
+ if isModified:
+ value = value.clone(numbers)
+
+ if not self.MIN_LENGTH < len(numbers) < self.MAX_LENGTH:
+ raise error.PyAsn1Error('Length constraint violated: %r' % value)
+
+ options.update(maxChunkSize=1000)
+
+ return encoder.OctetStringEncoder.encodeValue(
+ self, value, asn1Spec, encodeFun, **options
+ )
+
+
+class GeneralizedTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
+ MIN_LENGTH = 12
+ MAX_LENGTH = 20
+
+
+class UTCTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
+ MIN_LENGTH = 10
+ MAX_LENGTH = 14
+
+
+class SetOfEncoder(encoder.SequenceOfEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ # sort by serialised and padded components
+ if len(chunks) > 1:
+ zero = str2octs('\x00')
+ maxLen = max(map(len, chunks))
+ paddedChunks = [
+ (x.ljust(maxLen, zero), x) for x in chunks
+ ]
+ paddedChunks.sort(key=lambda x: x[0])
+
+ chunks = [x[1] for x in paddedChunks]
+
+ return null.join(chunks), True, True
+
+
+class SequenceOfEncoder(encoder.SequenceOfEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ if options.get('ifNotEmpty', False) and not len(value):
+ return null, True, True
+
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ return null.join(chunks), True, True
+
+
+class SetEncoder(encoder.SequenceEncoder):
+ @staticmethod
+ def _componentSortKey(componentAndType):
+ """Sort SET components by tag
+
+ Sort regardless of the Choice value (static sort)
+ """
+ component, asn1Spec = componentAndType
+
+ if asn1Spec is None:
+ asn1Spec = component
+
+ if asn1Spec.typeId == univ.Choice.typeId and not asn1Spec.tagSet:
+ if asn1Spec.tagSet:
+ return asn1Spec.tagSet
+ else:
+ return asn1Spec.componentType.minTagSet
+ else:
+ return asn1Spec.tagSet
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ substrate = null
+
+ comps = []
+ compsMap = {}
+
+ if asn1Spec is None:
+ # instance of ASN.1 schema
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+
+ for idx, component in enumerate(value.values()):
+ if namedTypes:
+ namedType = namedTypes[idx]
+
+ if namedType.isOptional and not component.isValue:
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ continue
+
+ compsMap[id(component)] = namedType
+
+ else:
+ compsMap[id(component)] = None
+
+ comps.append((component, asn1Spec))
+
+ else:
+ # bare Python value + ASN.1 schema
+ for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):
+
+ try:
+ component = value[namedType.name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Component name "%s" not found in %r' % (namedType.name, value))
+
+ if namedType.isOptional and namedType.name not in value:
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ continue
+
+ compsMap[id(component)] = namedType
+ comps.append((component, asn1Spec[idx]))
+
+ for comp, compType in sorted(comps, key=self._componentSortKey):
+ namedType = compsMap[id(comp)]
+
+ if namedType:
+ options.update(ifNotEmpty=namedType.isOptional)
+
+ chunk = encodeFun(comp, compType, **options)
+
+ # wrap open type blob if needed
+ if namedType and namedType.openType:
+ wrapType = namedType.asn1Object
+ if wrapType.tagSet and not wrapType.isSameTypeWith(comp):
+ chunk = encodeFun(chunk, wrapType, **options)
+
+ substrate += chunk
+
+ return substrate, True, True
+
+
+class SequenceEncoder(encoder.SequenceEncoder):
+ omitEmptyOptionals = True
+
+
+TAG_MAP = encoder.TAG_MAP.copy()
+
+TAG_MAP.update({
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
+ useful.UTCTime.tagSet: UTCTimeEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SetOf.tagSet: SetOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder()
+})
+
+TYPE_MAP = encoder.TYPE_MAP.copy()
+
+TYPE_MAP.update({
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ useful.GeneralizedTime.typeId: GeneralizedTimeEncoder(),
+ useful.UTCTime.typeId: UTCTimeEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SetEncoder(),
+ univ.SetOf.typeId: SetOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder()
+})
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemEncoder(encoder.SingleItemEncoder):
+ fixedDefLengthMode = False
+ fixedChunkSize = 1000
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+
+class Encoder(encoder.Encoder):
+ SINGLE_ITEM_ENCODER = SingleItemEncoder
+
+
+#: Turns ASN.1 object into CER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a CER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: A Python or pyasn1 object to encode. If Python object is given, `asnSpec`
+#: parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#:     Given ASN.1 object encoded into CER octet-stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into CER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
+#:
+#: Encode ASN.1 value object into CER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
+#:
+encode = Encoder()
+
+# EncoderFactory queries class instance and builds a map of tags -> encoders
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/der/__init__.py b/contrib/python/pyasn1/py3/pyasn1/codec/der/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/der/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/der/decoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/der/decoder.py
new file mode 100644
index 0000000000..215b72d9fd
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/der/decoder.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1.codec.cer import decoder
+from pyasn1.type import univ
+
+__all__ = ['decode', 'StreamingDecoder']
+
+
+class BitStringPayloadDecoder(decoder.BitStringPayloadDecoder):
+ supportConstructedForm = False
+
+
+class OctetStringPayloadDecoder(decoder.OctetStringPayloadDecoder):
+ supportConstructedForm = False
+
+
+# TODO: prohibit non-canonical encoding
+RealPayloadDecoder = decoder.RealPayloadDecoder
+
+TAG_MAP = decoder.TAG_MAP.copy()
+TAG_MAP.update(
+ {univ.BitString.tagSet: BitStringPayloadDecoder(),
+ univ.OctetString.tagSet: OctetStringPayloadDecoder(),
+ univ.Real.tagSet: RealPayloadDecoder()}
+)
+
+TYPE_MAP = decoder.TYPE_MAP.copy()
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in TAG_MAP.values():
+ if typeDecoder.protoComponent is not None:
+ typeId = typeDecoder.protoComponent.__class__.typeId
+ if typeId is not None and typeId not in TYPE_MAP:
+ TYPE_MAP[typeId] = typeDecoder
+
+
+class SingleItemDecoder(decoder.SingleItemDecoder):
+ __doc__ = decoder.SingleItemDecoder.__doc__
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+ supportIndefLength = False
+
+
+class StreamingDecoder(decoder.StreamingDecoder):
+ __doc__ = decoder.StreamingDecoder.__doc__
+
+ SINGLE_ITEM_DECODER = SingleItemDecoder
+
+
+class Decoder(decoder.Decoder):
+ __doc__ = decoder.Decoder.__doc__
+
+ STREAMING_DECODER = StreamingDecoder
+
+
+#: Turns DER octet stream into an ASN.1 object.
+#:
+#: Takes DER octet-stream and decode it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: DER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. Most common reason for
+#: it to require is that ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from DER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode DER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode DER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/der/encoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/der/encoder.py
new file mode 100644
index 0000000000..c231edc164
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/der/encoder.py
@@ -0,0 +1,122 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.cer import encoder
+from pyasn1.type import univ
+
+__all__ = ['Encoder', 'encode']
+
+
+class SetEncoder(encoder.SetEncoder):
+ @staticmethod
+ def _componentSortKey(componentAndType):
+ """Sort SET components by tag
+
+ Sort depending on the actual Choice value (dynamic sort)
+ """
+ component, asn1Spec = componentAndType
+
+ if asn1Spec is None:
+ compType = component
+ else:
+ compType = asn1Spec
+
+ if compType.typeId == univ.Choice.typeId and not compType.tagSet:
+ if asn1Spec is None:
+ return component.getComponent().tagSet
+ else:
+ # TODO: move out of sorting key function
+ names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
+ if namedType.name in component]
+ if len(names) != 1:
+ raise error.PyAsn1Error(
+ '%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', component))
+
+ # TODO: support nested CHOICE ordering
+ return asn1Spec[names[0]].tagSet
+
+ else:
+ return compType.tagSet
+
+
+TAG_MAP = encoder.TAG_MAP.copy()
+
+TAG_MAP.update({
+ # Set & SetOf have same tags
+ univ.Set.tagSet: SetEncoder()
+})
+
+TYPE_MAP = encoder.TYPE_MAP.copy()
+
+TYPE_MAP.update({
+ # Set & SetOf have same tags
+ univ.Set.typeId: SetEncoder()
+})
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemEncoder(encoder.SingleItemEncoder):
+ fixedDefLengthMode = True
+ fixedChunkSize = 0
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+
+class Encoder(encoder.Encoder):
+ SINGLE_ITEM_ENCODER = SingleItemEncoder
+
+
+#: Turns ASN.1 object into DER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a DER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: A Python or pyasn1 object to encode. If Python object is given, `asnSpec`
+#: parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#:     Given ASN.1 object encoded into DER octet-stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into DER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+#: Encode ASN.1 value object into DER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+encode = Encoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/native/__init__.py b/contrib/python/pyasn1/py3/pyasn1/codec/native/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/native/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/native/decoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/native/decoder.py
new file mode 100644
index 0000000000..e23f40ca4b
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/native/decoder.py
@@ -0,0 +1,238 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.compat import _MISSING
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['decode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
+
+
+class AbstractScalarPayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ return asn1Spec.clone(pyObject)
+
+
+class BitStringPayloadDecoder(AbstractScalarPayloadDecoder):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ return asn1Spec.clone(univ.BitString.fromBinaryString(pyObject))
+
+
+class SequenceOrSetPayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ componentsTypes = asn1Spec.componentType
+
+ for field in asn1Value:
+ if field in pyObject:
+ asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
+
+ return asn1Value
+
+
+class SequenceOfOrSetOfPayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ for pyValue in pyObject:
+ asn1Value.append(decodeFun(pyValue, asn1Spec.componentType), **options)
+
+ return asn1Value
+
+
+class ChoicePayloadDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ componentsTypes = asn1Spec.componentType
+
+ for field in pyObject:
+ if field in componentsTypes:
+ asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
+ break
+
+ return asn1Value
+
+
+TAG_MAP = {
+ univ.Integer.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Boolean.tagSet: AbstractScalarPayloadDecoder(),
+ univ.BitString.tagSet: BitStringPayloadDecoder(),
+ univ.OctetString.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Null.tagSet: AbstractScalarPayloadDecoder(),
+ univ.ObjectIdentifier.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Enumerated.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Real.tagSet: AbstractScalarPayloadDecoder(),
+ univ.Sequence.tagSet: SequenceOrSetPayloadDecoder(), # conflicts with SequenceOf
+ univ.Set.tagSet: SequenceOrSetPayloadDecoder(), # conflicts with SetOf
+ univ.Choice.tagSet: ChoicePayloadDecoder(), # conflicts with Any
+ # character string types
+ char.UTF8String.tagSet: AbstractScalarPayloadDecoder(),
+ char.NumericString.tagSet: AbstractScalarPayloadDecoder(),
+ char.PrintableString.tagSet: AbstractScalarPayloadDecoder(),
+ char.TeletexString.tagSet: AbstractScalarPayloadDecoder(),
+ char.VideotexString.tagSet: AbstractScalarPayloadDecoder(),
+ char.IA5String.tagSet: AbstractScalarPayloadDecoder(),
+ char.GraphicString.tagSet: AbstractScalarPayloadDecoder(),
+ char.VisibleString.tagSet: AbstractScalarPayloadDecoder(),
+ char.GeneralString.tagSet: AbstractScalarPayloadDecoder(),
+ char.UniversalString.tagSet: AbstractScalarPayloadDecoder(),
+ char.BMPString.tagSet: AbstractScalarPayloadDecoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: AbstractScalarPayloadDecoder(),
+ useful.GeneralizedTime.tagSet: AbstractScalarPayloadDecoder(),
+ useful.UTCTime.tagSet: AbstractScalarPayloadDecoder()
+}
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+TYPE_MAP = {
+ univ.Integer.typeId: AbstractScalarPayloadDecoder(),
+ univ.Boolean.typeId: AbstractScalarPayloadDecoder(),
+ univ.BitString.typeId: BitStringPayloadDecoder(),
+ univ.OctetString.typeId: AbstractScalarPayloadDecoder(),
+ univ.Null.typeId: AbstractScalarPayloadDecoder(),
+ univ.ObjectIdentifier.typeId: AbstractScalarPayloadDecoder(),
+ univ.Enumerated.typeId: AbstractScalarPayloadDecoder(),
+ univ.Real.typeId: AbstractScalarPayloadDecoder(),
+ # ambiguous base types
+ univ.Set.typeId: SequenceOrSetPayloadDecoder(),
+ univ.SetOf.typeId: SequenceOfOrSetOfPayloadDecoder(),
+ univ.Sequence.typeId: SequenceOrSetPayloadDecoder(),
+ univ.SequenceOf.typeId: SequenceOfOrSetOfPayloadDecoder(),
+ univ.Choice.typeId: ChoicePayloadDecoder(),
+ univ.Any.typeId: AbstractScalarPayloadDecoder(),
+ # character string types
+ char.UTF8String.typeId: AbstractScalarPayloadDecoder(),
+ char.NumericString.typeId: AbstractScalarPayloadDecoder(),
+ char.PrintableString.typeId: AbstractScalarPayloadDecoder(),
+ char.TeletexString.typeId: AbstractScalarPayloadDecoder(),
+ char.VideotexString.typeId: AbstractScalarPayloadDecoder(),
+ char.IA5String.typeId: AbstractScalarPayloadDecoder(),
+ char.GraphicString.typeId: AbstractScalarPayloadDecoder(),
+ char.VisibleString.typeId: AbstractScalarPayloadDecoder(),
+ char.GeneralString.typeId: AbstractScalarPayloadDecoder(),
+ char.UniversalString.typeId: AbstractScalarPayloadDecoder(),
+ char.BMPString.typeId: AbstractScalarPayloadDecoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: AbstractScalarPayloadDecoder(),
+ useful.GeneralizedTime.typeId: AbstractScalarPayloadDecoder(),
+ useful.UTCTime.typeId: AbstractScalarPayloadDecoder()
+}
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemDecoder(object):
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+ def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
+ self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
+ self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP
+
+ def __call__(self, pyObject, asn1Spec, **options):
+
+ if LOG:
+ debug.scope.push(type(pyObject).__name__)
+ LOG('decoder called at scope %s, working with '
+ 'type %s' % (debug.scope, type(pyObject).__name__))
+
+ if asn1Spec is None or not isinstance(asn1Spec, base.Asn1Item):
+ raise error.PyAsn1Error(
+ 'asn1Spec is not valid (should be an instance of an ASN.1 '
+ 'Item, not %s)' % asn1Spec.__class__.__name__)
+
+ try:
+ valueDecoder = self._typeMap[asn1Spec.typeId]
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(asn1Spec.tagSet.baseTag, asn1Spec.tagSet.baseTag)
+
+ try:
+ valueDecoder = self._tagMap[baseTagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('Unknown ASN.1 tag %s' % asn1Spec.tagSet)
+
+ if LOG:
+ LOG('calling decoder %s on Python type %s '
+ '<%s>' % (type(valueDecoder).__name__,
+ type(pyObject).__name__, repr(pyObject)))
+
+ value = valueDecoder(pyObject, asn1Spec, self, **options)
+
+ if LOG:
+ LOG('decoder %s produced ASN.1 type %s '
+ '<%s>' % (type(valueDecoder).__name__,
+ type(value).__name__, repr(value)))
+ debug.scope.pop()
+
+ return value
+
+
+class Decoder(object):
+ SINGLE_ITEM_DECODER = SingleItemDecoder
+
+ def __init__(self, **options):
+ self._singleItemDecoder = self.SINGLE_ITEM_DECODER(**options)
+
+ def __call__(self, pyObject, asn1Spec=None, **kwargs):
+ return self._singleItemDecoder(pyObject, asn1Spec=asn1Spec, **kwargs)
+
+
+#: Turns Python objects of built-in types into ASN.1 objects.
+#:
+#: Takes Python objects of built-in types and turns them into a tree of
+#: ASN.1 objects (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: pyObject: :py:class:`object`
+#: A scalar or nested Python objects
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. It is required
+#: for successful interpretation of Python objects mapping into their ASN.1
+#: representations.
+#:
+#: Returns
+#: -------
+#: : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A scalar or constructed pyasn1 object
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode native Python object into ASN.1 objects with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode([1, 2, 3], asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/native/encoder.py b/contrib/python/pyasn1/py3/pyasn1/codec/native/encoder.py
new file mode 100644
index 0000000000..a0d9f1c444
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/native/encoder.py
@@ -0,0 +1,274 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from collections import OrderedDict
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.compat import _MISSING
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['encode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER)
+
+
+class AbstractItemEncoder(object):
+ def encode(self, value, encodeFun, **options):
+ raise error.PyAsn1Error('Not implemented')
+
+
+class BooleanEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return bool(value)
+
+
+class IntegerEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return int(value)
+
+
+class BitStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class OctetStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return value.asOctets()
+
+
+class TextStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class NullEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return None
+
+
+class ObjectIdentifierEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class RealEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return float(value)
+
+
+class SetEncoder(AbstractItemEncoder):
+ protoDict = dict
+
+ def encode(self, value, encodeFun, **options):
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+ substrate = self.protoDict()
+
+ for idx, (key, subValue) in enumerate(value.items()):
+ if namedTypes and namedTypes[idx].isOptional and not value[idx].isValue:
+ continue
+ substrate[key] = encodeFun(subValue, **options)
+ return substrate
+
+
+class SequenceEncoder(SetEncoder):
+ protoDict = OrderedDict
+
+
+class SequenceOfEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+ return [encodeFun(x, **options) for x in value]
+
+
+class ChoiceEncoder(SequenceEncoder):
+ pass
+
+
+class AnyEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return value.asOctets()
+
+
+TAG_MAP = {
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Integer.tagSet: IntegerEncoder(),
+ univ.BitString.tagSet: BitStringEncoder(),
+ univ.OctetString.tagSet: OctetStringEncoder(),
+ univ.Null.tagSet: NullEncoder(),
+ univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
+ univ.Enumerated.tagSet: IntegerEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SequenceOf.tagSet: SequenceOfEncoder(),
+ univ.SetOf.tagSet: SequenceOfEncoder(),
+ univ.Choice.tagSet: ChoiceEncoder(),
+ # character string types
+ char.UTF8String.tagSet: TextStringEncoder(),
+ char.NumericString.tagSet: TextStringEncoder(),
+ char.PrintableString.tagSet: TextStringEncoder(),
+ char.TeletexString.tagSet: TextStringEncoder(),
+ char.VideotexString.tagSet: TextStringEncoder(),
+ char.IA5String.tagSet: TextStringEncoder(),
+ char.GraphicString.tagSet: TextStringEncoder(),
+ char.VisibleString.tagSet: TextStringEncoder(),
+ char.GeneralString.tagSet: TextStringEncoder(),
+ char.UniversalString.tagSet: TextStringEncoder(),
+ char.BMPString.tagSet: TextStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
+ useful.GeneralizedTime.tagSet: OctetStringEncoder(),
+ useful.UTCTime.tagSet: OctetStringEncoder()
+}
+
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+TYPE_MAP = {
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Integer.typeId: IntegerEncoder(),
+ univ.BitString.typeId: BitStringEncoder(),
+ univ.OctetString.typeId: OctetStringEncoder(),
+ univ.Null.typeId: NullEncoder(),
+ univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
+ univ.Enumerated.typeId: IntegerEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SetEncoder(),
+ univ.SetOf.typeId: SequenceOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder(),
+ univ.Choice.typeId: ChoiceEncoder(),
+ univ.Any.typeId: AnyEncoder(),
+ # character string types
+ char.UTF8String.typeId: OctetStringEncoder(),
+ char.NumericString.typeId: OctetStringEncoder(),
+ char.PrintableString.typeId: OctetStringEncoder(),
+ char.TeletexString.typeId: OctetStringEncoder(),
+ char.VideotexString.typeId: OctetStringEncoder(),
+ char.IA5String.typeId: OctetStringEncoder(),
+ char.GraphicString.typeId: OctetStringEncoder(),
+ char.VisibleString.typeId: OctetStringEncoder(),
+ char.GeneralString.typeId: OctetStringEncoder(),
+ char.UniversalString.typeId: OctetStringEncoder(),
+ char.BMPString.typeId: OctetStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: OctetStringEncoder(),
+ useful.GeneralizedTime.typeId: OctetStringEncoder(),
+ useful.UTCTime.typeId: OctetStringEncoder()
+}
+
+# deprecated aliases, https://github.com/pyasn1/pyasn1/issues/9
+tagMap = TAG_MAP
+typeMap = TYPE_MAP
+
+
+class SingleItemEncoder(object):
+
+ TAG_MAP = TAG_MAP
+ TYPE_MAP = TYPE_MAP
+
+ def __init__(self, tagMap=_MISSING, typeMap=_MISSING, **ignored):
+ self._tagMap = tagMap if tagMap is not _MISSING else self.TAG_MAP
+ self._typeMap = typeMap if typeMap is not _MISSING else self.TYPE_MAP
+
+ def __call__(self, value, **options):
+ if not isinstance(value, base.Asn1Item):
+ raise error.PyAsn1Error(
+ 'value is not valid (should be an instance of an ASN.1 Item)')
+
+ if LOG:
+ debug.scope.push(type(value).__name__)
+ LOG('encoder called for type %s '
+ '<%s>' % (type(value).__name__, value.prettyPrint()))
+
+ tagSet = value.tagSet
+
+ try:
+ concreteEncoder = self._typeMap[value.typeId]
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(
+ value.tagSet.baseTag, value.tagSet.baseTag)
+
+ try:
+ concreteEncoder = self._tagMap[baseTagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('No encoder for %s' % (value,))
+
+ if LOG:
+ LOG('using value codec %s chosen by '
+ '%s' % (concreteEncoder.__class__.__name__, tagSet))
+
+ pyObject = concreteEncoder.encode(value, self, **options)
+
+ if LOG:
+ LOG('encoder %s produced: '
+ '%s' % (type(concreteEncoder).__name__, repr(pyObject)))
+ debug.scope.pop()
+
+ return pyObject
+
+
+class Encoder(object):
+ SINGLE_ITEM_ENCODER = SingleItemEncoder
+
+ def __init__(self, **options):
+ self._singleItemEncoder = self.SINGLE_ITEM_ENCODER(**options)
+
+ def __call__(self, pyObject, asn1Spec=None, **options):
+ return self._singleItemEncoder(
+ pyObject, asn1Spec=asn1Spec, **options)
+
+
+#: Turns ASN.1 object into a Python built-in type object(s).
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a Python built-in type or a tree
+#: of those.
+#:
+#: One exception is that instead of :py:class:`dict`, the :py:class:`OrderedDict`
+#: is used to preserve ordering of the components in ASN.1 SEQUENCE.
+#:
+#: Parameters
+#: ----------
+#: asn1Value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: pyasn1 object to encode (or a tree of them)
+#:
+#: Returns
+#: -------
+#: : :py:class:`object`
+#: Python built-in type instance (or a tree of them)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode ASN.1 value object into native Python types
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: [1, 2, 3]
+#:
+encode = SingleItemEncoder()
diff --git a/contrib/python/pyasn1/py3/pyasn1/codec/streaming.py b/contrib/python/pyasn1/py3/pyasn1/codec/streaming.py
new file mode 100644
index 0000000000..231681c177
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/codec/streaming.py
@@ -0,0 +1,244 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import io
+import os
+import sys
+
+from pyasn1 import error
+from pyasn1.type import univ
+
+_PY2 = sys.version_info < (3,)
+
+
+class CachingStreamWrapper(io.IOBase):
+ """Wrapper around non-seekable streams.
+
+ Note that the implementation is tied to the decoder,
+ not checking for dangerous arguments for the sake
+ of performance.
+
+ The read bytes are kept in an internal cache until
+ setting _markedPosition which may reset the cache.
+ """
+ def __init__(self, raw):
+ self._raw = raw
+ self._cache = io.BytesIO()
+ self._markedPosition = 0
+
+ def peek(self, n):
+ result = self.read(n)
+ self._cache.seek(-len(result), os.SEEK_CUR)
+ return result
+
+ def seekable(self):
+ return True
+
+ def seek(self, n=-1, whence=os.SEEK_SET):
+ # Note that this is not safe for seeking forward.
+ return self._cache.seek(n, whence)
+
+ def read(self, n=-1):
+ read_from_cache = self._cache.read(n)
+ if n != -1:
+ n -= len(read_from_cache)
+ if not n: # 0 bytes left to read
+ return read_from_cache
+
+ read_from_raw = self._raw.read(n)
+
+ self._cache.write(read_from_raw)
+
+ return read_from_cache + read_from_raw
+
+ @property
+ def markedPosition(self):
+ """Position where the currently processed element starts.
+
+ This is used for back-tracking in SingleItemDecoder.__call__
+ and (indefLen)ValueDecoder and should not be used for other purposes.
+ The client is not supposed to ever seek before this position.
+ """
+ return self._markedPosition
+
+ @markedPosition.setter
+ def markedPosition(self, value):
+ # By setting the value, we ensure we won't seek back before it.
+ # `value` should be the same as the current position
+ # We don't check for this for performance reasons.
+ self._markedPosition = value
+
+ # Whenever we set _markedPosition, we know for sure
+ # that we will not return back, and thus it is
+ # safe to drop all cached data.
+ if self._cache.tell() > io.DEFAULT_BUFFER_SIZE:
+ self._cache = io.BytesIO(self._cache.read())
+ self._markedPosition = 0
+
+ def tell(self):
+ return self._cache.tell()
+
+
+def asSeekableStream(substrate):
+ """Convert object to seekable byte-stream.
+
+ Parameters
+ ----------
+ substrate: :py:class:`bytes` or :py:class:`io.IOBase` or :py:class:`univ.OctetString`
+
+ Returns
+ -------
+ : :py:class:`io.IOBase`
+
+ Raises
+ ------
+ : :py:class:`~pyasn1.error.PyAsn1Error`
+ If the supplied substrate cannot be converted to a seekable stream.
+ """
+ if isinstance(substrate, io.BytesIO):
+ return substrate
+
+ elif isinstance(substrate, bytes):
+ return io.BytesIO(substrate)
+
+ elif isinstance(substrate, univ.OctetString):
+ return io.BytesIO(substrate.asOctets())
+
+ try:
+ # Special case: impossible to set attributes on `file` built-in
+ # XXX: broken, BufferedReader expects a "readable" attribute.
+ if _PY2 and isinstance(substrate, file):
+ return io.BufferedReader(substrate)
+
+ elif substrate.seekable(): # Will fail for most invalid types
+ return substrate
+
+ else:
+ return CachingStreamWrapper(substrate)
+
+ except AttributeError:
+ raise error.UnsupportedSubstrateError(
+ "Cannot convert " + substrate.__class__.__name__ +
+ " to a seekable bit stream.")
+
+
+def isEndOfStream(substrate):
+ """Check whether we have reached the end of a stream.
+
+ Although it is more effective to read and catch exceptions, this
+ function checks for the end of a stream without consuming any input.
+
+ Parameters
+ ----------
+ substrate: :py:class:`IOBase`
+ Stream to check
+
+ Returns
+ -------
+ : :py:class:`bool`
+ """
+ if isinstance(substrate, io.BytesIO):
+ cp = substrate.tell()
+ substrate.seek(0, os.SEEK_END)
+ result = substrate.tell() == cp
+ substrate.seek(cp, os.SEEK_SET)
+ yield result
+
+ else:
+ received = substrate.read(1)
+ if received is None:
+ yield
+
+ if received:
+ substrate.seek(-1, os.SEEK_CUR)
+
+ yield not received
+
+
+def peekIntoStream(substrate, size=-1):
+ """Peek into stream.
+
+ Parameters
+ ----------
+ substrate: :py:class:`IOBase`
+ Stream to read from.
+
+ size: :py:class:`int`
+ How many bytes to peek (-1 = all available)
+
+ Returns
+ -------
+ : :py:class:`bytes` or :py:class:`str`
+ The return type depends on Python major version
+ """
+ if hasattr(substrate, "peek"):
+ received = substrate.peek(size)
+ if received is None:
+ yield
+
+ while len(received) < size:
+ yield
+
+ yield received
+
+ else:
+ current_position = substrate.tell()
+ try:
+ for chunk in readFromStream(substrate, size):
+ yield chunk
+
+ finally:
+ substrate.seek(current_position)
+
+
+def readFromStream(substrate, size=-1, context=None):
+ """Read from the stream.
+
+ Parameters
+ ----------
+ substrate: :py:class:`IOBase`
+ Stream to read from.
+
+ Keyword parameters
+ ------------------
+ size: :py:class:`int`
+ How many bytes to read (-1 = all available)
+
+ context: :py:class:`dict`
+ Opaque caller context will be attached to exception objects created
+ by this function.
+
+ Yields
+ ------
+ : :py:class:`bytes` or :py:class:`str` or :py:class:`SubstrateUnderrunError`
+ Read data or :py:class:`~pyasn1.error.SubstrateUnderrunError`
+ object if no `size` bytes is readily available in the stream. The
+ data type depends on Python major version
+
+ Raises
+ ------
+ : :py:class:`~pyasn1.error.EndOfStreamError`
+ Input stream is exhausted
+ """
+ while True:
+ # this will block unless stream is non-blocking
+ received = substrate.read(size)
+ if received is None: # non-blocking stream can do this
+ yield error.SubstrateUnderrunError(context=context)
+
+ elif not received and size != 0: # end-of-stream
+ raise error.EndOfStreamError(context=context)
+
+ elif len(received) < size:
+ substrate.seek(-len(received), os.SEEK_CUR)
+
+ # behave like a non-blocking stream
+ yield error.SubstrateUnderrunError(context=context)
+
+ else:
+ break
+
+ yield received
diff --git a/contrib/python/pyasn1/py3/pyasn1/compat/__init__.py b/contrib/python/pyasn1/py3/pyasn1/compat/__init__.py
new file mode 100644
index 0000000000..d3e676ac6a
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/compat/__init__.py
@@ -0,0 +1,4 @@
+# This file is necessary to make this directory a package.
+
+# sentinel for missing argument
+_MISSING = object()
diff --git a/contrib/python/pyasn1/py3/pyasn1/compat/integer.py b/contrib/python/pyasn1/py3/pyasn1/compat/integer.py
new file mode 100644
index 0000000000..b41d849fcd
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/compat/integer.py
@@ -0,0 +1,103 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import platform
+
+from pyasn1.compat.octets import oct2int, null, ensureString
+
+
+implementation = platform.python_implementation()
+
+if sys.version_info[0] < 3:
+ from binascii import a2b_hex, b2a_hex
+
+ def from_bytes(octets, signed=False):
+ if not octets:
+ return 0
+
+ value = long(b2a_hex(ensureString(octets)), 16)
+
+ if signed and oct2int(octets[0]) & 0x80:
+ return value - (1 << len(octets) * 8)
+
+ return value
+
+ def to_bytes(value, signed=False, length=0):
+ if value < 0:
+ if signed:
+ bits = bitLength(value)
+
+ # two's complement form
+ maxValue = 1 << bits
+ valueToEncode = (value + maxValue) % maxValue
+
+ else:
+ raise OverflowError('can\'t convert negative int to unsigned')
+ elif value == 0 and length == 0:
+ return null
+ else:
+ bits = 0
+ valueToEncode = value
+
+ hexValue = hex(valueToEncode)[2:]
+ if hexValue.endswith('L'):
+ hexValue = hexValue[:-1]
+
+ if len(hexValue) & 1:
+ hexValue = '0' + hexValue
+
+ # padding may be needed for two's complement encoding
+ if value != valueToEncode or length:
+ hexLength = len(hexValue) * 4
+
+ padLength = max(length, bits)
+
+ if padLength > hexLength:
+ hexValue = '00' * ((padLength - hexLength - 1) // 8 + 1) + hexValue
+ elif length and hexLength - length > 7:
+ raise OverflowError('int too big to convert')
+
+ firstOctet = int(hexValue[:2], 16)
+
+ if signed:
+ if firstOctet & 0x80:
+ if value >= 0:
+ hexValue = '00' + hexValue
+ elif value < 0:
+ hexValue = 'ff' + hexValue
+
+ octets_value = a2b_hex(hexValue)
+
+ return octets_value
+
+ def bitLength(number):
+ # bits in unsigned number
+ hexValue = hex(abs(number))
+ bits = len(hexValue) - 2
+ if hexValue.endswith('L'):
+ bits -= 1
+ if bits & 1:
+ bits += 1
+ bits *= 4
+ # TODO: strip lhs zeros
+ return bits
+
+else:
+
+ def from_bytes(octets, signed=False):
+ return int.from_bytes(bytes(octets), 'big', signed=signed)
+
+ def to_bytes(value, signed=False, length=0):
+ length = max(value.bit_length(), length)
+
+ if signed and length % 8 == 0:
+ length += 1
+
+ return value.to_bytes(length // 8 + (length % 8 and 1 or 0), 'big', signed=signed)
+
+ def bitLength(number):
+ return int(number).bit_length()
diff --git a/contrib/python/pyasn1/py3/pyasn1/compat/octets.py b/contrib/python/pyasn1/py3/pyasn1/compat/octets.py
new file mode 100644
index 0000000000..d871f46c8a
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/compat/octets.py
@@ -0,0 +1,46 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from sys import version_info
+
+if version_info[0] <= 2:
+ int2oct = chr
+ # noinspection PyPep8
+ ints2octs = lambda s: ''.join([int2oct(x) for x in s])
+ null = ''
+ oct2int = ord
+ # TODO: refactor to return a sequence of ints
+ # noinspection PyPep8
+ octs2ints = lambda s: [oct2int(x) for x in s]
+ # noinspection PyPep8
+ str2octs = lambda x: x
+ # noinspection PyPep8
+ octs2str = lambda x: x
+ # noinspection PyPep8
+ isOctetsType = lambda s: isinstance(s, str)
+ # noinspection PyPep8
+ isStringType = lambda s: isinstance(s, (str, unicode))
+ # noinspection PyPep8
+ ensureString = str
+else:
+ ints2octs = bytes
+ # noinspection PyPep8
+ int2oct = lambda x: ints2octs((x,))
+ null = ints2octs()
+ # noinspection PyPep8
+ oct2int = lambda x: x
+ # noinspection PyPep8
+ octs2ints = lambda x: x
+ # noinspection PyPep8
+ str2octs = lambda x: x.encode('iso-8859-1')
+ # noinspection PyPep8
+ octs2str = lambda x: x.decode('iso-8859-1')
+ # noinspection PyPep8
+ isOctetsType = lambda s: isinstance(s, bytes)
+ # noinspection PyPep8
+ isStringType = lambda s: isinstance(s, str)
+ # noinspection PyPep8
+ ensureString = bytes
diff --git a/contrib/python/pyasn1/py3/pyasn1/debug.py b/contrib/python/pyasn1/py3/pyasn1/debug.py
new file mode 100644
index 0000000000..6be80c3a70
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/debug.py
@@ -0,0 +1,147 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import logging
+import sys
+
+from pyasn1 import __version__
+from pyasn1 import error
+from pyasn1.compat.octets import octs2ints
+
+__all__ = ['Debug', 'setLogger', 'hexdump']
+
+DEBUG_NONE = 0x0000
+DEBUG_ENCODER = 0x0001
+DEBUG_DECODER = 0x0002
+DEBUG_ALL = 0xffff
+
+FLAG_MAP = {
+ 'none': DEBUG_NONE,
+ 'encoder': DEBUG_ENCODER,
+ 'decoder': DEBUG_DECODER,
+ 'all': DEBUG_ALL
+}
+
+LOGGEE_MAP = {}
+
+
+class Printer(object):
+ # noinspection PyShadowingNames
+ def __init__(self, logger=None, handler=None, formatter=None):
+ if logger is None:
+ logger = logging.getLogger('pyasn1')
+
+ logger.setLevel(logging.DEBUG)
+
+ if handler is None:
+ handler = logging.StreamHandler()
+
+ if formatter is None:
+ formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
+
+ handler.setFormatter(formatter)
+ handler.setLevel(logging.DEBUG)
+ logger.addHandler(handler)
+
+ self.__logger = logger
+
+ def __call__(self, msg):
+ self.__logger.debug(msg)
+
+ def __str__(self):
+ return '<python logging>'
+
+
+class Debug(object):
+ defaultPrinter = Printer()
+
+ def __init__(self, *flags, **options):
+ self._flags = DEBUG_NONE
+
+ if 'loggerName' in options:
+ # route our logs to parent logger
+ self._printer = Printer(
+ logger=logging.getLogger(options['loggerName']),
+ handler=logging.NullHandler()
+ )
+
+ elif 'printer' in options:
+ self._printer = options.get('printer')
+
+ else:
+ self._printer = self.defaultPrinter
+
+ self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))
+
+ for flag in flags:
+ inverse = flag and flag[0] in ('!', '~')
+ if inverse:
+ flag = flag[1:]
+ try:
+ if inverse:
+ self._flags &= ~FLAG_MAP[flag]
+ else:
+ self._flags |= FLAG_MAP[flag]
+ except KeyError:
+ raise error.PyAsn1Error('bad debug flag %s' % flag)
+
+ self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))
+
+ def __str__(self):
+ return 'logger %s, flags %x' % (self._printer, self._flags)
+
+ def __call__(self, msg):
+ self._printer(msg)
+
+ def __and__(self, flag):
+ return self._flags & flag
+
+ def __rand__(self, flag):
+ return flag & self._flags
+
+_LOG = DEBUG_NONE
+
+
+def setLogger(userLogger):
+ global _LOG
+
+ if userLogger:
+ _LOG = userLogger
+ else:
+ _LOG = DEBUG_NONE
+
+ # Update registered logging clients
+ for module, (name, flags) in LOGGEE_MAP.items():
+ setattr(module, name, _LOG & flags and _LOG or DEBUG_NONE)
+
+
+def registerLoggee(module, name='LOG', flags=DEBUG_NONE):
+ LOGGEE_MAP[sys.modules[module]] = name, flags
+ setLogger(_LOG)
+ return _LOG
+
+
+def hexdump(octets):
+ return ' '.join(
+ ['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
+ for n, x in zip(range(len(octets)), octs2ints(octets))]
+ )
+
+
+class Scope(object):
+ def __init__(self):
+ self._list = []
+
+ def __str__(self): return '.'.join(self._list)
+
+ def push(self, token):
+ self._list.append(token)
+
+ def pop(self):
+ return self._list.pop()
+
+
+scope = Scope()
diff --git a/contrib/python/pyasn1/py3/pyasn1/error.py b/contrib/python/pyasn1/py3/pyasn1/error.py
new file mode 100644
index 0000000000..75c9a3f4cd
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/error.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+
+
+class PyAsn1Error(Exception):
+ """Base pyasn1 exception
+
+ `PyAsn1Error` is the base exception class (based on
+ :class:`Exception`) that represents all possible ASN.1 related
+ errors.
+
+ Parameters
+ ----------
+ args:
+ Opaque positional parameters
+
+ Keyword Args
+ ------------
+ kwargs:
+ Opaque keyword parameters
+
+ """
+ def __init__(self, *args, **kwargs):
+ self._args = args
+ self._kwargs = kwargs
+
+ @property
+ def context(self):
+ """Return exception context
+
+ When exception object is created, the caller can supply some opaque
+ context for the upper layers to better understand the cause of the
+ exception.
+
+ Returns
+ -------
+ : :py:class:`dict`
+ Dict holding context specific data
+ """
+ return self._kwargs.get('context', {})
+
+
+class ValueConstraintError(PyAsn1Error):
+ """ASN.1 type constraints violation exception
+
+ The `ValueConstraintError` exception indicates an ASN.1 value
+ constraint violation.
+
+ It might happen on value object instantiation (for scalar types) or on
+ serialization (for constructed types).
+ """
+
+
+class SubstrateUnderrunError(PyAsn1Error):
+ """ASN.1 data structure deserialization error
+
+ The `SubstrateUnderrunError` exception indicates insufficient serialised
+ data on input of a de-serialization codec.
+ """
+
+
+class EndOfStreamError(SubstrateUnderrunError):
+ """ASN.1 data structure deserialization error
+
+ The `EndOfStreamError` exception indicates the condition of the input
+ stream has been closed.
+ """
+
+
+class UnsupportedSubstrateError(PyAsn1Error):
+ """Unsupported substrate type to parse as ASN.1 data."""
+
+
+class PyAsn1UnicodeError(PyAsn1Error, UnicodeError):
+ """Unicode text processing error
+
+ The `PyAsn1UnicodeError` exception is a base class for errors relating to
+ unicode text de/serialization.
+
+ Apart from inheriting from :class:`PyAsn1Error`, it also inherits from
+ :class:`UnicodeError` to help the caller catching unicode-related errors.
+ """
+ def __init__(self, message, unicode_error=None):
+ if isinstance(unicode_error, UnicodeError):
+ UnicodeError.__init__(self, *unicode_error.args)
+ PyAsn1Error.__init__(self, message)
+
+
+class PyAsn1UnicodeDecodeError(PyAsn1UnicodeError, UnicodeDecodeError):
+ """Unicode text decoding error
+
+ The `PyAsn1UnicodeDecodeError` exception represents a failure to
+ deserialize unicode text.
+
+ Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+ from :class:`UnicodeDecodeError` to help the caller catching unicode-related
+ errors.
+ """
+
+
+class PyAsn1UnicodeEncodeError(PyAsn1UnicodeError, UnicodeEncodeError):
+ """Unicode text encoding error
+
+ The `PyAsn1UnicodeEncodeError` exception represents a failure to
+ serialize unicode text.
+
+ Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+ from :class:`UnicodeEncodeError` to help the caller catching
+ unicode-related errors.
+ """
+
+
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/__init__.py b/contrib/python/pyasn1/py3/pyasn1/type/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/base.py b/contrib/python/pyasn1/py3/pyasn1/type/base.py
new file mode 100644
index 0000000000..ac92c51afb
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/base.py
@@ -0,0 +1,706 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import constraint
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+__all__ = ['Asn1Item', 'Asn1Type', 'SimpleAsn1Type',
+ 'ConstructedAsn1Type']
+
+
+class Asn1Item(object):
+ @classmethod
+ def getTypeId(cls, increment=1):
+ try:
+ Asn1Item._typeCounter += increment
+ except AttributeError:
+ Asn1Item._typeCounter = increment
+ return Asn1Item._typeCounter
+
+
+class Asn1Type(Asn1Item):
+ """Base class for all classes representing ASN.1 types.
+
+ In the user code, |ASN.1| class is normally used only for telling
+ ASN.1 objects from others.
+
+ Note
+ ----
+ For as long as ASN.1 is concerned, a way to compare ASN.1 types
+ is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+ """
+ #: Set or return a :py:class:`~pyasn1.type.tag.TagSet` object representing
+ #: ASN.1 tag(s) associated with |ASN.1| type.
+ tagSet = tag.TagSet()
+
+ #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ #: object imposing constraints on initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = None
+
+ def __init__(self, **kwargs):
+ readOnly = {
+ 'tagSet': self.tagSet,
+ 'subtypeSpec': self.subtypeSpec
+ }
+
+ readOnly.update(kwargs)
+
+ self.__dict__.update(readOnly)
+
+ self._readOnly = readOnly
+
+ def __setattr__(self, name, value):
+ if name[0] != '_' and name in self._readOnly:
+ raise error.PyAsn1Error('read-only instance attribute "%s"' % name)
+
+ self.__dict__[name] = value
+
+ def __str__(self):
+ return self.prettyPrint()
+
+ @property
+ def readOnly(self):
+ return self._readOnly
+
+ @property
+ def effectiveTagSet(self):
+ """For |ASN.1| type is equivalent to *tagSet*
+ """
+ return self.tagSet # used by untagged types
+
+ @property
+ def tagMap(self):
+ """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
+ """
+ return tagmap.TagMap({self.tagSet: self})
+
+ def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
+ """Examine |ASN.1| type for equality with other ASN.1 type.
+
+ ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
+ (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
+ out ASN.1 types comparison.
+
+ Python class inheritance relationship is NOT considered.
+
+ Parameters
+ ----------
+ other: a pyasn1 type object
+ Class instance representing ASN.1 type.
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if *other* is |ASN.1| type,
+ :obj:`False` otherwise.
+ """
+ return (self is other or
+ (not matchTags or self.tagSet == other.tagSet) and
+ (not matchConstraints or self.subtypeSpec == other.subtypeSpec))
+
+ def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
+ """Examine |ASN.1| type for subtype relationship with other ASN.1 type.
+
+ ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
+ (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
+ out ASN.1 types comparison.
+
+ Python class inheritance relationship is NOT considered.
+
+ Parameters
+ ----------
+ other: a pyasn1 type object
+ Class instance representing ASN.1 type.
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if *other* is a subtype of |ASN.1| type,
+ :obj:`False` otherwise.
+ """
+ return (not matchTags or
+ (self.tagSet.isSuperTagSetOf(other.tagSet)) and
+ (not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
+
+ @staticmethod
+ def isNoValue(*values):
+ for value in values:
+ if value is not noValue:
+ return False
+ return True
+
+ def prettyPrint(self, scope=0):
+ raise NotImplementedError()
+
+ # backward compatibility
+
+ def getTagSet(self):
+ return self.tagSet
+
+ def getEffectiveTagSet(self):
+ return self.effectiveTagSet
+
+ def getTagMap(self):
+ return self.tagMap
+
+ def getSubtypeSpec(self):
+ return self.subtypeSpec
+
+ # backward compatibility
+ def hasValue(self):
+ return self.isValue
+
+# Backward compatibility
+Asn1ItemBase = Asn1Type
+
+
+class NoValue(object):
+ """Create a singleton instance of NoValue class.
+
+ The *NoValue* sentinel object represents an instance of ASN.1 schema
+ object as opposed to ASN.1 value object.
+
+ Only ASN.1 schema-related operations can be performed on ASN.1
+ schema objects.
+
+ Warning
+ -------
+ Any operation attempted on the *noValue* object will raise the
+ *PyAsn1Error* exception.
+ """
+ skipMethods = {
+ '__slots__',
+ # attributes
+ '__getattribute__',
+ '__getattr__',
+ '__setattr__',
+ '__delattr__',
+ # class instance
+ '__class__',
+ '__init__',
+ '__del__',
+ '__new__',
+ '__repr__',
+ '__qualname__',
+ '__objclass__',
+ 'im_class',
+ '__sizeof__',
+ # pickle protocol
+ '__reduce__',
+ '__reduce_ex__',
+ '__getnewargs__',
+ '__getinitargs__',
+ '__getstate__',
+ '__setstate__',
+ }
+
+ _instance = None
+
+ def __new__(cls):
+ if cls._instance is None:
+ def getPlug(name):
+ def plug(self, *args, **kw):
+ raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % name)
+ return plug
+
+ op_names = [name
+ for typ in (str, int, list, dict)
+ for name in dir(typ)
+ if (name not in cls.skipMethods and
+ name.startswith('__') and
+ name.endswith('__') and
+ callable(getattr(typ, name)))]
+
+ for name in set(op_names):
+ setattr(cls, name, getPlug(name))
+
+ cls._instance = object.__new__(cls)
+
+ return cls._instance
+
+ def __getattr__(self, attr):
+ if attr in self.skipMethods:
+ raise AttributeError('Attribute %s not present' % attr)
+
+ raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % attr)
+
+ def __repr__(self):
+ return '<%s object>' % self.__class__.__name__
+
+
+noValue = NoValue()
+
+
class SimpleAsn1Type(Asn1Type):
    """Base class for all simple classes representing ASN.1 types.

    ASN.1 distinguishes types by their ability to hold other objects.
    Scalar types are known as *simple* in ASN.1.

    In the user code, |ASN.1| class is normally used only for telling
    ASN.1 objects from others.

    Note
    ----
    For as long as ASN.1 is concerned, a way to compare ASN.1 types
    is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
    """
    #: Default payload value
    defaultValue = noValue

    def __init__(self, value=noValue, **kwargs):
        """Initialize a schema (no `value`) or value (with `value`) object."""
        Asn1Type.__init__(self, **kwargs)
        if value is noValue:
            value = self.defaultValue
        else:
            value = self.prettyIn(value)
            try:
                # verify the normalized payload against this type's constraints
                self.subtypeSpec(value)

            except error.PyAsn1Error:
                # re-raise the same exception type with the class name appended
                # for easier diagnosis
                exType, exValue, exTb = sys.exc_info()
                raise exType('%s at %s' % (exValue, self.__class__.__name__))

        self._value = value

    def __repr__(self):
        representation = '%s %s object' % (
            self.__class__.__name__, self.isValue and 'value' or 'schema')

        for attr, value in self.readOnly.items():
            if value:
                representation += ', %s %s' % (attr, value)

        if self.isValue:
            value = self.prettyPrint()
            # abbreviate long payloads to keep repr readable
            if len(value) > 32:
                value = value[:16] + '...' + value[-16:]
            representation += ', payload [%s]' % value

        return '<%s>' % representation

    def __eq__(self, other):
        # identity short-circuits; otherwise compare underlying payloads
        return self is other and True or self._value == other

    def __ne__(self, other):
        return self._value != other

    def __lt__(self, other):
        return self._value < other

    def __le__(self, other):
        return self._value <= other

    def __gt__(self, other):
        return self._value > other

    def __ge__(self, other):
        return self._value >= other

    # pick the truth-protocol hook name appropriate for the running Python
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return self._value and True or False
    else:
        def __bool__(self):
            return self._value and True or False

    def __hash__(self):
        return hash(self._value)

    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.

        If *isValue* is :obj:`False` then this object represents just
        ASN.1 schema.

        If *isValue* is :obj:`True` then, in addition to its ASN.1 schema
        features, this object can also be used like a Python built-in object
        (e.g. :class:`int`, :class:`str`, :class:`dict` etc.).

        Returns
        -------
        : :class:`bool`
            :obj:`False` if object represents just ASN.1 schema.
            :obj:`True` if object represents ASN.1 schema and can be used as a normal value.

        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.

        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).
        """
        return self._value is not noValue

    def clone(self, value=noValue, **kwargs):
        """Create a modified version of |ASN.1| schema or value object.

        The `clone()` method accepts the same set of arguments as |ASN.1|
        class takes on instantiation except that all arguments
        of the `clone()` method are optional.

        Whatever arguments are supplied, they are used to create a copy
        of `self` taking precedence over the ones used to instantiate `self`.

        Note
        ----
        Due to the immutable nature of the |ASN.1| object, if no arguments
        are supplied, no new |ASN.1| object will be created and `self` will
        be returned instead.
        """
        if value is noValue:
            if not kwargs:
                return self

            value = self._value

        initializers = self.readOnly.copy()
        initializers.update(kwargs)

        return self.__class__(value, **initializers)

    def subtype(self, value=noValue, **kwargs):
        """Create a specialization of |ASN.1| schema or value object.

        The subtype relationship between ASN.1 types has no correlation with
        subtype relationship between Python types. ASN.1 type is mainly identified
        by its tag(s) (:py:class:`~pyasn1.type.tag.TagSet`) and value range
        constraints (:py:class:`~pyasn1.type.constraint.ConstraintsIntersection`).
        These ASN.1 type properties are implemented as |ASN.1| attributes.

        The `subtype()` method accepts the same set of arguments as |ASN.1|
        class takes on instantiation except that all parameters
        of the `subtype()` method are optional.

        With the exception of the arguments described below, the rest of the
        supplied arguments are used to create a copy of `self` taking
        precedence over the ones used to instantiate `self`.

        The following arguments to `subtype()` create an ASN.1 subtype out of
        |ASN.1| type:

        Other Parameters
        ----------------
        implicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Implicitly apply given ASN.1 tag object to `self`'s
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).

        explicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Explicitly apply given ASN.1 tag object to `self`'s
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).

        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Add ASN.1 constraints object to one of the `self`'s, then
            use the result as new object's ASN.1 constraints.

        Returns
        -------
        :
            new instance of |ASN.1| schema or value object

        Note
        ----
        Due to the immutable nature of the |ASN.1| object, if no arguments
        are supplied, no new |ASN.1| object will be created and `self` will
        be returned instead.
        """
        if value is noValue:
            if not kwargs:
                return self

            value = self._value

        initializers = self.readOnly.copy()

        implicitTag = kwargs.pop('implicitTag', None)
        if implicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)

        explicitTag = kwargs.pop('explicitTag', None)
        if explicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)

        # remaining keyword options are *combined* with (not substituted for)
        # the existing read-only attributes of the same name
        for arg, option in kwargs.items():
            initializers[arg] += option

        return self.__class__(value, **initializers)

    def prettyIn(self, value):
        # hook: normalize an arbitrary initializer into the internal payload
        return value

    def prettyOut(self, value):
        # hook: render the internal payload into human-friendly text
        return str(value)

    def prettyPrint(self, scope=0):
        return self.prettyOut(self._value)

    def prettyPrintType(self, scope=0):
        return '%s -> %s' % (self.tagSet, self.__class__.__name__)
+
# Backward compatibility: old public name for SimpleAsn1Type, kept for
# external code that still imports it under this identifier.
AbstractSimpleAsn1Item = SimpleAsn1Type
+
+#
+# Constructed types:
+# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
+# * Value initialization is made for defaulted components only
+# * Primary method of component addressing is by-position. Data model for base
+# type is Python sequence. Additional type-specific addressing methods
+# may be implemented for particular types.
+# * SequenceOf and SetOf types do not implement any additional methods
+# * Sequence, Set and Choice types also implement by-identifier addressing
+# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
+# * Sequence and Set types may include optional and defaulted
+# components
+# * Constructed types hold a reference to component types used for value
+# verification and ordering.
+# * Component type is a scalar type for SequenceOf/SetOf types and a list
+# of types for Sequence/Set/Choice.
+#
+
+
class ConstructedAsn1Type(Asn1Type):
    """Base class for all constructed classes representing ASN.1 types.

    ASN.1 distinguishes types by their ability to hold other objects.
    Those "nesting" types are known as *constructed* in ASN.1.

    In the user code, |ASN.1| class is normally used only for telling
    ASN.1 objects from others.

    Note
    ----
    For as long as ASN.1 is concerned, a way to compare ASN.1 types
    is to use :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
    """

    #: If :obj:`True`, requires exact component type matching,
    #: otherwise subtype relation is only enforced
    strictConstraints = False

    # type (or named types) of the components; set by concrete subclasses
    componentType = None

    # backward compatibility, unused
    sizeSpec = constraint.ConstraintsIntersection()

    def __init__(self, **kwargs):
        readOnly = {
            'componentType': self.componentType,
            # backward compatibility, unused
            'sizeSpec': self.sizeSpec
        }

        # backward compatibility: fold a legacy `sizeSpec` keyword into
        # `subtypeSpec` before handing everything to the base initializer
        kwargs = self._moveSizeSpec(**kwargs)

        readOnly.update(kwargs)

        Asn1Type.__init__(self, **readOnly)

    def _moveSizeSpec(self, **kwargs):
        """Merge the deprecated `sizeSpec` keyword into `subtypeSpec`."""
        # backward compatibility, unused
        sizeSpec = kwargs.pop('sizeSpec', self.sizeSpec)
        if sizeSpec:
            subtypeSpec = kwargs.pop('subtypeSpec', self.subtypeSpec)
            if subtypeSpec:
                # BUGFIX: the branches here were inverted -- a non-empty
                # subtypeSpec used to be *replaced* by sizeSpec (silently
                # dropping existing constraints) while `+=` was applied to
                # the empty one. Merge into the existing constraints instead.
                subtypeSpec += sizeSpec

            else:
                subtypeSpec = sizeSpec

            kwargs['subtypeSpec'] = subtypeSpec

        return kwargs

    def __repr__(self):
        representation = '%s %s object' % (
            self.__class__.__name__, self.isValue and 'value' or 'schema'
        )

        for attr, value in self.readOnly.items():
            if value is not noValue:
                representation += ', %s=%r' % (attr, value)

        if self.isValue and self.components:
            representation += ', payload [%s]' % ', '.join(
                [repr(x) for x in self.components])

        return '<%s>' % representation

    def __eq__(self, other):
        # identity short-circuits; otherwise compare component collections
        return self is other or self.components == other

    def __ne__(self, other):
        return self.components != other

    def __lt__(self, other):
        return self.components < other

    def __le__(self, other):
        return self.components <= other

    def __gt__(self, other):
        return self.components > other

    def __ge__(self, other):
        return self.components >= other

    # pick the truth-protocol hook name appropriate for the running Python
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(self.components)
    else:
        def __bool__(self):
            return bool(self.components)

    @property
    def components(self):
        # abstract: concrete subclasses expose their component collection here
        raise error.PyAsn1Error('Method not implemented')

    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # no-op hook; concrete subclasses copy component values into `myClone`
        pass

    def clone(self, **kwargs):
        """Create a modified version of |ASN.1| schema object.

        The `clone()` method accepts the same set of arguments as |ASN.1|
        class takes on instantiation except that all arguments
        of the `clone()` method are optional.

        Whatever arguments are supplied, they are used to create a copy
        of `self` taking precedence over the ones used to instantiate `self`.

        Possible values of `self` are never copied over thus `clone()` can
        only create a new schema object.

        Returns
        -------
        :
            new instance of |ASN.1| type/value

        Note
        ----
        Due to the mutable nature of the |ASN.1| object, even if no arguments
        are supplied, a new |ASN.1| object will be created and returned.
        """
        cloneValueFlag = kwargs.pop('cloneValueFlag', False)

        initializers = self.readOnly.copy()
        initializers.update(kwargs)

        clone = self.__class__(**initializers)

        if cloneValueFlag:
            self._cloneComponentValues(clone, cloneValueFlag)

        return clone

    def subtype(self, **kwargs):
        """Create a specialization of |ASN.1| schema object.

        The `subtype()` method accepts the same set of arguments as |ASN.1|
        class takes on instantiation except that all parameters
        of the `subtype()` method are optional.

        With the exception of the arguments described below, the rest of the
        supplied arguments are used to create a copy of `self` taking
        precedence over the ones used to instantiate `self`.

        The following arguments to `subtype()` create an ASN.1 subtype out of
        |ASN.1| type.

        Other Parameters
        ----------------
        implicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Implicitly apply given ASN.1 tag object to `self`'s
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).

        explicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Explicitly apply given ASN.1 tag object to `self`'s
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).

        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Add ASN.1 constraints object to one of the `self`'s, then
            use the result as new object's ASN.1 constraints.


        Returns
        -------
        :
            new instance of |ASN.1| type/value

        Note
        ----
        Due to the mutable nature of the |ASN.1| object, even if no arguments
        are supplied, a new |ASN.1| object will be created and returned.
        """

        initializers = self.readOnly.copy()

        cloneValueFlag = kwargs.pop('cloneValueFlag', False)

        implicitTag = kwargs.pop('implicitTag', None)
        if implicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)

        explicitTag = kwargs.pop('explicitTag', None)
        if explicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)

        # remaining keyword options are *combined* with (not substituted for)
        # the existing read-only attributes of the same name
        for arg, option in kwargs.items():
            initializers[arg] += option

        clone = self.__class__(**initializers)

        if cloneValueFlag:
            self._cloneComponentValues(clone, cloneValueFlag)

        return clone

    def getComponentByPosition(self, idx):
        # abstract: implemented by concrete constructed types
        raise error.PyAsn1Error('Method not implemented')

    def setComponentByPosition(self, idx, value, verifyConstraints=True):
        # abstract: implemented by concrete constructed types
        raise error.PyAsn1Error('Method not implemented')

    def setComponents(self, *args, **kwargs):
        """Assign components by position (`args`) and by name (`kwargs`)."""
        for idx, value in enumerate(args):
            self[idx] = value
        for k in kwargs:
            self[k] = kwargs[k]
        return self

    # backward compatibility

    def setDefaultComponents(self):
        pass

    def getComponentType(self):
        return self.componentType

    # backward compatibility, unused
    def verifySizeSpec(self):
        self.subtypeSpec(self)
+
+
# Backward compatibility: old public name for ConstructedAsn1Type, kept
# for external code that still imports it under this identifier.
AbstractConstructedAsn1Item = ConstructedAsn1Type
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/char.py b/contrib/python/pyasn1/py3/pyasn1/type/char.py
new file mode 100644
index 0000000000..13fbc7fa27
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/char.py
@@ -0,0 +1,335 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import tag
+from pyasn1.type import univ
+
# Public character-string types exported by this module
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
           'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
           'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']

# Convenience re-exports so users of this module need not import `univ`
NoValue = univ.NoValue
noValue = univ.noValue
+
+
class AbstractCharacterString(univ.OctetString):
    """Creates |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
    its objects are immutable and duck-type Python 2 :class:`str` or Python 3
    :class:`bytes`. When used in octet-stream context, |ASN.1| type assumes
    "|encoding|" encoding.

    Keyword Args
    ------------
    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
        :class:`unicode` object (Python 2) or :class:`str` (Python 3),
        alternatively :class:`str` (Python 2) or :class:`bytes` (Python 3)
        representing octet-stream of serialised unicode string
        (note `encoding` parameter) or |ASN.1| class instance.
        If `value` is not given, schema object will be created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type occurs automatically on object
        instantiation.

    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in octet-stream context.

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.
    """

    # The text/bytes conversion protocol differs between Python 2 and 3,
    # so the whole method family is selected at class-definition time.
    if sys.version_info[0] <= 2:
        def __str__(self):
            try:
                # `str` is Py2 text representation
                return self._value.encode(self.encoding)

            except UnicodeEncodeError:
                exc = sys.exc_info()[1]
                raise error.PyAsn1UnicodeEncodeError(
                    "Can't encode string '%s' with codec "
                    "%s" % (self._value, self.encoding), exc
                )

        def __unicode__(self):
            return unicode(self._value)

        def prettyIn(self, value):
            # Normalize any accepted initializer kind into a unicode payload.
            try:
                if isinstance(value, unicode):
                    return value
                elif isinstance(value, str):
                    return value.decode(self.encoding)
                elif isinstance(value, (tuple, list)):
                    # sequence of character ordinals
                    return self.prettyIn(''.join([chr(x) for x in value]))
                elif isinstance(value, univ.OctetString):
                    return value.asOctets().decode(self.encoding)
                else:
                    return unicode(value)

            except (UnicodeDecodeError, LookupError):
                exc = sys.exc_info()[1]
                raise error.PyAsn1UnicodeDecodeError(
                    "Can't decode string '%s' with codec "
                    "%s" % (value, self.encoding), exc
                )

        def asOctets(self, padding=True):
            return str(self)

        def asNumbers(self, padding=True):
            return tuple([ord(x) for x in str(self)])

    else:
        def __str__(self):
            # `unicode` is Py3 text representation
            return str(self._value)

        def __bytes__(self):
            try:
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                exc = sys.exc_info()[1]
                raise error.PyAsn1UnicodeEncodeError(
                    "Can't encode string '%s' with codec "
                    "%s" % (self._value, self.encoding), exc
                )

        def prettyIn(self, value):
            # Normalize any accepted initializer kind into a str payload.
            try:
                if isinstance(value, str):
                    return value
                elif isinstance(value, bytes):
                    return value.decode(self.encoding)
                elif isinstance(value, (tuple, list)):
                    # sequence of byte values
                    return self.prettyIn(bytes(value))
                elif isinstance(value, univ.OctetString):
                    return value.asOctets().decode(self.encoding)
                else:
                    return str(value)

            except (UnicodeDecodeError, LookupError):
                exc = sys.exc_info()[1]
                raise error.PyAsn1UnicodeDecodeError(
                    "Can't decode string '%s' with codec "
                    "%s" % (value, self.encoding), exc
                )

        def asOctets(self, padding=True):
            return bytes(self)

        def asNumbers(self, padding=True):
            return tuple(bytes(self))

    #
    # See OctetString.prettyPrint() for the explanation
    #

    def prettyOut(self, value):
        return value

    def prettyPrint(self, scope=0):
        # first see if subclass has its own .prettyOut()
        value = self.prettyOut(self._value)

        if value is not self._value:
            return value

        return AbstractCharacterString.__str__(self)

    def __reversed__(self):
        return reversed(self._value)
+
+
class NumericString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 18 (NumericString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class PrintableString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 19 (PrintableString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class TeletexString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 20 (TeletexString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class T61String(TeletexString):
    # T61String is an alternate name for TeletexString; same tag and codec
    __doc__ = TeletexString.__doc__

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class VideotexString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 21 (VideotexString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class IA5String(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 22 (IA5String)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class GraphicString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 25 (GraphicString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class VisibleString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 26 (VisibleString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class ISO646String(VisibleString):
    # ISO646String is an alternate name for VisibleString; same tag and codec
    __doc__ = VisibleString.__doc__

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
class GeneralString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 27 (GeneralString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class UniversalString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 28 (UniversalString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
    )
    # UCS-4, big endian, per the UniversalString definition
    encoding = "utf-32-be"

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class BMPString(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 30 (BMPString)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
    )
    # UCS-2, big endian, per the BMPString definition
    encoding = "utf-16-be"

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
+
+
class UTF8String(AbstractCharacterString):
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    # ASN.1 universal tag 12 (UTF8String)
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    encoding = "utf-8"

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/constraint.py b/contrib/python/pyasn1/py3/pyasn1/type/constraint.py
new file mode 100644
index 0000000000..34b0060d9f
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/constraint.py
@@ -0,0 +1,756 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+# Original concept and code by Mike C. Fletcher.
+#
+import sys
+
+from pyasn1.type import error
+
# Public constraint classes exported by this module
__all__ = ['SingleValueConstraint', 'ContainedSubtypeConstraint',
           'ValueRangeConstraint', 'ValueSizeConstraint',
           'PermittedAlphabetConstraint', 'InnerTypeConstraint',
           'ConstraintsExclusion', 'ConstraintsIntersection',
           'ConstraintsUnion']
+
+
class AbstractConstraint(object):
    """Base class for all ASN.1 constraint objects.

    A constraint is an immutable callable: calling it with a candidate
    value either returns ``None`` (value accepted) or raises
    ``error.ValueConstraintError``.  Subclasses implement the actual
    check in :meth:`_testValue` and may override :meth:`_setValues`
    to preprocess the constructor arguments.
    """

    def __init__(self, *values):
        self._valueMap = set()
        self._setValues(values)
        # hash is precomputed once; constraint objects are immutable
        self.__hash = hash((self.__class__.__name__, self._values))

    def __call__(self, value, idx=None):
        # an empty constraint accepts any value
        if not self._values:
            return

        try:
            self._testValue(value, idx)

        except error.ValueConstraintError:
            # re-raise with this constraint's repr for context
            raise error.ValueConstraintError(
                '%s failed at: %r' % (self, sys.exc_info()[1])
            )

    def __repr__(self):
        representation = '%s object' % (self.__class__.__name__)

        if self._values:
            representation += ', consts %s' % ', '.join(
                [repr(x) for x in self._values])

        return '<%s>' % representation

    def __eq__(self, other):
        # identity short-circuits; otherwise compare underlying value tuples
        return self is other and True or self._values == other

    def __ne__(self, other):
        return self._values != other

    def __lt__(self, other):
        return self._values < other

    def __le__(self, other):
        return self._values <= other

    def __gt__(self, other):
        return self._values > other

    def __ge__(self, other):
        return self._values >= other

    # pick the truth-protocol hook name appropriate for the running Python
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return self._values and True or False
    else:
        def __bool__(self):
            return self._values and True or False

    def __hash__(self):
        return self.__hash

    def _setValues(self, values):
        # hook: store (and optionally preprocess) the constraint operands
        self._values = values

    def _testValue(self, value, idx):
        # hook: reject everything by default; subclasses implement the check
        raise error.ValueConstraintError(value)

    # Constraints derivation logic
    def getValueMap(self):
        return self._valueMap

    def isSuperTypeOf(self, otherConstraint):
        # TODO: fix possible comparison of set vs scalars here
        return (otherConstraint is self or
                not self._values or
                otherConstraint == self or
                self in otherConstraint.getValueMap())

    def isSubTypeOf(self, otherConstraint):
        return (otherConstraint is self or
                not self or
                otherConstraint == self or
                otherConstraint in self._valueMap)
+
+
class SingleValueConstraint(AbstractConstraint):
    """Create a SingleValueConstraint object.

    The SingleValueConstraint satisfies any value that
    is present in the set of permitted values.

    Objects of this type are iterable (emitting constraint values) and
    can act as operands for some arithmetic operations e.g. addition
    and subtraction. The latter can be used for combining multiple
    SingleValueConstraint objects into one.

    The SingleValueConstraint object can be applied to
    any ASN.1 type.

    Parameters
    ----------
    *values: :class:`int`
        Full set of values permitted by this constraint object.

    Examples
    --------
    .. code-block:: python

        class DivisorOfSix(Integer):
            '''
            ASN.1 specification:

            Divisor-Of-6 ::= INTEGER (1 | 2 | 3 | 6)
            '''
            subtypeSpec = SingleValueConstraint(1, 2, 3, 6)

        # this will succeed
        divisor_of_six = DivisorOfSix(1)

        # this will raise ValueConstraintError
        divisor_of_six = DivisorOfSix(7)
    """
    def _setValues(self, values):
        self._values = values
        # membership is tested against a set for O(1) lookup
        self._set = set(values)

    def _testValue(self, value, idx):
        if value not in self._set:
            raise error.ValueConstraintError(value)

    # Constraints can be merged or reduced

    def __contains__(self, item):
        return item in self._set

    def __iter__(self):
        return iter(self._set)

    def __add__(self, constraint):
        return self.__class__(*(self._set.union(constraint)))

    def __sub__(self, constraint):
        # NOTE: `__sub__` used to be defined twice with identical bodies;
        # the redundant duplicate definition has been removed.
        return self.__class__(*(self._set.difference(constraint)))
+
+
class ContainedSubtypeConstraint(AbstractConstraint):
    """Create a ContainedSubtypeConstraint object.

    The ContainedSubtypeConstraint satisfies any value that
    is present in the set of permitted values and also
    satisfies included constraints.

    The ContainedSubtypeConstraint object can be applied to
    any ASN.1 type.

    Parameters
    ----------
    *values:
        Full set of values and constraint objects permitted
        by this constraint object.

    Examples
    --------
    .. code-block:: python

        class DivisorOfEighteen(Integer):
            '''
            ASN.1 specification:

            Divisors-of-18 ::= INTEGER (INCLUDES Divisors-of-6 | 9 | 18)
            '''
            subtypeSpec = ContainedSubtypeConstraint(
                SingleValueConstraint(1, 2, 3, 6), 9, 18
            )

        # this will succeed
        divisor_of_eighteen = DivisorOfEighteen(9)

        # this will raise ValueConstraintError
        divisor_of_eighteen = DivisorOfEighteen(10)
    """
    def _testValue(self, value, idx):
        for constraint in self._values:
            if isinstance(constraint, AbstractConstraint):
                # delegate checking to the nested constraint object
                constraint(value, idx)
            elif value not in self._values:
                # BUGFIX: this branch previously read `value not in self._set`,
                # but `_set` is only created by SingleValueConstraint._setValues,
                # never by this class -- any scalar permitted value raised
                # AttributeError instead of being checked.
                raise error.ValueConstraintError(value)
+
+
class ValueRangeConstraint(AbstractConstraint):
    """Create a ValueRangeConstraint object.

    The ValueRangeConstraint satisfies any value that falls within the
    inclusive range of permitted values.

    The ValueRangeConstraint object can only be applied
    to :class:`~pyasn1.type.univ.Integer` and
    :class:`~pyasn1.type.univ.Real` types.

    Parameters
    ----------
    start: :class:`int`
        Minimum permitted value in the range (inclusive)

    end: :class:`int`
        Maximum permitted value in the range (inclusive)

    Examples
    --------
    .. code-block:: python

        class TeenAgeYears(Integer):
            '''
            ASN.1 specification:

            TeenAgeYears ::= INTEGER (13 .. 19)
            '''
            subtypeSpec = ValueRangeConstraint(13, 19)

        # this will succeed
        teen_year = TeenAgeYears(18)

        # this will raise ValueConstraintError
        teen_year = TeenAgeYears(20)
    """
    def _testValue(self, value, idx):
        # reject anything lying outside the inclusive [start, stop] bounds
        outOfRange = value < self.start or value > self.stop
        if outOfRange:
            raise error.ValueConstraintError(value)

    def _setValues(self, values):
        # exactly two operands (lower and upper bound) are expected
        if len(values) != 2:
            raise error.PyAsn1Error(
                '%s: bad constraint values' % (self.__class__.__name__,)
            )
        self.start, self.stop = values
        # a reversed range can never be satisfied -- fail early
        if self.start > self.stop:
            raise error.PyAsn1Error(
                '%s: screwed constraint values (start > stop): %s > %s' % (
                    self.__class__.__name__,
                    self.start, self.stop
                )
            )
        AbstractConstraint._setValues(self, values)
+
+
class ValueSizeConstraint(ValueRangeConstraint):
    """Create a ValueSizeConstraint object.

    The ValueSizeConstraint satisfies any value whose size (length)
    falls within the inclusive range of permitted sizes.

    The ValueSizeConstraint object can be applied
    to :class:`~pyasn1.type.univ.BitString`,
    :class:`~pyasn1.type.univ.OctetString` (including
    all :ref:`character ASN.1 types <type.char>`),
    :class:`~pyasn1.type.univ.SequenceOf`
    and :class:`~pyasn1.type.univ.SetOf` types.

    Parameters
    ----------
    minimum: :class:`int`
        Minimum permitted size of the value (inclusive)

    maximum: :class:`int`
        Maximum permitted size of the value (inclusive)

    Examples
    --------
    .. code-block:: python

        class BaseballTeamRoster(SetOf):
            '''
            ASN.1 specification:

            BaseballTeamRoster ::= SET SIZE (1..25) OF PlayerNames
            '''
            componentType = PlayerNames()
            subtypeSpec = ValueSizeConstraint(1, 25)

        # this will succeed
        team = BaseballTeamRoster()
        team.extend(['Jan', 'Matej'])
        encode(team)

        # this will raise ValueConstraintError
        team = BaseballTeamRoster()
        team.extend(['Jan'] * 26)
        encode(team)

    Note
    ----
    Whenever ValueSizeConstraint is applied to mutable types
    (e.g. :class:`~pyasn1.type.univ.SequenceOf`,
    :class:`~pyasn1.type.univ.SetOf`), constraint
    validation only happens at the serialisation phase rather
    than schema instantiation phase (as it is with immutable
    types).
    """
    def _testValue(self, value, idx):
        # the range check applies to the value's length, not the value itself
        size = len(value)
        if size < self.start or size > self.stop:
            raise error.ValueConstraintError(value)
+
+
+class PermittedAlphabetConstraint(SingleValueConstraint):
+ """Create a PermittedAlphabetConstraint object.
+
+ The PermittedAlphabetConstraint satisfies any character
+ string for as long as all its characters are present in
+ the set of permitted characters.
+
+ Objects of this type are iterable (emitting constraint values) and
+ can act as operands for some arithmetic operations e.g. addition
+ and subtraction.
+
+ The PermittedAlphabetConstraint object can only be applied
+ to the :ref:`character ASN.1 types <type.char>` such as
+ :class:`~pyasn1.type.char.IA5String`.
+
+ Parameters
+ ----------
+ *alphabet: :class:`str`
+ Full set of characters permitted by this constraint object.
+
+ Example
+ -------
+ .. code-block:: python
+
+ class BooleanValue(IA5String):
+ '''
+ ASN.1 specification:
+
+ BooleanValue ::= IA5String (FROM ('T' | 'F'))
+ '''
+ subtypeSpec = PermittedAlphabetConstraint('T', 'F')
+
+ # this will succeed
+ truth = BooleanValue('T')
+ truth = BooleanValue('TF')
+
+ # this will raise ValueConstraintError
+ garbage = BooleanValue('TAF')
+
+ ASN.1 `FROM ... EXCEPT ...` clause can be modelled by combining multiple
+ PermittedAlphabetConstraint objects into one:
+
+ Example
+ -------
+ .. code-block:: python
+
+ class Lipogramme(IA5String):
+ '''
+ ASN.1 specification:
+
+ Lipogramme ::=
+ IA5String (FROM (ALL EXCEPT ("e"|"E")))
+ '''
+ subtypeSpec = (
+ PermittedAlphabetConstraint(*string.printable) -
+ PermittedAlphabetConstraint('e', 'E')
+ )
+
+ # this will succeed
+ lipogramme = Lipogramme('A work of fiction?')
+
+ # this will raise ValueConstraintError
+ lipogramme = Lipogramme('Eel')
+
+ Note
+ ----
+ Although `ConstraintsExclusion` object could seemingly be used for this
+ purpose, practically, for it to work, it needs to represent its operand
+ constraints as sets and intersect one with the other. That would require
+ the insight into the constraint values (and their types) that are otherwise
+ hidden inside the constraint object.
+
+ Therefore it's more practical to model `EXCEPT` clause at
+ `PermittedAlphabetConstraint` level instead.
+ """
+ def _setValues(self, values):
+ self._values = values
+ self._set = set(values)
+
+ def _testValue(self, value, idx):
+ if not self._set.issuperset(value):
+ raise error.ValueConstraintError(value)
+
+
+class ComponentPresentConstraint(AbstractConstraint):
+ """Create a ComponentPresentConstraint object.
+
+ The ComponentPresentConstraint is only satisfied when the value
+ is not `None`.
+
+ The ComponentPresentConstraint object is typically used with
+ `WithComponentsConstraint`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ present = ComponentPresentConstraint()
+
+ # this will succeed
+ present('whatever')
+
+ # this will raise ValueConstraintError
+ present(None)
+ """
+ def _setValues(self, values):
+ self._values = ('<must be present>',)
+
+ if values:
+ raise error.PyAsn1Error('No arguments expected')
+
+ def _testValue(self, value, idx):
+ if value is None:
+ raise error.ValueConstraintError(
+ 'Component is not present:')
+
+
+class ComponentAbsentConstraint(AbstractConstraint):
+ """Create a ComponentAbsentConstraint object.
+
+ The ComponentAbsentConstraint is only satisfied when the value
+ is `None`.
+
+ The ComponentAbsentConstraint object is typically used with
+ `WithComponentsConstraint`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ absent = ComponentAbsentConstraint()
+
+ # this will succeed
+ absent(None)
+
+ # this will raise ValueConstraintError
+ absent('whatever')
+ """
+ def _setValues(self, values):
+ self._values = ('<must be absent>',)
+
+ if values:
+ raise error.PyAsn1Error('No arguments expected')
+
+ def _testValue(self, value, idx):
+ if value is not None:
+ raise error.ValueConstraintError(
+ 'Component is not absent: %r' % value)
+
+
+class WithComponentsConstraint(AbstractConstraint):
+ """Create a WithComponentsConstraint object.
+
+ The `WithComponentsConstraint` satisfies any mapping object that has
+ constrained fields present or absent, what is indicated by
+ `ComponentPresentConstraint` and `ComponentAbsentConstraint`
+ objects respectively.
+
+ The `WithComponentsConstraint` object is typically applied
+ to :class:`~pyasn1.type.univ.Set` or
+ :class:`~pyasn1.type.univ.Sequence` types.
+
+ Parameters
+ ----------
+ *fields: :class:`tuple`
+ Zero or more tuples of (`field`, `constraint`) indicating constrained
+ fields.
+
+ Notes
+ -----
+ On top of the primary use of `WithComponentsConstraint` (ensuring presence
+ or absence of particular components of a :class:`~pyasn1.type.univ.Set` or
+ :class:`~pyasn1.type.univ.Sequence`), it is also possible to pass any other
+ constraint objects or their combinations. In case of scalar fields, these
+ constraints will be verified in addition to the constraints belonging to
+ scalar components themselves. However, formally, these additional
+ constraints do not change the type of these ASN.1 objects.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Item(Sequence): # Set is similar
+ '''
+ ASN.1 specification:
+
+ Item ::= SEQUENCE {
+ id INTEGER OPTIONAL,
+ name OCTET STRING OPTIONAL
+ } WITH COMPONENTS id PRESENT, name ABSENT | id ABSENT, name PRESENT
+ '''
+ componentType = NamedTypes(
+ OptionalNamedType('id', Integer()),
+ OptionalNamedType('name', OctetString())
+ )
+ withComponents = ConstraintsUnion(
+ WithComponentsConstraint(
+ ('id', ComponentPresentConstraint()),
+ ('name', ComponentAbsentConstraint())
+ ),
+ WithComponentsConstraint(
+ ('id', ComponentAbsentConstraint()),
+ ('name', ComponentPresentConstraint())
+ )
+ )
+
+ item = Item()
+
+ # This will succeed
+ item['id'] = 1
+
+ # This will succeed
+ item.reset()
+ item['name'] = 'John'
+
+ # This will fail (on encoding)
+ item.reset()
+        item['id'] = 1
+        item['name'] = 'John'
+ """
+ def _testValue(self, value, idx):
+ for field, constraint in self._values:
+ constraint(value.get(field))
+
+ def _setValues(self, values):
+ AbstractConstraint._setValues(self, values)
+
+
+# This is a bit kludgy, meaning two op modes within a single constraint
+class InnerTypeConstraint(AbstractConstraint):
+ """Value must satisfy the type and presence constraints"""
+
+ def _testValue(self, value, idx):
+ if self.__singleTypeConstraint:
+ self.__singleTypeConstraint(value)
+ elif self.__multipleTypeConstraint:
+ if idx not in self.__multipleTypeConstraint:
+ raise error.ValueConstraintError(value)
+ constraint, status = self.__multipleTypeConstraint[idx]
+ if status == 'ABSENT': # XXX presence is not checked!
+ raise error.ValueConstraintError(value)
+ constraint(value)
+
+ def _setValues(self, values):
+ self.__multipleTypeConstraint = {}
+ self.__singleTypeConstraint = None
+ for v in values:
+ if isinstance(v, tuple):
+ self.__multipleTypeConstraint[v[0]] = v[1], v[2]
+ else:
+ self.__singleTypeConstraint = v
+ AbstractConstraint._setValues(self, values)
+
+
+# Logic operations on constraints
+
+class ConstraintsExclusion(AbstractConstraint):
+ """Create a ConstraintsExclusion logic operator object.
+
+ The ConstraintsExclusion logic operator succeeds when the
+ value does *not* satisfy the operand constraint.
+
+ The ConstraintsExclusion object can be applied to
+ any constraint and logic operator object.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class LuckyNumber(Integer):
+ subtypeSpec = ConstraintsExclusion(
+ SingleValueConstraint(13)
+ )
+
+ # this will succeed
+ luckyNumber = LuckyNumber(12)
+
+ # this will raise ValueConstraintError
+ luckyNumber = LuckyNumber(13)
+
+ Note
+ ----
+ The `FROM ... EXCEPT ...` ASN.1 clause should be modeled by combining
+ constraint objects into one. See `PermittedAlphabetConstraint` for more
+ information.
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ try:
+ constraint(value, idx)
+
+ except error.ValueConstraintError:
+ continue
+
+ raise error.ValueConstraintError(value)
+
+ def _setValues(self, values):
+ AbstractConstraint._setValues(self, values)
+
+
+class AbstractConstraintSet(AbstractConstraint):
+
+ def __getitem__(self, idx):
+ return self._values[idx]
+
+ def __iter__(self):
+ return iter(self._values)
+
+ def __add__(self, value):
+ return self.__class__(*(self._values + (value,)))
+
+ def __radd__(self, value):
+ return self.__class__(*((value,) + self._values))
+
+ def __len__(self):
+ return len(self._values)
+
+ # Constraints inclusion in sets
+
+ def _setValues(self, values):
+ self._values = values
+ for constraint in values:
+ if constraint:
+ self._valueMap.add(constraint)
+ self._valueMap.update(constraint.getValueMap())
+
+
+class ConstraintsIntersection(AbstractConstraintSet):
+ """Create a ConstraintsIntersection logic operator object.
+
+ The ConstraintsIntersection logic operator only succeeds
+ if *all* its operands succeed.
+
+ The ConstraintsIntersection object can be applied to
+ any constraint and logic operator objects.
+
+ The ConstraintsIntersection object duck-types the immutable
+ container object like Python :py:class:`tuple`.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class CapitalAndSmall(IA5String):
+ '''
+ ASN.1 specification:
+
+ CapitalAndSmall ::=
+ IA5String (FROM ("A".."Z"|"a".."z"))
+ '''
+ subtypeSpec = ConstraintsIntersection(
+ PermittedAlphabetConstraint('A', 'Z'),
+ PermittedAlphabetConstraint('a', 'z')
+ )
+
+ # this will succeed
+ capital_and_small = CapitalAndSmall('Hello')
+
+ # this will raise ValueConstraintError
+ capital_and_small = CapitalAndSmall('hello')
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ constraint(value, idx)
+
+
+class ConstraintsUnion(AbstractConstraintSet):
+ """Create a ConstraintsUnion logic operator object.
+
+ The ConstraintsUnion logic operator succeeds if
+ *at least* a single operand succeeds.
+
+ The ConstraintsUnion object can be applied to
+ any constraint and logic operator objects.
+
+ The ConstraintsUnion object duck-types the immutable
+ container object like Python :py:class:`tuple`.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class CapitalOrSmall(IA5String):
+ '''
+ ASN.1 specification:
+
+ CapitalOrSmall ::=
+ IA5String (FROM ("A".."Z") | FROM ("a".."z"))
+ '''
+ subtypeSpec = ConstraintsUnion(
+ PermittedAlphabetConstraint('A', 'Z'),
+ PermittedAlphabetConstraint('a', 'z')
+ )
+
+ # this will succeed
+        capital_or_small = CapitalOrSmall('Hello')
+
+ # this will raise ValueConstraintError
+ capital_or_small = CapitalOrSmall('hello!')
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ try:
+ constraint(value, idx)
+ except error.ValueConstraintError:
+ pass
+ else:
+ return
+
+ raise error.ValueConstraintError(
+ 'all of %s failed for "%s"' % (self._values, value)
+ )
+
+# TODO:
+# refactor InnerTypeConstraint
+# add tests for type check
+# implement other constraint types
+# make constraint validation easy to skip
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/error.py b/contrib/python/pyasn1/py3/pyasn1/type/error.py
new file mode 100644
index 0000000000..0ff082abc2
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/error.py
@@ -0,0 +1,11 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1.error import PyAsn1Error
+
+
+class ValueConstraintError(PyAsn1Error):
+ pass
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/namedtype.py b/contrib/python/pyasn1/py3/pyasn1/type/namedtype.py
new file mode 100644
index 0000000000..8dbc81f3c7
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/namedtype.py
@@ -0,0 +1,561 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+__all__ = ['NamedType', 'OptionalNamedType', 'DefaultedNamedType',
+ 'NamedTypes']
+
+try:
+ any
+
+except NameError:
+ any = lambda x: bool(filter(bool, x))
+
+
+class NamedType(object):
+ """Create named field object for a constructed ASN.1 type.
+
+ The |NamedType| object represents a single name and ASN.1 type of a constructed ASN.1 type.
+
+ |NamedType| objects are immutable and duck-type Python :class:`tuple` objects
+ holding *name* and *asn1Object* components.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ asn1Object:
+ ASN.1 type object
+ """
+ isOptional = False
+ isDefaulted = False
+
+ def __init__(self, name, asn1Object, openType=None):
+ self.__name = name
+ self.__type = asn1Object
+ self.__nameAndType = name, asn1Object
+ self.__openType = openType
+
+ def __repr__(self):
+ representation = '%s=%r' % (self.name, self.asn1Object)
+
+ if self.openType:
+ representation += ', open type %r' % self.openType
+
+ return '<%s object, type %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__nameAndType == other
+
+ def __ne__(self, other):
+ return self.__nameAndType != other
+
+ def __lt__(self, other):
+ return self.__nameAndType < other
+
+ def __le__(self, other):
+ return self.__nameAndType <= other
+
+ def __gt__(self, other):
+ return self.__nameAndType > other
+
+ def __ge__(self, other):
+ return self.__nameAndType >= other
+
+ def __hash__(self):
+ return hash(self.__nameAndType)
+
+ def __getitem__(self, idx):
+ return self.__nameAndType[idx]
+
+ def __iter__(self):
+ return iter(self.__nameAndType)
+
+ @property
+ def name(self):
+ return self.__name
+
+ @property
+ def asn1Object(self):
+ return self.__type
+
+ @property
+ def openType(self):
+ return self.__openType
+
+ # Backward compatibility
+
+ def getName(self):
+ return self.name
+
+ def getType(self):
+ return self.asn1Object
+
+
+class OptionalNamedType(NamedType):
+ __doc__ = NamedType.__doc__
+
+ isOptional = True
+
+
+class DefaultedNamedType(NamedType):
+ __doc__ = NamedType.__doc__
+
+ isDefaulted = True
+
+
+class NamedTypes(object):
+ """Create a collection of named fields for a constructed ASN.1 type.
+
+ The NamedTypes object represents a collection of named fields of a constructed ASN.1 type.
+
+ *NamedTypes* objects are immutable and duck-type Python :class:`dict` objects
+ holding *name* as keys and ASN.1 type object as values.
+
+ Parameters
+ ----------
+ *namedTypes: :class:`~pyasn1.type.namedtype.NamedType`
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Description(Sequence):
+ '''
+ ASN.1 specification:
+
+ Description ::= SEQUENCE {
+ surname IA5String,
+ first-name IA5String OPTIONAL,
+ age INTEGER DEFAULT 40
+ }
+ '''
+ componentType = NamedTypes(
+ NamedType('surname', IA5String()),
+ OptionalNamedType('first-name', IA5String()),
+ DefaultedNamedType('age', Integer(40))
+ )
+
+ descr = Description()
+ descr['surname'] = 'Smith'
+ descr['first-name'] = 'John'
+ """
+ def __init__(self, *namedTypes, **kwargs):
+ self.__namedTypes = namedTypes
+ self.__namedTypesLen = len(self.__namedTypes)
+ self.__minTagSet = self.__computeMinTagSet()
+ self.__nameToPosMap = self.__computeNameToPosMap()
+ self.__tagToPosMap = self.__computeTagToPosMap()
+ self.__ambiguousTypes = 'terminal' not in kwargs and self.__computeAmbiguousTypes() or {}
+ self.__uniqueTagMap = self.__computeTagMaps(unique=True)
+ self.__nonUniqueTagMap = self.__computeTagMaps(unique=False)
+ self.__hasOptionalOrDefault = any([True for namedType in self.__namedTypes
+ if namedType.isDefaulted or namedType.isOptional])
+ self.__hasOpenTypes = any([True for namedType in self.__namedTypes
+ if namedType.openType])
+
+ self.__requiredComponents = frozenset(
+ [idx for idx, nt in enumerate(self.__namedTypes) if not nt.isOptional and not nt.isDefaulted]
+ )
+ self.__keys = frozenset([namedType.name for namedType in self.__namedTypes])
+ self.__values = tuple([namedType.asn1Object for namedType in self.__namedTypes])
+ self.__items = tuple([(namedType.name, namedType.asn1Object) for namedType in self.__namedTypes])
+
+ def __repr__(self):
+ representation = ', '.join(['%r' % x for x in self.__namedTypes])
+ return '<%s object, types %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__namedTypes == other
+
+ def __ne__(self, other):
+ return self.__namedTypes != other
+
+ def __lt__(self, other):
+ return self.__namedTypes < other
+
+ def __le__(self, other):
+ return self.__namedTypes <= other
+
+ def __gt__(self, other):
+ return self.__namedTypes > other
+
+ def __ge__(self, other):
+ return self.__namedTypes >= other
+
+ def __hash__(self):
+ return hash(self.__namedTypes)
+
+ def __getitem__(self, idx):
+ try:
+ return self.__namedTypes[idx]
+
+ except TypeError:
+ return self.__namedTypes[self.__nameToPosMap[idx]]
+
+ def __contains__(self, key):
+ return key in self.__nameToPosMap
+
+ def __iter__(self):
+ return (x[0] for x in self.__namedTypes)
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self.__namedTypesLen > 0
+ else:
+ def __bool__(self):
+ return self.__namedTypesLen > 0
+
+ def __len__(self):
+ return self.__namedTypesLen
+
+ # Python dict protocol
+
+ def values(self):
+ return self.__values
+
+ def keys(self):
+ return self.__keys
+
+ def items(self):
+ return self.__items
+
+ def clone(self):
+ return self.__class__(*self.__namedTypes)
+
+ class PostponedError(object):
+ def __init__(self, errorMsg):
+ self.__errorMsg = errorMsg
+
+ def __getitem__(self, item):
+ raise error.PyAsn1Error(self.__errorMsg)
+
+ def __computeTagToPosMap(self):
+ tagToPosMap = {}
+ for idx, namedType in enumerate(self.__namedTypes):
+ tagMap = namedType.asn1Object.tagMap
+ if isinstance(tagMap, NamedTypes.PostponedError):
+ return tagMap
+ if not tagMap:
+ continue
+ for _tagSet in tagMap.presentTypes:
+ if _tagSet in tagToPosMap:
+ return NamedTypes.PostponedError('Duplicate component tag %s at %s' % (_tagSet, namedType))
+ tagToPosMap[_tagSet] = idx
+
+ return tagToPosMap
+
+ def __computeNameToPosMap(self):
+ nameToPosMap = {}
+ for idx, namedType in enumerate(self.__namedTypes):
+ if namedType.name in nameToPosMap:
+ return NamedTypes.PostponedError('Duplicate component name %s at %s' % (namedType.name, namedType))
+ nameToPosMap[namedType.name] = idx
+
+ return nameToPosMap
+
+ def __computeAmbiguousTypes(self):
+ ambiguousTypes = {}
+ partialAmbiguousTypes = ()
+ for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))):
+ if namedType.isOptional or namedType.isDefaulted:
+ partialAmbiguousTypes = (namedType,) + partialAmbiguousTypes
+ else:
+ partialAmbiguousTypes = (namedType,)
+ if len(partialAmbiguousTypes) == len(self.__namedTypes):
+ ambiguousTypes[idx] = self
+ else:
+ ambiguousTypes[idx] = NamedTypes(*partialAmbiguousTypes, **dict(terminal=True))
+ return ambiguousTypes
+
+ def getTypeByPosition(self, idx):
+ """Return ASN.1 type object by its position in fields set.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ :
+ ASN.1 type
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__namedTypes[idx].asn1Object
+
+ except IndexError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByType(self, tagSet):
+ """Return field position by its ASN.1 type.
+
+ Parameters
+ ----------
+        tagSet: :class:`~pyasn1.type.tag.TagSet`
+ ASN.1 tag set distinguishing one ASN.1 type from others.
+
+ Returns
+ -------
+ : :py:class:`int`
+ ASN.1 type position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes*
+ """
+ try:
+ return self.__tagToPosMap[tagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('Type %s not found' % (tagSet,))
+
+ def getNameByPosition(self, idx):
+ """Return field name by its position in fields set.
+
+ Parameters
+ ----------
+        idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ : :py:class:`str`
+ Field name
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+            If given position is out of fields range
+ """
+ try:
+ return self.__namedTypes[idx].name
+
+ except IndexError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByName(self, name):
+        """Return field position by field name.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ Returns
+ -------
+ : :py:class:`int`
+ Field position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *name* is not present or not unique within callee *NamedTypes*
+ """
+ try:
+ return self.__nameToPosMap[name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ def getTagMapNearPosition(self, idx):
+ """Return ASN.1 types that are allowed at or past given field position.
+
+ Some ASN.1 serialisation allow for skipping optional and defaulted fields.
+ Some constructed ASN.1 types allow reordering of the fields. When recovering
+ such objects it may be important to know which types can possibly be
+ present at any given position in the field sets.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tagmap.TagMap`
+            Map of ASN.1 types allowed at given field position
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__ambiguousTypes[idx].tagMap
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionNearType(self, tagSet, idx):
+ """Return the closest field position where given ASN.1 type is allowed.
+
+ Some ASN.1 serialisation allow for skipping optional and defaulted fields.
+ Some constructed ASN.1 types allow reordering of the fields. When recovering
+ such objects it may be important to know at which field position, in field set,
+ given *tagSet* is allowed at or past *idx* position.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ ASN.1 type which field position to look up
+
+ idx: :py:class:`int`
+ Field position at or past which to perform ASN.1 type look up
+
+ Returns
+ -------
+ : :py:class:`int`
+ Field position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *tagSet* is not present or not unique within callee *NamedTypes*
+ or *idx* is out of fields range
+ """
+ try:
+ return idx + self.__ambiguousTypes[idx].getPositionByType(tagSet)
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def __computeMinTagSet(self):
+ minTagSet = None
+ for namedType in self.__namedTypes:
+ asn1Object = namedType.asn1Object
+
+ try:
+ tagSet = asn1Object.minTagSet
+
+ except AttributeError:
+ tagSet = asn1Object.tagSet
+
+ if minTagSet is None or tagSet < minTagSet:
+ minTagSet = tagSet
+
+ return minTagSet or tag.TagSet()
+
+ @property
+ def minTagSet(self):
+ """Return the minimal TagSet among ASN.1 type in callee *NamedTypes*.
+
+ Some ASN.1 types/serialisation protocols require ASN.1 types to be
+ arranged based on their numerical tag value. The *minTagSet* property
+ returns that.
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tagset.TagSet`
+ Minimal TagSet among ASN.1 types in callee *NamedTypes*
+ """
+ return self.__minTagSet
+
+ def __computeTagMaps(self, unique):
+ presentTypes = {}
+ skipTypes = {}
+ defaultType = None
+ for namedType in self.__namedTypes:
+ tagMap = namedType.asn1Object.tagMap
+ if isinstance(tagMap, NamedTypes.PostponedError):
+ return tagMap
+ for tagSet in tagMap:
+ if unique and tagSet in presentTypes:
+ return NamedTypes.PostponedError('Non-unique tagSet %s of %s at %s' % (tagSet, namedType, self))
+ presentTypes[tagSet] = namedType.asn1Object
+ skipTypes.update(tagMap.skipTypes)
+
+ if defaultType is None:
+ defaultType = tagMap.defaultType
+ elif tagMap.defaultType is not None:
+ return NamedTypes.PostponedError('Duplicate default ASN.1 type at %s' % (self,))
+
+ return tagmap.TagMap(presentTypes, skipTypes, defaultType)
+
+ @property
+ def tagMap(self):
+ """Return a *TagMap* object from tags and types recursively.
+
+ Return a :class:`~pyasn1.type.tagmap.TagMap` object by
+ combining tags from *TagMap* objects of children types and
+ associating them with their immediate child type.
+
+ Example
+ -------
+ .. code-block:: python
+
+ OuterType ::= CHOICE {
+ innerType INTEGER
+ }
+
+ Calling *.tagMap* on *OuterType* will yield a map like this:
+
+ .. code-block:: python
+
+ Integer.tagSet -> Choice
+ """
+ return self.__nonUniqueTagMap
+
+ @property
+ def tagMapUnique(self):
+ """Return a *TagMap* object from unique tags and types recursively.
+
+ Return a :class:`~pyasn1.type.tagmap.TagMap` object by
+ combining tags from *TagMap* objects of children types and
+ associating them with their immediate child type.
+
+ Example
+ -------
+ .. code-block:: python
+
+ OuterType ::= CHOICE {
+ innerType INTEGER
+ }
+
+ Calling *.tagMapUnique* on *OuterType* will yield a map like this:
+
+ .. code-block:: python
+
+ Integer.tagSet -> Choice
+
+ Note
+ ----
+
+ Duplicate *TagSet* objects found in the tree of children
+ types would cause error.
+ """
+ return self.__uniqueTagMap
+
+ @property
+ def hasOptionalOrDefault(self):
+ return self.__hasOptionalOrDefault
+
+ @property
+ def hasOpenTypes(self):
+ return self.__hasOpenTypes
+
+ @property
+ def namedTypes(self):
+ return tuple(self.__namedTypes)
+
+ @property
+ def requiredComponents(self):
+ return self.__requiredComponents
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/namedval.py b/contrib/python/pyasn1/py3/pyasn1/type/namedval.py
new file mode 100644
index 0000000000..46a6496d03
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/namedval.py
@@ -0,0 +1,192 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+# ASN.1 named integers
+#
+from pyasn1 import error
+
+__all__ = ['NamedValues']
+
+
+class NamedValues(object):
+ """Create named values object.
+
+ The |NamedValues| object represents a collection of string names
+ associated with numeric IDs. These objects are used for giving
+ names to otherwise numerical values.
+
+ |NamedValues| objects are immutable and duck-type Python
+ :class:`dict` object mapping ID to name and vice-versa.
+
+ Parameters
+ ----------
+ *args: variable number of two-element :py:class:`tuple`
+
+ name: :py:class:`str`
+ Value label
+
+ value: :py:class:`int`
+ Numeric value
+
+ Keyword Args
+ ------------
+ name: :py:class:`str`
+ Value label
+
+ value: :py:class:`int`
+ Numeric value
+
+ Examples
+ --------
+
+ .. code-block:: pycon
+
+ >>> nv = NamedValues('a', 'b', ('c', 0), d=1)
+ >>> nv
+        {'c': 0, 'd': 1, 'a': 2, 'b': 3}
+ >>> nv[0]
+ 'c'
+ >>> nv['a']
+ 2
+ """
+ def __init__(self, *args, **kwargs):
+ self.__names = {}
+ self.__numbers = {}
+
+ anonymousNames = []
+
+ for namedValue in args:
+ if isinstance(namedValue, (tuple, list)):
+ try:
+ name, number = namedValue
+
+ except ValueError:
+ raise error.PyAsn1Error('Not a proper attribute-value pair %r' % (namedValue,))
+
+ else:
+ anonymousNames.append(namedValue)
+ continue
+
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ if number in self.__numbers:
+ raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ for name, number in kwargs.items():
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ if number in self.__numbers:
+ raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ if anonymousNames:
+
+ number = self.__numbers and max(self.__numbers) + 1 or 0
+
+ for name in anonymousNames:
+
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ number += 1
+
+ def __repr__(self):
+ representation = ', '.join(['%s=%d' % x for x in self.items()])
+
+ if len(representation) > 64:
+ representation = representation[:32] + '...' + representation[-32:]
+
+ return '<%s object, enums %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return dict(self) == other
+
+ def __ne__(self, other):
+ return dict(self) != other
+
+ def __lt__(self, other):
+ return dict(self) < other
+
+ def __le__(self, other):
+ return dict(self) <= other
+
+ def __gt__(self, other):
+ return dict(self) > other
+
+ def __ge__(self, other):
+ return dict(self) >= other
+
+ def __hash__(self):
+ return hash(self.items())
+
+ # Python dict protocol (read-only)
+
+ def __getitem__(self, key):
+ try:
+ return self.__numbers[key]
+
+ except KeyError:
+ return self.__names[key]
+
+ def __len__(self):
+ return len(self.__names)
+
+ def __contains__(self, key):
+ return key in self.__names or key in self.__numbers
+
+ def __iter__(self):
+ return iter(self.__names)
+
+ def values(self):
+ return iter(self.__numbers)
+
+ def keys(self):
+ return iter(self.__names)
+
+ def items(self):
+ for name in self.__names:
+ yield name, self.__names[name]
+
+ # support merging
+
+ def __add__(self, namedValues):
+ return self.__class__(*tuple(self.items()) + tuple(namedValues.items()))
+
+ # XXX clone/subtype?
+
+ def clone(self, *args, **kwargs):
+ new = self.__class__(*args, **kwargs)
+ return self + new
+
+ # legacy protocol
+
+ def getName(self, value):
+ if value in self.__numbers:
+ return self.__numbers[value]
+
+ def getValue(self, name):
+ if name in self.__names:
+ return self.__names[name]
+
+ def getValues(self, *names):
+ try:
+ return [self.__names[name] for name in names]
+
+ except KeyError:
+ raise error.PyAsn1Error(
+ 'Unknown bit identifier(s): %s' % (set(names).difference(self.__names),)
+ )
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/opentype.py b/contrib/python/pyasn1/py3/pyasn1/type/opentype.py
new file mode 100644
index 0000000000..5a15f896da
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/opentype.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+
+__all__ = ['OpenType']
+
+
+class OpenType(object):
+ """Create ASN.1 type map indexed by a value
+
+ The *OpenType* object models an untyped field of a constructed ASN.1
+ type. In ASN.1 syntax it is usually represented by the
+ `ANY DEFINED BY` for scalars or `SET OF ANY DEFINED BY`,
+ `SEQUENCE OF ANY DEFINED BY` for container types clauses. Typically
+ used together with :class:`~pyasn1.type.univ.Any` object.
+
+ OpenType objects duck-type a read-only Python :class:`dict` objects,
+ however the passed `typeMap` is not copied, but stored by reference.
+ That means the user can manipulate `typeMap` at run time having this
+ reflected on *OpenType* object behavior.
+
+ The |OpenType| class models an untyped field of a constructed ASN.1
+ type. In ASN.1 syntax it is usually represented by the
+ `ANY DEFINED BY` for scalars or `SET OF ANY DEFINED BY`,
+ `SEQUENCE OF ANY DEFINED BY` for container types clauses. Typically
+ used with :class:`~pyasn1.type.univ.Any` type.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ typeMap: :py:class:`dict`
+ A map of value->ASN.1 type. It's stored by reference and can be
+ mutated later to register new mappings.
+
+ Examples
+ --------
+
+ For untyped scalars:
+
+ .. code-block:: python
+
+ openType = OpenType(
+ 'id', {1: Integer(),
+ 2: OctetString()}
+ )
+ Sequence(
+ componentType=NamedTypes(
+ NamedType('id', Integer()),
+ NamedType('blob', Any(), openType=openType)
+ )
+ )
+
+ For untyped `SET OF` or `SEQUENCE OF` vectors:
+
+ .. code-block:: python
+
+ openType = OpenType(
+ 'id', {1: Integer(),
+ 2: OctetString()}
+ )
+ Sequence(
+ componentType=NamedTypes(
+ NamedType('id', Integer()),
+ NamedType('blob', SetOf(componentType=Any()),
+ openType=openType)
+ )
+ )
+ """
+
+ def __init__(self, name, typeMap=None):
+ self.__name = name
+ if typeMap is None:
+ self.__typeMap = {}
+ else:
+ self.__typeMap = typeMap
+
+ @property
+ def name(self):
+ return self.__name
+
+ # Python dict protocol
+
+ def values(self):
+ return self.__typeMap.values()
+
+ def keys(self):
+ return self.__typeMap.keys()
+
+ def items(self):
+ return self.__typeMap.items()
+
+ def __contains__(self, key):
+ return key in self.__typeMap
+
+ def __getitem__(self, key):
+ return self.__typeMap[key]
+
+ def __iter__(self):
+ return iter(self.__typeMap)
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/tag.py b/contrib/python/pyasn1/py3/pyasn1/type/tag.py
new file mode 100644
index 0000000000..a21a405eb1
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/tag.py
@@ -0,0 +1,335 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+
+__all__ = ['tagClassUniversal', 'tagClassApplication', 'tagClassContext',
+ 'tagClassPrivate', 'tagFormatSimple', 'tagFormatConstructed',
+ 'tagCategoryImplicit', 'tagCategoryExplicit',
+ 'tagCategoryUntagged', 'Tag', 'TagSet']
+
+#: Identifier for ASN.1 class UNIVERSAL
+tagClassUniversal = 0x00
+
+#: Identifier for ASN.1 class APPLICATION
+tagClassApplication = 0x40
+
+#: Identifier for ASN.1 class context-specific
+tagClassContext = 0x80
+
+#: Identifier for ASN.1 class private
+tagClassPrivate = 0xC0
+
+#: Identifier for "simple" ASN.1 structure (e.g. scalar)
+tagFormatSimple = 0x00
+
+#: Identifier for "constructed" ASN.1 structure (e.g. may have inner components)
+tagFormatConstructed = 0x20
+
+tagCategoryImplicit = 0x01
+tagCategoryExplicit = 0x02
+tagCategoryUntagged = 0x04
+
+
+class Tag(object):
+ """Create ASN.1 tag
+
+ Represents ASN.1 tag that can be attached to a ASN.1 type to make
+ types distinguishable from each other.
+
+ *Tag* objects are immutable and duck-type Python :class:`tuple` objects
+ holding three integer components of a tag.
+
+ Parameters
+ ----------
+ tagClass: :py:class:`int`
+ Tag *class* value
+
+ tagFormat: :py:class:`int`
+ Tag *format* value
+
+ tagId: :py:class:`int`
+ Tag ID value
+ """
+ def __init__(self, tagClass, tagFormat, tagId):
+ if tagId < 0:
+ raise error.PyAsn1Error('Negative tag ID (%s) not allowed' % tagId)
+ self.__tagClass = tagClass
+ self.__tagFormat = tagFormat
+ self.__tagId = tagId
+ self.__tagClassId = tagClass, tagId
+ self.__hash = hash(self.__tagClassId)
+
+ def __repr__(self):
+ representation = '[%s:%s:%s]' % (
+ self.__tagClass, self.__tagFormat, self.__tagId)
+ return '<%s object, tag %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__tagClassId == other
+
+ def __ne__(self, other):
+ return self.__tagClassId != other
+
+ def __lt__(self, other):
+ return self.__tagClassId < other
+
+ def __le__(self, other):
+ return self.__tagClassId <= other
+
+ def __gt__(self, other):
+ return self.__tagClassId > other
+
+ def __ge__(self, other):
+ return self.__tagClassId >= other
+
+ def __hash__(self):
+ return self.__hash
+
+ def __getitem__(self, idx):
+ if idx == 0:
+ return self.__tagClass
+ elif idx == 1:
+ return self.__tagFormat
+ elif idx == 2:
+ return self.__tagId
+ else:
+ raise IndexError()
+
+ def __iter__(self):
+ yield self.__tagClass
+ yield self.__tagFormat
+ yield self.__tagId
+
+ def __and__(self, otherTag):
+ return self.__class__(self.__tagClass & otherTag.tagClass,
+ self.__tagFormat & otherTag.tagFormat,
+ self.__tagId & otherTag.tagId)
+
+ def __or__(self, otherTag):
+ return self.__class__(self.__tagClass | otherTag.tagClass,
+ self.__tagFormat | otherTag.tagFormat,
+ self.__tagId | otherTag.tagId)
+
+ @property
+ def tagClass(self):
+ """ASN.1 tag class
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag class
+ """
+ return self.__tagClass
+
+ @property
+ def tagFormat(self):
+ """ASN.1 tag format
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag format
+ """
+ return self.__tagFormat
+
+ @property
+ def tagId(self):
+ """ASN.1 tag ID
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag ID
+ """
+ return self.__tagId
+
+
+class TagSet(object):
+ """Create a collection of ASN.1 tags
+
+ Represents a combination of :class:`~pyasn1.type.tag.Tag` objects
+ that can be attached to a ASN.1 type to make types distinguishable
+ from each other.
+
+ *TagSet* objects are immutable and duck-type Python :class:`tuple` objects
+ holding arbitrary number of :class:`~pyasn1.type.tag.Tag` objects.
+
+ Parameters
+ ----------
+ baseTag: :class:`~pyasn1.type.tag.Tag`
+ Base *Tag* object. This tag survives IMPLICIT tagging.
+
+ *superTags: :class:`~pyasn1.type.tag.Tag`
+ Additional *Tag* objects taking part in subtyping.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class OrderNumber(NumericString):
+ '''
+ ASN.1 specification
+
+ Order-number ::=
+ [APPLICATION 5] IMPLICIT NumericString
+ '''
+ tagSet = NumericString.tagSet.tagImplicitly(
+ Tag(tagClassApplication, tagFormatSimple, 5)
+ )
+
+ orderNumber = OrderNumber('1234')
+ """
+ def __init__(self, baseTag=(), *superTags):
+ self.__baseTag = baseTag
+ self.__superTags = superTags
+ self.__superTagsClassId = tuple(
+ [(superTag.tagClass, superTag.tagId) for superTag in superTags]
+ )
+ self.__lenOfSuperTags = len(superTags)
+ self.__hash = hash(self.__superTagsClassId)
+
+ def __repr__(self):
+ representation = '-'.join(['%s:%s:%s' % (x.tagClass, x.tagFormat, x.tagId)
+ for x in self.__superTags])
+ if representation:
+ representation = 'tags ' + representation
+ else:
+ representation = 'untagged'
+
+ return '<%s object, %s>' % (self.__class__.__name__, representation)
+
+ def __add__(self, superTag):
+ return self.__class__(self.__baseTag, *self.__superTags + (superTag,))
+
+ def __radd__(self, superTag):
+ return self.__class__(self.__baseTag, *(superTag,) + self.__superTags)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.__class__(self.__baseTag, *self.__superTags[i])
+ else:
+ return self.__superTags[i]
+
+ def __eq__(self, other):
+ return self.__superTagsClassId == other
+
+ def __ne__(self, other):
+ return self.__superTagsClassId != other
+
+ def __lt__(self, other):
+ return self.__superTagsClassId < other
+
+ def __le__(self, other):
+ return self.__superTagsClassId <= other
+
+ def __gt__(self, other):
+ return self.__superTagsClassId > other
+
+ def __ge__(self, other):
+ return self.__superTagsClassId >= other
+
+ def __hash__(self):
+ return self.__hash
+
+ def __len__(self):
+ return self.__lenOfSuperTags
+
+ @property
+ def baseTag(self):
+ """Return base ASN.1 tag
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.Tag`
+ Base tag of this *TagSet*
+ """
+ return self.__baseTag
+
+ @property
+ def superTags(self):
+ """Return ASN.1 tags
+
+ Returns
+ -------
+ : :py:class:`tuple`
+ Tuple of :class:`~pyasn1.type.tag.Tag` objects that this *TagSet* contains
+ """
+ return self.__superTags
+
+ def tagExplicitly(self, superTag):
+ """Return explicitly tagged *TagSet*
+
+ Create a new *TagSet* representing callee *TagSet* explicitly tagged
+ with passed tag(s). With explicit tagging mode, new tags are appended
+ to existing tag(s).
+
+ Parameters
+ ----------
+ superTag: :class:`~pyasn1.type.tag.Tag`
+ *Tag* object to tag this *TagSet*
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ New *TagSet* object
+ """
+ if superTag.tagClass == tagClassUniversal:
+ raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")
+ if superTag.tagFormat != tagFormatConstructed:
+ superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId)
+ return self + superTag
+
+ def tagImplicitly(self, superTag):
+ """Return implicitly tagged *TagSet*
+
+ Create a new *TagSet* representing callee *TagSet* implicitly tagged
+ with passed tag(s). With implicit tagging mode, new tag(s) replace the
+ last existing tag.
+
+ Parameters
+ ----------
+ superTag: :class:`~pyasn1.type.tag.Tag`
+ *Tag* object to tag this *TagSet*
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ New *TagSet* object
+ """
+ if self.__superTags:
+ superTag = Tag(superTag.tagClass, self.__superTags[-1].tagFormat, superTag.tagId)
+ return self[:-1] + superTag
+
+ def isSuperTagSetOf(self, tagSet):
+ """Test type relationship against given *TagSet*
+
+ The callee is considered to be a supertype of given *TagSet*
+ tag-wise if all tags in *TagSet* are present in the callee and
+ they are in the same order.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ *TagSet* object to evaluate against the callee
+
+ Returns
+ -------
+ : :py:class:`bool`
+ :obj:`True` if callee is a supertype of *tagSet*
+ """
+ if len(tagSet) < self.__lenOfSuperTags:
+ return False
+ return self.__superTags == tagSet[:self.__lenOfSuperTags]
+
+ # Backward compatibility
+
+ def getBaseTag(self):
+ return self.__baseTag
+
+def initTagSet(tag):
+ return TagSet(tag, tag)
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/tagmap.py b/contrib/python/pyasn1/py3/pyasn1/type/tagmap.py
new file mode 100644
index 0000000000..2f0e660264
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/tagmap.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+from pyasn1 import error
+
+__all__ = ['TagMap']
+
+
+class TagMap(object):
+ """Map *TagSet* objects to ASN.1 types
+
+ Create an object mapping *TagSet* object to ASN.1 type.
+
+ *TagMap* objects are immutable and duck-type read-only Python
+ :class:`dict` objects holding *TagSet* objects as keys and ASN.1
+ type objects as values.
+
+ Parameters
+ ----------
+ presentTypes: :py:class:`dict`
+ Map of :class:`~pyasn1.type.tag.TagSet` to ASN.1 objects considered
+ as being unconditionally present in the *TagMap*.
+
+ skipTypes: :py:class:`dict`
+ A collection of :class:`~pyasn1.type.tag.TagSet` objects considered
+ as absent in the *TagMap* even when *defaultType* is present.
+
+ defaultType: ASN.1 type object
+ An ASN.1 type object callee *TagMap* returns for any *TagSet* key not present
+ in *presentTypes* (unless given key is present in *skipTypes*).
+ """
+ def __init__(self, presentTypes=None, skipTypes=None, defaultType=None):
+ self.__presentTypes = presentTypes or {}
+ self.__skipTypes = skipTypes or {}
+ self.__defaultType = defaultType
+
+ def __contains__(self, tagSet):
+ return (tagSet in self.__presentTypes or
+ self.__defaultType is not None and tagSet not in self.__skipTypes)
+
+ def __getitem__(self, tagSet):
+ try:
+ return self.__presentTypes[tagSet]
+ except KeyError:
+ if self.__defaultType is None:
+ raise KeyError()
+ elif tagSet in self.__skipTypes:
+ raise error.PyAsn1Error('Key in negative map')
+ else:
+ return self.__defaultType
+
+ def __iter__(self):
+ return iter(self.__presentTypes)
+
+ def __repr__(self):
+ representation = '%s object' % self.__class__.__name__
+
+ if self.__presentTypes:
+ representation += ', present %s' % repr(self.__presentTypes)
+
+ if self.__skipTypes:
+ representation += ', skip %s' % repr(self.__skipTypes)
+
+ if self.__defaultType is not None:
+ representation += ', default %s' % repr(self.__defaultType)
+
+ return '<%s>' % representation
+
+ @property
+ def presentTypes(self):
+ """Return *TagSet* to ASN.1 type map present in callee *TagMap*"""
+ return self.__presentTypes
+
+ @property
+ def skipTypes(self):
+ """Return *TagSet* collection unconditionally absent in callee *TagMap*"""
+ return self.__skipTypes
+
+ @property
+ def defaultType(self):
+ """Return default ASN.1 type being returned for any missing *TagSet*"""
+ return self.__defaultType
+
+ # Backward compatibility
+
+ def getPosMap(self):
+ return self.presentTypes
+
+ def getNegMap(self):
+ return self.skipTypes
+
+ def getDef(self):
+ return self.defaultType
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/univ.py b/contrib/python/pyasn1/py3/pyasn1/type/univ.py
new file mode 100644
index 0000000000..c5d0778096
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/univ.py
@@ -0,0 +1,3305 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import math
+import sys
+
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat import integer
+from pyasn1.compat import octets
+from pyasn1.type import base
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+NoValue = base.NoValue
+noValue = NoValue()
+
+__all__ = ['Integer', 'Boolean', 'BitString', 'OctetString', 'Null',
+ 'ObjectIdentifier', 'Real', 'Enumerated',
+ 'SequenceOfAndSetOfBase', 'SequenceOf', 'SetOf',
+ 'SequenceAndSetBase', 'Sequence', 'Set', 'Choice', 'Any',
+ 'NoValue', 'noValue']
+
+# "Simple" ASN.1 types (yet incomplete)
+
+
+class Integer(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| class
+ instance. If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class ErrorCode(Integer):
+ '''
+ ASN.1 specification:
+
+ ErrorCode ::=
+ INTEGER { disk-full(1), no-disk(-1),
+ disk-not-formatted(2) }
+
+ error ErrorCode ::= disk-full
+ '''
+ namedValues = NamedValues(
+ ('disk-full', 1), ('no-disk', -1),
+ ('disk-not-formatted', 2)
+ )
+
+ error = ErrorCode('disk-full')
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ def __init__(self, value=noValue, **kwargs):
+ if 'namedValues' not in kwargs:
+ kwargs['namedValues'] = self.namedValues
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ def __and__(self, value):
+ return self.clone(self._value & value)
+
+ def __rand__(self, value):
+ return self.clone(value & self._value)
+
+ def __or__(self, value):
+ return self.clone(self._value | value)
+
+ def __ror__(self, value):
+ return self.clone(value | self._value)
+
+ def __xor__(self, value):
+ return self.clone(self._value ^ value)
+
+ def __rxor__(self, value):
+ return self.clone(value ^ self._value)
+
+ def __lshift__(self, value):
+ return self.clone(self._value << value)
+
+ def __rshift__(self, value):
+ return self.clone(self._value >> value)
+
+ def __add__(self, value):
+ return self.clone(self._value + value)
+
+ def __radd__(self, value):
+ return self.clone(value + self._value)
+
+ def __sub__(self, value):
+ return self.clone(self._value - value)
+
+ def __rsub__(self, value):
+ return self.clone(value - self._value)
+
+ def __mul__(self, value):
+ return self.clone(self._value * value)
+
+ def __rmul__(self, value):
+ return self.clone(value * self._value)
+
+ def __mod__(self, value):
+ return self.clone(self._value % value)
+
+ def __rmod__(self, value):
+ return self.clone(value % self._value)
+
+ def __pow__(self, value, modulo=None):
+ return self.clone(pow(self._value, value, modulo))
+
+ def __rpow__(self, value):
+ return self.clone(pow(value, self._value))
+
+ def __floordiv__(self, value):
+ return self.clone(self._value // value)
+
+ def __rfloordiv__(self, value):
+ return self.clone(value // self._value)
+
+ if sys.version_info[0] <= 2:
+ def __div__(self, value):
+ if isinstance(value, float):
+ return Real(self._value / value)
+ else:
+ return self.clone(self._value / value)
+
+ def __rdiv__(self, value):
+ if isinstance(value, float):
+ return Real(value / self._value)
+ else:
+ return self.clone(value / self._value)
+ else:
+ def __truediv__(self, value):
+ return Real(self._value / value)
+
+ def __rtruediv__(self, value):
+ return Real(value / self._value)
+
+ def __divmod__(self, value):
+ return self.clone(divmod(self._value, value))
+
+ def __rdivmod__(self, value):
+ return self.clone(divmod(value, self._value))
+
+ __hash__ = base.SimpleAsn1Type.__hash__
+
+ def __int__(self):
+ return int(self._value)
+
+ if sys.version_info[0] <= 2:
+ def __long__(self):
+ return long(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ def __abs__(self):
+ return self.clone(abs(self._value))
+
+ def __index__(self):
+ return int(self._value)
+
+ def __pos__(self):
+ return self.clone(+self._value)
+
+ def __neg__(self):
+ return self.clone(-self._value)
+
+ def __invert__(self):
+ return self.clone(~self._value)
+
+ def __round__(self, n=0):
+ r = round(self._value, n)
+ if n:
+ return self.clone(r)
+ else:
+ return r
+
+ def __floor__(self):
+ return math.floor(self._value)
+
+ def __ceil__(self):
+ return math.ceil(self._value)
+
+ def __trunc__(self):
+ return self.clone(math.trunc(self._value))
+
+ def __lt__(self, value):
+ return self._value < value
+
+ def __le__(self, value):
+ return self._value <= value
+
+ def __eq__(self, value):
+ return self._value == value
+
+ def __ne__(self, value):
+ return self._value != value
+
+ def __gt__(self, value):
+ return self._value > value
+
+ def __ge__(self, value):
+ return self._value >= value
+
+ def prettyIn(self, value):
+ try:
+ return int(value)
+
+ except ValueError:
+ try:
+ return self.namedValues[value]
+
+ except KeyError:
+ raise error.PyAsn1Error(
+ 'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
+ )
+
+ def prettyOut(self, value):
+ try:
+ return str(self.namedValues[value])
+
+ except KeyError:
+ return str(value)
+
+ # backward compatibility
+
+ def getNamedValues(self):
+ return self.namedValues
+
+
+class Boolean(Integer):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| class
+ instance. If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s).Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class RoundResult(Boolean):
+ '''
+ ASN.1 specification:
+
+ RoundResult ::= BOOLEAN
+
+ ok RoundResult ::= TRUE
+ ko RoundResult ::= FALSE
+ '''
+ ok = RoundResult(True)
+ ko = RoundResult(False)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = Integer.subtypeSpec + constraint.SingleValueConstraint(0, 1)
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues(('False', 0), ('True', 1))
+
+ # Optimization for faster codec lookup
+ typeId = Integer.getTypeId()
+
+if sys.version_info[0] < 3:
+ SizedIntegerBase = long
+else:
+ SizedIntegerBase = int
+
+
+class SizedInteger(SizedIntegerBase):
+ bitLength = leadingZeroBits = None
+
+ def setBitLength(self, bitLength):
+ self.bitLength = bitLength
+ self.leadingZeroBits = max(bitLength - integer.bitLength(self), 0)
+ return self
+
+ def __len__(self):
+ if self.bitLength is None:
+ self.setBitLength(integer.bitLength(self))
+
+ return self.bitLength
+
+
+class BitString(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type both Python :class:`tuple` (as a tuple
+ of bits) and :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal representing binary
+ or hexadecimal number or sequence of integer bits or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Rights(BitString):
+ '''
+ ASN.1 specification:
+
+ Rights ::= BIT STRING { user-read(0), user-write(1),
+ group-read(2), group-write(3),
+ other-read(4), other-write(5) }
+
+ group1 Rights ::= { group-read, group-write }
+ group2 Rights ::= '0011'B
+ group3 Rights ::= '3'H
+ '''
+ namedValues = NamedValues(
+ ('user-read', 0), ('user-write', 1),
+ ('group-read', 2), ('group-write', 3),
+ ('other-read', 4), ('other-write', 5)
+ )
+
+ group1 = Rights(('group-read', 'group-write'))
+ group2 = Rights('0011')
+ group3 = Rights(0x3)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ defaultBinValue = defaultHexValue = noValue
+
+ def __init__(self, value=noValue, **kwargs):
+ if value is noValue:
+ if kwargs:
+ try:
+ value = self.fromBinaryString(kwargs.pop('binValue'), internalFormat=True)
+
+ except KeyError:
+ pass
+
+ try:
+ value = self.fromHexString(kwargs.pop('hexValue'), internalFormat=True)
+
+ except KeyError:
+ pass
+
+ if value is noValue:
+ if self.defaultBinValue is not noValue:
+ value = self.fromBinaryString(self.defaultBinValue, internalFormat=True)
+
+ elif self.defaultHexValue is not noValue:
+ value = self.fromHexString(self.defaultHexValue, internalFormat=True)
+
+ if 'namedValues' not in kwargs:
+ kwargs['namedValues'] = self.namedValues
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ def __str__(self):
+ return self.asBinary()
+
+ def __eq__(self, other):
+ other = self.prettyIn(other)
+ return self is other or self._value == other and len(self._value) == len(other)
+
+ def __ne__(self, other):
+ other = self.prettyIn(other)
+ return self._value != other or len(self._value) != len(other)
+
+ def __lt__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) < len(other) or len(self._value) == len(other) and self._value < other
+
+ def __le__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) <= len(other) or len(self._value) == len(other) and self._value <= other
+
+ def __gt__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) > len(other) or len(self._value) == len(other) and self._value > other
+
+ def __ge__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) >= len(other) or len(self._value) == len(other) and self._value >= other
+
+ # Immutable sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone([self[x] for x in range(*i.indices(len(self)))])
+ else:
+ length = len(self._value) - 1
+ if i > length or i < 0:
+ raise IndexError('bit index out of range')
+ return (self._value >> (length - i)) & 1
+
+ def __iter__(self):
+ length = len(self._value)
+ while length:
+ length -= 1
+ yield (self._value >> length) & 1
+
+ def __reversed__(self):
+ return reversed(tuple(self))
+
+ # arithmetic operators
+
+ def __add__(self, value):
+ value = self.prettyIn(value)
+ return self.clone(SizedInteger(self._value << len(value) | value).setBitLength(len(self._value) + len(value)))
+
+ def __radd__(self, value):
+ value = self.prettyIn(value)
+ return self.clone(SizedInteger(value << len(self._value) | self._value).setBitLength(len(self._value) + len(value)))
+
+ def __mul__(self, value):
+ bitString = self._value
+ while value > 1:
+ bitString <<= len(self._value)
+ bitString |= self._value
+ value -= 1
+ return self.clone(bitString)
+
+ def __rmul__(self, value):
+ return self * value
+
+ def __lshift__(self, count):
+ return self.clone(SizedInteger(self._value << count).setBitLength(len(self._value) + count))
+
+ def __rshift__(self, count):
+ return self.clone(SizedInteger(self._value >> count).setBitLength(max(0, len(self._value) - count)))
+
+ def __int__(self):
+ return int(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ if sys.version_info[0] < 3:
+ def __long__(self):
+ return self._value
+
+ def asNumbers(self):
+ """Get |ASN.1| value as a sequence of 8-bit integers.
+
+ If |ASN.1| object length is not a multiple of 8, result
+ will be left-padded with zeros.
+ """
+ return tuple(octets.octs2ints(self.asOctets()))
+
+ def asOctets(self):
+ """Get |ASN.1| value as a sequence of octets.
+
+ If |ASN.1| object length is not a multiple of 8, result
+ will be left-padded with zeros.
+ """
+ return integer.to_bytes(self._value, length=len(self))
+
+ def asInteger(self):
+ """Get |ASN.1| value as a single integer value.
+ """
+ return self._value
+
+ def asBinary(self):
+ """Get |ASN.1| value as a text string of bits.
+ """
+ binString = bin(self._value)[2:]
+ return '0' * (len(self._value) - len(binString)) + binString
+
+ @classmethod
+ def fromHexString(cls, value, internalFormat=False, prepend=None):
+ """Create a |ASN.1| object initialized from the hex string.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like 'DEADBEEF'
+ """
+ try:
+ value = SizedInteger(value, 16).setBitLength(len(value) * 4)
+
+ except ValueError:
+ raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1]))
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ @classmethod
+ def fromBinaryString(cls, value, internalFormat=False, prepend=None):
+ """Create a |ASN.1| object initialized from a string of '0' and '1'.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like '1010111'
+ """
+ try:
+ value = SizedInteger(value or '0', 2).setBitLength(len(value))
+
+ except ValueError:
+ raise error.PyAsn1Error('%s.fromBinaryString() error: %s' % (cls.__name__, sys.exc_info()[1]))
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ @classmethod
+ def fromOctetString(cls, value, internalFormat=False, prepend=None, padding=0):
+ """Create a |ASN.1| object initialized from a string.
+
+ Parameters
+ ----------
+ value: :class:`str` (Py2) or :class:`bytes` (Py3)
+ Text string like '\\\\x01\\\\xff' (Py2) or b'\\\\x01\\\\xff' (Py3)
+ """
+ value = SizedInteger(integer.from_bytes(value) >> padding).setBitLength(len(value) * 8 - padding)
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
    def prettyIn(self, value):
        """Normalize any supported initializer into the internal
        SizedInteger representation (an int carrying a bit length).

        Accepted forms: SizedInteger (passed through), ASN.1 schema
        notation ("'1011'B" / "'DEAD'H"), comma-separated named bits,
        '0x...'/'0b...' prefixed strings, plain binary digit strings,
        sequences of truthy/falsy items, BitString instances and plain
        integers.  Anything else raises PyAsn1Error.
        """
        if isinstance(value, SizedInteger):
            return value
        elif octets.isStringType(value):
            if not value:
                # Empty string -> zero-length BIT STRING
                return SizedInteger(0).setBitLength(0)

            elif value[0] == '\'':  # "'1011'B" -- ASN.1 schema representation (deprecated)
                if value[-2:] == '\'B':
                    return self.fromBinaryString(value[1:-2], internalFormat=True)
                elif value[-2:] == '\'H':
                    return self.fromHexString(value[1:-2], internalFormat=True)
                else:
                    raise error.PyAsn1Error(
                        'Bad BIT STRING value notation %s' % (value,)
                    )

            elif self.namedValues and not value.isdigit():  # named bits like 'Urgent, Active'
                names = [x.strip() for x in value.split(',')]

                try:

                    bitPositions = [self.namedValues[name] for name in names]

                except KeyError:
                    raise error.PyAsn1Error('unknown bit name(s) in %r' % (names,))

                rightmostPosition = max(bitPositions)

                number = 0
                for bitPosition in bitPositions:
                    # Bit 0 is the leftmost (most significant) bit, per
                    # ASN.1 BIT STRING bit numbering.
                    number |= 1 << (rightmostPosition - bitPosition)

                return SizedInteger(number).setBitLength(rightmostPosition + 1)

            elif value.startswith('0x'):
                return self.fromHexString(value[2:], internalFormat=True)

            elif value.startswith('0b'):
                return self.fromBinaryString(value[2:], internalFormat=True)

            else:  # assume plain binary string like '1011'
                return self.fromBinaryString(value, internalFormat=True)

        elif isinstance(value, (tuple, list)):
            # Sequence of truthy/falsy items, one item per bit
            return self.fromBinaryString(''.join([b and '1' or '0' for b in value]), internalFormat=True)

        elif isinstance(value, BitString):
            return SizedInteger(value).setBitLength(len(value))

        elif isinstance(value, intTypes):
            # Plain integer: bit length defaults to the integer's own width
            return SizedInteger(value)

        else:
            raise error.PyAsn1Error(
                'Bad BitString initializer type \'%s\'' % (value,)
            )
+
+
class OctetString(base.SimpleAsn1Type):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
    objects are immutable and duck-type Python 2 :class:`str` or
    Python 3 :class:`bytes`. When used in Unicode context, |ASN.1| type
    assumes "|encoding|" serialisation.

    Keyword Args
    ------------
    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
        :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively
        :class:`unicode` object (Python 2) or :class:`str` (Python 3)
        representing character string to be serialised into octets
        (note `encoding` parameter) or |ASN.1| object.
        If `value` is not given, schema object will be created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type occurs automatically on object
        instantiation.

    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in text string context.

    binValue: :py:class:`str`
        Binary string initializer to use instead of the *value*.
        Example: '10110011'.

    hexValue: :py:class:`str`
        Hexadecimal string initializer to use instead of the *value*.
        Example: 'DEADBEEF'.

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class Icon(OctetString):
            '''
            ASN.1 specification:

            Icon ::= OCTET STRING

            icon1 Icon ::= '001100010011001000110011'B
            icon2 Icon ::= '313233'H
            '''
        icon1 = Icon.fromBinaryString('001100010011001000110011')
        icon2 = Icon.fromHexString('313233')
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Optimization for faster codec lookup
    typeId = base.SimpleAsn1Type.getTypeId()

    # Class-level defaults for the alternative binValue/hexValue
    # initializers (used by subclasses to pre-set a value).
    defaultBinValue = defaultHexValue = noValue
    encoding = 'iso-8859-1'

    def __init__(self, value=noValue, **kwargs):
        # `binValue`/`hexValue` keyword args may stand in for `value`;
        # they are popped so the base initializer never sees them.
        if kwargs:
            if value is noValue:
                try:
                    value = self.fromBinaryString(kwargs.pop('binValue'))

                except KeyError:
                    pass

                try:
                    value = self.fromHexString(kwargs.pop('hexValue'))

                except KeyError:
                    pass

        if value is noValue:
            # Fall back to class-level defaults (subclass hooks)
            if self.defaultBinValue is not noValue:
                value = self.fromBinaryString(self.defaultBinValue)

            elif self.defaultHexValue is not noValue:
                value = self.fromHexString(self.defaultHexValue)

        if 'encoding' not in kwargs:
            kwargs['encoding'] = self.encoding

        base.SimpleAsn1Type.__init__(self, value, **kwargs)

    if sys.version_info[0] <= 2:
        # Python 2: payload is a byte `str`; `unicode` input is encoded
        # with the object's codec.
        def prettyIn(self, value):
            if isinstance(value, str):
                return value

            elif isinstance(value, unicode):
                try:
                    return value.encode(self.encoding)

                except (LookupError, UnicodeEncodeError):
                    exc = sys.exc_info()[1]
                    raise error.PyAsn1UnicodeEncodeError(
                        "Can't encode string '%s' with codec "
                        "%s" % (value, self.encoding), exc
                    )

            elif isinstance(value, (tuple, list)):
                # Sequence of integer byte values
                try:
                    return ''.join([chr(x) for x in value])

                except ValueError:
                    raise error.PyAsn1Error(
                        "Bad %s initializer '%s'" % (self.__class__.__name__, value)
                    )

            else:
                return str(value)

        def __str__(self):
            return str(self._value)

        def __unicode__(self):
            try:
                return self._value.decode(self.encoding)

            except UnicodeDecodeError:
                exc = sys.exc_info()[1]
                raise error.PyAsn1UnicodeDecodeError(
                    "Can't decode string '%s' with codec "
                    "%s" % (self._value, self.encoding), exc
                )

        def asOctets(self):
            # Raw payload as a byte string
            return str(self._value)

        def asNumbers(self):
            # Payload as a tuple of integer byte values
            return tuple([ord(x) for x in self._value])

    else:
        # Python 3: payload is `bytes`; `str` input is encoded with the
        # object's codec.
        def prettyIn(self, value):
            if isinstance(value, bytes):
                return value

            elif isinstance(value, str):
                try:
                    return value.encode(self.encoding)

                except UnicodeEncodeError:
                    exc = sys.exc_info()[1]
                    raise error.PyAsn1UnicodeEncodeError(
                        "Can't encode string '%s' with '%s' "
                        "codec" % (value, self.encoding), exc
                    )
            elif isinstance(value, OctetString):  # a shortcut, bytes() would work the same way
                return value.asOctets()

            elif isinstance(value, base.SimpleAsn1Type):  # this mostly targets Integer objects
                return self.prettyIn(str(value))

            elif isinstance(value, (tuple, list)):
                # Sequence of integer byte values
                return self.prettyIn(bytes(value))

            else:
                return bytes(value)

        def __str__(self):
            try:
                return self._value.decode(self.encoding)

            except UnicodeDecodeError:
                exc = sys.exc_info()[1]
                raise error.PyAsn1UnicodeDecodeError(
                    "Can't decode string '%s' with '%s' codec at "
                    "'%s'" % (self._value, self.encoding,
                              self.__class__.__name__), exc
                )

        def __bytes__(self):
            return bytes(self._value)

        def asOctets(self):
            # Raw payload as a byte string
            return bytes(self._value)

        def asNumbers(self):
            # Payload as a tuple of integer byte values
            return tuple(self._value)

    #
    # Normally, `.prettyPrint()` is called from `__str__()`. Historically,
    # OctetString.prettyPrint() used to return hexified payload
    # representation in cases when non-printable content is present. At the
    # same time `str()` used to produce either octet-stream (Py2) or
    # text (Py3) representations.
    #
    # Therefore `OctetString.__str__()` -> `.prettyPrint()` call chain is
    # reversed to preserve the original behaviour.
    #
    # Eventually we should deprecate `.prettyPrint()` / `.prettyOut()` harness
    # and end up with just `__str__()` producing hexified representation while
    # both text and octet-stream representation should only be requested via
    # the `.asOctets()` method.
    #
    # Note: ASN.1 OCTET STRING is never mean to contain text!
    #

    def prettyOut(self, value):
        return value

    def prettyPrint(self, scope=0):
        # first see if subclass has its own .prettyOut()
        value = self.prettyOut(self._value)

        if value is not self._value:
            return value

        numbers = self.asNumbers()

        # NOTE: this is a for/else -- the else clause runs only when the
        # loop found no non-printable byte.
        for x in numbers:
            # hexify if needed
            if x < 32 or x > 126:
                return '0x' + ''.join(('%.2x' % x for x in numbers))
        else:
            # this prevents infinite recursion
            return OctetString.__str__(self)

    @staticmethod
    def fromBinaryString(value):
        """Create a |ASN.1| object initialized from a string of '0' and '1'.

        Parameters
        ----------
        value: :class:`str`
            Text string like '1010111'
        """
        # Pack bits MSB-first into octets; a trailing partial octet is
        # zero-padded on the right.
        bitNo = 8
        byte = 0
        r = []
        for v in value:
            if bitNo:
                bitNo -= 1
            else:
                bitNo = 7
                r.append(byte)
                byte = 0
            if v in ('0', '1'):
                v = int(v)
            else:
                raise error.PyAsn1Error(
                    'Non-binary OCTET STRING initializer %s' % (v,)
                )
            byte |= v << bitNo

        r.append(byte)

        return octets.ints2octs(r)

    @staticmethod
    def fromHexString(value):
        """Create a |ASN.1| object initialized from the hex string.

        Parameters
        ----------
        value: :class:`str`
            Text string like 'DEADBEEF'
        """
        # Consume hex digits in pairs; a dangling digit is treated as the
        # high nibble of a final octet (padded with '0').
        r = []
        p = []
        for v in value:
            if p:
                r.append(int(p + v, 16))
                p = None
            else:
                p = v
        if p:
            r.append(int(p + '0', 16))

        return octets.ints2octs(r)

    # Immutable sequence object protocol

    def __len__(self):
        return len(self._value)

    def __getitem__(self, i):
        if i.__class__ is slice:
            # Slicing yields another |ASN.1| object of the same type
            return self.clone(self._value[i])
        else:
            return self._value[i]

    def __iter__(self):
        return iter(self._value)

    def __contains__(self, value):
        return value in self._value

    def __add__(self, value):
        return self.clone(self._value + self.prettyIn(value))

    def __radd__(self, value):
        return self.clone(self.prettyIn(value) + self._value)

    def __mul__(self, value):
        return self.clone(self._value * value)

    def __rmul__(self, value):
        return self * value

    def __int__(self):
        return int(self._value)

    def __float__(self):
        return float(self._value)

    def __reversed__(self):
        return reversed(self._value)
+
+
class Null(OctetString):
    """Create |ASN.1| schema or value object for the NULL type.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`;
    its objects are immutable and duck-type an (always empty) Python
    :class:`str`.

    Keyword Args
    ------------
    value: :class:`str` or |ASN.1| object
        Python empty :class:`str` literal or any object that evaluates
        to :obj:`False`. If `value` is not given, schema object will be
        created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class Ack(Null):
            '''
            ASN.1 specification:

            Ack ::= NULL
            '''
        ack = Ack('')
    """

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
    )

    # NULL may only ever hold the empty octet string
    subtypeSpec = OctetString.subtypeSpec + constraint.SingleValueConstraint(octets.str2octs(''))

    # Optimization for faster codec lookup
    typeId = OctetString.getTypeId()

    def prettyIn(self, value):
        # Truthy initializers pass through unchanged (the single-value
        # subtype constraint rejects them later); every falsy one
        # collapses to the canonical empty octet string.
        return value if value else octets.str2octs('')
+
# Integer types differ across Python major versions: Python 2 has both
# `int` and `long`, Python 3 unified them into a single `int`.
intTypes = (int, long) if sys.version_info[0] <= 2 else (int,)

# Everything acceptable wherever a numeric initializer is expected.
numericTypes = intTypes + (float,)
+
+
class ObjectIdentifier(base.SimpleAsn1Type):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
    objects are immutable and duck-type Python :class:`tuple` objects
    (tuple of non-negative integers).

    Keyword Args
    ------------
    value: :class:`tuple`, :class:`str` or |ASN.1| object
        Python sequence of :class:`int` or :class:`str` literal or |ASN.1| object.
        If `value` is not given, schema object will be created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type occurs automatically on object
        instantiation.

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class ID(ObjectIdentifier):
            '''
            ASN.1 specification:

            ID ::= OBJECT IDENTIFIER

            id-edims ID ::= { joint-iso-itu-t mhs-motif(6) edims(7) }
            id-bp ID ::= { id-edims 11 }
            '''
        id_edims = ID('2.6.7')
        id_bp = id_edims + (11,)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Optimization for faster codec lookup
    typeId = base.SimpleAsn1Type.getTypeId()

    def __add__(self, other):
        return self.clone(self._value + other)

    def __radd__(self, other):
        return self.clone(other + self._value)

    def asTuple(self):
        # Raw payload as a tuple of integer sub-OIDs
        return self._value

    # Sequence object protocol

    def __len__(self):
        return len(self._value)

    def __getitem__(self, i):
        if i.__class__ is slice:
            # Slicing yields another |ASN.1| object
            return self.clone(self._value[i])
        else:
            return self._value[i]

    def __iter__(self):
        return iter(self._value)

    def __contains__(self, value):
        return value in self._value

    def index(self, suboid):
        return self._value.index(suboid)

    def isPrefixOf(self, other):
        """Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.

        Parameters
        ----------
        other: |ASN.1| object
            |ASN.1| object

        Returns
        -------
        : :class:`bool`
            :obj:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object
            or :obj:`False` otherwise.
        """
        prefixLength = len(self)
        if prefixLength <= len(other):
            if self._value[:prefixLength] == other[:prefixLength]:
                return True
        return False

    def prettyIn(self, value):
        """Normalize an initializer into a tuple of non-negative ints.

        Accepts another ObjectIdentifier, a dotted-decimal string like
        '1.3.6.1', or any sequence of non-negative integers.

        Raises
        ------
        ~pyasn1.error.PyAsn1Error
            On malformed input.
        """
        if isinstance(value, ObjectIdentifier):
            return tuple(value)
        elif octets.isStringType(value):
            if '-' in value:
                # BUG FIX: no exception is in flight on this path, so the
                # message must not interpolate sys.exc_info() (which used
                # to append a misleading ': None').
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s' % (value, self.__class__.__name__)
                )
            try:
                return tuple([int(subOid) for subOid in value.split('.') if subOid])
            except ValueError:
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
                )

        try:
            # Keep only non-negative ints; a length mismatch below means
            # some element was negative (or otherwise filtered out).
            tupleOfInts = tuple([int(subOid) for subOid in value if subOid >= 0])

        except (ValueError, TypeError):
            raise error.PyAsn1Error(
                'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
            )

        if len(tupleOfInts) == len(value):
            return tupleOfInts

        raise error.PyAsn1Error('Malformed Object ID %s at %s' % (value, self.__class__.__name__))

    def prettyOut(self, value):
        # Render as dotted-decimal notation, e.g. '1.3.6.1'
        return '.'.join([str(x) for x in value])
+
+
class Real(base.SimpleAsn1Type):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
    objects are immutable and duck-type Python :class:`float` objects.
    Additionally, |ASN.1| objects behave like a :class:`tuple` in which case its
    elements are mantissa, base and exponent.

    Keyword Args
    ------------
    value: :class:`tuple`, :class:`float` or |ASN.1| object
        Python sequence of :class:`int` (representing mantissa, base and
        exponent) or :class:`float` instance or |ASN.1| object.
        If `value` is not given, schema object will be created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type occurs automatically on object
        instantiation.

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class Pi(Real):
            '''
            ASN.1 specification:

            Pi ::= REAL

            pi Pi ::= { mantissa 314159, base 10, exponent -5 }

            '''
        pi = Pi((314159, 10, -5))
    """
    binEncBase = None  # binEncBase = 16 is recommended for large numbers

    try:
        _plusInf = float('inf')
        _minusInf = float('-inf')
        _inf = _plusInf, _minusInf

    except ValueError:
        # Infinity support is platform and Python dependent
        _plusInf = _minusInf = None
        _inf = ()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Optimization for faster codec lookup
    typeId = base.SimpleAsn1Type.getTypeId()

    @staticmethod
    def __normalizeBase10(value):
        # Fold trailing decimal zeros of the mantissa into the exponent,
        # e.g. (100, 10, 0) -> (1, 10, 2).
        m, b, e = value
        while m and m % 10 == 0:
            # BUG FIX: use floor division. Plain `m /= 10` turns an int
            # mantissa into a float on Python 3 (e.g. 100 -> 10.0), leaking
            # float mantissas into the stored tuple contrary to the
            # documented int mantissa and to Python 2 behaviour. Values
            # divisible by 10 divide evenly, so the result is unchanged.
            m //= 10
            e += 1
        return m, b, e

    def prettyIn(self, value):
        """Normalize an initializer into either an infinity float or a
        normalized (mantissa, base, exponent) tuple with base 2 or 10."""
        if isinstance(value, tuple) and len(value) == 3:
            if (not isinstance(value[0], numericTypes) or
                    not isinstance(value[1], intTypes) or
                    not isinstance(value[2], intTypes)):
                raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
            if (isinstance(value[0], float) and
                    self._inf and value[0] in self._inf):
                # Infinity mantissa: store the bare float
                return value[0]
            if value[1] not in (2, 10):
                raise error.PyAsn1Error(
                    'Prohibited base for Real value: %s' % (value[1],)
                )
            if value[1] == 10:
                value = self.__normalizeBase10(value)
            return value
        elif isinstance(value, intTypes):
            return self.__normalizeBase10((value, 10, 0))
        elif isinstance(value, float) or octets.isStringType(value):
            if octets.isStringType(value):
                try:
                    value = float(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad real value syntax: %s' % (value,)
                    )
            if self._inf and value in self._inf:
                return value
            else:
                # Scale the float up until it is integral, tracking the
                # decimal exponent shift.
                e = 0
                while int(value) != value:
                    value *= 10
                    e -= 1
                return self.__normalizeBase10((int(value), 10, e))
        elif isinstance(value, Real):
            return tuple(value)
        raise error.PyAsn1Error(
            'Bad real value syntax: %s' % (value,)
        )

    def prettyPrint(self, scope=0):
        try:
            return self.prettyOut(float(self))

        except OverflowError:
            return '<overflow>'

    @property
    def isPlusInf(self):
        """Indicate PLUS-INFINITY object value

        Returns
        -------
        : :class:`bool`
            :obj:`True` if calling object represents plus infinity
            or :obj:`False` otherwise.

        """
        return self._value == self._plusInf

    @property
    def isMinusInf(self):
        """Indicate MINUS-INFINITY object value

        Returns
        -------
        : :class:`bool`
            :obj:`True` if calling object represents minus infinity
            or :obj:`False` otherwise.
        """
        return self._value == self._minusInf

    @property
    def isInf(self):
        return self._value in self._inf

    # Arithmetic operators all go through float() and re-wrap the result

    def __add__(self, value):
        return self.clone(float(self) + value)

    def __radd__(self, value):
        return self + value

    def __mul__(self, value):
        return self.clone(float(self) * value)

    def __rmul__(self, value):
        return self * value

    def __sub__(self, value):
        return self.clone(float(self) - value)

    def __rsub__(self, value):
        return self.clone(value - float(self))

    def __mod__(self, value):
        return self.clone(float(self) % value)

    def __rmod__(self, value):
        return self.clone(value % float(self))

    def __pow__(self, value, modulo=None):
        return self.clone(pow(float(self), value, modulo))

    def __rpow__(self, value):
        return self.clone(pow(value, float(self)))

    if sys.version_info[0] <= 2:
        def __div__(self, value):
            return self.clone(float(self) / value)

        def __rdiv__(self, value):
            return self.clone(value / float(self))
    else:
        def __truediv__(self, value):
            return self.clone(float(self) / value)

        def __rtruediv__(self, value):
            return self.clone(value / float(self))

        # NOTE(review): these return the floor-division result rather than
        # the (quotient, remainder) pair the divmod() protocol specifies --
        # divmod(realObj, x) deviates from the numeric protocol. Preserved
        # as-is for compatibility; confirm before relying on it.
        def __divmod__(self, value):
            return self.clone(float(self) // value)

        def __rdivmod__(self, value):
            return self.clone(value // float(self))

    def __int__(self):
        return int(float(self))

    if sys.version_info[0] <= 2:
        def __long__(self):
            return long(float(self))

    def __float__(self):
        if self._value in self._inf:
            return self._value
        else:
            return float(
                self._value[0] * pow(self._value[1], self._value[2])
            )

    def __abs__(self):
        return self.clone(abs(float(self)))

    def __pos__(self):
        return self.clone(+float(self))

    def __neg__(self):
        return self.clone(-float(self))

    def __round__(self, n=0):
        r = round(float(self), n)
        if n:
            return self.clone(r)
        else:
            return r

    def __floor__(self):
        return self.clone(math.floor(float(self)))

    def __ceil__(self):
        return self.clone(math.ceil(float(self)))

    def __trunc__(self):
        return self.clone(math.trunc(float(self)))

    # Comparisons delegate to the float value

    def __lt__(self, value):
        return float(self) < value

    def __le__(self, value):
        return float(self) <= value

    def __eq__(self, value):
        return float(self) == value

    def __ne__(self, value):
        return float(self) != value

    def __gt__(self, value):
        return float(self) > value

    def __ge__(self, value):
        return float(self) >= value

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(float(self))
    else:
        def __bool__(self):
            return bool(float(self))

    __hash__ = base.SimpleAsn1Type.__hash__

    def __getitem__(self, idx):
        # Expose the (mantissa, base, exponent) tuple elements
        if self._value in self._inf:
            raise error.PyAsn1Error('Invalid infinite value operation')
        else:
            return self._value[idx]

    # compatibility stubs

    def isPlusInfinity(self):
        return self.isPlusInf

    def isMinusInfinity(self):
        return self.isMinusInf

    def isInfinity(self):
        return self.isInf
+
+
class Enumerated(Integer):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
    objects are immutable and duck-type Python :class:`int` objects.

    Keyword Args
    ------------
    value: :class:`int`, :class:`str` or |ASN.1| object
        Python :class:`int` or :class:`str` literal or |ASN.1| object.
        If `value` is not given, schema object will be created.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type occurs automatically on object
        instantiation.

    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
        Object representing non-default symbolic aliases for numbers

    Raises
    ------
    ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
        On constraint violation or bad initializer.

    Examples
    --------

    .. code-block:: python

        class RadioButton(Enumerated):
            '''
            ASN.1 specification:

            RadioButton ::= ENUMERATED { button1(0), button2(1),
                                         button3(2) }

            selected-by-default RadioButton ::= button1
            '''
            namedValues = NamedValues(
                ('button1', 0), ('button2', 1),
                ('button3', 2)
            )

        selected_by_default = RadioButton('button1')
    """
    # ENUMERATED shares Integer's behaviour; only the ASN.1 tag and the
    # (subclass-supplied) named-values mapping differ.

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Optimization for faster codec lookup
    typeId = Integer.getTypeId()

    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
    #: representing symbolic aliases for numbers
    namedValues = namedval.NamedValues()
+
+
+# "Structured" ASN.1 types
+
+class SequenceOfAndSetOfBase(base.ConstructedAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
+ its objects are mutable and duck-type Python :class:`list` objects.
+
+ Keyword Args
+ ------------
+ componentType : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A pyasn1 object representing ASN.1 type allowed within |ASN.1| type
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type can only occur on explicit
+ `.isInconsistent` call.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class LotteryDraw(SequenceOf): # SetOf is similar
+ '''
+ ASN.1 specification:
+
+ LotteryDraw ::= SEQUENCE OF INTEGER
+ '''
+ componentType = Integer()
+
+ lotteryDraw = LotteryDraw()
+ lotteryDraw.extend([123, 456, 789])
+ """
+ def __init__(self, *args, **kwargs):
+ # support positional params for backward compatibility
+ if args:
+ for key, value in zip(('componentType', 'tagSet',
+ 'subtypeSpec'), args):
+ if key in kwargs:
+ raise error.PyAsn1Error('Conflicting positional and keyword params!')
+ kwargs['componentType'] = value
+
+ self._componentValues = noValue
+
+ base.ConstructedAsn1Type.__init__(self, **kwargs)
+
+ # Python list protocol
+
+ def __getitem__(self, idx):
+ try:
+ return self.getComponentByPosition(idx)
+
+ except error.PyAsn1Error:
+ raise IndexError(sys.exc_info()[1])
+
+ def __setitem__(self, idx, value):
+ try:
+ self.setComponentByPosition(idx, value)
+
+ except error.PyAsn1Error:
+ raise IndexError(sys.exc_info()[1])
+
+ def append(self, value):
+ if self._componentValues is noValue:
+ pos = 0
+
+ else:
+ pos = len(self._componentValues)
+
+ self[pos] = value
+
+ def count(self, value):
+ return list(self._componentValues.values()).count(value)
+
+ def extend(self, values):
+ for value in values:
+ self.append(value)
+
+ if self._componentValues is noValue:
+ self._componentValues = {}
+
+ def index(self, value, start=0, stop=None):
+ if stop is None:
+ stop = len(self)
+
+ indices, values = zip(*self._componentValues.items())
+
+ # TODO: remove when Py2.5 support is gone
+ values = list(values)
+
+ try:
+ return indices[values.index(value, start, stop)]
+
+ except error.PyAsn1Error:
+ raise ValueError(sys.exc_info()[1])
+
+ def reverse(self):
+ self._componentValues.reverse()
+
+ def sort(self, key=None, reverse=False):
+ self._componentValues = dict(
+ enumerate(sorted(self._componentValues.values(),
+ key=key, reverse=reverse)))
+
+ def __len__(self):
+ if self._componentValues is noValue or not self._componentValues:
+ return 0
+
+ return max(self._componentValues) + 1
+
+ def __iter__(self):
+ for idx in range(0, len(self)):
+ yield self.getComponentByPosition(idx)
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ for idx, componentValue in self._componentValues.items():
+ if componentValue is not noValue:
+ if isinstance(componentValue, base.ConstructedAsn1Type):
+ myClone.setComponentByPosition(
+ idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByPosition(idx, componentValue.clone())
+
    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Return |ASN.1| type component value by position.

        Equivalent to Python sequence subscription operation (e.g. `[]`).

        Parameters
        ----------
        idx : :class:`int`
            Component index (zero-based). Must either refer to an existing
            component or to N+1 component (if *componentType* is set). In the latter
            case a new component type gets instantiated and appended to the |ASN.1|
            sequence.

        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.

        instantiate: :class:`bool`
            If :obj:`True` (default), inner component will be automatically instantiated.
            If :obj:`False` either existing component or the :class:`NoValue` object will be
            returned.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            Instantiate |ASN.1| component type or return existing component value

        Examples
        --------

        .. code-block:: python

            # can also be SetOf
            class MySequenceOf(SequenceOf):
                componentType = OctetString()

            s = MySequenceOf()

            # returns component #0 with `.isValue` property False
            s.getComponentByPosition(0)

            # returns None
            s.getComponentByPosition(0, default=None)

            s.clear()

            # returns noValue
            s.getComponentByPosition(0, instantiate=False)

            # sets component #0 to OctetString() ASN.1 schema
            # object and returns it
            s.getComponentByPosition(0, instantiate=True)

            # sets component #0 to ASN.1 value object
            s.setComponentByPosition(0, 'ABCD')

            # returns OctetString('ABCD') value object
            s.getComponentByPosition(0, instantiate=False)

            s.clear()

            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
        """
        if isinstance(idx, slice):
            # Slices fan out into one lookup per selected position
            indices = tuple(range(len(self)))
            return [self.getComponentByPosition(subidx, default, instantiate)
                    for subidx in indices[idx]]

        if idx < 0:
            # Negative indexing relative to the current length
            idx = len(self) + idx
            if idx < 0:
                raise error.PyAsn1Error(
                    'SequenceOf/SetOf index is out of range')

        try:
            componentValue = self._componentValues[idx]

        except (KeyError, error.PyAsn1Error):
            # Component absent: either bail out with `default` or
            # instantiate a fresh schema component in place
            if not instantiate:
                return default

            self.setComponentByPosition(idx)

            componentValue = self._componentValues[idx]

        if default is noValue or componentValue.isValue:
            return componentValue
        else:
            # Schema (valueless) component and a default was supplied
            return default
+
    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.

        Equivalent to Python sequence item assignment operation (e.g. `[]`)
        or list.append() (when idx == len(self)).

        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to existing
            component or to N+1 component. In the latter case a new component
            type gets instantiated (if *componentType* is set, or given ASN.1
            object is taken otherwise) and appended to the |ASN.1| sequence.

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
            If `value` is not given, schema object will be set as a component.

        verifyConstraints: :class:`bool`
            If :obj:`False`, skip constraints validation

        matchTags: :class:`bool`
            If :obj:`False`, skip component tags matching

        matchConstraints: :class:`bool`
            If :obj:`False`, skip component constraints matching

        Returns
        -------
        self

        Raises
        ------
        ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
            On constraint violation or bad initializer
        IndexError
            When idx > len(self)
        """
        if isinstance(idx, slice):
            # Slice assignment: fan out into per-position assignments
            # starting at the first index the slice selects
            indices = tuple(range(len(self)))
            startIdx = indices and indices[idx][0] or 0
            for subIdx, subValue in enumerate(value):
                self.setComponentByPosition(
                    startIdx + subIdx, subValue, verifyConstraints,
                    matchTags, matchConstraints)
            return self

        if idx < 0:
            # Negative indexing relative to the current length
            idx = len(self) + idx
            if idx < 0:
                raise error.PyAsn1Error(
                    'SequenceOf/SetOf index is out of range')

        componentType = self.componentType

        if self._componentValues is noValue:
            # First assignment turns a schema object into a value object
            componentValues = {}

        else:
            componentValues = self._componentValues

        currentValue = componentValues.get(idx, noValue)

        if value is noValue:
            # No value given: install a schema object as the component
            if componentType is not None:
                value = componentType.clone()

            elif currentValue is noValue:
                raise error.PyAsn1Error('Component type not defined')

        elif not isinstance(value, base.Asn1Item):
            # Plain Python value: wrap it into the component's ASN.1 type
            # (declared componentType first, existing component otherwise)
            if (componentType is not None and
                    isinstance(componentType, base.SimpleAsn1Type)):
                value = componentType.clone(value=value)

            elif (currentValue is not noValue and
                    isinstance(currentValue, base.SimpleAsn1Type)):
                value = currentValue.clone(value=value)

            else:
                raise error.PyAsn1Error(
                    'Non-ASN.1 value %r and undefined component'
                    ' type at %r' % (value, self))

        elif componentType is not None and (matchTags or matchConstraints):
            # ASN.1 object given: verify it is compatible with the declared
            # component type (strict or loose check per strictConstraints)
            subtypeChecker = (
                self.strictConstraints and
                componentType.isSameTypeWith or
                componentType.isSuperTypeOf)

            if not subtypeChecker(value, verifyConstraints and matchTags,
                                  verifyConstraints and matchConstraints):
                # TODO: we should wrap componentType with UnnamedType to carry
                # additional properties associated with componentType
                if componentType.typeId != Any.typeId:
                    raise error.PyAsn1Error(
                        'Component value is tag-incompatible: %r vs '
                        '%r' % (value, componentType))

        componentValues[idx] = value

        self._componentValues = componentValues

        return self
+
+ @property
+ def componentTagMap(self):
+ if self.componentType is not None:
+ return self.componentType.tagMap
+
+ @property
+ def components(self):
+ return [self._componentValues[idx]
+ for idx in sorted(self._componentValues)]
+
+ def clear(self):
+ """Remove all components and become an empty |ASN.1| value object.
+
+ Has the same effect on |ASN.1| object as it does on :class:`list`
+ built-in.
+ """
+ self._componentValues = {}
+ return self
+
+ def reset(self):
+ """Remove all components and become a |ASN.1| schema object.
+
+ See :meth:`isValue` property for more information on the
+ distinction between value and schema objects.
+ """
+ self._componentValues = noValue
+ return self
+
+ def prettyPrint(self, scope=0):
+ scope += 1
+ representation = self.__class__.__name__ + ':\n'
+
+ if not self.isValue:
+ return representation
+
+ for idx, componentValue in enumerate(self):
+ representation += ' ' * scope
+ if (componentValue is noValue and
+ self.componentType is not None):
+ representation += '<empty>'
+ else:
+ representation += componentValue.prettyPrint(scope)
+
+ return representation
+
+ def prettyPrintType(self, scope=0):
+ scope += 1
+ representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
+ if self.componentType is not None:
+ representation += ' ' * scope
+ representation += self.componentType.prettyPrintType(scope)
+ return representation + '\n' + ' ' * (scope - 1) + '}'
+
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
+ this object can also be used like a Python built-in object
+ (e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+ operations (e.g. defining or testing the structure of the data). Most
+ obvious uses of ASN.1 schema is to guide serialisation codecs whilst
+ encoding/decoding serialised ASN.1 contents.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ if self._componentValues is noValue:
+ return False
+
+ if len(self._componentValues) != len(self):
+ return False
+
+ for componentValue in self._componentValues.values():
+ if componentValue is noValue or not componentValue.isValue:
+ return False
+
+ return True
+
+ @property
+ def isInconsistent(self):
+ """Run necessary checks to ensure |ASN.1| object consistency.
+
+ Default action is to verify |ASN.1| object against constraints imposed
+ by `subtypeSpec`.
+
+ Raises
+ ------
+ :py:class:`~pyasn1.error.PyAsn1tError` on any inconsistencies found
+ """
+ if self.componentType is noValue or not self.subtypeSpec:
+ return False
+
+ if self._componentValues is noValue:
+ return True
+
+ mapping = {}
+
+ for idx, value in self._componentValues.items():
+ # Absent fields are not in the mapping
+ if value is noValue:
+ continue
+
+ mapping[idx] = value
+
+ try:
+ # Represent SequenceOf/SetOf as a bare dict to constraints chain
+ self.subtypeSpec(mapping)
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()[1]
+ return exc
+
+ return False
+
class SequenceOf(SequenceOfAndSetOfBase):
    __doc__ = SequenceOfAndSetOfBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    #: Tag number 0x10 is UNIVERSAL 16 (SEQUENCE / SEQUENCE OF).
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )

    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = None

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = SequenceOfAndSetOfBase.getTypeId()
+
+
class SetOf(SequenceOfAndSetOfBase):
    __doc__ = SequenceOfAndSetOfBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    #: Tag number 0x11 is UNIVERSAL 17 (SET / SET OF).
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )

    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = None

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = SequenceOfAndSetOfBase.getTypeId()
+
+
class SequenceAndSetBase(base.ConstructedAsn1Type):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
    its objects are mutable and duck-type Python :class:`dict` objects.

    Keyword Args
    ------------
    componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
        Object holding named ASN.1 types allowed within this collection

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type can only occur on explicit
        `.isInconsistent` call.

    Examples
    --------

    .. code-block:: python

        class Description(Sequence):  # Set is similar
            '''
            ASN.1 specification:

            Description ::= SEQUENCE {
                surname    IA5String,
                first-name IA5String OPTIONAL,
                age        INTEGER DEFAULT 40
            }
            '''
            componentType = NamedTypes(
                NamedType('surname', IA5String()),
                OptionalNamedType('first-name', IA5String()),
                DefaultedNamedType('age', Integer(40))
            )

        descr = Description()
        descr['surname'] = 'Smith'
        descr['first-name'] = 'John'
    """
    #: Default :py:class:`~pyasn1.type.namedtype.NamedTypes`
    #: object representing named ASN.1 types allowed within |ASN.1| type.
    #: When empty, field names are tracked per-instance via the nested
    #: DynamicNames registry (see __init__).
    componentType = namedtype.NamedTypes()
+
+
+ class DynamicNames(object):
+ """Fields names/positions mapping for component-less objects"""
+ def __init__(self):
+ self._keyToIdxMap = {}
+ self._idxToKeyMap = {}
+
+ def __len__(self):
+ return len(self._keyToIdxMap)
+
+ def __contains__(self, item):
+ return item in self._keyToIdxMap or item in self._idxToKeyMap
+
+ def __iter__(self):
+ return (self._idxToKeyMap[idx] for idx in range(len(self._idxToKeyMap)))
+
+ def __getitem__(self, item):
+ try:
+ return self._keyToIdxMap[item]
+
+ except KeyError:
+ return self._idxToKeyMap[item]
+
+ def getNameByPosition(self, idx):
+ try:
+ return self._idxToKeyMap[idx]
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByName(self, name):
+ try:
+ return self._keyToIdxMap[name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ def addField(self, idx):
+ self._keyToIdxMap['field-%d' % idx] = idx
+ self._idxToKeyMap[idx] = 'field-%d' % idx
+
+
+ def __init__(self, **kwargs):
+ base.ConstructedAsn1Type.__init__(self, **kwargs)
+ self._componentTypeLen = len(self.componentType)
+ if self._componentTypeLen:
+ self._componentValues = []
+ else:
+ self._componentValues = noValue
+ self._dynamicNames = self._componentTypeLen or self.DynamicNames()
+
+ def __getitem__(self, idx):
+ if octets.isStringType(idx):
+ try:
+ return self.getComponentByName(idx)
+
+ except error.PyAsn1Error:
+ # duck-typing dict
+ raise KeyError(sys.exc_info()[1])
+
+ else:
+ try:
+ return self.getComponentByPosition(idx)
+
+ except error.PyAsn1Error:
+ # duck-typing list
+ raise IndexError(sys.exc_info()[1])
+
+ def __setitem__(self, idx, value):
+ if octets.isStringType(idx):
+ try:
+ self.setComponentByName(idx, value)
+
+ except error.PyAsn1Error:
+ # duck-typing dict
+ raise KeyError(sys.exc_info()[1])
+
+ else:
+ try:
+ self.setComponentByPosition(idx, value)
+
+ except error.PyAsn1Error:
+ # duck-typing list
+ raise IndexError(sys.exc_info()[1])
+
+ def __contains__(self, key):
+ if self._componentTypeLen:
+ return key in self.componentType
+ else:
+ return key in self._dynamicNames
+
    def __len__(self):
        # Number of component slots currently allocated.
        return len(self._componentValues)
+
    def __iter__(self):
        # Iterate field names: the declared componentType when non-empty,
        # otherwise the dynamically registered names.
        return iter(self.componentType or self._dynamicNames)
+
+ # Python dict protocol
+
+ def values(self):
+ for idx in range(self._componentTypeLen or len(self._dynamicNames)):
+ yield self[idx]
+
    def keys(self):
        # Dict protocol: field names, in positional order.
        return iter(self)
+
+ def items(self):
+ for idx in range(self._componentTypeLen or len(self._dynamicNames)):
+ if self._componentTypeLen:
+ yield self.componentType[idx].name, self[idx]
+ else:
+ yield self._dynamicNames[idx], self[idx]
+
+ def update(self, *iterValue, **mappingValue):
+ for k, v in iterValue:
+ self[k] = v
+ for k in mappingValue:
+ self[k] = mappingValue[k]
+
+ def clear(self):
+ """Remove all components and become an empty |ASN.1| value object.
+
+ Has the same effect on |ASN.1| object as it does on :class:`dict`
+ built-in.
+ """
+ self._componentValues = []
+ self._dynamicNames = self.DynamicNames()
+ return self
+
+ def reset(self):
+ """Remove all components and become a |ASN.1| schema object.
+
+ See :meth:`isValue` property for more information on the
+ distinction between value and schema objects.
+ """
+ self._componentValues = noValue
+ self._dynamicNames = self.DynamicNames()
+ return self
+
    @property
    def components(self):
        # Raw positional component storage (a list, or noValue for a
        # schema-only object).
        return self._componentValues
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ if self._componentValues is noValue:
+ return
+
+ for idx, componentValue in enumerate(self._componentValues):
+ if componentValue is not noValue:
+ if isinstance(componentValue, base.ConstructedAsn1Type):
+ myClone.setComponentByPosition(
+ idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByPosition(idx, componentValue.clone())
+
+ def getComponentByName(self, name, default=noValue, instantiate=True):
+ """Returns |ASN.1| type component by name.
+
+ Equivalent to Python :class:`dict` subscription operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ name: :class:`str`
+ |ASN.1| type component name
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically
+ instantiated.
+ If :obj:`False` either existing component or the :class:`NoValue`
+ object will be returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ Instantiate |ASN.1| component type or return existing
+ component value
+ """
+ if self._componentTypeLen:
+ idx = self.componentType.getPositionByName(name)
+ else:
+ try:
+ idx = self._dynamicNames.getPositionByName(name)
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ return self.getComponentByPosition(idx, default=default, instantiate=instantiate)
+
+ def setComponentByName(self, name, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by name.
+
+ Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ name: :class:`str`
+ |ASN.1| type component name
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints: :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+ """
+ if self._componentTypeLen:
+ idx = self.componentType.getPositionByName(name)
+ else:
+ try:
+ idx = self._dynamicNames.getPositionByName(name)
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ return self.setComponentByPosition(
+ idx, value, verifyConstraints, matchTags, matchConstraints
+ )
+
    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Returns |ASN.1| type component by index.

        Equivalent to Python sequence subscription operation (e.g. `[]`).

        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to an existing
            component or (if *componentType* is set) new ASN.1 schema object gets
            instantiated.

        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.

        instantiate: :class:`bool`
            If :obj:`True` (default), inner component will be automatically
            instantiated.
            If :obj:`False` either existing component or the :class:`NoValue`
            object will be returned.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a PyASN1 object

        Examples
        --------

        .. code-block:: python

            # can also be Set
            class MySequence(Sequence):
                componentType = NamedTypes(
                    NamedType('id', OctetString())
                )

            s = MySequence()

            # returns component #0 with `.isValue` property False
            s.getComponentByPosition(0)

            # returns None
            s.getComponentByPosition(0, default=None)

            s.clear()

            # returns noValue
            s.getComponentByPosition(0, instantiate=False)

            # sets component #0 to OctetString() ASN.1 schema
            # object and returns it
            s.getComponentByPosition(0, instantiate=True)

            # sets component #0 to ASN.1 value object
            s.setComponentByPosition(0, 'ABCD')

            # returns OctetString('ABCD') value object
            s.getComponentByPosition(0, instantiate=False)

            s.clear()

            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
        """
        # A missing slot (schema object or index past the populated range)
        # is uniformly represented as noValue.
        try:
            if self._componentValues is noValue:
                componentValue = noValue

            else:
                componentValue = self._componentValues[idx]

        except IndexError:
            componentValue = noValue

        if not instantiate:
            # Read-only mode: never materialize a component.
            if componentValue is noValue or not componentValue.isValue:
                return default
            else:
                return componentValue

        if componentValue is noValue:
            # Materialize the slot with a schema object, then re-read it.
            self.setComponentByPosition(idx)

        componentValue = self._componentValues[idx]

        if default is noValue or componentValue.isValue:
            return componentValue
        else:
            return default
+
    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.

        Equivalent to Python sequence item assignment operation (e.g. `[]`).

        Parameters
        ----------
        idx : :class:`int`
            Component index (zero-based). Must either refer to existing
            component (if *componentType* is set) or to N+1 component
            otherwise. In the latter case a new component of given ASN.1
            type gets instantiated and appended to |ASN.1| sequence.

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
            If `value` is not given, schema object will be set as a component.

        verifyConstraints : :class:`bool`
            If :obj:`False`, skip constraints validation

        matchTags: :class:`bool`
            If :obj:`False`, skip component tags matching

        matchConstraints: :class:`bool`
            If :obj:`False`, skip component constraints matching

        Returns
        -------
        self
        """
        componentType = self.componentType
        componentTypeLen = self._componentTypeLen

        # A schema object (noValue) lazily becomes a value object on first
        # assignment.
        if self._componentValues is noValue:
            componentValues = []

        else:
            componentValues = self._componentValues

        try:
            currentValue = componentValues[idx]

        except IndexError:
            currentValue = noValue
            if componentTypeLen:
                # NOTE(review): idx == componentTypeLen slips past this
                # bound check and surfaces later as a bare IndexError
                # rather than PyAsn1Error — confirm intended.
                if componentTypeLen < idx:
                    raise error.PyAsn1Error('component index out of range')

                # Grow storage to the full declared width, holes as noValue.
                componentValues = [noValue] * componentTypeLen

        if value is noValue:
            # No value given: install a fresh schema object for the slot.
            if componentTypeLen:
                value = componentType.getTypeByPosition(idx)
                if isinstance(value, base.ConstructedAsn1Type):
                    value = value.clone(cloneValueFlag=componentType[idx].isDefaulted)

            elif currentValue is noValue:
                raise error.PyAsn1Error('Component type not defined')

        elif not isinstance(value, base.Asn1Item):
            # Plain Python value: coerce via the declared field type,
            # falling back to the type of the value already in the slot.
            if componentTypeLen:
                subComponentType = componentType.getTypeByPosition(idx)
                if isinstance(subComponentType, base.SimpleAsn1Type):
                    value = subComponentType.clone(value=value)

                else:
                    raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)

            elif currentValue is not noValue and isinstance(currentValue, base.SimpleAsn1Type):
                value = currentValue.clone(value=value)

            else:
                raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)

        elif ((verifyConstraints or matchTags or matchConstraints) and
                componentTypeLen):
            # ASN.1 value given: verify it is compatible with the declared
            # field type (strict or loose check per strictConstraints).
            subComponentType = componentType.getTypeByPosition(idx)
            if subComponentType is not noValue:
                subtypeChecker = (self.strictConstraints and
                                  subComponentType.isSameTypeWith or
                                  subComponentType.isSuperTypeOf)

                if not subtypeChecker(value, verifyConstraints and matchTags,
                                      verifyConstraints and matchConstraints):
                    # Open-type fields accept any tag.
                    if not componentType[idx].openType:
                        raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))

        if componentTypeLen or idx in self._dynamicNames:
            componentValues[idx] = value

        elif len(componentValues) == idx:
            # Schema-less object: appending at position N registers a new
            # dynamic field name.
            componentValues.append(value)
            self._dynamicNames.addField(idx)

        else:
            raise error.PyAsn1Error('Component index out of range')

        self._componentValues = componentValues

        return self
+
    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.

        If *isValue* is :obj:`False` then this object represents just ASN.1 schema.

        If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
        this object can also be used like a Python built-in object (e.g.
        :class:`int`, :class:`str`, :class:`dict` etc.).

        Returns
        -------
        : :class:`bool`
            :obj:`False` if object represents just ASN.1 schema.
            :obj:`True` if object represents ASN.1 schema and can be used as a
            normal value.

        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.

        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).

        It is sufficient for |ASN.1| objects to have all non-optional and non-defaulted
        components being value objects to be considered as a value objects as a whole.
        In other words, even having one or more optional components not turned into
        value objects, |ASN.1| object is still considered as a value object. Defaulted
        components are normally value objects by default.
        """
        if self._componentValues is noValue:
            return False

        componentType = self.componentType

        if componentType:
            # Only mandatory (non-optional, non-defaulted) fields must be
            # value objects for the whole object to count as a value.
            for idx, subComponentType in enumerate(componentType.namedTypes):
                if subComponentType.isDefaulted or subComponentType.isOptional:
                    continue

                if not self._componentValues:
                    return False

                componentValue = self._componentValues[idx]
                if componentValue is noValue or not componentValue.isValue:
                    return False

        else:
            # Schema-less object: every present component must be a value.
            for componentValue in self._componentValues:
                if componentValue is noValue or not componentValue.isValue:
                    return False

        return True
+
+ @property
+ def isInconsistent(self):
+ """Run necessary checks to ensure |ASN.1| object consistency.
+
+ Default action is to verify |ASN.1| object against constraints imposed
+ by `subtypeSpec`.
+
+ Raises
+ ------
+ :py:class:`~pyasn1.error.PyAsn1tError` on any inconsistencies found
+ """
+ if self.componentType is noValue or not self.subtypeSpec:
+ return False
+
+ if self._componentValues is noValue:
+ return True
+
+ mapping = {}
+
+ for idx, value in enumerate(self._componentValues):
+ # Absent fields are not in the mapping
+ if value is noValue:
+ continue
+
+ name = self.componentType.getNameByPosition(idx)
+
+ mapping[name] = value
+
+ try:
+ # Represent Sequence/Set as a bare dict to constraints chain
+ self.subtypeSpec(mapping)
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()[1]
+ return exc
+
+ return False
+
+ def prettyPrint(self, scope=0):
+ """Return an object representation string.
+
+ Returns
+ -------
+ : :class:`str`
+ Human-friendly object representation.
+ """
+ scope += 1
+ representation = self.__class__.__name__ + ':\n'
+ for idx, componentValue in enumerate(self._componentValues):
+ if componentValue is not noValue and componentValue.isValue:
+ representation += ' ' * scope
+ if self.componentType:
+ representation += self.componentType.getNameByPosition(idx)
+ else:
+ representation += self._dynamicNames.getNameByPosition(idx)
+ representation = '%s=%s\n' % (
+ representation, componentValue.prettyPrint(scope)
+ )
+ return representation
+
+ def prettyPrintType(self, scope=0):
+ scope += 1
+ representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
+ for idx, componentType in enumerate(self.componentType.values() or self._componentValues):
+ representation += ' ' * scope
+ if self.componentType:
+ representation += '"%s"' % self.componentType.getNameByPosition(idx)
+ else:
+ representation += '"%s"' % self._dynamicNames.getNameByPosition(idx)
+ representation = '%s = %s\n' % (
+ representation, componentType.prettyPrintType(scope)
+ )
+ return representation + '\n' + ' ' * (scope - 1) + '}'
+
+ # backward compatibility
+
    def setDefaultComponents(self):
        # Backward-compatibility stub: kept for old callers, does nothing.
        return self
+
+ def getComponentType(self):
+ if self._componentTypeLen:
+ return self.componentType
+
+ def getNameByPosition(self, idx):
+ if self._componentTypeLen:
+ return self.componentType[idx].name
+
class Sequence(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    #: Tag number 0x10 is UNIVERSAL 16 (SEQUENCE).
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing named ASN.1 types allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()

    # backward compatibility

    def getComponentTagMapNearPosition(self, idx):
        # Backward compatibility: returns None when no schema is declared.
        if self.componentType:
            return self.componentType.getTagMapNearPosition(idx)

    def getComponentPositionNearType(self, tagSet, idx):
        # Backward compatibility: without a schema, positions map onto
        # themselves.
        if self.componentType:
            return self.componentType.getPositionNearType(tagSet, idx)
        else:
            return idx
+
+
class Set(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    #: Tag number 0x11 is UNIVERSAL 17 (SET).
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()

    def getComponent(self, innerFlag=False):
        # Overridden by Choice; a plain Set is its own component.
        return self

    def getComponentByType(self, tagSet, default=noValue,
                           instantiate=True, innerFlag=False):
        """Returns |ASN.1| type component by ASN.1 tag.

        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component

        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.

        instantiate: :class:`bool`
            If :obj:`True` (default), inner component will be automatically
            instantiated.
            If :obj:`False` either existing component or the :class:`noValue`
            object will be returned.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a pyasn1 object
        """
        componentValue = self.getComponentByPosition(
            self.componentType.getPositionByType(tagSet),
            default=default, instantiate=instantiate
        )
        if innerFlag and isinstance(componentValue, Set):
            # get inner component by inner tagSet
            return componentValue.getComponent(innerFlag=True)
        else:
            # get outer component by inner tagSet
            return componentValue

    def setComponentByType(self, tagSet, value=noValue,
                           verifyConstraints=True,
                           matchTags=True,
                           matchConstraints=True,
                           innerFlag=False):
        """Assign |ASN.1| type component by ASN.1 tag.

        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
            If `value` is not given, schema object will be set as a component.

        verifyConstraints : :class:`bool`
            If :obj:`False`, skip constraints validation

        matchTags: :class:`bool`
            If :obj:`False`, skip component tags matching

        matchConstraints: :class:`bool`
            If :obj:`False`, skip component constraints matching

        innerFlag: :class:`bool`
            If :obj:`True`, search for matching *tagSet* recursively.

        Returns
        -------
        self
        """
        idx = self.componentType.getPositionByType(tagSet)

        if innerFlag:  # set inner component by inner tagSet
            componentType = self.componentType.getTypeByPosition(idx)

            if componentType.tagSet:
                # Tagged field: assign directly at this level.
                return self.setComponentByPosition(
                    idx, value, verifyConstraints, matchTags, matchConstraints
                )
            else:
                # Untagged field: recurse into the inner component.
                componentType = self.getComponentByPosition(idx)
                return componentType.setComponentByType(
                    tagSet, value, verifyConstraints, matchTags, matchConstraints, innerFlag=innerFlag
                )
        else:  # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints, matchTags, matchConstraints
            )

    @property
    def componentTagMap(self):
        # SET components are identified by unique tags.
        if self.componentType:
            return self.componentType.tagMapUnique
+
+
class Choice(Set):
    """Create |ASN.1| schema or value object.

    |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
    its objects are mutable and duck-type Python :class:`list` objects.

    Keyword Args
    ------------
    componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
        Object holding named ASN.1 types allowed within this collection

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s). Constraints
        verification for |ASN.1| type can only occur on explicit
        `.isInconsistent` call.

    Examples
    --------

    .. code-block:: python

        class Afters(Choice):
            '''
            ASN.1 specification:

            Afters ::= CHOICE {
                cheese  [0] IA5String,
                dessert [1] IA5String
            }
            '''
            componentType = NamedTypes(
                NamedType('cheese', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
                )),
                NamedType('dessert', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
                ))
            )

        afters = Afters()
        afters['cheese'] = 'Mascarpone'
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet()  # untagged

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    #: A CHOICE holds exactly one alternative at a time.
    subtypeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1)
    )

    # Disambiguation ASN.1 types identification
    typeId = Set.getTypeId()

    # Position of the currently chosen alternative, or None when unset.
    _currentIdx = None
+
+ def __eq__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] == other
+ return NotImplemented
+
+ def __ne__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] != other
+ return NotImplemented
+
+ def __lt__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] < other
+ return NotImplemented
+
+ def __le__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] <= other
+ return NotImplemented
+
+ def __gt__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] > other
+ return NotImplemented
+
+ def __ge__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] >= other
+ return NotImplemented
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self._componentValues and True or False
+ else:
+ def __bool__(self):
+ return self._componentValues and True or False
+
+ def __len__(self):
+ return self._currentIdx is not None and 1 or 0
+
+ def __contains__(self, key):
+ if self._currentIdx is None:
+ return False
+ return key == self.componentType[self._currentIdx].getName()
+
+ def __iter__(self):
+ if self._currentIdx is None:
+ raise StopIteration
+ yield self.componentType[self._currentIdx].getName()
+
+ # Python dict protocol
+
+ def values(self):
+ if self._currentIdx is not None:
+ yield self._componentValues[self._currentIdx]
+
+ def keys(self):
+ if self._currentIdx is not None:
+ yield self.componentType[self._currentIdx].getName()
+
+ def items(self):
+ if self._currentIdx is not None:
+ yield self.componentType[self._currentIdx].getName(), self[self._currentIdx]
+
+ def checkConsistency(self):
+ if self._currentIdx is None:
+ raise error.PyAsn1Error('Component not chosen')
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ try:
+ component = self.getComponent()
+ except error.PyAsn1Error:
+ pass
+ else:
+ if isinstance(component, Choice):
+ tagSet = component.effectiveTagSet
+ else:
+ tagSet = component.tagSet
+ if isinstance(component, base.ConstructedAsn1Type):
+ myClone.setComponentByType(
+ tagSet, component.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByType(tagSet, component.clone())
+
+ def getComponentByPosition(self, idx, default=noValue, instantiate=True):
+ __doc__ = Set.__doc__
+
+ if self._currentIdx is None or self._currentIdx != idx:
+ return Set.getComponentByPosition(self, idx, default=default,
+ instantiate=instantiate)
+
+ return self._componentValues[idx]
+
+ def setComponentByPosition(self, idx, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by position.
+
+ Equivalent to Python sequence item assignment operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ idx: :class:`int`
+ Component index (zero-based). Must either refer to existing
+ component or to N+1 component. In the latter case a new component
+ type gets instantiated (if *componentType* is set, or given ASN.1
+ object is taken otherwise) and appended to the |ASN.1| sequence.
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component. Once a new value is
+ set to *idx* component, previous value is dropped.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints : :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+ """
+ oldIdx = self._currentIdx
+ Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
+ self._currentIdx = idx
+ if oldIdx is not None and oldIdx != idx:
+ self._componentValues[oldIdx] = noValue
+ return self
+
+ @property
+ def effectiveTagSet(self):
+ """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently initialized component or self (if |ASN.1| is tagged)."""
+ if self.tagSet:
+ return self.tagSet
+ else:
+ component = self.getComponent()
+ return component.effectiveTagSet
+
+ @property
+ def tagMap(self):
+ """"Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
+ ASN.1 tags to ASN.1 objects contained within callee.
+ """
+ if self.tagSet:
+ return Set.tagMap.fget(self)
+ else:
+ return self.componentType.tagMapUnique
+
+ def getComponent(self, innerFlag=False):
+ """Return currently assigned component of the |ASN.1| object.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ a PyASN1 object
+ """
+ if self._currentIdx is None:
+ raise error.PyAsn1Error('Component not chosen')
+ else:
+ c = self._componentValues[self._currentIdx]
+ if innerFlag and isinstance(c, Choice):
+ return c.getComponent(innerFlag)
+ else:
+ return c
+
+ def getName(self, innerFlag=False):
+ """Return the name of currently assigned component of the |ASN.1| object.
+
+ Returns
+ -------
+ : :py:class:`str`
+ |ASN.1| component name
+ """
+ if self._currentIdx is None:
+ raise error.PyAsn1Error('Component not chosen')
+ else:
+ if innerFlag:
+ c = self._componentValues[self._currentIdx]
+ if isinstance(c, Choice):
+ return c.getName(innerFlag)
+ return self.componentType.getNameByPosition(self._currentIdx)
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
+ this object can also be used like a Python built-in object (e.g.
+ :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal
+ value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+ operations (e.g. defining or testing the structure of the data). Most
+ obvious uses of ASN.1 schema is to guide serialisation codecs whilst
+ encoding/decoding serialised ASN.1 contents.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ if self._currentIdx is None:
+ return False
+
+ componentValue = self._componentValues[self._currentIdx]
+
+ return componentValue is not noValue and componentValue.isValue
+
+ def clear(self):
+ self._currentIdx = None
+ return Set.clear(self)
+
+ # compatibility stubs
+
+ def getMinTagSet(self):
+ return self.minTagSet
+
+
+class Any(OctetString):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
+ its objects are immutable and duck-type Python 2 :class:`str` or Python 3
+ :class:`bytes`. When used in Unicode context, |ASN.1| type assumes
+ "|encoding|" serialisation.
+
+ Keyword Args
+ ------------
+ value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
+ :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively
+ :class:`unicode` object (Python 2) or :class:`str` (Python 3)
+ representing character string to be serialised into octets (note
+ `encoding` parameter) or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ encoding: :py:class:`str`
+ Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
+ :class:`str` (Python 3) the payload when |ASN.1| object is used
+ in text string context.
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Error(Sequence):
+ '''
+ ASN.1 specification:
+
+ Error ::= SEQUENCE {
+ code INTEGER,
+ parameter ANY DEFINED BY code -- Either INTEGER or REAL
+ }
+ '''
+ componentType=NamedTypes(
+ NamedType('code', Integer()),
+ NamedType('parameter', Any(),
+ openType=OpenType('code', {1: Integer(),
+ 2: Real()}))
+ )
+
+ error = Error()
+ error['code'] = 1
+ error['parameter'] = Integer(1234)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.TagSet() # untagged
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = OctetString.getTypeId()
+
+ @property
+ def tagMap(self):
+ """"Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
+ ASN.1 tags to ASN.1 objects contained within callee.
+ """
+ try:
+ return self._tagMap
+
+ except AttributeError:
+ self._tagMap = tagmap.TagMap(
+ {self.tagSet: self},
+ {eoo.endOfOctets.tagSet: eoo.endOfOctets},
+ self
+ )
+
+ return self._tagMap
+
+# XXX
+# coercion rules?
diff --git a/contrib/python/pyasn1/py3/pyasn1/type/useful.py b/contrib/python/pyasn1/py3/pyasn1/type/useful.py
new file mode 100644
index 0000000000..a8ae874057
--- /dev/null
+++ b/contrib/python/pyasn1/py3/pyasn1/type/useful.py
@@ -0,0 +1,189 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import datetime
+
+from pyasn1 import error
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
+
+NoValue = univ.NoValue
+noValue = univ.noValue
+
+
+class ObjectDescriptor(char.GraphicString):
+ __doc__ = char.GraphicString.__doc__
+
+ #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
+ tagSet = char.GraphicString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
+ )
+
+ # Optimization for faster codec lookup
+ typeId = char.GraphicString.getTypeId()
+
+
+class TimeMixIn(object):
+
+ _yearsDigits = 4
+ _hasSubsecond = False
+ _optionalMinutes = False
+ _shortTZ = False
+
+ class FixedOffset(datetime.tzinfo):
+ """Fixed offset in minutes east from UTC."""
+
+ # defaulted arguments required
+ # https: // docs.python.org / 2.3 / lib / datetime - tzinfo.html
+ def __init__(self, offset=0, name='UTC'):
+ self.__offset = datetime.timedelta(minutes=offset)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return datetime.timedelta(0)
+
+ UTC = FixedOffset()
+
+ @property
+ def asDateTime(self):
+ """Create :py:class:`datetime.datetime` object from a |ASN.1| object.
+
+ Returns
+ -------
+ :
+ new instance of :py:class:`datetime.datetime` object
+ """
+ text = str(self)
+ if text.endswith('Z'):
+ tzinfo = TimeMixIn.UTC
+ text = text[:-1]
+
+ elif '-' in text or '+' in text:
+ if '+' in text:
+ text, plusminus, tz = text.partition('+')
+ else:
+ text, plusminus, tz = text.partition('-')
+
+ if self._shortTZ and len(tz) == 2:
+ tz += '00'
+
+ if len(tz) != 4:
+ raise error.PyAsn1Error('malformed time zone offset %s' % tz)
+
+ try:
+ minutes = int(tz[:2]) * 60 + int(tz[2:])
+ if plusminus == '-':
+ minutes *= -1
+
+ except ValueError:
+ raise error.PyAsn1Error('unknown time specification %s' % self)
+
+ tzinfo = TimeMixIn.FixedOffset(minutes, '?')
+
+ else:
+ tzinfo = None
+
+ if '.' in text or ',' in text:
+ if '.' in text:
+ text, _, ms = text.partition('.')
+ else:
+ text, _, ms = text.partition(',')
+
+ try:
+ ms = int(ms) * 1000
+
+ except ValueError:
+ raise error.PyAsn1Error('bad sub-second time specification %s' % self)
+
+ else:
+ ms = 0
+
+ if self._optionalMinutes and len(text) - self._yearsDigits == 6:
+ text += '0000'
+ elif len(text) - self._yearsDigits == 8:
+ text += '00'
+
+ try:
+ dt = datetime.datetime.strptime(text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
+
+ except ValueError:
+ raise error.PyAsn1Error('malformed datetime format %s' % self)
+
+ return dt.replace(microsecond=ms, tzinfo=tzinfo)
+
+ @classmethod
+ def fromDateTime(cls, dt):
+ """Create |ASN.1| object from a :py:class:`datetime.datetime` object.
+
+ Parameters
+ ----------
+ dt: :py:class:`datetime.datetime` object
+ The `datetime.datetime` object to initialize the |ASN.1| object
+ from
+
+ Returns
+ -------
+ :
+ new instance of |ASN.1| value
+ """
+ text = dt.strftime(cls._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
+ if cls._hasSubsecond:
+ text += '.%d' % (dt.microsecond // 1000)
+
+ if dt.utcoffset():
+ seconds = dt.utcoffset().seconds
+ if seconds < 0:
+ text += '-'
+ else:
+ text += '+'
+ text += '%.2d%.2d' % (seconds // 3600, seconds % 3600)
+ else:
+ text += 'Z'
+
+ return cls(text)
+
+
+class GeneralizedTime(char.VisibleString, TimeMixIn):
+ __doc__ = char.VisibleString.__doc__
+
+ #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
+ tagSet = char.VisibleString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
+ )
+
+ # Optimization for faster codec lookup
+ typeId = char.VideotexString.getTypeId()
+
+ _yearsDigits = 4
+ _hasSubsecond = True
+ _optionalMinutes = True
+ _shortTZ = True
+
+
+class UTCTime(char.VisibleString, TimeMixIn):
+ __doc__ = char.VisibleString.__doc__
+
+ #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
+ tagSet = char.VisibleString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
+ )
+
+ # Optimization for faster codec lookup
+ typeId = char.VideotexString.getTypeId()
+
+ _yearsDigits = 2
+ _hasSubsecond = False
+ _optionalMinutes = False
+ _shortTZ = False
diff --git a/contrib/python/pyasn1/py3/tests/__init__.py b/contrib/python/pyasn1/py3/tests/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/__main__.py b/contrib/python/pyasn1/py3/tests/__main__.py
new file mode 100644
index 0000000000..d32d511557
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/__main__.py
@@ -0,0 +1,18 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.test_debug.suite',
+ 'tests.type.__main__.suite',
+ 'tests.codec.__main__.suite',
+ 'tests.compat.__main__.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/base.py b/contrib/python/pyasn1/py3/tests/base.py
new file mode 100644
index 0000000000..f7513d8d9e
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/base.py
@@ -0,0 +1,18 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+from pyasn1 import debug
+
+
+class BaseTestCase(unittest.TestCase):
+
+ def setUp(self):
+ debug.setLogger(debug.Debug('all', printer=lambda *x: None))
+
+ def tearDown(self):
+ debug.setLogger(None)
diff --git a/contrib/python/pyasn1/py3/tests/codec/__init__.py b/contrib/python/pyasn1/py3/tests/codec/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/codec/__main__.py b/contrib/python/pyasn1/py3/tests/codec/__main__.py
new file mode 100644
index 0000000000..b02f0723ca
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/__main__.py
@@ -0,0 +1,19 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.test_streaming.suite',
+ 'tests.codec.ber.__main__.suite',
+ 'tests.codec.cer.__main__.suite',
+ 'tests.codec.der.__main__.suite',
+ 'tests.codec.native.__main__.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/ber/__init__.py b/contrib/python/pyasn1/py3/tests/codec/ber/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/ber/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/codec/ber/__main__.py b/contrib/python/pyasn1/py3/tests/codec/ber/__main__.py
new file mode 100644
index 0000000000..ff38c97011
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/ber/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.ber.test_encoder.suite',
+ 'tests.codec.ber.test_decoder.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/ber/test_decoder.py b/contrib/python/pyasn1/py3/tests/codec/ber/test_decoder.py
new file mode 100644
index 0000000000..9e238cd458
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/ber/test_decoder.py
@@ -0,0 +1,1847 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import gzip
+import io
+import os
+import sys
+import tempfile
+import unittest
+import zipfile
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import char
+from pyasn1.codec import streaming
+from pyasn1.codec.ber import decoder
+from pyasn1.codec.ber import eoo
+from pyasn1.compat.octets import ints2octs, str2octs, null
+from pyasn1 import error
+
+
+class LargeTagDecoderTestCase(BaseTestCase):
+ def testLargeTag(self):
+ assert decoder.decode(ints2octs((127, 141, 245, 182, 253, 47, 3, 2, 1, 1))) == (1, null)
+
+ def testLongTag(self):
+ assert decoder.decode(ints2octs((0x1f, 2, 1, 0)))[0].tagSet == univ.Integer.tagSet
+
+ def testTagsEquivalence(self):
+ integer = univ.Integer(2).subtype(implicitTag=tag.Tag(tag.tagClassContext, 0, 0))
+ assert decoder.decode(ints2octs((0x9f, 0x80, 0x00, 0x02, 0x01, 0x02)), asn1Spec=integer) == decoder.decode(
+ ints2octs((0x9f, 0x00, 0x02, 0x01, 0x02)), asn1Spec=integer)
+
+
+class DecoderCacheTestCase(BaseTestCase):
+ def testCache(self):
+ assert decoder.decode(ints2octs((0x1f, 2, 1, 0))) == decoder.decode(ints2octs((0x1f, 2, 1, 0)))
+
+
+class IntegerDecoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert decoder.decode(ints2octs((2, 1, 12))) == (12, null)
+
+ def testNegInt(self):
+ assert decoder.decode(ints2octs((2, 1, 244))) == (-12, null)
+
+ def testZero(self):
+ assert decoder.decode(ints2octs((2, 0))) == (0, null)
+
+ def testZeroLong(self):
+ assert decoder.decode(ints2octs((2, 1, 0))) == (0, null)
+
+ def testMinusOne(self):
+ assert decoder.decode(ints2octs((2, 1, 255))) == (-1, null)
+
+ def testPosLong(self):
+ assert decoder.decode(
+ ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))
+ ) == (0xffffffffffffffff, null)
+
+ def testNegLong(self):
+ assert decoder.decode(
+ ints2octs((2, 9, 255, 0, 0, 0, 0, 0, 0, 0, 1))
+ ) == (-0xffffffffffffffff, null)
+
+ def testSpec(self):
+ try:
+ decoder.decode(
+ ints2octs((2, 1, 12)), asn1Spec=univ.Null()
+ ) == (12, null)
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong asn1Spec worked out'
+ assert decoder.decode(
+ ints2octs((2, 1, 12)), asn1Spec=univ.Integer()
+ ) == (12, null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((34, 1, 12)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class BooleanDecoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert decoder.decode(ints2octs((1, 1, 1))) == (1, null)
+
+ def testTrueNeg(self):
+ assert decoder.decode(ints2octs((1, 1, 255))) == (1, null)
+
+ def testExtraTrue(self):
+ assert decoder.decode(ints2octs((1, 1, 1, 0, 120, 50, 50))) == (1, ints2octs((0, 120, 50, 50)))
+
+ def testFalse(self):
+ assert decoder.decode(ints2octs((1, 1, 0))) == (0, null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((33, 1, 1)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((3, 3, 1, 169, 138))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((3, 3, 1, 169, 138))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+ ) == ((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1), null)
+
+ def testDefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((3, 2, 0, 169, 3, 2, 1, 138)), str2octs(''))
+
+ def testIndefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((3, 2, 0, 169, 3, 2, 1, 138, 0, 0)), str2octs(''))
+
+ def testTypeChecking(self):
+ try:
+ decoder.decode(ints2octs((35, 4, 2, 2, 42, 42)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'accepted mis-encoded bit-string constructed out of an integer'
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs(
+ (36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0))
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testDefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs(
+ (36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120)), str2octs(''))
+
+ def testIndefModeChunkedSubst(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111,
+ 120, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0)), str2octs(''))
+
+
+class ExpTaggedOctetStringDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.o = univ.OctetString(
+ 'Quick brown fox',
+ tagSet=univ.OctetString.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 5)
+ ))
+
+ def testDefMode(self):
+ o, r = decoder.decode(
+ ints2octs((101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testIndefMode(self):
+ o, r = decoder.decode(
+ ints2octs((101, 128, 36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0, 0, 0))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testDefModeChunked(self):
+ o, r = decoder.decode(
+ ints2octs((101, 25, 36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testIndefModeChunked(self):
+ o, r = decoder.decode(
+ ints2octs((101, 128, 36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0, 0, 0))
+ )
+ assert not r
+ assert self.o == o
+ assert self.o.tagSet == o.tagSet
+ assert self.o.isSameTypeWith(o)
+
+ def testDefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)), str2octs(''))
+
+ def testIndefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((
+ 101, 128, 36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0,
+ 0, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0, 0, 0)), str2octs(''))
+
+
+class NullDecoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert decoder.decode(ints2octs((5, 0))) == (null, null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((37, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+# Useful analysis of OID encoding issues could be found here:
+# https://misc.daniel-marschall.de/asn.1/oid_facts.html
+class ObjectIdentifierDecoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert decoder.decode(
+ ints2octs((6, 6, 43, 6, 0, 191, 255, 126))
+ ) == ((1, 3, 6, 0, 0xffffe), null)
+
+ def testEdge1(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 39))
+ ) == ((0, 39), null)
+
+ def testEdge2(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 79))
+ ) == ((1, 39), null)
+
+ def testEdge3(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 120))
+ ) == ((2, 40), null)
+
+ def testEdge4(self):
+ assert decoder.decode(
+ ints2octs((6, 5, 0x90, 0x80, 0x80, 0x80, 0x4F))
+ ) == ((2, 0xffffffff), null)
+
+ def testEdge5(self):
+ assert decoder.decode(
+ ints2octs((6, 1, 0x7F))
+ ) == ((2, 47), null)
+
+ def testEdge6(self):
+ assert decoder.decode(
+ ints2octs((6, 2, 0x81, 0x00))
+ ) == ((2, 48), null)
+
+ def testEdge7(self):
+ assert decoder.decode(
+ ints2octs((6, 3, 0x81, 0x34, 0x03))
+ ) == ((2, 100, 3), null)
+
+ def testEdge8(self):
+ assert decoder.decode(
+ ints2octs((6, 2, 133, 0))
+ ) == ((2, 560), null)
+
+ def testEdge9(self):
+ assert decoder.decode(
+ ints2octs((6, 4, 0x88, 0x84, 0x87, 0x02))
+ ) == ((2, 16843570), null)
+
+ def testNonLeading0x80(self):
+ assert decoder.decode(
+ ints2octs((6, 5, 85, 4, 129, 128, 0)),
+ ) == ((2, 5, 4, 16384), null)
+
+ def testLeading0x80Case1(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 5, 85, 4, 128, 129, 0))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testLeading0x80Case2(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 7, 1, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7F))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testLeading0x80Case3(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 2, 0x80, 1))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testLeading0x80Case4(self):
+ try:
+ decoder.decode(
+ ints2octs((6, 2, 0x80, 0x7F))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Leading 0x80 tolerated'
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((38, 1, 239)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+ def testZeroLength(self):
+ try:
+ decoder.decode(ints2octs((6, 0, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'zero length tolerated'
+
+ def testIndefiniteLength(self):
+ try:
+ decoder.decode(ints2octs((6, 128, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'indefinite length tolerated'
+
+ def testReservedLength(self):
+ try:
+ decoder.decode(ints2octs((6, 255, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'reserved length tolerated'
+
+ def testLarge1(self):
+ assert decoder.decode(
+ ints2octs((0x06, 0x11, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6, 0xB8, 0xCB, 0xE2, 0xB7, 0x17))
+ ) == ((2, 18446744073709551535184467440737095), null)
+
+ def testLarge2(self):
+ assert decoder.decode(
+ ints2octs((0x06, 0x13, 0x88, 0x37, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6, 0xB8, 0xCB, 0xE2, 0xB6, 0x47))
+ ) == ((2, 999, 18446744073709551535184467440737095), null)
+
+
+class RealDecoderTestCase(BaseTestCase):
+ def testChar(self):
+ assert decoder.decode(
+ ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))
+ ) == (univ.Real((123, 10, 11)), null)
+
+ def testBin1(self): # check base = 2
+ assert decoder.decode( # (0.5, 2, 0) encoded with base = 2
+ ints2octs((9, 3, 128, 255, 1))
+ ) == (univ.Real((1, 2, -1)), null)
+
+ def testBin2(self): # check base = 2 and scale factor
+ assert decoder.decode( # (3.25, 2, 0) encoded with base = 8
+ ints2octs((9, 3, 148, 255, 13))
+ ) == (univ.Real((26, 2, -3)), null)
+
+ def testBin3(self): # check base = 16
+ assert decoder.decode( # (0.00390625, 2, 0) encoded with base = 16
+ ints2octs((9, 3, 160, 254, 1))
+ ) == (univ.Real((1, 2, -8)), null)
+
+ def testBin4(self): # check exponent = 0
+ assert decoder.decode( # (1, 2, 0) encoded with base = 2
+ ints2octs((9, 3, 128, 0, 1))
+ ) == (univ.Real((1, 2, 0)), null)
+
+ def testBin5(self): # case of 2 octs for exponent and negative exponent
+ assert decoder.decode( # (3, 2, -1020) encoded with base = 16
+ ints2octs((9, 4, 161, 255, 1, 3))
+ ) == (univ.Real((3, 2, -1020)), null)
+
+# TODO: this requires Real type comparison fix
+
+# def testBin6(self):
+# assert decoder.decode(
+# ints2octs((9, 5, 162, 0, 255, 255, 1))
+# ) == (univ.Real((1, 2, 262140)), null)
+
+# def testBin7(self):
+# assert decoder.decode(
+# ints2octs((9, 7, 227, 4, 1, 35, 69, 103, 1))
+# ) == (univ.Real((-1, 2, 76354972)), null)
+
+ def testPlusInf(self):
+ assert decoder.decode(
+ ints2octs((9, 1, 64))
+ ) == (univ.Real('inf'), null)
+
+ def testMinusInf(self):
+ assert decoder.decode(
+ ints2octs((9, 1, 65))
+ ) == (univ.Real('-inf'), null)
+
+ def testEmpty(self):
+ assert decoder.decode(
+ ints2octs((9, 0))
+ ) == (univ.Real(0.0), null)
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(ints2octs((41, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+ def testShortEncoding(self):
+ try:
+ decoder.decode(ints2octs((9, 1, 131)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'accepted too-short real'
+
+
+class UniversalStringDecoderTestCase(BaseTestCase):
+ def testDecoder(self):
+ assert decoder.decode(ints2octs((28, 12, 0, 0, 0, 97, 0, 0, 0, 98, 0, 0, 0, 99))) == (char.UniversalString(sys.version_info[0] >= 3 and 'abc' or unicode('abc')), null)
+
+
+class BMPStringDecoderTestCase(BaseTestCase):
+ def testDecoder(self):
+ assert decoder.decode(ints2octs((30, 6, 0, 97, 0, 98, 0, 99))) == (char.BMPString(sys.version_info[0] >= 3 and 'abc' or unicode('abc')), null)
+
+
+class UTF8StringDecoderTestCase(BaseTestCase):
+ def testDecoder(self):
+ assert decoder.decode(ints2octs((12, 3, 97, 98, 99))) == (char.UTF8String(sys.version_info[0] >= 3 and 'abc' or unicode('abc')), null)
+
+
+class SequenceOfDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+ ) == (self.s, null)
+
+ def testSchemalessDecoder(self):
+ assert decoder.decode(
+ ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=univ.SequenceOf()
+ ) == (self.s, null)
+
+
+class ExpTaggedSequenceOfDecoderTestCase(BaseTestCase):
+
+ def testWithSchema(self):
+ s = univ.SequenceOf().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))
+ s2, r = decoder.decode(
+ ints2octs((163, 15, 48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=s
+ )
+ assert not r
+ assert s2 == [str2octs('quick brown')]
+ assert s.tagSet == s2.tagSet
+
+ def testWithoutSchema(self):
+ s = univ.SequenceOf().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))
+ s2, r = decoder.decode(
+ ints2octs((163, 15, 48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ )
+ assert not r
+ assert s2 == [str2octs('quick brown')]
+ assert s.tagSet == s2.tagSet
+
+
+class SequenceOfDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SetOfDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+ ) == (self.s, null)
+
+ def testSchemalessDecoder(self):
+ assert decoder.decode(
+ ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=univ.SetOf()
+ ) == (self.s, null)
+
+
+class SetOfDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+
+ def testDefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SequenceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.NamedType('first-name', univ.OctetString(null)),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs(
+ (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), str2octs(''))
+
+ def testWithOptionalAndDefaultedIndefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), str2octs(''))
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(
+ ints2octs((16, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class SequenceDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 2, 5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 2, 5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalDefMode(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionaIndefMode(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalDefModeChunked(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalIndefModeChunked(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
+ 0, 0, 0)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedDefMode(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedDefModeChunked(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedIndefModeChunked(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1,
+ 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs(
+ (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1)),
+ asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
+ 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SequenceDecoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except error.PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 3, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='060127')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithUnaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(componentType=univ.Any()),
+ openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except error.PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 49, 3, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs( (48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+class SetDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.NamedType('first-name', univ.OctetString(null)),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ assert decoder.decode(
+ ints2octs(
+ (49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), str2octs(''))
+
+ def testWithOptionalAndDefaultedIndefModeSubst(self):
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)),
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs(
+ (5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), str2octs(''))
+
+ def testTagFormat(self):
+ try:
+ decoder.decode(
+ ints2octs((16, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+ )
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'wrong tagFormat worked out'
+
+
+class SetDecoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefMode(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((49, 2, 5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalDefMode(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((49, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalDefModeChunked(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((49, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalIndefModeChunked(self):
+ self.__initWithOptional()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedDefMode(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedDefModeChunked(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 5, 5, 0, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithDefaultedIndefModeChunked(self):
+ self.__initWithDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeReordered(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 18, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeReordered(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 128, 2, 1, 1, 5, 0, 36, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert decoder.decode(
+ ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+
+class SequenceOfWithExpTaggedOctetStringDecoder(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(
+ componentType=univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
+ )
+ self.s.setComponentByPosition(0, 'q')
+ self.s2 = univ.SequenceOf()
+
+ def testDefModeSchema(self):
+ s, r = decoder.decode(ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testIndefModeSchema(self):
+ s, r = decoder.decode(ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testDefModeNoComponent(self):
+ s, r = decoder.decode(ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s2)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testIndefModeNoComponent(self):
+ s, r = decoder.decode(ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s2)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testDefModeSchemaless(self):
+ s, r = decoder.decode(ints2octs((48, 5, 163, 3, 4, 1, 113)))
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testIndefModeSchemaless(self):
+ s, r = decoder.decode(ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)))
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+
+class SequenceWithExpTaggedOctetStringDecoder(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'x', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
+ )
+ )
+ )
+ self.s.setComponentByPosition(0, 'q')
+ self.s2 = univ.Sequence()
+
+ def testDefModeSchema(self):
+ s, r = decoder.decode(ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testIndefModeSchema(self):
+ s, r = decoder.decode(ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testDefModeNoComponent(self):
+ s, r = decoder.decode(ints2octs((48, 5, 163, 3, 4, 1, 113)), asn1Spec=self.s2)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testIndefModeNoComponent(self):
+ s, r = decoder.decode(ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)), asn1Spec=self.s2)
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testDefModeSchemaless(self):
+ s, r = decoder.decode(ints2octs((48, 5, 163, 3, 4, 1, 113)))
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+ def testIndefModeSchemaless(self):
+ s, r = decoder.decode(ints2octs((48, 128, 163, 128, 4, 1, 113, 0, 0, 0, 0)))
+ assert not r
+ assert s == self.s
+ assert s.tagSet == self.s.tagSet
+
+
+class ChoiceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+
+ def testBySpec(self):
+ self.s.setComponentByPosition(0, univ.Null(null))
+ assert decoder.decode(
+ ints2octs((5, 0)), asn1Spec=self.s
+ ) == (self.s, null)
+
+ def testWithoutSpec(self):
+ self.s.setComponentByPosition(0, univ.Null(null))
+ assert decoder.decode(ints2octs((5, 0))) == (self.s, null)
+ assert decoder.decode(ints2octs((5, 0))) == (univ.Null(null), null)
+
+ def testUndefLength(self):
+ self.s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
+ assert decoder.decode(ints2octs((36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0)),
+ asn1Spec=self.s) == (self.s, null)
+
+ def testExplicitTag(self):
+ s = self.s.subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 4))
+ s.setComponentByPosition(0, univ.Null(null))
+ assert decoder.decode(ints2octs((164, 2, 5, 0)), asn1Spec=s) == (s, null)
+
+ def testExplicitTagUndefLength(self):
+ s = self.s.subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 4))
+ s.setComponentByPosition(0, univ.Null(null))
+ assert decoder.decode(ints2octs((164, 128, 5, 0, 0, 0)), asn1Spec=s) == (s, null)
+
+
+class AnyDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any()
+
+ def testByUntagged(self):
+ assert decoder.decode(
+ ints2octs((4, 3, 102, 111, 120)), asn1Spec=self.s
+ ) == (univ.Any('\004\003fox'), null)
+
+ def testTaggedEx(self):
+ s = univ.Any('\004\003fox').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
+ assert decoder.decode(ints2octs((164, 5, 4, 3, 102, 111, 120)), asn1Spec=s) == (s, null)
+
+ def testTaggedIm(self):
+ s = univ.Any('\004\003fox').subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
+ assert decoder.decode(ints2octs((132, 5, 4, 3, 102, 111, 120)), asn1Spec=s) == (s, null)
+
+ def testByUntaggedIndefMode(self):
+ assert decoder.decode(
+ ints2octs((4, 3, 102, 111, 120)), asn1Spec=self.s
+ ) == (univ.Any('\004\003fox'), null)
+
+ def testTaggedExIndefMode(self):
+ s = univ.Any('\004\003fox').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
+ assert decoder.decode(ints2octs((164, 128, 4, 3, 102, 111, 120, 0, 0)), asn1Spec=s) == (s, null)
+
+ def testTaggedImIndefMode(self):
+ s = univ.Any('\004\003fox').subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
+ assert decoder.decode(ints2octs((164, 128, 4, 3, 102, 111, 120, 0, 0)), asn1Spec=s) == (s, null)
+
+ def testByUntaggedSubst(self):
+ assert decoder.decode(
+ ints2octs((4, 3, 102, 111, 120)),
+ asn1Spec=self.s,
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((4, 3, 102, 111, 120)), str2octs(''))
+
+ def testTaggedExSubst(self):
+ assert decoder.decode(
+ ints2octs((164, 5, 4, 3, 102, 111, 120)),
+ asn1Spec=self.s,
+ substrateFun=lambda a, b, c, d: streaming.readFromStream(b, c)
+ ) == (ints2octs((164, 5, 4, 3, 102, 111, 120)), str2octs(''))
+
+
+class EndOfOctetsTestCase(BaseTestCase):
+ def testUnexpectedEoo(self):
+ try:
+ decoder.decode(ints2octs((0, 0)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'end-of-contents octets accepted at top level'
+
+ def testExpectedEoo(self):
+ result, remainder = decoder.decode(ints2octs((0, 0)), allowEoo=True)
+ assert eoo.endOfOctets.isSameTypeWith(result) and result == eoo.endOfOctets and result is eoo.endOfOctets
+ assert remainder == null
+
+ def testDefiniteNoEoo(self):
+ try:
+ decoder.decode(ints2octs((0x23, 0x02, 0x00, 0x00)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'end-of-contents octets accepted inside definite-length encoding'
+
+ def testIndefiniteEoo(self):
+ result, remainder = decoder.decode(ints2octs((0x23, 0x80, 0x00, 0x00)))
+ assert result == () and remainder == null, 'incorrect decoding of indefinite length end-of-octets'
+
+ def testNoLongFormEoo(self):
+ try:
+ decoder.decode(ints2octs((0x23, 0x80, 0x00, 0x81, 0x00)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'end-of-contents octets accepted with invalid long-form length'
+
+ def testNoConstructedEoo(self):
+ try:
+ decoder.decode(ints2octs((0x23, 0x80, 0x20, 0x00)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'end-of-contents octets accepted with invalid constructed encoding'
+
+ def testNoEooData(self):
+ try:
+ decoder.decode(ints2octs((0x23, 0x80, 0x00, 0x01, 0x00)))
+ except error.PyAsn1Error:
+ pass
+ else:
+ assert 0, 'end-of-contents octets accepted with unexpected data'
+
+
+class NonStringDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null(null)),
+ namedtype.NamedType('first-name', univ.OctetString(null)),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+ self.s.setComponentByPosition(0, univ.Null(null))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ self.substrate = ints2octs([48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1])
+
+ def testOctetString(self):
+ s = list(decoder.StreamingDecoder(
+ univ.OctetString(self.substrate), asn1Spec=self.s))
+ assert [self.s] == s
+
+ def testAny(self):
+ s = list(decoder.StreamingDecoder(
+ univ.Any(self.substrate), asn1Spec=self.s))
+ assert [self.s] == s
+
+
+class ErrorOnDecodingTestCase(BaseTestCase):
+
+ def testErrorCondition(self):
+ decode = decoder.SingleItemDecoder(
+ tagMap=decoder.TAG_MAP, typeMap=decoder.TYPE_MAP)
+ substrate = ints2octs((00, 1, 2))
+ stream = streaming.asSeekableStream(substrate)
+
+ try:
+ asn1Object = next(decode(stream))
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()[1]
+ assert isinstance(exc, error.PyAsn1Error), (
+ 'Unexpected exception raised %r' % (exc,))
+
+ else:
+ assert False, 'Unexpected decoder result %r' % (asn1Object,)
+
+ def testRawDump(self):
+ substrate = ints2octs((31, 8, 2, 1, 1, 131, 3, 2, 1, 12))
+ stream = streaming.asSeekableStream(substrate)
+
+ class SingleItemEncoder(decoder.SingleItemDecoder):
+ defaultErrorState = decoder.stDumpRawValue
+
+ class StreamingDecoder(decoder.StreamingDecoder):
+ SINGLE_ITEM_DECODER = SingleItemEncoder
+
+ class OneShotDecoder(decoder.Decoder):
+ STREAMING_DECODER = StreamingDecoder
+
+ d = OneShotDecoder()
+
+ asn1Object, rest = d(stream)
+
+ assert isinstance(asn1Object, univ.Any), (
+ 'Unexpected raw dump type %r' % (asn1Object,))
+ assert asn1Object.asNumbers() == (31, 8, 2, 1, 1), (
+ 'Unexpected raw dump value %r' % (asn1Object,))
+ assert rest == ints2octs((131, 3, 2, 1, 12)), (
+ 'Unexpected rest of substrate after raw dump %r' % rest)
+
+
+@unittest.skipIf(sys.version_info < (3,), "Unsupported on Python 2")
+class BinaryFileTestCase(BaseTestCase):
+ """Assure that decode works on open binary files."""
+ def testOneObject(self):
+ _, path = tempfile.mkstemp()
+ try:
+ with open(path, "wb") as out:
+ out.write(ints2octs((2, 1, 12)))
+
+ with open(path, "rb") as source:
+ values = list(decoder.StreamingDecoder(source))
+
+ assert values == [12]
+ finally:
+ os.remove(path)
+
+ def testMoreObjects(self):
+ _, path = tempfile.mkstemp()
+ try:
+ with open(path, "wb") as out:
+ out.write(ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))
+
+ with open(path, "rb") as source:
+ values = list(decoder.StreamingDecoder(source))
+
+ assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]
+
+ finally:
+ os.remove(path)
+
+ def testInvalidFileContent(self):
+ _, path = tempfile.mkstemp()
+ try:
+ with open(path, "wb") as out:
+ out.write(ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0, 7)))
+
+ with open(path, "rb") as source:
+ list(decoder.StreamingDecoder(source))
+
+ except error.EndOfStreamError:
+ pass
+
+ finally:
+ os.remove(path)
+
+
+class BytesIOTestCase(BaseTestCase):
+ def testRead(self):
+ source = ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+ stream = io.BytesIO(source)
+ values = list(decoder.StreamingDecoder(stream))
+ assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]
+
+
+class UnicodeTestCase(BaseTestCase):
+ def testFail(self):
+ # This ensures that unicode objects in Python 2 & str objects in Python 3.7 cannot be parsed.
+ source = ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)).decode("latin-1")
+ try:
+ next(decoder.StreamingDecoder(source))
+
+ except error.UnsupportedSubstrateError:
+ pass
+
+ else:
+ assert False, 'Tolerated parsing broken unicode strings'
+
+
+class RestartableDecoderTestCase(BaseTestCase):
+
+ class NonBlockingStream(io.BytesIO):
+ block = False
+
+ def read(self, size=-1):
+ self.block = not self.block
+ if self.block:
+ return # this is what non-blocking streams sometimes do
+
+ return io.BytesIO.read(self, size)
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+ self.s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ source = ints2octs(
+ (48, 26,
+ 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110,
+ 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+ self.stream = self.NonBlockingStream(source)
+
+ def testPartialReadingFromNonBlockingStream(self):
+ iterator = iter(decoder.StreamingDecoder(self.stream, asn1Spec=self.s))
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' not in res.context
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' not in res.context
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' in res.context
+ assert isinstance(res.context['asn1Object'], univ.SequenceOf)
+ assert res.context['asn1Object'].isValue
+ assert len(res.context['asn1Object']) == 0
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' in res.context
+ assert isinstance(res.context['asn1Object'], univ.SequenceOf)
+ assert res.context['asn1Object'].isValue
+ assert len(res.context['asn1Object']) == 0
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' in res.context
+ assert isinstance(res.context['asn1Object'], univ.SequenceOf)
+ assert res.context['asn1Object'].isValue
+ assert len(res.context['asn1Object']) == 0
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' in res.context
+ assert isinstance(res.context['asn1Object'], univ.SequenceOf)
+ assert res.context['asn1Object'].isValue
+ assert len(res.context['asn1Object']) == 1
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' in res.context
+ assert isinstance(res.context['asn1Object'], univ.SequenceOf)
+ assert res.context['asn1Object'].isValue
+ assert len(res.context['asn1Object']) == 1
+
+ res = next(iterator)
+
+ assert isinstance(res, error.SubstrateUnderrunError)
+ assert 'asn1Object' in res.context
+ assert isinstance(res.context['asn1Object'], univ.SequenceOf)
+ assert res.context['asn1Object'].isValue
+ assert len(res.context['asn1Object']) == 1
+
+ res = next(iterator)
+
+ assert isinstance(res, univ.SequenceOf)
+ assert res.isValue
+ assert len(res) == 2
+
+ try:
+ next(iterator)
+
+ except StopIteration:
+ pass
+
+ else:
+ assert False, 'End of stream not raised'
+
+
+class CompressedFilesTestCase(BaseTestCase):
+ def testGzip(self):
+ _, path = tempfile.mkstemp(suffix=".gz")
+ try:
+ with gzip.open(path, "wb") as out:
+ out.write(ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))
+
+ with gzip.open(path, "rb") as source:
+ values = list(decoder.StreamingDecoder(source))
+
+ assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]
+
+ finally:
+ os.remove(path)
+
+ def testZipfile(self):
+ # File from ZIP archive is a good example of non-seekable stream in Python 2.7
+ # In Python 3.7, it is a seekable stream.
+ _, path = tempfile.mkstemp(suffix=".zip")
+ try:
+ with zipfile.ZipFile(path, "w") as myzip:
+ myzip.writestr("data", ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)))
+
+ with zipfile.ZipFile(path, "r") as myzip:
+ with myzip.open("data", "r") as source:
+ values = list(decoder.StreamingDecoder(source))
+ assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)]
+ finally:
+ os.remove(path)
+
+ def testZipfileMany(self):
+ _, path = tempfile.mkstemp(suffix=".zip")
+ try:
+ with zipfile.ZipFile(path, "w") as myzip:
+ #for i in range(100):
+ myzip.writestr("data", ints2octs((2, 1, 12, 35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0)) * 1000)
+
+ with zipfile.ZipFile(path, "r") as myzip:
+ with myzip.open("data", "r") as source:
+ values = list(decoder.StreamingDecoder(source))
+ assert values == [12, (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)] * 1000
+ finally:
+ os.remove(path)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/ber/test_encoder.py b/contrib/python/pyasn1/py3/tests/codec/ber/test_encoder.py
new file mode 100644
index 0000000000..7701348d06
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/ber/test_encoder.py
@@ -0,0 +1,1497 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import char
+from pyasn1.codec.ber import encoder
+from pyasn1.compat.octets import ints2octs
+from pyasn1.error import PyAsn1Error
+
+
+class LargeTagEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.o = univ.Integer().subtype(
+ value=1, explicitTag=tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0xdeadbeaf)
+ )
+
+ def testEncoder(self):
+ assert encoder.encode(self.o) == ints2octs((127, 141, 245, 182, 253, 47, 3, 2, 1, 1))
+
+
+class IntegerEncoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert encoder.encode(univ.Integer(12)) == ints2octs((2, 1, 12))
+
+ def testNegInt(self):
+ assert encoder.encode(univ.Integer(-12)) == ints2octs((2, 1, 244))
+
+ def testZero(self):
+ assert encoder.encode(univ.Integer(0)) == ints2octs((2, 1, 0))
+
+ def testCompactZero(self):
+ encoder.IntegerEncoder.supportCompactZero = True
+ substrate = encoder.encode(univ.Integer(0))
+ encoder.IntegerEncoder.supportCompactZero = False
+ assert substrate == ints2octs((2, 0))
+
+ def testMinusOne(self):
+ assert encoder.encode(univ.Integer(-1)) == ints2octs((2, 1, 255))
+
+ def testPosLong(self):
+ assert encoder.encode(
+ univ.Integer(0xffffffffffffffff)
+ ) == ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))
+
+ def testNegLong(self):
+ assert encoder.encode(
+ univ.Integer(-0xffffffffffffffff)
+ ) == ints2octs((2, 9, 255, 0, 0, 0, 0, 0, 0, 0, 1))
+
+
+class IntegerEncoderWithSchemaTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert encoder.encode(12, asn1Spec=univ.Integer()) == ints2octs((2, 1, 12))
+
+ def testNegInt(self):
+ assert encoder.encode(-12, asn1Spec=univ.Integer()) == ints2octs((2, 1, 244))
+
+ def testZero(self):
+ assert encoder.encode(0, asn1Spec=univ.Integer()) == ints2octs((2, 1, 0))
+
+ def testPosLong(self):
+ assert encoder.encode(
+ 0xffffffffffffffff, asn1Spec=univ.Integer()
+ ) == ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))
+
+
+class BooleanEncoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert encoder.encode(univ.Boolean(1)) == ints2octs((1, 1, 1))
+
+ def testFalse(self):
+ assert encoder.encode(univ.Boolean(0)) == ints2octs((1, 1, 0))
+
+
+class BooleanEncoderWithSchemaTestCase(BaseTestCase):
+ def testTrue(self):
+ assert encoder.encode(True, asn1Spec=univ.Boolean()) == ints2octs((1, 1, 1))
+
+ def testFalse(self):
+ assert encoder.encode(False, asn1Spec=univ.Boolean()) == ints2octs((1, 1, 0))
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.b = univ.BitString((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1))
+
+ def testDefMode(self):
+ assert encoder.encode(self.b) == ints2octs((3, 3, 1, 169, 138))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.b, defMode=False
+ ) == ints2octs((3, 3, 1, 169, 138))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.b, maxChunkSize=1
+ ) == ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.b, defMode=False, maxChunkSize=1
+ ) == ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+
+ def testEmptyValue(self):
+ assert encoder.encode(univ.BitString([])) == ints2octs((3, 1, 0))
+
+
+class BitStringEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.b = (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1)
+ self.s = univ.BitString()
+
+ def testDefMode(self):
+ assert encoder.encode(self.b, asn1Spec=self.s) == ints2octs((3, 3, 1, 169, 138))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.b, asn1Spec=self.s, defMode=False
+ ) == ints2octs((3, 3, 1, 169, 138))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.b, asn1Spec=self.s, maxChunkSize=1
+ ) == ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.b, asn1Spec=self.s, defMode=False, maxChunkSize=1
+ ) == ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+
+ def testEmptyValue(self):
+ assert encoder.encode([], asn1Spec=self.s) == ints2octs((3, 1, 0))
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.o = univ.OctetString('Quick brown fox')
+
+ def testDefMode(self):
+ assert encoder.encode(self.o) == ints2octs(
+ (4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.o, defMode=False
+ ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.o, maxChunkSize=4
+ ) == ints2octs((36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119,
+ 110, 32, 4, 3, 102, 111, 120))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.o, defMode=False, maxChunkSize=4
+ ) == ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110,
+ 32, 4, 3, 102, 111, 120, 0, 0))
+
+
+class OctetStringEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.OctetString()
+ self.o = 'Quick brown fox'
+
+ def testDefMode(self):
+ assert encoder.encode(self.o, asn1Spec=self.s) == ints2octs(
+ (4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.o, asn1Spec=self.s, defMode=False
+ ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.o, asn1Spec=self.s, maxChunkSize=4
+ ) == ints2octs((36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119,
+ 110, 32, 4, 3, 102, 111, 120))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.o, asn1Spec=self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110,
+ 32, 4, 3, 102, 111, 120, 0, 0))
+
+
+class ExpTaggedOctetStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.o = univ.OctetString().subtype(
+ value='Quick brown fox',
+ explicitTag=tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 5)
+ )
+
+ def testDefMode(self):
+ assert encoder.encode(self.o) == ints2octs(
+ (101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.o, defMode=False
+ ) == ints2octs((101, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.o, defMode=True, maxChunkSize=4
+ ) == ints2octs((101, 25, 36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3,
+ 102, 111, 120))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.o, defMode=False, maxChunkSize=4
+ ) == ints2octs((101, 128, 36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0, 0, 0))
+
+
+class NullEncoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert encoder.encode(univ.Null('')) == ints2octs((5, 0))
+
+
+class NullEncoderWithSchemaTestCase(BaseTestCase):
+ def testNull(self):
+ assert encoder.encode(None, univ.Null()) == ints2octs((5, 0))
+
+
+class ObjectIdentifierEncoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert encoder.encode(
+ univ.ObjectIdentifier((1, 3, 6, 0, 0xffffe))
+ ) == ints2octs((6, 6, 43, 6, 0, 191, 255, 126))
+
+ def testEdge1(self):
+ assert encoder.encode(
+ univ.ObjectIdentifier((0, 39))
+ ) == ints2octs((6, 1, 39))
+
+ def testEdge2(self):
+ assert encoder.encode(
+ univ.ObjectIdentifier((1, 39))
+ ) == ints2octs((6, 1, 79))
+
+ def testEdge3(self):
+ # 01111111
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 40))
+ ) == ints2octs((6, 1, 120))
+
+ def testEdge4(self):
+ # 10010000|10000000|10000000|10000000|01001111
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 0xffffffff))
+ ) == ints2octs((6, 5, 0x90, 0x80, 0x80, 0x80, 0x4F))
+
+ def testEdge5(self):
+ # 01111111
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 47))
+ ) == ints2octs((6, 1, 0x7F))
+
+ def testEdge6(self):
+ # 10000001|00000000
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 48))
+ ) == ints2octs((6, 2, 0x81, 0x00))
+
+ def testEdge7(self):
+ # 10000001|00110100|00000003
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 100, 3))
+ ) == ints2octs((6, 3, 0x81, 0x34, 0x03))
+
+ def testEdge8(self):
+ # 10000101|00000000
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 560))
+ ) == ints2octs((6, 2, 133, 0))
+
+ def testEdge9(self):
+ # 10001000|10000100|10000111|0000010
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 16843570))
+ ) == ints2octs((6, 4, 0x88, 0x84, 0x87, 0x02))
+
+ def testEdgeA(self):
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 5))
+ ) == ints2octs((6, 1, 85))
+
+ def testImpossible1(self):
+ try:
+ encoder.encode(univ.ObjectIdentifier((3, 1, 2)))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'impossible leading arc tolerated'
+
+ def testImpossible2(self):
+ try:
+ encoder.encode(univ.ObjectIdentifier((0,)))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'single arc OID tolerated'
+
+ def testImpossible3(self):
+ try:
+ encoder.encode(univ.ObjectIdentifier((0, 40)))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'second arc overflow tolerated'
+
+ def testImpossible4(self):
+ try:
+ encoder.encode(univ.ObjectIdentifier((1, 40)))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'second arc overflow tolerated'
+
+ def testLarge1(self):
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 18446744073709551535184467440737095))
+ ) == ints2octs((0x06, 0x11, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6, 0xB8, 0xCB,
+ 0xE2, 0xB7, 0x17))
+
+ def testLarge2(self):
+ assert encoder.encode(
+ univ.ObjectIdentifier((2, 999, 18446744073709551535184467440737095))
+ ) == ints2octs((0x06, 0x13, 0x88, 0x37, 0x83, 0xC6, 0xDF, 0xD4, 0xCC, 0xB3, 0xFF, 0xFF, 0xFE, 0xF0, 0xB8, 0xD6,
+ 0xB8, 0xCB, 0xE2, 0xB6, 0x47))
+
+
+class ObjectIdentifierWithSchemaEncoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert encoder.encode(
+ (1, 3, 6, 0, 0xffffe), asn1Spec=univ.ObjectIdentifier()
+ ) == ints2octs((6, 6, 43, 6, 0, 191, 255, 126))
+
+
+class RealEncoderTestCase(BaseTestCase):
+ def testChar(self):
+ assert encoder.encode(
+ univ.Real((123, 10, 11))
+ ) == ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))
+
+ def testBin1(self):
+ assert encoder.encode( # default binEncBase = 2
+ univ.Real((0.5, 2, 0)) # check encbase = 2 and exponent = -1
+ ) == ints2octs((9, 3, 128, 255, 1))
+
+ def testBin2(self):
+ r = univ.Real((3.25, 2, 0))
+ r.binEncBase = 8 # change binEncBase only for this instance of Real
+ assert encoder.encode(
+ r # check encbase = 8
+ ) == ints2octs((9, 3, 148, 255, 13))
+
+ def testBin3(self):
+ # change binEncBase in the RealEncoder instance => for all further Real
+ binEncBase, encoder.TYPE_MAP[univ.Real.typeId].binEncBase = encoder.TYPE_MAP[univ.Real.typeId].binEncBase, 16
+ assert encoder.encode(
+ univ.Real((0.00390625, 2, 0)) # check encbase = 16
+ ) == ints2octs((9, 3, 160, 254, 1))
+ encoder.TYPE_MAP[univ.Real.typeId].binEncBase = binEncBase
+
+ def testBin4(self):
+ # choose binEncBase automatically for all further Real (testBin[4-7])
+ binEncBase, encoder.TYPE_MAP[univ.Real.typeId].binEncBase = encoder.TYPE_MAP[univ.Real.typeId].binEncBase, None
+ assert encoder.encode(
+ univ.Real((1, 2, 0)) # check exponent = 0
+ ) == ints2octs((9, 3, 128, 0, 1))
+ encoder.TYPE_MAP[univ.Real.typeId].binEncBase = binEncBase
+
+ def testBin5(self):
+ assert encoder.encode(
+ univ.Real((3, 2, -1020)) # case of 2 octs for exponent and
+ # negative exponent and abs(exponent) is
+ # all 1's and fills the whole octet(s)
+ ) == ints2octs((9, 4, 129, 252, 4, 3))
+
+ def testBin6(self):
+ assert encoder.encode(
+ univ.Real((1, 2, 262140)) # case of 3 octs for exponent and
+ # check that first 9 bits for exponent
+ # are not all 1's
+ ) == ints2octs((9, 5, 130, 3, 255, 252, 1))
+
+ def testBin7(self):
+ assert encoder.encode(
+ univ.Real((-1, 2, 76354972)) # case of >3 octs for exponent and
+ # mantissa < 0
+ ) == ints2octs((9, 7, 195, 4, 4, 141, 21, 156, 1))
+
+ def testPlusInf(self):
+ assert encoder.encode(univ.Real('inf')) == ints2octs((9, 1, 64))
+
+ def testMinusInf(self):
+ assert encoder.encode(univ.Real('-inf')) == ints2octs((9, 1, 65))
+
+ def testZero(self):
+ assert encoder.encode(univ.Real(0)) == ints2octs((9, 0))
+
+
+class RealEncoderWithSchemaTestCase(BaseTestCase):
+ def testChar(self):
+ assert encoder.encode(
+ (123, 10, 11), asn1Spec=univ.Real()
+ ) == ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))
+
+
+class UniversalStringEncoderTestCase(BaseTestCase):
+ def testEncoding(self):
+ assert encoder.encode(char.UniversalString(sys.version_info[0] >= 3 and 'abc' or unicode('abc'))) == ints2octs(
+ (28, 12, 0, 0, 0, 97, 0, 0, 0, 98, 0, 0, 0, 99)), 'Incorrect encoding'
+
+
+class UniversalStringEncoderWithSchemaTestCase(BaseTestCase):
+ def testEncoding(self):
+ assert encoder.encode(
+ sys.version_info[0] >= 3 and 'abc' or unicode('abc'), asn1Spec=char.UniversalString()
+ ) == ints2octs((28, 12, 0, 0, 0, 97, 0, 0, 0, 98, 0, 0, 0, 99)), 'Incorrect encoding'
+
+
+class BMPStringEncoderTestCase(BaseTestCase):
+ def testEncoding(self):
+ assert encoder.encode(char.BMPString(sys.version_info[0] >= 3 and 'abc' or unicode('abc'))) == ints2octs(
+ (30, 6, 0, 97, 0, 98, 0, 99)), 'Incorrect encoding'
+
+
+class BMPStringEncoderWithSchemaTestCase(BaseTestCase):
+ def testEncoding(self):
+ assert encoder.encode(
+ sys.version_info[0] >= 3 and 'abc' or unicode('abc'), asn1Spec=char.BMPString()
+ ) == ints2octs((30, 6, 0, 97, 0, 98, 0, 99)), 'Incorrect encoding'
+
+
+class UTF8StringEncoderTestCase(BaseTestCase):
+ def testEncoding(self):
+ assert encoder.encode(char.UTF8String(sys.version_info[0] >= 3 and 'abc' or unicode('abc'))) == ints2octs(
+ (12, 3, 97, 98, 99)), 'Incorrect encoding'
+
+
+class UTF8StringEncoderWithSchemaTestCase(BaseTestCase):
+ def testEncoding(self):
+ assert encoder.encode(
+ sys.version_info[0] >= 3 and 'abc' or unicode('abc'), asn1Spec=char.UTF8String()
+ ) == ints2octs((12, 3, 97, 98, 99)), 'Incorrect encoding'
+
+
+class SequenceOfEncoderTestCase(BaseTestCase):
+ def testEmpty(self):
+ s = univ.SequenceOf()
+ s.clear()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+ def testDefMode(self):
+ s = univ.SequenceOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(s) == ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ s = univ.SequenceOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False
+ ) == ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testDefModeChunked(self):
+ s = univ.SequenceOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ s = univ.SequenceOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
+class SequenceOfEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+ self.v = ['quick brown']
+
+ def testEmpty(self):
+ assert encoder.encode([], asn1Spec=self.s) == ints2octs((48, 0))
+
+ def testDefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False
+ ) == ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
+class SequenceOfEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, 'quick brown')
+
+ def testDefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((48, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
+class SetOfEncoderTestCase(BaseTestCase):
+ def testEmpty(self):
+ s = univ.SetOf()
+ s.clear()
+ assert encoder.encode(s) == ints2octs((49, 0))
+
+ def testDefMode(self):
+ s = univ.SetOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(s) == ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ s = univ.SetOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False
+ ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testDefModeChunked(self):
+ s = univ.SetOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ s = univ.SetOf()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
+class SetOfEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+ self.v = ['quick brown']
+
+ def testEmpty(self):
+ s = univ.SetOf()
+ assert encoder.encode([], asn1Spec=self.s) == ints2octs((49, 0))
+
+ def testDefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False
+ ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs(
+ (49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
+class SetOfEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, 'quick brown')
+
+ def testDefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((49, 13, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 19, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+
+class SequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class SequenceEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+ self.v = {
+ 'place-holder': None,
+ 'first-name': 'quick brown',
+ 'age': 1
+ }
+
+ def testEmpty(self):
+ try:
+ assert encoder.encode({}, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'empty bare sequence tolerated'
+
+ def testDefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class SequenceEncoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 5, 2, 1, 1, 49, 50)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.OctetString('quick brown')
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 14, 2, 1, 2, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 9, 2, 1, 1, 131, 4, 131, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 9, 2, 1, 1, 163, 4, 163, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any()), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 7, 2, 1, 1, 49, 2, 49, 50)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.OctetString('quick brown'))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 16, 2, 1, 2, 49, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 11, 2, 1, 1, 49, 6, 131, 4, 131, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 11, 2, 1, 1, 49, 6, 163, 4, 163, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((48, 2, 5, 0))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 5, 0, 0, 0))
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 2, 5, 0))
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 5, 0, 0, 0))
+
+ def testWithOptionalDefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(self.s) == ints2octs(
+ (48, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testWithOptionalDefModeChunked(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testWithOptionalIndefModeChunked(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs(
+ (48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+ def testWithDefaultedDefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(self.s) == ints2octs((48, 5, 5, 0, 2, 1, 1))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithDefaultedDefModeChunked(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((48, 5, 5, 0, 2, 1, 1))
+
+ def testWithDefaultedIndefModeChunked(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(self.s) == ints2octs(
+ (48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs(
+ (48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0,
+ 0, 2, 1, 1, 0, 0))
+
+
+class ExpTaggedSequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Integer()),
+ )
+ )
+
+ s = s.subtype(
+ explicitTag=tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5)
+ )
+
+ s[0] = 12
+
+ self.s = s
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((101, 5, 48, 3, 2, 1, 12))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((101, 128, 48, 128, 2, 1, 12, 0, 0, 0, 0))
+
+
+class ExpTaggedSequenceComponentEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Boolean().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ )
+ )
+
+ self.s[0] = True
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((48, 5, 160, 3, 1, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((48, 128, 160, 3, 1, 1, 1, 0, 0, 0, 0))
+
+
+class SetEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ assert encoder.encode(self.s) == ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class SetEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+ self.v = {
+ 'place-holder': None,
+ 'first-name': 'quick brown',
+ 'age': 1
+ }
+
+ def testEmpty(self):
+ try:
+ assert encoder.encode({}, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'empty bare SET tolerated'
+
+ def testDefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testIndefMode(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testDefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testIndefModeChunked(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class SetEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, '')
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testDefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((49, 2, 5, 0))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 0, 0))
+
+ def testDefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 2, 5, 0))
+
+ def testIndefModeChunked(self):
+ self.__init()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 0, 0))
+
+ def testWithOptionalDefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(self.s) == ints2octs(
+ (49, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testWithOptionalDefModeChunked(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testWithOptionalIndefModeChunked(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs(
+ (49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
+
+ def testWithDefaultedDefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(self.s) == ints2octs((49, 5, 5, 0, 2, 1, 1))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithDefaultedDefModeChunked(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs((49, 5, 5, 0, 2, 1, 1))
+
+ def testWithDefaultedIndefModeChunked(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedDefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(self.s) == ints2octs(
+ (49, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False
+ ) == ints2octs((49, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedDefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=True, maxChunkSize=4
+ ) == ints2octs(
+ (49, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
+
+ def testWithOptionalAndDefaultedIndefModeChunked(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s, defMode=False, maxChunkSize=4
+ ) == ints2octs((49, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
+
+
+class ChoiceEncoderTestCase(BaseTestCase):
+
+ def testEmpty(self):
+ s = univ.Choice()
+ try:
+ encoder.encode(s)
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'encoded unset choice'
+
+ def testDefModeOptionOne(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(s) == ints2octs((5, 0))
+
+ def testDefModeOptionTwo(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(s) == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testIndefMode(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False
+ ) == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+ def testDefModeChunked(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=True, maxChunkSize=4
+ ) == ints2octs((36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
+
+ def testIndefModeChunked(self):
+ s = univ.Choice()
+ s.setComponentByPosition(0, univ.OctetString('quick brown'))
+ assert encoder.encode(
+ s, defMode=False, maxChunkSize=4
+ ) == ints2octs((36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0))
+
+
+class ChoiceEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+ self.v = {
+ 'place-holder': None
+ }
+
+ def testFilled(self):
+ assert encoder.encode(
+ self.v, asn1Spec=self.s
+ ) == ints2octs((5, 0))
+
+
+class ChoiceEncoderWithComponentsSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+
+ def testEmpty(self):
+ try:
+ encoder.encode(self.s)
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'encoded unset choice'
+
+ def testFilled(self):
+ self.s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(self.s) == ints2octs((5, 0))
+
+ def testTagged(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(s) == ints2octs((164, 2, 5, 0))
+
+ def testUndefLength(self):
+ self.s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
+ assert encoder.encode(self.s, defMode=False, maxChunkSize=3) == ints2octs(
+ (36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0))
+
+ def testTaggedUndefLength(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
+ assert encoder.encode(s, defMode=False, maxChunkSize=3) == ints2octs(
+ (164, 128, 36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0, 0, 0))
+
+
+class AnyEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any(encoder.encode(univ.OctetString('fox')))
+
+ def testUntagged(self):
+ assert encoder.encode(self.s) == ints2octs((4, 3, 102, 111, 120))
+
+ def testTaggedEx(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(s) == ints2octs((164, 5, 4, 3, 102, 111, 120))
+
+ def testTaggedIm(self):
+ s = self.s.subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(s) == ints2octs((132, 5, 4, 3, 102, 111, 120))
+
+
+class AnyEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any()
+ self.v = encoder.encode(univ.OctetString('fox'))
+
+ def testUntagged(self):
+ assert encoder.encode(self.v, asn1Spec=self.s) == ints2octs((4, 3, 102, 111, 120))
+
+ def testTaggedEx(self):
+ s = self.s.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(self.v, asn1Spec=s) == ints2octs((164, 5, 4, 3, 102, 111, 120))
+
+ def testTaggedIm(self):
+ s = self.s.subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
+ )
+ assert encoder.encode(self.v, asn1Spec=s) == ints2octs((132, 5, 4, 3, 102, 111, 120))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/cer/__init__.py b/contrib/python/pyasn1/py3/tests/codec/cer/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/cer/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/codec/cer/__main__.py b/contrib/python/pyasn1/py3/tests/codec/cer/__main__.py
new file mode 100644
index 0000000000..122d7275b3
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/cer/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.cer.test_encoder.suite',
+ 'tests.codec.cer.test_decoder.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/cer/test_decoder.py b/contrib/python/pyasn1/py3/tests/codec/cer/test_decoder.py
new file mode 100644
index 0000000000..fddd36bb57
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/cer/test_decoder.py
@@ -0,0 +1,370 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.codec.cer import decoder
+from pyasn1.compat.octets import ints2octs, str2octs, null
+from pyasn1.error import PyAsn1Error
+
+
+class BooleanDecoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert decoder.decode(ints2octs((1, 1, 255))) == (1, null)
+
+ def testFalse(self):
+ assert decoder.decode(ints2octs((1, 1, 0))) == (0, null)
+
+ def testEmpty(self):
+ try:
+ decoder.decode(ints2octs((1, 0)))
+ except PyAsn1Error:
+ pass
+
+ def testOverflow(self):
+ try:
+ decoder.decode(ints2octs((1, 2, 0, 0)))
+ except PyAsn1Error:
+ pass
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ ints2octs((3, 3, 6, 170, 128))
+ ) == (((1, 0) * 5), null)
+
+ def testLongMode(self):
+ assert decoder.decode(
+ ints2octs((3, 127, 6) + (170,) * 125 + (128,))
+ ) == (((1, 0) * 501), null)
+
+ # TODO: test failures on short chunked and long unchunked substrate samples
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)),
+ ) == (str2octs('Quick brown fox'), null)
+
+ def testLongMode(self):
+ assert decoder.decode(
+ ints2octs((36, 128, 4, 130, 3, 232) + (81,) * 1000 + (4, 1, 81, 0, 0))
+ ) == (str2octs('Q' * 1001), null)
+
+ # TODO: test failures on short chunked and long unchunked substrate samples
+
+
+class SequenceDecoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 2, 1, 12, 0, 0)),
+ asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98,
+ 114, 111, 119, 110, 0, 0)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 128, 6, 1, 1, 2, 1, 12, 0, 0)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 6, 1, 12, 0, 0)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='06010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 2, 1, 12, 0, 0)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98,
+ 114, 111, 119, 110, 0, 0)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 163, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(componentType=univ.Any()),
+ openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 49, 128, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 128, 6, 1, 1, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 3, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 1, 49, 128, 2, 1, 12, 0, 0, 0, 0)),
+ asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 128, 2, 1, 2, 49, 128, 4, 11, 113, 117, 105, 99, 107, 32,
+ 98, 114, 111, 119, 110, 0, 0, 0, 0)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs( (48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/cer/test_encoder.py b/contrib/python/pyasn1/py3/tests/codec/cer/test_encoder.py
new file mode 100644
index 0000000000..680f720c3f
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/cer/test_encoder.py
@@ -0,0 +1,956 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.type import useful
+from pyasn1.codec.cer import encoder
+from pyasn1.compat.octets import ints2octs
+from pyasn1.error import PyAsn1Error
+
+
+class BooleanEncoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert encoder.encode(univ.Boolean(1)) == ints2octs((1, 1, 255))
+
+ def testFalse(self):
+ assert encoder.encode(univ.Boolean(0)) == ints2octs((1, 1, 0))
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert encoder.encode(
+ univ.BitString((1, 0) * 5)
+ ) == ints2octs((3, 3, 6, 170, 128))
+
+ def testLongMode(self):
+ assert encoder.encode(univ.BitString((1, 0) * 501)) == ints2octs((3, 127, 6) + (170,) * 125 + (128,))
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert encoder.encode(
+ univ.OctetString('Quick brown fox')
+ ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testLongMode(self):
+ assert encoder.encode(
+ univ.OctetString('Q' * 1001)
+ ) == ints2octs((36, 128, 4, 130, 3, 232) + (81,) * 1000 + (4, 1, 81, 0, 0))
+
+
+class GeneralizedTimeEncoderTestCase(BaseTestCase):
+ # def testExtraZeroInSeconds(self):
+ # try:
+ # assert encoder.encode(
+ # useful.GeneralizedTime('20150501120112.10Z')
+ # )
+ # except PyAsn1Error:
+ # pass
+ # else:
+ # assert 0, 'Meaningless trailing zero in fraction part tolerated'
+
+ def testLocalTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.GeneralizedTime('20150501120112.1+0200')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Local timezone tolerated'
+
+ def testMissingTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.GeneralizedTime('20150501120112.1')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Missing timezone tolerated'
+
+ def testDecimalCommaPoint(self):
+ try:
+ assert encoder.encode(
+ useful.GeneralizedTime('20150501120112,1Z')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Decimal comma tolerated'
+
+ def testWithSubseconds(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.59Z')
+ ) == ints2octs((24, 18, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 46, 53, 57, 90))
+
+ def testWithSubsecondsWithZeros(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.099Z')
+ ) == ints2octs((24, 18, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 46, 57, 57, 90))
+
+ def testWithSubsecondsMax(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.999Z')
+ ) == ints2octs((24, 19, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 46, 57, 57, 57, 90))
+
+ def testWithSubsecondsMin(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.000Z')
+ ) == ints2octs((24, 15, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithSubsecondsDanglingDot(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112.Z')
+ ) == ints2octs((24, 15, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithSeconds(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('20170801120112Z')
+ ) == ints2octs((24, 15, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithMinutes(self):
+ assert encoder.encode(
+ useful.GeneralizedTime('201708011201Z')
+ ) == ints2octs((24, 13, 50, 48, 49, 55, 48, 56, 48, 49, 49, 50, 48, 49, 90))
+
+
+class UTCTimeEncoderTestCase(BaseTestCase):
+ def testFractionOfSecond(self):
+ try:
+ assert encoder.encode(
+ useful.UTCTime('150501120112.10Z')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Decimal point tolerated'
+
+ def testMissingTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.UTCTime('150501120112')
+ ) == ints2octs((23, 13, 49, 53, 48, 53, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Missing timezone tolerated'
+
+ def testLocalTimezone(self):
+ try:
+ assert encoder.encode(
+ useful.UTCTime('150501120112+0200')
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Local timezone tolerated'
+
+ def testWithSeconds(self):
+ assert encoder.encode(
+ useful.UTCTime('990801120112Z')
+ ) == ints2octs((23, 13, 57, 57, 48, 56, 48, 49, 49, 50, 48, 49, 49, 50, 90))
+
+ def testWithMinutes(self):
+ assert encoder.encode(
+ useful.UTCTime('9908011201Z')
+ ) == ints2octs((23, 11, 57, 57, 48, 56, 48, 49, 49, 50, 48, 49, 90))
+
+
+class SequenceOfEncoderTestCase(BaseTestCase):
+ def testEmpty(self):
+ s = univ.SequenceOf()
+ s.clear()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+ def testDefMode1(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('ab'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testDefMode2(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('ab'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 2, 97, 98, 4, 1, 97, 0, 0))
+
+ def testDefMode3(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('b'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 1, 98, 4, 1, 97, 0, 0))
+
+ def testDefMode4(self):
+ s = univ.SequenceOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('b'))
+ assert encoder.encode(s) == ints2octs((48, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SequenceOfEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SequenceOf(componentType=univ.OctetString())
+
+ def testEmpty(self):
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((48, 128, 0, 0))
+
+ def testIndefMode1(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('ab')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testIndefMode2(self):
+ self.s.clear()
+ self.s.append('ab')
+ self.s.append('a')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 2, 97, 98, 4, 1, 97, 0, 0))
+
+ def testIndefMode3(self):
+ self.s.clear()
+ self.s.append('b')
+ self.s.append('a')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 1, 98, 4, 1, 97, 0, 0))
+
+ def testIndefMode4(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('b')
+ assert encoder.encode(self.s) == ints2octs((48, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SetOfEncoderTestCase(BaseTestCase):
+ def testEmpty(self):
+ s = univ.SetOf()
+ s.clear()
+ assert encoder.encode(s) == ints2octs((49, 128, 0, 0))
+
+ def testDefMode1(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('ab'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testDefMode2(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('ab'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testDefMode3(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('b'))
+ s.append(univ.OctetString('a'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+ def testDefMode4(self):
+ s = univ.SetOf()
+ s.append(univ.OctetString('a'))
+ s.append(univ.OctetString('b'))
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SetOfEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.SetOf(componentType=univ.OctetString())
+
+ def testEmpty(self):
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((49, 128, 0, 0))
+
+ def testIndefMode1(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('ab')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testIndefMode2(self):
+ self.s.clear()
+ self.s.append('ab')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 2, 97, 98, 0, 0))
+
+ def testIndefMode3(self):
+ self.s.clear()
+ self.s.append('b')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+ def testIndefMode4(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('b')
+
+ assert encoder.encode(self.s) == ints2octs((49, 128, 4, 1, 97, 4, 1, 98, 0, 0))
+
+
+class SetEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ assert encoder.encode(self.s) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+
+class SetEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Set(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33))
+ ))
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((49, 128, 5, 0, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 5, 0, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((49, 128, 2, 1, 1, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 5, 0, 0, 0))
+
+
+class SetEncoderWithChoiceWithSchemaEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ c = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('actual', univ.Boolean(0))
+ ))
+ self.s = univ.Set(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('status', c)
+ ))
+
+ def testIndefMode(self):
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByName('status')
+ self.s.getComponentByName('status').setComponentByPosition(0, 1)
+ assert encoder.encode(self.s) == ints2octs((49, 128, 1, 1, 255, 5, 0, 0, 0))
+
+
+class SetEncoderWithTaggedChoiceEncoderTestCase(BaseTestCase):
+
+ def testWithUntaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ )
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 128, 1, 1, 255, 4, 1, 65, 0, 0))
+
+ def testWithTaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ ).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 128, 4, 1, 65, 167, 128, 1, 1, 255, 0, 0, 0, 0))
+
+
+class SequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ assert encoder.encode(self.s) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+
+class SequenceEncoderWithSchemaTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33))
+ )
+ )
+
+ def __init(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+
+ def __initWithOptional(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(1, 'quick brown')
+
+ def __initWithDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0)
+ self.s.setComponentByPosition(2, 1)
+
+ def __initWithOptionalAndDefaulted(self):
+ self.s.clear()
+ self.s.setComponentByPosition(0, univ.Null(''))
+ self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
+ self.s.setComponentByPosition(2, univ.Integer(1))
+
+ def testIndefMode(self):
+ self.__init()
+ assert encoder.encode(self.s) == ints2octs((48, 128, 5, 0, 0, 0))
+
+ def testWithOptionalIndefMode(self):
+ self.__initWithOptional()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
+
+ def testWithDefaultedIndefMode(self):
+ self.__initWithDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))
+
+ def testWithOptionalAndDefaultedIndefMode(self):
+ self.__initWithOptionalAndDefaulted()
+ assert encoder.encode(
+ self.s
+ ) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
+
+
+class SequenceEncoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 50, 0, 0)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.OctetString('quick brown')
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 2, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110, 0, 0)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 163, 128, 163, 128, 49, 50, 0, 0, 0, 0, 0, 0)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 163, 128, 163, 128, 49, 50, 0, 0, 0, 0, 0, 0)
+ )
+
+
+class SequenceEncoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any()), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 128, 49, 50, 0, 0, 0, 0)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.OctetString('quick brown'))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 2, 49, 128, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110, 0, 0, 0, 0)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 128, 163, 128, 163, 128, 49, 50, 0, 0,
+ 0, 0, 0, 0, 0, 0)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 128, 2, 1, 1, 49, 128, 163, 128, 163, 128, 49, 50, 0, 0,
+ 0, 0, 0, 0, 0, 0)
+ )
+
+
+class NestedOptionalSequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ inner = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ outerWithOptional = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', inner),
+ )
+ )
+
+ outerWithDefault = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('inner', inner),
+ )
+ )
+
+ self.s1 = outerWithOptional
+ self.s2 = outerWithDefault
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithDefault(self):
+ self.s1.clear()
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ return self.s1
+
+ def __initOptional(self):
+ self.s1.clear()
+ return self.s1
+
+ def __initDefaultWithDefaultAndOptional(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ self.s2[0][1] = 123
+ return self.s2
+
+ def __initDefaultWithDefault(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ return self.s2
+
+ def __initDefaultWithOptional(self):
+ self.s2.clear()
+ self.s2[0][1] = 123
+ return self.s2
+
+ def testOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+ def testDefaultWithDefaultAndOptional(self):
+ s = self.__initDefaultWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 2, 1, 123, 0, 0, 0, 0))
+
+ def testDefaultWithDefault(self):
+ s = self.__initDefaultWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testDefaultWithOptional(self):
+ s = self.__initDefaultWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 2, 1, 123, 0, 0, 0, 0))
+
+
+class NestedOptionalChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ layer3 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ layer2 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('inner', layer3),
+ namedtype.NamedType('first-name', univ.OctetString())
+ )
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithDefault(self):
+ self.s.clear()
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 2, 1, 123, 0, 0, 0, 0))
+
+ def testOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+
+class NestedOptionalSequenceOfEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ layer2 = univ.SequenceOf(
+ componentType=univ.OctetString()
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithValue(self):
+ self.s.clear()
+ self.s[0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testOptionalWithValue(self):
+ s = self.__initOptionalWithValue()
+ assert encoder.encode(s) == ints2octs((48, 128, 48, 128, 4, 4, 116, 101, 115, 116, 0, 0, 0, 0))
+
+ def testOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 128, 0, 0))
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/der/__init__.py b/contrib/python/pyasn1/py3/tests/codec/der/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/der/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/codec/der/__main__.py b/contrib/python/pyasn1/py3/tests/codec/der/__main__.py
new file mode 100644
index 0000000000..23560098fd
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/der/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.der.test_encoder.suite',
+ 'tests.codec.der.test_decoder.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/der/test_decoder.py b/contrib/python/pyasn1/py3/tests/codec/der/test_decoder.py
new file mode 100644
index 0000000000..5f61408317
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/der/test_decoder.py
@@ -0,0 +1,368 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.codec.der import decoder
+from pyasn1.compat.octets import ints2octs, null
+from pyasn1.error import PyAsn1Error
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ ints2octs((3, 127, 6) + (170,) * 125 + (128,))
+ ) == (((1, 0) * 501), null)
+
+ def testIndefMode(self):
+ try:
+ decoder.decode(
+ ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'indefinite length encoding tolerated'
+
+ def testDefModeChunked(self):
+ try:
+ assert decoder.decode(
+ ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'chunked encoding tolerated'
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testShortMode(self):
+ assert decoder.decode(
+ '\004\017Quick brown fox'.encode()
+ ) == ('Quick brown fox'.encode(), ''.encode())
+
+ def testIndefMode(self):
+ try:
+ decoder.decode(
+ ints2octs((36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'indefinite length encoding tolerated'
+
+ def testChunkedMode(self):
+ try:
+ decoder.decode(
+ ints2octs((36, 23, 4, 2, 81, 117, 4, 2, 105, 99, 4, 2, 107, 32, 4, 2, 98, 114, 4, 2, 111, 119, 4, 1, 110))
+ )
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'chunked encoding tolerated'
+
+
+class SequenceDecoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 3, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='060127')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 1, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 16, 2, 1, 2, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 131, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 163, 3, 2, 1, 12)), asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithUnaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(componentType=univ.Any()),
+ openType=openType)
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == univ.OctetString('quick brown')
+
+ def testDecodeOpenTypesUnknownType(self):
+ try:
+ s, r = decoder.decode(
+ ints2octs((48, 6, 2, 1, 2, 6, 1, 39)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'unknown open type tolerated'
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 3, 49, 3, 2, 1, 12)), asn1Spec=self.s,
+ decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010c')
+
+ def testDontDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 8, 2, 1, 1, 49, 3, 2, 1, 12)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == ints2octs((2, 1, 12))
+
+ def testDontDecodeOpenTypesChoiceTwo(self):
+ s, r = decoder.decode(
+ ints2octs((48, 18, 2, 1, 2, 49, 13, 4, 11, 113, 117, 105, 99,
+ 107, 32, 98, 114, 111, 119, 110)), asn1Spec=self.s
+ )
+ assert not r
+ assert s[0] == 2
+ assert s[1][0] == ints2octs((4, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110))
+
+
+class SequenceDecoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+class SequenceDecoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType(
+ 'blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType
+ )
+ )
+ )
+
+ def testDecodeOpenTypesChoiceOne(self):
+ s, r = decoder.decode(
+ ints2octs((48, 10, 2, 1, 1, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 1
+ assert s[1][0] == 12
+
+ def testDecodeOpenTypesUnknownId(self):
+ s, r = decoder.decode(
+ ints2octs( (48, 10, 2, 1, 3, 49, 5, 131, 3, 2, 1, 12)),
+ asn1Spec=self.s, decodeOpenTypes=True
+ )
+ assert not r
+ assert s[0] == 3
+ assert s[1][0] == univ.OctetString(hexValue='02010C')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/der/test_encoder.py b/contrib/python/pyasn1/py3/tests/codec/der/test_encoder.py
new file mode 100644
index 0000000000..6500396115
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/der/test_encoder.py
@@ -0,0 +1,665 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import univ
+from pyasn1.codec.der import encoder
+from pyasn1.compat.octets import ints2octs
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def testDefModeShort(self):
+ assert encoder.encode(
+ univ.OctetString('Quick brown fox')
+ ) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
+
+ def testDefModeLong(self):
+ assert encoder.encode(
+ univ.OctetString('Q' * 10000)
+ ) == ints2octs((4, 130, 39, 16) + (81,) * 10000)
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def testDefModeShort(self):
+ assert encoder.encode(
+ univ.BitString((1,))
+ ) == ints2octs((3, 2, 7, 128))
+
+ def testDefModeLong(self):
+ assert encoder.encode(
+ univ.BitString((1,) * 80000)
+ ) == ints2octs((3, 130, 39, 17, 0) + (255,) * 10000)
+
+
+class SetOfEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.SetOf(componentType=univ.OctetString())
+
+ def testDefMode1(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('ab')
+
+ assert encoder.encode(self.s) == ints2octs((49, 7, 4, 1, 97, 4, 2, 97, 98))
+
+ def testDefMode2(self):
+ self.s.clear()
+ self.s.append('ab')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 7, 4, 1, 97, 4, 2, 97, 98))
+
+ def testDefMode3(self):
+ self.s.clear()
+ self.s.append('b')
+ self.s.append('a')
+
+ assert encoder.encode(self.s) == ints2octs((49, 6, 4, 1, 97, 4, 1, 98))
+
+ def testDefMode4(self):
+ self.s.clear()
+ self.s.append('a')
+ self.s.append('b')
+
+ assert encoder.encode(self.s) == ints2octs((49, 6, 4, 1, 97, 4, 1, 98))
+
+
+class SetWithAlternatingChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ c = univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('amount', univ.Boolean()))
+ )
+
+ self.s = univ.Set(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('value', univ.Integer(5)),
+ namedtype.NamedType('status', c))
+ )
+
+ def testComponentsOrdering1(self):
+ self.s.setComponentByName('status')
+ self.s.getComponentByName('status').setComponentByPosition(0, 'A')
+ assert encoder.encode(self.s) == ints2octs((49, 6, 2, 1, 5, 4, 1, 65))
+
+ def testComponentsOrdering2(self):
+ self.s.setComponentByName('status')
+ self.s.getComponentByName('status').setComponentByPosition(1, True)
+ assert encoder.encode(self.s) == ints2octs((49, 6, 1, 1, 255, 2, 1, 5))
+
+
+class SetWithTaggedChoiceEncoderTestCase(BaseTestCase):
+
+ def testWithUntaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ )
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 6, 1, 1, 255, 4, 1, 65))
+
+ def testWithTaggedChoice(self):
+
+ c = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('premium', univ.Boolean())
+ )
+ ).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))
+
+ s = univ.Set(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('customer', c)
+ )
+ )
+
+ s.setComponentByName('name', 'A')
+ s.getComponentByName('customer').setComponentByName('premium', True)
+
+ assert encoder.encode(s) == ints2octs((49, 8, 4, 1, 65, 167, 3, 1, 1, 255))
+
+
+class SequenceEncoderWithUntaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any(), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 5, 2, 1, 1, 49, 50)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.OctetString('quick brown')
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 14, 2, 1, 2, 113, 117, 105, 99, 107, 32,
+ 98, 114, 111, 119, 110)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1] = univ.ObjectIdentifier('1.3.6')
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 9, 2, 1, 1, 131, 4, 131, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1] = univ.Integer(12)
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 9, 2, 1, 1, 163, 4, 163, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithUntaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any()), openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 7, 2, 1, 1, 49, 2, 49, 50)
+ )
+
+ def testEncodeOpenTypeChoiceTwo(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.OctetString('quick brown'))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 16, 2, 1, 2, 49, 11, 113, 117, 105, 99, 107, 32, 98, 114,
+ 111, 119, 110)
+ )
+
+ def testEncodeOpenTypeUnknownId(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+ def testEncodeOpenTypeIncompatibleType(self):
+ self.s.clear()
+
+ self.s[0] = 2
+ self.s[1].append(univ.ObjectIdentifier('1.3.6'))
+
+ try:
+ encoder.encode(self.s, asn1Spec=self.s)
+
+ except PyAsn1Error:
+ assert False, 'incompatible open type tolerated'
+
+
+class SequenceEncoderWithImplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 11, 2, 1, 1, 49, 6, 131, 4, 131, 2, 49, 50)
+ )
+
+
+class SequenceEncoderWithExplicitlyTaggedSetOfOpenTypesTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ openType = opentype.OpenType(
+ 'id',
+ {1: univ.Integer(),
+ 2: univ.OctetString()}
+ )
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.SetOf(
+ componentType=univ.Any().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ openType=openType)
+ )
+ )
+
+ def testEncodeOpenTypeChoiceOne(self):
+ self.s.clear()
+
+ self.s[0] = 1
+ self.s[1].append(univ.Integer(12))
+
+ assert encoder.encode(self.s, asn1Spec=self.s) == ints2octs(
+ (48, 11, 2, 1, 1, 49, 6, 163, 4, 163, 2, 49, 50)
+ )
+
+
+class NestedOptionalSequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ inner = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ outerWithOptional = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', inner),
+ )
+ )
+
+ outerWithDefault = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('inner', inner),
+ )
+ )
+
+ self.s1 = outerWithOptional
+ self.s2 = outerWithDefault
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithDefault(self):
+ self.s1.clear()
+ self.s1[0][1] = 123
+ return self.s1
+
+ def __initOptionalWithOptional(self):
+ self.s1.clear()
+ self.s1[0][0] = 'test'
+ return self.s1
+
+ def __initOptional(self):
+ self.s1.clear()
+ return self.s1
+
+ def __initDefaultWithDefaultAndOptional(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ self.s2[0][1] = 123
+ return self.s2
+
+ def __initDefaultWithDefault(self):
+ self.s2.clear()
+ self.s2[0][0] = 'test'
+ return self.s2
+
+ def __initDefaultWithOptional(self):
+ self.s2.clear()
+ self.s2[0][1] = 123
+ return self.s2
+
+ def testDefModeOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 11, 48, 9, 4, 4, 116, 101, 115, 116, 2, 1, 123))
+
+ def testDefModeOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 5, 48, 3, 2, 1, 123))
+
+ def testDefModeOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+ def testDefModeDefaultWithDefaultAndOptional(self):
+ s = self.__initDefaultWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 11, 48, 9, 4, 4, 116, 101, 115, 116, 2, 1, 123))
+
+ def testDefModeDefaultWithDefault(self):
+ s = self.__initDefaultWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeDefaultWithOptional(self):
+ s = self.__initDefaultWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 5, 48, 3, 2, 1, 123))
+
+
+class NestedOptionalChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ layer3 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('first-name', univ.OctetString()),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ )
+ )
+
+ layer2 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('inner', layer3),
+ namedtype.NamedType('first-name', univ.OctetString())
+ )
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithDefaultAndOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithDefault(self):
+ self.s.clear()
+ self.s[0][0][1] = 123
+ return self.s
+
+ def __initOptionalWithOptional(self):
+ self.s.clear()
+ self.s[0][0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testDefModeOptionalWithDefaultAndOptional(self):
+ s = self.__initOptionalWithDefaultAndOptional()
+ assert encoder.encode(s) == ints2octs((48, 11, 48, 9, 4, 4, 116, 101, 115, 116, 2, 1, 123))
+
+ def testDefModeOptionalWithDefault(self):
+ s = self.__initOptionalWithDefault()
+ assert encoder.encode(s) == ints2octs((48, 5, 48, 3, 2, 1, 123))
+
+ def testDefModeOptionalWithOptional(self):
+ s = self.__initOptionalWithOptional()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+
+class NestedOptionalSequenceOfEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ layer2 = univ.SequenceOf(
+ componentType=univ.OctetString()
+ )
+
+ layer1 = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('inner', layer2),
+ )
+ )
+
+ self.s = layer1
+
+ def __initOptionalWithValue(self):
+ self.s.clear()
+ self.s[0][0] = 'test'
+ return self.s
+
+ def __initOptional(self):
+ self.s.clear()
+ return self.s
+
+ def testDefModeOptionalWithValue(self):
+ s = self.__initOptionalWithValue()
+ assert encoder.encode(s) == ints2octs((48, 8, 48, 6, 4, 4, 116, 101, 115, 116))
+
+ def testDefModeOptional(self):
+ s = self.__initOptional()
+ assert encoder.encode(s) == ints2octs((48, 0))
+
+
+class EmptyInnerFieldOfSequenceEncoderTestCase(BaseTestCase):
+
+ def testInitializedOptionalNullIsEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('null', univ.Null())
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 2, 5, 0))
+
+ def testUninitializedOptionalNullIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('null', univ.Null())
+ )
+ )
+
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+ def testInitializedDefaultNullIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('null', univ.Null(''))
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+ def testInitializedOptionalOctetStringIsEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('str', univ.OctetString())
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 2, 4, 0))
+
+ def testUninitializedOptionalOctetStringIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.OptionalNamedType('str', univ.OctetString())
+ )
+ )
+
+ self.s.clear()
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+ def testInitializedDefaultOctetStringIsNotEncoded(self):
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('str', univ.OctetString(''))
+ )
+ )
+
+ self.s.clear()
+ self.s[0] = ''
+ assert encoder.encode(self.s) == ints2octs((48, 0))
+
+
+class ClassConstructorTestCase(BaseTestCase):
+ def testKeywords(self):
+ tagmap = {"tagmap": True}
+ typemap = {"typemap": True}
+
+ sie = encoder.Encoder()._singleItemEncoder
+ self.assertIs(sie._tagMap, encoder.TAG_MAP)
+ self.assertIs(sie._typeMap, encoder.TYPE_MAP)
+
+ sie = encoder.Encoder(
+ tagMap=tagmap, typeMap=typemap
+ )._singleItemEncoder
+ self.assertIs(sie._tagMap, tagmap)
+ self.assertIs(sie._typeMap, typemap)
+
+ sie = encoder.Encoder(tagmap, typemap)._singleItemEncoder
+ self.assertIs(sie._tagMap, tagmap)
+ self.assertIs(sie._typeMap, typemap)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/native/__init__.py b/contrib/python/pyasn1/py3/tests/codec/native/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/native/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/codec/native/__main__.py b/contrib/python/pyasn1/py3/tests/codec/native/__main__.py
new file mode 100644
index 0000000000..ab7faea877
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/native/__main__.py
@@ -0,0 +1,15 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.codec.native.test_encoder.suite',
+ 'tests.codec.native.test_decoder.suite']
+)
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/native/test_decoder.py b/contrib/python/pyasn1/py3/tests/codec/native/test_decoder.py
new file mode 100644
index 0000000000..be7fd7ec0a
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/native/test_decoder.py
@@ -0,0 +1,120 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.codec.native import decoder
+from pyasn1.error import PyAsn1Error
+
+
+class BadAsn1SpecTestCase(BaseTestCase):
+ def testBadSpec(self):
+ try:
+ decoder.decode('', asn1Spec='not an Asn1Item')
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Invalid asn1Spec accepted'
+
+
+class IntegerDecoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert decoder.decode(12, asn1Spec=univ.Integer()) == univ.Integer(12)
+
+ def testNegInt(self):
+ assert decoder.decode(-12, asn1Spec=univ.Integer()) == univ.Integer(-12)
+
+
+class BooleanDecoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert decoder.decode(True, asn1Spec=univ.Boolean()) == univ.Boolean(True)
+
+ def testTrueNeg(self):
+ assert decoder.decode(False, asn1Spec=univ.Boolean()) == univ.Boolean(False)
+
+
+class BitStringDecoderTestCase(BaseTestCase):
+ def testSimple(self):
+ assert decoder.decode('11111111', asn1Spec=univ.BitString()) == univ.BitString(hexValue='ff')
+
+
+class OctetStringDecoderTestCase(BaseTestCase):
+ def testSimple(self):
+ assert decoder.decode('Quick brown fox', asn1Spec=univ.OctetString()) == univ.OctetString('Quick brown fox')
+
+
+class NullDecoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert decoder.decode(None, asn1Spec=univ.Null()) == univ.Null('')
+
+
+class ObjectIdentifierDecoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert decoder.decode('1.3.6.11', asn1Spec=univ.ObjectIdentifier()) == univ.ObjectIdentifier('1.3.6.11')
+
+
+class RealDecoderTestCase(BaseTestCase):
+ def testSimple(self):
+ assert decoder.decode(1.33, asn1Spec=univ.Real()) == univ.Real(1.33)
+
+
+class SequenceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.NamedType('first-name', univ.OctetString()),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+
+ def testSimple(self):
+ s = self.s.clone()
+ s[0] = univ.Null('')
+ s[1] = univ.OctetString('xx')
+ s[2] = univ.Integer(33)
+ assert decoder.decode({'place-holder': None, 'first-name': 'xx', 'age': 33}, asn1Spec=self.s) == s
+
+
+class ChoiceDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null()),
+ namedtype.NamedType('first-name', univ.OctetString()),
+ namedtype.NamedType('age', univ.Integer(33))
+ )
+ )
+
+ def testSimple(self):
+ s = self.s.clone()
+ s[1] = univ.OctetString('xx')
+ assert decoder.decode({'first-name': 'xx'}, asn1Spec=self.s) == s
+
+
+class AnyDecoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Any()
+
+ def testSimple(self):
+ assert decoder.decode('fox', asn1Spec=univ.Any()) == univ.Any('fox')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/native/test_encoder.py b/contrib/python/pyasn1/py3/tests/codec/native/test_encoder.py
new file mode 100644
index 0000000000..662c284b3c
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/native/test_encoder.py
@@ -0,0 +1,141 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.codec.native import encoder
+from pyasn1.compat.octets import str2octs
+from pyasn1.error import PyAsn1Error
+
+
+class BadAsn1SpecTestCase(BaseTestCase):
+ def testBadValueType(self):
+ try:
+ encoder.encode('not an Asn1Item')
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert 0, 'Invalid value type accepted'
+
+
+class IntegerEncoderTestCase(BaseTestCase):
+ def testPosInt(self):
+ assert encoder.encode(univ.Integer(12)) == 12
+
+ def testNegInt(self):
+ assert encoder.encode(univ.Integer(-12)) == -12
+
+
+class BooleanEncoderTestCase(BaseTestCase):
+ def testTrue(self):
+ assert encoder.encode(univ.Boolean(1)) is True
+
+ def testFalse(self):
+ assert encoder.encode(univ.Boolean(0)) is False
+
+
+class BitStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.b = univ.BitString((1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1))
+
+ def testValue(self):
+ assert encoder.encode(self.b) == '101010011000101'
+
+
+class OctetStringEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.o = univ.OctetString('Quick brown fox')
+
+ def testValue(self):
+ assert encoder.encode(self.o) == str2octs('Quick brown fox')
+
+
+class NullEncoderTestCase(BaseTestCase):
+ def testNull(self):
+ assert encoder.encode(univ.Null('')) is None
+
+
+class ObjectIdentifierEncoderTestCase(BaseTestCase):
+ def testOne(self):
+ assert encoder.encode(univ.ObjectIdentifier((1, 3, 6, 0, 12345))) == '1.3.6.0.12345'
+
+
+class RealEncoderTestCase(BaseTestCase):
+ def testChar(self):
+ assert encoder.encode(univ.Real((123, 10, 11))) == 1.23e+13
+
+ def testPlusInf(self):
+ assert encoder.encode(univ.Real('inf')) == float('inf')
+
+ def testMinusInf(self):
+ assert encoder.encode(univ.Real('-inf')) == float('-inf')
+
+
+class SequenceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.OptionalNamedType('first-name', univ.OctetString('')),
+ namedtype.DefaultedNamedType('age', univ.Integer(33)),
+ ))
+
+ def testSimple(self):
+ s = self.s.clone()
+ s[0] = univ.Null('')
+ s[1] = 'abc'
+ s[2] = 123
+ assert encoder.encode(s) == {'place-holder': None, 'first-name': str2octs('abc'), 'age': 123}
+
+
+class ChoiceEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('place-holder', univ.Null('')),
+ namedtype.NamedType('number', univ.Integer(0)),
+ namedtype.NamedType('string', univ.OctetString())
+ )
+ )
+
+ def testEmpty(self):
+ try:
+ encoder.encode(self.s)
+ except PyAsn1Error:
+ pass
+ else:
+ assert False, 'encoded unset choice'
+
+ def testFilled(self):
+ self.s.setComponentByPosition(0, univ.Null(''))
+ assert encoder.encode(self.s) == {'place-holder': None}
+
+
+class AnyEncoderTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.s = univ.Any(encoder.encode(univ.OctetString('fox')))
+
+ def testSimple(self):
+ assert encoder.encode(self.s) == str2octs('fox')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/codec/test_streaming.py b/contrib/python/pyasn1/py3/tests/codec/test_streaming.py
new file mode 100644
index 0000000000..7dc87257f2
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/codec/test_streaming.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import io
+import sys
+
+try:
+ import unittest2 as unittest
+
+except ImportError:
+ import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.codec import streaming
+
+
+class CachingStreamWrapperTestCase(BaseTestCase):
+ def setUp(self):
+ self.shortText = b"abcdefghij"
+ self.longText = self.shortText * (io.DEFAULT_BUFFER_SIZE * 5)
+ self.shortStream = io.BytesIO(self.shortText)
+ self.longStream = io.BytesIO(self.longText)
+
+ def testReadJustFromCache(self):
+ wrapper = streaming.CachingStreamWrapper(self.shortStream)
+ wrapper.read(6)
+ wrapper.seek(3)
+ assert wrapper.read(1) == b"d"
+ assert wrapper.read(1) == b"e"
+ assert wrapper.tell() == 5
+
+ def testReadFromCacheAndStream(self):
+ wrapper = streaming.CachingStreamWrapper(self.shortStream)
+ wrapper.read(6)
+ wrapper.seek(3)
+ assert wrapper.read(4) == b"defg"
+ assert wrapper.tell() == 7
+
+ def testReadJustFromStream(self):
+ wrapper = streaming.CachingStreamWrapper(self.shortStream)
+ assert wrapper.read(6) == b"abcdef"
+ assert wrapper.tell() == 6
+
+ def testPeek(self):
+ wrapper = streaming.CachingStreamWrapper(self.longStream)
+ read_bytes = wrapper.peek(io.DEFAULT_BUFFER_SIZE + 73)
+ assert len(read_bytes) == io.DEFAULT_BUFFER_SIZE + 73
+ assert read_bytes.startswith(b"abcdefg")
+ assert wrapper.tell() == 0
+ assert wrapper.read(4) == b"abcd"
+
+ def testMarkedPositionResets(self):
+ wrapper = streaming.CachingStreamWrapper(self.longStream)
+ wrapper.read(10)
+ wrapper.markedPosition = wrapper.tell()
+ assert wrapper.markedPosition == 10
+
+ # Reach the maximum capacity of cache
+ wrapper.read(io.DEFAULT_BUFFER_SIZE)
+ assert wrapper.tell() == 10 + io.DEFAULT_BUFFER_SIZE
+
+ # The following should clear the cache
+ wrapper.markedPosition = wrapper.tell()
+ assert wrapper.markedPosition == 0
+ assert len(wrapper._cache.getvalue()) == 0
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/compat/__init__.py b/contrib/python/pyasn1/py3/tests/compat/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/compat/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/compat/__main__.py b/contrib/python/pyasn1/py3/tests/compat/__main__.py
new file mode 100644
index 0000000000..94436847ba
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/compat/__main__.py
@@ -0,0 +1,16 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.compat.test_integer.suite',
+ 'tests.compat.test_octets.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/compat/test_integer.py b/contrib/python/pyasn1/py3/tests/compat/test_integer.py
new file mode 100644
index 0000000000..4026b75402
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/compat/test_integer.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.compat import integer
+
+
+class IntegerTestCase(BaseTestCase):
+
+ if sys.version_info[0] > 2:
+
+ def test_from_bytes_zero(self):
+ assert 0 == integer.from_bytes(bytes([0]), signed=False)
+
+ def test_from_bytes_unsigned(self):
+ assert -66051 == integer.from_bytes(bytes([254, 253, 253]), signed=True)
+
+ def test_from_bytes_signed(self):
+ assert 66051 == integer.from_bytes(bytes([0, 1, 2, 3]), signed=False)
+
+ def test_from_bytes_empty(self):
+ assert 0 == integer.from_bytes(bytes([]))
+
+ else:
+
+ def test_from_bytes_zero(self):
+ assert 0 == integer.from_bytes('\x00', signed=False)
+
+ def test_from_bytes_unsigned(self):
+ assert -66051 == integer.from_bytes('\xfe\xfd\xfd', signed=True)
+
+ def test_from_bytes_signed(self):
+ assert 66051 == integer.from_bytes('\x01\x02\x03', signed=False)
+
+ def test_from_bytes_empty(self):
+ assert 0 == integer.from_bytes('')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/compat/test_octets.py b/contrib/python/pyasn1/py3/tests/compat/test_octets.py
new file mode 100644
index 0000000000..4133950704
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/compat/test_octets.py
@@ -0,0 +1,113 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.compat import octets
+
+
+class OctetsTestCase(BaseTestCase):
+
+ if sys.version_info[0] > 2:
+
+ def test_ints2octs(self):
+ assert [1, 2, 3] == list(octets.ints2octs([1, 2, 3]))
+
+ def test_ints2octs_empty(self):
+ assert not octets.ints2octs([])
+
+ def test_int2oct(self):
+ assert [12] == list(octets.int2oct(12))
+
+ def test_octs2ints(self):
+ assert [1, 2, 3] == list(octets.octs2ints(bytes([1, 2, 3])))
+
+ def test_octs2ints_empty(self):
+ assert not octets.octs2ints(bytes([]))
+
+ def test_oct2int(self):
+ assert 12 == octets.oct2int(bytes([12]))[0]
+
+ def test_str2octs(self):
+ assert bytes([1, 2, 3]) == octets.str2octs('\x01\x02\x03')
+
+ def test_str2octs_empty(self):
+ assert not octets.str2octs('')
+
+ def test_octs2str(self):
+ assert '\x01\x02\x03' == octets.octs2str(bytes([1, 2, 3]))
+
+ def test_octs2str_empty(self):
+ assert not octets.octs2str(bytes([]))
+
+ def test_isOctetsType(self):
+ assert octets.isOctetsType('abc') == False
+ assert octets.isOctetsType(123) == False
+ assert octets.isOctetsType(bytes()) == True
+
+ def test_isStringType(self):
+ assert octets.isStringType('abc') == True
+ assert octets.isStringType(123) == False
+ assert octets.isStringType(bytes()) == False
+
+ def test_ensureString(self):
+ assert 'abc'.encode() == octets.ensureString('abc'.encode())
+ assert bytes([1, 2, 3]) == octets.ensureString([1, 2, 3])
+
+ else:
+
+ def test_ints2octs(self):
+ assert '\x01\x02\x03' == octets.ints2octs([1, 2, 3])
+
+ def test_ints2octs_empty(self):
+ assert not octets.ints2octs([])
+
+ def test_int2oct(self):
+ assert '\x0c' == octets.int2oct(12)
+
+ def test_octs2ints(self):
+ assert [1, 2, 3] == octets.octs2ints('\x01\x02\x03')
+
+ def test_octs2ints_empty(self):
+ assert not octets.octs2ints('')
+
+ def test_oct2int(self):
+ assert 12 == octets.oct2int('\x0c')
+
+ def test_str2octs(self):
+ assert '\x01\x02\x03' == octets.str2octs('\x01\x02\x03')
+
+ def test_str2octs_empty(self):
+ assert not octets.str2octs('')
+
+ def test_octs2str(self):
+ assert '\x01\x02\x03' == octets.octs2str('\x01\x02\x03')
+
+ def test_octs2str_empty(self):
+ assert not octets.octs2str('')
+
+ def test_isOctetsType(self):
+ assert octets.isOctetsType('abc') == True
+ assert octets.isOctetsType(123) == False
+ assert octets.isOctetsType(unicode('abc')) == False
+
+ def test_isStringType(self):
+ assert octets.isStringType('abc') == True
+ assert octets.isStringType(123) == False
+ assert octets.isStringType(unicode('abc')) == True
+
+ def test_ensureString(self):
+ assert 'abc' == octets.ensureString('abc')
+ assert '123' == octets.ensureString(123)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/test_debug.py b/contrib/python/pyasn1/py3/tests/test_debug.py
new file mode 100644
index 0000000000..84ba4f44c4
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/test_debug.py
@@ -0,0 +1,37 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1 import debug
+from pyasn1 import error
+
+class DebugCaseBase(BaseTestCase):
+ def testKnownFlags(self):
+ debug.setLogger(0)
+ debug.setLogger(debug.Debug('all', 'encoder', 'decoder'))
+ debug.setLogger(0)
+
+ def testUnknownFlags(self):
+ try:
+ debug.setLogger(debug.Debug('all', 'unknown', loggerName='xxx'))
+
+ except error.PyAsn1Error:
+ debug.setLogger(0)
+ return
+
+ else:
+ debug.setLogger(0)
+ assert 0, 'unknown debug flag tolerated'
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/__init__.py b/contrib/python/pyasn1/py3/tests/type/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/contrib/python/pyasn1/py3/tests/type/__main__.py b/contrib/python/pyasn1/py3/tests/type/__main__.py
new file mode 100644
index 0000000000..67ff23e3c0
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/__main__.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import unittest
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ ['tests.type.test_constraint.suite',
+ 'tests.type.test_opentype.suite',
+ 'tests.type.test_namedtype.suite',
+ 'tests.type.test_namedval.suite',
+ 'tests.type.test_tag.suite',
+ 'tests.type.test_univ.suite',
+ 'tests.type.test_char.suite',
+ 'tests.type.test_useful.suite']
+)
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_char.py b/contrib/python/pyasn1/py3/tests/type/test_char.py
new file mode 100644
index 0000000000..efa179eb0e
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_char.py
@@ -0,0 +1,169 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import pickle
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import char
+from pyasn1.type import univ
+from pyasn1.type import constraint
+from pyasn1.compat.octets import ints2octs
+from pyasn1.error import PyAsn1Error
+
+
+class AbstractStringTestCase(object):
+
+ initializer = ()
+ encoding = 'us-ascii'
+ asn1Type = None
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.asn1String = self.asn1Type(ints2octs(self.initializer), encoding=self.encoding)
+ self.pythonString = ints2octs(self.initializer).decode(self.encoding)
+
+ def testUnicode(self):
+ assert self.asn1String == self.pythonString, 'unicode init fails'
+
+ def testLength(self):
+ assert len(self.asn1String) == len(self.pythonString), 'unicode len() fails'
+
+ def testSizeConstraint(self):
+ asn1Spec = self.asn1Type(subtypeSpec=constraint.ValueSizeConstraint(1, 1))
+
+ try:
+ asn1Spec.clone(self.pythonString)
+ except PyAsn1Error:
+ pass
+ else:
+ assert False, 'Size constraint tolerated'
+
+ try:
+ asn1Spec.clone(self.pythonString[0])
+ except PyAsn1Error:
+ assert False, 'Size constraint failed'
+
+ def testSerialised(self):
+ if sys.version_info[0] < 3:
+ assert str(self.asn1String) == self.pythonString.encode(self.encoding), '__str__() fails'
+ else:
+ assert bytes(self.asn1String) == self.pythonString.encode(self.encoding), '__str__() fails'
+
+ def testPrintable(self):
+ if sys.version_info[0] < 3:
+ assert unicode(self.asn1String) == self.pythonString, '__str__() fails'
+ else:
+ assert str(self.asn1String) == self.pythonString, '__str__() fails'
+
+ def testInit(self):
+ assert self.asn1Type(self.pythonString) == self.pythonString
+ assert self.asn1Type(self.pythonString.encode(self.encoding)) == self.pythonString
+ assert self.asn1Type(univ.OctetString(self.pythonString.encode(self.encoding))) == self.pythonString
+ assert self.asn1Type(self.asn1Type(self.pythonString)) == self.pythonString
+ assert self.asn1Type(self.initializer, encoding=self.encoding) == self.pythonString
+
+ def testInitFromAsn1(self):
+ assert self.asn1Type(self.asn1Type(self.pythonString)) == self.pythonString
+ assert self.asn1Type(univ.OctetString(self.pythonString.encode(self.encoding), encoding=self.encoding)) == self.pythonString
+
+ def testAsOctets(self):
+ assert self.asn1String.asOctets() == self.pythonString.encode(self.encoding), 'testAsOctets() fails'
+
+ def testAsNumbers(self):
+ assert self.asn1String.asNumbers() == self.initializer, 'testAsNumbers() fails'
+
+ def testSeq(self):
+ assert self.asn1String[0] == self.pythonString[0], '__getitem__() fails'
+
+ def testEmpty(self):
+ try:
+ str(self.asn1Type())
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Value operation on ASN1 type tolerated'
+
+ def testAdd(self):
+ assert self.asn1String + self.pythonString.encode(self.encoding) == self.pythonString + self.pythonString, '__add__() fails'
+
+ def testRadd(self):
+ assert self.pythonString.encode(self.encoding) + self.asn1String == self.pythonString + self.pythonString, '__radd__() fails'
+
+ def testMul(self):
+ assert self.asn1String * 2 == self.pythonString * 2, '__mul__() fails'
+
+ def testRmul(self):
+ assert 2 * self.asn1String == 2 * self.pythonString, '__rmul__() fails'
+
+ def testContains(self):
+ assert self.pythonString in self.asn1String
+ assert self.pythonString + self.pythonString not in self.asn1String
+
+ def testReverse(self):
+ assert list(reversed(self.asn1String)) == list(reversed(self.pythonString))
+
+ def testSchemaPickling(self):
+ old_asn1 = self.asn1Type()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == self.asn1Type
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = self.asn1String
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == self.asn1String
+
+
+class VisibleStringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (97, 102)
+ encoding = 'us-ascii'
+ asn1Type = char.VisibleString
+
+
+class GeneralStringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (169, 174)
+ encoding = 'iso-8859-1'
+ asn1Type = char.GeneralString
+
+
+class UTF8StringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (209, 132, 208, 176)
+ encoding = 'utf-8'
+ asn1Type = char.UTF8String
+
+
+class BMPStringTestCase(AbstractStringTestCase, BaseTestCase):
+
+ initializer = (4, 48, 4, 68)
+ encoding = 'utf-16-be'
+ asn1Type = char.BMPString
+
+
+if sys.version_info[0] > 2:
+
+ # Somehow comparison of UTF-32 encoded strings does not work in Py2
+
+ class UniversalStringTestCase(AbstractStringTestCase, BaseTestCase):
+ initializer = (0, 0, 4, 48, 0, 0, 4, 68)
+ encoding = 'utf-32-be'
+ asn1Type = char.UniversalString
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_constraint.py b/contrib/python/pyasn1/py3/tests/type/test_constraint.py
new file mode 100644
index 0000000000..1ae95ef61a
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_constraint.py
@@ -0,0 +1,420 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import constraint
+from pyasn1.type import error
+
+
+class SingleValueConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.v1 = 1, 2
+ self.v2 = 3, 4
+ self.c1 = constraint.SingleValueConstraint(*self.v1)
+ self.c2 = constraint.SingleValueConstraint(*self.v2)
+
+ def testCmp(self):
+ assert self.c1 == self.c1, 'comparison fails'
+
+ def testHash(self):
+ assert hash(self.c1) != hash(self.c2), 'hash() fails'
+
+ def testGoodVal(self):
+ try:
+ self.c1(1)
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(4)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+ def testContains(self):
+ for v in self.v1:
+ assert v in self.c1
+ assert v not in self.c2
+
+ for v in self.v2:
+ assert v in self.c2
+ assert v not in self.c1
+
+ def testIter(self):
+ assert set(self.v1) == set(self.c1)
+ assert set(self.v2) == set(self.c2)
+
+ def testSub(self):
+ subconst = self.c1 - constraint.SingleValueConstraint(self.v1[0])
+ assert list(subconst) == [self.v1[1]]
+
+ def testAdd(self):
+ superconst = self.c1 + self.c2
+ assert set(superconst) == set(self.v1 + self.v2)
+
+
+class ContainedSubtypeConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ContainedSubtypeConstraint(
+ constraint.SingleValueConstraint(12)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(12)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(4)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ValueRangeConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ValueRangeConstraint(1, 4)
+
+ def testGoodVal(self):
+ try:
+ self.c1(1)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(-5)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ValueSizeConstraintTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ValueSizeConstraint(1, 2)
+
+ def testGoodVal(self):
+ try:
+ self.c1('a')
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1('abc')
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
+ def setUp(self):
+ self.v1 = 'A', 'B'
+ self.v2 = 'C', 'D'
+ self.c1 = constraint.PermittedAlphabetConstraint(*self.v1)
+ self.c2 = constraint.PermittedAlphabetConstraint(*self.v2)
+
+ def testGoodVal(self):
+ try:
+ self.c1('A')
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1('E')
+
+ except error.ValueConstraintError:
+ pass
+
+ else:
+ assert 0, 'constraint check fails'
+
+
+class WithComponentsConstraintTestCase(BaseTestCase):
+
+ def testGoodVal(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint()),
+ ('B', constraint.ComponentAbsentConstraint()))
+
+ try:
+ c({'A': 1})
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testGoodValWithExtraFields(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint()),
+ ('B', constraint.ComponentAbsentConstraint())
+ )
+
+ try:
+ c({'A': 1, 'C': 2})
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testEmptyConstraint(self):
+ c = constraint.WithComponentsConstraint()
+
+ try:
+ c({'A': 1})
+
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint())
+ )
+
+ try:
+ c({'B': 2})
+
+ except error.ValueConstraintError:
+ pass
+
+ else:
+ assert 0, 'constraint check fails'
+
+ def testBadValExtraFields(self):
+ c = constraint.WithComponentsConstraint(
+ ('A', constraint.ComponentPresentConstraint())
+ )
+
+ try:
+ c({'B': 2, 'C': 3})
+
+ except error.ValueConstraintError:
+ pass
+
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ConstraintsIntersectionTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsIntersection(
+ constraint.SingleValueConstraint(4),
+ constraint.ValueRangeConstraint(2, 4)
+ )
+
+ def testCmp1(self):
+ assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
+
+ def testCmp2(self):
+ assert constraint.SingleValueConstraint(5) not in self.c1, \
+ '__cmp__() fails'
+
+ def testCmp3(self):
+ c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
+ constraint.SingleValueConstraint(4),
+ constraint.ValueRangeConstraint(2, 4))
+ )
+ assert self.c1 in c, '__cmp__() fails'
+
+ def testCmp4(self):
+ c = constraint.ConstraintsUnion(
+ constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
+ )
+ assert self.c1 not in c, '__cmp__() fails'
+
+ def testGoodVal(self):
+ try:
+ self.c1(4)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(-5)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class InnerTypeConstraintTestCase(BaseTestCase):
+ def testConst1(self):
+ c = constraint.InnerTypeConstraint(
+ constraint.SingleValueConstraint(4)
+ )
+ try:
+ c(4, 32)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+ try:
+ c(5, 32)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+ def testConst2(self):
+ c = constraint.InnerTypeConstraint(
+ (0, constraint.SingleValueConstraint(4), 'PRESENT'),
+ (1, constraint.SingleValueConstraint(4), 'ABSENT')
+ )
+ try:
+ c(4, 0)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+ try:
+ c(4, 1)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+ try:
+ c(3, 0)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+ # Constraints compositions
+
+
+class ConstraintsIntersectionRangeTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsIntersection(
+ constraint.ValueRangeConstraint(1, 9),
+ constraint.ValueRangeConstraint(2, 5)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(3)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(0)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ConstraintsUnionTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsUnion(
+ constraint.SingleValueConstraint(5),
+ constraint.ValueRangeConstraint(1, 3)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(2)
+ self.c1(5)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(-5)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+class ConstraintsExclusionTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.c1 = constraint.ConstraintsExclusion(
+ constraint.ValueRangeConstraint(2, 4)
+ )
+
+ def testGoodVal(self):
+ try:
+ self.c1(6)
+ except error.ValueConstraintError:
+ assert 0, 'constraint check fails'
+
+ def testBadVal(self):
+ try:
+ self.c1(2)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint check fails'
+
+
+# Constraints derivations
+
+class DirectDerivationTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.c1 = constraint.SingleValueConstraint(5)
+
+ self.c2 = constraint.ConstraintsUnion(
+ self.c1, constraint.ValueRangeConstraint(1, 3)
+ )
+
+ def testGoodVal(self):
+ assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
+ assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
+
+ def testBadVal(self):
+ assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
+ assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
+
+
+class IndirectDerivationTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.c1 = constraint.ConstraintsIntersection(
+ constraint.ValueRangeConstraint(1, 30)
+ )
+
+ self.c2 = constraint.ConstraintsIntersection(
+ self.c1, constraint.ValueRangeConstraint(1, 20)
+ )
+
+ self.c2 = constraint.ConstraintsIntersection(
+ self.c2, constraint.ValueRangeConstraint(1, 10)
+ )
+
+ def testGoodVal(self):
+ assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
+ assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
+
+ def testBadVal(self):
+ assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
+ assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
+
+# TODO: how to apply size constraints to constructed types?
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_namedtype.py b/contrib/python/pyasn1/py3/tests/type/test_namedtype.py
new file mode 100644
index 0000000000..4585984e6a
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_namedtype.py
@@ -0,0 +1,135 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+from pyasn1.error import PyAsn1Error
+
+
+class NamedTypeCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.e = namedtype.NamedType('age', univ.Integer(0))
+
+ def testIter(self):
+ n, t = self.e
+ assert n == 'age' or t == univ.Integer(), 'unpack fails'
+
+ def testRepr(self):
+ assert 'age' in repr(self.e)
+
+
+class NamedTypesCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.e = namedtype.NamedTypes(
+ namedtype.NamedType('first-name', univ.OctetString('')),
+ namedtype.OptionalNamedType('age', univ.Integer(0)),
+ namedtype.NamedType('family-name', univ.OctetString(''))
+ )
+
+ def testRepr(self):
+ assert 'first-name' in repr(self.e)
+
+ def testContains(self):
+ assert 'first-name' in self.e
+ assert '<missing>' not in self.e
+
+ # noinspection PyUnusedLocal
+ def testGetItem(self):
+ assert self.e[0] == namedtype.NamedType('first-name', univ.OctetString(''))
+
+ def testIter(self):
+ assert list(self.e) == ['first-name', 'age', 'family-name']
+
+ def testGetTypeByPosition(self):
+ assert self.e.getTypeByPosition(0) == univ.OctetString(''), \
+ 'getTypeByPosition() fails'
+
+ def testGetNameByPosition(self):
+ assert self.e.getNameByPosition(0) == 'first-name', \
+ 'getNameByPosition() fails'
+
+ def testGetPositionByName(self):
+ assert self.e.getPositionByName('first-name') == 0, \
+ 'getPositionByName() fails'
+
+ def testGetTypesNearPosition(self):
+ assert self.e.getTagMapNearPosition(0).presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString('')
+ }
+ assert self.e.getTagMapNearPosition(1).presentTypes == {
+ univ.Integer.tagSet: univ.Integer(0),
+ univ.OctetString.tagSet: univ.OctetString('')
+ }
+ assert self.e.getTagMapNearPosition(2).presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString('')
+ }
+
+ def testGetTagMap(self):
+ assert self.e.tagMap.presentTypes == {
+ univ.OctetString.tagSet: univ.OctetString(''),
+ univ.Integer.tagSet: univ.Integer(0)
+ }
+
+ def testStrTagMap(self):
+ assert 'TagMap' in str(self.e.tagMap)
+ assert 'OctetString' in str(self.e.tagMap)
+ assert 'Integer' in str(self.e.tagMap)
+
+ def testReprTagMap(self):
+ assert 'TagMap' in repr(self.e.tagMap)
+ assert 'OctetString' in repr(self.e.tagMap)
+ assert 'Integer' in repr(self.e.tagMap)
+
+ def testGetTagMapWithDups(self):
+ try:
+ self.e.tagMapUnique[0]
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'Duped types not noticed'
+
+ def testGetPositionNearType(self):
+ assert self.e.getPositionNearType(univ.OctetString.tagSet, 0) == 0
+ assert self.e.getPositionNearType(univ.Integer.tagSet, 1) == 1
+ assert self.e.getPositionNearType(univ.OctetString.tagSet, 2) == 2
+
+
+class OrderedNamedTypesCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.e = namedtype.NamedTypes(
+ namedtype.NamedType('first-name', univ.OctetString('')),
+ namedtype.NamedType('age', univ.Integer(0))
+ )
+
+ def testGetTypeByPosition(self):
+ assert self.e.getTypeByPosition(0) == univ.OctetString(''), \
+ 'getTypeByPosition() fails'
+
+
+class DuplicateNamedTypesCaseBase(BaseTestCase):
+ def testDuplicateDefaultTags(self):
+ nt = namedtype.NamedTypes(
+ namedtype.NamedType('first-name', univ.Any()),
+ namedtype.NamedType('age', univ.Any())
+ )
+
+ assert isinstance(nt.tagMap, namedtype.NamedTypes.PostponedError)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_namedval.py b/contrib/python/pyasn1/py3/tests/type/test_namedval.py
new file mode 100644
index 0000000000..fda2da2a95
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_namedval.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import namedval
+
+
+class NamedValuesCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.e = namedval.NamedValues(('off', 0), ('on', 1))
+
+ def testDict(self):
+ assert set(self.e.items()) == {('off', 0), ('on', 1)}
+ assert set(self.e.keys()) == {'off', 'on'}
+ assert set(self.e) == {'off', 'on'}
+ assert set(self.e.values()) == {0, 1}
+ assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
+ assert 0 in self.e and 1 in self.e and 2 not in self.e
+
+ def testInit(self):
+ assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
+ assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
+ assert namedval.NamedValues(('c', 0)) == {'c': 0}
+ assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}
+
+ def testLen(self):
+ assert len(self.e) == 2
+ assert len(namedval.NamedValues()) == 0
+
+ def testAdd(self):
+ assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}
+
+ def testClone(self):
+ assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
+ assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}
+
+ def testStrRepr(self):
+ assert str(self.e)
+ assert repr(self.e)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_opentype.py b/contrib/python/pyasn1/py3/tests/type/test_opentype.py
new file mode 100644
index 0000000000..5ae9715f40
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_opentype.py
@@ -0,0 +1,101 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import univ
+from pyasn1.type import tag
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.compat.octets import str2octs
+from pyasn1.error import PyAsn1Error
+
+
+class UntaggedAnyTestCase(BaseTestCase):
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', univ.Any())
+ )
+
+ self.s = Sequence()
+
+ def testTypeCheckOnAssignment(self):
+
+ self.s.clear()
+
+ self.s['blob'] = univ.Any(str2octs('xxx'))
+
+ # this should succeed because Any is untagged and unconstrained
+ self.s['blob'] = univ.Integer(123)
+
+
+class TaggedAnyTestCase(BaseTestCase):
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.taggedAny = univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 20))
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', self.taggedAny)
+ )
+
+ self.s = Sequence()
+
+ def testTypeCheckOnAssignment(self):
+
+ self.s.clear()
+
+ self.s['blob'] = self.taggedAny.clone('xxx')
+
+ try:
+ self.s.setComponentByName('blob', univ.Integer(123))
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'non-open type assignment tolerated'
+
+
+class TaggedAnyOpenTypeTestCase(BaseTestCase):
+
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.taggedAny = univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 20))
+
+ class Sequence(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.Integer()),
+ namedtype.NamedType('blob', self.taggedAny, openType=opentype.OpenType(name='id'))
+ )
+
+ self.s = Sequence()
+
+ def testTypeCheckOnAssignment(self):
+
+ self.s.clear()
+
+ self.s['blob'] = univ.Any(str2octs('xxx'))
+ self.s['blob'] = univ.Integer(123)
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_tag.py b/contrib/python/pyasn1/py3/tests/type/test_tag.py
new file mode 100644
index 0000000000..5d27b72b8b
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_tag.py
@@ -0,0 +1,133 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import tag
+
+
+class TagTestCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+ self.t1 = tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 3)
+ self.t2 = tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 3)
+
+
+class TagReprTestCase(TagTestCaseBase):
+ def testRepr(self):
+ assert 'Tag' in repr(self.t1)
+
+
+class TagCmpTestCase(TagTestCaseBase):
+ def testCmp(self):
+ assert self.t1 == self.t2, 'tag comparison fails'
+
+ def testHash(self):
+ assert hash(self.t1) == hash(self.t2), 'tag hash comparison fails'
+
+ def testSequence(self):
+ assert self.t1[0] == self.t2[0] and \
+ self.t1[1] == self.t2[1] and \
+ self.t1[2] == self.t2[2], 'tag sequence protocol fails'
+
+
+class TagSetTestCaseBase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.ts1 = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )
+
+ self.ts2 = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )
+
+
+class TagSetReprTestCase(TagSetTestCaseBase):
+ def testRepr(self):
+ assert 'TagSet' in repr(self.ts1)
+
+
+class TagSetCmpTestCase(TagSetTestCaseBase):
+ def testCmp(self):
+ assert self.ts1 == self.ts2, 'tag set comparison fails'
+
+ def testHash(self):
+ assert hash(self.ts1) == hash(self.ts2), 'tag set hash comp. fails'
+
+ def testLen(self):
+ assert len(self.ts1) == len(self.ts2), 'tag length comparison fails'
+
+
+class TaggingTestSuite(TagSetTestCaseBase):
+ def testImplicitTag(self):
+ t = self.ts1.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 14)
+ )
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 14)
+ ), 'implicit tagging went wrong'
+
+ def testExplicitTag(self):
+ t = self.ts1.tagExplicitly(
+ tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 32)
+ )
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassPrivate, tag.tagFormatConstructed, 32)
+ ), 'explicit tagging went wrong'
+
+
+class TagSetAddTestSuite(TagSetTestCaseBase):
+ def testAdd(self):
+ t = self.ts1 + tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ ), 'TagSet.__add__() fails'
+
+ def testRadd(self):
+ t = tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2) + self.ts1
+ assert t == tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ ), 'TagSet.__radd__() fails'
+
+
+class SuperTagSetTestCase(TagSetTestCaseBase):
+ def testSuperTagCheck1(self):
+ assert self.ts1.isSuperTagSetOf(
+ tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )), 'isSuperTagSetOf() fails'
+
+ def testSuperTagCheck2(self):
+ assert not self.ts1.isSuperTagSetOf(
+ tag.TagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 13)
+ )), 'isSuperTagSetOf() fails'
+
+ def testSuperTagCheck3(self):
+ assert self.ts1.isSuperTagSetOf(
+ tag.TagSet((), tag.Tag(tag.tagClassUniversal,
+ tag.tagFormatSimple, 12))
+ ), 'isSuperTagSetOf() fails'
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_univ.py b/contrib/python/pyasn1/py3/tests/type/test_univ.py
new file mode 100644
index 0000000000..001f978d48
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_univ.py
@@ -0,0 +1,2184 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import math
+import pickle
+import platform
+import sys
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import univ
+from pyasn1.type import tag
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import error
+from pyasn1.compat.octets import str2octs, ints2octs, octs2ints, octs2str
+from pyasn1.error import PyAsn1Error
+from pyasn1.error import PyAsn1UnicodeEncodeError, PyAsn1UnicodeDecodeError
+
+
+class NoValueTestCase(BaseTestCase):
+ def testSingleton(self):
+ assert univ.NoValue() is univ.NoValue(), 'NoValue is not a singleton'
+
+ def testRepr(self):
+ try:
+ repr(univ.noValue)
+
+ except PyAsn1Error:
+ assert False, 'repr() on NoValue object fails'
+
+ def testIsInstance(self):
+ try:
+ assert isinstance(univ.noValue, univ.NoValue), 'isinstance() on NoValue() object fails'
+
+ except PyAsn1Error:
+ assert False, 'isinstance() on NoValue object fails'
+
+ def testStr(self):
+ try:
+ str(univ.noValue)
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'str() works for NoValue object'
+
+ def testLen(self):
+ try:
+ len(univ.noValue)
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'len() works for NoValue object'
+
+ def testCmp(self):
+ try:
+ univ.noValue == 1
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'comparison works for NoValue object'
+
+ def testSubs(self):
+ try:
+ univ.noValue[0]
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, '__getitem__() works for NoValue object'
+
+ def testKey(self):
+ try:
+ univ.noValue['key']
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, '__getitem__() works for NoValue object'
+
+ def testKeyAssignment(self):
+ try:
+ univ.noValue['key'] = 123
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, '__setitem__() works for NoValue object'
+
+ def testInt(self):
+ try:
+ int(univ.noValue)
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'integer conversion works for NoValue object'
+
+ def testAdd(self):
+ try:
+ univ.noValue + univ.noValue
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'addition works for NoValue object'
+
+ def testBitShift(self):
+ try:
+ univ.noValue << 1
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'bitshift works for NoValue object'
+
+ def testBooleanEvaluation(self):
+ try:
+ if univ.noValue:
+ pass
+
+ except PyAsn1Error:
+ pass
+
+ else:
+ assert False, 'boolean evaluation works for NoValue object'
+
+ @unittest.skipIf(
+ platform.python_implementation() == "PyPy",
+ "getsizeof() raises TypeError on PyPy"
+ )
+ def testSizeOf(self):
+ try:
+ sys.getsizeof(univ.noValue)
+
+ except PyAsn1Error:
+ assert False, 'sizeof failed for NoValue object'
+
+
+class IntegerTestCase(BaseTestCase):
+ def testStr(self):
+ assert str(univ.Integer(1)) in ('1', '1L'), 'str() fails'
+
+ def testRepr(self):
+ assert '123' in repr(univ.Integer(123))
+
+ def testAnd(self):
+ assert univ.Integer(1) & 0 == 0, '__and__() fails'
+
+ def testOr(self):
+ assert univ.Integer(1) | 0 == 1, '__or__() fails'
+
+ def testXor(self):
+ assert univ.Integer(1) ^ 0 == 1, '__xor__() fails'
+
+ def testRand(self):
+ assert 0 & univ.Integer(1) == 0, '__rand__() fails'
+
+ def testRor(self):
+ assert 0 | univ.Integer(1) == 1, '__ror__() fails'
+
+ def testRxor(self):
+ assert 0 ^ univ.Integer(1) == 1, '__rxor__() fails'
+
+ def testAdd(self):
+ assert univ.Integer(-4) + 6 == 2, '__add__() fails'
+
+ def testRadd(self):
+ assert 4 + univ.Integer(5) == 9, '__radd__() fails'
+
+ def testSub(self):
+ assert univ.Integer(3) - 6 == -3, '__sub__() fails'
+
+ def testRsub(self):
+ assert 6 - univ.Integer(3) == 3, '__rsub__() fails'
+
+ def testMul(self):
+ assert univ.Integer(3) * -3 == -9, '__mul__() fails'
+
+ def testRmul(self):
+ assert 2 * univ.Integer(3) == 6, '__rmul__() fails'
+
+ def testDivInt(self):
+ assert univ.Integer(4) / 2 == 2, '__div__() fails'
+
+ if sys.version_info[0] > 2:
+ def testDivFloat(self):
+ assert univ.Integer(3) / 2 == 1.5, '__div__() fails'
+
+ def testRdivFloat(self):
+ assert 3 / univ.Integer(2) == 1.5, '__rdiv__() fails'
+ else:
+ def testDivFloat(self):
+ assert univ.Integer(3) / 2 == 1, '__div__() fails'
+
+ def testRdivFloat(self):
+ assert 3 / univ.Integer(2) == 1, '__rdiv__() fails'
+
+ def testRdivInt(self):
+ assert 6 / univ.Integer(3) == 2, '__rdiv__() fails'
+
+ if sys.version_info[0] > 2:
+ def testTrueDiv(self):
+ assert univ.Integer(3) / univ.Integer(2) == 1.5, '__truediv__() fails'
+
+ def testFloorDiv(self):
+ assert univ.Integer(3) // univ.Integer(2) == 1, '__floordiv__() fails'
+
+ def testMod(self):
+ assert univ.Integer(3) % 2 == 1, '__mod__() fails'
+
+ def testRmod(self):
+ assert 4 % univ.Integer(3) == 1, '__rmod__() fails'
+
+ def testPow(self):
+ assert univ.Integer(3) ** 2 == 9, '__pow__() fails'
+
+ def testRpow(self):
+ assert 2 ** univ.Integer(2) == 4, '__rpow__() fails'
+
+ def testLshift(self):
+ assert univ.Integer(1) << 1 == 2, '<< fails'
+
+ def testRshift(self):
+ assert univ.Integer(2) >> 1 == 1, '>> fails'
+
+ def testInt(self):
+ assert int(univ.Integer(3)) == 3, '__int__() fails'
+
+ def testLong(self):
+ assert int(univ.Integer(8)) == 8, '__long__() fails'
+
+ def testFloat(self):
+ assert float(univ.Integer(4)) == 4.0, '__float__() fails'
+
+ def testPos(self):
+ assert +univ.Integer(1) == 1, '__pos__() fails'
+
+ def testNeg(self):
+ assert -univ.Integer(1) == -1, '__neg__() fails'
+
+ def testInvert(self):
+ assert ~univ.Integer(1) == -2, '__invert__() fails'
+
+ def testRound(self):
+ assert round(univ.Integer(1), 3) == 1.0, '__round__() fails'
+
+ def testFloor(self):
+ assert math.floor(univ.Integer(1)) == 1, '__floor__() fails'
+
+ def testCeil(self):
+ assert math.ceil(univ.Integer(1)) == 1, '__ceil__() fails'
+
+ def testTrunc(self):
+ assert math.trunc(univ.Integer(1)) == 1, '__trunc__() fails'
+
+ def testPrettyIn(self):
+ assert univ.Integer('3') == 3, 'prettyIn() fails'
+
+ def testTag(self):
+ assert univ.Integer().tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
+ )
+
+ def testNamedVals(self):
+
+ class Integer(univ.Integer):
+ namedValues = univ.Integer.namedValues.clone(('asn1', 1))
+
+ assert Integer('asn1') == 1, 'named val fails'
+ assert int(Integer('asn1')) == 1, 'named val fails'
+ assert str(Integer('asn1')) == 'asn1', 'named val __str__() fails'
+
+ def testSubtype(self):
+ assert univ.Integer().subtype(
+ value=1,
+ implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 2),
+ subtypeSpec=constraint.SingleValueConstraint(1, 3)
+ ) == univ.Integer(
+ value=1,
+ tagSet=tag.TagSet(tag.Tag(tag.tagClassPrivate,
+ tag.tagFormatSimple, 2)),
+ subtypeSpec=constraint.ConstraintsIntersection(constraint.SingleValueConstraint(1, 3))
+ )
+
+
+class IntegerPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Integer()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Integer
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Integer(-123)
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == -123
+
+
+class BooleanTestCase(BaseTestCase):
+ def testTruth(self):
+ assert univ.Boolean(True) and univ.Boolean(1), 'Truth initializer fails'
+
+ def testFalse(self):
+ assert not univ.Boolean(False) and not univ.Boolean(0), 'False initializer fails'
+
+ def testStr(self):
+ assert str(univ.Boolean(1)) == 'True', 'str() fails'
+
+ def testInt(self):
+ assert int(univ.Boolean(1)) == 1, 'int() fails'
+
+ def testRepr(self):
+ assert 'Boolean' in repr(univ.Boolean(1))
+
+ def testTag(self):
+ assert univ.Boolean().tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01)
+ )
+
+ def testConstraints(self):
+
+ class Boolean(univ.Boolean):
+ pass
+
+ try:
+ Boolean(2)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint fail'
+
+
+class BooleanPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Boolean()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Boolean
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Boolean(True)
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == True
+
+
+class BitStringTestCase(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ self.b = univ.BitString(
+ namedValues=namedval.NamedValues(('Active', 0), ('Urgent', 1))
+ )
+
+ def testBinDefault(self):
+
+ class BinDefault(univ.BitString):
+ defaultBinValue = '1010100110001010'
+
+ assert BinDefault() == univ.BitString(binValue='1010100110001010')
+
+ def testHexDefault(self):
+
+ class HexDefault(univ.BitString):
+ defaultHexValue = 'A98A'
+
+ assert HexDefault() == univ.BitString(hexValue='A98A')
+
+ def testSet(self):
+ assert self.b.clone('Active') == (1,)
+ assert self.b.clone('Urgent') == (0, 1)
+ assert self.b.clone('Urgent, Active') == (1, 1)
+ assert self.b.clone("'1010100110001010'B") == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
+ assert self.b.clone("'A98A'H") == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
+ assert self.b.clone(binValue='1010100110001010') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
+ assert self.b.clone(hexValue='A98A') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
+ assert self.b.clone('1010100110001010') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
+ assert self.b.clone((1, 0, 1)) == (1, 0, 1)
+
+ def testStr(self):
+ assert str(self.b.clone('Urgent')) == '01'
+
+ def testRepr(self):
+ assert 'BitString' in repr(self.b.clone('Urgent,Active'))
+
+ def testTag(self):
+ assert univ.BitString().tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
+ )
+
+ def testLen(self):
+ assert len(self.b.clone("'A98A'H")) == 16
+
+ def testGetItem(self):
+ assert self.b.clone("'A98A'H")[0] == 1
+ assert self.b.clone("'A98A'H")[1] == 0
+ assert self.b.clone("'A98A'H")[2] == 1
+
+ def testReverse(self):
+ assert list(reversed(univ.BitString([0, 0, 1]))) == list(univ.BitString([1, 0, 0]))
+
+ def testAsOctets(self):
+ assert self.b.clone(hexValue='A98A').asOctets() == ints2octs((0xa9, 0x8a)), 'testAsOctets() fails'
+
+ def testAsInts(self):
+ assert self.b.clone(hexValue='A98A').asNumbers() == (0xa9, 0x8a), 'testAsNumbers() fails'
+
+ def testMultipleOfEightPadding(self):
+ assert self.b.clone((1, 0, 1)).asNumbers() == (5,)
+
+ def testAsInteger(self):
+ assert self.b.clone('11000000011001').asInteger() == 12313
+ assert self.b.clone('1100110011011111').asInteger() == 52447
+
+ def testStaticDef(self):
+
+ class BitString(univ.BitString):
+ pass
+
+ assert BitString('11000000011001').asInteger() == 12313
+
+
+class BitStringPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.BitString()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.BitString
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.BitString((1, 0, 1, 0))
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == (1, 0, 1, 0)
+
+
+class OctetStringWithUnicodeMixIn(object):
+
+ initializer = ()
+ encoding = 'us-ascii'
+
+ def setUp(self):
+ self.pythonString = ints2octs(self.initializer).decode(self.encoding)
+ self.encodedPythonString = self.pythonString.encode(self.encoding)
+ self.numbersString = tuple(octs2ints(self.encodedPythonString))
+
+ def testInit(self):
+ assert univ.OctetString(self.encodedPythonString) == self.encodedPythonString, '__init__() fails'
+
+ def testInitFromAsn1(self):
+ assert univ.OctetString(univ.OctetString(self.encodedPythonString)) == self.encodedPythonString
+ assert univ.OctetString(univ.Integer(123)) == univ.OctetString('123')
+
+ def testSerialised(self):
+ if sys.version_info[0] < 3:
+ assert str(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
+ else:
+ assert bytes(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
+
+ def testPrintable(self):
+ if sys.version_info[0] < 3:
+ assert str(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
+ assert unicode(univ.OctetString(self.pythonString, encoding=self.encoding)) == self.pythonString, 'unicode init fails'
+ else:
+ assert str(univ.OctetString(self.pythonString, encoding=self.encoding)) == self.pythonString, 'unicode init fails'
+
+ def testSeq(self):
+ assert univ.OctetString(self.encodedPythonString)[0] == self.encodedPythonString[0], '__getitem__() fails'
+
+ def testRepr(self):
+ assert 'abc' in repr(univ.OctetString('abc'))
+
+ def testAsOctets(self):
+ assert univ.OctetString(self.encodedPythonString).asOctets() == self.encodedPythonString, 'testAsOctets() fails'
+
+ def testAsInts(self):
+ assert univ.OctetString(self.encodedPythonString).asNumbers() == self.numbersString, 'testAsNumbers() fails'
+
+ def testAdd(self):
+ assert univ.OctetString(self.encodedPythonString) + self.encodedPythonString == self.encodedPythonString + self.encodedPythonString, '__add__() fails'
+
+ def testRadd(self):
+ assert self.encodedPythonString + univ.OctetString(self.encodedPythonString) == self.encodedPythonString + self.encodedPythonString, '__radd__() fails'
+
+ def testMul(self):
+ assert univ.OctetString(self.encodedPythonString) * 2 == self.encodedPythonString * 2, '__mul__() fails'
+
+ def testRmul(self):
+ assert 2 * univ.OctetString(self.encodedPythonString) == 2 * self.encodedPythonString, '__rmul__() fails'
+
+ def testContains(self):
+ s = univ.OctetString(self.encodedPythonString)
+ assert self.encodedPythonString in s
+ assert self.encodedPythonString * 2 not in s
+
+ def testReverse(self):
+ assert list(reversed(univ.OctetString(self.encodedPythonString))) == list(reversed(self.encodedPythonString))
+
+
+class OctetStringWithAsciiTestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
+ initializer = (97, 102)
+ encoding = 'us-ascii'
+
+
+class OctetStringUnicodeErrorTestCase(BaseTestCase):
+ def testEncodeError(self):
+ serialized = ints2octs((0xff, 0xfe))
+
+ if sys.version_info < (3, 0):
+ text = serialized.decode('iso-8859-1')
+
+ else:
+ text = octs2str(serialized)
+
+ try:
+ univ.OctetString(text, encoding='us-ascii')
+
+ except PyAsn1UnicodeEncodeError:
+ pass
+
+ def testDecodeError(self):
+ serialized = ints2octs((0xff, 0xfe))
+
+ octetString = univ.OctetString(serialized, encoding='us-ascii')
+
+ try:
+ if sys.version_info < (3, 0):
+ unicode(octetString)
+
+ else:
+ str(octetString)
+
+ except PyAsn1UnicodeDecodeError:
+ pass
+
+
+class OctetStringWithUtf8TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
+ initializer = (208, 176, 208, 177, 208, 178)
+ encoding = 'utf-8'
+
+
+class OctetStringWithUtf16TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
+ initializer = (4, 48, 4, 49, 4, 50)
+ encoding = 'utf-16-be'
+
+
+if sys.version_info[0] > 2:
+
+ # Somehow comparison of UTF-32 encoded strings does not work in Py2
+
+ class OctetStringWithUtf32TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
+ initializer = (0, 0, 4, 48, 0, 0, 4, 49, 0, 0, 4, 50)
+ encoding = 'utf-32-be'
+
+
+class OctetStringTestCase(BaseTestCase):
+
+ def testBinDefault(self):
+
+ class BinDefault(univ.OctetString):
+ defaultBinValue = '1000010111101110101111000000111011'
+
+ assert BinDefault() == univ.OctetString(binValue='1000010111101110101111000000111011')
+
+ def testHexDefault(self):
+
+ class HexDefault(univ.OctetString):
+ defaultHexValue = 'FA9823C43E43510DE3422'
+
+ assert HexDefault() == univ.OctetString(hexValue='FA9823C43E43510DE3422')
+
+ def testBinStr(self):
+ assert univ.OctetString(binValue="1000010111101110101111000000111011") == ints2octs((133, 238, 188, 14, 192)), 'bin init fails'
+
+ def testHexStr(self):
+ assert univ.OctetString(hexValue="FA9823C43E43510DE3422") == ints2octs((250, 152, 35, 196, 62, 67, 81, 13, 227, 66, 32)), 'hex init fails'
+
+ def testTuple(self):
+ assert univ.OctetString((1, 2, 3, 4, 5)) == ints2octs((1, 2, 3, 4, 5)), 'tuple init failed'
+
+ def testRepr(self):
+ assert 'abc' in repr(univ.OctetString('abc'))
+
+ def testEmpty(self):
+ try:
+ str(univ.OctetString())
+ except PyAsn1Error:
+ pass
+ else:
+ assert 0, 'empty OctetString() not reported'
+
+ def testTag(self):
+ assert univ.OctetString().tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
+ )
+
+ def testStaticDef(self):
+
+ class OctetString(univ.OctetString):
+ pass
+
+ assert OctetString(hexValue="FA9823C43E43510DE3422") == ints2octs((250, 152, 35, 196, 62, 67, 81, 13, 227, 66, 32))
+
+
+class OctetStringPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.BitString()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.BitString
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.BitString((1, 0, 1, 0))
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == (1, 0, 1, 0)
+
+
+class Null(BaseTestCase):
+
+ def testInit(self):
+ assert not univ.Null().isValue
+ assert univ.Null(0) == str2octs('')
+ assert univ.Null(False) == str2octs('')
+ assert univ.Null('') == str2octs('')
+ assert univ.Null(None) == str2octs('')
+
+ try:
+ assert univ.Null(True)
+
+ except PyAsn1Error:
+ pass
+
+ try:
+ assert univ.Null('xxx')
+
+ except PyAsn1Error:
+ pass
+
+ def testStr(self):
+ assert str(univ.Null('')) == '', 'str() fails'
+
+ def testRepr(self):
+ assert 'Null' in repr(univ.Null(''))
+
+ def testTag(self):
+ assert univ.Null().tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
+ )
+
+ def testConstraints(self):
+ try:
+ univ.Null(2)
+ except error.ValueConstraintError:
+ pass
+ else:
+ assert 0, 'constraint fail'
+
+ def testStaticDef(self):
+
+ class Null(univ.Null):
+ pass
+
+ assert not Null('')
+
+
+class NullPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Null()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Null
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Null('')
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert not new_asn1
+
+
+class RealTestCase(BaseTestCase):
+ def testFloat4BinEnc(self):
+ assert univ.Real((0.25, 2, 3)) == 2.0, 'float initializer for binary encoding fails'
+
+ def testStr(self):
+ assert str(univ.Real(1.0)) == '1.0', 'str() fails'
+
+ def testRepr(self):
+ assert 'Real' in repr(univ.Real(-4.1))
+ assert 'Real' in repr(univ.Real(-4.1))
+ assert 'inf' in repr(univ.Real('inf'))
+ assert '-inf' in repr(univ.Real('-inf'))
+
+ def testAdd(self):
+ assert univ.Real(-4.1) + 1.4 == -2.7, '__add__() fails'
+
+ def testRadd(self):
+ assert 4 + univ.Real(0.5) == 4.5, '__radd__() fails'
+
+ def testSub(self):
+ assert univ.Real(3.9) - 1.7 == 2.2, '__sub__() fails'
+
+ def testRsub(self):
+ assert 6.1 - univ.Real(0.1) == 6, '__rsub__() fails'
+
+ def testMul(self):
+ assert univ.Real(3.0) * -3 == -9, '__mul__() fails'
+
+ def testRmul(self):
+ assert 2 * univ.Real(3.0) == 6, '__rmul__() fails'
+
+ def testDiv(self):
+ assert univ.Real(3.0) / 2 == 1.5, '__div__() fails'
+
+ def testRdiv(self):
+ assert 6 / univ.Real(3.0) == 2, '__rdiv__() fails'
+
+ def testMod(self):
+ assert univ.Real(3.0) % 2 == 1, '__mod__() fails'
+
+ def testRmod(self):
+ assert 4 % univ.Real(3.0) == 1, '__rmod__() fails'
+
+ def testPow(self):
+ assert univ.Real(3.0) ** 2 == 9, '__pow__() fails'
+
+ def testRpow(self):
+ assert 2 ** univ.Real(2.0) == 4, '__rpow__() fails'
+
+ def testInt(self):
+ assert int(univ.Real(3.0)) == 3, '__int__() fails'
+
+ def testLong(self):
+ assert int(univ.Real(8.0)) == 8, '__long__() fails'
+
+ def testFloat(self):
+ assert float(univ.Real(4.0)) == 4.0, '__float__() fails'
+
+ def testPrettyIn(self):
+ assert univ.Real((3, 10, 0)) == 3, 'prettyIn() fails'
+
+ # infinite float values
+ def testStrInf(self):
+ assert str(univ.Real('inf')) == 'inf', 'str() fails'
+
+ def testAddInf(self):
+ assert univ.Real('inf') + 1 == float('inf'), '__add__() fails'
+
+ def testRaddInf(self):
+ assert 1 + univ.Real('inf') == float('inf'), '__radd__() fails'
+
+ def testIntInf(self):
+ try:
+ assert int(univ.Real('inf'))
+ except OverflowError:
+ pass
+ else:
+ assert 0, '__int__() fails'
+
+ def testLongInf(self):
+ try:
+ assert int(univ.Real('inf'))
+ except OverflowError:
+ pass
+ else:
+ assert 0, '__long__() fails'
+ assert int(univ.Real(8.0)) == 8, '__long__() fails'
+
+ def testFloatInf(self):
+ assert float(univ.Real('-inf')) == float('-inf'), '__float__() fails'
+
+ def testPrettyInInf(self):
+ assert univ.Real(float('inf')) == float('inf'), 'prettyIn() fails'
+
+ def testPlusInf(self):
+ assert univ.Real('inf').isPlusInf, 'isPlusInfinity failed'
+
+ def testMinusInf(self):
+ assert univ.Real('-inf').isMinusInf, 'isMinusInfinity failed'
+
+ def testPos(self):
+ assert +univ.Real(1.0) == 1.0, '__pos__() fails'
+
+ def testNeg(self):
+ assert -univ.Real(1.0) == -1.0, '__neg__() fails'
+
+ def testRound(self):
+ assert round(univ.Real(1.123), 2) == 1.12, '__round__() fails'
+
+ def testFloor(self):
+ assert math.floor(univ.Real(1.6)) == 1.0, '__floor__() fails'
+
+ def testCeil(self):
+ assert math.ceil(univ.Real(1.2)) == 2.0, '__ceil__() fails'
+
+ def testTrunc(self):
+ assert math.trunc(univ.Real(1.1)) == 1.0, '__trunc__() fails'
+
+ def testTag(self):
+ assert univ.Real().tagSet == tag.TagSet(
+ (),
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
+ )
+
+ def testStaticDef(self):
+
+ class Real(univ.Real):
+ pass
+
+ assert Real(1.0) == 1.0
+
+
+class RealPicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Real()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Real
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Real((1, 10, 3))
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == 1000
+
+
class ObjectIdentifier(BaseTestCase):
    """Unit tests for the univ.ObjectIdentifier ASN.1 type."""

    def testStr(self):
        assert str(univ.ObjectIdentifier((1, 3, 6))) == '1.3.6', 'str() fails'

    def testRepr(self):
        assert '1.3.6' in repr(univ.ObjectIdentifier('1.3.6'))

    def testEq(self):
        # OIDs compare equal to plain tuples of arcs
        assert univ.ObjectIdentifier((1, 3, 6)) == (1, 3, 6), '__cmp__() fails'

    def testAdd(self):
        assert univ.ObjectIdentifier((1, 3)) + (6,) == (1, 3, 6), '__add__() fails'

    def testRadd(self):
        assert (1,) + univ.ObjectIdentifier((3, 6)) == (1, 3, 6), '__radd__() fails'

    def testLen(self):
        assert len(univ.ObjectIdentifier((1, 3))) == 2, '__len__() fails'

    def testPrefix(self):
        o = univ.ObjectIdentifier('1.3.6')
        assert o.isPrefixOf((1, 3, 6)), 'isPrefixOf() fails'
        assert o.isPrefixOf((1, 3, 6, 1)), 'isPrefixOf() fails'
        assert not o.isPrefixOf((1, 3)), 'isPrefixOf() fails'

    def testInput1(self):
        assert univ.ObjectIdentifier('1.3.6') == (1, 3, 6), 'prettyIn() fails'

    def testInput2(self):
        assert univ.ObjectIdentifier((1, 3, 6)) == (1, 3, 6), 'prettyIn() fails'

    def testInput3(self):
        assert univ.ObjectIdentifier(univ.ObjectIdentifier('1.3') + (6,)) == (1, 3, 6), 'prettyIn() fails'

    def testUnicode(self):
        s = '1.3.6'
        if sys.version_info[0] < 3:
            # on Python 2, exercise initialization from a unicode object
            s = s.decode()
        assert univ.ObjectIdentifier(s) == (1, 3, 6), 'unicode init fails'

    def testTag(self):
        # Universal class, primitive encoding, tag number 6 (OBJECT IDENTIFIER)
        assert univ.ObjectIdentifier().tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
        )

    def testContains(self):
        # membership tests against individual arcs
        s = univ.ObjectIdentifier('1.3.6.1234.99999')
        assert 1234 in s
        assert 4321 not in s

    def testStaticDef(self):

        class ObjectIdentifier(univ.ObjectIdentifier):
            pass

        assert str(ObjectIdentifier((1, 3, 6))) == '1.3.6'
+
+
class ObjectIdentifierPicklingTestCase(unittest.TestCase):
    """Verify that univ.ObjectIdentifier survives a pickle round-trip."""

    def testSchemaPickling(self):
        # A value-less (schema) object must keep its type identity.
        schema_obj = univ.ObjectIdentifier()
        blob = pickle.dumps(schema_obj)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.ObjectIdentifier
        assert schema_obj.isSameTypeWith(restored)

    def testValuePickling(self):
        # The arc sequence must come back intact.
        value_obj = univ.ObjectIdentifier('2.3.1.1.2')
        blob = pickle.dumps(value_obj)
        assert blob
        restored = pickle.loads(blob)
        assert restored == (2, 3, 1, 1, 2)
+
+
class SequenceOf(BaseTestCase):
    """Unit tests for the univ.SequenceOf ASN.1 constructed type."""

    def setUp(self):
        BaseTestCase.setUp(self)
        # s1 is the shared fixture; s2 is an independent clone of the same schema
        self.s1 = univ.SequenceOf(
            componentType=univ.OctetString('')
        )
        self.s2 = self.s1.clone()

    def testRepr(self):
        assert 'a' in repr(self.s1.clone().setComponents('a', 'b'))

    def testTag(self):
        # Universal class, constructed encoding, tag number 16 (SEQUENCE OF)
        assert self.s1.tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        ), 'wrong tagSet'

    def testSeq(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert self.s1[0] == str2octs('abc'), 'set by idx fails'
        self.s1[0] = 'cba'
        assert self.s1[0] == str2octs('cba'), 'set by idx fails'

    def testCmp(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, 'abc')
        self.s2.clear()
        self.s2.setComponentByPosition(0, univ.OctetString('abc'))
        assert self.s1 == self.s2, '__cmp__() fails'

    def testSubtypeSpec(self):
        s = self.s1.clone(
            componentType=univ.OctetString().subtype(
                subtypeSpec=constraint.SingleValueConstraint(str2octs('abc'))))
        try:
            s.setComponentByPosition(
                0, univ.OctetString().subtype(
                    'abc', subtypeSpec=constraint.SingleValueConstraint(str2octs('abc'))))
        except PyAsn1Error:
            assert 0, 'constraint fails'
        try:
            s.setComponentByPosition(1, univ.OctetString('Abc'))
        except PyAsn1Error:
            # expected: 'Abc' violates the constraint unless verification is disabled
            try:
                s.setComponentByPosition(1, univ.OctetString('Abc'),
                                         verifyConstraints=False)
            except PyAsn1Error:
                assert 0, 'constraint fails with verifyConstraints=False'
        else:
            assert 0, 'constraint fails'

    def testComponentTagsMatching(self):
        s = self.s1.clone()
        s.strictConstraints = True  # This requires types equality
        o = univ.OctetString('abc').subtype(explicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 12))
        try:
            s.setComponentByPosition(0, o)
        except PyAsn1Error:
            pass
        else:
            assert 0, 'inner supertype tag allowed'

    def testComponentConstraintsMatching(self):
        s = self.s1.clone()
        o = univ.OctetString().subtype(
            subtypeSpec=constraint.ConstraintsUnion(constraint.SingleValueConstraint(str2octs('cba'))))
        s.strictConstraints = True  # This requires types equality
        try:
            s.setComponentByPosition(0, o.clone('cba'))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'inner supertype constraint allowed'
        s.strictConstraints = False  # This requires subtype relationships
        try:
            s.setComponentByPosition(0, o.clone('cba'))
        except PyAsn1Error:
            assert 0, 'inner supertype constraint disallowed'
        else:
            pass

    def testConsistency(self):
        # size constraint of exactly one element
        s = self.s1.clone(subtypeSpec=constraint.ConstraintsUnion(
            constraint.ValueSizeConstraint(1, 1)
        ))
        s.setComponentByPosition(0, univ.OctetString('abc'))
        assert not s.isInconsistent, 'size spec fails'
        s.setComponentByPosition(1, univ.OctetString('abc'))
        assert s.isInconsistent, 'size spec fails'

    def testGetComponentTagMap(self):
        assert self.s1.componentType.tagMap.presentTypes == {
            univ.OctetString.tagSet: univ.OctetString('')
        }

    def testSubtype(self):
        # subtype() with implicitTag must produce the same type as clone() with
        # an equivalent explicit TagSet
        subtype = self.s1.subtype(
            implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 2),
            subtypeSpec=constraint.ValueSizeConstraint(0, 1)
        )
        subtype.clear()
        clone = self.s1.clone(
            tagSet=tag.TagSet(tag.Tag(tag.tagClassPrivate,
                                      tag.tagFormatSimple, 2)),
            subtypeSpec=constraint.ValueSizeConstraint(0, 1)
        )
        clone.clear()
        assert clone == subtype

    def testClone(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        s = self.s1.clone()
        s.clear()
        assert len(s) == 0
        # cloneValueFlag carries component values over to the clone
        s = self.s1.clone(cloneValueFlag=1)
        assert len(s) == 1
        assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)

    def testSetComponents(self):
        assert self.s1.clone().setComponents('abc', 'def') == \
               self.s1.setComponentByPosition(0, 'abc').setComponentByPosition(1, 'def')

    def testGetItem(self):
        s = self.s1.clone()
        s.append('xxx')
        assert s[0]

        # this is a deviation from standard sequence protocol
        assert not s[2]

    def testGetItemSlice(self):
        s = self.s1.clone()
        s.extend(['xxx', 'yyy', 'zzz'])
        assert s[:1] == [str2octs('xxx')]
        assert s[-2:] == [str2octs('yyy'), str2octs('zzz')]
        assert s[1:2] == [str2octs('yyy')]

    def testSetItem(self):
        s = self.s1.clone()
        s.append('xxx')
        # assigning past the end auto-extends, filling the gap with defaults
        s[2] = 'yyy'
        assert len(s) == 3
        assert s[1] == str2octs('')

    def testSetItemSlice(self):
        s = self.s1.clone()
        s[:1] = ['xxx']
        assert s == [str2octs('xxx')]
        s[-2:] = ['yyy', 'zzz']
        assert s == [str2octs('yyy'), str2octs('zzz')]
        s[1:2] = ['yyy']
        assert s == [str2octs('yyy'), str2octs('yyy')]
        assert len(s) == 2

    def testAppend(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert len(self.s1) == 1
        self.s1.append('def')
        assert len(self.s1) == 2
        assert list(self.s1) == [str2octs(x) for x in ['abc', 'def']]

    def testExtend(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert len(self.s1) == 1
        self.s1.extend(['def', 'ghi'])
        assert len(self.s1) == 3
        assert list(self.s1) == [str2octs(x) for x in ['abc', 'def', 'ghi']]

    def testCount(self):
        self.s1.clear()
        for x in ['abc', 'def', 'abc']:
            self.s1.append(x)
        assert self.s1.count(str2octs('abc')) == 2
        assert self.s1.count(str2octs('def')) == 1
        assert self.s1.count(str2octs('ghi')) == 0

    def testIndex(self):
        self.s1.clear()
        for x in ['abc', 'def', 'abc']:
            self.s1.append(x)
        assert self.s1.index(str2octs('abc')) == 0
        assert self.s1.index(str2octs('def')) == 1
        assert self.s1.index(str2octs('abc'), 1) == 2

    def testSort(self):
        self.s1.clear()
        self.s1[0] = 'b'
        self.s1[1] = 'a'
        assert list(self.s1) == [str2octs('b'), str2octs('a')]
        self.s1.sort()
        assert list(self.s1) == [str2octs('a'), str2octs('b')]

    def testStaticDef(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString('')

        s = SequenceOf()
        s[0] = 'abc'
        assert len(s) == 1
        assert s == [str2octs('abc')]

    def testUntyped(self):
        # a SequenceOf with no componentType has no value until one is assigned
        n = univ.SequenceOf()

        assert not n.isValue

        n[0] = univ.OctetString('fox')

        assert n.isValue

    def testLegacyInitializer(self):
        n = univ.SequenceOf(
            componentType=univ.OctetString()
        )
        o = univ.SequenceOf(
            univ.OctetString()  # this is the old way
        )

        assert n.isSameTypeWith(o) and o.isSameTypeWith(n)

        n[0] = 'fox'
        o[0] = 'fox'

        assert n == o

    def testGetComponentWithDefault(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        assert s.getComponentByPosition(0, default=None, instantiate=False) is None
        assert s.getComponentByPosition(0, default=None) is None
        s[0] = 'test'
        assert s.getComponentByPosition(0, default=None) is not None
        assert s.getComponentByPosition(0, default=None) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(0, default=None) is None

    def testGetComponentNoInstantiation(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
        s[0] = 'test'
        assert s.getComponentByPosition(0, instantiate=False) is not univ.noValue
        assert s.getComponentByPosition(0, instantiate=False) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(0, instantiate=False) is univ.noValue

    def testClear(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        s.setComponentByPosition(0, 'test')

        assert s.getComponentByPosition(0) == str2octs('test')
        assert len(s) == 1
        assert s.isValue

        # clear() empties the container but the object remains a value
        s.clear()

        assert len(s) == 0
        assert s == []
        assert s.isValue

    def testReset(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()

        s = SequenceOf()
        s.setComponentByPosition(0, 'test')

        assert s.getComponentByPosition(0) == str2octs('test')
        assert s.isValue

        # reset() reverts the object back to schema (value-less) state
        s.reset()

        assert not s.isValue

    def testIsInconsistentSizeConstraint(self):

        class SequenceOf(univ.SequenceOf):
            componentType = univ.OctetString()
            subtypeSpec = constraint.ValueSizeConstraint(0, 1)

        s = SequenceOf()

        assert s.isInconsistent

        s[0] = 'test'

        assert not s.isInconsistent

        s[0] = 'test'
        s[1] = 'test'

        assert s.isInconsistent

        s.clear()

        assert not s.isInconsistent

        s.reset()

        assert s.isInconsistent

        s[1] = 'test'

        assert not s.isInconsistent
+
+
class SequenceOfPicklingTestCase(unittest.TestCase):
    """Verify that univ.SequenceOf survives a pickle round-trip."""

    def testSchemaPickling(self):
        # A value-less (schema) object must keep its type identity.
        schema_obj = univ.SequenceOf(componentType=univ.OctetString())
        blob = pickle.dumps(schema_obj)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.SequenceOf
        assert schema_obj.isSameTypeWith(restored)

    def testValuePickling(self):
        # Component values must come back intact.
        value_obj = univ.SequenceOf(componentType=univ.OctetString())
        value_obj[0] = 'test'
        blob = pickle.dumps(value_obj)
        assert blob
        restored = pickle.loads(blob)
        assert restored
        assert restored == [str2octs('test')]
+
+
class Sequence(BaseTestCase):
    """Unit tests for the univ.Sequence ASN.1 constructed type.

    Fix: this class previously declared two methods both named
    ``testSchemaWithOptionalComponents``; the second definition silently
    shadowed the first, so the optional-components scenario never ran. The
    second method actually exercises DefaultedNamedType and has been renamed
    to ``testSchemaWithDefaultedComponents`` so both tests are discovered.
    """

    def setUp(self):
        BaseTestCase.setUp(self)
        # mandatory 'name', optional 'nick', defaulted 'age' (34)
        self.s1 = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString('')),
                namedtype.OptionalNamedType('nick', univ.OctetString('')),
                namedtype.DefaultedNamedType('age', univ.Integer(34))
            )
        )

    def testRepr(self):
        assert 'name' in repr(self.s1.clone().setComponents('a', 'b'))

    def testTag(self):
        # Universal class, constructed encoding, tag number 16 (SEQUENCE)
        assert self.s1.tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        ), 'wrong tagSet'

    def testById(self):
        self.s1.setComponentByName('name', univ.OctetString('abc'))
        assert self.s1.getComponentByName('name') == str2octs('abc'), 'set by name fails'

    def testByKey(self):
        self.s1['name'] = 'abc'
        assert self.s1['name'] == str2octs('abc'), 'set by key fails'

    def testContains(self):
        assert 'name' in self.s1
        assert '<missing>' not in self.s1

    def testGetNearPosition(self):
        # position 1 may be filled by the optional 'nick' or the defaulted 'age'
        assert self.s1.componentType.getTagMapNearPosition(1).presentTypes == {
            univ.OctetString.tagSet: univ.OctetString(''),
            univ.Integer.tagSet: univ.Integer(34)
        }
        assert self.s1.componentType.getPositionNearType(
            univ.OctetString.tagSet, 1
        ) == 1

    def testSetDefaultComponents(self):
        self.s1.clear()
        self.s1.setComponentByPosition(0, univ.OctetString('Ping'))
        self.s1.setComponentByPosition(1, univ.OctetString('Pong'))
        # unset defaulted component materializes with its default value
        assert self.s1.getComponentByPosition(2) == 34

    def testClone(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        self.s1.setComponentByPosition(1, univ.OctetString('def'))
        self.s1.setComponentByPosition(2, univ.Integer(123))
        # a plain clone() copies the schema but not the component values
        s = self.s1.clone()
        assert s.getComponentByPosition(0) != self.s1.getComponentByPosition(0)
        assert s.getComponentByPosition(1) != self.s1.getComponentByPosition(1)
        assert s.getComponentByPosition(2) != self.s1.getComponentByPosition(2)
        # cloneValueFlag carries the values over
        s = self.s1.clone(cloneValueFlag=1)
        assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
        assert s.getComponentByPosition(1) == self.s1.getComponentByPosition(1)
        assert s.getComponentByPosition(2) == self.s1.getComponentByPosition(2)

    def testComponentTagsMatching(self):
        s = self.s1.clone()
        s.strictConstraints = True  # This requires types equality
        o = univ.OctetString('abc').subtype(explicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 12))
        try:
            s.setComponentByName('name', o)
        except PyAsn1Error:
            pass
        else:
            assert 0, 'inner supertype tag allowed'

    def testComponentConstraintsMatching(self):
        s = self.s1.clone()
        o = univ.OctetString().subtype(
            subtypeSpec=constraint.ConstraintsUnion(constraint.SingleValueConstraint(str2octs('cba'))))
        s.strictConstraints = True  # This requires types equality
        try:
            s.setComponentByName('name', o.clone('cba'))
        except PyAsn1Error:
            pass
        else:
            assert 0, 'inner supertype constraint allowed'
        s.strictConstraints = False  # This requires subtype relationships
        try:
            s.setComponentByName('name', o.clone('cba'))
        except PyAsn1Error:
            assert 0, 'inner supertype constraint disallowed'
        else:
            pass

    def testSetComponents(self):
        assert self.s1.clone().setComponents(name='a', nick='b', age=1) == \
               self.s1.setComponentByPosition(0, 'a').setComponentByPosition(1, 'b').setComponentByPosition(2, 1)

    def testSetToDefault(self):
        # assigning noValue resets a component to its schema default
        s = self.s1.clone()
        s.setComponentByPosition(0, univ.noValue)
        s[2] = univ.noValue
        assert s[0] == univ.OctetString('')
        assert s[2] == univ.Integer(34)

    def testGetItem(self):
        s = self.s1.clone()
        s['name'] = 'xxx'
        assert s['name']
        assert s[0]

        try:
            s['xxx']

        except KeyError:
            pass

        else:
            assert False, 'KeyError not raised'

        try:
            s[100]

        except IndexError:
            pass

        else:
            assert False, 'IndexError not raised'

    def testSetItem(self):
        s = self.s1.clone()
        s['name'] = 'xxx'

        try:

            s['xxx'] = 'xxx'

        except KeyError:
            pass

        else:
            assert False, 'KeyError not raised'

        try:

            s[100] = 'xxx'

        except IndexError:
            pass

        else:
            assert False, 'IndexError not raised'

    def testIter(self):
        assert list(self.s1) == ['name', 'nick', 'age']

    def testKeys(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        self.s1.setComponentByPosition(1, univ.OctetString('def'))
        self.s1.setComponentByPosition(2, univ.Integer(123))
        assert list(self.s1.keys()) == ['name', 'nick', 'age']

    def testValues(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        self.s1.setComponentByPosition(1, univ.OctetString('def'))
        self.s1.setComponentByPosition(2, univ.Integer(123))
        assert list(self.s1.values()) == [str2octs('abc'), str2octs('def'), 123]

    def testItems(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        self.s1.setComponentByPosition(1, univ.OctetString('def'))
        self.s1.setComponentByPosition(2, univ.Integer(123))
        assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'abc'), ('nick', 'def')]] + [('age', 123)]

    def testUpdate(self):
        self.s1.clear()
        assert list(self.s1.values()) == [str2octs(''), str2octs(''), 34]
        self.s1.update(**{'name': 'abc', 'nick': 'def', 'age': 123})
        assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'abc'), ('nick', 'def')]] + [('age', 123)]
        self.s1.update(('name', 'ABC'))
        assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'ABC'), ('nick', 'def')]] + [('age', 123)]
        self.s1.update(name='CBA')
        assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'CBA'), ('nick', 'def')]] + [('age', 123)]

    def testStaticDef(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString('')),
                namedtype.OptionalNamedType('nick', univ.OctetString('')),
                namedtype.DefaultedNamedType('age', univ.Integer(34))
            )

        s = Sequence()
        s['name'] = 'abc'
        assert s['name'] == str2octs('abc')

    def testGetComponentWithDefault(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString('')),
                namedtype.OptionalNamedType('nick', univ.OctetString()),
            )

        s = Sequence()

        assert s[0] == str2octs('')
        assert s.getComponentByPosition(1, default=None, instantiate=False) is None
        assert s.getComponentByName('nick', default=None) is None
        s[1] = 'test'
        assert s.getComponentByPosition(1, default=None) is not None
        assert s.getComponentByPosition(1, default=None) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(1, default=None) is None

    def testGetComponentWithConstructedDefault(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString()),
                namedtype.DefaultedNamedType('nick', univ.SequenceOf(
                    componentType=univ.Integer()
                ).setComponentByPosition(0, 1)),
            )

        s = Sequence()

        assert s.getComponentByPosition(1, default=None, instantiate=False) is None
        assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
        # instantiating access materializes the constructed default
        assert s.getComponentByPosition(1) == [1]

    def testGetComponentNoInstantiation(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString('')),
                namedtype.OptionalNamedType('nick', univ.OctetString()),
            )

        s = Sequence()
        assert s[0] == str2octs('')
        assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
        assert s.getComponentByName('nick', instantiate=False) is univ.noValue
        s[1] = 'test'
        assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
        assert s.getComponentByPosition(1, instantiate=False) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(1, instantiate=False) is univ.noValue

    def testSchemaWithComponents(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString())
            )

        s = Sequence()

        assert not s.isValue

        s[0] = 'test'

        assert s.isValue

        s.clear()

        assert not s.isValue

        s.reset()

        assert not s.isValue

    def testSchemaWithOptionalComponents(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.OptionalNamedType('name', univ.OctetString())
            )

        s = Sequence()

        assert s.isValue

        s[0] = 'test'

        assert s.isValue

        s.clear()

        assert s.isValue

        s.reset()

        assert not s.isValue

    def testSchemaWithDefaultedComponents(self):
        # renamed from a second 'testSchemaWithOptionalComponents' definition
        # that shadowed the method above and prevented it from ever running

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.DefaultedNamedType('name', univ.OctetString(''))
            )

        s = Sequence()

        assert s.isValue

        s[0] = 'test'

        assert s.isValue

        s.clear()

        assert s.isValue

        s.reset()

        assert not s.isValue

    def testIsInconsistentWithComponentsConstraint(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.OptionalNamedType('name', univ.OctetString()),
                namedtype.DefaultedNamedType('age', univ.Integer(65))
            )
            subtypeSpec = constraint.WithComponentsConstraint(
                ('name', constraint.ComponentPresentConstraint()),
                ('age', constraint.ComponentAbsentConstraint())
            )

        s = Sequence()

        assert s.isInconsistent

        s[0] = 'test'

        assert not s.isInconsistent

        s[0] = 'test'
        s[1] = 23

        assert s.isInconsistent

        s.clear()

        assert s.isInconsistent

        s.reset()

        assert s.isInconsistent

        s[1] = 23

        assert s.isInconsistent

    def testIsInconsistentSizeConstraint(self):

        class Sequence(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.OptionalNamedType('name', univ.OctetString()),
                namedtype.DefaultedNamedType('age', univ.Integer(65))
            )
            subtypeSpec = constraint.ValueSizeConstraint(0, 1)

        s = Sequence()

        assert not s.isInconsistent

        s[0] = 'test'

        assert not s.isInconsistent

        s[0] = 'test'
        s[1] = 23

        assert s.isInconsistent

        s.clear()

        assert not s.isInconsistent

        s.reset()

        assert s.isInconsistent

        s[1] = 23

        assert not s.isInconsistent
+
+
class SequenceWithoutSchema(BaseTestCase):
    """Tests for univ.Sequence operating without a componentType schema."""

    def testGetItem(self):
        s = univ.Sequence()
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s[0] = 'abc'
        # schema-less components are addressable by synthetic 'field-<N>' names
        assert s['field-0']
        assert s[0]

        try:
            s['field-1']

        except KeyError:
            pass

        else:
            assert False, 'KeyError not raised'

    def testSetItem(self):
        s = univ.Sequence()
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s['field-0'] = 'xxx'

        try:

            # only fields already present by position may be set by name
            s['field-1'] = 'xxx'

        except KeyError:
            pass

        else:
            assert False, 'KeyError not raised'

    def testIter(self):
        s = univ.Sequence()
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s.setComponentByPosition(1, univ.Integer(123))
        assert list(s) == ['field-0', 'field-1']

    def testKeys(self):
        s = univ.Sequence()
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s.setComponentByPosition(1, univ.Integer(123))
        assert list(s.keys()) == ['field-0', 'field-1']

    def testValues(self):
        s = univ.Sequence()
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s.setComponentByPosition(1, univ.Integer(123))
        assert list(s.values()) == [str2octs('abc'), 123]

    def testItems(self):
        s = univ.Sequence()
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s.setComponentByPosition(1, univ.Integer(123))
        assert list(s.items()) == [('field-0', str2octs('abc')), ('field-1', 123)]

    def testUpdate(self):
        s = univ.Sequence().clear()
        assert not s
        s.setComponentByPosition(0, univ.OctetString('abc'))
        s.setComponentByPosition(1, univ.Integer(123))
        assert s
        assert list(s.keys()) == ['field-0', 'field-1']
        assert list(s.values()) == [str2octs('abc'), 123]
        assert list(s.items()) == [('field-0', str2octs('abc')), ('field-1', 123)]
        s['field-0'] = univ.OctetString('def')
        assert list(s.values()) == [str2octs('def'), 123]
        s['field-1'] = univ.OctetString('ghi')
        assert list(s.values()) == [str2octs('def'), str2octs('ghi')]
        try:
            s['field-2'] = univ.OctetString('xxx')
        except KeyError:
            pass
        else:
            assert False, 'unknown field at schema-less object tolerated'
        assert 'field-0' in s
        s.clear()
        assert 'field-0' not in s

    def testSchema(self):

        class Sequence(univ.Sequence):
            pass

        s = Sequence()

        assert not s.isValue

        s[0] = univ.OctetString('test')

        assert s.isValue

        # clearing a schema-less sequence keeps it a (now empty) value
        s.clear()

        assert s.isValue

        # reset() reverts back to schema (value-less) state
        s.reset()

        assert not s.isValue
+
+
class SequencePicklingTestCase(unittest.TestCase):
    """Verify that univ.Sequence survives a pickle round-trip."""

    def testSchemaPickling(self):
        # A value-less (schema) object must keep its type identity.
        schema_obj = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString())
            )
        )
        blob = pickle.dumps(schema_obj)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.Sequence
        assert schema_obj.isSameTypeWith(restored)

    def testValuePickling(self):
        # Named component values must come back intact.
        value_obj = univ.Sequence(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString())
            )
        )
        value_obj['name'] = 'test'
        blob = pickle.dumps(value_obj)
        assert blob
        restored = pickle.loads(blob)
        assert restored
        assert restored['name'] == str2octs('test')
+
+
class SetOf(BaseTestCase):
    """Unit tests for the univ.SetOf ASN.1 constructed type."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.s1 = univ.SetOf(componentType=univ.OctetString(''))

    def testTag(self):
        # Universal class, constructed encoding, tag number 17 (SET OF)
        assert self.s1.tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
        ), 'wrong tagSet'

    def testSeq(self):
        self.s1.setComponentByPosition(0, univ.OctetString('abc'))
        assert self.s1[0] == str2octs('abc'), 'set by idx fails'
        self.s1.setComponentByPosition(0, self.s1[0].clone('cba'))
        assert self.s1[0] == str2octs('cba'), 'set by idx fails'

    def testStaticDef(self):

        # Fixed: the statically-defined subclass previously derived from
        # univ.SequenceOf (copy-paste from the SequenceOf tests), which left
        # the SET OF static definition untested. It must derive from
        # univ.SetOf; behavior of the assertions below is unchanged.
        class SetOf(univ.SetOf):
            componentType = univ.OctetString('')

        s = SetOf()
        s[0] = 'abc'
        assert len(s) == 1
        assert s == [str2octs('abc')]
+
+
+
class SetOfPicklingTestCase(unittest.TestCase):
    """Verify that univ.SetOf survives a pickle round-trip."""

    def testSchemaPickling(self):
        # A value-less (schema) object must keep its type identity.
        schema_obj = univ.SetOf(componentType=univ.OctetString())
        blob = pickle.dumps(schema_obj)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.SetOf
        assert schema_obj.isSameTypeWith(restored)

    def testValuePickling(self):
        # Component values must come back intact.
        value_obj = univ.SetOf(componentType=univ.OctetString())
        value_obj[0] = 'test'
        blob = pickle.dumps(value_obj)
        assert blob
        restored = pickle.loads(blob)
        assert restored
        assert restored == [str2octs('test')]
+
+
class Set(BaseTestCase):
    """Unit tests for the univ.Set ASN.1 constructed type."""

    def setUp(self):
        BaseTestCase.setUp(self)

        # mandatory 'name', optional 'null', defaulted 'age' (34)
        self.s1 = univ.Set(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString('')),
                namedtype.OptionalNamedType('null', univ.Null('')),
                namedtype.DefaultedNamedType('age', univ.Integer(34))
            )
        )
        self.s2 = self.s1.clone()

    def testTag(self):
        # Universal class, constructed encoding, tag number 17 (SET)
        assert self.s1.tagSet == tag.TagSet(
            (),
            tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
        ), 'wrong tagSet'

    def testByTypeWithPythonValue(self):
        # SET components are addressable by their tagSet, not just name/position
        self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
        assert self.s1.getComponentByType(
            univ.OctetString.tagSet
        ) == str2octs('abc'), 'set by name fails'

    def testByTypeWithInstance(self):
        self.s1.setComponentByType(univ.OctetString.tagSet, univ.OctetString('abc'))
        assert self.s1.getComponentByType(
            univ.OctetString.tagSet
        ) == str2octs('abc'), 'set by name fails'

    def testGetTagMap(self):
        assert self.s1.tagMap.presentTypes == {
            univ.Set.tagSet: univ.Set().clear()
        }

    def testGetComponentTagMap(self):
        assert self.s1.componentType.tagMapUnique.presentTypes == {
            univ.OctetString.tagSet: univ.OctetString(''),
            univ.Null.tagSet: univ.Null(''),
            univ.Integer.tagSet: univ.Integer(34)
        }

    def testGetPositionByType(self):
        assert self.s1.componentType.getPositionByType(univ.Null().tagSet) == 1

    def testSetToDefault(self):
        # assigning noValue resets the component to its schema default
        self.s1.setComponentByName('name', univ.noValue)
        assert self.s1['name'] == univ.OctetString('')

    def testIter(self):
        assert list(self.s1) == ['name', 'null', 'age']

    def testStaticDef(self):

        class Set(univ.Set):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString('')),
                namedtype.OptionalNamedType('nick', univ.OctetString('')),
                namedtype.DefaultedNamedType('age', univ.Integer(34))
            )

        s = Set()
        s['name'] = 'abc'
        assert s['name'] == str2octs('abc')

    def testGetComponentWithDefault(self):

        class Set(univ.Set):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer(123)),
                namedtype.OptionalNamedType('nick', univ.OctetString()),
            )

        s = Set()
        assert s[0] == 123
        assert s.getComponentByPosition(1, default=None, instantiate=False) is None
        assert s.getComponentByName('nick', default=None) is None
        s[1] = 'test'
        assert s.getComponentByPosition(1, default=None) is not None
        assert s.getComponentByPosition(1, default=None) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(1, default=None) is None

    def testGetComponentNoInstantiation(self):

        class Set(univ.Set):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('id', univ.Integer(123)),
                namedtype.OptionalNamedType('nick', univ.OctetString()),
            )

        s = Set()
        assert s[0] == 123
        assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
        assert s.getComponentByName('nick', instantiate=False) is univ.noValue
        assert s.getComponentByType(univ.OctetString.tagSet, instantiate=False) is univ.noValue
        s[1] = 'test'
        assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
        assert s.getComponentByPosition(1, instantiate=False) == str2octs('test')
        s.clear()
        assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+
+
class SetPicklingTestCase(unittest.TestCase):
    """Verify that univ.Set survives a pickle round-trip."""

    def testSchemaPickling(self):
        # A value-less (schema) object must keep its type identity.
        schema_obj = univ.Set(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString())
            )
        )
        blob = pickle.dumps(schema_obj)
        assert blob
        restored = pickle.loads(blob)
        assert type(restored) == univ.Set
        assert schema_obj.isSameTypeWith(restored)

    def testValuePickling(self):
        # Named component values must come back intact.
        value_obj = univ.Set(
            componentType=namedtype.NamedTypes(
                namedtype.NamedType('name', univ.OctetString())
            )
        )
        value_obj['name'] = 'test'
        blob = pickle.dumps(value_obj)
        assert blob
        restored = pickle.loads(blob)
        assert restored
        assert restored['name'] == str2octs('test')
+
+
+class Choice(BaseTestCase):
+ def setUp(self):
+ BaseTestCase.setUp(self)
+
+ innerComp = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('count', univ.Integer()),
+ namedtype.NamedType('flag', univ.Boolean())
+ )
+ )
+ self.s1 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('sex', innerComp)
+ )
+ )
+
+ def testTag(self):
+ assert self.s1.tagSet == tag.TagSet(), 'wrong tagSet'
+
+ def testRepr(self):
+ assert 'Choice' in repr(self.s1.clone().setComponents('a'))
+ s = self.s1.clone().setComponents(
+ sex=self.s1.setComponentByPosition(1).getComponentByPosition(1).clone().setComponents(count=univ.Integer(123))
+ )
+ assert 'Choice' in repr(s)
+
+ def testContains(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert 'name' in self.s1
+ assert 'sex' not in self.s1
+
+ self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
+ assert 'name' not in self.s1
+ assert 'sex' in self.s1
+
+ def testIter(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert list(self.s1) == ['name']
+ self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
+ assert list(self.s1) == ['sex']
+
+ def testOuterByTypeWithPythonValue(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getComponentByType(
+ univ.OctetString.tagSet
+ ) == str2octs('abc')
+
+ def testOuterByTypeWithInstanceValue(self):
+ self.s1.setComponentByType(
+ univ.OctetString.tagSet, univ.OctetString('abc')
+ )
+ assert self.s1.getComponentByType(
+ univ.OctetString.tagSet
+ ) == str2octs('abc')
+
+ def testInnerByTypeWithPythonValue(self):
+ self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
+ assert self.s1.getComponentByType(
+ univ.Integer.tagSet, 1
+ ) == 123
+
+ def testInnerByTypeWithInstanceValue(self):
+ self.s1.setComponentByType(
+ univ.Integer.tagSet, univ.Integer(123), innerFlag=True
+ )
+ assert self.s1.getComponentByType(
+ univ.Integer.tagSet, 1
+ ) == 123
+
+ def testCmp(self):
+ self.s1.setComponentByName('name', univ.OctetString('abc'))
+ assert self.s1 == str2octs('abc'), '__cmp__() fails'
+
+ def testGetComponent(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getComponent() == str2octs('abc'), 'getComponent() fails'
+
+ def testGetName(self):
+ self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert self.s1.getName() == 'name', 'getName() fails'
+
+ def testSetComponentByPosition(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('Jim'))
+ assert self.s1 == str2octs('Jim')
+
+ def testClone(self):
+ self.s1.setComponentByPosition(0, univ.OctetString('abc'))
+ s = self.s1.clone()
+ assert len(s) == 0
+ s = self.s1.clone(cloneValueFlag=1)
+ assert len(s) == 1
+ assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
+
+ def testSetToDefault(self):
+ s = self.s1.clone()
+ s.setComponentByName('sex', univ.noValue)
+ assert s['sex'] is not univ.noValue
+
+ def testStaticDef(self):
+
+ class InnerChoice(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('count', univ.Integer()),
+ namedtype.NamedType('flag', univ.Boolean())
+ )
+
+ class OuterChoice(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('sex', InnerChoice())
+ )
+
+ c = OuterChoice()
+
+ c.setComponentByType(univ.OctetString.tagSet, 'abc')
+ assert c.getName() == 'name'
+
+ def testGetComponentWithDefault(self):
+
+ s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+
+ assert s.getComponentByPosition(0, default=None, instantiate=False) is None
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+ assert s.getComponentByName('name', default=None, instantiate=False) is None
+ assert s.getComponentByName('id', default=None, instantiate=False) is None
+ assert s.getComponentByType(univ.OctetString.tagSet, default=None) is None
+ assert s.getComponentByType(univ.Integer.tagSet, default=None) is None
+ s[1] = 123
+ assert s.getComponentByPosition(1, default=None) is not None
+ assert s.getComponentByPosition(1, univ.noValue) == 123
+ s.clear()
+ assert s.getComponentByPosition(1, default=None, instantiate=False) is None
+
+ def testGetComponentNoInstantiation(self):
+
+ s = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+
+ assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+ assert s.getComponentByName('name', instantiate=False) is univ.noValue
+ assert s.getComponentByName('id', instantiate=False) is univ.noValue
+ assert s.getComponentByType(univ.OctetString.tagSet, instantiate=False) is univ.noValue
+ assert s.getComponentByType(univ.Integer.tagSet, instantiate=False) is univ.noValue
+ s[1] = 123
+ assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
+ assert s.getComponentByPosition(1, instantiate=False) == 123
+ s.clear()
+ assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
+
+
+class ChoicePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == univ.Choice
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('name', univ.OctetString()),
+ namedtype.NamedType('id', univ.Integer())
+ )
+ )
+ old_asn1['name'] = 'test'
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1
+ assert new_asn1['name'] == str2octs('test')
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/type/test_useful.py b/contrib/python/pyasn1/py3/tests/type/test_useful.py
new file mode 100644
index 0000000000..cd5ba566f9
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/type/test_useful.py
@@ -0,0 +1,138 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+import datetime
+import pickle
+import sys
+from copy import deepcopy
+import unittest
+
+from __tests__.base import BaseTestCase
+
+from pyasn1.type import useful
+
+
+class FixedOffset(datetime.tzinfo):
+ def __init__(self, offset, name):
+ self.__offset = datetime.timedelta(minutes=offset)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return datetime.timedelta(0)
+
+
+UTC = FixedOffset(0, 'UTC')
+UTC2 = FixedOffset(120, 'UTC')
+
+
+class ObjectDescriptorTestCase(BaseTestCase):
+ pass
+
+
+class GeneralizedTimeTestCase(BaseTestCase):
+
+ def testFromDateTime(self):
+ assert useful.GeneralizedTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC)) == '20170711000102.3Z'
+
+ def testToDateTime0(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2) == useful.GeneralizedTime('20170711000102').asDateTime
+
+ def testToDateTime1(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.GeneralizedTime('20170711000102Z').asDateTime
+
+ def testToDateTime2(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102.3Z').asDateTime
+
+ def testToDateTime3(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102,3Z').asDateTime
+
+ def testToDateTime4(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102.3+0000').asDateTime
+
+ def testToDateTime5(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC2) == useful.GeneralizedTime('20170711000102.3+0200').asDateTime
+
+ def testToDateTime6(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC2) == useful.GeneralizedTime('20170711000102.3+02').asDateTime
+
+ def testToDateTime7(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1) == useful.GeneralizedTime('201707110001').asDateTime
+
+ def testToDateTime8(self):
+ assert datetime.datetime(2017, 7, 11, 0) == useful.GeneralizedTime('2017071100').asDateTime
+
+ def testCopy(self):
+ dt = useful.GeneralizedTime("20170916234254+0130").asDateTime
+ assert dt == deepcopy(dt)
+
+
+class GeneralizedTimePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = useful.GeneralizedTime()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == useful.GeneralizedTime
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = useful.GeneralizedTime("20170916234254+0130")
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == old_asn1
+
+
+class UTCTimeTestCase(BaseTestCase):
+
+ def testFromDateTime(self):
+ assert useful.UTCTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC)) == '170711000102Z'
+
+ def testToDateTime0(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2) == useful.UTCTime('170711000102').asDateTime
+
+ def testToDateTime1(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.UTCTime('170711000102Z').asDateTime
+
+ def testToDateTime2(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.UTCTime('170711000102+0000').asDateTime
+
+ def testToDateTime3(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC2) == useful.UTCTime('170711000102+0200').asDateTime
+
+ def testToDateTime4(self):
+ assert datetime.datetime(2017, 7, 11, 0, 1) == useful.UTCTime('1707110001').asDateTime
+
+
+class UTCTimePicklingTestCase(unittest.TestCase):
+
+ def testSchemaPickling(self):
+ old_asn1 = useful.UTCTime()
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert type(new_asn1) == useful.UTCTime
+ assert old_asn1.isSameTypeWith(new_asn1)
+
+ def testValuePickling(self):
+ old_asn1 = useful.UTCTime("170711000102")
+ serialised = pickle.dumps(old_asn1)
+ assert serialised
+ new_asn1 = pickle.loads(serialised)
+ assert new_asn1 == old_asn1
+
+
+suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/contrib/python/pyasn1/py3/tests/ya.make b/contrib/python/pyasn1/py3/tests/ya.make
new file mode 100644
index 0000000000..95837ba4a4
--- /dev/null
+++ b/contrib/python/pyasn1/py3/tests/ya.make
@@ -0,0 +1,41 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/pyasn1
+)
+
+TEST_SRCS(
+ __init__.py
+ base.py
+ codec/__init__.py
+ codec/ber/__init__.py
+ codec/ber/test_decoder.py
+ codec/ber/test_encoder.py
+ codec/cer/__init__.py
+ codec/cer/test_decoder.py
+ codec/cer/test_encoder.py
+ codec/der/__init__.py
+ codec/der/test_decoder.py
+ codec/der/test_encoder.py
+ codec/native/__init__.py
+ codec/native/test_decoder.py
+ codec/native/test_encoder.py
+ codec/test_streaming.py
+ compat/__init__.py
+ compat/test_integer.py
+ compat/test_octets.py
+ test_debug.py
+ type/__init__.py
+ type/test_char.py
+ type/test_constraint.py
+ type/test_namedtype.py
+ type/test_namedval.py
+ type/test_opentype.py
+ type/test_tag.py
+ type/test_univ.py
+ type/test_useful.py
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/pyasn1/py3/ya.make b/contrib/python/pyasn1/py3/ya.make
new file mode 100644
index 0000000000..772312ad0e
--- /dev/null
+++ b/contrib/python/pyasn1/py3/ya.make
@@ -0,0 +1,58 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(0.5.0)
+
+LICENSE(BSD-3-Clause)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ pyasn1/__init__.py
+ pyasn1/codec/__init__.py
+ pyasn1/codec/ber/__init__.py
+ pyasn1/codec/ber/decoder.py
+ pyasn1/codec/ber/encoder.py
+ pyasn1/codec/ber/eoo.py
+ pyasn1/codec/cer/__init__.py
+ pyasn1/codec/cer/decoder.py
+ pyasn1/codec/cer/encoder.py
+ pyasn1/codec/der/__init__.py
+ pyasn1/codec/der/decoder.py
+ pyasn1/codec/der/encoder.py
+ pyasn1/codec/native/__init__.py
+ pyasn1/codec/native/decoder.py
+ pyasn1/codec/native/encoder.py
+ pyasn1/codec/streaming.py
+ pyasn1/compat/__init__.py
+ pyasn1/compat/integer.py
+ pyasn1/compat/octets.py
+ pyasn1/debug.py
+ pyasn1/error.py
+ pyasn1/type/__init__.py
+ pyasn1/type/base.py
+ pyasn1/type/char.py
+ pyasn1/type/constraint.py
+ pyasn1/type/error.py
+ pyasn1/type/namedtype.py
+ pyasn1/type/namedval.py
+ pyasn1/type/opentype.py
+ pyasn1/type/tag.py
+ pyasn1/type/tagmap.py
+ pyasn1/type/univ.py
+ pyasn1/type/useful.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/pyasn1/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/pyasn1/ya.make b/contrib/python/pyasn1/ya.make
new file mode 100644
index 0000000000..1601f9047f
--- /dev/null
+++ b/contrib/python/pyasn1/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/pyasn1/py2)
+ELSE()
+ PEERDIR(contrib/python/pyasn1/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/pytest-localserver/py3/.dist-info/METADATA b/contrib/python/pytest-localserver/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..42c4db02b3
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/.dist-info/METADATA
@@ -0,0 +1,300 @@
+Metadata-Version: 2.1
+Name: pytest-localserver
+Version: 0.8.1
+Summary: pytest plugin to test server connections locally.
+Home-page: https://github.com/pytest-dev/pytest-localserver
+Author: Sebastian Rahlf
+Author-email: basti@redtoad.de
+Maintainer: David Zaslavsky
+Maintainer-email: diazona@ellipsix.net
+License: MIT License
+Keywords: pytest server localhost http smtp
+Classifier: Framework :: Pytest
+Classifier: Operating System :: OS Independent
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Testing
+Requires-Python: >=3.5
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: werkzeug >=0.10
+Provides-Extra: smtp
+Requires-Dist: aiosmtpd ; extra == 'smtp'
+
+.. image:: https://img.shields.io/pypi/v/pytest-localserver.svg?style=flat
+ :alt: PyPI Version
+ :target: https://pypi.python.org/pypi/pytest-localserver
+
+.. image:: https://img.shields.io/pypi/pyversions/pytest-localserver.svg
+ :alt: Supported Python versions
+ :target: https://pypi.python.org/pypi/pytest-localserver
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+
+.. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest-localserver/master.svg
+ :target: https://results.pre-commit.ci/latest/github/pytest-dev/pytest-localserver/master
+ :alt: pre-commit.ci status
+
+==================
+pytest-localserver
+==================
+
+pytest-localserver is a plugin for the `pytest`_ testing framework which enables
+you to test server connections locally.
+
+Sometimes `monkeypatching`_ ``urllib2.urlopen()`` just does not cut it, for
+instance if you work with ``urllib2.Request``, define your own openers/handlers
+or work with ``httplib``. In these cases it may come in handy to have an HTTP
+server running locally which behaves just like the real thing [1]_. Well, look
+no further!
+
+Quickstart
+==========
+
+Let's say you have a function to scrape HTML which only required to be pointed
+at a URL ::
+
+ import requests
+ def scrape(url):
+ html = requests.get(url).text
+ # some parsing happens here
+ # ...
+ return result
+
+You want to test this function in its entirety without having to rely on a
+remote server whose content you cannot control, neither do you want to waste
+time setting up a complex mechanism to mock or patch the underlying Python
+modules dealing with the actual HTTP request (of which there are more than one
+BTW). So what do you do?
+
+You simply use pytest's `funcargs feature`_ and simulate an entire server
+locally! ::
+
+ def test_retrieve_some_content(httpserver):
+ httpserver.serve_content(open('cached-content.html').read())
+ assert scrape(httpserver.url) == 'Found it!'
+
+What happened here is that for the duration of your tests an HTTP server is
+started on a random port on localhost which will serve the content you tell it
+to and behaves just like the real thing.
+
+The added bonus is that you can test whether your code behaves gracefully if
+there is a network problem::
+
+ def test_content_retrieval_fails_graciously(httpserver):
+ httpserver.serve_content('File not found!', 404)
+ pytest.raises(ContentNotFoundException, scrape, httpserver.url)
+
+The same thing works for SMTP servers, too::
+
+ def test_sending_some_message(smtpserver):
+ mailer = MyMailer(host=smtpserver.addr[0], port=smtpserver.addr[1])
+ mailer.send(to='bob@example.com', from_='alice@example.com',
+ subject='MyMailer v1.0', body='Check out my mailer!')
+ assert len(smtpserver.outbox)==1
+
+Here an SMTP server is started which accepts e-mails being sent to it. The
+nice feature here is that you can actually check if the message was received
+and what was sent by looking into the smtpserver's ``outbox``.
+
+It is really that easy!
+
+Available funcargs
+==================
+
+Here is a short overview of the available funcargs. For more details I suggest
+poking around in the code itself.
+
+``httpserver``
+ provides a threaded HTTP server instance running on localhost. It has the
+ following attributes:
+
+ * ``code`` - HTTP response code (int)
+ * ``content`` - content of next response (str, bytes, or iterable of either)
+ * ``headers`` - response headers (dict)
+ * ``chunked`` - whether to chunk-encode the response (enumeration)
+
+ Once these attributes are set, all subsequent requests will be answered with
+ these values until they are changed or the server is stopped. A more
+ convenient way to change these is ::
+
+ httpserver.serve_content(content=None, code=200, headers=None, chunked=pytest_localserver.http.Chunked.NO)
+
+ The ``chunked`` attribute or parameter can be set to
+
+ * ``Chunked.YES``, telling the server to always apply chunk encoding
+ * ``Chunked.NO``, telling the server to never apply chunk encoding
+ * ``Chunked.AUTO``, telling the server to apply chunk encoding only if
+ the ``Transfer-Encoding`` header includes ``chunked``
+
+ If chunk encoding is applied, each str or bytes in ``content`` becomes one
+ chunk in the response.
+
+ The server address can be found in property
+
+ * ``url``
+
+ which is the string representation of tuple ``server_address`` (host as str,
+ port as int).
+
+ If you want to check which form fields have been POSTed, Try ::
+
+ httpserver.serve_content(..., show_post_vars=True)
+
+ which will display them as parsable text.
+
+ If you need to inspect the requests sent to the server, a list of all
+ received requests can be found in property
+
+ * ``requests``
+
+ which is a list of ``werkzeug.wrappers.Request`` objects.
+
+``httpsserver``
+ is the same as ``httpserver`` only with SSL encryption.
+
+``smtpserver``
+ provides a threaded SMTP server, with an API similar to ``smtpd.SMTPServer``,
+ (the deprecated class from the Python standard library) running on localhost.
+ It has the following attributes:
+
+ * ``addr`` - server address as tuple (host as str, port as int)
+ * ``outbox`` - list of ``email.message.Message`` instances received.
+
+Using your a WSGI application as test server
+============================================
+
+As of version 0.3 you can now use a `WSGI application`_ to run on the test
+server ::
+
+ from pytest_localserver.http import WSGIServer
+
+ def simple_app(environ, start_response):
+ """Simplest possible WSGI application"""
+ status = '200 OK'
+ response_headers = [('Content-type', 'text/plain')]
+ start_response(status, response_headers)
+ return ['Hello world!\n']
+
+ @pytest.fixture
+ def testserver(request):
+ """Defines the testserver funcarg"""
+ server = WSGIServer(application=simple_app)
+ server.start()
+ request.addfinalizer(server.stop)
+ return server
+
+ def test_retrieve_some_content(testserver):
+ assert scrape(testserver.url) == 'Hello world!\n'
+
+Have a look at the following page for more information on WSGI:
+http://wsgi.readthedocs.org/en/latest/learn.html
+
+Download and Installation
+=========================
+
+You can install the plugin by running ::
+
+ pip install pytest-localserver
+
+Alternatively, get the latest stable version from `PyPI`_ or the latest
+`bleeding-edge`_ from Github.
+
+License and Credits
+===================
+
+This plugin is released under the MIT license. You can find the full text of
+the license in the LICENSE file.
+
+Copyright (C) 2010-2022 Sebastian Rahlf and others (see AUTHORS).
+
+Some parts of this package is based on ideas or code from other people:
+
+- I borrowed some implementation ideas for the httpserver from `linkchecker`_.
+- The implementation for the SMTP server is based on the `Mailsink recipe`_ by
+ Adam Feuer, Matt Branthwaite and Troy Frever.
+- The HTTPS implementation is based on work by `Sebastien Martini`_.
+
+Thanks guys!
+
+Development and future plans
+============================
+
+Feel free to clone the repository and add your own changes. Pull requests are
+always welcome!::
+
+ git clone https://github.com/pytest-dev/pytest-localserver
+
+If you find any bugs, please file a `report`_.
+
+Test can be run with tox.
+
+I already have a couple of ideas for future versions:
+
+* support for FTP, SSH (maybe base all on twisted?)
+* making the SMTP outbox as convenient to use as ``django.core.mail.outbox``
+* add your own here!
+
+Preparing a release
+-------------------
+
+For package maintainers, here is how we release a new version:
+
+#. Ensure that the ``CHANGES`` file is up to date with the latest changes.
+#. Make sure that all tests pass on the version you want to release.
+#. Use the `new release form on Github`_ (or some other equivalent method) to
+ create a new release, following the pattern of previous releases.
+
+ * Each release has to be based on a tag. You can either create the tag first
+ (e.g. using ``git tag``) and then make a release from that tag, or you can
+ have Github create the tag as part of the process of making a release;
+ either way works.
+ * The tag name **must** be the `PEP 440`_-compliant version number prefixed
+ by ``v``, making sure to include at least three version number components
+ (e.g. ``v0.6.0``).
+ * The "Auto-generate release notes" button will be useful in summarizing
+ the changes since the last release.
+
+#. Using either the `release workflows page`_ or the link in the email you
+ received about a "Deployment review", go to the workflow run created for
+ the new release and click "Review deployments", then either approve or reject
+ the two deployments, one to Test PyPI and one to real PyPI. (It should not be
+ necessary to reject a deployment unless something really weird happens.)
+ Once the deployment is approved, Github will automatically upload the files.
+
+----
+
+.. [1] The idea for this project was born when I needed to check that `a piece
+ of software`_ behaved itself when receiving HTTP error codes 404 and 500.
+ Having unsuccessfully tried to mock a server, I stumbled across
+ `linkchecker`_ which uses a the same idea to test its internals.
+
+.. _monkeypatching: http://pytest.org/latest/monkeypatch.html
+.. _pytest: http://pytest.org/
+.. _funcargs feature: http://pytest.org/latest/funcargs.html
+.. _linkchecker: http://linkchecker.sourceforge.net/
+.. _WSGI application: http://www.python.org/dev/peps/pep-0333/
+.. _PyPI: http://pypi.python.org/pypi/pytest-localserver/
+.. _bleeding-edge: https://github.com/pytest-dev/pytest-localserver
+.. _report: https://github.com/pytest-dev/pytest-localserver/issues/
+.. _tox: http://testrun.org/tox/
+.. _a piece of software: http://pypi.python.org/pypi/python-amazon-product-api/
+.. _Mailsink recipe: http://code.activestate.com/recipes/440690/
+.. _Sebastien Martini: http://code.activestate.com/recipes/442473/
+.. _PEP 440: https://peps.python.org/pep-0440/
+.. _build: https://pypa-build.readthedocs.io/en/latest/
+.. _twine: https://twine.readthedocs.io/en/stable/
+.. _new release form on Github: https://github.com/pytest-dev/pytest-localserver/releases/new
+.. _release workflows page: https://github.com/pytest-dev/pytest-localserver/actions/workflows/release.yml
diff --git a/contrib/python/pytest-localserver/py3/.dist-info/entry_points.txt b/contrib/python/pytest-localserver/py3/.dist-info/entry_points.txt
new file mode 100644
index 0000000000..72608edbee
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[pytest11]
+localserver = pytest_localserver.plugin
diff --git a/contrib/python/pytest-localserver/py3/.dist-info/top_level.txt b/contrib/python/pytest-localserver/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..cfe3f2cf35
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+pytest_localserver
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/__init__.py b/contrib/python/pytest-localserver/py3/pytest_localserver/__init__.py
new file mode 100644
index 0000000000..482058b822
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/__init__.py
@@ -0,0 +1 @@
+from pytest_localserver._version import version as VERSION # noqa
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/_version.py b/contrib/python/pytest-localserver/py3/pytest_localserver/_version.py
new file mode 100644
index 0000000000..044524b166
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/_version.py
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+ from typing import Tuple, Union
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+ VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '0.8.1'
+__version_tuple__ = version_tuple = (0, 8, 1)
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/http.py b/contrib/python/pytest-localserver/py3/pytest_localserver/http.py
new file mode 100644
index 0000000000..0899597f5e
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/http.py
@@ -0,0 +1,183 @@
+# Copyright (C) 2010-2013 Sebastian Rahlf and others (see AUTHORS).
+#
+# This program is release under the MIT license. You can find the full text of
+# the license in the LICENSE file.
+import enum
+import itertools
+import json
+import sys
+import threading
+
+from werkzeug.datastructures import Headers
+from werkzeug.serving import make_server
+from werkzeug.wrappers import Request
+from werkzeug.wrappers import Response
+
+
+class WSGIServer(threading.Thread):
+
+ """
+ HTTP server running a WSGI application in its own thread.
+ """
+
+ def __init__(self, host="127.0.0.1", port=0, application=None, **kwargs):
+ self.app = application
+ self._server = make_server(host, port, self.app, **kwargs)
+ self.server_address = self._server.server_address
+
+ super().__init__(name=self.__class__, target=self._server.serve_forever)
+
+ def __del__(self):
+ self.stop()
+
+ def stop(self):
+ try:
+ server = self._server
+ except AttributeError:
+ pass
+ else:
+ server.shutdown()
+
+ @property
+ def url(self):
+ host, port = self.server_address
+ proto = "http" if self._server.ssl_context is None else "https"
+ return "%s://%s:%i" % (proto, host, port)
+
+
+class Chunked(enum.Enum):
+ NO = False
+ YES = True
+ AUTO = None
+
+ def __bool__(self):
+ return bool(self.value)
+
+
+def _encode_chunk(chunk, charset):
+ if isinstance(chunk, str):
+ chunk = chunk.encode(charset)
+ return "{:x}".format(len(chunk)).encode(charset) + b"\r\n" + chunk + b"\r\n"
+
+
+class ContentServer(WSGIServer):
+
+ """
+ Small test server which can be taught which content (i.e. string) to serve
+ with which response code. Try the following snippet for testing API calls::
+
+ server = ContentServer(port=8080)
+ server.start()
+ print 'Test server running at http://%s:%i' % server.server_address
+
+ # any request to http://localhost:8080 will get a 503 response.
+ server.content = 'Hello World!'
+ server.code = 503
+
+ # ...
+
+ # we're done
+ server.stop()
+
+ """
+
+ def __init__(self, host="127.0.0.1", port=0, ssl_context=None):
+ super().__init__(host, port, self, ssl_context=ssl_context)
+ self.content, self.code = ("", 204) # HTTP 204: No Content
+ self.headers = {}
+ self.show_post_vars = False
+ self.compress = None
+ self.requests = []
+ self.chunked = Chunked.NO
+
+ def __call__(self, environ, start_response):
+ """
+ This is the WSGI application.
+ """
+ request = Request(environ)
+ self.requests.append(request)
+ if (
+ request.content_type == "application/x-www-form-urlencoded"
+ and request.method == "POST"
+ and self.show_post_vars
+ ):
+ content = json.dumps(request.form)
+ else:
+ content = self.content
+
+ if self.chunked == Chunked.YES or (
+ self.chunked == Chunked.AUTO and "chunked" in self.headers.get("Transfer-encoding", "")
+ ):
+ # If the code below ever changes to allow setting the charset of
+ # the Response object, the charset used here should also be changed
+ # to match. But until that happens, use UTF-8 since it is Werkzeug's
+ # default.
+ charset = "utf-8"
+ if isinstance(content, (str, bytes)):
+ content = (_encode_chunk(content, charset), "0\r\n\r\n")
+ else:
+ content = itertools.chain((_encode_chunk(item, charset) for item in content), ["0\r\n\r\n"])
+
+ response = Response(response=content, status=self.code)
+ response.headers.clear()
+ response.headers.extend(self.headers)
+
+ # FIXME get compression working!
+ # if self.compress == 'gzip':
+ # content = gzip.compress(content.encode('utf-8'))
+ # response.content_encoding = 'gzip'
+
+ return response(environ, start_response)
+
+ def serve_content(self, content, code=200, headers=None, chunked=Chunked.NO):
+ """
+ Serves string content (with specified HTTP error code) as response to
+ all subsequent request.
+
+ :param content: content to be displayed
+ :param code: HTTP status code
+ :param headers: HTTP headers to be returned
+ :param chunked: whether to apply chunked transfer encoding to the content
+ """
+ if not isinstance(content, (str, bytes, list, tuple)):
+ # If content is an iterable which is not known to be a string,
+ # bytes, or sequence, it might be something that can only be iterated
+ # through once, in which case we need to cache it so it can be reused
+ # to handle multiple requests.
+ try:
+ content = tuple(iter(content))
+ except TypeError:
+ # this probably means that content is not iterable, so just go
+ # ahead in case it's some type that Response knows how to handle
+ pass
+ self.content = content
+ self.code = code
+ self.chunked = chunked
+ if headers:
+ self.headers = Headers(headers)
+
+
+if __name__ == "__main__": # pragma: no cover
+ import os.path
+ import time
+
+ app = ContentServer()
+ server = WSGIServer(application=app)
+ server.start()
+
+ print("HTTP server is running at %s" % server.url)
+ print("Type <Ctrl-C> to stop")
+
+ try:
+ path = sys.argv[1]
+ except IndexError:
+ path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "README.rst")
+
+ app.serve_content(open(path).read(), 302)
+
+ try:
+ while True:
+ time.sleep(1)
+ except KeyboardInterrupt:
+ print("\rstopping...")
+ server.stop()
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/https.py b/contrib/python/pytest-localserver/py3/pytest_localserver/https.py
new file mode 100644
index 0000000000..856e222ac5
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/https.py
@@ -0,0 +1,149 @@
+# Copyright (C) 2010-2013 Sebastian Rahlf and others (see AUTHORS).
+#
+# This program is released under the MIT license. You can find the full text of
+# the license in the LICENSE file.
+import os.path
+
+from pytest_localserver.http import ContentServer
+
+#: default server certificate
+DEFAULT_CERTIFICATE = os.path.join(os.getcwd(), "server.pem")
+
+
+class SecureContentServer(ContentServer):
+
+ """
+    Small test server which works just like :class:`http.Server` over HTTPS::
+
+ server = SecureContentServer(
+ port=8080, key='/srv/my.key', cert='my.certificate')
+ server.start()
+        print('Test server running at %s' % server.url)
+ server.serve_content(open('/path/to/some.file').read())
+ # any call to https://localhost:8080 will get the contents of
+ # /path/to/some.file as a response.
+
+ To avoid *ssl handshake failures* you can import the `pytest-localserver
+ CA`_ into your browser of choice.
+
+ How to create a self-signed certificate
+ ---------------------------------------
+
+ If you want to create your own server certificate, you need `OpenSSL`_
+ installed on your machine. A self-signed certificate consists of a
+ certificate and a private key for your server. It can be created with
+ a command like this, using OpenSSL 1.1.1::
+
+ openssl req \
+ -x509 \
+ -newkey rsa:4096 \
+ -sha256 \
+ -days 3650 \
+ -nodes \
+ -keyout server.pem \
+ -out server.pem \
+ -subj "/CN=127.0.0.1/O=pytest-localserver/OU=Testing Dept." \
+ -addext "subjectAltName=DNS:localhost"
+
+ Note that both key and certificate are in a single file now named
+ ``server.pem``.
+
+ How to create your own Certificate Authority
+ --------------------------------------------
+
+ Generate a server key and request for signing (csr). Make sure that the
+ common name (CN) is your IP address/domain name (e.g. ``localhost``). ::
+
+ openssl genpkey \
+ -algorithm RSA \
+ -pkeyopt rsa_keygen_bits:4096 \
+ -out server.key
+ openssl req \
+ -new \
+ -addext "subjectAltName=DNS:localhost" \
+ -key server.key \
+ -out server.csr
+
+ Generate your own CA. Make sure that this time the CN is *not* your IP
+ address/domain name (e.g. ``localhost CA``). ::
+
+ openssl genpkey \
+ -algorithm RSA \
+ -pkeyopt rsa_keygen_bits:4096 \
+ -aes256 \
+ -out ca.key
+ openssl req \
+ -new \
+ -x509 \
+ -key ca.key \
+ -out ca.crt
+
+ Sign the certificate signing request (csr) with the self-created CA that
+ you made earlier. Note that OpenSSL does not copy the subjectAltName field
+ from the request (csr), so you have to provide it again as a file. If you
+ issue subsequent certificates and your browser already knows about previous
+ ones simply increment the serial number. ::
+
+ echo "subjectAltName=DNS:localhost" >server-extensions.txt
+ openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \
+ -set_serial 01 -extfile server-extensions.txt -out server.crt
+
+ Create a single file for both key and certificate::
+
+ cat server.key server.crt > server.pem
+
+ Now you only need to import ``ca.crt`` as a CA in your browser.
+
+ Want to know more?
+ ------------------
+
+ This information was compiled from the following sources, which you might
+ find helpful if you want to dig deeper into `pyOpenSSH`_, certificates and
+ CAs:
+
+ - http://code.activestate.com/recipes/442473/
+ - http://www.tc.umn.edu/~brams006/selfsign.html
+ -
+
+ A more advanced tutorial can be found `here`_.
+
+ .. _pytest-localserver CA: https://raw.githubusercontent.com/pytest-dev/pytest-localserver/master/pytest_localserver/ca.crt # noqa: E501
+ .. _pyOpenSSH: https://launchpad.net/pyopenssl
+ """
+
+ def __init__(self, host="localhost", port=0, key=DEFAULT_CERTIFICATE, cert=DEFAULT_CERTIFICATE):
+ """
+ :param key: location of file containing the server private key.
+ :param cert: location of file containing server certificate.
+ """
+
+ super().__init__(host, port, ssl_context=(key, cert))
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ import sys
+ import time
+
+ print("Using certificate %s." % DEFAULT_CERTIFICATE)
+
+ server = SecureContentServer()
+ server.start()
+ server.logging = True
+
+ print("HTTPS server is running at %s" % server.url)
+ print("Type <Ctrl-C> to stop")
+
+ try:
+ path = sys.argv[1]
+ except IndexError:
+ path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "README.rst")
+
+ server.serve_content(open(path).read(), 302)
+
+ try:
+ while True:
+ time.sleep(1)
+ except KeyboardInterrupt:
+ print("\rstopping...")
+ server.stop()
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/plugin.py b/contrib/python/pytest-localserver/py3/pytest_localserver/plugin.py
new file mode 100644
index 0000000000..1e6ad2f172
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/plugin.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2011 Sebastian Rahlf <basti at redtoad dot de>
+#
+# This program is released under the MIT license. You can find the full text of
+# the license in the LICENSE file.
+import os
+import pkgutil
+
+import pytest
+
+
+@pytest.fixture
+def httpserver(request):
+ """The returned ``httpserver`` provides a threaded HTTP server instance
+ running on a randomly assigned port on localhost. It can be taught which
+ content (i.e. string) to serve with which response code and comes with
+ following attributes:
+
+ * ``code`` - HTTP response code (int)
+ * ``content`` - content of next response (str)
+ * ``headers`` - response headers (dict)
+
+    Once these attributes are set, all subsequent requests will be answered with
+ these values until they are changed or the server is stopped. A more
+ convenient way to change these is ::
+
+ httpserver.serve_content(
+ content='My content', code=200,
+ headers={'content-type': 'text/plain'})
+
+ The server address can be found in property
+
+ * ``url``
+
+ which is the string representation of tuple ``server_address`` (host as
+ str, port as int).
+
+ Example::
+
+ import requests
+ def scrape(url):
+ html = requests.get(url).text
+ # some parsing happens here
+ # ...
+ return result
+
+ def test_retrieve_some_content(httpserver):
+ httpserver.serve_content(open('cached-content.html').read())
+ assert scrape(httpserver.url) == 'Found it!'
+
+ """
+ from pytest_localserver import http
+
+ server = http.ContentServer()
+ server.start()
+ request.addfinalizer(server.stop)
+ return server
+
+
+@pytest.fixture
+def httpsserver(request):
+ """The returned ``httpsserver`` (note the additional S!) provides a
+ threaded HTTP server instance similar to funcarg ``httpserver`` but with
+ SSL encryption.
+ """
+ from pytest_localserver import https
+ try:
+ with open(https.DEFAULT_CERTIFICATE, 'wb') as f:
+ f.write(pkgutil.get_data('pytest_localserver', 'server.pem'))
+ server = https.SecureContentServer()
+ server.start()
+ request.addfinalizer(server.stop)
+ yield server
+ finally:
+ os.remove(https.DEFAULT_CERTIFICATE)
+
+
+@pytest.fixture
+def smtpserver(request):
+ """The returned ``smtpserver`` provides a threaded instance of
+ ``smtpd.SMTPServer`` running on localhost. It has the following
+ attributes:
+
+ * ``addr`` - server address as tuple (host as str, port as int)
+ """
+ from pytest_localserver import smtp
+
+ server = smtp.Server()
+ server.start()
+ request.addfinalizer(server.stop)
+ return server
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/server.pem b/contrib/python/pytest-localserver/py3/pytest_localserver/server.pem
new file mode 100644
index 0000000000..4f7f1ed322
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/server.pem
@@ -0,0 +1,84 @@
+-----BEGIN PRIVATE KEY-----
+MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQC31RlRDMN6eGpQ
+zZaqtgk7/c5h98PoPNRpFFoUuuWGdf5PlHv4fMym7Zmz2ljx1DqutKhIIUKqS1vh
+xd5zMFpheOUTVPVfQc5evgTIm1GF0rMSSaQSFPVOX3nNXGmF1/Jq9YWTc/rb2ns/
+s+Ip1zSKBqDsdRbrkvpSa7cyCkxYcuYtYo5jRa930Fbn4cNj+aA3dxGXd4bLLfnR
+BpRA0V5SzBv93MtOK9kngQwQhjBJC/L/acHPO5dzQISBhM9NTSCAH4zm0SlTiExK
+DhBdExSbdjAjnJ3k82hNFLUqY1JAm4yVlvwD3vNY4hkf/gWzuQeJIhzK8kE4A+dD
+8BZzdHroK9xnnpmSlS7/P0raQd3VZPc8swEDyw9MrdA5UU95b07sUVs0LM0vWhi+
+rwNAJHfiQ77twc0bP7niyy/Kg+UYf7m0i/nyvJFKq75rHOfZvmsPNs+gOdJff+yy
+4vv9pmImj2nulgOgrGrzc4ICnx3GpoKmGFDq/p+hqk99P92dFHmwd7c2bYHQNpC9
+BJh8VzrVuyndX2mL5P+/LfmEi8tI06Imykzqtk/UODLJks7ZIrJfYlYmm7aVdrvO
+1U2s10AfloCX/ZVO7u3k4lH7Stj+/C8Ap+5Cm4Q46sZGO0Z5b808p4ETcoAI/AAl
+OwpHAMi9ueLqJ7J0ykCDl/LrTyNqrQIDAQABAoICACObXRn71Okl5cHc8HAEbml2
+UcFcElAraCEqVgBp6wdOV4HmitSop6M1pm3VvyCoMO2iBG5kMtt1WUiz4NCC7x6u
+IgDKlfRrdKOZPqf0nafEFfdW2DbAZHtXtun2GmJYX5YkFElpT4/CE9lU6FueWYja
+m9TxIQ1kHKRWRNemcv820iq8SkQkPUaBzjN/4S6+LTBRGdEyz6MPNrIsCg87/n8f
+FdToLWDo0Vj7f/C7bSLY86pRO77+Fem293N23AhnBgKLGemjXdPWNKCrdLPyfC1Y
+iR58uYCdPPihKC4bqtTkzCg1ZH8DcjMnKCKwOz6CelkviFAu+D73UpYwLMkUKLH3
+p3meFBwa0oEzUUof+W9J5HPnVX6nGR2V4fXkejcJoOBHUaSsuRFiPS4XJMj++DI7
+uiMOt7QljqCKirmCp8tVQ5raT9zwFgNCsR3+gemD1KC3zlXixGs1DyI4x2YwTgKU
+c16vnh9fGS9zq/drxqbeMvVbyVZF98LjJfgPxcmyEAXVH46Rs3/KSr6ve6MpRk9G
+3vLd7BVfEXoGA1Sha7PRg9OaKBgODfkDRsyZJqqkqHurE4P+8NQZ3mhzdGa4Prj9
+er5BrE3gmvagtQUJf0n+E6HRHGCFoq4i+jOeBw8qiwxgWV0ITfhneQDJF8JvBrzJ
+IByC9fVUYB4R4wESRoOBAoIBAQDgJz1etpf/47PwM5A5bdqJEEMaT6d0ycI7QtFQ
+1L3PgdRmu1ag2pCYogPQx1zrkMUe4qX0K1h372toRUz0RvoQFYF6qVLJNK/hOd+O
+GQ/Rw3XuCvCCs+6QbeQNqUjhLeYf3+TH4IEbIvukIACnDlHvAhtu+IQR7sVwCXda
+Slu1zW0ya7Pa/pPnEOQpA9D758/GjcZpe44hCBq+EnrV40Q3jHXEVtsDexq1ubzz
+BZEVLr4iwVrEjELZo4pbT+wQx2waRFqTVej5RaQadnSMCdRC0LCTa+t7hfuzN+KN
+DBoSUeOlcQ88TyEGvcZXo0jAyDBdN5HC38ujZlkqHHCZVEGxAoIBAQDR81WeHMYW
+/vtUhrP3BaJMj3RL/Vmpujac/i9IjdxrP2bi9mweunkZBH9UHNPcJp2b/+uAdSJO
+aQRzghCM+DmuOIuu4rB9FU6qpXGhcag126iu328eSYS1sJg5CVGs9ZhaxKk5xbro
+1cV0uUS6Gxl2z1Kpsb2dy/zhPTSwf6nrKXYwfrM65+EURz0fniKGfgxs6+p+uTVS
+kkLMe2nusJ1KLGrXqfJfa25sQKo1zaRFHLDd0/pgchijvVkhXDY3A7913i+xbQZu
+KIfbGp0pH4XFUJn1AR4XqPpE+wmHiLeqEmFJ5xcDl4q3j2dGnO3mHUYYNFOxh1nt
+1MCDCCbKJVu9AoIBAQCOUtv4o19nrqC1x0ev7zxvAtBYiHL/CIw3LHnTJQFQHFNM
+125tu9lL0LMzgSJSwB0pOye8HTmTDYXZMwdloxtr0vvfcluKPdXe3+w+QVN2EPF0
+L6X+l1jGg7/lnLMVpxsS6gpNjxLqtA+ralZ/u+vyIhhhIZJaAI2EUb5iqgwJJ2JK
+PXB5gGNQt7zm/fFXwRyAKcztdPINryOrw/gSjrblvl2YSL3PO/79m+2JMOOp24AG
+eVa0rYpUvi4/REPTc4wEMZqBKm8+tyU3WDcwI52Ovwsez8s5Jx1l8fn7LM/xCeXN
+SjguRt/lc+HYC2lKXtG2nm4Cmi6mlXnP7zbfZExBAoIBAQC1xAkU+W5ajGjFllWK
+gJsx02TpQS92bVxI8Ru4ofD5/QszZgrXU7Px/93I0ahuShRb8eZO8ZpA7lTHOAzi
+Lymo9xWf1Gzd7iuMO+4zyrXJ4yGYPKL0QswdjQVNJA9NQdekhezIsrKOUD1CQAAL
+a9jQ7s9vUQ2L5wZJbvcF85EFooDLnXXIguZv6vk1PXBApjJVvq3nBqvuj+g7JoHg
+/5E9nVTm4CCRke4o1JdIO4CDwUIy2wpCo6VHZXAcHLxnRtxkzHbYEj7l8jska1cz
+OjJTUOPppQ0LiOUcAYcPi0MPgBgwplxbZMDZCNNt5AFnH2MHI45t/XPTH0WIa+9B
+RbS1AoIBAQCqZri7tm9ngZfvKNNvdVTgBcKukDFek4f7ar0bOkISALtNrn1xXIID
+1ggELNy9afTmzPlttqMVQIxSTL3p7LIkZzTsuK0uthbsyzXxLHw2m+oHgaYut7he
+j2v7qTmaw7rgpTiORTDg00+5HDtdMmp3Km4aurNasPA80i8Z2ElI20i50LlQ4K5Q
+lIqpHR4fwrBr4SLStzvBo9UK1YYQ94FyKd7xou3uXLLTlY3G8rD6jjKJE2Gg8Ga/
+gGzbRCZWH6AOk1iO/CmOPH6AdFn5axXTx+uAML1Lr2VQ+azrYZCtIhKmW/kuQPQg
+apeiobcSY1vsX7eM8mQkM8TxrDLyNjtl
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIFiTCCA3GgAwIBAgIUUpWEFJm0PzrYTkhLe05yIBhBMuowDQYJKoZIhvcNAQEL
+BQAwSTESMBAGA1UEAwwJMTI3LjAuMC4xMRswGQYDVQQKDBJweXRlc3QtbG9jYWxz
+ZXJ2ZXIxFjAUBgNVBAsMDVRlc3RpbmcgRGVwdC4wHhcNMjEwOTE0MDU1NzAxWhcN
+MzEwOTEyMDU1NzAxWjBJMRIwEAYDVQQDDAkxMjcuMC4wLjExGzAZBgNVBAoMEnB5
+dGVzdC1sb2NhbHNlcnZlcjEWMBQGA1UECwwNVGVzdGluZyBEZXB0LjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBALfVGVEMw3p4alDNlqq2CTv9zmH3w+g8
+1GkUWhS65YZ1/k+Ue/h8zKbtmbPaWPHUOq60qEghQqpLW+HF3nMwWmF45RNU9V9B
+zl6+BMibUYXSsxJJpBIU9U5fec1caYXX8mr1hZNz+tvaez+z4inXNIoGoOx1FuuS
++lJrtzIKTFhy5i1ijmNFr3fQVufhw2P5oDd3EZd3hsst+dEGlEDRXlLMG/3cy04r
+2SeBDBCGMEkL8v9pwc87l3NAhIGEz01NIIAfjObRKVOITEoOEF0TFJt2MCOcneTz
+aE0UtSpjUkCbjJWW/APe81jiGR/+BbO5B4kiHMryQTgD50PwFnN0eugr3GeemZKV
+Lv8/StpB3dVk9zyzAQPLD0yt0DlRT3lvTuxRWzQszS9aGL6vA0Akd+JDvu3BzRs/
+ueLLL8qD5Rh/ubSL+fK8kUqrvmsc59m+aw82z6A50l9/7LLi+/2mYiaPae6WA6Cs
+avNzggKfHcamgqYYUOr+n6GqT30/3Z0UebB3tzZtgdA2kL0EmHxXOtW7Kd1faYvk
+/78t+YSLy0jToibKTOq2T9Q4MsmSztkisl9iViabtpV2u87VTazXQB+WgJf9lU7u
+7eTiUftK2P78LwCn7kKbhDjqxkY7RnlvzTyngRNygAj8ACU7CkcAyL254uonsnTK
+QIOX8utPI2qtAgMBAAGjaTBnMB0GA1UdDgQWBBRzl1iPBK4XZwChdNhdPHfjAb/z
+BzAfBgNVHSMEGDAWgBRzl1iPBK4XZwChdNhdPHfjAb/zBzAPBgNVHRMBAf8EBTAD
+AQH/MBQGA1UdEQQNMAuCCWxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAgEATk+Q
+t6psMrtGeFcZKYdmSFqW3SZUba4l76PzvHRf8nMcB1eFuZ4mCdiv0NgcQkE8c9T+
+i/J4wEmJ+mf1033MP1vQmrGqnaYBsVHNBTaTsP+gLg6Z7AGPvPaL2fwmWWNwTT0O
+1352bdz9ORacKSXW3Pq0Vi1pTMho0kAya3VQpl2paqz8qSUG7ijyGQ46VXjgqNZ1
+P5lv+6CWa3AwEQo6Edv1x+HLesRWVqVAkxxhlaGOPQm1cDlpnI4rxuYIMlsb5cNZ
+XTAIxw6Es1eqlPcZ96EoGXyIrG7Ej6Yb9447PrC1ulMnIu74cWLY25eu+oVr7Nvk
+Gjp2I7qbVjz9Ful0o0M9Wps4RzCgrpO4WeirCK/jFIUpmXJdn7V4mX0h2ako+dal
+vczg+bAd4ZedJWHTiqJs9lVMh4/YD7Ck6n+iAZ8Jusq6OhyTY43/Nyp2zQbwQmYv
+y3V6JVX+vY4Cq8pR1i8x5FBHnOCMPoT4sbOjKuoFWVi9wH1d65Q1JOo6/0eYzfwJ
+nuGUJza7+aCxYNlqxtqX0ItM670ClxB7fuWUpKh5WHrHD2dqBhYwtXOl9yBHrFOJ
+O8toKk3PmtlMqVZ8QXmgSqEy7wkfxhjJLgi2AQsqeA6nDrCLtr2pWdqDWoUfxY8r
+r5rc71nFLay/H2CbOYELI+20VFMp8GF3kOZbkRA=
+-----END CERTIFICATE-----
diff --git a/contrib/python/pytest-localserver/py3/pytest_localserver/smtp.py b/contrib/python/pytest-localserver/py3/pytest_localserver/smtp.py
new file mode 100644
index 0000000000..82dbc394b9
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/pytest_localserver/smtp.py
@@ -0,0 +1,177 @@
+# Copyright (C) 2011 Sebastian Rahlf <basti at redtoad dot de>
+# with some ideas from http://code.activestate.com/recipes/440690/
+# SmtpMailsink Copyright 2005 Aviarc Corporation
+# Written by Adam Feuer, Matt Branthwaite, and Troy Frever
+# which is Licensed under the PSF License
+import email
+
+import aiosmtpd.controller
+
+
+class MessageDetails:
+ def __init__(self, peer, mailfrom, rcpttos, *, mail_options=None, rcpt_options=None):
+ self.peer = peer
+ self.mailfrom = mailfrom
+ self.rcpttos = rcpttos
+ if mail_options:
+ self.mail_options = mail_options
+ if rcpt_options:
+ self.rcpt_options = rcpt_options
+
+
+class Handler:
+ def __init__(self):
+ self.outbox = []
+
+ async def handle_DATA(self, server, session, envelope):
+ message = email.message_from_bytes(envelope.content)
+ message.details = MessageDetails(session.peer, envelope.mail_from, envelope.rcpt_tos)
+ self.outbox.append(message)
+ return "250 OK"
+
+
+class Server(aiosmtpd.controller.Controller):
+
+ """
+ Small SMTP test server.
+
+ This is little more than a wrapper around aiosmtpd.controller.Controller
+ which offers a slightly different interface for backward compatibility with
+ earlier versions of pytest-localserver. You can just as well use a standard
+ Controller and pass it a Handler instance.
+
+ Here is how to use this class for sending an email, if you really need to::
+
+ server = Server(port=8080)
+ server.start()
+        print('SMTP server is running on %s:%i' % server.addr)
+
+ # any e-mail sent to localhost:8080 will end up in server.outbox
+ # ...
+
+ server.stop()
+
+ """
+
+ def __init__(self, host="localhost", port=0):
+ try:
+ super().__init__(Handler(), hostname=host, port=port, server_hostname=host)
+ except TypeError:
+ # for aiosmtpd <1.3
+ super().__init__(Handler(), hostname=host, port=port)
+
+ @property
+ def outbox(self):
+ return self.handler.outbox
+
+ def _set_server_socket_attributes(self):
+ """
+ Set the addr and port attributes on this Server instance, if they're not
+ already set.
+ """
+
+ # I split this out into its own method to allow running this code in
+ # aiosmtpd <1.4, which doesn't have the _trigger_server() method on
+ # the Controller class. If I put it directly in _trigger_server(), it
+ # would fail when calling super()._trigger_server(). In the future, when
+ # we can safely require aiosmtpd >=1.4, this method can be inlined
+ # directly into _trigger_server().
+ if hasattr(self, "addr"):
+ assert hasattr(self, "port")
+ return
+
+ self.addr = self.server.sockets[0].getsockname()[:2]
+
+ # Work around a bug/missing feature in aiosmtpd (https://github.com/aio-libs/aiosmtpd/issues/276)
+ if self.port == 0:
+ self.port = self.addr[1]
+ assert self.port != 0
+
+ def _trigger_server(self):
+ self._set_server_socket_attributes()
+ super()._trigger_server()
+
+ def is_alive(self):
+ return self._thread is not None and self._thread.is_alive()
+
+ @property
+ def accepting(self):
+ try:
+ return self.server.is_serving()
+ except AttributeError:
+ # asyncio.base_events.Server.is_serving() only exists in Python 3.6
+ # and up. For Python 3.5, asyncio.base_events.BaseEventLoop.is_running()
+ # is a close approximation; it should mostly return the same value
+ # except for brief periods when the server is starting up or shutting
+ # down. Once we drop support for Python 3.5, this branch becomes
+ # unnecessary.
+ return self.loop.is_running()
+
+ # for aiosmtpd <1.4
+ if not hasattr(aiosmtpd.controller.Controller, "_trigger_server"):
+
+ def start(self):
+ super().start()
+ self._set_server_socket_attributes()
+
+ def stop(self, timeout=None):
+ """
+ Stops test server.
+ :param timeout: When the timeout argument is present and not None, it
+ should be a floating point number specifying a timeout for the
+ operation in seconds (or fractions thereof).
+ """
+
+ # This mostly copies the implementation from Controller.stop(), with two
+ # differences:
+ # - It removes the assertion that the thread exists, allowing stop() to
+ # be called more than once safely
+ # - It passes the timeout argument to Thread.join()
+ if self.loop.is_running():
+ try:
+ self.loop.call_soon_threadsafe(self.cancel_tasks)
+ except AttributeError:
+ # for aiosmtpd < 1.4.3
+ self.loop.call_soon_threadsafe(self._stop)
+ if self._thread is not None:
+ self._thread.join(timeout)
+ self._thread = None
+ self._thread_exception = None
+ self._factory_invoked = None
+ self.server_coro = None
+ self.server = None
+ self.smtpd = None
+
+ def __del__(self):
+ # This is just for backward compatibility, to preserve the behavior that
+ # the server is stopped when this object is finalized. But it seems
+ # sketchy to rely on this to stop the server. Typically, the server
+ # should be stopped "manually", before it gets deleted.
+ if self.is_alive():
+ self.stop()
+
+ def __repr__(self): # pragma: no cover
+ return "<smtp.Server %s:%s>" % self.addr
+
+
+def main():
+ import time
+
+ server = Server()
+ server.start()
+
+ print("SMTP server is running on %s:%i" % server.addr)
+ print("Type <Ctrl-C> to stop")
+
+ try:
+ while True:
+ time.sleep(1)
+ except KeyboardInterrupt:
+ pass
+ finally:
+ print("\rstopping...")
+ server.stop()
+
+
+if __name__ == "__main__": # pragma: no cover
+ main()
diff --git a/contrib/python/pytest-localserver/py3/ya.make b/contrib/python/pytest-localserver/py3/ya.make
new file mode 100644
index 0000000000..0a1b91f6c3
--- /dev/null
+++ b/contrib/python/pytest-localserver/py3/ya.make
@@ -0,0 +1,41 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(0.8.1)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/python/Werkzeug
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ pytest_localserver.smtp
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ pytest_localserver/__init__.py
+ pytest_localserver/_version.py
+ pytest_localserver/http.py
+ pytest_localserver/https.py
+ pytest_localserver/plugin.py
+ pytest_localserver/smtp.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/pytest-localserver/py3/
+ .dist-info/METADATA
+ .dist-info/entry_points.txt
+ .dist-info/top_level.txt
+ pytest_localserver/server.pem
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/requests-mock/py2/AUTHORS b/contrib/python/requests-mock/py2/AUTHORS
new file mode 100644
index 0000000000..ae2e02a612
--- /dev/null
+++ b/contrib/python/requests-mock/py2/AUTHORS
@@ -0,0 +1,50 @@
+Adam Johnson <me@adamj.eu>
+Alex Peters <alex@peters.net>
+Allan Lewis <allanlewis99@gmail.com>
+Andreas Jaeger <aj@suse.com>
+Andrii Oriekhov <andriyorehov@gmail.com>
+Arjan Keeman <arjan.keeman@falckon.nl>
+Axel H <noirbizarre@users.noreply.github.com>
+Christian Clauss <cclauss@me.com>
+Colas Le Guernic <clslgrnc@users.noreply.github.com>
+Cyrille Corpet <cyrille@bayesimpact.org>
+Darragh Bailey <dbailey@hpe.com>
+David Kremer <courrier@david-kremer.fr>
+Ian Cordasco <ian.cordasco@rackspace.com>
+Ilya Konstantinov <ilya.konstantinov@gmail.com>
+Jamie Lennox <jamie.lennox@agoda.com>
+Jamie Lennox <jamie@vibrato.com.au>
+Jamie Lennox <jamielennox@gmail.com>
+Jamie Lennox <jamielennox@redhat.com>
+Janne Pulkkinen <janne.pulkkinen@protonmail.com>
+Janonymous <janonymous.codevulture@gmail.com>
+Jelle van der Waa <jelle@archlinux.org>
+Jeremy Stanley <fungi@yuggoth.org>
+Jochen Kupperschmidt <homework@nwsnet.de>
+Joel Andrews <oldsneerjaw@gmail.com>
+Jon Dufresne <jon.dufresne@gmail.com>
+Kenny Nguyen <kkenny.nguyen@pm.me>
+Louis Taylor <louis@kragniz.eu>
+Manuel Kaufmann <humitos@gmail.com>
+Matthias Bilger <matthias@bilger.info>
+Michał Górny <mgorny@gentoo.org>
+Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>
+Monty Taylor <mordred@inaugust.com>
+Noam <noamkush@gmail.com>
+Pascal Corpet <pascal@bayesimpact.org>
+Peter Hodge <peter.hodge84@gmail.com>
+Petre Mierlutiu <petrem@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Ryan Brooke Payne <ryan.payne@daveramsey.com>
+Sebastian Kalinowski <sebastian@kalinowski.eu>
+Simon Willison <swillison@gmail.com>
+Stefaan Lippens <stefaan.lippens@vito.be>
+Swapnil Kulkarni (coolsvap) <me@coolsvap.net>
+Ville Skyttä <ville.skytta@iki.fi>
+boncheff <boncheff@users.noreply.github.com>
+clslgrnc <clslgrnc@users.noreply.github.com>
+dongfangtianyu <7629022+dongfangtianyu@users.noreply.github.com>
+popokatapepel <jan-seins@hotmail.de>
+reedip <reedip.banerjee@nectechnologies.in>
+rfportilla <rfportilla@yahoo.com>
+voith <voithjm1@gmail.com>
diff --git a/contrib/python/requests-mock/py2/LICENSE b/contrib/python/requests-mock/py2/LICENSE
new file mode 100644
index 0000000000..d88b5784ba
--- /dev/null
+++ b/contrib/python/requests-mock/py2/LICENSE
@@ -0,0 +1,180 @@
+Copyright (c) 2014, Jamie Lennox
+All rights reserved.
+
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+
diff --git a/contrib/python/requests-mock/py2/README.rst b/contrib/python/requests-mock/py2/README.rst
new file mode 100644
index 0000000000..1281d339a5
--- /dev/null
+++ b/contrib/python/requests-mock/py2/README.rst
@@ -0,0 +1,101 @@
+===============================
+requests-mock
+===============================
+
+.. image:: https://badge.fury.io/py/requests-mock.png
+ :target: https://pypi.org/project/requests-mock/
+
+Intro
+=====
+
+`requests-mock` provides a building block to stub out the HTTP `requests`_ portions of your testing code.
+You should check out the `docs`_ for more information.
+
+The Basics
+==========
+
+Everything in `requests`_ eventually goes through an adapter to do the transport work.
+`requests-mock` creates a custom `adapter` that allows you to predefine responses when certain URIs are called.
+
+There are then a number of methods provided to get the adapter used.
+
+A simple example:
+
+.. code:: python
+
+ >>> import requests
+ >>> import requests_mock
+
+ >>> session = requests.Session()
+ >>> adapter = requests_mock.Adapter()
+ >>> session.mount('mock://', adapter)
+
+ >>> adapter.register_uri('GET', 'mock://test.com', text='data')
+ >>> resp = session.get('mock://test.com')
+ >>> resp.status_code, resp.text
+ (200, 'data')
+
+Obviously having all URLs be `mock://` prefixed isn't going to be useful,
+so you can use `requests_mock.Mocker` to get the adapter into place.
+
+As a context manager:
+
+.. code:: python
+
+ >>> with requests_mock.Mocker() as m:
+ ... m.get('http://test.com', text='data')
+ ... requests.get('http://test.com').text
+ ...
+ 'data'
+
+Or as a decorator:
+
+.. code:: python
+
+ >>> @requests_mock.Mocker()
+ ... def test_func(m):
+ ... m.get('http://test.com', text='data')
+ ... return requests.get('http://test.com').text
+ ...
+ >>> test_func()
+ 'data'
+
+Or as a pytest fixture:
+
+.. code:: python
+
+ >>> def test_simple(requests_mock):
+ ... requests_mock.get('http://test.com', text='data')
+ ... assert 'data' == requests.get('http://test.com').text
+
+For more information, check out the `docs`_.
+
+Reporting Bugs
+==============
+
+Development and bug tracking is performed on `GitHub`_.
+
+Questions
+=========
+
+There is a tag dedicated to `requests-mock` on `StackOverflow`_ where you can ask usage questions.
+
+License
+=======
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+.. _requests: https://requests.readthedocs.io
+.. _docs: https://requests-mock.readthedocs.io/
+.. _GitHub: https://github.com/jamielennox/requests-mock
+.. _StackOverflow: https://stackoverflow.com/questions/tagged/requests-mock
diff --git a/contrib/python/requests-mock/py3/.dist-info/METADATA b/contrib/python/requests-mock/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..d8eadeaec2
--- /dev/null
+++ b/contrib/python/requests-mock/py3/.dist-info/METADATA
@@ -0,0 +1,144 @@
+Metadata-Version: 2.1
+Name: requests-mock
+Version: 1.11.0
+Summary: Mock out responses from the requests package
+Home-page: https://requests-mock.readthedocs.io/
+Author: Jamie Lennox
+Author-email: jamielennox@gmail.com
+License: Apache-2
+Project-URL: Source, https://github.com/jamielennox/requests-mock
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Testing
+License-File: LICENSE
+Requires-Dist: requests (<3,>=2.3)
+Requires-Dist: six
+Provides-Extra: fixture
+Requires-Dist: fixtures ; extra == 'fixture'
+Provides-Extra: test
+Requires-Dist: fixtures ; extra == 'test'
+Requires-Dist: purl ; extra == 'test'
+Requires-Dist: pytest ; extra == 'test'
+Requires-Dist: sphinx ; extra == 'test'
+Requires-Dist: testtools ; extra == 'test'
+Requires-Dist: requests-futures ; extra == 'test'
+Requires-Dist: mock ; (( python_version < '3.3')) and extra == 'test'
+
+===============================
+requests-mock
+===============================
+
+.. image:: https://badge.fury.io/py/requests-mock.png
+ :target: https://pypi.org/project/requests-mock/
+
+Intro
+=====
+
+`requests-mock` provides a building block to stub out the HTTP `requests`_ portions of your testing code.
+You should check out the `docs`_ for more information.
+
+The Basics
+==========
+
+Everything in `requests`_ eventually goes through an adapter to do the transport work.
+`requests-mock` creates a custom `adapter` that allows you to predefine responses when certain URIs are called.
+
+There are then a number of methods provided to get the adapter used.
+
+A simple example:
+
+.. code:: python
+
+ >>> import requests
+ >>> import requests_mock
+
+ >>> session = requests.Session()
+ >>> adapter = requests_mock.Adapter()
+ >>> session.mount('mock://', adapter)
+
+ >>> adapter.register_uri('GET', 'mock://test.com', text='data')
+ >>> resp = session.get('mock://test.com')
+ >>> resp.status_code, resp.text
+ (200, 'data')
+
+Obviously having all URLs be `mock://` prefixed isn't going to be useful,
+so you can use `requests_mock.Mocker` to get the adapter into place.
+
+As a context manager:
+
+.. code:: python
+
+ >>> with requests_mock.Mocker() as m:
+ ... m.get('http://test.com', text='data')
+ ... requests.get('http://test.com').text
+ ...
+ 'data'
+
+Or as a decorator:
+
+.. code:: python
+
+ >>> @requests_mock.Mocker()
+ ... def test_func(m):
+ ... m.get('http://test.com', text='data')
+ ... return requests.get('http://test.com').text
+ ...
+ >>> test_func()
+ 'data'
+
+Or as a pytest fixture:
+
+.. code:: python
+
+ >>> def test_simple(requests_mock):
+ ... requests_mock.get('http://test.com', text='data')
+ ... assert 'data' == requests.get('http://test.com').text
+
+For more information, check out the `docs`_.
+
+Reporting Bugs
+==============
+
+Development and bug tracking is performed on `GitHub`_.
+
+Questions
+=========
+
+There is a tag dedicated to `requests-mock` on `StackOverflow`_ where you can ask usage questions.
+
+License
+=======
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+.. _requests: https://requests.readthedocs.io
+.. _docs: https://requests-mock.readthedocs.io/
+.. _GitHub: https://github.com/jamielennox/requests-mock
+.. _StackOverflow: https://stackoverflow.com/questions/tagged/requests-mock
+
diff --git a/contrib/python/requests-mock/py3/.dist-info/entry_points.txt b/contrib/python/requests-mock/py3/.dist-info/entry_points.txt
new file mode 100644
index 0000000000..b157e5a5ec
--- /dev/null
+++ b/contrib/python/requests-mock/py3/.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[pytest11]
+requests_mock = requests_mock.contrib._pytest_plugin
diff --git a/contrib/python/requests-mock/py3/.dist-info/top_level.txt b/contrib/python/requests-mock/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..65a92dd61d
--- /dev/null
+++ b/contrib/python/requests-mock/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+requests_mock
diff --git a/contrib/python/requests-mock/py3/AUTHORS b/contrib/python/requests-mock/py3/AUTHORS
new file mode 100644
index 0000000000..ae2e02a612
--- /dev/null
+++ b/contrib/python/requests-mock/py3/AUTHORS
@@ -0,0 +1,50 @@
+Adam Johnson <me@adamj.eu>
+Alex Peters <alex@peters.net>
+Allan Lewis <allanlewis99@gmail.com>
+Andreas Jaeger <aj@suse.com>
+Andrii Oriekhov <andriyorehov@gmail.com>
+Arjan Keeman <arjan.keeman@falckon.nl>
+Axel H <noirbizarre@users.noreply.github.com>
+Christian Clauss <cclauss@me.com>
+Colas Le Guernic <clslgrnc@users.noreply.github.com>
+Cyrille Corpet <cyrille@bayesimpact.org>
+Darragh Bailey <dbailey@hpe.com>
+David Kremer <courrier@david-kremer.fr>
+Ian Cordasco <ian.cordasco@rackspace.com>
+Ilya Konstantinov <ilya.konstantinov@gmail.com>
+Jamie Lennox <jamie.lennox@agoda.com>
+Jamie Lennox <jamie@vibrato.com.au>
+Jamie Lennox <jamielennox@gmail.com>
+Jamie Lennox <jamielennox@redhat.com>
+Janne Pulkkinen <janne.pulkkinen@protonmail.com>
+Janonymous <janonymous.codevulture@gmail.com>
+Jelle van der Waa <jelle@archlinux.org>
+Jeremy Stanley <fungi@yuggoth.org>
+Jochen Kupperschmidt <homework@nwsnet.de>
+Joel Andrews <oldsneerjaw@gmail.com>
+Jon Dufresne <jon.dufresne@gmail.com>
+Kenny Nguyen <kkenny.nguyen@pm.me>
+Louis Taylor <louis@kragniz.eu>
+Manuel Kaufmann <humitos@gmail.com>
+Matthias Bilger <matthias@bilger.info>
+Michał Górny <mgorny@gentoo.org>
+Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>
+Monty Taylor <mordred@inaugust.com>
+Noam <noamkush@gmail.com>
+Pascal Corpet <pascal@bayesimpact.org>
+Peter Hodge <peter.hodge84@gmail.com>
+Petre Mierlutiu <petrem@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Ryan Brooke Payne <ryan.payne@daveramsey.com>
+Sebastian Kalinowski <sebastian@kalinowski.eu>
+Simon Willison <swillison@gmail.com>
+Stefaan Lippens <stefaan.lippens@vito.be>
+Swapnil Kulkarni (coolsvap) <me@coolsvap.net>
+Ville Skyttä <ville.skytta@iki.fi>
+boncheff <boncheff@users.noreply.github.com>
+clslgrnc <clslgrnc@users.noreply.github.com>
+dongfangtianyu <7629022+dongfangtianyu@users.noreply.github.com>
+popokatapepel <jan-seins@hotmail.de>
+reedip <reedip.banerjee@nectechnologies.in>
+rfportilla <rfportilla@yahoo.com>
+voith <voithjm1@gmail.com>
diff --git a/contrib/python/requests-mock/py3/LICENSE b/contrib/python/requests-mock/py3/LICENSE
new file mode 100644
index 0000000000..d88b5784ba
--- /dev/null
+++ b/contrib/python/requests-mock/py3/LICENSE
@@ -0,0 +1,180 @@
+Copyright (c) 2014, Jamie Lennox
+All rights reserved.
+
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+
diff --git a/contrib/python/requests-mock/py3/README.rst b/contrib/python/requests-mock/py3/README.rst
new file mode 100644
index 0000000000..1281d339a5
--- /dev/null
+++ b/contrib/python/requests-mock/py3/README.rst
@@ -0,0 +1,101 @@
+===============================
+requests-mock
+===============================
+
+.. image:: https://badge.fury.io/py/requests-mock.png
+ :target: https://pypi.org/project/requests-mock/
+
+Intro
+=====
+
+`requests-mock` provides a building block to stub out the HTTP `requests`_ portions of your testing code.
+You should check out the `docs`_ for more information.
+
+The Basics
+==========
+
+Everything in `requests`_ eventually goes through an adapter to do the transport work.
+`requests-mock` creates a custom `adapter` that allows you to predefine responses when certain URIs are called.
+
+There are then a number of methods provided to get the adapter used.
+
+A simple example:
+
+.. code:: python
+
+ >>> import requests
+ >>> import requests_mock
+
+ >>> session = requests.Session()
+ >>> adapter = requests_mock.Adapter()
+ >>> session.mount('mock://', adapter)
+
+ >>> adapter.register_uri('GET', 'mock://test.com', text='data')
+ >>> resp = session.get('mock://test.com')
+ >>> resp.status_code, resp.text
+ (200, 'data')
+
+Obviously having all URLs be `mock://` prefixed isn't going to be useful,
+so you can use `requests_mock.Mocker` to get the adapter into place.
+
+As a context manager:
+
+.. code:: python
+
+ >>> with requests_mock.Mocker() as m:
+ ... m.get('http://test.com', text='data')
+ ... requests.get('http://test.com').text
+ ...
+ 'data'
+
+Or as a decorator:
+
+.. code:: python
+
+ >>> @requests_mock.Mocker()
+ ... def test_func(m):
+ ... m.get('http://test.com', text='data')
+ ... return requests.get('http://test.com').text
+ ...
+ >>> test_func()
+ 'data'
+
+Or as a pytest fixture:
+
+.. code:: python
+
+ >>> def test_simple(requests_mock):
+ ... requests_mock.get('http://test.com', text='data')
+ ... assert 'data' == requests.get('http://test.com').text
+
+For more information, check out the `docs`_.
+
+Reporting Bugs
+==============
+
+Development and bug tracking is performed on `GitHub`_.
+
+Questions
+=========
+
+There is a tag dedicated to `requests-mock` on `StackOverflow`_ where you can ask usage questions.
+
+License
+=======
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+.. _requests: https://requests.readthedocs.io
+.. _docs: https://requests-mock.readthedocs.io/
+.. _GitHub: https://github.com/jamielennox/requests-mock
+.. _StackOverflow: https://stackoverflow.com/questions/tagged/requests-mock
diff --git a/contrib/python/requests-mock/py3/requests_mock/__init__.py b/contrib/python/requests-mock/py3/requests_mock/__init__.py
new file mode 100644
index 0000000000..799b752ee7
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/__init__.py
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from requests_mock.adapter import Adapter, ANY
+from requests_mock.exceptions import MockException, NoMockAddress
+from requests_mock.mocker import mock, Mocker, MockerCore
+from requests_mock.mocker import DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT
+from requests_mock.response import create_response, CookieJar
+
+
+__all__ = ['Adapter',
+ 'ANY',
+ 'create_response',
+ 'CookieJar',
+ 'mock',
+ 'Mocker',
+ 'MockerCore',
+ 'MockException',
+ 'NoMockAddress',
+
+ 'DELETE',
+ 'GET',
+ 'HEAD',
+ 'OPTIONS',
+ 'PATCH',
+ 'POST',
+ 'PUT',
+ ]
diff --git a/contrib/python/requests-mock/py3/requests_mock/adapter.py b/contrib/python/requests-mock/py3/requests_mock/adapter.py
new file mode 100644
index 0000000000..e0560b2226
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/adapter.py
@@ -0,0 +1,323 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import weakref
+
+from requests.adapters import BaseAdapter
+from requests.utils import requote_uri
+import six
+from six.moves.urllib import parse as urlparse
+
+from requests_mock import exceptions
+from requests_mock.request import _RequestObjectProxy
+from requests_mock.response import _MatcherResponse
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+try:
+ import purl
+ purl_types = (purl.URL,)
+except ImportError:
+ purl = None
+ purl_types = ()
+
+ANY = object()
+
+
+class _RequestHistoryTracker(object):
+
+ def __init__(self):
+ self.request_history = []
+
+ def _add_to_history(self, request):
+ self.request_history.append(request)
+
+ @property
+ def last_request(self):
+ """Retrieve the latest request sent"""
+ try:
+ return self.request_history[-1]
+ except IndexError:
+ return None
+
+ @property
+ def called(self):
+ return self.call_count > 0
+
+ @property
+ def called_once(self):
+ return self.call_count == 1
+
+ @property
+ def call_count(self):
+ return len(self.request_history)
+
+ def reset(self):
+ self.request_history = []
+
+
+class _RunRealHTTP(Exception):
+ """A fake exception to jump out of mocking and allow a real request.
+
+ This exception is caught at the mocker level and allows it to execute this
+ request through the real requests mechanism rather than the mocker.
+
+ It should never be exposed to a user.
+ """
+
+
+class _Matcher(_RequestHistoryTracker):
+ """Contains all the information about a provided URL to match."""
+
+ def __init__(self, method, url, responses, complete_qs, request_headers,
+ additional_matcher, real_http, case_sensitive):
+ """
+ :param bool complete_qs: Match the entire query string. By default URLs
+ match if all the provided matcher query arguments are matched and
+ extra query arguments are ignored. Set complete_qs to true to
+ require that the entire query string needs to match.
+ """
+ super(_Matcher, self).__init__()
+
+ self._method = method
+ self._url = url
+ self._responses = responses
+ self._complete_qs = complete_qs
+ self._request_headers = request_headers
+ self._real_http = real_http
+ self._additional_matcher = additional_matcher
+
+ # url can be a regex object or ANY so don't always run urlparse
+ if isinstance(url, six.string_types):
+ url_parts = urlparse.urlparse(url)
+ self._scheme = url_parts.scheme.lower()
+ self._netloc = url_parts.netloc.lower()
+ self._path = requote_uri(url_parts.path or '/')
+ self._query = url_parts.query
+
+ if not case_sensitive:
+ self._path = self._path.lower()
+ self._query = self._query.lower()
+
+ elif isinstance(url, purl_types):
+ self._scheme = url.scheme()
+ self._netloc = url.netloc()
+ self._path = url.path()
+ self._query = url.query()
+
+ if not case_sensitive:
+ self._path = self._path.lower()
+ self._query = self._query.lower()
+
+ else:
+ self._scheme = None
+ self._netloc = None
+ self._path = None
+ self._query = None
+
+ def _match_method(self, request):
+ if self._method is ANY:
+ return True
+
+ if request.method.lower() == self._method.lower():
+ return True
+
+ return False
+
+ def _match_url(self, request):
+ if self._url is ANY:
+ return True
+
+ # regular expression matching
+ if hasattr(self._url, 'search'):
+ return self._url.search(request.url) is not None
+
+ # scheme is always matched case insensitive
+ if self._scheme and request.scheme.lower() != self._scheme:
+ return False
+
+ # netloc is always matched case insensitive
+ if self._netloc and request.netloc.lower() != self._netloc:
+ return False
+
+ if (request.path or '/') != self._path:
+ return False
+
+ # construct our own qs structure as we remove items from it below
+ request_qs = urlparse.parse_qs(request.query, keep_blank_values=True)
+ matcher_qs = urlparse.parse_qs(self._query, keep_blank_values=True)
+
+ for k, vals in six.iteritems(matcher_qs):
+ for v in vals:
+ try:
+ request_qs.get(k, []).remove(v)
+ except ValueError:
+ return False
+
+ if self._complete_qs:
+ for v in six.itervalues(request_qs):
+ if v:
+ return False
+
+ return True
+
+ def _match_headers(self, request):
+ for k, vals in six.iteritems(self._request_headers):
+
+ try:
+ header = request.headers[k]
+ except KeyError:
+ # NOTE(jamielennox): This seems to be a requests 1.2/2
+ # difference, in 2 they are just whatever the user inputted in
+ # 1 they are bytes. Let's optionally handle both and look at
+ # removing this when we depend on requests 2.
+ if not isinstance(k, six.text_type):
+ return False
+
+ try:
+ header = request.headers[k.encode('utf-8')]
+ except KeyError:
+ return False
+
+ if header != vals:
+ return False
+
+ return True
+
+ def _match_additional(self, request):
+ if callable(self._additional_matcher):
+ return self._additional_matcher(request)
+
+ if self._additional_matcher is not None:
+ raise TypeError("Unexpected format of additional matcher.")
+
+ return True
+
+ def _match(self, request):
+ return (self._match_method(request) and
+ self._match_url(request) and
+ self._match_headers(request) and
+ self._match_additional(request))
+
+ def __call__(self, request):
+ if not self._match(request):
+ return None
+
+ # doing this before _add_to_history means real requests are not stored
+ # in the request history. I'm not sure what is better here.
+ if self._real_http:
+ raise _RunRealHTTP()
+
+ if len(self._responses) > 1:
+ response_matcher = self._responses.pop(0)
+ else:
+ response_matcher = self._responses[0]
+
+ self._add_to_history(request)
+ return response_matcher.get_response(request)
+
+
+class Adapter(BaseAdapter, _RequestHistoryTracker):
+    """A fake adapter that can return predefined responses.
+
+ """
+ def __init__(self, case_sensitive=False):
+ super(Adapter, self).__init__()
+ self._case_sensitive = case_sensitive
+ self._matchers = []
+
+ def send(self, request, **kwargs):
+ request = _RequestObjectProxy(request,
+ case_sensitive=self._case_sensitive,
+ **kwargs)
+ self._add_to_history(request)
+
+ for matcher in reversed(self._matchers):
+ try:
+ resp = matcher(request)
+ except Exception:
+ request._matcher = weakref.ref(matcher)
+ raise
+
+ if resp is not None:
+ request._matcher = weakref.ref(matcher)
+ resp.connection = self
+ logger.debug('{} {} {}'.format(request._request.method,
+ request._request.url,
+ resp.status_code))
+ return resp
+
+ raise exceptions.NoMockAddress(request)
+
+ def close(self):
+ pass
+
+ def register_uri(self, method, url, response_list=None, **kwargs):
+ """Register a new URI match and fake response.
+
+ :param str method: The HTTP method to match.
+ :param str url: The URL to match.
+ """
+ complete_qs = kwargs.pop('complete_qs', False)
+ additional_matcher = kwargs.pop('additional_matcher', None)
+ request_headers = kwargs.pop('request_headers', {})
+ real_http = kwargs.pop('_real_http', False)
+ json_encoder = kwargs.pop('json_encoder', None)
+
+ if response_list and kwargs:
+ raise RuntimeError('You should specify either a list of '
+ 'responses OR response kwargs. Not both.')
+ elif real_http and (response_list or kwargs):
+ raise RuntimeError('You should specify either response data '
+ 'OR real_http. Not both.')
+ elif not response_list:
+ if json_encoder is not None:
+ kwargs['json_encoder'] = json_encoder
+ response_list = [] if real_http else [kwargs]
+
+ # NOTE(jamielennox): case_sensitive is not present as a kwarg because i
+ # think there would be an edge case where the adapter and register_uri
+ # had different values.
+ # Ideally case_sensitive would be a value passed to match() however
+    # this would change the contract of matchers so we pass it to the
+ # proxy and the matcher separately.
+ responses = [_MatcherResponse(**k) for k in response_list]
+ matcher = _Matcher(method,
+ url,
+ responses,
+ case_sensitive=self._case_sensitive,
+ complete_qs=complete_qs,
+ additional_matcher=additional_matcher,
+ request_headers=request_headers,
+ real_http=real_http)
+ self.add_matcher(matcher)
+ return matcher
+
+ def add_matcher(self, matcher):
+ """Register a custom matcher.
+
+ A matcher is a callable that takes a `requests.Request` and returns a
+ `requests.Response` if it matches or None if not.
+
+ :param callable matcher: The matcher to execute.
+ """
+ self._matchers.append(matcher)
+
+ def reset(self):
+ super(Adapter, self).reset()
+ for matcher in self._matchers:
+ matcher.reset()
+
+
+__all__ = ['Adapter']
diff --git a/contrib/python/requests-mock/py3/requests_mock/compat.py b/contrib/python/requests-mock/py3/requests_mock/compat.py
new file mode 100644
index 0000000000..8b6293af15
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/compat.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class _FakeHTTPMessage(object):
+
+ def __init__(self, headers):
+ self.headers = headers
+
+ def getheaders(self, name):
+ try:
+ return [self.headers[name]]
+ except KeyError:
+ return []
+
+ def get_all(self, name, failobj=None):
+ # python 3 only, overrides email.message.Message.get_all
+ try:
+ return [self.headers[name]]
+ except KeyError:
+ return failobj
diff --git a/contrib/python/requests-mock/py3/requests_mock/contrib/__init__.py b/contrib/python/requests-mock/py3/requests_mock/contrib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/contrib/__init__.py
diff --git a/contrib/python/requests-mock/py3/requests_mock/contrib/_pytest_plugin.py b/contrib/python/requests-mock/py3/requests_mock/contrib/_pytest_plugin.py
new file mode 100644
index 0000000000..bb6cd2b973
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/contrib/_pytest_plugin.py
@@ -0,0 +1,86 @@
+import pytest
+
+
+# RHEL 7 ships pytest 2.7 which doesn't have the 'bool' type to addini. This
+# broke pytest for EPEL: https://bugzilla.redhat.com/show_bug.cgi?id=1605138
+# If it's older than 2.9 we handle bool conversion ourselves. Remove this when
+# we can rely on a newer pytest.
+#
+# Version 3 is also where the @yield_fixture decorator was deprecated and you
+# can now just use @fixture, so we handle both of those cases as well.
+
+try:
+ _pytest_version = tuple([
+ int(x) for x in pytest.__version__.split('.')[:2]
+ ])
+ _pytest29 = _pytest_version >= (2, 9)
+ _pytest30 = _pytest_version >= (3, 0)
+except Exception:
+ _pytest29 = False
+ _pytest30 = False
+
+
+if not _pytest29:
+ _case_type = None
+ _case_default = 'false'
+
+ # Copied from pytest 2.9.0 where bool was introduced. It's what happens
+ # internally if we specify a bool type argument.
+ def _strtobool(val):
+ """Convert a string representation of truth to true (1) or false (0).
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+
+ .. note:: copied from distutils.util
+ """
+ val = val.lower()
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
+ return 1
+ elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
+
+ def _bool_value(value):
+ return bool(_strtobool(value.strip()))
+
+else:
+ _case_type = 'bool'
+ _case_default = False
+
+ def _bool_value(value):
+ return value
+
+
+if _pytest30:
+ _fixture_type = pytest.fixture
+else:
+ _fixture_type = pytest.yield_fixture
+
+
+def pytest_addoption(parser):
+ parser.addini('requests_mock_case_sensitive',
+ 'Use case sensitive matching in requests_mock',
+ type=_case_type,
+ default=_case_default)
+
+
+@_fixture_type(scope='function') # executed on every test
+def requests_mock(request):
+ """Mock out the requests component of your code with defined responses.
+
+ Mocks out any requests made through the python requests library with useful
+ responses for unit testing. See:
+ https://requests-mock.readthedocs.io/en/latest/
+ """
+ # pytest plugins get loaded immediately. If we import requests_mock it
+ # imports requests and then SSL which prevents gevent patching. Late load.
+ import requests_mock as rm_module
+
+ case_sensitive = request.config.getini('requests_mock_case_sensitive')
+ kw = {'case_sensitive': _bool_value(case_sensitive)}
+
+ with rm_module.Mocker(**kw) as m:
+ yield m
diff --git a/contrib/python/requests-mock/py3/requests_mock/contrib/fixture.py b/contrib/python/requests-mock/py3/requests_mock/contrib/fixture.py
new file mode 100644
index 0000000000..0c23947566
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/contrib/fixture.py
@@ -0,0 +1,27 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from requests_mock import mocker
+
+
+class Fixture(fixtures.Fixture, mocker.MockerCore):
+
+ def __init__(self, **kwargs):
+ fixtures.Fixture.__init__(self)
+ mocker.MockerCore.__init__(self, **kwargs)
+
+ def setUp(self):
+ super(Fixture, self).setUp()
+ self.start()
+ self.addCleanup(self.stop)
diff --git a/contrib/python/requests-mock/py3/requests_mock/exceptions.py b/contrib/python/requests-mock/py3/requests_mock/exceptions.py
new file mode 100644
index 0000000000..feeb1aa312
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/exceptions.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class MockException(Exception):
+ """Base Exception for library"""
+
+
+class NoMockAddress(MockException):
+ """The requested URL was not mocked"""
+
+ def __init__(self, request):
+ self.request = request
+
+ def __str__(self):
+ return "No mock address: %s %s" % (self.request.method,
+ self.request.url)
+
+
+class InvalidRequest(MockException):
+ """This call cannot be made under a mocked environment"""
diff --git a/contrib/python/requests-mock/py3/requests_mock/mocker.py b/contrib/python/requests-mock/py3/requests_mock/mocker.py
new file mode 100644
index 0000000000..d3bc85538e
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/mocker.py
@@ -0,0 +1,342 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import functools
+import sys
+import threading
+import types
+
+import requests
+import six
+
+from requests_mock import adapter
+from requests_mock import exceptions
+
+DELETE = 'DELETE'
+GET = 'GET'
+HEAD = 'HEAD'
+OPTIONS = 'OPTIONS'
+PATCH = 'PATCH'
+POST = 'POST'
+PUT = 'PUT'
+
+_original_send = requests.Session.send
+
+# NOTE(phodge): we need to use an RLock (reentrant lock) here because
+# requests.Session.send() is reentrant. See further comments where we
+# monkeypatch get_adapter()
+_send_lock = threading.RLock()
+
+
+@contextlib.contextmanager
+def threading_rlock(timeout):
+ kwargs = {}
+ if sys.version_info.major >= 3:
+ # python2 doesn't support the timeout argument
+ kwargs['timeout'] = timeout
+
+ if not _send_lock.acquire(**kwargs):
+ m = "Could not acquire threading lock - possible deadlock scenario"
+ raise Exception(m)
+
+ try:
+ yield
+ finally:
+ _send_lock.release()
+
+
+def _is_bound_method(method):
+ """
+    A bound method's self is the object it is bound to;
+    an unbound method's self is None.
+ """
+ if isinstance(method, types.MethodType) and six.get_method_self(method):
+ return True
+ return False
+
+
+def _set_method(target, name, method):
+ """ Set a mocked method onto the target.
+
+    Target may be either an instance of a Session object or the
+    requests.Session class. First we bind the method if it's an instance.
+
+ If method is a bound_method, can direct setattr
+ """
+ if not isinstance(target, type) and not _is_bound_method(method):
+ method = six.create_bound_method(method, target)
+
+ setattr(target, name, method)
+
+
+class MockerCore(object):
+ """A wrapper around common mocking functions.
+
+ Automate the process of mocking the requests library. This will keep the
+ same general options available and prevent repeating code.
+ """
+
+ _PROXY_FUNCS = {
+ 'last_request',
+ 'add_matcher',
+ 'request_history',
+ 'called',
+ 'called_once',
+ 'call_count',
+ 'reset',
+ }
+
+ case_sensitive = False
+ """case_sensitive handles a backwards incompatible bug. The URL used to
+ match against our matches and that is saved in request_history is always
+ lowercased. This is incorrect as it reports incorrect history to the user
+ and doesn't allow case sensitive path matching.
+
+ Unfortunately fixing this change is backwards incompatible in the 1.X
+ series as people may rely on this behaviour. To work around this you can
+ globally set:
+
+ requests_mock.mock.case_sensitive = True
+
+ or for pytest set in your configuration:
+
+ [pytest]
+ requests_mock_case_sensitive = True
+
+ which will prevent the lowercase being executed and return case sensitive
+ url and query information.
+
+ This will become the default in a 2.X release. See bug: #1584008.
+ """
+
+ def __init__(self, session=None, **kwargs):
+ if session and not isinstance(session, requests.Session):
+ raise TypeError("Only a requests.Session object can be mocked")
+
+ self._mock_target = session or requests.Session
+ self.case_sensitive = kwargs.pop('case_sensitive', self.case_sensitive)
+ self._adapter = (
+ kwargs.pop('adapter', None) or
+ adapter.Adapter(case_sensitive=self.case_sensitive)
+ )
+
+ self._json_encoder = kwargs.pop('json_encoder', None)
+ self.real_http = kwargs.pop('real_http', False)
+ self._last_send = None
+
+ if kwargs:
+ raise TypeError('Unexpected Arguments: %s' % ', '.join(kwargs))
+
+ def start(self):
+ """Start mocking requests.
+
+ Install the adapter and the wrappers required to intercept requests.
+ """
+ if self._last_send:
+ raise RuntimeError('Mocker has already been started')
+
+ # backup last `send` for restoration on `self.stop`
+ self._last_send = self._mock_target.send
+ self._last_get_adapter = self._mock_target.get_adapter
+
+ def _fake_get_adapter(session, url):
+ return self._adapter
+
+ def _fake_send(session, request, **kwargs):
+ # NOTE(phodge): we need to use a threading lock here in case there
+ # are multiple threads running - one thread could restore the
+ # original get_adapter() just as a second thread is about to
+ # execute _original_send() below
+ with threading_rlock(timeout=10):
+ # mock get_adapter
+ #
+ # NOTE(phodge): requests.Session.send() is actually
+ # reentrant due to how it resolves redirects with nested
+ # calls to send(), however the reentry occurs _after_ the
+ # call to self.get_adapter(), so it doesn't matter that we
+ # will restore _last_get_adapter before a nested send() has
+ # completed as long as we monkeypatch get_adapter() each
+ # time immediately before calling original send() like we
+ # are doing here.
+ _set_method(session, "get_adapter", _fake_get_adapter)
+
+ # NOTE(jamielennox): self._last_send vs _original_send. Whilst
+ # it seems like here we would use _last_send there is the
+ # possibility that the user has messed up and is somehow
+ # nesting their mockers. If we call last_send at this point
+ # then we end up calling this function again and the outer
+ # level adapter ends up winning. All we really care about here
+ # is that our adapter is in place before calling send so we
+ # always jump directly to the real function so that our most
+ # recently patched send call ends up putting in the most recent
+ # adapter. It feels funny, but it works.
+
+ try:
+ return _original_send(session, request, **kwargs)
+ except exceptions.NoMockAddress:
+ if not self.real_http:
+ raise
+ except adapter._RunRealHTTP:
+ # this mocker wants you to run the request through the real
+ # requests library rather than the mocking. Let it.
+ pass
+ finally:
+ # restore get_adapter
+ _set_method(session, "get_adapter", self._last_get_adapter)
+
+ # if we are here it means we must run the real http request
+ # Or, with nested mocks, to the parent mock, that is why we use
+ # _last_send here instead of _original_send
+ if isinstance(self._mock_target, type):
+ return self._last_send(session, request, **kwargs)
+ else:
+ return self._last_send(request, **kwargs)
+
+ _set_method(self._mock_target, "send", _fake_send)
+
+ def stop(self):
+ """Stop mocking requests.
+
+ This should have no impact if mocking has not been started.
+ When nesting mockers, make sure to stop the innermost first.
+ """
+ if self._last_send:
+ self._mock_target.send = self._last_send
+ self._last_send = None
+
+ # for familiarity with MagicMock
+ def reset_mock(self):
+ self.reset()
+
+ def __getattr__(self, name):
+ if name in self._PROXY_FUNCS:
+ try:
+ return getattr(self._adapter, name)
+ except AttributeError:
+ pass
+
+ raise AttributeError(name)
+
+ def register_uri(self, *args, **kwargs):
+ # you can pass real_http here, but it's private to pass direct to the
+ # adapter, because if you pass direct to the adapter you'll see the exc
+ kwargs['_real_http'] = kwargs.pop('real_http', False)
+ kwargs.setdefault('json_encoder', self._json_encoder)
+ return self._adapter.register_uri(*args, **kwargs)
+
+ def request(self, *args, **kwargs):
+ return self.register_uri(*args, **kwargs)
+
+ def get(self, *args, **kwargs):
+ return self.request(GET, *args, **kwargs)
+
+ def options(self, *args, **kwargs):
+ return self.request(OPTIONS, *args, **kwargs)
+
+ def head(self, *args, **kwargs):
+ return self.request(HEAD, *args, **kwargs)
+
+ def post(self, *args, **kwargs):
+ return self.request(POST, *args, **kwargs)
+
+ def put(self, *args, **kwargs):
+ return self.request(PUT, *args, **kwargs)
+
+ def patch(self, *args, **kwargs):
+ return self.request(PATCH, *args, **kwargs)
+
+ def delete(self, *args, **kwargs):
+ return self.request(DELETE, *args, **kwargs)
+
+
+class Mocker(MockerCore):
+ """The standard entry point for mock Adapter loading.
+ """
+
+ #: Defines with what should method name begin to be patched
+ TEST_PREFIX = 'test'
+
+ def __init__(self, **kwargs):
+ """Create a new mocker adapter.
+
+ :param str kw: Pass the mock object through to the decorated function
+ as this named keyword argument, rather than a positional argument.
+ :param bool real_http: True to send the request to the real requested
+ uri if there is not a mock installed for it. Defaults to False.
+ """
+ self._kw = kwargs.pop('kw', None)
+ super(Mocker, self).__init__(**kwargs)
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+
+ def __call__(self, obj):
+ if isinstance(obj, type):
+ return self.decorate_class(obj)
+
+ return self.decorate_callable(obj)
+
+ def copy(self):
+ """Returns an exact copy of current mock
+ """
+ m = type(self)(
+ kw=self._kw,
+ real_http=self.real_http,
+ case_sensitive=self.case_sensitive
+ )
+ return m
+
+ def decorate_callable(self, func):
+ """Decorates a callable
+
+ :param callable func: callable to decorate
+ """
+ @functools.wraps(func)
+ def inner(*args, **kwargs):
+ with self.copy() as m:
+ if self._kw:
+ kwargs[self._kw] = m
+ else:
+ args = list(args)
+ args.append(m)
+
+ return func(*args, **kwargs)
+
+ return inner
+
+ def decorate_class(self, klass):
+ """Decorates methods in a class with request_mock
+
+        Method will be decorated only if its name begins with `TEST_PREFIX`
+
+ :param object klass: class which methods will be decorated
+ """
+ for attr_name in dir(klass):
+ if not attr_name.startswith(self.TEST_PREFIX):
+ continue
+
+ attr = getattr(klass, attr_name)
+ if not hasattr(attr, '__call__'):
+ continue
+
+ m = self.copy()
+ setattr(klass, attr_name, m(attr))
+
+ return klass
+
+
+mock = Mocker
diff --git a/contrib/python/requests-mock/py3/requests_mock/py.typed b/contrib/python/requests-mock/py3/requests_mock/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/py.typed
diff --git a/contrib/python/requests-mock/py3/requests_mock/request.py b/contrib/python/requests-mock/py3/requests_mock/request.py
new file mode 100644
index 0000000000..05cbc3d4a3
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/request.py
@@ -0,0 +1,178 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import json
+
+import requests
+import six
+from six.moves.urllib import parse as urlparse
+
+
+class _RequestObjectProxy(object):
+ """A wrapper around a requests.Request that gives some extra information.
+
+    This will be important both for matching and so that when it's saved into
+ the request_history users will be able to access these properties.
+ """
+
+ def __init__(self, request, **kwargs):
+ self._request = request
+ self._matcher = None
+ self._url_parts_ = None
+ self._qs = None
+
+ # All of these params should always exist but we use a default
+ # to make the test setup easier.
+ self._timeout = kwargs.pop('timeout', None)
+ self._allow_redirects = kwargs.pop('allow_redirects', None)
+ self._verify = kwargs.pop('verify', None)
+ self._stream = kwargs.pop('stream', None)
+ self._cert = kwargs.pop('cert', None)
+ self._proxies = copy.deepcopy(kwargs.pop('proxies', {}))
+
+ # FIXME(jamielennox): This is part of bug #1584008 and should default
+ # to True (or simply removed) in a major version bump.
+ self._case_sensitive = kwargs.pop('case_sensitive', False)
+
+ def __getattr__(self, name):
+ # there should be a better way to exclude this, but I don't want to
+ # implement __setstate__ just not forward it to the request. You can't
+ # actually define the method and raise AttributeError there either.
+ if name in ('__setstate__',):
+ raise AttributeError(name)
+
+ return getattr(self._request, name)
+
+ @property
+ def _url_parts(self):
+ if self._url_parts_ is None:
+ url = self._request.url
+
+ if not self._case_sensitive:
+ url = url.lower()
+
+ self._url_parts_ = urlparse.urlparse(url)
+
+ return self._url_parts_
+
+ @property
+ def scheme(self):
+ return self._url_parts.scheme
+
+ @property
+ def netloc(self):
+ return self._url_parts.netloc
+
+ @property
+ def hostname(self):
+ try:
+ return self.netloc.split(':')[0]
+ except IndexError:
+ return ''
+
+ @property
+ def port(self):
+ components = self.netloc.split(':')
+
+ try:
+ return int(components[1])
+ except (IndexError, ValueError):
+ pass
+
+ if self.scheme == 'https':
+ return 443
+ if self.scheme == 'http':
+ return 80
+
+ # The default return shouldn't matter too much because if you are
+ # wanting to test this value you really should be explicitly setting it
+ # somewhere. 0 at least is a boolean False and an int.
+ return 0
+
+ @property
+ def path(self):
+ return self._url_parts.path
+
+ @property
+ def query(self):
+ return self._url_parts.query
+
+ @property
+ def qs(self):
+ if self._qs is None:
+ self._qs = urlparse.parse_qs(self.query, keep_blank_values=True)
+
+ return self._qs
+
+ @property
+ def timeout(self):
+ return self._timeout
+
+ @property
+ def allow_redirects(self):
+ return self._allow_redirects
+
+ @property
+ def verify(self):
+ return self._verify
+
+ @property
+ def stream(self):
+ return self._stream
+
+ @property
+ def cert(self):
+ return self._cert
+
+ @property
+ def proxies(self):
+ return self._proxies
+
+ @classmethod
+ def _create(cls, *args, **kwargs):
+ return cls(requests.Request(*args, **kwargs).prepare())
+
+ @property
+ def text(self):
+ body = self.body
+
+ if isinstance(body, six.binary_type):
+ body = body.decode('utf-8')
+
+ return body
+
+ def json(self, **kwargs):
+ return json.loads(self.text, **kwargs)
+
+ def __getstate__(self):
+ # Can't pickle a weakref, but it's a weakref so ok to drop it.
+ d = self.__dict__.copy()
+ d['_matcher'] = None
+ return d
+
+ @property
+ def matcher(self):
+ """The matcher that this request was handled by.
+
+ The matcher object is handled by a weakref. It will return the matcher
+ object if it is still available - so if the mock is still in place. If
+ the matcher is not available it will return None.
+ """
+ # if unpickled or not from a response this will be None
+ if self._matcher is None:
+ return None
+
+ return self._matcher()
+
+ def __str__(self):
+ return "{0.method} {0.url}".format(self._request)
diff --git a/contrib/python/requests-mock/py3/requests_mock/response.py b/contrib/python/requests-mock/py3/requests_mock/response.py
new file mode 100644
index 0000000000..5855539273
--- /dev/null
+++ b/contrib/python/requests-mock/py3/requests_mock/response.py
@@ -0,0 +1,281 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json as jsonutils
+
+from requests.adapters import HTTPAdapter
+from requests.cookies import MockRequest, MockResponse
+from requests.cookies import RequestsCookieJar
+from requests.cookies import merge_cookies, cookiejar_from_dict
+from requests.packages.urllib3.response import HTTPResponse
+from requests.utils import get_encoding_from_headers
+import six
+
+from requests_mock import compat
+from requests_mock import exceptions
+
+_BODY_ARGS = frozenset(['raw', 'body', 'content', 'text', 'json'])
+_HTTP_ARGS = frozenset([
+ 'status_code',
+ 'reason',
+ 'headers',
+ 'cookies',
+ 'json_encoder',
+])
+
+_DEFAULT_STATUS = 200
+_http_adapter = HTTPAdapter()
+
+
+class CookieJar(RequestsCookieJar):
+
+ def set(self, name, value, **kwargs):
+ """Add a cookie to the Jar.
+
+ :param str name: cookie name/key.
+ :param str value: cookie value.
+ :param int version: Integer or None. Netscape cookies have version 0.
+ RFC 2965 and RFC 2109 cookies have a version cookie-attribute of 1.
+ However, note that cookielib may 'downgrade' RFC 2109 cookies to
+ Netscape cookies, in which case version is 0.
+ :param str port: String representing a port or a set of ports
+ (eg. '80', or '80,8080'),
+ :param str domain: The domain the cookie should apply to.
+ :param str path: Cookie path (a string, eg. '/acme/rocket_launchers').
+ :param bool secure: True if cookie should only be returned over a
+ secure connection.
+ :param int expires: Integer expiry date in seconds since epoch or None.
+ :param bool discard: True if this is a session cookie.
+ :param str comment: String comment from the server explaining the
+ function of this cookie.
+ :param str comment_url: URL linking to a comment from the server
+ explaining the function of this cookie.
+ """
+ # just here to provide the function documentation
+ return super(CookieJar, self).set(name, value, **kwargs)
+
+
+def _check_body_arguments(**kwargs):
+ # mutual exclusion, only 1 body method may be provided
+ provided = [x for x in _BODY_ARGS if kwargs.pop(x, None) is not None]
+
+ if len(provided) > 1:
+ raise RuntimeError('You may only supply one body element. You '
+ 'supplied %s' % ', '.join(provided))
+
+ extra = [x for x in kwargs if x not in _HTTP_ARGS]
+
+ if extra:
+ raise TypeError('Too many arguments provided. Unexpected '
+ 'arguments %s.' % ', '.join(extra))
+
+
+class _FakeConnection(object):
+ """An object that can mock the necessary parts of a socket interface."""
+
+ def send(self, request, **kwargs):
+ msg = 'This response was created without a connection. You are ' \
+ 'therefore unable to make a request directly on that connection.'
+ raise exceptions.InvalidRequest(msg)
+
+ def close(self):
+ pass
+
+
+def _extract_cookies(request, response, cookies):
+ """Add cookies to the response.
+
+ Cookies in requests are extracted from the headers in the original_response
+ httplib.HTTPMessage which we don't create so we have to do this step
+ manually.
+ """
+ # This will add cookies set manually via the Set-Cookie or Set-Cookie2
+ # header but this only allows 1 cookie to be set.
+ http_message = compat._FakeHTTPMessage(response.headers)
+ response.cookies.extract_cookies(MockResponse(http_message),
+ MockRequest(request))
+
+    # This allows you to pass either a CookieJar or a dictionary to register_uri
+ # or directly to create_response. To allow more than one cookie to be set.
+ if cookies:
+ merge_cookies(response.cookies, cookies)
+
+
+class _IOReader(six.BytesIO):
+ """A reader that makes a BytesIO look like a HTTPResponse.
+
+ A HTTPResponse will return an empty string when you read from it after
+ the socket has been closed. A BytesIO will raise a ValueError. For
+ compatibility we want to do the same thing a HTTPResponse does.
+ """
+
+ def read(self, *args, **kwargs):
+ if self.closed:
+ return six.b('')
+
+ # if the file is open, but you asked for zero bytes read you should get
+ # back zero without closing the stream.
+ if len(args) > 0 and args[0] == 0:
+ return six.b('')
+
+ # not a new style object in python 2
+ result = six.BytesIO.read(self, *args, **kwargs)
+
+ # when using resp.iter_content(None) it'll go through a different
+ # request path in urllib3. This path checks whether the object is
+ # marked closed instead of the return value. see gh124.
+ if result == six.b(''):
+ self.close()
+
+ return result
+
+
+def create_response(request, **kwargs):
+ """
+ :param int status_code: The status code to return upon a successful
+ match. Defaults to 200.
+ :param HTTPResponse raw: A HTTPResponse object to return upon a
+ successful match.
+ :param io.IOBase body: An IO object with a read() method that can
+ return a body on successful match.
+ :param bytes content: A byte string to return upon a successful match.
+ :param unicode text: A text string to return upon a successful match.
+ :param object json: A python object to be converted to a JSON string
+ and returned upon a successful match.
+    :param class json_encoder: Encoder object to use for JSON.
+ :param dict headers: A dictionary object containing headers that are
+ returned upon a successful match.
+ :param CookieJar cookies: A cookie jar with cookies to set on the
+ response.
+
+ :returns requests.Response: A response object that can
+ be returned to requests.
+ """
+ connection = kwargs.pop('connection', _FakeConnection())
+
+ _check_body_arguments(**kwargs)
+
+ raw = kwargs.pop('raw', None)
+ body = kwargs.pop('body', None)
+ content = kwargs.pop('content', None)
+ text = kwargs.pop('text', None)
+ json = kwargs.pop('json', None)
+ headers = kwargs.pop('headers', {})
+ encoding = None
+
+ if content is not None and not isinstance(content, six.binary_type):
+ raise TypeError('Content should be binary data')
+ if text is not None and not isinstance(text, six.string_types):
+ raise TypeError('Text should be string data')
+
+ if json is not None:
+ encoder = kwargs.pop('json_encoder', None) or jsonutils.JSONEncoder
+ text = jsonutils.dumps(json, cls=encoder)
+ if text is not None:
+ encoding = get_encoding_from_headers(headers) or 'utf-8'
+ content = text.encode(encoding)
+ if content is not None:
+ body = _IOReader(content)
+ if not raw:
+ status = kwargs.get('status_code', _DEFAULT_STATUS)
+ reason = kwargs.get('reason',
+ six.moves.http_client.responses.get(status))
+
+ raw = HTTPResponse(status=status,
+ reason=reason,
+ headers=headers,
+ body=body or _IOReader(six.b('')),
+ decode_content=False,
+ enforce_content_length=False,
+ preload_content=False,
+ original_response=None)
+
+ response = _http_adapter.build_response(request, raw)
+ response.connection = connection
+
+ if encoding and not response.encoding:
+ response.encoding = encoding
+
+ _extract_cookies(request, response, kwargs.get('cookies'))
+
+ return response
+
+
+class _Context(object):
+ """Stores the data being used to process a current URL match."""
+
+ def __init__(self, headers, status_code, reason, cookies):
+ self.headers = headers
+ self.status_code = status_code
+ self.reason = reason
+ self.cookies = cookies
+
+
+class _MatcherResponse(object):
+
+ def __init__(self, **kwargs):
+ self._exc = kwargs.pop('exc', None)
+
+ # If the user is asking for an exception to be thrown then prevent them
+ # specifying any sort of body or status response as it won't be used.
+ # This may be protecting the user too much but can be removed later.
+ if self._exc and kwargs:
+ raise TypeError('Cannot provide other arguments with exc.')
+
+ _check_body_arguments(**kwargs)
+ self._params = kwargs
+
+ # whilst in general you shouldn't do type checking in python this
+ # makes sure we don't end up with differences between the way types
+ # are handled between python 2 and 3.
+ content = self._params.get('content')
+ text = self._params.get('text')
+
+ if content is not None and not (callable(content) or
+ isinstance(content, six.binary_type)):
+ raise TypeError('Content should be a callback or binary data')
+
+ if text is not None and not (callable(text) or
+ isinstance(text, six.string_types)):
+ raise TypeError('Text should be a callback or string data')
+
+ def get_response(self, request):
+ # if an error was requested then raise that instead of doing response
+ if self._exc:
+ raise self._exc
+
+ # If a cookie dict is passed convert it into a CookieJar so that the
+ # cookies object available in a callback context is always a jar.
+ cookies = self._params.get('cookies', CookieJar())
+ if isinstance(cookies, dict):
+ cookies = cookiejar_from_dict(cookies, CookieJar())
+
+ context = _Context(self._params.get('headers', {}).copy(),
+ self._params.get('status_code', _DEFAULT_STATUS),
+ self._params.get('reason'),
+ cookies)
+
+ # if a body element is a callback then execute it
+ def _call(f, *args, **kwargs):
+ return f(request, context, *args, **kwargs) if callable(f) else f
+
+ return create_response(request,
+ json=_call(self._params.get('json')),
+ text=_call(self._params.get('text')),
+ content=_call(self._params.get('content')),
+ body=_call(self._params.get('body')),
+ raw=self._params.get('raw'),
+ json_encoder=self._params.get('json_encoder'),
+ status_code=context.status_code,
+ reason=context.reason,
+ headers=context.headers,
+ cookies=context.cookies)
diff --git a/contrib/python/requests-mock/py3/ya.make b/contrib/python/requests-mock/py3/ya.make
new file mode 100644
index 0000000000..b022b84019
--- /dev/null
+++ b/contrib/python/requests-mock/py3/ya.make
@@ -0,0 +1,54 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(1.11.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/requests
+ contrib/python/six
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ requests_mock.contrib._pytest_plugin
+ requests_mock.contrib.fixture
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ requests_mock/__init__.py
+ requests_mock/__init__.pyi
+ requests_mock/adapter.py
+ requests_mock/adapter.pyi
+ requests_mock/compat.py
+ requests_mock/contrib/__init__.py
+ requests_mock/contrib/_pytest_plugin.py
+ requests_mock/contrib/_pytest_plugin.pyi
+ requests_mock/contrib/fixture.py
+ requests_mock/exceptions.py
+ requests_mock/exceptions.pyi
+ requests_mock/mocker.py
+ requests_mock/mocker.pyi
+ requests_mock/request.py
+ requests_mock/request.pyi
+ requests_mock/response.py
+ requests_mock/response.pyi
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/requests-mock/py3/
+ .dist-info/METADATA
+ .dist-info/entry_points.txt
+ .dist-info/top_level.txt
+ requests_mock/py.typed
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/requests-mock/ya.make b/contrib/python/requests-mock/ya.make
new file mode 100644
index 0000000000..ed5ea40daa
--- /dev/null
+++ b/contrib/python/requests-mock/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/requests-mock/py2)
+ELSE()
+ PEERDIR(contrib/python/requests-mock/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/requests-oauthlib/.dist-info/METADATA b/contrib/python/requests-oauthlib/.dist-info/METADATA
new file mode 100644
index 0000000000..975ce567fc
--- /dev/null
+++ b/contrib/python/requests-oauthlib/.dist-info/METADATA
@@ -0,0 +1,245 @@
+Metadata-Version: 2.1
+Name: requests-oauthlib
+Version: 1.3.1
+Summary: OAuthlib authentication support for Requests.
+Home-page: https://github.com/requests/requests-oauthlib
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.com
+License: ISC
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: oauthlib (>=3.0.0)
+Requires-Dist: requests (>=2.0.0)
+Provides-Extra: rsa
+Requires-Dist: oauthlib[signedtoken] (>=3.0.0) ; extra == 'rsa'
+
+Requests-OAuthlib |build-status| |coverage-status| |docs|
+=========================================================
+
+This project provides first-class OAuth library support for `Requests <http://python-requests.org>`_.
+
+The OAuth 1 workflow
+--------------------
+
+OAuth 1 can seem overly complicated and it sure has its quirks. Luckily,
+requests_oauthlib hides most of these and lets you focus on the task at hand.
+
+Accessing protected resources using requests_oauthlib is as simple as:
+
+.. code-block:: pycon
+
+ >>> from requests_oauthlib import OAuth1Session
+ >>> twitter = OAuth1Session('client_key',
+ client_secret='client_secret',
+ resource_owner_key='resource_owner_key',
+ resource_owner_secret='resource_owner_secret')
+ >>> url = 'https://api.twitter.com/1/account/settings.json'
+ >>> r = twitter.get(url)
+
+Before accessing resources you will need to obtain a few credentials from your
+provider (e.g. Twitter) and authorization from the user for whom you wish to
+retrieve resources for. You can read all about this in the full
+`OAuth 1 workflow guide on RTD <https://requests-oauthlib.readthedocs.io/en/latest/oauth1_workflow.html>`_.
+
+The OAuth 2 workflow
+--------------------
+
+OAuth 2 is generally simpler than OAuth 1 but comes in more flavours. The most
+common being the Authorization Code Grant, also known as the WebApplication
+flow.
+
+Fetching a protected resource after obtaining an access token can be extremely
+simple. However, before accessing resources you will need to obtain a few
+credentials from your provider (e.g. Google) and authorization from the user
+for whom you wish to retrieve resources for. You can read all about this in the
+full `OAuth 2 workflow guide on RTD <https://requests-oauthlib.readthedocs.io/en/latest/oauth2_workflow.html>`_.
+
+Installation
+-------------
+
+To install requests and requests_oauthlib you can use pip:
+
+.. code-block:: bash
+
+ $ pip install requests requests_oauthlib
+
+.. |build-status| image:: https://github.com/requests/requests-oauthlib/actions/workflows/run-tests.yml/badge.svg
+ :target: https://github.com/requests/requests-oauthlib/actions
+.. |coverage-status| image:: https://img.shields.io/coveralls/requests/requests-oauthlib.svg
+ :target: https://coveralls.io/r/requests/requests-oauthlib
+.. |docs| image:: https://readthedocs.org/projects/requests-oauthlib/badge/
+ :alt: Documentation Status
+ :scale: 100%
+ :target: https://requests-oauthlib.readthedocs.io/
+
+
+History
+-------
+
+v1.3.1 (21 January 2022)
+++++++++++++++++++++++++
+
+- Add initial support for OAuth Mutual TLS (draft-ietf-oauth-mtls)
+- Add eBay compliance fix
+- Add Spotify OAuth 2 Tutorial
+- Add support for python 3.8, 3.9
+- Fixed LinkedIn Compliance Fixes
+- Fixed ReadTheDocs Documentation and sphinx errors
+- Moved pipeline to GitHub Actions
+
+v1.3.0 (6 November 2019)
+++++++++++++++++++++++++
+
+- Instagram compliance fix
+- Added ``force_querystring`` argument to fetch_token() method on OAuth2Session
+
+v1.2.0 (14 January 2019)
+++++++++++++++++++++++++
+
+- This project now depends on OAuthlib 3.0.0 and above. It does **not** support
+ versions of OAuthlib before 3.0.0.
+- Updated oauth2 tests to use 'sess' for an OAuth2Session instance instead of `auth`
+  because OAuth2Session objects and methods accept an `auth` parameter which is
+ typically an instance of `requests.auth.HTTPBasicAuth`
+- `OAuth2Session.fetch_token` previously tried to guess how and where to provide
+ "client" and "user" credentials incorrectly. This was incompatible with some
+ OAuth servers and incompatible with breaking changes in oauthlib that seek to
+ correctly provide the `client_id`. The older implementation also did not raise
+ the correct exceptions when username and password are not present on Legacy
+ clients.
+- Avoid automatic netrc authentication for OAuth2Session.
+
+v1.1.0 (9 January 2019)
++++++++++++++++++++++++
+
+- Adjusted version specifier for ``oauthlib`` dependency: this project is
+ not yet compatible with ``oauthlib`` 3.0.0.
+- Dropped dependency on ``nose``.
+- Minor changes to clean up the code and make it more readable/maintainable.
+
+v1.0.0 (4 June 2018)
+++++++++++++++++++++
+
+- **Removed support for Python 2.6 and Python 3.3.**
+ This project now supports Python 2.7, and Python 3.4 and above.
+- Added several examples to the documentation.
+- Added plentymarkets compliance fix.
+- Added a ``token`` property to OAuth1Session, to match the corresponding
+ ``token`` property on OAuth2Session.
+
+v0.8.0 (14 February 2017)
++++++++++++++++++++++++++
+
+- Added Fitbit compliance fix.
+- Fixed an issue where newlines in the response body for the access token
+ request would cause errors when trying to extract the token.
+- Fixed an issue introduced in v0.7.0 where users passing ``auth`` to several
+ methods would encounter conflicts with the ``client_id`` and
+ ``client_secret``-derived auth. The user-supplied ``auth`` argument is now
+ used in preference to those options.
+
+v0.7.0 (22 September 2016)
+++++++++++++++++++++++++++
+
+- Allowed ``OAuth2Session.request`` to take the ``client_id`` and
+ ``client_secret`` parameters for the purposes of automatic token refresh,
+ which may need them.
+
+v0.6.2 (12 July 2016)
++++++++++++++++++++++
+
+- Use ``client_id`` and ``client_secret`` for the Authorization header if
+ provided.
+- Allow explicit bypass of the Authorization header by setting ``auth=False``.
+- Pass through the ``proxies`` kwarg when refreshing tokens.
+- Miscellaneous cleanups.
+
+v0.6.1 (19 February 2016)
++++++++++++++++++++++++++
+
+- Fixed a bug when sending authorization in headers with no username and
+ password present.
+- Make sure we clear the session token before obtaining a new one.
+- Some improvements to the Slack compliance fix.
+- Avoid timing problems around token refresh.
+- Allow passing arbitrary arguments to requests when calling
+ ``fetch_request_token`` and ``fetch_access_token``.
+
+v0.6.0 (14 December 2015)
++++++++++++++++++++++++++
+
+- Add compliance fix for Slack.
+- Add compliance fix for Mailchimp.
+- ``TokenRequestDenied`` exceptions now carry the entire response, not just the
+ status code.
+- Pass through keyword arguments when refreshing tokens automatically.
+- Send authorization in headers, not just body, to maximize compatibility.
+- More getters/setters available for OAuth2 session client values.
+- Allow sending custom headers when refreshing tokens, and set some defaults.
+
+
+v0.5.0 (4 May 2015)
++++++++++++++++++++
+- Fix ``TypeError`` being raised instead of ``TokenMissing`` error.
+- Raise requests exceptions on 4XX and 5XX responses in the OAuth2 flow.
+- Avoid ``AttributeError`` when initializing the ``OAuth2Session`` class
+ without complete client information.
+
+v0.4.2 (16 October 2014)
+++++++++++++++++++++++++
+- New ``authorized`` property on OAuth1Session and OAuth2Session, which allows
+ you to easily determine if the session is already authorized with OAuth tokens
+ or not.
+- New ``TokenMissing`` and ``VerifierMissing`` exception classes for OAuth1Session:
+ this will make it easier to catch and identify these exceptions.
+
+v0.4.1 (6 June 2014)
+++++++++++++++++++++
+- New install target ``[rsa]`` for people using OAuth1 RSA-SHA1 signature
+ method.
+- Fixed bug in OAuth2 where supplied state param was not used in auth url.
+- OAuth2 HTTPS checking can be disabled by setting environment variable
+ ``OAUTHLIB_INSECURE_TRANSPORT``.
+- OAuth1 now re-authorizes upon redirects.
+- OAuth1 token fetching now raises a detailed error message when the
+ response body is incorrectly encoded or the request was denied.
+- Added support for custom OAuth1 clients.
+- OAuth2 compliance fix for Sina Weibo.
+- Multiple fixes to facebook compliance fix.
+- Compliance fixes now re-encode body properly as bytes in Python 3.
+- Logging now properly done under ``requests_oauthlib`` namespace instead
+ of piggybacking on oauthlib namespace.
+- Logging introduced for OAuth1 auth and session.
+
+v0.4.0 (29 September 2013)
+++++++++++++++++++++++++++
+- OAuth1Session methods only return unicode strings. #55.
+- Renamed requests_oauthlib.core to requests_oauthlib.oauth1_auth for consistency. #79.
+- Added Facebook compliance fix and access_token_response hook to OAuth2Session. #63.
+- Added LinkedIn compliance fix.
+- Added refresh_token_response compliance hook, invoked before parsing the refresh token.
+- Correctly limit compliance hooks to running only once!
+- Content type guessing should only be done when no content type is given
+- OAuth1 now updates r.headers instead of replacing it with non case insensitive dict
+- Remove last use of Response.content (in OAuth1Session). #44.
+- State param can now be supplied in OAuth2Session.authorize_url
+
+
diff --git a/contrib/python/requests-oauthlib/.dist-info/top_level.txt b/contrib/python/requests-oauthlib/.dist-info/top_level.txt
new file mode 100644
index 0000000000..55d4f9073f
--- /dev/null
+++ b/contrib/python/requests-oauthlib/.dist-info/top_level.txt
@@ -0,0 +1 @@
+requests_oauthlib
diff --git a/contrib/python/requests-oauthlib/AUTHORS.rst b/contrib/python/requests-oauthlib/AUTHORS.rst
new file mode 100644
index 0000000000..c8fba5e997
--- /dev/null
+++ b/contrib/python/requests-oauthlib/AUTHORS.rst
@@ -0,0 +1,25 @@
+Requests-oauthlib is written and maintained by Kenneth Reitz and various
+contributors:
+
+Development Lead
+----------------
+
+- Kenneth Reitz <me@kennethreitz.com>
+
+Patches and Suggestions
+-----------------------
+
+- Cory Benfield <cory@lukasa.co.uk>
+- Ib Lundgren <ib.lundgren@gmail.com>
+- Devin Sevilla <dasevilla@gmail.com>
+- Imad Mouhtassem <mouhtasi@gmail.com>
+- Johan Euphrosine <proppy@google.com>
+- Johannes Spielmann <js@shezi.de>
+- Martin Trigaux <me@mart-e.be>
+- Matt McClure <matt.mcclure@mapmyfitness.com>
+- Mikhail Sobolev <mss@mawhrin.net>
+- Paul Bonser <misterpib@gmail.com>
+- Vinay Raikar <rockraikar@gmail.com>
+- kracekumar <me@kracekumar.com>
+- David Baumgold <david@davidbaumgold.com>
+- Craig Anderson <craiga@craiga.id.au>
diff --git a/contrib/python/requests-oauthlib/LICENSE b/contrib/python/requests-oauthlib/LICENSE
new file mode 100644
index 0000000000..de09f408ce
--- /dev/null
+++ b/contrib/python/requests-oauthlib/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2014 Kenneth Reitz.
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/contrib/python/requests-oauthlib/README.rst b/contrib/python/requests-oauthlib/README.rst
new file mode 100644
index 0000000000..9fd1bb9767
--- /dev/null
+++ b/contrib/python/requests-oauthlib/README.rst
@@ -0,0 +1,58 @@
+Requests-OAuthlib |build-status| |coverage-status| |docs|
+=========================================================
+
+This project provides first-class OAuth library support for `Requests <http://python-requests.org>`_.
+
+The OAuth 1 workflow
+--------------------
+
+OAuth 1 can seem overly complicated and it sure has its quirks. Luckily,
+requests_oauthlib hides most of these and lets you focus on the task at hand.
+
+Accessing protected resources using requests_oauthlib is as simple as:
+
+.. code-block:: pycon
+
+ >>> from requests_oauthlib import OAuth1Session
+ >>> twitter = OAuth1Session('client_key',
+ client_secret='client_secret',
+ resource_owner_key='resource_owner_key',
+ resource_owner_secret='resource_owner_secret')
+ >>> url = 'https://api.twitter.com/1/account/settings.json'
+ >>> r = twitter.get(url)
+
+Before accessing resources you will need to obtain a few credentials from your
+provider (e.g. Twitter) and authorization from the user for whom you wish to
+retrieve resources for. You can read all about this in the full
+`OAuth 1 workflow guide on RTD <https://requests-oauthlib.readthedocs.io/en/latest/oauth1_workflow.html>`_.
+
+The OAuth 2 workflow
+--------------------
+
+OAuth 2 is generally simpler than OAuth 1 but comes in more flavours. The most
+common being the Authorization Code Grant, also known as the WebApplication
+flow.
+
+Fetching a protected resource after obtaining an access token can be extremely
+simple. However, before accessing resources you will need to obtain a few
+credentials from your provider (e.g. Google) and authorization from the user
+for whom you wish to retrieve resources for. You can read all about this in the
+full `OAuth 2 workflow guide on RTD <https://requests-oauthlib.readthedocs.io/en/latest/oauth2_workflow.html>`_.
+
+Installation
+-------------
+
+To install requests and requests_oauthlib you can use pip:
+
+.. code-block:: bash
+
+ $ pip install requests requests_oauthlib
+
+.. |build-status| image:: https://github.com/requests/requests-oauthlib/actions/workflows/run-tests.yml/badge.svg
+ :target: https://github.com/requests/requests-oauthlib/actions
+.. |coverage-status| image:: https://img.shields.io/coveralls/requests/requests-oauthlib.svg
+ :target: https://coveralls.io/r/requests/requests-oauthlib
+.. |docs| image:: https://readthedocs.org/projects/requests-oauthlib/badge/
+ :alt: Documentation Status
+ :scale: 100%
+ :target: https://requests-oauthlib.readthedocs.io/
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/__init__.py b/contrib/python/requests-oauthlib/requests_oauthlib/__init__.py
new file mode 100644
index 0000000000..0d3e49f991
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/__init__.py
@@ -0,0 +1,19 @@
+import logging
+
+from .oauth1_auth import OAuth1
+from .oauth1_session import OAuth1Session
+from .oauth2_auth import OAuth2
+from .oauth2_session import OAuth2Session, TokenUpdated
+
+__version__ = "1.3.1"
+
+import requests
+
+if requests.__version__ < "2.0.0":
+ msg = (
+ "You are using requests version %s, which is older than "
+ "requests-oauthlib expects, please upgrade to 2.0.0 or later."
+ )
+ raise Warning(msg % requests.__version__)
+
+logging.getLogger("requests_oauthlib").addHandler(logging.NullHandler())
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/__init__.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/__init__.py
new file mode 100644
index 0000000000..0e8e3ac84f
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/__init__.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+
+from .facebook import facebook_compliance_fix
+from .fitbit import fitbit_compliance_fix
+from .slack import slack_compliance_fix
+from .instagram import instagram_compliance_fix
+from .mailchimp import mailchimp_compliance_fix
+from .weibo import weibo_compliance_fix
+from .plentymarkets import plentymarkets_compliance_fix
+from .ebay import ebay_compliance_fix
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/douban.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/douban.py
new file mode 100644
index 0000000000..ecc57b0818
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/douban.py
@@ -0,0 +1,17 @@
+import json
+
+from oauthlib.common import to_unicode
+
+
+def douban_compliance_fix(session):
+ def fix_token_type(r):
+ token = json.loads(r.text)
+ token.setdefault("token_type", "Bearer")
+ fixed_token = json.dumps(token)
+ r._content = to_unicode(fixed_token).encode("utf-8")
+ return r
+
+ session._client_default_token_placement = "query"
+ session.register_compliance_hook("access_token_response", fix_token_type)
+
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/ebay.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/ebay.py
new file mode 100644
index 0000000000..4aa423b3fe
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/ebay.py
@@ -0,0 +1,23 @@
+import json
+from oauthlib.common import to_unicode
+
+
+def ebay_compliance_fix(session):
+ def _compliance_fix(response):
+ token = json.loads(response.text)
+
+ # eBay responds with non-compliant token types.
+ # https://developer.ebay.com/api-docs/static/oauth-client-credentials-grant.html
+ # https://developer.ebay.com/api-docs/static/oauth-auth-code-grant-request.html
+ # Modify these to be "Bearer".
+ if token.get("token_type") in ["Application Access Token", "User Access Token"]:
+ token["token_type"] = "Bearer"
+ fixed_token = json.dumps(token)
+ response._content = to_unicode(fixed_token).encode("utf-8")
+
+ return response
+
+ session.register_compliance_hook("access_token_response", _compliance_fix)
+ session.register_compliance_hook("refresh_token_response", _compliance_fix)
+
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/facebook.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/facebook.py
new file mode 100644
index 0000000000..90e7921272
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/facebook.py
@@ -0,0 +1,33 @@
+from json import dumps
+
+try:
+ from urlparse import parse_qsl
+except ImportError:
+ from urllib.parse import parse_qsl
+
+from oauthlib.common import to_unicode
+
+
+def facebook_compliance_fix(session):
+ def _compliance_fix(r):
+ # if Facebook claims to be sending us json, let's trust them.
+ if "application/json" in r.headers.get("content-type", {}):
+ return r
+
+ # Facebook returns a content-type of text/plain when sending their
+ # x-www-form-urlencoded responses, along with a 200. If not, let's
+ # assume we're getting JSON and bail on the fix.
+ if "text/plain" in r.headers.get("content-type", {}) and r.status_code == 200:
+ token = dict(parse_qsl(r.text, keep_blank_values=True))
+ else:
+ return r
+
+ expires = token.get("expires")
+ if expires is not None:
+ token["expires_in"] = expires
+ token["token_type"] = "Bearer"
+ r._content = to_unicode(dumps(token)).encode("UTF-8")
+ return r
+
+ session.register_compliance_hook("access_token_response", _compliance_fix)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/fitbit.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/fitbit.py
new file mode 100644
index 0000000000..7e62702401
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/fitbit.py
@@ -0,0 +1,25 @@
+"""
+The Fitbit API breaks from the OAuth2 RFC standard by returning an "errors"
+object list, rather than a single "error" string. This puts hooks in place so
+that oauthlib can process an error in the results from access token and refresh
+token responses. This is necessary to prevent getting the generic red herring
+MissingTokenError.
+"""
+
+from json import loads, dumps
+
+from oauthlib.common import to_unicode
+
+
+def fitbit_compliance_fix(session):
+ def _missing_error(r):
+ token = loads(r.text)
+ if "errors" in token:
+ # Set the error to the first one we have
+ token["error"] = token["errors"][0]["errorType"]
+ r._content = to_unicode(dumps(token)).encode("UTF-8")
+ return r
+
+ session.register_compliance_hook("access_token_response", _missing_error)
+ session.register_compliance_hook("refresh_token_response", _missing_error)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/instagram.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/instagram.py
new file mode 100644
index 0000000000..4e07fe08b5
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/instagram.py
@@ -0,0 +1,26 @@
+try:
+ from urlparse import urlparse, parse_qs
+except ImportError:
+ from urllib.parse import urlparse, parse_qs
+
+from oauthlib.common import add_params_to_uri
+
+
+def instagram_compliance_fix(session):
+ def _non_compliant_param_name(url, headers, data):
+ # If the user has already specified the token in the URL
+ # then there's nothing to do.
+ # If the specified token is different from ``session.access_token``,
+ # we assume the user intends to override the access token.
+ url_query = dict(parse_qs(urlparse(url).query))
+ token = url_query.get("access_token")
+ if token:
+ # Nothing to do, just return.
+ return url, headers, data
+
+ token = [("access_token", session.access_token)]
+ url = add_params_to_uri(url, token)
+ return url, headers, data
+
+ session.register_compliance_hook("protected_request", _non_compliant_param_name)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/mailchimp.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/mailchimp.py
new file mode 100644
index 0000000000..c69ce9fdae
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/mailchimp.py
@@ -0,0 +1,23 @@
+import json
+
+from oauthlib.common import to_unicode
+
+
+def mailchimp_compliance_fix(session):
+ def _null_scope(r):
+ token = json.loads(r.text)
+ if "scope" in token and token["scope"] is None:
+ token.pop("scope")
+ r._content = to_unicode(json.dumps(token)).encode("utf-8")
+ return r
+
+ def _non_zero_expiration(r):
+ token = json.loads(r.text)
+ if "expires_in" in token and token["expires_in"] == 0:
+ token["expires_in"] = 3600
+ r._content = to_unicode(json.dumps(token)).encode("utf-8")
+ return r
+
+ session.register_compliance_hook("access_token_response", _null_scope)
+ session.register_compliance_hook("access_token_response", _non_zero_expiration)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/plentymarkets.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/plentymarkets.py
new file mode 100644
index 0000000000..9f605f058c
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/plentymarkets.py
@@ -0,0 +1,29 @@
+from json import dumps, loads
+import re
+
+from oauthlib.common import to_unicode
+
+
+def plentymarkets_compliance_fix(session):
+ def _to_snake_case(n):
+ return re.sub("(.)([A-Z][a-z]+)", r"\1_\2", n).lower()
+
+ def _compliance_fix(r):
+ # Plenty returns the Token in CamelCase instead of _
+ if (
+ "application/json" in r.headers.get("content-type", {})
+ and r.status_code == 200
+ ):
+ token = loads(r.text)
+ else:
+ return r
+
+ fixed_token = {}
+ for k, v in token.items():
+ fixed_token[_to_snake_case(k)] = v
+
+ r._content = to_unicode(dumps(fixed_token)).encode("UTF-8")
+ return r
+
+ session.register_compliance_hook("access_token_response", _compliance_fix)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/slack.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/slack.py
new file mode 100644
index 0000000000..3f574b03ad
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/slack.py
@@ -0,0 +1,37 @@
+try:
+ from urlparse import urlparse, parse_qs
+except ImportError:
+ from urllib.parse import urlparse, parse_qs
+
+from oauthlib.common import add_params_to_uri
+
+
+def slack_compliance_fix(session):
+ def _non_compliant_param_name(url, headers, data):
+ # If the user has already specified the token, either in the URL
+ # or in a data dictionary, then there's nothing to do.
+ # If the specified token is different from ``session.access_token``,
+ # we assume the user intends to override the access token.
+ url_query = dict(parse_qs(urlparse(url).query))
+ token = url_query.get("token")
+ if not token and isinstance(data, dict):
+ token = data.get("token")
+
+ if token:
+ # Nothing to do, just return.
+ return url, headers, data
+
+ if not data:
+ data = {"token": session.access_token}
+ elif isinstance(data, dict):
+ data["token"] = session.access_token
+ else:
+ # ``data`` is something other than a dict: maybe a stream,
+ # maybe a file object, maybe something else. We can't easily
+ # modify it, so we'll set the token by modifying the URL instead.
+ token = [("token", session.access_token)]
+ url = add_params_to_uri(url, token)
+ return url, headers, data
+
+ session.register_compliance_hook("protected_request", _non_compliant_param_name)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/weibo.py b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/weibo.py
new file mode 100644
index 0000000000..6733abeb15
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/compliance_fixes/weibo.py
@@ -0,0 +1,15 @@
+from json import loads, dumps
+
+from oauthlib.common import to_unicode
+
+
+def weibo_compliance_fix(session):
+ def _missing_token_type(r):
+ token = loads(r.text)
+ token["token_type"] = "Bearer"
+ r._content = to_unicode(dumps(token)).encode("UTF-8")
+ return r
+
+ session._client.default_token_placement = "query"
+ session.register_compliance_hook("access_token_response", _missing_token_type)
+ return session
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/oauth1_auth.py b/contrib/python/requests-oauthlib/requests_oauthlib/oauth1_auth.py
new file mode 100644
index 0000000000..cfbbd5902c
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/oauth1_auth.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import logging
+
+from oauthlib.common import extract_params
+from oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
+from oauthlib.oauth1 import SIGNATURE_TYPE_BODY
+from requests.compat import is_py3
+from requests.utils import to_native_string
+from requests.auth import AuthBase
+
+CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
+CONTENT_TYPE_MULTI_PART = "multipart/form-data"
+
+if is_py3:
+ unicode = str
+
+log = logging.getLogger(__name__)
+
+# OBS!: Correct signing of requests are conditional on invoking OAuth1
+# as the last step of preparing a request, or at least having the
+# content-type set properly.
+class OAuth1(AuthBase):
+ """Signs the request using OAuth 1 (RFC5849)"""
+
+ client_class = Client
+
+ def __init__(
+ self,
+ client_key,
+ client_secret=None,
+ resource_owner_key=None,
+ resource_owner_secret=None,
+ callback_uri=None,
+ signature_method=SIGNATURE_HMAC,
+ signature_type=SIGNATURE_TYPE_AUTH_HEADER,
+ rsa_key=None,
+ verifier=None,
+ decoding="utf-8",
+ client_class=None,
+ force_include_body=False,
+ **kwargs
+ ):
+
+ try:
+ signature_type = signature_type.upper()
+ except AttributeError:
+ pass
+
+ client_class = client_class or self.client_class
+
+ self.force_include_body = force_include_body
+
+ self.client = client_class(
+ client_key,
+ client_secret,
+ resource_owner_key,
+ resource_owner_secret,
+ callback_uri,
+ signature_method,
+ signature_type,
+ rsa_key,
+ verifier,
+ decoding=decoding,
+ **kwargs
+ )
+
+ def __call__(self, r):
+ """Add OAuth parameters to the request.
+
+ Parameters may be included from the body if the content-type is
+ urlencoded, if no content type is set a guess is made.
+ """
+ # Overwriting url is safe here as request will not modify it past
+ # this point.
+ log.debug("Signing request %s using client %s", r, self.client)
+
+ content_type = r.headers.get("Content-Type", "")
+ if (
+ not content_type
+ and extract_params(r.body)
+ or self.client.signature_type == SIGNATURE_TYPE_BODY
+ ):
+ content_type = CONTENT_TYPE_FORM_URLENCODED
+ if not isinstance(content_type, unicode):
+ content_type = content_type.decode("utf-8")
+
+ is_form_encoded = CONTENT_TYPE_FORM_URLENCODED in content_type
+
+ log.debug(
+ "Including body in call to sign: %s",
+ is_form_encoded or self.force_include_body,
+ )
+
+ if is_form_encoded:
+ r.headers["Content-Type"] = CONTENT_TYPE_FORM_URLENCODED
+ r.url, headers, r.body = self.client.sign(
+ unicode(r.url), unicode(r.method), r.body or "", r.headers
+ )
+ elif self.force_include_body:
+ # To allow custom clients to work on non form encoded bodies.
+ r.url, headers, r.body = self.client.sign(
+ unicode(r.url), unicode(r.method), r.body or "", r.headers
+ )
+ else:
+ # Omit body data in the signing of non form-encoded requests
+ r.url, headers, _ = self.client.sign(
+ unicode(r.url), unicode(r.method), None, r.headers
+ )
+
+ r.prepare_headers(headers)
+ r.url = to_native_string(r.url)
+ log.debug("Updated url: %s", r.url)
+ log.debug("Updated headers: %s", headers)
+ log.debug("Updated body: %r", r.body)
+ return r
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/oauth1_session.py b/contrib/python/requests-oauthlib/requests_oauthlib/oauth1_session.py
new file mode 100644
index 0000000000..88f2853ca0
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/oauth1_session.py
@@ -0,0 +1,400 @@
+from __future__ import unicode_literals
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
+import logging
+
+from oauthlib.common import add_params_to_uri
+from oauthlib.common import urldecode as _urldecode
+from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
+import requests
+
+from . import OAuth1
+
+
+log = logging.getLogger(__name__)
+
+
def urldecode(body):
    """Parse an urlencoded query string or a JSON document into a dict.

    Tries form decoding first; on any failure, falls back to ``json.loads``.
    """
    try:
        decoded = _urldecode(body)
    except Exception:
        import json

        return json.loads(body)
    else:
        return decoded
+
+
class TokenRequestDenied(ValueError):
    """Raised when the OAuth provider rejects a token request."""

    def __init__(self, message, response):
        super(TokenRequestDenied, self).__init__(message)
        # Keep the failed response so callers can inspect status and body.
        self.response = response

    @property
    def status_code(self):
        """For backwards-compatibility purposes"""
        return self.response.status_code
+
+
class TokenMissing(ValueError):
    """Raised when a token response lacks the expected token fields."""

    def __init__(self, message, response):
        super(TokenMissing, self).__init__(message)
        # Expose the offending response for caller-side debugging.
        self.response = response
+
+
class VerifierMissing(ValueError):
    """Raised when an access token is requested without a verifier set."""
+
+
class OAuth1Session(requests.Session):
    """Request signing and convenience methods for the oauth dance.

    What is the difference between OAuth1Session and OAuth1?

    OAuth1Session actually uses OAuth1 internally and its purpose is to assist
    in the OAuth workflow through convenience methods to prepare authorization
    URLs and parse the various token and redirection responses. It also
    provides rudimentary validation of responses.

    An example of the OAuth workflow using a basic CLI app and Twitter.

    >>> # Credentials obtained during the registration.
    >>> client_key = 'client key'
    >>> client_secret = 'secret'
    >>> callback_uri = 'https://127.0.0.1/callback'
    >>>
    >>> # Endpoints found in the OAuth provider API documentation
    >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
    >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
    >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
    >>>
    >>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri)
    >>>
    >>> # First step, fetch the request token.
    >>> oauth_session.fetch_request_token(request_token_url)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
    }
    >>>
    >>> # Second step. Follow this link and authorize
    >>> oauth_session.authorization_url(authorization_url)
    'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
    >>>
    >>> # Third step. Fetch the access token
    >>> redirect_response = raw_input('Paste the full redirect URL here.')
    >>> oauth_session.parse_authorization_response(redirect_response)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
        'oauth_verifier': 'w34o8967345',
    }
    >>> oauth_session.fetch_access_token(access_token_url)
    {
        'oauth_token': 'sdf0o9823sjdfsdf',
        'oauth_token_secret': '2kjshdfp92i34asdasd',
    }
    >>> # Done. You can now make OAuth requests.
    >>> status_url = 'http://api.twitter.com/1/statuses/update.json'
    >>> new_status = {'status': 'hello world!'}
    >>> oauth_session.post(status_url, data=new_status)
    <Response [200]>
    """

    def __init__(
        self,
        client_key,
        client_secret=None,
        resource_owner_key=None,
        resource_owner_secret=None,
        callback_uri=None,
        signature_method=SIGNATURE_HMAC,
        signature_type=SIGNATURE_TYPE_AUTH_HEADER,
        rsa_key=None,
        verifier=None,
        client_class=None,
        force_include_body=False,
        **kwargs
    ):
        """Construct the OAuth 1 session.

        :param client_key: A client specific identifier.
        :param client_secret: A client specific secret used to create HMAC and
                              plaintext signatures.
        :param resource_owner_key: A resource owner key, also referred to as
                                   request token or access token depending on
                                   when in the workflow it is used.
        :param resource_owner_secret: A resource owner secret obtained with
                                      either a request or access token. Often
                                      referred to as token secret.
        :param callback_uri: The URL the user is redirect back to after
                             authorization.
        :param signature_method: Signature methods determine how the OAuth
                                 signature is created. The three options are
                                 oauthlib.oauth1.SIGNATURE_HMAC (default),
                                 oauthlib.oauth1.SIGNATURE_RSA and
                                 oauthlib.oauth1.SIGNATURE_PLAIN.
        :param signature_type: Signature type decides where the OAuth
                               parameters are added. Either in the
                               Authorization header (default) or to the URL
                               query parameters or the request body. Defined as
                               oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,
                               oauthlib.oauth1.SIGNATURE_TYPE_QUERY and
                               oauthlib.oauth1.SIGNATURE_TYPE_BODY
                               respectively.
        :param rsa_key: The private RSA key as a string. Can only be used with
                        signature_method=oauthlib.oauth1.SIGNATURE_RSA.
        :param verifier: A verifier string to prove authorization was granted.
        :param client_class: A subclass of `oauthlib.oauth1.Client` to use with
                             `requests_oauthlib.OAuth1` instead of the default
        :param force_include_body: Always include the request body in the
                                   signature creation.
        :param **kwargs: Additional keyword arguments passed to `OAuth1`
        """
        super(OAuth1Session, self).__init__()
        # The OAuth1 auth object performs the actual request signing; this
        # session only orchestrates the three-step workflow around it.
        self._client = OAuth1(
            client_key,
            client_secret=client_secret,
            resource_owner_key=resource_owner_key,
            resource_owner_secret=resource_owner_secret,
            callback_uri=callback_uri,
            signature_method=signature_method,
            signature_type=signature_type,
            rsa_key=rsa_key,
            verifier=verifier,
            client_class=client_class,
            force_include_body=force_include_body,
            **kwargs
        )
        self.auth = self._client

    @property
    def token(self):
        """Dict view of the credentials currently held by the oauthlib client.

        Only non-empty values are included, so the dict may be empty.
        """
        oauth_token = self._client.client.resource_owner_key
        oauth_token_secret = self._client.client.resource_owner_secret
        oauth_verifier = self._client.client.verifier

        token_dict = {}
        if oauth_token:
            token_dict["oauth_token"] = oauth_token
        if oauth_token_secret:
            token_dict["oauth_token_secret"] = oauth_token_secret
        if oauth_verifier:
            token_dict["oauth_verifier"] = oauth_verifier

        return token_dict

    @token.setter
    def token(self, value):
        # Validates the dict and copies its fields onto the oauthlib client;
        # raises TokenMissing if "oauth_token" is absent.
        self._populate_attributes(value)

    @property
    def authorized(self):
        """Boolean that indicates whether this session has an OAuth token
        or not. If `self.authorized` is True, you can reasonably expect
        OAuth-protected requests to the resource to succeed. If
        `self.authorized` is False, you need the user to go through the OAuth
        authentication dance before OAuth-protected requests to the resource
        will succeed.
        """
        if self._client.client.signature_method == SIGNATURE_RSA:
            # RSA only uses resource_owner_key
            return bool(self._client.client.resource_owner_key)
        else:
            # other methods of authentication use all three pieces
            return (
                bool(self._client.client.client_secret)
                and bool(self._client.client.resource_owner_key)
                and bool(self._client.client.resource_owner_secret)
            )

    def authorization_url(self, url, request_token=None, **kwargs):
        """Create an authorization URL by appending request_token and optional
        kwargs to url.

        This is the second step in the OAuth 1 workflow. The user should be
        redirected to this authorization URL, grant access to you, and then
        be redirected back to you. The redirection back can either be specified
        during client registration or by supplying a callback URI per request.

        :param url: The authorization endpoint URL.
        :param request_token: The previously obtained request token.
        :param kwargs: Optional parameters to append to the URL.
        :returns: The authorization URL with new parameters embedded.

        An example using a registered default callback URI.

        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        >>> oauth_session.authorization_url(authorization_url)
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'
        >>> oauth_session.authorization_url(authorization_url, foo='bar')
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'

        An example using an explicit callback URI.

        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        >>> oauth_session.authorization_url(authorization_url)
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
        """
        kwargs["oauth_token"] = request_token or self._client.client.resource_owner_key
        log.debug("Adding parameters %s to url %s", kwargs, url)
        return add_params_to_uri(url, kwargs.items())

    def fetch_request_token(self, url, realm=None, **request_kwargs):
        r"""Fetch a request token.

        This is the first step in the OAuth 1 workflow. A request token is
        obtained by making a signed post request to url. The token is then
        parsed from the application/x-www-form-urlencoded response and ready
        to be used to construct an authorization url.

        :param url: The request token endpoint URL.
        :param realm: A list of realms to request access to.
        :param \*\*request_kwargs: Optional arguments passed to ''post''
            function in ''requests.Session''
        :returns: The response in dict format.

        Note that a previously set callback_uri will be reset for your
        convenience, or else signature creation will be incorrect on
        consecutive requests.

        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        """
        # Realms are space-separated in the OAuth Authorization header.
        self._client.client.realm = " ".join(realm) if realm else None
        token = self._fetch_token(url, **request_kwargs)
        log.debug("Resetting callback_uri and realm (not needed in next phase).")
        self._client.client.callback_uri = None
        self._client.client.realm = None
        return token

    def fetch_access_token(self, url, verifier=None, **request_kwargs):
        """Fetch an access token.

        This is the final step in the OAuth 1 workflow. An access token is
        obtained using all previously obtained credentials, including the
        verifier from the authorization step.

        Note that a previously set verifier will be reset for your
        convenience, or else signature creation will be incorrect on
        consecutive requests.

        :param url: The access token endpoint URL.
        :param verifier: A verifier string to prove authorization was granted.
        :returns: The response in dict format.
        :raises VerifierMissing: If no verifier was supplied or previously set.

        >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
        >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.parse_authorization_response(redirect_response)
        {
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
        }
        >>> oauth_session.fetch_access_token(access_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        """
        if verifier:
            self._client.client.verifier = verifier
        if not getattr(self._client.client, "verifier", None):
            raise VerifierMissing("No client verifier has been set.")
        token = self._fetch_token(url, **request_kwargs)
        log.debug("Resetting verifier attribute, should not be used anymore.")
        self._client.client.verifier = None
        return token

    def parse_authorization_response(self, url):
        """Extract parameters from the post authorization redirect response URL.

        :param url: The full URL that resulted from the user being redirected
                    back from the OAuth provider to you, the client.
        :returns: A dict of parameters extracted from the URL.

        >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.parse_authorization_response(redirect_response)
        {
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
        }
        """
        log.debug("Parsing token from query part of url %s", url)
        token = dict(urldecode(urlparse(url).query))
        log.debug("Updating internal client token attribute.")
        self._populate_attributes(token)
        self.token = token
        return token

    def _populate_attributes(self, token):
        """Copy token fields onto the oauthlib client.

        :raises TokenMissing: If the "oauth_token" key is absent.
        """
        if "oauth_token" in token:
            self._client.client.resource_owner_key = token["oauth_token"]
        else:
            raise TokenMissing(
                "Response does not contain a token: {resp}".format(resp=token), token
            )
        if "oauth_token_secret" in token:
            self._client.client.resource_owner_secret = token["oauth_token_secret"]
        if "oauth_verifier" in token:
            self._client.client.verifier = token["oauth_verifier"]

    def _fetch_token(self, url, **request_kwargs):
        """POST to *url* and parse the urlencoded token from the response.

        :raises TokenRequestDenied: On any HTTP status >= 400.
        :raises ValueError: If the response body cannot be decoded as
            urlencoded form data (or JSON via the module-level urldecode).
        """
        log.debug("Fetching token from %s using client %s", url, self._client.client)
        r = self.post(url, **request_kwargs)

        if r.status_code >= 400:
            error = "Token request failed with code %s, response was '%s'."
            raise TokenRequestDenied(error % (r.status_code, r.text), r)

        log.debug('Decoding token from response "%s"', r.text)
        try:
            token = dict(urldecode(r.text.strip()))
        except ValueError as e:
            error = (
                "Unable to decode token from token response. "
                "This is commonly caused by an unsuccessful request where"
                " a non urlencoded error message is returned. "
                "The decoding error was %s"
                "" % e
            )
            raise ValueError(error)

        log.debug("Obtained token %s", token)
        log.debug("Updating internal client attributes from token data.")
        self._populate_attributes(token)
        self.token = token
        return token

    def rebuild_auth(self, prepared_request, response):
        """
        When being redirected we should always strip Authorization
        header, since nonce may not be reused as per OAuth spec.
        """
        if "Authorization" in prepared_request.headers:
            # If we get redirected to a new host, we should strip out
            # any authentication headers.
            prepared_request.headers.pop("Authorization", True)
            prepared_request.prepare_auth(self.auth)
        return
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/oauth2_auth.py b/contrib/python/requests-oauthlib/requests_oauthlib/oauth2_auth.py
new file mode 100644
index 0000000000..b880f72f58
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/oauth2_auth.py
@@ -0,0 +1,37 @@
+from __future__ import unicode_literals
+from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
+from oauthlib.oauth2 import is_secure_transport
+from requests.auth import AuthBase
+
+
class OAuth2(AuthBase):
    """Adds proof of authorization (OAuth2 token) to the request."""

    def __init__(self, client_id=None, client=None, token=None):
        """Construct a new OAuth 2 authorization object.

        :param client_id: Client id obtained during registration
        :param client: :class:`oauthlib.oauth2.Client` to be used. Default is
                       WebApplicationClient which is useful for any
                       hosted application but not mobile or desktop.
        :param token: Token dictionary, must include access_token
                      and token_type.
        """
        if not client:
            client = WebApplicationClient(client_id, token=token)
        self._client = client
        # Mirror token fields onto the client so oauthlib can use them
        # when adding the token to outgoing requests.
        if token:
            for key, value in token.items():
                setattr(self._client, key, value)

    def __call__(self, r):
        """Append an OAuth 2 token to the request.

        Note that currently HTTPS is required for all requests. There may be
        a token type that allows for plain HTTP in the future and then this
        should be updated to allow plain HTTP on a white list basis.
        """
        if not is_secure_transport(r.url):
            raise InsecureTransportError()
        r.url, r.headers, r.body = self._client.add_token(
            r.url, http_method=r.method, body=r.body, headers=r.headers
        )
        return r
diff --git a/contrib/python/requests-oauthlib/requests_oauthlib/oauth2_session.py b/contrib/python/requests-oauthlib/requests_oauthlib/oauth2_session.py
new file mode 100644
index 0000000000..db4468089b
--- /dev/null
+++ b/contrib/python/requests-oauthlib/requests_oauthlib/oauth2_session.py
@@ -0,0 +1,540 @@
+from __future__ import unicode_literals
+
+import logging
+
+from oauthlib.common import generate_token, urldecode
+from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
+from oauthlib.oauth2 import LegacyApplicationClient
+from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
+import requests
+
+log = logging.getLogger(__name__)
+
+
class TokenUpdated(Warning):
    """Warning carrying a freshly refreshed token when no updater is set."""

    def __init__(self, token):
        super(TokenUpdated, self).__init__()
        # The refreshed token, so the catcher can persist it.
        self.token = token
+
+
class OAuth2Session(requests.Session):
    """Versatile OAuth 2 extension to :class:`requests.Session`.

    Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
    including the four core OAuth 2 grants.

    Can be used to create authorization urls, fetch tokens and access protected
    resources using the :class:`requests.Session` interface you are used to.

    - :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
    - :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
    - :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
    - :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant

    Note that the only time you will be using Implicit Grant from python is if
    you are driving a user agent able to obtain URL fragments.
    """

    def __init__(
        self,
        client_id=None,
        client=None,
        auto_refresh_url=None,
        auto_refresh_kwargs=None,
        scope=None,
        redirect_uri=None,
        token=None,
        state=None,
        token_updater=None,
        **kwargs
    ):
        """Construct a new OAuth 2 client session.

        :param client_id: Client id obtained during registration
        :param client: :class:`oauthlib.oauth2.Client` to be used. Default is
                       WebApplicationClient which is useful for any
                       hosted application but not mobile or desktop.
        :param scope: List of scopes you wish to request access to
        :param redirect_uri: Redirect URI you registered as callback
        :param token: Token dictionary, must include access_token
                      and token_type.
        :param state: State string used to prevent CSRF. This will be given
                      when creating the authorization url and must be supplied
                      when parsing the authorization response.
                      Can be either a string or a no argument callable.
        :auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
                           this if you wish the client to automatically refresh
                           your access tokens.
        :auto_refresh_kwargs: Extra arguments to pass to the refresh token
                              endpoint.
        :token_updater: Method with one argument, token, to be used to update
                        your token database on automatic token refresh. If not
                        set a TokenUpdated warning will be raised when a token
                        has been refreshed. This warning will carry the token
                        in its token argument.
        :param kwargs: Arguments to pass to the Session constructor.
        """
        super(OAuth2Session, self).__init__(**kwargs)
        self._client = client or WebApplicationClient(client_id, token=token)
        self.token = token or {}
        self.scope = scope
        self.redirect_uri = redirect_uri
        # `state` may be a fixed string or a no-argument callable that
        # generates one; `new_state` copes with either.
        self.state = state or generate_token
        self._state = state
        self.auto_refresh_url = auto_refresh_url
        self.auto_refresh_kwargs = auto_refresh_kwargs or {}
        self.token_updater = token_updater

        # Ensure that requests doesn't do any automatic auth. See #278.
        # The default behavior can be re-enabled by setting auth to None.
        self.auth = lambda r: r

        # Allow customizations for non compliant providers through various
        # hooks to adjust requests and responses.
        self.compliance_hook = {
            "access_token_response": set(),
            "refresh_token_response": set(),
            "protected_request": set(),
        }

    def new_state(self):
        """Generates a state string to be used in authorizations."""
        try:
            self._state = self.state()
            log.debug("Generated new state %s.", self._state)
        except TypeError:
            # `self.state` was a plain string rather than a callable.
            self._state = self.state
            log.debug("Re-using previously supplied state %s.", self._state)
        return self._state

    @property
    def client_id(self):
        # Delegated to the underlying oauthlib client.
        return getattr(self._client, "client_id", None)

    @client_id.setter
    def client_id(self, value):
        self._client.client_id = value

    @client_id.deleter
    def client_id(self):
        del self._client.client_id

    @property
    def token(self):
        # Delegated to the underlying oauthlib client.
        return getattr(self._client, "token", None)

    @token.setter
    def token(self, value):
        self._client.token = value
        self._client.populate_token_attributes(value)

    @property
    def access_token(self):
        # Delegated to the underlying oauthlib client.
        return getattr(self._client, "access_token", None)

    @access_token.setter
    def access_token(self, value):
        self._client.access_token = value

    @access_token.deleter
    def access_token(self):
        del self._client.access_token

    @property
    def authorized(self):
        """Boolean that indicates whether this session has an OAuth token
        or not. If `self.authorized` is True, you can reasonably expect
        OAuth-protected requests to the resource to succeed. If
        `self.authorized` is False, you need the user to go through the OAuth
        authentication dance before OAuth-protected requests to the resource
        will succeed.
        """
        return bool(self.access_token)

    def authorization_url(self, url, state=None, **kwargs):
        """Form an authorization URL.

        :param url: Authorization endpoint url, must be HTTPS.
        :param state: An optional state string for CSRF protection. If not
                      given it will be generated for you.
        :param kwargs: Extra parameters to include.
        :return: authorization_url, state
        """
        state = state or self.new_state()
        return (
            self._client.prepare_request_uri(
                url,
                redirect_uri=self.redirect_uri,
                scope=self.scope,
                state=state,
                **kwargs
            ),
            state,
        )

    def fetch_token(
        self,
        token_url,
        code=None,
        authorization_response=None,
        body="",
        auth=None,
        username=None,
        password=None,
        method="POST",
        force_querystring=False,
        timeout=None,
        headers=None,
        verify=True,
        proxies=None,
        include_client_id=None,
        client_secret=None,
        cert=None,
        **kwargs
    ):
        """Generic method for fetching an access token from the token endpoint.

        If you are using the MobileApplicationClient you will want to use
        `token_from_fragment` instead of `fetch_token`.

        The current implementation enforces the RFC guidelines.

        :param token_url: Token endpoint URL, must use HTTPS.
        :param code: Authorization code (used by WebApplicationClients).
        :param authorization_response: Authorization response URL, the callback
                                       URL of the request back to you. Used by
                                       WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
        :param auth: An auth tuple or method as accepted by `requests`.
        :param username: Username required by LegacyApplicationClients to appear
                         in the request body.
        :param password: Password required by LegacyApplicationClients to appear
                         in the request body.
        :param method: The HTTP method used to make the request. Defaults
                       to POST, but may also be GET. Other methods should
                       be added as needed.
        :param force_querystring: If True, force the request body to be sent
                                  in the querystring instead.
        :param timeout: Timeout of the request in seconds.
        :param headers: Dict to default request headers with.
        :param verify: Verify SSL certificate.
        :param proxies: The `proxies` argument is passed onto `requests`.
        :param include_client_id: Should the request body include the
                                  `client_id` parameter. Default is `None`,
                                  which will attempt to autodetect. This can be
                                  forced to always include (True) or never
                                  include (False).
        :param client_secret: The `client_secret` paired to the `client_id`.
                              This is generally required unless provided in the
                              `auth` tuple. If the value is `None`, it will be
                              omitted from the request, however if the value is
                              an empty string, an empty string will be sent.
        :param cert: Client certificate to send for OAuth 2.0 Mutual-TLS Client
                     Authentication (draft-ietf-oauth-mtls). Can either be the
                     path of a file containing the private key and certificate or
                     a tuple of two filenames for certificate and key.
        :param kwargs: Extra parameters to include in the token request.
        :return: A token dict
        """
        if not is_secure_transport(token_url):
            raise InsecureTransportError()

        # Resolve the authorization code: either explicit, parsed from the
        # redirect URL, or already stored on the client from an earlier parse.
        if not code and authorization_response:
            self._client.parse_request_uri_response(
                authorization_response, state=self._state
            )
            code = self._client.code
        elif not code and isinstance(self._client, WebApplicationClient):
            code = self._client.code
            if not code:
                raise ValueError(
                    "Please supply either code or " "authorization_response parameters."
                )

        # Earlier versions of this library build an HTTPBasicAuth header out of
        # `username` and `password`. The RFC states, however these attributes
        # must be in the request body and not the header.
        # If an upstream server is not spec compliant and requires them to
        # appear as an Authorization header, supply an explicit `auth` header
        # to this function.
        # This check will allow for empty strings, but not `None`.
        #
        # References
        # 4.3.2 - Resource Owner Password Credentials Grant
        # https://tools.ietf.org/html/rfc6749#section-4.3.2

        if isinstance(self._client, LegacyApplicationClient):
            if username is None:
                raise ValueError(
                    "`LegacyApplicationClient` requires both the "
                    "`username` and `password` parameters."
                )
            if password is None:
                raise ValueError(
                    "The required parameter `username` was supplied, "
                    "but `password` was not."
                )

        # merge username and password into kwargs for `prepare_request_body`
        if username is not None:
            kwargs["username"] = username
        if password is not None:
            kwargs["password"] = password

        # is an auth explicitly supplied?
        if auth is not None:
            # if we're dealing with the default of `include_client_id` (None):
            # we will assume the `auth` argument is for an RFC compliant server
            # and we should not send the `client_id` in the body.
            # This approach allows us to still force the client_id by submitting
            # `include_client_id=True` along with an `auth` object.
            if include_client_id is None:
                include_client_id = False

        # otherwise we may need to create an auth header
        else:
            # since we don't have an auth header, we MAY need to create one
            # it is possible that we want to send the `client_id` in the body
            # if so, `include_client_id` should be set to True
            # otherwise, we will generate an auth header
            if include_client_id is not True:
                client_id = self.client_id
                if client_id:
                    log.debug(
                        'Encoding `client_id` "%s" with `client_secret` '
                        "as Basic auth credentials.",
                        client_id,
                    )
                    client_secret = client_secret if client_secret is not None else ""
                    auth = requests.auth.HTTPBasicAuth(client_id, client_secret)

        if include_client_id:
            # this was pulled out of the params
            # it needs to be passed into prepare_request_body
            if client_secret is not None:
                kwargs["client_secret"] = client_secret

        body = self._client.prepare_request_body(
            code=code,
            body=body,
            redirect_uri=self.redirect_uri,
            include_client_id=include_client_id,
            **kwargs
        )

        headers = headers or {
            "Accept": "application/json",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
        }
        # Clear any stale token before requesting a new one.
        self.token = {}
        request_kwargs = {}
        if method.upper() == "POST":
            request_kwargs["params" if force_querystring else "data"] = dict(
                urldecode(body)
            )
        elif method.upper() == "GET":
            request_kwargs["params"] = dict(urldecode(body))
        else:
            raise ValueError("The method kwarg must be POST or GET.")

        r = self.request(
            method=method,
            url=token_url,
            timeout=timeout,
            headers=headers,
            auth=auth,
            verify=verify,
            proxies=proxies,
            cert=cert,
            **request_kwargs
        )

        log.debug("Request to fetch token completed with status %s.", r.status_code)
        log.debug("Request url was %s", r.request.url)
        log.debug("Request headers were %s", r.request.headers)
        log.debug("Request body was %s", r.request.body)
        log.debug("Response headers were %s and content %s.", r.headers, r.text)
        log.debug(
            "Invoking %d token response hooks.",
            len(self.compliance_hook["access_token_response"]),
        )
        # Give compliance hooks a chance to rewrite the raw response.
        for hook in self.compliance_hook["access_token_response"]:
            log.debug("Invoking hook %s.", hook)
            r = hook(r)

        self._client.parse_request_body_response(r.text, scope=self.scope)
        self.token = self._client.token
        log.debug("Obtained token %s.", self.token)
        return self.token

    def token_from_fragment(self, authorization_response):
        """Parse token from the URI fragment, used by MobileApplicationClients.

        :param authorization_response: The full URL of the redirect back to you
        :return: A token dict
        """
        self._client.parse_request_uri_response(
            authorization_response, state=self._state
        )
        self.token = self._client.token
        return self.token

    def refresh_token(
        self,
        token_url,
        refresh_token=None,
        body="",
        auth=None,
        timeout=None,
        headers=None,
        verify=True,
        proxies=None,
        **kwargs
    ):
        """Fetch a new access token using a refresh token.

        :param token_url: The token endpoint, must be HTTPS.
        :param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
        :param auth: An auth tuple or method as accepted by `requests`.
        :param timeout: Timeout of the request in seconds.
        :param headers: A dict of headers to be used by `requests`.
        :param verify: Verify SSL certificate.
        :param proxies: The `proxies` argument will be passed to `requests`.
        :param kwargs: Extra parameters to include in the token request.
        :return: A token dict
        """
        if not token_url:
            raise ValueError("No token endpoint set for auto_refresh.")

        if not is_secure_transport(token_url):
            raise InsecureTransportError()

        refresh_token = refresh_token or self.token.get("refresh_token")

        log.debug(
            "Adding auto refresh key word arguments %s.", self.auto_refresh_kwargs
        )
        kwargs.update(self.auto_refresh_kwargs)
        body = self._client.prepare_refresh_body(
            body=body, refresh_token=refresh_token, scope=self.scope, **kwargs
        )
        log.debug("Prepared refresh token request body %s", body)

        if headers is None:
            headers = {
                "Accept": "application/json",
                "Content-Type": ("application/x-www-form-urlencoded;charset=UTF-8"),
            }

        # withhold_token=True stops `request` from attaching the (expired)
        # access token to this refresh call.
        r = self.post(
            token_url,
            data=dict(urldecode(body)),
            auth=auth,
            timeout=timeout,
            headers=headers,
            verify=verify,
            withhold_token=True,
            proxies=proxies,
        )
        log.debug("Request to refresh token completed with status %s.", r.status_code)
        log.debug("Response headers were %s and content %s.", r.headers, r.text)
        log.debug(
            "Invoking %d token response hooks.",
            len(self.compliance_hook["refresh_token_response"]),
        )
        for hook in self.compliance_hook["refresh_token_response"]:
            log.debug("Invoking hook %s.", hook)
            r = hook(r)

        self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
        if not "refresh_token" in self.token:
            log.debug("No new refresh token given. Re-using old.")
            self.token["refresh_token"] = refresh_token
        return self.token

    def request(
        self,
        method,
        url,
        data=None,
        headers=None,
        withhold_token=False,
        client_id=None,
        client_secret=None,
        **kwargs
    ):
        """Intercept all requests and add the OAuth 2 token if present.

        On TokenExpiredError, automatically refreshes the token when
        `auto_refresh_url` is configured; otherwise re-raises.
        """
        if not is_secure_transport(url):
            raise InsecureTransportError()
        if self.token and not withhold_token:
            log.debug(
                "Invoking %d protected resource request hooks.",
                len(self.compliance_hook["protected_request"]),
            )
            for hook in self.compliance_hook["protected_request"]:
                log.debug("Invoking hook %s.", hook)
                url, headers, data = hook(url, headers, data)

            log.debug("Adding token %s to request.", self.token)
            try:
                url, headers, data = self._client.add_token(
                    url, http_method=method, body=data, headers=headers
                )
            # Attempt to retrieve and save new access token if expired
            except TokenExpiredError:
                if self.auto_refresh_url:
                    log.debug(
                        "Auto refresh is set, attempting to refresh at %s.",
                        self.auto_refresh_url,
                    )

                    # We mustn't pass auth twice.
                    auth = kwargs.pop("auth", None)
                    if client_id and client_secret and (auth is None):
                        log.debug(
                            'Encoding client_id "%s" with client_secret as Basic auth credentials.',
                            client_id,
                        )
                        auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
                    token = self.refresh_token(
                        self.auto_refresh_url, auth=auth, **kwargs
                    )
                    if self.token_updater:
                        log.debug(
                            "Updating token to %s using %s.", token, self.token_updater
                        )
                        self.token_updater(token)
                        url, headers, data = self._client.add_token(
                            url, http_method=method, body=data, headers=headers
                        )
                    else:
                        raise TokenUpdated(token)
                else:
                    raise

        log.debug("Requesting url %s using method %s.", url, method)
        log.debug("Supplying headers %s and data %s", headers, data)
        log.debug("Passing through key word arguments %s.", kwargs)
        return super(OAuth2Session, self).request(
            method, url, headers=headers, data=data, **kwargs
        )

    def register_compliance_hook(self, hook_type, hook):
        """Register a hook for request/response tweaking.

        Available hooks are:
            access_token_response invoked before token parsing.
            refresh_token_response invoked before refresh token parsing.
            protected_request invoked before making a request.

        If you find a new hook is needed please send a GitHub PR request
        or open an issue.
        """
        if hook_type not in self.compliance_hook:
            # NOTE(review): the message is not %-formatted here; the extra
            # args are passed through to ValueError unformatted.
            raise ValueError(
                "Hook type %s is not in %s.", hook_type, self.compliance_hook
            )
        self.compliance_hook[hook_type].add(hook)
diff --git a/contrib/python/requests-oauthlib/tests/__init__.py b/contrib/python/requests-oauthlib/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/__init__.py
diff --git a/contrib/python/requests-oauthlib/tests/test.bin b/contrib/python/requests-oauthlib/tests/test.bin
new file mode 100644
index 0000000000..b00d4f4796
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/test.bin
@@ -0,0 +1 @@
+¥Æ \ No newline at end of file
diff --git a/contrib/python/requests-oauthlib/tests/test_compliance_fixes.py b/contrib/python/requests-oauthlib/tests/test_compliance_fixes.py
new file mode 100644
index 0000000000..5c90d52660
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/test_compliance_fixes.py
@@ -0,0 +1,334 @@
+from __future__ import unicode_literals
+from unittest import TestCase
+
+import requests
+import requests_mock
+import time
+
+try:
+ from urlparse import urlparse, parse_qs
+except ImportError:
+ from urllib.parse import urlparse, parse_qs
+
+from oauthlib.oauth2.rfc6749.errors import InvalidGrantError
+from requests_oauthlib import OAuth2Session
+from requests_oauthlib.compliance_fixes import facebook_compliance_fix
+from requests_oauthlib.compliance_fixes import fitbit_compliance_fix
+from requests_oauthlib.compliance_fixes import mailchimp_compliance_fix
+from requests_oauthlib.compliance_fixes import weibo_compliance_fix
+from requests_oauthlib.compliance_fixes import slack_compliance_fix
+from requests_oauthlib.compliance_fixes import instagram_compliance_fix
+from requests_oauthlib.compliance_fixes import plentymarkets_compliance_fix
+from requests_oauthlib.compliance_fixes import ebay_compliance_fix
+
+
+class FacebookComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.post(
+ "https://graph.facebook.com/oauth/access_token",
+ text="access_token=urlencoded",
+ headers={"Content-Type": "text/plain"},
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ facebook = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = facebook_compliance_fix(facebook)
+
+ def test_fetch_access_token(self):
+ token = self.session.fetch_token(
+ "https://graph.facebook.com/oauth/access_token",
+ client_secret="someclientsecret",
+ authorization_response="https://i.b/?code=hello",
+ )
+ self.assertEqual(token, {"access_token": "urlencoded", "token_type": "Bearer"})
+
+
+class FitbitComplianceFixTest(TestCase):
+ def setUp(self):
+ self.mocker = requests_mock.Mocker()
+ self.mocker.post(
+ "https://api.fitbit.com/oauth2/token",
+ json={"errors": [{"errorType": "invalid_grant"}]},
+ )
+ self.mocker.start()
+ self.addCleanup(self.mocker.stop)
+
+ fitbit = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = fitbit_compliance_fix(fitbit)
+
+ def test_fetch_access_token(self):
+ self.assertRaises(
+ InvalidGrantError,
+ self.session.fetch_token,
+ "https://api.fitbit.com/oauth2/token",
+ client_secret="someclientsecret",
+ authorization_response="https://i.b/?code=hello",
+ )
+
+ self.mocker.post(
+ "https://api.fitbit.com/oauth2/token", json={"access_token": "fitbit"}
+ )
+
+ token = self.session.fetch_token(
+ "https://api.fitbit.com/oauth2/token", client_secret="good"
+ )
+
+ self.assertEqual(token, {"access_token": "fitbit"})
+
+ def test_refresh_token(self):
+ self.assertRaises(
+ InvalidGrantError,
+ self.session.refresh_token,
+ "https://api.fitbit.com/oauth2/token",
+ auth=requests.auth.HTTPBasicAuth("someclientid", "someclientsecret"),
+ )
+
+ self.mocker.post(
+ "https://api.fitbit.com/oauth2/token",
+ json={"access_token": "access", "refresh_token": "refresh"},
+ )
+
+ token = self.session.refresh_token(
+ "https://api.fitbit.com/oauth2/token",
+ auth=requests.auth.HTTPBasicAuth("someclientid", "someclientsecret"),
+ )
+
+ self.assertEqual(token["access_token"], "access")
+ self.assertEqual(token["refresh_token"], "refresh")
+
+
+class MailChimpComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.post(
+ "https://login.mailchimp.com/oauth2/token",
+ json={"access_token": "mailchimp", "expires_in": 0, "scope": None},
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ mailchimp = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = mailchimp_compliance_fix(mailchimp)
+
+ def test_fetch_access_token(self):
+ token = self.session.fetch_token(
+ "https://login.mailchimp.com/oauth2/token",
+ client_secret="someclientsecret",
+ authorization_response="https://i.b/?code=hello",
+ )
+ # Times should be close
+ approx_expires_at = time.time() + 3600
+ actual_expires_at = token.pop("expires_at")
+ self.assertAlmostEqual(actual_expires_at, approx_expires_at, places=2)
+
+ # Other token values exact
+ self.assertEqual(token, {"access_token": "mailchimp", "expires_in": 3600})
+
+ # And no scope at all
+ self.assertNotIn("scope", token)
+
+
+class WeiboComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.post(
+ "https://api.weibo.com/oauth2/access_token", json={"access_token": "weibo"}
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ weibo = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = weibo_compliance_fix(weibo)
+
+ def test_fetch_access_token(self):
+ token = self.session.fetch_token(
+ "https://api.weibo.com/oauth2/access_token",
+ client_secret="someclientsecret",
+ authorization_response="https://i.b/?code=hello",
+ )
+ self.assertEqual(token, {"access_token": "weibo", "token_type": "Bearer"})
+
+
+class SlackComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.post(
+ "https://slack.com/api/oauth.access",
+ json={"access_token": "xoxt-23984754863-2348975623103", "scope": "read"},
+ )
+ for method in ("GET", "POST"):
+ mocker.request(
+ method=method,
+ url="https://slack.com/api/auth.test",
+ json={
+ "ok": True,
+ "url": "https://myteam.slack.com/",
+ "team": "My Team",
+ "user": "cal",
+ "team_id": "T12345",
+ "user_id": "U12345",
+ },
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ slack = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = slack_compliance_fix(slack)
+
+ def test_protected_request(self):
+ self.session.token = {"access_token": "dummy-access-token"}
+ response = self.session.get("https://slack.com/api/auth.test")
+ url = response.request.url
+ query = parse_qs(urlparse(url).query)
+ self.assertNotIn("token", query)
+ body = response.request.body
+ data = parse_qs(body)
+ self.assertEqual(data["token"], ["dummy-access-token"])
+
+ def test_protected_request_override_token_get(self):
+ self.session.token = {"access_token": "dummy-access-token"}
+ response = self.session.get(
+ "https://slack.com/api/auth.test", data={"token": "different-token"}
+ )
+ url = response.request.url
+ query = parse_qs(urlparse(url).query)
+ self.assertNotIn("token", query)
+ body = response.request.body
+ data = parse_qs(body)
+ self.assertEqual(data["token"], ["different-token"])
+
+ def test_protected_request_override_token_post(self):
+ self.session.token = {"access_token": "dummy-access-token"}
+ response = self.session.post(
+ "https://slack.com/api/auth.test", data={"token": "different-token"}
+ )
+ url = response.request.url
+ query = parse_qs(urlparse(url).query)
+ self.assertNotIn("token", query)
+ body = response.request.body
+ data = parse_qs(body)
+ self.assertEqual(data["token"], ["different-token"])
+
+ def test_protected_request_override_token_url(self):
+ self.session.token = {"access_token": "dummy-access-token"}
+ response = self.session.get(
+ "https://slack.com/api/auth.test?token=different-token"
+ )
+ url = response.request.url
+ query = parse_qs(urlparse(url).query)
+ self.assertEqual(query["token"], ["different-token"])
+ self.assertIsNone(response.request.body)
+
+
+class InstagramComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.request(
+ method="GET",
+ url="https://api.instagram.com/v1/users/self",
+ json={
+ "data": {
+ "id": "1574083",
+ "username": "snoopdogg",
+ "full_name": "Snoop Dogg",
+ "profile_picture": "http://distillery.s3.amazonaws.com/profiles/profile_1574083_75sq_1295469061.jpg",
+ "bio": "This is my bio",
+ "website": "http://snoopdogg.com",
+ "is_business": False,
+ "counts": {"media": 1320, "follows": 420, "followed_by": 3410},
+ }
+ },
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ instagram = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = instagram_compliance_fix(instagram)
+
+ def test_protected_request(self):
+ self.session.token = {"access_token": "dummy-access-token"}
+ response = self.session.get("https://api.instagram.com/v1/users/self")
+ url = response.request.url
+ query = parse_qs(urlparse(url).query)
+ self.assertIn("access_token", query)
+ self.assertEqual(query["access_token"], ["dummy-access-token"])
+
+ def test_protected_request_dont_override(self):
+ """check that if the access_token param
+ already exist we don't override it"""
+ self.session.token = {"access_token": "dummy-access-token"}
+ response = self.session.get(
+ "https://api.instagram.com/v1/users/self?access_token=correct-access-token"
+ )
+ url = response.request.url
+ query = parse_qs(urlparse(url).query)
+ self.assertIn("access_token", query)
+ self.assertEqual(query["access_token"], ["correct-access-token"])
+
+
+class PlentymarketsComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.post(
+ "https://shop.plentymarkets-cloud02.com",
+ json={
+ "accessToken": "ecUN1r8KhJewMCdLAmpHOdZ4O0ofXKB9zf6CXK61",
+ "tokenType": "Bearer",
+ "expiresIn": 86400,
+ "refreshToken": "iG2kBGIjcXaRE4xmTVUnv7xwxX7XMcWCHqJmFaSX",
+ },
+ headers={"Content-Type": "application/json"},
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ plentymarkets = OAuth2Session("someclientid", redirect_uri="https://i.b")
+ self.session = plentymarkets_compliance_fix(plentymarkets)
+
+ def test_fetch_access_token(self):
+ token = self.session.fetch_token(
+ "https://shop.plentymarkets-cloud02.com",
+ authorization_response="https://i.b/?code=hello",
+ )
+
+ approx_expires_at = time.time() + 86400
+ actual_expires_at = token.pop("expires_at")
+ self.assertAlmostEqual(actual_expires_at, approx_expires_at, places=2)
+
+ self.assertEqual(
+ token,
+ {
+ "access_token": "ecUN1r8KhJewMCdLAmpHOdZ4O0ofXKB9zf6CXK61",
+ "expires_in": 86400,
+ "token_type": "Bearer",
+ "refresh_token": "iG2kBGIjcXaRE4xmTVUnv7xwxX7XMcWCHqJmFaSX",
+ },
+ )
+
+
+class EbayComplianceFixTest(TestCase):
+ def setUp(self):
+ mocker = requests_mock.Mocker()
+ mocker.post(
+ "https://api.ebay.com/identity/v1/oauth2/token",
+ json={
+ "access_token": "this is the access token",
+ "expires_in": 7200,
+ "token_type": "Application Access Token",
+ },
+ headers={"Content-Type": "application/json"},
+ )
+ mocker.start()
+ self.addCleanup(mocker.stop)
+
+ session = OAuth2Session()
+ self.fixed_session = ebay_compliance_fix(session)
+
+ def test_fetch_access_token(self):
+ token = self.fixed_session.fetch_token(
+ "https://api.ebay.com/identity/v1/oauth2/token",
+ authorization_response="https://i.b/?code=hello",
+ )
+ assert token["token_type"] == "Bearer"
diff --git a/contrib/python/requests-oauthlib/tests/test_core.py b/contrib/python/requests-oauthlib/tests/test_core.py
new file mode 100644
index 0000000000..6892e9f1ce
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/test_core.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import requests
+import requests_oauthlib
+import oauthlib
+import os.path
+from io import StringIO
+import unittest
+
+try:
+ import mock
+except ImportError:
+ from unittest import mock
+
+
+@mock.patch("oauthlib.oauth1.rfc5849.generate_timestamp")
+@mock.patch("oauthlib.oauth1.rfc5849.generate_nonce")
+class OAuth1Test(unittest.TestCase):
+ def testFormEncoded(self, generate_nonce, generate_timestamp):
+ """OAuth1 assumes form encoded if content type is not specified."""
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "1"
+ oauth = requests_oauthlib.OAuth1("client_key")
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ r = requests.Request(
+ method="POST",
+ url="http://a.b/path?query=retain",
+ auth=oauth,
+ data="this=really&is=&+form=encoded",
+ headers=headers,
+ )
+ a = r.prepare()
+
+ self.assertEqual(a.url, "http://a.b/path?query=retain")
+ self.assertEqual(a.body, b"this=really&is=&+form=encoded")
+ self.assertEqual(
+ a.headers.get("Content-Type"), b"application/x-www-form-urlencoded"
+ )
+
+ # guess content-type
+ r = requests.Request(
+ method="POST",
+ url="http://a.b/path?query=retain",
+ auth=oauth,
+ data="this=really&is=&+form=encoded",
+ )
+ b = r.prepare()
+ self.assertEqual(b.url, "http://a.b/path?query=retain")
+ self.assertEqual(b.body, b"this=really&is=&+form=encoded")
+ self.assertEqual(
+ b.headers.get("Content-Type"), b"application/x-www-form-urlencoded"
+ )
+
+ self.assertEqual(a.headers.get("Authorization"), b.headers.get("Authorization"))
+
+ def testNonFormEncoded(self, generate_nonce, generate_timestamp):
+ """OAuth signature only depend on body if it is form encoded."""
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "1"
+ oauth = requests_oauthlib.OAuth1("client_key")
+
+ r = requests.Request(
+ method="POST",
+ url="http://a.b/path?query=retain",
+ auth=oauth,
+ data="this really is not form encoded",
+ )
+ a = r.prepare()
+
+ r = requests.Request(
+ method="POST", url="http://a.b/path?query=retain", auth=oauth
+ )
+ b = r.prepare()
+
+ self.assertEqual(a.headers.get("Authorization"), b.headers.get("Authorization"))
+
+ r = requests.Request(
+ method="POST",
+ url="http://a.b/path?query=retain",
+ auth=oauth,
+ files={"test": StringIO("hello")},
+ )
+ c = r.prepare()
+
+ self.assertEqual(b.headers.get("Authorization"), c.headers.get("Authorization"))
+
+ @unittest.skip("test uses real http://httpbin.org")
+ def testCanPostBinaryData(self, generate_nonce, generate_timestamp):
+ """
+ Test we can post binary data. Should prevent regression of the
+ UnicodeDecodeError issue.
+ """
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "1"
+ oauth = requests_oauthlib.OAuth1("client_key")
+ import yatest.common
+ dirname = yatest.common.test_source_path()
+ fname = os.path.join(dirname, "test.bin")
+
+ with open(fname, "rb") as f:
+ r = requests.post(
+ "http://httpbin.org/post",
+ data={"hi": "there"},
+ files={"media": (os.path.basename(f.name), f)},
+ headers={"content-type": "application/octet-stream"},
+ auth=oauth,
+ )
+ self.assertEqual(r.status_code, 200)
+
+ @unittest.skip("test uses real http://httpbin.org")
+ def test_url_is_native_str(self, generate_nonce, generate_timestamp):
+ """
+ Test that the URL is always a native string.
+ """
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "1"
+ oauth = requests_oauthlib.OAuth1("client_key")
+
+ r = requests.get("http://httpbin.org/get", auth=oauth)
+ self.assertIsInstance(r.request.url, str)
+
+ @unittest.skip("test uses real http://httpbin.org")
+ def test_content_type_override(self, generate_nonce, generate_timestamp):
+ """
+ Content type should only be guessed if none is given.
+ """
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "1"
+ oauth = requests_oauthlib.OAuth1("client_key")
+ data = "a"
+ r = requests.post("http://httpbin.org/get", data=data, auth=oauth)
+ self.assertEqual(
+ r.request.headers.get("Content-Type"), b"application/x-www-form-urlencoded"
+ )
+ r = requests.post(
+ "http://httpbin.org/get",
+ auth=oauth,
+ data=data,
+ headers={"Content-type": "application/json"},
+ )
+ self.assertEqual(r.request.headers.get("Content-Type"), b"application/json")
+
+ def test_register_client_class(self, generate_timestamp, generate_nonce):
+ class ClientSubclass(oauthlib.oauth1.Client):
+ pass
+
+ self.assertTrue(hasattr(requests_oauthlib.OAuth1, "client_class"))
+
+ self.assertEqual(requests_oauthlib.OAuth1.client_class, oauthlib.oauth1.Client)
+
+ normal = requests_oauthlib.OAuth1("client_key")
+
+ self.assertIsInstance(normal.client, oauthlib.oauth1.Client)
+ self.assertNotIsInstance(normal.client, ClientSubclass)
+
+ requests_oauthlib.OAuth1.client_class = ClientSubclass
+
+ self.assertEqual(requests_oauthlib.OAuth1.client_class, ClientSubclass)
+
+ custom = requests_oauthlib.OAuth1("client_key")
+
+ self.assertIsInstance(custom.client, oauthlib.oauth1.Client)
+ self.assertIsInstance(custom.client, ClientSubclass)
+
+ overridden = requests_oauthlib.OAuth1(
+ "client_key", client_class=oauthlib.oauth1.Client
+ )
+
+ self.assertIsInstance(overridden.client, oauthlib.oauth1.Client)
+ self.assertNotIsInstance(normal.client, ClientSubclass)
diff --git a/contrib/python/requests-oauthlib/tests/test_oauth1_session.py b/contrib/python/requests-oauthlib/tests/test_oauth1_session.py
new file mode 100644
index 0000000000..1dd2b2f158
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/test_oauth1_session.py
@@ -0,0 +1,348 @@
+from __future__ import unicode_literals, print_function
+import unittest
+import sys
+import requests
+from io import StringIO
+
+from oauthlib.oauth1 import SIGNATURE_TYPE_QUERY, SIGNATURE_TYPE_BODY
+from oauthlib.oauth1 import SIGNATURE_RSA, SIGNATURE_PLAINTEXT
+from requests_oauthlib import OAuth1Session
+
+try:
+ import mock
+except ImportError:
+ from unittest import mock
+
+try:
+ import cryptography
+except ImportError:
+ cryptography = None
+
+try:
+ import jwt
+except ImportError:
+ jwt = None
+
+if sys.version[0] == "3":
+ unicode_type = str
+else:
+ unicode_type = unicode
+
+
+TEST_RSA_KEY = (
+ "-----BEGIN RSA PRIVATE KEY-----\n"
+ "MIIEogIBAAKCAQEApF1JaMSN8TEsh4N4O/5SpEAVLivJyLH+Cgl3OQBPGgJkt8cg\n"
+ "49oasl+5iJS+VdrILxWM9/JCJyURpUuslX4Eb4eUBtQ0x5BaPa8+S2NLdGTaL7nB\n"
+ "OO8o8n0C5FEUU+qlEip79KE8aqOj+OC44VsIquSmOvWIQD26n3fCVlgwoRBD1gzz\n"
+ "sDOeaSyzpKrZR851Kh6rEmF2qjJ8jt6EkxMsRNACmBomzgA4M1TTsisSUO87444p\n"
+ "e35Z4/n5c735o2fZMrGgMwiJNh7rT8SYxtIkxngioiGnwkxGQxQ4NzPAHg+XSY0J\n"
+ "04pNm7KqTkgtxyrqOANJLIjXlR+U9SQ90NjHVQIDAQABAoIBABuBPOKaWcJt3yzC\n"
+ "NGGduoif7KtwSnEaUA+v69KPGa2Zju8uFHPssKD+4dZYRc2qMeunKJLpaGaSjnRh\n"
+ "yHyvvOBJCN1nr3lhz6gY5kzJTfwpUFXCOPJlGy4Q+2Xnp4YvcvYqQ9n5DVovDiZ8\n"
+ "vJOBn16xqpudMPLHIa7D5LJ8SY76HBjE+imTXw1EShdh5TOV9bmPFQqH6JFzowRH\n"
+ "hyH2DPHuyHJj6cl8FyqJw5lVWzG3n6Prvk7bYHsjmGjurN35UsumNAp6VouNyUP1\n"
+ "RAEcUJega49aIs6/FJ0ENJzQjlsAzVbTleHkpez2aIok+wsWJGJ4SVxAjADOWAaZ\n"
+ "uEJPc3UCgYEA1g4ZGrXOuo75p9/MRIepXGpBWxip4V7B9XmO9WzPCv8nMorJntWB\n"
+ "msYV1I01aITxadHatO4Gl2xLniNkDyrEQzJ7w38RQgsVK+CqbnC0K9N77QPbHeC1\n"
+ "YQd9RCNyUohOimKvb7jyv798FBU1GO5QI2eNgfnnfteSVXhD2iOoTOsCgYEAxJJ+\n"
+ "8toxJdnLa0uUsAbql6zeNXGbUBMzu3FomKlyuWuq841jS2kIalaO/TRj5hbnE45j\n"
+ "mCjeLgTVO6Ach3Wfk4zrqajqfFJ0zUg/Wexp49lC3RWiV4icBb85Q6bzeJD9Dn9v\n"
+ "hjpfWVkczf/NeA1fGH/pcgfkT6Dm706GFFttLL8CgYBl/HeXk1H47xAiHO4dJKnb\n"
+ "v0B+X8To/RXamF01r+8BpUoOubOQetdyX7ic+d6deuHu8i6LD/GSCeYJZYFR/KVg\n"
+ "AtiW757QYalnq3ZogkhFrVCZP8IRfTPOFBxp752TlyAcrSI7T9pQ47IBe4094KXM\n"
+ "CJWSfPgAJkOxd0iU0XJpmwKBgGfQxuMTgSlwYRKFlD1zKap5TdID8fbUbVnth0Q5\n"
+ "GbH7vwlp/qrxCdS/aj0n0irOpbOaW9ccnlrHiqY25VpVMLYIkt3DrDOEiNNx+KNR\n"
+ "TItdTwbcSiTYrS4L0/56ydM/H6bsfsXxRjI18hSJqMZiqXqS84OZz2aOn+h7HCzc\n"
+ "LEiZAoGASk20wFvilpRKHq79xxFWiDUPHi0x0pp82dYIEntGQkKUWkbSlhgf3MAi\n"
+ "5NEQTDmXdnB+rVeWIvEi+BXfdnNgdn8eC4zSdtF4sIAhYr5VWZo0WVWDhT7u2ccv\n"
+ "ZBFymiz8lo3gN57wGUCi9pbZqzV1+ZppX6YTNDdDCE0q+KO3Cec=\n"
+ "-----END RSA PRIVATE KEY-----"
+)
+
+TEST_RSA_OAUTH_SIGNATURE = (
+ "j8WF8PGjojT82aUDd2EL%2Bz7HCoHInFzWUpiEKMCy%2BJ2cYHWcBS7mXlmFDLgAKV0"
+ "P%2FyX4TrpXODYnJ6dRWdfghqwDpi%2FlQmB2jxCiGMdJoYxh3c5zDf26gEbGdP6D7O"
+ "Ssp5HUnzH6sNkmVjuE%2FxoJcHJdc23H6GhOs7VJ2LWNdbhKWP%2FMMlTrcoQDn8lz"
+ "%2Fb24WsJ6ae1txkUzpFOOlLM8aTdNtGL4OtsubOlRhNqnAFq93FyhXg0KjzUyIZzmMX"
+ "9Vx90jTks5QeBGYcLE0Op2iHb2u%2FO%2BEgdwFchgEwE5LgMUyHUI4F3Wglp28yHOAM"
+ "jPkI%2FkWMvpxtMrU3Z3KN31WQ%3D%3D"
+)
+
+
+class OAuth1SessionTest(unittest.TestCase):
+ def test_signature_types(self):
+ def verify_signature(getter):
+ def fake_send(r, **kwargs):
+ signature = getter(r)
+ if isinstance(signature, bytes):
+ signature = signature.decode("utf-8")
+ self.assertIn("oauth_signature", signature)
+ resp = mock.MagicMock(spec=requests.Response)
+ resp.cookies = []
+ return resp
+
+ return fake_send
+
+ header = OAuth1Session("foo")
+ header.send = verify_signature(lambda r: r.headers["Authorization"])
+ header.post("https://i.b")
+
+ query = OAuth1Session("foo", signature_type=SIGNATURE_TYPE_QUERY)
+ query.send = verify_signature(lambda r: r.url)
+ query.post("https://i.b")
+
+ body = OAuth1Session("foo", signature_type=SIGNATURE_TYPE_BODY)
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ body.send = verify_signature(lambda r: r.body)
+ body.post("https://i.b", headers=headers, data="")
+
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_timestamp")
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_nonce")
+ def test_signature_methods(self, generate_nonce, generate_timestamp):
+ if not cryptography:
+ raise unittest.SkipTest("cryptography module is required")
+ if not jwt:
+ raise unittest.SkipTest("pyjwt module is required")
+
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "123"
+
+ signature = 'OAuth oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", oauth_consumer_key="foo", oauth_signature="h2sRqLArjhlc5p3FTkuNogVHlKE%3D"'
+ auth = OAuth1Session("foo")
+ auth.send = self.verify_signature(signature)
+ auth.post("https://i.b")
+
+ signature = 'OAuth oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", oauth_signature_method="PLAINTEXT", oauth_consumer_key="foo", oauth_signature="%26"'
+ auth = OAuth1Session("foo", signature_method=SIGNATURE_PLAINTEXT)
+ auth.send = self.verify_signature(signature)
+ auth.post("https://i.b")
+
+ signature = (
+ "OAuth "
+ 'oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", '
+ 'oauth_signature_method="RSA-SHA1", oauth_consumer_key="foo", '
+ 'oauth_signature="{sig}"'
+ ).format(sig=TEST_RSA_OAUTH_SIGNATURE)
+ auth = OAuth1Session(
+ "foo", signature_method=SIGNATURE_RSA, rsa_key=TEST_RSA_KEY
+ )
+ auth.send = self.verify_signature(signature)
+ auth.post("https://i.b")
+
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_timestamp")
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_nonce")
+ def test_binary_upload(self, generate_nonce, generate_timestamp):
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "123"
+ fake_xml = StringIO("hello world")
+ headers = {"Content-Type": "application/xml"}
+ signature = 'OAuth oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", oauth_consumer_key="foo", oauth_signature="h2sRqLArjhlc5p3FTkuNogVHlKE%3D"'
+ auth = OAuth1Session("foo")
+ auth.send = self.verify_signature(signature)
+ auth.post("https://i.b", headers=headers, files=[("fake", fake_xml)])
+
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_timestamp")
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_nonce")
+ def test_nonascii(self, generate_nonce, generate_timestamp):
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "123"
+ signature = 'OAuth oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", oauth_consumer_key="foo", oauth_signature="W0haoue5IZAZoaJiYCtfqwMf8x8%3D"'
+ auth = OAuth1Session("foo")
+ auth.send = self.verify_signature(signature)
+ auth.post("https://i.b?cjk=%E5%95%A6%E5%95%A6")
+
+ def test_authorization_url(self):
+ auth = OAuth1Session("foo")
+ url = "https://example.comm/authorize"
+ token = "asluif023sf"
+ auth_url = auth.authorization_url(url, request_token=token)
+ self.assertEqual(auth_url, url + "?oauth_token=" + token)
+
+ def test_parse_response_url(self):
+ url = "https://i.b/callback?oauth_token=foo&oauth_verifier=bar"
+ auth = OAuth1Session("foo")
+ resp = auth.parse_authorization_response(url)
+ self.assertEqual(resp["oauth_token"], "foo")
+ self.assertEqual(resp["oauth_verifier"], "bar")
+ for k, v in resp.items():
+ self.assertIsInstance(k, unicode_type)
+ self.assertIsInstance(v, unicode_type)
+
+ def test_fetch_request_token(self):
+ auth = OAuth1Session("foo")
+ auth.send = self.fake_body("oauth_token=foo")
+ resp = auth.fetch_request_token("https://example.com/token")
+ self.assertEqual(resp["oauth_token"], "foo")
+ for k, v in resp.items():
+ self.assertIsInstance(k, unicode_type)
+ self.assertIsInstance(v, unicode_type)
+
+ def test_fetch_request_token_with_optional_arguments(self):
+ auth = OAuth1Session("foo")
+ auth.send = self.fake_body("oauth_token=foo")
+ resp = auth.fetch_request_token(
+ "https://example.com/token", verify=False, stream=True
+ )
+ self.assertEqual(resp["oauth_token"], "foo")
+ for k, v in resp.items():
+ self.assertIsInstance(k, unicode_type)
+ self.assertIsInstance(v, unicode_type)
+
+ def test_fetch_access_token(self):
+ auth = OAuth1Session("foo", verifier="bar")
+ auth.send = self.fake_body("oauth_token=foo")
+ resp = auth.fetch_access_token("https://example.com/token")
+ self.assertEqual(resp["oauth_token"], "foo")
+ for k, v in resp.items():
+ self.assertIsInstance(k, unicode_type)
+ self.assertIsInstance(v, unicode_type)
+
+ def test_fetch_access_token_with_optional_arguments(self):
+ auth = OAuth1Session("foo", verifier="bar")
+ auth.send = self.fake_body("oauth_token=foo")
+ resp = auth.fetch_access_token(
+ "https://example.com/token", verify=False, stream=True
+ )
+ self.assertEqual(resp["oauth_token"], "foo")
+ for k, v in resp.items():
+ self.assertIsInstance(k, unicode_type)
+ self.assertIsInstance(v, unicode_type)
+
+ def _test_fetch_access_token_raises_error(self, auth):
+ """Assert that an error is being raised whenever there's no verifier
+ passed in to the client.
+ """
+ auth.send = self.fake_body("oauth_token=foo")
+ with self.assertRaises(ValueError) as cm:
+ auth.fetch_access_token("https://example.com/token")
+ self.assertEqual("No client verifier has been set.", str(cm.exception))
+
+ def test_fetch_token_invalid_response(self):
+ auth = OAuth1Session("foo")
+ auth.send = self.fake_body("not valid urlencoded response!")
+ self.assertRaises(
+ ValueError, auth.fetch_request_token, "https://example.com/token"
+ )
+
+ for code in (400, 401, 403):
+ auth.send = self.fake_body("valid=response", code)
+ with self.assertRaises(ValueError) as cm:
+ auth.fetch_request_token("https://example.com/token")
+ self.assertEqual(cm.exception.status_code, code)
+ self.assertIsInstance(cm.exception.response, requests.Response)
+
+ def test_fetch_access_token_missing_verifier(self):
+ self._test_fetch_access_token_raises_error(OAuth1Session("foo"))
+
+ def test_fetch_access_token_has_verifier_is_none(self):
+ auth = OAuth1Session("foo")
+ del auth._client.client.verifier
+ self._test_fetch_access_token_raises_error(auth)
+
+ def test_token_proxy_set(self):
+ token = {
+ "oauth_token": "fake-key",
+ "oauth_token_secret": "fake-secret",
+ "oauth_verifier": "fake-verifier",
+ }
+ sess = OAuth1Session("foo")
+ self.assertIsNone(sess._client.client.resource_owner_key)
+ self.assertIsNone(sess._client.client.resource_owner_secret)
+ self.assertIsNone(sess._client.client.verifier)
+ self.assertEqual(sess.token, {})
+
+ sess.token = token
+ self.assertEqual(sess._client.client.resource_owner_key, "fake-key")
+ self.assertEqual(sess._client.client.resource_owner_secret, "fake-secret")
+ self.assertEqual(sess._client.client.verifier, "fake-verifier")
+
+ def test_token_proxy_get(self):
+ token = {
+ "oauth_token": "fake-key",
+ "oauth_token_secret": "fake-secret",
+ "oauth_verifier": "fake-verifier",
+ }
+ sess = OAuth1Session(
+ "foo",
+ resource_owner_key=token["oauth_token"],
+ resource_owner_secret=token["oauth_token_secret"],
+ verifier=token["oauth_verifier"],
+ )
+ self.assertEqual(sess.token, token)
+
+ sess._client.client.resource_owner_key = "different-key"
+ token["oauth_token"] = "different-key"
+
+ self.assertEqual(sess.token, token)
+
+ def test_authorized_false(self):
+ sess = OAuth1Session("foo")
+ self.assertIs(sess.authorized, False)
+
+ def test_authorized_false_rsa(self):
+ signature = (
+ "OAuth "
+ 'oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", '
+ 'oauth_signature_method="RSA-SHA1", oauth_consumer_key="foo", '
+ 'oauth_signature="{sig}"'
+ ).format(sig=TEST_RSA_OAUTH_SIGNATURE)
+ sess = OAuth1Session(
+ "foo", signature_method=SIGNATURE_RSA, rsa_key=TEST_RSA_KEY
+ )
+ sess.send = self.verify_signature(signature)
+ self.assertIs(sess.authorized, False)
+
+ def test_authorized_true(self):
+ sess = OAuth1Session("key", "secret", verifier="bar")
+ sess.send = self.fake_body("oauth_token=foo&oauth_token_secret=bar")
+ sess.fetch_access_token("https://example.com/token")
+ self.assertIs(sess.authorized, True)
+
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_timestamp")
+ @mock.patch("oauthlib.oauth1.rfc5849.generate_nonce")
+ def test_authorized_true_rsa(self, generate_nonce, generate_timestamp):
+ if not cryptography:
+ raise unittest.SkipTest("cryptography module is required")
+ if not jwt:
+ raise unittest.SkipTest("pyjwt module is required")
+
+ generate_nonce.return_value = "abc"
+ generate_timestamp.return_value = "123"
+ signature = (
+ "OAuth "
+ 'oauth_nonce="abc", oauth_timestamp="123", oauth_version="1.0", '
+ 'oauth_signature_method="RSA-SHA1", oauth_consumer_key="foo", '
+ 'oauth_verifier="bar", oauth_signature="{sig}"'
+ ).format(sig=TEST_RSA_OAUTH_SIGNATURE)
+ sess = OAuth1Session(
+ "key",
+ "secret",
+ signature_method=SIGNATURE_RSA,
+ rsa_key=TEST_RSA_KEY,
+ verifier="bar",
+ )
+ sess.send = self.fake_body("oauth_token=foo&oauth_token_secret=bar")
+ sess.fetch_access_token("https://example.com/token")
+ self.assertIs(sess.authorized, True)
+
+ def verify_signature(self, signature):
+ def fake_send(r, **kwargs):
+ auth_header = r.headers["Authorization"]
+ if isinstance(auth_header, bytes):
+ auth_header = auth_header.decode("utf-8")
+ self.assertEqual(auth_header, signature)
+ resp = mock.MagicMock(spec=requests.Response)
+ resp.cookies = []
+ return resp
+
+ return fake_send
+
+ def fake_body(self, body, status_code=200):
+ def fake_send(r, **kwargs):
+ resp = mock.MagicMock(spec=requests.Response)
+ resp.cookies = []
+ resp.text = body
+ resp.status_code = status_code
+ return resp
+
+ return fake_send
diff --git a/contrib/python/requests-oauthlib/tests/test_oauth2_auth.py b/contrib/python/requests-oauthlib/tests/test_oauth2_auth.py
new file mode 100644
index 0000000000..accb561ef6
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/test_oauth2_auth.py
@@ -0,0 +1,54 @@
+from __future__ import unicode_literals
+import unittest
+
+from oauthlib.oauth2 import WebApplicationClient, MobileApplicationClient
+from oauthlib.oauth2 import LegacyApplicationClient, BackendApplicationClient
+from requests import Request
+from requests_oauthlib import OAuth2
+
+
+class OAuth2AuthTest(unittest.TestCase):
+ def setUp(self):
+ self.token = {
+ "token_type": "Bearer",
+ "access_token": "asdfoiw37850234lkjsdfsdf",
+ "expires_in": "3600",
+ }
+ self.client_id = "foo"
+ self.clients = [
+ WebApplicationClient(self.client_id),
+ MobileApplicationClient(self.client_id),
+ LegacyApplicationClient(self.client_id),
+ BackendApplicationClient(self.client_id),
+ ]
+
+ def test_add_token_to_url(self):
+ url = "https://example.com/resource?foo=bar"
+ new_url = url + "&access_token=" + self.token["access_token"]
+ for client in self.clients:
+ client.default_token_placement = "query"
+ auth = OAuth2(client=client, token=self.token)
+ r = Request("GET", url, auth=auth).prepare()
+ self.assertEqual(r.url, new_url)
+
+ def test_add_token_to_headers(self):
+ token = "Bearer " + self.token["access_token"]
+ for client in self.clients:
+ auth = OAuth2(client=client, token=self.token)
+ r = Request("GET", "https://i.b", auth=auth).prepare()
+ self.assertEqual(r.headers["Authorization"], token)
+
+ def test_add_token_to_body(self):
+ body = "foo=bar"
+ new_body = body + "&access_token=" + self.token["access_token"]
+ for client in self.clients:
+ client.default_token_placement = "body"
+ auth = OAuth2(client=client, token=self.token)
+ r = Request("GET", "https://i.b", data=body, auth=auth).prepare()
+ self.assertEqual(r.body, new_body)
+
+ def test_add_nonexisting_token(self):
+ for client in self.clients:
+ auth = OAuth2(client=client)
+ r = Request("GET", "https://i.b", auth=auth)
+ self.assertRaises(ValueError, r.prepare)
diff --git a/contrib/python/requests-oauthlib/tests/test_oauth2_session.py b/contrib/python/requests-oauthlib/tests/test_oauth2_session.py
new file mode 100644
index 0000000000..cfc6236855
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/test_oauth2_session.py
@@ -0,0 +1,527 @@
+from __future__ import unicode_literals
+import json
+import time
+import tempfile
+import shutil
+import os
+from base64 import b64encode
+from copy import deepcopy
+from unittest import TestCase
+
+try:
+ import mock
+except ImportError:
+ from unittest import mock
+
+from oauthlib.common import urlencode
+from oauthlib.oauth2 import TokenExpiredError, OAuth2Error
+from oauthlib.oauth2 import MismatchingStateError
+from oauthlib.oauth2 import WebApplicationClient, MobileApplicationClient
+from oauthlib.oauth2 import LegacyApplicationClient, BackendApplicationClient
+from requests_oauthlib import OAuth2Session, TokenUpdated
+import requests
+
+from requests.auth import _basic_auth_str
+
+
+fake_time = time.time()
+CODE = "asdf345xdf"
+
+
def fake_token(token):
    """Build a stand-in for ``Session.send`` that always answers *token*.

    The returned callable ignores the outgoing request and yields a mock
    response whose ``text`` is the JSON-serialised *token*.
    """

    def fake_send(request, **kwargs):
        response = mock.MagicMock()
        response.text = json.dumps(token)
        return response

    return fake_send
+
+
class OAuth2SessionTest(TestCase):
    """Unit tests for OAuth2Session across the standard OAuth2 grant types."""

    def setUp(self):
        """Create a fresh token and one client per grant type for each test."""
        # A valid, unexpired token (expires_at anchored to module-load time).
        self.token = {
            "token_type": "Bearer",
            "access_token": "asdfoiw37850234lkjsdfsdf",
            "refresh_token": "sldvafkjw34509s8dfsdf",
            "expires_in": 3600,
            "expires_at": fake_time + 3600,
        }
        # use someclientid:someclientsecret to easily differentiate between client and user credentials
        # these are the values used in oauthlib tests
        self.client_id = "someclientid"
        self.client_secret = "someclientsecret"
        self.user_username = "user_username"
        self.user_password = "user_password"
        self.client_WebApplication = WebApplicationClient(self.client_id, code=CODE)
        self.client_LegacyApplication = LegacyApplicationClient(self.client_id)
        self.client_BackendApplication = BackendApplicationClient(self.client_id)
        self.client_MobileApplication = MobileApplicationClient(self.client_id)
        # Clients that use a token endpoint; the implicit-grant (mobile)
        # client is excluded here and only appears in all_clients.
        self.clients = [
            self.client_WebApplication,
            self.client_LegacyApplication,
            self.client_BackendApplication,
        ]
        self.all_clients = self.clients + [self.client_MobileApplication]
+
+ def test_add_token(self):
+ token = "Bearer " + self.token["access_token"]
+
+ def verifier(r, **kwargs):
+ auth_header = r.headers.get(str("Authorization"), None)
+ self.assertEqual(auth_header, token)
+ resp = mock.MagicMock()
+ resp.cookes = []
+ return resp
+
+ for client in self.all_clients:
+ sess = OAuth2Session(client=client, token=self.token)
+ sess.send = verifier
+ sess.get("https://i.b")
+
    def test_mtls(self):
        """The client certificate passed to fetch_token must reach the transport."""
        cert = (
            "testsomething.example-client.pem",
            "testsomething.example-client-key.pem",
        )

        def verifier(r, **kwargs):
            # The cert pair must be forwarded unchanged in the send kwargs,
            # and client_id must have been forced into the request body.
            self.assertIn("cert", kwargs)
            self.assertEqual(cert, kwargs["cert"])
            self.assertIn("client_id=" + self.client_id, r.body)
            resp = mock.MagicMock()
            resp.text = json.dumps(self.token)
            return resp

        for client in self.clients:
            sess = OAuth2Session(client=client)
            sess.send = verifier

            if isinstance(client, LegacyApplicationClient):
                # The password grant additionally requires user credentials.
                sess.fetch_token(
                    "https://i.b",
                    include_client_id=True,
                    cert=cert,
                    username="username1",
                    password="password1",
                )
            else:
                sess.fetch_token("https://i.b", include_client_id=True, cert=cert)
+
    def test_authorization_url(self):
        """authorization_url embeds state, client_id and the grant's response_type."""
        url = "https://example.com/authorize?foo=bar"

        # Authorization-code grant advertises response_type=code.
        web = WebApplicationClient(self.client_id)
        s = OAuth2Session(client=web)
        auth_url, state = s.authorization_url(url)
        self.assertIn(state, auth_url)
        self.assertIn(self.client_id, auth_url)
        self.assertIn("response_type=code", auth_url)

        # Implicit grant advertises response_type=token.
        mobile = MobileApplicationClient(self.client_id)
        s = OAuth2Session(client=mobile)
        auth_url, state = s.authorization_url(url)
        self.assertIn(state, auth_url)
        self.assertIn(self.client_id, auth_url)
        self.assertIn("response_type=token", auth_url)
+
    @mock.patch("time.time", new=lambda: fake_time)
    def test_refresh_token_request(self):
        """Expired tokens raise, auto-refresh without an updater raises
        TokenUpdated, and a token_updater makes refresh fully transparent."""
        # Derive an already-expired copy of the valid token.
        self.expired_token = dict(self.token)
        self.expired_token["expires_in"] = "-1"
        del self.expired_token["expires_at"]

        def fake_refresh(r, **kwargs):
            if "/refresh" in r.url:
                # The refresh request itself must not reuse the old
                # Authorization header.
                self.assertNotIn("Authorization", r.headers)
            resp = mock.MagicMock()
            resp.text = json.dumps(self.token)
            return resp

        # No auto refresh setup
        for client in self.clients:
            sess = OAuth2Session(client=client, token=self.expired_token)
            self.assertRaises(TokenExpiredError, sess.get, "https://i.b")

        # Auto refresh but no auto update
        for client in self.clients:
            sess = OAuth2Session(
                client=client,
                token=self.expired_token,
                auto_refresh_url="https://i.b/refresh",
            )
            sess.send = fake_refresh
            self.assertRaises(TokenUpdated, sess.get, "https://i.b")

        # Auto refresh and auto update
        def token_updater(token):
            self.assertEqual(token, self.token)

        for client in self.clients:
            sess = OAuth2Session(
                client=client,
                token=self.expired_token,
                auto_refresh_url="https://i.b/refresh",
                token_updater=token_updater,
            )
            sess.send = fake_refresh
            sess.get("https://i.b")

        def fake_refresh_with_auth(r, **kwargs):
            if "/refresh" in r.url:
                # When client credentials are supplied, the refresh must
                # carry HTTP Basic auth built from client_id:client_secret.
                self.assertIn("Authorization", r.headers)
                encoded = b64encode(
                    "{client_id}:{client_secret}".format(
                        client_id=self.client_id, client_secret=self.client_secret
                    ).encode("latin1")
                )
                content = "Basic {encoded}".format(encoded=encoded.decode("latin1"))
                self.assertEqual(r.headers["Authorization"], content)
            resp = mock.MagicMock()
            resp.text = json.dumps(self.token)
            return resp

        for client in self.clients:
            sess = OAuth2Session(
                client=client,
                token=self.expired_token,
                auto_refresh_url="https://i.b/refresh",
                token_updater=token_updater,
            )
            sess.send = fake_refresh_with_auth
            sess.get(
                "https://i.b",
                client_id=self.client_id,
                client_secret=self.client_secret,
            )
+
    @mock.patch("time.time", new=lambda: fake_time)
    def test_token_from_fragment(self):
        """Implicit-grant tokens are parsed out of the callback URL fragment."""
        mobile = MobileApplicationClient(self.client_id)
        # Encode the token into the fragment the way an implicit-grant
        # authorization server would on redirect.
        response_url = "https://i.b/callback#" + urlencode(self.token.items())
        sess = OAuth2Session(client=mobile)
        self.assertEqual(sess.token_from_fragment(response_url), self.token)
+
+ @mock.patch("time.time", new=lambda: fake_time)
+ def test_fetch_token(self):
+ url = "https://example.com/token"
+
+ for client in self.clients:
+ sess = OAuth2Session(client=client, token=self.token)
+ sess.send = fake_token(self.token)
+ if isinstance(client, LegacyApplicationClient):
+ # this client requires a username+password
+ # if unset, an error will be raised
+ self.assertRaises(ValueError, sess.fetch_token, url)
+ self.assertRaises(
+ ValueError, sess.fetch_token, url, username="username1"
+ )
+ self.assertRaises(
+ ValueError, sess.fetch_token, url, password="password1"
+ )
+ # otherwise it will pass
+ self.assertEqual(
+ sess.fetch_token(url, username="username1", password="password1"),
+ self.token,
+ )
+ else:
+ self.assertEqual(sess.fetch_token(url), self.token)
+
+ error = {"error": "invalid_request"}
+ for client in self.clients:
+ sess = OAuth2Session(client=client, token=self.token)
+ sess.send = fake_token(error)
+ if isinstance(client, LegacyApplicationClient):
+ # this client requires a username+password
+ # if unset, an error will be raised
+ self.assertRaises(ValueError, sess.fetch_token, url)
+ self.assertRaises(
+ ValueError, sess.fetch_token, url, username="username1"
+ )
+ self.assertRaises(
+ ValueError, sess.fetch_token, url, password="password1"
+ )
+ # otherwise it will pass
+ self.assertRaises(
+ OAuth2Error,
+ sess.fetch_token,
+ url,
+ username="username1",
+ password="password1",
+ )
+ else:
+ self.assertRaises(OAuth2Error, sess.fetch_token, url)
+
+ # there are different scenarios in which the `client_id` can be specified
+ # reference `oauthlib.tests.oauth2.rfc6749.clients.test_web_application.WebApplicationClientTest.test_prepare_request_body`
+ # this only needs to test WebApplicationClient
+ client = self.client_WebApplication
+ client.tester = True
+
+ # this should be a tuple of (r.url, r.body, r.headers.get('Authorization'))
+ _fetch_history = []
+
+ def fake_token_history(token):
+ def fake_send(r, **kwargs):
+ resp = mock.MagicMock()
+ resp.text = json.dumps(token)
+ _fetch_history.append(
+ (r.url, r.body, r.headers.get("Authorization", None))
+ )
+ return resp
+
+ return fake_send
+
+ sess = OAuth2Session(client=client, token=self.token)
+ sess.send = fake_token_history(self.token)
+ expected_auth_header = _basic_auth_str(self.client_id, self.client_secret)
+
+ # scenario 1 - default request
+ # this should send the `client_id` in the headers, as that is recommended by the RFC
+ self.assertEqual(
+ sess.fetch_token(url, client_secret="someclientsecret"), self.token
+ )
+ self.assertEqual(len(_fetch_history), 1)
+ self.assertNotIn(
+ "client_id", _fetch_history[0][1]
+ ) # no `client_id` in the body
+ self.assertNotIn(
+ "client_secret", _fetch_history[0][1]
+ ) # no `client_secret` in the body
+ self.assertEqual(
+ _fetch_history[0][2], expected_auth_header
+ ) # ensure a Basic Authorization header
+
+ # scenario 2 - force the `client_id` into the body
+ self.assertEqual(
+ sess.fetch_token(
+ url, client_secret="someclientsecret", include_client_id=True
+ ),
+ self.token,
+ )
+ self.assertEqual(len(_fetch_history), 2)
+ self.assertIn("client_id=%s" % self.client_id, _fetch_history[1][1])
+ self.assertIn("client_secret=%s" % self.client_secret, _fetch_history[1][1])
+ self.assertEqual(
+ _fetch_history[1][2], None
+ ) # ensure NO Basic Authorization header
+
+ # scenario 3 - send in an auth object
+ auth = requests.auth.HTTPBasicAuth(self.client_id, self.client_secret)
+ self.assertEqual(sess.fetch_token(url, auth=auth), self.token)
+ self.assertEqual(len(_fetch_history), 3)
+ self.assertNotIn(
+ "client_id", _fetch_history[2][1]
+ ) # no `client_id` in the body
+ self.assertNotIn(
+ "client_secret", _fetch_history[2][1]
+ ) # no `client_secret` in the body
+ self.assertEqual(
+ _fetch_history[2][2], expected_auth_header
+ ) # ensure a Basic Authorization header
+
+ # scenario 4 - send in a username/password combo
+ # this should send the `client_id` in the headers, like scenario 1
+ self.assertEqual(
+ sess.fetch_token(
+ url, username=self.user_username, password=self.user_password
+ ),
+ self.token,
+ )
+ self.assertEqual(len(_fetch_history), 4)
+ self.assertNotIn(
+ "client_id", _fetch_history[3][1]
+ ) # no `client_id` in the body
+ self.assertNotIn(
+ "client_secret", _fetch_history[3][1]
+ ) # no `client_secret` in the body
+ self.assertEqual(
+ _fetch_history[0][2], expected_auth_header
+ ) # ensure a Basic Authorization header
+ self.assertIn("username=%s" % self.user_username, _fetch_history[3][1])
+ self.assertIn("password=%s" % self.user_password, _fetch_history[3][1])
+
+ # scenario 5 - send data in `params` and not in `data` for providers
+ # that expect data in URL
+ self.assertEqual(
+ sess.fetch_token(url, client_secret="somesecret", force_querystring=True),
+ self.token,
+ )
+ self.assertIn("code=%s" % CODE, _fetch_history[4][0])
+
+ # some quick tests for valid ways of supporting `client_secret`
+
+ # scenario 2b - force the `client_id` into the body; but the `client_secret` is `None`
+ self.assertEqual(
+ sess.fetch_token(url, client_secret=None, include_client_id=True),
+ self.token,
+ )
+ self.assertEqual(len(_fetch_history), 6)
+ self.assertIn("client_id=%s" % self.client_id, _fetch_history[5][1])
+ self.assertNotIn(
+ "client_secret=", _fetch_history[5][1]
+ ) # no `client_secret` in the body
+ self.assertEqual(
+ _fetch_history[5][2], None
+ ) # ensure NO Basic Authorization header
+
+ # scenario 2c - force the `client_id` into the body; but the `client_secret` is an empty string
+ self.assertEqual(
+ sess.fetch_token(url, client_secret="", include_client_id=True), self.token
+ )
+ self.assertEqual(len(_fetch_history), 7)
+ self.assertIn("client_id=%s" % self.client_id, _fetch_history[6][1])
+ self.assertIn("client_secret=", _fetch_history[6][1])
+ self.assertEqual(
+ _fetch_history[6][2], None
+ ) # ensure NO Basic Authorization header
+
    def test_cleans_previous_token_before_fetching_new_one(self):
        """Makes sure the previous token is cleaned before fetching a new one.

        The reason behind it is that, if the previous token is expired, this
        method shouldn't fail with a TokenExpiredError, since it's attempting
        to get a new one (which shouldn't be expired).

        """
        new_token = deepcopy(self.token)
        past = time.time() - 7200
        now = time.time()
        # Make the session's current token expired and the fetched one valid.
        self.token["expires_at"] = past
        new_token["expires_at"] = now + 3600
        url = "https://example.com/token"

        # Freeze time so expiry comparisons are deterministic.
        with mock.patch("time.time", lambda: now):
            for client in self.clients:
                sess = OAuth2Session(client=client, token=self.token)
                sess.send = fake_token(new_token)
                if isinstance(client, LegacyApplicationClient):
                    # this client requires a username+password
                    # if unset, an error will be raised
                    self.assertRaises(ValueError, sess.fetch_token, url)
                    self.assertRaises(
                        ValueError, sess.fetch_token, url, username="username1"
                    )
                    self.assertRaises(
                        ValueError, sess.fetch_token, url, password="password1"
                    )
                    # otherwise it will pass
                    self.assertEqual(
                        sess.fetch_token(
                            url, username="username1", password="password1"
                        ),
                        new_token,
                    )
                else:
                    self.assertEqual(sess.fetch_token(url), new_token)
+
+ def test_web_app_fetch_token(self):
+ # Ensure the state parameter is used, see issue #105.
+ client = OAuth2Session("someclientid", state="somestate")
+ self.assertRaises(
+ MismatchingStateError,
+ client.fetch_token,
+ "https://i.b/token",
+ authorization_response="https://i.b/no-state?code=abc",
+ )
+
    def test_client_id_proxy(self):
        """session.client_id proxies (get/set/delete) the underlying client's."""
        sess = OAuth2Session("test-id")
        self.assertEqual(sess.client_id, "test-id")
        sess.client_id = "different-id"
        self.assertEqual(sess.client_id, "different-id")
        # Writing straight to the wrapped client is visible via the proxy.
        sess._client.client_id = "something-else"
        self.assertEqual(sess.client_id, "something-else")
        del sess.client_id
        self.assertIsNone(sess.client_id)
+
    def test_access_token_proxy(self):
        """session.access_token proxies (get/set/delete) the underlying client's."""
        sess = OAuth2Session("test-id")
        self.assertIsNone(sess.access_token)
        sess.access_token = "test-token"
        self.assertEqual(sess.access_token, "test-token")
        # Writing straight to the wrapped client is visible via the proxy.
        sess._client.access_token = "different-token"
        self.assertEqual(sess.access_token, "different-token")
        del sess.access_token
        self.assertIsNone(sess.access_token)
+
    def test_token_proxy(self):
        """session.token mirrors the client's token dict and cannot be deleted."""
        token = {"access_token": "test-access"}
        sess = OAuth2Session("test-id", token=token)
        self.assertEqual(sess.access_token, "test-access")
        self.assertEqual(sess.token, token)
        token["access_token"] = "something-else"
        sess.token = token
        self.assertEqual(sess.access_token, "something-else")
        self.assertEqual(sess.token, token)
        # Writing straight to the wrapped client stays in sync with the proxy.
        sess._client.access_token = "different-token"
        token["access_token"] = "different-token"
        self.assertEqual(sess.access_token, "different-token")
        self.assertEqual(sess.token, token)
        # can't delete token attribute
        with self.assertRaises(AttributeError):
            del sess.token
+
    def test_authorized_false(self):
        """A session without any token reports authorized == False."""
        sess = OAuth2Session("someclientid")
        self.assertFalse(sess.authorized)
+
+ @mock.patch("time.time", new=lambda: fake_time)
+ def test_authorized_true(self):
+ def fake_token(token):
+ def fake_send(r, **kwargs):
+ resp = mock.MagicMock()
+ resp.text = json.dumps(token)
+ return resp
+
+ return fake_send
+
+ url = "https://example.com/token"
+
+ for client in self.clients:
+ sess = OAuth2Session(client=client)
+ sess.send = fake_token(self.token)
+ self.assertFalse(sess.authorized)
+ if isinstance(client, LegacyApplicationClient):
+ # this client requires a username+password
+ # if unset, an error will be raised
+ self.assertRaises(ValueError, sess.fetch_token, url)
+ self.assertRaises(
+ ValueError, sess.fetch_token, url, username="username1"
+ )
+ self.assertRaises(
+ ValueError, sess.fetch_token, url, password="password1"
+ )
+ # otherwise it will pass
+ sess.fetch_token(url, username="username1", password="password1")
+ else:
+ sess.fetch_token(url)
+ self.assertTrue(sess.authorized)
+
+
class OAuth2SessionNetrcTest(OAuth2SessionTest):
    """Ensure that there is no magic auth handling.

    By default, requests sessions have magic handling of netrc files,
    which is undesirable for this library because it will take
    precedence over manually set authentication headers.
    """

    def setUp(self):
        # Set up a temporary home directory
        self.homedir = tempfile.mkdtemp()
        self.prehome = os.environ.get("HOME", None)
        os.environ["HOME"] = self.homedir

        # Write a .netrc file that will cause problems
        netrc_loc = os.path.expanduser("~/.netrc")
        with open(netrc_loc, "w") as f:
            f.write("machine i.b\n" " password abc123\n" " login spam@eggs.co\n")

        super(OAuth2SessionNetrcTest, self).setUp()

    def tearDown(self):
        super(OAuth2SessionNetrcTest, self).tearDown()

        # Bug fix: the original restored HOME and removed the temp dir only
        # when HOME had previously been set.  If HOME was unset, the temp
        # directory leaked and HOME stayed pointed at it for later tests.
        if self.prehome is not None:
            os.environ["HOME"] = self.prehome
        else:
            os.environ.pop("HOME", None)
        shutil.rmtree(self.homedir)
diff --git a/contrib/python/requests-oauthlib/tests/ya.make b/contrib/python/requests-oauthlib/tests/ya.make
new file mode 100644
index 0000000000..a8f7328ae7
--- /dev/null
+++ b/contrib/python/requests-oauthlib/tests/ya.make
@@ -0,0 +1,28 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/requests-oauthlib
+ contrib/python/requests-mock
+)
+
+# These tests use the real http://httpbin.org; that is why they are disabled:
+# testCanPostBinaryData
+# test_url_is_native_str
+# test_content_type_override
+
+TEST_SRCS(
+ __init__.py
+ test_compliance_fixes.py
+ test_core.py
+ test_oauth1_session.py
+ test_oauth2_auth.py
+ test_oauth2_session.py
+)
+
+DATA(
+ arcadia/contrib/python/requests-oauthlib/tests
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/requests-oauthlib/ya.make b/contrib/python/requests-oauthlib/ya.make
new file mode 100644
index 0000000000..2145e60cc4
--- /dev/null
+++ b/contrib/python/requests-oauthlib/ya.make
@@ -0,0 +1,45 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(1.3.1)
+
+LICENSE(ISC)
+
+PEERDIR(
+ contrib/python/oauthlib
+ contrib/python/requests
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ requests_oauthlib/__init__.py
+ requests_oauthlib/compliance_fixes/__init__.py
+ requests_oauthlib/compliance_fixes/douban.py
+ requests_oauthlib/compliance_fixes/ebay.py
+ requests_oauthlib/compliance_fixes/facebook.py
+ requests_oauthlib/compliance_fixes/fitbit.py
+ requests_oauthlib/compliance_fixes/instagram.py
+ requests_oauthlib/compliance_fixes/mailchimp.py
+ requests_oauthlib/compliance_fixes/plentymarkets.py
+ requests_oauthlib/compliance_fixes/slack.py
+ requests_oauthlib/compliance_fixes/weibo.py
+ requests_oauthlib/oauth1_auth.py
+ requests_oauthlib/oauth1_session.py
+ requests_oauthlib/oauth2_auth.py
+ requests_oauthlib/oauth2_session.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/requests-oauthlib/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/rsa/py2/.dist-info/METADATA b/contrib/python/rsa/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..3ab66353aa
--- /dev/null
+++ b/contrib/python/rsa/py2/.dist-info/METADATA
@@ -0,0 +1,85 @@
+Metadata-Version: 2.1
+Name: rsa
+Version: 4.5
+Summary: Pure-Python RSA implementation
+Home-page: https://stuvel.eu/rsa
+Author: Sybren A. Stuvel
+Author-email: sybren@stuvel.eu
+Maintainer: Sybren A. Stuvel
+Maintainer-email: sybren@stuvel.eu
+License: ASL 2
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Security :: Cryptography
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4
+Description-Content-Type: text/markdown
+Requires-Dist: pyasn1 (>=0.1.3)
+
+Pure Python RSA implementation
+==============================
+
+[![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/)
+[![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa)
+[![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master)
+[![Code Climate](https://img.shields.io/codeclimate/github/sybrenstuvel/python-rsa.svg)](https://codeclimate.com/github/sybrenstuvel/python-rsa)
+
+[Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports
+encryption and decryption, signing and verifying signatures, and key
+generation according to PKCS#1 version 1.5. It can be used as a Python
+library as well as on the commandline. The code was mostly written by
+Sybren A. Stüvel.
+
+Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa).
+
+Download and install using:
+
+ pip install rsa
+
+or download it from the [Python Package Index](https://pypi.org/project/rsa/).
+
+The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is
+licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+Changes in 4.1-4.4
+------------------
+
+Version 4.1 dropped support for Python 2.7, and soon after that version 4.2 was released. Neither of the two made it explicit in `setup.cfg` that Python 3.5 or newer is required. This caused issues on Python 2.7, as Pip happily upgraded to the new version.
+
+Version 4.3 is a re-tagged release of version 4.0. It is the last to support Python 2.7.
+
+Version 4.4 will be a re-tagged release of version 4.2, and explicitly require Python 3.5 or newer.
+
+
+Major changes in 4.0
+--------------------
+
+Version 3.4 was the last version in the 3.x range. Version 4.0 drops the following modules,
+as they are insecure:
+
+- `rsa._version133`
+- `rsa._version200`
+- `rsa.bigfile`
+- `rsa.varblock`
+
+Those modules were marked as deprecated in version 3.4.
+
+Furthermore, in 4.0 the I/O functions have been streamlined to always work with
+bytes on all supported versions of Python.
+
+Version 4.0 drops support for Python 2.6 and 3.3.
+
+
diff --git a/contrib/python/rsa/py2/.dist-info/entry_points.txt b/contrib/python/rsa/py2/.dist-info/entry_points.txt
new file mode 100644
index 0000000000..1c27571272
--- /dev/null
+++ b/contrib/python/rsa/py2/.dist-info/entry_points.txt
@@ -0,0 +1,8 @@
+[console_scripts]
+pyrsa-decrypt = rsa.cli:decrypt
+pyrsa-encrypt = rsa.cli:encrypt
+pyrsa-keygen = rsa.cli:keygen
+pyrsa-priv2pub = rsa.util:private_to_public
+pyrsa-sign = rsa.cli:sign
+pyrsa-verify = rsa.cli:verify
+
diff --git a/contrib/python/rsa/py2/.dist-info/top_level.txt b/contrib/python/rsa/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..703f551006
--- /dev/null
+++ b/contrib/python/rsa/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+rsa
diff --git a/contrib/python/rsa/py2/LICENSE b/contrib/python/rsa/py2/LICENSE
new file mode 100644
index 0000000000..67589cbb86
--- /dev/null
+++ b/contrib/python/rsa/py2/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/contrib/python/rsa/py2/README.md b/contrib/python/rsa/py2/README.md
new file mode 100644
index 0000000000..b7df4fbde4
--- /dev/null
+++ b/contrib/python/rsa/py2/README.md
@@ -0,0 +1,52 @@
+Pure Python RSA implementation
+==============================
+
+[![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/)
+[![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa)
+[![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master)
+[![Code Climate](https://img.shields.io/codeclimate/github/sybrenstuvel/python-rsa.svg)](https://codeclimate.com/github/sybrenstuvel/python-rsa)
+
+[Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports
+encryption and decryption, signing and verifying signatures, and key
+generation according to PKCS#1 version 1.5. It can be used as a Python
+library as well as on the commandline. The code was mostly written by
+Sybren A. Stüvel.
+
+Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa).
+
+Download and install using:
+
+ pip install rsa
+
+or download it from the [Python Package Index](https://pypi.org/project/rsa/).
+
+The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is
+licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+Changes in 4.1-4.4
+------------------
+
+Version 4.1 dropped support for Python 2.7, and soon after that version 4.2 was released. Neither of the two made it explicit in `setup.cfg` that Python 3.5 or newer is required. This caused issues on Python 2.7, as Pip happily upgraded to the new version.
+
+Version 4.3 is a re-tagged release of version 4.0. It is the last to support Python 2.7.
+
+Version 4.4 will be a re-tagged release of version 4.2, and explicitly require Python 3.5 or newer.
+
+
+Major changes in 4.0
+--------------------
+
+Version 3.4 was the last version in the 3.x range. Version 4.0 drops the following modules,
+as they are insecure:
+
+- `rsa._version133`
+- `rsa._version200`
+- `rsa.bigfile`
+- `rsa.varblock`
+
+Those modules were marked as deprecated in version 3.4.
+
+Furthermore, in 4.0 the I/O functions have been streamlined to always work with
+bytes on all supported versions of Python.
+
+Version 4.0 drops support for Python 2.6 and 3.3.
diff --git a/contrib/python/rsa/py2/rsa/__init__.py b/contrib/python/rsa/py2/rsa/__init__.py
new file mode 100644
index 0000000000..af5487f3d3
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/__init__.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RSA module
+
+Module for calculating large primes, and RSA encryption, decryption, signing
+and verification. Includes generating public and private keys.
+
+WARNING: this implementation does not use compression of the cleartext input to
+prevent repetitions, or other common security improvements. Use with care.
+
+"""
+
+from rsa.key import newkeys, PrivateKey, PublicKey
+from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
+ VerificationError, find_signature_hash, sign_hash, compute_hash
+
# Package metadata.
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2020-06-12"
__version__ = '4.5'

# Do doctest if we're run directly
if __name__ == "__main__":
    import doctest

    doctest.testmod()

# Public API of the package; mirrors the names imported above.
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
           'PrivateKey', 'DecryptionError', 'VerificationError',
           'compute_hash', 'sign_hash']
diff --git a/contrib/python/rsa/py2/rsa/_compat.py b/contrib/python/rsa/py2/rsa/_compat.py
new file mode 100644
index 0000000000..71197a55b8
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/_compat.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Python compatibility wrappers."""
+
+from __future__ import absolute_import
+
+import itertools
+import sys
+from struct import pack
+
# Largest native integer plus fixed-width maxima, used below to detect the
# machine word size.
MAX_INT = sys.maxsize
MAX_INT64 = (1 << 63) - 1
MAX_INT32 = (1 << 31) - 1
MAX_INT16 = (1 << 15) - 1

# True when running under Python 2.x.
PY2 = sys.version_info[0] == 2

# Determine the word size of the processor.
if MAX_INT == MAX_INT64:
    # 64-bit processor.
    MACHINE_WORD_SIZE = 64
elif MAX_INT == MAX_INT32:
    # 32-bit processor.
    MACHINE_WORD_SIZE = 32
else:
    # Else we just assume 64-bit processor keeping up with modern times.
    MACHINE_WORD_SIZE = 64

# Version-portable aliases: on Py2 use the long/lazy variants, on Py3 the
# builtins already behave as desired.
if PY2:
    integer_types = (int, long)
    range = xrange
    zip = itertools.izip
else:
    integer_types = (int, )
    range = range
    zip = zip
+
+
def write_to_stdout(data):
    """Write raw *data* bytes to standard output.

    :type data: bytes
    """
    # Python 3 only accepts bytes via the underlying buffer; Python 2's
    # stdout takes them directly.
    stream = sys.stdout if PY2 else sys.stdout.buffer
    stream.write(data)
+
+
def is_bytes(obj):
    """Tell whether *obj* is a byte string.

    :param obj:
        The value to test.
    :returns:
        ``True`` if *obj* is an instance of ``bytes``; ``False`` otherwise.
    """
    return isinstance(obj, bytes)
+
+
def is_integer(obj):
    """Tell whether *obj* is an integer.

    :param obj:
        The value to test.
    :returns:
        ``True`` if *obj* is one of the version-appropriate integer types
        (``int``, plus ``long`` on Python 2); ``False`` otherwise.
    """
    return isinstance(obj, integer_types)
+
+
def byte(num):
    """Pack *num* into a single base-256 byte.

    A portable replacement for ``chr`` wherever a ``bytes`` result is
    required, working identically on Python 2 and 3.

    :param num:
        An unsigned integer between 0 and 255 (both inclusive).
    :returns:
        A single byte.
    """
    return pack("B", num)
+
+
def xor_bytes(b1, b2):
    """Return the bitwise XOR of two byte strings, ``b1 ^ b2``.

    XOR is commutative, so argument order does not matter.  If the inputs
    differ in length, the surplus of the longer one is ignored.

    :param b1:
        First bytes object.
    :param b2:
        Second bytes object.
    :returns:
        Bytes object, result of XOR operation.
    """
    if PY2:
        # Py2 iterates str as 1-char strings, so go through ord()/byte().
        return ''.join(byte(ord(a) ^ ord(b)) for a, b in zip(b1, b2))
    # Py3: iterating bytes yields ints; zip stops at the shorter input.
    return bytes(a ^ b for a, b in zip(b1, b2))
+
+
def get_word_alignment(num, force_arch=64,
                       _machine_word_size=MACHINE_WORD_SIZE):
    """
    Returns alignment details for the given number based on the platform
    Python is running on.

    :param num:
        Unsigned integral number.
    :param force_arch:
        If you don't want to use 64-bit unsigned chunks, set this to
        anything other than 64. 32-bit chunks will be preferred then.
        Default 64 will be used when on a 64-bit machine.
    :param _machine_word_size:
        (Internal) The machine word size used for alignment.
    :returns:
        4-tuple::

            (word_bits, word_bytes,
             max_uint, packing_format_type)
    """
    max_uint64 = 0xffffffffffffffff
    max_uint32 = 0xffffffff
    max_uint16 = 0xffff
    max_uint8 = 0xff

    # Branches are ordered widest-first; the packing_format_type letters
    # ("Q", "L", "H", "B") are struct format codes for the chosen width.
    if force_arch == 64 and _machine_word_size >= 64 and num > max_uint32:
        # 64-bit unsigned integer.
        return 64, 8, max_uint64, "Q"
    elif num > max_uint16:
        # 32-bit unsigned integer
        return 32, 4, max_uint32, "L"
    elif num > max_uint8:
        # 16-bit unsigned integer.
        return 16, 2, max_uint16, "H"
    else:
        # 8-bit unsigned integer.
        return 8, 1, max_uint8, "B"
diff --git a/contrib/python/rsa/py2/rsa/asn1.py b/contrib/python/rsa/py2/rsa/asn1.py
new file mode 100644
index 0000000000..b724b8f53d
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/asn1.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ASN.1 definitions.
+
+Not all ASN.1-handling code uses these definitions, but when it does, they should be here.
+"""
+
+from pyasn1.type import univ, namedtype, tag
+
+
class PubKeyHeader(univ.Sequence):
    # Algorithm header of an OpenSSL-style public key: the algorithm OID
    # followed by its parameters (NULL for RSA).  Used as the 'header'
    # component of OpenSSLPubKey below.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('oid', univ.ObjectIdentifier()),
        namedtype.NamedType('parameters', univ.Null()),
    )
+
+
class OpenSSLPubKey(univ.Sequence):
    # Outer structure of an OpenSSL "BEGIN PUBLIC KEY" blob: the algorithm
    # header plus the wrapped key material.  Consumed by
    # rsa.key.PublicKey.load_pkcs1_openssl_der().
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PubKeyHeader()),

        # This little hack (the implicit tag) allows us to get a Bit String as Octet String
        namedtype.NamedType('key', univ.OctetString().subtype(
            implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3))),
    )
+
+
class AsnPubKey(univ.Sequence):
    """ASN.1 contents of DER encoded public key:

    RSAPublicKey ::= SEQUENCE {
        modulus           INTEGER,  -- n
        publicExponent    INTEGER,  -- e
    }
    """

    componentType = namedtype.NamedTypes(
        namedtype.NamedType('modulus', univ.Integer()),
        namedtype.NamedType('publicExponent', univ.Integer()),
    )
diff --git a/contrib/python/rsa/py2/rsa/cli.py b/contrib/python/rsa/py2/rsa/cli.py
new file mode 100644
index 0000000000..6450af427f
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/cli.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Commandline scripts.
+
+These scripts are called by the executables defined in setup.py.
+"""
+
+from __future__ import with_statement, print_function
+
+import abc
+import sys
+from optparse import OptionParser
+
+import rsa
+import rsa.pkcs1
+
+HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
+
+
def keygen():
    """Generate a new RSA keypair and write it to file(s) or stdout.

    Reads the key size from the single positional CLI argument; exits
    with status 1 on bad usage.
    """

    # Build the command-line interface.
    parser = OptionParser(
        usage='usage: %prog [options] keysize',
        description='Generates a new RSA keypair of "keysize" bits.')

    parser.add_option(
        '--pubout', type='string',
        help='Output filename for the public key. The public key is '
             'not saved if this option is not present. You can use '
             'pyrsa-priv2pub to create the public key file later.')

    parser.add_option(
        '-o', '--out', type='string',
        help='Output filename for the private key. The key is '
             'written to stdout if this option is not present.')

    parser.add_option(
        '--form',
        help='key format of the private and public keys - default PEM',
        choices=('PEM', 'DER'), default='PEM')

    (opts, args) = parser.parse_args(sys.argv[1:])

    # Exactly one positional argument (the key size) is required.
    if len(args) != 1:
        parser.print_help()
        raise SystemExit(1)

    try:
        keysize = int(args[0])
    except ValueError:
        parser.print_help()
        print('Not a valid number: %s' % args[0], file=sys.stderr)
        raise SystemExit(1)

    print('Generating %i-bit key' % keysize, file=sys.stderr)
    (pub_key, priv_key) = rsa.newkeys(keysize)

    # The public key is only written when explicitly requested.
    if opts.pubout:
        print('Writing public key to %s' % opts.pubout, file=sys.stderr)
        with open(opts.pubout, 'wb') as outfile:
            outfile.write(pub_key.save_pkcs1(format=opts.form))

    # The private key goes to the requested file, or to stdout by default.
    data = priv_key.save_pkcs1(format=opts.form)

    if opts.out:
        print('Writing private key to %s' % opts.out, file=sys.stderr)
        with open(opts.out, 'wb') as outfile:
            outfile.write(data)
    else:
        print('Writing private key to stdout', file=sys.stderr)
        rsa._compat.write_to_stdout(data)
+
+
class CryptoOperation(object):
    """CLI callable that operates with input, output, and a key.

    Subclasses override the class attributes below and implement
    perform_operation(); __call__() then drives the common
    read-key / read-input / operate / write-output sequence.
    """

    __metaclass__ = abc.ABCMeta  # Python 2 style abstract base class

    keyname = 'public'  # or 'private'
    # NOTE: '%%prog' survives the %-interpolation done in __init__ as
    # '%prog', which optparse later replaces with the program name.
    usage = 'usage: %%prog [options] %(keyname)s_key'
    description = None
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
                 'not specified.'
    output_help = 'Name of the file to write the %(operation_past)s file ' \
                  'to. Written to stdout if this option is not present.'
    expected_cli_args = 1
    has_output = True

    # Key type to load; subclasses operating on private keys override this.
    key_class = rsa.PublicKey

    def __init__(self):
        # Interpolate the class attributes (keyname, operation, ...) into
        # the usage/help templates.  Note this reads the subclass' own
        # __dict__, so each template key must be (re)defined on the subclass.
        self.usage = self.usage % self.__class__.__dict__
        self.input_help = self.input_help % self.__class__.__dict__
        self.output_help = self.output_help % self.__class__.__dict__

    @abc.abstractmethod
    def perform_operation(self, indata, key, cli_args):
        """Performs the program's operation.

        Implement in a subclass.

        :param indata: bytes read from the input file or stdin.
        :param key: the loaded key object (an instance of key_class).
        :param cli_args: the positional CLI arguments.
        :returns: the data to write to the output.
        """

    def __call__(self):
        """Runs the program."""

        (cli, cli_args) = self.parse_cli()

        key = self.read_key(cli_args[0], cli.keyform)

        indata = self.read_infile(cli.input)

        print(self.operation_progressive.title(), file=sys.stderr)
        outdata = self.perform_operation(indata, key, cli_args)

        # Operations such as 'verify' produce no output payload.
        if self.has_output:
            self.write_outfile(outdata, cli.output)

    def parse_cli(self):
        """Parse the CLI options

        :returns: (cli_opts, cli_args)
        """

        parser = OptionParser(usage=self.usage, description=self.description)

        parser.add_option('-i', '--input', type='string', help=self.input_help)

        if self.has_output:
            parser.add_option('-o', '--output', type='string', help=self.output_help)

        parser.add_option('--keyform',
                          help='Key format of the %s key - default PEM' % self.keyname,
                          choices=('PEM', 'DER'), default='PEM')

        (cli, cli_args) = parser.parse_args(sys.argv[1:])

        # Wrong number of positional arguments: show help and exit(1).
        if len(cli_args) != self.expected_cli_args:
            parser.print_help()
            raise SystemExit(1)

        return cli, cli_args

    def read_key(self, filename, keyform):
        """Reads a public or private key."""

        print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
        with open(filename, 'rb') as keyfile:
            keydata = keyfile.read()

        return self.key_class.load_pkcs1(keydata, keyform)

    def read_infile(self, inname):
        """Read the input file"""

        if inname:
            print('Reading input from %s' % inname, file=sys.stderr)
            with open(inname, 'rb') as infile:
                return infile.read()

        # No filename given: fall back to stdin.
        print('Reading input from stdin', file=sys.stderr)
        return sys.stdin.read()

    def write_outfile(self, outdata, outname):
        """Write the output file"""

        if outname:
            print('Writing output to %s' % outname, file=sys.stderr)
            with open(outname, 'wb') as outfile:
                outfile.write(outdata)
        else:
            # No filename given: write binary data to stdout.
            print('Writing output to stdout', file=sys.stderr)
            rsa._compat.write_to_stdout(outdata)
+
+
class EncryptOperation(CryptoOperation):
    """Encrypts a file with an RSA public key."""

    keyname = 'public'
    description = 'Encrypts a file. The file must be shorter than the key length in order to be encrypted.'
    operation = 'encrypt'
    operation_past = 'encrypted'
    operation_progressive = 'encrypting'

    def perform_operation(self, indata, pub_key, cli_args=None):
        """Encrypt ``indata`` with the given public key."""
        return rsa.encrypt(indata, pub_key)
+
+
class DecryptOperation(CryptoOperation):
    """Decrypts a file with an RSA private key."""

    keyname = 'private'
    description = 'Decrypts a file. The original file must be shorter than the key length in order to have been encrypted.'
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    key_class = rsa.PrivateKey

    def perform_operation(self, indata, priv_key, cli_args=None):
        """Decrypt ``indata`` with the given private key."""
        return rsa.decrypt(indata, priv_key)
+
+
class SignOperation(CryptoOperation):
    """Signs a file with an RSA private key."""

    keyname = 'private'
    usage = 'usage: %%prog [options] private_key hash_method'
    description = ('Signs a file, outputs the signature. Choose the hash method from %s'
                   % ', '.join(HASH_METHODS))
    operation = 'sign'
    operation_past = 'signature'
    operation_progressive = 'Signing'
    key_class = rsa.PrivateKey
    expected_cli_args = 2

    output_help = 'Name of the file to write the signature to. Written to stdout if this option is not present.'

    def perform_operation(self, indata, priv_key, cli_args):
        """Sign ``indata``; the hash method is the second CLI argument."""

        wanted_hash = cli_args[1]
        if wanted_hash not in HASH_METHODS:
            raise SystemExit('Invalid hash method, choose one of %s'
                             % ', '.join(HASH_METHODS))

        return rsa.sign(indata, priv_key, wanted_hash)
+
+
class VerifyOperation(CryptoOperation):
    """Verifies a signature against an RSA public key."""

    keyname = 'public'
    usage = 'usage: %%prog [options] public_key signature_file'
    description = ('Verifies a signature, exits with status 0 upon success, prints '
                   'an error message and exits with status 1 upon error.')
    operation = 'verify'
    operation_past = 'verified'
    operation_progressive = 'Verifying'
    key_class = rsa.PublicKey
    expected_cli_args = 2
    has_output = False

    def perform_operation(self, indata, pub_key, cli_args):
        """Verify ``indata`` against the signature file named in the CLI args."""

        sig_file = cli_args[1]
        with open(sig_file, 'rb') as sigfile:
            signature = sigfile.read()

        try:
            rsa.verify(indata, signature, pub_key)
        except rsa.VerificationError:
            raise SystemExit('Verification failed.')

        print('Verification OK', file=sys.stderr)
+
+
# Ready-made callables; these are the console-script entry points referenced
# by the executables defined in setup.py (see the module docstring).
encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
diff --git a/contrib/python/rsa/py2/rsa/common.py b/contrib/python/rsa/py2/rsa/common.py
new file mode 100644
index 0000000000..f7aa2d1496
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/common.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from rsa._compat import zip
+
+"""Common functionality shared by several modules."""
+
+
class NotRelativePrimeError(ValueError):
    """Raised when two numbers that must be relatively prime share a divisor."""

    def __init__(self, a, b, d, msg=None):
        # Build the default message only when no explicit one was given.
        if not msg:
            msg = "%d and %d are not relatively prime, divider=%i" % (a, b, d)
        super(NotRelativePrimeError, self).__init__(msg)
        self.a = a
        self.b = b
        self.d = d
+
+
def bit_size(num):
    """Number of bits needed to represent an integer, excluding leading
    zero bits.

    Usage::

        >>> bit_size(1023)
        10
        >>> bit_size(1024)
        11
        >>> bit_size(1025)
        11

    :param num:
        Integer value. If num is 0, returns 0. Only the absolute value of
        the number is considered, so signed integers behave as abs(num).
    :returns:
        The number of bits in the integer.
    :raises TypeError: when num is not an integer.
    """

    try:
        # int (and Python 2 long) expose bit_length(); anything else
        # is rejected below.
        bits = num.bit_length()
    except AttributeError:
        raise TypeError('bit_size(num) only supports integers, not %r' % type(num))
    return bits
+
+
def byte_size(number):
    """Returns the number of bytes required to hold a specific long number.

    The number of bytes is rounded up.

    Usage::

        >>> byte_size(1 << 1023)
        128
        >>> byte_size((1 << 1024) - 1)
        128
        >>> byte_size(1 << 1024)
        129

    :param number:
        An unsigned integer
    :returns:
        The number of bytes required to hold a specific long number.
    """
    # Zero still occupies one byte; everything else is ceil(bits / 8).
    return 1 if number == 0 else ceil_div(bit_size(number), 8)
+
+
def ceil_div(num, div):
    """Returns the ceiling function of a division between `num` and `div`.

    Usage::

        >>> ceil_div(100, 7)
        15
        >>> ceil_div(100, 10)
        10
        >>> ceil_div(1, 4)
        1

    :param num: Division's numerator, a number
    :param div: Division's divisor, a number

    :return: Rounded up result of the division between the parameters.
    """
    # Negate, floor-divide, negate again: with Python's floor division
    # this yields the ceiling without any branching.
    return -(-num // div)
+
+
def extended_gcd(a, b):
    """Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
    """
    # Iterative extended Euclidean algorithm (faster than the recursive
    # form and uses much less stack).  s/old_s track the coefficient of a,
    # t/old_t the coefficient of b.  Negative coefficients are wrapped
    # modulo the original b (respectively a) so only positive values are
    # returned.
    s, old_s = 0, 1
    t, old_t = 1, 0
    orig_a, orig_b = a, b  # remember originals for the final wrap-around
    while b != 0:
        quot = a // b
        a, b = b, a % b
        s, old_s = old_s - quot * s, s
        t, old_t = old_t - quot * t, t
    if old_s < 0:
        old_s += orig_b
    if old_t < 0:
        old_t += orig_a
    return a, old_s, old_t
+
+
def inverse(x, n):
    """Returns the inverse of x % n under multiplication, a.k.a x^-1 (mod n)

    >>> inverse(7, 4)
    3
    >>> (inverse(143, 4) * 143) % 4
    1

    :raises NotRelativePrimeError: when x and n share a common divisor.
    """

    divider, inv, _ = extended_gcd(x, n)

    # An inverse only exists when x and n are relatively prime.
    if divider != 1:
        raise NotRelativePrimeError(x, n, divider)

    return inv
+
+
def crt(a_values, modulo_values):
    """Chinese Remainder Theorem.

    Calculates x such that x = a[i] (mod m[i]) for each i.

    :param a_values: the a-values of the above equation
    :param modulo_values: the m-values of the above equation
    :returns: x such that x = a[i] (mod m[i]) for each i


    >>> crt([2, 3], [3, 5])
    8

    >>> crt([2, 3, 2], [3, 5, 7])
    23

    >>> crt([2, 3, 0], [7, 11, 15])
    135
    """

    # Product of all moduli; the running solution is kept reduced mod this.
    prod = 1
    for modulus in modulo_values:
        prod *= modulus

    result = 0
    for m_i, a_i in zip(modulo_values, a_values):
        # Standard CRT reconstruction term: a_i * (prod/m_i) * its inverse.
        partial = prod // m_i
        result = (result + a_i * partial * inverse(partial, m_i)) % prod

    return result
+
+
if __name__ == '__main__':
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
diff --git a/contrib/python/rsa/py2/rsa/core.py b/contrib/python/rsa/py2/rsa/core.py
new file mode 100644
index 0000000000..b3114d9e9c
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/core.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Core mathematical operations.
+
+This is the actual core RSA implementation, which is only defined
+mathematically on integers.
+"""
+
+from rsa._compat import is_integer
+
+
def assert_int(var, name):
    """Raise TypeError unless ``var`` is an integer.

    :param var: the value to check.
    :param name: the name used in the error message.
    """
    if not is_integer(var):
        raise TypeError('%s should be an integer, not %s' % (name, var.__class__))
+
+
def encrypt_int(message, ekey, n):
    """Encrypts a message using encryption key 'ekey', working modulo n"""

    # All three operands must be integers.
    for value, label in ((message, 'message'), (ekey, 'ekey'), (n, 'n')):
        assert_int(value, label)

    if message < 0:
        raise ValueError('Only non-negative numbers are supported')

    if message > n:
        raise OverflowError("The message %i is too long for n=%i" % (message, n))

    # Modular exponentiation: message ** ekey mod n.
    return pow(message, ekey, n)
+
+
def decrypt_int(cyphertext, dkey, n):
    """Decrypts a cypher text using the decryption key 'dkey', working modulo n"""

    # All three operands must be integers.
    for value, label in ((cyphertext, 'cyphertext'), (dkey, 'dkey'), (n, 'n')):
        assert_int(value, label)

    # Modular exponentiation: cyphertext ** dkey mod n.
    return pow(cyphertext, dkey, n)
diff --git a/contrib/python/rsa/py2/rsa/key.py b/contrib/python/rsa/py2/rsa/key.py
new file mode 100644
index 0000000000..1e2f6fe455
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/key.py
@@ -0,0 +1,798 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""RSA key generation code.
+
+Create new keys with the newkeys() function. It will give you a PublicKey and a
+PrivateKey object.
+
+Loading and saving keys requires the pyasn1 module. This module is imported as
+late as possible, such that other functionality will remain working in absence
+of pyasn1.
+
+.. note::
+
+ Storing public and private keys via the `pickle` module is possible.
+ However, it is insecure to load a key from an untrusted source.
+ The pickle module is not secure against erroneous or maliciously
+ constructed data. Never unpickle data received from an untrusted
+ or unauthenticated source.
+
+"""
+
+import logging
+import warnings
+
+from rsa._compat import range
+import rsa.prime
+import rsa.pem
+import rsa.common
+import rsa.randnum
+import rsa.core
+
+
+log = logging.getLogger(__name__)
+DEFAULT_EXPONENT = 65537
+
+
class AbstractKey(object):
    """Abstract superclass for private and public keys."""

    # Keep instances small; subclasses re-declare/extend these slots.
    __slots__ = ('n', 'e')

    def __init__(self, n, e):
        self.n = n  # public modulus
        self.e = e  # public exponent

    @classmethod
    def _load_pkcs1_pem(cls, keyfile):
        """Loads a key in PKCS#1 PEM format, implement in a subclass.

        :param keyfile: contents of a PEM-encoded file that contains
            the public key.
        :type keyfile: bytes

        :return: the loaded key
        :rtype: AbstractKey
        """

    @classmethod
    def _load_pkcs1_der(cls, keyfile):
        """Loads a key in PKCS#1 DER format, implement in a subclass.

        :param keyfile: contents of a DER-encoded file that contains
            the public key.
        :type keyfile: bytes

        :return: the loaded key
        :rtype: AbstractKey
        """

    def _save_pkcs1_pem(self):
        """Saves the key in PKCS#1 PEM format, implement in a subclass.

        :returns: the PEM-encoded key.
        :rtype: bytes
        """

    def _save_pkcs1_der(self):
        """Saves the key in PKCS#1 DER format, implement in a subclass.

        :returns: the DER-encoded key.
        :rtype: bytes
        """

    @classmethod
    def load_pkcs1(cls, keyfile, format='PEM'):
        """Loads a key in PKCS#1 DER or PEM format.

        :param keyfile: contents of a DER- or PEM-encoded file that contains
            the key.
        :type keyfile: bytes
        :param format: the format of the file to load; 'PEM' or 'DER'
        :type format: str

        :return: the loaded key
        :rtype: AbstractKey
        """

        # Dispatch table: format name -> subclass loader.
        methods = {
            'PEM': cls._load_pkcs1_pem,
            'DER': cls._load_pkcs1_der,
        }

        method = cls._assert_format_exists(format, methods)
        return method(keyfile)

    @staticmethod
    def _assert_format_exists(file_format, methods):
        """Checks whether the given file format exists in 'methods'.
        """

        # Raises ValueError (listing the supported formats) for unknown ones.
        try:
            return methods[file_format]
        except KeyError:
            formats = ', '.join(sorted(methods.keys()))
            raise ValueError('Unsupported format: %r, try one of %s' % (file_format,
                                                                        formats))

    def save_pkcs1(self, format='PEM'):
        """Saves the key in PKCS#1 DER or PEM format.

        :param format: the format to save; 'PEM' or 'DER'
        :type format: str
        :returns: the DER- or PEM-encoded key.
        :rtype: bytes
        """

        # Dispatch table: format name -> subclass saver.
        methods = {
            'PEM': self._save_pkcs1_pem,
            'DER': self._save_pkcs1_der,
        }

        method = self._assert_format_exists(format, methods)
        return method()

    def blind(self, message, r):
        """Performs blinding on the message using random number 'r'.

        :param message: the message, as integer, to blind.
        :type message: int
        :param r: the random number to blind with.  Must be relatively
            prime to n, since unblind() needs inverse(r, n) to exist.
        :type r: int
        :return: the blinded message.
        :rtype: int

        The blinding is such that message = unblind(decrypt(blind(encrypt(message))).

        See https://en.wikipedia.org/wiki/Blinding_%28cryptography%29
        """

        # Multiply by r^e mod n; RSA's multiplicativity makes this undoable
        # after decryption by multiplying with r^-1 (see unblind()).
        return (message * pow(r, self.e, self.n)) % self.n

    def unblind(self, blinded, r):
        """Removes blinding from the message using random number 'r'.

        :param blinded: the blinded message, as integer, to unblind.
        :param r: the random number to unblind with.
        :return: the original message.

        The blinding is such that message = unblind(decrypt(blind(encrypt(message))).

        See https://en.wikipedia.org/wiki/Blinding_%28cryptography%29
        """

        # Multiply by r^-1 mod n to cancel the factor introduced by blind().
        return (rsa.common.inverse(r, self.n) * blinded) % self.n
+
+
class PublicKey(AbstractKey):
    """Represents a public RSA key.

    This key is also known as the 'encryption key'. It contains the 'n' and 'e'
    values.

    Supports attributes as well as dictionary-like access. Attribute access is
    faster, though.

    >>> PublicKey(5, 3)
    PublicKey(5, 3)

    >>> key = PublicKey(5, 3)
    >>> key.n
    5
    >>> key['n']
    5
    >>> key.e
    3
    >>> key['e']
    3

    """

    __slots__ = ('n', 'e')

    def __getitem__(self, key):
        # Dictionary-style access: key['n'] == key.n.
        return getattr(self, key)

    def __repr__(self):
        return 'PublicKey(%i, %i)' % (self.n, self.e)

    def __getstate__(self):
        """Returns the key as tuple for pickling."""
        return self.n, self.e

    def __setstate__(self, state):
        """Sets the key from tuple."""
        self.n, self.e = state

    def __eq__(self, other):
        if other is None:
            return False

        if not isinstance(other, PublicKey):
            return False

        return self.n == other.n and self.e == other.e

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Consistent with __eq__: equal keys hash equally.
        return hash((self.n, self.e))

    @classmethod
    def _load_pkcs1_der(cls, keyfile):
        """Loads a key in PKCS#1 DER format.

        :param keyfile: contents of a DER-encoded file that contains the public
            key.
        :return: a PublicKey object

        First let's construct a DER encoded key:

        >>> import base64
        >>> b64der = 'MAwCBQCNGmYtAgMBAAE='
        >>> der = base64.standard_b64decode(b64der)

        This loads the file:

        >>> PublicKey._load_pkcs1_der(der)
        PublicKey(2367317549, 65537)

        """

        # pyasn1 is imported lazily so the rest of the module works without it.
        from pyasn1.codec.der import decoder
        from rsa.asn1 import AsnPubKey

        (priv, _) = decoder.decode(keyfile, asn1Spec=AsnPubKey())
        return cls(n=int(priv['modulus']), e=int(priv['publicExponent']))

    def _save_pkcs1_der(self):
        """Saves the public key in PKCS#1 DER format.

        :returns: the DER-encoded public key.
        :rtype: bytes
        """

        from pyasn1.codec.der import encoder
        from rsa.asn1 import AsnPubKey

        # Create the ASN object
        asn_key = AsnPubKey()
        asn_key.setComponentByName('modulus', self.n)
        asn_key.setComponentByName('publicExponent', self.e)

        return encoder.encode(asn_key)

    @classmethod
    def _load_pkcs1_pem(cls, keyfile):
        """Loads a PKCS#1 PEM-encoded public key file.

        The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
        after the "-----END RSA PUBLIC KEY-----" lines is ignored.

        :param keyfile: contents of a PEM-encoded file that contains the public
            key.
        :return: a PublicKey object
        """

        # NOTE(review): the PEM marker is passed as str here but as bytes in
        # PrivateKey._load_pkcs1_pem — presumably rsa.pem accepts both; confirm.
        der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
        return cls._load_pkcs1_der(der)

    def _save_pkcs1_pem(self):
        """Saves a PKCS#1 PEM-encoded public key file.

        :return: contents of a PEM-encoded file that contains the public key.
        :rtype: bytes
        """

        der = self._save_pkcs1_der()
        return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')

    @classmethod
    def load_pkcs1_openssl_pem(cls, keyfile):
        """Loads a PKCS#1.5 PEM-encoded public key file from OpenSSL.

        These files can be recognised in that they start with BEGIN PUBLIC KEY
        rather than BEGIN RSA PUBLIC KEY.

        The contents of the file before the "-----BEGIN PUBLIC KEY-----" and
        after the "-----END PUBLIC KEY-----" lines is ignored.

        :param keyfile: contents of a PEM-encoded file that contains the public
            key, from OpenSSL.
        :type keyfile: bytes
        :return: a PublicKey object
        """

        der = rsa.pem.load_pem(keyfile, 'PUBLIC KEY')
        return cls.load_pkcs1_openssl_der(der)

    @classmethod
    def load_pkcs1_openssl_der(cls, keyfile):
        """Loads a PKCS#1 DER-encoded public key file from OpenSSL.

        :param keyfile: contents of a DER-encoded file that contains the public
            key, from OpenSSL.
        :return: a PublicKey object
        :rtype: bytes

        """

        from rsa.asn1 import OpenSSLPubKey
        from pyasn1.codec.der import decoder
        from pyasn1.type import univ

        (keyinfo, _) = decoder.decode(keyfile, asn1Spec=OpenSSLPubKey())

        # Only the RSA encryption OID (1.2.840.113549.1.1.1) is accepted.
        if keyinfo['header']['oid'] != univ.ObjectIdentifier('1.2.840.113549.1.1.1'):
            raise TypeError("This is not a DER-encoded OpenSSL-compatible public key")

        # [1:] skips the first byte of the BIT STRING that OpenSSLPubKey reads
        # as an OCTET STRING — presumably the unused-bits count; confirm.
        return cls._load_pkcs1_der(keyinfo['key'][1:])
+
+
class PrivateKey(AbstractKey):
    """Represents a private RSA key.

    This key is also known as the 'decryption key'. It contains the 'n', 'e',
    'd', 'p', 'q' and other values.

    Supports attributes as well as dictionary-like access. Attribute access is
    faster, though.

    >>> PrivateKey(3247, 65537, 833, 191, 17)
    PrivateKey(3247, 65537, 833, 191, 17)

    exp1, exp2 and coef will be calculated:

    >>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
    >>> pk.exp1
    55063
    >>> pk.exp2
    10095
    >>> pk.coef
    50797

    """

    __slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')

    def __init__(self, n, e, d, p, q):
        AbstractKey.__init__(self, n, e)
        self.d = d  # private exponent
        self.p = p  # first prime factor of n
        self.q = q  # second prime factor of n

        # Calculate exponents and coefficient.
        # These are the CRT values stored in a PKCS#1 key (see the DER
        # layout comment in _load_pkcs1_der): d mod (p-1), d mod (q-1)
        # and the inverse of q mod p.
        self.exp1 = int(d % (p - 1))
        self.exp2 = int(d % (q - 1))
        self.coef = rsa.common.inverse(q, p)

    def __getitem__(self, key):
        # Dictionary-style access: key['d'] == key.d.
        return getattr(self, key)

    def __repr__(self):
        # The %(name)i lookups go through __getitem__ above.
        return 'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self

    def __getstate__(self):
        """Returns the key as tuple for pickling."""
        return self.n, self.e, self.d, self.p, self.q, self.exp1, self.exp2, self.coef

    def __setstate__(self, state):
        """Sets the key from tuple."""
        self.n, self.e, self.d, self.p, self.q, self.exp1, self.exp2, self.coef = state

    def __eq__(self, other):
        if other is None:
            return False

        if not isinstance(other, PrivateKey):
            return False

        return (self.n == other.n and
                self.e == other.e and
                self.d == other.d and
                self.p == other.p and
                self.q == other.q and
                self.exp1 == other.exp1 and
                self.exp2 == other.exp2 and
                self.coef == other.coef)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Consistent with __eq__: equal keys hash equally.
        return hash((self.n, self.e, self.d, self.p, self.q, self.exp1, self.exp2, self.coef))

    def _get_blinding_factor(self):
        # Pick a random r coprime with n (required by blind()/unblind());
        # give up after a bounded number of attempts instead of looping forever.
        for _ in range(1000):
            blind_r = rsa.randnum.randint(self.n - 1)
            if rsa.prime.are_relatively_prime(self.n, blind_r):
                return blind_r
        raise RuntimeError('unable to find blinding factor')

    def blinded_decrypt(self, encrypted):
        """Decrypts the message using blinding to prevent side-channel attacks.

        :param encrypted: the encrypted message
        :type encrypted: int

        :returns: the decrypted message
        :rtype: int
        """

        blind_r = self._get_blinding_factor()
        blinded = self.blind(encrypted, blind_r)  # blind before decrypting
        decrypted = rsa.core.decrypt_int(blinded, self.d, self.n)

        return self.unblind(decrypted, blind_r)

    def blinded_encrypt(self, message):
        """Encrypts the message using blinding to prevent side-channel attacks.

        Note: uses the private exponent 'd', i.e. this is the signature
        primitive, not public-key encryption.

        :param message: the message to encrypt
        :type message: int

        :returns: the encrypted message
        :rtype: int
        """

        blind_r = self._get_blinding_factor()
        blinded = self.blind(message, blind_r)  # blind before encrypting
        encrypted = rsa.core.encrypt_int(blinded, self.d, self.n)
        return self.unblind(encrypted, blind_r)

    @classmethod
    def _load_pkcs1_der(cls, keyfile):
        """Loads a key in PKCS#1 DER format.

        :param keyfile: contents of a DER-encoded file that contains the private
            key.
        :type keyfile: bytes
        :return: a PrivateKey object

        First let's construct a DER encoded key:

        >>> import base64
        >>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
        >>> der = base64.standard_b64decode(b64der)

        This loads the file:

        >>> PrivateKey._load_pkcs1_der(der)
        PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)

        """

        # pyasn1 is imported lazily so the rest of the module works without it.
        from pyasn1.codec.der import decoder
        (priv, _) = decoder.decode(keyfile)

        # ASN.1 contents of DER encoded private key:
        #
        # RSAPrivateKey ::= SEQUENCE {
        #     version           Version,
        #     modulus           INTEGER,  -- n
        #     publicExponent    INTEGER,  -- e
        #     privateExponent   INTEGER,  -- d
        #     prime1            INTEGER,  -- p
        #     prime2            INTEGER,  -- q
        #     exponent1         INTEGER,  -- d mod (p-1)
        #     exponent2         INTEGER,  -- d mod (q-1)
        #     coefficient       INTEGER,  -- (inverse of q) mod p
        #     otherPrimeInfos   OtherPrimeInfos OPTIONAL
        # }

        if priv[0] != 0:
            raise ValueError('Unable to read this file, version %s != 0' % priv[0])

        # Fields 1..5 (n, e, d, p, q) are all the constructor needs;
        # exp1/exp2/coef are recomputed in __init__.
        as_ints = map(int, priv[1:6])
        key = cls(*as_ints)

        exp1, exp2, coef = map(int, priv[6:9])

        # The stored CRT values are only checked, never trusted: the
        # recomputed ones from __init__ are what the key actually uses.
        if (key.exp1, key.exp2, key.coef) != (exp1, exp2, coef):
            warnings.warn(
                'You have provided a malformed keyfile. Either the exponents '
                'or the coefficient are incorrect. Using the correct values '
                'instead.',
                UserWarning,
            )

        return key

    def _save_pkcs1_der(self):
        """Saves the private key in PKCS#1 DER format.

        :returns: the DER-encoded private key.
        :rtype: bytes
        """

        from pyasn1.type import univ, namedtype
        from pyasn1.codec.der import encoder

        # Local ASN.1 spec mirroring the RSAPrivateKey SEQUENCE documented
        # in _load_pkcs1_der (field order matters for DER).
        class AsnPrivKey(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType('version', univ.Integer()),
                namedtype.NamedType('modulus', univ.Integer()),
                namedtype.NamedType('publicExponent', univ.Integer()),
                namedtype.NamedType('privateExponent', univ.Integer()),
                namedtype.NamedType('prime1', univ.Integer()),
                namedtype.NamedType('prime2', univ.Integer()),
                namedtype.NamedType('exponent1', univ.Integer()),
                namedtype.NamedType('exponent2', univ.Integer()),
                namedtype.NamedType('coefficient', univ.Integer()),
            )

        # Create the ASN object
        asn_key = AsnPrivKey()
        asn_key.setComponentByName('version', 0)
        asn_key.setComponentByName('modulus', self.n)
        asn_key.setComponentByName('publicExponent', self.e)
        asn_key.setComponentByName('privateExponent', self.d)
        asn_key.setComponentByName('prime1', self.p)
        asn_key.setComponentByName('prime2', self.q)
        asn_key.setComponentByName('exponent1', self.exp1)
        asn_key.setComponentByName('exponent2', self.exp2)
        asn_key.setComponentByName('coefficient', self.coef)

        return encoder.encode(asn_key)

    @classmethod
    def _load_pkcs1_pem(cls, keyfile):
        """Loads a PKCS#1 PEM-encoded private key file.

        The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
        after the "-----END RSA PRIVATE KEY-----" lines is ignored.

        :param keyfile: contents of a PEM-encoded file that contains the private
            key.
        :type keyfile: bytes
        :return: a PrivateKey object
        """

        der = rsa.pem.load_pem(keyfile, b'RSA PRIVATE KEY')
        return cls._load_pkcs1_der(der)

    def _save_pkcs1_pem(self):
        """Saves a PKCS#1 PEM-encoded private key file.

        :return: contents of a PEM-encoded file that contains the private key.
        :rtype: bytes
        """

        der = self._save_pkcs1_der()
        return rsa.pem.save_pem(der, b'RSA PRIVATE KEY')
+
+
def find_p_q(nbits, getprime_func=rsa.prime.getprime, accurate=True):
    """Returns a tuple of two different primes of nbits bits each.

    The resulting p * q has exactly 2 * nbits bits, and the returned p and q
    will not be equal.

    :param nbits: the number of bits in each of p and q.
    :param getprime_func: the getprime function, defaults to
        :py:func:`rsa.prime.getprime`.

        *Introduced in Python-RSA 3.1*

    :param accurate: whether to enable accurate mode or not.
    :returns: (p, q), where p > q

    >>> (p, q) = find_p_q(128)
    >>> from rsa import common
    >>> common.bit_size(p * q)
    256

    When not in accurate mode, the number of bits can be slightly less

    >>> (p, q) = find_p_q(128, accurate=False)
    >>> from rsa import common
    >>> common.bit_size(p * q) <= 256
    True
    >>> common.bit_size(p * q) > 240
    True

    """

    total_bits = nbits * 2

    # Keep the sizes of p and q slightly apart; primes that are too close
    # together make n easier to factor.
    shift = nbits // 16
    pbits = nbits + shift
    qbits = nbits - shift

    # Pick the two initial candidates.
    log.debug('find_p_q(%i): Finding p', nbits)
    p = getprime_func(pbits)
    log.debug('find_p_q(%i): Finding q', nbits)
    q = getprime_func(qbits)

    def acceptable(p, q):
        """True iff p != q and, in accurate mode, p*q has exactly total_bits bits."""
        if p == q:
            return False
        if not accurate:
            return True
        return rsa.common.bit_size(p * q) == total_bits

    # Regenerate p and q alternately until the pair qualifies.
    change_p = False
    while not acceptable(p, q):
        if change_p:
            p = getprime_func(pbits)
        else:
            q = getprime_func(qbits)
        change_p = not change_p

    # Return with p > q, as required for the CRT optimisation described on
    # http://www.di-mgt.com.au/rsa_alg.html#crt
    return max(p, q), min(p, q)
+
+
+def calculate_keys_custom_exponent(p, q, exponent):
+ """Calculates an encryption and a decryption key given p, q and an exponent,
+ and returns them as a tuple (e, d)
+
+ :param p: the first large prime
+ :param q: the second large prime
+ :param exponent: the exponent for the key; only change this if you know
+ what you're doing, as the exponent influences how difficult your
+ private key can be cracked. A very common choice for e is 65537.
+ :type exponent: int
+
+ """
+
+ phi_n = (p - 1) * (q - 1)
+
+ try:
+ d = rsa.common.inverse(exponent, phi_n)
+ except rsa.common.NotRelativePrimeError as ex:
+ raise rsa.common.NotRelativePrimeError(
+ exponent, phi_n, ex.d,
+ msg="e (%d) and phi_n (%d) are not relatively prime (divider=%i)" %
+ (exponent, phi_n, ex.d))
+
+ if (exponent * d) % phi_n != 1:
+ raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
+ "phi_n (%d)" % (exponent, d, phi_n))
+
+ return exponent, d
+
+
+def calculate_keys(p, q):
+ """Calculates an encryption and a decryption key given p and q, and
+ returns them as a tuple (e, d)
+
+ :param p: the first large prime
+ :param q: the second large prime
+
+ :return: tuple (e, d) with the encryption and decryption exponents.
+ """
+
+ return calculate_keys_custom_exponent(p, q, DEFAULT_EXPONENT)
+
+
+def gen_keys(nbits, getprime_func, accurate=True, exponent=DEFAULT_EXPONENT):
+ """Generate RSA keys of nbits bits. Returns (p, q, e, d).
+
+ Note: this can take a long time, depending on the key size.
+
+ :param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
+ ``q`` will use ``nbits/2`` bits.
+ :param getprime_func: either :py:func:`rsa.prime.getprime` or a function
+ with similar signature.
+ :param exponent: the exponent for the key; only change this if you know
+ what you're doing, as the exponent influences how difficult your
+ private key can be cracked. A very common choice for e is 65537.
+ :type exponent: int
+ """
+
+ # Regenerate p and q values, until calculate_keys doesn't raise a
+ # ValueError.
+ while True:
+ (p, q) = find_p_q(nbits // 2, getprime_func, accurate)
+ try:
+ (e, d) = calculate_keys_custom_exponent(p, q, exponent=exponent)
+ break
+ except ValueError:
+ pass
+
+ return p, q, e, d
+
+
+def newkeys(nbits, accurate=True, poolsize=1, exponent=DEFAULT_EXPONENT):
+ """Generates public and private keys, and returns them as (pub, priv).
+
+ The public key is also known as the 'encryption key', and is a
+ :py:class:`rsa.PublicKey` object. The private key is also known as the
+ 'decryption key' and is a :py:class:`rsa.PrivateKey` object.
+
+ :param nbits: the number of bits required to store ``n = p*q``.
+ :param accurate: when True, ``n`` will have exactly the number of bits you
+ asked for. However, this makes key generation much slower. When False,
+ `n`` may have slightly less bits.
+ :param poolsize: the number of processes to use to generate the prime
+ numbers. If set to a number > 1, a parallel algorithm will be used.
+ This requires Python 2.6 or newer.
+ :param exponent: the exponent for the key; only change this if you know
+ what you're doing, as the exponent influences how difficult your
+ private key can be cracked. A very common choice for e is 65537.
+ :type exponent: int
+
+ :returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
+
+ The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
+ Python 2.6 or newer.
+
+ """
+
+ if nbits < 16:
+ raise ValueError('Key too small')
+
+ if poolsize < 1:
+ raise ValueError('Pool size (%i) should be >= 1' % poolsize)
+
+ # Determine which getprime function to use
+ if poolsize > 1:
+ from rsa import parallel
+ import functools
+
+ getprime_func = functools.partial(parallel.getprime, poolsize=poolsize)
+ else:
+ getprime_func = rsa.prime.getprime
+
+ # Generate the key components
+ (p, q, e, d) = gen_keys(nbits, getprime_func, accurate=accurate, exponent=exponent)
+
+ # Create the key objects
+ n = p * q
+
+ return (
+ PublicKey(n, e),
+ PrivateKey(n, e, d, p, q)
+ )
+
+
+__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
+
+if __name__ == '__main__':
+ import doctest
+
+ try:
+ for count in range(100):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if (count % 10 == 0 and count) or count == 1:
+ print('%i times' % count)
+ except KeyboardInterrupt:
+ print('Aborted')
+ else:
+ print('Doctests done')
diff --git a/contrib/python/rsa/py2/rsa/machine_size.py b/contrib/python/rsa/py2/rsa/machine_size.py
new file mode 100644
index 0000000000..2a871b8f6e
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/machine_size.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Detection of 32-bit and 64-bit machines and byte alignment."""
+
+import sys
+
+MAX_INT = sys.maxsize
+MAX_INT64 = (1 << 63) - 1
+MAX_INT32 = (1 << 31) - 1
+MAX_INT16 = (1 << 15) - 1
+
+# Determine the word size of the processor.
+if MAX_INT == MAX_INT64:
+ # 64-bit processor.
+ MACHINE_WORD_SIZE = 64
+elif MAX_INT == MAX_INT32:
+ # 32-bit processor.
+ MACHINE_WORD_SIZE = 32
+else:
+ # Else we just assume 64-bit processor keeping up with modern times.
+ MACHINE_WORD_SIZE = 64
+
+
+def get_word_alignment(num, force_arch=64,
+ _machine_word_size=MACHINE_WORD_SIZE):
+ """
+ Returns alignment details for the given number based on the platform
+ Python is running on.
+
+ :param num:
+ Unsigned integral number.
+ :param force_arch:
+ If you don't want to use 64-bit unsigned chunks, set this to
+ anything other than 64. 32-bit chunks will be preferred then.
+ Default 64 will be used when on a 64-bit machine.
+ :param _machine_word_size:
+ (Internal) The machine word size used for alignment.
+ :returns:
+ 4-tuple::
+
+ (word_bits, word_bytes,
+ max_uint, packing_format_type)
+ """
+ max_uint64 = 0xffffffffffffffff
+ max_uint32 = 0xffffffff
+ max_uint16 = 0xffff
+ max_uint8 = 0xff
+
+ if force_arch == 64 and _machine_word_size >= 64 and num > max_uint32:
+ # 64-bit unsigned integer.
+ return 64, 8, max_uint64, "Q"
+ elif num > max_uint16:
+ # 32-bit unsigned integer
+ return 32, 4, max_uint32, "L"
+ elif num > max_uint8:
+ # 16-bit unsigned integer.
+ return 16, 2, max_uint16, "H"
+ else:
+ # 8-bit unsigned integer.
+ return 8, 1, max_uint8, "B"
diff --git a/contrib/python/rsa/py2/rsa/parallel.py b/contrib/python/rsa/py2/rsa/parallel.py
new file mode 100644
index 0000000000..a3fe312204
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/parallel.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for parallel computation on multiple cores.
+
+Introduced in Python-RSA 3.1.
+
+.. note::
+
+ Requires Python 2.6 or newer.
+
+"""
+
+from __future__ import print_function
+
+import multiprocessing as mp
+
+from rsa._compat import range
+import rsa.prime
+import rsa.randnum
+
+
+def _find_prime(nbits, pipe):
+ while True:
+ integer = rsa.randnum.read_random_odd_int(nbits)
+
+ # Test for primeness
+ if rsa.prime.is_prime(integer):
+ pipe.send(integer)
+ return
+
+
+def getprime(nbits, poolsize):
+ """Returns a prime number that can be stored in 'nbits' bits.
+
+ Works in multiple threads at the same time.
+
+ >>> p = getprime(128, 3)
+ >>> rsa.prime.is_prime(p-1)
+ False
+ >>> rsa.prime.is_prime(p)
+ True
+ >>> rsa.prime.is_prime(p+1)
+ False
+
+ >>> from rsa import common
+ >>> common.bit_size(p) == 128
+ True
+
+ """
+
+ (pipe_recv, pipe_send) = mp.Pipe(duplex=False)
+
+ # Create processes
+ try:
+ procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
+ for _ in range(poolsize)]
+ # Start processes
+ for p in procs:
+ p.start()
+
+ result = pipe_recv.recv()
+ finally:
+ pipe_recv.close()
+ pipe_send.close()
+
+ # Terminate processes
+ for p in procs:
+ p.terminate()
+
+ return result
+
+
+__all__ = ['getprime']
+
+if __name__ == '__main__':
+ print('Running doctests 1000x or until failure')
+ import doctest
+
+ for count in range(100):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 10 == 0 and count:
+ print('%i times' % count)
+
+ print('Doctests done')
diff --git a/contrib/python/rsa/py2/rsa/pem.py b/contrib/python/rsa/py2/rsa/pem.py
new file mode 100644
index 0000000000..2ddfae86e2
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/pem.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions that load and write PEM-encoded files."""
+
+import base64
+
+from rsa._compat import is_bytes, range
+
+
+def _markers(pem_marker):
+ """
+ Returns the start and end PEM markers, as bytes.
+ """
+
+ if not is_bytes(pem_marker):
+ pem_marker = pem_marker.encode('ascii')
+
+ return (b'-----BEGIN ' + pem_marker + b'-----',
+ b'-----END ' + pem_marker + b'-----')
+
+
+def load_pem(contents, pem_marker):
+ """Loads a PEM file.
+
+ :param contents: the contents of the file to interpret
+ :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
+ when your file has '-----BEGIN RSA PRIVATE KEY-----' and
+ '-----END RSA PRIVATE KEY-----' markers.
+
+ :return: the base64-decoded content between the start and end markers.
+
+ @raise ValueError: when the content is invalid, for example when the start
+ marker cannot be found.
+
+ """
+
+ # We want bytes, not text. If it's text, it can be converted to ASCII bytes.
+ if not is_bytes(contents):
+ contents = contents.encode('ascii')
+
+ (pem_start, pem_end) = _markers(pem_marker)
+
+ pem_lines = []
+ in_pem_part = False
+
+ for line in contents.splitlines():
+ line = line.strip()
+
+ # Skip empty lines
+ if not line:
+ continue
+
+ # Handle start marker
+ if line == pem_start:
+ if in_pem_part:
+ raise ValueError('Seen start marker "%s" twice' % pem_start)
+
+ in_pem_part = True
+ continue
+
+ # Skip stuff before first marker
+ if not in_pem_part:
+ continue
+
+ # Handle end marker
+ if in_pem_part and line == pem_end:
+ in_pem_part = False
+ break
+
+ # Load fields
+ if b':' in line:
+ continue
+
+ pem_lines.append(line)
+
+ # Do some sanity checks
+ if not pem_lines:
+ raise ValueError('No PEM start marker "%s" found' % pem_start)
+
+ if in_pem_part:
+ raise ValueError('No PEM end marker "%s" found' % pem_end)
+
+ # Base64-decode the contents
+ pem = b''.join(pem_lines)
+ return base64.standard_b64decode(pem)
+
+
+def save_pem(contents, pem_marker):
+ """Saves a PEM file.
+
+ :param contents: the contents to encode in PEM format
+ :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
+ when your file has '-----BEGIN RSA PRIVATE KEY-----' and
+ '-----END RSA PRIVATE KEY-----' markers.
+
+ :return: the base64-encoded content between the start and end markers, as bytes.
+
+ """
+
+ (pem_start, pem_end) = _markers(pem_marker)
+
+ b64 = base64.standard_b64encode(contents).replace(b'\n', b'')
+ pem_lines = [pem_start]
+
+ for block_start in range(0, len(b64), 64):
+ block = b64[block_start:block_start + 64]
+ pem_lines.append(block)
+
+ pem_lines.append(pem_end)
+ pem_lines.append(b'')
+
+ return b'\n'.join(pem_lines)
diff --git a/contrib/python/rsa/py2/rsa/pkcs1.py b/contrib/python/rsa/py2/rsa/pkcs1.py
new file mode 100644
index 0000000000..c05239afce
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/pkcs1.py
@@ -0,0 +1,448 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for PKCS#1 version 1.5 encryption and signing
+
+This module implements certain functionality from PKCS#1 version 1.5. For a
+very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
+
+At least 8 bytes of random padding is used when encrypting a message. This makes
+these methods much more secure than the ones in the ``rsa`` module.
+
+WARNING: this module leaks information when decryption fails. The exceptions
+that are raised contain the Python traceback information, which can be used to
+deduce where in the process the failure occurred. DO NOT PASS SUCH INFORMATION
+to your users.
+"""
+
+import hashlib
+import os
+
+from rsa._compat import range
+from rsa import common, transform, core
+
+# ASN.1 codes that describe the hash algorithm used.
+HASH_ASN1 = {
+ 'MD5': b'\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10',
+ 'SHA-1': b'\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14',
+ 'SHA-224': b'\x30\x2d\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04\x05\x00\x04\x1c',
+ 'SHA-256': b'\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20',
+ 'SHA-384': b'\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30',
+ 'SHA-512': b'\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40',
+}
+
+HASH_METHODS = {
+ 'MD5': hashlib.md5,
+ 'SHA-1': hashlib.sha1,
+ 'SHA-224': hashlib.sha224,
+ 'SHA-256': hashlib.sha256,
+ 'SHA-384': hashlib.sha384,
+ 'SHA-512': hashlib.sha512,
+}
+
+
+class CryptoError(Exception):
+ """Base class for all exceptions in this module."""
+
+
+class DecryptionError(CryptoError):
+ """Raised when decryption fails."""
+
+
+class VerificationError(CryptoError):
+ """Raised when verification fails."""
+
+
+def _pad_for_encryption(message, target_length):
+ r"""Pads the message for encryption, returning the padded message.
+
+ :return: 00 02 RANDOM_DATA 00 MESSAGE
+
+ >>> block = _pad_for_encryption(b'hello', 16)
+ >>> len(block)
+ 16
+ >>> block[0:2]
+ b'\x00\x02'
+ >>> block[-6:]
+ b'\x00hello'
+
+ """
+
+ max_msglength = target_length - 11
+ msglength = len(message)
+
+ if msglength > max_msglength:
+ raise OverflowError('%i bytes needed for message, but there is only'
+ ' space for %i' % (msglength, max_msglength))
+
+ # Get random padding
+ padding = b''
+ padding_length = target_length - msglength - 3
+
+ # We remove 0-bytes, so we'll end up with less padding than we've asked for,
+ # so keep adding data until we're at the correct length.
+ while len(padding) < padding_length:
+ needed_bytes = padding_length - len(padding)
+
+ # Always read at least 8 bytes more than we need, and trim off the rest
+ # after removing the 0-bytes. This increases the chance of getting
+ # enough bytes, especially when needed_bytes is small
+ new_padding = os.urandom(needed_bytes + 5)
+ new_padding = new_padding.replace(b'\x00', b'')
+ padding = padding + new_padding[:needed_bytes]
+
+ assert len(padding) == padding_length
+
+ return b''.join([b'\x00\x02',
+ padding,
+ b'\x00',
+ message])
+
+
+def _pad_for_signing(message, target_length):
+ r"""Pads the message for signing, returning the padded message.
+
+ The padding is always a repetition of FF bytes.
+
+ :return: 00 01 PADDING 00 MESSAGE
+
+ >>> block = _pad_for_signing(b'hello', 16)
+ >>> len(block)
+ 16
+ >>> block[0:2]
+ b'\x00\x01'
+ >>> block[-6:]
+ b'\x00hello'
+ >>> block[2:-6]
+ b'\xff\xff\xff\xff\xff\xff\xff\xff'
+
+ """
+
+ max_msglength = target_length - 11
+ msglength = len(message)
+
+ if msglength > max_msglength:
+ raise OverflowError('%i bytes needed for message, but there is only'
+ ' space for %i' % (msglength, max_msglength))
+
+ padding_length = target_length - msglength - 3
+
+ return b''.join([b'\x00\x01',
+ padding_length * b'\xff',
+ b'\x00',
+ message])
+
+
+def encrypt(message, pub_key):
+ """Encrypts the given message using PKCS#1 v1.5
+
+ :param message: the message to encrypt. Must be a byte string no longer than
+ ``k-11`` bytes, where ``k`` is the number of bytes needed to encode
+ the ``n`` component of the public key.
+ :param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
+ :raise OverflowError: when the message is too large to fit in the padded
+ block.
+
+ >>> from rsa import key, common
+ >>> (pub_key, priv_key) = key.newkeys(256)
+ >>> message = b'hello'
+ >>> crypto = encrypt(message, pub_key)
+
+ The crypto text should be just as long as the public key 'n' component:
+
+ >>> len(crypto) == common.byte_size(pub_key.n)
+ True
+
+ """
+
+ keylength = common.byte_size(pub_key.n)
+ padded = _pad_for_encryption(message, keylength)
+
+ payload = transform.bytes2int(padded)
+ encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
+ block = transform.int2bytes(encrypted, keylength)
+
+ return block
+
+
+def decrypt(crypto, priv_key):
+ r"""Decrypts the given message using PKCS#1 v1.5
+
+ The decryption is considered 'failed' when the resulting cleartext doesn't
+ start with the bytes 00 02, or when the 00 byte between the padding and
+ the message cannot be found.
+
+ :param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
+ :param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
+ :raise DecryptionError: when the decryption fails. No details are given as
+ to why the code thinks the decryption fails, as this would leak
+ information about the private key.
+
+
+ >>> import rsa
+ >>> (pub_key, priv_key) = rsa.newkeys(256)
+
+ It works with strings:
+
+ >>> crypto = encrypt(b'hello', pub_key)
+ >>> decrypt(crypto, priv_key)
+ b'hello'
+
+ And with binary data:
+
+ >>> crypto = encrypt(b'\x00\x00\x00\x00\x01', pub_key)
+ >>> decrypt(crypto, priv_key)
+ b'\x00\x00\x00\x00\x01'
+
+ Altering the encrypted information will *likely* cause a
+ :py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
+ :py:func:`rsa.sign`.
+
+
+ .. warning::
+
+ Never display the stack trace of a
+ :py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
+ code the exception occurred, and thus leaks information about the key.
+ It's only a tiny bit of information, but every bit makes cracking the
+ keys easier.
+
+ >>> crypto = encrypt(b'hello', pub_key)
+ >>> crypto = crypto[0:5] + b'X' + crypto[6:] # change a byte
+ >>> decrypt(crypto, priv_key)
+ Traceback (most recent call last):
+ ...
+ rsa.pkcs1.DecryptionError: Decryption failed
+
+ """
+
+ blocksize = common.byte_size(priv_key.n)
+ encrypted = transform.bytes2int(crypto)
+ decrypted = priv_key.blinded_decrypt(encrypted)
+ cleartext = transform.int2bytes(decrypted, blocksize)
+
+ # Detect leading zeroes in the crypto. These are not reflected in the
+ # encrypted value (as leading zeroes do not influence the value of an
+ # integer). This fixes CVE-2020-13757.
+ if len(crypto) > blocksize:
+ raise DecryptionError('Decryption failed')
+
+ # If we can't find the cleartext marker, decryption failed.
+ if cleartext[0:2] != b'\x00\x02':
+ raise DecryptionError('Decryption failed')
+
+ # Find the 00 separator between the padding and the message
+ try:
+ sep_idx = cleartext.index(b'\x00', 2)
+ except ValueError:
+ raise DecryptionError('Decryption failed')
+
+ return cleartext[sep_idx + 1:]
+
+
+def sign_hash(hash_value, priv_key, hash_method):
+ """Signs a precomputed hash with the private key.
+
+ Hashes the message, then signs the hash with the given key. This is known
+ as a "detached signature", because the message itself isn't altered.
+
+ :param hash_value: A precomputed hash to sign (ignores message). Should be set to
+ None if needing to hash and sign message.
+ :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
+ :param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
+ 'SHA-224', SHA-256', 'SHA-384' or 'SHA-512'.
+ :return: a message signature block.
+ :raise OverflowError: if the private key is too small to contain the
+ requested hash.
+
+ """
+
+ # Get the ASN1 code for this hash method
+ if hash_method not in HASH_ASN1:
+ raise ValueError('Invalid hash method: %s' % hash_method)
+ asn1code = HASH_ASN1[hash_method]
+
+ # Encrypt the hash with the private key
+ cleartext = asn1code + hash_value
+ keylength = common.byte_size(priv_key.n)
+ padded = _pad_for_signing(cleartext, keylength)
+
+ payload = transform.bytes2int(padded)
+ encrypted = priv_key.blinded_encrypt(payload)
+ block = transform.int2bytes(encrypted, keylength)
+
+ return block
+
+
+def sign(message, priv_key, hash_method):
+ """Signs the message with the private key.
+
+ Hashes the message, then signs the hash with the given key. This is known
+ as a "detached signature", because the message itself isn't altered.
+
+ :param message: the message to sign. Can be an 8-bit string or a file-like
+ object. If ``message`` has a ``read()`` method, it is assumed to be a
+ file-like object.
+ :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
+ :param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
+ 'SHA-224', SHA-256', 'SHA-384' or 'SHA-512'.
+ :return: a message signature block.
+ :raise OverflowError: if the private key is too small to contain the
+ requested hash.
+
+ """
+
+ msg_hash = compute_hash(message, hash_method)
+ return sign_hash(msg_hash, priv_key, hash_method)
+
+
+def verify(message, signature, pub_key):
+ """Verifies that the signature matches the message.
+
+ The hash method is detected automatically from the signature.
+
+ :param message: the signed message. Can be an 8-bit string or a file-like
+ object. If ``message`` has a ``read()`` method, it is assumed to be a
+ file-like object.
+ :param signature: the signature block, as created with :py:func:`rsa.sign`.
+ :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
+ :raise VerificationError: when the signature doesn't match the message.
+ :returns: the name of the used hash.
+
+ """
+
+ keylength = common.byte_size(pub_key.n)
+ encrypted = transform.bytes2int(signature)
+ decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
+ clearsig = transform.int2bytes(decrypted, keylength)
+
+ # Get the hash method
+ method_name = _find_method_hash(clearsig)
+ message_hash = compute_hash(message, method_name)
+
+ # Reconstruct the expected padded hash
+ cleartext = HASH_ASN1[method_name] + message_hash
+ expected = _pad_for_signing(cleartext, keylength)
+
+ if len(signature) != keylength:
+ raise VerificationError('Verification failed')
+
+ # Compare with the signed one
+ if expected != clearsig:
+ raise VerificationError('Verification failed')
+
+ return method_name
+
+
+def find_signature_hash(signature, pub_key):
+ """Returns the hash name detected from the signature.
+
+ If you also want to verify the message, use :py:func:`rsa.verify()` instead.
+ It also returns the name of the used hash.
+
+ :param signature: the signature block, as created with :py:func:`rsa.sign`.
+ :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
+ :returns: the name of the used hash.
+ """
+
+ keylength = common.byte_size(pub_key.n)
+ encrypted = transform.bytes2int(signature)
+ decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
+ clearsig = transform.int2bytes(decrypted, keylength)
+
+ return _find_method_hash(clearsig)
+
+
+def yield_fixedblocks(infile, blocksize):
+ """Generator, yields each block of ``blocksize`` bytes in the input file.
+
+ :param infile: file to read and separate in blocks.
+ :param blocksize: block size in bytes.
+ :returns: a generator that yields the contents of each block
+ """
+
+ while True:
+ block = infile.read(blocksize)
+
+ read_bytes = len(block)
+ if read_bytes == 0:
+ break
+
+ yield block
+
+ if read_bytes < blocksize:
+ break
+
+
+def compute_hash(message, method_name):
+ """Returns the message digest.
+
+ :param message: the signed message. Can be an 8-bit string or a file-like
+ object. If ``message`` has a ``read()`` method, it is assumed to be a
+ file-like object.
+ :param method_name: the hash method, must be a key of
+ :py:const:`HASH_METHODS`.
+
+ """
+
+ if method_name not in HASH_METHODS:
+ raise ValueError('Invalid hash method: %s' % method_name)
+
+ method = HASH_METHODS[method_name]
+ hasher = method()
+
+ if hasattr(message, 'read') and hasattr(message.read, '__call__'):
+ # read as 1K blocks
+ for block in yield_fixedblocks(message, 1024):
+ hasher.update(block)
+ else:
+ # hash the message object itself.
+ hasher.update(message)
+
+ return hasher.digest()
+
+
+def _find_method_hash(clearsig):
+ """Finds the hash method.
+
+ :param clearsig: full padded ASN1 and hash.
+ :return: the used hash method.
+ :raise VerificationFailed: when the hash method cannot be found
+ """
+
+ for (hashname, asn1code) in HASH_ASN1.items():
+ if asn1code in clearsig:
+ return hashname
+
+ raise VerificationError('Verification failed')
+
+
+__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
+ 'DecryptionError', 'VerificationError', 'CryptoError']
+
+if __name__ == '__main__':
+ print('Running doctests 1000x or until failure')
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 100 == 0 and count:
+ print('%i times' % count)
+
+ print('Doctests done')
diff --git a/contrib/python/rsa/py2/rsa/pkcs1_v2.py b/contrib/python/rsa/py2/rsa/pkcs1_v2.py
new file mode 100644
index 0000000000..5f9c7ddcea
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/pkcs1_v2.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for PKCS#1 version 2 encryption and signing
+
+This module implements certain functionality from PKCS#1 version 2. Main
+documentation is RFC 2437: https://tools.ietf.org/html/rfc2437
+"""
+
+from rsa._compat import range
+from rsa import (
+ common,
+ pkcs1,
+ transform,
+)
+
+
+def mgf1(seed, length, hasher='SHA-1'):
+ """
+ MGF1 is a Mask Generation Function based on a hash function.
+
+ A mask generation function takes an octet string of variable length and a
+ desired output length as input, and outputs an octet string of the desired
+ length. The plaintext-awareness of RSAES-OAEP relies on the random nature of
+ the output of the mask generation function, which in turn relies on the
+ random nature of the underlying hash.
+
+ :param bytes seed: seed from which mask is generated, an octet string
+ :param int length: intended length in octets of the mask, at most 2^32(hLen)
+ :param str hasher: hash function (hLen denotes the length in octets of the hash
+ function output)
+
+ :return: mask, an octet string of length `length`
+ :rtype: bytes
+
+ :raise OverflowError: when `length` is too large for the specified `hasher`
+ :raise ValueError: when specified `hasher` is invalid
+ """
+
+ try:
+ hash_length = pkcs1.HASH_METHODS[hasher]().digest_size
+ except KeyError:
+ raise ValueError(
+ 'Invalid `hasher` specified. Please select one of: {hash_list}'.format(
+ hash_list=', '.join(sorted(pkcs1.HASH_METHODS.keys()))
+ )
+ )
+
+ # If l > 2^32(hLen), output "mask too long" and stop.
+ if length > (2**32 * hash_length):
+ raise OverflowError(
+ "Desired length should be at most 2**32 times the hasher's output "
+ "length ({hash_length} for {hasher} function)".format(
+ hash_length=hash_length,
+ hasher=hasher,
+ )
+ )
+
+ # Looping `counter` from 0 to ceil(l / hLen)-1, build `output` based on the
+ # hashes formed by (`seed` + C), being `C` an octet string of length 4
+ # generated by converting `counter` with the primitive I2OSP
+ output = b''.join(
+ pkcs1.compute_hash(
+ seed + transform.int2bytes(counter, fill_size=4),
+ method_name=hasher,
+ )
+ for counter in range(common.ceil_div(length, hash_length) + 1)
+ )
+
+ # Output the leading `length` octets of `output` as the octet string mask.
+ return output[:length]
+
+
+__all__ = [
+ 'mgf1',
+]
+
+if __name__ == '__main__':
+ print('Running doctests 1000x or until failure')
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 100 == 0 and count:
+ print('%i times' % count)
+
+ print('Doctests done')
diff --git a/contrib/python/rsa/py2/rsa/prime.py b/contrib/python/rsa/py2/rsa/prime.py
new file mode 100644
index 0000000000..3d63542e66
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/prime.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Numerical functions related to primes.
+
+Implementation based on the book Algorithm Design by Michael T. Goodrich and
+Roberto Tamassia, 2002.
+"""
+
+from rsa._compat import range
+import rsa.common
+import rsa.randnum
+
+__all__ = ['getprime', 'are_relatively_prime']
+
+
+def gcd(p, q):
+ """Returns the greatest common divisor of p and q
+
+ >>> gcd(48, 180)
+ 12
+ """
+
+ while q != 0:
+ (p, q) = (q, p % q)
+ return p
+
+
+def get_primality_testing_rounds(number):
+ """Returns minimum number of rounds for Miller-Rabing primality testing,
+ based on number bitsize.
+
+ According to NIST FIPS 186-4, Appendix C, Table C.3, minimum number of
+ rounds of M-R testing, using an error probability of 2 ** (-100), for
+ different p, q bitsizes are:
+ * p, q bitsize: 512; rounds: 7
+ * p, q bitsize: 1024; rounds: 4
+ * p, q bitsize: 1536; rounds: 3
+ See: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
+ """
+
+ # Calculate number bitsize.
+ bitsize = rsa.common.bit_size(number)
+ # Set number of rounds.
+ if bitsize >= 1536:
+ return 3
+ if bitsize >= 1024:
+ return 4
+ if bitsize >= 512:
+ return 7
+ # For smaller bitsizes, set arbitrary number of rounds.
+ return 10
+
+
+def miller_rabin_primality_testing(n, k):
+ """Calculates whether n is composite (which is always correct) or prime
+ (which theoretically is incorrect with error probability 4**-k), by
+ applying Miller-Rabin primality testing.
+
+ For reference and implementation example, see:
+ https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
+
+ :param n: Integer to be tested for primality.
+ :type n: int
+ :param k: Number of rounds (witnesses) of Miller-Rabin testing.
+ :type k: int
+ :return: False if the number is composite, True if it's probably prime.
+ :rtype: bool
+ """
+
+ # prevent potential infinite loop when d = 0
+ if n < 2:
+ return False
+
+ # Decompose (n - 1) to write it as (2 ** r) * d
+ # While d is even, divide it by 2 and increase the exponent.
+ d = n - 1
+ r = 0
+
+ while not (d & 1):
+ r += 1
+ d >>= 1
+
+ # Test k witnesses.
+ for _ in range(k):
+ # Generate random integer a, where 2 <= a <= (n - 2)
+ a = rsa.randnum.randint(n - 3) + 1
+
+ x = pow(a, d, n)
+ if x == 1 or x == n - 1:
+ continue
+
+ for _ in range(r - 1):
+ x = pow(x, 2, n)
+ if x == 1:
+ # n is composite.
+ return False
+ if x == n - 1:
+ # Exit inner loop and continue with next witness.
+ break
+ else:
+ # If loop doesn't break, n is composite.
+ return False
+
+ return True
+
+
+def is_prime(number):
+ """Returns True if the number is prime, and False otherwise.
+
+ >>> is_prime(2)
+ True
+ >>> is_prime(42)
+ False
+ >>> is_prime(41)
+ True
+ """
+
+ # Check for small numbers.
+ if number < 10:
+ return number in {2, 3, 5, 7}
+
+ # Check for even numbers.
+ if not (number & 1):
+ return False
+
+ # Calculate minimum number of rounds.
+ k = get_primality_testing_rounds(number)
+
+ # Run primality testing with (minimum + 1) rounds.
+ return miller_rabin_primality_testing(number, k + 1)
+
+
+def getprime(nbits):
+ """Returns a prime number that can be stored in 'nbits' bits.
+
+ >>> p = getprime(128)
+ >>> is_prime(p-1)
+ False
+ >>> is_prime(p)
+ True
+ >>> is_prime(p+1)
+ False
+
+ >>> from rsa import common
+ >>> common.bit_size(p) == 128
+ True
+ """
+
+ assert nbits > 3 # the loop wil hang on too small numbers
+
+ while True:
+ integer = rsa.randnum.read_random_odd_int(nbits)
+
+ # Test for primeness
+ if is_prime(integer):
+ return integer
+
+ # Retry if not prime
+
+
+def are_relatively_prime(a, b):
+ """Returns True if a and b are relatively prime, and False if they
+ are not.
+
+ >>> are_relatively_prime(2, 3)
+ True
+ >>> are_relatively_prime(2, 4)
+ False
+ """
+
+ d = gcd(a, b)
+ return d == 1
+
+
+if __name__ == '__main__':
+ print('Running doctests 1000x or until failure')
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 100 == 0 and count:
+ print('%i times' % count)
+
+ print('Doctests done')
diff --git a/contrib/python/rsa/py2/rsa/randnum.py b/contrib/python/rsa/py2/rsa/randnum.py
new file mode 100644
index 0000000000..310acaa620
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/randnum.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for generating random numbers."""
+
+# Source inspired by code by Yesudeep Mangalapilly <yesudeep@gmail.com>
+
+import os
+
+from rsa import common, transform
+from rsa._compat import byte
+
+
+def read_random_bits(nbits):
+ """Reads 'nbits' random bits.
+
+ If nbits isn't a whole number of bytes, an extra byte will be appended with
+ only the lower bits set.
+ """
+
+ nbytes, rbits = divmod(nbits, 8)
+
+ # Get the random bytes
+ randomdata = os.urandom(nbytes)
+
+ # Add the remaining random bits
+ if rbits > 0:
+ randomvalue = ord(os.urandom(1))
+ randomvalue >>= (8 - rbits)
+ randomdata = byte(randomvalue) + randomdata
+
+ return randomdata
+
+
+def read_random_int(nbits):
+ """Reads a random integer of approximately nbits bits.
+ """
+
+ randomdata = read_random_bits(nbits)
+ value = transform.bytes2int(randomdata)
+
+ # Ensure that the number is large enough to just fill out the required
+ # number of bits.
+ value |= 1 << (nbits - 1)
+
+ return value
+
+
+def read_random_odd_int(nbits):
+ """Reads a random odd integer of approximately nbits bits.
+
+ >>> read_random_odd_int(512) & 1
+ 1
+ """
+
+ value = read_random_int(nbits)
+
+ # Make sure it's odd
+ return value | 1
+
+
+def randint(maxvalue):
+ """Returns a random integer x with 1 <= x <= maxvalue
+
+ May take a very long time in specific situations. If maxvalue needs N bits
+ to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
+ is.
+ """
+
+ bit_size = common.bit_size(maxvalue)
+
+ tries = 0
+ while True:
+ value = read_random_int(bit_size)
+ if value <= maxvalue:
+ break
+
+ if tries % 10 == 0 and tries:
+ # After a lot of tries to get the right number of bits but still
+ # smaller than maxvalue, decrease the number of bits by 1. That'll
+ # dramatically increase the chances to get a large enough number.
+ bit_size -= 1
+ tries += 1
+
+ return value
diff --git a/contrib/python/rsa/py2/rsa/transform.py b/contrib/python/rsa/py2/rsa/transform.py
new file mode 100644
index 0000000000..628d0afb55
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/transform.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Data transformation functions.
+
+From bytes to a number, number to bytes, etc.
+"""
+
+from __future__ import absolute_import
+
+import binascii
+from struct import pack
+
+from rsa._compat import byte, is_integer
+from rsa import common, machine_size
+
+
+def bytes2int(raw_bytes):
+ r"""Converts a list of bytes or an 8-bit string to an integer.
+
+ When using unicode strings, encode it to some encoding like UTF8 first.
+
+ >>> (((128 * 256) + 64) * 256) + 15
+ 8405007
+ >>> bytes2int(b'\x80@\x0f')
+ 8405007
+
+ """
+
+ return int(binascii.hexlify(raw_bytes), 16)
+
+
+def _int2bytes(number, block_size=None):
+ r"""Converts a number to a string of bytes.
+
+ Usage::
+
+ >>> _int2bytes(123456789)
+ b'\x07[\xcd\x15'
+ >>> bytes2int(_int2bytes(123456789))
+ 123456789
+
+ >>> _int2bytes(123456789, 6)
+ b'\x00\x00\x07[\xcd\x15'
+ >>> bytes2int(_int2bytes(123456789, 128))
+ 123456789
+
+ >>> _int2bytes(123456789, 3)
+ Traceback (most recent call last):
+ ...
+ OverflowError: Needed 4 bytes for number, but block size is 3
+
+ @param number: the number to convert
+ @param block_size: the number of bytes to output. If the number encoded to
+ bytes is less than this, the block will be zero-padded. When not given,
+ the returned block is not padded.
+
+ @throws OverflowError when block_size is given and the number takes up more
+ bytes than fit into the block.
+ """
+
+ # Type checking
+ if not is_integer(number):
+ raise TypeError("You must pass an integer for 'number', not %s" %
+ number.__class__)
+
+ if number < 0:
+ raise ValueError('Negative numbers cannot be used: %i' % number)
+
+ # Do some bounds checking
+ if number == 0:
+ needed_bytes = 1
+ raw_bytes = [b'\x00']
+ else:
+ needed_bytes = common.byte_size(number)
+ raw_bytes = []
+
+ # You cannot compare None > 0 in Python 3x. It will fail with a TypeError.
+ if block_size and block_size > 0:
+ if needed_bytes > block_size:
+ raise OverflowError('Needed %i bytes for number, but block size '
+ 'is %i' % (needed_bytes, block_size))
+
+ # Convert the number to bytes.
+ while number > 0:
+ raw_bytes.insert(0, byte(number & 0xFF))
+ number >>= 8
+
+ # Pad with zeroes to fill the block
+ if block_size and block_size > 0:
+ padding = (block_size - needed_bytes) * b'\x00'
+ else:
+ padding = b''
+
+ return padding + b''.join(raw_bytes)
+
+
+def bytes_leading(raw_bytes, needle=b'\x00'):
+ """
+ Finds the number of prefixed byte occurrences in the haystack.
+
+ Useful when you want to deal with padding.
+
+ :param raw_bytes:
+ Raw bytes.
+ :param needle:
+ The byte to count. Default \x00.
+ :returns:
+ The number of leading needle bytes.
+ """
+
+ leading = 0
+ # Indexing keeps compatibility between Python 2.x and Python 3.x
+ _byte = needle[0]
+ for x in raw_bytes:
+ if x == _byte:
+ leading += 1
+ else:
+ break
+ return leading
+
+
+def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
+ """
+ Convert an unsigned integer to bytes (base-256 representation)::
+
+ Does not preserve leading zeros if you don't specify a chunk size or
+ fill size.
+
+ .. NOTE:
+ You must not specify both fill_size and chunk_size. Only one
+ of them is allowed.
+
+ :param number:
+ Integer value
+ :param fill_size:
+ If the optional fill size is given the length of the resulting
+ byte string is expected to be the fill size and will be padded
+ with prefix zero bytes to satisfy that length.
+ :param chunk_size:
+ If optional chunk size is given and greater than zero, pad the front of
+ the byte string with binary zeros so that the length is a multiple of
+ ``chunk_size``.
+ :param overflow:
+ ``False`` (default). If this is ``True``, no ``OverflowError``
+ will be raised when the fill_size is shorter than the length
+ of the generated byte sequence. Instead the byte sequence will
+ be returned as is.
+ :returns:
+ Raw bytes (base-256 representation).
+ :raises:
+ ``OverflowError`` when fill_size is given and the number takes up more
+ bytes than fit into the block. This requires the ``overflow``
+ argument to this function to be set to ``False`` otherwise, no
+ error will be raised.
+ """
+
+ if number < 0:
+ raise ValueError("Number must be an unsigned integer: %d" % number)
+
+ if fill_size and chunk_size:
+ raise ValueError("You can either fill or pad chunks, but not both")
+
+ # Ensure these are integers.
+ number & 1
+
+ raw_bytes = b''
+
+ # Pack the integer one machine word at a time into bytes.
+ num = number
+ word_bits, _, max_uint, pack_type = machine_size.get_word_alignment(num)
+ pack_format = ">%s" % pack_type
+ while num > 0:
+ raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
+ num >>= word_bits
+ # Obtain the index of the first non-zero byte.
+ zero_leading = bytes_leading(raw_bytes)
+ if number == 0:
+ raw_bytes = b'\x00'
+ # De-padding.
+ raw_bytes = raw_bytes[zero_leading:]
+
+ length = len(raw_bytes)
+ if fill_size and fill_size > 0:
+ if not overflow and length > fill_size:
+ raise OverflowError(
+ "Need %d bytes for number, but fill size is %d" %
+ (length, fill_size)
+ )
+ raw_bytes = raw_bytes.rjust(fill_size, b'\x00')
+ elif chunk_size and chunk_size > 0:
+ remainder = length % chunk_size
+ if remainder:
+ padding_size = chunk_size - remainder
+ raw_bytes = raw_bytes.rjust(length + padding_size, b'\x00')
+ return raw_bytes
+
+
+if __name__ == '__main__':
+ import doctest
+
+ doctest.testmod()
diff --git a/contrib/python/rsa/py2/rsa/util.py b/contrib/python/rsa/py2/rsa/util.py
new file mode 100644
index 0000000000..29d5eb1218
--- /dev/null
+++ b/contrib/python/rsa/py2/rsa/util.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions."""
+
+from __future__ import with_statement, print_function
+
+import sys
+from optparse import OptionParser
+
+import rsa.key
+
+
+def private_to_public():
+ """Reads a private key and outputs the corresponding public key."""
+
+ # Parse the CLI options
+ parser = OptionParser(usage='usage: %prog [options]',
+ description='Reads a private key and outputs the '
+ 'corresponding public key. Both private and public keys use '
+ 'the format described in PKCS#1 v1.5')
+
+ parser.add_option('-i', '--input', dest='infilename', type='string',
+ help='Input filename. Reads from stdin if not specified')
+ parser.add_option('-o', '--output', dest='outfilename', type='string',
+ help='Output filename. Writes to stdout of not specified')
+
+ parser.add_option('--inform', dest='inform',
+ help='key format of input - default PEM',
+ choices=('PEM', 'DER'), default='PEM')
+
+ parser.add_option('--outform', dest='outform',
+ help='key format of output - default PEM',
+ choices=('PEM', 'DER'), default='PEM')
+
+ (cli, cli_args) = parser.parse_args(sys.argv)
+
+ # Read the input data
+ if cli.infilename:
+ print('Reading private key from %s in %s format' %
+ (cli.infilename, cli.inform), file=sys.stderr)
+ with open(cli.infilename, 'rb') as infile:
+ in_data = infile.read()
+ else:
+ print('Reading private key from stdin in %s format' % cli.inform,
+ file=sys.stderr)
+ in_data = sys.stdin.read().encode('ascii')
+
+ assert type(in_data) == bytes, type(in_data)
+
+ # Take the public fields and create a public key
+ priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
+ pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)
+
+ # Save to the output file
+ out_data = pub_key.save_pkcs1(cli.outform)
+
+ if cli.outfilename:
+ print('Writing public key to %s in %s format' %
+ (cli.outfilename, cli.outform), file=sys.stderr)
+ with open(cli.outfilename, 'wb') as outfile:
+ outfile.write(out_data)
+ else:
+ print('Writing public key to stdout in %s format' % cli.outform,
+ file=sys.stderr)
+ sys.stdout.write(out_data.decode('ascii'))
diff --git a/contrib/python/rsa/py2/tests/__init__.py b/contrib/python/rsa/py2/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/__init__.py
diff --git a/contrib/python/rsa/py2/tests/private.pem b/contrib/python/rsa/py2/tests/private.pem
new file mode 100644
index 0000000000..1a17279f23
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/private.pem
@@ -0,0 +1,5 @@
+-----BEGIN RSA PRIVATE KEY-----
+MGECAQACEQCvWovlXBvfEeOMZPEleO9NAgMBAAECEA20Y+6fDkaWvC24horBzQEC
+CQDdS2PAL/tK4QIJAMratZuNnT3tAghs7iNYA0ZrgQIIQQ5nU93U4fkCCHR55el6
+/K+2
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/python/rsa/py2/tests/test_cli.py b/contrib/python/rsa/py2/tests/test_cli.py
new file mode 100644
index 0000000000..7ce57ebd99
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_cli.py
@@ -0,0 +1,296 @@
+"""
+Unit tests for CLI entry points.
+"""
+
+from __future__ import print_function
+
+import unittest
+import sys
+import functools
+from contextlib import contextmanager
+
+import os
+from io import StringIO, BytesIO
+
+import rsa
+import rsa.cli
+import rsa.util
+from rsa._compat import PY2
+
+
+def make_buffer():
+ if PY2:
+ return BytesIO()
+ buf = StringIO()
+ buf.buffer = BytesIO()
+ return buf
+
+
+def get_bytes_out(out):
+ if PY2:
+ # Python 2.x writes 'str' to stdout
+ return out.getvalue()
+ # Python 3.x writes 'bytes' to stdout.buffer
+ return out.buffer.getvalue()
+
+
+@contextmanager
+def captured_output():
+ """Captures output to stdout and stderr"""
+
+ new_out, new_err = make_buffer(), make_buffer()
+ old_out, old_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout, sys.stderr = new_out, new_err
+ yield new_out, new_err
+ finally:
+ sys.stdout, sys.stderr = old_out, old_err
+
+
+@contextmanager
+def cli_args(*new_argv):
+ """Updates sys.argv[1:] for a single test."""
+
+ old_args = sys.argv[:]
+ sys.argv[1:] = [str(arg) for arg in new_argv]
+
+ try:
+ yield
+ finally:
+ sys.argv[1:] = old_args
+
+
+def remove_if_exists(fname):
+ """Removes a file if it exists."""
+
+ if os.path.exists(fname):
+ os.unlink(fname)
+
+
+def cleanup_files(*filenames):
+ """Makes sure the files don't exist when the test runs, and deletes them afterward."""
+
+ def remove():
+ for fname in filenames:
+ remove_if_exists(fname)
+
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ remove()
+ try:
+ return func(*args, **kwargs)
+ finally:
+ remove()
+
+ return wrapper
+
+ return decorator
+
+
+class AbstractCliTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Ensure there is a key to use
+ cls.pub_key, cls.priv_key = rsa.newkeys(512)
+ cls.pub_fname = '%s.pub' % cls.__name__
+ cls.priv_fname = '%s.key' % cls.__name__
+
+ with open(cls.pub_fname, 'wb') as outfile:
+ outfile.write(cls.pub_key.save_pkcs1())
+
+ with open(cls.priv_fname, 'wb') as outfile:
+ outfile.write(cls.priv_key.save_pkcs1())
+
+ @classmethod
+ def tearDownClass(cls):
+ if hasattr(cls, 'pub_fname'):
+ remove_if_exists(cls.pub_fname)
+ if hasattr(cls, 'priv_fname'):
+ remove_if_exists(cls.priv_fname)
+
+ def assertExits(self, status_code, func, *args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except SystemExit as ex:
+ if status_code == ex.code:
+ return
+ self.fail('SystemExit() raised by %r, but exited with code %r, expected %r' % (
+ func, ex.code, status_code))
+ else:
+ self.fail('SystemExit() not raised by %r' % func)
+
+
+class KeygenTest(AbstractCliTest):
+ def test_keygen_no_args(self):
+ with cli_args():
+ self.assertExits(1, rsa.cli.keygen)
+
+ def test_keygen_priv_stdout(self):
+ with captured_output() as (out, err):
+ with cli_args(128):
+ rsa.cli.keygen()
+
+ lines = get_bytes_out(out).splitlines()
+ self.assertEqual(b'-----BEGIN RSA PRIVATE KEY-----', lines[0])
+ self.assertEqual(b'-----END RSA PRIVATE KEY-----', lines[-1])
+
+ # The key size should be shown on stderr
+ self.assertTrue('128-bit key' in err.getvalue())
+
+ @cleanup_files('test_cli_privkey_out.pem')
+ def test_keygen_priv_out_pem(self):
+ with captured_output() as (out, err):
+ with cli_args('--out=test_cli_privkey_out.pem', '--form=PEM', 128):
+ rsa.cli.keygen()
+
+ # The key size should be shown on stderr
+ self.assertTrue('128-bit key' in err.getvalue())
+
+ # The output file should be shown on stderr
+ self.assertTrue('test_cli_privkey_out.pem' in err.getvalue())
+
+ # If we can load the file as PEM, it's good enough.
+ with open('test_cli_privkey_out.pem', 'rb') as pemfile:
+ rsa.PrivateKey.load_pkcs1(pemfile.read())
+
+ @cleanup_files('test_cli_privkey_out.der')
+ def test_keygen_priv_out_der(self):
+ with captured_output() as (out, err):
+ with cli_args('--out=test_cli_privkey_out.der', '--form=DER', 128):
+ rsa.cli.keygen()
+
+ # The key size should be shown on stderr
+ self.assertTrue('128-bit key' in err.getvalue())
+
+ # The output file should be shown on stderr
+ self.assertTrue('test_cli_privkey_out.der' in err.getvalue())
+
+ # If we can load the file as der, it's good enough.
+ with open('test_cli_privkey_out.der', 'rb') as derfile:
+ rsa.PrivateKey.load_pkcs1(derfile.read(), format='DER')
+
+ @cleanup_files('test_cli_privkey_out.pem', 'test_cli_pubkey_out.pem')
+ def test_keygen_pub_out_pem(self):
+ with captured_output() as (out, err):
+ with cli_args('--out=test_cli_privkey_out.pem',
+ '--pubout=test_cli_pubkey_out.pem',
+ '--form=PEM', 256):
+ rsa.cli.keygen()
+
+ # The key size should be shown on stderr
+ self.assertTrue('256-bit key' in err.getvalue())
+
+ # The output files should be shown on stderr
+ self.assertTrue('test_cli_privkey_out.pem' in err.getvalue())
+ self.assertTrue('test_cli_pubkey_out.pem' in err.getvalue())
+
+ # If we can load the file as PEM, it's good enough.
+ with open('test_cli_pubkey_out.pem', 'rb') as pemfile:
+ rsa.PublicKey.load_pkcs1(pemfile.read())
+
+
+class EncryptDecryptTest(AbstractCliTest):
+ def test_empty_decrypt(self):
+ with cli_args():
+ self.assertExits(1, rsa.cli.decrypt)
+
+ def test_empty_encrypt(self):
+ with cli_args():
+ self.assertExits(1, rsa.cli.encrypt)
+
+ @cleanup_files('encrypted.txt', 'cleartext.txt')
+ def test_encrypt_decrypt(self):
+ with open('cleartext.txt', 'wb') as outfile:
+ outfile.write(b'Hello cleartext RSA users!')
+
+ with cli_args('-i', 'cleartext.txt', '--out=encrypted.txt', self.pub_fname):
+ with captured_output():
+ rsa.cli.encrypt()
+
+ with cli_args('-i', 'encrypted.txt', self.priv_fname):
+ with captured_output() as (out, err):
+ rsa.cli.decrypt()
+
+ # We should have the original cleartext on stdout now.
+ output = get_bytes_out(out)
+ self.assertEqual(b'Hello cleartext RSA users!', output)
+
+ @cleanup_files('encrypted.txt', 'cleartext.txt')
+ def test_encrypt_decrypt_unhappy(self):
+ with open('cleartext.txt', 'wb') as outfile:
+ outfile.write(b'Hello cleartext RSA users!')
+
+ with cli_args('-i', 'cleartext.txt', '--out=encrypted.txt', self.pub_fname):
+ with captured_output():
+ rsa.cli.encrypt()
+
+ # Change a few bytes in the encrypted stream.
+ with open('encrypted.txt', 'r+b') as encfile:
+ encfile.seek(40)
+ encfile.write(b'hahaha')
+
+ with cli_args('-i', 'encrypted.txt', self.priv_fname):
+ with captured_output() as (out, err):
+ self.assertRaises(rsa.DecryptionError, rsa.cli.decrypt)
+
+
+class SignVerifyTest(AbstractCliTest):
+ def test_empty_verify(self):
+ with cli_args():
+ self.assertExits(1, rsa.cli.verify)
+
+ def test_empty_sign(self):
+ with cli_args():
+ self.assertExits(1, rsa.cli.sign)
+
+ @cleanup_files('signature.txt', 'cleartext.txt')
+ def test_sign_verify(self):
+ with open('cleartext.txt', 'wb') as outfile:
+ outfile.write(b'Hello RSA users!')
+
+ with cli_args('-i', 'cleartext.txt', '--out=signature.txt', self.priv_fname, 'SHA-256'):
+ with captured_output():
+ rsa.cli.sign()
+
+ with cli_args('-i', 'cleartext.txt', self.pub_fname, 'signature.txt'):
+ with captured_output() as (out, err):
+ rsa.cli.verify()
+
+ self.assertFalse(b'Verification OK' in get_bytes_out(out))
+
+ @cleanup_files('signature.txt', 'cleartext.txt')
+ def test_sign_verify_unhappy(self):
+ with open('cleartext.txt', 'wb') as outfile:
+ outfile.write(b'Hello RSA users!')
+
+ with cli_args('-i', 'cleartext.txt', '--out=signature.txt', self.priv_fname, 'SHA-256'):
+ with captured_output():
+ rsa.cli.sign()
+
+ # Change a few bytes in the cleartext file.
+ with open('cleartext.txt', 'r+b') as encfile:
+ encfile.seek(6)
+ encfile.write(b'DSA')
+
+ with cli_args('-i', 'cleartext.txt', self.pub_fname, 'signature.txt'):
+ with captured_output() as (out, err):
+ self.assertExits('Verification failed.', rsa.cli.verify)
+
+
+class PrivatePublicTest(AbstractCliTest):
+ """Test CLI command to convert a private to a public key."""
+
+ @cleanup_files('test_private_to_public.pem')
+ def test_private_to_public(self):
+
+ with cli_args('-i', self.priv_fname, '-o', 'test_private_to_public.pem'):
+ with captured_output():
+ rsa.util.private_to_public()
+
+ # Check that the key is indeed valid.
+ with open('test_private_to_public.pem', 'rb') as pemfile:
+ key = rsa.PublicKey.load_pkcs1(pemfile.read())
+
+ self.assertEqual(self.priv_key.n, key.n)
+ self.assertEqual(self.priv_key.e, key.e)
diff --git a/contrib/python/rsa/py2/tests/test_common.py b/contrib/python/rsa/py2/tests/test_common.py
new file mode 100644
index 0000000000..af13695a7e
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_common.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import struct
+from rsa._compat import byte
+from rsa.common import byte_size, bit_size, inverse
+
+
+class TestByte(unittest.TestCase):
+ def test_values(self):
+ self.assertEqual(byte(0), b'\x00')
+ self.assertEqual(byte(255), b'\xff')
+
+ def test_struct_error_when_out_of_bounds(self):
+ self.assertRaises(struct.error, byte, 256)
+ self.assertRaises(struct.error, byte, -1)
+
+
+class TestByteSize(unittest.TestCase):
+ def test_values(self):
+ self.assertEqual(byte_size(1 << 1023), 128)
+ self.assertEqual(byte_size((1 << 1024) - 1), 128)
+ self.assertEqual(byte_size(1 << 1024), 129)
+ self.assertEqual(byte_size(255), 1)
+ self.assertEqual(byte_size(256), 2)
+ self.assertEqual(byte_size(0xffff), 2)
+ self.assertEqual(byte_size(0xffffff), 3)
+ self.assertEqual(byte_size(0xffffffff), 4)
+ self.assertEqual(byte_size(0xffffffffff), 5)
+ self.assertEqual(byte_size(0xffffffffffff), 6)
+ self.assertEqual(byte_size(0xffffffffffffff), 7)
+ self.assertEqual(byte_size(0xffffffffffffffff), 8)
+
+ def test_zero(self):
+ self.assertEqual(byte_size(0), 1)
+
+ def test_bad_type(self):
+ self.assertRaises(TypeError, byte_size, [])
+ self.assertRaises(TypeError, byte_size, ())
+ self.assertRaises(TypeError, byte_size, dict())
+ self.assertRaises(TypeError, byte_size, "")
+ self.assertRaises(TypeError, byte_size, None)
+
+
+class TestBitSize(unittest.TestCase):
+ def test_zero(self):
+ self.assertEqual(bit_size(0), 0)
+
+ def test_values(self):
+ self.assertEqual(bit_size(1023), 10)
+ self.assertEqual(bit_size(1024), 11)
+ self.assertEqual(bit_size(1025), 11)
+ self.assertEqual(bit_size(1 << 1024), 1025)
+ self.assertEqual(bit_size((1 << 1024) + 1), 1025)
+ self.assertEqual(bit_size((1 << 1024) - 1), 1024)
+
+ def test_negative_values(self):
+ self.assertEqual(bit_size(-1023), 10)
+ self.assertEqual(bit_size(-1024), 11)
+ self.assertEqual(bit_size(-1025), 11)
+ self.assertEqual(bit_size(-1 << 1024), 1025)
+ self.assertEqual(bit_size(-((1 << 1024) + 1)), 1025)
+ self.assertEqual(bit_size(-((1 << 1024) - 1)), 1024)
+
+ def test_bad_type(self):
+ self.assertRaises(TypeError, bit_size, [])
+ self.assertRaises(TypeError, bit_size, ())
+ self.assertRaises(TypeError, bit_size, dict())
+ self.assertRaises(TypeError, bit_size, "")
+ self.assertRaises(TypeError, bit_size, None)
+ self.assertRaises(TypeError, bit_size, 0.0)
+
+
+class TestInverse(unittest.TestCase):
+ def test_normal(self):
+ self.assertEqual(3, inverse(7, 4))
+ self.assertEqual(9, inverse(5, 11))
+
+ def test_not_relprime(self):
+ self.assertRaises(ValueError, inverse, 4, 8)
+ self.assertRaises(ValueError, inverse, 25, 5)
diff --git a/contrib/python/rsa/py2/tests/test_compat.py b/contrib/python/rsa/py2/tests/test_compat.py
new file mode 100644
index 0000000000..62e933f25f
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_compat.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import struct
+
+from rsa._compat import byte, is_bytes, range, xor_bytes
+
+
+class TestByte(unittest.TestCase):
+ """Tests for single bytes."""
+
+ def test_byte(self):
+ for i in range(256):
+ byt = byte(i)
+ self.assertTrue(is_bytes(byt))
+ self.assertEqual(ord(byt), i)
+
+ def test_raises_StructError_on_overflow(self):
+ self.assertRaises(struct.error, byte, 256)
+ self.assertRaises(struct.error, byte, -1)
+
+ def test_byte_literal(self):
+ self.assertIsInstance(b'abc', bytes)
+
+
+class TestBytes(unittest.TestCase):
+ """Tests for bytes objects."""
+
+ def setUp(self):
+ self.b1 = b'\xff\xff\xff\xff'
+ self.b2 = b'\x00\x00\x00\x00'
+ self.b3 = b'\xf0\xf0\xf0\xf0'
+ self.b4 = b'\x4d\x23\xca\xe2'
+ self.b5 = b'\x9b\x61\x3b\xdc'
+ self.b6 = b'\xff\xff'
+
+ self.byte_strings = (self.b1, self.b2, self.b3, self.b4, self.b5, self.b6)
+
+ def test_xor_bytes(self):
+ self.assertEqual(xor_bytes(self.b1, self.b2), b'\xff\xff\xff\xff')
+ self.assertEqual(xor_bytes(self.b1, self.b3), b'\x0f\x0f\x0f\x0f')
+ self.assertEqual(xor_bytes(self.b1, self.b4), b'\xb2\xdc\x35\x1d')
+ self.assertEqual(xor_bytes(self.b1, self.b5), b'\x64\x9e\xc4\x23')
+ self.assertEqual(xor_bytes(self.b2, self.b3), b'\xf0\xf0\xf0\xf0')
+ self.assertEqual(xor_bytes(self.b2, self.b4), b'\x4d\x23\xca\xe2')
+ self.assertEqual(xor_bytes(self.b2, self.b5), b'\x9b\x61\x3b\xdc')
+ self.assertEqual(xor_bytes(self.b3, self.b4), b'\xbd\xd3\x3a\x12')
+ self.assertEqual(xor_bytes(self.b3, self.b5), b'\x6b\x91\xcb\x2c')
+ self.assertEqual(xor_bytes(self.b4, self.b5), b'\xd6\x42\xf1\x3e')
+
+ def test_xor_bytes_length(self):
+ self.assertEqual(xor_bytes(self.b1, self.b6), b'\x00\x00')
+ self.assertEqual(xor_bytes(self.b2, self.b6), b'\xff\xff')
+ self.assertEqual(xor_bytes(self.b3, self.b6), b'\x0f\x0f')
+ self.assertEqual(xor_bytes(self.b4, self.b6), b'\xb2\xdc')
+ self.assertEqual(xor_bytes(self.b5, self.b6), b'\x64\x9e')
+ self.assertEqual(xor_bytes(self.b6, b''), b'')
+
+ def test_xor_bytes_commutative(self):
+ for first in self.byte_strings:
+ for second in self.byte_strings:
+ min_length = min(len(first), len(second))
+ result = xor_bytes(first, second)
+
+ self.assertEqual(result, xor_bytes(second, first))
+ self.assertEqual(len(result), min_length)
diff --git a/contrib/python/rsa/py2/tests/test_integers.py b/contrib/python/rsa/py2/tests/test_integers.py
new file mode 100644
index 0000000000..fb29ba41d3
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_integers.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests integer operations."""
+
+import unittest
+
+import rsa
+import rsa.core
+
+
+class IntegerTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(64)
+
+ def test_enc_dec(self):
+ message = 42
+ print("\tMessage: %d" % message)
+
+ encrypted = rsa.core.encrypt_int(message, self.pub.e, self.pub.n)
+ print("\tEncrypted: %d" % encrypted)
+
+ decrypted = rsa.core.decrypt_int(encrypted, self.priv.d, self.pub.n)
+ print("\tDecrypted: %d" % decrypted)
+
+ self.assertEqual(message, decrypted)
+
+ def test_sign_verify(self):
+ message = 42
+
+ signed = rsa.core.encrypt_int(message, self.priv.d, self.pub.n)
+ print("\tSigned: %d" % signed)
+
+ verified = rsa.core.decrypt_int(signed, self.pub.e, self.pub.n)
+ print("\tVerified: %d" % verified)
+
+ self.assertEqual(message, verified)
diff --git a/contrib/python/rsa/py2/tests/test_key.py b/contrib/python/rsa/py2/tests/test_key.py
new file mode 100644
index 0000000000..9db30cedf6
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_key.py
@@ -0,0 +1,79 @@
+"""
+Some tests for the rsa/key.py file.
+"""
+
+import unittest
+
+import rsa.key
+import rsa.core
+
+
+class BlindingTest(unittest.TestCase):
+ def test_blinding(self):
+ """Test blinding and unblinding.
+
+ This is basically the doctest of the PrivateKey.blind method, but then
+ implemented as unittest to allow running on different Python versions.
+ """
+
+ pk = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ message = 12345
+ encrypted = rsa.core.encrypt_int(message, pk.e, pk.n)
+
+ blinded = pk.blind(encrypted, 4134431) # blind before decrypting
+ decrypted = rsa.core.decrypt_int(blinded, pk.d, pk.n)
+ unblinded = pk.unblind(decrypted, 4134431)
+
+ self.assertEqual(unblinded, message)
+
+
+class KeyGenTest(unittest.TestCase):
+ def test_custom_exponent(self):
+ priv, pub = rsa.key.newkeys(16, exponent=3)
+
+ self.assertEqual(3, priv.e)
+ self.assertEqual(3, pub.e)
+
+ def test_default_exponent(self):
+ priv, pub = rsa.key.newkeys(16)
+
+ self.assertEqual(0x10001, priv.e)
+ self.assertEqual(0x10001, pub.e)
+
+ def test_exponents_coefficient_calculation(self):
+ pk = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ self.assertEqual(pk.exp1, 55063)
+ self.assertEqual(pk.exp2, 10095)
+ self.assertEqual(pk.coef, 50797)
+
+ def test_custom_getprime_func(self):
+ # List of primes to test with, in order [p, q, p, q, ....]
+ # By starting with two of the same primes, we test that this is
+ # properly rejected.
+ primes = [64123, 64123, 64123, 50957, 39317, 33107]
+
+ def getprime(_):
+ return primes.pop(0)
+
+ # This exponent will cause two other primes to be generated.
+ exponent = 136407
+
+ (p, q, e, d) = rsa.key.gen_keys(64,
+ accurate=False,
+ getprime_func=getprime,
+ exponent=exponent)
+ self.assertEqual(39317, p)
+ self.assertEqual(33107, q)
+
+
+class HashTest(unittest.TestCase):
+ """Test hashing of keys"""
+
+ def test_hash_possible(self):
+ priv, pub = rsa.key.newkeys(16)
+
+ # This raises a TypeError when hashing isn't possible.
+ hash(priv)
+ hash(pub)
diff --git a/contrib/python/rsa/py2/tests/test_load_save_keys.py b/contrib/python/rsa/py2/tests/test_load_save_keys.py
new file mode 100644
index 0000000000..967c946e74
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_load_save_keys.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for saving and loading keys."""
+
+import base64
+import mock
+import os.path
+import pickle
+import unittest
+import warnings
+
+from rsa._compat import range
+import rsa.key
+
+B64PRIV_DER = b'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
+PRIVATE_DER = base64.standard_b64decode(B64PRIV_DER)
+
+B64PUB_DER = b'MAwCBQDeKYlRAgMBAAE='
+PUBLIC_DER = base64.standard_b64decode(B64PUB_DER)
+
+PRIVATE_PEM = b'''\
+-----BEGIN CONFUSING STUFF-----
+Cruft before the key
+
+-----BEGIN RSA PRIVATE KEY-----
+Comment: something blah
+
+''' + B64PRIV_DER + b'''
+-----END RSA PRIVATE KEY-----
+
+Stuff after the key
+-----END CONFUSING STUFF-----
+'''
+
+CLEAN_PRIVATE_PEM = b'''\
+-----BEGIN RSA PRIVATE KEY-----
+''' + B64PRIV_DER + b'''
+-----END RSA PRIVATE KEY-----
+'''
+
+PUBLIC_PEM = b'''\
+-----BEGIN CONFUSING STUFF-----
+Cruft before the key
+
+-----BEGIN RSA PUBLIC KEY-----
+Comment: something blah
+
+''' + B64PUB_DER + b'''
+-----END RSA PUBLIC KEY-----
+
+Stuff after the key
+-----END CONFUSING STUFF-----
+'''
+
+CLEAN_PUBLIC_PEM = b'''\
+-----BEGIN RSA PUBLIC KEY-----
+''' + B64PUB_DER + b'''
+-----END RSA PUBLIC KEY-----
+'''
+
+
+class DerTest(unittest.TestCase):
+ """Test saving and loading DER keys."""
+
+ def test_load_private_key(self):
+ """Test loading private DER keys."""
+
+ key = rsa.key.PrivateKey.load_pkcs1(PRIVATE_DER, 'DER')
+ expected = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ self.assertEqual(expected, key)
+ self.assertEqual(key.exp1, 55063)
+ self.assertEqual(key.exp2, 10095)
+ self.assertEqual(key.coef, 50797)
+
+ @mock.patch('pyasn1.codec.der.decoder.decode')
+ def test_load_malformed_private_key(self, der_decode):
+ """Test loading malformed private DER keys."""
+
+ # Decode returns an invalid exp2 value.
+ der_decode.return_value = (
+ [0, 3727264081, 65537, 3349121513, 65063, 57287, 55063, 0, 50797],
+ 0,
+ )
+
+ with warnings.catch_warnings(record=True) as w:
+ # Always print warnings
+ warnings.simplefilter('always')
+
+ # Load 3 keys
+ for _ in range(3):
+ key = rsa.key.PrivateKey.load_pkcs1(PRIVATE_DER, 'DER')
+
+ # Check that 3 warnings were generated.
+ self.assertEqual(3, len(w))
+
+ for warning in w:
+ self.assertTrue(issubclass(warning.category, UserWarning))
+ self.assertIn('malformed', str(warning.message))
+
+ # Check that we are creating the key with correct values
+ self.assertEqual(key.exp1, 55063)
+ self.assertEqual(key.exp2, 10095)
+ self.assertEqual(key.coef, 50797)
+
+ def test_save_private_key(self):
+ """Test saving private DER keys."""
+
+ key = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+ der = key.save_pkcs1('DER')
+
+ self.assertIsInstance(der, bytes)
+ self.assertEqual(PRIVATE_DER, der)
+
+ def test_load_public_key(self):
+ """Test loading public DER keys."""
+
+ key = rsa.key.PublicKey.load_pkcs1(PUBLIC_DER, 'DER')
+ expected = rsa.key.PublicKey(3727264081, 65537)
+
+ self.assertEqual(expected, key)
+
+ def test_save_public_key(self):
+ """Test saving public DER keys."""
+
+ key = rsa.key.PublicKey(3727264081, 65537)
+ der = key.save_pkcs1('DER')
+
+ self.assertIsInstance(der, bytes)
+ self.assertEqual(PUBLIC_DER, der)
+
+
+class PemTest(unittest.TestCase):
+ """Test saving and loading PEM keys."""
+
+ def test_load_private_key(self):
+ """Test loading private PEM files."""
+
+ key = rsa.key.PrivateKey.load_pkcs1(PRIVATE_PEM, 'PEM')
+ expected = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ self.assertEqual(expected, key)
+ self.assertEqual(key.exp1, 55063)
+ self.assertEqual(key.exp2, 10095)
+ self.assertEqual(key.coef, 50797)
+
+ def test_save_private_key(self):
+ """Test saving private PEM files."""
+
+ key = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+ pem = key.save_pkcs1('PEM')
+
+ self.assertIsInstance(pem, bytes)
+ self.assertEqual(CLEAN_PRIVATE_PEM, pem)
+
+ def test_load_public_key(self):
+ """Test loading public PEM files."""
+
+ key = rsa.key.PublicKey.load_pkcs1(PUBLIC_PEM, 'PEM')
+ expected = rsa.key.PublicKey(3727264081, 65537)
+
+ self.assertEqual(expected, key)
+
+ def test_save_public_key(self):
+ """Test saving public PEM files."""
+
+ key = rsa.key.PublicKey(3727264081, 65537)
+ pem = key.save_pkcs1('PEM')
+
+ self.assertIsInstance(pem, bytes)
+ self.assertEqual(CLEAN_PUBLIC_PEM, pem)
+
+ def test_load_from_disk(self):
+ from yatest.common import source_path
+
+ """Test loading a PEM file from disk."""
+
+ fname = source_path('contrib/python/rsa/py2/tests/private.pem')
+ with open(fname, mode='rb') as privatefile:
+ keydata = privatefile.read()
+ privkey = rsa.key.PrivateKey.load_pkcs1(keydata)
+
+ self.assertEqual(15945948582725241569, privkey.p)
+ self.assertEqual(14617195220284816877, privkey.q)
+
+
+class PickleTest(unittest.TestCase):
+ """Test saving and loading keys by pickling."""
+
+ def test_private_key(self):
+ pk = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ pickled = pickle.dumps(pk)
+ unpickled = pickle.loads(pickled)
+ self.assertEqual(pk, unpickled)
+
+ def test_public_key(self):
+ pk = rsa.key.PublicKey(3727264081, 65537)
+
+ pickled = pickle.dumps(pk)
+ unpickled = pickle.loads(pickled)
+
+ self.assertEqual(pk, unpickled)
diff --git a/contrib/python/rsa/py2/tests/test_parallel.py b/contrib/python/rsa/py2/tests/test_parallel.py
new file mode 100644
index 0000000000..1a69e9ece6
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_parallel.py
@@ -0,0 +1,20 @@
+"""Test for multiprocess prime generation."""
+
+import unittest
+
+import rsa.prime
+import rsa.parallel
+import rsa.common
+
+
+class ParallelTest(unittest.TestCase):
+ """Tests for multiprocess prime generation."""
+
+ def test_parallel_primegen(self):
+ p = rsa.parallel.getprime(1024, 3)
+
+ self.assertFalse(rsa.prime.is_prime(p - 1))
+ self.assertTrue(rsa.prime.is_prime(p))
+ self.assertFalse(rsa.prime.is_prime(p + 1))
+
+ self.assertEqual(1024, rsa.common.bit_size(p))
diff --git a/contrib/python/rsa/py2/tests/test_pem.py b/contrib/python/rsa/py2/tests/test_pem.py
new file mode 100644
index 0000000000..5fb96002af
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_pem.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from rsa._compat import is_bytes
+from rsa.pem import _markers
+import rsa.key
+
+# 512-bit key. Too small for practical purposes, but good enough for testing with.
+public_key_pem = '''
+-----BEGIN PUBLIC KEY-----
+MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKH0aYP9ZFuctlPnXhEyHjgc8ltKKx9M
+0c+h4sKMXwjhjbQAZdtWIw8RRghpUJnKj+6bN2XzZDazyULxgPhtax0CAwEAAQ==
+-----END PUBLIC KEY-----
+'''
+
+private_key_pem = '''
+-----BEGIN RSA PRIVATE KEY-----
+MIIBOwIBAAJBAKH0aYP9ZFuctlPnXhEyHjgc8ltKKx9M0c+h4sKMXwjhjbQAZdtW
+Iw8RRghpUJnKj+6bN2XzZDazyULxgPhtax0CAwEAAQJADwR36EpNzQTqDzusCFIq
+ZS+h9X8aIovgBK3RNhMIGO2ThpsnhiDTcqIvgQ56knbl6B2W4iOl54tJ6CNtf6l6
+zQIhANTaNLFGsJfOvZHcI0WL1r89+1A4JVxR+lpslJJwAvgDAiEAwsjqqZ2wY2F0
+F8p1J98BEbtjU2mEZIVCMn6vQuhWdl8CIDRL4IJl4eGKlB0QP0JJF1wpeGO/R76l
+DaPF5cMM7k3NAiEAss28m/ck9BWBfFVdNjx/vsdFZkx2O9AX9EJWoBSnSgECIQCa
++sVQMUVJFGsdE/31C7wCIbE3IpB7ziABZ7mN+V3Dhg==
+-----END RSA PRIVATE KEY-----
+'''
+
+# Private key components
+prime1 = 96275860229939261876671084930484419185939191875438854026071315955024109172739
+prime2 = 88103681619592083641803383393198542599284510949756076218404908654323473741407
+
+
+class TestMarkers(unittest.TestCase):
+ def test_values(self):
+ self.assertEqual(_markers('RSA PRIVATE KEY'),
+ (b'-----BEGIN RSA PRIVATE KEY-----',
+ b'-----END RSA PRIVATE KEY-----'))
+
+
+class TestBytesAndStrings(unittest.TestCase):
+ """Test that we can use PEM in both Unicode strings and bytes."""
+
+ def test_unicode_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem)
+ self.assertEqual(prime1 * prime2, key.n)
+
+ def test_bytes_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem.encode('ascii'))
+ self.assertEqual(prime1 * prime2, key.n)
+
+ def test_unicode_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem)
+ self.assertEqual(prime1 * prime2, key.n)
+
+ def test_bytes_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem.encode('ascii'))
+ self.assertEqual(prime1, key.p)
+ self.assertEqual(prime2, key.q)
+
+
+class TestByteOutput(unittest.TestCase):
+ """Tests that PEM and DER are returned as bytes."""
+
+ def test_bytes_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem)
+ self.assertTrue(is_bytes(key.save_pkcs1(format='DER')))
+ self.assertTrue(is_bytes(key.save_pkcs1(format='PEM')))
+
+ def test_bytes_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem)
+ self.assertTrue(is_bytes(key.save_pkcs1(format='DER')))
+ self.assertTrue(is_bytes(key.save_pkcs1(format='PEM')))
+
+
+class TestByteInput(unittest.TestCase):
+ """Tests that PEM and DER can be loaded from bytes."""
+
+ def test_bytes_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem.encode('ascii'))
+ self.assertTrue(is_bytes(key.save_pkcs1(format='DER')))
+ self.assertTrue(is_bytes(key.save_pkcs1(format='PEM')))
+
+ def test_bytes_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem.encode('ascii'))
+ self.assertTrue(is_bytes(key.save_pkcs1(format='DER')))
+ self.assertTrue(is_bytes(key.save_pkcs1(format='PEM')))
diff --git a/contrib/python/rsa/py2/tests/test_pkcs1.py b/contrib/python/rsa/py2/tests/test_pkcs1.py
new file mode 100644
index 0000000000..9f7dcea7ad
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_pkcs1.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests string operations."""
+
+import struct
+import sys
+import unittest
+
+import rsa
+from rsa import pkcs1
+from rsa._compat import byte, is_bytes
+
+
+class BinaryTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(256)
+
+ def test_enc_dec(self):
+ message = struct.pack('>IIII', 0, 0, 0, 1)
+ print("\tMessage: %r" % message)
+
+ encrypted = pkcs1.encrypt(message, self.pub)
+ print("\tEncrypted: %r" % encrypted)
+
+ decrypted = pkcs1.decrypt(encrypted, self.priv)
+ print("\tDecrypted: %r" % decrypted)
+
+ self.assertEqual(message, decrypted)
+
+ def test_decoding_failure(self):
+ message = struct.pack('>IIII', 0, 0, 0, 1)
+ encrypted = pkcs1.encrypt(message, self.pub)
+
+ # Alter the encrypted stream
+ a = encrypted[5]
+ if is_bytes(a):
+ a = ord(a)
+ altered_a = (a + 1) % 256
+ encrypted = encrypted[:5] + byte(altered_a) + encrypted[6:]
+
+ self.assertRaises(pkcs1.DecryptionError, pkcs1.decrypt, encrypted,
+ self.priv)
+
+ def test_randomness(self):
+ """Encrypting the same message twice should result in different
+ cryptos.
+ """
+
+ message = struct.pack('>IIII', 0, 0, 0, 1)
+ encrypted1 = pkcs1.encrypt(message, self.pub)
+ encrypted2 = pkcs1.encrypt(message, self.pub)
+
+ self.assertNotEqual(encrypted1, encrypted2)
+
+
+class ExtraZeroesTest(unittest.TestCase):
+ def setUp(self):
+ # Key, cyphertext, and plaintext taken from https://github.com/sybrenstuvel/python-rsa/issues/146
+ self.private_key = rsa.PrivateKey.load_pkcs1(
+ "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAs1EKK81M5kTFtZSuUFnhKy8FS2WNXaWVmi/fGHG4CLw98+Yo\n0nkuUarVwSS0O9pFPcpc3kvPKOe9Tv+6DLS3Qru21aATy2PRqjqJ4CYn71OYtSwM\n/ZfSCKvrjXybzgu+sBmobdtYm+sppbdL+GEHXGd8gdQw8DDCZSR6+dPJFAzLZTCd\nB+Ctwe/RXPF+ewVdfaOGjkZIzDoYDw7n+OHnsYCYozkbTOcWHpjVevipR+IBpGPi\n1rvKgFnlcG6d/tj0hWRl/6cS7RqhjoiNEtxqoJzpXs/Kg8xbCxXbCchkf11STA8u\ndiCjQWuWI8rcDwl69XMmHJjIQAqhKvOOQ8rYTQIDAQABAoIBABpQLQ7qbHtp4h1Y\nORAfcFRW7Q74UvtH/iEHH1TF8zyM6wZsYtcn4y0mxYE3Mp+J0xlTJbeVJkwZXYVH\nL3UH29CWHSlR+TWiazTwrCTRVJDhEoqbcTiRW8fb+o/jljVxMcVDrpyYUHNo2c6w\njBxhmKPtp66hhaDpds1Cwi0A8APZ8Z2W6kya/L/hRBzMgCz7Bon1nYBMak5PQEwV\nF0dF7Wy4vIjvCzO6DSqA415DvJDzUAUucgFudbANNXo4HJwNRnBpymYIh8mHdmNJ\n/MQ0YLSqUWvOB57dh7oWQwe3UsJ37ZUorTugvxh3NJ7Tt5ZqbCQBEECb9ND63gxo\n/a3YR/0CgYEA7BJc834xCi/0YmO5suBinWOQAF7IiRPU+3G9TdhWEkSYquupg9e6\nK9lC5k0iP+t6I69NYF7+6mvXDTmv6Z01o6oV50oXaHeAk74O3UqNCbLe9tybZ/+F\ndkYlwuGSNttMQBzjCiVy0+y0+Wm3rRnFIsAtd0RlZ24aN3bFTWJINIsCgYEAwnQq\nvNmJe9SwtnH5c/yCqPhKv1cF/4jdQZSGI6/p3KYNxlQzkHZ/6uvrU5V27ov6YbX8\nvKlKfO91oJFQxUD6lpTdgAStI3GMiJBJIZNpyZ9EWNSvwUj28H34cySpbZz3s4Xd\nhiJBShgy+fKURvBQwtWmQHZJ3EGrcOI7PcwiyYcCgYEAlql5jSUCY0ALtidzQogW\nJ+B87N+RGHsBuJ/0cxQYinwg+ySAAVbSyF1WZujfbO/5+YBN362A/1dn3lbswCnH\nK/bHF9+fZNqvwprPnceQj5oK1n4g6JSZNsy6GNAhosT+uwQ0misgR8SQE4W25dDG\nkdEYsz+BgCsyrCcu8J5C+tUCgYAFVPQbC4f2ikVyKzvgz0qx4WUDTBqRACq48p6e\n+eLatv7nskVbr7QgN+nS9+Uz80ihR0Ev1yCAvnwmM/XYAskcOea87OPmdeWZlQM8\nVXNwINrZ6LMNBLgorfuTBK1UoRo1pPUHCYdqxbEYI2unak18mikd2WB7Fp3h0YI4\nVpGZnwKBgBxkAYnZv+jGI4MyEKdsQgxvROXXYOJZkWzsKuKxVkVpYP2V4nR2YMOJ\nViJQ8FUEnPq35cMDlUk4SnoqrrHIJNOvcJSCqM+bWHAioAsfByLbUPM8sm3CDdIk\nXVJl32HuKYPJOMIWfc7hIfxLRHnCN+coz2M6tgqMDs0E/OfjuqVZ\n-----END RSA PRIVATE KEY-----",
+ format='PEM')
+ cyphertext = "4501b4d669e01b9ef2dc800aa1b06d49196f5a09fe8fbcd037323c60eaf027bfb98432be4e4a26c567ffec718bcbea977dd26812fa071c33808b4d5ebb742d9879806094b6fbeea63d25ea3141733b60e31c6912106e1b758a7fe0014f075193faa8b4622bfd5d3013f0a32190a95de61a3604711bc62945f95a6522bd4dfed0a994ef185b28c281f7b5e4c8ed41176d12d9fc1b837e6a0111d0132d08a6d6f0580de0c9eed8ed105531799482d1e466c68c23b0c222af7fc12ac279bc4ff57e7b4586d209371b38c4c1035edd418dc5f960441cb21ea2bedbfea86de0d7861e81021b650a1de51002c315f1e7c12debe4dcebf790caaa54a2f26b149cf9e77d"
+ plaintext = "54657374"
+
+ if sys.version_info < (3, 0):
+ self.cyphertext = cyphertext.decode("hex")
+ self.plaintext = plaintext.decode('hex')
+ else:
+ self.cyphertext = bytes.fromhex(cyphertext)
+ self.plaintext = bytes.fromhex(plaintext)
+
+ def test_unmodified(self):
+ message = rsa.decrypt(self.cyphertext, self.private_key)
+ self.assertEqual(message, self.plaintext)
+
+ def test_prepend_zeroes(self):
+ cyphertext = b'\00\00' + self.cyphertext
+ with self.assertRaises(rsa.DecryptionError):
+ rsa.decrypt(cyphertext, self.private_key)
+
+ def test_append_zeroes(self):
+ cyphertext = self.cyphertext + b'\00\00'
+ with self.assertRaises(rsa.DecryptionError):
+ rsa.decrypt(cyphertext, self.private_key)
+
+
+class SignatureTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(512)
+
+ def test_sign_verify(self):
+ """Test happy flow of sign and verify"""
+
+ message = b'je moeder'
+ signature = pkcs1.sign(message, self.priv, 'SHA-256')
+
+ self.assertEqual('SHA-256', pkcs1.verify(message, signature, self.pub))
+
+ def test_find_signature_hash(self):
+ """Test happy flow of sign and find_signature_hash"""
+
+ message = b'je moeder'
+ signature = pkcs1.sign(message, self.priv, 'SHA-256')
+
+ self.assertEqual('SHA-256', pkcs1.find_signature_hash(signature, self.pub))
+
+ def test_alter_message(self):
+ """Altering the message should let the verification fail."""
+
+ signature = pkcs1.sign(b'je moeder', self.priv, 'SHA-256')
+ self.assertRaises(pkcs1.VerificationError, pkcs1.verify,
+ b'mijn moeder', signature, self.pub)
+
+ def test_sign_different_key(self):
+ """Signing with another key should let the verification fail."""
+
+ (otherpub, _) = rsa.newkeys(512)
+
+ message = b'je moeder'
+ signature = pkcs1.sign(message, self.priv, 'SHA-256')
+ self.assertRaises(pkcs1.VerificationError, pkcs1.verify,
+ message, signature, otherpub)
+
+ def test_multiple_signings(self):
+ """Signing the same message twice should return the same signatures."""
+
+ message = struct.pack('>IIII', 0, 0, 0, 1)
+ signature1 = pkcs1.sign(message, self.priv, 'SHA-1')
+ signature2 = pkcs1.sign(message, self.priv, 'SHA-1')
+
+ self.assertEqual(signature1, signature2)
+
+ def test_split_hash_sign(self):
+ """Hashing and then signing should match with directly signing the message. """
+
+ message = b'je moeder'
+ msg_hash = pkcs1.compute_hash(message, 'SHA-256')
+ signature1 = pkcs1.sign_hash(msg_hash, self.priv, 'SHA-256')
+
+ # Calculate the signature using the unified method
+ signature2 = pkcs1.sign(message, self.priv, 'SHA-256')
+
+ self.assertEqual(signature1, signature2)
+
+ def test_hash_sign_verify(self):
+ """Test happy flow of hash, sign, and verify"""
+
+ message = b'je moeder'
+ msg_hash = pkcs1.compute_hash(message, 'SHA-224')
+ signature = pkcs1.sign_hash(msg_hash, self.priv, 'SHA-224')
+
+ self.assertTrue(pkcs1.verify(message, signature, self.pub))
+
+ def test_prepend_zeroes(self):
+ """Prepending the signature with zeroes should be detected."""
+
+ message = b'je moeder'
+ signature = pkcs1.sign(message, self.priv, 'SHA-256')
+ signature = b'\00\00' + signature
+ with self.assertRaises(rsa.VerificationError):
+ pkcs1.verify(message, signature, self.pub)
+
+ def test_apppend_zeroes(self):
+ """Apppending the signature with zeroes should be detected."""
+
+ message = b'je moeder'
+ signature = pkcs1.sign(message, self.priv, 'SHA-256')
+ signature = signature + b'\00\00'
+ with self.assertRaises(rsa.VerificationError):
+ pkcs1.verify(message, signature, self.pub)
diff --git a/contrib/python/rsa/py2/tests/test_pkcs1_v2.py b/contrib/python/rsa/py2/tests/test_pkcs1_v2.py
new file mode 100644
index 0000000000..1d8f0010de
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_pkcs1_v2.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests PKCS #1 version 2 functionality.
+
+Most of the mocked values come from the test vectors found at:
+http://www.itomorrowmag.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm
+"""
+
+import unittest
+
+from rsa import pkcs1_v2
+
+
+class MGFTest(unittest.TestCase):
+ def test_oaep_int_db_mask(self):
+ seed = (
+ b'\xaa\xfd\x12\xf6\x59\xca\xe6\x34\x89\xb4\x79\xe5\x07\x6d\xde\xc2'
+ b'\xf0\x6c\xb5\x8f'
+ )
+ db = (
+ b'\xda\x39\xa3\xee\x5e\x6b\x4b\x0d\x32\x55\xbf\xef\x95\x60\x18\x90'
+ b'\xaf\xd8\x07\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xd4\x36\xe9\x95\x69'
+ b'\xfd\x32\xa7\xc8\xa0\x5b\xbc\x90\xd3\x2c\x49'
+ )
+ masked_db = (
+ b'\xdc\xd8\x7d\x5c\x68\xf1\xee\xa8\xf5\x52\x67\xc3\x1b\x2e\x8b\xb4'
+ b'\x25\x1f\x84\xd7\xe0\xb2\xc0\x46\x26\xf5\xaf\xf9\x3e\xdc\xfb\x25'
+ b'\xc9\xc2\xb3\xff\x8a\xe1\x0e\x83\x9a\x2d\xdb\x4c\xdc\xfe\x4f\xf4'
+ b'\x77\x28\xb4\xa1\xb7\xc1\x36\x2b\xaa\xd2\x9a\xb4\x8d\x28\x69\xd5'
+ b'\x02\x41\x21\x43\x58\x11\x59\x1b\xe3\x92\xf9\x82\xfb\x3e\x87\xd0'
+ b'\x95\xae\xb4\x04\x48\xdb\x97\x2f\x3a\xc1\x4f\x7b\xc2\x75\x19\x52'
+ b'\x81\xce\x32\xd2\xf1\xb7\x6d\x4d\x35\x3e\x2d'
+ )
+
+ # dbMask = MGF(seed, length(DB))
+ db_mask = pkcs1_v2.mgf1(seed, length=len(db))
+ expected_db_mask = (
+ b'\x06\xe1\xde\xb2\x36\x9a\xa5\xa5\xc7\x07\xd8\x2c\x8e\x4e\x93\x24'
+ b'\x8a\xc7\x83\xde\xe0\xb2\xc0\x46\x26\xf5\xaf\xf9\x3e\xdc\xfb\x25'
+ b'\xc9\xc2\xb3\xff\x8a\xe1\x0e\x83\x9a\x2d\xdb\x4c\xdc\xfe\x4f\xf4'
+ b'\x77\x28\xb4\xa1\xb7\xc1\x36\x2b\xaa\xd2\x9a\xb4\x8d\x28\x69\xd5'
+ b'\x02\x41\x21\x43\x58\x11\x59\x1b\xe3\x92\xf9\x82\xfb\x3e\x87\xd0'
+ b'\x95\xae\xb4\x04\x48\xdb\x97\x2f\x3a\xc1\x4e\xaf\xf4\x9c\x8c\x3b'
+ b'\x7c\xfc\x95\x1a\x51\xec\xd1\xdd\xe6\x12\x64'
+ )
+
+ self.assertEqual(db_mask, expected_db_mask)
+
+ # seedMask = MGF(maskedDB, length(seed))
+ seed_mask = pkcs1_v2.mgf1(masked_db, length=len(seed))
+ expected_seed_mask = (
+ b'\x41\x87\x0b\x5a\xb0\x29\xe6\x57\xd9\x57\x50\xb5\x4c\x28\x3c\x08'
+ b'\x72\x5d\xbe\xa9'
+ )
+
+ self.assertEqual(seed_mask, expected_seed_mask)
+
+ def test_invalid_hasher(self):
+ """Tests an invalid hasher generates an exception"""
+ with self.assertRaises(ValueError):
+ pkcs1_v2.mgf1(b'\x06\xe1\xde\xb2', length=8, hasher='SHA2')
+
+ def test_invalid_length(self):
+ with self.assertRaises(OverflowError):
+ pkcs1_v2.mgf1(b'\x06\xe1\xde\xb2', length=2**50)
diff --git a/contrib/python/rsa/py2/tests/test_prime.py b/contrib/python/rsa/py2/tests/test_prime.py
new file mode 100644
index 0000000000..f3bda9b486
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_prime.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests prime functions."""
+
+import unittest
+
+from rsa._compat import range
+import rsa.prime
+import rsa.randnum
+
+
+class PrimeTest(unittest.TestCase):
+ def test_is_prime(self):
+ """Test some common primes."""
+
+ # Test some trivial numbers
+ self.assertFalse(rsa.prime.is_prime(-1))
+ self.assertFalse(rsa.prime.is_prime(0))
+ self.assertFalse(rsa.prime.is_prime(1))
+ self.assertTrue(rsa.prime.is_prime(2))
+ self.assertFalse(rsa.prime.is_prime(42))
+ self.assertTrue(rsa.prime.is_prime(41))
+
+ # Test some slightly larger numbers
+ self.assertEqual(
+ [907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997],
+ [x for x in range(901, 1000) if rsa.prime.is_prime(x)]
+ )
+
+ # Test around the 50th millionth known prime.
+ self.assertTrue(rsa.prime.is_prime(982451653))
+ self.assertFalse(rsa.prime.is_prime(982451653 * 961748941))
+
+ def test_miller_rabin_primality_testing(self):
+ """Uses monkeypatching to ensure certain random numbers.
+
+ This allows us to predict/control the code path.
+ """
+
+ randints = []
+
+ def fake_randint(maxvalue):
+ return randints.pop(0)
+
+ orig_randint = rsa.randnum.randint
+ rsa.randnum.randint = fake_randint
+ try:
+ # 'n is composite'
+ randints.append(2630484832) # causes the 'n is composite' case with n=3784949785
+ self.assertEqual(False, rsa.prime.miller_rabin_primality_testing(2787998641, 7))
+ self.assertEqual([], randints)
+
+ # 'Exit inner loop and continue with next witness'
+ randints.extend([
+ 2119139098, # causes 'Exit inner loop and continue with next witness'
+ # the next witnesses for the above case:
+ 3051067716, 3603501763, 3230895847, 3687808133, 3760099987, 4026931495, 3022471882,
+ ])
+ self.assertEqual(True, rsa.prime.miller_rabin_primality_testing(2211417913,
+ len(randints)))
+ self.assertEqual([], randints)
+ finally:
+ rsa.randnum.randint = orig_randint
+
+ def test_mersenne_primes(self):
+ """Tests first known Mersenne primes.
+
+ Mersenne primes are prime numbers that can be written in the form
+ `Mn = 2**n - 1` for some integer `n`. For the list of known Mersenne
+ primes, see:
+ https://en.wikipedia.org/wiki/Mersenne_prime#List_of_known_Mersenne_primes
+ """
+
+ # List of known Mersenne exponents.
+ known_mersenne_exponents = [
+ 2, 3, 5, 7, 13, 17, 19, 31, 61, 89, 107, 127, 521, 607, 1279,
+ 2203, 2281, 4423,
+ ]
+
+ # Test Mersenne primes.
+ for exp in known_mersenne_exponents:
+ self.assertTrue(rsa.prime.is_prime(2**exp - 1))
+
+ def test_get_primality_testing_rounds(self):
+ """Test round calculation for primality testing."""
+
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 63), 10)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 127), 10)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 255), 10)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 511), 7)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 767), 7)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 1023), 4)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 1279), 4)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 1535), 3)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 2047), 3)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 4095), 3)
diff --git a/contrib/python/rsa/py2/tests/test_strings.py b/contrib/python/rsa/py2/tests/test_strings.py
new file mode 100644
index 0000000000..28fa091a47
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_strings.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests string operations."""
+
+from __future__ import absolute_import
+
+import unittest
+
+import rsa
+
+unicode_string = u"Euro=\u20ac ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+
+class StringTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(384)
+
+ def test_enc_dec(self):
+ message = unicode_string.encode('utf-8')
+ print("\tMessage: %s" % message)
+
+ encrypted = rsa.encrypt(message, self.pub)
+ print("\tEncrypted: %s" % encrypted)
+
+ decrypted = rsa.decrypt(encrypted, self.priv)
+ print("\tDecrypted: %s" % decrypted)
+
+ self.assertEqual(message, decrypted)
diff --git a/contrib/python/rsa/py2/tests/test_transform.py b/contrib/python/rsa/py2/tests/test_transform.py
new file mode 100644
index 0000000000..fe0970c962
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/test_transform.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from rsa.transform import int2bytes, bytes2int, _int2bytes
+
+
+class Test_int2bytes(unittest.TestCase):
+ def test_accuracy(self):
+ self.assertEqual(int2bytes(123456789), b'\x07[\xcd\x15')
+ self.assertEqual(_int2bytes(123456789), b'\x07[\xcd\x15')
+
+ def test_codec_identity(self):
+ self.assertEqual(bytes2int(int2bytes(123456789, 128)), 123456789)
+ self.assertEqual(bytes2int(_int2bytes(123456789, 128)), 123456789)
+
+ def test_chunk_size(self):
+ self.assertEqual(int2bytes(123456789, 6), b'\x00\x00\x07[\xcd\x15')
+ self.assertEqual(int2bytes(123456789, 7),
+ b'\x00\x00\x00\x07[\xcd\x15')
+
+ self.assertEqual(_int2bytes(123456789, 6),
+ b'\x00\x00\x07[\xcd\x15')
+ self.assertEqual(_int2bytes(123456789, 7),
+ b'\x00\x00\x00\x07[\xcd\x15')
+
+ def test_zero(self):
+ self.assertEqual(int2bytes(0, 4), b'\x00' * 4)
+ self.assertEqual(int2bytes(0, 7), b'\x00' * 7)
+ self.assertEqual(int2bytes(0), b'\x00')
+
+ self.assertEqual(_int2bytes(0, 4), b'\x00' * 4)
+ self.assertEqual(_int2bytes(0, 7), b'\x00' * 7)
+ self.assertEqual(_int2bytes(0), b'\x00')
+
+ def test_correctness_against_base_implementation(self):
+ # Slow test.
+ values = [
+ 1 << 512,
+ 1 << 8192,
+ 1 << 77,
+ ]
+ for value in values:
+ self.assertEqual(int2bytes(value), _int2bytes(value),
+ "Boom %d" % value)
+ self.assertEqual(bytes2int(int2bytes(value)),
+ value,
+ "Boom %d" % value)
+ self.assertEqual(bytes2int(_int2bytes(value)),
+ value,
+ "Boom %d" % value)
+
+ def test_raises_OverflowError_when_chunk_size_is_insufficient(self):
+ self.assertRaises(OverflowError, int2bytes, 123456789, 3)
+ self.assertRaises(OverflowError, int2bytes, 299999999999, 4)
+
+ self.assertRaises(OverflowError, _int2bytes, 123456789, 3)
+ self.assertRaises(OverflowError, _int2bytes, 299999999999, 4)
+
+ def test_raises_ValueError_when_negative_integer(self):
+ self.assertRaises(ValueError, int2bytes, -1)
+ self.assertRaises(ValueError, _int2bytes, -1)
+
+ def test_raises_TypeError_when_not_integer(self):
+ self.assertRaises(TypeError, int2bytes, None)
+ self.assertRaises(TypeError, _int2bytes, None)
diff --git a/contrib/python/rsa/py2/tests/ya.make b/contrib/python/rsa/py2/tests/ya.make
new file mode 100644
index 0000000000..05640300fb
--- /dev/null
+++ b/contrib/python/rsa/py2/tests/ya.make
@@ -0,0 +1,30 @@
+PY2TEST()
+
+PEERDIR(
+ contrib/python/rsa
+ contrib/python/mock
+)
+
+NO_LINT()
+
+TEST_SRCS(
+ test_cli.py
+ test_common.py
+ test_compat.py
+ test_integers.py
+ test_key.py
+ test_load_save_keys.py
+ test_parallel.py
+ test_pem.py
+ test_pkcs1.py
+ test_pkcs1_v2.py
+ test_prime.py
+ test_strings.py
+ test_transform.py
+)
+
+DATA (
+ arcadia/contrib/python/rsa/py2/tests
+)
+
+END()
diff --git a/contrib/python/rsa/py2/ya.make b/contrib/python/rsa/py2/ya.make
new file mode 100644
index 0000000000..f196afec21
--- /dev/null
+++ b/contrib/python/rsa/py2/ya.make
@@ -0,0 +1,46 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(4.5)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/pyasn1
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ rsa/__init__.py
+ rsa/_compat.py
+ rsa/asn1.py
+ rsa/cli.py
+ rsa/common.py
+ rsa/core.py
+ rsa/key.py
+ rsa/machine_size.py
+ rsa/parallel.py
+ rsa/pem.py
+ rsa/pkcs1.py
+ rsa/pkcs1_v2.py
+ rsa/prime.py
+ rsa/randnum.py
+ rsa/transform.py
+ rsa/util.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/rsa/py2/
+ .dist-info/METADATA
+ .dist-info/entry_points.txt
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/rsa/py3/.dist-info/METADATA b/contrib/python/rsa/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..926968149b
--- /dev/null
+++ b/contrib/python/rsa/py3/.dist-info/METADATA
@@ -0,0 +1,106 @@
+Metadata-Version: 2.1
+Name: rsa
+Version: 4.9
+Summary: Pure-Python RSA implementation
+Home-page: https://stuvel.eu/rsa
+License: Apache-2.0
+Author: Sybren A. Stüvel
+Author-email: sybren@stuvel.eu
+Requires-Python: >=3.6,<4
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Security :: Cryptography
+Requires-Dist: pyasn1 (>=0.1.3)
+Project-URL: Repository, https://github.com/sybrenstuvel/python-rsa
+Description-Content-Type: text/markdown
+
+# Pure Python RSA implementation
+
+[![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/)
+[![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa)
+[![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master)
+[![Code Climate](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability)
+
+[Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports
+encryption and decryption, signing and verifying signatures, and key
+generation according to PKCS#1 version 1.5. It can be used as a Python
+library as well as on the commandline. The code was mostly written by
+Sybren A. Stüvel.
+
+Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa). For all changes, check [the changelog](https://github.com/sybrenstuvel/python-rsa/blob/master/CHANGELOG.md).
+
+Download and install using:
+
+ pip install rsa
+
+or download it from the [Python Package Index](https://pypi.org/project/rsa/).
+
+The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is
+licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+## Security
+
+Because of how Python internally stores numbers, it is very hard (if not impossible) to make a pure-Python program secure against timing attacks. This library is no exception, so use it with care. See https://securitypitfalls.wordpress.com/2018/08/03/constant-time-compare-in-python/ for more info.
+
+## Setup of Development Environment
+
+```
+python3 -m venv .venv
+. ./.venv/bin/activate
+pip install poetry
+poetry install
+```
+
+## Publishing a New Release
+
+Since this project is considered critical on the Python Package Index,
+two-factor authentication is required. For uploading packages to PyPi, an API
+key is required; username+password will not work.
+
+First, generate an API token at https://pypi.org/manage/account/token/. Then,
+use this token when publishing instead of your username and password.
+
+As username, use `__token__`.
+As password, use the token itself, including the `pypi-` prefix.
+
+See https://pypi.org/help/#apitoken for help using API tokens to publish. This
+is what I have in `~/.pypirc`:
+
+```
+[distutils]
+index-servers =
+ rsa
+
+# Use `twine upload -r rsa` to upload with this token.
+[rsa]
+ repository = https://upload.pypi.org/legacy/
+ username = __token__
+ password = pypi-token
+```
+
+```
+. ./.venv/bin/activate
+pip install twine
+
+poetry build
+twine check dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl
+twine upload -r rsa dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl
+```
+
+The `pip install twine` is necessary as Python-RSA requires Python >= 3.6, and
+Twine requires at least version 3.7. This means Poetry refuses to add it as
+dependency.
+
diff --git a/contrib/python/rsa/py3/.dist-info/entry_points.txt b/contrib/python/rsa/py3/.dist-info/entry_points.txt
new file mode 100644
index 0000000000..bf058e3ebd
--- /dev/null
+++ b/contrib/python/rsa/py3/.dist-info/entry_points.txt
@@ -0,0 +1,8 @@
+[console_scripts]
+pyrsa-decrypt=rsa.cli:decrypt
+pyrsa-encrypt=rsa.cli:encrypt
+pyrsa-keygen=rsa.cli:keygen
+pyrsa-priv2pub=rsa.util:private_to_public
+pyrsa-sign=rsa.cli:sign
+pyrsa-verify=rsa.cli:verify
+
diff --git a/contrib/python/rsa/py3/.dist-info/top_level.txt b/contrib/python/rsa/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..703f551006
--- /dev/null
+++ b/contrib/python/rsa/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+rsa
diff --git a/contrib/python/rsa/py3/LICENSE b/contrib/python/rsa/py3/LICENSE
new file mode 100644
index 0000000000..67589cbb86
--- /dev/null
+++ b/contrib/python/rsa/py3/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/contrib/python/rsa/py3/README.md b/contrib/python/rsa/py3/README.md
new file mode 100644
index 0000000000..fae569b9ea
--- /dev/null
+++ b/contrib/python/rsa/py3/README.md
@@ -0,0 +1,76 @@
+# Pure Python RSA implementation
+
+[![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/)
+[![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa)
+[![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master)
+[![Code Climate](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability)
+
+[Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports
+encryption and decryption, signing and verifying signatures, and key
+generation according to PKCS#1 version 1.5. It can be used as a Python
+library as well as on the commandline. The code was mostly written by
+Sybren A. Stüvel.
+
+Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa). For all changes, check [the changelog](https://github.com/sybrenstuvel/python-rsa/blob/master/CHANGELOG.md).
+
+Download and install using:
+
+ pip install rsa
+
+or download it from the [Python Package Index](https://pypi.org/project/rsa/).
+
+The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is
+licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+## Security
+
+Because of how Python internally stores numbers, it is very hard (if not impossible) to make a pure-Python program secure against timing attacks. This library is no exception, so use it with care. See https://securitypitfalls.wordpress.com/2018/08/03/constant-time-compare-in-python/ for more info.
+
+## Setup of Development Environment
+
+```
+python3 -m venv .venv
+. ./.venv/bin/activate
+pip install poetry
+poetry install
+```
+
+## Publishing a New Release
+
+Since this project is considered critical on the Python Package Index,
+two-factor authentication is required. For uploading packages to PyPi, an API
+key is required; username+password will not work.
+
+First, generate an API token at https://pypi.org/manage/account/token/. Then,
+use this token when publishing instead of your username and password.
+
+As username, use `__token__`.
+As password, use the token itself, including the `pypi-` prefix.
+
+See https://pypi.org/help/#apitoken for help using API tokens to publish. This
+is what I have in `~/.pypirc`:
+
+```
+[distutils]
+index-servers =
+ rsa
+
+# Use `twine upload -r rsa` to upload with this token.
+[rsa]
+ repository = https://upload.pypi.org/legacy/
+ username = __token__
+ password = pypi-token
+```
+
+```
+. ./.venv/bin/activate
+pip install twine
+
+poetry build
+twine check dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl
+twine upload -r rsa dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl
+```
+
+The `pip install twine` is necessary as Python-RSA requires Python >= 3.6, and
+Twine requires at least version 3.7. This means Poetry refuses to add it as
+dependency.
diff --git a/contrib/python/rsa/py3/rsa/__init__.py b/contrib/python/rsa/py3/rsa/__init__.py
new file mode 100644
index 0000000000..d0185fe922
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/__init__.py
@@ -0,0 +1,60 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RSA module
+
+Module for calculating large primes, and RSA encryption, decryption, signing
+and verification. Includes generating public and private keys.
+
+WARNING: this implementation does not use compression of the cleartext input to
+prevent repetitions, or other common security improvements. Use with care.
+
+"""
+
+from rsa.key import newkeys, PrivateKey, PublicKey
+from rsa.pkcs1 import (
+ encrypt,
+ decrypt,
+ sign,
+ verify,
+ DecryptionError,
+ VerificationError,
+ find_signature_hash,
+ sign_hash,
+ compute_hash,
+)
+
+__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
+__date__ = "2022-07-20"
+__version__ = "4.9"
+
+# Do doctest if we're run directly
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+__all__ = [
+ "newkeys",
+ "encrypt",
+ "decrypt",
+ "sign",
+ "verify",
+ "PublicKey",
+ "PrivateKey",
+ "DecryptionError",
+ "VerificationError",
+ "find_signature_hash",
+ "compute_hash",
+ "sign_hash",
+]
diff --git a/contrib/python/rsa/py3/rsa/asn1.py b/contrib/python/rsa/py3/rsa/asn1.py
new file mode 100644
index 0000000000..4cc4dd35de
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/asn1.py
@@ -0,0 +1,52 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ASN.1 definitions.
+
+Not all ASN.1-handling code use these definitions, but when it does, they should be here.
+"""
+
+from pyasn1.type import univ, namedtype, tag
+
+
+class PubKeyHeader(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType("oid", univ.ObjectIdentifier()),
+ namedtype.NamedType("parameters", univ.Null()),
+ )
+
+
+class OpenSSLPubKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType("header", PubKeyHeader()),
+ # This little hack (the implicit tag) allows us to get a Bit String as Octet String
+ namedtype.NamedType(
+ "key",
+ univ.OctetString().subtype(implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3)),
+ ),
+ )
+
+
+class AsnPubKey(univ.Sequence):
+ """ASN.1 contents of DER encoded public key:
+
+ RSAPublicKey ::= SEQUENCE {
+ modulus INTEGER, -- n
+ publicExponent INTEGER, -- e
+ """
+
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType("modulus", univ.Integer()),
+ namedtype.NamedType("publicExponent", univ.Integer()),
+ )
diff --git a/contrib/python/rsa/py3/rsa/cli.py b/contrib/python/rsa/py3/rsa/cli.py
new file mode 100644
index 0000000000..4db3f0b5e0
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/cli.py
@@ -0,0 +1,321 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Commandline scripts.
+
+These scripts are called by the executables defined in setup.py.
+"""
+
+import abc
+import sys
+import typing
+import optparse
+
+import rsa
+import rsa.key
+import rsa.pkcs1
+
+HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
+Indexable = typing.Union[typing.Tuple, typing.List[str]]
+
+
+def keygen() -> None:
+ """Key generator."""
+
+ # Parse the CLI options
+ parser = optparse.OptionParser(
+ usage="usage: %prog [options] keysize",
+ description='Generates a new RSA key pair of "keysize" bits.',
+ )
+
+ parser.add_option(
+ "--pubout",
+ type="string",
+ help="Output filename for the public key. The public key is "
+ "not saved if this option is not present. You can use "
+ "pyrsa-priv2pub to create the public key file later.",
+ )
+
+ parser.add_option(
+ "-o",
+ "--out",
+ type="string",
+ help="Output filename for the private key. The key is "
+ "written to stdout if this option is not present.",
+ )
+
+ parser.add_option(
+ "--form",
+ help="key format of the private and public keys - default PEM",
+ choices=("PEM", "DER"),
+ default="PEM",
+ )
+
+ (cli, cli_args) = parser.parse_args(sys.argv[1:])
+
+ if len(cli_args) != 1:
+ parser.print_help()
+ raise SystemExit(1)
+
+ try:
+ keysize = int(cli_args[0])
+ except ValueError as ex:
+ parser.print_help()
+ print("Not a valid number: %s" % cli_args[0], file=sys.stderr)
+ raise SystemExit(1) from ex
+
+ print("Generating %i-bit key" % keysize, file=sys.stderr)
+ (pub_key, priv_key) = rsa.newkeys(keysize)
+
+ # Save public key
+ if cli.pubout:
+ print("Writing public key to %s" % cli.pubout, file=sys.stderr)
+ data = pub_key.save_pkcs1(format=cli.form)
+ with open(cli.pubout, "wb") as outfile:
+ outfile.write(data)
+
+ # Save private key
+ data = priv_key.save_pkcs1(format=cli.form)
+
+ if cli.out:
+ print("Writing private key to %s" % cli.out, file=sys.stderr)
+ with open(cli.out, "wb") as outfile:
+ outfile.write(data)
+ else:
+ print("Writing private key to stdout", file=sys.stderr)
+ sys.stdout.buffer.write(data)
+
+
+class CryptoOperation(metaclass=abc.ABCMeta):
+ """CLI callable that operates with input, output, and a key."""
+
+ keyname = "public" # or 'private'
+ usage = "usage: %%prog [options] %(keyname)s_key"
+ description = ""
+ operation = "decrypt"
+ operation_past = "decrypted"
+ operation_progressive = "decrypting"
+ input_help = "Name of the file to %(operation)s. Reads from stdin if " "not specified."
+ output_help = (
+ "Name of the file to write the %(operation_past)s file "
+ "to. Written to stdout if this option is not present."
+ )
+ expected_cli_args = 1
+ has_output = True
+
+ key_class = rsa.PublicKey # type: typing.Type[rsa.key.AbstractKey]
+
+ def __init__(self) -> None:
+ self.usage = self.usage % self.__class__.__dict__
+ self.input_help = self.input_help % self.__class__.__dict__
+ self.output_help = self.output_help % self.__class__.__dict__
+
+ @abc.abstractmethod
+ def perform_operation(
+ self, indata: bytes, key: rsa.key.AbstractKey, cli_args: Indexable
+ ) -> typing.Any:
+ """Performs the program's operation.
+
+ Implement in a subclass.
+
+ :returns: the data to write to the output.
+ """
+
+ def __call__(self) -> None:
+ """Runs the program."""
+
+ (cli, cli_args) = self.parse_cli()
+
+ key = self.read_key(cli_args[0], cli.keyform)
+
+ indata = self.read_infile(cli.input)
+
+ print(self.operation_progressive.title(), file=sys.stderr)
+ outdata = self.perform_operation(indata, key, cli_args)
+
+ if self.has_output:
+ self.write_outfile(outdata, cli.output)
+
+ def parse_cli(self) -> typing.Tuple[optparse.Values, typing.List[str]]:
+ """Parse the CLI options
+
+ :returns: (cli_opts, cli_args)
+ """
+
+ parser = optparse.OptionParser(usage=self.usage, description=self.description)
+
+ parser.add_option("-i", "--input", type="string", help=self.input_help)
+
+ if self.has_output:
+ parser.add_option("-o", "--output", type="string", help=self.output_help)
+
+ parser.add_option(
+ "--keyform",
+ help="Key format of the %s key - default PEM" % self.keyname,
+ choices=("PEM", "DER"),
+ default="PEM",
+ )
+
+ (cli, cli_args) = parser.parse_args(sys.argv[1:])
+
+ if len(cli_args) != self.expected_cli_args:
+ parser.print_help()
+ raise SystemExit(1)
+
+ return cli, cli_args
+
+ def read_key(self, filename: str, keyform: str) -> rsa.key.AbstractKey:
+ """Reads a public or private key."""
+
+ print("Reading %s key from %s" % (self.keyname, filename), file=sys.stderr)
+ with open(filename, "rb") as keyfile:
+ keydata = keyfile.read()
+
+ return self.key_class.load_pkcs1(keydata, keyform)
+
+ def read_infile(self, inname: str) -> bytes:
+ """Read the input file"""
+
+ if inname:
+ print("Reading input from %s" % inname, file=sys.stderr)
+ with open(inname, "rb") as infile:
+ return infile.read()
+
+ print("Reading input from stdin", file=sys.stderr)
+ return sys.stdin.buffer.read()
+
+ def write_outfile(self, outdata: bytes, outname: str) -> None:
+ """Write the output file"""
+
+ if outname:
+ print("Writing output to %s" % outname, file=sys.stderr)
+ with open(outname, "wb") as outfile:
+ outfile.write(outdata)
+ else:
+ print("Writing output to stdout", file=sys.stderr)
+ sys.stdout.buffer.write(outdata)
+
+
+class EncryptOperation(CryptoOperation):
+ """Encrypts a file."""
+
+ keyname = "public"
+ description = (
+ "Encrypts a file. The file must be shorter than the key " "length in order to be encrypted."
+ )
+ operation = "encrypt"
+ operation_past = "encrypted"
+ operation_progressive = "encrypting"
+
+ def perform_operation(
+ self, indata: bytes, pub_key: rsa.key.AbstractKey, cli_args: Indexable = ()
+ ) -> bytes:
+ """Encrypts files."""
+ assert isinstance(pub_key, rsa.key.PublicKey)
+ return rsa.encrypt(indata, pub_key)
+
+
+class DecryptOperation(CryptoOperation):
+ """Decrypts a file."""
+
+ keyname = "private"
+ description = (
+ "Decrypts a file. The original file must be shorter than "
+ "the key length in order to have been encrypted."
+ )
+ operation = "decrypt"
+ operation_past = "decrypted"
+ operation_progressive = "decrypting"
+ key_class = rsa.PrivateKey
+
+ def perform_operation(
+ self, indata: bytes, priv_key: rsa.key.AbstractKey, cli_args: Indexable = ()
+ ) -> bytes:
+ """Decrypts files."""
+ assert isinstance(priv_key, rsa.key.PrivateKey)
+ return rsa.decrypt(indata, priv_key)
+
+
+class SignOperation(CryptoOperation):
+ """Signs a file."""
+
+ keyname = "private"
+ usage = "usage: %%prog [options] private_key hash_method"
+ description = (
+ "Signs a file, outputs the signature. Choose the hash "
+ "method from %s" % ", ".join(HASH_METHODS)
+ )
+ operation = "sign"
+ operation_past = "signature"
+ operation_progressive = "Signing"
+ key_class = rsa.PrivateKey
+ expected_cli_args = 2
+
+ output_help = (
+ "Name of the file to write the signature to. Written "
+ "to stdout if this option is not present."
+ )
+
+ def perform_operation(
+ self, indata: bytes, priv_key: rsa.key.AbstractKey, cli_args: Indexable
+ ) -> bytes:
+ """Signs files."""
+ assert isinstance(priv_key, rsa.key.PrivateKey)
+
+ hash_method = cli_args[1]
+ if hash_method not in HASH_METHODS:
+ raise SystemExit("Invalid hash method, choose one of %s" % ", ".join(HASH_METHODS))
+
+ return rsa.sign(indata, priv_key, hash_method)
+
+
+class VerifyOperation(CryptoOperation):
+ """Verify a signature."""
+
+ keyname = "public"
+ usage = "usage: %%prog [options] public_key signature_file"
+ description = (
+ "Verifies a signature, exits with status 0 upon success, "
+ "prints an error message and exits with status 1 upon error."
+ )
+ operation = "verify"
+ operation_past = "verified"
+ operation_progressive = "Verifying"
+ key_class = rsa.PublicKey
+ expected_cli_args = 2
+ has_output = False
+
+ def perform_operation(
+ self, indata: bytes, pub_key: rsa.key.AbstractKey, cli_args: Indexable
+ ) -> None:
+ """Verifies files."""
+ assert isinstance(pub_key, rsa.key.PublicKey)
+
+ signature_file = cli_args[1]
+
+ with open(signature_file, "rb") as sigfile:
+ signature = sigfile.read()
+
+ try:
+ rsa.verify(indata, signature, pub_key)
+ except rsa.VerificationError as ex:
+ raise SystemExit("Verification failed.") from ex
+
+ print("Verification OK", file=sys.stderr)
+
+
+encrypt = EncryptOperation()
+decrypt = DecryptOperation()
+sign = SignOperation()
+verify = VerifyOperation()
diff --git a/contrib/python/rsa/py3/rsa/common.py b/contrib/python/rsa/py3/rsa/common.py
new file mode 100644
index 0000000000..ca732e5819
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/common.py
@@ -0,0 +1,184 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Common functionality shared by several modules."""
+
+import typing
+
+
+class NotRelativePrimeError(ValueError):
+ def __init__(self, a: int, b: int, d: int, msg: str = "") -> None:
+ super().__init__(msg or "%d and %d are not relatively prime, divider=%i" % (a, b, d))
+ self.a = a
+ self.b = b
+ self.d = d
+
+
+def bit_size(num: int) -> int:
+ """
+ Number of bits needed to represent a integer excluding any prefix
+ 0 bits.
+
+ Usage::
+
+ >>> bit_size(1023)
+ 10
+ >>> bit_size(1024)
+ 11
+ >>> bit_size(1025)
+ 11
+
+ :param num:
+ Integer value. If num is 0, returns 0. Only the absolute value of the
+ number is considered. Therefore, signed integers will be abs(num)
+ before the number's bit length is determined.
+ :returns:
+ Returns the number of bits in the integer.
+ """
+
+ try:
+ return num.bit_length()
+ except AttributeError as ex:
+ raise TypeError("bit_size(num) only supports integers, not %r" % type(num)) from ex
+
+
+def byte_size(number: int) -> int:
+ """
+ Returns the number of bytes required to hold a specific long number.
+
+ The number of bytes is rounded up.
+
+ Usage::
+
+ >>> byte_size(1 << 1023)
+ 128
+ >>> byte_size((1 << 1024) - 1)
+ 128
+ >>> byte_size(1 << 1024)
+ 129
+
+ :param number:
+ An unsigned integer
+ :returns:
+ The number of bytes required to hold a specific long number.
+ """
+ if number == 0:
+ return 1
+ return ceil_div(bit_size(number), 8)
+
+
+def ceil_div(num: int, div: int) -> int:
+ """
+ Returns the ceiling function of a division between `num` and `div`.
+
+ Usage::
+
+ >>> ceil_div(100, 7)
+ 15
+ >>> ceil_div(100, 10)
+ 10
+ >>> ceil_div(1, 4)
+ 1
+
+ :param num: Division's numerator, a number
+ :param div: Division's divisor, a number
+
+ :return: Rounded up result of the division between the parameters.
+ """
+ quanta, mod = divmod(num, div)
+ if mod:
+ quanta += 1
+ return quanta
+
+
+def extended_gcd(a: int, b: int) -> typing.Tuple[int, int, int]:
+ """Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb"""
+ # r = gcd(a,b) i = multiplicitive inverse of a mod b
+ # or j = multiplicitive inverse of b mod a
+ # Neg return values for i or j are made positive mod b or a respectively
+ # Iterateive Version is faster and uses much less stack space
+ x = 0
+ y = 1
+ lx = 1
+ ly = 0
+ oa = a # Remember original a/b to remove
+ ob = b # negative values from return results
+ while b != 0:
+ q = a // b
+ (a, b) = (b, a % b)
+ (x, lx) = ((lx - (q * x)), x)
+ (y, ly) = ((ly - (q * y)), y)
+ if lx < 0:
+ lx += ob # If neg wrap modulo original b
+ if ly < 0:
+ ly += oa # If neg wrap modulo original a
+ return a, lx, ly # Return only positive values
+
+
+def inverse(x: int, n: int) -> int:
+ """Returns the inverse of x % n under multiplication, a.k.a x^-1 (mod n)
+
+ >>> inverse(7, 4)
+ 3
+ >>> (inverse(143, 4) * 143) % 4
+ 1
+ """
+
+ (divider, inv, _) = extended_gcd(x, n)
+
+ if divider != 1:
+ raise NotRelativePrimeError(x, n, divider)
+
+ return inv
+
+
+def crt(a_values: typing.Iterable[int], modulo_values: typing.Iterable[int]) -> int:
+ """Chinese Remainder Theorem.
+
+ Calculates x such that x = a[i] (mod m[i]) for each i.
+
+ :param a_values: the a-values of the above equation
+ :param modulo_values: the m-values of the above equation
+ :returns: x such that x = a[i] (mod m[i]) for each i
+
+
+ >>> crt([2, 3], [3, 5])
+ 8
+
+ >>> crt([2, 3, 2], [3, 5, 7])
+ 23
+
+ >>> crt([2, 3, 0], [7, 11, 15])
+ 135
+ """
+
+ m = 1
+ x = 0
+
+ for modulo in modulo_values:
+ m *= modulo
+
+ for (m_i, a_i) in zip(modulo_values, a_values):
+ M_i = m // m_i
+ inv = inverse(M_i, m_i)
+
+ x = (x + a_i * M_i * inv) % m
+
+ return x
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/contrib/python/rsa/py3/rsa/core.py b/contrib/python/rsa/py3/rsa/core.py
new file mode 100644
index 0000000000..84ed3f883f
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/core.py
@@ -0,0 +1,53 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Core mathematical operations.
+
+This is the actual core RSA implementation, which is only defined
+mathematically on integers.
+"""
+
+
+def assert_int(var: int, name: str) -> None:
+ if isinstance(var, int):
+ return
+
+ raise TypeError("%s should be an integer, not %s" % (name, var.__class__))
+
+
+def encrypt_int(message: int, ekey: int, n: int) -> int:
+ """Encrypts a message using encryption key 'ekey', working modulo n"""
+
+ assert_int(message, "message")
+ assert_int(ekey, "ekey")
+ assert_int(n, "n")
+
+ if message < 0:
+ raise ValueError("Only non-negative numbers are supported")
+
+ if message > n:
+ raise OverflowError("The message %i is too long for n=%i" % (message, n))
+
+ return pow(message, ekey, n)
+
+
+def decrypt_int(cyphertext: int, dkey: int, n: int) -> int:
+ """Decrypts a cypher text using the decryption key 'dkey', working modulo n"""
+
+ assert_int(cyphertext, "cyphertext")
+ assert_int(dkey, "dkey")
+ assert_int(n, "n")
+
+ message = pow(cyphertext, dkey, n)
+ return message
diff --git a/contrib/python/rsa/py3/rsa/key.py b/contrib/python/rsa/py3/rsa/key.py
new file mode 100644
index 0000000000..f800644308
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/key.py
@@ -0,0 +1,858 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""RSA key generation code.
+
+Create new keys with the newkeys() function. It will give you a PublicKey and a
+PrivateKey object.
+
+Loading and saving keys requires the pyasn1 module. This module is imported as
+late as possible, such that other functionality will remain working in absence
+of pyasn1.
+
+.. note::
+
+ Storing public and private keys via the `pickle` module is possible.
+ However, it is insecure to load a key from an untrusted source.
+ The pickle module is not secure against erroneous or maliciously
+ constructed data. Never unpickle data received from an untrusted
+ or unauthenticated source.
+
+"""
+
+import threading
+import typing
+import warnings
+
+import rsa.prime
+import rsa.pem
+import rsa.common
+import rsa.randnum
+import rsa.core
+
+
+DEFAULT_EXPONENT = 65537
+
+
+T = typing.TypeVar("T", bound="AbstractKey")
+
+
class AbstractKey:
    """Abstract superclass for private and public keys.

    Holds the modulus ``n`` and the public exponent ``e``, plus the mutable
    blinding state shared by the subclasses' operations.
    """

    # Keeps instances compact; subclasses declare their own __slots__ for
    # any extra attributes.
    __slots__ = ("n", "e", "blindfac", "blindfac_inverse", "mutex")

    def __init__(self, n: int, e: int) -> None:
        self.n = n
        self.e = e

        # These will be computed properly on the first call to blind().
        self.blindfac = self.blindfac_inverse = -1

        # Used to protect updates to the blinding factor in multi-threaded
        # environments.
        self.mutex = threading.Lock()

    @classmethod
    def _load_pkcs1_pem(cls: typing.Type[T], keyfile: bytes) -> T:
        """Loads a key in PKCS#1 PEM format, implement in a subclass.

        :param keyfile: contents of a PEM-encoded file that contains
            the public key.
        :type keyfile: bytes

        :return: the loaded key
        :rtype: AbstractKey
        """

    @classmethod
    def _load_pkcs1_der(cls: typing.Type[T], keyfile: bytes) -> T:
        """Loads a key in PKCS#1 DER format, implement in a subclass.

        :param keyfile: contents of a DER-encoded file that contains
            the public key.
        :type keyfile: bytes

        :return: the loaded key
        :rtype: AbstractKey
        """

    def _save_pkcs1_pem(self) -> bytes:
        """Saves the key in PKCS#1 PEM format, implement in a subclass.

        :returns: the PEM-encoded key.
        :rtype: bytes
        """

    def _save_pkcs1_der(self) -> bytes:
        """Saves the key in PKCS#1 DER format, implement in a subclass.

        :returns: the DER-encoded key.
        :rtype: bytes
        """

    @classmethod
    def load_pkcs1(cls: typing.Type[T], keyfile: bytes, format: str = "PEM") -> T:
        """Loads a key in PKCS#1 DER or PEM format.

        :param keyfile: contents of a DER- or PEM-encoded file that contains
            the key.
        :type keyfile: bytes
        :param format: the format of the file to load; 'PEM' or 'DER'
        :type format: str

        :return: the loaded key
        :rtype: AbstractKey
        """

        # Dispatch to the subclass implementation for the requested format.
        methods = {
            "PEM": cls._load_pkcs1_pem,
            "DER": cls._load_pkcs1_der,
        }

        method = cls._assert_format_exists(format, methods)
        return method(keyfile)

    @staticmethod
    def _assert_format_exists(
        file_format: str, methods: typing.Mapping[str, typing.Callable]
    ) -> typing.Callable:
        """Checks whether the given file format exists in 'methods'.

        :return: the callable stored under ``file_format``.
        :raise ValueError: when the format is not a key of ``methods``.
        """

        try:
            return methods[file_format]
        except KeyError as ex:
            formats = ", ".join(sorted(methods.keys()))
            raise ValueError(
                "Unsupported format: %r, try one of %s" % (file_format, formats)
            ) from ex

    def save_pkcs1(self, format: str = "PEM") -> bytes:
        """Saves the key in PKCS#1 DER or PEM format.

        :param format: the format to save; 'PEM' or 'DER'
        :type format: str
        :returns: the DER- or PEM-encoded key.
        :rtype: bytes
        """

        methods = {
            "PEM": self._save_pkcs1_pem,
            "DER": self._save_pkcs1_der,
        }

        method = self._assert_format_exists(format, methods)
        return method()

    def blind(self, message: int) -> typing.Tuple[int, int]:
        """Performs blinding on the message.

        :param message: the message, as integer, to blind.
        :return: tuple (the blinded message, the inverse of the used blinding factor)

        The blinding is such that message = unblind(decrypt(blind(encrypt(message))).

        See https://en.wikipedia.org/wiki/Blinding_%28cryptography%29
        """
        # blinded = message * r^e mod n, where r is the current blinding
        # factor; the caller must pass the returned inverse to unblind().
        blindfac, blindfac_inverse = self._update_blinding_factor()
        blinded = (message * pow(blindfac, self.e, self.n)) % self.n
        return blinded, blindfac_inverse

    def unblind(self, blinded: int, blindfac_inverse: int) -> int:
        """Removes the blinding from a blinded message.

        :param blinded: the blinded message, as integer, to unblind.
        :param blindfac_inverse: the inverse of the blinding factor that was
            used to blind, as returned by :py:meth:`blind`.
        :return: the original message.

        The blinding is such that message = unblind(decrypt(blind(encrypt(message))).

        See https://en.wikipedia.org/wiki/Blinding_%28cryptography%29
        """
        return (blindfac_inverse * blinded) % self.n

    def _initial_blinding_factor(self) -> int:
        # Draw random candidates until one is coprime with n (so that its
        # modular inverse exists); give up after 1000 tries.
        for _ in range(1000):
            blind_r = rsa.randnum.randint(self.n - 1)
            if rsa.prime.are_relatively_prime(self.n, blind_r):
                return blind_r
        raise RuntimeError("unable to find blinding factor")

    def _update_blinding_factor(self) -> typing.Tuple[int, int]:
        """Update blinding factors.

        Computing a blinding factor is expensive, so instead this function
        does this once, then updates the blinding factor as per section 9
        of 'A Timing Attack against RSA with the Chinese Remainder Theorem'
        by Werner Schindler.
        See https://tls.mbed.org/public/WSchindler-RSA_Timing_Attack.pdf

        :return: the new blinding factor and its inverse.
        """

        with self.mutex:
            if self.blindfac < 0:
                # Compute initial blinding factor, which is rather slow to do.
                self.blindfac = self._initial_blinding_factor()
                self.blindfac_inverse = rsa.common.inverse(self.blindfac, self.n)
            else:
                # Reuse previous blinding factor: squaring both r and r^-1
                # (mod n) yields a fresh valid pair cheaply.
                self.blindfac = pow(self.blindfac, 2, self.n)
                self.blindfac_inverse = pow(self.blindfac_inverse, 2, self.n)

            return self.blindfac, self.blindfac_inverse
+
+
class PublicKey(AbstractKey):
    """Represents a public RSA key.

    This key is also known as the 'encryption key'. It contains the 'n' and 'e'
    values.

    Supports attributes as well as dictionary-like access. Attribute access is
    faster, though.

    >>> PublicKey(5, 3)
    PublicKey(5, 3)

    >>> key = PublicKey(5, 3)
    >>> key.n
    5
    >>> key['n']
    5
    >>> key.e
    3
    >>> key['e']
    3

    """

    # No attributes beyond those declared by AbstractKey.
    __slots__ = ()

    def __getitem__(self, key: str) -> int:
        # Dictionary-like access: key['n'] is equivalent to key.n.
        return getattr(self, key)

    def __repr__(self) -> str:
        return "PublicKey(%i, %i)" % (self.n, self.e)

    def __getstate__(self) -> typing.Tuple[int, int]:
        """Returns the key as tuple for pickling."""
        # The mutex and blinding state are deliberately not pickled.
        return self.n, self.e

    def __setstate__(self, state: typing.Tuple[int, int]) -> None:
        """Sets the key from tuple."""
        self.n, self.e = state
        # Re-run __init__ to recreate the mutex and blinding state that
        # __getstate__ did not serialize.
        AbstractKey.__init__(self, self.n, self.e)

    def __eq__(self, other: typing.Any) -> bool:
        if other is None:
            return False

        if not isinstance(other, PublicKey):
            return False

        return self.n == other.n and self.e == other.e

    def __ne__(self, other: typing.Any) -> bool:
        return not (self == other)

    def __hash__(self) -> int:
        # Defined together with __eq__ so that equal keys hash equally.
        return hash((self.n, self.e))

    @classmethod
    def _load_pkcs1_der(cls, keyfile: bytes) -> "PublicKey":
        """Loads a key in PKCS#1 DER format.

        :param keyfile: contents of a DER-encoded file that contains the public
            key.
        :return: a PublicKey object

        First let's construct a DER encoded key:

        >>> import base64
        >>> b64der = 'MAwCBQCNGmYtAgMBAAE='
        >>> der = base64.standard_b64decode(b64der)

        This loads the file:

        >>> PublicKey._load_pkcs1_der(der)
        PublicKey(2367317549, 65537)

        """

        # pyasn1 is imported as late as possible (see the module docstring)
        # so the rest of the module keeps working without it.
        from pyasn1.codec.der import decoder
        from rsa.asn1 import AsnPubKey

        (priv, _) = decoder.decode(keyfile, asn1Spec=AsnPubKey())
        return cls(n=int(priv["modulus"]), e=int(priv["publicExponent"]))

    def _save_pkcs1_der(self) -> bytes:
        """Saves the public key in PKCS#1 DER format.

        :returns: the DER-encoded public key.
        :rtype: bytes
        """

        from pyasn1.codec.der import encoder
        from rsa.asn1 import AsnPubKey

        # Create the ASN object
        asn_key = AsnPubKey()
        asn_key.setComponentByName("modulus", self.n)
        asn_key.setComponentByName("publicExponent", self.e)

        return encoder.encode(asn_key)

    @classmethod
    def _load_pkcs1_pem(cls, keyfile: bytes) -> "PublicKey":
        """Loads a PKCS#1 PEM-encoded public key file.

        The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
        after the "-----END RSA PUBLIC KEY-----" lines is ignored.

        :param keyfile: contents of a PEM-encoded file that contains the public
            key.
        :return: a PublicKey object
        """

        der = rsa.pem.load_pem(keyfile, "RSA PUBLIC KEY")
        return cls._load_pkcs1_der(der)

    def _save_pkcs1_pem(self) -> bytes:
        """Saves a PKCS#1 PEM-encoded public key file.

        :return: contents of a PEM-encoded file that contains the public key.
        :rtype: bytes
        """

        der = self._save_pkcs1_der()
        return rsa.pem.save_pem(der, "RSA PUBLIC KEY")

    @classmethod
    def load_pkcs1_openssl_pem(cls, keyfile: bytes) -> "PublicKey":
        """Loads a PKCS#1.5 PEM-encoded public key file from OpenSSL.

        These files can be recognised in that they start with BEGIN PUBLIC KEY
        rather than BEGIN RSA PUBLIC KEY.

        The contents of the file before the "-----BEGIN PUBLIC KEY-----" and
        after the "-----END PUBLIC KEY-----" lines is ignored.

        :param keyfile: contents of a PEM-encoded file that contains the public
            key, from OpenSSL.
        :type keyfile: bytes
        :return: a PublicKey object
        """

        der = rsa.pem.load_pem(keyfile, "PUBLIC KEY")
        return cls.load_pkcs1_openssl_der(der)

    @classmethod
    def load_pkcs1_openssl_der(cls, keyfile: bytes) -> "PublicKey":
        """Loads a PKCS#1 DER-encoded public key file from OpenSSL.

        :param keyfile: contents of a DER-encoded file that contains the public
            key, from OpenSSL.
        :return: a PublicKey object
        """

        from rsa.asn1 import OpenSSLPubKey
        from pyasn1.codec.der import decoder
        from pyasn1.type import univ

        (keyinfo, _) = decoder.decode(keyfile, asn1Spec=OpenSSLPubKey())

        # 1.2.840.113549.1.1.1 is the rsaEncryption object identifier.
        if keyinfo["header"]["oid"] != univ.ObjectIdentifier("1.2.840.113549.1.1.1"):
            raise TypeError("This is not a DER-encoded OpenSSL-compatible public key")

        # NOTE(review): the [1:] strips the leading element of the decoded
        # 'key' component before parsing the inner PKCS#1 structure; this
        # relies on rsa.asn1.OpenSSLPubKey's representation — confirm there.
        return cls._load_pkcs1_der(keyinfo["key"][1:])
+
+
class PrivateKey(AbstractKey):
    """Represents a private RSA key.

    This key is also known as the 'decryption key'. It contains the 'n', 'e',
    'd', 'p', 'q' and other values.

    Supports attributes as well as dictionary-like access. Attribute access is
    faster, though.

    >>> PrivateKey(3247, 65537, 833, 191, 17)
    PrivateKey(3247, 65537, 833, 191, 17)

    exp1, exp2 and coef will be calculated:

    >>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
    >>> pk.exp1
    55063
    >>> pk.exp2
    10095
    >>> pk.coef
    50797

    """

    # Extra attributes on top of AbstractKey's (n, e, ...) slots.
    __slots__ = ("d", "p", "q", "exp1", "exp2", "coef")

    def __init__(self, n: int, e: int, d: int, p: int, q: int) -> None:
        AbstractKey.__init__(self, n, e)
        self.d = d
        self.p = p
        self.q = q

        # Calculate exponents and coefficient.
        # exp1 = d mod (p-1), exp2 = d mod (q-1), coef = q^-1 mod p;
        # these are the CRT parameters used by blinded_decrypt() (and stored
        # in PKCS#1 files, see _load_pkcs1_der / _save_pkcs1_der below).
        self.exp1 = int(d % (p - 1))
        self.exp2 = int(d % (q - 1))
        self.coef = rsa.common.inverse(q, p)

    def __getitem__(self, key: str) -> int:
        # Dictionary-like access: key['d'] is equivalent to key.d.
        return getattr(self, key)

    def __repr__(self) -> str:
        return "PrivateKey(%i, %i, %i, %i, %i)" % (
            self.n,
            self.e,
            self.d,
            self.p,
            self.q,
        )

    def __getstate__(self) -> typing.Tuple[int, int, int, int, int, int, int, int]:
        """Returns the key as tuple for pickling."""
        # The mutex and blinding state are deliberately not pickled.
        return self.n, self.e, self.d, self.p, self.q, self.exp1, self.exp2, self.coef

    def __setstate__(self, state: typing.Tuple[int, int, int, int, int, int, int, int]) -> None:
        """Sets the key from tuple."""
        self.n, self.e, self.d, self.p, self.q, self.exp1, self.exp2, self.coef = state
        # Re-run __init__ to recreate the mutex and blinding state that
        # __getstate__ did not serialize.
        AbstractKey.__init__(self, self.n, self.e)

    def __eq__(self, other: typing.Any) -> bool:
        if other is None:
            return False

        if not isinstance(other, PrivateKey):
            return False

        return (
            self.n == other.n
            and self.e == other.e
            and self.d == other.d
            and self.p == other.p
            and self.q == other.q
            and self.exp1 == other.exp1
            and self.exp2 == other.exp2
            and self.coef == other.coef
        )

    def __ne__(self, other: typing.Any) -> bool:
        return not (self == other)

    def __hash__(self) -> int:
        # Defined together with __eq__ so that equal keys hash equally.
        return hash((self.n, self.e, self.d, self.p, self.q, self.exp1, self.exp2, self.coef))

    def blinded_decrypt(self, encrypted: int) -> int:
        """Decrypts the message using blinding to prevent side-channel attacks.

        :param encrypted: the encrypted message
        :type encrypted: int

        :returns: the decrypted message
        :rtype: int
        """

        # Blinding and un-blinding should be using the same factor
        blinded, blindfac_inverse = self.blind(encrypted)

        # Instead of using the core functionality, use the Chinese Remainder
        # Theorem and be 2-4x faster. This the same as:
        #
        # decrypted = rsa.core.decrypt_int(blinded, self.d, self.n)
        s1 = pow(blinded, self.exp1, self.p)
        s2 = pow(blinded, self.exp2, self.q)
        # Recombine the two half-size results into the full result mod n.
        h = ((s1 - s2) * self.coef) % self.p
        decrypted = s2 + self.q * h

        return self.unblind(decrypted, blindfac_inverse)

    def blinded_encrypt(self, message: int) -> int:
        """Encrypts the message using blinding to prevent side-channel attacks.

        :param message: the message to encrypt
        :type message: int

        :returns: the encrypted message
        :rtype: int
        """

        # Exponentiation with the private exponent d (the signing primitive).
        blinded, blindfac_inverse = self.blind(message)
        encrypted = rsa.core.encrypt_int(blinded, self.d, self.n)
        return self.unblind(encrypted, blindfac_inverse)

    @classmethod
    def _load_pkcs1_der(cls, keyfile: bytes) -> "PrivateKey":
        """Loads a key in PKCS#1 DER format.

        :param keyfile: contents of a DER-encoded file that contains the private
            key.
        :type keyfile: bytes
        :return: a PrivateKey object

        First let's construct a DER encoded key:

        >>> import base64
        >>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
        >>> der = base64.standard_b64decode(b64der)

        This loads the file:

        >>> PrivateKey._load_pkcs1_der(der)
        PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)

        """

        # pyasn1 is imported as late as possible (see the module docstring).
        from pyasn1.codec.der import decoder

        (priv, _) = decoder.decode(keyfile)

        # ASN.1 contents of DER encoded private key:
        #
        # RSAPrivateKey ::= SEQUENCE {
        #     version Version,
        #     modulus INTEGER, -- n
        #     publicExponent INTEGER, -- e
        #     privateExponent INTEGER, -- d
        #     prime1 INTEGER, -- p
        #     prime2 INTEGER, -- q
        #     exponent1 INTEGER, -- d mod (p-1)
        #     exponent2 INTEGER, -- d mod (q-1)
        #     coefficient INTEGER, -- (inverse of q) mod p
        #     otherPrimeInfos OtherPrimeInfos OPTIONAL
        # }

        if priv[0] != 0:
            raise ValueError("Unable to read this file, version %s != 0" % priv[0])

        # Construct from n, e, d, p, q; __init__ recomputes exp1/exp2/coef.
        as_ints = map(int, priv[1:6])
        key = cls(*as_ints)

        # Compare the recomputed CRT values against the stored ones; a
        # mismatch means the file is malformed, but the recomputed values
        # are used regardless.
        exp1, exp2, coef = map(int, priv[6:9])

        if (key.exp1, key.exp2, key.coef) != (exp1, exp2, coef):
            warnings.warn(
                "You have provided a malformed keyfile. Either the exponents "
                "or the coefficient are incorrect. Using the correct values "
                "instead.",
                UserWarning,
            )

        return key

    def _save_pkcs1_der(self) -> bytes:
        """Saves the private key in PKCS#1 DER format.

        :returns: the DER-encoded private key.
        :rtype: bytes
        """

        from pyasn1.type import univ, namedtype
        from pyasn1.codec.der import encoder

        # Local ASN.1 spec mirroring the RSAPrivateKey SEQUENCE documented
        # in _load_pkcs1_der above.
        class AsnPrivKey(univ.Sequence):
            componentType = namedtype.NamedTypes(
                namedtype.NamedType("version", univ.Integer()),
                namedtype.NamedType("modulus", univ.Integer()),
                namedtype.NamedType("publicExponent", univ.Integer()),
                namedtype.NamedType("privateExponent", univ.Integer()),
                namedtype.NamedType("prime1", univ.Integer()),
                namedtype.NamedType("prime2", univ.Integer()),
                namedtype.NamedType("exponent1", univ.Integer()),
                namedtype.NamedType("exponent2", univ.Integer()),
                namedtype.NamedType("coefficient", univ.Integer()),
            )

        # Create the ASN object
        asn_key = AsnPrivKey()
        asn_key.setComponentByName("version", 0)
        asn_key.setComponentByName("modulus", self.n)
        asn_key.setComponentByName("publicExponent", self.e)
        asn_key.setComponentByName("privateExponent", self.d)
        asn_key.setComponentByName("prime1", self.p)
        asn_key.setComponentByName("prime2", self.q)
        asn_key.setComponentByName("exponent1", self.exp1)
        asn_key.setComponentByName("exponent2", self.exp2)
        asn_key.setComponentByName("coefficient", self.coef)

        return encoder.encode(asn_key)

    @classmethod
    def _load_pkcs1_pem(cls, keyfile: bytes) -> "PrivateKey":
        """Loads a PKCS#1 PEM-encoded private key file.

        The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
        after the "-----END RSA PRIVATE KEY-----" lines is ignored.

        :param keyfile: contents of a PEM-encoded file that contains the private
            key.
        :type keyfile: bytes
        :return: a PrivateKey object
        """

        der = rsa.pem.load_pem(keyfile, b"RSA PRIVATE KEY")
        return cls._load_pkcs1_der(der)

    def _save_pkcs1_pem(self) -> bytes:
        """Saves a PKCS#1 PEM-encoded private key file.

        :return: contents of a PEM-encoded file that contains the private key.
        :rtype: bytes
        """

        der = self._save_pkcs1_der()
        return rsa.pem.save_pem(der, b"RSA PRIVATE KEY")
+
+
def find_p_q(
    nbits: int,
    getprime_func: typing.Callable[[int], int] = rsa.prime.getprime,
    accurate: bool = True,
) -> typing.Tuple[int, int]:
    """Returns a tuple of two different primes of nbits bits each.

    The resulting p * q has exactly 2 * nbits bits, and the returned p and q
    will not be equal.

    :param nbits: the number of bits in each of p and q.
    :param getprime_func: the getprime function, defaults to
        :py:func:`rsa.prime.getprime`.

        *Introduced in Python-RSA 3.1*

    :param accurate: whether to enable accurate mode or not.
    :returns: (p, q), where p > q

    >>> (p, q) = find_p_q(128)
    >>> from rsa import common
    >>> common.bit_size(p * q)
    256

    When not in accurate mode, the number of bits can be slightly less

    >>> (p, q) = find_p_q(128, accurate=False)
    >>> from rsa import common
    >>> common.bit_size(p * q) <= 256
    True
    >>> common.bit_size(p * q) > 240
    True

    """

    total_bits = nbits * 2

    # Shift the two prime sizes apart so p and q are not close enough for
    # factoring programs to factor n.
    shift = nbits // 16
    pbits = nbits + shift
    qbits = nbits - shift

    def acceptable(cand_p: int, cand_q: int) -> bool:
        """True iff the candidate pair differs and, in accurate mode,
        its product has exactly total_bits bits."""

        if cand_p == cand_q:
            return False

        if not accurate:
            return True

        # Make sure we have just the right amount of bits
        return rsa.common.bit_size(cand_p * cand_q) == total_bits

    # Choose the two initial primes.
    p = getprime_func(pbits)
    q = getprime_func(qbits)

    # Alternate regenerating p and q until the pair is acceptable.
    regenerate_p = False
    while not acceptable(p, q):
        if regenerate_p:
            p = getprime_func(pbits)
        else:
            q = getprime_func(qbits)
        regenerate_p = not regenerate_p

    # We want p > q as described on
    # http://www.di-mgt.com.au/rsa_alg.html#crt
    return max(p, q), min(p, q)
+
+
def calculate_keys_custom_exponent(p: int, q: int, exponent: int) -> typing.Tuple[int, int]:
    """Calculates an encryption and a decryption key given p, q and an exponent,
    and returns them as a tuple (e, d)

    :param p: the first large prime
    :param q: the second large prime
    :param exponent: the exponent for the key; only change this if you know
        what you're doing, as the exponent influences how difficult your
        private key can be cracked. A very common choice for e is 65537.
    :type exponent: int

    """

    # Euler's totient of n = p*q for primes p and q.
    phi_n = (p - 1) * (q - 1)

    try:
        d = rsa.common.inverse(exponent, phi_n)
    except rsa.common.NotRelativePrimeError as ex:
        # Re-raise with a message that names e and phi_n explicitly.
        msg = "e (%d) and phi_n (%d) are not relatively prime (divider=%i)" % (
            exponent,
            phi_n,
            ex.d,
        )
        raise rsa.common.NotRelativePrimeError(exponent, phi_n, ex.d, msg=msg) from ex

    # Sanity check: e and d must be multiplicative inverses modulo phi_n.
    if (exponent * d) % phi_n != 1:
        raise ValueError(
            "e (%d) and d (%d) are not mult. inv. modulo phi_n (%d)" % (exponent, d, phi_n)
        )

    return exponent, d
+
+
def calculate_keys(p: int, q: int) -> typing.Tuple[int, int]:
    """Calculates an encryption and a decryption key given p and q, and
    returns them as a tuple (e, d)

    :param p: the first large prime
    :param q: the second large prime

    :return: tuple (e, d) with the encryption and decryption exponents.
    """

    # Thin wrapper that fixes the exponent to the library default (65537).
    return calculate_keys_custom_exponent(p, q, exponent=DEFAULT_EXPONENT)
+
+
def gen_keys(
    nbits: int,
    getprime_func: typing.Callable[[int], int],
    accurate: bool = True,
    exponent: int = DEFAULT_EXPONENT,
) -> typing.Tuple[int, int, int, int]:
    """Generate RSA keys of nbits bits. Returns (p, q, e, d).

    Note: this can take a long time, depending on the key size.

    :param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
        ``q`` will use ``nbits/2`` bits.
    :param getprime_func: either :py:func:`rsa.prime.getprime` or a function
        with similar signature.
    :param exponent: the exponent for the key; only change this if you know
        what you're doing, as the exponent influences how difficult your
        private key can be cracked. A very common choice for e is 65537.
    :type exponent: int
    """

    # Keep drawing fresh (p, q) pairs until the chosen exponent is invertible
    # modulo phi_n; calculate_keys_custom_exponent raises ValueError otherwise.
    while True:
        p, q = find_p_q(nbits // 2, getprime_func, accurate)
        try:
            e, d = calculate_keys_custom_exponent(p, q, exponent=exponent)
        except ValueError:
            continue
        return p, q, e, d
+
+
def newkeys(
    nbits: int,
    accurate: bool = True,
    poolsize: int = 1,
    exponent: int = DEFAULT_EXPONENT,
) -> typing.Tuple[PublicKey, PrivateKey]:
    """Generates public and private keys, and returns them as (pub, priv).

    The public key is also known as the 'encryption key', and is a
    :py:class:`rsa.PublicKey` object. The private key is also known as the
    'decryption key' and is a :py:class:`rsa.PrivateKey` object.

    :param nbits: the number of bits required to store ``n = p*q``.
    :param accurate: when True, ``n`` will have exactly the number of bits you
        asked for. However, this makes key generation much slower. When False,
        ``n`` may have slightly fewer bits.
    :param poolsize: the number of processes to use to generate the prime
        numbers. If set to a number > 1, a parallel algorithm will be used.
        This requires Python 2.6 or newer.
    :param exponent: the exponent for the key; only change this if you know
        what you're doing, as the exponent influences how difficult your
        private key can be cracked. A very common choice for e is 65537.
    :type exponent: int

    :returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)

    The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
    Python 2.6 or newer.

    """

    # Validate arguments before doing any expensive work.
    if nbits < 16:
        raise ValueError("Key too small")
    if poolsize < 1:
        raise ValueError("Pool size (%i) should be >= 1" % poolsize)

    # Pick the prime generator: parallel when a pool was requested,
    # otherwise the plain single-process one.
    if poolsize > 1:
        from rsa import parallel

        def getprime_func(nbits: int) -> int:
            return parallel.getprime(nbits, poolsize=poolsize)

    else:
        getprime_func = rsa.prime.getprime

    # Generate the key components and wrap them in key objects.
    p, q, e, d = gen_keys(nbits, getprime_func, accurate=accurate, exponent=exponent)
    n = p * q

    return (PublicKey(n, e), PrivateKey(n, e, d, p, q))
+
+
+__all__ = ["PublicKey", "PrivateKey", "newkeys"]
+
+if __name__ == "__main__":
+ import doctest
+
+ try:
+ for count in range(100):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if (count % 10 == 0 and count) or count == 1:
+ print("%i times" % count)
+ except KeyboardInterrupt:
+ print("Aborted")
+ else:
+ print("Doctests done")
diff --git a/contrib/python/rsa/py3/rsa/parallel.py b/contrib/python/rsa/py3/rsa/parallel.py
new file mode 100644
index 0000000000..5020edbc76
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/parallel.py
@@ -0,0 +1,96 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for parallel computation on multiple cores.
+
+Introduced in Python-RSA 3.1.
+
+.. note::
+
+ Requires Python 2.6 or newer.
+
+"""
+
+import multiprocessing as mp
+from multiprocessing.connection import Connection
+
+import rsa.prime
+import rsa.randnum
+
+
def _find_prime(nbits: int, pipe: Connection) -> None:
    """Worker loop: draw random odd integers of ``nbits`` bits until one is
    prime, send it down ``pipe`` and exit."""

    while True:
        candidate = rsa.randnum.read_random_odd_int(nbits)

        # Test for primeness; keep looping on composites.
        if not rsa.prime.is_prime(candidate):
            continue

        pipe.send(candidate)
        return
+
+
def getprime(nbits: int, poolsize: int) -> int:
    """Returns a prime number that can be stored in 'nbits' bits.

    Works in multiple threads at the same time.

    :param nbits: number of bits in the prime.
    :param poolsize: number of worker processes racing to find a prime.

    >>> p = getprime(128, 3)
    >>> rsa.prime.is_prime(p-1)
    False
    >>> rsa.prime.is_prime(p)
    True
    >>> rsa.prime.is_prime(p+1)
    False

    >>> from rsa import common
    >>> common.bit_size(p) == 128
    True

    """

    (pipe_recv, pipe_send) = mp.Pipe(duplex=False)

    # Pre-bind so the cleanup below is safe even when process creation fails
    # (previously `procs` could be unbound in the termination loop).
    procs = []

    try:
        # Create and start the worker processes; each runs _find_prime and
        # the first to find a prime wins.
        procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send)) for _ in range(poolsize)]
        for proc in procs:
            proc.start()

        result = pipe_recv.recv()
    finally:
        pipe_recv.close()
        pipe_send.close()

        # Terminate the workers even when recv() raised; the original code
        # only terminated them on the success path, leaking the processes
        # on error.
        for proc in procs:
            proc.terminate()

    return result
+
+
+__all__ = ["getprime"]
+
+if __name__ == "__main__":
+ print("Running doctests 1000x or until failure")
+ import doctest
+
+ for count in range(100):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 10 == 0 and count:
+ print("%i times" % count)
+
+ print("Doctests done")
diff --git a/contrib/python/rsa/py3/rsa/pem.py b/contrib/python/rsa/py3/rsa/pem.py
new file mode 100644
index 0000000000..5d26e6ed09
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/pem.py
@@ -0,0 +1,134 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions that load and write PEM-encoded files."""
+
+import base64
+import typing
+
# Should either be ASCII strings or bytes.
FlexiText = typing.Union[str, bytes]


def _markers(pem_marker: FlexiText) -> typing.Tuple[bytes, bytes]:
    """Returns the start and end PEM markers, as bytes.

    :param pem_marker: marker text such as 'RSA PRIVATE KEY'; a str is
        encoded as ASCII.
    """

    if not isinstance(pem_marker, bytes):
        pem_marker = pem_marker.encode("ascii")

    return (
        b"-----BEGIN " + pem_marker + b"-----",
        b"-----END " + pem_marker + b"-----",
    )


def _pem_lines(contents: bytes, pem_start: bytes, pem_end: bytes) -> typing.Iterator[bytes]:
    """Generator over PEM payload lines between pem_start and pem_end.

    Blank lines, content outside the markers, and 'Name: value' header
    fields are skipped.

    :raise ValueError: when the start marker appears twice, or when either
        marker is missing.
    """

    in_pem_part = False
    seen_pem_start = False

    for line in contents.splitlines():
        line = line.strip()

        # Skip empty lines
        if not line:
            continue

        # Handle start marker
        if line == pem_start:
            if in_pem_part:
                raise ValueError('Seen start marker "%r" twice' % pem_start)

            in_pem_part = True
            seen_pem_start = True
            continue

        # Skip stuff before first marker
        if not in_pem_part:
            continue

        # Handle end marker; anything after it is ignored.
        if line == pem_end:
            in_pem_part = False
            break

        # Skip header fields ("Proc-Type: ..." etc.); they are not payload.
        if b":" in line:
            continue

        yield line

    # Do some sanity checks
    if not seen_pem_start:
        raise ValueError('No PEM start marker "%r" found' % pem_start)

    if in_pem_part:
        raise ValueError('No PEM end marker "%r" found' % pem_end)


def load_pem(contents: FlexiText, pem_marker: FlexiText) -> bytes:
    """Loads a PEM file.

    :param contents: the contents of the file to interpret
    :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
        when your file has '-----BEGIN RSA PRIVATE KEY-----' and
        '-----END RSA PRIVATE KEY-----' markers.

    :return: the base64-decoded content between the start and end markers.

    @raise ValueError: when the content is invalid, for example when the start
        marker cannot be found.

    """

    # We want bytes, not text. If it's text, it can be converted to ASCII bytes.
    if not isinstance(contents, bytes):
        contents = contents.encode("ascii")

    (pem_start, pem_end) = _markers(pem_marker)

    # Feed the generator straight into join(); the original built an
    # intermediate list first, which is an unnecessary copy.
    pem = b"".join(_pem_lines(contents, pem_start, pem_end))

    # Base64-decode the contents
    return base64.standard_b64decode(pem)


def save_pem(contents: bytes, pem_marker: FlexiText) -> bytes:
    """Saves a PEM file.

    :param contents: the contents to encode in PEM format
    :param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
        when your file has '-----BEGIN RSA PRIVATE KEY-----' and
        '-----END RSA PRIVATE KEY-----' markers.

    :return: the base64-encoded content between the start and end markers, as bytes.

    """

    (pem_start, pem_end) = _markers(pem_marker)

    b64 = base64.standard_b64encode(contents).replace(b"\n", b"")

    # Conventional PEM formatting: 64 base64 characters per line.
    pem_lines = [pem_start]
    pem_lines.extend(b64[offset : offset + 64] for offset in range(0, len(b64), 64))
    pem_lines.append(pem_end)
    pem_lines.append(b"")  # ensure a trailing newline after the end marker

    return b"\n".join(pem_lines)
diff --git a/contrib/python/rsa/py3/rsa/pkcs1.py b/contrib/python/rsa/py3/rsa/pkcs1.py
new file mode 100644
index 0000000000..ec6998e537
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/pkcs1.py
@@ -0,0 +1,485 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for PKCS#1 version 1.5 encryption and signing
+
+This module implements certain functionality from PKCS#1 version 1.5. For a
+very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
+
+At least 8 bytes of random padding is used when encrypting a message. This makes
+these methods much more secure than the ones in the ``rsa`` module.
+
+WARNING: this module leaks information when decryption fails. The exceptions
+that are raised contain the Python traceback information, which can be used to
+deduce where in the process the failure occurred. DO NOT PASS SUCH INFORMATION
+to your users.
+"""
+
+import hashlib
+import os
+import sys
+import typing
+from hmac import compare_digest
+
+from . import common, transform, core, key
+
+if typing.TYPE_CHECKING:
+ HashType = hashlib._Hash
+else:
+ HashType = typing.Any
+
# ASN.1 codes that describe the hash algorithm used.
# Each value is the DER-encoded DigestInfo prefix (algorithm identifier plus
# OCTET STRING header) that is prepended to the raw digest before PKCS#1 v1.5
# signature padding.
HASH_ASN1 = {
    "MD5": b"\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10",
    "SHA-1": b"\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14",
    "SHA-224": b"\x30\x2d\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04\x05\x00\x04\x1c",
    "SHA-256": b"\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20",
    "SHA-384": b"\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30",
    "SHA-512": b"\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40",
}

# Maps the hash-method name to a zero-argument factory returning a hasher.
HASH_METHODS: typing.Dict[str, typing.Callable[[], HashType]] = {
    "MD5": hashlib.md5,
    "SHA-1": hashlib.sha1,
    "SHA-224": hashlib.sha224,
    "SHA-256": hashlib.sha256,
    "SHA-384": hashlib.sha384,
    "SHA-512": hashlib.sha512,
}
"""Hash methods supported by this library."""


if sys.version_info >= (3, 6):
    # Python 3.6 introduced SHA3 support.
    HASH_ASN1.update(
        {
            "SHA3-256": b"\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x08\x05\x00\x04\x20",
            "SHA3-384": b"\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x09\x05\x00\x04\x30",
            "SHA3-512": b"\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x0a\x05\x00\x04\x40",
        }
    )

    HASH_METHODS.update(
        {
            "SHA3-256": hashlib.sha3_256,
            "SHA3-384": hashlib.sha3_384,
            "SHA3-512": hashlib.sha3_512,
        }
    )
+
+
class CryptoError(Exception):
    """Base class for all exceptions in this module.

    Catch this to handle any failure raised by these PKCS#1 routines.
    """
+
+
class DecryptionError(CryptoError):
    """Raised when decryption fails.

    Deliberately carries no detail about *why* decryption failed, to avoid
    leaking information about the private key.
    """
+
+
class VerificationError(CryptoError):
    """Raised when signature verification fails."""
+
+
+def _pad_for_encryption(message: bytes, target_length: int) -> bytes:
+ r"""Pads the message for encryption, returning the padded message.
+
+ :return: 00 02 RANDOM_DATA 00 MESSAGE
+
+ >>> block = _pad_for_encryption(b'hello', 16)
+ >>> len(block)
+ 16
+ >>> block[0:2]
+ b'\x00\x02'
+ >>> block[-6:]
+ b'\x00hello'
+
+ """
+
+ max_msglength = target_length - 11
+ msglength = len(message)
+
+ if msglength > max_msglength:
+ raise OverflowError(
+ "%i bytes needed for message, but there is only"
+ " space for %i" % (msglength, max_msglength)
+ )
+
+ # Get random padding
+ padding = b""
+ padding_length = target_length - msglength - 3
+
+ # We remove 0-bytes, so we'll end up with less padding than we've asked for,
+ # so keep adding data until we're at the correct length.
+ while len(padding) < padding_length:
+ needed_bytes = padding_length - len(padding)
+
+ # Always read at least 8 bytes more than we need, and trim off the rest
+ # after removing the 0-bytes. This increases the chance of getting
+ # enough bytes, especially when needed_bytes is small
+ new_padding = os.urandom(needed_bytes + 5)
+ new_padding = new_padding.replace(b"\x00", b"")
+ padding = padding + new_padding[:needed_bytes]
+
+ assert len(padding) == padding_length
+
+ return b"".join([b"\x00\x02", padding, b"\x00", message])
+
+
+def _pad_for_signing(message: bytes, target_length: int) -> bytes:
+ r"""Pads the message for signing, returning the padded message.
+
+ The padding is always a repetition of FF bytes.
+
+ :return: 00 01 PADDING 00 MESSAGE
+
+ >>> block = _pad_for_signing(b'hello', 16)
+ >>> len(block)
+ 16
+ >>> block[0:2]
+ b'\x00\x01'
+ >>> block[-6:]
+ b'\x00hello'
+ >>> block[2:-6]
+ b'\xff\xff\xff\xff\xff\xff\xff\xff'
+
+ """
+
+ max_msglength = target_length - 11
+ msglength = len(message)
+
+ if msglength > max_msglength:
+ raise OverflowError(
+ "%i bytes needed for message, but there is only"
+ " space for %i" % (msglength, max_msglength)
+ )
+
+ padding_length = target_length - msglength - 3
+
+ return b"".join([b"\x00\x01", padding_length * b"\xff", b"\x00", message])
+
+
def encrypt(message: bytes, pub_key: key.PublicKey) -> bytes:
    """Encrypts the given message using PKCS#1 v1.5

    :param message: the message to encrypt. Must be a byte string no longer than
        ``k-11`` bytes, where ``k`` is the number of bytes needed to encode
        the ``n`` component of the public key.
    :param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
    :raise OverflowError: when the message is too large to fit in the padded
        block.

    >>> from rsa import key, common
    >>> (pub_key, priv_key) = key.newkeys(256)
    >>> message = b'hello'
    >>> crypto = encrypt(message, pub_key)

    The crypto text should be just as long as the public key 'n' component:

    >>> len(crypto) == common.byte_size(pub_key.n)
    True

    """

    key_bytes = common.byte_size(pub_key.n)

    # Random-pad, convert to an integer, apply the public exponent, and
    # convert back to a fixed-width byte string.
    padded_plaintext = _pad_for_encryption(message, key_bytes)
    plain_int = transform.bytes2int(padded_plaintext)
    cipher_int = core.encrypt_int(plain_int, pub_key.e, pub_key.n)

    return transform.int2bytes(cipher_int, key_bytes)
+
+
def decrypt(crypto: bytes, priv_key: key.PrivateKey) -> bytes:
    r"""Decrypts the given message using PKCS#1 v1.5

    The decryption is considered 'failed' when the resulting cleartext doesn't
    start with the bytes 00 02, or when the 00 byte between the padding and
    the message cannot be found.

    :param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
    :param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
    :raise DecryptionError: when the decryption fails. No details are given as
        to why the code thinks the decryption fails, as this would leak
        information about the private key.


    >>> import rsa
    >>> (pub_key, priv_key) = rsa.newkeys(256)

    It works with strings:

    >>> crypto = encrypt(b'hello', pub_key)
    >>> decrypt(crypto, priv_key)
    b'hello'

    And with binary data:

    >>> crypto = encrypt(b'\x00\x00\x00\x00\x01', pub_key)
    >>> decrypt(crypto, priv_key)
    b'\x00\x00\x00\x00\x01'

    Altering the encrypted information will *likely* cause a
    :py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
    :py:func:`rsa.sign`.


    .. warning::

        Never display the stack trace of a
        :py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
        code the exception occurred, and thus leaks information about the key.
        It's only a tiny bit of information, but every bit makes cracking the
        keys easier.

    >>> crypto = encrypt(b'hello', pub_key)
    >>> crypto = crypto[0:5] + b'X' + crypto[6:]  # change a byte
    >>> decrypt(crypto, priv_key)
    Traceback (most recent call last):
    ...
    rsa.pkcs1.DecryptionError: Decryption failed

    """

    blocksize = common.byte_size(priv_key.n)
    encrypted = transform.bytes2int(crypto)
    # blinded_decrypt applies the private exponent with blinding to thwart
    # timing attacks.
    decrypted = priv_key.blinded_decrypt(encrypted)
    cleartext = transform.int2bytes(decrypted, blocksize)

    # Detect leading zeroes in the crypto. These are not reflected in the
    # encrypted value (as leading zeroes do not influence the value of an
    # integer). This fixes CVE-2020-13757.
    if len(crypto) > blocksize:
        # This is operating on public information, so doesn't need to be constant-time.
        raise DecryptionError("Decryption failed")

    # If we can't find the cleartext marker, decryption failed.
    # compare_digest compares in constant time, avoiding a timing oracle.
    cleartext_marker_bad = not compare_digest(cleartext[:2], b"\x00\x02")

    # Find the 00 separator between the padding and the message
    sep_idx = cleartext.find(b"\x00", 2)

    # sep_idx indicates the position of the `\x00` separator that separates the
    # padding from the actual message. The padding should be at least 8 bytes
    # long (see https://tools.ietf.org/html/rfc8017#section-7.2.2 step 3), which
    # means the separator should be at least at index 10 (because of the
    # `\x00\x02` marker that precedes it).
    sep_idx_bad = sep_idx < 10

    # Bitwise | (not `or`) avoids short-circuiting, so both checks are always
    # evaluated regardless of the first result.
    anything_bad = cleartext_marker_bad | sep_idx_bad
    if anything_bad:
        raise DecryptionError("Decryption failed")

    return cleartext[sep_idx + 1 :]
+
+
def sign_hash(hash_value: bytes, priv_key: key.PrivateKey, hash_method: str) -> bytes:
    """Signs a precomputed hash with the private key.

    The hash is wrapped in an ASN.1 DigestInfo structure, padded for signing,
    and then encrypted with the private key, producing a "detached signature"
    (the message itself is not altered).

    :param hash_value: A precomputed hash to sign.
    :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
    :param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
        'SHA-224', 'SHA-256', 'SHA-384' or 'SHA-512'.
    :return: a message signature block.
    :raise ValueError: if ``hash_method`` is not a supported hash.
    :raise OverflowError: if the private key is too small to contain the
        requested hash.

    """

    # Get the ASN1 code for this hash method
    if hash_method not in HASH_ASN1:
        raise ValueError("Invalid hash method: %s" % hash_method)
    asn1code = HASH_ASN1[hash_method]

    # Prepend the DigestInfo prefix, then apply PKCS#1 v1.5 signature padding.
    cleartext = asn1code + hash_value
    keylength = common.byte_size(priv_key.n)
    padded = _pad_for_signing(cleartext, keylength)

    payload = transform.bytes2int(padded)
    # blinded_encrypt applies the private exponent with blinding to thwart
    # timing attacks.
    encrypted = priv_key.blinded_encrypt(payload)
    block = transform.int2bytes(encrypted, keylength)

    return block
+
+
def sign(message: bytes, priv_key: key.PrivateKey, hash_method: str) -> bytes:
    """Signs the message with the private key.

    Hashes the message, then signs the hash with the given key. This is known
    as a "detached signature", because the message itself isn't altered.

    :param message: the message to sign. Can be an 8-bit string or a file-like
        object. If ``message`` has a ``read()`` method, it is assumed to be a
        file-like object.
    :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
    :param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
        'SHA-224', 'SHA-256', 'SHA-384' or 'SHA-512'.
    :return: a message signature block.
    :raise ValueError: if ``hash_method`` is not a supported hash.
    :raise OverflowError: if the private key is too small to contain the
        requested hash.

    """

    msg_hash = compute_hash(message, hash_method)
    return sign_hash(msg_hash, priv_key, hash_method)
+
+
def verify(message: bytes, signature: bytes, pub_key: key.PublicKey) -> str:
    """Verifies that the signature matches the message.

    The hash method is detected automatically from the signature.

    :param message: the signed message. Can be an 8-bit string or a file-like
        object. If ``message`` has a ``read()`` method, it is assumed to be a
        file-like object.
    :param signature: the signature block, as created with :py:func:`rsa.sign`.
    :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
    :raise VerificationError: when the signature doesn't match the message.
    :returns: the name of the used hash.

    """

    keylength = common.byte_size(pub_key.n)
    # Apply the public exponent to recover the padded DigestInfo block.
    encrypted = transform.bytes2int(signature)
    decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
    clearsig = transform.int2bytes(decrypted, keylength)

    # Get the hash method
    method_name = _find_method_hash(clearsig)
    message_hash = compute_hash(message, method_name)

    # Reconstruct the expected padded hash
    cleartext = HASH_ASN1[method_name] + message_hash
    expected = _pad_for_signing(cleartext, keylength)

    # A signature whose length doesn't match the key size cannot be valid.
    if len(signature) != keylength:
        raise VerificationError("Verification failed")

    # Compare with the signed one. This operates on public data, so a
    # non-constant-time comparison suffices here.
    if expected != clearsig:
        raise VerificationError("Verification failed")

    return method_name
+
+
def find_signature_hash(signature: bytes, pub_key: key.PublicKey) -> str:
    """Returns the hash name detected from the signature.

    If you also want to verify the message, use :py:func:`rsa.verify()` instead.
    It also returns the name of the used hash.

    :param signature: the signature block, as created with :py:func:`rsa.sign`.
    :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
    :returns: the name of the used hash.
    """

    key_bytes = common.byte_size(pub_key.n)

    # Undo the public-key operation to expose the padded DigestInfo block,
    # then look up which hash's ASN.1 prefix it contains.
    signature_int = transform.bytes2int(signature)
    padded_int = core.decrypt_int(signature_int, pub_key.e, pub_key.n)
    clearsig = transform.int2bytes(padded_int, key_bytes)

    return _find_method_hash(clearsig)
+
+
def yield_fixedblocks(infile: typing.BinaryIO, blocksize: int) -> typing.Iterator[bytes]:
    """Generator, yields each block of ``blocksize`` bytes in the input file.

    The final block may be shorter than ``blocksize``; a short read ends the
    iteration.

    :param infile: file to read and separate in blocks.
    :param blocksize: block size in bytes.
    :returns: a generator that yields the contents of each block
    """

    while True:
        chunk = infile.read(blocksize)
        if not chunk:
            # End of file before the block even started.
            break

        yield chunk

        if len(chunk) < blocksize:
            # Partial block: nothing more to read.
            break
+
+
def compute_hash(message: typing.Union[bytes, typing.BinaryIO], method_name: str) -> bytes:
    """Returns the message digest.

    :param message: the signed message. Can be an 8-bit string or a file-like
        object. If ``message`` has a ``read()`` method, it is assumed to be a
        file-like object.
    :param method_name: the hash method, must be a key of
        :py:const:`rsa.pkcs1.HASH_METHODS`.

    """

    if method_name not in HASH_METHODS:
        raise ValueError("Invalid hash method: %s" % method_name)

    hasher = HASH_METHODS[method_name]()

    if isinstance(message, bytes):
        hasher.update(message)
        return hasher.digest()

    # Anything that isn't bytes must be a readable file-like object.
    assert hasattr(message, "read") and hasattr(message.read, "__call__")

    # Feed the file to the hasher in 1 KiB chunks.
    for chunk in yield_fixedblocks(message, 1024):
        hasher.update(chunk)

    return hasher.digest()
+
+
def _find_method_hash(clearsig: bytes) -> str:
    """Finds the hash method.

    :param clearsig: full padded ASN1 and hash.
    :return: the used hash method.
    :raise VerificationError: when the hash method cannot be found
    """

    for hashname, asn1code in HASH_ASN1.items():
        if asn1code not in clearsig:
            continue
        return hashname

    raise VerificationError("Verification failed")
+
+
+__all__ = [
+ "encrypt",
+ "decrypt",
+ "sign",
+ "verify",
+ "DecryptionError",
+ "VerificationError",
+ "CryptoError",
+]
+
+if __name__ == "__main__":
+ print("Running doctests 1000x or until failure")
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 100 == 0 and count:
+ print("%i times" % count)
+
+ print("Doctests done")
diff --git a/contrib/python/rsa/py3/rsa/pkcs1_v2.py b/contrib/python/rsa/py3/rsa/pkcs1_v2.py
new file mode 100644
index 0000000000..d68b907721
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/pkcs1_v2.py
@@ -0,0 +1,100 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for PKCS#1 version 2 encryption and signing
+
+This module implements certain functionality from PKCS#1 version 2. Main
+documentation is RFC 2437: https://tools.ietf.org/html/rfc2437
+"""
+
+from rsa import (
+ common,
+ pkcs1,
+ transform,
+)
+
+
def mgf1(seed: bytes, length: int, hasher: str = "SHA-1") -> bytes:
    """
    MGF1 is a Mask Generation Function based on a hash function.

    A mask generation function takes an octet string of variable length and a
    desired output length as input, and outputs an octet string of the desired
    length. The plaintext-awareness of RSAES-OAEP relies on the random nature of
    the output of the mask generation function, which in turn relies on the
    random nature of the underlying hash.

    :param bytes seed: seed from which mask is generated, an octet string
    :param int length: intended length in octets of the mask, at most 2^32(hLen)
    :param str hasher: hash function (hLen denotes the length in octets of the hash
        function output)

    :return: mask, an octet string of length `length`
    :rtype: bytes

    :raise OverflowError: when `length` is too large for the specified `hasher`
    :raise ValueError: when specified `hasher` is invalid
    """

    try:
        hash_length = pkcs1.HASH_METHODS[hasher]().digest_size
    except KeyError as ex:
        raise ValueError(
            "Invalid `hasher` specified. Please select one of: {hash_list}".format(
                hash_list=", ".join(sorted(pkcs1.HASH_METHODS.keys()))
            )
        ) from ex

    # If l > 2^32(hLen), output "mask too long" and stop.
    if length > (2 ** 32 * hash_length):
        raise OverflowError(
            "Desired length should be at most 2**32 times the hasher's output "
            "length ({hash_length} for {hasher} function)".format(
                hash_length=hash_length,
                hasher=hasher,
            )
        )

    # Looping `counter` from 0 to ceil(l / hLen)-1, build `output` based on the
    # hashes formed by (`seed` + C), being `C` an octet string of length 4
    # generated by converting `counter` with the primitive I2OSP.
    # The `+ 1` produces one block more than strictly needed; the surplus is
    # discarded by the truncation below.
    output = b"".join(
        pkcs1.compute_hash(
            seed + transform.int2bytes(counter, fill_size=4),
            method_name=hasher,
        )
        for counter in range(common.ceil_div(length, hash_length) + 1)
    )

    # Output the leading `length` octets of `output` as the octet string mask.
    return output[:length]
+
+
+__all__ = [
+ "mgf1",
+]
+
+if __name__ == "__main__":
+ print("Running doctests 1000x or until failure")
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 100 == 0 and count:
+ print("%i times" % count)
+
+ print("Doctests done")
diff --git a/contrib/python/rsa/py3/rsa/prime.py b/contrib/python/rsa/py3/rsa/prime.py
new file mode 100644
index 0000000000..ec486bcc05
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/prime.py
@@ -0,0 +1,198 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Numerical functions related to primes.
+
+Implementation based on the book Algorithm Design by Michael T. Goodrich and
+Roberto Tamassia, 2002.
+"""
+
+import rsa.common
+import rsa.randnum
+
+__all__ = ["getprime", "are_relatively_prime"]
+
+
def gcd(p: int, q: int) -> int:
    """Returns the greatest common divisor of p and q

    Uses the iterative Euclidean algorithm.

    >>> gcd(48, 180)
    12
    """

    while q:
        p, q = q, p % q
    return p
+
+
def get_primality_testing_rounds(number: int) -> int:
    """Returns minimum number of rounds for Miller-Rabin primality testing,
    based on number bitsize.

    According to NIST FIPS 186-4, Appendix C, Table C.3, minimum number of
    rounds of M-R testing, using an error probability of 2 ** (-100), for
    different p, q bitsizes are:
      * p, q bitsize: 512; rounds: 7
      * p, q bitsize: 1024; rounds: 4
      * p, q bitsize: 1536; rounds: 3
    See: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
    """

    bitsize = rsa.common.bit_size(number)

    # Thresholds from the FIPS table above, largest first.
    for minimum_bitsize, rounds in ((1536, 3), (1024, 4), (512, 7)):
        if bitsize >= minimum_bitsize:
            return rounds

    # For smaller bitsizes, use an arbitrary (conservative) number of rounds.
    return 10
+
+
def miller_rabin_primality_testing(n: int, k: int) -> bool:
    """Calculates whether n is composite (which is always correct) or prime
    (which theoretically is incorrect with error probability 4**-k), by
    applying Miller-Rabin primality testing.

    For reference and implementation example, see:
    https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test

    :param n: Integer to be tested for primality.
    :type n: int
    :param k: Number of rounds (witnesses) of Miller-Rabin testing.
    :type k: int
    :return: False if the number is composite, True if it's probably prime.
    :rtype: bool
    """

    # prevent potential infinite loop when d = 0
    if n < 2:
        return False

    # Decompose (n - 1) to write it as (2 ** r) * d
    # While d is even, divide it by 2 and increase the exponent.
    d = n - 1
    r = 0

    while not (d & 1):
        r += 1
        d >>= 1

    # Test k witnesses.
    for _ in range(k):
        # Generate random integer a, where 2 <= a <= (n - 2)
        # (randnum.randint returns 1..n-3, so a is in 2..n-2).
        a = rsa.randnum.randint(n - 3) + 1

        x = pow(a, d, n)
        if x == 1 or x == n - 1:
            continue

        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == 1:
                # n is composite.
                return False
            if x == n - 1:
                # Exit inner loop and continue with next witness.
                break
        else:
            # for/else: this runs only when the inner loop was NOT broken,
            # i.e. no squaring ever produced n - 1, so n is composite.
            return False

    return True
+
+
def is_prime(number: int) -> bool:
    """Returns True if the number is prime, and False otherwise.

    >>> is_prime(2)
    True
    >>> is_prime(42)
    False
    >>> is_prime(41)
    True
    """

    if number < 10:
        # Hard-coded answers cover the whole single-digit range.
        return number in {2, 3, 5, 7}

    if number % 2 == 0:
        # Even numbers above 2 are never prime.
        return False

    # Miller-Rabin with one round more than the FIPS-recommended minimum.
    rounds = get_primality_testing_rounds(number)
    return miller_rabin_primality_testing(number, rounds + 1)
+
+
def getprime(nbits: int) -> int:
    """Returns a prime number that can be stored in 'nbits' bits.

    >>> p = getprime(128)
    >>> is_prime(p-1)
    False
    >>> is_prime(p)
    True
    >>> is_prime(p+1)
    False

    >>> from rsa import common
    >>> common.bit_size(p) == 128
    True
    """

    assert nbits > 3  # the loop will hang on too small numbers

    # Keep drawing random odd candidates until one passes the primality test.
    while True:
        candidate = rsa.randnum.read_random_odd_int(nbits)
        if is_prime(candidate):
            return candidate
+
+ # Retry if not prime
+
+
def are_relatively_prime(a: int, b: int) -> bool:
    """Returns True if a and b are relatively prime, and False if they
    are not.

    >>> are_relatively_prime(2, 3)
    True
    >>> are_relatively_prime(2, 4)
    False
    """

    # Coprime means the only common divisor is 1.
    return gcd(a, b) == 1
+
+
+if __name__ == "__main__":
+ print("Running doctests 1000x or until failure")
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count % 100 == 0 and count:
+ print("%i times" % count)
+
+ print("Doctests done")
diff --git a/contrib/python/rsa/py3/rsa/py.typed b/contrib/python/rsa/py3/rsa/py.typed
new file mode 100644
index 0000000000..6c27071a39
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561. The rsa package uses inline types.
diff --git a/contrib/python/rsa/py3/rsa/randnum.py b/contrib/python/rsa/py3/rsa/randnum.py
new file mode 100644
index 0000000000..c65facddc7
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/randnum.py
@@ -0,0 +1,95 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions for generating random numbers."""
+
+# Source inspired by code by Yesudeep Mangalapilly <yesudeep@gmail.com>
+
+import os
+import struct
+
+from rsa import common, transform
+
+
def read_random_bits(nbits: int) -> bytes:
    """Reads 'nbits' random bits.

    If nbits isn't a whole number of bytes, an extra byte is prepended with
    only the lower ``nbits % 8`` bits set.
    """

    nbytes, rbits = divmod(nbits, 8)

    # Get the random bytes
    randomdata = os.urandom(nbytes)

    # Add the remaining random bits
    if rbits > 0:
        randomvalue = ord(os.urandom(1))
        # Keep only the top `rbits` bits of the random byte.
        randomvalue >>= 8 - rbits
        # Prepend, so the partial byte ends up in the most significant position.
        randomdata = struct.pack("B", randomvalue) + randomdata

    return randomdata
+
+
def read_random_int(nbits: int) -> int:
    """Reads a random integer of approximately nbits bits."""

    value = transform.bytes2int(read_random_bits(nbits))

    # Force the most significant bit, so the result really spans the
    # requested number of bits.
    return value | (1 << (nbits - 1))
+
+
def read_random_odd_int(nbits: int) -> int:
    """Reads a random odd integer of approximately nbits bits.

    >>> read_random_odd_int(512) & 1
    1
    """

    # Setting the lowest bit makes any value odd.
    return read_random_int(nbits) | 1
+
+
def randint(maxvalue: int) -> int:
    """Returns a random integer x with 1 <= x <= maxvalue

    May take a very long time in specific situations. If maxvalue needs N bits
    to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
    is.

    NOTE(review): read_random_int forces the top bit of its result, so values
    far below maxvalue appear to be unreachable until bit_size is decreased
    below — confirm whether the uniform 1 <= x claim is intended.
    """

    bit_size = common.bit_size(maxvalue)

    tries = 0
    while True:
        value = read_random_int(bit_size)
        if value <= maxvalue:
            break

        if tries % 10 == 0 and tries:
            # After a lot of tries to get the right number of bits but still
            # smaller than maxvalue, decrease the number of bits by 1. That'll
            # dramatically increase the chances to get a large enough number.
            bit_size -= 1
        tries += 1

    return value
diff --git a/contrib/python/rsa/py3/rsa/transform.py b/contrib/python/rsa/py3/rsa/transform.py
new file mode 100644
index 0000000000..c609b65f3c
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/transform.py
@@ -0,0 +1,72 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Data transformation functions.
+
+From bytes to a number, number to bytes, etc.
+"""
+
+import math
+
+
def bytes2int(raw_bytes: bytes) -> int:
    r"""Converts a big-endian byte sequence to an unsigned integer.

    When using unicode strings, encode it to some encoding like UTF8 first.

    >>> (((128 * 256) + 64) * 256) + 15
    8405007
    >>> bytes2int(b'\x80@\x0f')
    8405007

    """
    return int.from_bytes(raw_bytes, byteorder="big", signed=False)
+
+
def int2bytes(number: int, fill_size: int = 0) -> bytes:
    """Convert an unsigned integer to bytes (big-endian).

    Does not preserve leading zeros if you don't specify a fill size.

    :param number:
        Integer value; must be >= 0.
    :param fill_size:
        If the optional fill size is given the length of the resulting
        byte string is exactly the fill size, padded with prefix zero
        bytes to satisfy that length.
    :returns:
        Raw bytes (base-256 representation).
    :raises ValueError:
        when ``number`` is negative.
    :raises OverflowError:
        when ``fill_size`` is given and the number takes up more bytes than
        fit into the block (raised by ``int.to_bytes``).
    """

    if number < 0:
        raise ValueError("Number must be an unsigned integer: %d" % number)

    if fill_size > 0:
        # int.to_bytes raises OverflowError when the value needs more than
        # fill_size bytes.
        return number.to_bytes(fill_size, "big")

    # Without a fill size, use the minimal width — at least one byte, so
    # that 0 encodes as b"\x00" rather than b"".
    bytes_required = max(1, math.ceil(number.bit_length() / 8))
    return number.to_bytes(bytes_required, "big")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/contrib/python/rsa/py3/rsa/util.py b/contrib/python/rsa/py3/rsa/util.py
new file mode 100644
index 0000000000..087caf8df5
--- /dev/null
+++ b/contrib/python/rsa/py3/rsa/util.py
@@ -0,0 +1,97 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions."""
+
+import sys
+from optparse import OptionParser
+
+import rsa.key
+
+
def private_to_public() -> None:
    """Reads a private key and outputs the corresponding public key.

    Command-line entry point: parses ``sys.argv`` for input/output file names
    and key formats (PEM or DER), loads the private key, and writes a public
    key built from its public fields (n, e).
    """

    # Parse the CLI options
    parser = OptionParser(
        usage="usage: %prog [options]",
        description="Reads a private key and outputs the "
        "corresponding public key. Both private and public keys use "
        "the format described in PKCS#1 v1.5",
    )

    parser.add_option(
        "-i",
        "--input",
        dest="infilename",
        type="string",
        help="Input filename. Reads from stdin if not specified",
    )
    parser.add_option(
        "-o",
        "--output",
        dest="outfilename",
        type="string",
        # Fixed typo: "of not specified" -> "if not specified".
        help="Output filename. Writes to stdout if not specified",
    )

    parser.add_option(
        "--inform",
        dest="inform",
        help="key format of input - default PEM",
        choices=("PEM", "DER"),
        default="PEM",
    )

    parser.add_option(
        "--outform",
        dest="outform",
        help="key format of output - default PEM",
        choices=("PEM", "DER"),
        default="PEM",
    )

    (cli, cli_args) = parser.parse_args(sys.argv)

    # Read the input data from the file, or from stdin when no file is given.
    if cli.infilename:
        print(
            "Reading private key from %s in %s format" % (cli.infilename, cli.inform),
            file=sys.stderr,
        )
        with open(cli.infilename, "rb") as infile:
            in_data = infile.read()
    else:
        print("Reading private key from stdin in %s format" % cli.inform, file=sys.stderr)
        in_data = sys.stdin.read().encode("ascii")

    # Sanity check; isinstance is the idiomatic type test.
    assert isinstance(in_data, bytes), type(in_data)

    # Take the public fields and create a public key
    priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
    pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)

    # Save to the output file, or stdout when no file is given.
    out_data = pub_key.save_pkcs1(cli.outform)

    if cli.outfilename:
        print(
            "Writing public key to %s in %s format" % (cli.outfilename, cli.outform),
            file=sys.stderr,
        )
        with open(cli.outfilename, "wb") as outfile:
            outfile.write(out_data)
    else:
        print("Writing public key to stdout in %s format" % cli.outform, file=sys.stderr)
        sys.stdout.write(out_data.decode("ascii"))
diff --git a/contrib/python/rsa/py3/tests/__init__.py b/contrib/python/rsa/py3/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/__init__.py
diff --git a/contrib/python/rsa/py3/tests/private.pem b/contrib/python/rsa/py3/tests/private.pem
new file mode 100644
index 0000000000..1a17279f23
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/private.pem
@@ -0,0 +1,5 @@
+-----BEGIN RSA PRIVATE KEY-----
+MGECAQACEQCvWovlXBvfEeOMZPEleO9NAgMBAAECEA20Y+6fDkaWvC24horBzQEC
+CQDdS2PAL/tK4QIJAMratZuNnT3tAghs7iNYA0ZrgQIIQQ5nU93U4fkCCHR55el6
+/K+2
+-----END RSA PRIVATE KEY-----
diff --git a/contrib/python/rsa/py3/tests/test_cli.py b/contrib/python/rsa/py3/tests/test_cli.py
new file mode 100644
index 0000000000..bb872ea7c6
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_cli.py
@@ -0,0 +1,291 @@
+"""
+Unit tests for CLI entry points.
+"""
+
+from __future__ import print_function
+
+import functools
+import io
+import os
+import sys
+import typing
+import unittest
+from contextlib import contextmanager, redirect_stdout, redirect_stderr
+
+import rsa
+import rsa.cli
+import rsa.util
+
+
+@contextmanager
+def captured_output() -> typing.Generator:
+ """Captures output to stdout and stderr"""
+
+ # According to mypy, we're not supposed to change buf_out.buffer.
+ # However, this is just a test, and it works, hence the 'type: ignore'.
+ buf_out = io.StringIO()
+ buf_out.buffer = io.BytesIO() # type: ignore
+
+ buf_err = io.StringIO()
+ buf_err.buffer = io.BytesIO() # type: ignore
+
+ with redirect_stdout(buf_out), redirect_stderr(buf_err):
+ yield buf_out, buf_err
+
+
+def get_bytes_out(buf) -> bytes:
+ return buf.buffer.getvalue()
+
+
+@contextmanager
+def cli_args(*new_argv):
+ """Updates sys.argv[1:] for a single test."""
+
+ old_args = sys.argv[:]
+ sys.argv[1:] = [str(arg) for arg in new_argv]
+
+ try:
+ yield
+ finally:
+ sys.argv[1:] = old_args
+
+
+def remove_if_exists(fname):
+ """Removes a file if it exists."""
+
+ if os.path.exists(fname):
+ os.unlink(fname)
+
+
+def cleanup_files(*filenames):
+ """Makes sure the files don't exist when the test runs, and deletes them afterward."""
+
+ def remove():
+ for fname in filenames:
+ remove_if_exists(fname)
+
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ remove()
+ try:
+ return func(*args, **kwargs)
+ finally:
+ remove()
+
+ return wrapper
+
+ return decorator
+
+
+class AbstractCliTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Ensure there is a key to use
+ cls.pub_key, cls.priv_key = rsa.newkeys(512)
+ cls.pub_fname = "%s.pub" % cls.__name__
+ cls.priv_fname = "%s.key" % cls.__name__
+
+ with open(cls.pub_fname, "wb") as outfile:
+ outfile.write(cls.pub_key.save_pkcs1())
+
+ with open(cls.priv_fname, "wb") as outfile:
+ outfile.write(cls.priv_key.save_pkcs1())
+
+ @classmethod
+ def tearDownClass(cls):
+ if hasattr(cls, "pub_fname"):
+ remove_if_exists(cls.pub_fname)
+ if hasattr(cls, "priv_fname"):
+ remove_if_exists(cls.priv_fname)
+
+ def assertExits(self, status_code, func, *args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except SystemExit as ex:
+ if status_code == ex.code:
+ return
+ self.fail(
+ "SystemExit() raised by %r, but exited with code %r, expected %r"
+ % (func, ex.code, status_code)
+ )
+ else:
+ self.fail("SystemExit() not raised by %r" % func)
+
+
+class KeygenTest(AbstractCliTest):
+ def test_keygen_no_args(self):
+ with captured_output(), cli_args():
+ self.assertExits(1, rsa.cli.keygen)
+
+ def test_keygen_priv_stdout(self):
+ with captured_output() as (out, err):
+ with cli_args(128):
+ rsa.cli.keygen()
+
+ lines = get_bytes_out(out).splitlines()
+ self.assertEqual(b"-----BEGIN RSA PRIVATE KEY-----", lines[0])
+ self.assertEqual(b"-----END RSA PRIVATE KEY-----", lines[-1])
+
+ # The key size should be shown on stderr
+ self.assertTrue("128-bit key" in err.getvalue())
+
+ @cleanup_files("test_cli_privkey_out.pem")
+ def test_keygen_priv_out_pem(self):
+ with captured_output() as (out, err):
+ with cli_args("--out=test_cli_privkey_out.pem", "--form=PEM", 128):
+ rsa.cli.keygen()
+
+ # The key size should be shown on stderr
+ self.assertTrue("128-bit key" in err.getvalue())
+
+ # The output file should be shown on stderr
+ self.assertTrue("test_cli_privkey_out.pem" in err.getvalue())
+
+ # If we can load the file as PEM, it's good enough.
+ with open("test_cli_privkey_out.pem", "rb") as pemfile:
+ rsa.PrivateKey.load_pkcs1(pemfile.read())
+
+ @cleanup_files("test_cli_privkey_out.der")
+ def test_keygen_priv_out_der(self):
+ with captured_output() as (out, err):
+ with cli_args("--out=test_cli_privkey_out.der", "--form=DER", 128):
+ rsa.cli.keygen()
+
+ # The key size should be shown on stderr
+ self.assertTrue("128-bit key" in err.getvalue())
+
+ # The output file should be shown on stderr
+ self.assertTrue("test_cli_privkey_out.der" in err.getvalue())
+
+ # If we can load the file as der, it's good enough.
+ with open("test_cli_privkey_out.der", "rb") as derfile:
+ rsa.PrivateKey.load_pkcs1(derfile.read(), format="DER")
+
+ @cleanup_files("test_cli_privkey_out.pem", "test_cli_pubkey_out.pem")
+ def test_keygen_pub_out_pem(self):
+ with captured_output() as (out, err):
+ with cli_args(
+ "--out=test_cli_privkey_out.pem",
+ "--pubout=test_cli_pubkey_out.pem",
+ "--form=PEM",
+ 256,
+ ):
+ rsa.cli.keygen()
+
+ # The key size should be shown on stderr
+ self.assertTrue("256-bit key" in err.getvalue())
+
+ # The output files should be shown on stderr
+ self.assertTrue("test_cli_privkey_out.pem" in err.getvalue())
+ self.assertTrue("test_cli_pubkey_out.pem" in err.getvalue())
+
+ # If we can load the file as PEM, it's good enough.
+ with open("test_cli_pubkey_out.pem", "rb") as pemfile:
+ rsa.PublicKey.load_pkcs1(pemfile.read())
+
+
+class EncryptDecryptTest(AbstractCliTest):
+ def test_empty_decrypt(self):
+ with captured_output(), cli_args():
+ self.assertExits(1, rsa.cli.decrypt)
+
+ def test_empty_encrypt(self):
+ with captured_output(), cli_args():
+ self.assertExits(1, rsa.cli.encrypt)
+
+ @cleanup_files("encrypted.txt", "cleartext.txt")
+ def test_encrypt_decrypt(self):
+ with open("cleartext.txt", "wb") as outfile:
+ outfile.write(b"Hello cleartext RSA users!")
+
+ with cli_args("-i", "cleartext.txt", "--out=encrypted.txt", self.pub_fname):
+ with captured_output():
+ rsa.cli.encrypt()
+
+ with cli_args("-i", "encrypted.txt", self.priv_fname):
+ with captured_output() as (out, err):
+ rsa.cli.decrypt()
+
+ # We should have the original cleartext on stdout now.
+ output = get_bytes_out(out)
+ self.assertEqual(b"Hello cleartext RSA users!", output)
+
+ @cleanup_files("encrypted.txt", "cleartext.txt")
+ def test_encrypt_decrypt_unhappy(self):
+ with open("cleartext.txt", "wb") as outfile:
+ outfile.write(b"Hello cleartext RSA users!")
+
+ with cli_args("-i", "cleartext.txt", "--out=encrypted.txt", self.pub_fname):
+ with captured_output():
+ rsa.cli.encrypt()
+
+ # Change a few bytes in the encrypted stream.
+ with open("encrypted.txt", "r+b") as encfile:
+ encfile.seek(40)
+ encfile.write(b"hahaha")
+
+ with cli_args("-i", "encrypted.txt", self.priv_fname):
+ with captured_output() as (out, err):
+ self.assertRaises(rsa.DecryptionError, rsa.cli.decrypt)
+
+
+class SignVerifyTest(AbstractCliTest):
+ def test_empty_verify(self):
+ with captured_output(), cli_args():
+ self.assertExits(1, rsa.cli.verify)
+
+ def test_empty_sign(self):
+ with captured_output(), cli_args():
+ self.assertExits(1, rsa.cli.sign)
+
+ @cleanup_files("signature.txt", "cleartext.txt")
+ def test_sign_verify(self):
+ with open("cleartext.txt", "wb") as outfile:
+ outfile.write(b"Hello RSA users!")
+
+ with cli_args("-i", "cleartext.txt", "--out=signature.txt", self.priv_fname, "SHA-256"):
+ with captured_output():
+ rsa.cli.sign()
+
+ with cli_args("-i", "cleartext.txt", self.pub_fname, "signature.txt"):
+ with captured_output() as (out, err):
+ rsa.cli.verify()
+
+ self.assertFalse(b"Verification OK" in get_bytes_out(out))
+
+ @cleanup_files("signature.txt", "cleartext.txt")
+ def test_sign_verify_unhappy(self):
+ with open("cleartext.txt", "wb") as outfile:
+ outfile.write(b"Hello RSA users!")
+
+ with cli_args("-i", "cleartext.txt", "--out=signature.txt", self.priv_fname, "SHA-256"):
+ with captured_output():
+ rsa.cli.sign()
+
+ # Change a few bytes in the cleartext file.
+ with open("cleartext.txt", "r+b") as encfile:
+ encfile.seek(6)
+ encfile.write(b"DSA")
+
+ with cli_args("-i", "cleartext.txt", self.pub_fname, "signature.txt"):
+ with captured_output() as (out, err):
+ self.assertExits("Verification failed.", rsa.cli.verify)
+
+
+class PrivatePublicTest(AbstractCliTest):
+ """Test CLI command to convert a private to a public key."""
+
+ @cleanup_files("test_private_to_public.pem")
+ def test_private_to_public(self):
+
+ with cli_args("-i", self.priv_fname, "-o", "test_private_to_public.pem"):
+ with captured_output():
+ rsa.util.private_to_public()
+
+ # Check that the key is indeed valid.
+ with open("test_private_to_public.pem", "rb") as pemfile:
+ key = rsa.PublicKey.load_pkcs1(pemfile.read())
+
+ self.assertEqual(self.priv_key.n, key.n)
+ self.assertEqual(self.priv_key.e, key.e)
diff --git a/contrib/python/rsa/py3/tests/test_common.py b/contrib/python/rsa/py3/tests/test_common.py
new file mode 100644
index 0000000000..c6a60d5acd
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_common.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import struct
+from rsa.common import byte_size, bit_size, inverse
+
+
+class TestByteSize(unittest.TestCase):
+ def test_values(self):
+ self.assertEqual(byte_size(1 << 1023), 128)
+ self.assertEqual(byte_size((1 << 1024) - 1), 128)
+ self.assertEqual(byte_size(1 << 1024), 129)
+ self.assertEqual(byte_size(255), 1)
+ self.assertEqual(byte_size(256), 2)
+ self.assertEqual(byte_size(0xFFFF), 2)
+ self.assertEqual(byte_size(0xFFFFFF), 3)
+ self.assertEqual(byte_size(0xFFFFFFFF), 4)
+ self.assertEqual(byte_size(0xFFFFFFFFFF), 5)
+ self.assertEqual(byte_size(0xFFFFFFFFFFFF), 6)
+ self.assertEqual(byte_size(0xFFFFFFFFFFFFFF), 7)
+ self.assertEqual(byte_size(0xFFFFFFFFFFFFFFFF), 8)
+
+ def test_zero(self):
+ self.assertEqual(byte_size(0), 1)
+
+ def test_bad_type(self):
+ self.assertRaises(TypeError, byte_size, [])
+ self.assertRaises(TypeError, byte_size, ())
+ self.assertRaises(TypeError, byte_size, dict())
+ self.assertRaises(TypeError, byte_size, "")
+ self.assertRaises(TypeError, byte_size, None)
+
+
+class TestBitSize(unittest.TestCase):
+ def test_zero(self):
+ self.assertEqual(bit_size(0), 0)
+
+ def test_values(self):
+ self.assertEqual(bit_size(1023), 10)
+ self.assertEqual(bit_size(1024), 11)
+ self.assertEqual(bit_size(1025), 11)
+ self.assertEqual(bit_size(1 << 1024), 1025)
+ self.assertEqual(bit_size((1 << 1024) + 1), 1025)
+ self.assertEqual(bit_size((1 << 1024) - 1), 1024)
+
+ def test_negative_values(self):
+ self.assertEqual(bit_size(-1023), 10)
+ self.assertEqual(bit_size(-1024), 11)
+ self.assertEqual(bit_size(-1025), 11)
+ self.assertEqual(bit_size(-1 << 1024), 1025)
+ self.assertEqual(bit_size(-((1 << 1024) + 1)), 1025)
+ self.assertEqual(bit_size(-((1 << 1024) - 1)), 1024)
+
+ def test_bad_type(self):
+ self.assertRaises(TypeError, bit_size, [])
+ self.assertRaises(TypeError, bit_size, ())
+ self.assertRaises(TypeError, bit_size, dict())
+ self.assertRaises(TypeError, bit_size, "")
+ self.assertRaises(TypeError, bit_size, None)
+ self.assertRaises(TypeError, bit_size, 0.0)
+
+
+class TestInverse(unittest.TestCase):
+ def test_normal(self):
+ self.assertEqual(3, inverse(7, 4))
+ self.assertEqual(9, inverse(5, 11))
+
+ def test_not_relprime(self):
+ self.assertRaises(ValueError, inverse, 4, 8)
+ self.assertRaises(ValueError, inverse, 25, 5)
diff --git a/contrib/python/rsa/py3/tests/test_integers.py b/contrib/python/rsa/py3/tests/test_integers.py
new file mode 100644
index 0000000000..659e85ae95
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_integers.py
@@ -0,0 +1,48 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests integer operations."""
+
+import unittest
+
+import rsa
+import rsa.core
+
+
+class IntegerTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(64)
+
+ def test_enc_dec(self):
+ message = 42
+ print("\n\tMessage: %d" % message)
+
+ encrypted = rsa.core.encrypt_int(message, self.pub.e, self.pub.n)
+ print("\tEncrypted: %d" % encrypted)
+
+ decrypted = rsa.core.decrypt_int(encrypted, self.priv.d, self.pub.n)
+ print("\tDecrypted: %d" % decrypted)
+
+ self.assertEqual(message, decrypted)
+
+ def test_sign_verify(self):
+ message = 42
+
+ signed = rsa.core.encrypt_int(message, self.priv.d, self.pub.n)
+ print("\n\tSigned: %d" % signed)
+
+ verified = rsa.core.decrypt_int(signed, self.pub.e, self.pub.n)
+ print("\tVerified: %d" % verified)
+
+ self.assertEqual(message, verified)
diff --git a/contrib/python/rsa/py3/tests/test_key.py b/contrib/python/rsa/py3/tests/test_key.py
new file mode 100644
index 0000000000..c570830ccc
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_key.py
@@ -0,0 +1,87 @@
+"""
+Some tests for the rsa/key.py file.
+"""
+
+import unittest
+
+import rsa.key
+import rsa.core
+
+
+class BlindingTest(unittest.TestCase):
+ def test_blinding(self):
+ """Test blinding and unblinding.
+
+ This is basically the doctest of the PrivateKey.blind method, but then
+ implemented as unittest to allow running on different Python versions.
+ """
+
+ pk = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ message = 12345
+ encrypted = rsa.core.encrypt_int(message, pk.e, pk.n)
+
+ blinded_1, unblind_1 = pk.blind(encrypted) # blind before decrypting
+ decrypted = rsa.core.decrypt_int(blinded_1, pk.d, pk.n)
+ unblinded_1 = pk.unblind(decrypted, unblind_1)
+
+ self.assertEqual(unblinded_1, message)
+
+ # Re-blinding should use a different blinding factor.
+ blinded_2, unblind_2 = pk.blind(encrypted) # blind before decrypting
+ self.assertNotEqual(blinded_1, blinded_2)
+
+ # The unblinding should still work, though.
+ decrypted = rsa.core.decrypt_int(blinded_2, pk.d, pk.n)
+ unblinded_2 = pk.unblind(decrypted, unblind_2)
+ self.assertEqual(unblinded_2, message)
+
+
+class KeyGenTest(unittest.TestCase):
+ def test_custom_exponent(self):
+ pub, priv = rsa.key.newkeys(16, exponent=3)
+
+ self.assertEqual(3, priv.e)
+ self.assertEqual(3, pub.e)
+
+ def test_default_exponent(self):
+ pub, priv = rsa.key.newkeys(16)
+
+ self.assertEqual(0x10001, priv.e)
+ self.assertEqual(0x10001, pub.e)
+
+ def test_exponents_coefficient_calculation(self):
+ pk = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ self.assertEqual(pk.exp1, 55063)
+ self.assertEqual(pk.exp2, 10095)
+ self.assertEqual(pk.coef, 50797)
+
+ def test_custom_getprime_func(self):
+ # List of primes to test with, in order [p, q, p, q, ....]
+ # By starting with two of the same primes, we test that this is
+ # properly rejected.
+ primes = [64123, 64123, 64123, 50957, 39317, 33107]
+
+ def getprime(_):
+ return primes.pop(0)
+
+ # This exponent will cause two other primes to be generated.
+ exponent = 136407
+
+ (p, q, e, d) = rsa.key.gen_keys(
+ 64, accurate=False, getprime_func=getprime, exponent=exponent
+ )
+ self.assertEqual(39317, p)
+ self.assertEqual(33107, q)
+
+
+class HashTest(unittest.TestCase):
+ """Test hashing of keys"""
+
+ def test_hash_possible(self):
+ pub, priv = rsa.key.newkeys(16)
+
+ # This raises a TypeError when hashing isn't possible.
+ hash(priv)
+ hash(pub)
diff --git a/contrib/python/rsa/py3/tests/test_load_save_keys.py b/contrib/python/rsa/py3/tests/test_load_save_keys.py
new file mode 100644
index 0000000000..9b8e0d0a05
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_load_save_keys.py
@@ -0,0 +1,234 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for saving and loading keys."""
+
+import base64
+import os.path
+import pickle
+import unittest
+import warnings
+from unittest import mock
+
+import rsa.key
+
+B64PRIV_DER = b"MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt"
+PRIVATE_DER = base64.standard_b64decode(B64PRIV_DER)
+
+B64PUB_DER = b"MAwCBQDeKYlRAgMBAAE="
+PUBLIC_DER = base64.standard_b64decode(B64PUB_DER)
+
+PRIVATE_PEM = (
+ b"""\
+-----BEGIN CONFUSING STUFF-----
+Cruft before the key
+
+-----BEGIN RSA PRIVATE KEY-----
+Comment: something blah
+
+"""
+ + B64PRIV_DER
+ + b"""
+-----END RSA PRIVATE KEY-----
+
+Stuff after the key
+-----END CONFUSING STUFF-----
+"""
+)
+
+CLEAN_PRIVATE_PEM = (
+ b"""\
+-----BEGIN RSA PRIVATE KEY-----
+"""
+ + B64PRIV_DER
+ + b"""
+-----END RSA PRIVATE KEY-----
+"""
+)
+
+PUBLIC_PEM = (
+ b"""\
+-----BEGIN CONFUSING STUFF-----
+Cruft before the key
+
+-----BEGIN RSA PUBLIC KEY-----
+Comment: something blah
+
+"""
+ + B64PUB_DER
+ + b"""
+-----END RSA PUBLIC KEY-----
+
+Stuff after the key
+-----END CONFUSING STUFF-----
+"""
+)
+
+CLEAN_PUBLIC_PEM = (
+ b"""\
+-----BEGIN RSA PUBLIC KEY-----
+"""
+ + B64PUB_DER
+ + b"""
+-----END RSA PUBLIC KEY-----
+"""
+)
+
+
+class DerTest(unittest.TestCase):
+ """Test saving and loading DER keys."""
+
+ def test_load_private_key(self):
+ """Test loading private DER keys."""
+
+ key = rsa.key.PrivateKey.load_pkcs1(PRIVATE_DER, "DER")
+ expected = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ self.assertEqual(expected, key)
+ self.assertEqual(key.exp1, 55063)
+ self.assertEqual(key.exp2, 10095)
+ self.assertEqual(key.coef, 50797)
+
+ @mock.patch("pyasn1.codec.der.decoder.decode")
+ def test_load_malformed_private_key(self, der_decode):
+ """Test loading malformed private DER keys."""
+
+ # Decode returns an invalid exp2 value.
+ der_decode.return_value = (
+ [0, 3727264081, 65537, 3349121513, 65063, 57287, 55063, 0, 50797],
+ 0,
+ )
+
+ with warnings.catch_warnings(record=True) as w:
+ # Always print warnings
+ warnings.simplefilter("always")
+
+ # Load 3 keys
+ for _ in range(3):
+ key = rsa.key.PrivateKey.load_pkcs1(PRIVATE_DER, "DER")
+
+ # Check that 3 warnings were generated.
+ self.assertEqual(3, len(w))
+
+ for warning in w:
+ self.assertTrue(issubclass(warning.category, UserWarning))
+ self.assertIn("malformed", str(warning.message))
+
+ # Check that we are creating the key with correct values
+ self.assertEqual(key.exp1, 55063)
+ self.assertEqual(key.exp2, 10095)
+ self.assertEqual(key.coef, 50797)
+
+ def test_save_private_key(self):
+ """Test saving private DER keys."""
+
+ key = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+ der = key.save_pkcs1("DER")
+
+ self.assertIsInstance(der, bytes)
+ self.assertEqual(PRIVATE_DER, der)
+
+ def test_load_public_key(self):
+ """Test loading public DER keys."""
+
+ key = rsa.key.PublicKey.load_pkcs1(PUBLIC_DER, "DER")
+ expected = rsa.key.PublicKey(3727264081, 65537)
+
+ self.assertEqual(expected, key)
+
+ def test_save_public_key(self):
+ """Test saving public DER keys."""
+
+ key = rsa.key.PublicKey(3727264081, 65537)
+ der = key.save_pkcs1("DER")
+
+ self.assertIsInstance(der, bytes)
+ self.assertEqual(PUBLIC_DER, der)
+
+
+class PemTest(unittest.TestCase):
+ """Test saving and loading PEM keys."""
+
+ def test_load_private_key(self):
+ """Test loading private PEM files."""
+
+ key = rsa.key.PrivateKey.load_pkcs1(PRIVATE_PEM, "PEM")
+ expected = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ self.assertEqual(expected, key)
+ self.assertEqual(key.exp1, 55063)
+ self.assertEqual(key.exp2, 10095)
+ self.assertEqual(key.coef, 50797)
+
+ def test_save_private_key(self):
+ """Test saving private PEM files."""
+
+ key = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+ pem = key.save_pkcs1("PEM")
+
+ self.assertIsInstance(pem, bytes)
+ self.assertEqual(CLEAN_PRIVATE_PEM, pem)
+
+ def test_load_public_key(self):
+ """Test loading public PEM files."""
+
+ key = rsa.key.PublicKey.load_pkcs1(PUBLIC_PEM, "PEM")
+ expected = rsa.key.PublicKey(3727264081, 65537)
+
+ self.assertEqual(expected, key)
+
+ def test_save_public_key(self):
+ """Test saving public PEM files."""
+
+ key = rsa.key.PublicKey(3727264081, 65537)
+ pem = key.save_pkcs1("PEM")
+
+ self.assertIsInstance(pem, bytes)
+ self.assertEqual(CLEAN_PUBLIC_PEM, pem)
+
+ def test_load_from_disk(self):
+ """Test loading a PEM file from disk."""
+ from yatest.common import source_path
+
+ fname = source_path("contrib/python/rsa/py3/tests/private.pem")
+ with open(fname, mode="rb") as privatefile:
+ keydata = privatefile.read()
+ privkey = rsa.key.PrivateKey.load_pkcs1(keydata)
+
+ self.assertEqual(15945948582725241569, privkey.p)
+ self.assertEqual(14617195220284816877, privkey.q)
+
+
+class PickleTest(unittest.TestCase):
+ """Test saving and loading keys by pickling."""
+
+ def test_private_key(self):
+ pk = rsa.key.PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ pickled = pickle.dumps(pk)
+ unpickled = pickle.loads(pickled)
+ self.assertEqual(pk, unpickled)
+
+ for attr in rsa.key.AbstractKey.__slots__:
+ self.assertTrue(hasattr(unpickled, attr))
+
+ def test_public_key(self):
+ pk = rsa.key.PublicKey(3727264081, 65537)
+
+ pickled = pickle.dumps(pk)
+ unpickled = pickle.loads(pickled)
+
+ self.assertEqual(pk, unpickled)
+ for attr in rsa.key.AbstractKey.__slots__:
+ self.assertTrue(hasattr(unpickled, attr))
diff --git a/contrib/python/rsa/py3/tests/test_mypy.py b/contrib/python/rsa/py3/tests/test_mypy.py
new file mode 100644
index 0000000000..8cc0d59650
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_mypy.py
@@ -0,0 +1,31 @@
+import pathlib
+import sys
+import unittest
+
+import mypy.api
+
+test_modules = ["rsa", "tests"]
+
+
+class MypyRunnerTest(unittest.TestCase):
+ def test_run_mypy(self):
+ proj_root = pathlib.Path(__file__).parent.parent
+ args = [
+ "--incremental",
+ "--ignore-missing-imports",
+ f"--python-version={sys.version_info.major}.{sys.version_info.minor}",
+ ] + [str(proj_root / dirname) for dirname in test_modules]
+
+ result = mypy.api.run(args)
+
+ stdout, stderr, status = result
+
+ messages = []
+ if stderr:
+ messages.append(stderr)
+ if stdout:
+ messages.append(stdout)
+ if status:
+ messages.append("Mypy failed with status %d" % status)
+ if messages and not all("Success" in message for message in messages):
+ self.fail("\n".join(["Mypy errors:"] + messages))
diff --git a/contrib/python/rsa/py3/tests/test_parallel.py b/contrib/python/rsa/py3/tests/test_parallel.py
new file mode 100644
index 0000000000..1a69e9ece6
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_parallel.py
@@ -0,0 +1,20 @@
+"""Test for multiprocess prime generation."""
+
+import unittest
+
+import rsa.prime
+import rsa.parallel
+import rsa.common
+
+
+class ParallelTest(unittest.TestCase):
+ """Tests for multiprocess prime generation."""
+
+ def test_parallel_primegen(self):
+ p = rsa.parallel.getprime(1024, 3)
+
+ self.assertFalse(rsa.prime.is_prime(p - 1))
+ self.assertTrue(rsa.prime.is_prime(p))
+ self.assertFalse(rsa.prime.is_prime(p + 1))
+
+ self.assertEqual(1024, rsa.common.bit_size(p))
diff --git a/contrib/python/rsa/py3/tests/test_pem.py b/contrib/python/rsa/py3/tests/test_pem.py
new file mode 100644
index 0000000000..7440431fc0
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_pem.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from rsa.pem import _markers
+import rsa.key
+
+# 512-bit key. Too small for practical purposes, but good enough for testing with.
+public_key_pem = """
+-----BEGIN PUBLIC KEY-----
+MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKH0aYP9ZFuctlPnXhEyHjgc8ltKKx9M
+0c+h4sKMXwjhjbQAZdtWIw8RRghpUJnKj+6bN2XzZDazyULxgPhtax0CAwEAAQ==
+-----END PUBLIC KEY-----
+"""
+
+private_key_pem = """
+-----BEGIN RSA PRIVATE KEY-----
+MIIBOwIBAAJBAKH0aYP9ZFuctlPnXhEyHjgc8ltKKx9M0c+h4sKMXwjhjbQAZdtW
+Iw8RRghpUJnKj+6bN2XzZDazyULxgPhtax0CAwEAAQJADwR36EpNzQTqDzusCFIq
+ZS+h9X8aIovgBK3RNhMIGO2ThpsnhiDTcqIvgQ56knbl6B2W4iOl54tJ6CNtf6l6
+zQIhANTaNLFGsJfOvZHcI0WL1r89+1A4JVxR+lpslJJwAvgDAiEAwsjqqZ2wY2F0
+F8p1J98BEbtjU2mEZIVCMn6vQuhWdl8CIDRL4IJl4eGKlB0QP0JJF1wpeGO/R76l
+DaPF5cMM7k3NAiEAss28m/ck9BWBfFVdNjx/vsdFZkx2O9AX9EJWoBSnSgECIQCa
++sVQMUVJFGsdE/31C7wCIbE3IpB7ziABZ7mN+V3Dhg==
+-----END RSA PRIVATE KEY-----
+"""
+
+# Private key components
+prime1 = 96275860229939261876671084930484419185939191875438854026071315955024109172739
+prime2 = 88103681619592083641803383393198542599284510949756076218404908654323473741407
+
+
+class TestMarkers(unittest.TestCase):
+ def test_values(self):
+ self.assertEqual(
+ _markers("RSA PRIVATE KEY"),
+ (b"-----BEGIN RSA PRIVATE KEY-----", b"-----END RSA PRIVATE KEY-----"),
+ )
+
+
+class TestBytesAndStrings(unittest.TestCase):
+ """Test that we can use PEM in both Unicode strings and bytes."""
+
+ def test_unicode_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem)
+ self.assertEqual(prime1 * prime2, key.n)
+
+ def test_bytes_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem.encode("ascii"))
+ self.assertEqual(prime1 * prime2, key.n)
+
+ def test_unicode_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem)
+ self.assertEqual(prime1 * prime2, key.n)
+
+ def test_bytes_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem.encode("ascii"))
+ self.assertEqual(prime1, key.p)
+ self.assertEqual(prime2, key.q)
+
+
+class TestByteOutput(unittest.TestCase):
+ """Tests that PEM and DER are returned as bytes."""
+
+ def test_bytes_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem)
+ self.assertIsInstance(key.save_pkcs1(format="DER"), bytes)
+ self.assertIsInstance(key.save_pkcs1(format="PEM"), bytes)
+
+ def test_bytes_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem)
+ self.assertIsInstance(key.save_pkcs1(format="DER"), bytes)
+ self.assertIsInstance(key.save_pkcs1(format="PEM"), bytes)
+
+
+class TestByteInput(unittest.TestCase):
+ """Tests that PEM and DER can be loaded from bytes."""
+
+ def test_bytes_public(self):
+ key = rsa.key.PublicKey.load_pkcs1_openssl_pem(public_key_pem.encode("ascii"))
+ self.assertIsInstance(key.save_pkcs1(format="DER"), bytes)
+ self.assertIsInstance(key.save_pkcs1(format="PEM"), bytes)
+
+ def test_bytes_private(self):
+ key = rsa.key.PrivateKey.load_pkcs1(private_key_pem.encode("ascii"))
+ self.assertIsInstance(key.save_pkcs1(format="DER"), bytes)
+ self.assertIsInstance(key.save_pkcs1(format="PEM"), bytes)
diff --git a/contrib/python/rsa/py3/tests/test_pkcs1.py b/contrib/python/rsa/py3/tests/test_pkcs1.py
new file mode 100644
index 0000000000..a8b3cfdee9
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_pkcs1.py
@@ -0,0 +1,218 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests PKCS #1 version 1.5 encryption, decryption and signing."""
+
+import struct
+import sys
+import unittest
+
+import rsa
+from rsa import pkcs1
+
+
+class BinaryTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(256)
+
+ def test_enc_dec(self):
+ message = struct.pack(">IIII", 0, 0, 0, 1)
+ print("\n\tMessage: %r" % message)
+
+ encrypted = pkcs1.encrypt(message, self.pub)
+ print("\tEncrypted: %r" % encrypted)
+
+ decrypted = pkcs1.decrypt(encrypted, self.priv)
+ print("\tDecrypted: %r" % decrypted)
+
+ self.assertEqual(message, decrypted)
+
+ def test_decoding_failure(self):
+ message = struct.pack(">IIII", 0, 0, 0, 1)
+ encrypted = pkcs1.encrypt(message, self.pub)
+
+ # Alter the encrypted stream
+ a = encrypted[5]
+ self.assertIsInstance(a, int)
+
+ altered_a = (a + 1) % 256
+ encrypted = encrypted[:5] + bytes([altered_a]) + encrypted[6:]
+
+ self.assertRaises(pkcs1.DecryptionError, pkcs1.decrypt, encrypted, self.priv)
+
+ def test_randomness(self):
+ """Encrypting the same message twice should result in different
+ cryptos.
+ """
+
+ message = struct.pack(">IIII", 0, 0, 0, 1)
+ encrypted1 = pkcs1.encrypt(message, self.pub)
+ encrypted2 = pkcs1.encrypt(message, self.pub)
+
+ self.assertNotEqual(encrypted1, encrypted2)
+
+
+class ExtraZeroesTest(unittest.TestCase):
+ def setUp(self):
+ # Key, cyphertext, and plaintext taken from https://github.com/sybrenstuvel/python-rsa/issues/146
+ self.private_key = rsa.PrivateKey.load_pkcs1(
+ "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAs1EKK81M5kTFtZSuUFnhKy8FS2WNXaWVmi/fGHG4CLw98+Yo\n0nkuUarVwSS0O9pFPcpc3kvPKOe9Tv+6DLS3Qru21aATy2PRqjqJ4CYn71OYtSwM\n/ZfSCKvrjXybzgu+sBmobdtYm+sppbdL+GEHXGd8gdQw8DDCZSR6+dPJFAzLZTCd\nB+Ctwe/RXPF+ewVdfaOGjkZIzDoYDw7n+OHnsYCYozkbTOcWHpjVevipR+IBpGPi\n1rvKgFnlcG6d/tj0hWRl/6cS7RqhjoiNEtxqoJzpXs/Kg8xbCxXbCchkf11STA8u\ndiCjQWuWI8rcDwl69XMmHJjIQAqhKvOOQ8rYTQIDAQABAoIBABpQLQ7qbHtp4h1Y\nORAfcFRW7Q74UvtH/iEHH1TF8zyM6wZsYtcn4y0mxYE3Mp+J0xlTJbeVJkwZXYVH\nL3UH29CWHSlR+TWiazTwrCTRVJDhEoqbcTiRW8fb+o/jljVxMcVDrpyYUHNo2c6w\njBxhmKPtp66hhaDpds1Cwi0A8APZ8Z2W6kya/L/hRBzMgCz7Bon1nYBMak5PQEwV\nF0dF7Wy4vIjvCzO6DSqA415DvJDzUAUucgFudbANNXo4HJwNRnBpymYIh8mHdmNJ\n/MQ0YLSqUWvOB57dh7oWQwe3UsJ37ZUorTugvxh3NJ7Tt5ZqbCQBEECb9ND63gxo\n/a3YR/0CgYEA7BJc834xCi/0YmO5suBinWOQAF7IiRPU+3G9TdhWEkSYquupg9e6\nK9lC5k0iP+t6I69NYF7+6mvXDTmv6Z01o6oV50oXaHeAk74O3UqNCbLe9tybZ/+F\ndkYlwuGSNttMQBzjCiVy0+y0+Wm3rRnFIsAtd0RlZ24aN3bFTWJINIsCgYEAwnQq\nvNmJe9SwtnH5c/yCqPhKv1cF/4jdQZSGI6/p3KYNxlQzkHZ/6uvrU5V27ov6YbX8\nvKlKfO91oJFQxUD6lpTdgAStI3GMiJBJIZNpyZ9EWNSvwUj28H34cySpbZz3s4Xd\nhiJBShgy+fKURvBQwtWmQHZJ3EGrcOI7PcwiyYcCgYEAlql5jSUCY0ALtidzQogW\nJ+B87N+RGHsBuJ/0cxQYinwg+ySAAVbSyF1WZujfbO/5+YBN362A/1dn3lbswCnH\nK/bHF9+fZNqvwprPnceQj5oK1n4g6JSZNsy6GNAhosT+uwQ0misgR8SQE4W25dDG\nkdEYsz+BgCsyrCcu8J5C+tUCgYAFVPQbC4f2ikVyKzvgz0qx4WUDTBqRACq48p6e\n+eLatv7nskVbr7QgN+nS9+Uz80ihR0Ev1yCAvnwmM/XYAskcOea87OPmdeWZlQM8\nVXNwINrZ6LMNBLgorfuTBK1UoRo1pPUHCYdqxbEYI2unak18mikd2WB7Fp3h0YI4\nVpGZnwKBgBxkAYnZv+jGI4MyEKdsQgxvROXXYOJZkWzsKuKxVkVpYP2V4nR2YMOJ\nViJQ8FUEnPq35cMDlUk4SnoqrrHIJNOvcJSCqM+bWHAioAsfByLbUPM8sm3CDdIk\nXVJl32HuKYPJOMIWfc7hIfxLRHnCN+coz2M6tgqMDs0E/OfjuqVZ\n-----END RSA PRIVATE KEY-----",
+ format="PEM",
+ )
+ self.cyphertext = bytes.fromhex(
+ "4501b4d669e01b9ef2dc800aa1b06d49196f5a09fe8fbcd037323c60eaf027bfb98432be4e4a26c567ffec718bcbea977dd26812fa071c33808b4d5ebb742d9879806094b6fbeea63d25ea3141733b60e31c6912106e1b758a7fe0014f075193faa8b4622bfd5d3013f0a32190a95de61a3604711bc62945f95a6522bd4dfed0a994ef185b28c281f7b5e4c8ed41176d12d9fc1b837e6a0111d0132d08a6d6f0580de0c9eed8ed105531799482d1e466c68c23b0c222af7fc12ac279bc4ff57e7b4586d209371b38c4c1035edd418dc5f960441cb21ea2bedbfea86de0d7861e81021b650a1de51002c315f1e7c12debe4dcebf790caaa54a2f26b149cf9e77d"
+ )
+ self.plaintext = bytes.fromhex("54657374")
+
+ def test_unmodified(self):
+ message = rsa.decrypt(self.cyphertext, self.private_key)
+ self.assertEqual(message, self.plaintext)
+
+ def test_prepend_zeroes(self):
+ cyphertext = bytes.fromhex("0000") + self.cyphertext
+ with self.assertRaises(rsa.DecryptionError):
+ rsa.decrypt(cyphertext, self.private_key)
+
+ def test_append_zeroes(self):
+ cyphertext = self.cyphertext + bytes.fromhex("0000")
+ with self.assertRaises(rsa.DecryptionError):
+ rsa.decrypt(cyphertext, self.private_key)
+
+
+class SignatureTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(512)
+
+ def test_sign_verify(self):
+ """Test happy flow of sign and verify"""
+
+ message = b"je moeder"
+ signature = pkcs1.sign(message, self.priv, "SHA-256")
+ self.assertEqual("SHA-256", pkcs1.verify(message, signature, self.pub))
+
+ @unittest.skipIf(sys.version_info < (3, 6), "SHA3 requires Python 3.6+")
+ def test_sign_verify_sha3(self):
+ """Test happy flow of sign and verify with SHA3-256"""
+
+ message = b"je moeder"
+ signature = pkcs1.sign(message, self.priv, "SHA3-256")
+ self.assertEqual("SHA3-256", pkcs1.verify(message, signature, self.pub))
+
+ def test_find_signature_hash(self):
+ """Test happy flow of sign and find_signature_hash"""
+
+ message = b"je moeder"
+ signature = pkcs1.sign(message, self.priv, "SHA-256")
+
+ self.assertEqual("SHA-256", pkcs1.find_signature_hash(signature, self.pub))
+
+ def test_alter_message(self):
+ """Altering the message should let the verification fail."""
+
+ signature = pkcs1.sign(b"je moeder", self.priv, "SHA-256")
+ self.assertRaises(
+ pkcs1.VerificationError, pkcs1.verify, b"mijn moeder", signature, self.pub
+ )
+
+ def test_sign_different_key(self):
+ """Signing with another key should let the verification fail."""
+
+ (otherpub, _) = rsa.newkeys(512)
+
+ message = b"je moeder"
+ signature = pkcs1.sign(message, self.priv, "SHA-256")
+ self.assertRaises(pkcs1.VerificationError, pkcs1.verify, message, signature, otherpub)
+
+ def test_multiple_signings(self):
+ """Signing the same message twice should return the same signatures."""
+
+ message = struct.pack(">IIII", 0, 0, 0, 1)
+ signature1 = pkcs1.sign(message, self.priv, "SHA-1")
+ signature2 = pkcs1.sign(message, self.priv, "SHA-1")
+
+ self.assertEqual(signature1, signature2)
+
+ def test_split_hash_sign(self):
+ """Hashing and then signing should match with directly signing the message."""
+
+ message = b"je moeder"
+ msg_hash = pkcs1.compute_hash(message, "SHA-256")
+ signature1 = pkcs1.sign_hash(msg_hash, self.priv, "SHA-256")
+
+ # Calculate the signature using the unified method
+ signature2 = pkcs1.sign(message, self.priv, "SHA-256")
+
+ self.assertEqual(signature1, signature2)
+
+ def test_hash_sign_verify(self):
+ """Test happy flow of hash, sign, and verify"""
+
+ message = b"je moeder"
+ msg_hash = pkcs1.compute_hash(message, "SHA-224")
+ signature = pkcs1.sign_hash(msg_hash, self.priv, "SHA-224")
+
+ self.assertTrue(pkcs1.verify(message, signature, self.pub))
+
+ def test_prepend_zeroes(self):
+ """Prepending the signature with zeroes should be detected."""
+
+ message = b"je moeder"
+ signature = pkcs1.sign(message, self.priv, "SHA-256")
+ signature = bytes.fromhex("0000") + signature
+ with self.assertRaises(rsa.VerificationError):
+ pkcs1.verify(message, signature, self.pub)
+
+ def test_apppend_zeroes(self):
+ """Appending the signature with zeroes should be detected."""
+
+ message = b"je moeder"
+ signature = pkcs1.sign(message, self.priv, "SHA-256")
+ signature = signature + bytes.fromhex("0000")
+ with self.assertRaises(rsa.VerificationError):
+ pkcs1.verify(message, signature, self.pub)
+
+
+class PaddingSizeTest(unittest.TestCase):
+ def test_too_little_padding(self):
+ """Padding less than 8 bytes should be rejected."""
+
+ # Construct key that will be small enough to need only 7 bytes of padding.
+ # This key is 168 bit long, and was generated with rsa.newkeys(nbits=168).
+ self.private_key = rsa.PrivateKey.load_pkcs1(
+ b"""
+-----BEGIN RSA PRIVATE KEY-----
+MHkCAQACFgCIGbbNSkIRLtprxka9NgOf5UxgxCMCAwEAAQIVQqymO0gHubdEVS68
+CdCiWmOJxVfRAgwBQM+e1JJwMKmxSF0CCmya6CFxO8Evdn8CDACMM3AlVC4FhlN8
+3QIKC9cjoam/swMirwIMAR7Br9tdouoH7jAE
+-----END RSA PRIVATE KEY-----
+ """
+ )
+ self.public_key = rsa.PublicKey(n=self.private_key.n, e=self.private_key.e)
+
+ cyphertext = self.encrypt_with_short_padding(b"op je hoofd")
+ with self.assertRaises(rsa.DecryptionError):
+ rsa.decrypt(cyphertext, self.private_key)
+
+ def encrypt_with_short_padding(self, message: bytes) -> bytes:
+ # This is a copy of rsa.pkcs1.encrypt() adjusted to use the wrong padding length.
+ keylength = rsa.common.byte_size(self.public_key.n)
+
+ # The word 'padding' has 7 letters, so is one byte short of a valid padding length.
+ padded = b"\x00\x02padding\x00" + message
+
+ payload = rsa.transform.bytes2int(padded)
+ encrypted_value = rsa.core.encrypt_int(payload, self.public_key.e, self.public_key.n)
+ cyphertext = rsa.transform.int2bytes(encrypted_value, keylength)
+
+ return cyphertext
diff --git a/contrib/python/rsa/py3/tests/test_pkcs1_v2.py b/contrib/python/rsa/py3/tests/test_pkcs1_v2.py
new file mode 100644
index 0000000000..ead1393fe1
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_pkcs1_v2.py
@@ -0,0 +1,79 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests PKCS #1 version 2 functionality.
+
+Most of the mocked values come from the test vectors found at:
+http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm
+"""
+
+import unittest
+
+from rsa import pkcs1_v2
+
+
+class MGFTest(unittest.TestCase):
+ def test_oaep_int_db_mask(self):
+ seed = (
+ b"\xaa\xfd\x12\xf6\x59\xca\xe6\x34\x89\xb4\x79\xe5\x07\x6d\xde\xc2" b"\xf0\x6c\xb5\x8f"
+ )
+ db = (
+ b"\xda\x39\xa3\xee\x5e\x6b\x4b\x0d\x32\x55\xbf\xef\x95\x60\x18\x90"
+ b"\xaf\xd8\x07\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xd4\x36\xe9\x95\x69"
+ b"\xfd\x32\xa7\xc8\xa0\x5b\xbc\x90\xd3\x2c\x49"
+ )
+ masked_db = (
+ b"\xdc\xd8\x7d\x5c\x68\xf1\xee\xa8\xf5\x52\x67\xc3\x1b\x2e\x8b\xb4"
+ b"\x25\x1f\x84\xd7\xe0\xb2\xc0\x46\x26\xf5\xaf\xf9\x3e\xdc\xfb\x25"
+ b"\xc9\xc2\xb3\xff\x8a\xe1\x0e\x83\x9a\x2d\xdb\x4c\xdc\xfe\x4f\xf4"
+ b"\x77\x28\xb4\xa1\xb7\xc1\x36\x2b\xaa\xd2\x9a\xb4\x8d\x28\x69\xd5"
+ b"\x02\x41\x21\x43\x58\x11\x59\x1b\xe3\x92\xf9\x82\xfb\x3e\x87\xd0"
+ b"\x95\xae\xb4\x04\x48\xdb\x97\x2f\x3a\xc1\x4f\x7b\xc2\x75\x19\x52"
+ b"\x81\xce\x32\xd2\xf1\xb7\x6d\x4d\x35\x3e\x2d"
+ )
+
+ # dbMask = MGF(seed, length(DB))
+ db_mask = pkcs1_v2.mgf1(seed, length=len(db))
+ expected_db_mask = (
+ b"\x06\xe1\xde\xb2\x36\x9a\xa5\xa5\xc7\x07\xd8\x2c\x8e\x4e\x93\x24"
+ b"\x8a\xc7\x83\xde\xe0\xb2\xc0\x46\x26\xf5\xaf\xf9\x3e\xdc\xfb\x25"
+ b"\xc9\xc2\xb3\xff\x8a\xe1\x0e\x83\x9a\x2d\xdb\x4c\xdc\xfe\x4f\xf4"
+ b"\x77\x28\xb4\xa1\xb7\xc1\x36\x2b\xaa\xd2\x9a\xb4\x8d\x28\x69\xd5"
+ b"\x02\x41\x21\x43\x58\x11\x59\x1b\xe3\x92\xf9\x82\xfb\x3e\x87\xd0"
+ b"\x95\xae\xb4\x04\x48\xdb\x97\x2f\x3a\xc1\x4e\xaf\xf4\x9c\x8c\x3b"
+ b"\x7c\xfc\x95\x1a\x51\xec\xd1\xdd\xe6\x12\x64"
+ )
+
+ self.assertEqual(db_mask, expected_db_mask)
+
+ # seedMask = MGF(maskedDB, length(seed))
+ seed_mask = pkcs1_v2.mgf1(masked_db, length=len(seed))
+ expected_seed_mask = (
+ b"\x41\x87\x0b\x5a\xb0\x29\xe6\x57\xd9\x57\x50\xb5\x4c\x28\x3c\x08" b"\x72\x5d\xbe\xa9"
+ )
+
+ self.assertEqual(seed_mask, expected_seed_mask)
+
+ def test_invalid_hasher(self):
+ """Tests an invalid hasher generates an exception"""
+ with self.assertRaises(ValueError):
+ pkcs1_v2.mgf1(b"\x06\xe1\xde\xb2", length=8, hasher="SHA2")
+
+ def test_invalid_length(self):
+ with self.assertRaises(OverflowError):
+ pkcs1_v2.mgf1(b"\x06\xe1\xde\xb2", length=2 ** 50)
diff --git a/contrib/python/rsa/py3/tests/test_prime.py b/contrib/python/rsa/py3/tests/test_prime.py
new file mode 100644
index 0000000000..42d8af1670
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_prime.py
@@ -0,0 +1,133 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests prime functions."""
+
+import unittest
+
+import rsa.prime
+import rsa.randnum
+
+
+class PrimeTest(unittest.TestCase):
+ def test_is_prime(self):
+ """Test some common primes."""
+
+ # Test some trivial numbers
+ self.assertFalse(rsa.prime.is_prime(-1))
+ self.assertFalse(rsa.prime.is_prime(0))
+ self.assertFalse(rsa.prime.is_prime(1))
+ self.assertTrue(rsa.prime.is_prime(2))
+ self.assertFalse(rsa.prime.is_prime(42))
+ self.assertTrue(rsa.prime.is_prime(41))
+
+ # Test some slightly larger numbers
+ self.assertEqual(
+ [907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997],
+ [x for x in range(901, 1000) if rsa.prime.is_prime(x)],
+ )
+
+ # Test around the 50th millionth known prime.
+ self.assertTrue(rsa.prime.is_prime(982451653))
+ self.assertFalse(rsa.prime.is_prime(982451653 * 961748941))
+
+ def test_miller_rabin_primality_testing(self):
+ """Uses monkeypatching to ensure certain random numbers.
+
+ This allows us to predict/control the code path.
+ """
+
+ randints = []
+
+ def fake_randint(maxvalue):
+ return randints.pop(0)
+
+ orig_randint = rsa.randnum.randint
+ rsa.randnum.randint = fake_randint
+ try:
+ # 'n is composite'
+ randints.append(2630484832) # causes the 'n is composite' case with n=3784949785
+ self.assertEqual(False, rsa.prime.miller_rabin_primality_testing(2787998641, 7))
+ self.assertEqual([], randints)
+
+ # 'Exit inner loop and continue with next witness'
+ randints.extend(
+ [
+ 2119139098, # causes 'Exit inner loop and continue with next witness'
+ # the next witnesses for the above case:
+ 3051067716,
+ 3603501763,
+ 3230895847,
+ 3687808133,
+ 3760099987,
+ 4026931495,
+ 3022471882,
+ ]
+ )
+ self.assertEqual(
+ True,
+ rsa.prime.miller_rabin_primality_testing(2211417913, len(randints)),
+ )
+ self.assertEqual([], randints)
+ finally:
+ rsa.randnum.randint = orig_randint
+
+ def test_mersenne_primes(self):
+ """Tests first known Mersenne primes.
+
+ Mersenne primes are prime numbers that can be written in the form
+ `Mn = 2**n - 1` for some integer `n`. For the list of known Mersenne
+ primes, see:
+ https://en.wikipedia.org/wiki/Mersenne_prime#List_of_known_Mersenne_primes
+ """
+
+ # List of known Mersenne exponents.
+ known_mersenne_exponents = [
+ 2,
+ 3,
+ 5,
+ 7,
+ 13,
+ 17,
+ 19,
+ 31,
+ 61,
+ 89,
+ 107,
+ 127,
+ 521,
+ 607,
+ 1279,
+ 2203,
+ 2281,
+ 4423,
+ ]
+
+ # Test Mersenne primes.
+ for exp in known_mersenne_exponents:
+ self.assertTrue(rsa.prime.is_prime(2 ** exp - 1))
+
+ def test_get_primality_testing_rounds(self):
+ """Test round calculation for primality testing."""
+
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 63), 10)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 127), 10)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 255), 10)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 511), 7)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 767), 7)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 1023), 4)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 1279), 4)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 1535), 3)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 2047), 3)
+ self.assertEqual(rsa.prime.get_primality_testing_rounds(1 << 4095), 3)
diff --git a/contrib/python/rsa/py3/tests/test_strings.py b/contrib/python/rsa/py3/tests/test_strings.py
new file mode 100644
index 0000000000..ae8ffe1a4e
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_strings.py
@@ -0,0 +1,40 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests string operations."""
+
+from __future__ import absolute_import
+
+import unittest
+
+import rsa
+
+unicode_string = u"Euro=\u20ac ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+
+class StringTest(unittest.TestCase):
+ def setUp(self):
+ (self.pub, self.priv) = rsa.newkeys(384)
+
+ def test_enc_dec(self):
+ message = unicode_string.encode("utf-8")
+ print("\n\tMessage: %r" % message)
+
+ encrypted = rsa.encrypt(message, self.pub)
+ print("\tEncrypted: %r" % encrypted)
+
+ decrypted = rsa.decrypt(encrypted, self.priv)
+ print("\tDecrypted: %r" % decrypted)
+
+ self.assertEqual(message, decrypted)
diff --git a/contrib/python/rsa/py3/tests/test_transform.py b/contrib/python/rsa/py3/tests/test_transform.py
new file mode 100644
index 0000000000..14046191fe
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/test_transform.py
@@ -0,0 +1,53 @@
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from rsa.transform import int2bytes, bytes2int
+
+
+class Test_int2bytes(unittest.TestCase):
+ def test_accuracy(self):
+ self.assertEqual(int2bytes(123456789), b"\x07[\xcd\x15")
+
+ def test_codec_identity(self):
+ self.assertEqual(bytes2int(int2bytes(123456789, 128)), 123456789)
+
+ def test_chunk_size(self):
+ self.assertEqual(int2bytes(123456789, 6), b"\x00\x00\x07[\xcd\x15")
+ self.assertEqual(int2bytes(123456789, 7), b"\x00\x00\x00\x07[\xcd\x15")
+
+ def test_zero(self):
+ self.assertEqual(int2bytes(0, 4), b"\x00" * 4)
+ self.assertEqual(int2bytes(0, 7), b"\x00" * 7)
+ self.assertEqual(int2bytes(0), b"\x00")
+
+ def test_correctness_against_base_implementation(self):
+ # Slow test.
+ values = [
+ 1 << 512,
+ 1 << 8192,
+ 1 << 77,
+ ]
+ for value in values:
+ self.assertEqual(bytes2int(int2bytes(value)), value, "Boom %d" % value)
+
+ def test_raises_OverflowError_when_chunk_size_is_insufficient(self):
+ self.assertRaises(OverflowError, int2bytes, 123456789, 3)
+ self.assertRaises(OverflowError, int2bytes, 299999999999, 4)
+
+ def test_raises_ValueError_when_negative_integer(self):
+ self.assertRaises(ValueError, int2bytes, -1)
+
+ def test_raises_TypeError_when_not_integer(self):
+ self.assertRaises(TypeError, int2bytes, None)
diff --git a/contrib/python/rsa/py3/tests/ya.make b/contrib/python/rsa/py3/tests/ya.make
new file mode 100644
index 0000000000..059fbe0129
--- /dev/null
+++ b/contrib/python/rsa/py3/tests/ya.make
@@ -0,0 +1,28 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/rsa
+)
+
+NO_LINT()
+
+TEST_SRCS(
+ test_cli.py
+ test_common.py
+ test_integers.py
+ test_key.py
+ test_load_save_keys.py
+ test_parallel.py
+ test_pem.py
+ test_pkcs1.py
+ test_pkcs1_v2.py
+ test_prime.py
+ test_strings.py
+ test_transform.py
+)
+
+DATA (
+ arcadia/contrib/python/rsa/py3/tests
+)
+
+END()
diff --git a/contrib/python/rsa/py3/ya.make b/contrib/python/rsa/py3/ya.make
new file mode 100644
index 0000000000..3b4e2bfef1
--- /dev/null
+++ b/contrib/python/rsa/py3/ya.make
@@ -0,0 +1,45 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(4.9)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/python/pyasn1
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ rsa/__init__.py
+ rsa/asn1.py
+ rsa/cli.py
+ rsa/common.py
+ rsa/core.py
+ rsa/key.py
+ rsa/parallel.py
+ rsa/pem.py
+ rsa/pkcs1.py
+ rsa/pkcs1_v2.py
+ rsa/prime.py
+ rsa/randnum.py
+ rsa/transform.py
+ rsa/util.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/rsa/py3/
+ .dist-info/METADATA
+ .dist-info/entry_points.txt
+ .dist-info/top_level.txt
+ rsa/py.typed
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/rsa/ya.make b/contrib/python/rsa/ya.make
new file mode 100644
index 0000000000..0deffc020e
--- /dev/null
+++ b/contrib/python/rsa/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/rsa/py2)
+ELSE()
+ PEERDIR(contrib/python/rsa/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/ruamel.yaml.clib/py2/.dist-info/METADATA b/contrib/python/ruamel.yaml.clib/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..7b8ca2a332
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/.dist-info/METADATA
@@ -0,0 +1,55 @@
+Metadata-Version: 2.1
+Name: ruamel.yaml.clib
+Version: 0.2.7
+Summary: C version of reader, parser and emitter for ruamel.yaml derived from libyaml
+Home-page: https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree
+Author: Anthon van der Neut
+Author-email: a.van.der.neut@ruamel.eu
+License: MIT
+Keywords: yaml 1.2 parser c-library config
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+
+
+ruamel.yaml.clib
+================
+
+``ruamel.yaml.clib`` is the C based reader/scanner and emitter for ruamel.yaml
+
+:version: 0.2.7
+:updated: 2022-10-19
+:documentation: http://yaml.readthedocs.io
+:repository: https://sourceforge.net/projects/ruamel-yaml-clib/
+:pypi: https://pypi.org/project/ruamel.yaml.clib/
+
+This package was split off from ruamel.yaml, so that ruamel.yaml can be built as
+a universal wheel. Apart from the C code seldom changing, and taking a long
+time to compile for all platforms, this allows installation of the .so
+on Linux systems under /usr/lib64/pythonX.Y (without a .pth file or a ruamel
+directory) and the Python code for ruamel.yaml under /usr/lib/pythonX.Y.
+
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
+This release in loving memory of Johanna Clasina van der Neut-Bandel [1922-10-19 - 2015-11-21]
+
+
diff --git a/contrib/python/ruamel.yaml.clib/py2/.dist-info/top_level.txt b/contrib/python/ruamel.yaml.clib/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..be006da740
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_ruamel_yaml
+ruamel
diff --git a/contrib/python/ruamel.yaml.clib/py2/LICENSE b/contrib/python/ruamel.yaml.clib/py2/LICENSE
new file mode 100644
index 0000000000..786ebed939
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/LICENSE
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2019-2022 Anthon van der Neut, Ruamel bvba
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/contrib/python/ruamel.yaml.clib/py2/README.rst b/contrib/python/ruamel.yaml.clib/py2/README.rst
new file mode 100644
index 0000000000..c41aca60e6
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/README.rst
@@ -0,0 +1,25 @@
+
+ruamel.yaml.clib
+================
+
+``ruamel.yaml.clib`` is the C based reader/scanner and emitter for ruamel.yaml
+
+:version: 0.2.6
+:updated: 2021-07-04
+:documentation: http://yaml.readthedocs.io
+:repository: https://sourceforge.net/projects/ruamel-yaml-clib/
+:pypi: https://pypi.org/project/ruamel.yaml.clib/
+
+This package was split off from ruamel.yaml, so that ruamel.yaml can be built as
+a universal wheel. Apart from the C code seldom changing, and taking a long
+time to compile for all platforms, this allows installation of the .so
+on Linux systems under /usr/lib64/pythonX.Y (without a .pth file or a ruamel
+directory) and the Python code for ruamel.yaml under /usr/lib/pythonX.Y.
+
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
diff --git a/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.h b/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.h
new file mode 100644
index 0000000000..568db509aa
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.h
@@ -0,0 +1,23 @@
+
+#include "yaml.h"
+
+#if PY_MAJOR_VERSION < 3
+
+#define PyUnicode_FromString(s) PyUnicode_DecodeUTF8((s), strlen(s), "strict")
+
+#else
+
+#define PyString_CheckExact PyBytes_CheckExact
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+
+#endif
+
+#ifdef _MSC_VER /* MS Visual C++ 6.0 */
+#if _MSC_VER == 1200
+
+#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(i)
+
+#endif
+#endif
diff --git a/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pxd b/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pxd
new file mode 100644
index 0000000000..d8dc3c6bda
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pxd
@@ -0,0 +1,251 @@
+
+cdef extern from "_ruamel_yaml.h":
+
+ void malloc(int l)
+ void memcpy(char *d, char *s, int l)
+ int strlen(char *s)
+ int PyString_CheckExact(object o)
+ int PyUnicode_CheckExact(object o)
+ char *PyString_AS_STRING(object o)
+ int PyString_GET_SIZE(object o)
+ object PyString_FromStringAndSize(char *v, int l)
+ object PyUnicode_FromString(char *u)
+ object PyUnicode_DecodeUTF8(char *u, int s, char *e)
+ object PyUnicode_AsUTF8String(object o)
+ int PY_MAJOR_VERSION
+
+ ctypedef enum:
+ SIZEOF_VOID_P
+ ctypedef enum yaml_encoding_t:
+ YAML_ANY_ENCODING
+ YAML_UTF8_ENCODING
+ YAML_UTF16LE_ENCODING
+ YAML_UTF16BE_ENCODING
+ ctypedef enum yaml_break_t:
+ YAML_ANY_BREAK
+ YAML_CR_BREAK
+ YAML_LN_BREAK
+ YAML_CRLN_BREAK
+ ctypedef enum yaml_error_type_t:
+ YAML_NO_ERROR
+ YAML_MEMORY_ERROR
+ YAML_READER_ERROR
+ YAML_SCANNER_ERROR
+ YAML_PARSER_ERROR
+ YAML_WRITER_ERROR
+ YAML_EMITTER_ERROR
+ ctypedef enum yaml_scalar_style_t:
+ YAML_ANY_SCALAR_STYLE
+ YAML_PLAIN_SCALAR_STYLE
+ YAML_SINGLE_QUOTED_SCALAR_STYLE
+ YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ YAML_LITERAL_SCALAR_STYLE
+ YAML_FOLDED_SCALAR_STYLE
+ ctypedef enum yaml_sequence_style_t:
+ YAML_ANY_SEQUENCE_STYLE
+ YAML_BLOCK_SEQUENCE_STYLE
+ YAML_FLOW_SEQUENCE_STYLE
+ ctypedef enum yaml_mapping_style_t:
+ YAML_ANY_MAPPING_STYLE
+ YAML_BLOCK_MAPPING_STYLE
+ YAML_FLOW_MAPPING_STYLE
+ ctypedef enum yaml_token_type_t:
+ YAML_NO_TOKEN
+ YAML_STREAM_START_TOKEN
+ YAML_STREAM_END_TOKEN
+ YAML_VERSION_DIRECTIVE_TOKEN
+ YAML_TAG_DIRECTIVE_TOKEN
+ YAML_DOCUMENT_START_TOKEN
+ YAML_DOCUMENT_END_TOKEN
+ YAML_BLOCK_SEQUENCE_START_TOKEN
+ YAML_BLOCK_MAPPING_START_TOKEN
+ YAML_BLOCK_END_TOKEN
+ YAML_FLOW_SEQUENCE_START_TOKEN
+ YAML_FLOW_SEQUENCE_END_TOKEN
+ YAML_FLOW_MAPPING_START_TOKEN
+ YAML_FLOW_MAPPING_END_TOKEN
+ YAML_BLOCK_ENTRY_TOKEN
+ YAML_FLOW_ENTRY_TOKEN
+ YAML_KEY_TOKEN
+ YAML_VALUE_TOKEN
+ YAML_ALIAS_TOKEN
+ YAML_ANCHOR_TOKEN
+ YAML_TAG_TOKEN
+ YAML_SCALAR_TOKEN
+ ctypedef enum yaml_event_type_t:
+ YAML_NO_EVENT
+ YAML_STREAM_START_EVENT
+ YAML_STREAM_END_EVENT
+ YAML_DOCUMENT_START_EVENT
+ YAML_DOCUMENT_END_EVENT
+ YAML_ALIAS_EVENT
+ YAML_SCALAR_EVENT
+ YAML_SEQUENCE_START_EVENT
+ YAML_SEQUENCE_END_EVENT
+ YAML_MAPPING_START_EVENT
+ YAML_MAPPING_END_EVENT
+
+ ctypedef int yaml_read_handler_t(void *data, char *buffer,
+ int size, int *size_read) except 0
+
+ ctypedef int yaml_write_handler_t(void *data, char *buffer,
+ int size) except 0
+
+ ctypedef struct yaml_mark_t:
+ int index
+ int line
+ int column
+ ctypedef struct yaml_version_directive_t:
+ int major
+ int minor
+ ctypedef struct yaml_tag_directive_t:
+ char *handle
+ char *prefix
+
+ ctypedef struct _yaml_token_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_token_alias_data_t:
+ char *value
+ ctypedef struct _yaml_token_anchor_data_t:
+ char *value
+ ctypedef struct _yaml_token_tag_data_t:
+ char *handle
+ char *suffix
+ ctypedef struct _yaml_token_scalar_data_t:
+ char *value
+ int length
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_token_version_directive_data_t:
+ int major
+ int minor
+ ctypedef struct _yaml_token_tag_directive_data_t:
+ char *handle
+ char *prefix
+ ctypedef union _yaml_token_data_t:
+ _yaml_token_stream_start_data_t stream_start
+ _yaml_token_alias_data_t alias
+ _yaml_token_anchor_data_t anchor
+ _yaml_token_tag_data_t tag
+ _yaml_token_scalar_data_t scalar
+ _yaml_token_version_directive_data_t version_directive
+ _yaml_token_tag_directive_data_t tag_directive
+ ctypedef struct yaml_token_t:
+ yaml_token_type_t type
+ _yaml_token_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct _yaml_event_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_event_document_start_data_tag_directives_t:
+ yaml_tag_directive_t *start
+ yaml_tag_directive_t *end
+ ctypedef struct _yaml_event_document_start_data_t:
+ yaml_version_directive_t *version_directive
+ _yaml_event_document_start_data_tag_directives_t tag_directives
+ int implicit
+ ctypedef struct _yaml_event_document_end_data_t:
+ int implicit
+ ctypedef struct _yaml_event_alias_data_t:
+ char *anchor
+ ctypedef struct _yaml_event_scalar_data_t:
+ char *anchor
+ char *tag
+ char *value
+ int length
+ int plain_implicit
+ int quoted_implicit
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_event_sequence_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_sequence_style_t style
+ ctypedef struct _yaml_event_mapping_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_mapping_style_t style
+ ctypedef union _yaml_event_data_t:
+ _yaml_event_stream_start_data_t stream_start
+ _yaml_event_document_start_data_t document_start
+ _yaml_event_document_end_data_t document_end
+ _yaml_event_alias_data_t alias
+ _yaml_event_scalar_data_t scalar
+ _yaml_event_sequence_start_data_t sequence_start
+ _yaml_event_mapping_start_data_t mapping_start
+ ctypedef struct yaml_event_t:
+ yaml_event_type_t type
+ _yaml_event_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct yaml_parser_t:
+ yaml_error_type_t error
+ char *problem
+ int problem_offset
+ int problem_value
+ yaml_mark_t problem_mark
+ char *context
+ yaml_mark_t context_mark
+
+ ctypedef struct yaml_emitter_t:
+ yaml_error_type_t error
+ char *problem
+
+ char *yaml_get_version_string()
+ void yaml_get_version(int *major, int *minor, int *patch)
+
+ void yaml_token_delete(yaml_token_t *token)
+
+ int yaml_stream_start_event_initialize(yaml_event_t *event,
+ yaml_encoding_t encoding)
+ int yaml_stream_end_event_initialize(yaml_event_t *event)
+ int yaml_document_start_event_initialize(yaml_event_t *event,
+ yaml_version_directive_t *version_directive,
+ yaml_tag_directive_t *tag_directives_start,
+ yaml_tag_directive_t *tag_directives_end,
+ int implicit)
+ int yaml_document_end_event_initialize(yaml_event_t *event,
+ int implicit)
+ int yaml_alias_event_initialize(yaml_event_t *event, char *anchor)
+ int yaml_scalar_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, char *value, int length,
+ int plain_implicit, int quoted_implicit,
+ yaml_scalar_style_t style)
+ int yaml_sequence_start_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, int implicit, yaml_sequence_style_t style)
+ int yaml_sequence_end_event_initialize(yaml_event_t *event)
+ int yaml_mapping_start_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, int implicit, yaml_mapping_style_t style)
+ int yaml_mapping_end_event_initialize(yaml_event_t *event)
+ void yaml_event_delete(yaml_event_t *event)
+
+ int yaml_parser_initialize(yaml_parser_t *parser)
+ void yaml_parser_delete(yaml_parser_t *parser)
+ void yaml_parser_set_input_string(yaml_parser_t *parser,
+ char *input, int size)
+ void yaml_parser_set_input(yaml_parser_t *parser,
+ yaml_read_handler_t *handler, void *data)
+ void yaml_parser_set_encoding(yaml_parser_t *parser,
+ yaml_encoding_t encoding)
+ int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
+ int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
+
+ int yaml_emitter_initialize(yaml_emitter_t *emitter)
+ void yaml_emitter_delete(yaml_emitter_t *emitter)
+ void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
+ char *output, int size, int *size_written)
+ void yaml_emitter_set_output(yaml_emitter_t *emitter,
+ yaml_write_handler_t *handler, void *data)
+ void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
+ yaml_encoding_t encoding)
+ void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
+ void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
+ void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
+ void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
+ void yaml_emitter_set_break(yaml_emitter_t *emitter,
+ yaml_break_t line_break)
+ int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
+ int yaml_emitter_flush(yaml_emitter_t *emitter)
+
diff --git a/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pyx b/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pyx
new file mode 100644
index 0000000000..4fd50e207b
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/_ruamel_yaml.pyx
@@ -0,0 +1,1526 @@
+
+
+def get_version_string():
+ cdef char *value
+ value = yaml_get_version_string()
+ if PY_MAJOR_VERSION < 3:
+ return value
+ else:
+ return PyUnicode_FromString(value)
+
+def get_version():
+ cdef int major, minor, patch
+ yaml_get_version(&major, &minor, &patch)
+ return (major, minor, patch)
+
+#Mark = yaml.error.Mark
+from ruamel.yaml.error import YAMLError
+from ruamel.yaml.reader import ReaderError
+from ruamel.yaml.scanner import ScannerError
+from ruamel.yaml.parser import ParserError
+from ruamel.yaml.composer import ComposerError
+from ruamel.yaml.constructor import ConstructorError
+from ruamel.yaml.emitter import EmitterError
+from ruamel.yaml.serializer import SerializerError
+from ruamel.yaml.representer import RepresenterError
+
+from ruamel.yaml.tokens import StreamStartToken
+from ruamel.yaml.tokens import StreamEndToken
+from ruamel.yaml.tokens import DirectiveToken
+from ruamel.yaml.tokens import DocumentStartToken
+from ruamel.yaml.tokens import DocumentEndToken
+from ruamel.yaml.tokens import BlockSequenceStartToken
+from ruamel.yaml.tokens import BlockMappingStartToken
+from ruamel.yaml.tokens import BlockEndToken
+from ruamel.yaml.tokens import FlowSequenceStartToken
+from ruamel.yaml.tokens import FlowMappingStartToken
+from ruamel.yaml.tokens import FlowSequenceEndToken
+from ruamel.yaml.tokens import FlowMappingEndToken
+from ruamel.yaml.tokens import KeyToken
+from ruamel.yaml.tokens import ValueToken
+from ruamel.yaml.tokens import BlockEntryToken
+from ruamel.yaml.tokens import FlowEntryToken
+from ruamel.yaml.tokens import AliasToken
+from ruamel.yaml.tokens import AnchorToken
+from ruamel.yaml.tokens import TagToken
+from ruamel.yaml.tokens import ScalarToken
+
+from ruamel.yaml.events import StreamStartEvent
+from ruamel.yaml.events import StreamEndEvent
+from ruamel.yaml.events import DocumentStartEvent
+from ruamel.yaml.events import DocumentEndEvent
+from ruamel.yaml.events import AliasEvent
+from ruamel.yaml.events import ScalarEvent
+from ruamel.yaml.events import SequenceStartEvent
+from ruamel.yaml.events import SequenceEndEvent
+from ruamel.yaml.events import MappingStartEvent
+from ruamel.yaml.events import MappingEndEvent
+
+from ruamel.yaml.nodes import ScalarNode
+from ruamel.yaml.nodes import SequenceNode
+from ruamel.yaml.nodes import MappingNode
+
+cdef class Mark:
+ cdef readonly object name
+ cdef readonly size_t index
+ cdef readonly size_t line
+ cdef readonly size_t column
+ cdef readonly buffer
+ cdef readonly pointer
+
+ def __init__(self, object name, size_t index, size_t line, size_t column,
+ object buffer, object pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self):
+ return None
+
+ def __str__(self):
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ return where
+
+#class YAMLError(Exception):
+# pass
+#
+#class MarkedYAMLError(YAMLError):
+#
+# def __init__(self, context=None, context_mark=None,
+# problem=None, problem_mark=None, note=None):
+# self.context = context
+# self.context_mark = context_mark
+# self.problem = problem
+# self.problem_mark = problem_mark
+# self.note = note
+#
+# def __str__(self):
+# lines = []
+# if self.context is not None:
+# lines.append(self.context)
+# if self.context_mark is not None \
+# and (self.problem is None or self.problem_mark is None
+# or self.context_mark.name != self.problem_mark.name
+# or self.context_mark.line != self.problem_mark.line
+# or self.context_mark.column != self.problem_mark.column):
+# lines.append(str(self.context_mark))
+# if self.problem is not None:
+# lines.append(self.problem)
+# if self.problem_mark is not None:
+# lines.append(str(self.problem_mark))
+# if self.note is not None:
+# lines.append(self.note)
+# return '\n'.join(lines)
+#
+#class ReaderError(YAMLError):
+#
+# def __init__(self, name, position, character, encoding, reason):
+# self.name = name
+# self.character = character
+# self.position = position
+# self.encoding = encoding
+# self.reason = reason
+#
+# def __str__(self):
+# if isinstance(self.character, str):
+# return "'%s' codec can't decode byte #x%02x: %s\n" \
+# " in \"%s\", position %d" \
+# % (self.encoding, ord(self.character), self.reason,
+# self.name, self.position)
+# else:
+# return "unacceptable character #x%04x: %s\n" \
+# " in \"%s\", position %d" \
+# % (ord(self.character), self.reason,
+# self.name, self.position)
+#
+#class ScannerError(MarkedYAMLError):
+# pass
+#
+#class ParserError(MarkedYAMLError):
+# pass
+#
+#class EmitterError(YAMLError):
+# pass
+#
+#cdef class Token:
+# cdef readonly Mark start_mark
+# cdef readonly Mark end_mark
+# def __init__(self, Mark start_mark, Mark end_mark):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class StreamStartToken(Token):
+# cdef readonly object encoding
+# def __init__(self, Mark start_mark, Mark end_mark, encoding):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.encoding = encoding
+#
+#cdef class StreamEndToken(Token):
+# pass
+#
+#cdef class DirectiveToken(Token):
+# cdef readonly object name
+# cdef readonly object value
+# def __init__(self, name, value, Mark start_mark, Mark end_mark):
+# self.name = name
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class DocumentStartToken(Token):
+# pass
+#
+#cdef class DocumentEndToken(Token):
+# pass
+#
+#cdef class BlockSequenceStartToken(Token):
+# pass
+#
+#cdef class BlockMappingStartToken(Token):
+# pass
+#
+#cdef class BlockEndToken(Token):
+# pass
+#
+#cdef class FlowSequenceStartToken(Token):
+# pass
+#
+#cdef class FlowMappingStartToken(Token):
+# pass
+#
+#cdef class FlowSequenceEndToken(Token):
+# pass
+#
+#cdef class FlowMappingEndToken(Token):
+# pass
+#
+#cdef class KeyToken(Token):
+# pass
+#
+#cdef class ValueToken(Token):
+# pass
+#
+#cdef class BlockEntryToken(Token):
+# pass
+#
+#cdef class FlowEntryToken(Token):
+# pass
+#
+#cdef class AliasToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class AnchorToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class TagToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class ScalarToken(Token):
+# cdef readonly object value
+# cdef readonly object plain
+# cdef readonly object style
+# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
+# self.value = value
+# self.plain = plain
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.style = style
+
+cdef class CParser:
+
+ cdef yaml_parser_t parser
+ cdef yaml_event_t parsed_event
+
+ cdef object stream
+ cdef object stream_name
+ cdef object current_token
+ cdef object current_event
+ cdef object anchors
+ cdef object stream_cache
+ cdef int stream_cache_len
+ cdef int stream_cache_pos
+ cdef int unicode_source
+
+ def __init__(self, stream):
+ cdef is_readable
+ if yaml_parser_initialize(&self.parser) == 0:
+ raise MemoryError
+ self.parsed_event.type = YAML_NO_EVENT
+ is_readable = 1
+ try:
+ stream.read
+ except AttributeError:
+ is_readable = 0
+ self.unicode_source = 0
+ if is_readable:
+ self.stream = stream
+ try:
+ self.stream_name = stream.name
+ except AttributeError:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<file>'
+ else:
+ self.stream_name = u'<file>'
+ self.stream_cache = None
+ self.stream_cache_len = 0
+ self.stream_cache_pos = 0
+ yaml_parser_set_input(&self.parser, input_handler, <void *>self)
+ else:
+ if PyUnicode_CheckExact(stream) != 0:
+ stream = PyUnicode_AsUTF8String(stream)
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<unicode string>'
+ else:
+ self.stream_name = u'<unicode string>'
+ self.unicode_source = 1
+ else:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<byte string>'
+ else:
+ self.stream_name = u'<byte string>'
+ if PyString_CheckExact(stream) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string or stream input is required")
+ else:
+ raise TypeError(u"a string or stream input is required")
+ self.stream = stream
+ yaml_parser_set_input_string(&self.parser, PyString_AS_STRING(stream), PyString_GET_SIZE(stream))
+ self.current_token = None
+ self.current_event = None
+ self.anchors = {}
+
+ def __dealloc__(self):
+ yaml_parser_delete(&self.parser)
+ yaml_event_delete(&self.parsed_event)
+
+ def dispose(self):
+ pass
+
+ cdef object _parser_error(self):
+ if self.parser.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.parser.error == YAML_READER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, '?', self.parser.problem)
+ else:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
+ elif self.parser.error == YAML_SCANNER_ERROR \
+ or self.parser.error == YAML_PARSER_ERROR:
+ context_mark = None
+ problem_mark = None
+ if self.parser.context != NULL:
+ context_mark = Mark(self.stream_name,
+ self.parser.context_mark.index,
+ self.parser.context_mark.line,
+ self.parser.context_mark.column, None, None)
+ if self.parser.problem != NULL:
+ problem_mark = Mark(self.stream_name,
+ self.parser.problem_mark.index,
+ self.parser.problem_mark.line,
+ self.parser.problem_mark.column, None, None)
+ context = None
+ if self.parser.context != NULL:
+ if PY_MAJOR_VERSION < 3:
+ context = self.parser.context
+ else:
+ context = PyUnicode_FromString(self.parser.context)
+ if PY_MAJOR_VERSION < 3:
+ problem = self.parser.problem
+ else:
+ problem = PyUnicode_FromString(self.parser.problem)
+ if self.parser.error == YAML_SCANNER_ERROR:
+ return ScannerError(context, context_mark, problem, problem_mark)
+ else:
+ return ParserError(context, context_mark, problem, problem_mark)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no parser error")
+ else:
+ raise ValueError(u"no parser error")
+
+ def raw_scan(self):
+ cdef yaml_token_t token
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ if token.type == YAML_NO_TOKEN:
+ done = 1
+ else:
+ count = count+1
+ yaml_token_delete(&token)
+ return count
+
+ cdef object _scan(self):
+ cdef yaml_token_t token
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ token_object = self._token_to_object(&token)
+ yaml_token_delete(&token)
+ return token_object
+
+ cdef object _token_to_object(self, yaml_token_t *token):
+ start_mark = Mark(self.stream_name,
+ token.start_mark.index,
+ token.start_mark.line,
+ token.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ token.end_mark.index,
+ token.end_mark.line,
+ token.end_mark.column,
+ None, None)
+ if token.type == YAML_NO_TOKEN:
+ return None
+ elif token.type == YAML_STREAM_START_TOKEN:
+ encoding = None
+ if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartToken(start_mark, end_mark, encoding)
+ elif token.type == YAML_STREAM_END_TOKEN:
+ return StreamEndToken(start_mark, end_mark)
+ elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
+ return DirectiveToken(u"YAML",
+ (token.data.version_directive.major,
+ token.data.version_directive.minor),
+ start_mark, end_mark)
+ elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
+ handle = PyUnicode_FromString(token.data.tag_directive.handle)
+ prefix = PyUnicode_FromString(token.data.tag_directive.prefix)
+ return DirectiveToken(u"TAG", (handle, prefix),
+ start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_START_TOKEN:
+ return DocumentStartToken(start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_END_TOKEN:
+ return DocumentEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
+ return BlockSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
+ return BlockMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_END_TOKEN:
+ return BlockEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
+ return FlowSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
+ return FlowSequenceEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
+ return FlowMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
+ return FlowMappingEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_ENTRY_TOKEN:
+ return BlockEntryToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_ENTRY_TOKEN:
+ return FlowEntryToken(start_mark, end_mark)
+ elif token.type == YAML_KEY_TOKEN:
+ return KeyToken(start_mark, end_mark)
+ elif token.type == YAML_VALUE_TOKEN:
+ return ValueToken(start_mark, end_mark)
+ elif token.type == YAML_ALIAS_TOKEN:
+ value = PyUnicode_FromString(token.data.alias.value)
+ return AliasToken(value, start_mark, end_mark)
+ elif token.type == YAML_ANCHOR_TOKEN:
+ value = PyUnicode_FromString(token.data.anchor.value)
+ return AnchorToken(value, start_mark, end_mark)
+ elif token.type == YAML_TAG_TOKEN:
+ handle = PyUnicode_FromString(token.data.tag.handle)
+ suffix = PyUnicode_FromString(token.data.tag.suffix)
+ if not handle:
+ handle = None
+ return TagToken((handle, suffix), start_mark, end_mark)
+ elif token.type == YAML_SCALAR_TOKEN:
+ value = PyUnicode_DecodeUTF8(token.data.scalar.value,
+ token.data.scalar.length, 'strict')
+ plain = False
+ style = None
+ if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ plain = True
+ style = u''
+ elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarToken(value, plain,
+ start_mark, end_mark, style)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown token type")
+ else:
+ raise ValueError(u"unknown token type")
+
+ def get_token(self):
+ if self.current_token is not None:
+ value = self.current_token
+ self.current_token = None
+ else:
+ value = self._scan()
+ return value
+
+ def peek_token(self):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ return self.current_token
+
+ def check_token(self, *choices):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ if self.current_token is None:
+ return False
+ if not choices:
+ return True
+ token_class = self.current_token.__class__
+ for choice in choices:
+ if token_class is choice:
+ return True
+ return False
+
+ def raw_parse(self):
+ cdef yaml_event_t event
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ if event.type == YAML_NO_EVENT:
+ done = 1
+ else:
+ count = count+1
+ yaml_event_delete(&event)
+ return count
+
+ cdef object _parse(self):
+ cdef yaml_event_t event
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ event_object = self._event_to_object(&event)
+ yaml_event_delete(&event)
+ return event_object
+
+ cdef object _event_to_object(self, yaml_event_t *event):
+ cdef yaml_tag_directive_t *tag_directive
+ start_mark = Mark(self.stream_name,
+ event.start_mark.index,
+ event.start_mark.line,
+ event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ event.end_mark.index,
+ event.end_mark.line,
+ event.end_mark.column,
+ None, None)
+ if event.type == YAML_NO_EVENT:
+ return None
+ elif event.type == YAML_STREAM_START_EVENT:
+ encoding = None
+ if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartEvent(start_mark, end_mark, encoding)
+ elif event.type == YAML_STREAM_END_EVENT:
+ return StreamEndEvent(start_mark, end_mark)
+ elif event.type == YAML_DOCUMENT_START_EVENT:
+ explicit = False
+ if event.data.document_start.implicit == 0:
+ explicit = True
+ version = None
+ if event.data.document_start.version_directive != NULL:
+ version = (event.data.document_start.version_directive.major,
+ event.data.document_start.version_directive.minor)
+ tags = None
+ if event.data.document_start.tag_directives.start != NULL:
+ tags = {}
+ tag_directive = event.data.document_start.tag_directives.start
+ while tag_directive != event.data.document_start.tag_directives.end:
+ handle = PyUnicode_FromString(tag_directive.handle)
+ prefix = PyUnicode_FromString(tag_directive.prefix)
+ tags[handle] = prefix
+ tag_directive = tag_directive+1
+ return DocumentStartEvent(start_mark, end_mark,
+ explicit, version, tags)
+ elif event.type == YAML_DOCUMENT_END_EVENT:
+ explicit = False
+ if event.data.document_end.implicit == 0:
+ explicit = True
+ return DocumentEndEvent(start_mark, end_mark, explicit)
+ elif event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(event.data.alias.anchor)
+ return AliasEvent(anchor, start_mark, end_mark)
+ elif event.type == YAML_SCALAR_EVENT:
+ anchor = None
+ if event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.scalar.anchor)
+ tag = None
+ if event.data.scalar.tag != NULL:
+ tag = PyUnicode_FromString(event.data.scalar.tag)
+ value = PyUnicode_DecodeUTF8(event.data.scalar.value,
+ event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ style = None
+ if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarEvent(anchor, tag,
+ (plain_implicit, quoted_implicit),
+ value, start_mark, end_mark, style)
+ elif event.type == YAML_SEQUENCE_START_EVENT:
+ anchor = None
+ if event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
+ tag = None
+ if event.data.sequence_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.sequence_start.tag)
+ implicit = False
+ if event.data.sequence_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ return SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_MAPPING_START_EVENT:
+ anchor = None
+ if event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
+ tag = None
+ if event.data.mapping_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.mapping_start.tag)
+ implicit = False
+ if event.data.mapping_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ return MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_SEQUENCE_END_EVENT:
+ return SequenceEndEvent(start_mark, end_mark)
+ elif event.type == YAML_MAPPING_END_EVENT:
+ return MappingEndEvent(start_mark, end_mark)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown event type")
+ else:
+ raise ValueError(u"unknown event type")
+
+ def get_event(self):
+ if self.current_event is not None:
+ value = self.current_event
+ self.current_event = None
+ else:
+ value = self._parse()
+ return value
+
+ def peek_event(self):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ return self.current_event
+
+ def check_event(self, *choices):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ if self.current_event is None:
+ return False
+ if not choices:
+ return True
+ event_class = self.current_event.__class__
+ for choice in choices:
+ if event_class is choice:
+ return True
+ return False
+
+ def check_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_STREAM_START_EVENT:
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return True
+ return False
+
+ def get_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return self._compose_document()
+
+ def get_single_node(self):
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ document = None
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ document = self._compose_document()
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document", mark)
+ else:
+ raise ComposerError(u"expected a single document in the stream",
+ document.start_mark, u"but found another document", mark)
+ return document
+
+ cdef object _compose_document(self):
+ yaml_event_delete(&self.parsed_event)
+ node = self._compose_node(None, None)
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self.anchors = {}
+ return node
+
+ cdef object _compose_node(self, object parent, object index):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
+ if anchor not in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError(None, None, "found undefined alias", mark)
+ else:
+ raise ComposerError(None, None, u"found undefined alias", mark)
+ yaml_event_delete(&self.parsed_event)
+ return self.anchors[anchor]
+ anchor = None
+ if self.parsed_event.type == YAML_SCALAR_EVENT \
+ and self.parsed_event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
+ and self.parsed_event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
+ and self.parsed_event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
+ if anchor is not None:
+ if anchor in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("found duplicate anchor; first occurrence",
+ self.anchors[anchor].start_mark, "second occurrence", mark)
+ else:
+ raise ComposerError(u"found duplicate anchor; first occurrence",
+ self.anchors[anchor].start_mark, u"second occurrence", mark)
+ self.descend_resolver(parent, index)
+ if self.parsed_event.type == YAML_SCALAR_EVENT:
+ node = self._compose_scalar_node(anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
+ node = self._compose_sequence_node(anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
+ node = self._compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ cdef _compose_scalar_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ value = PyUnicode_DecodeUTF8(self.parsed_event.data.scalar.value,
+ self.parsed_event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if self.parsed_event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if self.parsed_event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ if self.parsed_event.data.scalar.tag == NULL \
+ or (self.parsed_event.data.scalar.tag[0] == c'!'
+ and self.parsed_event.data.scalar.tag[1] == c'\0'):
+ tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
+ style = None
+ if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ node = ScalarNode(tag, value, start_mark, end_mark, style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_sequence_node(self, object anchor):
+ cdef int index
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.sequence_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.sequence_start.tag == NULL \
+ or (self.parsed_event.data.sequence_start.tag[0] == c'!'
+ and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
+ tag = self.resolve(SequenceNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
+ flow_style = None
+ if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ value = []
+ node = SequenceNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ index = 0
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
+ value.append(self._compose_node(node, index))
+ index = index+1
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_mapping_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.mapping_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.mapping_start.tag == NULL \
+ or (self.parsed_event.data.mapping_start.tag[0] == c'!'
+ and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
+ tag = self.resolve(MappingNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
+ flow_style = None
+ if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ value = []
+ node = MappingNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_MAPPING_END_EVENT:
+ item_key = self._compose_node(node, None)
+ item_value = self._compose_node(node, item_key)
+ value.append((item_key, item_value))
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef int _parse_next_event(self) except 0:
+ if self.parsed_event.type == YAML_NO_EVENT:
+ if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
+ error = self._parser_error()
+ raise error
+ return 1
+
+cdef int input_handler(void *data, char *buffer, int size, int *read) except 0:
+ cdef CParser parser
+ parser = <CParser>data
+ if parser.stream_cache is None:
+ value = parser.stream.read(size)
+ if PyUnicode_CheckExact(value) != 0:
+ value = PyUnicode_AsUTF8String(value)
+ parser.unicode_source = 1
+ if PyString_CheckExact(value) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string value is expected")
+ else:
+ raise TypeError(u"a string value is expected")
+ parser.stream_cache = value
+ parser.stream_cache_pos = 0
+ parser.stream_cache_len = PyString_GET_SIZE(value)
+ if (parser.stream_cache_len - parser.stream_cache_pos) < size:
+ size = parser.stream_cache_len - parser.stream_cache_pos
+ if size > 0:
+ memcpy(buffer, PyString_AS_STRING(parser.stream_cache)
+ + parser.stream_cache_pos, size)
+ read[0] = size
+ parser.stream_cache_pos += size
+ if parser.stream_cache_pos == parser.stream_cache_len:
+ parser.stream_cache = None
+ return 1
+
+cdef class CEmitter:
+
+ cdef yaml_emitter_t emitter
+
+ cdef object stream
+
+ cdef int document_start_implicit
+ cdef int document_end_implicit
+ cdef object use_version
+ cdef object use_tags
+
+ cdef object serialized_nodes
+ cdef object anchors
+ cdef int last_alias_id
+ cdef int closed
+ cdef int dump_unicode
+ cdef object use_encoding
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ if yaml_emitter_initialize(&self.emitter) == 0:
+ raise MemoryError
+ self.stream = stream
+ self.dump_unicode = 0
+ if PY_MAJOR_VERSION < 3:
+ if getattr3(stream, 'encoding', None):
+ self.dump_unicode = 1
+ else:
+ if hasattr(stream, u'encoding'):
+ self.dump_unicode = 1
+ self.use_encoding = encoding
+ yaml_emitter_set_output(&self.emitter, output_handler, <void *>self)
+ if canonical:
+ yaml_emitter_set_canonical(&self.emitter, 1)
+ if indent is not None:
+ yaml_emitter_set_indent(&self.emitter, indent)
+ if width is not None:
+ yaml_emitter_set_width(&self.emitter, width)
+ if allow_unicode:
+ yaml_emitter_set_unicode(&self.emitter, 1)
+ if line_break is not None:
+ if line_break == '\r':
+ yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
+ elif line_break == '\n':
+ yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
+ elif line_break == '\r\n':
+ yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
+ self.document_start_implicit = 1
+ if explicit_start:
+ self.document_start_implicit = 0
+ self.document_end_implicit = 1
+ if explicit_end:
+ self.document_end_implicit = 0
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+ self.closed = -1
+
+ def __dealloc__(self):
+ yaml_emitter_delete(&self.emitter)
+
+ def dispose(self):
+ pass
+
+ cdef object _emitter_error(self):
+ if self.emitter.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.emitter.error == YAML_EMITTER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ problem = self.emitter.problem
+ else:
+ problem = PyUnicode_FromString(self.emitter.problem)
+ return EmitterError(problem)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no emitter error")
+ else:
+ raise ValueError(u"no emitter error")
+
+ cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
+ cdef yaml_encoding_t encoding
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ event_class = event_object.__class__
+ if event_class is StreamStartEvent:
+ encoding = YAML_UTF8_ENCODING
+ if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ if event_object.encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(event, encoding)
+ elif event_class is StreamEndEvent:
+ yaml_stream_end_event_initialize(event)
+ elif event_class is DocumentStartEvent:
+ version_directive = NULL
+ if event_object.version:
+ version_directive_value.major = event_object.version[0]
+ version_directive_value.minor = event_object.version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if event_object.tags:
+ if len(event_object.tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ cache = []
+ for handle in event_object.tags:
+ prefix = event_object.tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
+ tag_directives_end.handle = PyString_AS_STRING(handle)
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
+ tag_directives_end.prefix = PyString_AS_STRING(prefix)
+ tag_directives_end = tag_directives_end+1
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ if yaml_document_start_event_initialize(event, version_directive,
+ tag_directives_start, tag_directives_end, implicit) == 0:
+ raise MemoryError
+ elif event_class is DocumentEndEvent:
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ yaml_document_end_event_initialize(event, implicit)
+ elif event_class is AliasEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if yaml_alias_event_initialize(event, anchor) == 0:
+ raise MemoryError
+ elif event_class is ScalarEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = event_object.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ plain_implicit = 0
+ quoted_implicit = 0
+ if event_object.implicit is not None:
+ plain_implicit = event_object.implicit[0]
+ quoted_implicit = event_object.implicit[1]
+ style_object = event_object.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
+ if yaml_scalar_event_initialize(event, anchor, tag, value, length,
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if event_object.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ if yaml_sequence_start_event_initialize(event, anchor, tag,
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ elif event_class is MappingStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if event_object.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
+ if yaml_mapping_start_event_initialize(event, anchor, tag,
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceEndEvent:
+ yaml_sequence_end_event_initialize(event)
+ elif event_class is MappingEndEvent:
+ yaml_mapping_end_event_initialize(event)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("invalid event %s" % event_object)
+ else:
+ raise TypeError(u"invalid event %s" % event_object)
+ return 1
+
+ def emit(self, event_object):
+ cdef yaml_event_t event
+ self._object_to_event(event_object, &event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+
+ def open(self):
+ cdef yaml_event_t event
+ cdef yaml_encoding_t encoding
+ if self.closed == -1:
+ if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ else:
+ encoding = YAML_UTF8_ENCODING
+ if self.use_encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(&event, encoding)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 0
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is already opened")
+ else:
+ raise SerializerError(u"serializer is already opened")
+
+ def close(self):
+ cdef yaml_event_t event
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 0:
+ yaml_stream_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 1
+
+ def serialize(self, node):
+ cdef yaml_event_t event
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ cache = []
+ version_directive = NULL
+ if self.use_version:
+ version_directive_value.major = self.use_version[0]
+ version_directive_value.minor = self.use_version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if self.use_tags:
+ if len(self.use_tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ for handle in self.use_tags:
+ prefix = self.use_tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
+ tag_directives_end.handle = PyString_AS_STRING(handle)
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
+ tag_directives_end.prefix = PyString_AS_STRING(prefix)
+ tag_directives_end = tag_directives_end+1
+ if yaml_document_start_event_initialize(&event, version_directive,
+ tag_directives_start, tag_directives_end,
+ self.document_start_implicit) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self._anchor_node(node)
+ self._serialize_node(node, None, None)
+ yaml_document_end_event_initialize(&event, self.document_end_implicit)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+
+ cdef int _anchor_node(self, object node) except 0:
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.last_alias_id = self.last_alias_id+1
+ self.anchors[node] = u"id%03d" % self.last_alias_id
+ else:
+ self.anchors[node] = None
+ node_class = node.__class__
+ if node_class is SequenceNode:
+ for item in node.value:
+ self._anchor_node(item)
+ elif node_class is MappingNode:
+ for key, value in node.value:
+ self._anchor_node(key)
+ self._anchor_node(value)
+ return 1
+
+ cdef int _serialize_node(self, object node, object parent, object index) except 0:
+ cdef yaml_event_t event
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef int item_index
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ anchor_object = self.anchors[node]
+ anchor = NULL
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if node in self.serialized_nodes:
+ if yaml_alias_event_initialize(&event, anchor) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ else:
+ node_class = node.__class__
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if node_class is ScalarNode:
+ plain_implicit = 0
+ quoted_implicit = 0
+ tag_object = node.tag
+ if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
+ plain_implicit = 1
+ if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
+ quoted_implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = node.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ style_object = node.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
+ if yaml_scalar_event_initialize(&event, anchor, tag, value, length,
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is SequenceNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(SequenceNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if node.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ if yaml_sequence_start_event_initialize(&event, anchor, tag,
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ item_index = 0
+ for item in node.value:
+ self._serialize_node(item, node, item_index)
+ item_index = item_index+1
+ yaml_sequence_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is MappingNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(MappingNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if node.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
+ if yaml_mapping_start_event_initialize(&event, anchor, tag,
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ for item_key, item_value in node.value:
+ self._serialize_node(item_key, node, None)
+ self._serialize_node(item_value, node, item_key)
+ yaml_mapping_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.ascend_resolver()
+ return 1
+
+cdef int output_handler(void *data, char *buffer, int size) except 0:
+ cdef CEmitter emitter
+ emitter = <CEmitter>data
+ if emitter.dump_unicode == 0:
+ value = PyString_FromStringAndSize(buffer, size)
+ else:
+ value = PyUnicode_DecodeUTF8(buffer, size, 'strict')
+ emitter.stream.write(value)
+ return 1
+
diff --git a/contrib/python/ruamel.yaml.clib/py2/ya.make b/contrib/python/ruamel.yaml.clib/py2/ya.make
new file mode 100644
index 0000000000..0f53126bd7
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py2/ya.make
@@ -0,0 +1,31 @@
+PY2_LIBRARY()
+
+VERSION(0.2.7)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/libs/yaml
+)
+
+ADDINCL(
+ contrib/python/ruamel.yaml.clib/py2
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ CYTHON_C
+ _ruamel_yaml.pyx
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/ruamel.yaml.clib/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/ruamel.yaml.clib/py3/.dist-info/METADATA b/contrib/python/ruamel.yaml.clib/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..7b8ca2a332
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/.dist-info/METADATA
@@ -0,0 +1,55 @@
+Metadata-Version: 2.1
+Name: ruamel.yaml.clib
+Version: 0.2.7
+Summary: C version of reader, parser and emitter for ruamel.yaml derived from libyaml
+Home-page: https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree
+Author: Anthon van der Neut
+Author-email: a.van.der.neut@ruamel.eu
+License: MIT
+Keywords: yaml 1.2 parser c-library config
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+
+
+ruamel.yaml.clib
+================
+
+``ruamel.yaml.clib`` is the C based reader/scanner and emitter for ruamel.yaml
+
+:version: 0.2.7
+:updated: 2022-10-19
+:documentation: http://yaml.readthedocs.io
+:repository: https://sourceforge.net/projects/ruamel-yaml-clib/
+:pypi: https://pypi.org/project/ruamel.yaml.clib/
+
+This package was split off from ruamel.yaml, so that ruamel.yaml can be built as
+a universal wheel. Apart from the C code seldom changing, and taking a long
+time to compile for all platforms, this allows installation of the .so
+on Linux systems under /usr/lib64/pythonX.Y (without a .pth file or a ruamel
+directory) and the Python code for ruamel.yaml under /usr/lib/pythonX.Y.
+
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
+This release is in loving memory of Johanna Clasina van der Neut-Bandel [1922-10-19 - 2015-11-21]
+
+
diff --git a/contrib/python/ruamel.yaml.clib/py3/.dist-info/top_level.txt b/contrib/python/ruamel.yaml.clib/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..be006da740
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_ruamel_yaml
+ruamel
diff --git a/contrib/python/ruamel.yaml.clib/py3/LICENSE b/contrib/python/ruamel.yaml.clib/py3/LICENSE
new file mode 100644
index 0000000000..786ebed939
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/LICENSE
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2019-2022 Anthon van der Neut, Ruamel bvba
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/contrib/python/ruamel.yaml.clib/py3/README.rst b/contrib/python/ruamel.yaml.clib/py3/README.rst
new file mode 100644
index 0000000000..c41aca60e6
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/README.rst
@@ -0,0 +1,25 @@
+
+ruamel.yaml.clib
+================
+
+``ruamel.yaml.clib`` is the C based reader/scanner and emitter for ruamel.yaml
+
+:version: 0.2.6
+:updated: 2021-07-04
+:documentation: http://yaml.readthedocs.io
+:repository: https://sourceforge.net/projects/ruamel-yaml-clib/
+:pypi: https://pypi.org/project/ruamel.yaml.clib/
+
+This package was split off from ruamel.yaml, so that ruamel.yaml can be built as
+a universal wheel. Apart from the C code seldom changing, and taking a long
+time to compile for all platforms, this allows installation of the .so
+on Linux systems under /usr/lib64/pythonX.Y (without a .pth file or a ruamel
+directory) and the Python code for ruamel.yaml under /usr/lib/pythonX.Y.
+
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml-clib/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
diff --git a/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.h b/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.h
new file mode 100644
index 0000000000..568db509aa
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.h
@@ -0,0 +1,23 @@
+
+#include "yaml.h"
+
+#if PY_MAJOR_VERSION < 3
+
+#define PyUnicode_FromString(s) PyUnicode_DecodeUTF8((s), strlen(s), "strict")
+
+#else
+
+#define PyString_CheckExact PyBytes_CheckExact
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+
+#endif
+
+#ifdef _MSC_VER /* MS Visual C++ 6.0 */
+#if _MSC_VER == 1200
+
+#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(z)
+
+#endif
+#endif
diff --git a/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pxd b/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pxd
new file mode 100644
index 0000000000..d8dc3c6bda
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pxd
@@ -0,0 +1,251 @@
+
+cdef extern from "_ruamel_yaml.h":
+
+ void malloc(int l)
+ void memcpy(char *d, char *s, int l)
+ int strlen(char *s)
+ int PyString_CheckExact(object o)
+ int PyUnicode_CheckExact(object o)
+ char *PyString_AS_STRING(object o)
+ int PyString_GET_SIZE(object o)
+ object PyString_FromStringAndSize(char *v, int l)
+ object PyUnicode_FromString(char *u)
+ object PyUnicode_DecodeUTF8(char *u, int s, char *e)
+ object PyUnicode_AsUTF8String(object o)
+ int PY_MAJOR_VERSION
+
+ ctypedef enum:
+ SIZEOF_VOID_P
+ ctypedef enum yaml_encoding_t:
+ YAML_ANY_ENCODING
+ YAML_UTF8_ENCODING
+ YAML_UTF16LE_ENCODING
+ YAML_UTF16BE_ENCODING
+ ctypedef enum yaml_break_t:
+ YAML_ANY_BREAK
+ YAML_CR_BREAK
+ YAML_LN_BREAK
+ YAML_CRLN_BREAK
+ ctypedef enum yaml_error_type_t:
+ YAML_NO_ERROR
+ YAML_MEMORY_ERROR
+ YAML_READER_ERROR
+ YAML_SCANNER_ERROR
+ YAML_PARSER_ERROR
+ YAML_WRITER_ERROR
+ YAML_EMITTER_ERROR
+ ctypedef enum yaml_scalar_style_t:
+ YAML_ANY_SCALAR_STYLE
+ YAML_PLAIN_SCALAR_STYLE
+ YAML_SINGLE_QUOTED_SCALAR_STYLE
+ YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ YAML_LITERAL_SCALAR_STYLE
+ YAML_FOLDED_SCALAR_STYLE
+ ctypedef enum yaml_sequence_style_t:
+ YAML_ANY_SEQUENCE_STYLE
+ YAML_BLOCK_SEQUENCE_STYLE
+ YAML_FLOW_SEQUENCE_STYLE
+ ctypedef enum yaml_mapping_style_t:
+ YAML_ANY_MAPPING_STYLE
+ YAML_BLOCK_MAPPING_STYLE
+ YAML_FLOW_MAPPING_STYLE
+ ctypedef enum yaml_token_type_t:
+ YAML_NO_TOKEN
+ YAML_STREAM_START_TOKEN
+ YAML_STREAM_END_TOKEN
+ YAML_VERSION_DIRECTIVE_TOKEN
+ YAML_TAG_DIRECTIVE_TOKEN
+ YAML_DOCUMENT_START_TOKEN
+ YAML_DOCUMENT_END_TOKEN
+ YAML_BLOCK_SEQUENCE_START_TOKEN
+ YAML_BLOCK_MAPPING_START_TOKEN
+ YAML_BLOCK_END_TOKEN
+ YAML_FLOW_SEQUENCE_START_TOKEN
+ YAML_FLOW_SEQUENCE_END_TOKEN
+ YAML_FLOW_MAPPING_START_TOKEN
+ YAML_FLOW_MAPPING_END_TOKEN
+ YAML_BLOCK_ENTRY_TOKEN
+ YAML_FLOW_ENTRY_TOKEN
+ YAML_KEY_TOKEN
+ YAML_VALUE_TOKEN
+ YAML_ALIAS_TOKEN
+ YAML_ANCHOR_TOKEN
+ YAML_TAG_TOKEN
+ YAML_SCALAR_TOKEN
+ ctypedef enum yaml_event_type_t:
+ YAML_NO_EVENT
+ YAML_STREAM_START_EVENT
+ YAML_STREAM_END_EVENT
+ YAML_DOCUMENT_START_EVENT
+ YAML_DOCUMENT_END_EVENT
+ YAML_ALIAS_EVENT
+ YAML_SCALAR_EVENT
+ YAML_SEQUENCE_START_EVENT
+ YAML_SEQUENCE_END_EVENT
+ YAML_MAPPING_START_EVENT
+ YAML_MAPPING_END_EVENT
+
+ ctypedef int yaml_read_handler_t(void *data, char *buffer,
+ int size, int *size_read) except 0
+
+ ctypedef int yaml_write_handler_t(void *data, char *buffer,
+ int size) except 0
+
+ ctypedef struct yaml_mark_t:
+ int index
+ int line
+ int column
+ ctypedef struct yaml_version_directive_t:
+ int major
+ int minor
+ ctypedef struct yaml_tag_directive_t:
+ char *handle
+ char *prefix
+
+ ctypedef struct _yaml_token_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_token_alias_data_t:
+ char *value
+ ctypedef struct _yaml_token_anchor_data_t:
+ char *value
+ ctypedef struct _yaml_token_tag_data_t:
+ char *handle
+ char *suffix
+ ctypedef struct _yaml_token_scalar_data_t:
+ char *value
+ int length
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_token_version_directive_data_t:
+ int major
+ int minor
+ ctypedef struct _yaml_token_tag_directive_data_t:
+ char *handle
+ char *prefix
+ ctypedef union _yaml_token_data_t:
+ _yaml_token_stream_start_data_t stream_start
+ _yaml_token_alias_data_t alias
+ _yaml_token_anchor_data_t anchor
+ _yaml_token_tag_data_t tag
+ _yaml_token_scalar_data_t scalar
+ _yaml_token_version_directive_data_t version_directive
+ _yaml_token_tag_directive_data_t tag_directive
+ ctypedef struct yaml_token_t:
+ yaml_token_type_t type
+ _yaml_token_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct _yaml_event_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_event_document_start_data_tag_directives_t:
+ yaml_tag_directive_t *start
+ yaml_tag_directive_t *end
+ ctypedef struct _yaml_event_document_start_data_t:
+ yaml_version_directive_t *version_directive
+ _yaml_event_document_start_data_tag_directives_t tag_directives
+ int implicit
+ ctypedef struct _yaml_event_document_end_data_t:
+ int implicit
+ ctypedef struct _yaml_event_alias_data_t:
+ char *anchor
+ ctypedef struct _yaml_event_scalar_data_t:
+ char *anchor
+ char *tag
+ char *value
+ int length
+ int plain_implicit
+ int quoted_implicit
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_event_sequence_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_sequence_style_t style
+ ctypedef struct _yaml_event_mapping_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_mapping_style_t style
+ ctypedef union _yaml_event_data_t:
+ _yaml_event_stream_start_data_t stream_start
+ _yaml_event_document_start_data_t document_start
+ _yaml_event_document_end_data_t document_end
+ _yaml_event_alias_data_t alias
+ _yaml_event_scalar_data_t scalar
+ _yaml_event_sequence_start_data_t sequence_start
+ _yaml_event_mapping_start_data_t mapping_start
+ ctypedef struct yaml_event_t:
+ yaml_event_type_t type
+ _yaml_event_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct yaml_parser_t:
+ yaml_error_type_t error
+ char *problem
+ int problem_offset
+ int problem_value
+ yaml_mark_t problem_mark
+ char *context
+ yaml_mark_t context_mark
+
+ ctypedef struct yaml_emitter_t:
+ yaml_error_type_t error
+ char *problem
+
+ char *yaml_get_version_string()
+ void yaml_get_version(int *major, int *minor, int *patch)
+
+ void yaml_token_delete(yaml_token_t *token)
+
+ int yaml_stream_start_event_initialize(yaml_event_t *event,
+ yaml_encoding_t encoding)
+ int yaml_stream_end_event_initialize(yaml_event_t *event)
+ int yaml_document_start_event_initialize(yaml_event_t *event,
+ yaml_version_directive_t *version_directive,
+ yaml_tag_directive_t *tag_directives_start,
+ yaml_tag_directive_t *tag_directives_end,
+ int implicit)
+ int yaml_document_end_event_initialize(yaml_event_t *event,
+ int implicit)
+ int yaml_alias_event_initialize(yaml_event_t *event, char *anchor)
+ int yaml_scalar_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, char *value, int length,
+ int plain_implicit, int quoted_implicit,
+ yaml_scalar_style_t style)
+ int yaml_sequence_start_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, int implicit, yaml_sequence_style_t style)
+ int yaml_sequence_end_event_initialize(yaml_event_t *event)
+ int yaml_mapping_start_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, int implicit, yaml_mapping_style_t style)
+ int yaml_mapping_end_event_initialize(yaml_event_t *event)
+ void yaml_event_delete(yaml_event_t *event)
+
+ int yaml_parser_initialize(yaml_parser_t *parser)
+ void yaml_parser_delete(yaml_parser_t *parser)
+ void yaml_parser_set_input_string(yaml_parser_t *parser,
+ char *input, int size)
+ void yaml_parser_set_input(yaml_parser_t *parser,
+ yaml_read_handler_t *handler, void *data)
+ void yaml_parser_set_encoding(yaml_parser_t *parser,
+ yaml_encoding_t encoding)
+ int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
+ int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
+
+ int yaml_emitter_initialize(yaml_emitter_t *emitter)
+ void yaml_emitter_delete(yaml_emitter_t *emitter)
+ void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
+ char *output, int size, int *size_written)
+ void yaml_emitter_set_output(yaml_emitter_t *emitter,
+ yaml_write_handler_t *handler, void *data)
+ void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
+ yaml_encoding_t encoding)
+ void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
+ void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
+ void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
+ void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
+ void yaml_emitter_set_break(yaml_emitter_t *emitter,
+ yaml_break_t line_break)
+ int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
+ int yaml_emitter_flush(yaml_emitter_t *emitter)
+
diff --git a/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pyx b/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pyx
new file mode 100644
index 0000000000..4fd50e207b
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/_ruamel_yaml.pyx
@@ -0,0 +1,1526 @@
+
+
+def get_version_string():
+ cdef char *value
+ value = yaml_get_version_string()
+ if PY_MAJOR_VERSION < 3:
+ return value
+ else:
+ return PyUnicode_FromString(value)
+
+def get_version():
+ cdef int major, minor, patch
+ yaml_get_version(&major, &minor, &patch)
+ return (major, minor, patch)
+
+#Mark = yaml.error.Mark
+from ruamel.yaml.error import YAMLError
+from ruamel.yaml.reader import ReaderError
+from ruamel.yaml.scanner import ScannerError
+from ruamel.yaml.parser import ParserError
+from ruamel.yaml.composer import ComposerError
+from ruamel.yaml.constructor import ConstructorError
+from ruamel.yaml.emitter import EmitterError
+from ruamel.yaml.serializer import SerializerError
+from ruamel.yaml.representer import RepresenterError
+
+from ruamel.yaml.tokens import StreamStartToken
+from ruamel.yaml.tokens import StreamEndToken
+from ruamel.yaml.tokens import DirectiveToken
+from ruamel.yaml.tokens import DocumentStartToken
+from ruamel.yaml.tokens import DocumentEndToken
+from ruamel.yaml.tokens import BlockSequenceStartToken
+from ruamel.yaml.tokens import BlockMappingStartToken
+from ruamel.yaml.tokens import BlockEndToken
+from ruamel.yaml.tokens import FlowSequenceStartToken
+from ruamel.yaml.tokens import FlowMappingStartToken
+from ruamel.yaml.tokens import FlowSequenceEndToken
+from ruamel.yaml.tokens import FlowMappingEndToken
+from ruamel.yaml.tokens import KeyToken
+from ruamel.yaml.tokens import ValueToken
+from ruamel.yaml.tokens import BlockEntryToken
+from ruamel.yaml.tokens import FlowEntryToken
+from ruamel.yaml.tokens import AliasToken
+from ruamel.yaml.tokens import AnchorToken
+from ruamel.yaml.tokens import TagToken
+from ruamel.yaml.tokens import ScalarToken
+
+from ruamel.yaml.events import StreamStartEvent
+from ruamel.yaml.events import StreamEndEvent
+from ruamel.yaml.events import DocumentStartEvent
+from ruamel.yaml.events import DocumentEndEvent
+from ruamel.yaml.events import AliasEvent
+from ruamel.yaml.events import ScalarEvent
+from ruamel.yaml.events import SequenceStartEvent
+from ruamel.yaml.events import SequenceEndEvent
+from ruamel.yaml.events import MappingStartEvent
+from ruamel.yaml.events import MappingEndEvent
+
+from ruamel.yaml.nodes import ScalarNode
+from ruamel.yaml.nodes import SequenceNode
+from ruamel.yaml.nodes import MappingNode
+
+cdef class Mark:
+ cdef readonly object name
+ cdef readonly size_t index
+ cdef readonly size_t line
+ cdef readonly size_t column
+ cdef readonly buffer
+ cdef readonly pointer
+
+ def __init__(self, object name, size_t index, size_t line, size_t column,
+ object buffer, object pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self):
+ return None
+
+ def __str__(self):
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ return where
+
+#class YAMLError(Exception):
+# pass
+#
+#class MarkedYAMLError(YAMLError):
+#
+# def __init__(self, context=None, context_mark=None,
+# problem=None, problem_mark=None, note=None):
+# self.context = context
+# self.context_mark = context_mark
+# self.problem = problem
+# self.problem_mark = problem_mark
+# self.note = note
+#
+# def __str__(self):
+# lines = []
+# if self.context is not None:
+# lines.append(self.context)
+# if self.context_mark is not None \
+# and (self.problem is None or self.problem_mark is None
+# or self.context_mark.name != self.problem_mark.name
+# or self.context_mark.line != self.problem_mark.line
+# or self.context_mark.column != self.problem_mark.column):
+# lines.append(str(self.context_mark))
+# if self.problem is not None:
+# lines.append(self.problem)
+# if self.problem_mark is not None:
+# lines.append(str(self.problem_mark))
+# if self.note is not None:
+# lines.append(self.note)
+# return '\n'.join(lines)
+#
+#class ReaderError(YAMLError):
+#
+# def __init__(self, name, position, character, encoding, reason):
+# self.name = name
+# self.character = character
+# self.position = position
+# self.encoding = encoding
+# self.reason = reason
+#
+# def __str__(self):
+# if isinstance(self.character, str):
+# return "'%s' codec can't decode byte #x%02x: %s\n" \
+# " in \"%s\", position %d" \
+# % (self.encoding, ord(self.character), self.reason,
+# self.name, self.position)
+# else:
+# return "unacceptable character #x%04x: %s\n" \
+# " in \"%s\", position %d" \
+# % (ord(self.character), self.reason,
+# self.name, self.position)
+#
+#class ScannerError(MarkedYAMLError):
+# pass
+#
+#class ParserError(MarkedYAMLError):
+# pass
+#
+#class EmitterError(YAMLError):
+# pass
+#
+#cdef class Token:
+# cdef readonly Mark start_mark
+# cdef readonly Mark end_mark
+# def __init__(self, Mark start_mark, Mark end_mark):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class StreamStartToken(Token):
+# cdef readonly object encoding
+# def __init__(self, Mark start_mark, Mark end_mark, encoding):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.encoding = encoding
+#
+#cdef class StreamEndToken(Token):
+# pass
+#
+#cdef class DirectiveToken(Token):
+# cdef readonly object name
+# cdef readonly object value
+# def __init__(self, name, value, Mark start_mark, Mark end_mark):
+# self.name = name
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class DocumentStartToken(Token):
+# pass
+#
+#cdef class DocumentEndToken(Token):
+# pass
+#
+#cdef class BlockSequenceStartToken(Token):
+# pass
+#
+#cdef class BlockMappingStartToken(Token):
+# pass
+#
+#cdef class BlockEndToken(Token):
+# pass
+#
+#cdef class FlowSequenceStartToken(Token):
+# pass
+#
+#cdef class FlowMappingStartToken(Token):
+# pass
+#
+#cdef class FlowSequenceEndToken(Token):
+# pass
+#
+#cdef class FlowMappingEndToken(Token):
+# pass
+#
+#cdef class KeyToken(Token):
+# pass
+#
+#cdef class ValueToken(Token):
+# pass
+#
+#cdef class BlockEntryToken(Token):
+# pass
+#
+#cdef class FlowEntryToken(Token):
+# pass
+#
+#cdef class AliasToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class AnchorToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class TagToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class ScalarToken(Token):
+# cdef readonly object value
+# cdef readonly object plain
+# cdef readonly object style
+# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
+# self.value = value
+# self.plain = plain
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.style = style
+
+cdef class CParser:
+
+ cdef yaml_parser_t parser
+ cdef yaml_event_t parsed_event
+
+ cdef object stream
+ cdef object stream_name
+ cdef object current_token
+ cdef object current_event
+ cdef object anchors
+ cdef object stream_cache
+ cdef int stream_cache_len
+ cdef int stream_cache_pos
+ cdef int unicode_source
+
+ def __init__(self, stream):
+ cdef is_readable
+ if yaml_parser_initialize(&self.parser) == 0:
+ raise MemoryError
+ self.parsed_event.type = YAML_NO_EVENT
+ is_readable = 1
+ try:
+ stream.read
+ except AttributeError:
+ is_readable = 0
+ self.unicode_source = 0
+ if is_readable:
+ self.stream = stream
+ try:
+ self.stream_name = stream.name
+ except AttributeError:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<file>'
+ else:
+ self.stream_name = u'<file>'
+ self.stream_cache = None
+ self.stream_cache_len = 0
+ self.stream_cache_pos = 0
+ yaml_parser_set_input(&self.parser, input_handler, <void *>self)
+ else:
+ if PyUnicode_CheckExact(stream) != 0:
+ stream = PyUnicode_AsUTF8String(stream)
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<unicode string>'
+ else:
+ self.stream_name = u'<unicode string>'
+ self.unicode_source = 1
+ else:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<byte string>'
+ else:
+ self.stream_name = u'<byte string>'
+ if PyString_CheckExact(stream) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string or stream input is required")
+ else:
+ raise TypeError(u"a string or stream input is required")
+ self.stream = stream
+ yaml_parser_set_input_string(&self.parser, PyString_AS_STRING(stream), PyString_GET_SIZE(stream))
+ self.current_token = None
+ self.current_event = None
+ self.anchors = {}
+
+ def __dealloc__(self):
+ yaml_parser_delete(&self.parser)
+ yaml_event_delete(&self.parsed_event)
+
+ def dispose(self):
+ pass
+
+ cdef object _parser_error(self):
+ if self.parser.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.parser.error == YAML_READER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, '?', self.parser.problem)
+ else:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
+ elif self.parser.error == YAML_SCANNER_ERROR \
+ or self.parser.error == YAML_PARSER_ERROR:
+ context_mark = None
+ problem_mark = None
+ if self.parser.context != NULL:
+ context_mark = Mark(self.stream_name,
+ self.parser.context_mark.index,
+ self.parser.context_mark.line,
+ self.parser.context_mark.column, None, None)
+ if self.parser.problem != NULL:
+ problem_mark = Mark(self.stream_name,
+ self.parser.problem_mark.index,
+ self.parser.problem_mark.line,
+ self.parser.problem_mark.column, None, None)
+ context = None
+ if self.parser.context != NULL:
+ if PY_MAJOR_VERSION < 3:
+ context = self.parser.context
+ else:
+ context = PyUnicode_FromString(self.parser.context)
+ if PY_MAJOR_VERSION < 3:
+ problem = self.parser.problem
+ else:
+ problem = PyUnicode_FromString(self.parser.problem)
+ if self.parser.error == YAML_SCANNER_ERROR:
+ return ScannerError(context, context_mark, problem, problem_mark)
+ else:
+ return ParserError(context, context_mark, problem, problem_mark)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no parser error")
+ else:
+ raise ValueError(u"no parser error")
+
+ def raw_scan(self):
+ cdef yaml_token_t token
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ if token.type == YAML_NO_TOKEN:
+ done = 1
+ else:
+ count = count+1
+ yaml_token_delete(&token)
+ return count
+
+ cdef object _scan(self):
+ cdef yaml_token_t token
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ token_object = self._token_to_object(&token)
+ yaml_token_delete(&token)
+ return token_object
+
+ cdef object _token_to_object(self, yaml_token_t *token):
+ start_mark = Mark(self.stream_name,
+ token.start_mark.index,
+ token.start_mark.line,
+ token.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ token.end_mark.index,
+ token.end_mark.line,
+ token.end_mark.column,
+ None, None)
+ if token.type == YAML_NO_TOKEN:
+ return None
+ elif token.type == YAML_STREAM_START_TOKEN:
+ encoding = None
+ if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartToken(start_mark, end_mark, encoding)
+ elif token.type == YAML_STREAM_END_TOKEN:
+ return StreamEndToken(start_mark, end_mark)
+ elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
+ return DirectiveToken(u"YAML",
+ (token.data.version_directive.major,
+ token.data.version_directive.minor),
+ start_mark, end_mark)
+ elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
+ handle = PyUnicode_FromString(token.data.tag_directive.handle)
+ prefix = PyUnicode_FromString(token.data.tag_directive.prefix)
+ return DirectiveToken(u"TAG", (handle, prefix),
+ start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_START_TOKEN:
+ return DocumentStartToken(start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_END_TOKEN:
+ return DocumentEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
+ return BlockSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
+ return BlockMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_END_TOKEN:
+ return BlockEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
+ return FlowSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
+ return FlowSequenceEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
+ return FlowMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
+ return FlowMappingEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_ENTRY_TOKEN:
+ return BlockEntryToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_ENTRY_TOKEN:
+ return FlowEntryToken(start_mark, end_mark)
+ elif token.type == YAML_KEY_TOKEN:
+ return KeyToken(start_mark, end_mark)
+ elif token.type == YAML_VALUE_TOKEN:
+ return ValueToken(start_mark, end_mark)
+ elif token.type == YAML_ALIAS_TOKEN:
+ value = PyUnicode_FromString(token.data.alias.value)
+ return AliasToken(value, start_mark, end_mark)
+ elif token.type == YAML_ANCHOR_TOKEN:
+ value = PyUnicode_FromString(token.data.anchor.value)
+ return AnchorToken(value, start_mark, end_mark)
+ elif token.type == YAML_TAG_TOKEN:
+ handle = PyUnicode_FromString(token.data.tag.handle)
+ suffix = PyUnicode_FromString(token.data.tag.suffix)
+ if not handle:
+ handle = None
+ return TagToken((handle, suffix), start_mark, end_mark)
+ elif token.type == YAML_SCALAR_TOKEN:
+ value = PyUnicode_DecodeUTF8(token.data.scalar.value,
+ token.data.scalar.length, 'strict')
+ plain = False
+ style = None
+ if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ plain = True
+ style = u''
+ elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarToken(value, plain,
+ start_mark, end_mark, style)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown token type")
+ else:
+ raise ValueError(u"unknown token type")
+
+ def get_token(self):
+ if self.current_token is not None:
+ value = self.current_token
+ self.current_token = None
+ else:
+ value = self._scan()
+ return value
+
+ def peek_token(self):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ return self.current_token
+
+ def check_token(self, *choices):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ if self.current_token is None:
+ return False
+ if not choices:
+ return True
+ token_class = self.current_token.__class__
+ for choice in choices:
+ if token_class is choice:
+ return True
+ return False
+
+ def raw_parse(self):
+ cdef yaml_event_t event
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ if event.type == YAML_NO_EVENT:
+ done = 1
+ else:
+ count = count+1
+ yaml_event_delete(&event)
+ return count
+
+ cdef object _parse(self):
+ cdef yaml_event_t event
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ event_object = self._event_to_object(&event)
+ yaml_event_delete(&event)
+ return event_object
+
+ cdef object _event_to_object(self, yaml_event_t *event):
+ cdef yaml_tag_directive_t *tag_directive
+ start_mark = Mark(self.stream_name,
+ event.start_mark.index,
+ event.start_mark.line,
+ event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ event.end_mark.index,
+ event.end_mark.line,
+ event.end_mark.column,
+ None, None)
+ if event.type == YAML_NO_EVENT:
+ return None
+ elif event.type == YAML_STREAM_START_EVENT:
+ encoding = None
+ if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartEvent(start_mark, end_mark, encoding)
+ elif event.type == YAML_STREAM_END_EVENT:
+ return StreamEndEvent(start_mark, end_mark)
+ elif event.type == YAML_DOCUMENT_START_EVENT:
+ explicit = False
+ if event.data.document_start.implicit == 0:
+ explicit = True
+ version = None
+ if event.data.document_start.version_directive != NULL:
+ version = (event.data.document_start.version_directive.major,
+ event.data.document_start.version_directive.minor)
+ tags = None
+ if event.data.document_start.tag_directives.start != NULL:
+ tags = {}
+ tag_directive = event.data.document_start.tag_directives.start
+ while tag_directive != event.data.document_start.tag_directives.end:
+ handle = PyUnicode_FromString(tag_directive.handle)
+ prefix = PyUnicode_FromString(tag_directive.prefix)
+ tags[handle] = prefix
+ tag_directive = tag_directive+1
+ return DocumentStartEvent(start_mark, end_mark,
+ explicit, version, tags)
+ elif event.type == YAML_DOCUMENT_END_EVENT:
+ explicit = False
+ if event.data.document_end.implicit == 0:
+ explicit = True
+ return DocumentEndEvent(start_mark, end_mark, explicit)
+ elif event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(event.data.alias.anchor)
+ return AliasEvent(anchor, start_mark, end_mark)
+ elif event.type == YAML_SCALAR_EVENT:
+ anchor = None
+ if event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.scalar.anchor)
+ tag = None
+ if event.data.scalar.tag != NULL:
+ tag = PyUnicode_FromString(event.data.scalar.tag)
+ value = PyUnicode_DecodeUTF8(event.data.scalar.value,
+ event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ style = None
+ if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarEvent(anchor, tag,
+ (plain_implicit, quoted_implicit),
+ value, start_mark, end_mark, style)
+ elif event.type == YAML_SEQUENCE_START_EVENT:
+ anchor = None
+ if event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
+ tag = None
+ if event.data.sequence_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.sequence_start.tag)
+ implicit = False
+ if event.data.sequence_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ return SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_MAPPING_START_EVENT:
+ anchor = None
+ if event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
+ tag = None
+ if event.data.mapping_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.mapping_start.tag)
+ implicit = False
+ if event.data.mapping_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ return MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_SEQUENCE_END_EVENT:
+ return SequenceEndEvent(start_mark, end_mark)
+ elif event.type == YAML_MAPPING_END_EVENT:
+ return MappingEndEvent(start_mark, end_mark)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown event type")
+ else:
+ raise ValueError(u"unknown event type")
+
+ def get_event(self):
+ if self.current_event is not None:
+ value = self.current_event
+ self.current_event = None
+ else:
+ value = self._parse()
+ return value
+
+ def peek_event(self):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ return self.current_event
+
+ def check_event(self, *choices):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ if self.current_event is None:
+ return False
+ if not choices:
+ return True
+ event_class = self.current_event.__class__
+ for choice in choices:
+ if event_class is choice:
+ return True
+ return False
+
+ def check_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_STREAM_START_EVENT:
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return True
+ return False
+
+ def get_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return self._compose_document()
+
+ def get_single_node(self):
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ document = None
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ document = self._compose_document()
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document", mark)
+ else:
+ raise ComposerError(u"expected a single document in the stream",
+ document.start_mark, u"but found another document", mark)
+ return document
+
+ cdef object _compose_document(self):
+ yaml_event_delete(&self.parsed_event)
+ node = self._compose_node(None, None)
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self.anchors = {}
+ return node
+
+ cdef object _compose_node(self, object parent, object index):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
+ if anchor not in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError(None, None, "found undefined alias", mark)
+ else:
+ raise ComposerError(None, None, u"found undefined alias", mark)
+ yaml_event_delete(&self.parsed_event)
+ return self.anchors[anchor]
+ anchor = None
+ if self.parsed_event.type == YAML_SCALAR_EVENT \
+ and self.parsed_event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
+ and self.parsed_event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
+ and self.parsed_event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
+ if anchor is not None:
+ if anchor in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("found duplicate anchor; first occurrence",
+ self.anchors[anchor].start_mark, "second occurrence", mark)
+ else:
+ raise ComposerError(u"found duplicate anchor; first occurrence",
+ self.anchors[anchor].start_mark, u"second occurrence", mark)
+ self.descend_resolver(parent, index)
+ if self.parsed_event.type == YAML_SCALAR_EVENT:
+ node = self._compose_scalar_node(anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
+ node = self._compose_sequence_node(anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
+ node = self._compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ cdef _compose_scalar_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ value = PyUnicode_DecodeUTF8(self.parsed_event.data.scalar.value,
+ self.parsed_event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if self.parsed_event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if self.parsed_event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ if self.parsed_event.data.scalar.tag == NULL \
+ or (self.parsed_event.data.scalar.tag[0] == c'!'
+ and self.parsed_event.data.scalar.tag[1] == c'\0'):
+ tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
+ style = None
+ if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ node = ScalarNode(tag, value, start_mark, end_mark, style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_sequence_node(self, object anchor):
+ cdef int index
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.sequence_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.sequence_start.tag == NULL \
+ or (self.parsed_event.data.sequence_start.tag[0] == c'!'
+ and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
+ tag = self.resolve(SequenceNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
+ flow_style = None
+ if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ value = []
+ node = SequenceNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ index = 0
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
+ value.append(self._compose_node(node, index))
+ index = index+1
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_mapping_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.mapping_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.mapping_start.tag == NULL \
+ or (self.parsed_event.data.mapping_start.tag[0] == c'!'
+ and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
+ tag = self.resolve(MappingNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
+ flow_style = None
+ if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ value = []
+ node = MappingNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_MAPPING_END_EVENT:
+ item_key = self._compose_node(node, None)
+ item_value = self._compose_node(node, item_key)
+ value.append((item_key, item_value))
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef int _parse_next_event(self) except 0:
+ if self.parsed_event.type == YAML_NO_EVENT:
+ if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
+ error = self._parser_error()
+ raise error
+ return 1
+
+cdef int input_handler(void *data, char *buffer, int size, int *read) except 0:
+ cdef CParser parser
+ parser = <CParser>data
+ if parser.stream_cache is None:
+ value = parser.stream.read(size)
+ if PyUnicode_CheckExact(value) != 0:
+ value = PyUnicode_AsUTF8String(value)
+ parser.unicode_source = 1
+ if PyString_CheckExact(value) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string value is expected")
+ else:
+ raise TypeError(u"a string value is expected")
+ parser.stream_cache = value
+ parser.stream_cache_pos = 0
+ parser.stream_cache_len = PyString_GET_SIZE(value)
+ if (parser.stream_cache_len - parser.stream_cache_pos) < size:
+ size = parser.stream_cache_len - parser.stream_cache_pos
+ if size > 0:
+ memcpy(buffer, PyString_AS_STRING(parser.stream_cache)
+ + parser.stream_cache_pos, size)
+ read[0] = size
+ parser.stream_cache_pos += size
+ if parser.stream_cache_pos == parser.stream_cache_len:
+ parser.stream_cache = None
+ return 1
+
+cdef class CEmitter:
+
+ cdef yaml_emitter_t emitter
+
+ cdef object stream
+
+ cdef int document_start_implicit
+ cdef int document_end_implicit
+ cdef object use_version
+ cdef object use_tags
+
+ cdef object serialized_nodes
+ cdef object anchors
+ cdef int last_alias_id
+ cdef int closed
+ cdef int dump_unicode
+ cdef object use_encoding
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ if yaml_emitter_initialize(&self.emitter) == 0:
+ raise MemoryError
+ self.stream = stream
+ self.dump_unicode = 0
+ if PY_MAJOR_VERSION < 3:
+ if getattr3(stream, 'encoding', None):
+ self.dump_unicode = 1
+ else:
+ if hasattr(stream, u'encoding'):
+ self.dump_unicode = 1
+ self.use_encoding = encoding
+ yaml_emitter_set_output(&self.emitter, output_handler, <void *>self)
+ if canonical:
+ yaml_emitter_set_canonical(&self.emitter, 1)
+ if indent is not None:
+ yaml_emitter_set_indent(&self.emitter, indent)
+ if width is not None:
+ yaml_emitter_set_width(&self.emitter, width)
+ if allow_unicode:
+ yaml_emitter_set_unicode(&self.emitter, 1)
+ if line_break is not None:
+ if line_break == '\r':
+ yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
+ elif line_break == '\n':
+ yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
+ elif line_break == '\r\n':
+ yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
+ self.document_start_implicit = 1
+ if explicit_start:
+ self.document_start_implicit = 0
+ self.document_end_implicit = 1
+ if explicit_end:
+ self.document_end_implicit = 0
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+ self.closed = -1
+
+ def __dealloc__(self):
+ yaml_emitter_delete(&self.emitter)
+
+ def dispose(self):
+ pass
+
+ cdef object _emitter_error(self):
+ if self.emitter.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.emitter.error == YAML_EMITTER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ problem = self.emitter.problem
+ else:
+ problem = PyUnicode_FromString(self.emitter.problem)
+ return EmitterError(problem)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no emitter error")
+ else:
+ raise ValueError(u"no emitter error")
+
+ cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
+ cdef yaml_encoding_t encoding
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ event_class = event_object.__class__
+ if event_class is StreamStartEvent:
+ encoding = YAML_UTF8_ENCODING
+ if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ if event_object.encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(event, encoding)
+ elif event_class is StreamEndEvent:
+ yaml_stream_end_event_initialize(event)
+ elif event_class is DocumentStartEvent:
+ version_directive = NULL
+ if event_object.version:
+ version_directive_value.major = event_object.version[0]
+ version_directive_value.minor = event_object.version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if event_object.tags:
+ if len(event_object.tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ cache = []
+ for handle in event_object.tags:
+ prefix = event_object.tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
+ tag_directives_end.handle = PyString_AS_STRING(handle)
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
+ tag_directives_end.prefix = PyString_AS_STRING(prefix)
+ tag_directives_end = tag_directives_end+1
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ if yaml_document_start_event_initialize(event, version_directive,
+ tag_directives_start, tag_directives_end, implicit) == 0:
+ raise MemoryError
+ elif event_class is DocumentEndEvent:
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ yaml_document_end_event_initialize(event, implicit)
+ elif event_class is AliasEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if yaml_alias_event_initialize(event, anchor) == 0:
+ raise MemoryError
+ elif event_class is ScalarEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = event_object.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ plain_implicit = 0
+ quoted_implicit = 0
+ if event_object.implicit is not None:
+ plain_implicit = event_object.implicit[0]
+ quoted_implicit = event_object.implicit[1]
+ style_object = event_object.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
+ if yaml_scalar_event_initialize(event, anchor, tag, value, length,
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if event_object.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ if yaml_sequence_start_event_initialize(event, anchor, tag,
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ elif event_class is MappingStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if event_object.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
+ if yaml_mapping_start_event_initialize(event, anchor, tag,
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceEndEvent:
+ yaml_sequence_end_event_initialize(event)
+ elif event_class is MappingEndEvent:
+ yaml_mapping_end_event_initialize(event)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("invalid event %s" % event_object)
+ else:
+ raise TypeError(u"invalid event %s" % event_object)
+ return 1
+
+ def emit(self, event_object):
+ cdef yaml_event_t event
+ self._object_to_event(event_object, &event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+
+ def open(self):
+ cdef yaml_event_t event
+ cdef yaml_encoding_t encoding
+ if self.closed == -1:
+ if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ else:
+ encoding = YAML_UTF8_ENCODING
+ if self.use_encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(&event, encoding)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 0
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is already opened")
+ else:
+ raise SerializerError(u"serializer is already opened")
+
+ def close(self):
+ cdef yaml_event_t event
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 0:
+ yaml_stream_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 1
+
+ def serialize(self, node):
+ cdef yaml_event_t event
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ cache = []
+ version_directive = NULL
+ if self.use_version:
+ version_directive_value.major = self.use_version[0]
+ version_directive_value.minor = self.use_version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if self.use_tags:
+ if len(self.use_tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ for handle in self.use_tags:
+ prefix = self.use_tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
+ tag_directives_end.handle = PyString_AS_STRING(handle)
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
+ tag_directives_end.prefix = PyString_AS_STRING(prefix)
+ tag_directives_end = tag_directives_end+1
+ if yaml_document_start_event_initialize(&event, version_directive,
+ tag_directives_start, tag_directives_end,
+ self.document_start_implicit) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self._anchor_node(node)
+ self._serialize_node(node, None, None)
+ yaml_document_end_event_initialize(&event, self.document_end_implicit)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+
+ cdef int _anchor_node(self, object node) except 0:
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.last_alias_id = self.last_alias_id+1
+ self.anchors[node] = u"id%03d" % self.last_alias_id
+ else:
+ self.anchors[node] = None
+ node_class = node.__class__
+ if node_class is SequenceNode:
+ for item in node.value:
+ self._anchor_node(item)
+ elif node_class is MappingNode:
+ for key, value in node.value:
+ self._anchor_node(key)
+ self._anchor_node(value)
+ return 1
+
+ cdef int _serialize_node(self, object node, object parent, object index) except 0:
+ cdef yaml_event_t event
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef int item_index
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ anchor_object = self.anchors[node]
+ anchor = NULL
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if node in self.serialized_nodes:
+ if yaml_alias_event_initialize(&event, anchor) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ else:
+ node_class = node.__class__
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if node_class is ScalarNode:
+ plain_implicit = 0
+ quoted_implicit = 0
+ tag_object = node.tag
+ if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
+ plain_implicit = 1
+ if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
+ quoted_implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = node.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ style_object = node.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
+ if yaml_scalar_event_initialize(&event, anchor, tag, value, length,
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is SequenceNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(SequenceNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if node.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ if yaml_sequence_start_event_initialize(&event, anchor, tag,
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ item_index = 0
+ for item in node.value:
+ self._serialize_node(item, node, item_index)
+ item_index = item_index+1
+ yaml_sequence_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is MappingNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(MappingNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if node.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
+ if yaml_mapping_start_event_initialize(&event, anchor, tag,
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ for item_key, item_value in node.value:
+ self._serialize_node(item_key, node, None)
+ self._serialize_node(item_value, node, item_key)
+ yaml_mapping_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.ascend_resolver()
+ return 1
+
+cdef int output_handler(void *data, char *buffer, int size) except 0:
+ cdef CEmitter emitter
+ emitter = <CEmitter>data
+ if emitter.dump_unicode == 0:
+ value = PyString_FromStringAndSize(buffer, size)
+ else:
+ value = PyUnicode_DecodeUTF8(buffer, size, 'strict')
+ emitter.stream.write(value)
+ return 1
+
diff --git a/contrib/python/ruamel.yaml.clib/py3/ya.make b/contrib/python/ruamel.yaml.clib/py3/ya.make
new file mode 100644
index 0000000000..af7562fd1c
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/py3/ya.make
@@ -0,0 +1,31 @@
+PY3_LIBRARY()
+
+VERSION(0.2.7)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/libs/yaml
+)
+
+ADDINCL(
+ contrib/python/ruamel.yaml.clib/py3
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ CYTHON_C
+ _ruamel_yaml.pyx
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/ruamel.yaml.clib/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/ruamel.yaml.clib/ya.make b/contrib/python/ruamel.yaml.clib/ya.make
new file mode 100644
index 0000000000..acaec752ef
--- /dev/null
+++ b/contrib/python/ruamel.yaml.clib/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/ruamel.yaml.clib/py2)
+ELSE()
+ PEERDIR(contrib/python/ruamel.yaml.clib/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/ruamel.yaml/py2/.dist-info/METADATA b/contrib/python/ruamel.yaml/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..92fc1d4906
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/.dist-info/METADATA
@@ -0,0 +1,815 @@
+Metadata-Version: 2.1
+Name: ruamel.yaml
+Version: 0.16.13
+Summary: ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order
+Home-page: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree
+Author: Anthon van der Neut
+Author-email: a.van.der.neut@ruamel.eu
+License: MIT license
+Keywords: yaml 1.2 parser round-trip preserve quotes order config
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: Jython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Classifier: Typing :: Typed
+Description-Content-Type: text/x-rst
+Requires-Dist: ruamel.yaml.clib (>=0.1.2) ; platform_python_implementation=="CPython" and python_version<"3.10"
+Requires-Dist: ruamel.ordereddict ; platform_python_implementation=="CPython" and python_version<="2.7"
+Provides-Extra: docs
+Requires-Dist: ryd ; extra == 'docs'
+Provides-Extra: jinja2
+Requires-Dist: ruamel.yaml.jinja2 (>=0.2) ; extra == 'jinja2'
+
+
+ruamel.yaml
+===========
+
+``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
+
+:version: 0.16.13
+:updated: 2021-03-05
+:documentation: http://yaml.readthedocs.io
+:repository: https://sourceforge.net/projects/ruamel-yaml/
+:pypi: https://pypi.org/project/ruamel.yaml/
+
+*The 0.16.13 release is the last that will be tested to be working on Python 2.7.
+The 0.17 series will still be tested on Python 3.5, but the 0.18 will not. The
+0.17 series will also stop support for the old PyYAML functions, so a `YAML()` instance
+will need to be created.*
+
+*Please adjust your dependencies accordingly if necessary.*
+
+
+Starting with version 0.15.0 the way YAML files are loaded and dumped
+is changing. See the API doc for details. Currently existing
+functionality will throw a warning before being changed/removed.
+**For production systems you should pin the version being used with
+``ruamel.yaml<=0.15``**. There might be bug fixes in the 0.14 series,
+but new functionality is likely only to be available via the new API.
+
+If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop
+me an email, preferably with some information on how you use the
+package (or a link to bitbucket/github) and I'll keep you informed
+when the status of the API is stable enough to make the transition.
+
+* `Overview <http://yaml.readthedocs.org/en/latest/overview.html>`_
+* `Installing <http://yaml.readthedocs.org/en/latest/install.html>`_
+* `Basic Usage <http://yaml.readthedocs.org/en/latest/basicuse.html>`_
+* `Details <http://yaml.readthedocs.org/en/latest/detail.html>`_
+* `Examples <http://yaml.readthedocs.org/en/latest/example.html>`_
+* `API <http://yaml.readthedocs.org/en/latest/api.html>`_
+* `Differences with PyYAML <http://yaml.readthedocs.org/en/latest/pyyaml.html>`_
+
+.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable
+ :target: https://yaml.readthedocs.org/en/stable
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
+.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw
+ :target: https://pypi.org/project/ruamel.yaml/
+
+.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw
+ :target: https://pypi.org/project/oitnb/
+
+.. image:: http://www.mypy-lang.org/static/mypy_badge.svg
+ :target: http://mypy-lang.org/
+
+ChangeLog
+=========
+
+.. should insert NEXT: at the beginning of line for next key (with empty line)
+
+0.16.13 (2021-03-05):
+ - fix for issue 359: could not update() CommentedMap with keyword arguments
+ (reported by `Steve Franchak <https://sourceforge.net/u/binaryadder/>`__)
+ - fix for issue 365: unable to dump mutated TimeStamp objects
+    (reported by `Anton Akmerov <https://sourceforge.net/u/akhmerov/>`__)
+  - fix for issue 371: unable to add comment without starting space
+    (reported by `Mark Grandi <https://sourceforge.net/u/mgrandi>`__)
+ - fix for issue 373: recursive call to walk_tree not preserving all params
+ (reported by `eulores <https://sourceforge.net/u/eulores/>`__)
+ - a None value in a flow-style sequence is now dumped as `null` instead
+ of `!!null ''` (reported by mcarans on
+    `StackOverflow <https://stackoverflow.com/a/66489600/1307905>`__)
+
+0.16.12 (2020-09-04):
+ - update links in doc
+
+0.16.11 (2020-09-03):
+ - workaround issue with setuptools 0.50 and importing pip ( fix by jaraco
+ https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580 )
+
+0.16.10 (2020-02-12):
+ - (auto) updated image references in README to sourceforge
+
+0.16.9 (2020-02-11):
+ - update CHANGES
+
+0.16.8 (2020-02-11):
+ - update requirements so that ruamel.yaml.clib is installed for 3.8,
+ as it has become available (via manylinux builds)
+
+0.16.7 (2020-01-30):
+  - fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+ - fix error in dumping literal scalar in sequence with comments before element
+ (reported by `EJ Etherington <https://sourceforge.net/u/ejether/>`__)
+
+0.16.6 (2020-01-20):
+ - fix empty string mapping key roundtripping with preservation of quotes as `? ''`
+ (reported via email by Tomer Aharoni).
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard
+ <https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/>`__)
+ - adjust deprecation warning test for Hashable, as that no longer warns (reported
+ by `Jason Montleon <https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/>`__)
+
+0.16.5 (2019-08-18):
+ - allow for ``YAML(typ=['unsafe', 'pytypes'])``
+
+0.16.4 (2019-08-16):
+ - fix output of TAG directives with # (reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+
+0.16.3 (2019-08-15):
+ - split construct_object
+ - change stuff back to keep mypy happy
+ - move setting of version based on YAML directive to scanner, allowing to
+ check for file version during TAG directive scanning
+
+0.16.2 (2019-08-15):
+ - preserve YAML and TAG directives on roundtrip, correctly output #
+ in URL for YAML 1.2 (both reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+0.16.1 (2019-08-08):
+ - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz
+ <https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/>`__)
+ - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by
+ `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+0.16.0 (2019-07-25):
+ - split of C source that generates .so file to ruamel.yaml.clib
+ - duplicate keys are now an error when working with the old API as well
+
+0.15.100 (2019-07-17):
+ - fixing issue with dumping deep-copied data from commented YAML, by
+ providing both the memo parameter to __deepcopy__, and by allowing
+ startmarks to be compared on their content (reported by `Theofilos
+ Petsios
+ <https://bitbucket.org/%7Be550bc5d-403d-4fda-820b-bebbe71796d3%7D/>`__)
+
+0.15.99 (2019-07-12):
+ - add `py.typed` to distribution, based on a PR submitted by
+ `Michael Crusoe
+ <https://bitbucket.org/%7Bc9fbde69-e746-48f5-900d-34992b7860c8%7D/>`__
+ - merge PR 40 (also by Michael Crusoe) to more accurately specify
+ repository in the README (also reported in a misunderstood issue
+ some time ago)
+
+0.15.98 (2019-07-09):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed
+ for Python 3.8.0b2 (reported by `John Vandenberg
+ <https://bitbucket.org/%7B6d4e8487-3c97-4dab-a060-088ec50c682c%7D/>`__)
+
+0.15.97 (2019-06-06):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for
+ Python 3.8.0b1
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for
+ Python 3.8.0a4 (reported by `Anthony Sottile
+ <https://bitbucket.org/%7B569cc8ea-0d9e-41cb-94a4-19ea517324df%7D/>`__)
+
+0.15.96 (2019-05-16):
+ - fix failure to indent comments on round-trip anchored block style
+ scalars in block sequence (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+
+0.15.95 (2019-05-16):
+ - fix failure to round-trip anchored scalars in block sequence
+ (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+ - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18
+ <https://www.python.org/dev/peps/pep-0429/>`__)
+
+0.15.94 (2019-04-23):
+ - fix missing line-break after end-of-file comments not ending in
+ line-break (reported by `Philip Thompson
+ <https://bitbucket.org/%7Be42ba205-0876-4151-bcbe-ccaea5bd13ce%7D/>`__)
+
+0.15.93 (2019-04-21):
+ - fix failure to parse empty implicit flow mapping key
+  - in YAML 1.1 plain scalars `y`, 'n', `Y`, and 'N' are now
+ correctly recognised as booleans and such strings dumped quoted
+ (reported by `Marcel Bollmann
+ <https://bitbucket.org/%7Bd8850921-9145-4ad0-ac30-64c3bd9b036d%7D/>`__)
+
+0.15.92 (2019-04-16):
+ - fix failure to parse empty implicit block mapping key (reported by
+ `Nolan W <https://bitbucket.org/i2labs/>`__)
+
+0.15.91 (2019-04-05):
+ - allowing duplicate keys would not work for merge keys (reported by mamacdon on
+ `StackOverflow <https://stackoverflow.com/questions/55540686/>`__
+
+0.15.90 (2019-04-04):
+ - fix issue with updating `CommentedMap` from list of tuples (reported by
+ `Peter Henry <https://bitbucket.org/mosbasik/>`__)
+
+0.15.89 (2019-02-27):
+ - fix for items with flow-mapping in block sequence output on single line
+ (reported by `Zahari Dim <https://bitbucket.org/zahari_dim/>`__)
+ - fix for safe dumping erroring in creation of representereror when dumping namedtuple
+ (reported and solution by `Jaakko Kantojärvi <https://bitbucket.org/raphendyr/>`__)
+
+0.15.88 (2019-02-12):
+  - fix inclusion of python code from the subpackage data (containing extra tests,
+ reported by `Florian Apolloner <https://bitbucket.org/apollo13/>`__)
+
+0.15.87 (2019-01-22):
+ - fix problem with empty lists and the code to reinsert merge keys (reported via email
+ by Zaloo)
+
+0.15.86 (2019-01-16):
+ - reinsert merge key in its old position (reported by grumbler on
+ `StackOverflow <https://stackoverflow.com/a/54206512/1307905>`__)
+ - fix for issue with non-ASCII anchor names (reported and fix
+ provided by Dandaleon Flux via email)
+ - fix for issue when parsing flow mapping value starting with colon (in pure Python only)
+ (reported by `FichteFoll <https://bitbucket.org/FichteFoll/>`__)
+
+0.15.85 (2019-01-08):
+ - the types used by ``SafeConstructor`` for mappings and sequences can
+ now by set by assigning to ``XXXConstructor.yaml_base_dict_type``
+ (and ``..._list_type``), preventing the need to copy two methods
+ with 50+ lines that had ``var = {}`` hardcoded. (Implemented to
+ help solve an feature request by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__ in an easier way)
+
+0.15.84 (2019-01-07):
+ - fix for ``CommentedMap.copy()`` not returning ``CommentedMap``, let alone copying comments etc.
+ (reported by `Anthony Sottile <https://bitbucket.org/asottile/>`__)
+
+0.15.83 (2019-01-02):
+ - fix for bug in roundtripping aliases used as key (reported via email by Zaloo)
+
+0.15.82 (2018-12-28):
+ - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors
+ do not need a referring alias for these (reported by
+ `Alex Harvey <https://bitbucket.org/alexharv074/>`__)
+ - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo
+ <https://bitbucket.org/zaloo/>`__)
+
+0.15.81 (2018-12-06):
+ - fix issue dumping methods of metaclass derived classes (reported and fix provided
+ by `Douglas Raillard <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.80 (2018-11-26):
+ - fix issue emitting BEL character when round-tripping invalid folded input
+ (reported by Isaac on `StackOverflow <https://stackoverflow.com/a/53471217/1307905>`__)
+
+0.15.79 (2018-11-21):
+ - fix issue with anchors nested deeper than alias (reported by gaFF on
+ `StackOverflow <https://stackoverflow.com/a/53397781/1307905>`__)
+
+0.15.78 (2018-11-15):
+ - fix setup issue for 3.8 (reported by `Sidney Kuyateh
+ <https://bitbucket.org/autinerd/>`__)
+
+0.15.77 (2018-11-09):
+ - setting `yaml.sort_base_mapping_type_on_output = False`, will prevent
+ explicit sorting by keys in the base representer of mappings. Roundtrip
+ already did not do this. Usage only makes real sense for Python 3.6+
+ (feature request by `Sebastian Gerber <https://bitbucket.org/spacemanspiff2007/>`__).
+ - implement Python version check in YAML metadata in ``_test/test_z_data.py``
+
+0.15.76 (2018-11-01):
+ - fix issue with empty mapping and sequence loaded as flow-style
+ (mapping reported by `Min RK <https://bitbucket.org/minrk/>`__, sequence
+ by `Maged Ahmed <https://bitbucket.org/maged2/>`__)
+
+0.15.75 (2018-10-27):
+ - fix issue with single '?' scalar (reported by `Terrance
+ <https://bitbucket.org/OllieTerrance/>`__)
+ - fix issue with duplicate merge keys (prompted by `answering
+ <https://stackoverflow.com/a/52852106/1307905>`__ a
+ `StackOverflow question <https://stackoverflow.com/q/52851168/1307905>`__
+ by `math <https://stackoverflow.com/users/1355634/math>`__)
+
+0.15.74 (2018-10-17):
+ - fix dropping of comment on rt before sequence item that is sequence item
+ (reported by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+
+0.15.73 (2018-10-16):
+ - fix irregular output on pre-comment in sequence within sequence (reported
+ by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+ - allow non-compact (i.e. next line) dumping sequence/mapping within sequence.
+
+0.15.72 (2018-10-06):
+ - fix regression on explicit 1.1 loading with the C based scanner/parser
+ (reported by `Tomas Vavra <https://bitbucket.org/xtomik/>`__)
+
+0.15.71 (2018-09-26):
+ - some of the tests now live in YAML files in the
+ `yaml.data <https://bitbucket.org/ruamel/yaml.data>`__ repository.
+ ``_test/test_z_data.py`` processes these.
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by
+ `Dan Helfman <https://bitbucket.org/dhelfman/>`__)
+ - fix regression with non-root literal scalars that needed indent indicator
+ (reported by `Clark Breyman <https://bitbucket.org/clarkbreyman/>`__)
+ - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+ - issue with self-referring object creation
+ (reported and fix by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.70 (2018-09-21):
+ - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list,
+ reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON
+ dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``.
+ (Proposed by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__, with feedback
+ from `blhsing <https://stackoverflow.com/users/6890912/blhsing>`__ on
+ `StackOverflow <https://stackoverflow.com/q/52314186/1307905>`__)
+
+0.15.69 (2018-09-20):
+ - fix issue with dump_all gobbling end-of-document comments on parsing
+ (reported by `Pierre B. <https://bitbucket.org/octplane/>`__)
+
+0.15.68 (2018-09-20):
+  - fix issue with parsable, but incorrect output with nested flow-style sequences
+ (reported by `Dougal Seeley <https://bitbucket.org/dseeley/>`__)
+ - fix issue with loading Python objects that have __setstate__ and recursion in parameters
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.67 (2018-09-19):
+ - fix issue with extra space inserted with non-root literal strings
+ (Issue reported and PR with fix provided by
+ `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+
+0.15.66 (2018-09-07):
+ - fix issue with fold indicating characters inserted in safe_load-ed folded strings
+ (reported by `Maximilian Hils <https://bitbucket.org/mhils/>`__).
+
+0.15.65 (2018-09-07):
+  - fix issue #232 revert to throw ParserError for unexpected ``]``
+ and ``}`` instead of IndexError. (Issue reported and PR with fix
+ provided by `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+ - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email)
+ - indent root level literal scalars that have directive or document end markers
+ at the beginning of a line
+
+0.15.64 (2018-08-30):
+ - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]``
+ - single entry mappings in flow sequences now written by default without braces,
+ set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force
+ getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]``
+ - fix issue when roundtripping floats starting with a dot such as ``.5``
+ (reported by `Harrison Gregg <https://bitbucket.org/HarrisonGregg/>`__)
+
+0.15.63 (2018-08-29):
+ - small fix only necessary for Windows users that don't use wheels.
+
+0.15.62 (2018-08-29):
+ - C based reader/scanner & emitter now allow setting of 1.2 as YAML version.
+ ** The loading/dumping is still YAML 1.1 code**, so use the common subset of
+ YAML 1.2 and 1.1 (reported by `Ge Yang <https://bitbucket.org/yangge/>`__)
+
+0.15.61 (2018-08-23):
+ - support for round-tripping folded style scalars (initially requested
+ by `Johnathan Viduchinsky <https://bitbucket.org/johnathanvidu/>`__)
+ - update of C code
+ - speed up of scanning (~30% depending on the input)
+
+0.15.60 (2018-08-18):
+ - again allow single entry map in flow sequence context (reported by
+ `Lee Goolsbee <https://bitbucket.org/lgoolsbee/>`__)
+ - cleanup for mypy
+ - spurious print in library (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__), now automatically checked
+
+0.15.59 (2018-08-17):
+ - issue with C based loader and leading zeros (reported by
+ `Tom Hamilton Stubber <https://bitbucket.org/TomHamiltonStubber/>`__)
+
+0.15.58 (2018-08-17):
+ - simple mappings can now be used as keys when round-tripping::
+
+ {a: 1, b: 2}: hello world
+
+ although using the obvious operations (del, popitem) on the key will
+ fail, you can mutilate it by going through its attributes. If you load the
+ above YAML in `d`, then changing the value is cumbersome:
+
+ d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"}
+
+ and changing the key even more so:
+
+ d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop(
+ CommentedKeyMap([('a', 1), ('b', 2)]))
+
+ (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result
+ in a different order, of the keys of the key, in the output)
+ - check integers to dump with 1.2 patterns instead of 1.1 (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__)
+
+
+0.15.57 (2018-08-15):
+ - Fix that CommentedSeq could no longer be used in adding or do a sort
+ (reported by `Christopher Wright <https://bitbucket.org/CJ-Wright4242/>`__)
+
+0.15.56 (2018-08-15):
+ - fix issue with ``python -O`` optimizing away code (reported, and detailed cause
+ pinpointed, by `Alex Grönholm <https://bitbucket.org/agronholm/>`__)
+
+0.15.55 (2018-08-14):
+ - unmade ``CommentedSeq`` a subclass of ``list``. It is now
+ indirectly a subclass of the standard
+ ``collections.abc.MutableSequence`` (without .abc if you are
+ still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'),
+ list)``) anywhere in your code replace ``list`` with
+ ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of
+ the abstract baseclass ``ruamel.yaml.compat.MutableScliceableSequence``,
+ with the result that *(extended) slicing is supported on
+ ``CommentedSeq``*.
+ (reported by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__)
+ - duplicate keys (or their values) with non-ascii now correctly
+ report in Python2, instead of raising a Unicode error.
+ (Reported by `Jonathan Pyle <https://bitbucket.org/jonathan_pyle/>`__)
+
+0.15.54 (2018-08-13):
+ - fix issue where a comment could pop-up twice in the output (reported by
+ `Mike Kazantsev <https://bitbucket.org/mk_fg/>`__ and by
+ `Nate Peterson <https://bitbucket.org/ndpete21/>`__)
+ - fix issue where JSON object (mapping) without spaces was not parsed
+ properly (reported by `Marc Schmidt <https://bitbucket.org/marcj/>`__)
+ - fix issue where comments after empty flow-style mappings were not emitted
+ (reported by `Qinfench Chen <https://bitbucket.org/flyin5ish/>`__)
+
+0.15.53 (2018-08-12):
+ - fix issue with flow style mapping with comments gobbled newline (reported
+ by `Christopher Lambert <https://bitbucket.org/XN137/>`__)
+ - fix issue where single '+' under YAML 1.2 was interpreted as
+ integer, erroring out (reported by `Jethro Yu
+ <https://bitbucket.org/jcppkkk/>`__)
+
+0.15.52 (2018-08-09):
+ - added `.copy()` mapping representation for round-tripping
+ (``CommentedMap``) to fix incomplete copies of merged mappings
+ (reported by `Will Richards
+ <https://bitbucket.org/will_richards/>`__)
+ - Also unmade that class a subclass of ordereddict to solve incorrect behaviour
+ for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported independently by
+ `Tim Olsson <https://bitbucket.org/tgolsson/>`__ and
+ `Filip Matzner <https://bitbucket.org/FloopCZ/>`__)
+
+0.15.51 (2018-08-08):
+ - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard
+ <https://bitbucket.org/DouglasRaillard/>`__)
+ - Fix spurious trailing white-space caused when the comment start
+ column was no longer reached and there was no actual EOL comment
+ (e.g. following empty line) and doing substitutions, or when
+ quotes around scalars got dropped. (reported by `Thomas Guillet
+ <https://bitbucket.org/guillett/>`__)
+
+0.15.50 (2018-08-05):
+ - Allow ``YAML()`` as a context manager for output, thereby making it much easier
+ to generate multi-documents in a stream.
+ - Fix issue with incorrect type information for `load()` and `dump()` (reported
+ by `Jimbo Jim <https://bitbucket.org/jimbo1qaz/>`__)
+
+0.15.49 (2018-08-05):
+ - fix preservation of leading newlines in root level literal style scalar,
+ and preserve comment after literal style indicator (``| # some comment``)
+ Both needed for round-tripping multi-doc streams in
+ `ryd <https://pypi.org/project/ryd/>`__.
+
+0.15.48 (2018-08-03):
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
+0.15.47 (2018-07-31):
+ - fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by
+ `Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
+
+
+0.15.46 (2018-07-29):
+ - fixed DeprecationWarning for importing from ``collections`` on 3.7
+ (issue 210, reported by `Reinoud Elhorst
+ <https://bitbucket.org/reinhrst/>`__). It was `difficult to find
+ why tox/pytest did not report
+ <https://stackoverflow.com/q/51573204/1307905>`__ and as time
+ consuming to actually `fix
+ <https://stackoverflow.com/a/51573205/1307905>`__ the tests.
+
+0.15.45 (2018-07-26):
+ - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration
+ (PR provided by `Zachary Buhman <https://bitbucket.org/buhman/>`__,
+ also reported by `Steven Hiscocks <https://bitbucket.org/sdhiscocks/>`__.
+
+0.15.44 (2018-07-14):
+ - Correct loading plain scalars consisting of numerals only and
+ starting with `0`, when not explicitly specifying YAML version
+ 1.1. This also fixes the issue about dumping string `'019'` as
+ plain scalars as reported by `Min RK
+    <https://bitbucket.org/minrk/>`__, that prompted this change.
+
+0.15.43 (2018-07-12):
+ - merge PR33: Python2.7 on Windows is narrow, but has no
+ ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__)
+ - ``register_class()`` now returns class (proposed by
+ `Mike Nerone <https://bitbucket.org/Manganeez/>`__}
+
+0.15.42 (2018-07-01):
+ - fix regression showing only on narrow Python 2.7 (py27mu) builds
+ (with help from
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__ and
+ `Colm O'Connor <https://bitbucket.org/colmoconnorgithub/>`__).
+ - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as
+ 3.4/3.5/3.6/3.7/pypy
+
+0.15.41 (2018-06-27):
+ - add detection of C-compile failure (investigation prompted by
+    `StackOverflow <https://stackoverflow.com/a/51057399/1307905>`__ by
+ `Emmanuel Blot <https://stackoverflow.com/users/8233409/emmanuel-blot>`__),
+ which was removed while no longer dependent on ``libyaml``, C-extensions
+ compilation still needs a compiler though.
+
+0.15.40 (2018-06-18):
+ - added links to landing places as suggested in issue 190 by
+ `KostisA <https://bitbucket.org/ankostis/>`__
+ - fixes issue #201: decoding unicode escaped tags on Python2, reported
+ by `Dan Abolafia <https://bitbucket.org/danabo/>`__
+
+0.15.39 (2018-06-17):
+ - merge PR27 improving package startup time (and loading when regexp not
+ actually used), provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__
+
+0.15.38 (2018-06-13):
+ - fix for losing precision when roundtripping floats by
+ `Rolf Wojtech <https://bitbucket.org/asomov/>`__
+ - fix for hardcoded dir separator not working for Windows by
+ `Nuno André <https://bitbucket.org/nu_no/>`__
+ - typo fix by `Andrey Somov <https://bitbucket.org/asomov/>`__
+
+0.15.37 (2018-03-21):
+ - again trying to create installable files for 187
+
+0.15.36 (2018-02-07):
+ - fix issue 187, incompatibility of C extension with 3.7 (reported by
+ Daniel Blanchard)
+
+0.15.35 (2017-12-03):
+ - allow ``None`` as stream when specifying ``transform`` parameters to
+ ``YAML.dump()``.
+ This is useful if the transforming function doesn't return a meaningful value
+ (inspired by `StackOverflow <https://stackoverflow.com/q/47614862/1307905>`__ by
+ `rsaw <https://stackoverflow.com/users/406281/rsaw>`__).
+
+0.15.34 (2017-09-17):
+ - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka)
+
+0.15.33 (2017-08-31):
+ - support for "undefined" round-tripping tagged scalar objects (in addition to
+ tagged mapping object). Inspired by a use case presented by Matthew Patton
+ on `StackOverflow <https://stackoverflow.com/a/45967047/1307905>`__.
+ - fix issue 148: replace cryptic error message when using !!timestamp with an
+ incorrectly formatted or non- scalar. Reported by FichteFoll.
+
+0.15.32 (2017-08-21):
+ - allow setting ``yaml.default_flow_style = None`` (default: ``False``) for
+ for ``typ='rt'``.
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float``
+ (reported by jan.brezina@tul.cz)
+
+0.15.31 (2017-08-15):
+ - fix Comment dumping
+
+0.15.30 (2017-08-14):
+ - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}``
+ (reported on `StackOverflow <https://stackoverflow.com/q/45681626/1307905>`__ by
+ `mjalkio <https://stackoverflow.com/users/5130525/mjalkio>`_
+
+0.15.29 (2017-08-14):
+ - fix issue #51: different indents for mappings and sequences (reported by
+ Alex Harvey)
+ - fix for flow sequence/mapping as element/value of block sequence with
+ sequence-indent minus dash-offset not equal two.
+
+0.15.28 (2017-08-13):
+ - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron)
+
+0.15.27 (2017-08-13):
+  - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambiguous
+ (reported by nowox)
+ - fix lists within lists which would make comments disappear
+
+0.15.26 (2017-08-10):
+ - fix for disappearing comment after empty flow sequence (reported by
+ oit-tzhimmash)
+
+0.15.25 (2017-08-09):
+ - fix for problem with dumping (unloaded) floats (reported by eyenseo)
+
+0.15.24 (2017-08-09):
+ - added ScalarFloat which supports roundtripping of 23.1, 23.100,
+ 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas
+ are not preserved/supported (yet, is anybody using that?).
+ - (finally) fixed longstanding issue 23 (reported by `Antony Sottile
+ <https://bitbucket.org/asottile/>`__), now handling comment between block
+ mapping key and value correctly
+ - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML
+ provided by Cecil Curry)
+ - allow setting of boolean representation (`false`, `true`) by using:
+ ``yaml.boolean_representation = [u'False', u'True']``
+
+0.15.23 (2017-08-01):
+ - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina)
+
+0.15.22 (2017-07-28):
+ - fix for round_tripping singe excl. mark tags doubling (reported and fix by Jan Brezina)
+
+0.15.21 (2017-07-25):
+ - fix for writing unicode in new API, (reported on
+ `StackOverflow <https://stackoverflow.com/a/45281922/1307905>`__
+
+0.15.20 (2017-07-23):
+ - wheels for windows including C extensions
+
+0.15.19 (2017-07-13):
+ - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject.
+ - fix for problem using load_all with Path() instance
+ - fix for load_all in combination with zero indent block style literal
+ (``pure=True`` only!)
+
+0.15.18 (2017-07-04):
+ - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag
+ constructor for `including YAML files in a YAML file
+ <https://stackoverflow.com/a/44913652/1307905>`__
+ - some documentation improvements
+ - trigger of doc build on new revision
+
+0.15.17 (2017-07-03):
+ - support for Unicode supplementary Plane **output**
+ (input was already supported, triggered by
+ `this <https://stackoverflow.com/a/44875714/1307905>`__ Stack Overflow Q&A)
+
+0.15.16 (2017-07-01):
+ - minor typing issues (reported and fix provided by
+ `Manvendra Singh <https://bitbucket.org/manu-chroma/>`__
+ - small doc improvements
+
+0.15.15 (2017-06-27):
+ - fix for issue 135, typ='safe' not dumping in Python 2.7
+ (reported by Andrzej Ostrowski <https://bitbucket.org/aostr123/>`__)
+
+0.15.14 (2017-06-25):
+ - fix for issue 133, in setup.py: change ModuleNotFoundError to
+ ImportError (reported and fix by
+ `Asley Drake <https://github.com/aldraco>`__)
+
+0.15.13 (2017-06-24):
+ - suppress duplicate key warning on mappings with merge keys (reported by
+ Cameron Sweeney)
+
+0.15.12 (2017-06-24):
+ - remove fatal dependency of setup.py on wheel package (reported by
+ Cameron Sweeney)
+
+0.15.11 (2017-06-24):
+ - fix for issue 130, regression in nested merge keys (reported by
+ `David Fee <https://bitbucket.org/dfee/>`__)
+
+0.15.10 (2017-06-23):
+ - top level PreservedScalarString not indented if not explicitly asked to
+ - remove Makefile (not very useful anyway)
+ - some mypy additions
+
+0.15.9 (2017-06-16):
+  - fix for issue 127: tagged scalars were always quoted and separated
+ by a newline when in a block sequence (reported and largely fixed by
+ `Tommy Wang <https://bitbucket.org/twang817/>`__)
+
+0.15.8 (2017-06-15):
+ - allow plug-in install via ``install ruamel.yaml[jinja2]``
+
+0.15.7 (2017-06-14):
+ - add plug-in mechanism for load/dump pre resp. post-processing
+
+0.15.6 (2017-06-10):
+ - a set() with duplicate elements now throws error in rt loading
+ - support for toplevel column zero literal/folded scalar in explicit documents
+
+0.15.5 (2017-06-08):
+ - repeat `load()` on a single `YAML()` instance would fail.
+
+0.15.4 (2017-06-08):
+ - `transform` parameter on dump that expects a function taking a
+ string and returning a string. This allows transformation of the output
+ before it is written to stream. This forces creation of the complete output in memory!
+ - some updates to the docs
+
+0.15.3 (2017-06-07):
+ - No longer try to compile C extensions on Windows. Compilation can be forced by setting
+ the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value
+ before starting the `pip install`.
+
+0.15.2 (2017-06-07):
+ - update to conform to mypy 0.511: mypy --strict
+
+0.15.1 (2017-06-07):
+ - `duplicate keys <http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys>`__
+ in mappings generate an error (in the old API this change generates a warning until 0.16)
+  - dependency on ruamel.ordereddict for 2.7 now via extras_require
+
+0.15.0 (2017-06-04):
+ - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all
+ load/dump functions
+ - passing in a non-supported object (e.g. a string) as "stream" will result in a
+ much more meaningful YAMLStreamError.
+ - assigning a normal string value to an existing CommentedMap key or CommentedSeq
+ element will result in a value cast to the previous value's type if possible.
+ - added ``YAML`` class for new API
+
+0.14.12 (2017-05-14):
+ - fix for issue 119, deepcopy not returning subclasses (reported and PR by
+ Constantine Evans <cevans@evanslabs.org>)
+
+0.14.11 (2017-05-01):
+ - fix for issue 103 allowing implicit documents after document end marker line (``...``)
+ in YAML 1.2
+
+0.14.10 (2017-04-26):
+ - fix problem with emitting using cyaml
+
+0.14.9 (2017-04-22):
+ - remove dependency on ``typing`` while still supporting ``mypy``
+ (http://stackoverflow.com/a/43516781/1307905)
+ - fix unclarity in doc that stated 2.6 is supported (reported by feetdust)
+
+0.14.8 (2017-04-19):
+ - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards
+ on all files (reported by `João Paulo Magalhães <https://bitbucket.org/jpmag/>`__)
+
+0.14.7 (2017-04-18):
+ - round trip of integers (decimal, octal, hex, binary) now preserve
+ leading zero(s) padding and underscores. Underscores are presumed
+ to be at regular distances (i.e. ``0o12_345_67`` dumps back as
+ ``0o1_23_45_67`` as the space from the last digit to the
+ underscore before that is the determining factor).
+
+0.14.6 (2017-04-14):
+ - binary, octal and hex integers are now preserved by default. This
+ was a known deficiency. Working on this was prompted by the issue report (112)
+ from devnoname120, as well as the additional experience with `.replace()`
+ on `scalarstring` classes.
+ - fix issues 114: cannot install on Buildozer (reported by mixmastamyk).
+ Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check.
+
+0.14.5 (2017-04-04):
+ - fix issue 109: None not dumping correctly at top level (reported by Andrea Censi)
+ - fix issue 110: .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString
+ would give back "normal" string (reported by sandres23)
+
+0.14.4 (2017-03-31):
+ - fix readme
+
+0.14.3 (2017-03-31):
+ - fix for 0o52 not being a string in YAML 1.1 (reported on
+ `StackOverflow Q&A 43138503 <http://stackoverflow.com/a/43138503/1307905>`__ by
+ `Frank D <http://stackoverflow.com/users/7796630/frank-d>`__)
+
+0.14.2 (2017-03-23):
+ - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch)
+
+0.14.1 (2017-03-22):
+ - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré)
+
+0.14.0 (2017-03-21):
+ - updates for mypy --strict
+ - preparation for moving away from inheritance in Loader and Dumper, calls from e.g.
+ the Representer to the Serializer.serialize() are now done via the attribute
+ .serializer.serialize(). Usage of .serialize() outside of Serializer will be
+ deprecated soon
+ - some extra tests on main.py functions
+
+----
+
+For older changes see the file
+`CHANGES <https://bitbucket.org/ruamel/yaml/src/default/CHANGES>`_
+
+
diff --git a/contrib/python/ruamel.yaml/py2/.dist-info/top_level.txt b/contrib/python/ruamel.yaml/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..282b116fc6
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+ruamel
diff --git a/contrib/python/ruamel.yaml/py2/LICENSE b/contrib/python/ruamel.yaml/py2/LICENSE
new file mode 100644
index 0000000000..3f65b07a8c
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/LICENSE
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2014-2021 Anthon van der Neut, Ruamel bvba
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/contrib/python/ruamel.yaml/py2/README.rst b/contrib/python/ruamel.yaml/py2/README.rst
new file mode 100644
index 0000000000..0adfa00668
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/README.rst
@@ -0,0 +1,779 @@
+
+ruamel.yaml
+===========
+
+``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
+
+:version: 0.16.13
+:updated: 2021-03-05
+:documentation: http://yaml.readthedocs.io
+:repository: https://sourceforge.net/projects/ruamel-yaml/
+:pypi: https://pypi.org/project/ruamel.yaml/
+
+*The 0.16.13 release is the last that will be tested to be working on Python 2.7.
+The 0.17 series will still be tested on Python 3.5, but the 0.18 will not. The
+0.17 series will also stop support for the old PyYAML functions, so a `YAML()` instance
+will need to be created.*
+
+*Please adjust your dependencies accordingly if necessary.*
+
+
+Starting with version 0.15.0 the way YAML files are loaded and dumped
+is changing. See the API doc for details. Currently existing
+functionality will throw a warning before being changed/removed.
+**For production systems you should pin the version being used with
+``ruamel.yaml<=0.15``**. There might be bug fixes in the 0.14 series,
+but new functionality is likely only to be available via the new API.
+
+If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop
+me an email, preferably with some information on how you use the
+package (or a link to bitbucket/github) and I'll keep you informed
+when the status of the API is stable enough to make the transition.
+
+* `Overview <http://yaml.readthedocs.org/en/latest/overview.html>`_
+* `Installing <http://yaml.readthedocs.org/en/latest/install.html>`_
+* `Basic Usage <http://yaml.readthedocs.org/en/latest/basicuse.html>`_
+* `Details <http://yaml.readthedocs.org/en/latest/detail.html>`_
+* `Examples <http://yaml.readthedocs.org/en/latest/example.html>`_
+* `API <http://yaml.readthedocs.org/en/latest/api.html>`_
+* `Differences with PyYAML <http://yaml.readthedocs.org/en/latest/pyyaml.html>`_
+
+.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable
+ :target: https://yaml.readthedocs.org/en/stable
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
+.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw
+ :target: https://pypi.org/project/ruamel.yaml/
+
+.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw
+ :target: https://pypi.org/project/oitnb/
+
+.. image:: http://www.mypy-lang.org/static/mypy_badge.svg
+ :target: http://mypy-lang.org/
+
+ChangeLog
+=========
+
+.. should insert NEXT: at the beginning of line for next key (with empty line)
+
+0.16.13 (2021-03-05):
+ - fix for issue 359: could not update() CommentedMap with keyword arguments
+ (reported by `Steve Franchak <https://sourceforge.net/u/binaryadder/>`__)
+ - fix for issue 365: unable to dump mutated TimeStamp objects
+    (reported by `Anton Akmerov <https://sourceforge.net/u/akhmerov/>`__)
+  - fix for issue 371: unable to add comment without starting space
+    (reported by `Mark Grandi <https://sourceforge.net/u/mgrandi>`__)
+ - fix for issue 373: recursive call to walk_tree not preserving all params
+ (reported by `eulores <https://sourceforge.net/u/eulores/>`__)
+ - a None value in a flow-style sequence is now dumped as `null` instead
+ of `!!null ''` (reported by mcarans on
+    `StackOverflow <https://stackoverflow.com/a/66489600/1307905>`__)
+
+0.16.12 (2020-09-04):
+ - update links in doc
+
+0.16.11 (2020-09-03):
+ - workaround issue with setuptools 0.50 and importing pip ( fix by jaraco
+ https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580 )
+
+0.16.10 (2020-02-12):
+ - (auto) updated image references in README to sourceforge
+
+0.16.9 (2020-02-11):
+ - update CHANGES
+
+0.16.8 (2020-02-11):
+ - update requirements so that ruamel.yaml.clib is installed for 3.8,
+ as it has become available (via manylinux builds)
+
+0.16.7 (2020-01-30):
+  - fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+ - fix error in dumping literal scalar in sequence with comments before element
+ (reported by `EJ Etherington <https://sourceforge.net/u/ejether/>`__)
+
+0.16.6 (2020-01-20):
+ - fix empty string mapping key roundtripping with preservation of quotes as `? ''`
+ (reported via email by Tomer Aharoni).
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard
+ <https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/>`__)
+ - adjust deprecation warning test for Hashable, as that no longer warns (reported
+ by `Jason Montleon <https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/>`__)
+
+0.16.5 (2019-08-18):
+ - allow for ``YAML(typ=['unsafe', 'pytypes'])``
+
+0.16.4 (2019-08-16):
+ - fix output of TAG directives with # (reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+
+0.16.3 (2019-08-15):
+ - split construct_object
+ - change stuff back to keep mypy happy
+ - move setting of version based on YAML directive to scanner, allowing to
+ check for file version during TAG directive scanning
+
+0.16.2 (2019-08-15):
+ - preserve YAML and TAG directives on roundtrip, correctly output #
+ in URL for YAML 1.2 (both reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+0.16.1 (2019-08-08):
+ - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz
+ <https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/>`__)
+ - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by
+ `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+0.16.0 (2019-07-25):
+ - split of C source that generates .so file to ruamel.yaml.clib
+ - duplicate keys are now an error when working with the old API as well
+
+0.15.100 (2019-07-17):
+ - fixing issue with dumping deep-copied data from commented YAML, by
+ providing both the memo parameter to __deepcopy__, and by allowing
+ startmarks to be compared on their content (reported by `Theofilos
+ Petsios
+ <https://bitbucket.org/%7Be550bc5d-403d-4fda-820b-bebbe71796d3%7D/>`__)
+
+0.15.99 (2019-07-12):
+ - add `py.typed` to distribution, based on a PR submitted by
+ `Michael Crusoe
+ <https://bitbucket.org/%7Bc9fbde69-e746-48f5-900d-34992b7860c8%7D/>`__
+ - merge PR 40 (also by Michael Crusoe) to more accurately specify
+ repository in the README (also reported in a misunderstood issue
+ some time ago)
+
+0.15.98 (2019-07-09):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed
+ for Python 3.8.0b2 (reported by `John Vandenberg
+ <https://bitbucket.org/%7B6d4e8487-3c97-4dab-a060-088ec50c682c%7D/>`__)
+
+0.15.97 (2019-06-06):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for
+ Python 3.8.0b1
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for
+ Python 3.8.0a4 (reported by `Anthony Sottile
+ <https://bitbucket.org/%7B569cc8ea-0d9e-41cb-94a4-19ea517324df%7D/>`__)
+
+0.15.96 (2019-05-16):
+ - fix failure to indent comments on round-trip anchored block style
+ scalars in block sequence (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+
+0.15.95 (2019-05-16):
+ - fix failure to round-trip anchored scalars in block sequence
+ (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+ - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18
+ <https://www.python.org/dev/peps/pep-0429/>`__)
+
+0.15.94 (2019-04-23):
+ - fix missing line-break after end-of-file comments not ending in
+ line-break (reported by `Philip Thompson
+ <https://bitbucket.org/%7Be42ba205-0876-4151-bcbe-ccaea5bd13ce%7D/>`__)
+
+0.15.93 (2019-04-21):
+ - fix failure to parse empty implicit flow mapping key
+  - in YAML 1.1 plain scalars `y`, 'n', `Y`, and 'N' are now
+ correctly recognised as booleans and such strings dumped quoted
+ (reported by `Marcel Bollmann
+ <https://bitbucket.org/%7Bd8850921-9145-4ad0-ac30-64c3bd9b036d%7D/>`__)
+
+0.15.92 (2019-04-16):
+ - fix failure to parse empty implicit block mapping key (reported by
+ `Nolan W <https://bitbucket.org/i2labs/>`__)
+
+0.15.91 (2019-04-05):
+ - allowing duplicate keys would not work for merge keys (reported by mamacdon on
+ `StackOverflow <https://stackoverflow.com/questions/55540686/>`__
+
+0.15.90 (2019-04-04):
+ - fix issue with updating `CommentedMap` from list of tuples (reported by
+ `Peter Henry <https://bitbucket.org/mosbasik/>`__)
+
+0.15.89 (2019-02-27):
+ - fix for items with flow-mapping in block sequence output on single line
+ (reported by `Zahari Dim <https://bitbucket.org/zahari_dim/>`__)
+  - fix for safe dumping erroring in creation of representererror when dumping namedtuple
+ (reported and solution by `Jaakko Kantojärvi <https://bitbucket.org/raphendyr/>`__)
+
+0.15.88 (2019-02-12):
+  - fix inclusion of python code from the subpackage data (containing extra tests,
+ reported by `Florian Apolloner <https://bitbucket.org/apollo13/>`__)
+
+0.15.87 (2019-01-22):
+ - fix problem with empty lists and the code to reinsert merge keys (reported via email
+ by Zaloo)
+
+0.15.86 (2019-01-16):
+ - reinsert merge key in its old position (reported by grumbler on
+ `StackOverflow <https://stackoverflow.com/a/54206512/1307905>`__)
+ - fix for issue with non-ASCII anchor names (reported and fix
+ provided by Dandaleon Flux via email)
+ - fix for issue when parsing flow mapping value starting with colon (in pure Python only)
+ (reported by `FichteFoll <https://bitbucket.org/FichteFoll/>`__)
+
+0.15.85 (2019-01-08):
+ - the types used by ``SafeConstructor`` for mappings and sequences can
+ now by set by assigning to ``XXXConstructor.yaml_base_dict_type``
+ (and ``..._list_type``), preventing the need to copy two methods
+ with 50+ lines that had ``var = {}`` hardcoded. (Implemented to
+ help solve an feature request by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__ in an easier way)
+
+0.15.84 (2019-01-07):
+ - fix for ``CommentedMap.copy()`` not returning ``CommentedMap``, let alone copying comments etc.
+ (reported by `Anthony Sottile <https://bitbucket.org/asottile/>`__)
+
+0.15.83 (2019-01-02):
+ - fix for bug in roundtripping aliases used as key (reported via email by Zaloo)
+
+0.15.82 (2018-12-28):
+ - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors
+ do not need a referring alias for these (reported by
+ `Alex Harvey <https://bitbucket.org/alexharv074/>`__)
+ - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo
+ <https://bitbucket.org/zaloo/>`__)
+
+0.15.81 (2018-12-06):
+ - fix issue dumping methods of metaclass derived classes (reported and fix provided
+ by `Douglas Raillard <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.80 (2018-11-26):
+ - fix issue emitting BEL character when round-tripping invalid folded input
+ (reported by Isaac on `StackOverflow <https://stackoverflow.com/a/53471217/1307905>`__)
+
+0.15.79 (2018-11-21):
+ - fix issue with anchors nested deeper than alias (reported by gaFF on
+ `StackOverflow <https://stackoverflow.com/a/53397781/1307905>`__)
+
+0.15.78 (2018-11-15):
+ - fix setup issue for 3.8 (reported by `Sidney Kuyateh
+ <https://bitbucket.org/autinerd/>`__)
+
+0.15.77 (2018-11-09):
+ - setting `yaml.sort_base_mapping_type_on_output = False`, will prevent
+ explicit sorting by keys in the base representer of mappings. Roundtrip
+ already did not do this. Usage only makes real sense for Python 3.6+
+ (feature request by `Sebastian Gerber <https://bitbucket.org/spacemanspiff2007/>`__).
+ - implement Python version check in YAML metadata in ``_test/test_z_data.py``
+
+0.15.76 (2018-11-01):
+ - fix issue with empty mapping and sequence loaded as flow-style
+ (mapping reported by `Min RK <https://bitbucket.org/minrk/>`__, sequence
+ by `Maged Ahmed <https://bitbucket.org/maged2/>`__)
+
+0.15.75 (2018-10-27):
+ - fix issue with single '?' scalar (reported by `Terrance
+ <https://bitbucket.org/OllieTerrance/>`__)
+ - fix issue with duplicate merge keys (prompted by `answering
+ <https://stackoverflow.com/a/52852106/1307905>`__ a
+ `StackOverflow question <https://stackoverflow.com/q/52851168/1307905>`__
+ by `math <https://stackoverflow.com/users/1355634/math>`__)
+
+0.15.74 (2018-10-17):
+ - fix dropping of comment on rt before sequence item that is sequence item
+ (reported by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+
+0.15.73 (2018-10-16):
+ - fix irregular output on pre-comment in sequence within sequence (reported
+ by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+ - allow non-compact (i.e. next line) dumping sequence/mapping within sequence.
+
+0.15.72 (2018-10-06):
+ - fix regression on explicit 1.1 loading with the C based scanner/parser
+ (reported by `Tomas Vavra <https://bitbucket.org/xtomik/>`__)
+
+0.15.71 (2018-09-26):
+ - some of the tests now live in YAML files in the
+ `yaml.data <https://bitbucket.org/ruamel/yaml.data>`__ repository.
+ ``_test/test_z_data.py`` processes these.
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by
+ `Dan Helfman <https://bitbucket.org/dhelfman/>`__)
+ - fix regression with non-root literal scalars that needed indent indicator
+ (reported by `Clark Breyman <https://bitbucket.org/clarkbreyman/>`__)
+ - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+ - issue with self-referring object creation
+ (reported and fix by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.70 (2018-09-21):
+ - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list,
+ reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON
+ dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``.
+ (Proposed by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__, with feedback
+ from `blhsing <https://stackoverflow.com/users/6890912/blhsing>`__ on
+ `StackOverflow <https://stackoverflow.com/q/52314186/1307905>`__)
+
+0.15.69 (2018-09-20):
+ - fix issue with dump_all gobbling end-of-document comments on parsing
+ (reported by `Pierre B. <https://bitbucket.org/octplane/>`__)
+
+0.15.68 (2018-09-20):
+  - fix issue with parsable, but incorrect output with nested flow-style sequences
+ (reported by `Dougal Seeley <https://bitbucket.org/dseeley/>`__)
+ - fix issue with loading Python objects that have __setstate__ and recursion in parameters
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.67 (2018-09-19):
+ - fix issue with extra space inserted with non-root literal strings
+ (Issue reported and PR with fix provided by
+ `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+
+0.15.66 (2018-09-07):
+ - fix issue with fold indicating characters inserted in safe_load-ed folded strings
+ (reported by `Maximilian Hils <https://bitbucket.org/mhils/>`__).
+
+0.15.65 (2018-09-07):
+  - fix issue #232 revert to throw ParserError for unexpected ``]``
+ and ``}`` instead of IndexError. (Issue reported and PR with fix
+ provided by `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+ - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email)
+ - indent root level literal scalars that have directive or document end markers
+ at the beginning of a line
+
+0.15.64 (2018-08-30):
+ - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]``
+ - single entry mappings in flow sequences now written by default without braces,
+ set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force
+ getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]``
+ - fix issue when roundtripping floats starting with a dot such as ``.5``
+ (reported by `Harrison Gregg <https://bitbucket.org/HarrisonGregg/>`__)
+
+0.15.63 (2018-08-29):
+ - small fix only necessary for Windows users that don't use wheels.
+
+0.15.62 (2018-08-29):
+ - C based reader/scanner & emitter now allow setting of 1.2 as YAML version.
+    **The loading/dumping is still YAML 1.1 code**, so use the common subset of
+ YAML 1.2 and 1.1 (reported by `Ge Yang <https://bitbucket.org/yangge/>`__)
+
+0.15.61 (2018-08-23):
+ - support for round-tripping folded style scalars (initially requested
+ by `Johnathan Viduchinsky <https://bitbucket.org/johnathanvidu/>`__)
+ - update of C code
+ - speed up of scanning (~30% depending on the input)
+
+0.15.60 (2018-08-18):
+ - again allow single entry map in flow sequence context (reported by
+ `Lee Goolsbee <https://bitbucket.org/lgoolsbee/>`__)
+ - cleanup for mypy
+ - spurious print in library (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__), now automatically checked
+
+0.15.59 (2018-08-17):
+ - issue with C based loader and leading zeros (reported by
+ `Tom Hamilton Stubber <https://bitbucket.org/TomHamiltonStubber/>`__)
+
+0.15.58 (2018-08-17):
+ - simple mappings can now be used as keys when round-tripping::
+
+ {a: 1, b: 2}: hello world
+
+ although using the obvious operations (del, popitem) on the key will
+ fail, you can mutilate it by going through its attributes. If you load the
+ above YAML in `d`, then changing the value is cumbersome:
+
+ d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"}
+
+ and changing the key even more so:
+
+ d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop(
+ CommentedKeyMap([('a', 1), ('b', 2)]))
+
+ (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result
+ in a different order, of the keys of the key, in the output)
+ - check integers to dump with 1.2 patterns instead of 1.1 (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__)
+
+
+0.15.57 (2018-08-15):
+ - Fix that CommentedSeq could no longer be used in adding or do a sort
+ (reported by `Christopher Wright <https://bitbucket.org/CJ-Wright4242/>`__)
+
+0.15.56 (2018-08-15):
+ - fix issue with ``python -O`` optimizing away code (reported, and detailed cause
+ pinpointed, by `Alex Grönholm <https://bitbucket.org/agronholm/>`__)
+
+0.15.55 (2018-08-14):
+ - unmade ``CommentedSeq`` a subclass of ``list``. It is now
+ indirectly a subclass of the standard
+ ``collections.abc.MutableSequence`` (without .abc if you are
+ still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'),
+ list)``) anywhere in your code replace ``list`` with
+ ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of
+ the abstract baseclass ``ruamel.yaml.compat.MutableScliceableSequence``,
+ with the result that *(extended) slicing is supported on
+ ``CommentedSeq``*.
+ (reported by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__)
+ - duplicate keys (or their values) with non-ascii now correctly
+ report in Python2, instead of raising a Unicode error.
+ (Reported by `Jonathan Pyle <https://bitbucket.org/jonathan_pyle/>`__)
+
+0.15.54 (2018-08-13):
+ - fix issue where a comment could pop-up twice in the output (reported by
+ `Mike Kazantsev <https://bitbucket.org/mk_fg/>`__ and by
+ `Nate Peterson <https://bitbucket.org/ndpete21/>`__)
+ - fix issue where JSON object (mapping) without spaces was not parsed
+ properly (reported by `Marc Schmidt <https://bitbucket.org/marcj/>`__)
+ - fix issue where comments after empty flow-style mappings were not emitted
+ (reported by `Qinfench Chen <https://bitbucket.org/flyin5ish/>`__)
+
+0.15.53 (2018-08-12):
+ - fix issue with flow style mapping with comments gobbled newline (reported
+ by `Christopher Lambert <https://bitbucket.org/XN137/>`__)
+ - fix issue where single '+' under YAML 1.2 was interpreted as
+ integer, erroring out (reported by `Jethro Yu
+ <https://bitbucket.org/jcppkkk/>`__)
+
+0.15.52 (2018-08-09):
+ - added `.copy()` mapping representation for round-tripping
+ (``CommentedMap``) to fix incomplete copies of merged mappings
+ (reported by `Will Richards
+ <https://bitbucket.org/will_richards/>`__)
+ - Also unmade that class a subclass of ordereddict to solve incorrect behaviour
+ for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported independently by
+ `Tim Olsson <https://bitbucket.org/tgolsson/>`__ and
+ `Filip Matzner <https://bitbucket.org/FloopCZ/>`__)
+
+0.15.51 (2018-08-08):
+ - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard
+ <https://bitbucket.org/DouglasRaillard/>`__)
+ - Fix spurious trailing white-space caused when the comment start
+ column was no longer reached and there was no actual EOL comment
+ (e.g. following empty line) and doing substitutions, or when
+ quotes around scalars got dropped. (reported by `Thomas Guillet
+ <https://bitbucket.org/guillett/>`__)
+
+0.15.50 (2018-08-05):
+ - Allow ``YAML()`` as a context manager for output, thereby making it much easier
+ to generate multi-documents in a stream.
+ - Fix issue with incorrect type information for `load()` and `dump()` (reported
+ by `Jimbo Jim <https://bitbucket.org/jimbo1qaz/>`__)
+
+0.15.49 (2018-08-05):
+ - fix preservation of leading newlines in root level literal style scalar,
+ and preserve comment after literal style indicator (``| # some comment``)
+ Both needed for round-tripping multi-doc streams in
+ `ryd <https://pypi.org/project/ryd/>`__.
+
+0.15.48 (2018-08-03):
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
+0.15.47 (2018-07-31):
+ - fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by
+ `Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
+
+
+0.15.46 (2018-07-29):
+ - fixed DeprecationWarning for importing from ``collections`` on 3.7
+ (issue 210, reported by `Reinoud Elhorst
+ <https://bitbucket.org/reinhrst/>`__). It was `difficult to find
+ why tox/pytest did not report
+ <https://stackoverflow.com/q/51573204/1307905>`__ and as time
+ consuming to actually `fix
+ <https://stackoverflow.com/a/51573205/1307905>`__ the tests.
+
+0.15.45 (2018-07-26):
+ - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration
+ (PR provided by `Zachary Buhman <https://bitbucket.org/buhman/>`__,
+ also reported by `Steven Hiscocks <https://bitbucket.org/sdhiscocks/>`__.
+
+0.15.44 (2018-07-14):
+ - Correct loading plain scalars consisting of numerals only and
+ starting with `0`, when not explicitly specifying YAML version
+ 1.1. This also fixes the issue about dumping string `'019'` as
+ plain scalars as reported by `Min RK
+    <https://bitbucket.org/minrk/>`__, that prompted this change.
+
+0.15.43 (2018-07-12):
+ - merge PR33: Python2.7 on Windows is narrow, but has no
+ ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__)
+ - ``register_class()`` now returns class (proposed by
+ `Mike Nerone <https://bitbucket.org/Manganeez/>`__}
+
+0.15.42 (2018-07-01):
+ - fix regression showing only on narrow Python 2.7 (py27mu) builds
+ (with help from
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__ and
+ `Colm O'Connor <https://bitbucket.org/colmoconnorgithub/>`__).
+ - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as
+ 3.4/3.5/3.6/3.7/pypy
+
+0.15.41 (2018-06-27):
+ - add detection of C-compile failure (investigation prompted by
+    `StackOverflow <https://stackoverflow.com/a/51057399/1307905>`__ by
+ `Emmanuel Blot <https://stackoverflow.com/users/8233409/emmanuel-blot>`__),
+ which was removed while no longer dependent on ``libyaml``, C-extensions
+ compilation still needs a compiler though.
+
+0.15.40 (2018-06-18):
+ - added links to landing places as suggested in issue 190 by
+ `KostisA <https://bitbucket.org/ankostis/>`__
+ - fixes issue #201: decoding unicode escaped tags on Python2, reported
+ by `Dan Abolafia <https://bitbucket.org/danabo/>`__
+
+0.15.39 (2018-06-17):
+ - merge PR27 improving package startup time (and loading when regexp not
+ actually used), provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__
+
+0.15.38 (2018-06-13):
+ - fix for losing precision when roundtripping floats by
+ `Rolf Wojtech <https://bitbucket.org/asomov/>`__
+ - fix for hardcoded dir separator not working for Windows by
+ `Nuno André <https://bitbucket.org/nu_no/>`__
+ - typo fix by `Andrey Somov <https://bitbucket.org/asomov/>`__
+
+0.15.37 (2018-03-21):
+ - again trying to create installable files for 187
+
+0.15.36 (2018-02-07):
+ - fix issue 187, incompatibility of C extension with 3.7 (reported by
+ Daniel Blanchard)
+
+0.15.35 (2017-12-03):
+ - allow ``None`` as stream when specifying ``transform`` parameters to
+ ``YAML.dump()``.
+ This is useful if the transforming function doesn't return a meaningful value
+ (inspired by `StackOverflow <https://stackoverflow.com/q/47614862/1307905>`__ by
+ `rsaw <https://stackoverflow.com/users/406281/rsaw>`__).
+
+0.15.34 (2017-09-17):
+ - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka)
+
+0.15.33 (2017-08-31):
+ - support for "undefined" round-tripping tagged scalar objects (in addition to
+ tagged mapping object). Inspired by a use case presented by Matthew Patton
+ on `StackOverflow <https://stackoverflow.com/a/45967047/1307905>`__.
+ - fix issue 148: replace cryptic error message when using !!timestamp with an
+ incorrectly formatted or non- scalar. Reported by FichteFoll.
+
+0.15.32 (2017-08-21):
+ - allow setting ``yaml.default_flow_style = None`` (default: ``False``) for
+ for ``typ='rt'``.
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float``
+ (reported by jan.brezina@tul.cz)
+
+0.15.31 (2017-08-15):
+ - fix Comment dumping
+
+0.15.30 (2017-08-14):
+ - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}``
+ (reported on `StackOverflow <https://stackoverflow.com/q/45681626/1307905>`__ by
+ `mjalkio <https://stackoverflow.com/users/5130525/mjalkio>`_
+
+0.15.29 (2017-08-14):
+ - fix issue #51: different indents for mappings and sequences (reported by
+ Alex Harvey)
+ - fix for flow sequence/mapping as element/value of block sequence with
+ sequence-indent minus dash-offset not equal two.
+
+0.15.28 (2017-08-13):
+ - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron)
+
+0.15.27 (2017-08-13):
+ - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambiguous
+ (reported by nowox)
+ - fix lists within lists which would make comments disappear
+
+0.15.26 (2017-08-10):
+ - fix for disappearing comment after empty flow sequence (reported by
+ oit-tzhimmash)
+
+0.15.25 (2017-08-09):
+ - fix for problem with dumping (unloaded) floats (reported by eyenseo)
+
+0.15.24 (2017-08-09):
+ - added ScalarFloat which supports roundtripping of 23.1, 23.100,
+ 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas
+ are not preserved/supported (yet, is anybody using that?).
+ - (finally) fixed longstanding issue 23 (reported by `Antony Sottile
+ <https://bitbucket.org/asottile/>`__), now handling comment between block
+ mapping key and value correctly
+ - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML
+ provided by Cecil Curry)
+ - allow setting of boolean representation (`false`, `true`) by using:
+ ``yaml.boolean_representation = [u'False', u'True']``
+
+0.15.23 (2017-08-01):
+ - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina)
+
+0.15.22 (2017-07-28):
+ - fix for round_tripping single excl. mark tags doubling (reported and fix by Jan Brezina)
+
+0.15.21 (2017-07-25):
+ - fix for writing unicode in new API, (reported on
+ `StackOverflow <https://stackoverflow.com/a/45281922/1307905>`__
+
+0.15.20 (2017-07-23):
+ - wheels for windows including C extensions
+
+0.15.19 (2017-07-13):
+ - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject.
+ - fix for problem using load_all with Path() instance
+ - fix for load_all in combination with zero indent block style literal
+ (``pure=True`` only!)
+
+0.15.18 (2017-07-04):
+ - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag
+ constructor for `including YAML files in a YAML file
+ <https://stackoverflow.com/a/44913652/1307905>`__
+ - some documentation improvements
+ - trigger of doc build on new revision
+
+0.15.17 (2017-07-03):
+ - support for Unicode supplementary Plane **output**
+ (input was already supported, triggered by
+ `this <https://stackoverflow.com/a/44875714/1307905>`__ Stack Overflow Q&A)
+
+0.15.16 (2017-07-01):
+ - minor typing issues (reported and fix provided by
+ `Manvendra Singh <https://bitbucket.org/manu-chroma/>`__
+ - small doc improvements
+
+0.15.15 (2017-06-27):
+ - fix for issue 135, typ='safe' not dumping in Python 2.7
+ (reported by Andrzej Ostrowski <https://bitbucket.org/aostr123/>`__)
+
+0.15.14 (2017-06-25):
+ - fix for issue 133, in setup.py: change ModuleNotFoundError to
+ ImportError (reported and fix by
+ `Asley Drake <https://github.com/aldraco>`__)
+
+0.15.13 (2017-06-24):
+ - suppress duplicate key warning on mappings with merge keys (reported by
+ Cameron Sweeney)
+
+0.15.12 (2017-06-24):
+ - remove fatal dependency of setup.py on wheel package (reported by
+ Cameron Sweeney)
+
+0.15.11 (2017-06-24):
+ - fix for issue 130, regression in nested merge keys (reported by
+ `David Fee <https://bitbucket.org/dfee/>`__)
+
+0.15.10 (2017-06-23):
+ - top level PreservedScalarString not indented if not explicitly asked to
+ - remove Makefile (not very useful anyway)
+ - some mypy additions
+
+0.15.9 (2017-06-16):
+ - fix for issue 127: tagged scalars were always quoted and separated
+ by a newline when in a block sequence (reported and largely fixed by
+ `Tommy Wang <https://bitbucket.org/twang817/>`__)
+
+0.15.8 (2017-06-15):
+ - allow plug-in install via ``install ruamel.yaml[jinja2]``
+
+0.15.7 (2017-06-14):
+ - add plug-in mechanism for load/dump pre resp. post-processing
+
+0.15.6 (2017-06-10):
+ - a set() with duplicate elements now throws error in rt loading
+ - support for toplevel column zero literal/folded scalar in explicit documents
+
+0.15.5 (2017-06-08):
+ - repeat `load()` on a single `YAML()` instance would fail.
+
+0.15.4 (2017-06-08):
+ - `transform` parameter on dump that expects a function taking a
+ string and returning a string. This allows transformation of the output
+ before it is written to stream. This forces creation of the complete output in memory!
+ - some updates to the docs
+
+0.15.3 (2017-06-07):
+ - No longer try to compile C extensions on Windows. Compilation can be forced by setting
+ the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value
+ before starting the `pip install`.
+
+0.15.2 (2017-06-07):
+ - update to conform to mypy 0.511: mypy --strict
+
+0.15.1 (2017-06-07):
+ - `duplicate keys <http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys>`__
+ in mappings generate an error (in the old API this change generates a warning until 0.16)
+ - dependency on ruamel.ordereddict for 2.7 now via extras_require
+
+0.15.0 (2017-06-04):
+ - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all
+ load/dump functions
+ - passing in a non-supported object (e.g. a string) as "stream" will result in a
+ much more meaningful YAMLStreamError.
+ - assigning a normal string value to an existing CommentedMap key or CommentedSeq
+ element will result in a value cast to the previous value's type if possible.
+ - added ``YAML`` class for new API
+
+0.14.12 (2017-05-14):
+ - fix for issue 119, deepcopy not returning subclasses (reported and PR by
+ Constantine Evans <cevans@evanslabs.org>)
+
+0.14.11 (2017-05-01):
+ - fix for issue 103 allowing implicit documents after document end marker line (``...``)
+ in YAML 1.2
+
+0.14.10 (2017-04-26):
+ - fix problem with emitting using cyaml
+
+0.14.9 (2017-04-22):
+ - remove dependency on ``typing`` while still supporting ``mypy``
+ (http://stackoverflow.com/a/43516781/1307905)
+ - fix unclarity in doc that stated 2.6 is supported (reported by feetdust)
+
+0.14.8 (2017-04-19):
+ - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards
+ on all files (reported by `João Paulo Magalhães <https://bitbucket.org/jpmag/>`__)
+
+0.14.7 (2017-04-18):
+ - round trip of integers (decimal, octal, hex, binary) now preserve
+ leading zero(s) padding and underscores. Underscores are presumed
+ to be at regular distances (i.e. ``0o12_345_67`` dumps back as
+ ``0o1_23_45_67`` as the space from the last digit to the
+ underscore before that is the determining factor).
+
+0.14.6 (2017-04-14):
+ - binary, octal and hex integers are now preserved by default. This
+ was a known deficiency. Working on this was prompted by the issue report (112)
+ from devnoname120, as well as the additional experience with `.replace()`
+ on `scalarstring` classes.
+ - fix issues 114: cannot install on Buildozer (reported by mixmastamyk).
+ Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check.
+
+0.14.5 (2017-04-04):
+ - fix issue 109: None not dumping correctly at top level (reported by Andrea Censi)
+ - fix issue 110: .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString
+ would give back "normal" string (reported by sandres23)
+
+0.14.4 (2017-03-31):
+ - fix readme
+
+0.14.3 (2017-03-31):
+ - fix for 0o52 not being a string in YAML 1.1 (reported on
+ `StackOverflow Q&A 43138503 <http://stackoverflow.com/a/43138503/1307905>`__ by
+ `Frank D <http://stackoverflow.com/users/7796630/frank-d>`__)
+
+0.14.2 (2017-03-23):
+ - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch)
+
+0.14.1 (2017-03-22):
+ - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré)
+
+0.14.0 (2017-03-21):
+ - updates for mypy --strict
+ - preparation for moving away from inheritance in Loader and Dumper, calls from e.g.
+ the Representer to the Serializer.serialize() are now done via the attribute
+ .serializer.serialize(). Usage of .serialize() outside of Serializer will be
+ deprecated soon
+ - some extra tests on main.py functions
+
+----
+
+For older changes see the file
+`CHANGES <https://bitbucket.org/ruamel/yaml/src/default/CHANGES>`_
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/__init__.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/__init__.py
new file mode 100644
index 0000000000..7964157c87
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/__init__.py
@@ -0,0 +1,59 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+if False: # MYPY
+ from typing import Dict, Any # NOQA
+
+_package_data = dict(
+ full_package_name='ruamel.yaml',
+ version_info=(0, 16, 13),
+ __version__='0.16.13',
+ author='Anthon van der Neut',
+ author_email='a.van.der.neut@ruamel.eu',
+ description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA
+ entry_points=None,
+ since=2014,
+ extras_require={
+ ':platform_python_implementation=="CPython" and python_version<="2.7"': ['ruamel.ordereddict'], # NOQA
+ ':platform_python_implementation=="CPython" and python_version<"3.10"': ['ruamel.yaml.clib>=0.1.2'], # NOQA
+ 'jinja2': ['ruamel.yaml.jinja2>=0.2'],
+ 'docs': ['ryd'],
+ },
+ classifiers=[
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Programming Language :: Python :: Implementation :: PyPy',
+ 'Programming Language :: Python :: Implementation :: Jython',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Text Processing :: Markup',
+ 'Typing :: Typed',
+ ],
+ keywords='yaml 1.2 parser round-trip preserve quotes order config',
+ read_the_docs='yaml',
+ supported=[(2, 7), (3, 5)], # minimum
+ tox=dict(
+ env='*', # remove 'pn', no longer test narrow Python 2.7 for unicode patterns and PyPy
+ deps='ruamel.std.pathlib',
+ fl8excl='_test/lib',
+ ),
+ universal=True,
+ rtfd='yaml',
+) # type: Dict[Any, Any]
+
+
+version_info = _package_data['version_info']
+__version__ = _package_data['__version__']
+
+try:
+ from .cyaml import * # NOQA
+
+ __with_libyaml__ = True
+except (ImportError, ValueError): # for Jython
+ __with_libyaml__ = False
+
+from ruamel.yaml.main import * # NOQA
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/anchor.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/anchor.py
new file mode 100644
index 0000000000..d702126039
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/anchor.py
@@ -0,0 +1,19 @@
# Guarded import: only visible to mypy, never executed at runtime.
# Fix: `Optional` was listed twice in the original import line.
if False:  # MYPY
    from typing import Any, Dict, Optional, List, Union, Iterator  # NOQA

# Name of the attribute under which an Anchor instance is stored on
# round-trip container/scalar objects (see CommentedBase.anchor).
anchor_attrib = '_yaml_anchor'


class Anchor(object):
    """Holds the YAML anchor name attached to a round-tripped node.

    value       -- the anchor name, or None when the node has no anchor
    always_dump -- when True the anchor is emitted on dump even if no
                   alias refers to it
    """

    __slots__ = 'value', 'always_dump'
    attrib = anchor_attrib

    def __init__(self):
        # type: () -> None
        self.value = None
        self.always_dump = False

    def __repr__(self):
        # type: () -> Any
        ad = ', (always dump)' if self.always_dump else ""
        return 'Anchor({!r}{})'.format(self.value, ad)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/comments.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/comments.py
new file mode 100644
index 0000000000..13a519eb20
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/comments.py
@@ -0,0 +1,1154 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+"""
+stuff to deal with comments and formatting on dict/list/ordereddict/set
+these are not really related, formatting could be factored out as
+a separate base
+"""
+
+import sys
+import copy
+
+
+from ruamel.yaml.compat import ordereddict # type: ignore
+from ruamel.yaml.compat import PY2, string_types, MutableSliceableSequence
+from ruamel.yaml.scalarstring import ScalarString
+from ruamel.yaml.anchor import Anchor
+
+if PY2:
+ from collections import MutableSet, Sized, Set, Mapping
+else:
+ from collections.abc import MutableSet, Sized, Set, Mapping
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA
+
# fmt: off
# Explicit public API: only the container classes plus the two attribute
# names other ruamel.yaml modules look up are exported via `import *`.
__all__ = ['CommentedSeq', 'CommentedKeySeq',
           'CommentedMap', 'CommentedOrderedMap',
           'CommentedSet', 'comment_attrib', 'merge_attrib']
# fmt: on

# Attribute names under which the per-node round-trip metadata objects
# (Comment, Format, LineCol, merge info, Tag) are stored on containers;
# each helper class repeats its name as a class-level `attrib`.
comment_attrib = '_yaml_comment'
format_attrib = '_yaml_format'
line_col_attrib = '_yaml_line_col'
merge_attrib = '_yaml_merge'
tag_attrib = '_yaml_tag'
+
+
class Comment(object):
    """Per-node container for attached comments.

    NOTE (from the original author, via sys.getsizeof measurements):
    __slots__ actually makes these objects bigger, and unconditionally
    storing self._end did not matter.
    """

    __slots__ = 'comment', '_items', '_end', '_start'
    attrib = comment_attrib

    def __init__(self):
        # type: () -> None
        self.comment = None  # [post, [pre]]
        # maps a key (mapping/omap/dict) or an index (sequence/list) to:
        #   dict: [post_key, pre_key, post_value, pre_value]
        #   list: [pre item, post item]
        self._items = {}  # type: Dict[Any, Any]
        # self._start = []  # should not put these on first item
        # comments trailing the end of the document
        self._end = []  # type: List[Any]

    def __str__(self):
        # type: () -> str
        end = ',\n end=' + str(self._end) if self._end else ""
        return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end)

    @property
    def items(self):
        # type: () -> Any
        return self._items

    @property
    def end(self):
        # type: () -> Any
        return self._end

    @end.setter
    def end(self, value):
        # type: (Any) -> None
        self._end = value

    @property
    def start(self):
        # type: () -> Any
        # NOTE: _start is never initialised in __init__, so reading this
        # before the setter was used raises AttributeError (as upstream)
        return self._start

    @start.setter
    def start(self, value):
        # type: (Any) -> None
        self._start = value
+
+
# to distinguish key from None
def NoComment():
    # type: () -> None
    # Sentinel default value: callers may pass key=None explicitly, so
    # "no key argument given" is detected with `key is NoComment`.
    pass
+
+
class Format(object):
    """Stores the flow/block formatting choice for one round-tripped node."""

    __slots__ = ('_flow_style',)
    attrib = format_attrib

    def __init__(self):
        # type: () -> None
        # tri-state: None (undecided) / True (flow) / False (block)
        self._flow_style = None  # type: Any

    def set_flow_style(self):
        # type: () -> None
        self._flow_style = True

    def set_block_style(self):
        # type: () -> None
        self._flow_style = False

    def flow_style(self, default=None):
        # type: (Optional[Any]) -> Any
        """if default (the flow_style) is None, the flow style tacked on to
        the object explicitly will be taken. If that is None as well the
        default flow style rules the format down the line, or the type
        of the constituent values (simple -> flow, map/list -> block)"""
        return default if self._flow_style is None else self._flow_style
+
+
class LineCol(object):
    """Records where a node (and its keys/items) was found while parsing.

    line/col hold the node's own position; `data`, created lazily, maps a
    key or index to a 4-tuple of positions (key line, key col, value line,
    value col) resp. a pair for sequence items.
    """

    attrib = line_col_attrib

    def __init__(self):
        # type: () -> None
        self.line = None
        self.col = None
        self.data = None  # type: Optional[Dict[Any, Any]]

    def add_kv_line_col(self, key, data):
        # type: (Any, Any) -> None
        # lazily create the per-key table on first use
        if self.data is None:
            self.data = {}
        self.data[key] = data

    def key(self, k):
        # type: (Any) -> Any
        # (line, col) of the key itself
        return self._kv(k, 0, 1)

    def value(self, k):
        # type: (Any) -> Any
        # (line, col) of the value belonging to key k
        return self._kv(k, 2, 3)

    def _kv(self, k, x0, x1):
        # type: (Any, Any, Any) -> Any
        if self.data is None:
            return None
        entry = self.data[k]
        return entry[x0], entry[x1]

    def item(self, idx):
        # type: (Any) -> Any
        # (line, col) of the sequence element at idx
        if self.data is None:
            return None
        entry = self.data[idx]
        return entry[0], entry[1]

    def add_idx_line_col(self, key, data):
        # type: (Any, Any) -> None
        if self.data is None:
            self.data = {}
        self.data[key] = data
+
+
class Tag(object):
    """store tag information for roundtripping"""

    __slots__ = ('value',)
    attrib = tag_attrib

    def __init__(self):
        # type: () -> None
        # the tag text, or None when the node carries no explicit tag
        self.value = None

    def __repr__(self):
        # type: () -> Any
        return '{}({!r})'.format(type(self).__name__, self.value)
+
+
class CommentedBase(object):
    """Mixin providing round-trip metadata access for container classes.

    Comment (``.ca``), format (``.fa``), line/column (``.lc``), anchor and
    tag information are created lazily on first access and stored under
    the attribute names declared by the helper classes (``Comment.attrib``
    etc.), so plain containers carry no overhead.
    """

    @property
    def ca(self):
        # type: () -> Any
        # comment attribute, created on first access
        if not hasattr(self, Comment.attrib):
            setattr(self, Comment.attrib, Comment())
        return getattr(self, Comment.attrib)

    def yaml_end_comment_extend(self, comment, clear=False):
        # type: (Any, bool) -> None
        # extend (or, with clear=True, replace) the end-of-document comments
        if comment is None:
            return
        if clear or self.ca.end is None:
            self.ca.end = []
        self.ca.end.extend(comment)

    def yaml_key_comment_extend(self, key, comment, clear=False):
        # type: (Any, Any, bool) -> None
        # comment is a [token, pre_list] pair; slots 0/1 of the item entry
        # hold the post-key token resp. the comments before the key
        r = self.ca._items.setdefault(key, [None, None, None, None])
        if clear or r[1] is None:
            if comment[1] is not None:
                assert isinstance(comment[1], list)
            r[1] = comment[1]
        else:
            r[1].extend(comment[0])
        r[0] = comment[0]

    def yaml_value_comment_extend(self, key, comment, clear=False):
        # type: (Any, Any, bool) -> None
        # same as yaml_key_comment_extend, but for slots 2/3 (the value)
        r = self.ca._items.setdefault(key, [None, None, None, None])
        if clear or r[3] is None:
            if comment[1] is not None:
                assert isinstance(comment[1], list)
            r[3] = comment[1]
        else:
            r[3].extend(comment[0])
        r[2] = comment[0]

    def yaml_set_start_comment(self, comment, indent=0):
        # type: (Any, Any) -> None
        """overwrites any preceding comment lines on an object
        expects comment to be without `#` and possibly have multiple lines
        """
        from .error import CommentMark
        from .tokens import CommentToken

        pre_comments = self._yaml_get_pre_comment()
        if comment[-1] == '\n':
            comment = comment[:-1]  # strip final newline if there
        start_mark = CommentMark(indent)
        for com in comment.split('\n'):
            c = com.strip()
            # prefix '# ' unless the line is empty or already a comment
            if len(c) > 0 and c[0] != '#':
                com = '# ' + com
            pre_comments.append(CommentToken(com + '\n', start_mark, None))

    def yaml_set_comment_before_after_key(
        self, key, before=None, indent=0, after=None, after_indent=None
    ):
        # type: (Any, Any, Any, Any, Any) -> None
        """
        expects comment (before/after) to be without `#` and possibly have multiple lines
        """
        from ruamel.yaml.error import CommentMark
        from ruamel.yaml.tokens import CommentToken

        def comment_token(s, mark):
            # type: (Any, Any) -> Any
            # handle empty lines as having no comment
            return CommentToken(('# ' if s else "") + s + '\n', mark, None)

        if after_indent is None:
            after_indent = indent + 2
        if before and (len(before) > 1) and before[-1] == '\n':
            before = before[:-1]  # strip final newline if there
        if after and after[-1] == '\n':
            after = after[:-1]  # strip final newline if there
        start_mark = CommentMark(indent)
        c = self.ca.items.setdefault(key, [None, [], None, None])
        if before == '\n':
            # a lone newline means: one empty (comment-less) line before the key
            c[1].append(comment_token("", start_mark))
        elif before:
            for com in before.split('\n'):
                c[1].append(comment_token(com, start_mark))
        if after:
            start_mark = CommentMark(after_indent)
            if c[3] is None:
                c[3] = []
            for com in after.split('\n'):
                c[3].append(comment_token(com, start_mark))  # type: ignore

    @property
    def fa(self):
        # type: () -> Any
        """format attribute

        set_flow_style()/set_block_style()"""
        if not hasattr(self, Format.attrib):
            setattr(self, Format.attrib, Format())
        return getattr(self, Format.attrib)

    def yaml_add_eol_comment(self, comment, key=NoComment, column=None):
        # type: (Any, Optional[Any], Optional[Any]) -> None
        """
        there is a problem as eol comments should start with ' #'
        (but at the beginning of the line the space doesn't have to be before
        the #. The column index is for the # mark
        """
        from .tokens import CommentToken
        from .error import CommentMark

        if column is None:
            try:
                # align with an existing comment on a nearby key/item
                column = self._yaml_get_column(key)
            except AttributeError:
                column = 0
        if comment[0] != '#':
            comment = '# ' + comment
        if column is None:
            # no reference column found: emit at column 0, separated by a space
            if comment[0] == '#':
                comment = ' ' + comment
                column = 0
        start_mark = CommentMark(column)
        ct = [CommentToken(comment, start_mark, None), None]
        self._yaml_add_eol_comment(ct, key=key)

    @property
    def lc(self):
        # type: () -> Any
        # line/column information, created on first access
        if not hasattr(self, LineCol.attrib):
            setattr(self, LineCol.attrib, LineCol())
        return getattr(self, LineCol.attrib)

    def _yaml_set_line_col(self, line, col):
        # type: (Any, Any) -> None
        self.lc.line = line
        self.lc.col = col

    def _yaml_set_kv_line_col(self, key, data):
        # type: (Any, Any) -> None
        self.lc.add_kv_line_col(key, data)

    def _yaml_set_idx_line_col(self, key, data):
        # type: (Any, Any) -> None
        self.lc.add_idx_line_col(key, data)

    @property
    def anchor(self):
        # type: () -> Any
        # anchor information, created on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self):
        # type: () -> Any
        # unlike the .anchor property this does NOT create the Anchor object
        if not hasattr(self, Anchor.attrib):
            return None
        return self.anchor

    def yaml_set_anchor(self, value, always_dump=False):
        # type: (Any, bool) -> None
        self.anchor.value = value
        self.anchor.always_dump = always_dump

    @property
    def tag(self):
        # type: () -> Any
        # tag information, created on first access
        if not hasattr(self, Tag.attrib):
            setattr(self, Tag.attrib, Tag())
        return getattr(self, Tag.attrib)

    def yaml_set_tag(self, value):
        # type: (Any) -> None
        self.tag.value = value

    def copy_attributes(self, t, memo=None):
        # type: (Any, Any) -> None
        """Copy the round-trip metadata attributes present on self to *t*.

        With *memo* (a deepcopy memo dict) the attributes are deep-copied;
        without it they are shared between self and t.
        """
        # fmt: off
        for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib,
                  Tag.attrib, merge_attrib]:
            if hasattr(self, a):
                if memo is not None:
                    # bug fix: memo is deepcopy's second argument; the original
                    # code wrote getattr(self, a, memo), passing the memo dict
                    # as a getattr *default* so the deep copy was never memoized
                    # (shared/recursive structures were duplicated)
                    setattr(t, a, copy.deepcopy(getattr(self, a), memo))
                else:
                    setattr(t, a, getattr(self, a))
        # fmt: on

    def _yaml_add_eol_comment(self, comment, key):
        # type: (Any, Any) -> None
        # implemented by the concrete container subclasses
        raise NotImplementedError

    def _yaml_get_pre_comment(self):
        # type: () -> Any
        raise NotImplementedError

    def _yaml_get_column(self, key):
        # type: (Any) -> Any
        raise NotImplementedError
+
+
class CommentedSeq(MutableSliceableSequence, list, CommentedBase):  # type: ignore
    """list subclass that preserves comments, flow style, anchor and tag.

    The comment bookkeeping in ``self.ca.items`` is keyed by list index,
    so element insertion/deletion has to shift those keys along.
    """

    __slots__ = (Comment.attrib, '_lst')

    def __init__(self, *args, **kw):
        # type: (Any, Any) -> None
        list.__init__(self, *args, **kw)

    def __getsingleitem__(self, idx):
        # type: (Any) -> Any
        return list.__getitem__(self, idx)

    def __setsingleitem__(self, idx, value):
        # type: (Any, Any) -> None
        # try to preserve the scalarstring type if setting an existing key to a new value
        if idx < len(self):
            if (
                isinstance(value, string_types)
                and not isinstance(value, ScalarString)
                and isinstance(self[idx], ScalarString)
            ):
                value = type(self[idx])(value)
        list.__setitem__(self, idx, value)

    def __delsingleitem__(self, idx=None):
        # type: (Any) -> Any
        list.__delitem__(self, idx)
        self.ca.items.pop(idx, None)  # might not be there -> default value
        # shift the comment entries of all following items one position down;
        # ascending order is safe because each target slot was just vacated
        for list_index in sorted(self.ca.items):
            if list_index < idx:
                continue
            self.ca.items[list_index - 1] = self.ca.items.pop(list_index)

    def __len__(self):
        # type: () -> int
        return list.__len__(self)

    def insert(self, idx, val):
        # type: (Any, Any) -> None
        """the comments after the insertion have to move forward"""
        list.insert(self, idx, val)
        # walk indices highest-first so entries are not overwritten while shifting
        for list_index in sorted(self.ca.items, reverse=True):
            if list_index < idx:
                break
            self.ca.items[list_index + 1] = self.ca.items.pop(list_index)

    def extend(self, val):
        # type: (Any) -> None
        list.extend(self, val)

    def __eq__(self, other):
        # type: (Any) -> bool
        return list.__eq__(self, other)

    def _yaml_add_comment(self, comment, key=NoComment):
        # type: (Any, Optional[Any]) -> None
        # attach to a specific index when given, else to the sequence itself
        if key is not NoComment:
            self.yaml_key_comment_extend(key, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment, key):
        # type: (Any, Any) -> None
        self._yaml_add_comment(comment, key=key)

    def _yaml_get_columnX(self, key):
        # type: (Any) -> Any
        # column of the comment token already stored for this index
        return self.ca.items[key][0].start_mark.column

    def _yaml_get_column(self, key):
        # type: (Any) -> Any
        # reuse the column of a nearby commented item (previous/next, else
        # the closest commented item before key) so new comments line up
        column = None
        sel_idx = None
        pre, post = key - 1, key + 1
        if pre in self.ca.items:
            sel_idx = pre
        elif post in self.ca.items:
            sel_idx = post
        else:
            # self.ca.items is not ordered
            for row_idx, _k1 in enumerate(self):
                if row_idx >= key:
                    break
                if row_idx not in self.ca.items:
                    continue
                sel_idx = row_idx
        if sel_idx is not None:
            column = self._yaml_get_columnX(sel_idx)
        return column

    def _yaml_get_pre_comment(self):
        # type: () -> Any
        # (re)set the "before first item" comment list and return it
        pre_comments = []  # type: List[Any]
        if self.ca.comment is None:
            self.ca.comment = [None, pre_comments]
        else:
            self.ca.comment[1] = pre_comments
        return pre_comments

    def __deepcopy__(self, memo):
        # type: (Any) -> Any
        res = self.__class__()
        # register early so recursive structures terminate
        memo[id(self)] = res
        for k in self:
            res.append(copy.deepcopy(k, memo))
        self.copy_attributes(res, memo=memo)
        return res

    def __add__(self, other):
        # type: (Any) -> Any
        return list.__add__(self, other)

    def sort(self, key=None, reverse=False):  # type: ignore
        # type: (Any, bool) -> None
        # sort the data while remembering each element's old index, so the
        # per-item comment entries can be moved to the new positions
        if key is None:
            tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse)
            list.__init__(self, [x[0] for x in tmp_lst])
        else:
            tmp_lst = sorted(
                zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse
            )
            list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst])
        itm = self.ca.items
        self.ca._items = {}
        for idx, x in enumerate(tmp_lst):
            old_index = x[1]
            if old_index in itm:
                self.ca.items[idx] = itm[old_index]

    def __repr__(self):
        # type: () -> Any
        return list.__repr__(self)
+
+
class CommentedKeySeq(tuple, CommentedBase):  # type: ignore
    """This primarily exists to be able to roundtrip keys that are sequences"""

    def _yaml_add_comment(self, comment, key=NoComment):
        # type: (Any, Optional[Any]) -> None
        # attach to a specific index when given, else to the tuple itself
        if key is not NoComment:
            self.yaml_key_comment_extend(key, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment, key):
        # type: (Any, Any) -> None
        self._yaml_add_comment(comment, key=key)

    def _yaml_get_columnX(self, key):
        # type: (Any) -> Any
        # column of the comment token already stored for this index
        return self.ca.items[key][0].start_mark.column

    def _yaml_get_column(self, key):
        # type: (Any) -> Any
        # same strategy as CommentedSeq._yaml_get_column: reuse the column
        # of a nearby commented entry so new comments line up
        column = None
        sel_idx = None
        pre, post = key - 1, key + 1
        if pre in self.ca.items:
            sel_idx = pre
        elif post in self.ca.items:
            sel_idx = post
        else:
            # self.ca.items is not ordered
            for row_idx, _k1 in enumerate(self):
                if row_idx >= key:
                    break
                if row_idx not in self.ca.items:
                    continue
                sel_idx = row_idx
        if sel_idx is not None:
            column = self._yaml_get_columnX(sel_idx)
        return column

    def _yaml_get_pre_comment(self):
        # type: () -> Any
        # (re)set the "before first item" comment list and return it
        pre_comments = []  # type: List[Any]
        if self.ca.comment is None:
            self.ca.comment = [None, pre_comments]
        else:
            self.ca.comment[1] = pre_comments
        return pre_comments
+
+
class CommentedMapView(Sized):
    """Base class for the dict-view objects below; wraps the mapping it reflects."""

    __slots__ = ('_mapping',)

    def __init__(self, mapping):
        # type: (Any) -> None
        self._mapping = mapping

    def __len__(self):
        # type: () -> int
        return len(self._mapping)
+
+
class CommentedMapKeysView(CommentedMapView, Set):  # type: ignore
    """Keys view over a CommentedMap (the dict.keys() equivalent)."""

    __slots__ = ()

    @classmethod
    def _from_iterable(self, it):
        # type: (Any) -> Any
        # set operations on this view yield plain sets
        return set(it)

    def __contains__(self, key):
        # type: (Any) -> Any
        return key in self._mapping

    def __iter__(self):
        # type: () -> Any
        # no `yield from`: Python 2.7 / pypy must still be supported
        for key in self._mapping:
            yield key
+
+
class CommentedMapItemsView(CommentedMapView, Set):  # type: ignore
    """Items view over a CommentedMap (the dict.items() equivalent)."""

    __slots__ = ()

    @classmethod
    def _from_iterable(self, it):
        # type: (Any) -> Any
        # set operations on this view yield plain sets
        return set(it)

    def __contains__(self, item):
        # type: (Any) -> Any
        key, value = item
        try:
            stored = self._mapping[key]
        except KeyError:
            return False
        return stored == value

    def __iter__(self):
        # type: () -> Any
        # iterate own keys (merged keys excluded by _keys) with their values
        for key in self._mapping._keys():
            yield (key, self._mapping[key])
+
+
class CommentedMapValuesView(CommentedMapView):
    """Values view over a CommentedMap (the dict.values() equivalent)."""

    __slots__ = ()

    def __contains__(self, value):
        # type: (Any) -> Any
        # keep `value` on the left so its __eq__ is consulted first,
        # matching the original comparison order
        return any(value == self._mapping[key] for key in self._mapping)

    def __iter__(self):
        # type: () -> Any
        for key in self._mapping._keys():
            yield self._mapping[key]
+
+
+class CommentedMap(ordereddict, CommentedBase): # type: ignore
+ __slots__ = (Comment.attrib, '_ok', '_ref')
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ self._ok = set() # type: MutableSet[Any] # own keys
+ self._ref = [] # type: List[CommentedMap]
+ ordereddict.__init__(self, *args, **kw)
+
+ def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """values is set to key to indicate a value attachment of comment"""
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ return
+ if value is not NoComment:
+ self.yaml_value_comment_extend(value, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ """add on the value line, with value specified by the key"""
+ self._yaml_add_comment(comment, value=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][2].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post, last = None, None, None
+ for x in self:
+ if pre is not None and x != key:
+ post = x
+ break
+ if x == key:
+ pre = last
+ last = x
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for k1 in self:
+ if k1 >= key:
+ break
+ if k1 not in self.ca.items:
+ continue
+ sel_idx = k1
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+ def update(self, *vals, **kw):
+ # type: (Any, Any) -> None
+ try:
+ ordereddict.update(self, *vals, **kw)
+ except TypeError:
+ # probably a dict that is used
+ for x in vals[0]:
+ self[x] = vals[0][x]
+ try:
+ self._ok.update(vals.keys()) # type: ignore
+ except AttributeError:
+ # assume one argument that is a list/tuple of two element lists/tuples
+ for x in vals[0]:
+ self._ok.add(x[0])
+ if kw:
+ self._ok.add(*kw.keys())
+
+ def insert(self, pos, key, value, comment=None):
+ # type: (Any, Any, Any, Optional[Any]) -> None
+ """insert key value into given position
+ attach comment if provided
+ """
+ ordereddict.insert(self, pos, key, value)
+ self._ok.add(key)
+ if comment is not None:
+ self.yaml_add_eol_comment(comment, key=key)
+
+ def mlget(self, key, default=None, list_ok=False):
+ # type: (Any, Any, Any) -> Any
+ """multi-level get that expects dicts within dicts"""
+ if not isinstance(key, list):
+ return self.get(key, default)
+ # assume that the key is a list of recursively accessible dicts
+
+ def get_one_level(key_list, level, d):
+ # type: (Any, Any, Any) -> Any
+ if not list_ok:
+ assert isinstance(d, dict)
+ if level >= len(key_list):
+ if level > len(key_list):
+ raise IndexError
+ return d[key_list[level - 1]]
+ return get_one_level(key_list, level + 1, d[key_list[level - 1]])
+
+ try:
+ return get_one_level(key, 1, self)
+ except KeyError:
+ return default
+ except (TypeError, IndexError):
+ if not list_ok:
+ raise
+ return default
+
+ def __getitem__(self, key):
+ # type: (Any) -> Any
+ try:
+ return ordereddict.__getitem__(self, key)
+ except KeyError:
+ for merged in getattr(self, merge_attrib, []):
+ if key in merged[1]:
+ return merged[1][key]
+ raise
+
+ def __setitem__(self, key, value):
+ # type: (Any, Any) -> None
+ # try to preserve the scalarstring type if setting an existing key to a new value
+ if key in self:
+ if (
+ isinstance(value, string_types)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[key], ScalarString)
+ ):
+ value = type(self[key])(value)
+ ordereddict.__setitem__(self, key, value)
+ self._ok.add(key)
+
+ def _unmerged_contains(self, key):
+ # type: (Any) -> Any
+ if key in self._ok:
+ return True
+ return None
+
+ def __contains__(self, key):
+ # type: (Any) -> bool
+ return bool(ordereddict.__contains__(self, key))
+
+ def get(self, key, default=None):
+ # type: (Any, Any) -> Any
+ try:
+ return self.__getitem__(key)
+ except: # NOQA
+ return default
+
+ def __repr__(self):
+ # type: () -> Any
+ return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict')
+
+ def non_merged_items(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ if x in self._ok:
+ yield x, ordereddict.__getitem__(self, x)
+
+ def __delitem__(self, key):
+ # type: (Any) -> None
+ # for merged in getattr(self, merge_attrib, []):
+ # if key in merged[1]:
+ # value = merged[1][key]
+ # break
+ # else:
+ # # not found in merged in stuff
+ # ordereddict.__delitem__(self, key)
+ # for referer in self._ref:
+ # referer.update_key_value(key)
+ # return
+ #
+ # ordereddict.__setitem__(self, key, value) # merge might have different value
+ # self._ok.discard(key)
+ self._ok.discard(key)
+ ordereddict.__delitem__(self, key)
+ for referer in self._ref:
+ referer.update_key_value(key)
+
+ def __iter__(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x
+
+ def _keys(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return int(ordereddict.__len__(self))
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ return bool(dict(self) == other)
+
    # Python-2/3 split for the keys() family: on PY2 keys() returns a list
    # and iterkeys()/viewkeys() mirror dict's 2.x API; on PY3 keys() is a
    # dict_keys-like view object
    if PY2:

        def keys(self):
            # type: () -> Any
            return list(self._keys())

        def iterkeys(self):
            # type: () -> Any
            return self._keys()

        def viewkeys(self):
            # type: () -> Any
            return CommentedMapKeysView(self)

    else:

        def keys(self):
            # type: () -> Any
            return CommentedMapKeysView(self)
+
    # Python-2/3 split for the values() family, mirroring dict's 2.x API
    if PY2:

        def _values(self):
            # type: () -> Any
            for x in ordereddict.__iter__(self):
                yield ordereddict.__getitem__(self, x)

        def values(self):
            # type: () -> Any
            return list(self._values())

        def itervalues(self):
            # type: () -> Any
            return self._values()

        def viewvalues(self):
            # type: () -> Any
            return CommentedMapValuesView(self)

    else:

        def values(self):
            # type: () -> Any
            return CommentedMapValuesView(self)
+
+ def _items(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x, ordereddict.__getitem__(self, x)
+
    # Python-2/3 split for the items() family, mirroring dict's 2.x API
    if PY2:

        def items(self):
            # type: () -> Any
            return list(self._items())

        def iteritems(self):
            # type: () -> Any
            return self._items()

        def viewitems(self):
            # type: () -> Any
            return CommentedMapItemsView(self)

    else:

        def items(self):
            # type: () -> Any
            return CommentedMapItemsView(self)
+
    @property
    def merge(self):
        # type: () -> Any
        # list of (position, CommentedMap) pairs merged in via the YAML
        # merge key '<<'; created lazily on first access
        if not hasattr(self, merge_attrib):
            setattr(self, merge_attrib, [])
        return getattr(self, merge_attrib)
+
+ def copy(self):
+ # type: () -> Any
+ x = type(self)() # update doesn't work
+ for k, v in self._items():
+ x[k] = v
+ self.copy_attributes(x)
+ return x
+
+ def add_referent(self, cm):
+ # type: (Any) -> None
+ if cm not in self._ref:
+ self._ref.append(cm)
+
    def add_yaml_merge(self, value):
        # type: (Any) -> None
        # value is a list of (position, CommentedMap) pairs from the YAML
        # merge key '<<'; keys not already present are copied in directly,
        # and each merged map learns about us so later changes propagate
        for v in value:
            v[1].add_referent(self)
            # NOTE: the inner loop deliberately rebinds v to the item value
            for k, v in v[1].items():
                if ordereddict.__contains__(self, k):
                    # existing (directly set) keys win over merged ones
                    continue
                ordereddict.__setitem__(self, k, v)
        self.merge.extend(value)
+
    def update_key_value(self, key):
        # type: (Any) -> None
        # called by a merged-in mapping when *key* changed or was deleted there
        if key in self._ok:
            # key is set on this mapping itself; nothing to refresh
            return
        for v in self.merge:
            if key in v[1]:
                # re-inherit the value from the first merge that still has it
                ordereddict.__setitem__(self, key, v[1][key])
                return
        # no merge provides the key any more: drop it
        ordereddict.__delitem__(self, key)
+
    def __deepcopy__(self, memo):
        # type: (Any) -> Any
        res = self.__class__()
        # register in memo before recursing so self-referencing structures
        # terminate instead of looping forever
        memo[id(self)] = res
        for k in self:
            res[k] = copy.deepcopy(self[k], memo)
        self.copy_attributes(res, memo=memo)
        return res
+
+
# based on brownie mappings
@classmethod  # type: ignore
def raise_immutable(cls, *args, **kwargs):
    # type: (Any, *Any, **Any) -> None
    # module-level classmethod: assigned into CommentedKeyMap below so that
    # every mutating method raises with the concrete class name
    raise TypeError('{} objects are immutable'.format(cls.__name__))
+
+
class CommentedKeyMap(CommentedBase, Mapping):  # type: ignore
    """This primarily exists to be able to roundtrip keys that are mappings.

    Immutable (all mutating methods raise TypeError) and therefore hashable,
    so instances can serve as mapping keys.
    """

    __slots__ = Comment.attrib, '_od'

    def __init__(self, *args, **kw):
        # type: (Any, Any) -> None
        if hasattr(self, '_od'):
            # already initialised once: treat re-init as mutation
            raise_immutable(self)
        try:
            self._od = ordereddict(*args, **kw)
        except TypeError:
            if PY2:
                # the PY2 C ordereddict rejects some mapping arguments;
                # retry with an items() sequence
                self._od = ordereddict(args[0].items())
            else:
                raise

    __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable

    # need to implement __getitem__, __iter__ and __len__
    def __getitem__(self, index):
        # type: (Any) -> Any
        return self._od[index]

    def __iter__(self):
        # type: () -> Iterator[Any]
        for x in self._od.__iter__():
            yield x

    def __len__(self):
        # type: () -> int
        return len(self._od)

    def __hash__(self):
        # type: () -> Any
        # hashable (unlike normal mappings) because it is immutable
        return hash(tuple(self.items()))

    def __repr__(self):
        # type: () -> Any
        if not hasattr(self, merge_attrib):
            return self._od.__repr__()
        return 'ordereddict(' + repr(list(self._od.items())) + ')'

    @classmethod
    def fromkeys(cls, keys, v=None):
        # type: (Any, Any, Any) -> Any
        # BUG FIX: the original signature was ``fromkeys(keys, v=None)``
        # under @classmethod, so the class object was bound to ``keys`` and
        # every call broke; ``cls`` now receives the class explicitly
        return cls(dict.fromkeys(keys, v))

    def _yaml_add_comment(self, comment, key=NoComment):
        # type: (Any, Optional[Any]) -> None
        # attach to a specific key when given, else as the map-level comment
        if key is not NoComment:
            self.yaml_key_comment_extend(key, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment, key):
        # type: (Any, Any) -> None
        self._yaml_add_comment(comment, key=key)

    def _yaml_get_columnX(self, key):
        # type: (Any) -> Any
        return self.ca.items[key][0].start_mark.column

    def _yaml_get_column(self, key):
        # type: (Any) -> Any
        """Best-effort column for aligning a new comment: prefer a
        neighbouring commented row, else the last commented row before
        *key* (self.ca.items is not ordered)."""
        column = None
        sel_idx = None
        pre, post = key - 1, key + 1
        if pre in self.ca.items:
            sel_idx = pre
        elif post in self.ca.items:
            sel_idx = post
        else:
            # self.ca.items is not ordered
            for row_idx, _k1 in enumerate(self):
                if row_idx >= key:
                    break
                if row_idx not in self.ca.items:
                    continue
                sel_idx = row_idx
        if sel_idx is not None:
            column = self._yaml_get_columnX(sel_idx)
        return column

    def _yaml_get_pre_comment(self):
        # type: () -> Any
        # install (or replace) the list that collects comments preceding
        # this node and hand it back for the caller to append to
        pre_comments = []  # type: List[Any]
        if self.ca.comment is None:
            self.ca.comment = [None, pre_comments]
        else:
            self.ca.comment[1] = pre_comments
        return pre_comments
+
+
class CommentedOrderedMap(CommentedMap):
    # round-trip type for explicitly ordered mappings; behaviour is entirely
    # inherited from CommentedMap, only the comment-attribute slot is declared
    __slots__ = (Comment.attrib,)
+
+
class CommentedSet(MutableSet, CommentedBase):  # type: ignore # NOQA
    """Round-trip set type.

    Element order is kept by storing members as keys of an ordereddict
    (values all None); comments attach via CommentedBase.
    """

    __slots__ = Comment.attrib, 'odict'

    def __init__(self, values=None):
        # type: (Any) -> None
        self.odict = ordereddict()
        MutableSet.__init__(self)
        if values is not None:
            self |= values  # type: ignore

    def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
        # type: (Any, Optional[Any], Optional[Any]) -> None
        """values is set to key to indicate a value attachment of comment"""
        if key is not NoComment:
            self.yaml_key_comment_extend(key, comment)
            return
        if value is not NoComment:
            self.yaml_value_comment_extend(value, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment, key):
        # type: (Any, Any) -> None
        """add on the value line, with value specified by the key"""
        self._yaml_add_comment(comment, value=key)

    def add(self, value):
        # type: (Any) -> None
        """Add an element."""
        self.odict[value] = None

    def discard(self, value):
        # type: (Any) -> None
        """Remove an element. Do not raise an exception if absent."""
        # BUG FIX: a plain ``del`` raised KeyError for missing elements,
        # violating this docstring and the MutableSet.discard contract
        if value in self.odict:
            del self.odict[value]

    def __contains__(self, x):
        # type: (Any) -> Any
        return x in self.odict

    def __iter__(self):
        # type: () -> Any
        for x in self.odict:
            yield x

    def __len__(self):
        # type: () -> int
        return len(self.odict)

    def __repr__(self):
        # type: () -> str
        return 'set({0!r})'.format(self.odict.keys())
+
+
class TaggedScalar(CommentedBase):
    """Scalar carrying an explicit (non-resolved) YAML tag for round-trip."""

    # the value and style attributes are set during roundtrip construction
    def __init__(self, value=None, style=None, tag=None):
        # type: (Any, Any, Any) -> None
        self.value = value
        self.style = style
        if tag is not None:
            self.yaml_set_tag(tag)

    def __str__(self):
        # type: () -> Any
        # returns self.value as-is; presumably a text type by the time this
        # is called (set during construction) -- not enforced here
        return self.value
+
+
def dump_comments(d, name="", sep='.', out=sys.stdout):
    # type: (Any, str, str, Any) -> None
    """
    recursively dump comments, all but the toplevel preceded by the path
    in dotted form x.0.a

    Only containers that carry a ``ca`` comment attribute (round-trip
    types) produce output; everything is written to *out*.
    """
    if isinstance(d, dict) and hasattr(d, 'ca'):
        if name:
            # BUG FIX: the path was written to sys.stdout, ignoring *out*
            out.write('{}\n'.format(name))
        out.write('{}\n'.format(d.ca))  # type: ignore
        for k in d:
            dump_comments(d[k], name=(name + sep + k) if name else k, sep=sep, out=out)
    elif isinstance(d, list) and hasattr(d, 'ca'):
        if name:
            out.write('{}\n'.format(name))  # BUG FIX: was sys.stdout
        out.write('{}\n'.format(d.ca))  # type: ignore
        for idx, k in enumerate(d):
            dump_comments(
                k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out
            )
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/compat.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/compat.py
new file mode 100644
index 0000000000..839166f254
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/compat.py
@@ -0,0 +1,324 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+# partially from package six by Benjamin Peterson
+
+import sys
+import os
+import types
+import traceback
+from abc import abstractmethod
+
+
+# fmt: off
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA
+ from typing import Optional # NOQA
+# fmt: on
+
+_DEFAULT_YAML_VERSION = (1, 2)
+
# prefer the C-extension ordereddict; otherwise fall back to a subclass of
# collections.OrderedDict that emulates its positional insert()
try:
    from ruamel.ordereddict import ordereddict
except:  # NOQA
    try:
        from collections import OrderedDict
    except ImportError:
        from ordereddict import OrderedDict  # type: ignore
    # to get the right name import ... as ordereddict doesn't do that

    class ordereddict(OrderedDict):  # type: ignore
        if not hasattr(OrderedDict, 'insert'):

            def insert(self, pos, key, value):
                # type: (int, Any, Any) -> None
                # emulate positional insert by rebuilding the mapping with
                # key/value placed at index pos
                if pos >= len(self):
                    self[key] = value
                    return
                od = ordereddict()
                od.update(self)
                for k in od:
                    del self[k]
                for index, old_key in enumerate(od):
                    if pos == index:
                        self[key] = value
                    self[old_key] = od[old_key]
+
+
# interpreter major-version flags driving the 2/3 compatibility shims below
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
+
+
# text conversion shims: identity functions on PY3 (str is already unicode),
# real encode/convert operations on PY2
if PY3:

    def utf8(s):
        # type: (str) -> str
        return s

    def to_str(s):
        # type: (str) -> str
        return s

    def to_unicode(s):
        # type: (str) -> str
        return s


else:
    if False:
        # never executed; only makes the name 'unicode' known to mypy
        unicode = str

    def utf8(s):
        # type: (unicode) -> str
        return s.encode('utf-8')

    def to_str(s):
        # type: (str) -> str
        return str(s)

    def to_unicode(s):
        # type: (str) -> unicode
        return unicode(s)  # NOQA
+
+
# six-style type aliases and IO classes, resolved per interpreter version
if PY3:
    string_types = str
    integer_types = int
    class_types = type
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
    unichr = chr
    import io

    StringIO = io.StringIO
    BytesIO = io.BytesIO
    # have unlimited precision
    no_limit_int = int
    from collections.abc import Hashable, MutableSequence, MutableMapping, Mapping  # NOQA

else:
    string_types = basestring  # NOQA
    integer_types = (int, long)  # NOQA
    class_types = (type, types.ClassType)
    text_type = unicode  # NOQA
    binary_type = str

    # to allow importing
    unichr = unichr
    from StringIO import StringIO as _StringIO

    StringIO = _StringIO
    import cStringIO

    BytesIO = cStringIO.StringIO
    # have unlimited precision
    no_limit_int = long  # NOQA not available on Python 3
    from collections import Hashable, MutableSequence, MutableMapping, Mapping  # NOQA
+
if False:  # MYPY
    # type-checking-only aliases for the various stream arguments
    # StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO]
    # StreamType = Union[BinaryIO, IO[str], StringIO]  # type: ignore
    StreamType = Any

    StreamTextType = StreamType  # Union[Text, StreamType]
    VersionType = Union[List[int], str, Tuple[int, int]]

if PY3:
    builtins_module = 'builtins'
else:
    builtins_module = '__builtin__'

# 4 on wide (UCS-4) builds, 2 on narrow (UTF-16) builds
UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2
+
+
def with_metaclass(meta, *bases):
    # type: (Any, Any) -> Any
    """Create a base class with a metaclass."""
    # calling the metaclass directly builds a class whose metaclass is
    # *meta*, avoiding the differing PY2/PY3 metaclass syntax
    temp_name = 'NewBase'
    return meta(temp_name, bases, {})
+
+
# debug bit flags; a combination can be supplied via the RUAMELDEBUG env var
DBG_TOKEN = 1
DBG_EVENT = 2
DBG_NODE = 4


_debug = None  # type: Optional[int]
if 'RUAMELDEBUG' in os.environ:
    _debugx = os.environ.get('RUAMELDEBUG')
    if _debugx is None:
        _debug = 0
    else:
        _debug = int(_debugx)
+
+
# only defined when debugging is switched on via RUAMELDEBUG
if bool(_debug):

    class ObjectCounter(object):
        """Debug helper: counts how often each key is seen via __call__."""

        def __init__(self):
            # type: () -> None
            self.map = {}  # type: Dict[Any, Any]

        def __call__(self, k):
            # type: (Any) -> None
            self.map[k] = self.map.get(k, 0) + 1

        def dump(self):
            # type: () -> None
            for k in sorted(self.map):
                sys.stdout.write('{} -> {}'.format(k, self.map[k]))

    object_counter = ObjectCounter()
+
+
+# used from yaml util when testing
+def dbg(val=None):
+ # type: (Any) -> Any
+ global _debug
+ if _debug is None:
+ # set to true or false
+ _debugx = os.environ.get('YAMLDEBUG')
+ if _debugx is None:
+ _debug = 0
+ else:
+ _debug = int(_debugx)
+ if val is None:
+ return _debug
+ return _debug & val
+
+
class Nprint(object):
    """Debug printer: a no-op unless the module-level _debug mask is set.

    Writes to stdout, or appends to *file_name* when given; can force the
    process to exit after a configured number of prints (set_max_print).
    """

    def __init__(self, file_name=None):
        # type: (Any) -> None
        self._max_print = None  # type: Any
        self._count = None  # type: Any
        self._file_name = file_name

    def __call__(self, *args, **kw):
        # type: (Any, Any) -> None
        if not bool(_debug):
            # debugging disabled: do nothing
            return
        out = sys.stdout if self._file_name is None else open(self._file_name, 'a')
        dbgprint = print  # to fool checking for print statements by dv utility
        kw1 = kw.copy()
        kw1['file'] = out
        dbgprint(*args, **kw1)
        out.flush()
        if self._max_print is not None:
            if self._count is None:
                self._count = self._max_print
            self._count -= 1
            if self._count == 0:
                # budget exhausted: dump a stack trace and bail out
                dbgprint('forced exit\n')
                traceback.print_stack()
                out.flush()
                sys.exit(0)
        if self._file_name:
            out.close()

    def set_max_print(self, i):
        # type: (int) -> None
        # arm the forced-exit countdown; reset on next __call__
        self._max_print = i
        self._count = None
+
+
# module-level debug printers: nprint to stdout, nprintf appending to a log file
nprint = Nprint()
nprintf = Nprint('/var/tmp/ruamel.yaml.log')
+
+# char checkers following production rules
+
+
def check_namespace_char(ch):
    # type: (Any) -> bool
    """Return True when *ch* falls inside one of the YAML namespace
    character ranges (production rules), False otherwise."""
    if ch == u'\uFEFF':
        # the byte order mark is explicitly excluded
        return False
    return (
        u'\x21' <= ch <= u'\x7E'  # printable ASCII, ! to ~
        or u'\xA0' <= ch <= u'\uD7FF'
        or u'\uE000' <= ch <= u'\uFFFD'
        or u'\U00010000' <= ch <= u'\U0010FFFF'
    )
+
+
def check_anchorname_char(ch):
    # type: (Any) -> bool
    """An anchor-name character is any namespace character that is not a
    YAML flow indicator (comma, brackets, braces)."""
    return False if ch in u',[]{}' else check_namespace_char(ch)
+
+
def version_tnf(t1, t2=None):
    # type: (Any, Any) -> Any
    """
    return True if ruamel.yaml version_info < t1, None if t2 is specified and bigger else False
    """
    # imported lazily to avoid a circular import at module load time
    from ruamel.yaml import version_info  # NOQA

    if version_info < t1:
        return True
    if t2 is not None and version_info < t2:
        return None
    return False
+
+
class MutableSliceableSequence(MutableSequence):  # type: ignore
    """MutableSequence mixin adding slice support on top of three abstract
    single-index primitives (__getsingleitem__/__setsingleitem__/
    __delsingleitem__) that subclasses must provide."""

    __slots__ = ()

    def __getitem__(self, index):
        # type: (Any) -> Any
        if not isinstance(index, slice):
            return self.__getsingleitem__(index)
        # slice: build a new instance of the same type from the selection
        return type(self)([self[i] for i in range(*index.indices(len(self)))])  # type: ignore

    def __setitem__(self, index, value):
        # type: (Any, Any) -> None
        if not isinstance(index, slice):
            return self.__setsingleitem__(index, value)
        assert iter(value)
        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
        if index.step is None:
            # simple slice: replace the range by deleting then re-inserting
            del self[index.start : index.stop]
            for elem in reversed(value):
                self.insert(0 if index.start is None else index.start, elem)
        else:
            # extended slice: value must have exactly as many elements as
            # the slice selects
            range_parms = index.indices(len(self))
            nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1
            # need to test before changing, in case TypeError is caught
            if nr_assigned_items < len(value):
                raise TypeError(
                    'too many elements in value {} < {}'.format(nr_assigned_items, len(value))
                )
            elif nr_assigned_items > len(value):
                raise TypeError(
                    'not enough elements in value {} > {}'.format(
                        nr_assigned_items, len(value)
                    )
                )
            for idx, i in enumerate(range(*range_parms)):
                self[i] = value[idx]

    def __delitem__(self, index):
        # type: (Any) -> None
        if not isinstance(index, slice):
            return self.__delsingleitem__(index)
        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
        # delete from the back so remaining indices stay valid
        for i in reversed(range(*index.indices(len(self)))):
            del self[i]

    @abstractmethod
    def __getsingleitem__(self, index):
        # type: (Any) -> Any
        raise IndexError

    @abstractmethod
    def __setsingleitem__(self, index, value):
        # type: (Any, Any) -> None
        raise IndexError

    @abstractmethod
    def __delsingleitem__(self, index):
        # type: (Any) -> None
        raise IndexError
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/composer.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/composer.py
new file mode 100644
index 0000000000..d8d3d11e1b
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/composer.py
@@ -0,0 +1,238 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+import warnings
+
+from ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
+from ruamel.yaml.compat import utf8, nprint, nprintf # NOQA
+
+from ruamel.yaml.events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Composer', 'ComposerError']
+
+
class ComposerError(MarkedYAMLError):
    # raised for structural problems while composing nodes, e.g. an
    # undefined alias or multiple documents where a single one was expected
    pass
+
+
class Composer(object):
    """Builds the node graph (representation tree) from the parser's event
    stream, resolving aliases and implicit tags along the way."""

    def __init__(self, loader=None):
        # type: (Any) -> None
        self.loader = loader
        if self.loader is not None and getattr(self.loader, '_composer', None) is None:
            # register ourselves on the loader if no composer is wired yet
            self.loader._composer = self
        # anchor name -> already composed node, per document
        self.anchors = {}  # type: Dict[Any, Any]

    @property
    def parser(self):
        # type: () -> Any
        if hasattr(self.loader, 'typ'):
            # NOTE(review): return value discarded -- presumably accessing
            # .parser initialises loader._parser as a side effect; confirm
            self.loader.parser
        return self.loader._parser

    @property
    def resolver(self):
        # type: () -> Any
        # assert self.loader._resolver is not None
        if hasattr(self.loader, 'typ'):
            # NOTE(review): same side-effect pattern as the parser property
            self.loader.resolver
        return self.loader._resolver

    def check_node(self):
        # type: () -> Any
        # Drop the STREAM-START event.
        if self.parser.check_event(StreamStartEvent):
            self.parser.get_event()

        # If there are more documents available?
        return not self.parser.check_event(StreamEndEvent)

    def get_node(self):
        # type: () -> Any
        # Get the root node of the next document.
        if not self.parser.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        # type: () -> Any
        # Drop the STREAM-START event.
        self.parser.get_event()

        # Compose a document if the stream is not empty.
        document = None  # type: Any
        if not self.parser.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.parser.check_event(StreamEndEvent):
            event = self.parser.get_event()
            raise ComposerError(
                'expected a single document in the stream',
                document.start_mark,
                'but found another document',
                event.start_mark,
            )

        # Drop the STREAM-END event.
        self.parser.get_event()

        return document

    def compose_document(self):
        # type: () -> Any
        # Drop the DOCUMENT-START event.
        self.parser.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.parser.get_event()

        # anchors are document-scoped
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        # type: (Any, Any) -> Any
        """Compose one node: alias, scalar, sequence or mapping, depending
        on the next event; registers anchors and warns on re-used ones."""
        if self.parser.check_event(AliasEvent):
            event = self.parser.get_event()
            alias = event.anchor
            if alias not in self.anchors:
                raise ComposerError(
                    None, None, 'found undefined alias %r' % utf8(alias), event.start_mark
                )
            # aliases share the node object composed for the anchor
            return self.anchors[alias]
        event = self.parser.peek_event()
        anchor = event.anchor
        if anchor is not None:  # have an anchor
            if anchor in self.anchors:
                # raise ComposerError(
                #     "found duplicate anchor %r; first occurrence"
                #     % utf8(anchor), self.anchors[anchor].start_mark,
                #     "second occurrence", event.start_mark)
                ws = (
                    '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
                    '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
                )
                warnings.warn(ws, ReusedAnchorWarning)
        self.resolver.descend_resolver(parent, index)
        # exactly one of the three branches matches the peeked event
        if self.parser.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.parser.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.parser.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.resolver.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        # type: (Any) -> Any
        event = self.parser.get_event()
        tag = event.tag
        if tag is None or tag == u'!':
            # no explicit tag: resolve the implicit one from the value
            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(
            tag,
            event.value,
            event.start_mark,
            event.end_mark,
            style=event.style,
            comment=event.comment,
            anchor=anchor,
        )
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        # type: (Any) -> Any
        start_event = self.parser.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(
            tag,
            [],
            start_event.start_mark,
            None,
            flow_style=start_event.flow_style,
            comment=start_event.comment,
            anchor=anchor,
        )
        # register before composing children so self-referencing aliases work
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.parser.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            if node.comment is not None:
                nprint(
                    'Warning: unexpected end_event commment in sequence '
                    'node {}'.format(node.flow_style)
                )
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node

    def compose_mapping_node(self, anchor):
        # type: (Any) -> Any
        start_event = self.parser.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(
            tag,
            [],
            start_event.start_mark,
            None,
            flow_style=start_event.flow_style,
            comment=start_event.comment,
            anchor=anchor,
        )
        # register before composing children so self-referencing aliases work
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.parser.check_event(MappingEndEvent):
            # key_event = self.parser.peek_event()
            item_key = self.compose_node(node, None)
            # if item_key in node.value:
            #     raise ComposerError("while composing a mapping",
            #             start_event.start_mark,
            #             "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            # node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node

    def check_end_doc_comment(self, end_event, node):
        # type: (Any, Any) -> None
        if end_event.comment and end_event.comment[1]:
            # pre comments on an end_event, no following to move to
            if node.comment is None:
                node.comment = [None, None]
            # NOTE(review): comparing a node against ScalarEvent -- likely
            # meant ScalarNode; the assert can never fire as written
            assert not isinstance(node, ScalarEvent)
            # this is a post comment on a mapping node, add as third element
            # in the list
            node.comment.append(end_event.comment[1])
            end_event.comment[1] = None
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/configobjwalker.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/configobjwalker.py
new file mode 100644
index 0000000000..cbc6148038
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/configobjwalker.py
@@ -0,0 +1,14 @@
+# coding: utf-8
+
+import warnings
+
+from ruamel.yaml.util import configobj_walker as new_configobj_walker
+
+if False: # MYPY
+ from typing import Any # NOQA
+
+
def configobj_walker(cfg):
    # type: (Any) -> Any
    # deprecated shim: the implementation moved to ruamel.yaml.util;
    # warn callers, then delegate
    warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code')
    return new_configobj_walker(cfg)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/constructor.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/constructor.py
new file mode 100644
index 0000000000..3b16fe5e9f
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/constructor.py
@@ -0,0 +1,1806 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division
+
+import datetime
+import base64
+import binascii
+import re
+import sys
+import types
+import warnings
+
+# fmt: off
+from ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning,
+ MantissaNoDotYAML1_1Warning)
+from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.nodes import (SequenceNode, MappingNode, ScalarNode)
+from ruamel.yaml.compat import (utf8, builtins_module, to_str, PY2, PY3, # NOQA
+ text_type, nprint, nprintf, version_tnf)
+from ruamel.yaml.compat import ordereddict, Hashable, MutableSequence # type: ignore
+from ruamel.yaml.compat import MutableMapping # type: ignore
+
+from ruamel.yaml.comments import * # NOQA
+from ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet,
+ CommentedKeySeq, CommentedSeq, TaggedScalar,
+ CommentedKeyMap)
+from ruamel.yaml.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString,
+ LiteralScalarString, FoldedScalarString,
+ PlainScalarString, ScalarString,)
+from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from ruamel.yaml.scalarfloat import ScalarFloat
+from ruamel.yaml.scalarbool import ScalarBoolean
+from ruamel.yaml.timestamp import TimeStamp
+from ruamel.yaml.util import RegExp
+
+if False: # MYPY
+ from typing import Any, Dict, List, Set, Generator, Union, Optional # NOQA
+
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError', 'RoundTripConstructor']
+# fmt: on
+
+
class ConstructorError(MarkedYAMLError):
    # raised when a node cannot be turned into a Python object
    pass
+
+
class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning):
    # warning issued for duplicate mapping/set keys while they are still
    # tolerated (allow_duplicate_keys is None)
    pass
+
+
class DuplicateKeyError(MarkedYAMLFutureWarning):
    # raised for duplicate mapping/set keys when duplicates are disallowed
    pass
+
+
+class BaseConstructor(object):
+
+ yaml_constructors = {} # type: Dict[Any, Any]
+ yaml_multi_constructors = {} # type: Dict[Any, Any]
+
+ def __init__(self, preserve_quotes=None, loader=None):
+ # type: (Optional[bool], Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_constructor', None) is None:
+ self.loader._constructor = self
+ self.loader = loader
+ self.yaml_base_dict_type = dict
+ self.yaml_base_list_type = list
+ self.constructed_objects = {} # type: Dict[Any, Any]
+ self.recursive_objects = {} # type: Dict[Any, Any]
+ self.state_generators = [] # type: List[Any]
+ self.deep_construct = False
+ self._preserve_quotes = preserve_quotes
+ self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16))
+
    @property
    def composer(self):
        # type: () -> Any
        if hasattr(self.loader, 'typ'):
            # new-style loader exposes the composer directly
            return self.loader.composer
        try:
            return self.loader._composer
        except AttributeError:
            # debugging aid: show which loader/constructor combination was
            # mis-wired, then re-raise
            sys.stdout.write('slt {}\n'.format(type(self)))
            sys.stdout.write('slc {}\n'.format(self.loader._composer))
            sys.stdout.write('{}\n'.format(dir(self)))
            raise
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
    def check_data(self):
        # type: () -> Any
        """Return truthy while more documents are available in the stream."""
        # If there are more documents available?
        return self.composer.check_node()
+
    def get_data(self):
        # type: () -> Any
        # Construct and return the next document.
        # (implicitly returns None when the stream is exhausted)
        if self.composer.check_node():
            return self.construct_document(self.composer.get_node())
+
+ def get_single_data(self):
+ # type: () -> Any
+ # Ensure that the stream contains a single document and construct it.
+ node = self.composer.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
    def construct_document(self, node):
        # type: (Any) -> Any
        """Construct the Python object for a document root node, drain any
        pending two-step (generator) constructors, then reset per-document
        state."""
        data = self.construct_object(node)
        while bool(self.state_generators):
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for _dummy in generator:
                    # each step fills in more of a partially built object
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data
+
    def construct_object(self, node, deep=False):
        # type: (Any, bool) -> Any
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction
        """
        # memoised: each node is constructed at most once per document
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            # node is on the current construction stack: return the
            # placeholder instead of recursing forever
            return self.recursive_objects[node]
            # raise ConstructorError(
            #     None, None, 'found unconstructable recursive node', node.start_mark
            # )
        self.recursive_objects[node] = None
        data = self.construct_non_recursive_object(node)

        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data
+
    def construct_non_recursive_object(self, node, tag=None):
        # type: (Any, Optional[str]) -> Any
        """Pick the constructor for the node's tag (exact match first, then
        multi-constructor prefixes, then the None fallbacks, finally the
        generic per-node-kind constructor) and run it. Generator-based
        constructors are advanced one step; the rest is deferred unless
        deep construction is active."""
        constructor = None  # type: Any
        tag_suffix = None
        if tag is None:
            tag = node.tag
        if tag in self.yaml_constructors:
            constructor = self.yaml_constructors[tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if tag.startswith(tag_prefix):
                    # multi-constructors receive the remainder of the tag
                    tag_suffix = tag[len(tag_prefix) :]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # two-step construction: first yield gives the (possibly empty)
            # object, remaining steps fill it in
            generator = data
            data = next(generator)
            if self.deep_construct:
                for _dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        return data
+
    def construct_scalar(self, node):
        # type: (Any) -> Any
        """Return the raw scalar value; raise for non-scalar nodes."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(
                None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark
            )
        return node.value
+
    def construct_sequence(self, node, deep=False):
        # type: (Any, bool) -> Any
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction
        """
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark
            )
        return [self.construct_object(child, deep=deep) for child in node.value]
+
    def construct_mapping(self, node, deep=False):
        # type: (Any, bool) -> Any
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
            )
        total_mapping = self.yaml_base_dict_type()
        if getattr(node, 'merge', None) is not None:
            # merged-in key/value pairs first and unchecked, so the node's
            # own pairs (processed later) override them
            todo = [(node.merge, False), (node.value, False)]
        else:
            todo = [(node.value, True)]
        for values, check in todo:
            mapping = self.yaml_base_dict_type()  # type: Dict[Any, Any]
            for key_node, value_node in values:
                # keys can be list -> deep
                key = self.construct_object(key_node, deep=True)
                # lists are not hashable, but tuples are
                if not isinstance(key, Hashable):
                    if isinstance(key, list):
                        key = tuple(key)
                if PY2:
                    # PY2 has no Hashable short-cut worth trusting: try it
                    try:
                        hash(key)
                    except TypeError as exc:
                        raise ConstructorError(
                            'while constructing a mapping',
                            node.start_mark,
                            'found unacceptable key (%s)' % exc,
                            key_node.start_mark,
                        )
                else:
                    if not isinstance(key, Hashable):
                        raise ConstructorError(
                            'while constructing a mapping',
                            node.start_mark,
                            'found unhashable key',
                            key_node.start_mark,
                        )

                value = self.construct_object(value_node, deep=deep)
                if check:
                    # duplicate detection only applies to the node's own pairs
                    if self.check_mapping_key(node, key_node, mapping, key, value):
                        mapping[key] = value
                else:
                    mapping[key] = value
            total_mapping.update(mapping)
        return total_mapping
+
    def check_mapping_key(self, node, key_node, mapping, key, value):
        # type: (Any, Any, Any, Any, Any) -> bool
        """return True if key is unique"""
        if key in mapping:
            if not self.allow_duplicate_keys:
                mk = mapping.get(key)
                if PY2:
                    # encode to bytes so the error message shows no u'' prefixes
                    if isinstance(key, unicode):
                        key = key.encode('utf-8')
                    if isinstance(value, unicode):
                        value = value.encode('utf-8')
                    if isinstance(mk, unicode):
                        mk = mk.encode('utf-8')
                args = [
                    'while constructing a mapping',
                    node.start_mark,
                    'found duplicate key "{}" with value "{}" '
                    '(original value: "{}")'.format(key, value, mk),
                    key_node.start_mark,
                    """
                    To suppress this check see:
                    http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                    """,
                    """\
                    Duplicate keys will become an error in future releases, and are errors
                    by default when using the new API.
                    """,
                ]
                # allow_duplicate_keys is None when not explicitly configured:
                # warn now, error in future releases
                if self.allow_duplicate_keys is None:
                    warnings.warn(DuplicateKeyFutureWarning(*args))
                else:
                    raise DuplicateKeyError(*args)
            return False
        return True
+
    def check_set_key(self, node, key_node, setting, key):
        # type: (Any, Any, Any, Any) -> None
        # NOTE: the type comment used to list five parameters; the method
        # takes four (node, key_node, setting, key) besides self.
        """Warn or raise when *key* is already present in the set *setting*."""
        if key in setting:
            if not self.allow_duplicate_keys:
                if PY2:
                    # encode to bytes so the error message shows no u'' prefix
                    if isinstance(key, unicode):
                        key = key.encode('utf-8')
                args = [
                    'while constructing a set',
                    node.start_mark,
                    'found duplicate key "{}"'.format(key),
                    key_node.start_mark,
                    """
                    To suppress this check see:
                    http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                    """,
                    """\
                    Duplicate keys will become an error in future releases, and are errors
                    by default when using the new API.
                    """,
                ]
                # None means "not explicitly configured": warn, don't raise
                if self.allow_duplicate_keys is None:
                    warnings.warn(DuplicateKeyFutureWarning(*args))
                else:
                    raise DuplicateKeyError(*args)
+
+ def construct_pairs(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ # type: (Any, Any) -> None
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ # type: (Any, Any) -> None
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+
class SafeConstructor(BaseConstructor):
    # Resolves only the standard YAML tags; never instantiates arbitrary
    # Python objects, hence usable on documents from untrusted sources.

    def construct_scalar(self, node):
        # type: (Any) -> Any
        # a mapping node may provide its scalar representation through a
        # 'tag:yaml.org,2002:value' key (the YAML "value" type)
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == u'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)

    def flatten_mapping(self, node):
        # type: (Any) -> Any
        """
        This implements the merge key feature http://yaml.org/type/merge.html
        by inserting keys from the merge dict/list of dicts if not yet
        available in this node
        """
        merge = []  # type: List[Any]
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == u'tag:yaml.org,2002:merge':
                if merge:  # double << key
                    if self.allow_duplicate_keys:
                        del node.value[index]
                        index += 1
                        continue
                    args = [
                        'while constructing a mapping',
                        node.start_mark,
                        'found duplicate key "{}"'.format(key_node.value),
                        key_node.start_mark,
                        """
                        To suppress this check see:
                        http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                        """,
                        """\
                        Duplicate keys will become an error in future releases, and are errors
                        by default when using the new API.
                        """,
                    ]
                    if self.allow_duplicate_keys is None:
                        warnings.warn(DuplicateKeyFutureWarning(*args))
                    else:
                        raise DuplicateKeyError(*args)
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError(
                                'while constructing a mapping',
                                node.start_mark,
                                'expected a mapping for merging, but found %s' % subnode.id,
                                subnode.start_mark,
                            )
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # reverse so that earlier dicts in the merge list win
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'expected a mapping or list of mappings for merging, '
                        'but found %s' % value_node.id,
                        value_node.start_mark,
                    )
            elif key_node.tag == u'tag:yaml.org,2002:value':
                key_node.tag = u'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if bool(merge):
            node.merge = merge  # separate merge keys to be able to update without duplicate
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        # type: (Any, bool) -> Any
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction
        """
        if isinstance(node, MappingNode):
            # resolve << merge keys before the base class builds the dict
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)

    def construct_yaml_null(self, node):
        # type: (Any) -> Any
        # construct (and discard) the scalar to validate the node
        self.construct_scalar(node)
        return None

    # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does
    bool_values = {
        u'yes': True,
        u'no': False,
        u'y': True,
        u'n': False,
        u'true': True,
        u'false': False,
        u'on': True,
        u'off': False,
    }

    def construct_yaml_bool(self, node):
        # type: (Any) -> bool
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node):
        # type: (Any) -> int
        value_s = to_str(self.construct_scalar(node))
        value_s = value_s.replace('_', "")
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            value_s = value_s[1:]
        if value_s == '0':
            return 0
        elif value_s.startswith('0b'):
            return sign * int(value_s[2:], 2)
        elif value_s.startswith('0x'):
            return sign * int(value_s[2:], 16)
        elif value_s.startswith('0o'):
            return sign * int(value_s[2:], 8)
        elif self.resolver.processing_version == (1, 1) and value_s[0] == '0':
            # YAML 1.1 treats a plain leading zero as octal
            return sign * int(value_s, 8)
        elif self.resolver.processing_version == (1, 1) and ':' in value_s:
            # YAML 1.1 sexagesimal (base 60), e.g. 190:20:30
            digits = [int(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        else:
            return sign * int(value_s)

    # compute +inf portably by squaring until the value stops growing
    inf_value = 1e300
    while inf_value != inf_value * inf_value:
        inf_value *= inf_value
    nan_value = -inf_value / inf_value  # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        # type: (Any) -> float
        value_so = to_str(self.construct_scalar(node))
        value_s = value_so.replace('_', "").lower()
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            value_s = value_s[1:]
        if value_s == '.inf':
            return sign * self.inf_value
        elif value_s == '.nan':
            return self.nan_value
        elif self.resolver.processing_version != (1, 2) and ':' in value_s:
            # YAML 1.1 sexagesimal float, e.g. 190:20:30.15
            digits = [float(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        else:
            if self.resolver.processing_version != (1, 2) and 'e' in value_s:
                # value_s is lower case independent of input
                mantissa, exponent = value_s.split('e')
                if '.' not in mantissa:
                    # YAML 1.1 requires a dot in the mantissa of exponentials
                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
            return sign * float(value_s)

    if PY3:

        def construct_yaml_binary(self, node):
            # type: (Any) -> Any
            try:
                value = self.construct_scalar(node).encode('ascii')
            except UnicodeEncodeError as exc:
                raise ConstructorError(
                    None,
                    None,
                    'failed to convert base64 data into ascii: %s' % exc,
                    node.start_mark,
                )
            try:
                # decodestring is the pre-3.1 spelling of decodebytes
                if hasattr(base64, 'decodebytes'):
                    return base64.decodebytes(value)
                else:
                    return base64.decodestring(value)
            except binascii.Error as exc:
                raise ConstructorError(
                    None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
                )

    else:

        def construct_yaml_binary(self, node):
            # type: (Any) -> Any
            value = self.construct_scalar(node)
            try:
                return to_str(value).decode('base64')
            except (binascii.Error, UnicodeEncodeError) as exc:
                raise ConstructorError(
                    None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
                )

    timestamp_regexp = RegExp(
        u"""^(?P<year>[0-9][0-9][0-9][0-9])
          -(?P<month>[0-9][0-9]?)
          -(?P<day>[0-9][0-9]?)
          (?:((?P<t>[Tt])|[ \\t]+) # explictly not retaining extra spaces
          (?P<hour>[0-9][0-9]?)
          :(?P<minute>[0-9][0-9])
          :(?P<second>[0-9][0-9])
          (?:\\.(?P<fraction>[0-9]*))?
          (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
          (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
        re.X,
    )

    def construct_yaml_timestamp(self, node, values=None):
        # type: (Any, Any) -> Any
        # *values* may be a pre-parsed regexp groupdict (used by subclasses)
        if values is None:
            try:
                match = self.timestamp_regexp.match(node.value)
            except TypeError:
                match = None
            if match is None:
                raise ConstructorError(
                    None,
                    None,
                    'failed to construct timestamp from "{}"'.format(node.value),
                    node.start_mark,
                )
            values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            # date only, no time part
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # microsecond precision, rounded on the 7th digit
            fraction_s = values['fraction'][:6]
            while len(fraction_s) < 6:
                fraction_s += '0'
            fraction = int(fraction_s)
            if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4:
                fraction += 1
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            minutes = values['tz_minute']
            tz_minute = int(minutes) if minutes else 0
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        # should do something else instead (or hook this up to the preceding if statement
        # in reverse
        # if delta is None:
        #   return datetime.datetime(year, month, day, hour, minute, second, fraction)
        # return datetime.datetime(year, month, day, hour, minute, second, fraction,
        #                          datetime.timezone.utc)
        # the above is not good enough though, should provide tzinfo. In Python3 that is easily
        # doable drop that kind of support for Python2 as it has not native tzinfo
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            # normalize to UTC by subtracting the offset; result stays naive
            data -= delta
        return data

    def construct_yaml_omap(self, node):
        # type: (Any) -> Any
        # Note: we do now check for duplicate keys
        omap = ordereddict()
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                'while constructing an ordered map',
                node.start_mark,
                'expected a sequence, but found %s' % node.id,
                node.start_mark,
            )
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    'expected a mapping of length 1, but found %s' % subnode.id,
                    subnode.start_mark,
                )
            if len(subnode.value) != 1:
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    'expected a single mapping item, but found %d items' % len(subnode.value),
                    subnode.start_mark,
                )
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            assert key not in omap
            value = self.construct_object(value_node)
            omap[key] = value

    def construct_yaml_pairs(self, node):
        # type: (Any) -> Any
        # Note: the same code as `construct_yaml_omap`.
        pairs = []  # type: List[Any]
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                'while constructing pairs',
                node.start_mark,
                'expected a sequence, but found %s' % node.id,
                node.start_mark,
            )
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    'while constructing pairs',
                    node.start_mark,
                    'expected a mapping of length 1, but found %s' % subnode.id,
                    subnode.start_mark,
                )
            if len(subnode.value) != 1:
                raise ConstructorError(
                    'while constructing pairs',
                    node.start_mark,
                    'expected a single mapping item, but found %d items' % len(subnode.value),
                    subnode.start_mark,
                )
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node):
        # type: (Any) -> Any
        # two-step (generator) construction to support recursive nodes
        data = set()  # type: Set[Any]
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node):
        # type: (Any) -> Any
        value = self.construct_scalar(node)
        if PY3:
            return value
        # PY2: prefer a plain (byte) str when the text is pure ASCII
        try:
            return value.encode('ascii')
        except UnicodeEncodeError:
            return value

    def construct_yaml_seq(self, node):
        # type: (Any) -> Any
        # two-step (generator) construction to support recursive nodes
        data = self.yaml_base_list_type()  # type: List[Any]
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        # type: (Any) -> Any
        # two-step (generator) construction to support recursive nodes
        data = self.yaml_base_dict_type()  # type: Dict[Any, Any]
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node, cls):
        # type: (Any, Any) -> Any
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node):
        # type: (Any) -> None
        raise ConstructorError(
            None,
            None,
            'could not determine a constructor for the tag %r' % utf8(node.tag),
            node.start_mark,
        )
+
+
# register the handlers for the standard YAML tags on SafeConstructor
SafeConstructor.add_constructor(u'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int)

SafeConstructor.add_constructor(
    u'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float
)

SafeConstructor.add_constructor(
    u'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary
)

SafeConstructor.add_constructor(
    u'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp
)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap)

SafeConstructor.add_constructor(
    u'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs
)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq)

SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map)

# fallback for unrecognized tags
SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined)

if PY2:

    # minimal stand-in for a Python 2 old-style class; used by
    # Constructor.make_python_instance to create old-style instances
    class classobj:
        pass
+
+
class Constructor(SafeConstructor):
    # Full constructor: additionally resolves the python/... tags.
    # NOTE(security): tags such as !!python/object/apply can instantiate
    # arbitrary Python objects and thus execute arbitrary code; never use
    # this constructor on untrusted input.

    def construct_python_str(self, node):
        # type: (Any) -> Any
        return utf8(self.construct_scalar(node))

    def construct_python_unicode(self, node):
        # type: (Any) -> Any
        return self.construct_scalar(node)

    if PY3:

        def construct_python_bytes(self, node):
            # type: (Any) -> Any
            try:
                value = self.construct_scalar(node).encode('ascii')
            except UnicodeEncodeError as exc:
                raise ConstructorError(
                    None,
                    None,
                    'failed to convert base64 data into ascii: %s' % exc,
                    node.start_mark,
                )
            try:
                # decodestring is the pre-3.1 spelling of decodebytes
                if hasattr(base64, 'decodebytes'):
                    return base64.decodebytes(value)
                else:
                    return base64.decodestring(value)
            except binascii.Error as exc:
                raise ConstructorError(
                    None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
                )

    def construct_python_long(self, node):
        # type: (Any) -> int
        val = self.construct_yaml_int(node)
        if PY3:
            return val
        return int(val)

    def construct_python_complex(self, node):
        # type: (Any) -> Any
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        # type: (Any) -> Any
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark):
        # type: (Any, Any) -> Any
        """Import and return the module named in the tag suffix."""
        if not name:
            raise ConstructorError(
                'while constructing a Python module',
                mark,
                'expected non-empty name appended to the tag',
                mark,
            )
        try:
            __import__(name)
        except ImportError as exc:
            raise ConstructorError(
                'while constructing a Python module',
                mark,
                'cannot find module %r (%s)' % (utf8(name), exc),
                mark,
            )
        return sys.modules[name]

    def find_python_name(self, name, mark):
        # type: (Any, Any) -> Any
        """Resolve a dotted name to a Python object, importing as needed."""
        if not name:
            raise ConstructorError(
                'while constructing a Python object',
                mark,
                'expected non-empty name appended to the tag',
                mark,
            )
        if u'.' in name:
            # try progressively shorter module prefixes until one imports
            lname = name.split('.')
            lmodule_name = lname
            lobject_name = []  # type: List[Any]
            while len(lmodule_name) > 1:
                lobject_name.insert(0, lmodule_name.pop())
                module_name = '.'.join(lmodule_name)
                try:
                    __import__(module_name)
                    # object_name = '.'.join(object_name)
                    break
                except ImportError:
                    continue
        else:
            # bare name: look it up in the builtins module
            module_name = builtins_module
            lobject_name = [name]
        try:
            __import__(module_name)
        except ImportError as exc:
            raise ConstructorError(
                'while constructing a Python object',
                mark,
                'cannot find module %r (%s)' % (utf8(module_name), exc),
                mark,
            )
        module = sys.modules[module_name]
        object_name = '.'.join(lobject_name)
        obj = module
        # walk the remaining attribute path
        while lobject_name:
            if not hasattr(obj, lobject_name[0]):

                raise ConstructorError(
                    'while constructing a Python object',
                    mark,
                    'cannot find %r in the module %r' % (utf8(object_name), module.__name__),
                    mark,
                )
            obj = getattr(obj, lobject_name.pop(0))
        return obj

    def construct_python_name(self, suffix, node):
        # type: (Any, Any) -> Any
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError(
                'while constructing a Python name',
                node.start_mark,
                'expected the empty value, but found %r' % utf8(value),
                node.start_mark,
            )
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        # type: (Any, Any) -> Any
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError(
                'while constructing a Python module',
                node.start_mark,
                'expected the empty value, but found %r' % utf8(value),
                node.start_mark,
            )
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
        # type: (Any, Any, Any, Any, bool) -> Any
        """Instantiate the class named by *suffix*; newobj selects
        cls.__new__ over calling the class."""
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if PY3:
            if newobj and isinstance(cls, type):
                return cls.__new__(cls, *args, **kwds)
            else:
                return cls(*args, **kwds)
        else:
            # Python 2 needs special handling for old-style classes
            if newobj and isinstance(cls, type(classobj)) and not args and not kwds:
                instance = classobj()
                instance.__class__ = cls
                return instance
            elif newobj and isinstance(cls, type):
                return cls.__new__(cls, *args, **kwds)
            else:
                return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state):
        # type: (Any, Any) -> None
        """Apply pickled-style state (optionally (dict, slots) pair) to
        *instance*."""
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}  # type: Dict[Any, Any]
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        # type: (Any, Any) -> Any
        # Format:
        #   !!python/object:module.name { ... state ... }
        instance = self.make_python_instance(suffix, node, newobj=True)
        self.recursive_objects[node] = instance
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        # type: (Any, Any, bool) -> Any
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds = {}  # type: Dict[Any, Any]
            state = {}  # type: Dict[Any, Any]
            listitems = []  # type: List[Any]
            dictitems = {}  # type: Dict[Any, Any]
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if bool(state):
            self.set_python_instance_state(instance, state)
        if bool(listitems):
            instance.extend(listitems)
        if bool(dictitems):
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        # type: (Any, Any) -> Any
        return self.construct_python_object_apply(suffix, node, newobj=True)
+
+
# register the handlers for the python/... tags on the full Constructor
Constructor.add_constructor(u'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null)

Constructor.add_constructor(u'tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool)

Constructor.add_constructor(u'tag:yaml.org,2002:python/str', Constructor.construct_python_str)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode
)

if PY3:
    Constructor.add_constructor(
        u'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes
    )

Constructor.add_constructor(u'tag:yaml.org,2002:python/int', Constructor.construct_yaml_int)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/long', Constructor.construct_python_long
)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/float', Constructor.construct_yaml_float
)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex
)

Constructor.add_constructor(u'tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple
)

Constructor.add_constructor(u'tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map)

# multi-constructors handle tag prefixes whose suffix names a Python object
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/name:', Constructor.construct_python_name
)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/module:', Constructor.construct_python_module
)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object:', Constructor.construct_python_object
)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply
)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new
)
+
+
+class RoundTripConstructor(SafeConstructor):
+ """need to store the comments on the node itself,
+ as well as on the items
+ """
+
    def construct_scalar(self, node):
        # type: (Any) -> Any
        """Construct a scalar while preserving style (literal/folded/quoted),
        fold positions, comments and anchor, so it can be round-tripped."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(
                None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark
            )

        if node.style == '|' and isinstance(node.value, text_type):
            # literal block scalar
            lss = LiteralScalarString(node.value, anchor=node.anchor)
            if node.comment and node.comment[1]:
                lss.comment = node.comment[1][0]  # type: ignore
            return lss
        if node.style == '>' and isinstance(node.value, text_type):
            # folded block scalar: '\a' characters mark the fold positions
            fold_positions = []  # type: List[int]
            idx = -1
            while True:
                idx = node.value.find('\a', idx + 1)
                if idx < 0:
                    break
                # compensate for the markers already removed before this one
                fold_positions.append(idx - len(fold_positions))
            fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor)
            if node.comment and node.comment[1]:
                fss.comment = node.comment[1][0]  # type: ignore
            if fold_positions:
                fss.fold_pos = fold_positions  # type: ignore
            return fss
        elif bool(self._preserve_quotes) and isinstance(node.value, text_type):
            if node.style == "'":
                return SingleQuotedScalarString(node.value, anchor=node.anchor)
            if node.style == '"':
                return DoubleQuotedScalarString(node.value, anchor=node.anchor)
        if node.anchor:
            # keep the anchor on an otherwise plain scalar
            return PlainScalarString(node.value, anchor=node.anchor)
        return node.value
+
    def construct_yaml_int(self, node):
        # type: (Any) -> Any
        """Construct an int, recording base, width, underscore layout and
        anchor so the value can be dumped back in its original form."""
        width = None  # type: Any
        value_su = to_str(self.construct_scalar(node))
        try:
            # underscore = [gap between trailing underscores, has one after
            # the base prefix, has a trailing underscore]
            sx = value_su.rstrip('_')
            underscore = [len(sx) - sx.rindex('_') - 1, False, False]  # type: Any
        except ValueError:
            underscore = None
        except IndexError:
            underscore = None
        value_s = value_su.replace('_', "")
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            value_s = value_s[1:]
        if value_s == '0':
            return 0
        elif value_s.startswith('0b'):
            # a leading zero after the prefix implies a fixed display width
            if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
                width = len(value_s[2:])
            if underscore is not None:
                underscore[1] = value_su[2] == '_'
                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
            return BinaryInt(
                sign * int(value_s[2:], 2),
                width=width,
                underscore=underscore,
                anchor=node.anchor,
            )
        elif value_s.startswith('0x'):
            # default to lower-case if no a-fA-F in string
            if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
                width = len(value_s[2:])
            hex_fun = HexInt  # type: Any
            for ch in value_s[2:]:
                if ch in 'ABCDEF':  # first non-digit is capital
                    hex_fun = HexCapsInt
                    break
                if ch in 'abcdef':
                    break
            if underscore is not None:
                underscore[1] = value_su[2] == '_'
                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
            return hex_fun(
                sign * int(value_s[2:], 16),
                width=width,
                underscore=underscore,
                anchor=node.anchor,
            )
        elif value_s.startswith('0o'):
            if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
                width = len(value_s[2:])
            if underscore is not None:
                underscore[1] = value_su[2] == '_'
                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
            return OctalInt(
                sign * int(value_s[2:], 8),
                width=width,
                underscore=underscore,
                anchor=node.anchor,
            )
        elif self.resolver.processing_version != (1, 2) and value_s[0] == '0':
            # YAML 1.1: a plain leading zero means octal
            return sign * int(value_s, 8)
        elif self.resolver.processing_version != (1, 2) and ':' in value_s:
            # YAML 1.1 sexagesimal (base 60)
            digits = [int(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        elif self.resolver.processing_version > (1, 1) and value_s[0] == '0':
            # not an octal, an integer with leading zero(s)
            if underscore is not None:
                # cannot have a leading underscore
                underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
            return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore)
        elif underscore:
            # cannot have a leading underscore
            underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
            return ScalarInt(
                sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor
            )
        elif node.anchor:
            return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor)
        else:
            return sign * int(value_s)
+
    def construct_yaml_float(self, node):
        # type: (Any) -> Any
        """Construct a float, recording width, precision, sign, leading zeros
        and exponent style so the value can be dumped back unchanged."""

        def leading_zeros(v):
            # type: (Any) -> int
            # count '0' characters up to the first digit other than 0/.
            lead0 = 0
            idx = 0
            while idx < len(v) and v[idx] in '0.':
                if v[idx] == '0':
                    lead0 += 1
                idx += 1
            return lead0

        # underscore = None
        m_sign = False  # type: Any
        value_so = to_str(self.construct_scalar(node))
        value_s = value_so.replace('_', "").lower()
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            m_sign = value_s[0]
            value_s = value_s[1:]
        if value_s == '.inf':
            return sign * self.inf_value
        if value_s == '.nan':
            return self.nan_value
        if self.resolver.processing_version != (1, 2) and ':' in value_s:
            # YAML 1.1 sexagesimal float
            digits = [float(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        if 'e' in value_s:
            # split on the original string to preserve the E/e spelling
            try:
                mantissa, exponent = value_so.split('e')
                exp = 'e'
            except ValueError:
                mantissa, exponent = value_so.split('E')
                exp = 'E'
            if self.resolver.processing_version != (1, 2):
                # value_s is lower case independent of input
                if '.' not in mantissa:
                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
            lead0 = leading_zeros(mantissa)
            width = len(mantissa)
            prec = mantissa.find('.')
            if m_sign:
                width -= 1
            e_width = len(exponent)
            e_sign = exponent[0] in '+-'
            # nprint('sf', width, prec, m_sign, exp, e_width, e_sign)
            return ScalarFloat(
                sign * float(value_s),
                width=width,
                prec=prec,
                m_sign=m_sign,
                m_lead0=lead0,
                exp=exp,
                e_width=e_width,
                e_sign=e_sign,
                anchor=node.anchor,
            )
        width = len(value_so)
        prec = value_so.index('.')  # you can use index, this would not be float without dot
        lead0 = leading_zeros(value_so)
        return ScalarFloat(
            sign * float(value_s),
            width=width,
            prec=prec,
            m_sign=m_sign,
            m_lead0=lead0,
            anchor=node.anchor,
        )
+
+ def construct_yaml_str(self, node):
+ # type: (Any) -> Any
+ value = self.construct_scalar(node)
+ if isinstance(value, ScalarString):
+ return value
+ if PY3:
+ return value
+ try:
+ return value.encode('ascii')
+ except AttributeError:
+ # in case you replace the node dynamically e.g. with a dict
+ return value
+ except UnicodeEncodeError:
+ return value
+
    def construct_rt_sequence(self, node, seqtyp, deep=False):
        # type: (Any, Any, bool) -> Any
        """Construct the elements of a sequence node, attaching comments,
        anchor and per-item line/column info to *seqtyp*, and return the
        list of constructed elements."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark
            )
        ret_val = []
        if node.comment:
            seqtyp._yaml_add_comment(node.comment[:2])
            if len(node.comment) > 2:
                # entries beyond the first two are end-of-sequence comments
                seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id

            # only keep anchors that are not auto-generated template ids
            if not templated_id(node.anchor):
                seqtyp.yaml_set_anchor(node.anchor)
        for idx, child in enumerate(node.value):
            if child.comment:
                seqtyp._yaml_add_comment(child.comment, key=idx)
                child.comment = None  # if moved to sequence remove from child
            ret_val.append(self.construct_object(child, deep=deep))
            seqtyp._yaml_set_idx_line_col(
                idx, [child.start_mark.line, child.start_mark.column]
            )
        return ret_val
+
    def flatten_mapping(self, node):
        # type: (Any) -> Any
        """
        This implements the merge key feature http://yaml.org/type/merge.html
        by inserting keys from the merge dict/list of dicts if not yet
        available in this node

        Returns a list of (index, constructed-mapping) pairs describing the
        ``<<`` merge targets found (and removed) in ``node.value``; the
        actual key insertion happens later via ``add_yaml_merge``.
        Duplicate ``<<`` keys are either dropped, warned about, or raised
        on, depending on ``self.allow_duplicate_keys``.
        """

        def constructed(value_node):
            # type: (Any) -> Any
            # If the contents of a merge are defined within the
            # merge marker, then they won't have been constructed
            # yet. But if they were already constructed, we need to use
            # the existing object.
            if value_node in self.constructed_objects:
                value = self.constructed_objects[value_node]
            else:
                value = self.construct_object(value_node, deep=False)
            return value

        # merge = []
        merge_map_list = []  # type: List[Any]
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == u'tag:yaml.org,2002:merge':
                if merge_map_list:  # double << key
                    if self.allow_duplicate_keys:
                        # keep only the first merge key; skip this one
                        del node.value[index]
                        index += 1
                        continue
                    args = [
                        'while constructing a mapping',
                        node.start_mark,
                        'found duplicate key "{}"'.format(key_node.value),
                        key_node.start_mark,
                        """
                        To suppress this check see:
                            http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                        """,
                        """\
                        Duplicate keys will become an error in future releases, and are errors
                        by default when using the new API.
                        """,
                    ]
                    # None means "not explicitly configured": warn instead of raise
                    if self.allow_duplicate_keys is None:
                        warnings.warn(DuplicateKeyFutureWarning(*args))
                    else:
                        raise DuplicateKeyError(*args)
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    # <<: {...} - a single mapping to merge in
                    merge_map_list.append((index, constructed(value_node)))
                    # self.flatten_mapping(value_node)
                    # merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # <<: [{...}, {...}] - a list of mappings to merge in
                    # submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError(
                                'while constructing a mapping',
                                node.start_mark,
                                'expected a mapping for merging, but found %s' % subnode.id,
                                subnode.start_mark,
                            )
                        merge_map_list.append((index, constructed(subnode)))
                    # self.flatten_mapping(subnode)
                    # submerge.append(subnode.value)
                    # submerge.reverse()
                    # for value in submerge:
                    #     merge.extend(value)
                else:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'expected a mapping or list of mappings for merging, '
                        'but found %s' % value_node.id,
                        value_node.start_mark,
                    )
            elif key_node.tag == u'tag:yaml.org,2002:value':
                # '=' value key: treat as a plain string key
                key_node.tag = u'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        return merge_map_list
        # if merge:
        #     node.value = merge + node.value
+
    def _sentinel(self):
        # type: () -> None
        # The bound method itself serves as a unique sentinel object in
        # construct_mapping, distinguishing "no previous value seen yet"
        # from a real None value; the body is never executed.
        pass
+
    def construct_mapping(self, node, maptyp, deep=False):  # type: ignore
        # type: (Any, Any, bool) -> Any
        """Fill comment-aware mapping ``maptyp`` from a mapping node.

        Handles merge keys (via ``flatten_mapping``), non-hashable keys
        (lists/dicts become CommentedKeySeq/CommentedKeyMap), duplicate-key
        checking, and relocation of comments and line/column info onto
        ``maptyp`` so the document can be round-tripped.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
            )
        merge_map = self.flatten_mapping(node)
        # mapping = {}
        if node.comment:
            maptyp._yaml_add_comment(node.comment[:2])
            if len(node.comment) > 2:
                # comment[2] holds the end-of-collection comment list
                maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id

            # only keep anchors that are not auto-generated template ids
            if not templated_id(node.anchor):
                maptyp.yaml_set_anchor(node.anchor)
        last_key, last_value = None, self._sentinel
        for key_node, value_node in node.value:
            # keys can be list -> deep
            key = self.construct_object(key_node, deep=True)
            # lists are not hashable, but tuples are
            if not isinstance(key, Hashable):
                if isinstance(key, MutableSequence):
                    # wrap a list key so it stays round-trippable and hashable
                    key_s = CommentedKeySeq(key)
                    if key_node.flow_style is True:
                        key_s.fa.set_flow_style()
                    elif key_node.flow_style is False:
                        key_s.fa.set_block_style()
                    key = key_s
                elif isinstance(key, MutableMapping):
                    key_m = CommentedKeyMap(key)
                    if key_node.flow_style is True:
                        key_m.fa.set_flow_style()
                    elif key_node.flow_style is False:
                        key_m.fa.set_block_style()
                    key = key_m
            if PY2:
                try:
                    hash(key)
                except TypeError as exc:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unacceptable key (%s)' % exc,
                        key_node.start_mark,
                    )
            else:
                if not isinstance(key, Hashable):
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unhashable key',
                        key_node.start_mark,
                    )
            value = self.construct_object(value_node, deep=deep)
            if self.check_mapping_key(node, key_node, maptyp, key, value):
                # comment slot 4 is a comment that belongs before this key;
                # re-attach it to the previous key (or the mapping start)
                if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]:
                    if last_value is None:
                        key_node.comment[0] = key_node.comment.pop(4)
                        maptyp._yaml_add_comment(key_node.comment, value=last_key)
                    else:
                        key_node.comment[2] = key_node.comment.pop(4)
                        maptyp._yaml_add_comment(key_node.comment, key=key)
                    key_node.comment = None
                if key_node.comment:
                    maptyp._yaml_add_comment(key_node.comment, key=key)
                if value_node.comment:
                    maptyp._yaml_add_comment(value_node.comment, value=key)
                maptyp._yaml_set_kv_line_col(
                    key,
                    [
                        key_node.start_mark.line,
                        key_node.start_mark.column,
                        value_node.start_mark.line,
                        value_node.start_mark.column,
                    ],
                )
                maptyp[key] = value
                last_key, last_value = key, value  # could use indexing
        # do this last, or <<: before a key will prevent insertion in instances
        # of collections.OrderedDict (as they have no __contains__
        if merge_map:
            maptyp.add_yaml_merge(merge_map)
+
    def construct_setting(self, node, typ, deep=False):
        # type: (Any, Any, bool) -> Any
        """Fill comment-aware set ``typ`` from a mapping node.

        YAML sets are mappings with (expected) null values; each key is
        added to ``typ`` together with its comments, the values are
        constructed but discarded.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
            )
        if node.comment:
            typ._yaml_add_comment(node.comment[:2])
            if len(node.comment) > 2:
                # comment[2] holds the end-of-collection comment list
                typ.yaml_end_comment_extend(node.comment[2], clear=True)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id

            # only keep anchors that are not auto-generated template ids
            if not templated_id(node.anchor):
                typ.yaml_set_anchor(node.anchor)
        for key_node, value_node in node.value:
            # keys can be list -> deep
            key = self.construct_object(key_node, deep=True)
            # lists are not hashable, but tuples are
            if not isinstance(key, Hashable):
                if isinstance(key, list):
                    key = tuple(key)
            if PY2:
                try:
                    hash(key)
                except TypeError as exc:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unacceptable key (%s)' % exc,
                        key_node.start_mark,
                    )
            else:
                if not isinstance(key, Hashable):
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unhashable key',
                        key_node.start_mark,
                    )
            # construct but should be null
            value = self.construct_object(value_node, deep=deep)  # NOQA
            self.check_set_key(node, key_node, typ, key)
            if key_node.comment:
                typ._yaml_add_comment(key_node.comment, key=key)
            if value_node.comment:
                typ._yaml_add_comment(value_node.comment, value=key)
            typ.add(key)
+
    def construct_yaml_seq(self, node):
        # type: (Any) -> Any
        """Two-step generator constructor for sequences.

        Yields the empty CommentedSeq first so recursive/anchored nodes
        can reference it, then fills it in and applies flow/block style.
        """
        data = CommentedSeq()
        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
        if node.comment:
            data._yaml_add_comment(node.comment)
        yield data
        data.extend(self.construct_rt_sequence(node, data))
        self.set_collection_style(data, node)
+
    def construct_yaml_map(self, node):
        # type: (Any) -> Any
        """Two-step generator constructor for mappings.

        Yields the empty CommentedMap first so recursive/anchored nodes
        can reference it, then fills it in and applies flow/block style.
        """
        data = CommentedMap()
        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
        yield data
        self.construct_mapping(node, data, deep=True)
        self.set_collection_style(data, node)
+
+ def set_collection_style(self, data, node):
+ # type: (Any, Any) -> None
+ if len(data) == 0:
+ return
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+
    def construct_yaml_object(self, node, cls):
        # type: (Any, Any) -> Any
        """Two-step generator constructor for arbitrary Python objects.

        Allocates the instance without calling __init__, yields it (so
        recursive references resolve), then restores state either via
        __setstate__ or by updating __dict__ from the mapping node.
        """
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            # deep=True: __setstate__ needs fully constructed state
            state = SafeConstructor.construct_mapping(self, node, deep=True)
            data.__setstate__(state)
        else:
            state = SafeConstructor.construct_mapping(self, node)
            data.__dict__.update(state)
+
    def construct_yaml_omap(self, node):
        # type: (Any) -> Any
        # Note: we do now check for duplicate keys
        """Construct an ordered mapping from an !!omap node.

        An omap is a sequence of single-entry mappings; comments from the
        node, subnodes, keys and values are relocated onto the resulting
        CommentedOrderedMap.
        """
        omap = CommentedOrderedMap()
        omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
        if node.flow_style is True:
            omap.fa.set_flow_style()
        elif node.flow_style is False:
            omap.fa.set_block_style()
        yield omap
        if node.comment:
            omap._yaml_add_comment(node.comment[:2])
            if len(node.comment) > 2:
                omap.yaml_end_comment_extend(node.comment[2], clear=True)
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                'while constructing an ordered map',
                node.start_mark,
                'expected a sequence, but found %s' % node.id,
                node.start_mark,
            )
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    'expected a mapping of length 1, but found %s' % subnode.id,
                    subnode.start_mark,
                )
            if len(subnode.value) != 1:
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    'expected a single mapping item, but found %d items' % len(subnode.value),
                    subnode.start_mark,
                )
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            # NOTE(review): duplicate-key check is an assert, so it is
            # skipped under `python -O`
            assert key not in omap
            value = self.construct_object(value_node)
            if key_node.comment:
                omap._yaml_add_comment(key_node.comment, key=key)
            if subnode.comment:
                omap._yaml_add_comment(subnode.comment, key=key)
            if value_node.comment:
                omap._yaml_add_comment(value_node.comment, value=key)
            omap[key] = value
+
    def construct_yaml_set(self, node):
        # type: (Any) -> Any
        """Two-step generator constructor for !!set nodes (CommentedSet)."""
        data = CommentedSet()
        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
        yield data
        self.construct_setting(node, data)
+
    def construct_undefined(self, node):
        # type: (Any) -> Any
        """Fallback constructor for tags without a registered constructor.

        Preserves the unknown tag (and anchor/style) on a tagged wrapper
        type so the document round-trips: CommentedMap for mappings,
        TaggedScalar for scalars, CommentedSeq for sequences.  If wrapping
        fails for any reason, a ConstructorError is raised instead.
        """
        try:
            if isinstance(node, MappingNode):
                data = CommentedMap()
                data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
                if node.flow_style is True:
                    data.fa.set_flow_style()
                elif node.flow_style is False:
                    data.fa.set_block_style()
                data.yaml_set_tag(node.tag)
                yield data
                if node.anchor:
                    data.yaml_set_anchor(node.anchor)
                self.construct_mapping(node, data)
                return
            elif isinstance(node, ScalarNode):
                data2 = TaggedScalar()
                data2.value = self.construct_scalar(node)
                data2.style = node.style
                data2.yaml_set_tag(node.tag)
                yield data2
                if node.anchor:
                    data2.yaml_set_anchor(node.anchor, always_dump=True)
                return
            elif isinstance(node, SequenceNode):
                data3 = CommentedSeq()
                data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
                if node.flow_style is True:
                    data3.fa.set_flow_style()
                elif node.flow_style is False:
                    data3.fa.set_block_style()
                data3.yaml_set_tag(node.tag)
                yield data3
                if node.anchor:
                    data3.yaml_set_anchor(node.anchor)
                data3.extend(self.construct_sequence(node))
                return
        except:  # NOQA
            # deliberately broad: any failure falls through to the
            # "could not determine a constructor" error below
            pass
        raise ConstructorError(
            None,
            None,
            'could not determine a constructor for the tag %r' % utf8(node.tag),
            node.start_mark,
        )
+
    def construct_yaml_timestamp(self, node, values=None):
        # type: (Any, Any) -> Any
        """Construct a round-trip TimeStamp from a !!timestamp scalar.

        Delegates date-only and plain timestamps to SafeConstructor; only
        timestamps with a 'T' separator or a timezone offset get the
        TimeStamp wrapper that records 't'/'tz'/'delta' for re-emission.
        Note: the ``values`` parameter is ignored and recomputed from the
        node (kept for signature compatibility with SafeConstructor).
        """
        try:
            match = self.timestamp_regexp.match(node.value)
        except TypeError:
            # node.value not a string (e.g. node replaced dynamically)
            match = None
        if match is None:
            raise ConstructorError(
                None,
                None,
                'failed to construct timestamp from "{}"'.format(node.value),
                node.start_mark,
            )
        values = match.groupdict()
        if not values['hour']:
            # date only -> plain datetime.date via the safe constructor
            return SafeConstructor.construct_yaml_timestamp(self, node, values)
        for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']:
            if values[part]:
                break
        else:
            # nothing round-trip specific -> plain datetime.datetime
            return SafeConstructor.construct_yaml_timestamp(self, node, values)
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # pad/truncate to microseconds, rounding on the 7th digit
            fraction_s = values['fraction'][:6]
            while len(fraction_s) < 6:
                fraction_s += '0'
            fraction = int(fraction_s)
            if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4:
                fraction += 1
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            minutes = values['tz_minute']
            tz_minute = int(minutes) if minutes else 0
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        # should check for None (a zero delta, e.g. +00:00, is falsy and
        # takes the else branch); issue 366 suggests using tzinfo=delta
        if delta:
            dt = datetime.datetime(year, month, day, hour, minute)
            dt -= delta
            data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction)
            data._yaml['delta'] = delta
            tz = values['tz_sign'] + values['tz_hour']
            if values['tz_minute']:
                tz += ':' + values['tz_minute']
            data._yaml['tz'] = tz
        else:
            data = TimeStamp(year, month, day, hour, minute, second, fraction)
            if values['tz']:  # no delta
                data._yaml['tz'] = values['tz']

        if values['t']:
            data._yaml['t'] = True
        return data
+
    def construct_yaml_bool(self, node):
        # type: (Any) -> Any
        """Construct a bool, wrapping it as ScalarBoolean to keep the anchor."""
        b = SafeConstructor.construct_yaml_bool(self, node)
        if node.anchor:
            return ScalarBoolean(b, anchor=node.anchor)
        return b
+
+
# Register the round-trip constructors for the standard YAML tags; the
# final registration (tag None) is the fallback used for any tag without
# an explicit constructor (construct_undefined preserves the tag).
RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq
)

RoundTripConstructor.add_constructor(
    u'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map
)

RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/cyaml.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/cyaml.py
new file mode 100644
index 0000000000..7a808a5310
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/cyaml.py
@@ -0,0 +1,185 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from _ruamel_yaml import CParser, CEmitter # type: ignore
+
+from ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor
+from ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter
+from ruamel.yaml.resolver import Resolver, BaseResolver
+
+if False: # MYPY
+ from typing import Any, Union, Optional # NOQA
+ from ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+
+# this includes some hacks to solve the usage of resolver by lower level
+# parts of the parser
+
+
class CBaseLoader(CParser, BaseConstructor, BaseResolver):  # type: ignore
    """Loader using the libyaml CParser with base construction/resolution.

    ``version`` and ``preserve_quotes`` are accepted for signature
    compatibility with the pure-Python loaders but are not used here.
    """

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        CParser.__init__(self, stream)
        # parser/composer roles are all fulfilled by this combined object
        self._parser = self._composer = self
        BaseConstructor.__init__(self, loader=self)
        BaseResolver.__init__(self, loadumper=self)
        # self.descend_resolver = self._resolver.descend_resolver
        # self.ascend_resolver = self._resolver.ascend_resolver
        # self.resolve = self._resolver.resolve
+
class CSafeLoader(CParser, SafeConstructor, Resolver):  # type: ignore
    """Loader using the libyaml CParser with safe construction.

    ``version`` and ``preserve_quotes`` are accepted for signature
    compatibility with the pure-Python loaders but are not used here.
    """

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        CParser.__init__(self, stream)
        # parser/composer roles are all fulfilled by this combined object
        self._parser = self._composer = self
        SafeConstructor.__init__(self, loader=self)
        Resolver.__init__(self, loadumper=self)
        # self.descend_resolver = self._resolver.descend_resolver
        # self.ascend_resolver = self._resolver.ascend_resolver
        # self.resolve = self._resolver.resolve
+
class CLoader(CParser, Constructor, Resolver):  # type: ignore
    """Loader using the libyaml CParser with full (unsafe) construction.

    ``version`` and ``preserve_quotes`` are accepted for signature
    compatibility with the pure-Python loaders but are not used here.
    """

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        CParser.__init__(self, stream)
        # parser/composer roles are all fulfilled by this combined object
        self._parser = self._composer = self
        Constructor.__init__(self, loader=self)
        Resolver.__init__(self, loadumper=self)
        # self.descend_resolver = self._resolver.descend_resolver
        # self.ascend_resolver = self._resolver.ascend_resolver
        # self.resolve = self._resolver.resolve
+
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):  # type: ignore
    """Dumper using the libyaml CEmitter with base representation.

    ``block_seq_indent``, ``top_level_colon_align`` and ``prefix_colon``
    are accepted for signature compatibility with the pure-Python dumpers
    but are not forwarded to the C emitter.
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        CEmitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            encoding=encoding,
            allow_unicode=allow_unicode,
            line_break=line_break,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
        )
        # emitter/serializer/representer roles are all this combined object
        self._emitter = self._serializer = self._representer = self
        BaseRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        BaseResolver.__init__(self, loadumper=self)
+
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):  # type: ignore
    """Dumper using the libyaml CEmitter with safe representation.

    ``block_seq_indent``, ``top_level_colon_align`` and ``prefix_colon``
    are accepted for signature compatibility with the pure-Python dumpers
    but are not forwarded to the C emitter.
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        CEmitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            encoding=encoding,
            allow_unicode=allow_unicode,
            line_break=line_break,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
        )
        # emitter/serializer/representer roles are all this combined object;
        # set once after CEmitter.__init__ (the previous duplicate assignment
        # before the call was redundant), matching CBaseDumper/CDumper
        self._emitter = self._serializer = self._representer = self
        SafeRepresenter.__init__(
            self, default_style=default_style, default_flow_style=default_flow_style
        )
        # NOTE(review): siblings pass loadumper=self to Resolver.__init__;
        # left unchanged here to preserve existing behavior
        Resolver.__init__(self)
+
class CDumper(CEmitter, Representer, Resolver):  # type: ignore
    """Dumper using the libyaml CEmitter with full representation.

    ``block_seq_indent``, ``top_level_colon_align`` and ``prefix_colon``
    are accepted for signature compatibility with the pure-Python dumpers
    but are not forwarded to the C emitter.
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        CEmitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            encoding=encoding,
            allow_unicode=allow_unicode,
            line_break=line_break,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
        )
        # emitter/serializer/representer roles are all this combined object
        self._emitter = self._serializer = self._representer = self
        Representer.__init__(
            self, default_style=default_style, default_flow_style=default_flow_style
        )
        Resolver.__init__(self)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/dumper.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/dumper.py
new file mode 100644
index 0000000000..5d99b4faf9
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/dumper.py
@@ -0,0 +1,221 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from ruamel.yaml.emitter import Emitter
+from ruamel.yaml.serializer import Serializer
+from ruamel.yaml.representer import (
+ Representer,
+ SafeRepresenter,
+ BaseRepresenter,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Optional # NOQA
+ from ruamel.yaml.compat import StreamType, VersionType # NOQA
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
+
+
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Pure-Python dumper combining Emitter, Serializer, BaseRepresenter
    and BaseResolver into one object (each sub-__init__ gets dumper=self).
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        BaseRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        BaseResolver.__init__(self, loadumper=self)
+
+
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Pure-Python dumper restricted to standard YAML types
    (SafeRepresenter); otherwise identical in wiring to BaseDumper.
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        SafeRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        Resolver.__init__(self, loadumper=self)
+
+
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Pure-Python dumper with full (unsafe) representation; identical in
    wiring to SafeDumper but using Representer.
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        Representer.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        Resolver.__init__(self, loadumper=self)
+
+
class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
    """Pure-Python dumper that preserves comments/styles on round-trip.

    Unlike the other dumpers it forwards ``top_level_colon_align`` and
    ``prefix_colon`` to the Emitter and uses a VersionedResolver.
    """

    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            top_level_colon_align=top_level_colon_align,
            prefix_colon=prefix_colon,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        RoundTripRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        VersionedResolver.__init__(self, loader=self)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/emitter.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/emitter.py
new file mode 100644
index 0000000000..f8c5e1609a
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/emitter.py
@@ -0,0 +1,1696 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+import sys
+from ruamel.yaml.error import YAMLError, YAMLStreamError
+from ruamel.yaml.events import * # NOQA
+
+# fmt: off
+from ruamel.yaml.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, \
+ check_anchorname_char
+# fmt: on
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA
+ from ruamel.yaml.compat import StreamType # NOQA
+
+__all__ = ['Emitter', 'EmitterError']
+
+
class EmitterError(YAMLError):
    """Raised when the emitter receives an event it cannot emit in the
    current state (e.g. events violating the expected grammar)."""

    pass
+
+
class ScalarAnalysis(object):
    """Immutable record of the emitter's analysis of one scalar value.

    Holds the scalar text together with the flags that decide which
    output styles (plain, single/double quoted, block) are permissible.
    """

    def __init__(
        self,
        scalar,
        empty,
        multiline,
        allow_flow_plain,
        allow_block_plain,
        allow_single_quoted,
        allow_double_quoted,
        allow_block,
    ):
        # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
        # store each analysis result under its parameter name
        for attr, flag in (
            ('scalar', scalar),
            ('empty', empty),
            ('multiline', multiline),
            ('allow_flow_plain', allow_flow_plain),
            ('allow_block_plain', allow_block_plain),
            ('allow_single_quoted', allow_single_quoted),
            ('allow_double_quoted', allow_double_quoted),
            ('allow_block', allow_block),
        ):
            setattr(self, attr, flag)
+
+
class Indents(object):
    # replacement for the list based stack of None/int
    """Stack of (indent, is_sequence) pairs used by the emitter."""

    def __init__(self):
        # type: () -> None
        self.values = []  # type: List[Tuple[int, bool]]

    def append(self, val, seq):
        # type: (Any, Any) -> None
        """Push an indent level together with its sequence flag."""
        self.values.append((val, seq))

    def pop(self):
        # type: () -> Any
        """Pop the top entry, returning only its indent value."""
        val, _seq = self.values.pop()
        return val

    def last_seq(self):
        # type: () -> bool
        # return the seq(uence) value for the element added before the last one
        # in increase_indent()
        if len(self.values) < 2:
            return False
        return self.values[-2][1]

    def seq_flow_align(self, seq_indent, column):
        # type: (int, int) -> int
        """Extra alignment spaces caused by the sequence dash, or 0."""
        if len(self.values) < 2 or not self.values[-1][1]:
            return 0
        top = self.values[-1][0]
        # -1 for the dash
        base = 0 if top is None else top
        return base + seq_indent - column - 1

    def __len__(self):
        # type: () -> int
        return len(self.values)
+
+
+class Emitter(object):
+ # fmt: off
+ DEFAULT_TAG_PREFIXES = {
+ u'!': u'!',
+ u'tag:yaml.org,2002:': u'!!',
+ }
+ # fmt: on
+
+ MAX_SIMPLE_KEY_LENGTH = 128
+
    def __init__(
        self,
        stream,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
        brace_single_entry_mapping_in_flow_sequence=None,
        dumper=None,
    ):
        # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA
        """Set up the emitter state machine and formatting preferences.

        ``indent`` (clamped to 2..9) and ``width`` control layout;
        ``block_seq_indent``/``top_level_colon_align``/``prefix_colon``
        are ruamel round-trip extensions; ``dumper`` back-links the
        combined dumper object.
        """
        self.dumper = dumper
        if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
            self.dumper._emitter = self
        self.stream = stream

        # Encoding can be overriden by STREAM-START.
        self.encoding = None  # type: Optional[Text]
        self.allow_space_break = None

        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = []  # type: List[Any]
        self.state = self.expect_stream_start  # type: Any

        # Current event and the event queue.
        self.events = []  # type: List[Any]
        self.event = None  # type: Any

        # The current indentation level and the stack of previous indents.
        self.indents = Indents()
        self.indent = None  # type: Optional[int]

        # flow_context is an expanding/shrinking list consisting of '{' and '['
        # for each unclosed flow context. If empty list that means block context
        self.flow_context = []  # type: List[Text]

        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False

        # Characteristics of the last emitted character:
        #  - current position.
        #  - is it a whitespace?
        #  - is it an indention character
        #    (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        self.compact_seq_seq = True  # dash after dash
        self.compact_seq_map = True  # key after dash
        # self.compact_ms = False   # dash after key, only when excplicit key with ?
        self.no_newline = None  # type: Optional[bool]  # set if directly after `- `

        # Whether the document requires an explicit document end indicator
        self.open_ended = False

        # colon handling
        self.colon = u':'
        self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
        # single entry mappings in flow sequence
        self.brace_single_entry_mapping_in_flow_sequence = (
            brace_single_entry_mapping_in_flow_sequence  # NOQA
        )

        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
        self.unicode_supplementary = sys.maxunicode > 0xFFFF
        self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
        self.top_level_colon_align = top_level_colon_align
        self.best_sequence_indent = 2
        self.requested_indent = indent  # specific for literal zero indent
        # only indents in the open interval (1, 10) are honored
        if indent and 1 < indent < 10:
            self.best_sequence_indent = indent
        self.best_map_indent = self.best_sequence_indent
        # if self.best_sequence_indent < self.sequence_dash_offset + 1:
        #     self.best_sequence_indent = self.sequence_dash_offset + 1
        self.best_width = 80
        # width must leave room for at least two indent levels
        if width and width > self.best_sequence_indent * 2:
            self.best_width = width
        self.best_line_break = u'\n'  # type: Any
        if line_break in [u'\r', u'\n', u'\r\n']:
            self.best_line_break = line_break

        # Tag prefixes.
        self.tag_prefixes = None  # type: Any

        # Prepared anchor and tag.
        self.prepared_anchor = None  # type: Any
        self.prepared_tag = None  # type: Any

        # Scalar analysis and style.
        self.analysis = None  # type: Any
        self.style = None  # type: Any

        self.scalar_after_indicator = True  # write a scalar on the same line as `---`

        self.alt_null = 'null'
+
+ # Accessor for the output stream; the stream is set lazily via the setter,
+ # so reading it before assignment raises YAMLStreamError.
+ @property
+ def stream(self):
+ # type: () -> Any
+ try:
+ return self._stream
+ except AttributeError:
+ # NOTE(review): message typo ("needs to specified") matches upstream; left unchanged.
+ raise YAMLStreamError('output stream needs to specified')
+
+ # Setter: silently ignores None (stream can be supplied later), but rejects
+ # any object lacking a write() method.
+ @stream.setter
+ def stream(self, val):
+ # type: (Any) -> None
+ if val is None:
+ return
+ if not hasattr(val, 'write'):
+ raise YAMLStreamError('stream argument needs to have a write() method')
+ self._stream = val
+
+ # Resolve the serializer through the owning dumper; new-style dumpers expose
+ # a 'typ' attribute, old-style ones keep it in '_serializer'. When there is
+ # no dumper at all (cyaml), the emitter itself acts as serializer.
+ @property
+ def serializer(self):
+ # type: () -> Any
+ try:
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.serializer
+ return self.dumper._serializer
+ except AttributeError:
+ return self # cyaml
+
+ # Current flow nesting depth, derived from the stack of open flow
+ # collection indicators ('[' / '{' / '').
+ @property
+ def flow_level(self):
+ # type: () -> int
+ return len(self.flow_context)
+
+ # Drop the state machine's bound-method references so the emitter does not
+ # keep itself alive through self-referencing cycles.
+ def dispose(self):
+ # type: () -> None
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ # Main entry point: queue the incoming event, then run the state machine
+ # for as long as enough look-ahead events are buffered (see
+ # need_more_events below).
+ def emit(self, event):
+ # type: (Any) -> None
+ if dbg(DBG_EVENT):
+ nprint(event)
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ # Decide whether processing must wait for more events. Document/sequence/
+ # mapping starts need 1/2/3 events of look-ahead respectively (to allow
+ # e.g. empty-collection and simple-key decisions); other events need none.
+ def need_more_events(self):
+ # type: () -> bool
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ # Return True if fewer than `count` look-ahead events are buffered past the
+ # head event. Scans the queue tracking nesting depth; once the nesting
+ # closes below the starting level (or the stream ends) no more events are
+ # needed regardless of count.
+ def need_events(self, count):
+ # type: (int) -> bool
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return len(self.events) < count + 1
+
+ # Push the current indent on the indent stack and compute the new one.
+ # NOTE: self.indents is a project-specific Indents object whose append()
+ # takes (indent, sequence) — not a plain list.
+ def increase_indent(self, flow=False, sequence=None, indentless=False):
+ # type: (bool, Optional[bool], bool) -> None
+ self.indents.append(self.indent, sequence)
+ if self.indent is None: # top level
+ if flow:
+ # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
+ # self.best_map_indent
+ # self.indent = self.best_sequence_indent
+ self.indent = self.requested_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ # indent step depends on whether the innermost collection is a sequence
+ self.indent += (
+ self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
+ )
+ # if self.indents.last_seq():
+ # if self.indent == 0: # top level block sequence
+ # self.indent = self.best_sequence_indent - self.sequence_dash_offset
+ # else:
+ # self.indent += self.best_sequence_indent
+ # else:
+ # self.indent += self.best_map_indent
+
+ # States.
+
+ # Stream handlers.
+
+ # Initial state: accept only StreamStartEvent. Adopt the event's encoding
+ # unless the underlying stream already has one (attribute check differs
+ # between PY2 and PY3), then move on to document handling.
+ def expect_stream_start(self):
+ # type: () -> None
+ if isinstance(self.event, StreamStartEvent):
+ if PY2:
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ else:
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError('expected StreamStartEvent, but got %s' % (self.event,))
+
+ # Terminal state after StreamEndEvent: any further event is an error.
+ def expect_nothing(self):
+ # type: () -> None
+ raise EmitterError('expected nothing, but got %s' % (self.event,))
+
+ # Document handlers.
+
+ # First document can omit the '---' marker; delegate with first=True.
+ def expect_first_document_start(self):
+ # type: () -> Any
+ return self.expect_document_start(first=True)
+
+ # Handle DocumentStartEvent (emit %YAML / %TAG directives and, unless the
+ # document start may stay implicit, the '---' marker) or StreamEndEvent
+ # (close an open-ended stream with '...' and finish).
+ def expect_document_start(self, first=False):
+ # type: (bool) -> None
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ # previous document had no explicit end; directives force '...'
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ # '---' can be left out only for a first, non-explicit, non-canonical
+ # document that has no directives and is not empty
+ implicit = (
+ first
+ and not self.event.explicit
+ and not self.canonical
+ and not self.event.version
+ and not self.event.tags
+ and not self.check_empty_document()
+ )
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError('expected DocumentStartEvent, but got %s' % (self.event,))
+
+ # Close the current document: write '...' only when the end is explicit,
+ # flush, and loop back to expecting the next document start.
+ def expect_document_end(self):
+ # type: () -> None
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError('expected DocumentEndEvent, but got %s' % (self.event,))
+
+ # Emit the document's single root node; afterwards fall through to
+ # expect_document_end via the state stack.
+ def expect_document_root(self):
+ # type: () -> None
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ # Central node dispatcher. Records the syntactic context flags, writes the
+ # anchor/tag, then routes to the alias, scalar, flow/block sequence, or
+ # flow/block mapping handler. Flow style is forced when already inside a
+ # flow collection, in canonical mode, when requested by the event, or when
+ # the collection is empty.
+ def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
+ # type: (bool, bool, bool, bool) -> None
+ self.root_context = root
+ self.sequence_context = sequence # not used in PyYAML
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ # writing an anchor ends the "directly after dash" situation for scalars
+ if (
+ self.process_anchor(u'&')
+ and isinstance(self.event, ScalarEvent)
+ and self.sequence_context
+ ):
+ self.sequence_context = False
+ if (
+ root
+ and isinstance(self.event, ScalarEvent)
+ and not self.scalar_after_indicator
+ ):
+ self.write_indent()
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ # nprint('@', self.indention, self.no_newline, self.column)
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ # nprint('@', self.indention, self.no_newline, self.column)
+ i2, n2 = self.indention, self.no_newline # NOQA
+ if self.event.comment:
+ # round-trip comments attached to the sequence start
+ if self.event.flow_style is False and self.event.comment:
+ if self.write_post_comment(self.event):
+ self.indention = False
+ self.no_newline = True
+ if self.write_pre_comment(self.event):
+ self.indention = i2
+ self.no_newline = not self.indention
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_sequence()
+ ):
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.event.flow_style is False and self.event.comment:
+ self.write_post_comment(self.event)
+ if self.event.comment and self.event.comment[1]:
+ self.write_pre_comment(self.event)
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_mapping()
+ ):
+ self.expect_flow_mapping(single=self.event.nr_items == 1)
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError('expected NodeEvent, but got %s' % (self.event,))
+
+ # Emit an alias node ('*anchor'); an alias without an anchor is invalid.
+ def expect_alias(self):
+ # type: () -> None
+ if self.event.anchor is None:
+ raise EmitterError('anchor is not specified for alias')
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ # Emit a scalar inside a temporary flow-style indent level, then restore
+ # the previous indent and state.
+ def expect_scalar(self):
+ # type: () -> None
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ # Open a flow sequence: write '[' (optionally padded for alignment per
+ # Indents.seq_flow_align), push the flow context and indent level.
+ def expect_flow_sequence(self):
+ # type: () -> None
+ ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
+ self.write_indicator(u' ' * ind + u'[', True, whitespace=True)
+ self.increase_indent(flow=True, sequence=True)
+ self.flow_context.append('[')
+ self.state = self.expect_first_flow_sequence_item
+
+ # First item of a flow sequence: either the sequence ends immediately
+ # (write ']', handle a possible eol comment on the empty sequence), or
+ # emit the first node.
+ def expect_first_flow_sequence_item(self):
+ # type: () -> None
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '['
+ self.write_indicator(u']', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on empty flow sequence
+ self.write_post_comment(self.event)
+ elif self.flow_level == 0:
+ self.write_line_break()
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Subsequent items of a flow sequence: close with ']' on SequenceEndEvent
+ # (canonical mode adds a trailing comma and newline first), otherwise emit
+ # ',' and the next node, wrapping when past best_width.
+ def expect_flow_sequence_item(self):
+ # type: () -> None
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '['
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on flow sequence
+ self.write_post_comment(self.event)
+ else:
+ self.no_newline = False
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ # Open a flow mapping. A single-entry mapping directly inside a flow
+ # sequence may omit the braces entirely (unless canonical mode or the
+ # brace_single_entry_mapping_in_flow_sequence option forbids it); the
+ # chosen opener ('{' or '') is pushed so the close side can match it.
+ def expect_flow_mapping(self, single=False):
+ # type: (Optional[bool]) -> None
+ ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
+ map_init = u'{'
+ if (
+ single
+ and self.flow_level
+ and self.flow_context[-1] == '['
+ and not self.canonical
+ and not self.brace_single_entry_mapping_in_flow_sequence
+ ):
+ # single map item with flow context, no curly braces necessary
+ map_init = u''
+ self.write_indicator(u' ' * ind + map_init, True, whitespace=True)
+ self.flow_context.append(map_init)
+ self.increase_indent(flow=True, sequence=False)
+ self.state = self.expect_first_flow_mapping_key
+
+ # First key of a flow mapping: handle the immediately-empty case ('}'),
+ # otherwise emit the key either as a simple key or with an explicit '?'.
+ def expect_first_flow_mapping_key(self):
+ # type: () -> None
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '{' # empty flow mapping
+ self.write_indicator(u'}', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on empty mapping
+ self.write_post_comment(self.event)
+ elif self.flow_level == 0:
+ self.write_line_break()
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ # Subsequent keys of a flow mapping: on MappingEndEvent close with '}'
+ # only if braces were actually opened (popped opener may be the empty
+ # string for the brace-less single-entry case); otherwise write ',' and
+ # the next key (simple or explicit '?').
+ def expect_flow_mapping_key(self):
+ # type: () -> None
+ if isinstance(self.event, MappingEndEvent):
+ # if self.event.comment and self.event.comment[1]:
+ # self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped in [u'{', u'']
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ if popped != u'':
+ self.write_indicator(u'}', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on flow mapping, never reached on empty mappings
+ self.write_post_comment(self.event)
+ else:
+ self.no_newline = False
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ # Value after a simple key in a flow mapping: ':' directly after the key.
+ def expect_flow_mapping_simple_value(self):
+ # type: () -> None
+ self.write_indicator(self.prefixed_colon, False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Value after an explicit ('?') key in a flow mapping; may wrap first.
+ def expect_flow_mapping_value(self):
+ # type: () -> None
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(self.prefixed_colon, True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ # Open a block sequence. Inside a mapping the sequence may be emitted
+ # "indentless" (dashes at the key's indent); with compact_seq_seq disabled
+ # a line break is forced before the first dash.
+ def expect_block_sequence(self):
+ # type: () -> None
+ if self.mapping_context:
+ indentless = not self.indention
+ else:
+ indentless = False
+ if not self.compact_seq_seq and self.column != 0:
+ self.write_line_break()
+ self.increase_indent(flow=False, sequence=True, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ # Delegate; first=True allows detecting an empty block sequence end.
+ def expect_first_block_sequence_item(self):
+ # type: () -> Any
+ return self.expect_block_sequence_item(first=True)
+
+ # Emit one '- item' of a block sequence, or unwind on SequenceEndEvent.
+ # no_newline is propagated so a nested node can stay on the dash's line
+ # when the dash offset leaves no room for a proper indent step.
+ def expect_block_sequence_item(self, first=False):
+ # type: (bool) -> None
+ if not first and isinstance(self.event, SequenceEndEvent):
+ if self.event.comment and self.event.comment[1]:
+ # final comments on a block list e.g. empty line
+ self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ self.no_newline = False
+ else:
+ if self.event.comment and self.event.comment[1]:
+ self.write_pre_comment(self.event)
+ nonl = self.no_newline if self.column == 0 else False
+ self.write_indent()
+ ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0
+ self.write_indicator(u' ' * ind + u'-', True, indention=True)
+ if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
+ self.no_newline = True
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ # Open a block mapping; outside a mapping context a line break is forced
+ # unless compact key-after-dash placement is allowed or we are at column 0.
+ def expect_block_mapping(self):
+ # type: () -> None
+ if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
+ self.write_line_break()
+ self.increase_indent(flow=False, sequence=False)
+ self.state = self.expect_first_block_mapping_key
+
+ # Delegate; first=True allows detecting an empty block mapping end.
+ def expect_first_block_mapping_key(self):
+ # type: () -> None
+ return self.expect_block_mapping_key(first=True)
+
+ # Emit one key of a block mapping, or unwind on MappingEndEvent. Simple
+ # keys may still carry an explicit '?' style request; sequence/mapping
+ # keys and over-long keys always get the explicit '?' form. An alias used
+ # as a key gets a separating space written directly to the stream.
+ def expect_block_mapping_key(self, first=False):
+ # type: (Any) -> None
+ if not first and isinstance(self.event, MappingEndEvent):
+ if self.event.comment and self.event.comment[1]:
+ # final comments from a doc
+ self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ if self.event.comment and self.event.comment[1]:
+ # final comments from a doc
+ self.write_pre_comment(self.event)
+ self.write_indent()
+ if self.check_simple_key():
+ if not isinstance(
+ self.event, (SequenceStartEvent, MappingStartEvent)
+ ): # sequence keys
+ try:
+ if self.event.style == '?':
+ self.write_indicator(u'?', True, indention=True)
+ except AttributeError: # aliases have no style
+ pass
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ if isinstance(self.event, AliasEvent):
+ self.stream.write(u' ')
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ # Value after a simple block-mapping key. For top-level keys the colon may
+ # be padded out to top_level_colon_align; keys that used explicit '?'
+ # style skip the inline colon (the value handler writes it).
+ def expect_block_mapping_simple_value(self):
+ # type: () -> None
+ if getattr(self.event, 'style', None) != '?':
+ # prefix = u''
+ if self.indent == 0 and self.top_level_colon_align is not None:
+ # write non-prefixed colon
+ c = u' ' * (self.top_level_colon_align - self.column) + self.colon
+ else:
+ c = self.prefixed_colon
+ self.write_indicator(c, False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Value after an explicit ('?') block-mapping key: ':' on its own line.
+ def expect_block_mapping_value(self):
+ # type: () -> None
+ self.write_indent()
+ self.write_indicator(self.prefixed_colon, True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ # True when the pending SequenceStartEvent is immediately followed by its
+ # SequenceEndEvent in the look-ahead buffer (i.e. the sequence is empty).
+ def check_empty_sequence(self):
+ # type: () -> bool
+ return (
+ isinstance(self.event, SequenceStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], SequenceEndEvent)
+ )
+
+ # True when the pending MappingStartEvent is immediately followed by its
+ # MappingEndEvent in the look-ahead buffer (i.e. the mapping is empty).
+ def check_empty_mapping(self):
+ # type: () -> bool
+ return (
+ isinstance(self.event, MappingStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], MappingEndEvent)
+ )
+
+ # True when the document about to start consists solely of an implicit,
+ # untagged, unanchored empty-string scalar.
+ def check_empty_document(self):
+ # type: () -> bool
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (
+ isinstance(event, ScalarEvent)
+ and event.anchor is None
+ and event.tag is None
+ and event.implicit
+ and event.value == ""
+ )
+
+ # Decide whether the current event can be written as a simple (inline)
+ # mapping key: total rendered length (anchor + tag + scalar) must stay
+ # below MAX_SIMPLE_KEY_LENGTH and the node must be an alias, a flow-style
+ # collection, an empty collection, or a single-line scalar. Side effect:
+ # caches prepared_anchor / prepared_tag / analysis for later use.
+ def check_simple_key(self):
+ # type: () -> bool
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if (
+ isinstance(self.event, (ScalarEvent, CollectionStartEvent))
+ and self.event.tag is not None
+ ):
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return length < self.MAX_SIMPLE_KEY_LENGTH and (
+ isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
+ or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True)
+ or (
+ isinstance(self.event, ScalarEvent)
+ # if there is an explicit style for an empty string, it is a simple key
+ and not (self.analysis.empty and self.style and self.style not in '\'"')
+ and not self.analysis.multiline
+ )
+ or self.check_empty_sequence()
+ or self.check_empty_mapping()
+ )
+
+ # Anchor, Tag, and Scalar processors.
+
+ # Write the event's anchor with the given indicator character ('&' for a
+ # definition, '*' for an alias). Returns True iff the event carries an
+ # anchor; clears the cached prepared anchor either way.
+ def process_anchor(self, indicator):
+ # type: (Any) -> bool
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return False
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator + self.prepared_anchor, True)
+ # issue 288
+ self.no_newline = False
+ self.prepared_anchor = None
+ return True
+
+ # Write the node's tag if one must appear in the output. For scalars the
+ # decision depends on the chosen style and the event's implicit flags; an
+ # empty single-quoted null scalar is rewritten to alt_null first. Tags
+ # that resolve to implicit remain unwritten.
+ def process_tag(self):
+ # type: () -> None
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if self.event.value == '' and self.style == "'" and \
+ tag == 'tag:yaml.org,2002:null' and self.alt_null is not None:
+ # represent an empty null as alt_null ('null') instead of ''
+ self.event.value = self.alt_null
+ self.analysis = None
+ self.style = self.choose_scalar_style()
+ if (not self.canonical or tag is None) and (
+ (self.style == "" and self.event.implicit[0])
+ or (self.style != "" and self.event.implicit[1])
+ ):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError('tag is not specified')
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ if (
+ self.sequence_context
+ and not self.flow_level
+ and isinstance(self.event, ScalarEvent)
+ ):
+ # keep the tagged scalar on the same line as the dash
+ self.no_newline = True
+ self.prepared_tag = None
+
+ # Pick the output style for the current scalar: '' (plain), "'", '"',
+ # '|' or '>'. Honors the event's requested style when the scalar analysis
+ # permits it, falling back through plain -> block -> double-quoted ->
+ # single-quoted, with '"' as the final catch-all.
+ def choose_scalar_style(self):
+ # type: () -> Any
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if (not self.event.style or self.event.style == '?') and (
+ self.event.implicit[0] or not self.event.implicit[2]
+ ):
+ if not (
+ self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
+ ) and (
+ self.flow_level
+ and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain)
+ ):
+ return ""
+ self.analysis.allow_block = True
+ if self.event.style and self.event.style in '|>':
+ if (
+ not self.flow_level
+ and not self.simple_key_context
+ and self.analysis.allow_block
+ ):
+ return self.event.style
+ if not self.event.style and self.analysis.allow_double_quoted:
+ if "'" in self.event.value or '\n' in self.event.value:
+ return '"'
+ if not self.event.style or self.event.style == "'":
+ if self.analysis.allow_single_quoted and not (
+ self.simple_key_context and self.analysis.multiline
+ ):
+ return "'"
+ return '"'
+
+ # Render the current scalar with the chosen style via the matching
+ # write_* helper, then clear the cached analysis/style and emit any
+ # trailing round-trip comment. Splitting over lines is suppressed inside
+ # simple keys.
+ def process_scalar(self):
+ # type: () -> None
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = not self.simple_key_context
+ # if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ # nprint('xx', self.sequence_context, self.flow_level)
+ if self.sequence_context and not self.flow_level:
+ self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == "'":
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar, self.event.comment)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+ if self.event.comment:
+ self.write_post_comment(self.event)
+
+ # Analyzers.
+
+ # Format a (major, minor) version pair for a %YAML directive; only
+ # major version 1 is supported.
+ def prepare_version(self, version):
+ # type: (Any) -> Any
+ major, minor = version
+ if major != 1:
+ raise EmitterError('unsupported YAML version: %d.%d' % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ # Validate a tag handle for a %TAG directive: must be non-empty, start and
+ # end with '!', and contain only alphanumerics, '-' or '_' in between.
+ def prepare_tag_handle(self, handle):
+ # type: (Any) -> Any
+ if not handle:
+ raise EmitterError('tag handle must not be empty')
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r" % (utf8(handle)))
+ for ch in handle[1:-1]:
+ if not (
+ u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in u'-_'
+ ):
+ raise EmitterError(
+ 'invalid character %r in the tag handle: %r' % (utf8(ch), utf8(handle))
+ )
+ return handle
+
+ # Percent-escape characters of a tag prefix that are not URI-safe. The
+ # allowed character set gains '#' for YAML 1.2+ (as reported by the
+ # dumper's version attribute).
+ def prepare_tag_prefix(self, prefix):
+ # type: (Any) -> Any
+ if not prefix:
+ raise EmitterError('tag prefix must not be empty')
+ chunks = [] # type: List[Any]
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ ch_set = u"-;/?:@&=+$,_.~*'()[]"
+ if self.dumper:
+ version = getattr(self.dumper, 'version', (1, 2))
+ if version is None or version >= (1, 2):
+ ch_set += u'#'
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in ch_set:
+ end += 1
+ else:
+ # flush the safe run, then %XX-escape the (utf8-encoded) character
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end + 1
+ data = utf8(ch)
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return "".join(chunks)
+
+ # Render a full tag for output: find the longest registered prefix to
+ # replace with its handle, percent-escape the remaining suffix, and fall
+ # back to verbatim '!<...>' form when no handle applies.
+ def prepare_tag(self, tag):
+ # type: (Any) -> Any
+ if not tag:
+ raise EmitterError('tag must not be empty')
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix) :]
+ chunks = [] # type: List[Any]
+ start = end = 0
+ ch_set = u"-;/?:@&=+$,_.~*'()[]"
+ if self.dumper:
+ # YAML 1.2+ additionally allows '#' unescaped in tags
+ version = getattr(self.dumper, 'version', (1, 2))
+ if version is None or version >= (1, 2):
+ ch_set += u'#'
+ while end < len(suffix):
+ ch = suffix[end]
+ if (
+ u'0' <= ch <= u'9'
+ or u'A' <= ch <= u'Z'
+ or u'a' <= ch <= u'z'
+ or ch in ch_set
+ or (ch == u'!' and handle != u'!')
+ ):
+ end += 1
+ else:
+ # flush the safe run, then %XX-escape the (utf8-encoded) character
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end + 1
+ data = utf8(ch)
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = "".join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ # Validate an anchor name: non-empty and every character accepted by
+ # check_anchorname_char.
+ def prepare_anchor(self, anchor):
+ # type: (Any) -> Any
+ if not anchor:
+ raise EmitterError('anchor must not be empty')
+ for ch in anchor:
+ if not check_anchorname_char(ch):
+ raise EmitterError(
+ 'invalid character %r in the anchor: %r' % (utf8(ch), utf8(anchor))
+ )
+ return anchor
+
+ # Single-pass scan of a scalar's characters that produces a ScalarAnalysis
+ # summarizing which output styles (flow-plain, block-plain, single-quoted,
+ # double-quoted, block) are allowed, based on indicator characters,
+ # special/unicode characters, line breaks, and whitespace placement.
+ def analyze_scalar(self, scalar):
+ # type: (Any) -> Any
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=True,
+ multiline=False,
+ allow_flow_plain=False,
+ allow_block_plain=True,
+ allow_single_quoted=True,
+ allow_double_quoted=True,
+ allow_block=False,
+ )
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029'
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:': # ToDo
+ if self.serializer.use_version == (1, 1):
+ flow_indicators = True
+ elif len(scalar) == 1: # single character
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859
+ flow_indicators = True
+ if ch == u'?' and self.serializer.use_version == (1, 1):
+ flow_indicators = True
+ if ch == u':':
+ if followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (
+ ch == u'\x85'
+ or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'
+ or (self.unicode_supplementary and (u'\U00010000' <= ch <= u'\U0010FFFF'))
+ ) and ch != u'\uFEFF':
+ # unicode_characters = True
+ # printable unicode still counts as special unless allow_unicode is on
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar) - 1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar) - 1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = ch in u'\0 \t\r\n\x85\u2028\u2029'
+ followed_by_whitespace = (
+ index + 1 >= len(scalar) or scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029'
+ )
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if leading_space or leading_break or trailing_space or trailing_break:
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special character are only
+ # allowed for double quoted scalars.
+ if special_characters:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False
+ elif space_break:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+ if not self.allow_space_break:
+ allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=False,
+ multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block,
+ )
+
+ # Writers.
+
+ # Flush the output stream if it supports flushing; no-op otherwise.
+ def flush_stream(self):
+ # type: () -> None
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ # Emit a BOM for utf-16 encodings; other encodings need no stream prelude.
+ def write_stream_start(self):
+ # type: () -> None
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ # End of stream: just make sure buffered output reaches the stream.
+ def write_stream_end(self):
+ # type: () -> None
+ self.flush_stream()
+
+ # Write an indicator token, inserting a separating space when one is
+ # needed and the cursor is not already after whitespace. Updates the
+ # whitespace/indention bookkeeping and the column counter, and encodes
+ # when the stream is binary.
+ def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False):
+ # type: (Any, Any, bool, bool) -> None
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' ' + indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ # Move the cursor to the current indent column: break the line if needed
+ # (unless no_newline suppresses exactly one break, e.g. after '- '), then
+ # pad with spaces up to the indent.
+ def write_indent(self):
+ # type: () -> None
+ indent = self.indent or 0
+ if (
+ not self.indention
+ or self.column > indent
+ or (self.column == indent and not self.whitespace)
+ ):
+ if bool(self.no_newline):
+ self.no_newline = False
+ else:
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' ' * (indent - self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ # Write a line break (best_line_break unless a specific break character is
+ # given) and reset the column/whitespace/indention tracking.
+ def write_line_break(self, data=None):
+ # type: (Any) -> None
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ # Write a '%YAML <version>' directive line.
+ def write_version_directive(self, version_text):
+ # type: (Any) -> None
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Write a '%TAG <handle> <prefix>' directive line.
+ def write_tag_directive(self, handle_text, prefix_text):
+ # type: (Any, Any) -> None
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ # Emit a single-quoted scalar. Scans the text run by run: space runs may
+ # become a fold point (write_indent) when past best_width and splitting is
+ # allowed, break runs are converted to line breaks, and each ' in the text
+ # is doubled to ''. Root-level scalars honor requested_indent by starting
+ # on a fresh (indented) line.
+ def write_single_quoted(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ self.write_indicator(u"'", True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ # end of a run of spaces: fold or copy verbatim
+ if ch is None or ch != u' ':
+ if (
+ start + 1 == end
+ and self.column > self.best_width
+ and split
+ and start != 0
+ and end != len(text)
+ ):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ # end of a run of line breaks: materialize them
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ # plain run ends at whitespace, a break, or a quote to escape
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u"'":
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u"'":
+ # single quote is escaped by doubling it
+ data = u"''"
+ self.column += 2
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = ch == u' '
+ breaks = ch in u'\n\x85\u2028\u2029'
+ end += 1
+ self.write_indicator(u"'", False)
+
    # characters with a short escape form in double-quoted scalars
    # (e.g. '\n' -> '\n'); anything else that needs escaping falls back to
    # \xXX / \uXXXX / \UXXXXXXXX in write_double_quoted
    ESCAPE_REPLACEMENTS = {
        u'\0': u'0',
        u'\x07': u'a',
        u'\x08': u'b',
        u'\x09': u't',
        u'\x0A': u'n',
        u'\x0B': u'v',
        u'\x0C': u'f',
        u'\x0D': u'r',
        u'\x1B': u'e',
        u'"': u'"',
        u'\\': u'\\',
        u'\x85': u'N',
        u'\xA0': u'_',
        u'\u2028': u'L',
        u'\u2029': u'P',
    }
+
    def write_double_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Emit *text* as a double-quoted scalar.

        Printable runs are copied through; everything else is escaped (short
        form from ESCAPE_REPLACEMENTS, else \\xXX/\\uXXXX/\\UXXXXXXXX).  When
        *split* is true, long lines are broken with a trailing backslash.
        """
        if self.root_context:
            # honour an explicitly requested top-level indent
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator(u'"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            # a character needs escaping when it is special, outside the
            # printable ASCII range, or (without allow_unicode) non-ASCII
            if (
                ch is None
                or ch in u'"\\\x85\u2028\u2029\uFEFF'
                or not (
                    u'\x20' <= ch <= u'\x7E'
                    or (
                        self.allow_unicode
                        and (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD')
                    )
                )
            ):
                # flush the printable run collected so far
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = u'\\' + self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= u'\xFF':
                        data = u'\\x%02X' % ord(ch)
                    elif ch <= u'\uFFFF':
                        data = u'\\u%04X' % ord(ch)
                    else:
                        data = u'\\U%08X' % ord(ch)
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            # split an over-long line with a trailing backslash continuation
            if (
                0 < end < len(text) - 1
                and (ch == u' ' or start >= end)
                and self.column + (end - start) > self.best_width
                and split
            ):
                data = text[start:end] + u'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == u' ':
                    # a leading space on the continuation must be escaped
                    data = u'\\'
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator(u'"', False)
+
    def determine_block_hints(self, text):
        # type: (Any) -> Any
        """Return ``(hints, indent, indicator)`` for a block scalar header.

        ``hints`` is the header text after '|' or '>': an explicit indent
        digit when the text starts with whitespace/newline (or, at the root,
        when it contains a document marker line), plus a chomping indicator:
        '-' (strip) when the text lacks a final newline, '+' (keep) when it
        ends with more than one.
        """
        indent = 0
        indicator = u''
        hints = u''
        if text:
            if text[0] in u' \n\x85\u2028\u2029':
                # leading whitespace forces an explicit indentation indicator
                indent = self.best_sequence_indent
                hints += text_type(indent)
            elif self.root_context:
                # at document level, '\n---'/'\n...' followed by whitespace
                # would read as a document marker; force an indent then
                for end in ['\n---', '\n...']:
                    pos = 0
                    while True:
                        pos = text.find(end, pos)
                        if pos == -1:
                            break
                        try:
                            if text[pos + 4] in ' \r\n':
                                break
                        except IndexError:
                            pass
                        pos += 1
                    if pos > -1:
                        break
                if pos > 0:
                    indent = self.best_sequence_indent
            if text[-1] not in u'\n\x85\u2028\u2029':
                indicator = u'-'
            elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
                indicator = u'+'
        hints += indicator
        return hints, indent, indicator
+
+ def write_folded(self, text):
+ # type: (Any) -> None
+ hints, _indent, _indicator = self.determine_block_hints(text)
+ self.write_indicator(u'>' + hints, True)
+ if _indicator == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029\a':
+ if (
+ not leading_space
+ and ch is not None
+ and ch != u' '
+ and text[start] == u'\n'
+ ):
+ self.write_line_break()
+ leading_space = ch == u' '
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start + 1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029\a':
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch == u'\a':
+ if end < (len(text) - 1) and not text[end + 2].isspace():
+ self.write_line_break()
+ self.write_indent()
+ end += 2 # \a and the space that is inserted on the fold
+ else:
+ raise EmitterError('unexcpected fold indicator \\a before space')
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = ch in u'\n\x85\u2028\u2029'
+ spaces = ch == u' '
+ end += 1
+
    def write_literal(self, text, comment=None):
        # type: (Any, Any) -> None
        """Emit *text* as a literal block scalar ('|').

        When *comment* carries an end-of-line comment it is written directly
        after the '|' header; non-comment shapes are silently ignored.
        """
        hints, _indent, _indicator = self.determine_block_hints(text)
        self.write_indicator(u'|' + hints, True)
        try:
            # comment is the emitter's (pre, eol, post) structure; only the
            # eol part is relevant here
            comment = comment[1][0]
            if comment:
                self.stream.write(comment)
        except (TypeError, IndexError):
            pass
        if _indicator == u'+':
            self.open_ended = True
        self.write_line_break()
        breaks = True  # previous character was a line break
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # end of a run of breaks: write them out, then re-indent
                if ch is None or ch not in u'\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        if self.root_context:
                            # top-level literal: write the indent explicitly
                            idnx = self.indent if self.indent is not None else 0
                            self.stream.write(u' ' * (_indent + idnx))
                        else:
                            self.write_indent()
                    start = end
            else:
                # flush literal content up to the next break (or end of text)
                if ch is None or ch in u'\n\x85\u2028\u2029':
                    data = text[start:end]
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in u'\n\x85\u2028\u2029'
            end += 1
+
    def write_plain(self, text, split=True):
        # type: (Any, Any) -> None
        """Emit *text* as a plain (unquoted) scalar, folding a single long-line
        space into an indent when *split* is true."""
        if self.root_context:
            # honour an explicitly requested top-level indent
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
            else:
                self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # separate from the previous token with a single space
            data = u' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False  # previous character was a space
        breaks = False  # previous character was a line break
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # end of a run of spaces: fold a single long-line space into
                # an indent, otherwise write the spaces verbatim
                if ch != u' ':
                    if start + 1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # end of a run of breaks: a leading '\n' needs an extra break
                if ch not in u'\n\x85\u2028\u2029':  # type: ignore
                    if text[start] == u'\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                # flush ordinary characters collected so far
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    try:
                        self.stream.write(data)
                    except: # NOQA
                        # debugging aid: show what failed to write, then re-raise
                        sys.stdout.write(repr(data) + '\n')
                        raise
                    start = end
            if ch is not None:
                spaces = ch == u' '
                breaks = ch in u'\n\x85\u2028\u2029'
            end += 1
+
    def write_comment(self, comment, pre=False):
        # type: (Any, bool) -> None
        """Write a comment token, padded with spaces toward its original
        column; a post-comment (pre=False) is followed by a line break."""
        value = comment.value
        # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
        if not pre and value[-1] == '\n':
            # the trailing newline is emitted via write_line_break() below
            value = value[:-1]
        try:
            # get original column position
            col = comment.start_mark.column
            if comment.value and comment.value.startswith('\n'):
                # never inject extra spaces if the comment starts with a newline
                # and not a real comment (e.g. if you have an empty line following a key-value
                col = self.column
            elif col < self.column + 1:
                # NOTE(review): this bare `ValueError` is a no-op expression,
                # not a `raise`, so the except branch below is never entered
                # from here -- presumably `raise ValueError` was intended.
                # Confirm before changing: the nr_spaces logic below partially
                # compensates, and "fixing" it would alter emitted spacing.
                ValueError
        except ValueError:
            col = self.column + 1
        # nprint('post_comment', self.line, self.column, value)
        try:
            # at least one space if the current column >= the start column of the comment
            # but not at the start of a line
            nr_spaces = col - self.column
            if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
                nr_spaces = 1
            value = ' ' * nr_spaces + value
            try:
                if bool(self.encoding):
                    value = value.encode(self.encoding)
            except UnicodeDecodeError:
                pass
            self.stream.write(value)
        except TypeError:
            raise
        if not pre:
            self.write_line_break()
+
+ def write_pre_comment(self, event):
+ # type: (Any) -> bool
+ comments = event.comment[1]
+ if comments is None:
+ return False
+ try:
+ start_events = (MappingStartEvent, SequenceStartEvent)
+ for comment in comments:
+ if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
+ continue
+ if self.column != 0:
+ self.write_line_break()
+ self.write_comment(comment, pre=True)
+ if isinstance(event, start_events):
+ comment.pre_done = True
+ except TypeError:
+ sys.stdout.write('eventtt {} {}'.format(type(event), event))
+ raise
+ return True
+
+ def write_post_comment(self, event):
+ # type: (Any) -> bool
+ if self.event.comment[0] is None:
+ return False
+ comment = event.comment[0]
+ self.write_comment(comment)
+ return True
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/error.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/error.py
new file mode 100644
index 0000000000..d5f15532d2
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/error.py
@@ -0,0 +1,311 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import warnings
+import textwrap
+
+from ruamel.yaml.compat import utf8
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Text # NOQA
+
+
# names re-exported via `from ruamel.yaml.error import *` (StreamMark and a
# few warning classes defined below are deliberately absent from this list)
__all__ = [
    'FileMark',
    'StringMark',
    'CommentMark',
    'YAMLError',
    'MarkedYAMLError',
    'ReusedAnchorWarning',
    'UnsafeLoaderWarning',
    'MarkedYAMLWarning',
    'MarkedYAMLFutureWarning',
]
+
+
class StreamMark(object):
    """A position (stream name, character index, line, column) in an input
    stream.  ``line`` and ``column`` are 0-based; ``__str__`` shows them
    1-based."""

    __slots__ = 'name', 'index', 'line', 'column'

    def __init__(self, name, index, line, column):
        # type: (Any, int, int, int) -> None
        self.name = name
        self.index = index
        self.line = line
        self.column = column

    def __str__(self):
        # type: () -> Any
        where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
        return where

    def __eq__(self, other):
        # type: (Any) -> bool
        # robustness fix: comparing against a non-StreamMark used to raise
        # AttributeError; defer to the other operand instead
        if not isinstance(other, StreamMark):
            return NotImplemented
        if self.line != other.line or self.column != other.column:
            return False
        if self.name != other.name or self.index != other.index:
            return False
        return True

    def __ne__(self, other):
        # type: (Any) -> bool
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
+
+
class FileMark(StreamMark):
    """StreamMark for input read from a file (no buffer kept, so no snippet
    can be shown in error messages)."""

    __slots__ = ()
+
+
class StringMark(StreamMark):
    """StreamMark that also keeps the source buffer, so error messages can
    show a snippet of the offending line with a caret under the position."""

    __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'

    def __init__(self, name, index, line, column, buffer, pointer):
        # type: (Any, int, int, int, Any, Any) -> None
        StreamMark.__init__(self, name, index, line, column)
        self.buffer = buffer    # full text being parsed
        self.pointer = pointer  # offset of this mark within buffer

    def get_snippet(self, indent=4, max_length=75):
        # type: (int, int) -> Any
        """Return an excerpt of the marked line with a caret under the mark,
        or None when no buffer is available."""
        if self.buffer is None: # always False
            return None
        head = ""
        start = self.pointer
        # scan left to the start of the line, clipping to ~half max_length
        while start > 0 and self.buffer[start - 1] not in u'\0\r\n\x85\u2028\u2029':
            start -= 1
            if self.pointer - start > max_length / 2 - 1:
                head = ' ... '
                start += 5
                break
        tail = ""
        end = self.pointer
        # scan right to the end of the line, clipping likewise
        while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
            end += 1
            if end - self.pointer > max_length / 2 - 1:
                tail = ' ... '
                end -= 5
                break
        snippet = utf8(self.buffer[start:end])
        # the plain '^' is immediately overwritten with the line-annotated form
        caret = '^'
        caret = '^ (line: {})'.format(self.line + 1)
        return (
            ' ' * indent
            + head
            + snippet
            + tail
            + '\n'
            + ' ' * (indent + self.pointer - start + len(head))
            + caret
        )

    def __str__(self):
        # type: () -> Any
        snippet = self.get_snippet()
        where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
        if snippet is not None:
            where += ':\n' + snippet
        return where
+
+
class CommentMark(object):
    """Lightweight mark recording only a column position; used for comments,
    where full stream coordinates are not needed."""

    __slots__ = ('column',)

    def __init__(self, column):
        # type: (Any) -> None
        self.column = column
+
+
class YAMLError(Exception):
    """Base class for all errors raised by this package."""

    pass
+
+
class MarkedYAMLError(YAMLError):
    """YAMLError carrying context/problem descriptions, their stream marks
    and an optional note; ``__str__`` renders them one per line."""

    def __init__(
        self,
        context=None,
        context_mark=None,
        problem=None,
        problem_mark=None,
        note=None,
        warn=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        # warn is accepted for signature compatibility but ignored

    def __str__(self):
        # type: () -> Any
        parts = []  # type: List[str]
        if self.context is not None:
            parts.append(self.context)
        # show the context mark only when it adds information beyond the
        # problem mark (different position, or no problem/mark at all)
        context_mark_is_distinct = self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        )
        if context_mark_is_distinct:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note:
            parts.append(textwrap.dedent(self.note))
        return '\n'.join(parts)
+
+
class YAMLStreamError(Exception):
    """Error related to the underlying input/output stream."""

    pass
+
+
class YAMLWarning(Warning):
    """Base class for all warnings issued by this package."""

    pass
+
+
class MarkedYAMLWarning(YAMLWarning):
    """YAMLWarning carrying context/problem descriptions, their stream marks,
    an optional note and an optional warning text."""

    def __init__(
        self,
        context=None,
        context_mark=None,
        problem=None,
        problem_mark=None,
        note=None,
        warn=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        self.warn = warn

    def __str__(self):
        # type: () -> Any
        lines = []  # type: List[str]
        if self.context is not None:
            lines.append(self.context)
        # show the context mark only when it differs from the problem mark
        if self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        ):
            lines.append(str(self.context_mark))
        if self.problem is not None:
            lines.append(self.problem)
        if self.problem_mark is not None:
            lines.append(str(self.problem_mark))
        if self.note is not None and self.note:
            note = textwrap.dedent(self.note)
            lines.append(note)
        if self.warn is not None and self.warn:
            warn = textwrap.dedent(self.warn)
            lines.append(warn)
        return '\n'.join(lines)
+
+
class ReusedAnchorWarning(YAMLWarning):
    """Issued when an anchor name is defined more than once in a document."""

    pass
+
+
class UnsafeLoaderWarning(YAMLWarning):
    """Warns about ``load()`` being used without an explicit Loader.

    The message is kept as the class attribute ``text``; the trailing dead
    ``pass`` statement has been removed (the class already had a body).
    """

    text = """
The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
Alternatively include the following in your code:

  import warnings
  warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)

In most other cases you should consider using 'safe_load(stream)'"""
+
+
+warnings.simplefilter('once', UnsafeLoaderWarning)
+
+
class MantissaNoDotYAML1_1Warning(YAMLWarning):
    """Issued for a YAML 1.1 float whose mantissa lacks the dot required by
    the YAML 1.1 float type specification."""

    def __init__(self, node, flt_str):
        # type: (Any, Any) -> None
        self.node = node     # the scalar node that triggered the warning
        self.flt = flt_str   # the offending float text

    def __str__(self):
        # type: () -> Any
        line = self.node.start_mark.line
        col = self.node.start_mark.column
        return """
In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
See the Floating-Point Language-Independent Type for YAMLâ„¢ Version 1.1 specification
( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2

Correct your float: "{}" on line: {}, column: {}

or alternatively include the following in your code:

  import warnings
  warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)

""".format(
            self.flt, line, col
        )
+
+
+warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
+
+
class YAMLFutureWarning(Warning):
    """Base class for warnings about behaviour that will change in a future
    release of this package."""

    pass
+
+
class MarkedYAMLFutureWarning(YAMLFutureWarning):
    """YAMLFutureWarning enriched with context/problem marks, an optional
    note and an optional warning text (same layout as MarkedYAMLWarning)."""

    def __init__(
        self,
        context=None,
        context_mark=None,
        problem=None,
        problem_mark=None,
        note=None,
        warn=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        self.warn = warn

    def __str__(self):
        # type: () -> Any
        parts = []  # type: List[str]
        if self.context is not None:
            parts.append(self.context)
        # show the context mark only when it adds information beyond the
        # problem mark (different position, or no problem/mark at all)
        context_mark_is_distinct = self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        )
        if context_mark_is_distinct:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note:
            parts.append(textwrap.dedent(self.note))
        if self.warn:
            parts.append(textwrap.dedent(self.warn))
        return '\n'.join(parts)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/events.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/events.py
new file mode 100644
index 0000000000..58b212190a
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/events.py
@@ -0,0 +1,157 @@
+# coding: utf-8
+
+# Abstract classes.
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+
def CommentCheck():
    # type: () -> None
    """Sentinel used as the default for Event's ``comment`` argument;
    compared by identity in Event.__init__ to mean "no comment supplied"."""
    pass
+
+
class Event(object):
    """Base class for all events produced by the parser and consumed by the
    emitter; records the stream span and an optionally attached comment."""

    __slots__ = 'start_mark', 'end_mark', 'comment'

    def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
        # type: (Any, Any, Any) -> None
        self.start_mark = start_mark
        self.end_mark = end_mark
        # CommentCheck is the "no comment supplied" sentinel
        self.comment = None if comment is CommentCheck else comment

    def __repr__(self):
        # type: () -> Any
        shown = [
            key
            for key in ('anchor', 'tag', 'implicit', 'value', 'flow_style', 'style')
            if hasattr(self, key)
        ]
        arguments = ', '.join('%s=%r' % (key, getattr(self, key)) for key in shown)
        if self.comment not in [None, CommentCheck]:
            arguments += ', comment={!r}'.format(self.comment)
        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+
class NodeEvent(Event):
    """Event that refers to a node and may carry an anchor name."""

    __slots__ = ('anchor',)

    def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
        # type: (Any, Any, Any, Any) -> None
        Event.__init__(self, start_mark, end_mark, comment)
        self.anchor = anchor
+
+
class CollectionStartEvent(NodeEvent):
    """Common start event for sequence and mapping nodes."""

    __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items'

    def __init__(
        self,
        anchor,
        tag,
        implicit,
        start_mark=None,
        end_mark=None,
        flow_style=None,
        comment=None,
        nr_items=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None
        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
        self.tag = tag
        self.implicit = implicit     # whether the tag may be omitted on output
        self.flow_style = flow_style
        self.nr_items = nr_items
+
+
class CollectionEndEvent(Event):
    """Common end event for sequence and mapping nodes."""

    __slots__ = ()
+
+
+# Implementations.
+
+
class StreamStartEvent(Event):
    """First event of a stream; carries the detected/requested encoding."""

    __slots__ = ('encoding',)

    def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
        # type: (Any, Any, Any, Any) -> None
        Event.__init__(self, start_mark, end_mark, comment)
        self.encoding = encoding
+
+
class StreamEndEvent(Event):
    """Last event of a stream."""

    __slots__ = ()
+
+
class DocumentStartEvent(Event):
    """Start of a document; records directives and whether the '---' marker
    was explicit."""

    __slots__ = 'explicit', 'version', 'tags'

    def __init__(
        self,
        start_mark=None,
        end_mark=None,
        explicit=None,
        version=None,
        tags=None,
        comment=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        Event.__init__(self, start_mark, end_mark, comment)
        self.explicit = explicit  # True when '---' was present in the input
        self.version = version    # %YAML directive value, if any
        self.tags = tags          # %TAG directive mapping, if any
+
+
class DocumentEndEvent(Event):
    """End of a document; ``explicit`` records whether '...' was present."""

    __slots__ = ('explicit',)

    def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
        # type: (Any, Any, Any, Any) -> None
        Event.__init__(self, start_mark, end_mark, comment)
        self.explicit = explicit
+
+
class AliasEvent(NodeEvent):
    """Reference to a previously anchored node (``*anchor``)."""

    __slots__ = ()
+
+
class ScalarEvent(NodeEvent):
    """A scalar node; ``style`` is the quoting/block style character (or None
    to let the emitter choose)."""

    __slots__ = 'tag', 'implicit', 'value', 'style'

    def __init__(
        self,
        anchor,
        tag,
        implicit,
        value,
        start_mark=None,
        end_mark=None,
        style=None,
        comment=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
        self.tag = tag
        self.implicit = implicit  # (plain-implicit, quoted-implicit) pair
        self.value = value
        self.style = style
+
+
class SequenceStartEvent(CollectionStartEvent):
    """Start of a sequence node."""

    __slots__ = ()
+
+
class SequenceEndEvent(CollectionEndEvent):
    """End of a sequence node."""

    __slots__ = ()
+
+
class MappingStartEvent(CollectionStartEvent):
    """Start of a mapping node."""

    __slots__ = ()
+
+
class MappingEndEvent(CollectionEndEvent):
    """End of a mapping node."""

    __slots__ = ()
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/loader.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/loader.py
new file mode 100644
index 0000000000..979ec62386
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/loader.py
@@ -0,0 +1,74 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+
+from ruamel.yaml.reader import Reader
+from ruamel.yaml.scanner import Scanner, RoundTripScanner
+from ruamel.yaml.parser import Parser, RoundTripParser
+from ruamel.yaml.composer import Composer
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from ruamel.yaml.resolver import VersionedResolver
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Optional # NOQA
+ from ruamel.yaml.compat import StreamTextType, VersionType # NOQA
+
# names re-exported via `from ruamel.yaml.loader import *`
__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
+
+
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
    """Loader composed from the reading/scanning/parsing/composing mixins,
    using BaseConstructor to build Python objects."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # each mixin is initialised explicitly, with a back-reference to this
        # loader; the order mirrors the processing pipeline
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        BaseConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
+
+
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
    """Loader using SafeConstructor; the recommended choice for untrusted
    input."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # see BaseLoader: explicit mixin initialisation in pipeline order
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        SafeConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
+
+
class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
    """Loader using the full (unsafe) Constructor; can instantiate arbitrary
    Python objects, so only use on trusted input."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # see BaseLoader: explicit mixin initialisation in pipeline order
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        Constructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
+
+
class RoundTripLoader(
    Reader,
    RoundTripScanner,
    RoundTripParser,
    Composer,
    RoundTripConstructor,
    VersionedResolver,
):
    """Loader that preserves comments, quoting and layout information so a
    later dump can reproduce the input; *preserve_quotes* is forwarded to the
    constructor."""

    def __init__(self, stream, version=None, preserve_quotes=None):
        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
        # self.reader = Reader.__init__(self, stream)
        Reader.__init__(self, stream, loader=self)
        RoundTripScanner.__init__(self, loader=self)
        RoundTripParser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/main.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/main.py
new file mode 100644
index 0000000000..a452a399d1
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/main.py
@@ -0,0 +1,1534 @@
+# coding: utf-8
+
+from __future__ import absolute_import, unicode_literals, print_function
+
+import sys
+import os
+import warnings
+import glob
+from importlib import import_module
+
+
+import ruamel.yaml
+from ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA
+
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
+
+from ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
+from ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
+from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, PY3, nprint
+from ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA
+from ruamel.yaml.representer import (
+ BaseRepresenter,
+ SafeRepresenter,
+ Representer,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from ruamel.yaml.loader import Loader as UnsafeLoader
+
+if False: # MYPY
+ from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA
+ from ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA
+
+ if PY3:
+ from pathlib import Path
+ else:
+ Path = Any
+
try:
    from _ruamel_yaml import CParser, CEmitter  # type: ignore
except: # NOQA
    # the C extension is optional; any import failure falls back to the
    # pure-Python parser/emitter
    CParser = CEmitter = None

# import io

# sentinel default: forces keyword-only arguments on Python 2
# (which has no bare `*` in signatures)
enforce = object()
+
+
+# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
+# subset of abbreviations, which should be all caps according to PEP8
+
+
+class YAML(object):
+ def __init__(
+ self, _kw=enforce, typ=None, pure=False, output=None, plug_ins=None # input=None,
+ ):
+ # type: (Any, Optional[Text], Any, Any, Any) -> None
+ """
+ _kw: not used, forces keyword arguments in 2.7 (in 3 you can do (*, safe_load=..)
+ typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default)
+ 'safe' -> SafeLoader/SafeDumper,
+ 'unsafe' -> normal/unsafe Loader/Dumper
+ 'base' -> baseloader
+ pure: if True only use Python modules
+ input/output: needed to work as context manager
+ plug_ins: a list of plug-in files
+ """
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.__init__() takes no positional argument but at least '
+ 'one was given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+
+ self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ])
+ self.pure = pure
+
+ # self._input = input
+ self._output = output
+ self._context_manager = None # type: Any
+
+ self.plug_ins = [] # type: List[Any]
+ for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
+ file_name = pu.replace(os.sep, '.')
+ self.plug_ins.append(import_module(file_name))
+ self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
+ self.allow_unicode = True
+ self.Reader = None # type: Any
+ self.Representer = None # type: Any
+ self.Constructor = None # type: Any
+ self.Scanner = None # type: Any
+ self.Serializer = None # type: Any
+ self.default_flow_style = None # type: Any
+ typ_found = 1
+ setup_rt = False
+ if 'rt' in self.typ:
+ setup_rt = True
+ elif 'safe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.SafeRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.SafeConstructor
+ elif 'base' in self.typ:
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Representer = ruamel.yaml.representer.BaseRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.BaseConstructor
+ elif 'unsafe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.Representer
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.Constructor
+ else:
+ setup_rt = True
+ typ_found = 0
+ if setup_rt:
+ self.default_flow_style = False
+ # no optimized rt-dumper yet
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.Representer = ruamel.yaml.representer.RoundTripRepresenter
+ self.Scanner = ruamel.yaml.scanner.RoundTripScanner
+ # no optimized rt-parser yet
+ self.Parser = ruamel.yaml.parser.RoundTripParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
+ del setup_rt
+ self.stream = None
+ self.canonical = None
+ self.old_indent = None
+ self.width = None
+ self.line_break = None
+
+ self.map_indent = None
+ self.sequence_indent = None
+ self.sequence_dash_offset = 0
+ self.compact_seq_seq = None
+ self.compact_seq_map = None
+ self.sort_base_mapping_type_on_output = None # default: sort
+
+ self.top_level_colon_align = None
+ self.prefix_colon = None
+ self.version = None
+ self.preserve_quotes = None
+ self.allow_duplicate_keys = False # duplicate keys in map, set
+ self.encoding = 'utf-8'
+ self.explicit_start = None
+ self.explicit_end = None
+ self.tags = None
+ self.default_style = None
+ self.top_level_block_style_scalar_no_indent_error_1_1 = False
+ # directives end indicator with single scalar document
+ self.scalar_after_indicator = None
+ # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
+ self.brace_single_entry_mapping_in_flow_sequence = False
+ for module in self.plug_ins:
+ if getattr(module, 'typ', None) in self.typ:
+ typ_found += 1
+ module.init_typ(self)
+ break
+ if typ_found == 0:
+ raise NotImplementedError(
+ 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
+ )
+
+ @property
+ def reader(self):
+ # type: () -> Any
+ try:
+ return self._reader # type: ignore
+ except AttributeError:
+ self._reader = self.Reader(None, loader=self)
+ return self._reader
+
+ @property
+ def scanner(self):
+ # type: () -> Any
+ try:
+ return self._scanner # type: ignore
+ except AttributeError:
+ self._scanner = self.Scanner(loader=self)
+ return self._scanner
+
    @property
    def parser(self):
        # type: () -> Any
        """Parser instance, created lazily and cached as ``_parser``.

        The C parser needs the stream at construction time, so until a stream
        is available this returns None in that configuration.
        """
        # cache attribute name is derived from this property's name
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            if self.Parser is not CParser:
                setattr(self, attr, self.Parser(loader=self))
            else:
                if getattr(self, '_stream', None) is None:
                    # wait for the stream
                    return None
                else:
                    # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
                    # # pathlib.Path() instance
                    # setattr(self, attr, CParser(self._stream))
                    # else:
                    setattr(self, attr, CParser(self._stream))
                # self._parser = self._composer = self
                # nprint('scanner', self.loader.scanner)

        return getattr(self, attr)
+
    @property
    def composer(self):
        # type: () -> Any
        """Composer instance, created lazily and cached in ``_composer``."""
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            setattr(self, attr, self.Composer(loader=self))
        return getattr(self, attr)
+
    @property
    def constructor(self):
        # type: () -> Any
        """Constructor instance, created lazily and cached in ``_constructor``."""
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
            # propagate the duplicate-key policy chosen on this YAML() instance
            cnst.allow_duplicate_keys = self.allow_duplicate_keys
            setattr(self, attr, cnst)
        return getattr(self, attr)
+
    @property
    def resolver(self):
        # type: () -> Any
        """Resolver instance, created lazily and cached in ``_resolver``."""
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            setattr(self, attr, self.Resolver(version=self.version, loader=self))
        return getattr(self, attr)
+
    @property
    def emitter(self):
        # type: () -> Any
        """Emitter instance, created lazily and cached in ``_emitter``.

        For the C emitter this property always returns None; the C path is
        handled entirely in get_serializer_representer_emitter().
        """
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            if self.Emitter is not CEmitter:
                _emitter = self.Emitter(
                    None,
                    canonical=self.canonical,
                    indent=self.old_indent,
                    width=self.width,
                    allow_unicode=self.allow_unicode,
                    line_break=self.line_break,
                    prefix_colon=self.prefix_colon,
                    brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA
                    dumper=self,
                )
                setattr(self, attr, _emitter)
                # carry over the fine-grained formatting overrides set on this instance
                if self.map_indent is not None:
                    _emitter.best_map_indent = self.map_indent
                if self.sequence_indent is not None:
                    _emitter.best_sequence_indent = self.sequence_indent
                if self.sequence_dash_offset is not None:
                    _emitter.sequence_dash_offset = self.sequence_dash_offset
                    # _emitter.block_seq_indent = self.sequence_dash_offset
                if self.compact_seq_seq is not None:
                    _emitter.compact_seq_seq = self.compact_seq_seq
                if self.compact_seq_map is not None:
                    _emitter.compact_seq_map = self.compact_seq_map
            else:
                if getattr(self, '_stream', None) is None:
                    # wait for the stream
                    return None
                # NOTE(review): with CEmitter this returns None unconditionally;
                # the XDumper path in get_serializer_representer_emitter takes over
                return None
        return getattr(self, attr)
+
    @property
    def serializer(self):
        # type: () -> Any
        """Serializer instance, created lazily and cached in ``_serializer``."""
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            setattr(
                self,
                attr,
                self.Serializer(
                    encoding=self.encoding,
                    explicit_start=self.explicit_start,
                    explicit_end=self.explicit_end,
                    version=self.version,
                    tags=self.tags,
                    dumper=self,
                ),
            )
        return getattr(self, attr)
+
    @property
    def representer(self):
        # type: () -> Any
        """Representer instance, created lazily and cached in ``_representer``."""
        attr = '_' + sys._getframe().f_code.co_name
        if not hasattr(self, attr):
            repres = self.Representer(
                default_style=self.default_style,
                default_flow_style=self.default_flow_style,
                dumper=self,
            )
            # only override the Representer default when explicitly configured
            if self.sort_base_mapping_type_on_output is not None:
                repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output
            setattr(self, attr, repres)
        return getattr(self, attr)
+
+ # separate output resolver?
+
+ # def load(self, stream=None):
+ # if self._context_manager:
+ # if not self._input:
+ # raise TypeError("Missing input stream while dumping from context manager")
+ # for data in self._context_manager.load():
+ # yield data
+ # return
+ # if stream is None:
+ # raise TypeError("Need a stream argument when not loading from context manager")
+ # return self.load_one(stream)
+
    def load(self, stream):
        # type: (Union[Path, StreamTextType]) -> Any
        """
        at this point you either have the non-pure Parser (which has its own reader and
        scanner) or you have the pure Parser.
        If the pure Parser is set, then set the Reader and Scanner, if not already set.
        If either the Scanner or Reader are set, you cannot use the non-pure Parser,
        so reset it to the pure parser and set the Reader resp. Scanner if necessary
        """
        if not hasattr(stream, 'read') and hasattr(stream, 'open'):
            # pathlib.Path() instance
            with stream.open('rb') as fp:
                return self.load(fp)
        constructor, parser = self.get_constructor_parser(stream)
        try:
            return constructor.get_single_data()
        finally:
            # always release the parser and reset reader/scanner so the
            # YAML() instance can be reused for another load
            parser.dispose()
            try:
                self._reader.reset_reader()
            except AttributeError:
                pass
            try:
                self._scanner.reset_scanner()
            except AttributeError:
                pass
+
+ def load_all(self, stream, _kw=enforce): # , skip=None):
+ # type: (Union[Path, StreamTextType], Any) -> Any
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.__init__() takes no positional argument but at least '
+ 'one was given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('r') as fp:
+ for d in self.load_all(fp, _kw=enforce):
+ yield d
+ return
+ # if skip is None:
+ # skip = []
+ # elif isinstance(skip, int):
+ # skip = [skip]
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ while constructor.check_data():
+ yield constructor.get_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
    def get_constructor_parser(self, stream):
        # type: (StreamTextType) -> Any
        """
        the old cyaml needs special setup, and therefore the stream
        """
        if self.Parser is not CParser:
            # pure-Python pipeline: make sure a Reader and Scanner exist
            if self.Reader is None:
                self.Reader = ruamel.yaml.reader.Reader
            if self.Scanner is None:
                self.Scanner = ruamel.yaml.scanner.Scanner
            self.reader.stream = stream
        else:
            if self.Reader is not None:
                # an explicit Reader is incompatible with CParser: fall back to pure
                if self.Scanner is None:
                    self.Scanner = ruamel.yaml.scanner.Scanner
                self.Parser = ruamel.yaml.parser.Parser
                self.reader.stream = stream
            elif self.Scanner is not None:
                # an explicit Scanner is incompatible with CParser: fall back to pure
                if self.Reader is None:
                    self.Reader = ruamel.yaml.reader.Reader
                self.Parser = ruamel.yaml.parser.Parser
                self.reader.stream = stream
            else:
                # combined C level reader>scanner>parser
                # does some calls to the resolver, e.g. BaseResolver.descend_resolver
                # if you just initialise the CParser, to much of resolver.py
                # is actually used
                rslvr = self.Resolver
                # if rslvr is ruamel.yaml.resolver.VersionedResolver:
                # rslvr = ruamel.yaml.resolver.Resolver

                class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore
                    # ad-hoc composite so the CParser, Constructor and Resolver
                    # share one object, as the pure pipeline expects
                    def __init__(selfx, stream, version=self.version, preserve_quotes=None):
                        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA
                        CParser.__init__(selfx, stream)
                        selfx._parser = selfx._composer = selfx
                        self.Constructor.__init__(selfx, loader=selfx)
                        selfx.allow_duplicate_keys = self.allow_duplicate_keys
                        rslvr.__init__(selfx, version=version, loadumper=selfx)

                self._stream = stream
                loader = XLoader(stream)
                # the composite object serves as both constructor and parser
                return loader, loader
        return self.constructor, self.parser
+
    def dump(self, data, stream=None, _kw=enforce, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        """Dump a single document, either through an active context manager or
        directly to *stream* (old style). Returns None in context-manager mode."""
        if self._context_manager:
            # streaming mode entered via ``with YAML() as yaml:``
            if not self._output:
                raise TypeError('Missing output stream while dumping from context manager')
            if _kw is not enforce:
                raise TypeError(
                    '{}.dump() takes one positional argument but at least '
                    'two were given ({!r})'.format(self.__class__.__name__, _kw)
                )
            if transform is not None:
                # transform must be given when the context manager is created
                raise TypeError(
                    '{}.dump() in the context manager cannot have transform keyword '
                    ''.format(self.__class__.__name__)
                )
            self._context_manager.dump(data)
        else: # old style
            if stream is None:
                raise TypeError('Need a stream argument when not dumping from context manager')
            return self.dump_all([data], stream, _kw, transform=transform)
+
    def dump_all(self, documents, stream, _kw=enforce, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        """Dump a sequence of documents to *stream* via a temporary context manager."""
        if self._context_manager:
            # nesting dump_all inside an active context manager is not supported
            raise NotImplementedError
        if _kw is not enforce:
            raise TypeError(
                '{}.dump(_all) takes two positional argument but at least '
                'three were given ({!r})'.format(self.__class__.__name__, _kw)
            )
        self._output = stream
        self._context_manager = YAMLContextManager(self, transform=transform)
        for data in documents:
            self._context_manager.dump(data)
        # flush/close the output and drop the temporary context manager
        self._context_manager.teardown_output()
        self._output = None
        self._context_manager = None
+
    def Xdump_all(self, documents, stream, _kw=enforce, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        """
        Serialize a sequence of Python objects into a YAML stream.
        """
        if not hasattr(stream, 'write') and hasattr(stream, 'open'):
            # pathlib.Path() instance
            with stream.open('w') as fp:
                return self.dump_all(documents, fp, _kw, transform=transform)
        if _kw is not enforce:
            raise TypeError(
                '{}.dump(_all) takes two positional argument but at least '
                'three were given ({!r})'.format(self.__class__.__name__, _kw)
            )
        # The stream should have the methods `write` and possibly `flush`.
        if self.top_level_colon_align is True:
            # auto-align: width of the longest top-level key of the first document
            tlca = max([len(str(x)) for x in documents[0]]) # type: Any
        else:
            tlca = self.top_level_colon_align
        if transform is not None:
            # buffer in memory first so transform() sees the complete rendered text
            fstream = stream
            if self.encoding is None:
                stream = StringIO()
            else:
                stream = BytesIO()
        serializer, representer, emitter = self.get_serializer_representer_emitter(
            stream, tlca
        )
        try:
            self.serializer.open()
            for data in documents:
                try:
                    self.representer.represent(data)
                except AttributeError:
                    # nprint(dir(dumper._representer))
                    raise
            self.serializer.close()
        finally:
            try:
                self.emitter.dispose()
            except AttributeError:
                raise
            # self.dumper.dispose() # cyaml
            # drop cached components so the next dump rebuilds them fresh
            delattr(self, '_serializer')
            delattr(self, '_emitter')
        if transform:
            val = stream.getvalue()
            if self.encoding:
                val = val.decode(self.encoding)
            if fstream is None:
                transform(val)
            else:
                fstream.write(transform(val))
        return None
+
    def get_serializer_representer_emitter(self, stream, tlca):
        # type: (StreamType, Any) -> Any
        """Return the (serializer, representer, emitter) triple for dumping to
        *stream*; *tlca* is the top-level colon alignment width (or None)."""
        # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
        if self.Emitter is not CEmitter:
            if self.Serializer is None:
                self.Serializer = ruamel.yaml.serializer.Serializer
            self.emitter.stream = stream
            self.emitter.top_level_colon_align = tlca
            if self.scalar_after_indicator is not None:
                self.emitter.scalar_after_indicator = self.scalar_after_indicator
            return self.serializer, self.representer, self.emitter
        if self.Serializer is not None:
            # cannot set serializer with CEmitter
            self.Emitter = ruamel.yaml.emitter.Emitter
            self.emitter.stream = stream
            self.emitter.top_level_colon_align = tlca
            if self.scalar_after_indicator is not None:
                self.emitter.scalar_after_indicator = self.scalar_after_indicator
            return self.serializer, self.representer, self.emitter
        # C routines

        rslvr = (
            ruamel.yaml.resolver.BaseResolver
            if 'base' in self.typ
            else ruamel.yaml.resolver.Resolver
        )

        class XDumper(CEmitter, self.Representer, rslvr): # type: ignore
            # ad-hoc composite so the CEmitter, Representer and Resolver share
            # one object, mirroring what the pure-Python pipeline expects
            def __init__(
                selfx,
                stream,
                default_style=None,
                default_flow_style=None,
                canonical=None,
                indent=None,
                width=None,
                allow_unicode=None,
                line_break=None,
                encoding=None,
                explicit_start=None,
                explicit_end=None,
                version=None,
                tags=None,
                block_seq_indent=None,
                top_level_colon_align=None,
                prefix_colon=None,
            ):
                # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
                CEmitter.__init__(
                    selfx,
                    stream,
                    canonical=canonical,
                    indent=indent,
                    width=width,
                    encoding=encoding,
                    allow_unicode=allow_unicode,
                    line_break=line_break,
                    explicit_start=explicit_start,
                    explicit_end=explicit_end,
                    version=version,
                    tags=tags,
                )
                selfx._emitter = selfx._serializer = selfx._representer = selfx
                self.Representer.__init__(
                    selfx, default_style=default_style, default_flow_style=default_flow_style
                )
                rslvr.__init__(selfx)

        self._stream = stream
        dumper = XDumper(
            stream,
            default_style=self.default_style,
            default_flow_style=self.default_flow_style,
            canonical=self.canonical,
            indent=self.old_indent,
            width=self.width,
            allow_unicode=self.allow_unicode,
            line_break=self.line_break,
            explicit_start=self.explicit_start,
            explicit_end=self.explicit_end,
            version=self.version,
            tags=self.tags,
        )
        # the composite object serves all three roles
        self._emitter = self._serializer = dumper
        return dumper, dumper, dumper
+
+ # basic types
+ def map(self, **kw):
+ # type: (Any) -> Any
+ if 'rt' in self.typ:
+ from ruamel.yaml.comments import CommentedMap
+
+ return CommentedMap(**kw)
+ else:
+ return dict(**kw)
+
+ def seq(self, *args):
+ # type: (Any) -> Any
+ if 'rt' in self.typ:
+ from ruamel.yaml.comments import CommentedSeq
+
+ return CommentedSeq(*args)
+ else:
+ return list(*args)
+
+ # helpers
+ def official_plug_ins(self):
+ # type: () -> Any
+ bd = os.path.dirname(__file__)
+ gpbd = os.path.dirname(os.path.dirname(bd))
+ res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
+ return res
+
    def register_class(self, cls):
        # type:(Any) -> Any
        """
        register a class for dumping/loading
        - if it has attribute yaml_tag use that to register, else use class name
        - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
        as mapping
        """
        tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
        try:
            self.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:
            # no to_yaml on the class: fall back to a generic representer

            def t_y(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )

            self.representer.add_representer(cls, t_y)
        try:
            self.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:
            # no from_yaml on the class: fall back to a generic constructor

            def f_y(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)

            self.constructor.add_constructor(tag, f_y)
        # return cls so this method can be used as a decorator
        return cls
+
    def parse(self, stream):
        # type: (StreamTextType) -> Any
        """
        Parse a YAML stream and produce parsing events.
        """
        _, parser = self.get_constructor_parser(stream)
        try:
            while parser.check_event():
                yield parser.get_event()
        finally:
            # release the parser and reset reader/scanner for instance reuse
            parser.dispose()
            try:
                self._reader.reset_reader()
            except AttributeError:
                pass
            try:
                self._scanner.reset_scanner()
            except AttributeError:
                pass
+
+ # ### context manager
+
    def __enter__(self):
        # type: () -> Any
        """Enter streaming mode: subsequent dump() calls go through a context manager."""
        self._context_manager = YAMLContextManager(self)
        return self
+
    def __exit__(self, typ, value, traceback):
        # type: (Any, Any, Any) -> None
        """Leave streaming mode, flushing and closing the output."""
        if typ:
            # debug aid: show the exception type propagating out of the block
            nprint('typ', typ)
        self._context_manager.teardown_output()
        # self._context_manager.teardown_input()
        self._context_manager = None
+
+ # ### backwards compatibility
+ def _indent(self, mapping=None, sequence=None, offset=None):
+ # type: (Any, Any, Any) -> None
+ if mapping is not None:
+ self.map_indent = mapping
+ if sequence is not None:
+ self.sequence_indent = sequence
+ if offset is not None:
+ self.sequence_dash_offset = offset
+
    @property
    def indent(self):
        # type: () -> Any
        # backwards compatibility: returns the bound _indent method so that
        # yaml.indent(mapping=..., sequence=..., offset=...) keeps working
        return self._indent
+
    @indent.setter
    def indent(self, val):
        # type: (Any) -> None
        # backwards compatibility: ``yaml.indent = n`` sets the old-style indent
        self.old_indent = val
+
    @property
    def block_seq_indent(self):
        # type: () -> Any
        # backwards-compatible alias for sequence_dash_offset
        return self.sequence_dash_offset
+
    @block_seq_indent.setter
    def block_seq_indent(self, val):
        # type: (Any) -> None
        # backwards-compatible alias for sequence_dash_offset
        self.sequence_dash_offset = val
+
+ def compact(self, seq_seq=None, seq_map=None):
+ # type: (Any, Any) -> None
+ self.compact_seq_seq = seq_seq
+ self.compact_seq_map = seq_map
+
+
class YAMLContextManager(object):
    """Streams multiple documents from a YAML() instance to a single output.

    Created by YAML.__enter__ and YAML.dump_all; buffers in memory when a
    transform callable is given so the transform sees the full rendered text.
    """

    def __init__(self, yaml, transform=None):
        # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None
        self._yaml = yaml
        self._output_inited = False
        self._output_path = None
        self._output = self._yaml._output
        self._transform = transform

        # self._input_inited = False
        # self._input = input
        # self._input_path = None
        # self._transform = yaml.transform
        # self._fstream = None

        if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
            # pathlib.Path() instance, open with the same mode
            self._output_path = self._output
            self._output = self._output_path.open('w')

        # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
        # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
        # # pathlib.Path() instance, open with the same mode
        # self._input_path = self._input
        # self._input = self._input_path.open('r')

        if self._transform is not None:
            # buffer in memory; the real output stream is kept in _fstream
            self._fstream = self._output
            if self._yaml.encoding is None:
                self._output = StringIO()
            else:
                self._output = BytesIO()

    def teardown_output(self):
        # type: () -> None
        """Close the serializer, apply the transform (if any) and flush/close output."""
        if self._output_inited:
            self._yaml.serializer.close()
        else:
            # nothing was ever dumped: nothing to tear down
            return
        try:
            self._yaml.emitter.dispose()
        except AttributeError:
            raise
        # self.dumper.dispose() # cyaml
        try:
            # drop cached components so the YAML() instance can be reused
            delattr(self._yaml, '_serializer')
            delattr(self._yaml, '_emitter')
        except AttributeError:
            raise
        if self._transform:
            val = self._output.getvalue()
            if self._yaml.encoding:
                val = val.decode(self._yaml.encoding)
            if self._fstream is None:
                self._transform(val)
            else:
                self._fstream.write(self._transform(val))
                self._fstream.flush()
                self._output = self._fstream # maybe not necessary
        if self._output_path is not None:
            self._output.close()

    def init_output(self, first_data):
        # type: (Any) -> None
        """Lazily set up serializer/representer/emitter using the first document
        (needed to compute top-level colon alignment) and open the serializer."""
        if self._yaml.top_level_colon_align is True:
            tlca = max([len(str(x)) for x in first_data]) # type: Any
        else:
            tlca = self._yaml.top_level_colon_align
        self._yaml.get_serializer_representer_emitter(self._output, tlca)
        self._yaml.serializer.open()
        self._output_inited = True

    def dump(self, data):
        # type: (Any) -> None
        """Serialize one document, initialising the output pipeline on first call."""
        if not self._output_inited:
            self.init_output(data)
        try:
            self._yaml.representer.represent(data)
        except AttributeError:
            # nprint(dir(dumper._representer))
            raise

    # def teardown_input(self):
    # pass
    #
    # def init_input(self):
    # # set the constructor and parser on YAML() instance
    # self._yaml.get_constructor_parser(stream)
    #
    # def load(self):
    # if not self._input_inited:
    # self.init_input()
    # try:
    # while self._yaml.constructor.check_data():
    # yield self._yaml.constructor.get_data()
    # finally:
    # parser.dispose()
    # try:
    # self._reader.reset_reader() # type: ignore
    # except AttributeError:
    # pass
    # try:
    # self._scanner.reset_scanner() # type: ignore
    # except AttributeError:
    # pass
+
+
def yaml_object(yml):
    # type: (Any) -> Any
    """ decorator for classes that needs to dump/load objects
    The tag for such objects is taken from the class attribute yaml_tag (or
    '!' followed by the class name when that attribute is unavailable)
    If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
    loading, default routines (dumping a mapping of the attributes) used otherwise.
    """

    def yo_deco(cls):
        # type: (Any) -> Any
        tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
        try:
            yml.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:
            # no to_yaml on the class: fall back to a generic representer

            def t_y(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )

            yml.representer.add_representer(cls, t_y)
        try:
            yml.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:
            # no from_yaml on the class: fall back to a generic constructor

            def f_y(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)

            yml.constructor.add_constructor(tag, f_y)
        return cls

    return yo_deco
+
+
+########################################################################################
+
+
def scan(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Scan a YAML stream and produce scanning tokens.
    """
    loader = Loader(stream)
    try:
        while loader.scanner.check_token():
            yield loader.scanner.get_token()
    finally:
        # release parser resources even when the generator is abandoned early
        loader._parser.dispose()
+
+
def parse(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse a YAML stream and produce parsing events.
    """
    loader = Loader(stream)
    try:
        while loader._parser.check_event():
            yield loader._parser.get_event()
    finally:
        # release parser resources even when the generator is abandoned early
        loader._parser.dispose()
+
+
def compose(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        loader.dispose()
+
+
def compose_all(stream, Loader=Loader):
    # type: (StreamTextType, Any) -> Any
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader._composer.get_node()
    finally:
        loader._parser.dispose()
+
+
def load(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (StreamTextType, Any, Optional[VersionType], Any) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    """
    if Loader is None:
        # calling without an explicit Loader constructs arbitrary objects: warn
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
    try:
        return loader._constructor.get_single_data()
    finally:
        # release the parser and reset reader/scanner so the loader is reusable
        loader._parser.dispose()
        try:
            loader._reader.reset_reader()
        except AttributeError:
            pass
        try:
            loader._scanner.reset_scanner()
        except AttributeError:
            pass
+
+
def load_all(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any # NOQA
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    """
    if Loader is None:
        # calling without an explicit Loader constructs arbitrary objects: warn
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
    try:
        while loader._constructor.check_data():
            yield loader._constructor.get_data()
    finally:
        # release the parser and reset reader/scanner so the loader is reusable
        loader._parser.dispose()
        try:
            loader._reader.reset_reader()
        except AttributeError:
            pass
        try:
            loader._scanner.reset_scanner()
        except AttributeError:
            pass
+
+
def safe_load(stream, version=None):
    # type: (StreamTextType, Optional[VersionType]) -> Any
    """
    Parse the first YAML document in a stream into a Python object,
    resolving only basic YAML tags (via SafeLoader).
    """
    return load(stream, Loader=SafeLoader, version=version)
+
+
def safe_load_all(stream, version=None):
    # type: (StreamTextType, Optional[VersionType]) -> Any
    """
    Parse every YAML document in a stream into Python objects,
    resolving only basic YAML tags (via SafeLoader).
    """
    return load_all(stream, Loader=SafeLoader, version=version)
+
+
def round_trip_load(stream, version=None, preserve_quotes=None):
    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
    """
    Parse the first YAML document in a stream into a Python object,
    using the round-trip loader so comments and layout are retained.
    """
    return load(
        stream, Loader=RoundTripLoader, version=version, preserve_quotes=preserve_quotes
    )
+
+
def round_trip_load_all(stream, version=None, preserve_quotes=None):
    # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
    """
    Parse every YAML document in a stream into Python objects,
    using the round-trip loader so comments and layout are retained.
    """
    return load_all(
        stream, Loader=RoundTripLoader, version=version, preserve_quotes=preserve_quotes
    )
+
+
def emit(
    events,
    stream=None,
    Dumper=Dumper,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
):
    # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # collect output in memory and hand it back at the end
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
    )
    try:
        for event in events:
            dumper.emit(event)
    finally:
        try:
            dumper._emitter.dispose()
        except AttributeError:
            raise
            # NOTE(review): unreachable after raise; presumably intended as the
            # cyaml fallback when _emitter does not exist — confirm upstream
            dumper.dispose() # cyaml
    if getvalue is not None:
        return getvalue()
+
+
# default output encoding for the legacy dump/serialize functions:
# text (no encoding) on Python 3, utf-8 encoded bytes on Python 2
enc = None if PY3 else 'utf-8'
+
+
def serialize_all(
    nodes,
    stream=None,
    Dumper=Dumper,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # collect output in memory; bytes buffer when an encoding is requested
        if encoding is None:
            stream = StringIO()
        else:
            stream = BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        version=version,
        tags=tags,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
    )
    try:
        dumper._serializer.open()
        for node in nodes:
            dumper.serialize(node)
        dumper._serializer.close()
    finally:
        try:
            dumper._emitter.dispose()
        except AttributeError:
            raise
            # NOTE(review): unreachable after raise; presumably intended as the
            # cyaml fallback when _emitter does not exist — confirm upstream
            dumper.dispose() # cyaml
    if getvalue is not None:
        return getvalue()
+
+
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    # type: (Any, Optional[StreamType], Any, Any) -> Any
    """
    Serialize a single representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    nodes = [node]
    return serialize_all(nodes, stream, Dumper=Dumper, **kwds)
+
+
def dump_all(
    documents,
    stream=None,
    Dumper=Dumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
    top_level_colon_align=None,
    prefix_colon=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str] # NOQA
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if top_level_colon_align is True:
        # auto-align: width of the longest top-level key of the first document
        top_level_colon_align = max([len(str(x)) for x in documents[0]])
    if stream is None:
        # collect output in memory; bytes buffer when an encoding is requested
        if encoding is None:
            stream = StringIO()
        else:
            stream = BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
    try:
        dumper._serializer.open()
        for data in documents:
            try:
                dumper._representer.represent(data)
            except AttributeError:
                # nprint(dir(dumper._representer))
                raise
        dumper._serializer.close()
    finally:
        try:
            dumper._emitter.dispose()
        except AttributeError:
            raise
            # NOTE(review): unreachable after raise; presumably intended as the
            # cyaml fallback when _emitter does not exist — confirm upstream
            dumper.dispose() # cyaml
    if getvalue is not None:
        return getvalue()
    return None
+
+
def dump(
    data,
    stream=None,
    Dumper=Dumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str] # NOQA
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.

    default_style ∈ None, '', '"', "'", '|', '>'

    """
    # thin wrapper: delegate to dump_all with a single-document list
    return dump_all(
        [data],
        stream,
        Dumper=Dumper,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
    )
+
+
def safe_dump_all(documents, stream=None, **kwds):
    # type: (Any, Optional[StreamType], Any) -> Optional[str]
    """
    Serialize a sequence of Python objects into a YAML stream using only
    basic YAML tags (SafeDumper).
    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream=stream, Dumper=SafeDumper, **kwds)
+
+
def safe_dump(data, stream=None, **kwds):
    # type: (Any, Optional[StreamType], Any) -> Optional[str]
    """
    Serialize a single Python object into a YAML stream using only
    basic YAML tags (SafeDumper).
    If stream is None, return the produced string instead.
    """
    documents = [data]
    return dump_all(documents, stream=stream, Dumper=SafeDumper, **kwds)
+
+
def round_trip_dump(
    data,
    stream=None,
    Dumper=RoundTripDumper,
    default_style=None,
    default_flow_style=None,
    canonical=None,
    indent=None,
    width=None,
    allow_unicode=None,
    line_break=None,
    encoding=enc,
    explicit_start=None,
    explicit_end=None,
    version=None,
    tags=None,
    block_seq_indent=None,
    top_level_colon_align=None,
    prefix_colon=None,
):
    # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str] # NOQA
    """Serialize *data* with the round-trip dumper (comment/layout preserving).

    Unlike dump(), allow_unicode defaults to True here.
    """
    allow_unicode = True if allow_unicode is None else allow_unicode
    return dump_all(
        [data],
        stream,
        Dumper=Dumper,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
+
+
+# Loader/Dumper are no longer composites, to get to the associated
+# Resolver()/Representer(), etc., you need to instantiate the class
+
+
def add_implicit_resolver(
    tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
):
    # type: (Any, Any, Any, Any, Any, Any) -> None
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None and Dumper is None:
        # new style: register on the resolver class directly
        resolver.add_implicit_resolver(tag, regexp, first)
        return
    if Loader:
        if hasattr(Loader, 'add_implicit_resolver'):
            Loader.add_implicit_resolver(tag, regexp, first)
        elif issubclass(
            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
        ):
            # legacy loader classes all share the Resolver registry
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
    if Dumper:
        if hasattr(Dumper, 'add_implicit_resolver'):
            Dumper.add_implicit_resolver(tag, regexp, first)
        elif issubclass(
            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
        ):
            # legacy dumper classes all share the Resolver registry
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
+
+
+# this code currently not tested
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
    # type: (Any, Any, Any, Any, Any, Any) -> None
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None and Dumper is None:
        # new style: register on the resolver class directly
        resolver.add_path_resolver(tag, path, kind)
        return
    if Loader:
        if hasattr(Loader, 'add_path_resolver'):
            Loader.add_path_resolver(tag, path, kind)
        elif issubclass(
            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
        ):
            # legacy loader classes all share the Resolver registry
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
    if Dumper:
        if hasattr(Dumper, 'add_path_resolver'):
            Dumper.add_path_resolver(tag, path, kind)
        elif issubclass(
            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
        ):
            # legacy dumper classes all share the Resolver registry
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
+
+
def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add an object constructor for the given tag.
    object_constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.

    With Loader None (new API) the constructor is registered on the
    *constructor* class; otherwise it is routed to the constructor class
    matching the legacy Loader hierarchy.
    """
    if Loader is None:
        # new API: register on the supplied constructor class
        constructor.add_constructor(tag, object_constructor)
    else:
        if hasattr(Loader, 'add_constructor'):
            # Loader manages its own registration
            Loader.add_constructor(tag, object_constructor)
            return
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_constructor(tag, object_constructor)
        # BUG FIX: the original tested issubclass(Loader, Loader) -- the
        # parameter shadows the imported Loader class, so the test was always
        # true and the RoundTripLoader branch below was unreachable.  Test
        # against the real class, as add_multi_constructor does.
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
            Constructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_constructor(tag, object_constructor)
        else:
            raise NotImplementedError
+
+
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        # new API: register on the supplied constructor class
        constructor.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        # NOTE(review): this branch is deliberately disabled via `if False`;
        # if ever re-enabled, note it passes `constructor` where
        # `multi_constructor` looks intended -- confirm before enabling.
        if False and hasattr(Loader, 'add_multi_constructor'):
            Loader.add_multi_constructor(tag_prefix, constructor)
            return
        # legacy dispatch on the Loader hierarchy
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        else:
            raise NotImplementedError
+
+
def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    object_representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.

    With Dumper None (new API) the representer is registered on the
    *representer* class; otherwise it is routed to the representer class
    matching the legacy Dumper hierarchy.
    """
    if Dumper is None:
        # new API: register on the supplied representer class
        representer.add_representer(data_type, object_representer)
    else:
        if hasattr(Dumper, 'add_representer'):
            # Dumper manages its own registration
            Dumper.add_representer(data_type, object_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_representer(data_type, object_representer)
        # BUG FIX: the original tested issubclass(Dumper, Dumper) -- the
        # parameter shadows the imported Dumper class, so the test was always
        # true and the RoundTripDumper branch below was unreachable.  Test
        # against the real class, as done elsewhere in this module.
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
            Representer.add_representer(data_type, object_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_representer(data_type, object_representer)
        else:
            raise NotImplementedError
+
+
# this code currently not tested
def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    multi_representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.

    Dispatch mirrors add_representer: new API registers on *representer*,
    legacy Dumpers are routed to the matching representer class.
    """
    if Dumper is None:
        # new API: register on the supplied representer class
        representer.add_multi_representer(data_type, multi_representer)
    else:
        if hasattr(Dumper, 'add_multi_representer'):
            # Dumper manages its own registration
            Dumper.add_multi_representer(data_type, multi_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_multi_representer(data_type, multi_representer)
        # BUG FIX: the original tested issubclass(Dumper, Dumper) -- the
        # parameter shadows the imported Dumper class, so the test was always
        # true and the RoundTripDumper branch below was unreachable.  Test
        # against the real class, as done elsewhere in this module.
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
            Representer.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
        else:
            raise NotImplementedError
+
+
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    Auto-registers any subclass that declares a non-None yaml_tag: its
    from_yaml/to_yaml classmethods become the constructor and representer
    for that tag.
    """

    def __init__(cls, name, bases, kwds):
        # type: (Any, Any, Any) -> None
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # only register classes that define their own yaml_tag; the abstract
        # YAMLObject base (yaml_tag None) is skipped
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
+
+
class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set yaml_tag (and optionally yaml_flow_style); the metaclass
    then registers from_yaml/to_yaml for that tag automatically.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # which constructor/representer classes the metaclass registers on;
    # subclasses may override (e.g. with SafeConstructor/SafeRepresenter)
    yaml_constructor = Constructor
    yaml_representer = Representer

    yaml_tag = None  # type: Any
    yaml_flow_style = None  # type: Any

    @classmethod
    def from_yaml(cls, constructor, node):
        # type: (Any, Any) -> Any
        """
        Convert a representation node to a Python object.
        """
        return constructor.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, representer, data):
        # type: (Any, Any) -> Any
        """
        Convert a Python object to a representation node.
        """
        return representer.represent_yaml_object(
            cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
        )
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/nodes.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/nodes.py
new file mode 100644
index 0000000000..da86e9c857
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/nodes.py
@@ -0,0 +1,131 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+import sys
+from .compat import string_types
+
+if False: # MYPY
+ from typing import Dict, Any, Text # NOQA
+
+
class Node(object):
    """Base class for representation-tree nodes (scalar/sequence/mapping).

    Carries the resolved tag, the node value, source marks, and the
    round-trip extras (comment, anchor) this fork of the library preserves.
    """

    __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor'

    def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.comment = comment
        self.anchor = anchor

    def __repr__(self):
        # type: () -> str
        value = self.value
        # if isinstance(value, list):
        #     if len(value) == 0:
        #         value = '<empty>'
        #     elif len(value) == 1:
        #         value = '<1 item>'
        #     else:
        #         value = '<%d items>' % len(value)
        # else:
        #     if len(value) > 75:
        #         value = repr(value[:70]+u' ... ')
        #     else:
        #         value = repr(value)
        value = repr(value)
        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)

    def dump(self, indent=0):
        # type: (int) -> None
        """Write a debug rendering of this subtree to stdout."""
        if isinstance(self.value, string_types):
            # scalar leaf: print tag and value on one line, then stop
            sys.stdout.write(
                '{}{}(tag={!r}, value={!r})\n'.format(
                    ' ' * indent, self.__class__.__name__, self.tag, self.value
                )
            )
            if self.comment:
                sys.stdout.write('    {}comment: {})\n'.format(' ' * indent, self.comment))
            return
        sys.stdout.write(
            '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag)
        )
        if self.comment:
            sys.stdout.write('    {}comment: {})\n'.format(' ' * indent, self.comment))
        for v in self.value:
            # mapping values are (key_node, value_node) tuples
            if isinstance(v, tuple):
                for v1 in v:
                    v1.dump(indent + 1)
            elif isinstance(v, Node):
                v.dump(indent + 1)
            else:
                sys.stdout.write('Node value type? {}\n'.format(type(v)))
+
+
class ScalarNode(Node):
    """
    Representation node for a single scalar value.

    styles:
      ? -> set() ? key, no value
      " -> double quoted
      ' -> single quoted
      | -> literal style
      > -> folding style
    """

    __slots__ = ('style',)
    id = 'scalar'  # node-kind discriminator used instead of isinstance checks

    def __init__(
        self, tag, value, start_mark=None, end_mark=None, style=None, comment=None, anchor=None
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any) -> None
        Node.__init__(self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor)
        # style is the quoting/block indicator character (see class docstring)
        self.style = style
+
+
class CollectionNode(Node):
    """Base for sequence/mapping nodes; adds flow (inline) vs block style."""

    __slots__ = ('flow_style',)

    def __init__(
        self,
        tag,
        value,
        start_mark=None,
        end_mark=None,
        flow_style=None,
        comment=None,
        anchor=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any) -> None
        # anchor is intentionally not forwarded to Node.__init__ here but
        # assigned afterwards; the observable result is the same
        Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
        # True -> flow style ([...]/{...}), False -> block style, None -> default
        self.flow_style = flow_style
        self.anchor = anchor
+
+
class SequenceNode(CollectionNode):
    """Representation node for a YAML sequence; value is a list of nodes."""

    __slots__ = ()
    id = 'sequence'  # node-kind discriminator
+
+
class MappingNode(CollectionNode):
    """Representation node for a YAML mapping.

    value is a list of (key_node, value_node) tuples; merge holds pending
    '<<' merge-key data (None until set by the constructor machinery).
    """

    __slots__ = ('merge',)
    id = 'mapping'  # node-kind discriminator

    def __init__(
        self,
        tag,
        value,
        start_mark=None,
        end_mark=None,
        flow_style=None,
        comment=None,
        anchor=None,
    ):
        # type: (Any, Any, Any, Any, Any, Any, Any) -> None
        CollectionNode.__init__(
            self, tag, value, start_mark, end_mark, flow_style, comment, anchor
        )
        self.merge = None
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/parser.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/parser.py
new file mode 100644
index 0000000000..10deaa87b3
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/parser.py
@@ -0,0 +1,802 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document*
+# STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content |
+# indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+# BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+# FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+
+# need to have full path with import, as pkg_resources tries to load parser.py in __init__.py
+# only to not do anything with the package afterwards
+# and for Jython too
+
+
+from ruamel.yaml.error import MarkedYAMLError
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError # NOQA
+from ruamel.yaml.compat import utf8, nprint, nprintf # NOQA
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
+
+
class ParserError(MarkedYAMLError):
    """Raised for grammar violations found while turning tokens into events."""

    pass
+
+
class Parser(object):
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.
    #
    # The parser is a state machine: self.state holds the bound method that
    # produces the next event, self.states is the stack of continuations, and
    # self.marks tracks the start marks of open collections for error messages.

    DEFAULT_TAGS = {u'!': u'!', u'!!': u'tag:yaml.org,2002:'}

    def __init__(self, loader):
        # type: (Any) -> None
        self.loader = loader
        # register ourselves on the loader unless it already has a parser
        if self.loader is not None and getattr(self.loader, '_parser', None) is None:
            self.loader._parser = self
        self.reset_parser()

    def reset_parser(self):
        # type: () -> None
        # Reset the state attributes (to clear self-references)
        self.current_event = None
        self.tag_handles = {}  # type: Dict[Any, Any]
        self.states = []  # type: List[Any]
        self.marks = []  # type: List[Any]
        self.state = self.parse_stream_start  # type: Any

    def dispose(self):
        # type: () -> None
        self.reset_parser()

    @property
    def scanner(self):
        # type: () -> Any
        # new API loaders expose .scanner; legacy composites store _scanner
        if hasattr(self.loader, 'typ'):
            return self.loader.scanner
        return self.loader._scanner

    @property
    def resolver(self):
        # type: () -> Any
        # same new-API / legacy split as for the scanner
        if hasattr(self.loader, 'typ'):
            return self.loader.resolver
        return self.loader._resolver

    def check_event(self, *choices):
        # type: (Any) -> bool
        # Check the type of the next event.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        if self.current_event is not None:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.current_event, choice):
                    return True
        return False

    def peek_event(self):
        # type: () -> Any
        # Get the next event (without consuming it); None at end of stream.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        return self.current_event

    def get_event(self):
        # type: () -> Any
        # Get the next event and proceed further.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        value = self.current_event
        self.current_event = None
        return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document*
+ # STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
    def parse_stream_start(self):
        # type: () -> Any
        # Parse the stream start.
        token = self.scanner.get_token()
        # transfer any comment on the stream-start token to the next token so
        # it survives round-tripping
        token.move_comment(self.scanner.peek_token())
        event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)

        # Prepare the next state.
        self.state = self.parse_implicit_document_start

        return event

    def parse_implicit_document_start(self):
        # type: () -> Any
        # Parse an implicit document (content with no '---'/directives).
        if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
            self.tag_handles = self.DEFAULT_TAGS
            token = self.scanner.peek_token()
            start_mark = end_mark = token.start_mark
            event = DocumentStartEvent(start_mark, end_mark, explicit=False)

            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_block_node

            return event

        else:
            return self.parse_document_start()

    def parse_document_start(self):
        # type: () -> Any
        # Parse any extra document end indicators.
        while self.scanner.check_token(DocumentEndToken):
            self.scanner.get_token()
        # Parse an explicit document.
        if not self.scanner.check_token(StreamEndToken):
            token = self.scanner.peek_token()
            start_mark = token.start_mark
            # consume %YAML / %TAG directives preceding the '---'
            version, tags = self.process_directives()
            if not self.scanner.check_token(DocumentStartToken):
                raise ParserError(
                    None,
                    None,
                    "expected '<document start>', but found %r" % self.scanner.peek_token().id,
                    self.scanner.peek_token().start_mark,
                )
            token = self.scanner.get_token()
            end_mark = token.end_mark
            # if self.loader is not None and \
            #    end_mark.line != self.scanner.peek_token().start_mark.line:
            #     self.loader.scalar_after_indicator = False
            event = DocumentStartEvent(
                start_mark, end_mark, explicit=True, version=version, tags=tags
            )  # type: Any
            self.states.append(self.parse_document_end)
            self.state = self.parse_document_content
        else:
            # Parse the end of the stream.
            token = self.scanner.get_token()
            event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
            # at stream end both stacks must have unwound completely
            assert not self.states
            assert not self.marks
            self.state = None
        return event
+
    def parse_document_end(self):
        # type: () -> Any
        # Parse the document end ('...' marker is optional).
        token = self.scanner.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        if self.scanner.check_token(DocumentEndToken):
            token = self.scanner.get_token()
            end_mark = token.end_mark
            explicit = True
        event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)

        # Prepare the next state: YAML 1.1 allows another implicit document,
        # later versions require an explicit document start.
        if self.resolver.processing_version == (1, 1):
            self.state = self.parse_document_start
        else:
            self.state = self.parse_implicit_document_start

        return event

    def parse_document_content(self):
        # type: () -> Any
        # A document with no content node yields an empty scalar.
        if self.scanner.check_token(
            DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken
        ):
            event = self.process_empty_scalar(self.scanner.peek_token().start_mark)
            self.state = self.states.pop()
            return event
        else:
            return self.parse_block_node()

    def process_directives(self):
        # type: () -> Any
        # Consume %YAML/%TAG directives; returns (yaml_version, tag_handles or None).
        yaml_version = None
        self.tag_handles = {}
        while self.scanner.check_token(DirectiveToken):
            token = self.scanner.get_token()
            if token.name == u'YAML':
                if yaml_version is not None:
                    raise ParserError(
                        None, None, 'found duplicate YAML directive', token.start_mark
                    )
                major, minor = token.value
                if major != 1:
                    raise ParserError(
                        None,
                        None,
                        'found incompatible YAML document (version 1.* is ' 'required)',
                        token.start_mark,
                    )
                yaml_version = token.value
            elif token.name == u'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(
                        None, None, 'duplicate tag handle %r' % utf8(handle), token.start_mark
                    )
                self.tag_handles[handle] = prefix
        if bool(self.tag_handles):
            value = yaml_version, self.tag_handles.copy()  # type: Any
        else:
            value = yaml_version, None
        # propagate version/tags onto the loader so they round-trip on output
        if self.loader is not None and hasattr(self.loader, 'tags'):
            self.loader.version = yaml_version
            if self.loader.tags is None:
                self.loader.tags = {}
            for k in self.tag_handles:
                self.loader.tags[k] = self.tag_handles[k]
        # fill in the default '!' and '!!' handles after snapshotting `value`,
        # so only explicitly declared handles are reported
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
    def parse_block_node(self):
        # type: () -> Any
        return self.parse_node(block=True)

    def parse_flow_node(self):
        # type: () -> Any
        return self.parse_node()

    def parse_block_node_or_indentless_sequence(self):
        # type: () -> Any
        return self.parse_node(block=True, indentless_sequence=True)

    def transform_tag(self, handle, suffix):
        # type: (Any, Any) -> Any
        # expand a tag handle ('!', '!!', '!x!') via the active %TAG table
        return self.tag_handles[handle] + suffix

    def parse_node(self, block=False, indentless_sequence=False):
        # type: (bool, bool) -> Any
        # Central dispatch: turns the next token(s) into exactly one event
        # (alias, scalar, or the start of a sequence/mapping).
        if self.scanner.check_token(AliasToken):
            token = self.scanner.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)  # type: Any
            self.state = self.states.pop()
            return event

        # collect optional anchor/tag properties, in either order
        anchor = None
        tag = None
        start_mark = end_mark = tag_mark = None
        if self.scanner.check_token(AnchorToken):
            token = self.scanner.get_token()
            start_mark = token.start_mark
            end_mark = token.end_mark
            anchor = token.value
            if self.scanner.check_token(TagToken):
                token = self.scanner.get_token()
                tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
        elif self.scanner.check_token(TagToken):
            token = self.scanner.get_token()
            start_mark = tag_mark = token.start_mark
            end_mark = token.end_mark
            tag = token.value
            if self.scanner.check_token(AnchorToken):
                token = self.scanner.get_token()
                # NOTE(review): this resets start_mark/tag_mark to the anchor
                # token's start (tag-first order) -- looks intentional for
                # marks but verify against round-trip expectations
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
        if tag is not None:
            handle, suffix = tag
            if handle is not None:
                if handle not in self.tag_handles:
                    raise ParserError(
                        'while parsing a node',
                        start_mark,
                        'found undefined tag handle %r' % utf8(handle),
                        tag_mark,
                    )
                tag = self.transform_tag(handle, suffix)
            else:
                tag = suffix
        # if tag == u'!':
        #     raise ParserError("while parsing a node", start_mark,
        #             "found non-specific tag '!'", tag_mark,
        #      "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag'
        #     and share your opinion.")
        if start_mark is None:
            start_mark = end_mark = self.scanner.peek_token().start_mark
        event = None
        implicit = tag is None or tag == u'!'
        if indentless_sequence and self.scanner.check_token(BlockEntryToken):
            # an indentless sequence as a mapping value; steal the pre-comment
            comment = None
            pt = self.scanner.peek_token()
            if pt.comment and pt.comment[0]:
                comment = [pt.comment[0], []]
                pt.comment[0] = None
            end_mark = self.scanner.peek_token().end_mark
            event = SequenceStartEvent(
                anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
            )
            self.state = self.parse_indentless_sequence_entry
            return event

        if self.scanner.check_token(ScalarToken):
            token = self.scanner.get_token()
            # self.scanner.peek_token_same_line_comment(token)
            end_mark = token.end_mark
            # implicit is a (plain, non-plain) resolvability pair for scalars
            if (token.plain and tag is None) or tag == u'!':
                implicit = (True, False)
            elif tag is None:
                implicit = (False, True)
            else:
                implicit = (False, False)
            # nprint('se', token.value, token.comment)
            event = ScalarEvent(
                anchor,
                tag,
                implicit,
                token.value,
                start_mark,
                end_mark,
                style=token.style,
                comment=token.comment,
            )
            self.state = self.states.pop()
        elif self.scanner.check_token(FlowSequenceStartToken):
            pt = self.scanner.peek_token()
            end_mark = pt.end_mark
            event = SequenceStartEvent(
                anchor,
                tag,
                implicit,
                start_mark,
                end_mark,
                flow_style=True,
                comment=pt.comment,
            )
            self.state = self.parse_flow_sequence_first_entry
        elif self.scanner.check_token(FlowMappingStartToken):
            pt = self.scanner.peek_token()
            end_mark = pt.end_mark
            event = MappingStartEvent(
                anchor,
                tag,
                implicit,
                start_mark,
                end_mark,
                flow_style=True,
                comment=pt.comment,
            )
            self.state = self.parse_flow_mapping_first_key
        elif block and self.scanner.check_token(BlockSequenceStartToken):
            end_mark = self.scanner.peek_token().start_mark
            # should inserting the comment be dependent on the
            # indentation?
            pt = self.scanner.peek_token()
            comment = pt.comment
            # nprint('pt0', type(pt))
            if comment is None or comment[1] is None:
                comment = pt.split_comment()
            # nprint('pt1', comment)
            event = SequenceStartEvent(
                anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
            )
            self.state = self.parse_block_sequence_first_entry
        elif block and self.scanner.check_token(BlockMappingStartToken):
            end_mark = self.scanner.peek_token().start_mark
            comment = self.scanner.peek_token().comment
            event = MappingStartEvent(
                anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
            )
            self.state = self.parse_block_mapping_first_key
        elif anchor is not None or tag is not None:
            # Empty scalars are allowed even if a tag or an anchor is
            # specified.
            event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
            self.state = self.states.pop()
        else:
            if block:
                node = 'block'
            else:
                node = 'flow'
            token = self.scanner.peek_token()
            raise ParserError(
                'while parsing a %s node' % node,
                start_mark,
                'expected the node content, but found %r' % token.id,
                token.start_mark,
            )
        return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+ # BLOCK-END
+
    def parse_block_sequence_first_entry(self):
        # type: () -> Any
        token = self.scanner.get_token()
        # move any comment from start token
        # token.move_comment(self.scanner.peek_token())
        self.marks.append(token.start_mark)
        return self.parse_block_sequence_entry()

    def parse_block_sequence_entry(self):
        # type: () -> Any
        if self.scanner.check_token(BlockEntryToken):
            token = self.scanner.get_token()
            # carry the entry's comment over to the node that follows it
            token.move_comment(self.scanner.peek_token())
            if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # '- ' with nothing after it: an empty entry
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.scanner.check_token(BlockEndToken):
            token = self.scanner.peek_token()
            raise ParserError(
                'while parsing a block collection',
                self.marks[-1],
                'expected <block end>, but found %r' % token.id,
                token.start_mark,
            )
        token = self.scanner.get_token()  # BlockEndToken
        event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ # indentless_sequence?
+ # sequence:
+ # - entry
+ # - nested
+
    def parse_indentless_sequence_entry(self):
        # type: () -> Any
        # Indentless sequences (entries at the mapping key's column) have no
        # BlockEndToken; the sequence ends at the next key/value/end token.
        if self.scanner.check_token(BlockEntryToken):
            token = self.scanner.get_token()
            token.move_comment(self.scanner.peek_token())
            if not self.scanner.check_token(
                BlockEntryToken, KeyToken, ValueToken, BlockEndToken
            ):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                # '- ' with nothing after it: an empty entry
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        # terminator token is only peeked, not consumed: it belongs to the
        # enclosing construct
        token = self.scanner.peek_token()
        event = SequenceEndEvent(token.start_mark, token.start_mark, comment=token.comment)
        self.state = self.states.pop()
        return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
    def parse_block_mapping_first_key(self):
        # type: () -> Any
        token = self.scanner.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_mapping_key()

    def parse_block_mapping_key(self):
        # type: () -> Any
        if self.scanner.check_token(KeyToken):
            token = self.scanner.get_token()
            # pass the key's comment along to whatever follows
            token.move_comment(self.scanner.peek_token())
            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # '?' with no key content: empty-scalar key
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        # YAML >1.1 allows a value with an omitted key (': val')
        if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken):
            self.state = self.parse_block_mapping_value
            return self.process_empty_scalar(self.scanner.peek_token().start_mark)
        if not self.scanner.check_token(BlockEndToken):
            token = self.scanner.peek_token()
            raise ParserError(
                'while parsing a block mapping',
                self.marks[-1],
                'expected <block end>, but found %r' % token.id,
                token.start_mark,
            )
        token = self.scanner.get_token()
        token.move_comment(self.scanner.peek_token())
        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_block_mapping_value(self):
        # type: () -> Any
        if self.scanner.check_token(ValueToken):
            token = self.scanner.get_token()
            # value token might have post comment move it to e.g. block
            if self.scanner.check_token(ValueToken):
                token.move_comment(self.scanner.peek_token())
            else:
                if not self.scanner.check_token(KeyToken):
                    token.move_comment(self.scanner.peek_token(), empty=True)
                # else: empty value for this key cannot move token.comment
            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # ':' with no value content: empty-scalar value; try to keep
                # the comment attached to the right side of the split
                self.state = self.parse_block_mapping_key
                comment = token.comment
                if comment is None:
                    token = self.scanner.peek_token()
                    comment = token.comment
                    if comment:
                        token._comment = [None, comment[1]]
                        comment = [comment[0], None]
                return self.process_empty_scalar(token.end_mark, comment=comment)
        else:
            # key with no ':' at all: empty-scalar value
            self.state = self.parse_block_mapping_key
            token = self.scanner.peek_token()
            return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generate an inline mapping (set syntax).
+
    def parse_flow_sequence_first_entry(self):
        # type: () -> Any
        token = self.scanner.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_sequence_entry(first=True)

    def parse_flow_sequence_entry(self, first=False):
        # type: (bool) -> Any
        if not self.scanner.check_token(FlowSequenceEndToken):
            if not first:
                # entries after the first must be ','-separated
                if self.scanner.check_token(FlowEntryToken):
                    self.scanner.get_token()
                else:
                    token = self.scanner.peek_token()
                    raise ParserError(
                        'while parsing a flow sequence',
                        self.marks[-1],
                        "expected ',' or ']', but got %r" % token.id,
                        token.start_mark,
                    )

            if self.scanner.check_token(KeyToken):
                # '? k: v' inside a flow sequence produces an inline
                # single-pair mapping (set-like syntax)
                token = self.scanner.peek_token()
                event = MappingStartEvent(
                    None, None, True, token.start_mark, token.end_mark, flow_style=True
                )  # type: Any
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.scanner.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        token = self.scanner.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_sequence_entry_mapping_key(self):
        # type: () -> Any
        token = self.scanner.get_token()
        if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry_mapping_value)
            return self.parse_flow_node()
        else:
            # '?' with no key content
            self.state = self.parse_flow_sequence_entry_mapping_value
            return self.process_empty_scalar(token.end_mark)

    def parse_flow_sequence_entry_mapping_value(self):
        # type: () -> Any
        if self.scanner.check_token(ValueToken):
            token = self.scanner.get_token()
            if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry_mapping_end)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_sequence_entry_mapping_end
                return self.process_empty_scalar(token.end_mark)
        else:
            # no ':' after the key: empty-scalar value
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.scanner.peek_token()
            return self.process_empty_scalar(token.start_mark)

    def parse_flow_sequence_entry_mapping_end(self):
        # type: () -> Any
        # close the synthetic single-pair mapping; no token is consumed
        self.state = self.parse_flow_sequence_entry
        token = self.scanner.peek_token()
        return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
    def parse_flow_mapping_first_key(self):
        # type: () -> Any
        token = self.scanner.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)

    def parse_flow_mapping_key(self, first=False):
        # type: (Any) -> Any
        if not self.scanner.check_token(FlowMappingEndToken):
            if not first:
                # entries after the first must be ','-separated
                if self.scanner.check_token(FlowEntryToken):
                    self.scanner.get_token()
                else:
                    token = self.scanner.peek_token()
                    raise ParserError(
                        'while parsing a flow mapping',
                        self.marks[-1],
                        "expected ',' or '}', but got %r" % token.id,
                        token.start_mark,
                    )
            if self.scanner.check_token(KeyToken):
                token = self.scanner.get_token()
                if not self.scanner.check_token(
                    ValueToken, FlowEntryToken, FlowMappingEndToken
                ):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    # '?' with no key content
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
                ValueToken
            ):
                # YAML >1.1: value with omitted key (': val')
                self.state = self.parse_flow_mapping_value
                return self.process_empty_scalar(self.scanner.peek_token().end_mark)
            elif not self.scanner.check_token(FlowMappingEndToken):
                # bare node used as a key: its value is empty
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        token = self.scanner.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_mapping_value(self):
        # type: () -> Any
        if self.scanner.check_token(ValueToken):
            token = self.scanner.get_token()
            if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                # ':' with no value content
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            # key with no ':' at all
            self.state = self.parse_flow_mapping_key
            token = self.scanner.peek_token()
            return self.process_empty_scalar(token.start_mark)

    def parse_flow_mapping_empty_value(self):
        # type: () -> Any
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.scanner.peek_token().start_mark)

    def process_empty_scalar(self, mark, comment=None):
        # type: (Any, Any) -> Any
        # synthesize a zero-width plain scalar event at `mark`
        return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
+
+
class RoundTripParser(Parser):
    """roundtrip is a safe loader, that wants to see the unmangled tag"""

    def transform_tag(self, handle, suffix):
        # type: (Any, Any) -> Any
        # return self.tag_handles[handle]+suffix
        # Only expand '!!' for the well-known core-schema suffixes; any other
        # tag is kept verbatim (handle + suffix) so it round-trips unchanged.
        if handle == '!!' and suffix in (
            u'null',
            u'bool',
            u'int',
            u'float',
            u'binary',
            u'timestamp',
            u'omap',
            u'pairs',
            u'set',
            u'str',
            u'seq',
            u'map',
        ):
            return Parser.transform_tag(self, handle, suffix)
        return handle + suffix
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/py.typed b/contrib/python/ruamel.yaml/py2/ruamel/yaml/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/py.typed
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/reader.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/reader.py
new file mode 100644
index 0000000000..b056a04b99
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/reader.py
@@ -0,0 +1,311 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position to `length`
+# characters.
+# reader.index - the number of the current character.
+# reader.line, stream.column - the line and the column of the current
+# character.
+
+import codecs
+
+from ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError
+from ruamel.yaml.compat import text_type, binary_type, PY3, UNICODE_SIZE
+from ruamel.yaml.util import RegExp
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Text, Tuple, Optional # NOQA
+# from ruamel.yaml.compat import StreamTextType # NOQA
+
+__all__ = ['Reader', 'ReaderError']
+
+
class ReaderError(YAMLError):
    """Raised when the input stream contains undecodable bytes or
    characters outside the printable range accepted by YAML."""

    def __init__(self, name, position, character, encoding, reason):
        # type: (Any, Any, Any, Any, Any) -> None
        self.name = name
        self.position = position
        self.character = character
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # type: () -> str
        # A bytes offender means decoding failed; otherwise the character was
        # decoded but is not printable.
        if isinstance(self.character, binary_type):
            template = "'%s' codec can't decode byte #x%02x: %s\n" ' in "%s", position %d'
            return template % (
                self.encoding,
                ord(self.character),
                self.reason,
                self.name,
                self.position,
            )
        template = 'unacceptable character #x%04x: %s\n' ' in "%s", position %d'
        return template % (self.character, self.reason, self.name, self.position)
+
+
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds a trailing NUL character to the end as a sentinel.

    # Reader accepts
    # - a `str` object (PY2) / a `bytes` object (PY3),
    # - a `unicode` object (PY2) / a `str` object (PY3),
    # - a file-like object with its `read` method returning `str`,
    # - a file-like object with its `read` method returning `unicode`.

    # Yeah, it's ugly and slow.

    def __init__(self, stream, loader=None):
        # type: (Any, Any) -> None
        self.loader = loader
        # register ourselves on the loader unless it already has a reader
        if self.loader is not None and getattr(self.loader, '_reader', None) is None:
            self.loader._reader = self
        self.reset_reader()
        self.stream = stream  # type: Any  # as .read is called

    def reset_reader(self):
        # type: () -> None
        """Reset all decoding/position state so the reader can be reused."""
        self.name = None  # type: Any
        self.stream_pointer = 0
        self.eof = True
        self.buffer = ""
        self.pointer = 0
        self.raw_buffer = None  # type: Any
        self.raw_decode = None
        self.encoding = None  # type: Optional[Text]
        self.index = 0
        self.line = 0
        self.column = 0

    @property
    def stream(self):
        # type: () -> Any
        try:
            return self._stream
        except AttributeError:
            raise YAMLStreamError('input stream needs to specified')

    @stream.setter
    def stream(self, val):
        # type: (Any) -> None
        """Accept a text string, a byte string or a file-like object.

        For in-memory strings `_stream` is set to None and the whole input
        is buffered immediately; file-like objects are read lazily.
        """
        if val is None:
            return
        self._stream = None
        if isinstance(val, text_type):
            self.name = '<unicode string>'
            self.check_printable(val)
            self.buffer = val + u'\0'  # type: ignore
        elif isinstance(val, binary_type):
            self.name = '<byte string>'
            self.raw_buffer = val
            self.determine_encoding()
        else:
            if not hasattr(val, 'read'):
                raise YAMLStreamError('stream argument needs to have a read() method')
            self._stream = val
            self.name = getattr(self.stream, 'name', '<file>')
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        # type: (int) -> Text
        """Return the character *index* positions ahead without advancing."""
        try:
            return self.buffer[self.pointer + index]
        except IndexError:
            self.update(index + 1)
            return self.buffer[self.pointer + index]

    def prefix(self, length=1):
        # type: (int) -> Any
        """Return the next *length* characters without advancing."""
        if self.pointer + length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer : self.pointer + length]

    def forward_1_1(self, length=1):
        # type: (int) -> None
        """Advance *length* characters, tracking line/column with the YAML 1.1
        line-break set (NEL, LS, PS count as newlines)."""
        if self.pointer + length + 1 >= len(self.buffer):
            self.update(length + 1)
        while length != 0:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            if ch in u'\n\x85\u2028\u2029' or (
                ch == u'\r' and self.buffer[self.pointer] != u'\n'
            ):
                self.line += 1
                self.column = 0
            elif ch != u'\uFEFF':
                # BOMs advance the index but not the visible column
                self.column += 1
            length -= 1

    def forward(self, length=1):
        # type: (int) -> None
        """Advance *length* characters, tracking line/column (YAML 1.2
        newlines only: LF, or CR not followed by LF)."""
        if self.pointer + length + 1 >= len(self.buffer):
            self.update(length + 1)
        while length != 0:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            if ch == u'\n' or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
                self.line += 1
                self.column = 0
            elif ch != u'\uFEFF':
                self.column += 1
            length -= 1

    def get_mark(self):
        # type: () -> Any
        """Return a mark for the current position; a StringMark (with the
        buffer, for snippet display) for in-memory input, else a FileMark."""
        if self.stream is None:
            return StringMark(
                self.name, self.index, self.line, self.column, self.buffer, self.pointer
            )
        else:
            return FileMark(self.name, self.index, self.line, self.column)

    def determine_encoding(self):
        # type: () -> None
        """Pick utf-16-le/utf-16-be/utf-8 from the leading BOM bytes and
        install the matching incremental decoder."""
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, binary_type):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode  # type: ignore
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode  # type: ignore
                self.encoding = 'utf-16-be'
            else:
                # no recognised BOM: default to utf-8
                self.raw_decode = codecs.utf_8_decode  # type: ignore
                self.encoding = 'utf-8'
        self.update(1)

    # Characters YAML forbids; on narrow (UCS-2) builds astral characters
    # are surrogate pairs, so the astral range is not excluded there.
    if UNICODE_SIZE == 2:
        NON_PRINTABLE = RegExp(
            u'[^\x09\x0A\x0D\x20-\x7E\x85' u'\xA0-\uD7FF' u'\uE000-\uFFFD' u']'
        )
    else:
        NON_PRINTABLE = RegExp(
            u'[^\x09\x0A\x0D\x20-\x7E\x85'
            u'\xA0-\uD7FF'
            u'\uE000-\uFFFD'
            u'\U00010000-\U0010FFFF'
            u']'
        )

    _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii')

    @classmethod
    def _get_non_printable_ascii(cls, data):  # type: ignore
        # type: (Text, bytes) -> Optional[Tuple[int, Text]]
        # fast path: pure-ASCII input checked with a single C-level translate
        ascii_bytes = data.encode('ascii')
        non_printables = ascii_bytes.translate(None, cls._printable_ascii)  # type: ignore
        if not non_printables:
            return None
        non_printable = non_printables[:1]
        return ascii_bytes.index(non_printable), non_printable.decode('ascii')

    @classmethod
    def _get_non_printable_regex(cls, data):
        # type: (Text) -> Optional[Tuple[int, Text]]
        # slow path: regex scan for any non-ASCII input
        match = cls.NON_PRINTABLE.search(data)
        if not bool(match):
            return None
        return match.start(), match.group()

    @classmethod
    def _get_non_printable(cls, data):
        # type: (Text) -> Optional[Tuple[int, Text]]
        """Return (index, char) of the first non-printable character, or None."""
        try:
            return cls._get_non_printable_ascii(data)  # type: ignore
        except UnicodeEncodeError:
            return cls._get_non_printable_regex(data)

    def check_printable(self, data):
        # type: (Any) -> None
        """Raise ReaderError if *data* contains a character YAML forbids."""
        non_printable_match = self._get_non_printable(data)
        if non_printable_match is not None:
            start, character = non_printable_match
            position = self.index + (len(self.buffer) - self.pointer) + start
            raise ReaderError(
                self.name,
                position,
                ord(character),
                'unicode',
                'special characters are not allowed',
            )

    def update(self, length):
        # type: (int) -> None
        """Decode raw bytes until at least *length* characters are buffered
        past the current pointer; append the NUL sentinel at EOF."""
        if self.raw_buffer is None:
            return
        # drop already-consumed characters
        self.buffer = self.buffer[self.pointer :]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
                except UnicodeDecodeError as exc:
                    if PY3:
                        character = self.raw_buffer[exc.start]
                    else:
                        character = exc.object[exc.start]
                    # (a duplicated, dead `elif self.stream is not None` branch
                    # that repeated this condition verbatim has been removed)
                    if self.stream is not None:
                        position = self.stream_pointer - len(self.raw_buffer) + exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character, exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=None):
        # type: (Optional[int]) -> None
        """Read another chunk from the stream into raw_buffer; set eof on
        an empty read."""
        if size is None:
            size = 4096 if PY3 else 1024
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
+
+
+# try:
+# import psyco
+# psyco.bind(Reader)
+# except ImportError:
+# pass
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/representer.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/representer.py
new file mode 100644
index 0000000000..1b5185a69b
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/representer.py
@@ -0,0 +1,1282 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division
+
+
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.compat import text_type, binary_type, to_unicode, PY2, PY3
+from ruamel.yaml.compat import ordereddict # type: ignore
+from ruamel.yaml.compat import nprint, nprintf # NOQA
+from ruamel.yaml.scalarstring import (
+ LiteralScalarString,
+ FoldedScalarString,
+ SingleQuotedScalarString,
+ DoubleQuotedScalarString,
+ PlainScalarString,
+)
+from ruamel.yaml.comments import (
+ CommentedMap,
+ CommentedOrderedMap,
+ CommentedSeq,
+ CommentedKeySeq,
+ CommentedKeyMap,
+ CommentedSet,
+ comment_attrib,
+ merge_attrib,
+ TaggedScalar,
+)
+from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from ruamel.yaml.scalarfloat import ScalarFloat
+from ruamel.yaml.scalarbool import ScalarBoolean
+from ruamel.yaml.timestamp import TimeStamp
+
+import datetime
+import sys
+import types
+
+if PY3:
+ import copyreg
+ import base64
+else:
+ import copy_reg as copyreg # type: ignore
+
+if False: # MYPY
+ from typing import Dict, List, Any, Union, Text, Optional # NOQA
+
+# fmt: off
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError', 'RoundTripRepresenter']
+# fmt: on
+
+
class RepresenterError(YAMLError):
    """Raised when an object cannot be converted to a YAML node."""

    pass
+
+
+if PY2:
+
+ def get_classobj_bases(cls):
+ # type: (Any) -> Any
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(get_classobj_bases(base))
+ return bases
+
+
class BaseRepresenter(object):
    """Convert Python objects into YAML node trees.

    Dispatch is driven by two class-level registries: ``yaml_representers``
    (exact-type match) and ``yaml_multi_representers`` (matched along the
    MRO). Subclasses populate them via add_representer / add_multi_representer.
    """

    yaml_representers = {}  # type: Dict[Any, Any]
    yaml_multi_representers = {}  # type: Dict[Any, Any]

    def __init__(self, default_style=None, default_flow_style=None, dumper=None):
        # type: (Any, Any, Any) -> None
        self.dumper = dumper
        if self.dumper is not None:
            self.dumper._representer = self
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        # id(obj) -> node, used to emit aliases for shared objects
        self.represented_objects = {}  # type: Dict[Any, Any]
        # keeps objects alive so their id()s stay unique during a dump
        self.object_keeper = []  # type: List[Any]
        self.alias_key = None  # type: Optional[int]
        self.sort_base_mapping_type_on_output = True

    @property
    def serializer(self):
        # type: () -> Any
        try:
            # new-style dumpers (with a 'typ' attribute) expose .serializer
            if hasattr(self.dumper, 'typ'):
                return self.dumper.serializer
            return self.dumper._serializer
        except AttributeError:
            return self  # cyaml

    def represent(self, data):
        # type: (Any) -> None
        """Represent *data* as a node tree, serialize it and reset per-document state."""
        node = self.represent_data(data)
        self.serializer.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent_data(self, data):
        # type: (Any) -> Any
        """Dispatch *data* to the registered representer and return the node."""
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                # already represented: reuse the node so an alias is emitted
                node = self.represented_objects[self.alias_key]
                # if node is None:
                #     raise RepresenterError(
                #          "recursive objects are not allowed: %r" % data)
                return node
            # self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if PY2:
            # old-style (classic) classes have no __mro__; build the list by hand
            # if type(data) is types.InstanceType:
            if isinstance(data, types.InstanceType):
                data_types = get_classobj_bases(data.__class__) + list(data_types)
        if data_types[0] in self.yaml_representers:
            # exact type match wins
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            # walk the MRO for a multi-representer, then fall back to the
            # None-keyed catch-all representers
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, text_type(data))
        # if alias_key is not None:
        #     self.represented_objects[alias_key] = node
        return node

    def represent_key(self, data):
        # type: (Any) -> Any
        """
        David Fraser: Extract a method to represent keys in mappings, so that
        a subclass can choose not to quote them (for example)
        used in represent_mapping
        https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
        """
        return self.represent_data(data)

    @classmethod
    def add_representer(cls, data_type, representer):
        # type: (Any, Any) -> None
        # copy-on-write so subclasses do not mutate the parent's registry
        if 'yaml_representers' not in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer

    @classmethod
    def add_multi_representer(cls, data_type, representer):
        # type: (Any, Any) -> None
        # copy-on-write so subclasses do not mutate the parent's registry
        if 'yaml_multi_representers' not in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer

    def represent_scalar(self, tag, value, style=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        """Build a ScalarNode; block styles ('|', '>') carry over any comment."""
        if style is None:
            style = self.default_style
        comment = None
        if style and style[0] in '|>':
            comment = getattr(value, 'comment', None)
            if comment:
                comment = [None, [comment]]
        node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        # type: (Any, Any, Any) -> Any
        """Build a SequenceNode; flow style is used only when every item is
        a plain scalar (unless an explicit default overrides it)."""
        value = []  # type: List[Any]
        node = SequenceNode(tag, value, flow_style=flow_style)
        # register the node *before* the children, so cycles find it
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_omap(self, tag, omap, flow_style=None):
        # type: (Any, Any, Any) -> Any
        """Build an ordered mapping as a sequence of single-pair mappings."""
        value = []  # type: List[Any]
        node = SequenceNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item_key in omap:
            item_val = omap[item_key]
            node_item = self.represent_data({item_key: item_val})
            # if not (isinstance(node_item, ScalarNode) \
            #    and not node_item.style):
            #     best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        # type: (Any, Any, Any) -> Any
        """Build a MappingNode; keys are sorted when possible and the
        sort_base_mapping_type_on_output flag is set."""
        value = []  # type: List[Any]
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
            if self.sort_base_mapping_type_on_output:
                try:
                    mapping = sorted(mapping)
                except TypeError:
                    # unorderable keys: keep insertion order
                    pass
        for item_key, item_value in mapping:
            node_key = self.represent_key(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def ignore_aliases(self, data):
        # type: (Any) -> bool
        # base class never suppresses aliasing; subclasses override
        return False
+
+
class SafeRepresenter(BaseRepresenter):
    """Representer for the standard YAML types (the 'safe' subset)."""

    def ignore_aliases(self, data):
        # type: (Any) -> bool
        # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
        # "i.e. two occurrences of the empty tuple may or may not yield the same object"
        # so "data is ()" should not be used
        if data is None or (isinstance(data, tuple) and data == ()):
            return True
        if isinstance(data, (binary_type, text_type, bool, int, float)):
            return True
        return False

    def represent_none(self, data):
        # type: (Any) -> Any
        return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')

    if PY3:

        def represent_str(self, data):
            # type: (Any) -> Any
            return self.represent_scalar(u'tag:yaml.org,2002:str', data)

        def represent_binary(self, data):
            # type: (Any) -> Any
            # encodestring is the deprecated pre-3.1 spelling of encodebytes
            if hasattr(base64, 'encodebytes'):
                data = base64.encodebytes(data).decode('ascii')
            else:
                data = base64.encodestring(data).decode('ascii')
            return self.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|')

    else:

        def represent_str(self, data):
            # type: (Any) -> Any
            # PY2 str: try ascii, then utf-8, else fall back to base64 !!binary
            tag = None
            style = None
            try:
                data = unicode(data, 'ascii')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                try:
                    data = unicode(data, 'utf-8')
                    tag = u'tag:yaml.org,2002:str'
                except UnicodeDecodeError:
                    data = data.encode('base64')
                    tag = u'tag:yaml.org,2002:binary'
                    style = '|'
            return self.represent_scalar(tag, data, style=style)

        def represent_unicode(self, data):
            # type: (Any) -> Any
            return self.represent_scalar(u'tag:yaml.org,2002:str', data)

    def represent_bool(self, data, anchor=None):
        # type: (Any, Optional[Any]) -> Any
        # the dumper may carry a custom (true, false) text pair
        try:
            value = self.dumper.boolean_representation[bool(data)]
        except AttributeError:
            if data:
                value = u'true'
            else:
                value = u'false'
        return self.represent_scalar(u'tag:yaml.org,2002:bool', value, anchor=anchor)

    def represent_int(self, data):
        # type: (Any) -> Any
        return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))

    if PY2:

        def represent_long(self, data):
            # type: (Any) -> Any
            return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))

    # compute float infinity portably by squaring until repr stabilizes
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value * inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # type: (Any) -> Any
        # `data != data` detects NaN; the second clause presumably guards
        # platforms where NaN compares equal to everything (inherited from
        # PyYAML) — TODO confirm
        if data != data or (data == 0.0 and data == 1.0):
            value = u'.nan'
        elif data == self.inf_value:
            value = u'.inf'
        elif data == -self.inf_value:
            value = u'-.inf'
        else:
            value = to_unicode(repr(data)).lower()
            if getattr(self.serializer, 'use_version', None) == (1, 1):
                if u'.' not in value and u'e' in value:
                    # Note that in some cases `repr(data)` represents a float number
                    # without the decimal parts.  For instance:
                    #   >>> repr(1e17)
                    #   '1e17'
                    # Unfortunately, this is not a valid float representation according
                    # to the definition of the `!!float` tag in YAML 1.1.  We fix
                    # this by adding '.0' before the 'e' symbol.
                    value = value.replace(u'e', u'.0e', 1)
        return self.represent_scalar(u'tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        # type: (Any) -> Any
        # pairs = (len(data) > 0 and isinstance(data, list))
        # if pairs:
        #     for item in data:
        #         if not isinstance(item, tuple) or len(item) != 2:
        #             pairs = False
        #             break
        # if not pairs:
        return self.represent_sequence(u'tag:yaml.org,2002:seq', data)

    # value = []
    # for item_key, item_value in data:
    #     value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
    #         [(item_key, item_value)]))
    # return SequenceNode(u'tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        # type: (Any) -> Any
        return self.represent_mapping(u'tag:yaml.org,2002:map', data)

    def represent_ordereddict(self, data):
        # type: (Any) -> Any
        return self.represent_omap(u'tag:yaml.org,2002:omap', data)

    def represent_set(self, data):
        # type: (Any) -> Any
        # a YAML !!set is a mapping whose values are all null
        value = {}  # type: Dict[Any, None]
        for key in data:
            value[key] = None
        return self.represent_mapping(u'tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        # type: (Any) -> Any
        value = to_unicode(data.isoformat())
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        # type: (Any) -> Any
        value = to_unicode(data.isoformat(' '))
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # type: (Any, Any, Any, Any) -> Any
        """Represent a YAMLObject instance as a mapping of its state."""
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        # type: (Any) -> None
        # catch-all registered under key None: refuse unknown objects
        raise RepresenterError('cannot represent an object: %s' % (data,))
+
+
# Register the SafeRepresenter handlers for the core Python / YAML 1.1 types.
SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)

SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)

if PY2:
    SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode)
else:
    SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)

SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)

SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)

if PY2:
    SafeRepresenter.add_representer(long, SafeRepresenter.represent_long)

SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)

SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)

# tuples are emitted as plain sequences, like lists
SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)

SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)

SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)

SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)

if sys.version_info >= (2, 7):
    import collections

    SafeRepresenter.add_representer(
        collections.OrderedDict, SafeRepresenter.represent_ordereddict
    )

SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)

SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)

# catch-all: anything not matched above is rejected
SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
+
+
class Representer(SafeRepresenter):
    """Unsafe representer: also emits python-specific tags
    (!!python/object, !!python/name, !!python/tuple, ...)."""

    if PY2:

        def represent_str(self, data):
            # type: (Any) -> Any
            # PY2 str: ascii -> !!str, utf-8 -> !!python/str, else !!binary
            tag = None
            style = None
            try:
                data = unicode(data, 'ascii')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                try:
                    data = unicode(data, 'utf-8')
                    tag = u'tag:yaml.org,2002:python/str'
                except UnicodeDecodeError:
                    data = data.encode('base64')
                    tag = u'tag:yaml.org,2002:binary'
                    style = '|'
            return self.represent_scalar(tag, data, style=style)

        def represent_unicode(self, data):
            # type: (Any) -> Any
            # ascii-only unicode gets !!python/unicode so it round-trips as unicode
            tag = None
            try:
                data.encode('ascii')
                tag = u'tag:yaml.org,2002:python/unicode'
            except UnicodeEncodeError:
                tag = u'tag:yaml.org,2002:str'
            return self.represent_scalar(tag, data)

        def represent_long(self, data):
            # type: (Any) -> Any
            tag = u'tag:yaml.org,2002:int'
            # only tag as python/long when the value is an actual long object
            if int(data) is not data:
                tag = u'tag:yaml.org,2002:python/long'
            return self.represent_scalar(tag, to_unicode(data))

    def represent_complex(self, data):
        # type: (Any) -> Any
        # drop the zero part when purely real or purely imaginary
        if data.imag == 0.0:
            data = u'%r' % data.real
        elif data.real == 0.0:
            data = u'%rj' % data.imag
        elif data.imag > 0:
            data = u'%r+%rj' % (data.real, data.imag)
        else:
            data = u'%r%rj' % (data.real, data.imag)
        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        # type: (Any) -> Any
        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        # type: (Any) -> Any
        """Represent a class/function by its dotted import path."""
        try:
            name = u'%s.%s' % (data.__module__, data.__qualname__)
        except AttributeError:
            # probably PY2
            name = u'%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar(u'tag:yaml.org,2002:python/name:' + name, "")

    def represent_module(self, data):
        # type: (Any) -> Any
        return self.represent_scalar(u'tag:yaml.org,2002:python/module:' + data.__name__, "")

    if PY2:

        def represent_instance(self, data):
            # type: (Any) -> Any
            # For instances of classic classes, we use __getinitargs__ and
            # __getstate__ to serialize the data.

            # If data.__getinitargs__ exists, the object must be reconstructed
            # by calling cls(**args), where args is a tuple returned by
            # __getinitargs__. Otherwise, the cls.__init__ method should never
            # be called and the class instance is created by instantiating a
            # trivial class and assigning to the instance's __class__ variable.

            # If data.__getstate__ exists, it returns the state of the object.
            # Otherwise, the state of the object is data.__dict__.

            # We produce either a !!python/object or !!python/object/new node.
            # If data.__getinitargs__ does not exist and state is a dictionary,
            # we produce a !!python/object node . Otherwise we produce a
            # !!python/object/new node.

            cls = data.__class__
            class_name = u'%s.%s' % (cls.__module__, cls.__name__)
            args = None
            state = None
            if hasattr(data, '__getinitargs__'):
                args = list(data.__getinitargs__())
            if hasattr(data, '__getstate__'):
                state = data.__getstate__()
            else:
                state = data.__dict__
            if args is None and isinstance(state, dict):
                return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:' + class_name, state
                )
            if isinstance(state, dict) and not state:
                return self.represent_sequence(
                    u'tag:yaml.org,2002:python/object/new:' + class_name, args
                )
            value = {}
            if bool(args):
                value['args'] = args
            value['state'] = state  # type: ignore
            return self.represent_mapping(
                u'tag:yaml.org,2002:python/object/new:' + class_name, value
            )

    def represent_object(self, data):
        # type: (Any) -> Any
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)

        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.

        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).

        # Another special case is when __reduce__ returns a string - we don't
        # support it.

        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.

        cls = type(data)
        if cls in copyreg.dispatch_table:
            reduce = copyreg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError('cannot represent object: %r' % (data,))
        # pad the reduce tuple to exactly five elements
        reduce = (list(reduce) + [None] * 5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = u'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = u'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        try:
            function_name = u'%s.%s' % (function.__module__, function.__qualname__)
        except AttributeError:
            # probably PY2
            function_name = u'%s.%s' % (function.__module__, function.__name__)
        # simplest case: plain dict state via __new__ -> !!python/object mapping
        if not args and not listitems and not dictitems and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                u'tag:yaml.org,2002:python/object:' + function_name, state
            )
        if not listitems and not dictitems and isinstance(state, dict) and not state:
            return self.represent_sequence(tag + function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag + function_name, value)
+
+
# Register the Representer handlers for python-specific types.
if PY2:
    Representer.add_representer(str, Representer.represent_str)

    Representer.add_representer(unicode, Representer.represent_unicode)

    Representer.add_representer(long, Representer.represent_long)

Representer.add_representer(complex, Representer.represent_complex)

Representer.add_representer(tuple, Representer.represent_tuple)

Representer.add_representer(type, Representer.represent_name)

if PY2:
    Representer.add_representer(types.ClassType, Representer.represent_name)

Representer.add_representer(types.FunctionType, Representer.represent_name)

Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)

Representer.add_representer(types.ModuleType, Representer.represent_module)

if PY2:
    Representer.add_multi_representer(types.InstanceType, Representer.represent_instance)

# catch-alls, matched along the MRO
Representer.add_multi_representer(object, Representer.represent_object)

Representer.add_multi_representer(type, Representer.represent_name)
+
+
+class RoundTripRepresenter(SafeRepresenter):
+ # need to add type here and write out the .comment
+ # in serializer and emitter
+
    def __init__(self, default_style=None, default_flow_style=None, dumper=None):
        # type: (Any, Any, Any) -> None
        # round-tripping defaults to block style unless the dumper is the
        # new-style API (has a 'typ' attribute) or the caller chose otherwise
        if not hasattr(dumper, 'typ') and default_flow_style is None:
            default_flow_style = False
        SafeRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=dumper,
        )
+
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ try:
+ if data.anchor is not None and data.anchor.value is not None:
+ return False
+ except AttributeError:
+ pass
+ return SafeRepresenter.ignore_aliases(self, data)
+
+ def represent_none(self, data):
+ # type: (Any) -> Any
+ if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
+ # this will be open ended (although it is not yet)
+ return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
+ return self.represent_scalar(u'tag:yaml.org,2002:null', "")
+
+ def represent_literal_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '|'
+ anchor = data.yaml_anchor(any=True)
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ represent_preserved_scalarstring = represent_literal_scalarstring
+
    def represent_folded_scalarstring(self, data):
        # type: (Any) -> Any
        """Represent a string in folded ('>') block style, re-marking the
        fold points recorded at load time with '\\a' so the emitter can
        restore the original folds."""
        tag = None
        style = '>'
        anchor = data.yaml_anchor(any=True)
        # walk fold positions from the end so earlier indices stay valid
        for fold_pos in reversed(getattr(data, 'fold_pos', [])):
            # only re-mark a fold at a space with non-space neighbours
            # NOTE(review): `fold_pos < len(data)` does not guard the
            # `data[fold_pos + 1]` access when fold_pos == len(data) - 1 —
            # presumably recorded fold positions are never at the very end;
            # TODO confirm
            if (
                data[fold_pos] == ' '
                and (fold_pos > 0 and not data[fold_pos - 1].isspace())
                and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
            ):
                data = data[:fold_pos] + '\a' + data[fold_pos:]
        if PY2 and not isinstance(data, unicode):
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_single_quoted_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = "'"
+ anchor = data.yaml_anchor(any=True)
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def represent_double_quoted_scalarstring(self, data):
+        # type: (Any) -> Any
+        # Emit with double-quote style, preserving the original quoting.
+        tag = None
+        style = '"'
+        anchor = data.yaml_anchor(any=True)
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, 'ascii')
+        tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def represent_plain_scalarstring(self, data):
+        # type: (Any) -> Any
+        # Emit as a plain (unquoted) scalar, preserving the original style.
+        tag = None
+        style = ''
+        anchor = data.yaml_anchor(any=True)
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, 'ascii')
+        tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+    def insert_underscore(self, prefix, s, underscore, anchor=None):
+        # type: (Any, Any, Any, Any) -> Any
+        # Re-insert the underscores an int literal originally had.
+        # `underscore` is None or a 3-tuple:
+        #   [0] grouping interval (insert '_' every N digits from the right),
+        #   [1] truthy if the literal had a leading underscore,
+        #   [2] truthy if the literal had a trailing underscore.
+        if underscore is None:
+            return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor)
+        if underscore[0]:
+            sl = list(s)
+            pos = len(s) - underscore[0]
+            while pos > 0:
+                sl.insert(pos, '_')
+                pos -= underscore[0]
+            s = "".join(sl)
+        if underscore[1]:
+            s = '_' + s
+        if underscore[2]:
+            s += '_'
+        return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor)
+
+    def represent_scalar_int(self, data):
+        # type: (Any) -> Any
+        # Decimal ScalarInt: restore original zero-padded width and underscores.
+        if data._width is not None:
+            s = '{:0{}d}'.format(data, data._width)
+        else:
+            s = format(data, 'd')
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore("", s, data._underscore, anchor=anchor)
+
+    def represent_binary_int(self, data):
+        # type: (Any) -> Any
+        # Binary literal (0b...), preserving zero padding and underscores.
+        if data._width is not None:
+            # cannot use '{:#0{}b}', that strips the zeros
+            s = '{:0{}b}'.format(data, data._width)
+        else:
+            s = format(data, 'b')
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore('0b', s, data._underscore, anchor=anchor)
+
+    def represent_octal_int(self, data):
+        # type: (Any) -> Any
+        # Octal literal (0o...), preserving zero padding and underscores.
+        if data._width is not None:
+            # cannot use '{:#0{}o}', that strips the zeros
+            s = '{:0{}o}'.format(data, data._width)
+        else:
+            s = format(data, 'o')
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore('0o', s, data._underscore, anchor=anchor)
+
+    def represent_hex_int(self, data):
+        # type: (Any) -> Any
+        # Lower-case hex literal (0x...), preserving padding and underscores.
+        if data._width is not None:
+            # cannot use '{:#0{}x}', that strips the zeros
+            s = '{:0{}x}'.format(data, data._width)
+        else:
+            s = format(data, 'x')
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+    def represent_hex_caps_int(self, data):
+        # type: (Any) -> Any
+        # Upper-case hex digits; note the '0x' prefix itself stays lower case.
+        if data._width is not None:
+            # cannot use '{:#0{}X}', that strips the zeros
+            s = '{:0{}X}'.format(data, data._width)
+        else:
+            s = format(data, 'X')
+        anchor = data.yaml_anchor(any=True)
+        return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+    def represent_scalar_float(self, data):
+        # type: (Any) -> Any
+        """Round-trip a ScalarFloat, reconstructing the original textual form
+        (width, precision, mantissa sign, leading zeros, exponent letter/sign/
+        width) stored on the ScalarFloat instance at load time.
+        """
+        value = None
+        anchor = data.yaml_anchor(any=True)
+        # `data != data` is the NaN test; the second clause presumably guards
+        # platforms where NaN compares equal to everything — TODO confirm
+        if data != data or (data == 0.0 and data == 1.0):
+            value = u'.nan'
+        elif data == self.inf_value:
+            value = u'.inf'
+        elif data == -self.inf_value:
+            value = u'-.inf'
+        if value:
+            return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor)
+        if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
+            # no exponent, but trailing dot
+            value = u'{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data)))
+        elif data._exp is None:
+            # no exponent, "normal" dot
+            prec = data._prec
+            ms = data._m_sign if data._m_sign else ""
+            # -1 for the dot
+            value = u'{}{:0{}.{}f}'.format(
+                ms, abs(data), data._width - len(ms), data._width - prec - 1
+            )
+            if prec == 0 or (prec == 1 and ms != ""):
+                # original had no leading 0 before the dot (e.g. '.5')
+                value = value.replace(u'0.', u'.')
+            while len(value) < data._width:
+                # pad with trailing zeros back to the original width
+                value += u'0'
+        else:
+            # exponent
+            m, es = u'{:{}.{}e}'.format(
+                # data, data._width, data._width - data._prec + (1 if data._m_sign else 0)
+                data,
+                data._width,
+                data._width + (1 if data._m_sign else 0),
+            ).split('e')
+            w = data._width if data._prec > 0 else (data._width + 1)
+            if data < 0:
+                w += 1
+            m = m[:w]
+            e = int(es)
+            m1, m2 = m.split('.')  # always second?
+            while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
+                m2 += u'0'
+            if data._m_sign and data > 0:
+                # restore an explicit '+' on the mantissa
+                m1 = '+' + m1
+            esgn = u'+' if data._e_sign else ""
+            if data._prec < 0:  # mantissa without dot
+                if m2 != u'0':
+                    e -= len(m2)
+                else:
+                    m2 = ""
+                while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
+                    m2 += u'0'
+                    e -= 1
+                value = m1 + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+            elif data._prec == 0:  # mantissa with trailing dot
+                e -= len(m2)
+                value = (
+                    m1 + m2 + u'.' + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+                )
+            else:
+                if data._m_lead0 > 0:
+                    # shift the dot left to re-create leading zeros (e.g. 0.0123e4)
+                    m2 = u'0' * (data._m_lead0 - 1) + m1 + m2
+                    m1 = u'0'
+                    m2 = m2[: -data._m_lead0]  # these should be zeros
+                    e += data._m_lead0
+                while len(m1) < data._prec:
+                    # move digits from after the dot to before it
+                    m1 += m2[0]
+                    m2 = m2[1:]
+                    e -= 1
+                value = (
+                    m1 + u'.' + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+                )
+
+        if value is None:
+            # fallback: no layout info recorded, use repr()
+            value = to_unicode(repr(data)).lower()
+        return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor)
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        # Build a SequenceNode, carrying over flow style, anchor and comments
+        # attached to a CommentedSeq so they survive the round trip.
+        value = []  # type: List[Any]
+        # if the flow_style is None, the flow style tacked on to the object
+        # explicitly will be taken. If that is None as well the default flow
+        # style rules
+        try:
+            flow_style = sequence.fa.flow_style(flow_style)
+        except AttributeError:
+            flow_style = flow_style
+        try:
+            anchor = sequence.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        try:
+            comment = getattr(sequence, comment_attrib)
+            node.comment = comment.comment
+            # reset any comment already printed information
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            item_comments = comment.items
+            node.comment = comment.comment
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            # plain lists have no comment attribute
+            item_comments = {}
+        for idx, item in enumerate(sequence):
+            node_item = self.represent_data(item)
+            self.merge_comments(node_item, item_comments.get(idx))
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if len(sequence) != 0 and self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def merge_comments(self, node, comments):
+        # type: (Any, Any) -> Any
+        # Merge per-item comments into a node's existing comment list,
+        # preferring comments already present on the node.
+        if comments is None:
+            assert hasattr(node, 'comment')
+            return node
+        if getattr(node, 'comment', None) is not None:
+            for idx, val in enumerate(comments):
+                if idx >= len(node.comment):
+                    continue
+                nc = node.comment[idx]
+                if nc is not None:
+                    assert val is None or val == nc
+                    comments[idx] = nc
+        node.comment = comments
+        return node
+
+    def represent_key(self, data):
+        # type: (Any) -> Any
+        # Complex mapping keys (sequences/mappings used as keys) are always
+        # emitted in flow style; alias_key is cleared so they are not aliased.
+        if isinstance(data, CommentedKeySeq):
+            self.alias_key = None
+            return self.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True)
+        if isinstance(data, CommentedKeyMap):
+            self.alias_key = None
+            return self.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True)
+        return SafeRepresenter.represent_key(self, data)
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        # Build a MappingNode preserving insertion order, flow style, anchor,
+        # comments, and any '<<' merge keys recorded on a CommentedMap.
+        value = []  # type: List[Any]
+        try:
+            flow_style = mapping.fa.flow_style(flow_style)
+        except AttributeError:
+            flow_style = flow_style
+        try:
+            anchor = mapping.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        # no sorting! !!
+        try:
+            comment = getattr(mapping, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            # plain dicts have no comment attribute
+            item_comments = {}
+        # merge_list: values that came in via '<<' merge keys at load time
+        merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
+        try:
+            merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
+        except IndexError:
+            merge_pos = 0
+        item_count = 0
+        if bool(merge_list):
+            items = mapping.non_merged_items()
+        else:
+            items = mapping.items()
+        for item_key, item_value in items:
+            item_count += 1
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(item_value)
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                assert getattr(node_key, 'comment', None) is None
+                node_key.comment = item_comment[:2]
+                nvc = getattr(node_value, 'comment', None)
+                if nvc is not None:  # end comment already there
+                    nvc[0] = item_comment[2]
+                    nvc[1] = item_comment[3]
+                else:
+                    node_value.comment = item_comment[2:]
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        if bool(merge_list):
+            # because of the call to represent_data here, the anchors
+            # are marked as being used and thereby created
+            if len(merge_list) == 1:
+                arg = self.represent_data(merge_list[0])
+            else:
+                arg = self.represent_data(merge_list)
+                arg.flow_style = True
+            value.insert(merge_pos, (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg))
+        return node
+
+    def represent_omap(self, tag, omap, flow_style=None):
+        # type: (Any, Any, Any) -> Any
+        # An omap is emitted as a sequence of single-key mappings; comments
+        # attached to keys are redistributed onto the generated sub-nodes.
+        value = []  # type: List[Any]
+        try:
+            flow_style = omap.fa.flow_style(flow_style)
+        except AttributeError:
+            flow_style = flow_style
+        try:
+            anchor = omap.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        try:
+            comment = getattr(omap, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key in omap:
+            item_val = omap[item_key]
+            node_item = self.represent_data({item_key: item_val})
+            # node_item.flow_style = False
+            # node item has two scalars in value: node_key and node_value
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                if item_comment[1]:
+                    node_item.comment = [None, item_comment[1]]
+                assert getattr(node_item.value[0][0], 'comment', None) is None
+                node_item.value[0][0].comment = [item_comment[0], None]
+                nvc = getattr(node_item.value[0][1], 'comment', None)
+                if nvc is not None:  # end comment already there
+                    nvc[0] = item_comment[2]
+                    nvc[1] = item_comment[3]
+                else:
+                    node_item.value[0][1].comment = item_comment[2:]
+            # if not (isinstance(node_item, ScalarNode) \
+            #    and not node_item.style):
+            #     best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_set(self, setting):
+        # type: (Any) -> Any
+        # A set is emitted as a mapping with '?'-style keys and null values,
+        # iterating the backing ordered dict to keep element order.
+        flow_style = False
+        tag = u'tag:yaml.org,2002:set'
+        # return self.represent_mapping(tag, value)
+        value = []  # type: List[Any]
+        flow_style = setting.fa.flow_style(flow_style)
+        try:
+            anchor = setting.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        # no sorting! !!
+        try:
+            comment = getattr(setting, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key in setting.odict:
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(None)
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                assert getattr(node_key, 'comment', None) is None
+                node_key.comment = item_comment[:2]
+            node_key.style = node_value.style = '?'
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        best_style = best_style  # NOTE(review): no-op; best_style is unused here
+        return node
+
+    def represent_dict(self, data):
+        # type: (Any) -> Any
+        """write out tag if saved on loading"""
+        # A '!!foo' shorthand saved at load time expands to the full
+        # 'tag:yaml.org,2002:foo' form; otherwise the default map tag is used.
+        try:
+            t = data.tag.value
+        except AttributeError:
+            t = None
+        if t:
+            if t.startswith('!!'):
+                tag = 'tag:yaml.org,2002:' + t[2:]
+            else:
+                tag = t
+        else:
+            tag = u'tag:yaml.org,2002:map'
+        return self.represent_mapping(tag, data)
+
+    def represent_list(self, data):
+        # type: (Any) -> Any
+        # Sequence analogue of represent_dict: re-emit a tag saved at load
+        # time, expanding the '!!' shorthand, else use the default seq tag.
+        try:
+            t = data.tag.value
+        except AttributeError:
+            t = None
+        if t:
+            if t.startswith('!!'):
+                tag = 'tag:yaml.org,2002:' + t[2:]
+            else:
+                tag = t
+        else:
+            tag = u'tag:yaml.org,2002:seq'
+        return self.represent_sequence(tag, data)
+
+    def represent_datetime(self, data):
+        # type: (Any) -> Any
+        # TimeStamp keeps its original layout in data._yaml: 't' (use 'T'
+        # separator), 'delta' (original tz offset to re-apply), 'tz' (suffix).
+        inter = 'T' if data._yaml['t'] else ' '
+        _yaml = data._yaml
+        if _yaml['delta']:
+            data += _yaml['delta']
+            value = data.isoformat(inter)
+        else:
+            # NOTE(review): both branches call isoformat(inter); the split only
+            # matters because the first adds the delta before formatting
+            value = data.isoformat(inter)
+        if _yaml['tz']:
+            value += _yaml['tz']
+        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', to_unicode(value))
+
+    def represent_tagged_scalar(self, data):
+        # type: (Any) -> Any
+        # Re-emit a TaggedScalar with its stored tag, style and anchor intact.
+        try:
+            tag = data.tag.value
+        except AttributeError:
+            tag = None
+        try:
+            anchor = data.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
+
+    def represent_scalar_bool(self, data):
+        # type: (Any) -> Any
+        # ScalarBoolean wraps int (bool is not subclassable); delegate to the
+        # safe representer but keep the anchor, if any.
+        try:
+            anchor = data.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        return SafeRepresenter.represent_bool(self, data, anchor=anchor)
+
+
+# Register the round-trip representers for every ruamel wrapper type so that
+# style, width, anchors and comments captured at load time are re-emitted.
+RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
+
+RoundTripRepresenter.add_representer(
+    LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+    PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring
+)
+
+# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)
+
+RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int)
+
+RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int)
+
+RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
+
+RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
+
+RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int)
+
+RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float)
+
+RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool)
+
+RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
+
+RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
+
+RoundTripRepresenter.add_representer(
+    CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
+)
+
+# guard kept from the days this file also supported Python 2.6
+if sys.version_info >= (2, 7):
+    import collections
+
+    RoundTripRepresenter.add_representer(
+        collections.OrderedDict, RoundTripRepresenter.represent_ordereddict
+    )
+
+RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
+
+RoundTripRepresenter.add_representer(
+    TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
+)
+
+RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/resolver.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/resolver.py
new file mode 100644
index 0000000000..6379943eb4
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/resolver.py
@@ -0,0 +1,399 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import re
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Text, Optional # NOQA
+ from ruamel.yaml.compat import VersionType # NOQA
+
+from ruamel.yaml.compat import string_types, _DEFAULT_YAML_VERSION # NOQA
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA
+from ruamel.yaml.util import RegExp # NOQA
+
+__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
+
+
+# fmt: off
+# resolvers consist of
+# - a list of applicable version
+# - a tag
+# - a regexp
+# - a list of first characters to match
+# Entries tagged only (1, 1) or only (1, 2) implement the differences between
+# YAML 1.1 and 1.2 (Yes/No/On/Off booleans, sexagesimals, 0-prefixed octals).
+implicit_resolvers = [
+    ([(1, 2)],
+        u'tag:yaml.org,2002:bool',
+        RegExp(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
+        list(u'tTfF')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:bool',
+        RegExp(u'''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO
+        |true|True|TRUE|false|False|FALSE
+        |on|On|ON|off|Off|OFF)$''', re.X),
+        list(u'yYnNtTfFoO')),
+    ([(1, 2)],
+        u'tag:yaml.org,2002:float',
+        RegExp(u'''^(?:
+         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+        |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)?
+        |[-+]?\\.(?:inf|Inf|INF)
+        |\\.(?:nan|NaN|NAN))$''', re.X),
+        list(u'-+0123456789.')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:float',
+        RegExp(u'''^(?:
+         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+        |\\.[0-9_]+(?:[eE][-+][0-9]+)?
+        |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*  # sexagesimal float
+        |[-+]?\\.(?:inf|Inf|INF)
+        |\\.(?:nan|NaN|NAN))$''', re.X),
+        list(u'-+0123456789.')),
+    ([(1, 2)],
+        u'tag:yaml.org,2002:int',
+        RegExp(u'''^(?:[-+]?0b[0-1_]+
+        |[-+]?0o?[0-7_]+
+        |[-+]?[0-9_]+
+        |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
+        list(u'-+0123456789')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:int',
+        RegExp(u'''^(?:[-+]?0b[0-1_]+
+        |[-+]?0?[0-7_]+
+        |[-+]?(?:0|[1-9][0-9_]*)
+        |[-+]?0x[0-9a-fA-F_]+
+        |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),  # sexagesimal int
+        list(u'-+0123456789')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:merge',
+        RegExp(u'^(?:<<)$'),
+        [u'<']),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:null',
+        RegExp(u'''^(?: ~
+        |null|Null|NULL
+        | )$''', re.X),
+        [u'~', u'n', u'N', u'']),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:timestamp',
+        RegExp(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+        |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+        (?:[Tt]|[ \\t]+)[0-9][0-9]?
+        :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
+        (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+        list(u'0123456789')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:value',
+        RegExp(u'^(?:=)$'),
+        [u'=']),
+    # The following resolver is only for documentation purposes. It cannot work
+    # because plain scalars cannot start with '!', '&', or '*'.
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:yaml',
+        RegExp(u'^(?:!|&|\\*)$'),
+        list(u'!&*')),
+]
+# fmt: on
+
+
+class ResolverError(YAMLError):
+    """Raised for invalid arguments to the path-resolver registration API."""
+
+    pass
+
+
+class BaseResolver(object):
+    """Maps nodes to tags: implicit resolvers match scalar text against
+    regexps keyed by first character; path resolvers (experimental) match
+    the node's position in the document tree.
+    """
+
+    DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+    DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+    DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+    # class-level registries, copied into a subclass dict on first mutation
+    yaml_implicit_resolvers = {}  # type: Dict[Any, Any]
+    yaml_path_resolvers = {}  # type: Dict[Any, Any]
+
+    def __init__(self, loadumper=None):
+        # type: (Any, Any) -> None
+        self.loadumper = loadumper
+        if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None:
+            self.loadumper._resolver = self.loadumper
+        self._loader_version = None  # type: Any
+        # stacks maintained by descend_resolver/ascend_resolver
+        self.resolver_exact_paths = []  # type: List[Any]
+        self.resolver_prefix_paths = []  # type: List[Any]
+
+    @property
+    def parser(self):
+        # type: () -> Any
+        # new-style API objects carry 'typ'; old-style ones use _parser
+        if self.loadumper is not None:
+            if hasattr(self.loadumper, 'typ'):
+                return self.loadumper.parser
+            return self.loadumper._parser
+        return None
+
+    @classmethod
+    def add_implicit_resolver_base(cls, tag, regexp, first):
+        # type: (Any, Any, Any) -> None
+        # Register on this class only (copy-on-write of the shared registry),
+        # without touching the module-level implicit_resolvers table.
+        if 'yaml_implicit_resolvers' not in cls.__dict__:
+            # deepcopy doesn't work here
+            cls.yaml_implicit_resolvers = dict(
+                (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+            )
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+    @classmethod
+    def add_implicit_resolver(cls, tag, regexp, first):
+        # type: (Any, Any, Any) -> None
+        # Same as add_implicit_resolver_base, but also appends to the global
+        # implicit_resolvers list so VersionedResolver instances pick it up.
+        if 'yaml_implicit_resolvers' not in cls.__dict__:
+            # deepcopy doesn't work here
+            cls.yaml_implicit_resolvers = dict(
+                (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+            )
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+        implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
+
+    # @classmethod
+    # def add_implicit_resolver(cls, tag, regexp, first):
+
+    @classmethod
+    def add_path_resolver(cls, tag, path, kind=None):
+        # type: (Any, Any, Any) -> None
+        # Note: `add_path_resolver` is experimental. The API could be changed.
+        # `new_path` is a pattern that is matched against the path from the
+        # root to the node that is being considered. `node_path` elements are
+        # tuples `(node_check, index_check)`. `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+        # matches any kind of a node. `index_check` could be `None`, a boolean
+        # value, a string value, or a number. `None` and `False` match against
+        # any _value_ of sequence and mapping nodes. `True` matches against
+        # any _key_ of a mapping node. A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key which content is
+        # equal to the `index_check` value. An integer `index_check` matches
+        # against a sequence value with the index equal to `index_check`.
+        if 'yaml_path_resolvers' not in cls.__dict__:
+            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+        new_path = []  # type: List[Any]
+        for element in path:
+            if isinstance(element, (list, tuple)):
+                if len(element) == 2:
+                    node_check, index_check = element
+                elif len(element) == 1:
+                    node_check = element[0]
+                    index_check = True
+                else:
+                    raise ResolverError('Invalid path element: %s' % (element,))
+            else:
+                node_check = None
+                index_check = element
+            # allow the builtin types str/list/dict as shorthand for node kinds
+            if node_check is str:
+                node_check = ScalarNode
+            elif node_check is list:
+                node_check = SequenceNode
+            elif node_check is dict:
+                node_check = MappingNode
+            elif (
+                node_check not in [ScalarNode, SequenceNode, MappingNode]
+                and not isinstance(node_check, string_types)
+                and node_check is not None
+            ):
+                raise ResolverError('Invalid node checker: %s' % (node_check,))
+            if not isinstance(index_check, (string_types, int)) and index_check is not None:
+                raise ResolverError('Invalid index checker: %s' % (index_check,))
+            new_path.append((node_check, index_check))
+        if kind is str:
+            kind = ScalarNode
+        elif kind is list:
+            kind = SequenceNode
+        elif kind is dict:
+            kind = MappingNode
+        elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
+            raise ResolverError('Invalid node kind: %s' % (kind,))
+        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+    def descend_resolver(self, current_node, current_index):
+        # type: (Any, Any) -> None
+        # Push the matching state for one level deeper in the document tree;
+        # no-op unless path resolvers are registered.
+        if not self.yaml_path_resolvers:
+            return
+        exact_paths = {}
+        prefix_paths = []
+        if current_node:
+            depth = len(self.resolver_prefix_paths)
+            for path, kind in self.resolver_prefix_paths[-1]:
+                if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
+                    if len(path) > depth:
+                        prefix_paths.append((path, kind))
+                    else:
+                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+        else:
+            # at the root: seed from all registered paths
+            for path, kind in self.yaml_path_resolvers:
+                if not path:
+                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+                else:
+                    prefix_paths.append((path, kind))
+        self.resolver_exact_paths.append(exact_paths)
+        self.resolver_prefix_paths.append(prefix_paths)
+
+    def ascend_resolver(self):
+        # type: () -> None
+        # Pop the state pushed by the matching descend_resolver call.
+        if not self.yaml_path_resolvers:
+            return
+        self.resolver_exact_paths.pop()
+        self.resolver_prefix_paths.pop()
+
+    def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
+        # type: (int, Text, Any, Any, Any) -> bool
+        # Does path element depth-1 match the (node, index) we descended into?
+        node_check, index_check = path[depth - 1]
+        if isinstance(node_check, string_types):
+            if current_node.tag != node_check:
+                return False
+        elif node_check is not None:
+            if not isinstance(current_node, node_check):
+                return False
+        if index_check is True and current_index is not None:
+            return False
+        if (index_check is False or index_check is None) and current_index is None:
+            return False
+        if isinstance(index_check, string_types):
+            if not (
+                isinstance(current_index, ScalarNode) and index_check == current_index.value
+            ):
+                return False
+        elif isinstance(index_check, int) and not isinstance(index_check, bool):
+            if index_check != current_index:
+                return False
+        return True
+
+    def resolve(self, kind, value, implicit):
+        # type: (Any, Any, Any) -> Any
+        # Resolve a tag: implicit scalar rules first (dispatched on the first
+        # character), then path resolvers, then the per-kind default tag.
+        if kind is ScalarNode and implicit[0]:
+            if value == "":
+                resolvers = self.yaml_implicit_resolvers.get("", [])
+            else:
+                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+            resolvers += self.yaml_implicit_resolvers.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if bool(self.yaml_path_resolvers):
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
+    @property
+    def processing_version(self):
+        # type: () -> Any
+        # the base resolver is version agnostic; VersionedResolver overrides
+        return None
+
+
+class Resolver(BaseResolver):
+    """Non-versioned resolver; pre-loaded below with the YAML 1.2 rules."""
+
+    pass
+
+
+# pre-populate the plain Resolver with only the YAML 1.2 implicit rules
+for ir in implicit_resolvers:
+    if (1, 2) in ir[0]:
+        Resolver.add_implicit_resolver_base(*ir[1:])
+
+
+class VersionedResolver(BaseResolver):
+    """
+    contrary to the "normal" resolver, the smart resolver delays loading
+    the pattern matching rules. That way it can decide to load 1.1 rules
+    or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals
+    and Yes/No/On/Off booleans.
+    """
+
+    def __init__(self, version=None, loader=None, loadumper=None):
+        # type: (Optional[VersionType], Any, Any) -> None
+        # `loadumper` is the newer keyword; accept it as an alias for `loader`
+        if loader is None and loadumper is not None:
+            loader = loadumper
+        BaseResolver.__init__(self, loader)
+        self._loader_version = self.get_loader_version(version)
+        # lazily filled per-version cache: version -> {first_char: [(tag, re)]}
+        self._version_implicit_resolver = {}  # type: Dict[Any, Any]
+
+    def add_version_implicit_resolver(self, version, tag, regexp, first):
+        # type: (VersionType, Any, Any, Any) -> None
+        if first is None:
+            first = [None]
+        impl_resolver = self._version_implicit_resolver.setdefault(version, {})
+        for ch in first:
+            impl_resolver.setdefault(ch, []).append((tag, regexp))
+
+    def get_loader_version(self, version):
+        # type: (Optional[VersionType]) -> Any
+        # Normalize None / (1, 2) / [1, 2] / '1.2' to a tuple (or None).
+        if version is None or isinstance(version, tuple):
+            return version
+        if isinstance(version, list):
+            return tuple(version)
+        # assume string
+        return tuple(map(int, version.split(u'.')))
+
+    @property
+    def versioned_resolver(self):
+        # type: () -> Any
+        """
+        select the resolver based on the version we are parsing
+        """
+        version = self.processing_version
+        if version not in self._version_implicit_resolver:
+            # first use of this version: filter the global table by version
+            for x in implicit_resolvers:
+                if version in x[0]:
+                    self.add_version_implicit_resolver(version, x[1], x[2], x[3])
+        return self._version_implicit_resolver[version]
+
+    def resolve(self, kind, value, implicit):
+        # type: (Any, Any, Any) -> Any
+        # Same algorithm as BaseResolver.resolve, but the implicit rules are
+        # taken from the version-specific table.
+        if kind is ScalarNode and implicit[0]:
+            if value == "":
+                resolvers = self.versioned_resolver.get("", [])
+            else:
+                resolvers = self.versioned_resolver.get(value[0], [])
+            resolvers += self.versioned_resolver.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if bool(self.yaml_path_resolvers):
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
+    @property
+    def processing_version(self):
+        # type: () -> Any
+        # Version precedence: %YAML directive seen by the scanner, then the
+        # load/dump API object, then the constructor argument, then default.
+        try:
+            version = self.loadumper._scanner.yaml_version
+        except AttributeError:
+            try:
+                if hasattr(self.loadumper, 'typ'):
+                    version = self.loadumper.version
+                else:
+                    version = self.loadumper._serializer.use_version  # dumping
+            except AttributeError:
+                version = None
+        if version is None:
+            version = self._loader_version
+            if version is None:
+                version = _DEFAULT_YAML_VERSION
+        return version
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarbool.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarbool.py
new file mode 100644
index 0000000000..fc8f8c2a53
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarbool.py
@@ -0,0 +1,51 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+"""
+You cannot subclass bool, and this is necessary for round-tripping anchored
+bool values (and also if you want to preserve the original way of writing)
+
+bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well.
+
+You can use these in an if statement, but not when testing equivalence
+"""
+
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarBoolean']
+
+# no need for no_limit_int -> int
+
+
+class ScalarBoolean(int):
+    # int subclass standing in for bool (which cannot be subclassed), so an
+    # anchored boolean can round-trip; truthiness works, equality with bool
+    # does not distinguish True/False identity.
+    def __new__(cls, *args, **kw):
+        # type: (Any, Any, Any) -> Any
+        # pop the ruamel-specific keyword before delegating to int.__new__
+        anchor = kw.pop('anchor', None)  # type: ignore
+        b = int.__new__(cls, *args, **kw)  # type: ignore
+        if anchor is not None:
+            b.yaml_set_anchor(anchor, always_dump=True)
+        return b
+
+    @property
+    def anchor(self):
+        # type: () -> Any
+        # lazily attach an Anchor object on first access
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self, any=False):
+        # type: (bool) -> Any
+        # Return the anchor if one exists and (any=True or it is marked for
+        # dumping); None otherwise.
+        if not hasattr(self, Anchor.attrib):
+            return None
+        if any or self.anchor.always_dump:
+            return self.anchor
+        return None
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        # type: (Any, bool) -> None
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarfloat.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarfloat.py
new file mode 100644
index 0000000000..0404df3376
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarfloat.py
@@ -0,0 +1,127 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import sys
+from .compat import no_limit_int # NOQA
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat']
+
+
class ScalarFloat(float):
    """float subclass that remembers the lexical form of the YAML scalar
    (overall width, precision, mantissa sign and leading zeros, exponent
    letter/width/sign, underscore positions) plus an optional anchor, so
    the scalar can be written back the way it was read."""

    def __new__(cls, *args, **kw):
        # type: (Any, Any, Any) -> Any
        width = kw.pop('width', None)  # type: ignore
        prec = kw.pop('prec', None)  # type: ignore
        m_sign = kw.pop('m_sign', None)  # type: ignore
        m_lead0 = kw.pop('m_lead0', 0)  # type: ignore
        exp = kw.pop('exp', None)  # type: ignore
        e_width = kw.pop('e_width', None)  # type: ignore
        e_sign = kw.pop('e_sign', None)  # type: ignore
        underscore = kw.pop('underscore', None)  # type: ignore
        anchor = kw.pop('anchor', None)  # type: ignore
        v = float.__new__(cls, *args, **kw)  # type: ignore
        v._width = width
        v._prec = prec
        v._m_sign = m_sign
        v._m_lead0 = m_lead0
        v._exp = exp
        v._e_width = e_width
        v._e_sign = e_sign
        v._underscore = underscore
        if anchor is not None:
            v.yaml_set_anchor(anchor, always_dump=True)
        return v

    # NOTE: the augmented-assignment operators deliberately degrade to a
    # plain float — the stored formatting cannot be kept correct once the
    # value changes.  The original code carried the (unreachable)
    # attribute-copying statements after each return; that dead code has
    # been removed here without changing behaviour.

    def __iadd__(self, a):  # type: ignore
        # type: (Any) -> Any
        return float(self) + a

    def __ifloordiv__(self, a):  # type: ignore
        # type: (Any) -> Any
        return float(self) // a

    def __imul__(self, a):  # type: ignore
        # type: (Any) -> Any
        return float(self) * a

    def __ipow__(self, a):  # type: ignore
        # type: (Any) -> Any
        return float(self) ** a

    def __isub__(self, a):  # type: ignore
        # type: (Any) -> Any
        return float(self) - a

    @property
    def anchor(self):
        # type: () -> Any
        """The Anchor object, created empty on first access."""
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any=False):
        # type: (bool) -> Any
        """Return the anchor if one was set (and is dumpable, unless *any*)."""
        if not hasattr(self, Anchor.attrib):
            return None
        if any or self.anchor.always_dump:
            return self.anchor
        return None

    def yaml_set_anchor(self, value, always_dump=False):
        # type: (Any, bool) -> None
        """Attach anchor *value*; always_dump forces it to be emitted."""
        self.anchor.value = value
        self.anchor.always_dump = always_dump

    def dump(self, out=sys.stdout):
        # type: (Any) -> Any
        """Write a one-line debug representation of value plus layout info."""
        out.write(
            'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format(
                self,
                self._width,  # type: ignore
                self._prec,  # type: ignore
                self._m_sign,  # type: ignore
                self._m_lead0,  # type: ignore
                self._underscore,  # type: ignore
                self._exp,  # type: ignore
                self._e_width,  # type: ignore
                self._e_sign,  # type: ignore
            )
        )
+
+
class ExponentialFloat(ScalarFloat):
    # NOTE(review): name suggests the scalar was written in exponential
    # notation with a lower-case 'e' (cf. ExponentialCapsFloat) — confirm
    # at the constructor/representer call sites.
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        return ScalarFloat.__new__(cls, value, underscore=underscore, width=width)
+
+
class ExponentialCapsFloat(ScalarFloat):
    # NOTE(review): name suggests the scalar was written in exponential
    # notation with an upper-case 'E' — confirm at the call sites.
    def __new__(cls, value, width=None, underscore=None):
        # type: (Any, Any, Any) -> Any
        return ScalarFloat.__new__(cls, value, underscore=underscore, width=width)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarint.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarint.py
new file mode 100644
index 0000000000..581a8df730
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarint.py
@@ -0,0 +1,130 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+from .compat import no_limit_int # NOQA
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt']
+
+
class ScalarInt(no_limit_int):
    """Integer that remembers its lexical width and underscore positions,
    plus an optional anchor, so it can be round-tripped faithfully.

    The augmented-assignment operators return a new instance of the same
    subclass carrying the original width and a copy of the underscore
    positions.
    """

    def __new__(cls, *args, **kw):
        # type: (Any, Any, Any) -> Any
        lex_width = kw.pop('width', None)  # type: ignore
        lex_underscore = kw.pop('underscore', None)  # type: ignore
        lex_anchor = kw.pop('anchor', None)  # type: ignore
        v = no_limit_int.__new__(cls, *args, **kw)  # type: ignore
        v._width = lex_width
        v._underscore = lex_underscore
        if lex_anchor is not None:
            v.yaml_set_anchor(lex_anchor, always_dump=True)
        return v

    def __iadd__(self, a):  # type: ignore
        # type: (Any) -> Any
        res = type(self)(self + a)
        res._width = self._width  # type: ignore
        u = self._underscore  # type: ignore
        res._underscore = None if u is None else u[:]  # type: ignore
        return res

    def __ifloordiv__(self, a):  # type: ignore
        # type: (Any) -> Any
        res = type(self)(self // a)
        res._width = self._width  # type: ignore
        u = self._underscore  # type: ignore
        res._underscore = None if u is None else u[:]  # type: ignore
        return res

    def __imul__(self, a):  # type: ignore
        # type: (Any) -> Any
        res = type(self)(self * a)
        res._width = self._width  # type: ignore
        u = self._underscore  # type: ignore
        res._underscore = None if u is None else u[:]  # type: ignore
        return res

    def __ipow__(self, a):  # type: ignore
        # type: (Any) -> Any
        res = type(self)(self ** a)
        res._width = self._width  # type: ignore
        u = self._underscore  # type: ignore
        res._underscore = None if u is None else u[:]  # type: ignore
        return res

    def __isub__(self, a):  # type: ignore
        # type: (Any) -> Any
        res = type(self)(self - a)
        res._width = self._width  # type: ignore
        u = self._underscore  # type: ignore
        res._underscore = None if u is None else u[:]  # type: ignore
        return res

    @property
    def anchor(self):
        # type: () -> Any
        """The Anchor object, created empty on first access."""
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any=False):
        # type: (bool) -> Any
        """Return the anchor if one was set (and is dumpable, unless *any*)."""
        if hasattr(self, Anchor.attrib):
            if any or self.anchor.always_dump:
                return self.anchor
        return None

    def yaml_set_anchor(self, value, always_dump=False):
        # type: (Any, bool) -> None
        """Attach anchor *value*; always_dump forces it to be emitted."""
        self.anchor.value = value
        self.anchor.always_dump = always_dump
+
+
class BinaryInt(ScalarInt):
    # NOTE(review): name indicates the scalar was written in binary
    # (0b...) notation — confirm against the constructor/representer.
    def __new__(cls, value, width=None, underscore=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, anchor=anchor, width=width, underscore=underscore)
+
+
class OctalInt(ScalarInt):
    # NOTE(review): name indicates the scalar was written in octal
    # notation — confirm against the constructor/representer.
    def __new__(cls, value, width=None, underscore=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, anchor=anchor, width=width, underscore=underscore)
+
+
+# mixed casing of A-F is not supported, when loading the first non digit
+# determines the case
+
+
class HexInt(ScalarInt):
    """Hexadecimal scalar using lower case (a-f)."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, anchor=anchor, width=width, underscore=underscore)
+
+
class HexCapsInt(ScalarInt):
    """Hexadecimal scalar using upper case (A-F)."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, anchor=anchor, width=width, underscore=underscore)
+
+
class DecimalInt(ScalarInt):
    """Plain decimal scalar; the subclass exists so an anchor can be
    attached."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        return ScalarInt.__new__(cls, value, anchor=anchor, width=width, underscore=underscore)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarstring.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarstring.py
new file mode 100644
index 0000000000..f1646392dd
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scalarstring.py
@@ -0,0 +1,156 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+from ruamel.yaml.compat import text_type
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = [
+ 'ScalarString',
+ 'LiteralScalarString',
+ 'FoldedScalarString',
+ 'SingleQuotedScalarString',
+ 'DoubleQuotedScalarString',
+ 'PlainScalarString',
+ # PreservedScalarString is the old name, as it was the first to be preserved on rt,
+ # use LiteralScalarString instead
+ 'PreservedScalarString',
+]
+
+
class ScalarString(text_type):
    """Base class for strings that remember their YAML quoting style and
    can carry an anchor (stored under Anchor.attrib, reserved via
    __slots__)."""

    __slots__ = Anchor.attrib

    def __new__(cls, *args, **kw):
        # type: (Any, Any) -> Any
        anchor_arg = kw.pop('anchor', None)  # type: ignore
        obj = text_type.__new__(cls, *args, **kw)  # type: ignore
        if anchor_arg is not None:
            obj.yaml_set_anchor(anchor_arg, always_dump=True)
        return obj

    def replace(self, old, new, maxreplace=-1):
        # type: (Any, Any, int) -> Any
        """As text_type.replace, but the result keeps this subclass."""
        replaced = text_type.replace(self, old, new, maxreplace)
        return type(self)(replaced)

    @property
    def anchor(self):
        # type: () -> Any
        """The Anchor object, created empty on first access."""
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any=False):
        # type: (bool) -> Any
        """Return the anchor if one was set (and is dumpable, unless *any*)."""
        if hasattr(self, Anchor.attrib):
            if any or self.anchor.always_dump:
                return self.anchor
        return None

    def yaml_set_anchor(self, value, always_dump=False):
        # type: (Any, bool) -> None
        """Attach anchor *value*; always_dump forces it to be emitted."""
        self.anchor.value = value
        self.anchor.always_dump = always_dump
+
+
class LiteralScalarString(ScalarString):
    """String dumped in literal block style ('|')."""

    __slots__ = 'comment'  # the comment after the | on the first line

    style = '|'

    def __new__(cls, value, anchor=None):
        # type: (Text, Any) -> Any
        return ScalarString.__new__(cls, value, anchor=anchor)


# Historical alias: this was the first scalar type to be preserved on
# round-trip; prefer LiteralScalarString in new code.
PreservedScalarString = LiteralScalarString
+
+
class FoldedScalarString(ScalarString):
    """String dumped in folded block style ('>')."""

    __slots__ = ('fold_pos', 'comment')  # the comment after the > on the first line

    style = '>'

    def __new__(cls, value, anchor=None):
        # type: (Text, Any) -> Any
        return super(FoldedScalarString, cls).__new__(cls, value, anchor=anchor)
+
+
class SingleQuotedScalarString(ScalarString):
    """String dumped with single quotes."""

    __slots__ = ()

    style = "'"

    def __new__(cls, value, anchor=None):
        # type: (Text, Any) -> Any
        return super(SingleQuotedScalarString, cls).__new__(cls, value, anchor=anchor)
+
+
class DoubleQuotedScalarString(ScalarString):
    """String dumped with double quotes."""

    __slots__ = ()

    style = '"'

    def __new__(cls, value, anchor=None):
        # type: (Text, Any) -> Any
        return super(DoubleQuotedScalarString, cls).__new__(cls, value, anchor=anchor)
+
+
class PlainScalarString(ScalarString):
    """String dumped without any quoting."""

    __slots__ = ()

    style = ''

    def __new__(cls, value, anchor=None):
        # type: (Text, Any) -> Any
        return super(PlainScalarString, cls).__new__(cls, value, anchor=anchor)
+
+
def preserve_literal(s):
    # type: (Text) -> Text
    """Wrap *s* as a LiteralScalarString, normalizing CRLF and CR to LF."""
    normalized = s.replace('\r\n', '\n')
    normalized = normalized.replace('\r', '\n')
    return LiteralScalarString(normalized)
+
+
def walk_tree(base, map=None):
    # type: (Any, Any) -> None
    """
    Recursively walk a simple YAML tree (mapping values and sequence
    items) and replace any string containing a trigger character by the
    result of the matching transform; by default multi-line strings are
    converted to literal block scalars.

    An explicit (ordered) mapping of trigger character -> transform can
    be supplied; the first trigger found in a string wins:
        map = ruamel.yaml.compat.ordereddict
        map['\n'] = preserve_literal
        map[':'] = SingleQuotedScalarString
        walk_tree(data, map=map)
    """
    from ruamel.yaml.compat import string_types
    from ruamel.yaml.compat import MutableMapping, MutableSequence  # type: ignore

    if map is None:
        map = {'\n': preserve_literal}

    if isinstance(base, MutableMapping):
        for key in base:
            value = base[key]  # type: Text
            if not isinstance(value, string_types):
                walk_tree(value, map=map)
                continue
            for trigger in map:
                if trigger in value:
                    base[key] = map[trigger](value)
                    break
    elif isinstance(base, MutableSequence):
        for idx, item in enumerate(base):
            if not isinstance(item, string_types):
                walk_tree(item, map=map)
                continue
            for trigger in map:
                if trigger in item:  # type: ignore
                    base[idx] = map[trigger](item)
                    break
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/scanner.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scanner.py
new file mode 100644
index 0000000000..df85ae033f
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/scanner.py
@@ -0,0 +1,1980 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# RoundTripScanner
+# COMMENT(value)
+#
+# Read comments in the Scanner code for more details.
+#
+
+from ruamel.yaml.error import MarkedYAMLError
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.compat import utf8, unichr, PY3, check_anchorname_char, nprint # NOQA
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Text # NOQA
+ from ruamel.yaml.compat import VersionType # NOQA
+
+__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
+
+
# Character classes used throughout the scanner.  NUL ('\0') terminates
# the buffer (the Reader appends it to the input); '\x85' (NEL),
# '\u2028' (LS) and '\u2029' (PS) are the additional Unicode line breaks
# accepted alongside '\n' and '\r'.
_THE_END = '\n\0\r\x85\u2028\u2029'
_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029'
_SPACE_TAB = ' \t'
+
+
class ScannerError(MarkedYAMLError):
    """Raised (with context/problem marks) when the character stream
    cannot be split into tokens."""

    pass
+
+
class SimpleKey(object):
    """Position record for a potential simple key (a key not introduced
    by the '?' indicator).

    At most one of these is kept per flow level in
    Scanner.possible_simple_keys; a ``required`` key must have its ':'
    located on the same line.
    """

    def __init__(self, token_number, required, index, line, column, mark):
        # type: (Any, Any, int, int, int, Any) -> None
        self.mark = mark
        self.column = column
        self.line = line
        self.index = index
        self.required = required
        self.token_number = token_number
+
+
+class Scanner(object):
+ def __init__(self, loader=None):
+ # type: (Any) -> None
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader do the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer
+
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_scanner', None) is None:
+ self.loader._scanner = self
+ self.reset_scanner()
+ self.first_time = False
+ self.yaml_version = None # type: Any
+
+ @property
+ def flow_level(self):
+ # type: () -> int
+ return len(self.flow_context)
+
+ def reset_scanner(self):
+ # type: () -> None
+ # Had we reached the end of the stream?
+ self.done = False
+
+ # flow_context is an expanding/shrinking list consisting of '{' and '['
+ # for each unclosed flow context. If empty list that means block context
+ self.flow_context = [] # type: List[Text]
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = [] # type: List[Any]
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = [] # type: List[int]
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more that one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {} # type: Dict[Any, Any]
+
+ @property
+ def reader(self):
+ # type: () -> Any
+ try:
+ return self._scanner_reader # type: ignore
+ except AttributeError:
+ if hasattr(self.loader, 'typ'):
+ self._scanner_reader = self.loader.reader
+ else:
+ self._scanner_reader = self.loader._reader
+ return self._scanner_reader
+
+ @property
+ def scanner_processing_version(self): # prefix until un-composited
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver.processing_version
+ return self.loader.processing_version
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # type: (Any) -> bool
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if bool(self.tokens):
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # type: () -> Any
+ # Return the next token, but do not delete if from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if bool(self.tokens):
+ return self.tokens[0]
+
+ def get_token(self):
+ # type: () -> Any
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if bool(self.tokens):
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ # type: () -> bool
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+ return False
+
+ def fetch_comment(self, comment):
+ # type: (Any) -> None
+ raise NotImplementedError
+
    def fetch_more_tokens(self):
        # type: () -> Any
        """Scan one more token and append it to self.tokens.

        Dispatches on the next character; the stream-end / directive /
        document checks must run before the single-character indicator
        checks, the rest are order-independent.
        """
        # Eat whitespaces and comments until we reach the next token.
        comment = self.scan_to_next_token()
        if comment is not None:  # never happens for base scanner
            return self.fetch_comment(comment)
        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.reader.column)

        # Peek the next character.
        ch = self.reader.peek()

        # Is it the end of stream?
        if ch == '\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == '%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == '-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == '.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        # if ch == u'\uFEFF':
        #     return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == '[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == '{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == ']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == '}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == ',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == '-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == '?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == ':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == '*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == '&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == '!':
            return self.fetch_tag()

        # Is it a literal scalar?
        if ch == '|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == '>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == "'":
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == '"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError(
            'while scanning for the next token',
            None,
            'found character %r that cannot start any token' % utf8(ch),
            self.reader.get_mark(),
        )
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # type: () -> Any
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # type: () -> None
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.reader.line or self.reader.index - key.index > 1024:
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # type: () -> None
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.reader.column
+
+ # The next token might be a simple key. Let's save it's number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken + len(self.tokens)
+ key = SimpleKey(
+ token_number,
+ required,
+ self.reader.index,
+ self.reader.line,
+ self.reader.column,
+ self.reader.get_mark(),
+ )
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # type: () -> None
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+ # type: (Any) -> None
+ # In flow context, tokens should respect indentation.
+ # Actually the condition should be `self.indent >= column` according to
+ # the spec. But this condition will prohibit intuitively correct
+ # constructions such as
+ # key : {
+ # }
+ # ####
+ # if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.reader.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive then specification requires.
+ if bool(self.flow_level):
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.reader.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # type: (int) -> bool
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # type: () -> None
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
+
+ def fetch_stream_end(self):
+ # type: () -> None
+ # Set the current intendation to -1.
+ self.unwind_indent(-1)
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+ # The steam is finished.
+ self.done = True
+
+ def fetch_directive(self):
+ # type: () -> None
+ # Set the current intendation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ # type: () -> None
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ # type: () -> None
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+ # type: (Any) -> None
+ # Set the current intendation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward(3)
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ # type: () -> None
+ self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[')
+
+ def fetch_flow_mapping_start(self):
+ # type: () -> None
+ self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{')
+
+ def fetch_flow_collection_start(self, TokenClass, to_push):
+ # type: (Any, Text) -> None
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+ # Increase the flow level.
+ self.flow_context.append(to_push)
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ # type: () -> None
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ # type: () -> None
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+ # type: (Any) -> None
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Decrease the flow level.
+ try:
+ popped = self.flow_context.pop() # NOQA
+ except IndexError:
+ # We must not be in a list or object.
+ # Defer error handling to the parser.
+ pass
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+ # type: () -> None
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Add FLOW-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+ # type: () -> None
+ # Block context needs additional checks.
+ if not self.flow_level:
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'sequence entries are not allowed here', self.reader.get_mark()
+ )
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+ # type: () -> None
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not nessesary a simple)?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'mapping keys are not allowed here', self.reader.get_mark()
+ )
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
    def fetch_value(self):
        # type: () -> None
        """':' value indicator: emit VALUE.

        When a pending simple key is completed by this ':', the deferred
        KEY (and, if a new block mapping starts, BLOCK-MAPPING-START) is
        retro-fitted into the token queue at the key's recorded position.
        """
        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:
            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(
                key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark)
            )

            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(
                        key.token_number - self.tokens_taken,
                        BlockMappingStartToken(key.mark, key.mark),
                    )

            # There cannot be two simple keys one after another.
            self.allow_simple_key = False

        # It must be a part of a complex key.
        else:

            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:

                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(
                        None,
                        None,
                        'mapping values are not allowed here',
                        self.reader.get_mark(),
                    )

            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START. It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.reader.column):
                    mark = self.reader.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))

            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level

            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()

        # Add VALUE.
        start_mark = self.reader.get_mark()
        self.reader.forward()
        end_mark = self.reader.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+ # type: () -> None
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+ # type: () -> None
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+ # type: () -> None
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ # type: () -> None
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ # type: () -> None
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+ # type: (Any) -> None
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ # type: () -> None
+ self.fetch_flow_scalar(style="'")
+
+ def fetch_double(self):
+ # type: () -> None
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+ # type: (Any) -> None
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+    def fetch_plain(self):
+        # type: () -> None
+        # Scan a plain (unquoted) scalar and append it as a SCALAR token.
+        # A plain scalar could be a simple key.
+        self.save_possible_simple_key()
+        # No simple keys after plain scalars. But note that `scan_plain` will
+        # change this flag if the scan is finished at the beginning of the
+        # line.
+        self.allow_simple_key = False
+        # Scan and add SCALAR. May change `allow_simple_key`.
+        self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+ # type: () -> Any
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.reader.column == 0:
+ return True
+ return None
+
+ def check_document_start(self):
+ # type: () -> Any
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_document_end(self):
+ # type: () -> Any
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_block_entry(self):
+ # type: () -> Any
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_key(self):
+ # type: () -> Any
+ # KEY(flow context): '?'
+ if bool(self.flow_level):
+ return True
+ # KEY(block context): '?' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+    def check_value(self):
+        # type: () -> Any
+        # Decide whether ':' at the current position starts a VALUE token;
+        # the rules differ between YAML 1.1 and 1.2 processing.
+        # VALUE(flow context): ':'
+        if self.scanner_processing_version == (1, 1):
+            if bool(self.flow_level):
+                return True
+        else:
+            # YAML 1.2: inside a flow sequence, or right after a value in a
+            # flow mapping, a ':' only counts when followed by space/end.
+            if bool(self.flow_level):
+                if self.flow_context[-1] == '[':
+                    if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+                        return False
+                elif self.tokens and isinstance(self.tokens[-1], ValueToken):
+                    # mapping flow context scanning a value token
+                    if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+                        return False
+                return True
+        # VALUE(block context): ':' (' '|'\n')
+        return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+    def check_plain(self):
+        # type: () -> Any
+        # A plain scalar may start with any non-space character except:
+        # '-', '?', ':', ',', '[', ']', '{', '}',
+        # '#', '&', '*', '!', '|', '>', '\'', '\"',
+        # '%', '@', '`'.
+        #
+        # It may also start with
+        # '-', '?', ':'
+        # if it is followed by a non-space character.
+        #
+        # Note that we limit the last rule to the block context (except the
+        # '-' character) because we want the flow context to be space
+        # independent.
+        srp = self.reader.peek
+        ch = srp()
+        if self.scanner_processing_version == (1, 1):
+            return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or (
+                srp(1) not in _THE_END_SPACE_TAB
+                and (ch == '-' or (not self.flow_level and ch in '?:'))
+            )
+        # YAML 1.2
+        if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`':
+            # ################### ^ ???
+            return True
+        ch1 = srp(1)
+        if ch == '-' and ch1 not in _THE_END_SPACE_TAB:
+            return True
+        # In 1.2, ':' inside flow may start a plain scalar when not followed
+        # by a space or tab.
+        if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB:
+            return True
+
+        return srp(1) not in _THE_END_SPACE_TAB and (
+            ch == '-' or (not self.flow_level and ch in '?:')
+        )
+
+ # Scanners.
+
+    def scan_to_next_token(self):
+        # type: () -> Any
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is
+        # Tabs cannot precede tokens
+        # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        # KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        # if <TAB>:
+        # self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        # Strip a leading BOM only at the very start of the stream.
+        if self.reader.index == 0 and srp() == '\uFEFF':
+            srf()
+        found = False
+        _the_end = _THE_END
+        while not found:
+            while srp() == ' ':
+                srf()
+            if srp() == '#':
+                # Skip the comment to the end of the line.
+                while srp() not in _the_end:
+                    srf()
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+        return None
+
+    def scan_directive(self):
+        # type: () -> Any
+        # Scan a '%' directive line and return a DirectiveToken.
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        start_mark = self.reader.get_mark()
+        srf()
+        name = self.scan_directive_name(start_mark)
+        value = None
+        if name == 'YAML':
+            value = self.scan_yaml_directive_value(start_mark)
+            end_mark = self.reader.get_mark()
+        elif name == 'TAG':
+            value = self.scan_tag_directive_value(start_mark)
+            end_mark = self.reader.get_mark()
+        else:
+            # Unknown directive: skip the rest of the line; value stays None.
+            end_mark = self.reader.get_mark()
+            while srp() not in _THE_END:
+                srf()
+        self.scan_directive_ignored_line(start_mark)
+        return DirectiveToken(name, value, start_mark, end_mark)
+
+    def scan_directive_name(self, start_mark):
+        # type: (Any) -> Any
+        # Scan the directive name (e.g. 'YAML' or 'TAG'): a non-empty run of
+        # alphanumerics plus '-_:.', terminated by space or line break.
+        # See the specification for details.
+        length = 0
+        srp = self.reader.peek
+        ch = srp(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.':
+            length += 1
+            ch = srp(length)
+        if not length:
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                'expected alphabetic or numeric character, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                'expected alphabetic or numeric character, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return value
+
+    def scan_yaml_directive_value(self, start_mark):
+        # type: (Any) -> Any
+        # Scan '<major>.<minor>' after '%YAML', record it on the scanner and
+        # return the (major, minor) tuple.
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while srp() == ' ':
+            srf()
+        major = self.scan_yaml_directive_number(start_mark)
+        if srp() != '.':
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                "expected a digit or '.', but found %r" % utf8(srp()),
+                self.reader.get_mark(),
+            )
+        srf()
+        minor = self.scan_yaml_directive_number(start_mark)
+        if srp() not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                "expected a digit or ' ', but found %r" % utf8(srp()),
+                self.reader.get_mark(),
+            )
+        self.yaml_version = (major, minor)
+        return self.yaml_version
+
+    def scan_yaml_directive_number(self, start_mark):
+        # type: (Any) -> Any
+        # Scan one decimal integer component of the version number.
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        ch = srp()
+        if not ('0' <= ch <= '9'):
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                'expected a digit, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        length = 0
+        while '0' <= srp(length) <= '9':
+            length += 1
+        value = int(self.reader.prefix(length))
+        srf(length)
+        return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while srp() == ' ':
+ srf()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+    def scan_tag_directive_handle(self, start_mark):
+        # type: (Any) -> Any
+        # Scan the handle part of a %TAG directive; it must be followed by a
+        # single space separating it from the prefix.
+        # See the specification for details.
+        value = self.scan_tag_handle('directive', start_mark)
+        ch = self.reader.peek()
+        if ch != ' ':
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                "expected ' ', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return value
+
+    def scan_tag_directive_prefix(self, start_mark):
+        # type: (Any) -> Any
+        # Scan the prefix part of a %TAG directive; it must be followed by a
+        # space or a line break.
+        # See the specification for details.
+        value = self.scan_tag_uri('directive', start_mark)
+        ch = self.reader.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                "expected ' ', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return value
+
+    def scan_directive_ignored_line(self, start_mark):
+        # type: (Any) -> None
+        # Consume trailing blanks and an optional comment after a directive,
+        # then the line break; anything else is an error.
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while srp() == ' ':
+            srf()
+        if srp() == '#':
+            while srp() not in _THE_END:
+                srf()
+        ch = srp()
+        if ch not in _THE_END:
+            raise ScannerError(
+                'while scanning a directive',
+                start_mark,
+                'expected a comment or a line break, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        self.scan_line_break()
+
+    def scan_anchor(self, TokenClass):
+        # type: (Any) -> Any
+        # Scan an anchor ('&name') or alias ('*name') and return a token of
+        # the given TokenClass (AnchorToken or AliasToken).
+        # The specification does not restrict characters for anchors and
+        # aliases. This may lead to problems, for instance, the document:
+        # [ *alias, value ]
+        # can be interpteted in two ways, as
+        # [ "value" ]
+        # and
+        # [ *alias , "value" ]
+        # Therefore we restrict aliases to numbers and ASCII letters.
+        srp = self.reader.peek
+        start_mark = self.reader.get_mark()
+        indicator = srp()
+        if indicator == '*':
+            name = 'alias'
+        else:
+            name = 'anchor'
+        self.reader.forward()
+        length = 0
+        ch = srp(length)
+        # while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+        #     or ch in u'-_':
+        while check_anchorname_char(ch):
+            length += 1
+            ch = srp(length)
+        if not length:
+            raise ScannerError(
+                'while scanning an %s' % (name,),
+                start_mark,
+                'expected alphabetic or numeric character, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        # ch1 = ch
+        # ch = srp()   # no need to peek, ch is already set
+        # assert ch1 == ch
+        # `ch` is the first character after the anchor name; it must be a
+        # valid terminator.
+        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
+            raise ScannerError(
+                'while scanning an %s' % (name,),
+                start_mark,
+                'expected alphabetic or numeric character, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        end_mark = self.reader.get_mark()
+        return TokenClass(value, start_mark, end_mark)
+
+    def scan_tag(self):
+        # type: () -> Any
+        # Scan a tag: verbatim '!<uri>', the bare '!' non-specific tag, or a
+        # '!handle!suffix' / '!suffix' shorthand.  Returns a TagToken whose
+        # value is the (handle, suffix) pair.
+        # See the specification for details.
+        srp = self.reader.peek
+        start_mark = self.reader.get_mark()
+        ch = srp(1)
+        if ch == '<':
+            # Verbatim tag: '!<' uri '>'.
+            handle = None
+            self.reader.forward(2)
+            suffix = self.scan_tag_uri('tag', start_mark)
+            if srp() != '>':
+                raise ScannerError(
+                    'while parsing a tag',
+                    start_mark,
+                    "expected '>', but found %r" % utf8(srp()),
+                    self.reader.get_mark(),
+                )
+            self.reader.forward()
+        elif ch in _THE_END_SPACE_TAB:
+            # Bare '!': the non-specific tag.
+            handle = None
+            suffix = '!'
+            self.reader.forward()
+        else:
+            # Look ahead for a second '!' to decide whether a named handle
+            # is present.
+            length = 1
+            use_handle = False
+            while ch not in '\0 \r\n\x85\u2028\u2029':
+                if ch == '!':
+                    use_handle = True
+                    break
+                length += 1
+                ch = srp(length)
+            handle = '!'
+            if use_handle:
+                handle = self.scan_tag_handle('tag', start_mark)
+            else:
+                handle = '!'
+                self.reader.forward()
+            suffix = self.scan_tag_uri('tag', start_mark)
+        ch = srp()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError(
+                'while scanning a tag',
+                start_mark,
+                "expected ' ', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        value = (handle, suffix)
+        end_mark = self.reader.get_mark()
+        return TagToken(value, start_mark, end_mark)
+
+    def scan_block_scalar(self, style, rt=False):
+        # type: (Any, Optional[bool]) -> Any
+        # Scan a literal ('|') or folded ('>') block scalar and return a
+        # ScalarToken.  `rt` (round-trip) inserts '\a' markers at folds so the
+        # representer can restore them.
+        # See the specification for details.
+        srp = self.reader.peek
+        if style == '>':
+            folded = True
+        else:
+            folded = False
+
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+
+        # Scan the header.
+        self.reader.forward()
+        chomping, increment = self.scan_block_scalar_indicators(start_mark)
+        # block scalar comment e.g. : |+ # comment text
+        block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark)
+
+        # Determine the indentation level and go to the first non-empty line.
+        min_indent = self.indent + 1
+        if increment is None:
+            # no increment and top level, min_indent could be 0
+            if min_indent < 1 and (
+                style not in '|>'
+                or (self.scanner_processing_version == (1, 1))
+                and getattr(
+                    self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False
+                )
+            ):
+                min_indent = 1
+            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+            indent = max(min_indent, max_indent)
+        else:
+            if min_indent < 1:
+                min_indent = 1
+            # Explicit indentation indicator: indent is relative to parent.
+            indent = min_indent + increment - 1
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+        line_break = ""
+
+        # Scan the inner part of the block scalar.
+        while self.reader.column == indent and srp() != '\0':
+            chunks.extend(breaks)
+            leading_non_space = srp() not in ' \t'
+            length = 0
+            while srp(length) not in _THE_END:
+                length += 1
+            chunks.append(self.reader.prefix(length))
+            self.reader.forward(length)
+            line_break = self.scan_line_break()
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+            if style in '|>' and min_indent == 0:
+                # at the beginning of a line, if in block style see if
+                # end of document/start_new_document
+                if self.check_document_start() or self.check_document_end():
+                    break
+            if self.reader.column == indent and srp() != '\0':
+
+                # Unfortunately, folding rules are ambiguous.
+                #
+                # This is the folding according to the specification:
+
+                if rt and folded and line_break == '\n':
+                    chunks.append('\a')
+                if folded and line_break == '\n' and leading_non_space and srp() not in ' \t':
+                    if not breaks:
+                        chunks.append(' ')
+                else:
+                    chunks.append(line_break)
+
+                # This is Clark Evans's interpretation (also in the spec
+                # examples):
+                #
+                # if folded and line_break == u'\n':
+                #     if not breaks:
+                #         if srp() not in ' \t':
+                #             chunks.append(u' ')
+                #         else:
+                #             chunks.append(line_break)
+                # else:
+                #     chunks.append(line_break)
+            else:
+                break
+
+        # Process trailing line breaks. The 'chomping' setting determines
+        # whether they are included in the value.
+        trailing = []  # type: List[Any]
+        if chomping in [None, True]:
+            chunks.append(line_break)
+        if chomping is True:
+            chunks.extend(breaks)
+        elif chomping in [None, False]:
+            trailing.extend(breaks)
+
+        # We are done.
+        token = ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+        if block_scalar_comment is not None:
+            token.add_pre_comments([block_scalar_comment])
+        if len(trailing) > 0:
+            # nprint('trailing 1', trailing) # XXXXX
+            # Eat whitespaces and comments until we reach the next token.
+            comment = self.scan_to_next_token()
+            while comment:
+                trailing.append(' ' * comment[1].column + comment[0])
+                comment = self.scan_to_next_token()
+
+            # Keep track of the trailing whitespace and following comments
+            # as a comment token, if isn't all included in the actual value.
+            comment_end_mark = self.reader.get_mark()
+            comment = CommentToken("".join(trailing), end_mark, comment_end_mark)
+            token.add_post_comment(comment)
+        return token
+
+    def scan_block_scalar_indicators(self, start_mark):
+        # type: (Any) -> Any
+        # Scan the optional chomping ('+'/'-') and indentation (1-9)
+        # indicators after '|' or '>'; they may appear in either order.
+        # Returns (chomping, increment) where either may be None.
+        # See the specification for details.
+        srp = self.reader.peek
+        chomping = None
+        increment = None
+        ch = srp()
+        if ch in '+-':
+            if ch == '+':
+                chomping = True
+            else:
+                chomping = False
+            self.reader.forward()
+            ch = srp()
+            if ch in '0123456789':
+                increment = int(ch)
+                if increment == 0:
+                    raise ScannerError(
+                        'while scanning a block scalar',
+                        start_mark,
+                        'expected indentation indicator in the range 1-9, ' 'but found 0',
+                        self.reader.get_mark(),
+                    )
+                self.reader.forward()
+        elif ch in '0123456789':
+            increment = int(ch)
+            if increment == 0:
+                raise ScannerError(
+                    'while scanning a block scalar',
+                    start_mark,
+                    'expected indentation indicator in the range 1-9, ' 'but found 0',
+                    self.reader.get_mark(),
+                )
+            self.reader.forward()
+            ch = srp()
+            if ch in '+-':
+                if ch == '+':
+                    chomping = True
+                else:
+                    chomping = False
+                self.reader.forward()
+        ch = srp()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError(
+                'while scanning a block scalar',
+                start_mark,
+                'expected chomping or indentation indicators, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return chomping, increment
+
+    def scan_block_scalar_ignored_line(self, start_mark):
+        # type: (Any) -> Any
+        # Consume the rest of the block scalar header line; return the
+        # comment text (including its leading spaces) if one is present,
+        # otherwise None.
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        prefix = ''
+        comment = None
+        while srp() == ' ':
+            prefix += srp()
+            srf()
+        if srp() == '#':
+            comment = prefix
+            while srp() not in _THE_END:
+                comment += srp()
+                srf()
+        ch = srp()
+        if ch not in _THE_END:
+            raise ScannerError(
+                'while scanning a block scalar',
+                start_mark,
+                'expected a comment or a line break, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        self.scan_line_break()
+        return comment
+
+    def scan_block_scalar_indentation(self):
+        # type: () -> Any
+        # Skip leading empty lines of a block scalar, recording the deepest
+        # indentation seen.  Returns (line breaks, max indent, end mark).
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []
+        max_indent = 0
+        end_mark = self.reader.get_mark()
+        while srp() in ' \r\n\x85\u2028\u2029':
+            if srp() != ' ':
+                chunks.append(self.scan_line_break())
+                end_mark = self.reader.get_mark()
+            else:
+                srf()
+                if self.reader.column > max_indent:
+                    max_indent = self.reader.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # type: (int) -> Any
+        # Consume line breaks and up-to-`indent` leading spaces between block
+        # scalar lines.  Returns (line breaks, end mark).
+        # See the specification for details.
+        chunks = []
+        srp = self.reader.peek
+        srf = self.reader.forward
+        end_mark = self.reader.get_mark()
+        while self.reader.column < indent and srp() == ' ':
+            srf()
+        while srp() in '\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.reader.get_mark()
+            while self.reader.column < indent and srp() == ' ':
+                srf()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # type: (Any) -> Any
+        # Scan a single- or double-quoted scalar and return a ScalarToken.
+        # See the specification for details.
+        # Note that we loose indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere indentation because " and ' clearly
+        # mark the beginning and the end of them. Therefore we are less
+        # restrictive then the specification requires. We only need to check
+        # that document separators are not included in scalars.
+        if style == '"':
+            double = True
+        else:
+            double = False
+        srp = self.reader.peek
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+        quote = srp()
+        self.reader.forward()
+        # Alternate non-space and space runs until the closing quote.
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while srp() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+
+    # Single-character escape codes (the character after '\' in a
+    # double-quoted scalar) mapped to their replacement characters.
+    ESCAPE_REPLACEMENTS = {
+        '0': '\0',
+        'a': '\x07',
+        'b': '\x08',
+        't': '\x09',
+        '\t': '\x09',
+        'n': '\x0A',
+        'v': '\x0B',
+        'f': '\x0C',
+        'r': '\x0D',
+        'e': '\x1B',
+        ' ': '\x20',
+        '"': '"',
+        '/': '/',  # as per http://www.json.org/
+        '\\': '\\',
+        'N': '\x85',
+        '_': '\xA0',
+        'L': '\u2028',
+        'P': '\u2029',
+    }
+
+    # Numeric escape introducers mapped to the number of hex digits they take.
+    ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
+
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # Scan a run of non-space content inside a quoted scalar, resolving
+        # '' (single-quoted) and backslash escapes (double-quoted).
+        # See the specification for details.
+        chunks = []  # type: List[Any]
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while True:
+            length = 0
+            while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029':
+                length += 1
+            if length != 0:
+                chunks.append(self.reader.prefix(length))
+                srf(length)
+            ch = srp()
+            if not double and ch == "'" and srp(1) == "'":
+                # '' inside a single-quoted scalar is an escaped quote.
+                chunks.append("'")
+                srf(2)
+            elif (double and ch == "'") or (not double and ch in '"\\'):
+                # Literal character in this quoting style.
+                chunks.append(ch)
+                srf()
+            elif double and ch == '\\':
+                srf()
+                ch = srp()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    srf()
+                elif ch in self.ESCAPE_CODES:
+                    # \xXX, \uXXXX or \UXXXXXXXX numeric escape.
+                    length = self.ESCAPE_CODES[ch]
+                    srf()
+                    for k in range(length):
+                        if srp(k) not in '0123456789ABCDEFabcdef':
+                            raise ScannerError(
+                                'while scanning a double-quoted scalar',
+                                start_mark,
+                                'expected escape sequence of %d hexdecimal '
+                                'numbers, but found %r' % (length, utf8(srp(k))),
+                                self.reader.get_mark(),
+                            )
+                    code = int(self.reader.prefix(length), 16)
+                    chunks.append(unichr(code))
+                    srf(length)
+                elif ch in '\n\r\x85\u2028\u2029':
+                    # Escaped line break: line continuation.
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError(
+                        'while scanning a double-quoted scalar',
+                        start_mark,
+                        'found unknown escape character %r' % utf8(ch),
+                        self.reader.get_mark(),
+                    )
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # Scan whitespace (and folded line breaks) inside a quoted scalar.
+        # See the specification for details.
+        srp = self.reader.peek
+        chunks = []
+        length = 0
+        while srp(length) in ' \t':
+            length += 1
+        whitespaces = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch == '\0':
+            raise ScannerError(
+                'while scanning a quoted scalar',
+                start_mark,
+                'found unexpected end of stream',
+                self.reader.get_mark(),
+            )
+        elif ch in '\r\n\x85\u2028\u2029':
+            # A line break folds to a single space unless followed by more
+            # breaks (then the breaks themselves are kept).
+            line_break = self.scan_line_break()
+            breaks = self.scan_flow_scalar_breaks(double, start_mark)
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        else:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_flow_scalar_breaks(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # Consume consecutive blank lines inside a quoted scalar, rejecting
+        # document separators ('---' / '...') that would end the document.
+        # See the specification for details.
+        chunks = []  # type: List[Any]
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while True:
+            # Instead of checking indentation, we check for document
+            # separators.
+            prefix = self.reader.prefix(3)
+            if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+                raise ScannerError(
+                    'while scanning a quoted scalar',
+                    start_mark,
+                    'found unexpected document separator',
+                    self.reader.get_mark(),
+                )
+            while srp() in ' \t':
+                srf()
+            if srp() in '\r\n\x85\u2028\u2029':
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # type: () -> Any
+        # Scan a plain (unquoted) scalar and return a ScalarToken.
+        # We add an additional restriction for the flow context:
+        # plain scalars in the flow context cannot contain ',', ': ' and '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosed for the flow context.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+        end_mark = start_mark
+        indent = self.indent + 1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        # if indent == 0:
+        #     indent = 1
+        spaces = []  # type: List[Any]
+        while True:
+            length = 0
+            if srp() == '#':
+                break
+            while True:
+                ch = srp(length)
+                if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB:
+                    pass
+                elif ch == '?' and self.scanner_processing_version != (1, 1):
+                    pass
+                elif (
+                    ch in _THE_END_SPACE_TAB
+                    or (
+                        not self.flow_level
+                        and ch == ':'
+                        and srp(length + 1) in _THE_END_SPACE_TAB
+                    )
+                    or (self.flow_level and ch in ',:?[]{}')
+                ):
+                    break
+                length += 1
+            # It's not clear what we should do with ':' in the flow context.
+            if (
+                self.flow_level
+                and ch == ':'
+                and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'
+            ):
+                srf(length)
+                raise ScannerError(
+                    'while scanning a plain scalar',
+                    start_mark,
+                    "found unexpected ':'",
+                    self.reader.get_mark(),
+                    'Please check '
+                    'http://pyyaml.org/wiki/YAMLColonInFlowContext '
+                    'for details.',
+                )
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.reader.prefix(length))
+            srf(length)
+            end_mark = self.reader.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if (
+                not spaces
+                or srp() == '#'
+                or (not self.flow_level and self.reader.column < indent)
+            ):
+                break
+
+        token = ScalarToken("".join(chunks), True, start_mark, end_mark)
+        if spaces and spaces[0] == '\n':
+            # Create a comment token to preserve the trailing line breaks.
+            comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark)
+            token.add_post_comment(comment)
+        return token
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # type: (Any, Any) -> Any
+        # Scan the whitespace between chunks of a plain scalar; a bare return
+        # (None) signals that a document separator was reached.
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []
+        length = 0
+        while srp(length) in ' ':
+            length += 1
+        whitespaces = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            # A line break in block context re-enables simple keys.
+            self.allow_simple_key = True
+            prefix = self.reader.prefix(3)
+            if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+                return
+            breaks = []
+            while srp() in ' \r\n\x85\u2028\u2029':
+                if srp() == ' ':
+                    srf()
+                else:
+                    breaks.append(self.scan_line_break())
+                    prefix = self.reader.prefix(3)
+                    if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+                        return
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        elif whitespaces:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_tag_handle(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # Scan a tag handle: '!', '!!' or '!word!'.
+        # See the specification for details.
+        # For some strange reasons, the specification does not allow '_' in
+        # tag handles. I have allowed it anyway.
+        srp = self.reader.peek
+        ch = srp()
+        if ch != '!':
+            raise ScannerError(
+                'while scanning a %s' % (name,),
+                start_mark,
+                "expected '!', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        length = 1
+        ch = srp(length)
+        if ch != ' ':
+            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_':
+                length += 1
+                ch = srp(length)
+            # A named handle must be closed by a second '!'.
+            if ch != '!':
+                self.reader.forward(length)
+                raise ScannerError(
+                    'while scanning a %s' % (name,),
+                    start_mark,
+                    "expected '!', but found %r" % utf8(ch),
+                    self.reader.get_mark(),
+                )
+            length += 1
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        return value
+
+    def scan_tag_uri(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # Scan a tag URI/suffix, decoding %-escapes along the way.
+        # See the specification for details.
+        # Note: we do not check if URI is well-formed.
+        srp = self.reader.peek
+        chunks = []
+        length = 0
+        ch = srp(length)
+        while (
+            '0' <= ch <= '9'
+            or 'A' <= ch <= 'Z'
+            or 'a' <= ch <= 'z'
+            or ch in "-;/?:@&=+$,_.!~*'()[]%"
+            or ((self.scanner_processing_version > (1, 1)) and ch == '#')
+        ):
+            if ch == '%':
+                # Flush what we have, then decode the %XX escape sequence.
+                chunks.append(self.reader.prefix(length))
+                self.reader.forward(length)
+                length = 0
+                chunks.append(self.scan_uri_escapes(name, start_mark))
+            else:
+                length += 1
+            ch = srp(length)
+        if length != 0:
+            chunks.append(self.reader.prefix(length))
+            self.reader.forward(length)
+            length = 0
+        if not chunks:
+            raise ScannerError(
+                'while parsing a %s' % (name,),
+                start_mark,
+                'expected URI, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return "".join(chunks)
+
+    def scan_uri_escapes(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # Decode a run of %XX escapes in a tag URI as UTF-8 text.
+        # See the specification for details.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        code_bytes = []  # type: List[Any]
+        mark = self.reader.get_mark()
+        while srp() == '%':
+            srf()
+            for k in range(2):
+                if srp(k) not in '0123456789ABCDEFabcdef':
+                    raise ScannerError(
+                        'while scanning a %s' % (name,),
+                        start_mark,
+                        'expected URI escape sequence of 2 hexdecimal numbers,'
+                        ' but found %r' % utf8(srp(k)),
+                        self.reader.get_mark(),
+                    )
+            # Collect raw bytes (ints on py3, 1-char strs on py2) and decode
+            # the whole run at once below.
+            if PY3:
+                code_bytes.append(int(self.reader.prefix(2), 16))
+            else:
+                code_bytes.append(chr(int(self.reader.prefix(2), 16)))
+            srf(2)
+        try:
+            if PY3:
+                value = bytes(code_bytes).decode('utf-8')
+            else:
+                value = unicode(b"".join(code_bytes), 'utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError('while scanning a %s' % (name,), start_mark, str(exc), mark)
+        return value
+
+ def scan_line_break(self):
+ # type: () -> Any
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.reader.peek()
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.reader.forward()
+ return ch
+ return ""
+
+
+class RoundTripScanner(Scanner):
+    def check_token(self, *choices):
+        # type: (Any) -> bool
+        # Check if the next token is one of the given types.
+        # Comment tokens are gathered onto the following token first, so the
+        # head of the queue is always a "real" token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+ def peek_token(self):
+ # type: () -> Any
+ # Return the next token, but do not delete if from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if bool(self.tokens):
+ return self.tokens[0]
+ return None
+
+    def _gather_comments(self):
+        # type: () -> Any
+        """combine multiple comment lines"""
+        # Pop consecutive CommentTokens off the head of the queue and attach
+        # them as pre-comments of the first non-comment token.
+        comments = []  # type: List[Any]
+        if not self.tokens:
+            return comments
+        if isinstance(self.tokens[0], CommentToken):
+            comment = self.tokens.pop(0)
+            self.tokens_taken += 1
+            comments.append(comment)
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+            if not self.tokens:
+                return comments
+            if isinstance(self.tokens[0], CommentToken):
+                self.tokens_taken += 1
+                comment = self.tokens.pop(0)
+                # nprint('dropping2', comment)
+                comments.append(comment)
+        if len(comments) >= 1:
+            self.tokens[0].add_pre_comments(comments)
+        # pull in post comment on e.g. ':'
+        if not self.done and len(self.tokens) < 2:
+            self.fetch_more_tokens()
+
+    def get_token(self):
+        # type: () -> Any
+        # Return the next token, attaching any trailing CommentTokens to it
+        # as a post-comment first (needed for round-tripping).
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            # nprint('tk', self.tokens)
+            # only add post comment to single line tokens:
+            # scalar, value token. FlowXEndToken, otherwise
+            # hidden streamtokens could get them (leave them and they will be
+            # pre comments for the next map/seq
+            if (
+                len(self.tokens) > 1
+                and isinstance(
+                    self.tokens[0],
+                    (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken),
+                )
+                and isinstance(self.tokens[1], CommentToken)
+                and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
+            ):
+                # Same-line comment: attach it (and any continuation comment
+                # lines) to the token being returned.
+                self.tokens_taken += 1
+                c = self.tokens.pop(1)
+                self.fetch_more_tokens()
+                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+                    self.tokens_taken += 1
+                    c1 = self.tokens.pop(1)
+                    c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+                    self.fetch_more_tokens()
+                self.tokens[0].add_post_comment(c)
+            elif (
+                len(self.tokens) > 1
+                and isinstance(self.tokens[0], ScalarToken)
+                and isinstance(self.tokens[1], CommentToken)
+                and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line
+            ):
+                # Comment on a later line: reconstruct the intervening
+                # newlines and indentation in the comment's value.
+                self.tokens_taken += 1
+                c = self.tokens.pop(1)
+                c.value = (
+                    '\n' * (c.start_mark.line - self.tokens[0].end_mark.line)
+                    + (' ' * c.start_mark.column)
+                    + c.value
+                )
+                self.tokens[0].add_post_comment(c)
+                self.fetch_more_tokens()
+                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+                    self.tokens_taken += 1
+                    c1 = self.tokens.pop(1)
+                    c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+                    self.fetch_more_tokens()
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+        return None
+
+ def fetch_comment(self, comment):
+ # type: (Any) -> None
+ value, start_mark, end_mark = comment
+ while value and value[-1] == ' ':
+ # empty line within indented key context
+ # no need to update end-mark, that is not used
+ value = value[:-1]
+ self.tokens.append(CommentToken(value, start_mark, end_mark))
+
+ # scanner
+
+ def scan_to_next_token(self):
+ # type: () -> Any
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ found = False
+ while not found:
+ while srp() == ' ':
+ srf()
+ ch = srp()
+ if ch == '#':
+ start_mark = self.reader.get_mark()
+ comment = ch
+ srf()
+ while ch not in _THE_END:
+ ch = srp()
+ if ch == '\0': # don't gobble the end-of-stream character
+ # but add an explicit newline as "YAML processors should terminate
+ # the stream with an explicit line break
+ # https://yaml.org/spec/1.2/spec.html#id2780069
+ comment += '\n'
+ break
+ comment += ch
+ srf()
+ # gather any blank lines following the comment too
+ ch = self.scan_line_break()
+ while len(ch) > 0:
+ comment += ch
+ ch = self.scan_line_break()
+ end_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ return comment, start_mark, end_mark
+ if bool(self.scan_line_break()):
+ start_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ ch = srp()
+ if ch == '\n': # empty toplevel lines
+ start_mark = self.reader.get_mark()
+ comment = ""
+ while ch:
+ ch = self.scan_line_break(empty_line=True)
+ comment += ch
+ if srp() == '#':
+ # empty line followed by indented real comment
+ comment = comment.rsplit('\n', 1)[0] + '\n'
+ end_mark = self.reader.get_mark()
+ return comment, start_mark, end_mark
+ else:
+ found = True
+ return None
+
+ def scan_line_break(self, empty_line=False):
+ # type: (bool) -> Text
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.reader.peek() # type: Text
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.reader.forward()
+ return ch
+ elif empty_line and ch in '\t ':
+ self.reader.forward()
+ return ch
+ return ""
+
+ def scan_block_scalar(self, style, rt=True):
+ # type: (Any, Optional[bool]) -> Any
+ return Scanner.scan_block_scalar(self, style, rt=rt)
+
+
+# try:
+# import psyco
+# psyco.bind(Scanner)
+# except ImportError:
+# pass
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/serializer.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/serializer.py
new file mode 100644
index 0000000000..a37885c273
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/serializer.py
@@ -0,0 +1,240 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from ruamel.yaml.error import YAMLError
+from ruamel.yaml.compat import nprint, DBG_NODE, dbg, string_types, nprintf # NOQA
+from ruamel.yaml.util import RegExp
+
+from ruamel.yaml.events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+ DocumentStartEvent,
+ DocumentEndEvent,
+)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False: # MYPY
+ from typing import Any, Dict, Union, Text, Optional # NOQA
+ from ruamel.yaml.compat import VersionType # NOQA
+
+__all__ = ['Serializer', 'SerializerError']
+
+
class SerializerError(YAMLError):
    """Raised when the Serializer is used in an invalid state, e.g.
    serialize() before open(), or any use after close()."""

    pass
+
+
class Serializer(object):
    """Turn a composed node graph into events fed to the emitter.

    Assigns anchors (auto-generated ``idNNN`` names, or explicit ones
    whose ``always_dump`` is set), emits stream/document framing, and
    passes node comments through on the generated events.
    """

    # 'id' and 3+ numbers, but not 000
    ANCHOR_TEMPLATE = u'id%03d'
    ANCHOR_RE = RegExp(u'id(?!000$)\\d{3,}')

    def __init__(
        self,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        dumper=None,
    ):
        # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA
        self.dumper = dumper
        if self.dumper is not None:
            self.dumper._serializer = self
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        # a version given as a string like '1.2' is normalised to a tuple
        if isinstance(version, string_types):
            self.use_version = tuple(map(int, version.split('.')))
        else:
            self.use_version = version  # type: ignore
        self.use_tags = tags
        self.serialized_nodes = {}  # type: Dict[Any, Any]
        self.anchors = {}  # type: Dict[Any, Any]
        self.last_anchor_id = 0
        # None -> not yet opened, False -> open, True -> closed
        self.closed = None  # type: Optional[bool]
        self._templated_id = None

    @property
    def emitter(self):
        # type: () -> Any
        # new API dumpers (with .typ) expose the emitter directly
        if hasattr(self.dumper, 'typ'):
            return self.dumper.emitter
        return self.dumper._emitter

    @property
    def resolver(self):
        # type: () -> Any
        if hasattr(self.dumper, 'typ'):
            # NOTE(review): no ``return`` here, unlike ``emitter`` above.
            # This only behaves correctly if evaluating the ``resolver``
            # property caches the instance into ``_resolver`` as a side
            # effect; otherwise it is a missing-return bug -- confirm
            # against the YAML class in main.py.
            self.dumper.resolver
        return self.dumper._resolver

    def open(self):
        # type: () -> None
        # Emit the StreamStartEvent; may only be called once.
        if self.closed is None:
            self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError('serializer is closed')
        else:
            raise SerializerError('serializer is already opened')

    def close(self):
        # type: () -> None
        # Emit the StreamEndEvent; idempotent once closed.
        if self.closed is None:
            raise SerializerError('serializer is not opened')
        elif not self.closed:
            self.emitter.emit(StreamEndEvent())
            self.closed = True

    # def __del__(self):
    #     self.close()

    def serialize(self, node):
        # type: (Any) -> None
        """Serialize one document: frame ``node`` in document start/end
        events and emit its whole node graph."""
        if dbg(DBG_NODE):
            nprint('Serializing nodes')
            node.dump()
        if self.closed is None:
            raise SerializerError('serializer is not opened')
        elif self.closed:
            raise SerializerError('serializer is closed')
        self.emitter.emit(
            DocumentStartEvent(
                explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags
            )
        )
        # first pass assigns anchors, second pass emits events
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # reset per-document state
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        # type: (Any) -> None
        # Walk the graph; a node seen twice gets a generated anchor,
        # a node with an always_dump anchor keeps its own name.
        if node in self.anchors:
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            anchor = None
            try:
                if node.anchor.always_dump:
                    anchor = node.anchor.value
            except:  # NOQA
                pass
            self.anchors[node] = anchor
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        # type: (Any) -> Any
        # Prefer the node's own anchor name; otherwise produce the next
        # sequential 'idNNN' name.
        try:
            anchor = node.anchor.value
        except:  # NOQA
            anchor = None
        if anchor is None:
            self.last_anchor_id += 1
            return self.ANCHOR_TEMPLATE % self.last_anchor_id
        return anchor

    def serialize_node(self, node, parent, index):
        # type: (Any, Any, Any) -> None
        # Emit the event(s) for one node; a node serialized before is
        # emitted as an alias to its anchor.
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emitter.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.resolver.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # here check if the node.tag equals the one that would result from parsing
                # if not equal quoting is necessary for strings
                detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
                implicit = (
                    (node.tag == detected_tag),
                    (node.tag == default_tag),
                    node.tag.startswith('tag:yaml.org,2002:'),
                )
                self.emitter.emit(
                    ScalarEvent(
                        alias,
                        node.tag,
                        implicit,
                        node.value,
                        style=node.style,
                        comment=node.comment,
                    )
                )
            elif isinstance(node, SequenceNode):
                implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True)
                comment = node.comment
                end_comment = None
                seq_comment = None
                if node.flow_style is True:
                    if comment:  # eol comment on flow style sequence
                        seq_comment = comment[0]
                        # comment[0] = None
                if comment and len(comment) > 2:
                    end_comment = comment[2]
                else:
                    end_comment = None
                self.emitter.emit(
                    SequenceStartEvent(
                        alias,
                        node.tag,
                        implicit,
                        flow_style=node.flow_style,
                        comment=node.comment,
                    )
                )
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
            elif isinstance(node, MappingNode):
                implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True)
                comment = node.comment
                end_comment = None
                map_comment = None
                if node.flow_style is True:
                    if comment:  # eol comment on flow style sequence
                        map_comment = comment[0]
                        # comment[0] = None
                if comment and len(comment) > 2:
                    end_comment = comment[2]
                self.emitter.emit(
                    MappingStartEvent(
                        alias,
                        node.tag,
                        implicit,
                        flow_style=node.flow_style,
                        comment=node.comment,
                        nr_items=len(node.value),
                    )
                )
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
            self.resolver.ascend_resolver()
+
+
def templated_id(s):
    # type: (Text) -> Any
    # Truthy (a match object) when ``s`` looks like an auto-generated
    # anchor name: 'id' followed by 3+ digits, but not 'id000'.
    return Serializer.ANCHOR_RE.match(s)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/timestamp.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/timestamp.py
new file mode 100644
index 0000000000..e44db44d08
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/timestamp.py
@@ -0,0 +1,54 @@
+
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import datetime
+import copy
+
+# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object
+# a more complete datetime might be used by safe loading as well
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+
class TimeStamp(datetime.datetime):
    """datetime subclass used for YAML timestamps.

    Carries round-trip information (original 't' separator, timezone
    string and delta) in the ``_yaml`` dict so a loaded timestamp can be
    dumped back in its original form.
    """

    def __init__(self, *args, **kw):
        # type: (Any, Any) -> None
        # the datetime value itself is set in __new__ (immutable base);
        # this only attaches the round-trip info with its defaults
        self._yaml = dict(t=False, tz=None, delta=0)  # type: Dict[Any, Any]

    def __new__(cls, *args, **kw):  # datetime is immutable
        # type: (Any, Any) -> Any
        return datetime.datetime.__new__(cls, *args, **kw)  # type: ignore

    def __deepcopy__(self, memo):
        # type: (Any) -> Any
        # BUG FIX: previously only year..second were passed on, so a
        # deepcopy silently dropped microsecond and tzinfo.
        ts = TimeStamp(
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
            self.tzinfo,
        )
        ts._yaml = copy.deepcopy(self._yaml)
        return ts

    def replace(self, year=None, month=None, day=None, hour=None,
                minute=None, second=None, microsecond=None, tzinfo=True,
                fold=None):
        # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any
        """Like datetime.replace() but returns a TimeStamp and preserves
        the ``_yaml`` round-trip info.

        NOTE(review): reads ``self.fold`` and forwards ``fold=`` to the
        constructor; ``fold`` only exists on Python >= 3.6, so this method
        would fail on the py2 interpreter this directory targets -- confirm.
        """
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            # True is the sentinel for "keep current tzinfo" (None is a
            # legitimate value meaning naive)
            tzinfo = self.tzinfo
        if fold is None:
            fold = self.fold
        ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold)
        ts._yaml = copy.deepcopy(self._yaml)
        return ts
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/tokens.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/tokens.py
new file mode 100644
index 0000000000..5f5a663534
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/tokens.py
@@ -0,0 +1,286 @@
+# # header
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+if False: # MYPY
+ from typing import Text, Any, Dict, Optional, List # NOQA
+ from .error import StreamMark # NOQA
+
# When True, token __repr__ output includes line numbers (and comment
# info), which helps when debugging the scanner/parser.
SHOWLINES = True
+
+
class Token(object):
    """Base class for all scanner tokens.

    Holds the start/end marks plus an optional ``_comment`` attribute:
    a two-element list ``[post_comment, pre_comments]`` maintained by
    add_post_comment()/add_pre_comments() (and, for the empty-value
    special case in move_comment(), a five-element list).
    """

    __slots__ = 'start_mark', 'end_mark', '_comment'

    def __init__(self, start_mark, end_mark):
        # type: (StreamMark, StreamMark) -> None
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # type: () -> Any
        # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
        #               hasattr('self', key)]
        attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
        attributes.sort()
        arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes])
        if SHOWLINES:
            try:
                arguments += ', line: ' + str(self.start_mark.line)
            except:  # NOQA
                pass
        try:
            arguments += ', comment: ' + str(self._comment)
        except:  # NOQA
            pass
        return '{}({})'.format(self.__class__.__name__, arguments)

    def add_post_comment(self, comment):
        # type: (Any) -> None
        # Attach the end-of-line comment that follows this token.
        if not hasattr(self, '_comment'):
            self._comment = [None, None]
        self._comment[0] = comment

    def add_pre_comments(self, comments):
        # type: (Any) -> None
        # Attach the list of full-line comments preceding this token;
        # may only be done once.
        if not hasattr(self, '_comment'):
            self._comment = [None, None]
        assert self._comment[1] is None
        self._comment[1] = comments

    def get_comment(self):
        # type: () -> Any
        return getattr(self, '_comment', None)

    @property
    def comment(self):
        # type: () -> Any
        return getattr(self, '_comment', None)

    def move_comment(self, target, empty=False):
        # type: (Any, bool) -> Any
        """move a comment from this token to target (normally next token)
        used to combine e.g. comments before a BlockEntryToken to the
        ScalarToken that follows it
        empty is a special for empty values -> comment after key
        """
        c = self.comment
        if c is None:
            return
        # don't push beyond last element
        if isinstance(target, (StreamEndToken, DocumentStartToken)):
            return
        delattr(self, '_comment')
        tc = target.comment
        if not tc:  # target comment, just insert
            # special for empty value in key: value issue 25
            if empty:
                c = [c[0], c[1], None, None, c[0]]
            target._comment = c
            # nprint('mco2:', self, target, target.comment, empty)
            return self
        # merging: overlapping post/pre comments on both tokens is unsupported
        if c[0] and tc[0] or c[1] and tc[1]:
            raise NotImplementedError('overlap in comment %r %r' % (c, tc))
        if c[0]:
            tc[0] = c[0]
        if c[1]:
            tc[1] = c[1]
        return self

    def split_comment(self):
        # type: () -> Any
        """ split the post part of a comment, and return it
        as comment to be added. Delete second part if [None, None]
        abc:  # this goes to sequence
          # this goes to first element
        - first element
        """
        comment = self.comment
        if comment is None or comment[0] is None:
            return None  # nothing to do
        ret_val = [comment[0], None]
        if comment[1] is None:
            delattr(self, '_comment')
        return ret_val
+
+
+# class BOMToken(Token):
+# id = '<byte order mark>'
+
+
class DirectiveToken(Token):
    # A %YAML or %TAG directive; ``name`` is the directive name,
    # ``value`` its parsed argument(s).
    __slots__ = 'name', 'value'
    id = '<directive>'

    def __init__(self, name, value, start_mark, end_mark):
        # type: (Any, Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.name = name
        self.value = value
+
+
class DocumentStartToken(Token):
    # Marks an explicit '---' document start.
    __slots__ = ()
    id = '<document start>'
+
+
class DocumentEndToken(Token):
    # Marks an explicit '...' document end.
    __slots__ = ()
    id = '<document end>'
+
+
class StreamStartToken(Token):
    # First token of every stream; ``encoding`` is the detected input
    # encoding (or None).
    __slots__ = ('encoding',)
    id = '<stream start>'

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        # type: (Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.encoding = encoding
+
+
class StreamEndToken(Token):
    # Last token of every stream.
    __slots__ = ()
    id = '<stream end>'
+
+
class BlockSequenceStartToken(Token):
    # Start of an (indentation-based) block sequence.
    __slots__ = ()
    id = '<block sequence start>'
+
+
class BlockMappingStartToken(Token):
    # Start of an (indentation-based) block mapping.
    __slots__ = ()
    id = '<block mapping start>'
+
+
class BlockEndToken(Token):
    # End of a block collection (dedent).
    __slots__ = ()
    id = '<block end>'
+
+
class FlowSequenceStartToken(Token):
    # '[' starting a flow sequence.
    __slots__ = ()
    id = '['
+
+
class FlowMappingStartToken(Token):
    # '{' starting a flow mapping.
    __slots__ = ()
    id = '{'
+
+
class FlowSequenceEndToken(Token):
    # ']' closing a flow sequence.
    __slots__ = ()
    id = ']'
+
+
class FlowMappingEndToken(Token):
    # '}' closing a flow mapping.
    __slots__ = ()
    id = '}'
+
+
class KeyToken(Token):
    # Mapping key indicator ('?' or implicit).
    __slots__ = ()
    id = '?'

    # def x__repr__(self):
    #     return 'KeyToken({})'.format(
    #         self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
+
+
class ValueToken(Token):
    # Mapping value indicator ':'.
    __slots__ = ()
    id = ':'
+
+
class BlockEntryToken(Token):
    # Block sequence entry indicator '-'.
    __slots__ = ()
    id = '-'
+
+
class FlowEntryToken(Token):
    # Flow collection entry separator ','.
    __slots__ = ()
    id = ','
+
+
class AliasToken(Token):
    # '*name' alias reference; ``value`` is the anchor name.
    __slots__ = ('value',)
    id = '<alias>'

    def __init__(self, value, start_mark, end_mark):
        # type: (Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.value = value
+
+
class AnchorToken(Token):
    # '&name' anchor definition; ``value`` is the anchor name.
    __slots__ = ('value',)
    id = '<anchor>'

    def __init__(self, value, start_mark, end_mark):
        # type: (Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.value = value
+
+
class TagToken(Token):
    # Tag indicator; ``value`` is the (handle, suffix) pair.
    __slots__ = ('value',)
    id = '<tag>'

    def __init__(self, value, start_mark, end_mark):
        # type: (Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.value = value
+
+
class ScalarToken(Token):
    # A scalar value; ``plain`` is True for unquoted scalars, ``style``
    # is the style indicator character (None for plain).
    __slots__ = 'value', 'plain', 'style'
    id = '<scalar>'

    def __init__(self, value, plain, start_mark, end_mark, style=None):
        # type: (Any, Any, Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.value = value
        self.plain = plain
        self.style = style
+
+
class CommentToken(Token):
    # Produced only by the round-trip scanner; ``value`` holds the raw
    # comment text (including the '#' and trailing line breaks).
    __slots__ = 'value', 'pre_done'
    id = '<comment>'

    def __init__(self, value, start_mark, end_mark):
        # type: (Any, Any, Any) -> None
        Token.__init__(self, start_mark, end_mark)
        self.value = value

    def reset(self):
        # type: () -> None
        # Forget that this comment was already handled as a pre-comment.
        if hasattr(self, 'pre_done'):
            delattr(self, 'pre_done')

    def __repr__(self):
        # type: () -> Any
        v = '{!r}'.format(self.value)
        if SHOWLINES:
            try:
                v += ', line: ' + str(self.start_mark.line)
                v += ', col: ' + str(self.start_mark.column)
            except:  # NOQA
                pass
        return 'CommentToken({})'.format(v)

    # NOTE(review): __eq__ without __hash__ makes instances unhashable on
    # Python 3; apparently CommentTokens are never used as dict/set keys
    # -- confirm before relying on hashing.
    def __eq__(self, other):
        # type: (Any) -> bool
        if self.start_mark != other.start_mark:
            return False
        if self.end_mark != other.end_mark:
            return False
        if self.value != other.value:
            return False
        return True

    def __ne__(self, other):
        # type: (Any) -> bool
        return not self.__eq__(other)
diff --git a/contrib/python/ruamel.yaml/py2/ruamel/yaml/util.py b/contrib/python/ruamel.yaml/py2/ruamel/yaml/util.py
new file mode 100644
index 0000000000..1788254924
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ruamel/yaml/util.py
@@ -0,0 +1,190 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+from __future__ import absolute_import, print_function
+
+from functools import partial
+import re
+
+from .compat import text_type, binary_type
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Text # NOQA
+ from .compat import StreamTextType # NOQA
+
+
class LazyEval(object):
    """
    Lightweight wrapper around lazily evaluated func(*args, **kwargs).

    The wrapped call is performed at most once, on the first attribute
    access of the wrapper; afterwards every attribute access (get or set)
    is forwarded to the cached result. Special/dunder lookups bypass the
    forwarding (they are resolved on the type). The single reserved
    attribute is ``lazy_self``, the zero-argument callable that yields
    the (possibly not yet computed) result.
    """

    def __init__(self, func, *args, **kwargs):
        # type: (Any, Any, Any) -> None
        def lazy_self():
            # type: () -> Any
            result = func(*args, **kwargs)
            # first evaluation: replace ourselves with a closure that
            # simply hands back the cached result
            object.__setattr__(self, 'lazy_self', lambda: result)
            return result

        object.__setattr__(self, 'lazy_self', lazy_self)

    def __getattribute__(self, name):
        # type: (Any) -> Any
        getter = object.__getattribute__(self, 'lazy_self')
        if name == 'lazy_self':
            return getter
        return getattr(getter(), name)

    def __setattr__(self, name, value):
        # type: (Any, Any) -> None
        setattr(self.lazy_self(), name, value)
+
+
# Lazily compiled regular expression: the pattern is only compiled the
# first time an attribute (match, search, ...) is accessed.
RegExp = partial(LazyEval, re.compile)
+
+
+# originally as comment
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test in your test suite
+# that check this routines output against a known piece of your YAML
+# before upgrades to this code break your round-tripped YAML
def load_yaml_guess_indent(stream, **kw):
    # type: (StreamTextType, Any) -> Any
    """guess the indent and block sequence indent of yaml stream/string

    returns round_trip_loaded stream, indent level, block sequence indent
    - block sequence indent is the number of spaces before a dash relative to previous indent
    - if there are no block sequences, indent is taken from nested mappings, block sequence
      indent is unset (None) in that case
    """
    from .main import round_trip_load

    # load a YAML document, guess the indentation, if you use TABs you're on your own
    def leading_spaces(line):
        # type: (Any) -> int
        idx = 0
        while idx < len(line) and line[idx] == ' ':
            idx += 1
        return idx

    # accept a str, bytes (assumed UTF-8) or a file-like object
    if isinstance(stream, text_type):
        yaml_str = stream  # type: Any
    elif isinstance(stream, binary_type):
        # most likely, but the Reader checks BOM for this
        yaml_str = stream.decode('utf-8')
    else:
        yaml_str = stream.read()
    map_indent = None
    indent = None  # default if not found for some reason
    block_seq_indent = None
    prev_line_key_only = None
    key_indent = 0
    for line in yaml_str.splitlines():
        rline = line.rstrip()
        lline = rline.lstrip()
        if lline.startswith('- '):
            # first block sequence entry determines both indents
            l_s = leading_spaces(line)
            block_seq_indent = l_s - key_indent
            idx = l_s + 1
            while line[idx] == ' ':  # this will end as we rstripped
                idx += 1
            if line[idx] == '#':  # comment after -
                continue
            indent = idx - key_indent
            break
        if map_indent is None and prev_line_key_only is not None and rline:
            # first line nested under a key-only line -> mapping indent
            idx = 0
            while line[idx] in ' -':
                idx += 1
            if idx > prev_line_key_only:
                map_indent = idx - prev_line_key_only
        if rline.endswith(':'):
            key_indent = leading_spaces(line)
            idx = 0
            while line[idx] == ' ':  # this will end on ':'
                idx += 1
            prev_line_key_only = idx
            continue
        prev_line_key_only = None
    if indent is None and map_indent is not None:
        indent = map_indent
    return round_trip_load(yaml_str, **kw), indent, block_seq_indent
+
+
def configobj_walker(cfg):
    # type: (Any) -> Any
    """
    walks over a ConfigObj (INI file with comments) generating
    corresponding YAML output (including comments)

    Yields YAML lines one at a time; requires the third-party
    ``configobj`` package.
    """
    from configobj import ConfigObj  # type: ignore

    assert isinstance(cfg, ConfigObj)
    for c in cfg.initial_comment:
        if c.strip():
            yield c
    for s in _walk_section(cfg):
        if s.strip():
            yield s
    for c in cfg.final_comment:
        if c.strip():
            yield c
+
+
def _walk_section(s, level=0):
    # type: (Any, int) -> Any
    # Recursively yield YAML lines for one configobj Section: scalars
    # first (with their comments), then nested sections, indented two
    # spaces per level.
    from configobj import Section

    assert isinstance(s, Section)
    indent = u'  ' * level
    for name in s.scalars:
        for c in s.comments[name]:
            yield indent + c.strip()
        x = s[name]
        if u'\n' in x:
            # multi-line value -> YAML literal block scalar
            i = indent + u'  '
            x = u'|\n' + i + x.strip().replace(u'\n', u'\n' + i)
        elif ':' in x:
            # quote values containing ':' so they stay scalars
            x = u"'" + x.replace(u"'", u"''") + u"'"
        line = u'{0}{1}: {2}'.format(indent, name, x)
        c = s.inline_comments[name]
        if c:
            line += u' ' + c
        yield line
    for name in s.sections:
        for c in s.comments[name]:
            yield indent + c.strip()
        line = u'{0}{1}:'.format(indent, name)
        c = s.inline_comments[name]
        if c:
            line += u' ' + c
        yield line
        for val in _walk_section(s[name], level=level + 1):
            yield val
+
+
+# def config_obj_2_rt_yaml(cfg):
+# from .comments import CommentedMap, CommentedSeq
+# from configobj import ConfigObj
+# assert isinstance(cfg, ConfigObj)
+# #for c in cfg.initial_comment:
+# # if c.strip():
+# # pass
+# cm = CommentedMap()
+# for name in s.sections:
+# cm[name] = d = CommentedMap()
+#
+#
+# #for c in cfg.final_comment:
+# # if c.strip():
+# # yield c
+# return cm
diff --git a/contrib/python/ruamel.yaml/py2/ya.make b/contrib/python/ruamel.yaml/py2/ya.make
new file mode 100644
index 0000000000..e866f43d34
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py2/ya.make
@@ -0,0 +1,55 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(0.16.13)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/deprecated/python/ruamel.ordereddict
+ contrib/python/ruamel.yaml.clib
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ ruamel/yaml/__init__.py
+ ruamel/yaml/anchor.py
+ ruamel/yaml/comments.py
+ ruamel/yaml/compat.py
+ ruamel/yaml/composer.py
+ ruamel/yaml/configobjwalker.py
+ ruamel/yaml/constructor.py
+ ruamel/yaml/cyaml.py
+ ruamel/yaml/dumper.py
+ ruamel/yaml/emitter.py
+ ruamel/yaml/error.py
+ ruamel/yaml/events.py
+ ruamel/yaml/loader.py
+ ruamel/yaml/main.py
+ ruamel/yaml/nodes.py
+ ruamel/yaml/parser.py
+ ruamel/yaml/reader.py
+ ruamel/yaml/representer.py
+ ruamel/yaml/resolver.py
+ ruamel/yaml/scalarbool.py
+ ruamel/yaml/scalarfloat.py
+ ruamel/yaml/scalarint.py
+ ruamel/yaml/scalarstring.py
+ ruamel/yaml/scanner.py
+ ruamel/yaml/serializer.py
+ ruamel/yaml/timestamp.py
+ ruamel/yaml/tokens.py
+ ruamel/yaml/util.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/ruamel.yaml/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+ ruamel/yaml/py.typed
+)
+
+END()
diff --git a/contrib/python/ruamel.yaml/py3/.dist-info/METADATA b/contrib/python/ruamel.yaml/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..d4d5ff57b0
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/.dist-info/METADATA
@@ -0,0 +1,400 @@
+Metadata-Version: 2.1
+Name: ruamel.yaml
+Version: 0.17.40
+Summary: ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order
+Home-page: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree
+Author: Anthon van der Neut
+Author-email: a.van.der.neut@ruamel.eu
+License: MIT license
+Keywords: yaml 1.2 parser round-trip preserve quotes order config
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Classifier: Typing :: Typed
+Requires-Python: >=3
+Description-Content-Type: text/markdown; charset=UTF-8; variant=CommonMark
+License-File: LICENSE
+Requires-Dist: ruamel.yaml.clib >=0.2.7 ; platform_python_implementation=="CPython" and python_version<"3.13"
+Provides-Extra: docs
+Requires-Dist: ryd ; extra == 'docs'
+Requires-Dist: mercurial >5.7 ; extra == 'docs'
+Provides-Extra: jinja2
+Requires-Dist: ruamel.yaml.jinja2 >=0.2 ; extra == 'jinja2'
+
+# ruamel.yaml
+
+`ruamel.yaml` is a YAML 1.2 loader/dumper package for Python.
+<table class="docutils">
+ <tr> <td>version</td>
+ <td>0.17.40</td>
+ </tr>
+ <tr> <td>updated</td>
+ <td>2023-10-20</td>
+ </tr>
+ <tr> <td>documentation</td>
+ <td><a href="http://yaml.readthedocs.io">http://yaml.readthedocs.io</a></td>
+ </tr>
+ <tr> <td>repository</td>
+ <td><a href="https://sourceforge.net/projects/ruamel-yaml">https://sourceforge.net/projects/ruamel-yaml</a></td>
+ </tr>
+ <tr> <td>pypi</td>
+ <td><a href="https://pypi.org/project/ruamel.yaml">https://pypi.org/project/ruamel.yaml</a></td>
+ </tr>
+</table>
+
+*Starting with 0.17.22 only Python 3.7+ is supported. The 0.17 series is
+also the last to support old PyYAML functions, replace it by creating a*
+`YAML()` *instance and use its* `.load()` *and* `.dump()` *methods.*
+**New(er) functionality is usually only available via the new API.**
+
+The 0.17.21 was the last one tested to be working on Python 3.5 and 3.6
+(the latter was not tested, because tox/virtualenv stopped supporting
+that EOL version). The 0.16.13 release was the last that was tested to
+be working on Python 2.7.
+
+*Please adjust/pin your dependencies accordingly if necessary.*
+(`ruamel.yaml<0.18`)
+
+There are now two extra plug-in packages
+(`ruamel.yaml.bytes` and `ruamel.yaml.string`)
+for those not wanting to do the streaming to a
+`io.BytesIO/StringIO` buffer themselves.
+
+If your package uses `ruamel.yaml` and is not listed on PyPI, drop me an
+email, preferably with some information on how you use the package (or a
+link to the repository) and I'll keep you informed when the status of
+the API is stable enough to make the transition.
+
+- [Overview](http://yaml.readthedocs.io/en/latest/overview/)
+- [Installing](http://yaml.readthedocs.io/en/latest/install/)
+- [Basic Usage](http://yaml.readthedocs.io/en/latest/basicuse/)
+- [Details](http://yaml.readthedocs.io/en/latest/detail/)
+- [Examples](http://yaml.readthedocs.io/en/latest/example/)
+- [API](http://yaml.readthedocs.io/en/latest/api/)
+- [Differences with
+ PyYAML](http://yaml.readthedocs.io/en/latest/pyyaml/)
+
+[![image](https://readthedocs.org/projects/yaml/badge/?version=latest)](https://yaml.readthedocs.org/en/latest?badge=latest)[![image](https://bestpractices.coreinfrastructure.org/projects/1128/badge)](https://bestpractices.coreinfrastructure.org/projects/1128)
+[![image](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw)](https://opensource.org/licenses/MIT)
+[![image](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw)](https://pypi.org/project/ruamel.yaml/)
+[![image](https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw)](https://pypi.org/project/oitnb/)
+[![image](http://www.mypy-lang.org/static/mypy_badge.svg)](http://mypy-lang.org/)
+
+# ChangeLog
+
+0.17.40 (2023-10-20):
+
+- flow style sets are now preserved (`!!set {a, b, c}`). Any values specified when loading are dropped, including `!!null ""`.
+- potential workaround for issue 484: the long_description_content_type including the variant specification `CommonMark`
+can result in problems on Azure. If you can install from `.tar.gz` using
+`RUAMEL_NO_LONG_DESCRIPTION=1 pip install ruamel.yaml --no-binary :all:` then the long description, and its
+offending type, are not included (in the METADATA).
+(Reported by [Coury Ditch](https://sourceforge.net/u/cmditch/profile/))
+
+- links in documentation update (reported by [David Hoese](https://sourceforge.net/u/daveydave400/profile/))
+- Added some `__repr__` for internally used classes
+
+0.17.39 (2023-10-19):
+
+- update README generation, no code changes
+
+0.17.36 (2023-10-19):
+
+- fixed issue 480, dumping of a loaded empty flow-style mapping with comment failed (Reported by [Stéphane Brunner](https://sourceforge.net/u/stbrunner/profile/))
+- fixed issue 482, caused by DEFAULT_MAPPING_TAG having changed to being a `Tag()` instance, not a string (reported by [yan12125](https://sourceforge.net/u/yan12125/profile/))
+- updated documentation to use mkdocs
+
+0.17.35 (2023-10-04):
+
+- support for loading dataclasses with `InitVar` variables (some special coding was necessary to get the, unexpected, default value in the corresponding instance attribute ( example of usage in [this question](https://stackoverflow.com/q/77228378/1307905))
+
+0.17.34 (2023-10-03):
+
+- Python 3.12 also loads C version when using `typ='safe'`
+- initial support for loading invoking
+`__post_init__()` on dataclasses that have that
+method after loading a registered dataclass.
+(Originally
+[asked](https://stackoverflow.com/q/51529458/1307905) on
+Stackoverflow by
+[nyanpasu64](https://stackoverflow.com/users/2683842/nyanpasu64)
+and as
+[ticket](https://sourceforge.net/p/ruamel-yaml/tickets/355/) by
+[Patrick Lehmann](https://sourceforge.net/u/paebbels/profile/))
+
+```
+@yaml.register_class
+@dataclass
+class ...
+```
+
+0.17.33 (2023-09-28):
+
+- added `flow_seq_start`, `flow_seq_end`, `flow_seq_separator`, `flow_map_start`, `flow_map_end`, `flow_map_separator` **class** attributes to the `Emitter` class so flow style output can more easily be influenced (based on [this answer](https://stackoverflow.com/a/76547814/1307905) on a StackOverflow question by [Huw Walters](https://stackoverflow.com/users/291033/huw-walters)).
+
+0.17.32 (2023-06-17):
+
+- fix issue with scanner getting stuck in infinite loop
+
+0.17.31 (2023-05-31):
+
+- added tag.setter on `ScalarEvent` and on `Node`, that takes either a `Tag` instance, or a str (reported by [Sorin Sbarnea](https://sourceforge.net/u/ssbarnea/profile/))
+
+0.17.30 (2023-05-30):
+
+- fix issue 467, caused by Tag instances not being hashable (reported by [Douglas Raillard](https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/))
+
+0.17.29 (2023-05-30):
+
+- changed the internals of the tag property from a string to a class which allows for preservation of the original handle and suffix. This should result in better results using documents with %TAG directives, as well as preserving URI escapes in tag suffixes.
+
+0.17.28 (2023-05-26):
+
+- fix for issue 464: documents ending with document end marker
+without final newline fail to load (reported by [Mariusz
+Rusiniak](https://sourceforge.net/u/r2dan/profile/))
+
+0.17.27 (2023-05-25):
+
+- fix issue with inline mappings as value for merge keys (reported by Sirish on [StackOverflow](https://stackoverflow.com/q/76331049/1307905))
+- fix for 468, error inserting after accessing merge attribute on `CommentedMap` (reported by [Bastien gerard](https://sourceforge.net/u/bagerard/))
+- fix for issue 461 pop + insert on same `CommentedMap` key throwing error (reported by [John Thorvald Wodder II](https://sourceforge.net/u/jwodder/profile/))
+
+0.17.26 (2023-05-09):
+
+- fix for error on edge case for issue 459
+
+0.17.25 (2023-05-09):
+
+- fix for regression while dumping wrapped strings with too many backslashes removed (issue 459, reported by [Lele Gaifax](https://sourceforge.net/u/lele/profile/))
+
+0.17.24 (2023-05-06):
+
+- rewrite of `CommentedMap.insert()`. If you have a merge key in the YAML document for the mapping you insert to, the position value should be the one as you look at the YAML input. This fixes issue 453 where other keys of a merged in mapping would show up after an insert (reported by [Alex Miller](https://sourceforge.net/u/millerdevel/profile/)). It also fixes a call to `.insert()` resulting in the merge key moving to be the first key if it wasn't already, and it is also now possible to insert a key before a merge key (even if it is the first key in the mapping).
+- fix (in the pure Python implementation including default) for issue 447. (reported by [Jack Cherng](https://sourceforge.net/u/jfcherng/profile/), also brought up by brent on [StackOverflow](https://stackoverflow.com/q/40072485/1307905))
+
+0.17.23 (2023-05-05):
+
+- fix 458, error on plain scalars starting with word longer than width. (reported by [Kyle Larose](https://sourceforge.net/u/klarose/profile/))
+- fix for `.update()` no longer correctly handling keyword arguments (reported by John Lin on [StackOverflow]( https://stackoverflow.com/q/76089100/1307905))
+- fix issue 454: high Unicode (emojis) in quoted strings always
+escaped (reported by [Michal
+Čihař](https://sourceforge.net/u/nijel/profile/) based on a
+question on StackOverflow).
+- fix issue with emitter conservatively inserting extra backslashes in wrapped quoted strings (reported by thebenman on [StackOverflow](https://stackoverflow.com/q/75631454/1307905))
+
+0.17.22 (2023-05-02):
+
+- fix issue 449 where the second exclamation marks got URL encoded (reported and fixing PR provided by [John Stark](https://sourceforge.net/u/jods/profile/))
+- fix issue with indent != 2 and literal scalars with empty first line (reported by wrdis on [StackOverflow](https://stackoverflow.com/q/75584262/1307905))
+- updated `__repr__` of CommentedMap, now that Python's dict is ordered -> no more `ordereddict(list-of-tuples)`
+- merge MR 4, handling OctalInt in YAML 1.1 (provided by [Jacob Floyd](https://sourceforge.net/u/cognifloyd/profile/))
+- fix loading of `!!float 42` (reported by Eric on [Stack overflow](https://stackoverflow.com/a/71555107/1307905))
+- line numbers are now set on `CommentedKeySeq` and `CommentedKeyMap` (which are created if you have a sequence resp. mapping as the key in a mapping)
+- plain scalars: put single words longer than width on a line of
+their own, instead of after the previous line (issue 427, reported
+by [Antoine
+Cotten](https://sourceforge.net/u/antoineco/profile/)). Caveat:
+this currently results in a space ending the previous line.
+- fix for folded scalar part of 421: comments after ">" on first
+line of folded scalars are now preserved (as were those in the
+same position on literal scalars). Issue reported by Jacob Floyd.
+- added stacklevel to warnings
+- typing changed from Py2 compatible comments to Py3, removed various Py2-isms
+
+0.17.21 (2022-02-12):
+
+- fix bug in calling `.compose()` method with `pathlib.Path` instance.
+
+0.17.20 (2022-01-03):
+
+- fix error in microseconds while rounding datetime fractions >= 9999995 (reported by [Luis Ferreira](https://sourceforge.net/u/ljmf00/))
+
+0.17.19 (2021-12-26):
+
+- fix mypy problems (reported by [Arun](https://sourceforge.net/u/arunppsg/profile/))
+
+0.17.18 (2021-12-24):
+
+- copy-paste error in folded scalar comment attachment (reported by [Stephan Geulette](https://sourceforge.net/u/sgeulette/profile/))
+- fix 411, indent error comment between key empty seq value (reported by [Guillermo Julián](https://sourceforge.net/u/gjulianm/profile/))
+
+0.17.17 (2021-10-31):
+
+- extract timestamp matching/creation to util
+
+0.17.16 (2021-08-28):
+
+- 398 also handle issue 397 when comment is newline
+
+0.17.15 (2021-08-28):
+
+- fix issue 397, insert comment before key when a comment between key and value exists (reported by [Bastien gerard](https://sourceforge.net/u/bagerard/))
+
+0.17.14 (2021-08-25):
+
+- fix issue 396, inserting key/val in merged-in dictionary (reported by [Bastien gerard](https://sourceforge.net/u/bagerard/))
+
+0.17.13 (2021-08-21):
+
+- minor fix in attr handling
+
+0.17.12 (2021-08-21):
+
+- fix issue with anchor on registered class not preserved and those classes using package attrs with `@attr.s()` (both reported by [ssph](https://sourceforge.net/u/sph/))
+
+0.17.11 (2021-08-19):
+
+- fix error baseclass for `DuplicateKeyError` (reported by [Łukasz Rogalski](https://sourceforge.net/u/lrogalski/))
+- fix typo in reader error message, causing `KeyError` during reader error (reported by [MTU](https://sourceforge.net/u/mtu/))
+
+0.17.10 (2021-06-24):
+
+- fix issue 388, token with old comment structure != two elements (reported by [Dimitrios Bariamis](https://sourceforge.net/u/dbdbc/))
+
+0.17.9 (2021-06-10):
+
+- fix issue with updating CommentedMap (reported by sri on [StackOverflow](https://stackoverflow.com/q/67911659/1307905))
+
+0.17.8 (2021-06-09):
+
+- fix for issue 387 where templated anchors on tagged object did get set resulting in potential id reuse. (reported by [Artem Ploujnikov](https://sourceforge.net/u/flexthink/))
+
+0.17.7 (2021-05-31):
+
+- issue 385 also affected other deprecated loaders (reported via email by Oren Watson)
+
+0.17.6 (2021-05-31):
+
+- merged type annotations update provided by [Jochen Sprickerhof](https://sourceforge.net/u/jspricke/)
+- fix for issue 385: deprecated round_trip_loader function not
+working (reported by [Mike
+Gouline](https://sourceforge.net/u/gouline/))
+- wasted a few hours getting rid of mypy warnings/errors
+
+0.17.5 (2021-05-30):
+
+- fix for issue 384 `!!set` with aliased entry resulting in broken YAML on rt reported by [William Kimball](https://sourceforge.net/u/william303/))
+
+0.17.4 (2021-04-07):
+
+- prevent (empty) comments from throwing assertion error (issue 351 reported by [William Kimball](https://sourceforge.net/u/william303/)) comments (or empty line) will be dropped
+
+0.17.3 (2021-04-07):
+
+- fix for issue 382 caused by an error in a format string (reported by [William Kimball](https://sourceforge.net/u/william303/))
+- allow expansion of aliases by setting `yaml.composer.return_alias = lambda s: copy.deepcopy(s)`
+(as per [Stackoverflow answer](https://stackoverflow.com/a/66983530/1307905))
+
+0.17.2 (2021-03-29):
+
+- change -py2.py3-none-any.whl to -py3-none-any.whl, and remove 0.17.1
+
+0.17.1 (2021-03-29):
+
+- added 'Programming Language :: Python :: 3 :: Only', and
+removing 0.17.0 from PyPI (reported by [Alasdair
+Nicol](https://sourceforge.net/u/alasdairnicol/))
+
+0.17.0 (2021-03-26):
+
+- removed because of incomplete classifiers
+- this release no longer supports Python 2.7, most if not all Python 2 specific code is removed. The 0.17.x series is the last to support Python 3.5 (this also allowed for removal of the dependency on `ruamel.std.pathlib`)
+- remove Python2 specific code branches and adaptations (u-strings)
+- prepare % code for f-strings using `_F`
+- allow PyOxidisation ([issue 324](https://sourceforge.net/p/ruamel-yaml/tickets/324/) resp. [issue 171](https://github.com/indygreg/PyOxidizer/issues/171))
+- replaced Python 2 compatible enforcement of keyword arguments with '*'
+- the old top level *functions* `load`, `safe_load`, `round_trip_load`, `dump`, `safe_dump`, `round_trip_dump`, `scan`, `parse`, `compose`, `emit`, `serialize` as well as their `_all` variants for multi-document streams, now issue a `PendingDeprecationWarning` (e.g. when run from pytest, but also Python is started with `-Wd`). Use the methods on `YAML()`, which have been extended.
+- fix for issue 376: indentation changes could put literal/folded
+scalar to start before the `#` column of a following comment.
+Effectively making the comment part of the scalar in the output.
+(reported by [Bence Nagy](https://sourceforge.net/u/underyx/))
+
+0.16.13 (2021-03-05):
+
+- fix for issue 359: could not update() CommentedMap with keyword
+arguments (reported by [Steve
+Franchak](https://sourceforge.net/u/binaryadder/))
+- fix for issue 365: unable to dump mutated TimeStamp objects
+(reported by [Anton Akmerov](https://sourceforge.net/u/akhmerov))
+- fix for issue 371: unable to add comment without starting space
+(reported by [Mark Grandi](https://sourceforge.net/u/mgrandi))
+- fix for issue 373: recursive call to walk_tree not preserving
+all params (reported by [eulores](https://sourceforge.net/u/eulores/))
+- a None value in a flow-style sequence is now dumped as `null` instead of `!!null ''` (reported by mcarans on [StackOverflow](https://stackoverflow.com/a/66489600/1307905))
+
+0.16.12 (2020-09-04):
+
+- update links in doc
+
+0.16.11 (2020-09-03):
+
+- workaround issue with setuptools 0.50 and importing pip (fix by [jaraco](https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580)
+
+0.16.10 (2020-02-12):
+
+- (auto) updated image references in README to sourceforge
+
+0.16.9 (2020-02-11):
+
+- update CHANGES
+
+0.16.8 (2020-02-11):
+
+- update requirements so that ruamel.yaml.clib is installed for 3.8, as it has become available (via manylinux builds)
+
+0.16.7 (2020-01-30):
+
+- fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+- fix error in dumping literal scalar in sequence with comments before element (reported by [EJ Etherington](https://sourceforge.net/u/ejether/))
+
+0.16.6 (2020-01-20):
+
+- fix empty string mapping key roundtripping with preservation of quotes as `? ''` (reported via email by Tomer Aharoni).
+- fix incorrect state setting in class constructor (reported by [Douglas Raillard](https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/))
+- adjust deprecation warning test for Hashable, as that no longer warns (reported by [Jason Montleon](https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/))
+
+0.16.5 (2019-08-18):
+
+- allow for `YAML(typ=['unsafe', 'pytypes'])`
+
+0.16.4 (2019-08-16):
+
+- fix output of TAG directives with `#` (reported by [Thomas Smith](https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/))
+
+0.16.3 (2019-08-15):
+
+- split construct_object
+- change stuff back to keep mypy happy
+- move setting of version based on YAML directive to scanner, allowing to check for file version during TAG directive scanning
+
+0.16.2 (2019-08-15):
+
+- preserve YAML and TAG directives on roundtrip, correctly output `#` in URL for YAML 1.2 (both reported by [Thomas Smith](https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/))
+
+0.16.1 (2019-08-08):
+
+- Force the use of new version of ruamel.yaml.clib (reported by [Alex Joz](https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/))
+- Allow `#` in tag URI as these are allowed in YAML 1.2 (reported by [Thomas Smith](https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/))
+
+0.16.0 (2019-07-25):
+
+- split of C source that generates `.so` file to [ruamel.yaml.clib]( https://pypi.org/project/ruamel.yaml.clib/)
+- duplicate keys are now an error when working with the old API as well
+
+------------------------------------------------------------------------
+
+For older changes see the file
+[CHANGES](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/CHANGES)
diff --git a/contrib/python/ruamel.yaml/py3/.dist-info/top_level.txt b/contrib/python/ruamel.yaml/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..282b116fc6
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+ruamel
diff --git a/contrib/python/ruamel.yaml/py3/LICENSE b/contrib/python/ruamel.yaml/py3/LICENSE
new file mode 100644
index 0000000000..5fdca40edc
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/LICENSE
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2014-2023 Anthon van der Neut, Ruamel bvba
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/contrib/python/ruamel.yaml/py3/README.md b/contrib/python/ruamel.yaml/py3/README.md
new file mode 100644
index 0000000000..ddf9f4dbc8
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/README.md
@@ -0,0 +1,365 @@
+# ruamel.yaml
+
+`ruamel.yaml` is a YAML 1.2 loader/dumper package for Python.
+<table class="docutils">
+ <tr> <td>version</td>
+ <td>0.17.40</td>
+ </tr>
+ <tr> <td>updated</td>
+ <td>2023-10-20</td>
+ </tr>
+ <tr> <td>documentation</td>
+ <td><a href="http://yaml.readthedocs.io">http://yaml.readthedocs.io</a></td>
+ </tr>
+ <tr> <td>repository</td>
+ <td><a href="https://sourceforge.net/projects/ruamel-yaml">https://sourceforge.net/projects/ruamel-yaml</a></td>
+ </tr>
+ <tr> <td>pypi</td>
+ <td><a href="https://pypi.org/project/ruamel.yaml">https://pypi.org/project/ruamel.yaml</a></td>
+ </tr>
+</table>
+
+*Starting with 0.17.22 only Python 3.7+ is supported. The 0.17 series is
+also the last to support old PyYAML functions, replace it by creating a*
+`YAML()` *instance and use its* `.load()` *and* `.dump()` *methods.*
+**New(er) functionality is usually only available via the new API.**
+
+The 0.17.21 was the last one tested to be working on Python 3.5 and 3.6
+(the latter was not tested, because tox/virtualenv stopped supporting
+that EOL version). The 0.16.13 release was the last that was tested to
+be working on Python 2.7.
+
+*Please adjust/pin your dependencies accordingly if necessary.*
+(`ruamel.yaml<0.18`)
+
+There are now two extra plug-in packages
+(`ruamel.yaml.bytes` and `ruamel.yaml.string`)
+for those not wanting to do the streaming to a
+`io.BytesIO/StringIO` buffer themselves.
+
+If your package uses `ruamel.yaml` and is not listed on PyPI, drop me an
+email, preferably with some information on how you use the package (or a
+link to the repository) and I'll keep you informed when the status of
+the API is stable enough to make the transition.
+
+- [Overview](http://yaml.readthedocs.io/en/latest/overview/)
+- [Installing](http://yaml.readthedocs.io/en/latest/install/)
+- [Basic Usage](http://yaml.readthedocs.io/en/latest/basicuse/)
+- [Details](http://yaml.readthedocs.io/en/latest/detail/)
+- [Examples](http://yaml.readthedocs.io/en/latest/example/)
+- [API](http://yaml.readthedocs.io/en/latest/api/)
+- [Differences with
+ PyYAML](http://yaml.readthedocs.io/en/latest/pyyaml/)
+
+[![image](https://readthedocs.org/projects/yaml/badge/?version=latest)](https://yaml.readthedocs.org/en/latest?badge=latest)[![image](https://bestpractices.coreinfrastructure.org/projects/1128/badge)](https://bestpractices.coreinfrastructure.org/projects/1128)
+[![image](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw)](https://opensource.org/licenses/MIT)
+[![image](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw)](https://pypi.org/project/ruamel.yaml/)
+[![image](https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw)](https://pypi.org/project/oitnb/)
+[![image](http://www.mypy-lang.org/static/mypy_badge.svg)](http://mypy-lang.org/)
+
+# ChangeLog
+
+0.17.40 (2023-10-20):
+
+- flow style sets are now preserved (`!!set {a, b, c}`). Any values specified when loading are dropped, including `!!null ""`.
+- potential workaround for issue 484: the long_description_content_type including the variant specification `CommonMark`
+can result in problems on Azure. If you can install from `.tar.gz` using
+`RUAMEL_NO_LONG_DESCRIPTION=1 pip install ruamel.yaml --no-binary :all:` then the long description, and its
+offending type, are not included (in the METADATA).
+(Reported by [Coury Ditch](https://sourceforge.net/u/cmditch/profile/))
+
+- links in documentation update (reported by [David Hoese](https://sourceforge.net/u/daveydave400/profile/))
+- Added some `__repr__` for internally used classes
+
+0.17.39 (2023-10-19):
+
+- update README generation, no code changes
+
+0.17.36 (2023-10-19):
+
+- fixed issue 480, dumping of a loaded empty flow-style mapping with comment failed (Reported by [Stéphane Brunner](https://sourceforge.net/u/stbrunner/profile/))
+- fixed issue 482, caused by DEFAULT_MAPPING_TAG having changed to being a `Tag()` instance, not a string (reported by [yan12125](https://sourceforge.net/u/yan12125/profile/))
+- updated documentation to use mkdocs
+
+0.17.35 (2023-10-04):
+
+- support for loading dataclasses with `InitVar` variables (some special coding was necessary to get the, unexpected, default value in the corresponding instance attribute ( example of usage in [this question](https://stackoverflow.com/q/77228378/1307905))
+
+0.17.34 (2023-10-03):
+
+- Python 3.12 also loads C version when using `typ='safe'`
+- initial support for loading invoking
+`__post_init__()` on dataclasses that have that
+method after loading a registered dataclass.
+(Originally
+[asked](https://stackoverflow.com/q/51529458/1307905) on
+Stackoverflow by
+[nyanpasu64](https://stackoverflow.com/users/2683842/nyanpasu64)
+and as
+[ticket](https://sourceforge.net/p/ruamel-yaml/tickets/355/) by
+[Patrick Lehmann](https://sourceforge.net/u/paebbels/profile/))
+
+```
+@yaml.register_class
+@dataclass
+class ...
+```
+
+0.17.33 (2023-09-28):
+
+- added `flow_seq_start`, `flow_seq_end`, `flow_seq_separator`, `flow_map_start`, `flow_map_end`, `flow_map_separator` **class** attributes to the `Emitter` class so flow style output can more easily be influenced (based on [this answer](https://stackoverflow.com/a/76547814/1307905) on a StackOverflow question by [Huw Walters](https://stackoverflow.com/users/291033/huw-walters)).
+
+0.17.32 (2023-06-17):
+
+- fix issue with scanner getting stuck in infinite loop
+
+0.17.31 (2023-05-31):
+
+- added tag.setter on `ScalarEvent` and on `Node`, that takes either a `Tag` instance, or a str (reported by [Sorin Sbarnea](https://sourceforge.net/u/ssbarnea/profile/))
+
+0.17.30 (2023-05-30):
+
+- fix issue 467, caused by Tag instances not being hashable (reported by [Douglas Raillard](https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/))
+
+0.17.29 (2023-05-30):
+
+- changed the internals of the tag property from a string to a class which allows for preservation of the original handle and suffix. This should result in better results using documents with %TAG directives, as well as preserving URI escapes in tag suffixes.
+
+0.17.28 (2023-05-26):
+
+- fix for issue 464: documents ending with document end marker
+without final newline fail to load (reported by [Mariusz
+Rusiniak](https://sourceforge.net/u/r2dan/profile/))
+
+0.17.27 (2023-05-25):
+
+- fix issue with inline mappings as value for merge keys (reported by Sirish on [StackOverflow](https://stackoverflow.com/q/76331049/1307905))
+- fix for 468, error inserting after accessing merge attribute on `CommentedMap` (reported by [Bastien gerard](https://sourceforge.net/u/bagerard/))
+- fix for issue 461 pop + insert on same `CommentedMap` key throwing error (reported by [John Thorvald Wodder II](https://sourceforge.net/u/jwodder/profile/))
+
+0.17.26 (2023-05-09):
+
+- fix for error on edge case for issue 459
+
+0.17.25 (2023-05-09):
+
+- fix for regression while dumping wrapped strings with too many backslashes removed (issue 459, reported by [Lele Gaifax](https://sourceforge.net/u/lele/profile/))
+
+0.17.24 (2023-05-06):
+
+- rewrite of `CommentedMap.insert()`. If you have a merge key in the YAML document for the mapping you insert to, the position value should be the one as you look at the YAML input. This fixes issue 453 where other keys of a merged in mapping would show up after an insert (reported by [Alex Miller](https://sourceforge.net/u/millerdevel/profile/)). It also fixes a call to `.insert()` resulting in the merge key moving to be the first key if it wasn't already, and it is also now possible to insert a key before a merge key (even if it is the first key in the mapping).
+- fix (in the pure Python implementation including default) for issue 447. (reported by [Jack Cherng](https://sourceforge.net/u/jfcherng/profile/), also brought up by brent on [StackOverflow](https://stackoverflow.com/q/40072485/1307905))
+
+0.17.23 (2023-05-05):
+
+- fix 458, error on plain scalars starting with word longer than width. (reported by [Kyle Larose](https://sourceforge.net/u/klarose/profile/))
+- fix for `.update()` no longer correctly handling keyword arguments (reported by John Lin on [StackOverflow]( https://stackoverflow.com/q/76089100/1307905))
+- fix issue 454: high Unicode (emojis) in quoted strings always
+escaped (reported by [Michal
+Čihař](https://sourceforge.net/u/nijel/profile/) based on a
+question on StackOverflow).
+- fix issue with emitter conservatively inserting extra backslashes in wrapped quoted strings (reported by thebenman on [StackOverflow](https://stackoverflow.com/q/75631454/1307905))
+
+0.17.22 (2023-05-02):
+
+- fix issue 449 where the second exclamation marks got URL encoded (reported and fixing PR provided by [John Stark](https://sourceforge.net/u/jods/profile/))
+- fix issue with indent != 2 and literal scalars with empty first line (reported by wrdis on [StackOverflow](https://stackoverflow.com/q/75584262/1307905))
+- updated `__repr__` of CommentedMap, now that Python's dict is ordered -> no more `ordereddict(list-of-tuples)`
+- merge MR 4, handling OctalInt in YAML 1.1 (provided by [Jacob Floyd](https://sourceforge.net/u/cognifloyd/profile/))
+- fix loading of `!!float 42` (reported by Eric on [Stack overflow](https://stackoverflow.com/a/71555107/1307905))
+- line numbers are now set on `CommentedKeySeq` and `CommentedKeyMap` (which are created if you have a sequence resp. mapping as the key in a mapping)
+- plain scalars: put single words longer than width on a line of
+their own, instead of after the previous line (issue 427, reported
+by [Antoine
+Cotten](https://sourceforge.net/u/antoineco/profile/)). Caveat:
+this currently results in a space ending the previous line.
+- fix for folded scalar part of 421: comments after ">" on first
+line of folded scalars are now preserved (as were those in the
+same position on literal scalars). Issue reported by Jacob Floyd.
+- added stacklevel to warnings
+- typing changed from Py2 compatible comments to Py3, removed various Py2-isms
+
+0.17.21 (2022-02-12):
+
+- fix bug in calling `.compose()` method with `pathlib.Path` instance.
+
+0.17.20 (2022-01-03):
+
+- fix error in microseconds while rounding datetime fractions >= 9999995 (reported by [Luis Ferreira](https://sourceforge.net/u/ljmf00/))
+
+0.17.19 (2021-12-26):
+
+- fix mypy problems (reported by [Arun](https://sourceforge.net/u/arunppsg/profile/))
+
+0.17.18 (2021-12-24):
+
+- copy-paste error in folded scalar comment attachment (reported by [Stephan Geulette](https://sourceforge.net/u/sgeulette/profile/))
+- fix 411, indent error comment between key empty seq value (reported by [Guillermo Julián](https://sourceforge.net/u/gjulianm/profile/))
+
+0.17.17 (2021-10-31):
+
+- extract timestamp matching/creation to util
+
+0.17.16 (2021-08-28):
+
+- 398 also handle issue 397 when comment is newline
+
+0.17.15 (2021-08-28):
+
+- fix issue 397, insert comment before key when a comment between key and value exists (reported by [Bastien gerard](https://sourceforge.net/u/bagerard/))
+
+0.17.14 (2021-08-25):
+
+- fix issue 396, inserting key/val in merged-in dictionary (reported by [Bastien gerard](https://sourceforge.net/u/bagerard/))
+
+0.17.13 (2021-08-21):
+
+- minor fix in attr handling
+
+0.17.12 (2021-08-21):
+
+- fix issue with anchor on registered class not preserved and those classes using package attrs with `@attr.s()` (both reported by [ssph](https://sourceforge.net/u/sph/))
+
+0.17.11 (2021-08-19):
+
+- fix error baseclass for `DuplicateKeyError` (reported by [Łukasz Rogalski](https://sourceforge.net/u/lrogalski/))
+- fix typo in reader error message, causing `KeyError` during reader error (reported by [MTU](https://sourceforge.net/u/mtu/))
+
+0.17.10 (2021-06-24):
+
+- fix issue 388, token with old comment structure != two elements (reported by [Dimitrios Bariamis](https://sourceforge.net/u/dbdbc/))
+
+0.17.9 (2021-06-10):
+
+- fix issue with updating CommentedMap (reported by sri on [StackOverflow](https://stackoverflow.com/q/67911659/1307905))
+
+0.17.8 (2021-06-09):
+
+- fix for issue 387 where templated anchors on tagged object did get set resulting in potential id reuse. (reported by [Artem Ploujnikov](https://sourceforge.net/u/flexthink/))
+
+0.17.7 (2021-05-31):
+
+- issue 385 also affected other deprecated loaders (reported via email by Oren Watson)
+
+0.17.6 (2021-05-31):
+
+- merged type annotations update provided by [Jochen Sprickerhof](https://sourceforge.net/u/jspricke/)
+- fix for issue 385: deprecated round_trip_loader function not
+working (reported by [Mike
+Gouline](https://sourceforge.net/u/gouline/))
+- wasted a few hours getting rid of mypy warnings/errors
+
+0.17.5 (2021-05-30):
+
+- fix for issue 384 `!!set` with aliased entry resulting in broken YAML on rt (reported by [William Kimball](https://sourceforge.net/u/william303/))
+
+0.17.4 (2021-04-07):
+
+- prevent (empty) comments from throwing assertion error (issue 351 reported by [William Kimball](https://sourceforge.net/u/william303/)) comments (or empty line) will be dropped
+
+0.17.3 (2021-04-07):
+
+- fix for issue 382 caused by an error in a format string (reported by [William Kimball](https://sourceforge.net/u/william303/))
+- allow expansion of aliases by setting `yaml.composer.return_alias = lambda s: copy.deepcopy(s)`
+(as per [Stackoverflow answer](https://stackoverflow.com/a/66983530/1307905))
+
+0.17.2 (2021-03-29):
+
+- change -py2.py3-none-any.whl to -py3-none-any.whl, and remove 0.17.1
+
+0.17.1 (2021-03-29):
+
+- added 'Programming Language :: Python :: 3 :: Only', and
+removing 0.17.0 from PyPI (reported by [Alasdair
+Nicol](https://sourceforge.net/u/alasdairnicol/))
+
+0.17.0 (2021-03-26):
+
+- removed because of incomplete classifiers
+- this release no longer supports Python 2.7, most if not all Python 2 specific code is removed. The 0.17.x series is the last to support Python 3.5 (this also allowed for removal of the dependency on `ruamel.std.pathlib`)
+- remove Python2 specific code branches and adaptations (u-strings)
+- prepare % code for f-strings using `_F`
+- allow PyOxidisation ([issue 324](https://sourceforge.net/p/ruamel-yaml/tickets/324/) resp. [issue 171](https://github.com/indygreg/PyOxidizer/issues/171))
+- replaced Python 2 compatible enforcement of keyword arguments with '*'
+- the old top level *functions* `load`, `safe_load`, `round_trip_load`, `dump`, `safe_dump`, `round_trip_dump`, `scan`, `parse`, `compose`, `emit`, `serialize` as well as their `_all` variants for multi-document streams, now issue a `PendingDeprecationWarning` (e.g. when run from pytest, but also Python is started with `-Wd`). Use the methods on `YAML()`, which have been extended.
+- fix for issue 376: indentation changes could put literal/folded
+scalar to start before the `#` column of a following comment.
+Effectively making the comment part of the scalar in the output.
+(reported by [Bence Nagy](https://sourceforge.net/u/underyx/))
+
+0.16.13 (2021-03-05):
+
+- fix for issue 359: could not update() CommentedMap with keyword
+arguments (reported by [Steve
+Franchak](https://sourceforge.net/u/binaryadder/))
+- fix for issue 365: unable to dump mutated TimeStamp objects
+(reported by [Anton Akmerov](https://sourceforge.net/u/akhmerov))
+- fix for issue 371: unable to add comment without starting space
+(reported by [Mark Grandi](https://sourceforge.net/u/mgrandi))
+- fix for issue 373: recursive call to walk_tree not preserving
+all params (reported by [eulores](https://sourceforge.net/u/eulores/))
+- a None value in a flow-style sequence is now dumped as `null` instead of `!!null ''` (reported by mcarans on [StackOverflow](https://stackoverflow.com/a/66489600/1307905))
+
+0.16.12 (2020-09-04):
+
+- update links in doc
+
+0.16.11 (2020-09-03):
+
+- workaround issue with setuptools 0.50 and importing pip (fix by [jaraco](https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580))
+
+0.16.10 (2020-02-12):
+
+- (auto) updated image references in README to sourceforge
+
+0.16.9 (2020-02-11):
+
+- update CHANGES
+
+0.16.8 (2020-02-11):
+
+- update requirements so that ruamel.yaml.clib is installed for 3.8, as it has become available (via manylinux builds)
+
+0.16.7 (2020-01-30):
+
+- fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+- fix error in dumping literal scalar in sequence with comments before element (reported by [EJ Etherington](https://sourceforge.net/u/ejether/))
+
+0.16.6 (2020-01-20):
+
+- fix empty string mapping key roundtripping with preservation of quotes as `? ''` (reported via email by Tomer Aharoni).
+- fix incorrect state setting in class constructor (reported by [Douglas Raillard](https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/))
+- adjust deprecation warning test for Hashable, as that no longer warns (reported by [Jason Montleon](https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/))
+
+0.16.5 (2019-08-18):
+
+- allow for `YAML(typ=['unsafe', 'pytypes'])`
+
+0.16.4 (2019-08-16):
+
+- fix output of TAG directives with `#` (reported by [Thomas Smith](https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/))
+
+0.16.3 (2019-08-15):
+
+- split construct_object
+- change stuff back to keep mypy happy
+- move setting of version based on YAML directive to scanner, allowing to check for file version during TAG directive scanning
+
+0.16.2 (2019-08-15):
+
+- preserve YAML and TAG directives on roundtrip, correctly output `#` in URL for YAML 1.2 (both reported by [Thomas Smith](https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/))
+
+0.16.1 (2019-08-08):
+
+- Force the use of new version of ruamel.yaml.clib (reported by [Alex Joz](https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/))
+- Allow `#` in tag URI as these are allowed in YAML 1.2 (reported by [Thomas Smith](https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/))
+
+0.16.0 (2019-07-25):
+
+- split of C source that generates `.so` file to [ruamel.yaml.clib]( https://pypi.org/project/ruamel.yaml.clib/)
+- duplicate keys are now an error when working with the old API as well
+
+------------------------------------------------------------------------
+
+For older changes see the file
+[CHANGES](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/CHANGES)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/__init__.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/__init__.py
new file mode 100644
index 0000000000..a487f1b2b1
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/__init__.py
@@ -0,0 +1,57 @@
+# coding: utf-8
+
+if False: # MYPY
+ from typing import Dict, Any # NOQA
+
+_package_data = dict(
+ full_package_name='ruamel.yaml',
+ version_info=(0, 17, 40),
+ __version__='0.17.40',
+ version_timestamp='2023-10-20 14:51:55',
+ author='Anthon van der Neut',
+ author_email='a.van.der.neut@ruamel.eu',
+ description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA
+ entry_points=None,
+ since=2014,
+ extras_require={
+ ':platform_python_implementation=="CPython" and python_version<"3.13"': ['ruamel.yaml.clib>=0.2.7'], # NOQA
+ 'jinja2': ['ruamel.yaml.jinja2>=0.2'],
+ 'docs': ['ryd', 'mercurial>5.7'],
+ },
+ classifiers=[
+ 'Programming Language :: Python :: 3 :: Only',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
+ 'Programming Language :: Python :: 3.12',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Text Processing :: Markup',
+ 'Typing :: Typed',
+ ],
+ keywords='yaml 1.2 parser round-trip preserve quotes order config',
+ read_the_docs='yaml',
+ supported=[(3, 7)], # minimum
+ tox=dict(
+ env='*',
+ fl8excl='_test/lib,branch_default',
+ ),
+ # universal=True,
+ python_requires='>=3',
+ rtfd='yaml',
+) # type: Dict[Any, Any]
+
+
+version_info = _package_data['version_info']
+__version__ = _package_data['__version__']
+
+try:
+ from .cyaml import * # NOQA
+
+ __with_libyaml__ = True
+except (ImportError, ValueError): # for Jython
+ __with_libyaml__ = False
+
+from ruamel.yaml.main import * # NOQA
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/anchor.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/anchor.py
new file mode 100644
index 0000000000..1eb1480bdb
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/anchor.py
@@ -0,0 +1,18 @@
+# coding: utf-8
+
+from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA
+
+anchor_attrib = '_yaml_anchor'
+
+
+class Anchor:
+ __slots__ = 'value', 'always_dump'
+ attrib = anchor_attrib
+
+ def __init__(self) -> None:
+ self.value = None
+ self.always_dump = False
+
+ def __repr__(self) -> Any:
+ ad = ', (always dump)' if self.always_dump else ""
+ return f'Anchor({self.value!r}{ad})'
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/comments.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/comments.py
new file mode 100644
index 0000000000..2d3838436a
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/comments.py
@@ -0,0 +1,1166 @@
+# coding: utf-8
+
+"""
+stuff to deal with comments and formatting on dict/list/ordereddict/set
+these are not really related, formatting could be factored out as
+a separate base
+"""
+
+import sys
+import copy
+
+
+from ruamel.yaml.compat import ordereddict
+from ruamel.yaml.compat import MutableSliceableSequence, nprintf # NOQA
+from ruamel.yaml.scalarstring import ScalarString
+from ruamel.yaml.anchor import Anchor
+from ruamel.yaml.tag import Tag
+
+from collections.abc import MutableSet, Sized, Set, Mapping
+
+from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA
+
+# fmt: off
+__all__ = ['CommentedSeq', 'CommentedKeySeq',
+ 'CommentedMap', 'CommentedOrderedMap',
+ 'CommentedSet', 'comment_attrib', 'merge_attrib',
+ 'C_POST', 'C_PRE', 'C_SPLIT_ON_FIRST_BLANK', 'C_BLANK_LINE_PRESERVE_SPACE',
+ ]
+# fmt: on
+
+# splitting of comments by the scanner
+# an EOLC (End-Of-Line Comment) is preceded by some token
+# an FLC (Full Line Comment) is a comment not preceded by a token, i.e. # is
+# the first non-blank on line
+# a BL is a blank line i.e. empty or spaces/tabs only
+# bits 0 and 1 are combined, you can choose only one
+C_POST = 0b00
+C_PRE = 0b01
+C_SPLIT_ON_FIRST_BLANK = 0b10 # as C_POST, but if blank line then C_PRE all lines before
+# first blank goes to POST even if no following real FLC
+# (first blank -> first of post)
+# 0b11 -> reserved for future use
+C_BLANK_LINE_PRESERVE_SPACE = 0b100
+# C_EOL_PRESERVE_SPACE2 = 0b1000
+
+
+class IDX:
+ # temporary auto increment, so rearranging is easier
+ def __init__(self) -> None:
+ self._idx = 0
+
+ def __call__(self) -> Any:
+ x = self._idx
+ self._idx += 1
+ return x
+
+ def __str__(self) -> Any:
+ return str(self._idx)
+
+
+cidx = IDX()
+
+# more or less in order of subjective expected likelyhood
+# the _POST and _PRE ones are lists themselves
+C_VALUE_EOL = C_ELEM_EOL = cidx()
+C_KEY_EOL = cidx()
+C_KEY_PRE = C_ELEM_PRE = cidx() # not this is not value
+C_VALUE_POST = C_ELEM_POST = cidx() # not this is not value
+C_VALUE_PRE = cidx()
+C_KEY_POST = cidx()
+C_TAG_EOL = cidx()
+C_TAG_POST = cidx()
+C_TAG_PRE = cidx()
+C_ANCHOR_EOL = cidx()
+C_ANCHOR_POST = cidx()
+C_ANCHOR_PRE = cidx()
+
+
+comment_attrib = '_yaml_comment'
+format_attrib = '_yaml_format'
+line_col_attrib = '_yaml_line_col'
+merge_attrib = '_yaml_merge'
+
+
+class Comment:
+ # using sys.getsize tested the Comment objects, __slots__ makes them bigger
+ # and adding self.end did not matter
+ __slots__ = 'comment', '_items', '_post', '_pre'
+ attrib = comment_attrib
+
+ def __init__(self, old: bool = True) -> None:
+ self._pre = None if old else [] # type: ignore
+ self.comment = None # [post, [pre]]
+ # map key (mapping/omap/dict) or index (sequence/list) to a list of
+ # dict: post_key, pre_key, post_value, pre_value
+ # list: pre item, post item
+ self._items: Dict[Any, Any] = {}
+ # self._start = [] # should not put these on first item
+ self._post: List[Any] = [] # end of document comments
+
+ def __str__(self) -> str:
+ if bool(self._post):
+ end = ',\n end=' + str(self._post)
+ else:
+ end = ""
+ return f'Comment(comment={self.comment},\n items={self._items}{end})'
+
+ def _old__repr__(self) -> str:
+ if bool(self._post):
+ end = ',\n end=' + str(self._post)
+ else:
+ end = ""
+ try:
+ ln = max([len(str(k)) for k in self._items]) + 1
+ except ValueError:
+ ln = '' # type: ignore
+ it = ' '.join([f'{str(k) + ":":{ln}} {v}\n' for k, v in self._items.items()])
+ if it:
+ it = '\n ' + it + ' '
+ return f'Comment(\n start={self.comment},\n items={{{it}}}{end})'
+
+ def __repr__(self) -> str:
+ if self._pre is None:
+ return self._old__repr__()
+ if bool(self._post):
+ end = ',\n end=' + repr(self._post)
+ else:
+ end = ""
+ try:
+ ln = max([len(str(k)) for k in self._items]) + 1
+ except ValueError:
+ ln = '' # type: ignore
+ it = ' '.join([f'{str(k) + ":":{ln}} {v}\n' for k, v in self._items.items()])
+ if it:
+ it = '\n ' + it + ' '
+ return f'Comment(\n pre={self.pre},\n items={{{it}}}{end})'
+
+ @property
+ def items(self) -> Any:
+ return self._items
+
+ @property
+ def end(self) -> Any:
+ return self._post
+
+ @end.setter
+ def end(self, value: Any) -> None:
+ self._post = value
+
+ @property
+ def pre(self) -> Any:
+ return self._pre
+
+ @pre.setter
+ def pre(self, value: Any) -> None:
+ self._pre = value
+
+ def get(self, item: Any, pos: Any) -> Any:
+ x = self._items.get(item)
+ if x is None or len(x) < pos:
+ return None
+ return x[pos] # can be None
+
+ def set(self, item: Any, pos: Any, value: Any) -> Any:
+ x = self._items.get(item)
+ if x is None:
+ self._items[item] = x = [None] * (pos + 1)
+ else:
+ while len(x) <= pos:
+ x.append(None)
+ assert x[pos] is None
+ x[pos] = value
+
+ def __contains__(self, x: Any) -> Any:
+ # test if a substring is in any of the attached comments
+ if self.comment:
+ if self.comment[0] and x in self.comment[0].value:
+ return True
+ if self.comment[1]:
+ for c in self.comment[1]:
+ if x in c.value:
+ return True
+ for value in self.items.values():
+ if not value:
+ continue
+ for c in value:
+ if c and x in c.value:
+ return True
+ if self.end:
+ for c in self.end:
+ if x in c.value:
+ return True
+ return False
+
+
+# to distinguish key from None
+class NotNone:
+ pass # NOQA
+
+
+class Format:
+ __slots__ = ('_flow_style',)
+ attrib = format_attrib
+
+ def __init__(self) -> None:
+ self._flow_style: Any = None
+
+ def set_flow_style(self) -> None:
+ self._flow_style = True
+
+ def set_block_style(self) -> None:
+ self._flow_style = False
+
+ def flow_style(self, default: Optional[Any] = None) -> Any:
+ """if default (the flow_style) is None, the flow style tacked on to
+ the object explicitly will be taken. If that is None as well the
+ default flow style rules the format down the line, or the type
+ of the constituent values (simple -> flow, map/list -> block)"""
+ if self._flow_style is None:
+ return default
+ return self._flow_style
+
+ def __repr__(self) -> str:
+ return f'Format({self._flow_style})'
+
+
+class LineCol:
+ """
+ line and column information wrt document, values start at zero (0)
+ """
+
+ attrib = line_col_attrib
+
+ def __init__(self) -> None:
+ self.line = None
+ self.col = None
+ self.data: Optional[Dict[Any, Any]] = None
+
+ def add_kv_line_col(self, key: Any, data: Any) -> None:
+ if self.data is None:
+ self.data = {}
+ self.data[key] = data
+
+ def key(self, k: Any) -> Any:
+ return self._kv(k, 0, 1)
+
+ def value(self, k: Any) -> Any:
+ return self._kv(k, 2, 3)
+
+ def _kv(self, k: Any, x0: Any, x1: Any) -> Any:
+ if self.data is None:
+ return None
+ data = self.data[k]
+ return data[x0], data[x1]
+
+ def item(self, idx: Any) -> Any:
+ if self.data is None:
+ return None
+ return self.data[idx][0], self.data[idx][1]
+
+ def add_idx_line_col(self, key: Any, data: Any) -> None:
+ if self.data is None:
+ self.data = {}
+ self.data[key] = data
+
+ def __repr__(self) -> str:
+ return f'LineCol({self.line}, {self.col})'
+
+
+class CommentedBase:
+ @property
+ def ca(self):
+ # type: () -> Any
+ if not hasattr(self, Comment.attrib):
+ setattr(self, Comment.attrib, Comment())
+ return getattr(self, Comment.attrib)
+
+ def yaml_end_comment_extend(self, comment: Any, clear: bool = False) -> None:
+ if comment is None:
+ return
+ if clear or self.ca.end is None:
+ self.ca.end = []
+ self.ca.end.extend(comment)
+
+ def yaml_key_comment_extend(self, key: Any, comment: Any, clear: bool = False) -> None:
+ r = self.ca._items.setdefault(key, [None, None, None, None])
+ if clear or r[1] is None:
+ if comment[1] is not None:
+ assert isinstance(comment[1], list)
+ r[1] = comment[1]
+ else:
+ r[1].extend(comment[0])
+ r[0] = comment[0]
+
+ def yaml_value_comment_extend(self, key: Any, comment: Any, clear: bool = False) -> None:
+ r = self.ca._items.setdefault(key, [None, None, None, None])
+ if clear or r[3] is None:
+ if comment[1] is not None:
+ assert isinstance(comment[1], list)
+ r[3] = comment[1]
+ else:
+ r[3].extend(comment[0])
+ r[2] = comment[0]
+
+ def yaml_set_start_comment(self, comment: Any, indent: Any = 0) -> None:
+ """overwrites any preceding comment lines on an object
+ expects comment to be without `#` and possible have multiple lines
+ """
+ from .error import CommentMark
+ from .tokens import CommentToken
+
+ pre_comments = self._yaml_clear_pre_comment() # type: ignore
+ if comment[-1] == '\n':
+ comment = comment[:-1] # strip final newline if there
+ start_mark = CommentMark(indent)
+ for com in comment.split('\n'):
+ c = com.strip()
+ if len(c) > 0 and c[0] != '#':
+ com = '# ' + com
+ pre_comments.append(CommentToken(com + '\n', start_mark))
+
+ def yaml_set_comment_before_after_key(
+ self,
+ key: Any,
+ before: Any = None,
+ indent: Any = 0,
+ after: Any = None,
+ after_indent: Any = None,
+ ) -> None:
+ """
+ expects comment (before/after) to be without `#` and possible have multiple lines
+ """
+ from ruamel.yaml.error import CommentMark
+ from ruamel.yaml.tokens import CommentToken
+
+ def comment_token(s: Any, mark: Any) -> Any:
+ # handle empty lines as having no comment
+ return CommentToken(('# ' if s else "") + s + '\n', mark)
+
+ if after_indent is None:
+ after_indent = indent + 2
+ if before and (len(before) > 1) and before[-1] == '\n':
+ before = before[:-1] # strip final newline if there
+ if after and after[-1] == '\n':
+ after = after[:-1] # strip final newline if there
+ start_mark = CommentMark(indent)
+ c = self.ca.items.setdefault(key, [None, [], None, None])
+ if before is not None:
+ if c[1] is None:
+ c[1] = []
+ if before == '\n':
+ c[1].append(comment_token("", start_mark)) # type: ignore
+ else:
+ for com in before.split('\n'):
+ c[1].append(comment_token(com, start_mark)) # type: ignore
+ if after:
+ start_mark = CommentMark(after_indent)
+ if c[3] is None:
+ c[3] = []
+ for com in after.split('\n'):
+ c[3].append(comment_token(com, start_mark)) # type: ignore
+
+ @property
+ def fa(self) -> Any:
+ """format attribute
+
+ set_flow_style()/set_block_style()"""
+ if not hasattr(self, Format.attrib):
+ setattr(self, Format.attrib, Format())
+ return getattr(self, Format.attrib)
+
+ def yaml_add_eol_comment(
+ self, comment: Any, key: Optional[Any] = NotNone, column: Optional[Any] = None,
+ ) -> None:
+ """
+ there is a problem as eol comments should start with ' #'
+ (but at the beginning of the line the space doesn't have to be before
+ the #. The column index is for the # mark
+ """
+ from .tokens import CommentToken
+ from .error import CommentMark
+
+ if column is None:
+ try:
+ column = self._yaml_get_column(key)
+ except AttributeError:
+ column = 0
+ if comment[0] != '#':
+ comment = '# ' + comment
+ if column is None:
+ if comment[0] == '#':
+ comment = ' ' + comment
+ column = 0
+ start_mark = CommentMark(column)
+ ct = [CommentToken(comment, start_mark), None]
+ self._yaml_add_eol_comment(ct, key=key)
+
+ @property
+ def lc(self) -> Any:
+ if not hasattr(self, LineCol.attrib):
+ setattr(self, LineCol.attrib, LineCol())
+ return getattr(self, LineCol.attrib)
+
+ def _yaml_set_line_col(self, line: Any, col: Any) -> None:
+ self.lc.line = line
+ self.lc.col = col
+
+ def _yaml_set_kv_line_col(self, key: Any, data: Any) -> None:
+ self.lc.add_kv_line_col(key, data)
+
+ def _yaml_set_idx_line_col(self, key: Any, data: Any) -> None:
+ self.lc.add_idx_line_col(key, data)
+
+ @property
+ def anchor(self) -> Any:
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self) -> Any:
+ if not hasattr(self, Anchor.attrib):
+ return None
+ return self.anchor
+
+ def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+ @property
+ def tag(self) -> Any:
+ if not hasattr(self, Tag.attrib):
+ setattr(self, Tag.attrib, Tag())
+ return getattr(self, Tag.attrib)
+
+ def yaml_set_ctag(self, value: Tag) -> None:
+ setattr(self, Tag.attrib, value)
+
+ def copy_attributes(self, t: Any, memo: Any = None) -> None:
+ # fmt: off
+ for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib,
+ Tag.attrib, merge_attrib]:
+ if hasattr(self, a):
+ if memo is not None:
+ setattr(t, a, copy.deepcopy(getattr(self, a, memo)))
+ else:
+ setattr(t, a, getattr(self, a))
+ # fmt: on
+
+ def _yaml_add_eol_comment(self, comment: Any, key: Any) -> None:
+ raise NotImplementedError
+
+ def _yaml_get_pre_comment(self) -> Any:
+ raise NotImplementedError
+
+ def _yaml_get_column(self, key: Any) -> Any:
+ raise NotImplementedError
+
+
+class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore
+ __slots__ = (Comment.attrib, '_lst')
+
+ def __init__(self, *args: Any, **kw: Any) -> None:
+ list.__init__(self, *args, **kw)
+
+ def __getsingleitem__(self, idx: Any) -> Any:
+ return list.__getitem__(self, idx)
+
+ def __setsingleitem__(self, idx: Any, value: Any) -> None:
+ # try to preserve the scalarstring type if setting an existing key to a new value
+ if idx < len(self):
+ if (
+ isinstance(value, str)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[idx], ScalarString)
+ ):
+ value = type(self[idx])(value)
+ list.__setitem__(self, idx, value)
+
+ def __delsingleitem__(self, idx: Any = None) -> Any:
+ list.__delitem__(self, idx)
+ self.ca.items.pop(idx, None) # might not be there -> default value
+ for list_index in sorted(self.ca.items):
+ if list_index < idx:
+ continue
+ self.ca.items[list_index - 1] = self.ca.items.pop(list_index)
+
+ def __len__(self) -> int:
+ return list.__len__(self)
+
+ def insert(self, idx: Any, val: Any) -> None:
+ """the comments after the insertion have to move forward"""
+ list.insert(self, idx, val)
+ for list_index in sorted(self.ca.items, reverse=True):
+ if list_index < idx:
+ break
+ self.ca.items[list_index + 1] = self.ca.items.pop(list_index)
+
+ def extend(self, val: Any) -> None:
+ list.extend(self, val)
+
+ def __eq__(self, other: Any) -> bool:
+ return list.__eq__(self, other)
+
+ def _yaml_add_comment(self, comment: Any, key: Optional[Any] = NotNone) -> None:
+ if key is not NotNone:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment: Any, key: Any) -> None:
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key: Any) -> Any:
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key: Any) -> Any:
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self) -> Any:
+ pre_comments: List[Any] = []
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ pre_comments = self.ca.comment[1]
+ return pre_comments
+
+ def _yaml_clear_pre_comment(self) -> Any:
+ pre_comments: List[Any] = []
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+ def __deepcopy__(self, memo: Any) -> Any:
+ res = self.__class__()
+ memo[id(self)] = res
+ for k in self:
+ res.append(copy.deepcopy(k, memo))
+ self.copy_attributes(res, memo=memo)
+ return res
+
+ def __add__(self, other: Any) -> Any:
+ return list.__add__(self, other)
+
+ def sort(self, key: Any = None, reverse: bool = False) -> None:
+ if key is None:
+ tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse)
+ list.__init__(self, [x[0] for x in tmp_lst])
+ else:
+ tmp_lst = sorted(
+ zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse,
+ )
+ list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst])
+ itm = self.ca.items
+ self.ca._items = {}
+ for idx, x in enumerate(tmp_lst):
+ old_index = x[1]
+ if old_index in itm:
+ self.ca.items[idx] = itm[old_index]
+
+ def __repr__(self) -> Any:
+ return list.__repr__(self)
+
+
+class CommentedKeySeq(tuple, CommentedBase): # type: ignore
+ """This primarily exists to be able to roundtrip keys that are sequences"""
+
+ def _yaml_add_comment(self, comment: Any, key: Optional[Any] = NotNone) -> None:
+ if key is not NotNone:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment: Any, key: Any) -> None:
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key: Any) -> Any:
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key: Any) -> Any:
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self) -> Any:
+ pre_comments: List[Any] = []
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ pre_comments = self.ca.comment[1]
+ return pre_comments
+
+ def _yaml_clear_pre_comment(self) -> Any:
+ pre_comments: List[Any] = []
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+
+class CommentedMapView(Sized):
+ __slots__ = ('_mapping',)
+
+ def __init__(self, mapping: Any) -> None:
+ self._mapping = mapping
+
+ def __len__(self) -> int:
+ count = len(self._mapping)
+ return count
+
+
+class CommentedMapKeysView(CommentedMapView, Set): # type: ignore
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(self, it: Any) -> Any:
+ return set(it)
+
+ def __contains__(self, key: Any) -> Any:
+ return key in self._mapping
+
+ def __iter__(self) -> Any:
+ # yield from self._mapping # not in py27, pypy
+ # for x in self._mapping._keys():
+ for x in self._mapping:
+ yield x
+
+
+class CommentedMapItemsView(CommentedMapView, Set): # type: ignore
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(self, it: Any) -> Any:
+ return set(it)
+
+ def __contains__(self, item: Any) -> Any:
+ key, value = item
+ try:
+ v = self._mapping[key]
+ except KeyError:
+ return False
+ else:
+ return v == value
+
+ def __iter__(self) -> Any:
+ for key in self._mapping._keys():
+ yield (key, self._mapping[key])
+
+
+class CommentedMapValuesView(CommentedMapView):
+ __slots__ = ()
+
+ def __contains__(self, value: Any) -> Any:
+ for key in self._mapping:
+ if value == self._mapping[key]:
+ return True
+ return False
+
+ def __iter__(self) -> Any:
+ for key in self._mapping._keys():
+ yield self._mapping[key]
+
+
class CommentedMap(ordereddict, CommentedBase):
    """Ordered mapping that preserves comments and merge (``<<``) information
    across a YAML round-trip.

    Keys acquired through a merge are stored in the underlying ordereddict but
    are *not* in ``self._ok`` ("own keys"); maps that merge this one register
    themselves in ``self._ref`` so they can be told when our keys change.
    """

    __slots__ = (Comment.attrib, '_ok', '_ref')

    def __init__(self, *args: Any, **kw: Any) -> None:
        self._ok: MutableSet[Any] = set()  # own keys
        self._ref: List[CommentedMap] = []  # maps that merged us
        ordereddict.__init__(self, *args, **kw)

    def _yaml_add_comment(
        self, comment: Any, key: Optional[Any] = NotNone, value: Optional[Any] = NotNone,
    ) -> None:
        """values is set to key to indicate a value attachment of comment"""
        if key is not NotNone:
            self.yaml_key_comment_extend(key, comment)
            return
        if value is not NotNone:
            self.yaml_value_comment_extend(value, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment: Any, key: Any) -> None:
        """add on the value line, with value specified by the key"""
        self._yaml_add_comment(comment, value=key)

    def _yaml_get_columnX(self, key: Any) -> Any:
        # column of the comment token stored for ``key`` in ca.items
        return self.ca.items[key][2].start_mark.column

    def _yaml_get_column(self, key: Any) -> Any:
        """Best-guess column for an EOL comment on ``key``: prefer the comment
        column of the previous or next key, else the closest preceding key
        that carries a comment."""
        column = None
        sel_idx = None
        pre, post, last = None, None, None
        for x in self:
            if pre is not None and x != key:
                post = x
                break
            if x == key:
                pre = last
            last = x
        if pre in self.ca.items:
            sel_idx = pre
        elif post in self.ca.items:
            sel_idx = post
        else:
            # self.ca.items is not ordered
            for k1 in self:
                if k1 >= key:
                    break
                if k1 not in self.ca.items:
                    continue
                sel_idx = k1
        if sel_idx is not None:
            column = self._yaml_get_columnX(sel_idx)
        return column

    def _yaml_get_pre_comment(self) -> Any:
        pre_comments: List[Any] = []
        if self.ca.comment is None:
            self.ca.comment = [None, pre_comments]
        else:
            pre_comments = self.ca.comment[1]
        return pre_comments

    def _yaml_clear_pre_comment(self) -> Any:
        pre_comments: List[Any] = []
        if self.ca.comment is None:
            self.ca.comment = [None, pre_comments]
        else:
            self.ca.comment[1] = pre_comments
        return pre_comments

    def update(self, *vals: Any, **kw: Any) -> None:
        """dict.update that additionally registers all new keys as own keys."""
        try:
            ordereddict.update(self, *vals, **kw)
        except TypeError:
            # probably a dict that is used
            for x in vals[0]:
                self[x] = vals[0][x]
        if vals:
            try:
                self._ok.update(vals[0].keys())  # type: ignore
            except AttributeError:
                # assume one argument that is a list/tuple of two element lists/tuples
                for x in vals[0]:
                    self._ok.add(x[0])
        if kw:
            # bug fix: was ``self._ok.update(*kw.keys())`` — the unpacking made
            # set.update() treat every key *string* as an iterable, so only the
            # individual characters of multi-character keyword keys were added
            self._ok.update(kw.keys())

    def insert(self, pos: Any, key: Any, value: Any, comment: Optional[Any] = None) -> None:
        """insert key value into given position, as defined by source YAML
        attach comment if provided
        """
        if key in self._ok:
            del self[key]
        keys = [k for k in self.keys() if k in self._ok]
        try:
            ma0 = getattr(self, merge_attrib, [[-1]])[0]
            merge_pos = ma0[0]
        except IndexError:
            merge_pos = -1
        if merge_pos >= 0:
            if merge_pos >= pos:
                # insertion happens before the merge marker: shift it down one
                getattr(self, merge_attrib)[0] = (merge_pos + 1, ma0[1])
                idx_min = pos
                idx_max = len(self._ok)
            else:
                idx_min = pos - 1
                idx_max = len(self._ok)
        else:
            idx_min = pos
            idx_max = len(self._ok)
        self[key] = value  # at the end
        # re-append every own key after the insertion point to restore order
        for idx in range(idx_min, idx_max):
            self.move_to_end(keys[idx])
        self._ok.add(key)
        if comment is not None:
            self.yaml_add_eol_comment(comment, key=key)

    def mlget(self, key: Any, default: Any = None, list_ok: Any = False) -> Any:
        """multi-level get that expects dicts within dicts"""
        if not isinstance(key, list):
            return self.get(key, default)
        # assume that the key is a list of recursively accessible dicts

        def get_one_level(key_list: Any, level: Any, d: Any) -> Any:
            if not list_ok:
                assert isinstance(d, dict)
            if level >= len(key_list):
                if level > len(key_list):
                    raise IndexError
                return d[key_list[level - 1]]
            return get_one_level(key_list, level + 1, d[key_list[level - 1]])

        try:
            return get_one_level(key, 1, self)
        except KeyError:
            return default
        except (TypeError, IndexError):
            if not list_ok:
                raise
            return default

    def __getitem__(self, key: Any) -> Any:
        try:
            return ordereddict.__getitem__(self, key)
        except KeyError:
            # fall back to keys provided via a merge (``<<``)
            for merged in getattr(self, merge_attrib, []):
                if key in merged[1]:
                    return merged[1][key]
            raise

    def __setitem__(self, key: Any, value: Any) -> None:
        # try to preserve the scalarstring type if setting an existing key to a new value
        if key in self:
            if (
                isinstance(value, str)
                and not isinstance(value, ScalarString)
                and isinstance(self[key], ScalarString)
            ):
                value = type(self[key])(value)
        ordereddict.__setitem__(self, key, value)
        self._ok.add(key)

    def _unmerged_contains(self, key: Any) -> Any:
        # True only for own (non-merged) keys, None otherwise
        if key in self._ok:
            return True
        return None

    def __contains__(self, key: Any) -> bool:
        return bool(ordereddict.__contains__(self, key))

    def get(self, key: Any, default: Any = None) -> Any:
        try:
            return self.__getitem__(key)
        except:  # NOQA
            # deliberately broad: any lookup failure (KeyError, unhashable
            # key TypeError, ...) yields the default
            return default

    def __repr__(self) -> Any:
        res = '{'
        sep = ''
        for k, v in self.items():
            res += f'{sep}{k!r}: {v!r}'
            if not sep:
                sep = ', '
        res += '}'
        return res

    def non_merged_items(self) -> Any:
        """Iterate (key, value) pairs for own keys only."""
        for x in ordereddict.__iter__(self):
            if x in self._ok:
                yield x, ordereddict.__getitem__(self, x)

    def __delitem__(self, key: Any) -> None:
        self._ok.discard(key)
        ordereddict.__delitem__(self, key)
        # maps that merged us may now resolve ``key`` differently
        for referer in self._ref:
            referer.update_key_value(key)

    def __iter__(self) -> Any:
        for x in ordereddict.__iter__(self):
            yield x

    def pop(self, key: Any, default: Any = NotNone) -> Any:
        try:
            result = self[key]
        except KeyError:
            if default is NotNone:
                raise
            return default
        del self[key]
        return result

    def _keys(self) -> Any:
        for x in ordereddict.__iter__(self):
            yield x

    def __len__(self) -> int:
        return int(ordereddict.__len__(self))

    def __eq__(self, other: Any) -> bool:
        return bool(dict(self) == other)

    def keys(self) -> Any:
        return CommentedMapKeysView(self)

    def values(self) -> Any:
        return CommentedMapValuesView(self)

    def _items(self) -> Any:
        for x in ordereddict.__iter__(self):
            yield x, ordereddict.__getitem__(self, x)

    def items(self) -> Any:
        return CommentedMapItemsView(self)

    @property
    def merge(self) -> Any:
        # list of (position, CommentedMap) pairs acquired via ``<<`` merges
        if not hasattr(self, merge_attrib):
            setattr(self, merge_attrib, [])
        return getattr(self, merge_attrib)

    def copy(self) -> Any:
        x = type(self)()  # update doesn't work
        for k, v in self._items():
            x[k] = v
        self.copy_attributes(x)
        return x

    def add_referent(self, cm: Any) -> None:
        if cm not in self._ref:
            self._ref.append(cm)

    def add_yaml_merge(self, value: Any) -> None:
        """Record merged maps and adopt their keys that we do not own."""
        for v in value:
            v[1].add_referent(self)
            for k1, v1 in v[1].items():
                if ordereddict.__contains__(self, k1):
                    continue
                ordereddict.__setitem__(self, k1, v1)
        self.merge.extend(value)

    def update_key_value(self, key: Any) -> None:
        """Called by a merged map when ``key`` changed there: re-resolve it."""
        if key in self._ok:
            return
        for v in self.merge:
            if key in v[1]:
                ordereddict.__setitem__(self, key, v[1][key])
                return
        ordereddict.__delitem__(self, key)

    def __deepcopy__(self, memo: Any) -> Any:
        res = self.__class__()
        memo[id(self)] = res
        for k in self:
            res[k] = copy.deepcopy(self[k], memo)
        self.copy_attributes(res, memo=memo)
        return res
+
+
+# based on brownie mappings
@classmethod  # type: ignore
def raise_immutable(cls: Any, *args: Any, **kwargs: Any) -> None:
    """Shared body for every mutating method of an immutable class."""
    msg = f'{cls.__name__} objects are immutable'
    raise TypeError(msg)
+
+
class CommentedKeyMap(CommentedBase, Mapping):  # type: ignore
    """Immutable ordered mapping; primarily exists to be able to roundtrip
    keys that are mappings."""

    __slots__ = Comment.attrib, '_od'

    def __init__(self, *args: Any, **kw: Any) -> None:
        if hasattr(self, '_od'):
            raise_immutable(self)
        self._od = ordereddict(*args, **kw)

    __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable

    # need to implement __getitem__, __iter__ and __len__
    def __getitem__(self, index: Any) -> Any:
        return self._od[index]

    def __iter__(self) -> Iterator[Any]:
        for x in self._od.__iter__():
            yield x

    def __len__(self) -> int:
        return len(self._od)

    def __hash__(self) -> Any:
        # hashable (unlike a normal mapping) so it can itself be used as a key
        return hash(tuple(self.items()))

    def __repr__(self) -> Any:
        if not hasattr(self, merge_attrib):
            return self._od.__repr__()
        return 'ordereddict(' + repr(list(self._od.items())) + ')'

    @classmethod
    def fromkeys(cls, keys: Any, v: Any = None) -> Any:
        # bug fix: the original definition lacked the ``cls`` parameter, so
        # @classmethod bound the class object itself to ``keys``
        return cls(dict.fromkeys(keys, v))

    def _yaml_add_comment(self, comment: Any, key: Optional[Any] = NotNone) -> None:
        if key is not NotNone:
            self.yaml_key_comment_extend(key, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment: Any, key: Any) -> None:
        self._yaml_add_comment(comment, key=key)

    def _yaml_get_columnX(self, key: Any) -> Any:
        return self.ca.items[key][0].start_mark.column

    def _yaml_get_column(self, key: Any) -> Any:
        """Best-guess column for a comment on row ``key``: previous/next row's
        comment column, else the closest preceding commented row."""
        column = None
        sel_idx = None
        pre, post = key - 1, key + 1
        if pre in self.ca.items:
            sel_idx = pre
        elif post in self.ca.items:
            sel_idx = post
        else:
            # self.ca.items is not ordered
            for row_idx, _k1 in enumerate(self):
                if row_idx >= key:
                    break
                if row_idx not in self.ca.items:
                    continue
                sel_idx = row_idx
        if sel_idx is not None:
            column = self._yaml_get_columnX(sel_idx)
        return column

    def _yaml_get_pre_comment(self) -> Any:
        pre_comments: List[Any] = []
        if self.ca.comment is None:
            self.ca.comment = [None, pre_comments]
        else:
            self.ca.comment[1] = pre_comments
        return pre_comments
+
+
class CommentedOrderedMap(CommentedMap):
    # distinct type so explicitly ordered (!!omap) mappings round-trip as such;
    # behavior is entirely inherited from CommentedMap
    __slots__ = (Comment.attrib,)
+
+
class CommentedSet(MutableSet, CommentedBase):  # type: ignore # NOQA
    """Set that keeps insertion order and comments by storing its elements as
    the keys of an ordereddict (every value is None)."""

    __slots__ = Comment.attrib, 'odict'

    def __init__(self, values: Any = None) -> None:
        self.odict = ordereddict()
        MutableSet.__init__(self)
        if values is not None:
            self |= values

    def _yaml_add_comment(
        self, comment: Any, key: Optional[Any] = NotNone, value: Optional[Any] = NotNone,
    ) -> None:
        """values is set to key to indicate a value attachment of comment"""
        if key is not NotNone:
            self.yaml_key_comment_extend(key, comment)
            return
        if value is not NotNone:
            self.yaml_value_comment_extend(value, comment)
        else:
            self.ca.comment = comment

    def _yaml_add_eol_comment(self, comment: Any, key: Any) -> None:
        """add on the value line, with value specified by the key"""
        self._yaml_add_comment(comment, value=key)

    def add(self, value: Any) -> None:
        """Add an element."""
        self.odict[value] = None

    def discard(self, value: Any) -> None:
        """Remove an element. Do not raise an exception if absent."""
        # bug fix: ``del self.odict[value]`` raised KeyError for a missing
        # element, contradicting this docstring and the MutableSet.discard
        # contract (remove() is the raising variant)
        self.odict.pop(value, None)

    def __contains__(self, x: Any) -> Any:
        return x in self.odict

    def __iter__(self) -> Any:
        for x in self.odict:
            yield x

    def __len__(self) -> int:
        return len(self.odict)

    def __repr__(self) -> str:
        return f'set({self.odict.keys()!r})'
+
+
class TaggedScalar(CommentedBase):
    """Scalar that carries an explicit tag so it survives a round-trip."""

    # the value and style attributes are set during roundtrip construction
    def __init__(self, value: Any = None, style: Any = None, tag: Any = None) -> None:
        self.value = value
        self.style = style
        if tag is None:
            return
        # a plain string tag is promoted to a Tag instance
        self.yaml_set_ctag(Tag(suffix=tag) if isinstance(tag, str) else tag)

    def __str__(self) -> Any:
        return self.value

    def count(self, s: str, start: Optional[int] = None, end: Optional[int] = None) -> Any:
        # delegate to the underlying (string) value
        return self.value.count(s, start, end)

    def __getitem__(self, pos: int) -> Any:
        return self.value[pos]
+
+
+def dump_comments(d: Any, name: str = "", sep: str = '.', out: Any = sys.stdout) -> None:
+ """
+ recursively dump comments, all but the toplevel preceded by the path
+ in dotted form x.0.a
+ """
+ if isinstance(d, dict) and hasattr(d, 'ca'):
+ if name:
+ out.write(f'{name} {type(d)}\n')
+ out.write(f'{d.ca!r}\n')
+ for k in d:
+ dump_comments(d[k], name=(name + sep + str(k)) if name else k, sep=sep, out=out)
+ elif isinstance(d, list) and hasattr(d, 'ca'):
+ if name:
+ out.write(f'{name} {type(d)}\n')
+ out.write(f'{d.ca!r}\n')
+ for idx, k in enumerate(d):
+ dump_comments(
+ k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out,
+ )
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/compat.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/compat.py
new file mode 100644
index 0000000000..c427246927
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/compat.py
@@ -0,0 +1,235 @@
+# coding: utf-8
+
+# partially from package six by Benjamin Peterson
+
+import sys
+import os
+import io
+import traceback
+from abc import abstractmethod
+import collections.abc
+
+
+# fmt: off
+from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA
+from typing import Optional # NOQA
+try:
+ from typing import SupportsIndex as SupportsIndex # in order to reexport for mypy
+except ImportError:
+ SupportsIndex = int # type: ignore
+# fmt: on
+
+
+_DEFAULT_YAML_VERSION = (1, 2)
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict # type: ignore
+
+ # to get the right name import ... as ordereddict doesn't do that
+
+
class ordereddict(OrderedDict):  # type: ignore
    """OrderedDict with an ``insert`` method, for compatibility with the
    (deprecated) ruamel.ordereddict C implementation."""

    # only define insert when the base class does not already provide one
    if not hasattr(OrderedDict, 'insert'):

        def insert(self, pos: int, key: Any, value: Any) -> None:
            # place key/value so it ends up at index ``pos``; done by emptying
            # self into a copy and re-adding the keys in the desired order
            if pos >= len(self):
                self[key] = value
                return
            od = ordereddict()
            od.update(self)
            for k in od:
                del self[k]
            for index, old_key in enumerate(od):
                if pos == index:
                    self[key] = value
                self[old_key] = od[old_key]
+
+
# re-exported stream classes
StringIO = io.StringIO
BytesIO = io.BytesIO

# StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO]
# StreamType = Union[BinaryIO, IO[str], StringIO]  # type: ignore
# kept as Any: the commented-out unions above proved too restrictive
StreamType = Any

StreamTextType = StreamType  # Union[Text, StreamType]
VersionType = Union[List[int], str, Tuple[int, int]]

# module name holding the builtins (historical py2/py3 split)
builtins_module = 'builtins'
+
+
def with_metaclass(meta: Any, *bases: Any) -> Any:
    """Create a base class with a metaclass."""
    # six-style helper: the returned class has *meta* as its metaclass
    placeholder_name = 'NewBase'
    return meta(placeholder_name, bases, {})
+
+
# bit flags selecting what to trace via the RUAMELDEBUG/YAMLDEBUG env. vars
DBG_TOKEN = 1
DBG_EVENT = 2
DBG_NODE = 4


# None means "not yet determined"; dbg() below falls back to YAMLDEBUG lazily
_debug: Optional[int] = None
if 'RUAMELDEBUG' in os.environ:
    _debugx = os.environ.get('RUAMELDEBUG')
    if _debugx is None:
        _debug = 0
    else:
        _debug = int(_debugx)
+
+
# debug-only helper, defined only when RUAMELDEBUG enabled debugging at import
if bool(_debug):

    class ObjectCounter:
        # counts how often each key has been seen
        def __init__(self) -> None:
            self.map: Dict[Any, Any] = {}

        def __call__(self, k: Any) -> None:
            self.map[k] = self.map.get(k, 0) + 1

        def dump(self) -> None:
            # NOTE(review): entries are written without a separating newline —
            # confirm this is intended
            for k in sorted(self.map):
                sys.stdout.write(f'{k} -> {self.map[k]}')

    object_counter = ObjectCounter()
+
+
+# used from yaml util when testing
+def dbg(val: Any = None) -> Any:
+ global _debug
+ if _debug is None:
+ # set to true or false
+ _debugx = os.environ.get('YAMLDEBUG')
+ if _debugx is None:
+ _debug = 0
+ else:
+ _debug = int(_debugx)
+ if val is None:
+ return _debug
+ return _debug & val
+
+
class Nprint:
    """Debug printer: writes to stdout, or appends to ``file_name`` if given.

    Calls are no-ops unless the module-level ``_debug`` flag is truthy.
    """

    def __init__(self, file_name: Any = None) -> None:
        self._max_print: Any = None  # when set: force exit after this many calls
        self._count: Any = None  # calls remaining until the forced exit
        self._file_name = file_name

    def __call__(self, *args: Any, **kw: Any) -> None:
        if not bool(_debug):
            return
        out = sys.stdout if self._file_name is None else open(self._file_name, 'a')
        dbgprint = print  # to fool checking for print statements by dv utility
        kw1 = kw.copy()
        kw1['file'] = out
        dbgprint(*args, **kw1)
        out.flush()
        if self._max_print is not None:
            if self._count is None:
                self._count = self._max_print
            self._count -= 1
            if self._count == 0:
                # exit the whole process once the print budget is used up
                dbgprint('forced exit\n')
                traceback.print_stack()
                out.flush()
                sys.exit(0)
        if self._file_name:
            out.close()

    def set_max_print(self, i: int) -> None:
        # arrange for the process to exit after ``i`` more debug prints
        self._max_print = i
        self._count = None

    def fp(self, mode: str = 'a') -> Any:
        # hand out the underlying stream; the caller is responsible for closing
        out = sys.stdout if self._file_name is None else open(self._file_name, mode)
        return out
+
+
# module-level debug printers: nprint → stdout, nprintf → appends to a log file
nprint = Nprint()
nprintf = Nprint('/var/tmp/ruamel.yaml.log')
+
+# char checkers following production rules
+
+
def check_namespace_char(ch: Any) -> bool:
    """True if *ch* is a YAML namespace character (printable, non-space,
    excluding the byte order mark)."""
    return (
        '\x21' <= ch <= '\x7E'  # ! to ~
        or '\xA0' <= ch <= '\uD7FF'
        or ('\uE000' <= ch <= '\uFFFD' and ch != '\uFEFF')  # excl. byte order mark
        or '\U00010000' <= ch <= '\U0010FFFF'
    )


def check_anchorname_char(ch: Any) -> bool:
    """True if *ch* may appear in an anchor name: a namespace character that
    is not a flow indicator."""
    return ch not in ',[]{}' and check_namespace_char(ch)
+
+
def version_tnf(t1: Any, t2: Any = None) -> Any:
    """
    return True if ruamel.yaml version_info < t1, None if t2 is specified and bigger else False
    """
    from ruamel.yaml import version_info  # NOQA

    if version_info < t1:
        return True
    # tri-state: None signals "between t1 and t2"
    return None if (t2 is not None and version_info < t2) else False
+
+
class MutableSliceableSequence(collections.abc.MutableSequence):  # type: ignore
    """MutableSequence mixin that adds slice support on top of three
    single-item primitives subclasses must provide:
    ``__getsingleitem__``, ``__setsingleitem__`` and ``__delsingleitem__``.
    """

    __slots__ = ()

    def __getitem__(self, index: Any) -> Any:
        if not isinstance(index, slice):
            return self.__getsingleitem__(index)
        # slice: build a new instance of the same type from the selected items
        return type(self)([self[i] for i in range(*index.indices(len(self)))])  # type: ignore

    def __setitem__(self, index: Any, value: Any) -> None:
        if not isinstance(index, slice):
            return self.__setsingleitem__(index, value)
        assert iter(value)
        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
        if index.step is None:
            # plain slice: replace the range by delete-then-insert
            del self[index.start : index.stop]
            for elem in reversed(value):
                self.insert(0 if index.start is None else index.start, elem)
        else:
            # extended slice: lengths must match exactly (like list semantics)
            range_parms = index.indices(len(self))
            nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1
            # need to test before changing, in case TypeError is caught
            if nr_assigned_items < len(value):
                raise TypeError(
                    f'too many elements in value {nr_assigned_items} < {len(value)}',
                )
            elif nr_assigned_items > len(value):
                raise TypeError(
                    f'not enough elements in value {nr_assigned_items} > {len(value)}',
                )
            for idx, i in enumerate(range(*range_parms)):
                self[i] = value[idx]

    def __delitem__(self, index: Any) -> None:
        if not isinstance(index, slice):
            return self.__delsingleitem__(index)
        # nprint(index.start, index.stop, index.step, index.indices(len(self)))
        # delete highest indices first so remaining positions stay valid
        for i in reversed(range(*index.indices(len(self)))):
            del self[i]

    @abstractmethod
    def __getsingleitem__(self, index: Any) -> Any:
        raise IndexError

    @abstractmethod
    def __setsingleitem__(self, index: Any, value: Any) -> None:
        raise IndexError

    @abstractmethod
    def __delsingleitem__(self, index: Any) -> None:
        raise IndexError
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/composer.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/composer.py
new file mode 100644
index 0000000000..3802d9483f
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/composer.py
@@ -0,0 +1,228 @@
+# coding: utf-8
+
+import warnings
+
+from ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
+from ruamel.yaml.compat import nprint, nprintf # NOQA
+
+from ruamel.yaml.events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Composer', 'ComposerError']
+
+
class ComposerError(MarkedYAMLError):
    # raised for structural problems while composing (e.g. undefined alias,
    # multiple documents where one was expected)
    pass
+
+
class Composer:
    """Builds the node graph (ScalarNode/SequenceNode/MappingNode) from the
    parser's event stream, resolving aliases through the anchor table."""

    def __init__(self, loader: Any = None) -> None:
        self.loader = loader
        # register ourselves on the loader unless it already has a composer
        if self.loader is not None and getattr(self.loader, '_composer', None) is None:
            self.loader._composer = self
        self.anchors: Dict[Any, Any] = {}  # anchor name -> composed node
        self.warn_double_anchors = True

    @property
    def parser(self) -> Any:
        if hasattr(self.loader, 'typ'):
            # attribute access for its side effect — presumably initialises
            # loader._parser lazily; TODO confirm against the loader code
            self.loader.parser
        return self.loader._parser

    @property
    def resolver(self) -> Any:
        # assert self.loader._resolver is not None
        if hasattr(self.loader, 'typ'):
            # attribute access for its side effect (see parser above)
            self.loader.resolver
        return self.loader._resolver

    def check_node(self) -> Any:
        """Return True if another document is available."""
        # Drop the STREAM-START event.
        if self.parser.check_event(StreamStartEvent):
            self.parser.get_event()

        # If there are more documents available?
        return not self.parser.check_event(StreamEndEvent)

    def get_node(self) -> Any:
        # Get the root node of the next document.
        if not self.parser.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self) -> Any:
        """Compose the stream's only document; raise if there is more than one."""
        # Drop the STREAM-START event.
        self.parser.get_event()

        # Compose a document if the stream is not empty.
        document: Any = None
        if not self.parser.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.parser.check_event(StreamEndEvent):
            event = self.parser.get_event()
            raise ComposerError(
                'expected a single document in the stream',
                document.start_mark,
                'but found another document',
                event.start_mark,
            )

        # Drop the STREAM-END event.
        self.parser.get_event()

        return document

    def compose_document(self: Any) -> Any:
        # Drop the DOCUMENT-START event.
        self.parser.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.parser.get_event()

        # anchors are document-scoped
        self.anchors = {}
        return node

    def return_alias(self, a: Any) -> Any:
        # hook point: subclasses may wrap aliased nodes here
        return a

    def compose_node(self, parent: Any, index: Any) -> Any:
        """Compose one node (dispatching on the next event type)."""
        if self.parser.check_event(AliasEvent):
            event = self.parser.get_event()
            alias = event.anchor
            if alias not in self.anchors:
                raise ComposerError(
                    None, None, f'found undefined alias {alias!r}', event.start_mark,
                )
            return self.return_alias(self.anchors[alias])
        event = self.parser.peek_event()
        anchor = event.anchor
        if anchor is not None:  # have an anchor
            if self.warn_double_anchors and anchor in self.anchors:
                ws = (
                    f'\nfound duplicate anchor {anchor!r}\n'
                    f'first occurrence {self.anchors[anchor].start_mark}\n'
                    f'second occurrence {event.start_mark}'
                )
                warnings.warn(ws, ReusedAnchorWarning, stacklevel=2)
        self.resolver.descend_resolver(parent, index)
        if self.parser.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.parser.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.parser.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.resolver.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor: Any) -> Any:
        event = self.parser.get_event()
        tag = event.ctag
        # '!' means "non-specific tag": let the resolver pick one
        if tag is None or str(tag) == '!':
            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
            assert not isinstance(tag, str)
        # e.g tag.yaml.org,2002:str
        node = ScalarNode(
            tag,
            event.value,
            event.start_mark,
            event.end_mark,
            style=event.style,
            comment=event.comment,
            anchor=anchor,
        )
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor: Any) -> Any:
        start_event = self.parser.get_event()
        tag = start_event.ctag
        if tag is None or str(tag) == '!':
            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
            assert not isinstance(tag, str)
        node = SequenceNode(
            tag,
            [],
            start_event.start_mark,
            None,
            flow_style=start_event.flow_style,
            comment=start_event.comment,
            anchor=anchor,
        )
        # register the anchor before composing children so self-references work
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.parser.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            if node.comment is not None:
                x = node.flow_style
                nprint(
                    f'Warning: unexpected end_event commment in sequence node {x}',
                )
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node

    def compose_mapping_node(self, anchor: Any) -> Any:
        start_event = self.parser.get_event()
        tag = start_event.ctag
        if tag is None or str(tag) == '!':
            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
            assert not isinstance(tag, str)
        node = MappingNode(
            tag,
            [],
            start_event.start_mark,
            None,
            flow_style=start_event.flow_style,
            comment=start_event.comment,
            anchor=anchor,
        )
        # register the anchor before composing children so self-references work
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.parser.check_event(MappingEndEvent):
            # key_event = self.parser.peek_event()
            item_key = self.compose_node(node, None)
            # if item_key in node.value:
            #     raise ComposerError("while composing a mapping",
            #             start_event.start_mark,
            #             "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            # node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node

    def check_end_doc_comment(self, end_event: Any, node: Any) -> None:
        """Move a pre-comment hanging on the end event onto ``node``."""
        if end_event.comment and end_event.comment[1]:
            # pre comments on an end_event, no following to move to
            if node.comment is None:
                node.comment = [None, None]
            assert not isinstance(node, ScalarEvent)
            # this is a post comment on a mapping node, add as third element
            # in the list
            node.comment.append(end_event.comment[1])
            end_event.comment[1] = None
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/configobjwalker.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/configobjwalker.py
new file mode 100644
index 0000000000..28318f125e
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/configobjwalker.py
@@ -0,0 +1,15 @@
+# coding: utf-8
+
+import warnings
+
+from ruamel.yaml.util import configobj_walker as new_configobj_walker
+
+from typing import Any
+
+
def configobj_walker(cfg: Any) -> Any:
    """Deprecated shim: forwards to ``ruamel.yaml.util.configobj_walker``."""
    warnings.warn(
        'configobj_walker has moved to ruamel.yaml.util, please update your code',
        stacklevel=2,
    )
    return new_configobj_walker(cfg)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/constructor.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/constructor.py
new file mode 100644
index 0000000000..e4f6f16ac5
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/constructor.py
@@ -0,0 +1,1723 @@
+# coding: utf-8
+
+import datetime
+import base64
+import binascii
+import sys
+import types
+import warnings
+from collections.abc import Hashable, MutableSequence, MutableMapping
+
+# fmt: off
+from ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning,
+ MantissaNoDotYAML1_1Warning)
+from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.nodes import (SequenceNode, MappingNode, ScalarNode)
+from ruamel.yaml.compat import (builtins_module, # NOQA
+ nprint, nprintf, version_tnf)
+from ruamel.yaml.compat import ordereddict
+
+from ruamel.yaml.tag import Tag
+from ruamel.yaml.comments import * # NOQA
+from ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet,
+ CommentedKeySeq, CommentedSeq, TaggedScalar,
+ CommentedKeyMap,
+ C_KEY_PRE, C_KEY_EOL, C_KEY_POST,
+ C_VALUE_PRE, C_VALUE_EOL, C_VALUE_POST,
+ )
+from ruamel.yaml.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString,
+ LiteralScalarString, FoldedScalarString,
+ PlainScalarString, ScalarString)
+from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from ruamel.yaml.scalarfloat import ScalarFloat
+from ruamel.yaml.scalarbool import ScalarBoolean
+from ruamel.yaml.timestamp import TimeStamp
+from ruamel.yaml.util import timestamp_regexp, create_timestamp
+
+from typing import Any, Dict, List, Set, Iterator, Union, Optional # NOQA
+
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError', 'RoundTripConstructor']
+# fmt: on
+
+
class ConstructorError(MarkedYAMLError):
    # raised when a node cannot be turned into a Python object
    pass


class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning):
    # warning variant used while duplicate keys are still tolerated
    pass


class DuplicateKeyError(MarkedYAMLError):
    # raised when duplicate mapping keys are disallowed
    pass
+
+
+class BaseConstructor:
+
+ yaml_constructors = {} # type: Dict[Any, Any]
+ yaml_multi_constructors = {} # type: Dict[Any, Any]
+
+ def __init__(self, preserve_quotes: Optional[bool] = None, loader: Any = None) -> None:
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_constructor', None) is None:
+ self.loader._constructor = self
+ self.loader = loader
+ self.yaml_base_dict_type = dict
+ self.yaml_base_list_type = list
+ self.constructed_objects: Dict[Any, Any] = {}
+ self.recursive_objects: Dict[Any, Any] = {}
+ self.state_generators: List[Any] = []
+ self.deep_construct = False
+ self._preserve_quotes = preserve_quotes
+ self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16))
+
+ @property
+ def composer(self) -> Any:
+ if hasattr(self.loader, 'typ'):
+ return self.loader.composer
+ try:
+ return self.loader._composer
+ except AttributeError:
+ sys.stdout.write(f'slt {type(self)}\n')
+ sys.stdout.write(f'slc {self.loader._composer}\n')
+ sys.stdout.write(f'{dir(self)}\n')
+ raise
+
+ @property
+ def resolver(self) -> Any:
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
+ @property
+ def scanner(self) -> Any:
+ # needed to get to the expanded comments
+ if hasattr(self.loader, 'typ'):
+ return self.loader.scanner
+ return self.loader._scanner
+
+ def check_data(self) -> Any:
+ # If there are more documents available?
+ return self.composer.check_node()
+
+ def get_data(self) -> Any:
+ # Construct and return the next document.
+ if self.composer.check_node():
+ return self.construct_document(self.composer.get_node())
+
+ def get_single_data(self) -> Any:
+ # Ensure that the stream contains a single document and construct it.
+ node = self.composer.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node: Any) -> Any:
+ data = self.construct_object(node)
+ while bool(self.state_generators):
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for _dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node: Any, deep: bool = False) -> Any:
+ """deep is True when creating an object/mapping recursively,
+ in that case want the underlying elements available during construction
+ """
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ return self.recursive_objects[node]
+ # raise ConstructorError(
+ # None, None, 'found unconstructable recursive node', node.start_mark
+ # )
+ self.recursive_objects[node] = None
+ data = self.construct_non_recursive_object(node)
+
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_non_recursive_object(self, node: Any, tag: Optional[str] = None) -> Any:
+ constructor: Any = None
+ tag_suffix = None
+ if tag is None:
+ tag = node.tag
+ if tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag.startswith(tag_prefix):
+ tag_suffix = tag[len(tag_prefix) :]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for _dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ return data
+
+ def construct_scalar(self, node: Any) -> Any:
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(
+ None, None, f'expected a scalar node, but found {node.id!s}', node.start_mark,
+ )
+ return node.value
+
+ def construct_sequence(self, node: Any, deep: bool = False) -> Any:
+ """deep is True when creating an object/mapping recursively,
+ in that case want the underlying elements available during construction
+ """
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ None,
+ None,
+ f'expected a sequence node, but found {node.id!s}',
+ node.start_mark,
+ )
+ return [self.construct_object(child, deep=deep) for child in node.value]
+
    def construct_mapping(self, node: Any, deep: bool = False) -> Any:
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction

        Builds the mapping in up to two passes: merged (``<<``) entries first
        (duplicate checking disabled for them), then the node's own entries.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None, f'expected a mapping node, but found {node.id!s}', node.start_mark,
            )
        total_mapping = self.yaml_base_dict_type()
        if getattr(node, 'merge', None) is not None:
            # merged keys are applied first so the node's own keys win on update
            todo = [(node.merge, False), (node.value, False)]
        else:
            todo = [(node.value, True)]
        for values, check in todo:
            mapping: Dict[Any, Any] = self.yaml_base_dict_type()
            for key_node, value_node in values:
                # keys can be list -> deep
                key = self.construct_object(key_node, deep=True)
                # lists are not hashable, but tuples are
                if not isinstance(key, Hashable):
                    if isinstance(key, list):
                        key = tuple(key)
                if not isinstance(key, Hashable):
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unhashable key',
                        key_node.start_mark,
                    )

                value = self.construct_object(value_node, deep=deep)
                if check:
                    # only unique (or explicitly allowed duplicate) keys are stored
                    if self.check_mapping_key(node, key_node, mapping, key, value):
                        mapping[key] = value
                else:
                    mapping[key] = value
            total_mapping.update(mapping)
        return total_mapping
+
    def check_mapping_key(
        self, node: Any, key_node: Any, mapping: Any, key: Any, value: Any,
    ) -> bool:
        """return True if key is unique

        On a duplicate: warn (allow_duplicate_keys is None), raise
        (allow_duplicate_keys is falsy), or silently accept (truthy).
        """
        if key in mapping:
            if not self.allow_duplicate_keys:
                mk = mapping.get(key)
                args = [
                    'while constructing a mapping',
                    node.start_mark,
                    f'found duplicate key "{key}" with value "{value}" '
                    f'(original value: "{mk}")',
                    key_node.start_mark,
                    """
                    To suppress this check see:
                        http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                    """,
                    """\
                    Duplicate keys will become an error in future releases, and are errors
                    by default when using the new API.
                    """,
                ]
                if self.allow_duplicate_keys is None:
                    # legacy default: warn about future behaviour change
                    warnings.warn(DuplicateKeyFutureWarning(*args), stacklevel=1)
                else:
                    raise DuplicateKeyError(*args)
            return False
        return True
+
    def check_set_key(self: Any, node: Any, key_node: Any, setting: Any, key: Any) -> None:
        """Warn or raise on a duplicate key while constructing a set.

        Mirrors check_mapping_key but returns nothing: set insertion of a
        duplicate is a no-op anyway.
        """
        if key in setting:
            if not self.allow_duplicate_keys:
                args = [
                    'while constructing a set',
                    node.start_mark,
                    f'found duplicate key "{key}"',
                    key_node.start_mark,
                    """
                    To suppress this check see:
                        http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                    """,
                    """\
                    Duplicate keys will become an error in future releases, and are errors
                    by default when using the new API.
                    """,
                ]
                if self.allow_duplicate_keys is None:
                    warnings.warn(DuplicateKeyFutureWarning(*args), stacklevel=1)
                else:
                    raise DuplicateKeyError(*args)
+
+ def construct_pairs(self, node: Any, deep: bool = False) -> Any:
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None, None, f'expected a mapping node, but found {node.id!s}', node.start_mark,
+ )
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
    # ToDo: putting stuff on the class makes it global, consider making this to work on an
    # instance variable once function load is dropped.
    @classmethod
    def add_constructor(cls, tag: Any, constructor: Any) -> Any:
        """Register *constructor* for *tag*; return the previously registered
        constructor for that tag (or None).
        """
        if isinstance(tag, Tag):
            tag = str(tag)
        if 'yaml_constructors' not in cls.__dict__:
            # copy-on-write: give this subclass its own registry before mutating
            cls.yaml_constructors = cls.yaml_constructors.copy()
        ret_val = cls.yaml_constructors.get(tag, None)
        cls.yaml_constructors[tag] = constructor
        return ret_val
+
    @classmethod
    def add_multi_constructor(cls, tag_prefix: Any, multi_constructor: Any) -> None:
        """Register *multi_constructor* for all tags starting with *tag_prefix*."""
        if 'yaml_multi_constructors' not in cls.__dict__:
            # copy-on-write: keep the parent class registry untouched
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
    @classmethod
    def add_default_constructor(
        cls, tag: str, method: Any = None, tag_base: str = 'tag:yaml.org,2002:',
    ) -> None:
        """Register the ``construct_yaml_<tag>`` method for short tag *tag*,
        expanded with *tag_base*; full ``tag:`` URIs are passed through unchanged.
        """
        if not tag.startswith('tag:'):
            if method is None:
                method = 'construct_yaml_' + tag
            tag = tag_base + tag
        cls.add_constructor(tag, getattr(cls, method))
+
+
class SafeConstructor(BaseConstructor):
    """Constructor resolving only the standard YAML tags; safe for untrusted
    input because it never builds arbitrary Python objects.
    """

    def construct_scalar(self, node: Any) -> Any:
        # resolve the '=' default-value key convention on mappings
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)

    def flatten_mapping(self, node: Any) -> Any:
        """
        This implements the merge key feature http://yaml.org/type/merge.html
        by inserting keys from the merge dict/list of dicts if not yet
        available in this node
        """
        merge: List[Any] = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                if merge:  # double << key
                    if self.allow_duplicate_keys:
                        del node.value[index]
                        index += 1
                        continue
                    args = [
                        'while constructing a mapping',
                        node.start_mark,
                        f'found duplicate key "{key_node.value}"',
                        key_node.start_mark,
                        """
                        To suppress this check see:
                            http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                        """,
                        """\
                        Duplicate keys will become an error in future releases, and are errors
                        by default when using the new API.
                        """,
                    ]
                    if self.allow_duplicate_keys is None:
                        warnings.warn(DuplicateKeyFutureWarning(*args), stacklevel=1)
                    else:
                        raise DuplicateKeyError(*args)
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError(
                                'while constructing a mapping',
                                node.start_mark,
                                f'expected a mapping for merging, but found {subnode.id!s}',
                                subnode.start_mark,
                            )
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # later mappings in the sequence have lower precedence
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'expected a mapping or list of mappings for merging, '
                        f'but found {value_node.id!s}',
                        value_node.start_mark,
                    )
            elif key_node.tag == 'tag:yaml.org,2002:value':
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if bool(merge):
            node.merge = merge  # separate merge keys to be able to update without duplicate
            node.value = merge + node.value

    def construct_mapping(self, node: Any, deep: bool = False) -> Any:
        """deep is True when creating an object/mapping recursively,
        in that case want the underlying elements available during construction
        """
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)

    def construct_yaml_null(self, node: Any) -> Any:
        # construct (and discard) to validate the node, then yield None
        self.construct_scalar(node)
        return None

    # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does
    bool_values = {
        'yes': True,
        'no': False,
        'y': True,
        'n': False,
        'true': True,
        'false': False,
        'on': True,
        'off': False,
    }

    def construct_yaml_bool(self, node: Any) -> bool:
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node: Any) -> int:
        value_s = self.construct_scalar(node)
        value_s = value_s.replace('_', "")
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            value_s = value_s[1:]
        if value_s == '0':
            return 0
        elif value_s.startswith('0b'):
            return sign * int(value_s[2:], 2)
        elif value_s.startswith('0x'):
            return sign * int(value_s[2:], 16)
        elif value_s.startswith('0o'):
            return sign * int(value_s[2:], 8)
        elif self.resolver.processing_version == (1, 1) and value_s[0] == '0':
            # YAML 1.1: a leading zero means octal
            return sign * int(value_s, 8)
        elif self.resolver.processing_version == (1, 1) and ':' in value_s:
            # YAML 1.1 sexagesimal (base 60) integers, e.g. 1:20:30
            digits = [int(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        else:
            return sign * int(value_s)

    # compute +inf portably by repeated squaring until it saturates
    inf_value = 1e300
    while inf_value != inf_value * inf_value:
        inf_value *= inf_value
    nan_value = -inf_value / inf_value  # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node: Any) -> float:
        value_so = self.construct_scalar(node)
        value_s = value_so.replace('_', "").lower()
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            value_s = value_s[1:]
        if value_s == '.inf':
            return sign * self.inf_value
        elif value_s == '.nan':
            return self.nan_value
        elif self.resolver.processing_version != (1, 2) and ':' in value_s:
            # YAML 1.1 sexagesimal floats
            digits = [float(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        else:
            if self.resolver.processing_version != (1, 2) and 'e' in value_s:
                # value_s is lower case independent of input
                mantissa, exponent = value_s.split('e')
                if '.' not in mantissa:
                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so), stacklevel=1)
            return sign * float(value_s)

    def construct_yaml_binary(self, node: Any) -> Any:
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(
                None,
                None,
                f'failed to convert base64 data into ascii: {exc!s}',
                node.start_mark,
            )
        try:
            return base64.decodebytes(value)
        except binascii.Error as exc:
            raise ConstructorError(
                None, None, f'failed to decode base64 data: {exc!s}', node.start_mark,
            )

    timestamp_regexp = timestamp_regexp  # moved to util 0.17.17

    def construct_yaml_timestamp(self, node: Any, values: Any = None) -> Any:
        # *values* may be pre-parsed groupdict (used by subclasses)
        if values is None:
            try:
                match = self.timestamp_regexp.match(node.value)
            except TypeError:
                match = None
            if match is None:
                raise ConstructorError(
                    None,
                    None,
                    f'failed to construct timestamp from "{node.value}"',
                    node.start_mark,
                )
            values = match.groupdict()
        return create_timestamp(**values)

    def construct_yaml_omap(self, node: Any) -> Any:
        # Note: we do now check for duplicate keys
        # generator: yield the (empty) result first so recursive refs resolve
        omap = ordereddict()
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                'while constructing an ordered map',
                node.start_mark,
                f'expected a sequence, but found {node.id!s}',
                node.start_mark,
            )
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    f'expected a mapping of length 1, but found {subnode.id!s}',
                    subnode.start_mark,
                )
            if len(subnode.value) != 1:
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    f'expected a single mapping item, but found {len(subnode.value):d} items',
                    subnode.start_mark,
                )
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            assert key not in omap
            value = self.construct_object(value_node)
            omap[key] = value

    def construct_yaml_pairs(self, node: Any) -> Any:
        # Note: the same code as `construct_yaml_omap`.
        pairs: List[Any] = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                'while constructing pairs',
                node.start_mark,
                f'expected a sequence, but found {node.id!s}',
                node.start_mark,
            )
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    'while constructing pairs',
                    node.start_mark,
                    f'expected a mapping of length 1, but found {subnode.id!s}',
                    subnode.start_mark,
                )
            if len(subnode.value) != 1:
                raise ConstructorError(
                    'while constructing pairs',
                    node.start_mark,
                    f'expected a single mapping item, but found {len(subnode.value):d} items',
                    subnode.start_mark,
                )
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node: Any) -> Any:
        data: Set[Any] = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node: Any) -> Any:
        value = self.construct_scalar(node)
        return value

    def construct_yaml_seq(self, node: Any) -> Any:
        data: List[Any] = self.yaml_base_list_type()
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node: Any) -> Any:
        data: Dict[Any, Any] = self.yaml_base_dict_type()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node: Any, cls: Any) -> Any:
        # build instance without running __init__, then restore its state
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node: Any) -> None:
        raise ConstructorError(
            None,
            None,
            f'could not determine a constructor for the tag {node.tag!r}',
            node.start_mark,
        )
+
+
# register the construct_yaml_* method for each standard YAML tag
for tag in 'null bool int float binary timestamp omap pairs set str seq map'.split():
    SafeConstructor.add_default_constructor(tag)

# fallback for any unresolved tag: raise a ConstructorError
SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined)
+
+
class Constructor(SafeConstructor):
    """Full constructor: additionally resolves the ``python/...`` tags that
    build arbitrary Python objects. NOT safe on untrusted input (module
    import and object instantiation are driven by document content).
    """

    def construct_python_str(self, node: Any) -> Any:
        return self.construct_scalar(node)

    def construct_python_unicode(self, node: Any) -> Any:
        return self.construct_scalar(node)

    def construct_python_bytes(self, node: Any) -> Any:
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(
                None,
                None,
                f'failed to convert base64 data into ascii: {exc!s}',
                node.start_mark,
            )
        try:
            return base64.decodebytes(value)
        except binascii.Error as exc:
            raise ConstructorError(
                None, None, f'failed to decode base64 data: {exc!s}', node.start_mark,
            )

    def construct_python_long(self, node: Any) -> int:
        val = self.construct_yaml_int(node)
        return val

    def construct_python_complex(self, node: Any) -> Any:
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node: Any) -> Any:
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name: Any, mark: Any) -> Any:
        """Import and return the module *name* appended to the tag."""
        if not name:
            raise ConstructorError(
                'while constructing a Python module',
                mark,
                'expected non-empty name appended to the tag',
                mark,
            )
        try:
            __import__(name)
        except ImportError as exc:
            raise ConstructorError(
                'while constructing a Python module',
                mark,
                f'cannot find module {name!r} ({exc!s})',
                mark,
            )
        return sys.modules[name]

    def find_python_name(self, name: Any, mark: Any) -> Any:
        """Resolve dotted *name* to an attribute on an importable module,
        trying the longest importable module prefix first.
        """
        if not name:
            raise ConstructorError(
                'while constructing a Python object',
                mark,
                'expected non-empty name appended to the tag',
                mark,
            )
        if '.' in name:
            lname = name.split('.')
            lmodule_name = lname
            lobject_name: List[Any] = []
            while len(lmodule_name) > 1:
                # move trailing components from module path to attribute path
                lobject_name.insert(0, lmodule_name.pop())
                module_name = '.'.join(lmodule_name)
                try:
                    __import__(module_name)
                    # object_name = '.'.join(object_name)
                    break
                except ImportError:
                    continue
        else:
            module_name = builtins_module
            lobject_name = [name]
        try:
            __import__(module_name)
        except ImportError as exc:
            raise ConstructorError(
                'while constructing a Python object',
                mark,
                f'cannot find module {module_name!r} ({exc!s})',
                mark,
            )
        module = sys.modules[module_name]
        object_name = '.'.join(lobject_name)
        obj = module
        while lobject_name:
            if not hasattr(obj, lobject_name[0]):
                raise ConstructorError(
                    'while constructing a Python object',
                    mark,
                    f'cannot find {object_name!r} in the module {module.__name__!r}',
                    mark,
                )
            obj = getattr(obj, lobject_name.pop(0))
        return obj

    def construct_python_name(self, suffix: Any, node: Any) -> Any:
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError(
                'while constructing a Python name',
                node.start_mark,
                f'expected the empty value, but found {value!r}',
                node.start_mark,
            )
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix: Any, node: Any) -> Any:
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError(
                'while constructing a Python module',
                node.start_mark,
                f'expected the empty value, but found {value!r}',
                node.start_mark,
            )
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(
        self, suffix: Any, node: Any, args: Any = None, kwds: Any = None, newobj: bool = False,
    ) -> Any:
        """Instantiate the class named by *suffix*; *newobj* selects
        ``cls.__new__`` (skipping __init__) over a normal call.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance: Any, state: Any) -> None:
        # pickle-protocol style state restoration, incl. (dict, slots) tuples
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate: Dict[Any, Any] = {}
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                setattr(instance, key, value)

    def construct_python_object(self, suffix: Any, node: Any) -> Any:
        # Format:
        #   !!python/object:module.name { ... state ... }
        instance = self.make_python_instance(suffix, node, newobj=True)
        self.recursive_objects[node] = instance
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(
        self, suffix: Any, node: Any, newobj: bool = False,
    ) -> Any:
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds: Dict[Any, Any] = {}
            state: Dict[Any, Any] = {}
            listitems: List[Any] = []
            dictitems: Dict[Any, Any] = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if bool(state):
            self.set_python_instance_state(instance, state)
        if bool(listitems):
            instance.extend(listitems)
        if bool(dictitems):
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix: Any, node: Any) -> Any:
        return self.construct_python_object_apply(suffix, node, newobj=True)

    @classmethod
    def add_default_constructor(
        cls, tag: str, method: Any = None, tag_base: str = 'tag:yaml.org,2002:python/',
    ) -> None:
        """Like the base version, but short tags expand under the python/ prefix."""
        if not tag.startswith('tag:'):
            if method is None:
                method = 'construct_yaml_' + tag
            tag = tag_base + tag
        cls.add_constructor(tag, getattr(cls, method))
+
+
# register the python/* tags on Constructor; multi-constructors handle the
# tags that carry a suffix (name:, module:, object:, object/apply:, object/new:)
Constructor.add_constructor('tag:yaml.org,2002:python/none', Constructor.construct_yaml_null)

Constructor.add_constructor('tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool)

Constructor.add_constructor('tag:yaml.org,2002:python/str', Constructor.construct_python_str)

Constructor.add_constructor(
    'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode,
)

Constructor.add_constructor(
    'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes,
)

Constructor.add_constructor('tag:yaml.org,2002:python/int', Constructor.construct_yaml_int)

Constructor.add_constructor('tag:yaml.org,2002:python/long', Constructor.construct_python_long)

Constructor.add_constructor('tag:yaml.org,2002:python/float', Constructor.construct_yaml_float)

Constructor.add_constructor(
    'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex,
)

Constructor.add_constructor('tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq)

Constructor.add_constructor(
    'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple,
)
# for tag in 'bool str unicode bytes int long float complex tuple'.split():
#     Constructor.add_default_constructor(tag)

Constructor.add_constructor('tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map)

Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/name:', Constructor.construct_python_name,
)

Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/module:', Constructor.construct_python_module,
)

Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object:', Constructor.construct_python_object,
)

Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply,
)

Constructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new,
)
+
+
+class RoundTripConstructor(SafeConstructor):
+ """need to store the comments on the node itself,
+ as well as on the items
+ """
+
    def comment(self, idx: Any) -> Any:
        # fetch the scanner comment at *idx* and mark it assigned
        # (only valid on the NEWCMNT comment-handling path)
        assert self.loader.comment_handling is not None
        x = self.scanner.comments[idx]
        x.set_assigned()
        return x
+
    def comments(self, list_of_comments: Any, idx: Optional[Any] = None) -> Any:
        # hand in the comment and optional pre, eol, post segment
        # generator yielding assigned comment objects; empty list when absent
        if list_of_comments is None:
            return []
        if idx is not None:
            if list_of_comments[idx] is None:
                return []
            list_of_comments = list_of_comments[idx]
        for x in list_of_comments:
            yield self.comment(x)
+
    def construct_scalar(self, node: Any) -> Any:
        """Construct a scalar preserving round-trip information: literal ('|')
        and folded ('>') styles, quoting style (when preserve_quotes), fold
        positions, anchors and attached comments.
        """
        if not isinstance(node, ScalarNode):
            raise ConstructorError(
                None, None, f'expected a scalar node, but found {node.id!s}', node.start_mark,
            )

        if node.style == '|' and isinstance(node.value, str):
            lss = LiteralScalarString(node.value, anchor=node.anchor)
            if self.loader and self.loader.comment_handling is None:
                if node.comment and node.comment[1]:
                    lss.comment = node.comment[1][0]  # type: ignore
            else:
                # NEWCMNT
                if node.comment is not None and node.comment[1]:
                    # nprintf('>>>>nc1', node.comment)
                    # EOL comment after |
                    lss.comment = self.comment(node.comment[1][0])  # type: ignore
            return lss
        if node.style == '>' and isinstance(node.value, str):
            # '\a' characters mark the fold points recorded by the scanner
            fold_positions: List[int] = []
            idx = -1
            while True:
                idx = node.value.find('\a', idx + 1)
                if idx < 0:
                    break
                fold_positions.append(idx - len(fold_positions))
            fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor)
            if self.loader and self.loader.comment_handling is None:
                if node.comment and node.comment[1]:
                    fss.comment = node.comment[1][0]  # type: ignore
            else:
                # NEWCMNT
                if node.comment is not None and node.comment[1]:
                    # nprintf('>>>>nc2', node.comment)
                    # EOL comment after >
                    fss.comment = self.comment(node.comment[1][0])  # type: ignore
            if fold_positions:
                fss.fold_pos = fold_positions  # type: ignore
            return fss
        elif bool(self._preserve_quotes) and isinstance(node.value, str):
            if node.style == "'":
                return SingleQuotedScalarString(node.value, anchor=node.anchor)
            if node.style == '"':
                return DoubleQuotedScalarString(node.value, anchor=node.anchor)
        # if node.ctag:
        #     data2 = TaggedScalar()
        #     data2.value = node.value
        #     data2.style = node.style
        #     data2.yaml_set_ctag(node.ctag)
        #     if node.anchor:
        #         from ruamel.yaml.serializer import templated_id

        #         if not templated_id(node.anchor):
        #             data2.yaml_set_anchor(node.anchor, always_dump=True)
        #     return data2
        if node.anchor:
            return PlainScalarString(node.value, anchor=node.anchor)
        return node.value
+
    def construct_yaml_int(self, node: Any) -> Any:
        """Construct an int, wrapping it in a ScalarInt subclass that records
        base (0b/0x/0o), zero-padded width, underscore placement and anchor so
        the original formatting survives a round trip.
        """
        width: Any = None
        value_su = self.construct_scalar(node)
        try:
            sx = value_su.rstrip('_')
            # [gap-to-last-underscore, leading-underscore?, trailing-underscore?]
            underscore: Any = [len(sx) - sx.rindex('_') - 1, False, False]
        except ValueError:
            underscore = None
        except IndexError:
            underscore = None
        value_s = value_su.replace('_', "")
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            value_s = value_s[1:]
        if value_s == '0':
            return 0
        elif value_s.startswith('0b'):
            if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
                width = len(value_s[2:])
            if underscore is not None:
                underscore[1] = value_su[2] == '_'
                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
            return BinaryInt(
                sign * int(value_s[2:], 2),
                width=width,
                underscore=underscore,
                anchor=node.anchor,
            )
        elif value_s.startswith('0x'):
            # default to lower-case if no a-fA-F in string
            if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
                width = len(value_s[2:])
            hex_fun: Any = HexInt
            for ch in value_s[2:]:
                if ch in 'ABCDEF':  # first non-digit is capital
                    hex_fun = HexCapsInt
                    break
                if ch in 'abcdef':
                    break
            if underscore is not None:
                underscore[1] = value_su[2] == '_'
                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
            return hex_fun(
                sign * int(value_s[2:], 16),
                width=width,
                underscore=underscore,
                anchor=node.anchor,
            )
        elif value_s.startswith('0o'):
            if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
                width = len(value_s[2:])
            if underscore is not None:
                underscore[1] = value_su[2] == '_'
                underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
            return OctalInt(
                sign * int(value_s[2:], 8),
                width=width,
                underscore=underscore,
                anchor=node.anchor,
            )
        elif self.resolver.processing_version != (1, 2) and value_s[0] == '0':
            # YAML 1.1 style octal with a bare leading zero
            return OctalInt(
                sign * int(value_s, 8), width=width, underscore=underscore, anchor=node.anchor,
            )
        elif self.resolver.processing_version != (1, 2) and ':' in value_s:
            # YAML 1.1 sexagesimal integers
            digits = [int(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        elif self.resolver.processing_version > (1, 1) and value_s[0] == '0':
            # not an octal, an integer with leading zero(s)
            if underscore is not None:
                # cannot have a leading underscore
                underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
            return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore)
        elif underscore:
            # cannot have a leading underscore
            underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
            return ScalarInt(
                sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor,
            )
        elif node.anchor:
            return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor)
        else:
            return sign * int(value_s)
+
    def construct_yaml_float(self, node: Any) -> Any:
        """Construct a float as a ScalarFloat that records width, precision,
        explicit sign, leading zeros and exponent formatting for round-trip.
        """

        def leading_zeros(v: Any) -> int:
            # count '0' characters in the leading run of '0' and '.' chars
            lead0 = 0
            idx = 0
            while idx < len(v) and v[idx] in '0.':
                if v[idx] == '0':
                    lead0 += 1
                idx += 1
            return lead0

        # underscore = None
        m_sign: Any = False
        value_so = self.construct_scalar(node)
        value_s = value_so.replace('_', "").lower()
        sign = +1
        if value_s[0] == '-':
            sign = -1
        if value_s[0] in '+-':
            m_sign = value_s[0]
            value_s = value_s[1:]
        if value_s == '.inf':
            return sign * self.inf_value
        if value_s == '.nan':
            return self.nan_value
        if self.resolver.processing_version != (1, 2) and ':' in value_s:
            # YAML 1.1 sexagesimal floats
            digits = [float(part) for part in value_s.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit * base
                base *= 60
            return sign * value
        if 'e' in value_s:
            try:
                mantissa, exponent = value_so.split('e')
                exp = 'e'
            except ValueError:
                mantissa, exponent = value_so.split('E')
                exp = 'E'
            if self.resolver.processing_version != (1, 2):
                # value_s is lower case independent of input
                if '.' not in mantissa:
                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so), stacklevel=1)
            lead0 = leading_zeros(mantissa)
            width = len(mantissa)
            prec = mantissa.find('.')
            if m_sign:
                width -= 1
            e_width = len(exponent)
            e_sign = exponent[0] in '+-'
            # nprint('sf', width, prec, m_sign, exp, e_width, e_sign)
            return ScalarFloat(
                sign * float(value_s),
                width=width,
                prec=prec,
                m_sign=m_sign,
                m_lead0=lead0,
                exp=exp,
                e_width=e_width,
                e_sign=e_sign,
                anchor=node.anchor,
            )
        width = len(value_so)
        # you can't use index, !!float 42 would be a float without a dot
        prec = value_so.find('.')
        lead0 = leading_zeros(value_so)
        return ScalarFloat(
            sign * float(value_s),
            width=width,
            prec=prec,
            m_sign=m_sign,
            m_lead0=lead0,
            anchor=node.anchor,
        )
+
+ def construct_yaml_str(self, node: Any) -> Any:
+ if node.ctag.handle:
+ value = self.construct_unknown(node)
+ else:
+ value = self.construct_scalar(node)
+ if isinstance(value, ScalarString):
+ return value
+ return value
+
    def construct_rt_sequence(self, node: Any, seqtyp: Any, deep: bool = False) -> Any:
        """Construct a sequence into the round-trip container *seqtyp*,
        transferring comments, anchor and per-item line/column info.
        """
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                None,
                None,
                f'expected a sequence node, but found {node.id!s}',
                node.start_mark,
            )
        ret_val = []
        if self.loader and self.loader.comment_handling is None:
            if node.comment:
                seqtyp._yaml_add_comment(node.comment[:2])
                if len(node.comment) > 2:
                    # this happens e.g. if you have a sequence element that is a flow-style
                    # mapping and that has no EOL comment but a following commentline or
                    # empty line
                    seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
        else:
            # NEWCMNT
            if node.comment:
                nprintf('nc3', node.comment)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id

            if not templated_id(node.anchor):
                seqtyp.yaml_set_anchor(node.anchor)
        for idx, child in enumerate(node.value):
            if child.comment:
                seqtyp._yaml_add_comment(child.comment, key=idx)
                child.comment = None  # if moved to sequence remove from child
            ret_val.append(self.construct_object(child, deep=deep))
            seqtyp._yaml_set_idx_line_col(
                idx, [child.start_mark.line, child.start_mark.column],
            )
        return ret_val
+
    def flatten_mapping(self, node: Any) -> Any:
        """
        This implements the merge key feature http://yaml.org/type/merge.html
        by inserting keys from the merge dict/list of dicts if not yet
        available in this node

        Unlike the Safe variant this returns a list of (index, constructed
        mapping) pairs instead of rewriting node.value, so the round-trip
        container can keep merge keys separate.
        """

        def constructed(value_node: Any) -> Any:
            # If the contents of a merge are defined within the
            # merge marker, then they won't have been constructed
            # yet. But if they were already constructed, we need to use
            # the existing object.
            if value_node in self.constructed_objects:
                value = self.constructed_objects[value_node]
            else:
                value = self.construct_object(value_node, deep=True)
            return value

        # merge = []
        merge_map_list: List[Any] = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                if merge_map_list:  # double << key
                    if self.allow_duplicate_keys:
                        del node.value[index]
                        index += 1
                        continue
                    args = [
                        'while constructing a mapping',
                        node.start_mark,
                        f'found duplicate key "{key_node.value}"',
                        key_node.start_mark,
                        """
                        To suppress this check see:
                            http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
                        """,
                        """\
                        Duplicate keys will become an error in future releases, and are errors
                        by default when using the new API.
                        """,
                    ]
                    if self.allow_duplicate_keys is None:
                        warnings.warn(DuplicateKeyFutureWarning(*args), stacklevel=1)
                    else:
                        raise DuplicateKeyError(*args)
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    merge_map_list.append((index, constructed(value_node)))
                    # self.flatten_mapping(value_node)
                    # merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError(
                                'while constructing a mapping',
                                node.start_mark,
                                f'expected a mapping for merging, but found {subnode.id!s}',
                                subnode.start_mark,
                            )
                        merge_map_list.append((index, constructed(subnode)))
                    # self.flatten_mapping(subnode)
                    # submerge.append(subnode.value)
                    # submerge.reverse()
                    # for value in submerge:
                    #     merge.extend(value)
                else:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'expected a mapping or list of mappings for merging, '
                        f'but found {value_node.id!s}',
                        value_node.start_mark,
                    )
            elif key_node.tag == 'tag:yaml.org,2002:value':
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        return merge_map_list
        # if merge:
        #     node.value = merge + node.value
+
+ def _sentinel(self) -> None:
+ pass
+
    def construct_mapping(self, node: Any, maptyp: Any, deep: bool = False) -> Any:  # type: ignore # NOQA
        """Populate mapping ``maptyp`` in place from a MappingNode.

        Handles merge keys (flattened beforehand via flatten_mapping),
        non-hashable keys (lists/dicts wrapped as CommentedKeySeq /
        CommentedKeyMap), comment and anchor preservation, and per-key
        line/column bookkeeping.  Raises ConstructorError on a non-mapping
        node or an unhashable key.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None, f'expected a mapping node, but found {node.id!s}', node.start_mark,
            )
        # flatten_mapping extracts << merge entries; they are applied last
        merge_map = self.flatten_mapping(node)
        # mapping = {}
        if self.loader and self.loader.comment_handling is None:
            # legacy comment handling: attach pre/eol comments directly
            if node.comment:
                maptyp._yaml_add_comment(node.comment[:2])
                if len(node.comment) > 2:
                    maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
        else:
            # NEWCMNT
            if node.comment:
                # nprintf('nc4', node.comment, node.start_mark)
                if maptyp.ca.pre is None:
                    maptyp.ca.pre = []
                for cmnt in self.comments(node.comment, 0):
                    maptyp.ca.pre.append(cmnt)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id

            # only keep user-written anchors, not generated (templated) ids
            if not templated_id(node.anchor):
                maptyp.yaml_set_anchor(node.anchor)
        last_key, last_value = None, self._sentinel
        for key_node, value_node in node.value:
            # keys can be list -> deep
            key = self.construct_object(key_node, deep=True)
            # lists are not hashable, but tuples are
            if not isinstance(key, Hashable):
                if isinstance(key, MutableSequence):
                    key_s = CommentedKeySeq(key)
                    if key_node.flow_style is True:
                        key_s.fa.set_flow_style()
                    elif key_node.flow_style is False:
                        key_s.fa.set_block_style()
                    key_s._yaml_set_line_col(key.lc.line, key.lc.col)  # type: ignore
                    key = key_s
                elif isinstance(key, MutableMapping):
                    key_m = CommentedKeyMap(key)
                    if key_node.flow_style is True:
                        key_m.fa.set_flow_style()
                    elif key_node.flow_style is False:
                        key_m.fa.set_block_style()
                    key_m._yaml_set_line_col(key.lc.line, key.lc.col)  # type: ignore
                    key = key_m
            if not isinstance(key, Hashable):
                raise ConstructorError(
                    'while constructing a mapping',
                    node.start_mark,
                    'found unhashable key',
                    key_node.start_mark,
                )
            value = self.construct_object(value_node, deep=deep)
            if self.check_mapping_key(node, key_node, maptyp, key, value):
                if self.loader and self.loader.comment_handling is None:
                    # comment slot 4 holds a comment that belongs before this
                    # key; reattach it to the previous entry or to this key
                    if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]:
                        if last_value is None:
                            key_node.comment[0] = key_node.comment.pop(4)
                            maptyp._yaml_add_comment(key_node.comment, value=last_key)
                        else:
                            key_node.comment[2] = key_node.comment.pop(4)
                            maptyp._yaml_add_comment(key_node.comment, key=key)
                        key_node.comment = None
                    if key_node.comment:
                        maptyp._yaml_add_comment(key_node.comment, key=key)
                    if value_node.comment:
                        maptyp._yaml_add_comment(value_node.comment, value=key)
                else:
                    # NEWCMNT
                    if key_node.comment:
                        nprintf('nc5a', key, key_node.comment)
                        if key_node.comment[0]:
                            maptyp.ca.set(key, C_KEY_PRE, key_node.comment[0])
                        if key_node.comment[1]:
                            maptyp.ca.set(key, C_KEY_EOL, key_node.comment[1])
                        if key_node.comment[2]:
                            maptyp.ca.set(key, C_KEY_POST, key_node.comment[2])
                    if value_node.comment:
                        nprintf('nc5b', key, value_node.comment)
                        if value_node.comment[0]:
                            maptyp.ca.set(key, C_VALUE_PRE, value_node.comment[0])
                        if value_node.comment[1]:
                            maptyp.ca.set(key, C_VALUE_EOL, value_node.comment[1])
                        if value_node.comment[2]:
                            maptyp.ca.set(key, C_VALUE_POST, value_node.comment[2])
                maptyp._yaml_set_kv_line_col(
                    key,
                    [
                        key_node.start_mark.line,
                        key_node.start_mark.column,
                        value_node.start_mark.line,
                        value_node.start_mark.column,
                    ],
                )
                maptyp[key] = value
                last_key, last_value = key, value  # could use indexing
        # do this last, or <<: before a key will prevent insertion in instances
        # of collections.OrderedDict (as they have no __contains__
        if merge_map:
            maptyp.add_yaml_merge(merge_map)
+
    def construct_setting(self, node: Any, typ: Any, deep: bool = False) -> Any:
        """Populate set-like ``typ`` from a mapping node (YAML sets are
        mappings whose values are null), preserving comments and anchors.

        ``deep`` is forwarded when constructing the (discarded) values.
        Raises ConstructorError on a non-mapping node or an unhashable key.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None, f'expected a mapping node, but found {node.id!s}', node.start_mark,
            )
        if self.loader and self.loader.comment_handling is None:
            # legacy comment handling path
            if node.comment:
                typ._yaml_add_comment(node.comment[:2])
                if len(node.comment) > 2:
                    typ.yaml_end_comment_extend(node.comment[2], clear=True)
        else:
            # NEWCMNT
            if node.comment:
                nprintf('nc6', node.comment)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id

            if not templated_id(node.anchor):
                typ.yaml_set_anchor(node.anchor)
        for key_node, value_node in node.value:
            # keys can be list -> deep
            key = self.construct_object(key_node, deep=True)
            # lists are not hashable, but tuples are
            if not isinstance(key, Hashable):
                if isinstance(key, list):
                    key = tuple(key)
            if not isinstance(key, Hashable):
                raise ConstructorError(
                    'while constructing a mapping',
                    node.start_mark,
                    'found unhashable key',
                    key_node.start_mark,
                )
            # construct but should be null
            value = self.construct_object(value_node, deep=deep)  # NOQA
            self.check_set_key(node, key_node, typ, key)
            if self.loader and self.loader.comment_handling is None:
                if key_node.comment:
                    typ._yaml_add_comment(key_node.comment, key=key)
                if value_node.comment:
                    typ._yaml_add_comment(value_node.comment, value=key)
            else:
                # NEWCMNT
                if key_node.comment:
                    nprintf('nc7a', key_node.comment)
                if value_node.comment:
                    nprintf('nc7b', value_node.comment)
            typ.add(key)
+
+ def construct_yaml_seq(self, node: Any) -> Iterator[CommentedSeq]:
+ data = CommentedSeq()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ # if node.comment:
+ # data._yaml_add_comment(node.comment)
+ yield data
+ data.extend(self.construct_rt_sequence(node, data))
+ self.set_collection_style(data, node)
+
+ def construct_yaml_map(self, node: Any) -> Iterator[CommentedMap]:
+ data = CommentedMap()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ yield data
+ self.construct_mapping(node, data, deep=True)
+ self.set_collection_style(data, node)
+
+ def set_collection_style(self, data: Any, node: Any) -> None:
+ if len(data) == 0:
+ return
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+
    def construct_yaml_object(self, node: Any, cls: Any) -> Any:
        """Two-step construction of an arbitrary Python object of type ``cls``.

        Yields the bare instance first (so recursive anchors resolve), then
        restores its state via ``__setstate__``, dataclass field assignment
        (with ``InitVar``/``__post_init__`` support), or a plain ``__dict__``
        update; finally re-attaches any non-templated anchor.
        """
        from dataclasses import is_dataclass, InitVar, MISSING

        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = SafeConstructor.construct_mapping(self, node, deep=True)
            data.__setstate__(state)
        elif is_dataclass(data):
            mapping = SafeConstructor.construct_mapping(self, node)
            init_var_defaults = {}
            for field in data.__dataclass_fields__.values():
                # nprintf('field', field, field.default is MISSING,
                #         isinstance(field.type, InitVar))
                # in 3.7, InitVar is a singleton
                if (
                    isinstance(field.type, InitVar) or field.type is InitVar
                ) and field.default is not MISSING:
                    init_var_defaults[field.name] = field.default
            for attr, value in mapping.items():
                # InitVar names are not real attributes; they are passed to
                # __post_init__ below instead of being set on the instance
                if attr not in init_var_defaults:
                    setattr(data, attr, value)
            post_init = getattr(data, '__post_init__', None)
            if post_init is not None:
                kw = {}
                for name, default in init_var_defaults.items():
                    kw[name] = mapping.get(name, default)
                post_init(**kw)
        else:
            state = SafeConstructor.construct_mapping(self, node)
            if hasattr(data, '__attrs_attrs__'):  # issue 394
                data.__init__(**state)
            else:
                data.__dict__.update(state)
        if node.anchor:
            from ruamel.yaml.serializer import templated_id
            from ruamel.yaml.anchor import Anchor

            if not templated_id(node.anchor):
                if not hasattr(data, Anchor.attrib):
                    a = Anchor()
                    setattr(data, Anchor.attrib, a)
                else:
                    a = getattr(data, Anchor.attrib)
                a.value = node.anchor
+
    def construct_yaml_omap(self, node: Any) -> Iterator[CommentedOrderedMap]:
        """Two-step construction of an ``!!omap`` as a CommentedOrderedMap from
        a sequence of single-entry mappings, preserving comments, anchors and
        flow style.  Raises ConstructorError on malformed omap structure.
        """
        # Note: we do now check for duplicate keys
        omap = CommentedOrderedMap()
        omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
        if node.flow_style is True:
            omap.fa.set_flow_style()
        elif node.flow_style is False:
            omap.fa.set_block_style()
        yield omap
        if self.loader and self.loader.comment_handling is None:
            if node.comment:
                omap._yaml_add_comment(node.comment[:2])
                if len(node.comment) > 2:
                    omap.yaml_end_comment_extend(node.comment[2], clear=True)
        else:
            # NEWCMNT
            if node.comment:
                nprintf('nc8', node.comment)
        if not isinstance(node, SequenceNode):
            raise ConstructorError(
                'while constructing an ordered map',
                node.start_mark,
                f'expected a sequence, but found {node.id!s}',
                node.start_mark,
            )
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    f'expected a mapping of length 1, but found {subnode.id!s}',
                    subnode.start_mark,
                )
            if len(subnode.value) != 1:
                raise ConstructorError(
                    'while constructing an ordered map',
                    node.start_mark,
                    f'expected a single mapping item, but found {len(subnode.value):d} items',
                    subnode.start_mark,
                )
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            # NOTE(review): duplicate keys are rejected via assert only, which
            # is stripped under ``python -O`` — consider a ConstructorError
            assert key not in omap
            value = self.construct_object(value_node)
            if self.loader and self.loader.comment_handling is None:
                if key_node.comment:
                    omap._yaml_add_comment(key_node.comment, key=key)
                if subnode.comment:
                    omap._yaml_add_comment(subnode.comment, key=key)
                if value_node.comment:
                    omap._yaml_add_comment(value_node.comment, value=key)
            else:
                # NEWCMNT
                if key_node.comment:
                    nprintf('nc9a', key_node.comment)
                if subnode.comment:
                    nprintf('nc9b', subnode.comment)
                if value_node.comment:
                    nprintf('nc9c', value_node.comment)
            omap[key] = value
+
+ def construct_yaml_set(self, node: Any) -> Iterator[CommentedSet]:
+ data = CommentedSet()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+ yield data
+ self.construct_setting(node, data)
+
    def construct_unknown(
        self, node: Any,
    ) -> Iterator[Union[CommentedMap, TaggedScalar, CommentedSeq]]:
        """Fallback constructor for unrecognized tags.

        Preserves the original tag (ctag), anchor, and flow/block style on a
        CommentedMap, TaggedScalar, or CommentedSeq so the document
        round-trips unchanged.  Raises ConstructorError for any other node
        kind or when construction fails.
        """
        try:
            if isinstance(node, MappingNode):
                data = CommentedMap()
                data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
                if node.flow_style is True:
                    data.fa.set_flow_style()
                elif node.flow_style is False:
                    data.fa.set_block_style()
                data.yaml_set_ctag(node.ctag)
                yield data
                if node.anchor:
                    from ruamel.yaml.serializer import templated_id

                    if not templated_id(node.anchor):
                        data.yaml_set_anchor(node.anchor)
                self.construct_mapping(node, data)
                return
            elif isinstance(node, ScalarNode):
                data2 = TaggedScalar()
                data2.value = self.construct_scalar(node)
                data2.style = node.style
                data2.yaml_set_ctag(node.ctag)
                yield data2
                if node.anchor:
                    from ruamel.yaml.serializer import templated_id

                    if not templated_id(node.anchor):
                        # always_dump: keep the anchor even if unreferenced
                        data2.yaml_set_anchor(node.anchor, always_dump=True)
                return
            elif isinstance(node, SequenceNode):
                data3 = CommentedSeq()
                data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
                if node.flow_style is True:
                    data3.fa.set_flow_style()
                elif node.flow_style is False:
                    data3.fa.set_block_style()
                data3.yaml_set_ctag(node.ctag)
                yield data3
                if node.anchor:
                    from ruamel.yaml.serializer import templated_id

                    if not templated_id(node.anchor):
                        data3.yaml_set_anchor(node.anchor)
                data3.extend(self.construct_sequence(node))
                return
        except:  # NOQA
            # NOTE(review): bare except in a generator also swallows
            # GeneratorExit/KeyboardInterrupt raised while suspended — kept
            # as-is (upstream style); confirm before narrowing
            pass
        raise ConstructorError(
            None,
            None,
            f'could not determine a constructor for the tag {node.tag!r}',
            node.start_mark,
        )
+
    def construct_yaml_timestamp(
        self, node: Any, values: Any = None,
    ) -> Union[datetime.date, datetime.datetime, TimeStamp]:
        """Construct a date/datetime, returning a TimeStamp (round-trippable
        subclass) when 'T' separator or timezone info must be preserved.

        The ``values`` parameter is accepted for signature compatibility but
        is always recomputed from the node.  Raises ConstructorError when the
        node value does not match the timestamp regexp.
        """
        try:
            match = self.timestamp_regexp.match(node.value)
        except TypeError:
            # non-string node values (e.g. int) cannot match
            match = None
        if match is None:
            raise ConstructorError(
                None,
                None,
                f'failed to construct timestamp from "{node.value}"',
                node.start_mark,
            )
        values = match.groupdict()
        if not values['hour']:
            # date only
            return create_timestamp(**values)
        # return SafeConstructor.construct_yaml_timestamp(self, node, values)
        for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']:
            if values[part]:
                break
        else:
            # plain datetime: nothing to preserve beyond the parsed value
            return create_timestamp(**values)
            # return SafeConstructor.construct_yaml_timestamp(self, node, values)
        dd = create_timestamp(**values)  # this has delta applied
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            minutes = values['tz_minute']
            tz_minute = int(minutes) if minutes else 0
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        # should check for None and solve issue 366 should be tzinfo=delta)
        # isinstance(datetime.datetime.now, datetime.date) is true)
        if isinstance(dd, datetime.datetime):
            data = TimeStamp(
                dd.year, dd.month, dd.day, dd.hour, dd.minute, dd.second, dd.microsecond,
            )
        else:
            # ToDo: make this into a DateStamp?
            # date-only result returns immediately, skipping tz bookkeeping
            data = TimeStamp(dd.year, dd.month, dd.day, 0, 0, 0, 0)
            return data
        if delta:
            # remember the textual offset so it can be re-emitted verbatim
            data._yaml['delta'] = delta
            tz = values['tz_sign'] + values['tz_hour']
            if values['tz_minute']:
                tz += ':' + values['tz_minute']
            data._yaml['tz'] = tz
        else:
            if values['tz']:  # no delta
                data._yaml['tz'] = values['tz']
        if values['t']:
            data._yaml['t'] = True
        return data
+
+ def construct_yaml_sbool(self, node: Any) -> Union[bool, ScalarBoolean]:
+ b = SafeConstructor.construct_yaml_bool(self, node)
+ if node.anchor:
+ return ScalarBoolean(b, anchor=node.anchor)
+ return b
+
+
# Register the round-trip-specific bool constructor (keeps anchors via ScalarBoolean).
RoundTripConstructor.add_default_constructor('bool', method='construct_yaml_sbool')

# The remaining standard YAML tags reuse the default construct_yaml_<tag> methods.
for tag in 'null int float binary timestamp omap pairs set str seq map'.split():
    RoundTripConstructor.add_default_constructor(tag)

# Unknown tags fall back to construct_unknown, which preserves the tag verbatim.
RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_unknown)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/cyaml.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/cyaml.py
new file mode 100644
index 0000000000..3f15ffc541
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/cyaml.py
@@ -0,0 +1,195 @@
+# coding: utf-8
+
+from _ruamel_yaml import CParser, CEmitter # type: ignore
+
+from ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor
+from ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter
+from ruamel.yaml.resolver import Resolver, BaseResolver
+
+
+from typing import Any, Union, Optional # NOQA
+from ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+
+# this includes some hacks to solve the usage of resolver by lower level
+# parts of the parser
+
+
class CBaseLoader(CParser, BaseConstructor, BaseResolver):  # type: ignore
    """Loader backed by the C parser (_ruamel_yaml) with base-level
    construction and resolving.

    ``version`` and ``preserve_quotes`` are accepted for interface
    compatibility with the pure-Python loaders but are not used here.
    """

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        CParser.__init__(self, stream)
        # the C parser object acts as its own parser and composer
        self._parser = self._composer = self
        BaseConstructor.__init__(self, loader=self)
        BaseResolver.__init__(self, loadumper=self)
        # self.descend_resolver = self._resolver.descend_resolver
        # self.ascend_resolver = self._resolver.ascend_resolver
        # self.resolve = self._resolver.resolve
+
+
class CSafeLoader(CParser, SafeConstructor, Resolver):  # type: ignore
    """Loader backed by the C parser with safe construction (no arbitrary
    Python objects) and standard implicit resolving.

    ``version`` and ``preserve_quotes`` are accepted for interface
    compatibility with the pure-Python loaders but are not used here.
    """

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        CParser.__init__(self, stream)
        # the C parser object acts as its own parser and composer
        self._parser = self._composer = self
        SafeConstructor.__init__(self, loader=self)
        Resolver.__init__(self, loadumper=self)
        # self.descend_resolver = self._resolver.descend_resolver
        # self.ascend_resolver = self._resolver.ascend_resolver
        # self.resolve = self._resolver.resolve
+
+
class CLoader(CParser, Constructor, Resolver):  # type: ignore
    """Loader backed by the C parser with full (unsafe) construction and
    standard implicit resolving.

    ``version`` and ``preserve_quotes`` are accepted for interface
    compatibility with the pure-Python loaders but are not used here.
    """

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        CParser.__init__(self, stream)
        # the C parser object acts as its own parser and composer
        self._parser = self._composer = self
        Constructor.__init__(self, loader=self)
        Resolver.__init__(self, loadumper=self)
        # self.descend_resolver = self._resolver.descend_resolver
        # self.ascend_resolver = self._resolver.ascend_resolver
        # self.resolve = self._resolver.resolve
+
+
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):  # type: ignore
    """Dumper backed by the C emitter with base-level representation and
    resolving.

    ``block_seq_indent``, ``top_level_colon_align`` and ``prefix_colon`` are
    accepted for interface compatibility with the pure-Python dumpers but are
    not forwarded to the C emitter.
    """

    def __init__(
        self: StreamType,
        stream: Any,
        default_style: Any = None,
        default_flow_style: Any = None,
        canonical: Optional[bool] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        CEmitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            encoding=encoding,
            allow_unicode=allow_unicode,
            line_break=line_break,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
        )
        # this object plays the emitter, serializer and representer roles
        self._emitter = self._serializer = self._representer = self
        BaseRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        BaseResolver.__init__(self, loadumper=self)
+
+
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):  # type: ignore
    """Dumper backed by the C emitter with safe representation and standard
    resolving.

    ``block_seq_indent``, ``top_level_colon_align`` and ``prefix_colon`` are
    accepted for interface compatibility with the pure-Python dumpers but are
    not forwarded to the C emitter.
    """

    def __init__(
        self: StreamType,
        stream: Any,
        default_style: Any = None,
        default_flow_style: Any = None,
        canonical: Optional[bool] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        # This object plays the emitter, serializer and representer roles;
        # set the aliases before CEmitter.__init__ (matching the original
        # order here).  The original repeated this exact assignment again
        # after CEmitter.__init__; that duplicate was a no-op and is removed.
        self._emitter = self._serializer = self._representer = self
        CEmitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            encoding=encoding,
            allow_unicode=allow_unicode,
            line_break=line_break,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
        )
        SafeRepresenter.__init__(
            self, default_style=default_style, default_flow_style=default_flow_style,
        )
        Resolver.__init__(self)
+
+
class CDumper(CEmitter, Representer, Resolver):  # type: ignore
    """Dumper backed by the C emitter with full representation (arbitrary
    Python objects) and standard resolving.

    ``block_seq_indent``, ``top_level_colon_align`` and ``prefix_colon`` are
    accepted for interface compatibility with the pure-Python dumpers but are
    not forwarded to the C emitter.
    """

    def __init__(
        self: StreamType,
        stream: Any,
        default_style: Any = None,
        default_flow_style: Any = None,
        canonical: Optional[bool] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        CEmitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            encoding=encoding,
            allow_unicode=allow_unicode,
            line_break=line_break,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
        )
        # this object plays the emitter, serializer and representer roles
        self._emitter = self._serializer = self._representer = self
        Representer.__init__(
            self, default_style=default_style, default_flow_style=default_flow_style,
        )
        Resolver.__init__(self)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/dumper.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/dumper.py
new file mode 100644
index 0000000000..e6457a6139
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/dumper.py
@@ -0,0 +1,218 @@
+# coding: utf-8
+
+from ruamel.yaml.emitter import Emitter
+from ruamel.yaml.serializer import Serializer
+from ruamel.yaml.representer import (
+ Representer,
+ SafeRepresenter,
+ BaseRepresenter,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver
+
+from typing import Any, Dict, List, Union, Optional # NOQA
+from ruamel.yaml.compat import StreamType, VersionType # NOQA
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
+
+
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Pure-Python dumper wiring emitter, serializer, base representer and
    base resolver together, each back-referencing this object via ``dumper``."""

    def __init__(
        self: Any,
        stream: StreamType,
        default_style: Any = None,
        default_flow_style: Any = None,
        canonical: Optional[bool] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        BaseRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        BaseResolver.__init__(self, loadumper=self)
+
+
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Pure-Python dumper with safe representation (no arbitrary Python
    objects) and standard implicit resolving."""

    def __init__(
        self,
        stream: StreamType,
        default_style: Any = None,
        default_flow_style: Any = None,
        canonical: Optional[bool] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        SafeRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        Resolver.__init__(self, loadumper=self)
+
+
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Pure-Python dumper with full representation (arbitrary Python
    objects) and standard implicit resolving."""

    def __init__(
        self,
        stream: StreamType,
        default_style: Any = None,
        default_flow_style: Any = None,
        canonical: Optional[bool] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        Representer.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        Resolver.__init__(self, loadumper=self)
+
+
class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
    """Pure-Python dumper used for round-tripping: preserves comments,
    alignment and colon prefixes via the round-trip representer, and forwards
    the layout options (``top_level_colon_align``, ``prefix_colon``) that the
    other dumpers ignore."""

    def __init__(
        self,
        stream: StreamType,
        default_style: Any = None,
        default_flow_style: Optional[bool] = None,
        canonical: Optional[int] = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Any = None,
        tags: Any = None,
        block_seq_indent: Any = None,
        top_level_colon_align: Any = None,
        prefix_colon: Any = None,
    ) -> None:
        # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            top_level_colon_align=top_level_colon_align,
            prefix_colon=prefix_colon,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        RoundTripRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        # NOTE(review): initialised with loader= (unlike the loadumper= used
        # by the other dumpers above) — presumably matches VersionedResolver's
        # signature; confirm against the resolver module
        VersionedResolver.__init__(self, loader=self)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/emitter.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/emitter.py
new file mode 100644
index 0000000000..a0f59d72df
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/emitter.py
@@ -0,0 +1,1766 @@
+# coding: utf-8
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+import sys
+from ruamel.yaml.error import YAMLError, YAMLStreamError
+from ruamel.yaml.events import * # NOQA
+
+# fmt: off
+from ruamel.yaml.compat import nprint, dbg, DBG_EVENT, \
+ check_anchorname_char, nprintf # NOQA
+# fmt: on
+
+
+from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA
+from ruamel.yaml.compat import StreamType # NOQA
+
+__all__ = ['Emitter', 'EmitterError']
+
+
class EmitterError(YAMLError):
    """Raised when the emitter receives an event stream that violates the
    expected grammar or state that cannot be serialized."""

    pass
+
+
class ScalarAnalysis:
    """Record of the properties of a scalar that decide which output styles
    (plain / quoted / block) the emitter may use for it."""

    def __init__(
        self,
        scalar: Any,
        empty: Any,
        multiline: Any,
        allow_flow_plain: bool,
        allow_block_plain: bool,
        allow_single_quoted: bool,
        allow_double_quoted: bool,
        allow_block: bool,
    ) -> None:
        # the analysed text itself, plus its basic shape
        self.scalar = scalar
        self.empty = empty
        self.multiline = multiline
        # which emission styles are admissible for this scalar
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block

    def __repr__(self) -> str:
        parts = [
            f'scalar={self.scalar!r}',
            f'empty={self.empty}',
            f'multiline={self.multiline}',
            f'allow_flow_plain={self.allow_flow_plain}',
            f'allow_block_plain={self.allow_block_plain}',
            f'allow_single_quoted={self.allow_single_quoted}',
            f'allow_double_quoted={self.allow_double_quoted}',
            f'allow_block={self.allow_block}',
        ]
        return ', '.join(parts)
+
+
class Indents:
    """Stack of ``(indent, is_sequence)`` pairs replacing the old list of
    None/int indents; additionally tracks whether each level is a sequence."""

    def __init__(self) -> None:
        self.values: List[Tuple[Any, bool]] = []

    def append(self, val: Any, seq: Any) -> None:
        self.values.append((val, seq))

    def pop(self) -> Any:
        # only the indent value is returned; the seq flag is discarded
        val, _seq = self.values.pop()
        return val

    def last_seq(self) -> bool:
        """Return the seq(uence) flag of the element added before the last
        one in increase_indent(); False when fewer than two levels exist."""
        if len(self.values) < 2:
            return False
        return self.values[-2][1]

    def seq_flow_align(
        self, seq_indent: int, column: int, pre_comment: Optional[bool] = False,
    ) -> int:
        """Extra spaces needed to align a flow sequence because of the dash."""
        # nprint('seq_flow_align', self.values, pre_comment)
        top_is_seq = bool(self.values) and self.values[-1][1]
        if len(self.values) < 2 or not top_is_seq:
            if not self.values or not pre_comment:
                return 0
        indent_val = self.values[-1][0]
        base = 0 if indent_val is None else indent_val
        if pre_comment:
            return base + seq_indent  # type: ignore
        # -1 for the dash
        return base + seq_indent - column - 1  # type: ignore

    def __len__(self) -> int:
        return len(self.values)
+
+
class Emitter:
    # fmt: off
    # tag handle prefixes emitted by default: '!' for local tags and '!!'
    # for the yaml.org,2002 domain tags
    DEFAULT_TAG_PREFIXES = {
        '!': '!',
        'tag:yaml.org,2002:': '!!',
        '!!': '!!',
    }
    # fmt: on

    # keys longer than this are never emitted as simple (inline) keys
    MAX_SIMPLE_KEY_LENGTH = 128
    # flow collection delimiters/separators; class attributes so subclasses
    # can override them
    flow_seq_start = '['
    flow_seq_end = ']'
    flow_seq_separator = ','
    flow_map_start = '{'
    flow_map_end = '}'
    flow_map_separator = ','
+
    def __init__(
        self,
        stream: StreamType,
        canonical: Any = None,
        indent: Optional[int] = None,
        width: Optional[int] = None,
        allow_unicode: Optional[bool] = None,
        line_break: Any = None,
        block_seq_indent: Optional[int] = None,
        top_level_colon_align: Optional[bool] = None,
        prefix_colon: Any = None,
        brace_single_entry_mapping_in_flow_sequence: Optional[bool] = None,
        dumper: Any = None,
    ) -> None:
        # NOQA
        """Initialise the emitter state machine.

        ``indent`` outside 2..9 falls back to the default of 2; ``width``
        must exceed twice the indent or the default of 80 is used;
        ``line_break`` must be one of '\\r', '\\n', '\\r\\n'.
        """
        self.dumper = dumper
        # register ourselves on the owning dumper unless it already has one
        if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
            self.dumper._emitter = self
        self.stream = stream

        # Encoding can be overriden by STREAM-START.
        self.encoding: Optional[Text] = None
        self.allow_space_break = None

        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states: List[Any] = []
        self.state: Any = self.expect_stream_start

        # Current event and the event queue.
        self.events: List[Any] = []
        self.event: Any = None

        # The current indentation level and the stack of previous indents.
        self.indents = Indents()
        self.indent: Optional[int] = None

        # flow_context is an expanding/shrinking list consisting of '{' and '['
        # for each unclosed flow context. If empty list that means block context
        self.flow_context: List[Text] = []

        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False

        # Characteristics of the last emitted character:
        #  - current position.
        #  - is it a whitespace?
        #  - is it an indention character
        #    (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        self.compact_seq_seq = True  # dash after dash
        self.compact_seq_map = True  # key after dash
        # self.compact_ms = False   # dash after key, only when excplicit key with ?
        self.no_newline: Optional[bool] = None  # set if directly after `- `

        # Whether the document requires an explicit document end indicator
        self.open_ended = False

        # colon handling
        self.colon = ':'
        self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
        # single entry mappings in flow sequence
        self.brace_single_entry_mapping_in_flow_sequence = (
            brace_single_entry_mapping_in_flow_sequence  # NOQA
        )

        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
        self.unicode_supplementary = sys.maxunicode > 0xFFFF
        self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
        self.top_level_colon_align = top_level_colon_align
        self.best_sequence_indent = 2
        self.requested_indent = indent  # specific for literal zero indent
        if indent and 1 < indent < 10:
            self.best_sequence_indent = indent
        self.best_map_indent = self.best_sequence_indent
        # if self.best_sequence_indent < self.sequence_dash_offset + 1:
        #     self.best_sequence_indent = self.sequence_dash_offset + 1
        self.best_width = 80
        if width and width > self.best_sequence_indent * 2:
            self.best_width = width
        self.best_line_break: Any = '\n'
        if line_break in ['\r', '\n', '\r\n']:
            self.best_line_break = line_break

        # Tag prefixes.
        self.tag_prefixes: Any = None

        # Prepared anchor and tag.
        self.prepared_anchor: Any = None
        self.prepared_tag: Any = None

        # Scalar analysis and style.
        self.analysis: Any = None
        self.style: Any = None

        self.scalar_after_indicator = True  # write a scalar on the same line as `---`

        self.alt_null = 'null'
+
@property
def stream(self) -> Any:
    """Return the attached output stream, raising if none was set."""
    try:
        return self._stream
    except AttributeError:
        raise YAMLStreamError('output stream needs to be specified')

@stream.setter
def stream(self, val: Any) -> None:
    """Attach an output stream; ``None`` is ignored, a write() method is required."""
    if val is not None:
        if hasattr(val, 'write'):
            self._stream = val
        else:
            raise YAMLStreamError('stream argument needs to have a write() method')
+
@property
def serializer(self) -> Any:
    """Locate the serializer via the dumper; fall back to ``self`` for cyaml."""
    try:
        dumper = self.dumper
        if hasattr(dumper, 'typ'):
            return dumper.serializer
        return dumper._serializer
    except AttributeError:
        return self  # cyaml: no separate serializer object
+
@property
def flow_level(self) -> int:
    # Depth of nested flow collections; 0 means block context.
    return len(self.flow_context)
+
def dispose(self) -> None:
    """Reset state-machine attributes so self-references can be collected."""
    self.states, self.state = [], None
+
def emit(self, event: Any) -> None:
    """Queue *event*; run the state machine while enough lookahead is buffered."""
    if dbg(DBG_EVENT):
        nprint(event)
    self.events.append(event)
    while not self.need_more_events():
        self.event = self.events.pop(0)
        self.state()  # each state handler may replace self.state
        self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
def need_more_events(self) -> bool:
    """Return True while more events must be buffered before emitting."""
    if not self.events:
        return True
    head = self.events[0]
    # Required lookahead depends on the kind of the queued event.
    lookahead = {DocumentStartEvent: 1, SequenceStartEvent: 2, MappingStartEvent: 3}
    for cls, count in lookahead.items():
        if isinstance(head, cls):
            return self.need_events(count)
    return False
+
def need_events(self, count: int) -> bool:
    """Check whether the buffer holds a complete construct plus *count* events."""
    depth = 0
    for ev in self.events[1:]:
        if isinstance(ev, (DocumentStartEvent, CollectionStartEvent)):
            depth += 1
        elif isinstance(ev, (DocumentEndEvent, CollectionEndEvent)):
            depth -= 1
        elif isinstance(ev, StreamEndEvent):
            depth = -1
        if depth < 0:
            # construct is complete (or stream ended): no more buffering needed
            return False
    return len(self.events) < count + 1
+
def increase_indent(
    self, flow: bool = False, sequence: Optional[bool] = None, indentless: bool = False,
) -> None:
    """Push the current indent and compute the indent for the new level.

    :param flow: the new level is a flow collection
    :param sequence: whether the new level is a sequence (recorded with the indent)
    :param indentless: keep the current indent (block seq directly inside mapping)
    """
    # NOTE(review): self.indents is a project type whose append() takes
    # (indent, sequence) — not a plain list.
    self.indents.append(self.indent, sequence)
    if self.indent is None:  # top level
        if flow:
            # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
            #     self.best_map_indent
            # self.indent = self.best_sequence_indent
            self.indent = self.requested_indent
        else:
            self.indent = 0
    elif not indentless:
        # step by the configured sequence or mapping indent for this level
        self.indent += (
            self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
        )
        # if self.indents.last_seq():
        #     if self.indent == 0: # top level block sequence
        #         self.indent = self.best_sequence_indent - self.sequence_dash_offset
        #     else:
        #         self.indent += self.best_sequence_indent
        # else:
        #     self.indent += self.best_map_indent
+
+ # States.
+
+ # Stream handlers.
+
def expect_stream_start(self) -> None:
    """Handle StreamStartEvent: remember its encoding and open the stream."""
    if not isinstance(self.event, StreamStartEvent):
        raise EmitterError(f'expected StreamStartEvent, but got {self.event!s}')
    if self.event.encoding and not hasattr(self.stream, 'encoding'):
        self.encoding = self.event.encoding
    self.write_stream_start()
    self.state = self.expect_first_document_start
+
def expect_nothing(self) -> None:
    """Terminal state: receiving any further event is an error."""
    raise EmitterError(f'expected nothing, but got {self.event!s}')
+
+ # Document handlers.
+
def expect_first_document_start(self) -> Any:
    # The first document may omit the '---' marker (handled via first=True).
    return self.expect_document_start(first=True)
+
def expect_document_start(self, first: bool = False) -> None:
    """Emit a document start: directives, optional '---', or finish the stream.

    :param first: this is the first document in the stream (may be implicit)
    """
    if isinstance(self.event, DocumentStartEvent):
        if (self.event.version or self.event.tags) and self.open_ended:
            # close the previous open-ended document before writing directives
            self.write_indicator('...', True)
            self.write_indent()
        if self.event.version:
            version_text = self.prepare_version(self.event.version)
            self.write_version_directive(version_text)
        self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
        if self.event.tags:
            handles = sorted(self.event.tags.keys())
            for handle in handles:
                prefix = self.event.tags[handle]
                self.tag_prefixes[prefix] = handle
                handle_text = self.prepare_tag_handle(handle)
                prefix_text = self.prepare_tag_prefix(prefix)
                self.write_tag_directive(handle_text, prefix_text)
        # '---' may be omitted for a plain first document without directives
        implicit = (
            first
            and not self.event.explicit
            and not self.canonical
            and not self.event.version
            and not self.event.tags
            and not self.check_empty_document()
        )
        if not implicit:
            self.write_indent()
            self.write_indicator('---', True)
            if self.canonical:
                self.write_indent()
        self.state = self.expect_document_root
    elif isinstance(self.event, StreamEndEvent):
        if self.open_ended:
            # terminate a document that had no explicit end marker
            self.write_indicator('...', True)
            self.write_indent()
        self.write_stream_end()
        self.state = self.expect_nothing
    else:
        raise EmitterError(f'expected DocumentStartEvent, but got {self.event!s}')
+
def expect_document_end(self) -> None:
    """Write the optional '...' end indicator and return to document-start state."""
    if not isinstance(self.event, DocumentEndEvent):
        raise EmitterError(f'expected DocumentEndEvent, but got {self.event!s}')
    self.write_indent()
    if self.event.explicit:
        self.write_indicator('...', True)
        self.write_indent()
    self.flush_stream()
    self.state = self.expect_document_start
+
def expect_document_root(self) -> None:
    """Emit the document's root node; afterwards expect the document end."""
    self.states.append(self.expect_document_end)
    self.expect_node(root=True)
+
+ # Node handlers.
+
def expect_node(
    self,
    root: bool = False,
    sequence: bool = False,
    mapping: bool = False,
    simple_key: bool = False,
) -> None:
    """Dispatch the current event to the matching node emitter.

    The keyword flags describe the surrounding context; they are stored on
    ``self`` so the scalar/collection emitters can inspect where the node
    appears.

    :raises EmitterError: when the current event is not a node event
    """
    self.root_context = root
    self.sequence_context = sequence  # not used in PyYAML
    force_flow_indent = False
    self.mapping_context = mapping
    self.simple_key_context = simple_key
    if isinstance(self.event, AliasEvent):
        self.expect_alias()
    elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
        if (
            self.process_anchor('&')
            and isinstance(self.event, ScalarEvent)
            and self.sequence_context
        ):
            self.sequence_context = False
        if (
            root
            and isinstance(self.event, ScalarEvent)
            and not self.scalar_after_indicator
        ):
            self.write_indent()
        self.process_tag()
        if isinstance(self.event, ScalarEvent):
            # nprint('@', self.indention, self.no_newline, self.column)
            self.expect_scalar()
        elif isinstance(self.event, SequenceStartEvent):
            # nprint('@', self.indention, self.no_newline, self.column)
            i2, n2 = self.indention, self.no_newline  # NOQA
            if self.event.comment:
                if self.event.flow_style is False:
                    if self.write_post_comment(self.event):
                        self.indention = False
                        self.no_newline = True
                if self.event.flow_style:
                    column = self.column
                if self.write_pre_comment(self.event):
                    if self.event.flow_style:
                        # force_flow_indent = True
                        force_flow_indent = not self.indents.values[-1][1]
                    self.indention = i2
                    self.no_newline = not self.indention
                if self.event.flow_style:
                    self.column = column
            if (
                self.flow_level
                or self.canonical
                or self.event.flow_style
                or self.check_empty_sequence()
            ):
                self.expect_flow_sequence(force_flow_indent)
            else:
                self.expect_block_sequence()
        elif isinstance(self.event, MappingStartEvent):
            if self.event.flow_style is False and self.event.comment:
                self.write_post_comment(self.event)
            if self.event.comment and self.event.comment[1]:
                self.write_pre_comment(self.event)
            if self.event.flow_style and self.indents.values:
                force_flow_indent = not self.indents.values[-1][1]
            if (
                self.flow_level
                or self.canonical
                or self.event.flow_style
                or self.check_empty_mapping()
            ):
                self.expect_flow_mapping(
                    single=self.event.nr_items == 1, force_flow_indent=force_flow_indent,
                )
            else:
                self.expect_block_mapping()
    else:
        # bug fix: the message was missing its f-prefix and printed literally
        raise EmitterError(f'expected NodeEvent, but got {self.event!s}')
+
def expect_alias(self) -> None:
    """Emit an alias ('*anchor') for the current AliasEvent and pop the state."""
    if self.event.anchor is None:
        raise EmitterError('anchor is not specified for alias')
    self.process_anchor('*')
    self.state = self.states.pop()
+
def expect_scalar(self) -> None:
    """Emit a scalar node: indent like a flow node, write it, restore state."""
    self.increase_indent(flow=True)
    self.process_scalar()
    self.indent = self.indents.pop()
    self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
def expect_flow_sequence(self, force_flow_indent: Optional[bool] = False) -> None:
    """Open a flow sequence ('['), indenting before or after the bracket.

    :param force_flow_indent: indent before writing '[' (set when a
        pre-comment pushed the collection to a new line)
    """
    if force_flow_indent:
        self.increase_indent(flow=True, sequence=True)
    ind = self.indents.seq_flow_align(
        self.best_sequence_indent, self.column, force_flow_indent,
    )
    self.write_indicator(' ' * ind + self.flow_seq_start, True, whitespace=True)
    if not force_flow_indent:
        self.increase_indent(flow=True, sequence=True)
    self.flow_context.append('[')
    self.state = self.expect_first_flow_sequence_item
+
def expect_first_flow_sequence_item(self) -> None:
    """Emit the first flow sequence item, or close an empty flow sequence."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        popped = self.flow_context.pop()
        assert popped == '['
        self.write_indicator(self.flow_seq_end, False)
        if self.event.comment and self.event.comment[0]:
            # eol comment on empty flow sequence
            self.write_post_comment(self.event)
        elif self.flow_level == 0:
            self.write_line_break()
        self.state = self.states.pop()
    else:
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
+
def expect_flow_sequence_item(self) -> None:
    """Emit a subsequent flow sequence item, or close the sequence with ']'."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        popped = self.flow_context.pop()
        assert popped == '['
        if self.canonical:
            # ToDo: so-39595807, maybe add a space to the flow_seq_separator
            # and strip the last space, if space then indent, else do not
            # not sure that [1,2,3] is a valid YAML seq
            self.write_indicator(self.flow_seq_separator, False)
            self.write_indent()
        self.write_indicator(self.flow_seq_end, False)
        if self.event.comment and self.event.comment[0]:
            # eol comment on flow sequence
            self.write_post_comment(self.event)
        else:
            self.no_newline = False
        self.state = self.states.pop()
    else:
        self.write_indicator(self.flow_seq_separator, False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
def expect_flow_mapping(
    self, single: Optional[bool] = False, force_flow_indent: Optional[bool] = False,
) -> None:
    """Open a flow mapping ('{', or nothing for single-pair-in-sequence).

    :param single: the mapping has exactly one key/value pair
    :param force_flow_indent: indent before writing the opening indicator
    """
    if force_flow_indent:
        self.increase_indent(flow=True, sequence=False)
    ind = self.indents.seq_flow_align(
        self.best_sequence_indent, self.column, force_flow_indent,
    )
    map_init = self.flow_map_start
    if (
        single
        and self.flow_level
        and self.flow_context[-1] == '['
        and not self.canonical
        and not self.brace_single_entry_mapping_in_flow_sequence
    ):
        # single map item with flow context, no curly braces necessary
        map_init = ''
    self.write_indicator(' ' * ind + map_init, True, whitespace=True)
    self.flow_context.append(map_init)
    if not force_flow_indent:
        self.increase_indent(flow=True, sequence=False)
    self.state = self.expect_first_flow_mapping_key
+
def expect_first_flow_mapping_key(self) -> None:
    """Emit the first flow mapping key, or close an empty flow mapping."""
    if isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        popped = self.flow_context.pop()
        assert popped == '{'  # empty flow mapping
        self.write_indicator(self.flow_map_end, False)
        if self.event.comment and self.event.comment[0]:
            # eol comment on empty mapping
            self.write_post_comment(self.event)
        elif self.flow_level == 0:
            self.write_line_break()
        self.state = self.states.pop()
    else:
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            # complex key: needs the explicit '?' indicator
            self.write_indicator('?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
+
def expect_flow_mapping_key(self) -> None:
    """Emit a subsequent flow mapping key, or close the mapping with '}'."""
    if isinstance(self.event, MappingEndEvent):
        # if self.event.comment and self.event.comment[1]:
        #     self.write_pre_comment(self.event)
        self.indent = self.indents.pop()
        popped = self.flow_context.pop()
        assert popped in ['{', '']
        if self.canonical:
            self.write_indicator(self.flow_map_separator, False)
            self.write_indent()
        if popped != '':
            # '' marks a brace-less single-pair mapping: nothing to close
            self.write_indicator(self.flow_map_end, False)
        if self.event.comment and self.event.comment[0]:
            # eol comment on flow mapping, never reached on empty mappings
            self.write_post_comment(self.event)
        else:
            self.no_newline = False
        self.state = self.states.pop()
    else:
        self.write_indicator(self.flow_map_separator, False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
+
def expect_flow_mapping_simple_value(self) -> None:
    """Write ':' after a simple key and emit the value node."""
    if getattr(self.event, 'style', '?') != '-':  # suppress for flow style sets
        self.write_indicator(self.prefixed_colon, False)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
+
def expect_flow_mapping_value(self) -> None:
    """Write ':' for an explicit-key flow mapping value and emit the node."""
    needs_break = self.canonical or self.column > self.best_width
    if needs_break:
        self.write_indent()
    self.write_indicator(self.prefixed_colon, True)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
def expect_block_sequence(self) -> None:
    """Start a block sequence, using indentless mode inside a mapping value."""
    indentless = self.mapping_context and not self.indention
    if not self.compact_seq_seq and self.column != 0:
        self.write_line_break()
    self.increase_indent(flow=False, sequence=True, indentless=indentless)
    self.state = self.expect_first_block_sequence_item
+
def expect_first_block_sequence_item(self) -> Any:
    # Same handling as any other item, with first=True bookkeeping.
    return self.expect_block_sequence_item(first=True)
+
def expect_block_sequence_item(self, first: bool = False) -> None:
    """Emit a '- ' item of a block sequence, or finish the sequence.

    :param first: the first item cannot coincide with a SequenceEndEvent
    """
    if not first and isinstance(self.event, SequenceEndEvent):
        if self.event.comment and self.event.comment[1]:
            # final comments on a block list e.g. empty line
            self.write_pre_comment(self.event)
        self.indent = self.indents.pop()
        self.state = self.states.pop()
        self.no_newline = False
    else:
        if self.event.comment and self.event.comment[1]:
            self.write_pre_comment(self.event)
        # preserve the newline-suppression only when still at column 0
        nonl = self.no_newline if self.column == 0 else False
        self.write_indent()
        ind = self.sequence_dash_offset  # if len(self.indents) > 1 else 0
        self.write_indicator(' ' * ind + '-', True, indention=True)
        if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
            self.no_newline = True
        self.states.append(self.expect_block_sequence_item)
        self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
def expect_block_mapping(self) -> None:
    """Start a block mapping, breaking the line first when not compact."""
    if not self.mapping_context:
        if not (self.compact_seq_map or self.column == 0):
            self.write_line_break()
    self.increase_indent(flow=False, sequence=False)
    self.state = self.expect_first_block_mapping_key
+
def expect_first_block_mapping_key(self) -> Any:
    # Same handling as any other key, with first=True bookkeeping.
    # Annotation fixed from '-> None': the call's result is returned,
    # matching the sibling expect_first_block_sequence_item.
    return self.expect_block_mapping_key(first=True)
+
def expect_block_mapping_key(self, first: Any = False) -> None:
    """Emit a block mapping key (simple or explicit '?'), or finish the mapping.

    :param first: the first key cannot coincide with a MappingEndEvent
    """
    if not first and isinstance(self.event, MappingEndEvent):
        if self.event.comment and self.event.comment[1]:
            # final comments from a doc
            self.write_pre_comment(self.event)
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        if self.event.comment and self.event.comment[1]:
            # final comments from a doc
            self.write_pre_comment(self.event)
        self.write_indent()
        if self.check_simple_key():
            if not isinstance(
                self.event, (SequenceStartEvent, MappingStartEvent),
            ):  # sequence keys
                try:
                    if self.event.style == '?':
                        self.write_indicator('?', True, indention=True)
                except AttributeError:  # aliases have no style
                    pass
            self.states.append(self.expect_block_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
            # test on style for alias in !!set
            if isinstance(self.event, AliasEvent) and not self.event.style == '?':
                self.stream.write(' ')
        else:
            self.write_indicator('?', True, indention=True)
            self.states.append(self.expect_block_mapping_value)
            self.expect_node(mapping=True)
+
def expect_block_mapping_simple_value(self) -> None:
    """Write ':' (optionally column-aligned at top level) and emit the value."""
    if getattr(self.event, 'style', None) != '?':
        # prefix = ''
        if self.indent == 0 and self.top_level_colon_align is not None:
            # write non-prefixed colon, padded to the requested column
            c = ' ' * (self.top_level_colon_align - self.column) + self.colon
        else:
            c = self.prefixed_colon
        self.write_indicator(c, False)
    self.states.append(self.expect_block_mapping_key)
    self.expect_node(mapping=True)
+
def expect_block_mapping_value(self) -> None:
    """Write ':' on its own line for an explicit-key value and emit the node."""
    self.write_indent()
    self.write_indicator(self.prefixed_colon, True, indention=True)
    self.states.append(self.expect_block_mapping_key)
    self.expect_node(mapping=True)
+
+ # Checkers.
+
def check_empty_sequence(self) -> bool:
    """True when the current event starts a sequence that ends immediately."""
    if not isinstance(self.event, SequenceStartEvent):
        return False
    if not self.events:
        return False
    return isinstance(self.events[0], SequenceEndEvent)
+
def check_empty_mapping(self) -> bool:
    """True when the current event starts a mapping that ends immediately."""
    if not isinstance(self.event, MappingStartEvent):
        return False
    if not self.events:
        return False
    return isinstance(self.events[0], MappingEndEvent)
+
def check_empty_document(self) -> bool:
    """True when the upcoming document holds only an empty implicit scalar."""
    if not isinstance(self.event, DocumentStartEvent) or not self.events:
        return False
    nxt = self.events[0]
    if not isinstance(nxt, ScalarEvent):
        return False
    return (
        nxt.anchor is None
        and nxt.tag is None
        and nxt.implicit
        and nxt.value == ""
    )
+
def check_simple_key(self) -> bool:
    """Decide whether the current node may be written as a simple mapping key.

    A simple key must be short enough (anchor + tag + scalar below
    MAX_SIMPLE_KEY_LENGTH) and single-line; empty flow collections and
    aliases also qualify.  Prepared anchor/tag/analysis are cached on self
    for the subsequent emit of the same node.
    """
    length = 0
    if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        length += len(self.prepared_anchor)
    if (
        isinstance(self.event, (ScalarEvent, CollectionStartEvent))
        and self.event.tag is not None
    ):
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(self.event.ctag)
        length += len(self.prepared_tag)
    if isinstance(self.event, ScalarEvent):
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        length += len(self.analysis.scalar)
    return length < self.MAX_SIMPLE_KEY_LENGTH and (
        isinstance(self.event, AliasEvent)
        or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
        or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True)
        or (
            isinstance(self.event, ScalarEvent)
            # if there is an explicit style for an empty string, it is a simple key
            and not (self.analysis.empty and self.style and self.style not in '\'"')
            and not self.analysis.multiline
        )
        or self.check_empty_sequence()
        or self.check_empty_mapping()
    )
+
+ # Anchor, Tag, and Scalar processors.
+
def process_anchor(self, indicator: Any) -> bool:
    """Write '&anchor'/'*anchor' when the event carries one.

    :param indicator: '&' for a definition, '*' for an alias
    :return: True when the event had an anchor
    """
    anchor = self.event.anchor
    if anchor is None:
        self.prepared_anchor = None
        return False
    if self.prepared_anchor is None:
        self.prepared_anchor = self.prepare_anchor(anchor)
    if self.prepared_anchor:
        self.write_indicator(indicator + self.prepared_anchor, True)
        # issue 288: an anchor cancels the pending newline suppression
        self.no_newline = False
    self.prepared_anchor = None
    return True
+
def process_tag(self) -> None:
    """Write the node's tag if it cannot be left implicit for the chosen style."""
    tag = self.event.tag
    if isinstance(self.event, ScalarEvent):
        if self.style is None:
            self.style = self.choose_scalar_style()
        if (
            self.event.value == ''
            and self.style == "'"
            and tag == 'tag:yaml.org,2002:null'
            and self.alt_null is not None
        ):
            # render an empty single-quoted null as the alternative text
            self.event.value = self.alt_null
            self.analysis = None
            self.style = self.choose_scalar_style()
        if (not self.canonical or tag is None) and (
            (self.style == "" and self.event.implicit[0])
            or (self.style != "" and self.event.implicit[1])
        ):
            self.prepared_tag = None
            return
        if self.event.implicit[0] and tag is None:
            # implicit plain scalar forced into non-plain style: mark with '!'
            tag = '!'
            self.prepared_tag = None
    else:
        if (not self.canonical or tag is None) and self.event.implicit:
            self.prepared_tag = None
            return
    if tag is None:
        raise EmitterError('tag is not specified')
    if self.prepared_tag is None:
        self.prepared_tag = self.prepare_tag(self.event.ctag)
    if self.prepared_tag:
        self.write_indicator(self.prepared_tag, True)
        if (
            self.sequence_context
            and not self.flow_level
            and isinstance(self.event, ScalarEvent)
        ):
            self.no_newline = True
    self.prepared_tag = None
+
def choose_scalar_style(self) -> Any:
    """Select the output style for the current scalar event.

    Returns '' (plain), "'", '"', '|', '>' or None, based on the requested
    style, the scalar analysis and the surrounding context.
    """
    # issue 449 needs this otherwise emits single quoted empty string
    if self.event.value == '' and self.event.ctag.handle == '!!':
        return None
    if self.analysis is None:
        self.analysis = self.analyze_scalar(self.event.value)
    if self.event.style == '"' or self.canonical:
        return '"'
    if (not self.event.style or self.event.style == '?' or self.event.style == '-') and (
        self.event.implicit[0] or not self.event.implicit[2]
    ):
        if not (
            self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
        ) and (
            self.flow_level
            and self.analysis.allow_flow_plain
            or (not self.flow_level and self.analysis.allow_block_plain)
        ):
            return ""
        if self.event.style == '-':
            return ""
        # requested/forced block style below may still be honoured
        self.analysis.allow_block = True
    if self.event.style and self.event.style in '|>':
        if (
            not self.flow_level
            and not self.simple_key_context
            and self.analysis.allow_block
        ):
            return self.event.style
    if not self.event.style and self.analysis.allow_double_quoted:
        if "'" in self.event.value or '\n' in self.event.value:
            return '"'
    if not self.event.style or self.event.style == "'":
        if self.analysis.allow_single_quoted and not (
            self.simple_key_context and self.analysis.multiline
        ):
            return "'"
    return '"'
+
def process_scalar(self) -> None:
    """Write the current scalar in its chosen style and reset the caches."""
    if self.analysis is None:
        self.analysis = self.analyze_scalar(self.event.value)
    if self.style is None:
        self.style = self.choose_scalar_style()
    # splitting long lines is not allowed while a simple key is pending
    split = not self.simple_key_context
    # if self.analysis.multiline and split \
    #         and (not self.style or self.style in '\'\"'):
    #     self.write_indent()
    # nprint('xx', self.sequence_context, self.flow_level)
    if self.sequence_context and not self.flow_level:
        self.write_indent()
    if self.style == '"':
        self.write_double_quoted(self.analysis.scalar, split)
    elif self.style == "'":
        self.write_single_quoted(self.analysis.scalar, split)
    elif self.style == '>':
        try:
            cmx = self.event.comment[1][0]
        except (IndexError, TypeError):
            cmx = ""
        self.write_folded(self.analysis.scalar, cmx)
        if (
            self.event.comment
            and self.event.comment[0]
            and self.event.comment[0].column >= self.indent
        ):
            # comment following a folded scalar must dedent (issue 376)
            self.event.comment[0].column = self.indent - 1  # type: ignore
    elif self.style == '|':
        # self.write_literal(self.analysis.scalar, self.event.comment)
        try:
            cmx = self.event.comment[1][0]
        except (IndexError, TypeError):
            cmx = ""
        self.write_literal(self.analysis.scalar, cmx)
        if (
            self.event.comment
            and self.event.comment[0]
            and self.event.comment[0].column >= self.indent
        ):
            # comment following a literal scalar must dedent (issue 376)
            self.event.comment[0].column = self.indent - 1  # type: ignore
    else:
        self.write_plain(self.analysis.scalar, split)
    # invalidate per-node caches for the next scalar
    self.analysis = None
    self.style = None
    if self.event.comment:
        self.write_post_comment(self.event)
+
+ # Analyzers.
+
def prepare_version(self, version: Any) -> Any:
    """Format a ``(major, minor)`` version tuple as ``'M.m'``.

    :raises EmitterError: when the major version is not 1
    """
    major, minor = version
    text = f'{major:d}.{minor:d}'
    if major != 1:
        raise EmitterError(f'unsupported YAML version: {major:d}.{minor:d}')
    return text
+
def prepare_tag_handle(self, handle: Any) -> Any:
    """Validate a tag handle ('!', '!!' or '!name!') and return it unchanged.

    :raises EmitterError: on empty, unbracketed or illegal-character handles
    """
    if not handle:
        raise EmitterError('tag handle must not be empty')
    if handle[0] != '!' or handle[-1] != '!':
        raise EmitterError(f"tag handle must start and end with '!': {handle!r}")
    for ch in handle[1:-1]:
        # only ASCII alphanumerics plus '-' and '_' are allowed inside
        if not (ch.isascii() and (ch.isalnum() or ch in '-_')):
            raise EmitterError(f'invalid character {ch!r} in the tag handle: {handle!r}')
    return handle
+
def prepare_tag_prefix(self, prefix: Any) -> Any:
    """Percent-escape characters of a %TAG directive prefix that YAML disallows."""
    if not prefix:
        raise EmitterError('tag prefix must not be empty')
    allowed = "-;/?:@&=+$,_.~*'()[]"
    if self.dumper:
        version = getattr(self.dumper, 'version', (1, 2))
        if version is None or version >= (1, 2):
            # '#' needs no escaping from YAML 1.2 on
            allowed += '#'
    chunks: List[Any] = []
    start = 0
    # a leading '!' is always passed through verbatim
    end = 1 if prefix[0] == '!' else 0
    while end < len(prefix):
        ch = prefix[end]
        if ch.isascii() and (ch.isalnum() or ch in allowed):
            end += 1
        else:
            if start < end:
                chunks.append(prefix[start:end])
            start = end = end + 1
            chunks.append(f'%{ord(ch):02X}')
    if start < end:
        chunks.append(prefix[start:end])
    return "".join(chunks)
+
def prepare_tag(self, tag: Any) -> Any:
    """Render a full tag: shorthand via a known prefix, else verbatim '!<...>'."""
    if not tag:
        raise EmitterError('tag must not be empty')
    tag = str(tag)
    if tag in ('!', '!!'):
        return tag
    handle = None
    suffix = tag
    # scan prefixes in sorted order; the last (longest) match wins
    for prefix in sorted(self.tag_prefixes.keys()):
        if tag.startswith(prefix) and (prefix == '!' or len(prefix) < len(tag)):
            handle = self.tag_prefixes[prefix]
            suffix = tag[len(prefix):]
    allowed = "-;/?:@&=+$,_.~*'()[]"
    if self.dumper:
        version = getattr(self.dumper, 'version', (1, 2))
        if version is None or version >= (1, 2):
            allowed += '#'
    chunks: List[Any] = []
    start = end = 0
    while end < len(suffix):
        ch = suffix[end]
        ok = ch.isascii() and (ch.isalnum() or ch in allowed)
        if not ok and ch == '!' and handle != '!':
            ok = True
        if ok:
            end += 1
        else:
            if start < end:
                chunks.append(suffix[start:end])
            start = end = end + 1
            chunks.append(f'%{ord(ch):02X}')
    if start < end:
        chunks.append(suffix[start:end])
    suffix_text = "".join(chunks)
    if handle:
        return f'{handle!s}{suffix_text!s}'
    return f'!<{suffix_text!s}>'
+
def prepare_anchor(self, anchor: Any) -> Any:
    """Validate an anchor name and return it unchanged.

    :raises EmitterError: for an empty name or an illegal character
    """
    if not anchor:
        raise EmitterError('anchor must not be empty')
    bad = next((ch for ch in anchor if not check_anchorname_char(ch)), None)
    if bad is not None:
        raise EmitterError(f'invalid character {bad!r} in the anchor: {anchor!r}')
    return anchor
+
def analyze_scalar(self, scalar: Any) -> Any:
    """Scan *scalar* once and return a ScalarAnalysis describing which
    output styles (plain/single/double/block) are permissible for it."""
    # Empty scalar is a special case.
    if not scalar:
        return ScalarAnalysis(
            scalar=scalar,
            empty=True,
            multiline=False,
            allow_flow_plain=False,
            allow_block_plain=True,
            allow_single_quoted=True,
            allow_double_quoted=True,
            allow_block=False,
        )

    # Indicators and special characters.
    block_indicators = False
    flow_indicators = False
    line_breaks = False
    special_characters = False

    # Important whitespace combinations.
    leading_space = False
    leading_break = False
    trailing_space = False
    trailing_break = False
    break_space = False
    space_break = False

    # Check document indicators.
    if scalar.startswith('---') or scalar.startswith('...'):
        block_indicators = True
        flow_indicators = True

    # First character or preceded by a whitespace.
    preceeded_by_whitespace = True

    # Last character or followed by a whitespace.
    followed_by_whitespace = len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029'

    # The previous character is a space.
    previous_space = False

    # The previous character is a break.
    previous_break = False

    index = 0
    while index < len(scalar):
        ch = scalar[index]

        # Check for indicators.
        if index == 0:
            # Leading indicators are special characters.
            if ch in '#,[]{}&*!|>\'"%@`':
                flow_indicators = True
                block_indicators = True
            if ch in '?:':  # ToDo
                if self.serializer.use_version == (1, 1):
                    flow_indicators = True
                elif len(scalar) == 1:  # single character
                    flow_indicators = True
                if followed_by_whitespace:
                    block_indicators = True
            if ch == '-' and followed_by_whitespace:
                flow_indicators = True
                block_indicators = True
        else:
            # Some indicators cannot appear within a scalar as well.
            if ch in ',[]{}':  # http://yaml.org/spec/1.2/spec.html#id2788859
                flow_indicators = True
            if ch == '?' and self.serializer.use_version == (1, 1):
                flow_indicators = True
            if ch == ':':
                if followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            if ch == '#' and preceeded_by_whitespace:
                flow_indicators = True
                block_indicators = True

        # Check for line breaks, special, and unicode characters.
        if ch in '\n\x85\u2028\u2029':
            line_breaks = True
        if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
            if (
                ch == '\x85'
                or '\xA0' <= ch <= '\uD7FF'
                or '\uE000' <= ch <= '\uFFFD'
                or (self.unicode_supplementary and ('\U00010000' <= ch <= '\U0010FFFF'))
            ) and ch != '\uFEFF':
                # unicode_characters = True
                if not self.allow_unicode:
                    special_characters = True
            else:
                special_characters = True

        # Detect important whitespace combinations.
        if ch == ' ':
            if index == 0:
                leading_space = True
            if index == len(scalar) - 1:
                trailing_space = True
            if previous_break:
                break_space = True
            previous_space = True
            previous_break = False
        elif ch in '\n\x85\u2028\u2029':
            if index == 0:
                leading_break = True
            if index == len(scalar) - 1:
                trailing_break = True
            if previous_space:
                space_break = True
            previous_space = False
            previous_break = True
        else:
            previous_space = False
            previous_break = False

        # Prepare for the next character.
        index += 1
        preceeded_by_whitespace = ch in '\0 \t\r\n\x85\u2028\u2029'
        followed_by_whitespace = (
            index + 1 >= len(scalar) or scalar[index + 1] in '\0 \t\r\n\x85\u2028\u2029'
        )

    # Let's decide what styles are allowed.
    allow_flow_plain = True
    allow_block_plain = True
    allow_single_quoted = True
    allow_double_quoted = True
    allow_block = True

    # Leading and trailing whitespaces are bad for plain scalars.
    if leading_space or leading_break or trailing_space or trailing_break:
        allow_flow_plain = allow_block_plain = False

    # We do not permit trailing spaces for block scalars.
    if trailing_space:
        allow_block = False

    # Spaces at the beginning of a new line are only acceptable for block
    # scalars.
    if break_space:
        allow_flow_plain = allow_block_plain = allow_single_quoted = False

    # Spaces followed by breaks, as well as special character are only
    # allowed for double quoted scalars.
    if special_characters:
        allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False
    elif space_break:
        allow_flow_plain = allow_block_plain = allow_single_quoted = False
        if not self.allow_space_break:
            allow_block = False

    # Although the plain scalar writer supports breaks, we never emit
    # multiline plain scalars.
    if line_breaks:
        allow_flow_plain = allow_block_plain = False

    # Flow indicators are forbidden for flow plain scalars.
    if flow_indicators:
        allow_flow_plain = False

    # Block indicators are forbidden for block plain scalars.
    if block_indicators:
        allow_block_plain = False

    return ScalarAnalysis(
        scalar=scalar,
        empty=False,
        multiline=line_breaks,
        allow_flow_plain=allow_flow_plain,
        allow_block_plain=allow_block_plain,
        allow_single_quoted=allow_single_quoted,
        allow_double_quoted=allow_double_quoted,
        allow_block=allow_block,
    )
+
+ # Writers.
+
def flush_stream(self) -> None:
    """Flush the underlying stream when it supports flushing."""
    flush = getattr(self.stream, 'flush', None)
    if flush is not None:
        flush()
+
def write_stream_start(self) -> None:
    """Emit a BOM for UTF-16 encodings (UTF-8 output needs none)."""
    encoding = self.encoding
    if encoding and encoding.startswith('utf-16'):
        self.stream.write('\uFEFF'.encode(encoding))
+
def write_stream_end(self) -> None:
    # Nothing to write; just make sure buffered output reaches the stream.
    self.flush_stream()
+
def write_indicator(
    self,
    indicator: Any,
    need_whitespace: Any,
    whitespace: bool = False,
    indention: bool = False,
) -> None:
    """Write *indicator*, inserting a separating space unless one is pending.

    :param need_whitespace: the indicator must be preceded by whitespace
    :param whitespace: the indicator itself counts as trailing whitespace
    :param indention: the indicator keeps the line in "indentation" state
    """
    if self.whitespace or not need_whitespace:
        data = indicator
    else:
        data = ' ' + indicator
    self.whitespace = whitespace
    self.indention = self.indention and indention
    self.column += len(data)
    self.open_ended = False
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
+
    def write_indent(self) -> None:
        """Break the line if needed, then pad with spaces up to the current
        indentation level."""
        indent = self.indent or 0
        if (
            not self.indention
            or self.column > indent
            or (self.column == indent and not self.whitespace)
        ):
            if bool(self.no_newline):
                # one-shot suppression of the line break — set elsewhere;
                # TODO confirm which caller arms this flag
                self.no_newline = False
            else:
                self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = ' ' * (indent - self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)  # type: ignore
            self.stream.write(data)
+
    def write_line_break(self, data: Any = None) -> None:
        """Write a line break (best_line_break when *data* is None) and
        reset the column/whitespace bookkeeping."""
        if data is None:
            data = self.best_line_break
        self.whitespace = True
        self.indention = True
        self.line += 1
        self.column = 0
        if bool(self.encoding):
            data = data.encode(self.encoding)
        self.stream.write(data)
+
    def write_version_directive(self, version_text: Any) -> None:
        """Write a '%YAML <version>' directive on its own line."""
        data: Any = f'%YAML {version_text!s}'
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
+
    def write_tag_directive(self, handle_text: Any, prefix_text: Any) -> None:
        """Write a '%TAG <handle> <prefix>' directive on its own line."""
        data: Any = f'%TAG {handle_text!s} {prefix_text!s}'
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
+
+ # Scalar streams.
+
    def write_single_quoted(self, text: Any, split: Any = True) -> None:
        """Emit *text* as a single-quoted flow scalar.

        Single runs of spaces may be folded into line breaks when *split*
        is true and the line exceeds best_width; a quote is escaped by
        doubling it ('').
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator("'", True)
        # scan the text as runs of spaces / breaks / other characters
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # a run of spaces ended: fold a lone space at a breakable
                # position, otherwise write the run verbatim
                if ch is None or ch != ' ':
                    if (
                        start + 1 == end
                        and self.column > self.best_width
                        and split
                        and start != 0
                        and end != len(text)
                    ):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # a run of line breaks ended: replay the breaks, then indent
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                # ordinary characters: flush the run when a space, break or
                # quote is reached
                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == "'":
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == "'":
                # escape a single quote by doubling it
                data = "''"
                self.column += 2
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = ch == ' '
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
        self.write_indicator("'", False)
+
    # Characters that get a short backslash escape inside double-quoted
    # scalars; anything else unprintable falls back to \xXX/\uXXXX/\UXXXXXXXX
    # (see write_double_quoted).
    ESCAPE_REPLACEMENTS = {
        '\0': '0',
        '\x07': 'a',
        '\x08': 'b',
        '\x09': 't',
        '\x0A': 'n',
        '\x0B': 'v',
        '\x0C': 'f',
        '\x0D': 'r',
        '\x1B': 'e',
        '"': '"',
        '\\': '\\',
        '\x85': 'N',
        '\xA0': '_',
        '\u2028': 'L',
        '\u2029': 'P',
    }
+
    def write_double_quoted(self, text: Any, split: Any = True) -> None:
        """Emit *text* as a double-quoted flow scalar, escaping characters
        that cannot appear verbatim and folding long lines when *split*
        is true."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator('"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            # a character needs escaping when it is special, or unprintable,
            # or non-ASCII while allow_unicode is off
            if (
                ch is None
                or ch in '"\\\x85\u2028\u2029\uFEFF'
                or not (
                    '\x20' <= ch <= '\x7E'
                    or (
                        self.allow_unicode
                        and (
                            ('\xA0' <= ch <= '\uD7FF')
                            or ('\uE000' <= ch <= '\uFFFD')
                            or ('\U00010000' <= ch <= '\U0010FFFF')
                        )
                    )
                )
            ):
                # flush the verbatim run collected so far
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    # short escape when available, otherwise numeric escape
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = '\\' + self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= '\xFF':
                        data = '\\x%02X' % ord(ch)
                    elif ch <= '\uFFFF':
                        data = '\\u%04X' % ord(ch)
                    else:
                        data = '\\U%08X' % ord(ch)
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            if (
                0 < end < len(text) - 1
                and (ch == ' ' or start >= end)
                and self.column + (end - start) > self.best_width
                and split
            ):
                # SO https://stackoverflow.com/a/75634614/1307905
                # Decide whether the fold needs a trailing backslash; it can
                # be omitted when the following word contains no quotes and
                # is not surrounded by extra spaces.
                # NOTE(review): `text[end - 1 : end + 1] != ' '` compares a
                # two-character slice with a single space, so it is almost
                # always true — confirm the intended condition.
                need_backquote = True
                if len(text) > end:
                    try:
                        space_pos = text.index(' ', end)
                        if (
                            '"' not in text[end:space_pos]
                            and "'" not in text[end:space_pos]
                            and text[space_pos + 1] != ' '
                            and text[end - 1 : end + 1] != ' '
                        ):
                            need_backquote = False
                    except (ValueError, IndexError):
                        pass
                data = text[start:end] + ('\\' if need_backquote else '')
                if start < end:
                    start = end
                self.column += len(data)
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == ' ':
                    if not need_backquote:
                        # remove leading space it will load from the newline
                        start += 1
                    data = '\\' if need_backquote else ''
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator('"', False)
+
    def determine_block_hints(self, text: Any) -> Any:
        """Return (hints, indent, indicator) for a block scalar header.

        hints is the string written right after '|' or '>': an explicit
        indentation digit when required, plus a chomping indicator
        ('-' strip / '+' keep) derived from the trailing line breaks.
        """
        indent = 0
        indicator = ''
        hints = ''
        if text:
            if text[0] in ' \n\x85\u2028\u2029':
                # a leading space/break forces an explicit indent indicator
                indent = 2
                hints += str(indent)
            elif self.root_context:
                # at the document root, text containing '\n---' / '\n...'
                # followed by whitespace would look like a document marker;
                # force an explicit indent in that case
                for end in ['\n---', '\n...']:
                    pos = 0
                    while True:
                        pos = text.find(end, pos)
                        if pos == -1:
                            break
                        try:
                            if text[pos + 4] in ' \r\n':
                                break
                        except IndexError:
                            pass
                        pos += 1
                    if pos > -1:
                        break
                if pos > 0:
                    indent = 2
            if text[-1] not in '\n\x85\u2028\u2029':
                # no trailing break: strip chomping
                indicator = '-'
            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
                # multiple trailing breaks (or lone break): keep chomping
                indicator = '+'
            hints += indicator
        return hints, indent, indicator
+
+ def write_folded(self, text: Any, comment: Any) -> None:
+ hints, _indent, _indicator = self.determine_block_hints(text)
+ if not isinstance(comment, str):
+ comment = ''
+ self.write_indicator('>' + hints + comment, True)
+ if _indicator == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029\a':
+ if (
+ not leading_space
+ and ch is not None
+ and ch != ' '
+ and text[start] == '\n'
+ ):
+ self.write_line_break()
+ leading_space = ch == ' '
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start + 1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029\a':
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch == '\a':
+ if end < (len(text) - 1) and not text[end + 2].isspace():
+ self.write_line_break()
+ self.write_indent()
+ end += 2 # \a and the space that is inserted on the fold
+ else:
+ raise EmitterError('unexcpected fold indicator \\a before space')
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = ch in '\n\x85\u2028\u2029'
+ spaces = ch == ' '
+ end += 1
+
    def write_literal(self, text: Any, comment: Any = None) -> None:
        """Emit *text* as a literal block scalar ('|'), appending *comment*
        (when it is a string) to the header line."""
        hints, _indent, _indicator = self.determine_block_hints(text)
        if not isinstance(comment, str):
            comment = ''
        self.write_indicator('|' + hints + comment, True)
        if _indicator == '+':
            # keep-chomping scalars leave the document open ended
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # a run of line breaks ended: replay them, then indent
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        if self.root_context:
                            # at the document root, the explicit block indent
                            # is written on top of the current indent
                            idnx = self.indent if self.indent is not None else 0
                            self.stream.write(' ' * (_indent + idnx))
                        else:
                            self.write_indent()
                    start = end
            else:
                # ordinary characters: flush verbatim up to the next break
                if ch is None or ch in '\n\x85\u2028\u2029':
                    data = text[start:end]
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
+
    def write_plain(self, text: Any, split: Any = True) -> None:
        """Emit *text* as a plain (unquoted) scalar, folding on spaces when
        *split* is true."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
            else:
                self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # separate from the previous token with a single space
            data = ' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)  # type: ignore
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # a run of spaces ended: fold a lone space on a long line
                if ch != ' ':
                    if start + 1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)  # type: ignore
                        self.stream.write(data)
                    start = end
            elif breaks:
                # a run of line breaks ended: replay them, then indent
                if ch not in '\n\x85\u2028\u2029':  # type: ignore
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    if (
                        len(data) > self.best_width
                        and self.indent is not None
                        and self.column > self.indent
                    ):
                        # words longer than line length get a line of their own
                        self.write_indent()
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)  # type: ignore
                    try:
                        self.stream.write(data)
                    except: # NOQA
                        # debugging aid: show the data that failed to write
                        sys.stdout.write(repr(data) + '\n')
                        raise
                    start = end
            if ch is not None:
                spaces = ch == ' '
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
+
    def write_comment(self, comment: Any, pre: bool = False) -> None:
        """Write a comment token, aligning it to its original column where
        possible; when *pre* is false a trailing line break is written."""
        value = comment.value
        if not pre and value[-1] == '\n':
            value = value[:-1]
        try:
            # get original column position
            col = comment.start_mark.column
            if comment.value and comment.value.startswith('\n'):
                # never inject extra spaces if the comment starts with a newline
                # and not a real comment (e.g. if you have an empty line following a key-value
                col = self.column
            elif col < self.column + 1:
                # NOTE(review): bare expression — presumably `raise ValueError`
                # was intended so the handler below resets col; as written it
                # is a no-op and the nr_spaces logic below compensates.
                # Confirm against upstream before changing.
                ValueError
        except ValueError:
            col = self.column + 1
        try:
            # at least one space if the current column >= the start column of the comment
            # but not at the start of a line
            nr_spaces = col - self.column
            if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
                nr_spaces = 1
            value = ' ' * nr_spaces + value
            try:
                if bool(self.encoding):
                    value = value.encode(self.encoding)
            except UnicodeDecodeError:
                pass
            self.stream.write(value)
        except TypeError:
            raise
        if not pre:
            self.write_line_break()
+
    def write_pre_comment(self, event: Any) -> bool:
        """Write the comments stored before *event*; return True when the
        event carried a comment list."""
        comments = event.comment[1]
        if comments is None:
            return False
        try:
            start_events = (MappingStartEvent, SequenceStartEvent)
            for comment in comments:
                # collection-start events can be visited twice; emit each
                # pre-comment only once
                if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
                    continue
                if self.column != 0:
                    self.write_line_break()
                self.write_comment(comment, pre=True)
                if isinstance(event, start_events):
                    comment.pre_done = True
        except TypeError:
            # debugging aid for unexpected comment structures
            sys.stdout.write(f'eventtt {type(event)} {event}')
            raise
        return True
+
+ def write_post_comment(self, event: Any) -> bool:
+ if self.event.comment[0] is None:
+ return False
+ comment = event.comment[0]
+ self.write_comment(comment)
+ return True
+
+
class RoundTripEmitter(Emitter):
    """Emitter that re-emits tags using the original handle/suffix split."""

    def prepare_tag(self, ctag: Any) -> Any:
        """Return the serialized form of *ctag*: '<handle><suffix>' when a
        matching %TAG prefix exists, '!<suffix>' verbatim otherwise."""
        if not ctag:
            raise EmitterError('tag must not be empty')
        tag = str(ctag)
        # the non-specific tags pass through unchanged
        if tag == '!' or tag == '!!':
            return tag
        handle = ctag.handle
        suffix = ctag.suffix
        prefixes = sorted(self.tag_prefixes.keys())
        if handle is None:
            # NOTE(review): no break after a match — if several prefixes
            # match, suffix is sliced repeatedly; confirm this is intended.
            for prefix in prefixes:
                if tag.startswith(prefix) and (prefix == '!' or len(prefix) < len(tag)):
                    handle = self.tag_prefixes[prefix]
                    suffix = suffix[len(prefix) :]
        if handle:
            return f'{handle!s}{suffix!s}'
        else:
            return f'!<{suffix!s}>'
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/error.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/error.py
new file mode 100644
index 0000000000..4843fdb593
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/error.py
@@ -0,0 +1,297 @@
+# coding: utf-8
+
+import warnings
+import textwrap
+
+from typing import Any, Dict, Optional, List, Text # NOQA
+
+
# Names re-exported by `from ruamel.yaml.error import *`.
# NOTE(review): several public classes defined in this module (StreamMark,
# YAMLStreamError, YAMLWarning, YAMLFutureWarning,
# MantissaNoDotYAML1_1Warning) are not listed — confirm whether the
# omission is intentional.
__all__ = [
    'FileMark',
    'StringMark',
    'CommentMark',
    'YAMLError',
    'MarkedYAMLError',
    'ReusedAnchorWarning',
    'UnsafeLoaderWarning',
    'MarkedYAMLWarning',
    'MarkedYAMLFutureWarning',
]
+
+
class StreamMark:
    """Position of a point in an input stream: source name, absolute
    character index, and zero-based line/column."""

    __slots__ = 'name', 'index', 'line', 'column'

    def __init__(self, name: Any, index: int, line: int, column: int) -> None:
        self.name = name
        self.index = index
        self.line = line
        self.column = column

    def __str__(self) -> Any:
        # line/column are stored zero-based but shown one-based
        return (
            f' in "{self.name!s}", line {self.line + 1:d}, '
            f'column {self.column + 1:d}'
        )

    def __eq__(self, other: Any) -> bool:
        return (
            self.line == other.line
            and self.column == other.column
            and self.name == other.name
            and self.index == other.index
        )

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)
+
+
class FileMark(StreamMark):
    """StreamMark for file-based input (no buffer kept, no snippet)."""

    __slots__ = ()
+
+
class StringMark(StreamMark):
    """StreamMark that also keeps the input buffer so an annotated snippet
    of the offending line can be rendered in error messages."""

    __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'

    def __init__(
        self, name: Any, index: int, line: int, column: int, buffer: Any, pointer: Any,
    ) -> None:
        StreamMark.__init__(self, name, index, line, column)
        self.buffer = buffer
        self.pointer = pointer

    def get_snippet(self, indent: int = 4, max_length: int = 75) -> Any:
        """Return an indented extract of the line around the mark followed
        by a caret line, or None when no buffer is available."""
        if self.buffer is None:  # always False
            return None
        head = ""
        start = self.pointer
        # scan back to the start of the line, clipping to the length budget
        while start > 0 and self.buffer[start - 1] not in '\0\r\n\x85\u2028\u2029':
            start -= 1
            if self.pointer - start > max_length / 2 - 1:
                head = ' ... '
                start += 5
                break
        tail = ""
        end = self.pointer
        # scan forward to the end of the line, clipping to the length budget
        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
            end += 1
            if end - self.pointer > max_length / 2 - 1:
                tail = ' ... '
                end -= 5
                break
        snippet = self.buffer[start:end]
        # bug fix: removed a dead `caret = '^'` assignment that was
        # immediately overwritten by the line below
        caret = f'^ (line: {self.line + 1})'
        return (
            ' ' * indent
            + head
            + snippet
            + tail
            + '\n'
            + ' ' * (indent + self.pointer - start + len(head))
            + caret
        )

    def __str__(self) -> Any:
        snippet = self.get_snippet()
        where = f' in "{self.name!s}", line {self.line + 1:d}, column {self.column + 1:d}'
        if snippet is not None:
            where += ':\n' + snippet
        return where

    def __repr__(self) -> Any:
        # identical to __str__; the previously duplicated body was collapsed
        return self.__str__()
+
+
class CommentMark:
    """Minimal mark recording only the column of a comment."""

    __slots__ = ('column',)

    def __init__(self, column: Any) -> None:
        self.column = column
+
+
class YAMLError(Exception):
    """Base class for all errors raised by ruamel.yaml."""

    pass
+
+
class MarkedYAMLError(YAMLError):
    """YAML error carrying context/problem descriptions, their stream
    marks, and an optional note."""

    def __init__(
        self,
        context: Any = None,
        context_mark: Any = None,
        problem: Any = None,
        problem_mark: Any = None,
        note: Any = None,
        warn: Any = None,
    ) -> None:
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        # `warn` is accepted for signature compatibility but not stored

    def __str__(self) -> Any:
        parts: List[str] = []
        if self.context is not None:
            parts.append(self.context)
        # show the context mark only when it adds information beyond the
        # problem mark
        show_context_mark = self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        )
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note:
            parts.append(textwrap.dedent(self.note))
        return '\n'.join(parts)
+
+
class YAMLStreamError(Exception):
    """Raised for problems with the output stream itself."""

    pass
+
+
class YAMLWarning(Warning):
    """Base class for all warnings issued by ruamel.yaml."""

    pass
+
+
class MarkedYAMLWarning(YAMLWarning):
    """Warning counterpart of MarkedYAMLError; additionally carries a
    *warn* text appended to the message."""

    def __init__(
        self,
        context: Any = None,
        context_mark: Any = None,
        problem: Any = None,
        problem_mark: Any = None,
        note: Any = None,
        warn: Any = None,
    ) -> None:
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        self.warn = warn

    def __str__(self) -> Any:
        parts: List[str] = []
        if self.context is not None:
            parts.append(self.context)
        # show the context mark only when it adds information beyond the
        # problem mark
        show_context_mark = self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        )
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note:
            parts.append(textwrap.dedent(self.note))
        if self.warn:
            parts.append(textwrap.dedent(self.warn))
        return '\n'.join(parts)
+
+
class ReusedAnchorWarning(YAMLWarning):
    """Warning issued when an anchor name is defined more than once."""

    pass
+
+
class UnsafeLoaderWarning(YAMLWarning):
    """Warning issued when plain load() is used with the default, unsafe
    Loader."""

    # user-facing message; must stay stable as callers may match on it
    text = """
The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
Alternatively include the following in your code:

 import warnings
 warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)

In most other cases you should consider using 'safe_load(stream)'"""
    # cleanup: removed a redundant trailing `pass` (the class body is
    # non-empty, so it had no effect)
+
+
+warnings.simplefilter('once', UnsafeLoaderWarning)
+
+
class MantissaNoDotYAML1_1Warning(YAMLWarning):
    """Warning for YAML 1.1 floats written without a dot in the mantissa."""

    def __init__(self, node: Any, flt_str: Any) -> None:
        # node: the offending scalar node (used for its start_mark position)
        self.node = node
        # flt: the original float text as it appeared in the document
        self.flt = flt_str

    def __str__(self) -> Any:
        line = self.node.start_mark.line
        col = self.node.start_mark.column
        # encoding fix: the trademark sign was mojibake ('â„¢', a UTF-8 '™'
        # mis-decoded as latin-1); restored to the intended character
        return f"""
In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2

Correct your float: "{self.flt}" on line: {line}, column: {col}

or alternatively include the following in your code:

 import warnings
 warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)

"""
+
+
+warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
+
+
class YAMLFutureWarning(Warning):
    """Base class for warnings about behavior that will change in a
    future ruamel.yaml release."""

    pass
+
+
class MarkedYAMLFutureWarning(YAMLFutureWarning):
    """Future-behavior warning carrying marks, a note and a *warn* text
    (same layout as MarkedYAMLWarning)."""

    def __init__(
        self,
        context: Any = None,
        context_mark: Any = None,
        problem: Any = None,
        problem_mark: Any = None,
        note: Any = None,
        warn: Any = None,
    ) -> None:
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        self.warn = warn

    def __str__(self) -> Any:
        parts: List[str] = []
        if self.context is not None:
            parts.append(self.context)
        # show the context mark only when it adds information beyond the
        # problem mark
        show_context_mark = self.context_mark is not None and (
            self.problem is None
            or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column
        )
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note:
            parts.append(textwrap.dedent(self.note))
        if self.warn:
            parts.append(textwrap.dedent(self.warn))
        return '\n'.join(parts)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/events.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/events.py
new file mode 100644
index 0000000000..a570a0d2f0
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/events.py
@@ -0,0 +1,264 @@
+# coding: utf-8
+
+# Abstract classes.
+
+from typing import Any, Dict, Optional, List # NOQA
+from ruamel.yaml.tag import Tag
+
# Set to True to include start/end line:column positions in Event.__repr__.
SHOW_LINES = False


def CommentCheck() -> None:
    """Sentinel used as the default for Event's ``comment`` argument, to
    distinguish 'no argument given' from an explicit None."""
    pass
+
+
class Event:
    """Base class for all YAML events produced by the parser and consumed
    by the emitter."""

    __slots__ = 'start_mark', 'end_mark', 'comment'
    # compact single-token representation used by compact_repr()
    crepr = 'Unspecified Event'

    def __init__(
        self, start_mark: Any = None, end_mark: Any = None, comment: Any = CommentCheck,
    ) -> None:
        self.start_mark = start_mark
        self.end_mark = end_mark
        # CommentCheck is a sentinel default; an explicit None stays None
        if comment is CommentCheck:
            comment = None
        self.comment = comment

    def __repr__(self) -> Any:
        # Cleanup: the original wrapped this in `if True:` with a dead
        # `else:` branch holding an older implementation; the dead code was
        # removed, behavior is unchanged.
        arguments = []
        if hasattr(self, 'value'):
            # if you use repr(getattr(self, 'value')) then flake8 complains about
            # abuse of getattr with a constant. When you change to self.value
            # then mypy throws an error
            arguments.append(repr(self.value))
        for key in ['anchor', 'tag', 'implicit', 'flow_style', 'style']:
            v = getattr(self, key, None)
            if v is not None:
                arguments.append(f'{key!s}={v!r}')
        if self.comment not in [None, CommentCheck]:
            arguments.append(f'comment={self.comment!r}')
        if SHOW_LINES:
            arguments.append(
                f'({self.start_mark.line}:{self.start_mark.column}/'
                f'{self.end_mark.line}:{self.end_mark.column})',
            )
        return f'{self.__class__.__name__!s}({", ".join(arguments)!s})'

    def compact_repr(self) -> str:
        return f'{self.crepr}'
+
+
class NodeEvent(Event):
    """Event that additionally carries an optional anchor (&name)."""

    __slots__ = ('anchor',)

    def __init__(
        self, anchor: Any, start_mark: Any = None, end_mark: Any = None, comment: Any = None,
    ) -> None:
        Event.__init__(self, start_mark, end_mark, comment)
        self.anchor = anchor
+
+
class CollectionStartEvent(NodeEvent):
    """Common base for sequence/mapping start events."""

    __slots__ = 'ctag', 'implicit', 'flow_style', 'nr_items'

    def __init__(
        self,
        anchor: Any,
        tag: Any,
        implicit: Any,
        start_mark: Any = None,
        end_mark: Any = None,
        flow_style: Any = None,
        comment: Any = None,
        nr_items: Optional[int] = None,
    ) -> None:
        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
        # the tag is stored as its internal (Tag object) form in ctag
        self.ctag = tag
        self.implicit = implicit
        self.flow_style = flow_style
        self.nr_items = nr_items

    @property
    def tag(self) -> Optional[str]:
        # string form of the internal tag object, or None
        return None if self.ctag is None else str(self.ctag)
+
+
class CollectionEndEvent(Event):
    """Common base for sequence/mapping end events."""

    __slots__ = ()
+
+
+# Implementations.
+
+
class StreamStartEvent(Event):
    """First event of every stream; carries the output encoding."""

    __slots__ = ('encoding',)
    crepr = '+STR'

    def __init__(
        self,
        start_mark: Any = None,
        end_mark: Any = None,
        encoding: Any = None,
        comment: Any = None,
    ) -> None:
        Event.__init__(self, start_mark, end_mark, comment)
        self.encoding = encoding
+
+
class StreamEndEvent(Event):
    """Last event of every stream."""

    __slots__ = ()
    crepr = '-STR'
+
+
class DocumentStartEvent(Event):
    """Start of a document; carries the '---' explicitness flag, the YAML
    version and the %TAG directives."""

    __slots__ = 'explicit', 'version', 'tags'
    crepr = '+DOC'

    def __init__(
        self,
        start_mark: Any = None,
        end_mark: Any = None,
        explicit: Any = None,
        version: Any = None,
        tags: Any = None,
        comment: Any = None,
    ) -> None:
        Event.__init__(self, start_mark, end_mark, comment)
        self.explicit = explicit
        self.version = version
        self.tags = tags

    def compact_repr(self) -> str:
        # explicit documents show the '---' marker
        start = ' ---' if self.explicit else ''
        return f'{self.crepr}{start}'
+
+
class DocumentEndEvent(Event):
    """End of a document; carries the '...' explicitness flag."""

    __slots__ = ('explicit',)
    crepr = '-DOC'

    def __init__(
        self,
        start_mark: Any = None,
        end_mark: Any = None,
        explicit: Any = None,
        comment: Any = None,
    ) -> None:
        Event.__init__(self, start_mark, end_mark, comment)
        self.explicit = explicit

    def compact_repr(self) -> str:
        # explicit document ends show the '...' marker
        end = ' ...' if self.explicit else ''
        return f'{self.crepr}{end}'
+
+
class AliasEvent(NodeEvent):
    """Event for an alias (*anchor) reference."""

    # NOTE(review): string-form __slots__ (one slot named 'style'); works,
    # but other classes here use tuples — confirm intentional.
    __slots__ = 'style'
    crepr = '=ALI'

    def __init__(
        self,
        anchor: Any,
        start_mark: Any = None,
        end_mark: Any = None,
        style: Any = None,
        comment: Any = None,
    ) -> None:
        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
        self.style = style

    def compact_repr(self) -> str:
        return f'{self.crepr} *{self.anchor}'
+
+
class ScalarEvent(NodeEvent):
    """Event for a single scalar value, with tag, style and implicit
    resolution flags."""

    __slots__ = 'ctag', 'implicit', 'value', 'style'
    crepr = '=VAL'

    def __init__(
        self,
        anchor: Any,
        tag: Any,
        implicit: Any,
        value: Any,
        start_mark: Any = None,
        end_mark: Any = None,
        style: Any = None,
        comment: Any = None,
    ) -> None:
        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
        # the tag is stored as its internal (Tag object) form in ctag
        self.ctag = tag
        self.implicit = implicit
        self.value = value
        self.style = style

    @property
    def tag(self) -> Optional[str]:
        # string form of the internal tag object, or None
        return None if self.ctag is None else str(self.ctag)

    @tag.setter
    def tag(self, val: Any) -> None:
        # plain strings are wrapped into a Tag object
        if isinstance(val, str):
            val = Tag(suffix=val)
        self.ctag = val

    def compact_repr(self) -> str:
        # one-line form used by event dumps, e.g. "=VAL &a <tag> :value"
        style = ':' if self.style is None else self.style
        anchor = f'&{self.anchor} ' if self.anchor else ''
        tag = f'<{self.tag!s}> ' if self.tag else ''
        value = self.value
        # escape control characters so the repr stays on a single line
        for ch, rep in [
            ('\\', '\\\\'),
            ('\t', '\\t'),
            ('\n', '\\n'),
            ('\a', ''),  # remove from folded
            ('\r', '\\r'),
            ('\b', '\\b'),
        ]:
            value = value.replace(ch, rep)
        return f'{self.crepr} {anchor}{tag}{style}{value}'
+
+
class SequenceStartEvent(CollectionStartEvent):
    """Start of a sequence node."""

    __slots__ = ()
    crepr = '+SEQ'

    def compact_repr(self) -> str:
        flow = ' []' if self.flow_style else ''
        anchor = f' &{self.anchor}' if self.anchor else ''
        tag = f' <{self.tag!s}>' if self.tag else ''
        return f'{self.crepr}{flow}{anchor}{tag}'
+
+
class SequenceEndEvent(CollectionEndEvent):
    """End of a sequence node."""

    __slots__ = ()
    crepr = '-SEQ'
+
+
class MappingStartEvent(CollectionStartEvent):
    """Start of a mapping node."""

    __slots__ = ()
    crepr = '+MAP'

    def compact_repr(self) -> str:
        flow = ' {}' if self.flow_style else ''
        anchor = f' &{self.anchor}' if self.anchor else ''
        tag = f' <{self.tag!s}>' if self.tag else ''
        return f'{self.crepr}{flow}{anchor}{tag}'
+
+
class MappingEndEvent(CollectionEndEvent):
    """End of a mapping node."""

    __slots__ = ()
    crepr = '-MAP'
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/loader.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/loader.py
new file mode 100644
index 0000000000..d6c708b260
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/loader.py
@@ -0,0 +1,90 @@
+# coding: utf-8
+
+from ruamel.yaml.reader import Reader
+from ruamel.yaml.scanner import Scanner, RoundTripScanner
+from ruamel.yaml.parser import Parser, RoundTripParser
+from ruamel.yaml.composer import Composer
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from ruamel.yaml.resolver import VersionedResolver
+
+from typing import Any, Dict, List, Union, Optional # NOQA
+from ruamel.yaml.compat import StreamTextType, VersionType # NOQA
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
+
+
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
    """Loader that constructs only base node types (no type resolution)."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # preserve_quotes is accepted for signature parity with
        # RoundTripLoader; it is not used by this loader.
        self.comment_handling = None
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        BaseConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
+
+
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
    """Loader restricted to safe, standard YAML types."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # preserve_quotes is accepted for signature parity with
        # RoundTripLoader; it is not used by this loader.
        self.comment_handling = None
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        SafeConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
+
+
class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
    """Full (unsafe) loader that can construct arbitrary Python objects."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # preserve_quotes is accepted for signature parity with
        # RoundTripLoader; it is not used by this loader.
        self.comment_handling = None
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        Constructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
+
+
class RoundTripLoader(
    Reader,
    RoundTripScanner,
    RoundTripParser,
    Composer,
    RoundTripConstructor,
    VersionedResolver,
):
    """Loader that preserves comments, quoting and formatting so the
    document can be dumped back unchanged."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # self.reader = Reader.__init__(self, stream)
        self.comment_handling = None  # issue 385
        Reader.__init__(self, stream, loader=self)
        RoundTripScanner.__init__(self, loader=self)
        RoundTripParser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        # only the round-trip constructor actually honours preserve_quotes
        RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/main.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/main.py
new file mode 100644
index 0000000000..92ec81711d
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/main.py
@@ -0,0 +1,1664 @@
+# coding: utf-8
+
+import sys
+import os
+import warnings
+import glob
+from importlib import import_module
+
+
+import ruamel.yaml
+from ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA
+
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
+
+from ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
+from ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
+from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, nprint, nprintf # NOQA
+from ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA
+from ruamel.yaml.representer import (
+ BaseRepresenter,
+ SafeRepresenter,
+ Representer,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from ruamel.yaml.loader import Loader as UnsafeLoader # NOQA
+from ruamel.yaml.comments import CommentedMap, CommentedSeq, C_PRE
+
+from typing import List, Set, Dict, Union, Any, Callable, Optional, Text, Type # NOQA
+from types import TracebackType
+from ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA
+from pathlib import Path # NOQA
+
+try:
+ from _ruamel_yaml import CParser, CEmitter # type: ignore
+except: # NOQA
+ CParser = CEmitter = None
+
+# import io
+
+
+# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
+# subset of abbreviations, which should be all caps according to PEP8
+
+
+class YAML:
+ def __init__(
+ self: Any,
+ *,
+ typ: Optional[Union[List[Text], Text]] = None,
+ pure: Any = False,
+ output: Any = None,
+ plug_ins: Any = None,
+ ) -> None: # input=None,
+ """
+ typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default)
+ 'safe' -> SafeLoader/SafeDumper,
+ 'unsafe' -> normal/unsafe Loader/Dumper
+ 'base' -> baseloader
+ pure: if True only use Python modules
+ input/output: needed to work as context manager
+ plug_ins: a list of plug-in files
+ """
+
+ self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ])
+ self.pure = pure
+
+ # self._input = input
+ self._output = output
+ self._context_manager: Any = None
+
+ self.plug_ins: List[Any] = []
+ for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
+ file_name = pu.replace(os.sep, '.')
+ self.plug_ins.append(import_module(file_name))
+ self.Resolver: Any = ruamel.yaml.resolver.VersionedResolver
+ self.allow_unicode = True
+ self.Reader: Any = None
+ self.Representer: Any = None
+ self.Constructor: Any = None
+ self.Scanner: Any = None
+ self.Serializer: Any = None
+ self.default_flow_style: Any = None
+ self.comment_handling = None
+ typ_found = 1
+ setup_rt = False
+ if 'rt' in self.typ:
+ setup_rt = True
+ elif 'safe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.SafeRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.SafeConstructor
+ elif 'base' in self.typ:
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Representer = ruamel.yaml.representer.BaseRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.BaseConstructor
+ elif 'unsafe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.Representer
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.Constructor
+ elif 'rtsc' in self.typ:
+ self.default_flow_style = False
+ # no optimized rt-dumper yet
+ self.Emitter = ruamel.yaml.emitter.RoundTripEmitter
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.Representer = ruamel.yaml.representer.RoundTripRepresenter
+ self.Scanner = ruamel.yaml.scanner.RoundTripScannerSC
+ # no optimized rt-parser yet
+ self.Parser = ruamel.yaml.parser.RoundTripParserSC
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
+ self.comment_handling = C_PRE
+ else:
+ setup_rt = True
+ typ_found = 0
+ if setup_rt:
+ self.default_flow_style = False
+ # no optimized rt-dumper yet
+ self.Emitter = ruamel.yaml.emitter.RoundTripEmitter
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.Representer = ruamel.yaml.representer.RoundTripRepresenter
+ self.Scanner = ruamel.yaml.scanner.RoundTripScanner
+ # no optimized rt-parser yet
+ self.Parser = ruamel.yaml.parser.RoundTripParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
+ del setup_rt
+ self.stream = None
+ self.canonical = None
+ self.old_indent = None
+ self.width: Union[int, None] = None
+ self.line_break = None
+
+ self.map_indent: Union[int, None] = None
+ self.sequence_indent: Union[int, None] = None
+ self.sequence_dash_offset: int = 0
+ self.compact_seq_seq = None
+ self.compact_seq_map = None
+ self.sort_base_mapping_type_on_output = None # default: sort
+
+ self.top_level_colon_align = None
+ self.prefix_colon = None
+ self._version: Optional[Any] = None
+ self.preserve_quotes: Optional[bool] = None
+ self.allow_duplicate_keys = False # duplicate keys in map, set
+ self.encoding = 'utf-8'
+ self.explicit_start: Union[bool, None] = None
+ self.explicit_end: Union[bool, None] = None
+ self.tags = None
+ self.default_style = None
+ self.top_level_block_style_scalar_no_indent_error_1_1 = False
+ # directives end indicator with single scalar document
+ self.scalar_after_indicator: Optional[bool] = None
+ # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
+ self.brace_single_entry_mapping_in_flow_sequence = False
+ for module in self.plug_ins:
+ if getattr(module, 'typ', None) in self.typ:
+ typ_found += 1
+ module.init_typ(self)
+ break
+ if typ_found == 0:
+ raise NotImplementedError(
+ f'typ "{self.typ}" not recognised (need to install plug-in?)',
+ )
+
+ @property
+ def reader(self) -> Any:
+ try:
+ return self._reader # type: ignore
+ except AttributeError:
+ self._reader = self.Reader(None, loader=self)
+ return self._reader
+
+ @property
+ def scanner(self) -> Any:
+ try:
+ return self._scanner # type: ignore
+ except AttributeError:
+ self._scanner = self.Scanner(loader=self)
+ return self._scanner
+
+ @property
+ def parser(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ if self.Parser is not CParser:
+ setattr(self, attr, self.Parser(loader=self))
+ else:
+ if getattr(self, '_stream', None) is None:
+ # wait for the stream
+ return None
+ else:
+ # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
+ # # pathlib.Path() instance
+ # setattr(self, attr, CParser(self._stream))
+ # else:
+ setattr(self, attr, CParser(self._stream))
+ # self._parser = self._composer = self
+ # nprint('scanner', self.loader.scanner)
+
+ return getattr(self, attr)
+
+ @property
+ def composer(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self.Composer(loader=self))
+ return getattr(self, attr)
+
+ @property
+ def constructor(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
+ cnst.allow_duplicate_keys = self.allow_duplicate_keys
+ setattr(self, attr, cnst)
+ return getattr(self, attr)
+
+ @property
+ def resolver(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self.Resolver(version=self.version, loader=self))
+ return getattr(self, attr)
+
+ @property
+ def emitter(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ if self.Emitter is not CEmitter:
+ _emitter = self.Emitter(
+ None,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ prefix_colon=self.prefix_colon,
+ brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA
+ dumper=self,
+ )
+ setattr(self, attr, _emitter)
+ if self.map_indent is not None:
+ _emitter.best_map_indent = self.map_indent
+ if self.sequence_indent is not None:
+ _emitter.best_sequence_indent = self.sequence_indent
+ if self.sequence_dash_offset is not None:
+ _emitter.sequence_dash_offset = self.sequence_dash_offset
+ # _emitter.block_seq_indent = self.sequence_dash_offset
+ if self.compact_seq_seq is not None:
+ _emitter.compact_seq_seq = self.compact_seq_seq
+ if self.compact_seq_map is not None:
+ _emitter.compact_seq_map = self.compact_seq_map
+ else:
+ if getattr(self, '_stream', None) is None:
+ # wait for the stream
+ return None
+ return None
+ return getattr(self, attr)
+
+ @property
+ def serializer(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(
+ self,
+ attr,
+ self.Serializer(
+ encoding=self.encoding,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ dumper=self,
+ ),
+ )
+ return getattr(self, attr)
+
+ @property
+ def representer(self) -> Any:
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ repres = self.Representer(
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ dumper=self,
+ )
+ if self.sort_base_mapping_type_on_output is not None:
+ repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output
+ setattr(self, attr, repres)
+ return getattr(self, attr)
+
+ def scan(self, stream: StreamTextType) -> Any:
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.scan(fp)
+ _, parser = self.get_constructor_parser(stream)
+ try:
+ while self.scanner.check_token():
+ yield self.scanner.get_token()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def parse(self, stream: StreamTextType) -> Any:
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.parse(fp)
+ _, parser = self.get_constructor_parser(stream)
+ try:
+ while parser.check_event():
+ yield parser.get_event()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def compose(self, stream: Union[Path, StreamTextType]) -> Any:
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.compose(fp)
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ return constructor.composer.get_single_node()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def compose_all(self, stream: Union[Path, StreamTextType]) -> Any:
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ while constructor.composer.check_node():
+ yield constructor.composer.get_node()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ # separate output resolver?
+
+ # def load(self, stream=None):
+ # if self._context_manager:
+ # if not self._input:
+ # raise TypeError("Missing input stream while dumping from context manager")
+ # for data in self._context_manager.load():
+ # yield data
+ # return
+ # if stream is None:
+ # raise TypeError("Need a stream argument when not loading from context manager")
+ # return self.load_one(stream)
+
+ def load(self, stream: Union[Path, StreamTextType]) -> Any:
+ """
+ at this point you either have the non-pure Parser (which has its own reader and
+ scanner) or you have the pure Parser.
+ If the pure Parser is set, then set the Reader and Scanner, if not already set.
+ If either the Scanner or Reader are set, you cannot use the non-pure Parser,
+ so reset it to the pure parser and set the Reader resp. Scanner if necessary
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.load(fp)
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ return constructor.get_single_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def load_all(self, stream: Union[Path, StreamTextType]) -> Any: # *, skip=None):
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('r') as fp:
+ for d in self.load_all(fp):
+ yield d
+ return
+ # if skip is None:
+ # skip = []
+ # elif isinstance(skip, int):
+ # skip = [skip]
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ while constructor.check_data():
+ yield constructor.get_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def get_constructor_parser(self, stream: StreamTextType) -> Any:
+ """
+ the old cyaml needs special setup, and therefore the stream
+ """
+ if self.Parser is not CParser:
+ if self.Reader is None:
+ self.Reader = ruamel.yaml.reader.Reader
+ if self.Scanner is None:
+ self.Scanner = ruamel.yaml.scanner.Scanner
+ self.reader.stream = stream
+ else:
+ if self.Reader is not None:
+ if self.Scanner is None:
+ self.Scanner = ruamel.yaml.scanner.Scanner
+ self.Parser = ruamel.yaml.parser.Parser
+ self.reader.stream = stream
+ elif self.Scanner is not None:
+ if self.Reader is None:
+ self.Reader = ruamel.yaml.reader.Reader
+ self.Parser = ruamel.yaml.parser.Parser
+ self.reader.stream = stream
+ else:
+ # combined C level reader>scanner>parser
+ # does some calls to the resolver, e.g. BaseResolver.descend_resolver
+            # if you just initialise the CParser, too much of resolver.py
+ # is actually used
+ rslvr = self.Resolver
+ # if rslvr is ruamel.yaml.resolver.VersionedResolver:
+ # rslvr = ruamel.yaml.resolver.Resolver
+
+ class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore
+ def __init__(
+ selfx,
+ stream: StreamTextType,
+ version: Optional[VersionType] = self.version,
+ preserve_quotes: Optional[bool] = None,
+ ) -> None:
+ # NOQA
+ CParser.__init__(selfx, stream)
+ selfx._parser = selfx._composer = selfx
+ self.Constructor.__init__(selfx, loader=selfx)
+ selfx.allow_duplicate_keys = self.allow_duplicate_keys
+ rslvr.__init__(selfx, version=version, loadumper=selfx)
+
+ self._stream = stream
+ loader = XLoader(stream)
+ return loader, loader
+ return self.constructor, self.parser
+
+ def emit(self, events: Any, stream: Any) -> None:
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ _, _, emitter = self.get_serializer_representer_emitter(stream, None)
+ try:
+ for event in events:
+ emitter.emit(event)
+ finally:
+ try:
+ emitter.dispose()
+ except AttributeError:
+ raise
+
+ def serialize(self, node: Any, stream: Optional[StreamType]) -> Any:
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ self.serialize_all([node], stream)
+
+ def serialize_all(self, nodes: Any, stream: Optional[StreamType]) -> Any:
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ serializer, _, emitter = self.get_serializer_representer_emitter(stream, None)
+ try:
+ serializer.open()
+ for node in nodes:
+ serializer.serialize(node)
+ serializer.close()
+ finally:
+ try:
+ emitter.dispose()
+ except AttributeError:
+ raise
+
+ def dump(
+ self: Any, data: Union[Path, StreamType], stream: Any = None, *, transform: Any = None,
+ ) -> Any:
+ if self._context_manager:
+ if not self._output:
+ raise TypeError('Missing output stream while dumping from context manager')
+ if transform is not None:
+ x = self.__class__.__name__
+ raise TypeError(
+ f'{x}.dump() in the context manager cannot have transform keyword',
+ )
+ self._context_manager.dump(data)
+ else: # old style
+ if stream is None:
+ raise TypeError('Need a stream argument when not dumping from context manager')
+ return self.dump_all([data], stream, transform=transform)
+
+ def dump_all(
+ self, documents: Any, stream: Union[Path, StreamType], *, transform: Any = None,
+ ) -> Any:
+ if self._context_manager:
+ raise NotImplementedError
+ self._output = stream
+ self._context_manager = YAMLContextManager(self, transform=transform)
+ for data in documents:
+ self._context_manager.dump(data)
+ self._context_manager.teardown_output()
+ self._output = None
+ self._context_manager = None
+
+ def Xdump_all(self, documents: Any, stream: Any, *, transform: Any = None) -> Any:
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ """
+ if not hasattr(stream, 'write') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('w') as fp:
+ return self.dump_all(documents, fp, transform=transform)
+ # The stream should have the methods `write` and possibly `flush`.
+ if self.top_level_colon_align is True:
+ tlca: Any = max([len(str(x)) for x in documents[0]])
+ else:
+ tlca = self.top_level_colon_align
+ if transform is not None:
+ fstream = stream
+ if self.encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ serializer, representer, emitter = self.get_serializer_representer_emitter(
+ stream, tlca,
+ )
+ try:
+ self.serializer.open()
+ for data in documents:
+ try:
+ self.representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+ self.serializer.close()
+ finally:
+ try:
+ self.emitter.dispose()
+ except AttributeError:
+ raise
+ # self.dumper.dispose() # cyaml
+ delattr(self, '_serializer')
+ delattr(self, '_emitter')
+ if transform:
+ val = stream.getvalue()
+ if self.encoding:
+ val = val.decode(self.encoding)
+ if fstream is None:
+ transform(val)
+ else:
+ fstream.write(transform(val))
+ return None
+
+ def get_serializer_representer_emitter(self, stream: StreamType, tlca: Any) -> Any:
+ # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
+ if self.Emitter is not CEmitter:
+ if self.Serializer is None:
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.emitter.stream = stream
+ self.emitter.top_level_colon_align = tlca
+ if self.scalar_after_indicator is not None:
+ self.emitter.scalar_after_indicator = self.scalar_after_indicator
+ return self.serializer, self.representer, self.emitter
+ if self.Serializer is not None:
+ # cannot set serializer with CEmitter
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.emitter.stream = stream
+ self.emitter.top_level_colon_align = tlca
+ if self.scalar_after_indicator is not None:
+ self.emitter.scalar_after_indicator = self.scalar_after_indicator
+ return self.serializer, self.representer, self.emitter
+ # C routines
+
+ rslvr = (
+ ruamel.yaml.resolver.BaseResolver
+ if 'base' in self.typ
+ else ruamel.yaml.resolver.Resolver
+ )
+
+ class XDumper(CEmitter, self.Representer, rslvr): # type: ignore
+ def __init__(
+ selfx: StreamType,
+ stream: Any,
+ default_style: Any = None,
+ default_flow_style: Any = None,
+ canonical: Optional[bool] = None,
+ indent: Optional[int] = None,
+ width: Optional[int] = None,
+ allow_unicode: Optional[bool] = None,
+ line_break: Any = None,
+ encoding: Any = None,
+ explicit_start: Optional[bool] = None,
+ explicit_end: Optional[bool] = None,
+ version: Any = None,
+ tags: Any = None,
+ block_seq_indent: Any = None,
+ top_level_colon_align: Any = None,
+ prefix_colon: Any = None,
+ ) -> None:
+ # NOQA
+ CEmitter.__init__(
+ selfx,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ selfx._emitter = selfx._serializer = selfx._representer = selfx
+ self.Representer.__init__(
+ selfx, default_style=default_style, default_flow_style=default_flow_style,
+ )
+ rslvr.__init__(selfx)
+
+ self._stream = stream
+ dumper = XDumper(
+ stream,
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ )
+ self._emitter = self._serializer = dumper
+ return dumper, dumper, dumper
+
+ # basic types
+ def map(self, **kw: Any) -> Any:
+ if 'rt' in self.typ:
+ return CommentedMap(**kw)
+ else:
+ return dict(**kw)
+
+ def seq(self, *args: Any) -> Any:
+ if 'rt' in self.typ:
+ return CommentedSeq(*args)
+ else:
+ return list(*args)
+
+ # helpers
+ def official_plug_ins(self) -> Any:
+ """search for list of subdirs that are plug-ins, if __file__ is not available, e.g.
+ single file installers that are not properly emulating a file-system (issue 324)
+ no plug-ins will be found. If any are packaged, you know which file that are
+ and you can explicitly provide it during instantiation:
+ yaml = ruamel.yaml.YAML(plug_ins=['ruamel/yaml/jinja2/__plug_in__'])
+ """
+ try:
+ bd = os.path.dirname(__file__)
+ except NameError:
+ return []
+ gpbd = os.path.dirname(os.path.dirname(bd))
+ res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
+ return res
+
+ def register_class(self, cls: Any) -> Any:
+ """
+ register a class for dumping/loading
+ - if it has attribute yaml_tag use that to register, else use class name
+ - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
+ as mapping
+ """
+ tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
+ try:
+ self.representer.add_representer(cls, cls.to_yaml)
+ except AttributeError:
+
+ def t_y(representer: Any, data: Any) -> Any:
+ return representer.represent_yaml_object(
+ tag, data, cls, flow_style=representer.default_flow_style,
+ )
+
+ self.representer.add_representer(cls, t_y)
+ try:
+ self.constructor.add_constructor(tag, cls.from_yaml)
+ except AttributeError:
+
+ def f_y(constructor: Any, node: Any) -> Any:
+ return constructor.construct_yaml_object(node, cls)
+
+ self.constructor.add_constructor(tag, f_y)
+ return cls
+
+ # ### context manager
+
+ def __enter__(self) -> Any:
+ self._context_manager = YAMLContextManager(self)
+ return self
+
+ def __exit__(
+ self,
+ typ: Optional[Type[BaseException]],
+ value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ ) -> None:
+ if typ:
+ nprint('typ', typ)
+ self._context_manager.teardown_output()
+ # self._context_manager.teardown_input()
+ self._context_manager = None
+
+ # ### backwards compatibility
+ def _indent(self, mapping: Any = None, sequence: Any = None, offset: Any = None) -> None:
+ if mapping is not None:
+ self.map_indent = mapping
+ if sequence is not None:
+ self.sequence_indent = sequence
+ if offset is not None:
+ self.sequence_dash_offset = offset
+
+ @property
+ def version(self) -> Optional[Any]:
+ return self._version
+
+ @version.setter
+ def version(self, val: Optional[VersionType]) -> None:
+ if val is None:
+ self._version = val
+ return
+ if isinstance(val, str):
+ sval = tuple(int(x) for x in val.split('.'))
+ else:
+ sval = tuple(int(x) for x in val)
+ assert len(sval) == 2, f'version can only have major.minor, got {val}'
+ assert sval[0] == 1, f'version major part can only be 1, got {val}'
+ assert sval[1] in [1, 2], f'version minor part can only be 2 or 1, got {val}'
+ self._version = sval
+
+ @property
+ def indent(self) -> Any:
+ return self._indent
+
+ @indent.setter
+ def indent(self, val: Any) -> None:
+ self.old_indent = val
+
+ @property
+ def block_seq_indent(self) -> Any:
+ return self.sequence_dash_offset
+
+ @block_seq_indent.setter
+ def block_seq_indent(self, val: Any) -> None:
+ self.sequence_dash_offset = val
+
+ def compact(self, seq_seq: Any = None, seq_map: Any = None) -> None:
+ self.compact_seq_seq = seq_seq
+ self.compact_seq_map = seq_map
+
+
+class YAMLContextManager:
+ def __init__(self, yaml: Any, transform: Any = None) -> None:
+ # used to be: (Any, Optional[Callable]) -> None
+ self._yaml = yaml
+ self._output_inited = False
+ self._output_path = None
+ self._output = self._yaml._output
+ self._transform = transform
+
+ # self._input_inited = False
+ # self._input = input
+ # self._input_path = None
+ # self._transform = yaml.transform
+ # self._fstream = None
+
+ if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
+ # pathlib.Path() instance, open with the same mode
+ self._output_path = self._output
+ self._output = self._output_path.open('w')
+
+ # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
+ # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
+ # # pathlib.Path() instance, open with the same mode
+ # self._input_path = self._input
+ # self._input = self._input_path.open('r')
+
+ if self._transform is not None:
+ self._fstream = self._output
+ if self._yaml.encoding is None:
+ self._output = StringIO()
+ else:
+ self._output = BytesIO()
+
+ def teardown_output(self) -> None:
+ if self._output_inited:
+ self._yaml.serializer.close()
+ else:
+ return
+ try:
+ self._yaml.emitter.dispose()
+ except AttributeError:
+ raise
+ # self.dumper.dispose() # cyaml
+ try:
+ delattr(self._yaml, '_serializer')
+ delattr(self._yaml, '_emitter')
+ except AttributeError:
+ raise
+ if self._transform:
+ val = self._output.getvalue()
+ if self._yaml.encoding:
+ val = val.decode(self._yaml.encoding)
+ if self._fstream is None:
+ self._transform(val)
+ else:
+ self._fstream.write(self._transform(val))
+ self._fstream.flush()
+ self._output = self._fstream # maybe not necessary
+ if self._output_path is not None:
+ self._output.close()
+
+ def init_output(self, first_data: Any) -> None:
+ if self._yaml.top_level_colon_align is True:
+ tlca: Any = max([len(str(x)) for x in first_data])
+ else:
+ tlca = self._yaml.top_level_colon_align
+ self._yaml.get_serializer_representer_emitter(self._output, tlca)
+ self._yaml.serializer.open()
+ self._output_inited = True
+
+ def dump(self, data: Any) -> None:
+ if not self._output_inited:
+ self.init_output(data)
+ try:
+ self._yaml.representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+
+ # def teardown_input(self):
+ # pass
+ #
+ # def init_input(self):
+ # # set the constructor and parser on YAML() instance
+ # self._yaml.get_constructor_parser(stream)
+ #
+ # def load(self):
+ # if not self._input_inited:
+ # self.init_input()
+ # try:
+ # while self._yaml.constructor.check_data():
+ # yield self._yaml.constructor.get_data()
+ # finally:
+ # parser.dispose()
+ # try:
+ # self._reader.reset_reader() # type: ignore
+ # except AttributeError:
+ # pass
+ # try:
+ # self._scanner.reset_scanner() # type: ignore
+ # except AttributeError:
+ # pass
+
+
+def yaml_object(yml: Any) -> Any:
+    """ decorator for classes that need to dump/load objects
+ The tag for such objects is taken from the class attribute yaml_tag (or the
+ class name in lowercase in case unavailable)
+ If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
+ loading, default routines (dumping a mapping of the attributes) used otherwise.
+ """
+
+ def yo_deco(cls: Any) -> Any:
+ tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
+ try:
+ yml.representer.add_representer(cls, cls.to_yaml)
+ except AttributeError:
+
+ def t_y(representer: Any, data: Any) -> Any:
+ return representer.represent_yaml_object(
+ tag, data, cls, flow_style=representer.default_flow_style,
+ )
+
+ yml.representer.add_representer(cls, t_y)
+ try:
+ yml.constructor.add_constructor(tag, cls.from_yaml)
+ except AttributeError:
+
+ def f_y(constructor: Any, node: Any) -> Any:
+ return constructor.construct_yaml_object(node, cls)
+
+ yml.constructor.add_constructor(tag, f_y)
+ return cls
+
+ return yo_deco
+
+
+########################################################################################
+def warn_deprecation(fun: Any, method: Any, arg: str = '') -> None:
+ warnings.warn(
+ f'\n{fun} will be removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead', # NOQA
+ PendingDeprecationWarning, # this will show when testing with pytest/tox
+ stacklevel=3,
+ )
+
+
+def error_deprecation(fun: Any, method: Any, arg: str = '') -> None:
+ warnings.warn(
+ f'\n{fun} has been removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead', # NOQA
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ sys.exit(1)
+
+
+########################################################################################
+
+
+def scan(stream: StreamTextType, Loader: Any = Loader) -> Any:
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ warn_deprecation('scan', 'scan', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ while loader.scanner.check_token():
+ yield loader.scanner.get_token()
+ finally:
+ loader._parser.dispose()
+
+
+def parse(stream: StreamTextType, Loader: Any = Loader) -> Any:
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ warn_deprecation('parse', 'parse', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ while loader._parser.check_event():
+ yield loader._parser.get_event()
+ finally:
+ loader._parser.dispose()
+
+
+def compose(stream: StreamTextType, Loader: Any = Loader) -> Any:
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+
+def compose_all(stream: StreamTextType, Loader: Any = Loader) -> Any:
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader._composer.get_node()
+ finally:
+ loader._parser.dispose()
+
+
def load(
    stream: Any, Loader: Any = None, version: Any = None, preserve_quotes: Any = None,
) -> Any:
    """
    Parse the first YAML document in a stream and produce the
    corresponding Python object.

    Falls back to UnsafeLoader (with a warning) when no Loader is given.
    """
    warn_deprecation('load', 'load', arg="typ='unsafe', pure=True")
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    ldr: Any = Loader(stream, version, preserve_quotes=preserve_quotes)
    try:
        return ldr._constructor.get_single_data()
    finally:
        ldr._parser.dispose()
        # reader/scanner resets are best-effort: not every loader has them
        try:
            ldr._reader.reset_reader()
        except AttributeError:
            pass
        try:
            ldr._scanner.reset_scanner()
        except AttributeError:
            pass
+
+
def load_all(
    stream: Any, Loader: Any = None, version: Any = None, preserve_quotes: Any = None,
) -> Any:
    """
    Parse all YAML documents in a stream and produce the corresponding
    Python objects (as a generator).

    Falls back to UnsafeLoader (with a warning) when no Loader is given.
    """
    warn_deprecation('load_all', 'load_all', arg="typ='unsafe', pure=True")
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    ldr: Any = Loader(stream, version, preserve_quotes=preserve_quotes)
    constructor = ldr._constructor
    try:
        while constructor.check_data():
            yield constructor.get_data()
    finally:
        ldr._parser.dispose()
        # reader/scanner resets are best-effort: not every loader has them
        try:
            ldr._reader.reset_reader()
        except AttributeError:
            pass
        try:
            ldr._scanner.reset_scanner()
        except AttributeError:
            pass
+
+
def safe_load(stream: StreamTextType, version: Optional[VersionType] = None) -> Any:
    """
    Parse the first YAML document in a stream into a Python object,
    resolving only basic YAML tags.
    """
    warn_deprecation('safe_load', 'load', arg="typ='safe', pure=True")
    return load(stream, Loader=SafeLoader, version=version)
+
+
def safe_load_all(stream: StreamTextType, version: Optional[VersionType] = None) -> Any:
    """
    Parse all YAML documents in a stream into Python objects,
    resolving only basic YAML tags.
    """
    warn_deprecation('safe_load_all', 'load_all', arg="typ='safe', pure=True")
    return load_all(stream, Loader=SafeLoader, version=version)
+
+
def round_trip_load(
    stream: StreamTextType,
    version: Optional[VersionType] = None,
    preserve_quotes: Optional[bool] = None,
) -> Any:
    """
    Parse the first YAML document in a stream and produce the
    corresponding Python object, resolving only basic YAML tags.
    """
    # fix: the deprecation warning previously named 'round_trip_load_all'
    # (copy-paste from the _all variant); name this function instead
    warn_deprecation('round_trip_load', 'load')
    return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
def round_trip_load_all(
    stream: StreamTextType,
    version: Optional[VersionType] = None,
    preserve_quotes: Optional[bool] = None,
) -> Any:
    """
    Parse all YAML documents in a stream into Python objects,
    resolving only basic YAML tags.
    """
    warn_deprecation('round_trip_load_all', 'load_all')
    return load_all(
        stream, Loader=RoundTripLoader, version=version, preserve_quotes=preserve_quotes,
    )
+
+
def emit(
    events: Any,
    stream: Optional[StreamType] = None,
    Dumper: Any = Dumper,
    canonical: Optional[bool] = None,
    indent: Union[int, None] = None,
    width: Optional[int] = None,
    allow_unicode: Optional[bool] = None,
    line_break: Any = None,
) -> Any:
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    warn_deprecation('emit', 'emit', arg="typ='safe', pure=True")
    getvalue = None
    if stream is None:
        # buffer in memory and hand the result back as a string
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
    )
    try:
        for ev in events:
            dumper.emit(ev)
    finally:
        # the original wrapped this in `except AttributeError: raise`,
        # a no-op handler, so the guard is dropped here
        dumper._emitter.dispose()
        dumper.dispose()  # cyaml
    return getvalue() if getvalue is not None else None
+
+
# default for the ``encoding`` parameter of the serialize/dump functions
# below; None selects str output (StringIO) rather than bytes (BytesIO)
enc = None
+
+
def serialize_all(
    nodes: Any,
    stream: Optional[StreamType] = None,
    Dumper: Any = Dumper,
    canonical: Any = None,
    indent: Optional[int] = None,
    width: Optional[int] = None,
    allow_unicode: Optional[bool] = None,
    line_break: Any = None,
    encoding: Any = enc,
    explicit_start: Optional[bool] = None,
    explicit_end: Optional[bool] = None,
    version: Optional[VersionType] = None,
    tags: Any = None,
) -> Any:
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    warn_deprecation('serialize_all', 'serialize_all', arg="typ='safe', pure=True")
    getvalue = None
    if stream is None:
        # buffer in memory; the encoding decides str vs bytes output
        stream = StringIO() if encoding is None else BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        version=version,
        tags=tags,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
    )
    try:
        serializer = dumper._serializer
        serializer.open()
        for node in nodes:
            dumper.serialize(node)
        serializer.close()
    finally:
        # original guarded this with a no-op `except AttributeError: raise`
        dumper._emitter.dispose()
        dumper.dispose()  # cyaml
    return getvalue() if getvalue is not None else None
+
+
def serialize(
    node: Any, stream: Optional[StreamType] = None, Dumper: Any = Dumper, **kwds: Any,
) -> Any:
    """
    Serialize a single representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    warn_deprecation('serialize', 'serialize', arg="typ='safe', pure=True")
    return serialize_all((node,), stream, Dumper=Dumper, **kwds)
+
+
def dump_all(
    documents: Any,
    stream: Optional[StreamType] = None,
    Dumper: Any = Dumper,
    default_style: Any = None,
    default_flow_style: Any = None,
    canonical: Optional[bool] = None,
    indent: Optional[int] = None,
    width: Optional[int] = None,
    allow_unicode: Optional[bool] = None,
    line_break: Any = None,
    encoding: Any = enc,
    explicit_start: Optional[bool] = None,
    explicit_end: Optional[bool] = None,
    version: Any = None,
    tags: Any = None,
    block_seq_indent: Any = None,
    top_level_colon_align: Any = None,
    prefix_colon: Any = None,
) -> Any:
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    warn_deprecation('dump_all', 'dump_all', arg="typ='unsafe', pure=True")
    getvalue = None
    if top_level_colon_align is True:
        # align the colons on the widest key of the first document
        top_level_colon_align = max(len(str(x)) for x in documents[0])
    if stream is None:
        # buffer in memory; the encoding decides str vs bytes output
        stream = StringIO() if encoding is None else BytesIO()
        getvalue = stream.getvalue
    dumper = Dumper(
        stream,
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
    try:
        dumper._serializer.open()
        for data in documents:
            dumper._representer.represent(data)
        dumper._serializer.close()
    finally:
        # original guards here were no-op `except AttributeError: raise`
        dumper._emitter.dispose()
        dumper.dispose()  # cyaml
    return getvalue() if getvalue is not None else None
+
+
def dump(
    data: Any,
    stream: Optional[StreamType] = None,
    Dumper: Any = Dumper,
    default_style: Any = None,
    default_flow_style: Any = None,
    canonical: Optional[bool] = None,
    indent: Optional[int] = None,
    width: Optional[int] = None,
    allow_unicode: Optional[bool] = None,
    line_break: Any = None,
    encoding: Any = enc,
    explicit_start: Optional[bool] = None,
    explicit_end: Optional[bool] = None,
    version: Optional[VersionType] = None,
    tags: Any = None,
    block_seq_indent: Any = None,
) -> Any:
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.

    default_style ∈ None, '', '"', "'", '|', '>'

    """
    warn_deprecation('dump', 'dump', arg="typ='unsafe', pure=True")
    # forward everything unchanged to dump_all with a single document
    options = dict(
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
    )
    return dump_all([data], stream, Dumper=Dumper, **options)
+
+
def safe_dump(data: Any, stream: Optional[StreamType] = None, **kwds: Any) -> Any:
    """
    Serialize a Python object into a YAML stream, producing only basic
    YAML tags. If stream is None, return the produced string instead.
    """
    warn_deprecation('safe_dump', 'dump', arg="typ='safe', pure=True")
    return dump_all((data,), stream, Dumper=SafeDumper, **kwds)
+
+
def round_trip_dump(
    data: Any,
    stream: Optional[StreamType] = None,
    Dumper: Any = RoundTripDumper,
    default_style: Any = None,
    default_flow_style: Any = None,
    canonical: Optional[bool] = None,
    indent: Optional[int] = None,
    width: Optional[int] = None,
    allow_unicode: Optional[bool] = None,
    line_break: Any = None,
    encoding: Any = enc,
    explicit_start: Optional[bool] = None,
    explicit_end: Optional[bool] = None,
    version: Optional[VersionType] = None,
    tags: Any = None,
    block_seq_indent: Any = None,
    top_level_colon_align: Any = None,
    prefix_colon: Any = None,
) -> Any:
    """
    Serialize a Python object with the round-trip dumper.
    If stream is None, return the produced string instead.
    """
    # unicode output is on by default for round-trip dumping
    if allow_unicode is None:
        allow_unicode = True
    warn_deprecation('round_trip_dump', 'dump')
    options = dict(
        default_style=default_style,
        default_flow_style=default_flow_style,
        canonical=canonical,
        indent=indent,
        width=width,
        allow_unicode=allow_unicode,
        line_break=line_break,
        encoding=encoding,
        explicit_start=explicit_start,
        explicit_end=explicit_end,
        version=version,
        tags=tags,
        block_seq_indent=block_seq_indent,
        top_level_colon_align=top_level_colon_align,
        prefix_colon=prefix_colon,
    )
    return dump_all([data], stream, Dumper=Dumper, **options)
+
+
+# Loader/Dumper are no longer composites, to get to the associated
+# Resolver()/Representer(), etc., you need to instantiate the class
+
+
def add_implicit_resolver(
    tag: Any,
    regexp: Any,
    first: Any = None,
    Loader: Any = None,
    Dumper: Any = None,
    resolver: Any = Resolver,
) -> None:
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None and Dumper is None:
        resolver.add_implicit_resolver(tag, regexp, first)
        return
    # the Loader and Dumper branches are symmetric; handle them uniformly
    for cls, known in (
        (Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)),
        (Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)),
    ):
        if not cls:
            continue
        if hasattr(cls, 'add_implicit_resolver'):
            cls.add_implicit_resolver(tag, regexp, first)
        elif issubclass(cls, known):
            Resolver.add_implicit_resolver(tag, regexp, first)
        else:
            raise NotImplementedError
+
+
+# this code currently not tested
def add_path_resolver(
    tag: Any,
    path: Any,
    kind: Any = None,
    Loader: Any = None,
    Dumper: Any = None,
    resolver: Any = Resolver,
) -> None:
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None and Dumper is None:
        resolver.add_path_resolver(tag, path, kind)
        return
    # the Loader and Dumper branches are symmetric; handle them uniformly
    for cls, known in (
        (Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)),
        (Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)),
    ):
        if not cls:
            continue
        if hasattr(cls, 'add_path_resolver'):
            cls.add_path_resolver(tag, path, kind)
        elif issubclass(cls, known):
            Resolver.add_path_resolver(tag, path, kind)
        else:
            raise NotImplementedError
+
+
def add_constructor(
    tag: Any, object_constructor: Any, Loader: Any = None, constructor: Any = Constructor,
) -> None:
    """
    Add an object constructor for the given tag.

    object_constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        constructor.add_constructor(tag, object_constructor)
    else:
        if hasattr(Loader, 'add_constructor'):
            Loader.add_constructor(tag, object_constructor)
            return
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_constructor(tag, object_constructor)
        # fix: the parameter `Loader` shadows the imported Loader class, so
        # the original `issubclass(Loader, Loader)` was trivially always True
        # and made the RoundTripLoader branch unreachable; reference the
        # class through its module, as add_multi_constructor does
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
            Constructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_constructor(tag, object_constructor)
        else:
            raise NotImplementedError
+
+
def add_multi_constructor(
    tag_prefix: Any, multi_constructor: Any, Loader: Any = None, constructor: Any = Constructor,  # NOQA
) -> None:
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        constructor.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        # NOTE(review): this branch is deliberately disabled with `False and`;
        # if it were ever enabled it would pass `constructor` where
        # `multi_constructor` is presumably intended — confirm before enabling
        if False and hasattr(Loader, 'add_multi_constructor'):
            Loader.add_multi_constructor(tag_prefix, constructor)
            return
        # dispatch on the concrete loader family
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, ruamel.yaml.loader.Loader):
            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        else:
            raise NotImplementedError
+
+
def add_representer(
    data_type: Any, object_representer: Any, Dumper: Any = None, representer: Any = Representer,  # NOQA
) -> None:
    """
    Add a representer for the given type.

    object_representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    if Dumper is None:
        representer.add_representer(data_type, object_representer)
    else:
        if hasattr(Dumper, 'add_representer'):
            Dumper.add_representer(data_type, object_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_representer(data_type, object_representer)
        # fix: the parameter `Dumper` shadows the imported Dumper class, so
        # the original `issubclass(Dumper, Dumper)` was trivially always True
        # and made the RoundTripDumper branch unreachable; reference the
        # class through its module, as add_implicit_resolver does
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
            Representer.add_representer(data_type, object_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_representer(data_type, object_representer)
        else:
            raise NotImplementedError
+
+
+# this code currently not tested
def add_multi_representer(
    data_type: Any, multi_representer: Any, Dumper: Any = None, representer: Any = Representer,
) -> None:
    """
    Add a representer for the given type.

    multi_representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    if Dumper is None:
        representer.add_multi_representer(data_type, multi_representer)
    else:
        if hasattr(Dumper, 'add_multi_representer'):
            Dumper.add_multi_representer(data_type, multi_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_multi_representer(data_type, multi_representer)
        # fix: `issubclass(Dumper, Dumper)` (parameter shadowing the class)
        # was trivially always True, making the RoundTripDumper branch
        # unreachable; reference the class through its module instead
        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
            Representer.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
        else:
            raise NotImplementedError
+
+
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    Whenever a class with a non-None ``yaml_tag`` is created, its
    from_yaml/to_yaml hooks are registered with the class' constructor
    and representer.
    """

    def __init__(cls, name: Any, bases: Any, kwds: Any) -> None:
        super().__init__(name, bases, kwds)
        # kwds.get(...) is equivalent to `'yaml_tag' in kwds and kwds[...] is not None`
        if kwds.get('yaml_tag') is not None:
            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
+ cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
+
+
class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # constructor/representer classes the metaclass registers the hooks with
    yaml_constructor = Constructor
    yaml_representer = Representer

    # subclasses set yaml_tag to trigger registration via the metaclass
    yaml_tag: Any = None
    yaml_flow_style: Any = None

    @classmethod
    def from_yaml(cls, constructor: Any, node: Any) -> Any:
        """
        Convert a representation node to a Python object.
        """
        return constructor.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, representer: Any, data: Any) -> Any:
        """
        Convert a Python object to a representation node.
        """
        return representer.represent_yaml_object(
            cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style,
        )
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/nodes.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/nodes.py
new file mode 100644
index 0000000000..172104981b
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/nodes.py
@@ -0,0 +1,145 @@
+# coding: utf-8
+
+import sys
+
+from typing import Dict, Any, Text, Optional # NOQA
+from ruamel.yaml.tag import Tag
+
+
class Node:
    """Base class for nodes in the YAML representation graph."""

    __slots__ = 'ctag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor'

    def __init__(
        self,
        tag: Any,
        value: Any,
        start_mark: Any,
        end_mark: Any,
        comment: Any = None,
        anchor: Any = None,
    ) -> None:
        # a plain string tag is normalized into a Tag instance; the
        # serializer can still obtain a string from it
        self.ctag = tag if isinstance(tag, Tag) else Tag(suffix=tag)
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.comment = comment
        self.anchor = anchor

    @property
    def tag(self) -> Optional[str]:
        """String form of the tag (None when no ctag is set)."""
        ctag = self.ctag
        return str(ctag) if ctag is not None else None

    @tag.setter
    def tag(self, val: Any) -> None:
        self.ctag = Tag(suffix=val) if isinstance(val, str) else val

    def __repr__(self) -> Any:
        return f'{self.__class__.__name__!s}(tag={self.tag!r}, value={repr(self.value)!s})'

    def dump(self, indent: int = 0) -> None:
        """Write an indented debug representation of the subtree to stdout."""
        cls_name = self.__class__.__name__
        pad = ' ' * indent
        if isinstance(self.value, str):
            sys.stdout.write(f'{pad}{cls_name}(tag={self.tag!r}, value={self.value!r})\n')
            if self.comment:
                sys.stdout.write(f' {pad}comment: {self.comment})\n')
            return
        sys.stdout.write(f'{pad}{cls_name}(tag={self.tag!r})\n')
        if self.comment:
            sys.stdout.write(f' {pad}comment: {self.comment})\n')
        for child in self.value:
            if isinstance(child, tuple):
                # mapping entries are (key_node, value_node) pairs
                for part in child:
                    part.dump(indent + 1)
            elif isinstance(child, Node):
                child.dump(indent + 1)
            else:
                sys.stdout.write(f'Node value type? {type(child)}\n')
+
+
class ScalarNode(Node):
    """
    Scalar node; ``style`` records how the scalar is presented:

    ? -> set() ? key, no value
    - -> suppressable null value in set
    " -> double quoted
    ' -> single quoted
    | -> literal style
    > -> folding style
    """

    __slots__ = ('style',)
    id = 'scalar'

    def __init__(
        self,
        tag: Any,
        value: Any,
        start_mark: Any = None,
        end_mark: Any = None,
        style: Any = None,
        comment: Any = None,
        anchor: Any = None,
    ) -> None:
        super().__init__(tag, value, start_mark, end_mark, comment=comment, anchor=anchor)
        self.style = style
+
+
class CollectionNode(Node):
    """Common base for sequence and mapping nodes; adds ``flow_style``."""

    __slots__ = ('flow_style',)

    def __init__(
        self,
        tag: Any,
        value: Any,
        start_mark: Any = None,
        end_mark: Any = None,
        flow_style: Any = None,
        comment: Any = None,
        anchor: Any = None,
    ) -> None:
        # the original passed anchor separately after Node.__init__; handing
        # it straight to the base class leaves the same attribute state
        super().__init__(tag, value, start_mark, end_mark, comment=comment, anchor=anchor)
        self.flow_style = flow_style
+
+
class SequenceNode(CollectionNode):
    # represents a YAML sequence; no members beyond CollectionNode
    __slots__ = ()
    id = 'sequence'
+
+
class MappingNode(CollectionNode):
    """Represents a YAML mapping; adds the ``merge`` slot."""

    __slots__ = ('merge',)
    id = 'mapping'

    def __init__(
        self,
        tag: Any,
        value: Any,
        start_mark: Any = None,
        end_mark: Any = None,
        flow_style: Any = None,
        comment: Any = None,
        anchor: Any = None,
    ) -> None:
        super().__init__(tag, value, start_mark, end_mark, flow_style, comment, anchor)
        # merge bookkeeping; starts unset, populated elsewhere
        self.merge = None
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/parser.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/parser.py
new file mode 100644
index 0000000000..8cb9a372a4
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/parser.py
@@ -0,0 +1,851 @@
+# coding: utf-8
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document*
+# STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content |
+# indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+# BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START <}
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+# FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+
+# need to have full path with import, as pkg_resources tries to load parser.py in __init__.py
+# only to not do anything with the package afterwards
+# and for Jython too
+
+
+from ruamel.yaml.error import MarkedYAMLError
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError # NOQA
+from ruamel.yaml.scanner import BlankLineComment
+from ruamel.yaml.comments import C_PRE, C_POST, C_SPLIT_ON_FIRST_BLANK
+from ruamel.yaml.compat import nprint, nprintf # NOQA
+from ruamel.yaml.tag import Tag
+
+from typing import Any, Dict, Optional, List, Optional # NOQA
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
+
+
def xprintf(*args: Any, **kw: Any) -> Any:
    """Debug-print hook: forwards to nprintf."""
    # fix: removed the unreachable `pass` that followed the return
    return nprintf(*args, **kw)
+
+
class ParserError(MarkedYAMLError):
    # raised for grammar violations detected while producing events
    pass
+
+
class Parser:
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.

    # fallback handle -> prefix mapping, merged in when a document declares
    # no (or only some) %TAG directives
    DEFAULT_TAGS = {'!': '!', '!!': 'tag:yaml.org,2002:'}
+
+ def __init__(self, loader: Any) -> None:
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_parser', None) is None:
+ self.loader._parser = self
+ self.reset_parser()
+
+ def reset_parser(self) -> None:
+ # Reset the state attributes (to clear self-references)
+ self.current_event = self.last_event = None
+ self.tag_handles: Dict[Any, Any] = {}
+ self.states: List[Any] = []
+ self.marks: List[Any] = []
+ self.state: Any = self.parse_stream_start
+
    def dispose(self) -> None:
        # drop all state so the parser (and attached loader) can be collected
        self.reset_parser()
+
+ @property
+ def scanner(self) -> Any:
+ if hasattr(self.loader, 'typ'):
+ return self.loader.scanner
+ return self.loader._scanner
+
+ @property
+ def resolver(self) -> Any:
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
+ def check_event(self, *choices: Any) -> bool:
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self) -> Any:
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
    def get_event(self) -> Any:
        """Return the next event and advance past it."""
        # lazily produce the event from the current state function
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        # assert self.current_event is not None
        # if self.current_event.end_mark.line != self.peek_event().start_mark.line:
        # debug trace; note this also forces the *following* event via peek_event()
        xprintf('get_event', repr(self.current_event), self.peek_event().start_mark.line)
        self.last_event = value = self.current_event
        self.current_event = None
        return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document*
+ # STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self) -> Any:
+ # Parse the stream start.
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self) -> Any:
+ # Parse an implicit document.
+ if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
+ # don't need copy, as an implicit tag doesn't add tag_handles
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.scanner.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark, explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self) -> Any:
+ # Parse any extra document end indicators.
+ while self.scanner.check_token(DocumentEndToken):
+ self.scanner.get_token()
+ # Parse an explicit document.
+ if not self.scanner.check_token(StreamEndToken):
+ version, tags = self.process_directives()
+ if not self.scanner.check_token(DocumentStartToken):
+ raise ParserError(
+ None,
+ None,
+ "expected '<document start>', "
+ f'but found {self.scanner.peek_token().id,!r}',
+ self.scanner.peek_token().start_mark,
+ )
+ token = self.scanner.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ # if self.loader is not None and \
+ # end_mark.line != self.scanner.peek_token().start_mark.line:
+ # self.loader.scalar_after_indicator = False
+ event: Any = DocumentStartEvent(
+ start_mark,
+ end_mark,
+ explicit=True,
+ version=version,
+ tags=tags,
+ comment=token.comment,
+ )
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.scanner.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self) -> Any:
+ # Parse the document end.
+ token = self.scanner.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.scanner.check_token(DocumentEndToken):
+ token = self.scanner.get_token()
+ # if token.end_mark.line != self.peek_event().start_mark.line:
+ pt = self.scanner.peek_token()
+ if not isinstance(pt, StreamEndToken) and (
+ token.end_mark.line == pt.start_mark.line
+ ):
+ raise ParserError(
+ None,
+ None,
+ 'found non-comment content after document end marker, '
+ f'{self.scanner.peek_token().id,!r}',
+ self.scanner.peek_token().start_mark,
+ )
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
+
+ # Prepare the next state.
+ if self.resolver.processing_version == (1, 1):
+ self.state = self.parse_document_start
+ else:
+ if explicit:
+ # found a document end marker, can be followed by implicit document
+ self.state = self.parse_implicit_document_start
+ else:
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self) -> Any:
+ if self.scanner.check_token(
+ DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken,
+ ):
+ event = self.process_empty_scalar(self.scanner.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
    def process_directives(self) -> Any:
        """
        Consume %YAML and %TAG directive tokens.

        Returns (yaml_version, tag_handles_copy) — the second element is None
        when no %TAG directives were seen — and, when attached to a loader
        with a ``tags`` attribute, records version and handles on it.
        """
        yaml_version = None
        self.tag_handles = {}
        while self.scanner.check_token(DirectiveToken):
            token = self.scanner.get_token()
            if token.name == 'YAML':
                # at most one %YAML directive per document
                if yaml_version is not None:
                    raise ParserError(
                        None, None, 'found duplicate YAML directive', token.start_mark,
                    )
                major, minor = token.value
                if major != 1:
                    raise ParserError(
                        None,
                        None,
                        'found incompatible YAML document (version 1.* is required)',
                        token.start_mark,
                    )
                yaml_version = token.value
            elif token.name == 'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(
                        None, None, f'duplicate tag handle {handle!r}', token.start_mark,
                    )
                self.tag_handles[handle] = prefix
        # snapshot before the defaults below are merged in
        if bool(self.tag_handles):
            value: Any = (yaml_version, self.tag_handles.copy())
        else:
            value = yaml_version, None
        if self.loader is not None and hasattr(self.loader, 'tags'):
            self.loader.version = yaml_version
            if self.loader.tags is None:
                self.loader.tags = {}
            for k in self.tag_handles:
                self.loader.tags[k] = self.tag_handles[k]
        # fill in '!' and '!!' defaults where the document did not define them
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self) -> Any:
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self) -> Any:
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self) -> Any:
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ # def transform_tag(self, handle: Any, suffix: Any) -> Any:
+ # return self.tag_handles[handle] + suffix
+
+ def select_tag_transform(self, tag: Tag) -> None:
+ if tag is None:
+ return
+ tag.select_transform(False)
+
    def parse_node(self, block: bool = False, indentless_sequence: bool = False) -> Any:
        """Parse a single node and return the corresponding start event.

        Handles aliases, node properties (anchor/tag in either order),
        scalars, flow and block collections, and empty scalars.  *block*
        permits block collections; *indentless_sequence* additionally
        permits a sequence that is not indented relative to its parent.
        Raises ParserError on an undefined tag handle or missing content.
        """
        # alias node: just a reference to a previously anchored node
        if self.scanner.check_token(AliasToken):
            token = self.scanner.get_token()
            event: Any = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
            return event

        anchor = None
        tag = None
        start_mark = end_mark = tag_mark = None
        # properties: ANCHOR TAG? ...
        if self.scanner.check_token(AnchorToken):
            token = self.scanner.get_token()
            self.move_token_comment(token)
            start_mark = token.start_mark
            end_mark = token.end_mark
            anchor = token.value
            if self.scanner.check_token(TagToken):
                token = self.scanner.get_token()
                tag_mark = token.start_mark
                end_mark = token.end_mark
                # tag = token.value
                tag = Tag(
                    handle=token.value[0], suffix=token.value[1], handles=self.tag_handles,
                )
        # ... or TAG ANCHOR?
        elif self.scanner.check_token(TagToken):
            token = self.scanner.get_token()
            start_mark = tag_mark = token.start_mark
            end_mark = token.end_mark
            # tag = token.value
            tag = Tag(handle=token.value[0], suffix=token.value[1], handles=self.tag_handles)
            if self.scanner.check_token(AnchorToken):
                token = self.scanner.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
        if tag is not None:
            self.select_tag_transform(tag)
            # a handle that was never declared (and is not a default) is an error
            if tag.check_handle():
                raise ParserError(
                    'while parsing a node',
                    start_mark,
                    f'found undefined tag handle {tag.handle!r}',
                    tag_mark,
                )
        if start_mark is None:
            # no properties seen: the node starts at the next token
            start_mark = end_mark = self.scanner.peek_token().start_mark
        event = None
        implicit = tag is None or str(tag) == '!'
        if indentless_sequence and self.scanner.check_token(BlockEntryToken):
            comment = None
            pt = self.scanner.peek_token()
            # comment handling differs between old-style (comment_handling is
            # None) and new-style loaders
            if self.loader and self.loader.comment_handling is None:
                if pt.comment and pt.comment[0]:
                    comment = [pt.comment[0], []]
                    pt.comment[0] = None
            elif self.loader:
                if pt.comment:
                    comment = pt.comment
            end_mark = self.scanner.peek_token().end_mark
            event = SequenceStartEvent(
                anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment,
            )
            self.state = self.parse_indentless_sequence_entry
            return event

        if self.scanner.check_token(ScalarToken):
            token = self.scanner.get_token()
            # self.scanner.peek_token_same_line_comment(token)
            end_mark = token.end_mark
            # dimplicit: (plain-implicit, quoted-implicit) resolution flags
            if (token.plain and tag is None) or str(tag) == '!':
                dimplicit = (True, False)
            elif tag is None:
                dimplicit = (False, True)
            else:
                dimplicit = (False, False)
            # nprint('se', token.value, token.comment)
            event = ScalarEvent(
                anchor,
                tag,
                dimplicit,
                token.value,
                start_mark,
                end_mark,
                style=token.style,
                comment=token.comment,
            )
            self.state = self.states.pop()
        elif self.scanner.check_token(FlowSequenceStartToken):
            pt = self.scanner.peek_token()
            end_mark = pt.end_mark
            event = SequenceStartEvent(
                anchor,
                tag,
                implicit,
                start_mark,
                end_mark,
                flow_style=True,
                comment=pt.comment,
            )
            self.state = self.parse_flow_sequence_first_entry
        elif self.scanner.check_token(FlowMappingStartToken):
            pt = self.scanner.peek_token()
            end_mark = pt.end_mark
            event = MappingStartEvent(
                anchor,
                tag,
                implicit,
                start_mark,
                end_mark,
                flow_style=True,
                comment=pt.comment,
            )
            self.state = self.parse_flow_mapping_first_key
        elif block and self.scanner.check_token(BlockSequenceStartToken):
            end_mark = self.scanner.peek_token().start_mark
            # should inserting the comment be dependent on the
            # indentation?
            pt = self.scanner.peek_token()
            comment = pt.comment
            # nprint('pt0', type(pt))
            if comment is None or comment[1] is None:
                comment = pt.split_old_comment()
            # nprint('pt1', comment)
            event = SequenceStartEvent(
                anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment,
            )
            self.state = self.parse_block_sequence_first_entry
        elif block and self.scanner.check_token(BlockMappingStartToken):
            end_mark = self.scanner.peek_token().start_mark
            comment = self.scanner.peek_token().comment
            event = MappingStartEvent(
                anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment,
            )
            self.state = self.parse_block_mapping_first_key
        elif anchor is not None or tag is not None:
            # Empty scalars are allowed even if a tag or an anchor is
            # specified.
            event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
            self.state = self.states.pop()
        else:
            if block:
                node = 'block'
            else:
                node = 'flow'
            token = self.scanner.peek_token()
            raise ParserError(
                f'while parsing a {node!s} node',
                start_mark,
                f'expected the node content, but found {token.id!r}',
                token.start_mark,
            )
        return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+ # BLOCK-END
+
+ def parse_block_sequence_first_entry(self) -> Any:
+ token = self.scanner.get_token()
+ # move any comment from start token
+ # self.move_token_comment(token)
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
    def parse_block_sequence_entry(self) -> Any:
        """Parse one ``- item`` entry of a block sequence, or its end.

        Emits the entry's node events via parse_block_node, an empty scalar
        for a dash with no content, or SequenceEndEvent at BLOCK-END.
        """
        if self.scanner.check_token(BlockEntryToken):
            token = self.scanner.get_token()
            self.move_token_comment(token)
            if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # '-' immediately followed by another '-' or the end:
                # the entry is an empty scalar
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.scanner.check_token(BlockEndToken):
            token = self.scanner.peek_token()
            raise ParserError(
                'while parsing a block collection',
                self.marks[-1],
                f'expected <block end>, but found {token.id!r}',
                token.start_mark,
            )
        token = self.scanner.get_token()  # BlockEndToken
        event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ # indentless_sequence?
+ # sequence:
+ # - entry
+ # - nested
+
    def parse_indentless_sequence_entry(self) -> Any:
        """Parse one entry of an indentless sequence, or its end.

        An indentless sequence has no BLOCK-END token; it ends when the
        next token is not a BLOCK-ENTRY, so the end event is synthesized
        from the upcoming token's position.
        """
        if self.scanner.check_token(BlockEntryToken):
            token = self.scanner.get_token()
            self.move_token_comment(token)
            if not self.scanner.check_token(
                BlockEntryToken, KeyToken, ValueToken, BlockEndToken,
            ):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                # dash with no content: empty scalar entry
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        token = self.scanner.peek_token()
        c = None
        if self.loader and self.loader.comment_handling is None:
            # old-style comment handling: attach the next token's comment
            c = token.comment
            start_mark = token.start_mark
        else:
            # new-style: end at the previous event and redistribute comments
            start_mark = self.last_event.end_mark  # type: ignore
            c = self.distribute_comment(token.comment, start_mark.line)  # type: ignore
        event = SequenceEndEvent(start_mark, start_mark, comment=c)
        self.state = self.states.pop()
        return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self) -> Any:
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
    def parse_block_mapping_key(self) -> Any:
        """Parse one key of a block mapping, or the mapping's end.

        Also accepts a value with no preceding key (YAML >1.1 only), in
        which case an empty scalar key is produced.
        """
        if self.scanner.check_token(KeyToken):
            token = self.scanner.get_token()
            self.move_token_comment(token)
            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # '?' with no content: the key is an empty scalar
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken):
            # ':' without a key: allowed post-1.1, key is an empty scalar
            self.state = self.parse_block_mapping_value
            return self.process_empty_scalar(self.scanner.peek_token().start_mark)
        if not self.scanner.check_token(BlockEndToken):
            token = self.scanner.peek_token()
            raise ParserError(
                'while parsing a block mapping',
                self.marks[-1],
                f'expected <block end>, but found {token.id!r}',
                token.start_mark,
            )
        token = self.scanner.get_token()
        self.move_token_comment(token)
        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event
+
    def parse_block_mapping_value(self) -> Any:
        """Parse the value part of a block mapping entry.

        Produces an empty scalar when the ':' has no content (or when there
        is no ':' at all), taking care to re-attach any comment that would
        otherwise be lost with the empty value.
        """
        if self.scanner.check_token(ValueToken):
            token = self.scanner.get_token()
            # value token might have post comment move it to e.g. block
            if self.scanner.check_token(ValueToken):
                self.move_token_comment(token)
            else:
                if not self.scanner.check_token(KeyToken):
                    self.move_token_comment(token, empty=True)
            # else: empty value for this key cannot move token.comment
            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # ':' with no content: empty scalar value; split the pending
                # comment so part stays with the next token
                self.state = self.parse_block_mapping_key
                comment = token.comment
                if comment is None:
                    token = self.scanner.peek_token()
                    comment = token.comment
                    if comment:
                        token._comment = [None, comment[1]]
                        comment = [comment[0], None]
                return self.process_empty_scalar(token.end_mark, comment=comment)
        else:
            # no ':' at all: the value is an empty scalar at the next token
            self.state = self.parse_block_mapping_key
            token = self.scanner.peek_token()
            return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
    # Note that while production rules for both flow_sequence_entry and
    # flow_mapping_entry are equal, their interpretations are different.
    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self) -> Any:
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
    def parse_flow_sequence_entry(self, first: bool = False) -> Any:
        """Parse one entry of a flow sequence, or its end (']').

        Entries after the first must be preceded by ','.  A '?' key token
        starts an inline single-pair mapping within the sequence.
        """
        if not self.scanner.check_token(FlowSequenceEndToken):
            if not first:
                if self.scanner.check_token(FlowEntryToken):
                    self.scanner.get_token()
                else:
                    token = self.scanner.peek_token()
                    raise ParserError(
                        'while parsing a flow sequence',
                        self.marks[-1],
                        f"expected ',' or ']', but got {token.id!r}",
                        token.start_mark,
                    )

            if self.scanner.check_token(KeyToken):
                # inline mapping entry inside the sequence (set syntax)
                token = self.scanner.peek_token()
                event: Any = MappingStartEvent(
                    None, None, True, token.start_mark, token.end_mark, flow_style=True,
                )
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.scanner.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        token = self.scanner.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event
+
+ def parse_flow_sequence_entry_mapping_key(self) -> Any:
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self) -> Any:
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self) -> Any:
+ self.state = self.parse_flow_sequence_entry
+ token = self.scanner.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self) -> Any:
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
    def parse_flow_mapping_key(self, first: Any = False) -> Any:
        """Parse one key of a flow mapping, or the mapping's end ('}').

        Entries after the first must be preceded by ','.  A value without a
        preceding key is accepted for YAML >1.1, with an empty scalar key.
        """
        if not self.scanner.check_token(FlowMappingEndToken):
            if not first:
                if self.scanner.check_token(FlowEntryToken):
                    self.scanner.get_token()
                else:
                    token = self.scanner.peek_token()
                    raise ParserError(
                        'while parsing a flow mapping',
                        self.marks[-1],
                        f"expected ',' or '}}', but got {token.id!r}",
                        token.start_mark,
                    )
            if self.scanner.check_token(KeyToken):
                token = self.scanner.get_token()
                if not self.scanner.check_token(
                    ValueToken, FlowEntryToken, FlowMappingEndToken,
                ):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    # '?' with no content: the key is an empty scalar
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
                ValueToken,
            ):
                # ':' without a key: allowed post-1.1, key is an empty scalar
                self.state = self.parse_flow_mapping_value
                return self.process_empty_scalar(self.scanner.peek_token().end_mark)
            elif not self.scanner.check_token(FlowMappingEndToken):
                # bare node used as a key; the value will be empty
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        token = self.scanner.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event
+
+ def parse_flow_mapping_value(self) -> Any:
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self) -> Any:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark: Any, comment: Any = None) -> Any:
+ return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
+
    def move_token_comment(
        self, token: Any, nt: Optional[Any] = None, empty: Optional[bool] = False,
    ) -> Any:
        # No-op in the base parser; the round-trip subclasses override this
        # to relocate comments between tokens.
        pass
+
+
class RoundTripParser(Parser):
    """Round-trip parser: a safe loader that wants to see the unmangled tag."""

    def select_tag_transform(self, tag: Tag) -> None:
        # pick the round-trip (non-mangling) tag transform
        if tag is not None:
            tag.select_transform(True)

    def move_token_comment(
        self, token: Any, nt: Optional[Any] = None, empty: Optional[bool] = False,
    ) -> Any:
        """Relocate an old-style comment from *token* to the next token (or *nt*)."""
        target = self.scanner.peek_token() if nt is None else nt
        token.move_old_comment(target, empty=empty)
+
+
class RoundTripParserSC(RoundTripParser):
    """roundtrip is a safe loader, that wants to see the unmangled tag"""

    # some of the differences are based on the superclass testing
    # if self.loader.comment_handling is not None

    def move_token_comment(
        self: Any, token: Any, nt: Any = None, empty: Optional[bool] = False,
    ) -> None:
        # new-style comment relocation (scanner-comment variant)
        token.move_new_comment(self.scanner.peek_token() if nt is None else nt, empty=empty)

    def distribute_comment(self, comment: Any, line: Any) -> Any:
        """Decide which part of *comment* attaches to the ending construct.

        *comment* appears to be a list whose first element holds comment
        indices into ``self.scanner.comments``; presumably the low two bits
        of ``loader.comment_handling`` select the policy (C_POST / C_PRE /
        split-on-blank) — TODO confirm against the comment-handling docs.
        """
        # ToDo, look at indentation of the comment to determine attachment
        if comment is None:
            return None
        if not comment[0]:
            return None
        if comment[0][0] != line + 1:
            nprintf('>>>dcxxx', comment, line)
        assert comment[0][0] == line + 1
        # if comment[0] - line > 1:
        #     return
        typ = self.loader.comment_handling & 0b11
        # nprintf('>>>dca', comment, line, typ)
        if typ == C_POST:
            # everything stays attached to the following construct
            return None
        if typ == C_PRE:
            # everything moves to the ending construct
            c = [None, None, comment[0]]
            comment[0] = None
            return c
        # nprintf('>>>dcb', comment[0])
        # split policies: find the first blank line in the comment run
        for _idx, cmntidx in enumerate(comment[0]):
            # nprintf('>>>dcb', cmntidx)
            if isinstance(self.scanner.comments[cmntidx], BlankLineComment):
                break
        else:
            return None  # no space found
        if _idx == 0:
            return None  # first line was blank
        # nprintf('>>>dcc', idx)
        if typ == C_SPLIT_ON_FIRST_BLANK:
            # everything before the blank line goes to the ending construct
            c = [None, None, comment[0][:_idx]]
            comment[0] = comment[0][_idx:]
            return c
        raise NotImplementedError  # reserved
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/py.typed b/contrib/python/ruamel.yaml/py3/ruamel/yaml/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/py.typed
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/reader.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/reader.py
new file mode 100644
index 0000000000..3780a2c1e9
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/reader.py
@@ -0,0 +1,275 @@
+# coding: utf-8
+
# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position to `length`
+# characters.
+# reader.index - the number of the current character.
+# reader.line, stream.column - the line and the column of the current
+# character.
+
+import codecs
+
+from ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError
+from ruamel.yaml.util import RegExp
+
+from typing import Any, Dict, Optional, List, Union, Text, Tuple, Optional # NOQA
+# from ruamel.yaml.compat import StreamTextType # NOQA
+
+__all__ = ['Reader', 'ReaderError']
+
+
class ReaderError(YAMLError):
    """Raised when the input stream contains undecodable or disallowed data."""

    def __init__(
        self, name: Any, position: Any, character: Any, encoding: Any, reason: Any,
    ) -> None:
        self.name = name
        self.character = character
        self.position = position
        self.encoding = encoding
        self.reason = reason

    def __str__(self) -> Any:
        if not isinstance(self.character, bytes):
            # offending character came from an already-decoded unicode buffer
            return (
                f'unacceptable character #x{self.character:04x}: {self.reason!s}\n'
                f' in "{self.name!s}", position {self.position:d}'
            )
        # raw byte that could not be decoded with the detected encoding
        return (
            f"'{self.encoding!s}' codec can't decode byte #x{ord(self.character):02x}: "
            f'{self.reason!s}\n'
            f' in "{self.name!s}", position {self.position:d}'
        )
+
+
class Reader:
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.

    # Reader accepts
    # - a `bytes` object,
    # - a `str` object,
    # - a file-like object with its `read` method returning `str`,
    # - a file-like object with its `read` method returning `unicode`.

    # Yeah, it's ugly and slow.

    def __init__(self, stream: Any, loader: Any = None) -> None:
        """Wrap *stream* (str, bytes or file-like) for character access."""
        self.loader = loader
        if self.loader is not None and getattr(self.loader, '_reader', None) is None:
            # register as the loader's reader unless one is already attached
            self.loader._reader = self
        self.reset_reader()
        self.stream: Any = stream  # as .read is called

    def reset_reader(self) -> None:
        """Reset all buffer/position state to 'nothing read yet'."""
        self.name: Any = None
        self.stream_pointer = 0
        self.eof = True
        self.buffer = ""
        self.pointer = 0
        self.raw_buffer: Any = None
        self.raw_decode = None
        self.encoding: Optional[Text] = None
        self.index = 0
        self.line = 0
        self.column = 0

    @property
    def stream(self) -> Any:
        try:
            return self._stream
        except AttributeError:
            raise YAMLStreamError('input stream needs to be specified')

    @stream.setter
    def stream(self, val: Any) -> None:
        """Accept str/bytes/file-like input; str and bytes are buffered whole."""
        if val is None:
            return
        self._stream = None
        if isinstance(val, str):
            self.name = '<unicode string>'
            self.check_printable(val)
            self.buffer = val + '\0'
        elif isinstance(val, bytes):
            self.name = '<byte string>'
            self.raw_buffer = val
            self.determine_encoding()
        else:
            if not hasattr(val, 'read'):
                raise YAMLStreamError('stream argument needs to have a read() method')
            self._stream = val
            self.name = getattr(self.stream, 'name', '<file>')
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index: int = 0) -> Text:
        """Return the character *index* positions ahead without consuming it."""
        try:
            return self.buffer[self.pointer + index]
        except IndexError:
            self.update(index + 1)
            return self.buffer[self.pointer + index]

    def prefix(self, length: int = 1) -> Any:
        """Return the next *length* characters without consuming them."""
        if self.pointer + length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer : self.pointer + length]

    def forward_1_1(self, length: int = 1) -> None:
        """Advance *length* characters, counting YAML 1.1 line breaks
        (NEL/LS/PS are breaks too); BOM characters do not advance the column.
        """
        if self.pointer + length + 1 >= len(self.buffer):
            self.update(length + 1)
        while length != 0:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            if ch in '\n\x85\u2028\u2029' or (
                ch == '\r' and self.buffer[self.pointer] != '\n'
            ):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                self.column += 1
            length -= 1

    def forward(self, length: int = 1) -> None:
        """Advance *length* characters, counting only \\n and lone \\r as breaks."""
        if self.pointer + length + 1 >= len(self.buffer):
            self.update(length + 1)
        while length != 0:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            if ch == '\n' or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                self.column += 1
            length -= 1

    def get_mark(self) -> Any:
        """Return a mark for the current position.

        String input (no underlying stream) gets a StringMark carrying the
        buffer so error snippets can be shown; file input gets a FileMark.
        """
        if self.stream is None:
            return StringMark(
                self.name, self.index, self.line, self.column, self.buffer, self.pointer,
            )
        else:
            return FileMark(self.name, self.index, self.line, self.column)

    def determine_encoding(self) -> None:
        """Detect UTF-16 BOMs on byte input; default to UTF-8, then decode
        a first chunk into the character buffer.
        """
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode  # type: ignore
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode  # type: ignore
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = codecs.utf_8_decode  # type: ignore
                self.encoding = 'utf-8'
        self.update(1)

    # characters not allowed in a YAML stream (tabs/newlines/printables are ok)
    NON_PRINTABLE = RegExp(
        '[^\x09\x0A\x0D\x20-\x7E\x85' '\xA0-\uD7FF' '\uE000-\uFFFD' '\U00010000-\U0010FFFF' ']'  # NOQA
    )

    # the printable-ASCII alphabet as bytes, used as a fast translate() filter
    _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii')

    @classmethod
    def _get_non_printable_ascii(cls: Text, data: bytes) -> Optional[Tuple[int, Text]]:  # type: ignore # NOQA
        """Fast path for pure-ASCII *data*: find the first non-printable char."""
        ascii_bytes = data.encode('ascii')  # type: ignore
        non_printables = ascii_bytes.translate(None, cls._printable_ascii)  # type: ignore
        if not non_printables:
            return None
        non_printable = non_printables[:1]
        return ascii_bytes.index(non_printable), non_printable.decode('ascii')

    @classmethod
    def _get_non_printable_regex(cls, data: Text) -> Optional[Tuple[int, Text]]:
        """Generic (slower) path: scan with the NON_PRINTABLE regex."""
        match = cls.NON_PRINTABLE.search(data)
        if not bool(match):
            return None
        return match.start(), match.group()

    @classmethod
    def _get_non_printable(cls, data: Text) -> Optional[Tuple[int, Text]]:
        """Return (index, char) of the first non-printable character, or None."""
        try:
            return cls._get_non_printable_ascii(data)  # type: ignore
        except UnicodeEncodeError:
            # data is not pure ASCII; fall back to the regex scan
            return cls._get_non_printable_regex(data)

    def check_printable(self, data: Any) -> None:
        """Raise ReaderError if *data* contains a disallowed character."""
        non_printable_match = self._get_non_printable(data)
        if non_printable_match is not None:
            start, character = non_printable_match
            position = self.index + (len(self.buffer) - self.pointer) + start
            raise ReaderError(
                self.name,
                position,
                ord(character),
                'unicode',
                'special characters are not allowed',
            )

    def update(self, length: int) -> None:
        """Decode more raw input until at least *length* characters are
        available in ``self.buffer`` (appends '\\0' at end of input).
        """
        if self.raw_buffer is None:
            return
        # drop the already-consumed part of the buffer
        self.buffer = self.buffer[self.pointer :]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    # fix: the original had a second, identical
                    # `elif self.stream is not None` branch here, which was
                    # unreachable dead code (same condition and same body)
                    if self.stream is not None:
                        position = self.stream_pointer - len(self.raw_buffer) + exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character, exc.encoding, exc.reason)
            else:
                # input was already str; no decoding necessary
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size: Optional[int] = None) -> None:
        """Read one chunk from the underlying stream into raw_buffer."""
        if size is None:
            size = 4096
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
+
+
+# try:
+# import psyco
+# psyco.bind(Reader)
+# except ImportError:
+# pass
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/representer.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/representer.py
new file mode 100644
index 0000000000..0d1ca12b15
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/representer.py
@@ -0,0 +1,1127 @@
+# coding: utf-8
+
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.compat import ordereddict
+from ruamel.yaml.compat import nprint, nprintf # NOQA
+from ruamel.yaml.scalarstring import (
+ LiteralScalarString,
+ FoldedScalarString,
+ SingleQuotedScalarString,
+ DoubleQuotedScalarString,
+ PlainScalarString,
+)
+from ruamel.yaml.comments import (
+ CommentedMap,
+ CommentedOrderedMap,
+ CommentedSeq,
+ CommentedKeySeq,
+ CommentedKeyMap,
+ CommentedSet,
+ comment_attrib,
+ merge_attrib,
+ TaggedScalar,
+)
+from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from ruamel.yaml.scalarfloat import ScalarFloat
+from ruamel.yaml.scalarbool import ScalarBoolean
+from ruamel.yaml.timestamp import TimeStamp
+from ruamel.yaml.anchor import Anchor
+
+import collections
+import datetime
+import types
+
+import copyreg
+import base64
+
+from typing import Dict, List, Any, Union, Text, Optional # NOQA
+
+# fmt: off
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError', 'RoundTripRepresenter']
+# fmt: on
+
+
class RepresenterError(YAMLError):
    """Raised when an object cannot be represented as a YAML node."""

    pass
+
+
+class BaseRepresenter:
+
+ yaml_representers: Dict[Any, Any] = {}
+ yaml_multi_representers: Dict[Any, Any] = {}
+
+ def __init__(
+ self: Any,
+ default_style: Any = None,
+ default_flow_style: Any = None,
+ dumper: Any = None,
+ ) -> None:
+ self.dumper = dumper
+ if self.dumper is not None:
+ self.dumper._representer = self
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects: Dict[Any, Any] = {}
+ self.object_keeper: List[Any] = []
+ self.alias_key: Optional[int] = None
+ self.sort_base_mapping_type_on_output = True
+
+ @property
+ def serializer(self) -> Any:
+ try:
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.serializer
+ return self.dumper._serializer
+ except AttributeError:
+ return self # cyaml
+
+ def represent(self, data: Any) -> None:
+ node = self.represent_data(data)
+ self.serializer.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data: Any) -> Any:
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ # if node is None:
+ # raise RepresenterError(
+ # f"recursive objects are not allowed: {data!r}")
+ return node
+ # self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ # if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def represent_key(self, data: Any) -> Any:
+ """
+ David Fraser: Extract a method to represent keys in mappings, so that
+ a subclass can choose not to quote them (for example)
+ used in represent_mapping
+ https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
+ """
+ return self.represent_data(data)
+
+ @classmethod
+ def add_representer(cls, data_type: Any, representer: Any) -> None:
+ if 'yaml_representers' not in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type: Any, representer: Any) -> None:
+ if 'yaml_multi_representers' not in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(
+ self, tag: Any, value: Any, style: Any = None, anchor: Any = None,
+ ) -> ScalarNode:
+ if style is None:
+ style = self.default_style
+ comment = None
+ if style and style[0] in '|>':
+ comment = getattr(value, 'comment', None)
+ if comment:
+ comment = [None, [comment]]
+ if isinstance(tag, str):
+ tag = Tag(suffix=tag)
+ node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(
+ self, tag: Any, sequence: Any, flow_style: Any = None,
+ ) -> SequenceNode:
+ value: List[Any] = []
+ if isinstance(tag, str):
+ tag = Tag(suffix=tag)
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_omap(self, tag: Any, omap: Any, flow_style: Any = None) -> SequenceNode:
+ value: List[Any] = []
+ if isinstance(tag, str):
+ tag = Tag(suffix=tag)
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item_key in omap:
+ item_val = omap[item_key]
+ node_item = self.represent_data({item_key: item_val})
+ # if not (isinstance(node_item, ScalarNode) \
+ # and not node_item.style):
+ # best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag: Any, mapping: Any, flow_style: Any = None) -> MappingNode:
+ value: List[Any] = []
+ if isinstance(tag, str):
+ tag = Tag(suffix=tag)
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ if self.sort_base_mapping_type_on_output:
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
    def ignore_aliases(self, data: Any) -> bool:
        """Base implementation: no value is exempt from anchor/alias tracking."""
        return False
+
+
+class SafeRepresenter(BaseRepresenter):
+ def ignore_aliases(self, data: Any) -> bool:
+ # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
+ # "i.e. two occurrences of the empty tuple may or may not yield the same object"
+ # so "data is ()" should not be used
+ if data is None or (isinstance(data, tuple) and data == ()):
+ return True
+ if isinstance(data, (bytes, str, bool, int, float)):
+ return True
+ return False
+
    def represent_none(self, data: Any) -> ScalarNode:
        """Emit None as the explicit scalar 'null'."""
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
    def represent_str(self, data: Any) -> Any:
        """Emit a str as a plain !!str scalar."""
        return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data: Any) -> ScalarNode:
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ # check py2 only?
+ data = base64.encodestring(data).decode('ascii') # type: ignore
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data: Any, anchor: Optional[Any] = None) -> ScalarNode:
+ try:
+ value = self.dumper.boolean_representation[bool(data)]
+ except AttributeError:
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value, anchor=anchor)
+
    def represent_int(self, data: Any) -> ScalarNode:
        """Emit an int as a decimal !!int scalar."""
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
    # square until repr() saturates: yields the float infinity sentinel that
    # represent_float compares against to detect +/-inf
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value * inf_value):
        inf_value *= inf_value
+
    def represent_float(self, data: Any) -> ScalarNode:
        """Emit a float as a YAML !!float scalar, handling nan/inf specially."""
        # NaN is the only value for which data != data; the second clause
        # guards platforms where NaN compares equal to everything (there,
        # 0.0 == 1.0 both hold for NaN)
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            if getattr(self.serializer, 'use_version', None) == (1, 1):
                if '.' not in value and 'e' in value:
                    # In some cases `repr(data)` omits the decimal part, e.g.
                    #   >>> repr(1e17)
                    #   '1e17'
                    # which is not a valid YAML 1.1 !!float; insert '.0'
                    # before the exponent marker.
                    value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data: Any) -> SequenceNode:
+ # pairs = (len(data) > 0 and isinstance(data, list))
+ # if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ # if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+
+ # value = []
+ # for item_key, item_value in data:
+ # value.append(self.represent_mapping('tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ # return SequenceNode('tag:yaml.org,2002:pairs', value)
+
    def represent_dict(self, data: Any) -> MappingNode:
        """Emit a dict as a plain !!map node."""
        return self.represent_mapping('tag:yaml.org,2002:map', data)
+
    def represent_ordereddict(self, data: Any) -> SequenceNode:
        """Emit an ordered dict as a !!omap sequence of one-key mappings."""
        return self.represent_omap('tag:yaml.org,2002:omap', data)
+
+ def represent_set(self, data: Any) -> MappingNode:
+ value: Dict[Any, None] = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data: Any) -> ScalarNode:
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data: Any) -> ScalarNode:
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(
+ self, tag: Any, data: Any, cls: Any, flow_style: Any = None,
+ ) -> MappingNode:
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
    def represent_undefined(self, data: Any) -> None:
        """Fallback multi-representer: no representer matched, refuse to dump."""
        raise RepresenterError(f'cannot represent an object: {data!s}')
+
+
# Register the representers for Python's builtin types on SafeRepresenter.
# The final add_representer(None, ...) installs represent_undefined as the
# fallback for types with no specific representer.
SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)

SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)

SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)

SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)

SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)

SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)

SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)

SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)

SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)

SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)

SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)

SafeRepresenter.add_representer(
    collections.OrderedDict, SafeRepresenter.represent_ordereddict,
)

SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)

SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)

SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
+
+
+class Representer(SafeRepresenter):
+ def represent_complex(self, data: Any) -> Any:
+ if data.imag == 0.0:
+ data = repr(data.real)
+ elif data.real == 0.0:
+ data = f'{data.imag!r}j'
+ elif data.imag > 0:
+ data = f'{data.real!r}+{data.imag!r}j'
+ else:
+ data = f'{data.real!r}{data.imag!r}j'
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
    def represent_tuple(self, data: Any) -> SequenceNode:
        """Emit a tuple with the python-specific !!python/tuple tag."""
        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data: Any) -> ScalarNode:
+ try:
+ name = f'{data.__module__!s}.{data.__qualname__!s}'
+ except AttributeError:
+ # ToDo: check if this can be reached in Py3
+ name = f'{data.__module__!s}.{data.__name__!s}'
+ return self.represent_scalar('tag:yaml.org,2002:python/name:' + name, "")
+
    def represent_module(self, data: Any) -> ScalarNode:
        """Emit a module reference as an empty python/module:<name> scalar."""
        return self.represent_scalar('tag:yaml.org,2002:python/module:' + data.__name__, "")
+
    def represent_object(self, data: Any) -> Union[SequenceNode, MappingNode]:
        """Represent an arbitrary object via the pickle ``__reduce__`` protocol.

        ``__reduce__``/``__reduce_ex__`` yields a 2-5 element tuple
        ``(function, args, state, listitems, dictitems)``.  Reconstruction
        calls ``function(*args)``, then applies state, listitems and dictitems
        when present.  When ``function.__name__ == '__newobj__'`` the object
        is created with ``args[0].__new__(*args)`` and a
        !!python/object/new node is produced; otherwise
        !!python/object/apply (or plain !!python/object when only a dict
        state remains).  A string result from __reduce__ is not supported.
        """
        cls = type(data)
        if cls in copyreg.dispatch_table:
            reduce: Any = copyreg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError(f'cannot represent object: {data!r}')
        # pad the reduce tuple to exactly five elements
        reduce = (list(reduce) + [None] * 5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = 'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = 'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        try:
            function_name = f'{function.__module__!s}.{function.__qualname__!s}'
        except AttributeError:
            # ToDo: check if this can be reached in Py3
            function_name = f'{function.__module__!s}.{function.__name__!s}'
        if not args and not listitems and not dictitems and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                'tag:yaml.org,2002:python/object:' + function_name, state,
            )
        if not listitems and not dictitems and isinstance(state, dict) and not state:
            return self.represent_sequence(tag + function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag + function_name, value)
+
+
# Representer layers python-specific (!!python/...) representers on top of
# the safe ones; tuple is re-registered so its type is preserved, and the
# object/type multi-representers catch everything else.
Representer.add_representer(complex, Representer.represent_complex)

Representer.add_representer(tuple, Representer.represent_tuple)

Representer.add_representer(type, Representer.represent_name)

Representer.add_representer(types.FunctionType, Representer.represent_name)

Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)

Representer.add_representer(types.ModuleType, Representer.represent_module)

Representer.add_multi_representer(object, Representer.represent_object)

Representer.add_multi_representer(type, Representer.represent_name)
+
+
+class RoundTripRepresenter(SafeRepresenter):
+ # need to add type here and write out the .comment
+ # in serializer and emitter
+
+ def __init__(
+ self, default_style: Any = None, default_flow_style: Any = None, dumper: Any = None,
+ ) -> None:
+ if not hasattr(dumper, 'typ') and default_flow_style is None:
+ default_flow_style = False
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=dumper,
+ )
+
+ def ignore_aliases(self, data: Any) -> bool:
+ try:
+ if data.anchor is not None and data.anchor.value is not None:
+ return False
+ except AttributeError:
+ pass
+ return SafeRepresenter.ignore_aliases(self, data)
+
+ def represent_none(self, data: Any) -> ScalarNode:
+ if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
+ # this will be open ended (although it is not yet)
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+ return self.represent_scalar('tag:yaml.org,2002:null', "")
+
+ def represent_literal_scalarstring(self, data: Any) -> ScalarNode:
+ tag = None
+ style = '|'
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ represent_preserved_scalarstring = represent_literal_scalarstring
+
+ def represent_folded_scalarstring(self, data: Any) -> ScalarNode:
+ tag = None
+ style = '>'
+ anchor = data.yaml_anchor(any=True)
+ for fold_pos in reversed(getattr(data, 'fold_pos', [])):
+ if (
+ data[fold_pos] == ' '
+ and (fold_pos > 0 and not data[fold_pos - 1].isspace())
+ and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
+ ):
+ data = data[:fold_pos] + '\a' + data[fold_pos:]
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_single_quoted_scalarstring(self, data: Any) -> ScalarNode:
+ tag = None
+ style = "'"
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_double_quoted_scalarstring(self, data: Any) -> ScalarNode:
+ tag = None
+ style = '"'
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_plain_scalarstring(self, data: Any) -> ScalarNode:
+ tag = None
+ style = ''
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
    def insert_underscore(
        self, prefix: Any, s: Any, underscore: Any, anchor: Any = None,
    ) -> ScalarNode:
        """Re-insert the underscore grouping recorded at load time into the
        digit string *s* and emit ``prefix + s`` as an !!int scalar.

        *underscore* is ``[spacing, leading, trailing]``: a '_' is inserted
        every *spacing* digits counting from the right, plus an optional
        leading and/or trailing underscore.
        """
        if underscore is None:
            return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
        if underscore[0]:
            sl = list(s)
            pos = len(s) - underscore[0]
            while pos > 0:
                sl.insert(pos, '_')
                pos -= underscore[0]
            s = "".join(sl)
        if underscore[1]:
            s = '_' + s
        if underscore[2]:
            s += '_'
        return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
+
+ def represent_scalar_int(self, data: Any) -> ScalarNode:
+ if data._width is not None:
+ s = f'{data:0{data._width}d}'
+ else:
+ s = format(data, 'd')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore("", s, data._underscore, anchor=anchor)
+
+ def represent_binary_int(self, data: Any) -> ScalarNode:
+ if data._width is not None:
+ # cannot use '{:#0{}b}', that strips the zeros
+ s = f'{data:0{data._width}b}'
+ else:
+ s = format(data, 'b')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0b', s, data._underscore, anchor=anchor)
+
+ def represent_octal_int(self, data: Any) -> ScalarNode:
+ if data._width is not None:
+ # cannot use '{:#0{}o}', that strips the zeros
+ s = f'{data:0{data._width}o}'
+ else:
+ s = format(data, 'o')
+ anchor = data.yaml_anchor(any=True)
+ prefix = '0o'
+ if getattr(self.serializer, 'use_version', None) == (1, 1):
+ prefix = '0'
+ return self.insert_underscore(prefix, s, data._underscore, anchor=anchor)
+
+ def represent_hex_int(self, data: Any) -> ScalarNode:
+ if data._width is not None:
+ # cannot use '{:#0{}x}', that strips the zeros
+ s = f'{data:0{data._width}x}'
+ else:
+ s = format(data, 'x')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+ def represent_hex_caps_int(self, data: Any) -> ScalarNode:
+ if data._width is not None:
+ # cannot use '{:#0{}X}', that strips the zeros
+ s = f'{data:0{data._width}X}'
+ else:
+ s = format(data, 'X')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
    def represent_scalar_float(self, data: Any) -> ScalarNode:
        """Re-emit a ScalarFloat preserving the width, precision, exponent
        notation, leading zeros and signs captured when it was loaded.

        The ScalarFloat bookkeeping attributes (_width, _prec, _exp, _m_sign,
        _e_sign, _m_lead0, _e_width) drive the reconstruction.
        """
        value = None
        anchor = data.yaml_anchor(any=True)
        # NaN: data != data; the second clause guards platforms where NaN
        # compares equal to everything (there, 0.0 == 1.0 both hold)
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        if value:
            return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
        if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
            # no exponent, but trailing dot
            value = f'{data._m_sign if data._m_sign else ""}{abs(int(data)):d}.'
        elif data._exp is None:
            # no exponent, "normal" dot
            prec = data._prec
            ms = data._m_sign if data._m_sign else ""
            if prec < 0:
                value = f'{ms}{abs(int(data)):0{data._width - len(ms)}d}'
            else:
                # -1 for the dot
                value = f'{ms}{abs(data):0{data._width - len(ms)}.{data._width - prec - 1}f}'
            if prec == 0 or (prec == 1 and ms != ""):
                value = value.replace('0.', '.')
            while len(value) < data._width:
                value += '0'
        else:
            # exponent notation: split formatted mantissa and exponent
            (
                m,
                es,
            ) = f'{data:{data._width}.{data._width + (1 if data._m_sign else 0)}e}'.split('e')
            w = data._width if data._prec > 0 else (data._width + 1)
            if data < 0:
                w += 1
            m = m[:w]
            e = int(es)
            m1, m2 = m.split('.')  # always second?
            while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
                m2 += '0'
            if data._m_sign and data > 0:
                m1 = '+' + m1
            esgn = '+' if data._e_sign else ""
            if data._prec < 0:  # mantissa without dot
                if m2 != '0':
                    e -= len(m2)
                else:
                    m2 = ""
                while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
                    m2 += '0'
                    e -= 1
                value = m1 + m2 + data._exp + f'{e:{esgn}0{data._e_width}d}'
            elif data._prec == 0:  # mantissa with trailing dot
                e -= len(m2)
                value = m1 + m2 + '.' + data._exp + f'{e:{esgn}0{data._e_width}d}'
            else:
                if data._m_lead0 > 0:
                    # restore the leading zeros stripped by formatting
                    m2 = '0' * (data._m_lead0 - 1) + m1 + m2
                    m1 = '0'
                    m2 = m2[: -data._m_lead0]  # these should be zeros
                    e += data._m_lead0
                while len(m1) < data._prec:
                    m1 += m2[0]
                    m2 = m2[1:]
                    e -= 1
                value = m1 + '.' + m2 + data._exp + f'{e:{esgn}0{data._e_width}d}'

        if value is None:
            # fallback: plain lowercase repr
            value = repr(data).lower()
        return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
+
    def represent_sequence(
        self, tag: Any, sequence: Any, flow_style: Any = None,
    ) -> SequenceNode:
        """Represent a (possibly CommentedSeq) sequence, re-attaching the
        comments and anchor that were captured on loading."""
        value: List[Any] = []
        # if the flow_style is None, the flow style tacked on to the object
        # explicitly will be taken. If that is None as well the default flow
        # style rules
        try:
            flow_style = sequence.fa.flow_style(flow_style)
        except AttributeError:
            flow_style = flow_style
        try:
            anchor = sequence.yaml_anchor()
        except AttributeError:
            anchor = None
        if isinstance(tag, str):
            tag = Tag(suffix=tag)
        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        try:
            # any AttributeError below (no comment attribute) falls through
            # to the bare-sequence path with empty item_comments
            comment = getattr(sequence, comment_attrib)
            node.comment = comment.comment
            # reset any comment already printed information
            if node.comment and node.comment[1]:
                for ct in node.comment[1]:
                    ct.reset()
            item_comments = comment.items
            for v in item_comments.values():
                if v and v[1]:
                    for ct in v[1]:
                        ct.reset()
            # NOTE(review): this re-assignment repeats the one above; kept as-is
            item_comments = comment.items
            if node.comment is None:
                node.comment = comment.comment
            else:
                # as we are potentially going to extend this, make a new list
                node.comment = comment.comment[:]
            try:
                node.comment.append(comment.end)
            except AttributeError:
                pass
        except AttributeError:
            item_comments = {}
        for idx, item in enumerate(sequence):
            node_item = self.represent_data(item)
            self.merge_comments(node_item, item_comments.get(idx))
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if len(sequence) != 0 and self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
+
    def merge_comments(self, node: Any, comments: Any) -> Any:
        """Merge per-item *comments* onto *node*, preferring any comment the
        node already carries at the same slot."""
        if comments is None:
            assert hasattr(node, 'comment')
            return node
        if getattr(node, 'comment', None) is not None:
            for idx, val in enumerate(comments):
                if idx >= len(node.comment):
                    continue
                nc = node.comment[idx]
                if nc is not None:
                    # an existing node comment wins; it must agree with val
                    assert val is None or val == nc
                    comments[idx] = nc
        node.comment = comments
        return node
+
    def represent_key(self, data: Any) -> Any:
        """Complex keys loaded as flow sequences/mappings keep their flow form;
        they are never aliased (alias_key is cleared first)."""
        if isinstance(data, CommentedKeySeq):
            self.alias_key = None
            return self.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True)
        if isinstance(data, CommentedKeyMap):
            self.alias_key = None
            return self.represent_mapping('tag:yaml.org,2002:map', data, flow_style=True)
        return SafeRepresenter.represent_key(self, data)
+
    def represent_mapping(self, tag: Any, mapping: Any, flow_style: Any = None) -> MappingNode:
        """Represent a (possibly CommentedMap) mapping, preserving key order,
        comments, anchor and any '<<' merge keys recorded at load time."""
        value: List[Any] = []
        try:
            flow_style = mapping.fa.flow_style(flow_style)
        except AttributeError:
            flow_style = flow_style
        try:
            anchor = mapping.yaml_anchor()
        except AttributeError:
            anchor = None
        if isinstance(tag, str):
            tag = Tag(suffix=tag)
        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        # no sorting! !!
        try:
            comment = getattr(mapping, comment_attrib)
            if node.comment is None:
                node.comment = comment.comment
            else:
                # as we are potentially going to extend this, make a new list
                node.comment = comment.comment[:]
            if node.comment and node.comment[1]:
                for ct in node.comment[1]:
                    ct.reset()
            item_comments = comment.items
            if self.dumper.comment_handling is None:
                for v in item_comments.values():
                    if v and v[1]:
                        for ct in v[1]:
                            ct.reset()
                try:
                    node.comment.append(comment.end)
                except AttributeError:
                    pass
            else:
                # NEWCMNT
                pass
        except AttributeError:
            item_comments = {}
        # merge keys ('<<') were split off on loading; re-inserted below
        merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
        try:
            merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
        except IndexError:
            merge_pos = 0
        item_count = 0
        if bool(merge_list):
            items = mapping.non_merged_items()
        else:
            items = mapping.items()
        for item_key, item_value in items:
            item_count += 1
            node_key = self.represent_key(item_key)
            node_value = self.represent_data(item_value)
            item_comment = item_comments.get(item_key)
            if item_comment:
                # assert getattr(node_key, 'comment', None) is None
                # issue 351 did throw this because the comment from the list item was
                # moved to the dict
                node_key.comment = item_comment[:2]
                nvc = getattr(node_value, 'comment', None)
                if nvc is not None:  # end comment already there
                    nvc[0] = item_comment[2]
                    nvc[1] = item_comment[3]
                else:
                    node_value.comment = item_comment[2:]
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        if bool(merge_list):
            # because of the call to represent_data here, the anchors
            # are marked as being used and thereby created
            if len(merge_list) == 1:
                arg = self.represent_data(merge_list[0])
            else:
                arg = self.represent_data(merge_list)
            arg.flow_style = True
            value.insert(
                merge_pos, (ScalarNode(Tag(suffix='tag:yaml.org,2002:merge'), '<<'), arg),
            )
        return node
+
    def represent_omap(self, tag: Any, omap: Any, flow_style: Any = None) -> SequenceNode:
        """Represent an ordered mapping as a !!omap sequence of single-pair
        mappings, re-attaching comments and anchor captured on loading."""
        value: List[Any] = []
        try:
            flow_style = omap.fa.flow_style(flow_style)
        except AttributeError:
            flow_style = flow_style
        try:
            anchor = omap.yaml_anchor()
        except AttributeError:
            anchor = None
        if isinstance(tag, str):
            tag = Tag(suffix=tag)
        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        try:
            comment = getattr(omap, comment_attrib)
            if node.comment is None:
                node.comment = comment.comment
            else:
                # as we are potentially going to extend this, make a new list
                node.comment = comment.comment[:]
            if node.comment and node.comment[1]:
                for ct in node.comment[1]:
                    ct.reset()
            item_comments = comment.items
            for v in item_comments.values():
                if v and v[1]:
                    for ct in v[1]:
                        ct.reset()
            try:
                node.comment.append(comment.end)
            except AttributeError:
                pass
        except AttributeError:
            item_comments = {}
        for item_key in omap:
            item_val = omap[item_key]
            node_item = self.represent_data({item_key: item_val})
            # node_item.flow_style = False
            # node item has two scalars in value: node_key and node_value
            item_comment = item_comments.get(item_key)
            if item_comment:
                if item_comment[1]:
                    node_item.comment = [None, item_comment[1]]
                assert getattr(node_item.value[0][0], 'comment', None) is None
                node_item.value[0][0].comment = [item_comment[0], None]
                nvc = getattr(node_item.value[0][1], 'comment', None)
                if nvc is not None:  # end comment already there
                    nvc[0] = item_comment[2]
                    nvc[1] = item_comment[3]
                else:
                    node_item.value[0][1].comment = item_comment[2:]
            # if not (isinstance(node_item, ScalarNode) \
            #    and not node_item.style):
            #     best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
+
    def represent_set(self, setting: Any) -> MappingNode:
        """Represent a CommentedSet as a !!set mapping ('?'-style keys with
        None values), re-attaching comments and anchor captured on loading."""
        flow_style = False
        tag = Tag(suffix='tag:yaml.org,2002:set')
        # return self.represent_mapping(tag, value)
        value: List[Any] = []
        flow_style = setting.fa.flow_style(flow_style)
        try:
            anchor = setting.yaml_anchor()
        except AttributeError:
            anchor = None
        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        # no sorting! !!
        try:
            comment = getattr(setting, comment_attrib)
            if node.comment is None:
                node.comment = comment.comment
            else:
                # as we are potentially going to extend this, make a new list
                node.comment = comment.comment[:]
            if node.comment and node.comment[1]:
                for ct in node.comment[1]:
                    ct.reset()
            item_comments = comment.items
            for v in item_comments.values():
                if v and v[1]:
                    for ct in v[1]:
                        ct.reset()
            try:
                node.comment.append(comment.end)
            except AttributeError:
                pass
        except AttributeError:
            item_comments = {}
        for item_key in setting.odict:
            node_key = self.represent_key(item_key)
            # every set entry maps to a None value
            node_value = self.represent_data(None)
            item_comment = item_comments.get(item_key)
            if item_comment:
                assert getattr(node_key, 'comment', None) is None
                node_key.comment = item_comment[:2]
            node_key.style = '?'
            node_value.style = '-' if flow_style else '?'
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        # NOTE(review): self-assignment has no effect; flow_style is not
        # recomputed from best_style here (kept as upstream wrote it)
        best_style = best_style
        return node
+
+ def represent_dict(self, data: Any) -> MappingNode:
+ """write out tag if saved on loading"""
+ try:
+ _ = data.tag
+ except AttributeError:
+ tag = Tag(suffix='tag:yaml.org,2002:map')
+ else:
+ if data.tag.trval:
+ if data.tag.startswith('!!'):
+ tag = Tag(suffix='tag:yaml.org,2002:' + data.tag.trval[2:])
+ else:
+ tag = data.tag
+ else:
+ tag = Tag(suffix='tag:yaml.org,2002:map')
+ return self.represent_mapping(tag, data)
+
+ def represent_list(self, data: Any) -> SequenceNode:
+ try:
+ _ = data.tag
+ except AttributeError:
+ tag = Tag(suffix='tag:yaml.org,2002:seq')
+ else:
+ if data.tag.trval:
+ if data.tag.startswith('!!'):
+ tag = Tag(suffix='tag:yaml.org,2002:' + data.tag.trval[2:])
+ else:
+ tag = data.tag
+ else:
+ tag = Tag(suffix='tag:yaml.org,2002:seq')
+ return self.represent_sequence(tag, data)
+
+ def represent_datetime(self, data: Any) -> ScalarNode:
+ inter = 'T' if data._yaml['t'] else ' '
+ _yaml = data._yaml
+ if _yaml['delta']:
+ data += _yaml['delta']
+ value = data.isoformat(inter)
+ else:
+ value = data.isoformat(inter)
+ if _yaml['tz']:
+ value += _yaml['tz']
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
    def represent_tagged_scalar(self, data: Any) -> ScalarNode:
        """Re-emit a TaggedScalar with its original tag, style and anchor.

        Tags with the secondary '!!' handle are written in expanded
        '<handle> <suffix>' form; a value without tag information falls
        back to tag None.
        """
        try:
            if data.tag.handle == '!!':
                tag = f'{data.tag.handle} {data.tag.suffix}'
            else:
                tag = data.tag
        except AttributeError:
            tag = None
        try:
            anchor = data.yaml_anchor()
        except AttributeError:
            anchor = None
        return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
+
    def represent_scalar_bool(self, data: Any) -> ScalarNode:
        """Emit a ScalarBoolean, preserving its anchor when present."""
        try:
            anchor = data.yaml_anchor()
        except AttributeError:
            anchor = None
        return SafeRepresenter.represent_bool(self, data, anchor=anchor)
+
+ def represent_yaml_object(
+ self, tag: Any, data: Any, cls: Any, flow_style: Optional[Any] = None,
+ ) -> MappingNode:
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ anchor = state.pop(Anchor.attrib, None)
+ res = self.represent_mapping(tag, state, flow_style=flow_style)
+ if anchor is not None:
+ res.anchor = anchor
+ return res
+
+
# Register the round-trip representers: the ruamel wrapper types that carry
# style, width, underscore, comment and anchor information across a
# load/dump cycle each get a dedicated representer.
RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)

RoundTripRepresenter.add_representer(
    LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring,
)

RoundTripRepresenter.add_representer(
    FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring,
)

RoundTripRepresenter.add_representer(
    SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring,
)

RoundTripRepresenter.add_representer(
    DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring,
)

RoundTripRepresenter.add_representer(
    PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring,
)

# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)

RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int)

RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int)

RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)

RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)

RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int)

RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float)

RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool)

RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)

RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)

RoundTripRepresenter.add_representer(
    CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict,
)

RoundTripRepresenter.add_representer(
    collections.OrderedDict, RoundTripRepresenter.represent_ordereddict,
)

RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)

RoundTripRepresenter.add_representer(
    TaggedScalar, RoundTripRepresenter.represent_tagged_scalar,
)

RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/resolver.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/resolver.py
new file mode 100644
index 0000000000..71ba4c7b57
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/resolver.py
@@ -0,0 +1,389 @@
+# coding: utf-8
+
+import re
+
+from typing import Any, Dict, List, Union, Text, Optional # NOQA
+from ruamel.yaml.compat import VersionType # NOQA
+
+from ruamel.yaml.tag import Tag
+from ruamel.yaml.compat import _DEFAULT_YAML_VERSION # NOQA
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA
+from ruamel.yaml.util import RegExp # NOQA
+
+__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
+
+
# fmt: off
# resolvers consist of
# - a list of applicable version
# - a tag
# - a regexp
# - a list of first characters to match
# The version list selects which YAML spec(s) a rule applies to; the 1.1
# rules accept extra forms (y/n style booleans, sexagesimal numbers, bare
# octals) that the corresponding 1.2 rules below no longer match.
implicit_resolvers = [
    ([(1, 2)],
        'tag:yaml.org,2002:bool',
        RegExp('''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
        list('tTfF')),
    ([(1, 1)],
        'tag:yaml.org,2002:bool',
        RegExp('''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO
        |true|True|TRUE|false|False|FALSE
        |on|On|ON|off|Off|OFF)$''', re.X),
        list('yYnNtTfFoO')),
    ([(1, 2)],
        'tag:yaml.org,2002:float',
        RegExp('''^(?:
         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
        |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)?
        |[-+]?\\.(?:inf|Inf|INF)
        |\\.(?:nan|NaN|NAN))$''', re.X),
        list('-+0123456789.')),
    ([(1, 1)],
        'tag:yaml.org,2002:float',
        RegExp('''^(?:
         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
        |\\.[0-9_]+(?:[eE][-+][0-9]+)?
        |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*  # sexagesimal float
        |[-+]?\\.(?:inf|Inf|INF)
        |\\.(?:nan|NaN|NAN))$''', re.X),
        list('-+0123456789.')),
    ([(1, 2)],
        'tag:yaml.org,2002:int',
        RegExp('''^(?:[-+]?0b[0-1_]+
        |[-+]?0o?[0-7_]+
        |[-+]?[0-9_]+
        |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
        list('-+0123456789')),
    ([(1, 1)],
        'tag:yaml.org,2002:int',
        RegExp('''^(?:[-+]?0b[0-1_]+
        |[-+]?0?[0-7_]+
        |[-+]?(?:0|[1-9][0-9_]*)
        |[-+]?0x[0-9a-fA-F_]+
        |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),  # sexagesimal int
        list('-+0123456789')),
    ([(1, 2), (1, 1)],
        'tag:yaml.org,2002:merge',
        RegExp('^(?:<<)$'),
        ['<']),
    ([(1, 2), (1, 1)],
        'tag:yaml.org,2002:null',
        RegExp('''^(?: ~
        |null|Null|NULL
        | )$''', re.X),
        ['~', 'n', 'N', '']),
    ([(1, 2), (1, 1)],
        'tag:yaml.org,2002:timestamp',
        RegExp('''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
        |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
        (?:[Tt]|[ \\t]+)[0-9][0-9]?
        :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
        (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
        list('0123456789')),
    ([(1, 2), (1, 1)],
        'tag:yaml.org,2002:value',
        RegExp('^(?:=)$'),
        ['=']),
    # The following resolver is only for documentation purposes. It cannot work
    # because plain scalars cannot start with '!', '&', or '*'.
    ([(1, 2), (1, 1)],
        'tag:yaml.org,2002:yaml',
        RegExp('^(?:!|&|\\*)$'),
        list('!&*')),
]
# fmt: on
+
+
class ResolverError(YAMLError):
    """Raised for invalid arguments to ``add_path_resolver`` (bad path
    element, node checker, index checker, or node kind)."""

    pass
+
+
class BaseResolver:
    """Decide which tag a node gets when none is given explicitly.

    Implicit resolution matches a scalar's value against registered regexps,
    indexed by the value's first character in ``yaml_implicit_resolvers``
    (key ``None`` holds rules applied regardless of first character).
    Path resolution (experimental) assigns tags based on the node's position
    in the document via ``yaml_path_resolvers``.
    """

    DEFAULT_SCALAR_TAG = Tag(suffix='tag:yaml.org,2002:str')
    DEFAULT_SEQUENCE_TAG = Tag(suffix='tag:yaml.org,2002:seq')
    DEFAULT_MAPPING_TAG = Tag(suffix='tag:yaml.org,2002:map')

    yaml_implicit_resolvers: Dict[Any, Any] = {}
    yaml_path_resolvers: Dict[Any, Any] = {}

    def __init__(self: Any, loadumper: Any = None) -> None:
        self.loadumper = loadumper
        if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None:
            self.loadumper._resolver = self.loadumper
        self._loader_version: Any = None
        # stacks maintained by descend_resolver()/ascend_resolver()
        self.resolver_exact_paths: List[Any] = []
        self.resolver_prefix_paths: List[Any] = []

    @property
    def parser(self) -> Any:
        if self.loadumper is not None:
            # new-style API objects carry 'typ' and expose .parser directly
            if hasattr(self.loadumper, 'typ'):
                return self.loadumper.parser
            return self.loadumper._parser
        return None

    @classmethod
    def add_implicit_resolver_base(cls, tag: Any, regexp: Any, first: Any) -> None:
        """Register ``(tag, regexp)`` under each character in ``first`` on this
        class only (``first is None`` registers under the ``None`` key)."""
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            # give this class its own shallow copy so the registration does
            # not leak into base classes (deepcopy doesn't work here)
            cls.yaml_implicit_resolvers = {
                k: cls.yaml_implicit_resolvers[k][:] for k in cls.yaml_implicit_resolvers
            }
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))

    @classmethod
    def add_implicit_resolver(cls, tag: Any, regexp: Any, first: Any) -> None:
        """Like ``add_implicit_resolver_base``, but also appends the rule to
        the module-level ``implicit_resolvers`` table (for both YAML 1.1 and
        1.2) so that ``VersionedResolver`` instances pick it up as well."""
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            # deepcopy doesn't work here
            cls.yaml_implicit_resolvers = {
                k: cls.yaml_implicit_resolvers[k][:] for k in cls.yaml_implicit_resolvers
            }
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
        implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))

    @classmethod
    def add_path_resolver(cls, tag: Any, path: Any, kind: Any = None) -> None:
        # Note: `add_path_resolver` is experimental.  The API could be changed.
        # `new_path` is a pattern that is matched against the path from the
        # root to the node that is being considered.  `node_path` elements are
        # tuples `(node_check, index_check)`.  `node_check` is a node class:
        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
        # matches any kind of a node.  `index_check` could be `None`, a boolean
        # value, a string value, or a number.  `None` and `False` match against
        # any _value_ of sequence and mapping nodes.  `True` matches against
        # any _key_ of a mapping node.  A string `index_check` matches against
        # a mapping value that corresponds to a scalar key which content is
        # equal to the `index_check` value.  An integer `index_check` matches
        # against a sequence value with the index equal to `index_check`.
        if 'yaml_path_resolvers' not in cls.__dict__:
            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
        new_path: List[Any] = []
        for element in path:
            if isinstance(element, (list, tuple)):
                if len(element) == 2:
                    node_check, index_check = element
                elif len(element) == 1:
                    node_check = element[0]
                    index_check = True
                else:
                    raise ResolverError(f'Invalid path element: {element!s}')
            else:
                node_check = None
                index_check = element
            # builtin types str/list/dict are accepted as shorthands
            if node_check is str:
                node_check = ScalarNode
            elif node_check is list:
                node_check = SequenceNode
            elif node_check is dict:
                node_check = MappingNode
            elif (
                node_check not in [ScalarNode, SequenceNode, MappingNode]
                and not isinstance(node_check, str)
                and node_check is not None
            ):
                raise ResolverError(f'Invalid node checker: {node_check!s}')
            if not isinstance(index_check, (str, int)) and index_check is not None:
                raise ResolverError(f'Invalid index checker: {index_check!s}')
            new_path.append((node_check, index_check))
        if kind is str:
            kind = ScalarNode
        elif kind is list:
            kind = SequenceNode
        elif kind is dict:
            kind = MappingNode
        elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
            raise ResolverError(f'Invalid node kind: {kind!s}')
        cls.yaml_path_resolvers[tuple(new_path), kind] = tag

    def descend_resolver(self, current_node: Any, current_index: Any) -> None:
        """Push path-resolver matching state for one level deeper in the tree."""
        if not self.yaml_path_resolvers:
            return
        exact_paths = {}
        prefix_paths = []
        if current_node:
            depth = len(self.resolver_prefix_paths)
            for path, kind in self.resolver_prefix_paths[-1]:
                if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
                    if len(path) > depth:
                        prefix_paths.append((path, kind))
                    else:
                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
        else:
            for path, kind in self.yaml_path_resolvers:
                if not path:
                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
                else:
                    prefix_paths.append((path, kind))
        self.resolver_exact_paths.append(exact_paths)
        self.resolver_prefix_paths.append(prefix_paths)

    def ascend_resolver(self) -> None:
        """Pop the matching state pushed by ``descend_resolver``."""
        if not self.yaml_path_resolvers:
            return
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()

    def check_resolver_prefix(
        self, depth: int, path: Any, kind: Any, current_node: Any, current_index: Any,
    ) -> bool:
        """Return True if ``path[depth - 1]`` matches the current node/index pair."""
        node_check, index_check = path[depth - 1]
        if isinstance(node_check, str):
            if current_node.tag != node_check:
                return False
        elif node_check is not None:
            if not isinstance(current_node, node_check):
                return False
        if index_check is True and current_index is not None:
            return False
        if (index_check is False or index_check is None) and current_index is None:
            return False
        if isinstance(index_check, str):
            if not (
                isinstance(current_index, ScalarNode) and index_check == current_index.value
            ):
                return False
        elif isinstance(index_check, int) and not isinstance(index_check, bool):
            if index_check != current_index:
                return False
        return True

    def resolve(self, kind: Any, value: Any, implicit: Any) -> Any:
        """Return the ``Tag`` for a node of class ``kind`` with ``value``,
        falling back to the default scalar/sequence/mapping tag when no
        implicit or path rule matches."""
        if kind is ScalarNode and implicit[0]:
            if value == "":
                resolvers = self.yaml_implicit_resolvers.get("", [])
            else:
                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
            # Concatenate into a NEW list: the previous augmented assignment
            # ('resolvers += ...') mutated the list returned by .get() — i.e.
            # the registry itself — appending the None-keyed rules to the
            # per-character rule list on every resolve() call.
            resolvers = resolvers + self.yaml_implicit_resolvers.get(None, [])
            for tag, regexp in resolvers:
                if regexp.match(value):
                    return Tag(suffix=tag)
            implicit = implicit[1]
        if bool(self.yaml_path_resolvers):
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return Tag(suffix=exact_paths[kind])
            if None in exact_paths:
                return Tag(suffix=exact_paths[None])
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG

    @property
    def processing_version(self) -> Any:
        # the base resolver is version-agnostic; VersionedResolver overrides this
        return None
+
+
class Resolver(BaseResolver):
    """Default resolver, pre-loaded (by the loop below) with the implicit
    rules that apply to YAML 1.2."""

    pass
+
+
# Pre-load Resolver with every module-level rule that applies to YAML 1.2
# (the default version).
for _versions, _tag, _regexp, _first in implicit_resolvers:
    if (1, 2) in _versions:
        Resolver.add_implicit_resolver_base(_tag, _regexp, _first)
+
+
class VersionedResolver(BaseResolver):
    """
    Contrary to the "normal" resolver, this resolver delays loading the
    pattern matching rules.  That way it can decide to load the 1.1 rules or
    the (default) 1.2 rules, which no longer support octal without 0o,
    sexagesimals and Yes/No/On/Off booleans.
    """

    def __init__(
        self, version: Optional[VersionType] = None, loader: Any = None, loadumper: Any = None,
    ) -> None:
        # 'loadumper' is the newer keyword; it is accepted as alias for 'loader'
        if loader is None and loadumper is not None:
            loader = loadumper
        BaseResolver.__init__(self, loader)
        self._loader_version = self.get_loader_version(version)
        # version tuple -> {first_char: [(tag, regexp), ...]}, filled lazily
        self._version_implicit_resolver: Dict[Any, Any] = {}

    def add_version_implicit_resolver(
        self, version: VersionType, tag: Any, regexp: Any, first: Any,
    ) -> None:
        """Register ``(tag, regexp)`` for ``version`` under each character in
        ``first`` (``first is None`` registers under the ``None`` key)."""
        if first is None:
            first = [None]
        impl_resolver = self._version_implicit_resolver.setdefault(version, {})
        for ch in first:
            impl_resolver.setdefault(ch, []).append((tag, regexp))

    def get_loader_version(self, version: Optional[VersionType]) -> Any:
        """Normalize a version given as None, tuple, list or '1.2'-style
        string into ``None`` or a tuple of ints."""
        if version is None or isinstance(version, tuple):
            return version
        if isinstance(version, list):
            return tuple(version)
        # assume string
        return tuple(map(int, version.split('.')))

    @property
    def versioned_resolver(self) -> Any:
        """
        select the resolver table based on the version we are parsing,
        building it from the module-level rules on first use
        """
        version = self.processing_version
        if isinstance(version, str):
            version = tuple(map(int, version.split('.')))
        if version not in self._version_implicit_resolver:
            for x in implicit_resolvers:
                if version in x[0]:
                    self.add_version_implicit_resolver(version, x[1], x[2], x[3])
        return self._version_implicit_resolver[version]

    def resolve(self, kind: Any, value: Any, implicit: Any) -> Any:
        """Return the ``Tag`` for a node, using the version-specific table."""
        if kind is ScalarNode and implicit[0]:
            if value == "":
                resolvers = self.versioned_resolver.get("", [])
            else:
                resolvers = self.versioned_resolver.get(value[0], [])
            # Concatenate into a NEW list: the previous augmented assignment
            # ('resolvers += ...') mutated the cached per-version list in
            # place, appending the None-keyed rules again on every call.
            resolvers = resolvers + self.versioned_resolver.get(None, [])
            for tag, regexp in resolvers:
                if regexp.match(value):
                    return Tag(suffix=tag)
            implicit = implicit[1]
        if bool(self.yaml_path_resolvers):
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return Tag(suffix=exact_paths[kind])
            if None in exact_paths:
                return Tag(suffix=exact_paths[None])
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG

    @property
    def processing_version(self) -> Any:
        """YAML version in effect: the scanner's %YAML directive, the loader
        or serializer setting, the version passed at construction, or the
        package default — in that order."""
        try:
            version = self.loadumper._scanner.yaml_version
        except AttributeError:
            try:
                if hasattr(self.loadumper, 'typ'):
                    version = self.loadumper.version
                else:
                    version = self.loadumper._serializer.use_version  # dumping
            except AttributeError:
                version = None
        if version is None:
            version = self._loader_version
        if version is None:
            version = _DEFAULT_YAML_VERSION
        return version
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarbool.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarbool.py
new file mode 100644
index 0000000000..083d3cbb25
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarbool.py
@@ -0,0 +1,42 @@
+# coding: utf-8
+
+"""
+You cannot subclass bool, and this is necessary for round-tripping anchored
+bool values (and also if you want to preserve the original way of writing)
+
+bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well.
+
+You can use these in an if statement, but not when testing equivalence
+"""
+
+from ruamel.yaml.anchor import Anchor
+
+from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarBoolean']
+
+
class ScalarBoolean(int):
    """An ``int`` standing in for a bool (``bool`` cannot be subclassed),
    so that anchored boolean values can be round-tripped.

    Works in a truth context, but is not ``==`` to ``True``/``False`` the way
    a real bool is documented at the top of this module.
    """

    def __new__(cls: Any, *args: Any, **kw: Any) -> Any:
        anchor_value = kw.pop('anchor', None)
        obj = int.__new__(cls, *args, **kw)
        if anchor_value is not None:
            obj.yaml_set_anchor(anchor_value, always_dump=True)
        return obj

    @property
    def anchor(self) -> Any:
        # create the anchor attribute lazily on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any: bool = False) -> Any:
        if not hasattr(self, Anchor.attrib):
            return None
        return self.anchor if any or self.anchor.always_dump else None

    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        self.anchor.value = value
        self.anchor.always_dump = always_dump
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarfloat.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarfloat.py
new file mode 100644
index 0000000000..10b4c290e0
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarfloat.py
@@ -0,0 +1,103 @@
+# coding: utf-8
+
+import sys
+from ruamel.yaml.anchor import Anchor
+
+from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat']
+
+
class ScalarFloat(float):
    """A ``float`` that carries the formatting seen in the YAML source
    (overall width, precision, mantissa sign and leading zero, exponent
    letter/width/sign, underscore positions) plus an optional anchor, so the
    value can be round-tripped in its original spelling.

    In-place arithmetic (``+=`` etc.) returns a plain ``float``: the result
    no longer matches the stored formatting.
    """

    def __new__(cls: Any, *args: Any, **kw: Any) -> Any:
        width = kw.pop('width', None)
        prec = kw.pop('prec', None)
        m_sign = kw.pop('m_sign', None)
        m_lead0 = kw.pop('m_lead0', 0)
        exp = kw.pop('exp', None)
        e_width = kw.pop('e_width', None)
        e_sign = kw.pop('e_sign', None)
        underscore = kw.pop('underscore', None)
        anchor = kw.pop('anchor', None)
        v = float.__new__(cls, *args, **kw)
        v._width = width
        v._prec = prec
        v._m_sign = m_sign
        v._m_lead0 = m_lead0
        v._exp = exp
        v._e_width = e_width
        v._e_sign = e_sign
        v._underscore = underscore
        if anchor is not None:
            v.yaml_set_anchor(anchor, always_dump=True)
        return v

    # The in-place operators deliberately degrade to plain floats.  The
    # original implementation contained unreachable metadata-copying code
    # after each return statement; it has been removed.

    def __iadd__(self, a: Any) -> Any:  # type: ignore
        return float(self) + a

    def __ifloordiv__(self, a: Any) -> Any:  # type: ignore
        return float(self) // a

    def __imul__(self, a: Any) -> Any:  # type: ignore
        return float(self) * a

    def __ipow__(self, a: Any) -> Any:  # type: ignore
        return float(self) ** a

    def __isub__(self, a: Any) -> Any:  # type: ignore
        return float(self) - a

    @property
    def anchor(self) -> Any:
        # create the anchor attribute lazily on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any: bool = False) -> Any:
        if not hasattr(self, Anchor.attrib):
            return None
        if any or self.anchor.always_dump:
            return self.anchor
        return None

    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        self.anchor.value = value
        self.anchor.always_dump = always_dump

    def dump(self, out: Any = sys.stdout) -> None:
        """Write a one-line debug representation of value + formatting info."""
        out.write(
            f'ScalarFloat({self}| w:{self._width}, p:{self._prec}, '  # type: ignore
            f's:{self._m_sign}, lz:{self._m_lead0}, _:{self._underscore}|{self._exp}'
            f', w:{self._e_width}, s:{self._e_sign})\n',
        )
+
+
class ExponentialFloat(ScalarFloat):
    """Marker subclass for a float written in exponential notation
    (presumably with a lower-case 'e', cf. ExponentialCapsFloat — confirm
    against the representer); adds no state beyond ScalarFloat."""

    def __new__(cls, value: Any, width: Any = None, underscore: Any = None) -> Any:
        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
+
+
class ExponentialCapsFloat(ScalarFloat):
    """Marker subclass for a float written in exponential notation
    (presumably with an upper-case 'E', cf. ExponentialFloat — confirm
    against the representer); adds no state beyond ScalarFloat."""

    def __new__(cls, value: Any, width: Any = None, underscore: Any = None) -> Any:
        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarint.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarint.py
new file mode 100644
index 0000000000..af798b71fd
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarint.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+from ruamel.yaml.anchor import Anchor
+
+from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt']
+
+
class ScalarInt(int):
    """An ``int`` that carries the formatting seen in the YAML source
    (width, underscore positions) plus an optional anchor, so the value can
    be round-tripped in its original spelling.

    In-place arithmetic keeps the subclass and copies width/underscore info
    onto the result (the anchor is not carried over).
    """

    def __new__(cls: Any, *args: Any, **kw: Any) -> Any:
        width = kw.pop('width', None)
        underscore = kw.pop('underscore', None)
        anchor = kw.pop('anchor', None)
        v = int.__new__(cls, *args, **kw)
        v._width = width
        v._underscore = underscore
        if anchor is not None:
            v.yaml_set_anchor(anchor, always_dump=True)
        return v

    def _with_formatting(self, value: Any) -> Any:
        # Re-wrap an arithmetic result in the same subclass, carrying over
        # the stored width and a copy of the underscore info.  Shared by all
        # in-place operators (previously five duplicated bodies).
        x = type(self)(value)
        x._width = self._width
        x._underscore = self._underscore[:] if self._underscore is not None else None
        return x

    def __iadd__(self, a: Any) -> Any:  # type: ignore
        return self._with_formatting(self + a)

    def __ifloordiv__(self, a: Any) -> Any:  # type: ignore
        return self._with_formatting(self // a)

    def __imul__(self, a: Any) -> Any:  # type: ignore
        return self._with_formatting(self * a)

    def __ipow__(self, a: Any) -> Any:  # type: ignore
        return self._with_formatting(self ** a)

    def __isub__(self, a: Any) -> Any:  # type: ignore
        return self._with_formatting(self - a)

    @property
    def anchor(self) -> Any:
        # create the anchor attribute lazily on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any: bool = False) -> Any:
        if not hasattr(self, Anchor.attrib):
            return None
        if any or self.anchor.always_dump:
            return self.anchor
        return None

    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        self.anchor.value = value
        self.anchor.always_dump = always_dump
+
+
class BinaryInt(ScalarInt):
    """ScalarInt that round-trips in binary (0b...) notation; all keyword
    arguments are forwarded to ScalarInt."""

    def __new__(
        cls, value: Any, width: Any = None, underscore: Any = None, anchor: Any = None,
    ) -> Any:
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
class OctalInt(ScalarInt):
    """ScalarInt that round-trips in octal notation; all keyword arguments
    are forwarded to ScalarInt."""

    def __new__(
        cls, value: Any, width: Any = None, underscore: Any = None, anchor: Any = None,
    ) -> Any:
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+# mixed casing of A-F is not supported, when loading the first non digit
+# determines the case
+
+
class HexInt(ScalarInt):
    """Hexadecimal ScalarInt; uses lower case (a-f).

    Mixed casing is not supported: when loading, the first non-digit
    determines the case (see the module comment above).
    """

    def __new__(
        cls, value: Any, width: Any = None, underscore: Any = None, anchor: Any = None,
    ) -> Any:
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
class HexCapsInt(ScalarInt):
    """Hexadecimal ScalarInt; uses upper case (A-F).

    Mixed casing is not supported: when loading, the first non-digit
    determines the case (see the module comment above).
    """

    def __new__(
        cls, value: Any, width: Any = None, underscore: Any = None, anchor: Any = None,
    ) -> Any:
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
class DecimalInt(ScalarInt):
    """Plain decimal ScalarInt; a distinct subclass is needed (only) so that
    an anchor can be attached ("needed if anchor")."""

    def __new__(
        cls, value: Any, width: Any = None, underscore: Any = None, anchor: Any = None,
    ) -> Any:
        return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarstring.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarstring.py
new file mode 100644
index 0000000000..30f4fde18b
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scalarstring.py
@@ -0,0 +1,140 @@
+# coding: utf-8
+
+from ruamel.yaml.anchor import Anchor
+
+from typing import Text, Any, Dict, List # NOQA
+from ruamel.yaml.compat import SupportsIndex
+
+__all__ = [
+ 'ScalarString',
+ 'LiteralScalarString',
+ 'FoldedScalarString',
+ 'SingleQuotedScalarString',
+ 'DoubleQuotedScalarString',
+ 'PlainScalarString',
+ # PreservedScalarString is the old name, as it was the first to be preserved on rt,
+ # use LiteralScalarString instead
+ 'PreservedScalarString',
+]
+
+
class ScalarString(str):
    """Base class for strings that remember their YAML quoting/block style
    (via the subclass) and can carry an anchor."""

    __slots__ = Anchor.attrib

    def __new__(cls, *args: Any, **kw: Any) -> Any:
        anchor_value = kw.pop('anchor', None)
        obj = str.__new__(cls, *args, **kw)
        if anchor_value is not None:
            obj.yaml_set_anchor(anchor_value, always_dump=True)
        return obj

    def replace(self, old: Any, new: Any, maxreplace: SupportsIndex = -1) -> Any:
        # keep the subclass (and thus the style) on the result
        replaced = str.replace(self, old, new, maxreplace)
        return type(self)(replaced)

    @property
    def anchor(self) -> Any:
        # create the anchor attribute lazily on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any: bool = False) -> Any:
        if not hasattr(self, Anchor.attrib):
            return None
        return self.anchor if any or self.anchor.always_dump else None

    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        self.anchor.value = value
        self.anchor.always_dump = always_dump
+
+
class LiteralScalarString(ScalarString):
    """String that round-trips as a literal block scalar (style '|')."""

    __slots__ = 'comment'  # the comment after the | on the first line

    style = '|'

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
# Backwards-compatible alias: PreservedScalarString was the original name
# (the first style preserved on round-trip); LiteralScalarString is preferred.
PreservedScalarString = LiteralScalarString
+
+
class FoldedScalarString(ScalarString):
    """String that round-trips as a folded block scalar (style '>')."""

    # fold_pos: fold positions — presumably character offsets; confirm in scanner
    __slots__ = ('fold_pos', 'comment')  # the comment after the > on the first line

    style = '>'

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
class SingleQuotedScalarString(ScalarString):
    """String that round-trips with single-quote (') style."""

    __slots__ = ()

    style = "'"

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
class DoubleQuotedScalarString(ScalarString):
    """String that round-trips with double-quote (") style."""

    __slots__ = ()

    style = '"'

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
class PlainScalarString(ScalarString):
    """String that round-trips unquoted (plain style, empty style string)."""

    __slots__ = ()

    style = ''

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)
+
+
def preserve_literal(s: Text) -> Text:
    """Normalize CRLF/CR line endings to LF and wrap the result so it dumps
    as a literal ('|') block scalar."""
    normalized = s.replace('\r\n', '\n').replace('\r', '\n')
    return LiteralScalarString(normalized)
+
+
def walk_tree(base: Any, map: Any = None) -> None:
    """
    Walk a simple yaml tree in place (recursing into dict values and list
    items) and convert strings that contain a trigger character.

    By default strings containing a newline are converted to literal scalars.
    You can also provide an explicit (ordered) mapping of trigger character to
    transform; the first trigger found in the string wins:
        map = ruamel.yaml.compat.ordereddict
        map['\n'] = preserve_literal
        map[':'] = SingleQuotedScalarString
        walk_tree(data, map=map)
    """
    from collections.abc import MutableMapping, MutableSequence

    if map is None:
        map = {'\n': preserve_literal}

    def _apply(container: Any, key: Any, value: Any) -> None:
        # first matching trigger character (in map order) converts the string
        for ch in map:
            if ch in value:
                container[key] = map[ch](value)
                break

    if isinstance(base, MutableMapping):
        for key in base:
            value: Text = base[key]
            if isinstance(value, str):
                _apply(base, key, value)
            else:
                walk_tree(value, map=map)
    elif isinstance(base, MutableSequence):
        for idx, item in enumerate(base):
            if isinstance(item, str):
                _apply(base, idx, item)
            else:
                walk_tree(item, map=map)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/scanner.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scanner.py
new file mode 100644
index 0000000000..11779f4774
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/scanner.py
@@ -0,0 +1,2359 @@
+# coding: utf-8
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# RoundTripScanner
+# COMMENT(value)
+#
+# Read comments in the Scanner code for more details.
+#
+
+import inspect
+from ruamel.yaml.error import MarkedYAMLError, CommentMark # NOQA
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.compat import check_anchorname_char, nprint, nprintf # NOQA
+
+from typing import Any, Dict, Optional, List, Union, Text # NOQA
+from ruamel.yaml.compat import VersionType # NOQA
+
+__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
+
+
# Character sets used by the scanner below.
# _THE_END: NUL (the end-of-input sentinel the Reader appends, see the
# comment in Scanner.__init__) plus the line-break characters LF, CR,
# NEL (\x85), LINE SEPARATOR (\u2028) and PARAGRAPH SEPARATOR (\u2029).
_THE_END = '\n\0\r\x85\u2028\u2029'
# _THE_END_SPACE_TAB: the same set extended with space and tab.
_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029'
# _SPACE_TAB: plain inline whitespace.
_SPACE_TAB = ' \t'
+
+
def xprintf(*args: Any, **kw: Any) -> Any:
    """Debug print hook; currently just forwards to nprintf.

    (The original body had an unreachable ``pass`` after the return, a
    leftover toggle between forwarding and doing nothing; removed.)
    """
    return nprintf(*args, **kw)
+
+
class ScannerError(MarkedYAMLError):
    """Raised on invalid input during scanning; MarkedYAMLError supplies the
    context/problem marks used in the error message."""

    pass
+
+
class SimpleKey:
    # See below simple keys treatment.
    """Record of a potential simple key (a key not introduced by '?').

    The scanner keeps at most one of these per flow level while it is still
    possible that a later ':' turns the preceding tokens into a key.
    """

    def __init__(
        self, token_number: Any, required: Any, index: int, line: int, column: int, mark: Any,
    ) -> None:
        self.token_number = token_number  # position in the emitted token stream
        self.required = required  # presumably: a key MUST occur here — confirm in scanner
        self.index = index  # character index in the input stream
        self.line = line  # line where the candidate key starts
        self.column = column  # column where the candidate key starts
        self.mark = mark  # mark for error reporting
+
+
+class Scanner:
+ def __init__(self, loader: Any = None) -> None:
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader do the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer
+
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_scanner', None) is None:
+ self.loader._scanner = self
+ self.reset_scanner()
+ self.first_time = False
+ self.yaml_version: Any = None
+
+ @property
+ def flow_level(self) -> int:
+ return len(self.flow_context)
+
+ def reset_scanner(self) -> None:
+ # Had we reached the end of the stream?
+ self.done = False
+
+ # flow_context is an expanding/shrinking list consisting of '{' and '['
+ # for each unclosed flow context. If empty list that means block context
+ self.flow_context: List[Text] = []
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens: List[Any] = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents: List[int] = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more that one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys: Dict[Any, Any] = {}
+
+ @property
+ def reader(self) -> Any:
+ try:
+ return self._scanner_reader # type: ignore
+ except AttributeError:
+ if hasattr(self.loader, 'typ'):
+ self._scanner_reader = self.loader.reader
+ else:
+ self._scanner_reader = self.loader._reader
+ return self._scanner_reader
+
+ @property
+ def scanner_processing_version(self) -> Any: # prefix until un-composited
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver.processing_version
+ return self.loader.processing_version
+
+ # Public methods.
+
+ def check_token(self, *choices: Any) -> bool:
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self) -> Any:
+ # Return the next token, but do not delete if from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ return self.tokens[0]
+
+ def get_token(self) -> Any:
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self) -> bool:
+ if self.done:
+ return False
+ if len(self.tokens) == 0:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+ return False
+
+ def fetch_comment(self, comment: Any) -> None:
+ raise NotImplementedError
+
+ def fetch_more_tokens(self) -> Any:
+ # Eat whitespaces and comments until we reach the next token.
+ comment = self.scan_to_next_token()
+ if comment is not None: # never happens for base scanner
+ return self.fetch_comment(comment)
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.reader.column)
+
+ # Peek the next character.
+ ch = self.reader.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ # if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == "'":
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError(
+ 'while scanning for the next token',
+ None,
+ f'found character {ch!r} that cannot start any token',
+ self.reader.get_mark(),
+ )
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self) -> Any:
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self) -> None:
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.reader.line or self.reader.index - key.index > 1024:
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self) -> None:
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.reader.column
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken + len(self.tokens)
+ key = SimpleKey(
+ token_number,
+ required,
+ self.reader.index,
+ self.reader.line,
+ self.reader.column,
+ self.reader.get_mark(),
+ )
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self) -> None:
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column: Any) -> None:
+ # In flow context, tokens should respect indentation.
+ # Actually the condition should be `self.indent >= column` according to
+ # the spec. But this condition will prohibit intuitively correct
+ # constructions such as
+ # key : {
+ # }
+ # ####
+ # if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.reader.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if bool(self.flow_level):
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.reader.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column: int) -> bool:
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self) -> None:
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
+
+ def fetch_stream_end(self) -> None:
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self) -> None:
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self) -> None:
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self) -> None:
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass: Any) -> None:
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward(3)
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self) -> None:
+ self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[')
+
+ def fetch_flow_mapping_start(self) -> None:
+ self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{')
+
+ def fetch_flow_collection_start(self, TokenClass: Any, to_push: Text) -> None:
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+ # Increase the flow level.
+ self.flow_context.append(to_push)
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self) -> None:
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self) -> None:
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass: Any) -> None:
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Decrease the flow level.
+ try:
+ popped = self.flow_context.pop() # NOQA
+ except IndexError:
+ # We must not be in a list or object.
+ # Defer error handling to the parser.
+ pass
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self) -> None:
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Add FLOW-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self) -> None:
+ # Block context needs additional checks.
+ if not self.flow_level:
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None,
+ None,
+ 'sequence entries are not allowed here',
+ self.reader.get_mark(),
+ )
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self) -> None:
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'mapping keys are not allowed here', self.reader.get_mark(),
+ )
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self) -> None:
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(
+ key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark),
+ )
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(
+ key.token_number - self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark),
+ )
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None,
+ None,
+ 'mapping values are not allowed here',
+ self.reader.get_mark(),
+ )
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self) -> None:
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self) -> None:
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self) -> None:
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self) -> None:
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self) -> None:
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style: Any) -> None:
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self) -> None:
+ self.fetch_flow_scalar(style="'")
+
+ def fetch_double(self) -> None:
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style: Any) -> None:
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self) -> None:
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self) -> Any:
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.reader.column == 0:
+ return True
+ return None
+
+ def check_document_start(self) -> Any:
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_document_end(self) -> Any:
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_block_entry(self) -> Any:
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_key(self) -> Any:
+ # KEY(flow context): '?'
+ if bool(self.flow_level):
+ return True
+ # KEY(block context): '?' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_value(self) -> Any:
+ # VALUE(flow context): ':'
+ if self.scanner_processing_version == (1, 1):
+ if bool(self.flow_level):
+ return True
+ else:
+ if bool(self.flow_level):
+ if self.flow_context[-1] == '[':
+ if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+ return False
+ elif self.tokens and isinstance(self.tokens[-1], ValueToken):
+ # mapping flow context scanning a value token
+ if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+ return False
+ return True
+ # VALUE(block context): ':' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_plain(self) -> Any:
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ srp = self.reader.peek
+ ch = srp()
+ if self.scanner_processing_version == (1, 1):
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or (
+ srp(1) not in _THE_END_SPACE_TAB
+ and (ch == '-' or (not self.flow_level and ch in '?:'))
+ )
+ # YAML 1.2
+ if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`':
+ # ################### ^ ???
+ return True
+ ch1 = srp(1)
+ if ch == '-' and ch1 not in _THE_END_SPACE_TAB:
+ return True
+ if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB:
+ return True
+
+ return srp(1) not in _THE_END_SPACE_TAB and (
+ ch == '-' or (not self.flow_level and ch in '?:')
+ )
+
+ # Scanners.
+
+ def scan_to_next_token(self) -> Any:
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ found = False
+ _the_end = _THE_END
+ white_space = ' \t' if self.flow_level > 0 else ' '
+ while not found:
+ while srp() in white_space:
+ srf()
+ if srp() == '#':
+ while srp() not in _the_end:
+ srf()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+ return None
+
+ def scan_directive(self) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ start_mark = self.reader.get_mark()
+ srf()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.reader.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.reader.get_mark()
+ else:
+ end_mark = self.reader.get_mark()
+ while srp() not in _THE_END:
+ srf()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ length = 0
+ srp = self.reader.peek
+ ch = srp(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.':
+ length += 1
+ ch = srp(length)
+ if not length:
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f'expected alphabetic or numeric character, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f'expected alphabetic or numeric character, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_yaml_directive_value(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ major = self.scan_yaml_directive_number(start_mark)
+ if srp() != '.':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f"expected a digit or '.', but found {srp()!r}",
+ self.reader.get_mark(),
+ )
+ srf()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if srp() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f"expected a digit or '.', but found {srp()!r}",
+ self.reader.get_mark(),
+ )
+ self.yaml_version = (major, minor)
+ return self.yaml_version
+
+ def scan_yaml_directive_number(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ ch = srp()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f'expected a digit, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ length = 0
+ while '0' <= srp(length) <= '9':
+ length += 1
+ value = int(self.reader.prefix(length))
+ srf(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while srp() == ' ':
+ srf()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.reader.peek()
+ if ch != ' ':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f"expected ' ', but found {ch!r}",
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.reader.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f"expected ' ', but found {ch!r}",
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_directive_ignored_line(self, start_mark: Any) -> None:
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ if srp() == '#':
+ while srp() not in _THE_END:
+ srf()
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ f'expected a comment or a line break, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass: Any) -> Any:
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ srp = self.reader.peek
+ start_mark = self.reader.get_mark()
+ indicator = srp()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.reader.forward()
+ length = 0
+ ch = srp(length)
+ # while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ # or ch in '-_':
+ while check_anchorname_char(ch):
+ length += 1
+ ch = srp(length)
+ if not length:
+ raise ScannerError(
+ f'while scanning an {name!s}',
+ start_mark,
+ f'expected alphabetic or numeric character, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ # ch1 = ch
+ # ch = srp() # no need to peek, ch is already set
+ # assert ch1 == ch
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
+ raise ScannerError(
+ f'while scanning an {name!s}',
+ start_mark,
+ f'expected alphabetic or numeric character, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ end_mark = self.reader.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ start_mark = self.reader.get_mark()
+ ch = srp(1)
+ short_handle = '!'
+ if ch == '!':
+ short_handle = '!!'
+ self.reader.forward()
+ srp = self.reader.peek
+ ch = srp(1)
+
+ if ch == '<':
+ handle = None
+ self.reader.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if srp() != '>':
+ raise ScannerError(
+ 'while parsing a tag',
+ start_mark,
+ f"expected '>' but found {srp()!r}",
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ elif ch in _THE_END_SPACE_TAB:
+ handle = None
+ suffix = short_handle
+ self.reader.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = srp(length)
+ handle = short_handle
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = short_handle
+ self.reader.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a tag',
+ start_mark,
+ f"expected ' ', but found {ch!r}",
+ self.reader.get_mark(),
+ )
+ value = (handle, suffix)
+ end_mark = self.reader.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style: Any, rt: Optional[bool] = False) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks: List[Any] = []
+ start_mark = self.reader.get_mark()
+
+ # Scan the header.
+ self.reader.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ # block scalar comment e.g. : |+ # comment text
+ block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent + 1
+ if increment is None:
+ # no increment and top level, min_indent could be 0
+ if min_indent < 1 and (
+ style not in '|>'
+ or (self.scanner_processing_version == (1, 1))
+ and getattr(
+ self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False,
+ )
+ ):
+ min_indent = 1
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ if min_indent < 1:
+ min_indent = 1
+ indent = min_indent + increment - 1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ""
+
+ # Scan the inner part of the block scalar.
+ while self.reader.column == indent and srp() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = srp() not in ' \t'
+ length = 0
+ while srp(length) not in _THE_END:
+ length += 1
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if style in '|>' and min_indent == 0:
+ # at the beginning of a line, if in block style see if
+ # end of document/start_new_document
+ if self.check_document_start() or self.check_document_end():
+ break
+ if self.reader.column == indent and srp() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if rt and folded and line_break == '\n':
+ chunks.append('\a')
+ if folded and line_break == '\n' and leading_non_space and srp() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ # if folded and line_break == '\n':
+ # if not breaks:
+ # if srp() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ # else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Process trailing line breaks. The 'chomping' setting determines
+ # whether they are included in the value.
+ trailing: List[Any] = []
+ if chomping in [None, True]:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+ elif chomping in [None, False]:
+ trailing.extend(breaks)
+
+ # We are done.
+ token = ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', False)
+ if comment_handler is None:
+ if block_scalar_comment is not None:
+ token.add_pre_comments([block_scalar_comment])
+ if len(trailing) > 0:
+ # Eat whitespaces and comments until we reach the next token.
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', None)
+ if comment_handler is not None:
+ line = end_mark.line - len(trailing)
+ for x in trailing:
+ assert x[-1] == '\n'
+ self.comments.add_blank_line(x, 0, line) # type: ignore
+ line += 1
+ comment = self.scan_to_next_token()
+ while comment:
+ trailing.append(' ' * comment[1].column + comment[0])
+ comment = self.scan_to_next_token()
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', False)
+ if comment_handler is None:
+ # Keep track of the trailing whitespace and following comments
+ # as a comment token, if isn't all included in the actual value.
+ comment_end_mark = self.reader.get_mark()
+ comment = CommentToken("".join(trailing), end_mark, comment_end_mark)
+ token.add_post_comment(comment)
+ return token
+
+ def scan_block_scalar_indicators(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ chomping = None
+ increment = None
+ ch = srp()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.reader.forward()
+ ch = srp()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected indentation indicator in the range 1-9, ' 'but found 0',
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected indentation indicator in the range 1-9, ' 'but found 0',
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ ch = srp()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.reader.forward()
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ f'expected chomping or indentation indicators, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark: Any) -> Any:
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ prefix = ''
+ comment = None
+ while srp() == ' ':
+ prefix += srp()
+ srf()
+ if srp() == '#':
+ comment = prefix
+ while srp() not in _THE_END:
+ comment += srp()
+ srf()
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ f'expected a comment or a line break, but found {ch!r}',
+ self.reader.get_mark(),
+ )
+ self.scan_line_break()
+ return comment
+
def scan_block_scalar_indentation(self) -> Any:
    """Scan leading breaks/spaces of a block scalar with auto-detected indent.

    Returns (chunks, max_indent, end_mark); rejects a follow-up line that is
    more indented than the first non-empty line.
    """
    peek = self.reader.peek
    forward = self.reader.forward
    chunks = []
    first_indent = -1
    max_indent = 0
    end_mark = self.reader.get_mark()
    while peek() in ' \r\n\x85\u2028\u2029':
        if peek() == ' ':
            forward()
            if self.reader.column > max_indent:
                max_indent = self.reader.column
        else:
            # a line break: remember the indent of the first such line
            if first_indent < 0:
                first_indent = self.reader.column
            chunks.append(self.scan_line_break())
            end_mark = self.reader.get_mark()
    if 0 < first_indent < max_indent:
        start_mark = self.reader.get_mark()
        raise ScannerError(
            'more indented follow up line than first in a block scalar', start_mark,
        )
    return chunks, max_indent, end_mark
+
def scan_block_scalar_breaks(self, indent: int) -> Any:
    """Consume line breaks and up to ``indent`` leading spaces per line.

    Returns (chunks, end_mark) where chunks holds the normalized breaks.
    """
    peek = self.reader.peek
    forward = self.reader.forward
    chunks: List[Any] = []
    end_mark = self.reader.get_mark()
    while self.reader.column < indent and peek() == ' ':
        forward()
    while peek() in '\r\n\x85\u2028\u2029':
        chunks.append(self.scan_line_break())
        end_mark = self.reader.get_mark()
        while self.reader.column < indent and peek() == ' ':
            forward()
    return chunks, end_mark
+
def scan_flow_scalar(self, style: Any) -> Any:
    """Scan a quoted (flow) scalar and return its ScalarToken.

    Indentation rules are relaxed for quoted scalars: the quotes clearly
    mark beginning and end, so this is less restrictive than the spec
    requires — only document separators inside the scalar are rejected.
    """
    double = style == '"'
    peek = self.reader.peek
    chunks: List[Any] = []
    start_mark = self.reader.get_mark()
    quote = peek()
    self.reader.forward()
    chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
    while peek() != quote:
        chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
    self.reader.forward()
    end_mark = self.reader.get_mark()
    return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+
# Single-character escape sequences valid in double-quoted scalars,
# mapped to their replacement characters (YAML 1.1/1.2 section on
# double-quoted style).
ESCAPE_REPLACEMENTS = {
    '0': '\0',      # null
    'a': '\x07',    # bell
    'b': '\x08',    # backspace
    't': '\x09',    # tab
    '\t': '\x09',
    'n': '\x0A',    # line feed
    'v': '\x0B',    # vertical tab
    'f': '\x0C',    # form feed
    'r': '\x0D',    # carriage return
    'e': '\x1B',    # escape
    ' ': '\x20',
    '"': '"',
    '/': '/',  # as per http://www.json.org/
    '\\': '\\',
    'N': '\x85',    # next line
    '_': '\xA0',    # non-breaking space
    'L': '\u2028',  # line separator
    'P': '\u2029',  # paragraph separator
}

# Numeric escapes: escape letter -> number of hex digits that follow.
ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
+
def scan_flow_scalar_non_spaces(self, double: Any, start_mark: Any) -> Any:
    """Scan the non-whitespace part of a quoted scalar.

    Handles '' in single-quoted scalars and backslash escapes (including
    \\xNN/\\uNNNN/\\UNNNNNNNN) in double-quoted scalars; returns the
    collected chunks when a plain space/break/quote boundary is reached.
    """
    chunks: List[Any] = []
    peek = self.reader.peek
    forward = self.reader.forward
    while True:
        # grab the longest run of ordinary characters in one prefix() call
        length = 0
        while peek(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029':
            length += 1
        if length != 0:
            chunks.append(self.reader.prefix(length))
            forward(length)
        ch = peek()
        if not double and ch == "'" and peek(1) == "'":
            # '' inside a single-quoted scalar is an escaped quote
            chunks.append("'")
            forward(2)
        elif (double and ch == "'") or (not double and ch in '"\\'):
            # literal character in this quoting style
            chunks.append(ch)
            forward()
        elif double and ch == '\\':
            forward()
            ch = peek()
            if ch in self.ESCAPE_REPLACEMENTS:
                chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                forward()
            elif ch in self.ESCAPE_CODES:
                length = self.ESCAPE_CODES[ch]
                forward()
                for k in range(length):
                    if peek(k) not in '0123456789ABCDEFabcdef':
                        raise ScannerError(
                            'while scanning a double-quoted scalar',
                            start_mark,
                            f'expected escape sequence of {length:d} '
                            f'hexdecimal numbers, but found {peek(k)!r}',
                            self.reader.get_mark(),
                        )
                code = int(self.reader.prefix(length), 16)
                chunks.append(chr(code))
                forward(length)
            elif ch in '\n\r\x85\u2028\u2029':
                # escaped line break: fold the following breaks
                self.scan_line_break()
                chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
            else:
                raise ScannerError(
                    'while scanning a double-quoted scalar',
                    start_mark,
                    f'found unknown escape character {ch!r}',
                    self.reader.get_mark(),
                )
        else:
            return chunks
+
def scan_flow_scalar_spaces(self, double: Any, start_mark: Any) -> Any:
    """Scan spaces/tabs and folded line breaks inside a quoted scalar.

    A single break folds to a space; breaks other than '\\n' are kept
    verbatim. Raises on end of stream inside the scalar.
    """
    peek = self.reader.peek
    chunks = []
    length = 0
    while peek(length) in ' \t':
        length += 1
    whitespaces = self.reader.prefix(length)
    self.reader.forward(length)
    ch = peek()
    if ch == '\0':
        raise ScannerError(
            'while scanning a quoted scalar',
            start_mark,
            'found unexpected end of stream',
            self.reader.get_mark(),
        )
    if ch in '\r\n\x85\u2028\u2029':
        line_break = self.scan_line_break()
        breaks = self.scan_flow_scalar_breaks(double, start_mark)
        if line_break != '\n':
            chunks.append(line_break)
        elif not breaks:
            # a lone '\n' folds into a single space
            chunks.append(' ')
        chunks.extend(breaks)
    else:
        chunks.append(whitespaces)
    return chunks
+
def scan_flow_scalar_breaks(self, double: Any, start_mark: Any) -> Any:
    """Collect consecutive line breaks within a quoted scalar.

    Instead of checking indentation, only document separators ('---'/'...')
    at the start of a line are rejected.
    """
    chunks: List[Any] = []
    peek = self.reader.peek
    forward = self.reader.forward
    while True:
        head = self.reader.prefix(3)
        if (head == '---' or head == '...') and peek(3) in _THE_END_SPACE_TAB:
            raise ScannerError(
                'while scanning a quoted scalar',
                start_mark,
                'found unexpected document separator',
                self.reader.get_mark(),
            )
        while peek() in ' \t':
            forward()
        if peek() in '\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
        else:
            return chunks
+
def scan_plain(self) -> Any:
    """Scan a plain (unquoted) scalar and return its ScalarToken.

    Additional restriction for the flow context: plain scalars there cannot
    contain ',', ': ' and '?'. The `allow_simple_key` flag is maintained
    here, and indentation rules are loosened for the flow context. Zero
    indentation is allowed for scalars, in which case document separators
    are checked at the beginning of the line.
    """
    peek = self.reader.peek
    forward = self.reader.forward
    chunks: List[Any] = []
    start_mark = self.reader.get_mark()
    end_mark = start_mark
    indent = self.indent + 1
    spaces: List[Any] = []
    while True:
        length = 0
        if peek() == '#':
            break
        while True:
            ch = peek(length)
            if False and ch == ':' and peek(length + 1) == ',':
                # disabled upstream: would terminate on ':,' in flow context
                break
            elif ch == ':' and peek(length + 1) not in _THE_END_SPACE_TAB:
                pass
            elif ch == '?' and self.scanner_processing_version != (1, 1):
                pass
            elif (
                ch in _THE_END_SPACE_TAB
                or (
                    not self.flow_level
                    and ch == ':'
                    and peek(length + 1) in _THE_END_SPACE_TAB
                )
                or (self.flow_level and ch in ',:?[]{}')
            ):
                break
            length += 1
        # It's not clear what we should do with ':' in the flow context.
        if (
            self.flow_level
            and ch == ':'
            and peek(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'
        ):
            forward(length)
            raise ScannerError(
                'while scanning a plain scalar',
                start_mark,
                "found unexpected ':'",
                self.reader.get_mark(),
                'Please check '
                'http://pyyaml.org/wiki/YAMLColonInFlowContext '
                'for details.',
            )
        if length == 0:
            break
        self.allow_simple_key = False
        chunks.extend(spaces)
        chunks.append(self.reader.prefix(length))
        forward(length)
        end_mark = self.reader.get_mark()
        spaces = self.scan_plain_spaces(indent, start_mark)
        if (
            not spaces
            or peek() == '#'
            or (not self.flow_level and self.reader.column < indent)
        ):
            break

    token = ScalarToken("".join(chunks), True, start_mark, end_mark)
    # getattr provides True so the C type loader, which cannot handle
    # comments, will not make a CommentToken
    if self.loader is not None:
        comment_handler = getattr(self.loader, 'comment_handling', False)
        if comment_handler is None:
            if spaces and spaces[0] == '\n':
                # create a comment token to preserve the trailing line breaks
                comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark)
                token.add_post_comment(comment)
        elif comment_handler is not False:
            line = start_mark.line + 1
            for ch in spaces:
                if ch == '\n':
                    self.comments.add_blank_line('\n', 0, line)  # type: ignore
                    line += 1

    return token
+
def scan_plain_spaces(self, indent: Any, start_mark: Any) -> Any:
    """Scan spaces/breaks that may continue a plain scalar.

    The specification is really confusing about tabs in plain scalars, so
    they are forbidden completely here. Returns the folded whitespace
    chunks, or None when a document separator ends the scalar.
    """
    peek = self.reader.peek
    forward = self.reader.forward
    chunks = []
    length = 0
    while peek(length) in ' ':
        length += 1
    whitespaces = self.reader.prefix(length)
    self.reader.forward(length)
    ch = peek()
    if ch in '\r\n\x85\u2028\u2029':
        line_break = self.scan_line_break()
        self.allow_simple_key = True
        head = self.reader.prefix(3)
        if (head == '---' or head == '...') and peek(3) in _THE_END_SPACE_TAB:
            return
        breaks = []
        while peek() in ' \r\n\x85\u2028\u2029':
            if peek() == ' ':
                forward()
            else:
                breaks.append(self.scan_line_break())
                head = self.reader.prefix(3)
                if (head == '---' or head == '...') and peek(3) in _THE_END_SPACE_TAB:
                    return
        if line_break != '\n':
            chunks.append(line_break)
        elif not breaks:
            # single '\n' folds into one space
            chunks.append(' ')
        chunks.extend(breaks)
    elif whitespaces:
        chunks.append(whitespaces)
    return chunks
+
def scan_tag_handle(self, name: Any, start_mark: Any) -> Any:
    """Scan a tag handle such as '!' or '!foo!'.

    For some strange reason the specification does not allow '_' in tag
    handles; it is allowed here anyway.
    """
    peek = self.reader.peek
    ch = peek()
    if ch != '!':
        raise ScannerError(
            f'while scanning an {name!s}',
            start_mark,
            f"expected '!', but found {ch!r}",
            self.reader.get_mark(),
        )
    length = 1
    ch = peek(length)
    if ch != ' ':
        # named handle: word characters terminated by a second '!'
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_':
            length += 1
            ch = peek(length)
        if ch != '!':
            self.reader.forward(length)
            raise ScannerError(
                f'while scanning an {name!s}',
                start_mark,
                f"expected '!' but found {ch!r}",
                self.reader.get_mark(),
            )
        length += 1
    value = self.reader.prefix(length)
    self.reader.forward(length)
    return value
+
def scan_tag_uri(self, name: Any, start_mark: Any) -> Any:
    """Scan a tag URI; '%' escapes are delegated to scan_uri_escapes.

    Note: well-formedness of the URI is not checked.
    """
    peek = self.reader.peek
    chunks = []
    length = 0
    ch = peek(length)
    while (
        '0' <= ch <= '9'
        or 'A' <= ch <= 'Z'
        or 'a' <= ch <= 'z'
        or ch in "-;/?:@&=+$,_.!~*'()[]%"
        or ((self.scanner_processing_version > (1, 1)) and ch == '#')
    ):
        if ch == '%':
            # flush what we have, then decode the escape run
            chunks.append(self.reader.prefix(length))
            self.reader.forward(length)
            length = 0
            chunks.append(self.scan_uri_escapes(name, start_mark))
        else:
            length += 1
        ch = peek(length)
    if length != 0:
        chunks.append(self.reader.prefix(length))
        self.reader.forward(length)
        length = 0
    if not chunks:
        raise ScannerError(
            f'while parsing an {name!s}',
            start_mark,
            f'expected URI, but found {ch!r}',
            self.reader.get_mark(),
        )
    return "".join(chunks)
+
def scan_uri_escapes(self, name: Any, start_mark: Any) -> Any:
    """Decode a run of %XX escapes in a tag URI to a UTF-8 string."""
    peek = self.reader.peek
    forward = self.reader.forward
    code_bytes: List[Any] = []
    mark = self.reader.get_mark()
    while peek() == '%':
        forward()
        for k in range(2):
            if peek(k) not in '0123456789ABCDEFabcdef':
                raise ScannerError(
                    f'while scanning an {name!s}',
                    start_mark,
                    f'expected URI escape sequence of 2 hexdecimal numbers, '
                    f'but found {peek(k)!r}',
                    self.reader.get_mark(),
                )
        code_bytes.append(int(self.reader.prefix(2), 16))
        forward(2)
    try:
        value = bytes(code_bytes).decode('utf-8')
    except UnicodeDecodeError as exc:
        raise ScannerError(f'while scanning an {name!s}', start_mark, str(exc), mark)
    return value
+
def scan_line_break(self) -> Any:
    """Consume one line break and normalize it.

    '\\r\\n', '\\r', '\\n' and '\\x85' all become '\\n'; '\\u2028' and
    '\\u2029' are returned unchanged; anything else consumes nothing and
    returns ''.
    """
    ch = self.reader.peek()
    if ch in '\r\n\x85':
        if self.reader.prefix(2) == '\r\n':
            self.reader.forward(2)
        else:
            self.reader.forward()
        return '\n'
    if ch in '\u2028\u2029':
        self.reader.forward()
        return ch
    return ""
+
+
class RoundTripScanner(Scanner):
    """Scanner that preserves comments for round-tripping.

    Comments are emitted as CommentToken instances and attached as pre/post
    comments to neighbouring tokens instead of being discarded.
    """

    def check_token(self, *choices: Any) -> bool:
        # Like Scanner.check_token, but pending comments are folded onto the
        # following token first.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        self._gather_comments()
        if len(self.tokens) > 0:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.tokens[0], choice):
                    return True
        return False

    def peek_token(self) -> Any:
        # Return the next token without removing it from the queue.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        self._gather_comments()
        if len(self.tokens) > 0:
            return self.tokens[0]
        return None

    def _gather_comments(self) -> Any:
        """Combine multiple comment lines and assign them as pre-comments to
        the next non-comment token."""
        comments: List[Any] = []
        if not self.tokens:
            return comments
        if isinstance(self.tokens[0], CommentToken):
            comment = self.tokens.pop(0)
            self.tokens_taken += 1
            comments.append(comment)
        while self.need_more_tokens():
            self.fetch_more_tokens()
            if not self.tokens:
                return comments
            if isinstance(self.tokens[0], CommentToken):
                self.tokens_taken += 1
                comment = self.tokens.pop(0)
                comments.append(comment)
        if len(comments) >= 1:
            self.tokens[0].add_pre_comments(comments)
        # pull in post comment on e.g. ':'
        if not self.done and len(self.tokens) < 2:
            self.fetch_more_tokens()

    def get_token(self) -> Any:
        """Return the next token, attaching trailing comments as post
        comments where appropriate."""
        while self.need_more_tokens():
            self.fetch_more_tokens()
        self._gather_comments()
        if len(self.tokens) > 0:
            # Only add post comments to single line tokens: scalar, value
            # token, FlowXEndToken — otherwise hidden stream tokens could get
            # them (leave them and they will be pre comments for the next
            # map/seq).
            if (
                len(self.tokens) > 1
                and isinstance(
                    self.tokens[0],
                    (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken),
                )
                and isinstance(self.tokens[1], CommentToken)
                and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
            ):
                # comment on the same line as the token
                self.tokens_taken += 1
                comment_tok = self.tokens.pop(1)
                self.fetch_more_tokens()
                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
                    self.tokens_taken += 1
                    extra = self.tokens.pop(1)
                    comment_tok.value = (
                        comment_tok.value + (' ' * extra.start_mark.column) + extra.value
                    )
                    self.fetch_more_tokens()
                self.tokens[0].add_post_comment(comment_tok)
            elif (
                len(self.tokens) > 1
                and isinstance(self.tokens[0], ScalarToken)
                and isinstance(self.tokens[1], CommentToken)
                and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line
            ):
                # comment on a later line: re-insert the intervening breaks
                # and indentation into the comment value
                self.tokens_taken += 1
                comment_tok = self.tokens.pop(1)
                comment_tok.value = (
                    '\n' * (comment_tok.start_mark.line - self.tokens[0].end_mark.line)
                    + (' ' * comment_tok.start_mark.column)
                    + comment_tok.value
                )
                self.tokens[0].add_post_comment(comment_tok)
                self.fetch_more_tokens()
                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
                    self.tokens_taken += 1
                    extra = self.tokens.pop(1)
                    comment_tok.value = (
                        comment_tok.value + (' ' * extra.start_mark.column) + extra.value
                    )
                    self.fetch_more_tokens()
            self.tokens_taken += 1
            return self.tokens.pop(0)
        return None

    def fetch_comment(self, comment: Any) -> None:
        # comment is the (value, start_mark, end_mark) triple produced by
        # scan_to_next_token
        value, start_mark, end_mark = comment
        while value and value[-1] == ' ':
            # empty line within indented key context;
            # no need to update end-mark, that is not used
            value = value[:-1]
        self.tokens.append(CommentToken(value, start_mark, end_mark))

    # scanner

    def scan_to_next_token(self) -> Any:
        """Skip spaces, line breaks and comments up to the next token.

        If we find a line break in the block context, the `allow_simple_key`
        flag is set. The byte order mark is stripped if it is the first
        character in the stream (BOM inside the stream is not yet supported
        and is treated as part of the document). Returns a
        (value, start_mark, end_mark) comment triple when a comment or run
        of empty lines was found, else None.

        TODO: tab handling rules should be made more sane. A good rule is:
        tabs cannot precede BLOCK-SEQUENCE-START, BLOCK-MAPPING-START,
        BLOCK-END, KEY(block), VALUE(block), BLOCK-ENTRY — so the check
        would set `allow_simple_keys = False` on a tab, and `unwind_indent`
        would need an `allow_simple_keys == True` check before issuing
        BLOCK-END; scanners for block, flow, and plain scalars would need
        to be modified too.
        """
        peek = self.reader.peek
        forward = self.reader.forward
        if self.reader.index == 0 and peek() == '\uFEFF':
            forward()
        found = False
        white_space = ' \t' if self.flow_level > 0 else ' '
        while not found:
            while peek() in white_space:
                forward()
            ch = peek()
            if ch == '#':
                start_mark = self.reader.get_mark()
                comment = ch
                forward()
                while ch not in _THE_END:
                    ch = peek()
                    if ch == '\0':  # don't gobble the end-of-stream character
                        # but add an explicit newline as "YAML processors should
                        # terminate the stream with an explicit line break"
                        # https://yaml.org/spec/1.2/spec.html#id2780069
                        comment += '\n'
                        break
                    comment += ch
                    forward()
                # gather any blank lines following the comment
                ch = self.scan_line_break()
                while len(ch) > 0:
                    comment += ch
                    ch = self.scan_line_break()
                end_mark = self.reader.get_mark()
                if not self.flow_level:
                    self.allow_simple_key = True
                return comment, start_mark, end_mark
            if self.scan_line_break() != '':
                start_mark = self.reader.get_mark()
                if not self.flow_level:
                    self.allow_simple_key = True
                ch = peek()
                if ch == '\n':  # empty toplevel lines
                    start_mark = self.reader.get_mark()
                    comment = ""
                    while ch:
                        ch = self.scan_line_break(empty_line=True)
                        comment += ch
                    if peek() == '#':
                        # empty line followed by indented real comment
                        comment = comment.rsplit('\n', 1)[0] + '\n'
                    end_mark = self.reader.get_mark()
                    return comment, start_mark, end_mark
            else:
                found = True
        return None

    def scan_line_break(self, empty_line: bool = False) -> Text:
        """Consume and normalize one line break.

        As Scanner.scan_line_break, but with ``empty_line=True`` also
        consumes a single space/tab on an otherwise empty line and returns
        it verbatim.
        """
        ch: Text = self.reader.peek()
        if ch in '\r\n\x85':
            if self.reader.prefix(2) == '\r\n':
                self.reader.forward(2)
            else:
                self.reader.forward()
            return '\n'
        if ch in '\u2028\u2029':
            self.reader.forward()
            return ch
        if empty_line and ch in '\t ':
            self.reader.forward()
            return ch
        return ""

    def scan_block_scalar(self, style: Any, rt: Optional[bool] = True) -> Any:
        # force round-trip handling of block scalars
        return Scanner.scan_block_scalar(self, style, rt=rt)

    def scan_uri_escapes(self, name: Any, start_mark: Any) -> Any:
        """
        The roundtripscanner doesn't do URI escaping: the %XX sequences are
        validated (they must be hex digits forming valid UTF-8) but returned
        verbatim rather than decoded.
        """
        peek = self.reader.peek
        forward = self.reader.forward
        code_bytes: List[Any] = []
        chunk = ''
        mark = self.reader.get_mark()
        while peek() == '%':
            chunk += '%'
            forward()
            for k in range(2):
                if peek(k) not in '0123456789ABCDEFabcdef':
                    raise ScannerError(
                        f'while scanning an {name!s}',
                        start_mark,
                        f'expected URI escape sequence of 2 hexdecimal numbers, '
                        f'but found {peek(k)!r}',
                        self.reader.get_mark(),
                    )
            code_bytes.append(int(self.reader.prefix(2), 16))
            chunk += self.reader.prefix(2)
            forward(2)
        try:
            _ = bytes(code_bytes).decode('utf-8')
        except UnicodeDecodeError as exc:
            raise ScannerError(f'while scanning an {name!s}', start_mark, str(exc), mark)
        return chunk
+
+
# comment handling 2021: differentiation between comment kinds is currently
# not needed, so value and key comments share index 0
VALUECMNT = 0
KEYCMNT = 0  # 1
# TAGCMNT = 2
# ANCHORCMNT = 3
+
class CommentBase:
    """Base class for comments collected by the split-comment scanner.

    Records the comment text and position plus, for debugging, the scanner
    routine that created it and (later) the routine that consumed it.
    """

    __slots__ = ('value', 'line', 'column', 'used', 'function', 'fline', 'ufun', 'uline')

    def __init__(self, value: Any, line: Any, column: Any) -> None:
        self.value = value
        self.line = line
        self.column = column
        self.used = ' '  # ' ' = unused, '+' (default) = used, '|' = assigned
        # three frames up is the scanner routine that created this comment
        info = inspect.getframeinfo(inspect.stack()[3][0])
        self.function = info.function
        self.fline = info.lineno
        self.ufun = None
        self.uline = None

    def set_used(self, v: Any = '+') -> None:
        # mark as used and remember the immediate caller for debugging
        self.used = v
        info = inspect.getframeinfo(inspect.stack()[1][0])
        self.ufun = info.function  # type: ignore
        self.uline = info.lineno  # type: ignore

    def set_assigned(self) -> None:
        self.used = '|'

    def __str__(self) -> str:
        return f'{self.value}'

    def __repr__(self) -> str:
        return f'{self.value!r}'

    def info(self) -> str:
        # one-line debug dump; `name` is provided by the subclasses
        xv = self.value + '"'
        name = self.name  # type: ignore
        return (
            f'{name}{self.used} {self.line:2}:{self.column:<2} "{xv:40s} '
            f'{self.function}:{self.fline} {self.ufun}:{self.uline}'
        )
+
+
class EOLComment(CommentBase):
    """Comment that trails other content on the same line."""

    name = 'EOLC'

    def __init__(self, value: Any, line: Any, column: Any) -> None:
        super().__init__(value, line, column)
+
+
class FullLineComment(CommentBase):
    """Comment occupying a whole line of its own."""

    name = 'FULL'

    def __init__(self, value: Any, line: Any, column: Any) -> None:
        super().__init__(value, line, column)
+
+
class BlankLineComment(CommentBase):
    """Pseudo-comment representing one or more blank lines."""

    name = 'BLNK'

    def __init__(self, value: Any, line: Any, column: Any) -> None:
        super().__init__(value, line, column)
+
+
class ScannedComments:
    """Registry of comments found while scanning, keyed by line number.

    `comments` maps line number -> CommentBase instance; `unused` lists the
    line numbers not yet attached to a token, in scan order.
    """

    def __init__(self: Any) -> None:
        self.comments = {}  # type: ignore
        self.unused = []  # type: ignore

    def add_eol_comment(self, comment: Any, column: Any, line: Any) -> Any:
        # an EOL comment carries at most its own trailing newline
        if comment.count('\n') == 1:
            assert comment[-1] == '\n'
        else:
            assert '\n' not in comment
        self.comments[line] = retval = EOLComment(comment[:-1], line, column)
        self.unused.append(line)
        return retval

    def add_blank_line(self, comment: Any, column: Any, line: Any) -> Any:
        assert comment.count('\n') == 1 and comment[-1] == '\n'
        assert line not in self.comments
        self.comments[line] = retval = BlankLineComment(comment[:-1], line, column)
        self.unused.append(line)
        return retval

    def add_full_line_comment(self, comment: Any, column: Any, line: Any) -> Any:
        assert comment.count('\n') == 1 and comment[-1] == '\n'
        self.comments[line] = retval = FullLineComment(comment[:-1], line, column)
        self.unused.append(line)
        return retval

    def __getitem__(self, idx: Any) -> Any:
        return self.comments[idx]

    def __str__(self) -> Any:
        return (
            'ParsedComments:\n '
            + '\n '.join((f'{lineno:2} {x.info()}' for lineno, x in self.comments.items()))
            + '\n'
        )

    def last(self) -> str:
        # debug helper: render the most recently added comment
        lineno, x = list(self.comments.items())[-1]
        return f'{lineno:2} {x.info()}\n'

    def any_unprocessed(self) -> bool:
        # ToDo: might want to differentiate based on lineno
        return len(self.unused) > 0

    def unprocessed(self, use: Any = False) -> Any:
        # yield (line, comment) pairs; with use=True each yielded comment is
        # popped from the unused list and marked used
        while len(self.unused) > 0:
            first = self.unused.pop(0) if use else self.unused[0]
            info = inspect.getframeinfo(inspect.stack()[1][0])
            xprintf('using', first, self.comments[first].value, info.function, info.lineno)
            yield first, self.comments[first]
            if use:
                self.comments[first].set_used()

    def assign_pre(self, token: Any) -> Any:
        """Attach all unused comments on lines before the token as
        pre-comments; returns True when any were attached."""
        token_line = token.start_mark.line
        info = inspect.getframeinfo(inspect.stack()[1][0])
        xprintf('assign_pre', token_line, self.unused, info.function, info.lineno)
        gobbled = False
        while self.unused and self.unused[0] < token_line:
            gobbled = True
            first = self.unused.pop(0)
            xprintf('assign_pre < ', first)
            self.comments[first].set_used()
            token.add_comment_pre(first)
        return gobbled

    def assign_eol(self, tokens: Any) -> Any:
        """Attach the first unused comment, if it is an EOL comment, to the
        most recent matching scalar token (keyed as key or value comment)."""
        try:
            comment_line = self.unused[0]
        except IndexError:
            return
        if not isinstance(self.comments[comment_line], EOLComment):
            return
        # walk back over tokens on later lines and over ValueTokens
        idx = 1
        while tokens[-idx].start_mark.line > comment_line or isinstance(
            tokens[-idx], ValueToken,
        ):
            idx += 1
        xprintf('idx1', idx)
        if (
            len(tokens) > idx
            and isinstance(tokens[-idx], ScalarToken)
            and isinstance(tokens[-(idx + 1)], ScalarToken)
        ):
            return
        try:
            if isinstance(tokens[-idx], ScalarToken) and isinstance(
                tokens[-(idx + 1)], KeyToken,
            ):
                try:
                    eol_idx = self.unused.pop(0)
                    self.comments[eol_idx].set_used()
                    xprintf('>>>>>a', idx, eol_idx, KEYCMNT)
                    tokens[-idx].add_comment_eol(eol_idx, KEYCMNT)
                except IndexError:
                    raise NotImplementedError
                return
        except IndexError:
            xprintf('IndexError1')
            pass
        try:
            if isinstance(tokens[-idx], ScalarToken) and isinstance(
                tokens[-(idx + 1)], (ValueToken, BlockEntryToken),
            ):
                try:
                    eol_idx = self.unused.pop(0)
                    self.comments[eol_idx].set_used()
                    tokens[-idx].add_comment_eol(eol_idx, VALUECMNT)
                except IndexError:
                    raise NotImplementedError
                return
        except IndexError:
            xprintf('IndexError2')
            pass
        for t in tokens:
            xprintf('tt-', t)
        xprintf('not implemented EOL', type(tokens[-idx]))
        import sys

        sys.exit(0)

    def assign_post(self, token: Any) -> Any:
        """Attach all unused comments on lines before the token as
        post-comments; returns True when any were attached."""
        token_line = token.start_mark.line
        info = inspect.getframeinfo(inspect.stack()[1][0])
        xprintf('assign_post', token_line, self.unused, info.function, info.lineno)
        gobbled = False
        while self.unused and self.unused[0] < token_line:
            gobbled = True
            first = self.unused.pop(0)
            xprintf('assign_post < ', first)
            self.comments[first].set_used()
            token.add_comment_post(first)
        return gobbled

    def str_unprocessed(self) -> Any:
        return ''.join(
            (f' {ind:2} {x.info()}\n' for ind, x in self.comments.items() if x.used == ' '),
        )
+
+
class RoundTripScannerSC(Scanner):  # RoundTripScanner Split Comments
    """Round-trip scanner that keeps comments out of the token stream,
    recording them in a ScannedComments registry on the loader instead."""

    def __init__(self, *arg: Any, **kw: Any) -> None:
        super().__init__(*arg, **kw)
        assert self.loader is not None
        # `comments` is initialised on .need_more_tokens and persists on
        # self.loader.parsed_comments
        self.comments = None

    def get_token(self) -> Any:
        # Return the next token, first binding pending comments to it.
        while self.need_more_tokens():
            self.fetch_more_tokens()
        if len(self.tokens) > 0:
            if isinstance(self.tokens[0], BlockEndToken):
                self.comments.assign_post(self.tokens[0])  # type: ignore
            else:
                self.comments.assign_pre(self.tokens[0])  # type: ignore
            self.tokens_taken += 1
            return self.tokens.pop(0)

    def need_more_tokens(self) -> bool:
        if self.comments is None:
            # first call: create the registry and publish it on the loader
            self.loader.parsed_comments = self.comments = ScannedComments()  # type: ignore
        if self.done:
            return False
        if len(self.tokens) == 0:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True
        if len(self.tokens) < 2:
            return True
        if self.tokens[0].start_mark.line == self.tokens[-1].start_mark.line:
            return True
        if True:  # debug tracing (xprintf is a no-op unless enabled)
            xprintf('-x--', len(self.tokens))
            for t in self.tokens:
                xprintf(t)
            xprintf(self.comments.str_unprocessed())  # type: ignore
        self.comments.assign_pre(self.tokens[0])  # type: ignore
        self.comments.assign_eol(self.tokens)  # type: ignore
        return False

    def scan_to_next_token(self) -> None:
        # Like Scanner.scan_to_next_token, but comments are routed into
        # self.comments instead of being returned.
        peek = self.reader.peek
        forward = self.reader.forward
        if self.reader.index == 0 and peek() == '\uFEFF':
            forward()
        start_mark = self.reader.get_mark()
        found = False
        while not found:
            while peek() == ' ':
                forward()
            ch = peek()
            if ch == '#':
                comment_start_mark = self.reader.get_mark()
                comment = ch
                forward()  # skip the '#'
                while ch not in _THE_END:
                    ch = peek()
                    if ch == '\0':  # don't gobble the end-of-stream character
                        # but add an explicit newline as "YAML processors should
                        # terminate the stream with an explicit line break"
                        # https://yaml.org/spec/1.2/spec.html#id2780069
                        comment += '\n'
                        break
                    comment += ch
                    forward()
                # we have a comment
                if start_mark.column == 0:
                    self.comments.add_full_line_comment(  # type: ignore
                        comment, comment_start_mark.column, comment_start_mark.line,
                    )
                else:
                    self.comments.add_eol_comment(  # type: ignore
                        comment, comment_start_mark.column, comment_start_mark.line,
                    )
                    comment = ""
                # gather any blank lines or full line comments following the
                # comment as well
                self.scan_empty_or_full_line_comments()
                if not self.flow_level:
                    self.allow_simple_key = True
                return
            if bool(self.scan_line_break()):
                # start_mark = self.reader.get_mark()
                if not self.flow_level:
                    self.allow_simple_key = True
                self.scan_empty_or_full_line_comments()
                return None
                # NOTE(review): everything below this return is unreachable,
                # preserved from the original; the scan_line_break call here
                # passes empty_line=True which Scanner.scan_line_break does
                # not accept — harmless only because it can never execute.
                ch = peek()
                if ch == '\n':  # empty toplevel lines
                    start_mark = self.reader.get_mark()
                    comment = ""
                    while ch:
                        ch = self.scan_line_break(empty_line=True)
                        comment += ch
                    if peek() == '#':
                        # empty line followed by indented real comment
                        comment = comment.rsplit('\n', 1)[0] + '\n'
                    _ = self.reader.get_mark()  # gobble end_mark
                    return None
            else:
                found = True
        return None

    def scan_empty_or_full_line_comments(self) -> None:
        """Consume blank lines and full-line comments, registering each with
        self.comments, until a line with real content is reached."""
        blmark = self.reader.get_mark()
        assert blmark.column == 0
        blanks = ""
        comment = None
        mark = None
        ch = self.reader.peek()
        while True:
            if ch in '\r\n\x85\u2028\u2029':
                if self.reader.prefix(2) == '\r\n':
                    self.reader.forward(2)
                else:
                    self.reader.forward()
                if comment is not None:
                    comment += '\n'
                    self.comments.add_full_line_comment(comment, mark.column, mark.line)
                    comment = None
                else:
                    blanks += '\n'
                    self.comments.add_blank_line(blanks, blmark.column, blmark.line)  # type: ignore # NOQA
                blanks = ""
                blmark = self.reader.get_mark()
                ch = self.reader.peek()
                continue
            if comment is None:
                if ch in ' \t':
                    blanks += ch
                elif ch == '#':
                    mark = self.reader.get_mark()
                    comment = '#'
                else:
                    break
            else:
                comment += ch
            self.reader.forward()
            ch = self.reader.peek()

    def scan_block_scalar_ignored_line(self, start_mark: Any) -> Any:
        # As Scanner.scan_block_scalar_ignored_line, but a trailing comment
        # is registered as an EOL comment instead of being returned.
        peek = self.reader.peek
        forward = self.reader.forward
        prefix = ''
        comment = None
        while peek() == ' ':
            prefix += peek()
            forward()
        if peek() == '#':
            comment = ''
            mark = self.reader.get_mark()
            while peek() not in _THE_END:
                comment += peek()
                forward()
            comment += '\n'  # type: ignore
        ch = peek()
        if ch not in _THE_END:
            raise ScannerError(
                'while scanning a block scalar',
                start_mark,
                f'expected a comment or a line break, but found {ch!r}',
                self.reader.get_mark(),
            )
        if comment is not None:
            self.comments.add_eol_comment(comment, mark.column, mark.line)  # type: ignore
        self.scan_line_break()
        return None
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/serializer.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/serializer.py
new file mode 100644
index 0000000000..1ac46d25fb
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/serializer.py
@@ -0,0 +1,231 @@
+# coding: utf-8
+
+from ruamel.yaml.error import YAMLError
+from ruamel.yaml.compat import nprint, DBG_NODE, dbg, nprintf # NOQA
+from ruamel.yaml.util import RegExp
+
+from ruamel.yaml.events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+ DocumentStartEvent,
+ DocumentEndEvent,
+)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+from typing import Any, Dict, Union, Text, Optional # NOQA
+from ruamel.yaml.compat import VersionType # NOQA
+
+__all__ = ['Serializer', 'SerializerError']
+
+
+class SerializerError(YAMLError):
+ pass
+
+
class Serializer:
    """Turn a composed node tree into events sent to the emitter.

    Sits between the representer (which builds nodes) and the emitter
    (which writes characters).  Lifecycle: ``open()`` once, ``serialize()``
    per document, ``close()`` once.
    """

    # 'id' and 3+ numbers, but not 000
    ANCHOR_TEMPLATE = 'id{:03d}'
    ANCHOR_RE = RegExp('id(?!000$)\\d{3,}')

    def __init__(
        self,
        encoding: Any = None,
        explicit_start: Optional[bool] = None,
        explicit_end: Optional[bool] = None,
        version: Optional[VersionType] = None,
        tags: Any = None,
        dumper: Any = None,
    ) -> None:
        # NOQA
        self.dumper = dumper
        if self.dumper is not None:
            self.dumper._serializer = self
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        # version may be given as a 'major.minor' string; normalize to a tuple
        if isinstance(version, str):
            self.use_version = tuple(map(int, version.split('.')))
        else:
            self.use_version = version  # type: ignore
        self.use_tags = tags
        self.serialized_nodes: Dict[Any, Any] = {}
        self.anchors: Dict[Any, Any] = {}
        self.last_anchor_id = 0
        # None before open(), False while the stream is open, True after close()
        self.closed: Optional[bool] = None
        self._templated_id = None

    @property
    def emitter(self) -> Any:
        # a dumper with a 'typ' attribute is a new-style YAML instance
        if hasattr(self.dumper, 'typ'):
            return self.dumper.emitter
        return self.dumper._emitter

    @property
    def resolver(self) -> Any:
        if hasattr(self.dumper, 'typ'):
            # bug fix: the attribute access was previously not returned (it
            # only worked because accessing .resolver lazily created
            # ._resolver as a side effect); return it directly, mirroring
            # the emitter property above
            return self.dumper.resolver
        return self.dumper._resolver

    def open(self) -> None:
        """Emit the stream start; may only be called once, before close()."""
        if self.closed is None:
            self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError('serializer is closed')
        else:
            raise SerializerError('serializer is already opened')

    def close(self) -> None:
        """Emit the stream end; a no-op when already closed."""
        if self.closed is None:
            raise SerializerError('serializer is not opened')
        elif not self.closed:
            self.emitter.emit(StreamEndEvent())
            self.closed = True

    # def __del__(self):
    #     self.close()

    def serialize(self, node: Any) -> None:
        """Serialize one document: emit document events for *node*."""
        if dbg(DBG_NODE):
            nprint('Serializing nodes')
            node.dump()
        if self.closed is None:
            raise SerializerError('serializer is not opened')
        elif self.closed:
            raise SerializerError('serializer is closed')
        self.emitter.emit(
            DocumentStartEvent(
                explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags,
            ),
        )
        # first pass assigns anchors to nodes referenced more than once,
        # second pass emits the events
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # reset per-document state
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node: Any) -> None:
        """Walk the node tree; any node encountered twice gets an anchor."""
        if node in self.anchors:
            # second visit: promote from 'seen' (None) to an actual anchor
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            anchor = None
            try:
                # keep an explicitly requested anchor even when unreferenced
                if node.anchor.always_dump:
                    anchor = node.anchor.value
            except:  # NOQA
                pass
            self.anchors[node] = anchor
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node: Any) -> Any:
        """Return the node's own anchor if set, else a generated 'idNNN'."""
        try:
            anchor = node.anchor.value
        except:  # NOQA
            anchor = None
        if anchor is None:
            self.last_anchor_id += 1
            return self.ANCHOR_TEMPLATE.format(self.last_anchor_id)
        return anchor

    def serialize_node(self, node: Any, parent: Any, index: Any) -> None:
        """Emit events for *node*; nodes already emitted become aliases."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            node_style = getattr(node, 'style', None)
            if node_style != '?':
                node_style = None
            self.emitter.emit(AliasEvent(alias, style=node_style))
        else:
            self.serialized_nodes[node] = True
            self.resolver.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # here check if the node.tag equals the one that would result from parsing
                # if not equal quoting is necessary for strings
                detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
                implicit = (
                    (node.ctag == detected_tag),
                    (node.ctag == default_tag),
                    node.tag.startswith('tag:yaml.org,2002:'),  # type: ignore
                )
                self.emitter.emit(
                    ScalarEvent(
                        alias,
                        node.ctag,
                        implicit,
                        node.value,
                        style=node.style,
                        comment=node.comment,
                    ),
                )
            elif isinstance(node, SequenceNode):
                implicit = node.ctag == self.resolver.resolve(SequenceNode, node.value, True)
                comment = node.comment
                end_comment = None
                seq_comment = None
                if node.flow_style is True:
                    if comment:  # eol comment on flow style sequence
                        seq_comment = comment[0]
                        # comment[0] = None
                if comment and len(comment) > 2:
                    end_comment = comment[2]
                self.emitter.emit(
                    SequenceStartEvent(
                        alias,
                        node.ctag,
                        implicit,
                        flow_style=node.flow_style,
                        comment=node.comment,
                    ),
                )
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
            elif isinstance(node, MappingNode):
                implicit = node.ctag == self.resolver.resolve(MappingNode, node.value, True)
                comment = node.comment
                end_comment = None
                map_comment = None
                if node.flow_style is True:
                    if comment:  # eol comment on flow style mapping
                        map_comment = comment[0]
                        # comment[0] = None
                if comment and len(comment) > 2:
                    end_comment = comment[2]
                self.emitter.emit(
                    MappingStartEvent(
                        alias,
                        node.ctag,
                        implicit,
                        flow_style=node.flow_style,
                        comment=node.comment,
                        nr_items=len(node.value),
                    ),
                )
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
            self.resolver.ascend_resolver()
+
+
def templated_id(s: Text) -> Any:
    """Match *s* against the generated-anchor pattern ('id' + 3+ digits)."""
    pattern = Serializer.ANCHOR_RE
    return pattern.match(s)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/tag.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/tag.py
new file mode 100644
index 0000000000..7ad23fec01
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/tag.py
@@ -0,0 +1,124 @@
+# coding: utf-8
+
+"""
+In round-trip mode the original tag needs to be preserved, but the tag
+transformed based on the directives needs to be available as well.
+
+A Tag that is created during loading has a handle and a suffix.
+Not all objects loaded currently have a Tag, that .tag attribute can be None
+A Tag that is created for dumping only (on an object loaded without a tag) has a suffix
+only.
+"""
+
+from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA
+
tag_attrib = '_yaml_tag'


class Tag:
    """store original tag information for roundtripping"""

    attrib = tag_attrib

    def __init__(self, handle: Any = None, suffix: Any = None, handles: Any = None) -> None:
        # handle/suffix as scanned (e.g. '!!' + 'str'); handles maps each
        # handle to its full prefix according to the directives in effect
        self.handle = handle
        self.suffix = suffix
        self.handles = handles
        # set exactly once via select_transform(): False -> non-round-trip
        self._transform_type: Optional[bool] = None

    def __repr__(self) -> str:
        return f'{type(self).__name__}({self.trval!r})'

    def __str__(self) -> str:
        return f'{self.trval}'

    def __hash__(self) -> int:
        # cache the hash; handle/suffix are not expected to change afterwards
        cached = getattr(self, '_hash_id', None)
        if cached is None:
            cached = self._hash_id = hash((self.handle, self.suffix))
        return cached

    def __eq__(self, other: Any) -> bool:
        # other should not be a string, but the serializer sometimes provides these
        other_trval = other if isinstance(other, str) else other.trval
        return bool(self.trval == other_trval)

    def startswith(self, x: str) -> bool:
        return False if self.trval is None else self.trval.startswith(x)

    @property
    def trval(self) -> Optional[str]:
        """Transformed tag value; computed on first access and cached."""
        if not hasattr(self, '_trval'):
            self._trval: Optional[str] = self._compute_trval()
        return self._trval

    def _compute_trval(self) -> Optional[str]:
        if self.handle is None:
            # dump-only tag: just the (percent-decoded) suffix
            return self.uri_decoded_suffix
        assert self._transform_type is not None
        # currently both the round-trip and the non-round-trip paths resolve
        # the handle through the handles table and append the decoded suffix
        return self.handles[self.handle] + self.uri_decoded_suffix

    value = trval

    @property
    def uri_decoded_suffix(self) -> Optional[str]:
        """Suffix with %xx escapes decoded; computed once and cached."""
        if not hasattr(self, '_uri_decoded_suffix'):
            if self.suffix is None:
                self._uri_decoded_suffix: Optional[str] = None
            else:
                # don't have to check for scanner errors here
                decoded = ''
                pos = 0
                while pos < len(self.suffix):
                    ch = self.suffix[pos]
                    if ch == '%':
                        decoded += chr(int(self.suffix[pos + 1 : pos + 3], 16))
                        pos += 3
                    else:
                        decoded += ch
                        pos += 1
                self._uri_decoded_suffix = decoded
        return self._uri_decoded_suffix

    def select_transform(self, val: bool) -> None:
        """
        val: False -> non-round-trip
             True -> round-trip
        """
        assert self._transform_type is None
        self._transform_type = val

    def check_handle(self) -> bool:
        # True when a handle is present but unknown to the handles table
        return self.handle is not None and self.handle not in self.handles
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/timestamp.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/timestamp.py
new file mode 100644
index 0000000000..753dfc1ab4
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/timestamp.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+import datetime
+import copy
+
+# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object
+# a more complete datetime might be used by safe loading as well
+#
+# add type information (iso8601, spaced)
+
+from typing import Any, Dict, Optional, List # NOQA
+
+
class TimeStamp(datetime.datetime):
    """datetime subclass carrying YAML round-trip metadata.

    The ``_yaml`` dict remembers how the timestamp was written:
    ``t`` (was there a 'T' separator), ``tz`` (original timezone text)
    and ``delta`` (applied UTC offset).
    """

    def __init__(self, *args: Any, **kw: Any) -> None:
        self._yaml: Dict[Any, Any] = dict(t=False, tz=None, delta=0)

    def __new__(cls, *args: Any, **kw: Any) -> Any:  # datetime is immutable
        return datetime.datetime.__new__(cls, *args, **kw)

    def __deepcopy__(self, memo: Any) -> Any:
        # bug fix: the copy previously dropped microsecond, tzinfo and fold,
        # silently truncating sub-second precision on deepcopy
        ts = TimeStamp(
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
            self.tzinfo,
            fold=self.fold,
        )
        ts._yaml = copy.deepcopy(self._yaml)
        return ts

    def replace(
        self,
        year: Any = None,
        month: Any = None,
        day: Any = None,
        hour: Any = None,
        minute: Any = None,
        second: Any = None,
        microsecond: Any = None,
        tzinfo: Any = True,
        fold: Any = None,
    ) -> Any:
        """Like datetime.replace, but returns a TimeStamp with _yaml copied.

        ``tzinfo=True`` is the 'keep current' sentinel, since ``None`` is a
        meaningful tzinfo value.
        """
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        if fold is None:
            fold = self.fold
        ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold)
        ts._yaml = copy.deepcopy(self._yaml)
        return ts
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/tokens.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/tokens.py
new file mode 100644
index 0000000000..0c73dcfaed
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/tokens.py
@@ -0,0 +1,379 @@
+# coding: utf-8
+
+from ruamel.yaml.compat import nprintf # NOQA
+
+from typing import Text, Any, Dict, Optional, List # NOQA
+from .error import StreamMark # NOQA
+
SHOW_LINES = True  # include line/comment details in Token reprs (debug aid)


class Token:
    """Base class for scanner tokens: start/end marks plus comment storage."""

    __slots__ = 'start_mark', 'end_mark', '_comment'

    def __init__(self, start_mark: StreamMark, end_mark: StreamMark) -> None:
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self) -> Any:
        # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
        #               hasattr('self', key)]
        attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
        attributes.sort()
        # arguments = ', '.join(
        #     [f'{key!s}={getattr(self, key)!r})' for key in attributes]
        # )
        arguments = [f'{key!s}={getattr(self, key)!r}' for key in attributes]
        if SHOW_LINES:
            # best effort: marks/comments may be absent on synthetic tokens
            try:
                arguments.append('line: ' + str(self.start_mark.line))
            except:  # NOQA
                pass
            try:
                arguments.append('comment: ' + str(self._comment))
            except:  # NOQA
                pass
        return f'{self.__class__.__name__}({", ".join(arguments)})'

    @property
    def column(self) -> int:
        # column of the token's start mark
        return self.start_mark.column

    @column.setter
    def column(self, pos: Any) -> None:
        self.start_mark.column = pos

    # old style ( <= 0.17) is a TWO element list with first being the EOL
    # comment concatenated with following FLC/BLNK; and second being a list of FLC/BLNK
    # preceding the token
    # new style ( >= 0.17 ) is a THREE element list with the first being a list of
    # preceding FLC/BLNK, the second EOL and the third following FLC/BLNK
    # note that new style has differing order, and does not consist of CommentToken(s)
    # but of CommentInfo instances
    # any non-assigned values in new style are None, but first and last can be empty list
    # new style routines add one comment at a time

    # going to be deprecated in favour of add_comment_eol/post
    def add_post_comment(self, comment: Any) -> None:
        """Attach an old-style (version 0) post/EOL comment to this token."""
        if not hasattr(self, '_comment'):
            self._comment = [None, None]
        else:
            assert len(self._comment) in [2, 5]  # make sure it is version 0
        # if isinstance(comment, CommentToken):
        #     if comment.value.startswith('# C09'):
        #         raise
        self._comment[0] = comment

    # going to be deprecated in favour of add_comment_pre
    def add_pre_comments(self, comments: Any) -> None:
        """Attach an old-style (version 0) list of preceding comments."""
        if not hasattr(self, '_comment'):
            self._comment = [None, None]
        else:
            assert len(self._comment) == 2  # make sure it is version 0
        assert self._comment[1] is None
        self._comment[1] = comments
        return

    # new style
    def add_comment_pre(self, comment: Any) -> None:
        """Append one new-style preceding comment (slot 0)."""
        if not hasattr(self, '_comment'):
            self._comment = [[], None, None]  # type: ignore
        else:
            assert len(self._comment) == 3
            if self._comment[0] is None:
                self._comment[0] = []  # type: ignore
        self._comment[0].append(comment)  # type: ignore

    def add_comment_eol(self, comment: Any, comment_type: Any) -> None:
        """Set the new-style EOL comment (slot 1) at index comment_type."""
        if not hasattr(self, '_comment'):
            self._comment = [None, None, None]
        else:
            assert len(self._comment) == 3
            # NOTE(review): this assert forces the following 'if' to always
            # be true whenever _comment already existed — confirm intent
            assert self._comment[1] is None
        if self.comment[1] is None:
            self._comment[1] = []  # type: ignore
        self._comment[1].extend([None] * (comment_type + 1 - len(self.comment[1])))  # type: ignore # NOQA
        # nprintf('commy', self.comment, comment_type)
        self._comment[1][comment_type] = comment  # type: ignore

    def add_comment_post(self, comment: Any) -> None:
        """Append one new-style following comment (slot 2)."""
        if not hasattr(self, '_comment'):
            self._comment = [None, None, []]  # type: ignore
        else:
            assert len(self._comment) == 3
            if self._comment[2] is None:
                self._comment[2] = []  # type: ignore
        self._comment[2].append(comment)  # type: ignore

    # def get_comment(self) -> Any:
    #     return getattr(self, '_comment', None)

    @property
    def comment(self) -> Any:
        # None when no comment has ever been attached
        return getattr(self, '_comment', None)

    def move_old_comment(self, target: Any, empty: bool = False) -> Any:
        """move a comment from this token to target (normally next token)
        used to combine e.g. comments before a BlockEntryToken to the
        ScalarToken that follows it
        empty is a special for empty values -> comment after key
        """
        c = self.comment
        if c is None:
            return
        # don't push beyond last element
        if isinstance(target, (StreamEndToken, DocumentStartToken)):
            return
        delattr(self, '_comment')
        tc = target.comment
        if not tc:  # target comment, just insert
            # special for empty value in key: value issue 25
            if empty:
                c = [c[0], c[1], None, None, c[0]]
            target._comment = c
            # nprint('mco2:', self, target, target.comment, empty)
            return self
        if c[0] and tc[0] or c[1] and tc[1]:
            raise NotImplementedError(f'overlap in comment {c!r} {tc!r}')
        if c[0]:
            tc[0] = c[0]
        if c[1]:
            tc[1] = c[1]
        return self

    def split_old_comment(self) -> Any:
        """ split the post part of a comment, and return it
        as comment to be added. Delete second part if [None, None]
         abc:  # this goes to sequence
           # this goes to first element
           - first element
        """
        comment = self.comment
        if comment is None or comment[0] is None:
            return None  # nothing to do
        ret_val = [comment[0], None]
        if comment[1] is None:
            delattr(self, '_comment')
        return ret_val

    def move_new_comment(self, target: Any, empty: bool = False) -> Any:
        """move a comment from this token to target (normally next token)
        used to combine e.g. comments before a BlockEntryToken to the
        ScalarToken that follows it
        empty is a special for empty values -> comment after key
        """
        c = self.comment
        if c is None:
            return
        # don't push beyond last element
        if isinstance(target, (StreamEndToken, DocumentStartToken)):
            return
        delattr(self, '_comment')
        tc = target.comment
        if not tc:  # target comment, just insert
            # special for empty value in key: value issue 25
            if empty:
                c = [c[0], c[1], c[2]]
            target._comment = c
            # nprint('mco2:', self, target, target.comment, empty)
            return self
        # if self and target have both pre, eol or post comments, something seems wrong
        for idx in range(3):
            if c[idx] is not None and tc[idx] is not None:
                raise NotImplementedError(f'overlap in comment {c!r} {tc!r}')
        # move the comment parts
        for idx in range(3):
            if c[idx]:
                tc[idx] = c[idx]
        return self
+
+
+# class BOMToken(Token):
+# id = '<byte order mark>'
+
+
class DirectiveToken(Token):
    # '%YAML ...' or '%TAG ...' line; value holds the parsed arguments
    __slots__ = 'name', 'value'
    id = '<directive>'

    def __init__(self, name: Any, value: Any, start_mark: Any, end_mark: Any) -> None:
        Token.__init__(self, start_mark, end_mark)
        self.name = name
        self.value = value


class DocumentStartToken(Token):
    # '---'
    __slots__ = ()
    id = '<document start>'


class DocumentEndToken(Token):
    # '...'
    __slots__ = ()
    id = '<document end>'


class StreamStartToken(Token):
    # start of input; remembers the detected encoding, if any
    __slots__ = ('encoding',)
    id = '<stream start>'

    def __init__(
        self, start_mark: Any = None, end_mark: Any = None, encoding: Any = None,
    ) -> None:
        Token.__init__(self, start_mark, end_mark)
        self.encoding = encoding


class StreamEndToken(Token):
    # end of input
    __slots__ = ()
    id = '<stream end>'


class BlockSequenceStartToken(Token):
    __slots__ = ()
    id = '<block sequence start>'


class BlockMappingStartToken(Token):
    __slots__ = ()
    id = '<block mapping start>'


class BlockEndToken(Token):
    __slots__ = ()
    id = '<block end>'


class FlowSequenceStartToken(Token):
    __slots__ = ()
    id = '['


class FlowMappingStartToken(Token):
    __slots__ = ()
    id = '{'


class FlowSequenceEndToken(Token):
    __slots__ = ()
    id = ']'


class FlowMappingEndToken(Token):
    __slots__ = ()
    id = '}'


class KeyToken(Token):
    # '?' (or implicit key marker)
    __slots__ = ()
    id = '?'

    # def x__repr__(self):
    #     return f'KeyToken({self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0]})'


class ValueToken(Token):
    # ':'
    __slots__ = ()
    id = ':'


class BlockEntryToken(Token):
    # '-'
    __slots__ = ()
    id = '-'


class FlowEntryToken(Token):
    # ','
    __slots__ = ()
    id = ','


class AliasToken(Token):
    # '*name'
    __slots__ = ('value',)
    id = '<alias>'

    def __init__(self, value: Any, start_mark: Any, end_mark: Any) -> None:
        Token.__init__(self, start_mark, end_mark)
        self.value = value


class AnchorToken(Token):
    # '&name'
    __slots__ = ('value',)
    id = '<anchor>'

    def __init__(self, value: Any, start_mark: Any, end_mark: Any) -> None:
        Token.__init__(self, start_mark, end_mark)
        self.value = value


class TagToken(Token):
    # '!handle!suffix'; value is the (handle, suffix) pair
    __slots__ = ('value',)
    id = '<tag>'

    def __init__(self, value: Any, start_mark: Any, end_mark: Any) -> None:
        Token.__init__(self, start_mark, end_mark)
        self.value = value


class ScalarToken(Token):
    # scalar value; plain indicates an unquoted scalar, style the quoting used
    __slots__ = 'value', 'plain', 'style'
    id = '<scalar>'

    def __init__(
        self, value: Any, plain: Any, start_mark: Any, end_mark: Any, style: Any = None,
    ) -> None:
        Token.__init__(self, start_mark, end_mark)
        self.value = value
        self.plain = plain
        self.style = style
+
+
class CommentToken(Token):
    """Token holding a comment; positioned by a start mark or a bare column."""

    __slots__ = '_value', '_column', 'pre_done'
    id = '<comment>'

    def __init__(
        self, value: Any, start_mark: Any = None, end_mark: Any = None, column: Any = None,
    ) -> None:
        # without a start mark an explicit column is mandatory
        if start_mark is None:
            assert column is not None
            self._column = column
        Token.__init__(self, start_mark, None)  # type: ignore
        self._value = value

    @property
    def value(self) -> str:
        # the stored value may be a string or an iterable of string pieces
        raw = self._value
        if isinstance(raw, str):
            return raw
        return "".join(raw)

    @value.setter
    def value(self, val: Any) -> None:
        self._value = val

    def reset(self) -> None:
        # forget the 'pre_done' marker if it was ever set
        try:
            delattr(self, 'pre_done')
        except AttributeError:
            pass

    def __repr__(self) -> Any:
        details = f'{self.value!r}'
        if SHOW_LINES:
            try:
                details += ', line: ' + str(self.start_mark.line)
            except:  # NOQA
                pass
            try:
                details += ', col: ' + str(self.start_mark.column)
            except:  # NOQA
                pass
        return f'CommentToken({details})'

    def __eq__(self, other: Any) -> bool:
        # equal when marks and textual value all agree
        return (
            self.start_mark == other.start_mark
            and self.end_mark == other.end_mark
            and self.value == other.value
        )

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)
diff --git a/contrib/python/ruamel.yaml/py3/ruamel/yaml/util.py b/contrib/python/ruamel.yaml/py3/ruamel/yaml/util.py
new file mode 100644
index 0000000000..b621ce0758
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ruamel/yaml/util.py
@@ -0,0 +1,257 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+import datetime
+from functools import partial
+import re
+
+
+from typing import Any, Dict, Optional, List, Text, Callable, Union # NOQA
+from .compat import StreamTextType # NOQA
+
+
class LazyEval:
    """
    Lightweight wrapper around lazily evaluated func(*args, **kwargs).

    func runs only when some attribute of its return value is first
    accessed; every attribute access is then forwarded to that value.
    (Special method lookups on the type itself, e.g. __hash__, bypass
    this.)  The only own attribute is lazy_self, a closure holding either
    the pending call or the computed result.
    """

    def __init__(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:
        def lazy_self() -> Any:
            result = func(*args, **kwargs)
            # replace ourselves with a closure that just hands back the result
            object.__setattr__(self, 'lazy_self', lambda: result)
            return result

        object.__setattr__(self, 'lazy_self', lazy_self)

    def __getattribute__(self, name: str) -> Any:
        lazy_self = object.__getattribute__(self, 'lazy_self')
        if name == 'lazy_self':
            return lazy_self
        # any other attribute triggers evaluation and is delegated
        return getattr(lazy_self(), name)

    def __setattr__(self, name: str, value: Any) -> None:
        setattr(self.lazy_self(), name, value)
+
+
# RegExp(pattern, flags) yields a LazyEval that compiles the pattern on
# first use, avoiding regex compilation cost at import time
RegExp = partial(LazyEval, re.compile)

# YAML timestamp: date part mandatory; time, fraction and timezone optional.
# The named groups feed create_timestamp() below.
timestamp_regexp = RegExp(
    """^(?P<year>[0-9][0-9][0-9][0-9])
       -(?P<month>[0-9][0-9]?)
       -(?P<day>[0-9][0-9]?)
       (?:((?P<t>[Tt])|[ \\t]+)    # explictly not retaining extra spaces
       (?P<hour>[0-9][0-9]?)
       :(?P<minute>[0-9][0-9])
       :(?P<second>[0-9][0-9])
       (?:\\.(?P<fraction>[0-9]*))?
       (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
       (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
    re.X,
)
+
+
def create_timestamp(
    year: Any,
    month: Any,
    day: Any,
    t: Any,
    hour: Any,
    minute: Any,
    second: Any,
    fraction: Any,
    tz: Any,
    tz_sign: Any,
    tz_hour: Any,
    tz_minute: Any,
) -> Union[datetime.datetime, datetime.date]:
    """Build a date/datetime from the groups of a timestamp_regexp match.

    Returns a date when no time part is present.  Fractions are rounded to
    microseconds; a timezone offset is subtracted to normalize to UTC.
    """
    MAX_FRAC = 999999
    year = int(year)
    month = int(month)
    day = int(day)
    if not hour:
        # date-only form
        return datetime.date(year, month, day)
    hour = int(hour)
    minute = int(minute)
    second = int(second)
    frac = 0
    if fraction:
        # pad/truncate to microseconds, then round on the seventh digit
        frac = int(fraction[:6].ljust(6, '0'))
        if len(fraction) > 6 and int(fraction[6]) > 4:
            frac += 1
        # rounding may overflow into the next second (handled via delta)
        fraction = 0 if frac > MAX_FRAC else frac
    else:
        fraction = 0
    delta = None
    if tz_sign:
        offset = datetime.timedelta(
            hours=int(tz_hour),
            minutes=int(tz_minute) if tz_minute else 0,
            seconds=1 if frac > MAX_FRAC else 0,
        )
        delta = -offset if tz_sign == '-' else offset
    elif frac > MAX_FRAC:
        delta = -datetime.timedelta(seconds=1)
    # tzinfo is deliberately not attached; the offset is folded in instead
    data = datetime.datetime(year, month, day, hour, minute, second, fraction)
    if delta:
        data -= delta
    return data
+
+
+# originally as comment
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test in your test suite
+# that check this routines output against a known piece of your YAML
+# before upgrades to this code break your round-tripped YAML
def load_yaml_guess_indent(stream: StreamTextType, **kw: Any) -> Any:
    """guess the indent and block sequence indent of yaml stream/string

    returns round_trip_loaded stream, indent level, block sequence indent
    - block sequence indent is the number of spaces before a dash relative to previous indent
    - if there are no block sequences, indent is taken from nested mappings, block sequence
      indent is unset (None) in that case
    """
    from .main import YAML

    # load a YAML document, guess the indentation, if you use TABs you are on your own
    def leading_spaces(line: Any) -> int:
        # number of leading space characters of line
        idx = 0
        while idx < len(line) and line[idx] == ' ':
            idx += 1
        return idx

    if isinstance(stream, str):
        yaml_str: Any = stream
    elif isinstance(stream, bytes):
        # most likely, but the Reader checks BOM for this
        yaml_str = stream.decode('utf-8')
    else:
        yaml_str = stream.read()
    map_indent = None
    indent = None  # default if not found for some reason
    block_seq_indent = None
    prev_line_key_only = None
    key_indent = 0
    for line in yaml_str.splitlines():
        rline = line.rstrip()
        lline = rline.lstrip()
        if lline.startswith('- '):
            # first block sequence entry fixes both indents
            l_s = leading_spaces(line)
            block_seq_indent = l_s - key_indent
            idx = l_s + 1
            while line[idx] == ' ':  # this will end as we rstripped
                idx += 1
            if line[idx] == '#':  # comment after -
                continue
            indent = idx - key_indent
            break
        if map_indent is None and prev_line_key_only is not None and rline:
            # value nested under the previous key-only line -> mapping indent
            idx = 0
            while line[idx] in ' -':
                idx += 1
            if idx > prev_line_key_only:
                map_indent = idx - prev_line_key_only
        if rline.endswith(':'):
            # a key with no value on the same line; remember its indent
            key_indent = leading_spaces(line)
            idx = 0
            while line[idx] == ' ':  # this will end on ':'
                idx += 1
            prev_line_key_only = idx
            continue
        prev_line_key_only = None
    if indent is None and map_indent is not None:
        indent = map_indent
    yaml = YAML()
    return yaml.load(yaml_str, **kw), indent, block_seq_indent
+
+
def configobj_walker(cfg: Any) -> Any:
    """
    walks over a ConfigObj (INI file with comments) generating
    corresponding YAML output (including comments)
    """
    from configobj import ConfigObj  # type: ignore

    assert isinstance(cfg, ConfigObj)
    # initial comments, then the section tree, then trailing comments;
    # blank/whitespace-only lines are filtered out everywhere
    yield from (c for c in cfg.initial_comment if c.strip())
    yield from (s for s in _walk_section(cfg) if s.strip())
    yield from (c for c in cfg.final_comment if c.strip())
+
+
def _walk_section(s: Any, level: int = 0) -> Any:
    """Yield YAML lines for one ConfigObj Section, recursing into subsections.

    Scalars become 'key: value' lines; multi-line values are emitted as
    literal block scalars, values containing ':' are single-quoted.
    Per-key and inline comments are carried over.
    """
    from configobj import Section

    assert isinstance(s, Section)
    indent = '  ' * level
    for name in s.scalars:
        for c in s.comments[name]:
            yield indent + c.strip()
        x = s[name]
        if '\n' in x:
            # multi-line value -> YAML literal block scalar
            i = indent + '  '
            x = '|\n' + i + x.strip().replace('\n', '\n' + i)
        elif ':' in x:
            # quote so the colon is not taken as a mapping separator
            x = "'" + x.replace("'", "''") + "'"
        line = f'{indent}{name}: {x}'
        c = s.inline_comments[name]
        if c:
            line += ' ' + c
        yield line
    for name in s.sections:
        for c in s.comments[name]:
            yield indent + c.strip()
        line = f'{indent}{name}:'
        c = s.inline_comments[name]
        if c:
            line += ' ' + c
        yield line
        for val in _walk_section(s[name], level=level + 1):
            yield val
+
+
+# def config_obj_2_rt_yaml(cfg):
+# from .comments import CommentedMap, CommentedSeq
+# from configobj import ConfigObj
+# assert isinstance(cfg, ConfigObj)
+# #for c in cfg.initial_comment:
+# # if c.strip():
+# # pass
+# cm = CommentedMap()
+# for name in s.sections:
+# cm[name] = d = CommentedMap()
+#
+#
+# #for c in cfg.final_comment:
+# # if c.strip():
+# # yield c
+# return cm
diff --git a/contrib/python/ruamel.yaml/py3/ya.make b/contrib/python/ruamel.yaml/py3/ya.make
new file mode 100644
index 0000000000..85fdafc9a6
--- /dev/null
+++ b/contrib/python/ruamel.yaml/py3/ya.make
@@ -0,0 +1,55 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(0.17.40)
+
+LICENSE(MIT)
+
+PEERDIR(
+ contrib/python/ruamel.yaml.clib
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ ruamel/yaml/__init__.py
+ ruamel/yaml/anchor.py
+ ruamel/yaml/comments.py
+ ruamel/yaml/compat.py
+ ruamel/yaml/composer.py
+ ruamel/yaml/configobjwalker.py
+ ruamel/yaml/constructor.py
+ ruamel/yaml/cyaml.py
+ ruamel/yaml/dumper.py
+ ruamel/yaml/emitter.py
+ ruamel/yaml/error.py
+ ruamel/yaml/events.py
+ ruamel/yaml/loader.py
+ ruamel/yaml/main.py
+ ruamel/yaml/nodes.py
+ ruamel/yaml/parser.py
+ ruamel/yaml/reader.py
+ ruamel/yaml/representer.py
+ ruamel/yaml/resolver.py
+ ruamel/yaml/scalarbool.py
+ ruamel/yaml/scalarfloat.py
+ ruamel/yaml/scalarint.py
+ ruamel/yaml/scalarstring.py
+ ruamel/yaml/scanner.py
+ ruamel/yaml/serializer.py
+ ruamel/yaml/tag.py
+ ruamel/yaml/timestamp.py
+ ruamel/yaml/tokens.py
+ ruamel/yaml/util.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/ruamel.yaml/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+ ruamel/yaml/py.typed
+)
+
+END()
diff --git a/contrib/python/ruamel.yaml/ya.make b/contrib/python/ruamel.yaml/ya.make
new file mode 100644
index 0000000000..97670cf32f
--- /dev/null
+++ b/contrib/python/ruamel.yaml/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/ruamel.yaml/py2)
+ELSE()
+ PEERDIR(contrib/python/ruamel.yaml/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/tenacity/py2/.dist-info/METADATA b/contrib/python/tenacity/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..80edeb2ac1
--- /dev/null
+++ b/contrib/python/tenacity/py2/.dist-info/METADATA
@@ -0,0 +1,33 @@
+Metadata-Version: 2.1
+Name: tenacity
+Version: 7.0.0
+Summary: Retry code until it succeeds
+Home-page: https://github.com/jd/tenacity
+Author: Julien Danjou
+Author-email: julien@danjou.info
+License: Apache 2.0
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Utilities
+Requires-Dist: six (>=1.9.0)
+Requires-Dist: futures (>=3.0) ; python_version == "2.7"
+Requires-Dist: monotonic (>=0.6) ; python_version == "2.7"
+Requires-Dist: typing (>=3.7.4.1) ; python_version == "2.7"
+Provides-Extra: doc
+Requires-Dist: reno ; extra == 'doc'
+Requires-Dist: sphinx ; extra == 'doc'
+Requires-Dist: tornado (>=4.5) ; extra == 'doc'
+
+Tenacity is a general-purpose retrying library to simplify the task of adding retry behavior to just about anything.
+
+
diff --git a/contrib/python/tenacity/py2/.dist-info/top_level.txt b/contrib/python/tenacity/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..48f8e12ee7
--- /dev/null
+++ b/contrib/python/tenacity/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+tenacity
diff --git a/contrib/python/tenacity/py2/LICENSE b/contrib/python/tenacity/py2/LICENSE
new file mode 100644
index 0000000000..7a4a3ea242
--- /dev/null
+++ b/contrib/python/tenacity/py2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. \ No newline at end of file
diff --git a/contrib/python/tenacity/py2/README.rst b/contrib/python/tenacity/py2/README.rst
new file mode 100644
index 0000000000..2f025ef147
--- /dev/null
+++ b/contrib/python/tenacity/py2/README.rst
@@ -0,0 +1,599 @@
+Tenacity
+========
+.. image:: https://img.shields.io/pypi/v/tenacity.svg
+ :target: https://pypi.python.org/pypi/tenacity
+
+.. image:: https://circleci.com/gh/jd/tenacity.svg?style=svg
+ :target: https://circleci.com/gh/jd/tenacity
+
+.. image:: https://img.shields.io/endpoint.svg?url=https://dashboard.mergify.io/badges/jd/tenacity&style=flat
+ :target: https://mergify.io
+ :alt: Mergify Status
+
+Tenacity is an Apache 2.0 licensed general-purpose retrying library, written in
+Python, to simplify the task of adding retry behavior to just about anything.
+It originates from `a fork of retrying
+<https://github.com/rholder/retrying/issues/65>`_ which is sadly no longer
+`maintained <https://julien.danjou.info/python-tenacity/>`_. Tenacity isn't
+API compatible with retrying but adds significant new functionality and
+fixes a number of longstanding bugs.
+
+The simplest use case is retrying a flaky function whenever an `Exception`
+occurs until a value is returned.
+
+.. testcode::
+
+ import random
+ from tenacity import retry
+
+ @retry
+ def do_something_unreliable():
+ if random.randint(0, 10) > 1:
+ raise IOError("Broken sauce, everything is hosed!!!111one")
+ else:
+ return "Awesome sauce!"
+
+ print(do_something_unreliable())
+
+.. testoutput::
+ :hide:
+
+ Awesome sauce!
+
+
+.. toctree::
+ :hidden:
+ :maxdepth: 2
+
+ changelog
+ api
+
+
+Features
+--------
+
+- Generic Decorator API
+- Specify stop condition (i.e. limit by number of attempts)
+- Specify wait condition (i.e. exponential backoff sleeping between attempts)
+- Customize retrying on Exceptions
+- Customize retrying on expected returned result
+- Retry on coroutines
+- Retry code block with context manager
+
+
+Installation
+------------
+
+To install *tenacity*, simply:
+
+.. code-block:: bash
+
+ $ pip install tenacity
+
+
+Examples
+----------
+
+Basic Retry
+~~~~~~~~~~~
+
+.. testsetup:: *
+
+ import logging
+ #
+ # Note the following import is used for demonstration convenience only.
+ # Production code should always explicitly import the names it needs.
+ #
+ from tenacity import *
+
+ class MyException(Exception):
+ pass
+
+As you saw above, the default behavior is to retry forever without waiting when
+an exception is raised.
+
+.. testcode::
+
+ @retry
+ def never_give_up_never_surrender():
+ print("Retry forever ignoring Exceptions, don't wait between retries")
+ raise Exception
+
+Stopping
+~~~~~~~~
+
+Let's be a little less persistent and set some boundaries, such as the number
+of attempts before giving up.
+
+.. testcode::
+
+ @retry(stop=stop_after_attempt(7))
+ def stop_after_7_attempts():
+ print("Stopping after 7 attempts")
+ raise Exception
+
+We don't have all day, so let's set a boundary for how long we should be
+retrying stuff.
+
+.. testcode::
+
+ @retry(stop=stop_after_delay(10))
+ def stop_after_10_s():
+ print("Stopping after 10 seconds")
+ raise Exception
+
+You can combine several stop conditions by using the `|` operator:
+
+.. testcode::
+
+ @retry(stop=(stop_after_delay(10) | stop_after_attempt(5)))
+ def stop_after_10_s_or_5_retries():
+ print("Stopping after 10 seconds or 5 retries")
+ raise Exception
+
+Waiting before retrying
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Most things don't like to be polled as fast as possible, so let's just wait 2
+seconds between retries.
+
+.. testcode::
+
+ @retry(wait=wait_fixed(2))
+ def wait_2_s():
+ print("Wait 2 second between retries")
+ raise Exception
+
+Some things perform best with a bit of randomness injected.
+
+.. testcode::
+
+ @retry(wait=wait_random(min=1, max=2))
+ def wait_random_1_to_2_s():
+ print("Randomly wait 1 to 2 seconds between retries")
+ raise Exception
+
+Then again, it's hard to beat exponential backoff when retrying distributed
+services and other remote endpoints.
+
+.. testcode::
+
+ @retry(wait=wait_exponential(multiplier=1, min=4, max=10))
+ def wait_exponential_1():
+ print("Wait 2^x * 1 second between each retry starting with 4 seconds, then up to 10 seconds, then 10 seconds afterwards")
+ raise Exception
+
+
+Then again, it's also hard to beat combining fixed waits and jitter (to
+help avoid thundering herds) when retrying distributed services and other
+remote endpoints.
+
+.. testcode::
+
+ @retry(wait=wait_fixed(3) + wait_random(0, 2))
+ def wait_fixed_jitter():
+ print("Wait at least 3 seconds, and add up to 2 seconds of random delay")
+ raise Exception
+
+When multiple processes are in contention for a shared resource, exponentially
+increasing jitter helps minimise collisions.
+
+.. testcode::
+
+ @retry(wait=wait_random_exponential(multiplier=1, max=60))
+ def wait_exponential_jitter():
+ print("Randomly wait up to 2^x * 1 seconds between each retry until the range reaches 60 seconds, then randomly up to 60 seconds afterwards")
+ raise Exception
+
+
+Sometimes it's necessary to build a chain of backoffs.
+
+.. testcode::
+
+ @retry(wait=wait_chain(*[wait_fixed(3) for i in range(3)] +
+ [wait_fixed(7) for i in range(2)] +
+ [wait_fixed(9)]))
+ def wait_fixed_chained():
+ print("Wait 3s for 3 attempts, 7s for the next 2 attempts and 9s for all attempts thereafter")
+ raise Exception
+
+Whether to retry
+~~~~~~~~~~~~~~~~
+
+We have a few options for dealing with retries that raise specific or general
+exceptions, as in the cases here.
+
+.. testcode::
+
+ @retry(retry=retry_if_exception_type(IOError))
+ def might_io_error():
+ print("Retry forever with no wait if an IOError occurs, raise any other errors")
+ raise Exception
+
+We can also use the result of the function to alter the behavior of retrying.
+
+.. testcode::
+
+ def is_none_p(value):
+ """Return True if value is None"""
+ return value is None
+
+ @retry(retry=retry_if_result(is_none_p))
+ def might_return_none():
+ print("Retry with no wait if return value is None")
+
+We can also combine several conditions:
+
+.. testcode::
+
+ def is_none_p(value):
+ """Return True if value is None"""
+ return value is None
+
+ @retry(retry=(retry_if_result(is_none_p) | retry_if_exception_type()))
+ def might_return_none():
+ print("Retry forever ignoring Exceptions with no wait if return value is None")
+
+Any combination of stop, wait, etc. is also supported to give you the freedom
+to mix and match.
+
+It's also possible to retry explicitly at any time by raising the `TryAgain`
+exception:
+
+.. testcode::
+
+ @retry
+ def do_something():
+ result = something_else()
+ if result == 23:
+ raise TryAgain
+
+Error Handling
+~~~~~~~~~~~~~~
+
+While callables that "timeout" retrying raise a `RetryError` by default,
+we can reraise the last attempt's exception if needed:
+
+.. testcode::
+
+ @retry(reraise=True, stop=stop_after_attempt(3))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception()
+ except MyException:
+ # timed out retrying
+ pass
+
+Before and After Retry, and Logging
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It's possible to execute an action before any attempt of calling the function
+by using the before callback function:
+
+.. testcode::
+
+ import logging
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ @retry(stop=stop_after_attempt(3), before=before_log(logger, logging.DEBUG))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+In the same spirit, it's possible to execute an action after a call that failed:
+
+.. testcode::
+
+ import logging
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ @retry(stop=stop_after_attempt(3), after=after_log(logger, logging.DEBUG))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+It's also possible to only log failures that are going to be retried. Normally
+retries happen after a wait interval, so the keyword argument is called
+``before_sleep``:
+
+.. testcode::
+
+ import logging
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ @retry(stop=stop_after_attempt(3),
+ before_sleep=before_sleep_log(logger, logging.DEBUG))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+
+Statistics
+~~~~~~~~~~
+
+You can access the statistics about the retry made over a function by using the
+`retry` attribute attached to the function and its `statistics` attribute:
+
+.. testcode::
+
+ @retry(stop=stop_after_attempt(3))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception()
+ except Exception:
+ pass
+
+ print(raise_my_exception.retry.statistics)
+
+.. testoutput::
+ :hide:
+
+ ...
+
+Custom Callbacks
+~~~~~~~~~~~~~~~~
+
+You can also define your own callbacks. The callback should accept one
+parameter called ``retry_state`` that contains all information about current
+retry invocation.
+
+For example, you can call a custom callback function after all retries failed,
+without raising an exception (or you can re-raise or do anything really)
+
+.. testcode::
+
+ def return_last_value(retry_state):
+ """return the result of the last call attempt"""
+ return retry_state.outcome.result()
+
+ def is_false(value):
+ """Return True if value is False"""
+ return value is False
+
+ # will return False after trying 3 times to get a different result
+ @retry(stop=stop_after_attempt(3),
+ retry_error_callback=return_last_value,
+ retry=retry_if_result(is_false))
+ def eventually_return_false():
+ return False
+
+RetryCallState
+~~~~~~~~~~~~~~
+
+``retry_state`` argument is an object of `RetryCallState` class:
+
+.. autoclass:: tenacity.RetryCallState
+
+ Constant attributes:
+
+ .. autoattribute:: start_time(float)
+ :annotation:
+
+ .. autoattribute:: retry_object(BaseRetrying)
+ :annotation:
+
+ .. autoattribute:: fn(callable)
+ :annotation:
+
+ .. autoattribute:: args(tuple)
+ :annotation:
+
+ .. autoattribute:: kwargs(dict)
+ :annotation:
+
+ Variable attributes:
+
+ .. autoattribute:: attempt_number(int)
+ :annotation:
+
+ .. autoattribute:: outcome(tenacity.Future or None)
+ :annotation:
+
+ .. autoattribute:: outcome_timestamp(float or None)
+ :annotation:
+
+ .. autoattribute:: idle_for(float)
+ :annotation:
+
+ .. autoattribute:: next_action(tenacity.RetryAction or None)
+ :annotation:
+
+Other Custom Callbacks
+~~~~~~~~~~~~~~~~~~~~~~
+
+It's also possible to define custom callbacks for other keyword arguments.
+
+.. function:: my_stop(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+ :return: whether or not retrying should stop
+ :rtype: bool
+
+.. function:: my_wait(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+ :return: number of seconds to wait before next retry
+ :rtype: float
+
+.. function:: my_retry(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+ :return: whether or not retrying should continue
+ :rtype: bool
+
+.. function:: my_before(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+
+.. function:: my_after(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+
+.. function:: my_before_sleep(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+
+Here's an example with a custom ``before_sleep`` function:
+
+.. testcode::
+
+ import logging
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ def my_before_sleep(retry_state):
+ if retry_state.attempt_number < 1:
+ loglevel = logging.INFO
+ else:
+ loglevel = logging.WARNING
+ logger.log(
+ loglevel, 'Retrying %s: attempt %s ended with: %s',
+ retry_state.fn, retry_state.attempt_number, retry_state.outcome)
+
+ @retry(stop=stop_after_attempt(3), before_sleep=my_before_sleep)
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception()
+ except RetryError:
+ pass
+
+
+Changing Arguments at Run Time
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can change the arguments of a retry decorator as needed when calling it by
+using the `retry_with` function attached to the wrapped function:
+
+.. testcode::
+
+ @retry(stop=stop_after_attempt(3))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception.retry_with(stop=stop_after_attempt(4))()
+ except Exception:
+ pass
+
+ print(raise_my_exception.retry.statistics)
+
+.. testoutput::
+ :hide:
+
+ ...
+
+If you want to use variables to set up the retry parameters, you don't have
+to use the `retry` decorator - you can instead use `Retrying` directly:
+
+.. testcode::
+
+ def never_good_enough(arg1):
+ raise Exception('Invalid argument: {}'.format(arg1))
+
+ def try_never_good_enough(max_attempts=3):
+ retryer = Retrying(stop=stop_after_attempt(max_attempts), reraise=True)
+ retryer(never_good_enough, 'I really do try')
+
+Retrying code block
+~~~~~~~~~~~~~~~~~~~
+
+Tenacity allows you to retry a code block without the need to wrap it in an
+isolated function. This makes it easy to isolate a failing block while sharing
+context. The trick is to combine a for loop and a context manager.
+
+.. testcode::
+
+ from tenacity import Retrying, RetryError, stop_after_attempt
+
+ try:
+ for attempt in Retrying(stop=stop_after_attempt(3)):
+ with attempt:
+ raise Exception('My code is failing!')
+ except RetryError:
+ pass
+
+You can configure every detail of the retry policy by configuring the Retrying
+object.
+
+With async code you can use AsyncRetrying.
+
+.. testcode::
+
+ from tenacity import AsyncRetrying, RetryError, stop_after_attempt
+
+ async def function():
+ try:
+ async for attempt in AsyncRetrying(stop=stop_after_attempt(3)):
+ with attempt:
+ raise Exception('My code is failing!')
+ except RetryError:
+ pass
+
+Async and retry
+~~~~~~~~~~~~~~~
+
+Finally, ``retry`` works also on asyncio and Tornado (>= 4.5) coroutines.
+Sleeps are done asynchronously too.
+
+.. code-block:: python
+
+ @retry
+ async def my_async_function(loop):
+ await loop.getaddrinfo('8.8.8.8', 53)
+
+.. code-block:: python
+
+ @retry
+ @tornado.gen.coroutine
+ def my_async_function(http_client, url):
+ yield http_client.fetch(url)
+
+You can even use alternative event loops such as `curio` or `Trio` by passing the correct sleep function:
+
+.. code-block:: python
+
+ @retry(sleep=trio.sleep)
+ async def my_async_function(loop):
+ await asks.get('https://example.org')
+
+Contribute
+----------
+
+#. Check for open issues or open a fresh issue to start a discussion around a
+ feature idea or a bug.
+#. Fork `the repository`_ on GitHub to start making your changes to the
+ **master** branch (or branch off of it).
+#. Write a test which shows that the bug was fixed or that the feature works as
+ expected.
+#. Add a `changelog <#Changelogs>`_
+#. Make the docs better (or more detailed, or easier to read, or ...)
+
+.. _`the repository`: https://github.com/jd/tenacity
+
+Changelogs
+~~~~~~~~~~
+
+`reno`_ is used for managing changelogs. Take a look at their usage docs.
+
+The doc generation will automatically compile the changelogs. You just need to add them.
+
+.. code-block:: sh
+
+ # Opens a template file in an editor
+ tox -e reno -- new some-slug-for-my-change --edit
+
+.. _`reno`: https://docs.openstack.org/reno/latest/user/usage.html
diff --git a/contrib/python/tenacity/py2/tenacity/__init__.py b/contrib/python/tenacity/py2/tenacity/__init__.py
new file mode 100644
index 0000000000..99ecec2ecf
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/__init__.py
@@ -0,0 +1,523 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016-2018 Julien Danjou
+# Copyright 2017 Elisey Zanko
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+ from inspect import iscoroutinefunction
+except ImportError:
+ iscoroutinefunction = None
+
+try:
+ import tornado
+except ImportError:
+ tornado = None
+
+import sys
+import threading
+import typing as t
+import warnings
+from abc import ABCMeta, abstractmethod
+from concurrent import futures
+
+
+import six
+
+from tenacity import _utils
+
+# Import all built-in retry strategies for easier usage.
+from .retry import retry_base # noqa
+from .retry import retry_all # noqa
+from .retry import retry_always # noqa
+from .retry import retry_any # noqa
+from .retry import retry_if_exception # noqa
+from .retry import retry_if_exception_type # noqa
+from .retry import retry_if_not_result # noqa
+from .retry import retry_if_result # noqa
+from .retry import retry_never # noqa
+from .retry import retry_unless_exception_type # noqa
+from .retry import retry_if_exception_message # noqa
+from .retry import retry_if_not_exception_message # noqa
+
+# Import all nap strategies for easier usage.
+from .nap import sleep # noqa
+from .nap import sleep_using_event # noqa
+
+# Import all built-in stop strategies for easier usage.
+from .stop import stop_after_attempt # noqa
+from .stop import stop_after_delay # noqa
+from .stop import stop_all # noqa
+from .stop import stop_any # noqa
+from .stop import stop_never # noqa
+from .stop import stop_when_event_set # noqa
+
+# Import all built-in wait strategies for easier usage.
+from .wait import wait_chain # noqa
+from .wait import wait_combine # noqa
+from .wait import wait_exponential # noqa
+from .wait import wait_fixed # noqa
+from .wait import wait_incrementing # noqa
+from .wait import wait_none # noqa
+from .wait import wait_random # noqa
+from .wait import wait_random_exponential # noqa
+from .wait import wait_random_exponential as wait_full_jitter # noqa
+
+# Import all built-in before strategies for easier usage.
+from .before import before_log # noqa
+from .before import before_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .after import after_log # noqa
+from .after import after_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .before_sleep import before_sleep_log # noqa
+from .before_sleep import before_sleep_nothing # noqa
+
+
+WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable)
+
+
+@t.overload
+def retry(fn):
+ # type: (WrappedFn) -> WrappedFn
+ """Type signature for @retry as a raw decorator."""
+ pass
+
+
+@t.overload
+def retry(*dargs, **dkw): # noqa
+ # type: (...) -> t.Callable[[WrappedFn], WrappedFn]
+ """Type signature for the @retry() decorator constructor."""
+ pass
+
+
+def retry(*dargs, **dkw): # noqa
+ """Wrap a function with a new `Retrying` object.
+
+ :param dargs: positional arguments passed to Retrying object
+ :param dkw: keyword arguments passed to the Retrying object
+ """
+ # support both @retry and @retry() as valid syntax
+ if len(dargs) == 1 and callable(dargs[0]):
+ return retry()(dargs[0])
+ else:
+
+ def wrap(f):
+ if isinstance(f, retry_base):
+ warnings.warn(
+ (
+ "Got retry_base instance ({cls}) as callable argument, "
+ + "this will probably hang indefinitely (did you mean "
+ + "retry={cls}(...)?)"
+ ).format(cls=f.__class__.__name__)
+ )
+ if iscoroutinefunction is not None and iscoroutinefunction(f):
+ r = AsyncRetrying(*dargs, **dkw)
+ elif (
+ tornado
+ and hasattr(tornado.gen, "is_coroutine_function")
+ and tornado.gen.is_coroutine_function(f)
+ ):
+ r = TornadoRetrying(*dargs, **dkw)
+ else:
+ r = Retrying(*dargs, **dkw)
+
+ return r.wraps(f)
+
+ return wrap
+
+
+class TryAgain(Exception):
+ """Always retry the executed function when raised."""
+
+
+NO_RESULT = object()
+
+
+class DoAttempt(object):
+ pass
+
+
+class DoSleep(float):
+ pass
+
+
+class BaseAction(object):
+ """Base class for representing actions to take by retry object.
+
+ Concrete implementations must define:
+ - __init__: to initialize all necessary fields
+ - REPR_ATTRS: class variable specifying attributes to include in repr(self)
+ - NAME: for identification in retry object methods and callbacks
+ """
+
+ REPR_FIELDS = ()
+ NAME = None
+
+ def __repr__(self):
+ state_str = ", ".join(
+ "%s=%r" % (field, getattr(self, field)) for field in self.REPR_FIELDS
+ )
+ return "%s(%s)" % (type(self).__name__, state_str)
+
+ def __str__(self):
+ return repr(self)
+
+
+class RetryAction(BaseAction):
+ REPR_FIELDS = ("sleep",)
+ NAME = "retry"
+
+ def __init__(self, sleep):
+ self.sleep = float(sleep)
+
+
+_unset = object()
+
+
+def _first_set(first, second):
+ return second if first is _unset else first
+
+
class RetryError(Exception):
    """Raised when retrying gives up; wraps the final attempt."""

    def __init__(self, last_attempt):
        self.last_attempt = last_attempt
        super(RetryError, self).__init__(last_attempt)

    def reraise(self):
        """Re-raise the final attempt's exception, or this error itself."""
        if self.last_attempt.failed:
            raise self.last_attempt.result()
        raise self

    def __str__(self):
        return "{0}[{1}]".format(self.__class__.__name__, self.last_attempt)
+
+
class AttemptManager(object):
    """Context manager recording one attempt's outcome onto *retry_state*."""

    def __init__(self, retry_state):
        self.retry_state = retry_state

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        if not isinstance(exc_value, BaseException):
            # No exception escaped the block; we don't have a concrete result.
            self.retry_state.set_result(None)
            return None
        self.retry_state.set_exception((exc_type, exc_value, traceback))
        return True  # swallow the exception so the retry loop continues
+
+
class BaseRetrying(object):
    """Common engine shared by every retry controller.

    Subclasses implement ``__call__`` for their execution model (blocking,
    asyncio, tornado).  Per-attempt decisions are centralized in :meth:`iter`,
    which returns a ``DoAttempt``, a ``DoSleep`` or the final outcome of the
    wrapped call.
    """

    # Python 2 style ABC declaration (this tree targets py2); together with
    # the @abstractmethod on __call__ it documents the subclass contract.
    __metaclass__ = ABCMeta

    def __init__(
        self,
        sleep=sleep,
        stop=stop_never,
        wait=wait_none(),
        retry=retry_if_exception_type(),
        before=before_nothing,
        after=after_nothing,
        before_sleep=None,
        reraise=False,
        retry_error_cls=RetryError,
        retry_error_callback=None,
    ):
        """Configure the retry policy.

        :param sleep: callable used to pause between attempts (``nap.sleep``).
        :param stop: strategy deciding when to give up.
        :param wait: strategy computing the delay before the next attempt.
        :param retry: strategy deciding whether an outcome warrants a retry.
        :param before: callback invoked before each attempt.
        :param after: callback invoked after each rejected attempt.
        :param before_sleep: optional callback invoked just before sleeping.
        :param reraise: if true, re-raise the last attempt's exception instead
            of wrapping it in ``retry_error_cls``.
        :param retry_error_cls: exception class raised when giving up.
        :param retry_error_callback: if set, called instead of raising when the
            stop condition is reached; its return value becomes the result.
        """
        self.sleep = sleep
        self.stop = stop
        self.wait = wait
        self.retry = retry
        self.before = before
        self.after = after
        self.before_sleep = before_sleep
        self.reraise = reraise
        self._local = threading.local()  # per-thread statistics storage
        self.retry_error_cls = retry_error_cls
        self.retry_error_callback = retry_error_callback

        # This attribute was moved to RetryCallState and is deprecated on
        # Retrying objects but kept for backward compatibility.
        self.fn = None

    def copy(
        self,
        sleep=_unset,
        stop=_unset,
        wait=_unset,
        retry=_unset,
        before=_unset,
        after=_unset,
        before_sleep=_unset,
        reraise=_unset,
        retry_error_cls=_unset,
        retry_error_callback=_unset,
    ):
        """Copy this object with some parameters changed if needed."""
        # ``_unset`` (not None) marks "keep the current value" so that None
        # remains a legal override for any parameter.
        return self.__class__(
            sleep=_first_set(sleep, self.sleep),
            stop=_first_set(stop, self.stop),
            wait=_first_set(wait, self.wait),
            retry=_first_set(retry, self.retry),
            before=_first_set(before, self.before),
            after=_first_set(after, self.after),
            before_sleep=_first_set(before_sleep, self.before_sleep),
            reraise=_first_set(reraise, self.reraise),
            retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
            retry_error_callback=_first_set(
                retry_error_callback, self.retry_error_callback
            ),
        )

    def __repr__(self):
        attrs = dict(
            _utils.visible_attrs(self, attrs={"me": id(self)}),
            __class__=self.__class__.__name__,
        )
        return (
            "<%(__class__)s object at 0x%(me)x (stop=%(stop)s, "
            "wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, "
            "before=%(before)s, after=%(after)s)>"
        ) % (attrs)

    @property
    def statistics(self):
        """Return a dictionary of runtime statistics.

        This dictionary will be empty when the controller has never been
        run.  When it is running or has run previously, it should have (but
        may not have) useful and/or informational keys and values.

        .. warning:: The keys in this dictionary **should** be somewhat
                     stable (not changing), but their existence **may**
                     change between major releases as new statistics are
                     gathered or removed, so before accessing keys ensure
                     that they actually exist and handle when they do not.

        .. note:: The values in this dictionary are local to the thread
                  running the call (so if multiple threads share the same
                  retrying object - either directly or indirectly - they
                  will each have their own view of statistics they have
                  collected; in the future we may provide a way to
                  aggregate the various statistics from each thread).
        """
        try:
            return self._local.statistics
        except AttributeError:
            # First access from this thread: lazily create the dict.
            self._local.statistics = {}
            return self._local.statistics

    def wraps(self, f):
        """Wrap a function for retrying.

        :param f: A function to wraps for retrying.
        """

        @_utils.wraps(f)
        def wrapped_f(*args, **kw):
            return self(f, *args, **kw)

        def retry_with(*args, **kwargs):
            # Re-wrap f with a modified copy of this policy (see ``copy``).
            return self.copy(*args, **kwargs).wraps(f)

        wrapped_f.retry = self
        wrapped_f.retry_with = retry_with

        return wrapped_f

    def begin(self, fn):
        # Reset per-thread statistics at the start of a new wrapped call.
        self.statistics.clear()
        self.statistics["start_time"] = _utils.now()
        self.statistics["attempt_number"] = 1
        self.statistics["idle_for"] = 0
        self.fn = fn

    def iter(self, retry_state):  # noqa
        """Decide the next step for *retry_state*.

        Returns ``DoAttempt`` (make a call), ``DoSleep`` (pause, then ask
        again) or the final result; raises when giving up.
        """
        fut = retry_state.outcome
        if fut is None:
            # No outcome recorded yet: this is the start of an attempt.
            if self.before is not None:
                self.before(retry_state)
            return DoAttempt()

        # Raising TryAgain forces a retry regardless of the retry strategy.
        is_explicit_retry = retry_state.outcome.failed and isinstance(
            retry_state.outcome.exception(), TryAgain
        )
        if not (is_explicit_retry or self.retry(retry_state=retry_state)):
            # Outcome is acceptable: return it (result() re-raises a failure).
            return fut.result()

        if self.after is not None:
            self.after(retry_state=retry_state)

        self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
        if self.stop(retry_state=retry_state):
            # Giving up: delegate to the callback if any, otherwise raise.
            if self.retry_error_callback:
                return self.retry_error_callback(retry_state=retry_state)
            retry_exc = self.retry_error_cls(fut)
            if self.reraise:
                # reraise() itself raises; the outer `raise` is defensive.
                raise retry_exc.reraise()
            six.raise_from(retry_exc, fut.exception())

        if self.wait:
            sleep = self.wait(retry_state=retry_state)
        else:
            sleep = 0.0
        retry_state.next_action = RetryAction(sleep)
        retry_state.idle_for += sleep
        self.statistics["idle_for"] += sleep
        self.statistics["attempt_number"] += 1

        if self.before_sleep is not None:
            self.before_sleep(retry_state=retry_state)

        return DoSleep(sleep)

    def __iter__(self):
        # Generator API: yields an AttemptManager per attempt and sleeps
        # between attempts, e.g. ``for attempt in Retrying(...): ...``.
        self.begin(None)

        retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
        while True:
            do = self.iter(retry_state=retry_state)
            if isinstance(do, DoAttempt):
                yield AttemptManager(retry_state=retry_state)
            elif isinstance(do, DoSleep):
                retry_state.prepare_for_next_attempt()
                self.sleep(do)
            else:
                break

    @abstractmethod
    def __call__(self, *args, **kwargs):
        """Execute the wrapped callable under this retry policy."""
        pass

    def call(self, *args, **kwargs):
        """Use ``__call__`` instead because this method is deprecated."""
        warnings.warn(
            "'call()' method is deprecated. " + "Use '__call__()' instead",
            DeprecationWarning,
        )
        return self.__call__(*args, **kwargs)
+
+
class Retrying(BaseRetrying):
    """Synchronous retry controller executing the target in a blocking loop."""

    def __call__(self, fn, *args, **kwargs):
        """Call *fn* under the retry policy and return its final result."""
        self.begin(fn)

        state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
        while True:
            action = self.iter(retry_state=state)
            if isinstance(action, DoAttempt):
                try:
                    outcome = fn(*args, **kwargs)
                except BaseException:  # noqa: B902
                    state.set_exception(sys.exc_info())
                else:
                    state.set_result(outcome)
            elif isinstance(action, DoSleep):
                state.prepare_for_next_attempt()
                self.sleep(action)
            else:
                return action
+
+
class Future(futures.Future):
    """A (future or past) attempted call to a target function."""

    def __init__(self, attempt_number):
        super(Future, self).__init__()
        self.attempt_number = attempt_number

    @property
    def failed(self):
        """True when this future holds an exception rather than a result."""
        return self.exception() is not None

    @classmethod
    def construct(cls, attempt_number, value, has_exception):
        """Build a finished Future from *value* (a result or an exception)."""
        fut = cls(attempt_number)
        (fut.set_exception if has_exception else fut.set_result)(value)
        return fut
+
+
class RetryCallState(object):
    """Mutable state tracking a single call wrapped with Retrying."""

    def __init__(self, retry_object, fn, args, kwargs):
        #: Retry call start timestamp
        self.start_time = _utils.now()
        #: Retry manager object
        self.retry_object = retry_object
        #: Function wrapped by this retry call
        self.fn = fn
        #: Arguments of the function wrapped by this retry call
        self.args = args
        #: Keyword arguments of the function wrapped by this retry call
        self.kwargs = kwargs

        #: The number of the current attempt
        self.attempt_number = 1
        #: Last outcome (result or exception) produced by the function
        self.outcome = None
        #: Timestamp of the last outcome
        self.outcome_timestamp = None
        #: Time spent sleeping in retries
        self.idle_for = 0
        #: Next action as decided by the retry manager
        self.next_action = None

    @property
    def seconds_since_start(self):
        """Seconds between the call start and the latest outcome, if any."""
        if self.outcome_timestamp is None:
            return None
        return self.outcome_timestamp - self.start_time

    def prepare_for_next_attempt(self):
        """Clear per-attempt state and bump the attempt counter."""
        self.outcome = None
        self.outcome_timestamp = None
        self.next_action = None
        self.attempt_number += 1

    def set_result(self, val):
        """Record a successful outcome."""
        self._record_outcome(val, failed=False)

    def set_exception(self, exc_info):
        """Record a failed outcome from a ``sys.exc_info()`` triple."""
        self._record_outcome(exc_info, failed=True)

    def _record_outcome(self, value, failed):
        # Capture the timestamp first so it reflects when the call finished.
        timestamp = _utils.now()
        future = Future(self.attempt_number)
        if failed:
            _utils.capture(future, value)
        else:
            future.set_result(value)
        self.outcome, self.outcome_timestamp = future, timestamp
+
+
+if iscoroutinefunction:
+ from tenacity._asyncio import AsyncRetrying
+
+if tornado:
+ from tenacity.tornadoweb import TornadoRetrying
diff --git a/contrib/python/tenacity/py2/tenacity/_utils.py b/contrib/python/tenacity/py2/tenacity/_utils.py
new file mode 100644
index 0000000000..625d5901d0
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/_utils.py
@@ -0,0 +1,159 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import sys
+import time
+from functools import update_wrapper
+
+import six
+
# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint...
try:
    MAX_WAIT = sys.maxint / 2
except AttributeError:
    # Python 3: no sys.maxint; use (2**31 - 1) // 2 as a large-enough cap.
    MAX_WAIT = 1073741823
+
+
if six.PY2:
    from functools import WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES

    def wraps(fn):
        """Do the same as six.wraps but only copy attributes that exist.

        For example, object instances don't have __name__ attribute, so
        six.wraps fails. This is fixed in Python 3
        (https://bugs.python.org/issue3445), but didn't get backported to six.

        Also, see https://github.com/benjaminp/six/issues/250.
        """

        def filter_hasattr(obj, attrs):
            # Keep only the wrapper attributes that `obj` actually has.
            return tuple(a for a in attrs if hasattr(obj, a))

        return six.wraps(
            fn,
            assigned=filter_hasattr(fn, WRAPPER_ASSIGNMENTS),
            updated=filter_hasattr(fn, WRAPPER_UPDATES),
        )

    def capture(fut, tb):
        """Store a py2 (type, value, traceback) triple on *fut*."""
        # TODO(harlowja): delete this in future, since its
        # has to repeatedly calculate this crap.
        fut.set_exception_info(tb[1], tb[2])

    def getargspec(func):
        """Py2 argspec shim (``inspect.getargspec`` is gone in Python 3)."""
        # This was deprecated in Python 3.
        return inspect.getargspec(func)


else:
    from functools import wraps  # noqa

    def capture(fut, tb):
        """Store only the exception value; py3 embeds the traceback in it."""
        fut.set_exception(tb[1])

    def getargspec(func):
        """Full argspec including keyword-only arguments (py3)."""
        return inspect.getfullargspec(func)
+
+
def visible_attrs(obj, attrs=None):
    """Collect *obj*'s public members into *attrs* (a dict) and return it.

    Members whose names start with an underscore are skipped; a supplied
    *attrs* dict is updated in place.
    """
    collected = {} if attrs is None else attrs
    for name, value in inspect.getmembers(obj):
        if not name.startswith("_"):
            collected[name] = value
    return collected
+
+
def find_ordinal(pos_num):
    """Return the English ordinal suffix ("st"/"nd"/"rd"/"th") for *pos_num*.

    See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers
    """
    # Numbers ending in 11-13 (11, 112, 1013, ...) always take "th".  The
    # previous recursive `pos_num % 10` lookup handled 4-20 directly but
    # mislabeled larger teens, e.g. 111 -> "st" instead of "th".
    if 10 <= pos_num % 100 <= 20:
        return "th"
    last_digit = pos_num % 10
    if last_digit == 1:
        return "st"
    if last_digit == 2:
        return "nd"
    if last_digit == 3:
        return "rd"
    return "th"
+
+
def to_ordinal(pos_num):
    """Render *pos_num* as an English ordinal string, e.g. ``3`` -> ``"3rd"``."""
    suffix = find_ordinal(pos_num)
    return "%i%s" % (pos_num, suffix)
+
+
def get_callback_name(cb):
    """Get a callback fully-qualified name.

    If no name can be produced ``repr(cb)`` is called and returned.
    """
    segments = []
    try:
        segments.append(cb.__qualname__)
    except AttributeError:
        try:
            segments.append(cb.__name__)
        except AttributeError:
            pass
        else:
            if inspect.ismethod(cb):
                try:
                    # Only exists on py2 bound methods; on py3 __qualname__
                    # above already carried the class name.
                    segments.insert(0, cb.im_class.__name__)
                except AttributeError:
                    pass
    if not segments:
        return repr(cb)
    # When running under sphinx it appears __module__ can be none?
    module = getattr(cb, "__module__", None)
    if module:
        segments.insert(0, module)
    return ".".join(segments)
+
+
# Prefer the stdlib monotonic clock (Python >= 3.3); fall back to the
# third-party ``monotonic`` backport on older interpreters (Python 2).
try:
    now = time.monotonic  # noqa
except AttributeError:
    from monotonic import monotonic as now  # noqa
+
+
class cached_property(object):
    """Descriptor caching the result of a method as an instance attribute.

    The first access computes the value and stores it in the instance
    ``__dict__`` under the function's name, shadowing this descriptor.
    Deleting the attribute re-enables computation.

    Source: https://github.com/bottlepy/bottle/blob/1de24157e74a6971d136550afe1b63eec5b0df2b/bottle.py#L234-L246
    """  # noqa: E501

    def __init__(self, func):
        update_wrapper(self, func)
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
diff --git a/contrib/python/tenacity/py2/tenacity/after.py b/contrib/python/tenacity/py2/tenacity/after.py
new file mode 100644
index 0000000000..c577d7c74e
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/after.py
@@ -0,0 +1,40 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tenacity import _utils
+
+
def after_nothing(retry_state):
    """No-op "after" hook; the default when no after-callback is configured."""
    return None
+
+
def after_log(logger, log_level, sec_format="%0.3f"):
    """Return an "after" hook logging each finished (but rejected) attempt."""
    template = (
        "Finished call to '%s' after " + str(sec_format) + "(s), "
        "this was the %s time calling it."
    )

    def log_it(retry_state):
        target = _utils.get_callback_name(retry_state.fn)
        ordinal = _utils.to_ordinal(retry_state.attempt_number)
        logger.log(
            log_level,
            template,
            target,
            retry_state.seconds_since_start,
            ordinal,
        )

    return log_it
diff --git a/contrib/python/tenacity/py2/tenacity/before.py b/contrib/python/tenacity/py2/tenacity/before.py
new file mode 100644
index 0000000000..68a2ae6115
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/before.py
@@ -0,0 +1,35 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tenacity import _utils
+
+
def before_nothing(retry_state):
    """No-op "before" hook; the default when no before-callback is configured."""
    return None
+
+
def before_log(logger, log_level):
    """Return a "before" hook logging each attempt as it starts."""

    def log_it(retry_state):
        target = _utils.get_callback_name(retry_state.fn)
        ordinal = _utils.to_ordinal(retry_state.attempt_number)
        logger.log(
            log_level,
            "Starting call to '%s', this is the %s time calling it.",
            target,
            ordinal,
        )

    return log_it
diff --git a/contrib/python/tenacity/py2/tenacity/before_sleep.py b/contrib/python/tenacity/py2/tenacity/before_sleep.py
new file mode 100644
index 0000000000..d797a24612
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/before_sleep.py
@@ -0,0 +1,51 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tenacity import _utils
+from tenacity.compat import get_exc_info_from_future
+
+
def before_sleep_nothing(retry_state):
    """No-op "before sleep" hook; used when no callback is configured."""
    return None
+
+
def before_sleep_log(logger, log_level, exc_info=False):
    """Return a hook logging the last outcome right before sleeping to retry."""

    def log_it(retry_state):
        outcome = retry_state.outcome
        local_exc_info = False  # exc_info only applies to failed outcomes
        if outcome.failed:
            ex = outcome.exception()
            verb = "raised"
            value = "%s: %s" % (type(ex).__name__, ex)
            if exc_info:
                local_exc_info = get_exc_info_from_future(outcome)
        else:
            verb = "returned"
            value = outcome.result()

        logger.log(
            log_level,
            "Retrying %s in %s seconds as it %s %s.",
            _utils.get_callback_name(retry_state.fn),
            retry_state.next_action.sleep,
            verb,
            value,
            exc_info=local_exc_info,
        )

    return log_it
diff --git a/contrib/python/tenacity/py2/tenacity/compat.py b/contrib/python/tenacity/py2/tenacity/compat.py
new file mode 100644
index 0000000000..c08ff774f6
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/compat.py
@@ -0,0 +1,23 @@
+"""Utilities for providing backward compatibility."""
+import six
+
+
def get_exc_info_from_future(future):
    """
    Get an exc_info value from a Future.

    Given a a Future instance, retrieve an exc_info value suitable for passing
    in as the exc_info parameter to logging.Logger.log() and related methods.

    On Python 2, this will be a (type, value, traceback) triple.
    On Python 3, this will be an exception instance (with embedded traceback).

    If there was no exception, None is returned on both versions of Python.
    """
    if not six.PY3:
        ex, tb = future.exception_info()
        if ex is None:
            return None
        return type(ex), ex, tb
    return future.exception()
diff --git a/contrib/python/tenacity/py2/tenacity/nap.py b/contrib/python/tenacity/py2/tenacity/nap.py
new file mode 100644
index 0000000000..83ff839c36
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/nap.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+
def sleep(seconds):
    """Default sleep strategy: block the current thread for *seconds*.

    May be replaced (e.g. mocked out) for unit testing.
    """
    return time.sleep(seconds)
+
+
class sleep_using_event(object):
    """Sleep strategy that waits until *event* is set, up to the timeout."""

    def __init__(self, event):
        self.event = event

    def __call__(self, timeout):
        # May return before `timeout` seconds elapse if the event gets set.
        self.event.wait(timeout=timeout)
diff --git a/contrib/python/tenacity/py2/tenacity/py.typed b/contrib/python/tenacity/py2/tenacity/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/py.typed
diff --git a/contrib/python/tenacity/py2/tenacity/retry.py b/contrib/python/tenacity/py2/tenacity/retry.py
new file mode 100644
index 0000000000..ebf26df579
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/retry.py
@@ -0,0 +1,192 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import re
+
+import six
+
+
@six.add_metaclass(abc.ABCMeta)
class retry_base(object):
    """Abstract base class for retry strategies.

    Strategies compose with ``&`` (all must agree) and ``|`` (any may agree).
    """

    @abc.abstractmethod
    def __call__(self, retry_state):
        """Return a truthy value when another attempt should be made."""

    def __and__(self, other):
        return retry_all(self, other)

    def __or__(self, other):
        return retry_any(self, other)
+
+
class _retry_never(retry_base):
    """Strategy that never requests a retry, whatever the outcome."""

    def __call__(self, retry_state):
        return False


# Shared singleton instance.
retry_never = _retry_never()
+
+
class _retry_always(retry_base):
    """Strategy that always requests a retry, whatever the outcome."""

    def __call__(self, retry_state):
        return True


# Shared singleton instance.
retry_always = _retry_always()
+
+
class retry_if_exception(retry_base):
    """Retry when the raised exception satisfies *predicate*."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, retry_state):
        outcome = retry_state.outcome
        if not outcome.failed:
            return False
        return self.predicate(outcome.exception())
+
+
class retry_if_exception_type(retry_if_exception):
    """Retry when the raised exception is an instance of the given type(s)."""

    def __init__(self, exception_types=Exception):
        self.exception_types = exception_types

        def _is_expected(exc):
            return isinstance(exc, exception_types)

        super(retry_if_exception_type, self).__init__(_is_expected)
+
+
class retry_unless_exception_type(retry_if_exception):
    """Retry until an exception of one of the given types is raised."""

    def __init__(self, exception_types=Exception):
        self.exception_types = exception_types

        def _is_unexpected(exc):
            return not isinstance(exc, exception_types)

        super(retry_unless_exception_type, self).__init__(_is_unexpected)

    def __call__(self, retry_state):
        # A successful outcome always retries; only a listed exception stops.
        if retry_state.outcome.failed:
            return self.predicate(retry_state.outcome.exception())
        return True
+
+
class retry_if_result(retry_base):
    """Retry when the call's result satisfies *predicate*."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, retry_state):
        outcome = retry_state.outcome
        if outcome.failed:
            return False
        return self.predicate(outcome.result())
+
+
class retry_if_not_result(retry_base):
    """Retry when the call's result refutes *predicate*."""

    def __init__(self, predicate):
        self.predicate = predicate

    def __call__(self, retry_state):
        outcome = retry_state.outcome
        if outcome.failed:
            return False
        return not self.predicate(outcome.result())
+
+
class retry_if_exception_message(retry_if_exception):
    """Retry when the exception's message equals *message* or matches *match*."""

    def __init__(self, message=None, match=None):
        if message and match:
            raise TypeError(
                "{}() takes either 'message' or 'match', not both".format(
                    self.__class__.__name__
                )
            )

        # Build the predicate from whichever criterion was supplied.
        if message:

            def predicate(exception):
                return message == str(exception)

        elif match:
            prog = re.compile(match)

            def predicate(exception):
                return prog.match(str(exception))

        else:
            raise TypeError(
                "{}() missing 1 required argument 'message' or 'match'".format(
                    self.__class__.__name__
                )
            )

        super(retry_if_exception_message, self).__init__(predicate)
+
+
class retry_if_not_exception_message(retry_if_exception_message):
    """Retry until an exception message equals or matches."""

    def __init__(self, *args, **kwargs):
        super(retry_if_not_exception_message, self).__init__(*args, **kwargs)
        # Negate the predicate built by the parent class.
        positive = self.predicate

        def _negated(*args_, **kwargs_):
            return not positive(*args_, **kwargs_)

        self.predicate = _negated

    def __call__(self, retry_state):
        # A successful outcome always retries.
        if retry_state.outcome.failed:
            return self.predicate(retry_state.outcome.exception())
        return True
+
+
class retry_any(retry_base):
    """Retry when at least one of the wrapped conditions requests it."""

    def __init__(self, *retries):
        self.retries = retries

    def __call__(self, retry_state):
        return any(condition(retry_state) for condition in self.retries)
+
+
class retry_all(retry_base):
    """Retry only when every wrapped condition requests it."""

    def __init__(self, *retries):
        self.retries = retries

    def __call__(self, retry_state):
        return all(condition(retry_state) for condition in self.retries)
diff --git a/contrib/python/tenacity/py2/tenacity/stop.py b/contrib/python/tenacity/py2/tenacity/stop.py
new file mode 100644
index 0000000000..94a0f32966
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/stop.py
@@ -0,0 +1,95 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+
+import six
+
+
@six.add_metaclass(abc.ABCMeta)
class stop_base(object):
    """Abstract base class for stop strategies.

    Strategies compose with ``&`` (all must stop) and ``|`` (any may stop).
    """

    @abc.abstractmethod
    def __call__(self, retry_state):
        """Return True when retrying should cease."""

    def __and__(self, other):
        return stop_all(self, other)

    def __or__(self, other):
        return stop_any(self, other)
+
+
class stop_any(stop_base):
    """Stop as soon as any wrapped stop condition holds."""

    def __init__(self, *stops):
        self.stops = stops

    def __call__(self, retry_state):
        return any(condition(retry_state) for condition in self.stops)
+
+
class stop_all(stop_base):
    """Stop only when every wrapped stop condition holds."""

    def __init__(self, *stops):
        self.stops = stops

    def __call__(self, retry_state):
        return all(condition(retry_state) for condition in self.stops)
+
+
class _stop_never(stop_base):
    """Stop strategy that never gives up."""

    def __call__(self, retry_state):
        return False


# Shared singleton instance; the default `stop` policy.
stop_never = _stop_never()
+
+
class stop_when_event_set(stop_base):
    """Stop retrying once the given event object reports itself set."""

    def __init__(self, event):
        self.event = event

    def __call__(self, retry_state):
        return self.event.is_set()
+
+
class stop_after_attempt(stop_base):
    """Stop once at least *max_attempt_number* attempts have been made."""

    def __init__(self, max_attempt_number):
        self.max_attempt_number = max_attempt_number

    def __call__(self, retry_state):
        return retry_state.attempt_number >= self.max_attempt_number
+
+
class stop_after_delay(stop_base):
    """Stop once *max_delay* seconds have elapsed since the first attempt."""

    def __init__(self, max_delay):
        self.max_delay = max_delay

    def __call__(self, retry_state):
        return retry_state.seconds_since_start >= self.max_delay
diff --git a/contrib/python/tenacity/py2/tenacity/tornadoweb.py b/contrib/python/tenacity/py2/tenacity/tornadoweb.py
new file mode 100644
index 0000000000..243cf5deab
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/tornadoweb.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Elisey Zanko
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from tenacity import BaseRetrying
+from tenacity import DoAttempt
+from tenacity import DoSleep
+from tenacity import RetryCallState
+
+from tornado import gen
+
+
class TornadoRetrying(BaseRetrying):
    """Retry controller driving a Tornado generator-based coroutine."""

    def __init__(self, sleep=gen.sleep, **kwargs):
        # Default to tornado's non-blocking sleep so waits yield to the IOLoop.
        super(TornadoRetrying, self).__init__(**kwargs)
        self.sleep = sleep

    @gen.coroutine
    def __call__(self, fn, *args, **kwargs):
        """Run *fn* under the retry policy, yielding between attempts."""
        self.begin(fn)

        retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
        while True:
            do = self.iter(retry_state=retry_state)
            if isinstance(do, DoAttempt):
                try:
                    result = yield fn(*args, **kwargs)
                except BaseException:  # noqa: B902
                    retry_state.set_exception(sys.exc_info())
                else:
                    retry_state.set_result(result)
            elif isinstance(do, DoSleep):
                retry_state.prepare_for_next_attempt()
                yield self.sleep(do)
            else:
                # gen.Return is the py2-compatible way to return a value
                # from a tornado generator-coroutine.
                raise gen.Return(do)
diff --git a/contrib/python/tenacity/py2/tenacity/wait.py b/contrib/python/tenacity/py2/tenacity/wait.py
new file mode 100644
index 0000000000..2f981c89d1
--- /dev/null
+++ b/contrib/python/tenacity/py2/tenacity/wait.py
@@ -0,0 +1,183 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import random
+
+import six
+
+from tenacity import _utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class wait_base(object):
+ """Abstract base class for wait strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state):
+ pass
+
+ def __add__(self, other):
+ return wait_combine(self, other)
+
+ def __radd__(self, other):
+ # make it possible to use multiple waits with the built-in sum function
+ if other == 0:
+ return self
+ return self.__add__(other)
+
+
+class wait_fixed(wait_base):
+ """Wait strategy that waits a fixed amount of time between each retry."""
+
+ def __init__(self, wait):
+ self.wait_fixed = wait
+
+ def __call__(self, retry_state):
+ return self.wait_fixed
+
+
+class wait_none(wait_fixed):
+ """Wait strategy that doesn't wait at all before retrying."""
+
+ def __init__(self):
+ super(wait_none, self).__init__(0)
+
+
+class wait_random(wait_base):
+ """Wait strategy that waits a random amount of time between min/max."""
+
+ def __init__(self, min=0, max=1): # noqa
+ self.wait_random_min = min
+ self.wait_random_max = max
+
+ def __call__(self, retry_state):
+ return self.wait_random_min + (
+ random.random() * (self.wait_random_max - self.wait_random_min)
+ )
+
+
+class wait_combine(wait_base):
+ """Combine several waiting strategies."""
+
+ def __init__(self, *strategies):
+ self.wait_funcs = strategies
+
+ def __call__(self, retry_state):
+ return sum(x(retry_state=retry_state) for x in self.wait_funcs)
+
+
+class wait_chain(wait_base):
+ """Chain two or more waiting strategies.
+
+ If all strategies are exhausted, the very last strategy is used
+ thereafter.
+
+ For example::
+
+ @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] +
+ [wait_fixed(2) for j in range(5)] +
+ [wait_fixed(5) for k in range(4)]))
+ def wait_chained():
+ print("Wait 1s for 3 attempts, 2s for 5 attempts and 5s
+ thereafter.")
+ """
+
+ def __init__(self, *strategies):
+ self.strategies = strategies
+
+ def __call__(self, retry_state):
+ wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies))
+ wait_func = self.strategies[wait_func_no - 1]
+ return wait_func(retry_state=retry_state)
+
+
+class wait_incrementing(wait_base):
+ """Wait an incremental amount of time after each attempt.
+
+ Starting at a starting value and incrementing by a value for each attempt
+ (and restricting the upper limit to some maximum value).
+ """
+
+ def __init__(self, start=0, increment=100, max=_utils.MAX_WAIT): # noqa
+ self.start = start
+ self.increment = increment
+ self.max = max
+
+ def __call__(self, retry_state):
+ result = self.start + (self.increment * (retry_state.attempt_number - 1))
+ return max(0, min(result, self.max))
+
+
+class wait_exponential(wait_base):
+ """Wait strategy that applies exponential backoff.
+
+ It allows for a customized multiplier and an ability to restrict the
+ upper and lower limits to some maximum and minimum value.
+
+ The intervals are fixed (i.e. there is no jitter), so this strategy is
+ suitable for balancing retries against latency when a required resource is
+ unavailable for an unknown duration, but *not* suitable for resolving
+ contention between multiple processes for a shared resource. Use
+ wait_random_exponential for the latter case.
+ """
+
+ def __init__(self, multiplier=1, max=_utils.MAX_WAIT, exp_base=2, min=0): # noqa
+ self.multiplier = multiplier
+ self.min = min
+ self.max = max
+ self.exp_base = exp_base
+
+ def __call__(self, retry_state):
+ try:
+ exp = self.exp_base ** (retry_state.attempt_number - 1)
+ result = self.multiplier * exp
+ except OverflowError:
+ return self.max
+ return max(max(0, self.min), min(result, self.max))
+
+
+class wait_random_exponential(wait_exponential):
+ """Random wait with exponentially widening window.
+
+ An exponential backoff strategy used to mediate contention between multiple
+ uncoordinated processes for a shared resource in distributed systems. This
+ is the sense in which "exponential backoff" is meant in e.g. Ethernet
+ networking, and corresponds to the "Full Jitter" algorithm described in
+ this blog post:
+
+ https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
+ Each retry occurs at a random time in a geometrically expanding interval.
+ It allows for a custom multiplier and an ability to restrict the upper
+ limit of the random interval to some maximum value.
+
+ Example::
+
+ wait_random_exponential(multiplier=0.5, # initial window 0.5s
+ max=60) # max 60s timeout
+
+ When waiting for an unavailable resource to become available again, as
+ opposed to trying to resolve contention for a shared resource, the
+ wait_exponential strategy (which uses a fixed interval) may be preferable.
+
+ """
+
+ def __call__(self, retry_state):
+ high = super(wait_random_exponential, self).__call__(retry_state=retry_state)
+ return random.uniform(0, high)
diff --git a/contrib/python/tenacity/py2/ya.make b/contrib/python/tenacity/py2/ya.make
new file mode 100644
index 0000000000..57cb4ccace
--- /dev/null
+++ b/contrib/python/tenacity/py2/ya.make
@@ -0,0 +1,44 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(7.0.0)
+
+LICENSE(Apache-2.0)
+
+PEERDIR(
+ contrib/deprecated/python/futures
+ contrib/deprecated/python/typing
+ contrib/python/monotonic
+ contrib/python/six
+)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ tenacity.tornadoweb
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ tenacity/__init__.py
+ tenacity/_utils.py
+ tenacity/after.py
+ tenacity/before.py
+ tenacity/before_sleep.py
+ tenacity/compat.py
+ tenacity/nap.py
+ tenacity/retry.py
+ tenacity/stop.py
+ tenacity/tornadoweb.py
+ tenacity/wait.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/tenacity/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+ tenacity/py.typed
+)
+
+END()
diff --git a/contrib/python/tenacity/py3/.dist-info/METADATA b/contrib/python/tenacity/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..c8f74da305
--- /dev/null
+++ b/contrib/python/tenacity/py3/.dist-info/METADATA
@@ -0,0 +1,27 @@
+Metadata-Version: 2.1
+Name: tenacity
+Version: 8.2.3
+Summary: Retry code until it succeeds
+Home-page: https://github.com/jd/tenacity
+Author: Julien Danjou
+Author-email: julien@danjou.info
+License: Apache 2.0
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Topic :: Utilities
+Requires-Python: >=3.7
+License-File: LICENSE
+Provides-Extra: doc
+Requires-Dist: reno ; extra == 'doc'
+Requires-Dist: sphinx ; extra == 'doc'
+Requires-Dist: tornado >=4.5 ; extra == 'doc'
+
+Tenacity is a general-purpose retrying library to simplify the task of adding retry behavior to just about anything.
diff --git a/contrib/python/tenacity/py3/.dist-info/top_level.txt b/contrib/python/tenacity/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..48f8e12ee7
--- /dev/null
+++ b/contrib/python/tenacity/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+tenacity
diff --git a/contrib/python/tenacity/py3/LICENSE b/contrib/python/tenacity/py3/LICENSE
new file mode 100644
index 0000000000..7a4a3ea242
--- /dev/null
+++ b/contrib/python/tenacity/py3/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. \ No newline at end of file
diff --git a/contrib/python/tenacity/py3/README.rst b/contrib/python/tenacity/py3/README.rst
new file mode 100644
index 0000000000..ce586f9e6d
--- /dev/null
+++ b/contrib/python/tenacity/py3/README.rst
@@ -0,0 +1,647 @@
+Tenacity
+========
+.. image:: https://img.shields.io/pypi/v/tenacity.svg
+ :target: https://pypi.python.org/pypi/tenacity
+
+.. image:: https://circleci.com/gh/jd/tenacity.svg?style=svg
+ :target: https://circleci.com/gh/jd/tenacity
+
+.. image:: https://img.shields.io/endpoint.svg?url=https://api.mergify.com/badges/jd/tenacity&style=flat
+ :target: https://mergify.io
+ :alt: Mergify Status
+
+**Please refer to the** `tenacity documentation <https://tenacity.readthedocs.io/en/latest/>`_ **for a better experience.**
+
+Tenacity is an Apache 2.0 licensed general-purpose retrying library, written in
+Python, to simplify the task of adding retry behavior to just about anything.
+It originates from `a fork of retrying
+<https://github.com/rholder/retrying/issues/65>`_ which is sadly no longer
+`maintained <https://julien.danjou.info/python-tenacity/>`_. Tenacity isn't
+API-compatible with retrying but adds significant new functionality and
+fixes a number of longstanding bugs.
+
+The simplest use case is retrying a flaky function whenever an `Exception`
+occurs until a value is returned.
+
+.. testcode::
+
+ import random
+ from tenacity import retry
+
+ @retry
+ def do_something_unreliable():
+ if random.randint(0, 10) > 1:
+ raise IOError("Broken sauce, everything is hosed!!!111one")
+ else:
+ return "Awesome sauce!"
+
+ print(do_something_unreliable())
+
+.. testoutput::
+ :hide:
+
+ Awesome sauce!
+
+
+.. toctree::
+ :hidden:
+ :maxdepth: 2
+
+ changelog
+ api
+
+
+Features
+--------
+
+- Generic Decorator API
+- Specify stop condition (i.e. limit by number of attempts)
+- Specify wait condition (i.e. exponential backoff sleeping between attempts)
+- Customize retrying on Exceptions
+- Customize retrying on expected returned result
+- Retry on coroutines
+- Retry code block with context manager
+
+
+Installation
+------------
+
+To install *tenacity*, simply:
+
+.. code-block:: bash
+
+ $ pip install tenacity
+
+
+Examples
+----------
+
+Basic Retry
+~~~~~~~~~~~
+
+.. testsetup:: *
+
+ import logging
+ #
+ # Note the following import is used for demonstration convenience only.
+ # Production code should always explicitly import the names it needs.
+ #
+ from tenacity import *
+
+ class MyException(Exception):
+ pass
+
+As you saw above, the default behavior is to retry forever without waiting when
+an exception is raised.
+
+.. testcode::
+
+ @retry
+ def never_gonna_give_you_up():
+ print("Retry forever ignoring Exceptions, don't wait between retries")
+ raise Exception
+
+Stopping
+~~~~~~~~
+
+Let's be a little less persistent and set some boundaries, such as the number
+of attempts before giving up.
+
+.. testcode::
+
+ @retry(stop=stop_after_attempt(7))
+ def stop_after_7_attempts():
+ print("Stopping after 7 attempts")
+ raise Exception
+
+We don't have all day, so let's set a boundary for how long we should be
+retrying stuff.
+
+.. testcode::
+
+ @retry(stop=stop_after_delay(10))
+ def stop_after_10_s():
+ print("Stopping after 10 seconds")
+ raise Exception
+
+You can combine several stop conditions by using the `|` operator:
+
+.. testcode::
+
+ @retry(stop=(stop_after_delay(10) | stop_after_attempt(5)))
+ def stop_after_10_s_or_5_retries():
+ print("Stopping after 10 seconds or 5 retries")
+ raise Exception
+
+Waiting before retrying
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Most things don't like to be polled as fast as possible, so let's just wait 2
+seconds between retries.
+
+.. testcode::
+
+ @retry(wait=wait_fixed(2))
+ def wait_2_s():
+ print("Wait 2 second between retries")
+ raise Exception
+
+Some things perform best with a bit of randomness injected.
+
+.. testcode::
+
+ @retry(wait=wait_random(min=1, max=2))
+ def wait_random_1_to_2_s():
+ print("Randomly wait 1 to 2 seconds between retries")
+ raise Exception
+
+Then again, it's hard to beat exponential backoff when retrying distributed
+services and other remote endpoints.
+
+.. testcode::
+
+ @retry(wait=wait_exponential(multiplier=1, min=4, max=10))
+ def wait_exponential_1():
+ print("Wait 2^x * 1 second between each retry starting with 4 seconds, then up to 10 seconds, then 10 seconds afterwards")
+ raise Exception
+
+
+Then again, it's also hard to beat combining fixed waits and jitter (to
+help avoid thundering herds) when retrying distributed services and other
+remote endpoints.
+
+.. testcode::
+
+ @retry(wait=wait_fixed(3) + wait_random(0, 2))
+ def wait_fixed_jitter():
+ print("Wait at least 3 seconds, and add up to 2 seconds of random delay")
+ raise Exception
+
+When multiple processes are in contention for a shared resource, exponentially
+increasing jitter helps minimise collisions.
+
+.. testcode::
+
+ @retry(wait=wait_random_exponential(multiplier=1, max=60))
+ def wait_exponential_jitter():
+ print("Randomly wait up to 2^x * 1 seconds between each retry until the range reaches 60 seconds, then randomly up to 60 seconds afterwards")
+ raise Exception
+
+
+Sometimes it's necessary to build a chain of backoffs.
+
+.. testcode::
+
+ @retry(wait=wait_chain(*[wait_fixed(3) for i in range(3)] +
+ [wait_fixed(7) for i in range(2)] +
+ [wait_fixed(9)]))
+ def wait_fixed_chained():
+ print("Wait 3s for 3 attempts, 7s for the next 2 attempts and 9s for all attempts thereafter")
+ raise Exception
+
+Whether to retry
+~~~~~~~~~~~~~~~~
+
+We have a few options for dealing with retries that raise specific or general
+exceptions, as in the cases here.
+
+.. testcode::
+
+ class ClientError(Exception):
+ """Some type of client error."""
+
+ @retry(retry=retry_if_exception_type(IOError))
+ def might_io_error():
+ print("Retry forever with no wait if an IOError occurs, raise any other errors")
+ raise Exception
+
+ @retry(retry=retry_if_not_exception_type(ClientError))
+ def might_client_error():
+ print("Retry forever with no wait if any error other than ClientError occurs. Immediately raise ClientError.")
+ raise Exception
+
+We can also use the result of the function to alter the behavior of retrying.
+
+.. testcode::
+
+ def is_none_p(value):
+ """Return True if value is None"""
+ return value is None
+
+ @retry(retry=retry_if_result(is_none_p))
+ def might_return_none():
+ print("Retry with no wait if return value is None")
+
+See also these methods:
+
+.. testcode::
+
+ retry_if_exception
+ retry_if_exception_type
+ retry_if_not_exception_type
+ retry_unless_exception_type
+ retry_if_result
+ retry_if_not_result
+ retry_if_exception_message
+ retry_if_not_exception_message
+ retry_any
+ retry_all
+
+We can also combine several conditions:
+
+.. testcode::
+
+ def is_none_p(value):
+ """Return True if value is None"""
+ return value is None
+
+ @retry(retry=(retry_if_result(is_none_p) | retry_if_exception_type()))
+ def might_return_none():
+ print("Retry forever ignoring Exceptions with no wait if return value is None")
+
+Any combination of stop, wait, etc. is also supported to give you the freedom
+to mix and match.
+
+It's also possible to retry explicitly at any time by raising the `TryAgain`
+exception:
+
+.. testcode::
+
+ @retry
+ def do_something():
+ result = something_else()
+ if result == 23:
+ raise TryAgain
+
+Error Handling
+~~~~~~~~~~~~~~
+
+Normally when your function fails its final time (and will not be retried again based on your settings),
+a `RetryError` is raised. The exception your code encountered will be shown somewhere in the *middle*
+of the stack trace.
+
+If you would rather see the exception your code encountered at the *end* of the stack trace (where it
+is most visible), you can set `reraise=True`.
+
+.. testcode::
+
+ @retry(reraise=True, stop=stop_after_attempt(3))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception()
+ except MyException:
+ # timed out retrying
+ pass
+
+Before and After Retry, and Logging
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It's possible to execute an action before any attempt of calling the function
+by using the before callback function:
+
+.. testcode::
+
+ import logging
+ import sys
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ @retry(stop=stop_after_attempt(3), before=before_log(logger, logging.DEBUG))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+In the same spirit, it's possible to execute an action after a call that failed:
+
+.. testcode::
+
+ import logging
+ import sys
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ @retry(stop=stop_after_attempt(3), after=after_log(logger, logging.DEBUG))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+It's also possible to only log failures that are going to be retried. Normally
+retries happen after a wait interval, so the keyword argument is called
+``before_sleep``:
+
+.. testcode::
+
+ import logging
+ import sys
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ @retry(stop=stop_after_attempt(3),
+ before_sleep=before_sleep_log(logger, logging.DEBUG))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+
+Statistics
+~~~~~~~~~~
+
+You can access the statistics about the retry made over a function by using the
+`retry` attribute attached to the function and its `statistics` attribute:
+
+.. testcode::
+
+ @retry(stop=stop_after_attempt(3))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception()
+ except Exception:
+ pass
+
+ print(raise_my_exception.retry.statistics)
+
+.. testoutput::
+ :hide:
+
+ ...
+
+Custom Callbacks
+~~~~~~~~~~~~~~~~
+
+You can also define your own callbacks. The callback should accept one
+parameter called ``retry_state`` that contains all information about current
+retry invocation.
+
+For example, you can call a custom callback function after all retries failed,
+without raising an exception (or you can re-raise or do anything really)
+
+.. testcode::
+
+ def return_last_value(retry_state):
+ """return the result of the last call attempt"""
+ return retry_state.outcome.result()
+
+ def is_false(value):
+ """Return True if value is False"""
+ return value is False
+
+ # will return False after trying 3 times to get a different result
+ @retry(stop=stop_after_attempt(3),
+ retry_error_callback=return_last_value,
+ retry=retry_if_result(is_false))
+ def eventually_return_false():
+ return False
+
+RetryCallState
+~~~~~~~~~~~~~~
+
+``retry_state`` argument is an object of `RetryCallState` class:
+
+.. autoclass:: tenacity.RetryCallState
+
+ Constant attributes:
+
+ .. autoattribute:: start_time(float)
+ :annotation:
+
+ .. autoattribute:: retry_object(BaseRetrying)
+ :annotation:
+
+ .. autoattribute:: fn(callable)
+ :annotation:
+
+ .. autoattribute:: args(tuple)
+ :annotation:
+
+ .. autoattribute:: kwargs(dict)
+ :annotation:
+
+ Variable attributes:
+
+ .. autoattribute:: attempt_number(int)
+ :annotation:
+
+ .. autoattribute:: outcome(tenacity.Future or None)
+ :annotation:
+
+ .. autoattribute:: outcome_timestamp(float or None)
+ :annotation:
+
+ .. autoattribute:: idle_for(float)
+ :annotation:
+
+ .. autoattribute:: next_action(tenacity.RetryAction or None)
+ :annotation:
+
+Other Custom Callbacks
+~~~~~~~~~~~~~~~~~~~~~~
+
+It's also possible to define custom callbacks for other keyword arguments.
+
+.. function:: my_stop(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+ :return: whether or not retrying should stop
+ :rtype: bool
+
+.. function:: my_wait(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+ :return: number of seconds to wait before next retry
+ :rtype: float
+
+.. function:: my_retry(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+ :return: whether or not retrying should continue
+ :rtype: bool
+
+.. function:: my_before(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+
+.. function:: my_after(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+
+.. function:: my_before_sleep(retry_state)
+
+ :param RetryState retry_state: info about current retry invocation
+
+Here's an example with a custom ``before_sleep`` function:
+
+.. testcode::
+
+ import logging
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+ logger = logging.getLogger(__name__)
+
+ def my_before_sleep(retry_state):
+ if retry_state.attempt_number < 1:
+ loglevel = logging.INFO
+ else:
+ loglevel = logging.WARNING
+ logger.log(
+ loglevel, 'Retrying %s: attempt %s ended with: %s',
+ retry_state.fn, retry_state.attempt_number, retry_state.outcome)
+
+ @retry(stop=stop_after_attempt(3), before_sleep=my_before_sleep)
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception()
+ except RetryError:
+ pass
+
+
+Changing Arguments at Run Time
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can change the arguments of a retry decorator as needed when calling it by
+using the `retry_with` function attached to the wrapped function:
+
+.. testcode::
+
+ @retry(stop=stop_after_attempt(3))
+ def raise_my_exception():
+ raise MyException("Fail")
+
+ try:
+ raise_my_exception.retry_with(stop=stop_after_attempt(4))()
+ except Exception:
+ pass
+
+ print(raise_my_exception.retry.statistics)
+
+.. testoutput::
+ :hide:
+
+ ...
+
+If you want to use variables to set up the retry parameters, you don't have
+to use the `retry` decorator - you can instead use `Retrying` directly:
+
+.. testcode::
+
+ def never_good_enough(arg1):
+ raise Exception('Invalid argument: {}'.format(arg1))
+
+ def try_never_good_enough(max_attempts=3):
+ retryer = Retrying(stop=stop_after_attempt(max_attempts), reraise=True)
+ retryer(never_good_enough, 'I really do try')
+
+Retrying code block
+~~~~~~~~~~~~~~~~~~~
+
+Tenacity allows you to retry a code block without the need to wrap it in an
+isolated function. This makes it easy to isolate a failing block while
+sharing context. The trick is to combine a for loop and a context manager.
+
+.. testcode::
+
+ from tenacity import Retrying, RetryError, stop_after_attempt
+
+ try:
+ for attempt in Retrying(stop=stop_after_attempt(3)):
+ with attempt:
+ raise Exception('My code is failing!')
+ except RetryError:
+ pass
+
+You can configure every detail of the retry policy by configuring the
+Retrying object.
+
+With async code you can use AsyncRetrying.
+
+.. testcode::
+
+ from tenacity import AsyncRetrying, RetryError, stop_after_attempt
+
+ async def function():
+ try:
+ async for attempt in AsyncRetrying(stop=stop_after_attempt(3)):
+ with attempt:
+ raise Exception('My code is failing!')
+ except RetryError:
+ pass
+
+In both cases, you may want to set the result on the attempt so it's available
+in retry strategies like ``retry_if_result``. This can be done by accessing
+the ``retry_state`` property:
+
+.. testcode::
+
+ from tenacity import AsyncRetrying, retry_if_result
+
+ async def function():
+ async for attempt in AsyncRetrying(retry=retry_if_result(lambda x: x < 3)):
+ with attempt:
+ result = 1 # Some complex calculation, function call, etc.
+ if not attempt.retry_state.outcome.failed:
+ attempt.retry_state.set_result(result)
+ return result
+
+Async and retry
+~~~~~~~~~~~~~~~
+
+Finally, ``retry`` also works on asyncio and Tornado (>= 4.5) coroutines.
+Sleeps are done asynchronously too.
+
+.. code-block:: python
+
+ @retry
+ async def my_async_function(loop):
+ await loop.getaddrinfo('8.8.8.8', 53)
+
+.. code-block:: python
+
+ @retry
+ @tornado.gen.coroutine
+ def my_async_function(http_client, url):
+ yield http_client.fetch(url)
+
+You can even use alternative event loops such as `curio` or `Trio` by passing the correct sleep function:
+
+.. code-block:: python
+
+ @retry(sleep=trio.sleep)
+ async def my_async_function(loop):
+ await asks.get('https://example.org')
+
+Contribute
+----------
+
+#. Check for open issues or open a fresh issue to start a discussion around a
+ feature idea or a bug.
+#. Fork `the repository`_ on GitHub to start making your changes to the
+ **main** branch (or branch off of it).
+#. Write a test which shows that the bug was fixed or that the feature works as
+ expected.
+#. Add a `changelog <#Changelogs>`_
+#. Make the docs better (or more detailed, or easier to read, or ...)
+
+.. _`the repository`: https://github.com/jd/tenacity
+
+Changelogs
+~~~~~~~~~~
+
+`reno`_ is used for managing changelogs. Take a look at their usage docs.
+
+The doc generation will automatically compile the changelogs. You just need to add them.
+
+.. code-block:: sh
+
+ # Opens a template file in an editor
+ tox -e reno -- new some-slug-for-my-change --edit
+
+.. _`reno`: https://docs.openstack.org/reno/latest/user/usage.html
diff --git a/contrib/python/tenacity/py3/tenacity/__init__.py b/contrib/python/tenacity/py3/tenacity/__init__.py
new file mode 100644
index 0000000000..ba8011be02
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/__init__.py
@@ -0,0 +1,606 @@
+# Copyright 2016-2018 Julien Danjou
+# Copyright 2017 Elisey Zanko
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import functools
+import sys
+import threading
+import time
+import typing as t
+import warnings
+from abc import ABC, abstractmethod
+from concurrent import futures
+from inspect import iscoroutinefunction
+
+# Import all built-in retry strategies for easier usage.
+from .retry import retry_base # noqa
+from .retry import retry_all # noqa
+from .retry import retry_always # noqa
+from .retry import retry_any # noqa
+from .retry import retry_if_exception # noqa
+from .retry import retry_if_exception_type # noqa
+from .retry import retry_if_exception_cause_type # noqa
+from .retry import retry_if_not_exception_type # noqa
+from .retry import retry_if_not_result # noqa
+from .retry import retry_if_result # noqa
+from .retry import retry_never # noqa
+from .retry import retry_unless_exception_type # noqa
+from .retry import retry_if_exception_message # noqa
+from .retry import retry_if_not_exception_message # noqa
+
+# Import all nap strategies for easier usage.
+from .nap import sleep # noqa
+from .nap import sleep_using_event # noqa
+
+# Import all built-in stop strategies for easier usage.
+from .stop import stop_after_attempt # noqa
+from .stop import stop_after_delay # noqa
+from .stop import stop_all # noqa
+from .stop import stop_any # noqa
+from .stop import stop_never # noqa
+from .stop import stop_when_event_set # noqa
+
+# Import all built-in wait strategies for easier usage.
+from .wait import wait_chain # noqa
+from .wait import wait_combine # noqa
+from .wait import wait_exponential # noqa
+from .wait import wait_fixed # noqa
+from .wait import wait_incrementing # noqa
+from .wait import wait_none # noqa
+from .wait import wait_random # noqa
+from .wait import wait_random_exponential # noqa
+from .wait import wait_random_exponential as wait_full_jitter # noqa
+from .wait import wait_exponential_jitter # noqa
+
+# Import all built-in before strategies for easier usage.
+from .before import before_log # noqa
+from .before import before_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .after import after_log # noqa
+from .after import after_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .before_sleep import before_sleep_log # noqa
+from .before_sleep import before_sleep_nothing # noqa
+
+try:
+ import tornado
+except ImportError:
+ tornado = None
+
+if t.TYPE_CHECKING:
+ import types
+
+ from .retry import RetryBaseT
+ from .stop import StopBaseT
+ from .wait import WaitBaseT
+
+
+WrappedFnReturnT = t.TypeVar("WrappedFnReturnT")
+WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Any])
+
+
+class TryAgain(Exception):
+ """Always retry the executed function when raised."""
+
+
+NO_RESULT = object()
+
+
+class DoAttempt:
+ pass
+
+
+class DoSleep(float):
+ pass
+
+
+class BaseAction:
+ """Base class for representing actions to take by retry object.
+
+ Concrete implementations must define:
+ - __init__: to initialize all necessary fields
+ - REPR_FIELDS: class variable specifying attributes to include in repr(self)
+ - NAME: for identification in retry object methods and callbacks
+ """
+
+ REPR_FIELDS: t.Sequence[str] = ()
+ NAME: t.Optional[str] = None
+
+ def __repr__(self) -> str:
+ state_str = ", ".join(f"{field}={getattr(self, field)!r}" for field in self.REPR_FIELDS)
+ return f"{self.__class__.__name__}({state_str})"
+
+ def __str__(self) -> str:
+ return repr(self)
+
+
+class RetryAction(BaseAction):
+ REPR_FIELDS = ("sleep",)
+ NAME = "retry"
+
+ def __init__(self, sleep: t.SupportsFloat) -> None:
+ self.sleep = float(sleep)
+
+
+_unset = object()
+
+
+def _first_set(first: t.Union[t.Any, object], second: t.Any) -> t.Any:
+ return second if first is _unset else first
+
+
+class RetryError(Exception):
+ """Encapsulates the last attempt instance right before giving up."""
+
+ def __init__(self, last_attempt: "Future") -> None:
+ self.last_attempt = last_attempt
+ super().__init__(last_attempt)
+
+ def reraise(self) -> t.NoReturn:
+ if self.last_attempt.failed:
+ raise self.last_attempt.result()
+ raise self
+
+ def __str__(self) -> str:
+ return f"{self.__class__.__name__}[{self.last_attempt}]"
+
+
+class AttemptManager:
+ """Manage attempt context."""
+
+ def __init__(self, retry_state: "RetryCallState"):
+ self.retry_state = retry_state
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(
+ self,
+ exc_type: t.Optional[t.Type[BaseException]],
+ exc_value: t.Optional[BaseException],
+ traceback: t.Optional["types.TracebackType"],
+ ) -> t.Optional[bool]:
+ if exc_type is not None and exc_value is not None:
+ self.retry_state.set_exception((exc_type, exc_value, traceback))
+ return True # Swallow exception.
+ else:
+ # We don't have the result, actually.
+ self.retry_state.set_result(None)
+ return None
+
+
+class BaseRetrying(ABC):
+ def __init__(
+ self,
+ sleep: t.Callable[[t.Union[int, float]], None] = sleep,
+ stop: "StopBaseT" = stop_never,
+ wait: "WaitBaseT" = wait_none(),
+ retry: "RetryBaseT" = retry_if_exception_type(),
+ before: t.Callable[["RetryCallState"], None] = before_nothing,
+ after: t.Callable[["RetryCallState"], None] = after_nothing,
+ before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
+ reraise: bool = False,
+ retry_error_cls: t.Type[RetryError] = RetryError,
+ retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
+ ):
+ self.sleep = sleep
+ self.stop = stop
+ self.wait = wait
+ self.retry = retry
+ self.before = before
+ self.after = after
+ self.before_sleep = before_sleep
+ self.reraise = reraise
+ self._local = threading.local()
+ self.retry_error_cls = retry_error_cls
+ self.retry_error_callback = retry_error_callback
+
+ def copy(
+ self,
+ sleep: t.Union[t.Callable[[t.Union[int, float]], None], object] = _unset,
+ stop: t.Union["StopBaseT", object] = _unset,
+ wait: t.Union["WaitBaseT", object] = _unset,
+ retry: t.Union[retry_base, object] = _unset,
+ before: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
+ after: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
+ before_sleep: t.Union[t.Optional[t.Callable[["RetryCallState"], None]], object] = _unset,
+ reraise: t.Union[bool, object] = _unset,
+ retry_error_cls: t.Union[t.Type[RetryError], object] = _unset,
+ retry_error_callback: t.Union[t.Optional[t.Callable[["RetryCallState"], t.Any]], object] = _unset,
+ ) -> "BaseRetrying":
+ """Copy this object with some parameters changed if needed."""
+ return self.__class__(
+ sleep=_first_set(sleep, self.sleep),
+ stop=_first_set(stop, self.stop),
+ wait=_first_set(wait, self.wait),
+ retry=_first_set(retry, self.retry),
+ before=_first_set(before, self.before),
+ after=_first_set(after, self.after),
+ before_sleep=_first_set(before_sleep, self.before_sleep),
+ reraise=_first_set(reraise, self.reraise),
+ retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
+ retry_error_callback=_first_set(retry_error_callback, self.retry_error_callback),
+ )
+
+ def __repr__(self) -> str:
+ return (
+ f"<{self.__class__.__name__} object at 0x{id(self):x} ("
+ f"stop={self.stop}, "
+ f"wait={self.wait}, "
+ f"sleep={self.sleep}, "
+ f"retry={self.retry}, "
+ f"before={self.before}, "
+ f"after={self.after})>"
+ )
+
+ @property
+ def statistics(self) -> t.Dict[str, t.Any]:
+ """Return a dictionary of runtime statistics.
+
+        This dictionary will be empty when the controller has never been
+        run. When it is running or has run previously it should have (but
+        may not have) useful and/or informational keys and values when
+        running is underway and/or completed.
+
+        .. warning:: The keys in this dictionary **should** be somewhat
+                     stable (not changing), but their existence **may**
+                     change between major releases as new statistics are
+                     gathered or removed so before accessing keys ensure that
+                     they actually exist and handle when they do not.
+
+        .. note:: The values in this dictionary are local to the thread
+                  running call (so if multiple threads share the same retrying
+                  object - either directly or indirectly) they will each have
+                  their own view of statistics they have collected (in the
+                  future we may provide a way to aggregate the various
+                  statistics from each thread).
+ """
+ try:
+ return self._local.statistics # type: ignore[no-any-return]
+ except AttributeError:
+ self._local.statistics = t.cast(t.Dict[str, t.Any], {})
+ return self._local.statistics
+
+ def wraps(self, f: WrappedFn) -> WrappedFn:
+ """Wrap a function for retrying.
+
+        :param f: A function to wrap for retrying.
+ """
+
+ @functools.wraps(f)
+ def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
+ return self(f, *args, **kw)
+
+ def retry_with(*args: t.Any, **kwargs: t.Any) -> WrappedFn:
+ return self.copy(*args, **kwargs).wraps(f)
+
+ wrapped_f.retry = self # type: ignore[attr-defined]
+ wrapped_f.retry_with = retry_with # type: ignore[attr-defined]
+
+ return wrapped_f # type: ignore[return-value]
+
+ def begin(self) -> None:
+ self.statistics.clear()
+ self.statistics["start_time"] = time.monotonic()
+ self.statistics["attempt_number"] = 1
+ self.statistics["idle_for"] = 0
+
+ def iter(self, retry_state: "RetryCallState") -> t.Union[DoAttempt, DoSleep, t.Any]: # noqa
+ fut = retry_state.outcome
+ if fut is None:
+ if self.before is not None:
+ self.before(retry_state)
+ return DoAttempt()
+
+ is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
+ if not (is_explicit_retry or self.retry(retry_state)):
+ return fut.result()
+
+ if self.after is not None:
+ self.after(retry_state)
+
+ self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
+ if self.stop(retry_state):
+ if self.retry_error_callback:
+ return self.retry_error_callback(retry_state)
+ retry_exc = self.retry_error_cls(fut)
+ if self.reraise:
+ raise retry_exc.reraise()
+ raise retry_exc from fut.exception()
+
+ if self.wait:
+ sleep = self.wait(retry_state)
+ else:
+ sleep = 0.0
+ retry_state.next_action = RetryAction(sleep)
+ retry_state.idle_for += sleep
+ self.statistics["idle_for"] += sleep
+ self.statistics["attempt_number"] += 1
+
+ if self.before_sleep is not None:
+ self.before_sleep(retry_state)
+
+ return DoSleep(sleep)
+
+ def __iter__(self) -> t.Generator[AttemptManager, None, None]:
+ self.begin()
+
+ retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ yield AttemptManager(retry_state=retry_state)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ self.sleep(do)
+ else:
+ break
+
+ @abstractmethod
+ def __call__(
+ self,
+ fn: t.Callable[..., WrappedFnReturnT],
+ *args: t.Any,
+ **kwargs: t.Any,
+ ) -> WrappedFnReturnT:
+ pass
+
+
+class Retrying(BaseRetrying):
+ """Retrying controller."""
+
+ def __call__(
+ self,
+ fn: t.Callable[..., WrappedFnReturnT],
+ *args: t.Any,
+ **kwargs: t.Any,
+ ) -> WrappedFnReturnT:
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ self.sleep(do)
+ else:
+ return do # type: ignore[no-any-return]
+
+
+if sys.version_info >= (3, 9):
+ FutureGenericT = futures.Future[t.Any]
+else:
+ FutureGenericT = futures.Future
+
+
+class Future(FutureGenericT):
+ """Encapsulates a (future or past) attempted call to a target function."""
+
+ def __init__(self, attempt_number: int) -> None:
+ super().__init__()
+ self.attempt_number = attempt_number
+
+ @property
+ def failed(self) -> bool:
+        """Return whether an exception is being held in this future."""
+ return self.exception() is not None
+
+ @classmethod
+ def construct(cls, attempt_number: int, value: t.Any, has_exception: bool) -> "Future":
+ """Construct a new Future object."""
+ fut = cls(attempt_number)
+ if has_exception:
+ fut.set_exception(value)
+ else:
+ fut.set_result(value)
+ return fut
+
+
+class RetryCallState:
+ """State related to a single call wrapped with Retrying."""
+
+ def __init__(
+ self,
+ retry_object: BaseRetrying,
+ fn: t.Optional[WrappedFn],
+ args: t.Any,
+ kwargs: t.Any,
+ ) -> None:
+ #: Retry call start timestamp
+ self.start_time = time.monotonic()
+ #: Retry manager object
+ self.retry_object = retry_object
+ #: Function wrapped by this retry call
+ self.fn = fn
+ #: Arguments of the function wrapped by this retry call
+ self.args = args
+ #: Keyword arguments of the function wrapped by this retry call
+ self.kwargs = kwargs
+
+ #: The number of the current attempt
+ self.attempt_number: int = 1
+ #: Last outcome (result or exception) produced by the function
+ self.outcome: t.Optional[Future] = None
+ #: Timestamp of the last outcome
+ self.outcome_timestamp: t.Optional[float] = None
+ #: Time spent sleeping in retries
+ self.idle_for: float = 0.0
+ #: Next action as decided by the retry manager
+ self.next_action: t.Optional[RetryAction] = None
+
+ @property
+ def seconds_since_start(self) -> t.Optional[float]:
+ if self.outcome_timestamp is None:
+ return None
+ return self.outcome_timestamp - self.start_time
+
+ def prepare_for_next_attempt(self) -> None:
+ self.outcome = None
+ self.outcome_timestamp = None
+ self.attempt_number += 1
+ self.next_action = None
+
+ def set_result(self, val: t.Any) -> None:
+ ts = time.monotonic()
+ fut = Future(self.attempt_number)
+ fut.set_result(val)
+ self.outcome, self.outcome_timestamp = fut, ts
+
+ def set_exception(
+ self, exc_info: t.Tuple[t.Type[BaseException], BaseException, "types.TracebackType| None"]
+ ) -> None:
+ ts = time.monotonic()
+ fut = Future(self.attempt_number)
+ fut.set_exception(exc_info[1])
+ self.outcome, self.outcome_timestamp = fut, ts
+
+ def __repr__(self) -> str:
+ if self.outcome is None:
+ result = "none yet"
+ elif self.outcome.failed:
+ exception = self.outcome.exception()
+ result = f"failed ({exception.__class__.__name__} {exception})"
+ else:
+ result = f"returned {self.outcome.result()}"
+
+ slept = float(round(self.idle_for, 2))
+ clsname = self.__class__.__name__
+ return f"<{clsname} {id(self)}: attempt #{self.attempt_number}; slept for {slept}; last result: {result}>"
+
+
+@t.overload
+def retry(func: WrappedFn) -> WrappedFn:
+ ...
+
+
+@t.overload
+def retry(
+ sleep: t.Callable[[t.Union[int, float]], t.Optional[t.Awaitable[None]]] = sleep,
+ stop: "StopBaseT" = stop_never,
+ wait: "WaitBaseT" = wait_none(),
+ retry: "RetryBaseT" = retry_if_exception_type(),
+ before: t.Callable[["RetryCallState"], None] = before_nothing,
+ after: t.Callable[["RetryCallState"], None] = after_nothing,
+ before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
+ reraise: bool = False,
+ retry_error_cls: t.Type["RetryError"] = RetryError,
+ retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
+) -> t.Callable[[WrappedFn], WrappedFn]:
+ ...
+
+
+def retry(*dargs: t.Any, **dkw: t.Any) -> t.Any:
+ """Wrap a function with a new `Retrying` object.
+
+ :param dargs: positional arguments passed to Retrying object
+ :param dkw: keyword arguments passed to the Retrying object
+ """
+ # support both @retry and @retry() as valid syntax
+ if len(dargs) == 1 and callable(dargs[0]):
+ return retry()(dargs[0])
+ else:
+
+ def wrap(f: WrappedFn) -> WrappedFn:
+ if isinstance(f, retry_base):
+ warnings.warn(
+ f"Got retry_base instance ({f.__class__.__name__}) as callable argument, "
+ f"this will probably hang indefinitely (did you mean retry={f.__class__.__name__}(...)?)"
+ )
+ r: "BaseRetrying"
+ if iscoroutinefunction(f):
+ r = AsyncRetrying(*dargs, **dkw)
+ elif tornado and hasattr(tornado.gen, "is_coroutine_function") and tornado.gen.is_coroutine_function(f):
+ r = TornadoRetrying(*dargs, **dkw)
+ else:
+ r = Retrying(*dargs, **dkw)
+
+ return r.wraps(f)
+
+ return wrap
+
+
+from tenacity._asyncio import AsyncRetrying # noqa:E402,I100
+
+if tornado:
+ from tenacity.tornadoweb import TornadoRetrying
+
+
+__all__ = [
+ "retry_base",
+ "retry_all",
+ "retry_always",
+ "retry_any",
+ "retry_if_exception",
+ "retry_if_exception_type",
+ "retry_if_exception_cause_type",
+ "retry_if_not_exception_type",
+ "retry_if_not_result",
+ "retry_if_result",
+ "retry_never",
+ "retry_unless_exception_type",
+ "retry_if_exception_message",
+ "retry_if_not_exception_message",
+ "sleep",
+ "sleep_using_event",
+ "stop_after_attempt",
+ "stop_after_delay",
+ "stop_all",
+ "stop_any",
+ "stop_never",
+ "stop_when_event_set",
+ "wait_chain",
+ "wait_combine",
+ "wait_exponential",
+ "wait_fixed",
+ "wait_incrementing",
+ "wait_none",
+ "wait_random",
+ "wait_random_exponential",
+ "wait_full_jitter",
+ "wait_exponential_jitter",
+ "before_log",
+ "before_nothing",
+ "after_log",
+ "after_nothing",
+ "before_sleep_log",
+ "before_sleep_nothing",
+ "retry",
+ "WrappedFn",
+ "TryAgain",
+ "NO_RESULT",
+ "DoAttempt",
+ "DoSleep",
+ "BaseAction",
+ "RetryAction",
+ "RetryError",
+ "AttemptManager",
+ "BaseRetrying",
+ "Retrying",
+ "Future",
+ "RetryCallState",
+ "AsyncRetrying",
+]
diff --git a/contrib/python/tenacity/py3/tenacity/_asyncio.py b/contrib/python/tenacity/py3/tenacity/_asyncio.py
new file mode 100644
index 0000000000..9e10c072eb
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/_asyncio.py
@@ -0,0 +1,94 @@
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import sys
+import typing as t
+from asyncio import sleep
+
+from tenacity import AttemptManager
+from tenacity import BaseRetrying
+from tenacity import DoAttempt
+from tenacity import DoSleep
+from tenacity import RetryCallState
+
+WrappedFnReturnT = t.TypeVar("WrappedFnReturnT")
+WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Awaitable[t.Any]])
+
+
+class AsyncRetrying(BaseRetrying):
+ sleep: t.Callable[[float], t.Awaitable[t.Any]]
+
+ def __init__(self, sleep: t.Callable[[float], t.Awaitable[t.Any]] = sleep, **kwargs: t.Any) -> None:
+ super().__init__(**kwargs)
+ self.sleep = sleep
+
+ async def __call__( # type: ignore[override]
+ self, fn: WrappedFn, *args: t.Any, **kwargs: t.Any
+ ) -> WrappedFnReturnT:
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = await fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ await self.sleep(do)
+ else:
+ return do # type: ignore[no-any-return]
+
+ def __iter__(self) -> t.Generator[AttemptManager, None, None]:
+ raise TypeError("AsyncRetrying object is not iterable")
+
+ def __aiter__(self) -> "AsyncRetrying":
+ self.begin()
+ self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
+ return self
+
+ async def __anext__(self) -> AttemptManager:
+ while True:
+ do = self.iter(retry_state=self._retry_state)
+ if do is None:
+ raise StopAsyncIteration
+ elif isinstance(do, DoAttempt):
+ return AttemptManager(retry_state=self._retry_state)
+ elif isinstance(do, DoSleep):
+ self._retry_state.prepare_for_next_attempt()
+ await self.sleep(do)
+ else:
+ raise StopAsyncIteration
+
+ def wraps(self, fn: WrappedFn) -> WrappedFn:
+ fn = super().wraps(fn)
+ # Ensure wrapper is recognized as a coroutine function.
+
+ @functools.wraps(fn)
+ async def async_wrapped(*args: t.Any, **kwargs: t.Any) -> t.Any:
+ return await fn(*args, **kwargs)
+
+ # Preserve attributes
+ async_wrapped.retry = fn.retry # type: ignore[attr-defined]
+ async_wrapped.retry_with = fn.retry_with # type: ignore[attr-defined]
+
+ return async_wrapped # type: ignore[return-value]
diff --git a/contrib/python/tenacity/py3/tenacity/_utils.py b/contrib/python/tenacity/py3/tenacity/_utils.py
new file mode 100644
index 0000000000..f14ff32096
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/_utils.py
@@ -0,0 +1,76 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import typing
+from datetime import timedelta
+
+
+# sys.maxsize:
+# An integer giving the maximum value a variable of type Py_ssize_t can take.
+MAX_WAIT = sys.maxsize / 2
+
+
+def find_ordinal(pos_num: int) -> str:
+ # See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers
+ if pos_num == 0:
+ return "th"
+ elif pos_num == 1:
+ return "st"
+ elif pos_num == 2:
+ return "nd"
+ elif pos_num == 3:
+ return "rd"
+ elif 4 <= pos_num <= 20:
+ return "th"
+ else:
+ return find_ordinal(pos_num % 10)
+
+
+def to_ordinal(pos_num: int) -> str:
+ return f"{pos_num}{find_ordinal(pos_num)}"
+
+
+def get_callback_name(cb: typing.Callable[..., typing.Any]) -> str:
+ """Get a callback fully-qualified name.
+
+ If no name can be produced ``repr(cb)`` is called and returned.
+ """
+ segments = []
+ try:
+ segments.append(cb.__qualname__)
+ except AttributeError:
+ try:
+ segments.append(cb.__name__)
+ except AttributeError:
+ pass
+ if not segments:
+ return repr(cb)
+ else:
+ try:
+ # When running under sphinx it appears this can be none?
+ if cb.__module__:
+ segments.insert(0, cb.__module__)
+ except AttributeError:
+ pass
+ return ".".join(segments)
+
+
+time_unit_type = typing.Union[int, float, timedelta]
+
+
+def to_seconds(time_unit: time_unit_type) -> float:
+ return float(time_unit.total_seconds() if isinstance(time_unit, timedelta) else time_unit)
diff --git a/contrib/python/tenacity/py3/tenacity/after.py b/contrib/python/tenacity/py3/tenacity/after.py
new file mode 100644
index 0000000000..aa3cc9df0c
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/after.py
@@ -0,0 +1,51 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from tenacity import RetryCallState
+
+
+def after_nothing(retry_state: "RetryCallState") -> None:
+ """After call strategy that does nothing."""
+
+
+def after_log(
+ logger: "logging.Logger",
+ log_level: int,
+ sec_format: str = "%0.3f",
+) -> typing.Callable[["RetryCallState"], None]:
+ """After call strategy that logs to some logger the finished attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ if retry_state.fn is None:
+ # NOTE(sileht): can't really happen, but we must please mypy
+ fn_name = "<unknown>"
+ else:
+ fn_name = _utils.get_callback_name(retry_state.fn)
+ logger.log(
+ log_level,
+ f"Finished call to '{fn_name}' "
+ f"after {sec_format % retry_state.seconds_since_start}(s), "
+ f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
+ )
+
+ return log_it
diff --git a/contrib/python/tenacity/py3/tenacity/before.py b/contrib/python/tenacity/py3/tenacity/before.py
new file mode 100644
index 0000000000..9284f7ae5b
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/before.py
@@ -0,0 +1,46 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from tenacity import RetryCallState
+
+
+def before_nothing(retry_state: "RetryCallState") -> None:
+ """Before call strategy that does nothing."""
+
+
+def before_log(logger: "logging.Logger", log_level: int) -> typing.Callable[["RetryCallState"], None]:
+ """Before call strategy that logs to some logger the attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ if retry_state.fn is None:
+ # NOTE(sileht): can't really happen, but we must please mypy
+ fn_name = "<unknown>"
+ else:
+ fn_name = _utils.get_callback_name(retry_state.fn)
+ logger.log(
+ log_level,
+ f"Starting call to '{fn_name}', "
+ f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
+ )
+
+ return log_it
diff --git a/contrib/python/tenacity/py3/tenacity/before_sleep.py b/contrib/python/tenacity/py3/tenacity/before_sleep.py
new file mode 100644
index 0000000000..279a21eb5b
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/before_sleep.py
@@ -0,0 +1,71 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from tenacity import RetryCallState
+
+
+def before_sleep_nothing(retry_state: "RetryCallState") -> None:
+ """Before call strategy that does nothing."""
+
+
+def before_sleep_log(
+ logger: "logging.Logger",
+ log_level: int,
+ exc_info: bool = False,
+) -> typing.Callable[["RetryCallState"], None]:
+ """Before call strategy that logs to some logger the attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ local_exc_info: BaseException | bool | None
+
+ if retry_state.outcome is None:
+ raise RuntimeError("log_it() called before outcome was set")
+
+ if retry_state.next_action is None:
+ raise RuntimeError("log_it() called before next_action was set")
+
+ if retry_state.outcome.failed:
+ ex = retry_state.outcome.exception()
+ verb, value = "raised", f"{ex.__class__.__name__}: {ex}"
+
+ if exc_info:
+ local_exc_info = retry_state.outcome.exception()
+ else:
+ local_exc_info = False
+ else:
+ verb, value = "returned", retry_state.outcome.result()
+ local_exc_info = False # exc_info does not apply when no exception
+
+ if retry_state.fn is None:
+ # NOTE(sileht): can't really happen, but we must please mypy
+ fn_name = "<unknown>"
+ else:
+ fn_name = _utils.get_callback_name(retry_state.fn)
+
+ logger.log(
+ log_level,
+ f"Retrying {fn_name} " f"in {retry_state.next_action.sleep} seconds as it {verb} {value}.",
+ exc_info=local_exc_info,
+ )
+
+ return log_it
diff --git a/contrib/python/tenacity/py3/tenacity/nap.py b/contrib/python/tenacity/py3/tenacity/nap.py
new file mode 100644
index 0000000000..72aa5bfd4b
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/nap.py
@@ -0,0 +1,43 @@
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import typing
+
+if typing.TYPE_CHECKING:
+ import threading
+
+
+def sleep(seconds: float) -> None:
+ """
+ Sleep strategy that delays execution for a given number of seconds.
+
+ This is the default strategy, and may be mocked out for unit testing.
+ """
+ time.sleep(seconds)
+
+
+class sleep_using_event:
+ """Sleep strategy that waits on an event to be set."""
+
+ def __init__(self, event: "threading.Event") -> None:
+ self.event = event
+
+ def __call__(self, timeout: typing.Optional[float]) -> None:
+ # NOTE(harlowja): this may *not* actually wait for timeout
+ # seconds if the event is set (ie this may eject out early).
+ self.event.wait(timeout=timeout)
diff --git a/contrib/python/tenacity/py3/tenacity/py.typed b/contrib/python/tenacity/py3/tenacity/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/py.typed
diff --git a/contrib/python/tenacity/py3/tenacity/retry.py b/contrib/python/tenacity/py3/tenacity/retry.py
new file mode 100644
index 0000000000..765b6fe14a
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/retry.py
@@ -0,0 +1,272 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import re
+import typing
+
+if typing.TYPE_CHECKING:
+ from tenacity import RetryCallState
+
+
+class retry_base(abc.ABC):
+ """Abstract base class for retry strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ pass
+
+ def __and__(self, other: "retry_base") -> "retry_all":
+ return retry_all(self, other)
+
+ def __or__(self, other: "retry_base") -> "retry_any":
+ return retry_any(self, other)
+
+
+RetryBaseT = typing.Union[retry_base, typing.Callable[["RetryCallState"], bool]]
+
+
+class _retry_never(retry_base):
+ """Retry strategy that never rejects any result."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return False
+
+
+retry_never = _retry_never()
+
+
+class _retry_always(retry_base):
+ """Retry strategy that always rejects any result."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return True
+
+
+retry_always = _retry_always()
+
+
+class retry_if_exception(retry_base):
+ """Retry strategy that retries if an exception verifies a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[BaseException], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome is None:
+ raise RuntimeError("__call__() called before outcome was set")
+
+ if retry_state.outcome.failed:
+ exception = retry_state.outcome.exception()
+ if exception is None:
+ raise RuntimeError("outcome failed but the exception is None")
+ return self.predicate(exception)
+ else:
+ return False
+
+
+class retry_if_exception_type(retry_if_exception):
+ """Retries if an exception has been raised of one or more types."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: isinstance(e, exception_types))
+
+
+class retry_if_not_exception_type(retry_if_exception):
+ """Retries except an exception has been raised of one or more types."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: not isinstance(e, exception_types))
+
+
+class retry_unless_exception_type(retry_if_exception):
+ """Retries until an exception is raised of one or more types."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: not isinstance(e, exception_types))
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome is None:
+ raise RuntimeError("__call__() called before outcome was set")
+
+ # always retry if no exception was raised
+ if not retry_state.outcome.failed:
+ return True
+
+ exception = retry_state.outcome.exception()
+ if exception is None:
+ raise RuntimeError("outcome failed but the exception is None")
+ return self.predicate(exception)
+
+
+class retry_if_exception_cause_type(retry_base):
+ """Retries if any of the causes of the raised exception is of one or more types.
+
+ The check on the type of the cause of the exception is done recursively (until finding
+ an exception in the chain that has no `__cause__`)
+ """
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_cause_types = exception_types
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome is None:
+ raise RuntimeError("__call__ called before outcome was set")
+
+ if retry_state.outcome.failed:
+ exc = retry_state.outcome.exception()
+ while exc is not None:
+ if isinstance(exc.__cause__, self.exception_cause_types):
+ return True
+ exc = exc.__cause__
+
+ return False
+
+
+class retry_if_result(retry_base):
+ """Retries if the result verifies a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome is None:
+ raise RuntimeError("__call__() called before outcome was set")
+
+ if not retry_state.outcome.failed:
+ return self.predicate(retry_state.outcome.result())
+ else:
+ return False
+
+
+class retry_if_not_result(retry_base):
+ """Retries if the result refutes a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome is None:
+ raise RuntimeError("__call__() called before outcome was set")
+
+ if not retry_state.outcome.failed:
+ return not self.predicate(retry_state.outcome.result())
+ else:
+ return False
+
+
+class retry_if_exception_message(retry_if_exception):
+ """Retries if an exception message equals or matches."""
+
+ def __init__(
+ self,
+ message: typing.Optional[str] = None,
+ match: typing.Optional[str] = None,
+ ) -> None:
+ if message and match:
+ raise TypeError(f"{self.__class__.__name__}() takes either 'message' or 'match', not both")
+
+ # set predicate
+ if message:
+
+ def message_fnc(exception: BaseException) -> bool:
+ return message == str(exception)
+
+ predicate = message_fnc
+ elif match:
+ prog = re.compile(match)
+
+ def match_fnc(exception: BaseException) -> bool:
+ return bool(prog.match(str(exception)))
+
+ predicate = match_fnc
+ else:
+ raise TypeError(f"{self.__class__.__name__}() missing 1 required argument 'message' or 'match'")
+
+ super().__init__(predicate)
+
+
+class retry_if_not_exception_message(retry_if_exception_message):
+ """Retries until an exception message equals or matches."""
+
+ def __init__(
+ self,
+ message: typing.Optional[str] = None,
+ match: typing.Optional[str] = None,
+ ) -> None:
+ super().__init__(message, match)
+ # invert predicate
+ if_predicate = self.predicate
+ self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome is None:
+ raise RuntimeError("__call__() called before outcome was set")
+
+ if not retry_state.outcome.failed:
+ return True
+
+ exception = retry_state.outcome.exception()
+ if exception is None:
+ raise RuntimeError("outcome failed but the exception is None")
+ return self.predicate(exception)
+
+
+class retry_any(retry_base):
+ """Retries if any of the retries condition is valid."""
+
+ def __init__(self, *retries: retry_base) -> None:
+ self.retries = retries
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return any(r(retry_state) for r in self.retries)
+
+
+class retry_all(retry_base):
+ """Retries if all the retries condition are valid."""
+
+ def __init__(self, *retries: retry_base) -> None:
+ self.retries = retries
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return all(r(retry_state) for r in self.retries)
diff --git a/contrib/python/tenacity/py3/tenacity/stop.py b/contrib/python/tenacity/py3/tenacity/stop.py
new file mode 100644
index 0000000000..e64786063f
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/stop.py
@@ -0,0 +1,103 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+import typing
+
+from tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import threading
+
+ from tenacity import RetryCallState
+
+
+class stop_base(abc.ABC):
+ """Abstract base class for stop strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ pass
+
+ def __and__(self, other: "stop_base") -> "stop_all":
+ return stop_all(self, other)
+
+ def __or__(self, other: "stop_base") -> "stop_any":
+ return stop_any(self, other)
+
+
+StopBaseT = typing.Union[stop_base, typing.Callable[["RetryCallState"], bool]]
+
+
+class stop_any(stop_base):
+ """Stop if any of the stop condition is valid."""
+
+ def __init__(self, *stops: stop_base) -> None:
+ self.stops = stops
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return any(x(retry_state) for x in self.stops)
+
+
+class stop_all(stop_base):
+ """Stop if all the stop conditions are valid."""
+
+ def __init__(self, *stops: stop_base) -> None:
+ self.stops = stops
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return all(x(retry_state) for x in self.stops)
+
+
+class _stop_never(stop_base):
+ """Never stop."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return False
+
+
+stop_never = _stop_never()
+
+
+class stop_when_event_set(stop_base):
+ """Stop when the given event is set."""
+
+ def __init__(self, event: "threading.Event") -> None:
+ self.event = event
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return self.event.is_set()
+
+
+class stop_after_attempt(stop_base):
+ """Stop when the previous attempt >= max_attempt."""
+
+ def __init__(self, max_attempt_number: int) -> None:
+ self.max_attempt_number = max_attempt_number
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return retry_state.attempt_number >= self.max_attempt_number
+
+
+class stop_after_delay(stop_base):
+ """Stop when the time from the first attempt >= limit."""
+
+ def __init__(self, max_delay: _utils.time_unit_type) -> None:
+ self.max_delay = _utils.to_seconds(max_delay)
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.seconds_since_start is None:
+ raise RuntimeError("__call__() called but seconds_since_start is not set")
+ return retry_state.seconds_since_start >= self.max_delay
diff --git a/contrib/python/tenacity/py3/tenacity/tornadoweb.py b/contrib/python/tenacity/py3/tenacity/tornadoweb.py
new file mode 100644
index 0000000000..fabf13ae2e
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/tornadoweb.py
@@ -0,0 +1,59 @@
+# Copyright 2017 Elisey Zanko
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import typing
+
+from tenacity import BaseRetrying
+from tenacity import DoAttempt
+from tenacity import DoSleep
+from tenacity import RetryCallState
+
+from tornado import gen
+
+if typing.TYPE_CHECKING:
+ from tornado.concurrent import Future
+
+_RetValT = typing.TypeVar("_RetValT")
+
+
+class TornadoRetrying(BaseRetrying):
+ def __init__(self, sleep: "typing.Callable[[float], Future[None]]" = gen.sleep, **kwargs: typing.Any) -> None:
+ super().__init__(**kwargs)
+ self.sleep = sleep
+
+ @gen.coroutine # type: ignore[misc]
+ def __call__(
+ self,
+ fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]",
+ *args: typing.Any,
+ **kwargs: typing.Any,
+ ) -> "typing.Generator[typing.Any, typing.Any, _RetValT]":
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = yield fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ yield self.sleep(do)
+ else:
+ raise gen.Return(do)
diff --git a/contrib/python/tenacity/py3/tenacity/wait.py b/contrib/python/tenacity/py3/tenacity/wait.py
new file mode 100644
index 0000000000..e1e2fe48bc
--- /dev/null
+++ b/contrib/python/tenacity/py3/tenacity/wait.py
@@ -0,0 +1,228 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import random
+import typing
+
+from tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ from tenacity import RetryCallState
+
+
+class wait_base(abc.ABC):
+ """Abstract base class for wait strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ pass
+
+ def __add__(self, other: "wait_base") -> "wait_combine":
+ return wait_combine(self, other)
+
+ def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]:
+ # make it possible to use multiple waits with the built-in sum function
+ if other == 0: # type: ignore[comparison-overlap]
+ return self
+ return self.__add__(other)
+
+
+WaitBaseT = typing.Union[wait_base, typing.Callable[["RetryCallState"], typing.Union[float, int]]]
+
+
+class wait_fixed(wait_base):
+ """Wait strategy that waits a fixed amount of time between each retry."""
+
+ def __init__(self, wait: _utils.time_unit_type) -> None:
+ self.wait_fixed = _utils.to_seconds(wait)
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return self.wait_fixed
+
+
+class wait_none(wait_fixed):
+ """Wait strategy that doesn't wait at all before retrying."""
+
+ def __init__(self) -> None:
+ super().__init__(0)
+
+
+class wait_random(wait_base):
+ """Wait strategy that waits a random amount of time between min/max."""
+
+ def __init__(self, min: _utils.time_unit_type = 0, max: _utils.time_unit_type = 1) -> None: # noqa
+ self.wait_random_min = _utils.to_seconds(min)
+ self.wait_random_max = _utils.to_seconds(max)
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min))
+
+
+class wait_combine(wait_base):
+ """Combine several waiting strategies."""
+
+ def __init__(self, *strategies: wait_base) -> None:
+ self.wait_funcs = strategies
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return sum(x(retry_state=retry_state) for x in self.wait_funcs)
+
+
+class wait_chain(wait_base):
+ """Chain two or more waiting strategies.
+
+ If all strategies are exhausted, the very last strategy is used
+ thereafter.
+
+ For example::
+
+ @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] +
+ [wait_fixed(2) for j in range(5)] +
+                          [wait_fixed(5) for k in range(4)]))
+ def wait_chained():
+ print("Wait 1s for 3 attempts, 2s for 5 attempts and 5s
+ thereafter.")
+ """
+
+ def __init__(self, *strategies: wait_base) -> None:
+ self.strategies = strategies
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies))
+ wait_func = self.strategies[wait_func_no - 1]
+ return wait_func(retry_state=retry_state)
+
+
+class wait_incrementing(wait_base):
+ """Wait an incremental amount of time after each attempt.
+
+ Starting at a starting value and incrementing by a value for each attempt
+ (and restricting the upper limit to some maximum value).
+ """
+
+ def __init__(
+ self,
+ start: _utils.time_unit_type = 0,
+ increment: _utils.time_unit_type = 100,
+ max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa
+ ) -> None:
+ self.start = _utils.to_seconds(start)
+ self.increment = _utils.to_seconds(increment)
+ self.max = _utils.to_seconds(max)
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ result = self.start + (self.increment * (retry_state.attempt_number - 1))
+ return max(0, min(result, self.max))
+
+
+class wait_exponential(wait_base):
+ """Wait strategy that applies exponential backoff.
+
+ It allows for a customized multiplier and an ability to restrict the
+ upper and lower limits to some maximum and minimum value.
+
+ The intervals are fixed (i.e. there is no jitter), so this strategy is
+ suitable for balancing retries against latency when a required resource is
+ unavailable for an unknown duration, but *not* suitable for resolving
+ contention between multiple processes for a shared resource. Use
+ wait_random_exponential for the latter case.
+ """
+
+ def __init__(
+ self,
+ multiplier: typing.Union[int, float] = 1,
+ max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa
+ exp_base: typing.Union[int, float] = 2,
+ min: _utils.time_unit_type = 0, # noqa
+ ) -> None:
+ self.multiplier = multiplier
+ self.min = _utils.to_seconds(min)
+ self.max = _utils.to_seconds(max)
+ self.exp_base = exp_base
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ try:
+ exp = self.exp_base ** (retry_state.attempt_number - 1)
+ result = self.multiplier * exp
+ except OverflowError:
+ return self.max
+ return max(max(0, self.min), min(result, self.max))
+
+
+class wait_random_exponential(wait_exponential):
+ """Random wait with exponentially widening window.
+
+ An exponential backoff strategy used to mediate contention between multiple
+ uncoordinated processes for a shared resource in distributed systems. This
+ is the sense in which "exponential backoff" is meant in e.g. Ethernet
+ networking, and corresponds to the "Full Jitter" algorithm described in
+ this blog post:
+
+ https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
+ Each retry occurs at a random time in a geometrically expanding interval.
+ It allows for a custom multiplier and an ability to restrict the upper
+ limit of the random interval to some maximum value.
+
+ Example::
+
+ wait_random_exponential(multiplier=0.5, # initial window 0.5s
+ max=60) # max 60s timeout
+
+ When waiting for an unavailable resource to become available again, as
+ opposed to trying to resolve contention for a shared resource, the
+ wait_exponential strategy (which uses a fixed interval) may be preferable.
+
+ """
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ high = super().__call__(retry_state=retry_state)
+ return random.uniform(0, high)
+
+
+class wait_exponential_jitter(wait_base):
+ """Wait strategy that applies exponential backoff and jitter.
+
+ It allows for a customized initial wait, maximum wait and jitter.
+
+ This implements the strategy described here:
+ https://cloud.google.com/storage/docs/retry-strategy
+
+ The wait time is min(initial * 2**n + random.uniform(0, jitter), maximum)
+ where n is the retry count.
+ """
+
+ def __init__(
+ self,
+ initial: float = 1,
+ max: float = _utils.MAX_WAIT, # noqa
+ exp_base: float = 2,
+ jitter: float = 1,
+ ) -> None:
+ self.initial = initial
+ self.max = max
+ self.exp_base = exp_base
+ self.jitter = jitter
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ jitter = random.uniform(0, self.jitter)
+ try:
+ exp = self.exp_base ** (retry_state.attempt_number - 1)
+ result = self.initial * exp + jitter
+ except OverflowError:
+ result = self.max
+ return max(0, min(result, self.max))
diff --git a/contrib/python/tenacity/py3/ya.make b/contrib/python/tenacity/py3/ya.make
new file mode 100644
index 0000000000..9b5488ebed
--- /dev/null
+++ b/contrib/python/tenacity/py3/ya.make
@@ -0,0 +1,37 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(8.2.3)
+
+LICENSE(Apache-2.0)
+
+NO_LINT()
+
+NO_CHECK_IMPORTS(
+ tenacity.tornadoweb
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ tenacity/__init__.py
+ tenacity/_asyncio.py
+ tenacity/_utils.py
+ tenacity/after.py
+ tenacity/before.py
+ tenacity/before_sleep.py
+ tenacity/nap.py
+ tenacity/retry.py
+ tenacity/stop.py
+ tenacity/tornadoweb.py
+ tenacity/wait.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/tenacity/py3/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+ tenacity/py.typed
+)
+
+END()
diff --git a/contrib/python/tenacity/ya.make b/contrib/python/tenacity/ya.make
new file mode 100644
index 0000000000..ea622da569
--- /dev/null
+++ b/contrib/python/tenacity/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/tenacity/py2)
+ELSE()
+ PEERDIR(contrib/python/tenacity/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/contrib/python/websocket-client/py2/.dist-info/METADATA b/contrib/python/websocket-client/py2/.dist-info/METADATA
new file mode 100644
index 0000000000..f71e30a306
--- /dev/null
+++ b/contrib/python/websocket-client/py2/.dist-info/METADATA
@@ -0,0 +1,174 @@
+Metadata-Version: 2.1
+Name: websocket-client
+Version: 0.59.0
+Summary: WebSocket client for Python with low level API options
+Home-page: https://github.com/websocket-client/websocket-client.git
+Author: liris
+Author-email: liris.pp@gmail.com
+License: LGPL version 2.1
+Download-URL: https://github.com/websocket-client/websocket-client/releases
+Project-URL: Documentation, https://websocket-client.readthedocs.io/
+Project-URL: Source, https://github.com/websocket-client/websocket-client/
+Keywords: websockets client
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Intended Audience :: Developers
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Description-Content-Type: text/markdown
+Requires-Dist: six
+
+[![docs](https://readthedocs.org/projects/websocket-client/badge/?style=flat)](https://websocket-client.readthedocs.io/)
+[![Build Status](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml/badge.svg)](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml)
+[![codecov](https://codecov.io/gh/websocket-client/websocket-client/branch/master/graph/badge.svg?token=pcXhUQwiL3)](https://codecov.io/gh/websocket-client/websocket-client)
+[![PyPI Downloads](https://pepy.tech/badge/websocket-client)](https://pepy.tech/project/websocket-client)
+[![PyPI version](https://img.shields.io/pypi/v/websocket_client)](https://pypi.org/project/websocket_client/)
+
+# websocket-client
+
+websocket-client is a WebSocket client for Python. It provides access
+to low level APIs for WebSockets. websocket-client implements version
+[hybi-13](https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-13)
+of the WebSocket protocol. This client does not currently support the
+permessage-deflate extension from
+[RFC 7692](https://tools.ietf.org/html/rfc7692).
+
+## Documentation
+
+This project's documentation can be found at
+[https://websocket-client.readthedocs.io/](https://websocket-client.readthedocs.io/)
+
+## Contributing
+
+Please see the [contribution guidelines](https://github.com/websocket-client/websocket-client/blob/master/CONTRIBUTING.md)
+
+## Installation
+
+First, install the following dependencies:
+- six
+- backports.ssl\_match\_hostname for Python 2.x
+
+You can install the dependencies with the command `pip install six` and
+`pip install backports.ssl_match_hostname`
+
+You can use either `python setup.py install` or `pip install websocket-client`
+to install. This module is tested on Python 2.7 and Python 3.4+. Python 3
+support was first introduced in version 0.14.0, but is a work in progress.
+
+## Usage Tips
+
+Check out the documentation's FAQ for additional guidelines:
+[https://websocket-client.readthedocs.io/en/latest/faq.html](https://websocket-client.readthedocs.io/en/latest/faq.html)
+
+Known issues with this library include lack of WebSocket Compression
+support (RFC 7692) and [minimal threading documentation/support](https://websocket-client.readthedocs.io/en/latest/threading.html).
+
+## License
+
+- LGPL version 2.1
+
+### Performance
+
+The `send` and `validate_utf8` methods are very slow in pure Python. You can
+disable UTF8 validation in this library (and receive a performance enhancement)
+with the `skip_utf8_validation` parameter. If you want to get better
+performance, please install both numpy and wsaccel, and import them into your
+project files - these other libraries will automatically be used when available.
+Note that wsaccel can sometimes cause other issues.
+
+### Long-lived Connection
+
+Most real-world WebSockets situations involve longer-lived connections.
+The WebSocketApp `run_forever` loop automatically tries to reconnect when a
+connection is lost, and provides a variety of event-based connection controls.
+The project documentation has
+[additional examples](https://websocket-client.readthedocs.io/en/latest/examples.html)
+
+```python
+import websocket
+try:
+ import thread
+except ImportError:
+ import _thread as thread
+import time
+
+def on_message(ws, message):
+ print(message)
+
+def on_error(ws, error):
+ print(error)
+
+def on_close(ws):
+ print("### closed ###")
+
+def on_open(ws):
+ def run(*args):
+ for i in range(3):
+ time.sleep(1)
+ ws.send("Hello %d" % i)
+ time.sleep(1)
+ ws.close()
+ print("thread terminating...")
+ thread.start_new_thread(run, ())
+
+if __name__ == "__main__":
+ websocket.enableTrace(True)
+ ws = websocket.WebSocketApp("ws://echo.websocket.org/",
+ on_open = on_open,
+ on_message = on_message,
+ on_error = on_error,
+ on_close = on_close)
+
+ ws.run_forever()
+```
+
+### Short-lived Connection
+
+This is if you want to communicate a short message and disconnect
+immediately when done. For example, if you want to confirm that a WebSocket
+server is running and responds properly to a specific request.
+The project documentation has
+[additional examples](https://websocket-client.readthedocs.io/en/latest/examples.html)
+
+```python
+from websocket import create_connection
+ws = create_connection("ws://echo.websocket.org/")
+print("Sending 'Hello, World'...")
+ws.send("Hello, World")
+print("Sent")
+print("Receiving...")
+result = ws.recv()
+print("Received '%s'" % result)
+ws.close()
+```
+
+If you want to customize socket options, set sockopt, as seen below:
+
+```python
+from websocket import create_connection
+ws = create_connection("ws://echo.websocket.org/",
+ sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY),))
+```
+
+### Acknowledgements
+
+Thanks to @battlemidget and @ralphbean for helping migrate this project to
+Python 3.
+
+
diff --git a/contrib/python/websocket-client/py2/.dist-info/top_level.txt b/contrib/python/websocket-client/py2/.dist-info/top_level.txt
new file mode 100644
index 0000000000..ca4cb0cf82
--- /dev/null
+++ b/contrib/python/websocket-client/py2/.dist-info/top_level.txt
@@ -0,0 +1 @@
+websocket
diff --git a/contrib/python/websocket-client/py2/COPYING.LESSER b/contrib/python/websocket-client/py2/COPYING.LESSER
new file mode 100644
index 0000000000..67cd97bbc2
--- /dev/null
+++ b/contrib/python/websocket-client/py2/COPYING.LESSER
@@ -0,0 +1,503 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
diff --git a/contrib/python/websocket-client/py2/LICENSE b/contrib/python/websocket-client/py2/LICENSE
new file mode 100644
index 0000000000..67cd97bbc2
--- /dev/null
+++ b/contrib/python/websocket-client/py2/LICENSE
@@ -0,0 +1,503 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
diff --git a/contrib/python/websocket-client/py2/README.md b/contrib/python/websocket-client/py2/README.md
new file mode 100644
index 0000000000..d61578f116
--- /dev/null
+++ b/contrib/python/websocket-client/py2/README.md
@@ -0,0 +1,136 @@
+[![docs](https://readthedocs.org/projects/websocket-client/badge/?style=flat)](https://websocket-client.readthedocs.io/)
+[![Build Status](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml/badge.svg)](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml)
+[![codecov](https://codecov.io/gh/websocket-client/websocket-client/branch/master/graph/badge.svg?token=pcXhUQwiL3)](https://codecov.io/gh/websocket-client/websocket-client)
+[![PyPI Downloads](https://pepy.tech/badge/websocket-client)](https://pepy.tech/project/websocket-client)
+[![PyPI version](https://img.shields.io/pypi/v/websocket_client)](https://pypi.org/project/websocket_client/)
+
+# websocket-client
+
+websocket-client is a WebSocket client for Python. It provides access
+to low level APIs for WebSockets. websocket-client implements version
+[hybi-13](https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-13)
+of the WebSocket protocol. This client does not currently support the
+permessage-deflate extension from
+[RFC 7692](https://tools.ietf.org/html/rfc7692).
+
+## Documentation
+
+This project's documentation can be found at
+[https://websocket-client.readthedocs.io/](https://websocket-client.readthedocs.io/)
+
+## Contributing
+
+Please see the [contribution guidelines](https://github.com/websocket-client/websocket-client/blob/master/CONTRIBUTING.md)
+
+## Installation
+
+First, install the following dependencies:
+- six
+- backports.ssl\_match\_hostname for Python 2.x
+
+You can install the dependencies with the command `pip install six` and
+`pip install backports.ssl_match_hostname`
+
+You can use either `python setup.py install` or `pip install websocket-client`
+to install. This module is tested on Python 2.7 and Python 3.4+. Python 3
+support was first introduced in version 0.14.0, but is a work in progress.
+
+## Usage Tips
+
+Check out the documentation's FAQ for additional guidelines:
+[https://websocket-client.readthedocs.io/en/latest/faq.html](https://websocket-client.readthedocs.io/en/latest/faq.html)
+
+Known issues with this library include lack of WebSocket Compression
+support (RFC 7692) and [minimal threading documentation/support](https://websocket-client.readthedocs.io/en/latest/threading.html).
+
+## License
+
+- LGPL version 2.1
+
+### Performance
+
+The `send` and `validate_utf8` methods are very slow in pure Python. You can
+disable UTF8 validation in this library (and receive a performance enhancement)
+with the `skip_utf8_validation` parameter. If you want to get better
+performance, please install both numpy and wsaccel, and import them into your
+project files - these other libraries will automatically be used when available.
+Note that wsaccel can sometimes cause other issues.
+
+### Long-lived Connection
+
+Most real-world WebSockets situations involve longer-lived connections.
+The WebSocketApp `run_forever` loop automatically tries to reconnect when a
+connection is lost, and provides a variety of event-based connection controls.
+The project documentation has
+[additional examples](https://websocket-client.readthedocs.io/en/latest/examples.html)
+
+```python
+import websocket
+try:
+ import thread
+except ImportError:
+ import _thread as thread
+import time
+
+def on_message(ws, message):
+ print(message)
+
+def on_error(ws, error):
+ print(error)
+
+def on_close(ws):
+ print("### closed ###")
+
+def on_open(ws):
+ def run(*args):
+ for i in range(3):
+ time.sleep(1)
+ ws.send("Hello %d" % i)
+ time.sleep(1)
+ ws.close()
+ print("thread terminating...")
+ thread.start_new_thread(run, ())
+
+if __name__ == "__main__":
+ websocket.enableTrace(True)
+ ws = websocket.WebSocketApp("ws://echo.websocket.org/",
+ on_open = on_open,
+ on_message = on_message,
+ on_error = on_error,
+ on_close = on_close)
+
+ ws.run_forever()
+```
+
+### Short-lived Connection
+
+This is if you want to communicate a short message and disconnect
+immediately when done. For example, if you want to confirm that a WebSocket
+server is running and responds properly to a specific request.
+The project documentation has
+[additional examples](https://websocket-client.readthedocs.io/en/latest/examples.html)
+
+```python
+from websocket import create_connection
+ws = create_connection("ws://echo.websocket.org/")
+print("Sending 'Hello, World'...")
+ws.send("Hello, World")
+print("Sent")
+print("Receiving...")
+result = ws.recv()
+print("Received '%s'" % result)
+ws.close()
+```
+
+If you want to customize socket options, set sockopt, as seen below:
+
+```python
+from websocket import create_connection
+ws = create_connection("ws://echo.websocket.org/",
+ sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY),))
+```
+
+### Acknowledgements
+
+Thanks to @battlemidget and @ralphbean for helping migrate this project to
+Python 3.
diff --git a/contrib/python/websocket-client/py2/tests/ya.make b/contrib/python/websocket-client/py2/tests/ya.make
new file mode 100644
index 0000000000..7084ee13b0
--- /dev/null
+++ b/contrib/python/websocket-client/py2/tests/ya.make
@@ -0,0 +1,28 @@
+PY2TEST()
+
+PEERDIR(
+ contrib/python/PySocks
+ contrib/python/websocket-client
+)
+
+DATA(
+ arcadia/contrib/python/websocket-client/py2/websocket/tests/data
+)
+
+SRCDIR(
+ contrib/python/websocket-client/py2/websocket/tests
+)
+
+TEST_SRCS(
+ __init__.py
+ test_abnf.py
+ test_app.py
+ test_cookiejar.py
+ test_http.py
+ test_url.py
+ test_websocket.py
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/websocket-client/py2/websocket/__init__.py b/contrib/python/websocket-client/py2/websocket/__init__.py
new file mode 100644
index 0000000000..f2c7b44c17
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/__init__.py
@@ -0,0 +1,28 @@
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+from ._abnf import *
+from ._app import WebSocketApp
+from ._core import *
+from ._exceptions import *
+from ._logging import *
+from ._socket import *
+
+__version__ = "0.59.0"
diff --git a/contrib/python/websocket-client/py2/websocket/_abnf.py b/contrib/python/websocket-client/py2/websocket/_abnf.py
new file mode 100644
index 0000000000..80fbe1f9b9
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_abnf.py
@@ -0,0 +1,458 @@
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import array
+import os
+import struct
+
+import six
+
+from ._exceptions import *
+from ._utils import validate_utf8
+from threading import Lock
+
+try:
+ if six.PY3:
+ import numpy
+ else:
+ numpy = None
+except ImportError:
+ numpy = None
+
+try:
+ # If wsaccel is available we use compiled routines to mask data.
+ if not numpy:
+ from wsaccel.xormask import XorMaskerSimple
+
+ def _mask(_m, _d):
+ return XorMaskerSimple(_m).process(_d)
+except ImportError:
+ # wsaccel is not available, we rely on python implementations.
+ def _mask(_m, _d):
+ for i in range(len(_d)):
+ _d[i] ^= _m[i % 4]
+
+ if six.PY3:
+ return _d.tobytes()
+ else:
+ return _d.tostring()
+
+
+__all__ = [
+ 'ABNF', 'continuous_frame', 'frame_buffer',
+ 'STATUS_NORMAL',
+ 'STATUS_GOING_AWAY',
+ 'STATUS_PROTOCOL_ERROR',
+ 'STATUS_UNSUPPORTED_DATA_TYPE',
+ 'STATUS_STATUS_NOT_AVAILABLE',
+ 'STATUS_ABNORMAL_CLOSED',
+ 'STATUS_INVALID_PAYLOAD',
+ 'STATUS_POLICY_VIOLATION',
+ 'STATUS_MESSAGE_TOO_BIG',
+ 'STATUS_INVALID_EXTENSION',
+ 'STATUS_UNEXPECTED_CONDITION',
+ 'STATUS_BAD_GATEWAY',
+ 'STATUS_TLS_HANDSHAKE_ERROR',
+]
+
+# closing frame status codes.
+STATUS_NORMAL = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA_TYPE = 1003
+STATUS_STATUS_NOT_AVAILABLE = 1005
+STATUS_ABNORMAL_CLOSED = 1006
+STATUS_INVALID_PAYLOAD = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_INVALID_EXTENSION = 1010
+STATUS_UNEXPECTED_CONDITION = 1011
+STATUS_BAD_GATEWAY = 1014
+STATUS_TLS_HANDSHAKE_ERROR = 1015
+
+VALID_CLOSE_STATUS = (
+ STATUS_NORMAL,
+ STATUS_GOING_AWAY,
+ STATUS_PROTOCOL_ERROR,
+ STATUS_UNSUPPORTED_DATA_TYPE,
+ STATUS_INVALID_PAYLOAD,
+ STATUS_POLICY_VIOLATION,
+ STATUS_MESSAGE_TOO_BIG,
+ STATUS_INVALID_EXTENSION,
+ STATUS_UNEXPECTED_CONDITION,
+ STATUS_BAD_GATEWAY,
+)
+
+
+class ABNF(object):
+ """
+ ABNF frame class.
+ See http://tools.ietf.org/html/rfc5234
+ and http://tools.ietf.org/html/rfc6455#section-5.2
+ """
+
+ # operation code values.
+ OPCODE_CONT = 0x0
+ OPCODE_TEXT = 0x1
+ OPCODE_BINARY = 0x2
+ OPCODE_CLOSE = 0x8
+ OPCODE_PING = 0x9
+ OPCODE_PONG = 0xa
+
+ # available operation code value tuple
+ OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
+ OPCODE_PING, OPCODE_PONG)
+
+ # opcode human readable string
+ OPCODE_MAP = {
+ OPCODE_CONT: "cont",
+ OPCODE_TEXT: "text",
+ OPCODE_BINARY: "binary",
+ OPCODE_CLOSE: "close",
+ OPCODE_PING: "ping",
+ OPCODE_PONG: "pong"
+ }
+
+ # data length threshold.
+ LENGTH_7 = 0x7e
+ LENGTH_16 = 1 << 16
+ LENGTH_63 = 1 << 63
+
+ def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
+ opcode=OPCODE_TEXT, mask=1, data=""):
+ """
+ Constructor for ABNF. Please check RFC for arguments.
+ """
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.mask = mask
+ if data is None:
+ data = ""
+ self.data = data
+ self.get_mask_key = os.urandom
+
+ def validate(self, skip_utf8_validation=False):
+ """
+ Validate the ABNF frame.
+
+ Parameters
+ ----------
+ skip_utf8_validation: skip utf8 validation.
+ """
+ if self.rsv1 or self.rsv2 or self.rsv3:
+ raise WebSocketProtocolException("rsv is not implemented, yet")
+
+ if self.opcode not in ABNF.OPCODES:
+ raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
+
+ if self.opcode == ABNF.OPCODE_PING and not self.fin:
+ raise WebSocketProtocolException("Invalid ping frame.")
+
+ if self.opcode == ABNF.OPCODE_CLOSE:
+ l = len(self.data)
+ if not l:
+ return
+ if l == 1 or l >= 126:
+ raise WebSocketProtocolException("Invalid close frame.")
+ if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
+ raise WebSocketProtocolException("Invalid close frame.")
+
+ code = 256 * \
+ six.byte2int(self.data[0:1]) + six.byte2int(self.data[1:2])
+ if not self._is_valid_close_status(code):
+ raise WebSocketProtocolException("Invalid close opcode.")
+
+ @staticmethod
+ def _is_valid_close_status(code):
+ return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)
+
+ def __str__(self):
+ return "fin=" + str(self.fin) \
+ + " opcode=" + str(self.opcode) \
+ + " data=" + str(self.data)
+
+ @staticmethod
+ def create_frame(data, opcode, fin=1):
+ """
+ Create frame to send text, binary and other data.
+
+ Parameters
+ ----------
+ data: <type>
+ data to send. This is string value(byte array).
+ If opcode is OPCODE_TEXT and this value is unicode,
+ data value is converted into unicode string, automatically.
+ opcode: <type>
+ operation code. please see OPCODE_XXX.
+ fin: <type>
+ fin flag. if set to 0, create continue fragmentation.
+ """
+ if opcode == ABNF.OPCODE_TEXT and isinstance(data, six.text_type):
+ data = data.encode("utf-8")
+ # mask must be set if send data from client
+ return ABNF(fin, 0, 0, 0, opcode, 1, data)
+
+ def format(self):
+ """
+ Format this object to string(byte array) to send data to server.
+ """
+ if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
+ raise ValueError("not 0 or 1")
+ if self.opcode not in ABNF.OPCODES:
+ raise ValueError("Invalid OPCODE")
+ length = len(self.data)
+ if length >= ABNF.LENGTH_63:
+ raise ValueError("data is too long")
+
+ frame_header = chr(self.fin << 7 |
+ self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4 |
+ self.opcode)
+ if length < ABNF.LENGTH_7:
+ frame_header += chr(self.mask << 7 | length)
+ frame_header = six.b(frame_header)
+ elif length < ABNF.LENGTH_16:
+ frame_header += chr(self.mask << 7 | 0x7e)
+ frame_header = six.b(frame_header)
+ frame_header += struct.pack("!H", length)
+ else:
+ frame_header += chr(self.mask << 7 | 0x7f)
+ frame_header = six.b(frame_header)
+ frame_header += struct.pack("!Q", length)
+
+ if not self.mask:
+ return frame_header + self.data
+ else:
+ mask_key = self.get_mask_key(4)
+ return frame_header + self._get_masked(mask_key)
+
+ def _get_masked(self, mask_key):
+ s = ABNF.mask(mask_key, self.data)
+
+ if isinstance(mask_key, six.text_type):
+ mask_key = mask_key.encode('utf-8')
+
+ return mask_key + s
+
+ @staticmethod
+ def mask(mask_key, data):
+ """
+ Mask or unmask data. Just do xor for each byte
+
+ Parameters
+ ----------
+ mask_key: <type>
+ 4 byte string(byte).
+ data: <type>
+ data to mask/unmask.
+ """
+ if data is None:
+ data = ""
+
+ if isinstance(mask_key, six.text_type):
+ mask_key = six.b(mask_key)
+
+ if isinstance(data, six.text_type):
+ data = six.b(data)
+
+ if numpy:
+ origlen = len(data)
+ _mask_key = mask_key[3] << 24 | mask_key[2] << 16 | mask_key[1] << 8 | mask_key[0]
+
+ # We need data to be a multiple of four...
+ data += bytes(" " * (4 - (len(data) % 4)), "us-ascii")
+ a = numpy.frombuffer(data, dtype="uint32")
+ masked = numpy.bitwise_xor(a, [_mask_key]).astype("uint32")
+ if len(data) > origlen:
+ return masked.tobytes()[:origlen]
+ return masked.tobytes()
+ else:
+ _m = array.array("B", mask_key)
+ _d = array.array("B", data)
+ return _mask(_m, _d)
+
+
+class frame_buffer(object):
+ _HEADER_MASK_INDEX = 5
+ _HEADER_LENGTH_INDEX = 6
+
+ def __init__(self, recv_fn, skip_utf8_validation):
+ self.recv = recv_fn
+ self.skip_utf8_validation = skip_utf8_validation
+ # Buffers over the packets from the layer beneath until desired amount
+ # bytes of bytes are received.
+ self.recv_buffer = []
+ self.clear()
+ self.lock = Lock()
+
+ def clear(self):
+ self.header = None
+ self.length = None
+ self.mask = None
+
+ def has_received_header(self):
+ return self.header is None
+
+ def recv_header(self):
+ header = self.recv_strict(2)
+ b1 = header[0]
+
+ if six.PY2:
+ b1 = ord(b1)
+
+ fin = b1 >> 7 & 1
+ rsv1 = b1 >> 6 & 1
+ rsv2 = b1 >> 5 & 1
+ rsv3 = b1 >> 4 & 1
+ opcode = b1 & 0xf
+ b2 = header[1]
+
+ if six.PY2:
+ b2 = ord(b2)
+
+ has_mask = b2 >> 7 & 1
+ length_bits = b2 & 0x7f
+
+ self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)
+
+ def has_mask(self):
+ if not self.header:
+ return False
+ return self.header[frame_buffer._HEADER_MASK_INDEX]
+
+ def has_received_length(self):
+ return self.length is None
+
+ def recv_length(self):
+ bits = self.header[frame_buffer._HEADER_LENGTH_INDEX]
+ length_bits = bits & 0x7f
+ if length_bits == 0x7e:
+ v = self.recv_strict(2)
+ self.length = struct.unpack("!H", v)[0]
+ elif length_bits == 0x7f:
+ v = self.recv_strict(8)
+ self.length = struct.unpack("!Q", v)[0]
+ else:
+ self.length = length_bits
+
+ def has_received_mask(self):
+ return self.mask is None
+
+ def recv_mask(self):
+ self.mask = self.recv_strict(4) if self.has_mask() else ""
+
+ def recv_frame(self):
+
+ with self.lock:
+ # Header
+ if self.has_received_header():
+ self.recv_header()
+ (fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header
+
+ # Frame length
+ if self.has_received_length():
+ self.recv_length()
+ length = self.length
+
+ # Mask
+ if self.has_received_mask():
+ self.recv_mask()
+ mask = self.mask
+
+ # Payload
+ payload = self.recv_strict(length)
+ if has_mask:
+ payload = ABNF.mask(mask, payload)
+
+ # Reset for next frame
+ self.clear()
+
+ frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
+ frame.validate(self.skip_utf8_validation)
+
+ return frame
+
+ def recv_strict(self, bufsize):
+ shortage = bufsize - sum(len(x) for x in self.recv_buffer)
+ while shortage > 0:
+ # Limit buffer size that we pass to socket.recv() to avoid
+ # fragmenting the heap -- the number of bytes recv() actually
+ # reads is limited by socket buffer and is relatively small,
+ # yet passing large numbers repeatedly causes lots of large
+ # buffers allocated and then shrunk, which results in
+ # fragmentation.
+ bytes_ = self.recv(min(16384, shortage))
+ self.recv_buffer.append(bytes_)
+ shortage -= len(bytes_)
+
+ unified = six.b("").join(self.recv_buffer)
+
+ if shortage == 0:
+ self.recv_buffer = []
+ return unified
+ else:
+ self.recv_buffer = [unified[bufsize:]]
+ return unified[:bufsize]
+
+
+class continuous_frame(object):
+
+ def __init__(self, fire_cont_frame, skip_utf8_validation):
+ self.fire_cont_frame = fire_cont_frame
+ self.skip_utf8_validation = skip_utf8_validation
+ self.cont_data = None
+ self.recving_frames = None
+
+ def validate(self, frame):
+ if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT:
+ raise WebSocketProtocolException("Illegal frame")
+ if self.recving_frames and \
+ frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
+ raise WebSocketProtocolException("Illegal frame")
+
+ def add(self, frame):
+ if self.cont_data:
+ self.cont_data[1] += frame.data
+ else:
+ if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
+ self.recving_frames = frame.opcode
+ self.cont_data = [frame.opcode, frame.data]
+
+ if frame.fin:
+ self.recving_frames = None
+
+ def is_fire(self, frame):
+ return frame.fin or self.fire_cont_frame
+
+ def extract(self, frame):
+ data = self.cont_data
+ self.cont_data = None
+ frame.data = data[1]
+ if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not self.skip_utf8_validation and not validate_utf8(frame.data):
+ raise WebSocketPayloadException(
+ "cannot decode: " + repr(frame.data))
+
+ return [data[0], frame]
diff --git a/contrib/python/websocket-client/py2/websocket/_app.py b/contrib/python/websocket-client/py2/websocket/_app.py
new file mode 100644
index 0000000000..a3f91a38a3
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_app.py
@@ -0,0 +1,399 @@
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import inspect
+import select
+import sys
+import threading
+import time
+import traceback
+
+import six
+
+from ._abnf import ABNF
+from ._core import WebSocket, getdefaulttimeout
+from ._exceptions import *
+from . import _logging
+
+
+__all__ = ["WebSocketApp"]
+
+
+class Dispatcher:
+ """
+ Dispatcher
+ """
+ def __init__(self, app, ping_timeout):
+ self.app = app
+ self.ping_timeout = ping_timeout
+
+ def read(self, sock, read_callback, check_callback):
+ while self.app.keep_running:
+ r, w, e = select.select(
+ (self.app.sock.sock, ), (), (), self.ping_timeout)
+ if r:
+ if not read_callback():
+ break
+ check_callback()
+
+
+class SSLDispatcher:
+ """
+ SSLDispatcher
+ """
+ def __init__(self, app, ping_timeout):
+ self.app = app
+ self.ping_timeout = ping_timeout
+
+ def read(self, sock, read_callback, check_callback):
+ while self.app.keep_running:
+ r = self.select()
+ if r:
+ if not read_callback():
+ break
+ check_callback()
+
+ def select(self):
+ sock = self.app.sock.sock
+ if sock.pending():
+ return [sock,]
+
+ r, w, e = select.select((sock, ), (), (), self.ping_timeout)
+ return r
+
+
+class WebSocketApp(object):
+ """
+ Higher level of APIs are provided. The interface is like JavaScript WebSocket object.
+ """
+
+ def __init__(self, url, header=None,
+ on_open=None, on_message=None, on_error=None,
+ on_close=None, on_ping=None, on_pong=None,
+ on_cont_message=None,
+ keep_running=True, get_mask_key=None, cookie=None,
+ subprotocols=None,
+ on_data=None):
+ """
+ WebSocketApp initialization
+
+ Parameters
+ ----------
+ url: <type>
+ websocket url.
+ header: list or dict
+ custom header for websocket handshake.
+ on_open: <type>
+ callable object which is called at opening websocket.
+ this function has one argument. The argument is this class object.
+ on_message: <type>
+ callable object which is called when received data.
+ on_message has 2 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is utf-8 string which we get from the server.
+ on_error: <type>
+ callable object which is called when we get error.
+ on_error has 2 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is exception object.
+ on_close: <type>
+ callable object which is called when closed the connection.
+ this function has one argument. The argument is this class object.
+ on_cont_message: <type>
+ callback object which is called when receive continued
+ frame data.
+ on_cont_message has 3 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is utf-8 string which we get from the server.
+ The 3rd argument is continue flag. if 0, the data continue
+ to next frame data
+ on_data: <type>
+ callback object which is called when a message received.
+ This is called before on_message or on_cont_message,
+ and then on_message or on_cont_message is called.
+ on_data has 4 argument.
+ The 1st argument is this class object.
+ The 2nd argument is utf-8 string which we get from the server.
+ The 3rd argument is data type. ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be came.
+ The 4th argument is continue flag. if 0, the data continue
+ keep_running: <type>
+ this parameter is obsolete and ignored.
+ get_mask_key: func
+ a callable to produce new mask keys,
+ see the WebSocket.set_mask_key's docstring for more information
+ cookie: str
+ cookie value.
+ subprotocols: <type>
+ array of available sub protocols. default is None.
+ """
+ self.url = url
+ self.header = header if header is not None else []
+ self.cookie = cookie
+
+ self.on_open = on_open
+ self.on_message = on_message
+ self.on_data = on_data
+ self.on_error = on_error
+ self.on_close = on_close
+ self.on_ping = on_ping
+ self.on_pong = on_pong
+ self.on_cont_message = on_cont_message
+ self.keep_running = False
+ self.get_mask_key = get_mask_key
+ self.sock = None
+ self.last_ping_tm = 0
+ self.last_pong_tm = 0
+ self.subprotocols = subprotocols
+
+ def send(self, data, opcode=ABNF.OPCODE_TEXT):
+ """
+ send message
+
+ Parameters
+ ----------
+ data: <type>
+ Message to send. If you set opcode to OPCODE_TEXT,
+ data must be utf-8 string or unicode.
+ opcode: <type>
+ Operation code of data. default is OPCODE_TEXT.
+ """
+
+ if not self.sock or self.sock.send(data, opcode) == 0:
+ raise WebSocketConnectionClosedException(
+ "Connection is already closed.")
+
+ def close(self, **kwargs):
+ """
+ Close websocket connection.
+ """
+ self.keep_running = False
+ if self.sock:
+ self.sock.close(**kwargs)
+ self.sock = None
+
+ def _send_ping(self, interval, event, payload):
+ while not event.wait(interval):
+ self.last_ping_tm = time.time()
+ if self.sock:
+ try:
+ self.sock.ping(payload)
+ except Exception as ex:
+ _logging.warning("send_ping routine terminated: {}".format(ex))
+ break
+
+ def run_forever(self, sockopt=None, sslopt=None,
+ ping_interval=0, ping_timeout=None,
+ ping_payload="",
+ http_proxy_host=None, http_proxy_port=None,
+ http_no_proxy=None, http_proxy_auth=None,
+ skip_utf8_validation=False,
+ host=None, origin=None, dispatcher=None,
+ suppress_origin=False, proxy_type=None):
+ """
+ Run event loop for WebSocket framework.
+
+ This loop is an infinite loop and is alive while websocket is available.
+
+ Parameters
+ ----------
+ sockopt: tuple
+ values for socket.setsockopt.
+ sockopt must be tuple
+ and each element is argument of sock.setsockopt.
+ sslopt: dict
+ optional dict object for ssl socket option.
+ ping_interval: int or float
+ automatically send "ping" command
+ every specified period (in seconds)
+ if set to 0, not send automatically.
+ ping_timeout: int or float
+ timeout (in seconds) if the pong message is not received.
+ ping_payload: str
+ payload message to send with each ping.
+ http_proxy_host: <type>
+ http proxy host name.
+ http_proxy_port: <type>
+ http proxy port. If not set, set to 80.
+ http_no_proxy: <type>
+ host names, which doesn't use proxy.
+ skip_utf8_validation: bool
+ skip utf8 validation.
+ host: str
+ update host header.
+ origin: str
+ update origin header.
+ dispatcher: <type>
+ customize reading data from socket.
+ suppress_origin: bool
+ suppress outputting origin header.
+
+ Returns
+ -------
+ teardown: bool
+ False if caught KeyboardInterrupt, True if other exception was raised during a loop
+ """
+
+ if ping_timeout is not None and ping_timeout <= 0:
+ ping_timeout = None
+ if ping_timeout and ping_interval and ping_interval <= ping_timeout:
+ raise WebSocketException("Ensure ping_interval > ping_timeout")
+ if not sockopt:
+ sockopt = []
+ if not sslopt:
+ sslopt = {}
+ if self.sock:
+ raise WebSocketException("socket is already opened")
+ thread = None
+ self.keep_running = True
+ self.last_ping_tm = 0
+ self.last_pong_tm = 0
+
+ def teardown(close_frame=None):
+ """
+ Tears down the connection.
+
+ If close_frame is set, we will invoke the on_close handler with the
+ statusCode and reason from there.
+ """
+ if thread and thread.is_alive():
+ event.set()
+ thread.join()
+ self.keep_running = False
+ if self.sock:
+ self.sock.close()
+ close_args = self._get_close_args(
+ close_frame.data if close_frame else None)
+ self._callback(self.on_close, *close_args)
+ self.sock = None
+
+ try:
+ self.sock = WebSocket(
+ self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
+ fire_cont_frame=self.on_cont_message is not None,
+ skip_utf8_validation=skip_utf8_validation,
+ enable_multithread=True if ping_interval else False)
+ self.sock.settimeout(getdefaulttimeout())
+ self.sock.connect(
+ self.url, header=self.header, cookie=self.cookie,
+ http_proxy_host=http_proxy_host,
+ http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
+ http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
+ host=host, origin=origin, suppress_origin=suppress_origin,
+ proxy_type=proxy_type)
+ if not dispatcher:
+ dispatcher = self.create_dispatcher(ping_timeout)
+
+ self._callback(self.on_open)
+
+ if ping_interval:
+ event = threading.Event()
+ thread = threading.Thread(
+ target=self._send_ping, args=(ping_interval, event, ping_payload))
+ thread.daemon = True
+ thread.start()
+
+ def read():
+ if not self.keep_running:
+ return teardown()
+
+ op_code, frame = self.sock.recv_data_frame(True)
+ if op_code == ABNF.OPCODE_CLOSE:
+ return teardown(frame)
+ elif op_code == ABNF.OPCODE_PING:
+ self._callback(self.on_ping, frame.data)
+ elif op_code == ABNF.OPCODE_PONG:
+ self.last_pong_tm = time.time()
+ self._callback(self.on_pong, frame.data)
+ elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
+ self._callback(self.on_data, frame.data,
+ frame.opcode, frame.fin)
+ self._callback(self.on_cont_message,
+ frame.data, frame.fin)
+ else:
+ data = frame.data
+ if six.PY3 and op_code == ABNF.OPCODE_TEXT:
+ data = data.decode("utf-8")
+ self._callback(self.on_data, data, frame.opcode, True)
+ self._callback(self.on_message, data)
+
+ return True
+
+ def check():
+ if (ping_timeout):
+ has_timeout_expired = time.time() - self.last_ping_tm > ping_timeout
+ has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
+ has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > ping_timeout
+
+ if (self.last_ping_tm and
+ has_timeout_expired and
+ (has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
+ raise WebSocketTimeoutException("ping/pong timed out")
+ return True
+
+ dispatcher.read(self.sock.sock, read, check)
+ except (Exception, KeyboardInterrupt, SystemExit) as e:
+ self._callback(self.on_error, e)
+ if isinstance(e, SystemExit):
+ # propagate SystemExit further
+ raise
+ teardown()
+ return not isinstance(e, KeyboardInterrupt)
+
+ def create_dispatcher(self, ping_timeout):
+ timeout = ping_timeout or 10
+ if self.sock.is_ssl():
+ return SSLDispatcher(self, timeout)
+
+ return Dispatcher(self, timeout)
+
+ def _get_close_args(self, data):
+ """
+ _get_close_args extracts the code, reason from the close body
+ if they exists, and if the self.on_close except three arguments
+ """
+ # if the on_close callback is "old", just return empty list
+ if sys.version_info < (3, 0):
+ if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
+ return []
+ else:
+ if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
+ return []
+
+ if data and len(data) >= 2:
+ code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
+ reason = data[2:].decode('utf-8')
+ return [code, reason]
+
+ return [None, None]
+
+ def _callback(self, callback, *args):
+ if callback:
+ try:
+ callback(self, *args)
+
+ except Exception as e:
+ _logging.error("error from callback {}: {}".format(callback, e))
+ if _logging.isEnabledForDebug():
+ _, _, tb = sys.exc_info()
+ traceback.print_tb(tb)
diff --git a/contrib/python/websocket-client/py2/websocket/_cookiejar.py b/contrib/python/websocket-client/py2/websocket/_cookiejar.py
new file mode 100644
index 0000000000..bc2891a650
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_cookiejar.py
@@ -0,0 +1,78 @@
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+try:
+ import Cookie
+except:
+ import http.cookies as Cookie
+
+
class SimpleCookieJar(object):
    """Minimal in-memory cookie jar keyed by lower-cased, dot-prefixed
    cookie domain; sufficient for websocket handshakes.
    """

    def __init__(self):
        # Maps ".domain.tld" (always lower case) -> Cookie.SimpleCookie.
        self.jar = dict()

    def add(self, set_cookie):
        """Parse a Set-Cookie header value and merge its cookies into the
        jar, keeping previously stored cookies for the same domain.
        """
        if set_cookie:
            try:
                simpleCookie = Cookie.SimpleCookie(set_cookie)
            except Exception:
                # Some servers send values the parser chokes on; retry with
                # non-ascii characters stripped (legacy best-effort behavior).
                simpleCookie = Cookie.SimpleCookie(set_cookie.encode('ascii', 'ignore'))

            for k, v in simpleCookie.items():
                domain = v.get("domain")
                if domain:
                    if not domain.startswith("."):
                        domain = "." + domain
                    # Normalize case once, so lookup and storage use the same
                    # key.  Previously the lookup used the raw-case domain
                    # while storage used the lower-cased one, so cookies for
                    # "Example.com" silently clobbered ".example.com".
                    domain = domain.lower()
                    cookie = self.jar.get(domain) or Cookie.SimpleCookie()
                    cookie.update(simpleCookie)
                    self.jar[domain] = cookie

    def set(self, set_cookie):
        """Parse a Set-Cookie header value and replace all cookies stored
        for its domain (unlike add(), which merges).
        """
        if set_cookie:
            try:
                simpleCookie = Cookie.SimpleCookie(set_cookie)
            except Exception:
                simpleCookie = Cookie.SimpleCookie(set_cookie.encode('ascii', 'ignore'))

            for k, v in simpleCookie.items():
                domain = v.get("domain")
                if domain:
                    if not domain.startswith("."):
                        domain = "." + domain
                    self.jar[domain.lower()] = simpleCookie

    def get(self, host):
        """Return a "k=v; k2=v2" Cookie header string combining all jar
        entries whose domain matches *host* (empty string when none do).
        """
        if not host:
            return ""

        host = host.lower()  # hoisted: was recomputed on every iteration
        cookies = []
        for domain, simpleCookie in self.jar.items():
            # ".example.com" matches "www.example.com" (suffix match) and
            # also the bare "example.com" (domain[1:]).
            if host.endswith(domain) or host == domain[1:]:
                cookies.append(self.jar.get(domain))

        return "; ".join(filter(
            None, sorted(
                ["%s=%s" % (k, v.value) for cookie in filter(None, cookies) for k, v in cookie.items()]
            )))
diff --git a/contrib/python/websocket-client/py2/websocket/_core.py b/contrib/python/websocket-client/py2/websocket/_core.py
new file mode 100644
index 0000000000..1ff80f05d7
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_core.py
@@ -0,0 +1,595 @@
+from __future__ import print_function
+"""
+_core.py
+====================================
+WebSocket Python client
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import socket
+import struct
+import threading
+import time
+
+import six
+
+# websocket modules
+from ._abnf import *
+from ._exceptions import *
+from ._handshake import *
+from ._http import *
+from ._logging import *
+from ._socket import *
+from ._ssl_compat import *
+from ._utils import *
+
+__all__ = ['WebSocket', 'create_connection']
+
+
class WebSocket(object):
    """
    Low level WebSocket interface.

    This class is based on the WebSocket protocol `draft-hixie-thewebsocketprotocol-76 <http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76>`_

    We can connect to the websocket server and send/receive data.
    The following example is an echo client.

    >>> import websocket
    >>> ws = websocket.WebSocket()
    >>> ws.connect("ws://echo.websocket.org")
    >>> ws.send("Hello, Server")
    >>> ws.recv()
    'Hello, Server'
    >>> ws.close()

    Parameters
    ----------
    get_mask_key: func
        a callable to produce new mask keys, see the set_mask_key
        function's docstring for more details
    sockopt: tuple
        values for socket.setsockopt.
        sockopt must be tuple and each element is argument of sock.setsockopt.
    sslopt: dict
        optional dict object for ssl socket option.
    fire_cont_frame: bool
        fire recv event for each cont frame. default is False
    enable_multithread: bool
        if set to True, lock send method.
    skip_utf8_validation: bool
        skip utf8 validation.
    """

    def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
                 fire_cont_frame=False, enable_multithread=False,
                 skip_utf8_validation=False, **_):
        """
        Initialize WebSocket object.

        Parameters
        ----------
        sslopt: specify ssl certification verification options
        """
        self.sock_opt = sock_opt(sockopt, sslopt)
        self.handshake_response = None
        self.sock = None

        self.connected = False
        self.get_mask_key = get_mask_key
        # These buffer over the build-up of a single frame.
        self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
        self.cont_frame = continuous_frame(
            fire_cont_frame, skip_utf8_validation)

        if enable_multithread:
            # Separate locks so a blocking recv does not stall sends.
            self.lock = threading.Lock()
            self.readlock = threading.Lock()
        else:
            # NoLock is a no-op context manager, so single-threaded use
            # pays no locking cost.
            self.lock = NoLock()
            self.readlock = NoLock()

    def __iter__(self):
        """
        Allow iteration over websocket, implying sequential `recv` executions.
        """
        while True:
            yield self.recv()

    def __next__(self):
        return self.recv()

    def next(self):
        # Python 2 iterator protocol; delegates to __next__.
        return self.__next__()

    def fileno(self):
        return self.sock.fileno()

    def set_mask_key(self, func):
        """
        Set function to create mask key. You can customize mask key generator.
        Mainly, this is for testing purpose.

        Parameters
        ----------
        func: func
            callable object. the func takes 1 argument as integer.
            The argument means length of mask key.
            This func must return string(byte array),
            which length is argument specified.
        """
        self.get_mask_key = func

    def gettimeout(self):
        """
        Get the websocket timeout (in seconds) as an int or float

        Returns
        ----------
        timeout: int or float
            returns timeout value (in seconds). This value could be either float/integer.
        """
        return self.sock_opt.timeout

    def settimeout(self, timeout):
        """
        Set the timeout to the websocket.

        Parameters
        ----------
        timeout: int or float
            timeout time (in seconds). This value could be either float/integer.
        """
        self.sock_opt.timeout = timeout
        if self.sock:
            self.sock.settimeout(timeout)

    # Expose the timeout via attribute access as well (ws.timeout = 5).
    timeout = property(gettimeout, settimeout)

    def getsubprotocol(self):
        """
        Get subprotocol
        """
        if self.handshake_response:
            return self.handshake_response.subprotocol
        else:
            return None

    subprotocol = property(getsubprotocol)

    def getstatus(self):
        """
        Get handshake status
        """
        if self.handshake_response:
            return self.handshake_response.status
        else:
            return None

    status = property(getstatus)

    def getheaders(self):
        """
        Get handshake response header
        """
        if self.handshake_response:
            return self.handshake_response.headers
        else:
            return None

    def is_ssl(self):
        return isinstance(self.sock, ssl.SSLSocket)

    headers = property(getheaders)

    def connect(self, url, **options):
        """
        Connect to url. url is websocket url scheme.
        ie. ws://host:port/resource
        You can customize using 'options'.
        If you set "header" list object, you can set your own custom header.

        >>> ws = WebSocket()
        >>> ws.connect("ws://echo.websocket.org/",
        ...     header=["User-Agent: MyProgram",
        ...             "x-custom: header"])

        timeout: <type>
            socket timeout time. This value is an integer or float.
            if you set None for this value, it means "use default_timeout value"

        Parameters
        ----------
        options:
            - header: list or dict
                custom http header list or dict.
            - cookie: str
                cookie value.
            - origin: str
                custom origin url.
            - suppress_origin: bool
                suppress outputting origin header.
            - host: str
                custom host header string.
            - http_proxy_host: <type>
                http proxy host name.
            - http_proxy_port: <type>
                http proxy port. If not set, set to 80.
            - http_no_proxy: <type>
                host names, which doesn't use proxy.
            - http_proxy_auth: <type>
                http proxy auth information. tuple of username and password. default is None
            - redirect_limit: <type>
                number of redirects to follow.
            - subprotocols: <type>
                array of available sub protocols. default is None.
            - socket: <type>
                pre-initialized stream socket.
        """
        self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout)
        self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
                                   options.pop('socket', None))

        try:
            self.handshake_response = handshake(self.sock, *addrs, **options)
            # Follow HTTP redirects returned by the handshake, up to
            # redirect_limit times; each hop reconnects from scratch.
            for attempt in range(options.pop('redirect_limit', 3)):
                if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES:
                    url = self.handshake_response.headers['location']
                    self.sock.close()
                    self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
                                               options.pop('socket', None))
                    self.handshake_response = handshake(self.sock, *addrs, **options)
            self.connected = True
        except:
            # Ensure the socket is released before propagating any failure.
            if self.sock:
                self.sock.close()
            self.sock = None
            raise

    def send(self, payload, opcode=ABNF.OPCODE_TEXT):
        """
        Send the data as string.

        Parameters
        ----------
        payload: <type>
            Payload must be utf-8 string or unicode,
            if the opcode is OPCODE_TEXT.
            Otherwise, it must be string(byte array)
        opcode: <type>
            operation code to send. Please see OPCODE_XXX.
        """

        frame = ABNF.create_frame(payload, opcode)
        return self.send_frame(frame)

    def send_frame(self, frame):
        """
        Send the data frame.

        >>> ws = create_connection("ws://echo.websocket.org/")
        >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
        >>> ws.send_frame(frame)
        >>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
        >>> ws.send_frame(frame)
        >>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
        >>> ws.send_frame(frame)

        Parameters
        ----------
        frame: <type>
            frame data created by ABNF.create_frame
        """
        if self.get_mask_key:
            frame.get_mask_key = self.get_mask_key
        data = frame.format()
        length = len(data)
        if (isEnabledForTrace()):
            trace("send: " + repr(data))

        # Loop because _send may perform a partial write; hold the send
        # lock so concurrent senders cannot interleave frame bytes.
        with self.lock:
            while data:
                l = self._send(data)
                data = data[l:]

        return length

    def send_binary(self, payload):
        return self.send(payload, ABNF.OPCODE_BINARY)

    def ping(self, payload=""):
        """
        Send ping data.

        Parameters
        ----------
        payload: <type>
            data payload to send server.
        """
        if isinstance(payload, six.text_type):
            payload = payload.encode("utf-8")
        self.send(payload, ABNF.OPCODE_PING)

    def pong(self, payload=""):
        """
        Send pong data.

        Parameters
        ----------
        payload: <type>
            data payload to send server.
        """
        if isinstance(payload, six.text_type):
            payload = payload.encode("utf-8")
        self.send(payload, ABNF.OPCODE_PONG)

    def recv(self):
        """
        Receive string data(byte array) from the server.

        Returns
        ----------
        data: string (byte array) value.
        """
        # readlock serializes concurrent receivers; control frames are
        # handled inside recv_data and never returned here.
        with self.readlock:
            opcode, data = self.recv_data()
        if six.PY3 and opcode == ABNF.OPCODE_TEXT:
            return data.decode("utf-8")
        elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
            return data
        else:
            return ''

    def recv_data(self, control_frame=False):
        """
        Receive data with operation code.

        Parameters
        ----------
        control_frame: bool
            a boolean flag indicating whether to return control frame
            data, defaults to False

        Returns
        -------
        opcode, frame.data: tuple
            tuple of operation code and string(byte array) value.
        """
        opcode, frame = self.recv_data_frame(control_frame)
        return opcode, frame.data

    def recv_data_frame(self, control_frame=False):
        """
        Receive data with operation code.

        Parameters
        ----------
        control_frame: bool
            a boolean flag indicating whether to return control frame
            data, defaults to False

        Returns
        -------
        frame.opcode, frame: tuple
            tuple of operation code and string(byte array) value.
        """
        # Loop until a complete (possibly multi-frame) message or a
        # requested control frame is available; ping/pong are answered or
        # skipped transparently unless control_frame is True.
        while True:
            frame = self.recv_frame()
            if not frame:
                # handle error:
                # 'NoneType' object has no attribute 'opcode'
                raise WebSocketProtocolException(
                    "Not a valid frame %s" % frame)
            elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
                self.cont_frame.validate(frame)
                self.cont_frame.add(frame)

                if self.cont_frame.is_fire(frame):
                    return self.cont_frame.extract(frame)

            elif frame.opcode == ABNF.OPCODE_CLOSE:
                # Echo the close handshake back before surfacing the frame.
                self.send_close()
                return frame.opcode, frame
            elif frame.opcode == ABNF.OPCODE_PING:
                if len(frame.data) < 126:
                    self.pong(frame.data)
                else:
                    raise WebSocketProtocolException(
                        "Ping message is too long")
                if control_frame:
                    return frame.opcode, frame
            elif frame.opcode == ABNF.OPCODE_PONG:
                if control_frame:
                    return frame.opcode, frame

    def recv_frame(self):
        """
        Receive data as frame from server.

        Returns
        -------
        self.frame_buffer.recv_frame(): ABNF frame object
        """
        return self.frame_buffer.recv_frame()

    def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
        """
        Send close data to the server.

        Parameters
        ----------
        status: <type>
            status code to send. see STATUS_XXX.
        reason: str or bytes
            the reason to close. This must be string or bytes.
        """
        if status < 0 or status >= ABNF.LENGTH_16:
            raise ValueError("code is invalid range")
        self.connected = False
        # Close payload is the 2-byte big-endian status code plus reason.
        self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)

    def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
        """
        Close Websocket object

        Parameters
        ----------
        status: <type>
            status code to send. see STATUS_XXX.
        reason: <type>
            the reason to close. This must be string.
        timeout: int or float
            timeout until receive a close frame.
            If None, it will wait forever until receive a close frame.
        """
        if self.connected:
            if status < 0 or status >= ABNF.LENGTH_16:
                raise ValueError("code is invalid range")

            # Best-effort closing handshake: send our close frame, then wait
            # (up to timeout) for the peer's close frame; any error during
            # this exchange is deliberately swallowed.
            try:
                self.connected = False
                self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
                sock_timeout = self.sock.gettimeout()
                self.sock.settimeout(timeout)
                start_time = time.time()
                while timeout is None or time.time() - start_time < timeout:
                    try:
                        frame = self.recv_frame()
                        if frame.opcode != ABNF.OPCODE_CLOSE:
                            continue
                        if isEnabledForError():
                            recv_status = struct.unpack("!H", frame.data[0:2])[0]
                            # 3000-4999 are application/library codes; log quietly.
                            if recv_status >= 3000 and recv_status <= 4999:
                                debug("close status: " + repr(recv_status))
                            elif recv_status != STATUS_NORMAL:
                                error("close status: " + repr(recv_status))
                        break
                    except:
                        break
                self.sock.settimeout(sock_timeout)
                self.sock.shutdown(socket.SHUT_RDWR)
            except:
                pass

        self.shutdown()

    def abort(self):
        """
        Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
        """
        if self.connected:
            self.sock.shutdown(socket.SHUT_RDWR)

    def shutdown(self):
        """
        close socket, immediately.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
            self.connected = False

    def _send(self, data):
        return send(self.sock, data)

    def _recv(self, bufsize):
        try:
            return recv(self.sock, bufsize)
        except WebSocketConnectionClosedException:
            # Drop our reference and mark disconnected before re-raising so
            # the object is in a consistent state for the caller.
            if self.sock:
                self.sock.close()
            self.sock = None
            self.connected = False
            raise
+
+
def create_connection(url, timeout=None, class_=WebSocket, **options):
    """
    Connect to url and return a connected websocket object.

    Passing the optional timeout parameter sets the timeout on the socket;
    when it is None, the global default returned by getdefaulttimeout() is
    used instead.  Extra keyword options are forwarded both to the
    constructor of *class_* and to its connect() call.

    >>> conn = create_connection("ws://echo.websocket.org/",
    ...     header=["User-Agent: MyProgram",
    ...             "x-custom: header"])

    Parameters
    ----------
    timeout: int or float
        socket timeout time. This value could be either float/integer.
        if you set None for this value,
        it means "use default_timeout value"
    class_: <type>
        class to instantiate when creating the connection. It has to implement
        settimeout and connect. It's __init__ should be compatible with
        WebSocket.__init__, i.e. accept all of it's kwargs.
    options: <type>
        - header: list or dict of custom http headers.
        - cookie: str cookie value.
        - origin: str custom origin url.
        - suppress_origin: bool, suppress outputting origin header.
        - host: custom host header string.
        - http_proxy_host / http_proxy_port (default 80) /
          http_no_proxy / http_proxy_auth: proxy configuration.
        - enable_multithread: bool, enable lock for multithread.
        - redirect_limit: number of redirects to follow.
        - sockopt / sslopt: socket and ssl options.
        - subprotocols: array of available sub protocols. default is None.
        - skip_utf8_validation: bool, skip utf8 validation.
        - socket: pre-initialized stream socket.
    """
    # The constructor-only options are popped first so that the remaining
    # **options can be forwarded to connect() untouched.
    websock = class_(
        sockopt=options.pop("sockopt", []),
        sslopt=options.pop("sslopt", {}),
        fire_cont_frame=options.pop("fire_cont_frame", False),
        enable_multithread=options.pop("enable_multithread", False),
        skip_utf8_validation=options.pop("skip_utf8_validation", False),
        **options)
    websock.settimeout(getdefaulttimeout() if timeout is None else timeout)
    websock.connect(url, **options)
    return websock
diff --git a/contrib/python/websocket-client/py2/websocket/_exceptions.py b/contrib/python/websocket-client/py2/websocket/_exceptions.py
new file mode 100644
index 0000000000..83c6e42b7d
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_exceptions.py
@@ -0,0 +1,86 @@
+"""
+Define WebSocket exceptions
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+
class WebSocketException(Exception):
    """Base class for every error raised by the websocket library."""


class WebSocketProtocolException(WebSocketException):
    """Raised when the peer violates the WebSocket protocol."""


class WebSocketPayloadException(WebSocketException):
    """Raised when a frame payload is invalid."""


class WebSocketConnectionClosedException(WebSocketException):
    """Raised when the remote host closed the connection or some network
    error happened."""


class WebSocketTimeoutException(WebSocketException):
    """Raised on socket timeout while reading or writing data."""


class WebSocketProxyException(WebSocketException):
    """Raised when a proxy error occurred."""


class WebSocketBadStatusException(WebSocketException):
    """Raised when the handshake yields an unexpected HTTP status code.

    The formatted message is built as ``message % (status_code,
    status_message)``; the status code and response headers are kept on
    the instance for inspection.
    """

    def __init__(self, message, status_code, status_message=None, resp_headers=None):
        formatted = message % (status_code, status_message)
        super(WebSocketBadStatusException, self).__init__(formatted)
        self.status_code = status_code
        self.resp_headers = resp_headers


class WebSocketAddressException(WebSocketException):
    """Raised when address info for the websocket host cannot be found."""
diff --git a/contrib/python/websocket-client/py2/websocket/_handshake.py b/contrib/python/websocket-client/py2/websocket/_handshake.py
new file mode 100644
index 0000000000..c4d9d169da
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_handshake.py
@@ -0,0 +1,212 @@
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import hashlib
+import hmac
+import os
+
+import six
+
+from ._cookiejar import SimpleCookieJar
+from ._exceptions import *
+from ._http import *
+from ._logging import *
+from ._socket import *
+
+if hasattr(six, 'PY3') and six.PY3:
+ from base64 import encodebytes as base64encode
+else:
+ from base64 import encodestring as base64encode
+
+if hasattr(six, 'PY3') and six.PY3:
+ if hasattr(six, 'PY34') and six.PY34:
+ from http import client as HTTPStatus
+ else:
+ from http import HTTPStatus
+else:
+ import httplib as HTTPStatus
+
+__all__ = ["handshake_response", "handshake", "SUPPORTED_REDIRECT_STATUSES"]
+
try:
    # Python >= 2.7.7 / 3.3 provides a timing-safe comparison.
    compare_digest = hmac.compare_digest
except AttributeError:
    def compare_digest(s1, s2):
        # Fallback for very old interpreters; not timing-safe.
        return s1 == s2
+
# websocket supported version.
VERSION = 13

# 301/302/303 handshakes make the caller retry at the "location" header
# target; 101 (Switching Protocols) is the success status.
SUPPORTED_REDIRECT_STATUSES = (HTTPStatus.MOVED_PERMANENTLY, HTTPStatus.FOUND, HTTPStatus.SEE_OTHER,)
SUCCESS_STATUSES = SUPPORTED_REDIRECT_STATUSES + (HTTPStatus.SWITCHING_PROTOCOLS,)

# Process-wide cookie jar shared by every handshake in this process.
CookieJar = SimpleCookieJar()
+
+
class handshake_response(object):
    # Lightweight record of the server's handshake reply: HTTP status,
    # response headers and the negotiated subprotocol (or None).
    # Any Set-Cookie header is also absorbed into the module-level CookieJar.

    def __init__(self, status, headers, subprotocol):
        self.status = status
        self.headers = headers
        self.subprotocol = subprotocol
        CookieJar.add(headers.get("set-cookie"))
+
+
def handshake(sock, hostname, port, resource, **options):
    # Perform the client side of the opening handshake on an already
    # connected socket and return a handshake_response.
    headers, key = _get_handshake_headers(resource, hostname, port, options)

    header_str = "\r\n".join(headers)
    send(sock, header_str)
    dump("request header", header_str)

    status, resp = _get_resp_headers(sock)
    if status in SUPPORTED_REDIRECT_STATUSES:
        # Redirect: hand status/headers back so WebSocket.connect can follow
        # the "location" header; no accept-key validation is possible yet.
        return handshake_response(status, resp, None)
    success, subproto = _validate(resp, key, options.get("subprotocols"))
    if not success:
        raise WebSocketException("Invalid WebSocket Header")

    return handshake_response(status, resp, subproto)
+
+
+def _pack_hostname(hostname):
+ # IPv6 address
+ if ':' in hostname:
+ return '[' + hostname + ']'
+
+ return hostname
+
+
def _get_handshake_headers(resource, host, port, options):
    """
    Build the request lines for the opening handshake.

    Returns (headers, key): *headers* is the list of raw request lines,
    terminated by two empty strings so that joining with CRLF produces
    the final blank line, and *key* is the Sec-WebSocket-Key value used
    later to validate the server's Sec-WebSocket-Accept.
    """
    headers = [
        "GET %s HTTP/1.1" % resource,
        "Upgrade: websocket"
    ]
    # Omit default ports from the Host header, per HTTP convention.
    if port == 80 or port == 443:
        hostport = _pack_hostname(host)
    else:
        hostport = "%s:%d" % (_pack_hostname(host), port)
    if "host" in options and options["host"] is not None:
        headers.append("Host: %s" % options["host"])
    else:
        headers.append("Host: %s" % hostport)

    if "suppress_origin" not in options or not options["suppress_origin"]:
        if "origin" in options and options["origin"] is not None:
            headers.append("Origin: %s" % options["origin"])
        else:
            headers.append("Origin: http://%s" % hostport)

    # Append Sec-WebSocket-Key & Sec-WebSocket-Version if not manually
    # specified.  (The original code generated an extra key unconditionally
    # before this branch; it was always discarded, so it has been removed.)
    if 'header' not in options or 'Sec-WebSocket-Key' not in options['header']:
        key = _create_sec_websocket_key()
        headers.append("Sec-WebSocket-Key: %s" % key)
    else:
        key = options['header']['Sec-WebSocket-Key']

    if 'header' not in options or 'Sec-WebSocket-Version' not in options['header']:
        headers.append("Sec-WebSocket-Version: %s" % VERSION)

    if 'connection' not in options or options['connection'] is None:
        headers.append('Connection: Upgrade')
    else:
        headers.append(options['connection'])

    subprotocols = options.get("subprotocols")
    if subprotocols:
        headers.append("Sec-WebSocket-Protocol: %s" % ",".join(subprotocols))

    if "header" in options:
        header = options["header"]
        if isinstance(header, dict):
            header = [
                ": ".join([k, v])
                for k, v in header.items()
                if v is not None
            ]
        headers.extend(header)

    # Merge jar cookies for this host with an explicitly supplied cookie.
    server_cookie = CookieJar.get(host)
    client_cookie = options.get("cookie", None)

    cookie = "; ".join(filter(None, [server_cookie, client_cookie]))

    if cookie:
        headers.append("Cookie: %s" % cookie)

    headers.append("")
    headers.append("")

    return headers, key
+
+
def _get_resp_headers(sock, success_statuses=SUCCESS_STATUSES):
    # Read the HTTP status line and headers of the handshake response and
    # reject any status outside the accepted set (101 + redirects).
    status, resp_headers, status_message = read_headers(sock)
    if status not in success_statuses:
        raise WebSocketBadStatusException("Handshake status %d %s", status, status_message, resp_headers)
    return status, resp_headers
+
+
# Response headers that must contain these values (within a possibly
# comma-separated list, compared case-insensitively) for the upgrade to be
# considered successful.
_HEADERS_TO_CHECK = {
    "upgrade": "websocket",
    "connection": "upgrade",
}
+
+
def _validate(headers, key, subprotocols):
    # Validate the server's handshake response headers (RFC 6455 sec. 4.1).
    # Returns (success, negotiated_subprotocol_or_None).
    subproto = None
    for k, v in _HEADERS_TO_CHECK.items():
        r = headers.get(k, None)
        if not r:
            return False, None
        # Header values may be comma-separated lists; match case-insensitively.
        r = [x.strip().lower() for x in r.split(',')]
        if v not in r:
            return False, None

    if subprotocols:
        # The server must have picked one of the subprotocols we offered.
        subproto = headers.get("sec-websocket-protocol", None)
        if not subproto or subproto.lower() not in [s.lower() for s in subprotocols]:
            error("Invalid subprotocol: " + str(subprotocols))
            return False, None
        subproto = subproto.lower()

    result = headers.get("sec-websocket-accept", None)
    if not result:
        return False, None
    result = result.lower()

    if isinstance(result, six.text_type):
        result = result.encode('utf-8')

    # Expected accept value is base64(sha1(key + magic GUID)); both sides
    # are lower-cased above and compared in constant time where available.
    value = (key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode('utf-8')
    hashed = base64encode(hashlib.sha1(value).digest()).strip().lower()
    success = compare_digest(hashed, result)

    if success:
        return True, subproto
    else:
        return False, None
+
+
+def _create_sec_websocket_key():
+ randomness = os.urandom(16)
+ return base64encode(randomness).decode('utf-8').strip()
diff --git a/contrib/python/websocket-client/py2/websocket/_http.py b/contrib/python/websocket-client/py2/websocket/_http.py
new file mode 100644
index 0000000000..b0dad48ce0
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_http.py
@@ -0,0 +1,335 @@
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import errno
+import os
+import socket
+import sys
+
+import six
+
+from ._exceptions import *
+from ._logging import *
+from ._socket import*
+from ._ssl_compat import *
+from ._url import *
+
+if six.PY3:
+ from base64 import encodebytes as base64encode
+else:
+ from base64 import encodestring as base64encode
+
+__all__ = ["proxy_info", "connect", "read_headers"]
+
# PySocks is an optional dependency used only for SOCKS proxying; without
# it the library still works, and HAS_PYSOCKS lets callers fail cleanly.
try:
    import socks
    ProxyConnectionError = socks.ProxyConnectionError
    HAS_PYSOCKS = True
except:
    # Stand-in so `except ProxyConnectionError` remains valid syntax even
    # when PySocks is absent (it can then never be raised).
    class ProxyConnectionError(BaseException):
        pass
    HAS_PYSOCKS = False
+
+
class proxy_info(object):
    """Normalized view of the proxy-related keyword options.

    Recognized options: proxy_type, http_proxy_host, http_proxy_port,
    http_proxy_auth and http_no_proxy.  When no proxy host is supplied,
    all proxy fields are reset to their inactive defaults.
    """

    def __init__(self, **options):
        self.type = options.get("proxy_type") or "http"
        if self.type not in ('http', 'socks4', 'socks5', 'socks5h'):
            raise ValueError("proxy_type must be 'http', 'socks4', 'socks5' or 'socks5h'")
        self.host = options.get("http_proxy_host", None)
        if not self.host:
            # No proxy configured: neutral defaults.
            self.port = 0
            self.auth = None
            self.no_proxy = None
        else:
            self.port = options.get("http_proxy_port", 0)
            self.auth = options.get("http_proxy_auth", None)
            self.no_proxy = options.get("http_no_proxy", None)
+
+
def _open_proxied_socket(url, options, proxy):
    # Open the TCP (and optionally TLS) connection through a SOCKS/HTTP
    # proxy using the optional PySocks package.
    hostname, port, resource, is_secure = parse_url(url)

    if not HAS_PYSOCKS:
        raise WebSocketException("PySocks module not found.")

    ptype = socks.SOCKS5
    rdns = False
    if proxy.type == "socks4":
        ptype = socks.SOCKS4
    if proxy.type == "http":
        ptype = socks.HTTP
    if proxy.type[-1] == "h":
        # Trailing "h" (e.g. "socks5h") => resolve hostnames on the proxy.
        rdns = True

    sock = socks.create_connection(
        (hostname, port),
        proxy_type=ptype,
        proxy_addr=proxy.host,
        proxy_port=proxy.port,
        proxy_rdns=rdns,
        proxy_username=proxy.auth[0] if proxy.auth else None,
        proxy_password=proxy.auth[1] if proxy.auth else None,
        timeout=options.timeout,
        socket_options=DEFAULT_SOCKET_OPTION + options.sockopt
    )

    if is_secure:
        if HAVE_SSL:
            sock = _ssl_socket(sock, options.sslopt, hostname)
        else:
            raise WebSocketException("SSL not available.")

    return sock, (hostname, port, resource)
+
+
def connect(url, options, proxy, socket):
    # Open the transport for a websocket URL and return
    # (sock, (hostname, port, resource)).
    # NOTE: the ``socket`` parameter shadows the stdlib module inside this
    # function; here it is a pre-opened socket object (or None).
    if proxy.host and not socket and not (proxy.type == 'http'):
        # SOCKS proxying is delegated entirely to PySocks.
        return _open_proxied_socket(url, options, proxy)

    hostname, port, resource, is_secure = parse_url(url)

    if socket:
        # Caller supplied a pre-established socket; use it as-is.
        return socket, (hostname, port, resource)

    addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
        hostname, port, is_secure, proxy)
    if not addrinfo_list:
        raise WebSocketException(
            "Host not found.: " + hostname + ":" + str(port))

    sock = None
    try:
        sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
        if need_tunnel:
            # HTTP proxy: issue a CONNECT request before anything else.
            sock = _tunnel(sock, hostname, port, auth)

        if is_secure:
            if HAVE_SSL:
                sock = _ssl_socket(sock, options.sslopt, hostname)
            else:
                raise WebSocketException("SSL not available.")

        return sock, (hostname, port, resource)
    except:
        # Never leak a half-open socket on failure.
        if sock:
            sock.close()
        raise
+
+
def _get_addrinfo_list(hostname, port, is_secure, proxy):
    """
    Resolve the address to actually connect to.

    Returns (addrinfo_list, need_tunnel, auth): the getaddrinfo results for
    either the target host or the HTTP proxy, whether an HTTP CONNECT
    tunnel is required, and the proxy auth tuple (or None).

    Raises WebSocketAddressException when name resolution fails.
    """
    phost, pport, pauth = get_proxy_info(
        hostname, is_secure, proxy.host, proxy.port, proxy.auth, proxy.no_proxy)
    try:
        # On Windows 10, getaddrinfo without a socktype can return socktype 0,
        # which later fails socket creation with "Socket type must be stream
        # or datagram, not 0" / EINVAL -- force SOCK_STREAM explicitly.
        if not phost:
            addrinfo_list = socket.getaddrinfo(
                hostname, port, 0, socket.SOCK_STREAM, socket.SOL_TCP)
            return addrinfo_list, False, None
        else:
            pport = pport or 80  # default HTTP proxy port
            addrinfo_list = socket.getaddrinfo(phost, pport, 0, socket.SOCK_STREAM, socket.SOL_TCP)
            return addrinfo_list, True, pauth
    except socket.gaierror as e:
        raise WebSocketAddressException(e)
+
+
def _open_socket(addrinfo_list, sockopt, timeout):
    # Try each resolved address in turn until one connects.  Uses Python's
    # for/else and while/else clauses: the inner while retries a single
    # address on EINTR, the outer for moves on to the next address when the
    # connection was refused, and the trailing for/else raises the last
    # recorded error when every address failed.
    err = None
    for addrinfo in addrinfo_list:
        family, socktype, proto = addrinfo[:3]
        sock = socket.socket(family, socktype, proto)
        sock.settimeout(timeout)
        for opts in DEFAULT_SOCKET_OPTION:
            sock.setsockopt(*opts)
        for opts in sockopt:
            sock.setsockopt(*opts)

        address = addrinfo[4]
        err = None
        while not err:
            try:
                sock.connect(address)
            except ProxyConnectionError as error:
                err = WebSocketProxyException(str(error))
                err.remote_ip = str(address[0])
                continue
            except socket.error as error:
                # Record the peer address on the exception for diagnostics.
                error.remote_ip = str(address[0])
                try:
                    # errno.WSAECONNREFUSED only exists on Windows.
                    eConnRefused = (errno.ECONNREFUSED, errno.WSAECONNREFUSED)
                except:
                    eConnRefused = (errno.ECONNREFUSED, )
                if error.errno == errno.EINTR:
                    # Interrupted system call: retry the same address.
                    continue
                elif error.errno in eConnRefused:
                    err = error
                    continue
                else:
                    raise error
            else:
                # Connected successfully: leave the retry loop.
                break
        else:
            # Connection refused here: try the next address.
            continue
        break
    else:
        if err:
            raise err

    return sock
+
+
def _can_use_sni():
    """Return True when the running interpreter supports SNI
    (Python >= 2.7.9 on the 2.x line, or any Python >= 3.2)."""
    if six.PY2:
        return sys.version_info >= (2, 7, 9)
    return sys.version_info >= (3, 2)
+
+
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
    # Wrap *sock* with an SSLContext so the server_hostname (SNI) is sent;
    # all certificate options come from the sslopt dict.
    context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_SSLv23))

    if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
        cafile = sslopt.get('ca_certs', None)
        capath = sslopt.get('ca_cert_path', None)
        if cafile or capath:
            context.load_verify_locations(cafile=cafile, capath=capath)
        elif hasattr(context, 'load_default_certs'):
            # Fall back to the system trust store when none was supplied.
            context.load_default_certs(ssl.Purpose.SERVER_AUTH)
    if sslopt.get('certfile', None):
        context.load_cert_chain(
            sslopt['certfile'],
            sslopt.get('keyfile', None),
            sslopt.get('password', None),
        )
    # see
    # https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
    context.verify_mode = sslopt['cert_reqs']
    if HAVE_CONTEXT_CHECK_HOSTNAME:
        context.check_hostname = check_hostname
    if 'ciphers' in sslopt:
        context.set_ciphers(sslopt['ciphers'])
    if 'cert_chain' in sslopt:
        certfile, keyfile, password = sslopt['cert_chain']
        context.load_cert_chain(certfile, keyfile, password)
    if 'ecdh_curve' in sslopt:
        context.set_ecdh_curve(sslopt['ecdh_curve'])

    return context.wrap_socket(
        sock,
        do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
        suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
        server_hostname=hostname,
    )
+
+
def _ssl_socket(sock, user_sslopt, hostname):
    # Upgrade *sock* to TLS.  Certificate verification defaults to
    # CERT_REQUIRED unless the caller's sslopt overrides it.
    sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
    sslopt.update(user_sslopt)

    # WEBSOCKET_CLIENT_CA_BUNDLE may point at a CA bundle file or directory
    # and is used only when the caller did not specify one explicitly.
    certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
    if certPath and os.path.isfile(certPath) \
            and user_sslopt.get('ca_certs', None) is None \
            and user_sslopt.get('ca_cert', None) is None:
        sslopt['ca_certs'] = certPath
    elif certPath and os.path.isdir(certPath) \
            and user_sslopt.get('ca_cert_path', None) is None:
        sslopt['ca_cert_path'] = certPath

    # Hostname checking only makes sense when certificates are verified.
    check_hostname = sslopt["cert_reqs"] != ssl.CERT_NONE and sslopt.pop(
        'check_hostname', True)

    if _can_use_sni():
        sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
    else:
        # Old interpreters: no SNI; drop the unsupported option.
        sslopt.pop('check_hostname', True)
        sock = ssl.wrap_socket(sock, **sslopt)

    if not HAVE_CONTEXT_CHECK_HOSTNAME and check_hostname:
        # Manual hostname check when the ssl module cannot do it itself.
        match_hostname(sock.getpeercert(), hostname)

    return sock
+
+
def _tunnel(sock, host, port, auth):
    # Establish an HTTP CONNECT tunnel through a proxy; returns the same
    # socket once the proxy has confirmed the tunnel with a 200 response.
    debug("Connecting proxy...")
    connect_header = "CONNECT %s:%d HTTP/1.1\r\n" % (host, port)
    connect_header += "Host: %s:%d\r\n" % (host, port)

    # TODO: support digest auth.
    if auth and auth[0]:
        auth_str = auth[0]
        if auth[1]:
            auth_str += ":" + auth[1]
        # encodebytes inserts newlines every 76 chars; strip them out.
        encoded_str = base64encode(auth_str.encode()).strip().decode().replace('\n', '')
        connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
    connect_header += "\r\n"
    dump("request header", connect_header)

    send(sock, connect_header)

    try:
        status, resp_headers, status_message = read_headers(sock)
    except Exception as e:
        raise WebSocketProxyException(str(e))

    if status != 200:
        raise WebSocketProxyException(
            "failed CONNECT via proxy status: %r" % status)

    return sock
+
+
def read_headers(sock):
    # Read the HTTP status line and header block from *sock*.
    # Returns (status_code, headers_dict, status_message); header names are
    # lower-cased and repeated Set-Cookie values are merged with "; ".
    status = None
    status_message = None
    headers = {}
    trace("--- response header ---")

    while True:
        line = recv_line(sock)
        line = line.decode('utf-8').strip()
        if not line:
            # Blank line terminates the header section.
            break
        trace(line)
        if not status:

            # First line is the status line: "HTTP/1.1 200 OK".
            status_info = line.split(" ", 2)
            status = int(status_info[1])
            if len(status_info) > 2:
                status_message = status_info[2]
        else:
            kv = line.split(":", 1)
            if len(kv) == 2:
                key, value = kv
                if key.lower() == "set-cookie" and headers.get("set-cookie"):
                    headers["set-cookie"] = headers.get("set-cookie") + "; " + value.strip()
                else:
                    headers[key.lower()] = value.strip()
            else:
                raise WebSocketException("Invalid header")

    trace("-----------------------")

    return status, headers, status_message
diff --git a/contrib/python/websocket-client/py2/websocket/_logging.py b/contrib/python/websocket-client/py2/websocket/_logging.py
new file mode 100644
index 0000000000..07d9009031
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_logging.py
@@ -0,0 +1,92 @@
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import logging
+
# Shared library-wide logger; applications configure handlers on it.
_logger = logging.getLogger('websocket')
try:
    from logging import NullHandler
except ImportError:
    # logging.NullHandler appeared in Python 2.7; provide a fallback
    # for older interpreters.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Attach a do-nothing handler so the library stays silent unless the
# application configures logging itself.
_logger.addHandler(NullHandler())

# Global switch flipped by enableTrace(); checked by dump()/trace().
_traceEnabled = False
+
+__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
+ "isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
+
+
def enableTrace(traceable, handler=logging.StreamHandler()):
    """
    Enable or disable verbose tracing of websocket traffic.

    Parameters
    ----------
    traceable: bool
        True switches tracing on; False switches it off.
    """
    global _traceEnabled
    _traceEnabled = traceable
    if not traceable:
        return
    _logger.addHandler(handler)
    _logger.setLevel(logging.DEBUG)
+
+
def dump(title, message):
    """When tracing is enabled, log *message* framed by a *title* banner."""
    if not _traceEnabled:
        return
    _logger.debug("--- " + title + " ---")
    _logger.debug(message)
    _logger.debug("-----------------------")
+
+
def error(msg):
    """Log *msg* at ERROR level on the shared 'websocket' logger."""
    _logger.error(msg)
+
+
def warning(msg):
    """Log *msg* at WARNING level on the shared 'websocket' logger."""
    _logger.warning(msg)
+
+
def debug(msg):
    """Log *msg* at DEBUG level on the shared 'websocket' logger."""
    _logger.debug(msg)
+
+
def trace(msg):
    """Log *msg* at DEBUG level, but only when tracing is enabled."""
    if _traceEnabled:
        _logger.debug(msg)
+
+
def isEnabledForError():
    """Return True if the 'websocket' logger would emit ERROR records."""
    return _logger.isEnabledFor(logging.ERROR)
+
+
def isEnabledForDebug():
    """Return True if the 'websocket' logger would emit DEBUG records."""
    return _logger.isEnabledFor(logging.DEBUG)
+
+
def isEnabledForTrace():
    """Return True if verbose tracing was switched on via enableTrace()."""
    return _traceEnabled
diff --git a/contrib/python/websocket-client/py2/websocket/_socket.py b/contrib/python/websocket-client/py2/websocket/_socket.py
new file mode 100644
index 0000000000..2c383ed4d3
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_socket.py
@@ -0,0 +1,176 @@
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import errno
+import select
+import socket
+
+import six
+
+from ._exceptions import *
+from ._ssl_compat import *
+from ._utils import *
+
# Disable Nagle's algorithm by default to minimise send latency.
DEFAULT_SOCKET_OPTION = [(socket.SOL_TCP, socket.TCP_NODELAY, 1)]
# TCP keepalive knobs are platform dependent; add each option only when
# this platform's socket module exposes it.
if hasattr(socket, "SO_KEEPALIVE"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
if hasattr(socket, "TCP_KEEPIDLE"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPIDLE, 30))
if hasattr(socket, "TCP_KEEPINTVL"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPINTVL, 10))
if hasattr(socket, "TCP_KEEPCNT"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPCNT, 3))

# Module-wide default connect timeout in seconds; None means OS default.
_default_timeout = None
+
+__all__ = ["DEFAULT_SOCKET_OPTION", "sock_opt", "setdefaulttimeout", "getdefaulttimeout",
+ "recv", "recv_line", "send"]
+
+
class sock_opt(object):
    """Per-connection socket configuration.

    sockopt: list of (level, optname, value) tuples for setsockopt.
    sslopt: dict of SSL options.
    timeout: per-connection timeout, set later by the connection code.
    """

    def __init__(self, sockopt, sslopt):
        self.sockopt = [] if sockopt is None else sockopt
        self.sslopt = {} if sslopt is None else sslopt
        self.timeout = None
+
+
def setdefaulttimeout(timeout):
    """
    Set the global timeout setting to connect.

    Parameters
    ----------
    timeout: int or float
        default socket timeout time (in seconds)
    """
    # Stored module-wide; new connections pick it up via getdefaulttimeout().
    global _default_timeout
    _default_timeout = timeout
+
+
def getdefaulttimeout():
    """
    Get default timeout

    Returns
    ----------
    _default_timeout: int or float
        Return the global timeout setting (in seconds) to connect.
    """
    # None means no library-level default was set (OS default applies).
    return _default_timeout
+
+
def recv(sock, bufsize):
    """
    Read up to *bufsize* bytes from *sock* and return them.

    A socket with a zero timeout is read directly (non-blocking mode is
    the caller's responsibility); otherwise EAGAIN/EWOULDBLOCK and
    SSLWantReadError are retried once after select() reports the socket
    readable.

    Raises
    ------
    WebSocketConnectionClosedException
        If *sock* is falsy or the peer closed the connection.
    WebSocketTimeoutException
        If the read timed out.
    """
    if not sock:
        raise WebSocketConnectionClosedException("socket is already closed.")

    def _recv():
        try:
            return sock.recv(bufsize)
        except SSLWantReadError:
            pass
        except socket.error as exc:
            error_code = extract_error_code(exc)
            if error_code is None:
                raise
            # BUG FIX: this used "or", which is always true (a code can
            # never equal both constants), so every socket.error re-raised
            # and the select()-based retry below was unreachable.
            if error_code not in (errno.EAGAIN, errno.EWOULDBLOCK):
                raise

        # Wait until the socket is readable (or the timeout expires),
        # then retry the read once.
        r, w, e = select.select((sock, ), (), (), sock.gettimeout())
        if r:
            return sock.recv(bufsize)

    try:
        if sock.gettimeout() == 0:
            bytes_ = sock.recv(bufsize)
        else:
            bytes_ = _recv()
    except socket.timeout as e:
        message = extract_err_message(e)
        raise WebSocketTimeoutException(message)
    except SSLError as e:
        message = extract_err_message(e)
        if isinstance(message, str) and 'timed out' in message:
            raise WebSocketTimeoutException(message)
        else:
            raise

    if not bytes_:
        # Empty read means the peer performed an orderly shutdown.
        raise WebSocketConnectionClosedException(
            "Connection is already closed.")

    return bytes_
+
+
def recv_line(sock):
    """Read bytes from *sock* one at a time until (and including) a
    newline, and return the accumulated byte string."""
    newline = six.b("\n")
    fragments = []
    while True:
        chunk = recv(sock, 1)
        fragments.append(chunk)
        if chunk == newline:
            break
    return six.b("").join(fragments)
+
+
def send(sock, data):
    """
    Send *data* (text is UTF-8 encoded first) on *sock* and return the
    number of bytes written.

    A socket with a zero timeout is written directly; otherwise
    EAGAIN/EWOULDBLOCK and SSLWantWriteError are retried once after
    select() reports the socket writable.

    Raises
    ------
    WebSocketConnectionClosedException
        If *sock* is falsy.
    WebSocketTimeoutException
        If the write timed out.
    """
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')

    if not sock:
        raise WebSocketConnectionClosedException("socket is already closed.")

    def _send():
        try:
            return sock.send(data)
        except SSLWantWriteError:
            pass
        except socket.error as exc:
            error_code = extract_error_code(exc)
            if error_code is None:
                raise
            # BUG FIX: this used "or", which is always true, so every
            # socket.error re-raised and the select()-based retry below
            # was unreachable.
            if error_code not in (errno.EAGAIN, errno.EWOULDBLOCK):
                raise

        # Wait until the socket is writable (or the timeout expires),
        # then retry the write once.
        r, w, e = select.select((), (sock, ), (), sock.gettimeout())
        if w:
            return sock.send(data)

    try:
        if sock.gettimeout() == 0:
            return sock.send(data)
        else:
            return _send()
    except socket.timeout as e:
        message = extract_err_message(e)
        raise WebSocketTimeoutException(message)
    except Exception as e:
        message = extract_err_message(e)
        if isinstance(message, str) and "timed out" in message:
            raise WebSocketTimeoutException(message)
        else:
            raise
diff --git a/contrib/python/websocket-client/py2/websocket/_ssl_compat.py b/contrib/python/websocket-client/py2/websocket/_ssl_compat.py
new file mode 100644
index 0000000000..9e201ddf00
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_ssl_compat.py
@@ -0,0 +1,53 @@
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
__all__ = ["HAVE_SSL", "ssl", "SSLError", "SSLWantReadError", "SSLWantWriteError"]

try:
    import ssl
    from ssl import SSLError
    from ssl import SSLWantReadError
    from ssl import SSLWantWriteError
    if hasattr(ssl, 'SSLContext') and hasattr(ssl.SSLContext, 'check_hostname'):
        HAVE_CONTEXT_CHECK_HOSTNAME = True
    else:
        HAVE_CONTEXT_CHECK_HOSTNAME = False
    if hasattr(ssl, "match_hostname"):
        from ssl import match_hostname
    else:
        from backports.ssl_match_hostname import match_hostname
    __all__.append("match_hostname")
    __all__.append("HAVE_CONTEXT_CHECK_HOSTNAME")

    HAVE_SSL = True
except ImportError:
    # dummy class of SSLError for ssl none-support environment.
    class SSLError(Exception):
        pass

    class SSLWantReadError(Exception):
        pass

    class SSLWantWriteError(Exception):
        pass

    ssl = None

    # BUG FIX: _http.py reads HAVE_CONTEXT_CHECK_HOSTNAME after
    # "from ._ssl_compat import *"; without defining and exporting it
    # here, an environment lacking the ssl module raised NameError
    # instead of degrading gracefully.
    HAVE_CONTEXT_CHECK_HOSTNAME = False
    __all__.append("HAVE_CONTEXT_CHECK_HOSTNAME")

    HAVE_SSL = False
diff --git a/contrib/python/websocket-client/py2/websocket/_url.py b/contrib/python/websocket-client/py2/websocket/_url.py
new file mode 100644
index 0000000000..92ff939e39
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_url.py
@@ -0,0 +1,178 @@
+"""
+
+"""
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+import os
+import socket
+import struct
+
+from six.moves.urllib.parse import urlparse
+
+
+__all__ = ["parse_url", "get_proxy_info"]
+
+
def parse_url(url):
    """
    Parse a ws/wss URL into the tuple
    (hostname, port, resource path, is_secure).

    Parameters
    ----------
    url: str
        URL string to parse.

    Raises ValueError when the scheme or hostname is missing, or the
    scheme is neither "ws" nor "wss".
    """
    if ":" not in url:
        raise ValueError("url is invalid")

    scheme, rest = url.split(":", 1)

    parsed = urlparse(rest, scheme="http")
    if not parsed.hostname:
        raise ValueError("hostname is invalid")
    hostname = parsed.hostname
    port = parsed.port if parsed.port else 0

    is_secure = False
    if scheme == "ws":
        if not port:
            port = 80
    elif scheme == "wss":
        is_secure = True
        if not port:
            port = 443
    else:
        raise ValueError("scheme %s is invalid" % scheme)

    resource = parsed.path if parsed.path else "/"
    if parsed.query:
        resource += "?" + parsed.query

    return hostname, port, resource, is_secure
+
+
+DEFAULT_NO_PROXY_HOST = ["localhost", "127.0.0.1"]
+
+
+def _is_ip_address(addr):
+ try:
+ socket.inet_aton(addr)
+ except socket.error:
+ return False
+ else:
+ return True
+
+
def _is_subnet_address(hostname):
    """Return True if *hostname* looks like an IPv4 CIDR block ("a.b.c.d/n")."""
    try:
        addr, mask_bits = hostname.split("/")
        mask_ok = 0 <= int(mask_bits) < 32
    except ValueError:
        # Either no "/" separator or a non-numeric mask.
        return False
    return _is_ip_address(addr) and mask_ok
+
+
+def _is_address_in_network(ip, net):
+ ipaddr = struct.unpack('!I', socket.inet_aton(ip))[0]
+ netaddr, netmask = net.split('/')
+ netaddr = struct.unpack('!I', socket.inet_aton(netaddr))[0]
+
+ netmask = (0xFFFFFFFF << (32 - int(netmask))) & 0xFFFFFFFF
+ return ipaddr & netmask == netaddr
+
+
+def _is_no_proxy_host(hostname, no_proxy):
+ if not no_proxy:
+ v = os.environ.get("no_proxy", "").replace(" ", "")
+ if v:
+ no_proxy = v.split(",")
+ if not no_proxy:
+ no_proxy = DEFAULT_NO_PROXY_HOST
+
+ if '*' in no_proxy:
+ return True
+ if hostname in no_proxy:
+ return True
+ if _is_ip_address(hostname):
+ return any([_is_address_in_network(hostname, subnet) for subnet in no_proxy if _is_subnet_address(subnet)])
+ for domain in [domain for domain in no_proxy if domain.startswith('.')]:
+ if hostname.endswith(domain):
+ return True
+ return False
+
+
def get_proxy_info(
        hostname, is_secure, proxy_host=None, proxy_port=0, proxy_auth=None,
        no_proxy=None, proxy_type='http'):
    """
    Try to retrieve proxy host and port from environment
    if not provided in options.
    Result is (proxy_host, proxy_port, proxy_auth).
    proxy_auth is tuple of username and password
    of proxy authentication information.

    Parameters
    ----------
    hostname: str
        Websocket server name.
    is_secure: bool
        Is the connection secure? (wss) looks for "https_proxy" in env
        before falling back to "http_proxy".
    proxy_host: str
        HTTP proxy host name; when set, the environment is not consulted.
    proxy_port: int or str
        HTTP proxy port.
    proxy_auth: tuple
        HTTP proxy auth information: (username, password); default None.
    no_proxy: list
        Host names which don't use the proxy; falls back to the
        ``no_proxy`` environment variable, then DEFAULT_NO_PROXY_HOST.
    proxy_type: str
        Accepted for API compatibility but unused in this function —
        presumably consumed by the connection code; verify against callers.
    """
    if _is_no_proxy_host(hostname, no_proxy):
        return None, 0, None

    if proxy_host:
        port = proxy_port
        auth = proxy_auth
        return proxy_host, port, auth

    # Environment fallback: prefer https_proxy for secure connections.
    env_keys = ["http_proxy"]
    if is_secure:
        env_keys.insert(0, "https_proxy")

    for key in env_keys:
        value = os.environ.get(key, None)
        if value:
            proxy = urlparse(value)
            auth = (proxy.username, proxy.password) if proxy.username else None
            return proxy.hostname, proxy.port, auth

    return None, 0, None
diff --git a/contrib/python/websocket-client/py2/websocket/_utils.py b/contrib/python/websocket-client/py2/websocket/_utils.py
new file mode 100644
index 0000000000..0072bce8ac
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/_utils.py
@@ -0,0 +1,110 @@
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import six
+
+__all__ = ["NoLock", "validate_utf8", "extract_err_message", "extract_error_code"]
+
+
class NoLock(object):
    """No-op context manager: a drop-in stand-in for a real lock when
    no synchronisation is required."""

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        pass
+
+
try:
    # If wsaccel is available we use compiled routines to validate UTF-8
    # strings.
    from wsaccel.utf8validator import Utf8Validator

    def _validate_utf8(utfbytes):
        # validate() returns (is_valid, ...); only the flag is needed.
        return Utf8Validator().validate(utfbytes)[0]

except ImportError:
    # UTF-8 validator
    # python implementation of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/

    # DFA states: ACCEPT means a complete code point was consumed;
    # REJECT is a dead state reached on any invalid byte sequence.
    _UTF8_ACCEPT = 0
    _UTF8_REJECT = 12

    _UTF8D = [
        # The first part of the table maps bytes to character classes that
        # to reduce the size of the transition table and create bitmasks.
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
        8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
        10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,

        # The second part is a transition table that maps a combination
        # of a state of the automaton and a character class to a state.
        0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12,
        12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12,
        12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12,
        12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12,
        12,36,12,12,12,12,12,12,12,12,12,12, ]

    def _decode(state, codep, ch):
        # Look up the character class of byte ch, fold the byte's payload
        # bits into the code point accumulator, then step the DFA.
        tp = _UTF8D[ch]

        codep = (ch & 0x3f) | (codep << 6) if (
            state != _UTF8_ACCEPT) else (0xff >> tp) & ch
        state = _UTF8D[256 + state + tp]

        return state, codep

    def _validate_utf8(utfbytes):
        # Feed every byte through the DFA; entering the REJECT state at
        # any point means the input is not valid UTF-8.
        state = _UTF8_ACCEPT
        codep = 0
        for i in utfbytes:
            if six.PY2:
                i = ord(i)
            state, codep = _decode(state, codep, i)
            if state == _UTF8_REJECT:
                return False

        return True
+
+
def validate_utf8(utfbytes):
    """
    Validate a UTF-8 byte string.

    utfbytes: byte string to check.
    Returns True if *utfbytes* is valid UTF-8, otherwise False.
    """
    return _validate_utf8(utfbytes)
+
+
def extract_err_message(exception):
    """Return the first argument of *exception*, or None if it has none."""
    return exception.args[0] if exception.args else None
+
+
def extract_error_code(exception):
    """Return the integer error code from an exception raised with
    (errno, message) style args, or None when there is no such code."""
    args = exception.args
    if len(args) > 1 and isinstance(args[0], int):
        return args[0]
    return None
diff --git a/contrib/python/websocket-client/py2/websocket/tests/__init__.py b/contrib/python/websocket-client/py2/websocket/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/__init__.py
diff --git a/contrib/python/websocket-client/py2/websocket/tests/data/header01.txt b/contrib/python/websocket-client/py2/websocket/tests/data/header01.txt
new file mode 100644
index 0000000000..d44d24c205
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/data/header01.txt
@@ -0,0 +1,6 @@
+HTTP/1.1 101 WebSocket Protocol Handshake
+Connection: Upgrade
+Upgrade: WebSocket
+Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0=
+some_header: something
+
diff --git a/contrib/python/websocket-client/py2/websocket/tests/data/header02.txt b/contrib/python/websocket-client/py2/websocket/tests/data/header02.txt
new file mode 100644
index 0000000000..f481de928a
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/data/header02.txt
@@ -0,0 +1,6 @@
+HTTP/1.1 101 WebSocket Protocol Handshake
+Connection: Upgrade
+Upgrade WebSocket
+Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0=
+some_header: something
+
diff --git a/contrib/python/websocket-client/py2/websocket/tests/data/header03.txt b/contrib/python/websocket-client/py2/websocket/tests/data/header03.txt
new file mode 100644
index 0000000000..012b7d18dd
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/data/header03.txt
@@ -0,0 +1,6 @@
+HTTP/1.1 101 WebSocket Protocol Handshake
+Connection: Upgrade, Keep-Alive
+Upgrade: WebSocket
+Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0=
+some_header: something
+
diff --git a/contrib/python/websocket-client/py2/websocket/tests/test_abnf.py b/contrib/python/websocket-client/py2/websocket/tests/test_abnf.py
new file mode 100644
index 0000000000..acce020682
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/test_abnf.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+#
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+import os
+import websocket as ws
+from websocket._abnf import *
+import sys
+sys.path[0:0] = [""]
+
+if sys.version_info[0] == 2 and sys.version_info[1] < 7:
+ import unittest2 as unittest
+else:
+ import unittest
+
+
class ABNFTest(unittest.TestCase):
    """Unit tests for the websocket._abnf frame model (ABNF) and the
    incremental frame_buffer parser."""

    def testInit(self):
        # The constructor stores header fields verbatim and performs no
        # validation (even for an unknown opcode).
        a = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
        self.assertEqual(a.fin, 0)
        self.assertEqual(a.rsv1, 0)
        self.assertEqual(a.rsv2, 0)
        self.assertEqual(a.rsv3, 0)
        self.assertEqual(a.opcode, 9)
        self.assertEqual(a.data, '')
        a_bad = ABNF(0,1,0,0, opcode=77)
        self.assertEqual(a_bad.rsv1, 1)
        self.assertEqual(a_bad.opcode, 77)

    def testValidate(self):
        # validate() must reject each of these malformed frames.
        a = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
        self.assertRaises(ws.WebSocketProtocolException, a.validate)
        a_bad = ABNF(0,1,0,0, opcode=77)
        self.assertRaises(ws.WebSocketProtocolException, a_bad.validate)
        a_close = ABNF(0,1,0,0, opcode=ABNF.OPCODE_CLOSE, data="abcdefgh1234567890abcdefgh1234567890abcdefgh1234567890abcdefgh1234567890")
        self.assertRaises(ws.WebSocketProtocolException, a_close.validate)

    # This caused an error in the Python 2.7 Github Actions build
    # Uncomment test case when Python 2 support no longer wanted
    # def testMask(self):
    #     ab = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
    #     bytes_val = bytes("aaaa", 'utf-8')
    #     self.assertEqual(ab._get_masked(bytes_val), bytes_val)

    def testFrameBuffer(self):
        fb = frame_buffer(0, True)
        self.assertEqual(fb.recv, 0)
        self.assertEqual(fb.skip_utf8_validation, True)
        # BUG FIX: "fb.clear" was a bare attribute access (a no-op);
        # the intent is to invoke clear() before checking the reset state.
        fb.clear()
        self.assertEqual(fb.header, None)
        self.assertEqual(fb.length, None)
        self.assertEqual(fb.mask, None)
        self.assertEqual(fb.has_mask(), False)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py2/websocket/tests/test_app.py b/contrib/python/websocket-client/py2/websocket/tests/test_app.py
new file mode 100644
index 0000000000..e5a739008e
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/test_app.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+#
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+import os
+import os.path
+import websocket as ws
+import sys
+sys.path[0:0] = [""]
+
+try:
+ import ssl
+except ImportError:
+ HAVE_SSL = False
+
+if sys.version_info[0] == 2 and sys.version_info[1] < 7:
+ import unittest2 as unittest
+else:
+ import unittest
+
+# Skip test to access the internet.
+TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
+TRACEABLE = True
+
+
class WebSocketAppTest(unittest.TestCase):
    """Integration tests for WebSocketApp against public echo servers.

    Every network test is skipped unless the TEST_WITH_INTERNET
    environment variable is set to "1".
    """

    class NotSetYet(object):
        """ A marker class for signalling that a value hasn't been set yet.
        """

    def setUp(self):
        ws.enableTrace(TRACEABLE)

        # Reset class-level result slots before each test.
        WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
        WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
        WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()

    def tearDown(self):
        WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
        WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
        WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testKeepRunning(self):
        """ A WebSocketApp should keep running as long as its self.keep_running
        is not False (in the boolean context).
        """

        def on_open(self, *args, **kwargs):
            """ Set the keep_running flag for later inspection and immediately
            close the connection.
            """
            WebSocketAppTest.keep_running_open = self.keep_running

            self.close()

        def on_close(self, *args, **kwargs):
            """ Set the keep_running flag for the test to use.
            """
            WebSocketAppTest.keep_running_close = self.keep_running

        app = ws.WebSocketApp('ws://echo.websocket.org/', on_open=on_open, on_close=on_close)
        app.run_forever()

        # if numpy is installed, this assertion fail
        # self.assertFalse(isinstance(WebSocketAppTest.keep_running_open,
        #                             WebSocketAppTest.NotSetYet))

        # self.assertFalse(isinstance(WebSocketAppTest.keep_running_close,
        #                             WebSocketAppTest.NotSetYet))

        # self.assertEqual(True, WebSocketAppTest.keep_running_open)
        # self.assertEqual(False, WebSocketAppTest.keep_running_close)

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testSockMaskKey(self):
        """ A WebSocketApp should forward the received mask_key function down
        to the actual socket.
        """

        def my_mask_key_func():
            pass

        def on_open(self, *args, **kwargs):
            """ Set the value so the test can use it later on and immediately
            close the connection.
            """
            WebSocketAppTest.get_mask_key_id = id(self.get_mask_key)
            self.close()

        app = ws.WebSocketApp('ws://echo.websocket.org/', on_open=on_open, get_mask_key=my_mask_key_func)
        app.run_forever()

        # if numpy is installed, this assertion fail
        # Note: We can't use 'is' for comparing the functions directly, need to use 'id'.
        # self.assertEqual(WebSocketAppTest.get_mask_key_id, id(my_mask_key_func))

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testPingInterval(self):
        """ A WebSocketApp should ping regularly
        """

        def on_ping(app, msg):
            print("Got a ping!")
            app.close()

        def on_pong(app, msg):
            print("Got a pong! No need to respond")
            app.close()

        app = ws.WebSocketApp('wss://api-pub.bitfinex.com/ws/1', on_ping=on_ping, on_pong=on_pong)
        app.run_forever(ping_interval=2, ping_timeout=1)  # , sslopt={"cert_reqs": ssl.CERT_NONE}
        self.assertRaises(ws.WebSocketException, app.run_forever, ping_interval=2, ping_timeout=3, sslopt={"cert_reqs": ssl.CERT_NONE})
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py2/websocket/tests/test_cookiejar.py b/contrib/python/websocket-client/py2/websocket/tests/test_cookiejar.py
new file mode 100644
index 0000000000..fc66e58b0e
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/test_cookiejar.py
@@ -0,0 +1,117 @@
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+import unittest
+
+from websocket._cookiejar import SimpleCookieJar
+
+
class CookieJarTest(unittest.TestCase):
    """Unit tests for SimpleCookieJar: add/set/get keyed by domain,
    where a leading dot and its absence are treated as the same domain."""

    def testAdd(self):
        """add() merges new cookie values into any existing ones for a domain."""
        cookie_jar = SimpleCookieJar()
        cookie_jar.add("")
        self.assertFalse(cookie_jar.jar, "Cookie with no domain should not be added to the jar")

        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b")
        self.assertFalse(cookie_jar.jar, "Cookie with no domain should not be added to the jar")

        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b; domain=.abc")
        self.assertTrue(".abc" in cookie_jar.jar)

        # Domains are normalised to the dotted form internally.
        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b; domain=abc")
        self.assertTrue(".abc" in cookie_jar.jar)
        self.assertTrue("abc" not in cookie_jar.jar)

        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b; c=d; domain=abc")
        self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")

        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b; c=d; domain=abc")
        cookie_jar.add("e=f; domain=abc")
        self.assertEqual(cookie_jar.get("abc"), "a=b; c=d; e=f")

        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b; c=d; domain=abc")
        cookie_jar.add("e=f; domain=.abc")
        self.assertEqual(cookie_jar.get("abc"), "a=b; c=d; e=f")

        cookie_jar = SimpleCookieJar()
        cookie_jar.add("a=b; c=d; domain=abc")
        cookie_jar.add("e=f; domain=xyz")
        self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")
        self.assertEqual(cookie_jar.get("xyz"), "e=f")
        self.assertEqual(cookie_jar.get("something"), "")

    def testSet(self):
        """set() replaces any existing cookie values for a domain."""
        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b")
        self.assertFalse(cookie_jar.jar, "Cookie with no domain should not be added to the jar")

        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; domain=.abc")
        self.assertTrue(".abc" in cookie_jar.jar)

        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; domain=abc")
        self.assertTrue(".abc" in cookie_jar.jar)
        self.assertTrue("abc" not in cookie_jar.jar)

        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; c=d; domain=abc")
        self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")

        # Unlike add(), a second set() overwrites the previous values.
        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; c=d; domain=abc")
        cookie_jar.set("e=f; domain=abc")
        self.assertEqual(cookie_jar.get("abc"), "e=f")

        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; c=d; domain=abc")
        cookie_jar.set("e=f; domain=.abc")
        self.assertEqual(cookie_jar.get("abc"), "e=f")

        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; c=d; domain=abc")
        cookie_jar.set("e=f; domain=xyz")
        self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")
        self.assertEqual(cookie_jar.get("xyz"), "e=f")
        self.assertEqual(cookie_jar.get("something"), "")

    def testGet(self):
        """get() matches a host and its subdomains, but not lookalike hosts."""
        cookie_jar = SimpleCookieJar()
        cookie_jar.set("a=b; c=d; domain=abc.com")
        self.assertEqual(cookie_jar.get("abc.com"), "a=b; c=d")
        self.assertEqual(cookie_jar.get("x.abc.com"), "a=b; c=d")
        self.assertEqual(cookie_jar.get("abc.com.es"), "")
        self.assertEqual(cookie_jar.get("xabc.com"), "")

        cookie_jar.set("a=b; c=d; domain=.abc.com")
        self.assertEqual(cookie_jar.get("abc.com"), "a=b; c=d")
        self.assertEqual(cookie_jar.get("x.abc.com"), "a=b; c=d")
        self.assertEqual(cookie_jar.get("abc.com.es"), "")
        self.assertEqual(cookie_jar.get("xabc.com"), "")
diff --git a/contrib/python/websocket-client/py2/websocket/tests/test_http.py b/contrib/python/websocket-client/py2/websocket/tests/test_http.py
new file mode 100644
index 0000000000..f08bd0c91c
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/test_http.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+#
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+import os
+import os.path
+import websocket as ws
+from websocket._http import proxy_info, read_headers, _open_proxied_socket, _tunnel
+import sys
+sys.path[0:0] = [""]
+
+if sys.version_info[0] == 2 and sys.version_info[1] < 7:
+ import unittest2 as unittest
+else:
+ import unittest
+
+
+class SockMock(object):
+ def __init__(self):
+ self.data = []
+ self.sent = []
+
+ def add_packet(self, data):
+ self.data.append(data)
+
+ def gettimeout(self):
+ return None
+
+ def recv(self, bufsize):
+ if self.data:
+ e = self.data.pop(0)
+ if isinstance(e, Exception):
+ raise e
+ if len(e) > bufsize:
+ self.data.insert(0, e[bufsize:])
+ return e[:bufsize]
+
+ def send(self, data):
+ self.sent.append(data)
+ return len(data)
+
+ def close(self):
+ pass
+
+
+class HeaderSockMock(SockMock):
+
+ def __init__(self, fname):
+ SockMock.__init__(self)
+ import yatest.common
+ path = yatest.common.source_path(os.path.join('contrib/python/websocket-client/py2/websocket/tests', fname))
+ with open(path, "rb") as f:
+ self.add_packet(f.read())
+
+
+class OptsList():
+
+ def __init__(self):
+ self.timeout = 0
+ self.sockopt = []
+
+
+class HttpTest(unittest.TestCase):
+
+ def testReadHeader(self):
+ status, header, status_message = read_headers(HeaderSockMock("data/header01.txt"))
+ self.assertEqual(status, 101)
+ self.assertEqual(header["connection"], "Upgrade")
+ # header02.txt is intentionally malformed
+ self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))
+
+ def testTunnel(self):
+ self.assertRaises(ws.WebSocketProxyException, _tunnel, HeaderSockMock("data/header01.txt"), "example.com", 80, ("username", "password"))
+ self.assertRaises(ws.WebSocketProxyException, _tunnel, HeaderSockMock("data/header02.txt"), "example.com", 80, ("username", "password"))
+
+ def _testConnect(self):
+ # Not currently testing an actual proxy connection, so just check whether TypeError is raised
+ self.assertRaises(TypeError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="http"))
+ self.assertRaises(TypeError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks4"))
+ self.assertRaises(TypeError, _open_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks5h"))
+
+ def testProxyInfo(self):
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").type, "http")
+ self.assertRaises(ValueError, proxy_info, http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="badval")
+ self.assertEqual(proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="http").host, "example.com")
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").port, "8080")
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").auth, None)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py2/websocket/tests/test_url.py b/contrib/python/websocket-client/py2/websocket/tests/test_url.py
new file mode 100644
index 0000000000..b1d8e06f23
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/test_url.py
@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+#
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+import sys
+import os
+
+from websocket._url import get_proxy_info, parse_url, _is_address_in_network, _is_no_proxy_host
+
+if sys.version_info[0] == 2 and sys.version_info[1] < 7:
+ import unittest2 as unittest
+else:
+ import unittest
+sys.path[0:0] = [""]
+
+
+class UrlTest(unittest.TestCase):
+
+ def test_address_in_network(self):
+ self.assertTrue(_is_address_in_network('127.0.0.1', '127.0.0.0/8'))
+ self.assertTrue(_is_address_in_network('127.1.0.1', '127.0.0.0/8'))
+ self.assertFalse(_is_address_in_network('127.1.0.1', '127.0.0.0/24'))
+
+ def testParseUrl(self):
+ p = parse_url("ws://www.example.com/r")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com/r/")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/r/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com/")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com:8080/r")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com:8080/")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com:8080")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("wss://www.example.com:8080/r")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], True)
+
+ p = parse_url("wss://www.example.com:8080/r?key=value")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r?key=value")
+ self.assertEqual(p[3], True)
+
+ self.assertRaises(ValueError, parse_url, "http://www.example.com/r")
+
+ if sys.version_info[0] == 2 and sys.version_info[1] < 7:
+ return
+
+ p = parse_url("ws://[2a03:4000:123:83::3]/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://[2a03:4000:123:83::3]:8080/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("wss://[2a03:4000:123:83::3]/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 443)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], True)
+
+ p = parse_url("wss://[2a03:4000:123:83::3]:8080/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], True)
+
+
+class IsNoProxyHostTest(unittest.TestCase):
+ def setUp(self):
+ self.no_proxy = os.environ.get("no_proxy", None)
+ if "no_proxy" in os.environ:
+ del os.environ["no_proxy"]
+
+ def tearDown(self):
+ if self.no_proxy:
+ os.environ["no_proxy"] = self.no_proxy
+ elif "no_proxy" in os.environ:
+ del os.environ["no_proxy"]
+
+ def testMatchAll(self):
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['*']))
+ self.assertTrue(_is_no_proxy_host("192.168.0.1", ['*']))
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['other.websocket.org', '*']))
+ os.environ['no_proxy'] = '*'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+ self.assertTrue(_is_no_proxy_host("192.168.0.1", None))
+ os.environ['no_proxy'] = 'other.websocket.org, *'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+
+ def testIpAddress(self):
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.1']))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", ['127.0.0.1']))
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", ['other.websocket.org', '127.0.0.1']))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", ['other.websocket.org', '127.0.0.1']))
+ os.environ['no_proxy'] = '127.0.0.1'
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
+ os.environ['no_proxy'] = 'other.websocket.org, 127.0.0.1'
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
+
+ def testIpAddressInRange(self):
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.0/8']))
+ self.assertTrue(_is_no_proxy_host("127.0.0.2", ['127.0.0.0/8']))
+ self.assertFalse(_is_no_proxy_host("127.1.0.1", ['127.0.0.0/24']))
+ os.environ['no_proxy'] = '127.0.0.0/8'
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
+ self.assertTrue(_is_no_proxy_host("127.0.0.2", None))
+ os.environ['no_proxy'] = '127.0.0.0/24'
+ self.assertFalse(_is_no_proxy_host("127.1.0.1", None))
+
+ def testHostnameMatch(self):
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", ['my.websocket.org']))
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", ['other.websocket.org', 'my.websocket.org']))
+ self.assertFalse(_is_no_proxy_host("my.websocket.org", ['other.websocket.org']))
+ os.environ['no_proxy'] = 'my.websocket.org'
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
+ self.assertFalse(_is_no_proxy_host("other.websocket.org", None))
+ os.environ['no_proxy'] = 'other.websocket.org, my.websocket.org'
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
+
+ def testHostnameMatchDomain(self):
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['.websocket.org']))
+ self.assertTrue(_is_no_proxy_host("my.other.websocket.org", ['.websocket.org']))
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['my.websocket.org', '.websocket.org']))
+ self.assertFalse(_is_no_proxy_host("any.websocket.com", ['.websocket.org']))
+ os.environ['no_proxy'] = '.websocket.org'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+ self.assertTrue(_is_no_proxy_host("my.other.websocket.org", None))
+ self.assertFalse(_is_no_proxy_host("any.websocket.com", None))
+ os.environ['no_proxy'] = 'my.websocket.org, .websocket.org'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+
+
+class ProxyInfoTest(unittest.TestCase):
+ def setUp(self):
+ self.http_proxy = os.environ.get("http_proxy", None)
+ self.https_proxy = os.environ.get("https_proxy", None)
+ self.no_proxy = os.environ.get("no_proxy", None)
+ if "http_proxy" in os.environ:
+ del os.environ["http_proxy"]
+ if "https_proxy" in os.environ:
+ del os.environ["https_proxy"]
+ if "no_proxy" in os.environ:
+ del os.environ["no_proxy"]
+
+ def tearDown(self):
+ if self.http_proxy:
+ os.environ["http_proxy"] = self.http_proxy
+ elif "http_proxy" in os.environ:
+ del os.environ["http_proxy"]
+
+ if self.https_proxy:
+ os.environ["https_proxy"] = self.https_proxy
+ elif "https_proxy" in os.environ:
+ del os.environ["https_proxy"]
+
+ if self.no_proxy:
+ os.environ["no_proxy"] = self.no_proxy
+ elif "no_proxy" in os.environ:
+ del os.environ["no_proxy"]
+
+ def testProxyFromArgs(self):
+ self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost"), ("localhost", 0, None))
+ self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128),
+ ("localhost", 3128, None))
+ self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost"), ("localhost", 0, None))
+ self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128),
+ ("localhost", 3128, None))
+
+ self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_auth=("a", "b")),
+ ("localhost", 0, ("a", "b")))
+ self.assertEqual(
+ get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
+ ("localhost", 3128, ("a", "b")))
+ self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_auth=("a", "b")),
+ ("localhost", 0, ("a", "b")))
+ self.assertEqual(
+ get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
+ ("localhost", 3128, ("a", "b")))
+
+ self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128,
+ no_proxy=["example.com"], proxy_auth=("a", "b")),
+ ("localhost", 3128, ("a", "b")))
+ self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128,
+ no_proxy=["echo.websocket.org"], proxy_auth=("a", "b")),
+ (None, 0, None))
+
+ def testProxyFromEnv(self):
+ os.environ["http_proxy"] = "http://localhost/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
+ os.environ["http_proxy"] = "http://localhost:3128/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
+
+ os.environ["http_proxy"] = "http://localhost/"
+ os.environ["https_proxy"] = "http://localhost2/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
+ os.environ["http_proxy"] = "http://localhost:3128/"
+ os.environ["https_proxy"] = "http://localhost2:3128/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
+
+ os.environ["http_proxy"] = "http://localhost/"
+ os.environ["https_proxy"] = "http://localhost2/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, None))
+ os.environ["http_proxy"] = "http://localhost:3128/"
+ os.environ["https_proxy"] = "http://localhost2:3128/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, None))
+
+ os.environ["http_proxy"] = "http://a:b@localhost/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
+ os.environ["http_proxy"] = "http://a:b@localhost:3128/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
+
+ os.environ["http_proxy"] = "http://a:b@localhost/"
+ os.environ["https_proxy"] = "http://a:b@localhost2/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
+ os.environ["http_proxy"] = "http://a:b@localhost:3128/"
+ os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
+
+ os.environ["http_proxy"] = "http://a:b@localhost/"
+ os.environ["https_proxy"] = "http://a:b@localhost2/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, ("a", "b")))
+ os.environ["http_proxy"] = "http://a:b@localhost:3128/"
+ os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
+ self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("a", "b")))
+
+ os.environ["http_proxy"] = "http://a:b@localhost/"
+ os.environ["https_proxy"] = "http://a:b@localhost2/"
+ os.environ["no_proxy"] = "example1.com,example2.com"
+ self.assertEqual(get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")))
+ os.environ["http_proxy"] = "http://a:b@localhost:3128/"
+ os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
+ os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.org"
+ self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
+ os.environ["http_proxy"] = "http://a:b@localhost:3128/"
+ os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
+ os.environ["no_proxy"] = "example1.com,example2.com, .websocket.org"
+ self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
+
+ os.environ["http_proxy"] = "http://a:b@localhost:3128/"
+ os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
+ os.environ["no_proxy"] = "127.0.0.0/8, 192.168.0.0/16"
+ self.assertEqual(get_proxy_info("127.0.0.1", False), (None, 0, None))
+ self.assertEqual(get_proxy_info("192.168.1.1", False), (None, 0, None))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py2/websocket/tests/test_websocket.py b/contrib/python/websocket-client/py2/websocket/tests/test_websocket.py
new file mode 100644
index 0000000000..b1b66b8a71
--- /dev/null
+++ b/contrib/python/websocket-client/py2/websocket/tests/test_websocket.py
@@ -0,0 +1,434 @@
+# -*- coding: utf-8 -*-
+#
+"""
+
+"""
+
+"""
+websocket - WebSocket client library for Python
+
+Copyright (C) 2010 Hiroki Ohtani(liris)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+
+import sys
+sys.path[0:0] = [""]
+
+import os
+import os.path
+import socket
+
+import six
+
+# websocket-client
+import websocket as ws
+from websocket._handshake import _create_sec_websocket_key, \
+ _validate as _validate_header
+from websocket._http import read_headers
+from websocket._utils import validate_utf8
+
+if six.PY3:
+ from base64 import decodebytes as base64decode
+else:
+ from base64 import decodestring as base64decode
+
+if sys.version_info[0] == 2 and sys.version_info[1] < 7:
+ import unittest2 as unittest
+else:
+ import unittest
+
+try:
+ from ssl import SSLError
+except ImportError:
+ # dummy class of SSLError for ssl none-support environment.
+ class SSLError(Exception):
+ pass
+
+# Skip test to access the internet.
+TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
+TRACEABLE = True
+
+
+def create_mask_key(_):
+ return "abcd"
+
+
+class SockMock(object):
+ def __init__(self):
+ self.data = []
+ self.sent = []
+
+ def add_packet(self, data):
+ self.data.append(data)
+
+ def gettimeout(self):
+ return None
+
+ def recv(self, bufsize):
+ if self.data:
+ e = self.data.pop(0)
+ if isinstance(e, Exception):
+ raise e
+ if len(e) > bufsize:
+ self.data.insert(0, e[bufsize:])
+ return e[:bufsize]
+
+ def send(self, data):
+ self.sent.append(data)
+ return len(data)
+
+ def close(self):
+ pass
+
+
+class HeaderSockMock(SockMock):
+
+ def __init__(self, fname):
+ SockMock.__init__(self)
+ import yatest.common
+ path = yatest.common.source_path(os.path.join('contrib/python/websocket-client/py2/websocket/tests', fname))
+ with open(path, "rb") as f:
+ self.add_packet(f.read())
+
+
+class WebSocketTest(unittest.TestCase):
+ def setUp(self):
+ ws.enableTrace(TRACEABLE)
+
+ def tearDown(self):
+ pass
+
+ def testDefaultTimeout(self):
+ self.assertEqual(ws.getdefaulttimeout(), None)
+ ws.setdefaulttimeout(10)
+ self.assertEqual(ws.getdefaulttimeout(), 10)
+ ws.setdefaulttimeout(None)
+
+ def testWSKey(self):
+ key = _create_sec_websocket_key()
+ self.assertTrue(key != 24)
+ self.assertTrue(six.u("Â¥n") not in key)
+
+ def testNonce(self):
+ """ WebSocket key should be a random 16-byte nonce.
+ """
+ key = _create_sec_websocket_key()
+ nonce = base64decode(key.encode("utf-8"))
+ self.assertEqual(16, len(nonce))
+
+ def testWsUtils(self):
+ key = "c6b8hTg4EeGb2gQMztV1/g=="
+ required_header = {
+ "upgrade": "websocket",
+ "connection": "upgrade",
+ "sec-websocket-accept": "Kxep+hNu9n51529fGidYu7a3wO0="}
+ self.assertEqual(_validate_header(required_header, key, None), (True, None))
+
+ header = required_header.copy()
+ header["upgrade"] = "http"
+ self.assertEqual(_validate_header(header, key, None), (False, None))
+ del header["upgrade"]
+ self.assertEqual(_validate_header(header, key, None), (False, None))
+
+ header = required_header.copy()
+ header["connection"] = "something"
+ self.assertEqual(_validate_header(header, key, None), (False, None))
+ del header["connection"]
+ self.assertEqual(_validate_header(header, key, None), (False, None))
+
+ header = required_header.copy()
+ header["sec-websocket-accept"] = "something"
+ self.assertEqual(_validate_header(header, key, None), (False, None))
+ del header["sec-websocket-accept"]
+ self.assertEqual(_validate_header(header, key, None), (False, None))
+
+ header = required_header.copy()
+ header["sec-websocket-protocol"] = "sub1"
+ self.assertEqual(_validate_header(header, key, ["sub1", "sub2"]), (True, "sub1"))
+ self.assertEqual(_validate_header(header, key, ["sub2", "sub3"]), (False, None))
+
+ header = required_header.copy()
+ header["sec-websocket-protocol"] = "sUb1"
+ self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (True, "sub1"))
+
+ header = required_header.copy()
+ self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (False, None))
+
+ def testReadHeader(self):
+ status, header, status_message = read_headers(HeaderSockMock("data/header01.txt"))
+ self.assertEqual(status, 101)
+ self.assertEqual(header["connection"], "Upgrade")
+
+ status, header, status_message = read_headers(HeaderSockMock("data/header03.txt"))
+ self.assertEqual(status, 101)
+ self.assertEqual(header["connection"], "Upgrade, Keep-Alive")
+
+ HeaderSockMock("data/header02.txt")
+ self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))
+
+ def testSend(self):
+ # TODO: add longer frame data
+ sock = ws.WebSocket()
+ sock.set_mask_key(create_mask_key)
+ s = sock.sock = HeaderSockMock("data/header01.txt")
+ sock.send("Hello")
+ self.assertEqual(s.sent[0], six.b("\x81\x85abcd)\x07\x0f\x08\x0e"))
+
+ sock.send("ã“ã‚“ã«ã¡ã¯")
+ self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
+
+ sock.send(u"ã“ã‚“ã«ã¡ã¯")
+ self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
+
+# sock.send("x" * 5000)
+# self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
+
+ self.assertEqual(sock.send_binary(b'1111111111101'), 19)
+
+ def testRecv(self):
+ # TODO: add longer frame data
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ something = six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc")
+ s.add_packet(something)
+ data = sock.recv()
+ self.assertEqual(data, "ã“ã‚“ã«ã¡ã¯")
+
+ s.add_packet(six.b("\x81\x85abcd)\x07\x0f\x08\x0e"))
+ data = sock.recv()
+ self.assertEqual(data, "Hello")
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testIter(self):
+ count = 2
+ for _ in ws.create_connection('wss://stream.meetup.com/2/rsvps'):
+ count -= 1
+ if count == 0:
+ break
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testNext(self):
+ sock = ws.create_connection('wss://stream.meetup.com/2/rsvps')
+ self.assertEqual(str, type(next(sock)))
+
+ def testInternalRecvStrict(self):
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ s.add_packet(six.b("foo"))
+ s.add_packet(socket.timeout())
+ s.add_packet(six.b("bar"))
+ # s.add_packet(SSLError("The read operation timed out"))
+ s.add_packet(six.b("baz"))
+ with self.assertRaises(ws.WebSocketTimeoutException):
+ sock.frame_buffer.recv_strict(9)
+ # if six.PY2:
+ # with self.assertRaises(ws.WebSocketTimeoutException):
+ # data = sock._recv_strict(9)
+ # else:
+ # with self.assertRaises(SSLError):
+ # data = sock._recv_strict(9)
+ data = sock.frame_buffer.recv_strict(9)
+ self.assertEqual(data, six.b("foobarbaz"))
+ with self.assertRaises(ws.WebSocketConnectionClosedException):
+ sock.frame_buffer.recv_strict(1)
+
+ def testRecvTimeout(self):
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ s.add_packet(six.b("\x81"))
+ s.add_packet(socket.timeout())
+ s.add_packet(six.b("\x8dabcd\x29\x07\x0f\x08\x0e"))
+ s.add_packet(socket.timeout())
+ s.add_packet(six.b("\x4e\x43\x33\x0e\x10\x0f\x00\x40"))
+ with self.assertRaises(ws.WebSocketTimeoutException):
+ sock.recv()
+ with self.assertRaises(ws.WebSocketTimeoutException):
+ sock.recv()
+ data = sock.recv()
+ self.assertEqual(data, "Hello, World!")
+ with self.assertRaises(ws.WebSocketConnectionClosedException):
+ sock.recv()
+
+ def testRecvWithSimpleFragmentation(self):
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ # OPCODE=TEXT, FIN=0, MSG="Brevity is "
+ s.add_packet(six.b("\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
+ # OPCODE=CONT, FIN=1, MSG="the soul of wit"
+ s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
+ data = sock.recv()
+ self.assertEqual(data, "Brevity is the soul of wit")
+ with self.assertRaises(ws.WebSocketConnectionClosedException):
+ sock.recv()
+
+ def testRecvWithFireEventOfFragmentation(self):
+ sock = ws.WebSocket(fire_cont_frame=True)
+ s = sock.sock = SockMock()
+ # OPCODE=TEXT, FIN=0, MSG="Brevity is "
+ s.add_packet(six.b("\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
+ # OPCODE=CONT, FIN=0, MSG="Brevity is "
+ s.add_packet(six.b("\x00\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
+ # OPCODE=CONT, FIN=1, MSG="the soul of wit"
+ s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
+
+ _, data = sock.recv_data()
+ self.assertEqual(data, six.b("Brevity is "))
+ _, data = sock.recv_data()
+ self.assertEqual(data, six.b("Brevity is "))
+ _, data = sock.recv_data()
+ self.assertEqual(data, six.b("the soul of wit"))
+
+ # OPCODE=CONT, FIN=0, MSG="Brevity is "
+ s.add_packet(six.b("\x80\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
+
+ with self.assertRaises(ws.WebSocketException):
+ sock.recv_data()
+
+ with self.assertRaises(ws.WebSocketConnectionClosedException):
+ sock.recv()
+
+ def testClose(self):
+ sock = ws.WebSocket()
+ sock.sock = SockMock()
+ sock.connected = True
+ sock.close()
+ self.assertEqual(sock.connected, False)
+
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ sock.connected = True
+ s.add_packet(six.b('\x88\x80\x17\x98p\x84'))
+ sock.recv()
+ self.assertEqual(sock.connected, False)
+
+ def testRecvContFragmentation(self):
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ # OPCODE=CONT, FIN=1, MSG="the soul of wit"
+ s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
+ self.assertRaises(ws.WebSocketException, sock.recv)
+
+ def testRecvWithProlongedFragmentation(self):
+ sock = ws.WebSocket()
+ s = sock.sock = SockMock()
+ # OPCODE=TEXT, FIN=0, MSG="Once more unto the breach, "
+ s.add_packet(six.b("\x01\x9babcd.\x0c\x00\x01A\x0f\x0c\x16\x04B\x16\n\x15"
+ "\rC\x10\t\x07C\x06\x13\x07\x02\x07\tNC"))
+ # OPCODE=CONT, FIN=0, MSG="dear friends, "
+ s.add_packet(six.b("\x00\x8eabcd\x05\x07\x02\x16A\x04\x11\r\x04\x0c\x07"
+ "\x17MB"))
+ # OPCODE=CONT, FIN=1, MSG="once more"
+ s.add_packet(six.b("\x80\x89abcd\x0e\x0c\x00\x01A\x0f\x0c\x16\x04"))
+ data = sock.recv()
+ self.assertEqual(
+ data,
+ "Once more unto the breach, dear friends, once more")
+ with self.assertRaises(ws.WebSocketConnectionClosedException):
+ sock.recv()
+
+ def testRecvWithFragmentationAndControlFrame(self):
+ sock = ws.WebSocket()
+ sock.set_mask_key(create_mask_key)
+ s = sock.sock = SockMock()
+ # OPCODE=TEXT, FIN=0, MSG="Too much "
+ s.add_packet(six.b("\x01\x89abcd5\r\x0cD\x0c\x17\x00\x0cA"))
+ # OPCODE=PING, FIN=1, MSG="Please PONG this"
+ s.add_packet(six.b("\x89\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17"))
+ # OPCODE=CONT, FIN=1, MSG="of a good thing"
+ s.add_packet(six.b("\x80\x8fabcd\x0e\x04C\x05A\x05\x0c\x0b\x05B\x17\x0c"
+ "\x08\x0c\x04"))
+ data = sock.recv()
+ self.assertEqual(data, "Too much of a good thing")
+ with self.assertRaises(ws.WebSocketConnectionClosedException):
+ sock.recv()
+ self.assertEqual(
+ s.sent[0],
+ six.b("\x8a\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17"))
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testWebSocket(self):
+ s = ws.create_connection("ws://echo.websocket.org/")
+ self.assertNotEqual(s, None)
+ s.send("Hello, World")
+ result = s.recv()
+ self.assertEqual(result, "Hello, World")
+
+ s.send(u"ã“ã«ã‚ƒã«ã‚ƒã¡ã¯ã€ä¸–ç•Œ")
+ result = s.recv()
+ self.assertEqual(result, "ã“ã«ã‚ƒã«ã‚ƒã¡ã¯ã€ä¸–ç•Œ")
+ self.assertRaises(ValueError, s.send_close, -1, "")
+ s.close()
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testPingPong(self):
+ s = ws.create_connection("ws://echo.websocket.org/")
+ self.assertNotEqual(s, None)
+ s.ping("Hello")
+ s.pong("Hi")
+ s.close()
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testSecureWebSocket(self):
+ import ssl
+ s = ws.create_connection("wss://api.bitfinex.com/ws/2")
+ self.assertNotEqual(s, None)
+ self.assertTrue(isinstance(s.sock, ssl.SSLSocket))
+ self.assertEqual(s.getstatus(), 101)
+ self.assertNotEqual(s.getheaders(), None)
+ s.close()
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testWebSocketWithCustomHeader(self):
+ s = ws.create_connection("ws://echo.websocket.org/",
+ headers={"User-Agent": "PythonWebsocketClient"})
+ self.assertNotEqual(s, None)
+ s.send("Hello, World")
+ result = s.recv()
+ self.assertEqual(result, "Hello, World")
+ self.assertRaises(ValueError, s.close, -1, "")
+ s.close()
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testAfterClose(self):
+ s = ws.create_connection("ws://echo.websocket.org/")
+ self.assertNotEqual(s, None)
+ s.close()
+ self.assertRaises(ws.WebSocketConnectionClosedException, s.send, "Hello")
+ self.assertRaises(ws.WebSocketConnectionClosedException, s.recv)
+
+
+class SockOptTest(unittest.TestCase):
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testSockOpt(self):
+ sockopt = ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),)
+ s = ws.create_connection("ws://echo.websocket.org", sockopt=sockopt)
+ self.assertNotEqual(s.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
+ s.close()
+
+
+class UtilsTest(unittest.TestCase):
+ def testUtf8Validator(self):
+ state = validate_utf8(six.b('\xf0\x90\x80\x80'))
+ self.assertEqual(state, True)
+ state = validate_utf8(six.b('\xce\xba\xe1\xbd\xb9\xcf\x83\xce\xbc\xce\xb5\xed\xa0\x80edited'))
+ self.assertEqual(state, False)
+ state = validate_utf8(six.b(''))
+ self.assertEqual(state, True)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py2/ya.make b/contrib/python/websocket-client/py2/ya.make
new file mode 100644
index 0000000000..5bcff2d4ff
--- /dev/null
+++ b/contrib/python/websocket-client/py2/ya.make
@@ -0,0 +1,42 @@
+# Generated by devtools/yamaker (pypi).
+
+PY2_LIBRARY()
+
+VERSION(0.59.0)
+
+LICENSE(LGPL-2.1-or-later)
+
+PEERDIR(
+ contrib/python/six
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ websocket/__init__.py
+ websocket/_abnf.py
+ websocket/_app.py
+ websocket/_cookiejar.py
+ websocket/_core.py
+ websocket/_exceptions.py
+ websocket/_handshake.py
+ websocket/_http.py
+ websocket/_logging.py
+ websocket/_socket.py
+ websocket/_ssl_compat.py
+ websocket/_url.py
+ websocket/_utils.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/websocket-client/py2/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/websocket-client/py3/.dist-info/METADATA b/contrib/python/websocket-client/py3/.dist-info/METADATA
new file mode 100644
index 0000000000..3925b4a17d
--- /dev/null
+++ b/contrib/python/websocket-client/py3/.dist-info/METADATA
@@ -0,0 +1,184 @@
+Metadata-Version: 2.1
+Name: websocket-client
+Version: 1.6.4
+Summary: WebSocket client for Python with low level API options
+Home-page: https://github.com/websocket-client/websocket-client.git
+Author: liris
+Author-email: liris.pp@gmail.com
+Maintainer: engn33r
+Maintainer-email: websocket.client@proton.me
+License: Apache-2.0
+Download-URL: https://github.com/websocket-client/websocket-client/releases
+Project-URL: Documentation, https://websocket-client.readthedocs.io/
+Project-URL: Source, https://github.com/websocket-client/websocket-client/
+Keywords: websockets client
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Intended Audience :: Developers
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Provides-Extra: docs
+Requires-Dist: Sphinx (>=6.0) ; extra == 'docs'
+Requires-Dist: sphinx-rtd-theme (>=1.1.0) ; extra == 'docs'
+Provides-Extra: optional
+Requires-Dist: python-socks ; extra == 'optional'
+Requires-Dist: wsaccel ; extra == 'optional'
+Provides-Extra: test
+Requires-Dist: websockets ; extra == 'test'
+
+[![docs](https://readthedocs.org/projects/websocket-client/badge/?style=flat)](https://websocket-client.readthedocs.io/)
+[![Build Status](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml/badge.svg)](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml)
+[![codecov](https://codecov.io/gh/websocket-client/websocket-client/branch/master/graph/badge.svg?token=pcXhUQwiL3)](https://codecov.io/gh/websocket-client/websocket-client)
+[![PyPI Downloads](https://pepy.tech/badge/websocket-client)](https://pepy.tech/project/websocket-client)
+[![PyPI version](https://img.shields.io/pypi/v/websocket_client)](https://pypi.org/project/websocket_client/)
+
+# websocket-client
+
+websocket-client is a WebSocket client for Python. It provides access
+to low level APIs for WebSockets. websocket-client implements version
+[hybi-13](https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-13)
+of the WebSocket protocol. This client does not currently support the
+permessage-deflate extension from
+[RFC 7692](https://tools.ietf.org/html/rfc7692).
+
+## Documentation
+
+This project's documentation can be found at
+[https://websocket-client.readthedocs.io/](https://websocket-client.readthedocs.io/)
+
+## Contributing
+
+Please see the [contribution guidelines](https://github.com/websocket-client/websocket-client/blob/master/CONTRIBUTING.md)
+
+## Installation
+
+You can use either `python3 setup.py install` or `pip3 install websocket-client`
+to install. This module is tested on Python 3.8+.
+
+There are several optional dependencies that can be installed to enable
+specific websocket-client features.
+- To install `python-socks` for proxy usage and `wsaccel` for a minor performance boost, use:
+ `pip3 install websocket-client[optional]`
+- To install `websockets` to run unit tests using the local echo server, use:
+ `pip3 install websocket-client[test]`
+- To install `Sphinx` and `sphinx_rtd_theme` to build project documentation, use:
+ `pip3 install websocket-client[docs]`
+
+While not a strict dependency, [rel](https://github.com/bubbleboy14/registeredeventlistener)
+is useful when using `run_forever` with automatic reconnect. Install rel with `pip3 install rel`.
+
+Footnote: Some shells, such as zsh, require you to escape the `[` and `]` characters with a `\`.
+
+## Usage Tips
+
+Check out the documentation's FAQ for additional guidelines:
+[https://websocket-client.readthedocs.io/en/latest/faq.html](https://websocket-client.readthedocs.io/en/latest/faq.html)
+
+Known issues with this library include lack of WebSocket Compression
+support (RFC 7692) and [minimal threading documentation/support](https://websocket-client.readthedocs.io/en/latest/threading.html).
+
+## Performance
+
+The `send` and `validate_utf8` methods can sometimes be a bottleneck.
+You can disable UTF8 validation in this library (and receive a
+performance enhancement) with the `skip_utf8_validation` parameter.
+If you want to get better performance, install wsaccel. While
+websocket-client does not depend on wsaccel, it will be used if
+available. wsaccel doubles the speed of UTF8 validation and
+offers a very minor 10% performance boost when masking the
+payload data as part of the `send` process. Numpy used to
+be a suggested performance enhancement alternative, but
+[issue #687](https://github.com/websocket-client/websocket-client/issues/687)
+found it didn't help.
+
+## Examples
+
+Many more examples are found in the
+[examples documentation](https://websocket-client.readthedocs.io/en/latest/examples.html).
+
+### Long-lived Connection
+
+Most real-world WebSockets situations involve longer-lived connections.
+The WebSocketApp `run_forever` loop will automatically try to reconnect
+to an open WebSocket connection when a network
+connection is lost if it is provided with:
+
+- a `dispatcher` argument (async dispatcher like rel or pyevent)
+- a non-zero `reconnect` argument (delay between disconnection and attempted reconnection)
+
+`run_forever` provides a variety of event-based connection controls
+using callbacks like `on_message` and `on_error`.
+`run_forever` **does not automatically reconnect** if the server
+closes the WebSocket gracefully (returning
+[a standard websocket close code](https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1)).
+[This is the logic](https://github.com/websocket-client/websocket-client/pull/838#issuecomment-1228454826) behind the decision.
+Customizing behavior when the server closes
+the WebSocket should be handled in the `on_close` callback.
+This example uses [rel](https://github.com/bubbleboy14/registeredeventlistener)
+for the dispatcher to provide automatic reconnection.
+
+```python
+import websocket
+import _thread
+import time
+import rel
+
+def on_message(ws, message):
+ print(message)
+
+def on_error(ws, error):
+ print(error)
+
+def on_close(ws, close_status_code, close_msg):
+ print("### closed ###")
+
+def on_open(ws):
+ print("Opened connection")
+
+if __name__ == "__main__":
+ websocket.enableTrace(True)
+ ws = websocket.WebSocketApp("wss://api.gemini.com/v1/marketdata/BTCUSD",
+ on_open=on_open,
+ on_message=on_message,
+ on_error=on_error,
+ on_close=on_close)
+
+ ws.run_forever(dispatcher=rel, reconnect=5) # Set dispatcher to automatic reconnection, 5 second reconnect delay if connection closed unexpectedly
+ rel.signal(2, rel.abort) # Keyboard Interrupt
+ rel.dispatch()
+```
+
+### Short-lived Connection
+
+Use this if you want to communicate a short message and disconnect
+immediately when done. For example, if you want to confirm that a WebSocket
+server is running and responds properly to a specific request.
+
+```python
+from websocket import create_connection
+
+ws = create_connection("ws://echo.websocket.events/")
+print(ws.recv())
+print("Sending 'Hello, World'...")
+ws.send("Hello, World")
+print("Sent")
+print("Receiving...")
+result = ws.recv()
+print("Received '%s'" % result)
+ws.close()
+```
+
+
diff --git a/contrib/python/websocket-client/py3/.dist-info/entry_points.txt b/contrib/python/websocket-client/py3/.dist-info/entry_points.txt
new file mode 100644
index 0000000000..2c30a29b85
--- /dev/null
+++ b/contrib/python/websocket-client/py3/.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+wsdump = websocket._wsdump:main
+
diff --git a/contrib/python/websocket-client/py3/.dist-info/top_level.txt b/contrib/python/websocket-client/py3/.dist-info/top_level.txt
new file mode 100644
index 0000000000..ca4cb0cf82
--- /dev/null
+++ b/contrib/python/websocket-client/py3/.dist-info/top_level.txt
@@ -0,0 +1 @@
+websocket
diff --git a/contrib/python/websocket-client/py3/LICENSE b/contrib/python/websocket-client/py3/LICENSE
new file mode 100644
index 0000000000..88a0d3eb19
--- /dev/null
+++ b/contrib/python/websocket-client/py3/LICENSE
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 engn33r
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/contrib/python/websocket-client/py3/README.md b/contrib/python/websocket-client/py3/README.md
new file mode 100644
index 0000000000..daa39bca8a
--- /dev/null
+++ b/contrib/python/websocket-client/py3/README.md
@@ -0,0 +1,141 @@
+[![docs](https://readthedocs.org/projects/websocket-client/badge/?style=flat)](https://websocket-client.readthedocs.io/)
+[![Build Status](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml/badge.svg)](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml)
+[![codecov](https://codecov.io/gh/websocket-client/websocket-client/branch/master/graph/badge.svg?token=pcXhUQwiL3)](https://codecov.io/gh/websocket-client/websocket-client)
+[![PyPI Downloads](https://pepy.tech/badge/websocket-client)](https://pepy.tech/project/websocket-client)
+[![PyPI version](https://img.shields.io/pypi/v/websocket_client)](https://pypi.org/project/websocket_client/)
+
+# websocket-client
+
+websocket-client is a WebSocket client for Python. It provides access
+to low level APIs for WebSockets. websocket-client implements version
+[hybi-13](https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-13)
+of the WebSocket protocol. This client does not currently support the
+permessage-deflate extension from
+[RFC 7692](https://tools.ietf.org/html/rfc7692).
+
+## Documentation
+
+This project's documentation can be found at
+[https://websocket-client.readthedocs.io/](https://websocket-client.readthedocs.io/)
+
+## Contributing
+
+Please see the [contribution guidelines](https://github.com/websocket-client/websocket-client/blob/master/CONTRIBUTING.md)
+
+## Installation
+
+You can use either `python3 setup.py install` or `pip3 install websocket-client`
+to install. This module is tested on Python 3.8+.
+
+There are several optional dependencies that can be installed to enable
+specific websocket-client features.
+- To install `python-socks` for proxy usage and `wsaccel` for a minor performance boost, use:
+ `pip3 install websocket-client[optional]`
+- To install `websockets` to run unit tests using the local echo server, use:
+ `pip3 install websocket-client[test]`
+- To install `Sphinx` and `sphinx_rtd_theme` to build project documentation, use:
+ `pip3 install websocket-client[docs]`
+
+While not a strict dependency, [rel](https://github.com/bubbleboy14/registeredeventlistener)
+is useful when using `run_forever` with automatic reconnect. Install rel with `pip3 install rel`.
+
+Footnote: Some shells, such as zsh, require you to escape the `[` and `]` characters with a `\`.
+
+## Usage Tips
+
+Check out the documentation's FAQ for additional guidelines:
+[https://websocket-client.readthedocs.io/en/latest/faq.html](https://websocket-client.readthedocs.io/en/latest/faq.html)
+
+Known issues with this library include lack of WebSocket Compression
+support (RFC 7692) and [minimal threading documentation/support](https://websocket-client.readthedocs.io/en/latest/threading.html).
+
+## Performance
+
+The `send` and `validate_utf8` methods can sometimes be a bottleneck.
+You can disable UTF8 validation in this library (and receive a
+performance enhancement) with the `skip_utf8_validation` parameter.
+If you want to get better performance, install wsaccel. While
+websocket-client does not depend on wsaccel, it will be used if
+available. wsaccel doubles the speed of UTF8 validation and
+offers a very minor 10% performance boost when masking the
+payload data as part of the `send` process. Numpy used to
+be a suggested performance enhancement alternative, but
+[issue #687](https://github.com/websocket-client/websocket-client/issues/687)
+found it didn't help.
+
+## Examples
+
+Many more examples are found in the
+[examples documentation](https://websocket-client.readthedocs.io/en/latest/examples.html).
+
+### Long-lived Connection
+
+Most real-world WebSockets situations involve longer-lived connections.
+The WebSocketApp `run_forever` loop will automatically try to reconnect
+to an open WebSocket connection when a network
+connection is lost if it is provided with:
+
+- a `dispatcher` argument (async dispatcher like rel or pyevent)
+- a non-zero `reconnect` argument (delay between disconnection and attempted reconnection)
+
+`run_forever` provides a variety of event-based connection controls
+using callbacks like `on_message` and `on_error`.
+`run_forever` **does not automatically reconnect** if the server
+closes the WebSocket gracefully (returning
+[a standard websocket close code](https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1)).
+[This is the logic](https://github.com/websocket-client/websocket-client/pull/838#issuecomment-1228454826) behind the decision.
+Customizing behavior when the server closes
+the WebSocket should be handled in the `on_close` callback.
+This example uses [rel](https://github.com/bubbleboy14/registeredeventlistener)
+for the dispatcher to provide automatic reconnection.
+
+```python
+import websocket
+import _thread
+import time
+import rel
+
+def on_message(ws, message):
+ print(message)
+
+def on_error(ws, error):
+ print(error)
+
+def on_close(ws, close_status_code, close_msg):
+ print("### closed ###")
+
+def on_open(ws):
+ print("Opened connection")
+
+if __name__ == "__main__":
+ websocket.enableTrace(True)
+ ws = websocket.WebSocketApp("wss://api.gemini.com/v1/marketdata/BTCUSD",
+ on_open=on_open,
+ on_message=on_message,
+ on_error=on_error,
+ on_close=on_close)
+
+ ws.run_forever(dispatcher=rel, reconnect=5) # Set dispatcher to automatic reconnection, 5 second reconnect delay if connection closed unexpectedly
+ rel.signal(2, rel.abort) # Keyboard Interrupt
+ rel.dispatch()
+```
+
+### Short-lived Connection
+
+Use this if you want to communicate a short message and disconnect
+immediately when done. For example, if you want to confirm that a WebSocket
+server is running and responds properly to a specific request.
+
+```python
+from websocket import create_connection
+
+ws = create_connection("ws://echo.websocket.events/")
+print(ws.recv())
+print("Sending 'Hello, World'...")
+ws.send("Hello, World")
+print("Sent")
+print("Receiving...")
+result = ws.recv()
+print("Received '%s'" % result)
+ws.close()
+```
diff --git a/contrib/python/websocket-client/py3/tests/ya.make b/contrib/python/websocket-client/py3/tests/ya.make
new file mode 100644
index 0000000000..df3343f388
--- /dev/null
+++ b/contrib/python/websocket-client/py3/tests/ya.make
@@ -0,0 +1,27 @@
+PY3TEST()
+
+PEERDIR(
+ contrib/python/websocket-client
+)
+
+DATA(
+ arcadia/contrib/python/websocket-client/py3/websocket/tests/data
+)
+
+SRCDIR(
+ contrib/python/websocket-client/py3/websocket/tests
+)
+
+TEST_SRCS(
+ __init__.py
+ test_abnf.py
+ test_app.py
+ test_cookiejar.py
+ test_http.py
+ test_url.py
+ test_websocket.py
+)
+
+NO_LINT()
+
+END()
diff --git a/contrib/python/websocket-client/py3/websocket/__init__.py b/contrib/python/websocket-client/py3/websocket/__init__.py
new file mode 100644
index 0000000000..c186ace8cc
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/__init__.py
@@ -0,0 +1,26 @@
+"""
+__init__.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from ._abnf import *
+from ._app import WebSocketApp, setReconnect
+from ._core import *
+from ._exceptions import *
+from ._logging import *
+from ._socket import *
+
+__version__ = "1.6.4"
diff --git a/contrib/python/websocket-client/py3/websocket/_abnf.py b/contrib/python/websocket-client/py3/websocket/_abnf.py
new file mode 100644
index 0000000000..a1c6f5a6fe
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_abnf.py
@@ -0,0 +1,426 @@
+import array
+import os
+import struct
+import sys
+
+from threading import Lock
+from typing import Callable, Union
+
+from ._exceptions import *
+from ._utils import validate_utf8
+
+"""
+_abnf.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+try:
+ # If wsaccel is available, use compiled routines to mask data.
+ # wsaccel only provides around a 10% speed boost compared
+ # to the websocket-client _mask() implementation.
+ # Note that wsaccel is unmaintained.
+ from wsaccel.xormask import XorMaskerSimple
+
+ def _mask(_m, _d) -> bytes:
+ return XorMaskerSimple(_m).process(_d)
+
+except ImportError:
+ # wsaccel is not available, use websocket-client _mask()
+ native_byteorder = sys.byteorder
+
+ def _mask(mask_value: array.array, data_value: array.array) -> bytes:
+ datalen = len(data_value)
+ int_data_value = int.from_bytes(data_value, native_byteorder)
+ int_mask_value = int.from_bytes(mask_value * (datalen // 4) + mask_value[: datalen % 4], native_byteorder)
+ return (int_data_value ^ int_mask_value).to_bytes(datalen, native_byteorder)
+
+
+__all__ = [
+ 'ABNF', 'continuous_frame', 'frame_buffer',
+ 'STATUS_NORMAL',
+ 'STATUS_GOING_AWAY',
+ 'STATUS_PROTOCOL_ERROR',
+ 'STATUS_UNSUPPORTED_DATA_TYPE',
+ 'STATUS_STATUS_NOT_AVAILABLE',
+ 'STATUS_ABNORMAL_CLOSED',
+ 'STATUS_INVALID_PAYLOAD',
+ 'STATUS_POLICY_VIOLATION',
+ 'STATUS_MESSAGE_TOO_BIG',
+ 'STATUS_INVALID_EXTENSION',
+ 'STATUS_UNEXPECTED_CONDITION',
+ 'STATUS_BAD_GATEWAY',
+ 'STATUS_TLS_HANDSHAKE_ERROR',
+]
+
+# closing frame status codes.
+STATUS_NORMAL = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA_TYPE = 1003
+STATUS_STATUS_NOT_AVAILABLE = 1005
+STATUS_ABNORMAL_CLOSED = 1006
+STATUS_INVALID_PAYLOAD = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_INVALID_EXTENSION = 1010
+STATUS_UNEXPECTED_CONDITION = 1011
+STATUS_SERVICE_RESTART = 1012
+STATUS_TRY_AGAIN_LATER = 1013
+STATUS_BAD_GATEWAY = 1014
+STATUS_TLS_HANDSHAKE_ERROR = 1015
+
+VALID_CLOSE_STATUS = (
+ STATUS_NORMAL,
+ STATUS_GOING_AWAY,
+ STATUS_PROTOCOL_ERROR,
+ STATUS_UNSUPPORTED_DATA_TYPE,
+ STATUS_INVALID_PAYLOAD,
+ STATUS_POLICY_VIOLATION,
+ STATUS_MESSAGE_TOO_BIG,
+ STATUS_INVALID_EXTENSION,
+ STATUS_UNEXPECTED_CONDITION,
+ STATUS_SERVICE_RESTART,
+ STATUS_TRY_AGAIN_LATER,
+ STATUS_BAD_GATEWAY,
+)
+
+
+class ABNF:
+ """
+ ABNF frame class.
+ See http://tools.ietf.org/html/rfc5234
+ and http://tools.ietf.org/html/rfc6455#section-5.2
+ """
+
+ # operation code values.
+ OPCODE_CONT = 0x0
+ OPCODE_TEXT = 0x1
+ OPCODE_BINARY = 0x2
+ OPCODE_CLOSE = 0x8
+ OPCODE_PING = 0x9
+ OPCODE_PONG = 0xa
+
+ # available operation code value tuple
+ OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
+ OPCODE_PING, OPCODE_PONG)
+
+ # opcode human readable string
+ OPCODE_MAP = {
+ OPCODE_CONT: "cont",
+ OPCODE_TEXT: "text",
+ OPCODE_BINARY: "binary",
+ OPCODE_CLOSE: "close",
+ OPCODE_PING: "ping",
+ OPCODE_PONG: "pong"
+ }
+
+ # data length threshold.
+ LENGTH_7 = 0x7e
+ LENGTH_16 = 1 << 16
+ LENGTH_63 = 1 << 63
+
+ def __init__(self, fin: int = 0, rsv1: int = 0, rsv2: int = 0, rsv3: int = 0,
+ opcode: int = OPCODE_TEXT, mask: int = 1, data: Union[str, bytes] = "") -> None:
+ """
+ Constructor for ABNF. Please check RFC for arguments.
+ """
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.mask = mask
+ if data is None:
+ data = ""
+ self.data = data
+ self.get_mask_key = os.urandom
+
+ def validate(self, skip_utf8_validation: bool = False) -> None:
+ """
+ Validate the ABNF frame.
+
+ Parameters
+ ----------
+ skip_utf8_validation: skip utf8 validation.
+ """
+ if self.rsv1 or self.rsv2 or self.rsv3:
+ raise WebSocketProtocolException("rsv is not implemented, yet")
+
+ if self.opcode not in ABNF.OPCODES:
+ raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
+
+ if self.opcode == ABNF.OPCODE_PING and not self.fin:
+ raise WebSocketProtocolException("Invalid ping frame.")
+
+ if self.opcode == ABNF.OPCODE_CLOSE:
+ l = len(self.data)
+ if not l:
+ return
+ if l == 1 or l >= 126:
+ raise WebSocketProtocolException("Invalid close frame.")
+ if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
+ raise WebSocketProtocolException("Invalid close frame.")
+
+ code = 256 * self.data[0] + self.data[1]
+ if not self._is_valid_close_status(code):
+ raise WebSocketProtocolException("Invalid close opcode %r", code)
+
+ @staticmethod
+ def _is_valid_close_status(code: int) -> bool:
+ return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)
+
+ def __str__(self) -> str:
+ return "fin=" + str(self.fin) \
+ + " opcode=" + str(self.opcode) \
+ + " data=" + str(self.data)
+
+ @staticmethod
+ def create_frame(data: Union[bytes, str], opcode: int, fin: int = 1) -> 'ABNF':
+ """
+ Create frame to send text, binary and other data.
+
+ Parameters
+ ----------
+ data: str
+ data to send. This is string value(byte array).
+ If opcode is OPCODE_TEXT and this value is unicode,
+ data value is converted into unicode string, automatically.
+ opcode: int
+ operation code. please see OPCODE_MAP.
+ fin: int
+ fin flag. if set to 0, create continue fragmentation.
+ """
+ if opcode == ABNF.OPCODE_TEXT and isinstance(data, str):
+ data = data.encode("utf-8")
+ # mask must be set if send data from client
+ return ABNF(fin, 0, 0, 0, opcode, 1, data)
+
+ def format(self) -> bytes:
+ """
+ Format this object to string(byte array) to send data to server.
+ """
+ if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
+ raise ValueError("not 0 or 1")
+ if self.opcode not in ABNF.OPCODES:
+ raise ValueError("Invalid OPCODE")
+ length = len(self.data)
+ if length >= ABNF.LENGTH_63:
+ raise ValueError("data is too long")
+
+ frame_header = chr(self.fin << 7 |
+ self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4 |
+ self.opcode).encode('latin-1')
+ if length < ABNF.LENGTH_7:
+ frame_header += chr(self.mask << 7 | length).encode('latin-1')
+ elif length < ABNF.LENGTH_16:
+ frame_header += chr(self.mask << 7 | 0x7e).encode('latin-1')
+ frame_header += struct.pack("!H", length)
+ else:
+ frame_header += chr(self.mask << 7 | 0x7f).encode('latin-1')
+ frame_header += struct.pack("!Q", length)
+
+ if not self.mask:
+ return frame_header + self.data
+ else:
+ mask_key = self.get_mask_key(4)
+ return frame_header + self._get_masked(mask_key)
+
+ def _get_masked(self, mask_key: Union[str, bytes]) -> bytes:
+ s = ABNF.mask(mask_key, self.data)
+
+ if isinstance(mask_key, str):
+ mask_key = mask_key.encode('utf-8')
+
+ return mask_key + s
+
+ @staticmethod
+ def mask(mask_key: Union[str, bytes], data: Union[str, bytes]) -> bytes:
+ """
+ Mask or unmask data. Just do xor for each byte
+
+ Parameters
+ ----------
+ mask_key: bytes or str
+ 4 byte mask.
+ data: bytes or str
+ data to mask/unmask.
+ """
+ if data is None:
+ data = ""
+
+ if isinstance(mask_key, str):
+ mask_key = mask_key.encode('latin-1')
+
+ if isinstance(data, str):
+ data = data.encode('latin-1')
+
+ return _mask(array.array("B", mask_key), array.array("B", data))
+
+
+class frame_buffer:
+ _HEADER_MASK_INDEX = 5
+ _HEADER_LENGTH_INDEX = 6
+
+ def __init__(self, recv_fn: Callable[[int], int], skip_utf8_validation: bool) -> None:
+ self.recv = recv_fn
+ self.skip_utf8_validation = skip_utf8_validation
+ # Buffers over the packets from the layer beneath until desired amount
+ # bytes of bytes are received.
+ self.recv_buffer = []
+ self.clear()
+ self.lock = Lock()
+
+ def clear(self) -> None:
+ self.header = None
+ self.length = None
+ self.mask = None
+
+ def has_received_header(self) -> bool:
+ return self.header is None
+
+ def recv_header(self) -> None:
+ header = self.recv_strict(2)
+ b1 = header[0]
+ fin = b1 >> 7 & 1
+ rsv1 = b1 >> 6 & 1
+ rsv2 = b1 >> 5 & 1
+ rsv3 = b1 >> 4 & 1
+ opcode = b1 & 0xf
+ b2 = header[1]
+ has_mask = b2 >> 7 & 1
+ length_bits = b2 & 0x7f
+
+ self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)
+
+ def has_mask(self) -> Union[bool, int]:
+ if not self.header:
+ return False
+ return self.header[frame_buffer._HEADER_MASK_INDEX]
+
+ def has_received_length(self) -> bool:
+ return self.length is None
+
+ def recv_length(self) -> None:
+ bits = self.header[frame_buffer._HEADER_LENGTH_INDEX]
+ length_bits = bits & 0x7f
+ if length_bits == 0x7e:
+ v = self.recv_strict(2)
+ self.length = struct.unpack("!H", v)[0]
+ elif length_bits == 0x7f:
+ v = self.recv_strict(8)
+ self.length = struct.unpack("!Q", v)[0]
+ else:
+ self.length = length_bits
+
+ def has_received_mask(self) -> bool:
+ return self.mask is None
+
+ def recv_mask(self) -> None:
+ self.mask = self.recv_strict(4) if self.has_mask() else ""
+
+ def recv_frame(self) -> ABNF:
+
+ with self.lock:
+ # Header
+ if self.has_received_header():
+ self.recv_header()
+ (fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header
+
+ # Frame length
+ if self.has_received_length():
+ self.recv_length()
+ length = self.length
+
+ # Mask
+ if self.has_received_mask():
+ self.recv_mask()
+ mask = self.mask
+
+ # Payload
+ payload = self.recv_strict(length)
+ if has_mask:
+ payload = ABNF.mask(mask, payload)
+
+ # Reset for next frame
+ self.clear()
+
+ frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
+ frame.validate(self.skip_utf8_validation)
+
+ return frame
+
+ def recv_strict(self, bufsize: int) -> bytes:
+ shortage = bufsize - sum(map(len, self.recv_buffer))
+ while shortage > 0:
+ # Limit buffer size that we pass to socket.recv() to avoid
+ # fragmenting the heap -- the number of bytes recv() actually
+ # reads is limited by socket buffer and is relatively small,
+ # yet passing large numbers repeatedly causes lots of large
+ # buffers allocated and then shrunk, which results in
+ # fragmentation.
+ bytes_ = self.recv(min(16384, shortage))
+ self.recv_buffer.append(bytes_)
+ shortage -= len(bytes_)
+
+ unified = b"".join(self.recv_buffer)
+
+ if shortage == 0:
+ self.recv_buffer = []
+ return unified
+ else:
+ self.recv_buffer = [unified[bufsize:]]
+ return unified[:bufsize]
+
+
+class continuous_frame:
+
+ def __init__(self, fire_cont_frame: bool, skip_utf8_validation: bool) -> None:
+ self.fire_cont_frame = fire_cont_frame
+ self.skip_utf8_validation = skip_utf8_validation
+ self.cont_data = None
+ self.recving_frames = None
+
+ def validate(self, frame: ABNF) -> None:
+ if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT:
+ raise WebSocketProtocolException("Illegal frame")
+ if self.recving_frames and \
+ frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
+ raise WebSocketProtocolException("Illegal frame")
+
+ def add(self, frame: ABNF) -> None:
+ if self.cont_data:
+ self.cont_data[1] += frame.data
+ else:
+ if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
+ self.recving_frames = frame.opcode
+ self.cont_data = [frame.opcode, frame.data]
+
+ if frame.fin:
+ self.recving_frames = None
+
+ def is_fire(self, frame: ABNF) -> Union[bool, int]:
+ return frame.fin or self.fire_cont_frame
+
+ def extract(self, frame: ABNF) -> list:
+ data = self.cont_data
+ self.cont_data = None
+ frame.data = data[1]
+ if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not self.skip_utf8_validation and not validate_utf8(frame.data):
+ raise WebSocketPayloadException(
+ "cannot decode: " + repr(frame.data))
+
+ return [data[0], frame]
diff --git a/contrib/python/websocket-client/py3/websocket/_app.py b/contrib/python/websocket-client/py3/websocket/_app.py
new file mode 100644
index 0000000000..13f8bd5634
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_app.py
@@ -0,0 +1,558 @@
+import inspect
+import selectors
+import socket
+import sys
+import threading
+import time
+import traceback
+
+from typing import Any, Callable, Optional, Union
+
+from . import _logging
+from ._abnf import ABNF
+from ._url import parse_url
+from ._core import WebSocket, getdefaulttimeout
+from ._exceptions import *
+
+"""
+_app.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+__all__ = ["WebSocketApp"]
+
+RECONNECT = 0
+
+
+def setReconnect(reconnectInterval: int) -> None:
+ global RECONNECT
+ RECONNECT = reconnectInterval
+
+
+class DispatcherBase:
+ """
+ DispatcherBase
+ """
+ def __init__(self, app: Any, ping_timeout: float) -> None:
+ self.app = app
+ self.ping_timeout = ping_timeout
+
+ def timeout(self, seconds: int, callback: Callable) -> None:
+ time.sleep(seconds)
+ callback()
+
+ def reconnect(self, seconds: int, reconnector: Callable) -> None:
+ try:
+ _logging.info("reconnect() - retrying in {seconds_count} seconds [{frame_count} frames in stack]".format(
+ seconds_count=seconds, frame_count=len(inspect.stack())))
+ time.sleep(seconds)
+ reconnector(reconnecting=True)
+ except KeyboardInterrupt as e:
+ _logging.info("User exited {err}".format(err=e))
+ raise e
+
+
+class Dispatcher(DispatcherBase):
+ """
+ Dispatcher
+ """
+ def read(self, sock: socket.socket, read_callback: Callable, check_callback: Callable) -> None:
+ sel = selectors.DefaultSelector()
+ sel.register(self.app.sock.sock, selectors.EVENT_READ)
+ try:
+ while self.app.keep_running:
+ r = sel.select(self.ping_timeout)
+ if r:
+ if not read_callback():
+ break
+ check_callback()
+ finally:
+ sel.close()
+
+
+class SSLDispatcher(DispatcherBase):
+ """
+ SSLDispatcher
+ """
+ def read(self, sock: socket.socket, read_callback: Callable, check_callback: Callable) -> None:
+ sock = self.app.sock.sock
+ sel = selectors.DefaultSelector()
+ sel.register(sock, selectors.EVENT_READ)
+ try:
+ while self.app.keep_running:
+ r = self.select(sock, sel)
+ if r:
+ if not read_callback():
+ break
+ check_callback()
+ finally:
+ sel.close()
+
+ def select(self, sock, sel:selectors.DefaultSelector):
+ sock = self.app.sock.sock
+ if sock.pending():
+ return [sock,]
+
+ r = sel.select(self.ping_timeout)
+
+ if len(r) > 0:
+ return r[0][0]
+
+
+class WrappedDispatcher:
+ """
+ WrappedDispatcher
+ """
+ def __init__(self, app, ping_timeout: float, dispatcher: Dispatcher) -> None:
+ self.app = app
+ self.ping_timeout = ping_timeout
+ self.dispatcher = dispatcher
+ dispatcher.signal(2, dispatcher.abort) # keyboard interrupt
+
+ def read(self, sock: socket.socket, read_callback: Callable, check_callback: Callable) -> None:
+ self.dispatcher.read(sock, read_callback)
+ self.ping_timeout and self.timeout(self.ping_timeout, check_callback)
+
+ def timeout(self, seconds: int, callback: Callable) -> None:
+ self.dispatcher.timeout(seconds, callback)
+
+ def reconnect(self, seconds: int, reconnector: Callable) -> None:
+ self.timeout(seconds, reconnector)
+
+
+class WebSocketApp:
+ """
+ Higher level of APIs are provided. The interface is like JavaScript WebSocket object.
+ """
+
+ def __init__(self, url: str, header: Union[list, dict, Callable] = None,
+ on_open: Callable = None, on_message: Callable = None, on_error: Callable = None,
+ on_close: Callable = None, on_ping: Callable = None, on_pong: Callable = None,
+ on_cont_message: Callable = None,
+ keep_running: bool = True, get_mask_key: Callable = None, cookie: str = None,
+ subprotocols: list = None,
+ on_data: Callable = None,
+ socket: socket.socket = None) -> None:
+ """
+ WebSocketApp initialization
+
+ Parameters
+ ----------
+ url: str
+ Websocket url.
+ header: list or dict or Callable
+ Custom header for websocket handshake.
+ If the parameter is a callable object, it is called just before the connection attempt.
+ The returned dict or list is used as custom header value.
+ This could be useful in order to properly setup timestamp dependent headers.
+ on_open: function
+ Callback object which is called at opening websocket.
+ on_open has one argument.
+ The 1st argument is this class object.
+ on_message: function
+ Callback object which is called when received data.
+ on_message has 2 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is utf-8 data received from the server.
+ on_error: function
+ Callback object which is called when we get error.
+ on_error has 2 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is exception object.
+ on_close: function
+ Callback object which is called when connection is closed.
+ on_close has 3 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is close_status_code.
+ The 3rd argument is close_msg.
+ on_cont_message: function
+ Callback object which is called when a continuation
+ frame is received.
+ on_cont_message has 3 arguments.
+ The 1st argument is this class object.
+ The 2nd argument is utf-8 string which we get from the server.
+ The 3rd argument is continue flag. if 0, the data continue
+ to next frame data
+ on_data: function
+ Callback object which is called when a message received.
+ This is called before on_message or on_cont_message,
+ and then on_message or on_cont_message is called.
+ on_data has 4 argument.
+ The 1st argument is this class object.
+ The 2nd argument is utf-8 string which we get from the server.
+ The 3rd argument is data type. ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be came.
+ The 4th argument is continue flag. If 0, the data continue
+ keep_running: bool
+ This parameter is obsolete and ignored.
+ get_mask_key: function
+ A callable function to get new mask keys, see the
+ WebSocket.set_mask_key's docstring for more information.
+ cookie: str
+ Cookie value.
+ subprotocols: list
+ List of available sub protocols. Default is None.
+ socket: socket
+ Pre-initialized stream socket.
+ """
+ self.url = url
+ self.header = header if header is not None else []
+ self.cookie = cookie
+
+ self.on_open = on_open
+ self.on_message = on_message
+ self.on_data = on_data
+ self.on_error = on_error
+ self.on_close = on_close
+ self.on_ping = on_ping
+ self.on_pong = on_pong
+ self.on_cont_message = on_cont_message
+ self.keep_running = False
+ self.get_mask_key = get_mask_key
+ self.sock = None
+ self.last_ping_tm = 0
+ self.last_pong_tm = 0
+ self.ping_thread = None
+ self.stop_ping = None
+ self.ping_interval = 0
+ self.ping_timeout = None
+ self.ping_payload = ""
+ self.subprotocols = subprotocols
+ self.prepared_socket = socket
+ self.has_errored = False
+ self.has_done_teardown = False
+ self.has_done_teardown_lock = threading.Lock()
+
+ def send(self, data: str, opcode: int = ABNF.OPCODE_TEXT) -> None:
+ """
+ send message
+
+ Parameters
+ ----------
+ data: str
+ Message to send. If you set opcode to OPCODE_TEXT,
+ data must be utf-8 string or unicode.
+ opcode: int
+ Operation code of data. Default is OPCODE_TEXT.
+ """
+
+ if not self.sock or self.sock.send(data, opcode) == 0:
+ raise WebSocketConnectionClosedException(
+ "Connection is already closed.")
+
+ def close(self, **kwargs) -> None:
+ """
+ Close websocket connection.
+ """
+ self.keep_running = False
+ if self.sock:
+ self.sock.close(**kwargs)
+ self.sock = None
+
+ def _start_ping_thread(self) -> None:
+ self.last_ping_tm = self.last_pong_tm = 0
+ self.stop_ping = threading.Event()
+ self.ping_thread = threading.Thread(target=self._send_ping)
+ self.ping_thread.daemon = True
+ self.ping_thread.start()
+
+ def _stop_ping_thread(self) -> None:
+ if self.stop_ping:
+ self.stop_ping.set()
+ if self.ping_thread and self.ping_thread.is_alive():
+ self.ping_thread.join(3)
+ self.last_ping_tm = self.last_pong_tm = 0
+
+ def _send_ping(self) -> None:
+ if self.stop_ping.wait(self.ping_interval) or self.keep_running is False:
+ return
+ while not self.stop_ping.wait(self.ping_interval) and self.keep_running is True:
+ if self.sock:
+ self.last_ping_tm = time.time()
+ try:
+ _logging.debug("Sending ping")
+ self.sock.ping(self.ping_payload)
+ except Exception as e:
+ _logging.debug("Failed to send ping: {err}".format(err=e))
+
+ def run_forever(self, sockopt: tuple = None, sslopt: dict = None,
+ ping_interval: float = 0, ping_timeout: Optional[float] = None,
+ ping_payload: str = "",
+ http_proxy_host: str = None, http_proxy_port: Union[int, str] = None,
+ http_no_proxy: list = None, http_proxy_auth: tuple = None,
+ http_proxy_timeout: float = None,
+ skip_utf8_validation: bool = False,
+ host: str = None, origin: str = None, dispatcher: Dispatcher = None,
+ suppress_origin: bool = False, proxy_type: str = None, reconnect: int = None) -> bool:
+ """
+ Run event loop for WebSocket framework.
+
+ This loop is an infinite loop and is alive while websocket is available.
+
+ Parameters
+ ----------
+ sockopt: tuple
+ Values for socket.setsockopt.
+ sockopt must be tuple
+ and each element is argument of sock.setsockopt.
+ sslopt: dict
+ Optional dict object for ssl socket option.
+ ping_interval: int or float
+ Automatically send "ping" command
+ every specified period (in seconds).
+ If set to 0, no ping is sent periodically.
+ ping_timeout: int or float
+ Timeout (in seconds) if the pong message is not received.
+ ping_payload: str
+ Payload message to send with each ping.
+ http_proxy_host: str
+ HTTP proxy host name.
+ http_proxy_port: int or str
+ HTTP proxy port. If not set, set to 80.
+ http_no_proxy: list
+ Whitelisted host names that don't use the proxy.
+ http_proxy_timeout: int or float
+ HTTP proxy timeout, default is 60 sec as per python-socks.
+ http_proxy_auth: tuple
+ HTTP proxy auth information. tuple of username and password. Default is None.
+ skip_utf8_validation: bool
+ skip utf8 validation.
+ host: str
+ update host header.
+ origin: str
+ update origin header.
+ dispatcher: Dispatcher object
+ customize reading data from socket.
+ suppress_origin: bool
+ suppress outputting origin header.
+ proxy_type: str
+ type of proxy from: http, socks4, socks4a, socks5, socks5h
+ reconnect: int
+ delay interval when reconnecting
+
+ Returns
+ -------
+ teardown: bool
+ False if the `WebSocketApp` is closed or caught KeyboardInterrupt,
+ True if any other exception was raised during a loop.
+ """
+
+ if reconnect is None:
+ reconnect = RECONNECT
+
+ if ping_timeout is not None and ping_timeout <= 0:
+ raise WebSocketException("Ensure ping_timeout > 0")
+ if ping_interval is not None and ping_interval < 0:
+ raise WebSocketException("Ensure ping_interval >= 0")
+ if ping_timeout and ping_interval and ping_interval <= ping_timeout:
+ raise WebSocketException("Ensure ping_interval > ping_timeout")
+ if not sockopt:
+ sockopt = []
+ if not sslopt:
+ sslopt = {}
+ if self.sock:
+ raise WebSocketException("socket is already opened")
+
+ self.ping_interval = ping_interval
+ self.ping_timeout = ping_timeout
+ self.ping_payload = ping_payload
+ self.keep_running = True
+
+ def teardown(close_frame: ABNF = None):
+ """
+ Tears down the connection.
+
+ Parameters
+ ----------
+ close_frame: ABNF frame
+ If close_frame is set, the on_close handler is invoked
+ with the statusCode and reason from the provided frame.
+ """
+
+ # teardown() is called in many code paths to ensure resources are cleaned up and on_close is fired.
+ # To ensure the work is only done once, we use this bool and lock.
+ with self.has_done_teardown_lock:
+ if self.has_done_teardown:
+ return
+ self.has_done_teardown = True
+
+ self._stop_ping_thread()
+ self.keep_running = False
+ if self.sock:
+ self.sock.close()
+ close_status_code, close_reason = self._get_close_args(
+ close_frame if close_frame else None)
+ self.sock = None
+
+ # Finally call the callback AFTER all teardown is complete
+ self._callback(self.on_close, close_status_code, close_reason)
+
+ def setSock(reconnecting: bool = False) -> None:
+ if reconnecting and self.sock:
+ self.sock.shutdown()
+
+ self.sock = WebSocket(
+ self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
+ fire_cont_frame=self.on_cont_message is not None,
+ skip_utf8_validation=skip_utf8_validation,
+ enable_multithread=True)
+
+ self.sock.settimeout(getdefaulttimeout())
+ try:
+
+ header = self.header() if callable(self.header) else self.header
+
+ self.sock.connect(
+ self.url, header=header, cookie=self.cookie,
+ http_proxy_host=http_proxy_host,
+ http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
+ http_proxy_auth=http_proxy_auth, http_proxy_timeout=http_proxy_timeout,
+ subprotocols=self.subprotocols,
+ host=host, origin=origin, suppress_origin=suppress_origin,
+ proxy_type=proxy_type, socket=self.prepared_socket)
+
+ _logging.info("Websocket connected")
+
+ if self.ping_interval:
+ self._start_ping_thread()
+
+ self._callback(self.on_open)
+
+ dispatcher.read(self.sock.sock, read, check)
+ except (WebSocketConnectionClosedException, ConnectionRefusedError, KeyboardInterrupt, SystemExit, Exception) as e:
+ handleDisconnect(e, reconnecting)
+
+ def read() -> bool:
+ if not self.keep_running:
+ return teardown()
+
+ try:
+ op_code, frame = self.sock.recv_data_frame(True)
+ except (WebSocketConnectionClosedException, KeyboardInterrupt) as e:
+ if custom_dispatcher:
+ return handleDisconnect(e)
+ else:
+ raise e
+
+ if op_code == ABNF.OPCODE_CLOSE:
+ return teardown(frame)
+ elif op_code == ABNF.OPCODE_PING:
+ self._callback(self.on_ping, frame.data)
+ elif op_code == ABNF.OPCODE_PONG:
+ self.last_pong_tm = time.time()
+ self._callback(self.on_pong, frame.data)
+ elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
+ self._callback(self.on_data, frame.data,
+ frame.opcode, frame.fin)
+ self._callback(self.on_cont_message,
+ frame.data, frame.fin)
+ else:
+ data = frame.data
+ if op_code == ABNF.OPCODE_TEXT and not skip_utf8_validation:
+ data = data.decode("utf-8")
+ self._callback(self.on_data, data, frame.opcode, True)
+ self._callback(self.on_message, data)
+
+ return True
+
+ def check() -> bool:
+ if (self.ping_timeout):
+ has_timeout_expired = time.time() - self.last_ping_tm > self.ping_timeout
+ has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
+ has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > self.ping_timeout
+
+ if (self.last_ping_tm and
+ has_timeout_expired and
+ (has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
+ raise WebSocketTimeoutException("ping/pong timed out")
+ return True
+
+ def handleDisconnect(e: Exception, reconnecting: bool = False) -> bool:
+ self.has_errored = True
+ self._stop_ping_thread()
+ if not reconnecting:
+ self._callback(self.on_error, e)
+
+ if isinstance(e, (KeyboardInterrupt, SystemExit)):
+ teardown()
+ # Propagate further
+ raise
+
+ if reconnect:
+ _logging.info("{err} - reconnect".format(err=e))
+ if custom_dispatcher:
+ _logging.debug("Calling custom dispatcher reconnect [{frame_count} frames in stack]".format(frame_count=len(inspect.stack())))
+ dispatcher.reconnect(reconnect, setSock)
+ else:
+ _logging.error("{err} - goodbye".format(err=e))
+ teardown()
+
+ custom_dispatcher = bool(dispatcher)
+ dispatcher = self.create_dispatcher(ping_timeout, dispatcher, parse_url(self.url)[3])
+
+ try:
+ setSock()
+ if not custom_dispatcher and reconnect:
+ while self.keep_running:
+ _logging.debug("Calling dispatcher reconnect [{frame_count} frames in stack]".format(frame_count=len(inspect.stack())))
+ dispatcher.reconnect(reconnect, setSock)
+ except (KeyboardInterrupt, Exception) as e:
+ _logging.info("tearing down on exception {err}".format(err=e))
+ teardown()
+ finally:
+ if not custom_dispatcher:
+ # Ensure teardown was called before returning from run_forever
+ teardown()
+
+ return self.has_errored
+
+ def create_dispatcher(self, ping_timeout: int, dispatcher: Dispatcher = None, is_ssl: bool = False) -> DispatcherBase:
+ if dispatcher: # If custom dispatcher is set, use WrappedDispatcher
+ return WrappedDispatcher(self, ping_timeout, dispatcher)
+ timeout = ping_timeout or 10
+ if is_ssl:
+ return SSLDispatcher(self, timeout)
+
+ return Dispatcher(self, timeout)
+
+ def _get_close_args(self, close_frame: ABNF) -> list:
+ """
+ _get_close_args extracts the close code and reason from the close body
+ if it exists (RFC6455 says WebSocket Connection Close Code is optional)
+ """
+ # Need to catch the case where close_frame is None
+ # Otherwise the following if statement causes an error
+ if not self.on_close or not close_frame:
+ return [None, None]
+
+ # Extract close frame status code
+ if close_frame.data and len(close_frame.data) >= 2:
+ close_status_code = 256 * close_frame.data[0] + close_frame.data[1]
+ reason = close_frame.data[2:].decode('utf-8')
+ return [close_status_code, reason]
+ else:
+ # Most likely reached this because len(close_frame_data.data) < 2
+ return [None, None]
+
+ def _callback(self, callback, *args) -> None:
+ if callback:
+ try:
+ callback(self, *args)
+
+ except Exception as e:
+ _logging.error("error from callback {callback}: {err}".format(callback=callback, err=e))
+ if self.on_error:
+ self.on_error(self, e)
diff --git a/contrib/python/websocket-client/py3/websocket/_cookiejar.py b/contrib/python/websocket-client/py3/websocket/_cookiejar.py
new file mode 100644
index 0000000000..bf907d6bdb
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_cookiejar.py
@@ -0,0 +1,66 @@
+import http.cookies
+
+from typing import Optional
+
+"""
+_cookiejar.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+class SimpleCookieJar:
+ def __init__(self) -> None:
+ self.jar = dict()
+
+ def add(self, set_cookie: Optional[str]) -> None:
+ if set_cookie:
+ simpleCookie = http.cookies.SimpleCookie(set_cookie)
+
+ for k, v in simpleCookie.items():
+ domain = v.get("domain")
+ if domain:
+ if not domain.startswith("."):
+ domain = "." + domain
+ cookie = self.jar.get(domain) if self.jar.get(domain) else http.cookies.SimpleCookie()
+ cookie.update(simpleCookie)
+ self.jar[domain.lower()] = cookie
+
+ def set(self, set_cookie: str) -> None:
+ if set_cookie:
+ simpleCookie = http.cookies.SimpleCookie(set_cookie)
+
+ for k, v in simpleCookie.items():
+ domain = v.get("domain")
+ if domain:
+ if not domain.startswith("."):
+ domain = "." + domain
+ self.jar[domain.lower()] = simpleCookie
+
+ def get(self, host: str) -> str:
+ if not host:
+ return ""
+
+ cookies = []
+ for domain, simpleCookie in self.jar.items():
+ host = host.lower()
+ if host.endswith(domain) or host == domain[1:]:
+ cookies.append(self.jar.get(domain))
+
+ return "; ".join(filter(
+ None, sorted(
+ ["%s=%s" % (k, v.value) for cookie in filter(None, cookies) for k, v in cookie.items()]
+ )))
diff --git a/contrib/python/websocket-client/py3/websocket/_core.py b/contrib/python/websocket-client/py3/websocket/_core.py
new file mode 100644
index 0000000000..fea2b6d49c
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_core.py
@@ -0,0 +1,611 @@
+import socket
+import struct
+import threading
+import time
+
+from typing import Optional, Union
+
+# websocket modules
+from ._abnf import *
+from ._exceptions import *
+from ._handshake import *
+from ._http import *
+from ._logging import *
+from ._socket import *
+from ._ssl_compat import *
+from ._utils import *
+
+"""
+_core.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+__all__ = ['WebSocket', 'create_connection']
+
+
class WebSocket:
    """
    Low level WebSocket interface.

    This class is based on the WebSocket protocol `draft-hixie-thewebsocketprotocol-76 <http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76>`_

    We can connect to the websocket server and send/receive data.
    The following example is an echo client.

    >>> import websocket
    >>> ws = websocket.WebSocket()
    >>> ws.connect("ws://echo.websocket.events")
    >>> ws.recv()
    'echo.websocket.events sponsored by Lob.com'
    >>> ws.send("Hello, Server")
    19
    >>> ws.recv()
    'Hello, Server'
    >>> ws.close()

    Parameters
    ----------
    get_mask_key: func
        A callable function to get new mask keys, see the
        WebSocket.set_mask_key's docstring for more information.
    sockopt: tuple
        Values for socket.setsockopt.
        sockopt must be tuple and each element is argument of sock.setsockopt.
    sslopt: dict
        Optional dict object for ssl socket options. See FAQ for details.
    fire_cont_frame: bool
        Fire recv event for each cont frame. Default is False.
    enable_multithread: bool
        If set to True, lock send method.
    skip_utf8_validation: bool
        Skip utf8 validation.
    """

    def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
                 fire_cont_frame: bool = False, enable_multithread: bool = True,
                 skip_utf8_validation: bool = False, **_):
        """
        Initialize WebSocket object.

        Parameters
        ----------
        sslopt: dict
            Optional dict object for ssl socket options. See FAQ for details.
        """
        self.sock_opt = sock_opt(sockopt, sslopt)
        # handshake_response is filled in by connect(); None until then.
        self.handshake_response = None
        # Underlying (possibly SSL-wrapped) socket, created in connect().
        self.sock = None

        self.connected = False
        self.get_mask_key = get_mask_key
        # These buffer over the build-up of a single frame.
        self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
        self.cont_frame = continuous_frame(
            fire_cont_frame, skip_utf8_validation)

        # Separate locks so a send never blocks on an in-progress recv.
        if enable_multithread:
            self.lock = threading.Lock()
            self.readlock = threading.Lock()
        else:
            self.lock = NoLock()
            self.readlock = NoLock()

    def __iter__(self):
        """
        Allow iteration over websocket, implying sequential `recv` executions.
        """
        while True:
            yield self.recv()

    def __next__(self):
        return self.recv()

    def next(self):
        # Legacy alias of __next__ (Python 2 iterator protocol name).
        return self.__next__()

    def fileno(self):
        # Expose the socket's file descriptor, e.g. for select()/poll().
        return self.sock.fileno()

    def set_mask_key(self, func):
        """
        Set function to create mask key. You can customize mask key generator.
        Mainly, this is for testing purpose.

        Parameters
        ----------
        func: func
            callable object. the func takes 1 argument as integer.
            The argument means length of mask key.
            This func must return string(byte array),
            which length is argument specified.
        """
        self.get_mask_key = func

    def gettimeout(self) -> float:
        """
        Get the websocket timeout (in seconds) as an int or float

        Returns
        ----------
        timeout: int or float
            returns timeout value (in seconds). This value could be either float/integer.
        """
        return self.sock_opt.timeout

    def settimeout(self, timeout: Optional[float]):
        """
        Set the timeout to the websocket.

        Parameters
        ----------
        timeout: int or float
            timeout time (in seconds). This value could be either float/integer.
        """
        # Remember the value for sockets created later, and apply it to the
        # current socket if one already exists.
        self.sock_opt.timeout = timeout
        if self.sock:
            self.sock.settimeout(timeout)

    timeout = property(gettimeout, settimeout)

    def getsubprotocol(self):
        """
        Get subprotocol
        """
        if self.handshake_response:
            return self.handshake_response.subprotocol
        else:
            return None

    subprotocol = property(getsubprotocol)

    def getstatus(self):
        """
        Get handshake status
        """
        if self.handshake_response:
            return self.handshake_response.status
        else:
            return None

    status = property(getstatus)

    def getheaders(self):
        """
        Get handshake response header
        """
        if self.handshake_response:
            return self.handshake_response.headers
        else:
            return None

    def is_ssl(self):
        # True when the underlying socket is SSL-wrapped; False on any
        # error (e.g. ssl support unavailable so ssl.SSLSocket is undefined).
        try:
            return isinstance(self.sock, ssl.SSLSocket)
        except:
            return False

    headers = property(getheaders)

    def connect(self, url, **options):
        """
        Connect to url. url is websocket url scheme.
        ie. ws://host:port/resource
        You can customize using 'options'.
        If you set "header" list object, you can set your own custom header.

        >>> ws = WebSocket()
        >>> ws.connect("ws://echo.websocket.events",
        ...     header=["User-Agent: MyProgram",
        ...             "x-custom: header"])

        Parameters
        ----------
        header: list or dict
            Custom http header list or dict.
        cookie: str
            Cookie value.
        origin: str
            Custom origin url.
        connection: str
            Custom connection header value.
            Default value "Upgrade" set in _handshake.py
        suppress_origin: bool
            Suppress outputting origin header.
        host: str
            Custom host header string.
        timeout: int or float
            Socket timeout time. This value is an integer or float.
            If you set None for this value, it means "use default_timeout value"
        http_proxy_host: str
            HTTP proxy host name.
        http_proxy_port: str or int
            HTTP proxy port. Default is 80.
        http_no_proxy: list
            Whitelisted host names that don't use the proxy.
        http_proxy_auth: tuple
            HTTP proxy auth information. Tuple of username and password. Default is None.
        http_proxy_timeout: int or float
            HTTP proxy timeout, default is 60 sec as per python-socks.
        redirect_limit: int
            Number of redirects to follow.
        subprotocols: list
            List of available subprotocols. Default is None.
        socket: socket
            Pre-initialized stream socket.
        """
        self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout)
        self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
                                   options.pop('socket', None))

        try:
            self.handshake_response = handshake(self.sock, url, *addrs, **options)
            # Follow HTTP redirects from the handshake, up to redirect_limit
            # times, reconnecting to the new location each round.
            for attempt in range(options.pop('redirect_limit', 3)):
                if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES:
                    url = self.handshake_response.headers['location']
                    self.sock.close()
                    self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
                                               options.pop('socket', None))
                    self.handshake_response = handshake(self.sock, url, *addrs, **options)
            self.connected = True
        except:
            # Ensure no half-open socket is left behind before re-raising.
            if self.sock:
                self.sock.close()
                self.sock = None
            raise

    def send(self, payload: Union[bytes, str], opcode: int = ABNF.OPCODE_TEXT) -> int:
        """
        Send the data as string.

        Parameters
        ----------
        payload: str
            Payload must be utf-8 string or unicode,
            If the opcode is OPCODE_TEXT.
            Otherwise, it must be string(byte array).
        opcode: int
            Operation code (opcode) to send.
        """

        frame = ABNF.create_frame(payload, opcode)
        return self.send_frame(frame)

    def send_frame(self, frame) -> int:
        """
        Send the data frame.

        >>> ws = create_connection("ws://echo.websocket.events")
        >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
        >>> ws.send_frame(frame)
        >>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
        >>> ws.send_frame(cont_frame)
        >>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
        >>> ws.send_frame(cont_frame)

        Parameters
        ----------
        frame: ABNF frame
            frame data created by ABNF.create_frame

        Returns
        -------
        length: int
            number of bytes of the formatted frame that were sent.
        """
        if self.get_mask_key:
            frame.get_mask_key = self.get_mask_key
        data = frame.format()
        length = len(data)
        if (isEnabledForTrace()):
            trace("++Sent raw: " + repr(data))
            trace("++Sent decoded: " + frame.__str__())
        # Hold the send lock for the whole frame so frames from concurrent
        # threads are not interleaved on the wire.
        with self.lock:
            while data:
                l = self._send(data)
                data = data[l:]

        return length

    def send_binary(self, payload: bytes) -> int:
        """
        Send a binary message (OPCODE_BINARY).

        Parameters
        ----------
        payload: bytes
            payload of message to send.
        """
        return self.send(payload, ABNF.OPCODE_BINARY)

    def ping(self, payload: Union[str, bytes] = ""):
        """
        Send ping data.

        Parameters
        ----------
        payload: str
            data payload to send server.
        """
        if isinstance(payload, str):
            payload = payload.encode("utf-8")
        self.send(payload, ABNF.OPCODE_PING)

    def pong(self, payload: Union[str, bytes] = ""):
        """
        Send pong data.

        Parameters
        ----------
        payload: str
            data payload to send server.
        """
        if isinstance(payload, str):
            payload = payload.encode("utf-8")
        self.send(payload, ABNF.OPCODE_PONG)

    def recv(self) -> Union[str, bytes]:
        """
        Receive string data(byte array) from the server.

        Returns
        ----------
        data: string (byte array) value.
        """
        with self.readlock:
            opcode, data = self.recv_data()
        if opcode == ABNF.OPCODE_TEXT:
            return data.decode("utf-8")
        # NOTE(review): OPCODE_TEXT is already matched by the branch above,
        # so only the OPCODE_BINARY half of this condition can ever be true.
        elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
            return data
        else:
            return ''

    def recv_data(self, control_frame: bool = False) -> tuple:
        """
        Receive data with operation code.

        Parameters
        ----------
        control_frame: bool
            a boolean flag indicating whether to return control frame
            data, defaults to False

        Returns
        -------
        opcode, frame.data: tuple
            tuple of operation code and string(byte array) value.
        """
        opcode, frame = self.recv_data_frame(control_frame)
        return opcode, frame.data

    def recv_data_frame(self, control_frame: bool = False):
        """
        Receive data with operation code.

        If a valid ping message is received, a pong response is sent.

        Parameters
        ----------
        control_frame: bool
            a boolean flag indicating whether to return control frame
            data, defaults to False

        Returns
        -------
        frame.opcode, frame: tuple
            tuple of operation code and string(byte array) value.
        """
        # Loop until a frame the caller cares about arrives; control frames
        # (ping/pong) are consumed here unless control_frame is True.
        while True:
            frame = self.recv_frame()
            if (isEnabledForTrace()):
                trace("++Rcv raw: " + repr(frame.format()))
                trace("++Rcv decoded: " + frame.__str__())
            if not frame:
                # handle error:
                # 'NoneType' object has no attribute 'opcode'
                raise WebSocketProtocolException(
                    "Not a valid frame {frame}".format(frame=frame))
            elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
                self.cont_frame.validate(frame)
                self.cont_frame.add(frame)

                if self.cont_frame.is_fire(frame):
                    return self.cont_frame.extract(frame)

            elif frame.opcode == ABNF.OPCODE_CLOSE:
                # Echo the close per the protocol, then hand the frame back.
                self.send_close()
                return frame.opcode, frame
            elif frame.opcode == ABNF.OPCODE_PING:
                # RFC 6455: control frame payloads must be < 126 bytes.
                if len(frame.data) < 126:
                    self.pong(frame.data)
                else:
                    raise WebSocketProtocolException(
                        "Ping message is too long")
                if control_frame:
                    return frame.opcode, frame
            elif frame.opcode == ABNF.OPCODE_PONG:
                if control_frame:
                    return frame.opcode, frame

    def recv_frame(self):
        """
        Receive data as frame from server.

        Returns
        -------
        self.frame_buffer.recv_frame(): ABNF frame object
        """
        return self.frame_buffer.recv_frame()

    def send_close(self, status: int = STATUS_NORMAL, reason: bytes = b""):
        """
        Send close data to the server.

        Parameters
        ----------
        status: int
            Status code to send. See STATUS_XXX.
        reason: str or bytes
            The reason to close. This must be string or UTF-8 bytes.

        Raises
        ------
        ValueError
            if status does not fit in an unsigned 16-bit integer.
        """
        if status < 0 or status >= ABNF.LENGTH_16:
            raise ValueError("code is invalid range")
        self.connected = False
        # Close frame payload: 2-byte big-endian status code + reason bytes.
        self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)

    def close(self, status: int = STATUS_NORMAL, reason: bytes = b"", timeout: float = 3):
        """
        Close Websocket object

        Parameters
        ----------
        status: int
            Status code to send. See VALID_CLOSE_STATUS in ABNF.
        reason: bytes
            The reason to close in UTF-8.
        timeout: int or float
            Timeout until receive a close frame.
            If None, it will wait forever until receive a close frame.
        """
        if self.connected:
            if status < 0 or status >= ABNF.LENGTH_16:
                raise ValueError("code is invalid range")

            # Best-effort close handshake: send our close frame, then wait
            # (up to `timeout`) for the server's close frame before shutting
            # the socket down.  Errors here are deliberately swallowed so
            # close() never raises on an already-broken connection.
            try:
                self.connected = False
                self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
                sock_timeout = self.sock.gettimeout()
                self.sock.settimeout(timeout)
                start_time = time.time()
                while timeout is None or time.time() - start_time < timeout:
                    try:
                        frame = self.recv_frame()
                        if frame.opcode != ABNF.OPCODE_CLOSE:
                            continue
                        if isEnabledForError():
                            recv_status = struct.unpack("!H", frame.data[0:2])[0]
                            # 3000-4999 are registered/private-use codes: log
                            # at debug; anything else abnormal logs as error.
                            if recv_status >= 3000 and recv_status <= 4999:
                                debug("close status: " + repr(recv_status))
                            elif recv_status != STATUS_NORMAL:
                                error("close status: " + repr(recv_status))
                        break
                    except:
                        break
                self.sock.settimeout(sock_timeout)
                self.sock.shutdown(socket.SHUT_RDWR)
            except:
                pass

        self.shutdown()

    def abort(self):
        """
        Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
        """
        if self.connected:
            self.sock.shutdown(socket.SHUT_RDWR)

    def shutdown(self):
        """
        close socket, immediately.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
            self.connected = False

    def _send(self, data: Union[str, bytes]):
        # Delegate to the module-level send(); returns bytes actually sent.
        return send(self.sock, data)

    def _recv(self, bufsize):
        try:
            return recv(self.sock, bufsize)
        except WebSocketConnectionClosedException:
            # The peer is gone: drop our socket state before propagating.
            if self.sock:
                self.sock.close()
            self.sock = None
            self.connected = False
            raise
+
+
def create_connection(url: str, timeout=None, class_=WebSocket, **options):
    """
    Connect to *url* and return a ready-to-use WebSocket object.

    The optional *timeout* is applied to the socket; when it is None the
    value from getdefaulttimeout() is used instead.  Remaining keyword
    options are split in two groups: constructor options (``sockopt``,
    ``sslopt``, ``fire_cont_frame``, ``enable_multithread``,
    ``skip_utf8_validation``) are consumed here and handed to *class_*,
    while everything else (``header``, ``cookie``, ``origin``,
    ``suppress_origin``, ``host``, ``http_proxy_*``, ``redirect_limit``,
    ``subprotocols``, ``socket``, ...) is forwarded to ``connect()``.

    >>> conn = create_connection("ws://echo.websocket.events",
    ...     header=["User-Agent: MyProgram",
    ...             "x-custom: header"])

    Parameters
    ----------
    url: str
        websocket url (ws:// or wss://).
    timeout: int or float or None
        socket timeout in seconds; None means use getdefaulttimeout().
    class_: class
        class to instantiate when creating the connection. It has to
        implement settimeout and connect, and its __init__ must accept
        WebSocket.__init__'s kwargs.
    options:
        see WebSocket.__init__ and WebSocket.connect for the full list.
    """
    # Constructor-only options, with their defaults; pop() removes them so
    # they are not passed on to connect().
    constructor_defaults = (
        ("sockopt", []),
        ("sslopt", {}),
        ("fire_cont_frame", False),
        ("enable_multithread", True),
        ("skip_utf8_validation", False),
    )
    init_kwargs = {name: options.pop(name, default)
                   for name, default in constructor_defaults}

    websock = class_(**init_kwargs, **options)
    if timeout is None:
        websock.settimeout(getdefaulttimeout())
    else:
        websock.settimeout(timeout)
    websock.connect(url, **options)
    return websock
diff --git a/contrib/python/websocket-client/py3/websocket/_exceptions.py b/contrib/python/websocket-client/py3/websocket/_exceptions.py
new file mode 100644
index 0000000000..48f40a0724
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_exceptions.py
@@ -0,0 +1,80 @@
+"""
+_exceptions.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
class WebSocketException(Exception):
    """
    Base class for every exception raised by websocket-client.
    """


class WebSocketProtocolException(WebSocketException):
    """
    If the WebSocket protocol is invalid, this exception will be raised.
    """


class WebSocketPayloadException(WebSocketException):
    """
    If the WebSocket payload is invalid, this exception will be raised.
    """


class WebSocketConnectionClosedException(WebSocketException):
    """
    If remote host closed the connection or some network error happened,
    this exception will be raised.
    """


class WebSocketTimeoutException(WebSocketException):
    """
    WebSocketTimeoutException will be raised at socket timeout during read/write data.
    """


class WebSocketProxyException(WebSocketException):
    """
    WebSocketProxyException will be raised when proxy error occurred.
    """


class WebSocketBadStatusException(WebSocketException):
    """
    WebSocketBadStatusException will be raised when we get bad handshake status code.

    Attributes
    ----------
    status_code: int
        HTTP status code of the handshake response.
    status_message:
        HTTP reason phrase of the handshake response (may be None).
    resp_headers:
        response headers of the failed handshake (may be None).
    resp_body:
        raw response body, when a content-length was announced (may be None).
    """

    def __init__(self, message: str, status_code: int, status_message=None, resp_headers=None, resp_body=None):
        super().__init__(message)
        self.status_code = status_code
        # BUG FIX: status_message was previously accepted but silently
        # dropped; keep it so callers can inspect the failure reason.
        self.status_message = status_message
        self.resp_headers = resp_headers
        self.resp_body = resp_body


class WebSocketAddressException(WebSocketException):
    """
    If the websocket address info cannot be found, this exception will be raised.
    """
diff --git a/contrib/python/websocket-client/py3/websocket/_handshake.py b/contrib/python/websocket-client/py3/websocket/_handshake.py
new file mode 100644
index 0000000000..a94d3030c3
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_handshake.py
@@ -0,0 +1,197 @@
+"""
+_handshake.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import hashlib
+import hmac
+import os
+from base64 import encodebytes as base64encode
+from http import client as HTTPStatus
+from ._cookiejar import SimpleCookieJar
+from ._exceptions import *
+from ._http import *
+from ._logging import *
+from ._socket import *
+
__all__ = ["handshake_response", "handshake", "SUPPORTED_REDIRECT_STATUSES"]

# websocket supported version.
VERSION = 13

# 3xx statuses that handshake() reports back for the caller to follow.
SUPPORTED_REDIRECT_STATUSES = (HTTPStatus.MOVED_PERMANENTLY, HTTPStatus.FOUND, HTTPStatus.SEE_OTHER, HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT)
# Statuses accepted by _get_resp_headers: the redirects above plus
# 101 Switching Protocols (the successful upgrade).
SUCCESS_STATUSES = SUPPORTED_REDIRECT_STATUSES + (HTTPStatus.SWITCHING_PROTOCOLS,)

# Process-wide cookie jar shared by every handshake in this module.
CookieJar = SimpleCookieJar()
+
+
class handshake_response:
    """Parsed result of a WebSocket opening handshake.

    Holds the HTTP status, the response headers, and the subprotocol the
    server selected (or None).
    """

    def __init__(self, status: int, headers: dict, subprotocol):
        # Record any cookies the server handed out so follow-up handshakes
        # (e.g. after a redirect) send them back.
        CookieJar.add(headers.get("set-cookie"))
        self.status = status
        self.headers = headers
        self.subprotocol = subprotocol
+
+
def handshake(sock, url: str, hostname: str, port: int, resource: str, **options):
    """Perform the client side of the WebSocket opening handshake on *sock*.

    Sends the upgrade request and validates the response.  Returns a
    handshake_response; for a redirect status the response is returned
    unvalidated (subprotocol None) so the caller can follow it.  Raises
    WebSocketException when the response headers fail validation.
    """
    request_lines, key = _get_handshake_headers(resource, url, hostname, port, options)

    request = "\r\n".join(request_lines)
    send(sock, request)
    dump("request header", request)

    status, response_headers = _get_resp_headers(sock)
    if status in SUPPORTED_REDIRECT_STATUSES:
        # Redirect: no Sec-WebSocket-Accept to check yet.
        return handshake_response(status, response_headers, None)

    ok, chosen_subprotocol = _validate(response_headers, key, options.get("subprotocols"))
    if not ok:
        raise WebSocketException("Invalid WebSocket Header")
    return handshake_response(status, response_headers, chosen_subprotocol)
+
+
+def _pack_hostname(hostname: str) -> str:
+ # IPv6 address
+ if ':' in hostname:
+ return '[' + hostname + ']'
+
+ return hostname
+
+
def _get_handshake_headers(resource: str, url: str, host: str, port: int, options: dict):
    """Build the HTTP upgrade request lines.

    Returns (headers, key) where headers is the list of request lines
    (terminated by two empty strings so joining with CRLF yields the
    blank line ending the request) and key is the Sec-WebSocket-Key sent.
    """
    headers = [
        "GET {resource} HTTP/1.1".format(resource=resource),
        "Upgrade: websocket"
    ]
    # Default ports are omitted from the Host header value.
    if port == 80 or port == 443:
        hostport = _pack_hostname(host)
    else:
        hostport = "{h}:{p}".format(h=_pack_hostname(host), p=port)
    if options.get("host"):
        headers.append("Host: {h}".format(h=options["host"]))
    else:
        headers.append("Host: {hp}".format(hp=hostport))

    # scheme indicates whether http or https is used in Origin
    # The same approach is used in parse_url of _url.py to set default port
    scheme, url = url.split(":", 1)
    if not options.get("suppress_origin"):
        if "origin" in options and options["origin"] is not None:
            headers.append("Origin: {origin}".format(origin=options["origin"]))
        elif scheme == "wss":
            headers.append("Origin: https://{hp}".format(hp=hostport))
        else:
            headers.append("Origin: http://{hp}".format(hp=hostport))

    key = _create_sec_websocket_key()

    # Append Sec-WebSocket-Key & Sec-WebSocket-Version if not manually specified
    # NOTE(review): when options['header'] is a list, this `in` test matches
    # whole list elements rather than header names, so a list entry like
    # "Sec-WebSocket-Key: x" does NOT suppress the generated key — confirm
    # this is the intended contract (a dict header works as expected).
    if not options.get('header') or 'Sec-WebSocket-Key' not in options['header']:
        headers.append("Sec-WebSocket-Key: {key}".format(key=key))
    else:
        key = options['header']['Sec-WebSocket-Key']

    if not options.get('header') or 'Sec-WebSocket-Version' not in options['header']:
        headers.append("Sec-WebSocket-Version: {version}".format(version=VERSION))

    if not options.get('connection'):
        headers.append('Connection: Upgrade')
    else:
        headers.append(options['connection'])

    subprotocols = options.get("subprotocols")
    if subprotocols:
        headers.append("Sec-WebSocket-Protocol: {protocols}".format(protocols=",".join(subprotocols)))

    header = options.get("header")
    if header:
        # A dict header is flattened to "Name: value" lines; None values
        # mean "omit this header".
        if isinstance(header, dict):
            header = [
                ": ".join([k, v])
                for k, v in header.items()
                if v is not None
            ]
        headers.extend(header)

    # Combine cookies remembered from earlier responses with any
    # caller-supplied cookie string.
    server_cookie = CookieJar.get(host)
    client_cookie = options.get("cookie", None)

    cookie = "; ".join(filter(None, [server_cookie, client_cookie]))

    if cookie:
        headers.append("Cookie: {cookie}".format(cookie=cookie))

    # Two empty strings -> trailing CRLF CRLF once joined by the caller.
    headers.extend(("", ""))
    return headers, key
+
+
def _get_resp_headers(sock, success_statuses: tuple = SUCCESS_STATUSES) -> tuple:
    """Read the handshake response; return (status, headers).

    Raises WebSocketBadStatusException for any status outside
    *success_statuses*, attaching the announced response body (if any) to
    make the failure diagnosable.
    """
    status, resp_headers, status_message = read_headers(sock)
    if status in success_statuses:
        return status, resp_headers

    # read the body of the HTTP error message response and include it in the exception
    content_len = resp_headers.get('content-length')
    response_body = sock.recv(int(content_len)) if content_len else None
    raise WebSocketBadStatusException(
        "Handshake status {status} {message} -+-+- {headers} -+-+- {body}".format(
            status=status, message=status_message, headers=resp_headers, body=response_body),
        status, status_message, resp_headers, response_body)
+
+
+_HEADERS_TO_CHECK = {
+ "upgrade": "websocket",
+ "connection": "upgrade",
+}
+
+
+def _validate(headers, key: str, subprotocols):
+ subproto = None
+ for k, v in _HEADERS_TO_CHECK.items():
+ r = headers.get(k, None)
+ if not r:
+ return False, None
+ r = [x.strip().lower() for x in r.split(',')]
+ if v not in r:
+ return False, None
+
+ if subprotocols:
+ subproto = headers.get("sec-websocket-protocol", None)
+ if not subproto or subproto.lower() not in [s.lower() for s in subprotocols]:
+ error("Invalid subprotocol: " + str(subprotocols))
+ return False, None
+ subproto = subproto.lower()
+
+ result = headers.get("sec-websocket-accept", None)
+ if not result:
+ return False, None
+ result = result.lower()
+
+ if isinstance(result, str):
+ result = result.encode('utf-8')
+
+ value = (key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode('utf-8')
+ hashed = base64encode(hashlib.sha1(value).digest()).strip().lower()
+ success = hmac.compare_digest(hashed, result)
+
+ if success:
+ return True, subproto
+ else:
+ return False, None
+
+
+def _create_sec_websocket_key() -> str:
+ randomness = os.urandom(16)
+ return base64encode(randomness).decode('utf-8').strip()
diff --git a/contrib/python/websocket-client/py3/websocket/_http.py b/contrib/python/websocket-client/py3/websocket/_http.py
new file mode 100644
index 0000000000..13183b2034
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_http.py
@@ -0,0 +1,340 @@
+"""
+_http.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import errno
+import os
+import socket
+
+from ._exceptions import *
+from ._logging import *
+from ._socket import *
+from ._ssl_compat import *
+from ._url import *
+
+from base64 import encodebytes as base64encode
+
+__all__ = ["proxy_info", "connect", "read_headers"]
+
# python-socks is an optional dependency, needed only for SOCKS proxying.
try:
    from python_socks.sync import Proxy
    from python_socks._errors import *
    from python_socks._types import ProxyType
    HAVE_PYTHON_SOCKS = True
except:
    HAVE_PYTHON_SOCKS = False

    # Stand-in exception types so the names used below always resolve even
    # when python-socks is absent.
    class ProxyError(Exception):
        pass

    class ProxyTimeoutError(Exception):
        pass

    class ProxyConnectionError(Exception):
        pass
+
+
class proxy_info:
    """Normalized proxy settings extracted from keyword options.

    Recognized options: http_proxy_host, http_proxy_port, http_proxy_auth,
    http_no_proxy, proxy_type and http_proxy_timeout.  Raises ProxyError
    for an unsupported proxy_type.
    """

    def __init__(self, **options):
        self.proxy_host = options.get("http_proxy_host", None)
        if not self.proxy_host:
            # No proxy configured: neutral defaults.
            self.proxy_port = 0
            self.auth = None
            self.no_proxy = None
            self.proxy_protocol = "http"
            return

        self.proxy_port = options.get("http_proxy_port", 0)
        self.auth = options.get("http_proxy_auth", None)
        self.no_proxy = options.get("http_no_proxy", None)
        self.proxy_protocol = options.get("proxy_type", "http")
        # Note: If timeout not specified, default python-socks timeout is 60 seconds
        self.proxy_timeout = options.get("http_proxy_timeout", None)
        if self.proxy_protocol not in ('http', 'socks4', 'socks4a', 'socks5', 'socks5h'):
            raise ProxyError("Only http, socks4, socks5 proxy protocols are supported")
+
+
def _start_proxied_socket(url: str, options, proxy):
    """Open a socket to *url* through a SOCKS proxy (python-socks).

    Returns (sock, (hostname, port, resource)); the socket is SSL-wrapped
    for wss URLs.  Raises WebSocketException when python-socks is missing
    or when wss is requested without ssl support.
    """
    if not HAVE_PYTHON_SOCKS:
        raise WebSocketException("Python Socks is needed for SOCKS proxying but is not available")

    hostname, port, resource, is_secure = parse_url(url)

    # NOTE(review): if proxy.proxy_protocol is none of the four values below
    # (e.g. "http" slipped through), rdns/proxy_type stay unbound and the
    # Proxy.create call raises NameError — callers guard against this.
    if proxy.proxy_protocol == "socks5":
        rdns = False
        proxy_type = ProxyType.SOCKS5
    if proxy.proxy_protocol == "socks4":
        rdns = False
        proxy_type = ProxyType.SOCKS4
    # socks5h and socks4a send DNS through proxy
    if proxy.proxy_protocol == "socks5h":
        rdns = True
        proxy_type = ProxyType.SOCKS5
    if proxy.proxy_protocol == "socks4a":
        rdns = True
        proxy_type = ProxyType.SOCKS4

    ws_proxy = Proxy.create(
        proxy_type=proxy_type,
        host=proxy.proxy_host,
        port=int(proxy.proxy_port),
        username=proxy.auth[0] if proxy.auth else None,
        password=proxy.auth[1] if proxy.auth else None,
        rdns=rdns)

    sock = ws_proxy.connect(hostname, port, timeout=proxy.proxy_timeout)

    if is_secure and HAVE_SSL:
        sock = _ssl_socket(sock, options.sslopt, hostname)
    elif is_secure:
        raise WebSocketException("SSL not available.")

    return sock, (hostname, port, resource)
+
+
def connect(url: str, options, proxy, socket):
    """Create a connected (and possibly SSL-wrapped/tunneled) socket for *url*.

    Returns (sock, (hostname, port, resource)).  When a pre-initialized
    *socket* is supplied it is returned as-is.  NOTE(review): the `socket`
    parameter shadows the stdlib module of the same name; this function
    itself does not reference the module, so the shadowing is harmless here.
    """
    # Use _start_proxied_socket() only for socks4 or socks5 proxy
    # Use _tunnel() for http proxy
    # TODO: Use python-socks for http protocol also, to standardize flow
    if proxy.proxy_host and not socket and not (proxy.proxy_protocol == "http"):
        return _start_proxied_socket(url, options, proxy)

    hostname, port_from_url, resource, is_secure = parse_url(url)

    if socket:
        return socket, (hostname, port_from_url, resource)

    addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
        hostname, port_from_url, is_secure, proxy)
    if not addrinfo_list:
        raise WebSocketException(
            "Host not found.: " + hostname + ":" + str(port_from_url))

    sock = None
    try:
        sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
        # An HTTP proxy requires a CONNECT tunnel before (optionally)
        # starting TLS to the real host.
        if need_tunnel:
            sock = _tunnel(sock, hostname, port_from_url, auth)

        if is_secure:
            if HAVE_SSL:
                sock = _ssl_socket(sock, options.sslopt, hostname)
            else:
                raise WebSocketException("SSL not available.")

        return sock, (hostname, port_from_url, resource)
    except:
        # Never leak a half-open socket on failure.
        if sock:
            sock.close()
        raise
+
+
def _get_addrinfo_list(hostname, port, is_secure, proxy):
    """Resolve the connect target (direct host, or its HTTP proxy).

    Returns (addrinfo_list, need_tunnel, auth): need_tunnel is True when an
    HTTP proxy is in use, auth is the proxy credentials in that case.
    Raises WebSocketAddressException on resolution failure.
    """
    phost, pport, pauth = get_proxy_info(
        hostname, is_secure, proxy.proxy_host, proxy.proxy_port, proxy.auth, proxy.no_proxy)
    try:
        # Force SOCK_STREAM: on Windows 10, getaddrinfo without a socktype
        # can return socktype 0, which later fails socket creation with
        # "Socket type must be stream or datagram, not 0" / EINVAL.
        if not phost:
            direct = socket.getaddrinfo(
                hostname, port, 0, socket.SOCK_STREAM, socket.SOL_TCP)
            return direct, False, None
        via_proxy = socket.getaddrinfo(
            phost, pport or 80, 0, socket.SOCK_STREAM, socket.SOL_TCP)
        return via_proxy, True, pauth
    except socket.gaierror as e:
        raise WebSocketAddressException(e)
+
+
def _open_socket(addrinfo_list, sockopt, timeout):
    """Try each addrinfo entry in turn and return the first connected socket.

    Connection-refused/network-unreachable errors move on to the next
    address; any other socket error is raised immediately (annotated with
    .remote_ip).  If every address was refused, the last error is re-raised.
    """
    err = None
    for addrinfo in addrinfo_list:
        family, socktype, proto = addrinfo[:3]
        sock = socket.socket(family, socktype, proto)
        sock.settimeout(timeout)
        for opts in DEFAULT_SOCKET_OPTION:
            sock.setsockopt(*opts)
        for opts in sockopt:
            sock.setsockopt(*opts)

        address = addrinfo[4]
        err = None
        # The while loop runs at most one connect() attempt: every path
        # either break-s (connected), continue-s with err set (ending the
        # loop via its condition), or raises.
        while not err:
            try:
                sock.connect(address)
            except socket.error as error:
                sock.close()
                error.remote_ip = str(address[0])
                # errno.WSAECONNREFUSED only exists on Windows; accessing it
                # raises AttributeError elsewhere, hence the fallback tuple.
                try:
                    eConnRefused = (errno.ECONNREFUSED, errno.WSAECONNREFUSED, errno.ENETUNREACH)
                except AttributeError:
                    eConnRefused = (errno.ECONNREFUSED, errno.ENETUNREACH)
                if error.errno in eConnRefused:
                    err = error
                    continue
                else:
                    raise error
            else:
                break
        else:
            # while ended without break (refused) -> try the next address.
            continue
        break
    else:
        # for ended without break: no address connected; re-raise last error.
        if err:
            raise err

    return sock
+
+
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
    """Wrap *sock* in TLS with SNI, building an SSLContext from *sslopt*
    unless the caller supplied one via ``sslopt['context']``.

    NOTE(review): the *check_hostname* parameter is never used in this
    body — the value is re-read from sslopt below; confirm against callers.
    """
    context = sslopt.get('context', None)
    if not context:
        context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_TLS_CLIENT))
        # Non default context need to manually enable SSLKEYLOGFILE support by setting the keylog_filename attribute.
        # For more details see also:
        # * https://docs.python.org/3.8/library/ssl.html?highlight=sslkeylogfile#context-creation
        # * https://docs.python.org/3.8/library/ssl.html?highlight=sslkeylogfile#ssl.SSLContext.keylog_filename
        context.keylog_filename = os.environ.get("SSLKEYLOGFILE", None)

        if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
            cafile = sslopt.get('ca_certs', None)
            capath = sslopt.get('ca_cert_path', None)
            if cafile or capath:
                context.load_verify_locations(cafile=cafile, capath=capath)
            elif hasattr(context, 'load_default_certs'):
                # No explicit CA bundle: fall back to the system store.
                context.load_default_certs(ssl.Purpose.SERVER_AUTH)
        if sslopt.get('certfile', None):
            # Client certificate for mutual TLS.
            context.load_cert_chain(
                sslopt['certfile'],
                sslopt.get('keyfile', None),
                sslopt.get('password', None),
            )

        # Python 3.10 switch to PROTOCOL_TLS_CLIENT defaults to "cert_reqs = ssl.CERT_REQUIRED" and "check_hostname = True"
        # If both disabled, set check_hostname before verify_mode
        # see https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
        if sslopt.get('cert_reqs', ssl.CERT_NONE) == ssl.CERT_NONE and not sslopt.get('check_hostname', False):
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        else:
            context.check_hostname = sslopt.get('check_hostname', True)
            context.verify_mode = sslopt.get('cert_reqs', ssl.CERT_REQUIRED)

        if 'ciphers' in sslopt:
            context.set_ciphers(sslopt['ciphers'])
        if 'cert_chain' in sslopt:
            certfile, keyfile, password = sslopt['cert_chain']
            context.load_cert_chain(certfile, keyfile, password)
        if 'ecdh_curve' in sslopt:
            context.set_ecdh_curve(sslopt['ecdh_curve'])

    return context.wrap_socket(
        sock,
        do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
        suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
        server_hostname=hostname,
    )
+
+
def _ssl_socket(sock, user_sslopt, hostname):
    """Apply TLS to *sock* using the user-supplied ssl options.

    A CA bundle may also be injected through the
    WEBSOCKET_CLIENT_CA_BUNDLE environment variable: a file path becomes
    ``ca_certs``, a directory becomes ``ca_cert_path``. Explicit user
    options always take precedence over the environment.
    """
    sslopt = {'cert_reqs': ssl.CERT_REQUIRED}
    sslopt.update(user_sslopt)

    cert_bundle = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
    if cert_bundle:
        if os.path.isfile(cert_bundle) and user_sslopt.get('ca_certs', None) is None:
            sslopt['ca_certs'] = cert_bundle
        elif os.path.isdir(cert_bundle) and user_sslopt.get('ca_cert_path', None) is None:
            sslopt['ca_cert_path'] = cert_bundle

    # An explicit server_hostname overrides the connection hostname for SNI.
    if sslopt.get('server_hostname', None):
        hostname = sslopt['server_hostname']

    check_hostname = sslopt.get('check_hostname', True)
    return _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
+
+
def _tunnel(sock, host, port, auth):
    """Establish an HTTP CONNECT tunnel to host:port through the proxy
    already connected on *sock*.

    *auth* is an optional (username, password) pair used for Basic proxy
    authentication. Returns the socket once the proxy answers 200;
    raises WebSocketProxyException otherwise.
    """
    debug("Connecting proxy...")
    request_lines = [
        "CONNECT {h}:{p} HTTP/1.1\r\n".format(h=host, p=port),
        "Host: {h}:{p}\r\n".format(h=host, p=port),
    ]

    # TODO: support digest auth.
    if auth and auth[0]:
        auth_str = auth[0]
        if auth[1]:
            auth_str += ":" + auth[1]
        encoded_str = base64encode(auth_str.encode()).strip().decode().replace('\n', '')
        request_lines.append("Proxy-Authorization: Basic {str}\r\n".format(str=encoded_str))
    request_lines.append("\r\n")
    connect_header = "".join(request_lines)
    dump("request header", connect_header)

    send(sock, connect_header)

    try:
        status, resp_headers, status_message = read_headers(sock)
    except Exception as e:
        raise WebSocketProxyException(str(e))

    if status != 200:
        raise WebSocketProxyException(
            "failed CONNECT via proxy status: {status}".format(status=status))

    return sock
+
+
def read_headers(sock):
    """Read an HTTP status line and header block from *sock*.

    Returns ``(status_code, headers_dict, status_message)``. Header
    names are lowercased; repeated Set-Cookie values are joined with
    '; '. Raises WebSocketException on a malformed header line.
    """
    status = None
    status_message = None
    headers = {}
    trace("--- response header ---")

    while True:
        line = recv_line(sock)
        line = line.decode('utf-8').strip()
        if not line:
            # Blank line terminates the header block.
            break
        trace(line)
        if not status:
            # First line, e.g. "HTTP/1.1 200 OK" -> code 200, message "OK".
            status_info = line.split(" ", 2)
            status = int(status_info[1])
            if len(status_info) > 2:
                status_message = status_info[2]
        else:
            kv = line.split(":", 1)
            if len(kv) == 2:
                key, value = kv
                if key.lower() == "set-cookie" and headers.get("set-cookie"):
                    # Merge multiple Set-Cookie headers into one value.
                    headers["set-cookie"] = headers.get("set-cookie") + "; " + value.strip()
                else:
                    headers[key.lower()] = value.strip()
            else:
                raise WebSocketException("Invalid header")

    trace("-----------------------")

    return status, headers, status_message
diff --git a/contrib/python/websocket-client/py3/websocket/_logging.py b/contrib/python/websocket-client/py3/websocket/_logging.py
new file mode 100644
index 0000000000..806de4d41f
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_logging.py
@@ -0,0 +1,93 @@
+import logging
+
+"""
+_logging.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
# Module-level logger shared by the whole websocket package.
_logger = logging.getLogger('websocket')
try:
    from logging import NullHandler
except ImportError:
    # Fallback for very old Pythons that lack logging.NullHandler:
    # a no-op handler that silently drops every record.
    class NullHandler(logging.Handler):
        def emit(self, record) -> None:
            pass

# Attach a no-op handler so applications that never configure logging
# don't see "No handlers could be found" warnings from this library.
_logger.addHandler(NullHandler())

# Module-global switch toggled by enableTrace(); gates dump()/trace().
_traceEnabled = False
+
+__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
+ "isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
+
+
def enableTrace(traceable: bool,
                handler: logging.StreamHandler = None,
                level: str = "DEBUG") -> None:
    """
    Turn on/off the traceability.

    Parameters
    ----------
    traceable: bool
        If set to True, traceability is enabled.
    handler: logging.Handler
        Handler that receives the trace output. When omitted, a fresh
        StreamHandler is created per call. (The previous default
        ``handler=logging.StreamHandler()`` was evaluated once at import
        time, so every enableTrace() call re-added the very same shared
        handler object — the mutable-default-argument pitfall.)
    level: str
        Name of the logging level to apply while tracing.
    """
    global _traceEnabled
    _traceEnabled = traceable
    if traceable:
        if handler is None:
            handler = logging.StreamHandler()
        _logger.addHandler(handler)
        _logger.setLevel(getattr(logging, level))
+
+
def dump(title: str, message: str) -> None:
    """Log *message* framed by a *title* banner; no-op unless tracing is
    enabled via enableTrace()."""
    if _traceEnabled:
        _logger.debug("--- " + title + " ---")
        _logger.debug(message)
        _logger.debug("-----------------------")
+
+
def error(msg: str) -> None:
    """Log *msg* at ERROR level on the shared 'websocket' logger."""
    _logger.error(msg)
+
+
def warning(msg: str) -> None:
    """Log *msg* at WARNING level on the shared 'websocket' logger."""
    _logger.warning(msg)
+
+
def debug(msg: str) -> None:
    """Log *msg* at DEBUG level on the shared 'websocket' logger."""
    _logger.debug(msg)
+
+
def info(msg: str) -> None:
    """Log *msg* at INFO level on the shared 'websocket' logger."""
    _logger.info(msg)
+
+
def trace(msg: str) -> None:
    """Log *msg* at DEBUG level, but only while tracing is enabled."""
    if _traceEnabled:
        _logger.debug(msg)
+
+
def isEnabledForError() -> bool:
    """Return True when the shared logger would emit ERROR records."""
    return _logger.isEnabledFor(logging.ERROR)
+
+
def isEnabledForDebug() -> bool:
    """Return True when the shared logger would emit DEBUG records."""
    return _logger.isEnabledFor(logging.DEBUG)
+
+
def isEnabledForTrace() -> bool:
    """Return the module-level trace flag set by enableTrace()."""
    return _traceEnabled
diff --git a/contrib/python/websocket-client/py3/websocket/_socket.py b/contrib/python/websocket-client/py3/websocket/_socket.py
new file mode 100644
index 0000000000..1575a0c0c3
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_socket.py
@@ -0,0 +1,181 @@
+import errno
+import selectors
+import socket
+
+from typing import Union
+
+from ._exceptions import *
+from ._ssl_compat import *
+from ._utils import *
+
+"""
+_socket.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
# Baseline options applied to every socket: disable Nagle and, where the
# platform exposes the constants, enable TCP keepalive with moderately
# aggressive probe timing (idle 30s, interval 10s, 3 probes).
DEFAULT_SOCKET_OPTION = [(socket.SOL_TCP, socket.TCP_NODELAY, 1)]
if hasattr(socket, "SO_KEEPALIVE"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
if hasattr(socket, "TCP_KEEPIDLE"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPIDLE, 30))
if hasattr(socket, "TCP_KEEPINTVL"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPINTVL, 10))
if hasattr(socket, "TCP_KEEPCNT"):
    DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPCNT, 3))

# Module-wide default connect timeout, managed via set/getdefaulttimeout.
_default_timeout = None

__all__ = ["DEFAULT_SOCKET_OPTION", "sock_opt", "setdefaulttimeout", "getdefaulttimeout",
           "recv", "recv_line", "send"]
+
+
class sock_opt:
    """Bundle of per-connection socket configuration: raw socket options
    (*sockopt*), TLS options (*sslopt*) and the connect timeout."""

    def __init__(self, sockopt: list, sslopt: dict) -> None:
        # Normalize None into fresh, independent containers.
        self.sockopt = [] if sockopt is None else sockopt
        self.sslopt = {} if sslopt is None else sslopt
        # Set later by the caller; None means "use the module default".
        self.timeout = None
+
+
def setdefaulttimeout(timeout: Union[int, float, None]) -> None:
    """
    Set the module-wide default connect timeout.

    Parameters
    ----------
    timeout: int or float or None
        Default socket timeout in seconds; None clears the default.
    """
    global _default_timeout
    _default_timeout = timeout
+
+
def getdefaulttimeout() -> Union[int, float, None]:
    """
    Return the module-wide default connect timeout in seconds, or None
    when no default has been configured via setdefaulttimeout().
    """
    return _default_timeout
+
+
def recv(sock: socket.socket, bufsize: int) -> bytes:
    """Receive up to *bufsize* bytes from *sock*.

    Honors the socket's timeout (translated into
    WebSocketTimeoutException) and raises
    WebSocketConnectionClosedException on an empty read (peer closed)
    or when *sock* is falsy.
    """
    if not sock:
        raise WebSocketConnectionClosedException("socket is already closed.")

    def _recv():
        # First attempt a direct read. EAGAIN/EWOULDBLOCK (or an SSL
        # "want read") means no data is buffered yet, so wait for
        # readability with a selector and retry once.
        try:
            return sock.recv(bufsize)
        except SSLWantReadError:
            pass
        except socket.error as exc:
            error_code = extract_error_code(exc)
            if error_code != errno.EAGAIN and error_code != errno.EWOULDBLOCK:
                raise

        sel = selectors.DefaultSelector()
        sel.register(sock, selectors.EVENT_READ)

        r = sel.select(sock.gettimeout())
        sel.close()

        if r:
            return sock.recv(bufsize)
        # NOTE(review): on selector timeout this falls through and
        # returns None, which the caller below reports as a closed
        # connection rather than a timeout — confirm this is intended.

    try:
        if sock.gettimeout() == 0:
            # Non-blocking socket: read directly, never wait.
            bytes_ = sock.recv(bufsize)
        else:
            bytes_ = _recv()
    except TimeoutError:
        raise WebSocketTimeoutException("Connection timed out")
    except socket.timeout as e:
        message = extract_err_message(e)
        raise WebSocketTimeoutException(message)
    except SSLError as e:
        # Some SSL backends report timeouts as SSLError with a
        # "timed out" message rather than socket.timeout.
        message = extract_err_message(e)
        if isinstance(message, str) and 'timed out' in message:
            raise WebSocketTimeoutException(message)
        else:
            raise

    if not bytes_:
        # Empty read: orderly shutdown by the peer.
        raise WebSocketConnectionClosedException(
            "Connection to remote host was lost.")

    return bytes_
+
+
def recv_line(sock: socket.socket) -> bytes:
    """Read from *sock* one byte at a time until a LF is received.

    Returns the accumulated bytes, including the trailing ``b'\\n'``.
    """
    buf = bytearray()
    while True:
        ch = recv(sock, 1)
        buf += ch
        if ch == b'\n':
            break
    return bytes(buf)
+
+
def send(sock: socket.socket, data: Union[bytes, str]) -> int:
    """Send *data* (str is UTF-8 encoded first) on *sock*.

    Returns the number of bytes actually written, which may be less
    than ``len(data)``. Raises WebSocketConnectionClosedException when
    *sock* is falsy and WebSocketTimeoutException on timeouts.
    """
    if isinstance(data, str):
        data = data.encode('utf-8')

    if not sock:
        raise WebSocketConnectionClosedException("socket is already closed.")

    def _send():
        # Attempt a direct write. If the socket is not currently
        # writable (EAGAIN/EWOULDBLOCK or an SSL "want write"), wait
        # for writability with a selector and retry once.
        try:
            return sock.send(data)
        except SSLWantWriteError:
            pass
        except socket.error as exc:
            error_code = extract_error_code(exc)
            if error_code is None:
                raise
            if error_code != errno.EAGAIN and error_code != errno.EWOULDBLOCK:
                raise

        sel = selectors.DefaultSelector()
        sel.register(sock, selectors.EVENT_WRITE)

        w = sel.select(sock.gettimeout())
        sel.close()

        if w:
            return sock.send(data)

    try:
        if sock.gettimeout() == 0:
            # Non-blocking socket: write directly, never wait.
            return sock.send(data)
        else:
            return _send()
    except socket.timeout as e:
        message = extract_err_message(e)
        raise WebSocketTimeoutException(message)
    except Exception as e:
        # Some SSL backends surface timeouts as generic exceptions
        # whose message contains "timed out"; everything else re-raises.
        message = extract_err_message(e)
        if isinstance(message, str) and "timed out" in message:
            raise WebSocketTimeoutException(message)
        else:
            raise
diff --git a/contrib/python/websocket-client/py3/websocket/_ssl_compat.py b/contrib/python/websocket-client/py3/websocket/_ssl_compat.py
new file mode 100644
index 0000000000..b2eba3877b
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_ssl_compat.py
@@ -0,0 +1,39 @@
+"""
+_ssl_compat.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
# Public names re-exported whether or not the ssl module is available.
__all__ = ["HAVE_SSL", "ssl", "SSLError", "SSLWantReadError", "SSLWantWriteError"]

try:
    import ssl
    from ssl import SSLError
    from ssl import SSLWantReadError
    from ssl import SSLWantWriteError
    HAVE_SSL = True
except ImportError:
    # dummy class of SSLError for environment without ssl support
    class SSLError(Exception):
        pass

    class SSLWantReadError(Exception):
        pass

    class SSLWantWriteError(Exception):
        pass

    # Callers must check HAVE_SSL (or `ssl is None`) before using TLS.
    ssl = None
    HAVE_SSL = False
diff --git a/contrib/python/websocket-client/py3/websocket/_url.py b/contrib/python/websocket-client/py3/websocket/_url.py
new file mode 100644
index 0000000000..a330615485
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_url.py
@@ -0,0 +1,169 @@
+import os
+import socket
+import struct
+
+from typing import Optional
+from urllib.parse import unquote, urlparse
+
+"""
+_url.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+__all__ = ["parse_url", "get_proxy_info"]
+
+
def parse_url(url: str) -> tuple:
    """
    Split a ws/wss URL into its connection components.

    Parameters
    ----------
    url: str
        url string.

    Returns
    -------
    tuple of (hostname, port, resource, is_secure); the port defaults to
    80 for ws and 443 for wss, and the resource includes the query string.
    """
    if ":" not in url:
        raise ValueError("url is invalid")

    scheme, rest = url.split(":", 1)

    parsed = urlparse(rest, scheme="http")
    if not parsed.hostname:
        raise ValueError("hostname is invalid")
    hostname = parsed.hostname

    port = parsed.port or 0

    if scheme == "ws":
        is_secure = False
        default_port = 80
    elif scheme == "wss":
        is_secure = True
        default_port = 443
    else:
        raise ValueError("scheme %s is invalid" % scheme)
    if not port:
        port = default_port

    resource = parsed.path or "/"
    if parsed.query:
        resource += "?" + parsed.query

    return hostname, port, resource, is_secure
+
+
+DEFAULT_NO_PROXY_HOST = ["localhost", "127.0.0.1"]
+
+
+def _is_ip_address(addr: str) -> bool:
+ try:
+ socket.inet_aton(addr)
+ except socket.error:
+ return False
+ else:
+ return True
+
+
def _is_subnet_address(hostname: str) -> bool:
    """Return True when *hostname* is IPv4 CIDR notation (``a.b.c.d/n``).

    The prefix length is accepted in the inclusive range 0..32; the
    previous ``< 32`` bound incorrectly rejected valid /32 single-host
    subnets (which _is_address_in_network handles fine: a /32 mask is
    simply 0xFFFFFFFF).
    """
    try:
        addr, netmask = hostname.split("/")
        return _is_ip_address(addr) and 0 <= int(netmask) <= 32
    except ValueError:
        # Wrong number of '/' parts or a non-integer prefix length.
        return False
+
+
+def _is_address_in_network(ip: str, net: str) -> bool:
+ ipaddr = struct.unpack('!I', socket.inet_aton(ip))[0]
+ netaddr, netmask = net.split('/')
+ netaddr = struct.unpack('!I', socket.inet_aton(netaddr))[0]
+
+ netmask = (0xFFFFFFFF << (32 - int(netmask))) & 0xFFFFFFFF
+ return ipaddr & netmask == netaddr
+
+
def _is_no_proxy_host(hostname: str, no_proxy: Optional[list]) -> bool:
    """Return True when *hostname* should bypass the proxy.

    When *no_proxy* is None/empty, falls back to the comma-separated
    no_proxy/NO_PROXY environment variables, then to
    DEFAULT_NO_PROXY_HOST. Entries may be exact hostnames, '*',
    '.suffix' domain patterns, or (for IP hostnames) CIDR subnets.
    """
    if not no_proxy:
        v = os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")).replace(" ", "")
        if v:
            no_proxy = v.split(",")
    if not no_proxy:
        no_proxy = DEFAULT_NO_PROXY_HOST

    if '*' in no_proxy:
        # Wildcard: bypass the proxy for everything.
        return True
    if hostname in no_proxy:
        return True
    if _is_ip_address(hostname):
        # IP hostnames additionally match any listed CIDR subnet.
        return any([_is_address_in_network(hostname, subnet) for subnet in no_proxy if _is_subnet_address(subnet)])
    for domain in [domain for domain in no_proxy if domain.startswith('.')]:
        # '.example.com' style suffix patterns.
        if hostname.endswith(domain):
            return True
    return False
+
+
def get_proxy_info(
        hostname: str, is_secure: bool, proxy_host: Optional[str] = None, proxy_port: int = 0, proxy_auth: Optional[tuple] = None,
        no_proxy: Optional[list] = None, proxy_type: str = 'http') -> tuple:
    """
    Try to retrieve proxy host and port from environment
    if not provided in options.
    Result is (proxy_host, proxy_port, proxy_auth).
    proxy_auth is tuple of username and password
    of proxy authentication information.

    Parameters
    ----------
    hostname: str
        Websocket server name.
    is_secure: bool
        Is the connection secure? (wss) looks for "https_proxy" in env
        instead of "http_proxy"
    proxy_host: str
        http proxy host name.
    proxy_port: str or int
        http proxy port.
    no_proxy: list
        Whitelisted host names that don't use the proxy.
    proxy_auth: tuple
        HTTP proxy auth information. Tuple of username and password. Default is None.
    proxy_type: str
        Specify the proxy protocol (http, socks4, socks4a, socks5, socks5h). Default is "http".
        Use socks4a or socks5h if you want to send DNS requests through the proxy.
    """
    if _is_no_proxy_host(hostname, no_proxy):
        return None, 0, None

    if proxy_host:
        # Explicit options win over the environment.
        port = proxy_port
        auth = proxy_auth
        return proxy_host, port, auth

    env_key = "https_proxy" if is_secure else "http_proxy"
    value = os.environ.get(env_key, os.environ.get(env_key.upper(), "")).replace(" ", "")
    if value:
        proxy = urlparse(value)
        if proxy.username:
            # BUG FIX: a URL like http://user@proxy:8080 has a username
            # but no password; the old unconditional unquote(proxy.password)
            # raised TypeError on None. Fall back to an empty password.
            auth = (unquote(proxy.username),
                    unquote(proxy.password) if proxy.password else "")
        else:
            auth = None
        return proxy.hostname, proxy.port, auth

    return None, 0, None
diff --git a/contrib/python/websocket-client/py3/websocket/_utils.py b/contrib/python/websocket-client/py3/websocket/_utils.py
new file mode 100644
index 0000000000..62ba0b01b8
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_utils.py
@@ -0,0 +1,106 @@
+from typing import Union
+
+"""
+_url.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+__all__ = ["NoLock", "validate_utf8", "extract_err_message", "extract_error_code"]
+
+
class NoLock:
    """Drop-in no-op replacement for a lock: a context manager that
    acquires and releases nothing."""

    def __enter__(self) -> None:
        return None

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Returning None (falsy) lets any exception propagate, exactly
        # like a real lock's __exit__.
        return None
+
+
try:
    # If wsaccel is available we use compiled routines to validate UTF-8
    # strings.
    from wsaccel.utf8validator import Utf8Validator

    def _validate_utf8(utfbytes: bytes) -> bool:
        # validate() returns a tuple whose first element is the overall
        # validity flag.
        return Utf8Validator().validate(utfbytes)[0]

except ImportError:
    # UTF-8 validator
    # python implementation of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/

    _UTF8_ACCEPT = 0
    _UTF8_REJECT = 12

    _UTF8D = [
        # The first part of the table maps bytes to character classes that
        # to reduce the size of the transition table and create bitmasks.
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
        8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
        10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,

        # The second part is a transition table that maps a combination
        # of a state of the automaton and a character class to a state.
        0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12,
        12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12,
        12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12,
        12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12,
        12,36,12,12,12,12,12,12,12,12,12,12, ]

    def _decode(state: int, codep: int, ch: int) -> tuple:
        # One DFA step: look up the byte's character class, fold the
        # byte into the code point accumulator, and advance the state.
        tp = _UTF8D[ch]

        codep = (ch & 0x3f) | (codep << 6) if (
            state != _UTF8_ACCEPT) else (0xff >> tp) & ch
        state = _UTF8D[256 + state + tp]

        return state, codep

    def _validate_utf8(utfbytes: Union[str, bytes]) -> bool:
        # Feed every byte through the DFA; entering the REJECT state
        # means the sequence is not valid UTF-8.
        state = _UTF8_ACCEPT
        codep = 0
        for i in utfbytes:
            state, codep = _decode(state, codep, i)
            if state == _UTF8_REJECT:
                return False

        return True
+
+
def validate_utf8(utfbytes: Union[str, bytes]) -> bool:
    """
    validate utf8 byte string.
    utfbytes: utf byte string to check.
    return value: if valid utf8 string, return true. Otherwise, return false.
    """
    # Delegates to the wsaccel-accelerated or pure-Python implementation
    # selected at import time.
    return _validate_utf8(utfbytes)
+
+
def extract_err_message(exception: Exception) -> Union[str, None]:
    """Return the first positional argument of *exception*, or None when
    it was raised without arguments."""
    return exception.args[0] if exception.args else None
+
+
def extract_error_code(exception: Exception) -> Union[int, None]:
    """Return the errno-style integer code from *exception* when it was
    raised as ``Error(code, message)``; otherwise None."""
    args = exception.args
    if len(args) > 1 and isinstance(args[0], int):
        return args[0]
    return None
diff --git a/contrib/python/websocket-client/py3/websocket/_wsdump.py b/contrib/python/websocket-client/py3/websocket/_wsdump.py
new file mode 100644
index 0000000000..d637ce2b45
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/_wsdump.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python3
+
+"""
+wsdump.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import argparse
+import code
+import sys
+import threading
+import time
+import ssl
+import gzip
+import zlib
+from urllib.parse import urlparse
+
+import websocket
+
+try:
+ import readline
+except ImportError:
+ pass
+
+
def get_encoding() -> str:
    """Return stdin's character encoding, lowercased, defaulting to
    'utf-8' when stdin does not report one."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
+
+
# Frame opcodes that carry application payload (text/binary).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Character encoding used when reading user input from stdin.
ENCODING = get_encoding()
+
+
class VAction(argparse.Action):
    """Verbosity flag action: accepts ``-v``, ``-v 2`` or ``-vvv`` style
    arguments and stores the resulting integer on the namespace."""

    def __call__(self, parser: argparse.Namespace, args: tuple, values: str, option_string: str = None) -> None:
        # Bare -v means verbosity 1; otherwise accept either an integer
        # ("-v 2") or a run of v's ("-vvv" -> value "vv" -> 3).
        if values is None:
            verbosity = 1
        else:
            try:
                verbosity = int(values)
            except ValueError:
                verbosity = values.count("v") + 1
        setattr(args, self.dest, verbosity)
+
+
def parse_args() -> argparse.Namespace:
    """Define and parse the wsdump command-line interface."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.events/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    # -v accumulates via the custom VAction (supports -v, -vv, -v 2).
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")

    return parser.parse_args()
+
+
class RawInput:
    """Mixin that reads a line from stdin and normalizes it to UTF-8 bytes."""

    def raw_input(self, prompt: str = "") -> bytes:
        # input() returns str on Python 3, so in practice the elif branch
        # always runs and the result is UTF-8 encoded bytes (the original
        # `-> str` annotation was inaccurate).
        line = input(prompt)

        if ENCODING and ENCODING != "utf-8" and not isinstance(line, str):
            # Legacy path for byte input in a non-UTF-8 locale.
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, str):
            line = line.encode("utf-8")

        return line
+
+
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """REPL-style console: incoming messages are printed in blue above a
    persistent '> ' prompt."""

    def write(self, data: str) -> None:
        sys.stdout.write("\033[2K\033[E")  # erase the current prompt line
        # sys.stdout.write("\n")
        sys.stdout.write("\033[34m< " + data + "\033[39m")  # blue "< msg"
        sys.stdout.write("\n> ")  # redraw the prompt
        sys.stdout.flush()

    def read(self) -> str:
        return self.raw_input("> ")
+
+
class NonInteractive(RawInput):
    """Plain console used in --raw mode: no prompts, no ANSI colors."""

    def write(self, data: str) -> None:
        sys.stdout.write(data)
        sys.stdout.write("\n")
        sys.stdout.flush()

    def read(self) -> str:
        return self.raw_input("")
+
+
def main() -> None:
    """Entry point: connect to the URL from the command line, spawn a
    receiver thread, and run a send loop until EOF or Ctrl+C."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # -n/--nocert: disable all certificate validation.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv() -> tuple:
        # Read one frame, translating protocol frames (close/ping) into
        # (opcode, data) pairs the print loop can handle uniformly.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, ""
        if not frame:
            raise websocket.WebSocketException("Not a valid frame {frame}".format(frame=frame))
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            # Echo the close handshake back to the server.
            ws.send_close()
            return frame.opcode, ""
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws() -> None:
        # Receiver thread: print every incoming message, transparently
        # decompressing gzip/zlib payloads, until a close frame arrives.
        while True:
            opcode, data = recv()
            msg = None
            if opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magick
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                # Not gzip: try raw-deflate (zlib without header).
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass

            if isinstance(data, bytes):
                # Still binary after decompression attempts: show repr.
                data = repr(data)

            if args.verbose:
                msg = "{opcode}: {data}".format(opcode=websocket.ABNF.OPCODE_MAP.get(opcode), data=data)
            else:
                msg = data

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    # Foreground send loop: read a line, send it; exit on Ctrl+C or EOF
    # (optionally lingering --eof-wait seconds so late replies print).
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
+
+
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level catch: show a short error message instead of a traceback.
        print(e)
diff --git a/contrib/python/websocket-client/py3/websocket/tests/__init__.py b/contrib/python/websocket-client/py3/websocket/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/__init__.py
diff --git a/contrib/python/websocket-client/py3/websocket/tests/data/header01.txt b/contrib/python/websocket-client/py3/websocket/tests/data/header01.txt
new file mode 100644
index 0000000000..d44d24c205
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/data/header01.txt
@@ -0,0 +1,6 @@
+HTTP/1.1 101 WebSocket Protocol Handshake
+Connection: Upgrade
+Upgrade: WebSocket
+Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0=
+some_header: something
+
diff --git a/contrib/python/websocket-client/py3/websocket/tests/data/header02.txt b/contrib/python/websocket-client/py3/websocket/tests/data/header02.txt
new file mode 100644
index 0000000000..f481de928a
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/data/header02.txt
@@ -0,0 +1,6 @@
+HTTP/1.1 101 WebSocket Protocol Handshake
+Connection: Upgrade
+Upgrade WebSocket
+Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0=
+some_header: something
+
diff --git a/contrib/python/websocket-client/py3/websocket/tests/data/header03.txt b/contrib/python/websocket-client/py3/websocket/tests/data/header03.txt
new file mode 100644
index 0000000000..1a81dc70ce
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/data/header03.txt
@@ -0,0 +1,8 @@
+HTTP/1.1 101 WebSocket Protocol Handshake
+Connection: Upgrade, Keep-Alive
+Upgrade: WebSocket
+Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0=
+Set-Cookie: Token=ABCDE
+Set-Cookie: Token=FGHIJ
+some_header: something
+
diff --git a/contrib/python/websocket-client/py3/websocket/tests/test_abnf.py b/contrib/python/websocket-client/py3/websocket/tests/test_abnf.py
new file mode 100644
index 0000000000..dbf9b636a3
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/test_abnf.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+#
+import websocket as ws
+from websocket._abnf import *
+import unittest
+
+"""
+test_abnf.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
class ABNFTest(unittest.TestCase):
    """Unit tests for the ABNF frame class and frame_buffer."""

    def testInit(self):
        a = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
        self.assertEqual(a.fin, 0)
        self.assertEqual(a.rsv1, 0)
        self.assertEqual(a.rsv2, 0)
        self.assertEqual(a.rsv3, 0)
        self.assertEqual(a.opcode, 9)
        self.assertEqual(a.data, '')
        a_bad = ABNF(0,1,0,0, opcode=77)
        self.assertEqual(a_bad.rsv1, 1)
        self.assertEqual(a_bad.opcode, 77)

    def testValidate(self):
        a_invalid_ping = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
        self.assertRaises(ws._exceptions.WebSocketProtocolException, a_invalid_ping.validate, skip_utf8_validation=False)
        a_bad_rsv_value = ABNF(0,1,0,0, opcode=ABNF.OPCODE_TEXT)
        self.assertRaises(ws._exceptions.WebSocketProtocolException, a_bad_rsv_value.validate, skip_utf8_validation=False)
        a_bad_opcode = ABNF(0,0,0,0, opcode=77)
        self.assertRaises(ws._exceptions.WebSocketProtocolException, a_bad_opcode.validate, skip_utf8_validation=False)
        a_bad_close_frame = ABNF(0,0,0,0, opcode=ABNF.OPCODE_CLOSE, data=b'\x01')
        self.assertRaises(ws._exceptions.WebSocketProtocolException, a_bad_close_frame.validate, skip_utf8_validation=False)
        a_bad_close_frame_2 = ABNF(0,0,0,0, opcode=ABNF.OPCODE_CLOSE, data=b'\x01\x8a\xaa\xff\xdd')
        self.assertRaises(ws._exceptions.WebSocketProtocolException, a_bad_close_frame_2.validate, skip_utf8_validation=False)
        a_bad_close_frame_3 = ABNF(0,0,0,0, opcode=ABNF.OPCODE_CLOSE, data=b'\x03\xe7')
        self.assertRaises(ws._exceptions.WebSocketProtocolException, a_bad_close_frame_3.validate, skip_utf8_validation=True)

    def testMask(self):
        abnf_none_data = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING, mask=1, data=None)
        bytes_val = b"aaaa"
        self.assertEqual(abnf_none_data._get_masked(bytes_val), bytes_val)
        abnf_str_data = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING, mask=1, data="a")
        self.assertEqual(abnf_str_data._get_masked(bytes_val), b'aaaa\x00')

    def testFormat(self):
        abnf_bad_rsv_bits = ABNF(2,0,0,0, opcode=ABNF.OPCODE_TEXT)
        self.assertRaises(ValueError, abnf_bad_rsv_bits.format)
        abnf_bad_opcode = ABNF(0,0,0,0, opcode=5)
        self.assertRaises(ValueError, abnf_bad_opcode.format)
        abnf_length_10 = ABNF(0,0,0,0, opcode=ABNF.OPCODE_TEXT, data="abcdefghij")
        self.assertEqual(b'\x01', abnf_length_10.format()[0].to_bytes(1, 'big'))
        self.assertEqual(b'\x8a', abnf_length_10.format()[1].to_bytes(1, 'big'))
        self.assertEqual("fin=0 opcode=1 data=abcdefghij", abnf_length_10.__str__())
        abnf_length_20 = ABNF(0,0,0,0, opcode=ABNF.OPCODE_BINARY, data="abcdefghijabcdefghij")
        self.assertEqual(b'\x02', abnf_length_20.format()[0].to_bytes(1, 'big'))
        self.assertEqual(b'\x94', abnf_length_20.format()[1].to_bytes(1, 'big'))
        abnf_no_mask = ABNF(0,0,0,0, opcode=ABNF.OPCODE_TEXT, mask=0, data=b'\x01\x8a\xcc')
        self.assertEqual(b'\x01\x03\x01\x8a\xcc', abnf_no_mask.format())

    def testFrameBuffer(self):
        fb = frame_buffer(0, True)
        self.assertEqual(fb.recv, 0)
        self.assertEqual(fb.skip_utf8_validation, True)
        # BUG FIX: `fb.clear` was a bare attribute access that never ran;
        # call the method so the assertions below actually exercise clear().
        fb.clear()
        self.assertEqual(fb.header, None)
        self.assertEqual(fb.length, None)
        self.assertEqual(fb.mask, None)
        self.assertEqual(fb.has_mask(), False)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py3/websocket/tests/test_app.py b/contrib/python/websocket-client/py3/websocket/tests/test_app.py
new file mode 100644
index 0000000000..ff90a0aa87
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/test_app.py
@@ -0,0 +1,299 @@
+# -*- coding: utf-8 -*-
+#
+import os
+import os.path
+import threading
+import websocket as ws
+import ssl
+import unittest
+
+"""
+test_app.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Skip tests that access the internet unless TEST_WITH_INTERNET == 1
+TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
+# Skip tests relying on local websockets server unless LOCAL_WS_SERVER_PORT != -1
+LOCAL_WS_SERVER_PORT = os.environ.get('LOCAL_WS_SERVER_PORT', '-1')
+TEST_WITH_LOCAL_SERVER = LOCAL_WS_SERVER_PORT != '-1'
+TRACEABLE = True
+
+
+class WebSocketAppTest(unittest.TestCase):
+
+ class NotSetYet:
+ """ A marker class for signalling that a value hasn't been set yet.
+ """
+
+ def setUp(self):
+ ws.enableTrace(TRACEABLE)
+
+ WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
+ WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
+ WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()
+ WebSocketAppTest.on_error_data = WebSocketAppTest.NotSetYet()
+
+ def tearDown(self):
+ WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
+ WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
+ WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()
+ WebSocketAppTest.on_error_data = WebSocketAppTest.NotSetYet()
+
+ @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ def testKeepRunning(self):
+ """ A WebSocketApp should keep running as long as its self.keep_running
+ is not False (in the boolean context).
+ """
+
+ def on_open(self, *args, **kwargs):
+ """ Set the keep_running flag for later inspection and immediately
+ close the connection.
+ """
+ self.send("hello!")
+ WebSocketAppTest.keep_running_open = self.keep_running
+ self.keep_running = False
+
+ def on_message(wsapp, message):
+ print(message)
+ self.close()
+
+ def on_close(self, *args, **kwargs):
+ """ Set the keep_running flag for the test to use.
+ """
+ WebSocketAppTest.keep_running_close = self.keep_running
+
+ app = ws.WebSocketApp('ws://127.0.0.1:' + LOCAL_WS_SERVER_PORT, on_open=on_open, on_close=on_close, on_message=on_message)
+ app.run_forever()
+
+# @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ @unittest.skipUnless(False, "Test disabled for now (requires rel)")
+ def testRunForeverDispatcher(self):
+ """ A WebSocketApp should keep running as long as its self.keep_running
+ is not False (in the boolean context).
+ """
+
+ def on_open(self, *args, **kwargs):
+ """ Send a message, receive, and send one more
+ """
+ self.send("hello!")
+ self.recv()
+ self.send("goodbye!")
+
+ def on_message(wsapp, message):
+ print(message)
+ self.close()
+
+ app = ws.WebSocketApp('ws://127.0.0.1:' + LOCAL_WS_SERVER_PORT, on_open=on_open, on_message=on_message)
+ app.run_forever(dispatcher="Dispatcher") # doesn't work
+# app.run_forever(dispatcher=rel) # would work
+# rel.dispatch()
+
+ @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ def testRunForeverTeardownCleanExit(self):
+ """ The WebSocketApp.run_forever() method should return `False` when the application ends gracefully.
+ """
+ app = ws.WebSocketApp('ws://127.0.0.1:' + LOCAL_WS_SERVER_PORT)
+ threading.Timer(interval=0.2, function=app.close).start()
+ teardown = app.run_forever()
+ self.assertEqual(teardown, False)
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testSockMaskKey(self):
+ """ A WebSocketApp should forward the received mask_key function down
+ to the actual socket.
+ """
+
+ def my_mask_key_func():
+ return "\x00\x00\x00\x00"
+
+ app = ws.WebSocketApp('wss://api-pub.bitfinex.com/ws/1', get_mask_key=my_mask_key_func)
+
+        # if numpy is installed, this assertion fails
+ # Note: We can't use 'is' for comparing the functions directly, need to use 'id'.
+ self.assertEqual(id(app.get_mask_key), id(my_mask_key_func))
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testInvalidPingIntervalPingTimeout(self):
+ """ Test exception handling if ping_interval < ping_timeout
+ """
+
+ def on_ping(app, msg):
+ print("Got a ping!")
+ app.close()
+
+ def on_pong(app, msg):
+ print("Got a pong! No need to respond")
+ app.close()
+
+ app = ws.WebSocketApp('wss://api-pub.bitfinex.com/ws/1', on_ping=on_ping, on_pong=on_pong)
+ self.assertRaises(ws.WebSocketException, app.run_forever, ping_interval=1, ping_timeout=2, sslopt={"cert_reqs": ssl.CERT_NONE})
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testPingInterval(self):
+ """ Test WebSocketApp proper ping functionality
+ """
+
+ def on_ping(app, msg):
+ print("Got a ping!")
+ app.close()
+
+ def on_pong(app, msg):
+ print("Got a pong! No need to respond")
+ app.close()
+
+ app = ws.WebSocketApp('wss://api-pub.bitfinex.com/ws/1', on_ping=on_ping, on_pong=on_pong)
+ app.run_forever(ping_interval=2, ping_timeout=1, sslopt={"cert_reqs": ssl.CERT_NONE})
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testOpcodeClose(self):
+ """ Test WebSocketApp close opcode
+ """
+
+ app = ws.WebSocketApp('wss://tsock.us1.twilio.com/v3/wsconnect')
+ app.run_forever(ping_interval=2, ping_timeout=1, ping_payload="Ping payload")
+
+ # This is commented out because the URL no longer responds in the expected way
+ # @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ # def testOpcodeBinary(self):
+ # """ Test WebSocketApp binary opcode
+ # """
+ # app = ws.WebSocketApp('wss://streaming.vn.teslamotors.com/streaming/')
+ # app.run_forever(ping_interval=2, ping_timeout=1, ping_payload="Ping payload")
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testBadPingInterval(self):
+ """ A WebSocketApp handling of negative ping_interval
+ """
+ app = ws.WebSocketApp('wss://api-pub.bitfinex.com/ws/1')
+ self.assertRaises(ws.WebSocketException, app.run_forever, ping_interval=-5, sslopt={"cert_reqs": ssl.CERT_NONE})
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testBadPingTimeout(self):
+ """ A WebSocketApp handling of negative ping_timeout
+ """
+ app = ws.WebSocketApp('wss://api-pub.bitfinex.com/ws/1')
+ self.assertRaises(ws.WebSocketException, app.run_forever, ping_timeout=-3, sslopt={"cert_reqs": ssl.CERT_NONE})
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testCloseStatusCode(self):
+ """ Test extraction of close frame status code and close reason in WebSocketApp
+ """
+ def on_close(wsapp, close_status_code, close_msg):
+ print("on_close reached")
+
+ app = ws.WebSocketApp('wss://tsock.us1.twilio.com/v3/wsconnect', on_close=on_close)
+ closeframe = ws.ABNF(opcode=ws.ABNF.OPCODE_CLOSE, data=b'\x03\xe8no-init-from-client')
+ self.assertEqual([1000, 'no-init-from-client'], app._get_close_args(closeframe))
+
+ closeframe = ws.ABNF(opcode=ws.ABNF.OPCODE_CLOSE, data=b'')
+ self.assertEqual([None, None], app._get_close_args(closeframe))
+
+ app2 = ws.WebSocketApp('wss://tsock.us1.twilio.com/v3/wsconnect')
+ closeframe = ws.ABNF(opcode=ws.ABNF.OPCODE_CLOSE, data=b'')
+ self.assertEqual([None, None], app2._get_close_args(closeframe))
+
+ self.assertRaises(ws.WebSocketConnectionClosedException, app.send, data="test if connection is closed")
+
+ @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ def testCallbackFunctionException(self):
+ """ Test callback function exception handling """
+
+ exc = None
+ passed_app = None
+
+ def on_open(app):
+ raise RuntimeError("Callback failed")
+
+ def on_error(app, err):
+ nonlocal passed_app
+ passed_app = app
+ nonlocal exc
+ exc = err
+
+ def on_pong(app, msg):
+ app.close()
+
+ app = ws.WebSocketApp('ws://127.0.0.1:' + LOCAL_WS_SERVER_PORT, on_open=on_open, on_error=on_error, on_pong=on_pong)
+ app.run_forever(ping_interval=2, ping_timeout=1)
+
+ self.assertEqual(passed_app, app)
+ self.assertIsInstance(exc, RuntimeError)
+ self.assertEqual(str(exc), "Callback failed")
+
+ @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ def testCallbackMethodException(self):
+ """ Test callback method exception handling """
+
+ class Callbacks:
+ def __init__(self):
+ self.exc = None
+ self.passed_app = None
+ self.app = ws.WebSocketApp(
+ 'ws://127.0.0.1:' + LOCAL_WS_SERVER_PORT,
+ on_open=self.on_open,
+ on_error=self.on_error,
+ on_pong=self.on_pong
+ )
+ self.app.run_forever(ping_interval=2, ping_timeout=1)
+
+ def on_open(self, app):
+ raise RuntimeError("Callback failed")
+
+ def on_error(self, app, err):
+ self.passed_app = app
+ self.exc = err
+
+ def on_pong(self, app, msg):
+ app.close()
+
+ callbacks = Callbacks()
+
+ self.assertEqual(callbacks.passed_app, callbacks.app)
+ self.assertIsInstance(callbacks.exc, RuntimeError)
+ self.assertEqual(str(callbacks.exc), "Callback failed")
+
+ @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ def testReconnect(self):
+ """ Test reconnect """
+ pong_count = 0
+ exc = None
+
+ def on_error(app, err):
+ nonlocal exc
+ exc = err
+
+ def on_pong(app, msg):
+ nonlocal pong_count
+ pong_count += 1
+ if pong_count == 1:
+ # First pong, shutdown socket, enforce read error
+ app.sock.shutdown()
+ if pong_count >= 2:
+ # Got second pong after reconnect
+ app.close()
+
+ app = ws.WebSocketApp('ws://127.0.0.1:' + LOCAL_WS_SERVER_PORT, on_pong=on_pong, on_error=on_error)
+ app.run_forever(ping_interval=2, ping_timeout=1, reconnect=3)
+
+ self.assertEqual(pong_count, 2)
+ self.assertIsInstance(exc, ws.WebSocketTimeoutException)
+ self.assertEqual(str(exc), "ping/pong timed out")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py3/websocket/tests/test_cookiejar.py b/contrib/python/websocket-client/py3/websocket/tests/test_cookiejar.py
new file mode 100644
index 0000000000..8f835e9e7c
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/test_cookiejar.py
@@ -0,0 +1,116 @@
+import unittest
+from websocket._cookiejar import SimpleCookieJar
+
+"""
+test_cookiejar.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+class CookieJarTest(unittest.TestCase):
+ def testAdd(self):
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("")
+ self.assertFalse(cookie_jar.jar, "Cookie with no domain should not be added to the jar")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b")
+ self.assertFalse(cookie_jar.jar, "Cookie with no domain should not be added to the jar")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b; domain=.abc")
+ self.assertTrue(".abc" in cookie_jar.jar)
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b; domain=abc")
+ self.assertTrue(".abc" in cookie_jar.jar)
+ self.assertTrue("abc" not in cookie_jar.jar)
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b; c=d; domain=abc")
+ self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get(None), "")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b; c=d; domain=abc")
+ cookie_jar.add("e=f; domain=abc")
+ self.assertEqual(cookie_jar.get("abc"), "a=b; c=d; e=f")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b; c=d; domain=abc")
+ cookie_jar.add("e=f; domain=.abc")
+ self.assertEqual(cookie_jar.get("abc"), "a=b; c=d; e=f")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.add("a=b; c=d; domain=abc")
+ cookie_jar.add("e=f; domain=xyz")
+ self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get("xyz"), "e=f")
+ self.assertEqual(cookie_jar.get("something"), "")
+
+ def testSet(self):
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b")
+ self.assertFalse(cookie_jar.jar, "Cookie with no domain should not be added to the jar")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; domain=.abc")
+ self.assertTrue(".abc" in cookie_jar.jar)
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; domain=abc")
+ self.assertTrue(".abc" in cookie_jar.jar)
+ self.assertTrue("abc" not in cookie_jar.jar)
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; c=d; domain=abc")
+ self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; c=d; domain=abc")
+ cookie_jar.set("e=f; domain=abc")
+ self.assertEqual(cookie_jar.get("abc"), "e=f")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; c=d; domain=abc")
+ cookie_jar.set("e=f; domain=.abc")
+ self.assertEqual(cookie_jar.get("abc"), "e=f")
+
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; c=d; domain=abc")
+ cookie_jar.set("e=f; domain=xyz")
+ self.assertEqual(cookie_jar.get("abc"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get("xyz"), "e=f")
+ self.assertEqual(cookie_jar.get("something"), "")
+
+ def testGet(self):
+ cookie_jar = SimpleCookieJar()
+ cookie_jar.set("a=b; c=d; domain=abc.com")
+ self.assertEqual(cookie_jar.get("abc.com"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get("x.abc.com"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get("abc.com.es"), "")
+ self.assertEqual(cookie_jar.get("xabc.com"), "")
+
+ cookie_jar.set("a=b; c=d; domain=.abc.com")
+ self.assertEqual(cookie_jar.get("abc.com"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get("x.abc.com"), "a=b; c=d")
+ self.assertEqual(cookie_jar.get("abc.com.es"), "")
+ self.assertEqual(cookie_jar.get("xabc.com"), "")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py3/websocket/tests/test_http.py b/contrib/python/websocket-client/py3/websocket/tests/test_http.py
new file mode 100644
index 0000000000..456279f288
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/test_http.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+#
+import os
+import os.path
+import websocket as ws
+from websocket._http import proxy_info, read_headers, _start_proxied_socket, _tunnel, _get_addrinfo_list, connect
+import unittest
+import ssl
+import websocket
+import socket
+
+"""
+test_http.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+try:
+ from python_socks._errors import ProxyError, ProxyTimeoutError, ProxyConnectionError
+except:
+ from websocket._http import ProxyError, ProxyTimeoutError, ProxyConnectionError
+
+# Skip tests that access the internet unless TEST_WITH_INTERNET == 1
+TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
+TEST_WITH_PROXY = os.environ.get('TEST_WITH_PROXY', '0') == '1'
+# Skip tests relying on local websockets server unless LOCAL_WS_SERVER_PORT != -1
+LOCAL_WS_SERVER_PORT = os.environ.get('LOCAL_WS_SERVER_PORT', '-1')
+TEST_WITH_LOCAL_SERVER = LOCAL_WS_SERVER_PORT != '-1'
+
+
+class SockMock:
+ def __init__(self):
+ self.data = []
+ self.sent = []
+
+ def add_packet(self, data):
+ self.data.append(data)
+
+ def gettimeout(self):
+ return None
+
+ def recv(self, bufsize):
+ if self.data:
+ e = self.data.pop(0)
+ if isinstance(e, Exception):
+ raise e
+ if len(e) > bufsize:
+ self.data.insert(0, e[bufsize:])
+ return e[:bufsize]
+
+ def send(self, data):
+ self.sent.append(data)
+ return len(data)
+
+ def close(self):
+ pass
+
+
+class HeaderSockMock(SockMock):
+
+ def __init__(self, fname):
+ SockMock.__init__(self)
+ import yatest.common
+ path = yatest.common.source_path(os.path.join('contrib/python/websocket-client/py3/websocket/tests', fname))
+ with open(path, "rb") as f:
+ self.add_packet(f.read())
+
+
+class OptsList():
+
+ def __init__(self):
+ self.timeout = 1
+ self.sockopt = []
+ self.sslopt = {"cert_reqs": ssl.CERT_NONE}
+
+
+class HttpTest(unittest.TestCase):
+
+ def testReadHeader(self):
+ status, header, status_message = read_headers(HeaderSockMock("data/header01.txt"))
+ self.assertEqual(status, 101)
+ self.assertEqual(header["connection"], "Upgrade")
+ # header02.txt is intentionally malformed
+ self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))
+
+ def testTunnel(self):
+ self.assertRaises(ws.WebSocketProxyException, _tunnel, HeaderSockMock("data/header01.txt"), "example.com", 80, ("username", "password"))
+ self.assertRaises(ws.WebSocketProxyException, _tunnel, HeaderSockMock("data/header02.txt"), "example.com", 80, ("username", "password"))
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testConnect(self):
+ # Not currently testing an actual proxy connection, so just check whether proxy errors are raised. This requires internet for a DNS lookup
+ if ws._http.HAVE_PYTHON_SOCKS:
+ # Need this check, otherwise case where python_socks is not installed triggers
+ # websocket._exceptions.WebSocketException: Python Socks is needed for SOCKS proxying but is not available
+ self.assertRaises((ProxyTimeoutError, OSError), _start_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks4", http_proxy_timeout=1))
+ self.assertRaises((ProxyTimeoutError, OSError), _start_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks4a", http_proxy_timeout=1))
+ self.assertRaises((ProxyTimeoutError, OSError), _start_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks5", http_proxy_timeout=1))
+ self.assertRaises((ProxyTimeoutError, OSError), _start_proxied_socket, "wss://example.com", OptsList(), proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="socks5h", http_proxy_timeout=1))
+ self.assertRaises(ProxyConnectionError, connect, "wss://example.com", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=9999, proxy_type="socks4", http_proxy_timeout=1), None)
+
+ self.assertRaises(TypeError, _get_addrinfo_list, None, 80, True, proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="9999", proxy_type="http"))
+ self.assertRaises(TypeError, _get_addrinfo_list, None, 80, True, proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="9999", proxy_type="http"))
+ self.assertRaises(socket.timeout, connect, "wss://google.com", OptsList(), proxy_info(http_proxy_host="8.8.8.8", http_proxy_port=9999, proxy_type="http", http_proxy_timeout=1), None)
+ self.assertEqual(
+ connect("wss://google.com", OptsList(), proxy_info(http_proxy_host="8.8.8.8", http_proxy_port=8080, proxy_type="http"), True),
+ (True, ("google.com", 443, "/")))
+ # The following test fails on Mac OS with a gaierror, not an OverflowError
+ # self.assertRaises(OverflowError, connect, "wss://example.com", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=99999, proxy_type="socks4", timeout=2), False)
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ @unittest.skipUnless(TEST_WITH_PROXY, "This test requires a HTTP proxy to be running on port 8899")
+ @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
+ def testProxyConnect(self):
+ ws = websocket.WebSocket()
+ ws.connect("ws://127.0.0.1:" + LOCAL_WS_SERVER_PORT, http_proxy_host="127.0.0.1", http_proxy_port="8899", proxy_type="http")
+ ws.send("Hello, Server")
+ server_response = ws.recv()
+ self.assertEqual(server_response, "Hello, Server")
+ # self.assertEqual(_start_proxied_socket("wss://api.bitfinex.com/ws/2", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8899", proxy_type="http"))[1], ("api.bitfinex.com", 443, '/ws/2'))
+ self.assertEqual(_get_addrinfo_list("api.bitfinex.com", 443, True, proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8899", proxy_type="http")),
+ (socket.getaddrinfo("127.0.0.1", 8899, 0, socket.SOCK_STREAM, socket.SOL_TCP), True, None))
+ self.assertEqual(connect("wss://api.bitfinex.com/ws/2", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=8899, proxy_type="http"), None)[1], ("api.bitfinex.com", 443, '/ws/2'))
+        # TODO: Test SOCKS4 and SOCKS5 proxies with unit tests
+
+ @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
+ def testSSLopt(self):
+ ssloptions = {
+ "check_hostname": False,
+ "server_hostname": "ServerName",
+ "ssl_version": ssl.PROTOCOL_TLS_CLIENT,
+ "ciphers": "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:\
+ TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:\
+ ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:\
+ ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:\
+ DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:\
+ ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:\
+ ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:\
+ DHE-RSA-AES256-SHA256:ECDHE-ECDSA-AES128-SHA256:\
+ ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:\
+ ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA",
+ "ecdh_curve": "prime256v1"
+ }
+ ws_ssl1 = websocket.WebSocket(sslopt=ssloptions)
+ ws_ssl1.connect("wss://api.bitfinex.com/ws/2")
+ ws_ssl1.send("Hello")
+ ws_ssl1.close()
+
+ ws_ssl2 = websocket.WebSocket(sslopt={"check_hostname": True})
+ ws_ssl2.connect("wss://api.bitfinex.com/ws/2")
+ ws_ssl2.close
+
+ def testProxyInfo(self):
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").proxy_protocol, "http")
+ self.assertRaises(ProxyError, proxy_info, http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="badval")
+ self.assertEqual(proxy_info(http_proxy_host="example.com", http_proxy_port="8080", proxy_type="http").proxy_host, "example.com")
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").proxy_port, "8080")
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http").auth, None)
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http", http_proxy_auth=("my_username123", "my_pass321")).auth[0], "my_username123")
+ self.assertEqual(proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http", http_proxy_auth=("my_username123", "my_pass321")).auth[1], "my_pass321")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/contrib/python/websocket-client/py3/websocket/tests/test_url.py b/contrib/python/websocket-client/py3/websocket/tests/test_url.py
new file mode 100644
index 0000000000..a74dd7669d
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/test_url.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+#
+import os
+import unittest
+from websocket._url import get_proxy_info, parse_url, _is_address_in_network, _is_no_proxy_host
+
+"""
+test_url.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+class UrlTest(unittest.TestCase):
+
+ def test_address_in_network(self):
+ self.assertTrue(_is_address_in_network('127.0.0.1', '127.0.0.0/8'))
+ self.assertTrue(_is_address_in_network('127.1.0.1', '127.0.0.0/8'))
+ self.assertFalse(_is_address_in_network('127.1.0.1', '127.0.0.0/24'))
+
+ def testParseUrl(self):
+ p = parse_url("ws://www.example.com/r")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com/r/")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/r/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com/")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com:8080/r")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com:8080/")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://www.example.com:8080")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("wss://www.example.com:8080/r")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], True)
+
+ p = parse_url("wss://www.example.com:8080/r?key=value")
+ self.assertEqual(p[0], "www.example.com")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r?key=value")
+ self.assertEqual(p[3], True)
+
+ self.assertRaises(ValueError, parse_url, "http://www.example.com/r")
+
+ p = parse_url("ws://[2a03:4000:123:83::3]/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 80)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("ws://[2a03:4000:123:83::3]:8080/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], False)
+
+ p = parse_url("wss://[2a03:4000:123:83::3]/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 443)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], True)
+
+ p = parse_url("wss://[2a03:4000:123:83::3]:8080/r")
+ self.assertEqual(p[0], "2a03:4000:123:83::3")
+ self.assertEqual(p[1], 8080)
+ self.assertEqual(p[2], "/r")
+ self.assertEqual(p[3], True)
+
+
+class IsNoProxyHostTest(unittest.TestCase):
+ def setUp(self):
+ self.no_proxy = os.environ.get("no_proxy", None)
+ if "no_proxy" in os.environ:
+ del os.environ["no_proxy"]
+
+ def tearDown(self):
+ if self.no_proxy:
+ os.environ["no_proxy"] = self.no_proxy
+ elif "no_proxy" in os.environ:
+ del os.environ["no_proxy"]
+
+ def testMatchAll(self):
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['*']))
+ self.assertTrue(_is_no_proxy_host("192.168.0.1", ['*']))
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['other.websocket.org', '*']))
+ os.environ['no_proxy'] = '*'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+ self.assertTrue(_is_no_proxy_host("192.168.0.1", None))
+ os.environ['no_proxy'] = 'other.websocket.org, *'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+
+ def testIpAddress(self):
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.1']))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", ['127.0.0.1']))
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", ['other.websocket.org', '127.0.0.1']))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", ['other.websocket.org', '127.0.0.1']))
+ os.environ['no_proxy'] = '127.0.0.1'
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
+ os.environ['no_proxy'] = 'other.websocket.org, 127.0.0.1'
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
+ self.assertFalse(_is_no_proxy_host("127.0.0.2", None))
+
+ def testIpAddressInRange(self):
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", ['127.0.0.0/8']))
+ self.assertTrue(_is_no_proxy_host("127.0.0.2", ['127.0.0.0/8']))
+ self.assertFalse(_is_no_proxy_host("127.1.0.1", ['127.0.0.0/24']))
+ os.environ['no_proxy'] = '127.0.0.0/8'
+ self.assertTrue(_is_no_proxy_host("127.0.0.1", None))
+ self.assertTrue(_is_no_proxy_host("127.0.0.2", None))
+ os.environ['no_proxy'] = '127.0.0.0/24'
+ self.assertFalse(_is_no_proxy_host("127.1.0.1", None))
+
+ def testHostnameMatch(self):
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", ['my.websocket.org']))
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", ['other.websocket.org', 'my.websocket.org']))
+ self.assertFalse(_is_no_proxy_host("my.websocket.org", ['other.websocket.org']))
+ os.environ['no_proxy'] = 'my.websocket.org'
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
+ self.assertFalse(_is_no_proxy_host("other.websocket.org", None))
+ os.environ['no_proxy'] = 'other.websocket.org, my.websocket.org'
+ self.assertTrue(_is_no_proxy_host("my.websocket.org", None))
+
+ def testHostnameMatchDomain(self):
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['.websocket.org']))
+ self.assertTrue(_is_no_proxy_host("my.other.websocket.org", ['.websocket.org']))
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", ['my.websocket.org', '.websocket.org']))
+ self.assertFalse(_is_no_proxy_host("any.websocket.com", ['.websocket.org']))
+ os.environ['no_proxy'] = '.websocket.org'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+ self.assertTrue(_is_no_proxy_host("my.other.websocket.org", None))
+ self.assertFalse(_is_no_proxy_host("any.websocket.com", None))
+ os.environ['no_proxy'] = 'my.websocket.org, .websocket.org'
+ self.assertTrue(_is_no_proxy_host("any.websocket.org", None))
+
+
class ProxyInfoTest(unittest.TestCase):
    # Tests for get_proxy_info(): proxy settings may come from explicit
    # keyword arguments or from the http_proxy / https_proxy / no_proxy
    # environment variables. The result is a (host, port, auth) tuple.

    def setUp(self):
        # Stash any real proxy environment and clear it so each test starts
        # from a clean slate.
        self.http_proxy = os.environ.get("http_proxy", None)
        self.https_proxy = os.environ.get("https_proxy", None)
        self.no_proxy = os.environ.get("no_proxy", None)
        if "http_proxy" in os.environ:
            del os.environ["http_proxy"]
        if "https_proxy" in os.environ:
            del os.environ["https_proxy"]
        if "no_proxy" in os.environ:
            del os.environ["no_proxy"]

    def tearDown(self):
        # Restore the proxy environment captured in setUp.
        if self.http_proxy:
            os.environ["http_proxy"] = self.http_proxy
        elif "http_proxy" in os.environ:
            del os.environ["http_proxy"]

        if self.https_proxy:
            os.environ["https_proxy"] = self.https_proxy
        elif "https_proxy" in os.environ:
            del os.environ["https_proxy"]

        if self.no_proxy:
            os.environ["no_proxy"] = self.no_proxy
        elif "no_proxy" in os.environ:
            del os.environ["no_proxy"]

    def testProxyFromArgs(self):
        # Explicit arguments win: port defaults to 0 when unspecified, and a
        # no_proxy match for the target host disables the proxy entirely.
        self.assertEqual(get_proxy_info("echo.websocket.events", False, proxy_host="localhost"), ("localhost", 0, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", False, proxy_host="localhost", proxy_port=3128),
                         ("localhost", 3128, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", True, proxy_host="localhost"), ("localhost", 0, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", True, proxy_host="localhost", proxy_port=3128),
                         ("localhost", 3128, None))

        self.assertEqual(get_proxy_info("echo.websocket.events", False, proxy_host="localhost", proxy_auth=("a", "b")),
                         ("localhost", 0, ("a", "b")))
        self.assertEqual(
            get_proxy_info("echo.websocket.events", False, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
            ("localhost", 3128, ("a", "b")))
        self.assertEqual(get_proxy_info("echo.websocket.events", True, proxy_host="localhost", proxy_auth=("a", "b")),
                         ("localhost", 0, ("a", "b")))
        self.assertEqual(
            get_proxy_info("echo.websocket.events", True, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
            ("localhost", 3128, ("a", "b")))

        self.assertEqual(get_proxy_info("echo.websocket.events", True, proxy_host="localhost", proxy_port=3128,
                                        no_proxy=["example.com"], proxy_auth=("a", "b")),
                         ("localhost", 3128, ("a", "b")))
        self.assertEqual(get_proxy_info("echo.websocket.events", True, proxy_host="localhost", proxy_port=3128,
                                        no_proxy=["echo.websocket.events"], proxy_auth=("a", "b")),
                         (None, 0, None))

    def testProxyFromEnv(self):
        # Environment variables: http_proxy applies when is_secure=False,
        # https_proxy when is_secure=True. URL-embedded credentials are
        # percent-decoded, and no_proxy entries (exact hosts, domain suffixes,
        # CIDR ranges) disable the proxy for matching targets.
        os.environ["http_proxy"] = "http://localhost/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, None))
        os.environ["http_proxy"] = "http://localhost:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None))

        os.environ["http_proxy"] = "http://localhost/"
        os.environ["https_proxy"] = "http://localhost2/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, None))
        os.environ["http_proxy"] = "http://localhost:3128/"
        os.environ["https_proxy"] = "http://localhost2:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None))

        os.environ["http_proxy"] = "http://localhost/"
        os.environ["https_proxy"] = "http://localhost2/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", None, None))
        os.environ["http_proxy"] = "http://localhost:3128/"
        os.environ["https_proxy"] = "http://localhost2:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, None))

        # An empty variable means "no proxy configured" for that scheme.
        os.environ["http_proxy"] = ""
        os.environ["https_proxy"] = "http://localhost2/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", None, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", False), (None, 0, None))
        os.environ["http_proxy"] = ""
        os.environ["https_proxy"] = "http://localhost2:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", False), (None, 0, None))

        os.environ["http_proxy"] = "http://localhost/"
        os.environ["https_proxy"] = ""
        self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, None))
        os.environ["http_proxy"] = "http://localhost:3128/"
        os.environ["https_proxy"] = ""
        self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None))
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None))

        os.environ["http_proxy"] = "http://a:b@localhost/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, ("a", "b")))
        os.environ["http_proxy"] = "http://a:b@localhost:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", 3128, ("a", "b")))

        os.environ["http_proxy"] = "http://a:b@localhost/"
        os.environ["https_proxy"] = "http://a:b@localhost2/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", None, ("a", "b")))
        os.environ["http_proxy"] = "http://a:b@localhost:3128/"
        os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", False), ("localhost", 3128, ("a", "b")))

        os.environ["http_proxy"] = "http://a:b@localhost/"
        os.environ["https_proxy"] = "http://a:b@localhost2/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", None, ("a", "b")))
        os.environ["http_proxy"] = "http://a:b@localhost:3128/"
        os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, ("a", "b")))

        # Percent-encoded username/password must be decoded.
        os.environ["http_proxy"] = "http://john%40example.com:P%40SSWORD@localhost:3128/"
        os.environ["https_proxy"] = "http://john%40example.com:P%40SSWORD@localhost2:3128/"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, ("john@example.com", "P@SSWORD")))

        # "example.1.com" does not match "example1.com", so the proxy applies.
        os.environ["http_proxy"] = "http://a:b@localhost/"
        os.environ["https_proxy"] = "http://a:b@localhost2/"
        os.environ["no_proxy"] = "example1.com,example2.com"
        self.assertEqual(get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")))
        os.environ["http_proxy"] = "http://a:b@localhost:3128/"
        os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
        os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.events"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None))
        os.environ["http_proxy"] = "http://a:b@localhost:3128/"
        os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
        os.environ["no_proxy"] = "example1.com,example2.com, .websocket.events"
        self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None))

        # CIDR entries in no_proxy match IP targets.
        os.environ["http_proxy"] = "http://a:b@localhost:3128/"
        os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
        os.environ["no_proxy"] = "127.0.0.0/8, 192.168.0.0/16"
        self.assertEqual(get_proxy_info("127.0.0.1", False), (None, 0, None))
        self.assertEqual(get_proxy_info("192.168.1.1", False), (None, 0, None))
+
+
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
diff --git a/contrib/python/websocket-client/py3/websocket/tests/test_websocket.py b/contrib/python/websocket-client/py3/websocket/tests/test_websocket.py
new file mode 100644
index 0000000000..54555c8b6c
--- /dev/null
+++ b/contrib/python/websocket-client/py3/websocket/tests/test_websocket.py
@@ -0,0 +1,456 @@
+# -*- coding: utf-8 -*-
+#
+import os
+import os.path
+import socket
+import websocket as ws
+import unittest
+from websocket._handshake import _create_sec_websocket_key, \
+ _validate as _validate_header
+from websocket._http import read_headers
+from websocket._utils import validate_utf8
+from base64 import decodebytes as base64decode
+
+"""
+test_websocket.py
+websocket - WebSocket client library for Python
+
+Copyright 2023 engn33r
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
try:
    import ssl
    from ssl import SSLError
except ImportError:
    # dummy class of SSLError for ssl none-support environment.
    class SSLError(Exception):
        pass

# Skip test to access the internet unless TEST_WITH_INTERNET == 1
TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
# Skip tests relying on local websockets server unless LOCAL_WS_SERVER_PORT != -1
LOCAL_WS_SERVER_PORT = os.environ.get('LOCAL_WS_SERVER_PORT', '-1')
TEST_WITH_LOCAL_SERVER = LOCAL_WS_SERVER_PORT != '-1'
# Verbosity flag handed to ws.enableTrace() in each test's setUp.
TRACEABLE = True
+
+
def create_mask_key(_):
    """Deterministic mask generator for the framing tests.

    Always returns the fixed 4-character key "abcd" so that masked frame
    bytes asserted elsewhere in this module are reproducible.
    """
    return "abcd"
+
+
class SockMock:
    """In-memory socket stand-in for the framing tests.

    Outbound traffic is captured in ``sent``; inbound traffic is scripted by
    queueing packets (or exceptions to be raised) with ``add_packet``.
    """

    def __init__(self):
        self.data = []   # scripted inbound packets / exceptions, FIFO
        self.sent = []   # every payload passed to send()

    def add_packet(self, data):
        """Queue *data* to be returned (or raised, if an Exception) by recv()."""
        self.data.append(data)

    def gettimeout(self):
        # Mimic a blocking socket with no timeout configured.
        return None

    def recv(self, bufsize):
        """Return up to *bufsize* bytes of the next scripted packet.

        Returns None when nothing is queued; raises any queued Exception.
        """
        if not self.data:
            return None
        head = self.data.pop(0)
        if isinstance(head, Exception):
            raise head
        leftover = head[bufsize:]
        if leftover:
            # Keep the unread tail at the front for the next recv() call.
            self.data.insert(0, leftover)
        return head[:bufsize]

    def send(self, data):
        """Record *data* and report it as fully sent."""
        self.sent.append(data)
        return len(data)

    def close(self):
        # Nothing to release for an in-memory mock.
        pass
+
+
class HeaderSockMock(SockMock):
    """SockMock whose inbound queue is preloaded from a fixture file.

    *fname* is resolved relative to the websocket tests directory via the
    yatest source-path helper.
    """

    def __init__(self, fname):
        SockMock.__init__(self)
        import yatest.common
        tests_dir = 'contrib/python/websocket-client/py3/websocket/tests'
        fixture_path = yatest.common.source_path(os.path.join(tests_dir, fname))
        with open(fixture_path, "rb") as fixture:
            self.add_packet(fixture.read())
+
+
class WebSocketTest(unittest.TestCase):
    """Tests for ws.WebSocket framing, fragmentation and control frames.

    BUGFIX: several string literals in this class were mojibake (UTF-8 text
    mis-decoded as Latin-1), which made the byte-level frame assertions
    unsatisfiable; the intended Japanese/latin-1 strings are restored below.
    """

    def setUp(self):
        ws.enableTrace(TRACEABLE)

    def tearDown(self):
        pass

    def testDefaultTimeout(self):
        # setdefaulttimeout()/getdefaulttimeout() round-trip; reset afterwards
        # so other tests see the default again.
        self.assertEqual(ws.getdefaulttimeout(), None)
        ws.setdefaulttimeout(10)
        self.assertEqual(ws.getdefaulttimeout(), 10)
        ws.setdefaulttimeout(None)

    def testWSKey(self):
        key = _create_sec_websocket_key()
        self.assertTrue(key != 24)
        # BUGFIX: probe string was mojibake ("Â¥n"); the intended probe is a
        # yen sign + "n", which must never appear in a base64-encoded key.
        self.assertTrue(str("¥n") not in key)

    def testNonce(self):
        """ WebSocket key should be a random 16-byte nonce.
        """
        key = _create_sec_websocket_key()
        nonce = base64decode(key.encode("utf-8"))
        self.assertEqual(16, len(nonce))

    def testWsUtils(self):
        # _validate_header returns (ok, accepted_subprotocol).
        key = "c6b8hTg4EeGb2gQMztV1/g=="
        required_header = {
            "upgrade": "websocket",
            "connection": "upgrade",
            "sec-websocket-accept": "Kxep+hNu9n51529fGidYu7a3wO0="}
        self.assertEqual(_validate_header(required_header, key, None), (True, None))

        # Wrong or missing "upgrade" header.
        header = required_header.copy()
        header["upgrade"] = "http"
        self.assertEqual(_validate_header(header, key, None), (False, None))
        del header["upgrade"]
        self.assertEqual(_validate_header(header, key, None), (False, None))

        # Wrong or missing "connection" header.
        header = required_header.copy()
        header["connection"] = "something"
        self.assertEqual(_validate_header(header, key, None), (False, None))
        del header["connection"]
        self.assertEqual(_validate_header(header, key, None), (False, None))

        # Wrong or missing "sec-websocket-accept" header.
        header = required_header.copy()
        header["sec-websocket-accept"] = "something"
        self.assertEqual(_validate_header(header, key, None), (False, None))
        del header["sec-websocket-accept"]
        self.assertEqual(_validate_header(header, key, None), (False, None))

        # Subprotocol negotiation: the server's choice must be in our list.
        header = required_header.copy()
        header["sec-websocket-protocol"] = "sub1"
        self.assertEqual(_validate_header(header, key, ["sub1", "sub2"]), (True, "sub1"))
        # This case will print out a logging error using the error() function, but that is expected
        self.assertEqual(_validate_header(header, key, ["sub2", "sub3"]), (False, None))

        # Subprotocol matching is case-insensitive.
        header = required_header.copy()
        header["sec-websocket-protocol"] = "sUb1"
        self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (True, "sub1"))

        header = required_header.copy()
        # This case will print out a logging error using the error() function, but that is expected
        self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (False, None))

    def testReadHeader(self):
        # Fixture files contain canned HTTP handshake responses.
        status, header, status_message = read_headers(HeaderSockMock("data/header01.txt"))
        self.assertEqual(status, 101)
        self.assertEqual(header["connection"], "Upgrade")

        status, header, status_message = read_headers(HeaderSockMock("data/header03.txt"))
        self.assertEqual(status, 101)
        self.assertEqual(header["connection"], "Upgrade, Keep-Alive")

        HeaderSockMock("data/header02.txt")
        self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))

    def testSend(self):
        # TODO: add longer frame data
        sock = ws.WebSocket()
        sock.set_mask_key(create_mask_key)
        s = sock.sock = HeaderSockMock("data/header01.txt")
        sock.send("Hello")
        self.assertEqual(s.sent[0], b'\x81\x85abcd)\x07\x0f\x08\x0e')

        # BUGFIX: payload was mojibake. The Japanese greeting below, UTF-8
        # encoded and XOR-masked with "abcd", produces exactly the expected
        # frame bytes in the assertion.
        sock.send("こんにちは")
        self.assertEqual(s.sent[1], b'\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc')

        self.assertEqual(sock.send_binary(b'1111111111101'), 19)

    def testRecv(self):
        # TODO: add longer frame data
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        something = b'\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc'
        s.add_packet(something)
        data = sock.recv()
        # BUGFIX: expected text was mojibake; the masked frame above decodes
        # to the Japanese greeting.
        self.assertEqual(data, "こんにちは")

        s.add_packet(b'\x81\x85abcd)\x07\x0f\x08\x0e')
        data = sock.recv()
        self.assertEqual(data, "Hello")

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testIter(self):
        count = 2
        s = ws.create_connection('wss://api.bitfinex.com/ws/2')
        s.send('{"event": "subscribe", "channel": "ticker"}')
        for _ in s:
            count -= 1
            if count == 0:
                break

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testNext(self):
        sock = ws.create_connection('wss://api.bitfinex.com/ws/2')
        self.assertEqual(str, type(next(sock)))

    def testInternalRecvStrict(self):
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        s.add_packet(b'foo')
        s.add_packet(socket.timeout())
        s.add_packet(b'bar')
        # s.add_packet(SSLError("The read operation timed out"))
        s.add_packet(b'baz')
        # The scripted timeout interrupts the first strict read...
        with self.assertRaises(ws.WebSocketTimeoutException):
            sock.frame_buffer.recv_strict(9)
        # with self.assertRaises(SSLError):
        #     data = sock._recv_strict(9)
        # ...but already-buffered bytes are kept, so the retry succeeds.
        data = sock.frame_buffer.recv_strict(9)
        self.assertEqual(data, b'foobarbaz')
        with self.assertRaises(ws.WebSocketConnectionClosedException):
            sock.frame_buffer.recv_strict(1)

    def testRecvTimeout(self):
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        s.add_packet(b'\x81')
        s.add_packet(socket.timeout())
        s.add_packet(b'\x8dabcd\x29\x07\x0f\x08\x0e')
        s.add_packet(socket.timeout())
        s.add_packet(b'\x4e\x43\x33\x0e\x10\x0f\x00\x40')
        # Two scripted timeouts, then the completed frame decodes cleanly.
        with self.assertRaises(ws.WebSocketTimeoutException):
            sock.recv()
        with self.assertRaises(ws.WebSocketTimeoutException):
            sock.recv()
        data = sock.recv()
        self.assertEqual(data, "Hello, World!")
        with self.assertRaises(ws.WebSocketConnectionClosedException):
            sock.recv()

    def testRecvWithSimpleFragmentation(self):
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        # OPCODE=TEXT, FIN=0, MSG="Brevity is "
        s.add_packet(b'\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C')
        # OPCODE=CONT, FIN=1, MSG="the soul of wit"
        s.add_packet(b'\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17')
        data = sock.recv()
        self.assertEqual(data, "Brevity is the soul of wit")
        with self.assertRaises(ws.WebSocketConnectionClosedException):
            sock.recv()

    def testRecvWithFireEventOfFragmentation(self):
        # fire_cont_frame=True delivers each fragment separately from recv_data().
        sock = ws.WebSocket(fire_cont_frame=True)
        s = sock.sock = SockMock()
        # OPCODE=TEXT, FIN=0, MSG="Brevity is "
        s.add_packet(b'\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C')
        # OPCODE=CONT, FIN=0, MSG="Brevity is "
        s.add_packet(b'\x00\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C')
        # OPCODE=CONT, FIN=1, MSG="the soul of wit"
        s.add_packet(b'\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17')

        _, data = sock.recv_data()
        self.assertEqual(data, b'Brevity is ')
        _, data = sock.recv_data()
        self.assertEqual(data, b'Brevity is ')
        _, data = sock.recv_data()
        self.assertEqual(data, b'the soul of wit')

        # OPCODE=CONT, FIN=0, MSG="Brevity is "
        s.add_packet(b'\x80\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C')

        with self.assertRaises(ws.WebSocketException):
            sock.recv_data()

        with self.assertRaises(ws.WebSocketConnectionClosedException):
            sock.recv()

    def testClose(self):
        sock = ws.WebSocket()
        sock.connected = True
        # NOTE(review): `sock.close` is an attribute access, not a call;
        # presumably `sock.close()` was intended — confirm before changing,
        # since calling close() with no underlying socket attached may raise.
        sock.close

        # Receiving a CLOSE frame must flip the connected flag off.
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        sock.connected = True
        s.add_packet(b'\x88\x80\x17\x98p\x84')
        sock.recv()
        self.assertEqual(sock.connected, False)

    def testRecvContFragmentation(self):
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        # OPCODE=CONT, FIN=1, MSG="the soul of wit"
        s.add_packet(b'\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17')
        # A continuation frame with no preceding initial frame is an error.
        self.assertRaises(ws.WebSocketException, sock.recv)

    def testRecvWithProlongedFragmentation(self):
        sock = ws.WebSocket()
        s = sock.sock = SockMock()
        # OPCODE=TEXT, FIN=0, MSG="Once more unto the breach, "
        s.add_packet(b'\x01\x9babcd.\x0c\x00\x01A\x0f\x0c\x16\x04B\x16\n\x15\rC\x10\t\x07C\x06\x13\x07\x02\x07\tNC')
        # OPCODE=CONT, FIN=0, MSG="dear friends, "
        s.add_packet(b'\x00\x8eabcd\x05\x07\x02\x16A\x04\x11\r\x04\x0c\x07\x17MB')
        # OPCODE=CONT, FIN=1, MSG="once more"
        s.add_packet(b'\x80\x89abcd\x0e\x0c\x00\x01A\x0f\x0c\x16\x04')
        data = sock.recv()
        self.assertEqual(
            data,
            "Once more unto the breach, dear friends, once more")
        with self.assertRaises(ws.WebSocketConnectionClosedException):
            sock.recv()

    def testRecvWithFragmentationAndControlFrame(self):
        sock = ws.WebSocket()
        sock.set_mask_key(create_mask_key)
        s = sock.sock = SockMock()
        # OPCODE=TEXT, FIN=0, MSG="Too much "
        s.add_packet(b'\x01\x89abcd5\r\x0cD\x0c\x17\x00\x0cA')
        # OPCODE=PING, FIN=1, MSG="Please PONG this"
        s.add_packet(b'\x89\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17')
        # OPCODE=CONT, FIN=1, MSG="of a good thing"
        s.add_packet(b'\x80\x8fabcd\x0e\x04C\x05A\x05\x0c\x0b\x05B\x17\x0c\x08\x0c\x04')
        data = sock.recv()
        self.assertEqual(data, "Too much of a good thing")
        with self.assertRaises(ws.WebSocketConnectionClosedException):
            sock.recv()
        # The interleaved PING must have been answered with a matching PONG.
        self.assertEqual(
            s.sent[0],
            b'\x8a\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17')

    @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
    def testWebSocket(self):
        s = ws.create_connection("ws://127.0.0.1:" + LOCAL_WS_SERVER_PORT)
        self.assertNotEqual(s, None)
        s.send("Hello, World")
        result = s.next()
        s.fileno()
        self.assertEqual(result, "Hello, World")

        # BUGFIX: round-trip text was mojibake; restored the Japanese
        # greeting so the echo comparison is meaningful.
        s.send("こにゃにゃちは、世界")
        result = s.recv()
        self.assertEqual(result, "こにゃにゃちは、世界")
        self.assertRaises(ValueError, s.send_close, -1, "")
        s.close()

    @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
    def testPingPong(self):
        s = ws.create_connection("ws://127.0.0.1:" + LOCAL_WS_SERVER_PORT)
        self.assertNotEqual(s, None)
        s.ping("Hello")
        s.pong("Hi")
        s.close()

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testSupportRedirect(self):
        s = ws.WebSocket()
        self.assertRaises(ws._exceptions.WebSocketBadStatusException, s.connect, "ws://google.com/")
        # Need to find a URL that has a redirect code leading to a websocket

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testSecureWebSocket(self):
        import ssl
        s = ws.create_connection("wss://api.bitfinex.com/ws/2")
        self.assertNotEqual(s, None)
        self.assertTrue(isinstance(s.sock, ssl.SSLSocket))
        self.assertEqual(s.getstatus(), 101)
        self.assertNotEqual(s.getheaders(), None)
        s.settimeout(10)
        self.assertEqual(s.gettimeout(), 10)
        self.assertEqual(s.getsubprotocol(), None)
        s.abort()

    @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
    def testWebSocketWithCustomHeader(self):
        s = ws.create_connection("ws://127.0.0.1:" + LOCAL_WS_SERVER_PORT,
                                 headers={"User-Agent": "PythonWebsocketClient"})
        self.assertNotEqual(s, None)
        self.assertEqual(s.getsubprotocol(), None)
        s.send("Hello, World")
        result = s.recv()
        self.assertEqual(result, "Hello, World")
        self.assertRaises(ValueError, s.close, -1, "")
        s.close()

    @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
    def testAfterClose(self):
        s = ws.create_connection("ws://127.0.0.1:" + LOCAL_WS_SERVER_PORT)
        self.assertNotEqual(s, None)
        s.close()
        # Every operation on a closed connection must raise.
        self.assertRaises(ws.WebSocketConnectionClosedException, s.send, "Hello")
        self.assertRaises(ws.WebSocketConnectionClosedException, s.recv)
+
+
class SockOptTest(unittest.TestCase):
    # Verifies that sockopt tuples passed to create_connection are applied
    # to the underlying socket.

    @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled")
    def testSockOpt(self):
        sockopt = ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),)
        s = ws.create_connection("ws://127.0.0.1:" + LOCAL_WS_SERVER_PORT, sockopt=sockopt)
        # TCP_NODELAY was requested, so getsockopt must report a non-zero value.
        self.assertNotEqual(s.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
        s.close()
+
+
class UtilsTest(unittest.TestCase):
    """Tests for websocket._utils.validate_utf8."""

    def testUtf8Validator(self):
        # A valid 4-byte sequence (U+10000) is accepted.
        self.assertEqual(validate_utf8(b'\xf0\x90\x80\x80'), True)
        # An encoded UTF-16 surrogate (\xed\xa0\x80) must be rejected.
        self.assertEqual(validate_utf8(b'\xce\xba\xe1\xbd\xb9\xcf\x83\xce\xbc\xce\xb5\xed\xa0\x80edited'), False)
        # The empty byte string is trivially valid.
        self.assertEqual(validate_utf8(b''), True)
+
+
class HandshakeTest(unittest.TestCase):
    # Tests for client handshake argument validation: SSL options, manual
    # header overrides, and URL parsing. Network cases are gated by
    # TEST_WITH_INTERNET.

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def test_http_SSL(self):
        # connect must raise ValueError here — presumably because
        # "cert_chain" is not a supported sslopt key; confirm against
        # websocket._ssl_compat / _http before relying on the reason.
        websock1 = ws.WebSocket(sslopt={"cert_chain": ssl.get_default_verify_paths().capath}, enable_multithread=False)
        self.assertRaises(ValueError,
                          websock1.connect, "wss://api.bitfinex.com/ws/2")
        # A nonexistent certfile path must surface as FileNotFoundError.
        websock2 = ws.WebSocket(sslopt={"certfile": "myNonexistentCertFile"})
        self.assertRaises(FileNotFoundError,
                          websock2.connect, "wss://api.bitfinex.com/ws/2")

    @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
    def testManualHeaders(self):
        # Exercises the full set of manual handshake overrides; the endpoint
        # is expected to reject this handshake with a bad status.
        websock3 = ws.WebSocket(sslopt={"ca_certs": ssl.get_default_verify_paths().cafile,
                                        "ca_cert_path": ssl.get_default_verify_paths().capath})
        self.assertRaises(ws._exceptions.WebSocketBadStatusException,
                          websock3.connect, "wss://api.bitfinex.com/ws/2", cookie="chocolate",
                          origin="testing_websockets.com",
                          host="echo.websocket.events/websocket-client-test",
                          subprotocols=["testproto"],
                          connection="Upgrade",
                          header={"CustomHeader1":"123",
                                  "Cookie":"TestValue",
                                  "Sec-WebSocket-Key":"k9kFAUWNAMmf5OEMfTlOEA==",
                                  "Sec-WebSocket-Protocol":"newprotocol"})

    def testIPv6(self):
        # A bare IPv6 address without a ws:// scheme is rejected as invalid.
        websock2 = ws.WebSocket()
        self.assertRaises(ValueError, websock2.connect, "2001:4860:4860::8888")

    def testBadURLs(self):
        # Malformed scheme, unresolvable host, and missing scheme all fail.
        websock3 = ws.WebSocket()
        self.assertRaises(ValueError, websock3.connect, "ws//example.com")
        self.assertRaises(ws.WebSocketAddressException, websock3.connect, "ws://example")
        self.assertRaises(ValueError, websock3.connect, "example.com")
+
+
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
diff --git a/contrib/python/websocket-client/py3/ya.make b/contrib/python/websocket-client/py3/ya.make
new file mode 100644
index 0000000000..e2714d2d23
--- /dev/null
+++ b/contrib/python/websocket-client/py3/ya.make
@@ -0,0 +1,40 @@
+# Generated by devtools/yamaker (pypi).
+
+PY3_LIBRARY()
+
+VERSION(1.6.4)
+
+LICENSE(Apache-2.0)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ websocket/__init__.py
+ websocket/_abnf.py
+ websocket/_app.py
+ websocket/_cookiejar.py
+ websocket/_core.py
+ websocket/_exceptions.py
+ websocket/_handshake.py
+ websocket/_http.py
+ websocket/_logging.py
+ websocket/_socket.py
+ websocket/_ssl_compat.py
+ websocket/_url.py
+ websocket/_utils.py
+ websocket/_wsdump.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/websocket-client/py3/
+ .dist-info/METADATA
+ .dist-info/entry_points.txt
+ .dist-info/top_level.txt
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ tests
+)
diff --git a/contrib/python/websocket-client/ya.make b/contrib/python/websocket-client/ya.make
new file mode 100644
index 0000000000..f121a58fed
--- /dev/null
+++ b/contrib/python/websocket-client/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/websocket-client/py2)
+ELSE()
+ PEERDIR(contrib/python/websocket-client/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)
diff --git a/ydb/tools/ya.make b/ydb/tools/ya.make
index a9e74511d1..e01fbd619c 100644
--- a/ydb/tools/ya.make
+++ b/ydb/tools/ya.make
@@ -1,3 +1,4 @@
RECURSE(
cfg
+ ydbd_slice
)
diff --git a/ydb/tools/ydbd_slice/__init__.py b/ydb/tools/ydbd_slice/__init__.py
new file mode 100644
index 0000000000..be1bc7b508
--- /dev/null
+++ b/ydb/tools/ydbd_slice/__init__.py
@@ -0,0 +1,1189 @@
+import os
+import sys
+import json
+import signal
+import shutil
+import tempfile
+import logging
+import argparse
+import subprocess
+import warnings
+from urllib3.exceptions import HTTPWarning
+
+from ydb.tools.ydbd_slice import nodes, handlers, cluster_description
+from ydb.tools.ydbd_slice.kube import handlers as kube_handlers, docker
+
+
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+warnings.filterwarnings("ignore", category=HTTPWarning)
+
+
+logger = logging.getLogger(__name__)
+
+
+HELP = '''
+\033[92mKiKiMR Developer's Slice Deployment Tool\x1b[0m
+
+See examples of cluster.yaml here
+ https://cs.yandex-team.ru/#!,kikimr%%2F.*%%2Fcluster.yaml,,arcadia
+And some explanation about cluster.yaml's format here
+ https://wiki.yandex-team.ru/kikimr/techdoc/confdeploy/configuration/
+Guide for Kubernetes Developer Slice could be found here
+ https://docs.yandex-team.ru/ydb-tech/ops/devslice-user-guide-k8s-baremetal-host
+Guide for ad-hoc Kubernetes operations could be found here
+ https://docs.yandex-team.ru/ydb-tech/ops/kubernetes/howto/
+
+\033[96mCommands for Traditional Developer's Slices\x1b[0m
+
+\033[95minstall\033[94m - full install process from scratch:
+ %(prog)s install cluster.yaml --arcadia
+
+\033[95mupdate\033[94m - only update configs and kikimr executable on cluster:
+ %(prog)s update cluster.yaml --arcadia
+
+\033[95mexplain\033[94m - explain cluster description:
+ %(prog)s explain cluster.yaml --out-cfg cfg
+
+\033[95mstart\033[94m - start instances on cluster:
+ %(prog)s start cluster.yaml
+
+\033[95mstop\033[94m - stop instances on cluster:
+ %(prog)s stop cluster.yaml
+
+\033[95mclear\033[94m - clear cluster:
+ %(prog)s clear cluster.yaml
+
+\033[95mformat\033[94m - format cluster:
+ %(prog)s format cluster.yaml
+
Use option --hosts to select a particular subset of cluster nodes.
Example, update only kikimr0999 according to the cluster.yaml settings:
    %(prog)s update cluster.yaml --hosts kikimr0999 --arcadia
+
+Use components specification to choose active component.
+Example, update only kikimr:
+ %(prog)s update cluster.yaml kikimr --arcadia
+
+Example, update only kikimr binary:
+ %(prog)s update cluster.yaml kikimr=bin --arcadia
+
+Example, update only kikimr configs:
+ %(prog)s update cluster.yaml kikimr=cfg --arcadia
+
+Example, install part by part:
+ %(prog)s install cluster.yaml kikimr --arcadia
+ %(prog)s install cluster.yaml dynamic_slots --arcadia
+
+Example, stop only kikimr at one node:
+ %(prog)s stop cluster.yaml kikimr --hosts kikimr0111
+
+Example, stop only kikimr at the cluster:
+ %(prog)s stop cluster.yaml kikimr
+
+Example, stop/start all slots at the cluster:
+ %(prog)s stop cluster.yaml dynamic_slots
+ %(prog)s start cluster.yaml dynamic_slots
+
+And so on. Feel free to combine component arguments with all modes.
+And use hosts specification to reduce active nodes set.
+
+\033[96mCommands for Kubernetes Developer's Slices\x1b[0m
+
+\033[95mdocker-build\033[94m - command to build dev docker image.
+
\033[36mDev images use a special docker-registry and tag by default,
+example: cr.yandex/crpbo4q9lbgkn85vr1rm/ydb:<login>-latest.\033[94m
+
+Example, build dev docker image:
+ %(prog)s docker-build
+
+Example, build dev docker image with specific build args:
+ %(prog)s docker-build --build_args -j 50
+
+\033[95mkube-generate\033[94m - command to generate manifests for new slice in current directory or directory,
+specified using -p option.
+
+Example, create Kubernetes manifests for 8 node cluster with block-4-2 erasure using nodes with
+cpu104_soc2_mem512G_net25G_4nvme flavor:
+ mkdir directory_with_my_manifests
+ cd directory_with_my_manifests
+ %(prog)s kube-generate -n myslice -v node_flavor=cpu104_soc2_mem512G_net25G_4nvme
+
+\033[36mAll kube-* commands must be executed in directory with Kubernetes manifest files or pointed to such directory
+using -p option.\033[94m
+
+\033[95mkube-install\033[94m - command to setup new or completely redeploy existing YDB Slice in Kubernetes.
All existing objects related to your manifest files will be deleted first.
+
+Example, create Kubernetes objects using your latest dev image (cr.yandex/crpbo4q9lbgkn85vr1rm/ydb:<login>-latest):
+ cd directory_with_my_manifests
+ %(prog)s kube-install
+
+Example, create Kubernetes objects using your latest dev image (cr.yandex/crpbo4q9lbgkn85vr1rm/ydb:<login>-latest) and
+wait for all slice objects to become Ready:
+ cd directory_with_my_manifests
+ %(prog)s kube-install -w
+
+Example, create kubernetes objects using dev image with specific tag (cr.yandex/crpbo4q9lbgkn85vr1rm/ydb:somebody-1):
+ cd directory_with_my_manifests
+ %(prog)s kube-install -t somebody-1
+
Example, create kubernetes objects using your locally built image, specify
image name (cr.yandex/crpl7ipeu79oseqhcgn2/ydb:23.3.11):
+ cd directory_with_my_manifests
+ %(prog)s kube-install -i cr.yandex/crpl7ipeu79oseqhcgn2/ydb:23.3.11
+
+Example, create kubernetes objects using existing release image with
specific tag (cr.yandex/crpl7ipeu79oseqhcgn2/ydb:23.2.10):
+ cd directory_with_my_manifests
+ %(prog)s kube-install --use-prebuilt-image -i cr.yandex/crpl7ipeu79oseqhcgn2/ydb:23.2.10
+
+Example, create kubernetes objects, force-rebuild image:
+ cd directory_with_my_manifests
+ %(prog)s kube-install --force-rebuild --build_args -j 50
+
+\033[95mkube-update\033[94m - command to update existing YDB Slice in kubernetes.
+
+Example, update kubernetes objects using your latest dev image (cr.yandex/crpbo4q9lbgkn85vr1rm/ydb:<login>-latest):
+ cd directory_with_my_manifests
+ %(prog)s kube-update
+
+Example, update all storage objects only:
+ cd directory_with_my_manifests
+ %(prog)s kube-update -c storage
+
+Example, update specific database object only:
+ cd directory_with_my_manifests
+ %(prog)s kube-update -c database:database1,database2
+
+Example, update specific database object only and wait for this Database object to become Ready:
+ cd directory_with_my_manifests
+ %(prog)s kube-update -c database:database1,database2 -w
+
+Command supports all docker-related build options.
+Example, update kubernetes objects, force-rebuild image:
+ cd directory_with_my_manifests
+ %(prog)s kube-update --force-rebuild --build_args -j 50
+
+\033[95mkube-stop\033[94m - command to stop nodes by removing Storage and Database objects from Kubernetes cluster.
+
+Example, stop all pods:
+ cd directory_with_my_manifests
+ %(prog)s kube-stop
+
+Example, stop mydatabase pods:
+ cd directory_with_my_manifests
+ %(prog)s kube-stop -c database:mydatabase
+
+\033[95mkube-start\033[94m - command to start nodes by creating Storage and Database objects in Kubernetes cluster.
+
+Example, start all pods:
+ cd directory_with_my_manifests
+ %(prog)s kube-start
+
+Example, start mydatabase pods:
+ cd directory_with_my_manifests
+ %(prog)s kube-start -c database:mydatabase
+
+Example, start mydatabase pods and wait for Database object to become Ready:
+ cd directory_with_my_manifests
+ %(prog)s kube-start -c database:mydatabase -w
+
\033[95mkube-restart\033[94m - command to restart nodes by deleting pods in Kubernetes cluster.
+
+Example, restart all pods:
+ cd directory_with_my_manifests
+ %(prog)s kube-restart
+
+Example, restart mydatabase pods:
+ cd directory_with_my_manifests
+ %(prog)s kube-restart -c database:mydatabase
+
+\033[95mkube-nodes\033[94m - command to list NodeClaim nodes.
+
+Example, list all slice nodes:
+ cd directory_with_my_manifests
+ %(prog)s kube-nodes
+
+Example, save all slice nodes in file and use this file to run remote commands on nodes:
+ cd directory_with_my_manifests
+ %(prog)s kube-nodes > nodelist
+ pssh run-e -p 30 -H L@nodelist 'unified_agent select -s kikimr -S now-10m'
+
+\033[95mkube-format\033[94m - command to stop nodes (like with kube-stop command), format drives on hosts,
+reserved by your NodeClaims, start nodes (like with kube-start command).
+
+Example:
+ cd directory_with_my_manifests
+ %(prog)s kube-format
+
+Example, wait for Storage and Database object to become Ready:
+ cd directory_with_my_manifests
+ %(prog)s kube-format -w
+
+\033[95mkube-clear\033[94m - command to stop nodes (like with kube-stop command), format drives on hosts,
+reserved by your NodeClaims.
+
+Example:
+ cd directory_with_my_manifests
+ %(prog)s kube-clear
+
+\033[95mkube-uninstall\033[94m - command to stop nodes (like with kube-stop command), format drives on hosts,
+reserved by your NodeClaims, delete your NodeClaims.
+
+Example:
+ cd directory_with_my_manifests
+ %(prog)s kube-uninstall
+
+\x1b[0m
+'''
+
+
# Arcadia-relative path of the ydbd server binary built via `ya make`.
YDBD_EXECUTABLE = 'ydb/apps/ydbd/ydbd'
+
+
class Terminate(BaseException):
    """Raised to unwind the stack when the process receives SIGTERM.

    Derives from BaseException (not Exception) so that generic
    ``except Exception`` clauses in handlers do not swallow the shutdown.
    """

    @staticmethod
    def handler(signum, frame):
        # Installed as the SIGTERM handler in main(); converts the signal
        # into an exception that main() catches for a clean exit.
        logger.debug('got SIGTERM signal, terminating')
        raise Terminate(signum, frame)
+
+
def safe_load_cluster_details(cluster_yaml):
    """Load the cluster description YAML, exiting with code 2 on I/O errors."""
    try:
        details = cluster_description.ClusterDetails(cluster_yaml)
    except IOError as io_err:
        print('', file=sys.stderr)
        print("unable to open YAML params as a file, check args", file=sys.stderr)
        print("origin exception was %s" % io_err, file=sys.stderr)
        sys.exit(2)
    return details
+
+
def deduce_components_from_args(args, cluster_details):
    """Translate positional COMPONENT arguments into a {name: options} dict.

    Recognized components are 'kikimr' (options 'bin'/'cfg'/'none') and,
    when the cluster defines databases or dynamic slots, 'dynamic_slots'.
    The special name 'all' selects every available component with its
    default options ('bin'+'cfg' for kikimr, 'all' for dynamic_slots).
    """
    dynamic_enabled = bool(cluster_details.databases) or bool(cluster_details.dynamic_slots)

    components = ['kikimr']
    if dynamic_enabled:
        components.append('dynamic_slots')

    result = dict()

    for item in args.components:
        # FIX: split on the first '=' only; rsplit('=') without a maxsplit
        # raised ValueError for inputs containing more than one '='.
        name, val = item.split('=', 1) if '=' in item else (item, None)
        assert name == 'all' or name in components, \
            "component <%s> not in allowed set of components [%s]" % (name, ", ".join(components))

        if name == 'dynamic_slots':
            assert val is None
        else:
            assert val in ('cfg', 'bin', 'none', None)

        val = [val] if val is not None else []
        if name in result:
            result[name] += val
        else:
            result[name] = val

    # 'all' overrides any explicit selection.
    if 'all' in args.components:
        result = {item: [] for item in components}

    # Bare 'kikimr' implies both binary and config updates.
    if 'kikimr' in result and len(result['kikimr']) == 0:
        result['kikimr'] = ['bin', 'cfg']

    if 'dynamic_slots' in result:
        result['dynamic_slots'] = ['all']

    logger.debug("active components is '%s'", result)
    return result
+
+
def deduce_nodes_from_args(args):
    """Build the Nodes collection from the cluster description.

    With --hosts, only cluster hosts whose names start with one of the
    given prefixes are kept; exits if nothing matches.
    """
    cluster_hosts = safe_load_cluster_details(args.cluster).hosts_names

    if args.nodes is None:
        chosen = cluster_hosts
    else:
        chosen = [
            host for host in cluster_hosts
            if any(host.startswith(prefix) for prefix in args.nodes)
        ]

    if not chosen:
        sys.exit("unable to deduce hosts")

    logger.info("use nodes '%s'", chosen)
    return nodes.Nodes(chosen, args.dry_run)
+
+
def ya_build(arcadia_root, artifact, opts, dry_run):
    """Build *artifact* (path relative to the arcadia root) with `ya make`.

    Returns the absolute path of the built binary.  In dry-run mode the
    command is only logged and the path is returned unverified.
    """
    project_dir, _ = os.path.split(artifact)
    bin_path = os.path.join(arcadia_root, artifact)
    cmd = [os.path.join(arcadia_root, 'ya'), 'make'] + opts + [os.path.join(arcadia_root, project_dir)]

    logger.info("run command '%s'", cmd)
    if not dry_run:
        subprocess.check_call(cmd)
        logger.debug(bin_path)
        assert os.path.isfile(bin_path)

    return bin_path
+
+
def ya_package_docker(arcadia_root, opts, pkg_path, image):
    """Build a docker package with `ya package` and tag it as *image*.

    Reads the resulting image name from the packages.json file that
    `ya package` writes into the current directory; re-tags it when the
    built name differs from the requested one.
    """
    image_name, tag = image.rsplit(':', 1)

    cmd = [os.path.join(arcadia_root, 'ya'), 'package'] + opts + [
        '--docker',
        '--docker-network', 'host',
        '--docker-registry', docker.DOCKER_IMAGE_REGISTRY,
        '--docker-repository', docker.DOCKER_IMAGE_REPOSITORY,
        '--custom-version', tag,
        os.path.join(arcadia_root, pkg_path),
    ]
    logger.info("run command '%s'", cmd)
    subprocess.check_call(cmd)
    try:
        with open('packages.json') as file:
            img_data = json.load(file)
            logger.info('successfully built image: %s', img_data)
            built_image = img_data[0]['docker_image']
            built_image_name, _ = built_image.rsplit(':', 1)
            if built_image_name != image_name:
                logger.debug('tagging image from "%s" to "%s"', built_image_name, image_name)
                docker.docker_tag(built_image, image)
            return built_image
    except Exception as e:
        logger.error('failed to get image details from packages.json file, error: %s', str(e))
        raise
+
+
def arcadia_root(begin_path='.'):
    """Walk upward from *begin_path* until a '.arcadia.root' marker is found.

    Returns the absolute directory containing the marker; exits the
    process if the filesystem root is reached without finding one.
    """
    current = os.path.realpath(begin_path)
    while current != '/':
        if os.path.exists(os.path.join(current, '.arcadia.root')):
            return current
        current = os.path.dirname(current)
    sys.exit("unable to find arcadia root")
+
+
def deduce_kikimr_bin_from_args(args):
    """Resolve the kikimr binary: explicit --kikimr path or an arcadia build.

    Returns a (path, compressed_path) pair; exits when neither source is
    available.
    """
    if args.kikimr is not None:
        path = os.path.abspath(args.kikimr)
    elif args.arcadia:
        path = ya_build(arcadia_root(), YDBD_EXECUTABLE, args.build_args, args.dry_run)
    else:
        sys.exit("unable to deduce kikimr bin")

    logger.info("use kikimr bin '%s'", path)
    return path, args.kikimr_lz4
+
+
def deduce_temp_dir_from_args(args):
    """Return the working temp directory, creating it (mode 0755) if needed.

    When --temp-dir is absent, a fresh mkdtemp directory is used instead.
    """
    permits = 0o755

    if args.temp_dir is None:
        # No directory given: create a fresh one and relax its permissions.
        temp_dir = tempfile.mkdtemp()
        assert os.path.isdir(temp_dir)
        os.chmod(temp_dir, permits)
    else:
        temp_dir = args.temp_dir
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir, permits)
        assert os.path.isdir(temp_dir)

    logger.info("use tmp dir '%s'", temp_dir)
    return temp_dir
+
+
def direct_nodes_args():
    """Shared parent parser: optional explicit host list via -H/--hosts."""
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument(
        "-H",
        "--hosts",
        metavar="HOST",
        dest='nodes',
        nargs='+',
        help="set of nodes as is"
    )
    return parent
+
+
def cluster_description_args():
    """Shared parent parser: mandatory positional cluster YAML path."""
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument(
        "cluster",
        metavar="YAML",
        help="cluster description in yaml format"
    )
    return parent
+
+
def log_args():
    """Shared parent parser: --clear_logs flag for wiping kikimr logs."""
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument(
        "--clear_logs",
        dest='clear_logs',
        action='store_true',
        help="stop rsyslogd and erase all kikimr logs"
    )
    return parent
+
+
def binaries_args():
    """Shared parent parser: options selecting where binaries come from."""
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument(
        "--kikimr",
        metavar="BIN",
        default=None,
        help="explicit path to kikimr"
    )
    parent.add_argument(
        "--kikimr-lz4",
        metavar="PATH",
        help="explicit path to compressed kikimr binary file used for transfer acceleration"
    )
    parent.add_argument(
        "--arcadia",
        action='store_true',
        help="build all binaries from arcadia, figure out root by finding .arcadia.root upstairs"
    )
    # REMAINDER: everything after --build_args is forwarded to `ya make`.
    parent.add_argument(
        "--build_args",
        metavar="BUILD_ARGS",
        default=['--checkout', '-r'],
        nargs=argparse.REMAINDER,
        help="remaining arguments are treated as arguments to 'ya make' tool (only valid if --arcadia is provided)"
    )
    return parent
+
+
def component_args():
    """Shared parent parser: positional component selector list."""
    args = argparse.ArgumentParser(add_help=False)
    args.add_argument(
        "components",
        metavar="COMPONENT",
        default=['all'],
        nargs="*",
        # FIX: the adjacent string literals previously concatenated as
        # "'dynamic_slots''all' is default" — a separator was missing.
        help="specify components to work with, "
             "multiple choice from: 'all', 'kikimr[={bin|cfg}]', "
             "'dynamic_slots'. "
             "'all' is default",
    )
    return args
+
+
def add_explain_mode(modes):
    """Register the 'explain' sub-command: dump generated configs to --out-cfg."""
    def _handler(args):
        logger.debug("run func explain with cmd args is '%s'", args)

        details = safe_load_cluster_details(args.cluster)
        selected = deduce_components_from_args(args, details)

        kikimr_bin, kikimr_compressed_bin = deduce_kikimr_bin_from_args(args)

        if not os.path.exists(args.out_cfg):
            os.mkdir(args.out_cfg, 0o755)
        assert os.path.isdir(args.out_cfg)

        configuration = cluster_description.Configurator(
            details,
            args.out_cfg,
            kikimr_bin,
            kikimr_compressed_bin,
        )

        if 'kikimr' in selected:
            logger.debug("static cfg: %s", configuration.create_static_cfg())
            logger.debug("dynamic cfg: %s", configuration.create_dynamic_cfg())

    parser = modes.add_parser(
        "explain",
        parents=[cluster_description_args(), binaries_args(), component_args()],
        description="Just dump generated cfg into --out-cfg."
    )
    parser.add_argument(
        "--out-cfg",
        metavar="DIR",
        required=True,
        help=""
    )
    parser.set_defaults(handler=_handler)
+
+
def dispatch_run_light(func, args):
    """Invoke *func(components, nodes, cluster_details)* resolved from *args*.

    'Light' dispatch: no binaries, configs or temp directory are prepared.
    """
    logger.debug("run func '%s' with cmd args is '%s'", func.__name__, args)

    details = safe_load_cluster_details(args.cluster)
    selected = deduce_components_from_args(args, details)
    logger.debug("components is '%s'", selected)

    func(selected, deduce_nodes_from_args(args), details)
+
+
def dispatch_run(func, args):
    """Full dispatch: resolve cluster, components, nodes, binaries and a temp
    dir, build a Configurator, then invoke *func*.

    The temp dir is removed after a successful run unless it was supplied
    explicitly or --dry-run is set.  NOTE(review): it is left behind when
    *func* raises — presumably to aid debugging of failed runs; confirm.
    """
    logger.debug("run func '%s' with cmd args is '%s'", func.__name__, args)

    details = safe_load_cluster_details(args.cluster)
    selected = deduce_components_from_args(args, details)
    target_nodes = deduce_nodes_from_args(args)

    temp_dir = deduce_temp_dir_from_args(args)
    remove_tmp = args.temp_dir is None and not args.dry_run

    kikimr_bin, kikimr_compressed_bin = deduce_kikimr_bin_from_args(args)

    configurator = cluster_description.Configurator(
        details,
        out_dir=temp_dir,
        kikimr_bin=kikimr_bin,
        kikimr_compressed_bin=kikimr_compressed_bin
    )

    # clear_logs is only defined by sub-commands that include log_args().
    func(selected, target_nodes, details, configurator, vars(args).get('clear_logs'), args)

    if remove_tmp:
        logger.debug("remove temp dirs '%s'", temp_dir)
        shutil.rmtree(temp_dir)
+
+
def dispatch_run_raw_cfg(func, args):
    """Invoke *func(components, nodes, cluster_details, raw_cfg)* from *args*."""
    logger.debug("run func '%s' with cmd args is '%s'", func.__name__, args)

    details = safe_load_cluster_details(args.cluster)
    selected = deduce_components_from_args(args, details)

    func(selected, deduce_nodes_from_args(args), details, args.raw_cfg)
+
+
def add_install_mode(modes):
    """Register the 'install' sub-command: full cluster setup from scratch."""
    def _handler(args):
        dispatch_run(handlers.slice_install, args)

    parser = modes.add_parser(
        "install",
        conflict_handler='resolve',
        parents=[direct_nodes_args(), cluster_description_args(), binaries_args(), component_args(), log_args()],
        description="Full installation of the cluster from scratch. "
                    "You can use --hosts to specify particular hosts. But it is tricky."
    )
    parser.set_defaults(handler=_handler)
+
+
def add_update_mode(modes):
    """Register the 'update' sub-command: sync binary and cfg only."""
    def _run(args):
        dispatch_run(handlers.slice_update, args)

    mode = modes.add_parser(
        "update",
        conflict_handler='resolve',
        parents=[direct_nodes_args(), cluster_description_args(), binaries_args(), component_args(), log_args()],
        # FIX: the concatenated strings previously read "...performed.Stop all..."
        # — a separating space was missing.
        description="Minor cluster update, just binary and cfg. No additional configuration is performed. "
                    "Stop all kikimr instances at the nodes, sync binary and cfg, start the instances. "
                    "Use --hosts to specify particular hosts."
    )
    mode.set_defaults(handler=_run)
+
+
def add_update_raw_configs(modes):
    """Register the 'update-raw-cfg' sub-command: push a prepared cfg tree."""
    def _handler(args):
        dispatch_run_raw_cfg(handlers.slice_update_raw_configs, args)

    parser = modes.add_parser(
        "update-raw-cfg",
        conflict_handler='resolve',
        parents=[direct_nodes_args(), cluster_description_args(), component_args()],
        description=""
    )
    parser.add_argument(
        "--raw-cfg",
        metavar="DIR",
        required=True,
        help="",
    )
    parser.set_defaults(handler=_handler)
+
+
def add_stop_mode(modes):
    """Register the 'stop' sub-command."""
    def _run(args):
        dispatch_run_light(handlers.slice_stop, args)

    mode = modes.add_parser(
        "stop",
        parents=[direct_nodes_args(), cluster_description_args(), binaries_args(), component_args()],
        # FIX: typo "instaneces" -> "instances".
        description="Stop kikimr static instances at the nodes. "
                    "If option components specified, try to stop particular component. "
                    "Use --hosts to specify particular hosts."
    )
    mode.set_defaults(handler=_run)
+
+
def add_start_mode(modes):
    """Register the 'start' sub-command."""
    def _handler(args):
        dispatch_run_light(handlers.slice_start, args)

    parser = modes.add_parser(
        "start",
        parents=[direct_nodes_args(), cluster_description_args(), binaries_args(), component_args()],
        description="Start all kikimr instances at the nodes. "
                    "If option components specified, try to start particular component. "
                    "Otherwise only kikimr-multi-all will be started. "
                    "Use --hosts to specify particular hosts."
    )
    parser.set_defaults(handler=_handler)
+
+
def add_clear_mode(modes):
    """Register the 'clear' sub-command: stop, format, do not restart."""
    def _run(args):
        dispatch_run_light(handlers.slice_clear, args)

    mode = modes.add_parser(
        "clear",
        parents=[direct_nodes_args(), cluster_description_args(), component_args()],
        # FIX: typo "afrer" -> "after".
        description="Stop all kikimr instances at the nodes, format all kikimr drivers, shutdown dynamic slots. "
                    "And don't start nodes after it. "
                    "Use --hosts to specify particular hosts."
    )
    mode.set_defaults(handler=_run)
+
+
def add_format_mode(modes):
    """Register the 'format' sub-command: stop, format drives, restart."""
    def _run(args):
        dispatch_run_light(handlers.slice_format, args)

    mode = modes.add_parser(
        "format",
        parents=[direct_nodes_args(), cluster_description_args(), component_args()],
        # FIX: grammar "will required" -> "will be required".
        description="Stop all kikimr instances at the nodes, format all kikimr drivers at the nodes, start the instances. "
                    "If you call format for all cluster, you will spoil it. "
                    "Additional dynamic configuration will be required after it. "
                    "If you call format for few nodes, cluster will regenerate after it. "
                    "Use --hosts to specify particular hosts."
    )
    mode.set_defaults(handler=_run)
+
+
+#
+# docker and kube scenarios
def build_and_push_docker_image(build_args, docker_package, build_ydbd, image, force_rebuild):
    """Build the YDB docker image when needed, then push it to the registry.

    The image is (re)built when it is absent on the host or when
    *force_rebuild* is set.  NOTE(review): *build_ydbd* is accepted for
    interface compatibility but is not used here.
    """
    if docker_package is None:
        docker_package = docker.DOCKER_IMAGE_YDBD_PACKAGE_SPEC

    logger.debug(f'using docker package spec: {docker_package}')

    present = docker.docker_inspect(image) is not None

    if not present:
        logger.debug('ydb image %s is not present on host, building', image)
        ya_package_docker(arcadia_root(), build_args, docker_package, image)
    elif force_rebuild:
        logger.debug('ydb image %s is already present on host, rebuilding', image)
        ya_package_docker(arcadia_root(), build_args, docker_package, image)
    else:
        logger.debug('ydb image %s is already present on host, using existing image', image)

    docker.docker_push(image)
+
+
def add_arguments_docker_build_with_remainder(mode, add_force_rebuild=False):
    """Attach the shared 'docker build options' argument group to *mode*.

    The -f/--force-rebuild flag is only added when *add_force_rebuild* is
    True; everything after --build_args is forwarded to `ya package`.
    """
    group = mode.add_argument_group('docker build options')
    if add_force_rebuild:
        group.add_argument(
            '-f', '--force-rebuild',
            help='Force rebuild docker image even if it is already present on host.',
            action='store_true',
        )
    group.add_argument(
        '-d', '--docker-package',
        help='Optional: path to docker package description file relative from ARCADIA_ROOT.',
    )
    group.add_argument(
        '-i', '--image',
        help='Optional: docker image name and tag to mark image after build. Conflicts with "-t" argument.',
    )
    group.add_argument(
        '-t', '--tag',
        help='Optional: docker image tag to mark image after build. Conflicts with "-i" argument. Default is {user}-latest.',
    )
    group.add_argument(
        "--build_args",
        metavar="BUILD_ARGS",
        default=['--checkout', '-r'],
        nargs=argparse.REMAINDER,
        help="remaining arguments are treated as arguments to 'ya package' tool"
    )
+
+
def add_docker_build_mode(modes):
    """Register the 'docker-build' sub-command: always rebuild the image."""
    def _handler(args):
        logger.debug("starting docker-build cmd with args '%s'", args)
        try:
            image = docker.get_image_from_args(args)
            build_and_push_docker_image(args.build_args, args.docker_package, False, image, force_rebuild=True)

            logger.info('docker-build finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "docker-build",
        parents=[],
        description="Build YDB docker image."
    )
    add_arguments_docker_build_with_remainder(parser, add_force_rebuild=False)
    parser.set_defaults(handler=_handler)
+
+
def add_kube_generate_mode(modes):
    """Register the 'kube-generate' sub-command: render slice manifests."""
    def _run(args):
        logger.debug("starting kube-generate cmd with args '%s'", args)
        try:
            if args.user is None:
                args.user = docker.get_user()

            template_vars = {}
            for item in args.template_vars:
                # FIX: split on the first '=' only, so values may themselves
                # contain '=' (e.g. -v key=a=b); bare split('=') raised
                # ValueError on such input.
                key, value = item.split('=', 1)
                template_vars[key] = value

            kube_handlers.slice_generate(args.path, args.user, args.name, args.template, template_vars)

            logger.info('kube-generate finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    mode = modes.add_parser(
        "kube-generate",
        parents=[],
        description="Setup new or completely redeploy existing YDB Slice in Kubernetes."
    )
    mode.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    mode.add_argument(
        '-n', '--name',
        help='Slice name.',
        required=True,
    )
    mode.add_argument(
        '-t', '--template',
        help='Slice manifest templates for quick start.',
        choices=('8-node-block-4-2',),
        default='8-node-block-4-2',
    )
    mode.add_argument(
        '-v', '--template-vars',
        help='Slice manifest template variables for quick start. Example: ',
        action='append',
        default=[],
    )
    mode.add_argument(
        '--user',
        help='Slice user login. Default: get from $USER environ.',
        default=None,
    )
    mode.set_defaults(handler=_run)
+
+
def add_kube_install_mode(modes):
    """Register the 'kube-install' sub-command: build image and (re)deploy."""
    def _handler(args):
        logger.debug("starting kube-install cmd with args '%s'", args)
        try:
            image = docker.get_image_from_args(args)
            if not args.use_prebuilt_image:
                build_and_push_docker_image(args.build_args, args.docker_package, False, image, force_rebuild=args.force_rebuild)

            manifests = kube_handlers.get_all_manifests(args.path)
            kube_handlers.manifests_ydb_set_image(args.path, manifests, image)
            kube_handlers.slice_install(args.path, manifests, args.wait_ready)

            logger.info('kube-install finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-install",
        parents=[],
        description="Setup new or completely redeploy existing YDB Slice in Kubernetes."
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.add_argument(
        '-w', '--wait-ready',
        help='Wait for ydb objects ready state. Default: false',
        action='store_true',
    )
    parser.add_argument(
        '--use-prebuilt-image',
        help='Do not build docker image, just specify image name in manifests.',
        action='store_true',
    )
    add_arguments_docker_build_with_remainder(parser, add_force_rebuild=True)
    parser.set_defaults(handler=_handler)
+
+
def add_kube_update_mode(modes):
    """Register the 'kube-update' sub-command: update selected components."""
    def _handler(args):
        logger.debug("starting kube-update cmd with args '%s'", args)
        try:
            image = docker.get_image_from_args(args)
            if not args.use_prebuilt_image:
                build_and_push_docker_image(args.build_args, args.docker_package, False, image, force_rebuild=args.force_rebuild)

            manifests = kube_handlers.get_all_manifests(args.path)
            manifests = kube_handlers.manifests_ydb_filter_components(args.path, manifests, args.components)
            kube_handlers.manifests_ydb_set_image(args.path, manifests, image)
            kube_handlers.slice_update(args.path, manifests, args.wait_ready)

            logger.info('kube-update finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-update",
        parents=[],
        description="Update existing YDB Slice in kubernetes."
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.add_argument(
        '-c', '--components',
        help=('Selector for specific components to perform action. '
              'Example: "storage:mystorage;database:mydatabase1,mydatabase2".'),
        type=kube_handlers.parse_components_selector,
    )
    parser.add_argument(
        '-w', '--wait-ready',
        help='Wait for ydb objects ready state. Default: false',
        action='store_true',
    )
    parser.add_argument(
        '--use-prebuilt-image',
        help='Do not build docker image, just specify image name in manifests.',
        action='store_true',
    )
    add_arguments_docker_build_with_remainder(parser, add_force_rebuild=True)
    parser.set_defaults(handler=_handler)
+
+
def add_kube_stop_mode(modes):
    """Register the 'kube-stop' sub-command."""
    def _handler(args):
        logger.debug("starting kube-stop cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            manifests = kube_handlers.manifests_ydb_filter_components(args.path, manifests, args.components)
            kube_handlers.slice_stop(args.path, manifests)

            logger.info('kube-stop finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-stop",
        parents=[],
        description="Stop nodes by removing Storage and Database objects from Kubernetes cluster."
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.add_argument(
        '-c', '--components',
        help=('Selector for specific components to perform action. '
              'Example: "storage:mystorage;database:mydatabase1,mydatabase2".'),
        type=kube_handlers.parse_components_selector,
    )
    parser.set_defaults(handler=_handler)
+
+
def add_kube_start_mode(modes):
    """Register the 'kube-start' sub-command."""
    def _handler(args):
        logger.debug("starting kube-start cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            manifests = kube_handlers.manifests_ydb_filter_components(args.path, manifests, args.components)
            kube_handlers.slice_start(args.path, manifests, args.wait_ready)

            logger.info('kube-start finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-start",
        parents=[],
        description="Start nodes by creating Storage and Database objects in Kubernetes cluster."
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.add_argument(
        '-c', '--components',
        help=('Selector for specific components to perform action. '
              'Example: "storage:mystorage;database:mydatabase1,mydatabase2".'),
        type=kube_handlers.parse_components_selector,
    )
    parser.add_argument(
        '-w', '--wait-ready',
        help='Wait for ydb objects ready state. Default: false',
        action='store_true',
    )
    parser.set_defaults(handler=_handler)
+
+
def add_kube_restart_mode(modes):
    """Register the 'kube-restart' sub-command."""
    def _run(args):
        logger.debug("starting kube-restart cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            manifests = kube_handlers.manifests_ydb_filter_components(args.path, manifests, args.components)
            kube_handlers.slice_restart(args.path, manifests)

            logger.info('kube-restart finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    mode = modes.add_parser(
        "kube-restart",
        parents=[],
        # FIX: typo "Kuberetes" -> "Kubernetes".
        description="Restart nodes by deleting pods in Kubernetes cluster."
    )
    mode.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    mode.add_argument(
        '-c', '--components',
        help=('Selector for specific components to perform action. '
              'Example: "storage:mystorage;database:mydatabase1,mydatabase2".'),
        type=kube_handlers.parse_components_selector,
    )
    mode.set_defaults(handler=_run)
+
+
def add_kube_nodes_mode(modes):
    """Register the 'kube-nodes' sub-command: print NodeClaim node list."""
    def _handler(args):
        logger.debug("starting kube-nodes cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            kube_handlers.slice_nodes(args.path, manifests)

            logger.info('kube-nodes finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-nodes",
        parents=[],
        description=("List slice nodes.")
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.set_defaults(handler=_handler)
+
+
def add_kube_format_mode(modes):
    """Register the 'kube-format' sub-command: stop, wipe drives, restart."""
    def _handler(args):
        logger.debug("starting kube-format cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            kube_handlers.slice_format(args.path, manifests, args.wait_ready)

            logger.info('kube-format finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-format",
        parents=[],
        description=("Stop nodes (like with kube-stop command), format drives on hosts, reserved by your NodeClaims, "
                     "start nodes (like with kube-start command).")
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.add_argument(
        '-w', '--wait-ready',
        help='Wait for ydb objects ready state. Default: false',
        action='store_true',
    )
    parser.set_defaults(handler=_handler)
+
+
def add_kube_clear_mode(modes):
    """Register the 'kube-clear' sub-command: stop and wipe, no restart."""
    def _handler(args):
        logger.debug("starting kube-clear cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            kube_handlers.slice_clear(args.path, manifests)

            logger.info('kube-clear finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-clear",
        parents=[],
        description="Stop nodes (like with kube-stop command), format drives on hosts, reserved by your NodeClaims."
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.set_defaults(handler=_handler)
+
+
def add_kube_uninstall_mode(modes):
    """Register the 'kube-uninstall' sub-command: full teardown of a slice."""
    def _handler(args):
        logger.debug("starting kube-uninstall cmd with args '%s'", args)
        try:
            manifests = kube_handlers.get_all_manifests(args.path)
            kube_handlers.slice_uninstall(args.path, manifests)

            logger.info('kube-uninstall finished')
        except RuntimeError as e:
            logger.error(e.args[0])
            sys.exit(1)

    parser = modes.add_parser(
        "kube-uninstall",
        parents=[],
        description=("Stop nodes (like with kube-stop command), format drives on hosts, reserved by your NodeClaims, "
                     "delete your NodeClaims.")
    )
    parser.add_argument(
        '-p', '--path',
        help='Path to project directory with kubernetes manifests. Default: $PWD.',
        default='.',
    )
    parser.set_defaults(handler=_handler)
+
+
def main():
    """CLI entry point: configure logging, build the parser tree, dispatch.

    SIGTERM is converted into the Terminate exception so both it and
    KeyboardInterrupt produce a clean exit message.
    """
    try:
        signal.signal(signal.SIGTERM, Terminate.handler)

        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)-8s %(name)-39s %(funcName)s: %(message)s')
        )
        root_logger = logging.getLogger()
        root_logger.addHandler(stream_handler)
        root_logger.setLevel(logging.DEBUG)
        # Tune noisy third-party/auxiliary loggers.
        logging.getLogger('kikimr.tools.kikimr_slice').setLevel(logging.DEBUG)
        logging.getLogger('ya.test').setLevel(logging.WARNING)
        logging.getLogger('kubernetes').setLevel(logging.ERROR)
        logging.getLogger('urllib3').setLevel(logging.ERROR)

        parser = argparse.ArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter,
            description=HELP,
        )
        parser.add_argument(
            '--log-level',
            metavar="LEVEL",
            choices=['debug', 'info', 'error'],
            default='debug',
            help='root logger level'
        )
        parser.add_argument(
            '--dry-run',
            default=False,
            action='store_true',
            help='do not touch the cluster only print debug'
        )
        parser.add_argument(
            '--temp-dir',
            metavar="DIR",
            default=None,
            help=''
        )
        parser.add_argument(
            "--yav-version",
            metavar="VERSION",
            default="ver-01gswscgce37hdbqyssjm3nd7x",
            help=''
        )

        modes = parser.add_subparsers()
        # Registration order determines sub-command order in --help output.
        for register in (
            add_start_mode, add_stop_mode, add_install_mode, add_update_mode,
            add_update_raw_configs, add_clear_mode, add_format_mode,
            add_explain_mode, add_docker_build_mode, add_kube_generate_mode,
            add_kube_install_mode, add_kube_update_mode, add_kube_stop_mode,
            add_kube_start_mode, add_kube_restart_mode, add_kube_nodes_mode,
            add_kube_format_mode, add_kube_clear_mode, add_kube_uninstall_mode,
        ):
            register(modes)

        args = parser.parse_args()
        logging.root.setLevel(args.log_level.upper())
        args.handler(args)

    except KeyboardInterrupt:
        sys.exit('\nStopped by KeyboardInterrupt.')
    except Terminate:
        sys.exit('\nTerminated.')
diff --git a/ydb/tools/ydbd_slice/bin/ya.make b/ydb/tools/ydbd_slice/bin/ya.make
new file mode 100644
index 0000000000..b53661bccf
--- /dev/null
+++ b/ydb/tools/ydbd_slice/bin/ya.make
@@ -0,0 +1,9 @@
+PY3_PROGRAM(ydbd_slice)
+
+PY_MAIN(ydb.tools.ydbd_slice:main)
+
+PEERDIR(
+ ydb/tools/ydbd_slice
+)
+
+END()
diff --git a/ydb/tools/ydbd_slice/cluster_description.py b/ydb/tools/ydbd_slice/cluster_description.py
new file mode 100644
index 0000000000..543af119c2
--- /dev/null
+++ b/ydb/tools/ydbd_slice/cluster_description.py
@@ -0,0 +1,195 @@
+import os
+import yaml
+import random
+from collections import namedtuple
+from functools import reduce
+
+from ydb.tools.cfg.base import ClusterDetailsProvider
+from ydb.tools.cfg.dynamic import DynamicConfigGenerator
+from ydb.tools.cfg.static import StaticConfigGenerator
+from ydb.tools.cfg.utils import write_to_file
+from ydb.tools.cfg.walle import NopHostsInformationProvider
+
+
# One dynamic-slot instance: its name, owning domain, and the four ports
# (message bus, gRPC, monitoring, interconnect) it listens on.
DynamicSlot = namedtuple('_DynamicSlot', 'slot domain mbus grpc mon ic')
+
+
class ClusterDetails(ClusterDetailsProvider):
    """Cluster description parsed from a YAML file.

    Adds lazily-computed views on top of ClusterDetailsProvider: host
    names, databases grouped by domain, and dynamic slots with their
    port assignments.
    """

    TENANTS_PORTS_START = 30000
    # Base port for dynamic slots; advanced by PORTS_SHIFT per slot.  The
    # advance is written through `self`, which creates an instance
    # attribute, so instances do not interfere with each other.
    SLOTS_PORTS_START = 31000
    PORTS_SHIFT = 10

    def __init__(self, cluster_description_path):
        # Caches for the lazy properties below.
        self.__template = None
        self.__details = None
        self.__databases = None
        self.__dynamic_slots = None
        self._cluster_description_file = cluster_description_path
        self._walle_provider = NopHostsInformationProvider()

        super(ClusterDetails, self).__init__(self.template, self._walle_provider)

    @property
    def template(self):
        """Parsed YAML document describing the cluster (loaded once)."""
        if self.__template is None:
            with open(self._cluster_description_file, 'r') as stream:
                self.__template = yaml.safe_load(stream.read())
        return self.__template

    @property
    def hosts_names(self):
        """Sorted, de-duplicated host names of all cluster nodes."""
        return sorted({node.hostname for node in self.hosts})

    @staticmethod
    def __is_oneof_in(oneof, container):
        # True if at least one element of *oneof* occurs in *container*.
        # (reduce raises on an empty *oneof*, matching the original.)
        facts = [candidate in container for candidate in oneof]
        return reduce(lambda acc, fact: acc or fact, facts)

    @property
    def databases(self):
        """Mapping of domain name -> tenants, built on first access."""
        if self.__databases is None:
            self.__databases = {domain.domain_name: domain.tenants for domain in self.domains}
        return self.__databases

    @property
    def dynamic_slots(self):
        """Mapping of slot name -> DynamicSlot with allocated ports.

        Each slot gets four consecutive ports starting at the current
        SLOTS_PORTS_START; the slot is named after its interconnect port.
        """
        if self.__dynamic_slots is None:
            self.__dynamic_slots = {}
            for slot in super(ClusterDetails, self).dynamic_slots:
                base = self.SLOTS_PORTS_START
                full_name = str(base + 3)  # named after the ic port
                self.__dynamic_slots[full_name] = DynamicSlot(
                    slot=full_name,
                    domain=slot.domain,
                    mbus=base + 0,
                    grpc=base + 1,
                    mon=base + 2,
                    ic=base + 3,
                )
                self.SLOTS_PORTS_START = base + self.PORTS_SHIFT
        return self.__dynamic_slots
+
+
def make_dir(dir):
    """Create *dir* with mode 0755 if missing; assert it exists afterwards."""
    if not os.path.exists(dir):
        os.mkdir(dir, 0o755)
    assert os.path.isdir(dir)
+
+
class Configurator(object):
    """Generates static and dynamic kikimr configuration trees for a cluster.

    Output is written under *out_dir* into 'kikimr-static' and
    'kikimr-dynamic' sub-directories.
    """

    # Seeded before each generation pass so generated configs are reproducible.
    RANDOM_SEED = 100

    def __init__(
        self,
        cluster_details,
        out_dir,
        kikimr_bin,
        kikimr_compressed_bin
    ):
        self.__cluster_details = cluster_details
        self.__kikimr_bin_file = kikimr_bin
        self.__kikimr_compressed_bin_file = kikimr_compressed_bin

        # Generators are built lazily by the `static`/`dynamic` properties.
        self.__static = None
        self.__static_cfg = os.path.join(out_dir, 'kikimr-static')
        self.__dynamic = None
        self.__dynamic_cfg = os.path.join(out_dir, 'kikimr-dynamic')
        self.__subdomains = None

    @property
    def kikimr_bin(self):
        """Path to the kikimr binary used for config generation."""
        return self.__kikimr_bin_file

    @property
    def kikimr_compressed_bin(self):
        """Path to the lz4-compressed kikimr binary (may be None)."""
        return self.__kikimr_compressed_bin_file

    @property
    def template(self):
        """Raw YAML template of the cluster description."""
        return self.detail.template

    @property
    def detail(self):
        """The ClusterDetails instance this configurator was built from."""
        return self.__cluster_details

    @staticmethod
    def _generate_fake_keys():
        # Placeholder key config pointing at the fake secret written below.
        content = 'Keys {\n'
        content += ' ContainerPath: "/Berkanavt/kikimr/cfg/fake-secret.txt"\n'
        content += ' Pin: ""\n'
        content += ' Id: "fake-secret"\n'
        content += ' Version: 1\n'
        content += '}\n'
        return content

    @staticmethod
    def _generate_fake_secret():
        return 'not a secret at all, only for more similar behavior with cloud'

    def _make_cfg(self, generator, dir):
        # Fixed seed keeps generated configs reproducible across runs.
        random.seed(self.RANDOM_SEED)
        for cfg_name, cfg_value in generator.get_all_configs().items():
            write_to_file(os.path.join(dir, cfg_name), cfg_value)

        write_to_file(os.path.join(dir, "key.txt"), self._generate_fake_keys())
        write_to_file(os.path.join(dir, "fake-secret.txt"), self._generate_fake_secret())

    @property
    def static(self):
        """Lazily-built static config generator (requires a kikimr binary)."""
        assert self.__kikimr_bin_file

        if self.__static is None:
            self.__static = StaticConfigGenerator(
                self.template, self.__kikimr_bin_file, self.__static_cfg,
                walle_provider=NopHostsInformationProvider()
            )
        return self.__static

    def create_static_cfg(self):
        """Write the static config tree and return its directory."""
        make_dir(self.__static_cfg)
        self._make_cfg(self.static, self.__static_cfg)
        return self.__static_cfg

    @property
    def dynamic(self):
        """Lazily-built dynamic config generator (requires a kikimr binary)."""
        assert self.__kikimr_bin_file

        if self.__dynamic is None:
            self.__dynamic = DynamicConfigGenerator(
                self.__cluster_details.template, self.__kikimr_bin_file, self.__dynamic_cfg,
                walle_provider=NopHostsInformationProvider()
            )
        return self.__dynamic

    def create_dynamic_cfg(self):
        """Write the dynamic config tree and return its directory."""
        make_dir(self.__dynamic_cfg)
        self._make_cfg(self.dynamic, self.__dynamic_cfg)
        return self.__dynamic_cfg
diff --git a/ydb/tools/ydbd_slice/handlers.py b/ydb/tools/ydbd_slice/handlers.py
new file mode 100644
index 0000000000..4311818419
--- /dev/null
+++ b/ydb/tools/ydbd_slice/handlers.py
@@ -0,0 +1,421 @@
+import os
+import time
+import logging
+import subprocess
+from collections import deque, defaultdict
+
+from ydb.tools.cfg.walle import NopHostsInformationProvider
+
+
+logger = logging.getLogger(__name__)
+
+
class CalledProcessError(subprocess.CalledProcessError):
    """CalledProcessError variant whose message also shows the captured output."""

    def __init__(self, base):
        # Re-wrap an existing subprocess.CalledProcessError, keeping its fields.
        super(CalledProcessError, self).__init__(base.returncode, base.cmd, base.output)

    def __str__(self):
        template = "Command '%s' returned non-zero exit status %d and output was '%s'"
        return template % (self.cmd, self.returncode, self.output)
+
+
def format_drivers(nodes):
    """Zero the first 1MiB of every kikimr_* partition on all nodes."""
    wipe_cmd = (
        "sudo find /dev/disk/by-partlabel/ -maxdepth 1 -name 'kikimr_*' "
        r"-exec dd if=/dev/zero of={} bs=1M count=1 status=none \;"
    )
    nodes.execute_async(wipe_cmd)
+
+
def clear_registered_slots(nodes):
    """Remove every registered slot directory (/Berkanavt/kikimr_*) on all nodes."""
    rm_cmd = r"sudo find /Berkanavt/ -maxdepth 1 -type d -name 'kikimr_*' -exec rm -rf -- {} \;"
    nodes.execute_async(rm_cmd)
+
+
def clear_slot(nodes, slot):
    """Remove the on-disk directory of a single dynamic slot on all nodes."""
    rm_cmd = (
        r"sudo find /Berkanavt/ -maxdepth 1 -type d -name kikimr_{slot} -exec rm -rf -- {{}} \;"
    ).format(slot=slot.slot)
    nodes.execute_async(rm_cmd)
+
+
def clear_logs(nodes):
    """Wipe kikimr log directories on all nodes, pausing rsyslog meanwhile."""
    steps = [
        "sudo service rsyslog stop;",
        "find /Berkanavt/ -mindepth 2 -maxdepth 2 -name logs | egrep '^/Berkanavt/kikimr' | sudo xargs -I% find % -mindepth 1 -delete;",
        "sudo service rsyslog start;",
    ]
    nodes.execute_async(" ".join(steps))
+
+
def slice_format(components, nodes, cluster_details):
    """Stop the slice, wipe all kikimr disk partitions, then start it again."""
    slice_stop(components, nodes, cluster_details)
    format_drivers(nodes)
    slice_start(components, nodes, cluster_details)
+
+
def slice_clear(components, nodes, cluster_details):
    """Stop the slice and remove its data: slot dirs and/or disk partitions,
    depending on which components were requested."""
    slice_stop(components, nodes, cluster_details)

    if 'dynamic_slots' in components:
        for slot in cluster_details.dynamic_slots.values():
            clear_slot(nodes, slot)

    if 'kikimr' in components:
        format_drivers(nodes)
+
+
def invoke_scripts(dynamic_cfg_path, scripts):
    """Run the given init scripts from the dynamic cfg dir, skipping missing ones.

    Raises the local CalledProcessError subclass (which includes the script
    output in its message) on a non-zero exit code.
    """
    for script_name in scripts:
        script_path = os.path.join(dynamic_cfg_path, script_name)
        if os.path.isfile(script_path):
            cmd = ["bash", script_path]
            logger.info("run cmd '%s'", cmd)
            try:
                subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as er:
                raise CalledProcessError(er)
+
+
def dynamic_configure(configurations):
    """Generate dynamic configs and run the cluster init scripts.

    init_storage.bash is retried for up to ~60 seconds (5s steps) to give
    blobstorage time to come up; the remaining init scripts run once after.
    """
    dynamic_cfg_path = configurations.create_dynamic_cfg()
    # wait for bs to configure
    time_remaining = 60
    while True:
        try:
            invoke_scripts(dynamic_cfg_path, ['init_storage.bash'])
            break
        except CalledProcessError:
            time_to_wait = min(time_remaining, 5)
            if not time_to_wait:
                # retry budget exhausted -- surface the last failure
                raise
            time_remaining -= time_to_wait
            time.sleep(time_to_wait)
    invoke_scripts(
        dynamic_cfg_path, (
            "init_cms.bash",
            "init_compute.bash",
            "init_root_storage.bash",
            "init_databases.bash"
        )
    )
+
+
def slice_install(components, nodes, cluster_details, configurator, do_clear_logs, args):
    """Full (re)install of the slice: stop and wipe everything requested,
    deploy binary/configs/secrets, initialize storage, then start it all."""
    slice_stop(components, nodes, cluster_details)

    if 'dynamic_slots' in components or 'kikimr' in components:
        stop_all_slots(nodes)
        clear_registered_slots(nodes)

    if do_clear_logs:
        clear_logs(nodes)

    if 'kikimr' in components:
        format_drivers(nodes)

        if 'bin' in components.get('kikimr', []):
            update_kikimr(nodes, configurator.kikimr_bin, configurator.kikimr_compressed_bin)

        if 'cfg' in components.get('kikimr', []):
            static_cfg_path = configurator.create_static_cfg()
            update_cfg(nodes, static_cfg_path)
            deploy_secrets(nodes, args.yav_version)

        start_static(nodes)
        dynamic_configure(configurator)

    deploy_slot_configs(components, nodes, cluster_details)
    start_dynamic(components, nodes, cluster_details)
+
+
def get_available_slots(components, nodes, cluster_details):
    """Collect candidate (slot, node) pairs per domain, grouped by datacenter zone.

    Returns a tuple (slots_per_domain, all_available_slots_count) where
    slots_per_domain maps domain name -> defaultdict(zone -> deque of
    (slot, node) pairs).  Every pair is also queued under the 'any' zone so
    callers can fall back to an arbitrary datacenter.

    Fixes vs. original: the early-exit path now returns ({}, 0) instead of a
    bare {} (consistent return type), and the total counter is no longer
    reset at the start of every domain, so it counts pairs across all
    domains rather than just the last one.
    """
    if 'dynamic_slots' not in components:
        return ({}, 0)

    walle = NopHostsInformationProvider()
    slots_per_domain = {}
    all_available_slots_count = 0

    for domain in cluster_details.domains:
        available_slots_per_zone = defaultdict(deque)

        for slot in cluster_details.dynamic_slots.values():
            if slot.domain == domain.domain_name:
                for node in nodes.nodes_list:
                    item = (slot, node)
                    available_slots_per_zone[walle.get_datacenter(node).lower()].append(item)
                    available_slots_per_zone['any'].append(item)
                    all_available_slots_count += 1
        slots_per_domain[domain.domain_name] = available_slots_per_zone

    return (slots_per_domain, all_available_slots_count, )
+
+
def deploy_slot_config_for_tenant(nodes, slot, tenant, node):
    """Write the slot_cfg and env.txt files for one (slot, tenant) pair on one host."""
    slot_dir = "/Berkanavt/kikimr_{slot}".format(slot=slot.slot)
    logs_dir = slot_dir + "/logs"
    slot_cfg = slot_dir + "/slot_cfg"
    env_txt = slot_dir + "/env.txt"
    cfg = """\
tenant=/{domain}/{tenant}
grpc={grpc}
mbus={mbus}
ic={ic}
mon={mon}""".format(
        domain=slot.domain,
        tenant=tenant.name,
        mbus=slot.mbus,
        grpc=slot.grpc,
        mon=slot.mon,
        ic=slot.ic,
    )

    # Escape newlines so the multi-line config survives /bin/echo -e inside
    # the remote shell command.
    escaped_cmd = cfg.encode('unicode_escape').decode()

    cmd = "sudo sh -c 'mkdir -p {logs_dir}; sudo chown syslog {logs_dir}; touch {env_txt}; /bin/echo -e \"{cfg}\" > {slot_cfg};'".format(
        logs_dir=logs_dir,
        env_txt=env_txt,
        cfg=escaped_cmd,
        slot_cfg=slot_cfg,
    )

    nodes.execute_async(cmd, check_retcode=False, nodes=[node])
+
+
def deploy_slot_configs(components, nodes, cluster_details):
    """Distribute slot config files for every tenant compute unit.

    Slots are drawn from the per-zone pools built by get_available_slots;
    a (slot, node) pair is used at most once per domain.  When a zone's
    pool runs dry, logs a critical message and aborts the deployment.
    """
    if 'dynamic_slots' not in components:
        return

    slots_per_domain = get_available_slots(components, nodes, cluster_details)[0]
    for domain in cluster_details.domains:
        slots_taken = set()
        available_slots_per_zone = slots_per_domain[domain.domain_name]
        for tenant in domain.tenants:
            for compute_unit in tenant.compute_units:
                zone = compute_unit.zone.lower()
                for _ in range(compute_unit.count):
                    try:
                        while True:
                            slot, node = available_slots_per_zone[zone].popleft()
                            if (slot, node) in slots_taken:
                                # already consumed through another zone queue ('any')
                                continue
                            slots_taken.add((slot, node))
                            deploy_slot_config_for_tenant(nodes, slot, tenant, node)
                            break
                    except IndexError:
                        logger.critical('insufficient slots allocated')
                        return
+
+
def start_slot(nodes, slot):
    """Start one dynamic slot on all nodes (upstart or systemd, whichever exists)."""
    cmd = "sudo sh -c \"if [ -x /sbin/start ]; "\
          " then start kikimr-multi slot={slot} tenant=dynamic mbus={mbus} grpc={grpc} mon={mon} ic={ic}; "\
          " else systemctl start kikimr-multi@{slot}; fi\"".format(
        slot=slot.slot,
        mbus=slot.mbus,
        grpc=slot.grpc,
        mon=slot.mon,
        ic=slot.ic
    )
    nodes.execute_async(cmd, check_retcode=False)
+
+
def start_slot_for_tenant(nodes, slot, tenant, host, node_bind=None):
    """Start one slot for *tenant* on *host*, optionally bound to a NUMA node."""
    cmd = "sudo sh -c \"if [ -x /sbin/start ]; "\
          " then start kikimr-multi slot={slot} tenant=/{domain}/{name} mbus={mbus} grpc={grpc} mon={mon} ic={ic}; "\
          " else systemctl start kikimr-multi@{slot}; fi\"".format(
        slot=slot.slot,
        domain=slot.domain,
        name=tenant.name,
        mbus=slot.mbus,
        grpc=slot.grpc,
        mon=slot.mon,
        ic=slot.ic
    )
    if node_bind is not None:
        cmd += " bindnumanode={bind}".format(bind=node_bind)
    nodes.execute_async(cmd, check_retcode=False, nodes=[host])
+
+
def start_all_slots(nodes):
    """Start every slot found on disk under /Berkanavt (upstart or systemd)."""
    cmd = "find /Berkanavt/ -maxdepth 1 -type d -name kikimr_* " \
          " | while read x; do " \
          " sudo sh -c \"if [ -x /sbin/start ]; "\
          " then start kikimr-multi slot=${x#/Berkanavt/kikimr_}; "\
          " else systemctl start kikimr-multi@${x#/Berkanavt/kikimr_}; fi\"; " \
          " done"
    nodes.execute_async(cmd, check_retcode=False)
+
+
def start_static(nodes):
    """Start the static kikimr service on every node; return codes are ignored."""
    service_cmd = "sudo service kikimr start"
    nodes.execute_async(service_cmd, check_retcode=False)
+
+
def start_dynamic(components, nodes, cluster_details):
    """Start dynamic slots for every tenant compute unit, optionally NUMA-bound.

    Slots are drawn from the same per-zone pools as deploy_slot_configs, so
    both functions arrive at the same (slot, node) assignments.
    """
    if 'dynamic_slots' in components:

        def get_numa_nodes(nodes):
            # Ask every host how many NUMA nodes it has; 0 when numactl fails.
            results = dict()
            nodes.execute_async("numactl --hardware | head -n 1 | awk '{print $2}'", check_retcode=False,
                                results=results)
            return {
                host: int(result['stdout']) if result['retcode'] == 0 else 0
                for host, result in results.items()
            }

        # NUMA information is fetched lazily, only when some domain asks for binding.
        numa_nodes = None  # get_numa_nodes(nodes)
        numa_nodes_counters = {node: 0 for node in nodes.nodes_list}

        (slots_per_domain, all_available_slots_count,) = get_available_slots(components, nodes, cluster_details)

        for domain in cluster_details.domains:

            slots_taken = set()
            available_slots_per_zone = slots_per_domain[domain.domain_name]

            if domain.bind_slots_to_numa_nodes and numa_nodes is None:
                numa_nodes = get_numa_nodes(nodes)

            for tenant in domain.tenants:
                for compute_unit in tenant.compute_units:
                    zone = compute_unit.zone.lower()
                    for _ in range(compute_unit.count):
                        try:
                            while True:
                                slot, node = available_slots_per_zone[zone].popleft()
                                if (slot, node) in slots_taken:
                                    # already consumed through another zone queue ('any')
                                    continue
                                slots_taken.add((slot, node))
                                if domain.bind_slots_to_numa_nodes and numa_nodes[node] > 0:
                                    # round-robin slots over the host's NUMA nodes
                                    start_slot_for_tenant(nodes, slot, tenant, host=node,
                                                          node_bind=numa_nodes_counters[node])
                                    numa_nodes_counters[node] += 1
                                    numa_nodes_counters[node] %= numa_nodes[node]
                                else:
                                    start_slot_for_tenant(nodes, slot, tenant, host=node)
                                break
                        except IndexError:
                            logger.critical('insufficient slots allocated')
                            return

        # NOTE(review): slots_taken is reset per domain, so this count only
        # reflects the last domain -- confirm whether that is intended.
        logger.warning('{count} unused slots'.format(count=all_available_slots_count - len(slots_taken)))
+
+
def slice_start(components, nodes, cluster_details):
    """Start the static service (when requested) and then all dynamic slots."""
    if 'kikimr' in components:
        start_static(nodes)

    start_dynamic(components, nodes, cluster_details)
+
+
def stop_all_slots(nodes):
    """Stop every slot found on disk under /Berkanavt (upstart or systemd)."""
    cmd = "find /Berkanavt/ -maxdepth 1 -type d -name kikimr_* " \
          " | while read x; do " \
          " sudo sh -c \"if [ -x /sbin/stop ]; "\
          " then stop kikimr-multi slot=${x#/Berkanavt/kikimr_}; "\
          " else systemctl stop kikimr-multi@${x#/Berkanavt/kikimr_}; fi\"; " \
          " done"
    nodes.execute_async(cmd, check_retcode=False)
+
+
def stop_slot_ret(nodes, slot):
    """Fire the stop command for one slot and return the async task handles."""
    cmd = "sudo sh -c \"if [ -x /sbin/stop ]; "\
          " then stop kikimr-multi slot={slot}; "\
          " else systemctl stop kikimr-multi@{slot}; fi\"".format(
        slot=slot.slot,
    )
    return nodes.execute_async_ret(cmd, check_retcode=False)
+
+
def stop_slot(nodes, slot):
    """Stop one slot and wait for the command to finish on every node."""
    tasks = stop_slot_ret(nodes, slot)
    # NOTE(review): relies on the nodes object's private _check_async_execution.
    nodes._check_async_execution(tasks, False)
+
+
def stop_static(nodes):
    """Stop the static kikimr service on every node; return codes are ignored."""
    service_cmd = "sudo service kikimr stop"
    nodes.execute_async(service_cmd, check_retcode=False)
+
+
def stop_dynamic(components, nodes, cluster_details):
    """Stop every configured dynamic slot on all nodes.

    All per-slot stop commands are fired asynchronously, then checked in a
    single batch with failures tolerated (check flag False).
    """
    if 'dynamic_slots' in components:
        # Flatten the per-slot task lists (idiomatic replacement for the
        # original nested append loop).
        tasks = [
            task
            for slot in cluster_details.dynamic_slots.values()
            for task in stop_slot_ret(nodes, slot)
        ]
        nodes._check_async_execution(tasks, False)
+
+
def slice_stop(components, nodes, cluster_details):
    """Stop dynamic slots first, then the static service when requested."""
    stop_dynamic(components, nodes, cluster_details)

    if 'kikimr' in components:
        stop_static(nodes)
+
+
# Well-known locations of the kikimr binary, configs and secrets on slice hosts.
slice_kikimr_path = '/Berkanavt/kikimr/bin/kikimr'
slice_cfg_path = '/Berkanavt/kikimr/cfg'
slice_secrets_path = '/Berkanavt/kikimr/token'


def update_kikimr(nodes, bin_path, compressed_path):
    """Copy the kikimr binary to every node (optionally shipped compressed)."""
    nodes.copy(bin_path, slice_kikimr_path, compressed_path=compressed_path)


def update_cfg(nodes, cfg_path):
    """Copy the static config directory to every node."""
    nodes.copy(cfg_path, slice_cfg_path, directory=True)
+
+
def deploy_secrets(nodes, yav_version):
    """Fetch auth and TVM secrets from yav (by version id) and install them
    on all nodes.  No-op when *yav_version* is falsy."""
    if not yav_version:
        return

    nodes.execute_async(
        "sudo bash -c 'set -o pipefail && sudo mkdir -p {secrets} && "
        "yav get version {yav_version} -o auth_file | sudo tee {auth}'".format(
            yav_version=yav_version,
            secrets=slice_secrets_path,
            auth=os.path.join(slice_secrets_path, 'kikimr.token')
        )
    )

    # creating symlinks, to attach auth.txt to node
    nodes.execute_async(
        "sudo ln -f {secrets_auth} {cfg_auth}".format(
            secrets_auth=os.path.join(slice_secrets_path, 'kikimr.token'),
            cfg_auth=os.path.join(slice_cfg_path, 'auth.txt')
        )
    )

    nodes.execute_async(
        "sudo bash -c 'set -o pipefail && yav get version {yav_version} -o tvm_secret | sudo tee {tvm_secret}'".format(
            yav_version=yav_version,
            tvm_secret=os.path.join(slice_secrets_path, 'tvm_secret')
        )
    )
+
+
def slice_update(components, nodes, cluster_details, configurator, do_clear_logs, args):
    """Update binary and/or configs on a live slice, then restart it.

    The binary is copied while the old processes still run; configs and
    secrets are replaced only after everything has been stopped.
    """
    if do_clear_logs:
        clear_logs(nodes)

    if 'kikimr' in components:
        if 'bin' in components.get('kikimr', []):
            update_kikimr(nodes, configurator.kikimr_bin, configurator.kikimr_compressed_bin)

    slice_stop(components, nodes, cluster_details)
    if 'kikimr' in components:
        if 'cfg' in components.get('kikimr', []):
            static = configurator.create_static_cfg()
            update_cfg(nodes, static)
            deploy_secrets(nodes, args.yav_version)

    deploy_slot_configs(components, nodes, cluster_details)
    slice_start(components, nodes, cluster_details)
+
+
def slice_update_raw_configs(components, nodes, cluster_details, config_path):
    """Stop the slice, push pre-built configs from *config_path*, and restart.

    Expects the static config under <config_path>/kikimr-static.
    """
    slice_stop(components, nodes, cluster_details)
    if 'kikimr' in components:
        if 'cfg' in components.get('kikimr', []):
            kikimr_cfg = os.path.join(config_path, 'kikimr-static')
            update_cfg(nodes, kikimr_cfg)

    slice_start(components, nodes, cluster_details)
diff --git a/ydb/tools/ydbd_slice/kube/__init__.py b/ydb/tools/ydbd_slice/kube/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/__init__.py
diff --git a/ydb/tools/ydbd_slice/kube/api.py b/ydb/tools/ydbd_slice/kube/api.py
new file mode 100644
index 0000000000..477d0eff0a
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/api.py
@@ -0,0 +1,424 @@
+import copy
+import json
+import time
+import logging
+from kubernetes.client import ApiClient as KubeApiClient, CustomObjectsApi, ApiException, CoreV1Api
+from kubernetes.config import load_kube_config
+from ydb.tools.ydbd_slice.kube import kubectl
+
+
+logger = logging.getLogger(__name__)
+
+
# Annotation kubectl uses to remember the last manifest it applied.
KUBECTL_ANNOTATION = 'kubectl.kubernetes.io/last-applied-configuration'


def add_kubectl_last_applied_configuration(body):
    """Return a copy of *body* carrying kubectl's last-applied-configuration annotation.

    The stored value is the JSON dump of the object itself, with the status
    section and any previous copy of the annotation removed first --
    mimicking what `kubectl apply` records.  The input dict is not modified.
    """
    body = copy.deepcopy(body)
    body.pop('status', None)
    annotations = body.get('metadata', {}).get('annotations', {})
    annotations.pop(KUBECTL_ANNOTATION, None)
    # json.dumps runs before the assignment, so the dump never contains the
    # annotation itself.
    annotations[KUBECTL_ANNOTATION] = json.dumps(body)
    return body
+
+
def drop_kubectl_last_applied_configuration(body):
    """Return a copy of *body* without kubectl's last-applied-configuration annotation."""
    body = copy.deepcopy(body)
    annotations = body.get('metadata', {}).get('annotations', {})
    if KUBECTL_ANNOTATION in annotations:
        annotations.pop(KUBECTL_ANNOTATION)
    return body
+
+
class ApiClient(KubeApiClient):
    """kubernetes ApiClient that loads the local kube config on construction
    and drains its connection pool on close()."""

    def __init__(self, *args, **kwargs):
        load_kube_config()
        super().__init__(*args, **kwargs)

    def close(self):
        # Drop pooled HTTP connections before the regular close.
        self.rest_client.pool_manager.clear()
        super().close()
+
+
def get_namespace(api_client, name):
    """Read and return the Namespace object *name*."""
    logger.debug(f'getting namespace: {name}')
    api = CoreV1Api(api_client)
    return api.read_namespace(name=name)
+
+
def create_namespace(api_client, body):
    """Create a Namespace from *body*, recording the kubectl last-applied annotation."""
    name = body['metadata']['name']
    logger.debug(f'creating namespace: {name}')
    api = CoreV1Api(api_client)
    body = add_kubectl_last_applied_configuration(body)
    return api.create_namespace(body=body)
+
+
def apply_namespace(api_client, body, manifest_path):
    """Create the namespace, falling back to `kubectl apply` when it exists.

    Returns True when cluster state changed, False when kubectl reported the
    object unchanged.  Raises ApiException for API errors other than 409 and
    RuntimeError when kubectl apply fails.
    """
    try:
        create_namespace(api_client, body)
        return True
    except ApiException as e:
        if e.status != 409:
            raise
        logger.debug('namespace already created, using kubectl apply')
        rc, stdout, _ = kubectl.apply(manifest_path)
        if rc != 0:
            raise RuntimeError(f'failed to apply manifest {manifest_path}')
        # kubectl prints "... unchanged" when the object already matches the
        # manifest; the redundant changed-flag if/else is folded into one
        # boolean expression.
        return 'unchanged' not in stdout
+
+
def delete_namespace(api_client, name):
    """Delete the namespace; a 404 (already gone) is silently ignored."""
    logger.debug(f'deleting namespace: {name}')
    api = CoreV1Api(api_client)
    try:
        return api.delete_namespace(name=name)
    except ApiException as e:
        if e.status == 404:
            return
        raise
+
+
def wait_namespace_deleted(api_client, name, timeout=300):
    """Poll every 5s until the namespace returns 404 or *timeout* seconds pass.

    Raises TimeoutError on expiry; non-404 API errors are retried.
    """
    logger.debug(f'waiting for namespace to delete: {name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        try:
            get_namespace(api_client, name)
        except ApiException as e:
            if e.status == 404:
                return
        time.sleep(5)
    raise TimeoutError(f'waiting for namespace {name} to delete timed out')
+
+
def get_nodeclaim(api_client, namespace, name):
    """Fetch the ydb.tech/v1alpha1 NodeClaim custom object."""
    logger.debug(f'getting nodeclaim: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    return api.get_namespaced_custom_object(
        group='ydb.tech',
        version='v1alpha1',
        namespace=namespace,
        plural='nodeclaims',
        name=name,
    )
+
+
def get_nodeclaim_nodes(api_client, namespace, name):
    """Return the list of node host names claimed by the given NodeClaim.

    When the NodeClaim is not in the Ok state a warning is logged, but the
    nodes that were claimed are still returned.
    """
    obj = get_nodeclaim(api_client, namespace, name)
    namespace = obj['metadata']['namespace']
    name = obj['metadata']['name']
    node_list = []
    status = obj.get('status', {})
    if status.get('state') != 'Ok':
        logger.warning(f'NodeClaim {namespace}/{name} not in Ok state. '
                       'Not all requested nodes was claimed for your slice.')
    for item in status.get('nodes', []):
        if 'name' in item:
            node_list.append(item['name'])
        elif 'flavor' in item:
            # extend() avoids reusing `name` as a loop variable, which
            # shadowed the NodeClaim name in the original.
            node_list.extend(item['flavor']['nodes'])
    return node_list
+
+
def create_nodeclaim(api_client, body):
    """Create a NodeClaim from *body*, recording the kubectl last-applied annotation."""
    namespace = body['metadata']['namespace']
    name = body['metadata']['name']
    logger.debug(f'creating nodeclaim: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    body = add_kubectl_last_applied_configuration(body)
    return api.create_namespaced_custom_object(
        group='ydb.tech',
        version='v1alpha1',
        namespace=namespace,
        plural='nodeclaims',
        body=body,
    )
+
+
def apply_nodeclaim(api_client, body, manifest_path):
    """Create the nodeclaim, falling back to `kubectl apply` when it exists.

    Returns True when cluster state changed, False when kubectl reported the
    object unchanged.  Raises ApiException for API errors other than 409 and
    RuntimeError when kubectl apply fails.
    """
    try:
        create_nodeclaim(api_client, body)
        return True
    except ApiException as e:
        if e.status != 409:
            raise
        logger.debug('nodeclaim already created, using kubectl apply')
        rc, stdout, _ = kubectl.apply(manifest_path)
        if rc != 0:
            raise RuntimeError(f'failed to apply manifest {manifest_path}')
        # Fold the redundant changed-flag if/else into one boolean expression.
        return 'unchanged' not in stdout
+
+
def wait_nodeclaim_state_ok(api_client, namespace, name, timeout=300):
    """Poll every 5s until the nodeclaim reaches the Ok state.

    Raises RuntimeError when the claim goes to Failed and TimeoutError when
    *timeout* seconds pass first.
    """
    logger.debug(f'waiting for nodeclaim to transfer to Ok state: {namespace}/{name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        obj = get_nodeclaim(api_client, namespace, name)
        state = obj.get('status', {}).get('state')
        logger.debug(f'nodeclaim {namespace}/{name} state {state}')
        if state == 'Ok':
            return
        if state == 'Failed':
            raise RuntimeError(f'nodeclaim {namespace}/{name} cannot acquire requested nodes, please change nodeclaim')
        time.sleep(5)
    raise TimeoutError(f'waiting for nodeclaim {namespace}/{name} timed out')
+
+
def delete_nodeclaim(api_client, namespace, name):
    """Delete the nodeclaim; a 404 (already gone) is silently ignored."""
    logger.debug(f'deleting nodeclaim: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    try:
        return api.delete_namespaced_custom_object(
            group='ydb.tech',
            version='v1alpha1',
            namespace=namespace,
            plural='nodeclaims',
            name=name,
        )
    except ApiException as e:
        if e.status == 404:
            return
        raise
+
+
def wait_nodeclaim_deleted(api_client, namespace, name, timeout=300):
    """Poll every 5s until the nodeclaim returns 404 or *timeout* seconds pass."""
    logger.debug(f'waiting for nodeclaim to delete: {namespace}/{name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        try:
            get_nodeclaim(api_client, namespace, name)
        except ApiException as e:
            if e.status == 404:
                return
        time.sleep(5)
    raise TimeoutError(f'waiting for nodeclaim {namespace}/{name} to delete timed out')
+
+
def get_storage(api_client, namespace, name):
    """Fetch the ydb.tech/v1alpha1 Storage custom object."""
    logger.debug(f'getting storage: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    return api.get_namespaced_custom_object(
        group='ydb.tech',
        version='v1alpha1',
        namespace=namespace,
        plural='storages',
        name=name,
    )
+
+
def create_storage(api_client, body):
    """Create a Storage from *body*, recording the kubectl last-applied annotation."""
    namespace = body['metadata']['namespace']
    name = body['metadata']['name']
    logger.debug(f'creating storage: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    body = add_kubectl_last_applied_configuration(body)
    return api.create_namespaced_custom_object(
        group='ydb.tech',
        version='v1alpha1',
        namespace=namespace,
        plural='storages',
        body=body,
    )
+
+
def apply_storage(api_client, body, manifest_path):
    """Create the storage, falling back to `kubectl apply` when it exists.

    Returns True when cluster state changed, False when kubectl reported the
    object unchanged.  Raises ApiException for API errors other than 409 and
    RuntimeError when kubectl apply fails.
    """
    try:
        create_storage(api_client, body)
        return True
    except ApiException as e:
        if e.status != 409:
            raise
        logger.debug('storage already created, using kubectl apply')
        rc, stdout, _ = kubectl.apply(manifest_path)
        if rc != 0:
            raise RuntimeError(f'failed to apply manifest {manifest_path}')
        # Fold the redundant changed-flag if/else into one boolean expression.
        return 'unchanged' not in stdout
+
+
def wait_storage_state_ready(api_client, namespace, name, timeout=900):
    """Poll every 5s until the storage reaches Ready state; TimeoutError on expiry."""
    logger.debug(f'waiting for storage to transfer to Ready state: {namespace}/{name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        obj = get_storage(api_client, namespace, name)
        state = obj.get('status', {}).get('state')
        logger.debug(f'storage {namespace}/{name} state {state}')
        if state == 'Ready':
            return
        time.sleep(5)
    raise TimeoutError(f'waiting for storage {namespace}/{name} timed out')
+
+
def delete_storage(api_client, namespace, name):
    """Delete the storage; a 404 (already gone) is silently ignored."""
    logger.debug(f'deleting storage: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    try:
        return api.delete_namespaced_custom_object(
            group='ydb.tech',
            version='v1alpha1',
            namespace=namespace,
            plural='storages',
            name=name,
        )
    except ApiException as e:
        if e.status == 404:
            return
        raise
+
+
def wait_storage_deleted(api_client, namespace, name, timeout=300):
    """Poll every 5s until the storage returns 404 or *timeout* seconds pass."""
    logger.debug(f'waiting for storage to delete: {namespace}/{name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        try:
            get_storage(api_client, namespace, name)
        except ApiException as e:
            if e.status == 404:
                return
        time.sleep(5)
    raise TimeoutError(f'waiting for storage {namespace}/{name} to delete timed out')
+
+
def get_database(api_client, namespace, name):
    """Fetch the ydb.tech/v1alpha1 Database custom object."""
    logger.debug(f'getting database: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    return api.get_namespaced_custom_object(
        group='ydb.tech',
        version='v1alpha1',
        namespace=namespace,
        plural='databases',
        name=name,
    )
+
+
def create_database(api_client, body):
    """Create a Database from *body*, recording the kubectl last-applied annotation."""
    namespace = body['metadata']['namespace']
    name = body['metadata']['name']
    logger.debug(f'creating database: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    body = add_kubectl_last_applied_configuration(body)
    return api.create_namespaced_custom_object(
        group='ydb.tech',
        version='v1alpha1',
        namespace=namespace,
        plural='databases',
        body=body,
    )
+
+
def apply_database(api_client, body, manifest_path):
    """Create the database, falling back to `kubectl apply` when it exists.

    Returns True when cluster state changed, False when kubectl reported the
    object unchanged.  Raises ApiException for API errors other than 409 and
    RuntimeError when kubectl apply fails.
    """
    try:
        create_database(api_client, body)
        return True
    except ApiException as e:
        if e.status != 409:
            raise
        logger.debug('database already created, using kubectl apply')
        rc, stdout, _ = kubectl.apply(manifest_path)
        if rc != 0:
            raise RuntimeError(f'failed to apply manifest {manifest_path}')
        # Fold the redundant changed-flag if/else into one boolean expression.
        return 'unchanged' not in stdout
+
+
def wait_database_state_ready(api_client, namespace, name, timeout=900):
    """Poll every 5s until the database reaches Ready state; TimeoutError on expiry."""
    logger.debug(f'waiting for database to transfer to Ready state: {namespace}/{name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        obj = get_database(api_client, namespace, name)
        state = obj.get('status', {}).get('state')
        logger.debug(f'database {namespace}/{name} state {state}')
        if state == 'Ready':
            return
        time.sleep(5)
    raise TimeoutError(f'waiting for database {namespace}/{name} timed out')
+
+
def delete_database(api_client, namespace, name):
    """Delete the database; a 404 (already gone) is silently ignored."""
    logger.debug(f'deleting database: {namespace}/{name}')
    api = CustomObjectsApi(api_client)
    try:
        return api.delete_namespaced_custom_object(
            group='ydb.tech',
            version='v1alpha1',
            namespace=namespace,
            plural='databases',
            name=name,
        )
    except ApiException as e:
        if e.status == 404:
            return
        raise
+
+
def wait_database_deleted(api_client, namespace, name, timeout=1800):
    """Poll every 5s until the database returns 404 or *timeout* seconds pass.

    Bug fix: the original polled get_storage() here, so it watched the wrong
    object kind and would return as soon as a same-named *storage* was gone.
    """
    logger.debug(f'waiting for database to delete: {namespace}/{name}')
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        try:
            get_database(api_client, namespace, name)
        except ApiException as e:
            if e.status == 404:
                return
        time.sleep(5)
    raise TimeoutError(f'waiting for database {namespace}/{name} to delete timed out')
+
+
def get_pod(api_client, namespace, name):
    """Read and return the Pod object *namespace*/*name*."""
    api = CoreV1Api(api_client)
    return api.read_namespaced_pod(name, namespace)
+
+
def wait_pods_deleted(api_client, pods, timeout=1800):
    """Wait until every (namespace, name, uid) pod in *pods* is gone or replaced.

    A pod counts as deleted when the API returns 404, or when a pod with the
    same name exists but carries a different uid (i.e. it was recreated).
    Server errors (>=500) are treated as transient and retried; raises
    TimeoutError after *timeout* seconds.
    """
    logger.debug('waiting for pods to delete')
    pod_present = {i: True for i in pods}
    end_ts = time.time() + timeout
    while time.time() < end_ts:
        if not any(pod_present.values()):
            return
        logger.debug(f'pods left: {len([v for v in pod_present.values() if v])}/{len(pod_present)}')
        for key, present in pod_present.items():
            if not present:
                continue

            pod_namespace, pod_name, pod_uid = key
            try:
                pod = get_pod(api_client, pod_namespace, pod_name)
                if pod.metadata.uid != pod_uid:
                    # same name but a new uid: the original pod is gone
                    pod_present[key] = False
            except ApiException as e:
                if e.status == 404:
                    pod_present[key] = False
                elif e.status >= 500:
                    pass
                else:
                    raise

            # small pause between per-pod queries to avoid hammering the API
            time.sleep(0.05)

        time.sleep(1)

    raise TimeoutError('waiting for pods to delete timed out')
diff --git a/ydb/tools/ydbd_slice/kube/cms.py b/ydb/tools/ydbd_slice/kube/cms.py
new file mode 100644
index 0000000000..e7dc7d27c6
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/cms.py
@@ -0,0 +1,294 @@
+# this is evil
+# it should be exterminated...
+# after KIKIMR-19357
+
+import os
+import copy
+import time
+import grpc
+import logging
+import tenacity as tc
+from urllib.parse import urlparse
+from google.protobuf import text_format
+from contextlib import contextmanager
+
+from ydb.core.protos import grpc_pb2_grpc
+from ydb.core.protos import msgbus_pb2
+from ydb.core.protos import console_config_pb2
+from ydb.public.api.protos import ydb_status_codes_pb2
+
+
+logger = logging.getLogger(__name__)
+
+
+DEBUG = True
+LEGACY_CMS_CONFIG_ITEMS_DIR = 'legacy-cms-config-items'
+
+
+GRPC_MAX_MESSAGE_LENGTH = 130 * 1024**2
+GRPC_DEFAULT_OPTIONS = {
+ 'grpc.max_send_message_length': GRPC_MAX_MESSAGE_LENGTH,
+ 'grpc.max_receive_message_length': GRPC_MAX_MESSAGE_LENGTH,
+ 'grpc.max_concurrent_streams': 200,
+}
+
+RETRYABLE_STATUS_CODES = [
+ ydb_status_codes_pb2.StatusIds.StatusCode.INTERNAL_ERROR,
+ ydb_status_codes_pb2.StatusIds.StatusCode.ABORTED,
+ ydb_status_codes_pb2.StatusIds.StatusCode.UNAVAILABLE,
+ ydb_status_codes_pb2.StatusIds.StatusCode.OVERLOADED,
+ ydb_status_codes_pb2.StatusIds.StatusCode.TIMEOUT,
+ ydb_status_codes_pb2.StatusIds.StatusCode.CANCELLED,
+ ydb_status_codes_pb2.StatusIds.StatusCode.SESSION_BUSY,
+]
+
+
def get_file_or_none(path, *args, **kwargs):
    """Read and return the whole file at *path*, or None when path is None.

    Extra positional/keyword arguments are forwarded to open() (e.g. 'rb').
    """
    if path is None:
        return None
    with open(path, *args, **kwargs) as handle:
        return handle.read()
+
+
class RequestFailed(Exception):
    """Raised when a CMS console request returns a non-success status."""

    def __init__(self, method, request, response):
        self.method = method
        self.request = request
        self.response = response

    def __str__(self):
        return '{0} request: {1} failed with response {2}'.format(
            self.method, self.request, self.response)
+
+
def listify(data):
    """Coerce *data* into a list.

    Lists, strings and sets are expanded element-wise (a string becomes a
    list of its characters); dicts are rejected because it is ambiguous
    whether keys, values or items are wanted; anything else is wrapped as
    a single-element list.
    """
    if isinstance(data, dict):
        raise ValueError('cannot listify dict')
    if isinstance(data, (list, str, set)):
        return list(data)
    return [data]
+
+
@contextmanager
def _connect(url, grpc_cacert_file=None, grpc_options=None, grpc_timeout=60, result=None):
    """Open a gRPC channel to *url* and yield a TGRpcServerStub for it.

    Supports grpc:// (insecure) and grpcs:// (TLS, requires a Root CA file).
    *grpc_options* are merged over GRPC_DEFAULT_OPTIONS; the channel
    readiness is awaited for up to *grpc_timeout* seconds.

    Fix vs. original: the channel is now closed in a finally block, so it is
    not leaked when the readiness wait or the caller's body raises.
    """
    logger.debug(f'creating grpc channel using url: {url}')

    if result is None:
        result = {
            'changed': False,
        }

    grpc_options_defaults = copy.deepcopy(GRPC_DEFAULT_OPTIONS)
    if grpc_options is not None:
        grpc_options_defaults.update(grpc_options)
    grpc_options = list(grpc_options_defaults.items())

    url_data = urlparse(url)

    if url_data.scheme == 'grpc':
        grpc_channel = grpc.insecure_channel(url_data.netloc, options=grpc_options)

    elif url_data.scheme == 'grpcs':
        grpc_cacert = get_file_or_none(grpc_cacert_file, 'rb')
        if grpc_cacert is None:
            result['msg'] = f'Cannot load Root CA file {grpc_cacert_file}.'
            raise RuntimeError(result)

        grpc_credentials = grpc.ssl_channel_credentials(
            root_certificates=grpc_cacert, private_key=None, certificate_chain=None
        )
        grpc_channel = grpc.secure_channel(url_data.netloc, grpc_credentials, options=grpc_options)

    else:
        raise RuntimeError(f'invalid scheme: {url_data.scheme}')

    try:
        ready_future = grpc.channel_ready_future(grpc_channel)
        ready_future.result(timeout=grpc_timeout)

        yield grpc_pb2_grpc.TGRpcServerStub(grpc_channel)
    finally:
        grpc_channel.close()
+
+
@contextmanager
def connect(url_list, grpc_cacert_file=None, grpc_options=None, grpc_timeout=60, result=None):
    """Yield a console stub for the first reachable endpoint in *url_list*.

    A plain string is treated as a single-element list.  Connection timeouts
    move on to the next URL after a 1s pause; when every URL fails the last
    timeout error is re-raised.

    Fix vs. original: an empty *url_list* now raises ValueError instead of
    `raise None` (which was a TypeError).
    """
    if isinstance(url_list, str):
        url_list = [url_list]
    last_error = None
    for url in url_list:
        try:
            with _connect(url, grpc_cacert_file, grpc_options, grpc_timeout, result) as stub:
                yield stub
                return
        except grpc.FutureTimeoutError as e:
            last_error = e
            time.sleep(1)
    if last_error is None:
        raise ValueError('connect() called with an empty url list')
    raise last_error
+
+
@tc.retry(
    retry=tc.retry_if_exception_type(RequestFailed),
    wait=tc.wait_exponential(max=60),
    stop=tc.stop_after_attempt(10),
    reraise=True,
)
def _send_retry_request(grpc_server_stub, method, request):
    """Send one console request, retrying with exponential backoff (up to 10
    attempts) while the envelope or inner response status is retryable."""
    if request.HasField('GetConfigItemsRequest'):
        logger.debug('sending GetConfigItemsRequest')
    elif request.HasField('ConfigureRequest'):
        logger.debug('sending ConfigureRequest')
    func = getattr(grpc_server_stub, method)
    response = func(request)
    # Check the outer envelope first, then whichever inner response is set.
    if response.Status.Code in RETRYABLE_STATUS_CODES:
        raise RequestFailed(method, request, response)
    if response.HasField('GetConfigItemsResponse'):
        logger.debug(f'receiving GetConfigItemsResponse with code: {response.GetConfigItemsResponse.Status.Code}')
        if response.GetConfigItemsResponse.Status.Code in RETRYABLE_STATUS_CODES:
            raise RequestFailed(method, request, response)
    elif response.HasField('ConfigureResponse'):
        logger.debug(f'receiving ConfigureResponse with code: {response.ConfigureResponse.Status.Code}')
        if response.ConfigureResponse.Status.Code in RETRYABLE_STATUS_CODES:
            raise RequestFailed(method, request, response)
    return response
+
+
def send_request(grpc_server_stub, method, request):
    """Send a console request and raise RequestFailed unless every status
    (envelope and inner GetConfigItems/Configure response) is SUCCESS."""
    response = _send_retry_request(grpc_server_stub, method, request)
    if response.Status.Code != ydb_status_codes_pb2.StatusIds.StatusCode.SUCCESS:
        raise RequestFailed(method, request, response)
    if response.HasField('GetConfigItemsResponse'):
        if response.GetConfigItemsResponse.Status.Code != ydb_status_codes_pb2.StatusIds.StatusCode.SUCCESS:
            raise RequestFailed(method, request, response)
    elif response.HasField('ConfigureResponse'):
        if response.ConfigureResponse.Status.Code != ydb_status_codes_pb2.StatusIds.StatusCode.SUCCESS:
            raise RequestFailed(method, request, response)
    return response
+
+
def get_from_files(project_path):
    """Load legacy CMS config items from <project_path>/legacy-cms-config-items.

    Each *.txt file is parsed as a text-format TConfigItem; the file name is
    used as the item Cookie.  Returns a dict cookie -> TConfigItem, or None
    when the directory does not exist.

    Fixes vs. original: a file that fails to parse is now skipped entirely
    (the original fell through and registered the partially-parsed item),
    and the skip warnings name the offending file.
    """
    config_items = dict()
    cms_configs_dir = os.path.abspath(os.path.join(project_path, LEGACY_CMS_CONFIG_ITEMS_DIR))
    if not os.path.exists(cms_configs_dir):
        return None
    for item in os.listdir(cms_configs_dir):
        filename = os.path.join(cms_configs_dir, item)
        if not filename.endswith('.txt'):
            logger.warning(f'skipping file: {filename}, not txt file extension')
            continue
        cookie = item
        with open(filename) as file:
            config_item_text = file.read()
        config_item = console_config_pb2.TConfigItem()
        try:
            text_format.Parse(config_item_text, config_item)
        except Exception as e:
            logger.warning(f'skipping file: {filename}, parse error: {str(e)}')
            continue
        config_item.Cookie = cookie
        config_items[cookie] = config_item
    return config_items
+
+
def get(grpc_server_stub, cookie=None):
    """Fetch CMS config items, optionally filtered by *cookie*.

    Items with an empty cookie (possibly managed from the UI) are skipped.
    Returns a dict cookie -> TConfigItem; raises RuntimeError when two items
    share the same cookie (reconciliation would be ambiguous).
    """
    request = msgbus_pb2.TConsoleRequest()
    request.GetConfigItemsRequest.CopyFrom(console_config_pb2.TGetConfigItemsRequest())

    if cookie is not None:
        logger.debug(f'getting config items, filter by cookie: {cookie}')
        request.GetConfigItemsRequest.CookieFilter.Cookies.append(cookie)
    else:
        logger.debug('getting all config items')

    response = send_request(grpc_server_stub, 'ConsoleRequest', request)

    config_items = dict()
    for item in response.GetConfigItemsResponse.ConfigItems:
        if item.Cookie == '':
            item_id = str(item.Id).replace('\n', ' ')
            logger.info(f'got config item with empty cookie and id {item_id}, possibly managed from UI, skipping')
            continue
        if item.Cookie in config_items:
            raise RuntimeError('found more than one config item with same cookie, dont know which one to process')
        config_items[item.Cookie] = item

    return config_items
+
+
def remove(grpc_server_stub, old_config_item):
    """Remove an existing CMS config item (addressed by its Id)."""
    logger.debug(f'removing config item {old_config_item.Cookie}')
    request = msgbus_pb2.TConsoleRequest()
    configure_action = request.ConfigureRequest.Actions.add()
    configure_action.RemoveConfigItem.ConfigItemId.CopyFrom(old_config_item.Id)
    send_request(grpc_server_stub, 'ConsoleRequest', request)
+
+
def compare(old_config_item, new_config_item):
    """Return True when both config items match on every mutable field.

    Id and Cookie are deliberately ignored; only the payload fields that
    `modify` would overwrite are compared.
    """
    tracked_fields = ('Kind', 'Config', 'UsageScope', 'Order', 'MergeStrategy')
    return all(
        getattr(old_config_item, field) == getattr(new_config_item, field)
        for field in tracked_fields
    )
+
+
def modify(grpc_server_stub, old_config_item, new_config_item):
    """Overwrite an existing config item with *new_config_item*'s payload.

    No-op when the tracked fields are identical.  The server-side Id is kept
    from the old item so the update targets the right object.
    """
    if compare(old_config_item, new_config_item):
        logger.debug(f'skipping config item {new_config_item.Cookie}, not changed')
        return
    logger.debug(f'modifying config item {new_config_item.Cookie}')
    request = msgbus_pb2.TConsoleRequest()
    configure_action = request.ConfigureRequest.Actions.add()
    config_item = console_config_pb2.TConfigItem()
    config_item.CopyFrom(new_config_item)
    config_item.Id.CopyFrom(old_config_item.Id)
    configure_action.ModifyConfigItem.ConfigItem.CopyFrom(config_item)
    send_request(grpc_server_stub, 'ConsoleRequest', request)
+
+
def create(grpc_server_stub, new_config_item):
    """Add a brand-new CMS config item."""
    logger.debug(f'creating config item {new_config_item.Cookie}')
    request = msgbus_pb2.TConsoleRequest()
    configure_action = request.ConfigureRequest.Actions.add()
    configure_action.AddConfigItem.ConfigItem.CopyFrom(new_config_item)
    send_request(grpc_server_stub, 'ConsoleRequest', request)
+
+
def apply_legacy_cms_config_items(new_config_items, url, grpc_cacert_file=None, grpc_options=None, grpc_timeout=60):
    """Reconcile server-side CMS config items with *new_config_items* (keyed by cookie).

    Cookies present only on the server are removed, shared cookies are
    modified (when changed), and new cookies are created.  Per-item failures
    are logged and do not abort the rest of the reconciliation.
    """
    logger.debug(f'applying legacy cms config items from files: {list(new_config_items)}')
    with connect(
        url, grpc_cacert_file=grpc_cacert_file, grpc_options=grpc_options, grpc_timeout=grpc_timeout
    ) as grpc_server_stub:
        old_config_items = get(grpc_server_stub, cookie=None)

        # Partition cookies into remove / modify / create sets.
        old_keys = set(old_config_items)
        new_keys = set(new_config_items)
        rem_keys = old_keys.difference(new_keys)
        mod_keys = new_keys.intersection(old_keys)
        cre_keys = new_keys.difference(old_keys)

        for cookie in rem_keys:
            old_config_item = old_config_items[cookie]
            try:
                remove(grpc_server_stub, old_config_item)
            except RequestFailed as e:
                logger.error(f'failed to remove config item with cookie: {cookie}, '
                             f'method: {e.method}, request: {e.request}, response: {e.response}')

        for cookie in mod_keys:
            old_config_item = old_config_items[cookie]
            new_config_item = new_config_items[cookie]
            try:
                modify(grpc_server_stub, old_config_item, new_config_item)
            except RequestFailed as e:
                logger.error(f'failed to modify config item with cookie: {cookie}, '
                             f'method: {e.method}, request: {e.request}, response: {e.response}')

        for cookie in cre_keys:
            new_config_item = new_config_items[cookie]
            try:
                create(grpc_server_stub, new_config_item)
            except RequestFailed as e:
                logger.error(f'failed to create config item with cookie: {cookie}, '
                             f'method: {e.method}, request: {e.request}, response: {e.response}')
diff --git a/ydb/tools/ydbd_slice/kube/docker.py b/ydb/tools/ydbd_slice/kube/docker.py
new file mode 100644
index 0000000000..5ed15f2f6d
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/docker.py
@@ -0,0 +1,78 @@
+import os
+import sys
+import json
+import logging
+import subprocess
+
+
+logger = logging.getLogger(__name__)
+
+
# resource path of the package spec used when building the ydbd docker image
DOCKER_IMAGE_YDBD_PACKAGE_SPEC = 'ydb/tools/ydbd_slice/image/pkg.json'
DOCKER_IMAGE_REGISTRY = 'cr.yandex'
DOCKER_IMAGE_REPOSITORY = 'crpbo4q9lbgkn85vr1rm'
DOCKER_IMAGE_NAME = 'ydb'
# default image reference <registry>/<repository>/<name>; a tag is appended in get_image_from_args()
DOCKER_IMAGE_FULL_NAME = '%s/%s/%s' % (DOCKER_IMAGE_REGISTRY, DOCKER_IMAGE_REPOSITORY, DOCKER_IMAGE_NAME)
+
+
def get_user():
    """Return the current user name: lower-cased $USER; exit(2) with a hint if unset."""
    user = os.environ.get('USER')
    if user is None:
        print('', file=sys.stderr)
        print("unable to get USER env var", file=sys.stderr)
        print("please specify USER env var or use '--tag' argument", file=sys.stderr)
        sys.exit(2)
    return user.lower()
+
+
def get_image_from_args(args):
    """Resolve the docker image reference from parsed CLI args.

    `--image` and `--tag` are mutually exclusive; with neither given the tag
    defaults to '<user>-latest'.  Exits with code 2 when both are supplied.
    """
    if args.image is not None and args.tag is not None:
        print('', file=sys.stderr)
        print("image and tag arguments are mutually exclusive", file=sys.stderr)
        print("please specify either image or tag argument", file=sys.stderr)
        sys.exit(2)

    if args.image is not None:
        return args.image
    tag = args.tag if args.tag is not None else '%s-latest' % get_user()
    return "%s:%s" % (DOCKER_IMAGE_FULL_NAME, tag)
+
+
def docker_tag(old, new):
    """Run `docker tag old new`; raise RuntimeError on timeout or non-zero exit."""
    proc = subprocess.Popen(['docker', 'tag', old, new], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    try:
        _, stderr = proc.communicate(timeout=5)
    except subprocess.TimeoutExpired:
        proc.kill()
        raise RuntimeError("docker tag: timed out")
    if proc.returncode == 0:
        return
    raise RuntimeError("docker tag: failed with code %d, error: %s" % (proc.returncode, stderr))
+
+
def docker_inspect(obj):
    """Return parsed `docker inspect` JSON for obj, or None when it does not exist.

    Raises RuntimeError on timeout or any other docker failure.
    """
    proc = subprocess.Popen(['docker', 'inspect', obj], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    try:
        stdout, stderr = proc.communicate(timeout=5)
    except subprocess.TimeoutExpired:
        proc.kill()
        raise RuntimeError("docker inspect: timed out")
    if proc.returncode == 0:
        return json.loads(stdout)
    # docker exits 1 with this message when the object is simply absent
    if proc.returncode == 1 and 'No such object' in stderr:
        logger.debug('docker inspect: object %s not found', obj)
        return None
    raise RuntimeError("docker inspect: failed with code %d, error: %s" % (proc.returncode, stderr))
+
+
def docker_push(image):
    """Push `image` via the docker CLI (output inherited); exit(1) on failure."""
    returncode = subprocess.Popen(['docker', 'push', image], text=True).wait()
    if returncode != 0:
        logger.error(f'command failed: docker push {image}')
        sys.exit(1)
diff --git a/ydb/tools/ydbd_slice/kube/generate.py b/ydb/tools/ydbd_slice/kube/generate.py
new file mode 100644
index 0000000000..884fa38ec9
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/generate.py
@@ -0,0 +1,98 @@
+import os
+import sys
+import jinja2
+import library.python.resource as rs
+
+from ydb.tools.ydbd_slice.kube import cms
+
+
def render_resource(name, params):
    """Render the embedded jinja2 template `name` with `params`.

    Templates are looked up via library resources; undefined variables raise
    (StrictUndefined) instead of silently rendering empty.
    """
    loader = jinja2.FunctionLoader(lambda resource_name: rs.find(resource_name).decode())
    env = jinja2.Environment(loader=loader, undefined=jinja2.StrictUndefined)
    return env.get_template(name).render(**params)
+
+
def generate_file(project_path, filename, template, template_kwargs):
    """Render `template` with `template_kwargs` and write it to project_path/filename.

    Exits the process with a diagnostic message when rendering or writing fails.
    """
    try:
        manifest = render_resource(template, template_kwargs)
        with open(os.path.join(project_path, filename), 'w') as file:
            try:
                file.write(manifest)
            except Exception:
                # dump the target and the rendered text to aid debugging, then re-raise
                print(file)
                print(manifest)
                raise
    except Exception as e:
        # fix: report the actual file name instead of the '(unknown)' placeholder
        sys.exit(f'Failed to render manifest: {filename}: {str(e)}')
+
+
def generate_legacy_configs(project_path, preferred_pool_kind='ssd'):
    """Generate the legacy CMS config item files under project_path.

    Creates the LEGACY_CMS_CONFIG_ITEMS_DIR subdirectory if needed and renders
    the table-profile and unified-agent items into it.
    """
    cms_configs_dir = os.path.join(project_path, cms.LEGACY_CMS_CONFIG_ITEMS_DIR)
    if not os.path.exists(cms_configs_dir):
        os.mkdir(cms_configs_dir)
    # fix: generate_file() joins `filename` onto `project_path` itself, so pass
    # project-relative paths here; the previous code passed `cms_configs_dir`
    # (already prefixed with project_path), duplicating the prefix whenever
    # project_path was a relative path.
    generate_file(
        project_path=project_path,
        filename=os.path.join(cms.LEGACY_CMS_CONFIG_ITEMS_DIR, 'table-profile.txt'),
        template='/ydbd_slice/templates/legacy-cms-config-items/table-profile.txt',
        template_kwargs=dict(
            preferred_pool_kind=preferred_pool_kind,
        ),
    )
    generate_file(
        project_path=project_path,
        filename=os.path.join(cms.LEGACY_CMS_CONFIG_ITEMS_DIR, 'unified-agent.txt'),
        template='/ydbd_slice/templates/legacy-cms-config-items/unified-agent.txt',
        template_kwargs=dict(
            preferred_pool_kind=preferred_pool_kind,
        ),
    )
+
+
def generate_8_node_block_4_2(project_path, user, namespace_name, nodeclaim_name, node_flavor,
                              storage_name, database_name):
    """Generate the full manifest set for an 8-node block-4-2 slice.

    Writes namespace, nodeclaim, storage and database manifests into
    `project_path`, then generates the legacy CMS config item files.
    """
    generate_file(
        project_path=project_path,
        filename=f'namespace-{namespace_name}.yaml',
        template='/ydbd_slice/templates/common/namespace.yaml',
        template_kwargs=dict(
            namespace_name=namespace_name,
        )
    )
    generate_file(
        project_path=project_path,
        filename=f'nodeclaim-{nodeclaim_name}.yaml',
        template='/ydbd_slice/templates/8-node-block-4-2/nodeclaim.yaml',
        template_kwargs=dict(
            nodeclaim_namespace=namespace_name,
            nodeclaim_name=nodeclaim_name,
            nodeclaim_owner=user,
            nodeclaim_flavor_name=node_flavor,
        )
    )
    generate_file(
        project_path=project_path,
        filename=f'storage-{storage_name}.yaml',
        template='/ydbd_slice/templates/8-node-block-4-2/storage.yaml',
        template_kwargs=dict(
            storage_name=storage_name,
            storage_namespace=namespace_name,
            nodeclaim_name=nodeclaim_name,
            nodeclaim_namespace=namespace_name,
        )
    )
    generate_file(
        project_path=project_path,
        filename=f'database-{database_name}.yaml',
        template='/ydbd_slice/templates/common/database.yaml',
        template_kwargs=dict(
            database_name=database_name,
            database_namespace=namespace_name,
            storage_name=storage_name,
            nodeclaim_name=nodeclaim_name,
            nodeclaim_namespace=namespace_name,
        )
    )

    generate_legacy_configs(project_path)
diff --git a/ydb/tools/ydbd_slice/kube/handlers.py b/ydb/tools/ydbd_slice/kube/handlers.py
new file mode 100644
index 0000000000..3603c016fb
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/handlers.py
@@ -0,0 +1,499 @@
+import re
+import os
+import sys
+import logging
+
+
+from collections import defaultdict
+from kubernetes.client import Configuration
+
+from ydb.tools.ydbd_slice import nodes, handlers
+from ydb.tools.ydbd_slice.kube import api, kubectl, yaml, generate, cms
+
+
+logger = logging.getLogger(__name__)
+
+
# kubernetes-style (RFC 1123 label) name: lower-case alphanumerics and '-',
# must start and end with an alphanumeric character
VALID_NAME_PATTERN = '[a-z0-9]([-a-z0-9]*[a-z0-9])?'


# manifest kinds that live at cluster scope and therefore need no metadata.namespace
CLUSTER_RESOURCES = [
    'namespace'
]
+
+
def check_cluster_requires_nodeclaim():
    """Return True when the kubeconfig's cluster host requires NodeClaim objects.

    Heuristic based on the API host of the default kubernetes client config.
    """
    host = Configuration.get_default_copy().host
    return '2a0d:d6c0' in host or 'cloud.yandex.net' in host
+
+
def get_all_manifests(directory):
    """Load and validate all kubernetes manifests found in `directory`.

    Returns a list of tuples (path, api_version, kind, namespace, name, data).
    Non-yaml and unparsable files are skipped with a log message; exits with
    code 2 on a missing metadata.namespace (for namespaced kinds), a duplicate
    (namespace, name) per resource type, or a missing required NodeClaim.
    """
    result = []
    objects = defaultdict(set)  # (api_version, kind) -> {(namespace, name), ...}
    for entry in os.listdir(directory):  # fix: was `file`, shadowing the builtin and the open handle below
        path = os.path.abspath(os.path.join(directory, entry))
        if not (entry.endswith('.yaml') or entry.endswith('.yml')):
            logger.info('skipping file: %s, not yaml file extension', path)
            continue
        try:
            with open(path) as manifest_file:
                data = yaml.load(manifest_file)
        except Exception as e:
            logger.error('failed to open and parse file: %s, error: %s', path, str(e))
            continue

        # check basic fields
        if not ('apiVersion' in data and 'kind' in data):
            logger.info('skipping file: %s, not kubernetes manifest', path)
            continue
        api_version = data['apiVersion']
        kind = data['kind'].lower()

        # check for explicit namespace
        if kind not in CLUSTER_RESOURCES and 'namespace' not in data['metadata']:
            logger.error(f'manifest {path} does not have metadata.namespace specified')
            sys.exit(2)

        namespace = data['metadata'].get('namespace')
        name = data['metadata']['name']

        # check for duplicate names
        type_key = (api_version, kind)
        obj_key = (namespace, name)
        if obj_key in objects[type_key]:
            logger.error(
                f'manifest for {api_version} {kind} with duplicated namespace and name {namespace}/{name} '
                f'found in {path}'
            )
            sys.exit(2)
        objects[type_key].add(obj_key)

        result.append((path, api_version, kind, namespace, name, data))

    # check if nodeclaims required
    cluster_requires_node_claim = check_cluster_requires_nodeclaim()
    if cluster_requires_node_claim and len(objects[('ydb.tech/v1alpha1', 'nodeclaim')]) == 0:
        # fix: 'mainfest' typo corrected in the user-facing message
        logger.error('Cluster from kubeconfig requires NodeClaim object to be created. '
                     'Please create NodeClaim manifest')
        sys.exit(2)

    return result
+
+
def validate_components_selector(value):
    """Validate a component kind/name token: a letter followed by letters, digits or '-'.

    Raises ValueError when the token is invalid.
    """
    # fix: fullmatch instead of match with '$' — '$' also matches before a
    # trailing newline, so 'name\n' used to be accepted
    if not re.fullmatch(r'[a-zA-Z][a-zA-Z0-9\-]*', value):
        raise ValueError('invalid value: %s' % value)
+
+
def parse_components_selector(value):
    """Parse a 'kind:name1,name2;kind2' selector into {kind: [names]}.

    A kind without names maps to an empty list (meaning: all names).
    Every kind and name token is validated; invalid tokens raise ValueError.
    """
    result = {}
    for item in value.strip().split(';'):
        if ':' not in item:
            validate_components_selector(item)
            result[item] = []
            continue
        kind, names = item.split(':')
        validate_components_selector(kind)
        parsed_names = []
        for name in names.split(','):
            validate_components_selector(name)
            parsed_names.append(name)
        result[kind] = parsed_names
    return result
+
+
def update_image(data, image):
    """Point a Storage/Database manifest dict at `image` (in place).

    Removes any 'version' field from spec and sets spec.image to
    {'name': image, 'pullPolicy': 'Always'}, creating the image mapping
    when absent.
    """
    spec = data['spec']
    spec.pop('version', None)
    image_section = spec.setdefault('image', {})
    image_section['name'] = image
    image_section['pullPolicy'] = 'Always'
+
+
def update_manifest(path, data):
    """Write `data` as YAML to `path` atomically, skipping no-op rewrites.

    When the file already exists and parses to an equal structure, nothing is
    written; otherwise the file is replaced via a .tmp file and os.rename.
    """
    if os.path.exists(path):
        with open(path) as existing:
            current = yaml.load(existing.read())
        if current == data:
            return

    logger.debug(f'updating manifest {path}')
    tmp_path = "%s.tmp" % path
    with open(tmp_path, 'w') as out:
        yaml.dump(data, out)
    os.rename(tmp_path, path)
+
+
def get_nodes(api_client, project_path, manifests):
    """Return a sorted list of node hostnames claimed by the NodeClaim manifests.

    NodeClaims absent on the cluster (HTTP 404) are skipped with a warning;
    any other API error propagates.
    """
    node_list = []
    for (path, api_version, kind, namespace, name, data) in manifests:
        if not (api_version in ['ydb.tech/v1alpha1'] and kind in ['nodeclaim']):
            continue
        namespace = data['metadata']['namespace']
        name = data['metadata']['name']
        try:
            obj_nodes = api.get_nodeclaim_nodes(api_client, namespace, name)
            node_list.extend(obj_nodes)
        except api.ApiException as e:
            if e.status == 404:
                logger.warning(f'NodeClaim {namespace}/{name} not found in cluster, skipping')
            else:
                raise
    node_list.sort()
    return node_list
+
+
def manifests_ydb_set_image(project_path, manifests, image):
    """Set the container image on every Storage/Database manifest and persist it."""
    for (path, api_version, kind, _, _, data) in manifests:
        if api_version in ['ydb.tech/v1alpha1'] and kind in ['storage', 'database']:
            update_image(data, image)
            update_manifest(path, data)
+
+
def manifests_ydb_filter_components(project_path, manifests, update_components):
    """Return manifests filtered by the components selector.

    Non-YDB manifests always pass through unchanged.  Storage/Database
    manifests pass when `update_components` is None, or when their kind is
    selected and the name list for that kind is empty or contains the
    manifest's metadata name.
    """
    result = []
    for entry in manifests:
        (path, api_version, kind, namespace, name, data) = entry
        if not (api_version in ['ydb.tech/v1alpha1'] and kind in ['storage', 'database']):
            result.append(entry)
            continue
        name = data['metadata']['name']
        if update_components is not None:
            if kind not in update_components:
                logger.info(f'skipping manifest {path}, not specified in components')
                continue
            names_to_update = update_components.get(kind, [])
            if len(names_to_update) > 0 and name not in names_to_update:
                logger.info(f'skipping manifest {path}, not specified in names')
                continue
        result.append((path, api_version, kind, namespace, name, data))
    return result
+
+
+#
+# macro level nodeclaim functions
def slice_namespace_apply(api_client, project_path, manifests):
    """Apply every Namespace manifest via the kubernetes API."""
    for (path, _, kind, _, _, data) in manifests:
        if kind == 'namespace':
            api.apply_namespace(api_client, data, path)
+
+
def slice_namespace_delete(api_client, project_path, manifests):
    """Delete every Namespace named in the manifests via the kubernetes API."""
    for (_, _, kind, _, name, _) in manifests:
        if kind == 'namespace':
            api.delete_namespace(api_client, name)
+
+
def slice_nodeclaim_apply(api_client, project_path, manifests):
    """Apply every ydb.tech/v1alpha1 NodeClaim manifest via the kubernetes API."""
    for (path, api_version, kind, _, _, data) in manifests:
        if api_version == 'ydb.tech/v1alpha1' and kind == 'nodeclaim':
            api.apply_nodeclaim(api_client, data, path)
+
+
def slice_nodeclaim_wait_ready(api_client, project_path, manifests):
    """Block until every NodeClaim manifest reports an OK state; exit on timeout."""
    for (path, api_version, kind, namespace, name, data) in manifests:
        if api_version != 'ydb.tech/v1alpha1' or kind != 'nodeclaim':
            continue
        metadata = data['metadata']
        try:
            api.wait_nodeclaim_state_ok(api_client, metadata['namespace'], metadata['name'])
        except TimeoutError as e:
            sys.exit(e.args[0])
+
+
def slice_nodeclaim_nodes(api_client, project_path, manifests):
    """Print the hostnames of all nodes claimed by the manifests' NodeClaims.

    Exits with a diagnostic message when no nodes are claimed.
    """
    node_list = get_nodes(api_client, project_path, manifests)
    if len(node_list) == 0:
        # fix: message typos corrected ('was claimed' -> 'were claimed',
        # 'presense' -> 'presence')
        sys.exit('No nodes were claimed. Please check nodeclaim presence and status.')
    for node in node_list:
        print(node)
+
+
def slice_nodeclaim_format(api_client, project_path, manifests):
    """Format drives on every claimed node; no-op when no nodes are claimed."""
    claimed = get_nodes(api_client, project_path, manifests)
    if not claimed:
        logger.info('no nodes found, nothing to format.')
        return
    handlers.format_drivers(nodes.Nodes(claimed))
+
+
def slice_nodeclaim_delete(api_client, project_path, manifests):
    """Delete all NodeClaims, then wait for their removal; exit on timeout."""
    nodeclaims = []
    for (path, api_version, kind, namespace, name, data) in manifests:
        if api_version != 'ydb.tech/v1alpha1' or kind != 'nodeclaim':
            continue
        metadata = data['metadata']
        api.delete_nodeclaim(api_client, metadata['namespace'], metadata['name'])
        nodeclaims.append((metadata['namespace'], metadata['name']))
    if not nodeclaims:
        logger.info(f'no nodeclaims found in {project_path}')
    for namespace, name in nodeclaims:
        try:
            api.wait_nodeclaim_deleted(api_client, namespace, name)
        except TimeoutError as e:
            sys.exit(e.args[0])
+
+
+#
+# macro level ydb functions
def slice_ydb_apply(api_client, project_path, manifests):
    """Apply YDB objects in dependency order: Storages, CMS config items, Databases.

    After applying each object, the server-normalized spec is written back to
    the local manifest file.  When legacy CMS config item files exist in the
    project, the function waits for every Storage to become ready and pushes
    the config items to the cluster before any Database is applied.
    """
    # process storages first
    for (path, api_version, kind, namespace, name, data) in manifests:
        if not (kind in ['storage', 'database'] and api_version in ['ydb.tech/v1alpha1']):
            continue
        namespace = data['metadata']['namespace']
        name = data['metadata']['name']

        if kind == 'storage':
            api.apply_storage(api_client, data, path)
            new_data = api.get_storage(api_client, namespace, name)
            if 'status' in new_data:
                new_data.pop('status')
            api.drop_kubectl_last_applied_configuration(new_data)
            # persist the server-normalized spec into the local manifest
            data['spec'] = new_data['spec']
            update_manifest(path, data)

    config_items = cms.get_from_files(project_path)
    if config_items is not None:
        logger.debug(
            f'found {len(config_items)} legacy cms config items, '
            'need to wait for storage to become ready to apply configs'
        )
        # if configs present, then wait for storage
        for (path, api_version, kind, namespace, name, data) in manifests:
            if not (kind in ['storage'] and api_version in ['ydb.tech/v1alpha1']):
                continue
            namespace = data['metadata']['namespace']
            name = data['metadata']['name']
            try:
                api.wait_storage_state_ready(api_client, namespace, name)
            except TimeoutError as e:
                sys.exit(e.args[0])

        # and apply configs through the claimed nodes' gRPC endpoints
        node_list = get_nodes(api_client, project_path, manifests)
        if len(node_list) == 0:
            raise RuntimeError('no nodes found, cannot apply legacy cms config items.')
        cms.apply_legacy_cms_config_items(config_items, [f'grpc://{i}:2135' for i in node_list])

    # process databases later
    for (path, api_version, kind, namespace, name, data) in manifests:
        if not (kind in ['storage', 'database'] and api_version in ['ydb.tech/v1alpha1']):
            continue
        namespace = data['metadata']['namespace']
        name = data['metadata']['name']

        if kind == 'database':
            api.apply_database(api_client, data, path)
            new_data = api.get_database(api_client, namespace, name)
            if 'status' in new_data:
                new_data.pop('status')
            api.drop_kubectl_last_applied_configuration(new_data)
            data['spec'] = new_data['spec']
            update_manifest(path, data)
+
+
def slice_ydb_wait_ready(api_client, project_path, manifests, wait_ready):
    """Wait for Storage/Database objects to become ready when `wait_ready` is set.

    Exits the process with the timeout message when a wait times out.
    """
    if not wait_ready:
        return
    waiters = {
        'storage': api.wait_storage_state_ready,
        'database': api.wait_database_state_ready,
    }
    for (path, api_version, kind, namespace, name, data) in manifests:
        if api_version != 'ydb.tech/v1alpha1' or kind not in waiters:
            continue
        metadata = data['metadata']
        try:
            waiters[kind](api_client, metadata['namespace'], metadata['name'])
        except TimeoutError as e:
            sys.exit(e.args[0])
+
+
def slice_ydb_restart(api_client, project_path, manifests):
    """Restart every storage-node and dynamic-node pod belonging to the manifests."""
    selectors = {
        'storage': 'app.kubernetes.io/component=storage-node,app.kubernetes.io/instance=%s',
        'database': 'app.kubernetes.io/component=dynamic-node,app.kubernetes.io/instance=%s',
    }
    pods_to_restart = set()
    for (path, api_version, kind, namespace, name, data) in manifests:
        if api_version != 'ydb.tech/v1alpha1' or kind not in selectors:
            continue
        metadata = data['metadata']
        pods_to_restart.update(
            kubectl.get_pods_by_selector(selectors[kind] % metadata['name'], metadata['namespace'])
        )
    if len(pods_to_restart) > 0:
        kubectl.restart_pods_in_parallel(pods_to_restart)
+
+
def slice_ydb_delete(api_client, project_path, manifests):
    """Delete all Database and Storage objects, then wait for both to disappear.

    Exits the process with the timeout message when a wait times out.
    """
    storages = []
    databases = []
    for (path, api_version, kind, namespace, name, data) in manifests:
        if api_version != 'ydb.tech/v1alpha1' or kind not in ('storage', 'database'):
            continue
        metadata = data['metadata']
        key = (metadata['namespace'], metadata['name'])
        if kind == 'database':
            api.delete_database(api_client, key[0], key[1])
            databases.append(key)
        else:
            api.delete_storage(api_client, key[0], key[1])
            storages.append(key)
    # databases are waited on first, matching the deletion order above
    for waiter, deleted in ((api.wait_database_deleted, databases), (api.wait_storage_deleted, storages)):
        for namespace, name in deleted:
            try:
                waiter(api_client, namespace, name)
            except TimeoutError as e:
                sys.exit(e.args[0])
+
+
def slice_ydb_storage_wait_pods_deleted(api_client, project_path, manifests):
    """Wait until every storage-node pod of the manifests' Storage objects is gone."""
    pods_to_delete = set()
    for (path, api_version, kind, namespace, name, data) in manifests:
        if api_version != 'ydb.tech/v1alpha1' or kind != 'storage':
            continue
        metadata = data['metadata']
        pods_to_delete.update(
            kubectl.get_pods_by_selector(
                'app.kubernetes.io/component=storage-node,app.kubernetes.io/instance=%s' % metadata['name'],
                metadata['namespace'],
            )
        )
    if pods_to_delete:
        api.wait_pods_deleted(api_client, pods_to_delete)
+
+
def validate_name(name, field):
    """Return `name` unchanged when it matches VALID_NAME_PATTERN.

    Raises RuntimeError naming `field` in the message otherwise.
    """
    if re.fullmatch(VALID_NAME_PATTERN, name):
        return name
    raise RuntimeError(
        f"Cannot use \"{name}\" as {field}. {field.capitalize()} must consist of lower case alphanumeric "
        "characters, must start and end with an alphanumeric character "
        f"(e.g. 'my-name', or '123-abc', regex used for validation is \"{VALID_NAME_PATTERN}\")"
    )
+
+
+#
+# generate scenarios
def slice_generate_8_node_block_4_2(project_path, user, slice_name, node_flavor):
    """Derive and validate object names for an 8-node block-4-2 slice, then generate it.

    Namespace is 'dev-<user>-<slice>', nodeclaim and storage reuse the slice
    name, and the database is '<slice>-db1'.  Invalid names raise RuntimeError
    via validate_name().
    """
    slice_name = validate_name(slice_name, 'slice name')
    namespace_name = validate_name(f'dev-{user}-{slice_name}', 'namespace name')
    nodeclaim_name = slice_name
    storage_name = slice_name
    database_name = validate_name(f'{slice_name}-db1', 'database name')

    generate.generate_8_node_block_4_2(
        project_path=project_path,
        user=user,
        namespace_name=namespace_name,
        nodeclaim_name=nodeclaim_name,
        node_flavor=node_flavor,
        storage_name=storage_name,
        database_name=database_name,
    )
+
+
def slice_generate(project_path, user, slice_name, template, template_vars):
    """Dispatch slice manifest generation by template name; exit for unknown templates."""
    if template != '8-node-block-4-2':
        sys.exit(f'Slice template {template} not implemented.')
    if 'node_flavor' not in template_vars:
        sys.exit(f'Template {template} requires node_flavor to be specified. '
                 'Please use argument: -v node_flavor=<your_desired_node_flavor_here>')
    slice_generate_8_node_block_4_2(project_path, user, slice_name, node_flavor=template_vars['node_flavor'])
+
+
def slice_install(project_path, manifests, wait_ready):
    """Full installation: namespace and nodeclaims, wipe any existing YDB,
    format drives, then deploy YDB and optionally wait for readiness."""
    with api.ApiClient() as api_client:
        slice_namespace_apply(api_client, project_path, manifests)
        slice_nodeclaim_apply(api_client, project_path, manifests)
        slice_nodeclaim_wait_ready(api_client, project_path, manifests)
        # remove any previous installation before formatting the drives
        slice_ydb_delete(api_client, project_path, manifests)
        slice_ydb_storage_wait_pods_deleted(api_client, project_path, manifests)
        slice_nodeclaim_format(api_client, project_path, manifests)
        slice_ydb_apply(api_client, project_path, manifests)
        slice_ydb_wait_ready(api_client, project_path, manifests, wait_ready)
+
+
def slice_update(project_path, manifests, wait_ready):
    """Update in place: re-apply nodeclaims and YDB objects, restart pods,
    optionally wait for readiness (no drive formatting)."""
    with api.ApiClient() as api_client:
        slice_nodeclaim_apply(api_client, project_path, manifests)
        slice_nodeclaim_wait_ready(api_client, project_path, manifests)
        slice_ydb_apply(api_client, project_path, manifests)
        slice_ydb_restart(api_client, project_path, manifests)
        slice_ydb_wait_ready(api_client, project_path, manifests, wait_ready)
+
+
def slice_stop(project_path, manifests):
    """Stop the slice by deleting its YDB objects (nodeclaims are kept)."""
    with api.ApiClient() as api_client:
        slice_ydb_delete(api_client, project_path, manifests)
+
+
def slice_start(project_path, manifests, wait_ready):
    """Start the slice by re-applying its YDB objects; optionally wait for readiness."""
    with api.ApiClient() as api_client:
        slice_ydb_apply(api_client, project_path, manifests)
        slice_ydb_wait_ready(api_client, project_path, manifests, wait_ready)
+
+
def slice_restart(project_path, manifests):
    """Restart all YDB pods of the slice without touching the manifests."""
    with api.ApiClient() as api_client:
        slice_ydb_restart(api_client, project_path, manifests)
+
+
def slice_nodes(project_path, manifests):
    """Print the node hostnames claimed by the slice's NodeClaims."""
    with api.ApiClient() as api_client:
        slice_nodeclaim_nodes(api_client, project_path, manifests)
+
+
def slice_format(project_path, manifests, wait_ready):
    """Wipe and redeploy: delete YDB objects, format drives, re-apply YDB,
    optionally wait for readiness."""
    with api.ApiClient() as api_client:
        slice_ydb_delete(api_client, project_path, manifests)
        slice_ydb_storage_wait_pods_deleted(api_client, project_path, manifests)
        slice_nodeclaim_format(api_client, project_path, manifests)
        slice_ydb_apply(api_client, project_path, manifests)
        slice_ydb_wait_ready(api_client, project_path, manifests, wait_ready)
+
+
def slice_clear(project_path, manifests):
    """Delete YDB objects and format the drives, leaving nodeclaims and namespace."""
    with api.ApiClient() as api_client:
        slice_ydb_delete(api_client, project_path, manifests)
        slice_ydb_storage_wait_pods_deleted(api_client, project_path, manifests)
        slice_nodeclaim_format(api_client, project_path, manifests)
+
+
def slice_uninstall(project_path, manifests):
    """Full teardown: YDB objects, drive formatting, nodeclaims, then the namespace."""
    with api.ApiClient() as api_client:
        slice_ydb_delete(api_client, project_path, manifests)
        slice_ydb_storage_wait_pods_deleted(api_client, project_path, manifests)
        slice_nodeclaim_format(api_client, project_path, manifests)
        slice_nodeclaim_delete(api_client, project_path, manifests)
        slice_namespace_delete(api_client, project_path, manifests)
diff --git a/ydb/tools/ydbd_slice/kube/kubectl.py b/ydb/tools/ydbd_slice/kube/kubectl.py
new file mode 100644
index 0000000000..1b361eb2c9
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/kubectl.py
@@ -0,0 +1,106 @@
+import re
+import time
+import logging
+import subprocess as sp
+from collections import deque
+
+
+logger = logging.getLogger(__name__)
+
+
def run(cmd, timeout=10, stop_timeout=10, log_stdout=False, log_stderr=False):
    """Run `kubectl <cmd>` and return (returncode, stdout, stderr).

    Args:
        cmd: list of kubectl arguments; the 'kubectl' prefix is prepended here.
        timeout: seconds to wait for normal completion.
        stop_timeout: extra seconds after SIGTERM before escalating to SIGKILL.
        log_stdout, log_stderr: when true, echo the captured output at DEBUG.
    """
    if not isinstance(cmd, list):
        raise ValueError('cmd must be list')
    command = ['kubectl'] + cmd
    proc = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, text=True)
    try:
        stdout, stderr = proc.communicate(timeout=timeout)
    except sp.TimeoutExpired:
        # escalate gracefully: SIGTERM first, SIGKILL if it still will not exit
        logger.info('kubectl command timed out, terminating')
        proc.terminate()
        try:
            stdout, stderr = proc.communicate(timeout=stop_timeout)
        except sp.TimeoutExpired:
            logger.info('kubectl command stop timed out, killing')
            proc.kill()
            stdout, stderr = proc.communicate()
    logger.debug(f'kubectl return code: {proc.returncode}')
    if log_stdout:
        for line in stdout.split('\n'):
            logger.debug(f'stdout: {line}')
    if log_stderr:
        for line in stderr.split('\n'):
            logger.debug(f'stderr: {line}')
    return proc.returncode, stdout, stderr
+
+
def apply(path, namespace=None):
    """kubectl-apply the manifest file at `path`, optionally within `namespace`.

    Returns the (returncode, stdout, stderr) tuple from run().
    """
    logger.info('applying manifest %s, started', path)
    command = ['-n', namespace] if namespace else []
    command.extend(['apply', '-f', path])
    return run(command)
+
+
def get_pods_by_selector(label_selector, namespace=None):
    """Return [(namespace, name, uid), ...] for pods matching `label_selector`.

    Raises RuntimeError when the kubectl call fails.
    """
    logger.info(f'getting pods by selector {label_selector} in namespace {namespace}')
    command = ['-n', namespace] if namespace else []
    command.extend(['get', 'pods', '-l', label_selector, '-o', 'custom-columns=NAME:.metadata.name,UID:.metadata.uid'])
    rc, stdout, stderr = run(command)
    if rc != 0:
        raise RuntimeError(
            f"kubectl get pods: failed with code {rc}, error: {stderr}"
        )
    pod_list = []
    # first output row is the custom-columns header, skip it
    for line in stdout.strip().split('\n')[1:]:
        name, uid = re.split(r'\s+', line)
        pod_list.append((namespace, name, uid))
    return pod_list
+
+
def restart_pods_in_parallel(pods, window=20):
    """Delete (restart) pods concurrently with at most `window` kubectl calls in flight.

    Args:
        pods: iterable of (namespace, name, uid) tuples.
        window: maximum number of concurrent kubectl delete processes.

    Raises RuntimeError listing the pods whose deletion failed.
    """
    logger.info(f'restarting {len(pods)} pods, started')

    pods_failed = []
    pods_waiting = deque(sorted(pods))
    pods_processing = {}   # (namespace, name) -> Popen
    pods_restart_count = 0
    while len(pods_waiting) > 0 or len(pods_processing) > 0:
        # top up the window of concurrently running deletions
        while len(pods_waiting) > 0 and len(pods_processing) < window:
            pod_namespace, pod_name, _ = pods_waiting.popleft()
            pods_restart_count += 1
            logger.info('restarting pod %s/%s (%d/%d)', pod_namespace, pod_name, pods_restart_count, len(pods))
            proc = sp.Popen(
                ['kubectl', '-n', pod_namespace, 'delete', 'pod', '--now=true', '--wait=false', pod_name],
                # fix: text=True so stderr is str — without it the error log
                # below printed a bytes repr like b"..."
                stdout=sp.PIPE, stderr=sp.PIPE, text=True,
            )
            pods_processing[(pod_namespace, pod_name)] = proc
            time.sleep(0.05)

        # reap whatever has finished
        pods_done = []
        for (pod_namespace, pod_name), proc in pods_processing.items():
            rc = proc.poll()
            if rc is not None:
                if rc != 0:
                    _, stderr = proc.communicate()
                    logger.error("kubectl delete pod %s/%s: failed with code %d, error: %s" % (
                        pod_namespace, pod_name, proc.returncode, stderr)
                    )
                    pods_failed.append((pod_namespace, pod_name))
                pods_done.append((pod_namespace, pod_name))

        for pod_namespace, pod_name in pods_done:
            pods_processing.pop((pod_namespace, pod_name))

        time.sleep(0.05)

    if len(pods_failed) > 0:
        logger.info('restarting pods failed for %d pods', len(pods_failed))
        raise RuntimeError('failed to restart pods: %s' % pods_failed)
    else:
        logger.info('restarting pods succeeded')
diff --git a/ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/nodeclaim.yaml b/ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/nodeclaim.yaml
new file mode 100644
index 0000000000..f54cc8070b
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/nodeclaim.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: ydb.tech/v1alpha1
+kind: NodeClaim
+metadata:
+ name: {{ nodeclaim_name }}
+ namespace: {{ nodeclaim_namespace }}
+spec:
+ owner: {{ nodeclaim_owner }}
+ description: Nodes for my YDB dev slice.
+ nodes:
+ - flavor:
+ name: {{ nodeclaim_flavor_name }}
+ amount: 8
+
diff --git a/ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/storage.yaml b/ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/storage.yaml
new file mode 100644
index 0000000000..6fba4fd4bb
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/templates/8-node-block-4-2/storage.yaml
@@ -0,0 +1,103 @@
+---
+apiVersion: ydb.tech/v1alpha1
+kind: Storage
+metadata:
+ name: {{ storage_name }}
+ namespace: {{ storage_namespace }}
+ annotations:
+ ydb.tech/node-claim: {{ nodeclaim_name }}.{{ nodeclaim_namespace }}
+spec:
+ image:
+ name: cr.yandex/crpl7ipeu79oseqhcgn2/ydb:23.2.9
+ pullPolicy: IfNotPresent
+ nodes: 8
+ domain: Root
+ hostNetwork: true
+ dataStore: []
+ erasure: block-4-2
+ configuration: |
+ static_erasure: block-4-2
+
+ # host_configs:
+ # section will be filled automatically if not specified
+ # https://a.yandex-team.ru/arcadia/kikimr/tools/node-claim-operator/controller/controllers/storage/webhooks.py?rev=r12042066#L79 # noqa
+
+ # hosts:
+ # section will be filled automatically if not specified
+ # https://a.yandex-team.ru/arcadia/kikimr/tools/node-claim-operator/controller/controllers/storage/webhooks.py?rev=r12042066#L80 # noqa
+
+ # blob_storage_config:
+ # section will be filled automatically if not specified
+ # https://a.yandex-team.ru/arcadia/kikimr/tools/node-claim-operator/controller/controllers/storage/webhooks.py?rev=r12042066#L81 # noqa
+
+ domains_config:
+ domain:
+ - name: Root
+ storage_pool_types:
+ - kind: ssd # please double check this field with disk kind on your selected nodes
+ pool_config:
+ box_id: '1'
+ erasure_species: block-4-2
+ kind: ssd
+ pdisk_filter:
+ - property:
+ - {type: SSD} # please double check this field with disk kind on your selected nodes
+ vdisk_kind: Default
+ state_storage:
+ - ring:
+ node: [1, 2, 3, 4, 5, 6, 7, 8]
+ nto_select: 5
+ ssid: 1
+
+ channel_profile_config:
+ profile:
+ - channel:
+ - erasure_species: block-4-2
+ pdisk_category: 1
+ storage_pool_kind: ssd # please double check this field with disk kind on your selected nodes
+ - erasure_species: block-4-2
+ pdisk_category: 1
+ storage_pool_kind: ssd # please double check this field with disk kind on your selected nodes
+ - erasure_species: block-4-2
+ pdisk_category: 1
+ storage_pool_kind: ssd # please double check this field with disk kind on your selected nodes
+ profile_id: 0
+
+ actor_system_config:
+ batch_executor: 2
+ io_executor: 3
+ executor:
+ - name: System
+ spin_threshold: 0
+ threads: 8
+ type: BASIC
+ - name: User
+ spin_threshold: 0
+ threads: 14
+ type: BASIC
+ - name: Batch
+ spin_threshold: 0
+ threads: 4
+ type: BASIC
+ - name: IO
+ threads: 2
+ time_per_mailbox_micro_secs: 100
+ type: IO
+ - name: IC
+ spin_threshold: 10
+ threads: 4
+ time_per_mailbox_micro_secs: 100
+ type: BASIC
+ scheduler:
+ progress_threshold: 10000
+ resolution: 64
+ spin_threshold: 0
+ service_executor:
+ - executor_id: 4
+ service_name: Interconnect
+ sys_executor: 0
+ user_executor: 1
+
+ grpc_config:
+ port: 2135
+
diff --git a/ydb/tools/ydbd_slice/kube/templates/common/database.yaml b/ydb/tools/ydbd_slice/kube/templates/common/database.yaml
new file mode 100644
index 0000000000..1cd15bbbfa
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/templates/common/database.yaml
@@ -0,0 +1,21 @@
+---
+apiVersion: ydb.tech/v1alpha1
+kind: Database
+metadata:
+ name: {{ database_name }}
+ namespace: {{ database_namespace }}
+ annotations:
+ ydb.tech/node-claim: {{ nodeclaim_name }}.{{ nodeclaim_namespace }}
+spec:
+ image:
+ name: cr.yandex/crpl7ipeu79oseqhcgn2/ydb:23.2.9
+ pullPolicy: IfNotPresent
+ nodes: 3
+ domain: Root
+ resources:
+ storageUnits:
+ - count: 1
+ unitKind: ssd # please double check this field with disk kind on your selected nodes
+ storageClusterRef:
+ name: {{ storage_name }}
+
diff --git a/ydb/tools/ydbd_slice/kube/templates/common/namespace.yaml b/ydb/tools/ydbd_slice/kube/templates/common/namespace.yaml
new file mode 100644
index 0000000000..d59ce30954
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/templates/common/namespace.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ namespace_name }}
+spec:
+ finalizers:
+ - kubernetes
+
diff --git a/ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/table-profile.txt b/ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/table-profile.txt
new file mode 100644
index 0000000000..0fdc019e3c
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/table-profile.txt
@@ -0,0 +1,52 @@
+Kind: 34
+Order: 10
+MergeStrategy: 1
+Config {
+ TableProfilesConfig {
+ CompactionPolicies {
+ Name: "default"
+ }
+ ExecutionPolicies {
+ Name: "default"
+ }
+ PartitioningPolicies {
+ Name: "default"
+ AutoSplit: true
+ AutoMerge: false
+ SizeToSplit: 2147483648
+ }
+ StoragePolicies {
+ Name: "default"
+ ColumnFamilies {
+ StorageConfig {
+ SysLog {
+ PreferredPoolKind: "{{ preferred_pool_kind }}" # fix this manually and run kube-install or kube-update if you have different pool kind
+ }
+ Log {
+ PreferredPoolKind: "{{ preferred_pool_kind }}" # fix this manually and run kube-install or kube-update if you have different pool kind
+ }
+ Data {
+ PreferredPoolKind: "{{ preferred_pool_kind }}" # fix this manually and run kube-install or kube-update if you have different pool kind
+ }
+ }
+ }
+ }
+ ReplicationPolicies {
+ Name: "default"
+ }
+ CachingPolicies {
+ Name: "default"
+ # Note: code-level default is 32MB
+ # ExecutorCacheSize: 4194304
+ }
+ TableProfiles {
+ Name: "default"
+ CompactionPolicy: "default"
+ ExecutionPolicy: "default"
+ PartitioningPolicy: "default"
+ StoragePolicy: "default"
+ ReplicationPolicy: "default"
+ CachingPolicy: "default"
+ }
+ }
+}
diff --git a/ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/unified-agent.txt b/ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/unified-agent.txt
new file mode 100644
index 0000000000..26d142f59a
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/templates/legacy-cms-config-items/unified-agent.txt
@@ -0,0 +1,12 @@
+Kind: 2
+Order: 50
+MergeStrategy: 2
+Config {
+ LogConfig {
+ SysLog: false
+ UAClientConfig {
+ Uri: "[fd53::1]:16400"
+ GrpcMaxMessageSize: 4194304
+ }
+ }
+}
diff --git a/ydb/tools/ydbd_slice/kube/yaml.py b/ydb/tools/ydbd_slice/kube/yaml.py
new file mode 100644
index 0000000000..745146e601
--- /dev/null
+++ b/ydb/tools/ydbd_slice/kube/yaml.py
@@ -0,0 +1,30 @@
+import textwrap
+import ruamel.yaml
+
+from ruamel.yaml.scalarstring import LiteralScalarString
+
+
# Shared round-trip YAML processor used for all manifest load/dump in the
# ydbd_slice kube tooling.
yaml = ruamel.yaml.YAML()
# Always emit block style, never inline {...}/[...] flow collections.
yaml.default_flow_style = False
# Keep the original quoting of scalars when round-tripping loaded documents.
yaml.preserve_quotes = True
# 2-space indent for mappings and sequences; sequence dashes are not offset.
yaml.indent(mapping=2, sequence=2, offset=0)
+
+
def LSS(s):
    """Return *s* dedented and wrapped as a YAML literal scalar.

    LiteralScalarString makes the ruamel emitter render the value in
    literal block style (``|``) instead of a quoted flow scalar, which
    keeps embedded multi-line configuration readable.
    """
    dedented = textwrap.dedent(s)
    return LiteralScalarString(dedented)
+
+
def dump(data, *args, **kwargs):
    """Serialize *data* with the module's shared YAML emitter.

    For Storage/Database manifests (matched case-insensitively on
    ``kind``) a non-empty string ``spec.configuration`` is first wrapped
    into a literal block scalar (see ``LSS``) so the embedded YDB
    configuration is emitted in readable ``|`` style instead of one long
    quoted line.  Note: this mutates ``data`` in place before dumping,
    as the original did.

    Fixes: the six-level if-pyramid is flattened into a single guard;
    a non-string ``kind`` no longer raises AttributeError on ``.lower()``
    and a non-dict ``spec`` no longer raises on the membership test.
    """
    if (
        isinstance(data, dict)
        and isinstance(data.get('kind'), str)
        and data['kind'].lower() in ('storage', 'database')
        and isinstance(data.get('spec'), dict)
        and isinstance(data['spec'].get('configuration'), str)
        and data['spec']['configuration'].strip()
    ):
        data['spec']['configuration'] = LSS(data['spec']['configuration'])
    return yaml.dump(data, *args, **kwargs)
+
+
def load(*args, **kwargs):
    """Parse YAML input using the module's shared round-trip loader."""
    document = yaml.load(*args, **kwargs)
    return document
diff --git a/ydb/tools/ydbd_slice/nodes.py b/ydb/tools/ydbd_slice/nodes.py
new file mode 100644
index 0000000000..6395b6531f
--- /dev/null
+++ b/ydb/tools/ydbd_slice/nodes.py
@@ -0,0 +1,135 @@
+import os
+import sys
+import logging
+import subprocess
+
+
logger = logging.getLogger(__name__)


class Nodes(object):
    """Runs commands on, and copies files to, a fixed set of slice hosts
    over ssh/rsync.  With ``dry_run=True`` every operation is only
    logged, never executed."""

    def __init__(self, nodes, dry_run=False):
        """
        Args:
            nodes: non-empty list of host name strings; ``nodes[0]``
                serves as the hub for node-to-node copies in ``copy()``.
            dry_run: when truthy, log intended actions without running them.
        """
        assert isinstance(nodes, list)
        assert len(nodes) > 0
        assert isinstance(nodes[0], str)
        self._nodes = nodes
        self._dry_run = bool(dry_run)
        self._logger = logger.getChild(self.__class__.__name__)

    @property
    def nodes_list(self):
        """The full host list, hub first."""
        return self._nodes

    @staticmethod
    def _wrap_ssh_cmd(cmd, host):
        """Build the ssh argv that runs shell command *cmd* on *host*.

        Host-key checking is disabled (slice nodes get reinstalled and
        change keys often); -A forwards the ssh agent so onward hops can
        authenticate.
        """
        return ['ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '-A', host, cmd]

    def _check_async_execution(self, running_jobs, check_retcode=True, results=None):
        """Wait for each (cmd, process, host) job and report failures.

        If *check_retcode* is true, terminate the program via sys.exit()
        after logging the first non-zero return code.  If *results* is a
        dict, store a per-host {'retcode', 'stdout', 'stderr'} entry for
        every job (stdout/stderr are bytes, since Popen is opened without
        text mode).
        """
        if self._dry_run:
            return

        assert results is None or isinstance(results, dict)

        for cmd, process, host in running_jobs:
            out, err = process.communicate()
            # After communicate() the process has exited; returncode is
            # set and equals what poll() would report.
            retcode = process.returncode
            if retcode != 0:
                status_line = "execution '{cmd}' finished with '{retcode}' retcode".format(
                    cmd=cmd,
                    retcode=retcode,
                )
                # Fix: the original message had no newline after
                # status_line, gluing the retcode to the "stdout is:"
                # header in the log output.
                self._logger.critical(
                    "{status_line}\n"
                    "stdout is:\n"
                    "{out}\n"
                    "stderr is:\n"
                    "{err}".format(
                        status_line=status_line,
                        out=out,
                        err=err
                    )
                )
                if check_retcode:
                    sys.exit(status_line)
            if results is not None:
                results[host] = {
                    'retcode': retcode,
                    'stdout': out,
                    'stderr': err
                }

    def execute_async_ret(self, cmd, check_retcode=True, nodes=None, results=None):
        """Start *cmd* over ssh on every node (or on *nodes* if given).

        Returns the list of (argv, Popen, host) tuples for the started
        processes; empty under dry_run.  *check_retcode* and *results*
        are accepted only for signature symmetry with execute_async()
        and are unused here.
        """
        running_jobs = []
        for host in (nodes if nodes is not None else self._nodes):
            self._logger.info("execute '{cmd}' at '{host}'".format(cmd=cmd, host=host))
            if self._dry_run:
                continue

            actual_cmd = self._wrap_ssh_cmd(cmd, host)
            process = subprocess.Popen(actual_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            running_jobs.append((actual_cmd, process, host))
        return running_jobs

    def execute_async(self, cmd, check_retcode=True, nodes=None, results=None):
        """Run *cmd* on all nodes in parallel and wait for completion."""
        running_jobs = self.execute_async_ret(cmd, check_retcode, nodes, results)
        self._check_async_execution(running_jobs, check_retcode, results)

    def _copy_on_node(self, local_path, host, remote_path):
        """rsync *local_path* from this machine to host:remote_path."""
        self._logger.info(
            "copy from localhost path '{local_path}' to host '{host}' in '{remote_path}'".format(
                local_path=local_path,
                host=host,
                remote_path=remote_path
            )
        )
        if self._dry_run:
            return
        destination = "{host}:{path}".format(host=host, path=remote_path)
        # --rsync-path=sudo allows writing to root-owned destinations.
        subprocess.check_call(["rsync", "-avqLW", "--del", "--no-o", "--no-g", "--rsync-path=sudo rsync", "--progress",
                               local_path, destination])

    def _copy_between_nodes(self, hub, hub_path, hosts, remote_path):
        """Fan out hub:hub_path to *remote_path* on each of *hosts* by
        launching rsync-over-ssh on every destination host in parallel."""
        if isinstance(hosts, str):
            hosts = [hosts]
        assert isinstance(hosts, list)

        src = "{hub}:{hub_path}".format(hub=hub, hub_path=hub_path)
        running_jobs = []
        for dst in hosts:
            self._logger.info(
                "copy from '{src_host}:{src_path}' to host '{dst_host}:{dst_path}'".format(
                    src_host=hub,
                    src_path=hub_path,
                    dst_host=dst,
                    dst_path=remote_path
                )
            )
            if self._dry_run:
                continue
            cmd = [
                "ssh", dst, "-A", "sudo", "rsync", "-avqW", "--del", "--no-o", "--no-g",
                "--rsh='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -l %s'" % os.getenv("USER"),
                src, remote_path,
            ]
            process = subprocess.Popen(cmd)
            running_jobs.append((cmd, process, dst))

        self._check_async_execution(running_jobs)

    def copy(self, local_path, remote_path, directory=False, compressed_path=None):
        """Copy *local_path* to *remote_path* on every node.

        The payload is rsynced to the hub (nodes[0]) first and then
        fanned out hub-to-node.  When *compressed_path* is given, the
        payload is zstd-compressed locally (reusing an existing archive
        when mtimes match — presumably relies on zstd preserving the
        source mtime on the archive; TODO confirm) and decompressed on
        the nodes afterwards.  directory=True appends a trailing slash
        so rsync copies directory contents.
        """
        if directory:
            local_path += '/'
            remote_path += '/'
        if compressed_path is not None:
            self._logger.info('compressing %s to %s' % (local_path, compressed_path))
            if not os.path.isfile(compressed_path) or os.stat(local_path).st_mtime != os.stat(compressed_path).st_mtime:
                subprocess.check_call(['zstd', '-5f', local_path, '-o', compressed_path, '-T0'])
            local_path = compressed_path
            original_remote_path = remote_path
            remote_path += '.zstd'
        hub = self._nodes[0]
        self._copy_on_node(local_path, hub, remote_path)
        self._copy_between_nodes(hub, remote_path, self._nodes[1:], remote_path)
        if compressed_path is not None:
            # Decompress on each node only when the archive and the target
            # have different mtimes (i.e. the target is stale or absent).
            self.execute_async('if [ "{from_}" -nt "{to}" -o "{to}" -nt "{from_}" ]; then sudo zstd -df "{from_}" -o "{to}" -T0; fi'.format(from_=remote_path, to=original_remote_path))
diff --git a/ydb/tools/ydbd_slice/ya.make b/ydb/tools/ydbd_slice/ya.make
new file mode 100644
index 0000000000..adc5af24ec
--- /dev/null
+++ b/ydb/tools/ydbd_slice/ya.make
@@ -0,0 +1,41 @@
+RECURSE(
+ bin
+)
+
+PY3_LIBRARY(ydbd_slice)
+
+PY_SRCS(
+ __init__.py
+ cluster_description.py
+ kube/__init__.py
+ kube/api.py
+ kube/cms.py
+ kube/docker.py
+ kube/generate.py
+ kube/handlers.py
+ kube/kubectl.py
+ kube/yaml.py
+ nodes.py
+ handlers.py
+)
+
+PEERDIR(
+ ydb/tools/cfg
+ contrib/python/PyYAML
+ contrib/python/ruamel.yaml
+ contrib/python/kubernetes
+ contrib/python/Jinja2
+ contrib/python/grpcio
+ contrib/python/tenacity
+)
+
+RESOURCE(
+ kube/templates/common/namespace.yaml /ydbd_slice/templates/common/namespace.yaml
+ kube/templates/common/database.yaml /ydbd_slice/templates/common/database.yaml
+ kube/templates/8-node-block-4-2/nodeclaim.yaml /ydbd_slice/templates/8-node-block-4-2/nodeclaim.yaml
+ kube/templates/8-node-block-4-2/storage.yaml /ydbd_slice/templates/8-node-block-4-2/storage.yaml
+ kube/templates/legacy-cms-config-items/table-profile.txt /ydbd_slice/templates/legacy-cms-config-items/table-profile.txt
+ kube/templates/legacy-cms-config-items/unified-agent.txt /ydbd_slice/templates/legacy-cms-config-items/unified-agent.txt
+)
+
+END()